diff --git a/.codacy.yml b/.codacy.yml
index a3673d804e3236..cd5231d5fe240b 100644
--- a/.codacy.yml
+++ b/.codacy.yml
@@ -6,19 +6,19 @@ exclude_paths:
   - src/collectors/python.d.plugin/python_modules/third_party/**
   - contrib/**
   - packaging/makeself/**
-  - web/gui/css/**
-  - web/gui/lib/**
-  - web/gui/old/**
-  - web/gui/src/**
-  - web/gui/v1/**
-  - web/gui/v2/**
-  - web/gui/main.js
+  - src/web/gui/css/**
+  - src/web/gui/lib/**
+  - src/web/gui/old/**
+  - src/web/gui/src/**
+  - src/web/gui/v1/**
+  - src/web/gui/v2/**
+  - src/web/gui/main.js
   - tests/**
   - aclk/tests/**
   - src/libnetdata/libjudy/**
   - src/database/sqlite/sqlite3.c
   - src/ml/dlib/**
-  - web/server/h2o/libh2o/**
+  - src/web/server/h2o/libh2o/**
   - build/**
   - build_external/**
   - packaging/**
diff --git a/.codeclimate.yml b/.codeclimate.yml
index 2d33eafd25ad14..c7ae689e2ce5e1 100644
--- a/.codeclimate.yml
+++ b/.codeclimate.yml
@@ -84,10 +84,10 @@ exclude_patterns:
   - ".githooks/"
   - "tests/"
   - "m4/"
-  - "web/css/"
-  - "web/lib/"
-  - "web/fonts/"
-  - "web/old/"
+  - "src/web/css/"
+  - "src/web/lib/"
+  - "src/web/fonts/"
+  - "src/web/old/"
   - "collectors/python.d.plugin/python_modules/pyyaml2/"
   - "collectors/python.d.plugin/python_modules/pyyaml3/"
   - "collectors/python.d.plugin/python_modules/urllib3/"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 82a5f8005b6146..ece601f61db062 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -19,6 +19,7 @@ src/exporting/ @thiagoftsm
 src/daemon/ @thiagoftsm @vkalintiris
 src/database/ @thiagoftsm @vkalintiris
 docs/ @tkatsoulas @Ancairon
+src/go/ @ilyam8
 src/health/ @thiagoftsm @vkalintiris
 src/health/health.d/ @thiagoftsm
 src/health/notifications/ @Ferroin @thiagoftsm
@@ -29,8 +30,8 @@ src/registry/ @novykh
 src/streaming/ @thiagoftsm
 system/ @Ferroin @tkatsoulas
 tests/ @Ferroin @vkalintiris @tkatsoulas
-web/ @thiagoftsm @vkalintiris
-web/gui/ @novykh
+src/web/ @thiagoftsm @vkalintiris
+src/web/gui/ @novykh
 src/logsmanagement/ @thiagoftsm
 
 # Ownership by filetype (overwrites ownership by directory)
diff --git a/.github/codeql/python-config.yml b/.github/codeql/python-config.yml
index 14d1b4dbc6c635..a31b3c805303a8 100644
--- a/.github/codeql/python-config.yml
+++ b/.github/codeql/python-config.yml
@@ -1,10 +1,9 @@
 paths-ignore:
   - .github
   - build_external/
-  - ml/dlib
-  - ml/json
+  - src/ml/dlib
   - tests/api
-  - web/gui
+  - src/web/gui
   - src/collectors/python.d.plugin/python_modules/pyyaml*
   - src/collectors/python.d.plugin/python_modules/third_party
   - src/collectors/python.d.plugin/python_modules/urllib3
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index b02b155d30624b..48b729622923ee 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -7,3 +7,9 @@ updates:
     labels:
       - "no changelog"
       - "area/ci"
+  - package-ecosystem: gomod
+    directory: /src/go/collectors/go.d.plugin
+    schedule:
+      interval: weekly
+    labels:
+      - "area/go"
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 9bd74190f6f372..30ebf663cadc81 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -66,7 +66,7 @@ area/docs:
           - "*.md"
           - "**/*.md"
           - "**/*.mdx"
-          - diagrams/**
+          - docs/diagrams/**
 
 # -----------------collectors----------------------
 
@@ -75,6 +75,7 @@ area/collectors:
     - changed-files:
       - any-glob-to-any-file:
           - src/collectors/**
+          - src/go/collectors/go.d.plugin/**
 
 collectors/plugins.d:
   - any:
@@ -136,6 +137,12 @@ collectors/freeipmi:
       - any-glob-to-any-file:
           - src/collectors/freeipmi.plugin/**
 
+collectors/go.d.plugin:
+  - any:
+    - changed-files:
+      - any-glob-to-any-file:
+          - src/go/collectors/go.d.plugin/**
+
 collectors/idlejitter:
   - any:
     - changed-files:
       - any-glob-to-any-file:
@@ -216,6 +223,12 @@ collectors/xenstat:
 
 # ----------------/collectors----------------------
 
+area/go:
+  - any:
+    - changed-files:
+      - any-glob-to-any-file:
+          - src/go/**
+
 area/health:
   - any:
     - changed-files:
@@ -265,14 +278,12 @@ area/tests:
           - tests/**
           - src/daemon/unit_test*
           - coverity-scan.sh
-          - cppcheck.sh
-          - netdata.cppcheck
 
 area/web:
   - any:
     - changed-files:
       - any-glob-to-any-file:
-          - web/**
+          - src/web/**
 
 area/logs-management:
   - any:
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6446d1400edb65..303fe399a1d7f4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -61,16 +61,17 @@ jobs:
             src/aclk/aclk-schemas/
             src/ml/dlib/
             src/fluent-bit/
-            web/server/h2o/libh2o/
+            src/web/server/h2o/libh2o/
           files_ignore: |
             netdata.spec.in
             **/*.md
-      - name: List all modified files in pattern
+      - name: List all changed files in pattern
+        continue-on-error: true
        env:
-          ALL_MODIFIED_FILES: ${{ steps.check-files.outputs.all_modified_files }}
+          ALL_CHANGED_FILES: ${{ steps.check-files.outputs.all_changed_files }}
        run: |
-          for file in ${ALL_MODIFIED_FILES}; do
-            echo "$file was modified"
+          for file in ${ALL_CHANGED_FILES}; do
+            echo "$file was changed"
           done
       - name: Check Run
         id: check-run
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 27d40ca89dcf75..bb9a929978e460 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -49,16 +49,17 @@ jobs:
             src/aclk/aclk-schemas/
             src/ml/dlib/
             src/fluent-bit/
-            web/server/h2o/libh2o/
+            src/web/server/h2o/libh2o/
           files_ignore: |
             netdata.spec.in
             **/*.md
-      - name: List all modified files in pattern
+      - name: List all changed files in pattern
+        continue-on-error: true
        env:
-          ALL_MODIFIED_FILES: ${{ steps.check-files.outputs.all_modified_files }}
+          ALL_CHANGED_FILES: ${{ steps.check-files.outputs.all_changed_files }}
        run: |
-          for file in ${ALL_MODIFIED_FILES}; do
-            echo "$file was modified"
+          for file in ${ALL_CHANGED_FILES}; do
+            echo "$file was changed"
           done
       - name: Check Run
         id: check-run
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index b18ffc38055590..6c2c36365b6f07 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -21,6 +21,7 @@ jobs:
     outputs:
       cpp: ${{ steps.cpp.outputs.run }}
       python: ${{ steps.python.outputs.run }}
+      go: ${{ steps.go.outputs.run }}
     steps:
       - name: Clone repository
         uses: actions/checkout@v4
@@ -66,6 +67,19 @@
           else
             echo "run=true" >> "${GITHUB_OUTPUT}"
           fi
+      - name: Check for Go changes
+        id: go
+        run: |
+          if [ "${{ steps.always.outputs.run }}" = "false" ]; then
+            if git diff --name-only origin/${{ github.base_ref }} HEAD | grep -Eq 'src/go/.*\.go' ; then
+              echo "run=true" >> "${GITHUB_OUTPUT}"
+              echo '::notice::Go code has changed, need to run CodeQL.'
+            else
+              echo "run=false" >> "${GITHUB_OUTPUT}"
+            fi
+          else
+            echo "run=true" >> "${GITHUB_OUTPUT}"
+          fi
 
   analyze-cpp:
     name: Analyze C/C++
@@ -116,3 +130,33 @@
         uses: github/codeql-action/analyze@v3
         with:
           category: "/language:python"
+
+  analyze-go:
+    name: Analyze Go
+    runs-on: ubuntu-latest
+    needs: prepare
+    if: needs.prepare.outputs.go == 'true'
+    strategy:
+      matrix:
+        tree:
+          - src/go/collectors/go.d.plugin
+    permissions:
+      security-events: write
+    steps:
+      - name: Git clone repository
+        uses: actions/checkout@v4
+        with:
+          submodules: recursive
+          fetch-depth: 0
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v3
+        with:
+          languages: go
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v3
+        with:
+          working-directory: ${{ matrix.tree }}
+      - name: Run CodeQL
+        uses: github/codeql-action/analyze@v3
+        with:
+          category: "/language:go"
diff --git a/.github/workflows/dashboard-pr.yml b/.github/workflows/dashboard-pr.yml
index 3552c90650fc22..418a8b8e6e233d 100644
--- a/.github/workflows/dashboard-pr.yml
+++ b/.github/workflows/dashboard-pr.yml
@@ -25,7 +25,7 @@ jobs:
       - name: Update Files
         id: update
         run: |
-          web/gui/bundle_dashboard_v1.py ${{ github.event.inputs.dashboard_version }}
+          src/web/gui/bundle_dashboard_v1.py ${{ github.event.inputs.dashboard_version }}
       - name: Create Pull Request
         id: pr
         uses: peter-evans/create-pull-request@v6
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 1beed584043592..da916ac0eb722c 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -55,16 +55,17 @@ jobs:
             src/aclk/aclk-schemas/
             src/ml/dlib/
             src/fluent-bit/
-            web/server/h2o/libh2o/
+            src/web/server/h2o/libh2o/
           files_ignore: |
             netdata.spec.in
             **/*.md
-      - name: List all modified files in pattern
+      - name: List all changed files in pattern
+        continue-on-error: true
        env:
-          ALL_MODIFIED_FILES: ${{ steps.check-files.outputs.all_modified_files }}
+          ALL_CHANGED_FILES: ${{ steps.check-files.outputs.all_changed_files }}
        run: |
-          for file in ${ALL_MODIFIED_FILES}; do
-            echo "$file was modified"
+          for file in ${ALL_CHANGED_FILES}; do
+            echo "$file was changed"
           done
       - name: Check Run
         id: check-run
diff --git a/.github/workflows/packaging.yml b/.github/workflows/packaging.yml
index 1b7e7f9e9342f0..7c58466f383ccd 100644
--- a/.github/workflows/packaging.yml
+++ b/.github/workflows/packaging.yml
@@ -65,15 +65,16 @@ jobs:
             src/aclk/aclk-schemas/
             src/ml/dlib/
             src/fluent-bit/
-            web/server/h2o/libh2o/
+            src/web/server/h2o/libh2o/
           files_ignore: |
             **/*.md
-      - name: List all modified files in pattern
+      - name: List all changed files in pattern
+        continue-on-error: true
        env:
-          ALL_MODIFIED_FILES: ${{ steps.check-files.outputs.all_modified_files }}
+          ALL_CHANGED_FILES: ${{ steps.check-files.outputs.all_changed_files }}
        run: |
-          for file in ${ALL_MODIFIED_FILES}; do
-            echo "$file was modified"
+          for file in ${ALL_CHANGED_FILES}; do
+            echo "$file was changed"
           done
       - name: Check Run
         id: check-run
diff --git a/.github/workflows/review.yml b/.github/workflows/review.yml
index 6c62dafc487b74..c7b03813512d4b 100644
--- a/.github/workflows/review.yml
+++ b/.github/workflows/review.yml
@@ -17,6 +17,7 @@ jobs:
       actionlint: ${{ steps.actionlint.outputs.run }}
       clangformat: ${{ steps.clangformat.outputs.run }}
       flake8: ${{ steps.flake8.outputs.run }}
+      golangci-lint: ${{ steps.golangci-lint.outputs.run }}
       hadolint: ${{ steps.hadolint.outputs.run }}
       shellcheck: ${{ steps.shellcheck.outputs.run }}
       yamllint: ${{ steps.yamllint.outputs.run }}
@@ -59,6 +60,17 @@
          else
echo "run=false" >> "${GITHUB_OUTPUT}" fi + - name: Check files for golangci-lint + id: golangci-lint + run: | + if [ "${{ contains(github.event.pull_request.labels.*.name, 'run-ci/golangci-lint') }}" = "true" ]; then + echo "run=true" >> "${GITHUB_OUTPUT}" + elif git diff --name-only origin/${{ github.base_ref }} HEAD -- | grep -Eq '.*\.go' ; then + echo "run=true" >> $GITHUB_OUTPUT + echo 'Go code has changed, need to run golangci-lint.' + else + echo "run=false" >> $GITHUB_OUTPUT + fi - name: Check files for hadolint id: hadolint run: | @@ -164,6 +176,26 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} reporter: github-pr-check + golangci-lint: + name: golangci-lint + needs: prep-review + if: needs.prep-review.outputs.golangci-lint == 'true' + strategy: + matrix: + tree: + - src/go/collectors/go.d.plugin + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run golangci-lint + uses: reviewdog/action-golangci-lint@v2 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + reporter: github-pr-check + golangci_lint_flags: '--timeout=10m' + workdir: ${{ matrix.tree }} + hadolint: name: hadolint needs: prep-review diff --git a/.gitignore b/.gitignore index b2a4ab76c9559f..f650a8a639189b 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ artifacts/ ebpf.plugin src/collectors/ebpf.plugin/reset_netdata_trace.sh !ebpf.plugin/ -src/collectors/ebpf.plugin/includes/ +src/libnetdata/ebpf/includes/ # protoc generated files *.pb.cc @@ -59,13 +59,13 @@ README TODO.md TODO.txt -web/gui/chart-info/ -web/gui/control.html -web/gui/dashboard.js -web/gui/datasource.css -web/gui/gadget.xml -web/gui/index_new.html -web/gui/version.txt +src/web/gui/chart-info/ +src/web/gui/control.html +src/web/gui/dashboard.js +src/web/gui/datasource.css +src/web/gui/gadget.xml +src/web/gui/index_new.html +src/web/gui/version.txt # related to karma/javascript/node /node_modules/ @@ -95,7 +95,7 @@ src/collectors/charts.d.plugin/charts.d.plugin src/collectors/python.d.plugin/python.d.plugin src/collectors/ioping.plugin/ioping.plugin src/collectors/go.d.plugin -web/netdata-switch-dashboard.sh +src/web/netdata-switch-dashboard.sh src/logsmanagement/stress_test/stress_test @@ -119,13 +119,10 @@ compile_commands.json webcopylocal* # converted diagrams -diagrams/*.png -diagrams/*.svg -diagrams/*.atxt -diagrams/plantuml.jar - -# cppcheck -cppcheck-build/ +docs/diagrams/*.png +docs/diagrams/*.svg +docs/diagrams/*.atxt +docs/diagrams/plantuml.jar # python virtual environment venv/ @@ -194,3 +191,10 @@ src/libnetdata/gorilla/fuzz-*.log # ignore build/ directory (default dir for many IDEs/LSPs) build/ + +# ignore rules for go plugin code +src/go/collectors/go.d.plugin/bin/ +src/go/collectors/go.d.plugin/mocks/springboot2/.gradle/ +src/go/collectors/go.d.plugin/mocks/tmp/* +!src/go/collectors/go.d.plugin/mocks/tmp/.gitkeep +src/go/collectors/go.d.plugin/vendor diff --git a/.gitmodules b/.gitmodules index 09a829c1d65bd2..862f4808d57ba9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -6,12 +6,12 @@ url = https://github.com/davisking/dlib.git shallow = true ignore = dirty -[submodule "web/server/h2o/libh2o"] - path = web/server/h2o/libh2o +[submodule "src/web/server/h2o/libh2o"] + path = src/web/server/h2o/libh2o url = https://github.com/h2o/h2o.git ignore = untracked [submodule "fluent-bit"] path = src/fluent-bit url = https://github.com/fluent/fluent-bit.git shallow = true - ignore = dirty \ No newline at end of file + ignore = dirty diff --git a/CHANGELOG.md b/CHANGELOG.md index 
index 17ddcc8ed7a21e..d1484baff72c5a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,15 +2,25 @@
 
 ## [**Next release**](https://github.com/netdata/netdata/tree/HEAD)
 
-[Full Changelog](https://github.com/netdata/netdata/compare/v1.44.2...HEAD)
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.44.3...HEAD)
 
 **Merged pull requests:**
 
+- Adjust storage tiers if we fail to create the requested number of tiers [\#16999](https://github.com/netdata/netdata/pull/16999) ([stelfrag](https://github.com/stelfrag))
+- Move diagrams/ under docs/ [\#16998](https://github.com/netdata/netdata/pull/16998) ([vkalintiris](https://github.com/vkalintiris))
+- Include Go plugin sources in main repository. [\#16997](https://github.com/netdata/netdata/pull/16997) ([Ferroin](https://github.com/Ferroin))
+- Small cleanup [\#16996](https://github.com/netdata/netdata/pull/16996) ([vkalintiris](https://github.com/vkalintiris))
+- Remove historical changelog and cppcheck [\#16995](https://github.com/netdata/netdata/pull/16995) ([vkalintiris](https://github.com/vkalintiris))
+- Remove config macros that are always set. [\#16994](https://github.com/netdata/netdata/pull/16994) ([vkalintiris](https://github.com/vkalintiris))
+- Use changed files in check-files workflow [\#16993](https://github.com/netdata/netdata/pull/16993) ([tkatsoulas](https://github.com/tkatsoulas))
+- Move web/ under src/ [\#16992](https://github.com/netdata/netdata/pull/16992) ([vkalintiris](https://github.com/vkalintiris))
+- Add spinlock to protect metric release [\#16989](https://github.com/netdata/netdata/pull/16989) ([stelfrag](https://github.com/stelfrag))
 - updated message ids for systemd and dbus [\#16987](https://github.com/netdata/netdata/pull/16987) ([ktsaou](https://github.com/ktsaou))
 - Update input skip patterns [\#16984](https://github.com/netdata/netdata/pull/16984) ([tkatsoulas](https://github.com/tkatsoulas))
 - Update input paths for tj-actions/changed-files [\#16982](https://github.com/netdata/netdata/pull/16982) ([tkatsoulas](https://github.com/tkatsoulas))
 - Update synology.md [\#16980](https://github.com/netdata/netdata/pull/16980) ([pschaer](https://github.com/pschaer))
 - Detect machine GUID change [\#16979](https://github.com/netdata/netdata/pull/16979) ([stelfrag](https://github.com/stelfrag))
+- Move CO-RE headers \(integration between eBPF and Network Viewer\) [\#16978](https://github.com/netdata/netdata/pull/16978) ([thiagoftsm](https://github.com/thiagoftsm))
 - Regenerate integrations.js [\#16974](https://github.com/netdata/netdata/pull/16974) ([netdatabot](https://github.com/netdatabot))
 - Use C++14 by default when building on systems that support it. [\#16972](https://github.com/netdata/netdata/pull/16972) ([Ferroin](https://github.com/Ferroin))
 - change edac ecc errors from incremental to absolute [\#16970](https://github.com/netdata/netdata/pull/16970) ([ilyam8](https://github.com/ilyam8))
@@ -279,6 +289,10 @@
 - Remove openSUSE 15.4 from CI [\#16449](https://github.com/netdata/netdata/pull/16449) ([tkatsoulas](https://github.com/tkatsoulas))
 - Remove fedora 37 from CI [\#16422](https://github.com/netdata/netdata/pull/16422) ([tkatsoulas](https://github.com/tkatsoulas))
 
+## [v1.44.3](https://github.com/netdata/netdata/tree/v1.44.3) (2024-02-12)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.44.2...v1.44.3)
+
 ## [v1.44.2](https://github.com/netdata/netdata/tree/v1.44.2) (2024-02-06)
 
 [Full Changelog](https://github.com/netdata/netdata/compare/v1.44.1...v1.44.2)
@@ -390,15 +404,6 @@
 - Don't print errors from reading filtered alerts [\#16417](https://github.com/netdata/netdata/pull/16417) ([MrZammler](https://github.com/MrZammler))
 - /api/v1/charts: bring back chart id to `title` [\#16416](https://github.com/netdata/netdata/pull/16416) ([ilyam8](https://github.com/ilyam8))
 - fix: don't count reused connections as new [\#16414](https://github.com/netdata/netdata/pull/16414) ([ilyam8](https://github.com/ilyam8))
-- Add support for installing a specific major version of the agent on install. [\#16413](https://github.com/netdata/netdata/pull/16413) ([Ferroin](https://github.com/Ferroin))
-- Remove queue limit from ACLK sync event loop [\#16411](https://github.com/netdata/netdata/pull/16411) ([stelfrag](https://github.com/stelfrag))
-- Regenerate integrations.js [\#16409](https://github.com/netdata/netdata/pull/16409) ([netdatabot](https://github.com/netdatabot))
-- Improve handling around EPEL requirement for RPM packages. [\#16406](https://github.com/netdata/netdata/pull/16406) ([Ferroin](https://github.com/Ferroin))
-- Fix typo in metadata \(eBPF\) [\#16405](https://github.com/netdata/netdata/pull/16405) ([thiagoftsm](https://github.com/thiagoftsm))
-- docker: use /host/etc/hostname if mounted [\#16401](https://github.com/netdata/netdata/pull/16401) ([ilyam8](https://github.com/ilyam8))
-- adaptec\_raid: fix parsing PD without NCQ status [\#16400](https://github.com/netdata/netdata/pull/16400) ([ilyam8](https://github.com/ilyam8))
-- eBPF apps order [\#16395](https://github.com/netdata/netdata/pull/16395) ([thiagoftsm](https://github.com/thiagoftsm))
-- fix systemd-units func expiration time [\#16393](https://github.com/netdata/netdata/pull/16393) ([ilyam8](https://github.com/ilyam8))
 
 ## [v1.43.2](https://github.com/netdata/netdata/tree/v1.43.2) (2023-10-30)
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 944a5105eca9f9..6945ebdc711d9e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -725,85 +725,85 @@ if(ENABLE_PLUGIN_EBPF)
 endif()
 
 set(LIBH2O_FILES
-        web/server/h2o/libh2o/deps/cloexec/cloexec.c
-        web/server/h2o/libh2o/deps/libgkc/gkc.c
-        web/server/h2o/libh2o/deps/libyrmcds/close.c
-        web/server/h2o/libh2o/deps/libyrmcds/connect.c
-        web/server/h2o/libh2o/deps/libyrmcds/recv.c
-        web/server/h2o/libh2o/deps/libyrmcds/send.c
-        web/server/h2o/libh2o/deps/libyrmcds/send_text.c
-        web/server/h2o/libh2o/deps/libyrmcds/socket.c
-        web/server/h2o/libh2o/deps/libyrmcds/strerror.c
-        web/server/h2o/libh2o/deps/libyrmcds/text_mode.c
-        web/server/h2o/libh2o/deps/picohttpparser/picohttpparser.c
-        web/server/h2o/libh2o/lib/common/cache.c
-        web/server/h2o/libh2o/lib/common/file.c
-        web/server/h2o/libh2o/lib/common/filecache.c
-        web/server/h2o/libh2o/lib/common/hostinfo.c
-        web/server/h2o/libh2o/lib/common/http1client.c
-        web/server/h2o/libh2o/lib/common/memcached.c
-        web/server/h2o/libh2o/lib/common/memory.c
-        web/server/h2o/libh2o/lib/common/multithread.c
-        web/server/h2o/libh2o/lib/common/serverutil.c
-        web/server/h2o/libh2o/lib/common/socket.c
-        web/server/h2o/libh2o/lib/common/socketpool.c
-        web/server/h2o/libh2o/lib/common/string.c
-        web/server/h2o/libh2o/lib/common/time.c
-        web/server/h2o/libh2o/lib/common/timeout.c
-        web/server/h2o/libh2o/lib/common/url.c
-        web/server/h2o/libh2o/lib/core/config.c
-        web/server/h2o/libh2o/lib/core/configurator.c
-        web/server/h2o/libh2o/lib/core/context.c
-        web/server/h2o/libh2o/lib/core/headers.c
-        web/server/h2o/libh2o/lib/core/logconf.c
-        web/server/h2o/libh2o/lib/core/proxy.c
-        web/server/h2o/libh2o/lib/core/request.c
-        web/server/h2o/libh2o/lib/core/token.c
-        web/server/h2o/libh2o/lib/core/util.c
-        web/server/h2o/libh2o/lib/handler/access_log.c
-        web/server/h2o/libh2o/lib/handler/chunked.c
-        web/server/h2o/libh2o/lib/handler/compress.c
-        web/server/h2o/libh2o/lib/handler/compress/gzip.c
-        web/server/h2o/libh2o/lib/handler/errordoc.c
-        web/server/h2o/libh2o/lib/handler/expires.c
-        web/server/h2o/libh2o/lib/handler/fastcgi.c
-        web/server/h2o/libh2o/lib/handler/file.c
-        web/server/h2o/libh2o/lib/handler/headers.c
-        web/server/h2o/libh2o/lib/handler/mimemap.c
-        web/server/h2o/libh2o/lib/handler/proxy.c
-        web/server/h2o/libh2o/lib/handler/redirect.c
-        web/server/h2o/libh2o/lib/handler/reproxy.c
-        web/server/h2o/libh2o/lib/handler/throttle_resp.c
-        web/server/h2o/libh2o/lib/handler/status.c
-        web/server/h2o/libh2o/lib/handler/headers_util.c
-        web/server/h2o/libh2o/lib/handler/status/events.c
-        web/server/h2o/libh2o/lib/handler/status/requests.c
-        web/server/h2o/libh2o/lib/handler/http2_debug_state.c
-        web/server/h2o/libh2o/lib/handler/status/durations.c
-        web/server/h2o/libh2o/lib/handler/configurator/access_log.c
-        web/server/h2o/libh2o/lib/handler/configurator/compress.c
-        web/server/h2o/libh2o/lib/handler/configurator/errordoc.c
-        web/server/h2o/libh2o/lib/handler/configurator/expires.c
-        web/server/h2o/libh2o/lib/handler/configurator/fastcgi.c
-        web/server/h2o/libh2o/lib/handler/configurator/file.c
-        web/server/h2o/libh2o/lib/handler/configurator/headers.c
-        web/server/h2o/libh2o/lib/handler/configurator/proxy.c
-        web/server/h2o/libh2o/lib/handler/configurator/redirect.c
-        web/server/h2o/libh2o/lib/handler/configurator/reproxy.c
-        web/server/h2o/libh2o/lib/handler/configurator/throttle_resp.c
-        web/server/h2o/libh2o/lib/handler/configurator/status.c
-        web/server/h2o/libh2o/lib/handler/configurator/http2_debug_state.c
-        web/server/h2o/libh2o/lib/handler/configurator/headers_util.c
-        web/server/h2o/libh2o/lib/http1.c
-        web/server/h2o/libh2o/lib/tunnel.c
-        web/server/h2o/libh2o/lib/http2/cache_digests.c
-        web/server/h2o/libh2o/lib/http2/casper.c
-        web/server/h2o/libh2o/lib/http2/connection.c
-        web/server/h2o/libh2o/lib/http2/frame.c
-        web/server/h2o/libh2o/lib/http2/hpack.c
-        web/server/h2o/libh2o/lib/http2/scheduler.c
-        web/server/h2o/libh2o/lib/http2/stream.c
-        web/server/h2o/libh2o/lib/http2/http2_debug_state.c
+        src/web/server/h2o/libh2o/deps/cloexec/cloexec.c
+        src/web/server/h2o/libh2o/deps/libgkc/gkc.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/close.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/connect.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/recv.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/send.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/send_text.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/socket.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/strerror.c
+        src/web/server/h2o/libh2o/deps/libyrmcds/text_mode.c
+        src/web/server/h2o/libh2o/deps/picohttpparser/picohttpparser.c
+        src/web/server/h2o/libh2o/lib/common/cache.c
+        src/web/server/h2o/libh2o/lib/common/file.c
+        src/web/server/h2o/libh2o/lib/common/filecache.c
+        src/web/server/h2o/libh2o/lib/common/hostinfo.c
+        src/web/server/h2o/libh2o/lib/common/http1client.c
+        src/web/server/h2o/libh2o/lib/common/memcached.c
+        src/web/server/h2o/libh2o/lib/common/memory.c
+        src/web/server/h2o/libh2o/lib/common/multithread.c
+        src/web/server/h2o/libh2o/lib/common/serverutil.c
+        src/web/server/h2o/libh2o/lib/common/socket.c
+        src/web/server/h2o/libh2o/lib/common/socketpool.c
+        src/web/server/h2o/libh2o/lib/common/string.c
+        src/web/server/h2o/libh2o/lib/common/time.c
+        src/web/server/h2o/libh2o/lib/common/timeout.c
+        src/web/server/h2o/libh2o/lib/common/url.c
+        src/web/server/h2o/libh2o/lib/core/config.c
+        src/web/server/h2o/libh2o/lib/core/configurator.c
+        src/web/server/h2o/libh2o/lib/core/context.c
+        src/web/server/h2o/libh2o/lib/core/headers.c
+        src/web/server/h2o/libh2o/lib/core/logconf.c
+        src/web/server/h2o/libh2o/lib/core/proxy.c
+        src/web/server/h2o/libh2o/lib/core/request.c
+        src/web/server/h2o/libh2o/lib/core/token.c
+        src/web/server/h2o/libh2o/lib/core/util.c
+        src/web/server/h2o/libh2o/lib/handler/access_log.c
+        src/web/server/h2o/libh2o/lib/handler/chunked.c
+        src/web/server/h2o/libh2o/lib/handler/compress.c
+        src/web/server/h2o/libh2o/lib/handler/compress/gzip.c
+        src/web/server/h2o/libh2o/lib/handler/errordoc.c
+        src/web/server/h2o/libh2o/lib/handler/expires.c
+        src/web/server/h2o/libh2o/lib/handler/fastcgi.c
+        src/web/server/h2o/libh2o/lib/handler/file.c
+        src/web/server/h2o/libh2o/lib/handler/headers.c
+        src/web/server/h2o/libh2o/lib/handler/mimemap.c
+        src/web/server/h2o/libh2o/lib/handler/proxy.c
+        src/web/server/h2o/libh2o/lib/handler/redirect.c
+        src/web/server/h2o/libh2o/lib/handler/reproxy.c
+        src/web/server/h2o/libh2o/lib/handler/throttle_resp.c
+        src/web/server/h2o/libh2o/lib/handler/status.c
+        src/web/server/h2o/libh2o/lib/handler/headers_util.c
+        src/web/server/h2o/libh2o/lib/handler/status/events.c
+        src/web/server/h2o/libh2o/lib/handler/status/requests.c
+        src/web/server/h2o/libh2o/lib/handler/http2_debug_state.c
+        src/web/server/h2o/libh2o/lib/handler/status/durations.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/access_log.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/compress.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/errordoc.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/expires.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/fastcgi.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/file.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/headers.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/proxy.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/redirect.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/reproxy.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/throttle_resp.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/status.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/http2_debug_state.c
+        src/web/server/h2o/libh2o/lib/handler/configurator/headers_util.c
+        src/web/server/h2o/libh2o/lib/http1.c
+        src/web/server/h2o/libh2o/lib/tunnel.c
+        src/web/server/h2o/libh2o/lib/http2/cache_digests.c
+        src/web/server/h2o/libh2o/lib/http2/casper.c
+        src/web/server/h2o/libh2o/lib/http2/connection.c
+        src/web/server/h2o/libh2o/lib/http2/frame.c
+        src/web/server/h2o/libh2o/lib/http2/hpack.c
+        src/web/server/h2o/libh2o/lib/http2/scheduler.c
+        src/web/server/h2o/libh2o/lib/http2/stream.c
+        src/web/server/h2o/libh2o/lib/http2/http2_debug_state.c
 )
 
 set(DAEMON_FILES
@@ -844,14 +844,14 @@ set(DAEMON_FILES
 )
 
 set(H2O_FILES
-        web/server/h2o/http_server.c
-        web/server/h2o/http_server.h
-        web/server/h2o/h2o_utils.c
-        web/server/h2o/h2o_utils.h
-        web/server/h2o/streaming.c
-        web/server/h2o/streaming.h
-        web/server/h2o/connlist.c
-        web/server/h2o/connlist.h
+        src/web/server/h2o/http_server.c
+        src/web/server/h2o/http_server.h
+        src/web/server/h2o/h2o_utils.c
+        src/web/server/h2o/h2o_utils.h
+        src/web/server/h2o/streaming.c
+        src/web/server/h2o/streaming.h
+        src/web/server/h2o/connlist.c
+        src/web/server/h2o/connlist.h
 )
 
 if(ENABLE_H2O)
@@ -859,72 +859,72 @@ if(ENABLE_H2O)
 endif()
 
 set(API_PLUGIN_FILES
-        web/api/web_api.c
-        web/api/web_api.h
-        web/api/web_api_v1.c
-        web/api/web_api_v1.h
-        web/api/web_api_v2.c
-        web/api/web_api_v2.h
-        web/api/http_auth.c
-        web/api/http_auth.h
-        web/api/http_header.c
-        web/api/http_header.h
-        web/api/badges/web_buffer_svg.c
-        web/api/badges/web_buffer_svg.h
-        web/api/exporters/allmetrics.c
-        web/api/exporters/allmetrics.h
-        web/api/exporters/shell/allmetrics_shell.c
-        web/api/exporters/shell/allmetrics_shell.h
-        web/api/queries/rrdr.c
-        web/api/queries/rrdr.h
-        web/api/queries/query.c
-        web/api/queries/query.h
-        web/api/queries/average/average.c
-        web/api/queries/average/average.h
-        web/api/queries/countif/countif.c
-        web/api/queries/countif/countif.h
-        web/api/queries/incremental_sum/incremental_sum.c
-        web/api/queries/incremental_sum/incremental_sum.h
-        web/api/queries/max/max.c
-        web/api/queries/max/max.h
-        web/api/queries/min/min.c
-        web/api/queries/min/min.h
-        web/api/queries/sum/sum.c
-        web/api/queries/sum/sum.h
-        web/api/queries/median/median.c
-        web/api/queries/median/median.h
-        web/api/queries/percentile/percentile.c
-        web/api/queries/percentile/percentile.h
-        web/api/queries/stddev/stddev.c
-        web/api/queries/stddev/stddev.h
-        web/api/queries/ses/ses.c
-        web/api/queries/ses/ses.h
-        web/api/queries/des/des.c
-        web/api/queries/des/des.h
-        web/api/queries/trimmed_mean/trimmed_mean.c
-        web/api/queries/trimmed_mean/trimmed_mean.h
-        web/api/queries/weights.c
-        web/api/queries/weights.h
-        web/api/formatters/rrd2json.c
-        web/api/formatters/rrd2json.h
-        web/api/formatters/csv/csv.c
-        web/api/formatters/csv/csv.h
-        web/api/formatters/json/json.c
-        web/api/formatters/json/json.h
-        web/api/formatters/ssv/ssv.c
-        web/api/formatters/ssv/ssv.h
-        web/api/formatters/value/value.c
-        web/api/formatters/value/value.h
-        web/api/formatters/json_wrapper.c
-        web/api/formatters/json_wrapper.h
-        web/api/formatters/charts2json.c
-        web/api/formatters/charts2json.h
-        web/api/formatters/rrdset2json.c
-        web/api/formatters/rrdset2json.h
-        web/api/ilove/ilove.c
-        web/api/ilove/ilove.h
-        web/rtc/webrtc.c
-        web/rtc/webrtc.h
+        src/web/api/web_api.c
+        src/web/api/web_api.h
+        src/web/api/web_api_v1.c
+        src/web/api/web_api_v1.h
+        src/web/api/web_api_v2.c
+        src/web/api/web_api_v2.h
+        src/web/api/http_auth.c
+        src/web/api/http_auth.h
+        src/web/api/http_header.c
+        src/web/api/http_header.h
+        src/web/api/badges/web_buffer_svg.c
+        src/web/api/badges/web_buffer_svg.h
+        src/web/api/exporters/allmetrics.c
+        src/web/api/exporters/allmetrics.h
+        src/web/api/exporters/shell/allmetrics_shell.c
+        src/web/api/exporters/shell/allmetrics_shell.h
+        src/web/api/queries/rrdr.c
+        src/web/api/queries/rrdr.h
+        src/web/api/queries/query.c
+        src/web/api/queries/query.h
+        src/web/api/queries/average/average.c
+        src/web/api/queries/average/average.h
+        src/web/api/queries/countif/countif.c
+        src/web/api/queries/countif/countif.h
+        src/web/api/queries/incremental_sum/incremental_sum.c
+        src/web/api/queries/incremental_sum/incremental_sum.h
+        src/web/api/queries/max/max.c
+        src/web/api/queries/max/max.h
+        src/web/api/queries/min/min.c
+        src/web/api/queries/min/min.h
+        src/web/api/queries/sum/sum.c
+        src/web/api/queries/sum/sum.h
+        src/web/api/queries/median/median.c
+        src/web/api/queries/median/median.h
+        src/web/api/queries/percentile/percentile.c
+        src/web/api/queries/percentile/percentile.h
+        src/web/api/queries/stddev/stddev.c
+        src/web/api/queries/stddev/stddev.h
+        src/web/api/queries/ses/ses.c
+        src/web/api/queries/ses/ses.h
+        src/web/api/queries/des/des.c
+        src/web/api/queries/des/des.h
+        src/web/api/queries/trimmed_mean/trimmed_mean.c
+        src/web/api/queries/trimmed_mean/trimmed_mean.h
+        src/web/api/queries/weights.c
+        src/web/api/queries/weights.h
+        src/web/api/formatters/rrd2json.c
+        src/web/api/formatters/rrd2json.h
+        src/web/api/formatters/csv/csv.c
+        src/web/api/formatters/csv/csv.h
+        src/web/api/formatters/json/json.c
+        src/web/api/formatters/json/json.h
+        src/web/api/formatters/ssv/ssv.c
+        src/web/api/formatters/ssv/ssv.h
+        src/web/api/formatters/value/value.c
+        src/web/api/formatters/value/value.h
+        src/web/api/formatters/json_wrapper.c
+        src/web/api/formatters/json_wrapper.h
+        src/web/api/formatters/charts2json.c
+        src/web/api/formatters/charts2json.h
+        src/web/api/formatters/rrdset2json.c
+        src/web/api/formatters/rrdset2json.h
+        src/web/api/ilove/ilove.c
+        src/web/api/ilove/ilove.h
+        src/web/rtc/webrtc.c
+        src/web/rtc/webrtc.h
 )
 
 set(EXPORTING_ENGINE_FILES
@@ -1146,14 +1146,14 @@ set(STREAMING_PLUGIN_FILES
 )
 
 set(WEB_PLUGIN_FILES
-        web/server/web_client.c
-        web/server/web_client.h
-        web/server/web_server.c
-        web/server/web_server.h
-        web/server/static/static-threaded.c
-        web/server/static/static-threaded.h
-        web/server/web_client_cache.c
-        web/server/web_client_cache.h
+        src/web/server/web_client.c
+        src/web/server/web_client.h
+        src/web/server/web_server.c
+        src/web/server/web_server.h
+        src/web/server/static/static-threaded.c
+        src/web/server/static/static-threaded.h
+        src/web/server/web_client_cache.c
+        src/web/server/web_client_cache.h
 )
 
 set(CLAIM_PLUGIN_FILES
@@ -1462,18 +1462,18 @@ if(ENABLE_H2O)
   add_library(h2o STATIC ${LIBH2O_FILES})
 
   target_include_directories(h2o BEFORE PUBLIC
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/include"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/cloexec"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/brotli/enc"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/golombset"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/libgkc"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/libyrmcds"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/klib"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/neverbleed"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/picohttpparser"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/picotest"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/yaml/include"
-    "${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/deps/yoml"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/include"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/cloexec"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/brotli/enc"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/golombset"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/libgkc"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/libyrmcds"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/klib"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/neverbleed"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/picohttpparser"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/picotest"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/yaml/include"
+    "${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/deps/yoml"
   )
 
   target_compile_options(h2o PRIVATE
@@ -1724,7 +1724,7 @@ if(ENABLE_MQTTWEBSOCKETS)
                          -DMQTT_WSS_CPUSTATS)
 
   target_include_directories(mqttwebsockets PUBLIC
                              ${CMAKE_SOURCE_DIR}/aclk/helpers
-                             ${CMAKE_SOURCE_DIR}/web/server/h2o/libh2o/include)
+                             ${CMAKE_SOURCE_DIR}/src/web/server/h2o/libh2o/include)
 
   target_link_libraries(mqttwebsockets PRIVATE libnetdata)
 
@@ -2202,10 +2202,6 @@ install(TARGETS netdatacli
 
 #
 # Generate config file
 #
 
-add_definitions(-DHAVE_CONFIG_H)
-
-set(STORAGE_WITH_MATH On)
-
 if(NOT CMAKE_INSTALL_PREFIX STREQUAL "")
   string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
 endif()
@@ -2340,8 +2336,8 @@ install(FILES
 # swagger
 #
 install(FILES
-        web/api/netdata-swagger.json
-        web/api/netdata-swagger.yaml
+        src/web/api/netdata-swagger.json
+        src/web/api/netdata-swagger.yaml
         DESTINATION ${WEB_DEST})
 
 #
@@ -2713,17 +2709,16 @@ endif()
 # dashboard
 #
 
-include(web/gui/v1/dashboard_v1.cmake)
-include(web/gui/v2/dashboard_v2.cmake)
-
-include(web/gui/gui.cmake)
+include(src/web/gui/v1/dashboard_v1.cmake)
+include(src/web/gui/v2/dashboard_v2.cmake)
+include(src/web/gui/gui.cmake)
 
 function(cat IN_FILE OUT_FILE)
   file(READ ${IN_FILE} CONTENTS)
   file(APPEND ${OUT_FILE} "${CONTENTS}")
 endfunction()
 
-file(WRITE ${CMAKE_BINARY_DIR}/web/gui/dashboard.js.in "")
+file(WRITE ${CMAKE_BINARY_DIR}/src/web/gui/dashboard.js.in "")
 
 foreach(JS_FILE ${DASHBOARD_JS_FILES})
   cat(${JS_FILE} ${CMAKE_BINARY_DIR}/dashboard.js.in)
 endforeach()
@@ -2735,36 +2730,36 @@ install(FILES
         DESTINATION ${WEB_DEST})
 
 install(FILES
-        web/gui/dashboard_info_custom_example.js
-        web/gui/dashboard_info.js
-        web/gui/index.html
-        web/gui/main.css
-        web/gui/main.js
-        web/gui/registry-access.html
-        web/gui/registry-alert-redirect.html
-        web/gui/registry-hello.html
-        web/gui/switch.html
-        web/gui/ilove.html
+        src/web/gui/dashboard_info_custom_example.js
+        src/web/gui/dashboard_info.js
+        src/web/gui/index.html
+        src/web/gui/main.css
+        src/web/gui/main.js
+        src/web/gui/registry-access.html
+        src/web/gui/registry-alert-redirect.html
+        src/web/gui/registry-hello.html
+        src/web/gui/switch.html
+        src/web/gui/ilove.html
         DESTINATION ${WEB_DEST})
 
 install(FILES
-        web/gui/old/index.html
+        src/web/gui/old/index.html
         DESTINATION ${WEB_DEST}/old)
 
 install(FILES
-        web/gui/static/img/netdata-logomark.svg
+        src/web/gui/static/img/netdata-logomark.svg
         DESTINATION ${WEB_DEST}/static/img)
 
 install(FILES
-        web/gui/css/morris-0.5.1.css
-        web/gui/css/c3-0.4.18.min.css
+        src/web/gui/css/morris-0.5.1.css
+        src/web/gui/css/c3-0.4.18.min.css
         DESTINATION ${WEB_DEST}/css)
 
 install(FILES
-        web/gui/.well-known/dnt/cookies
+        src/web/gui/.well-known/dnt/cookies
         DESTINATION ${WEB_DEST}/.well-known/dnt)
 
 # v0 dashboard
 install(FILES
-        web/gui/v0/index.html
+        src/web/gui/v0/index.html
         DESTINATION ${WEB_DEST}/v0)
diff --git a/HISTORICAL_CHANGELOG.md b/HISTORICAL_CHANGELOG.md
deleted file mode 100644
index 04c8f2c232ff00..00000000000000
--- a/HISTORICAL_CHANGELOG.md
+++ /dev/null
@@ -1,650 +0,0 @@
-netdata (1.10.0) - 2018-03-27
-
- Please check full changelog at github.
-
-
-netdata (1.9.0) - 2017-12-17
-
- Please check full changelog at github.
-
-
-netdata (1.8.0) - 2017-09-17
-
- This is mainly a bugfix release.
- Please check full changelog at github.
-
-netdata (1.7.0) - 2017-07-16
-
-- netdata is still spreading fast
-
-  we are at 320.000 users and 132.000 servers
-
-  Almost 100k new users, 52k new installations and 800k docker pulls
-  since the previous release, 4 and a half months ago.
-
-  netdata user base grows at about 1000 new users and 600 new servers
-  per day. Thank you. You are awesome.
-
-- The next release (v1.8) will be focused on providing a global health
-  monitoring service, for all netdata users, for free.
-
-- netdata is now a (very fast) fully featured statsd server and the
-  only one with automatic visualization: push a statsd metric and hit
-  F5 on the netdata dashboard: your metric visualized. It also supports
-  synthetic charts, defined by you, so that you can correlate and
-  visualize your application the way you like it.
-
-- netdata got new installation options
-  It is now easier than ever to install netdata - we also distribute a
-  statically linked netdata x86_64 binary, including key dependencies
-  (like bash, curl, etc) that can run everywhere a Linux kernel runs
-  (CoreOS, CirrOS, etc).
-
-- metrics streaming and replication has been improved significantly.
-  All known issues have been solved and key enhancements have been added.
-  Headless collectors and proxies can now send metrics to backends when
-  data source = as collected.
-
-- backends have got quite a few enhancements, including host tags and
-  metrics filtering at the netdata side;
-  prometheus support has been re-written to utilize more prometheus
-  features and provide more flexibility and integration options.
-
-- netdata now monitors ZFS (on Linux and FreeBSD), ElasticSearch,
-  RabbitMQ, Go applications (via expvar), ipfw (on FreeBSD 11), samba,
-  squid logs (with web_log plugin).
-
-- netdata dashboard loading times have been improved significantly
-  (hit F5 a few times on a netdata dashboard - it is now amazingly fast),
-  to support dashboards with thousands of charts.
-
-- netdata alarms now support custom hooks, so you can run whatever you
-  like in parallel with netdata alarms.
-
-- As usual, this release brings dozens of more improvements, enhancements
-  and compatibility fixes.
-
-netdata (1.6.0) - 2017-03-20
-
-- birthday release: 1 year netdata
-
-  netdata was first published on March 30th, 2016.
-  It has been a crazy year since then:
-
-  225.000 unique netdata users
-  currently, at 1.000 new unique users per day
-
-  80.000 unique netdata installations
-  currently, at 500 new installation per day
-
-  610.000 docker pulls on docker hub
-
-  4.000.000 netdata sessions served
-  currently, at 15.000 sessions served per day
-
-  20.000 github stars
-
-  ```
-  Thank you!
-  You are awesome!
-  ```
-
-- central netdata is here
-
-  This is the first release that supports real-time streaming of
-  metrics between netdata servers.
-
-  netdata can now be:
-
-  - autonomous host monitoring
-    (like it always has been)
-
-  - headless data collector
-    (collect and stream metrics in real-time to another netdata)
-
-  - headless proxy
-    (collect metrics from multiple netdata and stream them to another netdata)
-
-  - store and forward proxy
-    (like headless proxy, but with a local database)
-
-  - central database
-    (metrics from multiple hosts are aggregated)
-
-  metrics databases can be configured on all nodes and each node maintaining
-  a database may have a different retention policy and possibly run
-  (even different) alarms on them.
-
-- monitoring ephemeral nodes
-
-  netdata now supports monitoring autoscaled ephemeral nodes,
-  that are started and stopped on demand (their IP is not known).
-
-  When the ephemeral nodes start streaming metrics to the central
-  netdata, the central netdata will show register them at "my-netdata"
-  menu on the dashboard.
-
-  For more information check:
-
-
-- monitoring ephemeral containers and VM guests
-
-  netdata now cleans up container, guest VM, network interfaces and mounted
-  disk metrics, disabling automatically their alarms too.
-
-  For more information check:
-
-
-- apps.plugin ported for FreeBSD
-
-  @vlvkobal has ported "apps.plugin" to FreeBSD. netdata can now provide
-  "Applications", "Users" and "User Groups" on FreeBSD.
-
-- web_log plugin
-
-  @l2isbad has done a wonderful job creating a unified web log parsing plugin
-  for all kinds of web server logs. With it, netdata provides real-time
-  performance information and health monitoring alarms for web applications
-  and web sites!
-
-  For more information check:
-
-
-- backends
-
-  netdata can now archive metrics to `JSON` backends
-  (both push, by @lfdominguez, and pull modes).
-
-- IPMI monitoring
-
-  netdata now has an IPMI plugin (based on freeipmi)
-  for monitoring server hardware.
-
-  The plugin creates (up to) 8 charts:
-
-  1. number of sensors by state
-  2. number of events in SEL
-  3. Temperatures CELSIUS
-  4. Temperatures FAHRENHEIT
-  5. Voltages
-  6. Currents
-  7. Power
-  8. Fans
-
-  It also supports alarms (including the number of sensors in critical state).
-
-  For more information, check:
-
-
-- new plugins
-
-  @l2isbad builds python data collection plugins for netdata at an wonderful rate! He rocks!
-
-  - **web_log** for monitoring in real-time all kinds of web server log files @l2isbad
-  - **freeipmi** for monitoring IPMI (server hardware)
-  - **nsd** (the [name server daemon](https://www.nlnetlabs.nl/projects/nsd/)) @383c57
-  - **mongodb** @l2isbad
-  - **smartd_log** (monitoring disk S.M.A.R.T. values) @l2isbad
-
-- improved plugins
-
-  - **nfacct** reworked and now collects connection tracker information using netlink.
-  - **ElasticSearch** re-worked @l2isbad
-  - **mysql** re-worked to allow faster development of custom mysql based plugins (MySQLService) @l2isbad
-  - **SNMP**
-  - **tomcat** @NMcCloud
-  - **ap** (monitoring hostapd access points)
-  - **php_fpm** @l2isbad
-  - **postgres** @l2isbad
-  - **isc_dhcpd** @l2isbad
-  - **bind_rndc** @l2isbad
-  - **numa**
-  - **apps.plugin** improvements and freebsd support @vlvkobal
-  - **fail2ban** @l2isbad
-  - **freeradius** @l2isbad
-  - **nut** (monitoring UPSes)
-  - **tc** (Linux QoS) now works on qdiscs instead of classes for the same result (a lot faster) @t-h-e
-  - **varnish** @l2isbad
-
-- new and improved alarms
-  - **web_log**, many alarms to detect common web site/API issues
-  - **fping**, alarms to detect packet loss, disconnects and unusually high latency
-  - **cpu**, cpu utilization alarm now ignores `nice`
-
-- new and improved alarm notification methods
-  - **HipChat** to allow hosted HipChat @frei-style
-  - **discordapp** @lowfive
-
-- dashboard improvements
-  - dashboard now works on HiDPi screens
-  - dashboard now shows version of netdata
-  - dashboard now resets charts properly
-  - dashboard updated to use latest gauge.js release
-
-- other improvements
-  - thanks to @rlefevre netdata now uses a lot of different high resolution system clocks.
-
-  netdata has received a lot more improvements from many more contributors!
-
-  Thank you all!
-
-netdata (1.5.0) - 2017-01-22
-
-- yet another release that makes netdata the fastest
-  netdata ever!
-
-- netdata runs on FreeBSD, FreeNAS and MacOS !
-
-  Vladimir Kobal (@vlvkobal) has done a magnificent work
-  porting netdata to FreeBSD and MacOS.
-
-  Everything works: cpu, memory, disks performance, disks space,
-  network interfaces, interrupts, IPv4 metrics, IPv6 metrics
-  processes, context switches, softnet, IPC queues,
-  IPC semaphores, IPC shared memory, uptime, etc. Wow!
-
-- netdata supports data archiving to backend databases:
-
-  - Graphite
-  - OpenTSDB
-  - Prometheus
-
-  and of course all the compatible ones
-  (KairosDB, InfluxDB, Blueflood, etc)
-
-- new plugins:
-
-  Ilya Mashchenko (@l2isbad) has created most of the python
-  data collection plugins in this release !
-
-  - systemd Services (using cgroups!)
-  - FPing (yes, network latency in netdata!)
-  - postgres databases @facetoe, @moumoul
-  - Vanish disk cache (v3 and v4) @l2isbad
-  - ElasticSearch @l2isbad
-  - HAproxy @l2isbad
-  - FreeRadius @l2isbad, @lgz
-  - mdstat (RAID) @l2isbad
-  - ISC bind (via rndc) @l2isbad
-  - ISC dhcpd @l2isbad, @lgz
-  - Fail2Ban @l2isbad
-  - OpenVPN status log @l2isbad, @lgz
-  - NUMA memory @tycho
-  - CPU Idle @tycho
-  - gunicorn log @deltaskelta
-  - ECC memory hardware errors
-  - IPC semaphores
-  - uptime plugin (with a nice badge too)
-
-- improved plugins:
-
-  - netfilter conntrack
-  - mysql (replication) @l2isbad
-  - ipfs @pjz
-  - cpufreq @tycho
-  - hddtemp @l2isbad
-  - sensors @l2isbad
-  - nginx @leolovenet
-  - nginx_log @paulfantom
-  - phpfpm @leolovenet
-  - redis @leolovenet
-  - dovecot @justohall
-  - cgroups
-  - disk space
-  - apps.plugin
-  - /proc/interrupts @rlefevre
-  - /proc/softirqs @rlefevre
-  - /proc/vmstat (system memory charts)
-  - /proc/net/snmp6 (IPv6 charts)
-  - /proc/self/meminfo (system memory charts)
-  - /proc/net/dev (network interfaces)
-  - tc (linux QoS)
-
-- new/improved alarms:
-
-  - MySQL / MariaDB alarms (incl. replication)
-  - IPFS alarms
-  - HAproxy alarms
-  - UDP buffer alarms
-  - TCP AttemptFails
-  - ECC memory alarms
-  - netfilter connections alarms
-  - SNMP
-
-- new alarm notifications:
-
-  - messagebird.com @tech-no-logical
-  - pagerduty.com @jimcooley
-  - pushbullet.com @tperalta82
-  - twilio.com @shadycuz
-  - HipChat
-  - kafka
-
-- shell integration
-
-  - shell scripts can now query netdata easily!
-
-- dashboard improvements:
-  - dashboard is now faster on firefox, safari, opera, edge
-    (edge is still the slowest)
-  - dashboard now has a little bigger fonts
-  - SHIFT + mouse wheel to zoom charts, works on all browsers
-  - perfect-scrollbar on the dashboard
-  - dashboard 4K resolution fixes
-  - dashboard compatibility fixes for embedding charts in
-    third party web sites
-  - charts on custom dashboards can have common min/max
-    even if they come from different netdata servers
-  - alarm log is now saved and loaded back so that
-    the alarm history is available at the dashboard
-
-- other improvements:
-  - python.d.plugin has received way to many improvements
-    from many contributors!
-  - charts.d.plugin can now be forked to support
-    multiple independent instances
-  - registry has been re-factored to lower its memory
-    requirements (required for the public registry)
-  - simple patterns in cgroups, disks and alarms
-  - netdata-installer.sh can now correctly install
-    netdata in containers
-  - supplied logrotate script compatibility fixes
-  - spec cleanup @breed808
-  - clocks and timers reworked @rlefevre
-
-  netdata has received a lot more improvements from many more
-  contributors!
-
-  Thank you all guys!
-
-netdata (1.4.0) - 2016-10-04
-
- At a glance:
-
-- the fastest netdata ever (with a better look too)!
-
-- improved IoT and containers support!
-
-- alarms improved in almost every way!
-
-- new plugins:
-  softnet netdev,
-  extended TCP metrics,
-  UDPLite
-  NFS v2, v3 client (server was there already),
-  NFS v4 server & client,
-  APCUPSd,
-  RetroShare
-
-- improved plugins:
-  mysql,
-  cgroups,
-  hddtemp,
-  sensors,
-  phpfpm,
-  tc (QoS)
-
- In detail:
-
-- improved alarms
-
-  Many new alarms have been added to detect common kernel
-  configuration errors and old alarms have been re-worked
-  to avoid notification floods.
-
-  Alarms now support notification hysteresis (both static
-  and dynamic), notification self-cancellation, dynamic
-  thresholds based on current alarm status
-
-- improved alarm notifications
-
-  netdata now supports:
-
-  - email notifications
-  - slack.com notifications on slack channels
-  - pushover.net notifications (mobile push notifications)
-  - telegram.org notifications
-
-  For all the above methods, netdata supports role-based
-  notifications, with multiple recipients for each role
-  and severity filtering per recipient!
-
-  Also, netdata support HTML5 notifications, while the
-  dashboard is open in a browser window (no need to be
-  the active one).
-
-  All notifications are now clickable to get to the chart
-  that raised the alarm.
-
-- improved IoT support!
-
-  netdata builds and runs with musl libc and runs on systems
-  based on busybox.
-
-- improved containers support!
-
-  netdata runs on alpine linux (a low profile linux distribution
-  used in containers).
-
-- Dozens of other improvements and bugfixes
-
-netdata (1.3.0) - 2016-08-28
-
- At a glance:
-
-- netdata has health monitoring / alarms!
-- netdata has badges that can be embeded anywhere!
-- netdata plugins are now written in Python!
-- new plugins: redis, memcached, nginx_log, ipfs, apache_cache
-
- IMPORTANT:
- Since netdata now uses Python plugins, new packages are
- required to be installed on a system to allow it work.
- For more information, please check the installation page:
-
-
-
- In detail:
-
-- netdata has alarms!
-
-  Based on the POLL we made on github
-  (),
-  health monitoring was the winner. So here it is!
-
-  netdata now has a powerful health monitoring system embedded.
-  Please check the wiki page:
-
-
-
-- netdata has badges!
-
-  netdata can generate badges with live information from the
-  collected metrics.
-  Please check the wiki page:
-
-
-
-- netdata plugins are now written in Python!
-
-  Thanks to the great work of Paweł Krupa (@paulfantom), most BASH
-  plugins have been ported to Python.
-
-  The new python.d.plugin supports both python2 and python3 and
-  data collection from multiple sources for all modules.
-
-  The following pre-existing modules have been ported to Python:
-
-  - apache
-  - cpufreq
-  - example
-  - exim
-  - hddtemp
-  - mysql
-  - nginx
-  - phpfpm
-  - postfix
-  - sensors
-  - squid
-  - tomcat
-
-  The following new modules have been added:
-
-  - apache_cache
-  - dovecot
-  - ipfs
-  - memcached
-  - nginx_log
-  - redis
-
-- other data collectors:
-
-  - Thanks to @simonnagl netdata now reports disk space usage.
-
-- dashboards now transfer a certain settings from server to server
-  when changing servers via the my-netdata menu.
-
-  The settings transferred are the dashboard theme, the online
-  help status and current pan and zoom timeframe of the dashboard.
-
-- API improvements:
-
-  - reduction functions now support 'min', 'sum' and 'incremental-sum'.
-
-  - netdata now offers a multi-threaded and a single threaded
-    web server (single threaded is better for IoT).
-
-- apps.plugin improvements:
-
-  - can now run with command line argument 'without-files'
-    to prevent it from enumerating all the open files/sockets/pipes
-    of all running processes.
-
-  - apps.plugin now scales the collected values to match the
-    the total system usage.
-
-  - apps.plugin can now report guest CPU usage per process.
-
-  - repeating errors are now logged once per process.
-
-- netdata now runs with IDLE process priority (lower than nice 19)
-
-- netdata now instructs the kernel to kill it first when it starves
-  for memory.
-
-- netdata listens for signals:
-
-  - SIGHUP to netdata instructs it to re-open its log files
-    (new logrotate files added too).
-
-  - SIGUSR1 to netdata saves the database
-
-  - SIGUSR2 to netdata reloads health / alarms configuration
-
-- netdata can now bind to multiple IPs and ports.
-
-- netdata now has new systemd service file (it starts as user
-  netdata and does not fork).
-
-- Dozens of other improvements and bugfixes
-
-netdata (1.2.0) - 2016-05-16
-
- At a glance:
-
-- netdata is now 30% faster
-- netdata now has a registry (my-netdata dashboard menu)
-- netdata now monitors Linux Containers (docker, lxc, etc)
-
- IMPORTANT:
- This version requires libuuid. The package you need is:
-
-- uuid-dev (debian/ubuntu), or
-- libuuid-devel (centos/fedora/redhat)
-
- In detail:
-
-- netdata is now 30% faster !
-
-  - Patches submitted by @fredericopissarra improved overall
-    netdata performance by 10%.
-
-  - A new improved search function in the internal indexes
-    made all searches faster by 50%, resulting in about
-    20% better performance for the core of netdata.
-
-  - More efficient threads locking in key components
-    contributed to the overall efficiency.
-
-- netdata now has a CENTRAL REGISTRY !
-
-  The central registry tracks all your netdata servers
-  and bookmarks them for you at the 'my-netdata' menu
-  on all dashboards.
-
-  Every netdata can act as a registry, but there is also
-  a global registry provided for free for all netdata users!
-
-- netdata now monitors CONTAINERS !
-
-  docker, lxc, or anything else. For each container it monitors
-  CPU, RAM, DISK I/O (network interfaces were already monitored)
-
-- apps.plugin: now uses linux capabilities by default
-  without setuid to root
-
-- netdata has now an improved signal handler
-  thanks to @simonnagl
-
-- API: new improved CORS support
-
-- SNMP: counter64 support fixed
-
-- MYSQL: more charts, about QCache, MyISAM key cache,
-  InnoDB buffer pools, open files
-
-- DISK charts now show mount point when available
-
-- Dashboard: improved support for older web browsers
-  and mobile web browsers (thanks to @simonnagl)
-
-- Multi-server dashboards now allow de-coupled refreshes for
-  each chart, so that if one netdata has a network latency
-  the other charts are not affected
-
-- Several other minor improvements and bugfixes
-
-netdata (1.1.0) - 2016-04-20
-
- Dozens of commits that improve netdata in several ways:
-
-- Data collection: added IPv6 monitoring
-- Data collection: added SYNPROXY DDoS protection monitoring
-- Data collection: apps.plugin: added charts for users and user groups
-- Data collection: apps.plugin: grouping of processes now support patterns
-- Data collection: apps.plugin: now it is faster, after the new features added
-- Data collection: better auto-detection of partitions for disk monitoring
-- Data collection: better fireqos integration for QoS monitoring
-- Data collection: squid monitoring now uses squidclient
-- Data collection: SNMP monitoring now supports 64bit counters
-- API: fixed issues in CSV output generation
-- API: netdata can now be restricted to listen on a specific IP
-- Core and apps.plugin: error log flood protection
-- Dashboard: better error handling when the netdata server is unreachable
-- Dashboard: each chart now has a toolbox
-- Dashboard: on-line help support
-- Dashboard: check for netdata updates button
-- Dashboard: added example /tv.html dashboard
-- Packaging: now compiles with musl libc (alpine linux)
-- Packaging: added debian packaging
-- Packaging: support non-root installations
-- Packaging: the installer generates uninstall script
-
-netdata (1.0.0) - 2016-03-22
-
-- first public release
-
-netdata (1.0.0-rc.1) - 2015-11-28
-
-- initial packaging
diff --git a/README.md b/README.md
index 7ad7a9940ba7e5..7475135cc78b7f 100644
--- a/README.md
+++ b/README.md
@@ -723,7 +723,7 @@ The Netdata Agent is shipped with multiple UI versions:
 
 - `http://agent.ip:19999/v0/`, the original open-source single-node UI, GPLv3+.
 - `http://agent.ip:19999/v1/`, the latest open-source single-node UI, GPLv3+.
-- `http://agent.ip:19999/v2/`, a snapshot of the latest Netdata Cloud UI as it was at the time the agent was released, licensed to be distributed with Netdata Agents under [NCUL1](https://github.com/netdata/netdata/blob/master/web/gui/v2/LICENSE.md).
+- `http://agent.ip:19999/v2/`, a snapshot of the latest Netdata Cloud UI as it was at the time the agent was released, licensed to be distributed with Netdata Agents under [NCUL1](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/LICENSE.md).
 
 When you access a Netdata Agent via `http://agent.ip:19999/` a splash screen attempts to use the latest live version of Netdata Cloud UI (downloaded from Cloudflare). This only happens when the web browser has internet connectivity and Netdata Cloud is not disabled at the agent configuration. Otherwise, it falls back to `http://agent.ip:19999/v2/`.
@@ -830,4 +830,4 @@ instructions on building each Netdata component from the source and preparing a
 Netdata is released under [GPLv3+](https://github.com/netdata/netdata/blob/master/LICENSE). Netdata re-distributes other open-source tools and libraries. Please check the [third party licenses](https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md).
 
-The Latest Netdata UI, is distributed under [NCUL1](https://github.com/netdata/netdata/blob/master/web/gui/v2/LICENSE.md). It also uses third party open source components. Check the [UI third party licenses](https://github.com/netdata/netdata/blob/master/web/gui/v2/3D_PARTY_LICENSES.txt)
+The Latest Netdata UI, is distributed under [NCUL1](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/LICENSE.md). It also uses third party open source components. Check the [UI third party licenses](https://github.com/netdata/netdata/blob/master/src/web/gui/v2/3D_PARTY_LICENSES.txt)
diff --git a/REDISTRIBUTED.md b/REDISTRIBUTED.md
index cee205705198d8..5149127f650bb1 100644
--- a/REDISTRIBUTED.md
+++ b/REDISTRIBUTED.md
@@ -16,7 +16,7 @@ Released under [GPL v3 or later](https://raw.githubusercontent.com/netdata/netda
 Netdata uses SPDX license tags to identify the license for its files.
 Individual licenses referenced in the tags are available on the [SPDX project site](http://spdx.org/licenses/).
 
-Netdata redistributes the Netdata Cloud UI, licensed under [Netdata Cloud UI License v1.0 (NCUL1)](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/v2/LICENSE.md). Netdata Cloud UI includes [third party open-source software](https://raw.githubusercontent.com/netdata/netdata/master/web/gui/v2/3D_PARTY_LICENSES.txt).
+Netdata redistributes the Netdata Cloud UI, licensed under [Netdata Cloud UI License v1.0 (NCUL1)](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/LICENSE.md). Netdata Cloud UI includes [third party open-source software](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/3D_PARTY_LICENSES.txt).
Netdata Cloud UI includes [third party open-source software](https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/v2/3D_PARTY_LICENSES.txt). Netdata redistributes the following third-party software. We have decided to redistribute all these, instead of using them diff --git a/config.cmake.h.in b/config.cmake.h.in index 2f52877bfdd9c2..276b051f9e8be7 100644 --- a/config.cmake.h.in +++ b/config.cmake.h.in @@ -91,7 +91,6 @@ #cmakedefine ENABLE_LZ4 #cmakedefine ENABLE_ZSTD #cmakedefine ENABLE_BROTLI -#cmakedefine STORAGE_WITH_MATH #cmakedefine ENABLE_LOGSMANAGEMENT #cmakedefine ENABLE_LOGSMANAGEMENT_TESTS diff --git a/cppcheck.sh b/cppcheck.sh deleted file mode 100755 index ebbeeaf8f92e96..00000000000000 --- a/cppcheck.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# echo >>/tmp/cppcheck.log "cppcheck ${*}" - -# shellcheck disable=SC2230 -cppcheck=$(which cppcheck 2>/dev/null || command -v cppcheck 2>/dev/null) -[ -z "${cppcheck}" ] && echo >&2 "install cppcheck." && exit 1 -processors=$(grep -c ^processor /proc/cpuinfo) -[ $(( processors )) -lt 1 ] && processors=1 -base="$(dirname "${0}")" -[ "${base}" = "." ] && base="${PWD}" -cd "${base}" || exit 1 -[ ! -d "cppcheck-build" ] && mkdir "cppcheck-build" -file="${1}" -shift -# shellcheck disable=SC2235 -([ "${file}" = "${base}" ] || [ -z "${file}" ]) && file="${base}" -"${cppcheck}" \ -j ${processors} \ --cppcheck-build-dir="cppcheck-build" \ -I .. \ --force \ --enable=warning,performance,portability,information \ --library=gnu \ --library=posix \ --suppress="unusedFunction:*" \ --suppress="nullPointerRedundantCheck:*" \ --suppress="readdirCalled:*" \ "${file}" "${@}" diff --git a/docs/Running-behind-h2o.md b/docs/Running-behind-h2o.md index a8916422136fe5..7e4601ae87125c 100644 --- a/docs/Running-behind-h2o.md +++ b/docs/Running-behind-h2o.md @@ -105,7 +105,7 @@ Using the above, you access Netdata on the backend servers, like this: ### Encrypt the communication between H2O and Netdata -In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support), it is +In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#enabling-tls-support), it is necessary to specify inside the H2O configuration that the final destination is using TLS. To do this, change the `http://` on the `proxy.reverse.url` line in your H2O configuration to `https://` diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md index 528a6dab28c622..2cda4ab2a0ba31 100644 --- a/docs/Running-behind-nginx.md +++ b/docs/Running-behind-nginx.md @@ -164,7 +164,7 @@ Using the above, you access Netdata on the backend servers, like this: ### Encrypt the communication between Nginx and Netdata -In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support), it is +In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#enabling-tls-support), it is necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, append the following parameters to your `nginx.conf`:
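As a hedged sketch only (the exact parameters the guide recommends fall outside this excerpt; the directives are standard nginx, and the address and values are illustrative):

```conf
location / {
    # point the proxy at the TLS-enabled Netdata listener (address is illustrative)
    proxy_pass https://127.0.0.1:19999;
    # tell the backend the original request arrived over HTTPS
    proxy_set_header X-Forwarded-Proto https;
}
```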
diff --git a/docs/category-overview-pages/accessing-netdata-dashboards.md b/docs/category-overview-pages/accessing-netdata-dashboards.md index 97df8b8352c1a8..af7b0df82481d2 100644 --- a/docs/category-overview-pages/accessing-netdata-dashboards.md +++ b/docs/category-overview-pages/accessing-netdata-dashboards.md @@ -35,4 +35,4 @@ Netdata starts a web server for its dashboard at port `19999`. Open up your web navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. -Documentation for previous Agent dashboard can still be found [here](https://github.com/netdata/netdata/blob/master/web/gui/README.md). \ No newline at end of file +Documentation for the previous Agent dashboard can still be found [here](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md). \ No newline at end of file diff --git a/docs/category-overview-pages/reverse-proxies.md b/docs/category-overview-pages/reverse-proxies.md index 07c8b9bd549a2a..1b4d935a30e4f7 100644 --- a/docs/category-overview-pages/reverse-proxies.md +++ b/docs/category-overview-pages/reverse-proxies.md @@ -3,7 +3,7 @@ If you need to access a Netdata agent's user interface or API in a production environment, we recommend you put Netdata behind another web server and secure access to the dashboard via SSL, user authentication and firewall rules. -A dedicated web server also provides more robustness and capabilities than the Agent's [internal web server](https://github.com/netdata/netdata/blob/master/web/README.md). +A dedicated web server also provides more robustness and capabilities than the Agent's [internal web server](https://github.com/netdata/netdata/blob/master/src/web/README.md). We have documented running behind [nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md), diff --git a/docs/category-overview-pages/secure-nodes.md b/docs/category-overview-pages/secure-nodes.md index 4e5741467fb2d4..5c93278264d8ac 100644 --- a/docs/category-overview-pages/secure-nodes.md +++ b/docs/category-overview-pages/secure-nodes.md @@ -98,7 +98,7 @@ the internet using multiple hosting providers). ## Fine-grained access control If you want to keep using the local dashboard, but don't want it exposed to the internet, you can restrict access with -[access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists). This method also fully +[access lists](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#access-lists). This method also fully retains the ability to stream metrics on-demand through Netdata Cloud. @@ -134,9 +134,9 @@ The `allow connections from` setting is global and restricts access to the dashboard allow management from = localhost ``` -See the [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) docs for additional details +See the [web server](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#access-lists) docs for additional details about access lists.
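As an illustrative sketch of how these access lists combine in the `[web]` section of `netdata.conf` (the IP patterns are placeholder values, not recommendations):

```conf
[web]
    # who may open a connection at all (illustrative values)
    allow connections from = localhost 10.*
    # who may load the dashboard
    allow dashboard from = localhost 10.*
    # who may call management API endpoints
    allow management from = localhost
```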
You can take -access lists one step further by [enabling SSL](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) to encrypt data from local +access lists one step further by [enabling SSL](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#enabling-tls-support) to encrypt data from the local dashboard in transit. The connection to Netdata Cloud is always secured with TLS. ## Use an authenticating web server in proxy mode diff --git a/docs/cloud/visualize/interact-new-charts.md b/docs/cloud/visualize/interact-new-charts.md index 1f562d514d6b80..b6c6cab05fab8a 100644 --- a/docs/cloud/visualize/interact-new-charts.md +++ b/docs/cloud/visualize/interact-new-charts.md @@ -43,7 +43,7 @@ While Netdata's charts require no configuration and are easy to interact with, t Understanding how these work will help you more easily navigate the dashboard, [write new alerts](https://github.com/netdata/netdata/blob/master/src/health/REFERENCE.md), or play around -with the [API](https://github.com/netdata/netdata/blob/master/web/api/README.md). +with the [API](https://github.com/netdata/netdata/blob/master/src/web/api/README.md). ### Dimensions @@ -262,7 +262,7 @@ By default the aggregation applied is _average_ but the user can choose differen - Delta - Single or Double exponential smoothing -For more details on each, you can refer to our Agent's HTTP API details on [Data Queries - Data Grouping](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md#data-grouping). +For more details on each, you can refer to our Agent's HTTP API details on [Data Queries - Data Grouping](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md#data-grouping). ### Reset to defaults diff --git a/docs/contributing/style-guide.md b/docs/contributing/style-guide.md index 6ec845099b7ca7..b77927a9c1a04b 100644 --- a/docs/contributing/style-guide.md +++ b/docs/contributing/style-guide.md @@ -366,7 +366,7 @@ In Markdown, use the standard image syntax, `![]()`, and place the alt text betw using our logo: ```markdown -![The Netdata logo](https://github.com/netdata/netdata/blob/master/web/gui/static/img/netdata-logomark.svg) +![The Netdata logo](https://github.com/netdata/netdata/blob/master/src/web/gui/static/img/netdata-logomark.svg) ``` Reference in-product text, code samples, and terminal output with actual text content, not screen captures or other diff --git a/docs/dashboard/customize.md b/docs/dashboard/customize.md index 301f0bd6b58a88..afcf9216b88f5e 100644 --- a/docs/dashboard/customize.md +++ b/docs/dashboard/customize.md @@ -4,7 +4,7 @@ > > This document is only applicable to the v1 version of the dashboard and doesn't affect the [Netdata Dashboard](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/accessing-netdata-dashboards.md). -While the [Netdata dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md) comes preconfigured with hundreds of charts and +While the [Netdata dashboard](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md) comes preconfigured with hundreds of charts and thousands of metrics, you may want to alter your experience based on a particular use case or preferences.
## Dashboard settings diff --git a/diagrams/build.sh b/docs/diagrams/build.sh similarity index 100% rename from diagrams/build.sh rename to docs/diagrams/build.sh diff --git a/diagrams/config.puml b/docs/diagrams/config.puml similarity index 100% rename from diagrams/config.puml rename to docs/diagrams/config.puml diff --git a/diagrams/data_structures/netdata_config.svg b/docs/diagrams/data_structures/netdata_config.svg similarity index 100% rename from diagrams/data_structures/netdata_config.svg rename to docs/diagrams/data_structures/netdata_config.svg diff --git a/diagrams/data_structures/registry.svg b/docs/diagrams/data_structures/registry.svg similarity index 100% rename from diagrams/data_structures/registry.svg rename to docs/diagrams/data_structures/registry.svg diff --git a/diagrams/data_structures/rrd.svg b/docs/diagrams/data_structures/rrd.svg similarity index 100% rename from diagrams/data_structures/rrd.svg rename to docs/diagrams/data_structures/rrd.svg diff --git a/diagrams/data_structures/src/netdata_config.xml b/docs/diagrams/data_structures/src/netdata_config.xml similarity index 100% rename from diagrams/data_structures/src/netdata_config.xml rename to docs/diagrams/data_structures/src/netdata_config.xml diff --git a/diagrams/data_structures/src/registry.xml b/docs/diagrams/data_structures/src/registry.xml similarity index 100% rename from diagrams/data_structures/src/registry.xml rename to docs/diagrams/data_structures/src/registry.xml diff --git a/diagrams/data_structures/src/rrd.xml b/docs/diagrams/data_structures/src/rrd.xml similarity index 100% rename from diagrams/data_structures/src/rrd.xml rename to docs/diagrams/data_structures/src/rrd.xml diff --git a/diagrams/data_structures/src/web.xml b/docs/diagrams/data_structures/src/web.xml similarity index 100% rename from diagrams/data_structures/src/web.xml rename to docs/diagrams/data_structures/src/web.xml diff --git a/diagrams/data_structures/web.svg b/docs/diagrams/data_structures/web.svg similarity index 100% rename from diagrams/data_structures/web.svg rename to docs/diagrams/data_structures/web.svg diff --git a/diagrams/docs/Makefile b/docs/diagrams/docs/Makefile similarity index 100% rename from diagrams/docs/Makefile rename to docs/diagrams/docs/Makefile diff --git a/diagrams/docs/deployment-parent.drawio b/docs/diagrams/docs/deployment-parent.drawio similarity index 100% rename from diagrams/docs/deployment-parent.drawio rename to docs/diagrams/docs/deployment-parent.drawio diff --git a/diagrams/docs/deployment-parents.drawio b/docs/diagrams/docs/deployment-parents.drawio similarity index 100% rename from diagrams/docs/deployment-parents.drawio rename to docs/diagrams/docs/deployment-parents.drawio diff --git a/diagrams/docs/deployment-standalone.drawio b/docs/diagrams/docs/deployment-standalone.drawio similarity index 100% rename from diagrams/docs/deployment-standalone.drawio rename to docs/diagrams/docs/deployment-standalone.drawio diff --git a/diagrams/ephemeral-nodes-two-parents.xml b/docs/diagrams/ephemeral-nodes-two-parents.xml similarity index 100% rename from diagrams/ephemeral-nodes-two-parents.xml rename to docs/diagrams/ephemeral-nodes-two-parents.xml diff --git a/diagrams/netdata-for-ephemeral-nodes.xml b/docs/diagrams/netdata-for-ephemeral-nodes.xml similarity index 100% rename from diagrams/netdata-for-ephemeral-nodes.xml rename to docs/diagrams/netdata-for-ephemeral-nodes.xml diff --git a/diagrams/netdata-overview.xml b/docs/diagrams/netdata-overview.xml similarity index 99% rename from 
diagrams/netdata-overview.xml rename to docs/diagrams/netdata-overview.xml index 74e87f63c365c4..16c967e6ed3408 100644 --- a/diagrams/netdata-overview.xml +++ b/docs/diagrams/netdata-overview.xml @@ -189,7 +189,7 @@ - + @@ -489,7 +489,7 @@ - + @@ -497,7 +497,7 @@ - + diff --git a/diagrams/netdata-proxies-example.xml b/docs/diagrams/netdata-proxies-example.xml similarity index 100% rename from diagrams/netdata-proxies-example.xml rename to docs/diagrams/netdata-proxies-example.xml diff --git a/diagrams/registry.puml b/docs/diagrams/registry.puml similarity index 100% rename from diagrams/registry.puml rename to docs/diagrams/registry.puml diff --git a/diagrams/simple-parent-child-no-cloud.xml b/docs/diagrams/simple-parent-child-no-cloud.xml similarity index 100% rename from diagrams/simple-parent-child-no-cloud.xml rename to docs/diagrams/simple-parent-child-no-cloud.xml diff --git a/diagrams/simple-parent-child.xml b/docs/diagrams/simple-parent-child.xml similarity index 100% rename from diagrams/simple-parent-child.xml rename to docs/diagrams/simple-parent-child.xml diff --git a/diagrams/windows.xml b/docs/diagrams/windows.xml similarity index 100% rename from diagrams/windows.xml rename to docs/diagrams/windows.xml diff --git a/docs/glossary.md b/docs/glossary.md index 2c16abfcfbd229..ae936394a8eb3f 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -43,7 +43,7 @@ Use the alphabetized list below to find the answer to your single-term questions - [**Context**](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts): A way of grouping charts by the types of metrics collected and dimensions displayed. It's kind of like a machine-readable naming and organization scheme. -- [**Custom dashboards**](https://github.com/netdata/netdata/blob/master/web/gui/custom/README.md) A dashboard that you can create using simple HTML (no javascript is required for basic dashboards). +- [**Custom dashboards**](https://github.com/netdata/netdata/blob/master/src/web/gui/custom/README.md): A dashboard that you can create using simple HTML (no JavaScript is required for basic dashboards). ## D diff --git a/docs/guides/configure/performance.md b/docs/guides/configure/performance.md index 8639785264f845..e1b32778e0e00e 100644 --- a/docs/guides/configure/performance.md +++ b/docs/guides/configure/performance.md @@ -237,7 +237,7 @@ outside your production infrastructure, or if you have CPU and memory to spare. ## Run Netdata behind a proxy A dedicated web server like nginx provides more robustness than the Agent's -internal [web server](https://github.com/netdata/netdata/blob/master/web/README.md). +internal [web server](https://github.com/netdata/netdata/blob/master/src/web/README.md). Nginx can handle more concurrent connections, reuse idle connections, and use fast gzip compression to reduce payloads. For details on installing another web server as a proxy for the local Agent dashboard, diff --git a/docs/guides/monitor/process.md b/docs/guides/monitor/process.md index 30e9520af127c3..473927eb821eb8 100644 --- a/docs/guides/monitor/process.md +++ b/docs/guides/monitor/process.md @@ -146,7 +146,7 @@ others, and groups them into `sql`. That makes sense, since all these processes

```
sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
```
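Groups like this are declared in `apps_groups.conf`, one group per line; as a sketch, a custom group could look like the following (the group name and patterns are hypothetical):

```conf
# format: group_name: pattern pattern ...
myapp: myapp* myapp-worker*
```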
-These groups are then reflected as [dimensions](https://github.com/netdata/netdata/blob/master/web/README.md#dimensions) +These groups are then reflected as [dimensions](https://github.com/netdata/netdata/blob/master/src/web/README.md#dimensions) within Netdata's charts. ![An example per-process CPU utilization chart in Netdata diff --git a/docs/guides/python-collector.md b/docs/guides/python-collector.md index 7ffa46a954588e..102673c1b782b8 100644 --- a/docs/guides/python-collector.md +++ b/docs/guides/python-collector.md @@ -117,7 +117,7 @@ context, charttype]`, where: that is `A.B`, with `A` being the name of the collector, and `B` being the name of the specific metric. - `charttype`: Either `line`, `area`, or `stacked`. If null, `line` is the default value. -You can read more about `family` and `context` in the [web dashboard](https://github.com/netdata/netdata/blob/master/web/README.md#families) doc. +You can read more about `family` and `context` in the [web dashboard](https://github.com/netdata/netdata/blob/master/src/web/README.md#families) doc.
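Putting those fields together, a minimal chart definition in a python.d collector might look like this sketch (the collector, chart and dimension names are hypothetical):

```python
# 'options' follows [name, title, units, family, context, charttype]
CHARTS = {
    'requests': {
        'options': [None, 'Requests served', 'requests/s', 'requests', 'example_collector.requests', 'line'],
        'lines': [
            # each dimension: [id, name, algorithm]
            ['requests_total', 'requests', 'incremental'],
        ]
    }
}
```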
Once the chart has been defined, you should define the dimensions of the chart. Dimensions are basically the metrics to be represented in this chart and each chart can have more than one dimension. In order to define the dimensions, the @@ -539,7 +539,7 @@ At minimum, to be buildable and testable, the PR needs to include: - A basic configuration for the plugin in the appropriate global config file: `collectors/python.d.plugin/python.d.conf`, which is also in YAML format. Either add a line that reads `# <module_name>: yes` if the module is to be enabled by default, or one that reads `<module_name>: no` if it is to be disabled by default. - A makefile for the plugin at `collectors/python.d.plugin/<module_name>/Makefile.inc`. Check an existing plugin for what this should look like. - A line in `collectors/python.d.plugin/Makefile.am` including the above-mentioned makefile. Place it with the other plugin includes (please keep the includes sorted alphabetically). -- Optionally, chart information in `web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts. +- Optionally, chart information in `src/web/gui/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts. - Optionally, some default alert configurations for your collector in `health/health.d/<module_name>.conf` and a line adding `<module_name>.conf` in `health/Makefile.am`. ## Framework class reference diff --git a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md index a4dcd06a6fc798..a5baaee8b1448b 100644 --- a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md +++ b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md @@ -147,7 +147,7 @@ followed by a similar spike from the Apache benchmark. > To see other charts, replace `apps.file_open` with the context of the chart you want to see data for. > > To see all the API options, visit our [Swagger -> documentation](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml) +> documentation](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/src/web/api/netdata-swagger.yaml) > and look under the **/data** section. ## Troubleshoot and debug applications with eBPF diff --git a/docs/guides/using-host-labels.md b/docs/guides/using-host-labels.md index 686e1e202069b5..f8a54a8afd3879 100644 --- a/docs/guides/using-host-labels.md +++ b/docs/guides/using-host-labels.md @@ -140,7 +140,7 @@ child system. It's a vastly simplified way of accessing critical information abo > ⚠️ Because automatic labels for child nodes are accessible via API calls, and contain sensitive information like > kernel and operating system versions, you should secure streaming connections with SSL. See the [streaming > documentation](https://github.com/netdata/netdata/blob/master/src/streaming/README.md#securing-streaming-communications) for details. You may also want to use -> [access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) or [expose the API only to LAN/localhost +> [access lists](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#access-lists) or [expose the API only to LAN/localhost > connections](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#expose-netdata-only-in-a-private-lan). You can also use `_is_parent`, `_is_child`, and any other host labels in both health entities and metrics diff --git a/docs/metrics-storage-management/enable-streaming.md b/docs/metrics-storage-management/enable-streaming.md index 0f1f0ca9e702f3..49c798804a5a71 100644 --- a/docs/metrics-storage-management/enable-streaming.md +++ b/docs/metrics-storage-management/enable-streaming.md @@ -179,7 +179,7 @@ sudo chown netdata:netdata /etc/netdata/ssl/cert.pem /etc/netdata/ssl/key.pem Next, enforce TLS/SSL on the web server. Open `netdata.conf`, scroll down to the `[web]` section, and look for the `bind to` setting. Add `^SSL=force` to turn on TLS/SSL. See the [web server -reference](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) for other TLS/SSL options. +reference](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#enabling-tls-support) for other TLS/SSL options.
```conf [web] diff --git a/integrations/integrations.js b/integrations/integrations.js index be0a056c673754..f8415a217df6a4 100644 --- a/integrations/integrations.js +++ b/integrations/integrations.js @@ -2,4 +2,4 @@ // It gets generated by integrations/gen_integrations.py in the Netdata repo export const categories = [{"id": "deploy", "name": "Deploy", "description": "", "most_popular": true, "priority": 1, "children": [{"id": "deploy.operating-systems", "name": "Operating Systems", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "deploy.docker-kubernetes", "name": "Docker & Kubernetes", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "deploy.provisioning-systems", "parent": "deploy", "name": "Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection", "name": "Data Collection", "description": "", "most_popular": true, "priority": 2, "children": [{"id": "data-collection.other", "name": "Other", "description": "", "most_popular": false, "priority": -1, "collector_default": true, "children": []}, {"id": "data-collection.ebpf", "name": "eBPF", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd", "name": "FreeBSD", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.containers-and-vms", "name": "Containers and VMs", "description": "", "most_popular": true, "priority": 6, "children": []}, {"id": "data-collection.database-servers", "name": "Databases", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.kubernetes", "name": "Kubernetes", "description": "", "most_popular": true, "priority": 7, "children": []}, {"id": "data-collection.notifications", "name": "Incident Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.service-discovery-registry", "name": "Service Discovery / Registry", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.web-servers-and-web-proxies", "name": "Web Servers and Web Proxies", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.cloud-provider-managed", "name": "Cloud Provider Managed", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.windows-systems", "name": "Windows Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.apm", "name": "APM", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.hardware-devices-and-sensors", "name": "Hardware Devices and Sensors", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": "data-collection.macos-systems", "name": "macOS Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.message-brokers", "name": "Message Brokers", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.provisioning-systems", "name": "Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.search-engines", "name": "Search Engines", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems", "name": "Linux Systems", "description": "", 
"most_popular": true, "priority": 5, "children": [{"id": "data-collection.linux-systems.system-metrics", "name": "System", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.linux-systems.memory-metrics", "name": "Memory", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.linux-systems.cpu-metrics", "name": "CPU", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.linux-systems.pressure-metrics", "name": "Pressure", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.network-metrics", "name": "Network", "description": "", "most_popular": true, "priority": 5, "children": []}, {"id": "data-collection.linux-systems.ipc-metrics", "name": "IPC", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.disk-metrics", "name": "Disk", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": "data-collection.linux-systems.firewall-metrics", "name": "Firewall", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.power-supply-metrics", "name": "Power Supply", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics", "name": "Filesystem", "description": "", "most_popular": false, "priority": -1, "children": [{"id": "data-collection.linux-systems.filesystem-metrics.zfs", "name": "ZFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.btrfs", "name": "BTRFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.nfs", "name": "NFS", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.linux-systems.kernel-metrics", "name": "Kernel", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.networking-stack-and-network-interfaces", "name": "Networking Stack and Network Interfaces", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.synthetic-checks", "name": "Synthetic Checks", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ci-cd-systems", "name": "CICD Platforms", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ups", "name": "UPS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd-systems", "name": "FreeBSD Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.logs-servers", "name": "Logs Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.security-systems", "name": "Security Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.observability", "name": "Observability", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.gaming", "name": "Gaming", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.iot-devices", "name": "IoT Devices", "description": "", "most_popular": false, "priority": 
-1, "children": []}, {"id": "data-collection.media-streaming-servers", "name": "Media Services", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.authentication-and-authorization", "name": "Authentication and Authorization", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.project-management", "name": "Project Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.application-servers", "name": "Application Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.dns-and-dhcp-servers", "name": "DNS and DHCP Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.mail-servers", "name": "Mail Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.processes-and-system-services", "name": "Processes and System Services", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.storage-mount-points-and-filesystems", "name": "Storage, Mount Points and Filesystems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.systemd", "name": "Systemd", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.telephony-servers", "name": "Telephony Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.vpns", "name": "VPNs", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.blockchain-servers", "name": "Blockchain Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.distributed-computing-systems", "name": "Distributed Computing Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.generic-data-collection", "name": "Generic Data Collection", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.p2p", "name": "P2P", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.snmp-and-networked-devices", "name": "SNMP and Networked Devices", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.system-clock-and-ntp", "name": "System Clock and NTP", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.nas", "name": "NAS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.api-gateways", "name": "API Gateways", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.task-queues", "name": "Task Queues", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ftp-servers", "name": "FTP Servers", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "logs", "name": "Logs", "description": "Monitoring logs on your infrastructure", "most_popular": true, "priority": 3, "children": []}, {"id": "export", "name": "exporters", "description": "Exporter Integrations", "most_popular": true, "priority": 5, "children": []}, {"id": "notify", "name": "notifications", "description": "Notification Integrations", "most_popular": true, "priority": 4, 
"children": [{"id": "notify.agent", "name": "Agent Dispatched Notifications", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "notify.cloud", "name": "Centralized Cloud Notifications", "description": "", "most_popular": true, "priority": 1, "children": []}]}] -export const integrations = [{"meta": {"plugin_name": "apps.plugin", "module_name": "apps", "monitored_instance": {"name": "Applications", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "applications.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["applications", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-apps-Applications", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "groups", "monitored_instance": {"name": "User Groups", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "user.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["groups", "processes", "user auditing", "authorization", "os", "host monitoring"], "most_popular": false}, "overview": "# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization on a user groups context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user_group | The name of the user group. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-groups-User_Groups", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "users", "monitored_instance": {"name": "Users", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "users.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["users", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization on a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | The name of the user. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-users-Users", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Containers", "link": "", "categories": ["data-collection.containers-and-vms"], "icon_filename": "container.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the 
instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Kubernetes Containers", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["k8s", "kubernetes", "pods", "containers"], "most_popular": true}, "overview": "# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). 
The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "LXC Containers", "link": "", "icon_filename": "lxc.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["lxc", "lxd", "container"], "most_popular": true}, "overview": "# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 
cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-LXC_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Libvirt Containers", "link": "", "icon_filename": "libvirt.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["libvirt", "container"], "most_popular": true}, "overview": "# Libvirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Libvirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Libvirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Proxmox Containers", "link": "", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["proxmox", "container"], "most_popular": true}, "overview": "# Proxmox Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Proxmox_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Systemd Services", "link": "", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"], "keywords": ["systemd", "services"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Systemd Services for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Systemd_Services", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Virtual Machines", "link": "", "icon_filename": "container.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vms", "virtualization", "container"], "most_popular": true}, "overview": "# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Virtual Machines for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Virtual_Machines", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "oVirt Containers", "link": "", "icon_filename": "ovirt.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ovirt", "container"], "most_popular": true}, "overview": "# oVirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-oVirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "ap", "monitored_instance": {"name": "Access Points", "link": "https://learn.netdata.cloud/docs/data-collection/networking-stack-and-network-interfaces/linux-access-points", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ap", "access", "point", "wireless", "network"], "most_popular": false}, "overview": "# Access Points\n\nPlugin: charts.d.plugin\nModule: ap\n\n## Overview\n\nThe ap collector visualizes data related to wireless access points.\n\nIt uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect if you are running access points on your Linux box.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### `iw` utility\n\nMake sure the `iw` utility is installed.\n\n
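As a quick check, you can run the same commands the collector relies on and preview what it would collect (`wlan0` below is a placeholder interface name):\n\n```bash\n# list wireless interfaces; the collector picks those reported as `type AP`\niw dev\n# dump per-station statistics for an AP interface\niw wlan0 station dump\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/ap.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the ap collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 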
| 1 | no |\n| ap_priority | Controls the order of charts on the netdata dashboard. | 6900 | no |\n| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Change the collection frequency\n\nSpecify a custom collection frequency (update_every) for this collector\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\nap_update_every=10\n\n# the charts priority on the dashboard\n#ap_priority=6900\n\n# the number of retries to do in case of failure\n# before disabling the module\n#ap_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 ap\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit, expected | Mbps |\n\n", "integration_type": "collector", "id": "charts.d.plugin-ap-Access_Points", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/ap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "apcupsd", "monitored_instance": {"name": "APC UPS", "link": "https://www.apc.com", "categories": ["data-collection.ups"], "icon_filename": "apc.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ups", "apc", "power", "supply", "battery", "apcupsd"], "most_popular": false}, "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\"\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n
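\nYou can also query a source directly with `apcaccess` to confirm it is reachable before adding it here (host and port below are examples):\n\n```sh\napcaccess status 127.0.0.1:3551\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 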
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. 
Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", "integration_type": "collector", "id": "charts.d.plugin-apcupsd-APC_UPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "libreswan", "monitored_instance": {"name": "Libreswan", "link": "https://libreswan.org/", "categories": ["data-collection.vpns"], "icon_filename": "libreswan.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vpn", "libreswan", "network", "ipsec"], "most_popular": false}, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes two commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost likely the `netdata` user will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin therefore attempts to run `ipsec` as `sudo ipsec ...` to get access to the libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n
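\nTo verify the sudoers entry works, you can run the same commands as the `netdata` user (a quick sanity check; adjust `/sbin/ipsec` if `which ipsec` reports a different path):\n\n```sh\nsudo -u netdata sudo /sbin/ipsec whack --status\nsudo -u netdata sudo /sbin/ipsec whack --trafficstatus\n```\n\n\n\n### Configuration\n\n#### File\n\nThe 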
configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. 
Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "charts.d.plugin-libreswan-Libreswan", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "opensips", "monitored_instance": {"name": "OpenSIPS", "link": "https://opensips.org/", "categories": ["data-collection.telephony-servers"], "icon_filename": "opensips.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["opensips", "sip", "voice", "video", "stream"], "most_popular": false}, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` with a default set of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n
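\nAs a quick sanity check, you can run the collector's documented default query by hand (a sketch; depending on your setup it may need to run as a user with access to the OpenSIPS FIFO):\n\n```sh\nopensipsctl fifo get_statistics all\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 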
| 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", "integration_type": "collector", "id": "charts.d.plugin-opensips-OpenSIPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (sysfs)", "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", "categories": 
["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "sysfs", "hwmon", "rpi", "raspberry pi"], "most_popular": false}, "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).\nFor all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. 
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave to 1, is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", "integration_type": "collector", "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cups.plugin", "module_name": "cups.plugin", "monitored_instance": {"name": "CUPS", "link": "https://www.cups.org/", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "cups.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance for achieving optimal printing system operations. 
Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses the CUPS shared library to connect to and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs access to the server. Netdata sets the necessary permissions during installation so the plugin can reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when a CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n
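\nOn most systems this is a distribution package of the same name; for example (package names assumed here, adjust for your distribution):\n\n```bash\n# Debian/Ubuntu\nsudo apt install netdata-plugin-cups\n\n# Fedora/RHEL\nsudo dnf install netdata-plugin-cups\n```\n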
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", "integration_type": "collector", "id": "cups.plugin-cups.plugin-CUPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/extfrag", "monitored_instance": {"name": "System Memory Fragmentation", "link": "https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["extfrag", "extfrag_threshold", "memory fragmentation"], "most_popular": false}, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nParses data from files under `debugfs`.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can mount it manually with `sudo mount -t debugfs none /sys/kernel/debug/`. It is also recommended to add an entry to `fstab(5)` so the filesystem is mounted automatically at boot, before Netdata starts.\n
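\nFor example (a minimal sketch; the standard mount point is assumed):\n\n```bash\n# mount debugfs now\nsudo mount -t debugfs none /sys/kernel/debug/\n\n# and make the mount persistent across reboots\necho 'debugfs /sys/kernel/debug debugfs defaults 0 0' | sudo tee -a /etc/fstab\n```\n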
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/zswap", "monitored_instance": {"name": "Linux ZSwap", "link": "https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "zswap", "frontswap", "swap cache"], "most_popular": false}, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from files under `debugfs`.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can mount it manually with `sudo mount -t debugfs none /sys/kernel/debug/`. It is also recommended to add an entry to `fstab(5)` so the filesystem is mounted automatically at boot, before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n
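\nTo confirm that zswap is active on your system (and that these charts will therefore have data), you can check the module parameter, for example:\n\n```bash\n# prints Y when zswap is enabled\ncat /sys/module/zswap/parameters/enabled\n```\n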
\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "intel_rapl", "monitored_instance": {"name": "Power Capping", "link": "https://www.kernel.org/doc/html/next/power/powercap/powercap.html", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["power capping", "energy"], "most_popular": false}, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from files under `debugfs`.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system supports power capping.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can mount it manually with `sudo mount -t debugfs none /sys/kernel/debug/`. It is also recommended to add an entry to `fstab(5)` so the filesystem is mounted automatically at boot, before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the power consumption of the Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", "integration_type": "collector", "id": "debugfs.plugin-intel_rapl-Power_Capping", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "diskspace.plugin", "module_name": "diskspace.plugin", "monitored_instance": {"name": "Disk space", "link": "", "categories": ["data-collection.linux-systems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "ebpf.plugin", "module_name": "disk"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "I/O", "space", "inode"], "most_popular": false}, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from the `/proc/self/mountinfo` and `/proc/diskstats` files.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per-mount-point options using a `[plugin:proc:diskspace:mountpoint]` section.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |\n| check for new mount points every | How often to parse the proc files to detect new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts Netdata simple patterns. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts Netdata simple patterns. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts Netdata simple patterns. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define whether the plugin will show space usage metrics. When set to `auto`, the plugin shows the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n| inodes usage for all disks | Define whether the plugin will show inode usage metrics. When set to `auto`, the plugin shows the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "diskspace.plugin-diskspace.plugin-Disk_space", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "cachestat", "monitored_instance": {"name": "eBPF Cachestat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Page cache", "Hit ratio", "eBPF"], "most_popular": false}, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel manages files.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n
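\nTo check these options on the running kernel without recompiling, you can, for example (a sketch; the config file location varies by distribution):\n\n```bash\n# when the kernel exposes its configuration\nzgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz\n\n# or, on distributions that ship the config under /boot\ngrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-$(uname -r)\n```\n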
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-cachestat-eBPF_Cachestat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "dcstat", "monitored_instance": {"name": "eBPF DCstat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Directory Cache", "File system", "eBPF"], "most_popular": false}, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of files in memory or on storage devices.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n
2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |
\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", "integration_type": "collector", "id": "ebpf.plugin-dcstat-eBPF_DCstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "disk", "monitored_instance": {"name": "eBPF Disk", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hard Disk", "eBPF", "latency", "partition"], "most_popular": false}, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n
5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-disk-eBPF_Disk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filedescriptor", "monitored_instance": {"name": "eBPF Filedescriptor", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["file", "eBPF", "fd", "open", "close"], "most_popular": false}, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing a file descriptor, and possible errors.\n\nAttach tracing (kprobe and trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and how frequently files are opened and closed, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90 and 200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. 
| yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filedescriptor-eBPF_Filedescriptor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filesystem", "monitored_instance": {"name": "eBPF Filesystem", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Filesystem", "ext4", "btrfs", "nfs", "xfs", "zfs", "eBPF", "latency", "I/O"], "most_popular": false}, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, like I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) for the functions or also monitor the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filesystem-eBPF_Filesystem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "hardirq", "monitored_instance": {"name": "eBPF Hardirq", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["HardIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen the options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: `make oldconfig`\n3. Compile your kernel image: `make bzImage`\n4. Compile your modules: `make modules`\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: `make modules_install`\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n
\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n
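An illustrative way to verify and mount `debugfs` (assumes a root shell; the fstab line is an optional way to persist the mount across reboots):\n\n```bash\n# Mount debugfs if it is not already mounted.\nmountpoint -q /sys/kernel/debug || mount -t debugfs none /sys/kernel/debug\n\n# Optional: persist the mount across reboots.\ngrep -q '/sys/kernel/debug' /etc/fstab || \\\n    echo 'debugfs /sys/kernel/debug debugfs defaults 0 0' >> /etc/fstab\n```\n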
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latency for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-hardirq-eBPF_Hardirq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mdflush", "monitored_instance": {"name": "eBPF MDflush", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["MD", "RAID", "eBPF"], "most_popular": false}, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the last data collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mdflush-eBPF_MDflush", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mount", "monitored_instance": {"name": "eBPF Mount", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["mount", "umount", "device", "eBPF"], "most_popular": false}, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls to the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls for the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mount-eBPF_Mount", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "oomkill", "monitored_instance": {"name": "eBPF OOMkill", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application", "memory"], "most_popular": false}, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that run out of memory.\n\nAttach tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", "integration_type": "collector", "id": "ebpf.plugin-oomkill-eBPF_OOMkill", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "process", "monitored_instance": {"name": "eBPF Process", "link": "https://github.com/netdata/netdata/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Memory", "plugin", "eBPF"], "most_popular": false}, "overview": "# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses Netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts, you need to compile Netdata with the flag `NETDATA_DEV_MODE`.\n\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin allocates memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n\n", "integration_type": "collector", "id": "ebpf.plugin-process-eBPF_Process", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "processes", "monitored_instance": {"name": "eBPF Processes", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["thread", "fork", "process", "eBPF"], "most_popular": false}, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to the functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so this option only impacts the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\n\nNo configuration examples are shipped with this integration; an illustrative sketch follows.\n
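An illustrative `ebpf.d/process.conf` sketch; the `apps` and `cgroups` values below are hypothetical non-default choices shown only to demonstrate enabling those integrations, while the remaining values are the documented defaults:\n\n```bash\n# Print an illustrative process.conf; place these lines in the file opened by edit-config.\ncat <<'EOF'\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = yes\n    cgroups = yes\n    pid table size = 32768\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    maps per core = yes\n    lifetime = 300\nEOF\n```\n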
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-processes-eBPF_Processes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "shm", "monitored_instance": {"name": "eBPF SHM", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "shared memory", "eBPF"], "most_popular": false}, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. 
Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no |\n\n{% /details %}\n#### Examples\n\nNo configuration examples are shipped with this integration; an illustrative sketch follows.\n
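An illustrative `ebpf.d/shm.conf` sketch showing the `[syscalls]` section (all values are the documented defaults):\n\n```bash\n# Print an illustrative shm.conf; place these lines in the file opened by edit-config.\ncat <<'EOF'\n[global]\n    update every = 5\n    ebpf load mode = entry\n\n[syscalls]\n    shmget = yes\n    shmat = yes\n    shmdt = yes\n    shmctl = yes\nEOF\n```\n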
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-shm-eBPF_SHM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "socket", "monitored_instance": {"name": "eBPF Socket", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["TCP", "UDP", "bandwidth", "server", "connection", "socket"], "most_popular": false}, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n
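An illustrative check of the required options before recompiling anything (only one of the two configuration sources usually exists on a given system):\n\n```bash\n# Check each required option in /proc/config.gz or the booted kernel's /boot config.\nfor opt in CONFIG_KPROBES CONFIG_BPF CONFIG_BPF_SYSCALL CONFIG_BPF_JIT; do\n    if [ -r /proc/config.gz ]; then\n        zgrep -q ^${opt}=y /proc/config.gz && echo ${opt} ok || echo ${opt} missing\n    else\n        grep -q ^${opt}=y /boot/config-$(uname -r) && echo ${opt} ok || echo ${opt} missing\n    fi\ndone\n```\n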
Now follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\n\nNo configuration examples are shipped with this integration; an illustrative sketch follows.\n
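An illustrative `ebpf.d/network.conf` sketch; all table sizes shown are the documented defaults:\n\n```bash\n# Print an illustrative network.conf; place these lines in the file opened by edit-config.\ncat <<'EOF'\n[global]\n    update every = 5\n    bandwidth table size = 16384\n    ipv4 connection table size = 16384\n    ipv6 connection table size = 16384\n    udp connection table size = 4096\n    maps per core = yes\n    lifetime = 300\nEOF\n```\n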
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |\n| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connected_v4 | connections/s |\n| cgroup.net_conn_ipv6 | connected_v6 | connections/s |\n| cgroup.net_bytes_recv | received | calls/s |\n| cgroup.net_bytes_sent | sent | calls/s |\n| cgroup.net_tcp_recv | received | calls/s |\n| cgroup.net_tcp_send | sent | calls/s |\n| cgroup.net_retransmit | retransmitted | calls/s |\n| cgroup.net_udp_send | sent | calls/s |\n| cgroup.net_udp_recv | received | calls/s |\n| services.net_conn_ipv6 | a dimension per systemd service | connections/s |\n| services.net_bytes_recv | a dimension per systemd service | kilobits/s |\n| services.net_bytes_sent | a dimension per systemd service | kilobits/s |\n| services.net_tcp_recv | a dimension per systemd service | calls/s |\n| services.net_tcp_send | a dimension per systemd service | calls/s |\n| services.net_tcp_retransmit | a dimension per systemd service | calls/s |\n| services.net_udp_send | a dimension per systemd service | calls/s |\n| services.net_udp_recv | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-socket-eBPF_Socket", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "softirq", "monitored_instance": {"name": "eBPF SoftIRQ", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SoftIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latency for each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-softirq-eBPF_SoftIRQ", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "swap", "monitored_instance": {"name": "eBPF SWAP", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SWAP", "memory", "eBPF", "Hard Disk"], "most_popular": false}, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitor when swap has I/O events and which applications execute them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-swap-eBPF_SWAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "sync", "monitored_instance": {"name": "eBPF Sync", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "eBPF", "hard disk", "memory"], "most_popular": false}, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to storage devices.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by Netdata Cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.meory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-sync-eBPF_Sync", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "vfs", "monitored_instance": {"name": "eBPF VFS", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["virtual", "filesystem", "eBPF", "I/O", "files"], "most_popular": false}, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n
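Before rebuilding anything, a small illustrative check (file paths vary by distribution) can confirm whether the required flags are already set:\n\n```bash\n# look for the required options in the running kernel's configuration\nzcat /proc/config.gz 2>/dev/null | grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)='\n# fall back to the boot configuration if /proc/config.gz is absent\ngrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' \"/boot/config-$(uname -r)\"\n```\n\n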
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor the call (`entry`) of the functions or also monitor the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin checks OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set default lifetime for thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of calls to VFS functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-vfs-eBPF_VFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.0.freq", "monitored_instance": {"name": "dev.cpu.0.freq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
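As an illustrative cross-check (not part of the integration itself), the same value can be read directly from the FreeBSD sysctl this module polls:\n\n```bash\n# print the current frequency of the first CPU, in MHz\nsysctl dev.cpu.0.freq\n```\n\n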
### Per dev.cpu.0.freq instance\n\nThe metric shows the current CPU frequency; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.temperature", "monitored_instance": {"name": "dev.cpu.temperature", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet the current CPU temperature\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
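Similarly (an illustrative sketch; it assumes a temperature driver such as coretemp(4) or amdtemp(4) is loaded), the raw reading is available from sysctl:\n\n```bash\n# print the temperature of the first CPU core\nsysctl dev.cpu.0.temperature\n```\n\n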
### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "devstat", "monitored_instance": {"name": "devstat", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information for each hard disk available on the host.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the possibility to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. | auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
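Although no configuration examples ship with the integration, a minimal illustrative snippet for `netdata.conf` (the disk patterns below are hypothetical) could look like this:\n\n```ini\n[plugin:freebsd:kern.devstat]\n enable new disks detected at runtime = auto\n # hypothetical patterns: skip charts for these devices\n disable by default disks matching = pass* cd*\n```\n\n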
", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | io, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", "integration_type": "collector", "id": "freebsd.plugin-devstat-devstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getifaddrs", "monitored_instance": {"name": "getifaddrs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls the `getifaddrs` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the possibility to discover new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for IPv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Define which interfaces are considered physical and are aggregated into `system.net`. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nA general overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getifaddrs-getifaddrs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getmntinfo", "monitored_instance": {"name": "getmntinfo", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls the `getmntinfo` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. | /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
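Purely as an illustration (the second path pattern is hypothetical), excluding additional paths in `netdata.conf` might look like this:\n\n```ini\n[plugin:freebsd:getmntinfo]\n # hypothetical: hide space metrics for these mount points\n exclude space metrics on paths = /proc/* /var/tmp/*\n```\n\n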
", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getmntinfo-getmntinfo", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "hw.intrcnt", "monitored_instance": {"name": "hw.intrcnt", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show the system interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-hw.intrcnt-hw.intrcnt", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "ipfw", "monitored_instance": {"name": "ipfw", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about the FreeBSD firewall.\n\nThe plugin uses a RAW socket to communicate with the kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
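As an aside (an illustrative command; it assumes the ipfw firewall is enabled on the host), the static rules behind the per-rule dimensions below can be listed together with their counters:\n\n```bash\n# list firewall rules along with packet and byte counters\nipfw -a list\n```\n\n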
### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", "integration_type": "collector", "id": "freebsd.plugin-ipfw-ipfw", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.cp_time", "monitored_instance": {"name": "kern.cp_time", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThese options are set in the main netdata configuration file.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.cp_time-kern.cp_time", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.msq", "monitored_instance": {"name": "kern.ipc.msq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect number of IPC message Queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.msq-kern.ipc.msq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.sem", "monitored_instance": {"name": "kern.ipc.sem", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.sem-kern.ipc.sem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.shm", "monitored_instance": {"name": "kern.ipc.shm", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.shm-kern.ipc.shm", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.icmp.stats", "monitored_instance": {"name": "net.inet.icmp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.ip.stats", "monitored_instance": {"name": "net.inet.ip.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.states", "monitored_instance": {"name": "net.inet.tcp.states", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.stats", "monitored_instance": {"name": "net.inet.tcp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
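`net.inet.tcp.states`, by contrast, lives in the shared `[plugin:freebsd]` section rather than a module-specific one, so toggling it is a single line; a sketch:

```ini
[plugin:freebsd]
 # disables only the TCP connection state chart; other freebsd.plugin modules are unaffected
 net.inet.tcp.states = no
```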
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issues metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
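Several `net.inet.tcp.stats` options default to `auto`, which (presumably) means the chart is created only when the system actually exposes that data; an explicit `yes` or `no` overrides the auto-detection. A sketch with illustrative values:

```ini
[plugin:freebsd:net.inet.tcp.stats]
 # force SYN-cookie charts on and ECN charts off, regardless of auto-detection
 TCP SYN cookies = yes
 ECN packets = no
```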
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.udp.stats", "monitored_instance": {"name": "net.inet.udp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.icmp6.stats", "monitored_instance": {"name": "net.inet6.icmp6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
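For completeness, a hypothetical stanza for the `net.inet.udp.stats` module documented above (values are illustrative):

```ini
[plugin:freebsd:net.inet.udp.stats]
 ipv4 UDP packets = yes
 ipv4 UDP errors = no
```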
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.ip6.stats", "monitored_instance": {"name": "net.inet6.ip6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits
on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.isr", "monitored_instance": {"name": "net.isr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe 
default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable general vision about softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
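The `net.isr` module above has two switches, one for the aggregate chart and one for the per-core breakdown; a sketch that keeps the aggregate view but drops the per-core charts (values illustrative):

```ini
[plugin:freebsd:net.isr]
 netisr = yes
 netisr per core = no
```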
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.isr-net.isr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "system.ram", "monitored_instance": {"name": "system.ram", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-system.ram-system.ram", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "uptime", "monitored_instance": {"name": "uptime", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow how long the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "freebsd.plugin-uptime-uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.loadavg", "monitored_instance": {"name": "vm.loadavg", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitoring for number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.loadavg-vm.loadavg", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_intr", "monitored_instance": {"name": "vm.stats.sys.v_intr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
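`vm.loadavg` and the `vm.stats.*` modules documented in the surrounding entries are all single-option toggles in the shared `[plugin:freebsd]` section; a combined sketch (option names from the respective "Config options" tables, values illustrative):

```ini
[plugin:freebsd]
 vm.loadavg = yes
 vm.stats.sys.v_intr = yes
 vm.stats.sys.v_soft = no
 vm.stats.sys.v_swtch = yes
 vm.stats.vm.v_pgfaults = yes
 vm.stats.vm.v_swappgs = yes
 vm.swap_info = yes
```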
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_soft", "monitored_instance": {"name": "vm.stats.sys.v_soft", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_swtch", "monitored_instance": {"name": "vm.stats.sys.v_swtch", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_pgfaults", "monitored_instance": {"name": "vm.stats.vm.v_pgfaults", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable memory page fault metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_swappgs", "monitored_instance": {"name": "vm.stats.vm.v_swappgs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.swap_info", "monitored_instance": {"name": "vm.swap_info", "link": "", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.swap_info-vm.swap_info", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.vmtotal", "monitored_instance": {"name": "vm.vmtotal", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on the host. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics show an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.vmtotal-vm.vmtotal", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "zfs", "monitored_instance": {"name": "zfs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for ZFS filesystem\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. 
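The `vm.vmtotal` module above uses its own `[plugin:freebsd:vm.vmtotal]` section; a hypothetical override (option names from its "Config options" table, values illustrative):

```ini
[plugin:freebsd:vm.vmtotal]
 enable total processes = yes
 processes running = yes
 real memory = no
```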
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", "integration_type": "collector", "id": "freebsd.plugin-zfs-zfs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freeipmi.plugin", "module_name": "freeipmi", "monitored_instance": {"name": "Intelligent Platform Management Interface (IPMI)", "link": "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": 
{"description": ""}, "keywords": ["sensors", "ipmi", "freeipmi", "ipmimonitoring"], "most_popular": true}, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\n\"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\"\n\n\nThe plugin uses open source library IPMImonitoring to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nLinux kernel module for IPMI can create big overhead.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details summary=\"Command options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. 
##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. 
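A typical invocation (a sketch; the binary's location may vary by distribution):\n\n```bash\n# List all sensors; root privileges are usually needed to reach the BMC\nsudo ipmimonitoring\n```\n\n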
The first column is the wanted ID:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the `ignore` option, which accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:\n\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", "integration_type": "collector", "id": "freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-activemq", "module_name": "activemq", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.message-brokers"], "icon_filename": "activemq.png", "name": "ActiveMQ", "link": "https://activemq.apache.org/"}, "alternative_monitored_instances": [], "keywords": ["message broker"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). 
| | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", "integration_type": "collector", "id": "go.d.plugin-activemq-ActiveMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-apache", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "Apache", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Apache", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-energid", "module_name": "apache", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energi Core Wallet", "link": "", "icon_filename": "energi.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["energid"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Energi Core Wallet\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis module monitors Energi Core Wallet instances.\nWorks only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/energid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/energid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9796 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9796\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n - name: remote\n url: http://192.0.2.1:9796\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Energi Core Wallet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| energid.blockindex | blocks, headers | count |\n| energid.difficulty | difficulty | difficulty |\n| energid.mempool | max, usage, tx_size | bytes |\n| energid.secmem | total, used, free, locked | bytes |\n| energid.network | connections | connections |\n| energid.timeoffset | timeoffset | seconds |\n| energid.utxo_transactions | transactions, output_transactions | transactions |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Energi_Core_Wallet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/energid/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpd", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "HTTPD", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | 
Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-HTTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cassandra", "module_name": "cassandra", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.database-servers"], "icon_filename": "cassandra.svg", "name": "Cassandra", "link": "https://cassandra.apache.org/_/index.html"}, "alternative_monitored_instances": [], "keywords": ["nosql", "dbms", "db", "database"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and more, while also providing metrics for each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`:\n ```\n 
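# The jar path, agent port (7072), and config path below must match your installation.\n 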
JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-cassandra-Cassandra", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-chrony", "module_name": "chrony", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "chrony.jpg", "name": "Chrony", "link": "https://chrony.tuxfamily.org/"}, "alternative_monitored_instances": [], "keywords": [], "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}, "most_popular": false}, "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers' activity status.\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers the Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cockroachdb", "plugin_name": "go.d.plugin", "module_name": "cockroachdb", "monitored_instance": {"name": "CockroachDB", "link": "https://www.cockroachlabs.com/", "icon_filename": "cockroachdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cockroachdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe 
default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", "integration_type": "collector", "id": "go.d.plugin-cockroachdb-CockroachDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-consul", "plugin_name": "go.d.plugin", "module_name": "consul", "monitored_instance": {"name": "Consul", "link": "https://www.consul.io/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "consul.svg"}, "alternative_monitored_instances": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["service networking platform", "hashicorp"], "most_popular": true}, "overview": 
"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, 
quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks at the node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at the service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-consul-Consul", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/consul/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-coredns", "plugin_name": "go.d.plugin", "module_name": "coredns", "monitored_instance": {"name": "CoreDNS", "link": "https://coredns.io/", "icon_filename": "coredns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["coredns", "dns", "kubernetes"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. 
| | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-coredns-CoreDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchbase", "plugin_name": "go.d.plugin", "module_name": "couchbase", "monitored_instance": {"name": "Couchbase", "link": "https://www.couchbase.com/", "icon_filename": "couchbase.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchbase", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchbase-Couchbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchdb", "plugin_name": "go.d.plugin", "module_name": "couchdb", "monitored_instance": {"name": "CouchDB", "link": "https://couchdb.apache.org/", "icon_filename": "couchdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchdb-CouchDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dns_query", "plugin_name": "go.d.plugin", "module_name": "dns_query", "monitored_instance": {"name": "DNS query", "link": "", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domains or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record types. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dns_query-DNS_query", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsdist", "plugin_name": "go.d.plugin", "module_name": "dnsdist", "monitored_instance": {"name": "DNSdist", "link": "https://dnsdist.org/", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsdist", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSDist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsdist-DNSdist", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq", "plugin_name": "go.d.plugin", "module_name": "dnsmasq", "monitored_instance": {"name": "Dnsmasq", "link": "https://thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis 
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n{% /details %}\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq-Dnsmasq", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq_dhcp", "plugin_name": "go.d.plugin", "module_name": "dnsmasq_dhcp", "monitored_instance": {"name": "Dnsmasq DHCP", "link": "https://www.thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dhcp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP lease databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. 
| /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker", "plugin_name": "go.d.plugin", "module_name": "docker", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["container"], "most_popular": true}, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers' state, health status, and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List 
images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires the netdata user to be in the docker group (typically granted with `sudo usermod -aG docker netdata` followed by a restart of Netdata).\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 1 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker-Docker", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker_engine", "plugin_name": "go.d.plugin", "module_name": "docker_engine", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker Engine", "link": "https://docs.docker.com/engine/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["docker", "container"], "most_popular": false}, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to the known Docker Engine metrics endpoint: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not 
expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nConfiguration with HTTPS enabled and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker_engine-Docker_Engine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dockerhub", "plugin_name": "go.d.plugin", "module_name": "dockerhub", "monitored_instance": {"name": "Docker Hub repository", "link": "https://hub.docker.com/", "icon_filename": "docker.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["dockerhub"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repository statistics, such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
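Under the hood this is a plain poll of the DockerHub v2 API (the `url` option above points at `https://hub.docker.com/v2/repositories`); a minimal sketch of previewing the raw record for the hypothetical `user1/name1` repository from the example above (exact response fields may vary):\n\n```bash\n# Fetch the raw DockerHub repository record the collector parses\ncurl -s https://hub.docker.com/v2/repositories/user1/name1 | head -c 300\n```\n\n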
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dockerhub-Docker_Hub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-elasticsearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elasticsearch", "link": "https://www.elastic.co/elasticsearch/", "icon_filename": "elasticsearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
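In this default single-node mode, the data comes from the `/_nodes/_local/stats` endpoint listed in the table above; a minimal sketch of checking it by hand, assuming the default `http://127.0.0.1:9200` URL:\n\n```bash\n# Confirm the local-node stats endpoint (single-node mode's data source) responds\ncurl -s http://127.0.0.1:9200/_nodes/_local/stats | head -c 300\n```\n\n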
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with HTTPS enabled and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-Elasticsearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-opensearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenSearch", "link": "https://opensearch.org/", "icon_filename": "opensearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the OpenSearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | 
[Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nOpenSearch with HTTPS enabled and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-OpenSearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-envoy", "plugin_name": "go.d.plugin", "module_name": "envoy", "monitored_instance": {"name": "Envoy", "link": "https://www.envoyproxy.io/", "icon_filename": "envoy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["envoy", "proxy"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. 
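All metric labels are added to charts.\n\nAs a quick sanity check (a sketch, assuming the `http://127.0.0.1:9901/stats/prometheus` admin endpoint used in the examples above), you can eyeball that raw Prometheus text directly:\n\n```bash\n# Print the first few Prometheus-format lines Envoy exposes\ncurl -s http://127.0.0.1:9901/stats/prometheus | head -n 5\n```\n\n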
This scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, sent | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | 
errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-envoy-Envoy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-filecheck", "plugin_name": "go.d.plugin", "module_name": "filecheck", "monitored_instance": {"name": "Files and directories", "link": "", "icon_filename": "filesystem.svg", "categories": ["data-collection.linux-systems"]}, "keywords": ["files", "directories"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors files and directories.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n{% /details %}\n##### Directories\n\nDirectories monitoring example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dirs_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
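For filecheck, each monitored file and directory appears as a separate dimension in the charts below.\n\n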
### Per Files and directories instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence | a dimension per file | boolean |\n| filecheck.file_mtime_ago | a dimension per file | seconds |\n| filecheck.file_size | a dimension per file | bytes |\n| filecheck.dir_existence | a dimension per directory | boolean |\n| filecheck.dir_mtime_ago | a dimension per directory | seconds |\n| filecheck.dir_num_of_files | a dimension per directory | files |\n| filecheck.dir_size | a dimension per directory | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-filecheck-Files_and_directories", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-fluentd", "plugin_name": "go.d.plugin", "module_name": "fluentd", "monitored_instance": {"name": "Fluentd", "link": "https://www.fluentd.org/", "icon_filename": "fluentd.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["fluentd", "logging"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable the monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", "integration_type": "collector", "id": "go.d.plugin-fluentd-Fluentd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-freeradius", "plugin_name": "go.d.plugin", "module_name": "freeradius", "monitored_instance": {"name": "FreeRADIUS", "link": "https://freeradius.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "freeradius.svg"}, "keywords": ["freeradius", "radius"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable the status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. 
| adminsecret | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-freeradius-FreeRADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-geth", "plugin_name": "go.d.plugin", "module_name": "geth", "monitored_instance": {"name": "Go-ethereum", "link": "https://github.com/ethereum/go-ethereum", "icon_filename": "geth.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["geth", "ethereum", "blockchain"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-geth-Go-ethereum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/geth/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-haproxy", "plugin_name": "go.d.plugin", "module_name": "haproxy", "monitored_instance": {"name": "HAProxy", "link": "https://www.haproxy.org/", "icon_filename": "haproxy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["haproxy", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
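For HAProxy, metrics are reported both for the application as a whole and for each individual proxy, as shown in the two scopes below.\n\n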
### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-haproxy-HAProxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-hdfs", "plugin_name": "go.d.plugin", "module_name": "hdfs", "monitored_instance": {"name": "Hadoop Distributed File System (HDFS)", "link": "https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html", "icon_filename": "hadoop.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["hdfs", "hadoop"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hdfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hdfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hdfs\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
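For HDFS, the DataNode and NameNode columns in the table below indicate which node type exposes each metric.\n\n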
### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-hdfs-Hadoop_Distributed_File_System_(HDFS)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpcheck", "plugin_name": "go.d.plugin", "module_name": "httpcheck", "monitored_instance": {"name": "HTTP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP server availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n{% /details %}\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n{% /details %}\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n{% /details %}\n##### With `headers_match`\n\nExample configurations with `headers_match`. See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.\n\n
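For example, `value: '= noindex,nofollow'` requires the header value to match the string exactly; the linked matcher format also supports glob and regular expression patterns.\n\n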
{% details summary=\"Config\" %}\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # and its value must be equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header,\n # and the value of the header does not matter.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # with a value equal to \"noindex, nofollow\".\n # This config fails the check only when the header is present with this exact value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-httpcheck-HTTP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-isc_dhcpd", "plugin_name": "go.d.plugin", "module_name": "isc_dhcpd", "monitored_instance": {"name": "ISC DHCP", "link": "https://www.isc.org/dhcp/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "keywords": ["dhcpd", "dhcp"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. 
| | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n| isc_dhcpd.pool_active_leases | a dimension per DHCP pool | leases |\n| isc_dhcpd.pool_utilization | a dimension per DHCP pool | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-isc_dhcpd-ISC_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubelet", "plugin_name": "go.d.plugin", "module_name": "k8s_kubelet", "monitored_instance": {"name": "Kubelet", "link": "https://kubernetes.io/docs/concepts/overview/components/#kubelet", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubelet", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| 
k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubelet-Kubelet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubeproxy", "plugin_name": "go.d.plugin", "module_name": "k8s_kubeproxy", "monitored_instance": {"name": "Kubeproxy", "link": "https://kubernetes.io/docs/concepts/overview/components/#kube-proxy", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubeproxy", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubeproxy-Kubeproxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_state", "plugin_name": "go.d.plugin", "module_name": "k8s_state", "monitored_instance": {"name": "Kubernetes Cluster State", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubernetes", "k8s"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-lighttpd", "plugin_name": "go.d.plugin", "module_name": "lighttpd", "monitored_instance": {"name": "Lighttpd", "link": "https://www.lighttpd.net/", "icon_filename": "lighttpd.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
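The tables below describe what the collector publishes; the underlying data comes from the Lighttpd `server-status` endpoint set up above. As a quick sketch for confirming that the endpoint responds (adjust the URL to your setup), you could run:\n\n```bash\ncurl \"http://127.0.0.1/server-status?auto\"\n```\n\n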
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-lighttpd-Lighttpd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logind", "plugin_name": "go.d.plugin", "module_name": "logind", "monitored_instance": {"name": "systemd-logind users", "link": "https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html", "icon_filename": "users.svg", "categories": ["data-collection.systemd"]}, "keywords": ["logind", "systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m logind\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", "integration_type": "collector", "id": "go.d.plugin-logind-systemd-logind_users", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logind/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logstash", "plugin_name": "go.d.plugin", "module_name": "logstash", "monitored_instance": {"name": "Logstash", "link": "https://www.elastic.co/products/logstash", "icon_filename": "elastic-logstash.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["logstash"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
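These values are scraped from the Logstash monitoring API at the configured `url`. A minimal sketch for checking that the endpoint the collector reads is reachable (adjust host and port to your setup):\n\n```bash\ncurl \"http://localhost:9600/_node/stats\"\n```\n\n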
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-logstash-Logstash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mongodb", "plugin_name": "go.d.plugin", "module_name": "mongodb", "monitored_instance": {"name": "MongoDB", "link": "https://www.mongodb.com/", "icon_filename": "mongodb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["mongodb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n  ```javascript\n  use admin\n  db.auth(\"admin\", \"\")\n  ```\n\n- Create a user:\n\n  ```javascript\n  db.createUser({\n    \"user\":\"netdata\",\n    \"pwd\": \"\",\n    \"roles\" : [\n      {role: 'read', db: 'admin' },\n      {role: 'clusterMonitor', db: 'admin'},\n      {role: 'read', db: 'local' }\n    ]\n  })\n  ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n##### With databases metrics\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
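The values below are assembled from the `serverStatus`, `dbStats` and `replSetGetStatus` commands listed in the Overview. A minimal sketch for verifying that the `netdata` user from the Setup section can run them (assumes `mongosh` is installed; replace the password with your own):\n\n```bash\nmongosh \"mongodb://netdata:password@localhost:27017\" --eval \"db.runCommand({ serverStatus: 1 }).ok\"\n```\n\n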
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n  storage engine.\n- Sharding metrics are available on shards only\n  for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", "integration_type": "collector", "id": "go.d.plugin-mongodb-MongoDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mariadb", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MariaDB", "link": "https://mariadb.org/", "icon_filename": "mariadb.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE 
STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m mysql\n  ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ 
mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| 
mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | 
Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-mysql-MariaDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MySQL", "link": "https://www.mysql.com/", "categories": ["data-collection.database-servers"], "icon_filename": "mysql.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION 
CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-percona_mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "Percona MySQL", "link": "https://www.percona.com/software/mysql-database/percona-server", "icon_filename": "percona.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": false}, "overview": "# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netconfig:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netconfig:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-Percona_MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginx", "plugin_name": "go.d.plugin", "module_name": "nginx", "monitored_instance": {"name": "NGINX", "link": "https://www.nginx.com/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "nginx.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "go.d.plugin", "module_name": "web_log"}, {"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nginx", "web", "webserver", "http", "proxy"], "most_popular": true}, "overview": "# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX with HTTPS enabled and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n
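##### HTTPS with HTTP authentication\n\nA sketch combining several of the options above: an HTTPS endpoint, basic HTTP authentication, and certificate verification disabled (all values are placeholders).\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/stub_status\n    username: username\n    password: password\n    tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 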
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginx-NGINX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxplus", "plugin_name": "go.d.plugin", "module_name": "nginxplus", "monitored_instance": {"name": "NGINX Plus", "link": "https://www.nginx.com/products/nginx/", "icon_filename": "nginxplus.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["nginxplus", "nginx", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the API\n\nTo configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with HTTPS enabled and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxplus-NGINX_Plus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxvts", "plugin_name": "go.d.plugin", "module_name": "nginxvts", "monitored_instance": {"name": "NGINX VTS", "link": "https://www.nginx.com/", "icon_filename": "nginx.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with the [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" 
%}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxvts-NGINX_VTS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ntpd", "plugin_name": "go.d.plugin", "module_name": "ntpd", "monitored_instance": {"name": "NTPd", "link": "https://www.ntp.org/documentation/4.2.8-series/ntpd", "icon_filename": "ntp.png", "categories": ["data-collection.system-clock-and-ntp"]}, "keywords": ["ntpd", "ntp", "time"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 3 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n{% /details %}\n##### With peers metrics\n\nCollect peers metrics.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", "integration_type": "collector", "id": "go.d.plugin-ntpd-NTPd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvidia_smi", "plugin_name": "go.d.plugin", "module_name": "nvidia_smi", "monitored_instance": {"name": "Nvidia GPU", "link": "https://www.nvidia.com/en-us/", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, 
"keywords": ["nvidia", "gpu", "hardware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| use_csv_format | Used format when requesting GPU information. XML is used if set to 'no'. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### XML format\n\nUse XML format when requesting GPU information.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n use_csv_format: no\n\n```\n{% /details %}\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | \u2022 | |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | \u2022 | |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_utilization | gpu | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_memory_utilization | memory | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_decoder_utilization | decoder | % | \u2022 | |\n| nvidia_smi.gpu_encoder_utilization | encoder | % | \u2022 | |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | \u2022 |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B | \u2022 | |\n| nvidia_smi.gpu_temperature | temperature | Celsius | \u2022 | \u2022 |\n| nvidia_smi.gpu_voltage | voltage | V | \u2022 | |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | \u2022 | \u2022 |\n| nvidia_smi.gpu_power_draw | power_draw | Watts | \u2022 | \u2022 |\n| nvidia_smi.gpu_performance_state | P0-P15 | state | \u2022 | \u2022 |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | \u2022 | |\n| nvidia_smi.gpu_mig_devices_count | mig | devices | \u2022 | |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvme", "plugin_name": "go.d.plugin", "module_name": "nvme", "monitored_instance": {"name": "NVMe devices", "link": "", "icon_filename": "nvme.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["nvme"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices using the command line tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. 
It uses `sudo` and assumes it is set up so that the netdata user can execute `nvme` as root without a password.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### Allow netdata to execute nvme\n\nAdd the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /usr/sbin/nvme\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvme binary. The default is \"nvme\" and the executable is looked for in the directories specified in the PATH environment variable. | nvme | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvme\n binary_path: /usr/local/sbin/nvme\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn", "plugin_name": "go.d.plugin", "module_name": "openvpn", "monitored_instance": {"name": "OpenVPN", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. 
You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nIt is disabled to not break other tools which use `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n| connect_timeout | Connection timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n| read_timeout | Read timeout in seconds. Sets deadline for read calls. | 2 | no |\n| write_timeout | Write timeout in seconds. Sets deadline for write calls. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n{% /details %}\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn-OpenVPN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn_status_log", "plugin_name": "go.d.plugin", "module_name": "openvpn_status_log", "monitored_instance": {"name": "OpenVPN status log", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN server.\n\nIt parses server log files and provides summary and per user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn_status_log-OpenVPN_status_log", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": {"name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pgbouncer"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your 
PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"\"\n ```\n\n- To verify the credentials, run the following command\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-pgbouncer-PgBouncer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpdaemon", "plugin_name": "go.d.plugin", "module_name": "phpdaemon", "monitored_instance": {"name": "phpDaemon", "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": ["data-collection.apm"]}, "keywords": ["phpdaemon", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics expected to be in JSON format.\n\n
{% details summary=\"phpDaemon configuration\" %}\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write a small application.\nThe application is important, because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n    privileged;\n    listen '127.0.0.1';\n    port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n    public function getRequestRoute($req, $upstream) {\n        // Route requests for /FullStatus to the FullStatus application.\n        if (preg_match('~^/(FullStatus)/?~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n            return $m[1];\n        }\n    }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n    public function beginRequest($req, $upstream) {\n        return new FullStatusRequest($req, $upstream, $this);\n    }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n    public function run() {\n        // Reply with the state of all workers encoded as JSON.\n        $this->header('Content-Type: application/javascript; charset=utf-8');\n\n        $stat = Daemon::getStateOfWorkers();\n        $stat['uptime'] = time() - Daemon::$startTime;\n        echo json_encode($stat);\n    }\n}\n```\n\n{% /details %}\n\n
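You can then verify the endpoint by requesting it and checking that the response is JSON (the address, port, and application name here assume the configuration above):\n\n```bash\ncurl http://127.0.0.1:8509/FullStatus\n```\n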
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpdaemon-phpDaemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpfpm", "plugin_name": "go.d.plugin", "module_name": "phpfpm", "monitored_instance": {"name": "PHP-FPM", "link": "https://php-fpm.org/", "icon_filename": "php.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["phpfpm", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. 
| /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n{% /details %}\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n{% /details %}\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpfpm-PHP-FPM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pihole", "plugin_name": "go.d.plugin", "module_name": "pihole", "monitored_instance": {"name": "Pi-hole", "link": "https://pi-hole.net", "icon_filename": "pihole.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["pihole"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-pihole-Pi-hole", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pika", "plugin_name": "go.d.plugin", "module_name": "pika", "monitored_instance": {"name": "Pika", "link": "https://github.com/OpenAtomFoundation/pika", "icon_filename": "pika.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pika", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server by executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. 
| | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-pika-Pika", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pika/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ping", "plugin_name": "go.d.plugin", "module_name": "ping", "monitored_instance": {"name": "Ping", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["ping"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change add `net.ipv4.ping_group_range=\"0 2147483647\"` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. | 100ms | no |\n\n{% /details %}\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Unprivileged mode\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", "integration_type": "collector", "id": "go.d.plugin-ping-Ping", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ping/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-portcheck", "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": {"name": "TCP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors the availability and response time of TCP services.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined 
globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4 or IPv6 format, or a DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | TCP connection timeout in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per TCP endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-portcheck-TCP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-postgres", "plugin_name": "go.d.plugin", "module_name": "postgres", "monitored_instance": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["data-collection.database-servers"], "icon_filename": "postgres.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "postgres", "postgresql", "sql"], "most_popular": true}, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as the root and netdata users via known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user granted the `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db 
${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage 
|\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-postgres-PostgreSQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/metadata.yaml", "related_resources": ""}, {"meta": 
{"id": "collector-go.d.plugin-powerdns", "plugin_name": "go.d.plugin", "module_name": "powerdns", "monitored_instance": {"name": "PowerDNS Authoritative Server", "link": "https://doc.powerdns.com/authoritative/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns-PowerDNS_Authoritative_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-powerdns_recursor", "plugin_name": "go.d.plugin", "module_name": "powerdns_recursor", "monitored_instance": {"name": "PowerDNS Recursor", "link": "https://doc.powerdns.com/recursor/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns_recursor-PowerDNS_Recursor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-4d_server", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "4D Server", "link": "https://github.com/ThomasMaul/Prometheus_4D_Exporter", "icon_filename": "4d_server.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
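For instance, a hypothetical allow/deny pair (the metric name patterns below are illustrative placeholders, not defaults shipped with the collector) could look like:\n\n```yaml\nselector:\n allow:\n - fourd_*\n deny:\n - fourd_debug_*\n```\n\n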
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-4D_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-8430ft-modem", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "8430FT modem", "link": "https://github.com/dernasherbrezon/8430ft_exporter", "icon_filename": "mtc.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT 
Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
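As a hedged sketch (the patterns here are made-up placeholders, not series names this exporter is known to expose), a job could keep signal-related series while dropping debug ones:\n\n```yaml\nselector:\n allow:\n - modem_signal_*\n deny:\n - modem_debug_*\n```\n\n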
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-8430FT_modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-a10-acos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "A10 ACOS network devices", "link": "https://github.com/a10networks/PrometheusExporter", "icon_filename": "a10-networks.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
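As an illustration only (the prefixes below are assumed, not documented exporter output), a selector might keep interface counters while dropping test series:\n\n```yaml\nselector:\n allow:\n - a10_interface_*\n deny:\n - a10_test_*\n```\n\n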
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-A10_ACOS_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-amd_smi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AMD CPU & GPU", "link": "https://github.com/amd/amd_smi_exporter", "icon_filename": "amd.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AMD_CPU_&_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apicast", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "APIcast", "link": "https://github.com/3scale/apicast", "icon_filename": "apicast.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-APIcast", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arm_hwcpipe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ARM HWCPipe", "link": "https://github.com/ylz-at/arm-hwcpipe-exporter", "icon_filename": "arm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM devices running Android and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe 
Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ARM_HWCPipe", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Compute instances", "link": "https://github.com/O1ahmad/aws_ec2_exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances' key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS 
EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Compute_instances", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2_spot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Spot Instance", "link": "https://github.com/patcadelina/ec2-spot-exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Spot_Instance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS ECS", "link": "https://github.com/bevers222/ecs-exporter", "icon_filename": "amazon-ecs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS 
exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_ECS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Health events", "link": "https://github.com/vladvasiliu/aws-health-exporter-rs", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Health_events", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Quota", "link": "https://github.com/emylincon/aws_quota_exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_rds", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS RDS", "link": "https://github.com/percona/rds_exporter", "icon_filename": "aws-rds.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_RDS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_s3", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS S3 buckets", "link": "https://github.com/ribbybibby/s3_exporter", "icon_filename": "aws-s3.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 
Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_S3_buckets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_sqs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS SQS", "link": "https://github.com/jmal98/sqs-exporter", "icon_filename": "aws-sqs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS 
Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_SQS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_instance_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS instance health", "link": "https://github.com/bobtfish/aws-instance-health-exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_instance_health", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airthings_waveplus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Airthings Waveplus air sensor", "link": "https://github.com/jeremybz/waveplus_exporter", "icon_filename": "airthings.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Airthings_Waveplus_air_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_edgedns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Edge DNS Traffic", "link": "https://github.com/akamai/akamai-edgedns-traffic-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Edge_DNS_Traffic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_gtm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Global Traffic Management", "link": "https://github.com/akamai/akamai-gtm-metrics-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Global_Traffic_Management", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_cloudmonitor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Cloudmonitor", "link": "https://github.com/ExpressenAB/cloudmonitor_exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akami_Cloudmonitor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alamos_fe2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alamos FE2 server", "link": "https://github.com/codemonauts/prometheus-fe2-exporter", "icon_filename": "alamos_fe2.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 
Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alamos_FE2_server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alibaba-cloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alibaba Cloud", "link": "https://github.com/aylei/aliyun-exporter", "icon_filename": "alibaba-cloud.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alibaba_Cloud", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-altaro_backup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Altaro Backup", "link": "https://github.com/raph2i/altaro_backup_exporter", "icon_filename": "altaro.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup 
Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those that have no '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Altaro_Backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aaisp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Andrews & Arnold line status", "link": "https://github.com/daveio/aaisp-exporter", "icon_filename": "andrewsarnold.jpg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Andrews_&_Arnold_line_status", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Airflow", "link": "https://github.com/shalb/airflow-exporter", "icon_filename": "airflow.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Airflow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-flink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Flink", "link": "https://github.com/matsumana/flink_exporter", "icon_filename": "apache_flink.png", "categories": ["data-collection.apm"]}, "keywords": ["web server", "http", "https"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics 
Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Flink", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apple_timemachine", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apple Time Machine", "link": "https://github.com/znerol/prometheus-timemachine-exporter", "icon_filename": "apple.svg", "categories": ["data-collection.macos-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine 
Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apple_Time_Machine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aruba", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Aruba devices", "link": "https://github.com/slashdoom/aruba_exporter", "icon_filename": "aruba.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "aruba devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks devices performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Aruba_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arvancloud_cdn", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ArvanCloud CDN", "link": "https://github.com/arvancloud/ar-prometheus-exporter", "icon_filename": "arvancloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ArvanCloud_CDN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-audisto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Audisto", "link": "https://github.com/ZeitOnline/audisto_exporter", "icon_filename": "audisto.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Audisto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-authlog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AuthLog", "link": "https://github.com/woblerr/authlog_exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AuthLog", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_ad_app_passwords", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure AD App passwords", "link": "https://github.com/vladvasiliu/azure-app-secrets-monitor", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_AD_App_passwords", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_elastic_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Elastic Pool SQL", "link": "https://github.com/benclapp/azure_elastic_sql_exporter", "icon_filename": "azure-elastic-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Elastic_Pool_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_res", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Resources", "link": "https://github.com/FXinnovation/azure-resources-exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Azure resources' vital metrics for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Resources", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure SQL", "link": "https://github.com/iamseth/azure_sql_exporter", "icon_filename": "azure-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL 
exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_service_bus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Service Bus", "link": "https://github.com/marcinbudny/servicebus_exporter", "icon_filename": "azure-service-bus.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Service_Bus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_app", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure application", "link": "https://github.com/RobustPerception/azure_metrics_exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_application", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bosh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BOSH", "link": "https://github.com/bosh-prometheus/bosh_exporter", "icon_filename": "bosh.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax (a concrete sketch follows this block):\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
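As a concrete sketch (the patterns below are hypothetical, not part of the stock configuration), a job that keeps all `node_cpu_*` series except the guest ones could use:\n\n```yaml\n# hypothetical patterns; adjust to the series your exporter exposes\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\n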
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax (a short example follows this block):\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
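For example, a hypothetical rule (not in the stock configuration) that collects untyped series ending in `_status` as gauges:\n\n```yaml\n# hypothetical pattern: treat untyped '*_status' series as gauges\nfallback_type:\n gauge:\n - '*_status'\n```\n\n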
\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
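As a rough illustration (the series names are hypothetical), exposition lines lacking '# TYPE' metadata would be classified like this:\n\n```text\napp_requests_total 42 -> Counter ('_total' suffix)\napp_latency{quantile=\"0.99\"} 0.2 -> Summary ('quantile' label)\napp_latency_bucket{le=\"0.5\"} 10 -> Histogram ('le' label)\napp_temperature 21.5 -> ignored (unless matched by 'fallback_type')\n```\n\n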
", "integration_type": "collector", "id": "go.d.plugin-prometheus-BOSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bigquery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BigQuery", "link": "https://github.com/m-lab/prometheus-bigquery-exporter", "icon_filename": "bigquery.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BigQuery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bird", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bird Routing Daemon", "link": "https://github.com/czerwonk/bird_exporter", "icon_filename": "bird.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon 
Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bird_Routing_Daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Blackbox", "link": "https://github.com/prometheus/blackbox_exporter", "icon_filename": "prometheus.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["blackbox"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox 
exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bobcat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bobcat Miner 300", "link": "https://github.com/pperzyna/bobcat_exporter", "icon_filename": "bobcat.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bobcat_Miner_300", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-borg", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Borg backup", "link": "https://github.com/k0ral/borg-exporter", "icon_filename": "borg.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Borg_backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bungeecord", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BungeeCord", "link": "https://github.com/weihao/bungeecord-prometheus-exporter", "icon_filename": "bungee.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus 
Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BungeeCord", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-csgo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CS:GO", "link": "https://github.com/kinduff/csgo_exporter", "icon_filename": "csgo.svg", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CS:GO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Counter-Strike: Global Offensive server metrics for improved game performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CS:GO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cvmfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CVMFS clients", "link": "https://github.com/guilbaults/cvmfs-exporter", "icon_filename": "cvmfs.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS 
exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CVMFS_clients", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-celery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Celery", "link": "https://github.com/ZeitOnline/celery_redis_prometheus", "icon_filename": "celery.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Celery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-certificate_transparency", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Certificate Transparency", "link": "https://github.com/Hsn723/ct-exporter", "icon_filename": "ct.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Certificate_Transparency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-checkpoint", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Checkpoint device", "link": "https://github.com/RespiroConsulting/CheckPointExporter", "icon_filename": "checkpoint.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Checkpoint_device", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-chia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Chia", "link": "https://github.com/chia-network/chia-exporter", "icon_filename": "chia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Chia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clm5ip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Christ Elektronik CLM5IP power panel", "link": "https://github.com/christmann/clm5ip_exporter/", "icon_filename": "christelec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik 
CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_agent", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Agent", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Agent", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_operator", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Operator", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Operator", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_proxy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Proxy", "link": "https://github.com/cilium/proxy", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Proxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cisco_aci", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cisco ACI", "link": "https://github.com/RavuAlHemio/prometheus_aci_exporter", "icon_filename": "cisco.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "cisco devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cisco_ACI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-citrix_netscaler", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Citrix NetScaler", "link": "https://github.com/rokett/Citrix-NetScaler-Exporter", "icon_filename": "citrix.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Citrix_NetScaler", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClamAV daemon", "link": "https://github.com/sergeymakinen/clamav_exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats 
exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClamAV_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamscan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clamscan results", "link": "https://github.com/FortnoxAB/clamscan-exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clamscan_results", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clash", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clash", "link": "https://github.com/elonzh/clash_exporter", "icon_filename": "clash.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clickhouse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClickHouse", "link": "https://github.com/ClickHouse/ClickHouse", "icon_filename": "clickhouse.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClickHouse database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClickHouse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_cloudwatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CloudWatch", "link": "https://github.com/prometheus/cloudwatch_exporter", "icon_filename": "aws-cloudwatch.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CloudWatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry", "link": "https://github.com/bosh-prometheus/cf_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry_firehose", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry Firehose", "link": "https://github.com/bosh-prometheus/firehose_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry_Firehose", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloudflare_pcap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloudflare PCAP", "link": "https://github.com/wehkamp/docker-prometheus-cloudflare-exporter", "icon_filename": "cloudflare.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloudflare_PCAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClusterControl CMON", "link": "https://github.com/severalnines/cmon_exporter", "icon_filename": "cluster-control.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON 
Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClusterControl_CMON", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-collectd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Collectd", "link": "https://github.com/prometheus/collectd_exporter", "icon_filename": "collectd.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Collectd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-concourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Concourse", "link": "https://concourse-ci.org", "icon_filename": "concourse.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Concourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ftbeerpi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CraftBeerPi", "link": "https://github.com/jo-hannes/craftbeerpi_exporter", "icon_filename": "craftbeer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi 
exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CraftBeerPi", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crowdsec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crowdsec", "link": "https://docs.crowdsec.net/docs/observability/prometheus", "icon_filename": "crowdsec.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crowdsec", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crypto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crypto exchanges", "link": "https://github.com/ix-ai/crypto-exporter", "icon_filename": "crypto.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crypto_exchanges", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cryptowatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cryptowatch", "link": "https://github.com/nbarrientos/cryptowat_exporter", "icon_filename": "cryptowatch.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat 
Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cryptowatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-custom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Custom Exporter", "link": "https://github.com/orange-cloudfoundry/custom_exporter", "icon_filename": "customdata.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom 
Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Custom_Exporter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ddwrt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DDWRT Routers", "link": "https://github.com/camelusferus/ddwrt_collector", "icon_filename": "ddwrt.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DDWRT_Routers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dmarc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DMARC", "link": "https://github.com/jgosmann/dmarc-metrics-exporter", "icon_filename": "dmarc.png", "categories": ["data-collection.mail-servers"]}, "keywords": ["email authentication", "policy", "reporting"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DMARC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dnsbl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DNSBL", "link": "https://github.com/Luzilla/dnsbl_exporter/", "icon_filename": "dnsbl.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DNSBL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC ECS cluster", "link": "https://github.com/paychex/prometheus-emcecs-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS 
Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_ECS_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_isilon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC Isilon cluster", "link": "https://github.com/paychex/prometheus-isilon-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_Isilon_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_xtremio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC XtremIO cluster", "link": "https://github.com/cthiel42/prometheus-xtremio-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_XtremIO_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_powermax", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell PowerMax", "link": "https://github.com/kckecheng/powermax_exporter", "icon_filename": "powermax.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax 
Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_PowerMax", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dependency_track", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dependency-Track", "link": "https://github.com/jetstack/dependency-track-exporter", "icon_filename": "dependency-track.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track 
Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dependency-Track", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-digitalocean", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DigitalOcean", "link": "https://github.com/metalmatze/digitalocean_exporter", "icon_filename": "digitalocean.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean 
Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DigitalOcean", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-discourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Discourse", "link": "https://github.com/discourse/discourse-prometheus", "icon_filename": "discourse.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse 
Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Discourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dutch_electricity_smart_meter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dutch Electricity Smart Meter", "link": "https://github.com/TobiasDeBruijn/prometheus-p1-exporter", "icon_filename": "dutch-electricity.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dynatrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dynatrace", "link": "https://github.com/Apside-TOP/dynatrace_exporter", "icon_filename": "dynatrace.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace 
Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dynatrace", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eos_web", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "EOS", "link": "https://eos-web.web.cern.ch/eos-web/", "icon_filename": "eos.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-EOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eaton_ups", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Eaton UPS", "link": "https://github.com/psyinfra/prometheus-eaton-ups-exporter", "icon_filename": "eaton.svg", "categories": ["data-collection.ups"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS 
Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
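For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n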
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Eaton_UPS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-elgato_keylight", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elgato Key Light devices.", "link": "https://github.com/mdlayher/keylight_exporter", "icon_filename": "elgato.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light 
exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
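For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n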
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Elgato_Key_Light_devices.", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-energomera", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energomera smart power meters", "link": "https://github.com/peak-load/energomera_exporter", "icon_filename": "energomera.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[energomera-exporter](https://github.com/peak-load/energomera_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
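For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n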
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Energomera_smart_power_meters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-excel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Excel spreadsheet", "link": "https://github.com/MarcusCalidus/excel-exporter", "icon_filename": "excel.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel 
Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
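For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n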
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Excel_spreadsheet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-frrouting", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FRRouting", "link": "https://github.com/tynany/frr_exporter", "icon_filename": "frrouting.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
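For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n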
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FRRouting", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fastd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fastd", "link": "https://github.com/freifunk-darmstadt/fastd-exporter", "icon_filename": "fastd.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
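For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n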
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fastd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fortigate", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fortigate firewall", "link": "https://github.com/bluecmd/fortigate_exporter", "icon_filename": "fortinet.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
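For instance, a minimal illustrative sketch (the metric names here are hypothetical, not taken from this exporter) that keeps all `myapp_requests` series except the debug one:\n\n```yaml\nselector:\n allow:\n - myapp_requests_*\n deny:\n - myapp_requests_debug_total\n```\n\n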
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fortigate_firewall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_nfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD NFS", "link": "https://github.com/Axcient/freebsd-nfs-exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS 
Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
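\nFor instance, a hypothetical job scraping a node exporter could keep all CPU time series except the guest modes; the metric names below are illustrative placeholders, not names this exporter is known to expose:\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```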
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_NFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_rctl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD RCTL-RACCT", "link": "https://github.com/yo000/rctl_exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```
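\nAs a concrete sketch, suppose a hypothetical exporter publishes an untyped `requests` metric (no `_total` suffix) that is really a counter, and an untyped `requests_in_flight` metric that behaves like a gauge; they could be rescued like this:\n\n```yaml\nfallback_type:\n counter:\n - requests\n gauge:\n - requests_in_flight\n```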
\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_RCTL-RACCT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freifunk", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Freifunk network", "link": "https://github.com/xperimental/freifunk-exporter", "icon_filename": "freifunk.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk 
Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Freifunk_network", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fritzbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fritzbox network devices", "link": "https://github.com/pdreker/fritz_exporter", "icon_filename": "avm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox 
exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```
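\n\n\n##### proxy_url\n\nA minimal sketch of routing requests through an HTTP proxy that requires basic authentication; the proxy address and credentials are placeholders:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: myuser\n proxy_password: mypass\n```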
\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fritzbox_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_gce", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP GCE", "link": "https://github.com/O1ahmad/gcp-gce-exporter", "icon_filename": "gcp-gce.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE 
Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |
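\n\n##### max_time_series\n\nA minimal sketch showing how the global and per-metric time series limits can be raised for a large endpoint; the job name and URL are placeholders:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n```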
\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_GCE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP Quota", "link": "https://github.com/mintel/gcp-quota-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gtp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GTP", "link": "https://github.com/wmnsk/gtp_exporter", "icon_filename": "gtpu.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GTP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic_cli", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic Command Line Output", "link": "https://github.com/MarioMartReq/generic-exporter", "icon_filename": "cli.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output 
Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
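##### Metric filtering (selector)\n\nA sketch of the `selector` option inside a job; the metric name patterns are illustrative, not defaults shipped with the collector.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    # keep all process_* series except process_max_fds (hypothetical names)\n    selector:\n      allow:\n        - process_*\n      deny:\n        - process_max_fds\n\n```\n{% /details %}\n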
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_Command_Line_Output", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-enclosure", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic storage enclosure tool", "link": "https://github.com/Gandi/jbod-rs", "icon_filename": "storage-enclosure.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
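##### Untyped metrics (fallback_type)\n\nA sketch of the `fallback_type` option; the patterns below are hypothetical examples of untyped series an enclosure exporter might expose.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      # process untyped *_total series as counters and fan readings as gauges (hypothetical names)\n      counter:\n        - '*_total'\n      gauge:\n        - 'fan_*'\n\n```\n{% /details %}\n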
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_storage_enclosure_tool", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_ratelimit", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub API rate limit", "link": "https://github.com/lunarway/github-ratelimit-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit 
Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
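##### Custom HTTP headers\n\nA sketch of the `headers` option from the table above; the header name and value are placeholders, not something this exporter is known to require.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    headers:\n      # placeholder header; replace with whatever your setup expects\n      X-Api-Key: my-secret-key\n\n```\n{% /details %}\n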
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_API_rate_limit", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_repo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub repository", "link": "https://github.com/githubexporter/github-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub 
Exporter](https://github.com/githubexporter/github-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
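##### Raise time series limits\n\nA sketch of the `max_time_series` and `max_time_series_per_metric` options for endpoints that legitimately expose more series than the defaults (2000 and 200) allow; the values shown are arbitrary.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000\n    max_time_series_per_metric: 500\n\n```\n{% /details %}\n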
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gitlab_runner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitLab Runner", "link": "https://gitlab.com/gitlab-org/gitlab-runner", "icon_filename": "gitlab.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
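##### Change data collection frequency\n\nA sketch of overriding `update_every` for a single job; 30 seconds is an arbitrary illustration, the default being 10.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    update_every: 30\n\n```\n{% /details %}\n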
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitLab_Runner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gobetween", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Gobetween", "link": "https://github.com/yyyar/gobetween", "icon_filename": "gobetween.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
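##### Behind a proxy\n\nA sketch combining the documented `proxy_url`, `proxy_username` and `proxy_password` options; the proxy address and credentials are placeholders.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://127.0.0.1:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n\n```\n{% /details %}\n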
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Gobetween", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Cloud Platform", "link": "https://github.com/DazWilkin/gcp-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n
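##### Custom certificate authority\n\nA sketch of the `tls_ca` option for an endpoint served with a certificate signed by a private CA; the file path is a placeholder.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    # CA bundle used to verify the server certificate (placeholder path)\n    tls_ca: /etc/ssl/private-ca.pem\n\n```\n{% /details %}\n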
##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Cloud_Platform", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-google_pagespeed", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Pagespeed", "link": "https://github.com/foomo/pagespeed_exporter", "icon_filename": "google.svg", "categories": ["data-collection.apm"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Pagespeed", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_stackdriver", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Stackdriver", "link": "https://github.com/prometheus-community/stackdriver_exporter", "icon_filename": "gcp-stackdriver.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Stackdriver", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-grafana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Grafana", "link": "https://grafana.com/", "icon_filename": "grafana.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Grafana", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-graylog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Graylog Server", "link": "https://github.com/Graylog2/graylog2-server/", "icon_filename": "graylog.svg", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Graylog_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HANA", "link": "https://github.com/jenningsloy318/hana_exporter", "icon_filename": "sap.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HANA", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hdsentinel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HDSentinel", "link": "https://github.com/qusielle/hdsentinel-exporter", "icon_filename": "harddisk.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel 
Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HDSentinel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hhvm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HHVM", "link": "https://github.com/wikimedia/operations-software-hhvm_exporter", "icon_filename": "hhvm.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM 
Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HHVM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hp_ilo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HP iLO", "link": "https://github.com/infinityworks/hpilo-exporter", "icon_filename": "hp.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics 
Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HP_iLO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-halon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Halon", "link": "https://github.com/tobiasbp/halon_exporter", "icon_filename": "halon.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Halon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hashicorp_vault", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HashiCorp Vault secrets", "link": "https://github.com/tomtom-international/vault-assessment-prometheus-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HashiCorp_Vault_secrets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hasura_graphql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hasura GraphQL Server", "link": "https://github.com/zolamk/hasura-exporter", "icon_filename": "hasura.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized API performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura 
Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hasura_GraphQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_hotspot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium hotspot", "link": "https://github.com/tedder/helium_hotspot_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot 
exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_hotspot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_miner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium miner (validator)", "link": "https://github.com/tedder/miner_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) 
exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_miner_(validator)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_cgm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CGN series CPE", "link": "https://github.com/yrro/hitron-exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 
exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CGN_series_CPE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_coda", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CODA Cable Modem", "link": "https://github.com/hairyhenderson/hitron_coda_exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CODA_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homebridge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homebridge", "link": "https://github.com/lstrojny/homebridge-prometheus-exporter", "icon_filename": "homebridge.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge 
Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homebridge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homey", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homey", "link": "https://github.com/rickardp/homey-prometheus-exporter", "icon_filename": "homey.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homey", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-honeypot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Honeypot", "link": "https://github.com/Intrinsec/honeypot_exporter", "icon_filename": "intrinsec.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Honeypot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hilink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Huawei devices", "link": "https://github.com/eliecharra/hilink-exporter", "icon_filename": "huawei.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink 
exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Huawei_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hubble", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hubble", "link": "https://github.com/cilium/hubble", "icon_filename": "hubble.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hubble", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_aix_njmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM AIX systems Njmon", "link": "https://github.com/crooks/njmon_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_AIX_systems_Njmon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_cex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM CryptoExpress (CEX) cards", "link": "https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
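For example, to keep only series whose names start with `cex_` and drop any series ending in `_seconds` (hypothetical patterns, shown purely for illustration):\n\n```yaml\nselector:\n allow:\n - cex_*\n deny:\n - '*_seconds'\n```\n\n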
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_mq", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM MQ", "link": "https://github.com/agebhar1/mq_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
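As a concrete sketch, you could treat untyped series ending in `_requests` as counters and those ending in `_bytes` as gauges (hypothetical patterns, purely illustrative):\n\n```yaml\nfallback_type:\n counter:\n - '*_requests'\n gauge:\n - '*_bytes'\n```\n\n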
\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_MQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum", "link": "https://github.com/topine/ibm-spectrum-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum 
Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum_virtualize", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum Virtualize", "link": "https://github.com/bluecmd/spectrum_virtualize_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum_Virtualize", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_zhmc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Z Hardware Management Console", "link": "https://github.com/zhmcclient/zhmc-prometheus-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IOTA full node", "link": "https://github.com/crholliday/iota-prom-exporter", "icon_filename": "iota.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA 
Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IOTA_full_node", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ipmi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IPMI (By SoundCloud)", "link": "https://github.com/prometheus-community/ipmi_exporter", "icon_filename": "soundcloud.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IPMI_(By_SoundCloud)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-influxdb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "InfluxDB", "link": "https://github.com/prometheus/influxdb_exporter", "icon_filename": "influxdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB 
exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-InfluxDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jmx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JMX", "link": "https://github.com/prometheus/jmx_exporter", "icon_filename": "java.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JMX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jarvis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jarvis Standing Desk", "link": "https://github.com/hairyhenderson/jarvis_exporter/", "icon_filename": "jarvis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk 
Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jarvis_Standing_Desk", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jenkins", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jenkins", "link": "https://www.jenkins.io/", "icon_filename": "jenkins.svg", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jenkins", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jetbrains_fls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JetBrains Floating License Server", "link": "https://github.com/mkreu/jetbrains-fls-exporter", "icon_filename": "jetbrains.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JetBrains_Floating_License_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka", "link": "https://github.com/danielqsj/kafka_exporter/", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_connect", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Connect", "link": "https://github.com/findelabs/kafka-connect-exporter-rs", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Connect", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_consumer_lag", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Consumer Lag", "link": "https://github.com/omarsmak/kafka-consumer-lag-monitoring", "icon_filename": "kafka.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
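\nAs a hedged sketch, assuming the exporter exposed untyped series with these (hypothetical) names, you could map cumulative poll counts to Counter and point-in-time lag values to Gauge:\n\n```yaml\nfallback_type:\n counter:\n - 'kafka_consumergroup_poll*'\n gauge:\n - 'kafka_consumergroup_lag*'\n```\n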
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Consumer_Lag", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_zookeeper", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka ZooKeeper", "link": "https://github.com/cloudflare/kafka_zookeeper_exporter", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
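\nA minimal sketch, assuming hypothetical metric names, that collects only broker registration series and drops the exporter's own scrape bookkeeping:\n\n```yaml\nselector:\n allow:\n - 'kafka_zookeeper_*'\n deny:\n - '*_scrape_*'\n```\n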
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kannel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kannel", "link": "https://github.com/apostvav/kannel_exporter", "icon_filename": "kannel.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
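\nA sketch under assumed (hypothetical) metric names: cumulative sent-SMS counts would map to Counter, while queue depth is a point-in-time value and would map to Gauge:\n\n```yaml\nfallback_type:\n counter:\n - 'kannel_sms_sent*'\n gauge:\n - 'kannel_queue_*'\n```\n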
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kannel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-keepalived", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Keepalived", "link": "https://github.com/gen2brain/keepalived_exporter", "icon_filename": "keepalived.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived 
Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
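\nFor example (hypothetical metric names; verify against your exporter's output), to keep only VRRP state series:\n\n```yaml\nselector:\n allow:\n - 'keepalived_vrrp_*'\n```\n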
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Keepalived", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-korral", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kubernetes Cluster Cloud Cost", "link": "https://github.com/agilestacks/korral", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
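\nA hedged sketch with hypothetical names: cloud cost figures are point-in-time values, so an untyped cost series would map to Gauge:\n\n```yaml\nfallback_type:\n gauge:\n - 'korral_*_cost'\n```\n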
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kubernetes_Cluster_Cloud_Cost", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "LDAP", "link": "https://github.com/titisan/ldap_exporter", "icon_filename": "ldap.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP 
Exporter](https://github.com/titisan/ldap_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
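\nFor instance, a sketch that keeps only bind and search series (hypothetical metric names; check the exporter's actual output):\n\n```yaml\nselector:\n allow:\n - 'ldap_bind_*'\n - 'ldap_search_*'\n```\n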
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-LDAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lagerist", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lagerist Disk latency", "link": "https://github.com/Svedrin/lagerist", "icon_filename": "linux.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency 
exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lagerist_Disk_latency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-linode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Linode", "link": "https://github.com/DazWilkin/linode-exporter", "icon_filename": "linode.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Linode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lustre", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lustre metadata", "link": "https://github.com/GSI-HPC/prometheus-cluster-exporter", "icon_filename": "lustre.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster 
Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lustre_metadata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lynis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lynis audit reports", "link": "https://github.com/MauveSoftware/lynis_exporter", "icon_filename": "lynis.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lynis_audit_reports", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mp707", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MP707 USB thermometer", "link": "https://github.com/nradchenko/mp707_exporter", "icon_filename": "thermometer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MP707_USB_thermometer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mqtt_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MQTT Blackbox", "link": "https://github.com/inovex/mqtt_blackbox_exporter", "icon_filename": "mqtt.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox 
Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MQTT_Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-machbase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Machbase", "link": "https://github.com/MACHBASE/prometheus-machbase-exporter", "icon_filename": "machbase.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase 
Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Machbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-maildir", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Maildir", "link": "https://github.com/cherti/mailexporter", "icon_filename": "mailserver.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Maildir", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meilisearch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meilisearch", "link": "https://github.com/scottaglia/meilisearch_exporter", "icon_filename": "meilisearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch 
Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meilisearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-memcached", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Memcached (community)", "link": "https://github.com/prometheus/memcached_exporter", "icon_filename": "memcached.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached 
exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Memcached_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meraki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meraki dashboard", "link": "https://github.com/TheHolm/meraki-dashboard-promethus-exporter", "icon_filename": "meraki.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meraki_dashboard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mesos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mesos", "link": "http://github.com/mesosphere/mesos_exporter", "icon_filename": "mesos.svg", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mesos", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mikrotik", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MikroTik devices", "link": "https://github.com/swoga/mikrotik-exporter", "icon_filename": "mikrotik.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, 
swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MikroTik_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-routeros", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mikrotik RouterOS devices", "link": "https://github.com/welbymcroberts/routeros_exporter", "icon_filename": "routeros.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mikrotik_RouterOS_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-minecraft", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Minecraft", "link": "https://github.com/sladkoff/minecraft-prometheus-exporter", "icon_filename": "minecraft.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft 
Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Minecraft", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-modbus_rtu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Modbus protocol", "link": "https://github.com/dernasherbrezon/modbusrtu_exporter", "icon_filename": "modbus.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Modbus_protocol", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mogilefs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MogileFS", "link": "https://github.com/KKBOX/mogilefs-exporter", "icon_filename": "filesystem.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS 
Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MogileFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-monnit_mqtt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Monnit Sensors MQTT", "link": "https://github.com/braxton9460/monnit-mqtt-exporter", "icon_filename": "monnit.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter 
WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Monnit_Sensors_MQTT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nrpe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NRPE daemon", "link": "https://github.com/canonical/nrpe_exporter", "icon_filename": "nrpelinux.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NRPE_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nsxt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NSX-T", "link": "https://github.com/jk8s/nsxt_exporter", "icon_filename": "vmware-nsx.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NSX-T", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nvml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NVML", "link": "https://github.com/oko/nvml-exporter-rs", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions 
in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NVML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-naemon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Naemon", "link": "https://github.com/Griesbacher/Iapetos", "icon_filename": "naemon.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
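For instance, a minimal hedged sketch of an allow list (the metric names are hypothetical placeholders, not taken from this exporter's documentation):\n\n```yaml\nselector:\n allow:\n - naemon_host_checks_total\n - naemon_service_*\n```\n\n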
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Naemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nagios", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nagios", "link": "https://github.com/wbollock/nagios_exporter", "icon_filename": "nagios.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
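For instance, a hedged sketch that drops the exporter's own Go runtime series (the patterns are assumptions, not confirmed against this exporter):\n\n```yaml\nselector:\n deny:\n - go_*\n - process_*\n```\n\n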
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nagios", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nature_remo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nature Remo E lite devices", "link": "https://github.com/kenfdev/remo-exporter", "icon_filename": "nature-remo.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series 
Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
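As an illustration only (hypothetical metric names), an allow list can be combined with a deny list:\n\n```yaml\nselector:\n allow:\n - remo_*\n deny:\n - remo_scrape_errors_total\n```\n\n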
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nature_Remo_E_lite_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_solidfire", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetApp Solidfire", "link": "https://github.com/mjavier2k/solidfire-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
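For example, a hedged sketch using placeholder names rather than confirmed exporter metrics:\n\n```yaml\nselector:\n allow:\n - solidfire_volume_*\n - solidfire_cluster_*\n```\n\n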
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetApp_Solidfire", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetFlow", "link": "https://github.com/paihu/netflow_exporter", "icon_filename": "netflow.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
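For example, a deny-only sketch with placeholder patterns (not confirmed exporter metrics):\n\n```yaml\nselector:\n deny:\n - netflow_scrape_*\n```\n\n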
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetFlow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netmeter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetMeter", "link": "https://github.com/ssbostan/netmeter-exporter", "icon_filename": "netmeter.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
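As a sketch of how the filter sits inside a job definition (the URL, port, and metric pattern are hypothetical):\n\n```yaml\njobs:\n - name: netmeter\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netmeter_*\n```\n\n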
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetMeter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_ontap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netapp ONTAP API", "link": "https://github.com/sapcc/netapp-api-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
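For instance (placeholder names, not confirmed exporter metrics), exact metric names can sit alongside wildcard patterns:\n\n```yaml\nselector:\n allow:\n - netapp_capacity_svm\n - netapp_volume_*\n```\n\n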
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netapp_ONTAP_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netatmo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netatmo sensors", "link": "https://github.com/xperimental/netatmo-exporter", "icon_filename": "netatmo.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netatmo_sensors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-newrelic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "New Relic", "link": "https://github.com/jfindley/newrelic_exporter", "icon_filename": "newrelic.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic 
exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-New_Relic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextdns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NextDNS", "link": "https://github.com/raylas/nextdns-exporter", "icon_filename": "nextdns.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NextDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextcloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nextcloud servers", "link": "https://github.com/xperimental/nextcloud-exporter", "icon_filename": "nextcloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nextcloud_servers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-obs_studio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OBS Studio", "link": "https://github.com/lukegb/obs_studio_exporter", "icon_filename": "obs-studio.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio 
Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OBS_Studio", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-odbc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ODBC", "link": "https://github.com/MACHBASE/prometheus-odbc-exporter", "icon_filename": "odbc.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC 
Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ODBC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-otrs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OTRS", "link": "https://github.com/JulianDroste/otrs_exporter", "icon_filename": "otrs.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OTRS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openhab", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenHAB", "link": "https://github.com/pdreker/openhab_exporter", "icon_filename": "openhab.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the 
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OTRS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openhab", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenHAB", "link": "https://github.com/pdreker/openhab_exporter", "icon_filename": "openhab.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
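As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n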
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenHAB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenLDAP (community)", "link": "https://github.com/tomcz/openldap_exporter", "icon_filename": "openldap.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
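As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n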
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenLDAP_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRC", "link": "https://git.sr.ht/~tomleb/openrc-exporter", "icon_filename": "linux.png", "categories": ["data-collection.linux-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
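As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n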
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrct2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRCT2", "link": "https://github.com/terinjokes/openrct2-prometheus-exporter", "icon_filename": "openRCT2.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
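As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n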
"setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
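As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n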
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenROADM_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openstack", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenStack", "link": "https://github.com/CanonicalLtd/prometheus-openstack-exporter", "icon_filename": "openstack.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
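As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n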
", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenStack", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenVAS", "link": "https://github.com/ModeClearCode/openvas_exporter", "icon_filename": "openVAS.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
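As a quick illustration of these rules, consider this hypothetical exposition (metric names invented for the example, not taken from any exporter):\n\n```text\n# no '# TYPE' lines, so every series below is untyped\napp_requests_total 42 <- Counter ('_total' suffix)\napp_latency_seconds{quantile=\"0.99\"} 0.2 <- Summary ('quantile' label)\napp_queue_depth 7 <- ignored (no rule matches, no fallback_type set)\n```\n\n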
Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenWeatherMap", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvswitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Open vSwitch", "link": "https://github.com/digitalocean/openvswitch_exporter", "icon_filename": "ovs.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Open_vSwitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-oracledb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Oracle DB (community)", "link": "https://github.com/iamseth/oracledb_exporter", "icon_filename": "oracle.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["oracle", "database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB 
Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Oracle_DB_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-patroni", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Patroni", "link": "https://github.com/gopaytech/patroni_exporter", "icon_filename": "patroni.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni 
Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Patroni", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pws", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Personal Weather Station", "link": "https://github.com/JohnOrthoefer/pws-exporter", "icon_filename": "wunderground.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station 
Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Personal_Weather_Station", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgpool2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pgpool-II", "link": "https://github.com/pgpool/pgpool2_exporter", "icon_filename": "pgpool2.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II 
Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pgpool-II", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-philips_hue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Philips Hue", "link": "https://github.com/aexel90/hue_exporter", "icon_filename": "hue.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Philips_Hue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pimoroni_enviro_plus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pimoroni Enviro+", "link": "https://github.com/terradolor/prometheus-enviro-exporter", "icon_filename": "pimorino.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
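For instance, a minimal sketch that keeps every `enviro_*` series except the `enviro_gas_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - enviro_*\n deny:\n - enviro_gas_*\n```\n\n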
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pimoroni_Enviro+", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pingdom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pingdom", "link": "https://github.com/veepee-oss/pingdom_exporter", "icon_filename": "solarwinds.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom 
Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
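For instance, a minimal sketch that keeps every `pingdom_*` series except `pingdom_up` (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - pingdom_*\n deny:\n - pingdom_up\n```\n\n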
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pingdom", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-podman", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Podman", "link": "https://github.com/containers/prometheus-podman-exporter", "icon_filename": "podman.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN 
exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
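For instance, a minimal sketch that keeps every `podman_*` series except the `podman_image_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - podman_*\n deny:\n - podman_image_*\n```\n\n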
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Podman", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-powerpal", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Powerpal devices", "link": "https://github.com/aashley/powerpal_exporter", "icon_filename": "powerpal.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
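For instance, a minimal sketch that keeps every `powerpal_*` series except the `powerpal_battery_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - powerpal_*\n deny:\n - powerpal_battery_*\n```\n\n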
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Powerpal_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proftpd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ProFTPD", "link": "https://github.com/transnano/proftpd_exporter", "icon_filename": "proftpd.png", "categories": ["data-collection.ftp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
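For instance, a minimal sketch that keeps every `proftpd_*` series except the `proftpd_scrape_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - proftpd_*\n deny:\n - proftpd_scrape_*\n```\n\n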
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ProFTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Prometheus endpoint", "link": "https://prometheus.io/", "icon_filename": "prometheus.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["prometheus", "openmetrics"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
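For instance, a minimal sketch that keeps every `node_*` series except the `node_scrape_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n\n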
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Prometheus_endpoint", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proxmox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Proxmox VE", "link": "https://github.com/prometheus-pve/prometheus-pve-exporter", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE 
Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
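For instance, a minimal sketch that keeps every `pve_*` series except the `pve_version_*` ones (the metric names are hypothetical and only illustrate the selector syntax):\n\n```yaml\nselector:\n allow:\n - pve_*\n deny:\n - pve_version_*\n```\n\n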
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Proxmox_VE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radius", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RADIUS", "link": "https://github.com/devon-mar/radius-exporter", "icon_filename": "radius.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS 
exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ripe_atlas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RIPE Atlas", "link": "https://github.com/czerwonk/atlas_exporter", "icon_filename": "ripe.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas 
Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RIPE_Atlas", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radio_thermostat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Radio Thermostat", "link": "https://github.com/andrewlow/radio-thermostat-exporter", "icon_filename": "radiots.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat 
Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Radio_Thermostat", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-rancher", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Rancher", "link": "https://github.com/infinityworksltd/prometheus-rancher-exporter", "icon_filename": "rancher.svg", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher 
Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Rancher", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-raritan_pdu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Raritan PDU", "link": "https://github.com/psyinfra/prometheus-raritan-pdu-exporter", "icon_filename": "raritan.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU 
Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Raritan_PDU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-redis_queue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Redis Queue", "link": "https://github.com/mdawar/rq-exporter", "icon_filename": "rq.png", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Redis_Queue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sabnzbd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SABnzbd", "link": "https://github.com/msroest/sabnzbd_exporter", "icon_filename": "sabnzbd.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SABnzbd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sma_inverter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SMA Inverters", "link": "https://github.com/dr0ps/sma_inverter_exporter", "icon_filename": "sma.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
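For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n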
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SMA_Inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sonic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SONiC NOS", "link": "https://github.com/kamelnetworks/sonic_exporter", "icon_filename": "sonic.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC 
Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
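For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n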
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SONiC_NOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SQL Database agnostic", "link": "https://github.com/free/sql_exporter", "icon_filename": "sql.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
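For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n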
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SQL_Database_agnostic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSH", "link": "https://github.com/Nordstrom/ssh_exporter", "icon_filename": "ssh.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
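For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n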
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSL Certificate", "link": "https://github.com/ribbybibby/ssl_exporter", "icon_filename": "ssl.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
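For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n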
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSL_Certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-salicru_eqx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Salicru EQX inverter", "link": "https://github.com/alejandroscf/prometheus_salicru_exporter", "icon_filename": "salicru.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX 
inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
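For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n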
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Salicru_EQX_inverter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sense_energy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sense Energy", "link": "https://github.com/ejsuncy/sense_energy_prometheus_exporter", "icon_filename": "sense.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy 
exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
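For instance, a sketch of a job that keeps only Go runtime series while dropping GC internals could look like this (the `go_*` patterns below are illustrative assumptions, not part of the default configuration):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep series matching allow AND not matching deny\n selector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n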
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sense_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sentry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sentry", "link": "https://github.com/snakecharmer/sentry_exporter", "icon_filename": "sentry.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sentry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-servertech", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ServerTech", "link": "https://github.com/tynany/servertech_exporter", "icon_filename": "servertech.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech 
Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ServerTech", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shell_cmd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shell command", "link": "https://github.com/tomwilkie/prom-run", "icon_filename": "crunner.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shell_command", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shelly", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shelly humidity sensor", "link": "https://github.com/aexel90/shelly_exporter", "icon_filename": "shelly.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shelly_humidity_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sia", "link": "https://github.com/tbenz9/sia_exporter", "icon_filename": "sia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-s7_plc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Siemens S7 PLC", "link": "https://github.com/MarcusCalidus/s7-plc-exporter", "icon_filename": "siemens.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC 
exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Siemens_S7_PLC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-site24x7", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Site 24x7", "link": "https://github.com/svenstaro/site24x7_exporter", "icon_filename": "site24x7.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 
Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Site_24x7", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-slurm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Slurm", "link": "https://github.com/vpenso/prometheus-slurm-exporter", "icon_filename": "slurm.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by 
following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
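For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `slurm_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with slurm_ (assumed prefix)\nselector:\n allow:\n - slurm_*\n```\n\n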
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Slurm", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-smartrg808ac", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SmartRG 808AC Cable Modem", "link": "https://github.com/AdamIsrael/smartrg808ac_exporter", "icon_filename": "smartr.jpeg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
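For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `smartrg_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with smartrg_ (assumed prefix)\nselector:\n allow:\n - smartrg_*\n```\n\n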
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SmartRG_808AC_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Smart meters SML", "link": "https://github.com/mweinelt/sml-exporter", "icon_filename": "sml.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
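For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `sml_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with sml_ (assumed prefix)\nselector:\n allow:\n - sml_*\n```\n\n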
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Smart_meters_SML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-softether", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SoftEther VPN Server", "link": "https://github.com/dalance/softether_exporter", "icon_filename": "softether.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther 
Exporter](https://github.com/dalance/softether_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
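For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `softether_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with softether_ (assumed prefix)\nselector:\n allow:\n - softether_*\n```\n\n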
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SoftEther_VPN_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solaredge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SolarEdge inverters", "link": "https://github.com/dave92082/SolarEdge-Exporter", "icon_filename": "solaredge.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge 
Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
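For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `solaredge_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with solaredge_ (assumed prefix)\nselector:\n allow:\n - solaredge_*\n```\n\n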
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SolarEdge_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lsx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solar logging stick", "link": "https://gitlab.com/bhavin192/lsx-exporter", "icon_filename": "solar.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick 
exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
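For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `lsx_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with lsx_ (assumed prefix)\nselector:\n allow:\n - lsx_*\n```\n\n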
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solar_logging_stick", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solis Ginlong 5G inverters", "link": "https://github.com/candlerb/solis_exporter", "icon_filename": "solis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis 
Exporter](https://github.com/candlerb/solis_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge if their names match a configured pattern. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose time series count exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
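For example, an allow-list that keeps just this exporter's own series might look like the sketch below (the `solis_*` pattern is an assumption, so substitute the metric names this exporter actually exposes):\n\n```yaml\n# keep only series whose names start with solis_ (assumed prefix)\nselector:\n allow:\n - solis_*\n```\n\n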
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solis_Ginlong_5G_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-spacelift", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Spacelift", "link": "https://github.com/spacelift-io/prometheus-exporter", "icon_filename": "spacelift.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift 
Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Spacelift", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-speedify", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Speedify CLI", "link": "https://github.com/willshen/speedify_exporter", "icon_filename": "speedify.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Speedify_CLI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sphinx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sphinx", "link": "https://github.com/foxdalas/sphinx_exporter", "icon_filename": "sphinx.png", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sphinx", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starlink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starlink (SpaceX)", "link": "https://github.com/danopstech/starlink_exporter", "icon_filename": "starlink.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink 
Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starlink_(SpaceX)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starwind_vsan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starwind VSAN VSphere Edition", "link": "https://github.com/evoicefire/starwind-vsan-exporter", "icon_filename": "starwind.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starwind_VSAN_VSphere_Edition", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-statuspage", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "StatusPage", "link": "https://github.com/vladvasiliu/statuspage-exporter", "icon_filename": "statuspage.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage 
Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-StatusPage", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-steam_a2s", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Steam", "link": "https://github.com/armsnyder/a2s-exporter", "icon_filename": "a2s.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Steam", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-storidge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Storidge", "link": "https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md", "icon_filename": "storidge.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Storidge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-stream_generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Stream", "link": "https://github.com/carlpett/stream_exporter", "icon_filename": "stream.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Stream", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sunspec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sunspec Solar Energy", "link": "https://github.com/inosion/prometheus-sunspec-exporter", "icon_filename": "sunspec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy 
Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sunspec_Solar_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-suricata", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Suricata", "link": "https://github.com/corelight/suricata_exporter", "icon_filename": "suricata.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata 
Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Suricata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-synology_activebackup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Synology ActiveBackup", "link": "https://github.com/codemonauts/activebackup-prometheus-exporter", "icon_filename": "synology.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Synology_ActiveBackup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sysload", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sysload", "link": "https://github.com/egmc/sysload_exporter", "icon_filename": "sysload.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sysload", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-trex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "T-Rex NVIDIA GPU Miner", "link": "https://github.com/dennisstritzke/trex_exporter", "icon_filename": "trex.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner 
Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-T-Rex_NVIDIA_GPU_Miner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tacas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TACACS", "link": "https://github.com/devon-mar/tacacs-exporter", "icon_filename": "tacacs.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS 
Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
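For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `tacacs_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - tacacs_*\n```\n\n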
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TACACS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tplink_p110", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TP-Link P110", "link": "https://github.com/ijohanne/prometheus-tplink-p110-exporter", "icon_filename": "tplink.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 
Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
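For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `tplink_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - tplink_*\n```\n\n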
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TP-Link_P110", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tado", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tado smart heating solution", "link": "https://github.com/eko/tado-exporter", "icon_filename": "tado.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\u00b0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
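For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `tado_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - tado_*\n```\n\n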
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tado_smart_heating_solution", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tankerkoenig", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tankerkoenig API", "link": "https://github.com/lukasmalkmus/tankerkoenig_exporter", "icon_filename": "tanker.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API 
Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
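For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `tankerkoenig_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - tankerkoenig_*\n```\n\n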
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tankerkoenig_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_powerwall", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Powerwall", "link": "https://github.com/foogod/powerwall_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall 
Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
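For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `powerwall_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - powerwall_*\n```\n\n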
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Powerwall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_wall_connector", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Wall Connector", "link": "https://github.com/benclapp/tesla_wall_connector_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
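For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `wall_connector_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - wall_connector_*\n```\n\n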
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Wall_Connector", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_vehicle", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla vehicle", "link": "https://github.com/wywywywy/tesla-prometheus-exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla 
exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
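For instance (a hypothetical illustration, not taken from the shipped configuration), if the exporter prefixed its metric names with `tesla_`, the following would keep only those time series:\n\n```yaml\nselector:\n # hypothetical metric-name prefix - adjust to the names your exporter actually exposes\n allow:\n - tesla_*\n```\n\n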
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_vehicle", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-traceroute", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Traceroute", "link": "https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter", "icon_filename": "traceroute.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Traceroute", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twincat_ads_webservice", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TwinCAT ADS Web Service", "link": "https://github.com/MarcusCalidus/twincat-ads-webservice-exporter", "icon_filename": "twincat.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TwinCAT_ADS_Web_Service", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Twitch", "link": "https://github.com/damoun/twitch_exporter", "icon_filename": "twitch.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Twitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ubiquity_ufiber", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Ubiquiti UFiber OLT", "link": "https://github.com/swoga/ufiber-exporter", "icon_filename": "ubiquiti.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Ubiquiti_UFiber_OLT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-uptimerobot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Uptimerobot", "link": "https://github.com/wosc/prometheus-uptimerobot", "icon_filename": "uptimerobot.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot 
Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Uptimerobot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vscode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "VSCode", "link": "https://github.com/guicaulada/vscode-exporter", "icon_filename": "vscode.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-VSCode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vault_pki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vault PKI", "link": "https://github.com/aarnaud/vault-pki-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI 
Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vault_PKI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vertica", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vertica", "link": "https://github.com/vertica/vertica-prometheus-exporter", "icon_filename": "vertica.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vault_PKI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vertica", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vertica", "link": "https://github.com/vertica/vertica-prometheus-exporter", "icon_filename": "vertica.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
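\nFor instance, a hypothetical allow/deny pair that keeps all `vertica_*` series except a build-info one might look like this (a sketch; the metric names are illustrative, not taken from the exporter):\n\n```yaml\nselector:\n allow:\n - vertica_*\n deny:\n - vertica_build_info\n```\n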
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vertica", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-warp10", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Warp10", "link": "https://github.com/centreon/warp10-sensision-exporter", "icon_filename": "warp10.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 
Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
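\nFor example, a sketch that treats hypothetical untyped '*_count' series as Counters and '*_bytes' series as Gauges (the patterns are illustrative, not taken from the exporter):\n\n```yaml\nfallback_type:\n counter:\n - \"*_count\"\n gauge:\n - \"*_bytes\"\n```\n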
\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Warp10", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xmpp_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "XMPP Server", "link": "https://github.com/horazont/xmpp-blackbox-exporter", "icon_filename": "xmpp.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server 
Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-XMPP_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xiaomi_mi_flora", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Xiaomi Mi Flora", "link": "https://github.com/xperimental/flowercare-exporter", "icon_filename": "xiaomi.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care 
Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
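##### Filtering and fallback combined\n\n> **Note**: This example is an illustrative sketch; the port and the metric name patterns are hypothetical.\n\nA single job can combine a time series filter with Untyped metric handling.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - flowercare_*\n fallback_type:\n gauge:\n - flowercare_*_raw\n```\n{% /details %}\n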
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Xiaomi_Mi_Flora", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-yourls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "YOURLS URL Shortener", "link": "https://github.com/just1not2/prometheus-exporter-yourls", "icon_filename": "yourls.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS 
exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-YOURLS_URL_Shortener", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zerto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zerto", "link": "https://github.com/claranet/zerto-exporter", "icon_filename": "zerto.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zerto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zulip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zulip", "link": "https://github.com/brokenpip3/zulip-exporter", "icon_filename": "zulip.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zulip", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zyxel_gs1200", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zyxel GS1200-8", "link": "https://github.com/robinelfrink/gs1200-exporter", "icon_filename": "zyxel.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 
Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zyxel_GS1200-8", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bpftrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "bpftrace variables", "link": "https://github.com/andreasgerstmayr/bpftrace_exporter", "icon_filename": "bpftrace.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace 
exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-bpftrace_variables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cadvisor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "cAdvisor", "link": "https://github.com/google/cadvisor", "icon_filename": "cadvisor.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-cAdvisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-etcd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "etcd", "link": "https://etcd.io/", "icon_filename": "etcd.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-etcd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gpsd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "gpsd", "link": "https://github.com/natesales/gpsd-exporter", "icon_filename": "gpsd.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-gpsd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iqair", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "iqAir AirVisual air quality monitors", "link": "https://github.com/Packetslave/iqair_exporter", "icon_filename": "iqair.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair 
Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-iqAir_AirVisual_air_quality_monitors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jolokia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "jolokia", "link": "https://github.com/aklinkert/jolokia_exporter", "icon_filename": "jolokia.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-jolokia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-journald", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "journald", "link": "https://github.com/dead-claudia/journald-exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
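For instance, a minimal hypothetical sketch, with invented metric name patterns (they are not taken from journald-exporter):\n\n```yaml\nselector:\n  allow:\n    - journald_*         # keep every series whose name starts with journald_\n  deny:\n    - journald_debug_*   # ...but drop any hypothetical debug series\n```\n\n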
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-journald", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-loki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "loki", "link": "https://github.com/grafana/loki", "icon_filename": "loki.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-loki", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mosquitto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mosquitto", "link": "https://github.com/sapcc/mosquitto-exporter", "icon_filename": "mosquitto.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
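Keep in mind that, as in the examples further below, `selector` (like every option in the table above) is set per job. A hypothetical sketch, with an invented pattern and the placeholder URL used throughout this page:\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000   # raise the global limit for this job only\n    selector:\n      allow:\n        - mosquitto_*       # invented pattern: keep broker series only\n```\n\n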
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mosquitto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mtail", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mtail", "link": "https://github.com/google/mtail", "icon_filename": "mtail.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mtail", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nftables", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "nftables", "link": "https://github.com/Sheridan/nftables_exporter", "icon_filename": "nftables.png", "categories": ["data-collection.linux-systems.firewall-metrics"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-nftables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgbackrest", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "pgBackRest", "link": "https://github.com/woblerr/pgbackrest_exporter", "icon_filename": "pgbackrest.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) 
by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-pgBackRest", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-strongswan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "strongSwan", "link": "https://github.com/jlti-dev/ipsec_exporter", "icon_filename": "strongswan.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici 
Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
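As a combined preview of the two filtering options documented below (all patterns and metric names here are invented for illustration):\n\n```yaml\nselector:\n  allow:\n    - ipsec_*          # keep IPsec-related series\n  deny:\n    - ipsec_debug_*    # ...except debug series\nfallback_type:\n  gauge:\n    - ipsec_sa_state   # read this hypothetical untyped metric as a Gauge\n```\n\n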
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-strongSwan", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-proxysql", "plugin_name": "go.d.plugin", "module_name": "proxysql", "monitored_instance": {"name": "ProxySQL", "link": "https://www.proxysql.com/", "icon_filename": "proxysql.png", "categories": ["data-collection.database-servers"]}, "keywords": ["proxysql", "databases", "sql"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | 
\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| my.cnf | Specifies the my.cnf file to read connection parameters from, under the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-proxysql-ProxySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pulsar", "plugin_name": "go.d.plugin", "module_name": "pulsar", "monitored_instance": {"name": "Apache Pulsar", "link": "https://pulsar.apache.org/", "icon_filename": "pulsar.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["pulsar"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | 
Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", "integration_type": "collector", "id": "go.d.plugin-pulsar-Apache_Pulsar", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-rabbitmq", "plugin_name": "go.d.plugin", "module_name": "rabbitmq", "monitored_instance": {"name": "RabbitMQ", "link": "https://www.rabbitmq.com/", "icon_filename": "rabbitmq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["rabbitmq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but it is disabled by default.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n
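On most systems you can enable it with RabbitMQ's own CLI tool (a sketch; the exact invocation can vary by installation):\n\n```bash\nrabbitmq-plugins enable rabbitmq_management\n```\n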
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | objects |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-rabbitmq-RabbitMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-redis", "plugin_name": "go.d.plugin", "module_name": "redis", "monitored_instance": {"name": "Redis", "link": "https://redis.com/", "categories": ["data-collection.database-servers"], "icon_filename": "redis.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["redis", "databases"], "most_popular": true}, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n
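You can reproduce these probes manually with `redis-cli` to verify that the server is reachable (a quick check, assuming a local unauthenticated instance):\n\n```bash\nredis-cli -h 127.0.0.1 -p 6379 PING\nredis-cli -h 127.0.0.1 -p 6379 INFO all\n```\n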
\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-redis-Redis", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/redis/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": {"name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": 
["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["scaleio"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| 
scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", "integration_type": "collector", "id": "go.d.plugin-scaleio-Dell_EMC_ScaleIO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-snmp", "plugin_name": "go.d.plugin", "module_name": "snmp", "monitored_instance": {"name": "SNMP devices", "link": "", "icon_filename": "snmp.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["snmp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector monitors any SNMP devices and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\nIt supports:\n\n- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3.\n- any number of SNMP devices.\n- each SNMP device can be used to collect data for any number of charts.\n- each chart may have any number of dimensions.\n- each SNMP device may have a different update frequency.\n- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).\n\nKeep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second.\n`go.d.plugin` reports the time it took for the SNMP device to respond when executed in debug mode.\n\nAlso, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped.\nThis is a problem of the SNMP device, not this collector. In this case, consider reducing the frequency of data collection (increasing `update_every`).\n\n
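For example, a minimal sketch of a job that polls a slow device every 30 seconds (the job name, address and community here are placeholders, and the required chart definitions are omitted):\n\n```yaml\njobs:\n - name: slow_switch\n   update_every: 30\n   hostname: 192.0.2.1\n   community: public\n   # charts: [...] define the charts as shown in the Examples section\n```\n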
\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Find OIDs\n\nUse `snmpwalk`, like this:\n\n```sh\nsnmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1\n```\n\n- `-t 20` is the timeout in seconds.\n- `-O fn` will display full OIDs in numeric format.\n- `-v 2c` is the SNMP version.\n- `-c public` is the SNMP community.\n- `192.0.2.1` is the SNMP device.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target IPv4 address. | 127.0.0.1 | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 10 | no |\n| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. 
| 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n{% /details %}\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`.\n\n> **SNMPv1**: just set `options.version` to 1.\n> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. 
temperature), use `absolute` instead.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\nThe rest of the configuration is the same as in the SNMPv1/2 example.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n##### Multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its index (1-24) appended to:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. the `in` dimension will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means: inject the anchor, then extend it. 
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThe metrics that will be collected are defined in the configuration file.\n", "integration_type": "collector", "id": "go.d.plugin-snmp-SNMP_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-solr", "plugin_name": "go.d.plugin", "module_name": "solr", "monitored_instance": {"name": "Solr", "link": "https://lucene.apache.org/solr/", "icon_filename": "solr.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["solr"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Solr\n\nPlugin: go.d.plugin\nModule: solr\n\n## Overview\n\nThis collector monitors Solr instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Solr version 6.4+\n\nThis collector does not work with Solr versions lower than 6.4.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/solr.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/solr.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8983 | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal Solr instance with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n - name: remote\n url: http://203.0.113.10:8983\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m solr\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Solr instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| solr.search_requests | search | requests/s |\n| solr.search_errors | errors | errors/s |\n| solr.search_errors_by_type | client, server, timeouts | errors/s |\n| solr.search_requests_processing_time | time | milliseconds |\n| solr.search_requests_timings | min, median, mean, max | milliseconds |\n| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n| solr.update_requests | search | requests/s |\n| solr.update_errors | errors | errors/s |\n| solr.update_errors_by_type | client, server, timeouts | errors/s |\n| solr.update_requests_processing_time | time | milliseconds |\n| solr.update_requests_timings | min, median, mean, max | milliseconds |\n| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-solr-Solr", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/solr/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-springboot2", "plugin_name": "go.d.plugin", "module_name": "springboot2", "monitored_instance": {"name": "Java Spring-boot 2 applications", "link": "", "icon_filename": "springboot.png", "categories": ["data-collection.apm"]}, "keywords": ["springboot"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Java Spring-boot 2 applications\n\nPlugin: go.d.plugin\nModule: springboot2\n\n## Overview\n\nThis collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects applications running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Spring Boot Actuator\n\nThe Spring Boot Actuator exposes metrics over HTTP, to use it:\n\n- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.\n- set `management.endpoints.web.exposure.include=*` in your `application.properties`.\n\nRefer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. 
\u2018How-to\u2019 guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/springboot2.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/springboot2.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/actuator/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n - name: remote\n url: http://192.0.2.1:8080/actuator/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m springboot2\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Java Spring-boot 2 applications instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| springboot2.thread | daemon, total | threads |\n| springboot2.heap | free, eden, survivor, old | B |\n| springboot2.heap_eden | used, commited | B |\n| springboot2.heap_survivor | used, commited | B |\n| springboot2.heap_old | used, commited | B |\n| springboot2.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-springboot2-Java_Spring-boot_2_applications", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-squidlog", "plugin_name": "go.d.plugin", "module_name": "squidlog", "monitored_instance": {"name": "Squid log files", "link": "https://www.squid-cache.org/", "icon_filename": "squid.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["squid", "logs"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog knows how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). 
|\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client. |\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n##### parser.ltsv_config.mapping\n\nDefines the mapping between LTSV labels and **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squidlog\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-squidlog-Squid_log_files", 
"edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-supervisord", "plugin_name": "go.d.plugin", "module_name": "supervisord", "monitored_instance": {"name": "Supervisor", "link": "http://supervisord.org/", "icon_filename": "supervisord.png", "categories": ["data-collection.processes-and-system-services"]}, "keywords": ["supervisor"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n##### Socket\n\nCollect metrics via Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-supervisord-Supervisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-systemdunits", "plugin_name": "go.d.plugin", "module_name": "systemdunits", "monitored_instance": {"name": "Systemd Units", "link": "https://www.freedesktop.org/wiki/Software/systemd/", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"]}, "keywords": ["systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support 
auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| include | Systemd units filter. | *.service | no |\n| timeout | System bus requests timeout. | 1 | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n{% /details %}\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n{% /details %}\n##### One specific unit\n\nCollect state of one specific unit.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n{% /details %}\n##### All unit types\n\nCollect state of all units.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service-unit\n include:\n - '*'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-systemdunits-Systemd_Units", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-tengine", "plugin_name": "go.d.plugin", "module_name": "tengine", "monitored_instance": {"name": "Tengine", "link": "https://tengine.taobao.org/", "icon_filename": "tengine.jpeg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["tengine", "web", "webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the ngx_http_reqstat_module module\n\nTo enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default 
| Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-tengine-Tengine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-traefik", "plugin_name": "go.d.plugin", "module_name": "traefik", "monitored_instance": {"name": "Traefik", "link": "https://traefik.io/", "icon_filename": "traefik.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["traefik", "proxy", "webproxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the entrypoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", "integration_type": "collector", "id": "go.d.plugin-traefik-Traefik", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-unbound", "plugin_name": "go.d.plugin", "module_name": "unbound", "monitored_instance": {"name": "Unbound", "link": "https://nlnetlabs.nl/projects/unbound/about/", "icon_filename": "unbound.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["unbound", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection of parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. 
| 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n{% /details %}\n##### Unix socket\n\nConnecting through Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", "integration_type": "collector", "id": "go.d.plugin-unbound-Unbound", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-upsd", "plugin_name": "go.d.plugin", "module_name": "upsd", "monitored_instance": {"name": "UPS (NUT)", "link": "", "icon_filename": "plug-circle-bolt.svg", "categories": ["data-collection.ups"]}, "keywords": ["ups", "nut"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). \"battery.type\" variable value. |\n| device_model | Device model. \"device.model\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", "integration_type": "collector", "id": "go.d.plugin-upsd-UPS_(NUT)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vcsa", "plugin_name": "go.d.plugin", "module_name": "vcsa", "monitored_instance": {"name": "vCenter Server Appliance", "link": "https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. |\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. 
It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nHealth statuses:\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error occurred while retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
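Each health chart listed in the Metrics table below presumably exposes all of these statuses as dimensions, with the dimension matching the current status set to 1 and the rest set to 0. As a quick sanity check, you can read the latest values from the Netdata API once the collector is running; the chart ID here (`vcsa_vcsa1.system_health_status`, derived from a hypothetical job named `vcsa1`) is illustrative:\n\n```bash\n# Illustrative chart ID; adjust it to match your job name.\n# Exactly one status dimension is expected to be 1.\ncurl -s 'http://127.0.0.1:19999/api/v1/data?chart=vcsa_vcsa1.system_health_status&points=1&format=json'\n```\n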
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-vcsa-vCenter_Server_Appliance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vernemq", "plugin_name": "go.d.plugin", "module_name": "vernemq", "monitored_instance": {"name": "VerneMQ", "link": "https://vernemq.com", "icon_filename": "vernemq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["vernemq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split 
brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute 
|\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimension per 
reason | packets/s |\n| vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vernemq-VerneMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vsphere", "plugin_name": "go.d.plugin", "module_name": "vsphere", "monitored_instance": {"name": "VMware vCenter Server", "link": "https://www.vmware.com/products/vcenter-server.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware", "esxi", "vcenter"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and there is no point in decreasing it:\n**VMware real-time statistics are generated at a 20-second granularity**.\n\nFor big installations, 20 seconds is likely not enough, and the value should be increased.\n\nTo get a better picture, we recommend running the collector in debug mode and checking how long it takes to collect metrics.\n
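For example (a sketch; it assumes the standard plugin location shown in the Troubleshooting section below):\n\n```bash\n# Run the collector once in debug mode as the netdata user and inspect the timings it logs.\nsudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m vsphere\n```\n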
\nExample (all unrelated debug lines were removed)\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\nThere you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't affect collection.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n
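As an illustration only (not a recommendation), a job tuned for a larger installation might look like the sketch below; pick the values based on your own debug timings:\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n update_every : 60 # a multiple of the 20-second statistics granularity\n timeout : 30 # should comfortably cover the scrape times seen in debug output\n discovery_interval : 600 # discovery runs in a separate thread\n```\n\n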
", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vsphere-VMware_vCenter_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-web_log", "plugin_name": "go.d.plugin", "module_name": "web_log", "monitored_instance": {"name": "Web server log files", "link": "", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "webservers.svg"}, "keywords": ["webserver", "apache", "httpd", "nginx", "lighttpd", "logs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.\n- NGINX logs URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. 
| [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). | | yes |\n| parser | Log parser configuration. | | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.csv_config | CSV log parser config. | | no |\n| parser.csv_config.delimiter | CSV field delimiter. | , | no |\n| parser.csv_config.format | CSV log format. | | no |\n| parser.ltsv_config | LTSV log parser config. | | no |\n| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.json_config | JSON log parser config. | | no |\n| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| parser.regexp_config | RegExp log parser config. | | no |\n| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### parser.log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nparser:\n log_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog tries to auto-detect the appropriate log parser and log format using the last line of the log file.\n\n- checks if format is `CSV` (using regexp).\n- checks if format is `JSON` (using regexp).\n- assumes format is `CSV` and tries to find appropriate `CSV` log format using predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't, you need to set the format manually.\n\n\n##### parser.csv_config.format\n\n\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: json\n json_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions (groups). These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n
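\nFor illustration, a pattern for the widely used NGINX \"combined\" log format might look like the sketch below; it is an assumption to adapt to your own format, and every named group must be a **known field**:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n # remote_addr, request, status and body_bytes_sent are known fields\n pattern: '^(?P<remote_addr>[^ ]+) - [^ ]+ \\[[^\\]]+\\] \"(?P<request>[^\"]+)\" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+)'\n```\n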
\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-web_log-Web_server_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-whoisquery", "plugin_name": "go.d.plugin", "module_name": "whoisquery", "monitored_instance": {"name": "Domain expiration date", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["whois"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n{% /details %}\n
##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple domains.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-whoisquery-Domain_expiration_date", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-ad", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Active Directory", "link": "https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "active directory", "ad", "adcs", "adfs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. 
CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-hyperv", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "HyperV", "link": 
"https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "hyperv", "virtualization", "vm"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-msexchange", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS Exchange", "link": "https://www.microsoft.com/en-us/microsoft-365/exchange/email", "icon_filename": 
"exchange.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mail"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-mssql", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS SQL Server", "link": "https://www.microsoft.com/en-us/sql-server/", "icon_filename": "mssql.svg", 
"categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mssql", "database", "db"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-dotnet", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "NET Framework", "link": "https://dotnet.microsoft.com/en-us/download/dotnet-framework", "icon_filename": 
"dotnet.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "dotnet"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["data-collection.windows-systems"], 
"icon_filename": "windows.svg"}, "keywords": ["windows", "microsoft"], "most_popular": true, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-wireguard", "plugin_name": "go.d.plugin", "module_name": "wireguard", "monitored_instance": {"name": "WireGuard", "link": "https://www.wireguard.com/", "categories": ["data-collection.vpns"], "icon_filename": 
"wireguard.svg"}, "keywords": ["wireguard", "vpn", "security"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-wireguard-WireGuard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-x509check", "plugin_name": "go.d.plugin", "module_name": "x509check", "monitored_instance": {"name": "X.509 certificate", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "lock.svg"}, "keywords": ["x509", "certificate"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors x509 certificate expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, smtp, tcp, tcp4, tcp6, udp, udp4, udp6, file. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. 
| 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n{% /details %}\n##### Local file certificate\n\nLocal file certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n{% /details %}\n##### SMTP certificate\n\nSMTP certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | revoked | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-x509check-X.509_certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-zookeeper", "plugin_name": "go.d.plugin", "module_name": "zookeeper", "monitored_instance": {"name": "ZooKeeper", "link": "https://zookeeper.apache.org/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "zookeeper.svg"}, "keywords": ["zookeeper"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nThis collector connects to the ZooKeeper instance via TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to ZooKeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nLocal server.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n{% /details %}\n##### TLS with self-signed certificate\n\nZooKeeper with TLS and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-zookeeper-ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "idlejitter.plugin", "module_name": "idlejitter.plugin", "monitored_instance": {"name": "Idle OS Jitter", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["latency", "jitter"], "most_popular": false}, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. 
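As a self-contained sketch of the same measurement (an illustration, not the plugin's C implementation):\n\n```go\npackage main\n\nimport (\n    \"fmt\"\n    \"time\"\n)\n\nfunc main() {\n    const requested = 20 * time.Millisecond // the plugin's default \"loop time in ms\"\n    for i := 0; i < 5; i++ {\n        start := time.Now()\n        time.Sleep(requested)\n        // Any overshoot is time the scheduler kept the thread waiting: the idle jitter.\n        jitter := time.Since(start) - requested\n        fmt.Printf(\"idle jitter: %d us\\n\", jitter.Microseconds())\n    }\n}\n```\n\n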
The plugin repeats this measurement dozens of times per second to ensure a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", "integration_type": "collector", "id": "idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ioping.plugin", "module_name": "ioping.plugin", "monitored_instance": {"name": "IOPing", "link": "https://github.com/koct9i/ioping", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. 
Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to the `ioping` command. | -T 1000000 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details summary=\"Config\" %}\n```yaml\ndestination=\"/dev/sda\"\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
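The latency metric below is parsed from the output of the `ioping` command; a rough Go sketch of the same approach (assumes `ioping` is installed; `-c` count and `-B` raw-statistics flags per its man page, and `/tmp` is just an example destination):\n\n```go\npackage main\n\nimport (\n    \"fmt\"\n    \"log\"\n    \"os/exec\"\n)\n\nfunc main() {\n    // -c 1: issue a single request; -B: print raw, machine-readable statistics.\n    out, err := exec.Command(\"ioping\", \"-c\", \"1\", \"-B\", \"/tmp\").Output()\n    if err != nil {\n        log.Fatal(err)\n    }\n    fmt.Printf(\"raw ioping statistics: %s\", out)\n}\n```\n\n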
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "ioping.plugin-ioping.plugin-IOPing", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "macos.plugin", "module_name": "mach_smi", "monitored_instance": {"name": "macOS", "link": "https://www.apple.com/macos", "categories": ["data-collection.macos-systems"], "icon_filename": "macos.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["macos", "apple", "darwin"], "most_popular": false}, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistics` is called to collect CPU and virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). 
| yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). 
| auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring.\n\n{% details summary=\"Config\" %}\n```ini\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n{% /details %}\n##### Disable complete Mach SMI section.\n\nA basic example that disables the complete Mach SMI section.\n\n{% details summary=\"Config\" %}\n```ini\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
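As an illustration of the first collection method, a minimal macOS-only Go sketch that reads the sysctl value behind system.uptime (assuming the `SysctlTimeval` helper of golang.org/x/sys/unix; the plugin itself is written in C):\n\n```go\npackage main\n\nimport (\n    \"fmt\"\n    \"log\"\n    \"time\"\n\n    \"golang.org/x/sys/unix\"\n)\n\nfunc main() {\n    // kern.boottime holds the boot timestamp as a struct timeval.\n    tv, err := unix.SysctlTimeval(\"kern.boottime\")\n    if err != nil {\n        log.Fatal(err)\n    }\n    boot := time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)\n    fmt.Printf(\"uptime: %s\\n\", time.Since(boot).Round(time.Second))\n}\n```\n\n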
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "macos.plugin-mach_smi-macOS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "nfacct.plugin", "module_name": "nfacct.plugin", "monitored_instance": {"name": "Netfilter", "link": "https://www.netfilter.org/", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "netfilter.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a netlink socket to connect with netfilter and collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n", "integration_type": "collector", "id": "nfacct.plugin-nfacct.plugin-Netfilter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "perf.plugin", "module_name": "perf.plugin", "monitored_instance": {"name": "CPU performance", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux", "cpu performance", "cpu cache", "perf.plugin"], "most_popular": false}, "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics such as cycles, instructions, migrations, cache operations, and more.\n\nIt uses the `perf_event_open(2)` syscall to open file descriptors for monitoring perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets this permission during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and it is not desirable to let Netdata silently compete for PMUs and interfere with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. 
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n{% /details %}\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```ini\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n{% details summary=\"Config\" %}\n```ini\n[plugin:perf]\n command options = cycles\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
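The counters below are read from file descriptors created with `perf_event_open(2)`; a hedged, Linux-only Go sketch of that mechanism using golang.org/x/sys/unix (the plugin itself is written in C; running this needs perf-event permissions, e.g. root):\n\n```go\npackage main\n\nimport (\n    \"encoding/binary\"\n    \"fmt\"\n    \"log\"\n    \"unsafe\"\n\n    \"golang.org/x/sys/unix\"\n)\n\nfunc main() {\n    attr := unix.PerfEventAttr{\n        Type:   unix.PERF_TYPE_HARDWARE,\n        Config: unix.PERF_COUNT_HW_CPU_CYCLES, // the kind of counter behind perf.cpu_cycles\n        Bits:   unix.PerfBitDisabled | unix.PerfBitExcludeKernel,\n    }\n    attr.Size = uint32(unsafe.Sizeof(attr))\n\n    // pid=0, cpu=-1: count cycles for this process on any CPU.\n    fd, err := unix.PerfEventOpen(&attr, 0, -1, -1, unix.PERF_FLAG_FD_CLOEXEC)\n    if err != nil {\n        log.Fatal(err)\n    }\n    defer unix.Close(fd)\n\n    unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_RESET, 0)\n    unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0)\n    for i := 0; i < 10000000; i++ {\n    } // burn a few cycles to have something to count\n    unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_DISABLE, 0)\n\n    buf := make([]byte, 8)\n    if _, err := unix.Read(fd, buf); err != nil {\n        log.Fatal(err)\n    }\n    fmt.Printf(\"cpu cycles: %d\\n\", binary.LittleEndian.Uint64(buf))\n}\n```\n\n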
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", "integration_type": "collector", "id": "perf.plugin-perf.plugin-CPU_performance", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/diskstats", "monitored_instance": {"name": "Disk Statistics", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "disks", "io", "bcache", "block devices"], "most_popular": false}, "overview": "# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | 
disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.bcache_cache_alloc | unused, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/diskstats-Disk_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/interrupts", "monitored_instance": {"name": "Interrupts", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["interrupts"], "most_popular": false}, "overview": "# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. 
The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily\n used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system\n performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n understand what your system is doing. It can provide insights into the system's interaction with hardware,\n drivers, and other parts of the kernel.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/interrupts-Interrupts", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/loadavg", "monitored_instance": {"name": "System Load Average", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["load", "load average"], "most_popular": false}, "overview": "# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also includes the currently running processes and the total number of processes.\n\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the\n load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n system's workload. 
This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/loadavg-System_Load_Average", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/mdstat", "monitored_instance": {"name": "MD RAID", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["raid", "mdadm", "mdstat"], "most_popular": false}, "overview": "# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/mdstat-MD_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/meminfo", "monitored_instance": {"name": "Memory Usage", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory", "ram", "available", "committed"], "most_popular": false}, "overview": "# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n whether memory usage is a factor. 
For example, if your system is slow and cached swap is high, it could\n mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n decisions about future capacity needs.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.committed | Committed_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/meminfo-Memory_Usage", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/dev", "monitored_instance": {"name": "Network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["network interfaces"], "most_popular": false}, "overview": "# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/dev-Network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/ip_vs_stats", "monitored_instance": {"name": "IP Virtual Server", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip virtual server"], "most_popular": false}, "overview": "# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration 
monitors IP Virtual Server statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/netstat", "monitored_instance": {"name": "Network statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip", "udp", "udplite", "icmp", "netstat", "snmp"], "most_popular": false}, "overview": "# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/netstat-Network_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfs", "monitored_instance": {"name": "NFS Client", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs client", "filesystem"], "most_popular": false}, "overview": "# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfs-NFS_Client", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfsd", "monitored_instance": {"name": "NFS Server", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs server", "filesystem"], "most_popular": false}, "overview": "# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfsd-NFS_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sctp/snmp", "monitored_instance": {"name": "SCTP Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sctp", "stream control transmission protocol"], "most_popular": false}, "overview": "# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat", "monitored_instance": {"name": "Socket statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sockets"], "most_popular": false}, "overview": "# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | KiB |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat-Socket_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat6", "monitored_instance": {"name": "IPv6 Socket Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipv6 sockets"], "most_popular": false}, "overview": "# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/softnet_stat", "monitored_instance": {"name": "Softnet Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softnet"], "most_popular": false}, "overview": "# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Number of packets dropped because the backlog queue was full (`dropped`).\n- Times net_rx_action was rescheduled.\n- Number of times processed all lists before quota.\n- Number of times did not process all lists due to quota.\n- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.\n- Number of times GRO cells were processed.\n\nMonitoring the /proc/net/softnet_stat file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to 
exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/softnet_stat-Softnet_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/nf_conntrack", "monitored_instance": {"name": "Conntrack", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["connection tracking mechanism", "netfilter", "conntrack"], "most_popular": false}, "overview": "# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/nf_conntrack-Conntrack", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/synproxy", "monitored_instance": {"name": "Synproxy", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["synproxy"], "most_popular": false}, "overview": "# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/synproxy-Synproxy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/wireless", "monitored_instance": {"name": "Wireless network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["wireless devices"], "most_popular": false}, "overview": "# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
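The numbers in the wireless tables come straight from `/proc/net/wireless`; for illustration, the raw file can be inspected directly:

```bash
# One row per wireless interface: status, link quality, signal/noise
# levels, and the discarded-packet counters (nwid, crypt, frag, retry, misc)
cat /proc/net/wireless
```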
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/wireless-Wireless_network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pagetypeinfo", "monitored_instance": {"name": "Page types", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory page types"], "most_popular": false}, "overview": "# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pagetypeinfo-Page_types", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pressure", "monitored_instance": {"name": "Pressure Stall Information", "link": "", "categories": ["data-collection.linux-systems.pressure-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pressure"], "most_popular": false}, "overview": "# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` provides pressure stall information\n(PSI). PSI is a feature that allows the system to track the amount of time the system is stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors four separate files, one per resource:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides metrics for stall time over the last 10 seconds, 1 minute, and 5 minutes.\n\nMonitoring the /proc/pressure files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", 
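For reference, each pressure file exposes `some` (and, for memory and I/O, `full`) lines carrying the 10-second, 1-minute, and 5-minute averages plus a cumulative stall time in microseconds; a minimal sketch of reading them:

```bash
# Print the PSI averages and total stall time for each resource
# (the irq file exists only on newer kernels)
for res in cpu memory io irq; do
  [ -e /proc/pressure/$res ] && { echo "== $res =="; cat /proc/pressure/$res; }
done
```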
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pressure-Pressure_Stall_Information", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/softirqs", "monitored_instance": {"name": "SoftIRQ statistics", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softirqs", "interrupts"], "most_popular": false}, "overview": "# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/softirqs-SoftIRQ_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs", "monitored_instance": {"name": "ZFS Pools", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs pools", "pools", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Pools\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs\n\n## Overview\n\nThis integration provides metrics about the state of ZFS pools.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts 
are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs-ZFS_Pools", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs/arcstats", "monitored_instance": {"name": "ZFS Adaptive Replacement Cache", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs arc", "arc", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
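The ARC statistics are plain `name type data` triplets in the kstat file; an illustrative read of a few raw counters behind the hit-ratio charts (field names assumed from common OpenZFS builds):

```bash
# Cumulative ARC counters; the collector turns deltas of these into rates
awk '$1 ~ /^(hits|misses|size|c_max)$/ {print $1, $3}' /proc/spl/kstat/zfs/arcstats
```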
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/stat", "monitored_instance": {"name": "System statistics", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["cpu utilization", "process counts"], "most_popular": false}, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states and frequencies and key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- 
`/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
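The utilization percentages are computed from deltas of the cumulative jiffy counters in `/proc/stat`; a rough two-sample sketch for illustration (not the collector's actual implementation):

```bash
# Busy% of all CPUs from two samples of the aggregate "cpu" line
read -r _ u1 n1 s1 i1 w1 q1 sq1 st1 rest1 < /proc/stat
sleep 1
read -r _ u2 n2 s2 i2 w2 q2 sq2 st2 rest2 < /proc/stat
busy=$(( (u2+n2+s2+q2+sq2+st2) - (u1+n1+s1+q1+sq1+st1) ))
idle=$(( (i2+w2) - (i1+w1) ))
echo "cpu busy: $(( 100 * busy / (busy + idle) ))%"
```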
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/stat-System_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/sys/kernel/random/entropy_avail", "monitored_instance": {"name": "Entropy", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["entropy"], "most_popular": false}, "overview": "# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy is a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable.\n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. 
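(As an aside, both the pool level and the active hardware RNG, if any, are easy to inspect by hand; the paths below are the standard kernel interfaces, shown for illustration.)

```bash
# Bits of entropy currently available to the kernel's pool
cat /proc/sys/kernel/random/entropy_avail

# Which hardware RNG backend feeds the kernel, if one is present
cat /sys/class/misc/hw_random/rng_current 2>/dev/null
```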
These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/uptime", "monitored_instance": {"name": "System Uptime", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["uptime"], "most_popular": false}, "overview": "# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
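The source file holds just two numbers, seconds since boot and cumulative idle seconds across all CPUs; for illustration:

```bash
# First field: seconds since boot; second: aggregate idle seconds
read -r up idle < /proc/uptime
echo "up ${up%.*}s (~$(( ${up%.*} / 86400 )) days)"
```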
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/uptime-System_Uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/vmstat", "monitored_instance": {"name": "Memory Statistics", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "page faults", "oom", "numa"], "most_popular": false}, "overview": "# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by 
*scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/vmstat-Memory_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/block/zram", "monitored_instance": {"name": "ZRAM", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zram"], "most_popular": false}, "overview": "# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device backed by a portion of your system's RAM.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and the effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
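Each zram device exposes its counters in sysfs; a minimal sketch (device `zram0` assumed, column layout per the kernel's zram ABI documentation):

```bash
# mm_stat fields: orig_data_size compr_data_size mem_used_total ... (bytes)
awk '{ printf "original: %d B, compressed: %d B, ratio: %.2f\n", $1, $2, ($2 ? $1/$2 : 0) }' \
  /sys/block/zram0/mm_stat
```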
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/block/zram-ZRAM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/drm", "monitored_instance": {"name": "AMD GPU", "link": "https://www.amd.com", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "amd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["amd", "gpu", "hardware"], "most_popular": false}, "overview": "# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/drm-AMD_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/infiniband", "monitored_instance": {"name": "InfiniBand", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["infiniband", "rdma"], "most_popular": false}, "overview": "# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
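The per-port counters live under sysfs; an illustrative read (`mlx5_0` and port `1` are placeholders):

```bash
c=/sys/class/infiniband/mlx5_0/ports/1/counters
# port_rcv_data/port_xmit_data count 4-byte words, hence the *4
echo "rx bytes: $(( $(cat "$c/port_rcv_data") * 4 ))"
echo "tx bytes: $(( $(cat "$c/port_xmit_data") * 4 ))"
```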
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/infiniband-InfiniBand", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/power_supply", "monitored_instance": {"name": "Power Supply", "link": "", "categories": ["data-collection.linux-systems.power-supply-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["psu", "power supply"], "most_popular": false}, "overview": "# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors Power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
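The raw values come from the standard power-supply sysfs class; a quick sketch (`BAT0` is a placeholder):

```bash
p=/sys/class/power_supply/BAT0
cat "$p/capacity"   # remaining charge, percent
cat "$p/status"     # Charging / Discharging / Full
```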
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/power_supply-Power_Supply", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/edac/mc", "monitored_instance": {"name": "Memory modules (DIMMs)", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["edac", "ecc", "dimm", "ram", "hardware"], "most_popular": false}, "overview": "# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds:\n - errors related to a DIMM\n - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds:\n - memory controllers that can identify the physical DIMMs and report errors directly for them,\n - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n In this case the DIMMs reported may be more than the physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Type of error detection and correction used. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_dimm_errors | correctable, uncorrectable | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/node", "monitored_instance": {"name": "Non-Uniform Memory Access", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["numa"], "most_popular": false}, "overview": "# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and the ability of the system to be expanded. 
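(The per-node allocation counters behind these charts are exported through sysfs; for illustration:)

```bash
# numa_hit, numa_miss, numa_foreign, interleave_hit, local_node, other_node,
# cumulative since boot, one file per node
for f in /sys/devices/system/node/node*/numastat; do echo "== $f =="; cat "$f"; done
```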
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/fs/btrfs", "monitored_instance": {"name": "BTRFS", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.btrfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["btrfs", "filesystem"], "most_popular": false}, "overview": "# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## 
Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/fs/btrfs-BTRFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/kernel/mm/ksm", "monitored_instance": {"name": "Kernel Same-Page Merging", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ksm", "samepage", "merging"], "most_popular": false}, "overview": "# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "ipc", "monitored_instance": {"name": "Inter Process Communication", "link": "", "categories": ["data-collection.linux-systems.ipc-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipc", "semaphores", "shared memory"], "most_popular": false}, "overview": "# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple\n processes are trying to access a single shared resource, semaphores can ensure that only one process\n accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-ipc-Inter_Process_Communication", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "adaptec_raid", "monitored_instance": {"name": "AdaptecRAID", "link": "https://www.microchip.com/en-us/products/storage", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "adaptec.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# AdaptecRAID\n\nPlugin: python.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nThis collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.\n\n\nIt uses the arcconf command line utility (from Adaptec) to monitor your RAID controller.\n\nExecuted commands:\n - `sudo -n arcconf GETCONFIG 1 LD`\n - `sudo -n arcconf GETCONFIG 1 PD`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the arcconf command line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run arcconf as sudoer\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\nAdd the following to your /etc/sudoers file (`which arcconf` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/arcconf\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. 
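You can check the unit's current value before changing anything (a quick sketch using standard systemd tooling; the netdata.service unit name is assumed):\n\n```bash\n# show the capability bounding set currently applied to the netdata unit\nsystemctl show netdata -p CapabilityBoundingSet\n```\n\n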
Resetting is not optimal, but it is the next-best solution, given that arcconf cannot otherwise be executed via sudo.\n\nAs the root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/adaptec_raid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: my_job_name \n update_every: 1 # the JOB's data collection frequency\n priority: 60000 # the JOB's order on the dashboard\n penalty: yes # the JOB's penalty\n autodetection_retry: 0 # the JOB's re-check interval in seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin adaptec_raid debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |\n| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AdaptecRAID instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptec_raid.ld_status | a dimension per logical device | bool |\n| adaptec_raid.pd_state | a dimension per physical device | bool |\n| adaptec_raid.smart_warnings | a dimension per physical device | count |\n| adaptec_raid.temperature | a dimension per physical device | celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-adaptec_raid-AdaptecRAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/adaptec_raid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "alarms", "monitored_instance": {"name": "Netdata Agent alarms", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["alarms", "netdata"], "most_popular": false}, "overview": "# Netdata Agent alarms\n\nPlugin: python.d.plugin\nModule: alarms\n\n## Overview\n\nThis collector creates an 'Alarms' menu with one line plot of `alarms.status`.\n\n\nAlarm status is read from the Netdata agent rest api [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. 
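To inspect the raw payload the collector works from, you can query the same endpoint yourself (a quick sketch assuming the default localhost address):\n\n```bash\n# fetch the full alarms snapshot that the collector polls\ncurl -s 'http://127.0.0.1:19999/api/v1/alarms?all'\n```\n\n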
Also, by default all alarms produced will be monitored.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/alarms.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/alarms.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |\n| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {\"CLEAR\": 0, \"WARNING\": 1, \"CRITICAL\": 2} | yes |\n| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |\n| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |\n| alarm_contains_words | A \",\" separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with \"cpu\" or \"load\" in alarm name. Default includes all. | | yes |\n| alarm_excludes_words | A \",\" separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with \"cpu\" or \"load\" in alarm name. Default excludes None. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n\n```\n##### Advanced\n\nAn advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.\n\"ML\" job will collect status and values for all alarms with \"ml_\" in the name. 
Default job will collect status for all other alarms.\n\n\n{% details summary=\"Config\" %}\n```yaml\nML:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: true\n alarm_status_chart_type: 'stacked'\n alarm_contains_words: 'ml_'\n\nDefault:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: false\n alarm_status_chart_type: 'stacked'\n alarm_excludes_words: 'ml_'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin alarms debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netdata Agent alarms instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |\n| alarms.values | a dimension per alarm representing the latest collected value of the alarm. 
| value |\n\n", "integration_type": "collector", "id": "python.d.plugin-alarms-Netdata_Agent_alarms", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "am2320", "monitored_instance": {"name": "AM2320", "link": "https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "am2320", "sensor", "humidity"], "most_popular": false}, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n", "integration_type": "collector", "id": "python.d.plugin-am2320-AM2320", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "beanstalk", "monitored_instance": {"name": "Beanstalk", "link": "https://beanstalkd.github.io/", "categories": ["data-collection.message-brokers"], "icon_filename": "beanstalk.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["beanstalk", "beanstalkd", "message"], "most_popular": false}, "overview": "# Beanstalk\n\nPlugin: python.d.plugin\nModule: beanstalk\n\n## Overview\n\nMonitor Beanstalk metrics to enhance job queueing and processing efficiency. 
Track job rates, processing times, and queue lengths for better task management.\n\nThe collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### beanstalkc python module\n\nThe collector requires the `beanstalkc` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/beanstalk.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |\n| port | Port of the beanstalk service. | 11300 | no |\n\n{% /details %}\n#### Examples\n\n##### Remote beanstalk server\n\nA basic remote beanstalk server\n\n```yaml\nremote:\n name: 'beanstalk'\n host: '1.2.3.4'\n port: 11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local_beanstalk'\n host: '127.0.0.1'\n port: 11300\n\nremote_job:\n name: 'remote_beanstalk'\n host: '192.0.2.1'\n port: 11300\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin beanstalk debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.cpu_usage | user, system | cpu time |\n| beanstalk.jobs_rate | total, timeouts | jobs/s |\n| beanstalk.connections_rate | connections | connections/s |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.current_connections | written, producers, workers, waiting | connections |\n| beanstalk.binlog | written, migrated | records/s |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. Each tube produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.jobs_rate | jobs | jobs/s |\n| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.connections | using, waiting, watching | connections |\n| beanstalk.commands | deletes, pauses | commands/s |\n| beanstalk.pause | since, left | seconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-beanstalk-Beanstalk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "bind_rndc", "monitored_instance": {"name": "ISC Bind (RNDC)", "link": "https://www.isc.org/bind/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dns", "bind", "server"], "most_popular": false}, "overview": "# ISC Bind (RNDC)\n\nPlugin: python.d.plugin\nModule: bind_rndc\n\n## Overview\n\nMonitor ISC Bind (RNDC) performance for optimal DNS server operations. 
Monitor query rates, response times, and error rates to ensure reliable DNS service delivery.\n\nThis collector uses the `rndc` tool to dump statistics to a file (named.stats) and then reads that file to gather Bind Name Server summary performance metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to read the named.stats file at `/var/log/bind/named.stats`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum bind version and permissions\n\nThe version of bind must be >=9.6, and the Netdata user must have permissions to run `rndc stats`\n\n#### Setup log rotate for bind stats\n\nBIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.\nIt is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.\n\nTo set up BIND to dump stats do the following:\n\n1. Add the following to the options {} section of 'named.conf.options':\n`statistics-file \"/var/log/bind/named.stats\";`\n\n2. Create the bind/ directory in /var/log:\n`cd /var/log/ && mkdir bind`\n\n3. Change the owner of the directory to the 'bind' user:\n`chown bind bind/`\n\n4. RELOAD (NOT restart) BIND:\n`systemctl reload bind9.service`\n\n5. As root, run 'rndc stats' to dump the stats (BIND will create named.stats in the new directory)\n\nTo allow Netdata to run 'rndc stats', change the '/etc/bind/rndc.key' group to netdata:\n`chown :netdata rndc.key`\n\nLast, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:\n```\n/var/log/bind/named.stats {\n\n daily\n rotate 4\n compress\n delaycompress\n create 0644 bind bind\n missingok\n postrotate\n rndc reload > /dev/null\n endscript\n}\n```\nTo test your logrotate conf file, run as root:\n`logrotate /etc/logrotate.d/bind-rndc -d` (debug dry-run mode)\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/bind_rndc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/bind_rndc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| named_stats_path | Path to the named stats file, after being dumped by `rndc` | /var/log/bind/named.stats | no |\n\n{% /details %}\n#### Examples\n\n##### Local bind stats\n\nDefine a local path to the bind stats file\n\n```yaml\nlocal:\n named_stats_path: '/var/log/bind/named.stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin bind_rndc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC Bind (RNDC) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |\n| bind_rndc.incoming_queries | a dimension per incoming query type | queries |\n| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |\n| bind_rndc.stats_size | stats_size | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-bind_rndc-ISC_Bind_(RNDC)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/bind_rndc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "boinc", "monitored_instance": {"name": "BOINC", "link": "https://boinc.berkeley.edu/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["boinc", "distributed"], "most_popular": false}, "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` password file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### BOINC RPC interface\n\nBOINC requires use of a password to access its RPC interface. 
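On a typical system-wide Linux installation you can read it with a sketch like this (the `/var/lib/boinc` path is the default location mentioned above and is an assumption for your system):\n\n```bash\n# read the RPC password file from the common system-wide location (path assumed)\nsudo cat /var/lib/boinc/gui_rpc_auth.cfg\n```\n\nMore generally: 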
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n{% /details %}\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", "integration_type": "collector", "id": "python.d.plugin-boinc-BOINC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ceph", "monitored_instance": {"name": "Ceph", "link": "https://ceph.io/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ceph.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ceph", "storage"], "most_popular": false}, "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nUses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed\n\n#### Granting read permissions to ceph group from keyring file\n\nExecute: `chmod 640 
/etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of admin.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file | | yes |\n| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be readable by that group. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n{% /details %}\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-ceph-Ceph", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "changefinder", "monitored_instance": {"name": "python.d changefinder", "link": "", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["change detection", "anomaly detection", "machine learning", "ml"], "most_popular": false}, "overview": "# python.d changefinder\n\nPlugin: python.d.plugin\nModule: changefinder\n\n## Overview\n\nThis collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to\nperform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)\non your Netdata charts and/or dimensions.\n\n\nInstead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).\n\n### Notes\n\n- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its\n typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly\n this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw\n score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have\n already been produced (see the sketch below). So when you first turn the collector on, it will have a lot of flags in the beginning and then\n should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning\n approaches which need some initial window of time before they can be useful.\n- As this collector does most of the work in Python itself, you may want to try it out first on a test or development\n system to get a sense of its performance characteristics on a node similar to where you would like to use it.\n- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the\n typical performance characteristics we saw from running this collector (with defaults) were:\n - A runtime (`netdata.runtime_changefinder`) of ~30ms.\n - Typically ~1% additional CPU usage.\n - About 85 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.
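\n\nA rough, illustrative sketch of what each collection step does with the library's raw output (hypothetical values; the parameter names mirror the `cf_r`, `cf_order`, `cf_smooth`, `cf_threshold` and `n_score_samples` options documented below):\n\n```python\nimport changefinder\nimport numpy as np\n\n# one detector per chart/dimension (defaults match cf_r, cf_order, cf_smooth)\ncf = changefinder.ChangeFinder(r=0.5, order=1, smooth=15)\n\nraw_scores = []\nfor value in [10, 11, 10, 10, 55, 54, 56]:  # a toy series with a level shift\n    raw_scores.append(cf.update(value))\n\n# convert the latest raw score into a percentile of recent scores (the n_score_samples window)\npercentile = (np.array(raw_scores) < raw_scores[-1]).mean() * 100\nflag = int(percentile >= 99)  # flagged when above cf_threshold\n```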
\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default this collector will work over all `system.*` charts.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages for the netdata user\npip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4\n```\n\n**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section\nof your `netdata.conf` file.\n\n```yaml\n[ plugin:python.d ]\n # update every = 1\n command options = -ppython3\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/changefinder.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/changefinder.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |\n| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. 
| per_chart | yes |\n| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |\n| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |\n| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |\n| cf_threshold | the percentile above which scores will be flagged. | 99 | no |\n| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |\n| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: ''\n mode: 'per_chart'\n cf_r: 0.5\n cf_order: 1\n cf_smooth: 15\n cf_threshold: 99\n n_score_samples: 14400\n show_scores: false\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin changefinder debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d changefinder instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| changefinder.scores | a dimension per chart | score |\n| changefinder.flags | a dimension per chart | flag |\n\n", "integration_type": "collector", "id": "python.d.plugin-changefinder-python.d_changefinder", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/changefinder/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "dovecot", "monitored_instance": {"name": "Dovecot", "link": "https://www.dovecot.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "dovecot.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dovecot", "imap", "mail"], "most_popular": false}, "overview": "# Dovecot\n\nPlugin: python.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\nIt uses the dovecot socket and executes the `EXPORT global` command to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Dovecot configuration\n\nThe Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/dovecot.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| socket | Use this socket to communicate with Dovecot | /var/run/dovecot/stats | no |\n| host | Instead of using a socket, you can point the collector to an IP for Dovecot statistics. | | no |\n| port | Used in combination with host, configures the port Dovecot listens on. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocaltcpip:\n name: 'local'\n host: '127.0.0.1'\n port: 24242\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalsocket:\n name: 'local'\n socket: '/var/run/dovecot/stats'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin dovecot debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.sessions | active sessions | number |\n| dovecot.logins | logins | number |\n| dovecot.commands | commands | commands |\n| dovecot.faults | minor, major | faults |\n| dovecot.context_switches | voluntary, involuntary | switches |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | number/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth | ok, failed | attempts |\n| dovecot.auth_cache | hit, miss | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-dovecot-Dovecot", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/dovecot/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "example", "monitored_instance": {"name": "Example collector", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["example", "netdata", "python"], "most_popular": false}, "overview": "# Example collector\n\nPlugin: python.d.plugin\nModule: example\n\n## Overview\n\nExample collector that generates some random numbers as metrics.\n\nIf you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.\n\n\nThe `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/example.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/example.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| num_lines | The number of lines to create. | 4 | no |\n| lower | The lower bound of numbers to randomly sample from. | 0 | no |\n| upper | The upper bound of numbers to randomly sample from. | 100 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nfour_lines:\n name: \"Four Lines\"\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n num_lines: 4\n lower: 0\n upper: 100\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin example debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Example collector instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| example.random | random | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-example-Example_collector", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "exim", "monitored_instance": {"name": "Exim", "link": "https://www.exim.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "exim.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["exim", "mail", "server"], "most_popular": false}, "overview": "# Exim\n\nPlugin: python.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue.\n\nIt uses the `exim` command line binary to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Exim configuration - local installation\n\nThe module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.\n\n1. Edit the `exim` configuration with your preferred editor and add:\n`queue_list_requires_admin = false`\n2. Restart `exim` and Netdata\n\n\n#### Exim configuration - WHM (CPanel) server\n\nOn a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.\n\n1. Log in to WHM\n2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor\n3. Scroll down to the button **Add additional configuration setting** and click on it.\n4. In the new dropdown which will appear above, find and choose:\n`queue_list_requires_admin` and set to `false`\n5. Scroll to the end and click the **Save** button. You can verify the change as shown below.
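\n\nWith either configuration in place, a quick hypothetical check that mirrors what the collector runs by default (see the `command` option below):\n\n```python\nimport subprocess\n\n# run the same command the collector uses by default: exim -bpc\noutput = subprocess.check_output(['exim', '-bpc'], text=True)\nprint(int(output.strip()), 'emails in queue')\n```\n\nIf this raises a permission error when run as the netdata user, re-check the `queue_list_requires_admin` setting.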
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/exim.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | Path and command to the `exim` binary | exim -bpc | no |\n\n{% /details %}\n#### Examples\n\n##### Local exim install\n\nA basic local exim install.\n\n```yaml\nlocal:\n command: 'exim -bpc'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin exim debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", "integration_type": "collector", "id": "python.d.plugin-exim-Exim", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/exim/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "fail2ban", "monitored_instance": {"name": "Fail2ban", "link": "https://www.fail2ban.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "fail2ban.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["fail2ban", "security", "authentication", "authorization"], "most_popular": false}, "overview": "# Fail2ban\n\nPlugin: python.d.plugin\nModule: fail2ban\n\n## Overview\n\nMonitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.\n\n\nIt collects metrics through reading the default log and configuration files of fail2ban.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `fail2ban.log` file must be readable by the user `netdata`.\n - change the file ownership and access permissions.\n - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.\n\nTo change the file ownership and access permissions, execute the following:\n\n```shell\nsudo chown root:netdata /var/log/fail2ban.log\nsudo chmod 640 /var/log/fail2ban.log\n```\n\nTo persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:\n\n```shell\n/var/log/fail2ban.log {\n\n weekly\n rotate 4\n compress\n\n delaycompress\n missingok\n postrotate\n fail2ban-client flushlogs 1>/dev/null\n endscript\n\n # If fail2ban runs as non-root it still needs to have write access\n # to logfiles.\n # create 640 fail2ban adm\n create 640 root netdata\n}\n```
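\n\nAs an illustration of what the collector extracts from this log, a small hypothetical sketch that counts `Ban` events per jail (log line format assumed from typical fail2ban output):\n\n```python\nimport re\nfrom collections import Counter\n\n# count Ban lines per jail in the default log location\nban_re = re.compile(r'\\[(?P<jail>[^\\]]+)\\] Ban ')\n\nbans = Counter()\nwith open('/var/log/fail2ban.log') as log:\n    for line in log:\n        match = ban_re.search(line)\n        if match:\n            bans[match.group('jail')] += 1\n\nprint(bans)\n```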
\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.\nIf the conf file is not found, the default jail is `ssh`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/fail2ban.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, 
but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |\n| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |\n| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |\n| exclude | jails you want to exclude from autodetection. | | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocal:\n log_path: '/var/log/fail2ban.log'\n conf_path: '/etc/fail2ban/jail.local'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin fail2ban debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fail2ban instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.failed_attempts | a dimension per jail | attempts/s |\n| fail2ban.bans | a dimension per jail | bans/s |\n| fail2ban.banned_ips | a dimension per jail | ips |\n\n", "integration_type": "collector", "id": "python.d.plugin-fail2ban-Fail2ban", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/fail2ban/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "gearman", "monitored_instance": {"name": "Gearman", "link": "http://gearman.org/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "gearman.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["gearman", "gearman job server"], "most_popular": false}, "overview": "# Gearman\n\nPlugin: python.d.plugin\nModule: gearman\n\n## Overview\n\nMonitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.\n\nThis collector connects to a Gearman instance via either TCP or unix socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Socket permissions\n\nThe gearman UNIX socket should have read permission for user netdata.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/gearman.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | URL or IP where gearman is running. | localhost | no |\n| port | Port of URL or IP where gearman is running. | 4730 | no |\n| tls | Use tls to connect to gearman. | false | no |\n| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |\n| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local gearman service\n\nA basic host and port gearman configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\nremote:\n name: 'remote'\n host: '192.0.2.1'\n port: 4730\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin gearman debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.total_jobs | Pending, Running | Jobs |\n\n### Per gearman job\n\nMetrics related to Gearman jobs. 
Each job produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.single_job | Pending, Idle, Running | Jobs |\n\n", "integration_type": "collector", "id": "python.d.plugin-gearman-Gearman", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/gearman/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "go_expvar", "monitored_instance": {"name": "Go applications (EXPVAR)", "link": "https://pkg.go.dev/expvar", "categories": ["data-collection.apm"], "icon_filename": "go.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["go", "expvar", "application"], "most_popular": false}, "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```
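\n\nFrom Python, the same endpoint can be read with just the standard library (a minimal sketch; the address matches the sample application above):\n\n```python\nimport json\nimport urllib.request\n\n# fetch and decode the JSON served by the expvar handler\nwith urllib.request.urlopen('http://127.0.0.1:8080/debug/vars') as response:\n    data = json.load(response)\n\nprint(data['memstats']['Alloc'])  # bytes of allocated heap objects\n```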
\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hddtemp", "monitored_instance": {"name": "HDD temperature", "link": "https://linux.die.net/man/8/hddtemp", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hardware", "hdd temperature", "disk temperature", "temperature"], "most_popular": false}, "overview": "# HDD temperature\n\nPlugin: python.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt uses the `hddtemp` daemon to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Run `hddtemp` in daemon mode\n\nYou can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.\n\nSo running `hddtemp -d` would run the daemon, by default on port 7634.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hddtemp.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\nBy default, this collector will try to autodetect disks (autodetection works only for disks whose names start with \"sd\"). However, this can be overridden by setting the `devices` option to an array of desired disks.
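\n\nTo see the raw report the daemon serves (and that this collector parses), a small hypothetical probe:\n\n```python\nimport socket\n\n# hddtemp writes its full report as soon as a client connects\nwith socket.create_connection(('127.0.0.1', 7634), timeout=5) as conn:\n    raw = conn.recv(4096).decode()\n\nprint(raw)  # e.g. |/dev/sda|WDC WD10EZEX|34|C|\n```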
\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |\n| host | The IP or HOSTNAME to connect to. | localhost | yes |\n| port | The port to connect to. | 7634 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\n```\n##### Custom disk names\n\nAn example defining the disk names to detect.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n devices:\n - customdisk1\n - customdisk2\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\nremote_job:\n name : 'remote'\n host : '192.0.2.1'\n port : 7634\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hddtemp debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HDD temperature instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.temperatures | a dimension per disk | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hddtemp-HDD_temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hddtemp/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hpssa", "monitored_instance": {"name": "HP Smart Storage Arrays", "link": "https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hp.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "hp", "hpssa", "array"], "most_popular": false}, "overview": "# HP Smart Storage Arrays\n\nPlugin: python.d.plugin\nModule: hpssa\n\n## Overview\n\nThis collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.\n\nIt uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to execute the `ssacli` binary.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the hpssa collector\n\nThe `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Allow user netdata to execute `ssacli` as root.\n\nThis module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n`which ssacli` shows the full path to the binary.\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/ssacli\n```\n\n- Reset Netdata's systemd\n unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux\n distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. 
Resetting is not optimal, but a next-best solution given the inability to execute `ssacli` using `sudo`.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hpssa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hpssa.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |\n| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |\n\n{% /details %}\n#### Examples\n\n##### Local simple config\n\nA basic configuration, specifying the path to `ssacli`.\n\n```yaml\nlocal:\n ssacli_path: /usr/sbin/ssacli\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hpssa debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HP Smart Storage Arrays instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |\n| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |\n| hpssa.ld_status | a dimension per logical drive | Status |\n| hpssa.pd_status | a dimension per physical drive | Status |\n| hpssa.pd_temperature | a dimension per physical drive | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hpssa-HP_Smart_Storage_Arrays", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hpssa/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "icecast", "monitored_instance": {"name": "Icecast", "link": "https://icecast.org/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "icecast.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["icecast", "streaming", "media"], "most_popular": false}, "overview": "# Icecast\n\nPlugin: python.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWithout configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/icecast.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |\n| user | Username to use to connect to `url` if it's password protected. | | no |\n| pass | Password to use to connect to `url` if it's password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Remote Icecast server\n\nConfigure a remote icecast server\n\n```yaml\nremote:\n url: 'http://1.2.3.4:8443/status-json.xsl'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin icecast debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | a dimension for each active source | listeners |\n\n", "integration_type": "collector", "id": "python.d.plugin-icecast-Icecast", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/icecast/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ipfs", "monitored_instance": {"name": "IPFS", "link": "https://ipfs.tech/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ipfs.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IPFS\n\nPlugin: python.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS server metrics about its quality and performance.\n\nIt connects to an http endpoint of the IPFS server to collect the metrics\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the endpoint is accessible by the Agent, netdata will autodetect it\n\n#### Limits\n\nCalls to the following endpoints are disabled due to IPFS bugs:\n\n/api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874)\n/api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528)\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ipfs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |\n| url | URL to the IPFS API | no | yes |\n| repoapi | Collect repo metrics. 
| no | no |\n| pinapi | Set status of IPFS pinned object polling. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration; only one job will run at a time. The auto-detection mechanism uses it by default.\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\nremote_host:\n name: 'remote'\n url: 'http://192.0.2.1:5001'\n repoapi: no\n pinapi: no\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ipfs debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | kilobits/s |\n| ipfs.peers | peers | peers |\n| ipfs.repo_size | avail, size | GiB |\n| ipfs.repo_objects | objects, pinned, recursive_pins | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-ipfs-IPFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ipfs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "litespeed", "monitored_instance": {"name": "Litespeed", "link": "https://www.litespeedtech.com/products/litespeed-web-server", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "litespeed.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["litespeed", "web", "server"], "most_popular": false}, "overview": "# Litespeed\n\nPlugin: python.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/litespeed.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |\n\n{% /details %}\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files.\n\n```yaml\nlocalhost:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin litespeed debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-litespeed-Litespeed", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/litespeed/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "megacli", "monitored_instance": {"name": "MegaCLI", "link": "https://wikitech.wikimedia.org/wiki/MegaCli", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# MegaCLI\n\nPlugin: python.d.plugin\nModule: megacli\n\n## Overview\n\nExamine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.\n\nCollects adapter, physical drive, and battery stats using the megacli command-line tool\n\nExecuted commands:\n\n - `sudo -n megacli -LDPDInfo -aAll`\n - `sudo -n megacli -AdpBbuCmd -a0`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the megacli command line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata, to run megacli as sudoer\n\nThe module uses megacli, which can only be executed by root. 
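Once the `sudoers` entry described below is in place, you can verify the whole chain by running the collector's exact commands as the `netdata` user (a quick check; the binary path may differ on your system):\n\n```bash\nsudo -u netdata sudo -n megacli -LDPDInfo -aAll\nsudo -u netdata sudo -n megacli -AdpBbuCmd -a0\n```\n\n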
It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\nAdd to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/megacli\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/megacli.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| do_battery | Whether to collect battery stats (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration per job.\n\n```yaml\njob_name:\n name: myname\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin megacli debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |\n| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |\n| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |\n| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |\n| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MegaCLI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_degraded | a dimension per adapter | is degraded |\n| megacli.pd_media_error | a dimension per physical drive | errors/s |\n| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |\n\n### Per battery\n\nMetrics related to Battery Backup Units, each BBU provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_relative_charge | adapter {battery id} | percentage |\n| megacli.bbu_cycle_count | adapter {battery id} | cycle count |\n\n", "integration_type": "collector", "id": "python.d.plugin-megacli-MegaCLI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/megacli/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "memcached", "monitored_instance": {"name": "Memcached", "link": "https://memcached.org/", "categories": ["data-collection.database-servers"], "icon_filename": "memcached.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memcached", "memcache", "cache", "database"], "most_popular": false}, "overview": "# Memcached\n\nPlugin: python.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. 
Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, collector will attempt to connect to memcached instance on `127.0.0.1:11211` address.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/memcached.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| host | the host to connect to. | 127.0.0.1 | no |\n| port | the port to connect to. | 11211 | no |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### localhost\n\nAn example configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 11211\n\n```\n##### localipv4\n\nAn example configuration for localipv4.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 11211\n\n```\n{% /details %}\n##### localipv6\n\nAn example configuration for localipv6.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '::1'\n port: 11211\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. 
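Before that, it can help to confirm that memcached itself is answering the `stats` command (a quick check, assuming the default `127.0.0.1:11211` address and that `nc` is available):\n\n```bash\nprintf 'stats\\nquit\\n' | nc 127.0.0.1 11211\n```\n\nIf memcached responds, proceed with the debug steps. 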
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin memcached debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-memcached-Memcached", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/memcached/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "monit", "monitored_instance": {"name": "Monit", "link": "https://mmonit.com/monit/", "categories": ["data-collection.synthetic-checks"], "icon_filename": "monit.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["monit", "mmonit", "supervision tool", "monitrc"], "most_popular": false}, "overview": "# Monit\n\nPlugin: python.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.\n\n\nIt gathers data from Monit's XML interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector 
will attempt to connect to Monit at `http://localhost:2812`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/monit.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |\n| user | Username in case the URL is password protected. | | no |\n| pass | Password in case the URL is password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n\n```\n##### Basic Authentication\n\nExample using basic username and password in order to authenticate.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n user: 'foo'\n pass: 'bar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:2812'\n\nremote_job:\n name: 'remote'\n url: 'http://192.0.2.1:2812'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin monit debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Monit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.filesystems | a dimension per target | filesystems |\n| monit.directories | a dimension per target | directories |\n| monit.files | a dimension per target | files |\n| monit.fifos | a dimension per target | pipes |\n| monit.programs | a dimension per target | programs |\n| monit.services | a dimension per target | processes |\n| monit.process_uptime | a dimension per target | seconds |\n| monit.process_threads | a dimension per target | threads |\n| monit.process_childrens | a dimension per target | children |\n| monit.hosts | a dimension per target | hosts |\n| monit.host_latency | a dimension per target | milliseconds |\n| monit.networks | a dimension per target | interfaces |\n\n", "integration_type": "collector", "id": "python.d.plugin-monit-Monit", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/monit/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "nsd", "monitored_instance": {"name": "Name Server Daemon", "link": "https://nsd.docs.nlnetlabs.nl/en/latest/#", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "nsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nsd", "name server daemon"], "most_popular": false}, "overview": "# Name Server Daemon\n\nPlugin: python.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more.\n\n\nIt uses the `nsd-control stats_noreset` command to gather metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### NSD version\n\nThe version of `nsd` must be 4.0+.\n\n\n#### Provide Netdata the permissions to run the command\n\nNetdata must have permissions to run the `nsd-control stats_noreset` command.\n\nYou can:\n\n- Add \"netdata\" user to \"nsd\" group:\n ```\n usermod -aG nsd netdata\n ```\n- Add Netdata to sudoers\n 1. Edit the sudoers file:\n ```\n visudo -f /etc/sudoers.d/netdata\n ```\n 2. 
Add the entry:\n ```\n Defaults:netdata !requiretty\n netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset\n ```\n\n > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/nsd.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | The command to run. | nsd-control stats_noreset | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: 'nsd_local'\n command: 'nsd-control stats_noreset'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin nsd debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Name Server Daemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.zones | master, slave | zones |\n| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |\n| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |\n| nsd.transfer | NOTIFY, AXFR | queries/s |\n| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-nsd-Name_Server_Daemon", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/nsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "openldap", "monitored_instance": {"name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "statsd.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["openldap", "RBAC", "Directory access"], "most_popular": false}, "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an OpenLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all the prerequisites are checked.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the OpenLDAP server to expose metrics to monitor it.\n\nFollow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install python-ldap module\n\nInstall the python-ldap module.\n\n1. With the pip package manager\n\n```bash\npip install python-ldap\n```\n\n2. With apt package manager (in most deb based distros)\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. 
With yum package manager (in most rpm based distros)\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access the OpenLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with rights to access the monitor statistics | | yes |\n| password | The password for the bind user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 for a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a TLS connection is used over ldap:// | no | no |\n| cert_check | Set to False if you want to skip the certificate check | True | yes |\n| timeout | Seconds before timing out if no connection exists | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncert_check: True\ntimeout: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-openldap-OpenLDAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "oracledb", "monitored_instance": {"name": "Oracle DB", "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", "categories": ["data-collection.database-servers"], "icon_filename": "oracle.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "oracle", "data warehouse", "SQL"], "most_popular": false}, "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach.\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nEdit the configuration to:\n\n1. 
Provide a valid user for the netdata collector to access the database.\n2. Specify the network target this database is listening on.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | One of the strings \"tcp\" or \"tcps\", indicating whether to use unencrypted or encrypted network traffic | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration, with two jobs for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThese metrics refer to the entire monitored application.\n\n### Per Oracle DB instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", "integration_type": "collector", "id": "python.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "pandas", "monitored_instance": {"name": "Pandas", "link": "https://pandas.pydata.org/", "categories": ["data-collection.generic-data-collection"], "icon_filename": "pandas.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pandas", "python"], "most_popular": false}, "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for 
this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Temperature API Example\n\nexample pulling some hourly temperature data, a chart for today's forecast (mean, min, max) and another chart for the current temperature.\n\n{% details summary=\"Config\" %}\n```yaml\ntemperature:\n  name: \"temperature\"\n  update_every: 5\n  chart_configs:\n    - name: \"temperature_forecast_by_city\"\n      title: \"Temperature By City - Today Forecast\"\n      family: \"temperature.today\"\n      context: \"pandas.temperature\"\n      type: \"line\"\n      units: \"Celsius\"\n      df_steps: >\n        pd.DataFrame.from_dict(\n          {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n          for (city,lat,lng)\n          in [\n            ('dublin', 53.3441, -6.2675),\n            ('athens', 37.9792, 23.7166),\n            ('london', 51.5002, -0.1262),\n            ('berlin', 52.5235, 13.4115),\n            ('paris', 48.8567, 2.3510),\n            ('madrid', 40.4167, -3.7033),\n            ('new_york', 40.71, -74.01),\n            ('los_angeles', 34.05, -118.24),\n          ]\n          }\n        );\n        df.describe();                                         # get aggregate stats for each city;\n        df.transpose()[['mean', 'max', 'min']].reset_index();  # just take mean, min, max;\n        df.rename(columns={'index':'city'});                   # some column renaming;\n        df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n        df.rename(columns={0:'degrees'});                      # some column renaming;\n        pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n        df.rename(columns={0:'measurement'});                  # some column renaming;\n        df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n        df.sort_index();                                       # sort by city name;\n        df.transpose();                                        # transpose so it's just one wide row;\n    - name: \"temperature_current_by_city\"\n      title: \"Temperature By City - Current\"\n      family: \"temperature.current\"\n      context: \"pandas.temperature\"\n      type: \"line\"\n      units: \"Celsius\"\n      df_steps: >\n        pd.DataFrame.from_dict(\n          {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n          for (city,lat,lng)\n          in [\n            ('dublin', 53.3441, -6.2675),\n            ('athens', 37.9792, 23.7166),\n            ('london', 51.5002, -0.1262),\n            ('berlin', 52.5235, 13.4115),\n            ('paris', 48.8567, 2.3510),\n            ('madrid', 40.4167, -3.7033),\n            ('new_york', 40.71, -74.01),\n            ('los_angeles', 34.05, -118.24),\n          ]\n          }\n        );\n        df.transpose();\n        df[['temperature']];\n        df.transpose();\n\n```\n{% /details %}\n##### API CSV Example\n\nexample showing a read_csv from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_csv:\n  name: \"example_csv\"\n  update_every: 2\n  chart_configs:\n    - name: \"london_system_cpu\"\n      title: \"London System CPU - Ratios\"\n      family: \"london_system_cpu\"\n      context: \"pandas\"\n      type: \"line\"\n      units: \"n\"\n      df_steps: >\n        pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n        df.drop('time', axis=1);\n        df.mean().to_frame().transpose();\n        df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n        df.rename(columns={0:'average_user_system_ratio'});\n        df*100;\n\n```\n{% /details %}\n##### API JSON Example\n\nexample showing a read_json from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_json:\n  name: \"example_json\"\n  update_every: 2\n  chart_configs:\n    - name: \"london_system_net\"\n      title: \"London System Net - Total Bandwidth\"\n      family: 
\"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n{% /details %}\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n{% details summary=\"Config\" %}\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. 
It is that first row that will be taken\nas the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", "id": "python.d.plugin-pandas-Pandas", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "postfix", "monitored_instance": {"name": "Postfix", "link": "https://www.postfix.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "postfix.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["postfix", "mail", "mail server"], "most_popular": false}, "overview": "# Postfix\n\nPlugin: python.d.plugin\nModule: postfix\n\n## Overview\n\nKeep an eye on Postfix metrics for efficient mail server operations. \nImprove your mail server performance with Netdata's real-time metrics and built-in alerts.\n\n\nMonitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPostfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. To do this, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.\nSee the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.\n
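\nFor example, using `postconf` (a sketch; keep any users that are already listed on your system):\n\n```bash\n# Allow the netdata user to view the mail queue, then reload Postfix.\npostconf -e 'authorized_mailq_users = root, netdata'\npostfix reload\n```\n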
\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin postfix debug trace\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-postfix-Postfix", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/postfix/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "puppet", "monitored_instance": {"name": "Puppet", "link": "https://www.puppet.com/", "categories": ["data-collection.ci-cd-systems"], "icon_filename": "puppet.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["puppet", "jvm heap"], "most_popular": false}, "overview": "# Puppet\n\nPlugin: python.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/puppet.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n> Notes:\n> - The exact Fully Qualified Domain Name of the node should be used.\n> - Usually Puppet Server/DB startup time is VERY long, so the retry count should be set generously.\n> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |\n| tls_verify | Control HTTPS server certificate verification. | False | no |\n| tls_ca_file | Optional CA (bundle) file to use | | no |\n| tls_cert_file | Optional client certificate file | | no |\n| tls_key_file | Optional client key file | | no |\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\npuppetserver:\n  url: 'https://fqdn.example.com:8140'\n  autodetection_retry: 1\n\n```\n##### TLS Certificate\n\nAn example using a TLS certificate.\n\n{% details summary=\"Config\" %}\n```yaml\npuppetdb:\n  url: 'https://fqdn.example.com:8081'\n  tls_cert_file: /path/to/client.crt\n  tls_key_file: /path/to/client.key\n  autodetection_retry: 1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\npuppetserver1:\n  url: 'https://fqdn.example.com:8140'\n  autodetection_retry: 1\n\npuppetserver2:\n  url: 'https://fqdn.example2.com:8140'\n  autodetection_retry: 1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin puppet debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm | committed, used | MiB |\n| puppet.jvm | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", "integration_type": "collector", "id": "python.d.plugin-puppet-Puppet", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/puppet/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "rethinkdbs", "monitored_instance": {"name": "RethinkDB", "link": "https://rethinkdb.com/", "categories": ["data-collection.database-servers"], "icon_filename": "rethinkdb.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["rethinkdb", "database", "db"], "most_popular": false}, "overview": "# RethinkDB\n\nPlugin: python.d.plugin\nModule: rethinkdbs\n\n## Overview\n\nThis collector monitors metrics about RethinkDB clusters and database servers.\n\nIt uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to 127.0.0.1:28015.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe collector requires the `rethinkdb` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/rethinkdbs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/rethinkdbs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` 
value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | Hostname or ip of the RethinkDB server. | localhost | no |\n| port | Port to connect to the RethinkDB server. | 28015 | no |\n| user | The username to use to connect to the RethinkDB server. | admin | no |\n| password | The password to use to connect to the RethinkDB server. | | no |\n| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RethinkDB server\n\nAn example of a configuration for a local RethinkDB server\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 28015\n user: \"user\"\n password: \"pass\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin rethinkdbs debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_connected_servers | connected, missing | servers |\n| rethinkdb.cluster_clients_active | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | reads, writes | documents/s |\n\n### Per database server\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.client_connections | connections | connections |\n| rethinkdb.clients_active | active | clients |\n| rethinkdb.queries | queries | queries/s |\n| rethinkdb.documents | reads, writes | documents/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-rethinkdbs-RethinkDB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "retroshare", "monitored_instance": {"name": "RetroShare", "link": "https://retroshare.cc/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "retroshare.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["retroshare", "p2p"], "most_popular": false}, "overview": "# RetroShare\n\nPlugin: python.d.plugin\nModule: retroshare\n\n## Overview\n\nThis collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.\n\nIt connects to the RetroShare web interface to gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### RetroShare web interface\n\nRetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/retroshare.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/retroshare.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RetroShare Web UI\n\nA basic configuration for a RetroShare server running on localhost.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local retroshare'\n url: 'http://localhost:9090'\n\n```\n{% /details %}\n##### Remote RetroShare Web UI\n\nA basic configuration for a remote RetroShare server.\n\n{% details summary=\"Config\" %}\n```yaml\nremote:\n name: 'remote retroshare'\n url: 'http://1.2.3.4:9090'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin retroshare debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RetroShare instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| retroshare.bandwidth | Upload, Download | kilobits/s |\n| retroshare.peers | All friends, Connected friends | peers |\n| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |\n\n", "integration_type": "collector", "id": "python.d.plugin-retroshare-RetroShare", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/retroshare/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "riakkv", "monitored_instance": {"name": "RiakKV", "link": "https://riak.com/products/riak-kv/index.html", "categories": ["data-collection.database-servers"], "icon_filename": "riak.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "nosql", "big data"], "most_popular": false}, "overview": "# RiakKV\n\nPlugin: python.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nThis collector reads the database stats from the `/stats` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the `/stats` endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure RiakKV to enable /stats endpoint\n\nYou can follow the RiakKV configuration reference documentation for how to enable this.\n\nSource: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces\n
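\nYou can quickly confirm that the endpoint is reachable from the Netdata host (a sketch; adjust the host and port to your setup):\n\n```bash\n# Should return a large JSON document with the node statistics.\ncurl http://localhost:8098/stats\n```\n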
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/riakkv.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the server. | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic (default)\n\nA basic example configuration per job.\n\n```yaml\nlocal:\n  url: 'http://localhost:8098/stats'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n  url: 'http://localhost:8098/stats'\n\nremote:\n  url: 'http://192.0.2.1:8098/stats'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin riakkv debug trace\n  ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |\n| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |\n| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |\n| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RiakKV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | errors | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n| riak.search.index | bad_entry, extract_fail | writes |\n\n", "integration_type": "collector", "id": "python.d.plugin-riakkv-RiakKV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/riakkv/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "samba", "monitored_instance": {"name": "Samba", "link": "https://www.samba.org/samba/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "samba.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["samba", "file sharing"], "most_popular": false}, "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt uses the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, the `smbstatus -P` binary is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password; a quick verification sketch follows these steps.\n\n- Add to your `/etc/sudoers` file:\n\n  `which smbstatus` shows the full path to the binary.\n\n  ```bash\n  netdata ALL=(root)       NOPASSWD: /path/to/smbstatus\n  ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n  The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, given that `smbstatus` cannot otherwise be executed via `sudo`.\n\n\n  As the `root` user, do the following:\n\n  ```bash\n  mkdir /etc/systemd/system/netdata.service.d\n  echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n  systemctl daemon-reload\n  systemctl restart netdata.service\n  ```\n\n
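\nAfter completing these steps, you can verify the setup by checking that the `netdata` user can run `smbstatus` without a password prompt (a sketch):\n\n```bash\n# Should print the profiling output instead of asking for a password.\nsudo -u netdata sudo -n smbstatus -P\n```\n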
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-samba-Samba", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (lm-sensors)", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "temperature", "voltage", "current", "power", "fan", "energy", "humidity"], "most_popular": false}, "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: python.d.plugin\nModule: sensors\n\n## Overview\n\nExamine Linux Sensors metrics with Netdata for insights into hardware health and performance.\n\nEnhance your system's reliability with real-time hardware health insights.\n\n\nReads system sensors information (temperature, voltage, electric current, power, etc.) 
via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following types of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/sensors.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\ntypes:\n  - temperature\n  - fan\n  - voltage\n  - current\n  - power\n  - energy\n  - humidity\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n  ```bash\n  ./python.d.plugin sensors debug trace\n  ```\n\n### lm-sensors doesn't work on your device\n\n
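If no sensors are reported, the required kernel drivers may not be loaded yet. A common first step (a suggestion, assuming the lm-sensors package is installed) is to run the detection tool that ships with lm-sensors and follow its prompts:\n\n```bash\n# Scans the system for sensor chips and offers to load the matching kernel modules.\nsudo sensors-detect\n```\n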
\n### ACPI ring buffer errors are printed\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per chip\n\nMetrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temperature | a dimension per sensor | Celsius |\n| sensors.voltage | a dimension per sensor | Volts |\n| sensors.current | a dimension per sensor | Ampere |\n| sensors.power | a dimension per sensor | Watt |\n| sensors.fan | a dimension per sensor | Rotations/min |\n| sensors.energy | a dimension per sensor | Joule |\n| sensors.humidity | a dimension per sensor | Percent |\n\n", "integration_type": "collector", "id": "python.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "smartd_log", "monitored_instance": {"name": "S.M.A.R.T.", "link": "https://linux.die.net/man/8/smartd", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "smart.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["smart", "S.M.A.R.T.", "SCSI devices", "ATA devices"], "most_popular": false}, "overview": "# S.M.A.R.T.\n\nPlugin: python.d.plugin\nModule: smartd_log\n\n## Overview\n\nThis collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.\n\n\nIt reads `smartd` log files to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nUpon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure `smartd` to write attribute information to files.\n\n`smartd` must be running with the `-A` option to write `smartd` attribute information to files.\n\nFor this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:\n\n```\n# dump smartd attrs info every 600 seconds\nsmartd_opts=\"-A /var/log/smartd/ -i 600\"\n```\n\nYou may need to create the smartd directory before smartd will write to it:\n\n```sh\nmkdir -p /var/log/smartd\n```\n\nOtherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See the [smartd manual page](https://linux.die.net/man/8/smartd) for more info on the `-A --attributelog=PREFIX` option.\n\n`smartd` appends logs at every run. 
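These files therefore grow over time. A minimal `logrotate` configuration sketch (assuming the `/var/log/smartd/` path configured above; adjust it to your setup) could look like:\n\n```\n# /etc/logrotate.d/smartd (a sketch)\n/var/log/smartd/*.csv {\n    weekly\n    rotate 4\n    compress\n    missingok\n    notifempty\n}\n```\n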
It's strongly recommended to use `logrotate` for the smartd files.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/smartd_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/smartd_log.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | Path to smartd log files. | /var/log/smartd | yes |\n| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |\n| age | Time in minutes since the last dump to file. | 30 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\ncustom:\n name: smartd_log\n log_path: '/var/log/smartd/'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin smartd_log debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe metrics listed below are split by availability on device type, SCSI or ATA.\n\n### Per S.M.A.R.T. 
instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | SCSI | ATA |\n|:------|:----------|:----|:---:|:---:|\n| smartd_log.read_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.seek_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.soft_read_error_rate | a dimension per device | errors | | \u2022 |\n| smartd_log.write_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.read_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.read_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.sata_interface_downshift | a dimension per device | events | | \u2022 |\n| smartd_log.udma_crc_error_count | a dimension per device | errors | | \u2022 |\n| smartd_log.throughput_performance | a dimension per device | value | | \u2022 |\n| smartd_log.seek_time_performance | a dimension per device | value | | \u2022 |\n| smartd_log.start_stop_count | a dimension per device | events | | \u2022 |\n| smartd_log.power_on_hours_count | a dimension per device | hours | | \u2022 |\n| smartd_log.power_cycle_count | a dimension per device | events | | \u2022 |\n| smartd_log.unexpected_power_loss | a dimension per device | events | | \u2022 |\n| smartd_log.spin_up_time | a dimension per device | ms | | \u2022 |\n| smartd_log.spin_up_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.calibration_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | \u2022 |\n| smartd_log.temperature_celsius | a dimension per device | celsius | \u2022 | \u2022 |\n| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.reserved_block_count | a dimension per device | percentage | | \u2022 |\n| smartd_log.program_fail_count | a dimension per device | errors | | \u2022 |\n| smartd_log.erase_fail_count | a dimension per device | failures | | \u2022 |\n| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | \u2022 |\n| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | \u2022 |\n| smartd_log.reallocation_event_count | a dimension per device | events | | \u2022 |\n| smartd_log.current_pending_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.percent_lifetime_used | a dimension per device | percentage | | \u2022 |\n| smartd_log.media_wearout_indicator | a dimension per device | percentage | | \u2022 |\n| smartd_log.nand_writes_1gib | a dimension per device | GiB | | \u2022 |\n\n", "integration_type": "collector", "id": "python.d.plugin-smartd_log-S.M.A.R.T.", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/smartd_log/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "spigotmc", "monitored_instance": {"name": "SpigotMC", "link": "", "categories": ["data-collection.gaming"], "icon_filename": 
"spigot.jfif"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["minecraft server", "spigotmc server", "spigot"], "most_popular": false}, "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n host: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using basic password for authentication with the remote console.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n name: local_server_pass\n host: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_server:\n name : my_local_server\n host : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n host : 192.0.2.1\n port: 25575\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-spigotmc-SpigotMC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "squid", "monitored_instance": {"name": "Squid", "link": "http://www.squid-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "squid.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["squid", "web delivery", "squid caching proxy"], "most_popular": false}, "overview": "# Squid\n\nPlugin: python.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about Squid clients and servers, like bandwidth and requests.\n\n\nIt collects metrics from the endpoint where Squid exposes its `counters` data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance 
Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Squid's Cache Manager\n\nTake a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/squid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| host | The host to connect to. | | yes |\n| port | The port to connect to. | | yes |\n| request | The URL to request from Squid. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nexample_job_name:\n name: 'local'\n host: 'localhost'\n port: 3128\n request: 'cache_object://localhost:3128/counters'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_job:\n name: 'local'\n host: '127.0.0.1'\n port: 3128\n request: 'cache_object://127.0.0.1:3128/counters'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 3128\n request: 'cache_object://192.0.2.1:3128/counters'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin squid debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-squid-Squid", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/squid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tomcat", "monitored_instance": {"name": "Tomcat", "link": "https://tomcat.apache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "tomcat.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["apache", "tomcat", "webserver", "websocket", "jakarta", "javaEE"], "most_popular": false}, "overview": "# Tomcat\n\nPlugin: python.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the HTTP endpoint `/manager/status` in XML format.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nYou need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and the Tomcat webserver are on the same host, the module attempts, without any configuration or credentials, to connect to http://localhost:8080/manager/status?XML=true. So it will probably fail.\n\n#### Limits\n\nThis module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't monitor it over a public network (the public internet). 
Credentials are passed by Netdata over an insecure connection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only `netdata` user to monitor the `/status` endpoint.\n\nThis is necessary for configuring the collector.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tomcat.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options per job\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |\n| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |\n| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |\n| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009 | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:8080/manager/status?XML=true'\n\n```\n##### Using an IPv4 endpoint\n\nA typical configuration using an IPv4 endpoint.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_ipv4:\n name : 'local'\n url : 'http://127.0.0.1:8080/manager/status?XML=true'\n\n```\n{% /details %}\n##### Using an IPv6 endpoint\n\nA typical configuration using an IPv6 endpoint.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_ipv6:\n name : 'local'\n url : 'http://[::1]:8080/manager/status?XML=true'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tomcat debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.accesses | accesses, errors | requests/s |\n| tomcat.bandwidth | sent, received | KiB/s |\n| tomcat.processing_time | processing time | seconds |\n| tomcat.threads | current, busy | current threads |\n| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |\n| tomcat.jvm_eden | used, committed, max | MiB |\n| tomcat.jvm_survivor | used, committed, max | MiB |\n| tomcat.jvm_tenured | used, committed, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-tomcat-Tomcat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tomcat/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tor", "monitored_instance": {"name": "Tor", "link": "https://www.torproject.org/", "categories": ["data-collection.vpns"], "icon_filename": "tor.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["tor", "traffic", "vpn"], "most_popular": false}, "overview": "# Tor\n\nPlugin: python.d.plugin\nModule: tor\n\n## Overview\n\nThis collector monitors Tor bandwidth traffic.\n\nIt connects to the Tor control port to collect traffic statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Python module\n\nThe `stem` Python library needs to be installed.\n\n\n#### Required Tor configuration\n\nAdd to `/etc/tor/torrc`:\n\n`ControlPort 9051`\n\nFor more options, please read the manual.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be 
defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| control_addr | Tor control IP address | 127.0.0.1 | no |\n| control_port | Tor control port. Can be either a TCP port, or a path to a socket file. | 9051 | no |\n| password | Tor control password | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_tcp:\n name: 'local'\n control_port: 9051\n password: # if required\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_socket:\n name: 'local'\n control_port: '/var/run/tor/control'\n password: # if required\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-tor-Tor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "uwsgi", "monitored_instance": {"name": "uWSGI", "link": "https://github.com/unbit/uwsgi/tree/2.0.21", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "uwsgi.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application server", "python", "web applications"], "most_popular": false}, "overview": "# uWSGI\n\nPlugin: python.d.plugin\nModule: uwsgi\n\n## Overview\n\nThis collector monitors uWSGI metrics about requests, workers, memory and more.\n\nIt collects every metric exposed by the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on the socket `/tmp/stats.socket`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats server\n\nMake sure that your uWSGI instance exposes its metrics via a Stats server.\n\nSource: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/uwsgi.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| socket | The path to the uWSGI stats socket. | no | no |\n| host | The host to connect to. | no | no |\n| port | The port to connect to. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration. The autodetection mechanism uses it by default. Because all JOBs share the same name, only one of them can run at a time.\n\n{% details summary=\"Config\" %}\n```yaml\nsocket:\n name : 'local'\n socket : '/tmp/stats.socket'\n\nlocalhost:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nlocalipv4:\n name : 'local'\n host : '127.0.0.1'\n port : 1717\n\nlocalipv6:\n name : 'local'\n host : '::1'\n port : 1717\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nremote:\n name : 'remote'\n host : '192.0.2.1'\n port : 1717\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin uwsgi debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.requests | a dimension per worker | requests/s |\n| uwsgi.tx | a dimension per worker | KiB/s |\n| uwsgi.avg_rt | a dimension per worker | milliseconds |\n| uwsgi.memory_rss | a dimension per worker | MiB |\n| uwsgi.memory_vsz | a dimension per worker | MiB |\n| uwsgi.exceptions | exceptions | exceptions |\n| uwsgi.harakiris | harakiris | harakiris |\n| uwsgi.respawns | respawns | respawns |\n\n", "integration_type": "collector", "id": "python.d.plugin-uwsgi-uWSGI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "varnish", "monitored_instance": {"name": "Varnish", "link": "https://varnish-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "varnish.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["varnish", "varnishstat", "varnishd", "cache", "web server", "web cache"], "most_popular": false}, "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.\n\nNote that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.\n\n\nIt uses the `varnishstat` tool to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe `netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a 
`job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-varnish-Varnish", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "w1sensor", "monitored_instance": {"name": "1-Wire Sensors", "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "1-wire.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "sensor", "1-wire"], "most_popular": false}, "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
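Once loaded, `w1_therm` exposes each detected sensor under `/sys/bus/w1/devices/`; a DS18B20, for example, appears as a `28-*` directory whose `w1_slave` file can be read directly. A minimal stand-alone read sketch (illustrative only, not the collector's own code):\n\n```python\nfrom pathlib import Path\n\n# Read every 1-Wire temperature sensor the kernel has detected.\nfor slave in Path('/sys/bus/w1/devices').glob('28-*/w1_slave'):\n    lines = slave.read_text().splitlines()\n    if lines and lines[0].endswith('YES'):  # CRC check passed\n        millicelsius = int(lines[1].rsplit('t=', 1)[1])\n        print(slave.parent.name, millicelsius / 1000.0, 'Celsius')\n```\n\n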
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto-detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human-readable name with a sensor's 1-Wire identifier. | | no |\n\n{% /details %}\n#### Examples\n\n##### Provide human-readable names\n\nAssociate two 1-Wire identifiers with human-readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-w1sensor-1-Wire_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "zscores", "monitored_instance": {"name": "python.d zscores", "link": "https://en.wikipedia.org/wiki/Standard_score", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zscore", "z-score", "standard score", "standard deviation", "anomaly detection", "statistical anomaly detection"], "most_popular": false}, "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
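For example, with `mean = 10`, `stddev = 2` and a raw value `x = 17`, the score is `z = (17 - 10) / 2 = 3.5`. A minimal sketch of the per-value calculation (a hypothetical helper mirroring the documented formula, not the collector's actual code):\n\n```python\ndef zscore(x, mean, stddev, z_clip=10, z_abs=True):\n    # z = (x - mean) / stddev, clipped at +/- z_clip for stability\n    z = (x - mean) / stddev if stddev else 0.0\n    z = max(-z_clip, min(z, z_clip))\n    return abs(z) if z_abs else z\n\nprint(zscore(17, 10, 2))  # -> 3.5\n```\n\n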
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart-level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-zscores-python.d_zscores", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "slabinfo.plugin", "module_name": "slabinfo.plugin", "monitored_instance": {"name": "Linux kernel SLAB allocator statistics", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux kernel", "slab", "slub", "slob", "slabinfo"], "most_popular": false}, "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"The main configuration file.\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "tc.plugin", "module_name": "tc.plugin", "monitored_instance": {"name": "tc QoS classes", "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. 
Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs access to the `tc` command to get the necessary metrics. To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with the following content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions.
Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. |\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", "id": "tc.plugin-tc.plugin-tc_QoS_classes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "timex.plugin", "module_name": "timex.plugin", "monitored_instance": {"name": "Timex", "link": "", "categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. 
| yes | yes |\n| time offset | Make chart showing the computed time offset between the local system and the reference clock. | yes | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", "id": "timex.plugin-timex.plugin-Timex", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "xenstat.plugin", "module_name": "xenstat.plugin", "monitored_instance": {"name": "Xen XCP-ng", "link": "https://xenproject.org/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "xen.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domain statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for Xen is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax.
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", "integration_type": "collector", "id": "xenstat.plugin-xenstat.plugin-Xen_XCP-ng", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml", "related_resources": ""}, {"id": "deploy-alpinelinux", "meta": {"name": "Alpine Linux", "link": "https://www.alpinelinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "alpine.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-amazonlinux", "meta": {"name": "Amazon Linux", "link": "https://aws.amazon.com/amazon-linux-2/", "categories": ["deploy.operating-systems"], "icon_filename": "amazonlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh 
&& sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-archlinux", "meta": {"name": "Arch Linux", "link": "https://archlinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "archlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build 
native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos", "meta": {"name": "CentOS", "link": "https://www.centos.org/", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos-stream", "meta": {"name": "CentOS Stream", "link": "https://www.centos.org/centos-stream", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh 
https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n| 8 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-debian", "meta": {"name": "Debian", "link": "https://www.debian.org/", "categories": ["deploy.operating-systems"], "icon_filename": "debian.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", 
"related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n| 10 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-docker", "meta": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["deploy.docker-kubernetes"], "icon_filename": "docker.svg"}, "most_popular": true, "keywords": ["docker", "container", "containers"], "install_description": "Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n", "methods": [{"method": "Docker CLI", "commands": [{"channel": "nightly", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:edge\n"}, {"channel": "stable", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:stable\n"}]}, {"method": "Docker Compose", "commands": 
[{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}, {"method": "Docker Swarm", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - 
NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}], "additional_info": "", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-fedora", "meta": {"name": "Fedora", "link": "https://www.fedoraproject.org/", "categories": ["deploy.operating-systems"], "icon_filename": "fedora.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 39 | Core | x86_64, aarch64 | |\n| 38 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-freebsd", "meta": {"name": "FreeBSD", "link": "https://www.freebsd.org/", "categories": ["deploy.operating-systems"], "icon_filename": "freebsd.svg"}, "most_popular": true, "keywords": ["freebsd"], "install_description": "## Install dependencies\nPlease install the following packages using the command below:\n\n```\npkg install bash e2fsprogs-libuuid git curl autoconf
automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\nThis step needs root privileges. Please respond in the affirmative for any relevant prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "fetch", "commands": [{"channel": "nightly", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-kubernetes", "meta": {"name": "Kubernetes (Helm)", "link": "", "categories": ["deploy.docker-kubernetes"], "icon_filename": "kubernetes.svg"}, "keywords": ["kubernetes", "container", "Orchestrator"], "install_description": "**Use helm install to install Netdata on your Kubernetes cluster**\nFor a new installation use `helm install`, or for existing clusters add the content below to your `override.yml` and then run `helm upgrade -f override.yml netdata netdata/netdata`\n", "methods": [{"method": "Helm", "commands": [{"channel": "nightly", "command": "helm install netdata netdata/netdata \\\n--set image.tag=edge{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled=\"true\" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled=\"true\" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n"}, {"channel": "stable", "command": "helm install netdata netdata/netdata \\\n--set image.tag=stable{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled=\"true\" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled=\"true\" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n"}]}, {"method": "Existing Cluster",
"commands": [{"channel": "nightly", "command": "image:\n tag: edge\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"}, {"channel": "stable", "command": "image:\n tag: stable\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-linux-generic", "meta": {"name": "Linux", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "linux.svg"}, "keywords": ["linux"], "most_popular": true, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-macos", "meta": {"name": "macOS", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "macos.svg"}, "most_popular": true, "keywords": ["macOS", "mac", "apple"], "install_description": "Run the following command on your Intel based OSX, macOS servers to install and claim Netdata:", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh 
&& sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-manjarolinux", "meta": {"name": "Manjaro Linux", "link": "https://manjaro.org/", "categories": ["deploy.operating-systems"], "icon_filename": "manjaro.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 
-1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-opensuse", "meta": {"name": "SUSE Linux", "link": "https://www.suse.com/", "categories": ["deploy.operating-systems"], "icon_filename": "openSUSE.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-oraclelinux", "meta": {"name": "Oracle Linux", "link": "https://www.oracle.com/linux/", "categories": ["deploy.operating-systems"], "icon_filename": "oraclelinux.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > 
/tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rhel", "meta": {"name": "Red Hat Enterprise Linux", "link": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux", "categories": ["deploy.operating-systems"], "icon_filename": "rhel.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | 
|\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rockylinux", "meta": {"name": "Rocky Linux", "link": "https://rockylinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "rocky.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-ubuntu", "meta": {"name": "Ubuntu", "link": "https://ubuntu.com/", "categories": ["deploy.operating-systems"], "icon_filename": "ubuntu.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} 
--claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 23.10 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-windows", "meta": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["deploy.operating-systems"], "icon_filename": "windows.svg"}, "keywords": ["windows"], "install_description": "1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install the Netdata Agent on Linux, FreeBSD, or macOS.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to the windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. 
Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the Windows nodes are displayed as separate nodes.\n", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "export-appoptics", "meta": {"name": "AppOptics", "link": "https://www.solarwinds.com/appoptics", "categories": ["export"], "icon_filename": "solarwinds.svg", "keywords": ["app optics", "AppOptics", "Solarwinds"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. 
So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-aws-kinesis", "meta": {"name": "AWS Kinesis", "link": "https://aws.amazon.com/kinesis/", "categories": ["export"], "icon_filename": "aws-kinesis.svg"}, "keywords": ["exporter", "AWS", "Kinesis"], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.\n- When building from source, use the following steps to ensure third-party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record in order to distribute records evenly across the available shards.\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAn example configuration that sets the AWS credentials and the destination stream explicitly.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-data", "meta": {"name": "Azure Data Explorer", "link": "https://azure.microsoft.com/en-us/pricing/details/data-explorer/", "categories": ["export"], "icon_filename": "azuredataex.jpg", "keywords": ["Azure Data Explorer", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-event", "meta": {"name": "Azure Event Hub", "link": "https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about", "categories": ["export"], "icon_filename": "azureeventhub.png", "keywords": ["Azure Event Hub", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-bigquery", "meta": {"name": "Google BigQuery", "link": "https://cloud.google.com/bigquery/", "categories": ["export"], "icon_filename": "bigquery.png", "keywords": ["export", "Google BigQuery", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-blueflood", "meta": {"name": "Blueflood", "link": "http://blueflood.io/", "categories": ["export"], "icon_filename": "blueflood.png", "keywords": ["export", "Blueflood", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. 
So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-chronix", "meta": {"name": "Chronix", "link": "https://dbdb.io/db/chronix", "categories": ["export"], "icon_filename": "chronix.png", "keywords": ["export", "chronix", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-cortex", "meta": {"name": "Cortex", "link": "https://cortexmetrics.io/", "categories": ["export"], "icon_filename": "cortex.png", "keywords": ["export", "cortex", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-crate", "meta": {"name": "CrateDB", "link": "https://crate.io/", "categories": ["export"], "icon_filename": "crate.svg", "keywords": ["export", "CrateDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
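For example, a minimal sketch of such a filter in `exporting.conf` (the pattern list is illustrative):\n\n```yaml\nsend hosts matching = !*child* *db*\n```\n\n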
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-elastic", "meta": {"name": "ElasticSearch", "link": "https://www.elastic.co/", "categories": ["export"], "icon_filename": "elasticsearch.svg", "keywords": ["export", "ElasticSearch", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
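For example, a minimal sketch of such a filter in `exporting.conf` (the pattern list is illustrative):\n\n```yaml\nsend hosts matching = !*child* *db*\n```\n\n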
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-gnocchi", "meta": {"name": "Gnocchi", "link": "https://wiki.openstack.org/wiki/Gnocchi", "categories": ["export"], "icon_filename": "gnocchi.svg", "keywords": ["export", "Gnocchi", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
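For example, a minimal sketch of such a filter in `exporting.conf` (the pattern list is illustrative):\n\n```yaml\nsend hosts matching = !*child* *db*\n```\n\n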
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-google-pubsub", "meta": {"name": "Google Cloud Pub Sub", "link": "https://cloud.google.com/pubsub", "categories": ["export"], "icon_filename": "pubsub.png"}, "keywords": ["exporter", "Google Cloud", "Pub Sub"], "overview": "# Google Cloud Pub Sub\n\nExport metrics to Google Cloud Pub/Sub Service\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries\n- Pub/Sub support also depends on those libraries' own dependencies, like `protobuf`, `protoc`, and `grpc`\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. 
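For example, a minimal sketch in `exporting.conf` (the patterns are illustrative):\n\n```yaml\nsend charts matching = !*reads apps.*\n```\n\n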
So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to `google_cloud_credentials.json`, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": ""}, {"id": "export-graphite", "meta": {"name": "Graphite", "link": "https://graphite.readthedocs.io/en/latest/", "categories": ["export"], "icon_filename": "graphite.png"}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. 
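For example, a hypothetical query against a local Agent (host, port and pattern are illustrative):\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'\n```\n\n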
The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-influxdb", "meta": {"name": "InfluxDB", "link": "https://www.influxdata.com/", "categories": ["export"], "icon_filename": "influxdb.svg", "keywords": ["InfluxDB", "Influx", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). 
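As a minimal sketch, that example would appear in `exporting.conf` as:\n\n```yaml\nsend charts matching = !*reads apps.*\n```\n\n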
There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-irondb", "meta": {"name": "IRONdb", "link": "https://docs.circonus.com/irondb/", "categories": ["export"], "icon_filename": "irondb.png", "keywords": ["export", "IRONdb", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. 
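For example, a minimal sketch in `exporting.conf` (the patterns are illustrative):\n\n```yaml\nsend charts matching = !*reads apps.*\n```\n\n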
So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-json", "meta": {"name": "JSON", "link": "https://learn.netdata.cloud/docs/exporting/json-document-databases", "categories": ["export"], "icon_filename": "json.svg"}, "keywords": ["exporter", "json"], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost:5448 | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
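For example, to export the human-friendly names (a minimal sketch):\n\n```yaml\nsend names instead of ids = yes\n```\n\n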
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.\n\n```yaml\n[json:https:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml", "troubleshooting": ""}, {"id": "export-kafka", "meta": {"name": "Kafka", "link": "https://kafka.apache.org/", "categories": ["export"], "icon_filename": "kafka.svg", "keywords": ["export", "Kafka", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-kairosdb", "meta": {"name": "KairosDB", "link": "https://kairosdb.github.io/", "categories": ["export"], "icon_filename": "kairos.png", "keywords": ["KairosDB", "kairos", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n
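To make the interplay of `prefix`, `hostname`, and this option concrete, here is a hedged sketch (the dot-joined metric path layout and the dimension shown in the comments are illustrative assumptions; the option names come from the detailed example below):\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n # with these two options, a dimension is typically exported under a\n # dot-joined path such as: netdata.my_hostname.system.cpu.user\n prefix = netdata\n hostname = my_hostname\n```\n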
\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-m3db", "meta": {"name": "M3DB", "link": "https://m3db.io/", "categories": ["export"], "icon_filename": "m3db.png", "keywords": ["export", "M3DB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n
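As a quick, hedged illustration of that URL parameter (the port 19999 is Netdata's default API port, and the URL-encoded pattern is the one used above; both are assumptions rather than part of this entry):\n\n```bash\n# preview which charts a pattern would select, without editing exporting.conf;\n# the filter parameter accepts the same simple patterns\ncurl 'http://localhost:19999/api/v1/allmetrics?format=shell&filter=!*reads%20apps.*'\n```\n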
\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-metricfire", "meta": {"name": "MetricFire", "link": "https://www.metricfire.com/", "categories": ["export"], "icon_filename": "metricfire.png", "keywords": ["export", "MetricFire", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-mongodb", "meta": {"name": "MongoDB", "link": "https://www.mongodb.com/", "categories": ["export"], "icon_filename": "mongodb.svg"}, "keywords": ["exporter", "MongoDB"], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n
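As a hedged sketch of overriding that socket timeout in the connection URI (the host, port, and 9500 ms value are illustrative assumptions; 9500 ms matches the default timeout for a 10-second update interval, per the note above):\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n # sockettimeoutms is a standard MongoDB URI option\n destination = mongodb://localhost:27017/?sockettimeoutms=9500\n database = your_database_name\n collection = your_collection_name\n```\n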
", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": ""}, {"id": "export-newrelic", "meta": {"name": "New Relic", "link": "https://newrelic.com/", "categories": ["export"], "icon_filename": "newrelic.svg", "keywords": ["export", "NewRelic", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-opentsdb", "meta": {"name": "OpenTSDB", "link": "https://github.com/OpenTSDB/opentsdb", "categories": ["export"], "icon_filename": "opentsdb.png"}, "keywords": ["exporter", "OpenTSDB", "scalable time series"], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata installed, configured, and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n
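Building on the explanation above, a small sketch that opts into names (the instance name and destination mirror the examples below; no other assumptions):\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n # export human friendly names (e.g. a device-mapper disk's name)\n # instead of system ids\n send names instead of ids = yes\n```\n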
\n{% /details %}\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plain text.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml", "troubleshooting": ""}, {"id": "export-pgsql", "meta": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["export"], "icon_filename": "postgres.svg", "keywords": ["export", "PostgreSQL", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n
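On Debian or Ubuntu systems, the two libraries can typically be installed as sketched below; the package names are an assumption for those distributions, not part of this entry, and vary elsewhere:\n\n```bash\n# install the protobuf and snappy development libraries,\n# then reinstall Netdata so the build detects them\nsudo apt-get install protobuf-compiler libprotobuf-dev libsnappy-dev\n```\n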
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-prometheus-remote", "meta": {"name": "Prometheus Remote Write", "link": "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage", "categories": ["export"], "icon_filename": "prometheus.svg"}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n
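Reading `destination` together with the `remote write URL path` option used in the examples below: the two are combined into the endpoint Netdata posts samples to, so the sketch here would (as an inference from those examples, not a statement from this entry) write to `http://10.11.14.2:2003/receive`:\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n # host:port from destination plus the URL path form the remote write endpoint\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n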
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-quasar", "meta": {"name": "QuasarDB", "link": "https://doc.quasar.ai/master/", "categories": ["export"], "icon_filename": "quasar.jpeg", "keywords": ["export", "quasar", "quasarDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-splunk", "meta": {"name": "Splunk SignalFx", "link": "https://www.splunk.com/en_us/products/observability.html", "categories": ["export"], "icon_filename": "splunk.svg", "keywords": ["export", "splunk", "signalfx", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
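\n\nPutting the chart filtering rules above into practice, a minimal sketch (hypothetical instance name, using the documented example pattern):\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n # export all apps.* charts except those ending in *reads\n send charts matching = !*reads apps.*\n```\n\n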
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-thanos", "meta": {"name": "Thanos", "link": "https://thanos.io/", "categories": ["export"], "icon_filename": "thanos.png", "keywords": ["export", "thanos", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-tikv", "meta": {"name": "TiKV", "link": "https://tikv.org/", "categories": ["export"], "icon_filename": "tikv.png", "keywords": ["export", "TiKV", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
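\n\nAs a sketch of the host filtering above on a central (parent) Netdata (hypothetical instance name, using the documented example patterns):\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n # send hosts matching *db*, but skip any host containing child\n send hosts matching = !*child* *db*\n```\n\n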
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-timescaledb", "meta": {"name": "TimescaleDB", "link": "https://www.timescale.com/", "categories": ["export"], "icon_filename": "timescale.png", "keywords": ["export", "TimescaleDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-victoria", "meta": {"name": "VictoriaMetrics", "link": "https://victoriametrics.com/products/open-source/", "categories": ["export"], "icon_filename": "victoriametrics.png", "keywords": ["export", "victoriametrics", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-vmware", "meta": {"name": "VMware Aria", "link": "https://www.vmware.com/products/aria-operations-for-applications.html", "categories": ["export"], "icon_filename": "aria.png", "keywords": ["export", "VMware", "Aria", "Tanzu", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
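\n\nTying the destination rules above together, a minimal sketch (hypothetical addresses) that enables TLS and adds a failover server; the second address is tried only when the first one fails:\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n # Netdata uses the first available server in this list\n destination = 10.11.14.2:2003 10.11.14.3:2003\n remote write URL path = /receive\n```\n\n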
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-wavefront", "meta": {"name": "Wavefront", "link": "https://docs.wavefront.com/wavefront_data_ingestion.html", "categories": ["export"], "icon_filename": "wavefront.png", "keywords": ["export", "Wavefront", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "notify-alerta", "meta": {"name": "Alerta", "link": "https://alerta.io/", "categories": ["notify.agent"], "icon_filename": "alerta.png"}, "keywords": ["Alerta"], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or also from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | set `ALERTA_WEBHOOK_URL` to the API url you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nYou can edit `DEFAULT_RECIPIENT_CUSTOM` in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"}, {"id": "notify-awssns", "meta": {"name": "AWS SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.agent"], "icon_filename": "aws.svg"}, "keywords": ["AWS SNS"], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. 
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and its permissions should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent in. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name               | Description                                                                       |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}`                  | Like \"name = value units\"                                                         |\n| `${status_message}`         | Like \"needs attention\", \"recovered\", \"is critical\"                                |\n| `${severity}`               | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\"                            |\n| `${raised_for}`             | Like \"(alarm was raised for 10 minutes)\"                                          |\n| `${host}`                   | The host that generated this event                                                |\n| `${url_host}`               | Same as ${host} but URL encoded                                                   |\n| `${unique_id}`              | The unique id of this event                                                       |\n| `${alarm_id}`               | The unique id of the alarm that generated this event                              |\n| `${event_id}`               | The incremental id of the event, for this alarm id                                |\n| `${when}`                   | The timestamp when this event occurred                                            |\n| `${name}`                   | The name of the alarm, as given in netdata health.d entries                       |\n| `${url_name}`               | Same as ${name} but URL encoded                                                   |\n| `${chart}`                  | The name of the chart (type.id)                                                   |\n| `${url_chart}`              | Same as ${chart} but URL encoded                                                  |\n| `${status}`                 | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL   |\n| `${old_status}`             | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL  |\n| `${value}`                  | The current value of the alarm                                                    |\n| `${old_value}`              | The previous value of the alarm                                                   |\n| `${src}`                    | The line number and file where the alarm has been configured                      |\n| `${duration}`               | The duration in seconds of the previous alarm state                               |\n| `${duration_txt}`           | Same as ${duration} for humans                                                    |\n| `${non_clear_duration}`     | The total duration in seconds this is/was non-clear                               |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans                                          |\n| `${units}`                  | The units of the value                                                            |\n| `${info}`                   | A short description of the alarm                                                  |\n| `${value_string}`           | Friendly value (with units)                                                       |\n| `${old_value_string}`       | Friendly old value (with units)                                                   |\n| `${image}`                  | The URL of an image to represent the status of the alarm                          |\n| `${color}`                  | A color in AABBCC format for the alarm                                            |\n| `${goto_url}`               | The URL the user can click to see the netdata dashboard                           |\n| `${calc_expression}`        | The expression evaluated to provide the value for the alarm                       |\n| `${calc_param_values}`      | The value of the variables in the evaluated expression                            |\n| `${total_warnings}`         | The total number of alarms in WARNING state on the host                           |\n| `${total_critical}`         | The total number of alarms in CRITICAL state on the host                          |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```conf\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"}, {"id": "notify-cloud-awssns", "meta": {"name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.cloud"], "icon_filename": "awssns.png"}, "keywords": ["awssns"], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n    - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the webhook integration on AWS SNS you need:\n1. [Set up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n   - On AWS SNS management console click on **Create topic**\n   - On the **Details** section, select the standard type and provide the topic name\n   - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n   - Finally, click on **Create topic** at the bottom of the page\n3. 
Now, use the new **Topic ARN** while adding AWS SNS integration on your space.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.cloud"], "icon_filename": "discord.png"}, "keywords": ["discord", "community"], "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- You need to have a Discord server able to receive webhook integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use the Webhook URL to add your notification configuration on the Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Discord:\n    - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**\n    - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n    - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mattermost", "meta": {"name": "Mattermost", "link": "https://mattermost.com/", "categories": ["notify.cloud"], "icon_filename": "mattermost.png"}, "keywords": ["mattermost"], "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n   - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like the one below:\n   `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n   - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Mattermost:\n    - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-microsoftteams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams", "categories": ["notify.cloud"], "icon_filename": "teams.svg"}, "keywords": ["microsoft", "teams"], "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **administrator**.\n- The Space to be on **Business** plan or higher.\n- A [Microsoft 365 for Business Account](https://www.microsoft.com/en-us/microsoft-365/business). Note that this is a **paid** account.\n\n### Settings on Microsoft Teams\n\n- The integration gets enabled at a team's channel level.\n- Click on the `...` (aka three dots) icon next to the channel name; it should appear when you hover over it.\n- Click on `Connectors`.\n- Look for the `Incoming Webhook` connector and click configure.\n- Provide a name for your Incoming Webhook Connector, for example _Netdata Alerts_. You can also customize it with a proper icon instead of using the default image.\n- Click `Create`.\n- The _Incoming Webhook URL_ is created.\n- That is the URL to be provided to the Netdata Cloud configuration.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings:\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it.\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration.\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n    - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mobile-app", "meta": {"name": "Netdata Mobile App", "link": "https://netdata.cloud", "categories": ["notify.cloud"], "icon_filename": "netdata.png"}, "keywords": ["mobile-app", "phone", "personal-notifications"], "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to log in to the Netdata Mobile Application to receive alert and reachability notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose the Sign In Option\n   - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.\n   - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Business Subscription**\n\n### Netdata Configuration Steps\n1. Click on **User settings** at the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. Use the **Show QR Code** Option to log in on your mobile device by scanning the **QR Code**\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-opsgenie", "meta": {"name": "Opsgenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.cloud"], "icon_filename": "opsgenie.png"}, "keywords": ["opsgenie", "atlassian"], "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to the integrations tab of your team and click **Add integration**\n2. Pick **API** from the available integrations. Copy your API Key and press **Save Integration**.\n3. Paste the copied API key into the corresponding field in the **Integration configuration** section of the Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. 
Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n    - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.cloud"], "icon_filename": "pagerduty.png"}, "keywords": ["pagerduty"], "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key**, which you will need to add to your notification configuration on the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For PagerDuty:\n    - Integration Key - a 32 character key provided by PagerDuty to receive events on your service.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-rocketchat", "meta": {"name": "RocketChat", "link": "https://www.rocket.chat/", "categories": ["notify.cloud"], "icon_filename": "rocketchat.png"}, "keywords": ["rocketchat"], "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click Save.\n5. You will end up with a webhook endpoint that looks like the one below:\n   `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n   - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For RocketChat:\n    - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.cloud"], "icon_filename": "slack.png"}, "keywords": ["slack"], "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a Slack app on your workspace to receive the webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n   - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n   - At the bottom of the **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n   - After pressing that, specify the channel where you want your notifications to be delivered\n   - Once completed, copy the Webhook URL that you will need to add to your notification configuration on the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-splunk", "meta": {"name": "Splunk", "link": "https://splunk.com/", "categories": ["notify.cloud"], "icon_filename": "splunk-black.svg"}, "keywords": ["Splunk"], "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.cloud"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- The Telegram bot token and chat ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n  - **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n    - Bot Token - the token of your bot\n    - Chat ID - the chat ID where your bot will deliver messages to\n\n### Getting the Telegram bot token and chat ID\n\n- Bot token: To create a new bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot` and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n  - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n  - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-webhook", "meta": {"name": "Webhook", "link": "https://en.wikipedia.org/wiki/Webhook", "categories": ["notify.cloud"], "icon_filename": "webhook.svg"}, "keywords": ["generic webhooks", "webhooks"], "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Pro** plan or higher\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n  * **Notification settings** are Netdata specific settings\n    - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n    - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n    - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n  * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Webhook:\n    - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secure, we only accept HTTPS URLs.\n    - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n    - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n      * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n      * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n      * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected.\n\n The notification content sent to the destination service will be a JSON object having these properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | rooms | object[object(string,string)] | Object with a list of room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n
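 An illustrative payload following this schema might look like the one below (every field value here is made up for the example):\n\n ```json\n {\n   \"message\": \"ram_usage is WARNING\",\n   \"alarm\": \"ram_usage = 85%\",\n   \"info\": \"RAM utilization of the system\",\n   \"chart\": \"system.ram\",\n   \"context\": \"system.ram\",\n   \"space\": \"my-space\",\n   \"rooms\": [{\"name\": \"All nodes\", \"url\": \"https://app.netdata.cloud/...\"}],\n   \"family\": \"ram\",\n   \"class\": \"Utilization\",\n   \"severity\": \"warning\",\n   \"date\": \"2024-01-01T00:00:00Z\",\n   \"duration\": \"5 minutes\",\n   \"additional_active_critical_alerts\": 0,\n   \"additional_active_warning_alerts\": 1,\n   \"alarm_url\": \"https://app.netdata.cloud/...\"\n }\n ```\n\n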
 #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n |            **Header**           |          **Value**          |\n |:-------------------------------:|-----------------------------|\n | Content-Type                    | application/json            |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS), identifying itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
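\n\n To sanity-check the stored file (an optional step; it assumes `openssl` is installed and that you saved the certificate as `/path/to/Netdata_CA.pem`, the path used in the web server examples below), you can print its subject and validity dates:\n\n ```bash\n openssl x509 -in /path/to/Netdata_CA.pem -noout -subject -dates\n ```\n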
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n     listen 443 ssl default_server;\n\n     # ... existing SSL configuration for server authentication ...\n     ssl_verify_client on;\n     ssl_client_certificate /path/to/Netdata_CA.pem;\n\n     location / {\n         if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n             return 403;\n         }\n         # ... existing location configuration ...\n     }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n\n <VirtualHost *:443>\n     # ... existing SSL configuration for server authentication ...\n     SSLVerifyClient require\n     SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n </VirtualHost>\n\n <Directory /var/www/>\n     Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n     # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - You will receive this request on your application and it must construct a response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
The response will be in the format:\n\n ```json\n {\n   \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n   - A base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n   - Valid response_token and JSON format.\n   - Latency less than 5 seconds.\n   - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n     # the token sent by Netdata as a query parameter\n     token = request.args.get('crc_token').encode('ascii')\n\n     # creates HMAC SHA-256 hash from incoming token and your consumer secret\n     sha256_hash_digest = hmac.new(key.encode(),\n                                   msg=token,\n                                   digestmod=hashlib.sha256).digest()\n\n     # construct response data with base64 encoded hash\n     response = {\n         'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n     }\n\n     # returns properly formatted json response\n     return json.dumps(response)\n ```\n
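\n You can exercise such a handler locally before wiring it up (a hypothetical smoke test; it assumes the app runs on the Flask development server at port 5000):\n\n ```bash\n curl 'http://localhost:5000/webhooks/netdata?crc_token=some-random-string'\n ```\n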
\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-custom", "meta": {"name": "Custom", "link": "", "categories": ["notify.agent"], "icon_filename": "custom.png"}, "keywords": ["custom"], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n    # example human readable SMS\n    local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n    # limit it to 160 characters and encode it for use in a URL\n    urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n    # a space separated list of the recipients to send alarms to\n    to=\"${1}\"\n\n    for phone in ${to}; do\n        httpcode=$(docurl -X POST \\\n            --data-urlencode \"From=XXX\" \\\n            --data-urlencode \"To=${phone}\" \\\n            --data-urlencode \"Body=${msg}\" \\\n            -u \"${accountsid}:${accounttoken}\" \\\n            https://domain.website.com/)\n\n        if [ \"${httpcode}\" = \"200\" ]; then\n            info \"sent custom notification ${msg} to ${phone}\"\n            sent=$((sent + 1))\n        else\n            error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n        fi\n    done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name               | Description                                                                       |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}`                  | Like \"name = value units\"                                                         |\n| `${status_message}`         | Like \"needs attention\", \"recovered\", \"is critical\"                                |\n| `${severity}`               | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\"                            |\n| `${raised_for}`             | Like \"(alarm was raised for 10 minutes)\"                                          |\n| `${host}`                   | The host that generated this event                                                |\n| `${url_host}`               | Same as ${host} but URL encoded                                                   |\n| `${unique_id}`              | The unique id of this event                                                       |\n| `${alarm_id}`               | The unique id of the alarm that generated this event                              |\n| `${event_id}`               | The incremental id of the event, for this alarm id                                |\n| `${when}`                   | The timestamp when this event occurred                                            |\n| `${name}`                   | The name of the alarm, as given in netdata health.d entries                       |\n| `${url_name}`               | Same as ${name} but URL encoded                                                   |\n| `${chart}`                  | The name of the chart (type.id)                                                   |\n| `${url_chart}`              | Same as ${chart} but URL encoded                                                  |\n| `${status}`                 | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL   |\n| `${old_status}`             | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL  |\n| `${value}`                  | The current value of the alarm                                                    |\n| `${old_value}`              | The previous value of the alarm                                                   |\n| `${src}`                    | The line number and file where the alarm has been configured                      |\n| `${duration}`               | The duration in seconds of the previous alarm state                               |\n| `${duration_txt}`           | Same as ${duration} for humans                                                    |\n| `${non_clear_duration}`     | The total duration in seconds this is/was non-clear                               |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans                                          |\n| `${units}`                  | The units of the value                                                            |\n| `${info}`                   | A short description of the alarm                                                  |\n| `${value_string}`           | Friendly value (with units)                                                       |\n| `${old_value_string}`       | Friendly old value (with units)                                                   |\n| `${image}`                  | The URL of an image to represent the status of the alarm                          |\n| `${color}`                  | A color in AABBCC format for the alarm                                            |\n| `${goto_url}`               | The URL the user 
can click to see the netdata dashboard                           |\n| `${calc_expression}`        | The expression evaluated to provide the value for the alarm                       |\n| `${calc_param_values}`      | The value of the variables in the evaluated expression                            |\n| `${total_warnings}`         | The total number of alarms in WARNING state on the host                           |\n| `${total_critical}`         | The total number of alarms in CRITICAL state on the host                          |\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n    # example human readable SMS\n    local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n    # limit it to 160 characters and encode it for use in a URL\n    urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n    # a space separated list of the recipients to send alarms to\n    to=\"${1}\"\n\n    for phone in ${to}; do\n        httpcode=$(docurl -X POST \\\n            --data-urlencode \"From=XXX\" \\\n            --data-urlencode \"To=${phone}\" \\\n            --data-urlencode \"Body=${msg}\" \\\n            -u \"${accountsid}:${accounttoken}\" \\\n            https://domain.website.com/)\n\n        if [ \"${httpcode}\" = \"200\" ]; then\n            info \"sent custom notification ${msg} to ${phone}\"\n            sent=$((sent + 1))\n        else\n            error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n        fi\n    done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"}, {"id": "notify-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.agent"], "icon_filename": "discord.png"}, "keywords": ["Discord"], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n
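\nBefore enabling the integration, you can check the webhook itself with a one-off message (a sketch; the URL is the placeholder from above):\n\n```bash\n# post a simple test message to the Discord webhook\ncurl -H \"Content-Type: application/json\" \\\n -d '{\"content\": \"test from netdata\"}' \\\n \"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n```\n\nIf the message appears in the channel tied to the webhook, the URL is good to use in `DISCORD_WEBHOOK_URL`.\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"}, {"id": "notify-dynatrace", "meta": {"name": "Dynatrace", "link": "https://dynatrace.com", "categories": ["notify.agent"], "icon_filename": "dynatrace.svg"}, "keywords": ["Dynatrace"], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 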
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers but make sure the server is network visible from your Netdata hosts. The Dynatrace server should be prefixed with the protocol (`http://` or `https://`), for example: `https://monitor.example.com`.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, the URL for a generated API token might look like: `https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all`. In that case, the Space is `2a93fe0e-4cd5-469a-9d0d-1a064235cfce`.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event and is correlated with all hosts tagged with the Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space; it is the URL part of the page you have access to in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: `https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all`. In that case, the Space is `2a93fe0e-4cd5-469a-9d0d-1a064235cfce`.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"}, {"id": "notify-email", "meta": {"name": "Email", "link": "", "categories": ["notify.agent"], "icon_filename": "email.png"}, "keywords": ["email"], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n
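\nThe sendmail prerequisite is easy to check by hand: become the `netdata` user and push a short message through the sendmail interface (a minimal sketch; the recipient address is a placeholder):\n\n```bash\n# become the netdata user, since Netdata submits mail as that user\nsudo su -s /bin/bash netdata\n\n# send a minimal test message through the sendmail interface\n{ echo \"Subject: Netdata sendmail test\"; echo; echo \"test body\"; } | sendmail you@example.com\n```\n\nIf the message does not arrive, check your MTA's logs before changing any Netdata settings.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 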
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"}, {"id": "notify-flock", "meta": {"name": "Flock", "link": "https://support.flock.com/", "categories": ["notify.agent"], "icon_filename": "flock.png"}, "keywords": ["Flock"], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"}, {"id": "notify-gotify", "meta": {"name": "Gotify", "link": "https://gotify.net/", "categories": ["notify.agent"], "icon_filename": "gotify.png"}, "keywords": ["gotify"], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"}, {"id": "notify-irc", "meta": {"name": "IRC", "link": "", "categories": ["notify.agent"], "icon_filename": "irc.png"}, "keywords": ["IRC"], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want. | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"}, {"id": "notify-kavenegar", "meta": {"name": "Kavenegar", "link": "https://kavenegar.com/", "categories": ["notify.agent"], "icon_filename": "kavenegar.png"}, "keywords": ["Kavenegar"], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/) is a service for software developers, based in Iran, that provides SMS sending and receiving as well as voice calls through its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"}, {"id": "notify-matrix", "meta": {"name": "Matrix", "link": "https://spec.matrix.org/unstable/push-gateway-api/", "categories": ["notify.agent"], "icon_filename": "matrix.svg"}, "keywords": ["Matrix"], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). 
These tokens usually don't expire.\n- The room ids that you want to send the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```sh\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe room ids are unique identifiers and can be obtained from the room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"}, {"id": "notify-messagebird", "meta": {"name": "MessageBird", "link": 
"https://messagebird.com/", "categories": ["notify.agent"], "icon_filename": "messagebird.svg"}, "keywords": ["MessageBird"], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"}, {"id": "notify-ntfy", "meta": {"name": "ntfy", "link": "https://ntfy.sh/", "categories": ["notify.agent"], "icon_filename": 
"ntfy.svg"}, "keywords": ["ntfy"], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. 
See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"}, {"id": "notify-opsgenie", "meta": {"name": "OpsGenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.agent"], "icon_filename": "opsgenie.png"}, "keywords": ["OpsGenie"], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"}, {"id": "notify-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.agent"], "icon_filename": "pagerduty.png"}, "keywords": ["PagerDuty"], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"}, {"id": "notify-prowl", "meta": {"name": "Prowl", "link": "https://www.prowlapp.com/", "categories": ["notify.agent"], "icon_filename": "prowl.png"}, "keywords": ["Prowl"], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. 
Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"}, {"id": "notify-pushbullet", "meta": {"name": "Pushbullet", "link": "https://www.pushbullet.com/", "categories": ["notify.agent"], "icon_filename": "pushbullet.png"}, "keywords": ["Pushbullet"], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"}, {"id": "notify-pushover", "meta": {"name": "PushOver", "link": "https://pushover.net/", "categories": ["notify.agent"], "icon_filename": "pushover.png"}, "keywords": ["PushOver"], "overview": "# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during the do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. 
This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | Set `PUSHOVER_APP_TOKEN` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n
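\nYou can also verify the Application and User tokens outside Netdata with Pushover's documented REST endpoint (a sketch; the tokens are placeholders):\n\n```bash\n# send a one-off test push via the Pushover API\ncurl -s \\\n --form-string \"token=XXXXXXXXX\" \\\n --form-string \"user=USERTOKEN\" \\\n --form-string \"message=test from netdata\" \\\n https://api.pushover.net/1/messages.json\n```\n\nA JSON reply containing `\"status\": 1` means both tokens are valid.\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"}, {"id": "notify-rocketchat", "meta": {"name": "RocketChat", "link": "https://rocket.chat/", "categories": ["notify.agent"], "icon_filename": "rocketchat.png"}, "keywords": ["RocketChat"], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. 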
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"}, {"id": "notify-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.agent"], "icon_filename": "slack.png"}, "keywords": ["Slack"], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"}, {"id": "notify-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.agent"], "icon_filename": "slack.png"}, "keywords": ["Slack"], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Slack app with an incoming webhook configured; see Slack's [guide on the topic](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to `YES`. | YES | yes |\n| SLACK_WEBHOOK_URL | Set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n
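##### Manual webhook test (optional)\n\nSlack incoming webhooks accept a minimal JSON payload, so you can confirm the webhook URL works before enabling the integration. This is a standard Slack webhook call, not a Netdata command; the URL below is the placeholder from the example above, so substitute your own:\n\n```bash\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"text\": \"Test message from Netdata setup\"}' \"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n```\n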
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"}, {"id": "notify-sms", "meta": {"name": "SMS", "link": "http://smstools3.kekekasvi.com/", "categories": ["notify.agent"], "icon_filename": "sms.svg"}, "keywords": ["SMS tools 3", "SMS", "Messaging"], "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nSMS Server Tools 3 is an SMS gateway application that can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n - Be a member of the `smsd` group\n - To verify that the steps above were successful, switch to the `netdata` user and run `sendsms phone message` (see the manual test below).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`; otherwise, Netdata will search for it in your system `$PATH`. | | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | YES | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: `PHONE1` `PHONE2`. | | yes |\n\n##### sendsms\n\n```conf\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n```\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n
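##### Manual sendsms test (optional)\n\nTo confirm the permissions described in the prerequisites, you can send one SMS by hand as the `netdata` user, using the `sendsms phone message` invocation mentioned above. The phone number below is a placeholder; use a real recipient:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# send a single test message\nsendsms 1234567890 \"Test message from Netdata setup\"\n```\n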
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"}, {"id": "notify-syslog", "meta": {"name": "syslog", "link": "", "categories": ["notify.agent"], "icon_filename": "syslog.png"}, "keywords": ["syslog"], "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. This is available on virtually every Linux system and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging. | local6 | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to `YES`. Make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with `netdata`.\n\nThe facility and level are the standard syslog facility and level options; for more information on them, see your local `logger` and `syslog` documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is preferable to let your local syslog daemon handle forwarding to remote systems (virtually all syslog daemons support at least simple forwarding, and the most popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n
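##### Manual logger test (optional)\n\nSince this integration relies on the `logger` command, you can sanity-check your syslog setup by logging one line by hand and then looking for it in your system log. The facility, level, and tag below mirror the defaults described above; adjust them to your configuration:\n\n```bash\nlogger -p local6.info -t netdata \"Test message from Netdata setup\"\n```\n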
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"}, {"id": "notify-teams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams/log-in", "categories": ["notify.agent"], "icon_filename": "msteams.svg"}, "keywords": ["Microsoft", "Teams", "MS teams"], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. You can use the same URL on all your Netdata servers, or create a separate one for each.\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to `YES`. | YES | yes |\n| MSTEAMS_WEBHOOK_URL | Set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n
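##### Manual webhook test (optional)\n\nMicrosoft Teams incoming webhooks accept a simple JSON payload, so you can verify the webhook URL before wiring it into Netdata. This is a generic webhook call, not a Netdata command; the URL below is the placeholder from the example above, so substitute your own:\n\n```bash\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"text\": \"Test message from Netdata setup\"}' \"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\n```\n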
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"}, {"id": "notify-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.agent"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. To get one, contact the [@BotFather](https://t.me/BotFather) bot and send the command `/newbot`, then follow the instructions. Invite your bot to the group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite the [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and send the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to `YES`. | YES | yes |\n| TELEGRAM_BOT_TOKEN | Set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set `DEFAULT_RECIPIENT_TELEGRAM` to the chat ID you want the alert notifications to be sent to. You can define multiple chat IDs like this: `-49999333322` `-1009999222255`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n
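##### Manual API test (optional)\n\nYou can confirm that a bot token and chat ID work before enabling the integration by calling Telegram's Bot API `sendMessage` method directly. The token and chat ID below are the placeholders from the example above, so substitute your own:\n\n```bash\ncurl -s \"https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/sendMessage\" -d \"chat_id=-49999333876\" -d \"text=Test message from Netdata setup\"\n```\n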
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"}, {"id": "notify-twilio", "meta": {"name": "Twilio", "link": "https://www.twilio.com/", "categories": ["notify.agent"], "icon_filename": "twilio.png"}, "keywords": ["Twilio"], "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your Account SID and Auth Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification:\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to `YES`. | YES | yes |\n| TWILIO_ACCOUNT_SID | Set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set `DEFAULT_RECIPIENT_TWILIO` to the number you want the alert notifications to be sent to. You can define multiple numbers like this: `+15555555555` `+17777777777`. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_TWILIO` with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n
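##### Manual API test (optional)\n\nTo verify your credentials outside of Netdata, you can send one SMS through Twilio's REST API Messages endpoint. The SID, token, and phone numbers below are the placeholders from the example above, so substitute your own:\n\n```bash\ncurl -X POST \"https://api.twilio.com/2010-04-01/Accounts/xxxxxxxxx/Messages.json\" -u \"xxxxxxxxx:xxxxxxxxxx\" --data-urlencode \"From=xxxxxxxxxxx\" --data-urlencode \"To=+15555555555\" --data-urlencode \"Body=Test message from Netdata setup\"\n```\n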
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"}] +export const integrations = [{"meta": {"plugin_name": "apps.plugin", "module_name": "apps", "monitored_instance": {"name": "Applications", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "applications.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["applications", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-apps-Applications", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "groups", "monitored_instance": {"name": "User Groups", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "user.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["groups", "processes", "user auditing", "authorization", "os", "host monitoring"], "most_popular": false}, "overview": "# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization on a user groups context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### 
Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user_group | The name of the user group. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-groups-User_Groups", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "users", "monitored_instance": {"name": "Users", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "users.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["users", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization on a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | The name of the user. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-users-Users", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Containers", "link": "", "categories": ["data-collection.containers-and-vms"], "icon_filename": "container.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number 
of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Kubernetes Containers", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["k8s", "kubernetes", "pods", "containers"], "most_popular": true}, "overview": "# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "LXC Containers", "link": "", "icon_filename": "lxc.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["lxc", "lxd", "container"], "most_popular": true}, "overview": "# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 
cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-LXC_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Libvirt Containers", "link": "", "icon_filename": "libvirt.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["libvirt", "container"], "most_popular": true}, "overview": "# Libvirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Libvirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Libvirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Proxmox Containers", "link": "", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["proxmox", "container"], "most_popular": true}, "overview": "# Proxmox Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Proxmox_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Systemd Services", "link": "", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"], "keywords": ["systemd", "services"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Systemd_Services", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Virtual Machines", "link": "", "icon_filename": "container.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vms", "virtualization", "container"], "most_popular": true}, "overview": "# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Virtual Machines for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Virtual_Machines", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "oVirt Containers", "link": "", "icon_filename": "ovirt.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ovirt", "container"], "most_popular": true}, "overview": "# oVirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-oVirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "ap", "monitored_instance": {"name": "Access Points", "link": "https://learn.netdata.cloud/docs/data-collection/networking-stack-and-network-interfaces/linux-access-points", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ap", "access", "point", "wireless", "network"], "most_popular": false}, "overview": "# Access Points\n\nPlugin: charts.d.plugin\nModule: ap\n\n## Overview\n\nThe ap collector visualizes data related to wireless access points.\n\nIt uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin is able to auto-detect if you are running access points on your linux box.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### `iw` utility.\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/ap.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the ap collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 
| 1 | no |\n| ap_priority | Controls the order of charts on the netdata dashboard. | 6900 | no |\n| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Change the collection frequency\n\nSpecify a custom collection frequency (update_every) for this collector\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\nap_update_every=10\n\n# the charts priority on the dashboard\n#ap_priority=6900\n\n# the number of retries to do in case of failure\n# before disabling the module\n#ap_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 ap\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit, expected | Mbps |\n\n", "integration_type": "collector", "id": "charts.d.plugin-ap-Access_Points", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/ap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "apcupsd", "monitored_instance": {"name": "APC UPS", "link": "https://www.apc.com", "categories": ["data-collection.ups"], "icon_filename": "apc.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ups", "apc", "power", "supply", "battery", "apcupsd"], "most_popular": false}, "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\"\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. 
Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", "integration_type": "collector", "id": "charts.d.plugin-apcupsd-APC_UPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "libreswan", "monitored_instance": {"name": "Libreswan", "link": "https://libreswan.org/", "categories": ["data-collection.vpns"], "icon_filename": "libreswan.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vpn", "libreswan", "network", "ipsec"], "most_popular": false}, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost likely, the `netdata` user will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...` to get access to libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. 
Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "charts.d.plugin-libreswan-Libreswan", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "opensips", "monitored_instance": {"name": "OpenSIPS", "link": "https://opensips.org/", "categories": ["data-collection.telephony-servers"], "icon_filename": "opensips.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["opensips", "sip", "voice", "video", "stream"], "most_popular": false}, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` along with a default set of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 
| 5 | no |\n| opensips_priority | The charts priority on the dashboard. | 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", "integration_type": "collector", "id": "charts.d.plugin-opensips-OpenSIPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (sysfs)", "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", "categories": 
["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "sysfs", "hwmon", "rpi", "raspberry pi"], "most_popular": false}, "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).\nFor all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. 
| 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code-generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n{% /details %}\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory where the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code-generated ones\n# leave at 1, it is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", "integration_type": "collector", "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cups.plugin", "module_name": "cups.plugin", "monitored_instance": {"name": "CUPS", "link": "https://www.cups.org/", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "cups.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance to achieve optimal printing system operations. 
Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses the CUPS shared library to connect to and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the server. Netdata sets permissions at installation time to reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when the CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", "integration_type": "collector", "id": "cups.plugin-cups.plugin-CUPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/extfrag", "monitored_instance": {"name": "System Memory Fragmentation", "link": "https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["extfrag", "extfrag_threshold", "memory fragmentation"], "most_popular": false}, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it locally. It is also recommended to add an entry to fstab (5) so that the filesystem is mounted automatically before netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/zswap", "monitored_instance": {"name": "Linux ZSwap", "link": "https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "zswap", "frontswap", "swap cache"], "most_popular": false}, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/zswap", "monitored_instance": {"name": "Linux ZSwap", "link": "https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "zswap", "frontswap", "swap cache"], "most_popular": false}, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, the plugin is instead installed with the SUID bit set so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it locally. It is also recommended to add an entry to your fstab(5) so that the filesystem is mounted automatically, avoiding the need to mount it manually before starting Netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "intel_rapl", "monitored_instance": {"name": "Power Capping", "link": "https://www.kernel.org/doc/html/next/power/powercap/powercap.html", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["power capping", "energy"], "most_popular": false}, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, the plugin is instead installed with the SUID bit set so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping information.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it locally. It is also recommended to add an entry to your fstab(5) so that the filesystem is mounted automatically, avoiding the need to mount it manually before starting Netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the power consumption of Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", "integration_type": "collector", "id": "debugfs.plugin-intel_rapl-Power_Capping", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "diskspace.plugin", "module_name": "diskspace.plugin", "monitored_instance": {"name": "Disk space", "link": "", "categories": ["data-collection.linux-systems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "ebpf.plugin", "module_name": "disk"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "I/O", "space", "inode"], "most_popular": false}, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from the `/proc/self/mountinfo` and `/proc/diskstats` files.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per-mount-point options in a `[plugin:proc:diskspace:mountpoint]` section.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove charts when a device is unmounted from the host. | yes | no |\n| check for new mount points every | How often to parse proc files and check for new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts a Netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a Netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts a Netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define whether the plugin will show space usage metrics. When set to `auto`, the plugin shows them for any filesystem or path not excluded by the previous options. | auto | no |\n| inodes usage for all disks | Define whether the plugin will show inode usage metrics. When set to `auto`, the plugin shows them for any filesystem or path not excluded by the previous options. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
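As an illustration only (the added `/mnt/scratch/*` pattern is hypothetical, not a shipped default), excluding an extra path from space metrics could look like:\n\n```ini\n[plugin:proc:diskspace]\n update every = 1\n exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* /mnt/scratch/*\n```\n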
", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "diskspace.plugin-diskspace.plugin-Disk_space", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "cachestat", "monitored_instance": {"name": "eBPF Cachestat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Page cache", "Hit ratio", "eBPF"], "most_popular": false}, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel is handling files.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n
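\nExpressed as shell commands, the sequence above is roughly the following (a sketch only; `/path/to/your/config` is a placeholder, the image path shown is for x86, and the initrd and boot loader steps vary by distribution):\n\n```bash\n# run from the kernel source tree\ncd /usr/src/linux\ncp /path/to/your/config .config\nmake oldconfig\nmake bzImage\nmake modules\nsudo make modules_install\n# copy the image to the boot directory (x86 path shown)\nsudo cp arch/x86/boot/bzImage /boot/vmlinuz-custom\n# then generate an initrd and update your boot loader as your distribution requires\n```\n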
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
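As an illustration only (hypothetical values, not a shipped example), enabling the apps and cgroups integrations with a slower collection rate could look like:\n\n```ini\n[global]\n update every = 10\n apps = yes\n cgroups = yes\n```\n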
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-cachestat-eBPF_Cachestat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "dcstat", "monitored_instance": {"name": "eBPF DCstat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Directory Cache", "File system", "eBPF"], "most_popular": false}, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of which files are in memory or on a storage device.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. 
Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", "integration_type": "collector", "id": "ebpf.plugin-dcstat-eBPF_DCstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "disk", "monitored_instance": {"name": "eBPF Disk", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hard Disk", "eBPF", "latency", "partition"], "most_popular": false}, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttaches tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-disk-eBPF_Disk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filedescriptor", "monitored_instance": {"name": "eBPF Filedescriptor", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["file", "eBPF", "fd", "open", "close"], "most_popular": false}, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing a file descriptor, and possible errors.\n\nAttaches tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and on how frequently files are opened and closed, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. 
| yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filedescriptor-eBPF_Filedescriptor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filesystem", "monitored_instance": {"name": "eBPF Filesystem", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Filesystem", "ext4", "btrfs", "nfs", "xfs", "zfs", "eBPF", "latency", "I/O"], "most_popular": false}, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, like I/O events.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections: `[global]` overwrites the default options, while `[filesystem]` allows the user to select which filesystems to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n
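As an illustration only (hypothetical values, not a shipped example), restricting latency monitoring to ext4 and XFS could look like:\n\n```ini\n[filesystem]\n btrfsdist = no\n ext4dist = yes\n nfsdist = no\n xfsdist = yes\n zfsdist = no\n```\n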
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filesystem-eBPF_Filesystem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "hardirq", "monitored_instance": {"name": "eBPF Hardirq", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["HardIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttaches tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latest timestamp for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-hardirq-eBPF_Hardirq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mdflush", "monitored_instance": {"name": "eBPF MDflush", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["MD", "RAID", "eBPF"], "most_popular": false}, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. 
The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the last collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mdflush-eBPF_MDflush", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mount", "monitored_instance": {"name": "eBPF Mount", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["mount", "umount", "device", "eBPF"], "most_popular": false}, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls for the mount and umount syscalls.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or in the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nIf these options are not set, you need to get the kernel source code from https://kernel.org, or preferably a kernel source package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also their returns (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls for the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mount-eBPF_Mount", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "oomkill", "monitored_instance": {"name": "eBPF OOMkill", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application", "memory"], "most_popular": false}, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that reach an out-of-memory condition.\n\nAttaches a tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show the cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n
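To cross-check these charts against the kernel's own record of OOM events (a sketch assuming a systemd-based host; use `dmesg` or your syslog otherwise):\n\n```bash\n# Kernel messages name the processes chosen by the OOM killer\njournalctl -k --no-pager | grep -i 'out of memory'\n```\n\n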
### Per apps\n\nThese metrics show the applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", "integration_type": "collector", "id": "ebpf.plugin-oomkill-eBPF_OOMkill", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "process", "monitored_instance": {"name": "eBPF Process", "link": "https://github.com/netdata/netdata/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Memory", "plugin", "eBPF"], "most_popular": false}, "overview": "# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts you need to compile netdata with the flag `NETDATA_DEV_MODE`.\n\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin is allocating memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n\n", "integration_type": "collector", "id": "ebpf.plugin-process-eBPF_Process", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "processes", "monitored_instance": {"name": "eBPF Processes", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["thread", "fork", "process", "eBPF"], "most_popular": false}, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n
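Independently of recompiling, you can check whether the running kernel already ships BTF data, which the plugin prefers for CO-RE loading (a sketch; this path exists on kernels built with CONFIG_DEBUG_INFO_BTF=y):\n\n```bash\n# Present when the kernel exposes BTF type information\nls /sys/kernel/btf/vmlinux\n```\n\n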
#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here will impact only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-processes-eBPF_Processes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "shm", "monitored_instance": {"name": "eBPF SHM", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "shared memory", "eBPF"], "most_popular": false}, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. 
Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
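For an independent view of the System V shared memory segments these syscalls operate on, the standard `ipcs` tooling can help when interpreting the charts below (a sketch; part of util-linux on most distributions):\n\n```bash\n# List active shared memory segments and their owners\nipcs -m\n```\n\n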
### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-shm-eBPF_SHM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "socket", "monitored_instance": {"name": "eBPF Socket", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["TCP", "UDP", "bandwidth", "server", "connection", "socket"], "most_popular": false}, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the section `[global]`. Options inside `network connections` are ignored for now.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
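If you want to pull these charts programmatically while validating the collector, the Agent's data endpoint can be queried directly (a sketch; it assumes the Agent listens on the default port 19999 and the chart exists on your node):\n\n```bash\n# Fetch the last 60 seconds of one of the charts documented below\ncurl -s 'http://localhost:19999/api/v1/data?chart=ip.tcp_functions&after=-60'\n```\n\n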
### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |\n| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connected_v4 | connections/s |\n| cgroup.net_conn_ipv6 | connected_v6 | connections/s |\n| cgroup.net_bytes_recv | received | calls/s |\n| cgroup.net_bytes_sent | sent | calls/s |\n| cgroup.net_tcp_recv | received | calls/s |\n| cgroup.net_tcp_send | sent | calls/s |\n| cgroup.net_retransmit | retransmitted | calls/s |\n| cgroup.net_udp_send | sent | calls/s |\n| cgroup.net_udp_recv | received | calls/s |\n| services.net_conn_ipv6 | a dimension per systemd service | connections/s |\n| services.net_bytes_recv | a dimension per systemd service | kilobits/s |\n| services.net_bytes_sent | a dimension per systemd service | kilobits/s |\n| services.net_tcp_recv | a dimension per systemd service | calls/s |\n| services.net_tcp_send | a dimension per systemd service | calls/s |\n| services.net_tcp_retransmit | a dimension per systemd service | calls/s |\n| services.net_udp_send | a dimension per systemd service | calls/s |\n| services.net_udp_recv | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-socket-eBPF_Socket", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "softirq", "monitored_instance": {"name": "eBPF SoftIRQ", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SoftIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\n
This collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
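The kernel's own per-CPU softirq counters offer a quick sanity check against the chart below (standard procfs interface):\n\n```bash\n# Counts per softirq type and CPU since boot\ncat /proc/softirqs\n```\n\n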
### Per eBPF SoftIRQ instance\n\nThese metrics show the latest timestamp for each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-softirq-eBPF_SoftIRQ", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "swap", "monitored_instance": {"name": "eBPF SWAP", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SWAP", "memory", "eBPF", "Hard Disk"], "most_popular": false}, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitor swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n
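Note that these charts only move when the host actually has active swap; a quick check with standard util-linux tooling:\n\n```bash\n# Lists active swap devices and files; empty output means no swap is configured\nswapon --show\n```\n\n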
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-swap-eBPF_SWAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "sync", "monitored_instance": {"name": "eBPF Sync", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "eBPF", "hard disk", "memory"], "most_popular": false}, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n
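To check whether `debugfs` is already mounted before running the command above (a sketch; `mountpoint` ships with util-linux):\n\n```bash\n# Mount debugfs only when it is not already available\nmountpoint -q /sys/kernel/debug || sudo mount -t debugfs none /sys/kernel/debug\n```\n\n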
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
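A simple way to see these charts move once the collector is running (assumption: the sync thread is enabled):\n\n```bash\n# Each invocation should register on the mem.sync chart below\nfor i in 1 2 3; do sync; sleep 1; done\n```\n\n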
### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.meory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-sync-eBPF_Sync", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "vfs", "monitored_instance": {"name": "eBPF VFS", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["virtual", "filesystem", "eBPF", "I/O", "files"], "most_popular": false}, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation follows a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the section `[global]`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) to the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
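As with the other eBPF charts, these can be queried straight from the Agent while debugging (a sketch assuming the default port 19999):\n\n```bash\n# Last 60 seconds of VFS I/O calls, rendered as CSV\ncurl -s 'http://localhost:19999/api/v1/data?chart=filesystem.vfs_io&after=-60&format=csv'\n```\n\n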
### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-vfs-eBPF_VFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.0.freq", "monitored_instance": {"name": "dev.cpu.0.freq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
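To see the raw value this collector reads, you can query the same sysctl directly on the FreeBSD host (assumption: the CPU driver exposes frequency control):\n\n```bash\n# Current frequency of the first CPU, in MHz\nsysctl dev.cpu.0.freq\n```\n\n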
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.0.freq instance\n\nThis metric shows the CPU scaling frequency status; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.temperature", "monitored_instance": {"name": "dev.cpu.temperature", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet current CPU temperature\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "devstat", "monitored_instance": {"name": "devstat", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information per hard disk available on host.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable possibility to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. 
| auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", "integration_type": "collector", "id": "freebsd.plugin-devstat-devstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getifaddrs", "monitored_instance": {"name": "getifaddrs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls `getifaddrs` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. 
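For example, a minimal sketch (assuming a plain `yes`/`no` value is accepted where the Options table below lists `auto` as the default) of disabling one of this module's metrics:\n\n```ini\n[plugin:freebsd:getifaddrs]\n bandwidth for all interfaces = no\n```\n\n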
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable possibility to discover new interfaces after plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for ipv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Do not show network traffic for listed interfaces. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nA general overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getifaddrs-getifaddrs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getmntinfo", "monitored_instance": {"name": "getmntinfo", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls `getmntinfo` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. 
| /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getmntinfo-getmntinfo", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "hw.intrcnt", "monitored_instance": {"name": "hw.intrcnt", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet total number of interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable interrupts metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show system interrupts frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-hw.intrcnt-hw.intrcnt", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "ipfw", "monitored_instance": {"name": "ipfw", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about FreeBSD firewall.\n\nThe plugin uses RAW socket to communicate with kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", "integration_type": "collector", "id": "freebsd.plugin-ipfw-ipfw", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.cp_time", "monitored_instance": {"name": "kern.cp_time", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe netdata main configuration file.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.cp_time-kern.cp_time", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.msq", "monitored_instance": {"name": "kern.ipc.msq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect number of IPC message queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
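For example, a minimal sketch of turning this module's metric off with the option documented under Options below:\n\n```ini\n[plugin:freebsd]\n kern.ipc.msq = no\n```\n\n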
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.msq-kern.ipc.msq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.sem", "monitored_instance": {"name": "kern.ipc.sem", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
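For example, a minimal sketch of disabling the semaphore metrics with the option documented under Options below:\n\n```ini\n[plugin:freebsd]\n kern.ipc.sem = no\n```\n\n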
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.sem-kern.ipc.sem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.shm", "monitored_instance": {"name": "kern.ipc.shm", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
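For example, a minimal sketch of disabling the shared memory metric with the option documented under Options below:\n\n```ini\n[plugin:freebsd]\n kern.ipc.shm = no\n```\n\n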
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics give status about current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.shm-kern.ipc.shm", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.icmp.stats", "monitored_instance": {"name": "net.inet.icmp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
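For example, a minimal sketch of disabling one of the ICMP metrics documented under Options below:\n\n```ini\n[plugin:freebsd:net.inet.icmp.stats]\n IPv4 ICMP packets = no\n```\n\n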
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.ip.stats", "monitored_instance": {"name": "net.inet.ip.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
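For example, a minimal sketch of disabling one of the IPv4 metrics documented under Options below:\n\n```ini\n[plugin:freebsd:net.inet.ip.stats]\n ipv4 errors = no\n```\n\n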
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.states", "monitored_instance": {"name": "net.inet.tcp.states", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
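For example, a minimal sketch of disabling the TCP state metric with the option documented under Options below:\n\n```ini\n[plugin:freebsd]\n net.inet.tcp.states = no\n```\n\n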
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.stats", "monitored_instance": {"name": "net.inet.tcp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
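For example, a minimal sketch (assuming a plain `yes`/`no` value is accepted where the Options table below lists `auto` as the default) of disabling one of the TCP metrics:\n\n```ini\n[plugin:freebsd:net.inet.tcp.stats]\n TCP SYN cookies = no\n```\n\n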
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.udp.stats", "monitored_instance": {"name": "net.inet.udp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.icmp6.stats", "monitored_instance": {"name": "net.inet6.icmp6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
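For example, a minimal sketch (assuming a plain `yes`/`no` value is accepted where the Options table below lists `auto` as the default) of disabling one of the IPv6 ICMP metrics:\n\n```ini\n[plugin:freebsd:net.inet6.icmp6.stats]\n icmp redirects = no\n```\n\n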
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.ip6.stats", "monitored_instance": {"name": "net.inet6.ip6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits 
on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.isr", "monitored_instance": {"name": "net.isr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe 
default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable a general overview of softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.isr-net.isr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "system.ram", "monitored_instance": {"name": "system.ram", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThese metrics show RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-system.ram-system.ram", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "uptime", "monitored_instance": {"name": "uptime", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow how long the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| uptime | Enable or disable the uptime metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "freebsd.plugin-uptime-uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.loadavg", "monitored_instance": {"name": "vm.loadavg", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitors the number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.loadavg-vm.loadavg", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_intr", "monitored_instance": {"name": "vm.stats.sys.v_intr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows the device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_soft", "monitored_instance": {"name": "vm.stats.sys.v_soft", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_swtch", "monitored_instance": {"name": "vm.stats.sys.v_swtch", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_pgfaults", "monitored_instance": {"name": "vm.stats.vm.v_pgfaults", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_swappgs", "monitored_instance": {"name": "vm.stats.vm.v_swappgs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.swap_info", "monitored_instance": {"name": "vm.swap_info", "link": "", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.swap_info-vm.swap_info", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.vmtotal", "monitored_instance": {"name": "vm.vmtotal", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on the host. | yes | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics show an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.vmtotal-vm.vmtotal", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "zfs", "monitored_instance": {"name": "zfs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for ZFS filesystem\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Show charts with zero metrics. | no | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", "integration_type": "collector", "id": "freebsd.plugin-zfs-zfs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freeipmi.plugin", "module_name": "freeipmi", "monitored_instance": {"name": "Intelligent Platform Management Interface (IPMI)", "link": "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": 
{"description": ""}, "keywords": ["sensors", "ipmi", "freeipmi", "ipmimonitoring"], "most_popular": true}, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\n\"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\"\n\n\nThe plugin uses open source library IPMImonitoring to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nLinux kernel module for IPMI can create big overhead.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n{% details summary=\"Command options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. 
| disabled | no |\n| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n{% /details %}\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry, so collecting once every 5 seconds is acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n{% /details %}\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. 
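For example, a minimal invocation (a sketch that assumes `ipmimonitoring` is in your `PATH`; it usually needs to run as root) is:\n\n```bash\nsudo ipmimonitoring\n```\n\n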
The first column is the wanted ID:\n\n```\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n```\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:\n\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", "integration_type": "collector", "id": "freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-activemq", "module_name": "activemq", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.message-brokers"], "icon_filename": "activemq.png", "name": "ActiveMQ", "link": "https://activemq.apache.org/"}, "alternative_monitored_instances": [], "keywords": ["message broker"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). 
| | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", "integration_type": "collector", "id": "go.d.plugin-activemq-ActiveMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-apache", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "Apache", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Apache", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-energid", "module_name": "apache", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energi Core Wallet", "link": "", "icon_filename": "energi.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["energid"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Energi Core Wallet\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis module monitors Energi Core Wallet instances.\nIt works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/energid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/energid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9796 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9796\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n - name: remote\n url: http://192.0.2.1:9796\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Energi Core Wallet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| energid.blockindex | blocks, headers | count |\n| energid.difficulty | difficulty | difficulty |\n| energid.mempool | max, usage, tx_size | bytes |\n| energid.secmem | total, used, free, locked | bytes |\n| energid.network | connections | connections |\n| energid.timeoffset | timeoffset | seconds |\n| energid.utxo_transactions | transactions, output_transactions | transactions |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Energi_Core_Wallet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/energid/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpd", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "HTTPD", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | 
Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-HTTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cassandra", "module_name": "cassandra", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.database-servers"], "icon_filename": "cassandra.svg", "name": "Cassandra", "link": "https://cassandra.apache.org/_/index.html"}, "alternative_monitored_instances": [], "keywords": ["nosql", "dbms", "db", "database"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and more, while also providing metrics for each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`:\n ```\n 
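# load jmx_exporter as a Java agent (the jar path and port 7072 are examples; match them to your install)\n 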
JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
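Thread pool names come from Cassandra itself; typical examples include `ReadStage`, `MutationStage`, and `CompactionExecutor`. 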
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-cassandra-Cassandra", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-chrony", "module_name": "chrony", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "chrony.jpg", "name": "Chrony", "link": "https://chrony.tuxfamily.org/"}, "alternative_monitored_instances": [], "keywords": [], "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}, "most_popular": false}, "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peers' activity status.\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers a Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. | 1 | no |\n\n{% /details %}\n
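> **Note**: By default, chronyd accepts monitoring commands from localhost only. To collect metrics from a remote chronyd (as in the Multi-instance example below), command access typically has to be opened up in the remote host's `chrony.conf`; the directives below are a sketch, not a drop-in configuration.\n\n```\n# chrony.conf on the monitored (remote) host - illustrative values\nbindcmdaddress 0.0.0.0\ncmdallow 192.0.2.0/24\n```\n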
#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_dispersion | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cockroachdb", "plugin_name": "go.d.plugin", "module_name": "cockroachdb", "monitored_instance": {"name": "CockroachDB", "link": "https://www.cockroachlabs.com/", "icon_filename": "cockroachdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cockroachdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe 
default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", "integration_type": "collector", "id": "go.d.plugin-cockroachdb-CockroachDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-consul", "plugin_name": "go.d.plugin", "module_name": "consul", "monitored_instance": {"name": "Consul", "link": "https://www.consul.io/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "consul.svg"}, "alternative_monitored_instances": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["service networking platform", "hashicorp"], "most_popular": true}, "overview": 
"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise license expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, 
quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks at the Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at the Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-consul-Consul", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/consul/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-coredns", "plugin_name": "go.d.plugin", "module_name": "coredns", "monitored_instance": {"name": "CoreDNS", "link": "https://coredns.io/", "icon_filename": "coredns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["coredns", "dns", "kubernetes"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. 
| | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
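An instance is uniquely identified by a set of labels.\n\nAll of these values are scraped from CoreDNS's built-in Prometheus endpoint. As a quick way to see the raw samples behind the charts, you can query that endpoint directly (a minimal sketch, assuming the default `http://127.0.0.1:9153/metrics` address from the examples above; the exact metric family names depend on your CoreDNS version):\n\n```bash\n# Show the raw per-server/per-zone request counters exposed by CoreDNS.\ncurl -s http://127.0.0.1:9153/metrics | grep coredns_dns_requests_total\n```\n\n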
### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-coredns-CoreDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchbase", "plugin_name": "go.d.plugin", "module_name": "couchbase", "monitored_instance": {"name": "Couchbase", "link": "https://www.couchbase.com/", "icon_filename": "couchbase.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchbase", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
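An instance is uniquely identified by a set of labels.\n\nThe per-bucket values come from Couchbase's REST API, which reports them as `basicStats` in the bucket listing. As a quick sanity check that the endpoint the collector targets is reachable, you can request the bucket list manually (a minimal sketch, assuming the default URL and the `foo`/`bar` credentials used in the examples above):\n\n```bash\n# Fetch per-bucket basicStats (quotaPercentUsed, opsPerSec, itemCount, ...).\ncurl -s -u foo:bar http://127.0.0.1:8091/pools/default/buckets\n```\n\n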
### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchbase-Couchbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchdb", "plugin_name": "go.d.plugin", "module_name": "couchdb", "monitored_instance": {"name": "CouchDB", "link": "https://couchdb.apache.org/", "icon_filename": "couchdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as the `-name` argument in `vm.args`. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. 
| | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
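An instance is uniquely identified by a set of labels.\n\nMost of these values map onto CouchDB's node-local statistics endpoints. You can inspect a raw counter for a given node manually (a minimal sketch, assuming the default URL and the default `_local` node name from the options above):\n\n```bash\n# Raw request counters behind couchdb.activity and couchdb.request_methods.\ncurl -s http://127.0.0.1:5984/_node/_local/_stats/couchdb/httpd_request_methods\n```\n\n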
### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchdb-CouchDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dns_query", "plugin_name": "go.d.plugin", "module_name": "dns_query", "monitored_instance": {"name": "DNS query", "link": "", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. 
Zero means no recheck will be scheduled. | 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record types. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dns_query-DNS_query", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsdist", "plugin_name": "go.d.plugin", "module_name": "dnsdist", "monitored_instance": {"name": "DNSdist", "link": "https://dnsdist.org/", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsdist", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSdist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsdist-DNSdist", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq", "plugin_name": "go.d.plugin", "module_name": "dnsmasq", "monitored_instance": {"name": "Dnsmasq", "link": "https://thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis 
collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n{% /details %}\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
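An instance is uniquely identified by a set of labels.\n\nDnsmasq publishes these counters as TXT records in the CHAOS class (`cachesize.bind`, `hits.bind`, `misses.bind`, and friends), which is how this collector reads them over DNS. You can reproduce such a query manually (a minimal sketch, assuming the default `127.0.0.1:53` server address from the examples above):\n\n```bash\n# Read the cache-size counter straight from a running dnsmasq.\ndig +short chaos txt cachesize.bind @127.0.0.1\n```\n\n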
### Per Dnsmasq instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq-Dnsmasq", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq_dhcp", "plugin_name": "go.d.plugin", "module_name": "dnsmasq_dhcp", "monitored_instance": {"name": "Dnsmasq DHCP", "link": "https://www.thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dhcp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP lease databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. 
| /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker", "plugin_name": "go.d.plugin", "module_name": "docker", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["container"], "most_popular": true}, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker container states, health status, and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List 
images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires the netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 1 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker-Docker", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker_engine", "plugin_name": "go.d.plugin", "module_name": "docker_engine", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker Engine", "link": "https://docs.docker.com/engine/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["docker", "container"], "most_popular": false}, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker TCP socket: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not 
expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker_engine-Docker_Engine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dockerhub", "plugin_name": "go.d.plugin", "module_name": "dockerhub", "monitored_instance": {"name": "Docker Hub repository", "link": "https://hub.docker.com/", "icon_filename": "docker.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["dockerhub"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repositories statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dockerhub-Docker_Hub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-elasticsearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elasticsearch", "link": "https://www.elastic.co/elasticsearch/", "icon_filename": "elasticsearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
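You can check what the local node returns before enabling the collector (a quick sketch, assuming the default port and using the local-node stats endpoint from the table above):\n\n```bash\n# fetch local-node stats; JSON output confirms the endpoint is reachable\n# assumes Elasticsearch on the default port 9200\ncurl -s http://127.0.0.1:9200/_nodes/_local/stats | head\n```\n\n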
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-Elasticsearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-opensearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenSearch", "link": "https://opensearch.org/", "icon_filename": "opensearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | 
[Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. 
Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-OpenSearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-envoy", "plugin_name": "go.d.plugin", "module_name": "envoy", "monitored_instance": {"name": "Envoy", "link": "https://www.envoyproxy.io/", "icon_filename": "envoy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["envoy", "proxy"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. 
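Before relying on the charts, you can verify that the endpoint responds (a quick sketch, assuming the admin interface listens on the port used in the examples above):\n\n```bash\n# print the first lines of the Prometheus text exposition\n# assumes the Envoy admin interface on 127.0.0.1:9901\ncurl -s http://127.0.0.1:9901/stats/prometheus | head\n```\n\n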
All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | 
errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-envoy-Envoy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-filecheck", "plugin_name": "go.d.plugin", "module_name": "filecheck", "monitored_instance": {"name": "Files and directories", "link": "", "icon_filename": "filesystem.svg", "categories": ["data-collection.linux-systems"]}, "keywords": ["files", "directories"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors files and directories.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. 
| 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n{% /details %}\n##### Directories\n\nDirectories monitoring example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: dirs_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Files and directories instance\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence | a dimension per file | boolean |\n| filecheck.file_mtime_ago | a dimension per file | seconds |\n| filecheck.file_size | a dimension per file | bytes |\n| filecheck.dir_existence | a dimension per directory | boolean |\n| filecheck.dir_mtime_ago | a dimension per directory | seconds |\n| filecheck.dir_num_of_files | a dimension per directory | files |\n| filecheck.dir_size | a dimension per directory | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-filecheck-Files_and_directories", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-fluentd", "plugin_name": "go.d.plugin", "module_name": "fluentd", "monitored_instance": {"name": "Fluentd", "link": "https://www.fluentd.org/", "icon_filename": "fluentd.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["fluentd", "logging"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", "integration_type": "collector", "id": "go.d.plugin-fluentd-Fluentd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-freeradius", "plugin_name": "go.d.plugin", "module_name": "freeradius", "monitored_instance": {"name": "FreeRADIUS", "link": "https://freeradius.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "freeradius.svg"}, "keywords": ["freeradius", "radius"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. 
#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-freeradius-FreeRADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-geth", "plugin_name": "go.d.plugin", "module_name": "geth", "monitored_instance": {"name": "Go-ethereum", "link": "https://github.com/ethereum/go-ethereum", "icon_filename": "geth.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["geth", "ethereum", "blockchain"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-geth-Go-ethereum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/geth/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-haproxy", "plugin_name": "go.d.plugin", "module_name": "haproxy", "monitored_instance": {"name": "HAProxy", "link": "https://www.haproxy.org/", "icon_filename": "haproxy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["haproxy", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n{% /details %}\n
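##### HAProxy PROMEX frontend\n\nFor reference, a sketch of the HAProxy side that exposes the endpoint used in these examples, based on the PROMEX addon's documented usage (requires HAProxy built with PROMEX; the port and path may differ in your setup):\n\n```\nfrontend stats\n mode http\n bind *:8404\n http-request use-service prometheus-exporter if { path /metrics }\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 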
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-haproxy-HAProxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-hfs", "plugin_name": "go.d.plugin", "module_name": "hfs", "monitored_instance": {"name": "Hadoop Distributed File System (HDFS)", "link": "https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html", "icon_filename": "hadoop.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["hdfs", "hadoop"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hfs\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-hfs-Hadoop_Distributed_File_System_(HDFS)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpcheck", "plugin_name": "go.d.plugin", "module_name": "httpcheck", "monitored_instance": {"name": "HTTP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP servers availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n{% /details %}\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n{% /details %}\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n{% /details %}\n
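##### With `response_match`\n\nA sketch of a response-body check. Per the options above, `response_match` is a regular expression tested against the response body once the status code is accepted; the `/health` path and the expected text are illustrative assumptions, not defaults.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/health\n response_match: 'ok'\n\n```\n{% /details %}\n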
##### With `headers_match`\n\nExample configurations with `headers_match`. See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # and its value must be equal to \"noindex,nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header,\n # whatever its value is.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # with a value equal to \"noindex,nofollow\".\n # This config fails the check only when the header is present with that exact value.\n - name: local\n url: http://127.0.0.1:8080\n headers_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-httpcheck-HTTP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-isc_dhcpd", "plugin_name": "go.d.plugin", "module_name": "isc_dhcpd", "monitored_instance": {"name": "ISC DHCP", "link": "https://www.isc.org/dhcp/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "keywords": ["dhcpd", "dhcp"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. 
| | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n| isc_dhcpd.pool_active_leases | a dimension per DHCP pool | leases |\n| isc_dhcpd.pool_utilization | a dimension per DHCP pool | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-isc_dhcpd-ISC_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubelet", "plugin_name": "go.d.plugin", "module_name": "k8s_kubelet", "monitored_instance": {"name": "Kubelet", "link": "https://kubernetes.io/docs/concepts/overview/components/#kubelet", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubelet", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n
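- Before debugging the collector itself, it can help to confirm that the Kubelet endpoint responds at all. A sketch using the default URL from the configuration above (adjust if you changed `url`):\n\n ```bash\n curl -s http://127.0.0.1:10255/metrics | head\n ```\n\n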
- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| 
k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubelet-Kubelet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubeproxy", "plugin_name": "go.d.plugin", "module_name": "k8s_kubeproxy", "monitored_instance": {"name": "Kubeproxy", "link": "https://kubernetes.io/docs/concepts/overview/components/#kube-proxy", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubeproxy", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubeproxy-Kubeproxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_state", "plugin_name": "go.d.plugin", "module_name": "k8s_state", "monitored_instance": {"name": "Kubernetes Cluster State", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubernetes", "k8s"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all 
platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-lighttpd", "plugin_name": "go.d.plugin", "module_name": "lighttpd", "monitored_instance": {"name": "Lighttpd", "link": "https://www.lighttpd.net/", "icon_filename": "lighttpd.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-lighttpd-Lighttpd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logind", "plugin_name": "go.d.plugin", "module_name": "logind", "monitored_instance": {"name": "systemd-logind users", "link": "https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html", "icon_filename": "users.svg", "categories": ["data-collection.systemd"]}, "keywords": ["logind", "systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", "integration_type": "collector", "id": "go.d.plugin-logind-systemd-logind_users", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logind/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logstash", "plugin_name": "go.d.plugin", "module_name": "logstash", "monitored_instance": {"name": "Logstash", "link": "https://www.elastic.co/products/logstash", "icon_filename": "elastic-logstash.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["logstash"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. 
| | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-logstash-Logstash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mongodb", "plugin_name": "go.d.plugin", "module_name": "mongodb", "monitored_instance": {"name": "MongoDB", "link": "https://www.mongodb.com/", "icon_filename": "mongodb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["mongodb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n{% /details %}\n##### With databases metrics\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available on shards only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", "integration_type": "collector", "id": "go.d.plugin-mongodb-MongoDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mariadb", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MariaDB", "link": "https://mariadb.org/", "icon_filename": "mariadb.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE 
STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ 
mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| 
mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | 
Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-mysql-MariaDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MySQL", "link": "https://www.mysql.com/", "categories": ["data-collection.database-servers"], "icon_filename": "mysql.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION 
CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-percona_mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "Percona MySQL", "link": "https://www.percona.com/software/mysql-database/percona-server", "icon_filename": "percona.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": false}, "overview": "# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n{% /details %}\n##### Connection with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-Percona_MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginx", "plugin_name": "go.d.plugin", "module_name": "nginx", "monitored_instance": {"name": "NGINX", "link": "https://www.nginx.com/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "nginx.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "go.d.plugin", "module_name": "web_log"}, {"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nginx", "web", "webserver", "http", "proxy"], "most_popular": true}, "overview": "# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginx-NGINX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxplus", "plugin_name": "go.d.plugin", "module_name": "nginxplus", "monitored_instance": {"name": "NGINX Plus", "link": "https://www.nginx.com/products/nginx/", "icon_filename": "nginxplus.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["nginxplus", "nginx", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxplus-NGINX_Plus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxvts", "plugin_name": "go.d.plugin", "module_name": "nginxvts", "monitored_instance": {"name": "NGINX VTS", "link": "https://www.nginx.com/", "icon_filename": "nginx.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "web_log"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n
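For orientation, a minimal nginx-module-vts configuration might look like the following sketch (the module must already be built into NGINX; the `/status` location is illustrative):\n\n```text\nhttp {\n    vhost_traffic_status_zone;\n\n    server {\n        location /status {\n            vhost_traffic_status_display;\n            vhost_traffic_status_display_format html;\n        }\n    }\n}\n```\n\nThe module then serves JSON under `/status/format/json`, which matches this collector's default `url`.\n\n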
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxvts-NGINX_VTS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ntpd", "plugin_name": "go.d.plugin", "module_name": "ntpd", "monitored_instance": {"name": "NTPd", "link": "https://www.ntp.org/documentation/4.2.8-series/ntpd", "icon_filename": "ntp.png", "categories": ["data-collection.system-clock-and-ntp"]}, "keywords": ["ntpd", "ntp", "time"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 3 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n{% /details %}\n##### With peers metrics\n\nCollect peers metrics.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", "integration_type": "collector", "id": "go.d.plugin-ntpd-NTPd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvidia_smi", "plugin_name": "go.d.plugin", "module_name": "nvidia_smi", "monitored_instance": {"name": "Nvidia GPU", "link": "https://www.nvidia.com/en-us/", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, 
"keywords": ["nvidia", "gpu", "hardware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| use_csv_format | Used format when requesting GPU information. XML is used if set to 'no'. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### XML format\n\nUse XML format when requesting GPU information.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n use_csv_format: no\n\n```\n{% /details %}\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | \u2022 | |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | \u2022 | |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_utilization | gpu | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_memory_utilization | memory | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_decoder_utilization | decoder | % | \u2022 | |\n| nvidia_smi.gpu_encoder_utilization | encoder | % | \u2022 | |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | \u2022 |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B | \u2022 | |\n| nvidia_smi.gpu_temperature | temperature | Celsius | \u2022 | \u2022 |\n| nvidia_smi.gpu_voltage | voltage | V | \u2022 | |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | \u2022 | \u2022 |\n| nvidia_smi.gpu_power_draw | power_draw | Watts | \u2022 | \u2022 |\n| nvidia_smi.gpu_performance_state | P0-P15 | state | \u2022 | \u2022 |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | \u2022 | |\n| nvidia_smi.gpu_mig_devices_count | mig | devices | \u2022 | |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvme", "plugin_name": "go.d.plugin", "module_name": "nvme", "monitored_instance": {"name": "NVMe devices", "link": "", "icon_filename": "nvme.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["nvme"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices using the command line tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. 
It uses `sudo` and assumes it is set up so that the netdata user can execute `nvme` as root without a password.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### Allow netdata to execute nvme\n\nAdd the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /usr/sbin/nvme\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvme binary. The default is \"nvme\" and the executable is looked for in the directories specified in the PATH environment variable. | nvme | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: nvme\n binary_path: /usr/local/sbin/nvme\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn", "plugin_name": "go.d.plugin", "module_name": "openvpn", "monitored_instance": {"name": "OpenVPN", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. 
You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client at any one time.\n\nThe collector is disabled by default to avoid breaking other tools that use the Management Interface.\n\n
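For example, a minimal sketch of the relevant `go.d.conf` fragment (assuming the stock file layout, where collectors are toggled under the `modules` key):\n\n```yaml\n# go.d.conf\nmodules:\n  openvpn: yes\n```\n\n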
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n| connect_timeout | Connection timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n| read_timeout | Read timeout in seconds. Sets deadline for read calls. | 2 | no |\n| write_timeout | Write timeout in seconds. Sets deadline for write calls. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n{% /details %}\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn-OpenVPN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn_status_log", "plugin_name": "go.d.plugin", "module_name": "openvpn_status_log", "monitored_instance": {"name": "OpenVPN status log", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt parses the server status log file and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn_status_log-OpenVPN_status_log", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": {"name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pgbouncer"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your 
PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add the `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"<password>\"\n ```\n\n- To verify the credentials, run the following command:\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-pgbouncer-PgBouncer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpdaemon", "plugin_name": "go.d.plugin", "module_name": "phpdaemon", "monitored_instance": {"name": "phpDaemon", "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": ["data-collection.apm"]}, "keywords": ["phpdaemon", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics are expected to be in JSON format.\n\n
\n**phpDaemon configuration**\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write a small application for it.\nThe application is needed because the standalone [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) application provides statistics in HTML format, which is unusable for Netdata.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n // Route /ServerStatus/ and /FullStatus/ requests to the application of the same name.\n if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n<?php\nnamespace PHPDaemon\\Applications;\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n $this->header('Content-Type: application/javascript; charset=utf-8');\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
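To verify the setup (assuming the listen address and port from the `phpd.conf` above), request the status URL and check that the response is JSON:\n\n```bash\ncurl http://127.0.0.1:8509/FullStatus\n```\n\n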
\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n{% /details %}\n##### HTTP authentication\n\nHTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpdaemon-phpDaemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpfpm", "plugin_name": "go.d.plugin", "module_name": "phpfpm", "monitored_instance": {"name": "PHP-FPM", "link": "https://php-fpm.org/", "icon_filename": "php.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["phpfpm", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. 
| /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n{% /details %}\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n{% /details %}\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpfpm-PHP-FPM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pihole", "plugin_name": "go.d.plugin", "module_name": "pihole", "monitored_instance": {"name": "Pi-hole", "link": "https://pi-hole.net", "icon_filename": "pihole.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["pihole"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-pihole-Pi-hole", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pika", "plugin_name": "go.d.plugin", "module_name": "pika", "monitored_instance": {"name": "Pika", "link": "https://github.com/OpenAtomFoundation/pika", "icon_filename": "pika.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pika", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. 
| | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-pika-Pika", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pika/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ping", "plugin_name": "go.d.plugin", "module_name": "ping", "monitored_instance": {"name": "Ping", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["ping"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change add `net.ipv4.ping_group_range=\"0 2147483647\"` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Timeout between sending ping packets. | 100ms | no |\n\n{% /details %}\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Unprivileged mode\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", "integration_type": "collector", "id": "go.d.plugin-ping-Ping", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ping/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-portcheck", "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": {"name": "TCP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors the availability and response time of TCP services.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined 
globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4 or IPv6 format, or a DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | Connection timeout in seconds. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n", "integration_type": "collector", "id": "go.d.plugin-portcheck-TCP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-postgres", "plugin_name": "go.d.plugin", "module_name": "postgres", "monitored_instance": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["data-collection.database-servers"], "icon_filename": "postgres.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "postgres", "postgresql", "sql"], "most_popular": true}, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as `root` and `netdata` using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user and grant it the `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db 
${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage 
|\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-postgres-PostgreSQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/metadata.yaml", "related_resources": ""}, {"meta": 
{"id": "collector-go.d.plugin-powerdns", "plugin_name": "go.d.plugin", "module_name": "powerdns", "monitored_instance": {"name": "PowerDNS Authoritative Server", "link": "https://doc.powerdns.com/authoritative/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns-PowerDNS_Authoritative_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-powerdns_recursor", "plugin_name": "go.d.plugin", "module_name": "powerdns_recursor", "monitored_instance": {"name": "PowerDNS Recursor", "link": "https://doc.powerdns.com/recursor/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this 
integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n{% /details %}\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns_recursor-PowerDNS_Recursor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-4d_server", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "4D Server", "link": "https://github.com/ThomasMaul/Prometheus_4D_Exporter", "icon_filename": "4d_server.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-4D_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-8430ft-modem", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "8430FT modem", "link": "https://github.com/dernasherbrezon/8430ft_exporter", "icon_filename": "mtc.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT 
Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-8430FT_modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-a10-acos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "A10 ACOS network devices", "link": "https://github.com/a10networks/PrometheusExporter", "icon_filename": "a10-networks.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-A10_ACOS_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-amd_smi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AMD CPU & GPU", "link": "https://github.com/amd/amd_smi_exporter", "icon_filename": "amd.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AMD_CPU_&_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apicast", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "APIcast", "link": "https://github.com/3scale/apicast", "icon_filename": "apicast.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-APIcast", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arm_hwcpipe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ARM HWCPipe", "link": "https://github.com/ylz-at/arm-hwcpipe-exporter", "icon_filename": "arm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM-based Android devices and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe 
Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ARM_HWCPipe", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Compute instances", "link": "https://github.com/O1ahmad/aws_ec2_exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances' key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS 
EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Compute_instances", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2_spot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Spot Instance", "link": "https://github.com/patcadelina/ec2-spot-exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Spot_Instance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS ECS", "link": "https://github.com/bevers222/ecs-exporter", "icon_filename": "amazon-ecs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS 
exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_ECS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Health events", "link": "https://github.com/vladvasiliu/aws-health-exporter-rs", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Health_events", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Quota", "link": "https://github.com/emylincon/aws_quota_exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_rds", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS RDS", "link": "https://github.com/percona/rds_exporter", "icon_filename": "aws-rds.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[rds_exporter](https://github.com/percona/rds_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
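For illustration, a minimal sketch of a job-level selector for this exporter. The `aws_rds_*` and `go_*` patterns are assumptions made for the example, not a list of the series rds_exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: rds\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - aws_rds_*\n deny:\n - go_*\n```\n\n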
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_RDS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_s3", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS S3 buckets", "link": "https://github.com/ribbybibby/s3_exporter", "icon_filename": "aws-s3.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 
Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
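For illustration, a minimal sketch of a job-level selector for this exporter. The `s3_*` and `go_*` patterns are assumptions made for the example, not a list of the series s3_exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: s3\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - s3_*\n deny:\n - go_*\n```\n\n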
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_S3_buckets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_sqs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS SQS", "link": "https://github.com/jmal98/sqs-exporter", "icon_filename": "aws-sqs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS 
Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
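For illustration, a minimal sketch of a job-level selector for this exporter. The `aws_sqs_*` and `go_*` patterns are assumptions made for the example, not a list of the series this exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: sqs\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - aws_sqs_*\n deny:\n - go_*\n```\n\n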
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_SQS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_instance_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS instance health", "link": "https://github.com/bobtfish/aws-instance-health-exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
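For illustration, a minimal sketch of a job-level selector for this exporter. The `aws_health_*` and `go_*` patterns are assumptions made for the example, not a list of the series this exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: aws_health\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - aws_health_*\n deny:\n - go_*\n```\n\n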
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_instance_health", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airthings_waveplus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Airthings Waveplus air sensor", "link": "https://github.com/jeremybz/waveplus_exporter", "icon_filename": "airthings.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
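For illustration, a minimal sketch of a job-level selector for this exporter. The `waveplus_*` and `go_*` patterns are assumptions made for the example, not a list of the series this exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: waveplus\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - waveplus_*\n deny:\n - go_*\n```\n\n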
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Airthings_Waveplus_air_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_edgedns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Edge DNS Traffic", "link": "https://github.com/akamai/akamai-edgedns-traffic-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
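For illustration, a minimal sketch of a job-level selector for this exporter. The `edgedns_*` and `go_*` patterns are assumptions made for the example, not a list of the series this exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: edgedns\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - edgedns_*\n deny:\n - go_*\n```\n\n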
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Edge_DNS_Traffic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_gtm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Global Traffic Management", "link": "https://github.com/akamai/akamai-gtm-metrics-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
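For illustration, a minimal sketch of a job-level selector for this exporter. The `gtm_*` and `go_*` patterns are assumptions made for the example, not a list of the series this exporter actually exposes; check your endpoint's `/metrics` output for the real names.\n\n```yaml\njobs:\n - name: gtm\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - gtm_*\n deny:\n - go_*\n```\n\n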
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Global_Traffic_Management", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_cloudmonitor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Cloudmonitor", "link": "https://github.com/ExpressenAB/cloudmonitor_exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
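##### Process untyped metrics\n\nA minimal sketch of using the `fallback_type` option inside a job so untyped metrics are processed instead of ignored. The metric name pattern is an illustrative placeholder.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - '*_state'\n\n```\n{% /details %}\n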
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akami_Cloudmonitor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alamos_fe2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alamos FE2 server", "link": "https://github.com/codemonauts/prometheus-fe2-exporter", "icon_filename": "alamos_fe2.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 
Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
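##### Custom data collection frequency\n\nA minimal sketch of overriding the global `update_every` for a single job, collecting metrics every 30 seconds instead of the default 10.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n update_every: 30\n\n```\n{% /details %}\n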
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alamos_FE2_server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alibaba-cloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alibaba Cloud", "link": "https://github.com/aylei/aliyun-exporter", "icon_filename": "alibaba-cloud.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
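##### Collect through an HTTP proxy\n\nA minimal sketch of using the `proxy_url`, `proxy_username`, and `proxy_password` options. The proxy address and credentials are placeholders.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypassword\n\n```\n{% /details %}\n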
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alibaba_Cloud", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-altaro_backup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Altaro Backup", "link": "https://github.com/raph2i/altaro_backup_exporter", "icon_filename": "altaro.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup 
Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
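##### Custom HTTP headers\n\nA minimal sketch of sending an additional HTTP header with every request via the `headers` option. The header name and value are placeholders.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Api-Key: my-api-key\n\n```\n{% /details %}\n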
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Altaro_Backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aaisp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Andrews & Arnold line status", "link": "https://github.com/daveio/aaisp-exporter", "icon_filename": "andrewsarnold.jpg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
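##### HTTPS with a custom certificate authority\n\nA minimal sketch of validating the server's certificate against a custom CA bundle via the `tls_ca` option, instead of disabling verification. The file path is a placeholder.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/certs/my_ca.pem\n\n```\n{% /details %}\n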
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Andrews_&_Arnold_line_status", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Airflow", "link": "https://github.com/shalb/airflow-exporter", "icon_filename": "airflow.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
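##### Adjust time series limits\n\nA minimal sketch of raising the `max_time_series` and `max_time_series_per_metric` limits for an endpoint that exposes many time series. The values shown are arbitrary.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n\n```\n{% /details %}\n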
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Airflow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-flink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Flink", "link": "https://github.com/matsumana/flink_exporter", "icon_filename": "apache_flink.png", "categories": ["data-collection.apm"]}, "keywords": ["web server", "http", "https"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics 
Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Flink", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apple_timemachine", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apple Time Machine", "link": "https://github.com/znerol/prometheus-timemachine-exporter", "icon_filename": "apple.svg", "categories": ["data-collection.macos-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine 
Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
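For instance, if a hypothetical exporter exposed an untyped `backup_age_seconds` series, a sketch like the following would make the collector treat it as a Gauge:\n\n```yaml\nfallback_type:\n gauge:\n - backup_age_seconds\n```\n\n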
\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apple_Time_Machine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aruba", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Aruba devices", "link": "https://github.com/slashdoom/aruba_exporter", "icon_filename": "aruba.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "aruba devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks device performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Aruba_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arvancloud_cdn", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ArvanCloud CDN", "link": "https://github.com/arvancloud/ar-prometheus-exporter", "icon_filename": "arvancloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ArvanCloud_CDN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-audisto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Audisto", "link": "https://github.com/ZeitOnline/audisto_exporter", "icon_filename": "audisto.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Audisto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-authlog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AuthLog", "link": "https://github.com/woblerr/authlog_exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AuthLog", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_ad_app_passwords", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure AD App passwords", "link": "https://github.com/vladvasiliu/azure-app-secrets-monitor", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_AD_App_passwords", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_elastic_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Elastic Pool SQL", "link": "https://github.com/benclapp/azure_elastic_sql_exporter", "icon_filename": "azure-elastic-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Elastic_Pool_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_res", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Resources", "link": "https://github.com/FXinnovation/azure-resources-exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Azure resources' vital metrics for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Resources", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure SQL", "link": "https://github.com/iamseth/azure_sql_exporter", "icon_filename": "azure-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL 
exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_service_bus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Service Bus", "link": "https://github.com/marcinbudny/servicebus_exporter", "icon_filename": "azure-service-bus.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Service_Bus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_app", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure application", "link": "https://github.com/RobustPerception/azure_metrics_exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_application", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bosh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BOSH", "link": "https://github.com/bosh-prometheus/bosh_exporter", "icon_filename": "bosh.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BOSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bigquery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BigQuery", "link": "https://github.com/m-lab/prometheus-bigquery-exporter", "icon_filename": "bigquery.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BigQuery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bird", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bird Routing Daemon", "link": "https://github.com/czerwonk/bird_exporter", "icon_filename": "bird.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon 
Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
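\nFor example, a hypothetical mapping that treats one untyped series as a counter and another as a gauge (the metric names are illustrative, not taken from bird_exporter):\n\n```yaml\nfallback_type:\n counter:\n - bird_sessions_established\n gauge:\n - bird_protocol_up\n```\n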
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bird_Routing_Daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Blackbox", "link": "https://github.com/prometheus/blackbox_exporter", "icon_filename": "prometheus.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["blackbox"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox 
exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
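##### Global and per-job collection frequency\n\nA sketch showing `update_every`, one of the globally definable options, set globally and overridden for a single job; the values and the second port are illustrative:\n\n{% details summary=\"Config\" %}\n```yaml\n# applies to all jobs unless overridden per job\nupdate_every: 10\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: slow\n update_every: 60\n url: http://127.0.0.1:9115/metrics\n\n```\n{% /details %}\n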
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bobcat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bobcat Miner 300", "link": "https://github.com/pperzyna/bobcat_exporter", "icon_filename": "bobcat.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
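##### Via an HTTP proxy\n\nA sketch that reaches the exporter through a proxy using the `proxy_url` option from the table above; the proxy address is illustrative:\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n\n```\n{% /details %}\n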
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bobcat_Miner_300", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-borg", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Borg backup", "link": "https://github.com/k0ral/borg-exporter", "icon_filename": "borg.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
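##### Custom HTTP headers\n\nA sketch that sends an extra request header using the `headers` option from the table above; the header name and value are illustrative, and `headers` is assumed to take a name-to-value mapping:\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Scrape-Source: netdata\n\n```\n{% /details %}\n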
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Borg_backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bungeecord", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BungeeCord", "link": "https://github.com/weihao/bungeecord-prometheus-exporter", "icon_filename": "bungee.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus 
Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
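##### HTTPS with a custom certificate authority\n\nA sketch that validates the server certificate against a custom CA bundle via the `tls_ca` option from the table above; the file path is illustrative:\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/certs/internal-ca.pem\n\n```\n{% /details %}\n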
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BungeeCord", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-csgo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CS:GO", "link": "https://github.com/kinduff/csgo_exporter", "icon_filename": "csgo.svg", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CS:GO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Counter-Strike: Global Offensive server metrics for improved game performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
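\nA deny-only sketch is also possible; assuming an empty `allow` list matches everything (the default behavior), the following drops only histogram bucket series (the pattern is illustrative):\n\n```yaml\nselector:\n deny:\n - '*_bucket'\n```\n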
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CS:GO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cvmfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CVMFS clients", "link": "https://github.com/guilbaults/cvmfs-exporter", "icon_filename": "cvmfs.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS 
exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (counter or gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CVMFS_clients", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-celery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Celery", "link": "https://github.com/ZeitOnline/celery_redis_prometheus", "icon_filename": "celery.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
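As a hedged, deny-style sketch (the dropped prefixes are common exporter internals, but verify against your Celery exporter's actual output; the port is an assumption):\n\n```yaml\n# hypothetical Celery job: drop runtime/process internals, keep the rest\njobs:\n - name: celery_local\n url: http://127.0.0.1:9808/metrics\n selector:\n deny:\n - go_*\n - process_*\n```\n\n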
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Celery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-certificate_transparency", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Certificate Transparency", "link": "https://github.com/Hsn723/ct-exporter", "icon_filename": "ct.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
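To make the allow/deny logic described below concrete, a hedged sketch (the `ct_` prefix, the debug pattern, and the port are assumptions for illustration only):\n\n```yaml\n# hypothetical ct-exporter job: keep (allow) AND drop (deny)\njobs:\n - name: ct_local\n url: http://127.0.0.1:9113/metrics\n selector:\n allow:\n - ct_*\n deny:\n - ct_debug_*\n```\n\n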
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Certificate_Transparency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-checkpoint", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Checkpoint device", "link": "https://github.com/RespiroConsulting/CheckPointExporter", "icon_filename": "checkpoint.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
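As a hedged aside on the `fallback_type` option documented further below: if this exporter exposed untyped series (the metric names and port here are hypothetical), you could sketch:\n\n```yaml\n# hypothetical Checkpoint job: treat assumed untyped series as gauges\njobs:\n - name: checkpoint_local\n url: http://127.0.0.1:9805/metrics\n fallback_type:\n gauge:\n - checkpoint_cpu_*\n```\n\n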
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Checkpoint_device", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-chia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Chia", "link": "https://github.com/chia-network/chia-exporter", "icon_filename": "chia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
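For example, a hedged sketch that keeps only the exporter's own series (the `chia_` prefix and the port are assumptions based on the exporter's name, not verified):\n\n```yaml\n# hypothetical Chia job\njobs:\n - name: chia_local\n url: http://127.0.0.1:9914/metrics\n selector:\n allow:\n - chia_*\n```\n\n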
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Chia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clm5ip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Christ Elektronik CLM5IP power panel", "link": "https://github.com/christmann/clm5ip_exporter/", "icon_filename": "christelec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik 
CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
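A hedged sketch pairing a selector with a slower poll, which may suit a small power panel (the address, port, and `clm5ip_` prefix are all assumptions):\n\n```yaml\n# hypothetical CLM5IP job: poll a slow-changing device less often\njobs:\n - name: clm5ip_local\n url: http://192.0.2.10:9100/metrics\n update_every: 30\n selector:\n allow:\n - clm5ip_*\n```\n\n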
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_agent", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Agent", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, its data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
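Cilium agents can expose many series, so a hedged sketch that trims histogram buckets and raises the global cap may help (the port and patterns are assumptions; verify against your deployment):\n\n```yaml\n# hypothetical Cilium Agent job: reduce cardinality, allow more series\njobs:\n - name: cilium_agent\n url: http://127.0.0.1:9962/metrics\n max_time_series: 5000\n selector:\n deny:\n - '*_bucket'\n```\n\n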
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Agent", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_operator", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Operator", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the 
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Operator", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_proxy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Proxy", "link": "https://github.com/cilium/proxy", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n
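As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```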
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Proxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cisco_aci", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cisco ACI", "link": "https://github.com/RavuAlHemio/prometheus_aci_exporter", "icon_filename": "cisco.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "cisco devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n
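As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```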
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cisco_ACI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-citrix_netscaler", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Citrix NetScaler", "link": "https://github.com/rokett/Citrix-NetScaler-Exporter", "icon_filename": "citrix.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n
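As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```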
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Citrix_NetScaler", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClamAV daemon", "link": "https://github.com/sergeymakinen/clamav_exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n
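As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```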
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClamAV_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamscan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clamscan results", "link": "https://github.com/FortnoxAB/clamscan-exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n
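As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```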
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clamscan_results", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clash", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clash", "link": "https://github.com/elonzh/clash_exporter", "icon_filename": "clash.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.\n\n
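As a quick sanity check before configuring Netdata, you can confirm that the exporter responds with Prometheus-format metrics. The URL below is only an example (it matches the default used in the configuration examples further down this page); adjust the host and port to your deployment:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```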
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for untyped metrics (process them as counter or gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clickhouse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClickHouse", "link": "https://github.com/ClickHouse/ClickHouse", "icon_filename": "clickhouse.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClickHouse database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus).\n\n
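As a quick sanity check before configuring Netdata, you can confirm that the built-in exporter responds with Prometheus-format metrics. The URL below is only an illustration; use the host, port, and endpoint you configured in the ClickHouse server settings:\n\n```bash\ncurl http://127.0.0.1:9090/metrics\n```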
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n
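For instance, in the following hypothetical scrape fragment (placeholder metric names), the first series carries no '# TYPE' hint but ends in '_total', so it is processed as a Counter, while the second carries a 'quantile' label, so it is processed as a Summary:\n\n```txt\napp_restarts_total 3\napp_request_seconds{quantile=\"0.99\"} 0.124\n```\n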
", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clickhouse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClickHouse", "link": "https://github.com/ClickHouse/ClickHouse", "icon_filename": "clickhouse.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClickHouse database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClickHouse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_cloudwatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CloudWatch", "link": "https://github.com/prometheus/cloudwatch_exporter", "icon_filename": "aws-cloudwatch.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CloudWatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry", "link": "https://github.com/bosh-prometheus/cf_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry_firebase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry Firehose", "link": "https://github.com/bosh-prometheus/firehose_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry_Firehose", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloudflare_pcap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloudflare PCAP", "link": "https://github.com/wehkamp/docker-prometheus-cloudflare-exporter", "icon_filename": "cloudflare.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloudflare_PCAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClusterControl CMON", "link": "https://github.com/severalnines/cmon_exporter", "icon_filename": "cluster-control.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON 
Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClusterControl_CMON", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-collectd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Collectd", "link": "https://github.com/prometheus/collectd_exporter", "icon_filename": "collectd.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Collectd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-concourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Concourse", "link": "https://concourse-ci.org", "icon_filename": "concourse.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Concourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ftbeerpi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CraftBeerPi", "link": "https://github.com/jo-hannes/craftbeerpi_exporter", "icon_filename": "craftbeer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi 
exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CraftBeerPi", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crowdsec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crowdsec", "link": "https://docs.crowdsec.net/docs/observability/prometheus", "icon_filename": "crowdsec.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crowdsec", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crypto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crypto exchanges", "link": "https://github.com/ix-ai/crypto-exporter", "icon_filename": "crypto.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crypto_exchanges", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cryptowatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cryptowatch", "link": "https://github.com/nbarrientos/cryptowat_exporter", "icon_filename": "cryptowatch.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat 
Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cryptowatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-custom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Custom Exporter", "link": "https://github.com/orange-cloudfoundry/custom_exporter", "icon_filename": "customdata.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom 
Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Custom_Exporter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ddwrt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DDWRT Routers", "link": "https://github.com/camelusferus/ddwrt_collector", "icon_filename": "ddwrt.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DDWRT_Routers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dmarc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DMARC", "link": "https://github.com/jgosmann/dmarc-metrics-exporter", "icon_filename": "dmarc.png", "categories": ["data-collection.mail-servers"]}, "keywords": ["email authentication", "policy", "reporting"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DMARC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dnsbl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DNSBL", "link": "https://github.com/Luzilla/dnsbl_exporter/", "icon_filename": "dnsbl.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DNSBL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC ECS cluster", "link": "https://github.com/paychex/prometheus-emcecs-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS 
Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_ECS_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_isilon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC Isilon cluster", "link": "https://github.com/paychex/prometheus-isilon-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_Isilon_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_xtremio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC XtremIO cluster", "link": "https://github.com/cthiel42/prometheus-xtremio-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_XtremIO_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_powermax", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell PowerMax", "link": "https://github.com/kckecheng/powermax_exporter", "icon_filename": "powermax.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax 
Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_PowerMax", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dependency_track", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dependency-Track", "link": "https://github.com/jetstack/dependency-track-exporter", "icon_filename": "dependency-track.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track 
Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
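For instance, here is a minimal sketch of a selector inside a job definition; the `dependency_track_` prefix is a hypothetical placeholder for illustration, not taken from the exporter's documentation:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - dependency_track_* # hypothetical metric prefix\n\n```\n\n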
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dependency-Track", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-digitalocean", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DigitalOcean", "link": "https://github.com/metalmatze/digitalocean_exporter", "icon_filename": "digitalocean.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean 
Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
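As an illustration, here is a sketch of a deny-only selector that drops the Go runtime and process series that Go-based exporters commonly expose (assuming the conventional `go_*` and `process_*` names):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n - go_* # Go runtime metrics\n - process_* # process metrics\n\n```\n\n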
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DigitalOcean", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-discourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Discourse", "link": "https://github.com/discourse/discourse-prometheus", "icon_filename": "discourse.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse 
Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
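For example, a sketch combining allow and deny lists; both patterns are hypothetical placeholders chosen for illustration only:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - discourse_* # hypothetical: keep only exporter metrics\n deny:\n - discourse_internal_* # hypothetical: drop a noisy subset\n\n```\n\n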
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Discourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dutch_electricity_smart_meter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dutch Electricity Smart Meter", "link": "https://github.com/TobiasDeBruijn/prometheus-p1-exporter", "icon_filename": "dutch-electricity.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
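To make the documented logic concrete, the sketch below evaluates as (`p1_power_*` OR `p1_gas_*`) AND NOT `p1_power_failures_*`; all three metric name patterns are hypothetical placeholders:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - p1_power_* # pattern1\n - p1_gas_* # pattern2\n deny:\n - p1_power_failures_* # pattern3\n\n```\n\n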
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dynatrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dynatrace", "link": "https://github.com/Apside-TOP/dynatrace_exporter", "icon_filename": "dynatrace.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace 
Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
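A selector can also be combined with the per-job limit options from the table above; a sketch follows, with the metric prefix and raised limits chosen arbitrarily for illustration:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000 # raise the global limit for a large endpoint\n max_time_series_per_metric: 500\n selector:\n allow:\n - dynatrace_* # hypothetical metric prefix\n\n```\n\n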
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dynatrace", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eos_web", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "EOS", "link": "https://eos-web.web.cern.ch/eos-web/", "icon_filename": "eos.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
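One way to reduce time series cardinality is to deny histogram bucket series, which by Prometheus naming convention end in `_bucket`; this is a sketch, and whether it is useful depends on the metrics the exporter actually exposes:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n - '*_bucket' # drop histogram buckets, keep _sum and _count\n\n```\n\n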
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-EOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eaton_ups", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Eaton UPS", "link": "https://github.com/psyinfra/prometheus-eaton-ups-exporter", "icon_filename": "eaton.svg", "categories": ["data-collection.ups"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS 
Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type (Counter or Gauge) for processing Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
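A selector works together with the other job options, such as basic HTTP authentication; in this sketch the credentials and the metric prefix are placeholders:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username # placeholder\n password: password # placeholder\n selector:\n allow:\n - eaton_* # hypothetical metric prefix\n\n```\n\n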
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Eaton_UPS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-elgato_keylight", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elgato Key Light devices.", "link": "https://github.com/mdlayher/keylight_exporter", "icon_filename": "elgato.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light 
exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Elgato_Key_Light_devices.", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-energomera", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energomera smart power meters", "link": "https://github.com/peak-load/energomera_exporter", "icon_filename": "energomera.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[energomera-exporter](https://github.com/peak-load/energomera_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Energomera_smart_power_meters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-excel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Excel spreadsheet", "link": "https://github.com/MarcusCalidus/excel-exporter", "icon_filename": "excel.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel 
Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Excel_spreadsheet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-frrouting", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FRRouting", "link": "https://github.com/tynany/frr_exporter", "icon_filename": "frrouting.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FRRouting", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fastd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fastd", "link": "https://github.com/freifunk-darmstadt/fastd-exporter", "icon_filename": "fastd.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fastd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fortigate", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fortigate firewall", "link": "https://github.com/bluecmd/fortigate_exporter", "icon_filename": "fortinet.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fortigate_firewall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_nfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD NFS", "link": "https://github.com/Axcient/freebsd-nfs-exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS 
Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_NFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_rctl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD RCTL-RACCT", "link": "https://github.com/yo000/rctl_exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
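For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n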
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_RCTL-RACCT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freifunk", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Freifunk network", "link": "https://github.com/xperimental/freifunk-exporter", "icon_filename": "freifunk.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk 
Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
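For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n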
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Freifunk_network", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fritzbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fritzbox network devices", "link": "https://github.com/pdreker/fritz_exporter", "icon_filename": "avm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox 
exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
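For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n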
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fritzbox_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_gce", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP GCE", "link": "https://github.com/O1ahmad/gcp-gce-exporter", "icon_filename": "gcp-gce.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE 
Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
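For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n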
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_GCE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP Quota", "link": "https://github.com/mintel/gcp-quota-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
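For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n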
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gtp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GTP", "link": "https://github.com/wmnsk/gtp_exporter", "icon_filename": "gtpu.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
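For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n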
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GTP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic_cli", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic Command Line Output", "link": "https://github.com/MarioMartReq/generic-exporter", "icon_filename": "cli.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output 
Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
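For example, a minimal sketch (the metric names are hypothetical and purely illustrative) of a selector that keeps every `node_*` series except `node_boot_time_seconds`:\n\n```yaml\nselector:\n allow:\n - node_*\n deny:\n - node_boot_time_seconds\n```\n\n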
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_Command_Line_Output", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-enclosure", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic storage enclosure tool", "link": "https://github.com/Gandi/jbod-rs", "icon_filename": "storage-enclosure.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_storage_enclosure_tool", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_ratelimit", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub API rate limit", "link": "https://github.com/lunarway/github-ratelimit-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit 
Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_API_rate_limit", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_repo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub repository", "link": "https://github.com/githubexporter/github-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub 
Exporter](https://github.com/githubexporter/github-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gitlab_runner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitLab Runner", "link": "https://gitlab.com/gitlab-org/gitlab-runner", "icon_filename": "gitlab.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitLab_Runner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gobetween", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Gobetween", "link": "https://github.com/yyyar/gobetween", "icon_filename": "gobetween.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Gobetween", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Cloud Platform", "link": "https://github.com/DazWilkin/gcp-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Cloud_Platform", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-google_pagespeed", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Pagespeed", "link": "https://github.com/foomo/pagespeed_exporter", "icon_filename": "google.svg", "categories": ["data-collection.apm"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Pagespeed", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_stackdriver", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Stackdriver", "link": "https://github.com/prometheus-community/stackdriver_exporter", "icon_filename": "gcp-stackdriver.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Stackdriver", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-grafana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Grafana", "link": "https://grafana.com/", "icon_filename": "grafana.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Grafana", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-graylog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Graylog Server", "link": "https://github.com/Graylog2/graylog2-server/", "icon_filename": "graylog.svg", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Graylog_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HANA", "link": "https://github.com/jenningsloy318/hana_exporter", "icon_filename": "sap.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HANA", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hdsentinel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HDSentinel", "link": "https://github.com/qusielle/hdsentinel-exporter", "icon_filename": "harddisk.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel 
Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HDSentinel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hhvm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HHVM", "link": "https://github.com/wikimedia/operations-software-hhvm_exporter", "icon_filename": "hhvm.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM 
Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HHVM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hp_ilo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HP iLO", "link": "https://github.com/infinityworks/hpilo-exporter", "icon_filename": "hp.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics 
Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HP_iLO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-halon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Halon", "link": "https://github.com/tobiasbp/halon_exporter", "icon_filename": "halon.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Halon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hashicorp_vault", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HashiCorp Vault secrets", "link": "https://github.com/tomtom-international/vault-assessment-prometheus-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HashiCorp_Vault_secrets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hasura_graphql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hasura GraphQL Server", "link": "https://github.com/zolamk/hasura-exporter", "icon_filename": "hasura.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized\nAPI performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura 
Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hasura_GraphQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_hotspot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium hotspot", "link": "https://github.com/tedder/helium_hotspot_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot 
exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_hotspot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_miner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium miner (validator)", "link": "https://github.com/tedder/miner_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) 
exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_miner_(validator)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_cgm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CGN series CPE", "link": "https://github.com/yrro/hitron-exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 
exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CGN_series_CPE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_coda", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CODA Cable Modem", "link": "https://github.com/hairyhenderson/hitron_coda_exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CODA_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homebridge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homebridge", "link": "https://github.com/lstrojny/homebridge-prometheus-exporter", "icon_filename": "homebridge.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge 
Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
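For illustration, consider a sketch of a hypothetical counter exposed by an endpoint:\n\n```\n# TYPE app_requests_total counter\napp_requests_total{code=\"200\"} 1027\napp_requests_total{code=\"500\"} 3\n```\n\nUnder the grouping rules above, each label set (`code=\"200\"`, `code=\"500\"`) gets its own chart, each with a single incremental dimension named after the metric.\n\n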
", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homebridge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homey", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homey", "link": "https://github.com/rickardp/homey-prometheus-exporter", "icon_filename": "homey.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
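\nFor example, a sketch with hypothetical metric names that treats every untyped `myapp_requests*` metric as a Counter and `myapp_queue_depth` as a Gauge:\n\n```yaml\nfallback_type:\n counter:\n - myapp_requests*\n gauge:\n - myapp_queue_depth\n```\n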
\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homey", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-honeypot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Honeypot", "link": "https://github.com/Intrinsec/honeypot_exporter", "icon_filename": "intrinsec.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Honeypot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hilink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Huawei devices", "link": "https://github.com/eliecharra/hilink-exporter", "icon_filename": "huawei.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink 
exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Huawei_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hubble", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hubble", "link": "https://github.com/cilium/hubble", "icon_filename": "hubble.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
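##### Custom HTTP headers\n\n> **Note**: The header name and value below are placeholders, not values required by this integration; adjust them to whatever your endpoint expects.\n\nAn illustrative sketch of sending a custom HTTP header (for example, an API key) with each scrape request via the `headers` option.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Api-Key: my-secret-key\n\n```\n{% /details %}\n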
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hubble", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_aix_njmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM AIX systems Njmon", "link": "https://github.com/crooks/njmon_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_AIX_systems_Njmon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_cex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM CryptoExpress (CEX) cards", "link": "https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is set.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_mq", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM MQ", "link": "https://github.com/agebhar1/mq_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_MQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum", "link": "https://github.com/topine/ibm-spectrum-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum 
Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum_virtualize", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum Virtualize", "link": "https://github.com/bluecmd/spectrum_virtualize_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum_Virtualize", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_zhmc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Z Hardware Management Console", "link": "https://github.com/zhmcclient/zhmc-prometheus-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IOTA full node", "link": "https://github.com/crholliday/iota-prom-exporter", "icon_filename": "iota.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA 
Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IOTA_full_node", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ipmi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IPMI (By SoundCloud)", "link": "https://github.com/prometheus-community/ipmi_exporter", "icon_filename": "soundcloud.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IPMI_(By_SoundCloud)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-influxdb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "InfluxDB", "link": "https://github.com/prometheus/influxdb_exporter", "icon_filename": "influxdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB 
exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-InfluxDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jmx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JMX", "link": "https://github.com/prometheus/jmx_exporter", "icon_filename": "java.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JMX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jarvis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jarvis Standing Desk", "link": "https://github.com/hairyhenderson/jarvis_exporter/", "icon_filename": "jarvis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk 
Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jarvis_Standing_Desk", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jenkins", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jenkins", "link": "https://www.jenkins.io/", "icon_filename": "jenkins.svg", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jenkins", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jetbrains_fls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JetBrains Floating License Server", "link": "https://github.com/mkreu/jetbrains-fls-exporter", "icon_filename": "jetbrains.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JetBrains_Floating_License_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka", "link": "https://github.com/danielqsj/kafka_exporter/", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_connect", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Connect", "link": "https://github.com/findelabs/kafka-connect-exporter-rs", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (with no '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Connect", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_consumer_lag", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Consumer Lag", "link": "https://github.com/omarsmak/kafka-consumer-lag-monitoring", "icon_filename": "kafka.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Consumer_Lag", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_zookeeper", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka ZooKeeper", "link": "https://github.com/cloudflare/kafka_zookeeper_exporter", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kannel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kannel", "link": "https://github.com/apostvav/kannel_exporter", "icon_filename": "kannel.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kannel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-keepalived", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Keepalived", "link": "https://github.com/gen2brain/keepalived_exporter", "icon_filename": "keepalived.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived 
Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Keepalived", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-korral", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kubernetes Cluster Cloud Cost", "link": "https://github.com/agilestacks/korral", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kubernetes_Cluster_Cloud_Cost", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "LDAP", "link": "https://github.com/titisan/ldap_exporter", "icon_filename": "ldap.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP 
Exporter](https://github.com/titisan/ldap_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-LDAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lagerist", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lagerist Disk latency", "link": "https://github.com/Svedrin/lagerist", "icon_filename": "linux.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency 
exporter](https://github.com/Svedrin/lagerist) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lagerist_Disk_latency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-linode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Linode", "link": "https://github.com/DazWilkin/linode-exporter", "icon_filename": "linode.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
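For example, a minimal sketch (the metric names below are hypothetical, not taken from the Linode Exporter) that keeps instance series but drops transfer series:\n\n```yaml\nselector:\n allow:\n - linode_instance_*\n deny:\n - linode_instance_transfer_*\n```\n\n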
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Linode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lustre", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lustre metadata", "link": "https://github.com/GSI-HPC/prometheus-cluster-exporter", "icon_filename": "lustre.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster 
Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lustre_metadata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lynis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lynis audit reports", "link": "https://github.com/MauveSoftware/lynis_exporter", "icon_filename": "lynis.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
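For example (a sketch with a hypothetical metric name prefix), an allow-only selector that keeps just the series whose names start with `lynis_`:\n\n```yaml\nselector:\n allow:\n - lynis_*\n```\n\n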
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lynis_audit_reports", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mp707", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MP707 USB thermometer", "link": "https://github.com/nradchenko/mp707_exporter", "icon_filename": "thermometer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MP707_USB_thermometer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mqtt_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MQTT Blackbox", "link": "https://github.com/inovex/mqtt_blackbox_exporter", "icon_filename": "mqtt.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox 
Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
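For example (a sketch; the metric name is hypothetical), a deny-only selector that drops a noisy series while collecting everything else:\n\n```yaml\nselector:\n deny:\n - probe_duration_*\n```\n\n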
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MQTT_Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-machbase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Machbase", "link": "https://github.com/MACHBASE/prometheus-machbase-exporter", "icon_filename": "machbase.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase 
Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Machbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-maildir", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Maildir", "link": "https://github.com/cherti/mailexporter", "icon_filename": "mailserver.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
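For example (a sketch with hypothetical metric names), patterns can mix exact names and globs:\n\n```yaml\nselector:\n allow:\n - mail_deliver_success\n - mail_send_*\n```\n\n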
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Maildir", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meilisearch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meilisearch", "link": "https://github.com/scottaglia/meilisearch_exporter", "icon_filename": "meilisearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch 
Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meilisearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-memcached", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Memcached (community)", "link": "https://github.com/prometheus/memcached_exporter", "icon_filename": "memcached.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached 
exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Memcached_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meraki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meraki dashboard", "link": "https://github.com/TheHolm/meraki-dashboard-promethus-exporter", "icon_filename": "meraki.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meraki_dashboard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mesos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mesos", "link": "http://github.com/mesosphere/mesos_exporter", "icon_filename": "mesos.svg", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mesos", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mikrotik", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MikroTik devices", "link": "https://github.com/swoga/mikrotik-exporter", "icon_filename": "mikrotik.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, 
swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MikroTik_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-routeros", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mikrotik RouterOS devices", "link": "https://github.com/welbymcroberts/routeros_exporter", "icon_filename": "routeros.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mikrotik_RouterOS_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-minecraft", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Minecraft", "link": "https://github.com/sladkoff/minecraft-prometheus-exporter", "icon_filename": "minecraft.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft 
Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Minecraft", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-modbus_rtu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Modbus protocol", "link": "https://github.com/dernasherbrezon/modbusrtu_exporter", "icon_filename": "modbus.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
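As a concrete sketch (the metric names below are hypothetical, chosen only to illustrate the allow/deny shape):\n\n```yaml\nselector:\n allow:\n - modbus_*\n deny:\n - modbus_*_duration_seconds\n```\n\n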
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Modbus_protocol", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mogilefs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MogileFS", "link": "https://github.com/KKBOX/mogilefs-exporter", "icon_filename": "filesystem.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS 
Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
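For instance, a hedged sketch (hypothetical metric names) that keeps two metric families but drops a noisy subset of one of them:\n\n```yaml\n# a series is collected if it matches an allow pattern AND matches no deny pattern\nselector:\n allow:\n - mogilefs_files_*\n - mogilefs_device_*\n deny:\n - mogilefs_device_utilization_*\n```\n\n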
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MogileFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-monnit_mqtt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Monnit Sensors MQTT", "link": "https://github.com/braxton9460/monnit-mqtt-exporter", "icon_filename": "monnit.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter 
WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
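If only a few series need to be dropped, a deny list alone may suffice, assuming (per the selector README linked below) that an empty allow list permits all series; the pattern here is hypothetical:\n\n```yaml\nselector:\n deny:\n - sensor_battery_*\n```\n\n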
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Monnit_Sensors_MQTT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nrpe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NRPE daemon", "link": "https://github.com/canonical/nrpe_exporter", "icon_filename": "nrpelinux.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
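The selector syntax (see the pattern-syntax link below) is documented to also support label matchers; if so, a sketch like this (hypothetical metric and label names) narrows collection by label value:\n\n```yaml\nselector:\n allow:\n - 'nrpe_command_status{command=\"check_*\"}'\n```\n\n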
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NRPE_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nsxt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NSX-T", "link": "https://github.com/jk8s/nsxt_exporter", "icon_filename": "vmware-nsx.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
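Note that `max_time_series` in the table above stops processing at 2000 series per endpoint, so on a busy endpoint a narrow allow list is one practical way to stay under that limit; the pattern below is hypothetical:\n\n```yaml\nselector:\n allow:\n - nsxt_node_*\n```\n\n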
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NSX-T", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nvml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NVML", "link": "https://github.com/oko/nvml-exporter-rs", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
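One YAML caveat that applies to any of these patterns: a pattern starting with `*` must be quoted, because an unquoted leading `*` is parsed as a YAML alias. A hypothetical sketch:\n\n```yaml\nselector:\n deny:\n - '*_created'\n```\n\n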
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NVML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-naemon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Naemon", "link": "https://github.com/Griesbacher/Iapetos", "icon_filename": "naemon.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
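Since `selector` is a per-job option, it sits under a job entry in `go.d/prometheus.conf`; a hypothetical job combining it with a URL might look like:\n\n```yaml\njobs:\n - name: naemon\n   url: http://127.0.0.1:9090/metrics\n   selector:\n     allow:\n       - naemon_*\n```\n\n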
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Naemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nagios", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nagios", "link": "https://github.com/wbollock/nagios_exporter", "icon_filename": "nagios.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient\nIT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
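For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `nagios_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - nagios_*\n deny:\n - go_*\n\n```\n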
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nagios", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nature_remo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nature Remo E lite devices", "link": "https://github.com/kenfdev/remo-exporter", "icon_filename": "nature-remo.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series 
Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
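For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `remo_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - remo_*\n deny:\n - go_*\n\n```\n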
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nature_Remo_E_lite_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_solidfire", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetApp Solidfire", "link": "https://github.com/mjavier2k/solidfire-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
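For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `solidfire_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - solidfire_*\n deny:\n - go_*\n\n```\n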
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetApp_Solidfire", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetFlow", "link": "https://github.com/paihu/netflow_exporter", "icon_filename": "netflow.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
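For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `netflow_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netflow_*\n deny:\n - go_*\n\n```\n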
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetFlow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netmeter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetMeter", "link": "https://github.com/ssbostan/netmeter-exporter", "icon_filename": "netmeter.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
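For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `netmeter_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netmeter_*\n deny:\n - go_*\n\n```\n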
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetMeter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_ontap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netapp ONTAP API", "link": "https://github.com/sapcc/netapp-api-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
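For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `netapp_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netapp_*\n deny:\n - go_*\n\n```\n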
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netapp_ONTAP_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netatmo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netatmo sensors", "link": "https://github.com/xperimental/netatmo-exporter", "icon_filename": "netatmo.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
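For example, a hypothetical job that keeps only the exporter's own series and drops the Go runtime metrics that most exporters also expose (a sketch assuming the metrics are prefixed with `netatmo_`; verify the prefix against your endpoint's actual output):\n\n```yaml\n# hypothetical patterns: adjust the allow prefix to match your exporter's metrics\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netatmo_*\n deny:\n - go_*\n\n```\n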
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netatmo_sensors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-newrelic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "New Relic", "link": "https://github.com/jfindley/newrelic_exporter", "icon_filename": "newrelic.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic 
exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
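For illustration only (these metric name patterns are hypothetical), a selector that keeps `newrelic_*` series while dropping a noisy subset might look like:\n\n```yaml\nselector:\n allow:\n - newrelic_*\n deny:\n - newrelic_app_*\n```\n\n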
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-New_Relic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextdns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NextDNS", "link": "https://github.com/raylas/nextdns-exporter", "icon_filename": "nextdns.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
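A minimal sketch, assuming the exporter prefixes its series with `nextdns_` (verify against the actual exporter output):\n\n```yaml\nselector:\n allow:\n - nextdns_*\n```\n\n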
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NextDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextcloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nextcloud servers", "link": "https://github.com/xperimental/nextcloud-exporter", "icon_filename": "nextcloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
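As an illustrative example (the `nextcloud_*` pattern is an assumption), you could keep the exporter's own series and drop the Go runtime internals many exporters also expose:\n\n```yaml\nselector:\n allow:\n - nextcloud_*\n deny:\n - go_*\n```\n\n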
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nextcloud_servers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-obs_studio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OBS Studio", "link": "https://github.com/lukegb/obs_studio_exporter", "icon_filename": "obs-studio.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio 
Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
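A minimal sketch, assuming a hypothetical `obs_` metric prefix (check the exporter's output for the real names):\n\n```yaml\nselector:\n allow:\n - obs_*\n```\n\n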
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OBS_Studio", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-odbc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ODBC", "link": "https://github.com/MACHBASE/prometheus-odbc-exporter", "icon_filename": "odbc.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC 
Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
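For example (the patterns are hypothetical), you might keep only the exporter's own series and drop the standard `process_*` series exposed by many Prometheus clients:\n\n```yaml\nselector:\n allow:\n - odbc_*\n deny:\n - process_*\n```\n\n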
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ODBC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-otrs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OTRS", "link": "https://github.com/JulianDroste/otrs_exporter", "icon_filename": "otrs.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
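A minimal sketch, assuming a hypothetical `otrs_` metric prefix:\n\n```yaml\nselector:\n allow:\n - otrs_*\n```\n\n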
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OTRS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openhab", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenHAB", "link": "https://github.com/pdreker/openhab_exporter", "icon_filename": "openhab.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
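As an illustrative sketch (the `openhab_*` pattern is an assumption; check the exporter's output for actual names):\n\n```yaml\nselector:\n allow:\n - openhab_*\n```\n\n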
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenHAB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenLDAP (community)", "link": "https://github.com/tomcz/openldap_exporter", "icon_filename": "openldap.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics 
Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern matching. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenLDAP_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRC", "link": "https://git.sr.ht/~tomleb/openrc-exporter", "icon_filename": "linux.png", "categories": ["data-collection.linux-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern matching. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrct2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRCT2", "link": "https://github.com/terinjokes/openrct2-prometheus-exporter", "icon_filename": "openRCT2.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus 
Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern matching. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRCT2", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openroadm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenROADM devices", "link": "https://github.com/utdal/openroadm_exporter", "icon_filename": "openroadm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", 
"setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenROADM_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openstack", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenStack", "link": "https://github.com/CanonicalLtd/prometheus-openstack-exporter", "icon_filename": "openstack.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack 
exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern matching. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenStack", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenVAS", "link": "https://github.com/ModeClearCode/openvas_exporter", "icon_filename": "openVAS.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern matching. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenVAS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openweathermap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenWeatherMap", "link": "https://github.com/Tenzer/openweathermap-exporter", "icon_filename": "openweather.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap 
Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenWeatherMap", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvswitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Open vSwitch", "link": "https://github.com/digitalocean/openvswitch_exporter", "icon_filename": "ovs.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Open_vSwitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-oracledb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Oracle DB (community)", "link": "https://github.com/iamseth/oracledb_exporter", "icon_filename": "oracle.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["oracle", "database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB 
Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Oracle_DB_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-patroni", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Patroni", "link": "https://github.com/gopaytech/patroni_exporter", "icon_filename": "patroni.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni 
Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Patroni", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pws", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Personal Weather Station", "link": "https://github.com/JohnOrthoefer/pws-exporter", "icon_filename": "wunderground.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station 
Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Personal_Weather_Station", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgpool2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pgpool-II", "link": "https://github.com/pgpool/pgpool2_exporter", "icon_filename": "pgpool2.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II 
Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pgpool-II", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-philips_hue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Philips Hue", "link": "https://github.com/aexel90/hue_exporter", "icon_filename": "hue.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Philips_Hue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pimoroni_enviro_plus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pimoroni Enviro+", "link": "https://github.com/terradolor/prometheus-enviro-exporter", "icon_filename": "pimorino.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
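For instance, a minimal sketch (both patterns are hypothetical) that charts untyped series ending in `_count` as counters and untyped `enviro_`-prefixed series as gauges:\n\n```yaml\n# illustrative only; patterns follow Go's path.Match syntax\nfallback_type:\n counter:\n - \"*_count\"\n gauge:\n - \"enviro_*\"\n```\n\n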
\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pimoroni_Enviro+", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pingdom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pingdom", "link": "https://github.com/veepee-oss/pingdom_exporter", "icon_filename": "solarwinds.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
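Before the per-option details below, an illustrative sketch (the proxy address and header are hypothetical) combining several of these options in one job:\n\n```yaml\n# illustrative only: shorter timeout, scraping through a local proxy,\n# with one custom request header\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n timeout: 5\n proxy_url: http://127.0.0.1:3128\n headers:\n X-Scrape-Origin: netdata\n```\n\n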
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pingdom", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-podman", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Podman", "link": "https://github.com/containers/prometheus-podman-exporter", "icon_filename": "podman.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n
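##### HTTPS with a custom certificate authority\n\n> **Note**: An illustrative sketch; the certificate paths are hypothetical and should point to your own files.\n\nValidate the server certificate against a custom CA and present a client certificate.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/ca.crt\n tls_cert: /etc/ssl/client.crt\n tls_key: /etc/ssl/client.key\n\n```\n{% /details %}\n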
##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Podman", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-powerpal", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Powerpal devices", "link": "https://github.com/aashley/powerpal_exporter", "icon_filename": "powerpal.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n
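For example (values are illustrative), globally defined options apply to every job unless a job overrides them:\n\n```yaml\n# illustrative only: collect every 5 seconds, recheck failed detection each minute\nupdate_every: 5\nautodetection_retry: 60\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n```\n\n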
\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Powerpal_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proftpd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ProFTPD", "link": "https://github.com/transnano/proftpd_exporter", "icon_filename": "proftpd.png", "categories": ["data-collection.ftp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ProFTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Prometheus endpoint", "link": "https://prometheus.io/", "icon_filename": "prometheus.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["prometheus", "openmetrics"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoint.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
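In practice the filter is set on a specific job; a minimal sketch (the pattern is illustrative):\n\n```yaml\n# illustrative only: collect just the Go runtime series from this endpoint\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - go_*\n```\n\n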
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Prometheus_endpoint", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proxmox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Proxmox VE", "link": "https://github.com/prometheus-pve/prometheus-pve-exporter", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE 
Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Proxmox_VE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radius", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RADIUS", "link": "https://github.com/devon-mar/radius-exporter", "icon_filename": "radius.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS 
exporter](https://github.com/devon-mar/radius-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ripe_atlas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RIPE Atlas", "link": "https://github.com/czerwonk/atlas_exporter", "icon_filename": "ripe.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas 
Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RIPE_Atlas", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radio_thermostat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Radio Thermostat", "link": "https://github.com/andrewlow/radio-thermostat-exporter", "icon_filename": "radiots.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat 
Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Radio_Thermostat", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-rancher", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Rancher", "link": "https://github.com/infinityworksltd/prometheus-rancher-exporter", "icon_filename": "rancher.svg", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher 
Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Rancher", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-raritan_pdu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Raritan PDU", "link": "https://github.com/psyinfra/prometheus-raritan-pdu-exporter", "icon_filename": "raritan.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU 
Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Raritan_PDU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-redis_queue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Redis Queue", "link": "https://github.com/mdawar/rq-exporter", "icon_filename": "rq.png", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Redis_Queue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sabnzbd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SABnzbd", "link": "https://github.com/msroest/sabnzbd_exporter", "icon_filename": "sabnzbd.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SABnzbd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sma_inverter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SMA Inverters", "link": "https://github.com/dr0ps/sma_inverter_exporter", "icon_filename": "sma.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SMA_Inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sonic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SONiC NOS", "link": "https://github.com/kamelnetworks/sonic_exporter", "icon_filename": "sonic.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC 
Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SONiC_NOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SQL Database agnostic", "link": "https://github.com/free/sql_exporter", "icon_filename": "sql.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SQL_Database_agnostic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSH", "link": "https://github.com/Nordstrom/ssh_exporter", "icon_filename": "ssh.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSL Certificate", "link": "https://github.com/ribbybibby/ssl_exporter", "icon_filename": "ssl.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSL_Certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-salicru_eqx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Salicru EQX inverter", "link": "https://github.com/alejandroscf/prometheus_salicru_exporter", "icon_filename": "salicru.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX 
inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Salicru_EQX_inverter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sense_energy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sense Energy", "link": "https://github.com/ejsuncy/sense_energy_prometheus_exporter", "icon_filename": "sense.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy 
exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sense_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sentry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sentry", "link": "https://github.com/snakecharmer/sentry_exporter", "icon_filename": "sentry.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
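##### Job-level selector (illustrative)\n\nA minimal sketch of the `selector` option applied to a single job, following the option syntax shown above; the `sentry_*` pattern is an assumption for illustration, so check the exporter's actual metric names before using it.\n\n{% details summary=\"Config\" %}\n```yaml\n# collect only series whose names start with sentry_ (illustrative pattern)\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    selector:\n      allow:\n        - sentry_*\n\n```\n{% /details %}\n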
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sentry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-servertech", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ServerTech", "link": "https://github.com/tynany/servertech_exporter", "icon_filename": "servertech.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
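##### Job-level fallback_type (illustrative)\n\nA minimal sketch of the `fallback_type` option applied to a single job, assuming the exporter exposes some Untyped metrics you want treated as gauges; the `servertech_*` pattern is an assumption for illustration, not taken from the exporter's documentation.\n\n{% details summary=\"Config\" %}\n```yaml\n# treat matching Untyped metrics as gauges (illustrative pattern)\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    fallback_type:\n      gauge:\n        - servertech_*\n\n```\n{% /details %}\n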
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ServerTech", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shell_cmd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shell command", "link": "https://github.com/tomwilkie/prom-run", "icon_filename": "crunner.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
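##### Custom polling interval and timeout (illustrative)\n\nA sketch combining the per-job `update_every` and `timeout` options from the table above; the values shown are arbitrary examples, so tune them to how long your commands take to run.\n\n{% details summary=\"Config\" %}\n```yaml\n# poll every 5 seconds, fail requests after 2 seconds (example values)\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    update_every: 5\n    timeout: 2\n\n```\n{% /details %}\n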
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shell_command", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shelly", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shelly humidity sensor", "link": "https://github.com/aexel90/shelly_exporter", "icon_filename": "shelly.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
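##### Access through a proxy (illustrative)\n\nA sketch combining the `proxy_url` option with proxy basic authentication, which may help when the device is only reachable through an HTTP proxy; the proxy address and credentials are placeholders.\n\n{% details summary=\"Config\" %}\n```yaml\n# proxy address and credentials below are placeholders\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    proxy_url: http://127.0.0.1:3128\n    proxy_username: proxyuser\n    proxy_password: proxypass\n\n```\n{% /details %}\n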
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shelly_humidity_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sia", "link": "https://github.com/tbenz9/sia_exporter", "icon_filename": "sia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
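##### HTTPS with a custom certificate authority (illustrative)\n\nA sketch using the `tls_ca` option to verify the server against your own CA bundle instead of disabling verification entirely; the certificate path is a placeholder.\n\n{% details summary=\"Config\" %}\n```yaml\n# path to the CA bundle below is a placeholder\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_ca: /etc/ssl/certs/my_ca.pem\n\n```\n{% /details %}\n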
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-s7_plc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Siemens S7 PLC", "link": "https://github.com/MarcusCalidus/s7-plc-exporter", "icon_filename": "siemens.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
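##### Custom request headers (illustrative)\n\nA sketch of the `headers` option from the table above, assuming the exporter endpoint sits behind something that expects an extra HTTP header; the header name and value are placeholders, not part of the exporter.\n\n{% details summary=\"Config\" %}\n```yaml\n# X-Api-Key and its value are placeholders\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    headers:\n      X-Api-Key: my-secret-key\n\n```\n{% /details %}\n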
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m prometheus\n  ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Siemens_S7_PLC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-site24x7", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Site 24x7", "link": "https://github.com/svenstaro/site24x7_exporter", "icon_filename": "site24x7.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n
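##### Raising the time series limits (illustrative)\n\nA sketch of the `max_time_series` and `max_time_series_per_metric` options for endpoints that legitimately expose more series than the defaults (2000 and 200) allow; the values shown are arbitrary examples.\n\n{% details summary=\"Config\" %}\n```yaml\n# limits below are example values, size them to your endpoint\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    max_time_series: 5000\n    max_time_series_per_metric: 500\n\n```\n{% /details %}\n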
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Site_24x7", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-slurm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Slurm", "link": "https://github.com/vpenso/prometheus-slurm-exporter", "icon_filename": "slurm.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Slurm", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-smartrg808ac", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SmartRG 808AC Cable Modem", "link": "https://github.com/AdamIsrael/smartrg808ac_exporter", "icon_filename": "smartr.jpeg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SmartRG_808AC_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Smart meters SML", "link": "https://github.com/mweinelt/sml-exporter", "icon_filename": "sml.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Smart_meters_SML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-softether", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SoftEther VPN Server", "link": "https://github.com/dalance/softether_exporter", "icon_filename": "softether.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther 
Exporter](https://github.com/dalance/softether_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SoftEther_VPN_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solaredge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SolarEdge inverters", "link": "https://github.com/dave92082/SolarEdge-Exporter", "icon_filename": "solaredge.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge 
Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SolarEdge_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lsx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solar logging stick", "link": "https://gitlab.com/bhavin192/lsx-exporter", "icon_filename": "solar.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick 
exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solar_logging_stick", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solis Ginlong 5G inverters", "link": "https://github.com/candlerb/solis_exporter", "icon_filename": "solis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis 
Exporter](https://github.com/candlerb/solis_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the example to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solis_Ginlong_5G_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-spacelift", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Spacelift", "link": "https://github.com/spacelift-io/prometheus-exporter", "icon_filename": "spacelift.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift 
Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Spacelift", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-speedify", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Speedify CLI", "link": "https://github.com/willshen/speedify_exporter", "icon_filename": "speedify.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Speedify_CLI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sphinx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sphinx", "link": "https://github.com/foxdalas/sphinx_exporter", "icon_filename": "sphinx.png", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sphinx", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starlink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starlink (SpaceX)", "link": "https://github.com/danopstech/starlink_exporter", "icon_filename": "starlink.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink 
Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starlink_(SpaceX)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starwind_vsan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starwind VSAN VSphere Edition", "link": "https://github.com/evoicefire/starwind-vsan-exporter", "icon_filename": "starwind.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starwind_VSAN_VSphere_Edition", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-statuspage", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "StatusPage", "link": "https://github.com/vladvasiliu/statuspage-exporter", "icon_filename": "statuspage.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage 
Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-StatusPage", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-steam_a2s", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Steam", "link": "https://github.com/armsnyder/a2s-exporter", "icon_filename": "a2s.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Steam", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-storidge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Storidge", "link": "https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md", "icon_filename": "storidge.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
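For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `storidge_*` series except `storidge_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - storidge_*\n deny:\n - storidge_build_info\n```\n\n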
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Storidge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-stream_generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Stream", "link": "https://github.com/carlpett/stream_exporter", "icon_filename": "stream.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
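For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `stream_*` series except `stream_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - stream_*\n deny:\n - stream_build_info\n```\n\n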
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Stream", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sunspec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sunspec Solar Energy", "link": "https://github.com/inosion/prometheus-sunspec-exporter", "icon_filename": "sunspec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy 
Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
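For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `sunspec_*` series except `sunspec_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - sunspec_*\n deny:\n - sunspec_build_info\n```\n\n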
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sunspec_Solar_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-suricata", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Suricata", "link": "https://github.com/corelight/suricata_exporter", "icon_filename": "suricata.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata 
Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
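For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `suricata_*` series except `suricata_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - suricata_*\n deny:\n - suricata_build_info\n```\n\n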
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Suricata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-synology_activebackup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Synology ActiveBackup", "link": "https://github.com/codemonauts/activebackup-prometheus-exporter", "icon_filename": "synology.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
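For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `activebackup_*` series except `activebackup_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - activebackup_*\n deny:\n - activebackup_build_info\n```\n\n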
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Synology_ActiveBackup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sysload", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sysload", "link": "https://github.com/egmc/sysload_exporter", "icon_filename": "sysload.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
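For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `sysload_*` series except `sysload_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - sysload_*\n deny:\n - sysload_build_info\n```\n\n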
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sysload", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-trex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "T-Rex NVIDIA GPU Miner", "link": "https://github.com/dennisstritzke/trex_exporter", "icon_filename": "trex.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner 
Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
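For example, a hypothetical allow/deny pair (metric names are illustrative, not taken from this exporter) that keeps every `trex_*` series except `trex_build_info`:\n\n```yaml\n# illustrative patterns only - replace with metric names this exporter actually exposes\nselector:\n allow:\n - trex_*\n deny:\n - trex_build_info\n```\n\n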
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-T-Rex_NVIDIA_GPU_Miner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tacas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TACACS", "link": "https://github.com/devon-mar/tacacs-exporter", "icon_filename": "tacacs.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS 
Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TACACS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tplink_p110", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TP-Link P110", "link": "https://github.com/ijohanne/prometheus-tplink-p110-exporter", "icon_filename": "tplink.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 
Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TP-Link_P110", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tado", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tado smart heating solution", "link": "https://github.com/eko/tado-exporter", "icon_filename": "tado.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\u00b0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tado_smart_heating_solution", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tankerkoenig", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tankerkoenig API", "link": "https://github.com/lukasmalkmus/tankerkoenig_exporter", "icon_filename": "tanker.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API 
Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tankerkoenig_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_powerwall", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Powerwall", "link": "https://github.com/foogod/powerwall_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall 
Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Powerwall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_wall_connector", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Wall Connector", "link": "https://github.com/benclapp/tesla_wall_connector_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Wall_Connector", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_vehicle", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla vehicle", "link": "https://github.com/wywywywy/tesla-prometheus-exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla 
exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_vehicle", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-traceroute", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Traceroute", "link": "https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter", "icon_filename": "traceroute.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Traceroute", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twincat_ads_webservice", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TwinCAT ADS Web Service", "link": "https://github.com/MarcusCalidus/twincat-ads-webservice-exporter", "icon_filename": "twincat.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TwinCAT_ADS_Web_Service", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Twitch", "link": "https://github.com/damoun/twitch_exporter", "icon_filename": "twitch.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Twitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ubiquity_ufiber", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Ubiquiti UFiber OLT", "link": "https://github.com/swoga/ufiber-exporter", "icon_filename": "ubiquiti.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Ubiquiti_UFiber_OLT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-uptimerobot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Uptimerobot", "link": "https://github.com/wosc/prometheus-uptimerobot", "icon_filename": "uptimerobot.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot 
Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Uptimerobot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vscode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "VSCode", "link": "https://github.com/guicaulada/vscode-exporter", "icon_filename": "vscode.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-VSCode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vault_pki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vault PKI", "link": "https://github.com/aarnaud/vault-pki-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI 
Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type: process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vault_PKI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vertica", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vertica", "link": "https://github.com/vertica/vertica-prometheus-exporter", "icon_filename": "vertica.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vertica", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-warp10", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Warp10", "link": "https://github.com/centreon/warp10-sensision-exporter", "icon_filename": "warp10.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 
Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Warp10", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xmpp_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "XMPP Server", "link": "https://github.com/horazont/xmpp-blackbox-exporter", "icon_filename": "xmpp.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server 
Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-XMPP_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xiaomi_mi_flora", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Xiaomi Mi Flora", "link": "https://github.com/xperimental/flowercare-exporter", "icon_filename": "xiaomi.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care 
Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Xiaomi_Mi_Flora", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-yourls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "YOURLS URL Shortener", "link": "https://github.com/just1not2/prometheus-exporter-yourls", "icon_filename": "yourls.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS 
exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-YOURLS_URL_Shortener", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zerto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zerto", "link": "https://github.com/claranet/zerto-exporter", "icon_filename": "zerto.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zerto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zulip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zulip", "link": "https://github.com/brokenpip3/zulip-exporter", "icon_filename": "zulip.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zulip", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zyxel_gs1200", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zyxel GS1200-8", "link": "https://github.com/robinelfrink/gs1200-exporter", "icon_filename": "zyxel.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 
Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zyxel_GS1200-8", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bpftrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "bpftrace variables", "link": "https://github.com/andreasgerstmayr/bpftrace_exporter", "icon_filename": "bpftrace.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace 
exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-bpftrace_variables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cadvisor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "cAdvisor", "link": "https://github.com/google/cadvisor", "icon_filename": "cadvisor.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-cAdvisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-etcd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "etcd", "link": "https://etcd.io/", "icon_filename": "etcd.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-etcd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gpsd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "gpsd", "link": "https://github.com/natesales/gpsd-exporter", "icon_filename": "gpsd.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-gpsd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iqair", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "iqAir AirVisual air quality monitors", "link": "https://github.com/Packetslave/iqair_exporter", "icon_filename": "iqair.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair 
Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-iqAir_AirVisual_air_quality_monitors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jolokia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "jolokia", "link": "https://github.com/aklinkert/jolokia_exporter", "icon_filename": "jolokia.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-jolokia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-journald", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "journald", "link": "https://github.com/dead-claudia/journald-exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
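For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n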
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-journald", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-loki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "loki", "link": "https://github.com/grafana/loki", "icon_filename": "loki.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
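For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n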
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-loki", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mosquitto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mosquitto", "link": "https://github.com/sapcc/mosquitto-exporter", "icon_filename": "mosquitto.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
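For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n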
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mosquitto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mtail", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mtail", "link": "https://github.com/google/mtail", "icon_filename": "mtail.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
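For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n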
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mtail", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nftables", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "nftables", "link": "https://github.com/Sheridan/nftables_exporter", "icon_filename": "nftables.png", "categories": ["data-collection.linux-systems.firewall-metrics"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
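For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n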
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-nftables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgbackrest", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "pgBackRest", "link": "https://github.com/woblerr/pgbackrest_exporter", "icon_filename": "pgbackrest.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter)
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
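For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n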
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-pgBackRest", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-strongswan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "strongSwan", "link": "https://github.com/jlti-dev/ipsec_exporter", "icon_filename": "strongswan.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici
Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
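For example, the following selector (a sketch with illustrative glob patterns; see the selector README linked below for the full syntax) would keep all Go runtime series except the garbage-collection ones:\n\n```yaml\nselector:\n allow:\n - go_*\n deny:\n - go_gc_*\n```\n\n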
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n{% details summary=\"Config\" %}\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n{% /details %}\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if the metric has a 'quantile' label.\n- As Histogram if the metric has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-strongSwan", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-proxysql", "plugin_name": "go.d.plugin", "module_name": "proxysql", "monitored_instance": {"name": "ProxySQL", "link": "https://www.proxysql.com/", "icon_filename": "proxysql.png", "categories": ["data-collection.database-servers"]}, "keywords": ["proxysql", "databases", "sql"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default |
Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| my.cnf | Specifies my.cnf file to read connection parameters from under the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n{% /details %}\n##### my.cnf\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-proxysql-ProxySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pulsar", "plugin_name": "go.d.plugin", "module_name": "pulsar", "monitored_instance": {"name": "Apache Pulsar", "link": "https://pulsar.apache.org/", "icon_filename": "pulsar.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["pulsar"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | 
Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", "integration_type": "collector", "id": "go.d.plugin-pulsar-Apache_Pulsar", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-rabbitmq", "plugin_name": "go.d.plugin", "module_name": "rabbitmq", "monitored_instance": {"name": "RabbitMQ", "link": "https://www.rabbitmq.com/", "icon_filename": "rabbitmq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["rabbitmq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin.\n\nThe management plugin is included in the RabbitMQ distribution, but disabled.\nTo enable see [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queues. 
Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | messages |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-rabbitmq-RabbitMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-redis", "plugin_name": "go.d.plugin", "module_name": "redis", "monitored_instance": {"name": "Redis", "link": "https://redis.com/", "categories": ["data-collection.database-servers"], "icon_filename": "redis.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["redis", "databases"], "most_popular": true}, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Unix socket\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n{% /details %}\n##### TCP socket with password\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-redis-Redis", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/redis/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": {"name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": 
["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["scaleio"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| 
scaleio.storage_pool_capacity_utilization | used | percentage |\n| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", "integration_type": "collector", "id": "go.d.plugin-scaleio-Dell_EMC_ScaleIO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-snmp", "plugin_name": "go.d.plugin", "module_name": "snmp", "monitored_instance": {"name": "SNMP devices", "link": "", "icon_filename": "snmp.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["snmp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector monitors any SNMP devices and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\nIt supports:\n\n- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3.\n- any number of SNMP devices.\n- each SNMP device can be used to collect data for any number of charts.\n- each chart may have any number of dimensions.\n- each SNMP device may have a different update frequency.\n- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).\n\nKeep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second.\n`go.d.plugin` reports the time it took for the SNMP device to respond when executed in the debug mode.\n\nAlso, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped.\nThis is a problem of the SNMP device, not this collector. 
In this case, consider reducing the frequency of data collection (increasing `update_every`).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Find OIDs\n\nUse `snmpwalk`, like this:\n\n```sh\nsnmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1\n```\n\n- `-t 20` is the timeout in seconds.\n- `-O fn` will display full OIDs in numeric format.\n- `-v 2c` is the SNMP version.\n- `-c public` is the SNMP community.\n- `192.0.2.1` is the SNMP device.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target IPv4 address. | 127.0.0.1 | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 10 | no |\n| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. 
| 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n{% /details %}\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`.\n\n> **SNMPv1**: just set `options.version` to 1.\n> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. 
temperature), use `absolute` instead.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\nThe rest of the configuration is the same as in the SNMPv1/2 example.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n{% /details %}\n##### Multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended at:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension in will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n{% /details %}\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means, inject the anchor, then extend. 
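For instance, a minimal standalone sketch of the merge-key mechanics (hypothetical mapping names, not from any shipped config):\n\n```yaml\ndefaults: &defaults\n community: public\n options:\n version: 2\n\ndevice_a:\n <<: *defaults # inject everything from 'defaults'\n hostname: \"192.0.2.10\" # then extend with device-specific keys\n```\n\n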
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThe metrics that will be collected are defined in the configuration file.\n", "integration_type": "collector", "id": "go.d.plugin-snmp-SNMP_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-solr", "plugin_name": "go.d.plugin", "module_name": "solr", "monitored_instance": {"name": "Solr", "link": "https://lucene.apache.org/solr/", "icon_filename": "solr.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["solr"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Solr\n\nPlugin: go.d.plugin\nModule: solr\n\n## Overview\n\nThis collector monitors Solr instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Solr version 6.4+\n\nThis collector does not work with Solr versions lower than 6.4.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/solr.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/solr.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8983 | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal Solr instance with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n - name: remote\n url: http://203.0.113.10:8983\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m solr\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Solr instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| solr.search_requests | search | requests/s |\n| solr.search_errors | errors | errors/s |\n| solr.search_errors_by_type | client, server, timeouts | errors/s |\n| solr.search_requests_processing_time | time | milliseconds |\n| solr.search_requests_timings | min, median, mean, max | milliseconds |\n| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n| solr.update_requests | search | requests/s |\n| solr.update_errors | errors | errors/s |\n| solr.update_errors_by_type | client, server, timeouts | errors/s |\n| solr.update_requests_processing_time | time | milliseconds |\n| solr.update_requests_timings | min, median, mean, max | milliseconds |\n| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-solr-Solr", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/solr/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-springboot2", "plugin_name": "go.d.plugin", "module_name": "springboot2", "monitored_instance": {"name": "Java Spring-boot 2 applications", "link": "", "icon_filename": "springboot.png", "categories": ["data-collection.apm"]}, "keywords": ["springboot"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Java Spring-boot 2 applications\n\nPlugin: go.d.plugin\nModule: springboot2\n\n## Overview\n\nThis collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects applications running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Spring Boot Actuator\n\nThe Spring Boot Actuator exposes metrics over HTTP, to use it:\n\n- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.\n- set `management.endpoints.web.exposure.include=*` in your `application.properties`.\n\nRefer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. 
\u2018How-to\u2019 guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/springboot2.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/springboot2.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/actuator/prometheus\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n - name: remote\n url: http://192.0.2.1:8080/actuator/prometheus\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m springboot2\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Java Spring-boot 2 applications instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| springboot2.thread | daemon, total | threads |\n| springboot2.heap | free, eden, survivor, old | B |\n| springboot2.heap_eden | used, commited | B |\n| springboot2.heap_survivor | used, commited | B |\n| springboot2.heap_old | used, commited | B |\n| springboot2.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-springboot2-Java_Spring-boot_2_applications", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-squidlog", "plugin_name": "go.d.plugin", "module_name": "squidlog", "monitored_instance": {"name": "Squid log files", "link": "http://www.squid-cache.org/", "icon_filename": "squid.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["squid", "logs"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects Squid access log files in their default locations on localhost.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog knows how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). 
|\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client (after adapting). |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary: the key is a field name as it appears in the log, and the value is one of the collector's **known fields**.\n\n> 
**Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions; the subexpression names must be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\n\n
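##### Custom log format\n\nA sketch of a job that parses a custom log line with a regexp pattern. The `path` option name and the pattern itself are illustrative assumptions; check `go.d/squidlog.conf` for the authoritative option names and adapt the pattern to your log format.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: custom_squid\n path: /var/log/squid/access.log\n parser:\n log_type: regexp\n regexp_config:\n pattern: '[0-9.]+ +(?P<resp_time>[0-9]+) (?P<client_address>[^ ]+) (?P<cache_code>[A-Z_]+)/(?P<http_code>[0-9]+) (?P<resp_size>[0-9]+)'\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squidlog\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-squidlog-Squid_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-supervisord", "plugin_name": "go.d.plugin", "module_name": "supervisord", "monitored_instance": {"name": "Supervisor", "link": "http://supervisord.org/", 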
"icon_filename": "supervisord.png", "categories": ["data-collection.processes-and-system-services"]}, "keywords": ["supervisor"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n{% /details %}\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n{% /details %}\n##### Socket\n\nCollect metrics via Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-supervisord-Supervisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-systemdunits", "plugin_name": "go.d.plugin", "module_name": "systemdunits", "monitored_instance": {"name": "Systemd Units", "link": "https://www.freedesktop.org/wiki/Software/systemd/", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"]}, "keywords": ["systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors the state of Systemd units.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| include | Systemd units filter. | *.service | no |\n| timeout | System bus requests timeout. | 1 | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n{% /details %}\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n{% /details %}\n##### One specific unit\n\nCollect state of one specific unit.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n{% /details %}\n##### All unit types\n\nCollect state of all units.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: all-units\n include:\n - '*'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n
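- Cross-check with systemd itself which units exist and what state they are in (a plain `systemctl` sketch, independent of the collector):\n\n ```bash\n systemctl list-units --type=service --all\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state 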
](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-systemdunits-Systemd_Units", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-tengine", "plugin_name": "go.d.plugin", "module_name": "tengine", "monitored_instance": {"name": "Tengine", "link": "https://tengine.taobao.org/", "icon_filename": "tengine.jpeg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["tengine", "web", "webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data 
collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the `ngx_http_reqstat_module` module\n\nTo enable the module, see the [ngx_http_reqstat_module documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n{% /details %}\n
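You can check the status endpoint from the Netdata host before configuring the collector (a sketch, assuming the default `/us` location used in the examples above):\n\n```bash\ncurl http://127.0.0.1/us\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 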
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-tengine-Tengine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-traefik", "plugin_name": "go.d.plugin", "module_name": "traefik", "monitored_instance": {"name": "Traefik", "link": "https://traefik.io/", "icon_filename": "traefik.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["traefik", "proxy", "webproxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n
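A minimal sketch of the Traefik static configuration that exposes such a metrics endpoint on `:8082` (the port the examples below assume; the linked documentation is authoritative):\n\n```yaml\nentryPoints:\n  metrics:\n    address: \":8082\"\n\nmetrics:\n  prometheus:\n    entryPoint: metrics\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"All options\" %}\n| Name | Description | Default | Required 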
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n{% /details %}\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the entrypoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", "integration_type": "collector", "id": "go.d.plugin-traefik-Traefik", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-unbound", "plugin_name": "go.d.plugin", "module_name": "unbound", "monitored_instance": {"name": "Unbound", "link": "https://nlnetlabs.nl/projects/unbound/about/", "icon_filename": "unbound.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["unbound", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using unix socket:\n\n- socket should be readable and writeable by `netdata` user\n\nIf using ip socket and TLS is disabled:\n\n- socket should be accessible via network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by `netdata` user\n- `control-cert-file` should be readable by `netdata` user\n\nFor auto-detection of parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by `netdata` user\n- if you have several configuration files (include feature) all of them should be readable by `netdata` user\n\n
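After enabling the control interface, you can verify it from the shell (a sketch using `unbound-control`, which ships with Unbound; run it with permissions to read the control key and certificate):\n\n```bash\nunbound-control status\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. 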
| 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/TLS handshake timeout. | 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n{% /details %}\n##### Unix socket\n\nConnecting through Unix socket.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", "integration_type": "collector", "id": "go.d.plugin-unbound-Unbound", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-upsd", "plugin_name": "go.d.plugin", "module_name": "upsd", "monitored_instance": {"name": "UPS (NUT)", "link": "", "icon_filename": "plug-circle-bolt.svg", "categories": ["data-collection.ups"]}, "keywords": ["ups", "nut"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n
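- Check that the UPS daemon is reachable and lists your devices (a sketch using NUT's `upsc` client, if installed):\n\n ```bash\n upsc -l 127.0.0.1\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). \"battery.type\" variable value. |\n| device_model | Device model. \"device.model\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. 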
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", "integration_type": "collector", "id": "go.d.plugin-upsd-UPS_(NUT)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vcsa", "plugin_name": "go.d.plugin", "module_name": "vcsa", "monitored_instance": {"name": "vCenter Server Appliance", "link": "https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. |\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. 
It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nThe tables below describe the possible health statuses.\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error occurred while retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-vcsa-vCenter_Server_Appliance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vernemq", "plugin_name": "go.d.plugin", "module_name": "vernemq", "monitored_instance": {"name": "VerneMQ", "link": "https://vernemq.com", "icon_filename": "vernemq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["vernemq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n{% /details %}\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split 
brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute 
|\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimension per 
reason | packets/s |\n| vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vernemq-VerneMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vsphere", "plugin_name": "go.d.plugin", "module_name": "vsphere", "monitored_instance": {"name": "VMware vCenter Server", "link": "https://www.vmware.com/products/vcenter-server.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware", "esxi", "vcenter"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated at 20-second granularity**.\n\nFor big installations, 20 seconds is likely not enough, and the value should be tuned.\n\nTo get a better idea, we recommend running the collector in debug mode and checking how long it takes to collect metrics.\n
\nExample (all unrelated debug lines were removed):\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n\nHere you can see that discovery took `525.614041ms` and metrics collection took `154.77997ms`. Discovery runs in a separate thread, so it does not affect collection.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n
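\nAs a rough, hypothetical sketch only (the numbers below are illustrative, not recommendations; derive yours from your own debug run), a job tuned for a larger installation might look like:\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n update_every : 60 # hypothetical: a multiple of the 20s stats granularity, large enough for a full collection cycle\n timeout : 40 # hypothetical: comfortably above the longest scrape time observed in debug mode\n```\n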
\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vsphere-VMware_vCenter_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-web_log", "plugin_name": "go.d.plugin", "module_name": "web_log", "monitored_instance": {"name": "Web server log files", "link": "", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "webservers.svg"}, "keywords": ["webserver", "apache", "httpd", "nginx", "lighttpd", "logs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.\n- NGINX logs the URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. 
| [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). | | yes |\n| parser | Log parser configuration. | | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.csv_config | CSV log parser config. | | no |\n| parser.csv_config.delimiter | CSV field delimiter. | , | no |\n| parser.csv_config.format | CSV log format. | | no |\n| parser.ltsv_config | LTSV log parser config. | | no |\n| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.json_config | JSON log parser config. | | no |\n| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| parser.regexp_config | RegExp log parser config. | | no |\n| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### parser.log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nparser:\n log_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file. It:\n\n- checks if the format is `CSV` (using a regexp).\n- checks if the format is `JSON` (using a regexp).\n- assumes the format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't work, you need to set the format manually.\n\n\n##### parser.csv_config.format\n\n
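A CSV log format describes one log line as **known fields** joined by the CSV delimiter. A minimal sketch (the format string reuses one of the predefined formats shown above; the delimiter is set to a space explicitly to match it, since this is an assumption rather than documented default behavior):\n\n```yaml\nparser:\n log_type: csv\n csv_config:\n delimiter: ' '\n format: '$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent'\n```\n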
\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: json\n json_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions (groups). These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-web_log-Web_server_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-whoisquery", "plugin_name": "go.d.plugin", "module_name": "whoisquery", "monitored_instance": {"name": "Domain expiration date", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, 
"keywords": ["whois"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple domains.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-whoisquery-Domain_expiration_date", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-ad", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Active Directory", "link": "https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "active directory", "ad", "adcs", "adfs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. 
CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-hyperv", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "HyperV", "link": 
"https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "hyperv", "virtualization", "vm"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-msexchange", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS Exchange", "link": "https://www.microsoft.com/en-us/microsoft-365/exchange/email", "icon_filename": 
"exchange.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mail"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-mssql", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS SQL Server", "link": "https://www.microsoft.com/en-us/sql-server/", "icon_filename": "mssql.svg", 
"categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mssql", "database", "db"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-dotnet", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "NET Framework", "link": "https://dotnet.microsoft.com/en-us/download/dotnet-framework", "icon_filename": 
"dotnet.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "dotnet"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["data-collection.windows-systems"], 
"icon_filename": "windows.svg"}, "keywords": ["windows", "microsoft"], "most_popular": true, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n{% /details %}\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-wireguard", "plugin_name": "go.d.plugin", "module_name": "wireguard", "monitored_instance": {"name": "WireGuard", "link": "https://www.wireguard.com/", "categories": ["data-collection.vpns"], "icon_filename": 
"wireguard.svg"}, "keywords": ["wireguard", "vpn", "security"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-wireguard-WireGuard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-x509check", "plugin_name": "go.d.plugin", "module_name": "x509check", "monitored_instance": {"name": "X.509 certificate", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "lock.svg"}, "keywords": ["x509", "certificate"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors x509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. 
| 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n{% /details %}\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n{% /details %}\n##### Local file certificate\n\nLocal file certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n{% /details %}\n##### SMTP certificate\n\nSMTP certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site1.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | revoked | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-x509check-X.509_certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-zookeeper", "plugin_name": "go.d.plugin", "module_name": "zookeeper", "monitored_instance": {"name": "ZooKeeper", "link": "https://zookeeper.apache.org/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "zookeeper.svg"}, "keywords": ["zookeeper"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the ZooKeeper instance via TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to ZooKeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nLocal server.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n{% /details %}\n##### TLS with self-signed certificate\n\nZooKeeper with TLS and a self-signed certificate.\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-zookeeper-ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "idlejitter.plugin", "module_name": "idlejitter.plugin", "monitored_instance": {"name": "Idle OS Jitter", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["latency", "jitter"], "most_popular": false}, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations, so you can tune the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. 
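For example, if the thread asks to sleep for 20 ms and the system actually wakes it 20.7 ms later, that sample's idle jitter is 700 microseconds. 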
This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", "integration_type": "collector", "id": "idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ioping.plugin", "module_name": "ioping.plugin", "monitored_instance": {"name": "IOPing", "link": "https://github.com/koct9i/ioping", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. 
Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to the `ioping` command. | -T 1000000 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n{% details summary=\"Config\" %}\n```yaml\ndestination=\"/dev/sda\"\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "ioping.plugin-ioping.plugin-IOPing", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "macos.plugin", "module_name": "mach_smi", "monitored_instance": {"name": "macOS", "link": "https://www.apple.com/macos", "categories": ["data-collection.macos-systems"], "icon_filename": "macos.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["macos", "apple", "darwin"], "most_popular": false}, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistic` is called to collect CPU and virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). 
| yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). | yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). 
| auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). | yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n{% /details %}\n##### Disable the complete Mach SMI section.\n\nA basic example that disables the entire Mach SMI section.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "macos.plugin-mach_smi-macOS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "nfacct.plugin", "module_name": "nfacct.plugin", "monitored_instance": {"name": "Netfilter", "link": "https://www.netfilter.org/", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "netfilter.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a socket to connect to netfilter and collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n", "integration_type": "collector", "id": "nfacct.plugin-nfacct.plugin-Netfilter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "perf.plugin", "module_name": "perf.plugin", "monitored_instance": {"name": "CPU performance", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux", "cpu performance", "cpu cache", "perf.plugin"], "most_popular": false}, "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses syscall(2) to open a file descriptor to monitor the perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets the permission during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and it is not desirable to have Netdata silently compete for PMUs, interfering with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. 
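For example, under the default configuration the relevant part of `netdata.conf` would then read as in this minimal sketch:\n\n```ini\n[plugins]\n perf = yes\n```\n\n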
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify which charts are shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n{% /details %}\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```yaml\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:perf]\n command options = cycles\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", "integration_type": "collector", "id": "perf.plugin-perf.plugin-CPU_performance", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/diskstats", "monitored_instance": {"name": "Disk Statistics", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "disks", "io", "bcache", "block devices"], "most_popular": false}, "overview": "# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | 
disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.bcache_cache_alloc | unused, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/diskstats-Disk_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/interrupts", "monitored_instance": {"name": "Interrupts", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["interrupts"], "most_popular": false}, "overview": "# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. 
The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. They are primarily\n used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system\n performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n understand what your system is doing. It can provide insights into the system's interaction with hardware,\n drivers, and other parts of the kernel.\n\n
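As a quick illustration, you can inspect the file directly; the excerpt below is hypothetical and abridged (the column layout varies with CPU count and architecture):\n\n```bash\ncat /proc/interrupts\n#            CPU0       CPU1\n#   0:         44          0   IO-APIC    2-edge      timer\n#   1:          9          0   IO-APIC    1-edge      i8042\n# LOC:    1234567    1234890   Local timer interrupts\n```\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 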
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/interrupts-Interrupts", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/loadavg", "monitored_instance": {"name": "System Load Average", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["load", "load average"], "most_popular": false}, "overview": "# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also shows the number of currently runnable processes and the total number of processes,\nfollowed by the PID of the most recently created process.\n\n
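For example, you can read the file directly; the values below are a hypothetical snapshot:\n\n```bash\ncat /proc/loadavg\n# 0.86 0.63 0.52 2/467 12345\n# 1-min, 5-min, 15-min load averages; runnable/total processes; last created PID\n```\n\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the\n load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n system's workload. 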
This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/loadavg-System_Load_Average", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/mdstat", "monitored_instance": {"name": "MD RAID", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["raid", "mdadm", "mdstat"], "most_popular": false}, "overview": "# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/mdstat-MD_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/meminfo", "monitored_instance": {"name": "Memory Usage", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory", "ram", "available", "committed"], "most_popular": false}, "overview": "# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n whether memory usage is a factor. 
For example, if your system is slow and swap usage is high, it could\n mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n decisions about future capacity needs.\n\n
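As an illustration, individual fields can be queried directly (the sample values below are hypothetical):\n\n```bash\ngrep -E '^(MemTotal|MemAvailable|SwapCached|Committed_AS)' /proc/meminfo\n# MemTotal:       16284004 kB\n# MemAvailable:   10936524 kB\n# SwapCached:        10444 kB\n# Committed_AS:    9147636 kB\n```\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 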
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.committed | Committed_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/meminfo-Memory_Usage", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/dev", "monitored_instance": {"name": "Network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["network interfaces"], "most_popular": false}, "overview": "# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/dev-Network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/ip_vs_stats", "monitored_instance": {"name": "IP Virtual Server", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip virtual server"], "most_popular": false}, "overview": "# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration 
monitors IP Virtual Server statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/netstat", "monitored_instance": {"name": "Network statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip", "udp", "udplite", "icmp", "netstat", "snmp"], "most_popular": false}, "overview": "# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests dropped due to the TCP SYN queue being full over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/netstat-Network_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfs", "monitored_instance": {"name": "NFS Client", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs client", "filesystem"], "most_popular": false}, "overview": "# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfs-NFS_Client", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfsd", "monitored_instance": {"name": "NFS Server", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs server", "filesystem"], "most_popular": false}, "overview": "# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfsd-NFS_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sctp/snmp", "monitored_instance": {"name": "SCTP Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sctp", "stream control transmission protocol"], "most_popular": false}, "overview": "# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat", "monitored_instance": {"name": "Socket statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sockets"], "most_popular": false}, "overview": "# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | KiB |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat-Socket_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat6", "monitored_instance": {"name": "IPv6 Socket Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipv6 sockets"], "most_popular": false}, "overview": "# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/softnet_stat", "monitored_instance": {"name": "Softnet Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softnet"], "most_popular": false}, "overview": "# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Number of packets dropped because the backlog queue was full (`dropped`).\n- Number of times ksoftirq ran out of quota while packets were still pending, i.e. did not process all lists before the quota was exhausted (`squeezed`).\n- Number of times net_rx_action was rescheduled.\n- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.\n- Number of times GRO cells were processed.\n\n
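As a quick illustration, you can inspect the raw file directly; the excerpt below shows a hypothetical single-CPU system (all counters are hexadecimal, one line per CPU, and the exact number of columns depends on the kernel version):\n\n```bash\ncat /proc/net/softnet_stat\n# 000d8d33 00000000 00000012 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000\n# the first three columns are processed, dropped, squeezed\n```\n\nMonitoring the /proc/net/softnet_stat file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to 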
exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/softnet_stat-Softnet_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/nf_conntrack", "monitored_instance": {"name": "Conntrack", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["connection tracking mechanism", "netfilter", "conntrack"], "most_popular": false}, "overview": "# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/nf_conntrack-Conntrack", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/synproxy", "monitored_instance": {"name": "Synproxy", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["synproxy"], "most_popular": false}, "overview": "# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/synproxy-Synproxy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/wireless", "monitored_instance": {"name": "Wireless network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["wireless devices"], "most_popular": false}, "overview": "# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/wireless-Wireless_network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pagetypeinfo", "monitored_instance": {"name": "Page types", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory page types"], "most_popular": false}, "overview": "# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pagetypeinfo-Page_types", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pressure", "monitored_instance": {"name": "Pressure Stall Information", "link": "", "categories": ["data-collection.linux-systems.pressure-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pressure"], "most_popular": false}, "overview": "# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` exposes pressure stall information (PSI).\nPSI is a feature that allows the system to track the amount of time the system is stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors four separate files, one for each resource:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides metrics for stall time over the last 10 seconds, 1 minute, and 5 minutes.\n\nMonitoring the /proc/pressure files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", 
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pressure-Pressure_Stall_Information", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/softirqs", "monitored_instance": {"name": "SoftIRQ statistics", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softirqs", "interrupts"], "most_popular": false}, "overview": "# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/softirqs-SoftIRQ_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs", "monitored_instance": {"name": "ZFS Pools", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs pools", "pools", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Pools\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs\n\n## Overview\n\nThis integration provides metrics about the state of ZFS pools.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts 
are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs-ZFS_Pools", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs/arcstats", "monitored_instance": {"name": "ZFS Adaptive Replacement Cache", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs arc", "arc", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/stat", "monitored_instance": {"name": "System statistics", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["cpu utilization", "process counts"], "most_popular": false}, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states, frequencies, and other key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- 
`/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/stat-System_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/sys/kernel/random/entropy_avail", "monitored_instance": {"name": "Entropy", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["entropy"], "most_popular": false}, "overview": "# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy is a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable.\n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. 
These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/uptime", "monitored_instance": {"name": "System Uptime", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["uptime"], "most_popular": false}, "overview": "# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/uptime-System_Uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/vmstat", "monitored_instance": {"name": "Memory Statistics", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "page faults", "oom", "numa"], "most_popular": false}, "overview": "# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by 
*scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/vmstat-Memory_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/block/zram", "monitored_instance": {"name": "ZRAM", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zram"], "most_popular": false}, "overview": "# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device backed by a portion of your system's RAM.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and the effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/block/zram-ZRAM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/drm", "monitored_instance": {"name": "AMD GPU", "link": "https://www.amd.com", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "amd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["amd", "gpu", "hardware"], "most_popular": false}, "overview": "# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/drm-AMD_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/infiniband", "monitored_instance": {"name": "InfiniBand", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["infiniband", "rdma"], "most_popular": false}, "overview": "# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/infiniband-InfiniBand", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/power_supply", "monitored_instance": {"name": "Power Supply", "link": "", "categories": ["data-collection.linux-systems.power-supply-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["psu", "power supply"], "most_popular": false}, "overview": "# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors Power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/power_supply-Power_Supply", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/edac/mc", "monitored_instance": {"name": "Memory modules (DIMMs)", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["edac", "ecc", "dimm", "ram", "hardware"], "most_popular": false}, "overview": "# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds:\n - errors related to a DIMM\n - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds:\n - memory controllers that can identify the physical DIMMs and report errors directly for them,\n - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n In this case, the DIMMs reported may be more than the physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_dimm_errors | correctable, uncorrectable | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/node", "monitored_instance": {"name": "Non-Uniform Memory Access", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["numa"], "most_popular": false}, "overview": "# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and the ability of the system to be expanded. 
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/fs/btrfs", "monitored_instance": {"name": "BTRFS", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.btrfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["btrfs", "filesystem"], "most_popular": false}, "overview": "# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## 
Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
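As a sanity check, the space figures below can be compared with what the `btrfs` CLI itself reports (illustrative only; assumes a BTRFS filesystem mounted at `/`):\n\n```bash\nsudo btrfs filesystem usage /\n```\n\n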
An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/fs/btrfs-BTRFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/kernel/mm/ksm", "monitored_instance": {"name": "Kernel Same-Page Merging", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ksm", "samepage", "merging"], "most_popular": false}, "overview": "# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
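The underlying counters come straight from sysfs, so you can view the raw values the collector reads (illustrative only):\n\n```bash\ngrep -H . /sys/kernel/mm/ksm/pages_*\n```\n\n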
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "ipc", "monitored_instance": {"name": "Inter Process Communication", "link": "", "categories": ["data-collection.linux-systems.ipc-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipc", "semaphores", "shared memory"], "most_popular": false}, "overview": "# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple\n processes are trying to access a single shared resource, semaphores can ensure that only one process\n accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
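These values mirror what the standard `ipcs` utility reports, so you can cross-check them directly (illustrative only):\n\n```bash\nipcs -u\n```\n\n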
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-ipc-Inter_Process_Communication", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "adaptec_raid", "monitored_instance": {"name": "AdaptecRAID", "link": "https://www.microchip.com/en-us/products/storage", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "adaptec.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# AdaptecRAID\n\nPlugin: python.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nThis collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.\n\n\nIt uses the arcconf command line utility (from Adaptec) to monitor your RAID controller.\n\nExecuted commands:\n - `sudo -n arcconf GETCONFIG 1 LD`\n - `sudo -n arcconf GETCONFIG 1 PD`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the arcconf command line utility\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run arcconf as sudoer\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\nAdd the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/arcconf\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. 
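You can inspect the bounding set currently applied to the service before changing anything (a read-only check on systemd systems):\n\n```bash\nsystemctl show netdata -p CapabilityBoundingSet\n```\n\n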
Resetting is not optimal, but it is the next-best solution, given that arcconf cannot otherwise be executed via sudo.\n\nAs root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/adaptec_raid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: my_job_name \n update_every: 1 # the JOB's data collection frequency\n priority: 60000 # the JOB's order on the dashboard\n penalty: yes # the JOB's penalty\n autodetection_retry: 0 # the JOB's re-check interval in seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin adaptec_raid debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |\n| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AdaptecRAID instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptec_raid.ld_status | a dimension per logical device | bool |\n| adaptec_raid.pd_state | a dimension per physical device | bool |\n| adaptec_raid.smart_warnings | a dimension per physical device | count |\n| adaptec_raid.temperature | a dimension per physical device | celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-adaptec_raid-AdaptecRAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/adaptec_raid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "alarms", "monitored_instance": {"name": "Netdata Agent alarms", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["alarms", "netdata"], "most_popular": false}, "overview": "# Netdata Agent alarms\n\nPlugin: python.d.plugin\nModule: alarms\n\n## Overview\n\nThis collector creates an 'Alarms' menu with one line plot of `alarms.status`.\n\n\nAlarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. 
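You can see the raw payload the collector consumes by querying the endpoint yourself (assuming a local agent on the default port):\n\n```bash\ncurl -s 'http://127.0.0.1:19999/api/v1/alarms?all'\n```\n\n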
Also, by default all alarms produced will be monitored.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/alarms.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/alarms.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |\n| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {\"CLEAR\": 0, \"WARNING\": 1, \"CRITICAL\": 2} | yes |\n| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |\n| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |\n| alarm_contains_words | A \",\" separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with \"cpu\" or \"load\" in alarm name. Default includes all. | | yes |\n| alarm_excludes_words | A \",\" separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with \"cpu\" or \"load\" in alarm name. Default excludes None. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n\n```\n##### Advanced\n\nAn advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.\n\"ML\" job will collect status and values for all alarms with \"ml_\" in the name. 
Default job will collect status for all other alarms.\n\n\n{% details summary=\"Config\" %}\n```yaml\nML:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: true\n alarm_status_chart_type: 'stacked'\n alarm_contains_words: 'ml_'\n\nDefault:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: false\n alarm_status_chart_type: 'stacked'\n alarm_excludes_words: 'ml_'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin alarms debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netdata Agent alarms instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |\n| alarms.values | a dimension per alarm representing the latest collected value of the alarm. 
| value |\n\n", "integration_type": "collector", "id": "python.d.plugin-alarms-Netdata_Agent_alarms", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "am2320", "monitored_instance": {"name": "AM2320", "link": "https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "am2320", "sensor", "humidity"], "most_popular": false}, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. 
| 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n", "integration_type": "collector", "id": "python.d.plugin-am2320-AM2320", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "beanstalk", "monitored_instance": {"name": "Beanstalk", "link": "https://beanstalkd.github.io/", "categories": ["data-collection.message-brokers"], "icon_filename": "beanstalk.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["beanstalk", "beanstalkd", "message"], "most_popular": false}, "overview": "# Beanstalk\n\nPlugin: python.d.plugin\nModule: beanstalk\n\n## Overview\n\nMonitor Beanstalk metrics to enhance job queueing and processing efficiency. 
Track job rates, processing times, and queue lengths for better task management.\n\nThe collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### beanstalkc python module\n\nThe collector requires the `beanstalkc` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/beanstalk.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |\n| port | Port to the IP or URL to a beanstalk service. | 11300 | no |\n\n{% /details %}\n#### Examples\n\n##### Remote beanstalk server\n\nA basic remote beanstalk server\n\n```yaml\nremote:\n name: 'beanstalk'\n host: '1.2.3.4'\n port: 11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local_beanstalk'\n host: '127.0.0.1'\n port: 11300\n\nremote_job:\n name: 'remote_beanstalk'\n host: '192.0.2.1'\n port: 11300\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin beanstalk debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.cpu_usage | user, system | cpu time |\n| beanstalk.jobs_rate | total, timeouts | jobs/s |\n| beanstalk.connections_rate | connections | connections/s |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.current_connections | written, producers, workers, waiting | connections |\n| beanstalk.binlog | written, migrated | records/s |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. Each tube produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.jobs_rate | jobs | jobs/s |\n| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.connections | using, waiting, watching | connections |\n| beanstalk.commands | deletes, pauses | commands/s |\n| beanstalk.pause | since, left | seconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-beanstalk-Beanstalk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "bind_rndc", "monitored_instance": {"name": "ISC Bind (RNDC)", "link": "https://www.isc.org/bind/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dns", "bind", "server"], "most_popular": false}, "overview": "# ISC Bind (RNDC)\n\nPlugin: python.d.plugin\nModule: bind_rndc\n\n## Overview\n\nMonitor ISC Bind (RNDC) performance for optimal DNS server operations. 
Monitor query rates, response times, and error rates to ensure reliable DNS service delivery.\n\nThis collector uses the `rndc` tool to dump statistics (named.stats), then reads them to gather Bind Name Server summary performance metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to read the named.stats file at `/var/log/bind/named.stats`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum bind version and permissions\n\nThe BIND version must be >= 9.6, and the Netdata user must have permission to run `rndc stats`\n\n#### Setup log rotate for bind stats\n\nBIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.\nIt is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.\n\nTo set up BIND to dump stats do the following:\n\n1. Add to the options {} block of 'named.conf.options':\n`statistics-file \"/var/log/bind/named.stats\";`\n\n2. Create the bind/ directory in /var/log:\n`cd /var/log/ && mkdir bind`\n\n3. Change the owner of the directory to the 'bind' user:\n`chown bind bind/`\n\n4. RELOAD (NOT restart) BIND:\n`systemctl reload bind9.service`\n\n5. Run 'rndc stats' as root to dump the statistics (BIND will create named.stats in the new directory)\n\nTo allow Netdata to run 'rndc stats' change the '/etc/bind/rndc.key' group to netdata:\n`chown :netdata rndc.key`\n\nLast, BUT NOT least, create bind-rndc.conf in logrotate.d/:\n```\n/var/log/bind/named.stats {\n\n daily\n rotate 4\n compress\n delaycompress\n create 0644 bind bind\n missingok\n postrotate\n rndc reload > /dev/null\n endscript\n}\n```\nTo test your logrotate conf file, run as root:\n`logrotate -d /etc/logrotate.d/bind-rndc` (the `-d` flag enables debug dry-run mode)\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/bind_rndc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/bind_rndc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| named_stats_path | Path to the named stats, after being dumped by `rndc` | /var/log/bind/named.stats | no |\n\n{% /details %}\n#### Examples\n\n##### Local bind stats\n\nDefine a local path to bind stats file\n\n```yaml\nlocal:\n named_stats_path: '/var/log/bind/named.stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin bind_rndc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC Bind (RNDC) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |\n| bind_rndc.incoming_queries | a dimension per incoming query type | queries |\n| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |\n| bind_rndc.stats_size | stats_size | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-bind_rndc-ISC_Bind_(RNDC)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/bind_rndc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "boinc", "monitored_instance": {"name": "BOINC", "link": "https://boinc.berkeley.edu/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["boinc", "distributed"], "most_popular": false}, "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Boinc RPC interface\n\nBOINC requires the use of a password to access its RPC interface. 
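The GUI RPC interface listens on TCP port 31416 by default; a quick reachability check (the port number is the BOINC default and an assumption here, adjust it if you changed yours):\n\n```bash\nnc -zv 127.0.0.1 31416\n```\n\n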
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n{% /details %}\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n hostname: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n hostname: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", "integration_type": "collector", "id": "python.d.plugin-boinc-BOINC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ceph", "monitored_instance": {"name": "Ceph", "link": "https://ceph.io/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ceph.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ceph", "storage"], "most_popular": false}, "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nUses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed\n\n#### Granting read permissions to ceph group from keyring file\n\nExecute: `chmod 640 
/etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of admin\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file | | yes |\n| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be group-readable. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n{% /details %}\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-ceph-Ceph", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "changefinder", "monitored_instance": {"name": "python.d changefinder", "link": "", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["change detection", "anomaly detection", "machine learning", "ml"], "most_popular": false}, "overview": "# python.d changefinder\n\nPlugin: python.d.plugin\nModule: changefinder\n\n## Overview\n\nThis collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to\nperform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)\non your Netdata charts and/or dimensions.\n\n\nInstead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm, so there is no batch step to train the model; instead, it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).\n\n### Notes\n\n- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its\n typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly\n this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw\n score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have\n already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then\n should 'settle down' once it has built up enough history. 
This is a typical characteristic of online machine learning\n approaches, which need some initial window of time before they can be useful.\n- As this collector does most of the work in Python itself, you may want to try it out first on a test or development\n system to get a sense of its performance characteristics on a node similar to where you would like to use it.\n- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the\n typical performance characteristics we saw from running this collector (with defaults) were:\n - A runtime (`netdata.runtime_changefinder`) of ~30ms.\n - Typically ~1% additional CPU usage.\n - About 85 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will work over all `system.*` charts.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages for the netdata user\npip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4\n```\n\n**Note**: if you need to tell Netdata to use Python 3, you can add the below configuration in the python plugin section\nof your `netdata.conf` file.\n\n```yaml\n[ plugin:python.d ]\n # update every = 1\n command options = -ppython3\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/changefinder.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/changefinder.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |\n| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. 
| per_chart | yes |\n| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |\n| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |\n| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |\n| cf_threshold | the percentile above which scores will be flagged. | 99 | no |\n| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |\n| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: ''\n mode: 'per_chart'\n cf_r: 0.5\n cf_order: 1\n cf_smooth: 15\n cf_threshold: 99\n n_score_samples: 14400\n show_scores: false\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin changefinder debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d changefinder instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| changefinder.scores | a dimension per chart | score |\n| changefinder.flags | a dimension per chart | flag |\n\n", "integration_type": "collector", "id": "python.d.plugin-changefinder-python.d_changefinder", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/changefinder/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "dovecot", "monitored_instance": {"name": "Dovecot", "link": "https://www.dovecot.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "dovecot.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dovecot", "imap", "mail"], "most_popular": false}, "overview": "# Dovecot\n\nPlugin: python.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\nIt uses the Dovecot socket and executes the `EXPORT global` command to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Dovecot configuration\n\nThe Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/dovecot.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| socket | Use this socket to communicate with Dovecot | /var/run/dovecot/stats | no |\n| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |\n| port | Used in combination with host, configures the port Dovecot listens on. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocaltcpip:\n name: 'local'\n host: '127.0.0.1'\n port: 24242\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalsocket:\n name: 'local'\n socket: '/var/run/dovecot/stats'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin dovecot debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.sessions | active sessions | number |\n| dovecot.logins | logins | number |\n| dovecot.commands | commands | commands |\n| dovecot.faults | minor, major | faults |\n| dovecot.context_switches | voluntary, involuntary | switches |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | number/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth | ok, failed | attempts |\n| dovecot.auth_cache | hit, miss | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-dovecot-Dovecot", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/dovecot/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "example", "monitored_instance": {"name": "Example collector", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["example", "netdata", "python"], "most_popular": false}, "overview": "# Example collector\n\nPlugin: python.d.plugin\nModule: example\n\n## Overview\n\nExample collector that generates some random numbers as metrics.\n\nIf you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.\n\n\nThe `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/example.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/example.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| num_lines | The number of lines to create. | 4 | no |\n| lower | The lower bound of numbers to randomly sample from. | 0 | no |\n| upper | The upper bound of numbers to randomly sample from. | 100 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nfour_lines:\n name: \"Four Lines\"\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n num_lines: 4\n lower: 0\n upper: 100\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin example debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Example collector instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| example.random | random | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-example-Example_collector", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "exim", "monitored_instance": {"name": "Exim", "link": "https://www.exim.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "exim.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["exim", "mail", "server"], "most_popular": false}, "overview": "# Exim\n\nPlugin: python.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue.\n\nIt uses the `exim` command line binary to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Exim configuration - local installation\n\nThe module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to execute the `exim` binary. We do this by adding the `queue_list_requires_admin` statement to the Exim configuration and setting it to `false`, as it is `true` by default. On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.\n\n1. Edit the `exim` configuration with your preferred editor and add:\n`queue_list_requires_admin = false`\n2. Restart `exim` and Netdata\n\n\n#### Exim configuration - WHM (CPanel) server\n\nOn a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.\n\n1. Log in to WHM\n2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor\n3. Scroll down to the button **Add additional configuration setting** and click on it.\n4. In the new dropdown which appears above, find and choose:\n`queue_list_requires_admin` and set it to `false`\n5. 
Scroll to the end and click the **Save** button.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/exim.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | Path and command to the `exim` binary | exim -bpc | no |\n\n{% /details %}\n#### Examples\n\n##### Local exim install\n\nA basic local exim install\n\n```yaml\nlocal:\n command: 'exim -bpc'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin exim debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", "integration_type": "collector", "id": "python.d.plugin-exim-Exim", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/exim/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "fail2ban", "monitored_instance": {"name": "Fail2ban", "link": "https://www.fail2ban.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "fail2ban.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["fail2ban", "security", "authentication", "authorization"], "most_popular": false}, "overview": "# Fail2ban\n\nPlugin: python.d.plugin\nModule: fail2ban\n\n## Overview\n\nMonitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.\n\n\nIt collects metrics by reading the default log and configuration files of fail2ban.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `fail2ban.log` file must be readable by the user `netdata`.\n - Change the file ownership and access permissions.\n - Update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.\n\nTo change the file ownership and access permissions, execute the following:\n\n```shell\nsudo chown root:netdata /var/log/fail2ban.log\nsudo chmod 640 /var/log/fail2ban.log\n```\n\nTo persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:\n\n```shell\n/var/log/fail2ban.log {\n\n weekly\n rotate 4\n compress\n\n delaycompress\n missingok\n postrotate\n fail2ban-client flushlogs 1>/dev/null\n endscript\n\n # If fail2ban runs as non-root it still needs to have write access\n # to logfiles.\n # create 640 fail2ban adm\n create 640 root netdata\n}\n```\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will attempt to read the log file at `/var/log/fail2ban.log` and the configuration file at `/etc/fail2ban/jail.local`.\nIf the configuration file is not found, the default jail is `ssh`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/fail2ban.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, 
but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |\n| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |\n| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |\n| exclude | jails you want to exclude from autodetection. | | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocal:\n log_path: '/var/log/fail2ban.log'\n conf_path: '/etc/fail2ban/jail.local'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin fail2ban debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fail2ban instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.failed_attempts | a dimension per jail | attempts/s |\n| fail2ban.bans | a dimension per jail | bans/s |\n| fail2ban.banned_ips | a dimension per jail | ips |\n\n", "integration_type": "collector", "id": "python.d.plugin-fail2ban-Fail2ban", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/fail2ban/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "gearman", "monitored_instance": {"name": "Gearman", "link": "http://gearman.org/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "gearman.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["gearman", "gearman job server"], "most_popular": false}, "overview": "# Gearman\n\nPlugin: python.d.plugin\nModule: gearman\n\n## Overview\n\nMonitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.\n\nThis collector connects to a Gearman instance via either TCP or unix socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Socket permissions\n\nThe gearman UNIX socket should have read permission for user netdata.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/gearman.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | URL or IP where gearman is running. | localhost | no |\n| port | Port of URL or IP where gearman is running. | 4730 | no |\n| tls | Use tls to connect to gearman. | false | no |\n| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |\n| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local gearman service\n\nA basic host and port gearman configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\nremote:\n name: 'remote'\n host: '192.0.2.1'\n port: 4730\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin gearman debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.total_jobs | Pending, Running | Jobs |\n\n### Per gearman job\n\nMetrics related to Gearman jobs. 
Each job produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.single_job | Pending, Idle, Running | Jobs |\n\n", "integration_type": "collector", "id": "python.d.plugin-gearman-Gearman", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/gearman/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "go_expvar", "monitored_instance": {"name": "Go applications (EXPVAR)", "link": "https://pkg.go.dev/expvar", "categories": ["data-collection.apm"], "icon_filename": "go.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["go", "expvar", "application"], "most_popular": false}, "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider the minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. 
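As a quick check, you can fetch the endpoint from a shell; the address below assumes the minimal sample above, which listens on `127.0.0.1:8080`:\n\n```bash\n# query the HTTP handler the expvar package registers at /debug/vars\ncurl http://127.0.0.1:8080/debug/vars\n```\n\n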
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application called `app1`. Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. 
If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hddtemp", "monitored_instance": {"name": "HDD temperature", "link": "https://linux.die.net/man/8/hddtemp", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hardware", "hdd temperature", "disk temperature", "temperature"], "most_popular": false}, "overview": "# HDD temperature\n\nPlugin: python.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt uses the `hddtemp` daemon to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Run `hddtemp` in daemon mode\n\nYou can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.\n\nSo running `hddtemp -d` would run the daemon, by default on port 7634.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hddtemp.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\nBy default, this collector will try to autodetect disks (autodetection works only for disks whose names start with \"sd\"). 
However, this can be overridden by setting the `devices` option to an array of desired disks.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |\n| host | The IP or HOSTNAME to connect to. | localhost | yes |\n| port | The port to connect to. | 7634 | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\n```\n##### Custom disk names\n\nAn example defining the disk names to detect.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n devices:\n - customdisk1\n - customdisk2\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\nremote_job:\n name : 'remote'\n host : 'http://192.0.2.1:2812'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hddtemp debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HDD temperature instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.temperatures | a dimension per disk | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hddtemp-HDD_temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hddtemp/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hpssa", "monitored_instance": {"name": "HP Smart Storage Arrays", "link": "https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hp.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "hp", "hpssa", "array"], "most_popular": false}, "overview": "# HP Smart Storage Arrays\n\nPlugin: python.d.plugin\nModule: hpssa\n\n## Overview\n\nThis collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.\n\nIt uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to execute the `ssacli` binary.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the hpssa collector\n\nThe `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Allow user netdata to execute `ssacli` as root.\n\nThis module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n`which ssacli` shows the full path to the binary.\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/ssacli\n```\n\n- Reset Netdata's systemd\n unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux\n distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. 
Resetting is not optimal, but it is the next-best solution given the inability to execute `ssacli` using `sudo`.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hpssa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hpssa.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the `$PATH`. | | no |\n| use_sudo | Whether or not to use `sudo` to execute `ssacli`. | True | no |\n\n{% /details %}\n#### Examples\n\n##### Local simple config\n\nA basic configuration, specifying the path to `ssacli`.\n\n```yaml\nlocal:\n ssacli_path: /usr/sbin/ssacli\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hpssa debug trace\n ```\n\n
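### Verify the sudo configuration\n\nAs an extra check (assuming `ssacli` is installed at the path you configured in the sudoers entry), switch to the `netdata` user and run the exact command the collector uses; it should print the controller configuration without prompting for a password:\n\n```bash\n# run while logged in as the netdata user\nsudo -n ssacli ctrl all show config detail\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.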
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HP Smart Storage Arrays instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |\n| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |\n| hpssa.ld_status | a dimension per logical drive | Status |\n| hpssa.pd_status | a dimension per physical drive | Status |\n| hpssa.pd_temperature | a dimension per physical drive | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hpssa-HP_Smart_Storage_Arrays", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hpssa/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "icecast", "monitored_instance": {"name": "Icecast", "link": "https://icecast.org/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "icecast.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["icecast", "streaming", "media"], "most_popular": false}, "overview": "# Icecast\n\nPlugin: python.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt connects to an Icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWithout configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nIcecast version 2.4.0 or newer is required.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/icecast.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL (and port) of the Icecast server. It must also include `/status-json.xsl`. | http://localhost:8443/status-json.xsl | no |\n| user | Username to use to connect to `url` if it's password protected. | | no |\n| pass | Password to use to connect to `url` if it's password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Remote Icecast server\n\nAn example configuration for a remote Icecast server.\n\n```yaml\nremote:\n url: 'http://1.2.3.4:8443/status-json.xsl'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin icecast debug trace\n ```\n\n
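### Verify the status endpoint\n\nAs a quick manual check (assuming the default URL shown above; adjust it for your server), fetch the same endpoint the collector reads. It should return a JSON document containing an icestats object:\n\n```bash\ncurl http://localhost:8443/status-json.xsl\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.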
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | a dimension for each active source | listeners |\n\n", "integration_type": "collector", "id": "python.d.plugin-icecast-Icecast", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/icecast/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ipfs", "monitored_instance": {"name": "IPFS", "link": "https://ipfs.tech/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ipfs.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IPFS\n\nPlugin: python.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS server metrics about its quality and performance.\n\nIt connects to an HTTP endpoint of the IPFS server to collect the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the endpoint is accessible by the Agent, Netdata will autodetect it.\n\n#### Limits\n\nCalls to the following endpoints are disabled due to IPFS bugs:\n\n- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)\n- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ipfs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| url | URL to the IPFS API. | no | yes |\n| repoapi | Collect repo metrics.
| no | no |\n| pinapi | Set status of IPFS pinned object polling. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\nremote_host:\n name: 'remote'\n url: 'http://192.0.2.1:5001'\n repoapi: no\n pinapi: no\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ipfs debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | kilobits/s |\n| ipfs.peers | peers | peers |\n| ipfs.repo_size | avail, size | GiB |\n| ipfs.repo_objects | objects, pinned, recursive_pins | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-ipfs-IPFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ipfs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "litespeed", "monitored_instance": {"name": "Litespeed", "link": "https://www.litespeedtech.com/products/litespeed-web-server", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "litespeed.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["litespeed", "web", "server"], "most_popular": false}, "overview": "# Litespeed\n\nPlugin: python.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under `/tmp/lshttpd` to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under `/tmp/lshttpd/`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/litespeed.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |\n\n{% /details %}\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files.\n\n```yaml\nlocalhost:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n
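As a quick sanity check (the report file names can vary between LiteSpeed versions, so treat this only as a hint), list the stats directory and confirm it contains report files for the collector to read:\n\n```bash\n# the directory should exist and contain LiteSpeed real-time report files\nls -la /tmp/lshttpd/\n```\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.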
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin litespeed debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.connections | free, used | conns |\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.cache | hits | hits/s |\n| litespeed.cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-litespeed-Litespeed", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/litespeed/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "megacli", "monitored_instance": {"name": "MegaCLI", "link": "https://wikitech.wikimedia.org/wiki/MegaCli", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# MegaCLI\n\nPlugin: python.d.plugin\nModule: megacli\n\n## Overview\n\nExamine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.\n\nCollects adapter, physical drive and battery stats using the `megacli` command-line tool.\n\nExecuted commands:\n\n - `sudo -n megacli -LDPDInfo -aAll`\n - `sudo -n megacli -AdpBbuCmd -a0`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the `megacli` command-line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run megacli via sudo\n\nThe module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\nAdd the following to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/megacli\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but it is the next-best solution given the inability to execute megacli using sudo.\n\nAs the root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/megacli.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| do_battery | Whether to collect battery stats (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration per job.\n\n```yaml\njob_name:\n name: myname\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n\n```\n
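To verify the setup (run as the `netdata` user, assuming the sudoers entry above is in place), execute the exact commands the collector uses; both should print output without prompting for a password:\n\n```bash\n# the same commands listed in the Overview section\nsudo -n megacli -LDPDInfo -aAll\nsudo -n megacli -AdpBbuCmd -a0\n```\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.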
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin megacli debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |\n| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |\n| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |\n| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |\n| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MegaCLI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_degraded | a dimension per adapter | is degraded |\n| megacli.pd_media_error | a dimension per physical drive | errors/s |\n| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |\n\n### Per battery\n\nMetrics related to Battery Backup Units; each BBU provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_relative_charge | adapter {battery id} | percentage |\n| megacli.bbu_cycle_count | adapter {battery id} | cycle count |\n\n", "integration_type": "collector", "id": "python.d.plugin-megacli-MegaCLI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/megacli/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "memcached", "monitored_instance": {"name": "Memcached", "link": "https://memcached.org/", "categories": ["data-collection.database-servers"], "icon_filename": "memcached.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memcached", "memcache", "cache", "database"], "most_popular": false}, "overview": "# Memcached\n\nPlugin: python.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations.
Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server's response to the `stats` command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/memcached.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| host | The host to connect to. | 127.0.0.1 | no |\n| port | The port to connect to. | 11211 | no |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### localhost\n\nAn example configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 11211\n\n```\n##### localipv4\n\nAn example configuration for localipv4.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 11211\n\n```\n{% /details %}\n##### localipv6\n\nAn example configuration for localipv6.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n host: '::1'\n port: 11211\n\n```\n{% /details %}\n
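To see the raw data the collector parses (a manual check, assuming `nc`/netcat is installed and the default address), send the `stats` command yourself:\n\n```bash\n# prints the raw STAT lines from the stats interface\nprintf 'stats\\r\\nquit\\r\\n' | nc 127.0.0.1 11211\n```\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled.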
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin memcached debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-memcached-Memcached", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/memcached/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "monit", "monitored_instance": {"name": "Monit", "link": "https://mmonit.com/monit/", "categories": ["data-collection.synthetic-checks"], "icon_filename": "monit.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["monit", "mmonit", "supervision tool", "monitrc"], "most_popular": false}, "overview": "# Monit\n\nPlugin: python.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.\n\n\nIt gathers data from Monit's XML interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to Monit at `http://localhost:2812`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/monit.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |\n| user | Username in case the URL is password protected. | | no |\n| pass | Password in case the URL is password protected. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n\n```\n##### Basic Authentication\n\nAn example using a username and password for basic authentication.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n user: 'foo'\n pass: 'bar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:2812'\n\nremote_job:\n name: 'remote'\n url: 'http://192.0.2.1:2812'\n\n```\n{% /details %}\n
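Because the collector reads Monit's XML interface, you can fetch it manually as a sanity check (an assumption worth verifying on your Monit version: `/_status?format=xml` is the standard Monit status endpoint; substitute your own URL and the credentials from your configuration):\n\n```bash\n# uses the example credentials from the Basic Authentication example above\ncurl -u foo:bar 'http://localhost:2812/_status?format=xml'\n```\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.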
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin monit debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Monit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.filesystems | a dimension per target | filesystems |\n| monit.directories | a dimension per target | directories |\n| monit.files | a dimension per target | files |\n| monit.fifos | a dimension per target | pipes |\n| monit.programs | a dimension per target | programs |\n| monit.services | a dimension per target | processes |\n| monit.process_uptime | a dimension per target | seconds |\n| monit.process_threads | a dimension per target | threads |\n| monit.process_childrens | a dimension per target | children |\n| monit.hosts | a dimension per target | hosts |\n| monit.host_latency | a dimension per target | milliseconds |\n| monit.networks | a dimension per target | interfaces |\n\n", "integration_type": "collector", "id": "python.d.plugin-monit-Monit", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/monit/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "nsd", "monitored_instance": {"name": "Name Server Daemon", "link": "https://nsd.docs.nlnetlabs.nl/en/latest/#", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "nsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nsd", "name server daemon"], "most_popular": false}, "overview": "# Name Server Daemon\n\nPlugin: python.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more.\n\n\nIt uses the `nsd-control stats_noreset` command to gather metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### NSD version\n\nThe version of `nsd` must be 4.0+.\n\n\n#### Provide Netdata with the permissions to run the command\n\nNetdata must have permissions to run the `nsd-control stats_noreset` command.\n\nYou can:\n\n- Add the \"netdata\" user to the \"nsd\" group:\n ```\n usermod -aG nsd netdata\n ```\n- Add Netdata to sudoers:\n 1. Edit the sudoers file:\n ```\n visudo -f /etc/sudoers.d/netdata\n ```\n 2. Add the entry:\n ```\n Defaults:netdata !requiretty\n netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset\n ```\n\n > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/nsd.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | The command to run. | nsd-control stats_noreset | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: 'nsd_local'\n command: 'nsd-control stats_noreset'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin nsd debug trace\n ```\n\n
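### Verify permissions\n\nAs a final check (run as the `netdata` user, per the prerequisites above), execute the command the collector uses and confirm it prints statistics. If you used the sudoers method, prefix the command with `sudo`:\n\n```bash\n# should print NSD statistics without errors\nnsd-control stats_noreset\n```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.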
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Name Server Daemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.zones | master, slave | zones |\n| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |\n| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |\n| nsd.transfer | NOTIFY, AXFR | queries/s |\n| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-nsd-Name_Server_Daemon", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/nsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "openldap", "monitored_instance": {"name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "statsd.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["openldap", "RBAC", "Directory access"], "most_popular": false}, "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an OpenLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all the prerequisites are checked.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the openLDAP server to expose metrics to monitor it.\n\nFollow the instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install the python-ldap module\n\nInstall the python-ldap module.\n\n1. With the pip package manager\n\n```bash\npip install python-ldap\n```\n\n2. With the apt package manager (on most Debian-based distros)\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. With the yum package manager (on most RPM-based distros)\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access the openLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with rights to access the monitor statistics. | | yes |\n| password | The password for the bind user. | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a TLS (STARTTLS) connection is used over ldap:// | no | no |\n| cert_check | Set to False to skip the certificate check. | True | yes |\n| timeout | Seconds to wait before timing out if no connection exists. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncert_check: True\ntimeout: 1\n\n```\n
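To verify the credentials and the monitoring interface (a manual check, assuming the `ldapsearch` client is installed; the bind DN and password below come from the example above), query the monitor backend directly:\n\n```bash\n# should return the cn=Monitor entry if the monitoring interface is enabled\nldapsearch -x -H ldap://localhost:389 -D \"cn=admin\" -w pass -b \"cn=Monitor\" -s base\n```\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`.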
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-openldap-OpenLDAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "oracledb", "monitored_instance": {"name": "Oracle DB", "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", "categories": ["data-collection.database-servers"], "icon_filename": "oracle.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "oracle", "data warehouse", "SQL"], "most_popular": false}, "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read only user for netdata\n\nFollow the official instructions for your oracle RDBMS to create a read-only user for netdata. The operation may follow this approach.\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nWhen editing the configuration:\n\n1. Provide a valid user for the netdata collector to access the database.\n2. Specify the network target this database is listening on.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | Either \"tcp\" or \"tcps\", indicating whether to use unencrypted or TLS-encrypted network traffic, respectively. | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration, with two jobs for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to.
An instance is uniquely identified by a set of labels.\n\nThese metrics refer to the entire monitored application.\n\n### Per Oracle DB instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", "integration_type": "collector", "id": "python.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "pandas", "monitored_instance": {"name": "Pandas", "link": "https://pandas.pydata.org/", "categories": ["data-collection.generic-data-collection"], "icon_filename": "pandas.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pandas", "python"], "most_popular": false}, "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for 
this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Temperature API Example\n\nexample pulling some hourly temperature data: a chart for today's forecast (mean, min, max) and another chart for the current temperature.\n\n{% details summary=\"Config\" %}\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n{% /details %}\n##### API CSV Example\n\nexample showing a read_csv from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n{% /details %}\n##### API JSON Example\n\nexample showing a read_json from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: 
\"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n{% /details %}\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n{% details summary=\"Config\" %}\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n{% /details %}\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n{% details summary=\"Config\" %}\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. 
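For example, here is a minimal standalone sketch (with hypothetical in-memory data standing in for whatever the `df_steps` chain fetched) of a chain of operations that reduces to a single row:\n\n```python\nimport pandas as pd\n\n# hypothetical data standing in for a fetched dataset\ndf = pd.DataFrame({'user': [1.0, 2.0], 'system': [0.5, 1.5]})\ndf = df.mean().to_frame().transpose()  # collapse to one row of averages\nprint(df.to_dict(orient='records')[0])  # {'user': 1.5, 'system': 1.0}\n```\n\n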
It is that first row that will be taken\nas the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", "id": "python.d.plugin-pandas-Pandas", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "postfix", "monitored_instance": {"name": "Postfix", "link": "https://www.postfix.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "postfix.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["postfix", "mail", "mail server"], "most_popular": false}, "overview": "# Postfix\n\nPlugin: python.d.plugin\nModule: postfix\n\n## Overview\n\nKeep an eye on Postfix metrics for efficient mail server operations. \nImprove your mail server performance with Netdata's real-time metrics and built-in alerts.\n\n\nMonitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPostfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.\nSee the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin postfix debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-postfix-Postfix", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/postfix/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "puppet", "monitored_instance": {"name": "Puppet", "link": "https://www.puppet.com/", "categories": ["data-collection.ci-cd-systems"], "icon_filename": "puppet.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["puppet", "jvm heap"], "most_popular": false}, "overview": "# Puppet\n\nPlugin: python.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.'\n\n\nIt uses Puppet's metrics API endpoint to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/puppet.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n> Notes:\n> - Exact Fully Qualified Domain Name of the node should be used.\n> - Usually Puppet Server/DB startup time is VERY long, so a reasonably high retry count should be configured.\n> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |\n| tls_verify | Control HTTPS server certificate verification. | False | no |\n| tls_ca_file | Optional CA (bundle) file to use | | no |\n| tls_cert_file | Optional client certificate file | | no |\n| tls_key_file | Optional client key file | | no |\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\npuppetserver:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\n```\n##### TLS Certificate\n\nAn example using a TLS certificate.\n\n{% details summary=\"Config\" %}\n```yaml\npuppetdb:\n url: 'https://fqdn.example.com:8081'\n tls_cert_file: /path/to/client.crt\n tls_key_file: /path/to/client.key\n autodetection_retry: 1\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\npuppetserver1:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\npuppetserver2:\n url: 'https://fqdn.example2.com:8140'\n autodetection_retry: 1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin puppet debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm | committed, used | MiB |\n| puppet.jvm | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", "integration_type": "collector", "id": "python.d.plugin-puppet-Puppet", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/puppet/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "rethinkdbs", "monitored_instance": {"name": "RethinkDB", "link": "https://rethinkdb.com/", "categories": ["data-collection.database-servers"], "icon_filename": "rethinkdb.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["rethinkdb", "database", "db"], "most_popular": false}, "overview": "# RethinkDB\n\nPlugin: python.d.plugin\nModule: rethinkdbs\n\n## Overview\n\nThis collector monitors metrics about RethinkDB clusters and database servers.\n\nIt uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to 127.0.0.1:28015.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe collector requires the `rethinkdb` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/rethinkdbs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/rethinkdbs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` 
value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | Hostname or ip of the RethinkDB server. | localhost | no |\n| port | Port to connect to the RethinkDB server. | 28015 | no |\n| user | The username to use to connect to the RethinkDB server. | admin | no |\n| password | The password to use to connect to the RethinkDB server. | | no |\n| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RethinkDB server\n\nAn example of a configuration for a local RethinkDB server\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 28015\n user: \"user\"\n password: \"pass\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin rethinkdbs debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
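Once the job is running, you can spot-check that the charts listed below are being populated by querying the agent's own API. This is a minimal sketch, assuming a default agent listening on `localhost:19999`; adjust the host and chart id as needed:\n\n```python\nimport json\nfrom urllib.request import urlopen\n\n# hypothetical local agent; chart ids are listed in the tables below\nurl = 'http://localhost:19999/api/v1/data?chart=rethinkdb.cluster_queries&after=-10&format=json'\nprint(json.load(urlopen(url))['data'])  # most recent points for the chart\n```\n\n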
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_connected_servers | connected, missing | servers |\n| rethinkdb.cluster_clients_active | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | reads, writes | documents/s |\n\n### Per database server\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.client_connections | connections | connections |\n| rethinkdb.clients_active | active | clients |\n| rethinkdb.queries | queries | queries/s |\n| rethinkdb.documents | reads, writes | documents/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-rethinkdbs-RethinkDB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "retroshare", "monitored_instance": {"name": "RetroShare", "link": "https://retroshare.cc/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "retroshare.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["retroshare", "p2p"], "most_popular": false}, "overview": "# RetroShare\n\nPlugin: python.d.plugin\nModule: retroshare\n\n## Overview\n\nThis collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.\n\nIt connects to the RetroShare web interface to gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### RetroShare web interface\n\nRetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/retroshare.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/retroshare.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |\n\n{% /details %}\n#### Examples\n\n##### Local RetroShare Web UI\n\nA basic configuration for a RetroShare server running on localhost.\n\n{% details summary=\"Config\" %}\n```yaml\nlocalhost:\n name: 'local retroshare'\n url: 'http://localhost:9090'\n\n```\n{% /details %}\n##### Remote RetroShare Web UI\n\nA basic configuration for a remote RetroShare server.\n\n{% details summary=\"Config\" %}\n```yaml\nremote:\n name: 'remote retroshare'\n url: 'http://1.2.3.4:9090'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin retroshare debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RetroShare instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| retroshare.bandwidth | Upload, Download | kilobits/s |\n| retroshare.peers | All friends, Connected friends | peers |\n| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |\n\n", "integration_type": "collector", "id": "python.d.plugin-retroshare-RetroShare", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/retroshare/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "riakkv", "monitored_instance": {"name": "RiakKV", "link": "https://riak.com/products/riak-kv/index.html", "categories": ["data-collection.database-servers"], "icon_filename": "riak.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "nosql", "big data"], "most_popular": false}, "overview": "# RiakKV\n\nPlugin: python.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.'\n\n\nThis collector reads the database stats from the `/stats` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure RiakKV to enable /stats endpoint\n\nYou can follow the RiakKV configuration reference documentation for how to enable this.\n\nSource : https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/riakkv.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the server. | no | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic (default)\n\nA basic example configuration per job.\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\nremote:\n url: 'http://192.0.2.1:8098/stats'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin riakkv debug trace\n ```\n\n
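### Verify the /stats endpoint\n\nIf debug mode shows connection errors, you can confirm that the `/stats` endpoint is reachable from the Netdata host. This is a minimal sketch, assuming the default local address used in the configuration examples above:\n\n```python\nimport json\nfrom urllib.request import urlopen\n\n# default local endpoint from the configuration examples above\nstats = json.load(urlopen('http://localhost:8098/stats'))\nprint(len(stats), 'stats keys returned')\n```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |\n| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |\n| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |\n| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 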
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RiakKV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | errors | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n| riak.search.index | bad_entry, extract_fail | writes |\n\n", "integration_type": "collector", "id": "python.d.plugin-riakkv-RiakKV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/riakkv/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "samba", "monitored_instance": {"name": "Samba", "link": "https://www.samba.org/samba/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "samba.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["samba", "file sharing"], "most_popular": false}, "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt is using the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, the `smbstatus -P` binary is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`.\n\n\n As the `root` user, do the following:\n\n ```cmd\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-samba-Samba", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (lm-sensors)", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "temperature", "voltage", "current", "power", "fan", "energy", "humidity"], "most_popular": false}, "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: python.d.plugin\nModule: sensors\n\n## Overview\n\nExamine Linux Sensors metrics with Netdata for insights into hardware health and performance.\n\nEnhance your system's reliability with real-time hardware health insights.\n\n\nReads system sensors information (temperature, voltage, electric current, power, etc.) 
via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following types of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/sensors.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\ntypes:\n - temperature\n - fan\n - voltage\n - current\n - power\n - energy\n - humidity\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin sensors debug trace\n ```\n\n### lm-sensors doesn't work on your device\n\n\n\n### ACPI ring buffer errors are printed\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per chip\n\nMetrics related to chips. Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temperature | a dimension per sensor | Celsius |\n| sensors.voltage | a dimension per sensor | Volts |\n| sensors.current | a dimension per sensor | Ampere |\n| sensors.power | a dimension per sensor | Watt |\n| sensors.fan | a dimension per sensor | Rotations/min |\n| sensors.energy | a dimension per sensor | Joule |\n| sensors.humidity | a dimension per sensor | Percent |\n\n", "integration_type": "collector", "id": "python.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "smartd_log", "monitored_instance": {"name": "S.M.A.R.T.", "link": "https://linux.die.net/man/8/smartd", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "smart.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["smart", "S.M.A.R.T.", "SCSI devices", "ATA devices"], "most_popular": false}, "overview": "# S.M.A.R.T.\n\nPlugin: python.d.plugin\nModule: smartd_log\n\n## Overview\n\nThis collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.\n\n\nIt reads `smartd` log files to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nUpon satisfying the prerequisites, the collector will auto-detect metrics from the attribute logs written to either `/var/log/smartd/` or `/var/lib/smartmontools/`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure `smartd` to write attribute information to files.\n\n`smartd` must be running with the `-A` option to write `smartd` attribute information to files.\n\nFor this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:\n\n```\n# dump smartd attrs info every 600 seconds\nsmartd_opts=\"-A /var/log/smartd/ -i 600\"\n```\n\nYou may need to create the smartd directory before `smartd` can write to it:\n\n```sh\nmkdir -p /var/log/smartd\n```\n\nOtherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See the `smartd` manual page for more info on the `-A` (`--attributelog=PREFIX`) option.\n\n`smartd` appends logs at every run. 
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/smartd_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/smartd_log.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | Path to smartd log files. | /var/log/smartd | yes |\n| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |\n| age | Time in minutes since the last dump to file. | 30 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\ncustom:\n name: smartd_log\n log_path: '/var/log/smartd/'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin smartd_log debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe availability of the metrics listed below depends on the device type, SCSI or ATA.\n\n### Per S.M.A.R.T. 
instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | SCSI | ATA |\n|:------|:----------|:----|:---:|:---:|\n| smartd_log.read_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.seek_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.soft_read_error_rate | a dimension per device | errors | | \u2022 |\n| smartd_log.write_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.read_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.read_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.sata_interface_downshift | a dimension per device | events | | \u2022 |\n| smartd_log.udma_crc_error_count | a dimension per device | errors | | \u2022 |\n| smartd_log.throughput_performance | a dimension per device | value | | \u2022 |\n| smartd_log.seek_time_performance | a dimension per device | value | | \u2022 |\n| smartd_log.start_stop_count | a dimension per device | events | | \u2022 |\n| smartd_log.power_on_hours_count | a dimension per device | hours | | \u2022 |\n| smartd_log.power_cycle_count | a dimension per device | events | | \u2022 |\n| smartd_log.unexpected_power_loss | a dimension per device | events | | \u2022 |\n| smartd_log.spin_up_time | a dimension per device | ms | | \u2022 |\n| smartd_log.spin_up_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.calibration_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | \u2022 |\n| smartd_log.temperature_celsius | a dimension per device | celsius | \u2022 | \u2022 |\n| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.reserved_block_count | a dimension per device | percentage | | \u2022 |\n| smartd_log.program_fail_count | a dimension per device | errors | | \u2022 |\n| smartd_log.erase_fail_count | a dimension per device | failures | | \u2022 |\n| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | \u2022 |\n| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | \u2022 |\n| smartd_log.reallocation_event_count | a dimension per device | events | | \u2022 |\n| smartd_log.current_pending_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.percent_lifetime_used | a dimension per device | percentage | | \u2022 |\n| smartd_log.media_wearout_indicator | a dimension per device | percentage | | \u2022 |\n| smartd_log.nand_writes_1gib | a dimension per device | GiB | | \u2022 |\n\n", "integration_type": "collector", "id": "python.d.plugin-smartd_log-S.M.A.R.T.", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/smartd_log/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "spigotmc", "monitored_instance": {"name": "SpigotMC", "link": "", "categories": ["data-collection.gaming"], "icon_filename": 
"spigot.jfif"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["minecraft server", "spigotmc server", "spigot"], "most_popular": false}, "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. 
| | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n host: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using a password for authentication with the remote console.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n name: local_server_pass\n host: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_server:\n name : my_local_server\n host : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n host : 192.0.2.1\n port: 25575\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-spigotmc-SpigotMC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "squid", "monitored_instance": {"name": "Squid", "link": "http://www.squid-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "squid.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["squid", "web delivery", "squid caching proxy"], "most_popular": false}, "overview": "# Squid\n\nPlugin: python.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about Squid clients and servers, such as bandwidth and requests.\n\n\nIt collects metrics from the endpoint where Squid exposes its `counters` data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will try to autodetect where Squid presents its `counters` data by trying various configurations.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance 
Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Squid's Cache Manager\n\nTake a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/squid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| host | The host to connect to. | | yes |\n| port | The port to connect to. | | yes |\n| request | The URL to request from Squid. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nexample_job_name:\n name: 'local'\n host: 'localhost'\n port: 3128\n request: 'cache_object://localhost:3128/counters'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_job:\n name: 'local'\n host: '127.0.0.1'\n port: 3128\n request: 'cache_object://127.0.0.1:3128/counters'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 3128\n request: 'cache_object://192.0.2.1:3128/counters'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin squid debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-squid-Squid", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/squid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tomcat", "monitored_instance": {"name": "Tomcat", "link": "https://tomcat.apache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "tomcat.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["apache", "tomcat", "webserver", "websocket", "jakarta", "javaEE"], "most_popular": false}, "overview": "# Tomcat\n\nPlugin: python.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the `/manager/status` HTTP endpoint in XML format.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nYou need to provide a username and password to access the web server's status page. Create a separate user with read-only rights for this particular endpoint.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and the Tomcat web server are on the same host, the module attempts, without configuration, to connect to http://localhost:8080/manager/status?XML=true without any credentials, so it will probably fail.\n\n#### Limits\n\nThis module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't try to monitor it over a public network (the public internet). 
Credentials are passed by Netdata over an unencrypted connection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only `netdata` user to monitor the `/status` endpoint.\n\nThis is necessary for configuring the collector.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tomcat.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options per job\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the Tomcat server's status endpoint. Always add the suffix `?XML=true`. | no | yes |\n| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |\n| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |\n| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. `ajp-bio-8009` | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:8080/manager/status?XML=true'\n\n```\n##### Using an IPv4 endpoint\n\nA typical configuration using an IPv4 endpoint.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_ipv4:\n name : 'local'\n url : 'http://127.0.0.1:8080/manager/status?XML=true'\n\n```\n{% /details %}\n##### Using an IPv6 endpoint\n\nA typical configuration using an IPv6 endpoint.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_ipv6:\n name : 'local'\n url : 'http://[::1]:8080/manager/status?XML=true'\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tomcat debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.accesses | accesses, errors | requests/s |\n| tomcat.bandwidth | sent, received | KiB/s |\n| tomcat.processing_time | processing time | seconds |\n| tomcat.threads | current, busy | current threads |\n| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |\n| tomcat.jvm_eden | used, committed, max | MiB |\n| tomcat.jvm_survivor | used, committed, max | MiB |\n| tomcat.jvm_tenured | used, committed, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-tomcat-Tomcat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tomcat/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tor", "monitored_instance": {"name": "Tor", "link": "https://www.torproject.org/", "categories": ["data-collection.vpns"], "icon_filename": "tor.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["tor", "traffic", "vpn"], "most_popular": false}, "overview": "# Tor\n\nPlugin: python.d.plugin\nModule: tor\n\n## Overview\n\nThis collector monitors Tor bandwidth traffic.\n\nIt connects to the Tor control port to collect traffic statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Python module\n\nThe `stem` Python library needs to be installed.\n\n\n#### Required Tor configuration\n\nAdd the following to `/etc/tor/torrc`:\n\n```conf\nControlPort 9051\n```\n\nFor more options, please read the manual.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be 
defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| control_addr | Tor control IP address. | 127.0.0.1 | no |\n| control_port | Tor control port. Can be either a TCP port or a path to a socket file. | 9051 | no |\n| password | Tor control password. | | no |\n\n{% /details %}\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_tcp:\n name: 'local'\n control_port: 9051\n password: # if required\n\n```\n{% /details %}\n##### Local socket\n\nA basic local socket configuration.\n\n{% details summary=\"Config\" %}\n```yaml\nlocal_socket:\n name: 'local'\n control_port: '/var/run/tor/control'\n password: # if required\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-tor-Tor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "uwsgi", "monitored_instance": {"name": "uWSGI", "link": "https://github.com/unbit/uwsgi/tree/2.0.21", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "uwsgi.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application server", "python", "web applications"], "most_popular": false}, "overview": "# uWSGI\n\nPlugin: python.d.plugin\nModule: uwsgi\n\n## Overview\n\nThis collector monitors uWSGI metrics about requests, workers, memory and more.\n\nIt collects every metric exposed by the uWSGI Stats server, either from the `stats.socket` or from the web server's TCP/IP socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on the socket `/tmp/stats.socket`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats server\n\nMake sure that your uWSGI application exposes its metrics via a Stats server.\n\nSource: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/uwsgi.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| socket | The path to the uWSGI stats socket. | no | no |\n| host | The host to connect to. | no | no |\n| port | The port to connect to. | no | no |\n\n{% /details %}\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration, used by the autodetection mechanism by default. As all JOBs have the same name, only one can run at a time.\n\n{% details summary=\"Config\" %}\n```yaml\nsocket:\n name : 'local'\n socket : '/tmp/stats.socket'\n\nlocalhost:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nlocalipv4:\n name : 'local'\n host : '127.0.0.1'\n port : 1717\n\nlocalipv6:\n name : 'local'\n host : '::1'\n port : 1717\n\n```\n{% /details %}\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n{% details summary=\"Config\" %}\n```yaml\nlocal:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nremote:\n name : 'remote'\n host : '192.0.2.1'\n port : 1717\n\n```\n{% /details %}\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin uwsgi debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.requests | a dimension per worker | requests/s |\n| uwsgi.tx | a dimension per worker | KiB/s |\n| uwsgi.avg_rt | a dimension per worker | milliseconds |\n| uwsgi.memory_rss | a dimension per worker | MiB |\n| uwsgi.memory_vsz | a dimension per worker | MiB |\n| uwsgi.exceptions | exceptions | exceptions |\n| uwsgi.harakiris | harakiris | harakiris |\n| uwsgi.respawns | respawns | respawns |\n\n", "integration_type": "collector", "id": "python.d.plugin-uwsgi-uWSGI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "varnish", "monitored_instance": {"name": "Varnish", "link": "https://varnish-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "varnish.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["varnish", "varnishstat", "varnishd", "cache", "web server", "web cache"], "most_popular": false}, "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics: global HTTP accelerator statistics, Backends (VBE) and Storages (SMF, SMA, MSE).\n\nBoth Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.\n\n\nIt uses the `varnishstat` tool to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe `netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a 
`job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-varnish-Varnish", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "w1sensor", "monitored_instance": {"name": "1-Wire Sensors", "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "1-wire.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "sensor", "1-wire"], "most_popular": false}, "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
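If they are not loaded yet, a quick manual check is possible (a sketch; to persist across reboots, use your distribution's module configuration, e.g. `/etc/modules-load.d/`):\n\n```bash\n# load the kernel modules used by the collector\nsudo modprobe wire\nsudo modprobe w1_gpio\nsudo modprobe w1_therm\n```\n\n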
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n{% /details %}\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-w1sensor-1-Wire_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "zscores", "monitored_instance": {"name": "python.d zscores", "link": "https://en.wikipedia.org/wiki/Standard_score", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zscore", "z-score", "standard score", "standard deviation", "anomaly detection", "statistical anomaly detection"], "most_popular": false}, "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
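A minimal sketch of this per-dimension calculation (plain Python; the function name is illustrative, not the module's internals):\n\n```python\ndef clipped_zscore(x, mean, stddev, z_clip=10):\n    # z = (x - mean) / stddev, capped at +/- z_clip for stability\n    z = (x - mean) / stddev\n    return max(min(z, z_clip), -z_clip)\n\nprint(clipped_zscore(17, 10, 2))  # 3.5\n```\n\n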
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n{% /details %}\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-zscores-python.d_zscores", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "slabinfo.plugin", "module_name": "slabinfo.plugin", "monitored_instance": {"name": "Linux kernel SLAB allocator statistics", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux kernel", "slab", "slub", "slob", "slabinfo"], "most_popular": false}, "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
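To verify that the capability is in place, you can inspect the plugin binary (a sketch; the path shown is the usual default and may differ on your install):\n\n```bash\n# list file capabilities on the slabinfo plugin\ngetcap /usr/libexec/netdata/plugins.d/slabinfo.plugin\n```\n\n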
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in its permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"The main configuration file.\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "tc.plugin", "module_name": "tc.plugin", "monitored_instance": {"name": "tc QoS classes", "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. 
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "tc.plugin", "module_name": "tc.plugin", "monitored_instance": {"name": "tc QoS classes", "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the `tc` command to get the necessary metrics. To achieve this, Netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with the following content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config option\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. 
Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. |\n| device_name | The network interface name. |\n| group | The device family. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", "id": "tc.plugin-tc.plugin-tc_QoS_classes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "timex.plugin", "module_name": "timex.plugin", "monitored_instance": {"name": "Timex", "link": "", "categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses the `adjtimex` system call on Linux and `ntp_adjtime` on FreeBSD or macOS to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Enables the chart showing the system clock synchronization state. | yes | yes |\n| time offset | Enables the chart showing the computed time offset between the local system and the reference clock. | yes | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n{% details summary=\"Config\" %}\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n{% /details %}\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", "id": "timex.plugin-timex.plugin-Timex", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "xenstat.plugin", "module_name": "xenstat.plugin", "monitored_instance": {"name": "Xen XCP-ng", "link": "https://xenproject.org/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "xen.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domain statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to run with setuid privileges.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required Xen package is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n{% /details %}\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", "integration_type": "collector", "id": "xenstat.plugin-xenstat.plugin-Xen_XCP-ng", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml", "related_resources": ""}, {"id": "deploy-alpinelinux", "meta": {"name": "Alpine Linux", "link": "https://www.alpinelinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "alpine.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-amazonlinux", "meta": {"name": "Amazon Linux", "link": "https://aws.amazon.com/amazon-linux-2/", "categories": ["deploy.operating-systems"], "icon_filename": "amazonlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh 
&& sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-archlinux", "meta": {"name": "Arch Linux", "link": "https://archlinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "archlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build 
native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos", "meta": {"name": "CentOS", "link": "https://www.centos.org/", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos-stream", "meta": {"name": "CentOS Stream", "link": "https://www.centos.org/centos-stream", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh 
https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n| 8 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-debian", "meta": {"name": "Debian", "link": "https://www.debian.org/", "categories": ["deploy.operating-systems"], "icon_filename": "debian.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", 
"related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n| 10 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-docker", "meta": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["deploy.docker-kubernetes"], "icon_filename": "docker.svg"}, "most_popular": true, "keywords": ["docker", "container", "containers"], "install_description": "Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n", "methods": [{"method": "Docker CLI", "commands": [{"channel": "nightly", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:edge\n"}, {"channel": "stable", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined \\\n{% if $showClaimingOptions %}\n-e NETDATA_CLAIM_TOKEN={% claim_token %} \\\n-e NETDATA_CLAIM_URL={% claim_url %} \\\n-e NETDATA_CLAIM_ROOMS={% $claim_rooms %} \\\n{% /if %}\nnetdata/netdata:stable\n"}]}, {"method": "Docker Compose", "commands": 
[{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}, {"method": "Docker Swarm", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n{% if $showClaimingOptions %}\n environment:\n - NETDATA_CLAIM_TOKEN={% claim_token %}\n - NETDATA_CLAIM_URL={% claim_url %}\n - 
NETDATA_CLAIM_ROOMS={% $claim_rooms %}\n{% /if %}\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}], "additional_info": "", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-fedora", "meta": {"name": "Fedora", "link": "https://www.fedoraproject.org/", "categories": ["deploy.operating-systems"], "icon_filename": "fedora.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 39 | Core | x86_64, aarch64 | |\n| 38 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-freebsd", "meta": {"name": "FreeBSD", "link": "https://www.freebsd.org/", "categories": ["deploy.operating-systems"], "icon_filename": "freebsd.svg"}, "most_popular": true, "keywords": ["freebsd"], "install_description": "## Install dependencies\nPlease install the following packages using the command below:\n\n```\npkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\n
This step needs root privileges. Please respond in the affirmative for any relevant prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "fetch", "commands": [{"channel": "nightly", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-kubernetes", "meta": {"name": "Kubernetes (Helm)", "link": "", "categories": ["deploy.docker-kubernetes"], "icon_filename": "kubernetes.svg"}, "keywords": ["kubernetes", "container", "Orchestrator"], "install_description": "**Use helm install to install Netdata on your Kubernetes cluster**\nFor a new installation use `helm install`, or for existing clusters add the content below to your `override.yml` and then run `helm upgrade -f override.yml netdata netdata/netdata`\n", "methods": [{"method": "Helm", "commands": [{"channel": "nightly", "command": "helm install netdata netdata/netdata \\\n--set image.tag=edge{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled=\"true\" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled=\"true\" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n"}, {"channel": "stable", "command": "helm install netdata netdata/netdata \\\n--set image.tag=stable{% if $showClaimingOptions %} \\\n--set parent.claiming.enabled=\"true\" \\\n--set parent.claiming.token={% claim_token %} \\\n--set parent.claiming.rooms={% $claim_rooms %} \\\n--set child.claiming.enabled=\"true\" \\\n--set child.claiming.token={% claim_token %} \\\n--set child.claiming.rooms={% $claim_rooms %}{% /if %}\n"}]}, {"method": "Existing Cluster", 
"commands": [{"channel": "nightly", "command": "image:\n tag: edge\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"}, {"channel": "stable", "command": "image:\n tag: stable\n\nrestarter:\n enabled: true\n{% if $showClaimingOptions %}\n\nparent:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n\nchild:\n claiming:\n enabled: true\n token: {% claim_token %}\n rooms: {% $claim_rooms %}\n{% /if %}\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-linux-generic", "meta": {"name": "Linux", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "linux.svg"}, "keywords": ["linux"], "most_popular": true, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-macos", "meta": {"name": "macOS", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "macos.svg"}, "most_popular": true, "keywords": ["macOS", "mac", "apple"], "install_description": "Run the following command on your Intel-based macOS servers to install and claim Netdata:", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh 
&& sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-manjarolinux", "meta": {"name": "Manjaro Linux", "link": "https://manjaro.org/", "categories": ["deploy.operating-systems"], "icon_filename": "manjaro.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 
-1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-opensuse", "meta": {"name": "SUSE Linux", "link": "https://www.suse.com/", "categories": ["deploy.operating-systems"], "icon_filename": "openSUSE.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-oraclelinux", "meta": {"name": "Oracle Linux", "link": "https://www.oracle.com/linux/", "categories": ["deploy.operating-systems"], "icon_filename": "oraclelinux.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > 
/tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rhel", "meta": {"name": "Red Hat Enterprise Linux", "link": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux", "categories": ["deploy.operating-systems"], "icon_filename": "rhel.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | 
|\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rockylinux", "meta": {"name": "Rocky Linux", "link": "https://rockylinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "rocky.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-ubuntu", "meta": {"name": "Ubuntu", "link": "https://ubuntu.com/", "categories": ["deploy.operating-systems"], "icon_filename": "ubuntu.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} 
--claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using {% goToCategory navigateToSettings=$navigateToSettings categoryId=\"deploy.docker-kubernetes\" %}Kubernetes{% /goToCategory %} or {% goToCategory categoryId=\"deploy.docker-kubernetes\" %}Docker{% /goToCategory %}?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 23.10 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-windows", "meta": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["deploy.operating-systems"], "icon_filename": "windows.svg"}, "keywords": ["windows"], "install_description": "1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install the Netdata Agent on Linux, FreeBSD, or macOS.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to the windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. 
Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the Windows nodes are displayed as separate nodes.\n", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel{% if $showClaimingOptions %} --claim-token {% claim_token %} --claim-rooms {% $claim_rooms %} --claim-url {% claim_url %}{% /if %}\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "export-appoptics", "meta": {"name": "AppOptics", "link": "https://www.solarwinds.com/appoptics", "categories": ["export"], "icon_filename": "solarwinds.svg", "keywords": ["app optics", "AppOptics", "Solarwinds"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. 
So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-aws-kinesis", "meta": {"name": "AWS Kinesis", "link": "https://aws.amazon.com/kinesis/", "categories": ["export"], "icon_filename": "aws-kinesis.svg"}, "keywords": ["exporter", "AWS", "Kinesis"], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.\n- When building from source, use the following instructions to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record in order to distribute records evenly across the available shards.\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nSet the AWS credentials and the destination stream name explicitly in the connector configuration.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-data", "meta": {"name": "Azure Data Explorer", "link": "https://azure.microsoft.com/en-us/pricing/details/data-explorer/", "categories": ["export"], "icon_filename": "azuredataex.jpg", "keywords": ["Azure Data Explorer", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-event", "meta": {"name": "Azure Event Hub", "link": "https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about", "categories": ["export"], "icon_filename": "azureeventhub.png", "keywords": ["Azure Event Hub", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-bigquery", "meta": {"name": "Google BigQuery", "link": "https://cloud.google.com/bigquery/", "categories": ["export"], "icon_filename": "bigquery.png", "keywords": ["export", "Google BigQuery", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-blueflood", "meta": {"name": "Blueflood", "link": "http://blueflood.io/", "categories": ["export"], "icon_filename": "blueflood.png", "keywords": ["export", "Blueflood", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. 
So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-chronix", "meta": {"name": "Chronix", "link": "https://dbdb.io/db/chronix", "categories": ["export"], "icon_filename": "chronix.png", "keywords": ["export", "chronix", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). 
| no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-cortex", "meta": {"name": "Cortex", "link": "https://cortexmetrics.io/", "categories": ["export"], "icon_filename": "cortex.png", "keywords": ["export", "cortex", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-crate", "meta": {"name": "CrateDB", "link": "https://crate.io/", "categories": ["export"], "icon_filename": "crate.svg", "keywords": ["export", "CrateDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.\n- PROTOCOL can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-elastic", "meta": {"name": "ElasticSearch", "link": "https://www.elastic.co/", "categories": ["export"], "icon_filename": "elasticsearch.svg", "keywords": ["export", "ElasticSearch", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-gnocchi", "meta": {"name": "Gnocchi", "link": "https://wiki.openstack.org/wiki/Gnocchi", "categories": ["export"], "icon_filename": "gnocchi.svg", "keywords": ["export", "Gnocchi", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. 
Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. 
So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-google-pubsub", "meta": {"name": "Google Cloud Pub Sub", "link": "https://cloud.google.com/pubsub", "categories": ["export"], "icon_filename": "pubsub.png"}, "keywords": ["exporter", "Google Cloud", "Pub Sub"], "overview": "# Google Cloud Pub Sub\n\nExport metrics to the Google Cloud Pub/Sub service.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries.\n- Pub/Sub support also depends on those libraries' dependencies, such as `protobuf`, `protoc`, and `grpc`.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.\n- PROTOCOL can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = pubsub.googleapis.com\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n
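As an illustration (the host, port, and pattern below are assumptions for the example), the same simple-pattern syntax can be passed to the `allmetrics` endpoint:\n\n```bash\n# keep only apps.* charts, excluding those ending in 'reads' ('%20' encodes the space between patterns)\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=!*reads%20apps.*'\n```\n\n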
##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n- Set the `destination` option to a Pub/Sub service endpoint. `pubsub.googleapis.com` is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to `google_cloud_credentials.json`, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the `credentials file` option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": ""}, {"id": "export-graphite", "meta": {"name": "Graphite", "link": "https://graphite.readthedocs.io/en/latest/", "categories": ["export"], "icon_filename": "graphite.png"}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.\n- PROTOCOL can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. 
The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-influxdb", "meta": {"name": "InfluxDB", "link": "https://www.influxdata.com/", "categories": ["export"], "icon_filename": "influxdb.svg", "keywords": ["InfluxDB", "Influx", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. 
(as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.\n- PROTOCOL can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n
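As a minimal sketch (the instance name and patterns are illustrative), the same filtering can be applied in `exporting.conf` for this connector:\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n send charts matching = !*reads apps.*\n```\n\n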
##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-irondb", "meta": {"name": "IRONdb", "link": "https://docs.circonus.com/irondb/", "categories": ["export"], "icon_filename": "irondb.png", "keywords": ["export", "IRONdb", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. 
| no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. 
So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-json", "meta": {"name": "JSON", "link": "https://learn.netdata.cloud/docs/exporting/json-document-databases", "categories": ["export"], "icon_filename": "json.svg"}, "keywords": ["exporter", "json"], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost:5448 | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml", "troubleshooting": ""}, {"id": "export-kafka", "meta": {"name": "Kafka", "link": "https://kafka.apache.org/", "categories": ["export"], "icon_filename": "kafka.svg", "keywords": ["export", "Kafka", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n
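For example, a sketch of querying the allmetrics endpoint with the `filter` parameter (assuming an Agent listening on the default port 19999):\n\n```bash\n# keep only charts whose id or name matches apps.*\ncurl 'http://localhost:19999/api/v1/allmetrics?format=shell&filter=apps.*'\n```\n\n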
\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-kairosdb", "meta": {"name": "KairosDB", "link": "https://kairosdb.github.io/", "categories": ["export"], "icon_filename": "kairos.png", "keywords": ["KairosDB", "kairos", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-m3db", "meta": {"name": "M3DB", "link": "https://m3db.io/", "categories": ["export"], "icon_filename": "m3db.png", "keywords": ["export", "M3DB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. 
The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-metricfire", "meta": {"name": "MetricFire", "link": "https://www.metricfire.com/", "categories": ["export"], "icon_filename": "metricfire.png", "keywords": ["export", "MetricFire", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-mongodb", "meta": {"name": "MongoDB", "link": "https://www.mongodb.com/", "categories": ["export"], "icon_filename": "mongodb.svg"}, "keywords": ["exporter", "MongoDB"], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n
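An illustrative sketch of overriding that timeout directly in the connection URI (the hostname, port, and timeout value here are hypothetical):\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n # sockettimeoutms=5000 requests a 5-second socket timeout via the MongoDB URI\n destination = mongodb://localhost:27017/?sockettimeoutms=5000\n database = your_database_name\n collection = your_collection_name\n\n```\n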
", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": ""}, {"id": "export-newrelic", "meta": {"name": "New Relic", "link": "https://newrelic.com/", "categories": ["export"], "icon_filename": "newrelic.svg", "keywords": ["export", "NewRelic", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-opentsdb", "meta": {"name": "OpenTSDB", "link": "https://github.com/OpenTSDB/opentsdb", "categories": ["export"], "icon_filename": "opentsdb.png"}, "keywords": ["exporter", "OpenTSDB", "scalable time series"], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Minimal configuration\n\nAdd the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml", "troubleshooting": ""}, {"id": "export-pgsql", "meta": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["export"], "icon_filename": "postgres.svg", "keywords": ["export", "PostgreSQL", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-prometheus-remote", "meta": {"name": "Prometheus Remote Write", "link": "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage", "categories": ["export"], "icon_filename": "prometheus.svg"}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-quasar", "meta": {"name": "QuasarDB", "link": "https://doc.quasar.ai/master/", "categories": ["export"], "icon_filename": "quasar.jpeg", "keywords": ["export", "quasar", "quasarDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-splunk", "meta": {"name": "Splunk SignalFx", "link": "https://www.splunk.com/en_us/products/observability.html", "categories": ["export"], "icon_filename": "splunk.svg", "keywords": ["export", "splunk", "signalfx", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-thanos", "meta": {"name": "Thanos", "link": "https://thanos.io/", "categories": ["export"], "icon_filename": "thanos.png", "keywords": ["export", "thanos", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-tikv", "meta": {"name": "TiKV", "link": "https://tikv.org/", "categories": ["export"], "icon_filename": "tikv.png", "keywords": ["export", "TiKV", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-timescaledb", "meta": {"name": "TimescaleDB", "link": "https://www.timescale.com/", "categories": ["export"], "icon_filename": "timescale.png", "keywords": ["export", "TimescaleDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-victoria", "meta": {"name": "VictoriaMetrics", "link": "https://victoriametrics.com/products/open-source/", "categories": ["export"], "icon_filename": "victoriametrics.png", "keywords": ["export", "victoriametrics", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-vmware", "meta": {"name": "VMware Aria", "link": "https://www.vmware.com/products/aria-operations-for-applications.html", "categories": ["export"], "icon_filename": "aria.png", "keywords": ["export", "VMware", "Aria", "Tanzu", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-wavefront", "meta": {"name": "Wavefront", "link": "https://docs.wavefront.com/wavefront_data_ingestion.html", "categories": ["export"], "icon_filename": "wavefront.png", "keywords": ["export", "Wavefront", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured, and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n{% details summary=\"Config options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space-separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space-separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space-separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n{% /details %}\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "notify-alerta", "meta": {"name": "Alerta", "link": "https://alerta.io/", "categories": ["notify.agent"], "icon_filename": "alerta.png"}, "keywords": ["Alerta"], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification.\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice. | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"}, {"id": "notify-awssns", "meta": {"name": "AWS SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.agent"], "icon_filename": "aws.svg"}, "keywords": ["AWS SNS"], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS webhooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource-intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. 
If you are running Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only be allowed access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent as. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp when this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"}, {"id": "notify-cloud-awssns", "meta": {"name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.cloud"], "icon_filename": "awssns.png"}, "keywords": ["awssns"], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- An AWS account with AWS SNS access; for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the integration on AWS SNS you need:\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On the AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n - Finally, click on **Create topic** at the bottom of the page\n3. 
Now, use the new **Topic ARN** while adding the AWS SNS integration on your space.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.cloud"], "icon_filename": "discord.png"}, "keywords": ["discord", "community"], "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- You need to have a Discord server able to receive webhook integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use the Webhook URL to add your notification configuration on the Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Discord:\n - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mattermost", "meta": {"name": "Mattermost", "link": "https://mattermost.com/", "categories": ["notify.cloud"], "icon_filename": "mattermost.png"}, "keywords": ["mattermost"], "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like the one below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-microsoftteams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams", "categories": ["notify.cloud"], "icon_filename": "teams.svg"}, "keywords": ["microsoft", "teams"], "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **administrator**.\n- The Space to be on **Business** plan or higher.\n- A [Microsoft 365 for Business Account](https://www.microsoft.com/en-us/microsoft-365/business). Note that this is a **paid** account.\n\n### Settings on Microsoft Teams\n\n- The integration gets enabled at a team's channel level.\n- Click on the `...` (three dots) icon next to the channel name; it appears when you hover over it.\n- Click on `Connectors`.\n- Look for the `Incoming Webhook` connector and click configure.\n- Provide a name for your Incoming Webhook Connector, for example _Netdata Alerts_. You can also customize it with a proper icon instead of using the default image.\n- Click `Create`.\n- The _Incoming Webhook URL_ is created.\n- That is the URL to be provided to the Netdata Cloud configuration.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it.\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration.\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mobile-app", "meta": {"name": "Netdata Mobile App", "link": "https://netdata.cloud", "categories": ["notify.cloud"], "icon_filename": "netdata.png"}, "keywords": ["mobile-app", "phone", "personal-notifications"], "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to log in to the Netdata Mobile Application to receive alert and reachability notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and choose the Sign In option\n - Sign In with Email Address: Enter the email address of your registered Netdata Cloud account and click on the verification link received by email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Business Subscription**\n\n### Netdata Configuration Steps\n1. Click on the **User settings** on the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (enabled by default)\n4. Use the **Show QR Code** option to log in on your mobile device by scanning the **QR Code**\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-opsgenie", "meta": {"name": "Opsgenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.cloud"], "icon_filename": "opsgenie.png"}, "keywords": ["opsgenie", "atlassian"], "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to the integrations tab of your team and click **Add integration**\n2. Pick **API** from the available integrations. Copy your API Key and press **Save Integration**.\n3. Paste the copied API key into the corresponding field in the **Integration configuration** section of the Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. 
Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.cloud"], "icon_filename": "pagerduty.png"}, "keywords": ["pagerduty"], "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key**, which you will need to add to your notification configuration on the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-rocketchat", "meta": {"name": "RocketChat", "link": "https://www.rocket.chat/", "categories": ["notify.cloud"], "icon_filename": "rocketchat.png"}, "keywords": ["rocketchat"], "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click Save.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For RocketChat:\n - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.cloud"], "icon_filename": "slack.png"}, "keywords": ["slack"], "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a Slack app on your workspace to receive the webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from the Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - After pressing that, specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL that you will need to add to your notification configuration on the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-splunk", "meta": {"name": "Splunk", "link": "https://splunk.com/", "categories": ["notify.cloud"], "icon_filename": "splunk-black.svg"}, "keywords": ["Splunk"], "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.cloud"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- The Telegram bot token and chat ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID your bot will deliver messages to\n\n### Getting the Telegram bot token and chat ID\n\n- Bot token: To create a new bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-webhook", "meta": {"name": "Webhook", "link": "https://en.wikipedia.org/wiki/Webhook", "categories": ["notify.cloud"], "icon_filename": "webhook.svg"}, "keywords": ["generic webhooks", "webhooks"], "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Pro** plan or higher\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected.\n\n The notification content sent to the destination service will be a JSON object having these properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | rooms | object[object(string,string)] | Object with a list of room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n | **Header** | **Value** |\n |:-------------------------------:|-----------------------------|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. 
This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
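\n A quick sanity check after saving the certificate (a sketch; the file path `/path/to/Netdata_CA.pem` is only an example and must match the path used in the web server configuration below):\n\n ```bash\n # confirm the saved file parses as a valid X.509 certificate\n # and inspect its subject and validity dates\n openssl x509 -in /path/to/Netdata_CA.pem -noout -subject -dates\n ```\n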
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n <VirtualHost *:443>\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n </VirtualHost>\n <Location />\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Location>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - Your application will receive this request and must construct a signed response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n     token = request.args.get('crc_token').encode('ascii')\n\n     # creates HMAC SHA-256 hash from incoming token and your consumer secret\n     sha256_hash_digest = hmac.new(key.encode(),\n                                   msg=token,\n                                   digestmod=hashlib.sha256).digest()\n\n     # construct response data with base64 encoded hash\n     response = {\n         'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n     }\n\n     # returns properly formatted json response\n     return json.dumps(response)\n ```\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-custom", "meta": {"name": "Custom", "link": "", "categories": ["notify.agent"], "icon_filename": "custom.png"}, "keywords": ["custom"], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```bash\ncustom_sender() {\n    # note: urlencode(), docurl(), info() and error() are helper functions\n    # already defined by alarm-notify.sh\n\n    # example human readable SMS\n    local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n    # limit it to 160 characters and encode it for use in a URL\n    urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n    # a space separated list of the recipients to send alarms to\n    to=\"${1}\"\n\n    for phone in ${to}; do\n        httpcode=$(docurl -X POST \\\n            --data-urlencode \"From=XXX\" \\\n            --data-urlencode \"To=${phone}\" \\\n            --data-urlencode \"Body=${msg}\" \\\n            -u \"${accountsid}:${accounttoken}\" \\\n            https://domain.website.com/)\n\n        if [ \"${httpcode}\" = \"200\" ]; then\n            info \"sent custom notification ${msg} to ${phone}\"\n            sent=$((sent + 1))\n        else\n            error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n        fi\n    done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp when this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"}, {"id": "notify-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.agent"], "icon_filename": "discord.png"}, "keywords": ["Discord"], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"}, {"id": "notify-dynatrace", "meta": {"name": "Dynatrace", "link": "https://dynatrace.com", "categories": ["notify.agent"], "icon_filename": "dynatrace.svg"}, "keywords": ["Dynatrace"], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers, but make sure the server is network visible from your Netdata hosts. The Dynatrace server address should include the protocol prefix (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event and correlated with all the hosts tagged with the Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token. | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space. It is the URL part of the page you have access to in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left at its default value, Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. | Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).
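\n\nIf you want to verify the server, Space, and token before enabling the integration, you can POST a test event yourself. This is an illustrative sketch (not the request Netdata itself sends), assuming the Events API v2 ingest endpoint described in the Dynatrace documentation linked above; substitute your own server, Space, and token:\n\n```sh\n# hypothetical smoke test: POST a CUSTOM_INFO event to the Events API v2\ncurl -X POST \"https://monitor.example.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/api/v2/events/ingest\" \\\n -H \"Authorization: Api-Token XXXXXXX\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"eventType\":\"CUSTOM_INFO\",\"title\":\"Netdata test event\"}'\n```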
\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"}, {"id": "notify-email", "meta": {"name": "Email", "link": "", "categories": ["notify.agent"], "icon_filename": "email.png"}, "keywords": ["email"], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `sendmail` command is required for email alerts to work. Almost all MTAs provide a `sendmail` interface. Netdata sends all emails as the user `netdata`, so make sure your `sendmail` works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the notifications to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"}, {"id": "notify-flock", "meta": {"name": "Flock", "link": "https://support.flock.com/", "categories": ["notify.agent"], "icon_filename": "flock.png"}, "keywords": ["Flock"], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | Set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"}, {"id": "notify-gotify", "meta": {"name": "Gotify", "link": "https://gotify.net/", "categories": ["notify.agent"], "icon_filename": "gotify.png"}, "keywords": ["gotify"], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/`. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"}, {"id": "notify-irc", "meta": {"name": "IRC", "link": "", "categories": ["notify.agent"], "icon_filename": "irc.png"}, "keywords": ["IRC"], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for `nc`, otherwise Netdata will search for it in your system `$PATH` | | yes |\n| SEND_IRC | Set `SEND_IRC` to YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network that your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name, as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname, which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want. | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"}, {"id": "notify-kavenegar", "meta": {"name": "Kavenegar", "link": "https://kavenegar.com/", "categories": ["notify.agent"], "icon_filename": "kavenegar.png"}, "keywords": ["Kavenegar"], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides APIs for sending and receiving SMS and for making voice calls.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: `09155555555` `09177777777`. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"}, {"id": "notify-matrix", "meta": {"name": "Matrix", "link": "https://spec.matrix.org/unstable/push-gateway-api/", "categories": ["notify.agent"], "icon_filename": "matrix.svg"}, "keywords": ["Matrix"], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). 
These tokens usually don't expire.\n- The room ids that you want to send the notification to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | Set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. | | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```sh\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe room ids are unique identifiers and can be obtained from the room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"}, {"id": "notify-messagebird", "meta": {"name": "MessageBird", "link": 
"https://messagebird.com/", "categories": ["notify.agent"], "icon_filename": "messagebird.svg"}, "keywords": ["MessageBird"], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notification to be sent as an SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"}, {"id": "notify-ntfy", "meta": {"name": "ntfy", "link": "https://ntfy.sh/", "categories": ["notify.agent"], "icon_filename": 
"ntfy.svg"}, "keywords": ["ntfy"], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. 
\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`.\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`.\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`.\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"}, {"id": "notify-opsgenie", "meta": {"name": "OpsGenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.agent"], "icon_filename": "opsgenie.png"}, "keywords": ["OpsGenie"], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required. For example, there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"}, {"id": "notify-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.agent"], "icon_filename": "pagerduty.png"}, "keywords": ["PagerDuty"], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"}, {"id": "notify-prowl", "meta": {"name": "Prowl", "link": "https://www.prowlapp.com/", "categories": ["notify.agent"], "icon_filename": "prowl.png"}, "keywords": ["Prowl"], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. 
Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. You can define multiple API keys like this: `APIKEY1`, `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"}, {"id": "notify-pushbullet", "meta": {"name": "Pushbullet", "link": "https://www.pushbullet.com/", "categories": ["notify.agent"], "icon_filename": "pushbullet.png"}, "keywords": ["Pushbullet"], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata 
[config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | Set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: `user1@email.com` `user2@email.com`.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"}, {"id": "notify-pushover", "meta": {"name": "PushOver", "link": "https://pushover.net/", "categories": ["notify.agent"], "icon_filename": "pushover.png"}, "keywords": ["PushOver"], "overview": "# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. 
This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | Set `PUSHOVER_APP_TOKEN` to your Pushover Application token. | | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"}, {"id": "notify-rocketchat", "meta": {"name": "RocketChat", "link": "https://rocket.chat/", "categories": ["notify.agent"], "icon_filename": "rocketchat.png"}, "keywords": ["RocketChat"], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"}, {"id": "notify-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.agent"], "icon_filename": "slack.png"}, "keywords": ["Slack"], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Slack app along with an incoming webhook, read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to `YES` | YES | yes |\n| SLACK_WEBHOOK_URL | Set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. | | yes |\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"}, {"id": "notify-sms", "meta": {"name": "SMS", "link": "http://smstools3.kekekasvi.com/", "categories": ["notify.agent"], "icon_filename": "sms.svg"}, "keywords": ["SMS tools 3", "SMS", "Messaging"], "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nThe SMS Server Tools 3 is SMS Gateway software that can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. 
Any user executing `sendsms` needs to:\n - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n - Be a member of group `smsd`\n - To verify the steps above are successful, `su netdata` and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH`. | | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: `PHONE1` `PHONE2`. | | yes |\n\n##### sendsms\n\n```conf\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n```\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"}, {"id": "notify-syslog", "meta": {"name": "syslog", "link": "", "categories": ["notify.agent"], "icon_filename": "syslog.png"}, "keywords": ["syslog"], "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. 
This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging. By default, this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to `YES`; make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. By default, all lines are prefixed with `netdata`.\n\nThe `facility` and `level` are the standard syslog facility and level options; for more info on them, see your local `logger` and `syslog` documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). 
However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all syslog daemons allow at least simple forwarding, and most of the popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata` `daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"}, {"id": "notify-teams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams/log-in", "categories": ["notify.agent"], "icon_filename": "msteams.svg"}, "keywords": ["Microsoft", "Teams", "MS teams"], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
You can use the same one on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to `YES` | YES | yes |\n| MSTEAMS_WEBHOOK_URL | Set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"}, {"id": "notify-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.agent"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot 
token. To get one, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen; supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to `YES` | YES | yes |\n| TELEGRAM_BOT_TOKEN | Set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set `DEFAULT_RECIPIENT_TELEGRAM` to the chat ID you want the alert notifications to be sent to. You can define multiple chat IDs like this: `-49999333322` `-1009999222255`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"}, {"id": "notify-twilio", "meta": {"name": "Twilio", "link": "https://www.twilio.com/", "categories": ["notify.agent"], "icon_filename": "twilio.png"}, "keywords": ["Twilio"], "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- 
Get your Account SID and Token from https://www.twilio.com/console\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n{% details summary=\"Config Options\" %}\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to `YES` | YES | yes |\n| TWILIO_ACCOUNT_SID | Set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set `DEFAULT_RECIPIENT_TWILIO` to the number you want the alert notifications to be sent to. You can define multiple numbers like this: `+15555555555` `+17777777777`. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_TWILIO` with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n{% /details %}\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"}] diff --git a/integrations/integrations.json index 212fe03b4f61cc..6743f16655a63f 100644 --- a/integrations/integrations.json +++ b/integrations/integrations.json @@ -1 +1 @@ -{"categories": [{"id": "deploy", "name": "Deploy", "description": "", "most_popular": true, "priority": 1, "children": [{"id": "deploy.operating-systems", "name": "Operating Systems", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "deploy.docker-kubernetes", "name": "Docker & Kubernetes", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "deploy.provisioning-systems", "parent": "deploy", "name": 
"Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection", "name": "Data Collection", "description": "", "most_popular": true, "priority": 2, "children": [{"id": "data-collection.other", "name": "Other", "description": "", "most_popular": false, "priority": -1, "collector_default": true, "children": []}, {"id": "data-collection.ebpf", "name": "eBPF", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd", "name": "FreeBSD", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.containers-and-vms", "name": "Containers and VMs", "description": "", "most_popular": true, "priority": 6, "children": []}, {"id": "data-collection.database-servers", "name": "Databases", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.kubernetes", "name": "Kubernetes", "description": "", "most_popular": true, "priority": 7, "children": []}, {"id": "data-collection.notifications", "name": "Incident Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.service-discovery-registry", "name": "Service Discovery / Registry", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.web-servers-and-web-proxies", "name": "Web Servers and Web Proxies", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.cloud-provider-managed", "name": "Cloud Provider Managed", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.windows-systems", "name": "Windows Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.apm", "name": "APM", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.hardware-devices-and-sensors", "name": "Hardware Devices and Sensors", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": "data-collection.macos-systems", "name": "macOS Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.message-brokers", "name": "Message Brokers", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.provisioning-systems", "name": "Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.search-engines", "name": "Search Engines", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems", "name": "Linux Systems", "description": "", "most_popular": true, "priority": 5, "children": [{"id": "data-collection.linux-systems.system-metrics", "name": "System", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.linux-systems.memory-metrics", "name": "Memory", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.linux-systems.cpu-metrics", "name": "CPU", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.linux-systems.pressure-metrics", "name": "Pressure", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.network-metrics", "name": "Network", "description": "", "most_popular": true, "priority": 5, "children": []}, 
{"id": "data-collection.linux-systems.ipc-metrics", "name": "IPC", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.disk-metrics", "name": "Disk", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": "data-collection.linux-systems.firewall-metrics", "name": "Firewall", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.power-supply-metrics", "name": "Power Supply", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics", "name": "Filesystem", "description": "", "most_popular": false, "priority": -1, "children": [{"id": "data-collection.linux-systems.filesystem-metrics.zfs", "name": "ZFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.btrfs", "name": "BTRFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.nfs", "name": "NFS", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.linux-systems.kernel-metrics", "name": "Kernel", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.networking-stack-and-network-interfaces", "name": "Networking Stack and Network Interfaces", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.synthetic-checks", "name": "Synthetic Checks", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ci-cd-systems", "name": "CICD Platforms", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ups", "name": "UPS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd-systems", "name": "FreeBSD Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.logs-servers", "name": "Logs Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.security-systems", "name": "Security Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.observability", "name": "Observability", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.gaming", "name": "Gaming", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.iot-devices", "name": "IoT Devices", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.media-streaming-servers", "name": "Media Services", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.authentication-and-authorization", "name": "Authentication and Authorization", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.project-management", "name": "Project Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.application-servers", "name": "Application Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.dns-and-dhcp-servers", "name": "DNS and DHCP Servers", "description": "", "most_popular": false, 
"priority": -1, "children": []}, {"id": "data-collection.mail-servers", "name": "Mail Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.processes-and-system-services", "name": "Processes and System Services", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.storage-mount-points-and-filesystems", "name": "Storage, Mount Points and Filesystems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.systemd", "name": "Systemd", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.telephony-servers", "name": "Telephony Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.vpns", "name": "VPNs", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.blockchain-servers", "name": "Blockchain Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.distributed-computing-systems", "name": "Distributed Computing Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.generic-data-collection", "name": "Generic Data Collection", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.p2p", "name": "P2P", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.snmp-and-networked-devices", "name": "SNMP and Networked Devices", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.system-clock-and-ntp", "name": "System Clock and NTP", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.nas", "name": "NAS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.api-gateways", "name": "API Gateways", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.task-queues", "name": "Task Queues", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ftp-servers", "name": "FTP Servers", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "logs", "name": "Logs", "description": "Monitoring logs on your infrastructure", "most_popular": true, "priority": 3, "children": []}, {"id": "export", "name": "exporters", "description": "Exporter Integrations", "most_popular": true, "priority": 5, "children": []}, {"id": "notify", "name": "notifications", "description": "Notification Integrations", "most_popular": true, "priority": 4, "children": [{"id": "notify.agent", "name": "Agent Dispatched Notifications", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "notify.cloud", "name": "Centralized Cloud Notifications", "description": "", "most_popular": true, "priority": 1, "children": []}]}], "integrations": [{"meta": {"plugin_name": "apps.plugin", "module_name": "apps", "monitored_instance": {"name": "Applications", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "applications.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["applications", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# 
Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-apps-Applications", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "groups", "monitored_instance": {"name": "User Groups", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "user.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["groups", "processes", "user auditing", "authorization", "os", "host monitoring"], "most_popular": false}, "overview": "# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization on a user groups context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for 
this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user_group | The name of the user group. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-groups-User_Groups", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "users", "monitored_instance": {"name": "Users", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "users.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["users", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization on a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | The name of the user. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-users-Users", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Containers", "link": "", "categories": ["data-collection.containers-and-vms"], "icon_filename": "container.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number 
of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Kubernetes Containers", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["k8s", "kubernetes", "pods", "containers"], "most_popular": true}, "overview": "# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). 
The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name.The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "LXC Containers", "link": "", "icon_filename": "lxc.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["lxc", "lxd", "container"], "most_popular": true}, "overview": "# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 
cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-LXC_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Libvirt Containers", "link": "", "icon_filename": "libvirt.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["libvirt", "container"], "most_popular": true}, "overview": "# Libvirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Libvirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Libvirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Proxmox Containers", "link": "", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["proxmox", "container"], "most_popular": true}, "overview": "# Proxmox Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Proxmox_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Systemd Services", "link": "", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"], "keywords": ["systemd", "services"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Systemd Services for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Systemd_Services", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Virtual Machines", "link": "", "icon_filename": "container.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vms", "virtualization", "container"], "most_popular": true}, "overview": "# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Virtual Machines for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Virtual_Machines", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "oVirt Containers", "link": "", "icon_filename": "ovirt.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ovirt", "container"], "most_popular": true}, "overview": "# oVirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-oVirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "ap", "monitored_instance": {"name": "Access Points", "link": "https://learn.netdata.cloud/docs/data-collection/networking-stack-and-network-interfaces/linux-access-points", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ap", "access", "point", "wireless", "network"], "most_popular": false}, "overview": "# Access Points\n\nPlugin: charts.d.plugin\nModule: ap\n\n## Overview\n\nThe ap collector visualizes data related to wireless access points.\n\nIt uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin auto-detects access points running on your Linux box.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### `iw` utility\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/ap.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the ap collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 
| 1 | no |\n| ap_priority | Controls the order of charts on the netdata dashboard. | 6900 | no |\n| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Change the collection frequency\n\nSpecify a custom collection frequency (update_every) for this collector\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\nap_update_every=10\n\n# the charts priority on the dashboard\n#ap_priority=6900\n\n# the number of retries to do in case of failure\n# before disabling the module\n#ap_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 ap\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit, expected | Mbps |\n\n", "integration_type": "collector", "id": "charts.d.plugin-ap-Access_Points", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/ap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "apcupsd", "monitored_instance": {"name": "APC UPS", "link": "https://www.apc.com", "categories": ["data-collection.ups"], "icon_filename": "apc.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ups", "apc", "power", "supply", "battery", "apcupsd"], "most_popular": false}, "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\"\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. 
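\n\nTo spot-check the raw values behind these charts, you can query the daemon directly with the same `apcaccess` tool the collector uses (a quick, optional sanity check; adjust the address if you changed `apcupsd_sources` from the default):\n\n```bash\n# Ask the apcupsd daemon for its full status report; the collector parses\n# fields such as STATUS, LINEV, LOADPCT, BCHARGE and TIMELEFT from this output.\napcaccess status 127.0.0.1:3551\n```\n\n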
Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", "integration_type": "collector", "id": "charts.d.plugin-apcupsd-APC_UPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "libreswan", "monitored_instance": {"name": "Libreswan", "link": "https://libreswan.org/", "categories": ["data-collection.vpns"], "icon_filename": "libreswan.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vpn", "libreswan", "network", "ipsec"], "most_popular": false}, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost likely, the `netdata` user will not be able to query libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to get access to libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. 
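\n\nIf the tunnel charts stay empty, it is worth first verifying that the `netdata` user can actually run the commands the collector depends on (this assumes the sudoers entries from the prerequisites above, with `ipsec` at `/sbin/ipsec`):\n\n```bash\n# Both commands should print status output without prompting for a password;\n# a password prompt or a denial means the sudoers entries need adjusting.\nsudo -u netdata sudo /sbin/ipsec whack --status\nsudo -u netdata sudo /sbin/ipsec whack --trafficstatus\n```\n\n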
Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "charts.d.plugin-libreswan-Libreswan", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "opensips", "monitored_instance": {"name": "OpenSIPS", "link": "https://opensips.org/", "categories": ["data-collection.telephony-servers"], "icon_filename": "opensips.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["opensips", "sip", "voice", "video", "stream"], "most_popular": false}, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` along with a default number of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. 
| 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", "integration_type": "collector", "id": "charts.d.plugin-opensips-OpenSIPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (sysfs)", "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": 
{"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "sysfs", "hwmon", "rpi", "raspberry pi"], "most_popular": false}, "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).\nFor all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. 
If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave at 1, it is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", "integration_type": "collector", "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cups.plugin", "module_name": "cups.plugin", "monitored_instance": {"name": "CUPS", "link": "https://www.cups.org/", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "cups.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance to achieve optimal printing system operations. 
Monitor CUPS performance for achieving optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses the CUPS shared library to connect to and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs access to the server. Netdata sets the necessary permissions during installation so it can reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when the CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n
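\nAs an illustration, a `[plugin:cups]` section using the documented default could look like this (a sketch; both options are optional):\n\n```ini\n[plugin:cups]\n update every = 1\n```\n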
\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", "integration_type": "collector", "id": "cups.plugin-cups.plugin-CUPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/extfrag", "monitored_instance": {"name": "System Memory Fragmentation", "link": "https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["extfrag", "extfrag_threshold", "memory fragmentation"], "most_popular": false}, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your `fstab(5)` so the filesystem is mounted automatically at boot, avoiding the need to mount it before starting Netdata.\n
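\nFor example, an `/etc/fstab` entry like the following (a sketch; adjust the options to your distribution) mounts debugfs automatically:\n\n```text\ndebugfs  /sys/kernel/debug  debugfs  defaults  0  0\n```\n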
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/zswap", "monitored_instance": {"name": "Linux ZSwap", "link": "https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "zswap", "frontswap", "swap cache"], "most_popular": false}, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system is using zswap.\n
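\nYou can also verify this manually; on most modern kernels the following command (an illustrative check, not part of the collector) prints `Y` when zswap is enabled:\n\n```bash\ncat /sys/module/zswap/parameters/enabled\n```\n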
\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your `fstab(5)` so the filesystem is mounted automatically at boot, avoiding the need to mount it before starting Netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n
Monitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "intel_rapl", "monitored_instance": {"name": "Power Capping", "link": "https://www.kernel.org/doc/html/next/power/powercap/powercap.html", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["power capping", "energy"], "most_popular": false}, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nParses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether the system exposes Intel RAPL power capping zones.\n
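\nTo see which RAPL zones your system exposes, you can list their names manually (an illustrative check; the exact paths can differ between systems):\n\n```bash\n# Each powercap zone directory contains a human-readable name\ncat /sys/devices/virtual/powercap/intel-rapl/intel-rapl:*/name\n```\n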
\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your `fstab(5)` so the filesystem is mounted automatically at boot, avoiding the need to mount it before starting Netdata.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the consumption of Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", "integration_type": "collector", "id": "debugfs.plugin-intel_rapl-Power_Capping", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "diskspace.plugin", "module_name": "diskspace.plugin", "monitored_instance": {"name": "Disk space", "link": "", "categories": ["data-collection.linux-systems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "ebpf.plugin", "module_name": "disk"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "I/O", "space", "inode"], "most_popular": false}, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from the `/proc/self/mountinfo` and `/proc/diskstats` files.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n
```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also specify per-mount-point options using a `[plugin:proc:diskspace:mountpoint]` section.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove charts when a device is unmounted from the host. | yes | no |\n| check for new mount points every | How often to parse proc files to check for new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for the listed paths. This option accepts a netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for the listed filesystems. This option accepts a netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for the listed filesystems. This option accepts a netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define whether the plugin will show space usage metrics. When set to `auto`, the plugin will try to collect and display this information if the filesystem or path was not excluded by the previous options. | auto | no |\n| inodes usage for all disks | Define whether the plugin will show inode usage metrics. When set to `auto`, the plugin will try to collect and display this information if the filesystem or path was not excluded by the previous options. | auto | no |\n
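\nFor instance, to poll every 5 seconds and spell out the excluded paths, the section could look like this (a sketch built from the options above, not shipped defaults):\n\n```ini\n[plugin:proc:diskspace]\n update every = 5\n exclude space metrics on paths = /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/*\n```\n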
\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "diskspace.plugin-diskspace.plugin-Disk_space", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "cachestat", "monitored_instance": {"name": "eBPF Cachestat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Page cache", "Hit ratio", "eBPF"], "most_popular": false}, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel is manipulating their files.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n
8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of having the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n
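\nA minimal `ebpf.d/cachestat.conf` sketch that spells out the documented defaults (shown for illustration only) would be:\n\n```ini\n[global]\n update every = 5\n ebpf load mode = entry\n apps = no\n cgroups = no\n pid table size = 32768\n ebpf type format = auto\n ebpf co-re tracing = trampoline\n maps per core = yes\n lifetime = 300\n```\n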
\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-cachestat-eBPF_Cachestat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "dcstat", "monitored_instance": {"name": "eBPF DCstat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Directory Cache", "File system", "eBPF"], "most_popular": false}, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of file access from memory or storage devices.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n
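\nA quick way to inspect these flags (an illustrative command; your distribution may store the config in `/boot/config-$(uname -r)` instead) is:\n\n```bash\nzgrep -E \"CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=\" /proc/config.gz\n```\n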
\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of having the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n
| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", "integration_type": "collector", "id": "ebpf.plugin-dcstat-eBPF_DCstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "disk", "monitored_instance": {"name": "eBPF Disk", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hard Disk", "eBPF", "latency", "partition"], "most_popular": false}, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttaches tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n
5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure I/O event latency for every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-disk-eBPF_Disk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filedescriptor", "monitored_instance": {"name": "eBPF Filedescriptor", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["file", "eBPF", "fd", "open", "close"], "most_popular": false}, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to the functions responsible for opening or closing file descriptors, and possible errors.\n\nAttaches tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n
### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and how frequently files are opened and closed, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of having the information centralized. | yes | no |\n
| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filedescriptor-eBPF_Filedescriptor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filesystem", "monitored_instance": {"name": "eBPF Filesystem", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Filesystem", "ext4", "btrfs", "nfs", "xfs", "zfs", "eBPF", "latency", "I/O"], "most_popular": false}, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor the latency of main filesystem actions, such as I/O events.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n
### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. `[global]` overwrites the default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n
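\nFor example, to keep the defaults but disable NFS latency monitoring, the file could look like this (a sketch based on the options above):\n\n```ini\n[global]\n update every = 5\n\n[filesystem]\n btrfsdist = yes\n ext4dist = yes\n nfsdist = no\n xfsdist = yes\n zfsdist = yes\n```\n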
\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filesystem-eBPF_Filesystem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "hardirq", "monitored_instance": {"name": "eBPF Hardirq", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["HardIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttaches tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n
8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latest timestamp for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-hardirq-eBPF_Hardirq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mdflush", "monitored_instance": {"name": "eBPF MDflush", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["MD", "RAID", "eBPF"], "most_popular": false}, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n
", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files with different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only function calls (`entry`) or also monitor function returns (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the last data collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mdflush-eBPF_MDflush", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mount", "monitored_instance": {"name": "eBPF Mount", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["mount", "umount", "device", "eBPF"], "most_popular": false}, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls to the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n
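\nAs a quick check (a minimal sketch; it assumes the standard sysfs layout and the `mountpoint` tool from util-linux), you can mount `debugfs` only when it is not already available:\n\n```bash\n# Mount debugfs only if it is not already mounted\nmountpoint -q /sys/kernel/debug || sudo mount -t debugfs none /sys/kernel/debug\n```\nTo make the mount persistent across reboots, you can also add an entry to `/etc/fstab`.\n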
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls to the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mount-eBPF_Mount", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "oomkill", "monitored_instance": {"name": "eBPF OOMkill", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application", "memory"], "most_popular": false}, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that reach out-of-memory (OOM) conditions.\n\nAttach tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show cgroups/services that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", "integration_type": "collector", "id": "ebpf.plugin-oomkill-eBPF_OOMkill", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "process", "monitored_instance": {"name": "eBPF Process", "link": "https://github.com/netdata/netdata/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Memory", "plugin", "eBPF"], "most_popular": false}, "overview": "# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses Netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts, you need to compile Netdata with the `NETDATA_DEV_MODE` flag.\n\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin is allocating memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n\n", "integration_type": "collector", "id": "ebpf.plugin-process-eBPF_Process", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "processes", "monitored_instance": {"name": "eBPF Processes", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["thread", "fork", "process", "eBPF"], "most_popular": false}, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to functions that create tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here impacts only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-processes-eBPF_Processes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "shm", "monitored_instance": {"name": "eBPF SHM", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "shared memory", "eBPF"], "most_popular": false}, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. 
Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n| shmget | Enable or disable monitoring for syscall `shmget` | yes | no |\n| shmat | Enable or disable monitoring for syscall `shmat` | yes | no |\n| shmdt | Enable or disable monitoring for syscall `shmdt` | yes | no |\n| shmctl | Enable or disable monitoring for syscall `shmctl` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-shm-eBPF_SHM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "socket", "monitored_instance": {"name": "eBPF Socket", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["TCP", "UDP", "bandwidth", "server", "connection", "socket"], "most_popular": false}, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for protocols TCP and UDP.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
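For example (a minimal sketch; the exact config file path for the running kernel varies by distribution), you can list the relevant flags with:\n\n```bash\n# Inspect the eBPF-related flags of the running kernel\nzgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz 2>/dev/null || grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-$(uname -r)\n```\n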
\nSome of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside the `network connections` section are ignored for now.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |\n| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connected_v4 | connections/s |\n| cgroup.net_conn_ipv6 | connected_v6 | connections/s |\n| cgroup.net_bytes_recv | received | calls/s |\n| cgroup.net_bytes_sent | sent | calls/s |\n| cgroup.net_tcp_recv | received | calls/s |\n| cgroup.net_tcp_send | sent | calls/s |\n| cgroup.net_retransmit | retransmitted | calls/s |\n| cgroup.net_udp_send | sent | calls/s |\n| cgroup.net_udp_recv | received | calls/s |\n| services.net_conn_ipv6 | a dimension per systemd service | connections/s |\n| services.net_bytes_recv | a dimension per systemd service | kilobits/s |\n| services.net_bytes_sent | a dimension per systemd service | kilobits/s |\n| services.net_tcp_recv | a dimension per systemd service | calls/s |\n| services.net_tcp_send | a dimension per systemd service | calls/s |\n| services.net_tcp_retransmit | a dimension per systemd service | calls/s |\n| services.net_udp_send | a dimension per systemd service | calls/s |\n| services.net_udp_recv | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-socket-eBPF_Socket", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "softirq", "monitored_instance": {"name": "eBPF SoftIRQ", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SoftIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor latency for each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latest timestamp for each soft IRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-softirq-eBPF_SoftIRQ", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "swap", "monitored_instance": {"name": "eBPF SWAP", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SWAP", "memory", "eBPF", "Hard Disk"], "most_popular": false}, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n
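\nWhether you compile the kernel yourself or use a distribution kernel, you can also verify BTF support, which this collector uses when available (a minimal sketch; `/sys/kernel/btf/vmlinux` is the usual location on BTF-enabled kernels):\n\n```bash\n# A BTF-enabled kernel exposes its type information here\nls -l /sys/kernel/btf/vmlinux\n```\n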
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-swap-eBPF_SWAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "sync", "monitored_instance": {"name": "eBPF Sync", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "eBPF", "hard disk", "memory"], "most_popular": false}, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor syscalls responsible for moving data from memory to the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.meory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-sync-eBPF_Sync", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "vfs", "monitored_instance": {"name": "eBPF VFS", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["virtual", "filesystem", "eBPF", "I/O", "files"], "most_popular": false}, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or, preferably, a kernel package from your distribution. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if it is necessary.\n8. Update your boot loader (see the sketch below)\n
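\nAs an illustration of steps 6-8 (a sketch assuming a Debian-like system with GRUB; on such systems the `make install` hooks also regenerate the initramfs, while other distributions use tools such as `dracut`):\n\n```bash\n# Install the modules and the kernel image from the kernel source tree\nsudo make modules_install\nsudo make install\n# Refresh the boot loader menu\nsudo update-grub\n```\n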
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the function call (`entry`) or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by Netdata Cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only the call (`entry`) of the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroups.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by the cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show Virtual Filesystem events for the whole host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-vfs-eBPF_VFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.0.freq", "monitored_instance": {"name": "dev.cpu.0.freq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
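On FreeBSD, the value this collector reads can be cross-checked directly with `sysctl` (a manual check, independent of Netdata; `dev.cpu.0.freq_levels` additionally lists the supported frequency levels):\n\n```bash\nsysctl dev.cpu.0.freq\nsysctl dev.cpu.0.freq_levels\n```\n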
### Per dev.cpu.0.freq instance\n\nThe metric shows the current CPU frequency, which is directly affected by the system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.temperature", "monitored_instance": {"name": "dev.cpu.temperature", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet the current CPU temperature\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "devstat", "monitored_instance": {"name": "devstat", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information per hard disk available on host.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the possibility to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. 
| auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", "integration_type": "collector", "id": "freebsd.plugin-devstat-devstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getifaddrs", "monitored_instance": {"name": "getifaddrs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls `getifaddrs` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\n
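For this module, an illustrative `[plugin:freebsd:getifaddrs]` section (a sketch only; the available option names and defaults are listed under Options below) might be:\n\n```ini\n[plugin:freebsd:getifaddrs]\n enable new interfaces detected at runtime = auto\n disable by default interfaces matching = lo*\n```\n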
You can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the possibility to discover new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for ipv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for the interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Interfaces listed here are considered physical and are aggregated into the `system.net` chart. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nA general overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getifaddrs-getifaddrs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getmntinfo", "monitored_instance": {"name": "getmntinfo", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information per mount point.\n\nThe plugin calls `getmntinfo` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. 
| /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getmntinfo-getmntinfo", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "hw.intrcnt", "monitored_instance": {"name": "hw.intrcnt", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show the frequency of system interrupts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-hw.intrcnt-hw.intrcnt", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "ipfw", "monitored_instance": {"name": "ipfw", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about the FreeBSD firewall.\n\nThe plugin uses a RAW socket to communicate with the kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", "integration_type": "collector", "id": "freebsd.plugin-ipfw-ipfw", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.cp_time", "monitored_instance": {"name": "kern.cp_time", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThese options are set in the main netdata configuration file.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.cp_time-kern.cp_time", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.msq", "monitored_instance": {"name": "kern.ipc.msq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect the number of IPC message queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\n
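As a sketch, enabling this module's metric (its documented default) in the `[plugin:freebsd]` section would look like:\n\n```ini\n[plugin:freebsd]\n kern.ipc.msq = yes\n```\n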
You can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.msq-kern.ipc.msq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.sem", "monitored_instance": {"name": "kern.ipc.sem", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.sem-kern.ipc.sem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.shm", "monitored_instance": {"name": "kern.ipc.shm", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics show the status of the current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.shm-kern.ipc.shm", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.icmp.stats", "monitored_instance": {"name": "net.inet.icmp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.ip.stats", "monitored_instance": {"name": "net.inet.ip.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.states", "monitored_instance": {"name": "net.inet.tcp.states", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.stats", "monitored_instance": {"name": "net.inet.tcp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issue metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n
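On FreeBSD, the kernel counters behind these charts can be inspected manually with netstat (a cross-check independent of Netdata):\n\n```bash\nnetstat -s -p tcp\n```\n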
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.udp.stats", "monitored_instance": {"name": "net.inet.udp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.icmp6.stats", "monitored_instance": {"name": "net.inet6.icmp6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. | auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n#### Examples\n\n
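##### Disable ICMP redirect charts\n\nA minimal, illustrative sketch rather than an official example: the option name and the `[plugin:freebsd:net.inet6.icmp6.stats]` section come from the tables above, and the yes/no value follows the convention shown in the Default column.\n\n```yaml\n[plugin:freebsd:net.inet6.icmp6.stats]\n icmp redirects = no\n\n```\n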
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.ip6.stats", "monitored_instance": {"name": "net.inet6.ip6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.isr", "monitored_instance": {"name": "net.isr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable the overall view of softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show statistics about softnet stats.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.isr-net.isr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "system.ram", "monitored_instance": {"name": "system.ram", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThis metric shows RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-system.ram-system.ram", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "uptime", "monitored_instance": {"name": "uptime", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShows how long the server has been up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system is running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "freebsd.plugin-uptime-uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.loadavg", "monitored_instance": {"name": "vm.loadavg", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitors the number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.loadavg-vm.loadavg", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_intr", "monitored_instance": {"name": "vm.stats.sys.v_intr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n#### Examples\n\n
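##### Disable device interrupt collection\n\nA minimal, illustrative sketch rather than an official example: the option name comes from the table above and lives in the `[plugin:freebsd]` section named earlier.\n\n```yaml\n[plugin:freebsd]\n vm.stats.sys.v_intr = no\n\n```\n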
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows the device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_soft", "monitored_instance": {"name": "vm.stats.sys.v_soft", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_swtch", "monitored_instance": {"name": "vm.stats.sys.v_swtch", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_pgfaults", "monitored_instance": {"name": "vm.stats.vm.v_pgfaults", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n#### Examples\n\n
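##### Disable page fault collection\n\nA minimal, illustrative sketch rather than an official example: the option name comes from the table above and lives in the `[plugin:freebsd]` section named earlier.\n\n```yaml\n[plugin:freebsd]\n vm.stats.vm.v_pgfaults = no\n\n```\n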
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_swappgs", "monitored_instance": {"name": "vm.stats.vm.v_swappgs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.swap_info", "monitored_instance": {"name": "vm.swap_info", "link": "", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.swap_info-vm.swap_info", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.vmtotal", "monitored_instance": {"name": "vm.vmtotal", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on host. | yes | no |\n\n#### Examples\n\n
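##### Keep process charts, drop real memory\n\nAn illustrative sketch rather than an official example: all three option names come from the table above; only `real memory` is turned off, while the process charts stay enabled.\n\n```yaml\n[plugin:freebsd:vm.vmtotal]\n enable total processes = yes\n processes running = yes\n real memory = no\n\n```\n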
", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics give an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.vmtotal-vm.vmtotal", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "zfs", "monitored_instance": {"name": "zfs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for ZFS filesystem\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", "integration_type": "collector", "id": "freebsd.plugin-zfs-zfs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freeipmi.plugin", "module_name": "freeipmi", "monitored_instance": {"name": "Intelligent Platform Management Interface (IPMI)", "link": "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "ipmi", "freeipmi", "ipmimonitoring"], "most_popular": true}, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\nMonitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\n\n\nThe plugin uses the open-source IPMImonitoring library to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe Linux kernel IPMI module can create significant overhead.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. 
It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n./usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | treat illegal SEL events records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. 
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n/usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing the data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU-hungry, so collecting once every 5 seconds is generally acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from the freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the sensor ID:\n\n```\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n```\n\n`freeipmi.plugin` supports the option `ignore`, which accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:\n\n\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts to determine how long the IPMI processor needs to respond. 
Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", "integration_type": "collector", "id": "freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-activemq", "module_name": "activemq", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.message-brokers"], "icon_filename": "activemq.png", "name": "ActiveMQ", "link": "https://activemq.apache.org/"}, "alternative_monitored_instances": [], "keywords": ["message broker"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data 
collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", "integration_type": "collector", "id": "go.d.plugin-activemq-ActiveMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-apache", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "Apache", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set to `On` (enabled by default since Apache v2.3.6); see the sketch below.\n
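\nA minimal sketch of the relevant Apache configuration (the file path and the `Require` policy are illustrative and depend on your distribution and security requirements):\n\n```\n# e.g. /etc/apache2/conf-available/server-status.conf (illustrative path)\nExtendedStatus On\n<Location \"/server-status\">\n    SetHandler server-status\n    Require local\n</Location>\n```\n\nReload Apache after enabling the module and applying the configuration.\n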
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Apache", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-energid", "module_name": "apache", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energi Core Wallet", "link": "", "icon_filename": "energi.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["energid"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Energi Core Wallet\n\nPlugin: go.d.plugin\nModule: energid\n\n## Overview\n\nThis module monitors Energi Core Wallet instances.\nIt works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/energid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/energid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9796 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9796\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n - name: remote\n url: http://192.0.2.1:9796\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `energid` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m energid\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Energi Core Wallet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| energid.blockindex | blocks, headers | count |\n| energid.difficulty | difficulty | difficulty |\n| energid.mempool | max, usage, tx_size | bytes |\n| energid.secmem | total, used, free, locked | bytes |\n| energid.network | connections | connections |\n| energid.timeoffset | timeoffset | seconds |\n| energid.utxo_transactions | transactions, output_transactions | transactions |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Energi_Core_Wallet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/energid/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpd", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "HTTPD", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-HTTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cassandra", "module_name": "cassandra", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.database-servers"], "icon_filename": "cassandra.svg", "name": "Cassandra", "link": "https://cassandra.apache.org/_/index.html"}, "alternative_monitored_instances": [], "keywords": ["nosql", "dbms", "db", "database"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and more, while also providing metrics for each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: Paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) JAR file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`\n ```\n 
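# The -javaagent argument format is <path-to-jar>=<listen-port>:<path-to-config>; port 7072 below must match the url configured in go.d/cassandra.conf.\n 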
JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-cassandra-Cassandra", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-chrony", "module_name": "chrony", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "chrony.jpg", "name": "Chrony", "link": "https://chrony.tuxfamily.org/"}, "alternative_monitored_instances": [], "keywords": [], "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}, "most_popular": false}, "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and peer activity status.\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers the Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. 
| 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_delay | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cockroachdb", "plugin_name": "go.d.plugin", "module_name": "cockroachdb", "monitored_instance": {"name": "CockroachDB", "link": "https://www.cockroachlabs.com/", "icon_filename": "cockroachdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cockroachdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a 
significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", "integration_type": "collector", "id": "go.d.plugin-cockroachdb-CockroachDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-consul", "plugin_name": "go.d.plugin", "module_name": "consul", "monitored_instance": {"name": "Consul", "link": "https://www.consul.io/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "consul.svg"}, "alternative_monitored_instances": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["service networking platform", "hashicorp"], "most_popular": true}, "overview": 
"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, 
quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-consul-Consul", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/consul/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-coredns", "plugin_name": "go.d.plugin", "module_name": "coredns", "monitored_instance": {"name": "CoreDNS", "link": "https://coredns.io/", "icon_filename": "coredns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["coredns", "dns", "kubernetes"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-coredns-CoreDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchbase", "plugin_name": "go.d.plugin", "module_name": "couchbase", "monitored_instance": {"name": "Couchbase", "link": "https://www.couchbase.com/", "icon_filename": "couchbase.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchbase", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchbase-Couchbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchdb", "plugin_name": "go.d.plugin", "module_name": "couchdb", "monitored_instance": {"name": "CouchDB", "link": "https://couchdb.apache.org/", "icon_filename": "couchdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchdb-CouchDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dns_query", "plugin_name": "go.d.plugin", "module_name": "dns_query", "monitored_instance": {"name": "DNS query", "link": "", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record types. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: job1\n    record_types:\n      - A\n      - AAAA\n    domains:\n      - google.com\n      - github.com\n      - reddit.com\n    servers:\n      - 8.8.8.8\n      - 8.8.4.4\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dns_query\n  ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dns_query-DNS_query", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsdist", "plugin_name": "go.d.plugin", "module_name": "dnsdist", "monitored_instance": {"name": "DNSdist", "link": "https://dnsdist.org/", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsdist", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSDist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsdist-DNSdist", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq", "plugin_name": "go.d.plugin", "module_name": "dnsmasq", "monitored_instance": {"name": "Dnsmasq", "link": "https://thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq-Dnsmasq", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq_dhcp", "plugin_name": "go.d.plugin", "module_name": "dnsmasq_dhcp", "monitored_instance": {"name": "Dnsmasq DHCP", "link": "https://www.thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dhcp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors Dnsmasq DHCP lease databases, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. 
| /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: dnsmasq_dhcp\n    leases_path: /var/lib/misc/dnsmasq.leases\n    conf_path: /etc/dnsmasq.conf\n    conf_dir: /etc/dnsmasq.d\n\n```\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n```yaml\njobs:\n  - name: dnsmasq_dhcp\n    leases_path: /etc/pihole/dhcp.leases\n    conf_path: /etc/dnsmasq.conf\n    conf_dir: /etc/dnsmasq.d\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m dnsmasq_dhcp\n  ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker", "plugin_name": "go.d.plugin", "module_name": "docker", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["container"], "most_popular": true}, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers' state, health status and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List 
containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires the `netdata` user to be in the `docker` group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 1 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    address: 'unix:///var/run/docker.sock'\n\n  - name: remote\n    address: 'tcp://203.0.113.10:2375'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n  ```bash\n  cd /usr/libexec/netdata/plugins.d/\n  ```\n\n- Switch to the `netdata` user.\n\n  ```bash\n  sudo -u netdata -s\n  ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n  ```bash\n  ./go.d.plugin -d -m docker\n  ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker-Docker", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker_engine", "plugin_name": "go.d.plugin", "module_name": "docker_engine", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker Engine", "link": "https://docs.docker.com/engine/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["docker", "container"], "most_popular": false}, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker TCP socket: `http://127.0.0.1:9323/metrics`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, 
autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nConfiguration with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker_engine-Docker_Engine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dockerhub", "plugin_name": "go.d.plugin", "module_name": "dockerhub", "monitored_instance": {"name": "Docker Hub repository", "link": "https://hub.docker.com/", "icon_filename": "docker.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["dockerhub"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repository statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dockerhub-Docker_Hub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-elasticsearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elasticsearch", "link": "https://www.elastic.co/elasticsearch/", "icon_filename": "elasticsearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. 
Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-Elasticsearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-opensearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenSearch", "link": "https://opensearch.org/", "icon_filename": "opensearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | 
[Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. 
Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-OpenSearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-envoy", "plugin_name": "go.d.plugin", "module_name": "envoy", "monitored_instance": {"name": "Envoy", "link": "https://www.envoyproxy.io/", "icon_filename": "envoy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["envoy", "proxy"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, 
draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-envoy-Envoy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-filecheck", "plugin_name": "go.d.plugin", "module_name": "filecheck", "monitored_instance": {"name": "Files and directories", "link": "", "icon_filename": "filesystem.svg", "categories": ["data-collection.linux-systems"]}, "keywords": ["files", "directories"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors files and directories.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. | 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n##### Directories\n\nDirectories monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Files and directories instance\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence | a dimension per file | boolean |\n| filecheck.file_mtime_ago | a dimension per file | seconds |\n| filecheck.file_size | a dimension per file | bytes |\n| filecheck.dir_existence | a dimension per directory | boolean |\n| filecheck.dir_mtime_ago | a dimension per directory | seconds |\n| filecheck.dir_num_of_files | a dimension per directory | files |\n| filecheck.dir_size | a dimension per directory | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-filecheck-Files_and_directories", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-fluentd", "plugin_name": "go.d.plugin", "module_name": "fluentd", "monitored_instance": {"name": "Fluentd", "link": "https://www.fluentd.org/", "icon_filename": "fluentd.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["fluentd", "logging"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", "integration_type": "collector", "id": "go.d.plugin-fluentd-Fluentd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-freeradius", "plugin_name": "go.d.plugin", "module_name": "freeradius", "monitored_instance": {"name": "FreeRADIUS", "link": "https://freeradius.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "freeradius.svg"}, "keywords": ["freeradius", "radius"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. 
| adminsecret | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-freeradius-FreeRADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-geth", "plugin_name": "go.d.plugin", "module_name": "geth", "monitored_instance": {"name": "Go-ethereum", "link": "https://github.com/ethereum/go-ethereum", "icon_filename": "geth.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["geth", "ethereum", "blockchain"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-geth-Go-ethereum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/geth/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-haproxy", "plugin_name": "go.d.plugin", "module_name": "haproxy", "monitored_instance": {"name": "HAProxy", "link": "https://www.haproxy.org/", "icon_filename": "haproxy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["haproxy", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-haproxy-HAProxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-hfs", "plugin_name": "go.d.plugin", "module_name": "hfs", "monitored_instance": {"name": "Hadoop Distributed File System (HDFS)", "link": "https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html", "icon_filename": "hadoop.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["hdfs", "hadoop"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hfs\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-hfs-Hadoop_Distributed_File_System_(HDFS)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpcheck", "plugin_name": "go.d.plugin", "module_name": "httpcheck", "monitored_instance": {"name": "HTTP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP server availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| header_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| header_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| header_match.key | The exact name of the HTTP header to check for. | | yes |\n| header_match.value | The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n##### With `header_match`\n\nExample configurations with `header_match`. 
See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.\n\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header\n # and its value must be equal to \"noindex, nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header,\n # and the value of the header does not matter.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # with a value equal to \"noindex, nofollow\".\n # This config checks both the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-httpcheck-HTTP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-isc_dhcpd", "plugin_name": "go.d.plugin", "module_name": "isc_dhcpd", "monitored_instance": {"name": "ISC DHCP", "link": "https://www.isc.org/dhcp/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "keywords": ["dhcpd", "dhcp"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. | | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n| isc_dhcpd.pool_active_leases | a dimension per DHCP pool | leases |\n| isc_dhcpd.pool_utilization | a dimension per DHCP pool | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-isc_dhcpd-ISC_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubelet", "plugin_name": "go.d.plugin", "module_name": "k8s_kubelet", "monitored_instance": {"name": "Kubelet", "link": "https://kubernetes.io/docs/concepts/overview/components/#kubelet", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubelet", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubelet-Kubelet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubeproxy", "plugin_name": "go.d.plugin", "module_name": "k8s_kubeproxy", "monitored_instance": {"name": "Kubeproxy", "link": "https://kubernetes.io/docs/concepts/overview/components/#kube-proxy", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubeproxy", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis 
integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubeproxy-Kubeproxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_state", "plugin_name": "go.d.plugin", "module_name": "k8s_state", "monitored_instance": {"name": "Kubernetes Cluster State", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubernetes", "k8s"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-lighttpd", "plugin_name": "go.d.plugin", "module_name": "lighttpd", "monitored_instance": {"name": "Lighttpd", "link": "https://www.lighttpd.net/", "icon_filename": "lighttpd.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- 
http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-lighttpd-Lighttpd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logind", "plugin_name": "go.d.plugin", "module_name": "logind", "monitored_instance": {"name": "systemd-logind users", "link": "https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html", "icon_filename": "users.svg", "categories": ["data-collection.systemd"]}, "keywords": ["logind", "systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", "integration_type": "collector", "id": "go.d.plugin-logind-systemd-logind_users", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logind/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logstash", "plugin_name": "go.d.plugin", "module_name": "logstash", "monitored_instance": {"name": "Logstash", "link": "https://www.elastic.co/products/logstash", "icon_filename": "elastic-logstash.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["logstash"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. 
| 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-logstash-Logstash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mongodb", "plugin_name": "go.d.plugin", "module_name": "mongodb", "monitored_instance": {"name": "MongoDB", "link": "https://www.mongodb.com/", "icon_filename": "mongodb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["mongodb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n##### With databases metrics\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available on shards only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", "integration_type": "collector", "id": "go.d.plugin-mongodb-MongoDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mariadb", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MariaDB", "link": "https://mariadb.org/", "icon_filename": "mariadb.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE 
STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 
| 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | 
\u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| 
mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-mysql-MariaDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MySQL", "link": "https://www.mysql.com/", "categories": ["data-collection.database-servers"], "icon_filename": "mysql.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 
'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-percona_mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "Percona MySQL", "link": "https://www.percona.com/software/mysql-database/percona-server", "icon_filename": "percona.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": false}, "overview": "# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: netdata@tcp(127.0.0.1:3306)/\n\n - name: remote\n dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-Percona_MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginx", "plugin_name": "go.d.plugin", "module_name": "nginx", "monitored_instance": {"name": "NGINX", "link": "https://www.nginx.com/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "nginx.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "go.d.plugin", "module_name": "web_log"}, {"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nginx", "web", "webserver", "http", "proxy"], "most_popular": true}, "overview": "# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
| GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX with HTTPS enabled, using a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/stub_status\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/stub_status\n\n - name: remote\n url: http://192.0.2.1/stub_status\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginx-NGINX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxplus", "plugin_name": "go.d.plugin", "module_name": "nginxplus", "monitored_instance": {"name": "NGINX Plus", "link": "https://www.nginx.com/products/nginx/", "icon_filename": "nginxplus.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["nginxplus", "nginx", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Plus with HTTPS enabled, using a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://192.0.2.1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxplus-NGINX_Plus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxvts", "plugin_name": "go.d.plugin", "module_name": "nginxvts", "monitored_instance": {"name": "NGINX VTS", "link": "https://www.nginx.com/", "icon_filename": "nginx.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n
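As a reference, a minimal configuration that exposes the status endpoint might look like the following (directive names are taken from the nginx-module-vts README; adapt the server block to your own nginx layout):\n\n```text\nhttp {\n    vhost_traffic_status_zone;\n\n    server {\n        location /status {\n            vhost_traffic_status_display;\n            vhost_traffic_status_display_format html;\n        }\n    }\n}\n```\n\nWith this in place, the JSON endpoint used by this collector is served at `/status/format/json`.\n\n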
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxvts-NGINX_VTS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ntpd", "plugin_name": "go.d.plugin", "module_name": "ntpd", "monitored_instance": {"name": "NTPd", "link": "https://www.ntp.org/documentation/4.2.8-series/ntpd", "icon_filename": "ntp.png", "categories": ["data-collection.system-clock-and-ntp"]}, "keywords": ["ntpd", "ntp", "time"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 3 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n##### With peers metrics\n\nCollect peers metrics.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", "integration_type": "collector", "id": "go.d.plugin-ntpd-NTPd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvidia_smi", "plugin_name": "go.d.plugin", "module_name": "nvidia_smi", "monitored_instance": {"name": "Nvidia GPU", "link": "https://www.nvidia.com/en-us/", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["nvidia", "gpu", "hardware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, 
"most_popular": false}, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| use_csv_format | Used format when requesting GPU information. XML is used if set to 'no'. | yes | no |\n\n#### Examples\n\n##### XML format\n\nUse XML format when requesting GPU information.\n\n```yaml\njobs:\n - name: nvidia_smi\n use_csv_format: no\n\n```\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | \u2022 | |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | \u2022 | |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_utilization | gpu | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_memory_utilization | memory | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_decoder_utilization | decoder | % | \u2022 | |\n| nvidia_smi.gpu_encoder_utilization | encoder | % | \u2022 | |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | \u2022 |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B | \u2022 | |\n| nvidia_smi.gpu_temperature | temperature | Celsius | \u2022 | \u2022 |\n| nvidia_smi.gpu_voltage | voltage | V | \u2022 | |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | \u2022 | \u2022 |\n| nvidia_smi.gpu_power_draw | power_draw | Watts | \u2022 | \u2022 |\n| nvidia_smi.gpu_performance_state | P0-P15 | state | \u2022 | \u2022 |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | \u2022 | |\n| nvidia_smi.gpu_mig_devices_count | mig | devices | \u2022 | |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvme", "plugin_name": "go.d.plugin", "module_name": "nvme", "monitored_instance": {"name": "NVMe devices", "link": "", "icon_filename": "nvme.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["nvme"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices using the command line tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. 
It uses `sudo` and assumes it is set up so that the netdata user can execute `nvme` as root without a password.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### Allow netdata to execute nvme\n\nAdd the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /usr/sbin/nvme\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvme binary. The default is \"nvme\" and the executable is looked for in the directories specified in the PATH environment variable. | nvme | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvme\n binary_path: /usr/local/sbin/nvme\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVMe device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn", "plugin_name": "go.d.plugin", "module_name": "openvpn", "monitored_instance": {"name": "OpenVPN", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf\n\nThis collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf).\n\n
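For reference, enabling a module in `go.d.conf` is a one-line change under its `modules` section (a minimal sketch, assuming the stock layout of that file):\n\n```yaml\nmodules:\n  openvpn: yes\n```\n\n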
From the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client any one time.\n\nThe collector is therefore disabled by default, so that it does not break other tools that use the Management Interface.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n| connect_timeout | Connection timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n| read_timeout | Read timeout in seconds. Sets deadline for read calls. | 2 | no |\n| write_timeout | Write timeout in seconds. Sets deadline for write calls. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn-OpenVPN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn_status_log", "plugin_name": "go.d.plugin", "module_name": "openvpn_status_log", "monitored_instance": {"name": "OpenVPN status log", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt parses the server status log file and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn_status_log-OpenVPN_status_log", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": {"name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pgbouncer"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"<PASSWORD>\"\n ```\n\n- To verify the credentials, run the following command:\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && 
echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-pgbouncer-PgBouncer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpdaemon", "plugin_name": "go.d.plugin", "module_name": "phpdaemon", "monitored_instance": {"name": "phpDaemon", "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": ["data-collection.apm"]}, "keywords": ["phpdaemon", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics are expected to be in JSON format.\n\n
\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.\nThe application matters because the standalone [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) application provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n // Route /ServerStatus/ and /FullStatus/ requests to the application of the same name.\n public function getRequestRoute($req, $upstream) {\n if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n $this->header('Content-Type: application/javascript; charset=utf-8');\n\n // Emit worker state plus daemon uptime as JSON.\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
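To verify the endpoint before configuring the collector, you can request it directly and check that the response is JSON (a minimal sketch; it assumes the 127.0.0.1:8509 listener from the `phpd.conf` example above):\n\n```bash\n# Should print a JSON object with per-worker state and an uptime field.\ncurl -s http://127.0.0.1:8509/FullStatus\n```\n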
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpdaemon-phpDaemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpfpm", "plugin_name": "go.d.plugin", "module_name": "phpfpm", "monitored_instance": {"name": "PHP-FPM", "link": "https://php-fpm.org/", "icon_filename": "php.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["phpfpm", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpfpm-PHP-FPM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pihole", "plugin_name": "go.d.plugin", "module_name": "pihole", "monitored_instance": {"name": "Pi-hole", "link": "https://pi-hole.net", "icon_filename": "pihole.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["pihole"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. 
All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTPS with self-signed certificate\n\nRemote instance with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-pihole-Pi-hole", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pika", "plugin_name": "go.d.plugin", "module_name": "pika", "monitored_instance": {"name": "Pika", "link": "https://github.com/OpenAtomFoundation/pika", "icon_filename": "pika.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pika", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo 
action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-pika-Pika", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pika/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ping", "plugin_name": "go.d.plugin", "module_name": "ping", "monitored_instance": {"name": "Ping", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["ping"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change, add `net.ipv4.ping_group_range=\"0 2147483647\"` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" - raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Interval between sending ping packets. | 100ms | no |\n\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Unprivileged mode\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", "integration_type": "collector", "id": "go.d.plugin-ping-Ping", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ping/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-portcheck", "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": {"name": "TCP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors the availability and response time of TCP services.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined 
globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | Connection timeout in seconds. | 2 | no |\n\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n", "integration_type": "collector", "id": "go.d.plugin-portcheck-TCP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-postgres", "plugin_name": "go.d.plugin", "module_name": "postgres", "monitored_instance": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["data-collection.database-servers"], "icon_filename": "postgres.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "postgres", "postgresql", "sql"], "most_popular": true}, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user that has been granted the `pg_monitor`\nor `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your\nsystem.\n\n
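To confirm the grant took effect before pointing the collector at the server, you can check role membership from a psql session (a minimal sketch; it assumes the `netdata` user created above):\n\n```postgresql\n-- Returns t when netdata is a member of pg_monitor.\nSELECT pg_has_role('netdata', 'pg_monitor', 'member');\n```\n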
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db 
${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage 
|\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-postgres-PostgreSQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/metadata.yaml", "related_resources": ""}, {"meta": 
{"id": "collector-go.d.plugin-powerdns", "plugin_name": "go.d.plugin", "module_name": "powerdns", "monitored_instance": {"name": "PowerDNS Authoritative Server", "link": "https://doc.powerdns.com/authoritative/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns-PowerDNS_Authoritative_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-powerdns_recursor", "plugin_name": "go.d.plugin", "module_name": "powerdns_recursor", "monitored_instance": {"name": "PowerDNS Recursor", "link": "https://doc.powerdns.com/recursor/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration 
does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns_recursor-PowerDNS_Recursor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-4d_server", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "4D Server", "link": "https://github.com/ThomasMaul/Prometheus_4D_Exporter", "icon_filename": "4d_server.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
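For example, a sketch of a job that raises the global time series limit for a single, busier endpoint (the job name and value are illustrative, and `max_time_series` is assumed here to be settable per job):\n\n```yaml\njobs:\n - name: busy_app # hypothetical job name\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n```\n\n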
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
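As an illustration, a sketch of a selector that keeps only series from a hypothetical `myapp_` metric namespace while dropping its debug series (both patterns are assumed names, for illustration only):\n\n```yaml\nselector:\n allow:\n - myapp_* # assumed metric name pattern\n deny:\n - myapp_debug_* # assumed metric name pattern\n```\n\n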
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
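For instance, a sketch of a job that processes a hypothetical untyped metric named `myapp_queue_depth` (an assumed name, for illustration only) as a Gauge instead of ignoring it:\n\n```yaml\njobs:\n - name: myapp\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - myapp_queue_depth # hypothetical metric name\n```\n\n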
", "integration_type": "collector", "id": "go.d.plugin-prometheus-4D_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-8430ft-modem", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "8430FT modem", "link": "https://github.com/dernasherbrezon/8430ft_exporter", "icon_filename": "mtc.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-8430FT_modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-a10-acos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "A10 ACOS network devices", "link": "https://github.com/a10networks/PrometheusExporter", "icon_filename": "a10-networks.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-A10_ACOS_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-amd_smi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AMD CPU & GPU", "link": "https://github.com/amd/amd_smi_exporter", "icon_filename": "amd.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AMD_CPU_&_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apicast", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "APIcast", "link": "https://github.com/3scale/apicast", "icon_filename": "apicast.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-APIcast", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arm_hwcpipe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ARM HWCPipe", "link": "https://github.com/ylz-at/arm-hwcpipe-exporter", "icon_filename": "arm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM-based Android devices and collect metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe 
Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ARM_HWCPipe", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Compute instances", "link": "https://github.com/O1ahmad/aws_ec2_exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS 
EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Compute_instances", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2_spot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Spot Instance", "link": "https://github.com/patcadelina/ec2-spot-exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Spot_Instance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS ECS", "link": "https://github.com/bevers222/ecs-exporter", "icon_filename": "amazon-ecs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS 
exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_ECS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Health events", "link": "https://github.com/vladvasiliu/aws-health-exporter-rs", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Health_events", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Quota", "link": "https://github.com/emylincon/aws_quota_exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_rds", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS RDS", "link": "https://github.com/percona/rds_exporter", "icon_filename": "aws-rds.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_RDS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_s3", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS S3 buckets", "link": "https://github.com/ribbybibby/s3_exporter", "icon_filename": "aws-s3.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 
Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_S3_buckets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_sqs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS SQS", "link": "https://github.com/jmal98/sqs-exporter", "icon_filename": "aws-sqs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS 
Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_SQS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_instance_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS instance health", "link": "https://github.com/bobtfish/aws-instance-health-exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_instance_health", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airthings_waveplus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Airthings Waveplus air sensor", "link": "https://github.com/jeremybz/waveplus_exporter", "icon_filename": "airthings.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Airthings_Waveplus_air_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_edgedns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Edge DNS Traffic", "link": "https://github.com/akamai/akamai-edgedns-traffic-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Edge_DNS_Traffic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_gtm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Global Traffic Management", "link": "https://github.com/akamai/akamai-gtm-metrics-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Global_Traffic_Management", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_cloudmonitor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Cloudmonitor", "link": "https://github.com/ExpressenAB/cloudmonitor_exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akami_Cloudmonitor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alamos_fe2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alamos FE2 server", "link": "https://github.com/codemonauts/prometheus-fe2-exporter", "icon_filename": "alamos_fe2.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 
Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alamos_FE2_server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alibaba-cloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alibaba Cloud", "link": "https://github.com/aylei/aliyun-exporter", "icon_filename": "alibaba-cloud.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alibaba_Cloud", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-altaro_backup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Altaro Backup", "link": "https://github.com/raph2i/altaro_backup_exporter", "icon_filename": "altaro.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup 
Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Altaro_Backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aaisp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Andrews & Arnold line status", "link": "https://github.com/daveio/aaisp-exporter", "icon_filename": "andrewsarnold.jpg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Andrews_&_Arnold_line_status", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Airflow", "link": "https://github.com/shalb/airflow-exporter", "icon_filename": "airflow.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Airflow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-flink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Flink", "link": "https://github.com/matsumana/flink_exporter", "icon_filename": "apache_flink.png", "categories": ["data-collection.apm"]}, "keywords": ["web server", "http", "https"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics 
Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Flink", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apple_timemachine", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apple Time Machine", "link": "https://github.com/znerol/prometheus-timemachine-exporter", "icon_filename": "apple.svg", "categories": ["data-collection.macos-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine 
Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apple_Time_Machine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aruba", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Aruba devices", "link": "https://github.com/slashdoom/aruba_exporter", "icon_filename": "aruba.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "aruba devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks devices performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
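A rough sketch of how allow/deny patterns combine in practice (the `aruba_` and `go_` prefixes are hypothetical; substitute the metric names your exporter actually exposes):\n\n```yaml\nselector:\n allow:\n - aruba_* # keep only the exporter's own series (hypothetical prefix)\n deny:\n - go_* # drop Go runtime series, if present\n```\n\n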
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Aruba_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arvancloud_cdn", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ArvanCloud CDN", "link": "https://github.com/arvancloud/ar-prometheus-exporter", "icon_filename": "arvancloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze the performance metrics of ArvanCloud CDN and cloud services for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
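For example, a minimal sketch that keeps only series with a hypothetical `arvancloud_` prefix while dropping everything else (adjust the pattern to the exporter's real metric names):\n\n```yaml\nselector:\n allow:\n - arvancloud_* # hypothetical metric name prefix\n```\n\n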
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ArvanCloud_CDN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-audisto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Audisto", "link": "https://github.com/ZeitOnline/audisto_exporter", "icon_filename": "audisto.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
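As an illustrative sketch (the `audisto_` and `go_` prefixes are hypothetical; use the metric names the exporter actually exposes):\n\n```yaml\nselector:\n allow:\n - audisto_* # hypothetical metric name prefix\n deny:\n - go_* # e.g., drop Go runtime series\n```\n\n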
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Audisto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-authlog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AuthLog", "link": "https://github.com/woblerr/authlog_exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
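A small sketch of the idea (the `authlog_` prefix is hypothetical; match it to the metric names the exporter actually exposes):\n\n```yaml\nselector:\n allow:\n - authlog_* # hypothetical metric name prefix\n deny:\n - go_* # e.g., drop Go runtime series\n```\n\n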
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AuthLog", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_ad_app_passwords", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure AD App passwords", "link": "https://github.com/vladvasiliu/azure-app-secrets-monitor", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
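For instance, a minimal sketch (the `azure_app_` prefix is hypothetical; adjust it to the metric names the exporter actually exposes):\n\n```yaml\nselector:\n allow:\n - azure_app_* # hypothetical metric name prefix\n```\n\n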
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_AD_App_passwords", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_elastic_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Elastic Pool SQL", "link": "https://github.com/benclapp/azure_elastic_sql_exporter", "icon_filename": "azure-elastic-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Elastic_Pool_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_res", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Resources", "link": "https://github.com/FXinnovation/azure-resources-exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the vital metrics of Azure resources for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
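For example, a minimal sketch (the `azure_` prefix is hypothetical; adjust it to the metric names the exporter actually exposes):\n\n```yaml\nselector:\n allow:\n - azure_* # hypothetical metric name prefix\n```\n\n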
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Resources", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure SQL", "link": "https://github.com/iamseth/azure_sql_exporter", "icon_filename": "azure-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL 
exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_service_bus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Service Bus", "link": "https://github.com/marcinbudny/servicebus_exporter", "icon_filename": "azure-service-bus.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Service_Bus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_app", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure application", "link": "https://github.com/RobustPerception/azure_metrics_exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_application", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bosh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BOSH", "link": "https://github.com/bosh-prometheus/bosh_exporter", "icon_filename": "bosh.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BOSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bigquery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BigQuery", "link": "https://github.com/m-lab/prometheus-bigquery-exporter", "icon_filename": "bigquery.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BigQuery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bird", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bird Routing Daemon", "link": "https://github.com/czerwonk/bird_exporter", "icon_filename": "bird.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon 
Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bird_Routing_Daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Blackbox", "link": "https://github.com/prometheus/blackbox_exporter", "icon_filename": "prometheus.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["blackbox"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox 
exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bobcat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bobcat Miner 300", "link": "https://github.com/pperzyna/bobcat_exporter", "icon_filename": "bobcat.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bobcat_Miner_300", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-borg", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Borg backup", "link": "https://github.com/k0ral/borg-exporter", "icon_filename": "borg.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Borg_backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bungeecord", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BungeeCord", "link": "https://github.com/weihao/bungeecord-prometheus-exporter", "icon_filename": "bungee.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus 
Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BungeeCord", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-csgo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CS:GO", "link": "https://github.com/kinduff/csgo_exporter", "icon_filename": "csgo.svg", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CS:GO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Counter-Strike: Global Offensive server metrics for improved game performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CS:GO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cvmfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CVMFS clients", "link": "https://github.com/guilbaults/cvmfs-exporter", "icon_filename": "cvmfs.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS 
exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CVMFS_clients", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-celery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Celery", "link": "https://github.com/ZeitOnline/celery_redis_prometheus", "icon_filename": "celery.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Celery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-certificate_transparency", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Certificate Transparency", "link": "https://github.com/Hsn723/ct-exporter", "icon_filename": "ct.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Certificate_Transparency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-checkpoint", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Checkpoint device", "link": "https://github.com/RespiroConsulting/CheckPointExporter", "icon_filename": "checkpoint.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Checkpoint_device", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-chia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Chia", "link": "https://github.com/chia-network/chia-exporter", "icon_filename": "chia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Chia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clm5ip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Christ Elektronik CLM5IP power panel", "link": "https://github.com/christmann/clm5ip_exporter/", "icon_filename": "christelec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik 
CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_agent", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Agent", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Agent", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_operator", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Operator", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Operator", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_proxy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Proxy", "link": "https://github.com/cilium/proxy", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Proxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cisco_aci", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cisco ACI", "link": "https://github.com/RavuAlHemio/prometheus_aci_exporter", "icon_filename": "cisco.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "cisco devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cisco_ACI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-citrix_netscaler", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Citrix NetScaler", "link": "https://github.com/rokett/Citrix-NetScaler-Exporter", "icon_filename": "citrix.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
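##### Untyped metrics as Counter or Gauge (fallback_type)\n\nA sketch of the `fallback_type` option inside a job (the URL and the shell-style name patterns are illustrative placeholders); Untyped metrics matching a pattern are processed as the corresponding type instead of being ignored:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n counter:\n - \"*_packets_sent\"\n gauge:\n - \"*_queue_size\"\n```\n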
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Citrix_NetScaler", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClamAV daemon", "link": "https://github.com/sergeymakinen/clamav_exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats 
exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
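##### Custom update interval and timeout\n\nA sketch of overriding the global `update_every` and `timeout` options per job, as listed in the table above (the values and URL are illustrative placeholders):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n update_every: 5\n timeout: 2\n```\n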
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClamAV_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamscan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clamscan results", "link": "https://github.com/FortnoxAB/clamscan-exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
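##### Scraping through an HTTP proxy\n\nA sketch using the `proxy_url`, `proxy_username`, and `proxy_password` options from the table above (all values are illustrative placeholders):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypass\n```\n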
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clamscan_results", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clash", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clash", "link": "https://github.com/elonzh/clash_exporter", "icon_filename": "clash.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
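##### Raising the time series limits\n\nA sketch of loosening the `max_time_series` and `max_time_series_per_metric` limits for a high-cardinality endpoint (the values below are illustrative placeholders; the defaults are 2000 and 200):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n```\n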
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clickhouse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClickHouse", "link": "https://github.com/ClickHouse/ClickHouse", "icon_filename": "clickhouse.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClickHouse database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
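##### Custom HTTP request headers\n\nA sketch of attaching extra request headers via the `headers` option (the header name and value are illustrative placeholders, and the key-value mapping form shown here is an assumption about the option's syntax):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Api-Key: secret\n```\n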
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClickHouse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_cloudwatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CloudWatch", "link": "https://github.com/prometheus/cloudwatch_exporter", "icon_filename": "aws-cloudwatch.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
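##### HTTPS with a custom certificate authority\n\nA sketch of verifying the server certificate against a custom CA bundle via the `tls_ca` option (the file path is an illustrative placeholder):\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/custom/ca.pem\n```\n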
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CloudWatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry", "link": "https://github.com/bosh-prometheus/cf_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
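##### Client TLS certificate\n\nA sketch of presenting a client certificate and key to the endpoint via the `tls_cert` and `tls_key` options (the file paths are illustrative placeholders):\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_cert: /etc/ssl/client/cert.pem\n tls_key: /etc/ssl/client/key.pem\n```\n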
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry_firebase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry Firehose", "link": "https://github.com/bosh-prometheus/firehose_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry_Firehose", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloudflare_pcap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloudflare PCAP", "link": "https://github.com/wehkamp/docker-prometheus-cloudflare-exporter", "icon_filename": "cloudflare.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloudflare_PCAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClusterControl CMON", "link": "https://github.com/severalnines/cmon_exporter", "icon_filename": "cluster-control.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Severalnines ClusterControl CMON metrics for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON 
Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClusterControl_CMON", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-collectd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Collectd", "link": "https://github.com/prometheus/collectd_exporter", "icon_filename": "collectd.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Collectd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-concourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Concourse", "link": "https://concourse-ci.org", "icon_filename": "concourse.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Concourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ftbeerpi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CraftBeerPi", "link": "https://github.com/jo-hannes/craftbeerpi_exporter", "icon_filename": "craftbeer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi 
exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CraftBeerPi", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crowdsec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crowdsec", "link": "https://docs.crowdsec.net/docs/observability/prometheus", "icon_filename": "crowdsec.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application serves its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crowdsec", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crypto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crypto exchanges", "link": "https://github.com/ix-ai/crypto-exporter", "icon_filename": "crypto.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crypto_exchanges", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cryptowatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cryptowatch", "link": "https://github.com/nbarrientos/cryptowat_exporter", "icon_filename": "cryptowatch.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat 
Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cryptowatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-custom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Custom Exporter", "link": "https://github.com/orange-cloudfoundry/custom_exporter", "icon_filename": "customdata.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom 
Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Custom_Exporter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ddwrt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DDWRT Routers", "link": "https://github.com/camelusferus/ddwrt_collector", "icon_filename": "ddwrt.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DDWRT_Routers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dmarc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DMARC", "link": "https://github.com/jgosmann/dmarc-metrics-exporter", "icon_filename": "dmarc.png", "categories": ["data-collection.mail-servers"]}, "keywords": ["email authentication", "policy", "reporting"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DMARC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dnsbl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DNSBL", "link": "https://github.com/Luzilla/dnsbl_exporter/", "icon_filename": "dnsbl.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DNSBL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC ECS cluster", "link": "https://github.com/paychex/prometheus-emcecs-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS 
Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_ECS_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_isilon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC Isilon cluster", "link": "https://github.com/paychex/prometheus-isilon-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_Isilon_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_xtremio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC XtremIO cluster", "link": "https://github.com/cthiel42/prometheus-xtremio-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_XtremIO_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_powermax", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell PowerMax", "link": "https://github.com/kckecheng/powermax_exporter", "icon_filename": "powermax.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax 
Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_PowerMax", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dependency_track", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dependency-Track", "link": "https://github.com/jetstack/dependency-track-exporter", "icon_filename": "dependency-track.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track 
Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dependency-Track", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-digitalocean", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DigitalOcean", "link": "https://github.com/metalmatze/digitalocean_exporter", "icon_filename": "digitalocean.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean 
Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DigitalOcean", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-discourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Discourse", "link": "https://github.com/discourse/discourse-prometheus", "icon_filename": "discourse.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse 
Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Discourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dutch_electricity_smart_meter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dutch Electricity Smart Meter", "link": "https://github.com/TobiasDeBruijn/prometheus-p1-exporter", "icon_filename": "dutch-electricity.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dynatrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dynatrace", "link": "https://github.com/Apside-TOP/dynatrace_exporter", "icon_filename": "dynatrace.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace 
Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dynatrace", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eos_web", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "EOS", "link": "https://eos-web.web.cern.ch/eos-web/", "icon_filename": "eos.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-EOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eaton_ups", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Eaton UPS", "link": "https://github.com/psyinfra/prometheus-eaton-ups-exporter", "icon_filename": "eaton.svg", "categories": ["data-collection.ups"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS 
Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Eaton_UPS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-elgato_keylight", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elgato Key Light devices.", "link": "https://github.com/mdlayher/keylight_exporter", "icon_filename": "elgato.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light 
exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Elgato_Key_Light_devices.", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-energomera", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energomera smart power meters", "link": "https://github.com/peak-load/energomera_exporter", "icon_filename": "energomera.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[energomera-exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Energomera_smart_power_meters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-excel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Excel spreadsheet", "link": "https://github.com/MarcusCalidus/excel-exporter", "icon_filename": "excel.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel 
Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Excel_spreadsheet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-frrouting", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FRRouting", "link": "https://github.com/tynany/frr_exporter", "icon_filename": "frrouting.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FRRouting", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fastd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fastd", "link": "https://github.com/freifunk-darmstadt/fastd-exporter", "icon_filename": "fastd.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
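\nAs a concrete illustration, consider a job that keeps every series whose name starts with `fastd_` and drops everything else. This is a hypothetical sketch: the pattern is a placeholder rather than a documented metric name, and it assumes the selector is set per job, as the options table above suggests.\n\n```yaml\njobs:\n - name: fastd\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - fastd_*\n```\n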
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
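\nFor instance, Untyped series ending in `_bytes` could be treated as gauges. This is a hypothetical sketch: the pattern is a placeholder, not a metric name taken from a real exporter. Note that the leading `*` must be quoted so YAML does not read it as an alias.\n\n```yaml\nfallback_type:\n gauge:\n - '*_bytes'\n```\n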
\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fastd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fortigate", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fortigate firewall", "link": "https://github.com/bluecmd/fortigate_exporter", "icon_filename": "fortinet.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fortigate_firewall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_nfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD NFS", "link": "https://github.com/Axcient/freebsd-nfs-exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_NFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_rctl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD RCTL-RACCT", "link": "https://github.com/yo000/rctl_exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_RCTL-RACCT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freifunk", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Freifunk network", "link": "https://github.com/xperimental/freifunk-exporter", "icon_filename": "freifunk.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Freifunk_network", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fritzbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fritzbox network devices", "link": "https://github.com/pdreker/fritz_exporter", "icon_filename": "avm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fritzbox_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_gce", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP GCE", "link": "https://github.com/O1ahmad/gcp-gce-exporter", "icon_filename": "gcp-gce.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\n
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_GCE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP Quota", "link": "https://github.com/mintel/gcp-quota-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
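##### Untyped metrics as Gauge (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The metric name pattern is hypothetical; use a pattern matching the Untyped metrics your endpoint actually exposes.\n\nProcess Untyped metrics matching a pattern as Gauge, using the `fallback_type` option described above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - gcp_quota_*\n\n```\n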
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gtp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GTP", "link": "https://github.com/wmnsk/gtp_exporter", "icon_filename": "gtpu.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
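##### Behind an HTTP proxy (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The proxy address and credentials are placeholders; adjust them to your environment.\n\nSend requests through a proxy, using the `proxy_url`, `proxy_username` and `proxy_password` options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: username\n proxy_password: password\n\n```\n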
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GTP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic_cli", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic Command Line Output", "link": "https://github.com/MarioMartReq/generic-exporter", "icon_filename": "cli.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output 
Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
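##### Custom HTTP headers (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The header name and value are placeholders; set whatever headers your endpoint expects.\n\nAttach extra request headers, using the `headers` option from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Api-Key: my-api-key\n\n```\n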
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_Command_Line_Output", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-enclosure", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic storage enclosure tool", "link": "https://github.com/Gandi/jbod-rs", "icon_filename": "storage-enclosure.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
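##### Custom collection frequency and timeout (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The values shown are arbitrary; pick values that suit your environment.\n\nOverride the data collection frequency and the HTTP request timeout for a single job, using the `update_every` and `timeout` options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n update_every: 5\n timeout: 5\n\n```\n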
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_storage_enclosure_tool", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_ratelimit", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub API rate limit", "link": "https://github.com/lunarway/github-ratelimit-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit 
Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
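##### HTTPS with a custom certificate authority (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The file paths are placeholders; point them at your own CA bundle and client certificate/key.\n\nVerify the server against a custom CA and present a client certificate, using the `tls_ca`, `tls_cert` and `tls_key` options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /path/to/ca.pem\n tls_cert: /path/to/client.pem\n tls_key: /path/to/client.key\n\n```\n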
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_API_rate_limit", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_repo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub repository", "link": "https://github.com/githubexporter/github-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub 
Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
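##### Raised time series limits (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The limits shown are arbitrary; raise them only if your endpoint legitimately exposes that many time series.\n\nRaise the global and per-metric time series limits, using the `max_time_series` and `max_time_series_per_metric` options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n\n```\n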
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gitlab_runner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitLab Runner", "link": "https://gitlab.com/gitlab-org/gitlab-runner", "icon_filename": "gitlab.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to the GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
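##### Excluding unwanted metrics (sketch)\n\n> **Note**: This example is an illustrative sketch, not part of the default configuration. The metric name is hypothetical; replace it with a pattern matching the time series you want to drop.\n\nDrop selected time series while keeping everything else, using the `deny` part of the `selector` option described above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n deny:\n - gitlab_runner_errors_total\n\n```\n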
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitLab_Runner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gobetween", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Gobetween", "link": "https://github.com/yyyar/gobetween", "icon_filename": "gobetween.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Gobetween", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Cloud Platform", "link": "https://github.com/DazWilkin/gcp-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Cloud_Platform", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-google_pagespeed", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Pagespeed", "link": "https://github.com/foomo/pagespeed_exporter", "icon_filename": "google.svg", "categories": ["data-collection.apm"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Pagespeed", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_stackdriver", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Stackdriver", "link": "https://github.com/prometheus-community/stackdriver_exporter", "icon_filename": "gcp-stackdriver.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Stackdriver", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-grafana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Grafana", "link": "https://grafana.com/", "icon_filename": "grafana.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Grafana", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-graylog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Graylog Server", "link": "https://github.com/Graylog2/graylog2-server/", "icon_filename": "graylog.svg", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Graylog_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HANA", "link": "https://github.com/jenningsloy318/hana_exporter", "icon_filename": "sap.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions 
in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HANA", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hdsentinel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HDSentinel", "link": "https://github.com/qusielle/hdsentinel-exporter", "icon_filename": "harddisk.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to the [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel 
Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HDSentinel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hhvm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HHVM", "link": "https://github.com/wikimedia/operations-software-hhvm_exporter", "icon_filename": "hhvm.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient\nPHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM 
Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HHVM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hp_ilo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HP iLO", "link": "https://github.com/infinityworks/hpilo-exporter", "icon_filename": "hp.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics 
Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HP_iLO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-halon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Halon", "link": "https://github.com/tobiasbp/halon_exporter", "icon_filename": "halon.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Halon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hashicorp_vault", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HashiCorp Vault secrets", "link": "https://github.com/tomtom-international/vault-assessment-prometheus-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HashiCorp_Vault_secrets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hasura_graphql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hasura GraphQL Server", "link": "https://github.com/zolamk/hasura-exporter", "icon_filename": "hasura.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized\nAPI performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura 
Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hasura_GraphQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_hotspot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium hotspot", "link": "https://github.com/tedder/helium_hotspot_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot 
exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_hotspot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_miner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium miner (validator)", "link": "https://github.com/tedder/miner_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) 
exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
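For example, a hypothetical selector for this integration (the `validator_*` pattern is an assumption for illustration; check the metric names your exporter actually exposes) that keeps validator series and drops Prometheus histogram buckets:\n\n```yaml\nselector:\n # illustrative patterns, not taken from this exporter\n allow:\n - validator_*\n deny:\n - '*_bucket'\n```\n\n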
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_miner_(validator)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_cgm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CGN series CPE", "link": "https://github.com/yrro/hitron-exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 
exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
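As a hypothetical sketch (the `hitron_*` pattern is illustrative, not taken from this exporter), collecting only series whose names start with `hitron_`:\n\n```yaml\nselector:\n allow:\n - hitron_* # assumed metric name prefix; verify against your exporter's output\n```\n\n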
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CGN_series_CPE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_coda", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CODA Cable Modem", "link": "https://github.com/hairyhenderson/hitron_coda_exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
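For instance, a sketch that drops verbose histogram bucket series and keeps everything else (the pattern is an illustrative assumption):\n\n```yaml\nselector:\n deny:\n - '*_bucket' # drop histogram buckets only\n```\n\n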
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CODA_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homebridge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homebridge", "link": "https://github.com/lstrojny/homebridge-prometheus-exporter", "icon_filename": "homebridge.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge 
Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
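A minimal hypothetical sketch, assuming the exporter publishes `homebridge_*` metric names (an assumption; verify against your exporter's output):\n\n```yaml\nselector:\n allow:\n - homebridge_* # assumed prefix\n```\n\n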
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homebridge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homey", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homey", "link": "https://github.com/rickardp/homey-prometheus-exporter", "icon_filename": "homey.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
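For example (a sketch; `homey_*` is an assumed prefix, not confirmed for this exporter), keeping only Homey device series:\n\n```yaml\nselector:\n allow:\n - homey_* # illustrative pattern\n```\n\n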
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homey", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-honeypot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Honeypot", "link": "https://github.com/Intrinsec/honeypot_exporter", "icon_filename": "intrinsec.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
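A hypothetical example that keeps honeypot series and drops histogram buckets (both patterns are illustrative assumptions, not taken from this exporter):\n\n```yaml\nselector:\n allow:\n - honeypot_*\n deny:\n - '*_bucket'\n```\n\n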
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Honeypot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hilink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Huawei devices", "link": "https://github.com/eliecharra/hilink-exporter", "icon_filename": "huawei.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink 
exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
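For example (a sketch under the assumption that the exporter uses a `hilink_*` metric prefix; check the actual names it exposes):\n\n```yaml\nselector:\n allow:\n - hilink_* # assumed prefix\n```\n\n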
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Huawei_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hubble", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hubble", "link": "https://github.com/cilium/hubble", "icon_filename": "hubble.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hubble", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_aix_njmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM AIX systems Njmon", "link": "https://github.com/crooks/njmon_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_AIX_systems_Njmon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_cex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM CryptoExpress (CEX) cards", "link": "https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_mq", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM MQ", "link": "https://github.com/agebhar1/mq_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_MQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum", "link": "https://github.com/topine/ibm-spectrum-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum 
Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum_virtualize", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum Virtualize", "link": "https://github.com/bluecmd/spectrum_virtualize_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum_Virtualize", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_zhmc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Z Hardware Management Console", "link": "https://github.com/zhmcclient/zhmc-prometheus-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IOTA full node", "link": "https://github.com/crholliday/iota-prom-exporter", "icon_filename": "iota.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA 
Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IOTA_full_node", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ipmi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IPMI (By SoundCloud)", "link": "https://github.com/prometheus-community/ipmi_exporter", "icon_filename": "soundcloud.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IPMI_(By_SoundCloud)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-influxdb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "InfluxDB", "link": "https://github.com/prometheus/influxdb_exporter", "icon_filename": "influxdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB 
exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-InfluxDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jmx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JMX", "link": "https://github.com/prometheus/jmx_exporter", "icon_filename": "java.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JMX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jarvis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jarvis Standing Desk", "link": "https://github.com/hairyhenderson/jarvis_exporter/", "icon_filename": "jarvis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk 
Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jarvis_Standing_Desk", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jenkins", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jenkins", "link": "https://www.jenkins.io/", "icon_filename": "jenkins.svg", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jenkins", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jetbrains_fls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JetBrains Floating License Server", "link": "https://github.com/mkreu/jetbrains-fls-exporter", "icon_filename": "jetbrains.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JetBrains_Floating_License_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka", "link": "https://github.com/danielqsj/kafka_exporter/", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_connect", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Connect", "link": "https://github.com/findelabs/kafka-connect-exporter-rs", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Connect", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_consumer_lag", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Consumer Lag", "link": "https://github.com/omarsmak/kafka-consumer-lag-monitoring", "icon_filename": "kafka.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Consumer_Lag", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_zookeeper", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka ZooKeeper", "link": "https://github.com/cloudflare/kafka_zookeeper_exporter", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kannel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kannel", "link": "https://github.com/apostvav/kannel_exporter", "icon_filename": "kannel.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kannel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-keepalived", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Keepalived", "link": "https://github.com/gen2brain/keepalived_exporter", "icon_filename": "keepalived.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived 
Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Keepalived", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-korral", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kubernetes Cluster Cloud Cost", "link": "https://github.com/agilestacks/korral", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kubernetes_Cluster_Cloud_Cost", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "LDAP", "link": "https://github.com/titisan/ldap_exporter", "icon_filename": "ldap.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP 
Exporter](https://github.com/titisan/ldap_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-LDAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lagerist", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lagerist Disk latency", "link": "https://github.com/Svedrin/lagerist", "icon_filename": "linux.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency 
exporter](https://github.com/Svedrin/lagerist) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lagerist_Disk_latency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-linode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Linode", "link": "https://github.com/DazWilkin/linode-exporter", "icon_filename": "linode.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by 
following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Linode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lustre", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lustre metadata", "link": "https://github.com/GSI-HPC/prometheus-cluster-exporter", "icon_filename": "lustre.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster 
Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lustre_metadata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lynis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lynis audit reports", "link": "https://github.com/MauveSoftware/lynis_exporter", "icon_filename": "lynis.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lynis_audit_reports", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mp707", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MP707 USB thermometer", "link": "https://github.com/nradchenko/mp707_exporter", "icon_filename": "thermometer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by 
following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MP707_USB_thermometer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mqtt_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MQTT Blackbox", "link": "https://github.com/inovex/mqtt_blackbox_exporter", "icon_filename": "mqtt.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox 
Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MQTT_Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-machbase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Machbase", "link": "https://github.com/MACHBASE/prometheus-machbase-exporter", "icon_filename": "machbase.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase 
Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
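##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n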
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Machbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-maildir", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Maildir", "link": "https://github.com/cherti/mailexporter", "icon_filename": "mailserver.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
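##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n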
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Maildir", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meilisearch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meilisearch", "link": "https://github.com/scottaglia/meilisearch_exporter", "icon_filename": "meilisearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
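##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n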
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meilisearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-memcached", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Memcached (community)", "link": "https://github.com/prometheus/memcached_exporter", "icon_filename": "memcached.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
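##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n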
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Memcached_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meraki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meraki dashboard", "link": "https://github.com/TheHolm/meraki-dashboard-promethus-exporter", "icon_filename": "meraki.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
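##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n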
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meraki_dashboard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mesos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mesos", "link": "http://github.com/mesosphere/mesos_exporter", "icon_filename": "mesos.svg", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
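##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n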
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mesos", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mikrotik", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MikroTik devices", "link": "https://github.com/swoga/mikrotik-exporter", "icon_filename": "mikrotik.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
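##### Metric filtering with a selector\n\nA minimal sketch combining a job with the `selector` option documented above; `pattern1` and `pattern2` are placeholders, not real metric name patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - pattern1\n - pattern2\n\n```\n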
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MikroTik_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-routeros", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mikrotik RouterOS devices", "link": "https://github.com/welbymcroberts/routeros_exporter", "icon_filename": "routeros.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mikrotik_RouterOS_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-minecraft", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Minecraft", "link": "https://github.com/sladkoff/minecraft-prometheus-exporter", "icon_filename": "minecraft.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft 
Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Minecraft", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-modbus_rtu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Modbus protocol", "link": "https://github.com/dernasherbrezon/modbusrtu_exporter", "icon_filename": "modbus.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Modbus_protocol", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mogilefs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MogileFS", "link": "https://github.com/KKBOX/mogilefs-exporter", "icon_filename": "filesystem.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS 
Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MogileFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-monnit_mqtt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Monnit Sensors MQTT", "link": "https://github.com/braxton9460/monnit-mqtt-exporter", "icon_filename": "monnit.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter 
WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Monnit_Sensors_MQTT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nrpe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NRPE daemon", "link": "https://github.com/canonical/nrpe_exporter", "icon_filename": "nrpelinux.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NRPE_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nsxt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NSX-T", "link": "https://github.com/jk8s/nsxt_exporter", "icon_filename": "vmware-nsx.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NSX-T", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nvml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NVML", "link": "https://github.com/oko/nvml-exporter-rs", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions 
", "integration_type": "collector", "id": "go.d.plugin-prometheus-NSX-T", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nvml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NVML", "link": "https://github.com/oko/nvml-exporter-rs", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
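\nFor instance, a hypothetical allow-only selector for GPU temperature and utilization series (illustrative names, not guaranteed to match this exporter's output):\n\n```yaml\nselector:\n allow:\n - nvml_*_temperature_*\n - nvml_*_utilization_*\n```\n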
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NVML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-naemon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Naemon", "link": "https://github.com/Griesbacher/Iapetos", "icon_filename": "naemon.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
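\nAs a sketch, a deny-only selector (assuming, as is typical for such filters, that an empty allow list keeps everything) could drop the exporter's own scrape bookkeeping series; the pattern below is hypothetical:\n\n```yaml\nselector:\n deny:\n - iapetos_scrape_*\n```\n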
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
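\nFor instance, if this exporter exposed an Untyped series named `iapetos_check_results` that behaves like a counter (a hypothetical name, used only for illustration), you could collect it with:\n\n```yaml\nfallback_type:\n counter:\n - iapetos_check_results\n```\n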
\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Naemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nagios", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nagios", "link": "https://github.com/wbollock/nagios_exporter", "icon_filename": "nagios.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
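\nA hypothetical sketch combining both lists (these patterns are illustrative, not actual Nagios exporter series names):\n\n```yaml\nselector:\n allow:\n - nagios_*\n deny:\n - nagios_exporter_*\n```\n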
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nagios", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nature_remo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nature Remo E lite devices", "link": "https://github.com/kenfdev/remo-exporter", "icon_filename": "nature-remo.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series 
Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
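\nAs a sketch, assuming this exporter exposed Untyped power readings matching `remo_*_watt` (a hypothetical pattern), you could treat them as gauges:\n\n```yaml\nfallback_type:\n gauge:\n - remo_*_watt\n```\n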
\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nature_Remo_E_lite_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_solidfire", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetApp Solidfire", "link": "https://github.com/mjavier2k/solidfire-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
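\nFor example, to keep only volume-level series (the `solidfire_volume_*` pattern is hypothetical, shown purely for illustration):\n\n```yaml\nselector:\n allow:\n - solidfire_volume_*\n```\n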
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetApp_Solidfire", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetFlow", "link": "https://github.com/paihu/netflow_exporter", "icon_filename": "netflow.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
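To illustrate the label-based rules above, consider a hypothetical series exposed without a '# TYPE' hint:\n\n```\nmyapp_latency_seconds{quantile=\"0.99\"} 0.2\n```\n\nIt would be handled as a Summary because of its 'quantile' label; with an 'le' label instead, it would be handled as a Histogram.\n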
", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetFlow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netmeter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetMeter", "link": "https://github.com/ssbostan/netmeter-exporter", "icon_filename": "netmeter.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
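\nFor instance, a job-level sketch that filters what is collected from this exporter (the `netmeter_*` pattern is hypothetical, and the selector is shown nested under a job as the options table describes):\n\n```yaml\njobs:\n - name: netmeter\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - netmeter_*\n```\n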
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetMeter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_ontap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netapp ONTAP API", "link": "https://github.com/sapcc/netapp-api-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netapp_ONTAP_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netatmo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netatmo sensors", "link": "https://github.com/xperimental/netatmo-exporter", "icon_filename": "netatmo.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netatmo_sensors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-newrelic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "New Relic", "link": "https://github.com/jfindley/newrelic_exporter", "icon_filename": "newrelic.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic 
exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-New_Relic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextdns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NextDNS", "link": "https://github.com/raylas/nextdns-exporter", "icon_filename": "nextdns.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NextDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextcloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nextcloud servers", "link": "https://github.com/xperimental/nextcloud-exporter", "icon_filename": "nextcloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nextcloud_servers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-obs_studio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OBS Studio", "link": "https://github.com/lukegb/obs_studio_exporter", "icon_filename": "obs-studio.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio 
Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OBS_Studio", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-odbc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ODBC", "link": "https://github.com/MACHBASE/prometheus-odbc-exporter", "icon_filename": "odbc.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC 
Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ODBC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-otrs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OTRS", "link": "https://github.com/JulianDroste/otrs_exporter", "icon_filename": "otrs.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OTRS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openhab", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenHAB", "link": "https://github.com/pdreker/openhab_exporter", "icon_filename": "openhab.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenHAB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenLDAP (community)", "link": "https://github.com/tomcz/openldap_exporter", "icon_filename": "openldap.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics 
Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenLDAP_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRC", "link": "https://git.sr.ht/~tomleb/openrc-exporter", "icon_filename": "linux.png", "categories": ["data-collection.linux-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrct2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRCT2", "link": "https://github.com/terinjokes/openrct2-prometheus-exporter", "icon_filename": "openRCT2.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus 
Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRCT2", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openroadm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenROADM devices", "link": "https://github.com/utdal/openroadm_exporter", "icon_filename": "openroadm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", 
"setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenROADM_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openstack", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenStack", "link": "https://github.com/CanonicalLtd/prometheus-openstack-exporter", "icon_filename": "openstack.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack 
exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenStack", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenVAS", "link": "https://github.com/ModeClearCode/openvas_exporter", "icon_filename": "openVAS.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenVAS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openweathermap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenWeatherMap", "link": "https://github.com/Tenzer/openweathermap-exporter", "icon_filename": "openweather.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap 
Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenWeatherMap", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvswitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Open vSwitch", "link": "https://github.com/digitalocean/openvswitch_exporter", "icon_filename": "ovs.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Open_vSwitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-oracledb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Oracle DB (community)", "link": "https://github.com/iamseth/oracledb_exporter", "icon_filename": "oracle.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["oracle", "database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB 
Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Oracle_DB_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-patroni", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Patroni", "link": "https://github.com/gopaytech/patroni_exporter", "icon_filename": "patroni.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni 
Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Patroni", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pws", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Personal Weather Station", "link": "https://github.com/JohnOrthoefer/pws-exporter", "icon_filename": "wunderground.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station 
Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Personal_Weather_Station", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgpool2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pgpool-II", "link": "https://github.com/pgpool/pgpool2_exporter", "icon_filename": "pgpool2.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II 
Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pgpool-II", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-philips_hue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Philips Hue", "link": "https://github.com/aexel90/hue_exporter", "icon_filename": "hue.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Philips_Hue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pimoroni_enviro_plus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pimoroni Enviro+", "link": "https://github.com/terradolor/prometheus-enviro-exporter", "icon_filename": "pimorino.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pimoroni_Enviro+", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pingdom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pingdom", "link": "https://github.com/veepee-oss/pingdom_exporter", "icon_filename": "solarwinds.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom 
Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pingdom", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-podman", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Podman", "link": "https://github.com/containers/prometheus-podman-exporter", "icon_filename": "podman.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN 
exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Podman", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-powerpal", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Powerpal devices", "link": "https://github.com/aashley/powerpal_exporter", "icon_filename": "powerpal.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Powerpal_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proftpd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ProFTPD", "link": "https://github.com/transnano/proftpd_exporter", "icon_filename": "proftpd.png", "categories": ["data-collection.ftp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ProFTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Prometheus endpoint", "link": "https://prometheus.io/", "icon_filename": "prometheus.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["prometheus", "openmetrics"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Prometheus_endpoint", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proxmox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Proxmox VE", "link": "https://github.com/prometheus-pve/prometheus-pve-exporter", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE 
Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Proxmox_VE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radius", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RADIUS", "link": "https://github.com/devon-mar/radius-exporter", "icon_filename": "radius.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS 
exporter](https://github.com/devon-mar/radius-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ripe_atlas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RIPE Atlas", "link": "https://github.com/czerwonk/atlas_exporter", "icon_filename": "ripe.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas 
Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RIPE_Atlas", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radio_thermostat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Radio Thermostat", "link": "https://github.com/andrewlow/radio-thermostat-exporter", "icon_filename": "radiots.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat 
Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Radio_Thermostat", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-rancher", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Rancher", "link": "https://github.com/infinityworksltd/prometheus-rancher-exporter", "icon_filename": "rancher.svg", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher 
Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Rancher", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-raritan_pdu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Raritan PDU", "link": "https://github.com/psyinfra/prometheus-raritan-pdu-exporter", "icon_filename": "raritan.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU 
Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Raritan_PDU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-redis_queue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Redis Queue", "link": "https://github.com/mdawar/rq-exporter", "icon_filename": "rq.png", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Redis_Queue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sabnzbd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SABnzbd", "link": "https://github.com/msroest/sabnzbd_exporter", "icon_filename": "sabnzbd.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SABnzbd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sma_inverter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SMA Inverters", "link": "https://github.com/dr0ps/sma_inverter_exporter", "icon_filename": "sma.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SMA_Inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sonic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SONiC NOS", "link": "https://github.com/kamelnetworks/sonic_exporter", "icon_filename": "sonic.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC 
Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SONiC_NOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SQL Database agnostic", "link": "https://github.com/free/sql_exporter", "icon_filename": "sql.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SQL_Database_agnostic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSH", "link": "https://github.com/Nordstrom/ssh_exporter", "icon_filename": "ssh.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSL Certificate", "link": "https://github.com/ribbybibby/ssl_exporter", "icon_filename": "ssl.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSL_Certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-salicru_eqx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Salicru EQX inverter", "link": "https://github.com/alejandroscf/prometheus_salicru_exporter", "icon_filename": "salicru.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX 
inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Salicru_EQX_inverter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sense_energy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sense Energy", "link": "https://github.com/ejsuncy/sense_energy_prometheus_exporter", "icon_filename": "sense.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy 
exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
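\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n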
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sense_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sentry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sentry", "link": "https://github.com/snakecharmer/sentry_exporter", "icon_filename": "sentry.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
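\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n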
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sentry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-servertech", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ServerTech", "link": "https://github.com/tynany/servertech_exporter", "icon_filename": "servertech.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech 
Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
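\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n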
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ServerTech", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shell_cmd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shell command", "link": "https://github.com/tomwilkie/prom-run", "icon_filename": "crunner.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
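\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n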
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shell_command", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shelly", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shelly humidity sensor", "link": "https://github.com/aexel90/shelly_exporter", "icon_filename": "shelly.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
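\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n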
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shelly_humidity_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sia", "link": "https://github.com/tbenz9/sia_exporter", "icon_filename": "sia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
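\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n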
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-s7_plc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Siemens S7 PLC", "link": "https://github.com/MarcusCalidus/s7-plc-exporter", "icon_filename": "siemens.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC 
exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
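\nFor example, a job that keeps only `node_*` time series while dropping the `node_scrape_*` internals might look like this (both patterns are purely illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n```\n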
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Siemens_S7_PLC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-site24x7", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Site 24x7", "link": "https://github.com/svenstaro/site24x7_exporter", "icon_filename": "site24x7.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 
Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Site_24x7", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-slurm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Slurm", "link": "https://github.com/vpenso/prometheus-slurm-exporter", "icon_filename": "slurm.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Slurm", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-smartrg808ac", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SmartRG 808AC Cable Modem", "link": "https://github.com/AdamIsrael/smartrg808ac_exporter", "icon_filename": "smartr.jpeg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SmartRG_808AC_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Smart meters SML", "link": "https://github.com/mweinelt/sml-exporter", "icon_filename": "sml.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Smart_meters_SML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-softether", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SoftEther VPN Server", "link": "https://github.com/dalance/softether_exporter", "icon_filename": "softether.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther 
Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SoftEther_VPN_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solaredge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SolarEdge inverters", "link": "https://github.com/dave92082/SolarEdge-Exporter", "icon_filename": "solaredge.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge 
Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SolarEdge_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lsx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solar logging stick", "link": "https://gitlab.com/bhavin192/lsx-exporter", "icon_filename": "solar.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick 
exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solar_logging_stick", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solis Ginlong 5G inverters", "link": "https://github.com/candlerb/solis_exporter", "icon_filename": "solis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis 
Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solis_Ginlong_5G_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-spacelift", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Spacelift", "link": "https://github.com/spacelift-io/prometheus-exporter", "icon_filename": "spacelift.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift 
Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Spacelift", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-speedify", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Speedify CLI", "link": "https://github.com/willshen/speedify_exporter", "icon_filename": "speedify.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Speedify_CLI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sphinx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sphinx", "link": "https://github.com/foxdalas/sphinx_exporter", "icon_filename": "sphinx.png", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sphinx", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starlink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starlink (SpaceX)", "link": "https://github.com/danopstech/starlink_exporter", "icon_filename": "starlink.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink 
Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starlink_(SpaceX)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starwind_vsan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starwind VSAN VSphere Edition", "link": "https://github.com/evoicefire/starwind-vsan-exporter", "icon_filename": "starwind.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starwind_VSAN_VSphere_Edition", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-statuspage", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "StatusPage", "link": "https://github.com/vladvasiliu/statuspage-exporter", "icon_filename": "statuspage.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage 
Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-StatusPage", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-steam_a2s", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Steam", "link": "https://github.com/armsnyder/a2s-exporter", "icon_filename": "a2s.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
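\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```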
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Steam", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-storidge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Storidge", "link": "https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md", "icon_filename": "storidge.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
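\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```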
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Storidge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-stream_generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Stream", "link": "https://github.com/carlpett/stream_exporter", "icon_filename": "stream.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
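\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```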
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Stream", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sunspec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sunspec Solar Energy", "link": "https://github.com/inosion/prometheus-sunspec-exporter", "icon_filename": "sunspec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy 
Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
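\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```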
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sunspec_Solar_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-suricata", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Suricata", "link": "https://github.com/corelight/suricata_exporter", "icon_filename": "suricata.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata 
Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
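\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```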
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Suricata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-synology_activebackup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Synology ActiveBackup", "link": "https://github.com/codemonauts/activebackup-prometheus-exporter", "icon_filename": "synology.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
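\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```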
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Synology_ActiveBackup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sysload", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sysload", "link": "https://github.com/egmc/sysload_exporter", "icon_filename": "sysload.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```
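\n\nFor example, to keep only CPU-related series and drop guest CPU time, a selector could look like the following (the metric names are illustrative, not specific to this exporter):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_seconds_total\n```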
\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Replace the example port with the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sysload", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-trex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "T-Rex NVIDIA GPU Miner", "link": "https://github.com/dennisstritzke/trex_exporter", "icon_filename": "trex.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner 
Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-T-Rex_NVIDIA_GPU_Miner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tacas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TACACS", "link": "https://github.com/devon-mar/tacacs-exporter", "icon_filename": "tacacs.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS 
Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TACACS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tplink_p110", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TP-Link P110", "link": "https://github.com/ijohanne/prometheus-tplink-p110-exporter", "icon_filename": "tplink.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 
Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TP-Link_P110", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tado", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tado smart heating solution", "link": "https://github.com/eko/tado-exporter", "icon_filename": "tado.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado° Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tado_smart_heating_solution", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tankerkoenig", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tankerkoenig API", "link": "https://github.com/lukasmalkmus/tankerkoenig_exporter", "icon_filename": "tanker.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API 
Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tankerkoenig_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_powerwall", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Powerwall", "link": "https://github.com/foogod/powerwall_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall 
Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Powerwall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_wall_connector", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Wall Connector", "link": "https://github.com/benclapp/tesla_wall_connector_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Wall_Connector", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_vehicle", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla vehicle", "link": "https://github.com/wywywywy/tesla-prometheus-exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla 
exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_vehicle", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-traceroute", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Traceroute", "link": "https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter", "icon_filename": "traceroute.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Traceroute", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twincat_ads_webservice", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TwinCAT ADS Web Service", "link": "https://github.com/MarcusCalidus/twincat-ads-webservice-exporter", "icon_filename": "twincat.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TwinCAT_ADS_Web_Service", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Twitch", "link": "https://github.com/damoun/twitch_exporter", "icon_filename": "twitch.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Twitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ubiquity_ufiber", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Ubiquiti UFiber OLT", "link": "https://github.com/swoga/ufiber-exporter", "icon_filename": "ubiquiti.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Ubiquiti_UFiber_OLT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-uptimerobot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Uptimerobot", "link": "https://github.com/wosc/prometheus-uptimerobot", "icon_filename": "uptimerobot.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot 
Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Uptimerobot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vscode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "VSCode", "link": "https://github.com/guicaulada/vscode-exporter", "icon_filename": "vscode.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if its name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**All other Untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-VSCode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vault_pki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vault PKI", "link": "https://github.com/aarnaud/vault-pki-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI 
Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vault_PKI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vertica", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vertica", "link": "https://github.com/vertica/vertica-prometheus-exporter", "icon_filename": "vertica.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vertica", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-warp10", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Warp10", "link": "https://github.com/centreon/warp10-sensision-exporter", "icon_filename": "warp10.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 
Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Warp10", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xmpp_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "XMPP Server", "link": "https://github.com/horazont/xmpp-blackbox-exporter", "icon_filename": "xmpp.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server 
Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-XMPP_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xiaomi_mi_flora", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Xiaomi Mi Flora", "link": "https://github.com/xperimental/flowercare-exporter", "icon_filename": "xiaomi.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care 
Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Xiaomi_Mi_Flora", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-yourls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "YOURLS URL Shortener", "link": "https://github.com/just1not2/prometheus-exporter-yourls", "icon_filename": "yourls.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS 
exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-YOURLS_URL_Shortener", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zerto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zerto", "link": "https://github.com/claranet/zerto-exporter", "icon_filename": "zerto.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by 
following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server's certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zerto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zulip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zulip", "link": "https://github.com/brokenpip3/zulip-exporter", "icon_filename": "zulip.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zulip", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zyxel_gs1200", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zyxel GS1200-8", "link": "https://github.com/robinelfrink/gs1200-exporter", "icon_filename": "zyxel.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 
Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zyxel_GS1200-8", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bpftrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "bpftrace variables", "link": "https://github.com/andreasgerstmayr/bpftrace_exporter", "icon_filename": "bpftrace.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace 
exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-bpftrace_variables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cadvisor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "cAdvisor", "link": "https://github.com/google/cadvisor", "icon_filename": "cadvisor.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-cAdvisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-etcd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "etcd", "link": "https://etcd.io/", "icon_filename": "etcd.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-etcd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gpsd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "gpsd", "link": "https://github.com/natesales/gpsd-exporter", "icon_filename": "gpsd.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-gpsd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iqair", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "iqAir AirVisual air quality monitors", "link": "https://github.com/Packetslave/iqair_exporter", "icon_filename": "iqair.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair 
Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-iqAir_AirVisual_air_quality_monitors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jolokia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "jolokia", "link": "https://github.com/aklinkert/jolokia_exporter", "icon_filename": "jolokia.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
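##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n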
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-jolokia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-journald", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "journald", "link": "https://github.com/dead-claudia/journald-exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
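##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n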
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-journald", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-loki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "loki", "link": "https://github.com/grafana/loki", "icon_filename": "loki.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
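##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n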
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-loki", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mosquitto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mosquitto", "link": "https://github.com/sapcc/mosquitto-exporter", "icon_filename": "mosquitto.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
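##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n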
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mosquitto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mtail", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mtail", "link": "https://github.com/google/mtail", "icon_filename": "mtail.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using the mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
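##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n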
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mtail", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nftables", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "nftables", "link": "https://github.com/Sheridan/nftables_exporter", "icon_filename": "nftables.png", "categories": ["data-collection.linux-systems.firewall-metrics"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
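##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n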
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-nftables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgbackrest", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "pgBackRest", "link": "https://github.com/woblerr/pgbackrest_exporter", "icon_filename": "pgbackrest.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) 
by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to match the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
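##### Metric filtering with a selector\n\n> **Note**: This is an illustrative sketch; the `myapp_*` metric name patterns are hypothetical placeholders, so adjust them to the metric names your endpoint actually exposes.\n\nAn example configuration that collects only time series whose names start with `myapp_`, except the debug series.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # keep myapp_* series, but drop myapp_debug_* (hypothetical patterns)\n selector:\n allow:\n - myapp_*\n deny:\n - myapp_debug_*\n```\n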
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-pgBackRest", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-strongswan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "strongSwan", "link": "https://github.com/jlti-dev/ipsec_exporter", "icon_filename": "strongswan.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient VPN management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici 
Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
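For instance, a filter that keeps all `vpn_*` series except debug ones could look like this (the metric-name patterns here are hypothetical, shown only to illustrate the shape):\n\n```yaml\nselector:\n allow:\n - vpn_*\n deny:\n - vpn_debug_*\n```\n\n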
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-strongSwan", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-proxysql", "plugin_name": "go.d.plugin", "module_name": "proxysql", "monitored_instance": {"name": "ProxySQL", "link": "https://www.proxysql.com/", "icon_filename": "proxysql.png", "categories": ["data-collection.database-servers"]}, "keywords": ["proxysql", "databases", "sql"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| my.cnf | Specifies the my.cnf file to read connection parameters from, under the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-proxysql-ProxySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pulsar", "plugin_name": "go.d.plugin", "module_name": "pulsar", "monitored_instance": {"name": "Apache Pulsar", "link": "https://pulsar.apache.org/", "icon_filename": "pulsar.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["pulsar"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", "integration_type": "collector", "id": "go.d.plugin-pulsar-Apache_Pulsar", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-rabbitmq", "plugin_name": "go.d.plugin", "module_name": "rabbitmq", "monitored_instance": {"name": "RabbitMQ", "link": "https://www.rabbitmq.com/", "icon_filename": "rabbitmq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["rabbitmq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin\n\nThe management plugin is included in the RabbitMQ distribution, but it is disabled by default.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/rabbitmq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/rabbitmq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:15672 | yes |\n| collect_queues_metrics | Collect stats per vhost per queue. 
Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | messages |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-rabbitmq-RabbitMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-redis", "plugin_name": "go.d.plugin", "module_name": "redis", "monitored_instance": {"name": "Redis", "link": "https://redis.com/", "categories": ["data-collection.database-servers"], "icon_filename": "redis.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["redis", "databases"], "most_popular": true}, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-redis-Redis", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/redis/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": {"name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": 
["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["scaleio"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| 
scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", "integration_type": "collector", "id": "go.d.plugin-scaleio-Dell_EMC_ScaleIO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-snmp", "plugin_name": "go.d.plugin", "module_name": "snmp", "monitored_instance": {"name": "SNMP devices", "link": "", "icon_filename": "snmp.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["snmp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector monitors any SNMP devices and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\nIt supports:\n\n- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3.\n- any number of SNMP devices.\n- each SNMP device can be used to collect data for any number of charts.\n- each chart may have any number of dimensions.\n- each SNMP device may have a different update frequency.\n- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).\n\nKeep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second.\n`go.d.plugin` reports the time it took for the SNMP device to respond when executed in the debug mode.\n\nAlso, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped.\nThis is a problem of the SNMP device, not this collector. 
In this case, consider reducing the frequency of data collection (increasing `update_every`).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Find OIDs\n\nUse `snmpwalk`, like this:\n\n```sh\nsnmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1\n```\n\n- `-t 20` is the timeout in seconds.\n- `-O fn` will display full OIDs in numeric format.\n- `-v 2c` is the SNMP version.\n- `-c public` is the SNMP community.\n- `192.0.2.1` is the SNMP device.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target IPv4 address. | 127.0.0.1 | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 10 | no |\n| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. 
| 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security level of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`.\n\n> **SNMPv1**: just set `options.version` to 1.\n> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. temperature), use `absolute` instead; see the sketch after the following example.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n
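As a sketch of the `absolute` case mentioned in the note above, a single gauge-style chart might look like this (the temperature OID `1.3.6.1.4.1.9999.1.1.0` is hypothetical, shown only to illustrate the shape):\n\n```yaml\ncharts:\n - id: \"temperature\"\n title: \"Device Temperature\"\n units: \"Celsius\"\n family: \"environment\"\n dimensions:\n - name: \"temp\"\n oid: \"1.3.6.1.4.1.9999.1.1.0\"\n algorithm: \"absolute\"\n```\n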
##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\nThe rest of the configuration is the same as in the SNMPv1/2 example.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n##### Multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its id (1-24) appended to:\n\n- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. the `in` dimension will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means: inject the anchor, then extend it. 
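As a tiny illustration of the mechanics (generic YAML, not a go.d configuration):\n\n```yaml\ndefaults: &defaults\n update_every: 10\n community: public\njob:\n <<: *defaults\n update_every: 30\n```\n\nHere `job` inherits both keys from `defaults` and overrides `update_every`. 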
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration into the second and updates the `name` and `hostname` parameters.\n- injects (copies) the first job configuration into the third and updates the `name` and `hostname` parameters.\n\n\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThe metrics that will be collected are defined in the configuration file.\n", "integration_type": "collector", "id": "go.d.plugin-snmp-SNMP_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-solr", "plugin_name": "go.d.plugin", "module_name": "solr", "monitored_instance": {"name": "Solr", "link": "https://lucene.apache.org/solr/", "icon_filename": "solr.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["solr"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Solr\n\nPlugin: go.d.plugin\nModule: solr\n\n## Overview\n\nThis collector monitors Solr instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Solr version 6.4+\n\nThis collector does not work with Solr versions lower than 6.4.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/solr.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/solr.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8983 | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n```\n##### Basic HTTP auth\n\nLocal Solr instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n - name: remote\n url: http://203.0.113.10:8983\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m solr\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Solr instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| solr.search_requests | search | requests/s |\n| solr.search_errors | errors | errors/s |\n| solr.search_errors_by_type | client, server, timeouts | errors/s |\n| solr.search_requests_processing_time | time | milliseconds |\n| solr.search_requests_timings | min, median, mean, max | milliseconds |\n| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n| solr.update_requests | search | requests/s |\n| solr.update_errors | errors | errors/s |\n| solr.update_errors_by_type | client, server, timeouts | errors/s |\n| solr.update_requests_processing_time | time | milliseconds |\n| solr.update_requests_timings | min, median, mean, max | milliseconds |\n| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-solr-Solr", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/solr/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-springboot2", "plugin_name": "go.d.plugin", "module_name": "springboot2", "monitored_instance": {"name": "Java Spring-boot 2 applications", "link": "", "icon_filename": "springboot.png", "categories": ["data-collection.apm"]}, "keywords": ["springboot"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Java Spring-boot 2 applications\n\nPlugin: go.d.plugin\nModule: springboot2\n\n## Overview\n\nThis collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects applications running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Spring Boot Actuator\n\nThe Spring Boot Actuator exposes metrics over HTTP. To use it:\n\n- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.\n- set `management.endpoints.web.exposure.include=*` in your `application.properties`.\n
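\nFor reference, a minimal `application.properties` sketch. Only the exposure property comes from the steps above; treat anything else you add as application-specific:\n\n```properties\n# expose all Actuator endpoints, including /actuator/prometheus\n# (assumes spring-boot-starter-actuator and micrometer-registry-prometheus are on the classpath)\nmanagement.endpoints.web.exposure.include=*\n```\n\nRefer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. 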
\u2018How-to\u2019 guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/springboot2.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/springboot2.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/actuator/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n - name: remote\n url: http://192.0.2.1:8080/actuator/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m springboot2\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Java Spring-boot 2 applications instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| springboot2.thread | daemon, total | threads |\n| springboot2.heap | free, eden, survivor, old | B |\n| springboot2.heap_eden | used, commited | B |\n| springboot2.heap_survivor | used, commited | B |\n| springboot2.heap_old | used, commited | B |\n| springboot2.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-springboot2-Java_Spring-boot_2_applications", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-squidlog", "plugin_name": "go.d.plugin", "module_name": "squidlog", "monitored_instance": {"name": "Squid log files", "link": "http://www.squid-cache.org/", "icon_filename": "squid.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["squid", "logs"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of Squid servers running on localhost.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog knows how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). 
|\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client. |\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n
##### parser.ltsv_config.mapping\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m squidlog\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squidlog.requests | requests | requests/s |\n| squidlog.excluded_requests | unmatched | requests/s |\n| squidlog.type_requests | success, bad, redirect, error | requests/s |\n| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |\n| squidlog.bandwidth | sent | kilobits/s |\n| squidlog.response_time | min, max, avg | milliseconds |\n| squidlog.uniq_clients | clients | clients |\n| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |\n| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |\n| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |\n| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |\n| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |\n| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |\n| squidlog.http_method_requests | a dimension per HTTP method | requests/s |\n| squidlog.mime_type_requests | a dimension per MIME type | requests/s |\n| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |\n| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-squidlog-Squid_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/squidlog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-supervisord", "plugin_name": "go.d.plugin", "module_name": "supervisord", "monitored_instance": {"name": "Supervisor", "link": "http://supervisord.org/", "icon_filename": 
"supervisord.png", "categories": ["data-collection.processes-and-system-services"]}, "keywords": ["supervisor"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n##### Socket\n\nCollect metrics via Unix socket.\n\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-supervisord-Supervisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-systemdunits", "plugin_name": "go.d.plugin", "module_name": "systemdunits", "monitored_instance": {"name": "Systemd Units", "link": "https://www.freedesktop.org/wiki/Software/systemd/", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"]}, "keywords": ["systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors Systemd units state.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| include | Systemd units filter. | *.service | no |\n| timeout | System bus requests timeout. | 1 | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n##### One specific unit\n\nCollect state of one specific unit.\n\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n##### All unit types\n\nCollect state of all units.\n\n```yaml\njobs:\n - name: all-units\n include:\n - '*'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-systemdunits-Systemd_Units", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-tengine", "plugin_name": "go.d.plugin", "module_name": "tengine", "monitored_instance": {"name": "Tengine", "link": "https://tengine.taobao.org/", "icon_filename": "tengine.jpeg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["tengine", "web", "webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Enable the ngx_http_reqstat_module module\n\nTo enable the module, see the [official ngx_http_reqstat_module documentation](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n
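\nFor reference, a minimal sketch of the relevant Tengine configuration. The directive names come from the linked documentation; the zone name, size, and the `/us` status location are assumptions matching this collector's default URL:\n\n```text\nhttp {\n    # define a shared memory zone for per-server request statistics\n    req_status_zone server \"$host,$server_addr:$server_port\" 10M;\n    # collect statistics into that zone\n    req_status server;\n\n    server {\n        location /us {\n            # expose the statistics in the default line format\n            req_status_show;\n        }\n    }\n}\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nTengine with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 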
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-tengine-Tengine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-traefik", "plugin_name": "go.d.plugin", "module_name": "traefik", "monitored_instance": {"name": "Traefik", "link": "https://traefik.io/", "icon_filename": "traefik.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["traefik", "proxy", "webproxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n
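\nA sketch of a matching Traefik v2 static configuration (`traefik.yml`); the dedicated `metrics` entryPoint on port 8082 is an assumption that matches this collector's default URL:\n\n```yaml\n# define a dedicated entryPoint for metrics\nentryPoints:\n  metrics:\n    address: \":8082\"\n\n# expose Prometheus metrics on that entryPoint\nmetrics:\n  prometheus:\n    entryPoint: metrics\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| 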
update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the entrypoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", "integration_type": "collector", "id": "go.d.plugin-traefik-Traefik", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-unbound", "plugin_name": "go.d.plugin", "module_name": "unbound", "monitored_instance": {"name": "Unbound", "link": "https://nlnetlabs.nl/projects/unbound/about/", "icon_filename": "unbound.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["unbound", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using a Unix socket:\n\n- the socket should be readable and writable by the `netdata` user\n\nIf using an IP socket and TLS is disabled:\n\n- the socket should be accessible via the network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by the `netdata` user\n- `control-cert-file` should be readable by the `netdata` user\n\nFor auto-detection of parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by the `netdata` user\n- if you have several configuration files (include feature), all of them should be readable by the `netdata` user\n
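\nFor reference, a minimal sketch of the `remote-control` section in `unbound.conf` (the interface and port shown are the defaults this collector assumes):\n\n```text\nremote-control:\n    # enable the remote control interface\n    control-enable: yes\n    control-interface: 127.0.0.1\n    control-port: 8953\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. 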
| 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n##### Unix socket\n\nConnecting through a Unix socket.\n\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", "integration_type": "collector", "id": "go.d.plugin-unbound-Unbound", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-upsd", "plugin_name": "go.d.plugin", "module_name": "upsd", "monitored_instance": {"name": "UPS (NUT)", "link": "", "icon_filename": "plug-circle-bolt.svg", "categories": ["data-collection.ups"]}, "keywords": ["ups", "nut"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). \"battery.type\" variable value. |\n| device_model | Device model. \"device.model\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", "integration_type": "collector", "id": "go.d.plugin-upsd-UPS_(NUT)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vcsa", "plugin_name": "go.d.plugin", "module_name": "vcsa", "monitored_instance": {"name": "vCenter Server Appliance", "link": "https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. 
|\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nHealth statuses:\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error occurred while retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
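Each status chart exposes one dimension per status; presumably the dimension matching the current status is set to 1 and all others to 0. As a quick sanity check, you can read the most recent sample through the Netdata data API (a sketch; the chart id prefix depends on your job name, so `vcsa_vcsa1` here is hypothetical):\n\n```bash\n# hypothetical chart id; replace vcsa_vcsa1 with your job's actual chart prefix\ncurl 'http://127.0.0.1:19999/api/v1/data?chart=vcsa_vcsa1.system_health_status&points=1'\n```\n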
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-vcsa-vCenter_Server_Appliance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vernemq", "plugin_name": "go.d.plugin", "module_name": "vernemq", "monitored_instance": {"name": "VerneMQ", "link": "https://vernemq.com", "icon_filename": "vernemq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["vernemq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimension per reason | packets/s |\n| 
vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vernemq-VerneMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vsphere", "plugin_name": "go.d.plugin", "module_name": "vsphere", "monitored_instance": {"name": "VMware vCenter Server", "link": "https://www.vmware.com/products/vcenter-server.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware", "esxi", "vcenter"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated with 20-second granularity**.\n\nIt is likely that 20 seconds is not enough for big installations, and the value should be tuned.\n\nTo get a better view, we recommend running the collector in debug mode and seeing how much time it takes to collect metrics.\n
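\nFor example (a sketch, assuming the default plugin location on Linux; the same commands appear in the Troubleshooting section below):\n\n```bash\n# run the plugin in debug mode for the vsphere module only\ncd /usr/libexec/netdata/plugins.d/\nsudo -u netdata ./go.d.plugin -d -m vsphere\n```\n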
\nExample (all unrelated debug lines were removed)\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n
\n\nThere you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't affect collection.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vsphere-VMware_vCenter_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-web_log", "plugin_name": "go.d.plugin", "module_name": "web_log", "monitored_instance": {"name": "Web server log files", "link": "", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "webservers.svg"}, "keywords": ["webserver", "apache", "httpd", "nginx", "lighttpd", "logs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.\n- NGINX logs the URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. 
| [] | no |\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). | | yes |\n| parser | Log parser configuration. | | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.csv_config | CSV log parser config. | | no |\n| parser.csv_config.delimiter | CSV field delimiter. | , | no |\n| parser.csv_config.format | CSV log format. | | no |\n| parser.ltsv_config | LTSV log parser config. | | no |\n| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.json_config | JSON log parser config. | | no |\n| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| parser.regexp_config | RegExp log parser config. | | no |\n| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### parser.log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nparser:\n log_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file.\n\n- checks if the format is `CSV` (using regexp).\n- checks if the format is `JSON` (using regexp).\n- assumes the format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. 
If it doesn't work, you need to set the format manually.\n\n\n##### parser.csv_config.format\n\n\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: json\n json_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions. These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-web_log-Web_server_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-whoisquery", "plugin_name": "go.d.plugin", "module_name": "whoisquery", "monitored_instance": {"name": "Domain expiration date", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, 
"keywords": ["whois"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple domains.\n\n\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-whoisquery-Domain_expiration_date", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-ad", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Active Directory", "link": "https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "active directory", "ad", "adcs", "adfs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. 
CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-hyperv", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "HyperV", "link": 
"https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "hyperv", "virtualization", "vm"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate the server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid GUID, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\nThen reference the virtual node in the collector configuration:\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n
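##### Behind an HTTP proxy\n\nAccess through a proxy, using the `proxy_url`, `proxy_username`, and `proxy_password` options from the table above. This is a minimal sketch; the proxy address and credentials are placeholders.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n proxy_url: http://127.0.0.1:3128 # placeholder proxy address\n proxy_username: username # placeholder credentials\n proxy_password: password # placeholder credentials\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 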
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-msexchange", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS Exchange", "link": "https://www.microsoft.com/en-us/microsoft-365/exchange/email", "icon_filename": 
"exchange.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mail"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-mssql", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS SQL Server", "link": "https://www.microsoft.com/en-us/sql-server/", "icon_filename": "mssql.svg", 
"categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mssql", "database", "db"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-dotnet", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "NET Framework", "link": "https://dotnet.microsoft.com/en-us/download/dotnet-framework", "icon_filename": 
"dotnet.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "dotnet"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["data-collection.windows-systems"], 
"icon_filename": "windows.svg"}, "keywords": ["windows", "microsoft"], "most_popular": true, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | uptime | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-wireguard", "plugin_name": "go.d.plugin", "module_name": "wireguard", "monitored_instance": {"name": "WireGuard", "link": "https://www.wireguard.com/", "categories": ["data-collection.vpns"], "icon_filename": 
"wireguard.svg"}, "keywords": ["wireguard", "vpn", "security"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-wireguard-WireGuard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-x509check", "plugin_name": "go.d.plugin", "module_name": "x509check", "monitored_instance": {"name": "X.509 certificate", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "lock.svg"}, "keywords": ["x509", "certificate"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors x509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, smtp, file. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n##### Local file certificate\n\nLocal file certificate.\n\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n##### SMTP certificate\n\nSMTP certificate.\n\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | revoked | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-x509check-X.509_certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-zookeeper", "plugin_name": "go.d.plugin", "module_name": "zookeeper", "monitored_instance": {"name": "ZooKeeper", "link": "https://zookeeper.apache.org/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "zookeeper.svg"}, "keywords": ["zookeeper"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the ZooKeeper instance via TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to ZooKeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nLocal server.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n##### TLS with self-signed certificate\n\nZooKeeper with TLS and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-zookeeper-ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "idlejitter.plugin", "module_name": "idlejitter.plugin", "monitored_instance": {"name": "Idle OS Jitter", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["latency", "jitter"], "most_popular": false}, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. 
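For example, if the thread asks to sleep for the default 20 ms and actually wakes after 20.35 ms, the idle jitter measured for that sample is 350 microseconds. 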
This is done dozens of times per second to ensure we have a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", "integration_type": "collector", "id": "idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ioping.plugin", "module_name": "ioping.plugin", "monitored_instance": {"name": "IOPing", "link": "https://github.com/koct9i/ioping", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. 
Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to the `ioping` command. | -T 1000000 | no |\n\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n```yaml\ndestination=\"/dev/sda\"\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "ioping.plugin-ioping.plugin-IOPing", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "macos.plugin", "module_name": "mach_smi", "monitored_instance": {"name": "macOS", "link": "https://www.apple.com/macos", "categories": ["data-collection.macos-systems"], "icon_filename": "macos.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["macos", "apple", "darwin"], "most_popular": false}, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistics` is called to collect CPU and virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). 
| yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). 
| yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring.\n\n```yaml\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n##### Disable the complete Machine SMI section.\n\nA basic example that disables the entire `mach_smi` section.\n\n```yaml\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "macos.plugin-mach_smi-macOS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "nfacct.plugin", "module_name": "nfacct.plugin", "monitored_instance": {"name": "Netfilter", "link": "https://www.netfilter.org/", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "netfilter.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a socket to connect with netfilter and collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n", "integration_type": "collector", "id": "nfacct.plugin-nfacct.plugin-Netfilter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "perf.plugin", "module_name": "perf.plugin", "monitored_instance": {"name": "CPU performance", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux", "cpu performance", "cpu cache", "perf.plugin"], "most_popular": false}, "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses syscall (2) to open a file descriptor to monitor the perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets the permission during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and it is not desirable to let Netdata struggle silently for PMUs, interfering with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. 
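As an illustration only (a minimal sketch of the relevant lines; the rest of `netdata.conf` is omitted), the section would then contain:\n\n```ini\n[plugins]\n # enable the perf plugin (disabled by default)\n perf = yes\n```\n\n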
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```ini\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n```ini\n[plugin:perf]\n command options = cycles\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", "integration_type": "collector", "id": "perf.plugin-perf.plugin-CPU_performance", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/diskstats", "monitored_instance": {"name": "Disk Statistics", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "disks", "io", "bcache", "block devices"], "most_popular": false}, "overview": "# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | 
disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/diskstats-Disk_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/interrupts", "monitored_instance": {"name": "Interrupts", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["interrupts"], "most_popular": false}, "overview": "# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. 
The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily\n used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system\n performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n understand what your system is doing. It can provide insights into the system's interaction with hardware,\n drivers, and other parts of the kernel.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
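\n\nTo see the raw data behind these charts, you can aggregate the file by hand; an illustrative snippet (the `awk` program assumes the usual layout of one counter column per CPU), not part of the collector:\n\n```bash\n# Sum each interrupt source across all CPUs and show the busiest ones\nawk 'NR>1 {t=0; for (i=2; i<=NF; i++) if ($i ~ /^[0-9]+$/) t+=$i; print t, $1}' /proc/interrupts | sort -rn | head\n```\n\n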
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/interrupts-Interrupts", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/loadavg", "monitored_instance": {"name": "System Load Average", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["load", "load average"], "most_popular": false}, "overview": "# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also includes the currently running processes and the total number of processes.\n\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the\n load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n system's workload. 
This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
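\n\nThe file behind this collector is tiny and human-readable, so the charts are easy to cross-check against the source (illustrative):\n\n```bash\ncat /proc/loadavg\n# example output: 0.81 0.52 0.47 2/1021 37453\n# fields: 1-, 5- and 15-minute load averages, runnable/total entities, last created PID\n```\n\n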
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/loadavg-System_Load_Average", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/mdstat", "monitored_instance": {"name": "MD RAID", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["raid", "mdadm", "mdstat"], "most_popular": false}, "overview": "# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
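\n\nFor a quick manual check of the same information, the kernel's status file is human-readable (illustrative):\n\n```bash\ncat /proc/mdstat\n# an array line looks like: md0 : active raid1 sdb1[1] sda1[0]\n# [UU] means all members are up; [_U] indicates a degraded array\n```\n\n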
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/mdstat-MD_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/meminfo", "monitored_instance": {"name": "Memory Usage", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory", "ram", "available", "committed"], "most_popular": false}, "overview": "# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n whether memory usage is a factor. 
For example, if your system is slow and cached swap is high, it could\n mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n decisions about future capacity needs.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
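\n\nTo compare a chart against its source, you can pull individual fields straight from the file; values there are in KiB, while Netdata charts them in MiB (illustrative, and some fields only exist on kernels built with the matching features):\n\n```bash\ngrep -E 'MemAvailable|SwapCached|Committed_AS|HardwareCorrupted' /proc/meminfo\n```\n\n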
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.commited | Commited_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/meminfo-Memory_Usage", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/dev", "monitored_instance": {"name": "Network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["network interfaces"], "most_popular": false}, "overview": "# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/dev-Network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/ip_vs_stats", "monitored_instance": {"name": "IP Virtual Server", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip virtual server"], "most_popular": false}, "overview": "# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration 
monitors IP Virtual Server statistics\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/netstat", "monitored_instance": {"name": "Network statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip", "udp", "udplite", "icmp", "netstat", "snmp"], "most_popular": false}, "overview": "# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests was dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
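\n\nThe counters behind these charts come from header/value line pairs in two kernel files, which you can eyeball directly (illustrative):\n\n```bash\n# extended TCP counters used by several of the charts below\ngrep '^TcpExt:' /proc/net/netstat\n# classic SNMP-style IP/ICMP/TCP/UDP counters\ngrep -E '^(Ip|Icmp|Tcp|Udp):' /proc/net/snmp\n```\n\n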
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/netstat-Network_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfs", "monitored_instance": {"name": "NFS Client", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs client", "filesystem"], "most_popular": false}, "overview": "# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfs-NFS_Client", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfsd", "monitored_instance": {"name": "NFS Server", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs server", "filesystem"], "most_popular": false}, "overview": "# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfsd-NFS_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sctp/snmp", "monitored_instance": {"name": "SCTP Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sctp", "stream control transmission protocol"], "most_popular": false}, "overview": "# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat", "monitored_instance": {"name": "Socket statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sockets"], "most_popular": false}, "overview": "# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
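\n\nThe file is a one-line-per-protocol summary and easy to inspect by hand (illustrative; the TCP `mem` figure is in pages, which Netdata converts to KiB):\n\n```bash\ncat /proc/net/sockstat\n# e.g.: TCP: inuse 14 orphan 0 tw 6 alloc 17 mem 2\n```\n\n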
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | KiB |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat-Socket_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat6", "monitored_instance": {"name": "IPv6 Socket Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipv6 sockets"], "most_popular": false}, "overview": "# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/softnet_stat", "monitored_instance": {"name": "Softnet Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softnet"], "most_popular": false}, "overview": "# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Number of packets dropped because the per-CPU backlog queue was full, i.e. `net.core.netdev_max_backlog` was exceeded (`dropped`).\n- Number of times the softirq handler exhausted its budget while packets were still pending, so net_rx_action was rescheduled (`squeezed`).\n- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.\n- Number of times GRO cells were processed.\n\nMonitoring the /proc/net/softnet_stat file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to 
exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/softnet_stat-Softnet_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/nf_conntrack", "monitored_instance": {"name": "Conntrack", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["connection tracking mechanism", "netfilter", "conntrack"], "most_popular": false}, "overview": "# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
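\n\nFor a manual look at the same subsystem, the stat file holds per-CPU counters in hexadecimal, and current table usage is exposed through sysctl files (illustrative):\n\n```bash\n# per-CPU counters (hex values, one row per CPU after the header)\nhead -3 /proc/net/stat/nf_conntrack\n# tracked connections vs. the table limit\ncat /proc/sys/net/netfilter/nf_conntrack_count /proc/sys/net/netfilter/nf_conntrack_max\n```\n\n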
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/nf_conntrack-Conntrack", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/synproxy", "monitored_instance": {"name": "Synproxy", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["synproxy"], "most_popular": false}, "overview": "# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/synproxy-Synproxy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/wireless", "monitored_instance": {"name": "Wireless network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["wireless devices"], "most_popular": false}, "overview": "# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
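\n\nThe kernel's summary file mirrors these charts and can be read directly (illustrative):\n\n```bash\ncat /proc/net/wireless\n# per-interface link quality, signal and noise levels, plus discard counters\n```\n\n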
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/wireless-Wireless_network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pagetypeinfo", "monitored_instance": {"name": "Page types", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory page types"], "most_popular": false}, "overview": "# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pagetypeinfo-Page_types", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pressure", "monitored_instance": {"name": "Pressure Stall Information", "link": "", "categories": ["data-collection.linux-systems.pressure-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pressure"], "most_popular": false}, "overview": "# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` provides Pressure Stall Information\n(PSI). PSI is a kernel feature that tracks the amount of time tasks are stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors four separate files, one each for CPU, memory, I/O, and IRQ:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides metrics for stall time over the last 10 seconds, 1 minute, and 5 minutes, plus a cumulative stall-time counter.\n\nMonitoring the /proc/pressure files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", 
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pressure-Pressure_Stall_Information", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/softirqs", "monitored_instance": {"name": "SoftIRQ statistics", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softirqs", "interrupts"], "most_popular": false}, "overview": "# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/softirqs-SoftIRQ_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs", "monitored_instance": {"name": "ZFS Pools", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs pools", "pools", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Pools\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs\n\n## Overview\n\nThis integration provides metrics about the state of ZFS pools.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts 
are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs-ZFS_Pools", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs/arcstats", "monitored_instance": {"name": "ZFS Adaptive Replacement Cache", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs arc", "arc", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/stat", "monitored_instance": {"name": "System statistics", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["cpu utilization", "process counts"], "most_popular": false}, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states, frequencies, and other key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- 
`/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/stat-System_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/sys/kernel/random/entropy_avail", "monitored_instance": {"name": "Entropy", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["entropy"], "most_popular": false}, "overview": "# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy, a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable. \n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. 
These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/uptime", "monitored_instance": {"name": "System Uptime", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["uptime"], "most_popular": false}, "overview": "# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/uptime-System_Uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/vmstat", "monitored_instance": {"name": "Memory Statistics", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "page faults", "oom", "numa"], "most_popular": false}, "overview": "# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by 
*scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/vmstat-Memory_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/block/zram", "monitored_instance": {"name": "ZRAM", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zram"], "most_popular": false}, "overview": "# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device backed by a portion of your system's RAM.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and the effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/block/zram-ZRAM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/drm", "monitored_instance": {"name": "AMD GPU", "link": "https://www.amd.com", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "amd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["amd", "gpu", "hardware"], "most_popular": false}, "overview": "# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/drm-AMD_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/infiniband", "monitored_instance": {"name": "InfiniBand", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["infiniband", "rdma"], "most_popular": false}, "overview": "# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/infiniband-InfiniBand", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/power_supply", "monitored_instance": {"name": "Power Supply", "link": "", "categories": ["data-collection.linux-systems.power-supply-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["psu", "power supply"], "most_popular": false}, "overview": "# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors Power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/power_supply-Power_Supply", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/edac/mc", "monitored_instance": {"name": "Memory modules (DIMMs)", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["edac", "ecc", "dimm", "ram", "hardware"], "most_popular": false}, "overview": "# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of two kinds:\n - errors related to a DIMM\n - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are two kinds:\n - memory controllers that can identify the physical DIMMs and report errors directly for them,\n - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n In this case, the DIMMs reported may be more than the physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Used type of error detection and correction. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_dimm_errors | correctable, uncorrectable | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/node", "monitored_instance": {"name": "Non-Uniform Memory Access", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["numa"], "most_popular": false}, "overview": "# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and the ability of the system to be expanded. 
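As a quick reference, the raw per-node counters this collector parses can be inspected directly in sysfs (a minimal sketch, assuming sysfs is mounted at its standard location):\n\n```bash\n# One numastat file per NUMA node, holding the numa_hit, numa_miss, numa_foreign,\n# interleave_hit, local_node and other_node counters.\ncat /sys/devices/system/node/node*/numastat\n```\n\n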
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/fs/btrfs", "monitored_instance": {"name": "BTRFS", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.btrfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["btrfs", "filesystem"], "most_popular": false}, "overview": "# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## 
Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/fs/btrfs-BTRFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/kernel/mm/ksm", "monitored_instance": {"name": "Kernel Same-Page Merging", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ksm", "samepage", "merging"], "most_popular": false}, "overview": "# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "ipc", "monitored_instance": {"name": "Inter Process Communication", "link": "", "categories": ["data-collection.linux-systems.ipc-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipc", "semaphores", "shared memory"], "most_popular": false}, "overview": "# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple\n processes are trying to access a single shared resource, semaphores can ensure that only one process\n accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-ipc-Inter_Process_Communication", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "adaptec_raid", "monitored_instance": {"name": "AdaptecRAID", "link": "https://www.microchip.com/en-us/products/storage", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "adaptec.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# AdaptecRAID\n\nPlugin: python.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nThis collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.\n\n\nIt uses the arcconf command line utility (from Adaptec) to monitor your RAID controller.\n\nExecuted commands:\n - `sudo -n arcconf GETCONFIG 1 LD`\n - `sudo -n arcconf GETCONFIG 1 PD`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the arcconf command line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run arcconf as sudoer\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\nAdd the following to your /etc/sudoers file (`which arcconf` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/arcconf\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. 
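Before resetting anything, you can check what the unit is currently allowed; a quick probe, assuming a systemd-based distribution (this is plain systemctl, nothing Netdata-specific):\n\n```bash\n# print the effective capability bounding set for the netdata unit\nsystemctl show netdata --property=CapabilityBoundingSet\n```\n\n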
Resetting is not optimal, but it is the next-best solution given the inability to execute arcconf using sudo.\n\nAs root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/adaptec_raid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: my_job_name \n update_every: 1 # the JOB's data collection frequency\n priority: 60000 # the JOB's order on the dashboard\n penalty: yes # the JOB's penalty\n autodetection_retry: 0 # the JOB's re-check interval in seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin adaptec_raid debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |\n| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per AdaptecRAID instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptec_raid.ld_status | a dimension per logical device | bool |\n| adaptec_raid.pd_state | a dimension per physical device | bool |\n| adaptec_raid.smart_warnings | a dimension per physical device | count |\n| adaptec_raid.temperature | a dimension per physical device | celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-adaptec_raid-AdaptecRAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/adaptec_raid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "alarms", "monitored_instance": {"name": "Netdata Agent alarms", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["alarms", "netdata"], "most_popular": false}, "overview": "# Netdata Agent alarms\n\nPlugin: python.d.plugin\nModule: alarms\n\n## Overview\n\nThis collector creates an 'Alarms' menu with one line plot of `alarms.status`.\n\n\nAlarm status is read from the Netdata agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default, all alarms produced will be monitored.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/alarms.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/alarms.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | Netdata agent alarms endpoint to collect from. 
Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |\n| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {\"CLEAR\": 0, \"WARNING\": 1, \"CRITICAL\": 2} | yes |\n| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |\n| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |\n| alarm_contains_words | A \",\" separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with \"cpu\" or \"load\" in alarm name. Default includes all. | | yes |\n| alarm_excludes_words | A \",\" separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with \"cpu\" or \"load\" in alarm name. Default excludes None. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n\n```\n##### Advanced\n\nAn advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.\n\"ML\" job will collect status and values for all alarms with \"ml_\" in the name. Default job will collect status for all other alarms.\n\n\n```yaml\nML:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: true\n alarm_status_chart_type: 'stacked'\n alarm_contains_words: 'ml_'\n\nDefault:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: false\n alarm_status_chart_type: 'stacked'\n alarm_excludes_words: 'ml_'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin alarms debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netdata Agent alarms instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |\n| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value |\n\n", "integration_type": "collector", "id": "python.d.plugin-alarms-Netdata_Agent_alarms", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "am2320", "monitored_instance": {"name": "AM2320", "link": "https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "am2320", "sensor", "humidity"], "most_popular": false}, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over I2C.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via I2C.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the AM2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
The resistors go from VDD to SCL and SDA each.\n\n\n#### Software requirements\n\nInstall the Adafruit Circuit Python AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n", "integration_type": "collector", "id": "python.d.plugin-am2320-AM2320", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "beanstalk", "monitored_instance": {"name": "Beanstalk", "link": "https://beanstalkd.github.io/", "categories": ["data-collection.message-brokers"], "icon_filename": "beanstalk.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["beanstalk", "beanstalkd", "message"], "most_popular": false}, "overview": "# Beanstalk\n\nPlugin: python.d.plugin\nModule: beanstalk\n\n## Overview\n\nMonitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management.\n\nThe collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### beanstalkc python module\n\nThe collector requires the `beanstalkc` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/beanstalk.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. 
This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |\n| port | Port of the beanstalk service. | 11300 | no |\n\n#### Examples\n\n##### Remote beanstalk server\n\nA basic remote beanstalk server\n\n```yaml\nremote:\n name: 'beanstalk'\n host: '1.2.3.4'\n port: 11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local_beanstalk'\n host: '127.0.0.1'\n port: 11300\n\nremote_job:\n name: 'remote_beanstalk'\n host: '192.0.2.1'\n port: 11300\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin beanstalk debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.cpu_usage | user, system | cpu time |\n| beanstalk.jobs_rate | total, timeouts | jobs/s |\n| beanstalk.connections_rate | connections | connections/s |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.connections_rate | tubes | tubes |\n| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.current_connections | written, producers, workers, waiting | connections |\n| beanstalk.binlog | written, migrated | records/s |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. 
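If you want to confirm which tubes the collector will chart, you can query beanstalkd's plain-text protocol directly; a quick sketch, assuming the default local address and that `nc` is available:\n\n```bash\n# ask beanstalkd for its tube list (commands are CRLF-terminated), then close\nprintf 'list-tubes\\r\\nquit\\r\\n' | nc 127.0.0.1 11300\n```\n\n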
Each tube produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.jobs_rate | jobs | jobs/s |\n| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.connections | using, waiting, watching | connections |\n| beanstalk.commands | deletes, pauses | commands/s |\n| beanstalk.pause | since, left | seconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-beanstalk-Beanstalk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "bind_rndc", "monitored_instance": {"name": "ISC Bind (RNDC)", "link": "https://www.isc.org/bind/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dns", "bind", "server"], "most_popular": false}, "overview": "# ISC Bind (RNDC)\n\nPlugin: python.d.plugin\nModule: bind_rndc\n\n## Overview\n\nMonitor ISC Bind (RNDC) performance for optimal DNS server operations. Monitor query rates, response times, and error rates to ensure reliable DNS service delivery.\n\nThis collector uses the `rndc` tool to dump statistics to a file (named.stats) and then reads that file to gather Bind Name Server summary performance metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to read the named.stats file at `/var/log/bind/named.stats`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum bind version and permissions\n\nThe version of bind must be >=9.6, and the Netdata user must have permission to run `rndc stats`\n\n#### Setup log rotate for bind stats\n\nBIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.\nIt is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.\n\nTo set up BIND to dump stats, do the following:\n\n1. Add to the options {} block of 'named.conf.options':\n`statistics-file \"/var/log/bind/named.stats\";`\n\n2. Create bind/ directory in /var/log:\n`cd /var/log/ && mkdir bind`\n\n3. Change owner of directory to 'bind' user:\n`chown bind bind/`\n\n4. RELOAD (NOT restart) BIND:\n`systemctl reload bind9.service`\n\n5. 
Run 'rndc stats' as root to dump the statistics (BIND will create named.stats in the new directory)\n\nTo allow Netdata to run 'rndc stats', change the '/etc/bind/rndc.key' group to netdata:\n`chown :netdata rndc.key`\n\nLast, BUT NOT least, create bind-rndc.conf in logrotate.d/:\n```\n/var/log/bind/named.stats {\n\n daily\n rotate 4\n compress\n delaycompress\n create 0644 bind bind\n missingok\n postrotate\n rndc reload > /dev/null\n endscript\n}\n```\nTo test your logrotate conf file, run as root (debug dry-run mode):\n`logrotate -d /etc/logrotate.d/bind-rndc`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/bind_rndc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/bind_rndc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| named_stats_path | Path to the named stats, after being dumped by `rndc` | /var/log/bind/named.stats | no |\n\n#### Examples\n\n##### Local bind stats\n\nDefine a local path to bind stats file\n\n```yaml\nlocal:\n named_stats_path: '/var/log/bind/named.stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin bind_rndc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC Bind (RNDC) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |\n| bind_rndc.incoming_queries | a dimension per incoming query type | queries |\n| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |\n| bind_rndc.stats_size | stats_size | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-bind_rndc-ISC_Bind_(RNDC)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/bind_rndc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "boinc", "monitored_instance": {"name": "BOINC", "link": "https://boinc.berkeley.edu/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["boinc", "distributed"], "most_popular": false}, "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Boinc RPC interface\n\nBOINC requires use of a password to access its RPC interface. 
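Before configuring the collector, you can sanity-check RPC access with the stock `boinccmd` tool; a minimal sketch, assuming a system-wide installation that keeps its files under `/var/lib/boinc`:\n\n```bash\n# query client state over the same RPC interface the collector uses\nboinccmd --host localhost --passwd \"$(cat /var/lib/boinc/gui_rpc_auth.cfg)\" --get_state\n```\n\n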
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of the boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n hostname: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n hostname: '192.0.2.1'\n port: 1234\n password: some-other-password\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", "integration_type": "collector", "id": "python.d.plugin-boinc-BOINC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ceph", "monitored_instance": {"name": "Ceph", "link": "https://ceph.io/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ceph.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ceph", "storage"], "most_popular": false}, "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nIt uses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed.\n\n#### Grant read permission on the keyring file to the ceph group\n\nExecute: `chmod 640 
/etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of admin.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file. | | yes |\n| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must have group read permission. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-ceph-Ceph", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "changefinder", "monitored_instance": {"name": "python.d changefinder", "link": "", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["change detection", "anomaly detection", "machine learning", "ml"], "most_popular": false}, "overview": "# python.d changefinder\n\nPlugin: python.d.plugin\nModule: changefinder\n\n## Overview\n\nThis collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to\nperform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)\non your Netdata charts and/or dimensions.\n\n\nInstead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).\n### Notes\n\n- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its\n typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly\n this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw\n score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have\n already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then\n should 'settle down' once it has built up enough history. 
This is a typical characteristic of online machine learning\n approaches which need some initial window of time before they can be useful.\n- As this collector does most of the work in Python itself, you may want to try it out first on a test or development\n system to get a sense of its performance characteristics on a node similar to where you would like to use it.\n- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the\n typical performance characteristics we saw from running this collector (with defaults) were:\n - A runtime (`netdata.runtime_changefinder`) of ~30ms.\n - Typically ~1% additional CPU usage.\n - About ~85 MB of RAM (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will work over all `system.*` charts.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages for the netdata user\npip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4\n```\n\n**Note**: if you need to tell Netdata to use Python 3, you can pass the below command option in the python plugin section\nof your `netdata.conf` file.\n\n```yaml\n[ plugin:python.d ]\n # update every = 1\n command options = -ppython3\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/changefinder.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/changefinder.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | yes |\n| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |\n| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |\n| cf_r | default parameters that can be passed to the changefinder library. 
| 0.5 | no |\n| cf_order | the order parameter (AR model order) passed to the changefinder library. | 1 | no |\n| cf_smooth | the smooth parameter (smoothing window size) passed to the changefinder library. | 15 | no |\n| cf_threshold | the percentile above which scores will be flagged. | 99 | no |\n| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |\n| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\..*'\n charts_to_exclude: ''\n mode: 'per_chart'\n cf_r: 0.5\n cf_order: 1\n cf_smooth: 15\n cf_threshold: 99\n n_score_samples: 14400\n show_scores: false\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin changefinder debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d changefinder instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| changefinder.scores | a dimension per chart | score |\n| changefinder.flags | a dimension per chart | flag |\n\n", "integration_type": "collector", "id": "python.d.plugin-changefinder-python.d_changefinder", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/changefinder/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "dovecot", "monitored_instance": {"name": "Dovecot", "link": "https://www.dovecot.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "dovecot.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dovecot", "imap", "mail"], "most_popular": false}, "overview": "# Dovecot\n\nPlugin: python.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\nIt uses the Dovecot socket and executes the `EXPORT global` command to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Dovecot configuration\n\nThe Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/dovecot.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. 
This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| socket | Use this socket to communicate with Dovecot | /var/run/dovecot/stats | no |\n| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |\n| port | Used in combination with host, configures the port Dovecot listens on. | | no |\n\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration.\n\n```yaml\nlocaltcpip:\n name: 'local'\n host: '127.0.0.1'\n port: 24242\n\n```\n##### Local socket\n\nA basic local socket configuration.\n\n```yaml\nlocalsocket:\n name: 'local'\n socket: '/var/run/dovecot/stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin dovecot debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.sessions | active sessions | number |\n| dovecot.logins | logins | number |\n| dovecot.commands | commands | commands |\n| dovecot.faults | minor, major | faults |\n| dovecot.context_switches | voluntary, involuntary | switches |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | number/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth | ok, failed | attempts |\n| dovecot.auth_cache | hit, miss | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-dovecot-Dovecot", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/dovecot/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "example", "monitored_instance": {"name": "Example collector", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["example", "netdata", "python"], "most_popular": false}, "overview": "# Example collector\n\nPlugin: python.d.plugin\nModule: example\n\n## Overview\n\nExample collector that generates some random numbers as metrics.\n\n
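A minimal sketch of what such a module looks like may help before diving in. The outline below is illustrative only (loosely modeled on the python.d framework's conventions, not the exact contents of the real module): a module defines its charts and a `get_data()` method that returns a dictionary mapping dimension IDs to their latest values.\n\n```python\n# Illustrative python.d-style module (a sketch, not the module's actual source).\nfrom random import randint\n\nfrom bases.FrameworkServices.SimpleService import SimpleService\n\nORDER = ['random']\nCHARTS = {\n    'random': {\n        # [name, title, units, family, context, chart_type]\n        'options': [None, 'A random number', 'number', 'random', 'example.random', 'line'],\n        'lines': [['random']],\n    }\n}\n\n\nclass Service(SimpleService):\n    def __init__(self, configuration=None, name=None):\n        SimpleService.__init__(self, configuration=configuration, name=name)\n        self.order = ORDER\n        self.definitions = CHARTS\n\n    def get_data(self):\n        # Return the latest value for every dimension.\n        return {'random': randint(0, 100)}\n```\n\nIf you want to write your own collector, read our [writing a new Python 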
module](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.\n\n\nThe `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/example.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/example.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| num_lines | The number of lines to create. | 4 | no |\n| lower | The lower bound of numbers to randomly sample from. | 0 | no |\n| upper | The upper bound of numbers to randomly sample from. | 100 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nfour_lines:\n name: \"Four Lines\"\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n num_lines: 4\n lower: 0\n upper: 100\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin example debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Example collector instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| example.random | random | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-example-Example_collector", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "exim", "monitored_instance": {"name": "Exim", "link": "https://www.exim.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "exim.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["exim", "mail", "server"], "most_popular": false}, "overview": "# Exim\n\nPlugin: python.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors the Exim mail queue.\n\nIt uses the `exim` command line binary to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Exim configuration - local installation\n\nThe module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. We solve that by adding the `queue_list_requires_admin` statement to the exim configuration and setting it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.\n\n1. Edit the `exim` configuration with your preferred editor and add:\n`queue_list_requires_admin = false`\n2. Restart `exim` and Netdata\n\n\n#### Exim configuration - WHM (CPanel) server\n\nOn a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.\n\n1. Log in to WHM\n2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor\n3. Scroll down to the button **Add additional configuration setting** and click on it.\n4. In the new dropdown that appears above, find and choose:\n`queue_list_requires_admin` and set it to `false`\n5. 
Scroll to the end and click the **Save** button.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/exim.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | Path and command to the `exim` binary | exim -bpc | no |\n\n#### Examples\n\n##### Local exim install\n\nA basic local exim install\n\n```yaml\nlocal:\n command: 'exim -bpc'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin exim debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", "integration_type": "collector", "id": "python.d.plugin-exim-Exim", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/exim/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "fail2ban", "monitored_instance": {"name": "Fail2ban", "link": "https://www.fail2ban.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "fail2ban.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["fail2ban", "security", "authentication", "authorization"], "most_popular": false}, "overview": "# Fail2ban\n\nPlugin: python.d.plugin\nModule: fail2ban\n\n## Overview\n\nMonitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.\n\n\nIt collects metrics through reading the default log and configuration files of fail2ban.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `fail2ban.log` file must be readable by the user `netdata`.\n - change the file ownership and access permissions.\n - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.\n\nTo change the file ownership and access permissions, execute the following:\n\n```shell\nsudo chown root:netdata /var/log/fail2ban.log\nsudo chmod 640 /var/log/fail2ban.log\n```\n\nTo persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:\n\n```shell\n/var/log/fail2ban.log {\n\n weekly\n rotate 4\n compress\n\n delaycompress\n missingok\n postrotate\n fail2ban-client flushlogs 1>/dev/null\n endscript\n\n # If fail2ban runs as non-root it still needs to have write access\n # to logfiles.\n # create 640 fail2ban adm\n create 640 root netdata\n}\n```
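\n\nUnder the hood, the idea is simply counting jail events in that log. A rough sketch of the approach (a simplification for illustration, not the module's actual code; the log line shown in the comment follows fail2ban's default format):\n\n```python\nimport re\nfrom collections import Counter\n\n# fail2ban logs events like:\n#   2024-01-01 12:00:00,000 fail2ban.actions [123]: NOTICE [sshd] Ban 192.0.2.7\nEVENT = re.compile(r'NOTICE\s+\[(?P<jail>[^\]]+)\]\s+(?P<action>Ban|Unban)')\n\nbans = Counter()\nbanned_ips = Counter()\nwith open('/var/log/fail2ban.log') as log:\n    for line in log:\n        match = EVENT.search(line)\n        if match is None:\n            continue\n        jail, action = match.group('jail'), match.group('action')\n        if action == 'Ban':\n            bans[jail] += 1\n            banned_ips[jail] += 1\n        else:\n            banned_ips[jail] -= 1\n\nprint(dict(bans), dict(banned_ips))\n```\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will attempt to read the log file at `/var/log/fail2ban.log` and the conf file at `/etc/fail2ban/jail.local`.\nIf the conf file is not found, the default jail is `ssh`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/fail2ban.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, 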
but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |\n| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |\n| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |\n| exclude | jails you want to exclude from autodetection. | | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocal:\n log_path: '/var/log/fail2ban.log'\n conf_path: '/etc/fail2ban/jail.local'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin fail2ban debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fail2ban instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.failed_attempts | a dimension per jail | attempts/s |\n| fail2ban.bans | a dimension per jail | bans/s |\n| fail2ban.banned_ips | a dimension per jail | ips |\n\n", "integration_type": "collector", "id": "python.d.plugin-fail2ban-Fail2ban", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/fail2ban/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "gearman", "monitored_instance": {"name": "Gearman", "link": "http://gearman.org/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "gearman.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["gearman", "gearman job server"], "most_popular": false}, "overview": "# Gearman\n\nPlugin: python.d.plugin\nModule: gearman\n\n## Overview\n\nMonitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.\n\nThis collector connects to a Gearman instance via either TCP or unix socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Socket permissions\n\nThe gearman UNIX socket should have read permission for user netdata.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/gearman.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | URL or IP where gearman is running. | localhost | no |\n| port | Port of URL or IP where gearman is running. | 4730 | no |\n| tls | Use tls to connect to gearman. | false | no |\n| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |\n| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |\n\n#### Examples\n\n##### Local gearman service\n\nA basic host and port gearman configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\nremote:\n name: 'remote'\n host: '192.0.2.1'\n port: 4730\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin gearman debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.total_jobs | Pending, Running | Jobs |\n\n### Per gearman job\n\nMetrics related to Gearman jobs. 
Each job produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.single_job | Pending, Idle, Running | Jobs |\n\n", "integration_type": "collector", "id": "python.d.plugin-gearman-Gearman", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/gearman/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "go_expvar", "monitored_instance": {"name": "Go applications (EXPVAR)", "link": "https://pkg.go.dev/expvar", "categories": ["data-collection.apm"], "icon_filename": "go.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["go", "expvar", "application"], "most_popular": false}, "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. 
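Reading it from a script is just as simple. A minimal sketch (assuming the sample application above is listening on `127.0.0.1:8080`):\n\n```python\nimport json\nimport urllib.request\n\n# /debug/vars is the handler the expvar package registers\nwith urllib.request.urlopen('http://127.0.0.1:8080/debug/vars') as resp:\n    stats = json.load(resp)\n\nprint(stats['memstats']['Alloc'])  # bytes of allocated heap objects\n```\n\n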
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. 
Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hddtemp", "monitored_instance": {"name": "HDD temperature", "link": "https://linux.die.net/man/8/hddtemp", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hardware", "hdd temperature", "disk temperature", "temperature"], "most_popular": false}, "overview": "# HDD temperature\n\nPlugin: python.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt uses the `hddtemp` daemon to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Run `hddtemp` in daemon mode\n\nYou can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.\n\nSo running `hddtemp -d` would run the daemon, by default on port 7634.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hddtemp.conf\n```\n#### Options\n\nThere are 2 
sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\nBy default this collector will try to autodetect disks (autodetection works only for disks whose names start with \"sd\"). However, this can be overridden by setting the option `devices` to an array of desired disks.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |\n| host | The IP or HOSTNAME to connect to. | localhost | yes |\n| port | The port to connect to. | 7634 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\n```\n##### Custom disk names\n\nAn example defining the disk names to detect.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n devices:\n - customdisk1\n - customdisk2\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 7634\n\n```\n
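If you want to eyeball what the daemon actually returns before configuring the collector, the whole exchange is a single read from the TCP socket. A minimal sketch (the `|`-separated record format is hddtemp's standard daemon output):\n\n```python\nimport socket\n\n# hddtemp's daemon replies with records like |/dev/sda|DISK MODEL|35|C|\nwith socket.create_connection(('127.0.0.1', 7634), timeout=5) as sock:\n    raw = sock.recv(4096).decode('utf-8', 'replace')\n\nfor record in raw.split('||'):\n    fields = record.strip('|').split('|')\n    if len(fields) == 4:\n        device, model, temperature, unit = fields\n        print(device, temperature, unit)\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hddtemp debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 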
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HDD temperature instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.temperatures | a dimension per disk | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hddtemp-HDD_temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hddtemp/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hpssa", "monitored_instance": {"name": "HP Smart Storage Arrays", "link": "https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hp.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "hp", "hpssa", "array"], "most_popular": false}, "overview": "# HP Smart Storage Arrays\n\nPlugin: python.d.plugin\nModule: hpssa\n\n## Overview\n\nThis collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.\n\nIt uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to execute the `ssacli` binary.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the hpssa collector\n\nThe `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Allow user netdata to execute `ssacli` as root.\n\nThis module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n`which ssacli` shows the full path to the binary.\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/ssacli\n```\n\n- Reset Netdata's systemd\n unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux\n distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. 
Resetting is not optimal, but it is the next-best solution, given that `ssacli` cannot otherwise be executed with `sudo`.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hpssa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hpssa.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |\n| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |\n\n#### Examples\n\n##### Local simple config\n\nA basic configuration, specifying the path to `ssacli`.\n\n```yaml\nlocal:\n ssacli_path: /usr/sbin/ssacli\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hpssa debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HP Smart Storage Arrays instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |\n| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |\n| hpssa.ld_status | a dimension per logical drive | Status |\n| hpssa.pd_status | a dimension per physical drive | Status |\n| hpssa.pd_temperature | a dimension per physical drive | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hpssa-HP_Smart_Storage_Arrays", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hpssa/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "icecast", "monitored_instance": {"name": "Icecast", "link": "https://icecast.org/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "icecast.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["icecast", "streaming", "media"], "most_popular": false}, "overview": "# Icecast\n\nPlugin: python.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWithout configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/icecast.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |\n| user | Username to use to connect to `url` if it's password protected. | | no |\n| pass | Password to use to connect to `url` if it's password protected. | | no |\n\n#### Examples\n\n##### Remote Icecast server\n\nConfigure a remote icecast server\n\n```yaml\nremote:\n url: 'http://1.2.3.4:8443/status-json.xsl'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin icecast debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | a dimension for each active source | listeners |\n\n", "integration_type": "collector", "id": "python.d.plugin-icecast-Icecast", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/icecast/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ipfs", "monitored_instance": {"name": "IPFS", "link": "https://ipfs.tech/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ipfs.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IPFS\n\nPlugin: python.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS server metrics about its quality and performance.\n\nIt connects to an HTTP endpoint of the IPFS server to collect the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the endpoint is accessible by the Agent, netdata will autodetect it.\n\n#### Limits\n\nCalls to the following endpoints are disabled due to IPFS bugs:\n\n- /api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874)\n- /api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528)\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ipfs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| url | URL to the IPFS API | no | yes |\n| repoapi | Collect repo metrics. 
| no | no |\n| pinapi | Set status of IPFS pinned object polling. | no | no |\n\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\nremote_host:\n name: 'remote'\n url: 'http://192.0.2.1:5001'\n repoapi: no\n pinapi: no\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ipfs debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | kilobits/s |\n| ipfs.peers | peers | peers |\n| ipfs.repo_size | avail, size | GiB |\n| ipfs.repo_objects | objects, pinned, recursive_pins | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-ipfs-IPFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ipfs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "litespeed", "monitored_instance": {"name": "Litespeed", "link": "https://www.litespeedtech.com/products/litespeed-web-server", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "litespeed.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["litespeed", "web", "server"], "most_popular": false}, "overview": "# Litespeed\n\nPlugin: python.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/litespeed.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |\n\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files\n\n```yaml\nlocalhost:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin litespeed debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.connections | free, used | conns |\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.cache | hits | hits/s |\n| litespeed.cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-litespeed-Litespeed", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/litespeed/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "megacli", "monitored_instance": {"name": "MegaCLI", "link": "https://wikitech.wikimedia.org/wiki/MegaCli", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# MegaCLI\n\nPlugin: python.d.plugin\nModule: megacli\n\n## Overview\n\nExamine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.\n\nCollects adapter, physical drives and battery stats using the megacli command-line tool\n\nExecuted commands:\n\n - `sudo -n megacli -LDPDInfo -aAll`\n - `sudo -n megacli -AdpBbuCmd -a0`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the megacli command line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run megacli as sudoer\n\nThe module uses megacli, which can only be executed by root. 
It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\nAdd the following to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/megacli\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `megacli` using `sudo`.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/megacli.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| do_battery | Whether to collect battery statistics (adds an additional `megacli -AdpBbuCmd -a0` call). | no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: myname\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n
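### Verify sudo access\n\nIf charts are missing, a quick way to rule out permission problems is to run the collector's command the same way the collector does. A minimal sketch, run as root; replace `/path/to/megacli` with the real path reported by `which megacli`:\n\n```bash\n# Switch to the netdata user and attempt a passwordless sudo call,\n# mirroring what the collector executes internally.\nsudo -u netdata sudo -n /path/to/megacli -LDPDInfo -aAll\n```\n\nIf this prompts for a password or fails, revisit the sudoers entry and the CapabilityBoundingSet note above.\n\n### Debug Mode\n\nTo troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 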
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin megacli debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |\n| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |\n| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |\n| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |\n| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MegaCLI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_degraded | a dimension per adapter | is degraded |\n| megacli.pd_media_error | a dimension per physical drive | errors/s |\n| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |\n\n### Per battery\n\nMetrics related to Battery Backup Units; each BBU provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_relative_charge | adapter {battery id} | percentage |\n| megacli.bbu_cycle_count | adapter {battery id} | cycle count |\n\n", "integration_type": "collector", "id": "python.d.plugin-megacli-MegaCLI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/megacli/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "memcached", "monitored_instance": {"name": "Memcached", "link": "https://memcached.org/", "categories": ["data-collection.database-servers"], "icon_filename": "memcached.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memcached", "memcache", "cache", "database"], "most_popular": false}, "overview": "# Memcached\n\nPlugin: python.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. 
Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server's response to the stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/memcached.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| host | the host to connect to. | 127.0.0.1 | no |\n| port | the port to connect to. | 11211 | no |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### localhost\n\nAn example configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 11211\n\n```\n##### localipv4\n\nAn example configuration for localipv4.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 11211\n\n```\n##### localipv6\n\nAn example configuration for localipv6.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '::1'\n port: 11211\n\n```\n", "troubleshooting": "## Troubleshooting\n\n
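### Verify the stats interface\n\nYou can query the same stats interface the collector reads to confirm the server is reachable. A minimal sketch, assuming netcat (`nc`) is installed and the default address is used:\n\n```bash\n# Send the 'stats' command to memcached and show the first few lines.\n# printf's \\n sequences terminate each memcached protocol command.\nprintf 'stats\\nquit\\n' | nc 127.0.0.1 11211 | head\n```\n\nLines starting with `STAT` indicate the interface is working.\n\n### Debug Mode\n\nTo troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 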
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin memcached debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-memcached-Memcached", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/memcached/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "monit", "monitored_instance": {"name": "Monit", "link": "https://mmonit.com/monit/", "categories": ["data-collection.synthetic-checks"], "icon_filename": "monit.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["monit", "mmonit", "supervision tool", "monitrc"], "most_popular": false}, "overview": "# Monit\n\nPlugin: python.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.\n\n\nIt gathers data from Monit's XML interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to Monit at `http://localhost:2812`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data 
collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/monit.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |\n| user | Username in case the URL is password protected. | | no |\n| pass | Password in case the URL is password protected. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n\n```\n##### Basic Authentication\n\nExample using basic username and password in order to authenticate.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n user: 'foo'\n pass: 'bar'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:2812'\n\nremote_job:\n name: 'remote'\n url: 'http://192.0.2.1:2812'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin monit debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Monit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.filesystems | a dimension per target | filesystems |\n| monit.directories | a dimension per target | directories |\n| monit.files | a dimension per target | files |\n| monit.fifos | a dimension per target | pipes |\n| monit.programs | a dimension per target | programs |\n| monit.services | a dimension per target | processes |\n| monit.process_uptime | a dimension per target | seconds |\n| monit.process_threads | a dimension per target | threads |\n| monit.process_childrens | a dimension per target | children |\n| monit.hosts | a dimension per target | hosts |\n| monit.host_latency | a dimension per target | milliseconds |\n| monit.networks | a dimension per target | interfaces |\n\n", "integration_type": "collector", "id": "python.d.plugin-monit-Monit", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/monit/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "nsd", "monitored_instance": {"name": "Name Server Daemon", "link": "https://nsd.docs.nlnetlabs.nl/en/latest/#", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "nsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nsd", "name server daemon"], "most_popular": false}, "overview": "# Name Server Daemon\n\nPlugin: python.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more.\n\n\nIt uses the `nsd-control stats_noreset` command to gather metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### NSD version\n\nThe version of `nsd` must be 4.0+.\n\n\n#### Provide Netdata the permissions to run the command\n\nNetdata must have permissions to run the `nsd-control stats_noreset` command.\n\nYou can:\n\n- Add \"netdata\" user to \"nsd\" group:\n ```\n usermod -aG nsd netdata\n ```\n- Add Netdata to sudoers\n 1. Edit the sudoers file:\n ```\n visudo -f /etc/sudoers.d/netdata\n ```\n 2. 
Add the entry:\n ```\n Defaults:netdata !requiretty\n netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset\n ```\n\n > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/nsd.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | The command to run | nsd-control stats_noreset | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: 'nsd_local'\n command: 'nsd-control stats_noreset'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n
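### Verify permissions\n\nBefore digging into the plugin itself, you can confirm that the netdata user is able to run the collector's command. A minimal sketch; use whichever line matches the setup you chose above:\n\n```bash\n# Group-based setup: run the command directly as the netdata user.\nsudo -u netdata nsd-control stats_noreset | head\n\n# Sudoers-based setup: mirror the collector's 'sudo' command option.\nsudo -u netdata sudo -n /usr/sbin/nsd-control stats_noreset | head\n```\n\nEither command should print a list of statistics; an error here points to a permissions problem rather than a collector problem.\n\n### Debug Mode\n\nTo troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin nsd debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 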
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Name Server Daemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.zones | master, slave | zones |\n| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |\n| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |\n| nsd.transfer | NOTIFY, AXFR | queries/s |\n| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-nsd-Name_Server_Daemon", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/nsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "openldap", "monitored_instance": {"name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "statsd.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["openldap", "RBAC", "Directory access"], "most_popular": false}, "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an OpenLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all the prerequisites are checked.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the OpenLDAP server to expose metrics to monitor it.\n\nFollow instructions from https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install the python-ldap module\n\nInstall the python-ldap module.\n\n1. With the pip package manager:\n\n```bash\npip install python-ldap\n```\n\n2. With the apt package manager (on most deb-based distros):\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. 
With the yum package manager (on most rpm-based distros):\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access the OpenLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with the right to access the monitor statistics | | yes |\n| password | The password for the bound user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a STARTTLS connection is used over ldap:// | no | no |\n| cert_check | Set to False to skip the certificate check | True | yes |\n| timeout | Connection timeout in seconds | | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncert_check: True\ntimeout: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n
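### Verify the monitor interface\n\nYou can confirm that the `cn=Monitor` subtree is exposed and that your credentials work by querying it with `ldapsearch`. A minimal sketch using the values from the basic example above (adjust the bind DN, password, and server to your setup):\n\n```bash\n# Bind as the monitoring user and read the cn=Monitor base entry,\n# requesting operational attributes ('+'), where the monitor counters live.\nldapsearch -x -H ldap://localhost:389 -D 'cn=admin' -w 'pass' -b 'cn=Monitor' -s base '(objectClass=*)' '+'\n```\n\nIf this returns an entry instead of an error, the collector should be able to gather statistics with the same settings.\n\n### Debug Mode\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 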
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-openldap-OpenLDAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "oracledb", "monitored_instance": {"name": "Oracle DB", "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", "categories": ["data-collection.database-servers"], "icon_filename": "oracle.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "oracle", "data warehouse", "SQL"], "most_popular": false}, "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach.\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n
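Optionally, you can confirm that the new account can log in and read the dynamic performance views before configuring the collector. A minimal sketch, assuming `sqlplus` is installed and the service name is `XE` as in the examples below; replace `<password>` with the password you set:\n\n```bash\n# Connect as the netdata user and read a v$ view exposed by SELECT_CATALOG_ROLE.\necho 'select count(*) from v$session;' | sqlplus -s netdata/<password>@//localhost:1521/XE\n```\n\n\n#### Edit the configuration\n\nEdit the configuration:\n\n1. 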
Provide a valid user for the netdata collector to access the database.\n2. Specify the network target this database is listening on.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | One of the strings \"tcp\" or \"tcps\" indicating whether to use unencrypted network traffic or encrypted network traffic | no | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration, two jobs described for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Oracle DB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", "integration_type": "collector", "id": "python.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "pandas", "monitored_instance": {"name": "Pandas", "link": "https://pandas.pydata.org/", "categories": ["data-collection.generic-data-collection"], "icon_filename": "pandas.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pandas", "python"], "most_popular": false}, "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for 
this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n#### Examples\n\n##### Temperature API Example\n\nAn example pulling some hourly temperature data: one chart for today's forecast (mean, min, max) and another for the current temperature.\n\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n##### API CSV Example\n\nAn example showing a `read_csv` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n##### API JSON Example\n\nAn example showing a `read_json` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n 
pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n##### XML Example\n\nAn example showing a `read_xml` from a URL and some light pandas data wrangling.\n\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forecast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n##### SQL Example\n\nAn example showing a `read_sql` from a Postgres database using SQLAlchemy.\n\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector expects one row in the final pandas DataFrame. 
It is that first row that will be taken\nas the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", "id": "python.d.plugin-pandas-Pandas", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "postfix", "monitored_instance": {"name": "Postfix", "link": "https://www.postfix.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "postfix.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["postfix", "mail", "mail server"], "most_popular": false}, "overview": "# Postfix\n\nPlugin: python.d.plugin\nModule: postfix\n\n## Overview\n\nKeep an eye on Postfix metrics for efficient mail server operations. \nImprove your mail server performance with Netdata's real-time metrics and built-in alerts.\n\n\nMonitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPostfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. To do so, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.\nSee the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.\n
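\nFor example, a minimal sketch using `postconf` (note that `postconf -e` replaces any existing value for the parameter, so merge in any users already listed):\n\n```bash\n# Grant the netdata user permission to view the mail queue, then reload Postfix.\npostconf -e 'authorized_mailq_users = netdata'\npostfix reload\n```\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 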
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin postfix debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-postfix-Postfix", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/postfix/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "puppet", "monitored_instance": {"name": "Puppet", "link": "https://www.puppet.com/", "categories": ["data-collection.ci-cd-systems"], "icon_filename": "puppet.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["puppet", "jvm heap"], "most_popular": false}, "overview": "# Puppet\n\nPlugin: python.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || 
cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/puppet.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n> Notes:\n> - The exact Fully Qualified Domain Name of the node should be used.\n> - Puppet Server/DB startup is usually VERY slow, so set a reasonably high retry count.\n> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | HTTP or HTTPS URL; the exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |\n| tls_verify | Control HTTPS server certificate verification. | False | no |\n| tls_ca_file | Optional CA (bundle) file to use | | no |\n| tls_cert_file | Optional client certificate file | | no |\n| tls_key_file | Optional client key file | | no |\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration\n\n```yaml\npuppetserver:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\n```\n##### TLS Certificate\n\nAn example using a TLS certificate\n\n```yaml\npuppetdb:\n url: 'https://fqdn.example.com:8081'\n tls_cert_file: /path/to/client.crt\n tls_key_file: /path/to/client.key\n autodetection_retry: 1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\npuppetserver1:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\npuppetserver2:\n url: 'https://fqdn.example2.com:8140'\n autodetection_retry: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin puppet debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm | committed, used | MiB |\n| puppet.jvm | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", "integration_type": "collector", "id": "python.d.plugin-puppet-Puppet", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/puppet/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "rethinkdbs", "monitored_instance": {"name": "RethinkDB", "link": "https://rethinkdb.com/", "categories": ["data-collection.database-servers"], "icon_filename": "rethinkdb.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["rethinkdb", "database", "db"], "most_popular": false}, "overview": "# RethinkDB\n\nPlugin: python.d.plugin\nModule: rethinkdbs\n\n## Overview\n\nThis collector monitors metrics about RethinkDB clusters and database servers.\n\nIt uses the `rethinkdb` Python module to connect to a RethinkDB server instance and gather statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to 127.0.0.1:28015.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Python module\n\nThe collector requires the `rethinkdb` Python module to be installed.\n
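\nFor example, a sketch of installing it with `pip` (the package is named `rethinkdb` on PyPI):\n\n```bash\nsudo pip install rethinkdb\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/rethinkdbs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/rethinkdbs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` 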
value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | Hostname or IP of the RethinkDB server. | localhost | no |\n| port | Port to connect to the RethinkDB server. | 28015 | no |\n| user | The username to use to connect to the RethinkDB server. | admin | no |\n| password | The password to use to connect to the RethinkDB server. | | no |\n| timeout | Sets the connection timeout for the RethinkDB server. | 2 | no |\n\n#### Examples\n\n##### Local RethinkDB server\n\nAn example of a configuration for a local RethinkDB server\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 28015\n user: \"user\"\n password: \"pass\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin rethinkdbs debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_connected_servers | connected, missing | servers |\n| rethinkdb.cluster_clients_active | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | reads, writes | documents/s |\n\n### Per database server\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.client_connections | connections | connections |\n| rethinkdb.clients_active | active | clients |\n| rethinkdb.queries | queries | queries/s |\n| rethinkdb.documents | reads, writes | documents/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-rethinkdbs-RethinkDB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "retroshare", "monitored_instance": {"name": "RetroShare", "link": "https://retroshare.cc/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "retroshare.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["retroshare", "p2p"], "most_popular": false}, "overview": "# RetroShare\n\nPlugin: python.d.plugin\nModule: retroshare\n\n## Overview\n\nThis collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.\n\nIt connects to the RetroShare web interface to gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### RetroShare web interface\n\nRetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/retroshare.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/retroshare.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |\n\n#### Examples\n\n##### Local RetroShare Web UI\n\nA basic configuration for a RetroShare server running on localhost.\n\n```yaml\nlocalhost:\n name: 'local retroshare'\n url: 'http://localhost:9090'\n\n```\n##### Remote RetroShare Web UI\n\nA basic configuration for a remote RetroShare server.\n\n```yaml\nremote:\n name: 'remote retroshare'\n url: 'http://1.2.3.4:9090'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin retroshare debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RetroShare instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| retroshare.bandwidth | Upload, Download | kilobits/s |\n| retroshare.peers | All friends, Connected friends | peers |\n| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |\n\n", "integration_type": "collector", "id": "python.d.plugin-retroshare-RetroShare", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/retroshare/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "riakkv", "monitored_instance": {"name": "RiakKV", "link": "https://riak.com/products/riak-kv/index.html", "categories": ["data-collection.database-servers"], "icon_filename": "riak.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "nosql", "big data"], "most_popular": false}, "overview": "# RiakKV\n\nPlugin: python.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nThis collector reads the database stats from the `/stats` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure RiakKV to enable /stats endpoint\n\nYou can follow the RiakKV configuration reference documentation for how to enable this.\n\nSource: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces\n
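\nFor example, the HTTP listener that serves the `/stats` endpoint is controlled by `listener.http.internal` in `riak.conf` (a sketch; keep it on localhost unless Netdata runs on another host):\n\n```\n# riak.conf: HTTP API listener (also serves /stats)\nlistener.http.internal = 127.0.0.1:8098\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/riakkv.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 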
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the server | no | yes |\n\n#### Examples\n\n##### Basic (default)\n\nA basic example configuration per job\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\nremote:\n url: 'http://192.0.2.1:8098/stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin riakkv debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |\n| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |\n| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |\n| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RiakKV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | errors | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n| riak.search.index | bad_entry, extract_fail | writes |\n\n", "integration_type": "collector", "id": "python.d.plugin-riakkv-RiakKV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/riakkv/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "samba", "monitored_instance": {"name": "Samba", "link": "https://www.samba.org/samba/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "samba.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["samba", "file sharing"], "most_popular": false}, "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt uses the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nOnce the permissions are satisfied, the `smbstatus -P` command is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- `smbstatus` program\n- `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level` (see the sketch at the end of this section)\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution given the inability to execute `smbstatus` using `sudo`.\n\n\n As the `root` user, do the following:\n\n ```bash\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n
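\nIf you enable profiling via `smb.conf`, a minimal sketch of the relevant setting looks like this (the documented levels are `off`, `count` and `on`; verify against your Samba version):\n\n```\n# smb.conf: enable profiling counters so `smbstatus -P` has data to report\n[global]\n smbd profiling level = on\n```\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 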
| yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-samba-Samba", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (lm-sensors)", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "temperature", "voltage", "current", "power", "fan", "energy", "humidity"], "most_popular": false}, "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: python.d.plugin\nModule: sensors\n\n## Overview\n\nExamine Linux Sensors metrics with Netdata for insights into hardware health and performance.\n\nEnhance your system's reliability with real-time hardware health insights.\n\n\nReads system sensors information (temperature, voltage, electric current, power, etc.) 
via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following types of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/sensors.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\ntypes:\n - temperature\n - fan\n - voltage\n - current\n - power\n - energy\n - humidity\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin sensors debug trace\n ```\n\n### lm-sensors doesn't work on your device\n\n\n\n### ACPI ring buffer errors are printed\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per chip\n\nMetrics related to chips. 
Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temperature | a dimension per sensor | Celsius |\n| sensors.voltage | a dimension per sensor | Volts |\n| sensors.current | a dimension per sensor | Ampere |\n| sensors.power | a dimension per sensor | Watt |\n| sensors.fan | a dimension per sensor | Rotations/min |\n| sensors.energy | a dimension per sensor | Joule |\n| sensors.humidity | a dimension per sensor | Percent |\n\n", "integration_type": "collector", "id": "python.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "smartd_log", "monitored_instance": {"name": "S.M.A.R.T.", "link": "https://linux.die.net/man/8/smartd", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "smart.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["smart", "S.M.A.R.T.", "SCSI devices", "ATA devices"], "most_popular": false}, "overview": "# S.M.A.R.T.\n\nPlugin: python.d.plugin\nModule: smartd_log\n\n## Overview\n\nThis collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.\n\n\nIt reads `smartd` log files to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nUpon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure `smartd` to write attribute information to files.\n\n`smartd` must be running with the `-A` option to write `smartd` attribute information to files.\n\nFor this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:\n\n```\n# dump smartd attrs info every 600 seconds\nsmartd_opts=\"-A /var/log/smartd/ -i 600\"\n```\n\nYou may need to create the smartd directory before smartd will write to it: \n\n```sh\nmkdir -p /var/log/smartd\n```\n\nOtherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See the `smartd` man page for more info on the `-A --attributelog=PREFIX` option.\n\n`smartd` appends logs at every run. It's strongly recommended to use `logrotate` for smartd files.\n
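\nA minimal `logrotate` sketch, assuming the `/var/log/smartd/` location used above (the file path `/etc/logrotate.d/smartd` is illustrative; adjust frequency and retention to your needs):\n\n```\n# /etc/logrotate.d/smartd (a sketch)\n/var/log/smartd/*.csv {\n weekly\n rotate 4\n compress\n missingok\n notifempty\n}\n```\n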
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/smartd_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/smartd_log.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | Path to smartd log files. | /var/log/smartd | yes |\n| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |\n| age | Time in minutes since the last dump to file. | 30 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\ncustom:\n name: smartd_log\n log_path: '/var/log/smartd/'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin smartd_log debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe metrics listed below are split in terms of availability on device type, SCSI or ATA.\n\n### Per S.M.A.R.T. 
instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | SCSI | ATA |\n|:------|:----------|:----|:---:|:---:|\n| smartd_log.read_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.seek_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.soft_read_error_rate | a dimension per device | errors | | \u2022 |\n| smartd_log.write_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.read_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.read_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.sata_interface_downshift | a dimension per device | events | | \u2022 |\n| smartd_log.udma_crc_error_count | a dimension per device | errors | | \u2022 |\n| smartd_log.throughput_performance | a dimension per device | value | | \u2022 |\n| smartd_log.seek_time_performance | a dimension per device | value | | \u2022 |\n| smartd_log.start_stop_count | a dimension per device | events | | \u2022 |\n| smartd_log.power_on_hours_count | a dimension per device | hours | | \u2022 |\n| smartd_log.power_cycle_count | a dimension per device | events | | \u2022 |\n| smartd_log.unexpected_power_loss | a dimension per device | events | | \u2022 |\n| smartd_log.spin_up_time | a dimension per device | ms | | \u2022 |\n| smartd_log.spin_up_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.calibration_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | \u2022 |\n| smartd_log.temperature_celsius | a dimension per device | celsius | \u2022 | \u2022 |\n| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.reserved_block_count | a dimension per device | percentage | | \u2022 |\n| smartd_log.program_fail_count | a dimension per device | errors | | \u2022 |\n| smartd_log.erase_fail_count | a dimension per device | failures | | \u2022 |\n| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | \u2022 |\n| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | \u2022 |\n| smartd_log.reallocation_event_count | a dimension per device | events | | \u2022 |\n| smartd_log.current_pending_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.percent_lifetime_used | a dimension per device | percentage | | \u2022 |\n| smartd_log.media_wearout_indicator | a dimension per device | percentage | | \u2022 |\n| smartd_log.nand_writes_1gib | a dimension per device | GiB | | \u2022 |\n\n", "integration_type": "collector", "id": "python.d.plugin-smartd_log-S.M.A.R.T.", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/smartd_log/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "spigotmc", "monitored_instance": {"name": "SpigotMC", "link": "", "categories": ["data-collection.gaming"], "icon_filename": 
"spigot.jfif"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["minecraft server", "spigotmc server", "spigot"], "most_popular": false}, "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n host: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using basic password for authentication with the remote console.\n\n```yaml\nlocal:\n name: local_server_pass\n host: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal_server:\n name : my_local_server\n host : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n host : 192.0.2.1\n port: 25575\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-spigotmc-SpigotMC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "squid", "monitored_instance": {"name": "Squid", "link": "http://www.squid-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "squid.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["squid", "web delivery", "squid caching proxy"], "most_popular": false}, "overview": "# Squid\n\nPlugin: python.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.\n\n\nIt collects metrics from the endpoint where Squid exposes its `counters` data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Squid's Cache Manager\n\nTake a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/squid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| host | The host to connect to. | | yes |\n| port | The port to connect to. | | yes |\n| request | The URL to request from Squid. | | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nexample_job_name:\n name: 'local'\n host: 'localhost'\n port: 3128\n request: 'cache_object://localhost:3128/counters'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal_job:\n name: 'local'\n host: '127.0.0.1'\n port: 3128\n request: 'cache_object://127.0.0.1:3128/counters'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 3128\n request: 'cache_object://192.0.2.1:3128/counters'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin squid debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-squid-Squid", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/squid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tomcat", "monitored_instance": {"name": "Tomcat", "link": "https://tomcat.apache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "tomcat.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["apache", "tomcat", "webserver", "websocket", "jakarta", "javaEE"], "most_popular": false}, "overview": "# Tomcat\n\nPlugin: python.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the HTTP endpoint `/manager/status` in XML format.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nYou need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and the Tomcat webserver are on the same host, the module attempts, without any configuration, to connect to http://localhost:8080/manager/status?XML=true without credentials. So it will probably fail.\n\n#### Limits\n\nThis module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't try to monitor it over a public network (the public internet). 
Credentials are transmitted by Netdata over an unencrypted connection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only `netdata` user, to monitor the `/status` endpoint.\n\nThis is necessary for configuring the collector.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tomcat.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |\n| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected | no | no |\n| pass | A valid password for the user in question. Required if the endpoint is password protected | no | no |\n| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009 | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:8080/manager/status?XML=true'\n\n```\n##### Using an IPv4 endpoint\n\nA typical configuration using an IPv4 endpoint\n\n```yaml\nlocal_ipv4:\n name : 'local'\n url : 'http://127.0.0.1:8080/manager/status?XML=true'\n\n```\n##### Using an IPv6 endpoint\n\nA typical configuration using an IPv6 endpoint\n\n```yaml\nlocal_ipv6:\n name : 'local'\n url : 'http://[::1]:8080/manager/status?XML=true'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tomcat debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.accesses | accesses, errors | requests/s |\n| tomcat.bandwidth | sent, received | KiB/s |\n| tomcat.processing_time | processing time | seconds |\n| tomcat.threads | current, busy | current threads |\n| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |\n| tomcat.jvm_eden | used, committed, max | MiB |\n| tomcat.jvm_survivor | used, committed, max | MiB |\n| tomcat.jvm_tenured | used, committed, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-tomcat-Tomcat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tomcat/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tor", "monitored_instance": {"name": "Tor", "link": "https://www.torproject.org/", "categories": ["data-collection.vpns"], "icon_filename": "tor.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["tor", "traffic", "vpn"], "most_popular": false}, "overview": "# Tor\n\nPlugin: python.d.plugin\nModule: tor\n\n## Overview\n\nThis collector monitors Tor bandwidth traffic.\n\nIt connects to the Tor control port to collect traffic statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe `stem` python library needs to be installed.\n\n\n#### Required Tor configuration\n\nAdd to /etc/tor/torrc:\n\nControlPort 9051\n\nFor more options, please read the manual.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be 
defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| control_addr | Tor control IP address | 127.0.0.1 | no |\n| control_port | Tor control port. Can be either a TCP port, or a path to a socket file. | 9051 | no |\n| password | Tor control password | | no |\n\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.\n\n```yaml\nlocal_tcp:\n name: 'local'\n control_port: 9051\n password: # if required\n\n```\n##### Local socket\n\nA basic local socket configuration\n\n```yaml\nlocal_socket:\n name: 'local'\n control_port: '/var/run/tor/control'\n password: # if required\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-tor-Tor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "uwsgi", "monitored_instance": {"name": "uWSGI", "link": "https://github.com/unbit/uwsgi/tree/2.0.21", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "uwsgi.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application server", "python", "web applications"], "most_popular": false}, "overview": "# uWSGI\n\nPlugin: python.d.plugin\nModule: uwsgi\n\n## Overview\n\nThis collector monitors uWSGI metrics about requests, workers, memory and more.\n\nIt collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `/tmp/stats.socket`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats server\n\nMake sure that your uWSGI instance exposes its metrics via a Stats server.\n\nSource: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/uwsgi.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default it is the job_name) | job_name | no |\n| socket | Path to the uWSGI stats socket, e.g. `/tmp/stats.socket`. | no | no |\n| host | The host to connect to | no | no |\n| port | The port to connect to | no | no |\n\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration, used by the autodetection mechanism by default. Since all the JOBs share the same name, only one of them can run at a time.\n\n```yaml\nsocket:\n name : 'local'\n socket : '/tmp/stats.socket'\n\nlocalhost:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nlocalipv4:\n name : 'local'\n host : '127.0.0.1'\n port : 1717\n\nlocalipv6:\n name : 'local'\n host : '::1'\n port : 1717\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nremote:\n name : 'remote'\n host : '192.0.2.1'\n port : 1717\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin uwsgi debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.requests | a dimension per worker | requests/s |\n| uwsgi.tx | a dimension per worker | KiB/s |\n| uwsgi.avg_rt | a dimension per worker | milliseconds |\n| uwsgi.memory_rss | a dimension per worker | MiB |\n| uwsgi.memory_vsz | a dimension per worker | MiB |\n| uwsgi.exceptions | exceptions | exceptions |\n| uwsgi.harakiris | harakiris | harakiris |\n| uwsgi.respawns | respawns | respawns |\n\n", "integration_type": "collector", "id": "python.d.plugin-uwsgi-uWSGI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "varnish", "monitored_instance": {"name": "Varnish", "link": "https://varnish-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "varnish.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["varnish", "varnishstat", "varnishd", "cache", "web server", "web cache"], "most_popular": false}, "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics about the HTTP accelerator: global statistics, Backends (VBE) and Storages (SMF, SMA, MSE).\n\nNote that both Varnish-Cache (free and open source) and Varnish-Plus (commercial/enterprise version) are supported.\n\n\nIt uses the `varnishstat` tool to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe `netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```bash\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a 
`job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-varnish-Varnish", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "w1sensor", "monitored_instance": {"name": "1-Wire Sensors", "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "1-wire.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "sensor", "1-wire"], "most_popular": false}, "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |\n\n#### Examples\n\n##### Provide human readable names\n\nAssociate two 1-Wire identifiers with human readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-w1sensor-1-Wire_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "zscores", "monitored_instance": {"name": "python.d zscores", "link": "https://en.wikipedia.org/wiki/Standard_score", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zscore", "z-score", "standard score", "standard deviation", "anomaly detection", "statistical anomaly detection"], "most_popular": false}, "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-zscores-python.d_zscores", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "slabinfo.plugin", "module_name": "slabinfo.plugin", "monitored_instance": {"name": "Linux kernel SLAB allocator statistics", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux kernel", "slab", "slub", "slob", "slabinfo"], "most_popular": false}, "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "tc.plugin", "module_name": "tc.plugin", "monitored_instance": {"name": "tc QoS classes", "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. 
Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to access the `tc` command to get the necessary metrics. To achieve this, netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to script `tc-qos-helper.sh` | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name |\n| group | The device family |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", "id": "tc.plugin-tc.plugin-tc_QoS_classes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "timex.plugin", "module_name": "timex.plugin", "monitored_instance": {"name": "Timex", "link": "", "categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses system call adjtimex on Linux and ntp_adjtime on FreeBSD or Mac to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make chart showing system clock synchronization state. 
"setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one of the options ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make a chart showing the system clock synchronization state. | yes | yes |\n| time offset | Make a chart showing the computed time offset between the local system and the reference clock | yes | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", "id": "timex.plugin-timex.plugin-Timex", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "xenstat.plugin", "module_name": "xenstat.plugin", "monitored_instance": {"name": "Xen XCP-ng", "link": "https://xenproject.org/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "xen.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domain statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid privileges.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for Xen is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n
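A concrete sketch of the CentOS case from the note above (repository and package names are taken from that note; other distributions differ):

```bash
# Enable the Xen repository, then install the required development libraries
yum install -y centos-release-xen
yum install -y xen-devel yajl-devel

# Afterwards, re-install Netdata from source so the installer detects the
# libraries and builds xenstat.plugin
```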
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", "integration_type": "collector", "id": "xenstat.plugin-xenstat.plugin-Xen_XCP-ng", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml", "related_resources": ""}, {"id": "deploy-alpinelinux", "meta": {"name": "Alpine Linux", "link": "https://www.alpinelinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "alpine.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-amazonlinux", "meta": {"name": "Amazon Linux", "link": "https://aws.amazon.com/amazon-linux-2/", "categories": ["deploy.operating-systems"], "icon_filename": "amazonlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this 
distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-archlinux", "meta": {"name": "Arch Linux", "link": "https://archlinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "archlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos", "meta": {"name": "CentOS", "link": "https://www.centos.org/", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos-stream", "meta": {"name": "CentOS Stream", "link": 
"https://www.centos.org/centos-stream", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n| 8 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-debian", "meta": {"name": "Debian", "link": "https://www.debian.org/", "categories": ["deploy.operating-systems"], "icon_filename": "debian.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n| 10 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-docker", "meta": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["deploy.docker-kubernetes"], "icon_filename": "docker.svg"}, "most_popular": true, 
"keywords": ["docker", "container", "containers"], "install_description": "Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n", "methods": [{"method": "Docker CLI", "commands": [{"channel": "nightly", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined netdata/netdata:edge\n"}, {"channel": "stable", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined netdata/netdata:stable\n"}]}, {"method": "Docker Compose", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - 
/etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}, {"method": "Docker Swarm", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}], "additional_info": "", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-fedora", "meta": {"name": "Fedora", "link": "https://www.fedoraproject.org/", "categories": ["deploy.operating-systems"], "icon_filename": "fedora.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy 
Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 39 | Core | x86_64, aarch64 | |\n| 38 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-freebsd", "meta": {"name": "FreeBSD", "link": "https://www.freebsd.org/", "categories": ["deploy.operating-systems"], "icon_filename": "freebsd.svg"}, "most_popular": true, "keywords": ["freebsd"], "install_description": "## Install dependencies\nPlease install the following packages using the command below:\n\n```sh\npkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\n\nThis step needs root privileges. Please respond in the affirmative for any relevant prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "fetch", "commands": [{"channel": "nightly", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-kubernetes", "meta": {"name": "Kubernetes (Helm)", "link": "", "categories": ["deploy.docker-kubernetes"], "icon_filename": "kubernetes.svg"}, "keywords": ["kubernetes", "container", "Orchestrator"], "install_description": "**Use `helm install` to install Netdata on your Kubernetes cluster.**\nFor a new installation use `helm install`; for existing clusters, add the content below to your `override.yaml` and then run `helm upgrade -f override.yaml netdata netdata/netdata`.\n", "methods": [{"method": "Helm", "commands": [{"channel": "nightly", "command": "helm install netdata netdata/netdata \\\n--set image.tag=edge\n"}, {"channel": "stable", "command": "helm install netdata netdata/netdata \\\n--set image.tag=stable\n"}]}, {"method": "Existing Cluster", "commands": [{"channel": "nightly", "command": "image:\n tag: edge\n\nrestarter:\n enabled: true\n\n"}, {"channel": "stable", "command": "image:\n tag: stable\n\nrestarter:\n enabled: true\n\n"}]}], 
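To sanity-check a Helm deployment afterwards, a sketch assuming the chart repository from the netdata/helmchart project, the default release name, and an `app=netdata` pod label (the label is an assumption):

```bash
# Add the chart repository if you have not already, then install and verify
helm repo add netdata https://netdata.github.io/helmchart/
helm repo update
helm install netdata netdata/netdata --set image.tag=stable
helm status netdata
kubectl get pods -l app=netdata
```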
"additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-linux-generic", "meta": {"name": "Linux", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "linux.svg"}, "keywords": ["linux"], "most_popular": true, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-macos", "meta": {"name": "macOS", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "macos.svg"}, "most_popular": true, "keywords": ["macOS", "mac", "apple"], "install_description": "Run the following command on your Intel based OSX, macOS servers to install and claim Netdata:", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-manjarolinux", "meta": {"name": "Manjaro Linux", "link": "https://manjaro.org/", "categories": ["deploy.operating-systems"], "icon_filename": "manjaro.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh 
--nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-opensuse", "meta": {"name": "SUSE Linux", "link": "https://www.suse.com/", "categories": ["deploy.operating-systems"], "icon_filename": "openSUSE.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-oraclelinux", "meta": {"name": "Oracle Linux", "link": "https://www.oracle.com/linux/", "categories": ["deploy.operating-systems"], "icon_filename": "oraclelinux.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": 
"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rhel", "meta": {"name": "Red Hat Enterprise Linux", "link": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux", "categories": ["deploy.operating-systems"], "icon_filename": "rhel.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rockylinux", "meta": {"name": "Rocky Linux", "link": "https://rockylinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "rocky.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh 
--nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-ubuntu", "meta": {"name": "Ubuntu", "link": "https://ubuntu.com/", "categories": ["deploy.operating-systems"], "icon_filename": "ubuntu.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 23.10 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-windows", "meta": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["deploy.operating-systems"], "icon_filename": "windows.svg"}, "keywords": ["windows"], "install_description": "1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install the Netdata Agent on Linux, FreeBSD, or Mac.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to the `windows.conf` file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n
4. Enable [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so the Windows nodes are displayed as separate nodes.\n", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "export-appoptics", "meta": {"name": "AppOptics", "link": "https://www.solarwinds.com/appoptics", "categories": ["export"], "icon_filename": "solarwinds.svg", "keywords": ["app optics", "AppOptics", "Solarwinds"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n
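As a sketch, on Debian or Ubuntu the two libraries usually come from the following packages (package names are an assumption and vary by distribution):

```bash
# Install the protobuf and snappy development libraries, then re-install
# Netdata so its installer detects them
sudo apt-get install -y libprotobuf-dev protobuf-compiler libsnappy-dev
```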
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`, as shown below. The URL parameter has a higher priority than the configuration option.\n\n\n
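A quick way to try that `filter` parameter against a local agent; the host, port, and `format=prometheus` output flavor are assumptions about a default install:

```bash
# Fetch only charts matching apps.*, using the same pattern syntax as
# the `send charts matching` option
curl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'
```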
##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-aws-kinesis", "meta": {"name": "AWS Kinesis", "link": "https://aws.amazon.com/kinesis/", "categories": ["export"], "icon_filename": "aws-kinesis.svg"}, "keywords": ["exporter", "AWS", "Kinesis"], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n- First, [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.\n- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n
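Before enabling the connector, it can help to confirm the destination stream exists. A sketch assuming the AWS CLI is installed, using the same placeholder names as the examples for this connector:

```bash
# Describe the stream the connector will write to (placeholders: adjust
# the stream name and region to your setup)
aws kinesis describe-stream-summary --stream-name your_stream_name --region us-east-1
```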
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record, in order to distribute records evenly across the available shards.\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a `filter` URL parameter that can be used while querying `allmetrics`.\n
The URL parameter has a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAn example configuration that also sets the AWS credentials and the destination stream explicitly.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-data", "meta": {"name": "Azure Data Explorer", "link": "https://azure.microsoft.com/en-us/pricing/details/data-explorer/", "categories": ["export"], "icon_filename": "azuredataex.jpg", "keywords": ["Azure Data Explorer", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata re-installed after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n
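\nFor example, an illustrative query against a local Agent (assuming the default port and the `prometheus` format of the `allmetrics` endpoint):\n\n```bash\ncurl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&filter=apps.*'\n```\n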
\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-event", "meta": {"name": "Azure Event Hub", "link": "https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about", "categories": ["export"], "icon_filename": "azureeventhub.png", "keywords": ["Azure Event Hub", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-bigquery", "meta": {"name": "Google BigQuery", "link": "https://cloud.google.com/bigquery/", "categories": ["export"], "icon_filename": "bigquery.png", "keywords": ["export", "Google BigQuery", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-blueflood", "meta": {"name": "Blueflood", "link": "http://blueflood.io/", "categories": ["export"], "icon_filename": "blueflood.png", "keywords": ["export", "Blueflood", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-chronix", "meta": {"name": "Chronix", "link": "https://dbdb.io/db/chronix", "categories": ["export"], "icon_filename": "chronix.png", "keywords": ["export", "chronix", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-cortex", "meta": {"name": "Cortex", "link": "https://cortexmetrics.io/", "categories": ["export"], "icon_filename": "cortex.png", "keywords": ["export", "cortex", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-crate", "meta": {"name": "CrateDB", "link": "https://crate.io/", "categories": ["export"], "icon_filename": "crate.svg", "keywords": ["export", "CrateDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-elastic", "meta": {"name": "ElasticSearch", "link": "https://www.elastic.co/", "categories": ["export"], "icon_filename": "elasticsearch.svg", "keywords": ["export", "ElasticSearch", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-gnocchi", "meta": {"name": "Gnocchi", "link": "https://wiki.openstack.org/wiki/Gnocchi", "categories": ["export"], "icon_filename": "gnocchi.svg", "keywords": ["export", "Gnocchi", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-google-pubsub", "meta": {"name": "Google Cloud Pub Sub", "link": "https://cloud.google.com/pubsub", "categories": ["export"], "icon_filename": "pubsub.png"}, "keywords": ["exporter", "Google Cloud", "Pub Sub"], "overview": "# Google Cloud Pub Sub\n\nExport metrics to the Google Cloud Pub/Sub service.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries.\n- Pub/Sub support also depends on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`.\n- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample:\n   ```yaml\n   destination = pubsub.googleapis.com\n   ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. 
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set with\n  `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n    enabled = yes\n    destination = pubsub.googleapis.com\n    credentials file = /etc/netdata/google_cloud_credentials.json\n    project id = my_project\n    topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": ""}, {"id": "export-graphite", "meta": {"name": "Graphite", "link": "https://graphite.readthedocs.io/en/latest/", "categories": ["export"], "icon_filename": "graphite.png"}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-influxdb", "meta": {"name": "InfluxDB", "link": "https://www.influxdata.com/", "categories": ["export"], "icon_filename": "influxdb.svg", "keywords": ["InfluxDB", "Influx", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-irondb", "meta": {"name": "IRONdb", "link": "https://docs.circonus.com/irondb/", "categories": ["export"], "icon_filename": "irondb.png", "keywords": ["export", "IRONdb", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-json", "meta": {"name": "JSON", "link": "https://learn.netdata.cloud/docs/exporting/json-document-databases", "categories": ["export"], "icon_filename": "json.svg"}, "keywords": ["exporter", "json"], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost:5448 | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample:\n   ```yaml\n   destination = localhost:5448\n   ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n    enabled = yes\n    destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `json:https:my_json_instance`.\n\n```yaml\n[json:my_json_instance]\n    enabled = yes\n    destination = localhost:5448\n    username = my_username\n    password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml", "troubleshooting": ""}, {"id": "export-kafka", "meta": {"name": "Kafka", "link": "https://kafka.apache.org/", "categories": ["export"], "icon_filename": "kafka.svg", "keywords": ["export", "Kafka", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-kairosdb", "meta": {"name": "KairosDB", "link": "https://kairosdb.github.io/", "categories": ["export"], "icon_filename": "kairos.png", "keywords": ["KairosDB", "kairos", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n    enabled = yes\n    destination = localhost:2003\n    username = my_username\n    password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n    enabled = yes\n    username = my_username\n    password = my_password\n    destination = 10.10.1.114:2003\n    # data source = average\n    # prefix = netdata\n    # hostname = my_hostname\n    # update every = 10\n    # buffer on failures = 10\n    # timeout ms = 20000\n    # send names instead of ids = yes\n    # send charts matching = *\n    # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-m3db", "meta": {"name": "M3DB", "link": "https://m3db.io/", "categories": ["export"], "icon_filename": "m3db.png", "keywords": ["export", "M3DB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n   ```yaml\n   destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n   ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-metricfire", "meta": {"name": "MetricFire", "link": "https://www.metricfire.com/", "categories": ["export"], "icon_filename": "metricfire.png", "keywords": ["export", "MetricFire", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
 | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).
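\n\nAs a rough worked example (using the defaults from the table above): with `update every = 10` and `buffer on failures = 10`, the connector retains about 10 x 10 = 100 seconds of metrics while the server is unreachable:\n\n```yaml\n update every = 10\n buffer on failures = 10\n # about 100 seconds of data are buffered before data loss begins\n```\n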
\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-mongodb", "meta": {"name": "MongoDB", "link": "https://www.mongodb.com/", "categories": ["export"], "icon_filename": "mongodb.svg"}, "keywords": ["exporter", "MongoDB"], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n
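\nFor instance, to export 10-second averages rather than the raw collected values, the relevant options might look like this (an incomplete, illustrative sketch, not a full section):\n\n```yaml\n[mongodb:my_instance]\n data source = average\n update every = 10\n```\n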
\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
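\n\nFor example, a destination URI with an explicit 5-second socket timeout might look like this (the host and value are illustrative):\n\n```yaml\ndestination = mongodb://localhost:27017/?socketTimeoutMS=5000\n```\n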
\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": ""}, {"id": "export-newrelic", "meta": {"name": "New Relic", "link": "https://newrelic.com/", "categories": ["export"], "icon_filename": "newrelic.svg", "keywords": ["export", "NewRelic", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no).
 | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol.
 For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-opentsdb", "meta": {"name": "OpenTSDB", "link": "https://github.com/OpenTSDB/opentsdb", "categories": ["export"], "icon_filename": "opentsdb.png"}, "keywords": ["exporter", "OpenTSDB", "scalable time series"], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata, installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no).
 | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
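\n\nA minimal sketch of enabling this option inside an exporter section (the section name is illustrative):\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n send names instead of ids = yes\n```\n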
\n\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml", "troubleshooting": ""}, {"id": "export-pgsql", "meta": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["export"], "icon_filename": "postgres.svg", "keywords": ["export", "PostgreSQL", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds.
 | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
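\n\nShown in context, the negative-match example from the description above (the section name is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n send hosts matching = !*child* *db*\n```\n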
\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-prometheus-remote", "meta": {"name": "Prometheus Remote Write", "link": "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage", "categories": ["export"], "icon_filename": "prometheus.svg"}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
 | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique).
 Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-quasar", "meta": {"name": "QuasarDB", "link": "https://doc.quasar.ai/master/", "categories": ["export"], "icon_filename": "quasar.jpeg", "keywords": ["export", "quasar", "quasarDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter.
 Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique).
 Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-splunk", "meta": {"name": "Splunk SignalFx", "link": "https://www.splunk.com/en_us/products/observability.html", "categories": ["export"], "icon_filename": "splunk.svg", "keywords": ["export", "splunk", "signalfx", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data.
 | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique).
 Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-thanos", "meta": {"name": "Thanos", "link": "https://thanos.io/", "categories": ["export"], "icon_filename": "thanos.png", "keywords": ["export", "thanos", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n
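\nA compact sketch combining several of these options for a Thanos receive endpoint (the port, URL path and hostname are illustrative assumptions; check your Thanos receive configuration):\n\n```yaml\n[prometheus_remote_write:thanos]\n enabled = yes\n destination = 10.11.14.2:19291\n remote write URL path = /api/v1/receive\n hostname = netdata-central-01\n```\n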
\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server still fails to receive the data after that many iterations, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique).
 Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-tikv", "meta": {"name": "TiKV", "link": "https://tikv.org/", "categories": ["export"], "icon_filename": "tikv.png", "keywords": ["export", "TiKV", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter.
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list, is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.\n- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
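As a sketch, a central Netdata aggregating many nodes could export only database hosts while excluding their children, using the negative pattern described above (the instance name `my_instance` is illustrative):\n\n```yaml\n[prometheus_remote_write:my_instance]\n    # first match wins: exclude *child* hosts, then allow *db* hosts\n    send hosts matching = !*child* *db*\n```\n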
\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-timescaledb", "meta": {"name": "TimescaleDB", "link": "https://www.timescale.com/", "categories": ["export"], "icon_filename": "timescale.png", "keywords": ["export", "TimescaleDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-victoria", "meta": {"name": "VictoriaMetrics", "link": "https://victoriametrics.com/products/open-source/", "categories": ["export"], "icon_filename": "victoriametrics.png", "keywords": ["export", "victoriametrics", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-vmware", "meta": {"name": "VMware Aria", "link": "https://www.vmware.com/products/aria-operations-for-applications.html", "categories": ["export"], "icon_filename": "aria.png", "keywords": ["export", "VMware", "Aria", "Tanzu", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-wavefront", "meta": {"name": "Wavefront", "link": "https://docs.wavefront.com/wavefront_data_ingestion.html", "categories": ["export"], "icon_filename": "wavefront.png", "keywords": ["export", "Wavefront", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n    enabled = yes\n    destination = 10.11.14.2:2003\n    remote write URL path = /receive\n    username = my_username\n    password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "notify-alerta", "meta": {"name": "Alerta", "link": "https://alerta.io/", "categories": ["notify.agent"], "icon_filename": "alerta.png"}, "keywords": ["Alerta"], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts or from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role, by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice. | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"}, {"id": "notify-awssns", "meta": {"name": "AWS SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.agent"], "icon_filename": "aws.svg"}, "keywords": ["AWS SNS"], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. 
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the string that you want the alert to be sent as. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. | | yes |\n\n
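Before enabling the integration, you can optionally verify, from the Netdata user's environment, that the CLI credentials are able to publish to the topic (the Topic ARN below is a placeholder):\n\n```bash\n# should return a MessageId on success\naws sns publish --topic-arn \"arn:aws:sns:us-east-2:123456789012:MyTopic\" --message \"test from Netdata\"\n```\n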
\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp when this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```conf\n#------------------------------------------------------------------------------\n# Amazon SNS notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand to test the alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"}, {"id": "notify-cloud-awssns", "meta": {"name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.cloud"], "icon_filename": "awssns.png"}, "keywords": ["awssns"], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notifications you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- Have an AWS account with AWS SNS access; for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the webhook integration on AWS SNS you need:\n1. [Setting up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On the AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created (see the policy sketch after this list)\n - Finally, click on **Create topic** on the bottom of the page\n3. Now, use the new **Topic ARN** while adding AWS SNS integration on your space.\n\n
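To illustrate the access policy step above, a minimal policy statement granting the Netdata account publish rights could look like the following sketch (only the Netdata account ID **123269920060** comes from this guide; the topic ARN is a placeholder):\n\n```json\n{\n  \"Effect\": \"Allow\",\n  \"Principal\": { \"AWS\": \"arn:aws:iam::123269920060:root\" },\n  \"Action\": \"SNS:Publish\",\n  \"Resource\": \"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n}\n```\n\n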
", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.cloud"], "icon_filename": "discord.png"}, "keywords": ["discord", "community"], "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- You need to have a Discord server able to receive webhook integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use the Webhook URL to add your notification configuration on the Netdata UI (you can first verify the webhook with the sketch below)\n\n
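As a quick check before wiring it into Netdata, you can POST a test message to the webhook (the URL below is a placeholder):\n\n```bash\n# Discord replies with HTTP 204 when the message is accepted\ncurl -H 'Content-Type: application/json' -d '{\"content\": \"test from Netdata setup\"}' 'https://discord.com/api/webhooks/YOUR_ID/YOUR_TOKEN'\n```\n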
\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Discord:\n - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mattermost", "meta": {"name": "Mattermost", "link": "https://mattermost.com/", "categories": ["notify.cloud"], "icon_filename": "mattermost.png"}, "keywords": ["mattermost"], "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n
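You can optionally verify the endpoint the same way, by posting a test payload to it (using the placeholder endpoint shape shown above):\n\n```bash\n# Mattermost replies with \"ok\" when the payload is accepted\ncurl -i -X POST -H 'Content-Type: application/json' -d '{\"text\": \"test from Netdata setup\"}' 'https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx'\n```\n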
\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-microsoftteams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams", "categories": ["notify.cloud"], "icon_filename": "teams.svg"}, "keywords": ["microsoft", "teams"], "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **administrator**.\n- The Space to be on **Business** plan or higher.\n- A [Microsoft 365 for Business Account](https://www.microsoft.com/en-us/microsoft-365/business). Note that this is a **paid** account.\n\n### Settings on Microsoft Teams\n\n- The integration gets enabled at a team's channel level.\n- Click on the `...` (aka three dots) icon showing up next to the channel name; it should appear when you hover over it.\n- Click on `Connectors`.\n- Look for the `Incoming Webhook` connector and click configure.\n- Provide a name for your Incoming Webhook Connector, for example _Netdata Alerts_. You can also customize it with a proper icon instead of using the default image.\n- Click `Create`.\n- The _Incoming Webhook URL_ is created.\n- That is the URL to be provided to the Netdata Cloud configuration.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it.\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration.\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mobile-app", "meta": {"name": "Netdata Mobile App", "link": "https://netdata.cloud", "categories": ["notify.cloud"], "icon_filename": "netdata.png"}, "keywords": ["mobile-app", "phone", "personal-notifications"], "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to log in to the Netdata Mobile Application to receive alert and reachability notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose the Sign In Option\n - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Business Subscription**\n\n### Netdata Configuration Steps\n1. Click on the **User settings** on the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. Use the **Show QR Code** Option to log in on your mobile device by scanning the **QR Code**\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-opsgenie", "meta": {"name": "Opsgenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.cloud"], "icon_filename": "opsgenie.png"}, "keywords": ["opsgenie", "atlassian"], "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to the integrations tab of your team and click **Add integration**\n2. Pick **API** from the available integrations. Copy your API Key and press **Save Integration**.\n3. Paste the copied API key into the corresponding field in the **Integration configuration** section of the Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. 
Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.cloud"], "icon_filename": "pagerduty.png"}, "keywords": ["pagerduty"], "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key**, which you will need to add to your notification configuration on the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-rocketchat", "meta": {"name": "RocketChat", "link": "https://www.rocket.chat/", "categories": ["notify.cloud"], "icon_filename": "rocketchat.png"}, "keywords": ["rocketchat"], "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click **Save**.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details, please check RocketChat's article on [incoming webhooks](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For RocketChat:\n - Webhook URL - the URL provided on RocketChat for the channel where you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.cloud"], "icon_filename": "slack.png"}, "keywords": ["slack"], "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a Slack app on your workspace to receive the webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from the Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app, go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section, click **Add New Webhook to Workspace**\n - After pressing that, specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL, which you will need to add to your notification configuration on the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-splunk", "meta": {"name": "Splunk", "link": "https://splunk.com/", "categories": ["notify.cloud"], "icon_filename": "splunk-black.svg"}, "keywords": ["Splunk"], "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.cloud"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- The Telegram bot token and chat ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID your bot will deliver messages to\n\n### Getting the Telegram bot token and chat ID\n\n- Bot token: To create a bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-webhook", "meta": {"name": "Webhook", "link": "https://en.wikipedia.org/wiki/Webhook", "categories": ["notify.cloud"], "icon_filename": "webhook.svg"}, "keywords": ["generic webhooks", "webhooks"], "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Pro** plan or higher\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select to which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. To keep the communication secure, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - the Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - the default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n The Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected.\n\n The notification content sent to the destination service will be a JSON object having these properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | rooms | object[object(string,string)] | Object with the list of room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n
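\n To make the schema above concrete, a delivered notification body could look like the following sketch (every value here is hypothetical and shown only to illustrate the field types):\n\n ```json\n {\n \"message\": \"disk space utilization is at 92%\",\n \"alarm\": \"disk_space_usage\",\n \"info\": \"Total space utilization of disk /\",\n \"chart\": \"disk_space._\",\n \"context\": \"disk.space\",\n \"space\": \"my-space\",\n \"rooms\": [{\"name\": \"All nodes\", \"url\": \"https://app.netdata.cloud/\"}],\n \"family\": \"/\",\n \"class\": \"Utilization\",\n \"severity\": \"warning\",\n \"date\": \"2024-01-01T12:00:00Z\",\n \"duration\": \"5 minutes\",\n \"additional_active_critical_alerts\": 0,\n \"additional_active_warning_alerts\": 1,\n \"alarm_url\": \"https://app.netdata.cloud/\"\n }\n ```\n\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request\n\n | **Header** | **Value** |\n |:-------------------------------:|-----------------------------|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. 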
This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. To achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) and identifies itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
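\n If you want to sanity-check the file you just stored, one quick way (a minimal, hypothetical example; adjust `/path/to/Netdata_CA.pem` to wherever you saved the certificate) is to inspect it with `openssl`:\n\n ```bash\n # Print the subject and expiry date of the stored Netdata CA certificate\n openssl x509 -in /path/to/Netdata_CA.pem -noout -subject -enddate\n ```\n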
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n\n <VirtualHost *:443>\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n </VirtualHost>\n\n <Directory /var/www/>\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the URL of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - Your application will receive this request and must construct a response consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. 
The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n # the Flask application that will receive the challenge request\n app = Flask(__name__)\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n token = request.args.get('crc_token').encode('ascii')\n\n # creates HMAC SHA-256 hash from incoming token and your consumer secret\n sha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n # construct response data with base64 encoded hash\n response = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n }\n\n # returns properly formatted json response\n return json.dumps(response)\n ```\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-custom", "meta": {"name": "Custom", "link": "", "categories": ["notify.agent"], "icon_filename": "custom.png"}, "keywords": ["custom"], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. 
You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample custom_sender() function in health_alarm_notify.conf, to send an SMS via an imaginary HTTPS endpoint to the SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status : REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"}, {"id": "notify-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.agent"], "icon_filename": "discord.png"}, "keywords": ["Discord"], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"}, {"id": "notify-dynatrace", "meta": {"name": "Dynatrace", "link": "https://dynatrace.com", "categories": ["notify.agent"], "icon_filename": "dynatrace.svg"}, "keywords": ["Dynatrace"], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same on all your Netdata servers, but make sure the server is network-visible from your Netdata hosts. The Dynatrace server address should include the protocol prefix (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details.\n- An API Space. This is the URL part of the page you access to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event to be correlated with all the hosts tagged with the Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token. | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space; it is the URL part of the page you access to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. 
| Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"}, {"id": "notify-email", "meta": {"name": "Email", "link": "", "categories": ["notify.agent"], "icon_filename": "email.png"}, "keywords": ["email"], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the email to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. 
| root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"}, {"id": "notify-flock", "meta": {"name": "Flock", "link": "https://support.flock.com/", "categories": ["notify.agent"], "icon_filename": "flock.png"}, "keywords": ["Flock"], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | Set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"}, {"id": "notify-gotify", "meta": {"name": "Gotify", "link": "https://gotify.net/", "categories": ["notify.agent"], "icon_filename": "gotify.png"}, "keywords": ["gotify"], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/` | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"}, {"id": "notify-irc", "meta": {"name": "IRC", "link": "", "categories": ["notify.agent"], "icon_filename": "irc.png"}, "keywords": ["IRC"], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already registered name as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. 
| | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"}, {"id": "notify-kavenegar", "meta": {"name": "Kavenegar", "link": "https://kavenegar.com/", "categories": ["notify.agent"], "icon_filename": "kavenegar.png"}, "keywords": ["Kavenegar"], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides SMS sending and receiving as well as voice calls through its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"}, {"id": "notify-matrix", "meta": {"name": "Matrix", "link": "https://spec.matrix.org/unstable/push-gateway-api/", "categories": ["notify.agent"], "icon_filename": "matrix.svg"}, "keywords": ["Matrix"], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The room IDs that you want to send the notifications to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | Set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. 
| | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe room ids are unique identifiers and can be obtained from the room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"}, {"id": "notify-messagebird", "meta": {"name": "MessageBird", "link": "https://messagebird.com/", "categories": ["notify.agent"], "icon_filename": "messagebird.svg"}, "keywords": ["MessageBird"], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notifications to be sent to as SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"}, {"id": "notify-ntfy", "meta": {"name": "ntfy", "link": "https://ntfy.sh/", "categories": ["notify.agent"], "icon_filename": "ntfy.svg"}, "keywords": ["ntfy"], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. 
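For example (the topic name below is a hypothetical placeholder), publishing a message is a single HTTP request:\n\n```bash\ncurl -d \"Backup successful\" https://ntfy.sh/mytopic\n```\n\n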
It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
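As a quick sanity check (the token and topic below are hypothetical placeholders), you can verify that a token can publish to your topic with:\n\n```bash\ncurl -H \"Authorization: Bearer tk_XXXXXXXXXXXXXXXXXXXXXXXXX\" -d \"test message\" https://ntfy.sh/mytopic\n```\n\n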
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"}, {"id": "notify-opsgenie", "meta": {"name": "OpsGenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.agent"], "icon_filename": "opsgenie.png"}, "keywords": ["OpsGenie"], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"}, {"id": "notify-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.agent"], "icon_filename": "pagerduty.png"}, "keywords": ["PagerDuty"], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"}, {"id": "notify-prowl", "meta": {"name": "Prowl", "link": "https://www.prowlapp.com/", "categories": ["notify.agent"], "icon_filename": "prowl.png"}, "keywords": ["Prowl"], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. 
You can define multiple API keys like this: `APIKEY1` `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"}, {"id": "notify-pushbullet", "meta": {"name": "Pushbullet", "link": "https://www.pushbullet.com/", "categories": ["notify.agent"], "icon_filename": "pushbullet.png"}, "keywords": ["Pushbullet"], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | Set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"}, {"id": "notify-pushover", "meta": {"name": "PushOver", "link": "https://pushover.net/", "categories": ["notify.agent"], "icon_filename": "pushover.png"}, "keywords": ["PushOver"], "overview": "# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHOVER | Set `SEND_PUSHOVER` to YES | YES | yes |\n| PUSHOVER_APP_TOKEN | Set `PUSHOVER_APP_TOKEN` to your Pushover Application token. 
| | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"}, {"id": "notify-rocketchat", "meta": {"name": "RocketChat", "link": "https://rocket.chat/", "categories": ["notify.agent"], "icon_filename": "rocketchat.png"}, "keywords": ["RocketChat"], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES` | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | Set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"}, {"id": "notify-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.agent"], "icon_filename": "slack.png"}, "keywords": ["Slack"], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Slack app with an incoming webhook; read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to YES | YES | yes |\n| SLACK_WEBHOOK_URL | Set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"}, {"id": "notify-sms", "meta": {"name": "SMS", "link": "http://smstools3.kekekasvi.com/", "categories": ["notify.agent"], "icon_filename": "sms.svg"}, "keywords": ["SMS tools 3", "SMS", "Messaging"], "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nSMS Server Tools 3 is an SMS gateway package that can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n - Be a member of group `smsd`\n - To verify that the steps above are successful, `su netdata` and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`; otherwise, Netdata will search for it in your system `$PATH`. | | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | YES | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: PHONE1 PHONE2. 
| | yes |\n\n##### sendsms\n\n```sh\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n```\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"}, {"id": "notify-syslog", "meta": {"name": "syslog", "link": "", "categories": ["notify.agent"], "icon_filename": "syslog.png"}, "keywords": ["syslog"], "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging; by default this is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to YES; make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\nprefix defines what the log messages are prefixed with. 
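For instance, a target of `daemon.notice@loghost:514/netdata` (the host and port here are placeholders) selects the `daemon` facility at the `notice` level, delivers to `loghost` on port 514, and prefixes each message with `netdata`. 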
By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options; for more information on them, see your local logger and syslog documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata` `daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"}, {"id": "notify-teams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams/log-in", "categories": ["notify.agent"], "icon_filename": "msteams.svg"}, "keywords": ["Microsoft", "Teams", "MS teams"], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to YES | YES | yes |\n| MSTEAMS_WEBHOOK_URL | Set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams, the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"}, {"id": "notify-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.agent"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. 
To get one, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot` and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite the [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to YES | YES | yes |\n| TELEGRAM_BOT_TOKEN | Set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set `DEFAULT_RECIPIENT_TELEGRAM` to the chat ID you want the alert notifications to be sent to. You can define multiple chat IDs like this: -49999333322 -1009999222255. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"}, {"id": "notify-twilio", "meta": {"name": "Twilio", "link": "https://www.twilio.com/", "categories": ["notify.agent"], "icon_filename": "twilio.png"}, "keywords": ["Twilio"], "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID and Token from https://www.twilio.com/console\n- 
Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to YES | YES | yes |\n| TWILIO_ACCOUNT_SID | set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set DEFAULT_RECIPIENT_TWILIO to the number you want the alert notifications to be sent to. You can define multiple numbers like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing DEFAULT_RECIPIENT_TWILIO with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"}]} \ No newline at end of file +{"categories": [{"id": "deploy", "name": "Deploy", "description": "", "most_popular": true, "priority": 1, "children": [{"id": "deploy.operating-systems", "name": "Operating Systems", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "deploy.docker-kubernetes", "name": "Docker & Kubernetes", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "deploy.provisioning-systems", "parent": "deploy", "name": "Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection", "name": "Data Collection", "description": "", "most_popular": true, "priority": 2, "children": [{"id": "data-collection.other", "name": "Other", "description": "", 
"most_popular": false, "priority": -1, "collector_default": true, "children": []}, {"id": "data-collection.ebpf", "name": "eBPF", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd", "name": "FreeBSD", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.containers-and-vms", "name": "Containers and VMs", "description": "", "most_popular": true, "priority": 6, "children": []}, {"id": "data-collection.database-servers", "name": "Databases", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.kubernetes", "name": "Kubernetes", "description": "", "most_popular": true, "priority": 7, "children": []}, {"id": "data-collection.notifications", "name": "Incident Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.service-discovery-registry", "name": "Service Discovery / Registry", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.web-servers-and-web-proxies", "name": "Web Servers and Web Proxies", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.cloud-provider-managed", "name": "Cloud Provider Managed", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.windows-systems", "name": "Windows Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.apm", "name": "APM", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.hardware-devices-and-sensors", "name": "Hardware Devices and Sensors", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": "data-collection.macos-systems", "name": "macOS Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.message-brokers", "name": "Message Brokers", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.provisioning-systems", "name": "Provisioning Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.search-engines", "name": "Search Engines", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems", "name": "Linux Systems", "description": "", "most_popular": true, "priority": 5, "children": [{"id": "data-collection.linux-systems.system-metrics", "name": "System", "description": "", "most_popular": true, "priority": 1, "children": []}, {"id": "data-collection.linux-systems.memory-metrics", "name": "Memory", "description": "", "most_popular": true, "priority": 3, "children": []}, {"id": "data-collection.linux-systems.cpu-metrics", "name": "CPU", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "data-collection.linux-systems.pressure-metrics", "name": "Pressure", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.network-metrics", "name": "Network", "description": "", "most_popular": true, "priority": 5, "children": []}, {"id": "data-collection.linux-systems.ipc-metrics", "name": "IPC", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.disk-metrics", "name": "Disk", "description": "", "most_popular": true, "priority": 4, "children": []}, {"id": 
"data-collection.linux-systems.firewall-metrics", "name": "Firewall", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.power-supply-metrics", "name": "Power Supply", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics", "name": "Filesystem", "description": "", "most_popular": false, "priority": -1, "children": [{"id": "data-collection.linux-systems.filesystem-metrics.zfs", "name": "ZFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.btrfs", "name": "BTRFS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.linux-systems.filesystem-metrics.nfs", "name": "NFS", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.linux-systems.kernel-metrics", "name": "Kernel", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "data-collection.networking-stack-and-network-interfaces", "name": "Networking Stack and Network Interfaces", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.synthetic-checks", "name": "Synthetic Checks", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ci-cd-systems", "name": "CICD Platforms", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ups", "name": "UPS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.freebsd-systems", "name": "FreeBSD Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.logs-servers", "name": "Logs Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.security-systems", "name": "Security Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.observability", "name": "Observability", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.gaming", "name": "Gaming", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.iot-devices", "name": "IoT Devices", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.media-streaming-servers", "name": "Media Services", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.authentication-and-authorization", "name": "Authentication and Authorization", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.project-management", "name": "Project Management", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.application-servers", "name": "Application Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.dns-and-dhcp-servers", "name": "DNS and DHCP Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.mail-servers", "name": "Mail Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.processes-and-system-services", "name": "Processes and System Services", "description": "", 
"most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.storage-mount-points-and-filesystems", "name": "Storage, Mount Points and Filesystems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.systemd", "name": "Systemd", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.telephony-servers", "name": "Telephony Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.vpns", "name": "VPNs", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.blockchain-servers", "name": "Blockchain Servers", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.distributed-computing-systems", "name": "Distributed Computing Systems", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.generic-data-collection", "name": "Generic Data Collection", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.p2p", "name": "P2P", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.snmp-and-networked-devices", "name": "SNMP and Networked Devices", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.system-clock-and-ntp", "name": "System Clock and NTP", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.nas", "name": "NAS", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.api-gateways", "name": "API Gateways", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.task-queues", "name": "Task Queues", "description": "", "most_popular": false, "priority": -1, "children": []}, {"id": "data-collection.ftp-servers", "name": "FTP Servers", "description": "", "most_popular": false, "priority": -1, "children": []}]}, {"id": "logs", "name": "Logs", "description": "Monitoring logs on your infrastructure", "most_popular": true, "priority": 3, "children": []}, {"id": "export", "name": "exporters", "description": "Exporter Integrations", "most_popular": true, "priority": 5, "children": []}, {"id": "notify", "name": "notifications", "description": "Notification Integrations", "most_popular": true, "priority": 4, "children": [{"id": "notify.agent", "name": "Agent Dispatched Notifications", "description": "", "most_popular": true, "priority": 2, "children": []}, {"id": "notify.cloud", "name": "Centralized Cloud Notifications", "description": "", "most_popular": true, "priority": 1, "children": []}]}], "integrations": [{"meta": {"plugin_name": "apps.plugin", "module_name": "apps", "monitored_instance": {"name": "Applications", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "applications.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["applications", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Applications\n\nPlugin: apps.plugin\nModule: apps\n\n## Overview\n\nMonitor Applications for optimal software performance and resource usage.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per applications group\n\nThese metrics refer to the application group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.cpu_utilization | user, system | percentage |\n| app.cpu_guest_utilization | guest | percentage |\n| app.cpu_context_switches | voluntary, involuntary | switches/s |\n| app.mem_usage | rss | MiB |\n| app.mem_private_usage | mem | MiB |\n| app.vmem_usage | vmem | MiB |\n| app.mem_page_faults | minor, major | pgfaults/s |\n| app.swap_usage | swap | MiB |\n| app.disk_physical_io | reads, writes | KiB/s |\n| app.disk_logical_io | reads, writes | KiB/s |\n| app.processes | processes | processes |\n| app.threads | threads | threads |\n| app.fds_open_limit | limit | percentage |\n| app.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| app.uptime | uptime | seconds |\n| app.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-apps-Applications", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "groups", "monitored_instance": {"name": "User Groups", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "user.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["groups", "processes", "user auditing", "authorization", "os", "host monitoring"], "most_popular": false}, "overview": "# User Groups\n\nPlugin: apps.plugin\nModule: groups\n\n## Overview\n\nThis integration monitors resource utilization on a user group context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### 
Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user group\n\nThese metrics refer to the user group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user_group | The name of the user group. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| usergroup.cpu_utilization | user, system | percentage |\n| usergroup.cpu_guest_utilization | guest | percentage |\n| usergroup.cpu_context_switches | voluntary, involuntary | switches/s |\n| usergroup.mem_usage | rss | MiB |\n| usergroup.mem_private_usage | mem | MiB |\n| usergroup.vmem_usage | vmem | MiB |\n| usergroup.mem_page_faults | minor, major | pgfaults/s |\n| usergroup.swap_usage | swap | MiB |\n| usergroup.disk_physical_io | reads, writes | KiB/s |\n| usergroup.disk_logical_io | reads, writes | KiB/s |\n| usergroup.processes | processes | processes |\n| usergroup.threads | threads | threads |\n| usergroup.fds_open_limit | limit | percentage |\n| usergroup.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| usergroup.uptime | uptime | seconds |\n| usergroup.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-groups-User_Groups", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "apps.plugin", "module_name": "users", "monitored_instance": {"name": "Users", "link": "", "categories": ["data-collection.processes-and-system-services"], "icon_filename": "users.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["users", "processes", "os", "host monitoring"], "most_popular": false}, "overview": "# Users\n\nPlugin: apps.plugin\nModule: users\n\n## Overview\n\nThis integration monitors resource utilization on a user context.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | The name of the user. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| user.cpu_utilization | user, system | percentage |\n| user.cpu_guest_utilization | guest | percentage |\n| user.cpu_context_switches | voluntary, involuntary | switches/s |\n| user.mem_usage | rss | MiB |\n| user.mem_private_usage | mem | MiB |\n| user.vmem_usage | vmem | MiB |\n| user.mem_page_faults | minor, major | pgfaults/s |\n| user.swap_usage | swap | MiB |\n| user.disk_physical_io | reads, writes | KiB/s |\n| user.disk_logical_io | reads, writes | KiB/s |\n| user.processes | processes | processes |\n| user.threads | threads | threads |\n| user.fds_open_limit | limit | percentage |\n| user.fds_open | files, sockets, pipes, inotifies, event, timer, signal, eventpolls, other | fds |\n| user.uptime | uptime | seconds |\n| user.uptime_summary | min, avg, max | seconds |\n\n", "integration_type": "collector", "id": "apps.plugin-users-Users", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Containers", "link": "", "categories": ["data-collection.containers-and-vms"], "icon_filename": "container.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the 
instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Kubernetes Containers", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["k8s", "kubernetes", "pods", "containers"], "most_popular": true}, "overview": "# Kubernetes Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Kubernetes Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ k8s_cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ k8s_cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.mem_usage | cgroup memory utilization |\n| [ k8s_cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ k8s_cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | k8s.cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per k8s cgroup\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.cpu_limit | used | percentage |\n| k8s.cgroup.cpu | user, system | percentage |\n| k8s.cgroup.cpu_per_core | a dimension per core | percentage |\n| k8s.cgroup.throttled | throttled | percentage |\n| k8s.cgroup.throttled_duration | duration | ms |\n| k8s.cgroup.cpu_shares | shares | shares |\n| k8s.cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| k8s.cgroup.writeback | dirty, writeback | MiB |\n| k8s.cgroup.mem_activity | in, out | MiB/s |\n| k8s.cgroup.pgfaults | pgfault, swap | MiB/s |\n| k8s.cgroup.mem_usage | ram, swap | MiB |\n| k8s.cgroup.mem_usage_limit | available, used | MiB |\n| k8s.cgroup.mem_utilization | utilization | percentage |\n| k8s.cgroup.mem_failcnt | failures | count |\n| k8s.cgroup.io | read, write | KiB/s |\n| k8s.cgroup.serviced_ops | read, write | operations/s |\n| k8s.cgroup.throttle_io | read, write | KiB/s |\n| k8s.cgroup.throttle_serviced_ops | read, write | operations/s |\n| k8s.cgroup.queued_ops | read, write | operations |\n| k8s.cgroup.merged_ops | read, write | operations/s |\n| k8s.cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_some_pressure_stall_time | time | ms |\n| k8s.cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.cpu_full_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_some_pressure_stall_time | time | ms |\n| k8s.cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.memory_full_pressure_stall_time | time | ms |\n| k8s.cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_some_pressure_stall_time | time | ms |\n| k8s.cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| k8s.cgroup.io_full_pressure_stall_time | time | ms |\n| k8s.cgroup.pids_current | pids | pids |\n\n### Per k8s cgroup network device\n\nThese metrics refer to the Pod container network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. |\n| k8s_node_name | Node name. The value of _pod.spec.nodeName_. |\n| k8s_namespace | Namespace name. The value of _pod.metadata.namespace_. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). 
The value of _pod.OwnerReferences.Controller.Kind_. |\n| k8s_controller_name | Controller name. The value of _pod.OwnerReferences.Controller.Name_. |\n| k8s_pod_name | Pod name. The value of _pod.metadata.name_. |\n| k8s_container_name | Container name. The value of _pod.spec.containers.name_. |\n| k8s_kind | Instance kind: \"pod\" or \"container\". |\n| k8s_qos_class | QoS class (guaranteed, burstable, besteffort). |\n| k8s_cluster_id | Cluster ID. The value of kube-system namespace _namespace.metadata.uid_. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s.cgroup.net_net | received, sent | kilobits/s |\n| k8s.cgroup.net_packets | received, sent, multicast | pps |\n| k8s.cgroup.net_errors | inbound, outbound | errors/s |\n| k8s.cgroup.net_drops | inbound, outbound | errors/s |\n| k8s.cgroup.net_fifo | receive, transmit | errors/s |\n| k8s.cgroup.net_compressed | receive, sent | pps |\n| k8s.cgroup.net_events | frames, collisions, carrier | events/s |\n| k8s.cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| k8s.cgroup.net_carrier | up, down | state |\n| k8s.cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Kubernetes_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "LXC Containers", "link": "", "icon_filename": "lxc.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["lxc", "lxd", "container"], "most_popular": true}, "overview": "# LXC Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor LXC Containers for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 
cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-LXC_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Libvirt Containers", "link": "", "icon_filename": "libvirt.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["libvirt", "container"], "most_popular": true}, "overview": "# Libvirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Libvirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Libvirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Proxmox Containers", "link": "", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["proxmox", "container"], "most_popular": true}, "overview": "# Proxmox Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Proxmox for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Proxmox_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Systemd Services", "link": "", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"], "keywords": ["systemd", "services"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["containers"], "most_popular": true}, "overview": "# Systemd Services\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Systemd Services for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd service\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service_name | Service name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service.cpu.utilization | user, system | percentage |\n| systemd.service.memory.usage | ram, swap | MiB |\n| systemd.service.memory.failcnt | fail | failures/s |\n| systemd.service.memory.ram.usage | rss, cache, mapped_file, rss_huge | MiB |\n| systemd.service.memory.writeback | writeback, dirty | MiB |\n| systemd.service.memory.paging.faults | minor, major | MiB/s |\n| systemd.service.memory.paging.io | in, out | MiB/s |\n| systemd.service.disk.io | read, write | KiB/s |\n| systemd.service.disk.iops | read, write | operations/s |\n| systemd.service.disk.throttle.io | read, write | KiB/s |\n| systemd.service.disk.throttle.iops | read, write | operations/s |\n| systemd.service.disk.queued_iops | read, write | operations/s |\n| systemd.service.disk.merged_iops | read, write | operations/s |\n| systemd.service.pids.current | pids | pids |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Systemd_Services", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "Virtual Machines", "link": "", "icon_filename": "container.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vms", "virtualization", "container"], "most_popular": true}, "overview": "# Virtual Machines\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor Virtual Machines for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-Virtual_Machines", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cgroups.plugin", "module_name": "/sys/fs/cgroup", "monitored_instance": {"name": "oVirt Containers", "link": "", "icon_filename": "ovirt.svg", "categories": ["data-collection.containers-and-vms"]}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ovirt", "container"], "most_popular": true}, "overview": "# oVirt Containers\n\nPlugin: cgroups.plugin\nModule: /sys/fs/cgroup\n\n## Overview\n\nMonitor oVirt for performance, resource usage, and health status.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cgroup_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.cpu_limit | average cgroup CPU utilization over the last 10 minutes |\n| [ cgroup_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.mem_usage | cgroup memory utilization |\n| [ cgroup_1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ cgroup_10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cgroups.conf) | cgroup.net_packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cpu_limit | used | percentage |\n| cgroup.cpu | user, system | percentage |\n| cgroup.cpu_per_core | a dimension per core | percentage |\n| cgroup.throttled | throttled | percentage |\n| cgroup.throttled_duration | duration | ms |\n| cgroup.cpu_shares | shares | shares |\n| cgroup.mem | cache, rss, swap, rss_huge, mapped_file | MiB |\n| cgroup.writeback | dirty, writeback | MiB |\n| cgroup.mem_activity | in, out | MiB/s |\n| cgroup.pgfaults | pgfault, swap | MiB/s |\n| cgroup.mem_usage | ram, swap | MiB |\n| cgroup.mem_usage_limit | available, used | MiB |\n| cgroup.mem_utilization | utilization | percentage |\n| cgroup.mem_failcnt | failures | count |\n| cgroup.io | read, write | KiB/s |\n| cgroup.serviced_ops | read, write | operations/s |\n| cgroup.throttle_io | read, write | KiB/s |\n| cgroup.throttle_serviced_ops | read, write | operations/s |\n| cgroup.queued_ops | read, write | operations |\n| cgroup.merged_ops | read, write | operations/s |\n| cgroup.cpu_some_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_some_pressure_stall_time | time | ms |\n| cgroup.cpu_full_pressure | some10, some60, some300 | percentage |\n| cgroup.cpu_full_pressure_stall_time | time | ms |\n| cgroup.memory_some_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_some_pressure_stall_time | time | ms |\n| cgroup.memory_full_pressure | some10, some60, some300 | percentage |\n| cgroup.memory_full_pressure_stall_time | time | ms |\n| cgroup.io_some_pressure | some10, some60, some300 | percentage |\n| cgroup.io_some_pressure_stall_time | time | ms |\n| cgroup.io_full_pressure | some10, some60, some300 | percentage |\n| cgroup.io_full_pressure_stall_time | time | ms |\n| cgroup.pids_current | pids | pids |\n\n### Per cgroup network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container name or group path if name resolution fails. |\n| image | Docker/Podman container image name. |\n| device | The name of the host network interface linked to the container's network interface. |\n| container_device | Container network interface name. |\n| interface_type | Network interface type. Always \"virtual\" for the containers. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_net | received, sent | kilobits/s |\n| cgroup.net_packets | received, sent, multicast | pps |\n| cgroup.net_errors | inbound, outbound | errors/s |\n| cgroup.net_drops | inbound, outbound | errors/s |\n| cgroup.net_fifo | receive, transmit | errors/s |\n| cgroup.net_compressed | receive, sent | pps |\n| cgroup.net_events | frames, collisions, carrier | events/s |\n| cgroup.net_operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| cgroup.net_carrier | up, down | state |\n| cgroup.net_mtu | mtu | octets |\n\n", "integration_type": "collector", "id": "cgroups.plugin-/sys/fs/cgroup-oVirt_Containers", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cgroups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "ap", "monitored_instance": {"name": "Access Points", "link": "https://learn.netdata.cloud/docs/data-collection/networking-stack-and-network-interfaces/linux-access-points", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ap", "access", "point", "wireless", "network"], "most_popular": false}, "overview": "# Access Points\n\nPlugin: charts.d.plugin\nModule: ap\n\n## Overview\n\nThe ap collector visualizes data related to wireless access points.\n\nIt uses the `iw` command line utility to detect access points. For each interface that is of `type AP`, it then runs `iw INTERFACE station dump` and collects statistics.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin auto-detects access points running on your Linux host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### `iw` utility\n\nMake sure the `iw` utility is installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/ap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/ap.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the ap collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ap_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. 
| 1 | no |\n| ap_priority | Controls the order of charts on the netdata dashboard. | 6900 | no |\n| ap_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Change the collection frequency\n\nSpecify a custom collection frequency (update_every) for this collector\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\nap_update_every=10\n\n# the charts priority on the dashboard\n#ap_priority=6900\n\n# the number of retries to do in case of failure\n# before disabling the module\n#ap_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ap` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 ap\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ap.clients | clients | clients |\n| ap.net | received, sent | kilobits/s |\n| ap.packets | received, sent | packets/s |\n| ap.issues | retries, failures | issues/s |\n| ap.signal | average signal | dBm |\n| ap.bitrate | receive, transmit, expected | Mbps |\n\n", "integration_type": "collector", "id": "charts.d.plugin-ap-Access_Points", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/ap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "apcupsd", "monitored_instance": {"name": "APC UPS", "link": "https://www.apc.com", "categories": ["data-collection.ups"], "icon_filename": "apc.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ups", "apc", "power", "supply", "battery", "apcupsd"], "most_popular": false}, "overview": "# APC UPS\n\nPlugin: charts.d.plugin\nModule: apcupsd\n\n## Overview\n\nMonitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. 
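A quick way to confirm there is a UPS to monitor (assuming the daemon listens on its default address) is to query `apcupsd` directly:\n\n```sh\napcaccess status 127.0.0.1:3551\n```\n\n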
Enhance your power supply reliability with real-time APC UPS metrics.\n\nThe collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 using the `apcaccess` utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nMake sure `apcaccess` and `apcupsd` are installed and running.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/apcupsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/apcupsd.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the apcupsd collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |\n| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |\n| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |\n| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Multiple apcupsd sources\n\nSpecify multiple apcupsd sources along with a custom update interval\n\n```yaml\n# add all your APC UPSes in this array - uncomment it too\n# (entries are separated by whitespace, not commas)\ndeclare -A apcupsd_sources=(\n [\"local\"]=\"127.0.0.1:3551\"\n [\"remote\"]=\"1.2.3.4:3551\"\n)\n\n# how long to wait for apcupsd to respond\n#apcupsd_timeout=3\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\napcupsd_update_every=5\n\n# the charts priority on the dashboard\n#apcupsd_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#apcupsd_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 apcupsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |\n| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |\n| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |\n| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |\n| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |\n| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |\n| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |\n| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |\n| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |\n| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nMetrics related to UPS. 
Each UPS provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| apcupsd.charge | charge | percentage |\n| apcupsd.battery.voltage | voltage, nominal | Volts |\n| apcupsd.input.voltage | voltage, min, max | Volts |\n| apcupsd.output.voltage | absolute, nominal | Volts |\n| apcupsd.input.frequency | frequency | Hz |\n| apcupsd.load | load | percentage |\n| apcupsd.load_usage | load | Watts |\n| apcupsd.temperature | temp | Celsius |\n| apcupsd.time | time | Minutes |\n| apcupsd.online | online | boolean |\n| apcupsd.selftest | OK, NO, BT, NG | status |\n| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |\n\n", "integration_type": "collector", "id": "charts.d.plugin-apcupsd-APC_UPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "libreswan", "monitored_instance": {"name": "Libreswan", "link": "https://libreswan.org/", "categories": ["data-collection.vpns"], "icon_filename": "libreswan.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["vpn", "libreswan", "network", "ipsec"], "most_popular": false}, "overview": "# Libreswan\n\nPlugin: charts.d.plugin\nModule: libreswan\n\n## Overview\n\nMonitor Libreswan performance for optimal IPsec VPN operations. Improve your VPN operations with Netdata's real-time metrics and built-in alerts.\n\nThe collector uses the `ipsec` command to collect the information it needs.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Permissions to execute `ipsec`\n\nThe plugin executes 2 commands to collect all the information it needs:\n\n```sh\nipsec whack --status\nipsec whack --trafficstatus\n```\n\nThe first command is used to extract the currently established tunnels, their IDs and their names.\nThe second command is used to extract the current uptime and traffic.\n\nMost likely, the `netdata` user will not be able to query Libreswan, so the `ipsec` commands will be denied.\nThe plugin attempts to run `ipsec` as `sudo ipsec ...`, to gain access to Libreswan statistics.\n\nTo allow the `netdata` user to execute `sudo ipsec ...`, create the file `/etc/sudoers.d/netdata` with this content:\n\n```\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --status\nnetdata ALL = (root) NOPASSWD: /sbin/ipsec whack --trafficstatus\n```\n\nMake sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to find the right path).\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `charts.d/libreswan.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/libreswan.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the libreswan collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| libreswan_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |\n| libreswan_priority | The charts priority on the dashboard | 90000 | no |\n| libreswan_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n| libreswan_sudo | Whether to run `ipsec` with `sudo` or not. | 1 | no |\n\n#### Examples\n\n##### Run `ipsec` without sudo\n\nRun the `ipsec` utility without sudo\n\n```yaml\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#libreswan_update_every=1\n\n# the charts priority on the dashboard\n#libreswan_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#libreswan_retries=10\n\n# set to 1, to run ipsec with sudo (the default)\n# set to 0, to run ipsec without sudo\nlibreswan_sudo=0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `libreswan` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 libreswan\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPSEC tunnel\n\nMetrics related to IPSEC tunnels. 
Each tunnel provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| libreswan.net | in, out | kilobits/s |\n| libreswan.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "charts.d.plugin-libreswan-Libreswan", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/libreswan/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "opensips", "monitored_instance": {"name": "OpenSIPS", "link": "https://opensips.org/", "categories": ["data-collection.telephony-servers"], "icon_filename": "opensips.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["opensips", "sip", "voice", "video", "stream"], "most_popular": false}, "overview": "# OpenSIPS\n\nPlugin: charts.d.plugin\nModule: opensips\n\n## Overview\n\nExamine OpenSIPS metrics for insights into SIP server operations. Study call rates, error rates, and response times for reliable voice over IP services.\n\nThe collector uses the `opensipsctl` command line utility to gather OpenSIPS metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to call `opensipsctl` with a default set of parameters, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Required software\n\nThe collector requires `opensipsctl` to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/opensips.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/opensips.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the opensips collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| opensips_opts | Specify parameters to the `opensipsctl` command. If the default value fails to get global status, set here whatever options are needed to connect to the opensips server. | fifo get_statistics all | no |\n| opensips_cmd | If `opensipsctl` is not in $PATH, specify its full path here. | | no |\n| opensips_timeout | How long to wait for `opensipsctl` to respond. | 2 | no |\n| opensips_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 5 | no |\n| opensips_priority | The charts priority on the dashboard. 
| 80000 | no |\n| opensips_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Custom `opensipsctl` command\n\nSet a custom path to the `opensipsctl` command\n\n```yaml\n#opensips_opts=\"fifo get_statistics all\"\nopensips_cmd=/opt/opensips/bin/opensipsctl\n#opensips_timeout=2\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#opensips_update_every=5\n\n# the charts priority on the dashboard\n#opensips_priority=80000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#opensips_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `opensips` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 opensips\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenSIPS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| opensips.dialogs_active | active, early | dialogs |\n| opensips.users | registered, location, contacts, expires | users |\n| opensips.registrar | accepted, rejected | registrations/s |\n| opensips.transactions | UAS, UAC | transactions/s |\n| opensips.core_rcv | requests, replies | queries/s |\n| opensips.core_fwd | requests, replies | queries/s |\n| opensips.core_drop | requests, replies | queries/s |\n| opensips.core_err | requests, replies | queries/s |\n| opensips.core_bad | bad_URIs_rcvd, unsupported_methods, bad_msg_hdr | queries/s |\n| opensips.tm_replies | received, relayed, local | replies/s |\n| opensips.transactions_status | 2xx, 3xx, 4xx, 5xx, 6xx | transactions/s |\n| opensips.transactions_inuse | inuse | transactions |\n| opensips.sl_replies | 1xx, 2xx, 3xx, 4xx, 5xx, 6xx, sent, error, ACKed | replies/s |\n| opensips.dialogs | processed, expire, failed | dialogs/s |\n| opensips.net_waiting | UDP, TCP | kilobytes |\n| opensips.uri_checks | positive, negative | checks / sec |\n| opensips.traces | requests, replies | traces / sec |\n| opensips.shmem | total, used, real_used, max_used, free | kilobytes |\n| opensips.shmem_fragment | fragments | fragments |\n\n", "integration_type": "collector", "id": "charts.d.plugin-opensips-OpenSIPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/opensips/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "charts.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (sysfs)", "link": "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": 
{"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "sysfs", "hwmon", "rpi", "raspberry pi"], "most_popular": false}, "overview": "# Linux Sensors (sysfs)\n\nPlugin: charts.d.plugin\nModule: sensors\n\n## Overview\n\nUse this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).\nFor all other cases use the [Python collector](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values.\"\n\n\nIt will provide charts for all configured system sensors, by reading sensors directly from the kernel.\nThe values graphed are the raw hardware values of the sensors.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will try to read entries under `/sys/devices`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install charts.d plugin\n\nIf [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.\n\n\n#### Enable the sensors collector\n\nThe `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config charts.d.conf\n```\n\nChange the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `charts.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config charts.d/sensors.conf\n```\n#### Options\n\nThe config file is sourced by the charts.d plugin. It's a standard bash file.\n\nThe following collapsed table contains all the options that can be configured for the sensors collector.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |\n| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |\n| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |\n| sensors_update_every | The data collection frequency. 
If unset, will inherit the netdata update frequency. | 1 | no |\n| sensors_priority | The charts priority on the dashboard. | 90000 | no |\n| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |\n\n#### Examples\n\n##### Set sensors path depth\n\nSet a different sensors path depth\n\n```yaml\n# the directory the kernel keeps sensor data\n#sensors_sys_dir=\"/sys/devices\"\n\n# how deep in the tree to check for sensor data\nsensors_sys_depth=5\n\n# if set to 1, the script will overwrite internal\n# script functions with code generated ones\n# leave to 1, is faster\n#sensors_source_update=1\n\n# the data collection frequency\n# if unset, will inherit the netdata update frequency\n#sensors_update_every=\n\n# the charts priority on the dashboard\n#sensors_priority=90000\n\n# the number of retries to do in case of failure\n# before disabling the module\n#sensors_retries=10\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `charts.d.plugin` to debug the collector:\n\n ```bash\n ./charts.d.plugin debug 1 sensors\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per sensor chip\n\nMetrics related to sensor chips. Each chip provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temp | {filename} | Celsius |\n| sensors.volt | {filename} | Volts |\n| sensors.curr | {filename} | Ampere |\n| sensors.power | {filename} | Watt |\n| sensors.fans | {filename} | Rotations / Minute |\n| sensors.energy | {filename} | Joule |\n| sensors.humidity | {filename} | Percent |\n\n", "integration_type": "collector", "id": "charts.d.plugin-sensors-Linux_Sensors_(sysfs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "cups.plugin", "module_name": "cups.plugin", "monitored_instance": {"name": "CUPS", "link": "https://www.cups.org/", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "cups.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# CUPS\n\nPlugin: cups.plugin\nModule: cups.plugin\n\n## Overview\n\nMonitor CUPS performance for achieving optimal printing system operations. 
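As a quick check that there is a scheduler to monitor (assuming the standard CUPS client tools are installed), you can ask CUPS whether it is running:\n\n```sh\nlpstat -r\n```\n\n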
Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.\n\nThe plugin uses the CUPS shared library to connect to and monitor the server.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs access to the server. Netdata sets the required permissions at installation time so it can reach the server through its library.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin detects when the CUPS server is running and tries to connect to it.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nThe CUPS server must be installed and running. If you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-cups`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:cups]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CUPS instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.dests_state | idle, printing, stopped | dests |\n| cups.dests_option | total, acceptingjobs, shared | dests |\n| cups.job_num | pending, held, processing | jobs |\n| cups.job_size | pending, held, processing | KB |\n\n### Per destination\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cups.destination_job_num | pending, held, processing | jobs |\n| cups.destination_job_size | pending, held, processing | KB |\n\n", "integration_type": "collector", "id": "cups.plugin-cups.plugin-CUPS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/cups.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/extfrag", "monitored_instance": {"name": "System Memory Fragmentation", "link": "https://www.kernel.org/doc/html/next/admin-guide/sysctl/vm.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["extfrag", "extfrag_threshold", "memory fragmentation"], "most_popular": false}, "overview": "# System Memory Fragmentation\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/extfrag\n\n## Overview\n\nCollects memory fragmentation statistics from the Linux kernel.\n\nIt parses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/extfrag`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically run by default.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the overall memory fragmentation of the system.\n\n### Per node\n\nMemory fragmentation statistics for each NUMA node in the system.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | The NUMA node the metrics are associated with. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.fragmentation_index_dma | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_dma32 | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n| mem.fragmentation_index_normal | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/extfrag-System_Memory_Fragmentation", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "/sys/kernel/debug/zswap", "monitored_instance": {"name": "Linux ZSwap", "link": "https://www.kernel.org/doc/html/latest/admin-guide/mm/zswap.html", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "zswap", "frontswap", "swap cache"], "most_popular": false}, "overview": "# Linux ZSwap\n\nPlugin: debugfs.plugin\nModule: /sys/kernel/debug/zswap\n\n## Overview\n\nCollects zswap performance metrics on Linux systems.\n\n\nIt parses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/kernel/debug/zswap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
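On an installed system you can verify the capability with `getcap` (the plugin path below is the usual default and may differ on your install):\n\n```sh\ngetcap /usr/libexec/netdata/plugins.d/debugfs.plugin\n```\n\n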
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system is using zswap.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nMonitor the performance statistics of zswap.\n\n### Per Linux ZSwap instance\n\nGlobal zswap performance metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.zswap_pool_compression_ratio | compression_ratio | ratio |\n| system.zswap_pool_compressed_size | compressed_size | bytes |\n| system.zswap_pool_raw_size | uncompressed_size | bytes |\n| system.zswap_rejections | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s |\n| system.zswap_pool_limit_hit | limit | events/s |\n| system.zswap_written_back_raw_bytes | written_back | bytes/s |\n| system.zswap_same_filled_raw_size | same_filled | bytes |\n| system.zswap_duplicate_entry | duplicate | entries/s |\n\n", "integration_type": "collector", "id": "debugfs.plugin-/sys/kernel/debug/zswap-Linux_ZSwap", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "debugfs.plugin", "module_name": "intel_rapl", "monitored_instance": {"name": "Power Capping", "link": "https://www.kernel.org/doc/html/next/power/powercap/powercap.html", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["power capping", "energy"], "most_popular": false}, "overview": "# Power Capping\n\nPlugin: debugfs.plugin\nModule: intel_rapl\n\n## Overview\n\nCollects power capping performance metrics on Linux systems.\n\n\nIt parses data from a `debugfs` file.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to files under `/sys/devices/virtual/powercap`, which are accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming that debugfs is mounted and the required permissions are available, this integration will automatically detect whether or not the system supports Intel RAPL power capping.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### filesystem\n\nThe debugfs filesystem must be mounted on your host for the plugin to collect data. You can run `sudo mount -t debugfs none /sys/kernel/debug/` to mount it manually. It is also recommended to add an entry to your fstab(5) so the filesystem is mounted automatically before Netdata starts.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:debugfs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nMonitor the power consumption of the Intel RAPL zones.\n\n### Per Power Capping instance\n\nGlobal Intel RAPL zones.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.powercap_intel_rapl_zone | Power | Watts |\n| cpu.powercap_intel_rapl_subzones | dram, core, uncore | Watts |\n\n", "integration_type": "collector", "id": "debugfs.plugin-intel_rapl-Power_Capping", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/debugfs.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "diskspace.plugin", "module_name": "diskspace.plugin", "monitored_instance": {"name": "Disk space", "link": "", "categories": ["data-collection.linux-systems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "ebpf.plugin", "module_name": "disk"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "I/O", "space", "inode"], "most_popular": false}, "overview": "# Disk space\n\nPlugin: diskspace.plugin\nModule: diskspace.plugin\n\n## Overview\n\nMonitor Disk space metrics for proficient storage management. Keep track of usage, free space, and error rates to prevent disk space issues.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin reads data from the `/proc/self/mountinfo` and `/proc/diskstats` files.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:proc:diskspace]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can also set options per mount point, using a `[plugin:proc:diskspace:mountpoint]` section.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| remove charts of unmounted disks | Remove the chart when a device is unmounted on the host. | yes | no |\n| check for new mount points every | How often to parse the proc files for new mount points. | 15 | no |\n| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |\n| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |\n| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |\n| space usage for all disks | Define whether the plugin shows space usage metrics. When set to `auto`, the plugin shows the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n| inodes usage for all disks | Define whether the plugin shows inode usage metrics. When set to `auto`, the plugin shows the metrics if the filesystem or path was not excluded by the previous options. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mount_point | Path used to mount a filesystem |\n| filesystem | The filesystem used to format a partition. |\n| mount_root | Root directory where mount points are present. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "diskspace.plugin-diskspace.plugin-Disk_space", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/diskspace.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "cachestat", "monitored_instance": {"name": "eBPF Cachestat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Page cache", "Hit ratio", "eBPF"], "most_popular": false}, "overview": "# eBPF Cachestat\n\nPlugin: ebpf.plugin\nModule: cachestat\n\n## Overview\n\nMonitor Linux page cache events, giving users a general view of how the kernel manages files.\n\nAttaches tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread adds overhead every time an internal kernel function monitored by it is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF support.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck whether your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside your /boot/config file. Some of the cited names can differ, according to the preferences of Linux distributions.\nIf these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nThen follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/cachestat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/cachestat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin monitors only function calls (`entry`) or also monitors the returns (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type used to load the eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead) and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when it is enabled by the cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Cachestat instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.cachestat_ratio | ratio | % |\n| mem.cachestat_dirties | dirty | page/s |\n| mem.cachestat_hits | hit | hits/s |\n| mem.cachestat_misses | miss | misses/s |\n\n### Per apps\n\nThese metrics show information grouped per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_cachestat_hit_ratio | ratio | % |\n| app.ebpf_cachestat_dirty_pages | pages | page/s |\n| app.ebpf_cachestat_access | hits | hits/s |\n| app.ebpf_cachestat_misses | misses | misses/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.cachestat_ratio | ratio | % |\n| cgroup.cachestat_dirties | dirty | page/s |\n| cgroup.cachestat_hits | hit | hits/s |\n| cgroup.cachestat_misses | miss | misses/s |\n| services.cachestat_ratio | a dimension per systemd service | % |\n| services.cachestat_dirties | a dimension per systemd service | page/s |\n| services.cachestat_hits | a dimension per systemd service | hits/s |\n| services.cachestat_misses | a dimension per systemd service | misses/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-cachestat-eBPF_Cachestat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "dcstat", "monitored_instance": {"name": "eBPF DCstat", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Directory Cache", "File system", "eBPF"], "most_popular": false}, "overview": "# eBPF DCstat\n\nPlugin: ebpf.plugin\nModule: dcstat\n\n## Overview\n\nMonitor directory cache events per application, giving an overall view of files in memory or on storage devices.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. 
Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/dcstat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/dcstat.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_dc_ratio | ratio | % |\n| app.ebpf_dc_reference | files | files |\n| app.ebpf_dc_not_cache | files | files |\n| app.ebpf_dc_not_found | files | files |\n\n### Per filesystem\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.dc_reference | reference, slow, miss | files |\n| filesystem.dc_hit_ratio | ratio | % |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.dc_ratio | ratio | % |\n| cgroup.dc_reference | reference | files |\n| cgroup.dc_not_cache | slow | files |\n| cgroup.dc_not_found | miss | files |\n| services.dc_ratio | a dimension per systemd service | % |\n| services.dc_reference | a dimension per systemd service | files |\n| services.dc_not_cache | a dimension per systemd service | files |\n| services.dc_not_found | a dimension per systemd service | files |\n\n", "integration_type": "collector", "id": "ebpf.plugin-dcstat-eBPF_DCstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "disk", "monitored_instance": {"name": "eBPF Disk", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hard Disk", "eBPF", "latency", "partition"], "most_popular": false}, "overview": "# eBPF Disk\n\nPlugin: ebpf.plugin\nModule: disk\n\n## Overview\n\nMeasure latency for I/O events on disk.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/disk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/disk.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\nThese metrics measure latency for I/O events on every hard disk present on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.latency_io | latency | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-disk-eBPF_Disk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filedescriptor", "monitored_instance": {"name": "eBPF Filedescriptor", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["file", "eBPF", "fd", "open", "close"], "most_popular": false}, "overview": "# eBPF Filedescriptor\n\nPlugin: ebpf.plugin\nModule: filedescriptor\n\n## Overview\n\nMonitor calls to functions responsible for opening or closing a file descriptor, and possible errors.\n\nAttach tracing (kprobe and trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nDepending on the kernel version and the frequency with which files are opened and closed, this thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/fd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/fd.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |
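\n\nAs an illustration of the options above (a sketch only, using the documented defaults rather than a recommended setup), the `[global]` section of `ebpf.d/fd.conf` might look like:\n\n```text\n[global]\n    update every = 5\n    ebpf load mode = entry\n    apps = no\n    cgroups = no\n    pid table size = 32768\n    ebpf type format = auto\n    ebpf co-re tracing = trampoline\n    maps per core = yes\n    lifetime = 300\n```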
\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.fd_open | open | calls/s |\n| cgroup.fd_open_error | open | calls/s |\n| cgroup.fd_closed | close | calls/s |\n| cgroup.fd_close_error | close | calls/s |\n| services.file_open | a dimension per systemd service | calls/s |\n| services.file_open_error | a dimension per systemd service | calls/s |\n| services.file_closed | a dimension per systemd service | calls/s |\n| services.file_close_error | a dimension per systemd service | calls/s |\n\n### Per eBPF Filedescriptor instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.file_descriptor | open, close | calls/s |\n| filesystem.file_error | open, close | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_file_open | calls | calls/s |\n| app.ebpf_file_open_error | calls | calls/s |\n| app.ebpf_file_closed | calls | calls/s |\n| app.ebpf_file_close_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filedescriptor-eBPF_Filedescriptor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "filesystem", "monitored_instance": {"name": "eBPF Filesystem", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Filesystem", "ext4", "btrfs", "nfs", "xfs", "zfs", "eBPF", "latency", "I/O"], "most_popular": false}, "overview": "# eBPF Filesystem\n\nPlugin: ebpf.plugin\nModule: filesystem\n\n## Overview\n\nMonitor latency for the main filesystem actions, like I/O events.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/filesystem.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/filesystem.conf\n```\n#### Options\n\nThis configuration file has two different sections. `[global]` overwrites the default options, while `[filesystem]` allows the user to select the filesystems to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| btrfsdist | Enable or disable latency monitoring for functions associated with the btrfs filesystem. | yes | no |\n| ext4dist | Enable or disable latency monitoring for functions associated with the ext4 filesystem. | yes | no |\n| nfsdist | Enable or disable latency monitoring for functions associated with the nfs filesystem. | yes | no |\n| xfsdist | Enable or disable latency monitoring for functions associated with the xfs filesystem. | yes | no |\n| zfsdist | Enable or disable latency monitoring for functions associated with the zfs filesystem. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per filesystem\n\nLatency charts associated with filesystem actions.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.read_latency | latency period | calls/s |\n| filesystem.open_latency | latency period | calls/s |\n| filesystem.sync_latency | latency period | calls/s |\n\n### Per filesystem\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.write_latency | latency period | calls/s |\n\n### Per eBPF Filesystem instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.attributte_latency | latency period | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-filesystem-eBPF_Filesystem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "hardirq", "monitored_instance": {"name": "eBPF Hardirq", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["HardIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF Hardirq\n\nPlugin: ebpf.plugin\nModule: hardirq\n\n## Overview\n\nMonitor latency for each HardIRQ available.\n\nAttach tracepoints to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/hardirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/hardirq.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Hardirq instance\n\nThese metrics show the latency for each hardIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.hardirq_latency | hardirq names | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-hardirq-eBPF_Hardirq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mdflush", "monitored_instance": {"name": "eBPF MDflush", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["MD", "RAID", "eBPF"], "most_popular": false}, "overview": "# eBPF MDflush\n\nPlugin: ebpf.plugin\nModule: mdflush\n\n## Overview\n\nMonitor when flush events happen between disks.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.
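\n\nSince this thread only produces data when `md_flush_request` runs, it is only useful on hosts with md (software RAID) arrays. A quick way to confirm such arrays exist (a sketch; output layout varies by kernel):\n\n```bash\n# List md arrays known to the kernel; if none are listed, this module has nothing to monitor.\ncat /proc/mdstat\n```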
\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mdflush.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mdflush.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF MDflush instance\n\nNumber of times `md_flush_request` was called since the last data collection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mdstat.mdstat_flush | disk | flushes |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mdflush-eBPF_MDflush", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "mount", "monitored_instance": {"name": "eBPF Mount", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["mount", "umount", "device", "eBPF"], "most_popular": false}, "overview": "# eBPF Mount\n\nPlugin: ebpf.plugin\nModule: mount\n\n## Overview\n\nMonitor calls to the mount and umount syscalls.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
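\n\nYou can check whether `debugfs` is already mounted before issuing the command above. A minimal sketch (the mount point below is the conventional one, but it may differ on your system):\n\n```bash\n# Mount debugfs only when it is not mounted yet.\nmountpoint -q /sys/kernel/debug || mount -t debugfs none /sys/kernel/debug\n```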
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/mount.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/mount.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Mount instance\n\nCalls to the mount and umount syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mount_points.call | mount, umount | calls/s |\n| mount_points.error | mount, umount | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-mount-eBPF_Mount", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "oomkill", "monitored_instance": {"name": "eBPF OOMkill", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application", "memory"], "most_popular": false}, "overview": "# eBPF OOMkill\n\nPlugin: ebpf.plugin\nModule: oomkill\n\n## Overview\n\nMonitor applications that reach out-of-memory (OOM) conditions.\n\nAttach a tracepoint to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. 
Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/oomkill.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/oomkill.conf\n```\n#### Options\n\nOverwrite the default configuration to reduce the number of I/O events.\n\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### update every\n\n\n\n### ebpf load mode\n\n\n\n### lifetime\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show the cgroup/service that reached OOM.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.oomkills | cgroup name | kills |\n| services.oomkills | a dimension per systemd service | kills |\n\n### Per apps\n\nThese metrics show the applications that reached OOM.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.oomkill | kills | kills |\n\n", "integration_type": "collector", "id": "ebpf.plugin-oomkill-eBPF_OOMkill", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "process", "monitored_instance": {"name": "eBPF Process", "link": "https://github.com/netdata/netdata/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["Memory", "plugin", "eBPF"], "most_popular": false}, "overview": "# eBPF Process\n\nPlugin: ebpf.plugin\nModule: process\n\n## Overview\n\nMonitor internal memory usage.\n\nUses Netdata internal statistics to monitor memory management by the plugin.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Netdata flags\n\nTo have these charts, you need to compile Netdata with the flag `NETDATA_DEV_MODE`.\n\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Process instance\n\nHow the plugin is allocating memory.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_threads | total, running | threads |\n| netdata.ebpf_load_methods | legacy, co-re | methods |\n| netdata.ebpf_kernel_memory | memory_locked | bytes |\n| netdata.ebpf_hash_tables_count | hash_table | hash tables |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_aral_stat_size | memory | bytes |\n| netdata.ebpf_aral_stat_alloc | aral | calls |\n| netdata.ebpf_hash_tables_insert_pid_elements | thread | rows |\n| netdata.ebpf_hash_tables_remove_pid_elements | thread | rows |\n\n", "integration_type": "collector", "id": "ebpf.plugin-process-eBPF_Process", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "processes", "monitored_instance": {"name": "eBPF Processes", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["thread", "fork", "process", "eBPF"], "most_popular": false}, "overview": "# eBPF Processes\n\nPlugin: ebpf.plugin\nModule: processes\n\n## Overview\n\nMonitor calls to functions creating tasks (threads and processes) inside the Linux kernel.\n\nAttach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. 
Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/process.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/process.conf\n```\n#### Options\n\nAll options are defined inside section `[global]`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). This plugin will always try to attach a tracepoint, so the option here will impact only the function used to monitor task (thread and process) creation. | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Processes instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.process_thread | process | calls/s |\n| system.process_status | process, zombie | difference |\n| system.exit | process | calls/s |\n| system.task_error | task | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.process_create | calls | calls/s |\n| app.thread_create | call | calls/s |\n| app.task_exit | call | calls/s |\n| app.task_close | call | calls/s |\n| app.task_error | app | calls/s |\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.process_create | process | calls/s |\n| cgroup.thread_create | thread | calls/s |\n| cgroup.task_exit | exit | calls/s |\n| cgroup.task_close | process | calls/s |\n| cgroup.task_error | process | calls/s |\n| services.process_create | a dimension per systemd service | calls/s |\n| services.thread_create | a dimension per systemd service | calls/s |\n| services.task_close | a dimension per systemd service | calls/s |\n| services.task_exit | a dimension per systemd service | calls/s |\n| services.task_error | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-processes-eBPF_Processes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "shm", "monitored_instance": {"name": "eBPF SHM", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "shared memory", "eBPF"], "most_popular": false}, "overview": "# eBPF SHM\n\nPlugin: ebpf.plugin\nModule: shm\n\n## Overview\n\nMonitor the syscalls responsible for manipulating shared memory.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions, according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.\nWhen these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. 
Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/shm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/shm.conf\n```\n#### Options\n\nThis configuration file has two different sections. `[global]` overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin. | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin. | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| shmget | Enable or disable monitoring for the syscall `shmget`. | yes | no |\n| shmat | Enable or disable monitoring for the syscall `shmat`. | yes | no |\n| shmdt | Enable or disable monitoring for the syscall `shmdt`. | yes | no |\n| shmctl | Enable or disable monitoring for the syscall `shmctl`. | yes | no |
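\n\nAs a sketch of the `[syscalls]` section described above (the values shown are the documented defaults; set an entry to `no` to stop monitoring that syscall):\n\n```text\n[syscalls]\n    shmget = yes\n    shmat = yes\n    shmdt = yes\n    shmctl = yes\n```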
\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.shmget | get | calls/s |\n| cgroup.shmat | at | calls/s |\n| cgroup.shmdt | dt | calls/s |\n| cgroup.shmctl | ctl | calls/s |\n| services.shmget | a dimension per systemd service | calls/s |\n| services.shmat | a dimension per systemd service | calls/s |\n| services.shmdt | a dimension per systemd service | calls/s |\n| services.shmctl | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_shmget_call | calls | calls/s |\n| app.ebpf_shmat_call | calls | calls/s |\n| app.ebpf_shmdt_call | calls | calls/s |\n| app.ebpf_shmctl_call | calls | calls/s |\n\n### Per eBPF SHM instance\n\nThese metrics show the number of calls for the specified syscalls.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.shared_memory_calls | get, at, dt, ctl | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-shm-eBPF_SHM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "socket", "monitored_instance": {"name": "eBPF Socket", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["TCP", "UDP", "bandwidth", "server", "connection", "socket"], "most_popular": false}, "overview": "# eBPF Socket\n\nPlugin: ebpf.plugin\nModule: socket\n\n## Overview\n\nMonitor bandwidth consumption per application for the TCP and UDP protocols.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. 
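A quick check is sketched below; it assumes the running kernel exposes its configuration via `/proc/config.gz` (which needs `CONFIG_IKCONFIG_PROC`), with the copy under /boot as a fallback:\n\n```bash\n# Sketch: list the required eBPF-related flags from the running kernel's configuration\nzgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz 2>/dev/null || grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-$(uname -r)\n```\n\n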
Some of the cited names can differ according to the preferences of each Linux distribution.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/network.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/network.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section. Options inside `network connections` are ignored for now.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| bandwidth table size | Number of elements stored inside hash tables used to monitor calls per PID. | 16384 | no |\n| ipv4 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv4 connection. | 16384 | no |\n| ipv6 connection table size | Number of elements stored inside hash tables used to monitor calls per IPv6 connection. | 16384 | no |\n| udp connection table size | Number of temporary elements stored inside hash tables used to monitor UDP connections. | 4096 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Socket instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.inbound_conn | connection_tcp | connections/s |\n| ip.tcp_outbound_conn | received | connections/s |\n| ip.tcp_functions | received, send, closed | calls/s |\n| ip.total_tcp_bandwidth | received, send | kilobits/s |\n| ip.tcp_error | received, send | calls/s |\n| ip.tcp_retransmit | retransmited | calls/s |\n| ip.udp_functions | received, send | calls/s |\n| ip.total_udp_bandwidth | received, send | kilobits/s |\n| ip.udp_error | received, send | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_tcp_v4_connection | connections | connections/s |\n| app.ebpf_call_tcp_v6_connection | connections | connections/s |\n| app.ebpf_sock_bytes_sent | bandwidth | kilobits/s |\n| app.ebpf_sock_bytes_received | bandwidth | kilobits/s |\n| app.ebpf_call_tcp_sendmsg | calls | calls/s |\n| app.ebpf_call_tcp_cleanup_rbuf | calls | calls/s |\n| app.ebpf_call_tcp_retransmit | calls | calls/s |\n| app.ebpf_call_udp_sendmsg | calls | calls/s |\n| app.ebpf_call_udp_recvmsg | calls | calls/s |\n\n### Per cgroup\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.net_conn_ipv4 | connected_v4 | connections/s |\n| cgroup.net_conn_ipv6 | connected_v6 | connections/s |\n| cgroup.net_bytes_recv | received | calls/s |\n| cgroup.net_bytes_sent | sent | calls/s |\n| cgroup.net_tcp_recv | received | calls/s |\n| cgroup.net_tcp_send | sent | calls/s |\n| cgroup.net_retransmit | retransmitted | calls/s |\n| cgroup.net_udp_send | sent | calls/s |\n| cgroup.net_udp_recv | received | calls/s |\n| services.net_conn_ipv6 | a dimension per systemd service | connections/s |\n| services.net_bytes_recv | a dimension per systemd service | kilobits/s |\n| services.net_bytes_sent | a dimension per systemd service | kilobits/s |\n| services.net_tcp_recv | a dimension per systemd service | calls/s |\n| services.net_tcp_send | a dimension per systemd service | calls/s |\n| services.net_tcp_retransmit | a dimension per systemd service | calls/s |\n| services.net_udp_send | a dimension per systemd service | calls/s |\n| services.net_udp_recv | a dimension per systemd service | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-socket-eBPF_Socket", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "softirq", "monitored_instance": {"name": "eBPF SoftIRQ", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SoftIRQ", "eBPF"], "most_popular": false}, "overview": "# eBPF SoftIRQ\n\nPlugin: ebpf.plugin\nModule: softirq\n\n## Overview\n\nMonitor the latency of each SoftIRQ available.\n\nAttach kprobe to internal kernel functions.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/softirq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/softirq.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF SoftIRQ instance\n\nThese metrics show the latency of each softIRQ available on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirq_latency | soft IRQs | milliseconds |\n\n", "integration_type": "collector", "id": "ebpf.plugin-softirq-eBPF_SoftIRQ", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "swap", "monitored_instance": {"name": "eBPF SWAP", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["SWAP", "memory", "eBPF", "Hard Disk"], "most_popular": false}, "overview": "# eBPF SWAP\n\nPlugin: ebpf.plugin\nModule: swap\n\n## Overview\n\nMonitors swap I/O events and the applications executing them.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. 
Update your boot loader\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/swap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/swap.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.swap_read | read | calls/s |\n| cgroup.swap_write | write | calls/s |\n| services.swap_read | a dimension per systemd service | calls/s |\n| services.swap_write | a dimension per systemd service | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_swap_readpage | a dimension per app group | calls/s |\n| app.ebpf_call_swap_writepage | a dimension per app group | calls/s |\n\n### Per eBPF SWAP instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapcalls | write, read | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-swap-eBPF_SWAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "sync", "monitored_instance": {"name": "eBPF Sync", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["syscall", "eBPF", "hard disk", "memory"], "most_popular": false}, "overview": "# eBPF Sync\n\nPlugin: ebpf.plugin\nModule: sync\n\n## Overview\n\nMonitor the syscalls responsible for moving data from memory to the storage device.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader\n\n\n#### Debug Filesystem\n\nThis thread needs to attach a tracepoint to monitor when a process schedules an exit event. 
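To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`). An idempotent sketch of that mount (add an /etc/fstab entry to persist it across reboots):\n\n```bash\n# Mount debugfs only when it is not already mounted\nmountpoint -q /sys/kernel/debug || mount -t debugfs none /sys/kernel/debug\n```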
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/sync.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/sync.conf\n```\n#### Options\n\nThis configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select the syscalls to monitor.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n| sync | Enable or disable monitoring for syscall `sync` | yes | no |\n| msync | Enable or disable monitoring for syscall `msync` | yes | no |\n| fsync | Enable or disable monitoring for syscall `fsync` | yes | no |\n| fdatasync | Enable or disable monitoring for syscall `fdatasync` | yes | no |\n| syncfs | Enable or disable monitoring for syscall `syncfs` | yes | no |\n| sync_file_range | Enable or disable monitoring for syscall `sync_file_range` | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per eBPF Sync instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.file_sync | fsync, fdatasync | calls/s |\n| mem.meory_map | msync | calls/s |\n| mem.sync | sync, syncfs | calls/s |\n| mem.file_segment | sync_file_range | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-sync-eBPF_Sync", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ebpf.plugin", "module_name": "vfs", "monitored_instance": {"name": "eBPF VFS", "link": "https://kernel.org/", "categories": ["data-collection.ebpf"], "icon_filename": "ebpf.jpg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["virtual", "filesystem", "eBPF", "I/O", "files"], "most_popular": false}, "overview": "# eBPF VFS\n\nPlugin: ebpf.plugin\nModule: vfs\n\n## Overview\n\nMonitor I/O events on the Linux Virtual Filesystem.\n\nAttach tracing (kprobe, trampoline) to internal kernel functions according to the options used to compile the kernel.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid because it loads data inside the kernel. Netdata sets the necessary permissions during installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and the presence of BTF files to decide which eBPF program will be attached.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThis thread will add overhead every time an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Compile kernel\n\nCheck if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of each Linux distribution.\nWhen you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files\nwith different names.\n\nNow follow these steps:\n1. Copy the configuration file to /usr/src/linux/.config.\n2. Select the necessary options: make oldconfig\n3. Compile your kernel image: make bzImage\n4. Compile your modules: make modules\n5. Copy your new kernel image to the boot loader directory\n6. Install the new modules: make modules_install\n7. Generate an initial ramdisk image (`initrd`) if necessary.\n8. Update your boot loader (see the sketch after this list)
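\n\nThe same steps condensed into a shell sketch; every path below is illustrative and varies by distribution and architecture:\n\n```bash\n# Sketch only: the commands come from the numbered steps above; adjust all paths for your system\ncp /path/to/your/config /usr/src/linux/.config    # step 1 (source path is an assumption)\ncd /usr/src/linux\nmake oldconfig && make bzImage && make modules    # steps 2-4\ncp arch/x86/boot/bzImage /boot/vmlinuz-custom     # step 5 (x86 layout assumed)\nmake modules_install                              # step 6\n```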
\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ebpf.d/vfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ebpf.d/vfs.conf\n```\n#### Options\n\nAll options are defined inside the `[global]` section.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 5 | no |\n| ebpf load mode | Define whether the plugin will monitor only the call (`entry`) for the functions or also the return (`return`). | entry | no |\n| apps | Enable or disable integration with apps.plugin | no | no |\n| cgroups | Enable or disable integration with cgroup.plugin | no | no |\n| pid table size | Number of elements stored inside hash tables used to monitor calls per PID. | 32768 | no |\n| ebpf type format | Define the file type to load an eBPF program. Three options are available: `legacy` (attach only `kprobe`), `co-re` (the plugin tries to use `trampoline` when available), and `auto` (the plugin checks the OS configuration before loading). | auto | no |\n| ebpf co-re tracing | Select the attach method used by the plugin when `co-re` is set in the previous option. Two options are available: `trampoline` (the option with the lowest overhead), and `probe` (the same as the legacy code). | trampoline | no |\n| maps per core | Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized. | yes | no |\n| lifetime | Set the default lifetime for the thread when enabled by cloud. | 300 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per cgroup\n\nThese metrics show grouped information per cgroup/service.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cgroup.vfs_unlink | delete | calls/s |\n| cgroup.vfs_write | write | calls/s |\n| cgroup.vfs_write_error | write | calls/s |\n| cgroup.vfs_read | read | calls/s |\n| cgroup.vfs_read_error | read | calls/s |\n| cgroup.vfs_write_bytes | write | bytes/s |\n| cgroup.vfs_read_bytes | read | bytes/s |\n| cgroup.vfs_fsync | fsync | calls/s |\n| cgroup.vfs_fsync_error | fsync | calls/s |\n| cgroup.vfs_open | open | calls/s |\n| cgroup.vfs_open_error | open | calls/s |\n| cgroup.vfs_create | create | calls/s |\n| cgroup.vfs_create_error | create | calls/s |\n| services.vfs_unlink | a dimension per systemd service | calls/s |\n| services.vfs_write | a dimension per systemd service | calls/s |\n| services.vfs_write_error | a dimension per systemd service | calls/s |\n| services.vfs_read | a dimension per systemd service | calls/s |\n| services.vfs_read_error | a dimension per systemd service | calls/s |\n| services.vfs_write_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_read_bytes | a dimension per systemd service | bytes/s |\n| services.vfs_fsync | a dimension per systemd service | calls/s |\n| services.vfs_fsync_error | a dimension per systemd service | calls/s |\n| services.vfs_open | a dimension per systemd service | calls/s |\n| services.vfs_open_error | a dimension per systemd service | calls/s |\n| services.vfs_create | a dimension per systemd service | calls/s |\n| services.vfs_create_error | a dimension per systemd service | calls/s |\n\n### Per eBPF VFS instance\n\nThese metrics show the total number of calls to functions inside the kernel.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filesystem.vfs_deleted_objects | delete | calls/s |\n| filesystem.vfs_io | read, write | calls/s |\n| filesystem.vfs_io_bytes | read, write | bytes/s |\n| filesystem.vfs_io_error | read, write | calls/s |\n| filesystem.vfs_fsync | fsync | calls/s |\n| filesystem.vfs_fsync_error | fsync | calls/s |\n| filesystem.vfs_open | open | calls/s |\n| filesystem.vfs_open_error | open | calls/s |\n| filesystem.vfs_create | create | calls/s |\n| filesystem.vfs_create_error | create | calls/s |\n\n### Per apps\n\nThese metrics show grouped information per apps group.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| app_group | The name of the group defined in the configuration. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| app.ebpf_call_vfs_unlink | calls | calls/s |\n| app.ebpf_call_vfs_write | calls | calls/s |\n| app.ebpf_call_vfs_write_error | calls | calls/s |\n| app.ebpf_call_vfs_read | calls | calls/s |\n| app.ebpf_call_vfs_read_error | calls | calls/s |\n| app.ebpf_call_vfs_write_bytes | writes | bytes/s |\n| app.ebpf_call_vfs_read_bytes | reads | bytes/s |\n| app.ebpf_call_vfs_fsync | calls | calls/s |\n| app.ebpf_call_vfs_fsync_error | calls | calls/s |\n| app.ebpf_call_vfs_open | calls | calls/s |\n| app.ebpf_call_vfs_open_error | calls | calls/s |\n| app.ebpf_call_vfs_create | calls | calls/s |\n| app.ebpf_call_vfs_create_error | calls | calls/s |\n\n", "integration_type": "collector", "id": "ebpf.plugin-vfs-eBPF_VFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.0.freq", "monitored_instance": {"name": "dev.cpu.0.freq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.0.freq\n\nPlugin: freebsd.plugin\nModule: dev.cpu.0.freq\n\n## Overview\n\nRead the current CPU scaling frequency.\n\nCurrent CPU Scaling Frequency\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.0.freq | Enable or disable CPU Scaling frequency metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
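An instance is uniquely identified by a set of labels. The chart below mirrors a single sysctl OID, which you can read directly to cross-check the collected value; a sketch using the standard FreeBSD `sysctl` tool:\n\n```bash\n# Print the OID this collector reads\nsysctl dev.cpu.0.freq\n```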
\n\n### Per dev.cpu.0.freq instance\n\nThe metric shows the current CPU frequency; it is directly affected by system load.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.scaling_cur_freq | frequency | MHz |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.0.freq-dev.cpu.0.freq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "dev.cpu.temperature", "monitored_instance": {"name": "dev.cpu.temperature", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# dev.cpu.temperature\n\nPlugin: freebsd.plugin\nModule: dev.cpu.temperature\n\n## Overview\n\nGet the current CPU temperature\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| dev.cpu.temperature | Enable or disable CPU temperature metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per dev.cpu.temperature instance\n\nThis metric shows the latest CPU temperature.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.temperature | a dimension per core | Celsius |\n\n", "integration_type": "collector", "id": "freebsd.plugin-dev.cpu.temperature-dev.cpu.temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "devstat", "monitored_instance": {"name": "devstat", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# devstat\n\nPlugin: freebsd.plugin\nModule: devstat\n\n## Overview\n\nCollect information for each hard disk available on the host.\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:kern.devstat]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new disks detected at runtime | Enable or disable the possibility to detect new disks. | auto | no |\n| performance metrics for pass devices | Enable or disable metrics for disks with type `PASS`. | auto | no |\n| total bandwidth for all disks | Enable or disable total bandwidth metric for all disks. | yes | no |\n| bandwidth for all disks | Enable or disable bandwidth for all disks metric. | auto | no |\n| operations for all disks | Enable or disable operations for all disks metric. | auto | no |\n| queued operations for all disks | Enable or disable queued operations for all disks metric. | auto | no |\n| utilization percentage for all disks | Enable or disable utilization percentage for all disks metric. | auto | no |\n| i/o time for all disks | Enable or disable I/O time for all disks metric. | auto | no |\n| average completed i/o time for all disks | Enable or disable average completed I/O time for all disks metric. 
| auto | no |\n| average completed i/o bandwidth for all disks | Enable or disable average completed I/O bandwidth for all disks metric. | auto | no |\n| average service time for all disks | Enable or disable average service time for all disks metric. | auto | no |\n| disable by default disks matching | Do not create charts for disks listed. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per devstat instance\n\nThese metrics give a general overview of I/O events on disks.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes, frees | KiB/s |\n| disk.ops | reads, writes, other, frees | operations/s |\n| disk.qops | operations | operations |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes, other, frees | milliseconds/s |\n| disk.await | reads, writes, other, frees | milliseconds/operation |\n| disk.avgsz | reads, writes, frees | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n", "integration_type": "collector", "id": "freebsd.plugin-devstat-devstat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getifaddrs", "monitored_instance": {"name": "getifaddrs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getifaddrs\n\nPlugin: freebsd.plugin\nModule: getifaddrs\n\n## Overview\n\nCollect traffic per network interface.\n\nThe plugin calls the `getifaddrs` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getifaddrs]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new interfaces detected at runtime | Enable or disable the possibility to discover new interfaces after the plugin starts. | auto | no |\n| total bandwidth for physical interfaces | Enable or disable total bandwidth for physical interfaces metric. | auto | no |\n| total packets for physical interfaces | Enable or disable total packets for physical interfaces metric. | auto | no |\n| total bandwidth for ipv4 interface | Enable or disable total bandwidth for IPv4 interface metric. | auto | no |\n| total bandwidth for ipv6 interfaces | Enable or disable total bandwidth for IPv6 interfaces metric. | auto | no |\n| bandwidth for all interfaces | Enable or disable bandwidth for all interfaces metric. | auto | no |\n| packets for all interfaces | Enable or disable packets for all interfaces metric. | auto | no |\n| errors for all interfaces | Enable or disable errors for all interfaces metric. | auto | no |\n| drops for all interfaces | Enable or disable drops for all interfaces metric. | auto | no |\n| collisions for all interface | Enable or disable collisions for all interfaces metric. | auto | no |\n| disable by default interfaces matching | Do not display data for interfaces listed. | lo* | no |\n| set physical interfaces for system.net | Define which interfaces are considered physical and are aggregated into the system.net chart. 
| igb* ix* cxl* em* ixl* ixlv* bge* ixgbe* vtnet* vmx* re* igc* dwc* | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ interface_inbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of inbound errors for the network interface ${label:device} in the last 10 minutes |\n| [ interface_outbound_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.errors | number of outbound errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per getifaddrs instance\n\nA general overview of network traffic.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n| system.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| system.ipv4 | received, sent | kilobits/s |\n| system.ipv6 | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.events | collisions | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getifaddrs-getifaddrs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "getmntinfo", "monitored_instance": {"name": "getmntinfo", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# getmntinfo\n\nPlugin: freebsd.plugin\nModule: getmntinfo\n\n## Overview\n\nCollect information for each mount point.\n\nThe plugin calls the `getmntinfo` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:getmntinfo]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable new mount points detected at runtime | Check for new mount points during runtime. | auto | no |\n| space usage for all disks | Enable or disable space usage for all disks metric. | auto | no |\n| inodes usage for all disks | Enable or disable inodes usage for all disks metric. | auto | no |\n| exclude space metrics on paths | Do not show metrics for listed paths. 
| /proc/* | no |\n| exclude space metrics on filesystems | Do not monitor listed filesystems. | autofs procfs subfs devfs none | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ disk_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.space | disk ${label:mount_point} space utilization |\n| [ disk_inode_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.inodes | disk ${label:mount_point} inode utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per mount point\n\nThese metrics show details about mount point usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-getmntinfo-getmntinfo", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "hw.intrcnt", "monitored_instance": {"name": "hw.intrcnt", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# hw.intrcnt\n\nPlugin: freebsd.plugin\nModule: hw.intrcnt\n\n## Overview\n\nGet the total number of interrupts\n\nThe plugin calls the `sysctl` function to collect the necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n    option1 = some value\n    option2 = some other value\n\n[section2]\n    option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| hw.intrcnt | Enable or disable Interrupts metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per hw.intrcnt instance\n\nThese metrics show the system interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.intr | interrupts | interrupts/s |\n| system.interrupts | a dimension per interrupt | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-hw.intrcnt-hw.intrcnt", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "ipfw", "monitored_instance": {"name": "ipfw", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# ipfw\n\nPlugin: freebsd.plugin\nModule: ipfw\n\n## Overview\n\nCollect information about the FreeBSD firewall.\n\nThe plugin uses a RAW socket to communicate with the kernel and collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:ipfw]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| counters for static rules | Enable or disable counters for static rules metric. | yes | no |\n| number of dynamic rules | Enable or disable number of dynamic rules metric. | yes | no |\n| allocated memory | Enable or disable allocated memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ipfw instance\n\nThese metrics show FreeBSD firewall statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfw.mem | dynamic, static | bytes |\n| ipfw.packets | a dimension per static rule | packets/s |\n| ipfw.bytes | a dimension per static rule | bytes/s |\n| ipfw.active | a dimension per dynamic rule | rules |\n| ipfw.expired | a dimension per dynamic rule | rules |\n\n", "integration_type": "collector", "id": "freebsd.plugin-ipfw-ipfw", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.cp_time", "monitored_instance": {"name": "kern.cp_time", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.cp_time\n\nPlugin: freebsd.plugin\nModule: kern.cp_time\n\n## Overview\n\nTotal CPU utilization\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe netdata main configuration file.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.cp_time | Enable or disable Total CPU usage. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding nice) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.cp_time instance\n\nThese metrics show CPU usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | nice, system, user, interrupt, idle | percentage |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | nice, system, user, interrupt, idle | percentage |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.cp_time-kern.cp_time", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.msq", "monitored_instance": {"name": "kern.ipc.msq", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.msq\n\nPlugin: freebsd.plugin\nModule: kern.ipc.msq\n\n## Overview\n\nCollect the number of IPC message queues\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.msq | Enable or disable IPC message queue metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.msq instance\n\nThese metrics show IPC message statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_msq_queues | queues | queues |\n| system.ipc_msq_messages | messages | messages |\n| system.ipc_msq_size | allocated, used | bytes |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.msq-kern.ipc.msq", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.sem", "monitored_instance": {"name": "kern.ipc.sem", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.sem\n\nPlugin: freebsd.plugin\nModule: kern.ipc.sem\n\n## Overview\n\nCollect information about semaphores.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.sem | Enable or disable semaphore metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.sem instance\n\nThese metrics show counters for semaphores on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.sem-kern.ipc.sem", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "kern.ipc.shm", "monitored_instance": {"name": "kern.ipc.shm", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# kern.ipc.shm\n\nPlugin: freebsd.plugin\nModule: kern.ipc.shm\n\n## Overview\n\nCollect shared memory information.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| kern.ipc.shm | Enable or disable shared memory metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per kern.ipc.shm instance\n\nThese metrics show the status of current shared memory segments.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_shared_mem_segs | segments | segments |\n| system.ipc_shared_mem_size | allocated | KiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-kern.ipc.shm-kern.ipc.shm", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.icmp.stats", "monitored_instance": {"name": "net.inet.icmp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.icmp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.icmp.stats\n\n## Overview\n\nCollect information about ICMP traffic.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.icmp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| IPv4 ICMP packets | Enable or disable IPv4 ICMP packets metric. | yes | no |\n| IPv4 ICMP error | Enable or disable IPv4 ICMP error metric. | yes | no |\n| IPv4 ICMP messages | Enable or disable IPv4 ICMP messages metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.icmp.stats instance\n\nThese metrics show ICMP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.icmp.stats-net.inet.icmp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.ip.stats", "monitored_instance": {"name": "net.inet.ip.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.ip.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.ip.stats\n\n## Overview\n\nCollect IP stats\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.ip.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 packets | Enable or disable IPv4 packets metric. | yes | no |\n| ipv4 fragments sent | Enable or disable IPv4 fragments sent metric. | yes | no |\n| ipv4 fragments assembly | Enable or disable IPv4 fragments assembly metric. | yes | no |\n| ipv4 errors | Enable or disable IPv4 errors metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.ip.stats instance\n\nThese metrics show IPv4 connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.ip.stats-net.inet.ip.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.states", "monitored_instance": {"name": "net.inet.tcp.states", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.states\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.states\n\n## Overview\n\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| net.inet.tcp.states | Enable or disable TCP state metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ipv4.tcpsock | IPv4 TCP connections utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.states instance\n\nA counter for TCP connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcpsock | connections | active connections |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.states-net.inet.tcp.states", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.tcp.stats", "monitored_instance": {"name": "net.inet.tcp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.tcp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.tcp.stats\n\n## Overview\n\nCollect overall information about TCP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.tcp.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 TCP packets | Enable or disable ipv4 TCP packets metric. | yes | no |\n| ipv4 TCP errors | Enable or disable ipv4 TCP errors metric. | yes | no |\n| ipv4 TCP handshake issues | Enable or disable ipv4 TCP handshake issues metric. | yes | no |\n| TCP connection aborts | Enable or disable TCP connection aborts metric. | auto | no |\n| TCP out-of-order queue | Enable or disable TCP out-of-order queue metric. | auto | no |\n| TCP SYN cookies | Enable or disable TCP SYN cookies metric. | auto | no |\n| TCP listen issues | Enable or disable TCP listen issues metric. | auto | no |\n| ECN packets | Enable or disable ECN packets metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ipv4_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ipv4.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.tcp.stats instance\n\nThese metrics show TCP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.tcplistenissues | overflows | packets/s |\n| ipv4.ecnpkts | InCEPkts, InECT0Pkts, InECT1Pkts, OutECT0Pkts, OutECT1Pkts | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.tcp.stats-net.inet.tcp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet.udp.stats", "monitored_instance": {"name": "net.inet.udp.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet.udp.stats\n\nPlugin: freebsd.plugin\nModule: net.inet.udp.stats\n\n## Overview\n\nCollect information about UDP connections.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet.udp.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv4 UDP packets | Enable or disable ipv4 UDP packets metric. | yes | no |\n| ipv4 UDP errors | Enable or disable ipv4 UDP errors metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet.udp.stats instance\n\nThese metrics show UDP connections statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | InErrors, NoPorts, RcvbufErrors, InCsumErrors, IgnoredMulti | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet.udp.stats-net.inet.udp.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.icmp6.stats", "monitored_instance": {"name": "net.inet6.icmp6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.icmp6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.icmp6.stats\n\n## Overview\n\nCollect information about IPv6 ICMP\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.icmp6.stats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| icmp | Enable or disable ICMP metric. 
| auto | no |\n| icmp redirects | Enable or disable ICMP redirects metric. | auto | no |\n| icmp errors | Enable or disable ICMP errors metric. | auto | no |\n| icmp echos | Enable or disable ICMP echos metric. | auto | no |\n| icmp router | Enable or disable ICMP router metric. | auto | no |\n| icmp neighbor | Enable or disable ICMP neighbor metric. | auto | no |\n| icmp types | Enable or disable ICMP types metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.icmp6.stats instance\n\nCollect IPv6 ICMP traffic statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.icmp6.stats-net.inet6.icmp6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.inet6.ip6.stats", "monitored_instance": {"name": "net.inet6.ip6.stats", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "network.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.inet6.ip6.stats\n\nPlugin: freebsd.plugin\nModule: net.inet6.ip6.stats\n\n## Overview\n\nCollect information about IPv6 stats.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.inet6.ip6.stats]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| ipv6 packets | Enable or disable ipv6 packet metric. | auto | no |\n| ipv6 fragments sent | Enable or disable ipv6 fragments sent metric. | auto | no |\n| ipv6 fragments assembly | Enable or disable ipv6 fragments assembly metric. | auto | no |\n| ipv6 errors | Enable or disable ipv6 errors metric. | auto | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.inet6.ip6.stats instance\n\nThese metrics show general information about IPv6 connections.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.inet6.ip6.stats-net.inet6.ip6.stats", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "net.isr", "monitored_instance": {"name": "net.isr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# net.isr\n\nPlugin: freebsd.plugin\nModule: net.isr\n\n## Overview\n\nCollect information about system softnet stat.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:net.isr]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| netisr | Enable or disable the general overview of softnet stat metrics. | yes | no |\n| netisr per core | Enable or disable softnet stat metric per core. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n| [ 10min_netisr_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of drops in the last minute due to exceeded sysctl net.route.netisr_maxqlen (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per net.isr instance\n\nThese metrics show softnet statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n### Per core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | dispatched, hybrid_dispatched, qdrops, queued | events/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-net.isr-net.isr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "system.ram", "monitored_instance": {"name": "system.ram", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# system.ram\n\nPlugin: freebsd.plugin\nModule: system.ram\n\n## Overview\n\nShow information about system memory usage.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| system.ram | Enable or disable system RAM metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per system.ram instance\n\nThese metrics show RAM usage statistics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, active, inactive, wired, cache, laundry, buffers | MiB |\n| mem.available | avail | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-system.ram-system.ram", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "uptime", "monitored_instance": {"name": "uptime", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# uptime\n\nPlugin: freebsd.plugin\nModule: uptime\n\n## Overview\n\nShow the period of time the server is up.\n\nThe plugin calls `clock_gettime` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per uptime instance\n\nHow long the system has been running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "freebsd.plugin-uptime-uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.loadavg", "monitored_instance": {"name": "vm.loadavg", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.loadavg\n\nPlugin: freebsd.plugin\nModule: vm.loadavg\n\n## Overview\n\nSystem Load Average\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.loadavg | Enable or disable load average metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.loadavg instance\n\nMonitors the number of threads running or waiting.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.loadavg-vm.loadavg", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_intr", "monitored_instance": {"name": "vm.stats.sys.v_intr", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_intr\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_intr\n\n## Overview\n\nDevice interrupts\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_intr | Enable or disable device interrupts metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_intr instance\n\nThis metric shows the device interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.dev_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_intr-vm.stats.sys.v_intr", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_soft", "monitored_instance": {"name": "vm.stats.sys.v_soft", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_soft\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_soft\n\n## Overview\n\nSoftware Interrupt\n\nvm.stats.sys.v_soft\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_soft | Enable or disable software interrupts metric. 
| yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_soft instance\n\nThis metric shows software interrupt frequency.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.soft_intr | interrupts | interrupts/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_soft-vm.stats.sys.v_soft", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.sys.v_swtch", "monitored_instance": {"name": "vm.stats.sys.v_swtch", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.sys.v_swtch\n\nPlugin: freebsd.plugin\nModule: vm.stats.sys.v_swtch\n\n## Overview\n\nCPU context switch\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.sys.v_swtch | Enable or disable CPU context switch metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.sys.v_swtch instance\n\nThis metric counts the number of context switches happening on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.sys.v_swtch-vm.stats.sys.v_swtch", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_pgfaults", "monitored_instance": {"name": "vm.stats.vm.v_pgfaults", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_pgfaults\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_pgfaults\n\n## Overview\n\nCollect memory page fault events.\n\nThe plugin calls `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_pgfaults | Enable or disable Memory page fault metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_pgfaults instance\n\nThe number of page faults that happened on the host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pgfaults | memory, io_requiring, cow, cow_optimized, in_transit | page faults/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_pgfaults-vm.stats.vm.v_pgfaults", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.stats.vm.v_swappgs", "monitored_instance": {"name": "vm.stats.vm.v_swappgs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.stats.vm.v_swappgs\n\nPlugin: freebsd.plugin\nModule: vm.stats.vm.v_swappgs\n\n## Overview\n\nThis metric shows the amount of data read from and written to SWAP.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.stats.vm.v_swappgs | Enable or disable information about SWAP I/O metric. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.stats.vm.v_swappgs instance\n\nThis metric shows events happening on SWAP.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | io, out | KiB/s |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.stats.vm.v_swappgs-vm.stats.vm.v_swappgs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.swap_info", "monitored_instance": {"name": "vm.swap_info", "link": "", "categories": ["data-collection.freebsd"], "icon_filename": "freebsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.swap_info\n\nPlugin: freebsd.plugin\nModule: vm.swap_info\n\n## Overview\n\nCollect information about SWAP memory.\n\nThe plugin calls `sysctlnametomib` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| vm.swap_info | Enable or disable SWAP metrics. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.swap_info instance\n\nThis metric shows the SWAP usage.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swap | free, used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.swap_info-vm.swap_info", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "vm.vmtotal", "monitored_instance": {"name": "vm.vmtotal", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "memory.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# vm.vmtotal\n\nPlugin: freebsd.plugin\nModule: vm.vmtotal\n\n## Overview\n\nCollect Virtual Memory information from the host.\n\nThe plugin calls function `sysctl` to collect data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:vm.vmtotal]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable total processes | Number of active processes. | yes | no |\n| processes running | Show number of processes running or blocked. | yes | no |\n| real memory | Memory used on the host. | yes | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per vm.vmtotal instance\n\nThese metrics give an overall view of the processes running.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.active_processes | active | processes |\n| system.processes | running, blocked | processes |\n| mem.real | used | MiB |\n\n", "integration_type": "collector", "id": "freebsd.plugin-vm.vmtotal-vm.vmtotal", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freebsd.plugin", "module_name": "zfs", "monitored_instance": {"name": "zfs", "link": "https://www.freebsd.org/", "categories": ["data-collection.freebsd"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# zfs\n\nPlugin: freebsd.plugin\nModule: zfs\n\n## Overview\n\nCollect metrics for the ZFS filesystem.\n\nThe plugin uses `sysctl` function to collect necessary data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freebsd:zfs_arcstats]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| show zero charts | Do not show charts with zero metrics. | no | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs instance\n\nThese metrics show detailed information about ZFS filesystem.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | throttled | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n| zfs.trim_bytes | TRIMmed | bytes |\n| zfs.trim_requests | successful, failed, unsupported | requests |\n\n", "integration_type": "collector", "id": "freebsd.plugin-zfs-zfs", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freebsd.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "freeipmi.plugin", "module_name": "freeipmi", "monitored_instance": {"name": "Intelligent Platform Management Interface (IPMI)", "link": "https://en.wikipedia.org/wiki/Intelligent_Platform_Management_Interface", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "ipmi", "freeipmi", "ipmimonitoring"], "most_popular": true}, "overview": "# Intelligent Platform Management Interface (IPMI)\n\nPlugin: freeipmi.plugin\nModule: freeipmi\n\n## Overview\n\n\"Monitor enterprise server sensor readings, event log entries, and hardware statuses to ensure reliable server operations.\"\n\n\nThe plugin uses open source library IPMImonitoring to communicate with sensors.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nLinux kernel module for IPMI can create big overhead.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install freeipmi.plugin\n\nWhen using our official DEB/RPM packages, the FreeIPMI plugin is included in a separate package named `netdata-plugin-freeipmi` which needs to be manually installed using your system package manager. 
It is not installed automatically due to the large number of dependencies it requires.\n\nWhen using a static build of Netdata, the FreeIPMI plugin will be included and installed automatically, though you will still need to have FreeIPMI installed on your system to be able to use the plugin.\n\nWhen using a local build of Netdata, you need to ensure that the FreeIPMI development packages (typically called `libipmimonitoring-dev`, `libipmimonitoring-devel`, or `freeipmi-devel`) are installed when building Netdata.\n\n\n#### Preliminary actions\n\nIf you have not previously used IPMI on your system, you will probably need to run the `ipmimonitoring` command as root\nto initialize IPMI settings so that the Netdata plugin works correctly. It should return information about available sensors on the system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:freeipmi]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThe configuration is set using command line options:\n\n```\n# netdata.conf\n[plugin:freeipmi]\n command options = opt1 opt2 ... optN\n```\n\nTo display a help message listing the available command line options:\n\n```bash\n/usr/libexec/netdata/plugins.d/freeipmi.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SECONDS | Data collection frequency. | | no |\n| debug | Enable verbose output. | disabled | no |\n| no-sel | Disable System Event Log (SEL) collection. | disabled | no |\n| reread-sdr-cache | Re-read SDR cache on every iteration. | disabled | no |\n| interpret-oem-data | Attempt to parse OEM data. | disabled | no |\n| assume-system-event-record | Treat illegal SEL event records as normal. | disabled | no |\n| ignore-non-interpretable-sensors | Do not read sensors that cannot be interpreted. | disabled | no |\n| bridge-sensors | Bridge sensors not owned by the BMC. | disabled | no |\n| shared-sensors | Enable shared sensors if found. | disabled | no |\n| no-discrete-reading | Do not read sensors if their event/reading type code is invalid. | enabled | no |\n| ignore-scanning-disabled | Ignore the scanning bit and read sensors no matter what. | disabled | no |\n| assume-bmc-owner | Assume the BMC is the sensor owner no matter what (usually bridging is required too). | disabled | no |\n| hostname HOST | Remote IPMI hostname or IP address. | local | no |\n| username USER | Username that will be used when connecting to the remote host. | | no |\n| password PASS | Password that will be used when connecting to the remote host. | | no |\n| noauthcodecheck / no-auth-code-check | Don't check the authentication codes returned. | | no |\n| driver-type IPMIDRIVER | Specify the driver type to use instead of doing an auto selection. The currently available outofband drivers are LAN and LAN_2_0, which perform IPMI 1.5 and IPMI 2.0 respectively. 
The currently available inband drivers are KCS, SSIF, OPENIPMI and SUNBMC. | | no |\n| sdr-cache-dir PATH | SDR cache files directory. | /tmp | no |\n| sensor-config-file FILE | Sensors configuration filename. | system default | no |\n| sel-config-file FILE | SEL configuration filename. | system default | no |\n| ignore N1,N2,N3,... | Sensor IDs to ignore. | | no |\n| ignore-status N1,N2,N3,... | Sensor IDs to ignore status (nominal/warning/critical). | | no |\n| -v | Print version and exit. | | no |\n| --help | Print usage message and exit. | | no |\n\n#### Examples\n\n##### Decrease data collection frequency\n\nBasic example decreasing data collection frequency. The minimum `update every` is 5 (enforced internally by the plugin). IPMI is slow and CPU hungry. So, once every 5 seconds is pretty acceptable.\n\n```yaml\n[plugin:freeipmi]\n update every = 10\n\n```\n##### Disable SEL collection\n\nAppend to `command options =` the options you need.\n\n```yaml\n[plugin:freeipmi]\n command options = no-sel\n\n```\n##### Ignore specific sensors\n\nSpecific sensor IDs can be excluded from freeipmi tools by editing `/etc/freeipmi/freeipmi.conf` and setting the IDs to be ignored at `ipmi-sensors-exclude-record-ids`.\n\n**However, this file is not used by `libipmimonitoring`** (the library used by Netdata's `freeipmi.plugin`).\n\nTo find the IDs to ignore, run the command `ipmimonitoring`. The first column is the ID to use:\n\nID | Name | Type | State | Reading | Units | Event\n1 | Ambient Temp | Temperature | Nominal | 26.00 | C | 'OK'\n2 | Altitude | Other Units Based Sensor | Nominal | 480.00 | ft | 'OK'\n3 | Avg Power | Current | Nominal | 100.00 | W | 'OK'\n4 | Planar 3.3V | Voltage | Nominal | 3.29 | V | 'OK'\n5 | Planar 5V | Voltage | Nominal | 4.90 | V | 'OK'\n6 | Planar 12V | Voltage | Nominal | 11.99 | V | 'OK'\n7 | Planar VBAT | Voltage | Nominal | 2.95 | V | 'OK'\n8 | Fan 1A Tach | Fan | Nominal | 3132.00 | RPM | 'OK'\n9 | Fan 1B Tach | Fan | Nominal | 2150.00 | RPM | 'OK'\n10 | Fan 2A Tach | Fan | Nominal | 2494.00 | RPM | 'OK'\n11 | Fan 2B Tach | Fan | Nominal | 1825.00 | RPM | 'OK'\n12 | Fan 3A Tach | Fan | Nominal | 3538.00 | RPM | 'OK'\n13 | Fan 3B Tach | Fan | Nominal | 2625.00 | RPM | 'OK'\n14 | Fan 1 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n15 | Fan 2 | Entity Presence | Nominal | N/A | N/A | 'Entity Present'\n...\n\n`freeipmi.plugin` supports the option `ignore` that accepts a comma-separated list of sensor IDs to ignore. To configure it, set in `netdata.conf`:\n\n\n```yaml\n[plugin:freeipmi]\n command options = ignore 1,2,3,4,...\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n### kipmi0 CPU usage\n\n\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipmi_sensor_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipmi.conf) | ipmi.sensor_state | IPMI sensor ${label:sensor} (${label:component}) state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe plugin does a speed test when it starts, to find out the duration needed by the IPMI processor to respond. 
Depending on the speed of your IPMI processor, charts may need several seconds to show up on the dashboard.\n\n\n### Per Intelligent Platform Management Interface (IPMI) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sel | events | events |\n\n### Per sensor\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| sensor | The sensor name |\n| type | One of 45 recognized sensor types (Battery, Voltage...) |\n| component | One of 25 recognized components (Processor, Peripheral). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipmi.sensor_state | nominal, critical, warning, unknown | state |\n| ipmi.sensor_temperature_c | temperature | Celsius |\n| ipmi.sensor_temperature_f | temperature | Fahrenheit |\n| ipmi.sensor_voltage | voltage | Volts |\n| ipmi.sensor_ampere | ampere | Amps |\n| ipmi.sensor_fan_speed | rotations | RPM |\n| ipmi.sensor_power | power | Watts |\n| ipmi.sensor_reading_percent | percentage | % |\n\n", "integration_type": "collector", "id": "freeipmi.plugin-freeipmi-Intelligent_Platform_Management_Interface_(IPMI)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-activemq", "module_name": "activemq", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.message-brokers"], "icon_filename": "activemq.png", "name": "ActiveMQ", "link": "https://activemq.apache.org/"}, "alternative_monitored_instances": [], "keywords": ["message broker"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ActiveMQ\n\nPlugin: go.d.plugin\nModule: activemq\n\n## Overview\n\nThis collector monitors ActiveMQ queues and topics.\n\nIt collects metrics by sending HTTP requests to the Web Console API.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 8161.\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8161\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/activemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/activemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data 
collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8161 | yes |\n| webadmin | Webadmin root path. | admin | yes |\n| max_queues | Maximum number of concurrently collected queues. | 50 | no |\n| max_topics | Maximum number of concurrently collected topics. | 50 | no |\n| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| timeout | HTTP request timeout. | 1 | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n username: foo\n password: bar\n\n```\n##### Filters and limits\n\nUsing filters and limits for queues and topics.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n max_queues: 100\n max_topics: 100\n queues_filter: '!sandr* *'\n topics_filter: '!sandr* *'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8161\n webadmin: admin\n\n - name: remote\n url: http://192.0.2.1:8161\n webadmin: admin\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m activemq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ActiveMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| activemq.messages | enqueued, dequeued | messages/s |\n| activemq.unprocessed_messages | unprocessed | messages |\n| activemq.consumers | consumers | consumers |\n\n", "integration_type": "collector", "id": "go.d.plugin-activemq-ActiveMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/activemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-apache", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "Apache", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nAll metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Apache", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-energid", "module_name": "apache", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energi Core Wallet", "link": "", "icon_filename": "energi.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["energid"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Energi Core Wallet\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis module monitors Energi Core Wallet instances.\nWorks only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/energid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/energid.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9796 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9796\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9796\n\n - name: remote\n url: http://192.0.2.1:9796\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Energi Core Wallet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| energid.blockindex | blocks, headers | count |\n| energid.difficulty | difficulty | difficulty |\n| energid.mempool | max, usage, tx_size | bytes |\n| energid.secmem | total, used, free, locked | bytes |\n| energid.network | connections | connections |\n| energid.timeoffset | timeoffset | seconds |\n| energid.utxo_transactions | transactions, output_transactions | transactions |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-Energi_Core_Wallet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/energid/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpd", "plugin_name": "go.d.plugin", "module_name": "apache", "monitored_instance": {"name": "HTTPD", "link": "https://httpd.apache.org/", "icon_filename": "apache.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTPD\n\nPlugin: go.d.plugin\nModule: apache\n\n## Overview\n\nThis collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), \nwhich is a built-in location that provides metrics about the Apache server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Apache instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Apache status support\n\n- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).\n- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/apache.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/apache.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nApache with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m apache\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nAll metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.\n\n\n### Per Apache instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Basic | Extended |\n|:------|:----------|:----|:---:|:---:|\n| apache.connections | connections | connections | \u2022 | \u2022 |\n| apache.conns_async | keepalive, closing, writing | connections | \u2022 | \u2022 |\n| apache.workers | idle, busy | workers | \u2022 | \u2022 |\n| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | \u2022 | \u2022 |\n| apache.requests | requests | requests/s | | \u2022 |\n| apache.net | sent | kilobit/s | | \u2022 |\n| apache.reqpersec | requests | requests/s | | \u2022 |\n| apache.bytespersec | served | KiB/s | | \u2022 |\n| apache.bytesperreq | size | KiB | | \u2022 |\n| apache.uptime | uptime | seconds | | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-apache-HTTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/apache/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cassandra", "module_name": "cassandra", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.database-servers"], "icon_filename": "cassandra.svg", "name": "Cassandra", "link": "https://cassandra.apache.org/_/index.html"}, "alternative_monitored_instances": [], "keywords": ["nosql", "dbms", "db", "database"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Cassandra\n\nPlugin: go.d.plugin\nModule: cassandra\n\n## Overview\n\nThis collector gathers metrics about client requests, cache hits, and much more, and also provides metrics for each thread pool.\n\n\nThe [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host that provide metrics on port 7072.\n\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1:7072/metrics\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Cassandra with Prometheus JMX Exporter\n\nTo configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):\n\n> **Note**: paths can differ depending on your setup.\n\n- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file\n and install it in a directory where Cassandra can access it.\n- Add\n the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)\n file to `/etc/cassandra`.\n- Add the following line to `/etc/cassandra/cassandra-env.sh`:\n ```\n JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\"\n ```\n- Restart the Cassandra service.\n\n
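#### Verify the exporter endpoint (optional)\n\nAs a quick sanity check (not part of the official steps; the URL assumes the agent configuration shown above), confirm that the exporter responds before configuring the collector:\n\n```bash\ncurl http://127.0.0.1:7072/metrics\n```\n\n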
JVM_OPTS=\"$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml\n ```\n- Restart cassandra service.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cassandra.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cassandra.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:7072/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nLocal server with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:7072/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:7072/metrics\n\n - name: remote\n url: http://192.0.2.1:7072/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cassandra\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Cassandra instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.client_requests_rate | read, write | requests/s |\n| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |\n| cassandra.client_requests_latency | read, write | seconds |\n| cassandra.row_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.row_cache_hit_rate | hits, misses | events/s |\n| cassandra.row_cache_utilization | used | percentage |\n| cassandra.row_cache_size | size | bytes |\n| cassandra.key_cache_hit_ratio | hit_ratio | percentage |\n| cassandra.key_cache_hit_rate | hits, misses | events/s |\n| cassandra.key_cache_utilization | used | percentage |\n| cassandra.key_cache_size | size | bytes |\n| cassandra.storage_live_disk_space_used | used | bytes |\n| cassandra.compaction_completed_tasks_rate | completed | tasks/s |\n| cassandra.compaction_pending_tasks_count | pending | tasks |\n| cassandra.compaction_compacted_rate | compacted | bytes/s |\n| cassandra.jvm_memory_used | heap, nonheap | bytes |\n| cassandra.jvm_gc_rate | parnew, cms | gc/s |\n| cassandra.jvm_gc_time | parnew, cms | seconds |\n| cassandra.dropped_messages_rate | dropped | messages/s |\n| cassandra.client_requests_timeouts_rate | read, write | timeout/s |\n| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |\n| cassandra.client_requests_failures_rate | read, write | failures/s |\n| cassandra.storage_exceptions_rate | storage | exceptions/s |\n\n### Per thread pool\n\nMetrics related to Cassandra's thread pools. 
Each thread pool provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thread_pool | thread pool name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cassandra.thread_pool_active_tasks_count | active | tasks |\n| cassandra.thread_pool_pending_tasks_count | pending | tasks |\n| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |\n| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-cassandra-Cassandra", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cassandra/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-chrony", "module_name": "chrony", "plugin_name": "go.d.plugin", "monitored_instance": {"categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "chrony.jpg", "name": "Chrony", "link": "https://chrony.tuxfamily.org/"}, "alternative_monitored_instances": [], "keywords": [], "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}, "most_popular": false}, "overview": "# Chrony\n\nPlugin: go.d.plugin\nModule: chrony\n\n## Overview\n\nThis collector monitors the system's clock performance and the activity status of its peers.\n\nIt collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers the Chrony instance running on the local host and listening on port 323.\nOn startup, it tries to collect metrics from:\n\n- 127.0.0.1:323\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/chrony.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/chrony.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |\n| timeout | Connection timeout. Zero means no timeout. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:323\n\n - name: remote\n address: 192.0.2.1:323\n\n```\n
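\nAs an optional sanity check (this uses `chronyc`, if installed; the collector itself does not need it), confirm that chronyd answers on the configured address:\n\n```bash\nchronyc -h 127.0.0.1 -p 323 tracking\n```\n\n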
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m chrony\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Chrony instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| chrony.stratum | stratum | level |\n| chrony.current_correction | current_correction | seconds |\n| chrony.root_delay | root_delay | seconds |\n| chrony.root_dispersion | root_dispersion | seconds |\n| chrony.last_offset | offset | seconds |\n| chrony.rms_offset | offset | seconds |\n| chrony.frequency | frequency | ppm |\n| chrony.residual_frequency | residual_frequency | ppm |\n| chrony.skew | skew | ppm |\n| chrony.update_interval | update_interval | seconds |\n| chrony.ref_measurement_time | ref_measurement_time | seconds |\n| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |\n| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |\n\n", "integration_type": "collector", "id": "go.d.plugin-chrony-Chrony", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/chrony/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-cockroachdb", "plugin_name": "go.d.plugin", "module_name": "cockroachdb", "monitored_instance": {"name": "CockroachDB", "link": "https://www.cockroachlabs.com/", "icon_filename": "cockroachdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cockroachdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CockroachDB\n\nPlugin: go.d.plugin\nModule: cockroachdb\n\n## Overview\n\nThis collector monitors CockroachDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a 
significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/cockroachdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/cockroachdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nCockroachDB with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/_status/vars\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/_status/vars\n\n - name: remote\n url: http://203.0.113.10:8080/_status/vars\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m cockroachdb\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |\n| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |\n| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |\n| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |\n| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per CockroachDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cockroachdb.process_cpu_time_combined_percentage | used | percentage |\n| cockroachdb.process_cpu_time_percentage | user, sys | percentage |\n| cockroachdb.process_cpu_time | user, sys | ms |\n| cockroachdb.process_memory | rss | KiB |\n| cockroachdb.process_file_descriptors | open | fd |\n| cockroachdb.process_uptime | uptime | seconds |\n| cockroachdb.host_disk_bandwidth | read, write | KiB |\n| cockroachdb.host_disk_operations | reads, writes | operations |\n| cockroachdb.host_disk_iops_in_progress | in_progress | iops |\n| cockroachdb.host_network_bandwidth | received, sent | kilobits |\n| cockroachdb.host_network_packets | received, sent | packets |\n| cockroachdb.live_nodes | live_nodes | nodes |\n| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |\n| cockroachdb.total_storage_capacity | total | KiB |\n| cockroachdb.storage_capacity_usability | usable, unusable | KiB |\n| cockroachdb.storage_usable_capacity | available, used | KiB |\n| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |\n| cockroachdb.sql_connections | active | connections |\n| cockroachdb.sql_bandwidth | received, sent | KiB |\n| cockroachdb.sql_statements_total | started, executed | statements |\n| cockroachdb.sql_errors | statement, transaction | errors |\n| cockroachdb.sql_started_ddl_statements | ddl | statements |\n| cockroachdb.sql_executed_ddl_statements | ddl | statements |\n| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |\n| 
cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |\n| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |\n| cockroachdb.sql_active_distributed_queries | active | queries |\n| cockroachdb.sql_distributed_flows | active, queued | flows |\n| cockroachdb.live_bytes | applications, system | KiB |\n| cockroachdb.logical_data | keys, values | KiB |\n| cockroachdb.logical_data_count | keys, values | num |\n| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |\n| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_reply, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |\n| cockroachdb.ranges | ranges | ranges |\n| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |\n| cockroachdb.range_events | split, add, remove, merge | events |\n| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |\n| cockroachdb.rocksdb_read_amplification | reads | reads/query |\n| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |\n| cockroachdb.rocksdb_cache_usage | used | KiB |\n| cockroachdb.rocksdb_cache_operations | hits, misses | operations |\n| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |\n| cockroachdb.rocksdb_sstables | sstables | sstables |\n| cockroachdb.replicas | replicas | replicas |\n| cockroachdb.replicas_quiescence | quiescent, active | replicas |\n| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |\n| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |\n| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |\n| cockroachdb.rebalancing_queries | avg | queries/s |\n| cockroachdb.rebalancing_writes | avg | writes/s |\n| cockroachdb.timeseries_samples | written | samples |\n| cockroachdb.timeseries_write_errors | write | errors |\n| cockroachdb.timeseries_write_bytes | written | KiB |\n| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |\n| cockroachdb.code_heap_memory_usage | go, cgo | KiB |\n| cockroachdb.goroutines | goroutines | goroutines |\n| cockroachdb.gc_count | gc | invokes |\n| cockroachdb.gc_pause | pause | us |\n| cockroachdb.cgo_calls | cgo | calls |\n\n", "integration_type": "collector", "id": "go.d.plugin-cockroachdb-CockroachDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-consul", "plugin_name": "go.d.plugin", "module_name": "consul", "monitored_instance": {"name": "Consul", "link": "https://www.consul.io/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "consul.svg"}, "alternative_monitored_instances": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["service networking platform", "hashicorp"], "most_popular": true}, "overview": 
"# Consul\n\nPlugin: go.d.plugin\nModule: consul\n\n## Overview\n\nThis collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.\n\n\nIt periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).\n\nUsed endpoints:\n\n- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)\n- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)\n- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)\n- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)\n- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector discovers instances running on the local host, that provide metrics on port 8500.\n\nOn startup, it tries to collect metrics from:\n\n- http://localhost:8500\n- http://127.0.0.1:8500\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Prometheus telemetry\n\n[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent, by increasing the value of `prometheus_retention_time` from `0`.\n\n\n#### Add required ACLs to Token\n\nRequired **only if authentication is enabled**.\n\n| ACL | Endpoint |\n|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |\n| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |\n| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/consul.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/consul.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:8500 | yes |\n| acl_token | ACL token used in every request. | | no |\n| max_checks | Checks processing/charting limit. | | no |\n| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8500\n acl_token: \"ec15675e-2999-d789-832e-8c4794daa8d7\"\n\n - name: remote\n url: http://203.0.113.10:8500\n acl_token: \"ada7f751-f654-8872-7f93-498e799158b6\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m consul\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |\n| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |\n| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |\n| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |\n| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |\n| [ consul_license_expiration_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).\n\n\n### Per Consul instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.client_rpc_requests_rate | rpc | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.client_rpc_requests_failed_rate | failed | requests/s | \u2022 | \u2022 | \u2022 |\n| consul.memory_allocated | allocated | bytes | \u2022 | \u2022 | \u2022 |\n| consul.memory_sys | sys | bytes | \u2022 | \u2022 | \u2022 |\n| consul.gc_pause_time | gc_pause | seconds | \u2022 | \u2022 | \u2022 |\n| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.kvs_apply_operations_rate | kvs_apply | ops/s | \u2022 | \u2022 | |\n| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.txn_apply_operations_rate | txn_apply | ops/s | \u2022 | \u2022 | |\n| consul.autopilot_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_failure_tolerance | failure_tolerance | servers | \u2022 | \u2022 | |\n| consul.autopilot_server_health_status | healthy, unhealthy | status | \u2022 | \u2022 | |\n| consul.autopilot_server_stable_time | stable | seconds | \u2022 | \u2022 | |\n| consul.autopilot_server_serf_status | active, failed, left, none | status | \u2022 | \u2022 | |\n| consul.autopilot_server_voter_status | voter, not_voter | status | \u2022 | \u2022 | |\n| consul.network_lan_rtt | min, max, avg | ms | \u2022 | \u2022 | |\n| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_commits_rate | commits | commits/s | \u2022 | | |\n| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | \u2022 | | |\n| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | \u2022 | | |\n| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | \u2022 | |\n| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | \u2022 | |\n| consul.raft_leader_elections_rate | leader | elections/s | \u2022 | \u2022 | |\n| consul.raft_leadership_transitions_rate | leadership | transitions/s | \u2022 | \u2022 | |\n| consul.server_leadership_status | leader, not_leader | status | \u2022 | \u2022 | |\n| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | \u2022 | \u2022 | |\n| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | \u2022 | \u2022 | |\n| consul.raft_boltdb_freelist_bytes | freelist | bytes | \u2022 | \u2022 | |\n| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | \u2022 | \u2022 | |\n| consul.raft_boltdb_store_logs_time | quantile_0.5, 
quantile_0.9, quantile_0.99 | ms | \u2022 | \u2022 | |\n| consul.license_expiration_time | license_expiration | seconds | \u2022 | \u2022 | \u2022 |\n\n### Per node check\n\nMetrics about checks on Node level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.node_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n### Per service check\n\nMetrics about checks at a Service level.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter Identifier |\n| node_name | The node's name |\n| check_name | The check's name |\n| service_name | The service's name |\n\nMetrics:\n\n| Metric | Dimensions | Unit | Leader | Follower | Client |\n|:------|:----------|:----|:---:|:---:|:---:|\n| consul.service_health_check_status | passing, maintenance, warning, critical | status | \u2022 | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-consul-Consul", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/consul/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-coredns", "plugin_name": "go.d.plugin", "module_name": "coredns", "monitored_instance": {"name": "CoreDNS", "link": "https://coredns.io/", "icon_filename": "coredns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["coredns", "dns", "kubernetes"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CoreDNS\n\nPlugin: go.d.plugin\nModule: coredns\n\n## Overview\n\nThis collector monitors CoreDNS instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/coredns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/coredns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9153/metrics | yes |\n| per_server_stats | Server filter. | | no |\n| per_zone_stats | Zone filter. | | no |\n| username | Username for basic HTTP authentication. 
| | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n##### per_server_stats\n\nMetrics of servers matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_server_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n##### per_zone_stats\n\nMetrics of zones matching the selector will be collected.\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).\n- Syntax:\n\n```yaml\nper_zone_stats:\n includes:\n - pattern1\n - pattern2\n excludes:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9153/metrics\n\n - name: remote\n url: http://203.0.113.10:9153/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m coredns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CoreDNS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.dns_request_count_total | requests | requests/s |\n| coredns.dns_responses_count_total | responses | responses/s |\n| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |\n| coredns.dns_panic_count_total | panics | panics/s |\n| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server_name | Server name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.server_dns_request_count_total | requests | requests/s |\n| coredns.server_dns_responses_count_total | responses | responses/s |\n| coredns.server_request_count_total_per_status | processed, dropped | requests/s |\n| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n### Per zone\n\nThese metrics refer to the DNS zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| zone_name | Zone name. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| coredns.zone_dns_request_count_total | requests | requests/s |\n| coredns.zone_dns_responses_count_total | responses | responses/s |\n| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |\n| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |\n| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |\n| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-coredns-CoreDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/coredns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchbase", "plugin_name": "go.d.plugin", "module_name": "couchbase", "monitored_instance": {"name": "Couchbase", "link": "https://www.couchbase.com/", "icon_filename": "couchbase.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchbase", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Couchbase\n\nPlugin: go.d.plugin\nModule: couchbase\n\n## Overview\n\nThis collector monitors Couchbase servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchbase.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchbase.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8091 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. 
Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8091\n\n - name: remote\n url: http://203.0.113.0:8091\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchbase\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Couchbase instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |\n| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |\n| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |\n| couchbase.bucket_item_count | a dimension per bucket | items |\n| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |\n| couchbase.bucket_data_used | a dimension per bucket | bytes |\n| couchbase.bucket_mem_used | a dimension per bucket | bytes |\n| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchbase-Couchbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchbase/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-couchdb", "plugin_name": "go.d.plugin", "module_name": "couchdb", "monitored_instance": {"name": "CouchDB", "link": "https://couchdb.apache.org/", "icon_filename": "couchdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["couchdb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# CouchDB\n\nPlugin: go.d.plugin\nModule: couchdb\n\n## Overview\n\nThis collector monitors CouchDB servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/couchdb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/couchdb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:5984 | yes |\n| node | CouchDB node name. Same as -name vm.args argument. | _local | no |\n| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. 
| | no |\n| timeout | HTTP request timeout. | 2 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n node: couchdb@127.0.0.1\n databases: my-db other-db\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:5984\n\n - name: remote\n url: http://203.0.113.0:5984\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m couchdb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CouchDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| couchdb.activity | db_reads, db_writes, view_reads | requests/s |\n| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |\n| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |\n| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |\n| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |\n| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |\n| couchdb.open_files | files | files |\n| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |\n| couchdb.proccounts | os_procs, erl_procs | processes |\n| couchdb.peakmsgqueue | peak_size | messages |\n| couchdb.reductions | reductions | reductions |\n| couchdb.db_sizes_file | a dimension per database | KiB |\n| couchdb.db_sizes_external | a dimension per database | KiB |\n| couchdb.db_sizes_active | a dimension per database | KiB |\n| couchdb.db_doc_count | a dimension per database | docs |\n| couchdb.db_doc_del_count | a dimension per database | docs |\n\n", "integration_type": "collector", "id": "go.d.plugin-couchdb-CouchDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/couchdb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dns_query", "plugin_name": "go.d.plugin", "module_name": "dns_query", "monitored_instance": {"name": "DNS query", "link": "", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNS query\n\nPlugin: go.d.plugin\nModule: dns_query\n\n## Overview\n\nThis module monitors DNS query round-trip time (RTT).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dns_query.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dns_query.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| domains | Domain or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |\n| servers | Servers to query. | | yes |\n| port | DNS server port. | 53 | no |\n| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |\n| record_types | Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. | A | no |\n| timeout | Query read timeout. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: job1\n record_types:\n - A\n - AAAA\n domains:\n - google.com\n - github.com\n - reddit.com\n servers:\n - 8.8.8.8\n - 8.8.4.4\n\n```\n
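##### Using tcp-tls protocol\n\nAn illustrative sketch, not an official example: the same kind of query job over DNS-over-TLS, using the documented `network` and `port` options. The domain and server values are placeholders; 853 is the conventional DNS-over-TLS port.\n\n\n```yaml\njobs:\n - name: job1_tls\n network: tcp-tls\n port: 853\n record_types:\n - A\n domains:\n - example.com\n servers:\n - 8.8.8.8\n\n```\n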
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dns_query\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per server\n\nThese metrics refer to the DNS server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| server | DNS server address. |\n| network | Network protocol name (tcp, udp, tcp-tls). |\n| record_type | DNS record type (e.g. A, AAAA, CNAME). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dns_query.query_status | success, network_error, dns_error | status |\n| dns_query.query_time | query_time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dns_query-DNS_query", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsdist", "plugin_name": "go.d.plugin", "module_name": "dnsdist", "monitored_instance": {"name": "DNSdist", "link": "https://dnsdist.org/", "icon_filename": "network-wired.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsdist", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# DNSdist\n\nPlugin: go.d.plugin\nModule: dnsdist\n\n## Overview\n\nThis collector monitors DNSdist servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable DNSdist built-in Webserver\n\nFor collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsdist.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsdist.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8083 | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client tls certificate. | | no |\n| tls_key | Client tls key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8083\n headers:\n X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).\n\n - name: remote\n url: http://203.0.113.0:8083\n headers:\n X-API-Key: 'your-api-key'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsdist\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per DNSdist instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsdist.queries | all, recursive, empty | queries/s |\n| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |\n| dnsdist.packets_dropped | acl | packets/s |\n| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |\n| dnsdist.backend_responses | responses | responses/s |\n| dnsdist.backend_commerrors | send_errors | errors/s |\n| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |\n| dnsdist.cache | hits, misses | answers/s |\n| dnsdist.servercpu | system_state, user_state | ms/s |\n| dnsdist.servermem | memory_usage | MiB |\n| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |\n| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsdist-DNSdist", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsdist/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq", "plugin_name": "go.d.plugin", "module_name": "dnsmasq", "monitored_instance": {"name": "Dnsmasq", "link": "https://thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq\n\nPlugin: go.d.plugin\nModule: dnsmasq\n\n## Overview\n\nThis collector monitors Dnsmasq servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |\n| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |\n| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n```\n##### Using TCP protocol\n\nLocal server with specific DNS query transport protocol.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n protocol: tcp\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:53\n\n - name: remote\n address: 203.0.113.0:53\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq.servers_queries | success, failed | queries/s |\n| dnsmasq.cache_performance | hits, misses | events/s |\n| dnsmasq.cache_operations | insertions, evictions | operations/s |\n| dnsmasq.cache_size | size | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq-Dnsmasq", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dnsmasq_dhcp", "plugin_name": "go.d.plugin", "module_name": "dnsmasq_dhcp", "monitored_instance": {"name": "Dnsmasq DHCP", "link": "https://www.thekelleys.org.uk/dnsmasq/doc.html", "icon_filename": "dnsmasq.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["dnsmasq", "dhcp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dnsmasq DHCP\n\nPlugin: go.d.plugin\nModule: dnsmasq_dhcp\n\n## Overview\n\nThis collector monitors the Dnsmasq DHCP leases database, depending on your configuration.\n\nBy default, it uses:\n\n- `/var/lib/misc/dnsmasq.leases` to read leases.\n- `/etc/dnsmasq.conf` to detect dhcp-ranges.\n- `/etc/dnsmasq.d` to find additional configurations.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAll configured dhcp-ranges are detected automatically.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dnsmasq_dhcp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |\n| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |\n| conf_dir | Path to dnsmasq configuration directory. 
| /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /var/lib/misc/dnsmasq.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n##### Pi-hole\n\nDnsmasq DHCP on Pi-hole.\n\n```yaml\njobs:\n - name: dnsmasq_dhcp\n leases_path: /etc/pihole/dhcp.leases\n conf_path: /etc/dnsmasq.conf\n conf_dir: /etc/dnsmasq.d\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dnsmasq_dhcp\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dnsmasq DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |\n| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |\n\n### Per dhcp range\n\nThese metrics refer to the DHCP range.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| dhcp_range | DHCP range in `START_IP:END_IP` format |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |\n| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |\n\n", "integration_type": "collector", "id": "go.d.plugin-dnsmasq_dhcp-Dnsmasq_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dnsmasq_dhcp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker", "plugin_name": "go.d.plugin", "module_name": "docker", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["container"], "most_popular": true}, "overview": "# Docker\n\nPlugin: go.d.plugin\nModule: docker\n\n## Overview\n\nThis collector monitors Docker containers state, health status and more.\n\n\nIt connects to the Docker instance via a TCP or UNIX socket and executes the following commands:\n\n- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).\n- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).\n- [List 
containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nRequires netdata user to be in the docker group.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nEnabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |\n| timeout | Request timeout in seconds. | 1 | no |\n| collect_container_size | Whether to collect container writable layer size. | no | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'unix:///var/run/docker.sock'\n\n - name: remote\n address: 'tcp://203.0.113.10:2375'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.containers_state | running, paused, stopped | containers |\n| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |\n| docker.images | active, dangling | images |\n| docker.images_size | size | bytes |\n\n### Per container\n\nMetrics related to containers. Each container provides its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| container_name | The container's name |\n| image | The image name the container uses |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |\n| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |\n| docker.container_writeable_layer_size | writeable_layer | size |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker-Docker", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-docker_engine", "plugin_name": "go.d.plugin", "module_name": "docker_engine", "alternative_monitored_instances": [], "monitored_instance": {"name": "Docker Engine", "link": "https://docs.docker.com/engine/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "docker.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["docker", "container"], "most_popular": false}, "overview": "# Docker Engine\n\nPlugin: go.d.plugin\nModule: docker_engine\n\n## Overview\n\nThis collector monitors the activity and health of Docker Engine and Docker Swarm.\n\n\nThe [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances running on localhost by attempting to connect to the Docker Prometheus exporter endpoint: `http://127.0.0.1:9323/metrics`.\n
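\nAs a quick sanity check (an illustrative sketch, assuming `curl` is available and the exporter listens on its default address), you can verify that the endpoint responds before configuring the collector:\n\n```bash\ncurl http://127.0.0.1:9323/metrics\n```\n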
\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/docker_engine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/docker_engine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9323/metrics | yes |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nConfiguration with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9323/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9323/metrics\n\n - name: remote\n url: http://192.0.2.1:9323/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m docker_engine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Engine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |\n| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |\n| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |\n| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |\n| docker_engine.swarm_manager_leader | is_leader | bool |\n| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |\n| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |\n| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |\n\n", "integration_type": "collector", "id": "go.d.plugin-docker_engine-Docker_Engine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-dockerhub", "plugin_name": "go.d.plugin", "module_name": "dockerhub", "monitored_instance": {"name": "Docker Hub repository", "link": "https://hub.docker.com/", "icon_filename": "docker.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["dockerhub"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Docker Hub repository\n\nPlugin: go.d.plugin\nModule: dockerhub\n\n## Overview\n\nThis collector keeps track of DockerHub repositories statistics such as the number of stars, pulls, current status, and more.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/dockerhub.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/dockerhub.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. 
| 0 | no |\n| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |\n| repositories | List of repositories to monitor. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: dockerhub\n repositories:\n - 'user1/name1'\n - 'user2/name2'\n - 'user3/name3'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m dockerhub\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Docker Hub repository instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dockerhub.pulls_sum | sum | pulls |\n| dockerhub.pulls | a dimension per repository | pulls |\n| dockerhub.pulls_rate | a dimension per repository | pulls/s |\n| dockerhub.stars | a dimension per repository | stars |\n| dockerhub.status | a dimension per repository | status |\n| dockerhub.last_updated | a dimension per repository | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-dockerhub-Docker_Hub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/dockerhub/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-elasticsearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elasticsearch", "link": "https://www.elastic.co/elasticsearch/", "icon_filename": "elasticsearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Elasticsearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. 
To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n
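##### Collecting indices stats\n\nAn illustrative sketch, not an official example: additionally enable per-index metrics via the documented `collect_indices_stats` option. This creates charts for every index, so enable it with care on clusters with many indices.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n collect_indices_stats: yes\n\n```\n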
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index ${label:index} health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-Elasticsearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-opensearch", "module_name": "elasticsearch", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenSearch", "link": "https://opensearch.org/", "icon_filename": "opensearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["elastic", "elasticsearch", "opensearch", "search engine"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# OpenSearch\n\nPlugin: go.d.plugin\nModule: elasticsearch\n\n## Overview\n\nThis collector monitors the performance and health of the Elasticsearch cluster.\n\n\nIt uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.\n\nUsed endpoints:\n\n| Endpoint | Description | API |\n|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|\n| `/` | Node info | |\n| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |\n| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |\n| `/_cluster/stats` | Cluster metrics | 
[Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect to port 9200:\n\n- http://127.0.0.1:9200\n- https://127.0.0.1:9200\n\n\n#### Limits\n\nBy default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/elasticsearch.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/elasticsearch.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9200 | yes |\n| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |\n| collect_node_stats | Controls whether to collect nodes metrics. | true | no |\n| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |\n| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |\n| collect_indices_stats | Controls whether to collect indices metrics. | false | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic single node mode\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n```\n##### Cluster mode\n\nCluster mode example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n cluster_mode: yes\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nElasticsearch with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9200\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9200\n\n - name: remote\n url: http://192.0.2.1:9200\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m elasticsearch\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |\n| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |\n| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |\n| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |\n| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the cluster node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| node_name | Human-readable identifier for the node. 
Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |\n| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_indices_indexing | index | operations/s |\n| elasticsearch.node_indices_indexing_current | index | operations |\n| elasticsearch.node_indices_indexing_time | index | milliseconds |\n| elasticsearch.node_indices_search | queries, fetches | operations/s |\n| elasticsearch.node_indices_search_current | queries, fetches | operations |\n| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |\n| elasticsearch.node_indices_refresh | refresh | operations/s |\n| elasticsearch.node_indices_refresh_time | refresh | milliseconds |\n| elasticsearch.node_indices_flush | flush | operations/s |\n| elasticsearch.node_indices_flush_time | flush | milliseconds |\n| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |\n| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |\n| elasticsearch.node_indices_segments_count | segments | segments |\n| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |\n| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |\n| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |\n| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |\n| elasticsearch.node_file_descriptors | open | fd |\n| elasticsearch.node_jvm_heap | inuse | percentage |\n| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |\n| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |\n| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |\n| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |\n| elasticsearch.node_jvm_gc_count | young, old | gc/s |\n| elasticsearch.node_jvm_gc_time | young, old | milliseconds |\n| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |\n| elasticsearch.node_cluster_communication_packets | received, sent | pps |\n| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |\n| elasticsearch.node_http_connections | open | connections |\n| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |\n\n### Per cluster\n\nThese metrics refer to the cluster.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.cluster_health_status | green, yellow, red | status |\n| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |\n| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards |\n| elasticsearch.cluster_pending_tasks | pending | tasks |\n| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |\n| elasticsearch.cluster_indices_count | indices | indices |\n| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |\n| elasticsearch.cluster_indices_docs_count | docs | docs |\n| elasticsearch.cluster_indices_store_size | size | bytes |\n| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |\n| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |\n\n### Per index\n\nThese metrics refer to the index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |\n| index | Name of the index. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| elasticsearch.node_index_health | green, yellow, red | status |\n| elasticsearch.node_index_shards_count | shards | shards |\n| elasticsearch.node_index_docs_count | docs | docs |\n| elasticsearch.node_index_store_size | store_size | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-elasticsearch-OpenSearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-envoy", "plugin_name": "go.d.plugin", "module_name": "envoy", "monitored_instance": {"name": "Envoy", "link": "https://www.envoyproxy.io/", "icon_filename": "envoy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["envoy", "proxy"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Envoy\n\nPlugin: go.d.plugin\nModule: envoy\n\n## Overview\n\nThis collector monitors Envoy proxies. 
It collects server, cluster, and listener metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Envoy instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/envoy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/envoy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9901/stats/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9901/stats/prometheus\n\n - name: remote\n url: http://192.0.2.1:9901/stats/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m envoy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Envoy instance\n\nEnvoy exposes metrics in Prometheus format. All metric labels are added to charts.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| envoy.server_state | live, draining, pre_initializing, initializing | state |\n| envoy.server_connections_count | connections | connections |\n| envoy.server_parent_connections_count | connections | connections |\n| envoy.server_memory_allocated_size | allocated | bytes |\n| envoy.server_memory_heap_size | heap | bytes |\n| envoy.server_memory_physical_size | physical | bytes |\n| envoy.server_uptime | uptime | seconds |\n| envoy.cluster_manager_cluster_count | active, not_active | clusters |\n| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |\n| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |\n| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |\n| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |\n| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |\n| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |\n| envoy.cluster_membership_changes_rate | membership | changes/s |\n| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |\n| envoy.cluster_upstream_cx_active_count | active | connections |\n| envoy.cluster_upstream_cx_rate | created | connections/s |\n| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |\n| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |\n| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |\n| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |\n| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |\n| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |\n| envoy.cluster_upstream_rq_active_count | active | requests |\n| envoy.cluster_upstream_rq_rate | requests | requests/s |\n| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |\n| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |\n| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |\n| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |\n| envoy.cluster_upstream_rq_retry_rate | request | retries/s |\n| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |\n| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |\n| envoy.listener_manager_listeners_count | active, warming, 
draining | listeners |\n| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |\n| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |\n| envoy.listener_admin_downstream_cx_active_count | active | connections |\n| envoy.listener_admin_downstream_cx_rate | created | connections/s |\n| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_admin_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n| envoy.listener_downstream_cx_active_count | active | connections |\n| envoy.listener_downstream_cx_rate | created | connections/s |\n| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |\n| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |\n| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |\n| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |\n| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |\n| envoy.listener_downstream_pre_cx_active_count | active | sockets |\n| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-envoy-Envoy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/envoy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-filecheck", "plugin_name": "go.d.plugin", "module_name": "filecheck", "monitored_instance": {"name": "Files and directories", "link": "", "icon_filename": "filesystem.svg", "categories": ["data-collection.linux-systems"]}, "keywords": ["files", "directories"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Files and directories\n\nPlugin: go.d.plugin\nModule: filecheck\n\n## Overview\n\nThis collector monitors files and directories.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/filecheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/filecheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| files | List of files to monitor. | | yes |\n| dirs | List of directories to monitor. | | yes |\n| discovery_every | Files and directories discovery interval. | 60 | no |\n\n##### files\n\nFiles matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\nfiles:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n##### dirs\n\nDirectories matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ndirs:\n include:\n - pattern1\n - pattern2\n exclude:\n - pattern3\n - pattern4\n```\n\n\n#### Examples\n\n##### Files\n\nFiles monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n files:\n include:\n - '/path/to/file1'\n - '/path/to/file2'\n - '/path/to/*.log'\n\n```\n##### Directories\n\nDirectories monitoring example configuration.\n\n```yaml\njobs:\n - name: files_example\n dirs:\n collect_dir_size: no\n include:\n - '/path/to/dir1'\n - '/path/to/dir2'\n - '/path/to/dir3*'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m filecheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Files and directories instance\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| filecheck.file_existence | a dimension per file | boolean |\n| filecheck.file_mtime_ago | a dimension per file | seconds |\n| filecheck.file_size | a dimension per file | bytes |\n| filecheck.dir_existence | a dimension per directory | boolean |\n| filecheck.dir_mtime_ago | a dimension per directory | seconds |\n| filecheck.dir_num_of_files | a dimension per directory | files |\n| filecheck.dir_size | a dimension per directory | bytes |\n\n", "integration_type": "collector", "id": "go.d.plugin-filecheck-Files_and_directories", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/filecheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-fluentd", "plugin_name": "go.d.plugin", "module_name": "fluentd", "monitored_instance": {"name": "Fluentd", "link": "https://www.fluentd.org/", "icon_filename": "fluentd.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["fluentd", "logging"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Fluentd\n\nPlugin: go.d.plugin\nModule: fluentd\n\n## Overview\n\nThis collector monitors Fluentd servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable monitor agent\n\nTo enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/fluentd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/fluentd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:24220 | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nFluentd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:24220\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:24220\n\n - name: remote\n url: http://192.0.2.1:24220\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m fluentd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fluentd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fluentd.retry_count | a dimension per plugin | count |\n| fluentd.buffer_queue_length | a dimension per plugin | queue_length |\n| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |\n\n", "integration_type": "collector", "id": "go.d.plugin-fluentd-Fluentd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/fluentd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-freeradius", "plugin_name": "go.d.plugin", "module_name": "freeradius", "monitored_instance": {"name": "FreeRADIUS", "link": "https://freeradius.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "freeradius.svg"}, "keywords": ["freeradius", "radius"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# FreeRADIUS\n\nPlugin: go.d.plugin\nModule: freeradius\n\n## Overview\n\nThis collector monitors FreeRADIUS servers.\n\nIt collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects FreeRADIUS instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status server\n\nTo enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/freeradius.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/freeradius.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. | 127.0.0.1 | yes |\n| port | Server port. | 18121 | no |\n| secret | FreeRADIUS secret. | adminsecret | no |\n\n
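Before adding a job, you can verify that the status server answers on the configured address and port (a sketch assuming the `radclient` tool that ships with FreeRADIUS and the defaults above; adjust the address, port, and secret to your setup):\n\n```bash\necho \"Message-Authenticator = 0x00\" | radclient 127.0.0.1:18121 status adminsecret\n```\n\n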
#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1\n port: 18121\n secret: adminsecret\n\n - name: remote\n address: 192.0.2.1\n port: 18121\n secret: adminsecret\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m freeradius\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per FreeRADIUS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| freeradius.authentication | requests, responses | packets/s |\n| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_authentication | requests, responses | packets/s |\n| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |\n| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.accounting | requests, responses | packets/s |\n| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n| freeradius.proxy_accounting | requests, responses | packets/s |\n| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-freeradius-FreeRADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/freeradius/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-geth", "plugin_name": "go.d.plugin", "module_name": "geth", "monitored_instance": {"name": "Go-ethereum", "link": "https://github.com/ethereum/go-ethereum", "icon_filename": "geth.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": ["geth", "ethereum", "blockchain"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Go-ethereum\n\nPlugin: go.d.plugin\nModule: geth\n\n## Overview\n\nThis collector monitors Go-ethereum instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote 
instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Go-ethereum instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/geth.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/geth.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:6060/debug/metrics/prometheus\n\n - name: remote\n url: http://192.0.2.1:6060/debug/metrics/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m geth\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go-ethereum instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |\n| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |\n| geth.eth_db_chaindata_disk_io | reads, writes | bytes |\n| geth.goroutines | goroutines | goroutines |\n| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |\n| geth.chaindata_db_size | level_db, ancient_db | bytes |\n| geth.chainhead | block, receipt, header | block |\n| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |\n| geth.tx_pool_current | invalid, pending, local, pool | transactions |\n| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |\n| geth.p2p_bandwidth | ingress, egress | bytes/s |\n| geth.reorgs | executed | reorgs |\n| geth.reorgs_blocks | added, dropped | blocks |\n| geth.p2p_peers | peers | peers |\n| geth.p2p_peers_calls | dials, serves | calls/s |\n| geth.rpc_calls | failed, successful | calls/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-geth-Go-ethereum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/geth/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-haproxy", "plugin_name": "go.d.plugin", "module_name": "haproxy", "monitored_instance": {"name": "HAProxy", "link": "https://www.haproxy.org/", "icon_filename": "haproxy.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["haproxy", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# HAProxy\n\nPlugin: go.d.plugin\nModule: haproxy\n\n## Overview\n\nThis collector monitors HAProxy servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable PROMEX addon.\n\nTo enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/haproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/haproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHAProxy with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8404/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8404/metrics\n\n - name: remote\n url: http://192.0.2.1:8404/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m haproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HAProxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_current_sessions | a dimension per proxy | sessions |\n| haproxy.backend_sessions | a dimension per proxy | sessions/s |\n| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |\n| haproxy.backend_current_queue | a dimension per proxy | requests |\n\n### Per proxy\n\nThese metrics refer to the Proxy.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |\n| haproxy.backend_network_io | in, out | bytes/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-haproxy-HAProxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/haproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-hfs", "plugin_name": "go.d.plugin", "module_name": "hfs", "monitored_instance": {"name": "Hadoop Distributed File System (HDFS)", "link": "https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html", "icon_filename": "hadoop.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["hdfs", "hadoop"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Hadoop Distributed File System (HDFS)\n\nPlugin: go.d.plugin\nModule: hfs\n\n## Overview\n\nThis collector monitors HDFS nodes.\n\nNetdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/hdfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/hdfs.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9870/jmx | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. 
| | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9870/jmx\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9870/jmx\n\n - name: remote\n url: http://192.0.2.1:9870/jmx\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hfs` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m hfs\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |\n| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |\n| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |\n| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |\n| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Hadoop Distributed File System (HDFS) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | DataNode | NameNode |\n|:------|:----------|:----|:---:|:---:|\n| hdfs.heap_memory | committed, used | MiB | \u2022 | \u2022 |\n| hdfs.gc_count_total | gc | events/s | \u2022 | \u2022 |\n| hdfs.gc_time_total | ms | ms | \u2022 | \u2022 |\n| hdfs.gc_threshold | info, warn | events/s | \u2022 | \u2022 |\n| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | \u2022 | \u2022 |\n| hdfs.logs_total | info, error, warn, fatal | logs/s | \u2022 | \u2022 |\n| hdfs.rpc_bandwidth | received, sent | kilobits/s | \u2022 | \u2022 |\n| hdfs.rpc_calls | calls | calls/s | \u2022 | \u2022 |\n| hdfs.open_connections | open | connections | \u2022 | \u2022 |\n| hdfs.call_queue_length | length | num | \u2022 | \u2022 |\n| hdfs.avg_queue_time | time | ms | \u2022 | \u2022 |\n| hdfs.avg_processing_time | time | ms | \u2022 | \u2022 |\n| hdfs.capacity | remaining, used | KiB | | \u2022 |\n| hdfs.used_capacity | dfs, non_dfs | KiB | | \u2022 |\n| hdfs.load | load | load | | \u2022 |\n| hdfs.volume_failures_total | failures | events/s | | \u2022 |\n| hdfs.files_total | files | num | | \u2022 |\n| hdfs.blocks_total | blocks | num | | \u2022 |\n| hdfs.blocks | corrupt, missing, under_replicated | num | | \u2022 |\n| hdfs.data_nodes | live, dead, stale | num | | \u2022 |\n| hdfs.datanode_capacity | remaining, used | KiB | \u2022 | |\n| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | \u2022 | |\n| hdfs.datanode_failed_volumes | failed volumes | num | \u2022 | |\n| hdfs.datanode_bandwidth | reads, writes | KiB/s | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-hfs-Hadoop_Distributed_File_System_(HDFS)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-httpcheck", "plugin_name": "go.d.plugin", "module_name": "httpcheck", "monitored_instance": {"name": "HTTP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# HTTP Endpoints\n\nPlugin: go.d.plugin\nModule: httpcheck\n\n## Overview\n\nThis collector monitors HTTP servers availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/httpcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd 
/etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/httpcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |\n| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |\n| headers_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |\n| headers_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |\n| headers_match.key | The exact name of the HTTP header to check for. | | yes |\n| headers_match.value | The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header. | | no |\n| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n```\n##### With HTTP request headers\n\nConfiguration with HTTP request headers that will be sent by the client.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n headers:\n Host: localhost:8080\n User-Agent: netdata/go.d.plugin\n Accept: */*\n\n```\n##### With `status_accepted`\n\nA basic example configuration with non-default status_accepted.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n status_accepted:\n - 200\n - 204\n\n```\n
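##### With `response_match`\n\nA sketch of checking the response body content (the URL and regular expression here are illustrative): if the status code is accepted, the response body is matched against the regular expression, and a non-matching body is reported as `bad_content` in the status chart.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n response_match: 'match this text in the response body'\n\n```\n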
See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.\n\n```yaml\njobs:\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the presence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n\n # The \"X-Robots-Tag\" header must be present in the HTTP response header,\n # and its value must be equal to \"noindex,nofollow\".\n # This config checks both the presence of the header and its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n value: '= noindex,nofollow'\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header,\n # but the value of the header does not matter.\n # This config checks for the absence of the header regardless of its value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n\n # The \"X-Robots-Tag\" header must not be present in the HTTP response header\n # with a value equal to \"noindex,nofollow\".\n # This config checks that the header is not present with the specified value.\n - name: local\n url: http://127.0.0.1:8080\n header_match:\n - key: X-Robots-Tag\n exclude: yes\n value: '= noindex,nofollow'\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080\n\n - name: remote\n url: http://192.0.2.1:8080\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m httpcheck\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per target\n\nThe metrics refer to the monitored target.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| url | url value that is set in the configuration file. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| httpcheck.response_time | time | ms |\n| httpcheck.response_length | length | characters |\n| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |\n| httpcheck.in_state | time | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-httpcheck-HTTP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-isc_dhcpd", "plugin_name": "go.d.plugin", "module_name": "isc_dhcpd", "monitored_instance": {"name": "ISC DHCP", "link": "https://www.isc.org/dhcp/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "keywords": ["dhcpd", "dhcp"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# ISC DHCP\n\nPlugin: go.d.plugin\nModule: isc_dhcpd\n\n## Overview\n\nThis collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/isc_dhcpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/isc_dhcpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no |\n| pools | List of IP pools to monitor. | | yes |\n\n##### pools\n\nList of IP pools to monitor.\n\n- IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats).\n- Syntax:\n\n```yaml\npools:\n - name: \"POOL_NAME1\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n - name: \"POOL_NAME2\"\n networks: \"SPACE SEPARATED LIST OF IP RANGES\"\n```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n pools:\n - name: lan\n networks: \"192.168.0.0/24 192.168.1.0/24 192.168.2.0/24\"\n - name: wifi\n networks: \"10.0.0.0/24\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m isc_dhcpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC DHCP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| isc_dhcpd.active_leases_total | active | leases |\n| isc_dhcpd.pool_active_leases | a dimension per DHCP pool | leases |\n| isc_dhcpd.pool_utilization | a dimension per DHCP pool | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-isc_dhcpd-ISC_DHCP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/isc_dhcpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubelet", "plugin_name": "go.d.plugin", "module_name": "k8s_kubelet", "monitored_instance": {"name": "Kubelet", "link": "https://kubernetes.io/docs/concepts/overview/components/#kubelet", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubelet", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubelet\n\nPlugin: go.d.plugin\nModule: k8s_kubelet\n\n## Overview\n\nThis collector monitors Kubelet instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubelet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubelet.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. 
| http://127.0.0.1:10255/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10255/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10250/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubelet\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |\n| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |\n| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubelet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | observes/s |\n| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_\u00b5s, 10_\u00b5s, 20_\u00b5s, 40_\u00b5s, 80_\u00b5s, 160_\u00b5s, 320_\u00b5s, 640_\u00b5s, 1280_\u00b5s, 2560_\u00b5s, 5120_\u00b5s, 10240_\u00b5s, 20480_\u00b5s, 40960_\u00b5s, +Inf | percentage |\n| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |\n| k8s_kubelet.kubelet_containers_running | total | running_containers |\n| k8s_kubelet.kubelet_pods_running | total | running_pods |\n| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |\n| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |\n| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |\n| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |\n| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |\n| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |\n| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n\n### Per volume manager\n\nThese metrics refer to the Volume Manager.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubelet-Kubelet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_kubeproxy", "plugin_name": "go.d.plugin", "module_name": "k8s_kubeproxy", "monitored_instance": {"name": "Kubeproxy", "link": "https://kubernetes.io/docs/concepts/overview/components/#kube-proxy", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubeproxy", "kubernetes", "k8s"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubeproxy\n\nPlugin: go.d.plugin\nModule: k8s_kubeproxy\n\n## Overview\n\nThis collector monitors Kubeproxy instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis 
integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_kubeproxy.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:10249/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:10249/metrics\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:10249/metrics\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_kubeproxy\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kubeproxy instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microsecond | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |\n| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |\n| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |\n| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |\n| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_kubeproxy-Kubeproxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-k8s_state", "plugin_name": "go.d.plugin", "module_name": "k8s_state", "monitored_instance": {"name": "Kubernetes Cluster State", "link": "https://kubernetes.io/", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["kubernetes", "k8s"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Kubernetes Cluster State\n\nPlugin: go.d.plugin\nModule: k8s_state\n\n## Overview\n\nThis collector monitors Kubernetes Nodes, Pods and Containers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/k8s_state.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/k8s_state.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m k8s_state\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per node\n\nThese metrics refer to the Node.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |\n| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |\n| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |\n| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |\n| k8s_state.node_allocatable_mem_requests_utilization | requests | % |\n| k8s_state.node_allocatable_mem_requests_used | requests | bytes |\n| k8s_state.node_allocatable_mem_limits_utilization | limits | % |\n| k8s_state.node_allocatable_mem_limits_used | limits | bytes |\n| k8s_state.node_allocatable_pods_utilization | allocated | % |\n| k8s_state.node_allocatable_pods_usage | available, allocated | pods |\n| k8s_state.node_condition | a dimension per condition | status |\n| k8s_state.node_schedulability | schedulable, unschedulable | state |\n| k8s_state.node_pods_readiness | ready | % |\n| k8s_state.node_pods_readiness_state | ready, unready | pods |\n| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |\n| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |\n| k8s_state.node_containers | containers, init_containers | containers |\n| k8s_state.node_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_init_containers_state | running, waiting, terminated | containers |\n| k8s_state.node_age | age | seconds |\n\n### Per pod\n\nThese metrics refer to the Pod.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_cpu_requests_used | requests | millicpu |\n| k8s_state.pod_cpu_limits_used | limits | millicpu |\n| k8s_state.pod_mem_requests_used | requests | bytes |\n| k8s_state.pod_mem_limits_used | limits | bytes |\n| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |\n| k8s_state.pod_phase | running, failed, succeeded, pending | state |\n| k8s_state.pod_age | age | seconds |\n| k8s_state.pod_containers | containers, init_containers | containers |\n| k8s_state.pod_containers_state | running, waiting, terminated | containers |\n| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |\n\n### Per container\n\nThese metrics refer to the Pod container.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |\n| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |\n| k8s_node_name | Node name. |\n| k8s_namespace | Namespace. |\n| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |\n| k8s_controller_name | Controller name. |\n| k8s_pod_name | Pod name. |\n| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |\n| k8s_container_name | Container name. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| k8s_state.pod_container_readiness_state | ready | state |\n| k8s_state.pod_container_restarts | restarts | restarts |\n| k8s_state.pod_container_state | running, waiting, terminated | state |\n| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |\n| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-k8s_state-Kubernetes_Cluster_State", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_state/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-lighttpd", "plugin_name": "go.d.plugin", "module_name": "lighttpd", "monitored_instance": {"name": "Lighttpd", "link": "https://www.lighttpd.net/", "icon_filename": "lighttpd.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Lighttpd\n\nPlugin: go.d.plugin\nModule: lighttpd\n\n## Overview\n\nThis collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.\n\n\nIt sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), \nwhich is a built-in location that provides metrics about the Lighttpd server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Lighttpd instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://localhost/server-status?auto\n- 
http://127.0.0.1/server-status?auto\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable Lighttpd status support\n\nTo enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/lighttpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/lighttpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/server-status?auto | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nLighttpd with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/server-status?auto\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/server-status?auto\n\n - name: remote\n url: http://192.0.2.1/server-status?auto\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m lighttpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Lighttpd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| lighttpd.requests | requests | requests/s |\n| lighttpd.net | sent | kilobits/s |\n| lighttpd.workers | idle, busy | servers |\n| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |\n| lighttpd.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-lighttpd-Lighttpd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/lighttpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logind", "plugin_name": "go.d.plugin", "module_name": "logind", "monitored_instance": {"name": "systemd-logind users", "link": "https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html", "icon_filename": "users.svg", "categories": ["data-collection.systemd"]}, "keywords": ["logind", "systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# systemd-logind users\n\nPlugin: go.d.plugin\nModule: logind\n\n## Overview\n\nThis collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logind.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logind.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n
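#### Examples\n\n##### Basic\n\nA minimal example configuration (a sketch: `update_every` is optional and is shown with a non-default value purely for illustration).\n\n```yaml\njobs:\n - name: logind\n update_every: 5\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. 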
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logind\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per systemd-logind users instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logind.sessions | remote, local | sessions |\n| logind.sessions_type | console, graphical, other | sessions |\n| logind.sessions_state | online, closing, active | sessions |\n| logind.users_state | offline, closing, online, lingering, active | users |\n\n", "integration_type": "collector", "id": "go.d.plugin-logind-systemd-logind_users", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logind/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-logstash", "plugin_name": "go.d.plugin", "module_name": "logstash", "monitored_instance": {"name": "Logstash", "link": "https://www.elastic.co/products/logstash", "icon_filename": "elastic-logstash.svg", "categories": ["data-collection.logs-servers"]}, "keywords": ["logstash"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Logstash\n\nPlugin: go.d.plugin\nModule: logstash\n\n## Overview\n\nThis collector monitors Logstash instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/logstash.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/logstash.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://localhost:9600 | yes |\n| timeout | HTTP request timeout. 
| 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n```\n##### HTTP authentication\n\nHTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://localhost:9600\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:9600\n\n - name: remote\n url: http://192.0.2.1:9600\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m logstash\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Logstash instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.jvm_threads | threads | count |\n| logstash.jvm_mem_heap_used | in_use | percentage |\n| logstash.jvm_mem_heap | committed, used | KiB |\n| logstash.jvm_mem_pools_eden | committed, used | KiB |\n| logstash.jvm_mem_pools_survivor | committed, used | KiB |\n| logstash.jvm_mem_pools_old | committed, used | KiB |\n| logstash.jvm_gc_collector_count | eden, old | counts/s |\n| logstash.jvm_gc_collector_time | eden, old | ms |\n| logstash.open_file_descriptors | open | fd |\n| logstash.event | in, filtered, out | events/s |\n| logstash.event_duration | event, queue | seconds |\n| logstash.uptime | uptime | seconds |\n\n### Per pipeline\n\nThese metrics refer to the pipeline.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pipeline | pipeline name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| logstash.pipeline_event | in, filtered, out | events/s |\n| logstash.pipeline_event_duration | event, queue | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-logstash-Logstash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/logstash/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mongodb", "plugin_name": "go.d.plugin", "module_name": "mongodb", "monitored_instance": {"name": "MongoDB", "link": "https://www.mongodb.com/", "icon_filename": "mongodb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["mongodb", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# MongoDB\n\nPlugin: go.d.plugin\nModule: mongodb\n\n## Overview\n\nThis collector monitors MongoDB servers.\n\nExecuted queries:\n\n- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)\n- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)\n- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only user\n\nCreate a read-only user for Netdata in the admin database.\n\n- Authenticate as the admin user:\n\n ```bash\n use admin\n db.auth(\"admin\", \"\")\n ```\n\n- Create a user:\n\n ```bash\n db.createUser({\n \"user\":\"netdata\",\n \"pwd\": \"\",\n \"roles\" : [\n {role: 'read', db: 'admin' },\n {role: 'clusterMonitor', db: 'admin'},\n {role: 'read', db: 'local' }\n ]\n })\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mongodb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mongodb.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| databases | Databases selector. Determines which database metrics will be collected. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n```\n##### With databases metrics\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n databases:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n uri: mongodb://netdata:password@localhost:27017\n\n - name: remote\n uri: mongodb://netdata:password@203.0.113.0:27017\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mongodb\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the\n storage engine.\n- Sharding metrics are available only\n for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).\n\n\n### Per MongoDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.operations_rate | reads, writes, commands | operations/s |\n| mongodb.operations_latency_time | reads, writes, commands | milliseconds |\n| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |\n| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |\n| mongodb.scanned_indexes_rate | scanned | indexes/s |\n| mongodb.scanned_documents_rate | scanned | documents/s |\n| mongodb.active_clients_count | readers, writers | clients |\n| mongodb.queued_operations_count | reads, writes | operations |\n| mongodb.cursors_open_count | open | cursors |\n| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |\n| mongodb.cursors_opened_rate | opened | cursors/s |\n| mongodb.cursors_timed_out_rate | timed_out | cursors/s |\n| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |\n| mongodb.transactions_count | active, inactive, open, prepared | transactions |\n| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |\n| mongodb.connections_usage | available, used | connections |\n| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |\n| mongodb.connections_rate | created | connections/s |\n| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |\n| mongodb.network_traffic_rate | in, out | bytes/s |\n| mongodb.network_requests_rate | requests | requests/s |\n| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |\n| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |\n| mongodb.memory_resident_size | used | bytes |\n| mongodb.memory_virtual_size | used | bytes |\n| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |\n| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |\n| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |\n| mongodb.wiredtiger_cache_usage | used | bytes |\n| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |\n| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |\n| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |\n| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |\n| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |\n| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |\n\n### Per lock type\n\nThese metrics refer to the lock type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| lock_type | lock type (e.g. 
global, database, collection, mutex) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |\n\n### Per commit type\n\nThese metrics refer to the commit type.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.transactions_commits_rate | success, fail | commits/s |\n| mongodb.transactions_commits_duration_time | commits | milliseconds |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.database_collection_count | collections | collections |\n| mongodb.database_indexes_count | indexes | indexes |\n| mongodb.database_views_count | views | views |\n| mongodb.database_documents_count | documents | documents |\n| mongodb.database_data_size | data_size | bytes |\n| mongodb.database_storage_size | storage_size | bytes |\n| mongodb.database_index_size | index_size | bytes |\n\n### Per replica set member\n\nThese metrics refer to the replica set member.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| repl_set_member | replica set member name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |\n| mongodb.repl_set_member_health_status | up, down | status |\n| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |\n| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |\n| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |\n| mongodb.repl_set_member_uptime | uptime | seconds |\n\n### Per shard\n\nThese metrics refer to the shard.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| shard_id | shard id |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mongodb.sharding_shard_chunks_count | chunks | chunks |\n\n", "integration_type": "collector", "id": "go.d.plugin-mongodb-MongoDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mongodb/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mariadb", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MariaDB", "link": "https://mariadb.org/", "icon_filename": "mariadb.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MariaDB\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE 
STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n
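To verify that the user was created with the expected privileges, you can optionally run:\n\n```mysql\nSHOW GRANTS FOR 'netdata'@'localhost';\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. 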
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | 
\u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| 
mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": "go.d.plugin-mysql-MariaDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "MySQL", "link": "https://www.mysql.com/", "categories": ["data-collection.database-servers"], "icon_filename": "mysql.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": true}, "overview": "# MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 
'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-percona_mysql", "plugin_name": "go.d.plugin", "module_name": "mysql", "monitored_instance": {"name": "Percona MySQL", "link": "https://www.percona.com/software/mysql-database/percona-server", "icon_filename": "percona.svg", "categories": ["data-collection.database-servers"]}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "mysql", "maria", "mariadb", "sql"], "most_popular": false}, "overview": "# Percona MySQL\n\nPlugin: go.d.plugin\nModule: mysql\n\n## Overview\n\nThis collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.\n\n\nIt connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:\n\nExecuted queries:\n\n- `SELECT VERSION();`\n- `SHOW GLOBAL STATUS;`\n- `SHOW GLOBAL VARIABLES;`\n- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)\n- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+)\n- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets:\n\n- /var/run/mysqld/mysqld.sock\n- /var/run/mysqld/mysql.sock\n- /var/lib/mysql/mysql.sock\n- /tmp/mysql.sock\n- 127.0.0.1:3306\n- \"[::1]:3306\"\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nA user account should have the\nfollowing [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):\n\n- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)\n- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)\n- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)\n\nTo create the `netdata` user with these permissions, execute the following in the MySQL shell:\n\n```mysql\nCREATE USER 'netdata'@'localhost';\nGRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';\nFLUSH PRIVILEGES;\n```\n\nThe `netdata` user will have the ability to connect to the MySQL server on localhost without a password. 
It will only\nbe able to gather statistics without being able to alter or affect operations in any way.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/mysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/mysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |\n| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@unix(/var/lib/mysql/mysql.sock)/\n\n```\n##### Connection with password\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata:password@tcp(127.0.0.1:3306)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n  - name: local\n    my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    dsn: netdata@tcp(127.0.0.1:3306)/\n\n  - name: remote\n    dsn: netdata:password@tcp(203.0.113.0:3306)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m mysql\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |\n| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |\n| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |\n| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |\n| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |\n| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |\n| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |\n| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |\n| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |\n| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |\n| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |\n| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MariaDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.net | in, out | kilobits/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries | queries, questions, slow_queries | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.queries_type | select, delete, update, insert, replace | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_open_cache_overflows | open_cache | overflows/s | \u2022 | \u2022 | \u2022 |\n| mysql.table_locks | immediate, waited | locks/s | \u2022 | \u2022 | \u2022 |\n| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | \u2022 | \u2022 | \u2022 |\n| mysql.sort_issues | merge_passes, range, scan | issues/s | \u2022 | \u2022 | \u2022 |\n| mysql.tmp | disk_tables, files, tables | events/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections | all, aborted | connections/s | \u2022 | \u2022 | \u2022 |\n| mysql.connections_active | active, limit, max_active | connections | \u2022 | \u2022 | \u2022 |\n| mysql.threads | connected, cached, running | threads | \u2022 | \u2022 | \u2022 |\n| mysql.threads_created | created | threads/s | \u2022 | \u2022 | \u2022 |\n| mysql.thread_cache_misses | misses | misses | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io | read, write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_log | waits, write_requests, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_cur_row_lock | current waits | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log | fsyncs, writes | operations | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_os_log_io | write | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.innodb_deadlocks | deadlocks | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.files | files | files | \u2022 | \u2022 | \u2022 |\n| mysql.files_rate | files | files/s | \u2022 | \u2022 | \u2022 |\n| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | \u2022 | \u2022 | \u2022 |\n| mysql.opened_tables | tables | tables/s | \u2022 | \u2022 | \u2022 |\n| mysql.open_tables | cache, tables | tables | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_fetch_query_duration | duration | milliseconds | 
\u2022 | \u2022 | \u2022 |\n| mysql.process_list_queries_count | system, user | queries | \u2022 | \u2022 | \u2022 |\n| mysql.process_list_longest_query_duration | duration | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | \u2022 | \u2022 | \u2022 |\n| mysql.qcache | queries | queries | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_freemem | free | MiB | \u2022 | \u2022 | \u2022 |\n| mysql.qcache_memblocks | free, total | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.galera_writesets | rx, tx | writesets/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_bytes | rx, tx | KiB/s | \u2022 | \u2022 | \u2022 |\n| mysql.galera_queue | rx, tx | writesets | \u2022 | \u2022 | \u2022 |\n| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_flow_control | paused | ms | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_status | primary, non_primary, disconnected | status | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_size | nodes | nodes | \u2022 | \u2022 | \u2022 |\n| mysql.galera_cluster_weight | weight | weight | \u2022 | \u2022 | \u2022 |\n| mysql.galera_connected | connected | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_ready | ready | boolean | \u2022 | \u2022 | \u2022 |\n| mysql.galera_open_transactions | open | transactions | \u2022 | \u2022 | \u2022 |\n| mysql.galera_thread_count | threads | threads | \u2022 | \u2022 | \u2022 |\n| mysql.key_blocks | unused, used, not_flushed | blocks | \u2022 | \u2022 | \u2022 |\n| mysql.key_requests | reads, writes | requests/s | \u2022 | \u2022 | \u2022 |\n| mysql.key_disk_ops | reads, writes | operations/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_cache | disk, all | transactions/s | \u2022 | \u2022 | \u2022 |\n| mysql.binlog_stmt_cache | disk, all | statements/s | \u2022 | \u2022 | \u2022 |\n\n### Per connection\n\nThese metrics refer to the replication connection.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.slave_behind | seconds | seconds | \u2022 | \u2022 | \u2022 |\n| mysql.slave_status | sql_running, io_running | boolean | \u2022 | \u2022 | \u2022 |\n\n### Per user\n\nThese metrics refer to the MySQL user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username |\n\nMetrics:\n\n| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |\n|:------|:----------|:----|:---:|:---:|:---:|\n| mysql.userstats_cpu | used | percentage | | \u2022 | \u2022 |\n| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | \u2022 | \u2022 |\n| mysql.userstats_commands | select, update, other | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_commands | denied | commands/s | | \u2022 | \u2022 |\n| mysql.userstats_created_transactions | commit, rollback | transactions/s | | \u2022 | \u2022 |\n| mysql.userstats_binlog_written | written | B/s | | \u2022 | \u2022 |\n| mysql.userstats_empty_queries | empty | queries/s | | \u2022 | \u2022 |\n| mysql.userstats_connections | created | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_lost_connections | lost | connections/s | | \u2022 | \u2022 |\n| mysql.userstats_denied_connections | denied | connections/s | | \u2022 | \u2022 |\n\n", "integration_type": "collector", "id": 
"go.d.plugin-mysql-Percona_MySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginx", "plugin_name": "go.d.plugin", "module_name": "nginx", "monitored_instance": {"name": "NGINX", "link": "https://www.nginx.com/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "nginx.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "go.d.plugin", "module_name": "web_log"}, {"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nginx", "web", "webserver", "http", "proxy"], "most_popular": true}, "overview": "# NGINX\n\nPlugin: go.d.plugin\nModule: nginx\n\n## Overview\n\nThis collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.\n\n\nIt sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost that are listening on port 80.\nOn startup, it tries to collect metrics from:\n\n- http://127.0.0.1/basic_status\n- http://localhost/stub_status\n- http://127.0.0.1/stub_status\n- http://127.0.0.1/nginx_status\n- http://127.0.0.1/status\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status support\n\nConfigure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. 
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginx.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginx.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/stub_status | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1/stub_status\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1/stub_status\n\n  - name: remote\n    url: http://192.0.2.1/stub_status\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginx\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginx.connections | active | connections |\n| nginx.connections_status | reading, writing, idle | connections |\n| nginx.connections_accepted_handled | accepted, handled | connections/s |\n| nginx.requests | requests | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginx-NGINX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxplus", "plugin_name": "go.d.plugin", "module_name": "nginxplus", "monitored_instance": {"name": "NGINX Plus", "link": "https://www.nginx.com/products/nginx/", "icon_filename": "nginxplus.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["nginxplus", "nginx", "web", "webserver", "http", "proxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NGINX Plus\n\nPlugin: go.d.plugin\nModule: nginxplus\n\n## Overview\n\nThis collector monitors NGINX Plus servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Config API\n\nTo configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).\n\n
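A minimal sketch of an API endpoint exposed read-only to the local host (the location name, listen address, and allowed IPs are illustrative):\n\n```text\nserver {\n    listen 127.0.0.1:80;\n\n    location /api {\n        api write=off;    # expose the NGINX Plus REST API in read-only mode\n        allow 127.0.0.1;  # only allow requests from the Netdata host\n        deny all;\n    }\n}\n```\n\nPoint the `url` option at the server that exposes this location (e.g. `http://127.0.0.1`).\n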
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxplus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxplus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\nNGINX Plus with enabled HTTPS and self-signed certificate.\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1\n\n  - name: remote\n    url: http://192.0.2.1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxplus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX Plus instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.client_connections_rate | accepted, dropped | connections/s |\n| nginxplus.client_connections_count | active, idle | connections |\n| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |\n| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |\n| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |\n| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |\n| nginxplus.http_requests_rate | requests | requests/s |\n| nginxplus.http_requests_count | requests | requests |\n| nginxplus.uptime | uptime | seconds |\n\n### Per http server zone\n\nThese metrics refer to the HTTP server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_server_zone | HTTP server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_server_zone_requests_rate | requests | requests/s |\n| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_server_zone_requests_processing_count | processing | requests |\n| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http location zone\n\nThese metrics refer to the HTTP location zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_location_zone | HTTP location zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_location_zone_requests_rate | requests | requests/s |\n| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |\n\n### Per http upstream\n\nThese metrics refer to the HTTP upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_peers_count | peers | peers |\n| nginxplus.http_upstream_zombies_count | zombie | servers |\n| nginxplus.http_upstream_keepalive_count | keepalive | connections |\n\n### Per http upstream server\n\nThese metrics refer to the HTTP upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_upstream_name | HTTP upstream name |\n| http_upstream_zone | HTTP upstream zone name |\n| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) |\n| http_upstream_server_name | HTTP upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_upstream_server_requests_rate | requests | requests/s |\n| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxplus.http_upstream_server_response_time | response | milliseconds |\n| nginxplus.http_upstream_server_response_header_time | header | milliseconds |\n| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |\n| nginxplus.http_upstream_server_connections_count | active | connections |\n| nginxplus.http_upstream_server_downtime | downtime | seconds |\n\n### Per http cache\n\nThese metrics refer to the HTTP cache.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| http_cache | HTTP cache name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.http_cache_state | warm, cold | state |\n| nginxplus.http_cache_iops | served, written, bypass | responses/s |\n| nginxplus.http_cache_io | served, written, bypass | bytes/s |\n| nginxplus.http_cache_size | size | bytes |\n\n### Per stream server zone\n\nThese metrics refer to the Stream server zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_server_zone | Stream server zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |\n| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |\n| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_server_zone_connections_processing_count | processing | connections |\n| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |\n\n### Per stream upstream\n\nThese metrics refer to the Stream upstream.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_peers_count | peers | peers |\n| nginxplus.stream_upstream_zombies_count | zombie | servers |\n\n### Per stream upstream server\n\nThese metrics refer to the Stream upstream server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| stream_upstream_name | Stream upstream name |\n| stream_upstream_zone | Stream upstream zone name |\n| stream_upstream_server_address | Stream upstream server address (e.g. 
127.0.0.1:12346) |\n| stream_upstream_server_name | Stream upstream server name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |\n| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |\n| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |\n| nginxplus.stream_upstream_server_downtime | downtime | seconds |\n| nginxplus.stream_upstream_server_connections_count | active | connections |\n\n### Per resolver zone\n\nThese metrics refer to the resolver zone.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| resolver_zone | resolver zone name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |\n| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxplus-NGINX_Plus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxplus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nginxvts", "plugin_name": "go.d.plugin", "module_name": "nginxvts", "monitored_instance": {"name": "NGINX VTS", "link": "https://www.nginx.com/", "icon_filename": "nginx.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["webserver"], "related_resources": {"integrations": {"list": [{"plugin_name": "go.d.plugin", "module_name": "weblog"}, {"plugin_name": "go.d.plugin", "module_name": "httpcheck"}, {"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# NGINX VTS\n\nPlugin: go.d.plugin\nModule: nginxvts\n\n## Overview\n\nThis collector monitors NGINX servers with the [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).\n\n\nIt sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), \nwhich is a built-in location that provides metrics about the NGINX VTS server.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects NGINX instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure nginx-vts module\n\nTo configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nginxvts.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nginxvts.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | 
Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status/format/json | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/status/format/json\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/status/format/json\n\n - name: remote\n url: http://192.0.2.1/status/format/json\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nginxvts\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NGINX VTS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nginxvts.requests_total | requests | requests/s |\n| nginxvts.active_connections | active | connections |\n| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |\n| nginxvts.uptime | uptime | seconds |\n| nginxvts.shm_usage | max, used | bytes |\n| nginxvts.shm_used_node | used | nodes |\n| nginxvts.server_requests_total | requests | requests/s |\n| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| nginxvts.server_traffic_total | in, out | bytes/s |\n| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-nginxvts-NGINX_VTS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nginxvts/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ntpd", "plugin_name": "go.d.plugin", "module_name": "ntpd", "monitored_instance": {"name": "NTPd", "link": "https://www.ntp.org/documentation/4.2.8-series/ntpd", "icon_filename": "ntp.png", "categories": ["data-collection.system-clock-and-ntp"]}, "keywords": ["ntpd", "ntp", "time"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NTPd\n\nPlugin: go.d.plugin\nModule: ntpd\n\n## Overview\n\nThis collector monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via a UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ntpd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ntpd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |\n| timeout | Connection/read/write timeout. | 3 | no |\n| collect_peers | Determines whether peer metrics will be collected. 
| no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n```\n##### With peers metrics\n\nCollect peers metrics.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n collect_peers: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:123\n\n - name: remote\n address: 203.0.113.0:123\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ntpd\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per NTPd instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.sys_offset | offset | milliseconds |\n| ntpd.sys_jitter | system, clock | milliseconds |\n| ntpd.sys_frequency | frequency | ppm |\n| ntpd.sys_wander | clock | ppm |\n| ntpd.sys_rootdelay | delay | milliseconds |\n| ntpd.sys_rootdisp | dispersion | milliseconds |\n| ntpd.sys_stratum | stratum | stratum |\n| ntpd.sys_tc | current, minimum | log2 |\n| ntpd.sys_precision | precision | log2 |\n\n### Per peer\n\nThese metrics refer to the NTPd peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| peer_address | peer's source IP address |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ntpd.peer_offset | offset | milliseconds |\n| ntpd.peer_delay | delay | milliseconds |\n| ntpd.peer_dispersion | dispersion | milliseconds |\n| ntpd.peer_jitter | jitter | milliseconds |\n| ntpd.peer_xleave | xleave | milliseconds |\n| ntpd.peer_rootdelay | rootdelay | milliseconds |\n| ntpd.peer_rootdisp | dispersion | milliseconds |\n| ntpd.peer_stratum | stratum | stratum |\n| ntpd.peer_hmode | hmode | hmode |\n| ntpd.peer_pmode | pmode | pmode |\n| ntpd.peer_hpoll | hpoll | log2 |\n| ntpd.peer_ppoll | ppoll | log2 |\n| ntpd.peer_precision | precision | log2 |\n\n", "integration_type": "collector", "id": "go.d.plugin-ntpd-NTPd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ntpd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvidia_smi", "plugin_name": "go.d.plugin", "module_name": "nvidia_smi", "monitored_instance": {"name": "Nvidia GPU", "link": "https://www.nvidia.com/en-us/", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["nvidia", "gpu", "hardware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, 
"most_popular": false}, "overview": "# Nvidia GPU\n\nPlugin: go.d.plugin\nModule: nvidia_smi\n\n## Overview\n\nThis collector monitors GPUs performance metrics using\nthe [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.\n\n> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvidia_smi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvidia_smi.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvidia_smi binary. The default is \"nvidia_smi\" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no |\n| timeout | nvidia_smi binary execution timeout. | 2 | no |\n| use_csv_format | Used format when requesting GPU information. XML is used if set to 'no'. | yes | no |\n\n#### Examples\n\n##### XML format\n\nUse XML format when requesting GPU information.\n\n```yaml\njobs:\n - name: nvidia_smi\n use_csv_format: no\n\n```\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvidia_smi\n binary_path: /usr/local/sbin/nvidia_smi\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvidia_smi\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | \u2022 | |\n| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | \u2022 | |\n| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_utilization | gpu | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_memory_utilization | memory | % | \u2022 | \u2022 |\n| nvidia_smi.gpu_decoder_utilization | decoder | % | \u2022 | |\n| nvidia_smi.gpu_encoder_utilization | encoder | % | \u2022 | |\n| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | \u2022 |\n| nvidia_smi.gpu_bar1_memory_usage | free, used | B | \u2022 | |\n| nvidia_smi.gpu_temperature | temperature | Celsius | \u2022 | \u2022 |\n| nvidia_smi.gpu_voltage | voltage | V | \u2022 | |\n| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | \u2022 | \u2022 |\n| nvidia_smi.gpu_power_draw | power_draw | Watts | \u2022 | \u2022 |\n| nvidia_smi.gpu_performance_state | P0-P15 | state | \u2022 | \u2022 |\n| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | \u2022 | |\n| nvidia_smi.gpu_mig_devices_count | mig | devices | \u2022 | |\n\n### Per mig\n\nThese metrics refer to the Multi-Instance GPU (MIG).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| uuid | GPU id (e.g. 00000000:00:04.0) |\n| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |\n| gpu_instance_id | GPU instance id (e.g. 1) |\n\nMetrics:\n\n| Metric | Dimensions | Unit | XML | CSV |\n|:------|:----------|:----|:---:|:---:|\n| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | \u2022 | |\n| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | \u2022 | |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvidia_smi-Nvidia_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvidia_smi/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-nvme", "plugin_name": "go.d.plugin", "module_name": "nvme", "monitored_instance": {"name": "NVMe devices", "link": "", "icon_filename": "nvme.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["nvme"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# NVMe devices\n\nPlugin: go.d.plugin\nModule: nvme\n\n## Overview\n\nThis collector monitors the health of NVMe devices using the command line tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. 
It uses `sudo` and assumes it is set up so that the netdata user can execute `nvme` as root without a password.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install nvme-cli\n\nSee [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.\n\n\n#### Allow netdata to execute nvme\n\nAdd the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /usr/sbin/nvme\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/nvme.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/nvme.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| binary_path | Path to nvme binary. The default is \"nvme\" and the executable is looked for in the directories specified in the PATH environment variable. | nvme | no |\n| timeout | nvme binary execution timeout. | 2 | no |\n\n#### Examples\n\n##### Custom binary path\n\nThe executable is not in the directories specified in the PATH environment variable.\n\n```yaml\njobs:\n - name: nvme\n binary_path: /usr/local/sbin/nvme\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m nvme\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the NVME device.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | NVMe device name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nvme.device_estimated_endurance_perc | used | % |\n| nvme.device_available_spare_perc | spare | % |\n| nvme.device_composite_temperature | temperature | celsius |\n| nvme.device_io_transferred_count | read, written | bytes |\n| nvme.device_power_cycles_count | power | cycles |\n| nvme.device_power_on_time | power-on | seconds |\n| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |\n| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |\n| nvme.device_media_errors_rate | media | errors/s |\n| nvme.device_error_log_entries_rate | error_log | entries/s |\n| nvme.device_warning_composite_temperature_time | wctemp | seconds |\n| nvme.device_critical_composite_temperature_time | cctemp | seconds |\n| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |\n| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |\n| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |\n| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-nvme-NVMe_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/nvme/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn", "plugin_name": "go.d.plugin", "module_name": "openvpn", "monitored_instance": {"name": "OpenVPN", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN\n\nPlugin: go.d.plugin\nModule: openvpn\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable in go.d.conf.\n\nThis collector is disabled by default. 
You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf).\n\nFrom the documentation for the OpenVPN Management Interface:\n> Currently, the OpenVPN daemon can at most support a single management client at any one time.\n\nThe collector is disabled by default to avoid breaking other tools that use the `Management Interface`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n| connect_timeout | Connection timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n| read_timeout | Read timeout in seconds. Sets deadline for read calls. | 2 | no |\n| write_timeout | Write timeout in seconds. Sets deadline for write calls. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n```\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:7505\n\n - name: remote\n address: 203.0.113.0:7505\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn-OpenVPN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-openvpn_status_log", "plugin_name": "go.d.plugin", "module_name": "openvpn_status_log", "monitored_instance": {"name": "OpenVPN status log", "link": "https://openvpn.net/", "icon_filename": "openvpn.svg", "categories": ["data-collection.vpns"]}, "keywords": ["openvpn", "vpn"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# OpenVPN status log\n\nPlugin: go.d.plugin\nModule: openvpn_status_log\n\n## Overview\n\nThis collector monitors OpenVPN servers.\n\nIt parses server log files and provides summary and per-user metrics.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/openvpn_status_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/openvpn_status_log.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| log_path | Path to status log. | /var/log/openvpn/status.log | yes |\n| per_user_stats | User selector. Determines which user metrics will be collected. | | no |\n\n#### Examples\n\n##### With user metrics\n\nCollect metrics of all users.\n\n```yaml\njobs:\n - name: local\n per_user_stats:\n includes:\n - \"* *\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m openvpn_status_log\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenVPN status log instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.active_clients | clients | clients |\n| openvpn.total_traffic | in, out | kilobits/s |\n\n### Per user\n\nThese metrics refer to the VPN user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| username | VPN username |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openvpn.user_traffic | in, out | kilobits/s |\n| openvpn.user_connection_time | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-openvpn_status_log-OpenVPN_status_log", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/openvpn_status_log/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pgbouncer", "plugin_name": "go.d.plugin", "module_name": "pgbouncer", "monitored_instance": {"name": "PgBouncer", "link": "https://www.pgbouncer.org/", "icon_filename": "postgres.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pgbouncer"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PgBouncer\n\nPlugin: go.d.plugin\nModule: pgbouncer\n\n## Overview\n\nThis collector monitors PgBouncer servers.\n\nExecuted queries:\n\n- `SHOW VERSION;`\n- `SHOW CONFIG;`\n- `SHOW DATABASES;`\n- `SHOW STATS;`\n- `SHOW POOLS;`\n\nInformation about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with `stats_users` permissions to query your PgBouncer instance.\n\nTo create the `netdata` user:\n\n- Add `netdata` user to the `pgbouncer.ini` file:\n\n ```text\n stats_users = netdata\n ```\n\n- Add a password for the `netdata` user to the `userlist.txt` file:\n\n ```text\n \"netdata\" \"\"\n ```\n\n- To verify the credentials, run the following command\n\n ```bash\n psql -h localhost -U netdata -p 6432 pgbouncer -c \"SHOW VERSION;\" >/dev/null 2>&1 && 
echo OK || echo FAIL\n ```\n\n When it prompts for a password, enter the password you added to `userlist.txt`.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pgbouncer.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pgbouncer.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'\n\n - name: remote\n dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pgbouncer\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per PgBouncer instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.client_connections_utilization | used | percentage |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| postgres_database | Postgres database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |\n| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |\n| pgbouncer.db_server_connections_utilization | used | percentage |\n| pgbouncer.db_clients_wait_time | time | seconds |\n| pgbouncer.db_client_max_wait_time | time | seconds |\n| pgbouncer.db_transactions | transactions | transactions/s |\n| pgbouncer.db_transactions_time | time | seconds |\n| pgbouncer.db_transaction_avg_time | time | seconds |\n| pgbouncer.db_queries | queries | queries/s |\n| pgbouncer.db_queries_time | time | seconds |\n| pgbouncer.db_query_avg_time | time | seconds |\n| pgbouncer.db_network_io | received, sent | B/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-pgbouncer-PgBouncer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pgbouncer/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpdaemon", "plugin_name": "go.d.plugin", "module_name": "phpdaemon", "monitored_instance": {"name": "phpDaemon", "link": "https://github.com/kakserpom/phpdaemon", "icon_filename": "php.svg", "categories": ["data-collection.apm"]}, "keywords": ["phpdaemon", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# phpDaemon\n\nPlugin: go.d.plugin\nModule: phpdaemon\n\n## Overview\n\nThis collector monitors phpDaemon instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable phpDaemon's HTTP server\n\nStatistics are expected to be in JSON format.\n\n
\nphpDaemon configuration\n\nInstructions from [@METAJIJI](https://github.com/METAJIJI).\n\nTo enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.\nThe application is important because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.\n\n```php\n// /opt/phpdaemon/conf/phpd.conf\n\npath /opt/phpdaemon/conf/AppResolver.php;\nPool:HTTPServer {\n privileged;\n listen '127.0.0.1';\n port 8509;\n}\n```\n\n```php\n// /opt/phpdaemon/conf/AppResolver.php\n\n<?php\n\nclass MyAppResolver extends \\PHPDaemon\\Core\\AppResolver {\n public function getRequestRoute($req, $upstream) {\n if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {\n return $m[1];\n }\n }\n}\n\nreturn new MyAppResolver;\n```\n\n```php\n/opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php\n\n<?php\nnamespace PHPDaemon\\Applications;\n\nuse PHPDaemon\\Core\\Daemon;\n\nclass FullStatus extends \\PHPDaemon\\Core\\AppInstance {\n public function beginRequest($req, $upstream) {\n return new FullStatusRequest($req, $upstream, $this);\n }\n}\n\nclass FullStatusRequest extends \\PHPDaemon\\HTTPRequest\\Generic {\n public function run() {\n $this->header('Content-Type: application/javascript; charset=utf-8');\n\n $stat = Daemon::getStateOfWorkers();\n $stat['uptime'] = time() - Daemon::$startTime;\n echo json_encode($stat);\n }\n}\n```\n\n
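After restarting phpDaemon, you can verify the endpoint before configuring the collector. This is a minimal check, assuming the listen address and port from the `phpd.conf` example above; it should print a JSON document with the worker state:\n\n```bash\ncurl http://127.0.0.1:8509/FullStatus\n```\n\n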
\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpdaemon.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpdaemon.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nHTTPS with self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8509/FullStatus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8509/FullStatus\n\n - name: remote\n url: http://192.0.2.1:8509/FullStatus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpdaemon\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per phpDaemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpdaemon.workers | alive, shutdown | workers |\n| phpdaemon.alive_workers | idle, busy, reloading | workers |\n| phpdaemon.idle_workers | preinit, init, initialized | workers |\n| phpdaemon.uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpdaemon-phpDaemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpdaemon/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-phpfpm", "plugin_name": "go.d.plugin", "module_name": "phpfpm", "monitored_instance": {"name": "PHP-FPM", "link": "https://php-fpm.org/", "icon_filename": "php.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["phpfpm", "php"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PHP-FPM\n\nPlugin: go.d.plugin\nModule: phpfpm\n\n## Overview\n\nThis collector monitors PHP-FPM instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable status page\n\nUncomment the `pm.status_path = /status` variable in the `php-fpm` config file.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/phpfpm.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/phpfpm.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/status?full&json | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. 
| no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### HTTP\n\nCollecting data from a local instance over HTTP.\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n```\n##### Unix socket\n\nCollecting data from a local instance over Unix socket.\n\n```yaml\njobs:\n - name: local\n socket: '/tmp/php-fpm.sock'\n\n```\n##### TCP socket\n\nCollecting data from a local instance over TCP socket.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:9000\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost/status?full&json\n\n - name: remote\n url: http://203.0.113.10/status?full&json\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m phpfpm\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PHP-FPM instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| phpfpm.connections | active, max_active, idle | connections |\n| phpfpm.requests | requests | requests/s |\n| phpfpm.performance | max_children_reached, slow_requests | status |\n| phpfpm.request_duration | min, max, avg | milliseconds |\n| phpfpm.request_cpu | min, max, avg | percentage |\n| phpfpm.request_mem | min, max, avg | KB |\n\n", "integration_type": "collector", "id": "go.d.plugin-phpfpm-PHP-FPM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pihole", "plugin_name": "go.d.plugin", "module_name": "pihole", "monitored_instance": {"name": "Pi-hole", "link": "https://pi-hole.net", "icon_filename": "pihole.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["pihole"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pi-hole\n\nPlugin: go.d.plugin\nModule: pihole\n\n## Overview\n\nThis collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE).\n\nThe data provided by the API is for the last 24 hours. 
All collected values refer to this time period and not to the\nmodule's collection interval.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pihole.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pihole.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1 | yes |\n| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |\n| timeout | HTTP request timeout. | 5 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n```\n##### HTTPS with self-signed certificate\n\nRemote instance with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: remote\n url: https://203.0.113.11\n tls_skip_verify: yes\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1\n\n - name: remote\n url: http://203.0.113.10\n password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pihole\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |\n| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pi-hole instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pihole.dns_queries_total | queries | queries |\n| pihole.dns_queries | cached, blocked, forwarded | queries |\n| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |\n| pihole.unique_clients | unique | clients |\n| pihole.domains_on_blocklist | blocklist | domains |\n| pihole.blocklist_last_update | ago | seconds |\n| pihole.unwanted_domains_blocking_status | enabled, disabled | status |\n| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |\n| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |\n\n", "integration_type": "collector", "id": "go.d.plugin-pihole-Pi-hole", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pika", "plugin_name": "go.d.plugin", "module_name": "pika", "monitored_instance": {"name": "Pika", "link": "https://github.com/OpenAtomFoundation/pika", "icon_filename": "pika.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["pika", "databases"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Pika\n\nPlugin: go.d.plugin\nModule: pika\n\n## Overview\n\nThis collector monitors Pika servers.\n\nIt collects information and statistics about the server executing the following commands:\n\n- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo 
action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pika.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pika.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Pika server address. | redis://@localhost:9221 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@localhost:9221'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:9221'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:9221'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pika\n ```\n\n
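### Check the server manually\n\nIf the debug output points to connection problems, it can help to confirm that the Pika server itself answers Redis-protocol commands. A minimal check, assuming `redis-cli` is installed and Pika listens on the default port:\n\n```bash\nredis-cli -p 9221 PING\n```\n\nA healthy server replies with `PONG`.\n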
", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pika instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pika.connections | accepted | connections |\n| pika.clients | connected | clients |\n| pika.memory | used | bytes |\n| pika.connected_replicas | connected | replicas |\n| pika.commands | processed | commands/s |\n| pika.commands_calls | a dimension per command | calls/s |\n| pika.database_strings_keys | a dimension per database | keys |\n| pika.database_strings_expires_keys | a dimension per database | keys |\n| pika.database_strings_invalid_keys | a dimension per database | keys |\n| pika.database_hashes_keys | a dimension per database | keys |\n| pika.database_hashes_expires_keys | a dimension per database | keys |\n| pika.database_hashes_invalid_keys | a dimension per database | keys |\n| pika.database_lists_keys | a dimension per database | keys |\n| pika.database_lists_expires_keys | a dimension per database | keys |\n| pika.database_lists_invalid_keys | a dimension per database | keys |\n| pika.database_zsets_keys | a dimension per database | keys |\n| pika.database_zsets_expires_keys | a dimension per database | keys |\n| pika.database_zsets_invalid_keys | a dimension per database | keys |\n| pika.database_sets_keys | a dimension per database | keys |\n| pika.database_sets_expires_keys | a dimension per database | keys |\n| pika.database_sets_invalid_keys | a dimension per database | keys |\n| pika.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-pika-Pika", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pika/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-ping", "plugin_name": "go.d.plugin", "module_name": "ping", "monitored_instance": {"name": "Ping", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["ping"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Ping\n\nPlugin: go.d.plugin\nModule: ping\n\n## Overview\n\nThis module measures round-trip time and packet loss by sending ping messages to network hosts.\n\nThere are two operational modes:\n\n- privileged (send raw ICMP ping, default). 
Requires\n CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:\n > **Note**: set automatically during Netdata installation.\n\n ```bash\n sudo setcap CAP_NET_RAW=eip /usr/libexec/netdata/plugins.d/go.d.plugin\n ```\n\n- unprivileged (send UDP ping, Linux only).\n Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):\n\n ```bash\n sudo sysctl -w net.ipv4.ping_group_range=\"0 2147483647\"\n ```\n To persist the change add `net.ipv4.ping_group_range=\"0 2147483647\"` to `/etc/sysctl.conf` and\n execute `sudo sysctl -p`.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/ping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/ping.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hosts | Network hosts. | | yes |\n| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |\n| privileged | Ping packets type. \"no\" means send an \"unprivileged\" UDP ping, \"yes\" means send a raw ICMP ping. | yes | no |\n| packets | Number of ping packets to send. | 5 | no |\n| interval | Interval between sending ping packets. | 100ms | no |\n\n#### Examples\n\n##### IPv4 hosts\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Unprivileged mode\n\nAn example configuration.\n\n```yaml\njobs:\n - name: example\n privileged: no\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: example1\n hosts:\n - 192.0.2.0\n - 192.0.2.1\n\n - name: example2\n packets: 10\n hosts:\n - 192.0.2.3\n - 192.0.2.4\n\n```\n
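##### Force IPv6 resolution\n\nA sketch using the `network` option so that hosts given as DNS names resolve to IPv6 addresses (the host name below is only a placeholder):\n\n```yaml\njobs:\n - name: example\n network: ip6\n hosts:\n - example.com\n\n```\n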
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m ping\n ```\n\n
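### Verify capabilities\n\nIn the default privileged mode the plugin binary needs `CAP_NET_RAW`. You can confirm the capability is set (the path below assumes a standard install):\n\n```bash\ngetcap /usr/libexec/netdata/plugins.d/go.d.plugin\n```\n\nThe output should mention `cap_net_raw`.\n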
", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |\n| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |\n| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per host\n\nThese metrics refer to the remote host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | remote host |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ping.host_rtt | min, max, avg | milliseconds |\n| ping.host_std_dev_rtt | std_dev | milliseconds |\n| ping.host_packet_loss | loss | percentage |\n| ping.host_packets | received, sent | packets |\n\n", "integration_type": "collector", "id": "go.d.plugin-ping-Ping", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/ping/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-portcheck", "plugin_name": "go.d.plugin", "module_name": "portcheck", "monitored_instance": {"name": "TCP Endpoints", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# TCP Endpoints\n\nPlugin: go.d.plugin\nModule: portcheck\n\n## Overview\n\nThis collector monitors TCP services availability and response time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/portcheck.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/portcheck.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |\n| ports | Remote host ports. Must be specified in numeric format. | | yes |\n| timeout | Connection timeout in seconds. | 2 | no |\n\n#### Examples\n\n##### Check SSH and telnet\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n```\n##### Check webserver with IPv6 address\n\nAn example configuration.\n\n```yaml\njobs:\n - name: server2\n host: \"[2001:DB8::1]\"\n ports:\n - 80\n - 8080\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nMultiple instances.\n\n\n```yaml\njobs:\n - name: server1\n host: 127.0.0.1\n ports:\n - 22\n - 23\n\n - name: server2\n host: 203.0.113.10\n ports:\n - 22\n - 23\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m portcheck\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |\n| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per tcp endpoint\n\nThese metrics refer to the TCP endpoint.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | host |\n| port | port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| portcheck.status | success, failed, timeout | boolean |\n| portcheck.state_duration | time | seconds |\n| portcheck.latency | time | ms |\n\n", "integration_type": "collector", "id": "go.d.plugin-portcheck-TCP_Endpoints", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/portcheck/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-postgres", "plugin_name": "go.d.plugin", "module_name": "postgres", "monitored_instance": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["data-collection.database-servers"], "icon_filename": "postgres.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["db", "database", "postgres", "postgresql", "sql"], "most_popular": true}, "overview": "# PostgreSQL\n\nPlugin: go.d.plugin\nModule: postgres\n\n## Overview\n\nThis collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.\n\n\nIt establishes a connection to the Postgres instance via a TCP or UNIX socket.\nTo collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:\n\n- 127.0.0.1:5432\n- /var/run/postgresql/\n\n\n#### Limits\n\nTable and index metrics are not collected for databases with more than 50 tables or 250 indexes.\nThese limits can be changed in the configuration file.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create netdata user\n\nCreate a user with granted `pg_monitor`\nor `pg_read_all_stat` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).\n\nTo create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:\n\n```postgresql\nCREATE USER netdata;\nGRANT pg_monitor TO netdata;\n```\n\nAfter creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or\nthe [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your\nsystem.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/postgres.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config 
go.d/postgres.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |\n| timeout | Query timeout in seconds. | 2 | no |\n| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). | | no |\n| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |\n| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n\n - name: remote\n dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'\n\n```\n
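##### Filter databases\n\nA sketch using `collect_databases_matching` with [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher) to limit per-database metrics to selected databases (the database names are placeholders):\n\n```yaml\njobs:\n - name: local\n dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'\n collect_databases_matching: 'production_* staging'\n\n```\n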
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m postgres\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |\n| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |\n| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |\n| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |\n| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_transactions_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |\n| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |\n| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |\n| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |\n| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |\n| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db 
${label:database} table ${label:table} was analyzed by the autovacuum daemon |\n| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PostgreSQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.connections_utilization | used | percentage |\n| postgres.connections_usage | available, used | connections |\n| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |\n| postgres.transactions_duration | a dimension per bucket | transactions/s |\n| postgres.queries_duration | a dimension per bucket | queries/s |\n| postgres.locks_utilization | used | percentage |\n| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |\n| postgres.checkpoints_time | write, sync | milliseconds |\n| postgres.bgwriter_halts_rate | maxwritten | events/s |\n| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |\n| postgres.buffers_backend_fsync_rate | fsync | calls/s |\n| postgres.buffers_allocated_rate | allocated | B/s |\n| postgres.wal_io_rate | write | B/s |\n| postgres.wal_files_count | written, recycled | files |\n| postgres.wal_archiving_files_count | ready, done | files/s |\n| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |\n| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |\n| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |\n| postgres.txid_exhaustion_oldest_txid_num | xid | xid |\n| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |\n| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |\n| postgres.uptime | uptime | seconds |\n| postgres.databases_count | databases | databases |\n\n### Per repl application\n\nThese metrics refer to the replication application.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| application | application name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |\n| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |\n\n### Per repl slot\n\nThese metrics refer to the replication slot.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| slot | replication slot name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |\n\n### Per database\n\nThese metrics refer to the database.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.db_transactions_ratio | committed, rollback | percentage 
|\n| postgres.db_transactions_rate | committed, rollback | transactions/s |\n| postgres.db_connections_utilization | used | percentage |\n| postgres.db_connections_count | connections | connections |\n| postgres.db_cache_io_ratio | miss | percentage |\n| postgres.db_io_rate | memory, disk | B/s |\n| postgres.db_ops_fetched_rows_ratio | fetched | percentage |\n| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |\n| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.db_conflicts_rate | conflicts | queries/s |\n| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |\n| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |\n| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |\n| postgres.db_temp_files_created_rate | created | files/s |\n| postgres.db_temp_files_io_rate | written | B/s |\n| postgres.db_size | size | B |\n\n### Per table\n\nThese metrics refer to the database table.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.table_rows_dead_ratio | dead | percentage |\n| postgres.table_rows_count | live, dead | rows |\n| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |\n| postgres.table_ops_rows_hot_ratio | hot | percentage |\n| postgres.table_ops_rows_hot_rate | hot | rows/s |\n| postgres.table_cache_io_ratio | miss | percentage |\n| postgres.table_io_rate | memory, disk | B/s |\n| postgres.table_index_cache_io_ratio | miss | percentage |\n| postgres.table_index_io_rate | memory, disk | B/s |\n| postgres.table_toast_cache_io_ratio | miss | percentage |\n| postgres.table_toast_io_rate | memory, disk | B/s |\n| postgres.table_toast_index_cache_io_ratio | miss | percentage |\n| postgres.table_toast_index_io_rate | memory, disk | B/s |\n| postgres.table_scans_rate | index, sequential | scans/s |\n| postgres.table_scans_rows_rate | index, sequential | rows/s |\n| postgres.table_autovacuum_since_time | time | seconds |\n| postgres.table_vacuum_since_time | time | seconds |\n| postgres.table_autoanalyze_since_time | time | seconds |\n| postgres.table_analyze_since_time | time | seconds |\n| postgres.table_null_columns | null | columns |\n| postgres.table_size | size | B |\n| postgres.table_bloat_size_perc | bloat | percentage |\n| postgres.table_bloat_size | bloat | B |\n\n### Per index\n\nThese metrics refer to the table index.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| database | database name |\n| schema | schema name |\n| table | table name |\n| parent_table | parent table name |\n| index | index name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postgres.index_size | size | B |\n| postgres.index_bloat_size_perc | bloat | percentage |\n| postgres.index_bloat_size | bloat | B |\n| postgres.index_usage_status | used, unused | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-postgres-PostgreSQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/metadata.yaml", "related_resources": ""}, {"meta": 
{"id": "collector-go.d.plugin-powerdns", "plugin_name": "go.d.plugin", "module_name": "powerdns", "monitored_instance": {"name": "PowerDNS Authoritative Server", "link": "https://doc.powerdns.com/authoritative/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Authoritative Server\n\nPlugin: go.d.plugin\nModule: powerdns\n\n## Overview\n\nThis collector monitors PowerDNS Authoritative Server instances.\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Authoritative Server instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns.questions_in | udp, tcp | questions/s |\n| powerdns.questions_out | udp, tcp | questions/s |\n| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s |\n| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |\n| powerdns.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns-PowerDNS_Authoritative_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-powerdns_recursor", "plugin_name": "go.d.plugin", "module_name": "powerdns_recursor", "monitored_instance": {"name": "PowerDNS Recursor", "link": "https://doc.powerdns.com/recursor/", "icon_filename": "powerdns.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["powerdns", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# PowerDNS Recursor\n\nPlugin: go.d.plugin\nModule: powerdns_recursor\n\n## Overview\n\nThis collector monitors PowerDNS Recursor instances.\n\nIt collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).\n\nUsed endpoints:\n\n- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration 
does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable webserver\n\nFollow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.\n\n\n#### Enable HTTP API\n\nFollow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/powerdns_recursor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/powerdns_recursor.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8081 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n username: admin\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8081\n\n - name: remote\n url: http://203.0.113.0:8081\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m powerdns_recursor\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per PowerDNS Recursor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |\n| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |\n| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |\n| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |\n| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |\n| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |\n| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |\n\n", "integration_type": "collector", "id": "go.d.plugin-powerdns_recursor-PowerDNS_Recursor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/powerdns_recursor/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-4d_server", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "4D Server", "link": "https://github.com/ThomasMaul/Prometheus_4D_Exporter", "icon_filename": "4d_server.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 4D Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor 4D Server performance metrics for efficient application management and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-4D_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-8430ft-modem", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "8430FT modem", "link": "https://github.com/dernasherbrezon/8430ft_exporter", "icon_filename": "mtc.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# 8430FT modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [8430FT 
Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-8430FT_modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-a10-acos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "A10 ACOS network devices", "link": "https://github.com/a10networks/PrometheusExporter", "icon_filename": "a10-networks.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# A10 ACOS network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor A10 Networks device metrics for comprehensive management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-A10_ACOS_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-amd_smi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AMD CPU & GPU", "link": "https://github.com/amd/amd_smi_exporter", "icon_filename": "amd.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AMD CPU & GPU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AMD System Management Interface performance for optimized hardware management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AMD_CPU_&_GPU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apicast", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "APIcast", "link": "https://github.com/3scale/apicast", "icon_filename": "apicast.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# APIcast\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor APIcast performance metrics to optimize API gateway operations and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-APIcast", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arm_hwcpipe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ARM HWCPipe", "link": "https://github.com/ylz-at/arm-hwcpipe-exporter", "icon_filename": "arm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ARM HWCPipe\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep track of ARM-based Android devices and get metrics for efficient performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ARM HWCPipe 
Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ARM_HWCPipe", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Compute instances", "link": "https://github.com/O1ahmad/aws_ec2_exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Compute instances\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS EC2 instances' key metrics for optimized performance and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS 
EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Compute_instances", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ec2_spot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS EC2 Spot Instance", "link": "https://github.com/patcadelina/ec2-spot-exporter", "icon_filename": "aws-ec2.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS EC2 Spot Instance\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_EC2_Spot_Instance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS ECS", "link": "https://github.com/bevers222/ecs-exporter", "icon_filename": "amazon-ecs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS ECS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on AWS ECS services and resources for optimized container management and orchestration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS ECS 
exporter](https://github.com/bevers222/ecs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_ECS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Health events", "link": "https://github.com/vladvasiliu/aws-health-exporter-rs", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Health events\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS service health metrics for proactive incident management and resolution.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Health_events", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS Quota", "link": "https://github.com/emylincon/aws_quota_exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS service quotas for effective resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_rds", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS RDS", "link": "https://github.com/percona/rds_exporter", "icon_filename": "aws-rds.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS RDS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[rds_exporter](https://github.com/percona/rds_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_RDS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_s3", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS S3 buckets", "link": "https://github.com/ribbybibby/s3_exporter", "icon_filename": "aws-s3.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS S3 buckets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS S3 
Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_S3_buckets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_sqs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS SQS", "link": "https://github.com/jmal98/sqs-exporter", "icon_filename": "aws-sqs.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS SQS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AWS SQS messaging metrics for efficient message processing and queue management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AWS SQS 
Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_SQS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_instance_health", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AWS instance health", "link": "https://github.com/bobtfish/aws-instance-health-exporter", "icon_filename": "aws.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "aws services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AWS instance health\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor the health of AWS instances for improved performance and availability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AWS_instance_health", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airthings_waveplus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Airthings Waveplus air sensor", "link": "https://github.com/jeremybz/waveplus_exporter", "icon_filename": "airthings.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Airthings Waveplus air sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Airthings_Waveplus_air_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_edgedns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Edge DNS Traffic", "link": "https://github.com/akamai/akamai-edgedns-traffic-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Edge DNS Traffic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze Akamai Edge DNS traffic for enhanced performance and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Edge_DNS_Traffic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_gtm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Global Traffic Management", "link": "https://github.com/akamai/akamai-gtm-metrics-exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Global Traffic Management\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akamai_Global_Traffic_Management", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-akami_cloudmonitor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Akamai Cloudmonitor", "link": "https://github.com/ExpressenAB/cloudmonitor_exporter", "icon_filename": "akamai.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Akamai Cloudmonitor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Akami_Cloudmonitor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alamos_fe2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alamos FE2 server", "link": "https://github.com/codemonauts/prometheus-fe2-exporter", "icon_filename": "alamos_fe2.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alamos FE2 server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Alamos FE2 systems for improved performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Alamos FE2 
Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alamos_FE2_server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-alibaba-cloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Alibaba Cloud", "link": "https://github.com/aylei/aliyun-exporter", "icon_filename": "alibaba-cloud.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Alibaba Cloud\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Alibaba Cloud services and resources for efficient management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Alibaba_Cloud", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-altaro_backup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Altaro Backup", "link": "https://github.com/raph2i/altaro_backup_exporter", "icon_filename": "altaro.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Altaro Backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Altaro Backup performance metrics to ensure smooth data protection and recovery operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Altaro Backup 
Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Altaro_Backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aaisp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Andrews & Arnold line status", "link": "https://github.com/daveio/aaisp-exporter", "icon_filename": "andrewsarnold.jpg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Andrews & Arnold line status\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Andrews_&_Arnold_line_status", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-airflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Airflow", "link": "https://github.com/shalb/airflow-exporter", "icon_filename": "airflow.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Airflow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Airflow metrics to optimize task scheduling and workflow management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Airflow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-flink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apache Flink", "link": "https://github.com/matsumana/flink_exporter", "icon_filename": "apache_flink.png", "categories": ["data-collection.apm"]}, "keywords": ["web server", "http", "https"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apache Flink\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Apache Flink metrics for efficient stream processing and application management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apache Flink Metrics 
Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apache_Flink", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-apple_timemachine", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Apple Time Machine", "link": "https://github.com/znerol/prometheus-timemachine-exporter", "icon_filename": "apple.svg", "categories": ["data-collection.macos-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Apple Time Machine\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Apple Time Machine backup metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Apple Time Machine 
Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Apple_Time_Machine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aruba", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Aruba devices", "link": "https://github.com/slashdoom/aruba_exporter", "icon_filename": "aruba.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "aruba devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Aruba devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Aruba Networks device performance metrics for comprehensive network management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Aruba_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-arvancloud_cdn", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ArvanCloud CDN", "link": "https://github.com/arvancloud/ar-prometheus-exporter", "icon_filename": "arvancloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ArvanCloud CDN\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ArvanCloud_CDN", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-audisto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Audisto", "link": "https://github.com/ZeitOnline/audisto_exporter", "icon_filename": "audisto.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Audisto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Audisto SEO and website metrics for improved search performance and optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series exceeding this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has the 'quantile' label.\n- As Histogram if it has the 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Audisto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-authlog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "AuthLog", "link": "https://github.com/woblerr/authlog_exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# AuthLog\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor authentication logs for security insights and efficient access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-AuthLog", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_ad_app_passwords", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure AD App passwords", "link": "https://github.com/vladvasiliu/azure-app-secrets-monitor", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure AD App passwords\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nSafeguard and track Azure App secrets for enhanced security and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_AD_App_passwords", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_elastic_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Elastic Pool SQL", "link": "https://github.com/benclapp/azure_elastic_sql_exporter", "icon_filename": "azure-elastic-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Elastic Pool SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Elastic SQL performance metrics for efficient database management and query optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Elastic_Pool_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_res", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Resources", "link": "https://github.com/FXinnovation/azure-resources-exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Resources\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the vital metrics of Azure resources for efficient cloud management and cost optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Resources", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure SQL", "link": "https://github.com/iamseth/azure_sql_exporter", "icon_filename": "azure-sql.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure SQL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure SQL performance metrics for efficient database management and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Azure SQL 
exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_SQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_service_bus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure Service Bus", "link": "https://github.com/marcinbudny/servicebus_exporter", "icon_filename": "azure-service-bus.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure Service Bus\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Azure Service Bus messaging metrics for optimized communication and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_Service_Bus", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-azure_app", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Azure application", "link": "https://github.com/RobustPerception/azure_metrics_exporter", "icon_filename": "azure.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "azure services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Azure application\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Azure Monitor metrics for comprehensive resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Azure_application", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bosh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BOSH", "link": "https://github.com/bosh-prometheus/bosh_exporter", "icon_filename": "bosh.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BOSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
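For instance, a minimal job-level sketch (a sketch only: the `bosh_*` pattern and port are illustrative placeholders, not taken from the BOSH exporter docs):\n\n```yaml\njobs:\n - name: bosh\n url: http://127.0.0.1:9090/metrics # placeholder port\n selector:\n allow:\n - bosh_* # hypothetical pattern\n```\n\n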
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BOSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bigquery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BigQuery", "link": "https://github.com/m-lab/prometheus-bigquery-exporter", "icon_filename": "bigquery.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BigQuery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google BigQuery metrics for optimized data processing and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
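Related to trimming series volume, a sketch of raising the two limits from the table above for a high-cardinality endpoint (a sketch only; values and port are illustrative placeholders):\n\n```yaml\njobs:\n - name: bigquery\n url: http://127.0.0.1:9090/metrics # placeholder port\n max_time_series: 5000 # illustrative value\n max_time_series_per_metric: 500 # illustrative value\n```\n\n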
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BigQuery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bird", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bird Routing Daemon", "link": "https://github.com/czerwonk/bird_exporter", "icon_filename": "bird.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bird Routing Daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Bird Routing Daemon metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bird Routing Daemon 
Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
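Stepping back to the table above for a moment, a sketch of scraping through an HTTP proxy (a sketch only; the proxy endpoint and credentials are placeholders, and it assumes the proxy options are set per job like the rest):\n\n```yaml\njobs:\n - name: bird\n url: http://127.0.0.1:9090/metrics # placeholder port\n proxy_url: http://127.0.0.1:3128 # placeholder proxy\n proxy_username: proxyuser # placeholder\n proxy_password: proxypass # placeholder\n```\n\n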
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bird_Routing_Daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Blackbox", "link": "https://github.com/prometheus/blackbox_exporter", "icon_filename": "prometheus.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": ["blackbox"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack external service availability and response times with Blackbox monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Blackbox 
exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
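On the `tls_*` options above, a sketch that validates the server against a custom CA and presents a client certificate (a sketch only; paths are placeholders, and it assumes these options take file paths):\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics # placeholder port\n tls_ca: /etc/ssl/internal-ca.crt # placeholder path\n tls_cert: /etc/ssl/client.crt # placeholder path\n tls_key: /etc/ssl/client.key # placeholder path\n```\n\n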
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bobcat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Bobcat Miner 300", "link": "https://github.com/pperzyna/bobcat_exporter", "icon_filename": "bobcat.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Bobcat Miner 300\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Bobcat equipment metrics for optimized performance and maintenance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
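On the `headers` option above, a sketch of sending custom request headers (a sketch only; header names and values are placeholders, and it assumes `headers` is a key/value mapping):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics # placeholder port\n headers:\n X-Api-Key: secret # placeholder header\n```\n\n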
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Bobcat_Miner_300", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-borg", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Borg backup", "link": "https://github.com/k0ral/borg-exporter", "icon_filename": "borg.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Borg backup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Borg backup performance metrics for efficient data protection and recovery.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
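A sketch of the two globally definable options mentioned above, set once at the top of `go.d/prometheus.conf` so every job inherits them (values are illustrative):\n\n```yaml\nupdate_every: 30 # illustrative value\nautodetection_retry: 60 # illustrative value\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics # placeholder port\n```\n\n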
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Borg_backup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bungeecord", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "BungeeCord", "link": "https://github.com/weihao/bungeecord-prometheus-exporter", "icon_filename": "bungee.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# BungeeCord\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack BungeeCord proxy server metrics for efficient load balancing and performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [BungeeCord Prometheus 
Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (matched by metric name pattern). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
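On the table above, a sketch combining a longer request timeout with redirect following disabled (values are illustrative):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics # placeholder port\n timeout: 20 # illustrative value\n not_follow_redirects: yes\n```\n\n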
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-BungeeCord", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-csgo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CS:GO", "link": "https://github.com/kinduff/csgo_exporter", "icon_filename": "csgo.svg", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CS:GO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Counter-Strike: Global Offensive server metrics for improved game performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CS:GO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cvmfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CVMFS clients", "link": "https://github.com/guilbaults/cvmfs-exporter", "icon_filename": "cvmfs.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CVMFS clients\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CernVM File System metrics for optimized distributed file system performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CVMFS 
exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CVMFS_clients", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-celery", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Celery", "link": "https://github.com/ZeitOnline/celery_redis_prometheus", "icon_filename": "celery.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Celery\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Celery task queue metrics for optimized task processing and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Celery", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-certificate_transparency", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Certificate Transparency", "link": "https://github.com/Hsn723/ct-exporter", "icon_filename": "ct.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Certificate Transparency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack certificate transparency log metrics for enhanced\nSSL/TLS certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ct-exporter](https://github.com/Hsn723/ct-exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Certificate_Transparency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-checkpoint", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Checkpoint device", "link": "https://github.com/RespiroConsulting/CheckPointExporter", "icon_filename": "checkpoint.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Checkpoint device\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Check Point firewall and security metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Checkpoint_device", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-chia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Chia", "link": "https://github.com/chia-network/chia-exporter", "icon_filename": "chia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Chia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Chia blockchain metrics for optimized farming and resource allocation.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Chia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clm5ip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Christ Elektronik CLM5IP power panel", "link": "https://github.com/christmann/clm5ip_exporter/", "icon_filename": "christelec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Christ Elektronik CLM5IP power panel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Christ Elektronik 
CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback type for Untyped metrics (process as Counter or Gauge). | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Christ_Elektronik_CLM5IP_power_panel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_agent", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Agent", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Agent\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Cilium Agent metrics for optimized network security and connectivity.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Agent", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_operator", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Operator", "link": "https://github.com/cilium/cilium", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Operator\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cilium Operator metrics for efficient Kubernetes network security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Operator", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cilium_proxy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cilium Proxy", "link": "https://github.com/cilium/proxy", "icon_filename": "cilium.png", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cilium Proxy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cilium Proxy metrics for enhanced network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.\n\n\n\n### 
Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cilium_Proxy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cisco_aci", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cisco ACI", "link": "https://github.com/RavuAlHemio/prometheus_aci_exporter", "icon_filename": "cisco.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "cisco devices"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cisco ACI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cisco ACI infrastructure metrics for optimized network performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cisco_ACI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-citrix_netscaler", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Citrix NetScaler", "link": "https://github.com/rokett/Citrix-NetScaler-Exporter", "icon_filename": "citrix.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Citrix NetScaler\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetScaler performance metrics for efficient application delivery and load balancing.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Citrix_NetScaler", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClamAV daemon", "link": "https://github.com/sergeymakinen/clamav_exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClamAV daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack ClamAV antivirus metrics for enhanced threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ClamAV daemon stats 
exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClamAV_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clamscan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clamscan results", "link": "https://github.com/FortnoxAB/clamscan-exporter", "icon_filename": "clamav.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clamscan results\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClamAV scanning performance metrics for efficient malware detection and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clamscan_results", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clash", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Clash", "link": "https://github.com/elonzh/clash_exporter", "icon_filename": "clash.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Clash\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Clash proxy server metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Clash", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-clickhouse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClickHouse", "link": "https://github.com/ClickHouse/ClickHouse", "icon_filename": "clickhouse.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClickHouse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ClickHouse database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClickHouse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-aws_cloudwatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CloudWatch", "link": "https://github.com/prometheus/cloudwatch_exporter", "icon_filename": "aws-cloudwatch.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CloudWatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-CloudWatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry", "link": "https://github.com/bosh-prometheus/cf_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Cloud Foundry platform metrics for optimized application deployment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloud_foundry_firebase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloud Foundry Firehose", "link": "https://github.com/bosh-prometheus/firehose_exporter", "icon_filename": "cloud-foundry.svg", "categories": ["data-collection.provisioning-systems"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloud Foundry Firehose\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloud_Foundry_Firehose", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cloudflare_pcap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cloudflare PCAP", "link": "https://github.com/wehkamp/docker-prometheus-cloudflare-exporter", "icon_filename": "cloudflare.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cloudflare PCAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cloudflare_PCAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ClusterControl CMON", "link": "https://github.com/severalnines/cmon_exporter", "icon_filename": "cluster-control.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ClusterControl CMON\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack CMON metrics from Severalnines ClusterControl for efficient monitoring and management of database operations.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CMON 
Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) used to process Untyped metrics instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without '# TYPE') are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ClusterControl_CMON", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-collectd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Collectd", "link": "https://github.com/prometheus/collectd_exporter", "icon_filename": "collectd.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Collectd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system and application metrics with Collectd for comprehensive performance analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Collectd exporter](https://github.com/prometheus/collectd_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
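As a concrete illustration (the metric names below are purely hypothetical), a job could keep all Go runtime memory series except the GC-related ones like this:\n\n```yaml\n# keep go_memstats_* series, except the GC-related ones\nselector:\n allow:\n - go_memstats_*\n deny:\n - go_memstats_gc_*\n```\n\n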
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Collectd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-concourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Concourse", "link": "https://concourse-ci.org", "icon_filename": "concourse.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Concourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://concourse-ci.org/metrics.html#configuring-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Concourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ftbeerpi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "CraftBeerPi", "link": "https://github.com/jo-hannes/craftbeerpi_exporter", "icon_filename": "craftbeer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# CraftBeerPi\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [CraftBeerPi 
exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n
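To illustrate these rules, here is how a few hypothetical untyped samples would be classified (all metric names are invented for this sketch):\n\n```text\napp_requests_total 15 -> Counter (name ends in '_total')\napp_latency_seconds{quantile=\"0.99\"} 0.2 -> Summary ('quantile' label)\napp_workers 4 -> ignored (no hint and no 'fallback_type' match)\n```\n\n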
", "integration_type": "collector", "id": "go.d.plugin-prometheus-CraftBeerPi", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crowdsec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crowdsec", "link": "https://docs.crowdsec.net/docs/observability/prometheus", "icon_filename": "crowdsec.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crowdsec\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Crowdsec security metrics for efficient threat detection and response.\n\n\nMetrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
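For example, assuming an application exposes untyped counters whose names end in `_count` and untyped gauges whose names end in `_bytes` (both naming patterns are hypothetical), they could be classified like this:\n\n```yaml\n# quote the patterns so YAML does not parse '*' as an alias\nfallback_type:\n counter:\n - '*_count'\n gauge:\n - '*_bytes'\n```\n\n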
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crowdsec", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-crypto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Crypto exchanges", "link": "https://github.com/ix-ai/crypto-exporter", "icon_filename": "crypto.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Crypto exchanges\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack cryptocurrency market metrics for informed investment and trading decisions.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Crypto_exchanges", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cryptowatch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Cryptowatch", "link": "https://github.com/nbarrientos/cryptowat_exporter", "icon_filename": "cryptowatch.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Cryptowatch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cryptowat 
Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Cryptowatch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-custom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Custom Exporter", "link": "https://github.com/orange-cloudfoundry/custom_exporter", "icon_filename": "customdata.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Custom Exporter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nCreate and monitor custom metrics tailored to your specific use case and requirements.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Custom 
Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Custom_Exporter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ddwrt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DDWRT Routers", "link": "https://github.com/camelusferus/ddwrt_collector", "icon_filename": "ddwrt.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DDWRT Routers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on DD-WRT router metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DDWRT_Routers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dmarc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DMARC", "link": "https://github.com/jgosmann/dmarc-metrics-exporter", "icon_filename": "dmarc.png", "categories": ["data-collection.mail-servers"]}, "keywords": ["email authentication", "policy", "reporting"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DMARC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DMARC email authentication metrics for improved email security and deliverability.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DMARC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dnsbl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DNSBL", "link": "https://github.com/Luzilla/dnsbl_exporter/", "icon_filename": "dnsbl.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DNSBL\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor DNSBL metrics for efficient domain reputation and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DNSBL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_ecs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC ECS cluster", "link": "https://github.com/paychex/prometheus-emcecs-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC ECS cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC ECS object storage metrics for optimized storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dell EMC ECS 
Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_ECS_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_isilon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC Isilon cluster", "link": "https://github.com/paychex/prometheus-isilon-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC Isilon cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_Isilon_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_emc_xtremio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell EMC XtremIO cluster", "link": "https://github.com/cthiel42/prometheus-xtremio-exporter", "icon_filename": "dell.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell EMC XtremIO cluster\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_EMC_XtremIO_cluster", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dell_powermax", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dell PowerMax", "link": "https://github.com/kckecheng/powermax_exporter", "icon_filename": "powermax.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dell PowerMax\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PowerMax 
Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dell_PowerMax", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dependency_track", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dependency-Track", "link": "https://github.com/jetstack/dependency-track-exporter", "icon_filename": "dependency-track.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dependency-Track\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dependency-Track 
Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
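As a concrete sketch, a selector scoped to this exporter's metrics (both the `dependency_track_*` and `dependency_track_portfolio_*` patterns are assumptions about the exporter's metric naming, not taken from its docs; check your endpoint's output for the real names):\n\n```yaml\nselector:\n allow:\n - dependency_track_*\n deny:\n - dependency_track_portfolio_*\n```\n\n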
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dependency-Track", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-digitalocean", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "DigitalOcean", "link": "https://github.com/metalmatze/digitalocean_exporter", "icon_filename": "digitalocean.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# DigitalOcean\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack DigitalOcean cloud provider metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [DigitalOcean 
Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
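For instance, a sketch of overriding several of these options in a single job (the values are illustrative, not recommendations, and the port is the generic placeholder used throughout this page):\n\n```yaml\njobs:\n - name: digitalocean\n url: http://127.0.0.1:9090/metrics\n update_every: 5\n timeout: 5\n max_time_series: 5000\n```\n\n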
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-DigitalOcean", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-discourse", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Discourse", "link": "https://github.com/discourse/discourse-prometheus", "icon_filename": "discourse.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Discourse\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Discourse forum metrics for efficient community management and engagement.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Discourse 
Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
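For example, assuming either list may be omitted (as the option syntax above suggests), an allow-only selector; the `discourse_*` pattern is an assumption about the exporter's metric naming, so verify it against your endpoint's output:\n\n```yaml\nselector:\n allow:\n - discourse_*\n```\n\n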
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Discourse", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dutch_electricity_smart_meter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dutch Electricity Smart Meter", "link": "https://github.com/TobiasDeBruijn/prometheus-p1-exporter", "icon_filename": "dutch-electricity.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dutch Electricity Smart Meter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
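As a sketch of the proxy options from this table in a job definition (the proxy address and credentials are placeholders):\n\n```yaml\njobs:\n - name: p1_meter\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypass\n```\n\n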
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dutch_Electricity_Smart_Meter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-dynatrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Dynatrace", "link": "https://github.com/Apside-TOP/dynatrace_exporter", "icon_filename": "dynatrace.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Dynatrace\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Dynatrace APM metrics for comprehensive application performance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Dynatrace 
Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
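For example, a deny-only selector that drops the Go runtime and process series that exporters built with the official Go client library typically expose (this assumes the standard `go_*` and `process_*` metric names; adjust if your exporter differs):\n\n```yaml\nselector:\n deny:\n - go_*\n - process_*\n```\n\n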
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Dynatrace", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eos_web", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "EOS", "link": "https://eos-web.web.cern.ch/eos-web/", "icon_filename": "eos.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# EOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor CERN EOS metrics for efficient storage management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
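For instance, a sketch combining the TLS options above for an endpoint behind a private certification authority (all file paths are placeholders):\n\n```yaml\njobs:\n - name: eos\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/my_private_ca.pem\n tls_cert: /etc/ssl/client.pem\n tls_key: /etc/ssl/client.key\n```\n\n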
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-EOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-eaton_ups", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Eaton UPS", "link": "https://github.com/psyinfra/prometheus-eaton-ups-exporter", "icon_filename": "eaton.svg", "categories": ["data-collection.ups"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Eaton UPS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Prometheus Eaton UPS 
Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency, in seconds. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout, in seconds. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
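To make the Logic expression above concrete, here is the same option syntax annotated with the role each list plays (the patterns themselves are placeholders):\n\n```yaml\nselector:\n allow: # matches if (pattern1 OR pattern2)\n - pattern1\n - pattern2\n deny: # AND NOT (pattern3 OR pattern4)\n - pattern3\n - pattern4\n```\n\n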
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Eaton_UPS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-elgato_keylight", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Elgato Key Light devices.", "link": "https://github.com/mdlayher/keylight_exporter", "icon_filename": "elgato.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Elgato Key Light devices.\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Elgato Key Light metrics for optimized lighting control and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Elgato Key Light 
exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Elgato_Key_Light_devices.", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-energomera", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Energomera smart power meters", "link": "https://github.com/peak-load/energomera_exporter", "icon_filename": "energomera.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Energomera smart power meters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Energomera electricity meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[energomera-exporter](https://github.com/peak-load/energomera_exporter), the Energomera electricity meter exporter, by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Energomera_smart_power_meters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-excel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Excel spreadsheet", "link": "https://github.com/MarcusCalidus/excel-exporter", "icon_filename": "excel.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Excel spreadsheet\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport Prometheus metrics to Excel for versatile data analysis and reporting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Excel 
Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Excel_spreadsheet", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-frrouting", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FRRouting", "link": "https://github.com/tynany/frr_exporter", "icon_filename": "frrouting.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FRRouting\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Free Range Routing (FRR) metrics for optimized network routing and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FRRouting Exporter](https://github.com/tynany/frr_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FRRouting", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fastd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fastd", "link": "https://github.com/freifunk-darmstadt/fastd-exporter", "icon_filename": "fastd.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fastd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Fastd VPN metrics for efficient virtual private network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fastd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fortigate", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fortigate firewall", "link": "https://github.com/bluecmd/fortigate_exporter", "icon_filename": "fortinet.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fortigate firewall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Fortigate firewall metrics for enhanced network protection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fortigate_firewall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_nfs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD NFS", "link": "https://github.com/Axcient/freebsd-nfs-exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD NFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor FreeBSD Network File System metrics for efficient file sharing management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD NFS 
Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_NFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freebsd_rctl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "FreeBSD RCTL-RACCT", "link": "https://github.com/yo000/rctl_exporter", "icon_filename": "freebsd.svg", "categories": ["data-collection.freebsd"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# FreeBSD RCTL-RACCT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
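##### HTTP proxy (sketch)\n\n> **Note**: An illustrative sketch using the documented proxy options; the proxy address, port, and credentials below are placeholders, not defaults.\n\nSending requests through an HTTP proxy.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # placeholder proxy endpoint and credentials\n proxy_url: http://127.0.0.1:3128\n proxy_username: username\n proxy_password: password\n\n```\n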
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-FreeBSD_RCTL-RACCT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-freifunk", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Freifunk network", "link": "https://github.com/xperimental/freifunk-exporter", "icon_filename": "freifunk.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Freifunk network\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Freifunk community network metrics for optimized network performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Freifunk 
Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
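##### Metric filtering (selector)\n\n> **Note**: An illustrative sketch of the documented selector option; the metric name patterns below are hypothetical and should be replaced with names your exporter actually exposes.\n\nCollect only the time series you need, and drop the rest.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n # hypothetical metric name patterns\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n\n```\n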
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Freifunk_network", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-fritzbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Fritzbox network devices", "link": "https://github.com/pdreker/fritz_exporter", "icon_filename": "avm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Fritzbox network devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack AVM Fritzbox router metrics for efficient home network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Fritzbox 
exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
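##### Untyped metrics as Counters or Gauges (fallback_type)\n\n> **Note**: An illustrative sketch of the documented fallback_type option; the metric name patterns below are hypothetical.\n\nProcess Untyped metrics whose names match a pattern as Counter or Gauge instead of ignoring them.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n counter:\n # hypothetical metric name patterns\n - myapp_errors*\n gauge:\n - myapp_temperature*\n\n```\n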
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Fritzbox_network_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_gce", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP GCE", "link": "https://github.com/O1ahmad/gcp-gce-exporter", "icon_filename": "gcp-gce.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP GCE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP GCE 
Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
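##### Custom HTTP headers\n\n> **Note**: A sketch assuming the documented headers option accepts a map of header names to values; the header name and value below are placeholders.\n\nSending an additional HTTP header with every request.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n # placeholder header name and value\n X-Api-Key: my-api-key\n\n```\n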
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_GCE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_quota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GCP Quota", "link": "https://github.com/mintel/gcp-quota-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GCP Quota\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform quota metrics for optimized resource usage and cost management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
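##### Slower collection frequency\n\n> **Note**: An illustrative sketch combining the documented update_every and timeout options; the values below are arbitrary examples, not recommendations.\n\nCollecting metrics once per minute, with a longer request timeout for endpoints that are slow to scrape.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # collect every 60 seconds, allow up to 20 seconds per request\n update_every: 60\n timeout: 20\n\n```\n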
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GCP_Quota", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gtp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GTP", "link": "https://github.com/wmnsk/gtp_exporter", "icon_filename": "gtpu.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GTP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GTP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic_cli", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic Command Line Output", "link": "https://github.com/MarioMartReq/generic-exporter", "icon_filename": "cli.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic Command Line Output\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command line output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Generic Command Line Output 
Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_Command_Line_Output", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-enclosure", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Generic storage enclosure tool", "link": "https://github.com/Gandi/jbod-rs", "icon_filename": "storage-enclosure.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Generic storage enclosure tool\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor storage enclosure metrics for efficient storage device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Generic_storage_enclosure_tool", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_ratelimit", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub API rate limit", "link": "https://github.com/lunarway/github-ratelimit-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub API rate limit\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GitHub API rate limit metrics for efficient\nAPI usage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub API rate limit 
Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_API_rate_limit", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-github_repo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitHub repository", "link": "https://github.com/githubexporter/github-exporter", "icon_filename": "github.svg", "categories": ["data-collection.other"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitHub repository\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack GitHub repository metrics for optimized project and user analytics monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [GitHub 
Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitHub_repository", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gitlab_runner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "GitLab Runner", "link": "https://gitlab.com/gitlab-org/gitlab-runner", "icon_filename": "gitlab.png", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# GitLab Runner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on GitLab CI/CD job metrics for efficient development and deployment management.\n\n\nMetrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-GitLab_Runner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gobetween", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Gobetween", "link": "https://github.com/yyyar/gobetween", "icon_filename": "gobetween.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Gobetween\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Gobetween load balancer metrics for optimized network traffic management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Gobetween", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Cloud Platform", "link": "https://github.com/DazWilkin/gcp-exporter", "icon_filename": "gcp.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Cloud Platform\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Cloud_Platform", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-google_pagespeed", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Pagespeed", "link": "https://github.com/foomo/pagespeed_exporter", "icon_filename": "google.svg", "categories": ["data-collection.apm"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Pagespeed\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Pagespeed", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gcp_stackdriver", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Google Stackdriver", "link": "https://github.com/prometheus-community/stackdriver_exporter", "icon_filename": "gcp-stackdriver.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "google cloud services"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Google Stackdriver\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": 
"## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Google_Stackdriver", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-grafana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Grafana", "link": "https://grafana.com/", "icon_filename": "grafana.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Grafana\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
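##### Behind an HTTP proxy\n\n> **Note**: The proxy address and credentials below are placeholders; adjust them to your environment.\n\nA sketch of scraping through a proxy that requires basic HTTP authentication, using the proxy options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypass\n\n```\n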
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Grafana", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-graylog", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Graylog Server", "link": "https://github.com/Graylog2/graylog2-server/", "icon_filename": "graylog.svg", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Graylog Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Graylog server metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
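For example, assuming the endpoint exposes untyped metrics named `app_events_processed` and `app_queue_depth` (hypothetical names used purely for illustration), they could be mapped explicitly:\n\n```yaml\nfallback_type:\n counter:\n - app_events_processed\n gauge:\n - app_queue_depth\n```\n\n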
\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Graylog_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hana", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HANA", "link": "https://github.com/jenningsloy318/hana_exporter", "icon_filename": "sap.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HANA\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SAP HANA database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
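##### With custom HTTP headers\n\n> **Note**: The header name and value below are placeholders for whatever your endpoint expects.\n\nA sketch of sending an extra HTTP header with every scrape via the headers option from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n X-Api-Key: my-api-key\n\n```\n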
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HANA", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hdsentinel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HDSentinel", "link": "https://github.com/qusielle/hdsentinel-exporter", "icon_filename": "harddisk.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HDSentinel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HDSentinel 
Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
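##### Mutual TLS\n\n> **Note**: The file paths below are illustrative; point them at your own CA, certificate, and key.\n\nA sketch of presenting a client certificate to an endpoint that requires one, using the tls_ca, tls_cert, and tls_key options from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_ca: /etc/ssl/ca.pem\n tls_cert: /etc/ssl/client.pem\n tls_key: /etc/ssl/client.key\n\n```\n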
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HDSentinel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hhvm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HHVM", "link": "https://github.com/wikimedia/operations-software-hhvm_exporter", "icon_filename": "hhvm.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HHVM\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HipHop Virtual Machine metrics for efficient PHP execution and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HHVM 
Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
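##### Custom collection frequency\n\n> **Note**: The value below is only an example; pick an interval that suits your environment.\n\nA sketch of collecting metrics every 5 seconds instead of the default 10, using the update_every option from the table above.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n update_every: 5\n\n```\n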
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HHVM", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hp_ilo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HP iLO", "link": "https://github.com/infinityworks/hpilo-exporter", "icon_filename": "hp.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HP iLO\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [HP iLO Metrics 
Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
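##### Raise the time series limits\n\n> **Note**: The limits below are arbitrary example values.\n\nA sketch of allowing a larger endpoint to be processed by raising max_time_series and max_time_series_per_metric above their defaults.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n\n```\n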
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HP_iLO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-halon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Halon", "link": "https://github.com/tobiasbp/halon_exporter", "icon_filename": "halon.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Halon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Halon email security and delivery metrics for optimized email management and protection.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Halon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hashicorp_vault", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "HashiCorp Vault secrets", "link": "https://github.com/tomtom-international/vault-assessment-prometheus-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# HashiCorp Vault secrets\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack HashiCorp Vault security assessment metrics for efficient secrets management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-HashiCorp_Vault_secrets", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hasura_graphql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hasura GraphQL Server", "link": "https://github.com/zolamk/hasura-exporter", "icon_filename": "hasura.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hasura GraphQL Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Hasura GraphQL engine metrics for optimized API performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hasura 
Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hasura_GraphQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_hotspot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium hotspot", "link": "https://github.com/tedder/helium_hotspot_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium hotspot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Helium hotspot metrics for optimized LoRaWAN network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium hotspot 
exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_hotspot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-helium_miner", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Helium miner (validator)", "link": "https://github.com/tedder/miner_exporter", "icon_filename": "helium.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Helium miner (validator)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Helium miner and validator metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Helium miner (validator) 
exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Helium_miner_(validator)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_cgm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CGN series CPE", "link": "https://github.com/yrro/hitron-exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CGN series CPE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hitron CGNV4 gateway metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Hitron CGNV4 
exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CGN_series_CPE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hitron_coda", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hitron CODA Cable Modem", "link": "https://github.com/hairyhenderson/hitron_coda_exporter", "icon_filename": "hitron.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hitron CODA Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Hitron CODA cable modem metrics for optimized internet connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hitron_CODA_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homebridge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homebridge", "link": "https://github.com/lstrojny/homebridge-prometheus-exporter", "icon_filename": "homebridge.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homebridge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Homebridge smart home metrics for efficient home automation management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homebridge 
Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n
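\nFor example, a job could keep every series whose name starts with `homebridge_` while dropping one noisy series. This is an illustrative sketch only: the `homebridge_` prefix and the series names are assumptions, not taken from the exporter's docs.\n\n```yaml\n# illustrative patterns only (assumed metric names)\nselector:\n allow:\n - homebridge_*\n deny:\n - homebridge_build_info\n```\n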
\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n
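\nAs a hypothetical illustration: if an application exposed the untyped metrics `myapp_events_seen` and `myapp_queue_depth` (made-up names), the first could be processed as a Counter and the second as a Gauge with:\n\n```yaml\n# hypothetical metric names, for illustration only\nfallback_type:\n counter:\n - myapp_events_seen\n gauge:\n - myapp_queue_*\n```\n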
\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homebridge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-homey", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Homey", "link": "https://github.com/rickardp/homey-prometheus-exporter", "icon_filename": "homey.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Homey\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Homey smart home controller metrics for efficient home automation and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Homey", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-honeypot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Honeypot", "link": "https://github.com/Intrinsec/honeypot_exporter", "icon_filename": "intrinsec.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Honeypot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor honeypot metrics for efficient threat detection and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Honeypot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hilink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Huawei devices", "link": "https://github.com/eliecharra/hilink-exporter", "icon_filename": "huawei.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Huawei devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Huawei HiLink device metrics for optimized connectivity and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Huawei Hilink 
exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Huawei_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-hubble", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Hubble", "link": "https://github.com/cilium/hubble", "icon_filename": "hubble.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Hubble\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Hubble network observability metrics for efficient network visibility and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to Hubble's built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure built-in Prometheus exporter\n\nTo configure the built-in Prometheus exporter, follow the [official 
documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Hubble", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_aix_njmon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM AIX systems Njmon", "link": "https://github.com/crooks/njmon_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM AIX systems Njmon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NJmon](https://github.com/crooks/njmon_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_AIX_systems_Njmon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_cex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM CryptoExpress (CEX) cards", "link": "https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM CryptoExpress (CEX) cards\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack IBM Z Crypto Express device metrics for optimized cryptographic performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_CryptoExpress_(CEX)_cards", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_mq", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM MQ", "link": "https://github.com/agebhar1/mq_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM MQ\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on IBM MQ message queue metrics for efficient message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_MQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum", "link": "https://github.com/topine/ibm-spectrum-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum storage metrics for efficient data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IBM Spectrum 
Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_spectrum_virtualize", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Spectrum Virtualize", "link": "https://github.com/bluecmd/spectrum_virtualize_exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Spectrum Virtualize\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Spectrum_Virtualize", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ibm_zhmc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IBM Z Hardware Management Console", "link": "https://github.com/zhmcclient/zhmc-prometheus-exporter", "icon_filename": "ibm.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IBM Z Hardware Management Console\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IBM_Z_Hardware_Management_Console", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iota", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IOTA full node", "link": "https://github.com/crholliday/iota-prom-exporter", "icon_filename": "iota.svg", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IOTA full node\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IOTA 
Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IOTA_full_node", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ipmi", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "IPMI (By SoundCloud)", "link": "https://github.com/prometheus-community/ipmi_exporter", "icon_filename": "soundcloud.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# IPMI (By SoundCloud)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor IPMI metrics externally for efficient server hardware management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-IPMI_(By_SoundCloud)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-influxdb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "InfluxDB", "link": "https://github.com/prometheus/influxdb_exporter", "icon_filename": "influxdb.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# InfluxDB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor InfluxDB time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [InfluxDB 
exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-InfluxDB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jmx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JMX", "link": "https://github.com/prometheus/jmx_exporter", "icon_filename": "java.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JMX\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Java Management Extensions (JMX) metrics for efficient Java application management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the 
exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JMX", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jarvis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jarvis Standing Desk", "link": "https://github.com/hairyhenderson/jarvis_exporter/", "icon_filename": "jarvis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jarvis Standing Desk\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jarvis standing desk usage metrics for efficient workspace ergonomics and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jarvis Standing Desk 
Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jarvis_Standing_Desk", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jenkins", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Jenkins", "link": "https://www.jenkins.io/", "icon_filename": "jenkins.svg", "categories": ["data-collection.ci-cd-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Jenkins\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Jenkins continuous integration server metrics for efficient development and build management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Jenkins", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jetbrains_fls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "JetBrains Floating License Server", "link": "https://github.com/mkreu/jetbrains-fls-exporter", "icon_filename": "jetbrains.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# JetBrains Floating License Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor JetBrains floating license server metrics for efficient software licensing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-JetBrains_Floating_License_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka", "link": "https://github.com/danielqsj/kafka_exporter/", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kafka message queue metrics for optimized data streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_connect", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Connect", "link": "https://github.com/findelabs/kafka-connect-exporter-rs", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Connect\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kafka Connect metrics for efficient data streaming and integration.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka 
Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Connect", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_consumer_lag", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka Consumer Lag", "link": "https://github.com/omarsmak/kafka-consumer-lag-monitoring", "icon_filename": "kafka.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka Consumer Lag\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka consumer lag metrics for efficient message queue management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_Consumer_Lag", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kafka_zookeeper", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kafka ZooKeeper", "link": "https://github.com/cloudflare/kafka_zookeeper_exporter", "icon_filename": "kafka.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["big data", "stream processing", "message broker"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kafka ZooKeeper\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Kafka ZooKeeper metrics for optimized distributed coordination and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
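For example, a minimal allow/deny sketch (it assumes the glob-style patterns described in the selector README linked below; the `kafka_*` names are purely illustrative):\n\n```yaml\n# hypothetical selector: keep kafka_* series, drop kafka_*_debug\nselector:\n  allow:\n    - kafka_*\n  deny:\n    - kafka_*_debug\n```\n\n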
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kafka_ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-kannel", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kannel", "link": "https://github.com/apostvav/kannel_exporter", "icon_filename": "kannel.png", "categories": ["data-collection.telephony-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kannel\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kannel", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-keepalived", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Keepalived", "link": "https://github.com/gen2brain/keepalived_exporter", "icon_filename": "keepalived.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Keepalived\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Keepalived metrics for efficient high-availability and load balancing management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Keepalived 
Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Keepalived", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-korral", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Kubernetes Cluster Cloud Cost", "link": "https://github.com/agilestacks/korral", "icon_filename": "kubernetes.svg", "categories": ["data-collection.kubernetes"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Kubernetes Cluster Cloud Cost\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
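As a sketch of where this option sits in a job definition (the job name, URL, and `myapp_*` pattern are hypothetical, not defaults of this exporter):\n\n```yaml\njobs:\n  - name: myapp\n    url: http://127.0.0.1:9090/metrics\n    # collect only series whose names match myapp_*\n    selector:\n      allow:\n        - myapp_*\n```\n\n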
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Kubernetes_Cluster_Cloud_Cost", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "LDAP", "link": "https://github.com/titisan/ldap_exporter", "icon_filename": "ldap.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# LDAP\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [LDAP 
Exporter](https://github.com/titisan/ldap_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-LDAP", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lagerist", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lagerist Disk latency", "link": "https://github.com/Svedrin/lagerist", "icon_filename": "linux.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lagerist Disk latency\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack disk latency metrics for efficient storage performance and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Lagerist Disk latency 
exporter](https://github.com/Svedrin/lagerist) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lagerist_Disk_latency", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-linode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Linode", "link": "https://github.com/DazWilkin/linode-exporter", "icon_filename": "linode.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Linode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Linode cloud hosting metrics for efficient virtual server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n  allow:\n    - pattern1\n    - pattern2\n  deny:\n    - pattern3\n    - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n  counter:\n    - metric_name_pattern1\n    - metric_name_pattern2\n  gauge:\n    - metric_name_pattern3\n    - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use the \"file://\" scheme\njobs:\n  - name: myapp\n    url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n    username: username\n    password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain or hostname.\n\n\n```yaml\njobs:\n  - name: local\n    url: https://127.0.0.1:9090/metrics\n    tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n  - name: local\n    url: http://127.0.0.1:9090/metrics\n\n  - name: remote\n    url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Linode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lustre", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lustre metadata", "link": "https://github.com/GSI-HPC/prometheus-cluster-exporter", "icon_filename": "lustre.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lustre metadata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on the Lustre clustered file system for efficient management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Cluster 
Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lustre_metadata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lynis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Lynis audit reports", "link": "https://github.com/MauveSoftware/lynis_exporter", "icon_filename": "lynis.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Lynis audit reports\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Lynis security auditing tool metrics for efficient system security and compliance management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Lynis_audit_reports", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mp707", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MP707 USB thermometer", "link": "https://github.com/nradchenko/mp707_exporter", "icon_filename": "thermometer.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MP707 USB thermometer\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MP707 USB thermometer metrics for efficient temperature monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MP707_USB_thermometer", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mqtt_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MQTT Blackbox", "link": "https://github.com/inovex/mqtt_blackbox_exporter", "icon_filename": "mqtt.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MQTT Blackbox\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MQTT message transport performance using blackbox testing methods.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MQTT Blackbox 
Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MQTT_Blackbox", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-machbase", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Machbase", "link": "https://github.com/MACHBASE/prometheus-machbase-exporter", "icon_filename": "machbase.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Machbase\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Machbase time-series database metrics for efficient data storage and query performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Machbase 
Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Machbase", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-maildir", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Maildir", "link": "https://github.com/cherti/mailexporter", "icon_filename": "mailserver.svg", "categories": ["data-collection.mail-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Maildir\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack mail server metrics for optimized email management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Maildir", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meilisearch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meilisearch", "link": "https://github.com/scottaglia/meilisearch_exporter", "icon_filename": "meilisearch.svg", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meilisearch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Meilisearch search engine metrics for efficient search performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Meilisearch 
Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meilisearch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-memcached", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Memcached (community)", "link": "https://github.com/prometheus/memcached_exporter", "icon_filename": "memcached.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Memcached (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Memcached in-memory key-value store metrics for efficient caching performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Memcached 
exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Memcached_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-meraki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Meraki dashboard", "link": "https://github.com/TheHolm/meraki-dashboard-promethus-exporter", "icon_filename": "meraki.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Meraki dashboard\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Meraki_dashboard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mesos", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mesos", "link": "http://github.com/mesosphere/mesos_exporter", "icon_filename": "mesos.svg", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mesos\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Apache Mesos cluster manager metrics for efficient resource management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mesos", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mikrotik", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MikroTik devices", "link": "https://github.com/swoga/mikrotik-exporter", "icon_filename": "mikrotik.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MikroTik devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nshttpd/mikrotik-exporter, 
swoga/mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MikroTik_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-routeros", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Mikrotik RouterOS devices", "link": "https://github.com/welbymcroberts/routeros_exporter", "icon_filename": "routeros.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Mikrotik RouterOS devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack MikroTik RouterOS metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Mikrotik_RouterOS_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-minecraft", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Minecraft", "link": "https://github.com/sladkoff/minecraft-prometheus-exporter", "icon_filename": "minecraft.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Minecraft\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Minecraft server metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Minecraft 
Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Minecraft", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-modbus_rtu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Modbus protocol", "link": "https://github.com/dernasherbrezon/modbusrtu_exporter", "icon_filename": "modbus.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Modbus protocol\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Modbus RTU protocol metrics for efficient industrial automation and control performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Modbus_protocol", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mogilefs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "MogileFS", "link": "https://github.com/KKBOX/mogilefs-exporter", "icon_filename": "filesystem.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# MogileFS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor MogileFS distributed file system metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MogileFS 
Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
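For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n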
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-MogileFS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-monnit_mqtt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Monnit Sensors MQTT", "link": "https://github.com/braxton9460/monnit-mqtt-exporter", "icon_filename": "monnit.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Monnit Sensors MQTT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Monnit sensor data via MQTT for efficient IoT device monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Monnit Sensors MQTT Exporter 
WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
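For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n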
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Monnit_Sensors_MQTT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nrpe", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NRPE daemon", "link": "https://github.com/canonical/nrpe_exporter", "icon_filename": "nrpelinux.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NRPE daemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
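For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n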
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NRPE_daemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nsxt", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NSX-T", "link": "https://github.com/jk8s/nsxt_exporter", "icon_filename": "vmware-nsx.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NSX-T\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
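For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n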
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NSX-T", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nvml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NVML", "link": "https://github.com/oko/nvml-exporter-rs", "icon_filename": "nvidia.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NVML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
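For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n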
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NVML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-naemon", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Naemon", "link": "https://github.com/Griesbacher/Iapetos", "icon_filename": "naemon.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Naemon\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
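For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n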
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Naemon", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nagios", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nagios", "link": "https://github.com/wbollock/nagios_exporter", "icon_filename": "nagios.png", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nagios\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Nagios network monitoring metrics for efficient IT infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
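For instance, a hedged sketch of an allow/deny filter (the `node_cpu_*` patterns are hypothetical placeholders, not names this exporter is known to expose):\n\n```yaml\nselector:\n allow:\n - node_cpu_*\n deny:\n - node_cpu_guest_*\n```\n\nWith such a selector, a series named `node_cpu_seconds_total` would be kept, while `node_cpu_guest_seconds_total` would be dropped.\n\n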
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nagios", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nature_remo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nature Remo E lite devices", "link": "https://github.com/kenfdev/remo-exporter", "icon_filename": "nature-remo.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nature Remo E lite devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Nature Remo E series smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Nature Remo E series 
Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nature_Remo_E_lite_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_solidfire", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetApp Solidfire", "link": "https://github.com/mjavier2k/solidfire-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetApp Solidfire\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetApp Solidfire storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetApp_Solidfire", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netflow", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetFlow", "link": "https://github.com/paihu/netflow_exporter", "icon_filename": "netflow.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetFlow\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NetFlow network traffic metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetFlow", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netmeter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NetMeter", "link": "https://github.com/ssbostan/netmeter-exporter", "icon_filename": "netmeter.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NetMeter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor NetMeter network traffic metrics for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NetMeter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netapp_ontap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netapp ONTAP API", "link": "https://github.com/sapcc/netapp-api-exporter", "icon_filename": "netapp.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netapp ONTAP API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Install Exporter\n\nInstall [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netapp_ONTAP_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-netatmo", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Netatmo sensors", "link": "https://github.com/xperimental/netatmo-exporter", "icon_filename": "netatmo.svg", "categories": ["data-collection.iot-devices"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Netatmo sensors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Netatmo smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Netatmo_sensors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-newrelic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "New Relic", "link": "https://github.com/jfindley/newrelic_exporter", "icon_filename": "newrelic.svg", "categories": ["data-collection.observability"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# New Relic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor New Relic application performance management metrics for efficient application monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [New Relic 
exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-New_Relic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextdns", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "NextDNS", "link": "https://github.com/raylas/nextdns-exporter", "icon_filename": "nextdns.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# NextDNS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack NextDNS DNS resolver and security platform metrics for efficient DNS management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-NextDNS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nextcloud", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Nextcloud servers", "link": "https://github.com/xperimental/nextcloud-exporter", "icon_filename": "nextcloud.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": ["cloud services", "cloud computing", "scalability"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Nextcloud servers\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Nextcloud_servers", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-obs_studio", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OBS Studio", "link": "https://github.com/lukegb/obs_studio_exporter", "icon_filename": "obs-studio.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OBS Studio\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OBS Studio live streaming and recording software metrics for efficient video production and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OBS Studio 
Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OBS_Studio", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-odbc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ODBC", "link": "https://github.com/MACHBASE/prometheus-odbc-exporter", "icon_filename": "odbc.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ODBC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ODBC 
Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ODBC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-otrs", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OTRS", "link": "https://github.com/JulianDroste/otrs_exporter", "icon_filename": "otrs.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OTRS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OTRS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openhab", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenHAB", "link": "https://github.com/pdreker/openhab_exporter", "icon_filename": "openhab.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenHAB\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack openHAB smart home automation system metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenHAB", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openldap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenLDAP (community)", "link": "https://github.com/tomcz/openldap_exporter", "icon_filename": "openldap.svg", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenLDAP (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenLDAP directory service metrics for efficient directory management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenLDAP Metrics 
Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: `update_every`, `autodetection_retry`.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency (in seconds). | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout (in seconds). | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n   url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n   username: username\n   password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n   url: https://127.0.0.1:9090/metrics\n   tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n   url: http://127.0.0.1:9090/metrics\n\n - name: remote\n   url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenLDAP_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRC", "link": "https://git.sr.ht/~tomleb/openrc-exporter", "icon_filename": "linux.png", "categories": ["data-collection.linux-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on OpenRC init system metrics for efficient system startup and service management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openrct2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenRCT2", "link": "https://github.com/terinjokes/openrct2-prometheus-exporter", "icon_filename": "openRCT2.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenRCT2\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenRCT2 game metrics for efficient game server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenRCT2 Prometheus 
Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenRCT2", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openroadm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenROADM devices", "link": "https://github.com/utdal/openroadm_exporter", "icon_filename": "openroadm.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": ["network monitoring", "network performance", "traffic analysis"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenROADM devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", 
"setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenROADM_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openstack", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenStack", "link": "https://github.com/CanonicalLtd/prometheus-openstack-exporter", "icon_filename": "openstack.svg", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenStack\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenStack cloud computing platform metrics for efficient infrastructure management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Openstack 
exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenStack", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenVAS", "link": "https://github.com/ModeClearCode/openvas_exporter", "icon_filename": "openVAS.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenVAS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenVAS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openweathermap", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "OpenWeatherMap", "link": "https://github.com/Tenzer/openweathermap-exporter", "icon_filename": "openweather.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# OpenWeatherMap\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [OpenWeatherMap 
Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-OpenWeatherMap", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-openvswitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Open vSwitch", "link": "https://github.com/digitalocean/openvswitch_exporter", "icon_filename": "ovs.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Open vSwitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored.**\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Open_vSwitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-oracledb", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Oracle DB (community)", "link": "https://github.com/iamseth/oracledb_exporter", "icon_filename": "oracle.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["oracle", "database", "dbms", "data storage"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Oracle DB (community)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Oracle Database metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Oracle DB 
Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Oracle_DB_(community)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-patroni", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Patroni", "link": "https://github.com/gopaytech/patroni_exporter", "icon_filename": "patroni.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Patroni\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Patroni 
Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Patroni", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pws", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Personal Weather Station", "link": "https://github.com/JohnOrthoefer/pws-exporter", "icon_filename": "wunderground.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Personal Weather Station\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack personal weather station metrics for efficient weather monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Personal Weather Station 
Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Personal_Weather_Station", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgpool2", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pgpool-II", "link": "https://github.com/pgpool/pgpool2_exporter", "icon_filename": "pgpool2.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pgpool-II\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pgpool-II 
Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pgpool-II", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-philips_hue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Philips Hue", "link": "https://github.com/aexel90/hue_exporter", "icon_filename": "hue.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Philips Hue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Philips_Hue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pimoroni_enviro_plus", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pimoroni Enviro+", "link": "https://github.com/terradolor/prometheus-enviro-exporter", "icon_filename": "pimorino.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pimoroni Enviro+\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pimoroni_Enviro+", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pingdom", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Pingdom", "link": "https://github.com/veepee-oss/pingdom_exporter", "icon_filename": "solarwinds.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Pingdom\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Pingdom 
Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series limit per metric (metric name). Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on a pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Pingdom", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-podman", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Podman", "link": "https://github.com/containers/prometheus-podman-exporter", "icon_filename": "podman.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Podman\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Podman container runtime metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [PODMAN 
exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Podman", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-powerpal", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Powerpal devices", "link": "https://github.com/aashley/powerpal_exporter", "icon_filename": "powerpal.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Powerpal devices\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by 
following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Powerpal_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proftpd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ProFTPD", "link": "https://github.com/transnano/proftpd_exporter", "icon_filename": "proftpd.png", "categories": ["data-collection.ftp-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ProFTPD\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor ProFTPD FTP server metrics for efficient file transfer and server performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ProFTPD", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Prometheus endpoint", "link": "https://prometheus.io/", "icon_filename": "prometheus.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["prometheus", "openmetrics"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Prometheus endpoint\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nThis generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.\n\n\nIt collects metrics by periodically sending HTTP requests to the target instance.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is 
`go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Prometheus_endpoint", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-proxmox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Proxmox VE", "link": "https://github.com/prometheus-pve/prometheus-pve-exporter", "icon_filename": "proxmox.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Proxmox VE\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Proxmox VE 
Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Proxmox_VE", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radius", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RADIUS", "link": "https://github.com/devon-mar/radius-exporter", "icon_filename": "radius.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RADIUS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RADIUS 
exporter](https://github.com/devon-mar/radius-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RADIUS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ripe_atlas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "RIPE Atlas", "link": "https://github.com/czerwonk/atlas_exporter", "icon_filename": "ripe.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# RIPE Atlas\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [RIPE Atlas 
Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-RIPE_Atlas", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-radio_thermostat", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Radio Thermostat", "link": "https://github.com/andrewlow/radio-thermostat-exporter", "icon_filename": "radiots.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Radio Thermostat\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Radio Thermostat 
Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Radio_Thermostat", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-rancher", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Rancher", "link": "https://github.com/infinityworksltd/prometheus-rancher-exporter", "icon_filename": "rancher.svg", "categories": ["data-collection.kubernetes"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Rancher\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Rancher container orchestration platform metrics for efficient container management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Rancher 
Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Rancher", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-raritan_pdu", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Raritan PDU", "link": "https://github.com/psyinfra/prometheus-raritan-pdu-exporter", "icon_filename": "raritan.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Raritan PDU\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Raritan PDU 
Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Raritan_PDU", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-redis_queue", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Redis Queue", "link": "https://github.com/mdawar/rq-exporter", "icon_filename": "rq.png", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Redis Queue\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Redis_Queue", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sabnzbd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SABnzbd", "link": "https://github.com/msroest/sabnzbd_exporter", "icon_filename": "sabnzbd.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SABnzbd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SABnzbd Usenet client metrics for efficient file downloads and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SABnzbd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sma_inverter", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SMA Inverters", "link": "https://github.com/dr0ps/sma_inverter_exporter", "icon_filename": "sma.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SMA Inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SMA solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the 
instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SMA_Inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sonic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SONiC NOS", "link": "https://github.com/kamelnetworks/sonic_exporter", "icon_filename": "sonic.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SONiC NOS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SONiC 
Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions in the exporter's README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge depending on the pattern match when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**All other untyped metrics are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SONiC_NOS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sql", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SQL Database agnostic", "link": "https://github.com/free/sql_exporter", "icon_filename": "sql.svg", "categories": ["data-collection.database-servers"]}, "keywords": ["database", "relational db", "data querying"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SQL Database agnostic\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nQuery SQL databases for efficient database performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SQL Exporter](https://github.com/free/sql_exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
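Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `sql_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - sql_*\n```\n\n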
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SQL_Database_agnostic", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssh", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSH", "link": "https://github.com/Nordstrom/ssh_exporter", "icon_filename": "ssh.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSH\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SSH server metrics for efficient secure shell server management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
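Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `ssh_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - ssh_*\n```\n\n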
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSH", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ssl", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SSL Certificate", "link": "https://github.com/ribbybibby/ssl_exporter", "icon_filename": "ssl.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SSL Certificate\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SSL/TLS certificate metrics for efficient web security and certificate management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
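Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `ssl_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - ssl_*\n```\n\n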
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SSL_Certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-salicru_eqx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Salicru EQX inverter", "link": "https://github.com/alejandroscf/prometheus_salicru_exporter", "icon_filename": "salicru.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Salicru EQX inverter\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Salicru EQX 
inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
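Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `salicru_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - salicru_*\n```\n\n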
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Salicru_EQX_inverter", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sense_energy", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sense Energy", "link": "https://github.com/ejsuncy/sense_energy_prometheus_exporter", "icon_filename": "sense.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sense Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sense Energy 
exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
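Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `sense_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - sense_*\n```\n\n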
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sense_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sentry", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sentry", "link": "https://github.com/snakecharmer/sentry_exporter", "icon_filename": "sentry.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sentry\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sentry error tracking and monitoring platform metrics for efficient application performance and error management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
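Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `sentry_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - sentry_*\n```\n\n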
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sentry", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-servertech", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "ServerTech", "link": "https://github.com/tynany/servertech_exporter", "icon_filename": "servertech.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# ServerTech\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [ServerTech 
Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
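Only metrics matching the selector will be collected.\n\nFor instance, the sketch below keeps only series whose names start with `servertech_` (a hypothetical prefix; check the metric names your exporter actually exposes before using it):\n\n```yaml\n# hypothetical pattern; adjust to the metric names your exporter exposes\nselector:\n allow:\n - servertech_*\n```\n\n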
- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-ServerTech", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shell_cmd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shell command", "link": "https://github.com/tomwilkie/prom-run", "icon_filename": "crunner.svg", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shell command\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack custom command output metrics for tailored monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shell_command", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-shelly", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Shelly humidity sensor", "link": "https://github.com/aexel90/shelly_exporter", "icon_filename": "shelly.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Shelly humidity sensor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Shelly smart home device metrics for efficient home automation and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Shelly_humidity_sensor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sia", "link": "https://github.com/tbenz9/sia_exporter", "icon_filename": "sia.png", "categories": ["data-collection.blockchain-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Sia decentralized storage platform metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-s7_plc", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Siemens S7 PLC", "link": "https://github.com/MarcusCalidus/s7-plc-exporter", "icon_filename": "siemens.svg", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Siemens S7 PLC\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Siemens S7 PLC 
exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Siemens_S7_PLC", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-site24x7", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Site 24x7", "link": "https://github.com/svenstaro/site24x7_exporter", "icon_filename": "site24x7.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Site 24x7\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [site24x7 
Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Site_24x7", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-slurm", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Slurm", "link": "https://github.com/vpenso/prometheus-slurm-exporter", "icon_filename": "slurm.png", "categories": ["data-collection.task-queues"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Slurm\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Slurm", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-smartrg808ac", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SmartRG 808AC Cable Modem", "link": "https://github.com/AdamIsrael/smartrg808ac_exporter", "icon_filename": "smartr.jpeg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SmartRG 808AC Cable Modem\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SmartRG SR808ac router metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SmartRG_808AC_Cable_Modem", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sml", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Smart meters SML", "link": "https://github.com/mweinelt/sml-exporter", "icon_filename": "sml.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Smart meters SML\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Smart Message Language (SML) metrics for efficient smart metering and energy management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Time series selector (filter). | | no |\n| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
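##### Keep only selected metrics (illustrative)\n\n> **Note**: This example is a sketch; the `sml_*` pattern is hypothetical, so replace it with a pattern that matches the metric names your exporter actually exposes.\n\nA job combined with the `selector` option, so that only matching time series are collected.\n\n\n```yaml\n# illustrative sketch: collect only time series whose names match sml_*\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - sml_*\n\n```\n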
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Smart_meters_SML", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-softether", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SoftEther VPN Server", "link": "https://github.com/dalance/softether_exporter", "icon_filename": "softether.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SoftEther VPN Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SoftEther_VPN_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solaredge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "SolarEdge inverters", "link": "https://github.com/dave92082/SolarEdge-Exporter", "icon_filename": "solaredge.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# SolarEdge inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack SolarEdge solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
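##### Untyped metrics as Gauge (illustrative)\n\n> **Note**: This example is a sketch; `example_untyped_metric` is a hypothetical name, so replace it with the untyped metric names your exporter actually exposes.\n\nA job combined with the `fallback_type` option, so that matching Untyped metrics are processed as Gauge instead of being ignored.\n\n\n```yaml\n# illustrative sketch: treat a hypothetical untyped metric as a gauge\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n gauge:\n - example_untyped_metric\n\n```\n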
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-SolarEdge_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-lsx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solar logging stick", "link": "https://gitlab.com/bhavin192/lsx-exporter", "icon_filename": "solar.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solar logging stick\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solar_logging_stick", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-solis", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Solis Ginlong 5G inverters", "link": "https://github.com/candlerb/solis_exporter", "icon_filename": "solis.jpg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Solis Ginlong 5G inverters\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Solis solar inverter metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Solis_Ginlong_5G_inverters", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-spacelift", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Spacelift", "link": "https://github.com/spacelift-io/prometheus-exporter", "icon_filename": "spacelift.png", "categories": ["data-collection.provisioning-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Spacelift\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' hint) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Spacelift", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-speedify", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Speedify CLI", "link": "https://github.com/willshen/speedify_exporter", "icon_filename": "speedify.png", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Speedify CLI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Speedify VPN metrics for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n
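- If the debug output is long, filtering it with standard shell tools may help (an illustrative sketch; adjust the `grep` pattern to the messages you are looking for):\n\n ```bash\n ./go.d.plugin -d -m prometheus 2>&1 | grep -i error\n ```\n\n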
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Speedify_CLI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sphinx", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sphinx", "link": "https://github.com/foxdalas/sphinx_exporter", "icon_filename": "sphinx.png", "categories": ["data-collection.search-engines"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sphinx\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Sphinx search engine metrics for efficient search and indexing performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned 
in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sphinx", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starlink", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starlink (SpaceX)", "link": "https://github.com/danopstech/starlink_exporter", "icon_filename": "starlink.svg", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starlink (SpaceX)\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Starlink 
Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starlink_(SpaceX)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-starwind_vsan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Starwind VSAN VSphere Edition", "link": "https://github.com/evoicefire/starwind-vsan-exporter", "icon_filename": "starwind.svg", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Starwind VSAN VSphere Edition\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Starwind_VSAN_VSphere_Edition", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-statuspage", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "StatusPage", "link": "https://github.com/vladvasiliu/statuspage-exporter", "icon_filename": "statuspage.png", "categories": ["data-collection.notifications"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# StatusPage\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor StatusPage.io incident and status metrics for efficient incident management and communication.\n\n\nMetrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [StatusPage 
Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-StatusPage", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-steam_a2s", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Steam", "link": "https://github.com/armsnyder/a2s-exporter", "icon_filename": "a2s.png", "categories": ["data-collection.gaming"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Steam\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nGain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Steam", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-storidge", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Storidge", "link": "https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md", "icon_filename": "storidge.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Storidge\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Storidge storage metrics for efficient storage management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Storidge", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-stream_generic", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Stream", "link": "https://github.com/carlpett/stream_exporter", "icon_filename": "stream.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Stream\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor streaming metrics for efficient media streaming and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions 
mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if it has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Stream", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sunspec", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sunspec Solar Energy", "link": "https://github.com/inosion/prometheus-sunspec-exporter", "icon_filename": "sunspec.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sunspec Solar Energy\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sunspec Solar Energy 
Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\nA concrete job using this option is shown in the `Time series filtering` example below.\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
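##### Time series filtering\n\n> **Note**: This example is illustrative; `node_*` and `node_scrape_*` are hypothetical metric name patterns, not names exposed by this exporter.\n\nCollect only the series you need by combining `allow` and `deny` patterns.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - node_*\n deny:\n - node_scrape_*\n\n```\n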
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sunspec_Solar_Energy", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-suricata", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Suricata", "link": "https://github.com/corelight/suricata_exporter", "icon_filename": "suricata.png", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Suricata\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Suricata 
Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Suricata", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-synology_activebackup", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Synology ActiveBackup", "link": "https://github.com/codemonauts/activebackup-prometheus-exporter", "icon_filename": "synology.png", "categories": ["data-collection.storage-mount-points-and-filesystems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Synology ActiveBackup\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Synology Active Backup metrics for efficient backup and data protection management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### 
Install Exporter\n\nInstall [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Synology_ActiveBackup", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-sysload", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Sysload", "link": "https://github.com/egmc/sysload_exporter", "icon_filename": "sysload.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Sysload\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor system load metrics for efficient system performance and resource management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in 
the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Sysload", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-trex", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "T-Rex NVIDIA GPU Miner", "link": "https://github.com/dennisstritzke/trex_exporter", "icon_filename": "trex.png", "categories": ["data-collection.hardware-devices-and-sensors"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# T-Rex NVIDIA GPU Miner\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [T-Rex NVIDIA GPU Miner 
Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-T-Rex_NVIDIA_GPU_Miner", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tacas", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TACACS", "link": "https://github.com/devon-mar/tacacs-exporter", "icon_filename": "tacacs.png", "categories": ["data-collection.authentication-and-authorization"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TACACS\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TACACS 
Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\nA concrete job using this option is shown in the `Untyped metrics fallback` example below.\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n
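##### Untyped metrics fallback\n\n> **Note**: This example is illustrative; `*_total` is a hypothetical metric name pattern, not one exposed by this exporter.\n\nTreat untyped series whose names end in `_total` as counters.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n counter:\n - '*_total'\n\n```\n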
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the '_total' suffix.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TACACS", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tplink_p110", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TP-Link P110", "link": "https://github.com/ijohanne/prometheus-tplink-p110-exporter", "icon_filename": "tplink.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TP-Link P110\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack TP-Link P110 smart plug metrics for efficient energy management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TP-Link P110 
Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TP-Link_P110", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tado", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tado smart heating solution", "link": "https://github.com/eko/tado-exporter", "icon_filename": "tado.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tado smart heating solution\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tado smart thermostat metrics for efficient home heating and cooling management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tado\\xB0 Exporter](https://github.com/eko/tado-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tado Exporter](https://github.com/eko/tado-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
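As an illustrative sketch (the values are placeholders, not required defaults), a global option can be combined with a per-job override:\n\n```yaml\n# global: applies to all jobs unless overridden\nupdate_every: 10\n\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # per-job override (illustrative value)\n timeout: 5\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 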
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tado_smart_heating_solution", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tankerkoenig", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tankerkoenig API", "link": "https://github.com/lukasmalkmus/tankerkoenig_exporter", "icon_filename": "tanker.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tankerkoenig API\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tankerkoenig API 
Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
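For endpoints that expose a very large number of time series, the limits can be raised; a sketch with illustrative values:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # illustrative limits, tune to your endpoint\n max_time_series: 5000\n max_time_series_per_metric: 500\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 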
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tankerkoenig_API", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_powerwall", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Powerwall", "link": "https://github.com/foogod/powerwall_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Powerwall\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Powerwall metrics for efficient home energy storage and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla Powerwall 
Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
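To verify the server against a custom CA, or to present a client certificate, the TLS options can be set; a sketch assuming PEM files at illustrative paths:\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n # illustrative paths, adjust to your setup\n tls_ca: /path/to/ca.pem\n tls_cert: /path/to/client.pem\n tls_key: /path/to/client.key\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 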
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Powerwall", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_wall_connector", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla Wall Connector", "link": "https://github.com/benclapp/tesla_wall_connector_exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla Wall Connector\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
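Requests can be sent through a proxy; a sketch with an illustrative proxy address and credentials:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # illustrative proxy settings\n proxy_url: http://127.0.0.1:3128\n proxy_username: proxyuser\n proxy_password: proxypass\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 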
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_Wall_Connector", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-tesla_vehicle", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Tesla vehicle", "link": "https://github.com/wywywywy/tesla-prometheus-exporter", "icon_filename": "tesla.png", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Tesla vehicle\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Tesla vehicle metrics for efficient electric vehicle management and monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Tesla 
exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
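Custom request headers can be attached to every scrape; a sketch where the header name and value are placeholders (the map shape shown here is an assumption):\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n headers:\n # placeholder header name and value\n X-My-Header: my-value\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 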
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Tesla_vehicle", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-traceroute", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Traceroute", "link": "https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter", "icon_filename": "traceroute.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Traceroute\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nExport traceroute metrics for efficient network path analysis and performance monitoring.\n\n\nMetrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
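Redirect following can be disabled and the request timeout tightened; a sketch with illustrative values:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n # do not follow redirects; fail fast on slow endpoints (illustrative)\n not_follow_redirects: yes\n timeout: 2\n```\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 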
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Traceroute", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twincat_ads_webservice", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "TwinCAT ADS Web Service", "link": "https://github.com/MarcusCalidus/twincat-ads-webservice-exporter", "icon_filename": "twincat.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# TwinCAT ADS Web Service\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.\n\n\nMetrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## 
Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-TwinCAT_ADS_Web_Service", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-twitch", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Twitch", "link": "https://github.com/damoun/twitch_exporter", "icon_filename": "twitch.svg", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Twitch\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Twitch streaming platform metrics for efficient live streaming management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Twitch exporter](https://github.com/damoun/twitch_exporter) by following 
the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Twitch", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-ubiquity_ufiber", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Ubiquiti UFiber OLT", "link": "https://github.com/swoga/ufiber-exporter", "icon_filename": "ubiquiti.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Ubiquiti UFiber OLT\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install 
Exporter\n\nInstall [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Ubiquiti_UFiber_OLT", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-uptimerobot", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Uptimerobot", "link": "https://github.com/wosc/prometheus-uptimerobot", "icon_filename": "uptimerobot.svg", "categories": ["data-collection.synthetic-checks"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Uptimerobot\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Uptimerobot 
Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Uptimerobot", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vscode", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "VSCode", "link": "https://github.com/guicaulada/vscode-exporter", "icon_filename": "vscode.svg", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# VSCode\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Visual Studio Code editor metrics for efficient development environment management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the 
instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-VSCode", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vault_pki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vault PKI", "link": "https://github.com/aarnaud/vault-pki-exporter", "icon_filename": "vault.svg", "categories": ["data-collection.security-systems"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vault PKI\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Vault PKI 
Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vault_PKI", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-vertica", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Vertica", "link": "https://github.com/vertica/vertica-prometheus-exporter", "icon_filename": "vertica.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Vertica\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Vertica analytics database platform metrics for efficient database performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall 
[vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Vertica", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-warp10", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Warp10", "link": "https://github.com/centreon/warp10-sensision-exporter", "icon_filename": "warp10.svg", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Warp10\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Warp 10 time-series database metrics for efficient time-series data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Warp10 
Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series above this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Warp10", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xmpp_blackbox", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "XMPP Server", "link": "https://github.com/horazont/xmpp-blackbox-exporter", "icon_filename": "xmpp.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# XMPP Server\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [XMPP Server 
Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-XMPP_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-xiaomi_mi_flora", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Xiaomi Mi Flora", "link": "https://github.com/xperimental/flowercare-exporter", "icon_filename": "xiaomi.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Xiaomi Mi Flora\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [MiFlora / Flower Care 
Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Xiaomi_Mi_Flora", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-yourls", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "YOURLS URL Shortener", "link": "https://github.com/just1not2/prometheus-exporter-yourls", "icon_filename": "yourls.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# YOURLS URL Shortener\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [YOURLS 
exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-YOURLS_URL_Shortener", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zerto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zerto", "link": "https://github.com/claranet/zerto-exporter", "icon_filename": "zerto.png", "categories": ["data-collection.cloud-provider-managed"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zerto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zerto Exporter](https://github.com/claranet/zerto-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zerto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zulip", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zulip", "link": "https://github.com/brokenpip3/zulip-exporter", "icon_filename": "zulip.png", "categories": ["data-collection.media-streaming-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zulip\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Zulip open-source group chat application metrics for efficient team communication management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zulip", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-zyxel_gs1200", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "Zyxel GS1200-8", "link": "https://github.com/robinelfrink/gs1200-exporter", "icon_filename": "zyxel.png", "categories": ["data-collection.networking-stack-and-network-interfaces"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# Zyxel GS1200-8\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Zyxel GS1200 network switch metrics for efficient network device management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [Zyxel GS1200 
Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-Zyxel_GS1200-8", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-bpftrace", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "bpftrace variables", "link": "https://github.com/andreasgerstmayr/bpftrace_exporter", "icon_filename": "bpftrace.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# bpftrace variables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack bpftrace metrics for advanced performance analysis and troubleshooting.\n\n\nMetrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [bpftrace 
exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series greater than this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.\n- As Counter if the metric name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has a 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-bpftrace_variables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cadvisor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "cAdvisor", "link": "https://github.com/google/cadvisor", "icon_filename": "cadvisor.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned 
", "integration_type": "collector", "id": "go.d.plugin-prometheus-bpftrace_variables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-cadvisor", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "cAdvisor", "link": "https://github.com/google/cadvisor", "icon_filename": "cadvisor.png", "categories": ["data-collection.containers-and-vms"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# cAdvisor\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor container resource usage and performance metrics with cAdvisor for efficient container management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n
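As an illustrative sketch (assuming glob-style patterns and hypothetical cAdvisor metric names), a job could keep only CPU and memory series:\n\n```yaml\nselector:\n allow:\n - container_cpu_*\n - container_memory_*\n```\n\n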
##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n
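A minimal sketch that instantiates the skeleton above, assuming hypothetical untyped metrics `myapp_requests` and `myapp_temperature`:\n\n```yaml\nfallback_type:\n counter:\n - myapp_requests\n gauge:\n - myapp_temperature\n```\n\n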
#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-cAdvisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-etcd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "etcd", "link": "https://etcd.io/", "icon_filename": "etcd.svg", "categories": ["data-collection.service-discovery-registry"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# etcd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack etcd database metrics for optimized distributed key-value store management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file 
using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-etcd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-gpsd", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "gpsd", "link": "https://github.com/natesales/gpsd-exporter", "icon_filename": "gpsd.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# gpsd\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter 
README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-gpsd", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-iqair", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "iqAir AirVisual air quality monitors", "link": "https://github.com/Packetslave/iqair_exporter", "icon_filename": "iqair.svg", "categories": ["data-collection.iot-devices"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# iqAir AirVisual air quality monitors\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor air quality data from IQAir devices for efficient environmental monitoring and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [IQair 
Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-iqAir_AirVisual_air_quality_monitors", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-jolokia", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "jolokia", "link": "https://github.com/aklinkert/jolokia_exporter", "icon_filename": "jolokia.png", "categories": ["data-collection.apm"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# jolokia\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor Jolokia JVM metrics for optimized Java application performance and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by 
following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-jolokia", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-journald", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "journald", "link": "https://github.com/dead-claudia/journald-exporter", "icon_filename": "linux.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# journald\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on systemd-journald metrics for efficient log management and analysis.\n\n\nMetrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the 
instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-journald", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-loki", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "loki", "link": "https://github.com/grafana/loki", "icon_filename": "loki.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# loki\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack Loki metrics.\n\n\nMetrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Loki\n\nInstall [loki](https://github.com/grafana/loki) according to its documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the 
configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match. | | no |\n| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port of the monitored application on which it provides metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (have no '# TYPE') processing:\n\n- As Counter or Gauge depending on pattern match when 'fallback_type' is used.\n- As Counter if it has suffix '_total'.\n- As Summary if it has 'quantile' label.\n- As Histogram if it has 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-loki", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mosquitto", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mosquitto", "link": "https://github.com/sapcc/mosquitto-exporter", "icon_filename": "mosquitto.svg", "categories": ["data-collection.message-brokers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mosquitto\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nKeep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
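As an illustrative sketch only (the `mosquitto_*` pattern is hypothetical, not a documented metric namespace), a job can combine the options above with the `selector` option described in the subsection below:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n selector:\n allow:\n - mosquitto_*\n```\n\n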
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mosquitto", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-mtail", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "mtail", "link": "https://github.com/google/mtail", "icon_filename": "mtail.png", "categories": ["data-collection.logs-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# mtail\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor log data metrics using the mtail log data extractor and parser.\n\n\nMetrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
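As a hedged sketch (the metric names are placeholders, not actual mtail metrics), the `fallback_type` option described in its subsection below can rescue untyped series that would otherwise be dropped:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n fallback_type:\n counter:\n - myapp_events\n gauge:\n - myapp_queue_depth\n```\n\n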
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-mtail", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-nftables", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "nftables", "link": "https://github.com/Sheridan/nftables_exporter", "icon_filename": "nftables.png", "categories": ["data-collection.linux-systems.firewall-metrics"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# nftables\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor nftables firewall metrics for efficient network security and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following 
the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n##### selector\n\nThis option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-nftables", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-pgbackrest", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "pgBackRest", "link": "https://github.com/woblerr/pgbackrest_exporter", "icon_filename": "pgbackrest.png", "categories": ["data-collection.database-servers"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# pgBackRest\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nMonitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.\n\n\nMetrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) 
by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
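For instance (a sketch; the values are illustrative, not recommendations), the `max_time_series` and `max_time_series_per_metric` guards from the table above can be tuned per job when an endpoint legitimately exposes high cardinality:\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n max_time_series: 5000\n max_time_series_per_metric: 500\n```\n\n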
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-pgBackRest", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-prometheus-strongswan", "module_name": "prometheus", "plugin_name": "go.d.plugin", "monitored_instance": {"name": "strongSwan", "link": "https://github.com/jlti-dev/ipsec_exporter", "icon_filename": "strongswan.svg", "categories": ["data-collection.vpns"]}, "keywords": [], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false, "community": true}, "overview": "# strongSwan\n\nPlugin: go.d.plugin\nModule: prometheus\n\n## Overview\n\nTrack strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.\n\n\nMetrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\nThe full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Exporter\n\nInstall [strongSwan/IPSec/vici 
Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/prometheus.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/prometheus.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 10 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| selector | Time series selector (filter). | | no |\n| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |\n| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |\n| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |\n| timeout | HTTP request timeout. | 10 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
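As a sketch (the proxy address and CA file path are placeholders), the `proxy_url` and `tls_ca` options from the table above allow scraping through an HTTP proxy while verifying the server against a custom CA:\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n proxy_url: http://127.0.0.1:3128\n tls_ca: /etc/ssl/certs/my_ca.crt\n```\n\n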
##### selector\n\nThis option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.\n\n- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)\n- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).\n- Option syntax:\n\n```yaml\nselector:\n allow:\n - pattern1\n - pattern2\n deny:\n - pattern3\n - pattern4\n```\n\n\n##### fallback_type\n\nThis option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.\n\n- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).\n- Option syntax:\n\n```yaml\nfallback_type:\n counter:\n - metric_name_pattern1\n - metric_name_pattern2\n gauge:\n - metric_name_pattern3\n - metric_name_pattern4\n```\n\n\n#### Examples\n\n##### Basic\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nA basic example configuration.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n```\n##### Read metrics from a file\n\nAn example configuration to read metrics from a file.\n\n```yaml\n# use \"file://\" scheme\njobs:\n - name: myapp\n url: file:///opt/metrics/myapp/metrics.txt\n\n```\n##### HTTP authentication\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nBasic HTTP authentication.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nDo not validate the server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:9090/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n> **Note**: Change the port to the one on which the monitored application exposes its metrics.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:9090/metrics\n\n - name: remote\n url: http://192.0.2.1:9090/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m prometheus\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThis collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).\n\n| Metric | Chart | Dimension(s) | Algorithm |\n|---------------------------|-------------------------------------------|----------------------|-------------|\n| Gauge | for each label set | one, the metric name | absolute |\n| Counter | for each label set | one, the metric name | incremental |\n| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |\n| Summary (sum and count) | for each label set | the metric name | incremental |\n| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |\n| Histogram (sum and count) | for each label set | the metric name | incremental |\n\nUntyped metrics (those without a '# TYPE' line) are processed as follows:\n\n- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.\n- As Counter if the name has the suffix '_total'.\n- As Summary if it has a 'quantile' label.\n- As Histogram if it has an 'le' label.\n\n**The rest are ignored**.\n\n", "integration_type": "collector", "id": "go.d.plugin-prometheus-strongSwan", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-proxysql", "plugin_name": "go.d.plugin", "module_name": "proxysql", "monitored_instance": {"name": "ProxySQL", "link": "https://www.proxysql.com/", "icon_filename": "proxysql.png", "categories": ["data-collection.database-servers"]}, "keywords": ["proxysql", "databases", "sql"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# ProxySQL\n\nPlugin: go.d.plugin\nModule: proxysql\n\n## Overview\n\nThis collector monitors ProxySQL servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/proxysql.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/proxysql.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |\n| my.cnf | Specifies the my.cnf file to read connection parameters from, under the [client] section. | | no |\n| timeout | Query timeout in seconds. | 1 | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n```\n##### my.cnf\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n my.cnf: '/etc/my.cnf'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n dsn: stats:stats@tcp(127.0.0.1:6032)/\n\n - name: remote\n dsn: stats:stats@tcp(203.0.113.0:6032)/\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m proxysql\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ProxySQL instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |\n| proxysql.client_connections_rate | created, aborted | connections/s |\n| proxysql.server_connections_count | connected | connections |\n| proxysql.server_connections_rate | created, aborted, delayed | connections/s |\n| proxysql.backends_traffic | recv, sent | B/s |\n| proxysql.clients_traffic | recv, sent | B/s |\n| proxysql.active_transactions_count | client | connections |\n| proxysql.questions_rate | questions | questions/s |\n| proxysql.slow_queries_rate | slow | queries/s |\n| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |\n| proxysql.backend_statements_count | total, unique | statements |\n| proxysql.backend_statements_rate | prepare, execute, close | statements/s |\n| proxysql.client_statements_count | total, unique | statements |\n| proxysql.client_statements_rate | prepare, execute, close | statements/s |\n| proxysql.cached_statements_count | cached | statements |\n| proxysql.query_cache_entries_count | entries | entries |\n| proxysql.query_cache_memory_used | used | B |\n| proxysql.query_cache_io | in, out | B/s |\n| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |\n| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |\n| proxysql.mysql_monitor_workers_rate | started | workers/s |\n| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |\n| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |\n| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |\n| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |\n| proxysql.uptime | uptime | seconds |\n\n### Per command\n\nThese metrics refer to the SQL command.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| command | SQL command. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_command_execution_rate | uptime | seconds |\n| proxysql.mysql_command_execution_time | time | microseconds |\n| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |\n\n### Per user\n\nThese metrics refer to the user.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| user | username from the mysql_users table |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.mysql_user_connections_utilization | used | percentage |\n| proxysql.mysql_user_connections_count | used | connections |\n\n### Per backend\n\nThese metrics refer to the backend server.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| host | backend server host |\n| port | backend server port |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |\n| proxysql.backend_connections_usage | free, used | connections |\n| proxysql.backend_connections_rate | succeed, failed | connections/s |\n| proxysql.backend_queries_rate | queries | queries/s |\n| proxysql.backend_traffic | recv, send | B/s |\n| proxysql.backend_latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-proxysql-ProxySQL", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/proxysql/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-pulsar", "plugin_name": "go.d.plugin", "module_name": "pulsar", "monitored_instance": {"name": "Apache Pulsar", "link": "https://pulsar.apache.org/", "icon_filename": "pulsar.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["pulsar"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Apache Pulsar\n\nPlugin: go.d.plugin\nModule: pulsar\n\n## Overview\n\nThis collector monitors Pulsar servers.\n\n\nIt collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects Pulsar instances running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/pulsar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/pulsar.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required 
|\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8080/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/metrics\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/metrics\n\n - name: remote\n url: http://192.0.2.1:8080/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m pulsar\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- subscription_* and namespace_subscription metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.\n- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.\n\n\n### Per Apache Pulsar instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |\n| pulsar.messages_rate | publish, dispatch | messages/s |\n| pulsar.throughput_rate | publish, dispatch | KiB/s |\n| pulsar.storage_size | used | KiB |\n| pulsar.storage_operations_rate | read, write | message batches/s |\n| pulsar.msg_backlog | backlog | messages |\n| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.subscription_delayed | delayed | message batches |\n| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.replication_rate | in, out | messages/s |\n| pulsar.replication_throughput_rate | in, out | KiB/s |\n| pulsar.replication_backlog | backlog | messages |\n\n### Per namespace\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |\n| pulsar.namespace_messages_rate | publish, dispatch | messages/s |\n| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |\n| pulsar.namespace_storage_size | used | KiB |\n| pulsar.namespace_storage_operations_rate | read, write | message batches/s |\n| pulsar.namespace_msg_backlog | backlog | messages |\n| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |\n| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |\n| pulsar.namespace_subscription_delayed | delayed | message batches |\n| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |\n| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |\n| pulsar.namespace_replication_rate | in, out | messages/s |\n| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |\n| pulsar.namespace_replication_backlog | backlog | messages |\n| pulsar.topic_producers | a dimension per topic | producers |\n| pulsar.topic_subscriptions | a dimension per topic | subscriptions |\n| pulsar.topic_consumers | a dimension per topic | consumers |\n| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |\n| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |\n| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |\n| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |\n| pulsar.topic_storage_size | a dimension per topic | KiB |\n| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |\n| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |\n| pulsar.topic_msg_backlog 
| a dimension per topic | messages |\n| pulsar.topic_subscription_delayed | a dimension per topic | message batches |\n| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |\n| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |\n| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_in | a dimension per topic | messages/s |\n| pulsar.topic_replication_throughput_rate_out | a dimension per topic | messages/s |\n| pulsar.topic_replication_backlog | a dimension per topic | messages |\n\n", "integration_type": "collector", "id": "go.d.plugin-pulsar-Apache_Pulsar", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-rabbitmq", "plugin_name": "go.d.plugin", "module_name": "rabbitmq", "monitored_instance": {"name": "RabbitMQ", "link": "https://www.rabbitmq.com/", "icon_filename": "rabbitmq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["rabbitmq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# RabbitMQ\n\nPlugin: go.d.plugin\nModule: rabbitmq\n\n## Overview\n\nThis collector monitors RabbitMQ instances.\n\nIt collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).\nThe following endpoints are used:\n\n- `/api/overview`\n- `/api/node/{node_name}`\n- `/api/vhosts`\n- `/api/queues` (disabled by default)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable management plugin\n\nThe management plugin is included in the RabbitMQ distribution but is disabled by default.\nTo enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.\n\n
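For example, on a typical installation the plugin ships with the server and can be enabled with the `rabbitmq-plugins` tool:\n\n```bash\nrabbitmq-plugins enable rabbitmq_management\n```\n\n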
##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:15672\n\n - name: remote\n url: http://192.0.2.0:15672\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m rabbitmq\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RabbitMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.messages_count | ready, unacknowledged | messages |\n| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | objects |\n| rabbitmq.connection_churn_rate | created, closed | operations/s |\n| rabbitmq.channel_churn_rate | created, closed | operations/s |\n| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |\n| rabbitmq.file_descriptors_count | available, used | fd |\n| rabbitmq.sockets_count | available, used | sockets |\n| rabbitmq.erlang_processes_count | available, used | processes |\n| rabbitmq.erlang_run_queue_processes_count | length | processes |\n| rabbitmq.memory_usage | used | bytes |\n| rabbitmq.disk_space_free_size | free | bytes |\n\n### Per vhost\n\nThese metrics refer to the virtual host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |\n| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n### Per queue\n\nThese metrics refer to the virtual host queue.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vhost | virtual host name |\n| queue | queue name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |\n| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-rabbitmq-RabbitMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-redis", "plugin_name": "go.d.plugin", "module_name": "redis", "monitored_instance": {"name": "Redis", "link": "https://redis.com/", "categories": ["data-collection.database-servers"], "icon_filename": "redis.svg"}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}, {"plugin_name": "cgroups.plugin", "module_name": "cgroups"}]}}, "alternative_monitored_instances": [], "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["redis", "databases"], "most_popular": true}, "overview": "# Redis\n\nPlugin: go.d.plugin\nModule: redis\n\n## Overview\n\nThis collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.\n\n\nIt connects to the Redis instance via a TCP or UNIX socket and executes the following commands:\n\n- [INFO ALL](https://redis.io/commands/info)\n- [PING](https://redis.io/commands/ping/)\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, 
including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:\n\n- 127.0.0.1:6379\n- /tmp/redis.sock\n- /var/run/redis/redis.sock\n- /var/lib/redis/redis.sock\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/redis.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/redis.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Redis server address. | redis://@localhost:6379 | yes |\n| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |\n| username | Username used for authentication. | | no |\n| password | Password used for authentication. | | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### TCP socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://@127.0.0.1:6379'\n\n```\n##### Unix socket\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'unix://@/tmp/redis.sock'\n\n```\n##### TCP socket with password\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 'redis://:password@127.0.0.1:6379'\n\n - name: remote\n address: 'redis://user:password@203.0.113.0:6379'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m redis\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |\n| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |\n| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |\n| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Redis instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| redis.connections | accepted, rejected | connections/s |\n| redis.clients | connected, blocked, tracking, in_timeout_table | clients |\n| redis.ping_latency | min, max, avg | seconds |\n| redis.commands | processes | commands/s |\n| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |\n| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |\n| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |\n| redis.key_eviction_events | evicted | keys/s |\n| redis.net | received, sent | kilobits/s |\n| redis.rdb_changes | changes | operations |\n| redis.bgsave_now | current_bgsave_time | seconds |\n| redis.bgsave_health | last_bgsave | status |\n| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |\n| redis.aof_file_size | current, base | bytes |\n| redis.commands_calls | a dimension per command | calls |\n| redis.commands_usec | a dimension per command | microseconds |\n| redis.commands_usec_per_sec | a dimension per command | microseconds/s |\n| redis.key_expiration_events | expired | keys/s |\n| redis.database_keys | a dimension per database | keys |\n| redis.database_expires_keys | a dimension per database | keys |\n| redis.connected_replicas | connected | replicas |\n| redis.master_link_status | up, down | status |\n| redis.master_last_io_since_time | time | seconds |\n| redis.master_link_down_since_time | time | seconds |\n| redis.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-redis-Redis", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/redis/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-scaleio", "plugin_name": "go.d.plugin", "module_name": "scaleio", "monitored_instance": {"name": "Dell EMC ScaleIO", "link": "https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm", "icon_filename": "dell.svg", "categories": 
["data-collection.storage-mount-points-and-filesystems"]}, "keywords": ["scaleio"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Dell EMC ScaleIO\n\nPlugin: go.d.plugin\nModule: scaleio\n\n## Overview\n\nThis collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API.\n\nIt collects metrics for the following ScaleIO components:\n\n- System\n- Storage Pool\n- Sdc\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/scaleio.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/scaleio.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | https://127.0.0.1:80 | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instance.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1\n username: admin\n password: password\n tls_skip_verify: yes # self-signed certificate\n\n - name: remote\n url: https://203.0.113.10\n username: admin\n password: password\n tls_skip_verify: yes\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m scaleio\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dell EMC ScaleIO instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.system_capacity_total | total | KiB |\n| scaleio.system_capacity_in_use | in_use | KiB |\n| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.system_capacity_available_volume_allocation | available | KiB |\n| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |\n| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |\n| scaleio.system_workload_primary_iops_total | total | iops/s |\n| scaleio.system_workload_primary_iops | read, write | iops/s |\n| scaleio.system_workload_primary_io_size_total | io_size | KiB |\n| scaleio.system_rebalance | read, write | KiB/s |\n| scaleio.system_rebalance_left | left | KiB |\n| scaleio.system_rebalance_time_until_finish | time | seconds |\n| scaleio.system_rebuild | read, write | KiB/s |\n| scaleio.system_rebuild_left | left | KiB |\n| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |\n| scaleio.system_components_volumes_by_type | thick, thin | volumes |\n| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |\n\n### Per storage pool\n\nThese metrics refer to the storage pool.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.storage_pool_capacity_total | total | KiB |\n| scaleio.storage_pool_capacity_in_use | in_use | KiB |\n| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |\n| scaleio.storage_pool_capacity_utilization | used | percentage |\n| 
scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |\n| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |\n| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |\n\n### Per sdc\n\nThese metrics refer to the SDC (ScaleIO Data Client).\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| scaleio.sdc_mdm_connection_state | connected | boolean |\n| scaleio.sdc_bandwidth | read, write | KiB/s |\n| scaleio.sdc_iops | read, write | iops/s |\n| scaleio.sdc_io_size | read, write | KiB |\n| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |\n\n", "integration_type": "collector", "id": "go.d.plugin-scaleio-Dell_EMC_ScaleIO", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/scaleio/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-snmp", "plugin_name": "go.d.plugin", "module_name": "snmp", "monitored_instance": {"name": "SNMP devices", "link": "", "icon_filename": "snmp.png", "categories": ["data-collection.generic-data-collection"]}, "keywords": ["snmp"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# SNMP devices\n\nPlugin: go.d.plugin\nModule: snmp\n\n## Overview\n\nThis collector monitors any SNMP devices and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.\n\nIt supports:\n\n- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3.\n- any number of SNMP devices.\n- each SNMP device can be used to collect data for any number of charts.\n- each chart may have any number of dimensions.\n- each SNMP device may have a different update frequency.\n- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).\n\nKeep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second.\n`go.d.plugin` reports the time it took for the SNMP device to respond when executed in the debug mode.\n\nAlso, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped.\nThis is a problem of the SNMP device, not this collector. 
In this case, consider reducing the frequency of data collection (increasing `update_every`).\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Find OIDs\n\nUse `snmpwalk`, like this:\n\n```sh\nsnmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1\n```\n\n- `-t 20` is the timeout in seconds.\n- `-O fn` will display full OIDs in numeric format.\n- `-v 2c` is the SNMP version.\n- `-c public` is the SNMP community.\n- `192.0.2.1` is the SNMP device.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/snmp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/snmp.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| hostname | Target IPv4 address. | 127.0.0.1 | yes |\n| community | SNMPv1/2 community string. | public | no |\n| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |\n| options.port | Target port. | 161 | no |\n| options.retries | Retries to attempt. | 1 | no |\n| options.timeout | SNMP request/response timeout. | 10 | no |\n| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no |\n| user.name | SNMPv3 user name. | | no |\n| user.level | Security level of SNMPv3 messages. | | no |\n| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |\n| user.auth_key | Authentication protocol pass phrase. | | no |\n| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |\n| user.priv_key | Privacy protocol pass phrase. | | no |\n| charts | List of charts. | [] | yes |\n| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |\n| charts.title | Chart title. | Untitled chart | no |\n| charts.units | Chart units. | num | no |\n| charts.family | Chart family. | charts.id | no |\n| charts.type | Chart type (line, area, stacked). | line | no |\n| charts.priority | Chart priority. | 70000 | no |\n| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |\n| charts.dimensions | List of chart dimensions. | [] | yes |\n| charts.dimensions.oid | Collected metric OID. | | yes |\n| charts.dimensions.name | Dimension name. | | yes |\n| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |\n| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no |\n| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |\n\n##### user.level\n\nThe security level of an SNMPv3 message as per RFC 3414 (`user.level`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|------------------------------------------|\n| none | 1 | no message authentication or encryption |\n| authNoPriv | 2 | message authentication and no encryption |\n| authPriv | 3 | message authentication and encryption |\n\n\n##### user.auth_proto\n\nThe digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------|\n| none | 1 | no message authentication |\n| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |\n| sha | 3 | SHA message authentication (HMAC-SHA-96) |\n| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |\n| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |\n| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |\n| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |\n\n\n##### user.priv_proto\n\nThe encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):\n\n| String value | Int value | Description |\n|:------------:|:---------:|-------------------------------------------------------------------------|\n| none | 1 | no message encryption |\n| des | 2 | DES encryption (CBC-DES) |\n| aes | 3 | 128-bit AES encryption (CFB-AES-128) |\n| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with \"Blumenthal\" key localization |\n| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with \"Blumenthal\" key localization |\n| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with \"Reeder\" key localization |\n| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with \"Reeder\" key localization |\n\n
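\nEach of these tables lists a string and an integer form for the value. Assuming both forms are accepted by the collector, a minimal `user` fragment using the integer form, equivalent to `level: authPriv`, `auth_proto: sha256`, `priv_proto: aes256`, would be:\n\n```yaml\nuser:\n name: username\n level: 3\n auth_proto: 5\n priv_proto: 5\n```\n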
\n#### Examples\n\n##### SNMPv1/2\n\nIn this example:\n\n- the SNMP device is `192.0.2.1`.\n- the SNMP version is `2`.\n- the SNMP community is `public`.\n- we will update the values every 10 seconds.\n- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`.\n\n> **SNMPv1**: just set `options.version` to 1.\n> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. temperature), use `absolute` instead.
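\n\nAs a worked example of the conversion: with `multiplier: 8` and `divisor: 1000`, a byte counter that grows by 125000 within one second is charted as 125000 * 8 / 1000 = 1000 kilobits/s.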
\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - id: \"bandwidth_port2\"\n title: \"Switch Bandwidth for port 2\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.2\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.2\"\n multiplier: -8\n divisor: 1000\n\n```\n##### SNMPv3\n\nTo use SNMPv3:\n\n- use `user` instead of `community`.\n- set `options.version` to 3.\n\nThe rest of the configuration is the same as in the SNMPv1/2 example.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: 192.0.2.1\n options:\n version: 3\n user:\n name: username\n level: authPriv\n auth_proto: sha256\n auth_key: auth_protocol_passphrase\n priv_proto: aes256\n priv_key: priv_protocol_passphrase\n\n```\n##### Multiply range\n\nIf you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.\n\nThis is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.\n\nEach of the 24 new charts will have its index (1-24) appended to:\n\n- its unique chart `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.\n- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.\n- its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.\n- its `priority`, incremented for each chart so that the charts appear on the dashboard in this order.\n\n\n```yaml\njobs:\n - name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port\"\n title: \"Switch Bandwidth for port\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n multiply_range: [1, 24]\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16\"\n multiplier: -8\n divisor: 1000\n\n```\n##### Multiple devices with a common configuration\n\nYAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). \nThe `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means: inject the anchor, then extend it. 
We can use anchors to share the common configuration for multiple devices.\n\nThe following example:\n\n- adds an `anchor` to the first job.\n- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.\n- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.\n\n\n```yaml\njobs:\n - &anchor\n name: switch\n update_every: 10\n hostname: \"192.0.2.1\"\n community: public\n options:\n version: 2\n charts:\n - id: \"bandwidth_port1\"\n title: \"Switch Bandwidth for port 1\"\n units: \"kilobits/s\"\n type: \"area\"\n family: \"ports\"\n dimensions:\n - name: \"in\"\n oid: \"1.3.6.1.2.1.2.2.1.10.1\"\n algorithm: \"incremental\"\n multiplier: 8\n divisor: 1000\n - name: \"out\"\n oid: \"1.3.6.1.2.1.2.2.1.16.1\"\n multiplier: -8\n divisor: 1000\n - <<: *anchor\n name: switch2\n hostname: \"192.0.2.2\"\n - <<: *anchor\n name: switch3\n hostname: \"192.0.2.3\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m snmp\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nThe metrics that will be collected are defined in the configuration file.\n", "integration_type": "collector", "id": "go.d.plugin-snmp-SNMP_devices", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/snmp/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-solr", "plugin_name": "go.d.plugin", "module_name": "solr", "monitored_instance": {"name": "Solr", "link": "https://lucene.apache.org/solr/", "icon_filename": "solr.svg", "categories": ["data-collection.search-engines"]}, "keywords": ["solr"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Solr\n\nPlugin: go.d.plugin\nModule: solr\n\n## Overview\n\nThis collector monitors Solr instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Solr version 6.4+\n\nThis collector does not work with Solr versions lower than 6.4.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/solr.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config 
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/solr.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8983 | yes |\n| socket | Server Unix socket. | | no |\n| address | Server address in IP:PORT format. | | no |\n| fcgi_path | Status path. | /status | no |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n```\n##### Basic HTTP auth\n\nLocal Solr instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://localhost:8983\n\n - name: remote\n url: http://203.0.113.10:8983\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m solr\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Solr instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| solr.search_requests | search | requests/s |\n| solr.search_errors | errors | errors/s |\n| solr.search_errors_by_type | client, server, timeouts | errors/s |\n| solr.search_requests_processing_time | time | milliseconds |\n| solr.search_requests_timings | min, median, mean, max | milliseconds |\n| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n| solr.update_requests | search | requests/s |\n| solr.update_errors | errors | errors/s |\n| solr.update_errors_by_type | client, server, timeouts | errors/s |\n| solr.update_requests_processing_time | time | milliseconds |\n| solr.update_requests_timings | min, median, mean, max | milliseconds |\n| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-solr-Solr", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/solr/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-springboot2", "plugin_name": "go.d.plugin", "module_name": "springboot2", "monitored_instance": {"name": "Java Spring-boot 2 applications", "link": "", "icon_filename": "springboot.png", "categories": ["data-collection.apm"]}, "keywords": ["springboot"], "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Java Spring-boot 2 applications\n\nPlugin: go.d.plugin\nModule: springboot2\n\n## Overview\n\nThis collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects applications running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Spring Boot Actuator\n\nThe Spring Boot Actuator exposes metrics over HTTP, to use it:\n\n- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.\n- set `management.endpoints.web.exposure.include=*` in your `application.properties`.\n\nRefer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. 
\u2018How-to\u2019 guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/springboot2.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/springboot2.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1:8080/actuator/prometheus\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8080/actuator/prometheus\n\n - name: remote\n url: http://192.0.2.1:8080/actuator/prometheus\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m springboot2\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Java Spring-boot 2 applications instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| springboot2.thread | daemon, total | threads |\n| springboot2.heap | free, eden, survivor, old | B |\n| springboot2.heap_eden | used, commited | B |\n| springboot2.heap_survivor | used, commited | B |\n| springboot2.heap_old | used, commited | B |\n| springboot2.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-springboot2-Java_Spring-boot_2_applications", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/springboot2/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-squidlog", "plugin_name": "go.d.plugin", "module_name": "squidlog", "monitored_instance": {"name": "Squid log files", "link": "https://www.squid-cache.org/", "icon_filename": "squid.png", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["squid", "logs"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# Squid log files\n\nPlugin: go.d.plugin\nModule: squidlog\n\n## Overview\n\nThis collector monitors Squid servers by parsing their access log files.\n\n\nIt automatically detects log files of Squid servers running on localhost.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/squidlog.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/squidlog.conf\n```\n#### Options\n\nSquid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).\n\nSquidlog knows how to parse and interpret the following codes:\n\n| field | squid format code | description |\n|----------------|-------------------|---------------------------------------------------------------|\n| resp_time | %tr | Response time (milliseconds). 
|\n| client_address | %>a | Client source IP address. |\n| client_address | %>A | Client FQDN. |\n| cache_code | %Ss | Squid request status (TCP_MISS etc). |\n| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |\n| resp_size | %<st | Total size of reply sent to client. |\n| result_code | %Ss/%>Hs | Cache code and http code. |\n| hierarchy | %Sh/%<a | Hierarchy code and server address. |\n\n##### parser.ltsv_config.mapping\n\n
**Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions; the names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n\n#### Examples\n\n
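##### Regexp parser sketch\n\nA minimal sketch of a job that uses a named-subexpression pattern. The log `path` and the pattern itself are illustrative assumptions; each subexpression name must be one of the known fields listed above.\n\n```yaml\njobs:\n - name: local\n path: /var/log/squid/access.log\n parser:\n log_type: regexp\n regexp_config:\n pattern: '^(?P<resp_time>[0-9.]+) (?P<client_address>[0-9a-f.:]+)'\n```\n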
"supervisord.png", "categories": ["data-collection.processes-and-system-services"]}, "keywords": ["supervisor"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Supervisor\n\nPlugin: go.d.plugin\nModule: supervisord\n\n## Overview\n\nThis collector monitors Supervisor instances.\n\nIt can collect metrics from:\n\n- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)\n- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)\n\nUsed methods:\n\n- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/supervisord.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/supervisord.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |\n| timeout | System bus requests timeout. | 1 | no |\n\n#### Examples\n\n##### HTTP\n\nCollect metrics via HTTP.\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n```\n##### Socket\n\nCollect metrics via Unix socket.\n\n```yaml\n- name: local\n url: 'unix:///run/supervisor.sock'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: 'http://127.0.0.1:9001/RPC2'\n\n - name: remote\n url: 'http://192.0.2.1:9001/RPC2'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m supervisord\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Supervisor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.summary_processes | running, non-running | processes |\n\n### Per process group\n\nThese metrics refer to the process group.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| supervisord.processes | running, non-running | processes |\n| supervisord.process_state_code | a dimension per process | code |\n| supervisord.process_exit_status | a dimension per process | exit status |\n| supervisord.process_uptime | a dimension per process | seconds |\n| supervisord.process_downtime | a dimension per process | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-supervisord-Supervisor", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/supervisord/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-systemdunits", "plugin_name": "go.d.plugin", "module_name": "systemdunits", "monitored_instance": {"name": "Systemd Units", "link": "https://www.freedesktop.org/wiki/Software/systemd/", "icon_filename": "systemd.svg", "categories": ["data-collection.systemd"]}, "keywords": ["systemd"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Systemd Units\n\nPlugin: go.d.plugin\nModule: systemdunits\n\n## Overview\n\nThis collector monitors Systemd units state.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/systemdunits.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/systemdunits.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. 
| 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| include | Systemd units filter. | *.service | no |\n| timeout | System bus requests timeout. | 1 | no |\n\n##### include\n\nSystemd units matching the selector will be monitored.\n\n- Logic: (pattern1 OR pattern2)\n- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)\n- Syntax:\n\n```yaml\ninclude:\n - pattern1\n - pattern2\n```\n\n\n#### Examples\n\n##### Service units\n\nCollect state of all service type units.\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n```\n##### One specific unit\n\nCollect state of one specific unit.\n\n```yaml\njobs:\n - name: my-specific-service\n include:\n - 'my-specific.service'\n\n```\n##### All unit types\n\nCollect state of all units.\n\n```yaml\njobs:\n - name: all-units\n include:\n - '*'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollect state of all service and socket type units.\n\n\n```yaml\njobs:\n - name: service\n include:\n - '*.service'\n\n - name: socket\n include:\n - '*.socket'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m systemdunits\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |\n| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |\n| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |\n| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |\n| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |\n| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |\n| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |\n| [ systemd_swap_unit_failed_state 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |\n| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |\n| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |\n| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per unit\n\nThese metrics refer to the systemd unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| unit_name | systemd unit name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |\n| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-systemdunits-Systemd_Units", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/systemdunits/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-tengine", "plugin_name": "go.d.plugin", "module_name": "tengine", "monitored_instance": {"name": "Tengine", "link": "https://tengine.taobao.org/", "icon_filename": "tengine.jpeg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["tengine", "web", "webserver"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Tengine\n\nPlugin: go.d.plugin\nModule: tengine\n\n## Overview\n\nThis collector monitors Tengine servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### 
Prerequisites\n\n#### Enable the ngx_http_reqstat_module module\n\nTo enable the module, see the official documentation: [ngx_http_reqstat_module](https://tengine.taobao.org/document/http_reqstat.html).\nThe default line format is the only supported format.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/tengine.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/tengine.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1/us | yes |\n| timeout | HTTP request timeout. | 2 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n```\n##### HTTP authentication\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n username: foo\n password: bar\n\n```\n##### HTTPS with self-signed certificate\n\nTengine with HTTPS enabled and a self-signed certificate.\n\n```yaml\njobs:\n - name: local\n url: https://127.0.0.1/us\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1/us\n\n - name: remote\n url: http://203.0.113.10/us\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m tengine\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tengine instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tengine.bandwidth_total | in, out | B/s |\n| tengine.connections_total | accepted | connections/s |\n| tengine.requests_total | processed | requests/s |\n| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |\n| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 419, 499, 500, 502, 503, 504, 508, other | requests/s |\n| tengine.requests_upstream_total | requests | requests/s |\n| tengine.tries_upstream_total | calls | calls/s |\n| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-tengine-Tengine", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/tengine/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-traefik", "plugin_name": "go.d.plugin", "module_name": "traefik", "monitored_instance": {"name": "Traefik", "link": "https://traefik.io/", "icon_filename": "traefik.svg", "categories": ["data-collection.web-servers-and-web-proxies"]}, "keywords": ["traefik", "proxy", "webproxy"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Traefik\n\nPlugin: go.d.plugin\nModule: traefik\n\n## Overview\n\nThis collector monitors Traefik servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable built-in Prometheus exporter\n\nTo enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/traefik.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/traefik.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| 
update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8082/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n```\n##### Basic HTTP auth\n\nLocal server with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n username: foo\n password: bar\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8082/metrics\n\n - name: remote\n url: http://192.0.2.0:8082/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m traefik\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
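\n\n> **Note**: the charts below are derived from Traefik's built-in Prometheus series. A hedged sketch for eyeballing the raw input (metric names assume a Traefik v2 setup; adjust the address to your metrics entryPoint):\n\n```bash\n# Expect traefik_entrypoint_requests_total and related series if the exporter is enabled.\ncurl -s http://127.0.0.1:8082/metrics | grep '^traefik_entrypoint_'\n```\n\n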
An instance is uniquely identified by a set of labels.\n\n\n\n### Per entrypoint, protocol\n\nThese metrics refer to the entrypoint.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |\n| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |\n| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |\n\n", "integration_type": "collector", "id": "go.d.plugin-traefik-Traefik", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/traefik/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-unbound", "plugin_name": "go.d.plugin", "module_name": "unbound", "monitored_instance": {"name": "Unbound", "link": "https://nlnetlabs.nl/projects/unbound/about/", "icon_filename": "unbound.png", "categories": ["data-collection.dns-and-dhcp-servers"]}, "keywords": ["unbound", "dns"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Unbound\n\nPlugin: go.d.plugin\nModule: unbound\n\n## Overview\n\nThis collector monitors Unbound servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable remote control interface\n\nSet `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).\n\n\n#### Check permissions and adjust if necessary\n\nIf using a Unix socket:\n\n- the socket should be readable and writable by the `netdata` user\n\nIf using an IP socket and TLS is disabled:\n\n- the socket should be accessible via the network\n\nIf TLS is enabled, in addition:\n\n- `control-key-file` should be readable by the `netdata` user\n- `control-cert-file` should be readable by the `netdata` user\n\nFor auto-detection of parameters from `unbound.conf`:\n\n- `unbound.conf` should be readable by the `netdata` user\n- if you have several configuration files (include feature), all of them should be readable by the `netdata` user\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/unbound.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/unbound.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |\n| timeout | Connection/read/write/SSL handshake timeout. 
| 1 | no |\n| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |\n| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |\n| use_tls | Whether to use TLS or not. | yes | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |\n| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n```\n##### Unix socket\n\nConnecting through Unix socket.\n\n```yaml\njobs:\n - name: socket\n address: /var/run/unbound.sock\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:8953\n\n - name: remote\n address: 203.0.113.11:8953\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m unbound\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Unbound instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.queries | queries | queries |\n| unbound.queries_ip_ratelimited | ratelimited | queries |\n| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.cache | hits, miss | events |\n| unbound.cache_percentage | hits, miss | percentage |\n| unbound.prefetch | prefetches | prefetches |\n| unbound.expired | expired | replies |\n| unbound.zero_ttl_replies | zero_ttl | replies |\n| unbound.recursive_replies | recursive | replies |\n| unbound.recursion_time | avg, median | milliseconds |\n| unbound.request_list_usage | avg, max | queries |\n| unbound.current_request_list_usage | all, users | queries |\n| unbound.request_list_jostle_list | overwritten, dropped | queries |\n| unbound.tcpusage | usage | buffers |\n| unbound.uptime | time | seconds |\n| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |\n| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |\n| unbound.mem_streamwait | streamwait | KB |\n| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |\n| unbound.type_queries | a dimension per query type | queries |\n| unbound.class_queries | a dimension per query class | queries |\n| unbound.opcode_queries | a dimension per query opcode | queries |\n| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |\n| unbound.rcode_answers | a dimension per reply rcode | replies |\n\n### Per thread\n\nThese metrics refer to threads.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| unbound.thread_queries | queries | queries |\n| unbound.thread_queries_ip_ratelimited | ratelimited | queries |\n| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |\n| unbound.thread_cache | hits, miss | events |\n| unbound.thread_cache_percentage | hits, miss | percentage |\n| unbound.thread_prefetch | prefetches | prefetches |\n| unbound.thread_expired | expired | replies |\n| unbound.thread_zero_ttl_replies | zero_ttl | replies |\n| unbound.thread_recursive_replies | recursive | replies |\n| unbound.thread_recursion_time | avg, median | milliseconds |\n| unbound.thread_request_list_usage | avg, max | queries |\n| unbound.thread_current_request_list_usage | all, users | queries |\n| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |\n| unbound.thread_tcpusage | usage | buffers |\n\n", "integration_type": "collector", "id": "go.d.plugin-unbound-Unbound", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/unbound/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-upsd", "plugin_name": "go.d.plugin", "module_name": "upsd", "monitored_instance": {"name": "UPS (NUT)", "link": "", "icon_filename": "plug-circle-bolt.svg", "categories": ["data-collection.ups"]}, "keywords": ["ups", "nut"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# UPS (NUT)\n\nPlugin: go.d.plugin\nModule: upsd\n\n## Overview\n\nThis collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector 
supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/upsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/upsd.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |\n| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:3493\n\n - name: remote\n address: 203.0.113.0:3493\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m upsd\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |\n| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |\n| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
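\n\n> **Note**: the labels and metrics below map directly to NUT variables. A hedged sketch for inspecting them with NUT's `upsc` client before comparing charts (the UPS name `myups` is a placeholder):\n\n```bash\n# List UPS names known to the daemon.\nupsc -l 127.0.0.1\n# Dump all variables for one UPS (battery.type, device.model, device.serial, ...).\nupsc myups@127.0.0.1:3493\n```\n\n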
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ups\n\nThese metrics refer to the UPS unit.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| ups_name | UPS name. |\n| battery_type | Battery type (chemistry). \"battery.type\" variable value. |\n| device_model | Device model. \"device.model\" variable value. |\n| device_serial | Device serial number. \"device.serial\" variable value. |\n| device_manufacturer | Device manufacturer. \"device.mfr\" variable value. |\n| device_type | Device type (ups, pdu, scd, psu, ats). \"device.type\" variable value. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| upsd.ups_load | load | percentage |\n| upsd.ups_load_usage | load_usage | Watts |\n| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |\n| upsd.ups_temperature | temperature | Celsius |\n| upsd.ups_battery_charge | charge | percentage |\n| upsd.ups_battery_estimated_runtime | runtime | seconds |\n| upsd.ups_battery_voltage | voltage | Volts |\n| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_voltage | voltage | Volts |\n| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_input_current | current | Ampere |\n| upsd.ups_input_current_nominal | nominal_current | Ampere |\n| upsd.ups_input_frequency | frequency | Hz |\n| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |\n| upsd.ups_output_voltage | voltage | Volts |\n| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |\n| upsd.ups_output_current | current | Ampere |\n| upsd.ups_output_current_nominal | nominal_current | Ampere |\n| upsd.ups_output_frequency | frequency | Hz |\n| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |\n\n", "integration_type": "collector", "id": "go.d.plugin-upsd-UPS_(NUT)", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/upsd/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vcsa", "plugin_name": "go.d.plugin", "module_name": "vcsa", "monitored_instance": {"name": "vCenter Server Appliance", "link": "https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# vCenter Server Appliance\n\nPlugin: go.d.plugin\nModule: vcsa\n\n## Overview\n\nThis collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### 
File\n\nThe configuration file name for this integration is `go.d/vcsa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vcsa.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 5 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | yes |\n| password | Password for basic HTTP authentication. | | yes |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nTwo instances.\n\n\n```yaml\njobs:\n - name: vcsa1\n url: https://203.0.113.1\n username: admin@vsphere.local\n password: password\n\n - name: vcsa2\n url: https://203.0.113.10\n username: admin@vsphere.local\n password: password\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vcsa\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |\n| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. 
|\n| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |\n| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |\n| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per vCenter Server Appliance instance\n\nThese metrics refer to the entire monitored application.\n
\nHealth statuses:\n\nOverall System Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------------------------------------------------------------------|\n| green | All components in the appliance are healthy. |\n| yellow | One or more components in the appliance might become overloaded soon. |\n| orange | One or more components in the appliance might be degraded. |\n| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nComponents Health:\n\n| Status | Description |\n|:-------:|:-------------------------------------------------------------|\n| green | The component is healthy. |\n| yellow | The component is healthy, but may have some problems. |\n| orange | The component is degraded, and may have serious problems. |\n| red | The component is unavailable, or will stop functioning soon. |\n| gray | No health data is available. |\n| unknown | Collector failed to decode status. |\n\nSoftware Updates Health:\n\n| Status | Description |\n|:-------:|:-----------------------------------------------------|\n| green | No updates available. |\n| orange | Non-security patches might be available. |\n| red | Security patches might be available. |\n| gray | An error retrieving information on software updates. |\n| unknown | Collector failed to decode status. |\n\n
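To see the raw status values the collector decodes, you can query the appliance health API directly. A hedged sketch using curl (endpoint paths follow the vSphere Automation API linked above; host and credentials are placeholders):\n\n```bash\n# Create an API session and extract the token from the {\"value\":\"...\"} response.\nTOKEN=$(curl -sk -X POST -u 'admin@vsphere.local:password' https://203.0.113.1/rest/com/vmware/cis/session | cut -d'\"' -f4)\n# Fetch the overall system health; expect e.g. {\"value\":\"green\"}.\ncurl -sk -H \"vmware-api-session-id: $TOKEN\" https://203.0.113.1/rest/appliance/health/system\n```\n\n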
\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |\n| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |\n\n", "integration_type": "collector", "id": "go.d.plugin-vcsa-vCenter_Server_Appliance", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vcsa/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vernemq", "plugin_name": "go.d.plugin", "module_name": "vernemq", "monitored_instance": {"name": "VerneMQ", "link": "https://vernemq.com", "icon_filename": "vernemq.svg", "categories": ["data-collection.message-brokers"]}, "keywords": ["vernemq", "message brokers"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# VerneMQ\n\nPlugin: go.d.plugin\nModule: vernemq\n\n## Overview\n\nThis collector monitors VerneMQ instances.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vernemq.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vernemq.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | http://127.0.0.1:8888/metrics | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. 
| | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n```\n##### HTTP authentication\n\nLocal instance with basic HTTP authentication.\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n username: username\n password: password\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nLocal and remote instances.\n\n\n```yaml\njobs:\n - name: local\n url: http://127.0.0.1:8888/metrics\n\n - name: remote\n url: http://203.0.113.10:8888/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vernemq\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |\n| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |\n| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |\n| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |\n| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |\n| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |\n| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |\n| [ vernemq_mqtt_connack_sent_reason_unsuccessful 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |\n| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |\n| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |\n| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |\n| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |\n| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |\n| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |\n| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |\n| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |\n| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |\n| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per VerneMQ instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vernemq.sockets | open | sockets |\n| vernemq.socket_operations | open, close | sockets/s |\n| vernemq.client_keepalive_expired | closed | sockets/s |\n| vernemq.socket_close_timeout | closed | sockets/s |\n| vernemq.socket_errors | errors | errors/s |\n| vernemq.queue_processes | queue_processes | queue processes |\n| vernemq.queue_processes_operations | setup, teardown | events/s |\n| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |\n| vernemq.queue_messages | received, sent | messages/s |\n| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |\n| vernemq.router_subscriptions | subscriptions | subscriptions |\n| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |\n| vernemq.router_memory | used | KiB |\n| vernemq.average_scheduler_utilization | utilization | percentage |\n| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |\n| vernemq.system_processes | processes | processes |\n| vernemq.system_reductions | reductions | ops/s |\n| vernemq.system_context_switches | context_switches | ops/s |\n| vernemq.system_io | received, sent | kilobits/s |\n| vernemq.system_run_queue | ready | processes |\n| vernemq.system_gc_count | gc | ops/s |\n| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |\n| vernemq.system_allocated_memory | processes, system | KiB |\n| vernemq.bandwidth | received, sent | kilobits/s |\n| vernemq.retain_messages | messages | messages |\n| vernemq.retain_memory | used | KiB |\n| vernemq.cluster_bandwidth | received, sent | kilobits/s |\n| vernemq.cluster_dropped | dropped | kilobits/s |\n| vernemq.netsplit_unresolved | unresolved | netsplits |\n| vernemq.netsplits | resolved, detected | netsplits/s |\n| vernemq.mqtt_auth | received, sent | packets/s |\n| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_connect | connect, connack | packets/s |\n| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_disconnect | received, sent | packets/s |\n| vernemq.mqtt_disconnect_received_reason | a dimension per reason | packets/s |\n| 
vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_subscribe | subscribe, suback | packets/s |\n| vernemq.mqtt_subscribe_error | failed | ops/s |\n| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |\n| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |\n| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |\n| vernemq.mqtt_publish | received, sent | packets/s |\n| vernemq.mqtt_publish_errors | failed | ops/s |\n| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |\n| vernemq.mqtt_puback | received, sent | packets/s |\n| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrec | received, sent | packets/s |\n| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_pubrel | received, sent | packets/s |\n| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp | received, sent | packets/s |\n| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |\n| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |\n| vernemq.mqtt_ping | pingreq, pingresp | packets/s |\n| vernemq.node_uptime | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vernemq-VerneMQ", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vernemq/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-vsphere", "plugin_name": "go.d.plugin", "module_name": "vsphere", "monitored_instance": {"name": "VMware vCenter Server", "link": "https://www.vmware.com/products/vcenter-server.html", "icon_filename": "vmware.svg", "categories": ["data-collection.containers-and-vms"]}, "keywords": ["vmware", "esxi", "vcenter"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": true}, "overview": "# VMware vCenter Server\n\nPlugin: go.d.plugin\nModule: vsphere\n\n## Overview\n\nThis collector monitors host and VM performance statistics from `vCenter` servers.\n\n> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.\n> go.d.plugin needs to be restarted.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default `update_every` is 20 seconds, and it doesn't make sense to decrease the value.\n**VMware real-time statistics are generated at a 20-second granularity**.\n\nFor big installations, 20 seconds is likely not enough, and the value should be tuned.\n\nTo get a better view, we recommend running the collector in debug mode and checking how much time it takes to collect metrics.\n\n
\nExample (all unrelated debug lines were removed)\n\n```\n[ilyam@pc]$ ./go.d.plugin -d -m vsphere\n[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process\n[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms\n[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms\n[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms\n[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms\n[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms\n[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms\n[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process\n[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3\u00b5s\n[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process\n[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522\u00b5s\n[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process\n[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts\n[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms\n[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973\u00b5s\n[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process\n[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms\n[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms\n[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s\n[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process\n[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms\n[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms\n[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms\n```\n\n
\n\nThere you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't affect collection.\nThe `update_every` and `timeout` parameters should be adjusted based on these numbers.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/vsphere.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/vsphere.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 20 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | vCenter server URL. | | yes |\n| host_include | Hosts selector (filter). | | no |\n| vm_include | Virtual machines selector (filter). | | no |\n| discovery_interval | Hosts and VMs discovery interval. | 300 | no |\n| timeout | HTTP request timeout. | 20 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n
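For illustration only, a minimal sketch that combines several of the options above for a larger installation (the values are hypothetical examples based on the tuning guidance above, not recommendations):\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n update_every : 60 # seconds; tune based on the debug timings above\n timeout : 30 # seconds\n discovery_interval : 600 # seconds; discovery runs in its own thread\n```\n\n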
##### host_include\n\nMetrics of hosts matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n host_include:\n - '/DC1/*' # select all hosts from datacenter DC1\n - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2\n - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3\n ```\n\n\n##### vm_include\n\nMetrics of VMs matching the selector will be collected.\n\n- Include pattern syntax: \"/Datacenter pattern/Cluster pattern/Host pattern/VM pattern\".\n- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).\n- Syntax:\n\n ```yaml\n vm_include:\n - '/DC1/*' # select all VMs from datacenter DC1\n - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2\n - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3\n ```\n\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name : vcenter1\n url : https://203.0.113.1\n username : admin@vsphere.local\n password : somepassword\n\n - name : vcenter2\n url : https://203.0.113.10\n username : admin@vsphere.local\n password : somepassword\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m vsphere\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |\n| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |\n| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |\n| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per virtual machine\n\nThese metrics refer to the Virtual Machine.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n| vm | Virtual Machine name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.vm_cpu_utilization | used | percentage |\n| vsphere.vm_mem_utilization | used | percentage |\n| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |\n| vsphere.vm_mem_swap_usage | swapped | KiB |\n| vsphere.vm_mem_swap_io | in, out | KiB/s |\n| vsphere.vm_disk_io | read, write | KiB/s |\n| vsphere.vm_disk_max_latency | latency | milliseconds |\n| vsphere.vm_net_traffic | received, sent | KiB/s |\n| vsphere.vm_net_packets | received, sent | packets |\n| vsphere.vm_net_drops | received, sent | packets |\n| vsphere.vm_overall_status | green, red, yellow, gray | status |\n| vsphere.vm_system_uptime | uptime | seconds |\n\n### Per host\n\nThese metrics refer to the ESXi host.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| datacenter | Datacenter name |\n| cluster | Cluster name |\n| host | Host name |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| vsphere.host_cpu_utilization | used | percentage |\n| vsphere.host_mem_utilization | used | percentage |\n| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |\n| vsphere.host_mem_swap_io | in, out | KiB/s |\n| vsphere.host_disk_io | read, write | KiB/s |\n| vsphere.host_disk_max_latency | latency | milliseconds |\n| vsphere.host_net_traffic | received, sent | KiB/s |\n| vsphere.host_net_packets | received, sent | packets |\n| vsphere.host_net_drops | received, sent | packets |\n| vsphere.host_net_errors | received, sent | errors |\n| vsphere.host_overall_status | green, red, yellow, gray | status |\n| vsphere.host_system_uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-vsphere-VMware_vCenter_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/vsphere/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-web_log", "plugin_name": "go.d.plugin", "module_name": "web_log", "monitored_instance": {"name": "Web server log files", "link": "", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "webservers.svg"}, "keywords": ["webserver", "apache", "httpd", "nginx", "lighttpd", "logs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Web server log files\n\nPlugin: go.d.plugin\nModule: web_log\n\n## Overview\n\nThis collector monitors web servers by parsing their log files.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects log files of web servers running on localhost.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe 
configuration file name for this integration is `go.d/web_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/web_log.conf\n```\n#### Options\n\nWeblog is aware of how to parse and interpret the following fields (**known fields**):\n\n> [nginx](https://nginx.org/en/docs/varindex.html)\n>\n> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)\n\n| nginx | apache | description |\n|-------------------------|----------|------------------------------------------------------------------------------------------|\n| $host ($http_host) | %v | Name of the server which accepted a request. |\n| $server_port | %p | Port of the server which accepted a request. |\n| $scheme | - | Request scheme. \"http\" or \"https\". |\n| $remote_addr | %a (%h) | Client address. |\n| $request | %r | Full original request line. The line is \"$request_method $request_uri $server_protocol\". |\n| $request_method | %m | Request method. Usually \"GET\" or \"POST\". |\n| $request_uri | %U | Full original request URI. |\n| $server_protocol | %H | Request protocol. Usually \"HTTP/1.0\", \"HTTP/1.1\", or \"HTTP/2.0\". |\n| $status | %s (%>s) | Response status code. |\n| $request_length | %I | Bytes received from a client, including request and headers. |\n| $bytes_sent | %O | Bytes sent to a client, including request and headers. |\n| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |\n| $request_time | %D | Request processing time. |\n| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |\n| $ssl_protocol | - | Protocol of an established SSL connection. |\n| $ssl_cipher | - | String of ciphers used for an established SSL connection. |\n\nNotes:\n\n- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.\n- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.\n- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.\n- NGINX logs the URI with query parameters, Apache doesn't.\n- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.\n- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| path | Path to the web server log file. | | yes |\n| exclude_path | Path to exclude. | *.gz | no |\n| url_patterns | List of URL patterns. | [] | no |
\n| url_patterns.name | Used as a dimension name. | | yes |\n| url_patterns.pattern | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). | | yes |\n| parser | Log parser configuration. | | no |\n| parser.log_type | Log parser type. | auto | no |\n| parser.csv_config | CSV log parser config. | | no |\n| parser.csv_config.delimiter | CSV field delimiter. | , | no |\n| parser.csv_config.format | CSV log format. | | no |\n| parser.ltsv_config | LTSV log parser config. | | no |\n| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \\t | no |\n| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |\n| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |\n| parser.json_config | JSON log parser config. | | no |\n| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |\n| parser.regexp_config | RegExp log parser config. | | no |\n| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |\n\n##### url_patterns\n\n\"URL pattern\" scope metrics will be collected for each URL pattern. \n\nOption syntax:\n\n```yaml\nurl_patterns:\n - name: name1\n pattern: pattern1\n - name: name2\n pattern: pattern2\n```\n\n\n##### parser.log_type\n\nWeblog supports 5 different log parsers:\n\n| Parser type | Description |\n|-------------|-------------------------------------------|\n| auto | Use CSV and auto-detect format |\n| csv | Comma-separated values |\n| json | [JSON](https://www.json.org/json-en.html) |\n| ltsv | [LTSV](http://ltsv.org/) |\n| regexp | Regular expression with named groups |\n\nSyntax:\n\n```yaml\nparser:\n log_type: auto\n```\n\nIf the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file.\n\n- checks if format is `CSV` (using regexp).\n- checks if format is `JSON` (using regexp).\n- assumes format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):\n\n ```sh\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $host:$server_port $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent - - $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time $upstream_response_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent $request_length $request_time\n $remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent\n ```\n\n If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't work, you need to set the format manually.\n\n\n##### parser.csv_config.format\n\nThe CSV log format string, defined using the **known fields** listed above.\n\n##### parser.ltsv_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: ltsv\n ltsv_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.json_config.mapping\n\nThe mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\n```yaml\nparser:\n log_type: json\n json_config:\n mapping:\n label1: field1\n label2: field2\n```\n\n\n##### parser.regexp_config.pattern\n\nUse a pattern with named subexpressions (groups). These names should be **known fields**.\n\n> **Note**: don't use `$` and `%` prefixes for mapped field names.\n\nSyntax:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n pattern: PATTERN\n```\n\n
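For illustration, here is a minimal sketch of a manually configured regexp parser (the pattern is a hypothetical example for a common-log-style line, not taken from the module's documentation); the named groups map to **known fields**:\n\n```yaml\nparser:\n log_type: regexp\n regexp_config:\n # named groups: remote_addr, request, status, body_bytes_sent\n pattern: '^(?P<remote_addr>[^ ]+) - - \\[[^]]+\\] \"(?P<request>[^\"]+)\" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+)'\n```\n\n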
\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m web_log\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |\n| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |\n| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |\n| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |\n| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |\n| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |\n| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Web server log files instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.requests | requests | requests/s |\n| web_log.excluded_requests | unmatched | requests/s |\n| web_log.type_requests | success, bad, redirect, error | requests/s |\n| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |\n| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |\n| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |\n| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |\n| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |\n| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |\n| web_log.bandwidth | received, sent | kilobits/s |\n| web_log.request_processing_time | min, max, avg | milliseconds |\n| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |\n| web_log.upstream_response_time | min, max, avg | milliseconds |\n| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |\n| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |\n| web_log.vhost_requests | a dimension per vhost | requests/s |\n| web_log.port_requests | a dimension per port | requests/s |\n| web_log.scheme_requests | http, https | requests/s |\n| web_log.http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.http_version_requests | a dimension per HTTP version | requests/s |\n| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |\n| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |\n| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |\n| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |\n| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |\n\n### Per custom time field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_time_field_summary | min, max, avg | milliseconds |\n| web_log.custom_time_field_histogram | a dimension per bucket | observations |\n\n### Per custom numeric field\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |\n\n### Per URL pattern\n\nTBD\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |\n| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |\n| web_log.url_pattern_bandwidth | received, sent | kilobits/s |\n| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-web_log-Web_server_log_files", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-whoisquery", "plugin_name": "go.d.plugin", "module_name": "whoisquery", "monitored_instance": {"name": "Domain expiration date", "link": "", "icon_filename": "globe.svg", "categories": ["data-collection.synthetic-checks"]}, 
"keywords": ["whois"], "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "most_popular": false}, "overview": "# Domain expiration date\n\nPlugin: go.d.plugin\nModule: whoisquery\n\n## Overview\n\nThis collector monitors the remaining time before the domain expires.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/whoisquery.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/whoisquery.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Domain address. | | yes |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| timeout | The query timeout in seconds. | 5 | no |\n\n#### Examples\n\n##### Basic\n\nBasic configuration example\n\n```yaml\njobs:\n - name: my_site\n source: my_site.com\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of the multiple domains.\n\n\n```yaml\njobs:\n - name: my_site1\n source: my_site1.com\n\n - name: my_site2\n source: my_site2.com\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m whoisquery\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per domain\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| domain | Configured source |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| whoisquery.time_until_expiration | expiry | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-whoisquery-Domain_expiration_date", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/whoisquery/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-ad", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Active Directory", "link": "https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "active directory", "ad", "adcs", "adfs"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Active Directory\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. 
CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Active_Directory", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-hyperv", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "HyperV", "link": 
"https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview", "icon_filename": "windows.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "hyperv", "virtualization", "vm"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# HyperV\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
| no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-HyperV", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-msexchange", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS Exchange", "link": "https://www.microsoft.com/en-us/microsoft-365/exchange/email", "icon_filename": 
"exchange.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mail"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS Exchange\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_Exchange", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-mssql", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "MS SQL Server", "link": "https://www.microsoft.com/en-us/sql-server/", "icon_filename": "mssql.svg", 
"categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "mssql", "database", "db"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# MS SQL Server\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | sessions |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-MS_SQL_Server", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows-dotnet", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "NET Framework", "link": "https://dotnet.microsoft.com/en-us/download/dotnet-framework", "icon_filename": 
"dotnet.svg", "categories": ["data-collection.windows-systems"]}, "keywords": ["windows", "microsoft", "dotnet"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# NET Framework\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. 
| | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | sessions |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-NET_Framework", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-windows", "plugin_name": "go.d.plugin", "module_name": "windows", "monitored_instance": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["data-collection.windows-systems"], 
"icon_filename": "windows.svg"}, "keywords": ["windows", "microsoft"], "most_popular": true, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# Windows\n\nPlugin: go.d.plugin\nModule: windows\n\n## Overview\n\nThis collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).\n\n\nIt collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).\n\nUsing the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nData collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install Windows exporter\n\nTo install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/windows.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/windows.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| url | Server URL. | | yes |\n| timeout | HTTP request timeout. | 1 | no |\n| username | Username for basic HTTP authentication. | | no |\n| password | Password for basic HTTP authentication. | | no |\n| proxy_url | Proxy URL. | | no |\n| proxy_username | Username for proxy basic HTTP authentication. | | no |\n| proxy_password | Password for proxy basic HTTP authentication. | | no |\n| method | HTTP request method. | GET | no |\n| body | HTTP request body. | | no |\n| headers | HTTP request headers. | | no |\n| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. 
| | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### HTTP authentication\n\nBasic HTTP authentication.\n\n```yaml\njobs:\n - name: win_server\n url: http://192.0.2.1:9182/metrics\n username: username\n password: password\n\n```\n##### HTTPS with self-signed certificate\n\nDo not validate server certificate chain and hostname.\n\n```yaml\njobs:\n - name: win_server\n url: https://192.0.2.1:9182/metrics\n tls_skip_verify: yes\n\n```\n##### Virtual Node\n\nThe Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.\nYou can create a virtual node for all your Windows machines and control them as separate entities.\n\nTo make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:\n\n> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.\n\n```yaml\n# /etc/netdata/vnodes/vnodes.conf\n- hostname: win_server\n guid: \n```\n\n\n```yaml\njobs:\n - name: win_server\n vnode: win_server\n url: http://192.0.2.1:9182/metrics\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from multiple remote instances.\n\n\n```yaml\njobs:\n - name: win_server1\n url: http://192.0.2.1:9182/metrics\n\n - name: win_server2\n url: http://192.0.2.2:9182/metrics\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m windows\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |\n| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |\n| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |\n| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |\n| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |\n| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\nThe collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).\n\nSupported collectors:\n\n- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)\n- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)\n- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)\n- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)\n- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)\n- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)\n- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)\n- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)\n- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)\n- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)\n- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)\n- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)\n- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)\n- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)\n- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)\n- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)\n- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)\n- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)\n- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)\n- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)\n- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)\n- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)\n- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)\n- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)\n- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)\n\n\n### Per Active Directory instance\n\nThese metrics refer to the entire monitored host.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |\n| windows.memory_utilization | available, used | bytes |\n| windows.memory_page_faults | page_faults | events/s |\n| windows.memory_swap_utilization 
| available, used | bytes |\n| windows.memory_swap_operations | read, write | operations/s |\n| windows.memory_swap_pages | read, written | pages/s |\n| windows.memory_cached | cached | KiB |\n| windows.memory_cache_faults | cache_faults | events/s |\n| windows.memory_system_pool | paged, non-paged | bytes |\n| windows.tcp_conns_established | ipv4, ipv6 | connections |\n| windows.tcp_conns_active | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |\n| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |\n| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |\n| windows.tcp_segments_received | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |\n| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |\n| windows.os_processes | processes | number |\n| windows.os_users | users | users |\n| windows.os_visible_memory_usage | free, used | bytes |\n| windows.os_paging_files_usage | free, used | bytes |\n| windows.system_threads | threads | number |\n| windows.system_uptime | time | seconds |\n| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |\n| windows.processes_cpu_utilization | a dimension per process | percentage |\n| windows.processes_handles | a dimension per process | handles |\n| windows.processes_io_bytes | a dimension per process | bytes/s |\n| windows.processes_io_operations | a dimension per process | operations/s |\n| windows.processes_page_faults | a dimension per process | pgfaults/s |\n| windows.processes_page_file_bytes | a dimension per process | bytes |\n| windows.processes_pool_bytes | a dimension per process | bytes |\n| windows.processes_threads | a dimension per process | threads |\n| ad.database_operations | add, delete, modify, recycle | operations/s |\n| ad.directory_operations | read, write, search | operations/s |\n| ad.name_cache_lookups | lookups | lookups/s |\n| ad.name_cache_hits | hits | hits/s |\n| ad.atq_average_request_latency | time | seconds |\n| ad.atq_outstanding_requests | outstanding | requests |\n| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |\n| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |\n| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |\n| ad.dra_replication_properties_updated | inbound, outbound | properties/s |\n| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |\n| ad.dra_replication_pending_syncs | pending | syncs |\n| ad.dra_replication_sync_requests | requests | requests/s |\n| ad.ds_threads | in_use | threads |\n| ad.ldap_last_bind_time | last_bind | seconds |\n| ad.binds | binds | binds/s |\n| ad.ldap_searches | searches | searches/s |\n| adfs.ad_login_connection_failures | connection | failures/s |\n| adfs.certificate_authentications | authentications | authentications/s |\n| adfs.db_artifact_failures | connection | failures/s |\n| adfs.db_artifact_query_time_seconds | query_time | seconds/s |\n| adfs.db_config_failures | connection | failures/s |\n| adfs.db_config_query_time_seconds | query_time | seconds/s |\n| adfs.device_authentications | authentications | authentications/s |\n| adfs.external_authentications | success, failure | authentications/s |\n| adfs.federated_authentications | authentications | 
authentications/s |\n| adfs.federation_metadata_requests | requests | requests/s |\n| adfs.oauth_authorization_requests | requests | requests/s |\n| adfs.oauth_client_authentications | success, failure | authentications/s |\n| adfs.oauth_client_credentials_requests | success, failure | requests/s |\n| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |\n| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |\n| adfs.oauth_client_windows_authentications | success, failure | authentications/s |\n| adfs.oauth_logon_certificate_requests | success, failure | requests/s |\n| adfs.oauth_password_grant_requests | success, failure | requests/s |\n| adfs.oauth_token_requests_success | success | requests/s |\n| adfs.passive_requests | passive | requests/s |\n| adfs.passport_authentications | passport | authentications/s |\n| adfs.password_change_requests | success, failure | requests/s |\n| adfs.samlp_token_requests_success | success | requests/s |\n| adfs.sso_authentications | success, failure | authentications/s |\n| adfs.token_requests | requests | requests/s |\n| adfs.userpassword_authentications | success, failure | authentications/s |\n| adfs.windows_integrated_authentications | authentications | authentications/s |\n| adfs.wsfed_token_requests_success | success | requests/s |\n| adfs.wstrust_token_requests_success | success | requests/s |\n| exchange.activesync_ping_cmds_pending | pending | commands |\n| exchange.activesync_requests | received | requests/s |\n| exchange.activesync_sync_cmds | processed | commands/s |\n| exchange.autodiscover_requests | processed | requests/s |\n| exchange.avail_service_requests | serviced | requests/s |\n| exchange.owa_current_unique_users | logged-in | users |\n| exchange.owa_requests_total | handled | requests/s |\n| exchange.rpc_active_user_count | active | users |\n| exchange.rpc_avg_latency | latency | seconds |\n| exchange.rpc_connection_count | connections | connections |\n| exchange.rpc_operations | operations | operations/s |\n| exchange.rpc_requests | processed | requests |\n| exchange.rpc_user_count | users | users |\n| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |\n| exchange.transport_queues_poison | low, high, none, normal | messages/s |\n| hyperv.vms_health | ok, critical | vms |\n| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |\n| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |\n| hyperv.root_partition_attached_devices | attached | devices |\n| hyperv.root_partition_deposited_pages | deposited | pages |\n| hyperv.root_partition_skipped_interrupts | skipped | interrupts |\n| hyperv.root_partition_device_dma_errors | illegal_dma | requests |\n| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |\n| 
hyperv.root_partition_device_interrupt_throttle_events | throttling | events |\n| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |\n| hyperv.root_partition_address_space | address_spaces | address spaces |\n| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |\n| hyperv.root_partition_virtual_tlb_pages | used | pages |\n\n### Per cpu core\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| core | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |\n| windows.cpu_core_interrupts | interrupts | interrupts/s |\n| windows.cpu_core_dpcs | dpcs | dpcs/s |\n| windows.cpu_core_cstate | c1, c2, c3 | percentage |\n\n### Per logical disk\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| disk | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.logical_disk_utilization | free, used | bytes |\n| windows.logical_disk_bandwidth | read, write | bytes/s |\n| windows.logical_disk_operations | reads, writes | operations/s |\n| windows.logical_disk_latency | read, write | seconds |\n\n### Per network device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| nic | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.net_nic_bandwidth | received, sent | kilobits/s |\n| windows.net_nic_packets | received, sent | packets/s |\n| windows.net_nic_errors | inbound, outbound | errors/s |\n| windows.net_nic_discarded | inbound, outbound | discards/s |\n\n### Per thermalzone\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| thermalzone | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.thermalzone_temperature | temperature | celsius |\n\n### Per service\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| service | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |\n| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |\n\n### Per website\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| website | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| iis.website_traffic | received, sent | bytes/s |\n| iis.website_requests_rate | requests | requests/s |\n| iis.website_active_connections_count | active | connections |\n| iis.website_users_count | anonymous, non_anonymous | users |\n| iis.website_connection_attempts_rate | connection | attempts/s |\n| iis.website_isapi_extension_requests_count | isapi | requests |\n| iis.website_isapi_extension_requests_rate | isapi | requests/s |\n| iis.website_ftp_file_transfer_rate | received, sent | files/s |\n| iis.website_logon_attempts_rate | logon | attempts/s |\n| iis.website_errors_rate | document_locked, document_not_found | errors/s |\n| iis.website_uptime | document_locked, document_not_found | seconds |\n\n### Per mssql instance\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.instance_accessmethods_page_splits | 
page | splits/s |\n| mssql.instance_cache_hit_ratio | hit_ratio | percentage |\n| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |\n| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |\n| mssql.instance_bufman_iops | read, written | iops |\n| mssql.instance_blocked_processes | blocked | processes |\n| mssql.instance_user_connection | user | connections |\n| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |\n| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |\n| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |\n| mssql.instance_memmgr_pending_memory_grants | pending | processes |\n| mssql.instance_memmgr_server_memory | memory | bytes |\n| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |\n| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |\n| mssql.instance_sqlstats_batch_requests | batch | requests/s |\n| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |\n| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |\n| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |\n\n### Per database\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| mssql_instance | TBD |\n| database | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mssql.database_active_transactions | active | transactions |\n| mssql.database_backup_restore_operations | backup | operations/s |\n| mssql.database_data_files_size | size | bytes |\n| mssql.database_log_flushed | flushed | bytes/s |\n| mssql.database_log_flushes | log | flushes/s |\n| mssql.database_transactions | transactions | transactions/s |\n| mssql.database_write_transactions | write | transactions/s |\n\n### Per certificate template\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cert_template | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adcs.cert_template_requests | requests | requests/s |\n| adcs.cert_template_failed_requests | failed | requests/s |\n| adcs.cert_template_issued_requests | issued | requests/s |\n| adcs.cert_template_pending_requests | pending | requests/s |\n| adcs.cert_template_request_processing_time | processing_time | seconds |\n| adcs.cert_template_retrievals | retrievals | retrievals/s |\n| adcs.cert_template_retrieval_processing_time | processing_time | seconds |\n| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |\n| adcs.cert_template_request_policy_module_processing | processing_time | seconds |\n| adcs.cert_template_challenge_responses | challenge | responses/s |\n| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |\n| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |\n| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |\n\n### Per process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| process | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netframework.clrexception_thrown | exceptions | exceptions/s |\n| 
netframework.clrexception_filters | filters | filters/s |\n| netframework.clrexception_finallys | finallys | finallys/s |\n| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |\n| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |\n| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |\n| netframework.clrinterop_interop_stubs_created | created | stubs/s |\n| netframework.clrjit_methods | jit-compiled | methods/s |\n| netframework.clrjit_time | time | percentage |\n| netframework.clrjit_standard_failures | failures | failures/s |\n| netframework.clrjit_il_bytes | compiled_msil | bytes/s |\n| netframework.clrloading_loader_heap_size | committed | bytes |\n| netframework.clrloading_appdomains_loaded | loaded | domain/s |\n| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |\n| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |\n| netframework.clrloading_classes_loaded | loaded | classes/s |\n| netframework.clrloading_class_load_failures | class_load | failures/s |\n| netframework.clrlocksandthreads_queue_length | threads | threads/s |\n| netframework.clrlocksandthreads_current_logical_threads | logical | threads |\n| netframework.clrlocksandthreads_current_physical_threads | physical | threads |\n| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |\n| netframework.clrlocksandthreads_contentions | contentions | contentions/s |\n| netframework.clrmemory_allocated_bytes | allocated | bytes/s |\n| netframework.clrmemory_finalization_survivors | survived | objects |\n| netframework.clrmemory_heap_size | heap | bytes |\n| netframework.clrmemory_promoted | promoted | bytes |\n| netframework.clrmemory_number_gc_handles | used | handles |\n| netframework.clrmemory_collections | gc | gc/s |\n| netframework.clrmemory_induced_gc | gc | gc/s |\n| netframework.clrmemory_number_pinned_objects | pinned | objects |\n| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |\n| netframework.clrmemory_committed | committed | bytes |\n| netframework.clrmemory_reserved | reserved | bytes |\n| netframework.clrmemory_gc_time | time | percentage |\n| netframework.clrremoting_channels | registered | channels/s |\n| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |\n| netframework.clrremoting_context_bound_objects | allocated | objects/s |\n| netframework.clrremoting_context_proxies | objects | objects/s |\n| netframework.clrremoting_contexts | contexts | contexts |\n| netframework.clrremoting_remote_calls | rpc | calls/s |\n| netframework.clrsecurity_link_time_checks | linktime | checks/s |\n| netframework.clrsecurity_checks_time | time | percentage |\n| netframework.clrsecurity_stack_walk_depth | stack | depth |\n| netframework.clrsecurity_runtime_checks | runtime | checks/s |\n\n### Per exchange workload\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.workload_active_tasks | active | tasks |\n| exchange.workload_completed_tasks | completed | tasks/s |\n| exchange.workload_queued_tasks | queued | tasks/s |\n| exchange.workload_yielded_tasks | yielded | tasks/s |\n| exchange.workload_activity_status | active, paused | status |\n\n### Per ldap process\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit 
|\n|:------|:----------|:----|\n| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |\n| exchange.ldap_read_time | read | seconds |\n| exchange.ldap_search_time | search | seconds |\n| exchange.ldap_write_time | write | seconds |\n| exchange.ldap_timeout_errors | timeout | errors/s |\n\n### Per http proxy\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| workload | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exchange.http_proxy_avg_auth_latency | latency | seconds |\n| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |\n| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |\n| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |\n| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |\n| exchange.http_proxy_requests | processed | requests/s |\n\n### Per vm\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_name | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |\n| hyperv.vm_memory_physical | assigned_memory | MiB |\n| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |\n| hyperv.vm_memory_pressure_current | pressure | percentage |\n| hyperv.vm_vid_physical_pages_allocated | allocated | pages |\n| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |\n\n### Per vm device\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_device_bytes | read, written | bytes/s |\n| hyperv.vm_device_operations | read, write | operations/s |\n| hyperv.vm_device_errors | errors | errors/s |\n\n### Per vm interface\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vm_interface | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vm_interface_bytes | received, sent | bytes/s |\n| hyperv.vm_interface_packets | received, sent | packets/s |\n| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |\n\n### Per vswitch\n\nTBD\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| vswitch | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hyperv.vswitch_bytes | received, sent | bytes/s |\n| hyperv.vswitch_packets | received, sent | packets/s |\n| hyperv.vswitch_directed_packets | received, sent | packets/s |\n| hyperv.vswitch_broadcast_packets | received, sent | packets/s |\n| hyperv.vswitch_multicast_packets | received, sent | packets/s |\n| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |\n| hyperv.vswitch_packets_flooded | flooded | packets/s |\n| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |\n| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |\n\n", "integration_type": "collector", "id": "go.d.plugin-windows-Windows", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/windows/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-wireguard", "plugin_name": "go.d.plugin", "module_name": "wireguard", "monitored_instance": {"name": "WireGuard", "link": "https://www.wireguard.com/", "categories": ["data-collection.vpns"], "icon_filename": 
"wireguard.svg"}, "keywords": ["wireguard", "vpn", "security"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# WireGuard\n\nPlugin: go.d.plugin\nModule: wireguard\n\n## Overview\n\nThis collector monitors WireGuard VPN devices and peers traffic.\n\n\nIt connects to the local WireGuard instance using [wireguard-go client](https://github.com/WireGuard/wireguard-go).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt automatically detects instances running on localhost.\n\n\n#### Limits\n\nDoesn't work if Netdata or WireGuard is installed in the container.\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/wireguard.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/wireguard.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m wireguard\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per device\n\nThese metrics refer to the VPN network interface.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.device_network_io | receive, transmit | B/s |\n| wireguard.device_peers | peers | peers |\n\n### Per peer\n\nThese metrics refer to the VPN peer.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | VPN network interface |\n| public_key | Public key of a peer |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireguard.peer_network_io | receive, transmit | B/s |\n| wireguard.peer_latest_handshake_ago | time | seconds |\n\n", "integration_type": "collector", "id": "go.d.plugin-wireguard-WireGuard", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/wireguard/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-x509check", "plugin_name": "go.d.plugin", "module_name": "x509check", "monitored_instance": {"name": "X.509 certificate", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "lock.svg"}, "keywords": ["x509", "certificate"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": []}}}, "overview": "# X.509 certificate\n\nPlugin: go.d.plugin\nModule: x509check\n\n## Overview\n\n\n\nThis collector monitors x509 certificates' expiration time and revocation status.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/x509check.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/x509check.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file. | | no |\n| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |\n| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |\n| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |\n| timeout | SSL connection timeout. | 2 | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. 
Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. | | no |\n\n#### Examples\n\n##### Website certificate\n\nWebsite certificate.\n\n```yaml\njobs:\n - name: my_site_cert\n source: https://my_site.org:443\n\n```\n##### Local file certificate\n\nLocal file certificate.\n\n```yaml\njobs:\n - name: my_file_cert\n source: file:///home/me/cert.pem\n\n```\n##### SMTP certificate\n\nSMTP certificate.\n\n```yaml\njobs:\n - name: my_smtp_cert\n source: smtp://smtp.my_mail.org:587\n\n```\n##### Multi-instance\n\n> **Note**: When you define more than one job, their names must be unique.\n\nCheck the expiration status of multiple websites' certificates.\n\n\n```yaml\njobs:\n - name: my_site_cert1\n source: https://my_site1.org:443\n\n - name: my_site_cert2\n source: https://my_site2.org:443\n\n - name: my_site_cert3\n source: https://my_site3.org:443\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m x509check\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |\n| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per source\n\nThese metrics refer to the configured source.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| source | Configured source. 
|\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| x509check.time_until_expiration | expiry | seconds |\n| x509check.revocation_status | revoked | boolean |\n\n", "integration_type": "collector", "id": "go.d.plugin-x509check-X.509_certificate", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/x509check/metadata.yaml", "related_resources": ""}, {"meta": {"id": "collector-go.d.plugin-zookeeper", "plugin_name": "go.d.plugin", "module_name": "zookeeper", "monitored_instance": {"name": "ZooKeeper", "link": "https://zookeeper.apache.org/", "categories": ["data-collection.service-discovery-registry"], "icon_filename": "zookeeper.svg"}, "keywords": ["zookeeper"], "most_popular": false, "info_provided_to_referring_integrations": {"description": ""}, "related_resources": {"integrations": {"list": [{"plugin_name": "apps.plugin", "module_name": "apps"}]}}}, "overview": "# ZooKeeper\n\nPlugin: go.d.plugin\nModule: zookeeper\n\n## Overview\n\n\n\nIt connects to the Zookeeper instance via TCP and executes the following commands:\n\n- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:\n\n- 127.0.0.1:2181\n- 127.0.0.1:2182\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Whitelist `mntr` command\n\nAdd `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `go.d/zookeeper.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config go.d/zookeeper.conf\n```\n#### Options\n\nThe following options can be defined globally: update_every, autodetection_retry.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1 | no |\n| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |\n| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |\n| timeout | Connection/read/write/ssl handshake timeout. | 1 | no |\n| use_tls | Whether to use TLS or not. | no | no |\n| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |\n| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |\n| tls_cert | Client TLS certificate. | | no |\n| tls_key | Client TLS key. 
| | no |\n\n#### Examples\n\n##### Basic\n\nLocal server.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n```\n##### TLS with self-signed certificate\n\nZookeeper with TLS and self-signed certificate.\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n use_tls: yes\n tls_skip_verify: yes\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\njobs:\n - name: local\n address: 127.0.0.1:2181\n\n - name: remote\n address: 192.0.2.1:2181\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `go.d.plugin` to debug the collector:\n\n ```bash\n ./go.d.plugin -d -m zookeeper\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZooKeeper instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zookeeper.requests | outstanding | requests |\n| zookeeper.requests_latency | min, avg, max | ms |\n| zookeeper.connections | alive | connections |\n| zookeeper.packets | received, sent | pps |\n| zookeeper.file_descriptor | open | file descriptors |\n| zookeeper.nodes | znode, ephemerals | nodes |\n| zookeeper.watches | watches | watches |\n| zookeeper.approximate_data_size | size | KiB |\n| zookeeper.server_state | state | state |\n\n", "integration_type": "collector", "id": "go.d.plugin-zookeeper-ZooKeeper", "edit_link": "https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "idlejitter.plugin", "module_name": "idlejitter.plugin", "monitored_instance": {"name": "Idle OS Jitter", "link": "", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["latency", "jitter"], "most_popular": false}, "overview": "# Idle OS Jitter\n\nPlugin: idlejitter.plugin\nModule: idlejitter.plugin\n\n## Overview\n\nMonitor delays in timing for user processes caused by scheduling limitations to optimize the system to run latency-sensitive applications with minimal jitter, improving consistency and quality of service.\n\n\nA thread is spawned that requests to sleep for a fixed amount of time. When the system wakes it up, it measures how many microseconds have passed. The difference between the requested and the actual duration of the sleep is the idle jitter. 
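To make this concrete, here is an illustrative shell sketch of the same measurement (not the plugin's actual implementation; it assumes GNU `date` with nanosecond resolution):\n\n```bash\nt0=$(date +%s%N) # timestamp in nanoseconds before the sleep\nsleep 0.020 # request a 20 ms sleep\nt1=$(date +%s%N) # timestamp in nanoseconds after waking up\n# idle jitter = actual sleep duration minus the requested 20000 microseconds\necho \"idle jitter: $(( (t1 - t0) / 1000 - 20000 )) microseconds\"\n```\n\n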
The plugin repeats this measurement dozens of times per second to ensure a representative sample.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration will run by default on all supported systems.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThis integration only supports a single configuration option, and most users will not need to change it.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20 | no |\n\n#### Examples\n\n
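##### Basic\n\nAn illustrative `netdata.conf` fragment that sets the option to its default value (the `[plugin:idlejitter]` section name is an assumption; check your `netdata.conf` for the exact section):\n\n```ini\n[plugin:idlejitter]\n loop time in ms = 20\n```\n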
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Idle OS Jitter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.idlejitter | min, max, average | microseconds lost/s |\n\n", "integration_type": "collector", "id": "idlejitter.plugin-idlejitter.plugin-Idle_OS_Jitter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/idlejitter.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "ioping.plugin", "module_name": "ioping.plugin", "monitored_instance": {"name": "IOPing", "link": "https://github.com/koct9i/ioping", "categories": ["data-collection.synthetic-checks"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IOPing\n\nPlugin: ioping.plugin\nModule: ioping.plugin\n\n## Overview\n\nMonitor IOPing metrics for efficient disk I/O latency tracking. Keep track of read/write speeds, latency, and error rates for optimized disk operations.\n\nThe plugin uses the `ioping` command.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install ioping\n\nYou can install the command by passing the argument `install` to the plugin (`/usr/libexec/netdata/plugins.d/ioping.plugin install`).\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `ioping.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config ioping.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Data collection frequency. | 1s | no |\n| destination | The directory/file/device to ioping. | | yes |\n| request_size | The request size in bytes to ioping the destination (symbolic modifiers are supported). | 4k | no |\n| ioping_opts | Options passed to the `ioping` command. | -T 1000000 | no |\n\n#### Examples\n\n##### Basic Configuration\n\nThis example has the minimum configuration necessary to have the plugin running.\n\n```bash\ndestination=\"/dev/sda\"\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ioping_disk_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ioping.conf) | ioping.latency | average I/O latency over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ioping.latency | latency | microseconds |\n\n", "integration_type": "collector", "id": "ioping.plugin-ioping.plugin-IOPing", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/ioping.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "macos.plugin", "module_name": "mach_smi", "monitored_instance": {"name": "macOS", "link": "https://www.apple.com/macos", "categories": ["data-collection.macos-systems"], "icon_filename": "macos.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["macos", "apple", "darwin"], "most_popular": false}, "overview": "# macOS\n\nPlugin: macos.plugin\nModule: mach_smi\n\n## Overview\n\nMonitor macOS metrics for efficient operating system performance.\n\nThe plugin uses three different methods to collect data:\n - The function `sysctlbyname` is called to collect network, swap, loadavg, and boot time.\n - The function `host_statistic` is called to collect CPU and Virtual memory data.\n - The function `IOServiceGetMatchingServices` is called to collect storage information.\n\n\nThis collector is only supported on the following platforms:\n\n- macOS\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nThere are three sections in the file which you can configure:\n\n- `[plugin:macos:sysctl]` - Enable or disable monitoring for network, swap, loadavg, and boot time.\n- `[plugin:macos:mach_smi]` - Enable or disable monitoring for CPU and Virtual memory.\n- `[plugin:macos:iokit]` - Enable or disable monitoring for storage devices.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enable load average | Enable or disable monitoring of load average metrics (load1, load5, load15). | yes | no |\n| system swap | Enable or disable monitoring of system swap metrics (free, used). | yes | no |\n| bandwidth | Enable or disable monitoring of network bandwidth metrics (received, sent). | yes | no |\n| ipv4 TCP packets | Enable or disable monitoring of IPv4 TCP total packets metrics (received, sent). | yes | no |\n| ipv4 TCP errors | Enable or disable monitoring of IPv4 TCP packets metrics (Input Errors, Checksum, Retransmission segments). 
| yes | no |\n| ipv4 TCP handshake issues | Enable or disable monitoring of IPv4 TCP handshake metrics (Established Resets, Active Opens, Passive Opens, Attempt Fails). | yes | no |\n| ECN packets | Enable or disable monitoring of ECN statistics metrics (InCEPkts, InNoECTPkts). | auto | no |\n| TCP SYN cookies | Enable or disable monitoring of TCP SYN cookies metrics (received, sent, failed). | auto | no |\n| TCP out-of-order queue | Enable or disable monitoring of TCP out-of-order queue metrics (inqueue). | auto | no |\n| TCP connection aborts | Enable or disable monitoring of TCP connection aborts metrics (Bad Data, User closed, No memory, Timeout). | auto | no |\n| ipv4 UDP packets | Enable or disable monitoring of ipv4 UDP packets metrics (sent, received). | yes | no |\n| ipv4 UDP errors | Enable or disable monitoring of ipv4 UDP errors metrics (Received Buffer error, Input Errors, No Ports, IN Checksum Errors, Ignore Multi). | yes | no |\n| ipv4 icmp packets | Enable or disable monitoring of IPv4 ICMP packets metrics (sent, received, in error, OUT error, IN Checksum error). | yes | no |\n| ipv4 icmp messages | Enable or disable monitoring of ipv4 ICMP messages metrics (I/O messages, I/O Errors, In Checksum). | yes | no |\n| ipv4 packets | Enable or disable monitoring of ipv4 packets metrics (received, sent, forwarded, delivered). | yes | no |\n| ipv4 fragments sent | Enable or disable monitoring of IPv4 fragments sent metrics (ok, fails, creates). | yes | no |\n| ipv4 fragments assembly | Enable or disable monitoring of IPv4 fragments assembly metrics (ok, failed, all). | yes | no |\n| ipv4 errors | Enable or disable monitoring of IPv4 errors metrics (I/O discard, I/O HDR errors, In Addr errors, In Unknown protos, OUT No Routes). | yes | no |\n| ipv6 packets | Enable or disable monitoring of IPv6 packets metrics (received, sent, forwarded, delivered). | auto | no |\n| ipv6 fragments sent | Enable or disable monitoring of IPv6 fragments sent metrics (ok, failed, all). | auto | no |\n| ipv6 fragments assembly | Enable or disable monitoring of IPv6 fragments assembly metrics (ok, failed, timeout, all). | auto | no |\n| ipv6 errors | Enable or disable monitoring of IPv6 errors metrics (I/O Discards, In Hdr Errors, In Addr Errors, In Truncated Packets, I/O No Routes). | auto | no |\n| icmp | Enable or disable monitoring of ICMP metrics (sent, received). | auto | no |\n| icmp redirects | Enable or disable monitoring of ICMP redirects metrics (received, sent). | auto | no |\n| icmp errors | Enable or disable monitoring of ICMP metrics (I/O Errors, In Checksums, In Destination Unreachable, In Packet too big, In Time Exceeds, In Parm Problem, Out Dest Unreachable, Out Time Exceeds, Out Parm Problems). | auto | no |\n| icmp echos | Enable or disable monitoring of ICMP echos metrics (I/O Echos, I/O Echo Reply). | auto | no |\n| icmp router | Enable or disable monitoring of ICMP router metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp neighbor | Enable or disable monitoring of ICMP neighbor metrics (I/O Solicits, I/O Advertisements). | auto | no |\n| icmp types | Enable or disable monitoring of ICMP types metrics (I/O Type1, I/O Type128, I/O Type129, Out Type133, Out Type135, In Type136, Out Type145). | auto | no |\n| space usage for all disks | Enable or disable monitoring of space usage for all disks metrics (available, used, reserved for root). 
| yes | no |\n| inodes usage for all disks | Enable or disable monitoring of inodes usage for all disks metrics (available, used, reserved for root). | yes | no |\n| bandwidth | Enable or disable monitoring of bandwidth metrics (received, sent). | yes | no |\n| system uptime | Enable or disable monitoring of system uptime metrics (uptime). | yes | no |\n| cpu utilization | Enable or disable monitoring of CPU utilization metrics (user, nice, system, idle). | yes | no |\n| system ram | Enable or disable monitoring of system RAM metrics (Active, Wired, throttled, compressor, inactive, purgeable, speculative, free). | yes | no |\n| swap i/o | Enable or disable monitoring of SWAP I/O metrics (I/O Swap). | yes | no |\n| memory page faults | Enable or disable monitoring of memory page faults metrics (memory, cow, I/O page, compress, decompress, zero fill, reactivate, purge). | yes | no |\n| disk i/o | Enable or disable monitoring of disk I/O metrics (In, Out). | yes | no |\n\n#### Examples\n\n##### Disable swap monitoring.\n\nA basic example that disables swap monitoring.\n\n```ini\n[plugin:macos:sysctl]\n system swap = no\n[plugin:macos:mach_smi]\n swap i/o = no\n\n```\n##### Disable the complete Mach SMI section.\n\nA basic example that disables the entire `mach_smi` section.\n\n```ini\n[plugin:macos:mach_smi]\n cpu utilization = no\n system ram = no\n swap i/o = no\n memory page faults = no\n disk i/o = no\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per macOS instance\n\nThese metrics refer to hardware and network monitoring.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | user, nice, system, idle | percentage |\n| system.ram | active, wired, throttled, compressor, inactive, purgeable, speculative, free | MiB |\n| mem.swapio | io, out | KiB/s |\n| mem.pgfaults | memory, cow, pagein, pageout, compress, decompress, zero_fill, reactivate, purge | faults/s |\n| system.load | load1, load5, load15 | load |\n| mem.swap | free, used | MiB |\n| system.ipv4 | received, sent | kilobits/s |\n| ipv4.tcppackets | received, sent | packets/s |\n| ipv4.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ipv4.tcphandshake | EstabResets, ActiveOpens, PassiveOpens, AttemptFails | events/s |\n| ipv4.tcpconnaborts | baddata, userclosed, nomemory, timeout | connections/s |\n| ipv4.tcpofo | inqueue | packets/s |\n| ipv4.tcpsyncookies | received, sent, failed | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InEchos, OutEchos | packets/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InHdrErrors, OutNoRoutes, InAddrErrors, InUnknownProtos | packets/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| system.uptime | uptime | seconds |\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | read, writes | KiB/s |\n| disk.ops | read, writes | operations/s |\n| disk.util | utilization | % of time working |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n\n### Per mount point\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.space | avail, used, reserved_for_root | GiB |\n| disk.inodes | avail, used, reserved_for_root | inodes |\n\n### Per network device\n\n\n\nThis scope has no 
labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.packets | received, sent, multicast_received, multicast_sent | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound | drops/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "macos.plugin-mach_smi-macOS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/macos.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "nfacct.plugin", "module_name": "nfacct.plugin", "monitored_instance": {"name": "Netfilter", "link": "https://www.netfilter.org/", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "netfilter.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Netfilter\n\nPlugin: nfacct.plugin\nModule: nfacct.plugin\n\n## Overview\n\nMonitor Netfilter metrics for optimal packet filtering and manipulation. Keep tabs on packet counts, dropped packets, and error rates to secure network operations.\n\nNetdata uses libmnl (https://www.netfilter.org/projects/libmnl/index.html) to collect information.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThis plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin uses a socket to connect with netfilter to collect data.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install required packages\n\nInstall `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:nfacct]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Additional parameters for the collector. | | no |\n\n#### Examples\n\n
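##### Basic\n\nA minimal illustrative fragment using the options listed above (the value shown is the default, and `command options` is omitted):\n\n```ini\n[plugin:nfacct]\n update every = 1\n```\n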
", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netfilter instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.netlink_new | new, ignore, invalid | connections/s |\n| netfilter.netlink_changes | insert, delete, delete_list | changes/s |\n| netfilter.netlink_search | searched, search_restart, found | searches/s |\n| netfilter.netlink_errors | icmp_error, insert_failed, drop, early_drop | events/s |\n| netfilter.netlink_expect | created, deleted, new | expectations/s |\n| netfilter.nfacct_packets | a dimension per nfacct object | packets/s |\n| netfilter.nfacct_bytes | a dimension per nfacct object | kilobytes/s |\n\n", "integration_type": "collector", "id": "nfacct.plugin-nfacct.plugin-Netfilter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/nfacct.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "perf.plugin", "module_name": "perf.plugin", "monitored_instance": {"name": "CPU performance", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux", "cpu performance", "cpu cache", "perf.plugin"], "most_popular": false}, "overview": "# CPU performance\n\nPlugin: perf.plugin\nModule: perf.plugin\n\n## Overview\n\nThis collector monitors CPU performance metrics about cycles, instructions, migrations, cache operations and more.\n\nIt uses a syscall to open a file descriptor for monitoring perf events.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIt needs setuid to use the necessary syscall to collect perf events. Netdata sets the permission at installation time.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install perf plugin\n\nIf you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.\n\n\n#### Enable the perf plugin\n\nThe plugin is disabled by default because the number of PMUs is usually quite limited, and letting Netdata claim them silently could interfere with other performance monitoring software.\n\nTo enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config netdata.conf\n```\n\nChange the value of the `perf` setting to `yes` in the `[plugins]` section. 
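The resulting fragment would look like this (a minimal sketch; any other entries in the `[plugins]` section stay as they are):\n\n```ini\n[plugins]\n perf = yes\n```\n\n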
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:perf]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nYou can get the available options by running:\n\n```bash\n/usr/libexec/netdata/plugins.d/perf.plugin --help\n```\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| command options | Command options that specify the charts shown by the plugin. `cycles`, `instructions`, `branch`, `cache`, `bus`, `stalled`, `migrations`, `alignment`, `emulation`, `L1D`, `L1D-prefetch`, `L1I`, `LL`, `DTLB`, `ITLB`, `PBU`. | 1 | yes |\n\n#### Examples\n\n##### All metrics\n\nMonitor all metrics available.\n\n```ini\n[plugin:perf]\n command options = all\n\n```\n##### CPU cycles\n\nMonitor CPU cycles.\n\n```ini\n[plugin:perf]\n command options = cycles\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per CPU performance instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| perf.cpu_cycles | cpu, ref_cpu | cycles/s |\n| perf.instructions | instructions | instructions/s |\n| perf.instructions_per_cycle | ipc | instructions/cycle |\n| perf.branch_instructions | instructions, misses | instructions/s |\n| perf.cache | references, misses | operations/s |\n| perf.bus_cycles | bus | cycles/s |\n| perf.stalled_cycles | frontend, backend | cycles/s |\n| perf.migrations | migrations | migrations |\n| perf.alignment_faults | faults | faults |\n| perf.emulation_faults | faults | faults |\n| perf.l1d_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.l1d_cache_prefetch | prefetches | prefetches/s |\n| perf.l1i_cache | read_access, read_misses | events/s |\n| perf.ll_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.dtlb_cache | read_access, read_misses, write_access, write_misses | events/s |\n| perf.itlb_cache | read_access, read_misses | events/s |\n| perf.pbu_cache | read_access | events/s |\n\n", "integration_type": "collector", "id": "perf.plugin-perf.plugin-CPU_performance", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/perf.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/diskstats", "monitored_instance": {"name": "Disk Statistics", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["disk", "disks", "io", "bcache", "block devices"], "most_popular": false}, "overview": "# Disk Statistics\n\nPlugin: proc.plugin\nModule: /proc/diskstats\n\n## Overview\n\nDetailed statistics for each of your system's disk devices and partitions.\nThe data is reported by the kernel and can be used to monitor disk activity on a Linux system.\n\nGet valuable insight into how your disks are performing and where potential bottlenecks might be.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_disk_backlog ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | disk.backlog | average backlog size of the ${label:device} disk over the last 10 minutes |\n| [ 10min_disk_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/disks.conf) | 
disk.util | average percentage of time ${label:device} disk was busy over the last 10 minutes |\n| [ bcache_cache_dirty ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_alloc | percentage of cache space used for dirty data and metadata (this usually means your SSD cache is too small) |\n| [ bcache_cache_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bcache.conf) | disk.bcache_cache_read_races | number of times data was read from the cache, the bucket was reused and invalidated in the last 10 minutes (when this occurs the data is reread from the backing device) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Disk Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.io | in, out | KiB/s |\n\n### Per disk\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| mount_point | TBD |\n| device_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| disk.io | reads, writes | KiB/s |\n| disk_ext.io | discards | KiB/s |\n| disk.ops | reads, writes | operations/s |\n| disk_ext.ops | discards, flushes | operations/s |\n| disk.qops | operations | operations |\n| disk.backlog | backlog | milliseconds |\n| disk.busy | busy | milliseconds |\n| disk.util | utilization | % of time working |\n| disk.mops | reads, writes | merged operations/s |\n| disk_ext.mops | discards | merged operations/s |\n| disk.iotime | reads, writes | milliseconds/s |\n| disk_ext.iotime | discards, flushes | milliseconds/s |\n| disk.await | reads, writes | milliseconds/operation |\n| disk_ext.await | discards, flushes | milliseconds/operation |\n| disk.avgsz | reads, writes | KiB/operation |\n| disk_ext.avgsz | discards | KiB/operation |\n| disk.svctm | svctm | milliseconds/operation |\n| disk.bcache_cache_alloc | ununsed, dirty, clean, metadata, undefined | percentage |\n| disk.bcache_hit_ratio | 5min, 1hour, 1day, ever | percentage |\n| disk.bcache_rates | congested, writeback | KiB/s |\n| disk.bcache_size | dirty | MiB |\n| disk.bcache_usage | avail | percentage |\n| disk.bcache_cache_read_races | races, errors | operations/s |\n| disk.bcache | hits, misses, collisions, readaheads | operations/s |\n| disk.bcache_bypass | hits, misses | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/diskstats-Disk_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/interrupts", "monitored_instance": {"name": "Interrupts", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["interrupts"], "most_popular": false}, "overview": "# Interrupts\n\nPlugin: proc.plugin\nModule: /proc/interrupts\n\n## Overview\n\nMonitors `/proc/interrupts`, a file organized by CPU and then by the type of interrupt.\nThe numbers reported are the counts of the interrupts that have occurred of each type.\n\nAn interrupt is a signal to the processor emitted by hardware or software indicating an event that needs\nimmediate attention. 
The processor then interrupts its current activities and executes the interrupt handler\nto deal with the event. This is part of the way a computer multitasks and handles concurrent processing.\n\nThe types of interrupts include:\n\n- **I/O interrupts**: These are caused by I/O devices like the keyboard, mouse, printer, etc. For example, when\n you type something on the keyboard, an interrupt is triggered so the processor can handle the new input.\n\n- **Timer interrupts**: These are generated at regular intervals by the system's timer circuit. It's primarily\n used to switch the CPU among different tasks.\n\n- **Software interrupts**: These are generated by a program requiring disk I/O operations, or other system resources.\n\n- **Hardware interrupts**: These are caused by hardware conditions such as power failure, overheating, etc.\n\nMonitoring `/proc/interrupts` can be used for:\n\n- **Performance tuning**: If an interrupt is happening very frequently, it could be a sign that a device is not\n configured correctly, or there is a software bug causing unnecessary interrupts. This could lead to system\n performance degradation.\n\n- **System troubleshooting**: If you're seeing a lot of unexpected interrupts, it could be a sign of a hardware problem.\n\n- **Understanding system behavior**: More generally, keeping an eye on what interrupts are occurring can help you\n understand what your system is doing. It can provide insights into the system's interaction with hardware,\n drivers, and other parts of the kernel.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Interrupts instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.interrupts | a dimension per device | interrupts/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.interrupts | a dimension per device | interrupts/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/interrupts-Interrupts", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/loadavg", "monitored_instance": {"name": "System Load Average", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["load", "load average"], "most_popular": false}, "overview": "# System Load Average\n\nPlugin: proc.plugin\nModule: /proc/loadavg\n\n## Overview\n\nThe `/proc/loadavg` file provides information about the system load average.\n\nThe load average is a measure of the amount of computational work that a system performs. It is a\nrepresentation of the average system load over a period of time.\n\nThis file contains three numbers representing the system load averages for the last 1, 5, and 15 minutes,\nrespectively. It also includes the currently running processes and the total number of processes.\n\nMonitoring the load average can be used for:\n\n- **System performance**: If the load average is too high, it may indicate that your system is overloaded.\n On a system with a single CPU, if the load average is 1, it means the single CPU is fully utilized. If the\n load averages are consistently higher than the number of CPUs/cores, it may indicate that your system is\n overloaded and tasks are waiting for CPU time.\n\n- **Troubleshooting**: If the load average is unexpectedly high, it can be a sign of a problem. This could be\n due to a runaway process, a software bug, or a hardware issue.\n\n- **Capacity planning**: By monitoring the load average over time, you can understand the trends in your\n system's workload. 
This can help with capacity planning and scaling decisions.\n\nRemember that load average not only considers CPU usage, but also includes processes waiting for disk I/O.\nTherefore, high load averages could be due to I/O contention as well as CPU contention.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ load_cpu_number ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | number of active CPU cores in the system |\n| [ load_average_15 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system fifteen-minute load average |\n| [ load_average_5 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system five-minute load average |\n| [ load_average_1 ](https://github.com/netdata/netdata/blob/master/src/health/health.d/load.conf) | system.load | system one-minute load average |\n| [ active_processes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/processes.conf) | system.active_processes | system process IDs (PID) space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Load Average instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.load | load1, load5, load15 | load |\n| system.active_processes | active | processes |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/loadavg-System_Load_Average", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/mdstat", "monitored_instance": {"name": "MD RAID", "link": "", "categories": ["data-collection.linux-systems.disk-metrics"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["raid", "mdadm", "mdstat", "raid"], "most_popular": false}, "overview": "# MD RAID\n\nPlugin: proc.plugin\nModule: /proc/mdstat\n\n## Overview\n\nThis integration monitors the status of MD RAID devices.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ mdstat_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of seconds since the last successful data collection |\n| [ mdstat_disks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.disks | number of devices in the down state for the ${label:device} ${label:raid_level} array. Any number > 0 indicates that the array is degraded. |\n| [ mdstat_mismatch_cnt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.mismatch_cnt | number of unsynchronized blocks for the ${label:device} ${label:raid_level} array |\n| [ mdstat_nonredundant_last_collected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mdstat.conf) | md.nonredundant | number of seconds since the last successful data collection |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per MD RAID instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.health | a dimension per md array | failed disks |\n\n### Per md array\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n| raid_level | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| md.disks | inuse, down | disks |\n| md.mismatch_cnt | count | unsynchronized blocks |\n| md.status | check, resync, recovery, reshape | percent |\n| md.expected_time_until_operation_finish | finish_in | seconds |\n| md.operation_speed | speed | KiB/s |\n| md.nonredundant | available | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/mdstat-MD_RAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/meminfo", "monitored_instance": {"name": "Memory Usage", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory", "ram", "available", "committed"], "most_popular": false}, "overview": "# Memory Usage\n\nPlugin: proc.plugin\nModule: /proc/meminfo\n\n## Overview\n\n`/proc/meminfo` provides detailed information about the system's current memory usage. It includes information\nabout different types of memory, RAM, Swap, ZSwap, HugePages, Transparent HugePages (THP), Kernel memory,\nSLAB memory, memory mappings, and more.\n\nMonitoring /proc/meminfo can be useful for:\n\n- **Performance Tuning**: Understanding your system's memory usage can help you make decisions about system\n tuning and optimization. For example, if your system is frequently low on free memory, it might benefit\n from more RAM.\n\n- **Troubleshooting**: If your system is experiencing problems, `/proc/meminfo` can provide clues about\n whether memory usage is a factor. 
For example, if your system is slow and cached swap is high, it could\n mean that your system is swapping out a lot of memory to disk, which can degrade performance.\n\n- **Capacity Planning**: By monitoring memory usage over time, you can understand trends and make informed\n decisions about future capacity needs.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | system.ram | system memory utilization |\n| [ ram_available ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.available | percentage of estimated amount of RAM available for userspace processes, without causing swapping |\n| [ used_swap ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swap | swap memory utilization |\n| [ 1hour_memory_hw_corrupted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.hwcorrupt | amount of memory corrupted due to a hardware failure |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Usage instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ram | free, used, cached, buffers | MiB |\n| mem.available | avail | MiB |\n| mem.swap | free, used | MiB |\n| mem.swap_cached | cached | MiB |\n| mem.zswap | in-ram, on-disk | MiB |\n| mem.hwcorrupt | HardwareCorrupted | MiB |\n| mem.commited | Commited_AS | MiB |\n| mem.writeback | Dirty, Writeback, FuseWriteback, NfsWriteback, Bounce | MiB |\n| mem.kernel | Slab, KernelStack, PageTables, VmallocUsed, Percpu | MiB |\n| mem.slab | reclaimable, unreclaimable | MiB |\n| mem.hugepages | free, used, surplus, reserved | MiB |\n| mem.thp | anonymous, shmem | MiB |\n| mem.thp_details | ShmemPmdMapped, FileHugePages, FilePmdMapped | MiB |\n| mem.reclaiming | Active, Inactive, Active(anon), Inactive(anon), Active(file), Inactive(file), Unevictable, Mlocked | MiB |\n| mem.high_low | high_used, low_used, high_free, low_free | MiB |\n| mem.cma | used, free | MiB |\n| mem.directmaps | 4k, 2m, 4m, 1g | MiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/meminfo-Memory_Usage", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/dev", "monitored_instance": {"name": "Network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["network interfaces"], "most_popular": false}, "overview": "# Network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/dev\n\n## Overview\n\nMonitor network interface metrics about bandwidth, state, errors and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ interface_speed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | network interface ${label:device} current speed |\n| [ 1m_received_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average inbound utilization for the network interface ${label:device} over the last minute |\n| [ 1m_sent_traffic_overflow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.net | average outbound utilization for the network interface ${label:device} over the last minute |\n| [ inbound_packets_dropped_ratio 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_inbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ wifi_outbound_packets_dropped_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.drops | ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes |\n| [ 1m_received_packets_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | average number of packets received by the network interface ${label:device} over the last minute |\n| [ 10s_received_packets_storm ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.packets | ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, compared to the rate over the last minute |\n| [ 10min_fifo_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/net.conf) | net.fifo | number of FIFO errors for the network interface ${label:device} in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network interfaces instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.net | received, sent | kilobits/s |\n\n### Per network device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| interface_type | TBD |\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| net.net | received, sent | kilobits/s |\n| net.speed | speed | kilobits/s |\n| net.duplex | full, half, unknown | state |\n| net.operstate | up, down, notpresent, lowerlayerdown, testing, dormant, unknown | state |\n| net.carrier | up, down | state |\n| net.mtu | mtu | octets |\n| net.packets | received, sent, multicast | packets/s |\n| net.errors | inbound, outbound | errors/s |\n| net.drops | inbound, outbound | drops/s |\n| net.fifo | receive, transmit | errors |\n| net.compressed | received, sent | packets/s |\n| net.events | frames, collisions, carrier | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/dev-Network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/ip_vs_stats", "monitored_instance": {"name": "IP Virtual Server", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip virtual server"], "most_popular": false}, "overview": "# IP Virtual Server\n\nPlugin: proc.plugin\nModule: /proc/net/ip_vs_stats\n\n## Overview\n\nThis integration 
monitors IP Virtual Server statistics\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IP Virtual Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipvs.sockets | connections | connections/s |\n| ipvs.packets | received, sent | packets/s |\n| ipvs.net | received, sent | kilobits/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/ip_vs_stats-IP_Virtual_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/netstat", "monitored_instance": {"name": "Network statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ip", "udp", "udplite", "icmp", "netstat", "snmp"], "most_popular": false}, "overview": "# Network statistics\n\nPlugin: proc.plugin\nModule: /proc/net/netstat\n\n## Overview\n\nThis integration provides metrics from the `netstat`, `snmp` and `snmp6` modules.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1m_tcp_syn_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of SYN requests was dropped due to the full TCP SYN queue over the last minute (SYN cookies were not enabled) |\n| [ 1m_tcp_syn_queue_cookies 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_syn_queue | average number of sent SYN cookies due to the full TCP SYN queue over the last minute |\n| [ 1m_tcp_accept_queue_overflows ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of overflows in the TCP accept queue over the last minute |\n| [ 1m_tcp_accept_queue_drops ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_listen.conf) | ip.tcp_accept_queue | average number of dropped packets in the TCP accept queue over the last minute |\n| [ tcp_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_conn.conf) | ip.tcpsock | TCP connections utilization |\n| [ 1m_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_sent ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of sent TCP RESETS over the last 10 seconds. This can indicate a port scan, or that a service running on this host has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last minute |\n| [ 10s_ip_tcp_resets_received ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_resets.conf) | ip.tcphandshake | average number of received TCP RESETS over the last 10 seconds. This can be an indication that a service this host needs has crashed. Netdata will not send a clear notification for this alarm. |\n| [ 1m_ipv4_udp_receive_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP receive buffer errors over the last minute |\n| [ 1m_ipv4_udp_send_buffer_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/udp_errors.conf) | ipv4.udperrors | average number of UDP send buffer errors over the last minute |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
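\n\nTo see the raw counters this module parses, you can dump the kernel files directly; a minimal plain-shell sketch (not part of Netdata):\n\n```bash\n# Each protocol prints a header line of field names followed by a line of values\ncat /proc/net/snmp\ncat /proc/net/netstat\n```\n\n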
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Network statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ip | received, sent | kilobits/s |\n| ip.tcpmemorypressures | pressures | events/s |\n| ip.tcpconnaborts | baddata, userclosed, nomemory, timeout, linger, failed | connections/s |\n| ip.tcpreorders | timestamp, sack, fack, reno | packets/s |\n| ip.tcpofo | inqueue, dropped, merged, pruned | packets/s |\n| ip.tcpsyncookies | received, sent, failed | packets/s |\n| ip.tcp_syn_queue | drops, cookies | packets/s |\n| ip.tcp_accept_queue | overflows, drops | packets/s |\n| ip.tcpsock | connections | active connections |\n| ip.tcppackets | received, sent | packets/s |\n| ip.tcperrors | InErrs, InCsumErrors, RetransSegs | packets/s |\n| ip.tcpopens | active, passive | connections/s |\n| ip.tcphandshake | EstabResets, OutRsts, AttemptFails, SynRetrans | events/s |\n| ipv4.packets | received, sent, forwarded, delivered | packets/s |\n| ipv4.errors | InDiscards, OutDiscards, InNoRoutes, OutNoRoutes, InHdrErrors, InAddrErrors, InTruncatedPkts, InCsumErrors | packets/s |\n| ipv4.bcast | received, sent | kilobits/s |\n| ipv4.bcastpkts | received, sent | packets/s |\n| ipv4.mcast | received, sent | kilobits/s |\n| ipv4.mcastpkts | received, sent | packets/s |\n| ipv4.icmp | received, sent | packets/s |\n| ipv4.icmpmsg | InEchoReps, OutEchoReps, InDestUnreachs, OutDestUnreachs, InRedirects, OutRedirects, InEchos, OutEchos, InRouterAdvert, OutRouterAdvert, InRouterSelect, OutRouterSelect, InTimeExcds, OutTimeExcds, InParmProbs, OutParmProbs, InTimestamps, OutTimestamps, InTimestampReps, OutTimestampReps | packets/s |\n| ipv4.icmp_errors | InErrors, OutErrors, InCsumErrors | packets/s |\n| ipv4.udppackets | received, sent | packets/s |\n| ipv4.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv4.udplite | received, sent | packets/s |\n| ipv4.udplite_errors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | packets/s |\n| ipv4.ecnpkts | CEP, NoECTP, ECTP0, ECTP1 | packets/s |\n| ipv4.fragsin | ok, failed, all | packets/s |\n| ipv4.fragsout | ok, failed, created | packets/s |\n| system.ipv6 | received, sent | kilobits/s |\n| ipv6.packets | received, sent, forwarded, delivers | packets/s |\n| ipv6.errors | InDiscards, OutDiscards, InHdrErrors, InAddrErrors, InUnknownProtos, InTooBigErrors, InTruncatedPkts, InNoRoutes, OutNoRoutes | packets/s |\n| ipv6.bcast | received, sent | kilobits/s |\n| ipv6.mcast | received, sent | kilobits/s |\n| ipv6.mcastpkts | received, sent | packets/s |\n| ipv6.udppackets | received, sent | packets/s |\n| ipv6.udperrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors, IgnoredMulti | events/s |\n| ipv6.udplitepackets | received, sent | packets/s |\n| ipv6.udpliteerrors | RcvbufErrors, SndbufErrors, InErrors, NoPorts, InCsumErrors | events/s |\n| ipv6.icmp | received, sent | messages/s |\n| ipv6.icmpredir | received, sent | redirects/s |\n| ipv6.icmperrors | InErrors, OutErrors, InCsumErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems | errors/s |\n| ipv6.icmpechos | InEchos, OutEchos, InEchoReplies, OutEchoReplies | messages/s |\n| ipv6.groupmemb | InQueries, OutQueries, InResponses, OutResponses, InReductions, OutReductions | messages/s |\n| ipv6.icmprouter | InSolicits, OutSolicits, 
InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpneighbor | InSolicits, OutSolicits, InAdvertisements, OutAdvertisements | messages/s |\n| ipv6.icmpmldv2 | received, sent | reports/s |\n| ipv6.icmptypes | InType1, InType128, InType129, InType136, OutType1, OutType128, OutType129, OutType133, OutType135, OutType143 | messages/s |\n| ipv6.ect | InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts | packets/s |\n| ipv6.fragsin | ok, failed, timeout, all | packets/s |\n| ipv6.fragsout | ok, failed, all | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/netstat-Network_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfs", "monitored_instance": {"name": "NFS Client", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs client", "filesystem"], "most_popular": false}, "overview": "# NFS Client\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfs\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Client.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
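\n\nA quick way to confirm the kernel is exporting NFS client counters at all (a plain-shell sketch, not part of Netdata; the file appears only when the NFS client is in use):\n\n```bash\n# RPC and per-procedure call counters for the in-kernel NFS client\ncat /proc/net/rpc/nfs\n```\n\n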
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Client instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfs.net | udp, tcp | operations/s |\n| nfs.rpc | calls, retransmits, auth_refresh | calls/s |\n| nfs.proc2 | a dimension per proc2 call | calls/s |\n| nfs.proc3 | a dimension per proc3 call | calls/s |\n| nfs.proc4 | a dimension per proc4 call | calls/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfs-NFS_Client", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/rpc/nfsd", "monitored_instance": {"name": "NFS Server", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.nfs"], "icon_filename": "nfs.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nfs server", "filesystem"], "most_popular": false}, "overview": "# NFS Server\n\nPlugin: proc.plugin\nModule: /proc/net/rpc/nfsd\n\n## Overview\n\nThis integration provides statistics from the Linux kernel's NFS Server.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per NFS Server instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nfsd.readcache | hits, misses, nocache | reads/s |\n| nfsd.filehandles | stale | handles/s |\n| nfsd.io | read, write | kilobytes/s |\n| nfsd.threads | threads | threads |\n| nfsd.net | udp, tcp | packets/s |\n| nfsd.rpc | calls, bad_format, bad_auth | calls/s |\n| nfsd.proc2 | a dimension per proc2 call | calls/s |\n| nfsd.proc3 | a dimension per proc3 call | calls/s |\n| nfsd.proc4 | a dimension per proc4 call | calls/s |\n| nfsd.proc4ops | a dimension per proc4 operation | operations/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/rpc/nfsd-NFS_Server", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sctp/snmp", "monitored_instance": {"name": "SCTP Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sctp", "stream control transmission protocol"], "most_popular": false}, "overview": "# SCTP Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sctp/snmp\n\n## Overview\n\nThis integration provides statistics about the Stream Control Transmission Protocol (SCTP).\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per SCTP Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sctp.established | established | associations |\n| sctp.transitions | active, passive, aborted, shutdown | transitions/s |\n| sctp.packets | received, sent | packets/s |\n| sctp.packet_errors | invalid, checksum | packets/s |\n| sctp.fragmentation | reassembled, fragmented | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sctp/snmp-SCTP_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat", "monitored_instance": {"name": "Socket statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sockets"], "most_popular": false}, "overview": "# Socket statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat\n\n## Overview\n\nThis integration provides socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ tcp_orphans ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_orphans.conf) | ipv4.sockstat_tcp_sockets | orphan IPv4 TCP sockets utilization |\n| [ tcp_memory ](https://github.com/netdata/netdata/blob/master/src/health/health.d/tcp_mem.conf) | ipv4.sockstat_tcp_mem | TCP memory utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
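\n\nThe raw input for these charts is a single text file; a minimal plain-shell sketch to inspect it (not part of Netdata):\n\n```bash\n# Per-protocol socket counts and memory, one protocol per line\ncat /proc/net/sockstat\n```\n\n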
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Socket statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ip.sockstat_sockets | used | sockets |\n| ipv4.sockstat_tcp_sockets | alloc, orphan, inuse, timewait | sockets |\n| ipv4.sockstat_tcp_mem | mem | KiB |\n| ipv4.sockstat_udp_sockets | inuse | sockets |\n| ipv4.sockstat_udp_mem | mem | sockets |\n| ipv4.sockstat_udplite_sockets | inuse | sockets |\n| ipv4.sockstat_raw_sockets | inuse | sockets |\n| ipv4.sockstat_frag_sockets | inuse | fragments |\n| ipv4.sockstat_frag_mem | mem | KiB |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat-Socket_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/sockstat6", "monitored_instance": {"name": "IPv6 Socket Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipv6 sockets"], "most_popular": false}, "overview": "# IPv6 Socket Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/sockstat6\n\n## Overview\n\nThis integration provides IPv6 socket statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPv6 Socket Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipv6.sockstat6_tcp_sockets | inuse | sockets |\n| ipv6.sockstat6_udp_sockets | inuse | sockets |\n| ipv6.sockstat6_udplite_sockets | inuse | sockets |\n| ipv6.sockstat6_raw_sockets | inuse | sockets |\n| ipv6.sockstat6_frag_sockets | inuse | fragments |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/sockstat6-IPv6_Socket_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/softnet_stat", "monitored_instance": {"name": "Softnet Statistics", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softnet"], "most_popular": false}, "overview": "# Softnet Statistics\n\nPlugin: proc.plugin\nModule: /proc/net/softnet_stat\n\n## Overview\n\n`/proc/net/softnet_stat` provides statistics that relate to the handling of network packets by softirq.\n\nIt provides information about:\n\n- Total number of processed packets (`processed`).\n- Packets dropped because the backlog queue (sysctl net.core.netdev_max_backlog) was full (`dropped`).\n- Times net_rx_action was rescheduled.\n- Number of times all lists were processed before the quota was exhausted.\n- Number of times not all lists were processed because the quota ran out (`squeezed`).\n- Number of times net_rx_action was rescheduled for GRO (Generic Receive Offload) cells.\n- Number of times GRO cells were processed.\n\nMonitoring the /proc/net/softnet_stat file can be useful for:\n\n- **Network performance monitoring**: By tracking the total number of processed packets and how many packets\n were dropped, you can gain insights into your system's network performance.\n\n- **Troubleshooting**: If you're experiencing network-related issues, this collector can provide valuable clues.\n For instance, a high number of dropped packets may indicate a network problem.\n\n- **Capacity planning**: If your system is consistently processing near its maximum capacity of network\n packets, it might be time to consider upgrading your network infrastructure.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 1min_netdev_backlog_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of dropped packets in the last minute due to 
exceeded net.core.netdev_max_backlog |\n| [ 1min_netdev_budget_ran_outs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/softnet.conf) | system.softnet_stat | average number of times ksoftirq ran out of sysctl net.core.netdev_budget or net.core.netdev_budget_usecs with work remaining over the last minute (this can be a cause for dropped packets) |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Softnet Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n### Per cpu core\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softnet_stat | processed, dropped, squeezed, received_rps, flow_limit_count | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/softnet_stat-Softnet_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/nf_conntrack", "monitored_instance": {"name": "Conntrack", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["connection tracking mechanism", "netfilter", "conntrack"], "most_popular": false}, "overview": "# Conntrack\n\nPlugin: proc.plugin\nModule: /proc/net/stat/nf_conntrack\n\n## Overview\n\nThis integration monitors the connection tracking mechanism of Netfilter in the Linux Kernel.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ netfilter_conntrack_full ](https://github.com/netdata/netdata/blob/master/src/health/health.d/netfilter.conf) | netfilter.conntrack_sockets | netfilter connection tracker table size utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
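\n\nTo relate the utilization alert above to raw kernel numbers, you can compare the current table size against its limit; a plain-shell sketch (not part of Netdata; the paths assume the nf_conntrack module is loaded):\n\n```bash\n# Current tracked connections vs. the configured table maximum\ncat /proc/sys/net/netfilter/nf_conntrack_count\ncat /proc/sys/net/netfilter/nf_conntrack_max\n```\n\n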
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Conntrack instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.conntrack_sockets | connections | active connections |\n| netfilter.conntrack_new | new, ignore, invalid | connections/s |\n| netfilter.conntrack_changes | inserted, deleted, delete_list | changes/s |\n| netfilter.conntrack_expect | created, deleted, new | expectations/s |\n| netfilter.conntrack_search | searched, restarted, found | searches/s |\n| netfilter.conntrack_errors | icmp_error, error_failed, drop, early_drop | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/nf_conntrack-Conntrack", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/stat/synproxy", "monitored_instance": {"name": "Synproxy", "link": "", "categories": ["data-collection.linux-systems.firewall-metrics"], "icon_filename": "firewall.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["synproxy"], "most_popular": false}, "overview": "# Synproxy\n\nPlugin: proc.plugin\nModule: /proc/net/stat/synproxy\n\n## Overview\n\nThis integration provides statistics about the Synproxy netfilter module.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Synproxy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| netfilter.synproxy_syn_received | received | packets/s |\n| netfilter.synproxy_conn_reopened | reopened | connections/s |\n| netfilter.synproxy_cookies | valid, invalid, retransmits | cookies/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/stat/synproxy-Synproxy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/net/wireless", "monitored_instance": {"name": "Wireless network interfaces", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["wireless devices"], "most_popular": false}, "overview": "# Wireless network interfaces\n\nPlugin: proc.plugin\nModule: /proc/net/wireless\n\n## Overview\n\nMonitor wireless devices with metrics about status, link quality, signal level, noise level and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per wireless device\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| wireless.status | status | status |\n| wireless.link_quality | link_quality | value |\n| wireless.signal_level | signal_level | dBm |\n| wireless.noise_level | noise_level | dBm |\n| wireless.discarded_packets | nwid, crypt, frag, retry, misc | packets/s |\n| wireless.missed_beacons | missed_beacons | frames/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/net/wireless-Wireless_network_interfaces", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pagetypeinfo", "monitored_instance": {"name": "Page types", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memory page types"], "most_popular": false}, "overview": "# Page types\n\nPlugin: proc.plugin\nModule: /proc/pagetypeinfo\n\n## Overview\n\nThis integration provides metrics about the system's memory page types\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Page types instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype_global | a dimension per pagesize | B |\n\n### Per node, zone, type\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| node_id | TBD |\n| node_zone | TBD |\n| node_type | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.pagetype | a dimension per pagesize | B |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pagetypeinfo-Page_types", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/pressure", "monitored_instance": {"name": "Pressure Stall Information", "link": "", "categories": ["data-collection.linux-systems.pressure-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pressure"], "most_popular": false}, "overview": "# Pressure Stall Information\n\nPlugin: proc.plugin\nModule: /proc/pressure\n\n## Overview\n\nIntroduced in Linux kernel 4.20, `/proc/pressure` provides system pressure stall\ninformation (PSI). PSI is a feature that allows the system to track the amount of time the system is stalled due to\nresource contention, such as CPU, memory, or I/O.\n\nThe collector monitors four separate files, one each for CPU, memory, I/O, and IRQ:\n\n- **cpu**: Tracks the amount of time tasks are stalled due to CPU contention.\n- **memory**: Tracks the amount of time tasks are stalled due to memory contention.\n- **io**: Tracks the amount of time tasks are stalled due to I/O contention.\n- **irq**: Tracks the amount of time tasks are stalled due to IRQ contention.\n\nEach of them provides metrics for stall time over the last 10 seconds, 1 minute, and 5 minutes, as well as\nthe total stall time.\n\nMonitoring the /proc/pressure files can provide important insights into system performance and capacity planning:\n\n- **Identifying resource contention**: If these metrics are consistently high, it indicates that tasks are\n frequently being stalled due to lack of resources, which can significantly degrade system performance.\n\n- **Troubleshooting performance issues**: If a system is experiencing performance issues, these metrics can\n help identify whether resource contention is the cause.\n\n- **Capacity planning**: By monitoring these metrics over time, you can understand trends in resource\n utilization and make informed decisions about when to add more resources to your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", 
"alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Pressure Stall Information instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu_some_pressure | some10, some60, some300 | percentage |\n| system.cpu_some_pressure_stall_time | time | ms |\n| system.cpu_full_pressure | some10, some60, some300 | percentage |\n| system.cpu_full_pressure_stall_time | time | ms |\n| system.memory_some_pressure | some10, some60, some300 | percentage |\n| system.memory_some_pressure_stall_time | time | ms |\n| system.memory_full_pressure | some10, some60, some300 | percentage |\n| system.memory_full_pressure_stall_time | time | ms |\n| system.io_some_pressure | some10, some60, some300 | percentage |\n| system.io_some_pressure_stall_time | time | ms |\n| system.io_full_pressure | some10, some60, some300 | percentage |\n| system.io_full_pressure_stall_time | time | ms |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/pressure-Pressure_Stall_Information", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/softirqs", "monitored_instance": {"name": "SoftIRQ statistics", "link": "", "categories": ["data-collection.linux-systems.cpu-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["softirqs", "interrupts"], "most_popular": false}, "overview": "# SoftIRQ statistics\n\nPlugin: proc.plugin\nModule: /proc/softirqs\n\n## Overview\n\nIn the Linux kernel, handling of hardware interrupts is split into two halves: the top half and the bottom half.\nThe top half is the routine that responds immediately to an interrupt, while the bottom half is deferred to be processed later.\n\nSoftirqs are a mechanism in the Linux kernel used to handle the bottom halves of interrupts, which can be\ndeferred and processed later in a context where it's safe to enable interrupts.\n\nThe actual work of handling the interrupt is offloaded to a softirq and executed later when the system\ndecides it's a good time to process them. This helps to keep the system responsive by not blocking the top\nhalf for too long, which could lead to missed interrupts.\n\nMonitoring `/proc/softirqs` is useful for:\n\n- **Performance tuning**: A high rate of softirqs could indicate a performance issue. For instance, a high\n rate of network softirqs (`NET_RX` and `NET_TX`) could indicate a network performance issue.\n\n- **Troubleshooting**: If a system is behaving unexpectedly, checking the softirqs could provide clues about\n what is going on. 
For example, a sudden increase in block device softirqs (BLOCK) might indicate a problem\n with a disk.\n\n- **Understanding system behavior**: Knowing what types of softirqs are happening can help you understand what\n your system is doing, particularly in terms of how it's interacting with hardware and how it's handling\n interrupts.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SoftIRQ statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.softirqs | a dimension per softirq | softirqs/s |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.softirqs | a dimension per softirq | softirqs/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/softirqs-SoftIRQ_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs", "monitored_instance": {"name": "ZFS Pools", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs pools", "pools", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Pools\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs\n\n## Overview\n\nThis integration provides metrics about the state of ZFS pools.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts 
are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_pool_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is degraded |\n| [ zfs_pool_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.state | ZFS pool ${label:pool} state is faulted or unavail |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per zfs pool\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| pool | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfspool.state | online, degraded, faulted, offline, removed, unavail, suspended | boolean |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs-ZFS_Pools", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/spl/kstat/zfs/arcstats", "monitored_instance": {"name": "ZFS Adaptive Replacement Cache", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.zfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zfs arc", "arc", "zfs", "filesystem"], "most_popular": false}, "overview": "# ZFS Adaptive Replacement Cache\n\nPlugin: proc.plugin\nModule: /proc/spl/kstat/zfs/arcstats\n\n## Overview\n\nThis integration monitors ZFS Adaptive Replacement Cache (ARC) statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ zfs_memory_throttle ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfs.memory_ops | number of times ZFS had to limit the ARC growth in the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
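\n\nThe charts below are derived from the kstat file; a minimal plain-shell sketch to spot-check a few of its fields (not part of Netdata; requires the ZFS modules to be loaded):\n\n```bash\n# ARC size, limits and hit/miss counters as raw kstat name/type/value rows\ngrep -E '^(size|c_min|c_max|hits|misses) ' /proc/spl/kstat/zfs/arcstats\n```\n\n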
An instance is uniquely identified by a set of labels.\n\n\n\n### Per ZFS Adaptive Replacement Cache instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zfs.arc_size | arcsz, target, min, max | MiB |\n| zfs.l2_size | actual, size | MiB |\n| zfs.reads | arc, demand, prefetch, metadata, l2 | reads/s |\n| zfs.bytes | read, write | KiB/s |\n| zfs.hits | hits, misses | percentage |\n| zfs.hits_rate | hits, misses | events/s |\n| zfs.dhits | hits, misses | percentage |\n| zfs.dhits_rate | hits, misses | events/s |\n| zfs.phits | hits, misses | percentage |\n| zfs.phits_rate | hits, misses | events/s |\n| zfs.mhits | hits, misses | percentage |\n| zfs.mhits_rate | hits, misses | events/s |\n| zfs.l2hits | hits, misses | percentage |\n| zfs.l2hits_rate | hits, misses | events/s |\n| zfs.list_hits | mfu, mfu_ghost, mru, mru_ghost | hits/s |\n| zfs.arc_size_breakdown | recent, frequent | percentage |\n| zfs.memory_ops | direct, throttled, indirect | operations/s |\n| zfs.important_ops | evict_skip, deleted, mutex_miss, hash_collisions | operations/s |\n| zfs.actual_hits | hits, misses | percentage |\n| zfs.actual_hits_rate | hits, misses | events/s |\n| zfs.demand_data_hits | hits, misses | percentage |\n| zfs.demand_data_hits_rate | hits, misses | events/s |\n| zfs.prefetch_data_hits | hits, misses | percentage |\n| zfs.prefetch_data_hits_rate | hits, misses | events/s |\n| zfs.hash_elements | current, max | elements |\n| zfs.hash_chains | current, max | chains |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/spl/kstat/zfs/arcstats-ZFS_Adaptive_Replacement_Cache", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/stat", "monitored_instance": {"name": "System statistics", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["cpu utilization", "process counts"], "most_popular": false}, "overview": "# System statistics\n\nPlugin: proc.plugin\nModule: /proc/stat\n\n## Overview\n\nCPU utilization, states and frequencies and key Linux system performance metrics.\n\nThe `/proc/stat` file provides various types of system statistics:\n\n- The overall system CPU usage statistics\n- Per CPU core statistics\n- The total context switching of the system\n- The total number of processes running\n- The total CPU interrupts\n- The total CPU softirqs\n\nThe collector also reads:\n\n- `/proc/schedstat` for statistics about the process scheduler in the Linux kernel.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/core_throttle_count` to get the count of thermal throttling events for a specific CPU core on Linux systems.\n- `/sys/devices/system/cpu/[X]/thermal_throttle/package_throttle_count` to get the count of thermal throttling events for a specific CPU package on a Linux system.\n- `/sys/devices/system/cpu/[X]/cpufreq/scaling_cur_freq` to get the current operating frequency of a specific CPU core.\n- `/sys/devices/system/cpu/[X]/cpufreq/stats/time_in_state` to get the amount of time the CPU has spent in each of its available frequency states.\n- `/sys/devices/system/cpu/[X]/cpuidle/state[X]/name` to get the names of the idle states for each CPU core in a Linux system.\n- 
`/sys/devices/system/cpu/[X]/cpuidle/state[X]/time` to get the total time each specific CPU core has spent in each idle state since the system was started.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector auto-detects all metrics. No configuration is needed.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe collector disables cpu frequency and idle state monitoring when there are more than 128 CPU cores available.\n\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `plugin:proc:/proc/stat` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU utilization over the last 10 minutes (excluding iowait, nice and steal) |\n| [ 10min_cpu_iowait ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU iowait time over the last 10 minutes |\n| [ 20min_steal_cpu ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cpu.conf) | system.cpu | average CPU steal time over the last 20 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
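\n\nFor a sanity check against the CPU charts, the aggregate counters can be read straight from the source file; a plain-shell sketch (not part of Netdata):\n\n```bash\n# Jiffies spent as: user nice system idle iowait irq softirq steal guest guest_nice\nhead -n 1 /proc/stat\n```\n\n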
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| system.intr | interrupts | interrupts/s |\n| system.ctxt | switches | context switches/s |\n| system.forks | started | processes/s |\n| system.processes | running, blocked | processes |\n| cpu.core_throttling | a dimension per cpu core | events/s |\n| cpu.package_throttling | a dimension per package | events/s |\n| cpu.cpufreq | a dimension per cpu core | MHz |\n\n### Per cpu core\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| cpu | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| cpu.cpu | guest_nice, guest, steal, softirq, irq, user, system, nice, iowait, idle | percentage |\n| cpuidle.cpu_cstate_residency_time | a dimension per c-state | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/stat-System_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/sys/kernel/random/entropy_avail", "monitored_instance": {"name": "Entropy", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["entropy"], "most_popular": false}, "overview": "# Entropy\n\nPlugin: proc.plugin\nModule: /proc/sys/kernel/random/entropy_avail\n\n## Overview\n\nEntropy, a measure of the randomness or unpredictability of data.\n\nIn the context of cryptography, entropy is used to generate random numbers or keys that are essential for\nsecure communication and encryption. Without a good source of entropy, cryptographic protocols can become\nvulnerable to attacks that exploit the predictability of the generated keys.\n\nIn most operating systems, entropy is generated by collecting random events from various sources, such as\nhardware interrupts, mouse movements, keyboard presses, and disk activity. These events are fed into a pool\nof entropy, which is then used to generate random numbers when needed.\n\nThe `/dev/random` device in Linux is one such source of entropy, and it provides an interface for programs\nto access the pool of entropy. When a program requests random numbers, it reads from the `/dev/random` device,\nwhich blocks until enough entropy is available to generate the requested numbers. This ensures that the\ngenerated numbers are truly random and not predictable. \n\nHowever, if the pool of entropy gets depleted, the `/dev/random` device may block indefinitely, causing\nprograms that rely on random numbers to slow down or even freeze. This is especially problematic for\ncryptographic protocols that require a continuous stream of random numbers, such as SSL/TLS and SSH.\n\nTo avoid this issue, some systems use a hardware random number generator (RNG) to generate high-quality\nentropy. A hardware RNG generates random numbers by measuring physical phenomena, such as thermal noise or\nradioactive decay. 
These sources of randomness are considered to be more reliable and unpredictable than\nsoftware-based sources.\n\nOne such hardware RNG is the Trusted Platform Module (TPM), which is a dedicated hardware chip that is used\nfor cryptographic operations and secure boot. The TPM contains a built-in hardware RNG that generates\nhigh-quality entropy, which can be used to seed the pool of entropy in the operating system.\n\nAlternatively, software-based solutions such as `Haveged` can be used to generate additional entropy by\nexploiting sources of randomness in the system, such as CPU utilization and network traffic. These solutions\ncan help to mitigate the risk of entropy depletion, but they may not be as reliable as hardware-based solutions.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ lowest_entropy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/entropy.conf) | system.entropy | minimum number of bits of entropy available for the kernel\u2019s random number generator |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Entropy instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.entropy | entropy | entropy |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/sys/kernel/random/entropy_avail-Entropy", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/uptime", "monitored_instance": {"name": "System Uptime", "link": "", "categories": ["data-collection.linux-systems.system-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["uptime"], "most_popular": false}, "overview": "# System Uptime\n\nPlugin: proc.plugin\nModule: /proc/uptime\n\n## Overview\n\nThe amount of time the system has been up (running).\n\nUptime is a critical aspect of overall system performance:\n\n- **Availability**: Uptime monitoring can show whether a server is consistently available or experiences frequent downtimes.\n- **Performance Monitoring**: While server uptime alone doesn't provide detailed performance data, analyzing the duration and frequency of downtimes can help identify patterns or trends.\n- **Proactive problem detection**: If server uptime monitoring reveals unexpected downtimes or a decreasing uptime trend, it can serve as an early warning sign of potential problems.\n- **Root cause analysis**: When investigating server downtime, the uptime metric alone may not provide enough information to pinpoint the exact cause.\n- **Load balancing**: Uptime data can indirectly indicate load balancing issues if certain servers have significantly lower uptimes than others.\n- **Optimize maintenance efforts**: Servers with consistently low uptimes or frequent downtimes may require more attention.\n- **Compliance requirements**: Server uptime data can be used to demonstrate compliance with regulatory requirements or SLAs that mandate a minimum level of server availability.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per System Uptime instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.uptime | uptime | seconds |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/uptime-System_Uptime", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/proc/vmstat", "monitored_instance": {"name": "Memory Statistics", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["swap", "page faults", "oom", "numa"], "most_popular": false}, "overview": "# Memory Statistics\n\nPlugin: proc.plugin\nModule: /proc/vmstat\n\n## Overview\n\nLinux Virtual memory subsystem.\n\nInformation about memory management, indicating how effectively the kernel allocates and frees\nmemory resources in response to system demands.\n\nMonitors page faults, which occur when a process requests a portion of its memory that isn't\nimmediately available. Monitoring these events can help diagnose inefficiencies in memory management and\nprovide insights into application behavior.\n\nTracks swapping activity \u2014 a vital aspect of memory management where the kernel moves data from RAM to\nswap space, and vice versa, based on memory demand and usage. It also monitors the utilization of zswap,\na compressed cache for swap pages, and provides insights into its usage and performance implications.\n\nIn the context of virtualized environments, it tracks the ballooning mechanism which is used to balance\nmemory resources between host and guest systems.\n\nFor systems using NUMA architecture, it provides insights into the local and remote memory accesses, which\ncan impact the performance based on the memory access times.\n\nThe collector also watches for 'Out of Memory' kills, a drastic measure taken by the system when it runs out\nof memory resources.\n\n\n\n\nThis collector is only supported on the following platforms:\n\n- linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ 30min_ram_swapped_out ](https://github.com/netdata/netdata/blob/master/src/health/health.d/swap.conf) | mem.swapio | percentage of the system RAM swapped in the last 30 minutes |\n| [ oom_kill ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ram.conf) | mem.oom_kill | number of out of memory kills in the last 30 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by 
*scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memory Statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.swapio | in, out | KiB/s |\n| system.pgpgio | in, out | KiB/s |\n| system.pgfaults | minor, major | faults/s |\n| mem.balloon | inflate, deflate, migrate | KiB/s |\n| mem.zswapio | in, out | KiB/s |\n| mem.ksm_cow | swapin, write | KiB/s |\n| mem.thp_faults | alloc, fallback, fallback_charge | events/s |\n| mem.thp_file | alloc, fallback, mapped, fallback_charge | events/s |\n| mem.thp_zero | alloc, failed | events/s |\n| mem.thp_collapse | alloc, failed | events/s |\n| mem.thp_split | split, failed, split_pmd, split_deferred | events/s |\n| mem.thp_swapout | swapout, fallback | events/s |\n| mem.thp_compact | success, fail, stall | events/s |\n| mem.oom_kill | kills | kills/s |\n| mem.numa | local, foreign, interleave, other, pte_updates, huge_pte_updates, hint_faults, hint_faults_local, pages_migrated | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/proc/vmstat-Memory_Statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/block/zram", "monitored_instance": {"name": "ZRAM", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zram"], "most_popular": false}, "overview": "# ZRAM\n\nPlugin: proc.plugin\nModule: /sys/block/zram\n\n## Overview\n\nzRAM, or compressed RAM, is a block device that uses a portion of your system's RAM as its backing storage.\nThe data written to this block device is compressed and stored in memory.\n\nThe collector provides information about the operation and the effectiveness of zRAM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per zram device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.zram_usage | compressed, metadata | MiB |\n| mem.zram_savings | savings, original | MiB |\n| mem.zram_ratio | ratio | ratio |\n| mem.zram_efficiency | percent | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/block/zram-ZRAM", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/drm", "monitored_instance": {"name": "AMD GPU", "link": "https://www.amd.com", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "amd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["amd", "gpu", "hardware"], "most_popular": false}, "overview": "# AMD GPU\n\nPlugin: proc.plugin\nModule: /sys/class/drm\n\n## Overview\n\nThis integration monitors AMD GPU metrics, such as utilization, clock frequency and memory usage.\n\nIt reads `/sys/class/drm` to collect metrics for every AMD GPU card instance it encounters.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per gpu\n\nThese metrics refer to the GPU.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| product_name | GPU product name (e.g. 
AMD RX 6600) |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| amdgpu.gpu_utilization | utilization | percentage |\n| amdgpu.gpu_mem_utilization | utilization | percentage |\n| amdgpu.gpu_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_clk_frequency | frequency | MHz |\n| amdgpu.gpu_mem_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_vis_vram_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_vis_vram_usage | free, used | bytes |\n| amdgpu.gpu_mem_gtt_usage_perc | usage | percentage |\n| amdgpu.gpu_mem_gtt_usage | free, used | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/drm-AMD_GPU", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/infiniband", "monitored_instance": {"name": "InfiniBand", "link": "", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["infiniband", "rdma"], "most_popular": false}, "overview": "# InfiniBand\n\nPlugin: proc.plugin\nModule: /sys/class/infiniband\n\n## Overview\n\nThis integration monitors InfiniBand network interface statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per infiniband port\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ib.bytes | Received, Sent | kilobits/s |\n| ib.packets | Received, Sent, Mcast_rcvd, Mcast_sent, Ucast_rcvd, Ucast_sent | packets/s |\n| ib.errors | Pkts_malformated, Pkts_rcvd_discarded, Pkts_sent_discarded, Tick_Wait_to_send, Pkts_missed_resource, Buffer_overrun, Link_Downed, Link_recovered, Link_integrity_err, Link_minor_errors, Pkts_rcvd_with_EBP, Pkts_rcvd_discarded_by_switch, Pkts_sent_discarded_by_switch | errors/s |\n| ib.hwerrors | Duplicated_packets, Pkt_Seq_Num_gap, Ack_timer_expired, Drop_missing_buffer, Drop_out_of_sequence, NAK_sequence_rcvd, CQE_err_Req, CQE_err_Resp, CQE_Flushed_err_Req, CQE_Flushed_err_Resp, Remote_access_err_Req, Remote_access_err_Resp, Remote_invalid_req, Local_length_err_Resp, RNR_NAK_Packets, CNP_Pkts_ignored, RoCE_ICRC_Errors | errors/s |\n| ib.hwpackets | RoCEv2_Congestion_sent, RoCEv2_Congestion_rcvd, IB_Congestion_handled, ATOMIC_req_rcvd, Connection_req_rcvd, Read_req_rcvd, Write_req_rcvd, RoCE_retrans_adaptive, RoCE_retrans_timeout, RoCE_slow_restart, RoCE_slow_restart_congestion, RoCE_slow_restart_count | packets/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/infiniband-InfiniBand", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/class/power_supply", "monitored_instance": {"name": "Power Supply", "link": "", "categories": ["data-collection.linux-systems.power-supply-metrics"], "icon_filename": "powersupply.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["psu", "power supply"], "most_popular": false}, "overview": "# Power Supply\n\nPlugin: proc.plugin\nModule: /sys/class/power_supply\n\n## Overview\n\nThis integration monitors Power supply metrics, such as battery status, AC power status and more.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ linux_power_supply_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/linux_power_supply.conf) | powersupply.capacity | percentage of remaining power supply capacity |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per power device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| powersupply.capacity | capacity | percentage |\n| powersupply.charge | empty_design, empty, now, full, full_design | Ah |\n| powersupply.energy | empty_design, empty, now, full, full_design | Wh |\n| powersupply.voltage | min_design, min, now, max, max_design | V |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/class/power_supply-Power_Supply", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/edac/mc", "monitored_instance": {"name": "Memory modules (DIMMs)", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["edac", "ecc", "dimm", "ram", "hardware"], "most_popular": false}, "overview": "# Memory modules (DIMMs)\n\nPlugin: proc.plugin\nModule: /sys/devices/system/edac/mc\n\n## Overview\n\nThe Error Detection and Correction (EDAC) subsystem detects and reports errors in the system's memory,\nprimarily ECC (Error-Correcting Code) memory errors.\n\nThe collector provides data for:\n\n- Per memory controller (MC): correctable and uncorrectable errors. These can be of 2 kinds:\n - errors related to a DIMM\n - errors that cannot be associated with a DIMM\n\n- Per memory DIMM: correctable and uncorrectable errors. There are 2 kinds:\n - memory controllers that can identify the physical DIMMs and report errors directly for them,\n - memory controllers that report errors for memory address ranges that can be linked to DIMMs.\n In this case, the DIMMs reported may be more than the physical DIMMs installed.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ecc_memory_mc_noinfo_correctable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC correctable errors (unknown DIMM slot) |\n| [ ecc_memory_mc_noinfo_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_errors | memory controller ${label:controller} ECC uncorrectable errors (unknown DIMM slot) |\n| [ ecc_memory_dimm_correctable 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC correctable errors |\n| [ ecc_memory_dimm_uncorrectable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memory.conf) | mem.edac_mc_dimm_errors | DIMM ${label:dimm} controller ${label:controller} (location ${label:dimm_location}) ECC uncorrectable errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per memory controller\n\nThese metrics refer to the memory controller.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| mc_name | Memory controller type. |\n| size_mb | The amount of memory in megabytes that this memory controller manages. |\n| max_location | Last available memory slot in this memory controller. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable, correctable_noinfo, uncorrectable_noinfo | errors |\n\n### Per memory module\n\nThese metrics refer to the memory module (or rank, [depends on the memory controller](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#f5)).\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| controller | [mcX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#mcx-directories) directory name of this memory controller. |\n| dimm | [dimmX or rankX](https://www.kernel.org/doc/html/v5.0/admin-guide/ras.html#dimmx-or-rankx-directories) directory name of this memory module. |\n| dimm_dev_type | Type of DRAM device used in this memory module. For example, x1, x2, x4, x8. |\n| dimm_edac_mode | Type of error detection and correction used. For example, S4ECD4ED would mean a Chipkill with x4 DRAM. |\n| dimm_label | Label assigned to this memory module. |\n| dimm_location | Location of the memory module. |\n| dimm_mem_type | Type of the memory module. |\n| size | The amount of memory in megabytes that this memory module manages. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.edac_mc_errors | correctable, uncorrectable | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/edac/mc-Memory_modules_(DIMMs)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/devices/system/node", "monitored_instance": {"name": "Non-Uniform Memory Access", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["numa"], "most_popular": false}, "overview": "# Non-Uniform Memory Access\n\nPlugin: proc.plugin\nModule: /sys/devices/system/node\n\n## Overview\n\nInformation about NUMA (Non-Uniform Memory Access) nodes on the system.\n\nNUMA is a method of configuring a cluster of microprocessors in a multiprocessing system so that they can\nshare memory locally, improving performance and the ability of the system to be expanded. 
NUMA is used in a\nsymmetric multiprocessing (SMP) system.\n\nIn a NUMA system, processors, memory, and I/O devices are grouped together into cells, also known as nodes.\nEach node has its own memory and set of I/O devices, and one or more processors. While a processor can access\nmemory in any of the nodes, it does so faster when accessing memory within its own node.\n\nThe collector provides statistics on memory allocations for processes running on the NUMA nodes, revealing the\nefficiency of memory allocations in multi-node systems.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per numa node\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| numa_node | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.numa_nodes | hit, miss, local, foreign, interleave, other | events/s |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/devices/system/node-Non-Uniform_Memory_Access", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/fs/btrfs", "monitored_instance": {"name": "BTRFS", "link": "", "categories": ["data-collection.linux-systems.filesystem-metrics.btrfs"], "icon_filename": "filesystem.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["btrfs", "filesystem"], "most_popular": false}, "overview": "# BTRFS\n\nPlugin: proc.plugin\nModule: /sys/fs/btrfs\n\n## Overview\n\nThis integration provides usage and error statistics from the BTRFS filesystem.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## 
Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ btrfs_allocated ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.disk | percentage of allocated BTRFS physical disk space |\n| [ btrfs_data ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.data | utilization of BTRFS data space |\n| [ btrfs_metadata ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.metadata | utilization of BTRFS metadata space |\n| [ btrfs_system ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.system | utilization of BTRFS system space |\n| [ btrfs_device_read_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS read errors |\n| [ btrfs_device_write_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS write errors |\n| [ btrfs_device_flush_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS flush errors |\n| [ btrfs_device_corruption_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS corruption errors |\n| [ btrfs_device_generation_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/btrfs.conf) | btrfs.device_errors | number of encountered BTRFS generation errors |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per btrfs filesystem\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.disk | unallocated, data_free, data_used, meta_free, meta_used, sys_free, sys_used | MiB |\n| btrfs.data | free, used | MiB |\n| btrfs.metadata | free, used, reserved | MiB |\n| btrfs.system | free, used | MiB |\n| btrfs.commits | commits | commits |\n| btrfs.commits_perc_time | commits | percentage |\n| btrfs.commit_timings | last, max | ms |\n\n### Per btrfs device\n\n\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device_id | TBD |\n| filesystem_uuid | TBD |\n| filesystem_label | TBD |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| btrfs.device_errors | write_errs, read_errs, flush_errs, corruption_errs, generation_errs | errors |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/fs/btrfs-BTRFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "/sys/kernel/mm/ksm", "monitored_instance": {"name": "Kernel Same-Page Merging", "link": "", "categories": ["data-collection.linux-systems.memory-metrics"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ksm", "samepage", "merging"], "most_popular": false}, "overview": "# Kernel Same-Page Merging\n\nPlugin: proc.plugin\nModule: /sys/kernel/mm/ksm\n\n## Overview\n\nKernel Samepage Merging (KSM) is a memory-saving feature in Linux that enables the kernel to examine the\nmemory of different processes and identify identical pages. It then merges these identical pages into a\nsingle page that the processes share. This is particularly useful for virtualization, where multiple virtual\nmachines might be running the same operating system or applications and have many identical pages.\n\nThe collector provides information about the operation and effectiveness of KSM on your system.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Kernel Same-Page Merging instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.ksm | shared, unshared, sharing, volatile | MiB |\n| mem.ksm_savings | savings, offered | MiB |\n| mem.ksm_ratios | savings | percentage |\n\n", "integration_type": "collector", "id": "proc.plugin-/sys/kernel/mm/ksm-Kernel_Same-Page_Merging", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "proc.plugin", "module_name": "ipc", "monitored_instance": {"name": "Inter Process Communication", "link": "", "categories": ["data-collection.linux-systems.ipc-metrics"], "icon_filename": "network-wired.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ipc", "semaphores", "shared memory"], "most_popular": false}, "overview": "# Inter Process Communication\n\nPlugin: proc.plugin\nModule: ipc\n\n## Overview\n\nIPC stands for Inter-Process Communication. It is a mechanism which allows processes to communicate with each\nother and synchronize their actions.\n\nThis collector exposes information about:\n\n- Message Queues: This allows messages to be exchanged between processes. It's a more flexible method that\n allows messages to be placed onto a queue and read at a later time.\n\n- Shared Memory: This method allows for the fastest form of IPC because processes can exchange data by\n reading/writing into shared memory segments.\n\n- Semaphores: They are used to synchronize the operations performed by independent processes. So, if multiple\n processes are trying to access a single shared resource, semaphores can ensure that only one process\n accesses the resource at a given time.\n\n\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\n\n\nThere are no configuration options.\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ semaphores_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphores | IPC semaphore utilization |\n| [ semaphore_arrays_used ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipc.conf) | system.ipc_semaphore_arrays | IPC semaphore arrays utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Inter Process Communication instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.ipc_semaphores | semaphores | semaphores |\n| system.ipc_semaphore_arrays | arrays | arrays |\n| system.message_queue_message | a dimension per queue | messages |\n| system.message_queue_bytes | a dimension per queue | bytes |\n| system.shared_memory_segments | segments | segments |\n| system.shared_memory_bytes | bytes | bytes |\n\n", "integration_type": "collector", "id": "proc.plugin-ipc-Inter_Process_Communication", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/proc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "adaptec_raid", "monitored_instance": {"name": "AdaptecRAID", "link": "https://www.microchip.com/en-us/products/storage", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "adaptec.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# AdaptecRAID\n\nPlugin: python.d.plugin\nModule: adaptec_raid\n\n## Overview\n\nThis collector monitors Adaptec RAID hardware storage controller metrics about both physical and logical drives.\n\n\nIt uses the `arcconf` command line utility (from Adaptec) to monitor your RAID controller.\n\nExecuted commands:\n - `sudo -n arcconf GETCONFIG 1 LD`\n - `sudo -n arcconf GETCONFIG 1 PD`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the `arcconf` command line utility.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run arcconf as sudoer\n\nThe module uses arcconf, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute arcconf as root without a password.\n\nAdd the following to your `/etc/sudoers` file (`which arcconf` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/arcconf\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. 
Resetting is not optimal, but a next-best solution given the inability to execute arcconf using sudo.\n\nAs root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/adaptec_raid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/adaptec_raid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: my_job_name \n update_every: 1 # the JOB's data collection frequency\n priority: 60000 # the JOB's order on the dashboard\n penalty: yes # the JOB's penalty\n autodetection_retry: 0 # the JOB's re-check interval in seconds\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `adaptec_raid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin adaptec_raid debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ adaptec_raid_ld_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.ld_status | logical device status is failed or degraded |\n| [ adaptec_raid_pd_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptec_raid.pd_state | physical device state is not online |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per AdaptecRAID instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| adaptec_raid.ld_status | a dimension per logical device | bool |\n| adaptec_raid.pd_state | a dimension per physical device | bool |\n| adaptec_raid.smart_warnings | a dimension per physical device | count |\n| adaptec_raid.temperature | a dimension per physical device | celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-adaptec_raid-AdaptecRAID", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/adaptec_raid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "alarms", "monitored_instance": {"name": "Netdata Agent alarms", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["alarms", "netdata"], "most_popular": false}, "overview": "# Netdata Agent alarms\n\nPlugin: python.d.plugin\nModule: alarms\n\n## Overview\n\nThis collector creates an 'Alarms' menu with one line plot of `alarms.status`.\n\n\nAlarm status is read from the Netdata Agent REST API [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIt discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/alarms.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/alarms.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | Netdata agent alarms endpoint to collect from. 
Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |\n| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {\"CLEAR\": 0, \"WARNING\": 1, \"CRITICAL\": 2} | yes |\n| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |\n| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |\n| alarm_contains_words | A \",\" separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with \"cpu\" or \"load\" in alarm name. Default includes all. | | yes |\n| alarm_excludes_words | A \",\" separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with \"cpu\" or \"load\" in alarm name. Default excludes None. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\njobs:\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n\n```\n##### Advanced\n\nAn advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.\n\"ML\" job will collect status and values for all alarms with \"ml_\" in the name. Default job will collect status for all other alarms.\n\n\n```yaml\nML:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: true\n alarm_status_chart_type: 'stacked'\n alarm_contains_words: 'ml_'\n\nDefault:\n update_every: 5\n url: 'http://127.0.0.1:19999/api/v1/alarms?all'\n status_map:\n CLEAR: 0\n WARNING: 1\n CRITICAL: 2\n collect_alarm_values: false\n alarm_status_chart_type: 'stacked'\n alarm_excludes_words: 'ml_'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin alarms debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Netdata Agent alarms instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |\n| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value |\n\n", "integration_type": "collector", "id": "python.d.plugin-alarms-Netdata_Agent_alarms", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/alarms/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "am2320", "monitored_instance": {"name": "AM2320", "link": "https://learn.adafruit.com/adafruit-am2320-temperature-humidity-i2c-sensor/overview", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "am2320", "sensor", "humidity"], "most_popular": false}, "overview": "# AM2320\n\nPlugin: python.d.plugin\nModule: am2320\n\n## Overview\n\nThis collector monitors AM2320 sensor metrics about temperature and humidity.\n\nIt retrieves temperature and humidity values by contacting an AM2320 sensor over i2c.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming prerequisites are met, the collector will try to connect to the sensor via i2c\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Sensor connection to a Raspberry Pi\n\nConnect the am2320 to the Raspberry Pi I2C pins\n\nRaspberry Pi 3B/4 Pins:\n\n- Board 3.3V (pin 1) to sensor VIN (pin 1)\n- Board SDA (pin 3) to sensor SDA (pin 2)\n- Board GND (pin 6) to sensor GND (pin 3)\n- Board SCL (pin 5) to sensor SCL (pin 4)\n\nYou may also need to add two I2C pullup resistors if your board does not already have them. The Raspberry Pi does have internal pullup resistors but it doesn't hurt to add them anyway. You can use 2.2K - 10K but we will just use 10K. 
One resistor goes from VDD to SCL and the other from VDD to SDA.\n\n\n#### Software requirements\n\nInstall the Adafruit CircuitPython AM2320 library:\n\n`sudo pip3 install adafruit-circuitpython-am2320`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/am2320.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/am2320.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Local sensor\n\nA basic JOB configuration\n\n```yaml\nlocal_sensor:\n name: 'Local AM2320'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin am2320 debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per AM2320 instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| am2320.temperature | temperature | celsius |\n| am2320.humidity | humidity | percentage |\n\n", "integration_type": "collector", "id": "python.d.plugin-am2320-AM2320", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "beanstalk", "monitored_instance": {"name": "Beanstalk", "link": "https://beanstalkd.github.io/", "categories": ["data-collection.message-brokers"], "icon_filename": "beanstalk.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["beanstalk", "beanstalkd", "message"], "most_popular": false}, "overview": "# Beanstalk\n\nPlugin: python.d.plugin\nModule: beanstalk\n\n## Overview\n\nMonitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management.\n\nThe collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the module will attempt to connect to beanstalkd at 127.0.0.1:11300.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### beanstalkc python module\n\nThe collector requires the `beanstalkc` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/beanstalk.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/beanstalk.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. 
This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |\n| port | Port of the beanstalk service. | 11300 | no |\n\n#### Examples\n\n##### Remote beanstalk server\n\nA basic remote beanstalk server\n\n```yaml\nremote:\n name: 'beanstalk'\n host: '1.2.3.4'\n port: 11300\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local_beanstalk'\n host: '127.0.0.1'\n port: 11300\n\nremote_job:\n name: 'remote_beanstalk'\n host: '192.0.2.1'\n port: 11300\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin beanstalk debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Beanstalk instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.cpu_usage | user, system | cpu time |\n| beanstalk.jobs_rate | total, timeouts | jobs/s |\n| beanstalk.connections_rate | connections | connections/s |\n| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |\n| beanstalk.current_tubes | tubes | tubes |\n| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.current_connections | written, producers, workers, waiting | connections |\n| beanstalk.binlog | written, migrated | records/s |\n| beanstalk.uptime | uptime | seconds |\n\n### Per tube\n\nMetrics related to Beanstalk tubes. 
Each tube produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| beanstalk.jobs_rate | jobs | jobs/s |\n| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |\n| beanstalk.connections | using, waiting, watching | connections |\n| beanstalk.commands | deletes, pauses | commands/s |\n| beanstalk.pause | since, left | seconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-beanstalk-Beanstalk", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "bind_rndc", "monitored_instance": {"name": "ISC Bind (RNDC)", "link": "https://www.isc.org/bind/", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "isc.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dns", "bind", "server"], "most_popular": false}, "overview": "# ISC Bind (RNDC)\n\nPlugin: python.d.plugin\nModule: bind_rndc\n\n## Overview\n\nMonitor ISC Bind (RNDC) performance for optimal DNS server operations. Monitor query rates, response times, and error rates to ensure reliable DNS service delivery.\n\nThis collector uses the `rndc` tool to dump statistics to `named.stats`, then reads that file to gather Bind Name Server summary performance metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to read the named.stats file at `/var/log/bind/named.stats`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum bind version and permissions\n\nThe version of bind must be >=9.6, and the Netdata user must have permissions to run `rndc stats`.\n\n#### Setup log rotate for bind stats\n\nBIND appends logs at EVERY RUN. It is NOT RECOMMENDED to set `update_every` below 30 sec.\nIt is STRONGLY RECOMMENDED to create a `bind-rndc.conf` file for logrotate.\n\nTo set up BIND to dump stats, do the following:\n\n1. Add to 'named.conf.options' options {}:\n`statistics-file \"/var/log/bind/named.stats\";`\n\n2. Create bind/ directory in /var/log:\n`cd /var/log/ && mkdir bind`\n\n3. Change owner of directory to 'bind' user:\n`chown bind bind/`\n\n4. RELOAD (NOT restart) BIND:\n`systemctl reload bind9.service`\n\n5. 
Run 'rndc stats' as root to dump the statistics (BIND will create named.stats in the new directory)\n\nTo allow Netdata to run 'rndc stats', change the '/etc/bind/rndc.key' group to netdata:\n`chown :netdata rndc.key`\n\nLast, BUT NOT least, is to create bind-rndc.conf in logrotate.d/:\n```\n/var/log/bind/named.stats {\n\n daily\n rotate 4\n compress\n delaycompress\n create 0644 bind bind\n missingok\n postrotate\n rndc reload > /dev/null\n endscript\n}\n```\nTo test your logrotate conf file, run as root:\n`logrotate /etc/logrotate.d/bind-rndc -d (debug dry-run mode)`\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/bind_rndc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/bind_rndc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| named_stats_path | Path to the named stats, after being dumped by `rndc` | /var/log/bind/named.stats | no |\n\n#### Examples\n\n##### Local bind stats\n\nDefine a local path to bind stats file\n\n```yaml\nlocal:\n named_stats_path: '/var/log/bind/named.stats'\n\n```\n
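##### Longer update interval\n\nThe log rotate notes above recommend not setting `update_every` below 30 seconds. As a sketch only, reusing just the documented `named_stats_path` and `update_every` options, a job honouring that recommendation could look like:\n\n```yaml\nlocal:\n named_stats_path: '/var/log/bind/named.stats'\n update_every: 30\n\n```\n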
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `bind_rndc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin bind_rndc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ bind_rndc_stats_file_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/bind_rndc.conf) | bind_rndc.stats_size | BIND statistics-file size |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per ISC Bind (RNDC) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| bind_rndc.name_server_statistics | requests, rejected_queries, success, failure, responses, duplicate, recursion, nxrrset, nxdomain, non_auth_answer, auth_answer, dropped_queries | stats |\n| bind_rndc.incoming_queries | a dimension per incoming query type | queries |\n| bind_rndc.outgoing_queries | a dimension per outgoing query type | queries |\n| bind_rndc.stats_size | stats_size | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-bind_rndc-ISC_Bind_(RNDC)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/bind_rndc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "boinc", "monitored_instance": {"name": "BOINC", "link": "https://boinc.berkeley.edu/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "bolt.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["boinc", "distributed"], "most_popular": false}, "overview": "# BOINC\n\nPlugin: python.d.plugin\nModule: boinc\n\n## Overview\n\nThis collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.\n\nIt uses the same RPC interface that the BOINC monitoring GUI does.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Boinc RPC interface\n\nBOINC requires use of a password to access its RPC interface. 
You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/boinc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/boinc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| hostname | Define a hostname where boinc is running. | localhost | no |\n| port | The port of boinc RPC interface. | | no |\n| password | Provide a password to connect to a boinc RPC interface. | | no |\n\n#### Examples\n\n##### Configuration of a remote boinc instance\n\nA basic JOB configuration for a remote boinc instance\n\n```yaml\nremote:\n hostname: '1.2.3.4'\n port: 1234\n password: 'some-password'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n hostname: '127.0.0.1'\n port: 1234\n password: 'some-password'\n\nremote_job:\n name: 'remote'\n hostname: '192.0.2.1'\n port: 1234\n password: 'some-other-password'\n\n```\n
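##### Local instance with password only\n\nA minimal sketch for a local client, assuming only that the RPC password was copied from `gui_rpc_auth.cfg` ( `hostname` falls back to its documented default of localhost):\n\n```yaml\nlocalhost:\n name: 'local'\n password: 'some-password'\n\n```\n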
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin boinc debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |\n| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |\n| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |\n| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per BOINC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| boinc.tasks | Total, Active | tasks |\n| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |\n| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |\n| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |\n\n", "integration_type": "collector", "id": "python.d.plugin-boinc-BOINC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ceph", "monitored_instance": {"name": "Ceph", "link": "https://ceph.io/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ceph.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["ceph", "storage"], "most_popular": false}, "overview": "# Ceph\n\nPlugin: python.d.plugin\nModule: ceph\n\n## Overview\n\nThis collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.\n\nUses the `rados` python module to connect to a Ceph cluster.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### `rados` python module\n\nMake sure the `rados` python module is installed.\n\n#### Granting read permissions to the ceph group on the keyring file\n\nExecute: `chmod 640 
/etc/ceph/ceph.client.admin.keyring`\n\n#### Create a specific rados_id\n\nYou can optionally create a rados_id to use instead of the default `admin`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ceph.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ceph.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| config_file | Ceph config file. | | yes |\n| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must have group read permission. | | yes |\n| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |\n\n#### Examples\n\n##### Basic local Ceph cluster\n\nA basic configuration to connect to a local Ceph cluster.\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.admin.keyring'\n\n```\n
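##### Custom rados_id\n\nA sketch of the same local setup using the documented `rados_id` option instead of the default `admin` (see the prerequisite on creating a specific rados_id; the id and its keyring path below are assumed examples):\n\n```yaml\nlocal:\n config_file: '/etc/ceph/ceph.conf'\n keyring_file: '/etc/ceph/ceph.client.netdata.keyring'\n rados_id: 'netdata'\n\n```\n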
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ceph debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Ceph instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ceph.general_usage | avail, used | KiB |\n| ceph.general_objects | cluster | objects |\n| ceph.general_bytes | read, write | KiB/s |\n| ceph.general_operations | read, write | operations |\n| ceph.general_latency | apply, commit | milliseconds |\n| ceph.pool_usage | a dimension per Ceph Pool | KiB |\n| ceph.pool_objects | a dimension per Ceph Pool | objects |\n| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |\n| ceph.pool_read_operations | a dimension per Ceph Pool | operations |\n| ceph.pool_write_operations | a dimension per Ceph Pool | operations |\n| ceph.osd_usage | a dimension per Ceph OSD | KiB |\n| ceph.osd_size | a dimension per Ceph OSD | KiB |\n| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |\n| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |\n\n", "integration_type": "collector", "id": "python.d.plugin-ceph-Ceph", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "changefinder", "monitored_instance": {"name": "python.d changefinder", "link": "", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["change detection", "anomaly detection", "machine learning", "ml"], "most_popular": false}, "overview": "# python.d changefinder\n\nPlugin: python.d.plugin\nModule: changefinder\n\n## Overview\n\nThis collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to\nperform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)\non your Netdata charts and/or dimensions.\n\n\nInstead of this collector just _collecting_ data, it also does some computation on the data it collects to return a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm so there is no batch step to train the model, instead it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details) and it should scale fairly well to work on lots of charts or hosts (if running on a parent node for example).\n\n### Notes\n\n- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its\n typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly\n this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw\n score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have\n already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then\n should 'settle down' once it has built up enough history. 
This is a typical characteristic of online machine learning\n approaches which need some initial window of time before they can be useful.\n- As this collector does most of the work in Python itself, you may want to try it out first on a test or development\n system to get a sense of its performance characteristics on a node similar to where you would like to use it.\n- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any work, some of the\n typical performance characteristics we saw from running this collector (with defaults) were:\n - A runtime (`netdata.runtime_changefinder`) of ~30ms.\n - Typically ~1% additional cpu usage.\n - About ~85mb of ram (`apps.mem`) being continually used by the `python.d.plugin` under default configuration.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will work over all `system.*` charts.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the packages below to be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages for the netdata user\npip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4\n```\n\n**Note**: if you need to tell Netdata to use Python 3 then you can pass the below command in the python plugin section\nof your `netdata.conf` file.\n\n```yaml\n[ plugin:python.d ]\n # update every = 1\n command options = -ppython3\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/changefinder.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/changefinder.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | What charts to pull data for - a regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| charts_to_exclude | Charts to exclude, useful if you would like to exclude some specific charts. Note: should be a ',' separated string like 'chart.name,chart.name'. | | no |\n| mode | Get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |\n| cf_r | The `r` discounting (forgetting) parameter passed to the changefinder library. 
| 0.5 | no |\n| cf_order | The SDAR model order parameter passed to the changefinder library. | 1 | no |\n| cf_smooth | The smoothing window size passed to the changefinder library. | 15 | no |\n| cf_threshold | The percentile above which scores will be flagged. | 99 | no |\n| n_score_samples | The number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |\n| show_scores | Set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: ''\n mode: 'per_chart'\n cf_r: 0.5\n cf_order: 1\n cf_smooth: 15\n cf_threshold: 99\n n_score_samples: 14400\n show_scores: false\n\n```\n
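##### Per dimension scoring\n\nA sketch of the same job switched to the other documented `mode` value, `per_dim`, which scores each dimension separately rather than each chart (this reading of the mode values is an assumption; all other options keep their defaults):\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n mode: 'per_dim'\n\n```\n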
", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin changefinder debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d changefinder instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| changefinder.scores | a dimension per chart | score |\n| changefinder.flags | a dimension per chart | flag |\n\n", "integration_type": "collector", "id": "python.d.plugin-changefinder-python.d_changefinder", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/changefinder/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "dovecot", "monitored_instance": {"name": "Dovecot", "link": "https://www.dovecot.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "dovecot.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["dovecot", "imap", "mail"], "most_popular": false}, "overview": "# Dovecot\n\nPlugin: python.d.plugin\nModule: dovecot\n\n## Overview\n\nThis collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.\n\nIt uses the dovecot socket and executes the `EXPORT global` command to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Dovecot configuration\n\nThe Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/dovecot.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/dovecot.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. 
This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| socket | Use this socket to communicate with Dovecot. | /var/run/dovecot/stats | no |\n| host | Instead of using a socket, you can point the collector to an IP address for Dovecot statistics. | | no |\n| port | Used in combination with host, configures the port Dovecot listens on. | | no |\n\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration.\n\n```yaml\nlocaltcpip:\n name: 'local'\n host: '127.0.0.1'\n port: 24242\n\n```\n##### Local socket\n\nA basic local socket configuration\n\n```yaml\nlocalsocket:\n name: 'local'\n socket: '/var/run/dovecot/stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin dovecot debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Dovecot instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| dovecot.sessions | active sessions | number |\n| dovecot.logins | logins | number |\n| dovecot.commands | commands | commands |\n| dovecot.faults | minor, major | faults |\n| dovecot.context_switches | voluntary, involuntary | switches |\n| dovecot.io | read, write | KiB/s |\n| dovecot.net | read, write | kilobits/s |\n| dovecot.syscalls | read, write | syscalls/s |\n| dovecot.lookup | path, attr | number/s |\n| dovecot.cache | hits | hits/s |\n| dovecot.auth | ok, failed | attempts |\n| dovecot.auth_cache | hit, miss | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-dovecot-Dovecot", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/dovecot/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "example", "monitored_instance": {"name": "Example collector", "link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/README.md", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["example", "netdata", "python"], "most_popular": false}, "overview": "# Example collector\n\nPlugin: python.d.plugin\nModule: example\n\n## Overview\n\nExample collector that generates some random numbers as metrics.\n\nIf you want to write your own collector, read our [writing a new Python 
module](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.\n\n\nThe `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/example.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/example.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| num_lines | The number of lines to create. | 4 | no |\n| lower | The lower bound of numbers to randomly sample from. | 0 | no |\n| upper | The upper bound of numbers to randomly sample from. | 100 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nfour_lines:\n name: \"Four Lines\"\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n num_lines: 4\n lower: 0\n upper: 100\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin example debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Example collector instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| example.random | random | number |\n\n", "integration_type": "collector", "id": "python.d.plugin-example-Example_collector", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/example/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "exim", "monitored_instance": {"name": "Exim", "link": "https://www.exim.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "exim.jpg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["exim", "mail", "server"], "most_popular": false}, "overview": "# Exim\n\nPlugin: python.d.plugin\nModule: exim\n\n## Overview\n\nThis collector monitors Exim mail queue.\n\nIt uses the `exim` command line binary to get the statistics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nAssuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Exim configuration - local installation\n\nThe module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary. To do this, add the `queue_list_requires_admin` statement to the exim configuration and set it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is in `/etc/exim.conf`.\n\n1. Edit the `exim` configuration with your preferred editor and add:\n`queue_list_requires_admin = false`\n2. Restart `exim` and Netdata\n\n\n#### Exim configuration - WHM (CPanel) server\n\nOn a WHM server, you can reconfigure `exim` over the WHM interface with the following steps.\n\n1. Log in to WHM\n2. Navigate to Service Configuration --> Exim Configuration Manager --> tab Advanced Editor\n3. Scroll down to the button **Add additional configuration setting** and click on it.\n4. In the new dropdown that appears above, find and choose:\n`queue_list_requires_admin` and set it to `false`\n5. 
Scroll to the end and click the **Save** button.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/exim.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/exim.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | Path and command to the `exim` binary | exim -bpc | no |\n\n#### Examples\n\n##### Local exim install\n\nA basic local exim install\n\n```yaml\nlocal:\n command: 'exim -bpc'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin exim debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Exim instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| exim.qemails | emails | emails |\n\n", "integration_type": "collector", "id": "python.d.plugin-exim-Exim", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/exim/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "fail2ban", "monitored_instance": {"name": "Fail2ban", "link": "https://www.fail2ban.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "fail2ban.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["fail2ban", "security", "authentication", "authorization"], "most_popular": false}, "overview": "# Fail2ban\n\nPlugin: python.d.plugin\nModule: fail2ban\n\n## Overview\n\nMonitor Fail2ban performance for prime intrusion prevention operations. Monitor ban counts, jail statuses, and failed login attempts to ensure robust network security.\n\n\nIt collects metrics through reading the default log and configuration files of fail2ban.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe `fail2ban.log` file must be readable by the user `netdata`.\n - change the file ownership and access permissions.\n - update `/etc/logrotate.d/fail2ban` to persist the changes after rotating the log file.\n\nTo change the file ownership and access permissions, execute the following:\n\n```shell\nsudo chown root:netdata /var/log/fail2ban.log\nsudo chmod 640 /var/log/fail2ban.log\n```\n\nTo persist the changes after rotating the log file, add `create 640 root netdata` to `/etc/logrotate.d/fail2ban`:\n\n```shell\n/var/log/fail2ban.log {\n\n weekly\n rotate 4\n compress\n\n delaycompress\n missingok\n postrotate\n fail2ban-client flushlogs 1>/dev/null\n endscript\n\n # If fail2ban runs as non-root it still needs to have write access\n # to logfiles.\n # create 640 fail2ban adm\n create 640 root netdata\n}\n```\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, the collector will attempt to read the log file at /var/log/fail2ban.log and the conf file at /etc/fail2ban/jail.local.\nIf the conf file is not found, the default jail is `ssh`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/fail2ban.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/fail2ban.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, 
but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |\n| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |\n| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |\n| exclude | jails you want to exclude from autodetection. | | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocal:\n log_path: '/var/log/fail2ban.log'\n conf_path: '/etc/fail2ban/jail.local'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `fail2ban` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin fail2ban debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Fail2ban instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| fail2ban.failed_attempts | a dimension per jail | attempts/s |\n| fail2ban.bans | a dimension per jail | bans/s |\n| fail2ban.banned_ips | a dimension per jail | ips |\n\n", "integration_type": "collector", "id": "python.d.plugin-fail2ban-Fail2ban", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/fail2ban/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "gearman", "monitored_instance": {"name": "Gearman", "link": "http://gearman.org/", "categories": ["data-collection.distributed-computing-systems"], "icon_filename": "gearman.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["gearman", "gearman job server"], "most_popular": false}, "overview": "# Gearman\n\nPlugin: python.d.plugin\nModule: gearman\n\n## Overview\n\nMonitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.\n\nThis collector connects to a Gearman instance via either TCP or unix socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Socket permissions\n\nThe gearman UNIX socket should have read permission for user netdata.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/gearman.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/gearman.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | URL or IP where gearman is running. | localhost | no |\n| port | Port of URL or IP where gearman is running. | 4730 | no |\n| tls | Use tls to connect to gearman. | false | no |\n| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |\n| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |\n\n#### Examples\n\n##### Local gearman service\n\nA basic host and port gearman configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 4730\n\nremote:\n name: 'remote'\n host: '192.0.2.1'\n port: 4730\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin gearman debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Gearman instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.total_jobs | Pending, Running | Jobs |\n\n### Per gearman job\n\nMetrics related to Gearman jobs. 
Each job produces its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| gearman.single_job | Pending, Idle, Running | Jobs |\n\n", "integration_type": "collector", "id": "python.d.plugin-gearman-Gearman", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/gearman/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "go_expvar", "monitored_instance": {"name": "Go applications (EXPVAR)", "link": "https://pkg.go.dev/expvar", "categories": ["data-collection.apm"], "icon_filename": "go.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["go", "expvar", "application"], "most_popular": false}, "overview": "# Go applications (EXPVAR)\n\nPlugin: python.d.plugin\nModule: go_expvar\n\n## Overview\n\nThis collector monitors Go applications that expose their metrics with the use of the `expvar` package from the Go standard library. It produces charts for Go runtime memory statistics and optionally any number of custom charts.\n\nIt connects via HTTP to gather the metrics exposed via the `expvar` package.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the go_expvar collector\n\nThe `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Sample `expvar` usage in a Go application\n\nThe `expvar` package exposes metrics over HTTP and is very easy to use.\nConsider this minimal sample below:\n\n```go\npackage main\n\nimport (\n _ \"expvar\"\n \"net/http\"\n)\n\nfunc main() {\n http.ListenAndServe(\"127.0.0.1:8080\", nil)\n}\n```\n\nWhen imported this way, the `expvar` package registers an HTTP handler at `/debug/vars` that\nexposes Go runtime's memory statistics in JSON format. 
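For example, with the sample application above running, you can query the endpoint directly (a minimal check; the address here matches the sample's `http.ListenAndServe` call, so adjust the host and port to your application):\n\n```shell\n# Fetch the expvar JSON document from the sample application\ncurl http://127.0.0.1:8080/debug/vars\n```\n\n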
You can inspect the output by opening\nthe URL in your browser (or by using `wget` or `curl`).\n\nSample output:\n\n```json\n{\n\"cmdline\": [\"./expvar-demo-binary\"],\n\"memstats\": {\"Alloc\":630856,\"TotalAlloc\":630856,\"Sys\":3346432,\"Lookups\":27, }\n}\n```\n\nYou can of course expose and monitor your own variables as well.\nHere is a sample Go application that exposes a few custom variables:\n\n```go\npackage main\n\nimport (\n \"expvar\"\n \"net/http\"\n \"runtime\"\n \"time\"\n)\n\nfunc main() {\n\n tick := time.NewTicker(1 * time.Second)\n num_go := expvar.NewInt(\"runtime.goroutines\")\n counters := expvar.NewMap(\"counters\")\n counters.Set(\"cnt1\", new(expvar.Int))\n counters.Set(\"cnt2\", new(expvar.Float))\n\n go http.ListenAndServe(\":8080\", nil)\n\n for {\n select {\n case <- tick.C:\n num_go.Set(int64(runtime.NumGoroutine()))\n counters.Add(\"cnt1\", 1)\n counters.AddFloat(\"cnt2\", 1.452)\n }\n }\n}\n```\n\nApart from the runtime memory stats, this application publishes two counters and the\nnumber of currently running Goroutines and updates these stats every second.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/go_expvar.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/go_expvar.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified. Each JOB can be used to monitor a different Go application.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |\n| user | If the URL is password protected, this is the username to use. | | no |\n| pass | If the URL is password protected, this is the password to use. | | no |\n| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |\n| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |\n\n#### Examples\n\n##### Monitor a Go app1 application\n\nThe example below sets a configuration for a Go application, called `app1`. 
Besides the `memstats`, the application also exposes two counters and the number of currently running Goroutines and updates these stats every second.\n\nThe `go_expvar` collector can monitor these as well with the use of the `extra_charts` configuration variable.\n\nThe `extra_charts` variable is a YAML list of Netdata chart definitions.\nEach chart definition has the following keys:\n\n```\nid: Netdata chart ID\noptions: a key-value mapping of chart options\nlines: a list of line definitions\n```\n\n**Note: please do not use dots in the chart or line ID field.\nSee [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-284494195) for explanation.**\n\nPlease see these two links to the official Netdata documentation for more information about the values:\n\n- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart)\n- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)\n\n**Line definitions**\n\nEach chart can define multiple lines (dimensions).\nA line definition is a key-value mapping of line options.\nEach line can have the following options:\n\n```\n# mandatory\nexpvar_key: the name of the expvar as present in the JSON output of /debug/vars endpoint\nexpvar_type: value type; supported are \"float\" or \"int\"\nid: the id of this line/dimension in Netdata\n\n# optional - Netdata defaults are used if these options are not defined\nname: ''\nalgorithm: absolute\nmultiplier: 1\ndivisor: 100 if expvar_type == float, 1 if expvar_type == int\nhidden: False\n```\n\nPlease see the following link for more information about the options and their default values:\n[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#dimension)\n\nApart from top-level expvars, this plugin can also parse expvars stored in a multi-level map.\nAll dicts in the resulting JSON document are then flattened to one level.\nExpvar names are joined together with '.' when flattening.\n\nExample:\n\n```\n{\n \"counters\": {\"cnt1\": 1042, \"cnt2\": 1512.9839999999983},\n \"runtime.goroutines\": 5\n}\n```\n\nIn the above case, the exported variables will be available under `runtime.goroutines`,\n`counters.cnt1` and `counters.cnt2` expvar_keys. If the flattening results in a key collision,\nthe first defined key wins and all subsequent keys with the same name are ignored.\n\n\n```yaml\napp1:\n name : 'app1'\n url : 'http://127.0.0.1:8080/debug/vars'\n collect_memstats: true\n extra_charts:\n - id: \"runtime_goroutines\"\n options:\n name: num_goroutines\n title: \"runtime: number of goroutines\"\n units: goroutines\n family: runtime\n context: expvar.runtime.goroutines\n chart_type: line\n lines:\n - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}\n - id: \"foo_counters\"\n options:\n name: counters\n title: \"some random counters\"\n units: awesomeness\n family: counters\n context: expvar.foo.counters\n chart_type: line\n lines:\n - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}\n - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. 
The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin go_expvar debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Go applications (EXPVAR) instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| expvar.memstats.heap | alloc, inuse | KiB |\n| expvar.memstats.stack | inuse | KiB |\n| expvar.memstats.mspan | inuse | KiB |\n| expvar.memstats.mcache | inuse | KiB |\n| expvar.memstats.live_objects | live | objects |\n| expvar.memstats.sys | sys | KiB |\n| expvar.memstats.gc_pauses | avg | ns |\n\n", "integration_type": "collector", "id": "python.d.plugin-go_expvar-Go_applications_(EXPVAR)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/go_expvar/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hddtemp", "monitored_instance": {"name": "HDD temperature", "link": "https://linux.die.net/man/8/hddtemp", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["hardware", "hdd temperature", "disk temperature", "temperature"], "most_popular": false}, "overview": "# HDD temperature\n\nPlugin: python.d.plugin\nModule: hddtemp\n\n## Overview\n\nThis collector monitors disk temperatures.\n\n\nIt uses the `hddtemp` daemon to gather the metrics.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Run `hddtemp` in daemon mode\n\nYou can execute `hddtemp` in TCP/IP daemon mode by using the `-d` argument.\n\nSo running `hddtemp -d` would run the daemon, by default on port 7634.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hddtemp.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hddtemp.conf\n```\n#### Options\n\nThere are 2 
sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\nBy default, this collector will try to autodetect disks (autodetection works only for disks whose names start with \"sd\"). However, this can be overridden by setting the option `devices` to an array of desired disks.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |\n| host | The IP or HOSTNAME to connect to. | localhost | yes |\n| port | The port to connect to. | 7634 | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\n```\n##### Custom disk names\n\nAn example defining the disk names to detect.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n devices:\n - customdisk1\n - customdisk2\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 7634\n\nremote_job:\n name : 'remote'\n host : '192.0.2.1'\n port : 7634\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hddtemp` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hddtemp debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HDD temperature instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hddtemp.temperatures | a dimension per disk | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hddtemp-HDD_temperature", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hddtemp/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "hpssa", "monitored_instance": {"name": "HP Smart Storage Arrays", "link": "https://buy.hpe.com/us/en/software/server-management-software/server-management-software/smart-array-management-software/hpe-smart-storage-administrator/p/5409020", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hp.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "hp", "hpssa", "array"], "most_popular": false}, "overview": "# HP Smart Storage Arrays\n\nPlugin: python.d.plugin\nModule: hpssa\n\n## Overview\n\nThis collector monitors HP Smart Storage Arrays metrics about operational statuses and temperatures.\n\nIt uses the command line tool `ssacli`. The exact command used is `sudo -n ssacli ctrl all show config detail`\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to execute the `ssacli` binary.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the hpssa collector\n\nThe `hpssa` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\n\nChange the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Allow user netdata to execute `ssacli` as root.\n\nThis module uses `ssacli`, which can only be executed by root. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `ssacli` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n`which ssacli` shows the full path to the binary.\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/ssacli\n```\n\n- Reset Netdata's systemd\n unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux\n distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. 
Resetting is not optimal, but a next-best solution given the inability to execute `ssacli` using `sudo`.\n\nAs the `root` user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/hpssa.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/hpssa.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |\n| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |\n\n#### Examples\n\n##### Local simple config\n\nA basic configuration, specifying the path to `ssacli`\n\n```yaml\nlocal:\n ssacli_path: /usr/sbin/ssacli\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `hpssa` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin hpssa debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per HP Smart Storage Arrays instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| hpssa.ctrl_status | ctrl_{adapter slot}_status, cache_{adapter slot}_status, battery_{adapter slot}_status per adapter | Status |\n| hpssa.ctrl_temperature | ctrl_{adapter slot}_temperature, cache_{adapter slot}_temperature per adapter | Celsius |\n| hpssa.ld_status | a dimension per logical drive | Status |\n| hpssa.pd_status | a dimension per physical drive | Status |\n| hpssa.pd_temperature | a dimension per physical drive | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-hpssa-HP_Smart_Storage_Arrays", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/hpssa/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "icecast", "monitored_instance": {"name": "Icecast", "link": "https://icecast.org/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "icecast.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["icecast", "streaming", "media"], "most_popular": false}, "overview": "# Icecast\n\nPlugin: python.d.plugin\nModule: icecast\n\n## Overview\n\nThis collector monitors Icecast listener counts.\n\nIt connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWithout configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Icecast minimum version\n\nNeeds at least icecast version >= 2.4.0\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/icecast.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/icecast.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. 
| 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |\n| user | Username to use to connect to `url` if it's password protected. | | no |\n| pass | Password to use to connect to `url` if it's password protected. | | no |\n\n#### Examples\n\n##### Remote Icecast server\n\nConfigure a remote icecast server\n\n```yaml\nremote:\n url: 'http://1.2.3.4:8443/status-json.xsl'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin icecast debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Icecast instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| icecast.listeners | a dimension for each active source | listeners |\n\n", "integration_type": "collector", "id": "python.d.plugin-icecast-Icecast", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/icecast/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "ipfs", "monitored_instance": {"name": "IPFS", "link": "https://ipfs.tech/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "ipfs.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# IPFS\n\nPlugin: python.d.plugin\nModule: ipfs\n\n## Overview\n\nThis collector monitors IPFS server metrics about its quality and performance.\n\nIt connects to an http endpoint of the IPFS server to collect the metrics\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the endpoint is accessible by the Agent, netdata will autodetect it\n\n#### Limits\n\nCalls to the following endpoints are disabled due to IPFS bugs:\n\n/api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/3874)\n/api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/7528)\n\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/ipfs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/ipfs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |\n| url | URL to the IPFS API | no | yes |\n| repoapi | Collect repo metrics. 
| no | no |\n| pinapi | Set status of IPFS pinned object polling. | no | no |\n\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration, one job will run at a time. Autodetect mechanism uses it by default.\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:5001'\n repoapi: no\n pinapi: no\n\nremote_host:\n name: 'remote'\n url: 'http://192.0.2.1:5001'\n repoapi: no\n pinapi: no\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin ipfs debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per IPFS instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| ipfs.bandwidth | in, out | kilobits/s |\n| ipfs.peers | peers | peers |\n| ipfs.repo_size | avail, size | GiB |\n| ipfs.repo_objects | objects, pinned, recursive_pins | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-ipfs-IPFS", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ipfs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "litespeed", "monitored_instance": {"name": "Litespeed", "link": "https://www.litespeedtech.com/products/litespeed-web-server", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "litespeed.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["litespeed", "web", "server"], "most_popular": false}, "overview": "# Litespeed\n\nPlugin: python.d.plugin\nModule: litespeed\n\n## Overview\n\nExamine Litespeed metrics for insights into web server operations. 
Analyze request rates, response times, and error rates for efficient web service delivery.\n\nThe collector uses the statistics under /tmp/lshttpd to gather the metrics.\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/litespeed.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/litespeed.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| path | Use a different path than the default, where the litespeed stats files reside. | /tmp/lshttpd/ | no |\n\n#### Examples\n\n##### Set the path to statistics\n\nChange the path for the litespeed stats files\n\n```yaml\nlocalhost:\n name: 'local'\n path: '/tmp/lshttpd'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `litespeed` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin litespeed debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Litespeed instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.net_throughput | in, out | kilobits/s |\n| litespeed.connections | free, used | conns |\n| litespeed.connections | free, used | conns |\n| litespeed.requests | requests | requests/s |\n| litespeed.requests_processing | processing | requests |\n| litespeed.cache | hits | hits/s |\n| litespeed.cache | hits | hits/s |\n| litespeed.static | hits | hits/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-litespeed-Litespeed", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/litespeed/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "megacli", "monitored_instance": {"name": "MegaCLI", "link": "https://wikitech.wikimedia.org/wiki/MegaCli", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "hard-drive.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["storage", "raid-controller", "manage-disks"], "most_popular": false}, "overview": "# MegaCLI\n\nPlugin: python.d.plugin\nModule: megacli\n\n## Overview\n\nExamine MegaCLI metrics with Netdata for insights into RAID controller performance. Improve your RAID controller efficiency with real-time MegaCLI metrics.\n\nCollects adapter, physical drives and battery stats using the megacli command-line tool\n\nExecuted commands:\n\n - `sudo -n megacli -LDPDInfo -aAll`\n - `sudo -n megacli -AdpBbuCmd -a0`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe module uses megacli, which can only be executed by root. It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\n### Default Behavior\n\n#### Auto-Detection\n\nAfter all the permissions are satisfied, netdata should be able to execute commands via the megacli command line utility\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Grant permissions for netdata to run megacli as sudoer\n\nThe module uses megacli, which can only be executed by root. 
It uses sudo and assumes that it is configured such that the netdata user can execute megacli as root without a password.\n\nAdd to your `/etc/sudoers` file (`which megacli` shows the full path to the binary):\n\n```bash\nnetdata ALL=(root) NOPASSWD: /path/to/megacli\n```\n\n\n#### Reset Netdata's systemd unit CapabilityBoundingSet (Linux distributions with systemd)\n\nThe default CapabilityBoundingSet doesn't allow using sudo, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute megacli using sudo.\n\nAs the root user, do the following:\n\n```bash\nmkdir /etc/systemd/system/netdata.service.d\necho -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\nsystemctl daemon-reload\nsystemctl restart netdata.service\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/megacli.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/megacli.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| do_battery | default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`). | no | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration per job\n\n```yaml\njob_name:\n name: myname\n update_every: 1\n priority: 60000\n penalty: yes\n autodetection_retry: 0\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `megacli` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin megacli debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ megacli_adapter_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_degraded | adapter is in the degraded state (0: false, 1: true) |\n| [ megacli_pd_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_media_error | number of physical drive media errors |\n| [ megacli_pd_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.pd_predictive_failure | number of physical drive predictive failures |\n| [ megacli_bbu_relative_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_relative_charge | average battery backup unit (BBU) relative state of charge over the last 10 seconds |\n| [ megacli_bbu_cycle_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_cycle_count | average battery backup unit (BBU) charge cycles count over the last 10 seconds |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per MegaCLI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.adapter_degraded | a dimension per adapter | is degraded |\n| megacli.pd_media_error | a dimension per physical drive | errors/s |\n| megacli.pd_predictive_failure | a dimension per physical drive | failures/s |\n\n### Per battery\n\nMetrics related to Battery Backup Units, each BBU provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| megacli.bbu_relative_charge | adapter {battery id} | percentage |\n| megacli.bbu_cycle_count | adapter {battery id} | cycle count |\n\n", "integration_type": "collector", "id": "python.d.plugin-megacli-MegaCLI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/megacli/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "memcached", "monitored_instance": {"name": "Memcached", "link": "https://memcached.org/", "categories": ["data-collection.database-servers"], "icon_filename": "memcached.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["memcached", "memcache", "cache", "database"], "most_popular": false}, "overview": "# Memcached\n\nPlugin: python.d.plugin\nModule: memcached\n\n## Overview\n\nMonitor Memcached metrics for proficient in-memory key-value store operations. 
Track cache hits, misses, and memory usage for efficient data caching.\n\nIt reads the server response to the stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is given, the collector will attempt to connect to a memcached instance at `127.0.0.1:11211`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/memcached.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/memcached.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| host | the host to connect to. | 127.0.0.1 | no |\n| port | the port to connect to. | 11211 | no |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### localhost\n\nAn example configuration for localhost.\n\n```yaml\nlocalhost:\n name: 'local'\n host: 'localhost'\n port: 11211\n\n```\n##### localipv4\n\nAn example configuration for localipv4.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 11211\n\n```\n##### localipv6\n\nAn example configuration for localipv6.\n\n```yaml\nlocalhost:\n name: 'local'\n host: '::1'\n port: 11211\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin memcached debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |\n| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |\n| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Memcached instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| memcached.cache | available, used | MiB |\n| memcached.net | in, out | kilobits/s |\n| memcached.connections | current, rejected, total | connections/s |\n| memcached.items | current, total | items |\n| memcached.evicted_reclaimed | reclaimed, evicted | items |\n| memcached.get | hits, misses | requests |\n| memcached.get_rate | rate | requests/s |\n| memcached.set_rate | rate | requests/s |\n| memcached.delete | hits, misses | requests |\n| memcached.cas | hits, misses, bad value | requests |\n| memcached.increment | hits, misses | requests |\n| memcached.decrement | hits, misses | requests |\n| memcached.touch | hits, misses | requests |\n| memcached.touch_rate | rate | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-memcached-Memcached", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/memcached/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "monit", "monitored_instance": {"name": "Monit", "link": "https://mmonit.com/monit/", "categories": ["data-collection.synthetic-checks"], "icon_filename": "monit.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["monit", "mmonit", "supervision tool", "monitrc"], "most_popular": false}, "overview": "# Monit\n\nPlugin: python.d.plugin\nModule: monit\n\n## Overview\n\nThis collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.\n\n\nIt gathers data from Monit's XML interface.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to Monit at `http://localhost:2812`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data 
collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/monit.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/monit.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |\n| user | Username in case the URL is password protected. | | no |\n| pass | Password in case the URL is password protected. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n\n```\n##### Basic Authentication\n\nExample using basic username and password in order to authenticate.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:2812'\n user: 'foo'\n pass: 'bar'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocalhost:\n name: 'local'\n url: 'http://localhost:2812'\n\nremote_job:\n name: 'remote'\n url: 'http://192.0.2.1:2812'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin monit debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Monit instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| monit.filesystems | a dimension per target | filesystems |\n| monit.directories | a dimension per target | directories |\n| monit.files | a dimension per target | files |\n| monit.fifos | a dimension per target | pipes |\n| monit.programs | a dimension per target | programs |\n| monit.services | a dimension per target | processes |\n| monit.process_uptime | a dimension per target | seconds |\n| monit.process_threads | a dimension per target | threads |\n| monit.process_childrens | a dimension per target | children |\n| monit.hosts | a dimension per target | hosts |\n| monit.host_latency | a dimension per target | milliseconds |\n| monit.networks | a dimension per target | interfaces |\n\n", "integration_type": "collector", "id": "python.d.plugin-monit-Monit", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/monit/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "nsd", "monitored_instance": {"name": "Name Server Daemon", "link": "https://nsd.docs.nlnetlabs.nl/en/latest/#", "categories": ["data-collection.dns-and-dhcp-servers"], "icon_filename": "nsd.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["nsd", "name server daemon"], "most_popular": false}, "overview": "# Name Server Daemon\n\nPlugin: python.d.plugin\nModule: nsd\n\n## Overview\n\nThis collector monitors NSD statistics like queries, zones, protocols, query types and more.\n\n\nIt uses the `nsd-control stats_noreset` command to gather metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### NSD version\n\nThe version of `nsd` must be 4.0+.\n\n\n#### Provide Netdata the permissions to run the command\n\nNetdata must have permissions to run the `nsd-control stats_noreset` command.\n\nYou can:\n\n- Add \"netdata\" user to \"nsd\" group:\n ```\n usermod -aG nsd netdata\n ```\n- Add Netdata to sudoers\n 1. Edit the sudoers file:\n ```\n visudo -f /etc/sudoers.d/netdata\n ```\n 2. 
Add the entry:\n ```\n Defaults:netdata !requiretty\n netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset\n ```\n\n > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/nsd.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/nsd.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| command | The command to run. | nsd-control stats_noreset | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: 'nsd_local'\n command: 'nsd-control stats_noreset'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin nsd debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
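For reference, here is a minimal Python sketch (an illustration, not the collector's actual code; it assumes `nsd-control` is on the PATH and that the caller has permission to run it, as set up above) of shelling out to `nsd-control stats_noreset` and parsing its `key=value` output:

```python
# Illustrative sketch: run the same command this collector uses and
# parse the resulting `key=value` lines into a dict of counters.
import subprocess

def nsd_stats(command=("nsd-control", "stats_noreset")):
    out = subprocess.run(
        command, capture_output=True, text=True, check=True
    ).stdout
    stats = {}
    for line in out.splitlines():
        key, _, value = line.partition("=")
        try:
            stats[key.strip()] = float(value)
        except ValueError:
            continue  # skip any non-numeric lines
    return stats

if __name__ == "__main__":
    stats = nsd_stats()
    print(stats.get("num.queries"))
```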
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Name Server Daemon instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| nsd.queries | queries | queries/s |\n| nsd.zones | master, slave | zones |\n| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |\n| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |\n| nsd.transfer | NOTIFY, AXFR | queries/s |\n| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-nsd-Name_Server_Daemon", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/nsd/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "openldap", "monitored_instance": {"name": "OpenLDAP", "link": "https://www.openldap.org/", "categories": ["data-collection.authentication-and-authorization"], "icon_filename": "statsd.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["openldap", "RBAC", "Directory access"], "most_popular": false}, "overview": "# OpenLDAP\n\nPlugin: python.d.plugin\nModule: openldap\n\n## Overview\n\nThis collector monitors OpenLDAP metrics about connections, operations, referrals and more.\n\nStatistics are taken from the monitoring interface of an OpenLDAP (slapd) server.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector doesn't work until all of its prerequisites are met.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure the OpenLDAP server to expose metrics to monitor it.\n\nFollow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.\n\n\n#### Install python-ldap module\n\nInstall the python-ldap module using one of the following package managers:\n\n1. With the pip package manager\n\n```bash\npip install python-ldap\n```\n\n2. With the apt package manager (on most deb-based distros)\n\n\n```bash\napt-get install python-ldap\n```\n\n\n3. 
With the yum package manager (on most rpm-based distros)\n\n\n```bash\nyum install python-ldap\n```\n\n\n#### Insert credentials for Netdata to access the OpenLDAP server\n\nUse the `ldappasswd` utility to set a password for the username you will use.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/openldap.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/openldap.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| username | The bind user with rights to access the monitor statistics | | yes |\n| password | The password for the bind user | | yes |\n| server | The listening address of the LDAP server. In case of TLS, use the hostname for which the certificate is published. | | yes |\n| port | The listening port of the LDAP server. Change to port 636 in case of a TLS connection. | 389 | yes |\n| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |\n| use_start_tls | Set to True if a TLS connection is used over ldap:// | no | no |\n| cert_check | Set to False to skip the certificate check | True | yes |\n| timeout | Seconds before timing out if no connection exists | | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nusername: \"cn=admin\"\npassword: \"pass\"\nserver: \"localhost\"\nport: \"389\"\ncert_check: True\ntimeout: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin openldap debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per OpenLDAP instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| openldap.total_connections | connections | connections/s |\n| openldap.traffic_stats | sent | KiB/s |\n| openldap.operations_status | completed, initiated | ops/s |\n| openldap.referrals | sent | referrals/s |\n| openldap.entries | sent | entries/s |\n| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |\n| openldap.waiters | write, read | waiters/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-openldap-OpenLDAP", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "oracledb", "monitored_instance": {"name": "Oracle DB", "link": "https://docs.oracle.com/en/database/oracle/oracle-database/", "categories": ["data-collection.database-servers"], "icon_filename": "oracle.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "oracle", "data warehouse", "SQL"], "most_popular": false}, "overview": "# Oracle DB\n\nPlugin: python.d.plugin\nModule: oracledb\n\n## Overview\n\nThis collector monitors OracleDB database metrics about sessions, tables, memory and more.\n\nIt collects the metrics via the supported database client library.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nIn order for this collector to work, it needs a read-only user `netdata` in the RDBMS.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen the requirements are met, databases on the local host on port 1521 will be auto-detected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Install the python-oracledb package\n\nYou can follow the official guide below to install the required package:\n\nSource: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html\n\n\n#### Create a read-only user for netdata\n\nFollow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The procedure may look like this:\n\nConnect to your Oracle database with an administrative user and execute:\n\n```sql\nCREATE USER netdata IDENTIFIED BY <password>;\n\nGRANT CONNECT TO netdata;\nGRANT SELECT_CATALOG_ROLE TO netdata;\n```\n\n\n#### Edit the configuration\n\nEdit the configuration:\n\n1. 
Provide a valid user for the netdata collector to access the database.\n2. Specify the network target this database is listening on.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/oracledb.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/oracledb.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| user | The username for the user account. | no | yes |\n| password | The password for the user account. | no | yes |\n| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |\n| service | The Oracle Database service name. To view the services available on your server run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |\n| protocol | One of the strings \"tcp\" or \"tcps\" indicating whether to use unencrypted or encrypted network traffic. | no | yes |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration, with two jobs for two databases.\n\n```yaml\nlocal:\n user: 'netdata'\n password: 'secret'\n server: 'localhost:1521'\n service: 'XE'\n protocol: 'tcps'\n\nremote:\n user: 'netdata'\n password: 'secret'\n server: '10.0.0.1:1521'\n service: 'XE'\n protocol: 'tcps'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin oracledb debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
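For reference, here is a minimal Python sketch (illustrative only; the user, password, and DSN below are placeholders) of the client-library approach this collector relies on, using `python-oracledb` to read session counts from `v$session` (accessible to the read-only user via `SELECT_CATALOG_ROLE`):

```python
# Illustrative sketch: connect as the read-only `netdata` user and read
# a few session counters via python-oracledb. Credentials/DSN are
# placeholders, not real defaults.
import oracledb

def session_counts(user="netdata", password="secret", dsn="localhost:1521/XE"):
    with oracledb.connect(user=user, password=password, dsn=dsn) as conn:
        with conn.cursor() as cur:
            cur.execute(
                "SELECT status, COUNT(*) FROM v$session GROUP BY status"
            )
            # rows are (status, count) tuples, e.g. ('ACTIVE', 12)
            return dict(cur.fetchall())

if __name__ == "__main__":
    print(session_counts())
```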
An instance is uniquely identified by a set of labels.\n\nThese metrics refer to the entire monitored application.\n\n### Per Oracle DB instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| oracledb.session_count | total, active | sessions |\n| oracledb.session_limit_usage | usage | % |\n| oracledb.logons | logons | events/s |\n| oracledb.physical_disk_read_writes | reads, writes | events/s |\n| oracledb.sorts_on_disks | sorts | events/s |\n| oracledb.full_table_scans | full table scans | events/s |\n| oracledb.database_wait_time_ratio | wait time ratio | % |\n| oracledb.shared_pool_free_memory | free memory | % |\n| oracledb.in_memory_sorts_ratio | in-memory sorts | % |\n| oracledb.sql_service_response_time | time | seconds |\n| oracledb.user_rollbacks | rollbacks | events/s |\n| oracledb.enqueue_timeouts | enqueue timeouts | events/s |\n| oracledb.cache_hit_ration | buffer, cursor, library, row | % |\n| oracledb.global_cache_blocks | corrupted, lost | events/s |\n| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |\n| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |\n| oracledb.tablespace_size | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage | a dimension per active tablespace | KiB |\n| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |\n| oracledb.allocated_size | a dimension per active tablespace | B |\n| oracledb.allocated_usage | a dimension per active tablespace | B |\n| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |\n\n", "integration_type": "collector", "id": "python.d.plugin-oracledb-Oracle_DB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "pandas", "monitored_instance": {"name": "Pandas", "link": "https://pandas.pydata.org/", "categories": ["data-collection.generic-data-collection"], "icon_filename": "pandas.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["pandas", "python"], "most_popular": false}, "overview": "# Pandas\n\nPlugin: python.d.plugin\nModule: pandas\n\n## Overview\n\n[Pandas](https://pandas.pydata.org/) is a de-facto standard in reading and processing most types of structured data in Python.\nIf you have metrics appearing in a CSV, JSON, XML, HTML, or [other supported format](https://pandas.pydata.org/docs/user_guide/io.html),\neither locally or via some HTTP endpoint, you can easily ingest and present those metrics in Netdata, by leveraging the Pandas collector.\n\nThis collector can be used to collect pretty much anything that can be read by Pandas, and then processed by Pandas.\n\n\nThe collector uses [pandas](https://pandas.pydata.org/) to pull data and do pandas-based preprocessing, before feeding to Netdata.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for 
this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector depends on some Python (Python 3 only) packages that can usually be installed via `pip` or `pip3`.\n\n```bash\nsudo pip install pandas requests\n```\n\nNote: If you would like to use [`pandas.read_sql`](https://pandas.pydata.org/docs/reference/api/pandas.read_sql.html) to query a database, you will need to install the below packages as well.\n\n```bash\nsudo pip install 'sqlalchemy<2.0' psycopg2-binary\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/pandas.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/pandas.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| chart_configs | an array of chart configuration dictionaries | [] | yes |\n| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |\n| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. 
| | no |\n\n#### Examples\n\n##### Temperature API Example\n\nexample pulling some hourly temperature data: one chart for today's forecast (mean, min, max) and another chart for the current temperature.\n\n```yaml\ntemperature:\n name: \"temperature\"\n update_every: 5\n chart_configs:\n - name: \"temperature_forecast_by_city\"\n title: \"Temperature By City - Today Forecast\"\n family: \"temperature.today\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&hourly=temperature_2m').json()['hourly']['temperature_2m']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.describe(); # get aggregate stats for each city;\n df.transpose()[['mean', 'max', 'min']].reset_index(); # just take mean, min, max;\n df.rename(columns={'index':'city'}); # some column renaming;\n df.pivot(columns='city').mean().to_frame().reset_index(); # force to be one row per city;\n df.rename(columns={0:'degrees'}); # some column renaming;\n pd.concat([df, df['city']+'_'+df['level_0']], axis=1); # add new column combining city and summary measurement label;\n df.rename(columns={0:'measurement'}); # some column renaming;\n df[['measurement', 'degrees']].set_index('measurement'); # just take two columns we want;\n df.sort_index(); # sort by city name;\n df.transpose(); # transpose so it's just one wide row;\n - name: \"temperature_current_by_city\"\n title: \"Temperature By City - Current\"\n family: \"temperature.current\"\n context: \"pandas.temperature\"\n type: \"line\"\n units: \"Celsius\"\n df_steps: >\n pd.DataFrame.from_dict(\n {city: requests.get(f'https://api.open-meteo.com/v1/forecast?latitude={lat}&longitude={lng}&current_weather=true').json()['current_weather']\n for (city,lat,lng)\n in [\n ('dublin', 53.3441, -6.2675),\n ('athens', 37.9792, 23.7166),\n ('london', 51.5002, -0.1262),\n ('berlin', 52.5235, 13.4115),\n ('paris', 48.8567, 2.3510),\n ('madrid', 40.4167, -3.7033),\n ('new_york', 40.71, -74.01),\n ('los_angeles', 34.05, -118.24),\n ]\n }\n );\n df.transpose();\n df[['temperature']];\n df.transpose();\n\n```\n##### API CSV Example\n\nexample showing a read_csv from a url and some light pandas data wrangling.\n\n```yaml\nexample_csv:\n name: \"example_csv\"\n update_every: 2\n chart_configs:\n - name: \"london_system_cpu\"\n title: \"London System CPU - Ratios\"\n family: \"london_system_cpu\"\n context: \"pandas\"\n type: \"line\"\n units: \"n\"\n df_steps: >\n pd.read_csv('https://london.my-netdata.io/api/v1/data?chart=system.cpu&format=csv&after=-60', storage_options={'User-Agent': 'netdata'});\n df.drop('time', axis=1);\n df.mean().to_frame().transpose();\n df.apply(lambda row: (row.user / row.system), axis = 1).to_frame();\n df.rename(columns={0:'average_user_system_ratio'});\n df*100;\n\n```\n##### API JSON Example\n\nexample showing a read_json from a url and some light pandas data wrangling.\n\n```yaml\nexample_json:\n name: \"example_json\"\n update_every: 2\n chart_configs:\n - name: \"london_system_net\"\n title: \"London System Net - Total Bandwidth\"\n family: \"london_system_net\"\n context: \"pandas\"\n type: \"area\"\n units: \"kilobits/s\"\n df_steps: >\n 
pd.DataFrame(requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['data'], columns=requests.get('https://london.my-netdata.io/api/v1/data?chart=system.net&format=json&after=-1').json()['labels']);\n df.drop('time', axis=1);\n abs(df);\n df.sum(axis=1).to_frame();\n df.rename(columns={0:'total_bandwidth'});\n\n```\n##### XML Example\n\nexample showing a read_xml from a url and some light pandas data wrangling.\n\n```yaml\nexample_xml:\n name: \"example_xml\"\n update_every: 2\n line_sep: \"|\"\n chart_configs:\n - name: \"temperature_forcast\"\n title: \"Temperature Forecast\"\n family: \"temp\"\n context: \"pandas.temp\"\n type: \"line\"\n units: \"celsius\"\n df_steps: >\n pd.read_xml('http://metwdb-openaccess.ichec.ie/metno-wdb2ts/locationforecast?lat=54.7210798611;long=-8.7237392806', xpath='./product/time[1]/location/temperature', parser='etree')|\n df.rename(columns={'value': 'dublin'})|\n df[['dublin']]|\n\n```\n##### SQL Example\n\nexample showing a read_sql from a postgres database using sqlalchemy.\n\n```yaml\nsql:\n name: \"sql\"\n update_every: 5\n chart_configs:\n - name: \"sql\"\n title: \"SQL Example\"\n family: \"sql.example\"\n context: \"example\"\n type: \"line\"\n units: \"percent\"\n df_steps: >\n pd.read_sql_query(\n sql='\\\n select \\\n random()*100 as metric_1, \\\n random()*100 as metric_2 \\\n ',\n con=create_engine('postgresql://localhost/postgres?user=netdata&password=netdata')\n );\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin pandas debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThis collector is expecting one row in the final pandas DataFrame. 
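As a standalone illustration (a sketch assuming only `pandas` is installed; the data here is made up and unrelated to any job above) of how a `df_steps` chain is expected to end up as a single row:

```python
# A minimal sketch of the df_steps contract: each step transforms the
# previous DataFrame, and the chain must end with exactly one row.
import pandas as pd

# pretend this came from read_csv/read_json/etc.
df = pd.DataFrame({"user": [10.0, 12.0, 11.0], "system": [5.0, 4.0, 6.0]})

# collapse the frame to a single row of per-column means
df = df.mean().to_frame().transpose()

# the collector reads the first (only) row as the latest values
latest = df.to_dict(orient="records")[0]
print(latest)  # {'user': 11.0, 'system': 5.0}
```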
It is that first row that will be taken\nas the most recent values for each dimension on each chart, using `df.to_dict(orient='records')[0]`.\nSee [pd.to_dict()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_dict.html).\n\n\n### Per Pandas instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n\n", "integration_type": "collector", "id": "python.d.plugin-pandas-Pandas", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/pandas/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "postfix", "monitored_instance": {"name": "Postfix", "link": "https://www.postfix.org/", "categories": ["data-collection.mail-servers"], "icon_filename": "postfix.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["postfix", "mail", "mail server"], "most_popular": false}, "overview": "# Postfix\n\nPlugin: python.d.plugin\nModule: postfix\n\n## Overview\n\nKeep an eye on Postfix metrics for efficient mail server operations. \nImprove your mail server performance with Netdata's real-time metrics and built-in alerts.\n\n\nMonitors MTA email queue statistics using the [postqueue](http://www.postfix.org/postqueue.1.html) tool.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nPostfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.\nSee the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector executes `postqueue -p` to get Postfix queue statistics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThere is no configuration file.\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin postfix debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Postfix instance\n\nThese metrics refer to the entire monitored application.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| postfix.qemails | emails | emails |\n| postfix.qsize | size | KiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-postfix-Postfix", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/postfix/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "puppet", "monitored_instance": {"name": "Puppet", "link": "https://www.puppet.com/", "categories": ["data-collection.ci-cd-systems"], "icon_filename": "puppet.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["puppet", "jvm heap"], "most_popular": false}, "overview": "# Puppet\n\nPlugin: python.d.plugin\nModule: puppet\n\n## Overview\n\nThis collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.\n\n\nIt uses Puppet's metrics API endpoint to gather the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/puppet.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || 
cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/puppet.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n> Notes:\n> - The exact Fully Qualified Domain Name of the node should be used.\n> - Usually Puppet Server/DB startup time is VERY long. So, the retry count should be reasonably high.\n> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |\n| tls_verify | Control HTTPS server certificate verification. | False | no |\n| tls_ca_file | Optional CA (bundle) file to use | | no |\n| tls_cert_file | Optional client certificate file | | no |\n| tls_key_file | Optional client key file | | no |\n| update_every | Sets the default data collection frequency. | 30 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration\n\n```yaml\npuppetserver:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\n```\n##### TLS Certificate\n\nAn example using a TLS certificate\n\n```yaml\npuppetdb:\n url: 'https://fqdn.example.com:8081'\n tls_cert_file: /path/to/client.crt\n tls_key_file: /path/to/client.key\n autodetection_retry: 1\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\npuppetserver1:\n url: 'https://fqdn.example.com:8140'\n autodetection_retry: 1\n\npuppetserver2:\n url: 'https://fqdn.example2.com:8140'\n autodetection_retry: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin puppet debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Puppet instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| puppet.jvm | committed, used | MiB |\n| puppet.jvm | committed, used | MiB |\n| puppet.cpu | execution, GC | percentage |\n| puppet.fdopen | used | descriptors |\n\n", "integration_type": "collector", "id": "python.d.plugin-puppet-Puppet", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/puppet/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "rethinkdbs", "monitored_instance": {"name": "RethinkDB", "link": "https://rethinkdb.com/", "categories": ["data-collection.database-servers"], "icon_filename": "rethinkdb.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["rethinkdb", "database", "db"], "most_popular": false}, "overview": "# RethinkDB\n\nPlugin: python.d.plugin\nModule: rethinkdbs\n\n## Overview\n\nThis collector monitors metrics about RethinkDB clusters and database servers.\n\nIt uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nWhen no configuration file is found, the collector tries to connect to 127.0.0.1:28015.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe collector requires the `rethinkdb` python module to be installed.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/rethinkdbs.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/rethinkdbs.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` 
value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | Hostname or ip of the RethinkDB server. | localhost | no |\n| port | Port to connect to the RethinkDB server. | 28015 | no |\n| user | The username to use to connect to the RethinkDB server. | admin | no |\n| password | The password to use to connect to the RethinkDB server. | | no |\n| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |\n\n#### Examples\n\n##### Local RethinkDB server\n\nAn example of a configuration for a local RethinkDB server\n\n```yaml\nlocalhost:\n name: 'local'\n host: '127.0.0.1'\n port: 28015\n user: \"user\"\n password: \"pass\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin rethinkdbs debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
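For reference, here is a minimal Python sketch (illustrative only; it assumes the `rethinkdb` Python driver is installed and the default `127.0.0.1:28015` endpoint) of reading the system `stats` table, which holds the kind of cluster and server statistics this collector charts:

```python
# Illustrative sketch: connect with the rethinkdb Python driver and list
# the documents in the `rethinkdb` system database's `stats` table.
from rethinkdb import RethinkDB

r = RethinkDB()
conn = r.connect(host="127.0.0.1", port=28015)

# each document describes cluster-, server-, or table-level statistics;
# its `id` field identifies the scope, e.g. ['cluster'] or ['server', ...]
for doc in r.db("rethinkdb").table("stats").run(conn):
    print(doc["id"])

conn.close()
```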
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RethinkDB instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.cluster_connected_servers | connected, missing | servers |\n| rethinkdb.cluster_clients_active | active | clients |\n| rethinkdb.cluster_queries | queries | queries/s |\n| rethinkdb.cluster_documents | reads, writes | documents/s |\n\n### Per database server\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| rethinkdb.client_connections | connections | connections |\n| rethinkdb.clients_active | active | clients |\n| rethinkdb.queries | queries | queries/s |\n| rethinkdb.documents | reads, writes | documents/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-rethinkdbs-RethinkDB", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "retroshare", "monitored_instance": {"name": "RetroShare", "link": "https://retroshare.cc/", "categories": ["data-collection.media-streaming-servers"], "icon_filename": "retroshare.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["retroshare", "p2p"], "most_popular": false}, "overview": "# RetroShare\n\nPlugin: python.d.plugin\nModule: retroshare\n\n## Overview\n\nThis collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.\n\nIt connects to the RetroShare web interface to gather metrics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### RetroShare web interface\n\nRetroShare needs to be configured to enable the RetroShare WEB Interface and allow access from the Netdata host.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/retroshare.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/retroshare.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a 
`name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |\n\n#### Examples\n\n##### Local RetroShare Web UI\n\nA basic configuration for a RetroShare server running on localhost.\n\n```yaml\nlocalhost:\n name: 'local retroshare'\n url: 'http://localhost:9090'\n\n```\n##### Remote RetroShare Web UI\n\nA basic configuration for a remote RetroShare server.\n\n```yaml\nremote:\n name: 'remote retroshare'\n url: 'http://1.2.3.4:9090'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin retroshare debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RetroShare instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| retroshare.bandwidth | Upload, Download | kilobits/s |\n| retroshare.peers | All friends, Connected friends | peers |\n| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |\n\n", "integration_type": "collector", "id": "python.d.plugin-retroshare-RetroShare", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/retroshare/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "riakkv", "monitored_instance": {"name": "RiakKV", "link": "https://riak.com/products/riak-kv/index.html", "categories": ["data-collection.database-servers"], "icon_filename": "riak.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["database", "nosql", "big data"], "most_popular": false}, "overview": "# RiakKV\n\nPlugin: python.d.plugin\nModule: riakkv\n\n## Overview\n\nThis collector monitors RiakKV metrics about throughput, latency, resources and more.\n\n\nThis collector reads the database stats from the `/stats` endpoint.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the `/stats` endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure RiakKV to enable the `/stats` endpoint\n\nYou can follow the RiakKV configuration reference documentation for how to enable this.\n\nSource: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/riakkv.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/riakkv.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. 
| 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the server. | no | yes |\n\n#### Examples\n\n##### Basic (default)\n\nA basic example configuration per job.\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal:\n url: 'http://localhost:8098/stats'\n\nremote:\n url: 'http://192.0.2.1:8098/stats'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin riakkv debug trace\n ```\n\n", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to client over the last hour |\n| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last hour |\n| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour |\n| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |\n| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per RiakKV instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| riak.kv.throughput | gets, puts | operations/s |\n| riak.dt.vnode_updates | counters, sets, maps | operations/s |\n| riak.search | queries | queries/s |\n| riak.search.documents | indexed | documents/s |\n| riak.consistent.operations | gets, puts | operations/s |\n| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |\n| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |\n| riak.search.latency.query | median, min, 95, 99, 999, max | ms |\n| riak.search.latency.index | median, min, 95, 99, 999, max | ms |\n| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |\n| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |\n| riak.vm | processes | total |\n| riak.vm.memory.processes | allocated, used | MB |\n| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |\n| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |\n| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |\n| riak.search.index | errors | errors |\n| riak.core.protobuf_connections | active | connections |\n| riak.core.repairs | read | repairs |\n| riak.core.fsm_active | get, put, secondary index, list keys | fsms |\n| riak.core.fsm_rejected | get, put | fsms |\n| riak.search.index | bad_entry, extract_fail | writes |\n\n", "integration_type": "collector", "id": "python.d.plugin-riakkv-RiakKV", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/riakkv/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "samba", "monitored_instance": {"name": "Samba", "link": "https://www.samba.org/samba/", "categories": ["data-collection.storage-mount-points-and-filesystems"], "icon_filename": "samba.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["samba", "file sharing"], "most_popular": false}, "overview": "# Samba\n\nPlugin: python.d.plugin\nModule: samba\n\n## Overview\n\nThis collector monitors the performance metrics of Samba file sharing.\n\nIt uses the `smbstatus` command-line tool.\n\nExecuted commands:\n\n- `sudo -n smbstatus -P`\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nOnce all the permissions are satisfied, the `smbstatus -P` command is executed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the samba collector\n\nThe `samba` collector is disabled by default. 
To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.\n\n```bash\ncd /etc/netdata # Replace this path with your Netdata config directory, if different\nsudo ./edit-config python.d.conf\n```\nChange the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.\n\n\n#### Permissions and programs\n\nTo run the collector you need:\n\n- the `smbstatus` program\n- the `sudo` program\n- `smbd` must be compiled with profiling enabled\n- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`\n\nThe module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.\n\n- Add to your `/etc/sudoers` file:\n\n `which smbstatus` shows the full path to the binary.\n\n ```bash\n netdata ALL=(root) NOPASSWD: /path/to/smbstatus\n ```\n\n- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)\n\n The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting it is not optimal, but it is the next-best solution, given the inability to execute `smbstatus` using `sudo` otherwise.\n\n\n As the `root` user, do the following:\n\n ```bash\n mkdir /etc/systemd/system/netdata.service.d\n echo -e '[Service]\\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf\n systemctl daemon-reload\n systemctl restart netdata.service\n ```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/samba.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/samba.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nmy_job_name:\n name: my_name\n update_every: 1\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin samba debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Samba instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| syscall.rw | sendfile, recvfile | KiB/s |\n| smb2.rw | readout, writein, readin, writeout | KiB/s |\n| smb2.create_close | create, close | operations/s |\n| smb2.get_set_info | getinfo, setinfo | operations/s |\n| smb2.find | find | operations/s |\n| smb2.notify | notify | operations/s |\n| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-samba-Samba", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "sensors", "monitored_instance": {"name": "Linux Sensors (lm-sensors)", "link": "https://hwmon.wiki.kernel.org/lm_sensors", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "microchip.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["sensors", "temperature", "voltage", "current", "power", "fan", "energy", "humidity"], "most_popular": false}, "overview": "# Linux Sensors (lm-sensors)\n\nPlugin: python.d.plugin\nModule: sensors\n\n## Overview\n\nExamine Linux Sensors metrics with Netdata for insights into hardware health and performance.\n\nEnhance your system's reliability with real-time hardware health insights.\n\n\nReads system sensors information (temperature, voltage, electric current, power, etc.) 
via [lm-sensors](https://hwmon.wiki.kernel.org/lm_sensors).\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe following types of sensors are auto-detected:\n\n- temperature\n- fan\n- voltage\n- current\n- power\n- energy\n- humidity\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/sensors.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/sensors.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\ntypes:\n - temperature\n - fan\n - voltage\n - current\n - power\n - energy\n - humidity\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `sensors` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin sensors debug trace\n ```\n\n### lm-sensors doesn't work on your device\n\n\n\n### ACPI ring buffer errors are printed\n\n\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per chip\n\nMetrics related to chips. 
Each chip provides a set of the following metrics, each having the chip name in the metric name as reported by `sensors -u`.\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| sensors.temperature | a dimension per sensor | Celsius |\n| sensors.voltage | a dimension per sensor | Volts |\n| sensors.current | a dimension per sensor | Ampere |\n| sensors.power | a dimension per sensor | Watt |\n| sensors.fan | a dimension per sensor | Rotations/min |\n| sensors.energy | a dimension per sensor | Joule |\n| sensors.humidity | a dimension per sensor | Percent |\n\n", "integration_type": "collector", "id": "python.d.plugin-sensors-Linux_Sensors_(lm-sensors)", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/sensors/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "smartd_log", "monitored_instance": {"name": "S.M.A.R.T.", "link": "https://linux.die.net/man/8/smartd", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "smart.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["smart", "S.M.A.R.T.", "SCSI devices", "ATA devices"], "most_popular": false}, "overview": "# S.M.A.R.T.\n\nPlugin: python.d.plugin\nModule: smartd_log\n\n## Overview\n\nThis collector monitors HDD/SSD S.M.A.R.T. metrics about drive health and performance.\n\n\nIt reads `smartd` log files to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nUpon satisfying the prerequisites, the collector will auto-detect metrics if written in either `/var/log/smartd/` or `/var/lib/smartmontools/`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure `smartd` to write attribute information to files.\n\n`smartd` must be running with the `-A` option to write `smartd` attribute information to files.\n\nFor this you need to set `smartd_opts` (or `SMARTD_ARGS`, check _smartd.service_ content) in `/etc/default/smartmontools`:\n\n```\n# dump smartd attrs info every 600 seconds\nsmartd_opts=\"-A /var/log/smartd/ -i 600\"\n```\n\nYou may need to create the smartd directory before smartd will write to it:\n\n```sh\nmkdir -p /var/log/smartd\n```\n\nOtherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontools` (default location). See the `smartd` documentation for more info on the `-A` (`--attributelog=PREFIX`) option.\n\n`smartd` appends logs at every run. 
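It's strongly recommended to use `logrotate` for smartd files.\n\nAs a minimal sketch (assuming the `-A /var/log/smartd/` prefix used above; adjust the rotation policy to your needs), a `logrotate` rule for these files could look like:\n\n```\n# sketch: rotate the smartd attribute logs weekly, keeping four archives\n/var/log/smartd/*.csv {\n weekly\n rotate 4\n compress\n missingok\n notifempty\n}\n```\n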
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/smartd_log.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/smartd_log.conf\n```\n#### Options\n\nThis particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| log_path | Path to smartd log files. | /var/log/smartd | yes |\n| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |\n| age | Time in minutes since the last dump to file. | 30 | no |\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\ncustom:\n name: smartd_log\n log_path: '/var/log/smartd/'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `smartd_log` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin smartd_log debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nThe metrics listed below are split in terms of availability on device type, SCSI or ATA.\n\n### Per S.M.A.R.T. 
instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit | SCSI | ATA |\n|:------|:----------|:----|:---:|:---:|\n| smartd_log.read_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.seek_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.soft_read_error_rate | a dimension per device | errors | | \u2022 |\n| smartd_log.write_error_rate | a dimension per device | value | | \u2022 |\n| smartd_log.read_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.read_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.write_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_err_corrected | a dimension per device | errors | \u2022 | |\n| smartd_log.verify_total_unc_errors | a dimension per device | errors | \u2022 | |\n| smartd_log.sata_interface_downshift | a dimension per device | events | | \u2022 |\n| smartd_log.udma_crc_error_count | a dimension per device | errors | | \u2022 |\n| smartd_log.throughput_performance | a dimension per device | value | | \u2022 |\n| smartd_log.seek_time_performance | a dimension per device | value | | \u2022 |\n| smartd_log.start_stop_count | a dimension per device | events | | \u2022 |\n| smartd_log.power_on_hours_count | a dimension per device | hours | | \u2022 |\n| smartd_log.power_cycle_count | a dimension per device | events | | \u2022 |\n| smartd_log.unexpected_power_loss | a dimension per device | events | | \u2022 |\n| smartd_log.spin_up_time | a dimension per device | ms | | \u2022 |\n| smartd_log.spin_up_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.calibration_retries | a dimension per device | retries | | \u2022 |\n| smartd_log.airflow_temperature_celsius | a dimension per device | celsius | | \u2022 |\n| smartd_log.temperature_celsius | a dimension per device | celsius | \u2022 | \u2022 |\n| smartd_log.reallocated_sectors_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.reserved_block_count | a dimension per device | percentage | | \u2022 |\n| smartd_log.program_fail_count | a dimension per device | errors | | \u2022 |\n| smartd_log.erase_fail_count | a dimension per device | failures | | \u2022 |\n| smartd_log.wear_leveller_worst_case_erase_count | a dimension per device | erases | | \u2022 |\n| smartd_log.unused_reserved_nand_blocks | a dimension per device | blocks | | \u2022 |\n| smartd_log.reallocation_event_count | a dimension per device | events | | \u2022 |\n| smartd_log.current_pending_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.offline_uncorrectable_sector_count | a dimension per device | sectors | | \u2022 |\n| smartd_log.percent_lifetime_used | a dimension per device | percentage | | \u2022 |\n| smartd_log.media_wearout_indicator | a dimension per device | percentage | | \u2022 |\n| smartd_log.nand_writes_1gib | a dimension per device | GiB | | \u2022 |\n\n", "integration_type": "collector", "id": "python.d.plugin-smartd_log-S.M.A.R.T.", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/smartd_log/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "spigotmc", "monitored_instance": {"name": "SpigotMC", "link": "", "categories": ["data-collection.gaming"], "icon_filename": 
"spigot.jfif"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["minecraft server", "spigotmc server", "spigot"], "most_popular": false}, "overview": "# SpigotMC\n\nPlugin: python.d.plugin\nModule: spigotmc\n\n## Overview\n\nThis collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.\n\n\nIt sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.\n\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the Remote Console Protocol\n\nUnder your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.\n\nThis will allow the Server to listen and respond to queries over the rcon protocol.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/spigotmc.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/spigotmc.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| host | The host's IP to connect to. | localhost | yes |\n| port | The port the remote console is listening on. | 25575 | yes |\n| password | Remote console password if any. 
| | no |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nlocal:\n name: local_server\n host: 127.0.0.1\n port: 25575\n\n```\n##### Basic Authentication\n\nAn example using a password for authentication with the remote console.\n\n```yaml\nlocal:\n name: local_server_pass\n host: 127.0.0.1\n port: 25575\n password: 'foobar'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal_server:\n name : my_local_server\n host : 127.0.0.1\n port: 25575\n\nremote_server:\n name : another_remote_server\n host : 192.0.2.1\n port: 25575\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin spigotmc debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per SpigotMC instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |\n| spigotmc.users | Users | users |\n| spigotmc.mem | used, allocated, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-spigotmc-SpigotMC", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "squid", "monitored_instance": {"name": "Squid", "link": "http://www.squid-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "squid.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["squid", "web delivery", "squid caching proxy"], "most_popular": false}, "overview": "# Squid\n\nPlugin: python.d.plugin\nModule: squid\n\n## Overview\n\nThis collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.\n\n\nIt collects metrics from the endpoint where Squid exposes its `counters` data.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the 
system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Configure Squid's Cache Manager\n\nTake a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/squid.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/squid.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 1 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |\n| host | The host to connect to. | | yes |\n| port | The port to connect to. | | yes |\n| request | The URL to request from Squid. | | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\nexample_job_name:\n name: 'local'\n host: 'localhost'\n port: 3128\n request: 'cache_object://localhost:3128/counters'\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal_job:\n name: 'local'\n host: '127.0.0.1'\n port: 3128\n request: 'cache_object://127.0.0.1:3128/counters'\n\nremote_job:\n name: 'remote'\n host: '192.0.2.1'\n port: 3128\n request: 'cache_object://192.0.2.1:3128/counters'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin squid debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Squid instance\n\nThese metrics refer to each monitored Squid instance.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| squid.clients_net | in, out, hits | kilobits/s |\n| squid.clients_requests | requests, hits, errors | requests/s |\n| squid.servers_net | in, out | kilobits/s |\n| squid.servers_requests | requests, errors | requests/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-squid-Squid", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/squid/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tomcat", "monitored_instance": {"name": "Tomcat", "link": "https://tomcat.apache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "tomcat.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["apache", "tomcat", "webserver", "websocket", "jakarta", "javaEE"], "most_popular": false}, "overview": "# Tomcat\n\nPlugin: python.d.plugin\nModule: tomcat\n\n## Overview\n\nThis collector monitors Tomcat metrics about bandwidth, processing time, threads and more.\n\n\nIt parses the information provided by the HTTP endpoint `/manager/status` in XML format.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nYou need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf the Netdata Agent and the Tomcat webserver are on the same host, the module attempts, without any configuration, to connect to http://localhost:8080/manager/status?XML=true without any credentials. So it will probably fail.\n\n#### Limits\n\nThis module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't monitor it via a public network (the public internet). 
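Credentials are passed by Netdata over an unencrypted connection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create a read-only `netdata` user to monitor the `/status` endpoint.\n\nThis is necessary for configuring the collector.\n\nAs a sketch (the username and password are placeholders to adapt), such a user can be declared in Tomcat's `conf/tomcat-users.xml` using the `manager-status` role, which grants access to the status pages only:\n\n```\n<tomcat-users>\n <!-- sketch: grants access to the status pages only -->\n <role rolename=\"manager-status\"/>\n <user username=\"netdata\" password=\"<YOUR_PASSWORD>\" roles=\"manager-status\"/>\n</tomcat-users>\n```\n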
\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tomcat.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tomcat.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |\n| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected. | no | no |\n| pass | A valid password for the user in question. Required if the endpoint is password protected. | no | no |\n| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. `ajp-bio-8009` | | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration.\n\n```yaml\nlocalhost:\n name : 'local'\n url : 'http://localhost:8080/manager/status?XML=true'\n\n```\n##### Using an IPv4 endpoint\n\nA typical configuration using an IPv4 endpoint.\n\n```yaml\nlocal_ipv4:\n name : 'local'\n url : 'http://127.0.0.1:8080/manager/status?XML=true'\n\n```\n##### Using an IPv6 endpoint\n\nA typical configuration using an IPv6 endpoint.\n\n```yaml\nlocal_ipv6:\n name : 'local'\n url : 'http://[::1]:8080/manager/status?XML=true'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tomcat debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tomcat instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tomcat.accesses | accesses, errors | requests/s |\n| tomcat.bandwidth | sent, received | KiB/s |\n| tomcat.processing_time | processing time | seconds |\n| tomcat.threads | current, busy | current threads |\n| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |\n| tomcat.jvm_eden | used, committed, max | MiB |\n| tomcat.jvm_survivor | used, committed, max | MiB |\n| tomcat.jvm_tenured | used, committed, max | MiB |\n\n", "integration_type": "collector", "id": "python.d.plugin-tomcat-Tomcat", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tomcat/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "tor", "monitored_instance": {"name": "Tor", "link": "https://www.torproject.org/", "categories": ["data-collection.vpns"], "icon_filename": "tor.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["tor", "traffic", "vpn"], "most_popular": false}, "overview": "# Tor\n\nPlugin: python.d.plugin\nModule: tor\n\n## Overview\n\nThis collector monitors Tor bandwidth traffic.\n\nIt connects to the Tor control port to collect traffic statistics.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nIf no configuration is provided, the collector will try to connect to 127.0.0.1:9051 to detect a running Tor instance.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required python module\n\nThe `stem` Python library needs to be installed.\n\n\n#### Required Tor configuration\n\nEnable the control port in `/etc/tor/torrc`; a minimal sketch is shown below. For more options, please read the Tor manual.\n
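\nThe `HashedControlPassword` line is optional; it pairs with this collector's `password` option, and the hash must be generated with `tor --hash-password`:\n\n```\nControlPort 9051\n# Optional: require a password for control port access.\n# Generate the value below with: tor --hash-password <password>\n# HashedControlPassword <hash>\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/tor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/tor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be 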
defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| control_addr | Tor control IP address | 127.0.0.1 | no |\n| control_port | Tor control port. Can be either a TCP port or a path to a socket file. | 9051 | no |\n| password | Tor control password | | no |\n\n#### Examples\n\n##### Local TCP\n\nA basic TCP configuration. `control_addr` is omitted and will default to `127.0.0.1`.\n\n```yaml\nlocal_tcp:\n name: 'local'\n control_port: 9051\n password: # if required\n\n```\n##### Local socket\n\nA basic local socket configuration.\n\n```yaml\nlocal_socket:\n name: 'local'\n control_port: '/var/run/tor/control'\n password: # if required\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin tor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Tor instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tor.traffic | read, write | KiB/s |\n\n", "integration_type": "collector", "id": "python.d.plugin-tor-Tor", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/tor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "uwsgi", "monitored_instance": {"name": "uWSGI", "link": "https://github.com/unbit/uwsgi/tree/2.0.21", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "uwsgi.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["application server", "python", "web applications"], "most_popular": false}, "overview": "# uWSGI\n\nPlugin: python.d.plugin\nModule: uwsgi\n\n## Overview\n\nThis collector monitors uWSGI metrics about requests, workers, memory and more.\n\nIt collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `/tmp/stats.socket`.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Enable the uWSGI Stats server\n\nMake sure that your uWSGI instance exposes its metrics via a Stats server.\n\nSource: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html\n
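\nAs a minimal sketch, the Stats server can be enabled in the uWSGI ini configuration (the addresses below are examples; pick one form):\n\n```\n[uwsgi]\n# sketch - expose stats over a UNIX socket...\nstats = /tmp/stats.socket\n# ...or, alternatively, over TCP:\n# stats = 127.0.0.1:1717\n```\n\nEquivalently, `--stats /tmp/stats.socket` can be passed on the uWSGI command line.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/uwsgi.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/uwsgi.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 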
| yes | no |\n| name | The JOB's name as it will appear on the dashboard (by default it is the job_name). | job_name | no |\n| socket | The path to the uWSGI stats socket. | no | no |\n| host | The host to connect to. | no | no |\n| port | The port to connect to. | no | no |\n\n#### Examples\n\n##### Basic (default out-of-the-box)\n\nA basic example configuration. The auto-detection mechanism uses it by default. As all JOBs have the same name, only one of them can run at a time.\n\n```yaml\nsocket:\n name : 'local'\n socket : '/tmp/stats.socket'\n\nlocalhost:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nlocalipv4:\n name : 'local'\n host : '127.0.0.1'\n port : 1717\n\nlocalipv6:\n name : 'local'\n host : '::1'\n port : 1717\n\n```\n##### Multi-instance\n\n> **Note**: When you define multiple jobs, their names must be unique.\n\nCollecting metrics from local and remote instances.\n\n\n```yaml\nlocal:\n name : 'local'\n host : 'localhost'\n port : 1717\n\nremote:\n name : 'remote'\n host : '192.0.2.1'\n port : 1717\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin uwsgi debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per uWSGI instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| uwsgi.requests | a dimension per worker | requests/s |\n| uwsgi.tx | a dimension per worker | KiB/s |\n| uwsgi.avg_rt | a dimension per worker | milliseconds |\n| uwsgi.memory_rss | a dimension per worker | MiB |\n| uwsgi.memory_vsz | a dimension per worker | MiB |\n| uwsgi.exceptions | exceptions | exceptions |\n| uwsgi.harakiris | harakiris | harakiris |\n| uwsgi.respawns | respawns | respawns |\n\n", "integration_type": "collector", "id": "python.d.plugin-uwsgi-uWSGI", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "varnish", "monitored_instance": {"name": "Varnish", "link": "https://varnish-cache.org/", "categories": ["data-collection.web-servers-and-web-proxies"], "icon_filename": "varnish.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["varnish", "varnishstat", "varnishd", "cache", "web server", "web cache"], "most_popular": false}, "overview": "# Varnish\n\nPlugin: python.d.plugin\nModule: varnish\n\n## Overview\n\nThis collector monitors Varnish metrics covering global HTTP accelerator statistics, Backends (VBE) and Storages (SMF, SMA, MSE).\n\nNote that both Varnish-Cache (free and open source) and Varnish-Plus (commercial/enterprise version) are supported.\n\n\nIt uses the `varnishstat` tool to collect the metrics.\n\n\nThis collector is supported on all platforms.\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThe `netdata` user must be a member of the `varnish` group.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nBy default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Provide the necessary permissions\n\nIn order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:\n\n```\nusermod -aG varnish netdata\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/varnish.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/varnish.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a 
`job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |\n| update_every | Sets the default data collection frequency. | 10 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n\n#### Examples\n\n##### Basic\n\nAn example configuration.\n\n```yaml\njob_name:\n instance_name: ''\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin varnish debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels.\n\n\n\n### Per Varnish instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.session_connection | accepted, dropped | connections/s |\n| varnish.client_requests | received | requests/s |\n| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |\n| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |\n| varnish.cached_objects_expired | objects | expired/s |\n| varnish.cached_objects_nuked | objects | nuked/s |\n| varnish.threads_total | None | number |\n| varnish.threads_statistics | created, failed, limited | threads/s |\n| varnish.threads_queue_len | in queue | requests |\n| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |\n| varnish.backend_requests | sent | requests/s |\n| varnish.esi_statistics | errors, warnings | problems/s |\n| varnish.memory_usage | free, allocated | MiB |\n| varnish.uptime | uptime | seconds |\n\n### Per Backend\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.backend | header, body | kilobits/s |\n\n### Per Storage\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| varnish.storage_usage | free, allocated | KiB |\n| varnish.storage_alloc_objs | allocated | objects |\n\n", "integration_type": "collector", "id": "python.d.plugin-varnish-Varnish", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "w1sensor", "monitored_instance": {"name": "1-Wire Sensors", "link": "https://www.analog.com/en/product-category/1wire-temperature-sensors.html", "categories": ["data-collection.hardware-devices-and-sensors"], "icon_filename": "1-wire.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["temperature", "sensor", "1-wire"], "most_popular": false}, "overview": "# 1-Wire Sensors\n\nPlugin: python.d.plugin\nModule: w1sensor\n\n## Overview\n\nMonitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.\n\nThe collector uses the wire, w1_gpio, and w1_therm kernel modules. 
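If the modules are not already loaded on your system, you can usually load them manually, as in the following sketch (module availability depends on your kernel and hardware; `w1_gpio` is typically only relevant on GPIO-equipped boards such as a Raspberry Pi):\n\n```bash\n# load the 1-Wire bus, GPIO master, and thermometer kernel modules (requires root)\nsudo modprobe -a wire w1_gpio w1_therm\n```\n\n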
Currently temperature sensors are supported and automatically detected.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThe collector will try to auto-detect available 1-Wire devices.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Required Linux kernel modules\n\nMake sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/w1sensor.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/w1sensor.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |\n| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |\n| name_<1-Wire id> | This allows associating a human-readable name with a sensor's 1-Wire identifier. | | no |\n\n#### Examples\n\n##### Provide human-readable names\n\nAssociate two 1-Wire identifiers with human-readable names.\n\n```yaml\nsensors:\n name_00000022276e: 'Machine room'\n name_00000022298f: 'Rack 12'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin w1sensor debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per 1-Wire Sensors instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| w1sensor.temp | a dimension per sensor | Celsius |\n\n", "integration_type": "collector", "id": "python.d.plugin-w1sensor-1-Wire_Sensors", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "python.d.plugin", "module_name": "zscores", "monitored_instance": {"name": "python.d zscores", "link": "https://en.wikipedia.org/wiki/Standard_score", "categories": ["data-collection.other"], "icon_filename": ""}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["zscore", "z-score", "standard score", "standard deviation", "anomaly detection", "statistical anomaly detection"], "most_popular": false}, "overview": "# python.d zscores\n\nPlugin: python.d.plugin\nModule: zscores\n\n## Overview\n\nBy using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.\n\n\nThis collector uses the [Netdata REST API](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the `mean` and `stddev`\nfor each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).\n\nFor each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). 
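For example, with `mean = 10`, `stddev = 2` and a latest collected value `x = 17`, the raw score would be `z = (17 - 10) / 2 = 3.5`, well within the default `z_clip` of 10. 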
Scores are then smoothed over\ntime (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Python Requirements\n\nThis collector will only work with Python 3 and requires the below packages be installed.\n\n```bash\n# become netdata user\nsudo su -s /bin/bash netdata\n# install required packages\npip3 install numpy pandas requests netdata-pandas==0.0.38\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `python.d/zscores.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config python.d/zscores.conf\n```\n#### Options\n\nThere are 2 sections:\n\n* Global variables\n* One or more JOBS that can define multiple different instances to monitor.\n\nThe following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.\n\nAdditionally, the following collapsed table contains all the options that can be configured inside a JOB definition.\n\nEvery configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| charts_regex | what charts to pull data for - A regex like `system\\..*/` or `system\\..*/apps.cpu/apps.mem` etc. | system\\..* | yes |\n| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |\n| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |\n| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |\n| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |\n| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |\n| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |\n| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |\n| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |\n| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |\n| update_every | Sets the default data collection frequency. | 5 | no |\n| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |\n| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |\n| penalty | Indicates whether to apply penalty to update_every in case of failures. 
| yes | no |\n\n#### Examples\n\n##### Default\n\nDefault configuration.\n\n```yaml\nlocal:\n name: 'local'\n host: '127.0.0.1:19999'\n charts_regex: 'system\\..*'\n charts_to_exclude: 'system.uptime'\n train_secs: 14400\n offset_secs: 300\n train_every_n: 900\n z_smooth_n: 15\n z_clip: 10\n z_abs: 'true'\n burn_in: 2\n mode: 'per_chart'\n per_chart_agg: 'mean'\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Debug Mode\n\nTo troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output\nshould give you clues as to why the collector isn't working.\n\n- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on\n your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.\n\n ```bash\n cd /usr/libexec/netdata/plugins.d/\n ```\n\n- Switch to the `netdata` user.\n\n ```bash\n sudo -u netdata -s\n ```\n\n- Run the `python.d.plugin` to debug the collector:\n\n ```bash\n ./python.d.plugin zscores debug trace\n ```\n\n", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per python.d zscores instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| zscores.z | a dimension per chart or dimension | z |\n| zscores.3stddev | a dimension per chart or dimension | count |\n\n", "integration_type": "collector", "id": "python.d.plugin-zscores-python.d_zscores", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "slabinfo.plugin", "module_name": "slabinfo.plugin", "monitored_instance": {"name": "Linux kernel SLAB allocator statistics", "link": "https://kernel.org/", "categories": ["data-collection.linux-systems.kernel-metrics"], "icon_filename": "linuxserver.svg"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": ["linux kernel", "slab", "slub", "slob", "slabinfo"], "most_popular": false}, "overview": "# Linux kernel SLAB allocator statistics\n\nPlugin: slabinfo.plugin\nModule: slabinfo.plugin\n\n## Overview\n\nCollects metrics on kernel SLAB cache utilization to monitor the low-level performance impact of workloads in the kernel.\n\n\nThe plugin parses `/proc/slabinfo`\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector only supports collecting metrics from a single instance of this integration.\n\nThis integration requires read access to `/proc/slabinfo`, which is accessible only to the root user by default. Netdata uses Linux Capabilities to give the plugin access to this file. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read permission checks and directory read and execute permission checks. 
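You can verify the granted capability with `getcap` (a sketch; the plugin path below assumes a standard install and may differ on your system):\n\n```bash\n# list file capabilities on the plugin binary; expect cap_dac_read_search in the output\ngetcap /usr/libexec/netdata/plugins.d/slabinfo.plugin\n```\n\n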
If file capabilities are not usable, then the plugin is instead installed with the SUID bit set in its permissions so that it runs as root.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nDue to the large number of metrics generated by this integration, it is disabled by default and must be manually enabled inside `/etc/netdata/netdata.conf`.\n\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Minimum setup\n\nIf you installed `netdata` using a package manager, it is also necessary to install the package `netdata-plugin-slabinfo`.\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugins]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| Enable plugin | As described above, the plugin is disabled by default; this option is used to enable it. | no | yes |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\nSLAB cache utilization metrics for the whole system.\n\n### Per Linux kernel SLAB allocator statistics instance\n\n\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| mem.slabmemory | a dimension per cache | B |\n| mem.slabfilling | a dimension per cache | % |\n| mem.slabwaste | a dimension per cache | B |\n\n", "integration_type": "collector", "id": "slabinfo.plugin-slabinfo.plugin-Linux_kernel_SLAB_allocator_statistics", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/slabinfo.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "tc.plugin", "module_name": "tc.plugin", "monitored_instance": {"name": "tc QoS classes", "link": "https://wiki.linuxfoundation.org/networking/iproute2", "categories": ["data-collection.linux-systems.network-metrics"], "icon_filename": "netdata.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# tc QoS classes\n\nPlugin: tc.plugin\nModule: tc.plugin\n\n## Overview\n\nExamine tc metrics to gain insights into Linux traffic control operations. 
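The underlying data is the same kind of output you can inspect manually with the `tc` tool (a sketch; `eth0` is a placeholder interface):\n\n```bash\n# show per-class statistics for one interface\ntc -s class show dev eth0\n```\n\n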
Study packet flow rates, queue lengths, and drop rates to optimize network traffic flow.\n\nThe plugin uses the `tc` command to collect information about traffic control.\n\nThis collector is only supported on the following platforms:\n\n- Linux\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs to run the `tc` command to get the necessary metrics. To achieve this, netdata modifies the permissions of the file `/usr/libexec/netdata/plugins.d/tc-qos-helper.sh`.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Create `tc-qos-helper.conf`\n\nIn order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with the following content:\n\n```conf\ntc_show=\"class\"\n```\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:tc]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| script to run to get tc values | Path to the `tc-qos-helper.sh` script. | /usr/libexec/netdata/plugins.d/tc-qos-helper.sh | no |\n| enable show all classes and qdiscs for all interfaces | yes/no flag to control what data is presented. | yes | no |\n\n#### Examples\n\n##### Basic\n\nA basic example configuration using classes defined in `/etc/iproute2/tc_cls`.\n\nAn example of class IDs mapped to names in that file can be:\n\n```conf\n2:1 Standard\n2:8 LowPriorityData\n2:10 HighThroughputData\n2:16 OAM\n2:18 LowLatencyData\n2:24 BroadcastVideo\n2:26 MultimediaStreaming\n2:32 RealTimeInteractive\n2:34 MultimediaConferencing\n2:40 Signalling\n2:46 Telephony\n2:48 NetworkControl\n```\n\nYou can read more about setting up the tc rules in rc.local in this [GitHub issue](https://github.com/netdata/netdata/issues/4563#issuecomment-455711973).\n\n\n```yaml\n[plugin:tc]\n script to run to get tc values = /usr/libexec/netdata/plugins.d/tc-qos-helper.sh\n enable show all classes and qdiscs for all interfaces = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per network device direction\n\nMetrics related to QoS network device directions. Each direction (in/out) produces its own set of the following metrics.\n\nLabels:\n\n| Label | Description |\n|:-----------|:----------------|\n| device | The network interface. 
|\n| device_name | The network interface name. |\n| group | The device family. |\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| tc.qos | a dimension per class | kilobits/s |\n| tc.qos_packets | a dimension per class | packets/s |\n| tc.qos_dropped | a dimension per class | packets/s |\n| tc.qos_tokens | a dimension per class | tokens |\n| tc.qos_ctokens | a dimension per class | ctokens |\n\n", "integration_type": "collector", "id": "tc.plugin-tc.plugin-tc_QoS_classes", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/tc.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "timex.plugin", "module_name": "timex.plugin", "monitored_instance": {"name": "Timex", "link": "", "categories": ["data-collection.system-clock-and-ntp"], "icon_filename": "syslog.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Timex\n\nPlugin: timex.plugin\nModule: timex.plugin\n\n## Overview\n\nExamine Timex metrics to gain insights into system clock operations. Study time sync status, clock drift, and adjustments to ensure accurate system timekeeping.\n\nIt uses the adjtimex system call on Linux and ntp_adjtime on FreeBSD or macOS to monitor the system kernel clock synchronization state.\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis integration doesn't support auto-detection.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\nNo action required.\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:timex]` section within that file.\n\nThe file format is a modified INI syntax. The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\nAt least one option ('clock synchronization state', 'time offset') needs to be enabled for this collector to run.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n| clock synchronization state | Make a chart showing the system clock synchronization state. 
| yes | yes |\n| time offset | Make a chart showing the computed time offset between the local system and the reference clock. | yes | yes |\n\n#### Examples\n\n##### Basic\n\nA basic configuration example.\n\n```yaml\n[plugin:timex]\n update every = 1\n clock synchronization state = yes\n time offset = yes\n\n```\n", "troubleshooting": "", "alerts": "## Alerts\n\n\nThe following alerts are available:\n\n| Alert name | On metric | Description |\n|:------------|:----------|:------------|\n| [ system_clock_sync_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/timex.conf) | system.clock_sync_state | when set to 0, the system kernel believes the system clock is not properly synchronized to a reliable server |\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Timex instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| system.clock_sync_state | state | state |\n| system.clock_status | unsync, clockerr | status |\n| system.clock_sync_offset | offset | milliseconds |\n\n", "integration_type": "collector", "id": "timex.plugin-timex.plugin-Timex", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/timex.plugin/metadata.yaml", "related_resources": ""}, {"meta": {"plugin_name": "xenstat.plugin", "module_name": "xenstat.plugin", "monitored_instance": {"name": "Xen XCP-ng", "link": "https://xenproject.org/", "categories": ["data-collection.containers-and-vms"], "icon_filename": "xen.png"}, "related_resources": {"integrations": {"list": []}}, "info_provided_to_referring_integrations": {"description": ""}, "keywords": [], "most_popular": false}, "overview": "# Xen XCP-ng\n\nPlugin: xenstat.plugin\nModule: xenstat.plugin\n\n## Overview\n\nThis collector monitors XenServer and XCP-ng host and domain statistics.\n\n\n\nThis collector is supported on all platforms.\n\nThis collector supports collecting metrics from multiple instances of this integration, including remote instances.\n\nThe plugin needs setuid.\n\n### Default Behavior\n\n#### Auto-Detection\n\nThis plugin requires the `xen-dom0-libs-devel` and `yajl-devel` libraries to be installed.\n\n#### Limits\n\nThe default configuration for this integration does not impose any limits on data collection.\n\n#### Performance Impact\n\nThe default configuration for this integration is not expected to impose a significant performance impact on the system.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### Libraries\n\n1. Install `xen-dom0-libs-devel` and `yajl-devel` using the package manager of your system.\n\n Note: On CentOS systems you will need the `centos-release-xen` repository, and the required package for xen is `xen-devel`.\n\n2. Re-install Netdata from source. The installer will detect that the required libraries are now available and will also build xenstat.plugin.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `netdata.conf`.\nConfiguration for this specific integration is located in the `[plugin:xenstat]` section within that file.\n\nThe file format is a modified INI syntax. 
The general structure is:\n\n```ini\n[section1]\n option1 = some value\n option2 = some other value\n\n[section2]\n option3 = some third value\n```\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config netdata.conf\n```\n#### Options\n\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| update every | Data collection frequency. | 1 | no |\n\n#### Examples\nThere are no configuration examples.\n\n", "troubleshooting": "", "alerts": "## Alerts\n\nThere are no alerts configured by default for this integration.\n", "metrics": "## Metrics\n\nMetrics grouped by *scope*.\n\nThe scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.\n\n\n\n### Per Xen XCP-ng instance\n\nThese metrics refer to the entire monitored application.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xenstat.mem | free, used | MiB |\n| xenstat.domains | domains | domains |\n| xenstat.cpus | cpus | cpus |\n| xenstat.cpu_freq | frequency | MHz |\n\n### Per xendomain\n\nMetrics related to Xen domains. Each domain provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.states | running, blocked, paused, shutdown, crashed, dying | boolean |\n| xendomain.cpu | used | percentage |\n| xendomain.mem | maximum, current | MiB |\n| xendomain.vcpu | a dimension per vcpu | percentage |\n\n### Per xendomain vbd\n\nMetrics related to Xen domain Virtual Block Device. Each VBD provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.oo_req_vbd | requests | requests/s |\n| xendomain.requests_vbd | read, write | requests/s |\n| xendomain.sectors_vbd | read, write | sectors/s |\n\n### Per xendomain network\n\nMetrics related to Xen domain network interfaces. 
Each network interface provides its own set of the following metrics.\n\nThis scope has no labels.\n\nMetrics:\n\n| Metric | Dimensions | Unit |\n|:------|:----------|:----|\n| xendomain.bytes_network | received, sent | kilobits/s |\n| xendomain.packets_network | received, sent | packets/s |\n| xendomain.errors_network | received, sent | errors/s |\n| xendomain.drops_network | received, sent | drops/s |\n\n", "integration_type": "collector", "id": "xenstat.plugin-xenstat.plugin-Xen_XCP-ng", "edit_link": "https://github.com/netdata/netdata/blob/master/src/collectors/xenstat.plugin/metadata.yaml", "related_resources": ""}, {"id": "deploy-alpinelinux", "meta": {"name": "Alpine Linux", "link": "https://www.alpinelinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "alpine.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-amazonlinux", "meta": {"name": "Amazon Linux", "link": "https://aws.amazon.com/amazon-linux-2/", "categories": ["deploy.operating-systems"], "icon_filename": "amazonlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 2 | Core | x86_64, aarch64 | |\n| 2023 | Core | x86_64, aarch64 | |\n\nOn other releases of this 
distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-archlinux", "meta": {"name": "Arch Linux", "link": "https://archlinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "archlinux.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos", "meta": {"name": "CentOS", "link": "https://www.centos.org/", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 7 | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-centos-stream", "meta": {"name": "CentOS Stream", "link": 
"https://www.centos.org/centos-stream", "categories": ["deploy.operating-systems"], "icon_filename": "centos.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Community | x86_64, aarch64 | |\n| 8 | Community | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-debian", "meta": {"name": "Debian", "link": "https://www.debian.org/", "categories": ["deploy.operating-systems"], "icon_filename": "debian.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 12 | Core | i386, amd64, armhf, arm64 | |\n| 11 | Core | i386, amd64, armhf, arm64 | |\n| 10 | Core | i386, amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-docker", "meta": {"name": "Docker", "link": "https://www.docker.com/", "categories": ["deploy.docker-kubernetes"], "icon_filename": "docker.svg"}, "most_popular": true, 
"keywords": ["docker", "container", "containers"], "install_description": "Install and connect new Docker containers\nFind the commands for `docker run`, `docker compose` or `Docker Swarm`. On the last two you can copy the configs, then run `docker-compose up -d` in the same directory as the `docker-compose.yml`\n\n> Netdata container requires different privileges and mounts to provide functionality similar to that provided by Netdata installed on the host. More info [here](https://learn.netdata.cloud/docs/installing/docker?_gl=1*f2xcnf*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMDIyMS40MS4xLjE2OTAzMTAzNjkuNTguMC4w#create-a-new-netdata-agent-container)\n> Netdata will use the hostname from the container in which it is run instead of that of the host system. To change the default hostname check [here](https://learn.netdata.cloud/docs/agent/packaging/docker?_gl=1*i5weve*_ga*MTI1MTUwMzU0OS4xNjg2NjM1MDA1*_ga_J69Z2JCTFB*MTY5MDMxMjM4Ny40Mi4xLjE2OTAzMTIzOTAuNTcuMC4w#change-the-default-hostname)\n", "methods": [{"method": "Docker CLI", "commands": [{"channel": "nightly", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined netdata/netdata:edge\n"}, {"channel": "stable", "command": "docker run -d --name=netdata \\\n--pid=host \\\n--network=host \\\n-v netdataconfig:/etc/netdata \\\n-v netdatalib:/var/lib/netdata \\\n-v netdatacache:/var/cache/netdata \\\n-v /etc/passwd:/host/etc/passwd:ro \\\n-v /etc/group:/host/etc/group:ro \\\n-v /etc/localtime:/etc/localtime:ro \\\n-v /proc:/host/proc:ro \\\n-v /sys:/host/sys:ro \\\n-v /etc/os-release:/host/etc/os-release:ro \\\n-v /var/log:/host/var/log:ro \\\n-v /var/run/docker.sock:/var/run/docker.sock:ro \\\n--restart unless-stopped \\\n--cap-add SYS_PTRACE \\\n--cap-add SYS_ADMIN \\\n--security-opt apparmor=unconfined netdata/netdata:stable\n"}]}, {"method": "Docker Compose", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n container_name: netdata\n pid: host\n network_mode: host\n restart: unless-stopped\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - 
/etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}, {"method": "Docker Swarm", "commands": [{"channel": "nightly", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:edge\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}, {"channel": "stable", "command": "version: '3'\nservices:\n netdata:\n image: netdata/netdata:stable\n pid: host\n network_mode: host\n cap_add:\n - SYS_PTRACE\n - SYS_ADMIN\n security_opt:\n - apparmor:unconfined\n volumes:\n - netdataconfig:/etc/netdata\n - netdatalib:/var/lib/netdata\n - netdatacache:/var/cache/netdata\n - /etc/passwd:/host/etc/passwd:ro\n - /etc/group:/host/etc/group:ro\n - /etc/localtime:/etc/localtime:ro\n - /proc:/host/proc:ro\n - /sys:/host/sys:ro\n - /etc/os-release:/host/etc/os-release:ro\n - /etc/hostname:/etc/hostname:ro\n - /var/log:/host/var/log:ro\n - /var/run/docker.sock:/var/run/docker.sock:ro\n\n deploy:\n mode: global\n restart_policy:\n condition: on-failure\nvolumes:\n netdataconfig:\n netdatalib:\n netdatacache:\n"}]}], "additional_info": "", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 19.03 or newer | Core | linux/i386, linux/amd64, linux/arm/v7, linux/arm64, linux/ppc64le | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 3, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-fedora", "meta": {"name": "Fedora", "link": "https://www.fedoraproject.org/", "categories": ["deploy.operating-systems"], "icon_filename": "fedora.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy 
Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 39 | Core | x86_64, aarch64 | |\n| 38 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-freebsd", "meta": {"name": "FreeBSD", "link": "https://www.freebsd.org/", "categories": ["deploy.operating-systems"], "icon_filename": "freebsd.svg"}, "most_popular": true, "keywords": ["freebsd"], "install_description": "## Install dependencies\nPlease install the following packages using the command below:\n\n```sh\npkg install bash e2fsprogs-libuuid git curl autoconf automake pkgconf pidof liblz4 libuv json-c cmake gmake\n```\n\nThis step needs root privileges. Please respond in the affirmative for any relevant prompts during the installation process.\n\nRun the following command on your node to install and claim Netdata:\n", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "fetch", "commands": [{"channel": "nightly", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "fetch -o /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Netdata can also be installed via [FreeBSD ports](https://www.freshports.org/net-mgmt/netdata).\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13-STABLE | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 6, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-kubernetes", "meta": {"name": "Kubernetes (Helm)", "link": "", "categories": ["deploy.docker-kubernetes"], "icon_filename": "kubernetes.svg"}, "keywords": ["kubernetes", "container", "Orchestrator"], "install_description": "**Use helm install to install Netdata on your Kubernetes cluster**\nFor a new installation use `helm install`, or for existing clusters add the content below to your `override.yml` and then run `helm upgrade -f override.yml netdata netdata/netdata`\n", "methods": [{"method": "Helm", "commands": [{"channel": "nightly", "command": "helm install netdata netdata/netdata \\\n--set image.tag=edge\n"}, {"channel": "stable", "command": "helm install netdata netdata/netdata \\\n--set image.tag=stable\n"}]}, {"method": "Existing Cluster", "commands": [{"channel": "nightly", "command": "image:\n tag: edge\n\nrestarter:\n enabled: true\n\n"}, {"channel": "stable", "command": "image:\n tag: stable\n\nrestarter:\n enabled: true\n\n"}]}], 
"additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 4, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-linux-generic", "meta": {"name": "Linux", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "linux.svg"}, "keywords": ["linux"], "most_popular": true, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-macos", "meta": {"name": "macOS", "link": "", "categories": ["deploy.operating-systems"], "icon_filename": "macos.svg"}, "most_popular": true, "keywords": ["macOS", "mac", "apple"], "install_description": "Run the following command on your Intel based OSX, macOS servers to install and claim Netdata:", "methods": [{"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 13 | Community | | |\n| 12 | Community | | |\n| 11 | Community | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 5, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-manjarolinux", "meta": {"name": "Manjaro Linux", "link": "https://manjaro.org/", "categories": ["deploy.operating-systems"], "icon_filename": "manjaro.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh 
--nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| latest | Intermediate | | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-opensuse", "meta": {"name": "SUSE Linux", "link": "https://www.suse.com/", "categories": ["deploy.operating-systems"], "icon_filename": "openSUSE.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 15.5 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-oraclelinux", "meta": {"name": "Oracle Linux", "link": "https://www.oracle.com/linux/", "categories": ["deploy.operating-systems"], "icon_filename": "oraclelinux.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": 
"curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 8 | Core | x86_64, aarch64 | |\n| 9 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rhel", "meta": {"name": "Red Hat Enterprise Linux", "link": "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux", "categories": ["deploy.operating-systems"], "icon_filename": "rhel.png"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9.x | Core | x86_64, aarch64 | |\n| 8.x | Core | x86_64, aarch64 | |\n| 7.x | Core | x86_64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-rockylinux", "meta": {"name": "Rocky Linux", "link": "https://rockylinux.org/", "categories": ["deploy.operating-systems"], "icon_filename": "rocky.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh 
--nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 9 | Core | x86_64, aarch64 | |\n| 8 | Core | x86_64, aarch64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-ubuntu", "meta": {"name": "Ubuntu", "link": "https://ubuntu.com/", "categories": ["deploy.operating-systems"], "icon_filename": "ubuntu.svg"}, "keywords": ["linux"], "most_popular": false, "install_description": "Run the following command on your node to install and claim Netdata:", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "Did you know you can also deploy Netdata on your OS using Kubernetes or Docker?\n", "related_resources": {}, "platform_info": "We build native packages for the following releases:\n\n| Version | Support Tier | Native Package Architectures | Notes |\n|:-------:|:------------:|:----------------------------:|:----- |\n| 22.04 | Core | amd64, armhf, arm64 | |\n| 23.10 | Core | amd64, armhf, arm64 | |\n| 20.04 | Core | amd64, armhf, arm64 | |\n\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": -1, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "deploy-windows", "meta": {"name": "Windows", "link": "https://www.microsoft.com/en-us/windows", "categories": ["deploy.operating-systems"], "icon_filename": "windows.svg"}, "keywords": ["windows"], "install_description": "1. Install [Windows Exporter](https://github.com/prometheus-community/windows_exporter) on every Windows host you want to monitor.\n2. Install Netdata agent on Linux, FreeBSD or Mac.\n3. Configure Netdata to collect data remotely from your Windows hosts by adding one job per host to windows.conf file. See the [configuration section](https://learn.netdata.cloud/docs/data-collection/monitor-anything/System%20Metrics/Windows-machines#configuration) for details.\n4. 
Enable the [virtual nodes](https://learn.netdata.cloud/docs/data-collection/windows-systems#virtual-nodes) configuration so that the Windows nodes are displayed as separate nodes.\n", "methods": [{"method": "wget", "commands": [{"channel": "nightly", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}, {"method": "curl", "commands": [{"channel": "nightly", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --nightly-channel\n"}, {"channel": "stable", "command": "curl https://get.netdata.cloud/kickstart.sh > /tmp/netdata-kickstart.sh && sh /tmp/netdata-kickstart.sh --stable-channel\n"}]}], "additional_info": "", "related_resources": {}, "most_popular": true, "platform_info": "\nOn other releases of this distribution, a static binary will be installed in `/opt/netdata`.", "quick_start": 2, "integration_type": "deploy", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/deploy.yaml"}, {"id": "export-appoptics", "meta": {"name": "AppOptics", "link": "https://www.solarwinds.com/appoptics", "categories": ["export"], "icon_filename": "solarwinds.svg", "keywords": ["app optics", "AppOptics", "Solarwinds"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# AppOptics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-aws-kinesis", "meta": {"name": "AWS Kinesis", "link": "https://aws.amazon.com/kinesis/", "categories": ["export"], "icon_filename": "aws-kinesis.svg"}, "keywords": ["exporter", "AWS", "Kinesis"], "overview": "# AWS Kinesis\n\nExport metrics to AWS Kinesis Data Streams.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.\n- If you are building from source, use the following instructions to ensure 3rd party dependencies are installed:\n ```bash\n git clone --recursive https://github.com/aws/aws-sdk-cpp.git\n cd aws-sdk-cpp/\n git submodule update --init --recursive\n mkdir BUILT\n cd BUILT\n cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..\n make\n make install\n ```\n- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.\n- Next, Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nNetdata automatically computes a partition key for every record in order to distribute records evenly across available shards.\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. 
| Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. 
The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n\n```\n##### Configuration with AWS credentials\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[kinesis:my_instance]\n enabled = yes\n destination = us-east-1\n # AWS credentials\n aws_access_key_id = your_access_key_id\n aws_secret_access_key = your_secret_access_key\n # destination stream\n stream name = your_stream_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/aws_kinesis/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-data", "meta": {"name": "Azure Data Explorer", "link": "https://azure.microsoft.com/en-us/pricing/details/data-explorer/", "categories": ["export"], "icon_filename": "azuredataex.jpg", "keywords": ["Azure Data Explorer", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Data Explorer\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-azure-event", "meta": {"name": "Azure Event Hub", "link": "https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about", "categories": ["export"], "icon_filename": "azureeventhub.png", "keywords": ["Azure Event Hub", "Azure"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Azure Event Hub\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-bigquery", "meta": {"name": "Google BigQuery", "link": "https://cloud.google.com/bigquery/", "categories": ["export"], "icon_filename": "bigquery.png", "keywords": ["export", "Google BigQuery", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Google BigQuery\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-blueflood", "meta": {"name": "Blueflood", "link": "http://blueflood.io/", "categories": ["export"], "icon_filename": "blueflood.png", "keywords": ["export", "Blueflood", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Blueflood\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-chronix", "meta": {"name": "Chronix", "link": "https://dbdb.io/db/chronix", "categories": ["export"], "icon_filename": "chronix.png", "keywords": ["export", "chronix", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Chronix\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries are installed.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-cortex", "meta": {"name": "Cortex", "link": "https://cortexmetrics.io/", "categories": ["export"], "icon_filename": "cortex.png", "keywords": ["export", "cortex", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Cortex\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-crate", "meta": {"name": "CrateDB", "link": "https://crate.io/", "categories": ["export"], "icon_filename": "crate.svg", "keywords": ["export", "CrateDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# CrateDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-elastic", "meta": {"name": "ElasticSearch", "link": "https://www.elastic.co/", "categories": ["export"], "icon_filename": "elasticsearch.svg", "keywords": ["export", "ElasticSearch", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# ElasticSearch\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-gnocchi", "meta": {"name": "Gnocchi", "link": "https://wiki.openstack.org/wiki/Gnocchi", "categories": ["export"], "icon_filename": "gnocchi.svg", "keywords": ["export", "Gnocchi", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Gnocchi\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-google-pubsub", "meta": {"name": "Google Cloud Pub Sub", "link": "https://cloud.google.com/pubsub", "categories": ["export"], "icon_filename": "pubsub.png"}, "keywords": ["exporter", "Google Cloud", "Pub Sub"], "overview": "# Google Cloud Pub Sub\n\nExport metrics to the Google Cloud Pub/Sub Service.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries.\n- Pub/Sub support also depends on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`.\n- Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | pubsub.googleapis.com | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = pubsub.googleapis.com\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n- Set the destination option to a Pub/Sub service endpoint. 
pubsub.googleapis.com is the default one.\n- Create the credentials JSON file by following Google Cloud's authentication guide.\n- The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set with\n `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`.\n- Set the credentials file option to the full path of the file.\n\n\n```yaml\n[pubsub:my_instance]\n enabled = yes\n destination = pubsub.googleapis.com\n credentials file = /etc/netdata/google_cloud_credentials.json\n project id = my_project\n topic id = my_topic\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/pubsub/metadata.yaml", "troubleshooting": ""}, {"id": "export-graphite", "meta": {"name": "Graphite", "link": "https://graphite.readthedocs.io/en/latest/", "categories": ["export"], "icon_filename": "graphite.png"}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# Graphite\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-influxdb", "meta": {"name": "InfluxDB", "link": "https://www.influxdata.com/", "categories": ["export"], "icon_filename": "influxdb.svg", "keywords": ["InfluxDB", "Influx", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# InfluxDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-irondb", "meta": {"name": "IRONdb", "link": "https://docs.circonus.com/irondb/", "categories": ["export"], "icon_filename": "irondb.png", "keywords": ["export", "IRONdb", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# IRONdb\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. 
The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture`, should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n```yaml\ndestination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,\nuse `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human-friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-json", "meta": {"name": "JSON", "link": "https://learn.netdata.cloud/docs/exporting/json-document-databases", "categories": ["export"], "icon_filename": "json.svg"}, "keywords": ["exporter", "json"], "overview": "# JSON\n\nUse the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost:5448 | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). 
| localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = localhost:5448\n ```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\n\n\n```yaml\n[json:my_json_instance]\n enabled = yes\n destination = localhost:5448\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `json:https:my_json_instance`.\n\n```yaml\n[json:https:my_json_instance]\n enabled = yes\n destination = localhost:5448\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/json/metadata.yaml", "troubleshooting": ""}, {"id": "export-kafka", "meta": {"name": "Kafka", "link": "https://kafka.apache.org/", "categories": ["export"], "icon_filename": "kafka.svg", "keywords": ["export", "Kafka", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Kafka\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-kairosdb", "meta": {"name": "KairosDB", "link": "https://kairosdb.github.io/", "categories": ["export"], "icon_filename": "kairos.png", "keywords": ["KairosDB", "kairos", "export", "graphite"]}, "keywords": ["exporter", "graphite", "remote write", "time series"], "overview": "# KairosDB\n\nUse the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- You have already installed Netdata and Graphite.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic configuration\n\n```yaml\n[graphite:netdata]\n enabled = yes\n destination = localhost:2003\n\n```\n##### Configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n destination = localhost:2003\n username = my_username\n password = my_password\n\n```\n##### Detailed Configuration for a remote, secure host\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `graphite:https:netdata`.\n\n```yaml\n[graphite:https:netdata]\n enabled = yes\n username = my_username\n password = my_password\n destination = 10.10.1.114:2003\n # data source = average\n # prefix = netdata\n # hostname = my_hostname\n # update every = 10\n # buffer on failures = 10\n # timeout ms = 20000\n # send names instead of ids = yes\n # send charts matching = *\n # send hosts matching = localhost *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/graphite/metadata.yaml", "troubleshooting": ""}, {"id": "export-m3db", "meta": {"name": "M3DB", "link": "https://m3db.io/", "categories": ["export"], "icon_filename": "m3db.png", "keywords": ["export", "M3DB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# M3DB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. 
| * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-metricfire", "meta": {"name": "MetricFire", "link": "https://www.metricfire.com/", "categories": ["export"], "icon_filename": "metricfire.png", "keywords": ["export", "MetricFire", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# MetricFire\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-mongodb", "meta": {"name": "MongoDB", "link": "https://www.mongodb.com/", "categories": ["export"], "icon_filename": "mongodb.svg"}, "keywords": ["exporter", "MongoDB"], "overview": "# MongoDB\n\nUse the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database\nfor long-term storage, further analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.\n- Next, re-install Netdata from the source, which detects that the required library is now available.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). 
| | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Basic configuration\n\nThe default socket timeout depends on the exporting connector update interval.\nThe timeout is 500 ms shorter than the interval (but not less than 1000 ms). 
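With the default `update every = 10`, for instance, that works out to a socket timeout of 10000 - 500 = 9500 ms. 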
You can alter the timeout using the sockettimeoutms MongoDB URI option.\n\n\n```yaml\n[mongodb:my_instance]\n enabled = yes\n destination = mongodb://\n database = your_database_name\n collection = your_collection_name\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/mongodb/metadata.yaml", "troubleshooting": ""}, {"id": "export-newrelic", "meta": {"name": "New Relic", "link": "https://newrelic.com/", "categories": ["export"], "icon_filename": "newrelic.svg", "keywords": ["export", "NewRelic", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# New Relic\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). 
| | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. 
For example: `prometheus_remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-opentsdb", "meta": {"name": "OpenTSDB", "link": "https://github.com/OpenTSDB/opentsdb", "categories": ["export"], "icon_filename": "opentsdb.png"}, "keywords": ["exporter", "OpenTSDB", "scalable time series"], "overview": "# OpenTSDB\n\nUse the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,\nfurther analysis, or correlation with data from other sources.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- OpenTSDB and Netdata installed, configured and operational.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | Netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). 
| | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Minimal configuration\n\nAdd `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol.\nFor example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n\n```\n##### HTTP authentication\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n username = my_username\n password = my_password\n\n```\n##### Using `send hosts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send hosts matching = localhost *\n\n```\n##### Using `send charts matching`\n\n\n\n```yaml\n[opentsdb:my_opentsdb_instance]\n enabled = yes\n destination = localhost:4242\n send charts matching = *\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/opentsdb/metadata.yaml", "troubleshooting": ""}, {"id": "export-pgsql", "meta": {"name": "PostgreSQL", "link": "https://www.postgresql.org/", "categories": ["export"], "icon_filename": "postgres.svg", "keywords": ["export", "PostgreSQL", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# PostgreSQL\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. 
| 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you have to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. 
Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-prometheus-remote", "meta": {"name": "Prometheus Remote Write", "link": "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage", "categories": ["export"], "icon_filename": "prometheus.svg"}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Prometheus Remote Write\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. 
| 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-quasar", "meta": {"name": "QuasarDB", "link": "https://doc.quasar.ai/master/", "categories": ["export"], "icon_filename": "quasar.jpeg", "keywords": ["export", "quasar", "quasarDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# QuasarDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-splunk", "meta": {"name": "Splunk SignalFx", "link": "https://www.splunk.com/en_us/products/observability.html", "categories": ["export"], "icon_filename": "splunk.svg", "keywords": ["export", "splunk", "signalfx", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Splunk SignalFx\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-thanos", "meta": {"name": "Thanos", "link": "https://thanos.io/", "categories": ["export"], "icon_filename": "thanos.png", "keywords": ["export", "thanos", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Thanos\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-tikv", "meta": {"name": "TiKV", "link": "https://tikv.org/", "categories": ["export"], "icon_filename": "tikv.png", "keywords": ["export", "TiKV", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TiKV\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-timescaledb", "meta": {"name": "TimescaleDB", "link": "https://www.timescale.com/", "categories": ["export"], "icon_filename": "timescale.png", "keywords": ["export", "TimescaleDB", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# TimescaleDB\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |\n| send hosts matching | Hosts filter. 
Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-victoria", "meta": {"name": "VictoriaMetrics", "link": "https://victoriametrics.com/products/open-source/", "categories": ["export"], "icon_filename": "victoriametrics.png", "keywords": ["export", "victoriametrics", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VictoriaMetrics\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-vmware", "meta": {"name": "VMware Aria", "link": "https://www.vmware.com/products/aria-operations-for-applications.html", "categories": ["export"], "icon_filename": "aria.png", "keywords": ["export", "VMware", "Aria", "Tanzu", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# VMware Aria\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after installing the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n\n```\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "export-wavefront", "meta": {"name": "Wavefront", "link": "https://docs.wavefront.com/wavefront_data_ingestion.html", "categories": ["export"], "icon_filename": "wavefront.png", "keywords": ["export", "Wavefront", "prometheus", "remote write"]}, "keywords": ["exporter", "Prometheus", "remote write", "time series"], "overview": "# Wavefront\n\nUse the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.\n\n\n## Limitations\n\nThe remote write exporting connector does not support buffer on failures.\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Netdata and the external storage provider of your choice, installed, configured and operational.\n- `protobuf` and `snappy` libraries installed.\n- Netdata reinstalled after the libraries.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `exporting.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config exporting.conf\n```\n#### Options\n\nThe following options can be defined for this exporter.\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |\n| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |\n| username | Username for HTTP authentication | my_username | no |\n| password | Password for HTTP authentication | my_password | no |\n| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |\n| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |\n| prefix | The prefix to add to all metrics. | netdata | no |\n| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |\n| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |\n| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. 
| 20000 | no |\n| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |\n| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |\n| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |\n| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |\n| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |\n\n##### destination\n\nThe format of each item in this list is: [PROTOCOL:]IP[:PORT].\n- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.\n- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.\n- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.\n\nExample IPv4:\n ```yaml\n destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003\n ```\nExample IPv6 and IPv4 together:\n```yaml\ndestination = [ffff:...:0001]:2003 10.11.12.1:2003\n```\nWhen multiple servers are defined, Netdata will try the next one when the previous one fails.\n\n\n##### update every\n\nNetdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers\nsend data to the same database. This randomness does not affect the quality of the data, only the time they are sent.\n\n\n##### buffer on failures\n\nIf the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).\n\n\n##### send hosts matching\n\nIncludes one or more space separated patterns, using * as wildcard (any number of times within each pattern).\nThe patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to\nfilter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.\n\nA pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,\nuse `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).\n\n\n##### send charts matching\n\nA pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,\nuse !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,\npositive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter\nhas a higher priority than the configuration option.\n\n\n##### send names instead of ids\n\nNetdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names\nare human friendly labels (also unique). 
Most charts and metrics have the same ID and name, but in several cases they are\ndifferent: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.\n\n\n#### Examples\n\n##### Example configuration\n\nBasic example configuration for Prometheus remote write.\n\n```yaml\n[prometheus_remote_write:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n```\n\n##### Example configuration with HTTPS and HTTP authentication\n\nAdd the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.\n\n```yaml\n[prometheus_remote_write:https:my_instance]\n enabled = yes\n destination = 10.11.14.2:2003\n remote write URL path = /receive\n username = my_username\n password = my_password\n```\n", "integration_type": "exporter", "edit_link": "https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/metadata.yaml", "troubleshooting": ""}, {"id": "notify-alerta", "meta": {"name": "Alerta", "link": "https://alerta.io/", "categories": ["notify.agent"], "icon_filename": "alerta.png"}, "keywords": ["Alerta"], "overview": "# Alerta\n\nThe [Alerta](https://alerta.io/) monitoring system is a tool used to consolidate and de-duplicate alerts from multiple sources for quick \u2018at-a-glance\u2019 visualization. With just one system you can monitor alerts from many other monitoring tools on a single screen.\nYou can send Netdata alerts to Alerta to see alerts coming from many Netdata hosts, or from a multi-host Netdata configuration.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working Alerta instance\n- An Alerta API key (if authentication in Alerta is enabled)\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ALERTA | Set `SEND_ALERTA` to YES | | yes |\n| ALERTA_WEBHOOK_URL | Set `ALERTA_WEBHOOK_URL` to the API URL you defined when you installed the Alerta server. | | yes |\n| ALERTA_API_KEY | Set `ALERTA_API_KEY` to your API key. | | yes |\n| DEFAULT_RECIPIENT_ALERTA | Set `DEFAULT_RECIPIENT_ALERTA` to the default recipient environment you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. | | yes |\n| DEFAULT_RECIPIENT_CUSTOM | Set different recipient environments per role by editing `DEFAULT_RECIPIENT_CUSTOM` with the environment name of your choice | | no |\n\n##### ALERTA_API_KEY\n\nYou will need an API key to send messages from any source, if Alerta is configured to use authentication (recommended). To create a new API key:\n1. Go to Configuration > API Keys.\n2. 
Create a new API key called \"netdata\" with `write:alerts` permission.\n\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nThe `DEFAULT_RECIPIENT_CUSTOM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_alerta[sysadmin]=\"Systems\"\nrole_recipients_alerta[domainadmin]=\"Domains\"\nrole_recipients_alerta[dba]=\"Databases Systems\"\nrole_recipients_alerta[webmaster]=\"Marketing Development\"\nrole_recipients_alerta[proxyadmin]=\"Proxy\"\nrole_recipients_alerta[sitemgr]=\"Sites\"\n```\n\nThe values you provide should be defined as environments in `/etc/alertad.conf` with the `ALLOWED_ENVIRONMENTS` option.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# alerta (alerta.io) global notification options\n\nSEND_ALERTA=\"YES\"\nALERTA_WEBHOOK_URL=\"http://yourserver/alerta/api\"\nALERTA_API_KEY=\"INSERT_YOUR_API_KEY_HERE\"\nDEFAULT_RECIPIENT_ALERTA=\"Production\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/alerta/metadata.yaml"}, {"id": "notify-awssns", "meta": {"name": "AWS SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.agent"], "icon_filename": "aws.svg"}, "keywords": ["AWS SNS"], "overview": "# AWS SNS\n\nAs part of its AWS suite, Amazon provides a notification broker service called 'Simple Notification Service' (SNS). Amazon SNS works similarly to Netdata's own notification system, allowing you to dispatch a single notification to multiple subscribers of different types. Among other things, SNS supports sending notifications to:\n- Email addresses\n- Mobile Phones via SMS\n- HTTP or HTTPS web hooks\n- AWS Lambda functions\n- AWS SQS queues\n- Mobile applications via push notifications\nYou can send notifications through Amazon SNS using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- While Amazon SNS supports sending differently formatted messages for different delivery methods, Netdata does not currently support this functionality.\n- For email notification support, we recommend using Netdata's email notifications, as it has the following benefits:\n - In most cases, it requires less configuration.\n - Netdata's emails are nicely pre-formatted and support features like threading, which requires a lot of manual effort in SNS.\n - It is less resource intensive and more cost-efficient than SNS.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The [Amazon Web Services CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) (awscli).\n- An actual home directory for the user you run Netdata as, instead of just using `/` as a home directory. The setup depends on the distribution, but `/var/lib/netdata` is the recommended directory. 
If you are using Netdata as a dedicated user, the permissions will already be correct.\n- An Amazon SNS topic to send notifications to with one or more subscribers. The Getting Started section of the Amazon SNS documentation covers the basics of how to set this up. Make note of the Topic ARN when you create the topic.\n- While not mandatory, it is highly recommended to create a dedicated IAM user on your account for Netdata to send notifications. This user needs to have programmatic access, and its permissions should only allow access to SNS. For an additional layer of security, you can create one for each system or group of systems.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| aws path | The full path of the aws command. If empty, the system `$PATH` will be searched for it. If not found, Amazon SNS notifications will be silently disabled. | | yes |\n| SEND_AWSSNS | Set `SEND_AWSSNS` to YES | YES | yes |\n| AWSSNS_MESSAGE_FORMAT | Set `AWSSNS_MESSAGE_FORMAT` to the format string you want alerts to be sent in. | ${status} on ${host} at ${date}: ${chart} ${value_string} | yes |\n| DEFAULT_RECIPIENT_AWSSNS | Set `DEFAULT_RECIPIENT_AWSSNS` to the Topic ARN you noted down upon creating the Topic. 
| | yes |\n\n##### AWSSNS_MESSAGE_FORMAT\n\nThe supported variables are:\n\n| Variable name | Description |\n|:---:|:---|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm has been configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n##### DEFAULT_RECIPIENT_AWSSNS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different recipient Topics per **role**, by editing `DEFAULT_RECIPIENT_AWSSNS` with the Topic ARN you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_awssns[sysadmin]=\"arn:aws:sns:us-east-2:123456789012:Systems\"\nrole_recipients_awssns[domainadmin]=\"arn:aws:sns:us-east-2:123456789012:Domains\"\nrole_recipients_awssns[dba]=\"arn:aws:sns:us-east-2:123456789012:Databases\"\nrole_recipients_awssns[webmaster]=\"arn:aws:sns:us-east-2:123456789012:Development\"\nrole_recipients_awssns[proxyadmin]=\"arn:aws:sns:us-east-2:123456789012:Proxy\"\nrole_recipients_awssns[sitemgr]=\"arn:aws:sns:us-east-2:123456789012:Sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\nAn example working configuration would be:\n\n```yaml\n#------------------------------------------------------------------------------\n# Amazon SNS 
notifications\n\nSEND_AWSSNS=\"YES\"\nAWSSNS_MESSAGE_FORMAT=\"${status} on ${host} at ${date}: ${chart} ${value_string}\"\nDEFAULT_RECIPIENT_AWSSNS=\"arn:aws:sns:us-east-2:123456789012:MyTopic\"\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/awssns/metadata.yaml"}, {"id": "notify-cloud-awssns", "meta": {"name": "Amazon SNS", "link": "https://aws.amazon.com/sns/", "categories": ["notify.cloud"], "icon_filename": "awssns.png"}, "keywords": ["awssns"], "overview": "# Amazon SNS\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on AWS SNS.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add AWS SNS notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- Have an AWS account with AWS SNS access, for more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **AwsSns** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For AWS SNS:\n - Topic ARN - topic provided on AWS SNS (with region) for where to publish your notifications. For more details check [how to configure this on AWS SNS](#settings-on-aws-sns)\n\n### Settings on AWS SNS\n\nTo enable the webhook integration on AWS SNS you need:\n1. [Set up access for Amazon SNS](https://docs.aws.amazon.com/sns/latest/dg/sns-setting-up.html)\n2. Create a topic\n - On AWS SNS management console click on **Create topic**\n - On the **Details** section, select the standard type and provide the topic name\n - On the **Access policy** section, change the **Publishers** option to **Only the specified AWS accounts** and provide the Netdata AWS account **(123269920060)** that will be used to publish notifications to the topic being created\n - Finally, click on **Create topic** at the bottom of the page\n3. 
Now, use the new **Topic ARN** while adding the AWS SNS integration to your space.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.cloud"], "icon_filename": "discord.png"}, "keywords": ["discord", "community"], "overview": "# Discord\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Discord.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- You need to have a Discord server able to receive webhook integrations.\n\n### Discord Server Configuration\nSteps to configure your Discord server to receive [webhook notifications](https://support.discord.com/hc/en-us/articles/228383668) from Netdata:\n1. Go to `Server Settings` --> `Integrations`\n2. **Create Webhook** or **View Webhooks** if you already have some defined\n3. Specify the **Name** and **Channel** on your new webhook\n4. Use the Webhook URL to add your notification configuration on the Netdata UI\n\n### Netdata Configuration Steps\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Discord** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Discord:\n - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**\n - Webhook URL - URL provided on Discord for the channel you want to receive your notifications.\n - Thread name - if the Discord channel is a **Forum channel** you will need to provide the thread name as well\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mattermost", "meta": {"name": "Mattermost", "link": "https://mattermost.com/", "categories": ["notify.cloud"], "icon_filename": "mattermost.png"}, "keywords": ["mattermost"], "overview": "# Mattermost\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Mattermost.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Mattermost to add new integrations.\n- You need to have a Mattermost app on your workspace to receive the webhooks.\n\n### Mattermost Server Configuration\n\nSteps to configure your Mattermost to receive notifications from Netdata:\n\n1. In Mattermost, go to Product menu > Integrations > Incoming Webhook\n - If you don\u2019t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below.\n2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters\n3. Select the channel to receive webhook payloads, then select Add to create the webhook\n4. You will end up with a webhook endpoint that looks like the one below:\n `https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx`\n\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.\n\nFor more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Mattermost** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Mattermost:\n - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-microsoftteams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams", "categories": ["notify.cloud"], "icon_filename": "teams.svg"}, "keywords": ["microsoft", "teams"], "overview": "# Microsoft Teams\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications to a Microsoft Teams channel.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Microsoft Teams notifications integration to your Netdata Cloud space you will need the following:\n\n- A Netdata Cloud account.\n- Access to the Netdata Cloud space as an **administrator**.\n- The Space to be on **Business** plan or higher.\n- A [Microsoft 365 for Business Account](https://www.microsoft.com/en-us/microsoft-365/business). Note that this is a **paid** account.\n\n### Settings on Microsoft Teams\n\n- The integration gets enabled at a team's channel level.\n- Click on the `...` (three dots) icon next to the channel name; it appears when you hover over it.\n- Click on `Connectors`.\n- Look for the `Incoming Webhook` connector and click **Configure**.\n- Provide a name for your Incoming Webhook Connector, for example _Netdata Alerts_. You can also customize it with a proper icon instead of using the default image.\n- Click `Create`.\n- The _Incoming Webhook URL_ is created.\n- That is the URL to be provided to the Netdata Cloud configuration.\n\n### Settings on Netdata Cloud\n\n1. Click on the **Space settings** cog (located above your profile icon).\n2. Click on the **Notification** tab.\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen).\n4. On the **Microsoft Teams** card click on **+ Add**.\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings:\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it.\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration.\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only.\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Microsoft Teams:\n - Microsoft Teams Incoming Webhook URL - the _Incoming Webhook URL_ that was generated earlier.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-mobile-app", "meta": {"name": "Netdata Mobile App", "link": "https://netdata.cloud", "categories": ["notify.cloud"], "icon_filename": "netdata.png"}, "keywords": ["mobile-app", "phone", "personal-notifications"], "overview": "# Netdata Mobile App\n\nFrom the Netdata Cloud UI, you can manage your user notification settings and enable the configuration to deliver notifications on the Netdata Mobile Application.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- You need to have the Netdata Mobile Application installed on your [Android](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or [iOS](https://apps.apple.com/in/app/netdata-mobile/id6474659622) phone.\n\n### Netdata Mobile App Configuration\nSteps to log in to the Netdata Mobile Application to receive alert and reachability notifications:\n1. Download the Netdata Mobile Application from [Google Play Store](https://play.google.com/store/apps/details?id=cloud.netdata.android&pli=1) or the [iOS App Store](https://apps.apple.com/in/app/netdata-mobile/id6474659622)\n2. Open the App and Choose the Sign In Option\n - Sign In with Email Address: Enter the Email Address of your registered Netdata Cloud Account and Click on the Verification link received by Email on your mobile device.\n - Sign In with QR Code: Scan the QR Code from your `Netdata Cloud` UI under **User Settings** --> **Notifications** --> **Mobile App Notifications** --> **Show QR Code**\n3. Start receiving alert and reachability notifications for your **Space(s)** on a **Paid Business Subscription**\n\n### Netdata Configuration Steps\n1. Click on the **User settings** on the bottom left of your screen (your profile icon)\n2. Click on the **Notifications** tab\n3. Enable **Mobile App Notifications** if disabled (Enabled by default)\n4. Use the **Show QR Code** Option to log in on your mobile device by scanning the **QR Code**\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-opsgenie", "meta": {"name": "Opsgenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.cloud"], "icon_filename": "opsgenie.png"}, "keywords": ["opsgenie", "atlassian"], "overview": "# Opsgenie\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Opsgenie.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on Opsgenie to add new integrations.\n\n### Opsgenie Server Configuration\n\nSteps to configure your Opsgenie to receive notifications from Netdata:\n\n1. Go to the integrations tab of your team and click **Add integration**\n2. Pick **API** from the available integrations. Copy your API Key and press **Save Integration**.\n3. Paste the copied API key into the corresponding field in the **Integration configuration** section of the Opsgenie modal window in Netdata.\n\n### Netdata Configuration Steps\n\n1. 
Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Opsgenie** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Opsgenie:\n - API Key - a key provided on Opsgenie for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.cloud"], "icon_filename": "pagerduty.png"}, "keywords": ["pagerduty"], "overview": "# PagerDuty\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on PagerDuty.\n", "setup": "## Setup\n\n### Prerequisites\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a PagerDuty service to receive events using webhooks.\n\n\n### PagerDuty Server Configuration\nSteps to configure your PagerDuty to receive notifications from Netdata:\n\n1. Create a service to receive events from your services directory page on PagerDuty\n2. At step 3, select `Events API V2` Integration or **View Webhooks** if you already have some defined\n3. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key**, which you will need to add to your notification configuration on the Netdata UI.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **PagerDuty** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For PagerDuty:\n - Integration Key - a 32-character key provided by PagerDuty to receive events on your service.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-rocketchat", "meta": {"name": "RocketChat", "link": "https://www.rocket.chat/", "categories": ["notify.cloud"], "icon_filename": "rocketchat.png"}, "keywords": ["rocketchat"], "overview": "# RocketChat\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on RocketChat.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have permissions on RocketChat to add new integrations.\n- You need to have a RocketChat app on your workspace to receive the webhooks.\n\n### RocketChat Server Configuration\n\nSteps to configure your RocketChat to receive notifications from Netdata:\n\n1. In RocketChat, navigate to Administration > Workspace > Integrations.\n2. Click **+New** at the top right corner.\n3. For more details about each parameter, check [create-a-new-incoming-webhook](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations#create-a-new-incoming-webhook).\n4. After configuring the integration, click **Save**.\n5. You will end up with a webhook endpoint that looks like the one below:\n `https://your-server.rocket.chat/hooks/YYYYYYYYYYYYYYYYYYYYYYYY/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`\n - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your RocketChat instance.\n\n\nFor more details please check RocketChat's article [Incoming webhooks for RocketChat](https://docs.rocket.chat/use-rocket.chat/workspace-administration/integrations/).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **RocketChat** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For RocketChat:\n - Webhook URL - URL provided on RocketChat for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.cloud"], "icon_filename": "slack.png"}, "keywords": ["slack"], "overview": "# Slack\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on Slack.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Business** plan or higher\n- You need to have a Slack app on your workspace to receive the Webhooks.\n\n### Slack Server Configuration\n\nSteps to configure your Slack to receive notifications from Netdata:\n\n1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details\n2. Install the app on your workspace\n3. Configure Webhook URLs for your workspace\n - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**\n - At the bottom of the **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**\n - After pressing that, specify the channel where you want your notifications to be delivered\n - Once completed, copy the Webhook URL that you will need to add to your notification configuration on the Netdata UI\n\nFor more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Slack** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Slack:\n - Webhook URL - URL provided on Slack for the channel you want to receive your notifications.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-splunk", "meta": {"name": "Splunk", "link": "https://splunk.com/", "categories": ["notify.cloud"], "icon_filename": "splunk-black.svg"}, "keywords": ["Splunk"], "overview": "# Splunk\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Splunk.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Splunk notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- URI and token for your Splunk HTTP Event Collector. Refer to the [Splunk documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) for detailed instructions.\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Splunk** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - provide a descriptive name for your configuration to easily identify it.\n - Rooms - select the nodes or areas of your infrastructure you want to receive notifications about.\n - Notification - choose the type of notifications you want to receive: All Alerts and unreachable, All Alerts, Critical only.\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Splunk:\n - HTTP Event Collector URI - The URI of your HTTP event collector in Splunk\n - HTTP Event Collector Token - the token that Splunk provided to you when you created the HTTP Event Collector\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.cloud"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nFrom the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Telegram.\n", "setup": "## Setup\n\n### Prerequisites\n\nTo add Telegram notification you need:\n\n- A Netdata Cloud account\n- Access to the space as an **administrator**\n- Space needs to be on **Business** plan or higher\n- The Telegram bot token and chat ID\n\n### Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Telegram** card click on **+ Add**\n5. 
A modal will be presented to you to enter the required details to enable the configuration:\n - **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n - **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Telegram:\n - Bot Token - the token of your bot\n - Chat ID - the chat ID where your bot will deliver messages\n\n### Getting the Telegram bot token and chat ID\n\n- Bot token: To create a new bot, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. **Start a conversation with your bot or invite it into the group where you want it to send notifications**.\n- To get the chat ID you have two options:\n - Contact the [@myidbot](https://t.me/myidbot) bot and send the `/getid` command to get your personal chat ID, or invite it into a group and use the `/getgroupid` command to get the group chat ID.\n - Alternatively, you can get the chat ID directly from the bot API. Send your bot a command in the chat you want to use, then check `https://api.telegram.org/bot{YourBotToken}/getUpdates`, e.g. `https://api.telegram.org/bot111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5/getUpdates`\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-cloud-webhook", "meta": {"name": "Webhook", "link": "https://en.wikipedia.org/wiki/Webhook", "categories": ["notify.cloud"], "icon_filename": "webhook.svg"}, "keywords": ["generic webhooks", "webhooks"], "overview": "# Webhook\n\nFrom the Netdata Cloud UI, you can manage your space's notification settings and enable the configuration to deliver notifications on a webhook using a predefined schema.\n", "setup": "## Setup\n\n### Prerequisites\n\n- A Netdata Cloud account\n- Access to the Netdata Space as an **administrator**\n- The Netdata Space needs to be on **Pro** plan or higher\n- You need to have an app that allows you to receive webhooks following a predefined schema.\n\n### Netdata Configuration Steps\n\n1. Click on the **Space settings** cog (located above your profile icon)\n2. Click on the **Notification** tab\n3. Click on the **+ Add configuration** button (near the top-right corner of your screen)\n4. On the **Webhook** card click on **+ Add**\n5. A modal will be presented to you to enter the required details to enable the configuration:\n * **Notification settings** are Netdata specific settings\n - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it\n - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to be notified using this configuration\n - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only\n * **Integration configuration** are the specific notification integration required settings, which vary by notification method. 
For Webhook:\n - Webhook URL - the URL of the service that Netdata will send notifications to. In order to keep the communication secured, we only accept HTTPS URLs.\n - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL.\n - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms.\n * Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.\n * Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.\n * Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.\n\n\n ### Webhook service\n\n A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms.\n\n #### Netdata webhook integration\n\n A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the \"webhook URL\") when an event occurs.\n\n Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected.\n\n The notification content sent to the destination service will be a JSON object having these properties:\n\n | field | type | description |\n | :-- | :-- | :-- |\n | message | string | A summary message of the alert. |\n | alarm | string | The alarm the notification is about. |\n | info | string | Additional info related to the alert. |\n | chart | string | The chart associated with the alert. |\n | context | string | The chart context. |\n | space | string | The space where the node that raised the alert is assigned. |\n | rooms | object[object(string,string)] | Object with the list of room names and URLs the node belongs to. |\n | family | string | Context family. |\n | class | string | Classification of the alert, e.g. \"Error\". |\n | severity | string | Alert severity, can be one of \"warning\", \"critical\" or \"clear\". |\n | date | string | Date of the alert in ISO8601 format. |\n | duration | string | Duration the alert has been raised. |\n | additional_active_critical_alerts | integer | Number of additional critical alerts currently existing on the same node. |\n | additional_active_warning_alerts | integer | Number of additional warning alerts currently existing on the same node. |\n | alarm_url | string | Netdata Cloud URL for this alarm. |\n\n #### Extra headers\n\n When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.\n\n By default, the following headers will be sent in the HTTP request:\n\n | **Header** | **Value** |\n |:---:|:---|\n | Content-Type | application/json |\n\n #### Authentication mechanisms\n\n Netdata webhook integration supports 3 different authentication mechanisms:\n\n ##### Mutual TLS authentication (recommended)\n\n In mutual Transport Layer Security (mTLS) authentication, the client and the server authenticate each other using X.509 certificates. 
This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.\n\n This is the default authentication mechanism used if no other method is selected.\n\n To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. In order to achieve this, the Netdata client sending the notification supports mutual TLS (mTLS) to identify itself with a client certificate that your server can validate.\n\n The steps to perform this validation are as follows:\n\n - Store the Netdata CA certificate in a file on your disk. The content of this file should be:\n\n
\n Netdata CA certificate\n\n ```\n -----BEGIN CERTIFICATE-----\n MIIF0jCCA7qgAwIBAgIUDV0rS5jXsyNX33evHEQOwn9fPo0wDQYJKoZIhvcNAQEN\n BQAwgYAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\n Ew1TYW4gRnJhbmNpc2NvMRYwFAYDVQQKEw1OZXRkYXRhLCBJbmMuMRIwEAYDVQQL\n EwlDbG91ZCBTUkUxGDAWBgNVBAMTD05ldGRhdGEgUm9vdCBDQTAeFw0yMzAyMjIx\n MjQzMDBaFw0zMzAyMTkxMjQzMDBaMIGAMQswCQYDVQQGEwJVUzETMBEGA1UECBMK\n Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEWMBQGA1UEChMNTmV0\n ZGF0YSwgSW5jLjESMBAGA1UECxMJQ2xvdWQgU1JFMRgwFgYDVQQDEw9OZXRkYXRh\n IFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwIg7z3R++\n ppQYYVVoMIDlhWO3qVTMsAQoJYEvVa6fqaImUBLW/k19LUaXgUJPohB7gBp1pkjs\n QfY5dBo8iFr7MDHtyiAFjcQV181sITTMBEJwp77R4slOXCvrreizhTt1gvf4S1zL\n qeHBYWEgH0RLrOAqD0jkOHwewVouO0k3Wf2lEbCq3qRk2HeDvkv0LR7sFC+dDms8\n fDHqb/htqhk+FAJELGRqLeaFq1Z5Eq1/9dk4SIeHgK5pdYqsjpBzOTmocgriw6he\n s7F3dOec1ZZdcBEAxOjbYt4e58JwuR81cWAVMmyot5JNCzYVL9e5Vc5n22qt2dmc\n Tzw2rLOPt9pT5bzbmyhcDuNg2Qj/5DySAQ+VQysx91BJRXyUimqE7DwQyLhpQU72\n jw29lf2RHdCPNmk8J1TNropmpz/aI7rkperPugdOmxzP55i48ECbvDF4Wtazi+l+\n 4kx7ieeLfEQgixy4lRUUkrgJlIDOGbw+d2Ag6LtOgwBiBYnDgYpvLucnx5cFupPY\n Cy3VlJ4EKUeQQSsz5kVmvotk9MED4sLx1As8V4e5ViwI5dCsRfKny7BeJ6XNPLnw\n PtMh1hbiqCcDmB1urCqXcMle4sRhKccReYOwkLjLLZ80A+MuJuIEAUUuEPCwywzU\n R7pagYsmvNgmwIIuJtB6mIJBShC7TpJG+wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC\n AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU9IbvOsPSUrpr8H2zSafYVQ9e\n Ft8wDQYJKoZIhvcNAQENBQADggIBABQ08aI31VKZs8jzg+y/QM5cvzXlVhcpkZsY\n 1VVBr0roSBw9Pld9SERrEHto8PVXbadRxeEs4sKivJBKubWAooQ6NTvEB9MHuGnZ\n VCU+N035Gq/mhBZgtIs/Zz33jTB2ju3G4Gm9VTZbVqd0OUxFs41Iqvi0HStC3/Io\n rKi7crubmp5f2cNW1HrS++ScbTM+VaKVgQ2Tg5jOjou8wtA+204iYXlFpw9Q0qnP\n qq6ix7TfLLeRVp6mauwPsAJUgHZluz7yuv3r7TBdukU4ZKUmfAGIPSebtB3EzXfH\n 7Y326xzv0hEpjvDHLy6+yFfTdBSrKPsMHgc9bsf88dnypNYL8TUiEHlcTgCGU8ts\n ud8sWN2M5FEWbHPNYRVfH3xgY2iOYZzn0i+PVyGryOPuzkRHTxDLPIGEWE5susM4\n X4bnNJyKH1AMkBCErR34CLXtAe2ngJlV/V3D4I8CQFJdQkn9tuznohUU/j80xvPH\n FOcDGQYmh4m2aIJtlNVP6+/92Siugb5y7HfslyRK94+bZBg2D86TcCJWaaZOFUrR\n Y3WniYXsqM5/JI4OOzu7dpjtkJUYvwtg7Qb5jmm8Ilf5rQZJhuvsygzX6+WM079y\n nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3\n 5zrbwvQf\n -----END CERTIFICATE-----\n ```\n
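\n\n As an optional sanity check, you can confirm the file you stored parses as a valid certificate with `openssl` (a sketch; adjust `/path/to/Netdata_CA.pem` to wherever you saved it):\n\n ```bash\n # print the subject, issuer and validity window of the stored CA certificate\n openssl x509 -in /path/to/Netdata_CA.pem -noout -subject -issuer -dates\n ```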
\n\n - Enable client certificate validation on the web server that is doing the TLS termination. Below we show you how to perform this configuration in `NGINX` and `Apache`.\n\n **NGINX**\n\n ```bash\n server {\n listen 443 ssl default_server;\n\n # ... existing SSL configuration for server authentication ...\n ssl_verify_client on;\n ssl_client_certificate /path/to/Netdata_CA.pem;\n\n location / {\n if ($ssl_client_s_dn !~ \"CN=app.netdata.cloud\") {\n return 403;\n }\n # ... existing location configuration ...\n }\n }\n ```\n\n **Apache**\n\n ```bash\n Listen 443\n\n <VirtualHost *:443>\n # ... existing SSL configuration for server authentication ...\n SSLVerifyClient require\n SSLCACertificateFile \"/path/to/Netdata_CA.pem\"\n </VirtualHost>\n\n <Directory />\n Require expr \"%{SSL_CLIENT_S_DN_CN} == 'app.netdata.cloud'\"\n # ... existing directory configuration ...\n </Directory>\n ```\n\n ##### Basic authentication\n\n In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.\n\n ##### Bearer token authentication\n\n In bearer token authentication, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.\n\n ##### Challenge secret\n\n To validate that you have ownership of the web application that will receive the webhook events, we are using a challenge-response check mechanism.\n\n This mechanism works as follows:\n\n - The challenge secret parameter that you provide is a shared secret between you and Netdata only.\n - On your request for creating a new Webhook integration, we will make a GET request to the url of the webhook, adding a query parameter `crc_token`, consisting of a random string.\n - You will receive this request on your application and it must construct a response, consisting of a base64-encoded HMAC SHA-256 hash created from the crc_token and the shared secret. The response will be in the format:\n\n ```json\n {\n \"response_token\": \"sha256=9GKoHJYmcHIkhD+C182QWN79YBd+D+Vkj4snmZrfNi4=\"\n }\n ```\n\n - We will compare your application's response with the hash that we will generate using the challenge secret, and if they are the same, the integration creation will succeed.\n\n We will do this validation every time you update your integration configuration.\n\n - Response requirements:\n - A base64 encoded HMAC SHA-256 hash created from the crc_token and the shared secret.\n - Valid response_token and JSON format.\n - Latency less than 5 seconds.\n - 200 HTTP response code.\n\n **Example response token generation in Python:**\n\n Here you can see how to define a handler for a Flask application in Python 3:\n\n ```python\n import base64\n import hashlib\n import hmac\n import json\n\n from flask import Flask, request\n\n app = Flask(__name__)\n\n key = 'YOUR_CHALLENGE_SECRET'\n\n @app.route('/webhooks/netdata')\n def webhook_challenge():\n token = request.args.get('crc_token').encode('ascii')\n\n # creates HMAC SHA-256 hash from the incoming token and your challenge secret\n sha256_hash_digest = hmac.new(key.encode(),\n msg=token,\n digestmod=hashlib.sha256).digest()\n\n # construct response data with base64 encoded hash\n response = {\n 'response_token': 'sha256=' + base64.b64encode(sha256_hash_digest).decode('ascii')\n }\n\n # returns properly formatted json response\n return json.dumps(response)\n ```
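\n\n To try the handler locally, you can simulate the challenge request that Netdata sends (a quick sketch; the port and `crc_token` value below are hypothetical):\n\n ```bash\n # simulate Netdata's challenge GET against the local Flask development server\n curl \"http://localhost:5000/webhooks/netdata?crc_token=test123\"\n ```\n\n The handler should reply with a JSON object containing the computed `response_token`.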
", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/integrations/cloud-notifications/metadata.yaml", "troubleshooting": ""}, {"id": "notify-custom", "meta": {"name": "Custom", "link": "", "categories": ["notify.agent"], "icon_filename": "custom.png"}, "keywords": ["custom"], "overview": "# Custom\n\nNetdata Agent's alert notification feature allows you to send custom notifications to any endpoint you choose.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_CUSTOM | Set `SEND_CUSTOM` to YES | YES | yes |\n| DEFAULT_RECIPIENT_CUSTOM | This value is dependent on how you handle the `${to}` variable inside the `custom_sender()` function. | | yes |\n| custom_sender() | You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the function in this configuration file. | | no |\n\n##### DEFAULT_RECIPIENT_CUSTOM\n\nAll roles will default to this variable if left unconfigured. You can edit `DEFAULT_RECIPIENT_CUSTOM` with the variable you want, in the following entries at the bottom of the same file:\n```\nrole_recipients_custom[sysadmin]=\"systems\"\nrole_recipients_custom[domainadmin]=\"domains\"\nrole_recipients_custom[dba]=\"databases systems\"\nrole_recipients_custom[webmaster]=\"marketing development\"\nrole_recipients_custom[proxyadmin]=\"proxy-admin\"\nrole_recipients_custom[sitemgr]=\"sites\"\n```\n\n\n##### custom_sender()\n\nThe following is a sample `custom_sender()` function in `health_alarm_notify.conf` that sends an SMS via an imaginary HTTPS endpoint of an SMS gateway:\n```\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n```\n\nThe supported variables that you can use for the function's `msg` variable are:\n\n| Variable name | Description |\n|:---------------------------:|:---------------------------------------------------------------------------------|\n| `${alarm}` | Like \"name = value units\" |\n| `${status_message}` | Like \"needs attention\", \"recovered\", \"is critical\" |\n| `${severity}` | Like \"Escalated to CRITICAL\", \"Recovered from WARNING\" |\n| `${raised_for}` | Like \"(alarm was raised for 10 minutes)\" |\n| `${host}` | The host that generated this event |\n| `${url_host}` | Same as ${host} but URL encoded |\n| `${unique_id}` | The unique id of this event |\n| `${alarm_id}` | The unique id of the alarm that generated this event |\n| `${event_id}` | The incremental id of the event, for this alarm id |\n| `${when}` | The timestamp this event occurred |\n| `${name}` | The name of the alarm, as given in netdata health.d entries |\n| `${url_name}` | Same as ${name} but URL encoded |\n| `${chart}` | The name of the chart (type.id) |\n| `${url_chart}` | Same as ${chart} but URL encoded |\n| `${status}` | The current status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${old_status}` | The previous status: REMOVED, UNINITIALIZED, UNDEFINED, CLEAR, WARNING, CRITICAL |\n| `${value}` | The current value of the alarm |\n| `${old_value}` | The previous value of the alarm |\n| `${src}` | The line number and file where the alarm was configured |\n| `${duration}` | The duration in seconds of the previous alarm state |\n| `${duration_txt}` | Same as ${duration} for humans |\n| `${non_clear_duration}` | The total duration in seconds this is/was non-clear |\n| `${non_clear_duration_txt}` | Same as ${non_clear_duration} for humans |\n| `${units}` | The units of the value |\n| `${info}` | A short description of the alarm |\n| `${value_string}` | Friendly value (with units) |\n| `${old_value_string}` | Friendly old value (with units) |\n| `${image}` | The URL of an image to represent the status of the alarm |\n| `${color}` | A color in AABBCC format for the alarm |\n| `${goto_url}` | The URL the user 
can click to see the netdata dashboard |\n| `${calc_expression}` | The expression evaluated to provide the value for the alarm |\n| `${calc_param_values}` | The value of the variables in the evaluated expression |\n| `${total_warnings}` | The total number of alarms in WARNING state on the host |\n| `${total_critical}` | The total number of alarms in CRITICAL state on the host |\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# custom notifications\n\nSEND_CUSTOM=\"YES\"\nDEFAULT_RECIPIENT_CUSTOM=\"\"\n\n# The custom_sender() is a custom function to do whatever you need to do\ncustom_sender() {\n # example human readable SMS\n local msg=\"${host} ${status_message}: ${alarm} ${raised_for}\"\n\n # limit it to 160 characters and encode it for use in a URL\n urlencode \"${msg:0:160}\" >/dev/null; msg=\"${REPLY}\"\n\n # a space separated list of the recipients to send alarms to\n to=\"${1}\"\n\n for phone in ${to}; do\n httpcode=$(docurl -X POST \\\n --data-urlencode \"From=XXX\" \\\n --data-urlencode \"To=${phone}\" \\\n --data-urlencode \"Body=${msg}\" \\\n -u \"${accountsid}:${accounttoken}\" \\\n https://domain.website.com/)\n\n if [ \"${httpcode}\" = \"200\" ]; then\n info \"sent custom notification ${msg} to ${phone}\"\n sent=$((sent + 1))\n else\n error \"failed to send custom notification ${msg} to ${phone} with HTTP error code ${httpcode}.\"\n fi\n done\n}\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/custom/metadata.yaml"}, {"id": "notify-discord", "meta": {"name": "Discord", "link": "https://discord.com/", "categories": ["notify.agent"], "icon_filename": "discord.png"}, "keywords": ["Discord"], "overview": "# Discord\n\nSend notifications to Discord using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). 
You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).\n- One or more Discord channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DISCORD | Set `SEND_DISCORD` to YES | YES | yes |\n| DISCORD_WEBHOOK_URL | set `DISCORD_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_DISCORD | Set `DEFAULT_RECIPIENT_DISCORD` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. | | yes |\n\n##### DEFAULT_RECIPIENT_DISCORD\n\nAll roles will default to this variable if left unconfigured.\nYou can then have different channels per role, by editing `DEFAULT_RECIPIENT_DISCORD` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_discord[sysadmin]=\"systems\"\nrole_recipients_discord[domainadmin]=\"domains\"\nrole_recipients_discord[dba]=\"databases systems\"\nrole_recipients_discord[webmaster]=\"marketing development\"\nrole_recipients_discord[proxyadmin]=\"proxy-admin\"\nrole_recipients_discord[sitemgr]=\"sites\"\n```\n\nThe values you provide should already exist as Discord channels in your server.\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# discord (discordapp.com) global notification options\n\nSEND_DISCORD=\"YES\"\nDISCORD_WEBHOOK_URL=\"https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_DISCORD=\"alerts\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/discord/metadata.yaml"}, {"id": "notify-dynatrace", "meta": {"name": "Dynatrace", "link": "https://dynatrace.com", "categories": ["notify.agent"], "icon_filename": "dynatrace.svg"}, "keywords": ["Dynatrace"], "overview": "# Dynatrace\n\nDynatrace allows you to receive notifications using their Events REST API. 
See the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event) about POSTing an event in the Events API for more details.\nYou can send notifications to Dynatrace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Dynatrace Server. You can use the same server on all your Netdata servers, but make sure it is network-visible from your Netdata hosts. The Dynatrace server address must include the protocol prefix (http:// or https://), for example: https://monitor.example.com.\n- An API Token. Generate a secure access API token that enables access to your Dynatrace monitoring data via the REST-based API. See [Dynatrace API - Authentication](https://www.dynatrace.com/support/help/extend-dynatrace/dynatrace-api/basics/dynatrace-api-authentication/) for more details (a quick way to test the token is shown right after this list).\n- An API Space. This is the URL part of the page you have access to in order to generate the API Token. For example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n- A Server Tag. To generate one on your Dynatrace Server, go to Settings --> Tags --> Manually applied tags and create the Tag. The Netdata alarm is sent as a Dynatrace Event and is correlated with all the hosts tagged with the Tag you have created.\n- Terminal access to the Agent you wish to configure\n\n\n\n
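As a quick way to check the server, Space, and token values together, you can post a test event directly to Dynatrace's Events API v2 (a sketch based on Dynatrace's documented API; the server, Space, and token below are placeholders, and the exact URL shape depends on your deployment):\n\n```bash\ncurl -X POST 'https://monitor.example.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/api/v2/events/ingest' \\\n  -H 'Authorization: Api-Token XXXXXXX' \\\n  -H 'Content-Type: application/json' \\\n  -d '{\"eventType\": \"CUSTOM_INFO\", \"title\": \"Netdata test event\"}'\n```\n\n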
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_DYNATRACE | Set `SEND_DYNATRACE` to YES | YES | yes |\n| DYNATRACE_SERVER | Set `DYNATRACE_SERVER` to the Dynatrace server with the protocol prefix, for example `https://monitor.example.com`. | | yes |\n| DYNATRACE_TOKEN | Set `DYNATRACE_TOKEN` to your Dynatrace API authentication token | | yes |\n| DYNATRACE_SPACE | Set `DYNATRACE_SPACE` to the API Space; it is the URL part of the page you have access to in order to generate the API Token. | | yes |\n| DYNATRACE_TAG_VALUE | Set `DYNATRACE_TAG_VALUE` to your Dynatrace Server Tag. | | yes |\n| DYNATRACE_ANNOTATION_TYPE | `DYNATRACE_ANNOTATION_TYPE` can be left to its default value Netdata Alarm, but you can change it to better fit your needs. | Netdata Alarm | no |\n| DYNATRACE_EVENT | Set `DYNATRACE_EVENT` to the Dynatrace eventType you want. | Netdata Alarm | no |\n\n##### DYNATRACE_SPACE\n\nFor example, the URL for a generated API token might look like: https://monitor.illumineit.com/e/2a93fe0e-4cd5-469a-9d0d-1a064235cfce/#settings/integration/apikeys;gf=all. In that case, the Space is 2a93fe0e-4cd5-469a-9d0d-1a064235cfce.\n\n\n##### DYNATRACE_EVENT\n\n`AVAILABILITY_EVENT`, `CUSTOM_ALERT`, `CUSTOM_ANNOTATION`, `CUSTOM_CONFIGURATION`, `CUSTOM_DEPLOYMENT`, `CUSTOM_INFO`, `ERROR_EVENT`,\n`MARKED_FOR_TERMINATION`, `PERFORMANCE_EVENT`, `RESOURCE_CONTENTION_EVENT`.\nYou can read more [here](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/events-v2/post-event#request-body-objects).\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Dynatrace global notification options\n\nSEND_DYNATRACE=\"YES\"\nDYNATRACE_SERVER=\"https://monitor.example.com\"\nDYNATRACE_TOKEN=\"XXXXXXX\"\nDYNATRACE_SPACE=\"2a93fe0e-4cd5-469a-9d0d-1a064235cfce\"\nDYNATRACE_TAG_VALUE=\"SERVERTAG\"\nDYNATRACE_ANNOTATION_TYPE=\"Netdata Alert\"\nDYNATRACE_EVENT=\"AVAILABILITY_EVENT\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/dynatrace/metadata.yaml"}, {"id": "notify-email", "meta": {"name": "Email", "link": "", "categories": ["notify.agent"], "icon_filename": "email.png"}, "keywords": ["email"], "overview": "# Email\n\nSend notifications via Email using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working sendmail command is required for email alerts to work. Almost all MTAs provide a sendmail interface. Netdata sends all emails as user netdata, so make sure your sendmail works for local users (a quick way to verify this is shown below).\n- Access to the terminal where Netdata Agent is running\n\n\n\n
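Before enabling the integration, you can verify that delivery works for the `netdata` user with a hand-crafted message (a sketch; assumes a local MTA providing sendmail, and the recipient address is a placeholder):\n\n```bash\n# send a test mail as the netdata user\nsudo su -s /bin/bash netdata -c \"printf 'Subject: Netdata test\\n\\nIt works.\\n' | sendmail you@example.com\"\n```\n\n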
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| EMAIL_SENDER | You can change `EMAIL_SENDER` to the email address sending the notifications. | netdata | no |\n| SEND_EMAIL | Set `SEND_EMAIL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_EMAIL | Set `DEFAULT_RECIPIENT_EMAIL` to the email address you want the notifications to be sent to by default. You can define multiple email addresses like this: `alarms@example.com` `systems@example.com`. | root | yes |\n\n##### DEFAULT_RECIPIENT_EMAIL\n\nAll roles will default to this variable if left unconfigured.\nThe `DEFAULT_RECIPIENT_EMAIL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_email[sysadmin]=\"systems@example.com\"\nrole_recipients_email[domainadmin]=\"domains@example.com\"\nrole_recipients_email[dba]=\"databases@example.com systems@example.com\"\nrole_recipients_email[webmaster]=\"marketing@example.com development@example.com\"\nrole_recipients_email[proxyadmin]=\"proxy-admin@example.com\"\nrole_recipients_email[sitemgr]=\"sites@example.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# email global notification options\n\nEMAIL_SENDER=\"example@domain.com\"\nSEND_EMAIL=\"YES\"\nDEFAULT_RECIPIENT_EMAIL=\"recipient@example.com\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/email/metadata.yaml"}, {"id": "notify-flock", "meta": {"name": "Flock", "link": "https://support.flock.com/", "categories": ["notify.agent"], "icon_filename": "flock.png"}, "keywords": ["Flock"], "overview": "# Flock\n\nSend notifications to Flock using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by flock.com. You can use the same on all your Netdata servers (or you can have multiple if you like). Read more about flock webhooks and how to get one [here](https://admin.flock.com/webhooks).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_FLOCK | Set `SEND_FLOCK` to YES | YES | yes |\n| FLOCK_WEBHOOK_URL | set `FLOCK_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_FLOCK | Set `DEFAULT_RECIPIENT_FLOCK` to the Flock channel you want the alert notifications to be sent to. All roles will default to this variable if left unconfigured. 
| | yes |\n\n##### DEFAULT_RECIPIENT_FLOCK\n\nYou can have different channels per role, by editing DEFAULT_RECIPIENT_FLOCK with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_flock[sysadmin]=\"systems\"\nrole_recipients_flock[domainadmin]=\"domains\"\nrole_recipients_flock[dba]=\"databases systems\"\nrole_recipients_flock[webmaster]=\"marketing development\"\nrole_recipients_flock[proxyadmin]=\"proxy-admin\"\nrole_recipients_flock[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# flock (flock.com) global notification options\n\nSEND_FLOCK=\"YES\"\nFLOCK_WEBHOOK_URL=\"https://api.flock.com/hooks/sendMessage/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_FLOCK=\"alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/flock/metadata.yaml"}, {"id": "notify-gotify", "meta": {"name": "Gotify", "link": "https://gotify.net/", "categories": ["notify.agent"], "icon_filename": "gotify.png"}, "keywords": ["gotify"], "overview": "# Gotify\n\n[Gotify](https://gotify.net/) is a self-hosted push notification service created for sending and receiving messages in real time.\nYou can send alerts to your Gotify instance using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An application token. You can generate a new token in the Gotify Web UI.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_GOTIFY | Set `SEND_GOTIFY` to YES | YES | yes |\n| GOTIFY_APP_TOKEN | set `GOTIFY_APP_TOKEN` to the app token you generated. 
| | yes |\n| GOTIFY_APP_URL | Set `GOTIFY_APP_URL` to point to your Gotify instance, for example `https://push.example.domain/`. | | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_GOTIFY=\"YES\"\nGOTIFY_APP_TOKEN=\"XXXXXXXXXXXXXXX\"\nGOTIFY_APP_URL=\"https://push.example.domain/\"\n\n```\n
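\nYou can verify the token and URL pair by publishing a test message straight to Gotify's message endpoint (a sketch using the placeholder values above):\n\n```bash\ncurl -X POST 'https://push.example.domain/message?token=XXXXXXXXXXXXXXX' \\\n  -F 'title=Netdata test' \\\n  -F 'message=Hello from Netdata'\n```\n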
", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/gotify/metadata.yaml"}, {"id": "notify-irc", "meta": {"name": "IRC", "link": "", "categories": ["notify.agent"], "icon_filename": "irc.png"}, "keywords": ["IRC"], "overview": "# IRC\n\nSend notifications to IRC using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The `nc` utility. You can set the path to it, or Netdata will search for it in your system `$PATH`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| nc path | Set the path for nc, otherwise Netdata will search for it in your system $PATH | | yes |\n| SEND_IRC | Set `SEND_IRC` to YES. | YES | yes |\n| IRC_NETWORK | Set `IRC_NETWORK` to the IRC network which your preferred channels belong to. | | yes |\n| IRC_PORT | Set `IRC_PORT` to the IRC port to which a connection will occur. | | no |\n| IRC_NICKNAME | Set `IRC_NICKNAME` to the IRC nickname which is required to send the notification. It must not be an already-registered name, as the connection's MODE is defined as a guest. | | yes |\n| IRC_REALNAME | Set `IRC_REALNAME` to the IRC realname which is required in order to make the connection. | | yes |\n| DEFAULT_RECIPIENT_IRC | You can have different channels per role, by editing `DEFAULT_RECIPIENT_IRC` with the channel you want | | yes |\n\n##### nc path\n\n```sh\n#------------------------------------------------------------------------------\n# external commands\n#\n# The full path of the nc command.\n# If empty, the system $PATH will be searched for it.\n# If not found, irc notifications will be silently disabled.\nnc=\"/usr/bin/nc\"\n```\n\n\n##### DEFAULT_RECIPIENT_IRC\n\nThe `DEFAULT_RECIPIENT_IRC` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_irc[sysadmin]=\"#systems\"\nrole_recipients_irc[domainadmin]=\"#domains\"\nrole_recipients_irc[dba]=\"#databases #systems\"\nrole_recipients_irc[webmaster]=\"#marketing #development\"\nrole_recipients_irc[proxyadmin]=\"#proxy-admin\"\nrole_recipients_irc[sitemgr]=\"#sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# irc notification options\n#\nSEND_IRC=\"YES\"\nDEFAULT_RECIPIENT_IRC=\"#system-alarms\"\nIRC_NETWORK=\"irc.freenode.net\"\nIRC_NICKNAME=\"netdata-alarm-user\"\nIRC_REALNAME=\"netdata-user\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/irc/metadata.yaml"}, {"id": "notify-kavenegar", "meta": {"name": "Kavenegar", "link": "https://kavenegar.com/", "categories": ["notify.agent"], "icon_filename": "kavenegar.png"}, "keywords": ["Kavenegar"], "overview": "# Kavenegar\n\n[Kavenegar](https://kavenegar.com/), a service for software developers based in Iran, provides SMS sending and receiving as well as voice calls through its APIs.\nYou can send notifications to Kavenegar using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The APIKEY and Sender from http://panel.kavenegar.com/client/setting/account\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_KAVENEGAR | Set `SEND_KAVENEGAR` to YES | YES | yes |\n| KAVENEGAR_API_KEY | Set `KAVENEGAR_API_KEY` to your API key. | | yes |\n| KAVENEGAR_SENDER | Set `KAVENEGAR_SENDER` to the value of your Sender. 
| | yes |\n| DEFAULT_RECIPIENT_KAVENEGAR | Set `DEFAULT_RECIPIENT_KAVENEGAR` to the SMS recipient you want the alert notifications to be sent to. You can define multiple recipients like this: 09155555555 09177777777. | | yes |\n\n##### DEFAULT_RECIPIENT_KAVENEGAR\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different SMS recipients per role, by editing `DEFAULT_RECIPIENT_KAVENEGAR` with the SMS recipients you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_kavenegar[sysadmin]=\"09100000000\"\nrole_recipients_kavenegar[domainadmin]=\"09111111111\"\nrole_recipients_kavenegar[dba]=\"0922222222\"\nrole_recipients_kavenegar[webmaster]=\"0933333333\"\nrole_recipients_kavenegar[proxyadmin]=\"0944444444\"\nrole_recipients_kavenegar[sitemgr]=\"0955555555\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Kavenegar (Kavenegar.com) SMS options\n\nSEND_KAVENEGAR=\"YES\"\nKAVENEGAR_API_KEY=\"XXXXXXXXXXXX\"\nKAVENEGAR_SENDER=\"YYYYYYYY\"\nDEFAULT_RECIPIENT_KAVENEGAR=\"0912345678\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/kavenegar/metadata.yaml"}, {"id": "notify-matrix", "meta": {"name": "Matrix", "link": "https://spec.matrix.org/unstable/push-gateway-api/", "categories": ["notify.agent"], "icon_filename": "matrix.svg"}, "keywords": ["Matrix"], "overview": "# Matrix\n\nSend notifications to Matrix network rooms using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The URL of the homeserver (`https://homeserver:port`).\n- Credentials for connecting to the homeserver, in the form of a valid access token for your account (or for a dedicated notification account). These tokens usually don't expire.\n- The room IDs that you want to send the notifications to.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MATRIX | Set `SEND_MATRIX` to YES | YES | yes |\n| MATRIX_HOMESERVER | set `MATRIX_HOMESERVER` to the URL of the Matrix homeserver. 
| | yes |\n| MATRIX_ACCESSTOKEN | Set `MATRIX_ACCESSTOKEN` to the access token from your Matrix account. | | yes |\n| DEFAULT_RECIPIENT_MATRIX | Set `DEFAULT_RECIPIENT_MATRIX` to the rooms you want the alert notifications to be sent to. The format is `!roomid:homeservername`. | | yes |\n\n##### MATRIX_ACCESSTOKEN\n\nTo obtain the access token, you can use the following curl command:\n```\ncurl -XPOST -d '{\"type\":\"m.login.password\", \"user\":\"example\", \"password\":\"wordpass\"}' \"https://homeserver:8448/_matrix/client/r0/login\"\n```\n\n\n##### DEFAULT_RECIPIENT_MATRIX\n\nThe room ids are unique identifiers and can be obtained from the room settings in a Matrix client (e.g. Riot).\n\nYou can define multiple rooms like this: `!roomid1:homeservername` `!roomid2:homeservername`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different rooms per role, by editing `DEFAULT_RECIPIENT_MATRIX` with the `!roomid:homeservername` you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_matrix[sysadmin]=\"!roomid1:homeservername\"\nrole_recipients_matrix[domainadmin]=\"!roomid2:homeservername\"\nrole_recipients_matrix[dba]=\"!roomid3:homeservername\"\nrole_recipients_matrix[webmaster]=\"!roomid4:homeservername\"\nrole_recipients_matrix[proxyadmin]=\"!roomid5:homeservername\"\nrole_recipients_matrix[sitemgr]=\"!roomid6:homeservername\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Matrix notifications\n\nSEND_MATRIX=\"YES\"\nMATRIX_HOMESERVER=\"https://matrix.org:8448\"\nMATRIX_ACCESSTOKEN=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MATRIX=\"!XXXXXXXXXXXX:matrix.org\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/matrix/metadata.yaml"}, {"id": "notify-messagebird", "meta": {"name": "MessageBird", "link": "https://messagebird.com/", "categories": ["notify.agent"], "icon_filename": "messagebird.svg"}, "keywords": ["MessageBird"], "overview": "# MessageBird\n\nSend notifications to MessageBird using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An access key under 'API ACCESS (REST)' (you will want a live key), you can read more [here](https://developers.messagebird.com/quickstarts/sms/test-credits-api-keys/).\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd 
/opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MESSAGEBIRD | Set `SEND_MESSAGEBIRD` to YES | YES | yes |\n| MESSAGEBIRD_ACCESS_KEY | Set `MESSAGEBIRD_ACCESS_KEY` to your API key. | | yes |\n| MESSAGEBIRD_NUMBER | Set `MESSAGEBIRD_NUMBER` to the MessageBird number you want to use for the alert. | | yes |\n| DEFAULT_RECIPIENT_MESSAGEBIRD | Set `DEFAULT_RECIPIENT_MESSAGEBIRD` to the number you want the alert notifications to be sent to as SMS. You can define multiple recipients like this: +15555555555 +17777777777. | | yes |\n\n##### DEFAULT_RECIPIENT_MESSAGEBIRD\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_MESSAGEBIRD` with the number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_messagebird[sysadmin]=\"+15555555555\"\nrole_recipients_messagebird[domainadmin]=\"+15555555556\"\nrole_recipients_messagebird[dba]=\"+15555555557\"\nrole_recipients_messagebird[webmaster]=\"+15555555558\"\nrole_recipients_messagebird[proxyadmin]=\"+15555555559\"\nrole_recipients_messagebird[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Messagebird (messagebird.com) SMS options\n\nSEND_MESSAGEBIRD=\"YES\"\nMESSAGEBIRD_ACCESS_KEY=\"XXXXXXXX\"\nMESSAGEBIRD_NUMBER=\"XXXXXXX\"\nDEFAULT_RECIPIENT_MESSAGEBIRD=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/messagebird/metadata.yaml"}, {"id": "notify-ntfy", "meta": {"name": "ntfy", "link": "https://ntfy.sh/", "categories": ["notify.agent"], "icon_filename": "ntfy.svg"}, "keywords": ["ntfy"], "overview": "# ntfy\n\n[ntfy](https://ntfy.sh/) (pronounce: notify) is a simple HTTP-based [pub-sub](https://en.wikipedia.org/wiki/Publish%E2%80%93subscribe_pattern) notification service. It allows you to send notifications to your phone or desktop via scripts from any computer, entirely without signup, cost or setup. 
It's also [open source](https://github.com/binwiederhier/ntfy) if you want to run your own server.\nYou can send alerts to an ntfy server using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- (Optional) A [self-hosted ntfy server](https://docs.ntfy.sh/faq/#can-i-self-host-it), in case you don't want to use https://ntfy.sh\n- A new [topic](https://ntfy.sh/#subscribe) for the notifications to be published to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_NTFY | Set `SEND_NTFY` to YES | YES | yes |\n| DEFAULT_RECIPIENT_NTFY | URL formed by the server-topic combination you want the alert notifications to be sent to. Unless hosting your own server, the server should always be set to https://ntfy.sh. | | yes |\n| NTFY_USERNAME | The username for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_PASSWORD | The password for netdata to use to authenticate with an ntfy server. | | no |\n| NTFY_ACCESS_TOKEN | The access token for netdata to use to authenticate with an ntfy server. | | no |\n\n##### DEFAULT_RECIPIENT_NTFY\n\nYou can define multiple recipient URLs like this: `https://SERVER1/TOPIC1` `https://SERVER2/TOPIC2`\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different servers and/or topics per role, by editing DEFAULT_RECIPIENT_NTFY with the server-topic combination you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_ntfy[sysadmin]=\"https://SERVER1/TOPIC1\"\nrole_recipients_ntfy[domainadmin]=\"https://SERVER2/TOPIC2\"\nrole_recipients_ntfy[dba]=\"https://SERVER3/TOPIC3\"\nrole_recipients_ntfy[webmaster]=\"https://SERVER4/TOPIC4\"\nrole_recipients_ntfy[proxyadmin]=\"https://SERVER5/TOPIC5\"\nrole_recipients_ntfy[sitemgr]=\"https://SERVER6/TOPIC6\"\n```\n\n\n##### NTFY_USERNAME\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_PASSWORD\n\nOnly useful on self-hosted ntfy instances. See [users and roles](https://docs.ntfy.sh/config/#users-and-roles) for details.\nEnsure that your user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n##### NTFY_ACCESS_TOKEN\n\nThis can be used in place of `NTFY_USERNAME` and `NTFY_PASSWORD` to authenticate with a self-hosted ntfy instance. 
See [access tokens](https://docs.ntfy.sh/config/?h=access+to#access-tokens) for details.\nEnsure that the token user has proper read/write access to the provided topic in `DEFAULT_RECIPIENT_NTFY`\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_NTFY=\"YES\"\nDEFAULT_RECIPIENT_NTFY=\"https://ntfy.sh/netdata-X7seHg7d3Tw9zGOk https://ntfy.sh/netdata-oIPm4IK1IlUtlA30\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/ntfy/metadata.yaml"}, {"id": "notify-opsgenie", "meta": {"name": "OpsGenie", "link": "https://www.atlassian.com/software/opsgenie", "categories": ["notify.agent"], "icon_filename": "opsgenie.png"}, "keywords": ["OpsGenie"], "overview": "# OpsGenie\n\nOpsgenie is an alerting and incident response tool. It is designed to group and filter alarms, build custom routing rules for on-call teams, and correlate deployments and commits to incidents.\nYou can send notifications to Opsgenie using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Opsgenie integration. You can create an [integration](https://docs.opsgenie.com/docs/api-integration) in the [Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_OPSGENIE | Set `SEND_OPSGENIE` to YES | YES | yes |\n| OPSGENIE_API_KEY | Set `OPSGENIE_API_KEY` to your API key. | | yes |\n| OPSGENIE_API_URL | Set `OPSGENIE_API_URL` to the corresponding URL if required, for example there are region-specific API URLs such as `https://eu.api.opsgenie.com`. 
| https://api.opsgenie.com | no |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\nSEND_OPSGENIE=\"YES\"\nOPSGENIE_API_KEY=\"11111111-2222-3333-4444-555555555555\"\nOPSGENIE_API_URL=\"\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/opsgenie/metadata.yaml"}, {"id": "notify-pagerduty", "meta": {"name": "PagerDuty", "link": "https://www.pagerduty.com/", "categories": ["notify.agent"], "icon_filename": "pagerduty.png"}, "keywords": ["PagerDuty"], "overview": "# PagerDuty\n\nPagerDuty is an enterprise incident resolution service that integrates with ITOps and DevOps monitoring stacks to improve operational reliability and agility. From enriching and aggregating events to correlating them into incidents, PagerDuty streamlines the incident management process by reducing alert noise and resolution times.\nYou can send notifications to PagerDuty using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An installation of the [PagerDuty](https://www.pagerduty.com/docs/guides/agent-install-guide/) agent on the node running the Netdata Agent\n- A PagerDuty Generic API service using either the `Events API v2` or `Events API v1`\n- [Add a new service](https://support.pagerduty.com/docs/services-and-integrations#section-configuring-services-and-integrations) to PagerDuty. Click Use our API directly and select either `Events API v2` or `Events API v1`. Once you finish creating the service, click on the Integrations tab to find your Integration Key.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PD | Set `SEND_PD` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PD | Set `DEFAULT_RECIPIENT_PD` to the PagerDuty service key you want the alert notifications to be sent to. You can define multiple service keys like this: `pd_service_key_1` `pd_service_key_2`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_PD\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PD` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pd[sysadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa\"\nrole_recipients_pd[domainadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb\"\nrole_recipients_pd[dba]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxc\"\nrole_recipients_pd[webmaster]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxd\"\nrole_recipients_pd[proxyadmin]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxe\"\nrole_recipients_pd[sitemgr]=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxf\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pagerduty.com notification options\n\nSEND_PD=\"YES\"\nDEFAULT_RECIPIENT_PD=\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\nUSE_PD_VERSION=\"2\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pagerduty/metadata.yaml"}, {"id": "notify-prowl", "meta": {"name": "Prowl", "link": "https://www.prowlapp.com/", "categories": ["notify.agent"], "icon_filename": "prowl.png"}, "keywords": ["Prowl"], "overview": "# Prowl\n\nSend notifications to Prowl using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n\n## Limitations\n\n- Because of how Netdata integrates with Prowl, there is a hard limit of at most 1000 notifications per hour (starting from the first notification sent). Any alerts beyond the first thousand in an hour will be dropped.\n- Warning messages will be sent with the 'High' priority, critical messages will be sent with the 'Emergency' priority, and all other messages will be sent with the normal priority. Opening the notification's associated URL will take you to the Netdata dashboard of the system that issued the alert, directly to the chart that it triggered on.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Prowl API key, which can be requested through the Prowl website after registering\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PROWL | Set `SEND_PROWL` to YES | YES | yes |\n| DEFAULT_RECIPIENT_PROWL | Set `DEFAULT_RECIPIENT_PROWL` to the Prowl API key you want the alert notifications to be sent to. 
You can define multiple API keys like this: `APIKEY1` `APIKEY2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PROWL\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PROWL` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_prowl[sysadmin]=\"AAAAAAAA\"\nrole_recipients_prowl[domainadmin]=\"BBBBBBBBB\"\nrole_recipients_prowl[dba]=\"CCCCCCCCC\"\nrole_recipients_prowl[webmaster]=\"DDDDDDDDDD\"\nrole_recipients_prowl[proxyadmin]=\"EEEEEEEEEE\"\nrole_recipients_prowl[sitemgr]=\"FFFFFFFFFF\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# iOS Push Notifications\n\nSEND_PROWL=\"YES\"\nDEFAULT_RECIPIENT_PROWL=\"XXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/prowl/metadata.yaml"}, {"id": "notify-pushbullet", "meta": {"name": "Pushbullet", "link": "https://www.pushbullet.com/", "categories": ["notify.agent"], "icon_filename": "pushbullet.png"}, "keywords": ["Pushbullet"], "overview": "# Pushbullet\n\nSend notifications to Pushbullet using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Pushbullet access token that can be created in your [account settings](https://www.pushbullet.com/#settings/account) (you can verify it with the quick test below).\n- Access to the terminal where Netdata Agent is running\n\n\n\n
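To verify that the access token works, you can push a test note straight to Pushbullet's documented v2 API (a sketch; the token below is a placeholder):\n\n```bash\ncurl -X POST https://api.pushbullet.com/v2/pushes \\\n  -H 'Access-Token: o.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' \\\n  -H 'Content-Type: application/json' \\\n  -d '{\"type\": \"note\", \"title\": \"Netdata test\", \"body\": \"Hello from Netdata\"}'\n```\n\n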
### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_PUSHBULLET | Set `SEND_PUSHBULLET` to YES | YES | yes |\n| PUSHBULLET_ACCESS_TOKEN | set `PUSHBULLET_ACCESS_TOKEN` to the access token you generated. | | yes |\n| DEFAULT_RECIPIENT_PUSHBULLET | Set `DEFAULT_RECIPIENT_PUSHBULLET` to the email (e.g. `example@domain.com`) or the channel tag (e.g. `#channel`) you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHBULLET\n\nYou can define multiple entries like this: user1@email.com user2@email.com.\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHBULLET` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushbullet[sysadmin]=\"user1@email.com\"\nrole_recipients_pushbullet[domainadmin]=\"user2@mail.com\"\nrole_recipients_pushbullet[dba]=\"#channel1\"\nrole_recipients_pushbullet[webmaster]=\"#channel2\"\nrole_recipients_pushbullet[proxyadmin]=\"user3@mail.com\"\nrole_recipients_pushbullet[sitemgr]=\"user4@mail.com\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushbullet (pushbullet.com) push notification options\n\nSEND_PUSHBULLET=\"YES\"\nPUSHBULLET_ACCESS_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHBULLET=\"admin1@example.com admin3@somemail.com #examplechanneltag #anotherchanneltag\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushbullet/metadata.yaml"}, {"id": "notify-pushover", "meta": {"name": "PushOver", "link": "https://pushover.net/", "categories": ["notify.agent"], "icon_filename": "pushover.png"}, "keywords": ["PushOver"], "overview": "# PushOver\n\nSend notifications to Pushover using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n- Netdata will send warning messages with priority 0 and critical messages with priority 1.\n- Pushover allows you to select do-not-disturb hours. The way this is configured, critical notifications will ring and vibrate your phone, even during do-not-disturb hours.\n- All other notifications will be delivered silently.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- An Application token. You can use the same on all your Netdata servers.\n- A User token for each user you are going to send notifications to. This is the actual recipient of the notification (you can verify both tokens with the quick test below).\n- Access to the terminal where Netdata Agent is running\n\n\n\n
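You can verify the Application and User tokens with Pushover's documented API before wiring them into Netdata (a sketch; both tokens below are placeholders):\n\n```bash\ncurl -s \\\n  --form-string 'token=APP_TOKEN' \\\n  --form-string 'user=USER_KEY' \\\n  --form-string 'message=Hello from Netdata' \\\n  https://api.pushover.net/1/messages.json\n```\n\n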
| | yes |\n| DEFAULT_RECIPIENT_PUSHOVER | Set `DEFAULT_RECIPIENT_PUSHOVER` to the Pushover User token you want the alert notifications to be sent to. You can define multiple User tokens like this: `USERTOKEN1` `USERTOKEN2`. | | yes |\n\n##### DEFAULT_RECIPIENT_PUSHOVER\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_PUSHOVER` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_pushover[sysadmin]=\"USERTOKEN1\"\nrole_recipients_pushover[domainadmin]=\"USERTOKEN2\"\nrole_recipients_pushover[dba]=\"USERTOKEN3 USERTOKEN4\"\nrole_recipients_pushover[webmaster]=\"USERTOKEN5\"\nrole_recipients_pushover[proxyadmin]=\"USERTOKEN6\"\nrole_recipients_pushover[sitemgr]=\"USERTOKEN7\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# pushover (pushover.net) global notification options\n\nSEND_PUSHOVER=\"YES\"\nPUSHOVER_APP_TOKEN=\"XXXXXXXXX\"\nDEFAULT_RECIPIENT_PUSHOVER=\"USERTOKEN\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/pushover/metadata.yaml"}, {"id": "notify-rocketchat", "meta": {"name": "RocketChat", "link": "https://rocket.chat/", "categories": ["notify.agent"], "icon_filename": "rocketchat.png"}, "keywords": ["RocketChat"], "overview": "# RocketChat\n\nSend notifications to Rocket.Chat using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by RocketChat. You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_ROCKETCHAT | Set `SEND_ROCKETCHAT` to `YES`. | YES | yes |\n| ROCKETCHAT_WEBHOOK_URL | Set `ROCKETCHAT_WEBHOOK_URL` to your webhook URL. | | yes |\n| DEFAULT_RECIPIENT_ROCKETCHAT | Set `DEFAULT_RECIPIENT_ROCKETCHAT` to the channel you want the alert notifications to be sent to. You can define multiple channels like this: `alerts` `systems`. 
| | yes |\n\n##### DEFAULT_RECIPIENT_ROCKETCHAT\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_ROCKETCHAT` can be edited in the following entries at the bottom of the same file:\n```conf\nrole_recipients_rocketchat[sysadmin]=\"systems\"\nrole_recipients_rocketchat[domainadmin]=\"domains\"\nrole_recipients_rocketchat[dba]=\"databases systems\"\nrole_recipients_rocketchat[webmaster]=\"marketing development\"\nrole_recipients_rocketchat[proxyadmin]=\"proxy_admin\"\nrole_recipients_rocketchat[sitemgr]=\"sites\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# rocketchat (rocket.chat) global notification options\n\nSEND_ROCKETCHAT=\"YES\"\nROCKETCHAT_WEBHOOK_URL=\"\"\nDEFAULT_RECIPIENT_ROCKETCHAT=\"monitoring_alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/rocketchat/metadata.yaml"}, {"id": "notify-slack", "meta": {"name": "Slack", "link": "https://slack.com/", "categories": ["notify.agent"], "icon_filename": "slack.png"}, "keywords": ["Slack"], "overview": "# Slack\n\nSend notifications to a Slack workspace using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A Slack app with an incoming webhook; read Slack's guide on the topic [here](https://api.slack.com/messaging/webhooks).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_SLACK | Set `SEND_SLACK` to `YES`. | YES | yes |\n| SLACK_WEBHOOK_URL | Set `SLACK_WEBHOOK_URL` to your Slack app's webhook URL. | | yes |\n| DEFAULT_RECIPIENT_SLACK | Set `DEFAULT_RECIPIENT_SLACK` to the Slack channel your Slack app is set to send messages to. The syntax for channels is `#channel` or `channel`. 
| | yes |\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# slack (slack.com) global notification options\n\nSEND_SLACK=\"YES\"\nSLACK_WEBHOOK_URL=\"https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_SLACK=\"#alarms\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/slack/metadata.yaml"}, {"id": "notify-sms", "meta": {"name": "SMS", "link": "http://smstools3.kekekasvi.com/", "categories": ["notify.agent"], "icon_filename": "sms.svg"}, "keywords": ["SMS tools 3", "SMS", "Messaging"], "overview": "# SMS\n\nSend notifications to `smstools3` using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\nSMS Server Tools 3 is an SMS gateway package that can send and receive short messages through GSM modems and mobile phones.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- [Install](http://smstools3.kekekasvi.com/index.php?p=compiling) and [configure](http://smstools3.kekekasvi.com/index.php?p=configure) `smsd`\n- Ensure that the user `netdata` can execute `sendsms`. Any user executing `sendsms` needs to:\n - Have write permissions to `/tmp` and `/var/spool/sms/outgoing`\n - Be a member of group `smsd`\n - To verify that the steps above are successful, run `su netdata` and execute `sendsms phone message`.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| sendsms | Set the path for `sendsms`, otherwise Netdata will search for it in your system `$PATH`. | YES | yes |\n| SEND_SMS | Set `SEND_SMS` to `YES`. | | yes |\n| DEFAULT_RECIPIENT_SMS | Set `DEFAULT_RECIPIENT_SMS` to the phone number you want the alert notifications to be sent to. You can define multiple phone numbers like this: `PHONE1` `PHONE2`. 
| | yes |\n\n##### sendsms\n\n```conf\n# The full path of the sendsms command (smstools3).\n# If empty, the system $PATH will be searched for it.\n# If not found, SMS notifications will be silently disabled.\nsendsms=\"/usr/bin/sendsms\"\n```\n\n\n##### DEFAULT_RECIPIENT_SMS\n\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different phone numbers per role, by editing `DEFAULT_RECIPIENT_SMS` with the phone number you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_sms[sysadmin]=\"PHONE1\"\nrole_recipients_sms[domainadmin]=\"PHONE2\"\nrole_recipients_sms[dba]=\"PHONE3\"\nrole_recipients_sms[webmaster]=\"PHONE4\"\nrole_recipients_sms[proxyadmin]=\"PHONE5\"\nrole_recipients_sms[sitemgr]=\"PHONE6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# SMS Server Tools 3 (smstools3) global notification options\n\nSEND_SMS=\"YES\"\nDEFAULT_RECIPIENT_SMS=\"1234567890\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/smstools3/metadata.yaml"}, {"id": "notify-syslog", "meta": {"name": "syslog", "link": "", "categories": ["notify.agent"], "icon_filename": "syslog.png"}, "keywords": ["syslog"], "overview": "# syslog\n\nSend notifications to Syslog using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A working `logger` command. This is the case on pretty much every Linux system in existence, and most BSD systems.\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SYSLOG_FACILITY | Set `SYSLOG_FACILITY` to the facility used for logging. By default this value is set to `local6`. | | yes |\n| DEFAULT_RECIPIENT_SYSLOG | Set `DEFAULT_RECIPIENT_SYSLOG` to the recipient you want the alert notifications to be sent to. | | yes |\n| SEND_SYSLOG | Set `SEND_SYSLOG` to `YES`; make sure you have everything else configured before turning this on. | | yes |\n\n##### DEFAULT_RECIPIENT_SYSLOG\n\nTargets are defined as follows:\n\n```\n[[facility.level][@host[:port]]/]prefix\n```\n\n`prefix` defines what the log messages are prefixed with. 
By default, all lines are prefixed with 'netdata'.\n\nThe facility and level are the standard syslog facility and level options; for more info on them, see your local `logger` and syslog documentation. By default, Netdata will log to the `local6` facility, with a log level dependent on the type of message (`crit` for CRITICAL, `warning` for WARNING, and `info` for everything else).\n\nYou can configure sending directly to remote log servers by specifying a host (and optionally a port). However, this has a somewhat high overhead, so it is much preferred to use your local syslog daemon to handle the forwarding of messages to remote systems (pretty much all of them allow at least simple forwarding, and most of the really popular ones support complex queueing and routing of messages to remote log servers).\n\nYou can define multiple recipients like this: `daemon.notice@loghost:514/netdata` `daemon.notice@loghost2:514/netdata`.\nAll roles will default to this variable if left unconfigured.\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_SYSLOG` with the recipient you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_syslog[sysadmin]=\"daemon.notice@loghost1:514/netdata\"\nrole_recipients_syslog[domainadmin]=\"daemon.notice@loghost2:514/netdata\"\nrole_recipients_syslog[dba]=\"daemon.notice@loghost3:514/netdata\"\nrole_recipients_syslog[webmaster]=\"daemon.notice@loghost4:514/netdata\"\nrole_recipients_syslog[proxyadmin]=\"daemon.notice@loghost5:514/netdata\"\nrole_recipients_syslog[sitemgr]=\"daemon.notice@loghost6:514/netdata\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# syslog notifications\n\nSEND_SYSLOG=\"YES\"\nSYSLOG_FACILITY='local6'\nDEFAULT_RECIPIENT_SYSLOG=\"daemon.notice@loghost6:514/netdata\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/syslog/metadata.yaml"}, {"id": "notify-teams", "meta": {"name": "Microsoft Teams", "link": "https://www.microsoft.com/en-us/microsoft-teams/log-in", "categories": ["notify.agent"], "icon_filename": "msteams.svg"}, "keywords": ["Microsoft", "Teams", "MS teams"], "overview": "# Microsoft Teams\n\nYou can send Netdata alerts to Microsoft Teams using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- The incoming webhook URL as given by Microsoft Teams. 
You can use the same on all your Netdata servers (or you can have multiple if you like).\n- One or more channels to post the messages to\n- Access to the terminal where Netdata Agent is running\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_MSTEAMS | Set `SEND_MSTEAMS` to `YES`. | YES | yes |\n| MSTEAMS_WEBHOOK_URL | Set `MSTEAMS_WEBHOOK_URL` to the incoming webhook URL as given by Microsoft Teams. | | yes |\n| DEFAULT_RECIPIENT_MSTEAMS | Set `DEFAULT_RECIPIENT_MSTEAMS` to the encoded Microsoft Teams channel name you want the alert notifications to be sent to. | | yes |\n\n##### DEFAULT_RECIPIENT_MSTEAMS\n\nIn Microsoft Teams the channel name is encoded in the URI after `/IncomingWebhook/`. You can define multiple channels like this: `CHANNEL1` `CHANNEL2`.\n\nAll roles will default to this variable if left unconfigured.\n\nYou can have different channels per role, by editing `DEFAULT_RECIPIENT_MSTEAMS` with the channel you want, in the following entries at the bottom of the same file:\n```conf\nrole_recipients_msteams[sysadmin]=\"CHANNEL1\"\nrole_recipients_msteams[domainadmin]=\"CHANNEL2\"\nrole_recipients_msteams[dba]=\"CHANNEL3\"\nrole_recipients_msteams[webmaster]=\"CHANNEL4\"\nrole_recipients_msteams[proxyadmin]=\"CHANNEL5\"\nrole_recipients_msteams[sitemgr]=\"CHANNEL6\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Microsoft Teams (office.com) global notification options\n\nSEND_MSTEAMS=\"YES\"\nMSTEAMS_WEBHOOK_URL=\"https://outlook.office.com/webhook/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX@XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/IncomingWebhook/CHANNEL/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nDEFAULT_RECIPIENT_MSTEAMS=\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/msteams/metadata.yaml"}, {"id": "notify-telegram", "meta": {"name": "Telegram", "link": "https://telegram.org/", "categories": ["notify.agent"], "icon_filename": "telegram.svg"}, "keywords": ["Telegram"], "overview": "# Telegram\n\nSend notifications to Telegram using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- A bot token. 
To get one, contact the [@BotFather](https://t.me/BotFather) bot, send the command `/newbot`, and follow the instructions. Invite your bot to a group where you want it to send messages.\n- The chat ID for every chat you want to send messages to. Invite the [@myidbot](https://t.me/myidbot) bot to the group that will receive notifications, and write the command `/getgroupid@myidbot` to get the group chat ID. Group IDs start with a hyphen, supergroup IDs start with `-100`.\n- Terminal access to the Agent you wish to configure.\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TELEGRAM | Set `SEND_TELEGRAM` to `YES`. | YES | yes |\n| TELEGRAM_BOT_TOKEN | Set `TELEGRAM_BOT_TOKEN` to your bot token. | | yes |\n| DEFAULT_RECIPIENT_TELEGRAM | Set `DEFAULT_RECIPIENT_TELEGRAM` to the chat ID you want the alert notifications to be sent to. You can define multiple chat IDs like this: `-49999333322` `-1009999222255`. | | yes |\n\n##### DEFAULT_RECIPIENT_TELEGRAM\n\nAll roles will default to this variable if left unconfigured.\n\nThe `DEFAULT_RECIPIENT_TELEGRAM` can be edited in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_telegram[sysadmin]=\"-49999333324\"\nrole_recipients_telegram[domainadmin]=\"-49999333389\"\nrole_recipients_telegram[dba]=\"-10099992222\"\nrole_recipients_telegram[webmaster]=\"-10099992222 -49999333389\"\nrole_recipients_telegram[proxyadmin]=\"-49999333344\"\nrole_recipients_telegram[sitemgr]=\"-49999333876\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# telegram (telegram.org) global notification options\n\nSEND_TELEGRAM=\"YES\"\nTELEGRAM_BOT_TOKEN=\"111122223:7OpFlFFRzRBbrUUmIjj5HF9Ox2pYJZy5\"\nDEFAULT_RECIPIENT_TELEGRAM=\"-49999333876\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/telegram/metadata.yaml"}, {"id": "notify-twilio", "meta": {"name": "Twilio", "link": "https://www.twilio.com/", "categories": ["notify.agent"], "icon_filename": "twilio.png"}, "keywords": ["Twilio"], "overview": "# Twilio\n\nSend notifications to Twilio using Netdata's Agent alert notification feature, which supports dozens of endpoints, user roles, and more.\n\n", "setup": "## Setup\n\n### Prerequisites\n\n#### \n\n- Get your SID and Token from https://www.twilio.com/console\n- 
Terminal access to the Agent you wish to configure\n\n\n\n### Configuration\n\n#### File\n\nThe configuration file name for this integration is `health_alarm_notify.conf`.\n\n\nYou can edit the configuration file using the `edit-config` script from the\nNetdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).\n\n```bash\ncd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata\nsudo ./edit-config health_alarm_notify.conf\n```\n#### Options\n\nThe following options can be defined for this notification\n\n| Name | Description | Default | Required |\n|:----|:-----------|:-------|:--------:|\n| SEND_TWILIO | Set `SEND_TWILIO` to `YES`. | YES | yes |\n| TWILIO_ACCOUNT_SID | Set `TWILIO_ACCOUNT_SID` to your account SID. | | yes |\n| TWILIO_ACCOUNT_TOKEN | Set `TWILIO_ACCOUNT_TOKEN` to your account token. | | yes |\n| TWILIO_NUMBER | Set `TWILIO_NUMBER` to your account's number. | | yes |\n| DEFAULT_RECIPIENT_TWILIO | Set `DEFAULT_RECIPIENT_TWILIO` to the number you want the alert notifications to be sent to. You can define multiple numbers like this: `+15555555555` `+17777777777`. | | yes |\n\n##### DEFAULT_RECIPIENT_TWILIO\n\nYou can then have different recipients per role, by editing `DEFAULT_RECIPIENT_TWILIO` with the recipient's number you want, in the following entries at the bottom of the same file:\n\n```conf\nrole_recipients_twilio[sysadmin]=\"+15555555555\"\nrole_recipients_twilio[domainadmin]=\"+15555555556\"\nrole_recipients_twilio[dba]=\"+15555555557\"\nrole_recipients_twilio[webmaster]=\"+15555555558\"\nrole_recipients_twilio[proxyadmin]=\"+15555555559\"\nrole_recipients_twilio[sitemgr]=\"+15555555550\"\n```\n\n\n#### Examples\n\n##### Basic Configuration\n\n\n\n```yaml\n#------------------------------------------------------------------------------\n# Twilio (twilio.com) SMS options\n\nSEND_TWILIO=\"YES\"\nTWILIO_ACCOUNT_SID=\"xxxxxxxxx\"\nTWILIO_ACCOUNT_TOKEN=\"xxxxxxxxxx\"\nTWILIO_NUMBER=\"xxxxxxxxxxx\"\nDEFAULT_RECIPIENT_TWILIO=\"+15555555555\"\n\n```\n", "troubleshooting": "## Troubleshooting\n\n### Test Notification\n\nYou can run the following command by hand, to test alerts configuration:\n\n```bash\n# become user netdata\nsudo su -s /bin/bash netdata\n\n# enable debugging info on the console\nexport NETDATA_ALARM_NOTIFY_DEBUG=1\n\n# send test alarms to sysadmin\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test\n\n# send test alarms to any role\n/usr/libexec/netdata/plugins.d/alarm-notify.sh test \"ROLE\"\n```\n\nNote that this will test _all_ alert mechanisms for the selected role.\n\n", "integration_type": "notification", "edit_link": "https://github.com/netdata/netdata/blob/master/src/health/notifications/twilio/metadata.yaml"}]} \ No newline at end of file diff --git a/netdata-installer.sh b/netdata-installer.sh index d2fe1a2abde897..773548785491e9 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -914,7 +914,7 @@ bundle_libbpf() { bundle_libbpf copy_co_re() { - cp -R "${1}/includes" "src/collectors/ebpf.plugin/" + cp -R "${1}/includes" "src/libnetdata/ebpf/" } bundle_ebpf_co_re() { diff --git a/netdata.cppcheck b/netdata.cppcheck deleted file mode 100644 index 245c7a005fda31..00000000000000 --- a/netdata.cppcheck +++ /dev/null @@ -1,17 +0,0 @@ - - - cppcheck-build - -
- - - cppcheck-lib - gnu - posix - - - nullPointerRedundantCheck - unusedFunction - readdirCalled - - diff --git a/packaging/bundle-ebpf-co-re.sh b/packaging/bundle-ebpf-co-re.sh index 82609fec1596d1..572333cc984653 100755 --- a/packaging/bundle-ebpf-co-re.sh +++ b/packaging/bundle-ebpf-co-re.sh @@ -6,4 +6,4 @@ CORE_VERSION="$(cat "${SRCDIR}/packaging/ebpf-co-re.version")" CORE_TARBALL="netdata-ebpf-co-re-glibc-${CORE_VERSION}.tar.xz" curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/ebpf-co-re/releases/download/${CORE_VERSION}/${CORE_TARBALL}" > "${CORE_TARBALL}" || exit 1 grep "${CORE_TARBALL}" "${SRCDIR}/packaging/ebpf-co-re.checksums" | sha256sum -c - || exit 1 -tar -xa --no-same-owner -f "${CORE_TARBALL}" -C "${SRCDIR}/src/collectors/ebpf.plugin" || exit 1 +tar -xa --no-same-owner -f "${CORE_TARBALL}" -C "${SRCDIR}/src/libnetdata/ebpf" || exit 1 diff --git a/packaging/version b/packaging/version index 32fcdfe12b44cc..570857ac33b8c0 100644 --- a/packaging/version +++ b/packaging/version @@ -1 +1 @@ -v1.44.0-358-nightly +v1.44.0-370-nightly diff --git a/src/collectors/apps.plugin/README.md b/src/collectors/apps.plugin/README.md index 7416e3b9fc43f4..27d5d9a450e26a 100644 --- a/src/collectors/apps.plugin/README.md +++ b/src/collectors/apps.plugin/README.md @@ -237,7 +237,7 @@ Examples below for process group `sql`: - Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_pipes&dimensions=sql&value_color=green=0%7Cred) - Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_sockets&dimensions=sql&value_color=green%3E=3%7Cred) -For more information about badges check [Generating Badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md) +For more information about badges check [Generating Badges](https://github.com/netdata/netdata/blob/master/src/web/api/badges/README.md) ## Comparison with console tools diff --git a/src/collectors/cgroups.plugin/cgroup-network.c b/src/collectors/cgroups.plugin/cgroup-network.c index 07e7acd02e7bbd..085a6aa6f19604 100644 --- a/src/collectors/cgroups.plugin/cgroup-network.c +++ b/src/collectors/cgroups.plugin/cgroup-network.c @@ -7,7 +7,6 @@ #ifndef _GNU_SOURCE #define _GNU_SOURCE /* See feature_test_macros(7) */ #endif -#include #endif char env_netdata_host_prefix[FILENAME_MAX + 50] = ""; diff --git a/src/collectors/ebpf.plugin/ebpf.h b/src/collectors/ebpf.plugin/ebpf.h index ad7c5a94cddfdc..c0f9b39fb3fc5a 100644 --- a/src/collectors/ebpf.plugin/ebpf.h +++ b/src/collectors/ebpf.plugin/ebpf.h @@ -38,17 +38,19 @@ #define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf" #ifdef LIBBPF_MAJOR_VERSION // BTF code -#include "includes/cachestat.skel.h" -#include "includes/dc.skel.h" -#include "includes/disk.skel.h" -#include "includes/fd.skel.h" -#include "includes/hardirq.skel.h" -#include "includes/mdflush.skel.h" -#include "includes/mount.skel.h" -#include "includes/shm.skel.h" -#include "includes/socket.skel.h" -#include "includes/swap.skel.h" -#include "includes/vfs.skel.h" +#include "libnetdata/ebpf/includes/cachestat.skel.h" +#include "libnetdata/ebpf/includes/dc.skel.h" +#include "libnetdata/ebpf/includes/disk.skel.h" +#include "libnetdata/ebpf/includes/fd.skel.h" +#include "libnetdata/ebpf/includes/filesystem.skel.h" +#include "libnetdata/ebpf/includes/hardirq.skel.h" +#include "libnetdata/ebpf/includes/mdflush.skel.h" +#include "libnetdata/ebpf/includes/mount.skel.h" +#include "libnetdata/ebpf/includes/shm.skel.h" +#include "libnetdata/ebpf/includes/sync.skel.h" 
+#include "libnetdata/ebpf/includes/socket.skel.h" +#include "libnetdata/ebpf/includes/swap.skel.h" +#include "libnetdata/ebpf/includes/vfs.skel.h" extern struct cachestat_bpf *cachestat_bpf_obj; extern struct dc_bpf *dc_bpf_obj; diff --git a/src/collectors/ebpf.plugin/ebpf_filesystem.h b/src/collectors/ebpf.plugin/ebpf_filesystem.h index f58d7fbe43849e..cd54be57e921c8 100644 --- a/src/collectors/ebpf.plugin/ebpf_filesystem.h +++ b/src/collectors/ebpf.plugin/ebpf_filesystem.h @@ -8,9 +8,6 @@ #define NETDATA_EBPF_FS_MODULE_DESC "Monitor filesystem latency for: btrfs, ext4, nfs, xfs and zfs." #include "ebpf.h" -#ifdef LIBBPF_MAJOR_VERSION -#include "includes/filesystem.skel.h" -#endif #define NETDATA_FS_MAX_DIST_NAME 64UL diff --git a/src/collectors/ebpf.plugin/ebpf_sync.h b/src/collectors/ebpf.plugin/ebpf_sync.h index bd1bb78b0d3f70..3736955652b436 100644 --- a/src/collectors/ebpf.plugin/ebpf_sync.h +++ b/src/collectors/ebpf.plugin/ebpf_sync.h @@ -3,10 +3,6 @@ #ifndef NETDATA_EBPF_SYNC_H #define NETDATA_EBPF_SYNC_H 1 -#ifdef LIBBPF_MAJOR_VERSION -#include "includes/sync.skel.h" -#endif - // Module name & description #define NETDATA_EBPF_MODULE_NAME_SYNC "sync" #define NETDATA_EBPF_SYNC_MODULE_DESC "Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2)." diff --git a/src/collectors/python.d.plugin/anomalies/README.md b/src/collectors/python.d.plugin/anomalies/README.md index 766854aecb0346..8a86274585562d 100644 --- a/src/collectors/python.d.plugin/anomalies/README.md +++ b/src/collectors/python.d.plugin/anomalies/README.md @@ -219,7 +219,7 @@ If you would like to go deeper on what exactly the anomalies collector is doing ## Notes -- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the required data for each chart. +- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the required data for each chart. - Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/). - It may take a few hours or so (depending on your choice of `train_secs_n`) for the collector to 'settle' into it's typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node. - As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it. 
diff --git a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md index df20429be847fd..b36dc7297f52b5 100644 --- a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md +++ b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md @@ -20,7 +20,7 @@ Module: zscores By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. -This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev` +This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the `mean` and `stddev` for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over diff --git a/src/collectors/python.d.plugin/zscores/metadata.yaml b/src/collectors/python.d.plugin/zscores/metadata.yaml index 388e9b460c40c4..2f29603923b792 100644 --- a/src/collectors/python.d.plugin/zscores/metadata.yaml +++ b/src/collectors/python.d.plugin/zscores/metadata.yaml @@ -27,7 +27,7 @@ modules: metrics_description: | By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis. method_description: | - This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev` + This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/src/web/api/README.md) to get the `mean` and `stddev` for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over diff --git a/src/collectors/statsd.plugin/README.md b/src/collectors/statsd.plugin/README.md index 6e6e75e2243e35..bc8dbb6b1bddfd 100644 --- a/src/collectors/statsd.plugin/README.md +++ b/src/collectors/statsd.plugin/README.md @@ -305,7 +305,7 @@ Synthetic charts are organized in - **charts for each application** aka family in Netdata Dashboard. - **StatsD metrics for each chart** /aka charts and context Netdata Dashboard. -> You can read more about how the Netdata Agent organizes information in the relevant [documentation](https://github.com/netdata/netdata/blob/master/web/README.md) +> You can read more about how the Netdata Agent organizes information in the relevant [documentation](https://github.com/netdata/netdata/blob/master/src/web/README.md) For each application you need to create a `.conf` file in `/etc/netdata/statsd.d`. @@ -832,7 +832,7 @@ Context is a second way to group metrics, when the metrics are of the same natur our case, if we ran several different load testing experiments side-by-side, we could define the same app, but different context (e.g `http_requests.experiment1`, `http_requests.experiment2`). -Find more details about family and context in our [documentation](https://github.com/netdata/netdata/blob/master/web/README.md#families). 
+Find more details about family and context in our [documentation](https://github.com/netdata/netdata/blob/master/src/web/README.md#families). #### Dimensions @@ -981,7 +981,7 @@ At this point, you have used StatsD to gather metrics for k6, creating a whole n Netdata dashboard in the process. Moreover, you can further customize the icon of the particular section, as well as the description for each chart. -To edit the section, please follow the Netdata [documentation](https://github.com/netdata/netdata/blob/master/web/gui/README.md#customizing-the-local-dashboard). +To edit the section, please follow the Netdata [documentation](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md#customizing-the-local-dashboard). While the following configuration will be placed in a new file, as the documentation suggests, it is instructing to use `dashboard_info.js` as a template. Open the file and see how the rest of sections and collectors have been defined. diff --git a/src/daemon/config/README.md b/src/daemon/config/README.md index 61738236822033..ffe813c50827ff 100644 --- a/src/daemon/config/README.md +++ b/src/daemon/config/README.md @@ -25,7 +25,7 @@ adapt the general behavior of Netdata, in great detail. You can find all these s accessing the URL `https://netdata.server.hostname:19999/netdata.conf`. For example check the configuration file of [netdata.firehol.org](http://netdata.firehol.org/netdata.conf). HTTP access to this file is limited by default to [private IPs](https://en.wikipedia.org/wiki/Private_network), via -the [web server access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists). +the [web server access lists](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#access-lists). `netdata.conf` has sections stated with `[section]`. You will see the following sections: @@ -38,7 +38,7 @@ the [web server access lists](https://github.com/netdata/netdata/blob/master/web 6. `[sqlite]` to [configure](#sqlite-section-options) the [Netdata daemon](https://github.com/netdata/netdata/blob/master/src/daemon/README.md) SQLite settings. 7. `[ml]` to configure settings for [machine learning](https://github.com/netdata/netdata/blob/master/src/ml/README.md). 8. `[health]` to [configure](#health-section-options) general settings for [health monitoring](https://github.com/netdata/netdata/blob/master/src/health/README.md). -9. `[web]` to [configure the web server](https://github.com/netdata/netdata/blob/master/web/server/README.md). +9. `[web]` to [configure the web server](https://github.com/netdata/netdata/blob/master/src/web/server/README.md). 10. `[registry]` for the [Netdata registry](https://github.com/netdata/netdata/blob/master/src/registry/README.md). 11. `[global statistics]` for the [Netdata registry](https://github.com/netdata/netdata/blob/master/src/registry/README.md). 12. `[statsd]` for the general settings of the [stats.d.plugin](https://github.com/netdata/netdata/blob/master/src/collectors/statsd.plugin/README.md). 
@@ -181,7 +181,7 @@ monitoring](https://github.com/netdata/netdata/blob/master/src/health/README.md) ### [web] section options -Refer to the [web server documentation](https://github.com/netdata/netdata/blob/master/web/server/README.md) +Refer to the [web server documentation](https://github.com/netdata/netdata/blob/master/src/web/server/README.md) ### [plugins] section options diff --git a/src/daemon/daemon.c b/src/daemon/daemon.c index 7dcca18cb51b55..d9a4b81de52306 100644 --- a/src/daemon/daemon.c +++ b/src/daemon/daemon.c @@ -60,7 +60,7 @@ static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t closedir(dir); } -void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool recursive) +static void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool recursive) { if (chown(dir, uid, gid) == -1) netdata_log_error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid); @@ -68,7 +68,7 @@ void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool recursive) fix_directory_file_permissions(dir, uid, gid, recursive); } -void clean_directory(char *dirname) +static void clean_directory(char *dirname) { DIR *dir = opendir(dirname); if(!dir) return; @@ -84,7 +84,7 @@ void clean_directory(char *dirname) closedir(dir); } -void prepare_required_directories(uid_t uid, gid_t gid) { +static void prepare_required_directories(uid_t uid, gid_t gid) { change_dir_ownership(netdata_configured_cache_dir, uid, gid, true); change_dir_ownership(netdata_configured_varlib_dir, uid, gid, false); change_dir_ownership(netdata_configured_lock_dir, uid, gid, false); @@ -98,7 +98,7 @@ void prepare_required_directories(uid_t uid, gid_t gid) { clean_directory(netdata_configured_lock_dir); } -int become_user(const char *username, int pid_fd) { +static int become_user(const char *username, int pid_fd) { int am_i_root = (getuid() == 0)?1:0; struct passwd *pw = getpwnam(username); diff --git a/src/daemon/daemon.h b/src/daemon/daemon.h index bc584d2affedb2..1f8837fd6bc9c3 100644 --- a/src/daemon/daemon.h +++ b/src/daemon/daemon.h @@ -3,8 +3,6 @@ #ifndef NETDATA_DAEMON_H #define NETDATA_DAEMON_H 1 -int become_user(const char *username, int pid_fd); - int become_daemon(int dont_fork, const char *user); void netdata_cleanup_and_exit(int ret, const char *action, const char *action_result, const char *action_data); diff --git a/src/daemon/main.c b/src/daemon/main.c index 36842607107a00..50399db67a86ea 100644 --- a/src/daemon/main.c +++ b/src/daemon/main.c @@ -303,7 +303,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) { do { \ usec_t now_ut = now_monotonic_usec(); \ if(prev_msg) \ - netdata_log_info("NETDATA SHUTDOWN: in %7llu ms, %s%s - next: %s", (now_ut - last_ut) / USEC_PER_MS, (timeout)?"(TIMEOUT) ":"", prev_msg, msg); \ + netdata_log_info("NETDATA SHUTDOWN: in %llu ms, %s%s - next: %s", (now_ut - last_ut) / USEC_PER_MS, (timeout)?"(TIMEOUT) ":"", prev_msg, msg); \ else \ netdata_log_info("NETDATA SHUTDOWN: next: %s", msg); \ last_ut = now_ut; \ diff --git a/src/daemon/service.c b/src/daemon/service.c index 895c8587070b2f..ff966c57d24ad3 100644 --- a/src/daemon/service.c +++ b/src/daemon/service.c @@ -36,20 +36,7 @@ static void svc_rrddim_obsolete_to_archive(RRDDIM *rd) { if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) { /* only a collector can mark a chart as obsolete, so we must remove the reference */ - - size_t tiers_available = 0, tiers_said_no_retention = 0; - for(size_t tier = 0; tier < storage_tiers 
;tier++) { - if(rd->tiers[tier].sch) { - tiers_available++; - - if(storage_engine_store_finalize(rd->tiers[tier].sch)) - tiers_said_no_retention++; - - rd->tiers[tier].sch = NULL; - } - } - - if (tiers_available == tiers_said_no_retention && tiers_said_no_retention) { + if (!rrddim_finalize_collection_and_check_retention(rd)) { /* This metric has no data and no references */ metaqueue_delete_dimension_uuid(&rd->metric_uuid); } @@ -204,6 +191,10 @@ static void svc_rrd_cleanup_obsolete_charts_from_all_hosts() { RRDHOST *host; rrdhost_foreach_read(host) { + + if (!service_running(SERVICE_MAINTENANCE)) + break; + if(rrdhost_receiver_replicating_charts(host) || rrdhost_sender_replicating_charts(host)) continue; @@ -321,7 +312,9 @@ void *service_main(void *ptr) real_step = USEC_PER_SEC; svc_rrd_cleanup_obsolete_charts_from_all_hosts(); - svc_rrdhost_cleanup_orphan_hosts(localhost); + + if (service_running(SERVICE_MAINTENANCE)) + svc_rrdhost_cleanup_orphan_hosts(localhost); } netdata_thread_cleanup_pop(1); diff --git a/src/database/engine/datafile.c b/src/database/engine/datafile.c index 7322039cd3688a..1ec2dea799e8e1 100644 --- a/src/database/engine/datafile.c +++ b/src/database/engine/datafile.c @@ -557,7 +557,9 @@ void finalize_data_files(struct rrdengine_instance *ctx) { bool logged = false; - logged = false; + if (!ctx->datafiles.first) + return; + while(__atomic_load_n(&ctx->atomic.extents_currently_being_flushed, __ATOMIC_RELAXED)) { if(!logged) { netdata_log_info("Waiting for inflight flush to finish on tier %d...", ctx->config.tier); diff --git a/src/database/rrd.h b/src/database/rrd.h index 8ab8430ba03244..dd765ad09b9ee2 100644 --- a/src/database/rrd.h +++ b/src/database/rrd.h @@ -260,6 +260,7 @@ typedef struct storage_collect_handle { struct rrddim_tier { STORAGE_POINT virtual_point; STORAGE_ENGINE_BACKEND seb; + SPINLOCK spinlock; uint32_t tier_grouping; time_t next_point_end_time_s; STORAGE_METRIC_HANDLE *smh; // the metric handle inside the database diff --git a/src/database/rrddim.c b/src/database/rrddim.c index 4f009ec0531c4e..74f48fb1820b8f 100644 --- a/src/database/rrddim.c +++ b/src/database/rrddim.c @@ -95,6 +95,7 @@ static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, v rd->tiers[tier].seb = eng->seb; rd->tiers[tier].tier_grouping = host->db[tier].tier_grouping; rd->tiers[tier].smh = eng->api.metric_get_or_create(rd, host->db[tier].si); + rd->tiers[tier].spinlock.locked = false; storage_point_unset(rd->tiers[tier].virtual_point); initialized++; @@ -169,15 +170,16 @@ bool rrddim_finalize_collection_and_check_retention(RRDDIM *rd) { size_t tiers_available = 0, tiers_said_no_retention = 0; for(size_t tier = 0; tier < storage_tiers ;tier++) { - if(!rd->tiers[tier].sch) - continue; + spinlock_lock(&rd->tiers[tier].spinlock); + if(rd->tiers[tier].sch) { + tiers_available++; - tiers_available++; + if (storage_engine_store_finalize(rd->tiers[tier].sch)) + tiers_said_no_retention++; - if(storage_engine_store_finalize(rd->tiers[tier].sch)) - tiers_said_no_retention++; - - rd->tiers[tier].sch = NULL; + rd->tiers[tier].sch = NULL; + } + spinlock_unlock(&rd->tiers[tier].spinlock); } // return true if the dimension has retention in the db diff --git a/src/database/rrdhost.c b/src/database/rrdhost.c index 2699ca8f34dd61..b08df44ef59774 100644 --- a/src/database/rrdhost.c +++ b/src/database/rrdhost.c @@ -879,23 +879,12 @@ void dbengine_init(char *hostname) { struct dbengine_initialization tiers_init[RRD_STORAGE_TIERS] = {}; + bool tiers_adjusted = false; size_t 
created_tiers = 0; char dbenginepath[FILENAME_MAX + 1]; char dbengineconfig[200 + 1]; int divisor = 1; for(size_t tier = 0; tier < storage_tiers ;tier++) { - if(tier == 0) - snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine", netdata_configured_cache_dir); - else - snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine-tier%zu", netdata_configured_cache_dir, tier); - - int ret = mkdir(dbenginepath, 0775); - if (ret != 0 && errno != EEXIST) { - nd_log(NDLS_DAEMON, NDLP_CRIT, - "DBENGINE on '%s': cannot create directory '%s'", - hostname, dbenginepath); - break; - } if(tier > 0) divisor *= 2; @@ -924,10 +913,7 @@ void dbengine_init(char *hostname) { else if(strcmp(bf, "full") == 0) backfill = RRD_BACKFILL_FULL; else if(strcmp(bf, "none") == 0) backfill = RRD_BACKFILL_NONE; else { - nd_log(NDLS_DAEMON, NDLP_WARNING, - "DBENGINE: unknown backfill value '%s', assuming 'new'", - bf); - + nd_log(NDLS_DAEMON, NDLP_WARNING, "DBENGINE: unknown backfill value '%s', assuming 'new'", bf); config_set(CONFIG_SECTION_DB, dbengineconfig, "new"); backfill = RRD_BACKFILL_NEW; } @@ -940,8 +926,21 @@ void dbengine_init(char *hostname) { storage_tiers_grouping_iterations[tier] = 1; nd_log(NDLS_DAEMON, NDLP_WARNING, "DBENGINE on '%s': dbengine tier %zu gives aggregation of more than 65535 points of tier 0. " - "Disabling tiers above %zu", + "Disabling tiers %zu and above", hostname, tier, tier); + storage_tiers = tier; + tiers_adjusted = true; + break; + } + + if(tier == 0) + snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine", netdata_configured_cache_dir); + else + snprintfz(dbenginepath, FILENAME_MAX, "%s/dbengine-tier%zu", netdata_configured_cache_dir, tier); + + int ret = mkdir(dbenginepath, 0775); + if (ret != 0 && errno != EEXIST) { + nd_log(NDLS_DAEMON, NDLP_CRIT, "DBENGINE on '%s': cannot create directory '%s'", hostname, dbenginepath); break; } @@ -961,6 +960,8 @@ void dbengine_init(char *hostname) { else dbengine_tier_init(&tiers_init[tier]); } + if (tiers_adjusted) + config_set_number(CONFIG_SECTION_DB, "storage tiers", storage_tiers); for(size_t tier = 0; tier < storage_tiers ;tier++) { void *ptr; diff --git a/src/database/rrdset.c b/src/database/rrdset.c index b43e34bba88289..096558d28fd5d2 100644 --- a/src/database/rrdset.c +++ b/src/database/rrdset.c @@ -2,7 +2,6 @@ #define NETDATA_RRD_INTERNALS #include "rrd.h" -#include #include "storage_engine.h" diff --git a/src/go/collectors/go.d.plugin/.devcontainer/devcontainer.json b/src/go/collectors/go.d.plugin/.devcontainer/devcontainer.json new file mode 100644 index 00000000000000..c6b84287038e7e --- /dev/null +++ b/src/go/collectors/go.d.plugin/.devcontainer/devcontainer.json @@ -0,0 +1,16 @@ +{ + "image": "netdata/devenv", + "forwardPorts": [ + 19999 + ], + "customizations": { + "vscode": { + "extensions": [ + "golang.go", + "exiasr.hadolint", + "timonwong.shellcheck", + "redhat.vscode-yaml" + ] + } + } +} diff --git a/src/go/collectors/go.d.plugin/.dockerignore b/src/go/collectors/go.d.plugin/.dockerignore new file mode 100644 index 00000000000000..d79e4ae873711f --- /dev/null +++ b/src/go/collectors/go.d.plugin/.dockerignore @@ -0,0 +1,4 @@ +Makefile +/hack +docker-compose.yml +/mocks/tmp \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/Dockerfile.dev b/src/go/collectors/go.d.plugin/Dockerfile.dev new file mode 100644 index 00000000000000..544e9a96d996c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/Dockerfile.dev @@ -0,0 +1,22 @@ +FROM golang:1.21 AS build-env + +RUN mkdir -p /workspace +WORKDIR /workspace + +ENV 
GOOS=linux +ENV GOARCH=amd64 +ENV CGO_ENABLED=0 + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY . . + +RUN go build -o go.d.plugin github.com/netdata/go.d.plugin/cmd/godplugin + +FROM netdata/netdata + +COPY ./mocks/netdata/netdata.conf /etc/netdata/ +COPY ./mocks/conf.d /usr/lib/netdata/conf.d +COPY --from=build-env /workspace/go.d.plugin /usr/libexec/netdata/plugins.d/go.d.plugin diff --git a/src/go/collectors/go.d.plugin/LICENSE b/src/go/collectors/go.d.plugin/LICENSE new file mode 100644 index 00000000000000..94a9ed024d3859 --- /dev/null +++ b/src/go/collectors/go.d.plugin/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. diff --git a/src/go/collectors/go.d.plugin/Makefile b/src/go/collectors/go.d.plugin/Makefile new file mode 100644 index 00000000000000..eadf3e122a2c84 --- /dev/null +++ b/src/go/collectors/go.d.plugin/Makefile @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: GPL-3.0-or-later + +DEV_MODULES := all + +all: download vet test build + +.PHONY: help +help: + @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: download +download: ## Download go modules + go mod download + +.PHONY: build +build: clean ## Build package + hack/go-build.sh + +.PHONY: clean +clean: + rm -rf bin vendor + +.PHONY: check +check: fmt vet ## Run static code analysis + + +.PHONY: test +test: ## Run tests + go test ./... -race -cover -covermode=atomic + +.PHONY: fmt +fmt: + hack/go-fmt.sh . + +.PHONY: vet +vet: + go vet ./...
+ +.PHONY: release +release: clean download ## Create all release artifacts + hack/go-build.sh all + hack/go-build.sh configs + hack/go-build.sh vendor + cd bin && sha256sum -b * >"sha256sums.txt" + +.PHONY: dev +dev: dev-build dev-up ## Launch development build + +dev-build: + docker-compose build + +dev-up: + docker-compose up -d --remove-orphans + +.PHONY: dev-exec +dev-exec: ## Get into development environment + docker-compose exec netdata bash + +dev-log: + docker-compose logs -f netdata + +dev-run: ## Run go.d.plugin inside development environment + go run github.com/netdata/go.d.plugin/cmd/godplugin -d -c conf.d + +dev-mock: ## Run go.d.plugin inside development environment with mock config + go run github.com/netdata/go.d.plugin/cmd/godplugin -d -c ./mocks/conf.d -m $(DEV_MODULES) diff --git a/src/go/collectors/go.d.plugin/README.md b/src/go/collectors/go.d.plugin/README.md new file mode 100644 index 00000000000000..7f34241b151880 --- /dev/null +++ b/src/go/collectors/go.d.plugin/README.md @@ -0,0 +1,217 @@ + + +# go.d.plugin + +`go.d.plugin` is a [Netdata](https://github.com/netdata/netdata) external plugin. It is an **orchestrator** for data +collection modules written in `go`. + +1. It runs as an independent process (`ps fax` shows it). +2. It is started and stopped automatically by Netdata. +3. It communicates with Netdata via a unidirectional pipe (sending data to the Netdata daemon). +4. Supports any number of data collection [modules](https://github.com/netdata/go.d.plugin/tree/master/modules). +5. Allows each [module](https://github.com/netdata/go.d.plugin/tree/master/modules) to have any number of data + collection jobs. + +## Bug reports, feature requests, and questions + +Are welcome! We are using [netdata/netdata](https://github.com/netdata/netdata/) repository for bugs, feature requests, +and questions. + +- [GitHub Issues](https://github.com/netdata/netdata/issues/new/choose): report bugs or open a new feature request. +- [GitHub Discussions](https://github.com/netdata/netdata/discussions): ask a question or suggest a new idea. + +## Install + +Go.d.plugin is shipped with Netdata. + +### Required Linux capabilities + +All capabilities are set automatically during Netdata installation using +the [official installation method](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md#install-on-linux-with-one-line-installer). +No further action required. If you have used a different installation method and need to set the capabilities manually, +see the appropriate collector readme. 
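+For illustration, here is a minimal, hypothetical sketch of granting one of these capabilities by hand. The binary path below is the default used by this repository's Dockerfile; adjust it for your installation, and pick the capability your collector needs from the table below:
+
+```bash
+# Hypothetical example: allow the ping collector to open raw ICMP sockets.
+sudo setcap cap_net_raw+ep /usr/libexec/netdata/plugins.d/go.d.plugin
+
+# Verify that the file capability was applied.
+getcap /usr/libexec/netdata/plugins.d/go.d.plugin
+```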
+ +| Capability | Required by | +|:--------------------|:----------------------------------------------------------------------------------------:| +| CAP_NET_RAW | [Ping](https://github.com/netdata/go.d.plugin/tree/master/modules/ping#readme) | +| CAP_NET_ADMIN | [Wireguard](https://github.com/netdata/go.d.plugin/tree/master/modules/wireguard#readme) | +| CAP_DAC_READ_SEARCH | [Filecheck](https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck#readme) | + +## Available modules + +| Name | Monitors | +|:----------------------------------------------------------------------------------------------------|:-----------------------------:| +| [activemq](https://github.com/netdata/go.d.plugin/tree/master/modules/activemq) | ActiveMQ | +| [apache](https://github.com/netdata/go.d.plugin/tree/master/modules/apache) | Apache | +| [bind](https://github.com/netdata/go.d.plugin/tree/master/modules/bind) | ISC Bind | +| [cassandra](https://github.com/netdata/go.d.plugin/tree/master/modules/cassandra) | Cassandra | +| [chrony](https://github.com/netdata/go.d.plugin/tree/master/modules/chrony) | Chrony | +| [cockroachdb](https://github.com/netdata/go.d.plugin/tree/master/modules/cockroachdb) | CockroachDB | +| [consul](https://github.com/netdata/go.d.plugin/tree/master/modules/consul) | Consul | +| [coredns](https://github.com/netdata/go.d.plugin/tree/master/modules/coredns) | CoreDNS | +| [couchbase](https://github.com/netdata/go.d.plugin/tree/master/modules/couchbase) | Couchbase | +| [couchdb](https://github.com/netdata/go.d.plugin/tree/master/modules/couchdb) | CouchDB | +| [dnsdist](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsdist) | Dnsdist | +| [dnsmasq](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq) | Dnsmasq DNS Forwarder | +| [dnsmasq_dhcp](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq_dhcp) | Dnsmasq DHCP | +| [dns_query](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsquery) | DNS Query RTT | +| [docker](https://github.com/netdata/go.d.plugin/tree/master/modules/docker) | Docker Engine | +| [docker_engine](https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine) | Docker Engine | +| [dockerhub](https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub) | Docker Hub | +| [elasticsearch](https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch) | Elasticsearch/OpenSearch | +| [energid](https://github.com/netdata/go.d.plugin/tree/master/modules/energid) | Energi Core | +| [envoy](https://github.com/netdata/go.d.plugin/tree/master/modules/envoy) | Envoy | +| [example](https://github.com/netdata/go.d.plugin/tree/master/modules/example) | - | +| [filecheck](https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck) | Files and Directories | +| [fluentd](https://github.com/netdata/go.d.plugin/tree/master/modules/fluentd) | Fluentd | +| [freeradius](https://github.com/netdata/go.d.plugin/tree/master/modules/freeradius) | FreeRADIUS | +| [haproxy](https://github.com/netdata/go.d.plugin/tree/master/modules/haproxy) | HAProxy | +| [hdfs](https://github.com/netdata/go.d.plugin/tree/master/modules/hdfs) | HDFS | +| [httpcheck](https://github.com/netdata/go.d.plugin/tree/master/modules/httpcheck) | Any HTTP Endpoint | +| [isc_dhcpd](https://github.com/netdata/go.d.plugin/tree/master/modules/isc_dhcpd) | ISC DHCP | +| [k8s_kubelet](https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubelet) | Kubelet | +| 
[k8s_kubeproxy](https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubeproxy) | Kube-proxy | +| [k8s_state](https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_state) | Kubernetes cluster state | +| [lighttpd](https://github.com/netdata/go.d.plugin/tree/master/modules/lighttpd) | Lighttpd | +| [logind](https://github.com/netdata/go.d.plugin/tree/master/modules/logind) | systemd-logind | +| [logstash](https://github.com/netdata/go.d.plugin/tree/master/modules/logstash) | Logstash | +| [mongoDB](https://github.com/netdata/go.d.plugin/tree/master/modules/mongodb) | MongoDB | +| [mysql](https://github.com/netdata/go.d.plugin/tree/master/modules/mysql) | MySQL | +| [nginx](https://github.com/netdata/go.d.plugin/tree/master/modules/nginx) | NGINX | +| [nginxplus](https://github.com/netdata/go.d.plugin/tree/master/modules/nginxplus) | NGINX Plus | +| [nginxvts](https://github.com/netdata/go.d.plugin/tree/master/modules/nginxvts) | NGINX VTS | +| [ntpd](https://github.com/netdata/go.d.plugin/tree/master/modules/ntpd) | NTP daemon | +| [nvme](https://github.com/netdata/go.d.plugin/tree/master/modules/nvme) | NVMe devices | +| [openvpn](https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn) | OpenVPN | +| [openvpn_status_log](https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn_status_log) | OpenVPN | +| [pgbouncer](https://github.com/netdata/go.d.plugin/tree/master/modules/pgbouncer) | PgBouncer | +| [phpdaemon](https://github.com/netdata/go.d.plugin/tree/master/modules/phpdaemon) | phpDaemon | +| [phpfpm](https://github.com/netdata/go.d.plugin/tree/master/modules/phpfpm) | PHP-FPM | +| [pihole](https://github.com/netdata/go.d.plugin/tree/master/modules/pihole) | Pi-hole | +| [pika](https://github.com/netdata/go.d.plugin/tree/master/modules/pika) | Pika | +| [ping](https://github.com/netdata/go.d.plugin/tree/master/modules/ping) | Any network host | +| [prometheus](https://github.com/netdata/go.d.plugin/tree/master/modules/prometheus) | Any Prometheus Endpoint | +| [portcheck](https://github.com/netdata/go.d.plugin/tree/master/modules/portcheck) | Any TCP Endpoint | +| [postgres](https://github.com/netdata/go.d.plugin/tree/master/modules/postgres) | PostgreSQL | +| [powerdns](https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns) | PowerDNS Authoritative Server | +| [powerdns_recursor](https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns_recursor) | PowerDNS Recursor | +| [proxysql](https://github.com/netdata/go.d.plugin/tree/master/modules/proxysql) | ProxySQL | +| [pulsar](https://github.com/netdata/go.d.plugin/tree/master/modules/pulsar) | Apache Pulsar | +| [rabbitmq](https://github.com/netdata/go.d.plugin/tree/master/modules/rabbitmq) | RabbitMQ | +| [redis](https://github.com/netdata/go.d.plugin/tree/master/modules/redis) | Redis | +| [scaleio](https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio) | Dell EMC ScaleIO | +| [SNMP](https://github.com/netdata/go.d.plugin/tree/master/modules/snmp) | SNMP | +| [solr](https://github.com/netdata/go.d.plugin/tree/master/modules/solr) | Solr | +| [squidlog](https://github.com/netdata/go.d.plugin/tree/master/modules/squidlog) | Squid | +| [springboot2](https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2) | Spring Boot2 | +| [supervisord](https://github.com/netdata/go.d.plugin/tree/master/modules/supervisord) | Supervisor | +| [systemdunits](https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits) | Systemd unit state | +| 
[tengine](https://github.com/netdata/go.d.plugin/tree/master/modules/tengine) | Tengine | +| [traefik](https://github.com/netdata/go.d.plugin/tree/master/modules/traefik) | Traefik | +| [upsd](https://github.com/netdata/go.d.plugin/tree/master/modules/upsd) | UPSd (Nut) | +| [unbound](https://github.com/netdata/go.d.plugin/tree/master/modules/unbound) | Unbound | +| [vcsa](https://github.com/netdata/go.d.plugin/tree/master/modules/vcsa) | vCenter Server Appliance | +| [vernemq](https://github.com/netdata/go.d.plugin/tree/master/modules/vernemq) | VerneMQ | +| [vsphere](https://github.com/netdata/go.d.plugin/tree/master/modules/vsphere) | VMware vCenter Server | +| [web_log](https://github.com/netdata/go.d.plugin/tree/master/modules/weblog) | Apache/NGINX | +| [wireguard](https://github.com/netdata/go.d.plugin/tree/master/modules/wireguard) | WireGuard | +| [whoisquery](https://github.com/netdata/go.d.plugin/tree/master/modules/whoisquery) | Domain Expiry | +| [windows](https://github.com/netdata/go.d.plugin/tree/master/modules/windows) | Windows | +| [x509check](https://github.com/netdata/go.d.plugin/tree/master/modules/x509check) | Digital Certificates | +| [zookeeper](https://github.com/netdata/go.d.plugin/tree/master/modules/zookeeper) | ZooKeeper | + +## Configuration + +Edit the `go.d.conf` configuration file using `edit-config` from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically +at `/etc/netdata`. + +```bash +cd /etc/netdata # Replace this path with your Netdata config directory +sudo ./edit-config go.d.conf +``` + +Configurations are written in [YAML](https://yaml.org/). + +- [plugin configuration](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf) +- [specific module configuration](https://github.com/netdata/go.d.plugin/tree/master/config/go.d) + +### Enable a collector + +To enable a collector, edit `go.d.conf` to uncomment the collector in question and change it from `no` +to `yes`. + +For example, to enable the `example` collector you would need to update `go.d.conf` from something like: + +```yaml +modules: +# example: no +``` + +to + +```yaml +modules: + example: yes +``` + +Then [restart netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for the +change to take effect. + +## Contributing + +If you want to contribute to this project, we are humbled. Please take a look at +our [contributing guidelines](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md) and don't hesitate to +contact us in our forums. + +### How to develop a collector + +Read [how to write a Netdata collector in Go](https://github.com/netdata/go.d.plugin/blob/master/docs/how-to-write-a-module.md). + +## Troubleshooting + +Plugin CLI: + +```sh +Usage: + orchestrator [OPTIONS] [update every] + +Application Options: + -m, --modules= module name to run (default: all) + -c, --config-dir= config dir to read + -w, --watch-path= config path to watch + -d, --debug debug mode + -v, --version display the version and exit + +Help Options: + -h, --help Show this help message +``` + +To debug a specific module: + +```sh +# become user netdata +sudo su -s /bin/bash netdata + +# run plugin in debug mode +./go.d.plugin -d -m <module name> +``` + +Change `<module name>` to the module name you want to debug. See the [whole list](#available-modules) of available +modules. + +## Netdata Community + +This repository follows the Netdata Code of Conduct and is part of the Netdata Community.
+ +- [Community Forums](https://community.netdata.cloud) +- [Netdata Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md) diff --git a/src/go/collectors/go.d.plugin/agent/README.md b/src/go/collectors/go.d.plugin/agent/README.md new file mode 100644 index 00000000000000..1e6f24aafe7bc0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/README.md @@ -0,0 +1,157 @@ +# agent + +This library is a tool for writing [netdata](https://github.com/netdata/netdata) plugins. + +We strongly believe that custom plugins are very important, and they must be easy to write. + + +Definitions: + - orchestrator + > plugin orchestrators are external plugins that do not collect any data by themselves. Instead, they support data collection modules written in the language of the orchestrator. Usually the orchestrator provides a higher level abstraction, making it ideal for writing new data collection modules with the minimum of code. + + - plugin + > plugin is a set of data collection modules. + + - module + > module is a data collector. It collects, processes and returns processed data to the orchestrator. + + - job + > job is a module instance with specific settings. + + +The package provides: + - CLI parser + - plugin orchestrator (loads configurations, creates and serves jobs) + +You are responsible only for __creating modules__. + +## Custom plugin example + +[Yep! So easy!](https://github.com/netdata/go.d.plugin/blob/master/examples/simple/main.go) + +## How to write a Module + +A module is responsible for **creating charts** and **collecting data**. Implement the Module interface and that is it. + +```go +type Module interface { + // Init does initialization. + // If it returns false, the job will be disabled. + Init() bool + + // Check is called after Init. + // If it returns false, the job will be disabled. + Check() bool + + // Charts returns the chart definition. + // Make sure not to share returned instance. + Charts() *Charts + + // Collect collects metrics. + Collect() map[string]int64 + + // SetLogger sets logger. + SetLogger(l *logger.Logger) + + // Cleanup performs cleanup if needed. + Cleanup() +} + +// Base is a helper struct. All modules should embed this struct. +type Base struct { + *logger.Logger +} + +// SetLogger sets logger. +func (b *Base) SetLogger(l *logger.Logger) { b.Logger = l } + +``` + +## How to write a Plugin + +Since a plugin is a set of modules, all you need to do is: + - write module(s) + - add the module(s) to the plugin's [registry](https://github.com/netdata/go.d.plugin/blob/master/plugin/module/registry.go) + - start the plugin + + +## How to integrate your plugin into Netdata + +Three simple steps: + - move the plugin to the `plugins.d` dir. + - add the plugin configuration file to the `etc/netdata/` dir. + - add the modules' configuration files to the `etc/netdata/<plugin name>/` dir. + +Congratulations! + +## Configurations + +Configurations are written in [YAML](https://yaml.org/). + + - plugin configuration: + +```yaml + +# Enable/disable the whole plugin. +enabled: yes + +# Default enable/disable value for all modules. +default_run: yes + +# Maximum number of used CPUs. Zero means no limit.
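+# (A positive value caps the plugin's parallelism, akin to setting GOMAXPROCS.)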
+max_procs: 0 + +# Enable/disable a specific plugin module +modules: +# module_name1: yes +# module_name2: yes + +``` + + - module configuration + +```yaml +# [ GLOBAL ] +update_every: 1 +autodetection_retry: 0 + +# [ JOBS ] +jobs: + - name: job1 + param1: value1 + param2: value2 + + - name: job2 + param1: value1 + param2: value2 +``` + +The plugin uses `yaml.Unmarshal` to apply configuration parameters to the module. Please use `yaml` tags! + +## Debug + +Plugin CLI: +``` +Usage: + plugin [OPTIONS] [update every] + +Application Options: + -d, --debug debug mode + -m, --modules= modules name (default: all) + -c, --config= config dir + +Help Options: + -h, --help Show this help message + +``` + +To debug a specific module: +``` +# become user netdata +sudo su -s /bin/bash netdata + +# run plugin in debug mode +./<plugin name> -d -m <module name> +``` + +Change `<plugin name>` to your plugin name and `<module name>` to the module name you want to debug. diff --git a/src/go/collectors/go.d.plugin/agent/agent.go b/src/go/collectors/go.d.plugin/agent/agent.go new file mode 100644 index 00000000000000..9d6a85f91f5812 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/agent.go @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package agent + +import ( + "context" + "io" + "log/slog" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery" + "github.com/netdata/go.d.plugin/agent/filelock" + "github.com/netdata/go.d.plugin/agent/filestatus" + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/agent/jobmgr" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/netdata/go.d.plugin/agent/vnodes" + "github.com/netdata/go.d.plugin/logger" + "github.com/netdata/go.d.plugin/pkg/multipath" + + "github.com/mattn/go-isatty" +) + +var isTerminal = isatty.IsTerminal(os.Stdout.Fd()) + +// Config is an Agent configuration. +type Config struct { + Name string + ConfDir []string + ModulesConfDir []string + ModulesSDConfPath []string + VnodesConfDir []string + StateFile string + LockDir string + ModuleRegistry module.Registry + RunModule string + MinUpdateEvery int +} + +// Agent represents the orchestrator. +type Agent struct { + *logger.Logger + + Name string + ConfDir multipath.MultiPath + ModulesConfDir multipath.MultiPath + ModulesSDConfPath []string + VnodesConfDir multipath.MultiPath + StateFile string + LockDir string + RunModule string + MinUpdateEvery int + ModuleRegistry module.Registry + Out io.Writer + + api *netdataapi.API +} + +// New creates a new Agent. +func New(cfg Config) *Agent { + return &Agent{ + Logger: logger.New().With( + slog.String("component", "agent"), + ), + Name: cfg.Name, + ConfDir: cfg.ConfDir, + ModulesConfDir: cfg.ModulesConfDir, + ModulesSDConfPath: cfg.ModulesSDConfPath, + VnodesConfDir: cfg.VnodesConfDir, + StateFile: cfg.StateFile, + LockDir: cfg.LockDir, + RunModule: cfg.RunModule, + MinUpdateEvery: cfg.MinUpdateEvery, + ModuleRegistry: module.DefaultRegistry, + Out: safewriter.Stdout, + api: netdataapi.New(safewriter.Stdout), + } +} + +// Run starts the Agent.
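+// It launches the keep-alive loop in the background and then serves run/reload cycles until a termination signal arrives.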
+func (a *Agent) Run() { + go a.keepAlive() + serve(a) +} + +func serve(a *Agent) { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + var wg sync.WaitGroup + + var exit bool + var reload bool + + for { + ctx, cancel := context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, "reload", reload) + + wg.Add(1) + go func() { defer wg.Done(); a.run(ctx) }() + + switch sig := <-ch; sig { + case syscall.SIGHUP: + a.Infof("received %s signal (%d). Restarting running instance", sig, sig) + default: + a.Infof("received %s signal (%d). Terminating...", sig, sig) + module.DontObsoleteCharts() + exit = true + } + + cancel() + + func() { + timeout := time.Second * 10 + t := time.NewTimer(timeout) + defer t.Stop() + done := make(chan struct{}) + + go func() { wg.Wait(); close(done) }() + + select { + case <-t.C: + a.Errorf("stopping all goroutines timed out after %s. Exiting...", timeout) + os.Exit(0) + case <-done: + } + }() + + if exit { + os.Exit(0) + } + + reload = true + time.Sleep(time.Second) + } +} + +func (a *Agent) run(ctx context.Context) { + a.Info("instance is started") + defer func() { a.Info("instance is stopped") }() + + cfg := a.loadPluginConfig() + a.Infof("using config: %s", cfg.String()) + + if !cfg.Enabled { + a.Info("plugin is disabled in the configuration file, exiting...") + if isTerminal { + os.Exit(0) + } + _ = a.api.DISABLE() + return + } + + enabledModules := a.loadEnabledModules(cfg) + if len(enabledModules) == 0 { + a.Info("no modules to run") + if isTerminal { + os.Exit(0) + } + _ = a.api.DISABLE() + return + } + + discCfg := a.buildDiscoveryConf(enabledModules) + + discoveryManager, err := discovery.NewManager(discCfg) + if err != nil { + a.Error(err) + if isTerminal { + os.Exit(0) + } + return + } + + functionsManager := functions.NewManager() + + jobsManager := jobmgr.NewManager() + jobsManager.PluginName = a.Name + jobsManager.Out = a.Out + jobsManager.Modules = enabledModules + + // TODO: API will be changed in https://github.com/netdata/netdata/pull/16702 + //if logger.Level.Enabled(slog.LevelDebug) { + // dyncfgDiscovery, _ := dyncfg.NewDiscovery(dyncfg.Config{ + // Plugin: a.Name, + // API: netdataapi.New(a.Out), + // Modules: enabledModules, + // ModuleConfigDefaults: discCfg.Registry, + // Functions: functionsManager, + // }) + // + // discoveryManager.Add(dyncfgDiscovery) + // + // jobsManager.Dyncfg = dyncfgDiscovery + //} + + if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 { + vnodes.Disabled = true + } else { + jobsManager.Vnodes = reg + } + + if a.LockDir != "" { + jobsManager.FileLock = filelock.New(a.LockDir) + } + + var statusSaveManager *filestatus.Manager + if !isTerminal && a.StateFile != "" { + statusSaveManager = filestatus.NewManager(a.StateFile) + jobsManager.StatusSaver = statusSaveManager + if store, err := filestatus.LoadStore(a.StateFile); err != nil { + a.Warningf("couldn't load state file: %v", err) + } else { + jobsManager.StatusStore = store + } + } + + in := make(chan []*confgroup.Group) + var wg sync.WaitGroup + + wg.Add(1) + go func() { defer wg.Done(); functionsManager.Run(ctx) }() + + wg.Add(1) + go func() { defer wg.Done(); jobsManager.Run(ctx, in) }() + + wg.Add(1) + go func() { defer wg.Done(); discoveryManager.Run(ctx, in) }() + + if statusSaveManager != nil { + wg.Add(1) + go func() { defer wg.Done(); statusSaveManager.Run(ctx) }() + } + + wg.Wait() + <-ctx.Done() +} + +func (a *Agent) keepAlive() { + if isTerminal { + return + } + + tk := 
time.NewTicker(time.Second) + defer tk.Stop() + + var n int + for range tk.C { + if err := a.api.EMPTYLINE(); err != nil { + a.Infof("keepAlive: %v", err) + n++ + } else { + n = 0 + } + if n == 3 { + a.Info("too many keepAlive errors. Terminating...") + os.Exit(0) + } + } +} diff --git a/src/go/collectors/go.d.plugin/agent/agent_test.go b/src/go/collectors/go.d.plugin/agent/agent_test.go new file mode 100644 index 00000000000000..2a15a6b733c01b --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/agent_test.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package agent + +import ( + "bytes" + "context" + "sync" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/stretchr/testify/assert" +) + +// TODO: tech debt +func TestNew(t *testing.T) { + +} + +func TestAgent_Run(t *testing.T) { + a := New(Config{ + Name: "", + ConfDir: nil, + ModulesConfDir: nil, + ModulesSDConfPath: nil, + StateFile: "", + ModuleRegistry: nil, + RunModule: "", + MinUpdateEvery: 0, + }) + + var buf bytes.Buffer + a.Out = safewriter.New(&buf) + + var mux sync.Mutex + stats := make(map[string]int) + a.ModuleRegistry = prepareRegistry(&mux, stats, "module1", "module2") + + ctx, cancel := context.WithCancel(context.Background()) + var wg sync.WaitGroup + + wg.Add(1) + go func() { defer wg.Done(); a.run(ctx) }() + + time.Sleep(time.Second * 2) + cancel() + wg.Wait() + + assert.Equalf(t, 1, stats["module1_init"], "module1 init") + assert.Equalf(t, 1, stats["module2_init"], "module2 init") + assert.Equalf(t, 1, stats["module1_check"], "module1 check") + assert.Equalf(t, 1, stats["module2_check"], "module2 check") + assert.Equalf(t, 1, stats["module1_charts"], "module1 charts") + assert.Equalf(t, 1, stats["module2_charts"], "module2 charts") + assert.Truef(t, stats["module1_collect"] > 0, "module1 collect") + assert.Truef(t, stats["module2_collect"] > 0, "module2 collect") + assert.Equalf(t, 1, stats["module1_cleanup"], "module1 cleanup") + assert.Equalf(t, 1, stats["module2_cleanup"], "module2 cleanup") + assert.True(t, buf.String() != "") +} + +func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) module.Registry { + reg := module.Registry{} + for _, name := range names { + name := name + reg.Register(name, module.Creator{ + Create: func() module.Module { return prepareMockModule(name, mux, stats) }, + }) + } + return reg +} + +func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module { + return &module.MockModule{ + InitFunc: func() bool { + mux.Lock() + defer mux.Unlock() + stats[name+"_init"]++ + return true + }, + CheckFunc: func() bool { + mux.Lock() + defer mux.Unlock() + stats[name+"_check"]++ + return true + }, + ChartsFunc: func() *module.Charts { + mux.Lock() + defer mux.Unlock() + stats[name+"_charts"]++ + return &module.Charts{ + &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}, + } + }, + CollectFunc: func() map[string]int64 { + mux.Lock() + defer mux.Unlock() + stats[name+"_collect"]++ + return map[string]int64{"id1": 1} + }, + CleanupFunc: func() { + mux.Lock() + defer mux.Unlock() + stats[name+"_cleanup"]++ + }, + } +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/cache.go b/src/go/collectors/go.d.plugin/agent/confgroup/cache.go new file mode 100644 index 00000000000000..40c8071d5c12be --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/cache.go @@ -0,0 +1,93 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +func NewCache() *Cache { + return &Cache{ + hashes: make(map[uint64]uint), + sources: make(map[string]map[uint64]Config), + } +} + +type Cache struct { + hashes map[uint64]uint // map[cfgHash]cfgCount + sources map[string]map[uint64]Config // map[cfgSource]map[cfgHash]cfg +} + +func (c *Cache) Add(group *Group) (added, removed []Config) { + if group == nil { + return nil, nil + } + + if len(group.Configs) == 0 { + return c.addEmpty(group) + } + + return c.addNotEmpty(group) +} + +func (c *Cache) addEmpty(group *Group) (added, removed []Config) { + set, ok := c.sources[group.Source] + if !ok { + return nil, nil + } + + for hash, cfg := range set { + c.hashes[hash]-- + if c.hashes[hash] == 0 { + removed = append(removed, cfg) + } + delete(set, hash) + } + + delete(c.sources, group.Source) + + return nil, removed +} + +func (c *Cache) addNotEmpty(group *Group) (added, removed []Config) { + set, ok := c.sources[group.Source] + if !ok { + set = make(map[uint64]Config) + c.sources[group.Source] = set + } + + seen := make(map[uint64]struct{}) + + for _, cfg := range group.Configs { + hash := cfg.Hash() + seen[hash] = struct{}{} + + if _, ok := set[hash]; ok { + continue + } + + set[hash] = cfg + if c.hashes[hash] == 0 { + added = append(added, cfg) + } + c.hashes[hash]++ + } + + if !ok { + return added, nil + } + + for hash, cfg := range set { + if _, ok := seen[hash]; ok { + continue + } + + delete(set, hash) + c.hashes[hash]-- + if c.hashes[hash] == 0 { + removed = append(removed, cfg) + } + } + + if ok && len(set) == 0 { + delete(c.sources, group.Source) + } + + return added, removed +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/cache_test.go b/src/go/collectors/go.d.plugin/agent/confgroup/cache_test.go new file mode 100644 index 00000000000000..a2bbd491968146 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/cache_test.go @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConfigCache_Add(t *testing.T) { + tests := map[string]struct { + prepareGroups []Group + groups []Group + expectedAdd []Config + expectedRemove []Config + }{ + "new group, new configs": { + groups: []Group{ + prepareGroup("source", prepareCfg("name", "module")), + }, + expectedAdd: []Config{ + prepareCfg("name", "module"), + }, + }, + "several equal updates for the same group": { + groups: []Group{ + prepareGroup("source", prepareCfg("name", "module")), + prepareGroup("source", prepareCfg("name", "module")), + prepareGroup("source", prepareCfg("name", "module")), + prepareGroup("source", prepareCfg("name", "module")), + prepareGroup("source", prepareCfg("name", "module")), + }, + expectedAdd: []Config{ + prepareCfg("name", "module"), + }, + }, + "empty group update for cached group": { + prepareGroups: []Group{ + prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + }, + groups: []Group{ + prepareGroup("source"), + }, + expectedRemove: []Config{ + prepareCfg("name1", "module"), + prepareCfg("name2", "module"), + }, + }, + "changed group update for cached group": { + prepareGroups: []Group{ + prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + }, + groups: []Group{ + prepareGroup("source", prepareCfg("name2", "module")), + }, + expectedRemove: []Config{ + prepareCfg("name1", "module"), + }, + }, + "empty group update for uncached group": { 
+ groups: []Group{ + prepareGroup("source"), + prepareGroup("source"), + }, + }, + "several updates with different source but same context": { + groups: []Group{ + prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + }, + expectedAdd: []Config{ + prepareCfg("name1", "module"), + prepareCfg("name2", "module"), + }, + }, + "have equal configs from 2 sources, get empty group for the 1st source": { + prepareGroups: []Group{ + prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), + }, + groups: []Group{ + prepareGroup("source2"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cache := NewCache() + + for _, group := range test.prepareGroups { + cache.Add(&group) + } + + var added, removed []Config + for _, group := range test.groups { + a, r := cache.Add(&group) + added = append(added, a...) + removed = append(removed, r...) + } + + sortConfigs(added) + sortConfigs(removed) + sortConfigs(test.expectedAdd) + sortConfigs(test.expectedRemove) + + assert.Equalf(t, test.expectedAdd, added, "added configs") + assert.Equalf(t, test.expectedRemove, removed, "removed configs") + }) + } +} + +func prepareGroup(source string, cfgs ...Config) Group { + return Group{ + Configs: cfgs, + Source: source, + } +} + +func prepareCfg(name, module string) Config { + return Config{ + "name": name, + "module": module, + } +} + +func sortConfigs(cfgs []Config) { + if len(cfgs) == 0 { + return + } + sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() }) +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/group.go b/src/go/collectors/go.d.plugin/agent/confgroup/group.go new file mode 100644 index 00000000000000..649a145d7aec5d --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/group.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/agent/hostinfo" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/ilyam8/hashstructure" +) + +type Group struct { + Configs []Config + Source string +} + +type Config map[string]interface{} + +func (c Config) HashIncludeMap(_ string, k, _ interface{}) (bool, error) { + s := k.(string) + return !(strings.HasPrefix(s, "__") && strings.HasSuffix(s, "__")), nil +} + +func (c Config) NameWithHash() string { return fmt.Sprintf("%s_%d", c.Name(), c.Hash()) } +func (c Config) Name() string { v, _ := c.get("name").(string); return v } +func (c Config) Module() string { v, _ := c.get("module").(string); return v } +func (c Config) FullName() string { return fullName(c.Name(), c.Module()) } +func (c Config) UpdateEvery() int { v, _ := c.get("update_every").(int); return v } +func (c Config) AutoDetectionRetry() int { v, _ := c.get("autodetection_retry").(int); return v } +func (c Config) Priority() int { v, _ := c.get("priority").(int); return v } +func (c Config) Labels() map[any]any { v, _ := c.get("labels").(map[any]any); return v } +func (c Config) Hash() uint64 { return calcHash(c) } +func (c Config) Source() string { v, _ := c.get("__source__").(string); return v } +func (c Config) Provider() string { v, _ := c.get("__provider__").(string); return v } +func (c Config) Vnode() string { v, _ := c.get("vnode").(string); 
return v } + +func (c Config) SetName(v string) { c.set("name", v) } +func (c Config) SetModule(v string) { c.set("module", v) } +func (c Config) SetSource(v string) { c.set("__source__", v) } +func (c Config) SetProvider(v string) { c.set("__provider__", v) } + +func (c Config) set(key string, value interface{}) { c[key] = value } +func (c Config) get(key string) interface{} { return c[key] } + +func (c Config) Apply(def Default) { + if c.UpdateEvery() <= 0 { + v := firstPositive(def.UpdateEvery, module.UpdateEvery) + c.set("update_every", v) + } + if c.AutoDetectionRetry() <= 0 { + v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry) + c.set("autodetection_retry", v) + } + if c.Priority() <= 0 { + v := firstPositive(def.Priority, module.Priority) + c.set("priority", v) + } + if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 { + c.set("update_every", def.MinUpdateEvery) + } + if c.Name() == "" { + c.set("name", c.Module()) + } else { + c.set("name", cleanName(jobNameResolveHostname(c.Name()))) + } + + if v, ok := c.get("url").(string); ok { + c.set("url", urlResolveHostname(v)) + } +} + +func cleanName(name string) string { + return reInvalidCharacters.ReplaceAllString(name, "_") +} + +var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`) + +func fullName(name, module string) string { + if name == module { + return name + } + return module + "_" + name +} + +func calcHash(obj interface{}) uint64 { + hash, _ := hashstructure.Hash(obj, nil) + return hash +} + +func firstPositive(value int, others ...int) int { + if value > 0 || len(others) == 0 { + return value + } + return firstPositive(others[0], others[1:]...) +} + +func urlResolveHostname(rawURL string) string { + if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") { + return rawURL + } + + u, err := url.Parse(rawURL) + if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) { + return rawURL + } + + u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1) + + return u.String() +} + +func jobNameResolveHostname(name string) string { + if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") { + return name + } + + if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") { + return name + } + + return strings.Replace(name, "hostname", hostinfo.Hostname, 1) +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/group_test.go b/src/go/collectors/go.d.plugin/agent/confgroup/group_test.go new file mode 100644 index 00000000000000..af9a804e83d6f3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/group_test.go @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" +) + +func TestConfig_Name(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "string": {cfg: Config{"name": "name"}, expected: "name"}, + "empty string": {cfg: Config{"name": ""}, expected: ""}, + "not string": {cfg: Config{"name": 0}, expected: ""}, + "not set": {cfg: Config{}, expected: ""}, + "nil cfg": {expected: ""}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.Name()) + }) + } +} + +func TestConfig_Module(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "string": {cfg: Config{"module": "module"}, expected: 
"module"}, + "empty string": {cfg: Config{"module": ""}, expected: ""}, + "not string": {cfg: Config{"module": 0}, expected: ""}, + "not set": {cfg: Config{}, expected: ""}, + "nil cfg": {expected: ""}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.Module()) + }) + } +} + +func TestConfig_FullName(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "name == module": {cfg: Config{"name": "name", "module": "name"}, expected: "name"}, + "name != module": {cfg: Config{"name": "name", "module": "module"}, expected: "module_name"}, + "nil cfg": {expected: ""}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.FullName()) + }) + } +} + +func TestConfig_UpdateEvery(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "int": {cfg: Config{"update_every": 1}, expected: 1}, + "not int": {cfg: Config{"update_every": "1"}, expected: 0}, + "not set": {cfg: Config{}, expected: 0}, + "nil cfg": {expected: 0}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.UpdateEvery()) + }) + } +} + +func TestConfig_AutoDetectionRetry(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "int": {cfg: Config{"autodetection_retry": 1}, expected: 1}, + "not int": {cfg: Config{"autodetection_retry": "1"}, expected: 0}, + "not set": {cfg: Config{}, expected: 0}, + "nil cfg": {expected: 0}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.AutoDetectionRetry()) + }) + } +} + +func TestConfig_Priority(t *testing.T) { + tests := map[string]struct { + cfg Config + expected interface{} + }{ + "int": {cfg: Config{"priority": 1}, expected: 1}, + "not int": {cfg: Config{"priority": "1"}, expected: 0}, + "not set": {cfg: Config{}, expected: 0}, + "nil cfg": {expected: 0}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.cfg.Priority()) + }) + } +} + +func TestConfig_Hash(t *testing.T) { + tests := map[string]struct { + one, two Config + equal bool + }{ + "same keys, no internal keys": { + one: Config{"name": "name"}, + two: Config{"name": "name"}, + equal: true, + }, + "same keys, different internal keys": { + one: Config{"name": "name", "__key__": 1}, + two: Config{"name": "name", "__value__": 1}, + equal: true, + }, + "same keys, same internal keys": { + one: Config{"name": "name", "__key__": 1}, + two: Config{"name": "name", "__key__": 1}, + equal: true, + }, + "diff keys, no internal keys": { + one: Config{"name": "name1"}, + two: Config{"name": "name2"}, + equal: false, + }, + "diff keys, different internal keys": { + one: Config{"name": "name1", "__key__": 1}, + two: Config{"name": "name2", "__value__": 1}, + equal: false, + }, + "diff keys, same internal keys": { + one: Config{"name": "name1", "__key__": 1}, + two: Config{"name": "name2", "__key__": 1}, + equal: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.equal { + assert.Equal(t, test.one.Hash(), test.two.Hash()) + } else { + assert.NotEqual(t, test.one.Hash(), test.two.Hash()) + } + }) + } + cfg := Config{"name": "name", "module": "module"} + assert.NotZero(t, cfg.Hash()) +} + +func TestConfig_SetModule(t *testing.T) { + cfg := Config{} + cfg.SetModule("name") + + assert.Equal(t, 
cfg.Module(), "name") +} + +func TestConfig_SetSource(t *testing.T) { + cfg := Config{} + cfg.SetSource("name") + + assert.Equal(t, cfg.Source(), "name") +} + +func TestConfig_SetProvider(t *testing.T) { + cfg := Config{} + cfg.SetProvider("name") + + assert.Equal(t, cfg.Provider(), "name") +} + +func TestConfig_Apply(t *testing.T) { + const jobDef = 11 + const applyDef = 22 + tests := map[string]struct { + def Default + origCfg Config + expectedCfg Config + }{ + "+job +def": { + def: Default{ + UpdateEvery: applyDef, + AutoDetectionRetry: applyDef, + Priority: applyDef, + }, + origCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + expectedCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + }, + "-job +def": { + def: Default{ + UpdateEvery: applyDef, + AutoDetectionRetry: applyDef, + Priority: applyDef, + }, + origCfg: Config{ + "name": "name", + "module": "module", + }, + expectedCfg: Config{ + "name": "name", + "module": "module", + "update_every": applyDef, + "autodetection_retry": applyDef, + "priority": applyDef, + }, + }, + "-job -def (+global)": { + def: Default{}, + origCfg: Config{ + "name": "name", + "module": "module", + }, + expectedCfg: Config{ + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + "adjust update_every (update_every < min update every)": { + def: Default{ + MinUpdateEvery: jobDef + 10, + }, + origCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef, + }, + expectedCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef + 10, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + "do not adjust update_every (update_every > min update every)": { + def: Default{ + MinUpdateEvery: 2, + }, + origCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef, + }, + expectedCfg: Config{ + "name": "name", + "module": "module", + "update_every": jobDef, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + "set name to module name if name not set": { + def: Default{}, + origCfg: Config{ + "module": "module", + }, + expectedCfg: Config{ + "name": "module", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + "clean name": { + def: Default{}, + origCfg: Config{ + "name": "na me", + "module": "module", + }, + expectedCfg: Config{ + "name": "na_me", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + test.origCfg.Apply(test.def) + + assert.Equal(t, test.expectedCfg, test.origCfg) + }) + } +} + +func Test_urlResolveHostname(t *testing.T) { + tests := map[string]struct { + input string + wantChanged bool + }{ + "hostname with suffix": { + wantChanged: true, + input: "http://hostname.local:80/metrics", + }, + "hostname without suffix": { + wantChanged: true, + input: "http://hostname:80/metrics", + }, + "no hostname": { + wantChanged: false, + input: "http://127.0.0.1:80/metrics", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + + if 
test.wantChanged { + assert.NotEqual(t, test.input, urlResolveHostname(test.input)) + } else { + assert.Equal(t, test.input, urlResolveHostname(test.input)) + } + }) + } +} + +func Test_jobNameResolveHostname(t *testing.T) { + tests := map[string]struct { + input string + wantChanged bool + }{ + "hostname with dot suffix": { + wantChanged: true, + input: "hostname.local", + }, + "hostname with underscore suffix": { + wantChanged: true, + input: "hostname_local", + }, + "hostname without suffix": { + wantChanged: true, + input: "hostname", + }, + "no hostname": { + wantChanged: false, + input: "name", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + + if test.wantChanged { + assert.NotEqual(t, test.input, jobNameResolveHostname(test.input)) + } else { + assert.Equal(t, test.input, jobNameResolveHostname(test.input)) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/registry.go b/src/go/collectors/go.d.plugin/agent/confgroup/registry.go new file mode 100644 index 00000000000000..295a75129abcc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/registry.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +type Registry map[string]Default + +type Default struct { + MinUpdateEvery int `yaml:"-"` + UpdateEvery int `yaml:"update_every"` + AutoDetectionRetry int `yaml:"autodetection_retry"` + Priority int `yaml:"priority"` +} + +func (r Registry) Register(name string, def Default) { + if name != "" { + r[name] = def + } +} + +func (r Registry) Lookup(name string) (Default, bool) { + def, ok := r[name] + return def, ok +} diff --git a/src/go/collectors/go.d.plugin/agent/confgroup/registry_test.go b/src/go/collectors/go.d.plugin/agent/confgroup/registry_test.go new file mode 100644 index 00000000000000..a63c0ceb1eb9a1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/confgroup/registry_test.go @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRegistry_Register(t *testing.T) { + name := "module" + defaults := Default{ + MinUpdateEvery: 1, + UpdateEvery: 1, + AutoDetectionRetry: 1, + Priority: 1, + } + expected := Registry{ + name: defaults, + } + + actual := Registry{} + actual.Register(name, defaults) + + assert.Equal(t, expected, actual) +} + +func TestRegistry_Lookup(t *testing.T) { + name := "module" + expected := Default{ + MinUpdateEvery: 1, + UpdateEvery: 1, + AutoDetectionRetry: 1, + Priority: 1, + } + reg := Registry{} + reg.Register(name, expected) + + actual, ok := reg.Lookup("module") + + assert.True(t, ok) + assert.Equal(t, expected, actual) +} diff --git a/src/go/collectors/go.d.plugin/agent/config.go b/src/go/collectors/go.d.plugin/agent/config.go new file mode 100644 index 00000000000000..fef68c7e0e614f --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/config.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package agent + +import ( + "fmt" + + "gopkg.in/yaml.v2" +) + +func defaultConfig() config { + return config{ + Enabled: true, + DefaultRun: true, + MaxProcs: 0, + Modules: nil, + } +} + +type config struct { + Enabled bool `yaml:"enabled"` + DefaultRun bool `yaml:"default_run"` + MaxProcs int `yaml:"max_procs"` + Modules map[string]bool `yaml:"modules"` +} + +func (c *config) String() string { + return fmt.Sprintf("enabled '%v', default_run '%v', max_procs '%d'", + c.Enabled, c.DefaultRun, c.MaxProcs) +} + +func (c *config) 
isExplicitlyEnabled(moduleName string) bool { + return c.isEnabled(moduleName, true) +} + +func (c *config) isImplicitlyEnabled(moduleName string) bool { + return c.isEnabled(moduleName, false) +} + +func (c *config) isEnabled(moduleName string, explicit bool) bool { + if enabled, ok := c.Modules[moduleName]; ok { + return enabled + } + if explicit { + return false + } + return c.DefaultRun +} + +func (c *config) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain config + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + var m map[string]interface{} + if err := unmarshal(&m); err != nil { + return err + } + + for key, value := range m { + switch key { + case "enabled", "default_run", "max_procs", "modules": + continue + } + var b bool + if in, err := yaml.Marshal(value); err != nil || yaml.Unmarshal(in, &b) != nil { + continue + } + if c.Modules == nil { + c.Modules = make(map[string]bool) + } + c.Modules[key] = b + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/cache.go b/src/go/collectors/go.d.plugin/agent/discovery/cache.go new file mode 100644 index 00000000000000..7187246b2d0736 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/cache.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discovery + +import ( + "github.com/netdata/go.d.plugin/agent/confgroup" +) + +type cache map[string]*confgroup.Group + +func newCache() *cache { + return &cache{} +} + +func (c cache) update(groups []*confgroup.Group) { + if len(groups) == 0 { + return + } + for _, group := range groups { + if group != nil { + c[group.Source] = group + } + } +} + +func (c cache) reset() { + for key := range c { + delete(c, key) + } +} + +func (c cache) groups() []*confgroup.Group { + groups := make([]*confgroup.Group, 0, len(c)) + for _, group := range c { + groups = append(groups, group) + } + return groups +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/config.go b/src/go/collectors/go.d.plugin/agent/discovery/config.go new file mode 100644 index 00000000000000..d19770d3510439 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/config.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discovery + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/dummy" + "github.com/netdata/go.d.plugin/agent/discovery/file" +) + +type Config struct { + Registry confgroup.Registry + File file.Config + Dummy dummy.Config +} + +func validateConfig(cfg Config) error { + if len(cfg.Registry) == 0 { + return errors.New("empty config registry") + } + if len(cfg.File.Read)+len(cfg.File.Watch) == 0 && len(cfg.Dummy.Names) == 0 { + return errors.New("discoverers not set") + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dummy/config.go b/src/go/collectors/go.d.plugin/agent/discovery/dummy/config.go new file mode 100644 index 00000000000000..8fc66ef129405a --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/dummy/config.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dummy + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/confgroup" +) + +type Config struct { + Registry confgroup.Registry + Names []string +} + +func validateConfig(cfg Config) error { + if len(cfg.Registry) == 0 { + return errors.New("empty config registry") + } + if len(cfg.Names) == 0 { + return errors.New("names not set") + } + return nil +} diff --git 
a/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery.go b/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery.go
new file mode 100644
index 00000000000000..acd0b8f1cb759a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dummy
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/logger"
+)
+
+func NewDiscovery(cfg Config) (*Discovery, error) {
+	if err := validateConfig(cfg); err != nil {
+		return nil, fmt.Errorf("config validation: %v", err)
+	}
+	d := &Discovery{
+		Logger: logger.New().With(
+			slog.String("component", "discovery dummy"),
+		),
+		reg:   cfg.Registry,
+		names: cfg.Names,
+	}
+	return d, nil
+}
+
+type Discovery struct {
+	*logger.Logger
+
+	reg   confgroup.Registry
+	names []string
+}
+
+func (d *Discovery) String() string {
+	return d.Name()
+}
+
+func (d *Discovery) Name() string {
+	return "dummy discovery"
+}
+
+func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+	d.Info("instance is started")
+	defer func() { d.Info("instance is stopped") }()
+
+	select {
+	case <-ctx.Done():
+	case in <- d.groups():
+	}
+
+	close(in)
+}
+
+func (d *Discovery) groups() (groups []*confgroup.Group) {
+	for _, name := range d.names {
+		groups = append(groups, d.newCfgGroup(name))
+	}
+	return groups
+}
+
+func (d *Discovery) newCfgGroup(name string) *confgroup.Group {
+	def, ok := d.reg.Lookup(name)
+	if !ok {
+		return nil
+	}
+
+	cfg := confgroup.Config{}
+	cfg.SetModule(name)
+	cfg.SetSource(name)
+	cfg.SetProvider("dummy")
+	cfg.Apply(def)
+
+	group := &confgroup.Group{
+		Configs: []confgroup.Config{cfg},
+		Source:  name,
+	}
+	return group
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery_test.go b/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery_test.go
new file mode 100644
index 00000000000000..45eb39aff4ec8c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/dummy/discovery_test.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dummy
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/module"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewDiscovery(t *testing.T) {
+	tests := map[string]struct {
+		cfg     Config
+		wantErr bool
+	}{
+		"valid config": {
+			cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+				Names:    []string{"module1", "module2"},
+			},
+		},
+		"invalid config, registry not set": {
+			cfg: Config{
+				Names: []string{"module1", "module2"},
+			},
+			wantErr: true,
+		},
+		"invalid config, names not set": {
+			cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+			},
+			wantErr: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			d, err := NewDiscovery(test.cfg)
+
+			if test.wantErr {
+				assert.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				assert.NotNil(t, d)
+			}
+		})
+	}
+}
+
+func TestDiscovery_Run(t *testing.T) {
+	expected := []*confgroup.Group{
+		{
+			Source: "module1",
+			Configs: []confgroup.Config{
+				{
+					"name":                "module1",
+					"module":              "module1",
+					"update_every":        module.UpdateEvery,
+					"autodetection_retry": module.AutoDetectionRetry,
+					"priority":            module.Priority,
+					"__source__":          "module1",
+					"__provider__":        "dummy",
+				},
+			},
+		},
+		{
+			Source: "module2",
+			Configs:
[]confgroup.Config{ + { + "name": "module2", + "module": "module2", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": "module2", + "__provider__": "dummy", + }, + }, + }, + } + + reg := confgroup.Registry{ + "module1": {}, + "module2": {}, + } + cfg := Config{ + Registry: reg, + Names: []string{"module1", "module2"}, + } + + discovery, err := NewDiscovery(cfg) + require.NoError(t, err) + + in := make(chan []*confgroup.Group) + timeout := time.Second * 2 + + go discovery.Run(context.Background(), in) + + var actual []*confgroup.Group + select { + case actual = <-in: + case <-time.After(timeout): + t.Logf("discovery timed out after %s", timeout) + } + assert.Equal(t, expected, actual) +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/config.go b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/config.go new file mode 100644 index 00000000000000..ebda00f5004df4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/config.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dyncfg + +import ( + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/agent/module" +) + +type Config struct { + Plugin string + API NetdataDyncfgAPI + Functions FunctionRegistry + Modules module.Registry + ModuleConfigDefaults confgroup.Registry +} + +type NetdataDyncfgAPI interface { + DynCfgEnable(string) error + DynCfgReset() error + DyncCfgRegisterModule(string) error + DynCfgRegisterJob(_, _, _ string) error + DynCfgReportJobStatus(_, _, _, _ string) error + FunctionResultSuccess(_, _, _ string) error + FunctionResultReject(_, _, _ string) error +} + +type FunctionRegistry interface { + Register(name string, reg func(functions.Function)) +} + +func validateConfig(cfg Config) error { + return nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg.go b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg.go new file mode 100644 index 00000000000000..2f3c34234015a0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg.go @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dyncfg + +import ( + "bytes" + "context" + "fmt" + "log/slog" + "strings" + "sync" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +const dynCfg = "dyncfg" + +func NewDiscovery(cfg Config) (*Discovery, error) { + if err := validateConfig(cfg); err != nil { + return nil, err + } + + mgr := &Discovery{ + Logger: logger.New().With( + slog.String("component", "discovery dyncfg"), + ), + Plugin: cfg.Plugin, + API: cfg.API, + Modules: cfg.Modules, + ModuleConfigDefaults: nil, + mux: &sync.Mutex{}, + configs: make(map[string]confgroup.Config), + } + + mgr.registerFunctions(cfg.Functions) + + return mgr, nil +} + +type Discovery struct { + *logger.Logger + + Plugin string + API NetdataDyncfgAPI + Modules module.Registry + ModuleConfigDefaults confgroup.Registry + + in chan<- []*confgroup.Group + + mux *sync.Mutex + configs map[string]confgroup.Config +} + +func (d *Discovery) String() string { + return d.Name() +} + +func (d *Discovery) Name() string { + return "dyncfg discovery" +} + +func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) { + 
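+	// Rough flow: on (re)start announce the plugin and its modules to the
+	// agent, then block until shutdown. Job configs arrive later through the
+	// registered set_* functions, which push groups into the saved channel.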
d.Info("instance is started") + defer func() { d.Info("instance is stopped") }() + + d.in = in + + if reload, ok := ctx.Value("reload").(bool); ok && reload { + _ = d.API.DynCfgReset() + } + + _ = d.API.DynCfgEnable(d.Plugin) + + for k := range d.Modules { + _ = d.API.DyncCfgRegisterModule(k) + } + + <-ctx.Done() +} + +func (d *Discovery) registerFunctions(r FunctionRegistry) { + r.Register("get_plugin_config", d.getPluginConfig) + r.Register("get_plugin_config_schema", d.getModuleConfigSchema) + r.Register("set_plugin_config", d.setPluginConfig) + + r.Register("get_module_config", d.getModuleConfig) + r.Register("get_module_config_schema", d.getModuleConfigSchema) + r.Register("set_module_config", d.setModuleConfig) + + r.Register("get_job_config", d.getJobConfig) + r.Register("get_job_config_schema", d.getJobConfigSchema) + r.Register("set_job_config", d.setJobConfig) + r.Register("delete_job", d.deleteJobName) +} + +func (d *Discovery) getPluginConfig(fn functions.Function) { d.notImplemented(fn) } +func (d *Discovery) getPluginConfigSchema(fn functions.Function) { d.notImplemented(fn) } +func (d *Discovery) setPluginConfig(fn functions.Function) { d.notImplemented(fn) } + +func (d *Discovery) getModuleConfig(fn functions.Function) { d.notImplemented(fn) } +func (d *Discovery) getModuleConfigSchema(fn functions.Function) { d.notImplemented(fn) } +func (d *Discovery) setModuleConfig(fn functions.Function) { d.notImplemented(fn) } + +func (d *Discovery) getJobConfig(fn functions.Function) { + if err := d.verifyFn(fn, 2); err != nil { + d.apiReject(fn, err.Error()) + return + } + + moduleName, jobName := fn.Args[0], fn.Args[1] + + bs, err := d.getConfigBytes(moduleName + "_" + jobName) + if err != nil { + d.apiReject(fn, err.Error()) + return + } + + d.apiSuccessYAML(fn, string(bs)) +} + +func (d *Discovery) getJobConfigSchema(fn functions.Function) { + if err := d.verifyFn(fn, 1); err != nil { + d.apiReject(fn, err.Error()) + return + } + + name := fn.Args[0] + + v, ok := d.Modules[name] + if !ok { + msg := jsonErrorf("module %s is not registered", name) + d.apiReject(fn, msg) + return + } + + d.apiSuccessJSON(fn, v.JobConfigSchema) +} + +func (d *Discovery) setJobConfig(fn functions.Function) { + if err := d.verifyFn(fn, 2); err != nil { + d.apiReject(fn, err.Error()) + return + } + + var cfg confgroup.Config + if err := yaml.NewDecoder(bytes.NewBuffer(fn.Payload)).Decode(&cfg); err != nil { + d.apiReject(fn, err.Error()) + return + } + + modName, jobName := fn.Args[0], fn.Args[1] + def, _ := d.ModuleConfigDefaults.Lookup(modName) + src := source(modName, jobName) + + cfg.SetProvider(dynCfg) + cfg.SetSource(src) + cfg.SetModule(modName) + cfg.SetName(jobName) + cfg.Apply(def) + + d.in <- []*confgroup.Group{ + { + Configs: []confgroup.Config{cfg}, + Source: src, + }, + } + + d.apiSuccessJSON(fn, "") +} + +func (d *Discovery) deleteJobName(fn functions.Function) { + if err := d.verifyFn(fn, 2); err != nil { + d.apiReject(fn, err.Error()) + return + } + + modName, jobName := fn.Args[0], fn.Args[1] + + cfg, ok := d.getConfig(modName + "_" + jobName) + if !ok { + d.apiReject(fn, jsonErrorf("module '%s' job '%s': not registered", modName, jobName)) + return + } + if cfg.Provider() != dynCfg { + d.apiReject(fn, jsonErrorf("module '%s' job '%s': can't remove non Dyncfg job", modName, jobName)) + return + } + + d.in <- []*confgroup.Group{ + { + Configs: []confgroup.Config{}, + Source: source(modName, jobName), + }, + } + + d.apiSuccessJSON(fn, "") +} + +func (d *Discovery) apiSuccessJSON(fn 
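+	// fn.UID correlates the reply with the originating function request;
+	// the payload is forwarded as-is with the declared content type.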
+	_ = d.API.FunctionResultSuccess(fn.UID, "application/json", payload)
+}
+
+func (d *Discovery) apiSuccessYAML(fn functions.Function, payload string) {
+	_ = d.API.FunctionResultSuccess(fn.UID, "application/x-yaml", payload)
+}
+
+func (d *Discovery) apiReject(fn functions.Function, msg string) {
+	_ = d.API.FunctionResultReject(fn.UID, "application/json", msg)
+}
+
+func (d *Discovery) notImplemented(fn functions.Function) {
+	d.Infof("not implemented: '%s'", fn.String())
+	msg := jsonErrorf("function '%s' is not implemented", fn.Name)
+	d.apiReject(fn, msg)
+}
+
+func (d *Discovery) verifyFn(fn functions.Function, wantArgs int) error {
+	if got := len(fn.Args); got != wantArgs {
+		msg := jsonErrorf("wrong number of arguments: want %d, got %d (args: '%v')", wantArgs, got, fn.Args)
+		return fmt.Errorf("%s", msg)
+	}
+
+	if isSetFunction(fn) && len(fn.Payload) == 0 {
+		msg := jsonErrorf("no payload")
+		return fmt.Errorf("%s", msg)
+	}
+
+	return nil
+}
+
+func jsonErrorf(format string, a ...any) string {
+	msg := fmt.Sprintf(format, a...)
+	msg = strings.ReplaceAll(msg, "\n", " ")
+
+	return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg)
+}
+
+func source(modName, jobName string) string {
+	return fmt.Sprintf("%s/%s/%s", dynCfg, modName, jobName)
+}
+
+func cfgJobName(cfg confgroup.Config) string {
+	if strings.HasPrefix(cfg.Source(), dynCfg) {
+		return cfg.Name()
+	}
+	return cfg.NameWithHash()
+}
+
+func isSetFunction(fn functions.Function) bool {
+	return strings.HasPrefix(fn.Name, "set_")
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg_test.go b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg_test.go
new file mode 100644
index 00000000000000..3eee1cef30b448
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/dyncfg_test.go
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dyncfg
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/functions"
+	"github.com/netdata/go.d.plugin/agent/module"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewDiscovery(t *testing.T) {
+
+}
+
+func TestDiscovery_Register(t *testing.T) {
+	tests := map[string]struct {
+		regConfigs   []confgroup.Config
+		wantApiStats *mockApi
+		wantConfigs  int
+	}{
+		"register jobs created by Dyncfg and other providers": {
+			regConfigs: []confgroup.Config{
+				prepareConfig(
+					"__provider__", dynCfg,
+					"module", "test",
+					"name", "first",
+				),
+				prepareConfig(
+					"__provider__", "test",
+					"module", "test",
+					"name", "second",
+				),
+			},
+			wantConfigs: 2,
+			wantApiStats: &mockApi{
+				callsDynCfgRegisterJob: 1,
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			var mock mockApi
+			d := &Discovery{
+				API:     &mock,
+				mux:     &sync.Mutex{},
+				configs: make(map[string]confgroup.Config),
+			}
+
+			for _, v := range test.regConfigs {
+				d.Register(v)
+			}
+
+			assert.Equal(t, test.wantApiStats, &mock)
+			assert.Equal(t, test.wantConfigs, len(d.configs))
+		})
+	}
+}
+
+func TestDiscovery_Unregister(t *testing.T) {
+	tests := map[string]struct {
+		regConfigs   []confgroup.Config
+		unregConfigs []confgroup.Config
+		wantApiStats *mockApi
+		wantConfigs  int
+	}{
+		"register/unregister jobs created by Dyncfg and other providers": {
+			wantConfigs: 0,
+			wantApiStats: &mockApi{
+				callsDynCfgRegisterJob: 1,
+			},
+			regConfigs: []confgroup.Config{
+				prepareConfig(
+					"__provider__",
dynCfg, + "module", "test", + "name", "first", + ), + prepareConfig( + "__provider__", "test", + "module", "test", + "name", "second", + ), + }, + unregConfigs: []confgroup.Config{ + prepareConfig( + "__provider__", dynCfg, + "module", "test", + "name", "first", + ), + prepareConfig( + "__provider__", "test", + "module", "test", + "name", "second", + ), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var mock mockApi + d := &Discovery{ + API: &mock, + mux: &sync.Mutex{}, + configs: make(map[string]confgroup.Config), + } + + for _, v := range test.regConfigs { + d.Register(v) + } + for _, v := range test.unregConfigs { + d.Unregister(v) + } + + assert.Equal(t, test.wantApiStats, &mock) + assert.Equal(t, test.wantConfigs, len(d.configs)) + }) + } +} + +func TestDiscovery_UpdateStatus(t *testing.T) { + +} + +func TestDiscovery_Run(t *testing.T) { + tests := map[string]struct { + wantApiStats *mockApi + }{ + "default run": { + wantApiStats: &mockApi{ + callsDynCfgEnable: 1, + callsDyncCfgRegisterModule: 2, + callsRegister: 10, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var mock mockApi + d, err := NewDiscovery(Config{ + Plugin: "test", + API: &mock, + Functions: &mock, + Modules: module.Registry{ + "module1": module.Creator{}, + "module2": module.Creator{}, + }, + ModuleConfigDefaults: nil, + }) + require.Nil(t, err) + + testTime := time.Second * 3 + ctx, cancel := context.WithTimeout(context.Background(), testTime) + defer cancel() + + in := make(chan<- []*confgroup.Group) + done := make(chan struct{}) + + go func() { defer close(done); d.Run(ctx, in) }() + + timeout := testTime + time.Second*2 + tk := time.NewTimer(timeout) + defer tk.Stop() + + select { + case <-done: + assert.Equal(t, test.wantApiStats, &mock) + case <-tk.C: + t.Errorf("timed out after %s", timeout) + } + }) + } +} + +type mockApi struct { + callsDynCfgEnable int + callsDyncCfgRegisterModule int + callsDynCfgRegisterJob int + callsDynCfgReportJobStatus int + callsFunctionResultSuccess int + callsFunctionResultReject int + + callsRegister int +} + +func (m *mockApi) Register(string, func(functions.Function)) { + m.callsRegister++ +} + +func (m *mockApi) DynCfgEnable(string) error { + m.callsDynCfgEnable++ + return nil +} + +func (m *mockApi) DynCfgReset() error { + return nil +} + +func (m *mockApi) DyncCfgRegisterModule(string) error { + m.callsDyncCfgRegisterModule++ + return nil +} + +func (m *mockApi) DynCfgRegisterJob(_, _, _ string) error { + m.callsDynCfgRegisterJob++ + return nil +} + +func (m *mockApi) DynCfgReportJobStatus(_, _, _, _ string) error { + m.callsDynCfgReportJobStatus++ + return nil +} + +func (m *mockApi) FunctionResultSuccess(_, _, _ string) error { + m.callsFunctionResultSuccess++ + return nil +} + +func (m *mockApi) FunctionResultReject(_, _, _ string) error { + m.callsFunctionResultReject++ + return nil +} + +func prepareConfig(values ...string) confgroup.Config { + cfg := confgroup.Config{} + for i := 1; i < len(values); i += 2 { + cfg[values[i-1]] = values[i] + } + return cfg +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/ext.go b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/ext.go new file mode 100644 index 00000000000000..910475c3d5d2fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/dyncfg/ext.go @@ -0,0 +1,79 @@ +package dyncfg + +import ( + "errors" + "os" + "strings" + + "github.com/netdata/go.d.plugin/agent/confgroup" + + "gopkg.in/yaml.v2" +) + +func (d 
*Discovery) Register(cfg confgroup.Config) { + name := cfgJobName(cfg) + if cfg.Provider() != dynCfg { + // jobType handling in ND is not documented + _ = d.API.DynCfgRegisterJob(cfg.Module(), name, "stock") + } + + key := cfg.Module() + "_" + name + d.addConfig(key, cfg) +} + +func (d *Discovery) Unregister(cfg confgroup.Config) { + key := cfg.Module() + "_" + cfgJobName(cfg) + d.removeConfig(key) +} + +func (d *Discovery) UpdateStatus(cfg confgroup.Config, status, payload string) { + _ = d.API.DynCfgReportJobStatus(cfg.Module(), cfgJobName(cfg), status, payload) +} + +func (d *Discovery) addConfig(name string, cfg confgroup.Config) { + d.mux.Lock() + defer d.mux.Unlock() + + d.configs[name] = cfg +} + +func (d *Discovery) removeConfig(key string) { + d.mux.Lock() + defer d.mux.Unlock() + + delete(d.configs, key) +} + +func (d *Discovery) getConfig(key string) (confgroup.Config, bool) { + d.mux.Lock() + defer d.mux.Unlock() + + v, ok := d.configs[key] + return v, ok +} + +func (d *Discovery) getConfigBytes(key string) ([]byte, error) { + d.mux.Lock() + defer d.mux.Unlock() + + cfg, ok := d.configs[key] + if !ok { + return nil, errors.New("config not found") + } + + bs, err := yaml.Marshal(cfg) + if err != nil { + return nil, err + } + + return bs, nil +} + +var envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") + +func isStock(cfg confgroup.Config) bool { + if envNDStockConfigDir == "" { + return false + } + return strings.HasPrefix(cfg.Source(), envNDStockConfigDir) +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/config.go b/src/go/collectors/go.d.plugin/agent/discovery/file/config.go new file mode 100644 index 00000000000000..f8ef6b5fdb7ab9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/file/config.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package file + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/confgroup" +) + +type Config struct { + Registry confgroup.Registry + Read []string + Watch []string +} + +func validateConfig(cfg Config) error { + if len(cfg.Registry) == 0 { + return errors.New("empty config registry") + } + if len(cfg.Read)+len(cfg.Watch) == 0 { + return errors.New("discoverers not set") + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/discovery.go b/src/go/collectors/go.d.plugin/agent/discovery/file/discovery.go new file mode 100644 index 00000000000000..028644dd475061 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/file/discovery.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package file + +import ( + "context" + "errors" + "fmt" + "log/slog" + "sync" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/logger" +) + +var log = logger.New().With( + slog.String("component", "discovery file"), +) + +func NewDiscovery(cfg Config) (*Discovery, error) { + if err := validateConfig(cfg); err != nil { + return nil, fmt.Errorf("file discovery config validation: %v", err) + } + + d := Discovery{ + Logger: log, + } + + if err := d.registerDiscoverers(cfg); err != nil { + return nil, fmt.Errorf("file discovery initialization: %v", err) + } + + return &d, nil +} + +type ( + Discovery struct { + *logger.Logger + discoverers []discoverer + } + discoverer interface { + Run(ctx context.Context, in chan<- []*confgroup.Group) + } +) + +func (d *Discovery) String() string { + return d.Name() +} + +func (d *Discovery) Name() string { + return fmt.Sprintf("file discovery: %v", d.discoverers) 
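+	// e.g. "file discovery: [file reader file watcher]" when both Read
+	// and Watch paths are configured (see registerDiscoverers below)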
+}
+
+func (d *Discovery) registerDiscoverers(cfg Config) error {
+	if len(cfg.Read) != 0 {
+		d.discoverers = append(d.discoverers, NewReader(cfg.Registry, cfg.Read))
+	}
+	if len(cfg.Watch) != 0 {
+		d.discoverers = append(d.discoverers, NewWatcher(cfg.Registry, cfg.Watch))
+	}
+	if len(d.discoverers) == 0 {
+		return errors.New("zero registered discoverers")
+	}
+	return nil
+}
+
+func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+	d.Info("instance is started")
+	defer func() { d.Info("instance is stopped") }()
+
+	var wg sync.WaitGroup
+
+	for _, dd := range d.discoverers {
+		wg.Add(1)
+		go func(dd discoverer) {
+			defer wg.Done()
+			d.runDiscoverer(ctx, dd, in)
+		}(dd)
+	}
+
+	wg.Wait()
+	<-ctx.Done()
+}
+
+func (d *Discovery) runDiscoverer(ctx context.Context, dd discoverer, in chan<- []*confgroup.Group) {
+	updates := make(chan []*confgroup.Group)
+	go dd.Run(ctx, updates)
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case groups, ok := <-updates:
+			if !ok {
+				return
+			}
+			select {
+			case <-ctx.Done():
+				return
+			case in <- groups:
+			}
+		}
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/discovery_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/discovery_test.go
new file mode 100644
index 00000000000000..2bdb669eb82476
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/discovery_test.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TODO: tech debt
+func TestNewDiscovery(t *testing.T) {
+
+}
+
+// TODO: tech debt
+func TestDiscovery_Run(t *testing.T) {
+
+}
+
+func prepareDiscovery(t *testing.T, cfg Config) *Discovery {
+	d, err := NewDiscovery(cfg)
+	require.NoError(t, err)
+	return d
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/parse.go b/src/go/collectors/go.d.plugin/agent/discovery/file/parse.go
new file mode 100644
index 00000000000000..b6ba52372aec52
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/parse.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+
+	"gopkg.in/yaml.v2"
+)
+
+type format int
+
+const (
+	unknownFormat format = iota
+	unknownEmptyFormat
+	staticFormat
+	sdFormat
+)
+
+func parse(reg confgroup.Registry, path string) (*confgroup.Group, error) {
+	bs, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(bs) == 0 {
+		return nil, nil
+	}
+
+	switch cfgFormat(bs) {
+	case staticFormat:
+		return parseStaticFormat(reg, path, bs)
+	case sdFormat:
+		return parseSDFormat(reg, path, bs)
+	case unknownEmptyFormat:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown file format: '%s'", path)
+	}
+}
+
+func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) {
+	name := fileName(path)
+	// TODO: properly handle module renaming
+	// See agent/setup.go buildDiscoveryConf() for details
+	if name == "wmi" {
+		name = "windows"
+	}
+	modDef, ok := reg.Lookup(name)
+	if !ok {
+		return nil, nil
+	}
+
+	var modCfg staticConfig
+	if err := yaml.Unmarshal(bs, &modCfg); err != nil {
+		return nil, err
+	}
+	for _, cfg := range modCfg.Jobs {
+		cfg.SetModule(name)
+		def := mergeDef(modCfg.Default, modDef)
+		cfg.Apply(def)
+	}
+	group := &confgroup.Group{
+		Configs: modCfg.Jobs,
+		Source:  path,
+	}
+	return group, nil
+}
+
+func parseSDFormat(reg
confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) { + var cfgs sdConfig + if err := yaml.Unmarshal(bs, &cfgs); err != nil { + return nil, err + } + + var i int + for _, cfg := range cfgs { + if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" { + cfg.Apply(def) + cfgs[i] = cfg + i++ + } + } + + group := &confgroup.Group{ + Configs: cfgs[:i], + Source: path, + } + return group, nil +} + +func cfgFormat(bs []byte) format { + var data interface{} + if err := yaml.Unmarshal(bs, &data); err != nil { + return unknownFormat + } + if data == nil { + return unknownEmptyFormat + } + + type ( + static = map[interface{}]interface{} + sd = []interface{} + ) + switch data.(type) { + case static: + return staticFormat + case sd: + return sdFormat + default: + return unknownFormat + } +} + +func mergeDef(a, b confgroup.Default) confgroup.Default { + return confgroup.Default{ + MinUpdateEvery: firstPositive(a.MinUpdateEvery, b.MinUpdateEvery), + UpdateEvery: firstPositive(a.UpdateEvery, b.UpdateEvery), + AutoDetectionRetry: firstPositive(a.AutoDetectionRetry, b.AutoDetectionRetry), + Priority: firstPositive(a.Priority, b.Priority), + } +} + +func firstPositive(value int, others ...int) int { + if value > 0 || len(others) == 0 { + return value + } + return firstPositive(others[0], others[1:]...) +} + +func fileName(path string) string { + _, file := filepath.Split(path) + ext := filepath.Ext(path) + return file[:len(file)-len(ext)] +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/parse_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/parse_test.go new file mode 100644 index 00000000000000..e18d43013ef267 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/file/parse_test.go @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package file + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParse(t *testing.T) { + const ( + jobDef = 11 + cfgDef = 22 + modDef = 33 + ) + tests := map[string]func(t *testing.T, tmp *tmpDir){ + "static, default: +job +conf +module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": { + UpdateEvery: modDef, + AutoDetectionRetry: modDef, + Priority: modDef, + }, + } + cfg := staticConfig{ + Default: confgroup.Default{ + UpdateEvery: cfgDef, + AutoDetectionRetry: cfgDef, + Priority: cfgDef, + }, + Jobs: []confgroup.Config{ + { + "name": "name", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "static, default: +job +conf +module (merge all)": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": { + Priority: modDef, + }, + } + cfg := staticConfig{ + Default: confgroup.Default{ + AutoDetectionRetry: cfgDef, + }, + Jobs: []confgroup.Config{ + { + "name": "name", + "update_every": jobDef, + }, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: 
[]confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": jobDef, + "autodetection_retry": cfgDef, + "priority": modDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "static, default: -job +conf +module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": { + UpdateEvery: modDef, + AutoDetectionRetry: modDef, + Priority: modDef, + }, + } + cfg := staticConfig{ + Default: confgroup.Default{ + UpdateEvery: cfgDef, + AutoDetectionRetry: cfgDef, + Priority: cfgDef, + }, + Jobs: []confgroup.Config{ + { + "name": "name", + }, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": cfgDef, + "autodetection_retry": cfgDef, + "priority": cfgDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "static, default: -job -conf +module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": { + UpdateEvery: modDef, + AutoDetectionRetry: modDef, + Priority: modDef, + }, + } + cfg := staticConfig{ + Jobs: []confgroup.Config{ + { + "name": "name", + }, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "autodetection_retry": modDef, + "priority": modDef, + "update_every": modDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "static, default: -job -conf -module (+global)": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": {}, + } + cfg := staticConfig{ + Jobs: []confgroup.Config{ + { + "name": "name", + }, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "update_every": module.UpdateEvery, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "sd, default: +job +module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "sd_module": { + UpdateEvery: modDef, + AutoDetectionRetry: modDef, + Priority: modDef, + }, + } + cfg := sdConfig{ + { + "name": "name", + "module": "sd_module", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "module": "sd_module", + "name": "name", + "update_every": jobDef, + "autodetection_retry": jobDef, + "priority": jobDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "sd, default: -job +module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "sd_module": { + UpdateEvery: modDef, + AutoDetectionRetry: modDef, + Priority: modDef, + }, + } + cfg := sdConfig{ + { + "name": "name", + "module": "sd_module", + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + 
Configs: []confgroup.Config{ + { + "name": "name", + "module": "sd_module", + "update_every": modDef, + "autodetection_retry": modDef, + "priority": modDef, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "sd, default: -job -module (+global)": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "sd_module": {}, + } + cfg := sdConfig{ + { + "name": "name", + "module": "sd_module", + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "sd_module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "sd, job has no 'module' or 'module' is empty": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "sd_module": {}, + } + cfg := sdConfig{ + { + "name": "name", + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{}, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "conf registry has no module": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "sd_module": {}, + } + cfg := sdConfig{ + { + "name": "name", + "module": "module", + }, + } + filename := tmp.join("module.conf") + tmp.writeYAML(filename, cfg) + + expected := &confgroup.Group{ + Source: filename, + Configs: []confgroup.Config{}, + } + + group, err := parse(reg, filename) + + require.NoError(t, err) + assert.Equal(t, expected, group) + }, + "empty file": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{ + "module": {}, + } + + filename := tmp.createFile("empty-*") + group, err := parse(reg, filename) + + assert.Nil(t, group) + require.NoError(t, err) + }, + "only comments, unknown empty format": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{} + + filename := tmp.createFile("unknown-empty-format-*") + tmp.writeString(filename, "# a comment") + group, err := parse(reg, filename) + + assert.Nil(t, group) + assert.NoError(t, err) + }, + "unknown format": func(t *testing.T, tmp *tmpDir) { + reg := confgroup.Registry{} + + filename := tmp.createFile("unknown-format-*") + tmp.writeYAML(filename, "unknown") + group, err := parse(reg, filename) + + assert.Nil(t, group) + assert.Error(t, err) + }, + } + + for name, scenario := range tests { + t.Run(name, func(t *testing.T) { + tmp := newTmpDir(t, "parse-file-*") + defer tmp.cleanup() + scenario(t, tmp) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/read.go b/src/go/collectors/go.d.plugin/agent/discovery/file/read.go new file mode 100644 index 00000000000000..3d27955ade33fb --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/file/read.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package file + +import ( + "context" + "os" + "path/filepath" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/logger" +) + +type ( + staticConfig struct { + confgroup.Default `yaml:",inline"` + Jobs []confgroup.Config `yaml:"jobs"` + } + sdConfig []confgroup.Config +) + +func NewReader(reg confgroup.Registry, paths []string) *Reader { + return &Reader{ + Logger: log, + reg: reg, 
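+		// paths are shell-style glob patterns, expanded via filepath.Glob
+		// in groups below; e.g. "/etc/netdata/go.d/*.conf" (illustrative)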
+ paths: paths, + } +} + +type Reader struct { + *logger.Logger + + reg confgroup.Registry + paths []string +} + +func (r *Reader) String() string { + return r.Name() +} + +func (r *Reader) Name() string { + return "file reader" +} + +func (r *Reader) Run(ctx context.Context, in chan<- []*confgroup.Group) { + r.Info("instance is started") + defer func() { r.Info("instance is stopped") }() + + select { + case <-ctx.Done(): + case in <- r.groups(): + } + + close(in) +} + +func (r *Reader) groups() (groups []*confgroup.Group) { + for _, pattern := range r.paths { + matches, err := filepath.Glob(pattern) + if err != nil { + continue + } + + for _, path := range matches { + if fi, err := os.Stat(path); err != nil || !fi.Mode().IsRegular() { + continue + } + + group, err := parse(r.reg, path) + if err != nil { + r.Warningf("parse '%s': %v", path, err) + continue + } + if group == nil { + group = &confgroup.Group{Source: path} + } + groups = append(groups, group) + } + } + + for _, group := range groups { + for _, cfg := range group.Configs { + cfg.SetSource(group.Source) + cfg.SetProvider(r.Name()) + } + } + + return groups +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go new file mode 100644 index 00000000000000..2bfa20a77896b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package file + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" +) + +func TestReader_String(t *testing.T) { + assert.NotEmpty(t, NewReader(confgroup.Registry{}, nil)) +} + +func TestNewReader(t *testing.T) { + tests := map[string]struct { + reg confgroup.Registry + paths []string + }{ + "empty inputs": { + reg: confgroup.Registry{}, + paths: []string{}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { assert.NotNil(t, NewReader(test.reg, test.paths)) }) + } +} + +func TestReader_Run(t *testing.T) { + tmp := newTmpDir(t, "reader-run-*") + defer tmp.cleanup() + + module1 := tmp.join("module1.conf") + module2 := tmp.join("module2.conf") + module3 := tmp.join("module3.conf") + + tmp.writeYAML(module1, staticConfig{ + Jobs: []confgroup.Config{{"name": "name"}}, + }) + tmp.writeYAML(module2, staticConfig{ + Jobs: []confgroup.Config{{"name": "name"}}, + }) + tmp.writeString(module3, "# a comment") + + reg := confgroup.Registry{ + "module1": {}, + "module2": {}, + "module3": {}, + } + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Read: []string{module1, module2, module3}, + }) + expected := []*confgroup.Group{ + { + Source: module1, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module1", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": module1, + "__provider__": "file reader", + }, + }, + }, + { + Source: module2, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module2", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": module2, + "__provider__": "file reader", + }, + }, + }, + { + Source: module3, + }, + } + + sim := discoverySim{ + discovery: discovery, + expectedGroups: expected, + } + + sim.run(t) +} diff --git 
+func (r *Reader) groups() (groups []*confgroup.Group) {
+	for _, pattern := range r.paths {
+		matches, err := filepath.Glob(pattern)
+		if err != nil {
+			continue
+		}
+
+		for _, path := range matches {
+			if fi, err := os.Stat(path); err != nil || !fi.Mode().IsRegular() {
+				continue
+			}
+
+			group, err := parse(r.reg, path)
+			if err != nil {
+				r.Warningf("parse '%s': %v", path, err)
+				continue
+			}
+			if group == nil {
+				group = &confgroup.Group{Source: path}
+			}
+			groups = append(groups, group)
+		}
+	}
+
+	for _, group := range groups {
+		for _, cfg := range group.Configs {
+			cfg.SetSource(group.Source)
+			cfg.SetProvider(r.Name())
+		}
+	}
+
+	return groups
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go
new file mode 100644
index 00000000000000..2bfa20a77896b7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/read_test.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"testing"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/module"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestReader_String(t *testing.T) {
+	assert.NotEmpty(t, NewReader(confgroup.Registry{}, nil))
+}
+
+func TestNewReader(t *testing.T) {
+	tests := map[string]struct {
+		reg   confgroup.Registry
+		paths []string
+	}{
+		"empty inputs": {
+			reg:   confgroup.Registry{},
+			paths: []string{},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) { assert.NotNil(t, NewReader(test.reg, test.paths)) })
+	}
+}
+
+func TestReader_Run(t *testing.T) {
+	tmp := newTmpDir(t, "reader-run-*")
+	defer tmp.cleanup()
+
+	module1 := tmp.join("module1.conf")
+	module2 := tmp.join("module2.conf")
+	module3 := tmp.join("module3.conf")
+
+	tmp.writeYAML(module1, staticConfig{
+		Jobs: []confgroup.Config{{"name": "name"}},
+	})
+	tmp.writeYAML(module2, staticConfig{
+		Jobs: []confgroup.Config{{"name": "name"}},
+	})
+	tmp.writeString(module3, "# a comment")
+
+	reg := confgroup.Registry{
+		"module1": {},
+		"module2": {},
+		"module3": {},
+	}
+	discovery := prepareDiscovery(t, Config{
+		Registry: reg,
+		Read:     []string{module1, module2, module3},
+	})
+	expected := []*confgroup.Group{
+		{
+			Source: module1,
+			Configs: []confgroup.Config{
+				{
+					"name":                "name",
+					"module":              "module1",
+					"update_every":        module.UpdateEvery,
+					"autodetection_retry": module.AutoDetectionRetry,
+					"priority":            module.Priority,
+					"__source__":          module1,
+					"__provider__":        "file reader",
+				},
+			},
+		},
+		{
+			Source: module2,
+			Configs: []confgroup.Config{
+				{
+					"name":                "name",
+					"module":              "module2",
+					"update_every":        module.UpdateEvery,
+					"autodetection_retry": module.AutoDetectionRetry,
+					"priority":            module.Priority,
+					"__source__":          module2,
+					"__provider__":        "file reader",
+				},
+			},
+		},
+		{
+			Source: module3,
+		},
+	}
+
+	sim := discoverySim{
+		discovery:      discovery,
+		expectedGroups: expected,
+	}
+
+	sim.run(t)
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/sim_test.go
new file mode 100644
index 00000000000000..d2bf8124bcea9c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/sim_test.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v2"
+)
+
+type (
+	discoverySim struct {
+		discovery      *Discovery
+		beforeRun      func()
+		afterRun       func()
+		expectedGroups []*confgroup.Group
+	}
+)
+
+func (sim discoverySim) run(t *testing.T) {
+	t.Helper()
+	require.NotNil(t, sim.discovery)
+
+	if sim.beforeRun != nil {
+		sim.beforeRun()
+	}
+
+	in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group)
+	go sim.collectGroups(t, in, out)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	go sim.discovery.Run(ctx, in)
+	time.Sleep(time.Millisecond * 250)
+
+	if sim.afterRun != nil {
+		sim.afterRun()
+	}
+
+	actual := <-out
+
+	sortGroups(actual)
+	sortGroups(sim.expectedGroups)
+
+	assert.Equal(t, sim.expectedGroups, actual)
+}
+
+func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) {
+	timeout := time.Second * 5
+	var groups []*confgroup.Group
+loop:
+	for {
+		select {
+		case updates := <-in:
+			if groups = append(groups, updates...); len(groups) >= len(sim.expectedGroups) {
+				break loop
+			}
+		case <-time.After(timeout):
+			t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events were skipped",
+				sim.discovery.discoverers, timeout, len(groups), len(sim.expectedGroups))
+			break loop
+		}
+	}
+	out <- groups
+}
+
+type tmpDir struct {
+	dir string
+	t   *testing.T
+}
+
+func newTmpDir(t *testing.T, pattern string) *tmpDir {
+	pattern = "netdata-go-test-discovery-file-" + pattern
+	dir, err := os.MkdirTemp(os.TempDir(), pattern)
+	require.NoError(t, err)
+	return &tmpDir{dir: dir, t: t}
+}
+
+func (d *tmpDir) cleanup() {
+	assert.NoError(d.t, os.RemoveAll(d.dir))
+}
+
+func (d *tmpDir) join(filename string) string {
+	return filepath.Join(d.dir, filename)
+}
+
+func (d *tmpDir) createFile(pattern string) string {
+	f, err := os.CreateTemp(d.dir, pattern)
+	require.NoError(d.t, err)
+	_ = f.Close()
+	return f.Name()
+}
+
+func (d *tmpDir) removeFile(filename string) {
+	err := os.Remove(filename)
+	require.NoError(d.t, err)
+}
+
+func (d *tmpDir) renameFile(origFilename, newFilename string) {
+	err := os.Rename(origFilename, newFilename)
+	require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeYAML(filename string, in interface{}) {
+	bs, err := yaml.Marshal(in)
+	require.NoError(d.t, err)
+	err = os.WriteFile(filename, bs, 0644)
+	require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeString(filename, data string) {
+	err := os.WriteFile(filename, []byte(data), 0644)
+	require.NoError(d.t, err)
+}
+
+func sortGroups(groups []*confgroup.Group) {
+	if len(groups) == 0 {
+		return
+	}
+	sort.Slice(groups, func(i, j int) bool { return groups[i].Source < groups[j].Source })
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/watch.go b/src/go/collectors/go.d.plugin/agent/discovery/file/watch.go
new file mode 100644
index 00000000000000..e33aac3ec0eee8
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/watch.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/logger"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+type (
+	Watcher struct {
+		*logger.Logger
+
+		paths        []string
+		reg          confgroup.Registry
+		watcher      *fsnotify.Watcher
+		cache        cache
+		refreshEvery time.Duration
+	}
+	cache map[string]time.Time
+)
+
+func (c cache) lookup(path string) (time.Time, bool) { v, ok := c[path]; return v, ok }
+func (c cache) has(path string) bool                 { _, ok := c.lookup(path); return ok }
+func (c cache) remove(path string)                   { delete(c, path) }
+func (c cache) put(path string, modTime time.Time)   { c[path] = modTime }
+
+func NewWatcher(reg confgroup.Registry, paths []string) *Watcher {
+	d := &Watcher{
+		Logger:       log,
+		paths:        paths,
+		reg:          reg,
+		watcher:      nil,
+		cache:        make(cache),
+		refreshEvery: time.Minute,
+	}
+	return d
+}
+
+func (w *Watcher) String() string {
+	return w.Name()
+}
+
+func (w *Watcher) Name() string {
+	return "file watcher"
+}
+
+func (w *Watcher) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+	w.Info("instance is started")
+	defer func() { w.Info("instance is stopped") }()
+
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		w.Errorf("fsnotify watcher initialization: %v", err)
+		return
+	}
+
+	w.watcher = watcher
+	defer w.stop()
+	w.refresh(ctx, in)
+
+	tk := time.NewTicker(w.refreshEvery)
+	defer tk.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-tk.C:
+			w.refresh(ctx, in)
+		case event := <-w.watcher.Events:
+			// TODO: check if event.Has will do
+			if event.Name == "" || isChmodOnly(event) || !w.fileMatches(event.Name) {
+				break
+			}
+			if event.Has(fsnotify.Create) && w.cache.has(event.Name) {
+				// vim "backupcopy=no" case: already collected after the Rename event.
+				break
+			}
+			if event.Has(fsnotify.Rename) {
+				// Files are commonly edited with vim. When writing to a file, vim
+				// makes a backup; the "backupcopy" option controls how. The default
+				// is "no": rename the original file and write a new one. This is a
+				// cheap attempt to avoid sending an empty group for the old file.
+				time.Sleep(time.Millisecond * 100)
+			}
+			w.refresh(ctx, in)
+		case err := <-w.watcher.Errors:
+			if err != nil {
+				w.Warningf("watch: %v", err)
+			}
+		}
+	}
+}
+
+func (w *Watcher) fileMatches(file string) bool {
+	for _, pattern := range w.paths {
+		if ok, _ := filepath.Match(pattern, file); ok {
+			return true
+		}
+	}
+	return false
+}
+
+func (w *Watcher) listFiles() (files []string) {
+	for _, pattern := range w.paths {
+		if matches, err := filepath.Glob(pattern); err == nil {
+			files = append(files, matches...)
+		}
+	}
+	return files
+}
+
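+// refresh re-globs all patterns, re-parses files whose mtime changed, emits
+// empty groups for files that disappeared, and re-arms the directory watches.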
+func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) {
+	select {
+	case <-ctx.Done():
+		return
+	default:
+	}
+	var groups []*confgroup.Group
+	seen := make(map[string]bool)
+
+	for _, file := range w.listFiles() {
+		fi, err := os.Lstat(file)
+		if err != nil {
+			w.Warningf("lstat '%s': %v", file, err)
+			continue
+		}
+
+		if !fi.Mode().IsRegular() {
+			continue
+		}
+
+		seen[file] = true
+		if v, ok := w.cache.lookup(file); ok && v.Equal(fi.ModTime()) {
+			continue
+		}
+		w.cache.put(file, fi.ModTime())
+
+		if group, err := parse(w.reg, file); err != nil {
+			w.Warningf("parse '%s': %v", file, err)
+		} else if group == nil {
+			groups = append(groups, &confgroup.Group{Source: file})
+		} else {
+			groups = append(groups, group)
+		}
+	}
+
+	for name := range w.cache {
+		if seen[name] {
+			continue
+		}
+		w.cache.remove(name)
+		groups = append(groups, &confgroup.Group{Source: name})
+	}
+
+	for _, group := range groups {
+		for _, cfg := range group.Configs {
+			cfg.SetSource(group.Source)
+			cfg.SetProvider("file watcher")
+		}
+	}
+
+	send(ctx, in, groups)
+	w.watchDirs()
+}
+
+func (w *Watcher) watchDirs() {
+	for _, path := range w.paths {
+		if idx := strings.LastIndex(path, "/"); idx > -1 {
+			path = path[:idx]
+		} else {
+			path = "./"
+		}
+		if err := w.watcher.Add(path); err != nil {
+			w.Errorf("start watching '%s': %v", path, err)
+		}
+	}
+}
+
+func (w *Watcher) stop() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Closing the watcher deadlocks unless all pending events and errors are drained.
+	go func() {
+		for {
+			select {
+			case <-w.watcher.Errors:
+			case <-w.watcher.Events:
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	// Close in fact never returns an error.
+	_ = w.watcher.Close()
+}
+
+func isChmodOnly(event fsnotify.Event) bool {
+	return event.Op^fsnotify.Chmod == 0
+}
+
+func send(ctx context.Context, in chan<- []*confgroup.Group, groups []*confgroup.Group) {
+	if len(groups) == 0 {
+		return
+	}
+	select {
+	case <-ctx.Done():
+	case in <- groups:
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/file/watch_test.go b/src/go/collectors/go.d.plugin/agent/discovery/file/watch_test.go
new file mode 100644
index 00000000000000..1450b7bb6be842
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/file/watch_test.go
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/module"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWatcher_String(t *testing.T) {
+	assert.NotEmpty(t, NewWatcher(confgroup.Registry{}, nil))
+}
+
+func TestNewWatcher(t *testing.T) {
+	tests := map[string]struct {
+		reg   confgroup.Registry
+		paths []string
+	}{
+		"empty inputs": {
+			reg:   confgroup.Registry{},
+			paths: []string{},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) { assert.NotNil(t, NewWatcher(test.reg, test.paths)) })
+	}
+}
+
+func TestWatcher_Run(t *testing.T) {
+	tests := map[string]func(tmp *tmpDir) discoverySim{
+		"file exists before start": func(tmp *tmpDir) discoverySim {
+			reg := confgroup.Registry{
+				"module": {},
+			}
+			cfg := sdConfig{
+				{
+					"name":   "name",
+					"module": "module",
+				},
+			}
+			filename := tmp.join("module.conf")
+			discovery := prepareDiscovery(t, Config{
+				Registry: reg,
+				Watch:    []string{tmp.join("*.conf")},
+			})
+			expected := []*confgroup.Group{
+				{
+					Source: filename,
+					Configs: []confgroup.Config{
+ { + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeYAML(filename, cfg) + }, + expectedGroups: expected, + } + return sim + }, + "empty file": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeString(filename, "") + }, + expectedGroups: expected, + } + return sim + }, + "only comments, no data": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeString(filename, "# a comment") + }, + expectedGroups: expected, + } + return sim + }, + "add file": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + cfg := sdConfig{ + { + "name": "name", + "module": "module", + }, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + } + + sim := discoverySim{ + discovery: discovery, + afterRun: func() { + tmp.writeYAML(filename, cfg) + }, + expectedGroups: expected, + } + return sim + }, + "remove file": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + cfg := sdConfig{ + { + "name": "name", + "module": "module", + }, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + { + Source: filename, + Configs: nil, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeYAML(filename, cfg) + }, + afterRun: func() { + tmp.removeFile(filename) + }, + expectedGroups: expected, + } + return sim + }, + "change file": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + cfgOrig := sdConfig{ + { + "name": "name", + "module": "module", + }, + } + cfgChanged := sdConfig{ + { + "name": "name_changed", + "module": "module", + }, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + Configs: []confgroup.Config{ + { + 
"name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + { + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name_changed", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeYAML(filename, cfgOrig) + }, + afterRun: func() { + tmp.writeYAML(filename, cfgChanged) + time.Sleep(time.Millisecond * 500) + }, + expectedGroups: expected, + } + return sim + }, + "vim 'backupcopy=no' (writing to a file and backup)": func(tmp *tmpDir) discoverySim { + reg := confgroup.Registry{ + "module": {}, + } + cfg := sdConfig{ + { + "name": "name", + "module": "module", + }, + } + filename := tmp.join("module.conf") + discovery := prepareDiscovery(t, Config{ + Registry: reg, + Watch: []string{tmp.join("*.conf")}, + }) + expected := []*confgroup.Group{ + { + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + { + Source: filename, + Configs: []confgroup.Config{ + { + "name": "name", + "module": "module", + "update_every": module.UpdateEvery, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + "__source__": filename, + "__provider__": "file watcher", + }, + }, + }, + } + + sim := discoverySim{ + discovery: discovery, + beforeRun: func() { + tmp.writeYAML(filename, cfg) + }, + afterRun: func() { + newFilename := filename + ".swp" + tmp.renameFile(filename, newFilename) + tmp.writeYAML(filename, cfg) + tmp.removeFile(newFilename) + time.Sleep(time.Millisecond * 500) + }, + expectedGroups: expected, + } + return sim + }, + } + + for name, createSim := range tests { + t.Run(name, func(t *testing.T) { + tmp := newTmpDir(t, "watch-run-*") + defer tmp.cleanup() + + createSim(tmp).run(t) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/manager.go b/src/go/collectors/go.d.plugin/agent/discovery/manager.go new file mode 100644 index 00000000000000..3ab1ab6af588f6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/manager.go @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discovery + +import ( + "context" + "errors" + "fmt" + "log/slog" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/dummy" + "github.com/netdata/go.d.plugin/agent/discovery/file" + "github.com/netdata/go.d.plugin/logger" +) + +func NewManager(cfg Config) (*Manager, error) { + if err := validateConfig(cfg); err != nil { + return nil, fmt.Errorf("discovery manager config validation: %v", err) + } + + mgr := &Manager{ + Logger: logger.New().With( + slog.String("component", "discovery manager"), + ), + send: make(chan struct{}, 1), + sendEvery: time.Second * 2, // timeout to aggregate changes + discoverers: make([]discoverer, 0), + mux: &sync.RWMutex{}, + cache: newCache(), + } + + if err := mgr.registerDiscoverers(cfg); err != nil { + return nil, fmt.Errorf("discovery manager initializaion: %v", err) + } + + return mgr, 
+type discoverer interface {
+	Run(ctx context.Context, in chan<- []*confgroup.Group)
+}
+
+type Manager struct {
+	*logger.Logger
+	discoverers []discoverer
+	send        chan struct{}
+	sendEvery   time.Duration
+	mux         *sync.RWMutex
+	cache       *cache
+}
+
+func (m *Manager) String() string {
+	return fmt.Sprintf("discovery manager: %v", m.discoverers)
+}
+
+func (m *Manager) Add(d discoverer) {
+	m.discoverers = append(m.discoverers, d)
+}
+
+func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+	m.Info("instance is started")
+	defer func() { m.Info("instance is stopped") }()
+
+	var wg sync.WaitGroup
+
+	for _, d := range m.discoverers {
+		wg.Add(1)
+		go func(d discoverer) {
+			defer wg.Done()
+			m.runDiscoverer(ctx, d)
+		}(d)
+	}
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		m.sendLoop(ctx, in)
+	}()
+
+	wg.Wait()
+	<-ctx.Done()
+}
+
+func (m *Manager) registerDiscoverers(cfg Config) error {
+	if len(cfg.File.Read) > 0 || len(cfg.File.Watch) > 0 {
+		cfg.File.Registry = cfg.Registry
+		d, err := file.NewDiscovery(cfg.File)
+		if err != nil {
+			return err
+		}
+		m.Add(d)
+	}
+
+	if len(cfg.Dummy.Names) > 0 {
+		cfg.Dummy.Registry = cfg.Registry
+		d, err := dummy.NewDiscovery(cfg.Dummy)
+		if err != nil {
+			return err
+		}
+		m.Add(d)
+	}
+
+	if len(m.discoverers) == 0 {
+		return errors.New("zero registered discoverers")
+	}
+
+	m.Infof("registered discoverers: %v", m.discoverers)
+	return nil
+}
+
+func (m *Manager) runDiscoverer(ctx context.Context, d discoverer) {
+	updates := make(chan []*confgroup.Group)
+	go d.Run(ctx, updates)
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case groups, ok := <-updates:
+			if !ok {
+				return
+			}
+			func() {
+				m.mux.Lock()
+				defer m.mux.Unlock()
+
+				m.cache.update(groups)
+				m.triggerSend()
+			}()
+		}
+	}
+}
+
+func (m *Manager) sendLoop(ctx context.Context, in chan<- []*confgroup.Group) {
+	m.mustSend(ctx, in)
+
+	tk := time.NewTicker(m.sendEvery)
+	defer tk.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-tk.C:
+			select {
+			case <-m.send:
+				m.trySend(in)
+			default:
+			}
+		}
+	}
+}
+
+func (m *Manager) mustSend(ctx context.Context, in chan<- []*confgroup.Group) {
+	select {
+	case <-ctx.Done():
+		return
+	case <-m.send:
+		m.mux.Lock()
+		groups := m.cache.groups()
+		m.cache.reset()
+		m.mux.Unlock()
+
+		select {
+		case <-ctx.Done():
+		case in <- groups:
+		}
+		return
+	}
+}
+
+func (m *Manager) trySend(in chan<- []*confgroup.Group) {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	select {
+	case in <- m.cache.groups():
+		m.cache.reset()
+	default:
+		m.triggerSend()
+	}
+}
+
+func (m *Manager) triggerSend() {
+	select {
+	case m.send <- struct{}{}:
+	default:
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/manager_test.go b/src/go/collectors/go.d.plugin/agent/discovery/manager_test.go
new file mode 100644
index 00000000000000..ebeba81a460f6c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/manager_test.go
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/discovery/file"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewManager(t *testing.T) {
+	tests := map[string]struct {
+		cfg     Config
+		wantErr bool
+	}{
+		"valid config": {
+			cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+				File:     file.Config{Read: []string{"path"}},
+			},
+		},
"invalid config, registry not set": { + cfg: Config{ + File: file.Config{Read: []string{"path"}}, + }, + wantErr: true, + }, + "invalid config, discoverers not set": { + cfg: Config{ + Registry: confgroup.Registry{"module1": confgroup.Default{}}, + }, + wantErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mgr, err := NewManager(test.cfg) + + if test.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.NotNil(t, mgr) + } + }) + } +} + +func TestManager_Run(t *testing.T) { + tests := map[string]func() discoverySim{ + "several discoverers, unique groups with delayed collect": func() discoverySim { + const numGroups, numCfgs = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numCfgs) + d2 := prepareMockDiscoverer("test2", numGroups, numCfgs) + mgr := prepareManager(d1, d2) + expected := combineGroups(d1.groups, d2.groups) + + sim := discoverySim{ + mgr: mgr, + collectDelay: mgr.sendEvery + time.Second, + expectedGroups: expected, + } + return sim + }, + "several discoverers, unique groups": func() discoverySim { + const numGroups, numCfgs = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numCfgs) + d2 := prepareMockDiscoverer("test2", numGroups, numCfgs) + mgr := prepareManager(d1, d2) + expected := combineGroups(d1.groups, d2.groups) + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "several discoverers, same groups": func() discoverySim { + const numGroups, numTargets = 2, 2 + d1 := prepareMockDiscoverer("test1", numGroups, numTargets) + mgr := prepareManager(d1, d1) + expected := combineGroups(d1.groups) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "several discoverers, empty groups": func() discoverySim { + const numGroups, numCfgs = 1, 0 + d1 := prepareMockDiscoverer("test1", numGroups, numCfgs) + d2 := prepareMockDiscoverer("test2", numGroups, numCfgs) + mgr := prepareManager(d1, d2) + expected := combineGroups(d1.groups, d2.groups) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: expected, + } + return sim + }, + "several discoverers, nil groups": func() discoverySim { + const numGroups, numCfgs = 0, 0 + d1 := prepareMockDiscoverer("test1", numGroups, numCfgs) + d2 := prepareMockDiscoverer("test2", numGroups, numCfgs) + mgr := prepareManager(d1, d2) + + sim := discoverySim{ + mgr: mgr, + expectedGroups: nil, + } + return sim + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { sim().run(t) }) + } +} + +func prepareMockDiscoverer(source string, groups, configs int) mockDiscoverer { + d := mockDiscoverer{} + + for i := 0; i < groups; i++ { + group := confgroup.Group{ + Source: fmt.Sprintf("%s_group_%d", source, i+1), + } + for j := 0; j < configs; j++ { + group.Configs = append(group.Configs, + confgroup.Config{"name": fmt.Sprintf("%s_group_%d_target_%d", source, i+1, j+1)}) + } + d.groups = append(d.groups, &group) + } + return d +} + +func prepareManager(discoverers ...discoverer) *Manager { + mgr := &Manager{ + send: make(chan struct{}, 1), + sendEvery: 2 * time.Second, + discoverers: discoverers, + cache: newCache(), + mux: &sync.RWMutex{}, + } + return mgr +} + +type mockDiscoverer struct { + groups []*confgroup.Group +} + +func (md mockDiscoverer) Run(ctx context.Context, out chan<- []*confgroup.Group) { + for { + select { + case <-ctx.Done(): + return + case out <- md.groups: + return + } + } +} + +func combineGroups(groups ...[]*confgroup.Group) (combined []*confgroup.Group) { + for _, 
+	for _, set := range groups {
+		combined = append(combined, set...)
+	}
+	return combined
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/conffile.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/conffile.go
new file mode 100644
index 00000000000000..96b6f07cd78a67
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/conffile.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sd
+
+import (
+	"context"
+
+	"github.com/ilyam8/hashstructure"
+)
+
+type ConfigFileProvider interface {
+	Run(ctx context.Context)
+	Configs() chan ConfigFile
+}
+
+type ConfigFile struct {
+	Source string
+	Data   []byte
+}
+
+func (c *ConfigFile) Hash() uint64 {
+	h, _ := hashstructure.Hash(c, nil)
+	return h
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/config.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/config.go
new file mode 100644
index 00000000000000..8b47fc0d86744e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/config.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hostsocket
+
+type NetworkSocketConfig struct {
+	Tags string
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net.go
new file mode 100644
index 00000000000000..18cc35b7dc434f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net.go
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hostsocket
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"log/slog"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+	"github.com/netdata/go.d.plugin/logger"
+
+	"github.com/ilyam8/hashstructure"
+)
+
+type netSocketTargetGroup struct {
+	provider string
+	source   string
+	targets  []model.Target
+}
+
+func (g *netSocketTargetGroup) Provider() string        { return g.provider }
+func (g *netSocketTargetGroup) Source() string          { return g.source }
+func (g *netSocketTargetGroup) Targets() []model.Target { return g.targets }
+
+type NetSocketTarget struct {
+	model.Base
+
+	hash uint64
+
+	Protocol string
+	Address  string
+	Port     string
+	Comm     string
+	Cmdline  string
+}
+
+func (t *NetSocketTarget) TUID() string { return t.tuid() }
+func (t *NetSocketTarget) Hash() uint64 { return t.hash }
+func (t *NetSocketTarget) tuid() string {
+	return fmt.Sprintf("%s_%s_%d", strings.ToLower(t.Protocol), t.Port, t.hash)
+}
+
+func NewNetSocketDiscoverer(cfg NetworkSocketConfig) (*NetDiscoverer, error) {
+	tags, err := model.ParseTags(cfg.Tags)
+	if err != nil {
+		return nil, fmt.Errorf("parse tags: %v", err)
+	}
+
+	dir := os.Getenv("NETDATA_PLUGINS_DIR")
+	if dir == "" {
+		dir, _ = os.Getwd()
+	}
+
+	d := &NetDiscoverer{
+		Logger: logger.New().With(
+			slog.String("component", "discovery sd hostsocket"),
+		),
+		interval: time.Second * 60,
+		ll: &localListenersExec{
+			binPath: filepath.Join(dir, "local-listeners"),
+			timeout: time.Second * 5,
+		},
+	}
+	d.Tags().Merge(tags)
+
+	return d, nil
+}
+
+type (
+	NetDiscoverer struct {
+		*logger.Logger
+		model.Base
+
+		interval time.Duration
+		ll       localListeners
+	}
+	localListeners interface {
+		discover(ctx context.Context) ([]byte, error)
+	}
+)
+
+func (d *NetDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+	if err := d.discoverLocalListeners(ctx, in); err != nil {
+		d.Error(err)
+		return
+	}
+
+	tk := time.NewTicker(d.interval)
+	defer tk.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-tk.C:
+			if err := d.discoverLocalListeners(ctx, in); err != nil {
+				d.Error(err)
+				return
+			}
+		}
+	}
+}
+
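+// discoverLocalListeners runs the local-listeners helper once and turns its
+// output into a single target group; a cancelled context is not treated as an error.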
+func (d *NetDiscoverer) discoverLocalListeners(ctx context.Context, in chan<- []model.TargetGroup) error {
+	bs, err := d.ll.discover(ctx)
+	if err != nil {
+		if errors.Is(err, context.Canceled) {
+			return nil
+		}
+		return err
+	}
+
+	tggs, err := d.parseLocalListeners(bs)
+	if err != nil {
+		return err
+	}
+
+	select {
+	case <-ctx.Done():
+	case in <- tggs:
+	}
+	return nil
+}
+
+func (d *NetDiscoverer) parseLocalListeners(bs []byte) ([]model.TargetGroup, error) {
+	var tgts []model.Target
+
+	sc := bufio.NewScanner(bytes.NewReader(bs))
+	for sc.Scan() {
+		text := strings.TrimSpace(sc.Text())
+		if text == "" {
+			continue
+		}
+
+		// Protocol|Address|Port|Cmdline
+		parts := strings.SplitN(text, "|", 4)
+		if len(parts) != 4 {
+			return nil, fmt.Errorf("unexpected data: '%s'", text)
+		}
+
+		tgt := NetSocketTarget{
+			Protocol: parts[0],
+			Address:  parts[1],
+			Port:     parts[2],
+			Comm:     extractComm(parts[3]),
+			Cmdline:  parts[3],
+		}
+
+		hash, err := calcHash(tgt)
+		if err != nil {
+			continue
+		}
+
+		tgt.hash = hash
+		tgt.Tags().Merge(d.Tags())
+
+		tgts = append(tgts, &tgt)
+	}
+
+	tgg := &netSocketTargetGroup{
+		provider: "hostsocket",
+		source:   "net",
+		targets:  tgts,
+	}
+
+	return []model.TargetGroup{tgg}, nil
+}
+
+type localListenersExec struct {
+	binPath string
+	timeout time.Duration
+}
+
+func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) {
+	execCtx, cancel := context.WithTimeout(ctx, e.timeout)
+	defer cancel()
+
+	cmd := exec.CommandContext(execCtx, e.binPath, "tcp") // TODO: tcp6?
+
+	bs, err := cmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+	}
+
+	return bs, nil
+}
+
+func extractComm(s string) string {
+	i := strings.IndexByte(s, ' ')
+	if i <= 0 {
+		return ""
+	}
+	_, comm := filepath.Split(s[:i])
+	return comm
+}
+
+func calcHash(obj any) (uint64, error) {
+	return hashstructure.Hash(obj, nil)
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net_test.go
new file mode 100644
index 00000000000000..4ec860875594a0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/net_test.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hostsocket
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+)
+
+var (
+	localListenersOutputSample = []byte(`
+UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D
+TCP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D
+TCP|127.0.0.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D
+UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1
+`)
+)
+
+func TestNetSocketDiscoverer_Discover(t *testing.T) {
+	tests := map[string]discoverySim{
+		"valid response": {
+			mock:                 &mockLocalListenersExec{},
+			wantDoneBeforeCancel: false,
+			wantTargetGroups: []model.TargetGroup{&netSocketTargetGroup{
+				provider: "hostsocket",
+				source:   "net",
+				targets: []model.Target{
+					withHash(&NetSocketTarget{
+						Protocol: "UDP6",
+						Address:  "::1",
+						Port:     "8125",
+						Comm:     "netdata",
+						Cmdline:  "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+					}),
+					withHash(&NetSocketTarget{
+						Protocol: "TCP6",
+						Address:  "::1",
+ Port: "8125", + Comm: "netdata", + Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D", + }), + withHash(&NetSocketTarget{ + Protocol: "TCP", + Address: "127.0.0.1", + Port: "8125", + Comm: "netdata", + Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D", + }), + withHash(&NetSocketTarget{ + Protocol: "UDP", + Address: "127.0.0.1", + Port: "53768", + Comm: "go.d.plugin", + Cmdline: "/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1", + }), + }, + }}, + }, + "empty response": { + mock: &mockLocalListenersExec{emptyResponse: true}, + wantDoneBeforeCancel: false, + wantTargetGroups: []model.TargetGroup{&netSocketTargetGroup{ + provider: "hostsocket", + source: "net", + }}, + }, + "error on exec": { + mock: &mockLocalListenersExec{err: true}, + wantDoneBeforeCancel: true, + wantTargetGroups: nil, + }, + "invalid data": { + mock: &mockLocalListenersExec{invalidResponse: true}, + wantDoneBeforeCancel: true, + wantTargetGroups: nil, + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { + sim.run(t) + }) + } +} + +func withHash(l *NetSocketTarget) *NetSocketTarget { + l.hash, _ = calcHash(l) + tags, _ := model.ParseTags("hostsocket net") + l.Tags().Merge(tags) + return l +} + +type mockLocalListenersExec struct { + err bool + emptyResponse bool + invalidResponse bool +} + +func (m *mockLocalListenersExec) discover(context.Context) ([]byte, error) { + if m.err { + return nil, errors.New("mock discover() error") + } + if m.emptyResponse { + return nil, nil + } + if m.invalidResponse { + return []byte("this is very incorrect data"), nil + } + return localListenersOutputSample, nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/sim_test.go new file mode 100644 index 00000000000000..998d9370c40da2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/hostsocket/sim_test.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hostsocket + +import ( + "context" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type discoverySim struct { + mock *mockLocalListenersExec + wantDoneBeforeCancel bool + wantTargetGroups []model.TargetGroup +} + +func (sim *discoverySim) run(t *testing.T) { + d, err := NewNetSocketDiscoverer(NetworkSocketConfig{Tags: "hostsocket net"}) + require.NoError(t, err) + + d.ll = sim.mock + + ctx, cancel := context.WithCancel(context.Background()) + tggs, done := sim.collectTargetGroups(t, ctx, d) + + if sim.wantDoneBeforeCancel { + select { + case <-done: + default: + assert.Fail(t, "discovery hasn't finished before cancel") + } + } + assert.Equal(t, sim.wantTargetGroups, tggs) + + cancel() + select { + case <-done: + case <-time.After(time.Second * 3): + assert.Fail(t, "discovery hasn't finished after cancel") + } +} + +func (sim *discoverySim) collectTargetGroups(t *testing.T, ctx context.Context, d *NetDiscoverer) ([]model.TargetGroup, chan struct{}) { + + in := make(chan []model.TargetGroup) + done := make(chan struct{}) + + go func() { defer close(done); d.Discover(ctx, in) }() + + timeout := time.Second * 5 + var tggs []model.TargetGroup + + func() { + for { + select { + case groups := <-in: + if tggs = append(tggs, groups...); len(tggs) == len(sim.wantTargetGroups) { + return + } + case <-done: + return + case <-time.After(timeout): + t.Logf("discovery 
+				t.Logf("discovery timed out after %s", timeout)
+				return
+			}
+		}
+	}()
+
+	return tggs, done
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/config.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/config.go
new file mode 100644
index 00000000000000..684eeb5e50a048
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/config.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import "errors"
+
+type Config struct {
+	APIServer  string         `yaml:"api_server"` // TODO: not used
+	Namespaces []string       `yaml:"namespaces"`
+	Pod        *PodConfig     `yaml:"pod"`
+	Service    *ServiceConfig `yaml:"service"`
+}
+
+type PodConfig struct {
+	Tags      string `yaml:"tags"`
+	LocalMode bool   `yaml:"local_mode"`
+	Selector  struct {
+		Label string `yaml:"label"`
+		Field string `yaml:"field"`
+	} `yaml:"selector"`
+}
+
+type ServiceConfig struct {
+	Tags     string `yaml:"tags"`
+	Selector struct {
+		Label string `yaml:"label"`
+		Field string `yaml:"field"`
+	} `yaml:"selector"`
+}
+
+func validateConfig(cfg Config) error {
+	if cfg.Pod == nil && cfg.Service == nil {
+		return errors.New("no discoverers configured")
+	}
+
+	return nil
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes.go
new file mode 100644
index 00000000000000..ba4f058515de51
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes.go
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+	"github.com/netdata/go.d.plugin/logger"
+	"github.com/netdata/go.d.plugin/pkg/k8sclient"
+
+	"github.com/ilyam8/hashstructure"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+)
+
+const (
+	envNodeName = "MY_NODE_NAME"
+)
+
+var log = logger.New().With(
+	slog.String("component", "discovery sd k8s"),
+)
+
+func NewKubeDiscoverer(cfg Config) (*KubeDiscoverer, error) {
+	if err := validateConfig(cfg); err != nil {
+		return nil, fmt.Errorf("config validation: %v", err)
+	}
+
+	client, err := k8sclient.New("Netdata/service-td")
+	if err != nil {
+		return nil, fmt.Errorf("create clientset: %v", err)
+	}
+
+	ns := cfg.Namespaces
+	if len(ns) == 0 {
+		ns = []string{corev1.NamespaceAll}
+	}
+
+	d := &KubeDiscoverer{
+		Logger:      log,
+		namespaces:  ns,
+		podConf:     cfg.Pod,
+		svcConf:     cfg.Service,
+		client:      client,
+		discoverers: make([]model.Discoverer, 0, len(ns)),
+		started:     make(chan struct{}),
+	}
+
+	return d, nil
+}
+
+type KubeDiscoverer struct {
+	*logger.Logger
+
+	podConf *PodConfig
+	svcConf *ServiceConfig
+
+	namespaces  []string
+	client      kubernetes.Interface
+	discoverers []model.Discoverer
+	started     chan struct{}
+}
+
+func (d *KubeDiscoverer) String() string {
+	return "k8s td manager"
+}
+
+const resyncPeriod = 10 * time.Minute
+
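+// Discover starts one pod/service discoverer per configured namespace and
+// forwards their target group updates until the context is cancelled or all
+// discoverers exit.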
+func (d *KubeDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+	d.Info("instance is started")
+	defer d.Info("instance is stopped")
+
+	for _, namespace := range d.namespaces {
+		if err := d.setupPodDiscoverer(ctx, d.podConf, namespace); err != nil {
+			d.Errorf("create pod discoverer: %v", err)
+			return
+		}
+		if err := d.setupServiceDiscoverer(ctx, d.svcConf, namespace); err != nil {
+			d.Errorf("create service discoverer: %v", err)
+			return
+		}
+	}
+
+	if len(d.discoverers) == 0 {
+		d.Warning("no discoverers registered")
+		return
+	}
+
+	d.Infof("registered: %v", d.discoverers)
+
+	var wg sync.WaitGroup
+	updates := make(chan []model.TargetGroup)
+
+	for _, disc := range d.discoverers {
+		wg.Add(1)
+		go func(disc model.Discoverer) { defer wg.Done(); disc.Discover(ctx, updates) }(disc)
+	}
+
+	done := make(chan struct{})
+	go func() { defer close(done); wg.Wait() }()
+
+	close(d.started)
+
+	for {
+		select {
+		case <-ctx.Done():
+			select {
+			case <-done:
+				d.Info("all discoverers exited")
+			case <-time.After(time.Second * 5):
+				d.Warning("not all discoverers exited")
+			}
+			return
+		case <-done:
+			d.Info("all discoverers exited")
+			return
+		case tggs := <-updates:
+			select {
+			case <-ctx.Done():
+			case in <- tggs:
+			}
+		}
+	}
+}
+
+func (d *KubeDiscoverer) setupPodDiscoverer(ctx context.Context, conf *PodConfig, namespace string) error {
+	if conf == nil {
+		return nil
+	}
+
+	if conf.LocalMode {
+		name := os.Getenv(envNodeName)
+		if name == "" {
+			return fmt.Errorf("local_mode is enabled, but env '%s' not set", envNodeName)
+		}
+		conf.Selector.Field = joinSelectors(conf.Selector.Field, "spec.nodeName="+name)
+	}
+
+	tags, err := model.ParseTags(conf.Tags)
+	if err != nil {
+		return fmt.Errorf("parse tags: %v", err)
+	}
+
+	pod := d.client.CoreV1().Pods(namespace)
+	podLW := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			options.FieldSelector = conf.Selector.Field
+			options.LabelSelector = conf.Selector.Label
+			return pod.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			options.FieldSelector = conf.Selector.Field
+			options.LabelSelector = conf.Selector.Label
+			return pod.Watch(ctx, options)
+		},
+	}
+
+	cmap := d.client.CoreV1().ConfigMaps(namespace)
+	cmapLW := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			return cmap.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			return cmap.Watch(ctx, options)
+		},
+	}
+
+	secret := d.client.CoreV1().Secrets(namespace)
+	secretLW := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			return secret.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			return secret.Watch(ctx, options)
+		},
+	}
+
+	td := newPodDiscoverer(
+		cache.NewSharedInformer(podLW, &corev1.Pod{}, resyncPeriod),
+		cache.NewSharedInformer(cmapLW, &corev1.ConfigMap{}, resyncPeriod),
+		cache.NewSharedInformer(secretLW, &corev1.Secret{}, resyncPeriod),
+	)
+	td.Tags().Merge(tags)
+
+	d.discoverers = append(d.discoverers, td)
+
+	return nil
+}
+
+func (d *KubeDiscoverer) setupServiceDiscoverer(ctx context.Context, conf *ServiceConfig, namespace string) error {
+	if conf == nil {
+		return nil
+	}
+
+	tags, err := model.ParseTags(conf.Tags)
+	if err != nil {
+		return fmt.Errorf("parse tags: %v", err)
+	}
+
+	svc := d.client.CoreV1().Services(namespace)
+
+	svcLW := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			options.FieldSelector = conf.Selector.Field
+			options.LabelSelector = conf.Selector.Label
+			return svc.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			options.FieldSelector = conf.Selector.Field
+			options.LabelSelector = conf.Selector.Label
+			return svc.Watch(ctx, options)
+		},
+	}
+
+	inf := cache.NewSharedInformer(svcLW, &corev1.Service{}, resyncPeriod)
+
+	td := newServiceDiscoverer(inf)
+	td.Tags().Merge(tags)
+
+	d.discoverers = append(d.discoverers, td)
+
+	return nil
+}
+
+func enqueue(queue *workqueue.Type, obj any) {
+	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err != nil {
+		return
+	}
+	queue.Add(key)
+}
+
+func send(ctx context.Context, in chan<- []model.TargetGroup, tgg model.TargetGroup) {
+	if tgg == nil {
+		return
+	}
+	select {
+	case <-ctx.Done():
+	case in <- []model.TargetGroup{tgg}:
+	}
+}
+
+func calcHash(obj any) (uint64, error) {
+	return hashstructure.Hash(obj, nil)
+}
+
+func joinSelectors(srs ...string) string {
+	var i int
+	for _, v := range srs {
+		if v != "" {
+			srs[i] = v
+			i++
+		}
+	}
+	return strings.Join(srs[:i], ",")
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes_test.go
new file mode 100644
index 00000000000000..dcaecb8febd416
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/kubernetes_test.go
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+	"github.com/netdata/go.d.plugin/pkg/k8sclient"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+var discoveryTags model.Tags = map[string]struct{}{"k8s": {}}
+
+func TestMain(m *testing.M) {
+	_ = os.Setenv(envNodeName, "m01")
+	_ = os.Setenv(k8sclient.EnvFakeClient, "true")
+	code := m.Run()
+	_ = os.Unsetenv(envNodeName)
+	_ = os.Unsetenv(k8sclient.EnvFakeClient)
+	os.Exit(code)
+}
+
+func TestNewKubeDiscoverer(t *testing.T) {
+	tests := map[string]struct {
+		cfg     Config
+		wantErr bool
+	}{
+		"pod and service config": {
+			wantErr: false,
+			cfg:     Config{Pod: &PodConfig{}, Service: &ServiceConfig{}},
+		},
+		"pod config": {
+			wantErr: false,
+			cfg:     Config{Pod: &PodConfig{}},
+		},
+		"service config": {
+			wantErr: false,
+			cfg:     Config{Service: &ServiceConfig{}},
+		},
+		"empty config": {
+			wantErr: true,
+			cfg:     Config{},
+		},
+	}
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			disc, err := NewKubeDiscoverer(test.cfg)
+
+			if test.wantErr {
+				assert.Error(t, err)
+				assert.Nil(t, disc)
+			} else {
+				assert.NoError(t, err)
+				assert.NotNil(t, disc)
+			}
+		})
+	}
+}
+
+func TestKubeDiscoverer_Discover(t *testing.T) {
+	const prod = "prod"
+	const dev = "dev"
+	prodNamespace := newNamespace(prod)
+	devNamespace := newNamespace(dev)
+
+	tests := map[string]func() discoverySim{
+		"multiple namespaces pod td": func() discoverySim {
+			httpdProd, nginxProd := newHTTPDPod(), newNGINXPod()
+			httpdProd.Namespace = prod
+			nginxProd.Namespace = prod
+
+			httpdDev, nginxDev := newHTTPDPod(), newNGINXPod()
+			httpdDev.Namespace = dev
+			nginxDev.Namespace = dev
+
+			disc, _ := preparePodDiscoverer(
+				[]string{prod, dev},
+				prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev)
+
+			return discoverySim{
+				td:               disc,
+				sortBeforeVerify: true,
+				wantTargetGroups: []model.TargetGroup{
+					preparePodTargetGroup(httpdDev),
+					preparePodTargetGroup(nginxDev),
+					preparePodTargetGroup(httpdProd),
+					preparePodTargetGroup(nginxProd),
+				},
+			}
+		},
"multiple namespaces ClusterIP service td": func() discoverySim { + httpdProd, nginxProd := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpdProd.Namespace = prod + nginxProd.Namespace = prod + + httpdDev, nginxDev := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpdDev.Namespace = dev + nginxDev.Namespace = dev + + disc, _ := prepareSvcDiscoverer( + []string{prod, dev}, + prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev) + + return discoverySim{ + td: disc, + sortBeforeVerify: true, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpdDev), + prepareSvcTargetGroup(nginxDev), + prepareSvcTargetGroup(httpdProd), + prepareSvcTargetGroup(nginxProd), + }, + } + }, + } + + for name, createSim := range tests { + t.Run(name, func(t *testing.T) { + sim := createSim() + sim.run(t) + }) + } +} + +func prepareDiscoverer(role string, namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { + client := fake.NewSimpleClientset(objects...) + disc := &KubeDiscoverer{ + namespaces: namespaces, + client: client, + discoverers: nil, + started: make(chan struct{}), + } + switch role { + case "pod": + disc.podConf = &PodConfig{Tags: "k8s"} + case "svc": + disc.svcConf = &ServiceConfig{Tags: "k8s"} + } + return disc, client +} + +func newNamespace(name string) *corev1.Namespace { + return &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} +} + +func mustCalcHash(obj any) uint64 { + hash, err := calcHash(obj) + if err != nil { + panic(fmt.Sprintf("hash calculation: %v", err)) + } + return hash +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod.go new file mode 100644 index 00000000000000..a6391f7f6ce955 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod.go @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type podTargetGroup struct { + targets []model.Target + source string +} + +func (p podTargetGroup) Provider() string { return "sd:k8s:pod" } +func (p podTargetGroup) Source() string { return fmt.Sprintf("%s(%s)", p.Provider(), p.source) } +func (p podTargetGroup) Targets() []model.Target { return p.targets } + +type PodTarget struct { + model.Base `hash:"ignore"` + + hash uint64 + tuid string + + Address string + Namespace string + Name string + Annotations map[string]any + Labels map[string]any + NodeName string + PodIP string + ControllerName string + ControllerKind string + ContName string + Image string + Env map[string]any + Port string + PortName string + PortProtocol string +} + +func (p PodTarget) Hash() uint64 { return p.hash } +func (p PodTarget) TUID() string { return p.tuid } + +func newPodDiscoverer(pod, cmap, secret cache.SharedInformer) *podDiscoverer { + + if pod == nil || cmap == nil || secret == nil { + panic("nil pod or cmap or secret informer") + } + + queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"}) + + _, _ = pod.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj any) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj any) { enqueue(queue, obj) }, + DeleteFunc: func(obj any) { enqueue(queue, obj) }, + }) + + 
return &podDiscoverer{ + Logger: log, + podInformer: pod, + cmapInformer: cmap, + secretInformer: secret, + queue: queue, + } +} + +type podDiscoverer struct { + *logger.Logger + model.Base + + podInformer cache.SharedInformer + cmapInformer cache.SharedInformer + secretInformer cache.SharedInformer + queue *workqueue.Type +} + +func (p *podDiscoverer) String() string { + return "k8s pod" +} + +func (p *podDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) { + p.Info("instance is started") + defer p.Info("instance is stopped") + defer p.queue.ShutDown() + + go p.podInformer.Run(ctx.Done()) + go p.cmapInformer.Run(ctx.Done()) + go p.secretInformer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), + p.podInformer.HasSynced, p.cmapInformer.HasSynced, p.secretInformer.HasSynced) { + p.Error("failed to sync caches") + return + } + + go p.run(ctx, in) + + <-ctx.Done() +} + +func (p *podDiscoverer) run(ctx context.Context, in chan<- []model.TargetGroup) { + for { + item, shutdown := p.queue.Get() + if shutdown { + return + } + p.handleQueueItem(ctx, in, item) + } +} + +func (p *podDiscoverer) handleQueueItem(ctx context.Context, in chan<- []model.TargetGroup, item any) { + defer p.queue.Done(item) + + key := item.(string) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + obj, ok, err := p.podInformer.GetStore().GetByKey(key) + if err != nil { + return + } + + if !ok { + tgg := &podTargetGroup{source: podSourceFromNsName(namespace, name)} + send(ctx, in, tgg) + return + } + + pod, err := toPod(obj) + if err != nil { + return + } + + tgg := p.buildTargetGroup(pod) + + for _, tgt := range tgg.Targets() { + tgt.Tags().Merge(p.Tags()) + } + + send(ctx, in, tgg) + +} + +func (p *podDiscoverer) buildTargetGroup(pod *corev1.Pod) model.TargetGroup { + if pod.Status.PodIP == "" || len(pod.Spec.Containers) == 0 { + return &podTargetGroup{ + source: podSource(pod), + } + } + return &podTargetGroup{ + source: podSource(pod), + targets: p.buildTargets(pod), + } +} + +func (p *podDiscoverer) buildTargets(pod *corev1.Pod) (targets []model.Target) { + var name, kind string + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + name = ref.Name + kind = ref.Kind + break + } + } + + for _, container := range pod.Spec.Containers { + env := p.collectEnv(pod.Namespace, container) + + if len(container.Ports) == 0 { + tgt := &PodTarget{ + tuid: podTUID(pod, container), + Address: pod.Status.PodIP, + Namespace: pod.Namespace, + Name: pod.Name, + Annotations: mapAny(pod.Annotations), + Labels: mapAny(pod.Labels), + NodeName: pod.Spec.NodeName, + PodIP: pod.Status.PodIP, + ControllerName: name, + ControllerKind: kind, + ContName: container.Name, + Image: container.Image, + Env: mapAny(env), + } + hash, err := calcHash(tgt) + if err != nil { + continue + } + tgt.hash = hash + + targets = append(targets, tgt) + } else { + for _, port := range container.Ports { + portNum := strconv.FormatUint(uint64(port.ContainerPort), 10) + tgt := &PodTarget{ + tuid: podTUIDWithPort(pod, container, port), + Address: net.JoinHostPort(pod.Status.PodIP, portNum), + Namespace: pod.Namespace, + Name: pod.Name, + Annotations: mapAny(pod.Annotations), + Labels: mapAny(pod.Labels), + NodeName: pod.Spec.NodeName, + PodIP: pod.Status.PodIP, + ControllerName: name, + ControllerKind: kind, + ContName: container.Name, + Image: container.Image, + Env: mapAny(env), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + 
+				}
+				hash, err := calcHash(tgt)
+				if err != nil {
+					continue
+				}
+				tgt.hash = hash
+
+				targets = append(targets, tgt)
+			}
+		}
+	}
+
+	return targets
+}
+
+func (p *podDiscoverer) collectEnv(ns string, container corev1.Container) map[string]string {
+	vars := make(map[string]string)
+
+	// When a key exists in multiple sources,
+	// the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	//
+	// Order (https://github.com/kubernetes/kubectl/blob/master/pkg/describe/describe.go)
+	// - envFrom: configMapRef, secretRef
+	// - env: value || valueFrom: fieldRef, resourceFieldRef, secretRef, configMap
+
+	for _, src := range container.EnvFrom {
+		switch {
+		case src.ConfigMapRef != nil:
+			p.envFromConfigMap(vars, ns, src)
+		case src.SecretRef != nil:
+			p.envFromSecret(vars, ns, src)
+		}
+	}
+
+	for _, env := range container.Env {
+		if env.Name == "" || isVar(env.Name) {
+			continue
+		}
+		switch {
+		case env.Value != "":
+			vars[env.Name] = env.Value
+		case env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil:
+			p.valueFromSecret(vars, ns, env)
+		case env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil:
+			p.valueFromConfigMap(vars, ns, env)
+		}
+	}
+
+	if len(vars) == 0 {
+		return nil
+	}
+	return vars
+}
+
+func (p *podDiscoverer) valueFromConfigMap(vars map[string]string, ns string, env corev1.EnvVar) {
+	if env.ValueFrom.ConfigMapKeyRef.Name == "" || env.ValueFrom.ConfigMapKeyRef.Key == "" {
+		return
+	}
+
+	sr := env.ValueFrom.ConfigMapKeyRef
+	key := ns + "/" + sr.Name
+
+	item, exist, err := p.cmapInformer.GetStore().GetByKey(key)
+	if err != nil || !exist {
+		return
+	}
+
+	cmap, err := toConfigMap(item)
+	if err != nil {
+		return
+	}
+
+	if v, ok := cmap.Data[sr.Key]; ok {
+		vars[env.Name] = v
+	}
+}
+
+func (p *podDiscoverer) valueFromSecret(vars map[string]string, ns string, env corev1.EnvVar) {
+	if env.ValueFrom.SecretKeyRef.Name == "" || env.ValueFrom.SecretKeyRef.Key == "" {
+		return
+	}
+
+	secretKey := env.ValueFrom.SecretKeyRef
+	key := ns + "/" + secretKey.Name
+
+	item, exist, err := p.secretInformer.GetStore().GetByKey(key)
+	if err != nil || !exist {
+		return
+	}
+
+	secret, err := toSecret(item)
+	if err != nil {
+		return
+	}
+
+	if v, ok := secret.Data[secretKey.Key]; ok {
+		vars[env.Name] = string(v)
+	}
+}
+
+func (p *podDiscoverer) envFromConfigMap(vars map[string]string, ns string, src corev1.EnvFromSource) {
+	if src.ConfigMapRef.Name == "" {
+		return
+	}
+
+	key := ns + "/" + src.ConfigMapRef.Name
+	item, exist, err := p.cmapInformer.GetStore().GetByKey(key)
+	if err != nil || !exist {
+		return
+	}
+
+	cmap, err := toConfigMap(item)
+	if err != nil {
+		return
+	}
+
+	for k, v := range cmap.Data {
+		vars[src.Prefix+k] = v
+	}
+}
+
+func (p *podDiscoverer) envFromSecret(vars map[string]string, ns string, src corev1.EnvFromSource) {
+	if src.SecretRef.Name == "" {
+		return
+	}
+
+	key := ns + "/" + src.SecretRef.Name
+	item, exist, err := p.secretInformer.GetStore().GetByKey(key)
+	if err != nil || !exist {
+		return
+	}
+
+	secret, err := toSecret(item)
+	if err != nil {
+		return
+	}
+
+	for k, v := range secret.Data {
+		vars[src.Prefix+k] = string(v)
+	}
+}
+
+func podTUID(pod *corev1.Pod, container corev1.Container) string {
+	return fmt.Sprintf("%s_%s_%s",
+		pod.Namespace,
+		pod.Name,
+		container.Name,
+	)
+}
+
+func podTUIDWithPort(pod *corev1.Pod, container corev1.Container, port corev1.ContainerPort) string {
+	return fmt.Sprintf("%s_%s_%s_%s_%s",
+		pod.Namespace,
+		pod.Name,
+		container.Name,
+		strings.ToLower(string(port.Protocol)),
+		strconv.FormatUint(uint64(port.ContainerPort), 10),
+	)
+}
+
+func podSourceFromNsName(namespace, name string) string {
+	return namespace + "/" + name
+}
+
+func podSource(pod *corev1.Pod) string {
+	return podSourceFromNsName(pod.Namespace, pod.Name)
+}
+
+func toPod(obj any) (*corev1.Pod, error) {
+	pod, ok := obj.(*corev1.Pod)
+	if !ok {
+		return nil, fmt.Errorf("received unexpected object type: %T", obj)
+	}
+	return pod, nil
+}
+
+func toConfigMap(obj any) (*corev1.ConfigMap, error) {
+	cmap, ok := obj.(*corev1.ConfigMap)
+	if !ok {
+		return nil, fmt.Errorf("received unexpected object type: %T", obj)
+	}
+	return cmap, nil
+}
+
+func toSecret(obj any) (*corev1.Secret, error) {
+	secret, ok := obj.(*corev1.Secret)
+	if !ok {
+		return nil, fmt.Errorf("received unexpected object type: %T", obj)
+	}
+	return secret, nil
+}
+
+func isVar(name string) bool {
+	// Variable references $(VAR_NAME) are expanded using the previously defined
+	// environment variables in the container and any service environment
+	// variables.
+	return strings.IndexByte(name, '$') != -1
+}
+
+func mapAny(src map[string]string) map[string]any {
+	if src == nil {
+		return nil
+	}
+	m := make(map[string]any, len(src))
+	for k, v := range src {
+		m[k] = v
+	}
+	return m
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod_test.go
new file mode 100644
index 00000000000000..87506243bc6bdf
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/pod_test.go
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+	"context"
+	"net"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+)
+
+func TestPodTargetGroup_Provider(t *testing.T) {
+	var p podTargetGroup
+	assert.NotEmpty(t, p.Provider())
+}
+
+func TestPodTargetGroup_Source(t *testing.T) {
+	tests := map[string]struct {
+		createSim   func() discoverySim
+		wantSources []string
+	}{
+		"pods with multiple ports": {
+			createSim: func() discoverySim {
+				httpd, nginx := newHTTPDPod(), newNGINXPod()
+				disc, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+				return discoverySim{
+					td: disc,
+					wantTargetGroups: []model.TargetGroup{
+						preparePodTargetGroup(httpd),
+						preparePodTargetGroup(nginx),
+					},
+				}
+			},
+			wantSources: []string{
+				"sd:k8s:pod(default/httpd-dd95c4d68-5bkwl)",
+				"sd:k8s:pod(default/nginx-7cfd77469b-q6kxj)",
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			sim := test.createSim()
+
+			var sources []string
+			for _, tgg := range sim.run(t) {
+				sources = append(sources, tgg.Source())
+			}
+
+			assert.Equal(t, test.wantSources, sources)
+		})
+	}
+}
+
+func TestPodTargetGroup_Targets(t *testing.T) {
+	tests := map[string]struct {
+		createSim   func() discoverySim
+		wantTargets int
+	}{
+		"pods with multiple ports": {
+			createSim: func() discoverySim {
+				httpd, nginx := newHTTPDPod(), newNGINXPod()
+				discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+				return discoverySim{
+					td: discovery,
+					wantTargetGroups: []model.TargetGroup{
+						preparePodTargetGroup(httpd),
+						preparePodTargetGroup(nginx),
+					},
+				}
+			},
+			wantTargets: 4,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			sim := test.createSim()
+
+			var targets int
+			for _, tgg := range sim.run(t) {
+				targets += len(tgg.Targets())
+			}
+
+			assert.Equal(t, test.wantTargets, targets)
+		})
+	}
+}
+
+func TestPodTarget_Hash(t *testing.T) {
+	tests := map[string]struct {
+		createSim  func() discoverySim
+		wantHashes []uint64
+	}{
+		"pods with multiple ports": {
+			createSim: func() discoverySim {
+				httpd, nginx := newHTTPDPod(), newNGINXPod()
+				discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+				return discoverySim{
+					td: discovery,
+					wantTargetGroups: []model.TargetGroup{
+						preparePodTargetGroup(httpd),
+						preparePodTargetGroup(nginx),
+					},
+				}
+			},
+			wantHashes: []uint64{
+				12703169414253998055,
+				13351713096133918928,
+				8241692333761256175,
+				11562466355572729519,
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			sim := test.createSim()
+
+			var hashes []uint64
+			for _, tgg := range sim.run(t) {
+				for _, tg := range tgg.Targets() {
+					hashes = append(hashes, tg.Hash())
+				}
+			}
+
+			assert.Equal(t, test.wantHashes, hashes)
+		})
+	}
+}
+
+func TestPodTarget_TUID(t *testing.T) {
+	tests := map[string]struct {
+		createSim func() discoverySim
+		wantTUID  []string
+	}{
+		"pods with multiple ports": {
+			createSim: func() discoverySim {
+				httpd, nginx := newHTTPDPod(), newNGINXPod()
+				discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+				return discoverySim{
+					td: discovery,
+					wantTargetGroups: []model.TargetGroup{
+						preparePodTargetGroup(httpd),
+						preparePodTargetGroup(nginx),
+					},
+				}
+			},
+			wantTUID: []string{
+				"default_httpd-dd95c4d68-5bkwl_httpd_tcp_80",
+				"default_httpd-dd95c4d68-5bkwl_httpd_tcp_443",
+				"default_nginx-7cfd77469b-q6kxj_nginx_tcp_80",
+				"default_nginx-7cfd77469b-q6kxj_nginx_tcp_443",
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			sim := test.createSim()
+
+			var tuid []string
+			for _, tgg := range sim.run(t) {
+				for _, tg := range tgg.Targets() {
+					tuid = append(tuid, tg.TUID())
+				}
+			}
+
+			assert.Equal(t, test.wantTUID, tuid)
+		})
+	}
+}
+
+func TestNewPodDiscoverer(t *testing.T) {
+	tests := map[string]struct {
+		podInf    cache.SharedInformer
+		cmapInf   cache.SharedInformer
+		secretInf cache.SharedInformer
+		wantPanic bool
+	}{
+		"valid informers": {
+			wantPanic: false,
+			podInf:    cache.NewSharedInformer(nil, &corev1.Pod{}, resyncPeriod),
+			cmapInf:   cache.NewSharedInformer(nil, &corev1.ConfigMap{}, resyncPeriod),
+			secretInf: cache.NewSharedInformer(nil, &corev1.Secret{}, resyncPeriod),
+		},
+		"nil informers": {
+			wantPanic: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			f := func() { newPodDiscoverer(test.podInf, test.cmapInf, test.secretInf) }
+
+			if test.wantPanic {
+				assert.Panics(t, f)
+			} else {
+				assert.NotPanics(t, f)
+			}
+		})
+	}
+}
+
+func TestPodDiscoverer_String(t *testing.T) {
+	var p podDiscoverer
+	assert.NotEmpty(t, p.String())
+}
+
+func TestPodDiscoverer_Discover(t *testing.T) {
+	tests := map[string]func() discoverySim{
+		"ADD: pods exist before run": func() discoverySim {
+			httpd, nginx := newHTTPDPod(), newNGINXPod()
+			td, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+			return discoverySim{
+				td: td,
+				wantTargetGroups: []model.TargetGroup{
+					preparePodTargetGroup(httpd),
+					preparePodTargetGroup(nginx),
+				},
+			}
+ }, + wantTargets: 4, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var targets int + for _, tgg := range sim.run(t) { + targets += len(tgg.Targets()) + } + + assert.Equal(t, test.wantTargets, targets) + }) + } +} + +func TestPodTarget_Hash(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantHashes []uint64 + }{ + "pods with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx) + + return discoverySim{ + td: discovery, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + preparePodTargetGroup(nginx), + }, + } + }, + wantHashes: []uint64{ + 12703169414253998055, + 13351713096133918928, + 8241692333761256175, + 11562466355572729519, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var hashes []uint64 + for _, tgg := range sim.run(t) { + for _, tg := range tgg.Targets() { + hashes = append(hashes, tg.Hash()) + } + } + + assert.Equal(t, test.wantHashes, hashes) + }) + } +} + +func TestPodTarget_TUID(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantTUID []string + }{ + "pods with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx) + + return discoverySim{ + td: discovery, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + preparePodTargetGroup(nginx), + }, + } + }, + wantTUID: []string{ + "default_httpd-dd95c4d68-5bkwl_httpd_tcp_80", + "default_httpd-dd95c4d68-5bkwl_httpd_tcp_443", + "default_nginx-7cfd77469b-q6kxj_nginx_tcp_80", + "default_nginx-7cfd77469b-q6kxj_nginx_tcp_443", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var tuid []string + for _, tgg := range sim.run(t) { + for _, tg := range tgg.Targets() { + tuid = append(tuid, tg.TUID()) + } + } + + assert.Equal(t, test.wantTUID, tuid) + }) + } +} + +func TestNewPodDiscoverer(t *testing.T) { + tests := map[string]struct { + podInf cache.SharedInformer + cmapInf cache.SharedInformer + secretInf cache.SharedInformer + wantPanic bool + }{ + "valid informers": { + wantPanic: false, + podInf: cache.NewSharedInformer(nil, &corev1.Pod{}, resyncPeriod), + cmapInf: cache.NewSharedInformer(nil, &corev1.ConfigMap{}, resyncPeriod), + secretInf: cache.NewSharedInformer(nil, &corev1.Secret{}, resyncPeriod), + }, + "nil informers": { + wantPanic: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + f := func() { newPodDiscoverer(test.podInf, test.cmapInf, test.secretInf) } + + if test.wantPanic { + assert.Panics(t, f) + } else { + assert.NotPanics(t, f) + } + }) + } +} + +func TestPodDiscoverer_String(t *testing.T) { + var p podDiscoverer + assert.NotEmpty(t, p.String()) +} + +func TestPodDiscoverer_Discover(t *testing.T) { + tests := map[string]func() discoverySim{ + "ADD: pods exist before run": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + td, _ := prepareAllNsPodDiscoverer(httpd, nginx) + + return discoverySim{ + td: td, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + preparePodTargetGroup(nginx), + }, + } + }, + "ADD: pods exist before run and add after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + disc, client := 
prepareAllNsPodDiscoverer(httpd) + podClient := client.CoreV1().Pods("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + preparePodTargetGroup(nginx), + }, + } + }, + "DELETE: remove pods after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + disc, client := prepareAllNsPodDiscoverer(httpd, nginx) + podClient := client.CoreV1().Pods("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _ = podClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + preparePodTargetGroup(nginx), + prepareEmptyPodTargetGroup(httpd), + prepareEmptyPodTargetGroup(nginx), + }, + } + }, + "DELETE,ADD: remove and add pods after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + disc, client := prepareAllNsPodDiscoverer(httpd) + podClient := client.CoreV1().Pods("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroup(httpd), + prepareEmptyPodTargetGroup(httpd), + preparePodTargetGroup(nginx), + }, + } + }, + "ADD: pods with empty PodIP": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Status.PodIP = "" + nginx.Status.PodIP = "" + disc, _ := prepareAllNsPodDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareEmptyPodTargetGroup(httpd), + prepareEmptyPodTargetGroup(nginx), + }, + } + }, + "UPDATE: set pods PodIP after sync": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Status.PodIP = "" + nginx.Status.PodIP = "" + disc, client := prepareAllNsPodDiscoverer(httpd, nginx) + podClient := client.CoreV1().Pods("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _, _ = podClient.Update(ctx, newHTTPDPod(), metav1.UpdateOptions{}) + _, _ = podClient.Update(ctx, newNGINXPod(), metav1.UpdateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + prepareEmptyPodTargetGroup(httpd), + prepareEmptyPodTargetGroup(nginx), + preparePodTargetGroup(newHTTPDPod()), + preparePodTargetGroup(newNGINXPod()), + }, + } + }, + "ADD: pods without containers": func() discoverySim { + httpd, nginx := newHTTPDPod(), newNGINXPod() + httpd.Spec.Containers = httpd.Spec.Containers[:0] + nginx.Spec.Containers = httpd.Spec.Containers[:0] + disc, _ := prepareAllNsPodDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareEmptyPodTargetGroup(httpd), + prepareEmptyPodTargetGroup(nginx), + }, + } + }, + "Env: from value": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *corev1.Container) { + c.Env = []corev1.EnvVar{ + {Name: "key1", Value: "value1"}, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + + disc, _ := prepareAllNsPodDiscoverer(httpd) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + 
preparePodTargetGroupWithEnv(httpd, data), + }, + } + }, + "Env: from Secret": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *corev1.Container) { + c.Env = []corev1.EnvVar{ + { + Name: "key1", + ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}, + Key: "key1", + }}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + secret := prepareSecret("my-secret", data) + + disc, _ := prepareAllNsPodDiscoverer(httpd, secret) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroupWithEnv(httpd, data), + }, + } + }, + "Env: from ConfigMap": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *corev1.Container) { + c.Env = []corev1.EnvVar{ + { + Name: "key1", + ValueFrom: &corev1.EnvVarSource{ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cmap"}, + Key: "key1", + }}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1"} + cmap := prepareConfigMap("my-cmap", data) + + disc, _ := prepareAllNsPodDiscoverer(httpd, cmap) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroupWithEnv(httpd, data), + }, + } + }, + "EnvFrom: from ConfigMap": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *corev1.Container) { + c.EnvFrom = []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-cmap"}}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1", "key2": "value2"} + cmap := prepareConfigMap("my-cmap", data) + + disc, _ := prepareAllNsPodDiscoverer(httpd, cmap) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroupWithEnv(httpd, data), + }, + } + }, + "EnvFrom: from Secret": func() discoverySim { + httpd := newHTTPDPod() + mangle := func(c *corev1.Container) { + c.EnvFrom = []corev1.EnvFromSource{ + { + SecretRef: &corev1.SecretEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}}, + }, + } + } + mangleContainers(httpd.Spec.Containers, mangle) + data := map[string]string{"key1": "value1", "key2": "value2"} + secret := prepareSecret("my-secret", data) + + disc, _ := prepareAllNsPodDiscoverer(httpd, secret) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + preparePodTargetGroupWithEnv(httpd, data), + }, + } + }, + } + + for name, createSim := range tests { + t.Run(name, func(t *testing.T) { + sim := createSim() + sim.run(t) + }) + } +} + +func prepareAllNsPodDiscoverer(objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { + return prepareDiscoverer("pod", []string{corev1.NamespaceAll}, objects...) +} + +func preparePodDiscoverer(namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { + return prepareDiscoverer("pod", namespaces, objects...) 
+}
+
+// mangleContainers applies mangle to every container in the slice, in place.
+func mangleContainers(containers []corev1.Container, mangle func(container *corev1.Container)) {
+	for i := range containers {
+		mangle(&containers[i])
+	}
+}
+
+var controllerTrue = true
+
+func newHTTPDPod() *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "httpd-dd95c4d68-5bkwl",
+			Namespace:   "default",
+			UID:         "1cebb6eb-0c1e-495b-8131-8fa3e6668dc8",
+			Annotations: map[string]string{"phase": "prod"},
+			Labels:      map[string]string{"app": "httpd", "tier": "frontend"},
+			OwnerReferences: []metav1.OwnerReference{
+				{Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue},
+			},
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "m01",
+			Containers: []corev1.Container{
+				{
+					Name:  "httpd",
+					Image: "httpd",
+					Ports: []corev1.ContainerPort{
+						{Name: "http", Protocol: corev1.ProtocolTCP, ContainerPort: 80},
+						{Name: "https", Protocol: corev1.ProtocolTCP, ContainerPort: 443},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			PodIP: "172.17.0.1",
+		},
+	}
+}
+
+func newNGINXPod() *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        "nginx-7cfd77469b-q6kxj",
+			Namespace:   "default",
+			UID:         "09e883f2-d740-4c5f-970d-02cf02876522",
+			Annotations: map[string]string{"phase": "prod"},
+			Labels:      map[string]string{"app": "nginx", "tier": "frontend"},
+			OwnerReferences: []metav1.OwnerReference{
+				{Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue},
+			},
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "m01",
+			Containers: []corev1.Container{
+				{
+					Name:  "nginx",
+					Image: "nginx",
+					Ports: []corev1.ContainerPort{
+						{Name: "http", Protocol: corev1.ProtocolTCP, ContainerPort: 80},
+						{Name: "https", Protocol: corev1.ProtocolTCP, ContainerPort: 443},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{
+			PodIP: "172.17.0.2",
+		},
+	}
+}
+
+func prepareConfigMap(name string, data map[string]string) *corev1.ConfigMap {
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+			UID:       types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8167" + name),
+		},
+		Data: data,
+	}
+}
+
+func prepareSecret(name string, data map[string]string) *corev1.Secret {
+	secretData := make(map[string][]byte, len(data))
+	for k, v := range data {
+		secretData[k] = []byte(v)
+	}
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+			UID:       types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8161" + name),
+		},
+		Data: secretData,
+	}
+}
+
+func prepareEmptyPodTargetGroup(pod *corev1.Pod) *podTargetGroup {
+	return &podTargetGroup{source: podSource(pod)}
+}
+
+func preparePodTargetGroup(pod *corev1.Pod) *podTargetGroup {
+	tgg := prepareEmptyPodTargetGroup(pod)
+
+	for _, container := range pod.Spec.Containers {
+		for _, port := range container.Ports {
+			portNum := strconv.FormatUint(uint64(port.ContainerPort), 10)
+			tgt := &PodTarget{
+				tuid:           podTUIDWithPort(pod, container, port),
+				Address:        net.JoinHostPort(pod.Status.PodIP, portNum),
+				Namespace:      pod.Namespace,
+				Name:           pod.Name,
+				Annotations:    mapAny(pod.Annotations),
+				Labels:         mapAny(pod.Labels),
+				NodeName:       pod.Spec.NodeName,
+				PodIP:          pod.Status.PodIP,
+				ControllerName: "netdata-test",
+				ControllerKind: "DaemonSet",
+				ContName:       container.Name,
+				Image:          container.Image,
+				Env:            nil,
+				Port:           portNum,
+				PortName:       port.Name,
+				PortProtocol:   string(port.Protocol),
+			}
+			tgt.hash = mustCalcHash(tgt)
+			tgt.Tags().Merge(discoveryTags)
+
+			tgg.targets = append(tgg.targets, tgt)
+		}
+	}
+
+	return tgg
+}
+
+func preparePodTargetGroupWithEnv(pod *corev1.Pod, env
map[string]string) *podTargetGroup { + tgg := preparePodTargetGroup(pod) + + for _, tgt := range tgg.Targets() { + tgt.(*PodTarget).Env = mapAny(env) + tgt.(*PodTarget).hash = mustCalcHash(tgt) + } + + return tgg +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service.go new file mode 100644 index 00000000000000..975c5f84d8c74e --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service.go @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type serviceTargetGroup struct { + targets []model.Target + source string +} + +func (s serviceTargetGroup) Provider() string { return "sd:k8s:service" } +func (s serviceTargetGroup) Source() string { return fmt.Sprintf("%s(%s)", s.Provider(), s.source) } +func (s serviceTargetGroup) Targets() []model.Target { return s.targets } + +type ServiceTarget struct { + model.Base `hash:"ignore"` + + hash uint64 + tuid string + + Address string + Namespace string + Name string + Annotations map[string]any + Labels map[string]any + Port string + PortName string + PortProtocol string + ClusterIP string + ExternalName string + Type string +} + +func (s ServiceTarget) Hash() uint64 { return s.hash } +func (s ServiceTarget) TUID() string { return s.tuid } + +type serviceDiscoverer struct { + *logger.Logger + model.Base + + informer cache.SharedInformer + queue *workqueue.Type +} + +func newServiceDiscoverer(inf cache.SharedInformer) *serviceDiscoverer { + if inf == nil { + panic("nil service informer") + } + + queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "service"}) + _, _ = inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj any) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj any) { enqueue(queue, obj) }, + DeleteFunc: func(obj any) { enqueue(queue, obj) }, + }) + + return &serviceDiscoverer{ + Logger: log, + informer: inf, + queue: queue, + } +} + +func (s *serviceDiscoverer) String() string { + return "k8s service" +} + +func (s *serviceDiscoverer) Discover(ctx context.Context, ch chan<- []model.TargetGroup) { + s.Info("instance is started") + defer s.Info("instance is stopped") + defer s.queue.ShutDown() + + go s.informer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { + s.Error("failed to sync caches") + return + } + + go s.run(ctx, ch) + + <-ctx.Done() +} + +func (s *serviceDiscoverer) run(ctx context.Context, in chan<- []model.TargetGroup) { + for { + item, shutdown := s.queue.Get() + if shutdown { + return + } + + s.handleQueueItem(ctx, in, item) + } +} + +func (s *serviceDiscoverer) handleQueueItem(ctx context.Context, in chan<- []model.TargetGroup, item any) { + defer s.queue.Done(item) + + key := item.(string) + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + obj, exists, err := s.informer.GetStore().GetByKey(key) + if err != nil { + return + } + + if !exists { + tgg := &serviceTargetGroup{source: serviceSourceFromNsName(namespace, name)} + send(ctx, in, tgg) + return + } + + svc, err := toService(obj) + if err != nil { + return + } + + tgg := s.buildTargetGroup(svc) + + for _, tgt := range tgg.Targets() { + 
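+		// attach the discoverer-level tags to each target so that
+		// classify rule selectors can match on them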
tgt.Tags().Merge(s.Tags()) + } + + send(ctx, in, tgg) +} + +func (s *serviceDiscoverer) buildTargetGroup(svc *corev1.Service) model.TargetGroup { + // TODO: headless service? + if svc.Spec.ClusterIP == "" || len(svc.Spec.Ports) == 0 { + return &serviceTargetGroup{ + source: serviceSource(svc), + } + } + return &serviceTargetGroup{ + source: serviceSource(svc), + targets: s.buildTargets(svc), + } +} + +func (s *serviceDiscoverer) buildTargets(svc *corev1.Service) (targets []model.Target) { + for _, port := range svc.Spec.Ports { + portNum := strconv.FormatInt(int64(port.Port), 10) + tgt := &ServiceTarget{ + tuid: serviceTUID(svc, port), + Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum), + Namespace: svc.Namespace, + Name: svc.Name, + Annotations: mapAny(svc.Annotations), + Labels: mapAny(svc.Labels), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + ClusterIP: svc.Spec.ClusterIP, + ExternalName: svc.Spec.ExternalName, + Type: string(svc.Spec.Type), + } + hash, err := calcHash(tgt) + if err != nil { + continue + } + tgt.hash = hash + + targets = append(targets, tgt) + } + + return targets +} + +func serviceTUID(svc *corev1.Service, port corev1.ServicePort) string { + return fmt.Sprintf("%s_%s_%s_%s", + svc.Namespace, + svc.Name, + strings.ToLower(string(port.Protocol)), + strconv.FormatInt(int64(port.Port), 10), + ) +} + +func serviceSourceFromNsName(namespace, name string) string { + return namespace + "/" + name +} + +func serviceSource(svc *corev1.Service) string { + return serviceSourceFromNsName(svc.Namespace, svc.Name) +} + +func toService(obj any) (*corev1.Service, error) { + svc, ok := obj.(*corev1.Service) + if !ok { + return nil, fmt.Errorf("received unexpected object type: %T", obj) + } + return svc, nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service_test.go new file mode 100644 index 00000000000000..a62d66f099c21f --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/service_test.go @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package kubernetes + +import ( + "context" + "net" + "strconv" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +func TestServiceTargetGroup_Provider(t *testing.T) { + var s serviceTargetGroup + assert.NotEmpty(t, s.Provider()) +} + +func TestServiceTargetGroup_Source(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantSources []string + }{ + "ClusterIP svc with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + wantSources: []string{ + "sd:k8s:service(default/httpd-cluster-ip-service)", + "sd:k8s:service(default/nginx-cluster-ip-service)", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var sources []string + for _, tgg := range sim.run(t) { + sources = append(sources, tgg.Source()) + } + + assert.Equal(t, 
test.wantSources, sources) + }) + } +} + +func TestServiceTargetGroup_Targets(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantTargets int + }{ + "ClusterIP svc with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + wantTargets: 4, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var targets int + for _, tgg := range sim.run(t) { + targets += len(tgg.Targets()) + } + + assert.Equal(t, test.wantTargets, targets) + }) + } +} + +func TestServiceTarget_Hash(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantHashes []uint64 + }{ + "ClusterIP svc with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + wantHashes: []uint64{ + 17611803477081780974, + 6019985892433421258, + 4151907287549842238, + 5757608926096186119, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var hashes []uint64 + for _, tgg := range sim.run(t) { + for _, tgt := range tgg.Targets() { + hashes = append(hashes, tgt.Hash()) + } + } + + assert.Equal(t, test.wantHashes, hashes) + }) + } +} + +func TestServiceTarget_TUID(t *testing.T) { + tests := map[string]struct { + createSim func() discoverySim + wantTUID []string + }{ + "ClusterIP svc with multiple ports": { + createSim: func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + wantTUID: []string{ + "default_httpd-cluster-ip-service_tcp_80", + "default_httpd-cluster-ip-service_tcp_443", + "default_nginx-cluster-ip-service_tcp_80", + "default_nginx-cluster-ip-service_tcp_443", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + + var tuid []string + for _, tgg := range sim.run(t) { + for _, tgt := range tgg.Targets() { + tuid = append(tuid, tgt.TUID()) + } + } + + assert.Equal(t, test.wantTUID, tuid) + }) + } +} + +func TestNewServiceDiscoverer(t *testing.T) { + tests := map[string]struct { + informer cache.SharedInformer + wantPanic bool + }{ + "valid informer": { + wantPanic: false, + informer: cache.NewSharedInformer(nil, &corev1.Service{}, resyncPeriod), + }, + "nil informer": { + wantPanic: true, + informer: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + f := func() { newServiceDiscoverer(test.informer) } + + if test.wantPanic { + assert.Panics(t, f) + } else { + assert.NotPanics(t, f) + } + }) + } +} + +func TestServiceDiscoverer_String(t *testing.T) { + var s serviceDiscoverer + assert.NotEmpty(t, s.String()) +} + +func TestServiceDiscoverer_Discover(t *testing.T) { + tests := map[string]func() discoverySim{ + "ADD: ClusterIP svc exist before run": func() discoverySim { + 
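+			// services created before Discover starts must be emitted
+			// once the informer cache syncs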
httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + "ADD: ClusterIP svc exist before run and add after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, client := prepareAllNsSvcDiscoverer(httpd) + svcClient := client.CoreV1().Services("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + "DELETE: ClusterIP svc remove after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, client := prepareAllNsSvcDiscoverer(httpd, nginx) + svcClient := client.CoreV1().Services("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _ = svcClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + prepareEmptySvcTargetGroup(httpd), + prepareEmptySvcTargetGroup(nginx), + }, + } + }, + "ADD,DELETE: ClusterIP svc remove and add after sync": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + disc, client := prepareAllNsSvcDiscoverer(httpd) + svcClient := client.CoreV1().Services("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{}) + _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + prepareSvcTargetGroup(httpd), + prepareEmptySvcTargetGroup(httpd), + prepareSvcTargetGroup(nginx), + }, + } + }, + "ADD: Headless svc exist before run": func() discoverySim { + httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService() + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareEmptySvcTargetGroup(httpd), + prepareEmptySvcTargetGroup(nginx), + }, + } + }, + "UPDATE: Headless => ClusterIP svc after sync": func() discoverySim { + httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService() + httpdUpd, nginxUpd := *httpd, *nginx + httpdUpd.Spec.ClusterIP = "10.100.0.1" + nginxUpd.Spec.ClusterIP = "10.100.0.2" + disc, client := prepareAllNsSvcDiscoverer(httpd, nginx) + svcClient := client.CoreV1().Services("default") + + return discoverySim{ + td: disc, + runAfterSync: func(ctx context.Context) { + time.Sleep(time.Millisecond * 50) + _, _ = svcClient.Update(ctx, &httpdUpd, metav1.UpdateOptions{}) + _, _ = svcClient.Update(ctx, &nginxUpd, metav1.UpdateOptions{}) + }, + wantTargetGroups: []model.TargetGroup{ + prepareEmptySvcTargetGroup(httpd), + prepareEmptySvcTargetGroup(nginx), + prepareSvcTargetGroup(&httpdUpd), + prepareSvcTargetGroup(&nginxUpd), + }, + } + }, + "ADD: ClusterIP svc with zero exposed ports": func() discoverySim { + httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService() + httpd.Spec.Ports = httpd.Spec.Ports[:0] + nginx.Spec.Ports = 
httpd.Spec.Ports[:0] + disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx) + + return discoverySim{ + td: disc, + wantTargetGroups: []model.TargetGroup{ + prepareEmptySvcTargetGroup(httpd), + prepareEmptySvcTargetGroup(nginx), + }, + } + }, + } + + for name, createSim := range tests { + t.Run(name, func(t *testing.T) { + sim := createSim() + sim.run(t) + }) + } +} + +func prepareAllNsSvcDiscoverer(objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { + return prepareDiscoverer("svc", []string{corev1.NamespaceAll}, objects...) +} + +func prepareSvcDiscoverer(namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) { + return prepareDiscoverer("svc", namespaces, objects...) +} + +func newHTTPDClusterIPService() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "httpd-cluster-ip-service", + Namespace: "default", + Annotations: map[string]string{"phase": "prod"}, + Labels: map[string]string{"app": "httpd", "tier": "frontend"}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "http", Protocol: corev1.ProtocolTCP, Port: 80}, + {Name: "https", Protocol: corev1.ProtocolTCP, Port: 443}, + }, + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.100.0.1", + Selector: map[string]string{"app": "httpd", "tier": "frontend"}, + }, + } +} + +func newNGINXClusterIPService() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx-cluster-ip-service", + Namespace: "default", + Annotations: map[string]string{"phase": "prod"}, + Labels: map[string]string{"app": "nginx", "tier": "frontend"}, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "http", Protocol: corev1.ProtocolTCP, Port: 80}, + {Name: "https", Protocol: corev1.ProtocolTCP, Port: 443}, + }, + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.100.0.2", + Selector: map[string]string{"app": "nginx", "tier": "frontend"}, + }, + } +} + +func newHTTPDHeadlessService() *corev1.Service { + svc := newHTTPDClusterIPService() + svc.Name = "httpd-headless-service" + svc.Spec.ClusterIP = "" + return svc +} + +func newNGINXHeadlessService() *corev1.Service { + svc := newNGINXClusterIPService() + svc.Name = "nginx-headless-service" + svc.Spec.ClusterIP = "" + return svc +} + +func prepareEmptySvcTargetGroup(svc *corev1.Service) *serviceTargetGroup { + return &serviceTargetGroup{source: serviceSource(svc)} +} + +func prepareSvcTargetGroup(svc *corev1.Service) *serviceTargetGroup { + tgg := prepareEmptySvcTargetGroup(svc) + + for _, port := range svc.Spec.Ports { + portNum := strconv.FormatInt(int64(port.Port), 10) + tgt := &ServiceTarget{ + tuid: serviceTUID(svc, port), + Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum), + Namespace: svc.Namespace, + Name: svc.Name, + Annotations: mapAny(svc.Annotations), + Labels: mapAny(svc.Labels), + Port: portNum, + PortName: port.Name, + PortProtocol: string(port.Protocol), + ClusterIP: svc.Spec.ClusterIP, + ExternalName: svc.Spec.ExternalName, + Type: string(svc.Spec.Type), + } + tgt.hash = mustCalcHash(tgt) + tgt.Tags().Merge(discoveryTags) + tgg.targets = append(tgg.targets, tgt) + } + + return tgg +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/sim_test.go new file mode 100644 index 00000000000000..af66c549f32107 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/kubernetes/sim_test.go @@ -0,0 +1,137 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+	"context"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"k8s.io/client-go/tools/cache"
+)
+
+const (
+	startWaitTimeout  = time.Second * 3
+	finishWaitTimeout = time.Second * 5
+)
+
+type discoverySim struct {
+	td               *KubeDiscoverer
+	runAfterSync     func(ctx context.Context)
+	sortBeforeVerify bool
+	wantTargetGroups []model.TargetGroup
+}
+
+func (sim discoverySim) run(t *testing.T) []model.TargetGroup {
+	t.Helper()
+	require.NotNil(t, sim.td)
+	require.NotEmpty(t, sim.wantTargetGroups)
+
+	in, out := make(chan []model.TargetGroup), make(chan []model.TargetGroup)
+	go sim.collectTargetGroups(t, in, out)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	go sim.td.Discover(ctx, in)
+
+	select {
+	case <-sim.td.started:
+	case <-time.After(startWaitTimeout):
+		t.Fatalf("td %s failed to start in %s", sim.td.discoverers, startWaitTimeout)
+	}
+
+	synced := cache.WaitForCacheSync(ctx.Done(), sim.td.hasSynced)
+	require.Truef(t, synced, "td %s failed to sync", sim.td.discoverers)
+
+	if sim.runAfterSync != nil {
+		sim.runAfterSync(ctx)
+	}
+
+	groups := <-out
+
+	if sim.sortBeforeVerify {
+		sortTargetGroups(groups)
+	}
+
+	sim.verifyResult(t, groups)
+	return groups
+}
+
+// collectTargetGroups drains the discovery channel until the expected number
+// of target groups has arrived or the timeout fires.
+func (sim discoverySim) collectTargetGroups(t *testing.T, in, out chan []model.TargetGroup) {
+	var tggs []model.TargetGroup
+loop:
+	for {
+		select {
+		case inGroups := <-in:
+			if tggs = append(tggs, inGroups...); len(tggs) >= len(sim.wantTargetGroups) {
+				break loop
+			}
+		case <-time.After(finishWaitTimeout):
+			t.Logf("td %s timed out after %s, got %d groups, expected %d, some events may have been skipped",
+				sim.td.discoverers, finishWaitTimeout, len(tggs), len(sim.wantTargetGroups))
+			break loop
+		}
+	}
+	out <- tggs
+}
+
+func (sim discoverySim) verifyResult(t *testing.T, result []model.TargetGroup) {
+	var expected, actual any
+
+	if len(sim.wantTargetGroups) == len(result) {
+		expected = sim.wantTargetGroups
+		actual = result
+	} else {
+		want := make(map[string]model.TargetGroup)
+		for _, group := range sim.wantTargetGroups {
+			want[group.Source()] = group
+		}
+		got := make(map[string]model.TargetGroup)
+		for _, group := range result {
+			got[group.Source()] = group
+		}
+		expected, actual = want, got
+	}
+
+	assert.Equal(t, expected, actual)
+}
+
+type hasSynced interface {
+	hasSynced() bool
+}
+
+var (
+	_ hasSynced = &KubeDiscoverer{}
+	_ hasSynced = &podDiscoverer{}
+	_ hasSynced = &serviceDiscoverer{}
+)
+
+func (d *KubeDiscoverer) hasSynced() bool {
+	for _, disc := range d.discoverers {
+		v, ok := disc.(hasSynced)
+		if !ok || !v.hasSynced() {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *podDiscoverer) hasSynced() bool {
+	return p.podInformer.HasSynced() && p.cmapInformer.HasSynced() && p.secretInformer.HasSynced()
+}
+
+func (s *serviceDiscoverer) hasSynced() bool {
+	return s.informer.HasSynced()
+}
+
+func sortTargetGroups(tggs []model.TargetGroup) {
+	if len(tggs) == 0 {
+		return
+	}
+	sort.Slice(tggs, func(i, j int) bool { return tggs[i].Source() < tggs[j].Source() })
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/model/discoverer.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/discoverer.go
new file mode 100644
index 00000000000000..301322d32f9fe0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/discoverer.go
@@
-0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model + +import ( + "context" +) + +type Discoverer interface { + Discover(ctx context.Context, ch chan<- []TargetGroup) +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags.go new file mode 100644 index 00000000000000..e36f0b8f53b45c --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags.go @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model + +import ( + "fmt" + "sort" + "strings" +) + +type Base struct { + tags Tags +} + +func (b *Base) Tags() Tags { + if b.tags == nil { + b.tags = NewTags() + } + return b.tags +} + +type Tags map[string]struct{} + +func NewTags() Tags { + return Tags{} +} + +func (t Tags) Merge(tags Tags) { + for tag := range tags { + if strings.HasPrefix(tag, "-") { + delete(t, tag[1:]) + } else { + t[tag] = struct{}{} + } + } +} + +func (t Tags) String() string { + ts := make([]string, 0, len(t)) + for key := range t { + ts = append(ts, key) + } + sort.Strings(ts) + return fmt.Sprintf("{%s}", strings.Join(ts, ", ")) +} + +func ParseTags(line string) (Tags, error) { + words := strings.Fields(line) + if len(words) == 0 { + return NewTags(), nil + } + + tags := NewTags() + for _, tag := range words { + if !isTagWordValid(tag) { + return nil, fmt.Errorf("tags '%s' contains tag '%s' with forbidden symbol", line, tag) + } + tags[tag] = struct{}{} + } + return tags, nil +} + +func isTagWordValid(word string) bool { + // valid: + // ^[a-zA-Z][a-zA-Z0-9=_.]*$ + word = strings.TrimPrefix(word, "-") + if len(word) == 0 { + return false + } + for i, b := range word { + switch { + default: + return false + case b >= 'a' && b <= 'z': + case b >= 'A' && b <= 'Z': + case b >= '0' && b <= '9' && i > 0: + case (b == '=' || b == '_' || b == '.') && i > 0: + } + } + return true +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags_test.go new file mode 100644 index 00000000000000..4f07bcbf61f148 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/tags_test.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/model/target.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/target.go new file mode 100644 index 00000000000000..eb2bd9d5140730 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/model/target.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package model + +type Target interface { + Hash() uint64 + Tags() Tags + TUID() string +} + +type TargetGroup interface { + Targets() []Target + Provider() string + Source() string +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/accumulator.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/accumulator.go new file mode 100644 index 00000000000000..5d529410618164 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/accumulator.go @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "context" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" +) + +func newAccumulator() *accumulator { + return &accumulator{ + send: make(chan struct{}, 1), + sendEvery: time.Second * 3, + mux: &sync.Mutex{}, + tggs: 
make(map[string]model.TargetGroup), + } +} + +type ( + accumulator struct { + *logger.Logger + discoverers []model.Discoverer + send chan struct{} + sendEvery time.Duration + mux *sync.Mutex + tggs map[string]model.TargetGroup + } +) + +func (a *accumulator) run(ctx context.Context, in chan []model.TargetGroup) { + updates := make(chan []model.TargetGroup) + + var wg sync.WaitGroup + for _, d := range a.discoverers { + wg.Add(1) + d := d + go func() { defer wg.Done(); a.runDiscoverer(ctx, d, updates) }() + } + + done := make(chan struct{}) + go func() { defer close(done); wg.Wait() }() + + tk := time.NewTicker(a.sendEvery) + defer tk.Stop() + + for { + select { + case <-ctx.Done(): + select { + case <-done: + a.Info("all discoverers exited") + case <-time.After(time.Second * 5): + a.Warning("not all discoverers exited") + } + return + case <-done: + a.Info("all discoverers exited") + a.trySend(in) + return + case <-tk.C: + select { + case <-a.send: + a.trySend(in) + default: + } + } + } +} + +func (a *accumulator) runDiscoverer(ctx context.Context, d model.Discoverer, updates chan []model.TargetGroup) { + done := make(chan struct{}) + go func() { defer close(done); d.Discover(ctx, updates) }() + + for { + select { + case <-ctx.Done(): + select { + case <-done: + case <-time.After(time.Second * 5): + } + return + case <-done: + return + case tggs := <-updates: + a.mux.Lock() + a.groupsUpdate(tggs) + a.mux.Unlock() + a.triggerSend() + } + } +} + +func (a *accumulator) trySend(in chan<- []model.TargetGroup) { + a.mux.Lock() + defer a.mux.Unlock() + + select { + case in <- a.groupsList(): + a.groupsReset() + default: + a.triggerSend() + } +} + +func (a *accumulator) triggerSend() { + select { + case a.send <- struct{}{}: + default: + } +} + +func (a *accumulator) groupsUpdate(tggs []model.TargetGroup) { + for _, tgg := range tggs { + a.tggs[tgg.Source()] = tgg + } +} + +func (a *accumulator) groupsReset() { + for key := range a.tggs { + delete(a.tggs, key) + } +} + +func (a *accumulator) groupsList() []model.TargetGroup { + tggs := make([]model.TargetGroup, 0, len(a.tggs)) + for _, tgg := range a.tggs { + if tgg != nil { + tggs = append(tggs, tgg) + } + } + return tggs +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify.go new file mode 100644 index 00000000000000..0d96331d3b55ab --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "bytes" + "strings" + "text/template" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" +) + +func newTargetClassificator(cfg []ClassifyRuleConfig) (*targetClassificator, error) { + rules, err := newClassifyRules(cfg) + if err != nil { + return nil, err + } + + c := &targetClassificator{ + rules: rules, + buf: bytes.Buffer{}, + } + + return c, nil +} + +type ( + targetClassificator struct { + *logger.Logger + rules []*classifyRule + buf bytes.Buffer + } + + classifyRule struct { + name string + sr selector + tags model.Tags + match []*classifyRuleMatch + } + classifyRuleMatch struct { + tags model.Tags + expr *template.Template + } +) + +func (c *targetClassificator) classify(tgt model.Target) model.Tags { + var tags model.Tags + + for i, rule := range c.rules { + if !rule.sr.matches(tgt.Tags()) { + continue + } + + for j, match := range rule.match { + c.buf.Reset() + + if err := 
match.expr.Execute(&c.buf, tgt); err != nil { + c.Warningf("failed to execute classify rule[%d]->match[%d]->expr on target '%s'", i+1, j+1, tgt.TUID()) + continue + } + if strings.TrimSpace(c.buf.String()) != "true" { + continue + } + + if tags == nil { + tags = model.NewTags() + } + + tags.Merge(rule.tags) + tags.Merge(match.tags) + } + } + + return tags +} + +func newClassifyRules(cfg []ClassifyRuleConfig) ([]*classifyRule, error) { + var rules []*classifyRule + + fmap := newFuncMap() + + for _, ruleCfg := range cfg { + rule := classifyRule{name: ruleCfg.Name} + + sr, err := parseSelector(ruleCfg.Selector) + if err != nil { + return nil, err + } + rule.sr = sr + + tags, err := model.ParseTags(ruleCfg.Tags) + if err != nil { + return nil, err + } + rule.tags = tags + + for _, matchCfg := range ruleCfg.Match { + var match classifyRuleMatch + + tags, err := model.ParseTags(matchCfg.Tags) + if err != nil { + return nil, err + } + match.tags = tags + + tmpl, err := parseTemplate(matchCfg.Expr, fmap) + if err != nil { + return nil, err + } + match.expr = tmpl + + rule.match = append(rule.match, &match) + } + + rules = append(rules, &rule) + } + + return rules, nil +} + +func parseTemplate(s string, fmap template.FuncMap) (*template.Template, error) { + return template.New("root"). + Option("missingkey=error"). + Funcs(fmap). + Parse(s) +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify_test.go new file mode 100644 index 00000000000000..27ebc5e1341aeb --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/classify_test.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestTargetClassificator_classify(t *testing.T) { + config := ` +- selector: "rule1" + tags: "foo1" + match: + - tags: "bar1" + expr: '{{ glob .Name "mock*1*" }}' + - tags: "bar2" + expr: '{{ glob .Name "mock*2*" }}' +- selector: "rule2" + tags: "foo2" + match: + - tags: "bar3" + expr: '{{ glob .Name "mock*3*" }}' + - tags: "bar4" + expr: '{{ glob .Name "mock*4*" }}' +- selector: "rule3" + tags: "foo3" + match: + - tags: "bar5" + expr: '{{ glob .Name "mock*5*" }}' + - tags: "bar6" + expr: '{{ glob .Name "mock*6*" }}' +` + tests := map[string]struct { + target model.Target + wantTags model.Tags + }{ + "no rules match": { + target: newMockTarget("mock1"), + wantTags: nil, + }, + "one rule one match": { + target: newMockTarget("mock4", "rule2"), + wantTags: mustParseTags("foo2 bar4"), + }, + "one rule two match": { + target: newMockTarget("mock56", "rule3"), + wantTags: mustParseTags("foo3 bar5 bar6"), + }, + "all rules all matches": { + target: newMockTarget("mock123456", "rule1 rule2 rule3"), + wantTags: mustParseTags("foo1 foo2 foo3 bar1 bar2 bar3 bar4 bar5 bar6"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var cfg []ClassifyRuleConfig + + err := yaml.Unmarshal([]byte(config), &cfg) + require.NoErrorf(t, err, "yaml unmarshalling of config") + + clr, err := newTargetClassificator(cfg) + require.NoErrorf(t, err, "targetClassificator creation") + + assert.Equal(t, test.wantTags, clr.classify(test.target)) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose.go 
b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose.go new file mode 100644 index 00000000000000..ccb57956b0a7fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "bytes" + "text/template" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +func newConfigComposer(cfg []ComposeRuleConfig) (*configComposer, error) { + rules, err := newComposeRules(cfg) + if err != nil { + return nil, err + } + + c := &configComposer{ + rules: rules, + buf: bytes.Buffer{}, + } + + return c, nil +} + +type ( + configComposer struct { + *logger.Logger + rules []*composeRule + buf bytes.Buffer + } + + composeRule struct { + name string + sr selector + conf []*composeRuleConf + } + composeRuleConf struct { + sr selector + tmpl *template.Template + } +) + +func (c *configComposer) compose(tgt model.Target) []confgroup.Config { + var configs []confgroup.Config + + for i, rule := range c.rules { + if !rule.sr.matches(tgt.Tags()) { + continue + } + + for j, conf := range rule.conf { + if !conf.sr.matches(tgt.Tags()) { + continue + } + + c.buf.Reset() + + if err := conf.tmpl.Execute(&c.buf, tgt); err != nil { + c.Warningf("failed to execute rule[%d]->config[%d]->template on target '%s'", i+1, j+1, tgt.TUID()) + continue + } + if c.buf.Len() == 0 { + continue + } + + var cfg confgroup.Config + + if err := yaml.Unmarshal(c.buf.Bytes(), &cfg); err != nil { + c.Warningf("failed on yaml unmarshalling: %v", err) + continue + } + + configs = append(configs, cfg) + } + } + + if len(configs) > 0 { + c.Infof("created %d config(s) for target '%s'", len(configs), tgt.TUID()) + } + return configs +} + +func newComposeRules(cfg []ComposeRuleConfig) ([]*composeRule, error) { + var rules []*composeRule + + fmap := newFuncMap() + + for _, ruleCfg := range cfg { + rule := composeRule{name: ruleCfg.Name} + + sr, err := parseSelector(ruleCfg.Selector) + if err != nil { + return nil, err + } + rule.sr = sr + + for _, confCfg := range ruleCfg.Config { + var conf composeRuleConf + + sr, err := parseSelector(confCfg.Selector) + if err != nil { + return nil, err + } + conf.sr = sr + + tmpl, err := parseTemplate(confCfg.Template, fmap) + if err != nil { + return nil, err + } + conf.tmpl = tmpl + + rule.conf = append(rule.conf, &conf) + } + + rules = append(rules, &rule) + } + + return rules, nil +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose_test.go new file mode 100644 index 00000000000000..0db0c5fb6cb5f1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/compose_test.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfigComposer_compose(t *testing.T) { + config := ` +- selector: "rule1" + config: + - selector: "bar1" + template: | + name: {{ .Name }}-1 + - selector: "bar2" + template: | + name: {{ .Name }}-2 +- selector: "rule2" + config: + - selector: "bar3" + template: | + name: {{ .Name }}-3 + - selector: "bar4" + template: | + name: {{ 
.Name }}-4 +- selector: "rule3" + config: + - selector: "bar5" + template: | + name: {{ .Name }}-5 + - selector: "bar6" + template: | + name: {{ .Name }}-6 +` + tests := map[string]struct { + target model.Target + wantConfigs []confgroup.Config + }{ + "no rules matches": { + target: newMockTarget("mock"), + wantConfigs: nil, + }, + "one rule one config": { + target: newMockTarget("mock", "rule1 bar1"), + wantConfigs: []confgroup.Config{ + {"name": "mock-1"}, + }, + }, + "one rule two config": { + target: newMockTarget("mock", "rule2 bar3 bar4"), + wantConfigs: []confgroup.Config{ + {"name": "mock-3"}, + {"name": "mock-4"}, + }, + }, + "all rules all configs": { + target: newMockTarget("mock", "rule1 bar1 bar2 rule2 bar3 bar4 rule3 bar5 bar6"), + wantConfigs: []confgroup.Config{ + {"name": "mock-1"}, + {"name": "mock-2"}, + {"name": "mock-3"}, + {"name": "mock-4"}, + {"name": "mock-5"}, + {"name": "mock-6"}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var cfg []ComposeRuleConfig + + err := yaml.Unmarshal([]byte(config), &cfg) + require.NoErrorf(t, err, "yaml unmarshalling of config") + + cmr, err := newConfigComposer(cfg) + require.NoErrorf(t, err, "configComposer creation") + + assert.Equal(t, test.wantConfigs, cmr.compose(test.target)) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/config.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/config.go new file mode 100644 index 00000000000000..faed30e362f5ec --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/config.go @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "errors" + "fmt" + "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes" +) + +type Config struct { + Name string `yaml:"name"` + Discovery DiscoveryConfig `yaml:"discovery"` + Classify []ClassifyRuleConfig `yaml:"classify"` + Compose []ComposeRuleConfig `yaml:"compose"` // TODO: "jobs"? 
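+
+	// A minimal illustrative pipeline configuration this struct is meant to
+	// unmarshal. This is a sketch only: the exact keys accepted under "k8s"
+	// (e.g. "role") and the tag names used are assumptions, not part of this
+	// change.
+	//
+	//   name: k8s-nginx
+	//   discovery:
+	//     k8s:
+	//       - role: pod
+	//   classify:
+	//     - selector: "k8s"
+	//       tags: "apps"
+	//       match:
+	//         - tags: "nginx"
+	//           expr: '{{ glob .Image "nginx*" }}'
+	//   compose:
+	//     - selector: "apps"
+	//       config:
+	//         - selector: "nginx"
+	//           template: |
+	//             module: nginx
+	//             url: http://{{ .Address }}/stub_status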
+}
+
+type (
+	DiscoveryConfig struct {
+		K8s        []kubernetes.Config `yaml:"k8s"`
+		HostSocket HostSocketConfig    `yaml:"hostsocket"`
+	}
+	HostSocketConfig struct {
+		Net *hostsocket.NetworkSocketConfig `yaml:"net"`
+	}
+)
+
+type ClassifyRuleConfig struct {
+	Name     string `yaml:"name"`
+	Selector string `yaml:"selector"` // mandatory
+	Tags     string `yaml:"tags"`     // mandatory
+	Match    []struct {
+		Tags string `yaml:"tags"` // mandatory
+		Expr string `yaml:"expr"` // mandatory
+	} `yaml:"match"` // mandatory, at least 1
+}
+
+type ComposeRuleConfig struct {
+	Name     string `yaml:"name"`     // optional
+	Selector string `yaml:"selector"` // mandatory
+	Config   []struct {
+		Selector string `yaml:"selector"` // mandatory
+		Template string `yaml:"template"` // mandatory
+	} `yaml:"config"` // mandatory, at least 1
+}
+
+// validateConfig ensures the pipeline has a name, at least one k8s discovery
+// config, and non-empty classify and compose rule sets.
+func validateConfig(cfg Config) error {
+	if cfg.Name == "" {
+		return errors.New("'name' not set")
+	}
+	if len(cfg.Discovery.K8s) == 0 {
+		return errors.New("'discovery->k8s' not set")
+	}
+	if err := validateClassifyConfig(cfg.Classify); err != nil {
+		return fmt.Errorf("tag rules: %v", err)
+	}
+	if err := validateComposeConfig(cfg.Compose); err != nil {
+		return fmt.Errorf("config rules: %v", err)
+	}
+	return nil
+}
+
+func validateClassifyConfig(rules []ClassifyRuleConfig) error {
+	if len(rules) == 0 {
+		return errors.New("empty config, need at least 1 rule")
+	}
+	for i, rule := range rules {
+		if rule.Selector == "" {
+			return fmt.Errorf("'rule[%s][%d]->selector' not set", rule.Name, i+1)
+		}
+		if rule.Tags == "" {
+			return fmt.Errorf("'rule[%s][%d]->tags' not set", rule.Name, i+1)
+		}
+		if len(rule.Match) == 0 {
+			return fmt.Errorf("'rule[%s][%d]->match' not set, need at least 1 rule match", rule.Name, i+1)
+		}
+
+		for j, match := range rule.Match {
+			if match.Tags == "" {
+				return fmt.Errorf("'rule[%s][%d]->match[%d]->tags' not set", rule.Name, i+1, j+1)
+			}
+			if match.Expr == "" {
+				return fmt.Errorf("'rule[%s][%d]->match[%d]->expr' not set", rule.Name, i+1, j+1)
+			}
+		}
+	}
+	return nil
+}
+
+func validateComposeConfig(rules []ComposeRuleConfig) error {
+	if len(rules) == 0 {
+		return errors.New("empty config, need at least 1 rule")
+	}
+	for i, rule := range rules {
+		if rule.Selector == "" {
+			return fmt.Errorf("'rule[%s][%d]->selector' not set", rule.Name, i+1)
+		}
+
+		if len(rule.Config) == 0 {
+			return fmt.Errorf("'rule[%s][%d]->config' not set", rule.Name, i+1)
+		}
+
+		for j, conf := range rule.Config {
+			if conf.Selector == "" {
+				return fmt.Errorf("'rule[%s][%d]->config[%d]->selector' not set", rule.Name, i+1, j+1)
+			}
+			if conf.Template == "" {
+				return fmt.Errorf("'rule[%s][%d]->config[%d]->template' not set", rule.Name, i+1, j+1)
+			}
+		}
+	}
+	return nil
+}
diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap.go
new file mode 100644
index 00000000000000..d49b0d3e3b091b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+	"regexp"
+	"text/template"
+
+	"github.com/Masterminds/sprig/v3"
+	"github.com/bmatcuk/doublestar/v4"
+)
+
+func newFuncMap() template.FuncMap {
+	custom := map[string]any{
+		"glob": globAny,
+		"re":   regexpAny,
+	}
+
+	fm := sprig.HermeticTxtFuncMap()
+	for name, fn := range custom {
+		fm[name] = fn
+	}
+
+	return fm
+}
+
+func globAny(value, pattern string, rest ...string) bool {
+	switch len(rest) {
+	case 0:
+		return
globOnce(value, pattern) + default: + return globOnce(value, pattern) || globAny(value, rest[0], rest[1:]...) + } +} + +func regexpAny(value, pattern string, rest ...string) bool { + switch len(rest) { + case 0: + return regexpOnce(value, pattern) + default: + return regexpOnce(value, pattern) || regexpAny(value, rest[0], rest[1:]...) + } +} + +func globOnce(value, pattern string) bool { + ok, err := doublestar.Match(pattern, value) + return err == nil && ok +} + +func regexpOnce(value, pattern string) bool { + ok, err := regexp.MatchString(pattern, value) + return err == nil && ok +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap_test.go new file mode 100644 index 00000000000000..c8ced5170a0053 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/funcmap_test.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_globAny(t *testing.T) { + tests := map[string]struct { + patterns []string + value string + wantMatch bool + }{ + "one param, matches": { + wantMatch: true, + patterns: []string{"*"}, + value: "value", + }, + "one param, matches with *": { + wantMatch: true, + patterns: []string{"**/value"}, + value: "/one/two/three/value", + }, + "one param, not matches": { + wantMatch: false, + patterns: []string{"Value"}, + value: "value", + }, + "several params, last one matches": { + wantMatch: true, + patterns: []string{"not", "matches", "*"}, + value: "value", + }, + "several params, no matches": { + wantMatch: false, + patterns: []string{"not", "matches", "really"}, + value: "value", + }, + } + + for name, test := range tests { + name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) + ok := globAny(test.value, test.patterns[0], test.patterns[1:]...) + + if test.wantMatch { + assert.Truef(t, ok, name) + } else { + assert.Falsef(t, ok, name) + } + } +} + +func Test_regexpAny(t *testing.T) { + tests := map[string]struct { + patterns []string + value string + wantMatch bool + }{ + "one param, matches": { + wantMatch: true, + patterns: []string{"^value$"}, + value: "value", + }, + "one param, not matches": { + wantMatch: false, + patterns: []string{"^Value$"}, + value: "value", + }, + "several params, last one matches": { + wantMatch: true, + patterns: []string{"not", "matches", "va[lue]{3}"}, + value: "value", + }, + "several params, no matches": { + wantMatch: false, + patterns: []string{"not", "matches", "val[^l]ue"}, + value: "value", + }, + } + + for name, test := range tests { + name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) + ok := regexpAny(test.value, test.patterns[0], test.patterns[1:]...) 
+ + if test.wantMatch { + assert.Truef(t, ok, name) + } else { + assert.Falsef(t, ok, name) + } + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline.go new file mode 100644 index 00000000000000..1a1eb69f9404c5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline.go @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "context" + "log/slog" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket" + "github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" +) + +func New(cfg Config) (*Pipeline, error) { + if err := validateConfig(cfg); err != nil { + return nil, err + } + + p := &Pipeline{ + Logger: logger.New().With( + slog.String("component", "discovery sd pipeline"), + ), + accum: newAccumulator(), + discoverers: make([]model.Discoverer, 0), + items: make(map[string]map[uint64][]confgroup.Config), + } + + if err := p.registerDiscoverers(cfg); err != nil { + return nil, err + } + + return p, nil +} + +type ( + Pipeline struct { + *logger.Logger + + discoverers []model.Discoverer + accum *accumulator + + clr classificator + cmr composer + + items map[string]map[uint64][]confgroup.Config // [source][targetHash] + } + classificator interface { + classify(model.Target) model.Tags + } + composer interface { + compose(model.Target) []confgroup.Config + } +) + +func (p *Pipeline) registerDiscoverers(conf Config) error { + for _, cfg := range conf.Discovery.K8s { + td, err := kubernetes.NewKubeDiscoverer(cfg) + if err != nil { + return err + } + p.discoverers = append(p.discoverers, td) + } + if conf.Discovery.HostSocket.Net != nil { + td, err := hostsocket.NewNetSocketDiscoverer(*conf.Discovery.HostSocket.Net) + if err != nil { + return err + } + p.discoverers = append(p.discoverers, td) + } + + return nil +} + +func (p *Pipeline) Run(ctx context.Context, in chan<- []*confgroup.Group) { + p.Info("instance is started") + defer p.Info("instance is stopped") + + p.accum.discoverers = p.discoverers + + updates := make(chan []model.TargetGroup) + done := make(chan struct{}) + + go func() { defer close(done); p.accum.run(ctx, updates) }() + + for { + select { + case <-ctx.Done(): + select { + case <-done: + case <-time.After(time.Second * 5): + } + return + case <-done: + return + case tggs := <-updates: + p.Infof("received %d target groups", len(tggs)) + send(ctx, in, p.processGroups(tggs)) + } + } +} + +func (p *Pipeline) processGroups(tggs []model.TargetGroup) []*confgroup.Group { + var confGroups []*confgroup.Group + // updates come from the accumulator, this ensures that all groups have different sources + for _, tgg := range tggs { + p.Infof("processing group '%s' with %d target(s)", tgg.Source(), len(tgg.Targets())) + if v := p.processGroup(tgg); v != nil { + confGroups = append(confGroups, v) + } + } + return confGroups +} + +func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group { + if len(tgg.Targets()) == 0 { + if _, ok := p.items[tgg.Source()]; !ok { + return nil + } + delete(p.items, tgg.Source()) + return &confgroup.Group{Source: tgg.Source()} + } + + targetsCache, ok := p.items[tgg.Source()] + if !ok { + targetsCache = make(map[uint64][]confgroup.Config) + p.items[tgg.Source()] = targetsCache + } + + var changed 
bool + seen := make(map[uint64]bool) + + for _, tgt := range tgg.Targets() { + if tgt == nil { + continue + } + + hash := tgt.Hash() + seen[hash] = true + + if _, ok := targetsCache[hash]; ok { + continue + } + + if tags := p.clr.classify(tgt); len(tags) > 0 { + tgt.Tags().Merge(tags) + + if configs := p.cmr.compose(tgt); len(configs) > 0 { + for _, cfg := range configs { + cfg.SetProvider(tgg.Provider()) + cfg.SetSource(tgg.Source()) + } + targetsCache[hash] = configs + changed = true + } + } else { + p.Infof("target '%s' classify: fail", tgt.TUID()) + } + } + + for hash := range targetsCache { + if seen[hash] { + continue + } + if configs := targetsCache[hash]; len(configs) > 0 { + changed = true + } + delete(targetsCache, hash) + } + + if !changed { + return nil + } + + // TODO: deepcopy? + cfgGroup := &confgroup.Group{Source: tgg.Source()} + for _, cfgs := range targetsCache { + cfgGroup.Configs = append(cfgGroup.Configs, cfgs...) + } + + return cfgGroup +} + +func send(ctx context.Context, in chan<- []*confgroup.Group, configs []*confgroup.Group) { + if len(configs) == 0 { + return + } + + select { + case <-ctx.Done(): + return + case in <- configs: + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline_test.go new file mode 100644 index 00000000000000..ae6c5991aed049 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/pipeline_test.go @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/ilyam8/hashstructure" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestNew(t *testing.T) { + tests := map[string]struct { + config string + wantErr bool + }{ + "fails when config unset": { + wantErr: true, + config: "", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + + var cfg Config + err := yaml.Unmarshal([]byte(test.config), &cfg) + require.Nilf(t, err, "cfg unmarshal") + + _, err = New(cfg) + + if test.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestPipeline_Run(t *testing.T) { + const config = ` +classify: + - selector: "rule1" + tags: "foo1" + match: + - tags: "bar1" + expr: '{{ glob .Name "mock*1*" }}' + - tags: "bar2" + expr: '{{ glob .Name "mock*2*" }}' +compose: + - selector: "foo1" + config: + - selector: "bar1" + template: | + name: {{ .Name }}-foobar1 + - selector: "bar2" + template: | + name: {{ .Name }}-foobar2 +` + tests := map[string]discoverySim{ + "new group with no targets": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("", + newMockTargetGroup("test"), + ), + }, + wantClassifyCalls: 0, + wantComposeCalls: 0, + wantConfGroups: nil, + }, + "new group with targets": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("rule1", + newMockTargetGroup("test", "mock1", "mock2"), + ), + }, + wantClassifyCalls: 2, + wantComposeCalls: 2, + wantConfGroups: []*confgroup.Group{ + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + }}, + }, + }, + "existing group with same 
targets": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("rule1", + newMockTargetGroup("test", "mock1", "mock2"), + ), + newDelayedMockDiscoverer("rule1", 5, + newMockTargetGroup("test", "mock1", "mock2"), + ), + }, + wantClassifyCalls: 2, + wantComposeCalls: 2, + wantConfGroups: []*confgroup.Group{ + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + }}, + }, + }, + "existing empty group that previously had targets": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("rule1", + newMockTargetGroup("test", "mock1", "mock2"), + ), + newDelayedMockDiscoverer("rule1", 5, + newMockTargetGroup("test"), + ), + }, + wantClassifyCalls: 2, + wantComposeCalls: 2, + wantConfGroups: []*confgroup.Group{ + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + }}, + {Source: "test", Configs: nil}, + }, + }, + "existing group with old and new targets": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("rule1", + newMockTargetGroup("test", "mock1", "mock2"), + ), + newDelayedMockDiscoverer("rule1", 5, + newMockTargetGroup("test", "mock1", "mock2", "mock11", "mock22"), + ), + }, + wantClassifyCalls: 4, + wantComposeCalls: 4, + wantConfGroups: []*confgroup.Group{ + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + }}, + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock11-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock22-foobar2", + }, + }}, + }, + }, + "existing group with new targets only": { + config: config, + discoverers: []model.Discoverer{ + newMockDiscoverer("rule1", + newMockTargetGroup("test", "mock1", "mock2"), + ), + newDelayedMockDiscoverer("rule1", 5, + newMockTargetGroup("test", "mock11", "mock22"), + ), + }, + wantClassifyCalls: 4, + wantComposeCalls: 4, + wantConfGroups: []*confgroup.Group{ + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock1-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock2-foobar2", + }, + }}, + {Source: "test", Configs: []confgroup.Config{ + { + "__provider__": "mock", + "__source__": "test", + "name": "mock11-foobar1", + }, + { + "__provider__": "mock", + "__source__": "test", + "name": "mock22-foobar2", + }, + }}, + }, + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { + sim.run(t) + }) + } +} + +func newMockDiscoverer(tags string, tggs ...model.TargetGroup) *mockDiscoverer { + return &mockDiscoverer{ + tags: mustParseTags(tags), + tggs: tggs, + } +} + +func newDelayedMockDiscoverer(tags string, delay int, tggs ...model.TargetGroup) *mockDiscoverer { + return &mockDiscoverer{ + tags: mustParseTags(tags), + tggs: tggs, + delay: time.Duration(delay) * time.Second, + } +} + +type mockDiscoverer struct { + 
tggs []model.TargetGroup + tags model.Tags + delay time.Duration +} + +func (md mockDiscoverer) Discover(ctx context.Context, out chan<- []model.TargetGroup) { + for _, tgg := range md.tggs { + for _, tgt := range tgg.Targets() { + tgt.Tags().Merge(md.tags) + } + } + + select { + case <-ctx.Done(): + case <-time.After(md.delay): + select { + case <-ctx.Done(): + case out <- md.tggs: + } + } +} + +func newMockTargetGroup(source string, targets ...string) *mockTargetGroup { + m := &mockTargetGroup{source: source} + for _, name := range targets { + m.targets = append(m.targets, &mockTarget{Name: name}) + } + return m +} + +type mockTargetGroup struct { + targets []model.Target + source string +} + +func (mg mockTargetGroup) Targets() []model.Target { return mg.targets } +func (mg mockTargetGroup) Source() string { return mg.source } +func (mg mockTargetGroup) Provider() string { return "mock" } + +func newMockTarget(name string, tags ...string) *mockTarget { + m := &mockTarget{Name: name} + v, _ := model.ParseTags(strings.Join(tags, " ")) + m.Tags().Merge(v) + return m +} + +type mockTarget struct { + model.Base + Name string +} + +func (mt mockTarget) TUID() string { return mt.Name } +func (mt mockTarget) Hash() uint64 { return mustCalcHash(mt.Name) } + +func mustParseTags(line string) model.Tags { + v, err := model.ParseTags(line) + if err != nil { + panic(fmt.Sprintf("mustParseTags: %v", err)) + } + return v +} + +func mustCalcHash(obj any) uint64 { + hash, err := hashstructure.Hash(obj, nil) + if err != nil { + panic(fmt.Sprintf("hash calculation: %v", err)) + } + return hash +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/qq.yaml b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/qq.yaml new file mode 100644 index 00000000000000..e2ed5e402f5d00 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/qq.yaml @@ -0,0 +1,34 @@ +name: qqq +discovery: + k8s: + - pod: + tags: "pod" + local_mode: yes + service: + tags: "service" + hostsocket: + net: + tags: "netsocket" + unix: + tags: "unixsocket" + docker: + - address: "1" + tags: "qq" + + +classify: + - name: "name" + selector: "k8s" + tags: "apps" + match: + - tags: "apache" + expr: '{{ and (eq .Port "8161") (glob .Image "**/activemq*") }}' + +compose: + - name: "Applications" + selector: "apps" + config: + - selector: "apache" + template: | + module: bind + name: bind-{{.TUID}} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector.go new file mode 100644 index 00000000000000..99cd6a8a15e7d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector.go @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" +) + +type selector interface { + matches(model.Tags) bool +} + +type ( + exactSelector string + trueSelector struct{} + negSelector struct{ selector } + orSelector struct{ lhs, rhs selector } + andSelector struct{ lhs, rhs selector } +) + +func (s exactSelector) matches(tags model.Tags) bool { _, ok := tags[string(s)]; return ok } +func (s trueSelector) matches(model.Tags) bool { return true } +func (s negSelector) matches(tags model.Tags) bool { return !s.selector.matches(tags) } +func (s orSelector) matches(tags model.Tags) bool { return s.lhs.matches(tags) || s.rhs.matches(tags) } +func (s andSelector) matches(tags 
model.Tags) bool { return s.lhs.matches(tags) && s.rhs.matches(tags) }
+
+func (s exactSelector) String() string { return "{" + string(s) + "}" }
+func (s negSelector) String() string   { return "{!" + stringify(s.selector) + "}" }
+func (s trueSelector) String() string  { return "{*}" }
+func (s orSelector) String() string    { return "{" + stringify(s.lhs) + "|" + stringify(s.rhs) + "}" }
+func (s andSelector) String() string   { return "{" + stringify(s.lhs) + ", " + stringify(s.rhs) + "}" }
+func stringify(sr selector) string     { return strings.Trim(fmt.Sprintf("%s", sr), "{}") }
+
+func parseSelector(line string) (sr selector, err error) {
+	words := strings.Fields(line)
+	if len(words) == 0 {
+		return trueSelector{}, nil
+	}
+
+	var srs []selector
+	for _, word := range words {
+		if idx := strings.IndexByte(word, '|'); idx > 0 {
+			sr, err = parseOrSelectorWord(word)
+		} else {
+			sr, err = parseSingleSelectorWord(word)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("selector '%s' contains invalid word '%s': %v", line, word, err)
+		}
+		srs = append(srs, sr)
+	}
+
+	switch len(srs) {
+	case 0:
+		return trueSelector{}, nil
+	case 1:
+		return srs[0], nil
+	default:
+		return newAndSelector(srs[0], srs[1], srs[2:]...), nil
+	}
+}
+
+func parseOrSelectorWord(orWord string) (sr selector, err error) {
+	var srs []selector
+	for _, word := range strings.Split(orWord, "|") {
+		if sr, err = parseSingleSelectorWord(word); err != nil {
+			return nil, err
+		}
+		srs = append(srs, sr)
+	}
+	switch len(srs) {
+	case 0:
+		return trueSelector{}, nil
+	case 1:
+		return srs[0], nil
+	default:
+		return newOrSelector(srs[0], srs[1], srs[2:]...), nil
+	}
+}
+
+func parseSingleSelectorWord(word string) (selector, error) {
+	if len(word) == 0 {
+		return nil, errors.New("empty word")
+	}
+	neg := word[0] == '!'
+	if neg {
+		word = word[1:]
+	}
+	if len(word) == 0 {
+		return nil, errors.New("empty word")
+	}
+	if word != "*" && !isSelectorWordValid(word) {
+		return nil, errors.New("forbidden symbol")
+	}
+
+	var sr selector
+	switch word {
+	case "*":
+		sr = trueSelector{}
+	default:
+		sr = exactSelector(word)
+	}
+	if neg {
+		return negSelector{sr}, nil
+	}
+	return sr, nil
+}
+
+func newAndSelector(lhs, rhs selector, others ...selector) selector {
+	m := andSelector{lhs: lhs, rhs: rhs}
+	switch len(others) {
+	case 0:
+		return m
+	default:
+		return newAndSelector(m, others[0], others[1:]...)
+	}
+}
+
+func newOrSelector(lhs, rhs selector, others ...selector) selector {
+	m := orSelector{lhs: lhs, rhs: rhs}
+	switch len(others) {
+	case 0:
+		return m
+	default:
+		return newOrSelector(m, others[0], others[1:]...)
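+		// e.g. newOrSelector(a, b, c) folds left-nested: orSelector{orSelector{a, b}, c}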
+ } +} + +func isSelectorWordValid(word string) bool { + // valid: + // * + // ^[a-zA-Z][a-zA-Z0-9=_.]*$ + if len(word) == 0 { + return false + } + if word == "*" { + return true + } + for i, b := range word { + switch { + case b >= 'a' && b <= 'z': + case b >= 'A' && b <= 'Z': + case b >= '0' && b <= '9' && i > 0: + case (b == '=' || b == '_' || b == '.') && i > 0: + default: + return false + } + } + return true +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector_test.go new file mode 100644 index 00000000000000..986eef72cbae59 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/selector_test.go @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "regexp" + "testing" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + + "github.com/stretchr/testify/assert" +) + +var reSrString = regexp.MustCompile(`^{[^{}]+}$`) + +func TestTrueSelector_String(t *testing.T) { + var sr trueSelector + assert.Equal(t, "{*}", sr.String()) +} + +func TestExactSelector_String(t *testing.T) { + sr := exactSelector("selector") + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestNegSelector_String(t *testing.T) { + srs := []selector{ + exactSelector("selector"), + negSelector{exactSelector("selector")}, + orSelector{ + lhs: exactSelector("selector"), + rhs: exactSelector("selector")}, + orSelector{ + lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + }, + andSelector{ + lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + }, + } + + for i, sr := range srs { + neg := negSelector{sr} + assert.True(t, reSrString.MatchString(neg.String()), "selector num %d", i+1) + } +} + +func TestOrSelector_String(t *testing.T) { + sr := orSelector{ + lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + } + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestAndSelector_String(t *testing.T) { + sr := andSelector{ + lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}}, + } + + assert.True(t, reSrString.MatchString(sr.String())) +} + +func TestExactSelector_Matches(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []exactSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []exactSelector{ + "a", + "b", + }, + } + notMatchTests := struct { + tags model.Tags + srs []exactSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []exactSelector{ + "c", + "d", + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestNegSelector_Matches(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []negSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []negSelector{ + {exactSelector("c")}, + {exactSelector("d")}, + }, + 
} + notMatchTests := struct { + tags model.Tags + srs []negSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []negSelector{ + {exactSelector("a")}, + {exactSelector("b")}, + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestOrSelector_Matches(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []orSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []orSelector{ + { + lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("b")}, + }, + }, + } + notMatchTests := struct { + tags model.Tags + srs []orSelector + }{ + tags: model.Tags{"a": {}, "b": {}}, + srs: []orSelector{ + { + lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("f")}, + }, + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestAndSelector_Matches(t *testing.T) { + matchTests := struct { + tags model.Tags + srs []andSelector + }{ + tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}}, + srs: []andSelector{ + { + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("d")}, + }, + }, + } + notMatchTests := struct { + tags model.Tags + srs []andSelector + }{ + tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}}, + srs: []andSelector{ + { + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("z")}, + }, + }, + } + + for i, sr := range matchTests.srs { + assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1) + } + for i, sr := range notMatchTests.srs { + assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1) + } +} + +func TestParseSelector(t *testing.T) { + tests := map[string]struct { + wantSelector selector + wantErr bool + }{ + "": {wantSelector: trueSelector{}}, + "a": {wantSelector: exactSelector("a")}, + "Z": {wantSelector: exactSelector("Z")}, + "a_b": {wantSelector: exactSelector("a_b")}, + "a=b": {wantSelector: exactSelector("a=b")}, + "!a": {wantSelector: negSelector{exactSelector("a")}}, + "a b": {wantSelector: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}}, + "a|b": {wantSelector: orSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}}, + "*": {wantSelector: trueSelector{}}, + "!*": {wantSelector: negSelector{trueSelector{}}}, + "a b !c d|e f": { + wantSelector: andSelector{ + lhs: andSelector{ + lhs: andSelector{ + lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}, + rhs: negSelector{exactSelector("c")}, + }, + rhs: orSelector{ + lhs: exactSelector("d"), + rhs: exactSelector("e"), + }, + }, + rhs: exactSelector("f"), + }, + }, + "!": {wantErr: true}, + "a !": {wantErr: true}, + "a!b": {wantErr: true}, + "0a": {wantErr: true}, + "a b c*": {wantErr: true}, + "__": {wantErr: true}, + "a|b|c*": {wantErr: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sr, err := parseSelector(name) + + if test.wantErr { + assert.Nil(t, sr) + 
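// parsing must fail and must not produce a selector
+				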
assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.wantSelector, sr) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/sim_test.go new file mode 100644 index 00000000000000..eec7f417e9e758 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/pipeline/sim_test.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pipeline + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/logger" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +type discoverySim struct { + config string + discoverers []model.Discoverer + wantClassifyCalls int + wantComposeCalls int + wantConfGroups []*confgroup.Group +} + +func (sim discoverySim) run(t *testing.T) { + t.Helper() + + var cfg Config + err := yaml.Unmarshal([]byte(sim.config), &cfg) + require.Nilf(t, err, "cfg unmarshal") + + clr, err := newTargetClassificator(cfg.Classify) + require.Nilf(t, err, "classify %v", err) + + cmr, err := newConfigComposer(cfg.Compose) + require.Nilf(t, err, "compose") + + mockClr := &mockClassificator{clr: clr} + mockCmr := &mockComposer{cmr: cmr} + + accum := newAccumulator() + accum.sendEvery = time.Second * 2 + + pl := &Pipeline{ + Logger: logger.New(), + accum: accum, + discoverers: sim.discoverers, + clr: mockClr, + cmr: mockCmr, + items: make(map[string]map[uint64][]confgroup.Config), + } + + pl.accum.Logger = pl.Logger + clr.Logger = pl.Logger + cmr.Logger = pl.Logger + + groups := sim.collectGroups(t, pl) + + sortConfigGroups(groups) + sortConfigGroups(sim.wantConfGroups) + + assert.Equal(t, sim.wantConfGroups, groups) + assert.Equalf(t, sim.wantClassifyCalls, mockClr.calls, "classify calls") + assert.Equalf(t, sim.wantComposeCalls, mockCmr.calls, "compose calls") +} + +func (sim discoverySim) collectGroups(t *testing.T, pl *Pipeline) []*confgroup.Group { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + in := make(chan []*confgroup.Group) + done := make(chan struct{}) + + go func() { defer close(done); pl.Run(ctx, in) }() + + timeout := time.Second * 10 + var groups []*confgroup.Group + + func() { + for { + select { + case inGroups := <-in: + groups = append(groups, inGroups...) 
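+			// keep draining groups until the pipeline goroutine exits or the timeout fires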
+ case <-done: + return + case <-time.After(timeout): + t.Logf("discovery timed out after %s, got %d groups, expected %d, some events are skipped", + timeout, len(groups), len(sim.wantConfGroups)) + return + } + } + }() + + return groups +} + +type mockClassificator struct { + calls int + clr *targetClassificator +} + +func (m *mockClassificator) classify(tgt model.Target) model.Tags { + m.calls++ + return m.clr.classify(tgt) +} + +type mockComposer struct { + calls int + cmr *configComposer +} + +func (m *mockComposer) compose(tgt model.Target) []confgroup.Config { + m.calls++ + return m.cmr.compose(tgt) +} + +func sortConfigGroups(groups []*confgroup.Group) { + sort.Slice(groups, func(i, j int) bool { + return groups[i].Source < groups[j].Source + }) + + for _, g := range groups { + sort.Slice(g.Configs, func(i, j int) bool { + return g.Configs[i].Name() < g.Configs[j].Name() + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/sd.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/sd.go new file mode 100644 index 00000000000000..7897a659df0cc5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/sd.go @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package sd + +import ( + "context" + "sync" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline" + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +func NewServiceDiscovery() (*ServiceDiscovery, error) { + return nil, nil +} + +type ( + ServiceDiscovery struct { + *logger.Logger + + confProv ConfigFileProvider + sdFactory sdPipelineFactory + + confCache map[string]uint64 + pipelines map[string]func() + } + sdPipeline interface { + Run(ctx context.Context, in chan<- []*confgroup.Group) + } + sdPipelineFactory interface { + create(config pipeline.Config) (sdPipeline, error) + } +) + +func (d *ServiceDiscovery) Run(ctx context.Context, in chan<- []*confgroup.Group) { + d.Info("instance is started") + defer d.Info("instance is stopped") + defer d.cleanup() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { defer wg.Done(); d.confProv.Run(ctx) }() + + for { + select { + case <-ctx.Done(): + return + case cf := <-d.confProv.Configs(): + if cf.Source == "" { + continue + } + if len(cf.Data) == 0 { + delete(d.confCache, cf.Source) + d.removePipeline(cf) + } else if hash, ok := d.confCache[cf.Source]; !ok || hash != cf.Hash() { + d.confCache[cf.Source] = cf.Hash() + d.addPipeline(ctx, cf, in) + } + } + } +} + +func (d *ServiceDiscovery) addPipeline(ctx context.Context, cf ConfigFile, in chan<- []*confgroup.Group) { + var cfg pipeline.Config + + if err := yaml.Unmarshal(cf.Data, &cfg); err != nil { + d.Error(err) + return + } + + pl, err := d.sdFactory.create(cfg) + if err != nil { + d.Error(err) + return + } + + if stop, ok := d.pipelines[cf.Source]; ok { + stop() + } + + var wg sync.WaitGroup + plCtx, cancel := context.WithCancel(ctx) + + wg.Add(1) + go func() { defer wg.Done(); pl.Run(plCtx, in) }() + stop := func() { cancel(); wg.Wait() } + + d.pipelines[cf.Source] = stop +} + +func (d *ServiceDiscovery) removePipeline(cf ConfigFile) { + if stop, ok := d.pipelines[cf.Source]; ok { + delete(d.pipelines, cf.Source) + stop() + } +} + +func (d *ServiceDiscovery) cleanup() { + for _, stop := range d.pipelines { + stop() + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/sd_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/sd_test.go new file mode 100644 index 
00000000000000..b67921e96670a0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/sd_test.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package sd + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline" + + "gopkg.in/yaml.v2" +) + +func TestServiceDiscovery_Run(t *testing.T) { + tests := map[string]discoverySim{ + "add pipeline": { + configs: []ConfigFile{ + prepareConfigFile("source", "name"), + }, + wantPipelines: []*mockPipeline{ + {name: "name", started: true, stopped: false}, + }, + }, + "remove pipeline": { + configs: []ConfigFile{ + prepareConfigFile("source", "name"), + prepareEmptyConfigFile("source"), + }, + wantPipelines: []*mockPipeline{ + {name: "name", started: true, stopped: true}, + }, + }, + "re-add pipeline multiple times": { + configs: []ConfigFile{ + prepareConfigFile("source", "name"), + prepareConfigFile("source", "name"), + prepareConfigFile("source", "name"), + }, + wantPipelines: []*mockPipeline{ + {name: "name", started: true, stopped: false}, + }, + }, + "restart pipeline": { + configs: []ConfigFile{ + prepareConfigFile("source", "name1"), + prepareConfigFile("source", "name2"), + }, + wantPipelines: []*mockPipeline{ + {name: "name1", started: true, stopped: true}, + {name: "name2", started: true, stopped: false}, + }, + }, + "invalid pipeline config": { + configs: []ConfigFile{ + prepareConfigFile("source", "invalid"), + }, + wantPipelines: nil, + }, + "invalid config for running pipeline": { + configs: []ConfigFile{ + prepareConfigFile("source", "name"), + prepareConfigFile("source", "invalid"), + }, + wantPipelines: []*mockPipeline{ + {name: "name", started: true, stopped: false}, + }, + }, + } + + for name, sim := range tests { + t.Run(name, func(t *testing.T) { + sim.run(t) + }) + } +} + +func prepareConfigFile(source, name string) ConfigFile { + bs, _ := yaml.Marshal(pipeline.Config{Name: name}) + + return ConfigFile{ + Source: source, + Data: bs, + } +} + +func prepareEmptyConfigFile(source string) ConfigFile { + return ConfigFile{ + Source: source, + } +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sd/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sd/sim_test.go new file mode 100644 index 00000000000000..9ddb15e509ef51 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sd/sim_test.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package sd + +import ( + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline" + "github.com/netdata/go.d.plugin/logger" + + "github.com/stretchr/testify/assert" +) + +var lock = &sync.Mutex{} + +type discoverySim struct { + configs []ConfigFile + wantPipelines []*mockPipeline +} + +func (sim *discoverySim) run(t *testing.T) { + fact := &mockFactory{} + mgr := &ServiceDiscovery{ + Logger: logger.New(), + sdFactory: fact, + confProv: &mockConfigProvider{ + configs: sim.configs, + ch: make(chan ConfigFile), + }, + confCache: make(map[string]uint64), + pipelines: make(map[string]func()), + } + + in := make(chan<- []*confgroup.Group) + done := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { defer close(done); mgr.Run(ctx, in) }() + + time.Sleep(time.Second * 3) + + lock.Lock() + assert.Equalf(t, sim.wantPipelines, fact.pipelines, "before stop") + lock.Unlock() + + cancel() + + timeout := time.Second * 5 + + select { + case <-done: + 
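// on a clean shutdown, every pipeline created by the factory must be stopped
+		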
lock.Lock() + for _, pl := range fact.pipelines { + assert.Truef(t, pl.stopped, "pipeline '%s' is not stopped after cancel()", pl.name) + } + lock.Unlock() + case <-time.After(timeout): + t.Errorf("sd failed to exit in %s", timeout) + } +} + +type mockConfigProvider struct { + configs []ConfigFile + ch chan ConfigFile +} + +func (m *mockConfigProvider) Run(ctx context.Context) { + for _, conf := range m.configs { + select { + case <-ctx.Done(): + return + case m.ch <- conf: + } + } + <-ctx.Done() +} + +func (m *mockConfigProvider) Configs() chan ConfigFile { + return m.ch +} + +type mockFactory struct { + pipelines []*mockPipeline +} + +func (m *mockFactory) create(cfg pipeline.Config) (sdPipeline, error) { + lock.Lock() + defer lock.Unlock() + + if cfg.Name == "invalid" { + return nil, errors.New("mock sdPipelineFactory.create() error") + } + + pl := mockPipeline{name: cfg.Name} + m.pipelines = append(m.pipelines, &pl) + + return &pl, nil +} + +type mockPipeline struct { + name string + started bool + stopped bool +} + +func (m *mockPipeline) Run(ctx context.Context, _ chan<- []*confgroup.Group) { + lock.Lock() + m.started = true + lock.Unlock() + defer func() { lock.Lock(); m.stopped = true; lock.Unlock() }() + <-ctx.Done() +} diff --git a/src/go/collectors/go.d.plugin/agent/discovery/sim_test.go b/src/go/collectors/go.d.plugin/agent/discovery/sim_test.go new file mode 100644 index 00000000000000..f8a515de01bef9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/discovery/sim_test.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discovery + +import ( + "context" + "sort" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type discoverySim struct { + mgr *Manager + collectDelay time.Duration + expectedGroups []*confgroup.Group +} + +func (sim discoverySim) run(t *testing.T) { + t.Helper() + require.NotNil(t, sim.mgr) + + in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group) + go sim.collectGroups(t, in, out) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go sim.mgr.Run(ctx, in) + + actualGroups := <-out + + sortGroups(sim.expectedGroups) + sortGroups(actualGroups) + + assert.Equal(t, sim.expectedGroups, actualGroups) +} + +func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) { + time.Sleep(sim.collectDelay) + + timeout := sim.mgr.sendEvery + time.Second*2 + var groups []*confgroup.Group +loop: + for { + select { + case inGroups := <-in: + if groups = append(groups, inGroups...); len(groups) >= len(sim.expectedGroups) { + break loop + } + case <-time.After(timeout): + t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped", + sim.mgr.discoverers, timeout, len(groups), len(sim.expectedGroups)) + break loop + } + } + out <- groups +} + +func sortGroups(groups []*confgroup.Group) { + if len(groups) == 0 { + return + } + sort.Slice(groups, func(i, j int) bool { return groups[i].Source < groups[j].Source }) +} diff --git a/src/go/collectors/go.d.plugin/agent/executable/name.go b/src/go/collectors/go.d.plugin/agent/executable/name.go new file mode 100644 index 00000000000000..49dcf8db6e3aba --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/executable/name.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package executable + +import ( + "os" + "path/filepath" + "strings" +) + +var Name string + +func init() { + s, 
err := os.Executable()
+	if err != nil || s == "" || strings.HasSuffix(s, ".test") {
+		Name = "go.d"
+		return
+	}
+
+	_, Name = filepath.Split(s)
+	Name = strings.TrimSuffix(Name, ".plugin")
+}
diff --git a/src/go/collectors/go.d.plugin/agent/filelock/filelock.go b/src/go/collectors/go.d.plugin/agent/filelock/filelock.go
new file mode 100644
index 00000000000000..ed4c038fde9232
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/filelock/filelock.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filelock
+
+import (
+	"path/filepath"
+
+	"github.com/gofrs/flock"
+)
+
+func New(dir string) *Locker {
+	return &Locker{
+		suffix: ".collector.lock",
+		dir:    dir,
+		locks:  make(map[string]*flock.Flock),
+	}
+}
+
+type Locker struct {
+	suffix string
+	dir    string
+	locks  map[string]*flock.Flock
+}
+
+func (l *Locker) Lock(name string) (bool, error) {
+	name = filepath.Join(l.dir, name+l.suffix)
+
+	if _, ok := l.locks[name]; ok {
+		return true, nil
+	}
+
+	locker := flock.New(name)
+
+	ok, err := locker.TryLock()
+	if ok {
+		l.locks[name] = locker
+	} else {
+		_ = locker.Close()
+	}
+
+	return ok, err
+}
+
+func (l *Locker) Unlock(name string) error {
+	name = filepath.Join(l.dir, name+l.suffix)
+
+	locker, ok := l.locks[name]
+	if !ok {
+		return nil
+	}
+
+	delete(l.locks, name)
+
+	return locker.Close()
+}
diff --git a/src/go/collectors/go.d.plugin/agent/filelock/filelock_test.go b/src/go/collectors/go.d.plugin/agent/filelock/filelock_test.go
new file mode 100644
index 00000000000000..dde5d35de0a76a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/filelock/filelock_test.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filelock
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestNew(t *testing.T) {
+	assert.NotNil(t, New(""))
+}
+
+func TestLocker_Lock(t *testing.T) {
+	tests := map[string]func(t *testing.T, dir string){
+		"register a lock": func(t *testing.T, dir string) {
+			reg := New(dir)
+
+			ok, err := reg.Lock("name")
+			assert.True(t, ok)
+			assert.NoError(t, err)
+		},
+		"register the same lock twice": func(t *testing.T, dir string) {
+			reg := New(dir)
+
+			ok, err := reg.Lock("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			ok, err = reg.Lock("name")
+			assert.True(t, ok)
+			assert.NoError(t, err)
+		},
+		"failed to register a lock already held by another process": func(t *testing.T, dir string) {
+			reg1 := New(dir)
+			reg2 := New(dir)
+
+			ok, err := reg1.Lock("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			ok, err = reg2.Lock("name")
+			assert.False(t, ok)
+			assert.NoError(t, err)
+		},
+		"failed to register because the directory doesn't exist": func(t *testing.T, dir string) {
+			reg := New(dir + dir)
+
+			ok, err := reg.Lock("name")
+			assert.False(t, ok)
+			assert.Error(t, err)
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-file-lock-registry")
+			require.NoError(t, err)
+			defer func() { require.NoError(t, os.RemoveAll(dir)) }()
+
+			test(t, dir)
+		})
+	}
+}
+
+func TestLocker_Unlock(t *testing.T) {
+	tests := map[string]func(t *testing.T, dir string){
+		"unregister a lock": func(t *testing.T, dir string) {
+			reg := New(dir)
+
+			ok, err := reg.Lock("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			assert.NoError(t, reg.Unlock("name"))
+		},
+		"unregister a lock that was not registered": func(t *testing.T, dir string) {
+			reg := New(dir)
+
+			assert.NoError(t, 
reg.Unlock("name")) + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-file-lock-registry") + require.NoError(t, err) + defer func() { require.NoError(t, os.RemoveAll(dir)) }() + + test(t, dir) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/filestatus/manager.go b/src/go/collectors/go.d.plugin/agent/filestatus/manager.go new file mode 100644 index 00000000000000..323ad1f474407b --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/filestatus/manager.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filestatus + +import ( + "context" + "log/slog" + "os" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/logger" +) + +func NewManager(path string) *Manager { + return &Manager{ + Logger: logger.New().With( + slog.String("component", "filestatus manager"), + ), + path: path, + store: &Store{}, + flushEvery: time.Second * 5, + flushCh: make(chan struct{}, 1), + } +} + +type Manager struct { + *logger.Logger + + path string + + store *Store + + flushEvery time.Duration + flushCh chan struct{} +} + +func (m *Manager) Run(ctx context.Context) { + m.Info("instance is started") + defer func() { m.Info("instance is stopped") }() + + tk := time.NewTicker(m.flushEvery) + defer tk.Stop() + defer m.flush() + + for { + select { + case <-ctx.Done(): + return + case <-tk.C: + m.tryFlush() + } + } +} + +func (m *Manager) Save(cfg confgroup.Config, status string) { + if v, ok := m.store.lookup(cfg); !ok || status != v { + m.store.add(cfg, status) + m.triggerFlush() + } +} + +func (m *Manager) Remove(cfg confgroup.Config) { + if _, ok := m.store.lookup(cfg); ok { + m.store.remove(cfg) + m.triggerFlush() + } +} + +func (m *Manager) triggerFlush() { + select { + case m.flushCh <- struct{}{}: + default: + } +} + +func (m *Manager) tryFlush() { + select { + case <-m.flushCh: + m.flush() + default: + } +} + +func (m *Manager) flush() { + bs, err := m.store.bytes() + if err != nil { + return + } + + f, err := os.Create(m.path) + if err != nil { + return + } + defer func() { _ = f.Close() }() + + _, _ = f.Write(bs) +} diff --git a/src/go/collectors/go.d.plugin/agent/filestatus/manager_test.go b/src/go/collectors/go.d.plugin/agent/filestatus/manager_test.go new file mode 100644 index 00000000000000..8520da0e786564 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/filestatus/manager_test.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filestatus + +import ( + "context" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewManager(t *testing.T) { + mgr := NewManager("") + assert.NotNil(t, mgr.store) +} + +func TestManager_Run(t *testing.T) { + type testAction struct { + name string + cfg confgroup.Config + status string + } + tests := map[string]struct { + actions []testAction + wantFile string + }{ + "save": { + actions: []testAction{ + { + name: "save", status: "ok", + cfg: prepareConfig("module", "module1", "name", "name1"), + }, + { + name: "save", status: "ok", + cfg: prepareConfig("module", "module2", "name", "name2"), + }, + }, + wantFile: ` +{ + "module1": { + "name1:5956328514325012774": "ok" + }, + "module2": { + "name2:14684454322123948394": "ok" + } +} +`, + }, + "remove": { + actions: []testAction{ + { + name: "save", status: "ok", + cfg: 
prepareConfig("module", "module1", "name", "name1"), + }, + { + name: "save", status: "ok", + cfg: prepareConfig("module", "module2", "name", "name2"), + }, + { + name: "remove", + cfg: prepareConfig("module", "module2", "name", "name2"), + }, + }, + wantFile: ` +{ + "module1": { + "name1:5956328514325012774": "ok" + } +} +`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-filestatus-run") + require.NoError(t, err) + defer func() { assert.NoError(t, os.RemoveAll(dir)) }() + + filename := path.Join(dir, "filestatus") + + mgr := NewManager(filename) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { defer close(done); mgr.Run(ctx) }() + + for _, v := range test.actions { + switch v.name { + case "save": + mgr.Save(v.cfg, v.status) + case "remove": + mgr.Remove(v.cfg) + } + } + + cancel() + + timeout := time.Second * 5 + tk := time.NewTimer(timeout) + defer tk.Stop() + + select { + case <-done: + case <-tk.C: + t.Errorf("timed out after %s", timeout) + } + + bs, err := os.ReadFile(filename) + require.NoError(t, err) + + assert.Equal(t, strings.TrimSpace(test.wantFile), string(bs)) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/agent/filestatus/store.go b/src/go/collectors/go.d.plugin/agent/filestatus/store.go new file mode 100644 index 00000000000000..6161ec02d895e0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/filestatus/store.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filestatus + +import ( + "encoding/json" + "fmt" + "os" + "slices" + "sync" + + "github.com/netdata/go.d.plugin/agent/confgroup" +) + +func LoadStore(path string) (*Store, error) { + var s Store + + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + return &s, json.NewDecoder(f).Decode(&s.items) +} + +type Store struct { + mux sync.Mutex + items map[string]map[string]string // [module][name:hash]status +} + +func (s *Store) Contains(cfg confgroup.Config, statuses ...string) bool { + status, ok := s.lookup(cfg) + if !ok { + return false + } + + return slices.Contains(statuses, status) +} + +func (s *Store) lookup(cfg confgroup.Config) (string, bool) { + s.mux.Lock() + defer s.mux.Unlock() + + jobs, ok := s.items[cfg.Module()] + if !ok { + return "", false + } + + status, ok := jobs[storeJobKey(cfg)] + + return status, ok +} + +func (s *Store) add(cfg confgroup.Config, status string) { + s.mux.Lock() + defer s.mux.Unlock() + + if s.items == nil { + s.items = make(map[string]map[string]string) + } + + if s.items[cfg.Module()] == nil { + s.items[cfg.Module()] = make(map[string]string) + } + + s.items[cfg.Module()][storeJobKey(cfg)] = status +} + +func (s *Store) remove(cfg confgroup.Config) { + s.mux.Lock() + defer s.mux.Unlock() + + delete(s.items[cfg.Module()], storeJobKey(cfg)) + + if len(s.items[cfg.Module()]) == 0 { + delete(s.items, cfg.Module()) + } +} + +func (s *Store) bytes() ([]byte, error) { + s.mux.Lock() + defer s.mux.Unlock() + + return json.MarshalIndent(s.items, "", " ") +} + +func storeJobKey(cfg confgroup.Config) string { + return fmt.Sprintf("%s:%d", cfg.Name(), cfg.Hash()) +} diff --git a/src/go/collectors/go.d.plugin/agent/filestatus/store_test.go b/src/go/collectors/go.d.plugin/agent/filestatus/store_test.go new file mode 100644 index 00000000000000..811b0ffa73ce7c --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/filestatus/store_test.go @@ -0,0 +1,138 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package filestatus + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/confgroup" + + "github.com/stretchr/testify/assert" +) + +// TODO: tech debt +func TestLoadStore(t *testing.T) { + +} + +// TODO: tech debt +func TestStore_Contains(t *testing.T) { + +} + +func TestStore_add(t *testing.T) { + tests := map[string]struct { + prepare func() *Store + input confgroup.Config + wantItemsNum int + }{ + "add cfg to the empty store": { + prepare: func() *Store { + return &Store{} + }, + input: prepareConfig( + "module", "modName", + "name", "jobName", + ), + wantItemsNum: 1, + }, + "add cfg that already in the store": { + prepare: func() *Store { + return &Store{ + items: map[string]map[string]string{ + "modName": {"jobName:18299273693089411682": "state"}, + }, + } + }, + input: prepareConfig( + "module", "modName", + "name", "jobName", + ), + wantItemsNum: 1, + }, + "add cfg with same module, same name, but specific options": { + prepare: func() *Store { + return &Store{ + items: map[string]map[string]string{ + "modName": {"jobName:18299273693089411682": "state"}, + }, + } + }, + input: prepareConfig( + "module", "modName", + "name", "jobName", + "opt", "val", + ), + wantItemsNum: 2, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + s := test.prepare() + s.add(test.input, "state") + assert.Equal(t, test.wantItemsNum, calcStoreItems(s)) + }) + } +} + +func TestStore_remove(t *testing.T) { + tests := map[string]struct { + prepare func() *Store + input confgroup.Config + wantItemsNum int + }{ + "remove cfg from the empty store": { + prepare: func() *Store { + return &Store{} + }, + input: prepareConfig( + "module", "modName", + "name", "jobName", + ), + wantItemsNum: 0, + }, + "remove cfg from the store": { + prepare: func() *Store { + return &Store{ + items: map[string]map[string]string{ + "modName": { + "jobName:18299273693089411682": "state", + "jobName:18299273693089411683": "state", + }, + }, + } + }, + input: prepareConfig( + "module", "modName", + "name", "jobName", + ), + wantItemsNum: 1, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + s := test.prepare() + s.remove(test.input) + assert.Equal(t, test.wantItemsNum, calcStoreItems(s)) + }) + } +} + +func calcStoreItems(s *Store) (num int) { + for _, v := range s.items { + for range v { + num++ + } + } + return num +} + +func prepareConfig(values ...string) confgroup.Config { + cfg := confgroup.Config{} + for i := 1; i < len(values); i += 2 { + cfg[values[i-1]] = values[i] + } + return cfg +} diff --git a/src/go/collectors/go.d.plugin/agent/functions/function.go b/src/go/collectors/go.d.plugin/agent/functions/function.go new file mode 100644 index 00000000000000..46a728994a9dfa --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/functions/function.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package functions + +import ( + "bufio" + "bytes" + "encoding/csv" + "fmt" + "strconv" + "strings" + "time" +) + +type Function struct { + key string + UID string + Timeout time.Duration + Name string + Args []string + Payload []byte +} + +func (f *Function) String() string { + return fmt.Sprintf("key: %s, uid: %s, timeout: %s, function: %s, args: %v, payload: %s", + f.key, f.UID, f.Timeout, f.Name, f.Args, string(f.Payload)) +} + +func parseFunction(s string) (*Function, error) { + r := csv.NewReader(strings.NewReader(s)) + r.Comma = ' ' + + parts, err := r.Read() + if err != nil { + return nil, err + } + 
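// a FUNCTION line carries exactly 4 space-separated fields: key, UID, timeout and the quoted "name [args...]" command
+	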
if len(parts) != 4 { + return nil, fmt.Errorf("unexpected number of words: want 4, got %d (%v)", len(parts), parts) + } + + timeout, err := strconv.ParseInt(parts[2], 10, 64) + if err != nil { + return nil, err + } + + cmd := strings.Split(parts[3], " ") + + fn := &Function{ + key: parts[0], + UID: parts[1], + Timeout: time.Duration(timeout) * time.Second, + Name: cmd[0], + Args: cmd[1:], + } + + return fn, nil +} + +func parseFunctionWithPayload(s string, sc *bufio.Scanner) (*Function, error) { + fn, err := parseFunction(s) + if err != nil { + return nil, err + } + + var n int + var buf bytes.Buffer + for sc.Scan() && sc.Text() != "FUNCTION_PAYLOAD_END" { + if n++; n > 1 { + buf.WriteString("\n") + } + buf.WriteString(sc.Text()) + } + + fn.Payload = append(fn.Payload, buf.Bytes()...) + + return fn, nil +} diff --git a/src/go/collectors/go.d.plugin/agent/functions/manager.go b/src/go/collectors/go.d.plugin/agent/functions/manager.go new file mode 100644 index 00000000000000..760780cff47c7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/functions/manager.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package functions + +import ( + "bufio" + "context" + "io" + "log/slog" + "os" + "strings" + "sync" + + "github.com/netdata/go.d.plugin/logger" + + "github.com/mattn/go-isatty" + "github.com/muesli/cancelreader" +) + +var isTerminal = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsTerminal(os.Stdin.Fd()) + +func NewManager() *Manager { + return &Manager{ + Logger: logger.New().With( + slog.String("component", "functions manager"), + ), + Input: os.Stdin, + mux: &sync.Mutex{}, + FunctionRegistry: make(map[string]func(Function)), + } +} + +type Manager struct { + *logger.Logger + + Input io.Reader + mux *sync.Mutex + FunctionRegistry map[string]func(Function) +} + +func (m *Manager) Register(name string, fn func(Function)) { + if fn == nil { + m.Warningf("not registering '%s': nil function", name) + return + } + m.addFunction(name, fn) +} + +func (m *Manager) Run(ctx context.Context) { + m.Info("instance is started") + defer func() { m.Info("instance is stopped") }() + + if !isTerminal { + var wg sync.WaitGroup + + r, err := cancelreader.NewReader(m.Input) + if err != nil { + m.Errorf("fail to create cancel reader: %v", err) + return + } + + go func() { <-ctx.Done(); r.Cancel() }() + + wg.Add(1) + go func() { defer wg.Done(); m.run(r) }() + + wg.Wait() + _ = r.Close() + } + + <-ctx.Done() +} + +func (m *Manager) run(r io.Reader) { + sc := bufio.NewScanner(r) + + for sc.Scan() { + text := sc.Text() + + var fn *Function + var err error + + // FIXME: if we are waiting for FUNCTION_PAYLOAD_END and a new FUNCTION* appears, + // we need to discard the current one and switch to the new one + switch { + case strings.HasPrefix(text, "FUNCTION "): + fn, err = parseFunction(text) + case strings.HasPrefix(text, "FUNCTION_PAYLOAD "): + fn, err = parseFunctionWithPayload(text, sc) + case text == "": + continue + default: + m.Warningf("unexpected line: '%s'", text) + continue + } + + if err != nil { + m.Warningf("parse function: %v ('%s')", err, text) + continue + } + + function, ok := m.lookupFunction(fn.Name) + if !ok { + m.Infof("skipping execution of '%s': unregistered function", fn.Name) + continue + } + if function == nil { + m.Warningf("skipping execution of '%s': nil function registered", fn.Name) + continue + } + + m.Debugf("executing function: '%s'", fn.String()) + function(*fn) + } +} + +func (m *Manager) addFunction(name string, fn func(Function)) { + m.mux.Lock() + 
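// the registry is shared with lookupFunction, so guard every access with the mutex
+	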
defer m.mux.Unlock() + + if _, ok := m.FunctionRegistry[name]; !ok { + m.Debugf("registering function '%s'", name) + } else { + m.Warningf("re-registering function '%s'", name) + } + m.FunctionRegistry[name] = fn +} + +func (m *Manager) lookupFunction(name string) (func(Function), bool) { + m.mux.Lock() + defer m.mux.Unlock() + + f, ok := m.FunctionRegistry[name] + return f, ok +} diff --git a/src/go/collectors/go.d.plugin/agent/functions/manager_test.go b/src/go/collectors/go.d.plugin/agent/functions/manager_test.go new file mode 100644 index 00000000000000..84c4502eb873dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/functions/manager_test.go @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package functions + +import ( + "context" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestNewManager(t *testing.T) { + mgr := NewManager() + + assert.NotNilf(t, mgr.Input, "Input") + assert.NotNilf(t, mgr.FunctionRegistry, "FunctionRegistry") +} + +func TestManager_Register(t *testing.T) { + type testInputFn struct { + name string + invalid bool + } + tests := map[string]struct { + input []testInputFn + expected []string + }{ + "valid registration": { + input: []testInputFn{ + {name: "fn1"}, + {name: "fn2"}, + }, + expected: []string{"fn1", "fn2"}, + }, + "registration with duplicates": { + input: []testInputFn{ + {name: "fn1"}, + {name: "fn2"}, + {name: "fn1"}, + }, + expected: []string{"fn1", "fn2"}, + }, + "registration with nil functions": { + input: []testInputFn{ + {name: "fn1"}, + {name: "fn2", invalid: true}, + }, + expected: []string{"fn1"}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mgr := NewManager() + + for _, v := range test.input { + if v.invalid { + mgr.Register(v.name, nil) + } else { + mgr.Register(v.name, func(Function) {}) + } + } + + var got []string + for name := range mgr.FunctionRegistry { + got = append(got, name) + } + sort.Strings(got) + sort.Strings(test.expected) + + assert.Equal(t, test.expected, got) + }) + } +} + +func TestManager_Run(t *testing.T) { + tests := map[string]struct { + register []string + input string + expected []Function + }{ + "valid function: single": { + register: []string{"fn1"}, + input: ` +FUNCTION UID 1 "fn1 arg1 arg2" +`, + expected: []Function{ + { + key: "FUNCTION", + UID: "UID", + Timeout: time.Second, + Name: "fn1", + Args: []string{"arg1", "arg2"}, + Payload: nil, + }, + }, + }, + "valid function: multiple": { + register: []string{"fn1", "fn2"}, + input: ` +FUNCTION UID 1 "fn1 arg1 arg2" +FUNCTION UID 1 "fn2 arg1 arg2" +`, + expected: []Function{ + { + key: "FUNCTION", + UID: "UID", + Timeout: time.Second, + Name: "fn1", + Args: []string{"arg1", "arg2"}, + Payload: nil, + }, + { + key: "FUNCTION", + UID: "UID", + Timeout: time.Second, + Name: "fn2", + Args: []string{"arg1", "arg2"}, + Payload: nil, + }, + }, + }, + "valid function: single with payload": { + register: []string{"fn1", "fn2"}, + input: ` +FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" +payload line1 +payload line2 +FUNCTION_PAYLOAD_END +`, + expected: []Function{ + { + key: "FUNCTION_PAYLOAD", + UID: "UID", + Timeout: time.Second, + Name: "fn1", + Args: []string{"arg1", "arg2"}, + Payload: []byte("payload line1\npayload line2"), + }, + }, + }, + "valid function: multiple with payload": { + register: []string{"fn1", "fn2"}, + input: ` +FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" +payload line1 +payload line2 +FUNCTION_PAYLOAD_END + +FUNCTION_PAYLOAD UID 1 "fn2 arg1 arg2" 
+payload line3 +payload line4 +FUNCTION_PAYLOAD_END +`, + expected: []Function{ + { + key: "FUNCTION_PAYLOAD", + UID: "UID", + Timeout: time.Second, + Name: "fn1", + Args: []string{"arg1", "arg2"}, + Payload: []byte("payload line1\npayload line2"), + }, + { + key: "FUNCTION_PAYLOAD", + UID: "UID", + Timeout: time.Second, + Name: "fn2", + Args: []string{"arg1", "arg2"}, + Payload: []byte("payload line3\npayload line4"), + }, + }, + }, + "valid function: multiple with and without payload": { + register: []string{"fn1", "fn2", "fn3", "fn4"}, + input: ` +FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" +payload line1 +payload line2 +FUNCTION_PAYLOAD_END + +FUNCTION UID 1 "fn2 arg1 arg2" +FUNCTION UID 1 "fn3 arg1 arg2" + +FUNCTION_PAYLOAD UID 1 "fn4 arg1 arg2" +payload line3 +payload line4 +FUNCTION_PAYLOAD_END +`, + expected: []Function{ + { + key: "FUNCTION_PAYLOAD", + UID: "UID", + Timeout: time.Second, + Name: "fn1", + Args: []string{"arg1", "arg2"}, + Payload: []byte("payload line1\npayload line2"), + }, + { + key: "FUNCTION", + UID: "UID", + Timeout: time.Second, + Name: "fn2", + Args: []string{"arg1", "arg2"}, + Payload: nil, + }, + { + key: "FUNCTION", + UID: "UID", + Timeout: time.Second, + Name: "fn3", + Args: []string{"arg1", "arg2"}, + Payload: nil, + }, + { + key: "FUNCTION_PAYLOAD", + UID: "UID", + Timeout: time.Second, + Name: "fn4", + Args: []string{"arg1", "arg2"}, + Payload: []byte("payload line3\npayload line4"), + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mgr := NewManager() + + mgr.Input = strings.NewReader(test.input) + + mock := &mockFunctionExecutor{} + for _, v := range test.register { + mgr.Register(v, mock.execute) + } + + testTime := time.Second * 5 + ctx, cancel := context.WithTimeout(context.Background(), testTime) + defer cancel() + + done := make(chan struct{}) + + go func() { defer close(done); mgr.Run(ctx) }() + + timeout := testTime + time.Second*2 + tk := time.NewTimer(timeout) + defer tk.Stop() + + select { + case <-done: + assert.Equal(t, test.expected, mock.executed) + case <-tk.C: + t.Errorf("timed out after %s", timeout) + } + }) + } +} + +type mockFunctionExecutor struct { + executed []Function +} + +func (m *mockFunctionExecutor) execute(fn Function) { + m.executed = append(m.executed, fn) +} diff --git a/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo.go b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo.go new file mode 100644 index 00000000000000..b395b61c6f9e55 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hostinfo + +import ( + "bytes" + "context" + "os/exec" + "time" +) + +var Hostname = getHostname() + +func getHostname() string { + path, err := exec.LookPath("hostname") + if err != nil { + return "" + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + + bs, err := exec.CommandContext(ctx, path).Output() + if err != nil { + return "" + } + + return string(bytes.TrimSpace(bs)) +} diff --git a/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_common.go b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_common.go new file mode 100644 index 00000000000000..69bbf5c78fb8d6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_common.go @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build !linux + +package hostinfo + +var SystemdVersion int diff --git 
a/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_linux.go b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_linux.go new file mode 100644 index 00000000000000..db2005f00787bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/hostinfo/hostinfo_linux.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux + +package hostinfo + +import ( + "context" + "regexp" + "strconv" + + "github.com/coreos/go-systemd/v22/dbus" +) + +var SystemdVersion = getSystemdVersion() + +func getSystemdVersion() int { + var reVersion = regexp.MustCompile(`[0-9][0-9][0-9]`) + + conn, err := dbus.NewWithContext(context.Background()) + if err != nil { + return 0 + } + defer conn.Close() + + version, err := conn.GetManagerProperty("Version") + if err != nil { + return 0 + } + + major := reVersion.FindString(version) + if major == "" { + return 0 + } + + ver, err := strconv.Atoi(major) + if err != nil { + return 0 + } + + return ver +} diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/cache.go b/src/go/collectors/go.d.plugin/agent/jobmgr/cache.go new file mode 100644 index 00000000000000..53a1f732532cc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/jobmgr/cache.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "context" + + "github.com/netdata/go.d.plugin/agent/confgroup" +) + +func newRunningJobsCache() *runningJobsCache { + return &runningJobsCache{} +} + +func newRetryingJobsCache() *retryingJobsCache { + return &retryingJobsCache{} +} + +type ( + runningJobsCache map[string]bool + retryingJobsCache map[uint64]retryTask + + retryTask struct { + cancel context.CancelFunc + timeout int + retries int + } +) + +func (c runningJobsCache) put(cfg confgroup.Config) { + c[cfg.FullName()] = true +} +func (c runningJobsCache) remove(cfg confgroup.Config) { + delete(c, cfg.FullName()) +} +func (c runningJobsCache) has(cfg confgroup.Config) bool { + return c[cfg.FullName()] +} + +func (c retryingJobsCache) put(cfg confgroup.Config, retry retryTask) { + c[cfg.Hash()] = retry +} +func (c retryingJobsCache) remove(cfg confgroup.Config) { + delete(c, cfg.Hash()) +} +func (c retryingJobsCache) lookup(cfg confgroup.Config) (retryTask, bool) { + v, ok := c[cfg.Hash()] + return v, ok +} diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/di.go b/src/go/collectors/go.d.plugin/agent/jobmgr/di.go new file mode 100644 index 00000000000000..fa567b2ce39a47 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/jobmgr/di.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/vnodes" +) + +type FileLocker interface { + Lock(name string) (bool, error) + Unlock(name string) error +} + +type Vnodes interface { + Lookup(key string) (*vnodes.VirtualNode, bool) +} + +type StatusSaver interface { + Save(cfg confgroup.Config, state string) + Remove(cfg confgroup.Config) +} + +type StatusStore interface { + Contains(cfg confgroup.Config, states ...string) bool +} + +type Dyncfg interface { + Register(cfg confgroup.Config) + Unregister(cfg confgroup.Config) + UpdateStatus(cfg confgroup.Config, status, payload string) +} diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/manager.go b/src/go/collectors/go.d.plugin/agent/jobmgr/manager.go new file mode 100644 index 00000000000000..7088f84f9c8df7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/jobmgr/manager.go @@ -0,0 +1,369 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"log/slog"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/logger"
+
+	"gopkg.in/yaml.v2"
+)
+
+type Job interface {
+	Name() string
+	ModuleName() string
+	FullName() string
+	AutoDetection() bool
+	AutoDetectionEvery() int
+	RetryAutoDetection() bool
+	Tick(clock int)
+	Start()
+	Stop()
+	Cleanup()
+}
+
+type jobStatus = string
+
+const (
+	jobStatusRunning          jobStatus = "running"                    // Check() succeeded
+	jobStatusRetrying         jobStatus = "retrying"                   // Check() failed, but we need to keep trying auto-detection
+	jobStatusStoppedFailed    jobStatus = "stopped_failed"             // Check() failed
+	jobStatusStoppedDupLocal  jobStatus = "stopped_duplicate_local"    // a job with the same FullName is running
+	jobStatusStoppedDupGlobal jobStatus = "stopped_duplicate_global"   // a job with the same FullName is registered by another plugin
+	jobStatusStoppedRegErr    jobStatus = "stopped_registration_error" // an error during registration (only 'too many open files')
+	jobStatusStoppedCreateErr jobStatus = "stopped_creation_error"     // an error during creation (yaml unmarshal)
+)
+
+func NewManager() *Manager {
+	np := noop{}
+	mgr := &Manager{
+		Logger: logger.New().With(
+			slog.String("component", "job manager"),
+		),
+		Out:         io.Discard,
+		FileLock:    np,
+		StatusSaver: np,
+		StatusStore: np,
+		Vnodes:      np,
+		Dyncfg:      np,
+
+		confGroupCache: confgroup.NewCache(),
+
+		runningJobs:  newRunningJobsCache(),
+		retryingJobs: newRetryingJobsCache(),
+
+		addCh:    make(chan confgroup.Config),
+		removeCh: make(chan confgroup.Config),
+	}
+
+	return mgr
+}
+
+type Manager struct {
+	*logger.Logger
+
+	PluginName string
+	Out        io.Writer
+	Modules    module.Registry
+
+	FileLock    FileLocker
+	StatusSaver StatusSaver
+	StatusStore StatusStore
+	Vnodes      Vnodes
+	Dyncfg      Dyncfg
+
+	confGroupCache *confgroup.Cache
+	runningJobs    *runningJobsCache
+	retryingJobs   *retryingJobsCache
+
+	addCh    chan confgroup.Config
+	removeCh chan confgroup.Config
+
+	queueMux sync.Mutex
+	queue    []Job
+}
+
+func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) {
+	m.Info("instance is started")
+	defer func() { m.cleanup(); m.Info("instance is stopped") }()
+
+	var wg sync.WaitGroup
+
+	wg.Add(1)
+	go func() { defer wg.Done(); m.runConfigGroupsHandling(ctx, in) }()
+
+	wg.Add(1)
+	go func() { defer wg.Done(); m.runConfigsHandling(ctx) }()
+
+	wg.Add(1)
+	go func() { defer wg.Done(); m.runRunningJobsHandling(ctx) }()
+
+	wg.Wait()
+	<-ctx.Done()
+}
+
+func (m *Manager) runConfigGroupsHandling(ctx context.Context, in chan []*confgroup.Group) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case groups := <-in:
+			for _, gr := range groups {
+				select {
+				case <-ctx.Done():
+					return
+				default:
+					a, r := m.confGroupCache.Add(gr)
+					m.Debugf("received config group ('%s'): %d jobs (added: %d, removed: %d)", gr.Source, len(gr.Configs), len(a), len(r))
+					sendConfigs(ctx, m.removeCh, r)
+					sendConfigs(ctx, m.addCh, a)
+				}
+			}
+		}
+	}
+}
+
+func (m *Manager) runConfigsHandling(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case cfg := <-m.addCh:
+			m.addConfig(ctx, cfg)
+		case cfg := <-m.removeCh:
+			m.removeConfig(cfg)
+		}
+	}
+}
+
+func (m *Manager) cleanup() {
+	for _, task := range *m.retryingJobs {
+		task.cancel()
+	}
+	for name := range *m.runningJobs {
+		_ = m.FileLock.Unlock(name)
+	}
+	// TODO: m.Dyncfg.Register() ?
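+	// the retry tasks are cancelled and the file locks released above,
+	// so the only thing left is to stop the jobs that are still running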
+ m.stopRunningJobs() +} + +func (m *Manager) addConfig(ctx context.Context, cfg confgroup.Config) { + task, isRetry := m.retryingJobs.lookup(cfg) + if isRetry { + task.cancel() + m.retryingJobs.remove(cfg) + } else { + m.Dyncfg.Register(cfg) + } + + if m.runningJobs.has(cfg) { + m.Infof("%s[%s] job is being served by another job, skipping it", cfg.Module(), cfg.Name()) + m.StatusSaver.Save(cfg, jobStatusStoppedDupLocal) + m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another job") + return + } + + job, err := m.createJob(cfg) + if err != nil { + m.Warningf("couldn't create %s[%s]: %v", cfg.Module(), cfg.Name(), err) + m.StatusSaver.Save(cfg, jobStatusStoppedCreateErr) + m.Dyncfg.UpdateStatus(cfg, "error", fmt.Sprintf("build error: %s", err)) + return + } + + cleanupJob := true + defer func() { + if cleanupJob { + job.Cleanup() + } + }() + + if isRetry { + job.AutoDetectEvery = task.timeout + job.AutoDetectTries = task.retries + } else if job.AutoDetectionEvery() == 0 { + switch { + case m.StatusStore.Contains(cfg, jobStatusRunning, jobStatusRetrying): + m.Infof("%s[%s] job last status is running/retrying, applying recovering settings", cfg.Module(), cfg.Name()) + job.AutoDetectEvery = 30 + job.AutoDetectTries = 11 + case isInsideK8sCluster() && cfg.Provider() == "file watcher": + m.Infof("%s[%s] is k8s job, applying recovering settings", cfg.Module(), cfg.Name()) + job.AutoDetectEvery = 10 + job.AutoDetectTries = 7 + } + } + + switch detection(job) { + case jobStatusRunning: + if ok, err := m.FileLock.Lock(cfg.FullName()); ok || err != nil && !isTooManyOpenFiles(err) { + cleanupJob = false + m.runningJobs.put(cfg) + m.StatusSaver.Save(cfg, jobStatusRunning) + m.Dyncfg.UpdateStatus(cfg, "running", "") + m.startJob(job) + } else if isTooManyOpenFiles(err) { + m.Error(err) + m.StatusSaver.Save(cfg, jobStatusStoppedRegErr) + m.Dyncfg.UpdateStatus(cfg, "error", "too many open files") + } else { + m.Infof("%s[%s] job is being served by another plugin, skipping it", cfg.Module(), cfg.Name()) + m.StatusSaver.Save(cfg, jobStatusStoppedDupGlobal) + m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another plugin") + } + case jobStatusRetrying: + m.Infof("%s[%s] job detection failed, will retry in %d seconds", cfg.Module(), cfg.Name(), job.AutoDetectionEvery()) + ctx, cancel := context.WithCancel(ctx) + m.retryingJobs.put(cfg, retryTask{ + cancel: cancel, + timeout: job.AutoDetectionEvery(), + retries: job.AutoDetectTries, + }) + go runRetryTask(ctx, m.addCh, cfg, time.Second*time.Duration(job.AutoDetectionEvery())) + m.StatusSaver.Save(cfg, jobStatusRetrying) + m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, will retry later") + case jobStatusStoppedFailed: + m.StatusSaver.Save(cfg, jobStatusStoppedFailed) + m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, stopping it") + default: + m.Warningf("%s[%s] job detection: unknown state", cfg.Module(), cfg.Name()) + } +} + +func (m *Manager) removeConfig(cfg confgroup.Config) { + if m.runningJobs.has(cfg) { + m.stopJob(cfg.FullName()) + _ = m.FileLock.Unlock(cfg.FullName()) + m.runningJobs.remove(cfg) + } + + if task, ok := m.retryingJobs.lookup(cfg); ok { + task.cancel() + m.retryingJobs.remove(cfg) + } + + m.StatusSaver.Remove(cfg) + m.Dyncfg.Unregister(cfg) +} + +func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { + creator, ok := m.Modules[cfg.Module()] + if !ok { + return nil, fmt.Errorf("can not find %s module", cfg.Module()) + } + + m.Debugf("creating %s[%s] job, config: %v", 
cfg.Module(), cfg.Name(), cfg)
+
+	mod := creator.Create()
+	if err := unmarshal(cfg, mod); err != nil {
+		return nil, err
+	}
+
+	labels := make(map[string]string)
+	for name, value := range cfg.Labels() {
+		n, ok1 := name.(string)
+		v, ok2 := value.(string)
+		if ok1 && ok2 {
+			labels[n] = v
+		}
+	}
+
+	jobCfg := module.JobConfig{
+		PluginName:      m.PluginName,
+		Name:            cfg.Name(),
+		ModuleName:      cfg.Module(),
+		FullName:        cfg.FullName(),
+		UpdateEvery:     cfg.UpdateEvery(),
+		AutoDetectEvery: cfg.AutoDetectionRetry(),
+		Priority:        cfg.Priority(),
+		Labels:          labels,
+		IsStock:         isStockConfig(cfg),
+		Module:          mod,
+		Out:             m.Out,
+	}
+
+	if cfg.Vnode() != "" {
+		n, ok := m.Vnodes.Lookup(cfg.Vnode())
+		if !ok {
+			return nil, fmt.Errorf("vnode '%s' not found", cfg.Vnode())
+		}
+
+		jobCfg.VnodeGUID = n.GUID
+		jobCfg.VnodeHostname = n.Hostname
+		jobCfg.VnodeLabels = n.Labels
+	}
+
+	job := module.NewJob(jobCfg)
+
+	return job, nil
+}
+
+func detection(job Job) jobStatus {
+	if !job.AutoDetection() {
+		if job.RetryAutoDetection() {
+			return jobStatusRetrying
+		} else {
+			return jobStatusStoppedFailed
+		}
+	}
+	return jobStatusRunning
+}
+
+func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config, timeout time.Duration) {
+	t := time.NewTimer(timeout)
+	defer t.Stop()
+
+	select {
+	case <-ctx.Done():
+	case <-t.C:
+		sendConfig(ctx, out, cfg)
+	}
+}
+
+func sendConfigs(ctx context.Context, out chan<- confgroup.Config, cfgs []confgroup.Config) {
+	for _, cfg := range cfgs {
+		sendConfig(ctx, out, cfg)
+	}
+}
+
+func sendConfig(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) {
+	select {
+	case <-ctx.Done():
+		return
+	case out <- cfg:
+	}
+}
+
+func unmarshal(conf interface{}, module interface{}) error {
+	bs, err := yaml.Marshal(conf)
+	if err != nil {
+		return err
+	}
+	return yaml.Unmarshal(bs, module)
+}
+
+func isInsideK8sCluster() bool {
+	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+	return host != "" && port != ""
+}
+
+func isTooManyOpenFiles(err error) bool {
+	return err != nil && strings.Contains(err.Error(), "too many open files")
+}
+
+func isStockConfig(cfg confgroup.Config) bool {
+	if !strings.HasPrefix(cfg.Provider(), "file") {
+		return false
+	}
+	return !strings.Contains(cfg.Source(), "/etc/netdata")
+}
diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/manager_test.go b/src/go/collectors/go.d.plugin/agent/jobmgr/manager_test.go
new file mode 100644
index 00000000000000..69dceda49734ae
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/jobmgr/manager_test.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+	"bytes"
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/confgroup"
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/agent/safewriter"
+	"github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt
+func TestNewManager(t *testing.T) {
+
+}
+
+// TODO: tech debt
+func TestManager_Run(t *testing.T) {
+	groups := []*confgroup.Group{
+		{
+			Source: "source",
+			Configs: []confgroup.Config{
+				{
+					"name":                "name",
+					"module":              "success",
+					"update_every":        module.UpdateEvery,
+					"autodetection_retry": module.AutoDetectionRetry,
+					"priority":            module.Priority,
+				},
+				{
+					"name":                "name",
+					"module":              "success",
+					"update_every":        module.UpdateEvery + 1,
+					"autodetection_retry": module.AutoDetectionRetry,
+					"priority":            module.Priority,
+				},
+				{
+					"name":                "name",
+					"module":              "fail",
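+					// the 'fail' module's Init returns false (see prepareMockRegistry),
+					// so this job is expected to be dropped during auto-detection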
+ "update_every": module.UpdateEvery + 1, + "autodetection_retry": module.AutoDetectionRetry, + "priority": module.Priority, + }, + }, + }, + } + var buf bytes.Buffer + mgr := NewManager() + mgr.Modules = prepareMockRegistry() + mgr.Out = safewriter.New(&buf) + mgr.PluginName = "test.plugin" + + ctx, cancel := context.WithCancel(context.Background()) + in := make(chan []*confgroup.Group) + var wg sync.WaitGroup + + wg.Add(1) + go func() { defer wg.Done(); mgr.Run(ctx, in) }() + + select { + case in <- groups: + case <-time.After(time.Second * 2): + } + + time.Sleep(time.Second * 5) + cancel() + wg.Wait() + + assert.True(t, buf.String() != "") +} + +func prepareMockRegistry() module.Registry { + reg := module.Registry{} + reg.Register("success", module.Creator{ + Create: func() module.Module { + return &module.MockModule{ + InitFunc: func() bool { return true }, + CheckFunc: func() bool { return true }, + ChartsFunc: func() *module.Charts { + return &module.Charts{ + &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}, + } + }, + CollectFunc: func() map[string]int64 { + return map[string]int64{"id1": 1} + }, + } + }, + }) + reg.Register("fail", module.Creator{ + Create: func() module.Module { + return &module.MockModule{ + InitFunc: func() bool { return false }, + } + }, + }) + return reg +} diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/noop.go b/src/go/collectors/go.d.plugin/agent/jobmgr/noop.go new file mode 100644 index 00000000000000..15883105de300a --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/jobmgr/noop.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/vnodes" +) + +type noop struct{} + +func (n noop) Lock(string) (bool, error) { return true, nil } +func (n noop) Unlock(string) error { return nil } +func (n noop) Save(confgroup.Config, string) {} +func (n noop) Remove(confgroup.Config) {} +func (n noop) Contains(confgroup.Config, ...string) bool { return false } +func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false } +func (n noop) Register(confgroup.Config) { return } +func (n noop) Unregister(confgroup.Config) { return } +func (n noop) UpdateStatus(confgroup.Config, string, string) { return } diff --git a/src/go/collectors/go.d.plugin/agent/jobmgr/run.go b/src/go/collectors/go.d.plugin/agent/jobmgr/run.go new file mode 100644 index 00000000000000..f1a14cadc20d91 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/jobmgr/run.go @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "context" + "slices" + "time" + + "github.com/netdata/go.d.plugin/agent/ticker" +) + +func (m *Manager) runRunningJobsHandling(ctx context.Context) { + tk := ticker.New(time.Second) + defer tk.Stop() + + for { + select { + case <-ctx.Done(): + return + case clock := <-tk.C: + //m.Debugf("tick %d", clock) + m.notifyRunningJobs(clock) + } + } +} + +func (m *Manager) notifyRunningJobs(clock int) { + m.queueMux.Lock() + defer m.queueMux.Unlock() + + for _, v := range m.queue { + v.Tick(clock) + } +} + +func (m *Manager) startJob(job Job) { + m.queueMux.Lock() + defer m.queueMux.Unlock() + + go job.Start() + + m.queue = append(m.queue, job) +} + +func (m *Manager) stopJob(name string) { + m.queueMux.Lock() + defer m.queueMux.Unlock() + + idx := slices.IndexFunc(m.queue, func(job Job) bool { + return job.FullName() == name + }) + + if idx != -1 { + j := 
m.queue[idx]
+		j.Stop()
+
+		copy(m.queue[idx:], m.queue[idx+1:])
+		m.queue[len(m.queue)-1] = nil
+		m.queue = m.queue[:len(m.queue)-1]
+	}
+}
+
+func (m *Manager) stopRunningJobs() {
+	m.queueMux.Lock()
+	defer m.queueMux.Unlock()
+
+	for i, v := range m.queue {
+		v.Stop()
+		m.queue[i] = nil
+	}
+	m.queue = m.queue[:0]
+}
diff --git a/src/go/collectors/go.d.plugin/agent/module/charts.go b/src/go/collectors/go.d.plugin/agent/module/charts.go
new file mode 100644
index 00000000000000..2b9c35c3b47032
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/module/charts.go
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+type (
+	ChartType string
+	DimAlgo   string
+)
+
+const (
+	// Line chart type.
+	Line ChartType = "line"
+	// Area chart type.
+	Area ChartType = "area"
+	// Stacked chart type.
+	Stacked ChartType = "stacked"
+
+	// Absolute dimension algorithm.
+	// The value is drawn as-is (interpolated to the second boundary).
+	Absolute DimAlgo = "absolute"
+	// Incremental dimension algorithm.
+	// The value increases over time, the difference from the last value is presented in the chart,
+	// the server interpolates the value and calculates a per second figure.
+	Incremental DimAlgo = "incremental"
+	// PercentOfAbsolute dimension algorithm.
+	// The percent of this value compared to the total of all dimensions.
+	PercentOfAbsolute DimAlgo = "percentage-of-absolute-row"
+	// PercentOfIncremental dimension algorithm.
+	// The percent of this value compared to the incremental total of all dimensions.
+	PercentOfIncremental DimAlgo = "percentage-of-incremental-row"
+)
+
+const (
+	// Not documented.
+	// https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L204
+
+	LabelSourceAuto = 1 << 0
+	LabelSourceConf = 1 << 1
+	LabelSourceK8s  = 1 << 2
+)
+
+func (d DimAlgo) String() string {
+	switch d {
+	case Absolute, Incremental, PercentOfAbsolute, PercentOfIncremental:
+		return string(d)
+	}
+	return string(Absolute)
+}
+
+func (c ChartType) String() string {
+	switch c {
+	case Line, Area, Stacked:
+		return string(c)
+	}
+	return string(Line)
+}
+
+type (
+	// Charts is a collection of Charts.
+	Charts []*Chart
+
+	// Opts represents chart options.
+	Opts struct {
+		Obsolete   bool
+		Detail     bool
+		StoreFirst bool
+		Hidden     bool
+	}
+
+	// Chart represents a chart.
+	// For the full description please visit https://docs.netdata.cloud/collectors/plugins.d/#chart
+	Chart struct {
+		// typeID is the unique identification of the chart. If not specified,
+		// the orchestrator will use job full name + chart ID as typeID (default behaviour).
+		typ string
+		id  string
+
+		OverModule string
+		IDSep      bool
+		ID         string
+		OverID     string
+		Title      string
+		Units      string
+		Fam        string
+		Ctx        string
+		Type       ChartType
+		Priority   int
+		Opts
+
+		Labels []Label
+		Dims   Dims
+		Vars   Vars
+
+		Retries int
+
+		remove bool
+		// created flag is used to indicate whether the chart needs to be created by the orchestrator.
+		created bool
+		// updated flag is used to indicate whether the chart was updated on last data collection interval.
+		updated bool
+
+		// ignore flag is used to indicate that the chart shouldn't be sent to the netdata plugins.d.
+		ignore bool
+	}
+
+	Label struct {
+		Key    string
+		Value  string
+		Source int
+	}
+
+	// DimOpts represents dimension options.
+	DimOpts struct {
+		Obsolete   bool
+		Hidden     bool
+		NoReset    bool
+		NoOverflow bool
+	}
+
+	// Dim represents a chart dimension.
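+	// A minimal dimension definition, e.g.:
+	//
+	//	&Dim{ID: "reads", Algo: Incremental}
+	//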
+	// For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#dimension.
+	Dim struct {
+		ID   string
+		Name string
+		Algo DimAlgo
+		Mul  int
+		Div  int
+		DimOpts
+
+		remove bool
+	}
+
+	// Var represents a chart variable.
+	// For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#variable
+	Var struct {
+		ID    string
+		Name  string
+		Value int64
+	}
+
+	// Dims is a collection of dims.
+	Dims []*Dim
+	// Vars is a collection of vars.
+	Vars []*Var
+)
+
+func (o Opts) String() string {
+	var b strings.Builder
+	if o.Detail {
+		b.WriteString(" detail")
+	}
+	if o.Hidden {
+		b.WriteString(" hidden")
+	}
+	if o.Obsolete {
+		b.WriteString(" obsolete")
+	}
+	if o.StoreFirst {
+		b.WriteString(" store_first")
+	}
+
+	if len(b.String()) == 0 {
+		return ""
+	}
+	return b.String()[1:]
+}
+
+func (o DimOpts) String() string {
+	var b strings.Builder
+	if o.Hidden {
+		b.WriteString(" hidden")
+	}
+	if o.NoOverflow {
+		b.WriteString(" nooverflow")
+	}
+	if o.NoReset {
+		b.WriteString(" noreset")
+	}
+	if o.Obsolete {
+		b.WriteString(" obsolete")
+	}
+
+	if len(b.String()) == 0 {
+		return ""
+	}
+	return b.String()[1:]
+}
+
+// Add adds (appends) a variable number of Charts.
+func (c *Charts) Add(charts ...*Chart) error {
+	for _, chart := range charts {
+		err := checkChart(chart)
+		if err != nil {
+			return fmt.Errorf("error on adding chart '%s' : %s", chart.ID, err)
+		}
+		if chart := c.Get(chart.ID); chart != nil && !chart.remove {
+			return fmt.Errorf("error on adding chart : '%s' is already in charts", chart.ID)
+		}
+		*c = append(*c, chart)
+	}
+
+	return nil
+}
+
+// Get returns the chart by ID.
+func (c Charts) Get(chartID string) *Chart {
+	idx := c.index(chartID)
+	if idx == -1 {
+		return nil
+	}
+	return c[idx]
+}
+
+// Has returns true if Charts contain the chart with the given ID, false otherwise.
+func (c Charts) Has(chartID string) bool {
+	return c.index(chartID) != -1
+}
+
+// Remove removes the chart from Charts by ID.
+// Avoid using it at runtime.
+func (c *Charts) Remove(chartID string) error {
+	idx := c.index(chartID)
+	if idx == -1 {
+		return fmt.Errorf("error on removing chart : '%s' is not in charts", chartID)
+	}
+	copy((*c)[idx:], (*c)[idx+1:])
+	(*c)[len(*c)-1] = nil
+	*c = (*c)[:len(*c)-1]
+	return nil
+}
+
+// Copy returns a deep copy of Charts.
+func (c Charts) Copy() *Charts {
+	charts := Charts{}
+	for idx := range c {
+		charts = append(charts, c[idx].Copy())
+	}
+	return &charts
+}
+
+func (c Charts) index(chartID string) int {
+	for idx := range c {
+		if c[idx].ID == chartID {
+			return idx
+		}
+	}
+	return -1
+}
+
+// MarkNotCreated changes the 'created' chart flag to false.
+// Use it to add a dimension at runtime.
+func (c *Chart) MarkNotCreated() {
+	c.created = false
+}
+
+// MarkRemove sets the 'remove' flag and the Obsolete option to true.
+// Use it to remove a chart at runtime.
+func (c *Chart) MarkRemove() {
+	c.Obsolete = true
+	c.remove = true
+}
+
+// MarkDimRemove sets the 'remove' flag, the Obsolete and optionally the Hidden options to true.
+// Use it to remove a dimension at runtime.
+func (c *Chart) MarkDimRemove(dimID string, hide bool) error {
+	if !c.HasDim(dimID) {
+		return fmt.Errorf("chart '%s' has no '%s' dimension", c.ID, dimID)
+	}
+	dim := c.GetDim(dimID)
+	dim.Obsolete = true
+	if hide {
+		dim.Hidden = true
+	}
+	dim.remove = true
+	return nil
+}
+
+// AddDim adds a new dimension to the chart dimensions.
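+// A sketch of a typical runtime extension (the chart and dimension IDs here
+// are hypothetical):
+//
+//	if chart := charts.Get("requests"); chart != nil && !chart.HasDim("timeout") {
+//		_ = chart.AddDim(&Dim{ID: "timeout", Algo: Incremental})
+//		chart.MarkNotCreated()
+//	}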
+func (c *Chart) AddDim(newDim *Dim) error {
+	err := checkDim(newDim)
+	if err != nil {
+		return fmt.Errorf("error on adding dim to chart '%s' : %s", c.ID, err)
+	}
+	if c.HasDim(newDim.ID) {
+		return fmt.Errorf("error on adding dim : '%s' is already in chart '%s' dims", newDim.ID, c.ID)
+	}
+	c.Dims = append(c.Dims, newDim)
+
+	return nil
+}
+
+// AddVar adds a new variable to the chart variables.
+func (c *Chart) AddVar(newVar *Var) error {
+	err := checkVar(newVar)
+	if err != nil {
+		return fmt.Errorf("error on adding var to chart '%s' : %s", c.ID, err)
+	}
+	if c.indexVar(newVar.ID) != -1 {
+		return fmt.Errorf("error on adding var : '%s' is already in chart '%s' vars", newVar.ID, c.ID)
+	}
+	c.Vars = append(c.Vars, newVar)
+
+	return nil
+}
+
+// GetDim returns a dimension by ID.
+func (c *Chart) GetDim(dimID string) *Dim {
+	idx := c.indexDim(dimID)
+	if idx == -1 {
+		return nil
+	}
+	return c.Dims[idx]
+}
+
+// RemoveDim removes a dimension by ID.
+// Avoid using it at runtime.
+func (c *Chart) RemoveDim(dimID string) error {
+	idx := c.indexDim(dimID)
+	if idx == -1 {
+		return fmt.Errorf("error on removing dim : '%s' isn't in chart '%s'", dimID, c.ID)
+	}
+	c.Dims = append(c.Dims[:idx], c.Dims[idx+1:]...)
+
+	return nil
+}
+
+// HasDim returns true if the chart contains a dimension with the given ID, false otherwise.
+func (c Chart) HasDim(dimID string) bool {
+	return c.indexDim(dimID) != -1
+}
+
+// Copy returns a deep copy of the chart.
+func (c Chart) Copy() *Chart {
+	chart := c
+	chart.Dims = Dims{}
+	chart.Vars = Vars{}
+
+	for idx := range c.Dims {
+		chart.Dims = append(chart.Dims, c.Dims[idx].copy())
+	}
+	for idx := range c.Vars {
+		chart.Vars = append(chart.Vars, c.Vars[idx].copy())
+	}
+
+	return &chart
+}
+
+func (c Chart) indexDim(dimID string) int {
+	for idx := range c.Dims {
+		if c.Dims[idx].ID == dimID {
+			return idx
+		}
+	}
+	return -1
+}
+
+func (c Chart) indexVar(varID string) int {
+	for idx := range c.Vars {
+		if c.Vars[idx].ID == varID {
+			return idx
+		}
+	}
+	return -1
+}
+
+func (d Dim) copy() *Dim {
+	return &d
+}
+
+func (v Var) copy() *Var {
+	return &v
+}
+
+func checkCharts(charts ...*Chart) error {
+	for _, chart := range charts {
+		err := checkChart(chart)
+		if err != nil {
+			return fmt.Errorf("chart '%s' : %v", chart.ID, err)
+		}
+	}
+	return nil
+}
+
+func checkChart(chart *Chart) error {
+	if chart.ID == "" {
+		return errors.New("empty ID")
+	}
+
+	if chart.Title == "" {
+		return errors.New("empty Title")
+	}
+
+	if chart.Units == "" {
+		return errors.New("empty Units")
+	}
+
+	if id := checkID(chart.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in ID : '%c'", id)
+	}
+
+	set := make(map[string]bool)
+
+	for _, d := range chart.Dims {
+		err := checkDim(d)
+		if err != nil {
+			return err
+		}
+		if set[d.ID] {
+			return fmt.Errorf("duplicate dim '%s'", d.ID)
+		}
+		set[d.ID] = true
+	}
+
+	set = make(map[string]bool)
+
+	for _, v := range chart.Vars {
+		if err := checkVar(v); err != nil {
+			return err
+		}
+		if set[v.ID] {
+			return fmt.Errorf("duplicate var '%s'", v.ID)
+		}
+		set[v.ID] = true
+	}
+	return nil
+}
+
+func checkDim(d *Dim) error {
+	if d.ID == "" {
+		return errors.New("empty dim ID")
+	}
+	if id := checkID(d.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in dim ID '%s' : '%c'", d.ID, id)
+	}
+	return nil
+}
+
+func checkVar(v *Var) error {
+	if v.ID == "" {
+		return errors.New("empty var ID")
+	}
+	if id := checkID(v.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in var ID '%s' : '%c'", v.ID, id)
+	}
+	return nil
+} + +func checkID(id string) int { + for _, r := range id { + if unicode.IsSpace(r) { + return int(r) + } + } + return -1 +} diff --git a/src/go/collectors/go.d.plugin/agent/module/charts_test.go b/src/go/collectors/go.d.plugin/agent/module/charts_test.go new file mode 100644 index 00000000000000..7c35bb33e4c94e --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/charts_test.go @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createTestChart(id string) *Chart { + return &Chart{ + ID: id, + Title: "Title", + Units: "units", + Fam: "family", + Ctx: "context", + Type: Line, + Dims: Dims{ + {ID: "dim1", Algo: Absolute}, + }, + Vars: Vars{ + {ID: "var1", Value: 1}, + }, + } +} + +func TestDimAlgo_String(t *testing.T) { + cases := []struct { + expected string + actual fmt.Stringer + }{ + {"absolute", Absolute}, + {"incremental", Incremental}, + {"percentage-of-absolute-row", PercentOfAbsolute}, + {"percentage-of-incremental-row", PercentOfIncremental}, + {"absolute", DimAlgo("wrong")}, + } + + for _, v := range cases { + assert.Equal(t, v.expected, v.actual.String()) + } +} + +func TestChartType_String(t *testing.T) { + cases := []struct { + expected string + actual fmt.Stringer + }{ + {"line", Line}, + {"area", Area}, + {"stacked", Stacked}, + {"line", ChartType("wrong")}, + } + + for _, v := range cases { + assert.Equal(t, v.expected, v.actual.String()) + } +} + +func TestOpts_String(t *testing.T) { + cases := []struct { + expected string + actual fmt.Stringer + }{ + {"", Opts{}}, + { + "detail hidden obsolete store_first", + Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true}, + }, + { + "detail hidden obsolete store_first", + Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true}, + }, + } + + for _, v := range cases { + assert.Equal(t, v.expected, v.actual.String()) + } +} + +func TestDimOpts_String(t *testing.T) { + cases := []struct { + expected string + actual fmt.Stringer + }{ + {"", DimOpts{}}, + { + "hidden nooverflow noreset obsolete", + DimOpts{Hidden: true, NoOverflow: true, NoReset: true, Obsolete: true}, + }, + { + "hidden obsolete", + DimOpts{Hidden: true, NoOverflow: false, NoReset: false, Obsolete: true}, + }, + } + + for _, v := range cases { + assert.Equal(t, v.expected, v.actual.String()) + } +} + +func TestCharts_Copy(t *testing.T) { + orig := &Charts{ + createTestChart("1"), + createTestChart("2"), + } + copied := orig.Copy() + + require.False(t, orig == copied, "Charts copy points to the same address") + require.Len(t, *orig, len(*copied)) + + for idx := range *orig { + compareCharts(t, (*orig)[idx], (*copied)[idx]) + } +} + +func TestChart_Copy(t *testing.T) { + orig := createTestChart("1") + + compareCharts(t, orig, orig.Copy()) +} + +func TestCharts_Add(t *testing.T) { + charts := Charts{} + chart1 := createTestChart("1") + chart2 := createTestChart("2") + chart3 := createTestChart("") + + // OK case + assert.NoError(t, charts.Add( + chart1, + chart2, + )) + assert.Len(t, charts, 2) + + // NG case + assert.Error(t, charts.Add( + chart3, + chart1, + chart2, + )) + assert.Len(t, charts, 2) + + assert.True(t, charts[0] == chart1) + assert.True(t, charts[1] == chart2) +} + +func TestCharts_Add_SameID(t *testing.T) { + charts := Charts{} + chart1 := createTestChart("1") + chart2 := createTestChart("1") + + assert.NoError(t, charts.Add(chart1)) + assert.Error(t, charts.Add(chart2)) 
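+	// the duplicate is rejected and the original chart is kept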
+ assert.Len(t, charts, 1) + + charts = Charts{} + chart1 = createTestChart("1") + chart2 = createTestChart("1") + + assert.NoError(t, charts.Add(chart1)) + chart1.MarkRemove() + assert.NoError(t, charts.Add(chart2)) + assert.Len(t, charts, 2) +} + +func TestCharts_Get(t *testing.T) { + chart := createTestChart("1") + charts := Charts{ + chart, + } + + // OK case + assert.True(t, chart == charts.Get("1")) + // NG case + assert.Nil(t, charts.Get("2")) +} + +func TestCharts_Has(t *testing.T) { + chart := createTestChart("1") + charts := &Charts{ + chart, + } + + // OK case + assert.True(t, charts.Has("1")) + // NG case + assert.False(t, charts.Has("2")) +} + +func TestCharts_Remove(t *testing.T) { + chart := createTestChart("1") + charts := &Charts{ + chart, + } + + // OK case + assert.NoError(t, charts.Remove("1")) + assert.Len(t, *charts, 0) + + // NG case + assert.Error(t, charts.Remove("2")) +} + +func TestChart_AddDim(t *testing.T) { + chart := createTestChart("1") + dim := &Dim{ID: "dim2"} + + // OK case + assert.NoError(t, chart.AddDim(dim)) + assert.Len(t, chart.Dims, 2) + + // NG case + assert.Error(t, chart.AddDim(dim)) + assert.Len(t, chart.Dims, 2) +} + +func TestChart_AddVar(t *testing.T) { + chart := createTestChart("1") + variable := &Var{ID: "var2"} + + // OK case + assert.NoError(t, chart.AddVar(variable)) + assert.Len(t, chart.Vars, 2) + + // NG case + assert.Error(t, chart.AddVar(variable)) + assert.Len(t, chart.Vars, 2) +} + +func TestChart_GetDim(t *testing.T) { + chart := &Chart{ + Dims: Dims{ + {ID: "1"}, + {ID: "2"}, + }, + } + + // OK case + assert.True(t, chart.GetDim("1") != nil && chart.GetDim("1").ID == "1") + + // NG case + assert.Nil(t, chart.GetDim("3")) +} + +func TestChart_RemoveDim(t *testing.T) { + chart := createTestChart("1") + + // OK case + assert.NoError(t, chart.RemoveDim("dim1")) + assert.Len(t, chart.Dims, 0) + + // NG case + assert.Error(t, chart.RemoveDim("dim2")) +} + +func TestChart_HasDim(t *testing.T) { + chart := createTestChart("1") + + // OK case + assert.True(t, chart.HasDim("dim1")) + // NG case + assert.False(t, chart.HasDim("dim2")) +} + +func TestChart_MarkNotCreated(t *testing.T) { + chart := createTestChart("1") + + chart.MarkNotCreated() + assert.False(t, chart.created) +} + +func TestChart_MarkRemove(t *testing.T) { + chart := createTestChart("1") + + chart.MarkRemove() + assert.True(t, chart.remove) + assert.True(t, chart.Obsolete) +} + +func TestChart_MarkDimRemove(t *testing.T) { + chart := createTestChart("1") + + assert.Error(t, chart.MarkDimRemove("dim99", false)) + assert.NoError(t, chart.MarkDimRemove("dim1", true)) + assert.True(t, chart.GetDim("dim1").Obsolete) + assert.True(t, chart.GetDim("dim1").Hidden) + assert.True(t, chart.GetDim("dim1").remove) +} + +func TestChart_check(t *testing.T) { + // OK case + chart := createTestChart("1") + assert.NoError(t, checkChart(chart)) + + // NG case + chart = createTestChart("1") + chart.ID = "" + assert.Error(t, checkChart(chart)) + + chart = createTestChart("1") + chart.ID = "invalid id" + assert.Error(t, checkChart(chart)) + + chart = createTestChart("1") + chart.Title = "" + assert.Error(t, checkChart(chart)) + + chart = createTestChart("1") + chart.Units = "" + assert.Error(t, checkChart(chart)) + + chart = createTestChart("1") + chart.Dims = Dims{ + {ID: "1"}, + {ID: "1"}, + } + assert.Error(t, checkChart(chart)) + + chart = createTestChart("1") + chart.Vars = Vars{ + {ID: "1"}, + {ID: "1"}, + } + assert.Error(t, checkChart(chart)) +} + +func TestDim_check(t *testing.T) { + 
// OK case + dim := &Dim{ID: "id"} + assert.NoError(t, checkDim(dim)) + + // NG case + dim = &Dim{ID: "id"} + dim.ID = "" + assert.Error(t, checkDim(dim)) + + dim = &Dim{ID: "id"} + dim.ID = "invalid id" + assert.Error(t, checkDim(dim)) +} + +func TestVar_check(t *testing.T) { + // OK case + v := &Var{ID: "id"} + assert.NoError(t, checkVar(v)) + + // NG case + v = &Var{ID: "id"} + v.ID = "" + assert.Error(t, checkVar(v)) + + v = &Var{ID: "id"} + v.ID = "invalid id" + assert.Error(t, checkVar(v)) +} + +func compareCharts(t *testing.T, orig, copied *Chart) { + // 1. compare chart pointers + // 2. compare Dims, Vars length + // 3. compare Dims, Vars pointers + + assert.False(t, orig == copied, "Chart copy ChartsFunc points to the same address") + + require.Len(t, orig.Dims, len(copied.Dims)) + require.Len(t, orig.Vars, len(copied.Vars)) + + for idx := range (*orig).Dims { + assert.False(t, orig.Dims[idx] == copied.Dims[idx], "Chart copy dim points to the same address") + assert.Equal(t, orig.Dims[idx], copied.Dims[idx], "Chart copy dim isn't equal to orig") + } + + for idx := range (*orig).Vars { + assert.False(t, orig.Vars[idx] == copied.Vars[idx], "Chart copy var points to the same address") + assert.Equal(t, orig.Vars[idx], copied.Vars[idx], "Chart copy var isn't equal to orig") + } +} diff --git a/src/go/collectors/go.d.plugin/agent/module/job.go b/src/go/collectors/go.d.plugin/agent/module/job.go new file mode 100644 index 00000000000000..6200ff9f5a6a10 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/job.go @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "bytes" + "fmt" + "io" + "log/slog" + "os" + "regexp" + "runtime/debug" + "strings" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/vnodes" + "github.com/netdata/go.d.plugin/logger" +) + +var obsoleteLock = &sync.Mutex{} +var obsoleteCharts = true + +func DontObsoleteCharts() { + obsoleteLock.Lock() + obsoleteCharts = false + obsoleteLock.Unlock() +} + +func shouldObsoleteCharts() bool { + obsoleteLock.Lock() + defer obsoleteLock.Unlock() + return obsoleteCharts +} + +var reSpace = regexp.MustCompile(`\s+`) + +var ndInternalMonitoringDisabled = os.Getenv("NETDATA_INTERNALS_MONITORING") == "NO" + +func newRuntimeChart(pluginName string) *Chart { + // this is needed to keep the same name as we had before https://github.com/netdata/go.d.plugin/issues/650 + ctxName := pluginName + if ctxName == "go.d" { + ctxName = "go" + } + ctxName = reSpace.ReplaceAllString(ctxName, "_") + return &Chart{ + typ: "netdata", + Title: "Execution time", + Units: "ms", + Fam: pluginName, + Ctx: fmt.Sprintf("netdata.%s_plugin_execution_time", ctxName), + Priority: 145000, + Dims: Dims{ + {ID: "time"}, + }, + } +} + +type JobConfig struct { + PluginName string + Name string + ModuleName string + FullName string + Module Module + Labels map[string]string + Out io.Writer + UpdateEvery int + AutoDetectEvery int + Priority int + IsStock bool + + VnodeGUID string + VnodeHostname string + VnodeLabels map[string]string +} + +const ( + penaltyStep = 5 + maxPenalty = 600 + infTries = -1 +) + +func NewJob(cfg JobConfig) *Job { + var buf bytes.Buffer + + j := &Job{ + AutoDetectEvery: cfg.AutoDetectEvery, + AutoDetectTries: infTries, + + pluginName: cfg.PluginName, + name: cfg.Name, + moduleName: cfg.ModuleName, + fullName: cfg.FullName, + updateEvery: cfg.UpdateEvery, + priority: cfg.Priority, + isStock: cfg.IsStock, + module: cfg.Module, + labels: 
cfg.Labels,
+		out:             cfg.Out,
+		runChart:        newRuntimeChart(cfg.PluginName),
+		stop:            make(chan struct{}),
+		tick:            make(chan int),
+		buf:             &buf,
+		api:             netdataapi.New(&buf),
+
+		vnodeGUID:     cfg.VnodeGUID,
+		vnodeHostname: cfg.VnodeHostname,
+		vnodeLabels:   cfg.VnodeLabels,
+	}
+
+	log := logger.New().With(
+		slog.String("collector", j.ModuleName()),
+		slog.String("job", j.Name()),
+	)
+
+	j.Logger = log
+	if j.module != nil {
+		j.module.GetBase().Logger = log
+	}
+
+	return j
+}
+
+// Job represents a job. It's a module wrapper.
+type Job struct {
+	pluginName string
+	name       string
+	moduleName string
+	fullName   string
+
+	updateEvery     int
+	AutoDetectEvery int
+	AutoDetectTries int
+	priority        int
+	labels          map[string]string
+
+	*logger.Logger
+
+	isStock bool
+
+	module Module
+
+	initialized bool
+	panicked    bool
+
+	runChart *Chart
+	charts   *Charts
+	tick     chan int
+	out      io.Writer
+	buf      *bytes.Buffer
+	api      *netdataapi.API
+
+	retries int
+	prevRun time.Time
+
+	stop chan struct{}
+
+	vnodeCreated  bool
+	vnodeGUID     string
+	vnodeHostname string
+	vnodeLabels   map[string]string
+}
+
+// NetdataChartIDMaxLength is the chart ID max length. See RRD_ID_LENGTH_MAX in the netdata source code.
+const NetdataChartIDMaxLength = 1000
+
+// FullName returns the job full name.
+func (j Job) FullName() string {
+	return j.fullName
+}
+
+// ModuleName returns the job module name.
+func (j Job) ModuleName() string {
+	return j.moduleName
+}
+
+// Name returns the job name.
+func (j Job) Name() string {
+	return j.name
+}
+
+// Panicked returns the 'panicked' flag value.
+func (j Job) Panicked() bool {
+	return j.panicked
+}
+
+// AutoDetectionEvery returns the value of AutoDetectEvery.
+func (j Job) AutoDetectionEvery() int {
+	return j.AutoDetectEvery
+}
+
+// RetryAutoDetection returns whether auto-detection should be retried.
+func (j Job) RetryAutoDetection() bool {
+	return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0)
+}
+
+// AutoDetection invokes init, check and postCheck. It handles panics.
+func (j *Job) AutoDetection() (ok bool) {
+	defer func() {
+		if r := recover(); r != nil {
+			ok = false
+			j.panicked = true
+			j.disableAutoDetection()
+
+			j.Errorf("PANIC %v", r)
+			if logger.Level.Enabled(slog.LevelDebug) {
+				j.Errorf("STACK: %s", debug.Stack())
+			}
+		}
+		if !ok {
+			j.module.Cleanup()
+		}
+	}()
+
+	if j.isStock {
+		j.Mute()
+	}
+
+	if ok = j.init(); !ok {
+		j.Error("init failed")
+		j.Unmute()
+		j.disableAutoDetection()
+		return
+	}
+
+	if ok = j.check(); !ok {
+		j.Error("check failed")
+		j.Unmute()
+		return
+	}
+
+	j.Unmute()
+
+	j.Info("check success")
+	if ok = j.postCheck(); !ok {
+		j.Error("postCheck failed")
+		j.disableAutoDetection()
+		return
+	}
+
+	return true
+}
+
+// Tick sends a clock tick to the job. The tick is dropped if the job is still
+// busy with the previous data collection run.
+func (j *Job) Tick(clock int) {
+	select {
+	case j.tick <- clock:
+	default:
+		j.Debug("skipping the tick: the previous run hasn't finished yet")
+	}
+}
+
+// Start starts the job's main loop.
+func (j *Job) Start() {
+	j.Infof("started, data collection interval %ds", j.updateEvery)
+	defer func() { j.Info("stopped") }()
+
+LOOP:
+	for {
+		select {
+		case <-j.stop:
+			break LOOP
+		case t := <-j.tick:
+			if t%(j.updateEvery+j.penalty()) == 0 {
+				j.runOnce()
+			}
+		}
+	}
+	j.module.Cleanup()
+	j.Cleanup()
+	j.stop <- struct{}{}
+}
+
+// Stop stops the job's main loop. It blocks until the job is stopped.
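+// Internally it is a handshake over the stop channel: the first send breaks the
+// main loop, and Stop returns once the loop sends back after running cleanup.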
+func (j *Job) Stop() { + // TODO: should have blocking and non blocking stop + j.stop <- struct{}{} + <-j.stop +} + +func (j *Job) disableAutoDetection() { + j.AutoDetectEvery = 0 +} + +func (j *Job) Cleanup() { + j.buf.Reset() + if !shouldObsoleteCharts() { + return + } + + if !vnodes.Disabled { + if !j.vnodeCreated && j.vnodeGUID != "" { + _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels) + j.vnodeCreated = true + } + _ = j.api.HOST(j.vnodeGUID) + } + + if j.runChart.created { + j.runChart.MarkRemove() + j.createChart(j.runChart) + } + if j.charts != nil { + for _, chart := range *j.charts { + if chart.created { + chart.MarkRemove() + j.createChart(chart) + } + } + } + + if j.buf.Len() > 0 { + _, _ = io.Copy(j.out, j.buf) + } +} + +func (j *Job) init() bool { + if j.initialized { + return true + } + + j.initialized = j.module.Init() + + return j.initialized +} + +func (j *Job) check() bool { + ok := j.module.Check() + if !ok && j.AutoDetectTries != infTries { + j.AutoDetectTries-- + } + return ok +} + +func (j *Job) postCheck() bool { + if j.charts = j.module.Charts(); j.charts == nil { + j.Error("nil charts") + return false + } + if err := checkCharts(*j.charts...); err != nil { + j.Errorf("charts check: %v", err) + return false + } + return true +} + +func (j *Job) runOnce() { + curTime := time.Now() + sinceLastRun := calcSinceLastRun(curTime, j.prevRun) + j.prevRun = curTime + + metrics := j.collect() + + if j.panicked { + return + } + + if j.processMetrics(metrics, curTime, sinceLastRun) { + j.retries = 0 + } else { + j.retries++ + } + + _, _ = io.Copy(j.out, j.buf) + j.buf.Reset() +} + +func (j *Job) collect() (result map[string]int64) { + j.panicked = false + defer func() { + if r := recover(); r != nil { + j.panicked = true + j.Errorf("PANIC: %v", r) + if logger.Level.Enabled(slog.LevelDebug) { + j.Errorf("STACK: %s", debug.Stack()) + } + } + }() + return j.module.Collect() +} + +func (j *Job) processMetrics(metrics map[string]int64, startTime time.Time, sinceLastRun int) bool { + if !vnodes.Disabled { + if !j.vnodeCreated && j.vnodeGUID != "" { + _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels) + j.vnodeCreated = true + } + + _ = j.api.HOST(j.vnodeGUID) + } + + if !ndInternalMonitoringDisabled && !j.runChart.created { + j.runChart.ID = fmt.Sprintf("execution_time_of_%s", j.FullName()) + j.createChart(j.runChart) + } + + elapsed := int64(durationTo(time.Since(startTime), time.Millisecond)) + + var i, updated int + for _, chart := range *j.charts { + if !chart.created { + typeID := fmt.Sprintf("%s.%s", j.FullName(), chart.ID) + if len(typeID) >= NetdataChartIDMaxLength { + j.Warningf("chart 'type.id' length (%d) >= max allowed (%d), the chart is ignored (%s)", + len(typeID), NetdataChartIDMaxLength, typeID) + chart.ignore = true + } + j.createChart(chart) + } + if chart.remove { + continue + } + (*j.charts)[i] = chart + i++ + if len(metrics) == 0 || chart.Obsolete { + continue + } + if j.updateChart(chart, metrics, sinceLastRun) { + updated++ + } + } + *j.charts = (*j.charts)[:i] + + if updated == 0 { + return false + } + if !ndInternalMonitoringDisabled { + j.updateChart(j.runChart, map[string]int64{"time": elapsed}, sinceLastRun) + } + + return true +} + +func (j *Job) createChart(chart *Chart) { + defer func() { chart.created = true }() + if chart.ignore { + return + } + + if chart.Priority == 0 { + chart.Priority = j.priority + j.priority++ + } + _ = j.api.CHART( + getChartType(chart, j), + getChartID(chart), + chart.OverID, + chart.Title, + 
chart.Units, + chart.Fam, + chart.Ctx, + chart.Type.String(), + chart.Priority, + j.updateEvery, + chart.Opts.String(), + j.pluginName, + j.moduleName, + ) + + if chart.Obsolete { + _ = j.api.EMPTYLINE() + return + } + + seen := make(map[string]bool) + for _, l := range chart.Labels { + if l.Key != "" { + seen[l.Key] = true + ls := l.Source + // the default should be auto + // https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L205 + if ls == 0 { + ls = LabelSourceAuto + } + _ = j.api.CLABEL(l.Key, l.Value, ls) + } + } + for k, v := range j.labels { + if !seen[k] { + _ = j.api.CLABEL(k, v, LabelSourceConf) + } + } + _ = j.api.CLABEL("_collect_job", j.Name(), LabelSourceAuto) + _ = j.api.CLABELCOMMIT() + + for _, dim := range chart.Dims { + _ = j.api.DIMENSION( + firstNotEmpty(dim.Name, dim.ID), + dim.Name, + dim.Algo.String(), + handleZero(dim.Mul), + handleZero(dim.Div), + dim.DimOpts.String(), + ) + } + for _, v := range chart.Vars { + if v.Name != "" { + _ = j.api.VARIABLE(v.Name, v.Value) + } else { + _ = j.api.VARIABLE(v.ID, v.Value) + } + } + _ = j.api.EMPTYLINE() +} + +func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun int) bool { + if chart.ignore { + dims := chart.Dims[:0] + for _, dim := range chart.Dims { + if !dim.remove { + dims = append(dims, dim) + } + } + chart.Dims = dims + return false + } + + if !chart.updated { + sinceLastRun = 0 + } + + _ = j.api.BEGIN( + getChartType(chart, j), + getChartID(chart), + sinceLastRun, + ) + var i, updated int + for _, dim := range chart.Dims { + if dim.remove { + continue + } + chart.Dims[i] = dim + i++ + if v, ok := collected[dim.ID]; !ok { + _ = j.api.SETEMPTY(firstNotEmpty(dim.Name, dim.ID)) + } else { + _ = j.api.SET(firstNotEmpty(dim.Name, dim.ID), v) + updated++ + } + } + chart.Dims = chart.Dims[:i] + + for _, vr := range chart.Vars { + if v, ok := collected[vr.ID]; ok { + if vr.Name != "" { + _ = j.api.VARIABLE(vr.Name, v) + } else { + _ = j.api.VARIABLE(vr.ID, v) + } + } + + } + _ = j.api.END() + + if chart.updated = updated > 0; chart.updated { + chart.Retries = 0 + } else { + chart.Retries++ + } + return chart.updated +} + +func (j Job) penalty() int { + v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2 + if v > maxPenalty { + return maxPenalty + } + return v +} + +func getChartType(chart *Chart, j *Job) string { + if chart.typ != "" { + return chart.typ + } + if !chart.IDSep { + chart.typ = j.FullName() + } else if i := strings.IndexByte(chart.ID, '.'); i != -1 { + chart.typ = j.FullName() + "_" + chart.ID[:i] + } else { + chart.typ = j.FullName() + } + if chart.OverModule != "" { + if v := strings.TrimPrefix(chart.typ, j.ModuleName()); v != chart.typ { + chart.typ = chart.OverModule + v + } + } + return chart.typ +} + +func getChartID(chart *Chart) string { + if chart.id != "" { + return chart.id + } + if !chart.IDSep { + return chart.ID + } + if i := strings.IndexByte(chart.ID, '.'); i != -1 { + chart.id = chart.ID[i+1:] + } else { + chart.id = chart.ID + } + return chart.id +} + +func calcSinceLastRun(curTime, prevRun time.Time) int { + if prevRun.IsZero() { + return 0 + } + return int((curTime.UnixNano() - prevRun.UnixNano()) / 1000) +} + +func durationTo(duration time.Duration, to time.Duration) int { + return int(int64(duration) / (int64(to) / int64(time.Nanosecond))) +} + +func firstNotEmpty(val1, val2 string) string { + if val1 != "" { + return val1 + } + return val2 +} + +func handleZero(v int) int { + if v == 0 { + return 1 + } + 
return v +} diff --git a/src/go/collectors/go.d.plugin/agent/module/job_test.go b/src/go/collectors/go.d.plugin/agent/module/job_test.go new file mode 100644 index 00000000000000..f19fdcebda3448 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/job_test.go @@ -0,0 +1,290 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "fmt" + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + pluginName = "plugin" + modName = "module" + jobName = "job" +) + +func newTestJob() *Job { + return NewJob( + JobConfig{ + PluginName: pluginName, + Name: jobName, + ModuleName: modName, + FullName: modName + "_" + jobName, + Module: nil, + Out: io.Discard, + UpdateEvery: 0, + AutoDetectEvery: 0, + Priority: 0, + }, + ) +} + +func TestNewJob(t *testing.T) { + assert.IsType(t, (*Job)(nil), newTestJob()) +} + +func TestJob_FullName(t *testing.T) { + job := newTestJob() + + assert.Equal(t, job.FullName(), fmt.Sprintf("%s_%s", modName, jobName)) +} + +func TestJob_ModuleName(t *testing.T) { + job := newTestJob() + + assert.Equal(t, job.ModuleName(), modName) +} + +func TestJob_Name(t *testing.T) { + job := newTestJob() + + assert.Equal(t, job.Name(), jobName) +} + +func TestJob_Panicked(t *testing.T) { + job := newTestJob() + + assert.Equal(t, job.Panicked(), job.panicked) + job.panicked = true + assert.Equal(t, job.Panicked(), job.panicked) +} + +func TestJob_AutoDetectionEvery(t *testing.T) { + job := newTestJob() + + assert.Equal(t, job.AutoDetectionEvery(), job.AutoDetectEvery) +} + +func TestJob_RetryAutoDetection(t *testing.T) { + job := newTestJob() + m := &MockModule{ + InitFunc: func() bool { + return true + }, + CheckFunc: func() bool { return false }, + ChartsFunc: func() *Charts { + return &Charts{} + }, + } + job.module = m + job.AutoDetectEvery = 1 + + assert.True(t, job.RetryAutoDetection()) + assert.Equal(t, infTries, job.AutoDetectTries) + for i := 0; i < 1000; i++ { + job.check() + } + assert.True(t, job.RetryAutoDetection()) + assert.Equal(t, infTries, job.AutoDetectTries) + + job.AutoDetectTries = 10 + for i := 0; i < 10; i++ { + job.check() + } + assert.False(t, job.RetryAutoDetection()) + assert.Equal(t, 0, job.AutoDetectTries) +} + +func TestJob_AutoDetection(t *testing.T) { + job := newTestJob() + var v int + m := &MockModule{ + InitFunc: func() bool { + v++ + return true + }, + CheckFunc: func() bool { + v++ + return true + }, + ChartsFunc: func() *Charts { + v++ + return &Charts{} + }, + } + job.module = m + + assert.True(t, job.AutoDetection()) + assert.Equal(t, 3, v) +} + +func TestJob_AutoDetection_FailInit(t *testing.T) { + job := newTestJob() + m := &MockModule{ + InitFunc: func() bool { + return false + }, + } + job.module = m + + assert.False(t, job.AutoDetection()) + assert.True(t, m.CleanupDone) +} + +func TestJob_AutoDetection_FailCheck(t *testing.T) { + job := newTestJob() + m := &MockModule{ + InitFunc: func() bool { + return true + }, + CheckFunc: func() bool { + return false + }, + } + job.module = m + + assert.False(t, job.AutoDetection()) + assert.True(t, m.CleanupDone) +} + +func TestJob_AutoDetection_FailPostCheck(t *testing.T) { + job := newTestJob() + m := &MockModule{ + InitFunc: func() bool { + return true + }, + CheckFunc: func() bool { + return true + }, + ChartsFunc: func() *Charts { + return nil + }, + } + job.module = m + + assert.False(t, job.AutoDetection()) + assert.True(t, m.CleanupDone) +} + +func TestJob_AutoDetection_PanicInit(t *testing.T) { + job := newTestJob() + m := &MockModule{ + 
InitFunc: func() bool {
+			panic("panic in Init")
+		},
+	}
+	job.module = m
+
+	assert.False(t, job.AutoDetection())
+	assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicCheck(t *testing.T) {
+	job := newTestJob()
+	m := &MockModule{
+		InitFunc: func() bool {
+			return true
+		},
+		CheckFunc: func() bool {
+			panic("panic in Check")
+		},
+	}
+	job.module = m
+
+	assert.False(t, job.AutoDetection())
+	assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicPostCheck(t *testing.T) {
+	job := newTestJob()
+	m := &MockModule{
+		InitFunc: func() bool {
+			return true
+		},
+		CheckFunc: func() bool {
+			return true
+		},
+		ChartsFunc: func() *Charts {
+			panic("panic in PostCheck")
+		},
+	}
+	job.module = m
+
+	assert.False(t, job.AutoDetection())
+	assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Start(t *testing.T) {
+	m := &MockModule{
+		ChartsFunc: func() *Charts {
+			return &Charts{
+				&Chart{
+					ID:    "id",
+					Title: "title",
+					Units: "units",
+					Dims: Dims{
+						{ID: "id1"},
+						{ID: "id2"},
+					},
+				},
+			}
+		},
+		CollectFunc: func() map[string]int64 {
+			return map[string]int64{
+				"id1": 1,
+				"id2": 2,
+			}
+		},
+	}
+	job := newTestJob()
+	job.module = m
+	job.charts = job.module.Charts()
+	job.updateEvery = 1
+
+	go func() {
+		for i := 1; i < 3; i++ {
+			job.Tick(i)
+			time.Sleep(time.Second)
+		}
+		job.Stop()
+	}()
+
+	job.Start()
+
+	assert.True(t, m.CleanupDone)
+}
+
+func TestJob_MainLoop_Panic(t *testing.T) {
+	m := &MockModule{
+		CollectFunc: func() map[string]int64 {
+			panic("panic in Collect")
+		},
+	}
+	job := newTestJob()
+	job.module = m
+	job.updateEvery = 1
+
+	go func() {
+		for i := 1; i < 3; i++ {
+			time.Sleep(time.Second)
+			job.Tick(i)
+		}
+		job.Stop()
+	}()
+
+	job.Start()
+
+	assert.True(t, job.Panicked())
+	assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Tick(t *testing.T) {
+	job := newTestJob()
+	for i := 0; i < 3; i++ {
+		job.Tick(i)
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/agent/module/mock.go b/src/go/collectors/go.d.plugin/agent/module/mock.go
new file mode 100644
index 00000000000000..c4353eb524a039
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/agent/module/mock.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+// MockModule is a mock Module implementation for use in tests.
+type MockModule struct {
+	Base
+
+	InitFunc    func() bool
+	CheckFunc   func() bool
+	ChartsFunc  func() *Charts
+	CollectFunc func() map[string]int64
+	CleanupFunc func()
+	CleanupDone bool
+}
+
+// Init invokes InitFunc.
+func (m MockModule) Init() bool {
+	if m.InitFunc == nil {
+		return true
+	}
+	return m.InitFunc()
+}
+
+// Check invokes CheckFunc.
+func (m MockModule) Check() bool {
+	if m.CheckFunc == nil {
+		return true
+	}
+	return m.CheckFunc()
+}
+
+// Charts invokes ChartsFunc.
+func (m MockModule) Charts() *Charts {
+	if m.ChartsFunc == nil {
+		return nil
+	}
+	return m.ChartsFunc()
+}
+
+// Collect invokes CollectFunc.
+func (m MockModule) Collect() map[string]int64 {
+	if m.CollectFunc == nil {
+		return nil
+	}
+	return m.CollectFunc()
+}
+
+// Cleanup sets CleanupDone to true.
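+// If CleanupFunc is set, it is invoked before the flag is set.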
+func (m *MockModule) Cleanup() { + if m.CleanupFunc != nil { + m.CleanupFunc() + } + m.CleanupDone = true +} diff --git a/src/go/collectors/go.d.plugin/agent/module/mock_test.go b/src/go/collectors/go.d.plugin/agent/module/mock_test.go new file mode 100644 index 00000000000000..9c194e89363421 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/mock_test.go @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMockModule_Init(t *testing.T) { + m := &MockModule{} + + assert.True(t, m.Init()) + m.InitFunc = func() bool { return false } + assert.False(t, m.Init()) +} + +func TestMockModule_Check(t *testing.T) { + m := &MockModule{} + + assert.True(t, m.Check()) + m.CheckFunc = func() bool { return false } + assert.False(t, m.Check()) +} + +func TestMockModule_Charts(t *testing.T) { + m := &MockModule{} + c := &Charts{} + + assert.Nil(t, m.Charts()) + m.ChartsFunc = func() *Charts { return c } + assert.True(t, c == m.Charts()) +} + +func TestMockModule_Collect(t *testing.T) { + m := &MockModule{} + d := map[string]int64{ + "1": 1, + } + + assert.Nil(t, m.Collect()) + m.CollectFunc = func() map[string]int64 { return d } + assert.Equal(t, d, m.Collect()) +} + +func TestMockModule_Cleanup(t *testing.T) { + m := &MockModule{} + require.False(t, m.CleanupDone) + + m.Cleanup() + assert.True(t, m.CleanupDone) +} diff --git a/src/go/collectors/go.d.plugin/agent/module/module.go b/src/go/collectors/go.d.plugin/agent/module/module.go new file mode 100644 index 00000000000000..3421a02ee7e133 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/module.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "github.com/netdata/go.d.plugin/logger" +) + +// Module is an interface that represents a module. +type Module interface { + // Init does initialization. + // If it returns false, the job will be disabled. + Init() bool + + // Check is called after Init. + // If it returns false, the job will be disabled. + Check() bool + + // Charts returns the chart definition. + // Make sure not to share the returned instance. + Charts() *Charts + + // Collect collects metrics. + Collect() map[string]int64 + + // Cleanup performs the module's cleanup. It is called once the job stops or auto-detection fails. + Cleanup() + + GetBase() *Base +} + +// Base is a helper struct. All modules should embed this struct. +type Base struct { + *logger.Logger +} + +func (b *Base) GetBase() *Base { return b } diff --git a/src/go/collectors/go.d.plugin/agent/module/registry.go b/src/go/collectors/go.d.plugin/agent/module/registry.go new file mode 100644 index 00000000000000..4d0d2c493df0d0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/registry.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import "fmt" + +const ( + UpdateEvery = 1 + AutoDetectionRetry = 0 + Priority = 70000 +) + +// Defaults is a set of module default parameters. +type Defaults struct { + UpdateEvery int + AutoDetectionRetry int + Priority int + Disabled bool +} + +type ( + // Creator is a Job builder. + Creator struct { + Defaults + Create func() Module + JobConfigSchema string + } + // Registry is a collection of Creators. + Registry map[string]Creator +) + +// DefaultRegistry is the default Registry used by the package-level Register. +var DefaultRegistry = Registry{} + +// Register registers a module in the DefaultRegistry.
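+// Collectors typically call it from an init function; a sketch with a hypothetical module: +// +// func init() { +// module.Register("example", module.Creator{ +// Create: func() module.Module { return &Example{} }, +// }) +// }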
+func Register(name string, creator Creator) { + DefaultRegistry.Register(name, creator) +} + +// Register registers a module. +func (r Registry) Register(name string, creator Creator) { + if _, ok := r[name]; ok { + panic(fmt.Sprintf("%s is already in registry", name)) + } + r[name] = creator +} diff --git a/src/go/collectors/go.d.plugin/agent/module/registry_test.go b/src/go/collectors/go.d.plugin/agent/module/registry_test.go new file mode 100644 index 00000000000000..c9f31105a5ae4e --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/module/registry_test.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package module + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegister(t *testing.T) { + modName := "modName" + registry := make(Registry) + + // OK case + assert.NotPanics( + t, + func() { + registry.Register(modName, Creator{}) + }) + + _, exist := registry[modName] + + require.True(t, exist) + + // Panic case + assert.Panics( + t, + func() { + registry.Register(modName, Creator{}) + }) + +} diff --git a/src/go/collectors/go.d.plugin/agent/netdataapi/api.go b/src/go/collectors/go.d.plugin/agent/netdataapi/api.go new file mode 100644 index 00000000000000..43c34d22dc0d5e --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/netdataapi/api.go @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package netdataapi + +import ( + "bytes" + "fmt" + "io" + "strconv" +) + +type ( + // API implements the Netdata external plugins API. + // https://learn.netdata.cloud/docs/agent/collectors/plugins.d#the-output-of-the-plugin + API struct { + io.Writer + } +) + +const quotes = "' '" + +var ( + end = []byte("END\n\n") + clabelCommit = []byte("CLABEL_COMMIT\n") + newLine = []byte("\n") +) + +func New(w io.Writer) *API { return &API{w} } + +// CHART creates or updates a chart. +func (a *API) CHART( + typeID string, + ID string, + name string, + title string, + units string, + family string, + context string, + chartType string, + priority int, + updateEvery int, + options string, + plugin string, + module string) error { + _, err := a.Write([]byte("CHART " + "'" + + typeID + "." + ID + quotes + + name + quotes + + title + quotes + + units + quotes + + family + quotes + + context + quotes + + chartType + quotes + + strconv.Itoa(priority) + quotes + + strconv.Itoa(updateEvery) + quotes + + options + quotes + + plugin + quotes + + module + "'\n")) + return err +} + +// DIMENSION adds or updates a dimension of the chart just created. +func (a *API) DIMENSION( + ID string, + name string, + algorithm string, + multiplier int, + divisor int, + options string) error { + _, err := a.Write([]byte("DIMENSION '" + + ID + quotes + + name + quotes + + algorithm + quotes + + strconv.Itoa(multiplier) + quotes + + strconv.Itoa(divisor) + quotes + + options + "'\n")) + return err +} + +// CLABEL adds or updates a label of the chart. +func (a *API) CLABEL(key, value string, source int) error { + _, err := a.Write([]byte("CLABEL '" + + key + quotes + + value + quotes + + strconv.Itoa(source) + "'\n")) + return err +} + +// CLABELCOMMIT commits the added labels to the chart. Should be called after one or more CLABEL. +func (a *API) CLABELCOMMIT() error { + _, err := a.Write(clabelCommit) + return err +} + +// BEGIN initializes data collection for a chart. +func (a *API) BEGIN(typeID string, ID string, msSince int) (err error) { + if msSince > 0 { + _, err = a.Write([]byte("BEGIN " + "'" + typeID + "."
+ ID + "' " + strconv.Itoa(msSince) + "\n")) + } else { + _, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "'\n")) + } + return err +} + +// SET sets the value of a dimension for the initialized chart. +func (a *API) SET(ID string, value int64) error { + _, err := a.Write([]byte("SET '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n")) + return err +} + +// SETEMPTY sets the empty value of a dimension for the initialized chart. +func (a *API) SETEMPTY(ID string) error { + _, err := a.Write([]byte("SET '" + ID + "' = \n")) + return err +} + +// VARIABLE sets the value of a CHART scope variable for the initialized chart. +func (a *API) VARIABLE(ID string, value int64) error { + _, err := a.Write([]byte("VARIABLE CHART '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n")) + return err +} + +// END completes data collection for the initialized chart. +func (a *API) END() error { + _, err := a.Write(end) + return err +} + +// DISABLE disables this plugin. This will prevent Netdata from restarting the plugin. +func (a *API) DISABLE() error { + _, err := a.Write([]byte("DISABLE\n")) + return err +} + +// EMPTYLINE writes an empty line. +func (a *API) EMPTYLINE() error { + _, err := a.Write(newLine) + return err +} + +func (a *API) HOSTINFO(guid, hostname string, labels map[string]string) error { + if err := a.HOSTDEFINE(guid, hostname); err != nil { + return err + } + for k, v := range labels { + if err := a.HOSTLABEL(k, v); err != nil { + return err + } + } + return a.HOSTDEFINEEND() +} + +func (a *API) HOSTDEFINE(guid, hostname string) error { + _, err := fmt.Fprintf(a, "HOST_DEFINE '%s' '%s'\n", guid, hostname) + return err +} + +func (a *API) HOSTLABEL(name, value string) error { + _, err := fmt.Fprintf(a, "HOST_LABEL '%s' '%s'\n", name, value) + return err +} + +func (a *API) HOSTDEFINEEND() error { + _, err := fmt.Fprintf(a, "HOST_DEFINE_END\n\n") + return err +} + +func (a *API) HOST(guid string) error { + _, err := a.Write([]byte("HOST " + "'" + guid + "'" + "\n\n")) + return err +} + +func (a *API) DynCfgEnable(pluginName string) error { + _, err := a.Write([]byte("DYNCFG_ENABLE '" + pluginName + "'\n\n")) + return err +} + +func (a *API) DynCfgReset() error { + _, err := a.Write([]byte("DYNCFG_RESET\n")) + return err +} + +func (a *API) DyncCfgRegisterModule(moduleName string) error { + _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_MODULE '%s' job_array\n\n", moduleName) + return err +} + +func (a *API) DynCfgRegisterJob(moduleName, jobName, jobType string) error { + _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_JOB '%s' '%s' '%s' 0\n\n", moduleName, jobName, jobType) + return err +} + +func (a *API) DynCfgReportJobStatus(moduleName, jobName, status, reason string) error { + _, err := fmt.Fprintf(a, "REPORT_JOB_STATUS '%s' '%s' '%s' 0 '%s'\n\n", moduleName, jobName, status, reason) + return err +} + +func (a *API) FunctionResultSuccess(uid, contentType, payload string) error { + return a.functionResult(uid, contentType, payload, "1") +} + +func (a *API) FunctionResultReject(uid, contentType, payload string) error { + return a.functionResult(uid, contentType, payload, "0") +} + +func (a *API) functionResult(uid, contentType, payload, code string) error { + var buf bytes.Buffer + + buf.WriteString("FUNCTION_RESULT_BEGIN " + uid + " " + code + " " + contentType + " 0\n") + if payload != "" { + buf.WriteString(payload + "\n") + } + buf.WriteString("FUNCTION_RESULT_END\n\n") + + _, err := buf.WriteTo(a) + return err +} diff --git 
a/src/go/collectors/go.d.plugin/agent/netdataapi/api_test.go b/src/go/collectors/go.d.plugin/agent/netdataapi/api_test.go new file mode 100644 index 00000000000000..30f01946016956 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/netdataapi/api_test.go @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package netdataapi + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAPI_CHART(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CHART( + "", + "id", + "name", + "title", + "units", + "family", + "context", + "line", + 1, + 1, + "", + "plugin", + "module", + ) + + assert.Equal( + t, + "CHART '.id' 'name' 'title' 'units' 'family' 'context' 'line' '1' '1' '' 'plugin' 'module'\n", + buf.String(), + ) +} + +func TestAPI_DIMENSION(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DIMENSION( + "id", + "name", + "absolute", + 1, + 1, + "", + ) + + assert.Equal( + t, + "DIMENSION 'id' 'name' 'absolute' '1' '1' ''\n", + buf.String(), + ) +} + +func TestAPI_BEGIN(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.BEGIN( + "typeID", + "id", + 0, + ) + + assert.Equal( + t, + "BEGIN 'typeID.id'\n", + buf.String(), + ) + + buf.Reset() + + _ = a.BEGIN( + "typeID", + "id", + 1, + ) + + assert.Equal( + t, + "BEGIN 'typeID.id' 1\n", + buf.String(), + ) +} + +func TestAPI_SET(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.SET("id", 100) + + assert.Equal( + t, + "SET 'id' = 100\n", + buf.String(), + ) +} + +func TestAPI_SETEMPTY(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.SETEMPTY("id") + + assert.Equal( + t, + "SET 'id' = \n", + buf.String(), + ) +} + +func TestAPI_VARIABLE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.VARIABLE("id", 100) + + assert.Equal( + t, + "VARIABLE CHART 'id' = 100\n", + buf.String(), + ) +} + +func TestAPI_END(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.END() + + assert.Equal( + t, + "END\n\n", + buf.String(), + ) +} + +func TestAPI_CLABEL(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CLABEL("key", "value", 1) + + assert.Equal( + t, + "CLABEL 'key' 'value' '1'\n", + buf.String(), + ) +} + +func TestAPI_CLABELCOMMIT(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.CLABELCOMMIT() + + assert.Equal( + t, + "CLABEL_COMMIT\n", + buf.String(), + ) +} + +func TestAPI_DISABLE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DISABLE() + + assert.Equal( + t, + "DISABLE\n", + buf.String(), + ) +} + +func TestAPI_EMPTYLINE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.EMPTYLINE() + + assert.Equal( + t, + "\n", + buf.String(), + ) +} + +func TestAPI_HOST(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOST("guid") + + assert.Equal( + t, + "HOST 'guid'\n\n", + buf.String(), + ) +} + +func TestAPI_HOSTDEFINE(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTDEFINE("guid", "hostname") + + assert.Equal( + t, + "HOST_DEFINE 'guid' 'hostname'\n", + buf.String(), + ) +} + +func TestAPI_HOSTLABEL(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTLABEL("name", "value") + + assert.Equal( + t, + "HOST_LABEL 'name' 'value'\n", + buf.String(), + ) +} + +func TestAPI_HOSTDEFINEEND(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTDEFINEEND() 
+ + assert.Equal( + t, + "HOST_DEFINE_END\n\n", + buf.String(), + ) +} + +func TestAPI_HOSTINFO(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.HOSTINFO("guid", "hostname", map[string]string{"label1": "value1"}) + + assert.Equal( + t, + `HOST_DEFINE 'guid' 'hostname' +HOST_LABEL 'label1' 'value1' +HOST_DEFINE_END + +`, + buf.String(), + ) +} + +func TestAPI_DynCfgEnable(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DynCfgEnable("plugin") + + assert.Equal( + t, + "DYNCFG_ENABLE 'plugin'\n\n", + buf.String(), + ) +} + +func TestAPI_DynCfgReset(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DynCfgReset() + + assert.Equal( + t, + "DYNCFG_RESET\n", + buf.String(), + ) +} + +func TestAPI_DyncCfgRegisterModule(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DyncCfgRegisterModule("module") + + assert.Equal( + t, + "DYNCFG_REGISTER_MODULE 'module' job_array\n\n", + buf.String(), + ) +} + +func TestAPI_DynCfgRegisterJob(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DynCfgRegisterJob("module", "job", "type") + + assert.Equal( + t, + "DYNCFG_REGISTER_JOB 'module' 'job' 'type' 0\n\n", + buf.String(), + ) +} + +func TestAPI_DynCfgReportJobStatus(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.DynCfgReportJobStatus("module", "job", "status", "reason") + + assert.Equal( + t, + "REPORT_JOB_STATUS 'module' 'job' 'status' 0 'reason'\n\n", + buf.String(), + ) +} + +func TestAPI_FunctionResultSuccess(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.FunctionResultSuccess("uid", "contentType", "payload") + + assert.Equal( + t, + `FUNCTION_RESULT_BEGIN uid 1 contentType 0 +payload +FUNCTION_RESULT_END + +`, + buf.String(), + ) +} + +func TestAPI_FunctionResultReject(t *testing.T) { + buf := &bytes.Buffer{} + a := API{Writer: buf} + + _ = a.FunctionResultReject("uid", "contentType", "payload") + + assert.Equal( + t, + `FUNCTION_RESULT_BEGIN uid 0 contentType 0 +payload +FUNCTION_RESULT_END + +`, + buf.String(), + ) +} diff --git a/src/go/collectors/go.d.plugin/agent/safewriter/writer.go b/src/go/collectors/go.d.plugin/agent/safewriter/writer.go new file mode 100644 index 00000000000000..533c1055d9319c --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/safewriter/writer.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package safewriter + +import ( + "io" + "os" + "sync" +) + +var Stdout = New(os.Stdout) + +func New(w io.Writer) io.Writer { + return &writer{ + mx: &sync.Mutex{}, + w: w, + } +} + +type writer struct { + mx *sync.Mutex + w io.Writer +} + +func (w *writer) Write(p []byte) (n int, err error) { + w.mx.Lock() + n, err = w.w.Write(p) + w.mx.Unlock() + return n, err +} diff --git a/src/go/collectors/go.d.plugin/agent/setup.go b/src/go/collectors/go.d.plugin/agent/setup.go new file mode 100644 index 00000000000000..202eedbb27e841 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/setup.go @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package agent + +import ( + "io" + "os" + "strings" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/discovery" + "github.com/netdata/go.d.plugin/agent/discovery/dummy" + "github.com/netdata/go.d.plugin/agent/discovery/file" + "github.com/netdata/go.d.plugin/agent/hostinfo" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/vnodes" + + "gopkg.in/yaml.v2" +) + 
+func (a *Agent) loadPluginConfig() config { + a.Info("loading config file") + + if len(a.ConfDir) == 0 { + a.Info("config dir not provided, will use defaults") + return defaultConfig() + } + + cfgPath := a.Name + ".conf" + a.Debugf("looking for '%s' in %v", cfgPath, a.ConfDir) + + path, err := a.ConfDir.Find(cfgPath) + if err != nil || path == "" { + a.Warning("couldn't find config, will use defaults") + return defaultConfig() + } + a.Infof("found '%s'", path) + + cfg := defaultConfig() + if err := loadYAML(&cfg, path); err != nil { + a.Warningf("couldn't load config '%s': %v, will use defaults", path, err) + return defaultConfig() + } + a.Info("config successfully loaded") + return cfg +} + +func (a *Agent) loadEnabledModules(cfg config) module.Registry { + a.Info("loading modules") + + all := a.RunModule == "all" || a.RunModule == "" + enabled := module.Registry{} + + for name, creator := range a.ModuleRegistry { + if !all && a.RunModule != name { + continue + } + if all { + // Known issue: go.d/logind high CPU usage on Alma Linux 8 (https://github.com/netdata/netdata/issues/15930) + if !cfg.isExplicitlyEnabled(name) && (creator.Disabled || name == "logind" && hostinfo.SystemdVersion == 239) { + a.Infof("'%s' module disabled by default, should be explicitly enabled in the config", name) + continue + } + if !cfg.isImplicitlyEnabled(name) { + a.Infof("'%s' module disabled in the config file", name) + continue + } + } + enabled[name] = creator + } + + a.Infof("enabled/registered modules: %d/%d", len(enabled), len(a.ModuleRegistry)) + + return enabled +} + +func (a *Agent) buildDiscoveryConf(enabled module.Registry) discovery.Config { + a.Info("building discovery config") + + reg := confgroup.Registry{} + for name, creator := range enabled { + reg.Register(name, confgroup.Default{ + MinUpdateEvery: a.MinUpdateEvery, + UpdateEvery: creator.UpdateEvery, + AutoDetectionRetry: creator.AutoDetectionRetry, + Priority: creator.Priority, + }) + } + + var readPaths, dummyPaths []string + + if len(a.ModulesConfDir) == 0 { + if isInsideK8sCluster() { + return discovery.Config{Registry: reg} + } + a.Info("modules conf dir not provided, will use default config for all enabled modules") + for name := range enabled { + dummyPaths = append(dummyPaths, name) + } + return discovery.Config{ + Registry: reg, + Dummy: dummy.Config{Names: dummyPaths}, + } + } + + for name := range enabled { + // TODO: properly handle module renaming + // We need to announce this change in Netdata v1.39.0 release notes and then remove this workaround. + // This is just a quick fix for wmi=>windows.
We need to prefer user wmi.conf over windows.conf + // 2nd part of this fix is in /agent/job/discovery/file/parse.go parseStaticFormat() + if name == "windows" { + cfgName := "wmi.conf" + a.Debugf("looking for '%s' in %v", cfgName, a.ModulesConfDir) + + path, err := a.ModulesConfDir.Find(cfgName) + + if err == nil && strings.Contains(path, "etc/netdata") { + a.Infof("found '%s'", path) + readPaths = append(readPaths, path) + continue + } + } + + cfgName := name + ".conf" + a.Debugf("looking for '%s' in %v", cfgName, a.ModulesConfDir) + + path, err := a.ModulesConfDir.Find(cfgName) + if isInsideK8sCluster() { + if err != nil { + a.Infof("not found '%s', won't use default (reading stock configs is disabled in k8s)", cfgName) + continue + } else if isStockConfig(path) { + a.Infof("found '%s', but won't load it (reading stock configs is disabled in k8s)", cfgName) + continue + } + } + if err != nil { + a.Infof("couldn't find '%s' module config, will use default config", name) + dummyPaths = append(dummyPaths, name) + } else { + a.Debugf("found '%s'", path) + readPaths = append(readPaths, path) + } + } + + a.Infof("dummy/read/watch paths: %d/%d/%d", len(dummyPaths), len(readPaths), len(a.ModulesSDConfPath)) + return discovery.Config{ + Registry: reg, + File: file.Config{ + Read: readPaths, + Watch: a.ModulesSDConfPath, + }, + Dummy: dummy.Config{ + Names: dummyPaths, + }, + } +} + +func (a *Agent) setupVnodeRegistry() *vnodes.Vnodes { + a.Debugf("looking for 'vnodes/' in %v", a.VnodesConfDir) + + if len(a.VnodesConfDir) == 0 { + return nil + } + + dirPath, err := a.VnodesConfDir.Find("vnodes/") + if err != nil || dirPath == "" { + return nil + } + + reg := vnodes.New(dirPath) + a.Infof("found '%s' (%d vnodes)", dirPath, reg.Len()) + + return reg +} + +func loadYAML(conf interface{}, path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + if err = yaml.NewDecoder(f).Decode(conf); err != nil { + if err == io.EOF { + return nil + } + return err + } + return nil +} + +var ( + envKubeHost = os.Getenv("KUBERNETES_SERVICE_HOST") + envKubePort = os.Getenv("KUBERNETES_SERVICE_PORT") + envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") +) + +func isInsideK8sCluster() bool { return envKubeHost != "" && envKubePort != "" } + +func isStockConfig(path string) bool { + if envNDStockConfigDir == "" { + return false + } + return strings.HasPrefix(path, envNDStockConfigDir) +} diff --git a/src/go/collectors/go.d.plugin/agent/setup_test.go b/src/go/collectors/go.d.plugin/agent/setup_test.go new file mode 100644 index 00000000000000..a98c40ba48003c --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/setup_test.go @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package agent + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestConfig_UnmarshalYAML(t *testing.T) { + tests := map[string]struct { + input string + wantCfg config + }{ + "valid configuration": { + input: "enabled: yes\ndefault_run: yes\nmodules:\n module1: yes\n module2: yes", + wantCfg: config{ + Enabled: true, + DefaultRun: true, + Modules: map[string]bool{ + "module1": true, + "module2": true, + }, + }, + }, + "valid configuration with broken modules section": { + input: "enabled: yes\ndefault_run: yes\nmodules:\nmodule1: yes\nmodule2: yes", + wantCfg: config{ + Enabled: true, + DefaultRun: true, + Modules:
map[string]bool{ + "module1": true, + "module2": true, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var cfg config + err := yaml.Unmarshal([]byte(test.input), &cfg) + require.NoError(t, err) + assert.Equal(t, test.wantCfg, cfg) + }) + } +} + +func TestAgent_loadConfig(t *testing.T) { + tests := map[string]struct { + agent Agent + wantCfg config + }{ + "valid config file": { + agent: Agent{ + Name: "agent-valid", + ConfDir: []string{"testdata"}, + }, + wantCfg: config{ + Enabled: true, + DefaultRun: true, + MaxProcs: 1, + Modules: map[string]bool{ + "module1": true, + "module2": true, + }, + }, + }, + "no config path provided": { + agent: Agent{}, + wantCfg: defaultConfig(), + }, + "config file not found": { + agent: Agent{ + Name: "agent", + ConfDir: []string{"testdata/not-exist"}, + }, + wantCfg: defaultConfig(), + }, + "empty config file": { + agent: Agent{ + Name: "agent-empty", + ConfDir: []string{"testdata"}, + }, + wantCfg: defaultConfig(), + }, + "invalid syntax config file": { + agent: Agent{ + Name: "agent-invalid-syntax", + ConfDir: []string{"testdata"}, + }, + wantCfg: defaultConfig(), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.wantCfg, test.agent.loadPluginConfig()) + }) + } +} + +func TestAgent_loadEnabledModules(t *testing.T) { + tests := map[string]struct { + agent Agent + cfg config + wantModules module.Registry + }{ + "load all, module disabled by default but explicitly enabled": { + agent: Agent{ + ModuleRegistry: module.Registry{ + "module1": module.Creator{Defaults: module.Defaults{Disabled: true}}, + }, + }, + cfg: config{ + Modules: map[string]bool{"module1": true}, + }, + wantModules: module.Registry{ + "module1": module.Creator{Defaults: module.Defaults{Disabled: true}}, + }, + }, + "load all, module disabled by default and not explicitly enabled": { + agent: Agent{ + ModuleRegistry: module.Registry{ + "module1": module.Creator{Defaults: module.Defaults{Disabled: true}}, + }, + }, + wantModules: module.Registry{}, + }, + "load all, module in config modules (default_run=true)": { + agent: Agent{ + ModuleRegistry: module.Registry{ + "module1": module.Creator{}, + }, + }, + cfg: config{ + Modules: map[string]bool{"module1": true}, + DefaultRun: true, + }, + wantModules: module.Registry{ + "module1": module.Creator{}, + }, + }, + "load all, module not in config modules (default_run=true)": { + agent: Agent{ + ModuleRegistry: module.Registry{"module1": module.Creator{}}, + }, + cfg: config{ + DefaultRun: true, + }, + wantModules: module.Registry{"module1": module.Creator{}}, + }, + "load all, module in config modules (default_run=false)": { + agent: Agent{ + ModuleRegistry: module.Registry{ + "module1": module.Creator{}, + }, + }, + cfg: config{ + Modules: map[string]bool{"module1": true}, + }, + wantModules: module.Registry{ + "module1": module.Creator{}, + }, + }, + "load all, module not in config modules (default_run=false)": { + agent: Agent{ + ModuleRegistry: module.Registry{ + "module1": module.Creator{}, + }, + }, + wantModules: module.Registry{}, + }, + "load specific, module exist in registry": { + agent: Agent{ + RunModule: "module1", + ModuleRegistry: module.Registry{ + "module1": module.Creator{}, + }, + }, + wantModules: module.Registry{ + "module1": module.Creator{}, + }, + }, + "load specific, module doesn't exist in registry": { + agent: Agent{ + RunModule: "module3", + ModuleRegistry: module.Registry{}, + }, + wantModules: module.Registry{}, + }, + }
+ + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.wantModules, test.agent.loadEnabledModules(test.cfg)) + }) + } +} + +// TODO: tech debt +func TestAgent_buildDiscoveryConf(t *testing.T) { + +} diff --git a/web/api/ilove/README.md b/src/go/collectors/go.d.plugin/agent/testdata/agent-empty.conf similarity index 100% rename from web/api/ilove/README.md rename to src/go/collectors/go.d.plugin/agent/testdata/agent-empty.conf diff --git a/src/go/collectors/go.d.plugin/agent/testdata/agent-invalid-syntax.conf b/src/go/collectors/go.d.plugin/agent/testdata/agent-invalid-syntax.conf new file mode 100644 index 00000000000000..c4a0b914c2e1ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/testdata/agent-invalid-syntax.conf @@ -0,0 +1,7 @@ +- enabled: yes +default_run: yes +max_procs: 1 + +modules: + module1: yes + module2: yes diff --git a/src/go/collectors/go.d.plugin/agent/testdata/agent-valid.conf b/src/go/collectors/go.d.plugin/agent/testdata/agent-valid.conf new file mode 100644 index 00000000000000..ec5e1d06e73369 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/testdata/agent-valid.conf @@ -0,0 +1,7 @@ +enabled: yes +default_run: yes +max_procs: 1 + +modules: + module1: yes + module2: yes diff --git a/src/go/collectors/go.d.plugin/agent/ticker/ticker.go b/src/go/collectors/go.d.plugin/agent/ticker/ticker.go new file mode 100644 index 00000000000000..e4228fe4c4cbc9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/ticker/ticker.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ticker + +import "time" + +type ( + // Ticker holds a channel that delivers ticks of a clock at intervals. + // The ticks are aligned to interval boundaries. + Ticker struct { + C <-chan int + done chan struct{} + loops int + interval time.Duration + } +) + +// New returns a new Ticker containing a channel that will send the tick sequence number with a period specified by the interval argument. +// It adjusts the intervals or drops ticks to make up for slow receivers. +// The interval must be greater than zero. Stop the Ticker to release associated resources. +func New(interval time.Duration) *Ticker { + ticker := &Ticker{ + interval: interval, + done: make(chan struct{}, 1), + } + ticker.start() + return ticker +} + +func (t *Ticker) start() { + ch := make(chan int) + t.C = ch + go func() { + LOOP: + for { + now := time.Now() + nextRun := now.Truncate(t.interval).Add(t.interval) + + time.Sleep(nextRun.Sub(now)) + select { + case <-t.done: + close(ch) + break LOOP + case ch <- t.loops: + t.loops++ + } + } + }() +} + +// Stop turns off a Ticker. After Stop, no more ticks will be sent. +// Unlike time.Ticker, Stop also closes the channel once the stop request is received.
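+// A typical consumer loop (sketch): +// +// tk := ticker.New(time.Second) +// defer tk.Stop() +// for loops := range tk.C { +// _ = loops // 0, 1, 2, ... one value per aligned interval +// }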
+func (t *Ticker) Stop() { + t.done <- struct{}{} +} diff --git a/src/go/collectors/go.d.plugin/agent/ticker/ticket_test.go b/src/go/collectors/go.d.plugin/agent/ticker/ticket_test.go new file mode 100644 index 00000000000000..19308536592109 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/ticker/ticket_test.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ticker + +import ( + "testing" + "time" +) + +// TODO: often fails Circle CI (~200-240) +var allowedDelta = 500 * time.Millisecond + +func TestTickerParallel(t *testing.T) { + for i := 0; i < 100; i++ { + i := i + go func() { + time.Sleep(time.Second / 100 * time.Duration(i)) + TestTicker(t) + }() + } + time.Sleep(4 * time.Second) +} + +func TestTicker(t *testing.T) { + tk := New(time.Second) + defer tk.Stop() + prev := time.Now() + for i := 0; i < 3; i++ { + <-tk.C + now := time.Now() + diff := abs(now.Round(time.Second).Sub(now)) + if diff >= allowedDelta { + t.Errorf("Ticker is not aligned: expect delta < %v but was: %v (%s)", allowedDelta, diff, now.Format(time.RFC3339Nano)) + } + if i > 0 { + dt := now.Sub(prev) + if abs(dt-time.Second) >= allowedDelta { + t.Errorf("Ticker interval: expect delta < %v ns but was: %v", allowedDelta, abs(dt-time.Second)) + } + } + prev = now + } +} + +func abs(a time.Duration) time.Duration { + if a < 0 { + return -a + } + return a +} diff --git a/src/go/collectors/go.d.plugin/agent/vnodes/testdata/config.yaml b/src/go/collectors/go.d.plugin/agent/vnodes/testdata/config.yaml new file mode 100644 index 00000000000000..db256d32f08e94 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/vnodes/testdata/config.yaml @@ -0,0 +1,11 @@ +- hostname: first + guid: 4ea21e84-93b4-418b-b83e-79397610cd6e + labels: + area: "41" + level: "82" + +- hostname: second + guid: 9486b0e1-b391-4d9a-bd88-5c703183f9b6 + labels: + area: "51" + level: "92" diff --git a/src/go/collectors/go.d.plugin/agent/vnodes/vnodes.go b/src/go/collectors/go.d.plugin/agent/vnodes/vnodes.go new file mode 100644 index 00000000000000..fc2a87fd3f329a --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/vnodes/vnodes.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vnodes + +import ( + "io" + "io/fs" + "log/slog" + "os" + "path/filepath" + "sync" + + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +var Disabled = false // TODO: remove after Netdata v1.39.0. Fix for "from source" stable-channel installations. 
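+ +// Each config file under confDir holds a list of virtual nodes; the expected shape +// (taken from the testdata above, values illustrative): +// +// - hostname: first +// guid: 4ea21e84-93b4-418b-b83e-79397610cd6e +// labels: +// area: "41"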
+ +func New(confDir string) *Vnodes { + vn := &Vnodes{ + Logger: logger.New().With( + slog.String("component", "vnodes"), + ), + + confDir: confDir, + vnodes: make(map[string]*VirtualNode), + } + + vn.readConfDir() + + return vn +} + +type ( + Vnodes struct { + *logger.Logger + + confDir string + mux *sync.Mutex + vnodes map[string]*VirtualNode + } + VirtualNode struct { + GUID string `yaml:"guid"` + Hostname string `yaml:"hostname"` + Labels map[string]string `yaml:"labels"` + } +) + +func (vn *Vnodes) Lookup(key string) (*VirtualNode, bool) { + v, ok := vn.vnodes[key] + return v, ok +} + +func (vn *Vnodes) Len() int { + return len(vn.vnodes) +} + +func (vn *Vnodes) readConfDir() { + _ = filepath.WalkDir(vn.confDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + vn.Warning(err) + return nil + } + + if !d.Type().IsRegular() || !isConfigFile(path) { + return nil + } + + var cfg []VirtualNode + if err := loadConfigFile(&cfg, path); err != nil { + vn.Warning(err) + return nil + } + + for _, v := range cfg { + if v.Hostname == "" || v.GUID == "" { + vn.Warningf("skipping virtual node '%+v': some required fields are missing (%s)", v, path) + continue + } + if _, ok := vn.vnodes[v.Hostname]; ok { + vn.Warningf("skipping virtual node '%+v': duplicate node (%s)", v, path) + continue + } + + v := v + vn.Debugf("adding virtual node '%+v' (%s)", v, path) + vn.vnodes[v.Hostname] = &v + } + + return nil + }) +} + +func isConfigFile(path string) bool { + switch filepath.Ext(path) { + case ".yaml", ".yml", ".conf": + return true + default: + return false + } +} + +func loadConfigFile(conf interface{}, path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + if err := yaml.NewDecoder(f).Decode(conf); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/agent/vnodes/vnodes_test.go b/src/go/collectors/go.d.plugin/agent/vnodes/vnodes_test.go new file mode 100644 index 00000000000000..fc2c2ef3589977 --- /dev/null +++ b/src/go/collectors/go.d.plugin/agent/vnodes/vnodes_test.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vnodes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert.NotNil(t, New("testdata")) + assert.NotNil(t, New("not_exist")) +} + +func TestVnodes_Lookup(t *testing.T) { + req := New("testdata") + + _, ok := req.Lookup("first") + assert.True(t, ok) + + _, ok = req.Lookup("second") + assert.True(t, ok) + + _, ok = req.Lookup("third") + assert.False(t, ok) +} diff --git a/src/go/collectors/go.d.plugin/cli/cli.go b/src/go/collectors/go.d.plugin/cli/cli.go new file mode 100644 index 00000000000000..646bdf121e7db5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/cli/cli.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cli + +import ( + "strconv" + + "github.com/jessevdk/go-flags" +) + +// Option defines command line options.
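+// For example (sketch; flag spellings come from the struct tags below): +// +// go.d.plugin --debug --modules=apache 5 +// +// runs only the apache module in debug mode; the trailing positional argument is the +// update interval in seconds, parsed by Parse.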
+type Option struct { + UpdateEvery int + Module string `short:"m" long:"modules" description:"module name to run" default:"all"` + ConfDir []string `short:"c" long:"config-dir" description:"config dir to read"` + WatchPath []string `short:"w" long:"watch-path" description:"config path to watch"` + Debug bool `short:"d" long:"debug" description:"debug mode"` + Version bool `short:"v" long:"version" description:"display the version and exit"` +} + +// Parse returns parsed command-line flags in Option struct +func Parse(args []string) (*Option, error) { + opt := &Option{ + UpdateEvery: 1, + } + parser := flags.NewParser(opt, flags.Default) + parser.Name = "orchestrator" + parser.Usage = "[OPTIONS] [update every]" + + rest, err := parser.ParseArgs(args) + if err != nil { + return nil, err + } + + if len(rest) > 1 { + if opt.UpdateEvery, err = strconv.Atoi(rest[1]); err != nil { + return nil, err + } + } + + return opt, nil +} diff --git a/src/go/collectors/go.d.plugin/cmd/godplugin/main.go b/src/go/collectors/go.d.plugin/cmd/godplugin/main.go new file mode 100644 index 00000000000000..cffdb4adde3903 --- /dev/null +++ b/src/go/collectors/go.d.plugin/cmd/godplugin/main.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package main + +import ( + "errors" + "fmt" + "log/slog" + "os" + "os/user" + "path/filepath" + "strings" + + "github.com/netdata/go.d.plugin/agent" + "github.com/netdata/go.d.plugin/cli" + "github.com/netdata/go.d.plugin/logger" + "github.com/netdata/go.d.plugin/pkg/multipath" + + "github.com/jessevdk/go-flags" + "golang.org/x/net/http/httpproxy" + + _ "github.com/netdata/go.d.plugin/modules" +) + +var ( + cd, _ = os.Getwd() + name = "go.d" + userDir = os.Getenv("NETDATA_USER_CONFIG_DIR") + stockDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") + varLibDir = os.Getenv("NETDATA_LIB_DIR") + lockDir = os.Getenv("NETDATA_LOCK_DIR") + watchPath = os.Getenv("NETDATA_PLUGINS_GOD_WATCH_PATH") + envLogLevel = os.Getenv("NETDATA_LOG_LEVEL") + + version = "unknown" +) + +func confDir(opts *cli.Option) multipath.MultiPath { + if len(opts.ConfDir) > 0 { + return opts.ConfDir + } + if userDir != "" || stockDir != "" { + return multipath.New( + userDir, + stockDir, + ) + } + return multipath.New( + filepath.Join(cd, "/../../../../etc/netdata"), + filepath.Join(cd, "/../../../../usr/lib/netdata/conf.d"), + ) +} + +func modulesConfDir(opts *cli.Option) (mpath multipath.MultiPath) { + if len(opts.ConfDir) > 0 { + return opts.ConfDir + } + if userDir != "" || stockDir != "" { + if userDir != "" { + mpath = append(mpath, filepath.Join(userDir, name)) + } + if stockDir != "" { + mpath = append(mpath, filepath.Join(stockDir, name)) + } + return multipath.New(mpath...) 
+ } + return multipath.New( + filepath.Join(cd, "/../../../../etc/netdata", name), + filepath.Join(cd, "/../../../../usr/lib/netdata/conf.d", name), + ) +} + +func watchPaths(opts *cli.Option) []string { + if watchPath == "" { + return opts.WatchPath + } + return append(opts.WatchPath, watchPath) +} + +func stateFile() string { + if varLibDir == "" { + return "" + } + return filepath.Join(varLibDir, "god-jobs-statuses.json") +} + +func init() { + // https://github.com/netdata/netdata/issues/8949#issuecomment-638294959 + if v := os.Getenv("TZ"); strings.HasPrefix(v, ":") { + _ = os.Unsetenv("TZ") + } +} + +func main() { + opts := parseCLI() + + if opts.Version { + fmt.Printf("go.d.plugin, version: %s\n", version) + return + } + + if envLogLevel != "" { + logger.Level.SetByName(envLogLevel) + } + + if opts.Debug { + logger.Level.Set(slog.LevelDebug) + } + + a := agent.New(agent.Config{ + Name: name, + ConfDir: confDir(opts), + ModulesConfDir: modulesConfDir(opts), + ModulesSDConfPath: watchPaths(opts), + VnodesConfDir: confDir(opts), + StateFile: stateFile(), + LockDir: lockDir, + RunModule: opts.Module, + MinUpdateEvery: opts.UpdateEvery, + }) + + a.Debugf("plugin: name=%s, version=%s", a.Name, version) + if u, err := user.Current(); err == nil { + a.Debugf("current user: name=%s, uid=%s", u.Username, u.Uid) + } + + cfg := httpproxy.FromEnvironment() + a.Infof("env HTTP_PROXY '%s', HTTPS_PROXY '%s'", cfg.HTTPProxy, cfg.HTTPSProxy) + + a.Run() +} + +func parseCLI() *cli.Option { + opt, err := cli.Parse(os.Args) + if err != nil { + var flagsErr *flags.Error + if errors.As(err, &flagsErr) && errors.Is(flagsErr.Type, flags.ErrHelp) { + os.Exit(0) + } else { + os.Exit(1) + } + } + return opt +} diff --git a/src/go/collectors/go.d.plugin/config/go.d.conf b/src/go/collectors/go.d.plugin/config/go.d.conf new file mode 100644 index 00000000000000..f456d25d33effe --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d.conf @@ -0,0 +1,92 @@ +# netdata go.d.plugin configuration +# +# This file is in YAML format. + +# Enable/disable the whole go.d.plugin. +enabled: yes + +# Enable/disable default value for all modules. +default_run: yes + +# Maximum number of used CPUs. Zero means no limit. +max_procs: 0 + +# Enable/disable a specific go.d.plugin module. +# If you want to change any value, you need to uncomment it first. +# IMPORTANT: Do not remove the spaces, only the # symbol. There should be a space before the module name.
+modules: +# activemq: yes +# apache: yes +# bind: yes +# chrony: yes +# cockroachdb: yes +# consul: yes +# coredns: yes +# couchbase: yes +# couchdb: yes +# dnsdist: yes +# dnsmasq: yes +# dnsmasq_dhcp: yes +# dns_query: yes +# docker: yes +# docker_engine: yes +# dockerhub: yes +# elasticsearch: yes +# envoy: yes +# example: no +# filecheck: yes +# fluentd: yes +# freeradius: yes +# haproxy: yes +# hdfs: yes +# httpcheck: yes +# isc_dhcpd: yes +# k8s_kubelet: yes +# k8s_kubeproxy: yes +# lighttpd: yes +# logind: yes +# logstash: yes +# mongodb: yes +# mysql: yes +# nginx: yes +# nginxplus: yes +# nginxvts: yes +# ntpd: yes +# nvme: yes +# nvidia_smi: no +# openvpn: no +# openvpn_status_log: yes +# ping: yes +# pgbouncer: yes +# phpdaemon: yes +# phpfpm: yes +# pihole: yes +# pika: yes +# portcheck: yes +# postgres: yes +# powerdns: yes +# powerdns_recursor: yes +# prometheus: yes +# pulsar: yes +# rabbitmq: yes +# redis: yes +# scaleio: yes +# snmp: yes +# solr: yes +# springboot2: yes +# squidlog: yes +# supervisord: yes +# systemdunits: yes +# tengine: yes +# traefik: yes +# upsd: yes +# unbound: yes +# vernemq: yes +# vcsa: yes +# vsphere: yes +# web_log: yes +# wireguard: yes +# whoisquery: yes +# windows: yes +# x509check: yes +# zookeeper: yes diff --git a/src/go/collectors/go.d.plugin/config/go.d/activemq.conf b/src/go/collectors/go.d.plugin/config/go.d/activemq.conf new file mode 100644 index 00000000000000..0f5b157ccc8d37 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/activemq.conf @@ -0,0 +1,11 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/activemq + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:8161 + webadmin: admin diff --git a/src/go/collectors/go.d.plugin/config/go.d/apache.conf b/src/go/collectors/go.d.plugin/config/go.d/apache.conf new file mode 100644 index 00000000000000..a57d4f4e1b3d9c --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/apache.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/apache + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost/server-status?auto + + - name: local + url: http://127.0.0.1/server-status?auto diff --git a/src/go/collectors/go.d.plugin/config/go.d/bind.conf b/src/go/collectors/go.d.plugin/config/go.d/bind.conf new file mode 100644 index 00000000000000..8dadc8efa23568 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/bind.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/bind + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8653/json/v1 + + - name: local + url: http://127.0.0.1:8653/xml/v3 diff --git a/src/go/collectors/go.d.plugin/config/go.d/cassandra.conf b/src/go/collectors/go.d.plugin/config/go.d/cassandra.conf new file mode 100644 index 00000000000000..8a6f5f0b7ea1fe --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/cassandra.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/cassandra + +#update_every: 5 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - 
name: local + url: http://127.0.0.1:7072/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/chrony.conf b/src/go/collectors/go.d.plugin/config/go.d/chrony.conf new file mode 100644 index 00000000000000..2cf16620bc07b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/chrony.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/chrony + +jobs: + - name: local + address: '127.0.0.1:323' + timeout: 1 + +# - name: remote +# address: '203.0.113.0:323' diff --git a/src/go/collectors/go.d.plugin/config/go.d/cockroachdb.conf b/src/go/collectors/go.d.plugin/config/go.d/cockroachdb.conf new file mode 100644 index 00000000000000..36a8eed1f863b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/cockroachdb.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/cockroachdb + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:8080/_status/vars + + - name: local + url: http://127.0.0.1:8080/_status/vars diff --git a/src/go/collectors/go.d.plugin/config/go.d/consul.conf b/src/go/collectors/go.d.plugin/config/go.d/consul.conf new file mode 100644 index 00000000000000..cafea474bd2488 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/consul.conf @@ -0,0 +1,15 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/consul + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:8500 + acl_token: "" + + - name: local + url: http://127.0.0.1:8500 + acl_token: "" diff --git a/src/go/collectors/go.d.plugin/config/go.d/coredns.conf b/src/go/collectors/go.d.plugin/config/go.d/coredns.conf new file mode 100644 index 00000000000000..78b10f7bcada08 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/coredns.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/coredns + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - url: http://127.0.0.1:9153/metrics + - url: http://kube-dns.kube-system.svc.cluster.local:9153/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/couchbase.conf b/src/go/collectors/go.d.plugin/config/go.d/couchbase.conf new file mode 100644 index 00000000000000..8e3ecba64ce48e --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/couchbase.conf @@ -0,0 +1,12 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/couchbase + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8091 + username: admin + password: password diff --git a/src/go/collectors/go.d.plugin/config/go.d/couchdb.conf b/src/go/collectors/go.d.plugin/config/go.d/couchdb.conf new file mode 100644 index 00000000000000..6fc9c47e4d453b --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/couchdb.conf @@ -0,0 +1,14 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/couchdb + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: local +# 
url: http://127.0.0.1:5984 +# node: node@host +# username: admin +# password: password +# databases: my-db diff --git a/src/go/collectors/go.d.plugin/config/go.d/dns_query.conf b/src/go/collectors/go.d.plugin/config/go.d/dns_query.conf new file mode 100644 index 00000000000000..94df30344b51d0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/dns_query.conf @@ -0,0 +1,18 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsquery + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: example +# record_types: +# - A +# domains: +# - google.com +# - github.com +# - reddit.com +# servers: +# - 8.8.8.8 +# - 8.8.4.4 diff --git a/src/go/collectors/go.d.plugin/config/go.d/dnsdist.conf b/src/go/collectors/go.d.plugin/config/go.d/dnsdist.conf new file mode 100644 index 00000000000000..f11fd644012351 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/dnsdist.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsdist + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8083 + headers: + X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key). +# +# - name: remote +# url: http://203.0.113.0:8083 +# headers: +# X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key). diff --git a/src/go/collectors/go.d.plugin/config/go.d/dnsmasq.conf b/src/go/collectors/go.d.plugin/config/go.d/dnsmasq.conf new file mode 100644 index 00000000000000..02c9764a3db166 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/dnsmasq.conf @@ -0,0 +1,15 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + protocol: udp + address: '127.0.0.1:53' + +# - name: remote +# protocol: udp +# address: '203.0.113.0:53' diff --git a/src/go/collectors/go.d.plugin/config/go.d/dnsmasq_dhcp.conf b/src/go/collectors/go.d.plugin/config/go.d/dnsmasq_dhcp.conf new file mode 100644 index 00000000000000..23b9d21e1fb6e8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/dnsmasq_dhcp.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq_dhcp + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: dnsmasq_dhcp + leases_path: /var/lib/misc/dnsmasq.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d + + - name: dnsmasq_dhcp + leases_path: /etc/pihole/dhcp.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d diff --git a/src/go/collectors/go.d.plugin/config/go.d/docker.conf b/src/go/collectors/go.d.plugin/config/go.d/docker.conf new file mode 100644 index 00000000000000..72e30c75cbec88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/docker.conf @@ -0,0 +1,12 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/docker + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 'unix:///var/run/docker.sock' + timeout: 2 + 
collect_container_size: no diff --git a/src/go/collectors/go.d.plugin/config/go.d/docker_engine.conf b/src/go/collectors/go.d.plugin/config/go.d/docker_engine.conf new file mode 100644 index 00000000000000..184cac84e26239 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/docker_engine.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:9323/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/dockerhub.conf b/src/go/collectors/go.d.plugin/config/go.d/dockerhub.conf new file mode 100644 index 00000000000000..b9606a24bc977a --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/dockerhub.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: local +# repositories: ['user1/name1', 'user2/name2', 'user3/name3'] diff --git a/src/go/collectors/go.d.plugin/config/go.d/elasticsearch.conf b/src/go/collectors/go.d.plugin/config/go.d/elasticsearch.conf new file mode 100644 index 00000000000000..16c19bb7fe9f4e --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/elasticsearch.conf @@ -0,0 +1,19 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch + +#update_every: 5 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:9200 + cluster_mode: no + + # opensearch + - name: local + url: https://127.0.0.1:9200 + cluster_mode: no + tls_skip_verify: yes + username: admin + password: admin diff --git a/src/go/collectors/go.d.plugin/config/go.d/energid.conf b/src/go/collectors/go.d.plugin/config/go.d/energid.conf new file mode 100644 index 00000000000000..e6495062e17e33 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/energid.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/energid + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: energi +# url: http://127.0.0.1:9796 +# username: energy +# password: energy +# +# - name: bitcoin +# url: http://203.0.113.0:8332 +# username: bitcoin +# password: bitcoin diff --git a/src/go/collectors/go.d.plugin/config/go.d/envoy.conf b/src/go/collectors/go.d.plugin/config/go.d/envoy.conf new file mode 100644 index 00000000000000..02e7c9a237f0cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/envoy.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/envoy + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus diff --git a/src/go/collectors/go.d.plugin/config/go.d/example.conf b/src/go/collectors/go.d.plugin/config/go.d/example.conf new file mode 100644 index 00000000000000..cf62a3c1df947a --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/example.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## 
https://github.com/netdata/go.d.plugin/tree/master/modules/example + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: example diff --git a/src/go/collectors/go.d.plugin/config/go.d/filecheck.conf b/src/go/collectors/go.d.plugin/config/go.d/filecheck.conf new file mode 100644 index 00000000000000..ae1ce303a221a1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/filecheck.conf @@ -0,0 +1,20 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: files_example +# files: +# include: +# - '/path/to/file1' +# - '/path/to/file2' +# +# - name: dirs_example +# dirs: +# collect_dir_size: yes +# include: +# - '/path/to/dir1' +# - '/path/to/dir2' diff --git a/src/go/collectors/go.d.plugin/config/go.d/fluentd.conf b/src/go/collectors/go.d.plugin/config/go.d/fluentd.conf new file mode 100644 index 00000000000000..654b4707d2bed4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/fluentd.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/fluentd + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:24220 + + - name: local + url: http://127.0.0.1:24220 diff --git a/src/go/collectors/go.d.plugin/config/go.d/freeradius.conf b/src/go/collectors/go.d.plugin/config/go.d/freeradius.conf new file mode 100644 index 00000000000000..5b3df0a834ef61 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/freeradius.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/freeradius + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: localhost + port: 18121 + secret: adminsecret + + - name: local + address: 127.0.0.1 + port: 18121 + secret: adminsecret diff --git a/src/go/collectors/go.d.plugin/config/go.d/geth.conf b/src/go/collectors/go.d.plugin/config/go.d/geth.conf new file mode 100644 index 00000000000000..c94083e1c04142 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/geth.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/geth + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: "local" + url: http://localhost:6060/debug/metrics/prometheus diff --git a/src/go/collectors/go.d.plugin/config/go.d/haproxy.conf b/src/go/collectors/go.d.plugin/config/go.d/haproxy.conf new file mode 100644 index 00000000000000..e589ac2c677850 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/haproxy.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/haproxy + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8404/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/hdfs.conf b/src/go/collectors/go.d.plugin/config/go.d/hdfs.conf new file mode 100644 index 00000000000000..44c052711112cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/hdfs.conf @@ -0,0 +1,14 @@ +## All available configuration options, their 
descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/hdfs + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 +# + +#jobs: +# - name: namenode +# url: http://127.0.0.1:9870/jmx +# +# - name: datanode +# url: http://127.0.0.1:9864/jmx diff --git a/src/go/collectors/go.d.plugin/config/go.d/httpcheck.conf b/src/go/collectors/go.d.plugin/config/go.d/httpcheck.conf new file mode 100644 index 00000000000000..b29ead29699769 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/httpcheck.conf @@ -0,0 +1,16 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/httpcheck + +#update_every : 1 +#autodetection_retry : 0 +#priority : 70000 + +#jobs: +# - name: jira +# url: https://jira.localdomain/ +# +# - name: cool_website +# url: http://cool.website:8080/home +# status_accepted: [200, 204] +# response_match: <title>My cool website!<\/title> +# timeout: 2 diff --git a/src/go/collectors/go.d.plugin/config/go.d/isc_dhcpd.conf b/src/go/collectors/go.d.plugin/config/go.d/isc_dhcpd.conf new file mode 100644 index 00000000000000..03b195b801f9b0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/isc_dhcpd.conf @@ -0,0 +1,27 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/isc_dhcpd + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: ipv4_example +# leases_path: '/path/to/dhcpd.leases_ipv4' +# pools: +# - name: office +# networks: '192.0.2.1-192.0.2.254' +# - name: wifi +# networks: '198.51.100.0/24' +# - name: dmz +# networks: '203.0.113.0/255.255.255.0' +# +# - name: ipv6_example +# leases_path: '/path/to/dhcpd.leases_ipv6' +# pools: +# - name: office +# networks: '2001:db8::/64' +# - name: wifi +# networks: '2001:db8:0:1::/64' +# - name: dmz +# networks: '2001:db8:0:2::/64' diff --git a/src/go/collectors/go.d.plugin/config/go.d/k8s_kubelet.conf b/src/go/collectors/go.d.plugin/config/go.d/k8s_kubelet.conf new file mode 100644 index 00000000000000..64f895d84d945e --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/k8s_kubelet.conf @@ -0,0 +1,11 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubelet + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - url: http://127.0.0.1:10255/metrics + - url: https://localhost:10250/metrics + tls_skip_verify: yes diff --git a/src/go/collectors/go.d.plugin/config/go.d/k8s_kubeproxy.conf b/src/go/collectors/go.d.plugin/config/go.d/k8s_kubeproxy.conf new file mode 100644 index 00000000000000..dabb7fe1b18221 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/k8s_kubeproxy.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubeproxy + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - url: http://127.0.0.1:10249/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/k8s_state.conf b/src/go/collectors/go.d.plugin/config/go.d/k8s_state.conf new file mode 100644 index 00000000000000..ba386e0d2c5f5a --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/k8s_state.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## 
https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_state + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: k8s_state diff --git a/src/go/collectors/go.d.plugin/config/go.d/lighttpd.conf b/src/go/collectors/go.d.plugin/config/go.d/lighttpd.conf new file mode 100644 index 00000000000000..df403375efccdb --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/lighttpd.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/lighttpd + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost/server-status?auto + + - name: local + url: http://127.0.0.1/server-status?auto diff --git a/src/go/collectors/go.d.plugin/config/go.d/logind.conf b/src/go/collectors/go.d.plugin/config/go.d/logind.conf new file mode 100644 index 00000000000000..5ff90345addee5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/logind.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/logind + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: logind diff --git a/src/go/collectors/go.d.plugin/config/go.d/logstash.conf b/src/go/collectors/go.d.plugin/config/go.d/logstash.conf new file mode 100644 index 00000000000000..4afa1a298efaf8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/logstash.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/logstash + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:9600 + + - name: local + url: http://127.0.0.1:9600 diff --git a/src/go/collectors/go.d.plugin/config/go.d/mongodb.conf b/src/go/collectors/go.d.plugin/config/go.d/mongodb.conf new file mode 100644 index 00000000000000..5236df6592c1c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/mongodb.conf @@ -0,0 +1,14 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/mongodb + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + uri: 'mongodb://localhost:27017' + timeout: 2 +# databases: +# include: +# - "* *" diff --git a/src/go/collectors/go.d.plugin/config/go.d/mysql.conf b/src/go/collectors/go.d.plugin/config/go.d/mysql.conf new file mode 100644 index 00000000000000..15ce2abc907502 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/mysql.conf @@ -0,0 +1,56 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/mysql + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 +# timeout: 1 + +jobs: + # my.cnf + - name: local + my.cnf: '/etc/my.cnf' + + - name: local + my.cnf: '/etc/mysql/my.cnf' + + - name: local + my.cnf: '/etc/mysql/debian.cnf' + + # root + - name: local + dsn: root@unix(/var/run/mysqld/mysqld.sock)/ + + - name: local + dsn: root@unix(/var/run/mysqld/mysql.sock)/ + + - name: local + dsn: root@unix(/var/lib/mysql/mysql.sock)/ + + - name: local + dsn: root@unix(/tmp/mysql.sock)/ + + - name: local + dsn: root@tcp(127.0.0.1:3306)/ + + - name: local + dsn: root@tcp([::1]:3306)/ + + # netdata + - name: local + dsn: 
netdata@unix(/var/run/mysqld/mysqld.sock)/ + + - name: local + dsn: netdata@unix(/var/run/mysqld/mysql.sock)/ + + - name: local + dsn: netdata@unix(/var/lib/mysql/mysql.sock)/ + + - name: local + dsn: netdata@unix(/tmp/mysql.sock)/ + + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + + - name: local + dsn: netdata@tcp([::1]:3306)/ diff --git a/src/go/collectors/go.d.plugin/config/go.d/nginx.conf b/src/go/collectors/go.d.plugin/config/go.d/nginx.conf new file mode 100644 index 00000000000000..594c248969537f --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/nginx.conf @@ -0,0 +1,23 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/nginx + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1/basic_status + + - name: local + url: http://localhost/stub_status + + - name: local + url: http://127.0.0.1/stub_status + + - name: local + url: http://127.0.0.1/nginx_status + + - name: local + url: http://127.0.0.1/status + diff --git a/src/go/collectors/go.d.plugin/config/go.d/nginxplus.conf b/src/go/collectors/go.d.plugin/config/go.d/nginxplus.conf new file mode 100644 index 00000000000000..d66318a7653e83 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/nginxplus.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/nginxplus + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1 diff --git a/src/go/collectors/go.d.plugin/config/go.d/nginxvts.conf b/src/go/collectors/go.d.plugin/config/go.d/nginxvts.conf new file mode 100644 index 00000000000000..39fb477eabf593 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/nginxvts.conf @@ -0,0 +1,12 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/nginxvts + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1/status/format/json +# - name: remote +# url: http://203.0.113.0/status/format/json diff --git a/src/go/collectors/go.d.plugin/config/go.d/ntpd.conf b/src/go/collectors/go.d.plugin/config/go.d/ntpd.conf new file mode 100644 index 00000000000000..e36b317bf35ef3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/ntpd.conf @@ -0,0 +1,14 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/ntpd + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: '127.0.0.1:123' + collect_peers: no + +# - name: remote +# address: '203.0.113.0:123' diff --git a/src/go/collectors/go.d.plugin/config/go.d/nvidia_smi.conf b/src/go/collectors/go.d.plugin/config/go.d/nvidia_smi.conf new file mode 100644 index 00000000000000..319c3fd41cfe78 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/nvidia_smi.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/nvidia_smi + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: nvidia_smi + use_csv_format: yes diff --git a/src/go/collectors/go.d.plugin/config/go.d/nvme.conf b/src/go/collectors/go.d.plugin/config/go.d/nvme.conf new file mode 
100644 index 00000000000000..fdedf6d1d2cbbd --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/nvme.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/nvme + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: nvme diff --git a/src/go/collectors/go.d.plugin/config/go.d/openvpn.conf b/src/go/collectors/go.d.plugin/config/go.d/openvpn.conf new file mode 100644 index 00000000000000..aaf297c5c5a55c --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/openvpn.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 127.0.0.1:7505 diff --git a/src/go/collectors/go.d.plugin/config/go.d/openvpn_status_log.conf b/src/go/collectors/go.d.plugin/config/go.d/openvpn_status_log.conf new file mode 100644 index 00000000000000..4959f1c8c4dbdf --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/openvpn_status_log.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn_status_log + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + log_path: '/var/log/openvpn/status.log' + #per_user_stats: + # includes: + # - "* *" diff --git a/src/go/collectors/go.d.plugin/config/go.d/pgbouncer.conf b/src/go/collectors/go.d.plugin/config/go.d/pgbouncer.conf new file mode 100644 index 00000000000000..a6eb76d3278989 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/pgbouncer.conf @@ -0,0 +1,12 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/pgbouncer + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer' + - name: local + dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432' diff --git a/src/go/collectors/go.d.plugin/config/go.d/phpdaemon.conf b/src/go/collectors/go.d.plugin/config/go.d/phpdaemon.conf new file mode 100644 index 00000000000000..75ddda0db949b4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/phpdaemon.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/phpdaemon + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8509/FullStatus diff --git a/src/go/collectors/go.d.plugin/config/go.d/phpfpm.conf b/src/go/collectors/go.d.plugin/config/go.d/phpfpm.conf new file mode 100644 index 00000000000000..1ae811c6f24a9b --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/phpfpm.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/phpfpm + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost/status?full&json + + - name: local + url: http://127.0.0.1/status?full&json + + - name: local + url: http://[::1]/status?full&json + diff --git a/src/go/collectors/go.d.plugin/config/go.d/pihole.conf 
b/src/go/collectors/go.d.plugin/config/go.d/pihole.conf new file mode 100644 index 00000000000000..856d426352dad2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/pihole.conf @@ -0,0 +1,14 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/pihole + +#update_every : 5 +#timeout : 5 +#autodetection_retry : 0 +#priority : 70000 + +jobs: + - name: pihole + url: http://127.0.0.1 + +# - name: pihole +# url: http://pi.hole diff --git a/src/go/collectors/go.d.plugin/config/go.d/pika.conf b/src/go/collectors/go.d.plugin/config/go.d/pika.conf new file mode 100644 index 00000000000000..96a7766b770fdc --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/pika.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/pika + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 'redis://@127.0.0.1:9221' diff --git a/src/go/collectors/go.d.plugin/config/go.d/ping.conf b/src/go/collectors/go.d.plugin/config/go.d/ping.conf new file mode 100644 index 00000000000000..7fa4b004a928b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/ping.conf @@ -0,0 +1,14 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/ping + +#update_every: 5 +#autodetection_retry: 0 +#priority: 70000 + +## Uncomment the following lines to create a data collection config: + +#jobs: +# - name: example +# hosts: +# - 192.0.2.0 +# - 192.0.2.1 diff --git a/src/go/collectors/go.d.plugin/config/go.d/portcheck.conf b/src/go/collectors/go.d.plugin/config/go.d/portcheck.conf new file mode 100644 index 00000000000000..237b68a125699d --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/portcheck.conf @@ -0,0 +1,15 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/portcheck + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: job1 +# host: 10.0.0.1 +# ports: [23, 80, 8080] +# +# - name: job2 +# host: 10.0.0.2 +# ports: [22, 19999] diff --git a/src/go/collectors/go.d.plugin/config/go.d/postgres.conf b/src/go/collectors/go.d.plugin/config/go.d/postgres.conf new file mode 100644 index 00000000000000..92666df7cdc8db --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/postgres.conf @@ -0,0 +1,23 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/postgres + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + # User postgres + - name: local + dsn: 'postgresql://postgres:postgres@127.0.0.1:5432/postgres' + #collect_databases_matching: '*' + - name: local + dsn: 'host=/var/run/postgresql dbname=postgres user=postgres' + #collect_databases_matching: '*' + + # User netdata + - name: local + dsn: 'postgresql://netdata@127.0.0.1:5432/postgres' + #collect_databases_matching: '*' + - name: local + dsn: 'host=/var/run/postgresql dbname=postgres user=netdata' + #collect_databases_matching: '*' diff --git a/src/go/collectors/go.d.plugin/config/go.d/powerdns.conf b/src/go/collectors/go.d.plugin/config/go.d/powerdns.conf new file mode 100644 index 00000000000000..7873d54f570b93 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/config/go.d/powerdns.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8081 +# headers: +# X-API-KEY: secret # static pre-shared authentication key for access to the REST API (api-key). + +# - name: remote +# url: http://203.0.113.0:8081 +# headers: +# X-API-KEY: secret # static pre-shared authentication key for access to the REST API (api-key). diff --git a/src/go/collectors/go.d.plugin/config/go.d/powerdns_recursor.conf b/src/go/collectors/go.d.plugin/config/go.d/powerdns_recursor.conf new file mode 100644 index 00000000000000..31873f2a815f0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/powerdns_recursor.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns_recursor + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8081 + +# - name: remote +# url: http://203.0.113.0:8081 diff --git a/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf b/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf new file mode 100644 index 00000000000000..43fa0af299f5c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/prometheus.conf @@ -0,0 +1,1361 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/prometheus + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + # https://github.com/prometheus/prometheus/wiki/Default-port-allocations + # - name: node_exporter_local + # url: 'http://127.0.0.1:9100/metrics' + - name: loki_local + url: 'http://127.0.0.1:3100/metrics' + - name: wireguard_local + url: 'http://127.0.0.1:9586/metrics' + expected_prefix: 'wireguard_' + - name: netbox_local + url: 'http://127.0.0.1:8001/metrics' + expected_prefix: 'django_' + - name: haproxy_exporter_local + url: 'http://127.0.0.1:9101/metrics' + - name: statsd_exporter_local + url: 'http://127.0.0.1:9102/metrics' + - name: collectd_exporter_local + url: 'http://127.0.0.1:9103/metrics' + - name: mysqld_exporter_local + url: 'http://127.0.0.1:9104/metrics' + - name: mesos_exporter_local + url: 'http://127.0.0.1:9105/metrics' + - name: cloudwatch_exporter_local + url: 'http://127.0.0.1:9106/metrics' + - name: consul_exporter_local + url: 'http://127.0.0.1:9107/metrics' + - name: graphite_exporter_local + url: 'http://127.0.0.1:9108/metrics' + - name: graphite_exporter_local + url: 'http://127.0.0.1:9109/metrics' + - name: blackbox_exporter_local + url: 'http://127.0.0.1:9110/metrics' + - name: expvar_exporter_local + url: 'http://127.0.0.1:9111/metrics' + - name: promacct_pcap-based_network_traffic_accounting_local + url: 'http://127.0.0.1:9112/metrics' + - name: nginx_exporter_local + url: 'http://127.0.0.1:9113/metrics' + - name: elasticsearch_exporter_local + url: 'http://127.0.0.1:9114/metrics' + - name: blackbox_exporter_local + url: 'http://127.0.0.1:9115/metrics' + - name: snmp_exporter_local + url: 'http://127.0.0.1:9116/metrics' + - name: apache_exporter_local + url: 'http://127.0.0.1:9117/metrics' + - name: jenkins_exporter_local + url: 'http://127.0.0.1:9118/metrics' + - name: bind_exporter_local + url: 
'http://127.0.0.1:9119/metrics' + - name: powerdns_exporter_local + url: 'http://127.0.0.1:9120/metrics' + - name: redis_exporter_local + url: 'http://127.0.0.1:9121/metrics' + - name: influxdb_exporter_local + url: 'http://127.0.0.1:9122/metrics' + - name: rethinkdb_exporter_local + url: 'http://127.0.0.1:9123/metrics' + - name: freebsd_sysctl_exporter_local + url: 'http://127.0.0.1:9124/metrics' + - name: statsd_exporter_local + url: 'http://127.0.0.1:9125/metrics' + - name: new_relic_exporter_local + url: 'http://127.0.0.1:9126/metrics' + - name: pgbouncer_exporter_local + url: 'http://127.0.0.1:9127/metrics' + - name: ceph_exporter_local + url: 'http://127.0.0.1:9128/metrics' + - name: haproxy_log_exporter_local + url: 'http://127.0.0.1:9129/metrics' + - name: unifi_poller_local + url: 'http://127.0.0.1:9130/metrics' + - name: varnish_exporter_local + url: 'http://127.0.0.1:9131/metrics' + - name: airflow_exporter_local + url: 'http://127.0.0.1:9132/metrics' + - name: fritz_box_exporter_local + url: 'http://127.0.0.1:9133/metrics' + - name: zfs_exporter_local + url: 'http://127.0.0.1:9134/metrics' + - name: rtorrent_exporter_local + url: 'http://127.0.0.1:9135/metrics' + - name: collins_exporter_local + url: 'http://127.0.0.1:9136/metrics' + - name: silicondust_hdhomerun_exporter_local + url: 'http://127.0.0.1:9137/metrics' + - name: heka_exporter_local + url: 'http://127.0.0.1:9138/metrics' + - name: azure_sql_exporter_local + url: 'http://127.0.0.1:9139/metrics' + - name: mirth_exporter_local + url: 'http://127.0.0.1:9140/metrics' + - name: zookeeper_exporter_local + url: 'http://127.0.0.1:9141/metrics' + - name: big-ip_exporter_local + url: 'http://127.0.0.1:9142/metrics' + - name: cloudmonitor_exporter_local + url: 'http://127.0.0.1:9143/metrics' + - name: aerospike_exporter_local + url: 'http://127.0.0.1:9145/metrics' + - name: icecast_exporter_local + url: 'http://127.0.0.1:9146/metrics' + - name: nginx_request_exporter_local + url: 'http://127.0.0.1:9147/metrics' + - name: nats_exporter_local + url: 'http://127.0.0.1:9148/metrics' + - name: passenger_exporter_local + url: 'http://127.0.0.1:9149/metrics' + - name: memcached_exporter_local + url: 'http://127.0.0.1:9150/metrics' + - name: varnish_request_exporter_local + url: 'http://127.0.0.1:9151/metrics' + - name: command_runner_exporter_local + url: 'http://127.0.0.1:9152/metrics' + - name: coredns_local + url: 'http://127.0.0.1:9153/metrics' + - name: postfix_exporter_local + url: 'http://127.0.0.1:9154/metrics' + - name: vsphere_graphite_local + url: 'http://127.0.0.1:9155/metrics' + - name: webdriver_exporter_local + url: 'http://127.0.0.1:9156/metrics' + - name: ibm_mq_exporter_local + url: 'http://127.0.0.1:9157/metrics' + - name: pingdom_exporter_local + url: 'http://127.0.0.1:9158/metrics' + - name: apache_flink_exporter_local + url: 'http://127.0.0.1:9160/metrics' + - name: oracle_db_exporter_local + url: 'http://127.0.0.1:9161/metrics' + - name: apcupsd_exporter_local + url: 'http://127.0.0.1:9162/metrics' + - name: zgres_exporter_local + url: 'http://127.0.0.1:9163/metrics' + - name: s6_exporter_local + url: 'http://127.0.0.1:9164/metrics' + - name: keepalived_exporter_local + url: 'http://127.0.0.1:9165/metrics' + - name: dovecot_exporter_local + url: 'http://127.0.0.1:9166/metrics' + - name: unbound_exporter_local + url: 'http://127.0.0.1:9167/metrics' + - name: gitlab-monitor_local + url: 'http://127.0.0.1:9168/metrics' + - name: lustre_exporter_local + url: 'http://127.0.0.1:9169/metrics' + - name: 
docker_hub_exporter_local + url: 'http://127.0.0.1:9170/metrics' + - name: github_exporter_local + url: 'http://127.0.0.1:9171/metrics' + - name: script_exporter_local + url: 'http://127.0.0.1:9172/metrics' + - name: rancher_exporter_local + url: 'http://127.0.0.1:9173/metrics' + - name: docker-cloud_exporter_local + url: 'http://127.0.0.1:9174/metrics' + - name: saltstack_exporter_local + url: 'http://127.0.0.1:9175/metrics' + - name: openvpn_exporter_local + url: 'http://127.0.0.1:9176/metrics' + - name: libvirt_exporter_local + url: 'http://127.0.0.1:9177/metrics' + - name: stream_exporter_local + url: 'http://127.0.0.1:9178/metrics' + - name: shield_exporter_local + url: 'http://127.0.0.1:9179/metrics' + - name: scylladb_exporter_local + url: 'http://127.0.0.1:9180/metrics' + - name: openstack_ceilometer_exporter_local + url: 'http://127.0.0.1:9181/metrics' + - name: openstack_exporter_local + url: 'http://127.0.0.1:9183/metrics' + - name: twitch_exporter_local + url: 'http://127.0.0.1:9184/metrics' + - name: kafka_topic_exporter_local + url: 'http://127.0.0.1:9185/metrics' + - name: cloud_foundry_firehose_exporter_local + url: 'http://127.0.0.1:9186/metrics' + - name: postgresql_exporter_local + url: 'http://127.0.0.1:9187/metrics' + - name: crypto_exporter_local + url: 'http://127.0.0.1:9188/metrics' + - name: hetzner_cloud_csi_driver_nodes_local + url: 'http://127.0.0.1:9189/metrics' + - name: bosh_exporter_local + url: 'http://127.0.0.1:9190/metrics' + - name: netflow_exporter_local + url: 'http://127.0.0.1:9191/metrics' + - name: ceph_exporter_local + url: 'http://127.0.0.1:9192/metrics' + - name: cloud_foundry_exporter_local + url: 'http://127.0.0.1:9193/metrics' + - name: bosh_tsdb_exporter_local + url: 'http://127.0.0.1:9194/metrics' + - name: maxscale_exporter_local + url: 'http://127.0.0.1:9195/metrics' + - name: upnp_internet_gateway_device_exporter_local + url: 'http://127.0.0.1:9196/metrics' + - name: logstash_exporter_local + url: 'http://127.0.0.1:9198/metrics' + - name: cloudflare_exporter_local + url: 'http://127.0.0.1:9199/metrics' + - name: pacemaker_exporter_local + url: 'http://127.0.0.1:9202/metrics' + - name: domain_exporter_local + url: 'http://127.0.0.1:9203/metrics' + - name: pcsensor_temper_exporter_local + url: 'http://127.0.0.1:9204/metrics' + - name: nextcloud_exporter_local + url: 'http://127.0.0.1:9205/metrics' + - name: elasticsearch_exporter_local + url: 'http://127.0.0.1:9206/metrics' + - name: mysql_exporter_local + url: 'http://127.0.0.1:9207/metrics' + - name: kafka_consumer_group_exporter_local + url: 'http://127.0.0.1:9208/metrics' + - name: fastnetmon_advanced_exporter_local + url: 'http://127.0.0.1:9209/metrics' + - name: netatmo_exporter_local + url: 'http://127.0.0.1:9210/metrics' + - name: dnsbl-exporter_local + url: 'http://127.0.0.1:9211/metrics' + - name: digitalocean_exporter_local + url: 'http://127.0.0.1:9212/metrics' + - name: custom_exporter_local + url: 'http://127.0.0.1:9213/metrics' + - name: mqtt_blackbox_exporter_local + url: 'http://127.0.0.1:9214/metrics' + - name: prometheus_graphite_bridge_local + url: 'http://127.0.0.1:9215/metrics' + - name: mongodb_exporter_local + url: 'http://127.0.0.1:9216/metrics' + - name: consul_agent_exporter_local + url: 'http://127.0.0.1:9217/metrics' + - name: promql-guard_local + url: 'http://127.0.0.1:9218/metrics' + - name: ssl_certificate_exporter_local + url: 'http://127.0.0.1:9219/metrics' + - name: netapp_trident_exporter_local + url: 'http://127.0.0.1:9220/metrics' + - name: 
proxmox_ve_exporter_local + url: 'http://127.0.0.1:9221/metrics' + - name: aws_ecs_exporter_local + url: 'http://127.0.0.1:9222/metrics' + - name: bladepsgi_exporter_local + url: 'http://127.0.0.1:9223/metrics' + - name: fluentd_exporter_local + url: 'http://127.0.0.1:9224/metrics' + - name: mailexporter_local + url: 'http://127.0.0.1:9225/metrics' + - name: allas_local + url: 'http://127.0.0.1:9226/metrics' + - name: proc_exporter_local + url: 'http://127.0.0.1:9227/metrics' + - name: flussonic_exporter_local + url: 'http://127.0.0.1:9228/metrics' + - name: gitlab-workhorse_local + url: 'http://127.0.0.1:9229/metrics' + - name: network_ups_tools_exporter_local + url: 'http://127.0.0.1:9230/metrics' + - name: solr_exporter_local + url: 'http://127.0.0.1:9231/metrics' + - name: osquery_exporter_local + url: 'http://127.0.0.1:9232/metrics' + - name: mgmt_exporter_local + url: 'http://127.0.0.1:9233/metrics' + - name: mosquitto_exporter_local + url: 'http://127.0.0.1:9234/metrics' + - name: gitlab-pages_exporter_local + url: 'http://127.0.0.1:9235/metrics' + - name: gitlab_gitaly_exporter_local + url: 'http://127.0.0.1:9236/metrics' + - name: sql_exporter_local + url: 'http://127.0.0.1:9237/metrics' + - name: uwsgi_exporter_local + url: 'http://127.0.0.1:9238/metrics' + - name: surfboard_exporter_local + url: 'http://127.0.0.1:9239/metrics' + - name: tinyproxy_exporter_local + url: 'http://127.0.0.1:9240/metrics' + - name: arangodb_exporter_local + url: 'http://127.0.0.1:9241/metrics' + - name: ceph_radosgw_usage_exporter_local + url: 'http://127.0.0.1:9242/metrics' + - name: chef_compliance_exporter_local + url: 'http://127.0.0.1:9243/metrics' + - name: moby_container_exporter_local + url: 'http://127.0.0.1:9244/metrics' + - name: naemon_nagios_exporter_local + url: 'http://127.0.0.1:9245/metrics' + - name: smartpi_local + url: 'http://127.0.0.1:9246/metrics' + - name: sphinx_exporter_local + url: 'http://127.0.0.1:9247/metrics' + - name: freebsd_gstat_exporter_local + url: 'http://127.0.0.1:9248/metrics' + - name: apache_flink_metrics_reporter_local + url: 'http://127.0.0.1:9249/metrics' + - name: opentsdb_exporter_local + url: 'http://127.0.0.1:9250/metrics' + - name: sensu_exporter_local + url: 'http://127.0.0.1:9251/metrics' + - name: gitlab_runner_exporter_local + url: 'http://127.0.0.1:9252/metrics' + - name: php-fpm_exporter_local + url: 'http://127.0.0.1:9253/metrics' + - name: kafka_burrow_exporter_local + url: 'http://127.0.0.1:9254/metrics' + - name: google_stackdriver_exporter_local + url: 'http://127.0.0.1:9255/metrics' + - name: td-agent_exporter_local + url: 'http://127.0.0.1:9256/metrics' + - name: smart_exporter_local + url: 'http://127.0.0.1:9257/metrics' + - name: hello_sense_exporter_local + url: 'http://127.0.0.1:9258/metrics' + - name: azure_resources_exporter_local + url: 'http://127.0.0.1:9259/metrics' + - name: buildkite_exporter_local + url: 'http://127.0.0.1:9260/metrics' + - name: grafana_exporter_local + url: 'http://127.0.0.1:9261/metrics' + - name: bloomsky_exporter_local + url: 'http://127.0.0.1:9262/metrics' + - name: vmware_guest_exporter_local + url: 'http://127.0.0.1:9263/metrics' + - name: nest_exporter_local + url: 'http://127.0.0.1:9264/metrics' + - name: weather_exporter_local + url: 'http://127.0.0.1:9265/metrics' + - name: openhab_exporter_local + url: 'http://127.0.0.1:9266/metrics' + - name: nagios_livestatus_exporter_local + url: 'http://127.0.0.1:9267/metrics' + - name: cratedb_remote_remote_read_write_adapter_local + url: 
'http://127.0.0.1:9268/metrics' + - name: fluent-agent-lite_exporter_local + url: 'http://127.0.0.1:9269/metrics' + - name: jmeter_exporter_local + url: 'http://127.0.0.1:9270/metrics' + - name: pagespeed_exporter_local + url: 'http://127.0.0.1:9271/metrics' + - name: vmware_exporter_local + url: 'http://127.0.0.1:9272/metrics' + - name: kubernetes_persistentvolume_disk_usage_exporter_local + url: 'http://127.0.0.1:9274/metrics' + - name: nrpe_exporter_local + url: 'http://127.0.0.1:9275/metrics' + - name: githubql_exporter_local + url: 'http://127.0.0.1:9276/metrics' + - name: azure_monitor_exporter_local + url: 'http://127.0.0.1:9276/metrics' + - name: mongo_collection_exporter_local + url: 'http://127.0.0.1:9277/metrics' + - name: crypto_miner_exporter_local + url: 'http://127.0.0.1:9278/metrics' + - name: instaclustr_exporter_local + url: 'http://127.0.0.1:9279/metrics' + - name: citrix_netscaler_exporter_local + url: 'http://127.0.0.1:9280/metrics' + - name: fastd_exporter_local + url: 'http://127.0.0.1:9281/metrics' + - name: freeswitch_exporter_local + url: 'http://127.0.0.1:9282/metrics' + - name: ceph_ceph-mgr_prometheus_plugin_local + url: 'http://127.0.0.1:9283/metrics' + - name: gobetween_local + url: 'http://127.0.0.1:9284/metrics' + - name: database_exporter_local + url: 'http://127.0.0.1:9285/metrics' + - name: vdo_compression_and_deduplication_exporter_local + url: 'http://127.0.0.1:9286/metrics' + - name: ceph_iscsi_gateway_statistics_local + url: 'http://127.0.0.1:9287/metrics' + - name: consrv_local + url: 'http://127.0.0.1:9288/metrics' + - name: lovoos_ipmi_exporter_local + url: 'http://127.0.0.1:9289/metrics' + - name: soundclouds_ipmi_exporter_local + url: 'http://127.0.0.1:9290/metrics' + - name: ibm_z_hmc_exporter_local + url: 'http://127.0.0.1:9291/metrics' + - name: netapp_ontap_api_exporter_local + url: 'http://127.0.0.1:9292/metrics' + - name: connection_status_exporter_local + url: 'http://127.0.0.1:9293/metrics' + - name: miflora_flower_care_exporter_local + url: 'http://127.0.0.1:9294/metrics' + - name: freifunk_exporter_local + url: 'http://127.0.0.1:9295/metrics' + - name: odbc_exporter_local + url: 'http://127.0.0.1:9296/metrics' + - name: machbase_exporter_local + url: 'http://127.0.0.1:9297/metrics' + - name: generic_exporter_local + url: 'http://127.0.0.1:9298/metrics' + - name: exporter_aggregator_local + url: 'http://127.0.0.1:9299/metrics' + - name: squid_exporter_local + url: 'http://127.0.0.1:9301/metrics' + - name: faucet_sdn_faucet_exporter_local + url: 'http://127.0.0.1:9302/metrics' + - name: faucet_sdn_gauge_exporter_local + url: 'http://127.0.0.1:9303/metrics' + - name: logstash_exporter_local + url: 'http://127.0.0.1:9304/metrics' + - name: go-ethereum_exporter_local + url: 'http://127.0.0.1:9305/metrics' + - name: kyototycoon_exporter_local + url: 'http://127.0.0.1:9306/metrics' + - name: audisto_exporter_local + url: 'http://127.0.0.1:9307/metrics' + - name: kafka_exporter_local + url: 'http://127.0.0.1:9308/metrics' + - name: fluentd_exporter_local + url: 'http://127.0.0.1:9309/metrics' + - name: open_vswitch_exporter_local + url: 'http://127.0.0.1:9310/metrics' + - name: iota_exporter_local + url: 'http://127.0.0.1:9311/metrics' + - name: cloudprober_exporter_local + url: 'http://127.0.0.1:9313/metrics' + - name: eris_exporter_local + url: 'http://127.0.0.1:9314/metrics' + - name: centrifugo_exporter_local + url: 'http://127.0.0.1:9315/metrics' + - name: tado_exporter_local + url: 'http://127.0.0.1:9316/metrics' + - name: 
tellstick_local_exporter_local + url: 'http://127.0.0.1:9317/metrics' + - name: conntrack_exporter_local + url: 'http://127.0.0.1:9318/metrics' + - name: flexlm_exporter_local + url: 'http://127.0.0.1:9319/metrics' + - name: consul_telemetry_exporter_local + url: 'http://127.0.0.1:9320/metrics' + - name: spring_boot_actuator_exporter_local + url: 'http://127.0.0.1:9321/metrics' + - name: haproxy_abuser_exporter_local + url: 'http://127.0.0.1:9322/metrics' + - name: docker_prometheus_metrics_local + url: 'http://127.0.0.1:9323/metrics' + - name: bird_routing_daemon_exporter_local + url: 'http://127.0.0.1:9324/metrics' + - name: ovirt_exporter_local + url: 'http://127.0.0.1:9325/metrics' + - name: junos_exporter_local + url: 'http://127.0.0.1:9326/metrics' + - name: s3_exporter_local + url: 'http://127.0.0.1:9327/metrics' + - name: openldap_syncrepl_exporter_local + url: 'http://127.0.0.1:9328/metrics' + - name: cups_exporter_local + url: 'http://127.0.0.1:9329/metrics' + - name: openldap_metrics_exporter_local + url: 'http://127.0.0.1:9330/metrics' + - name: influx-spout_prometheus_metrics_local + url: 'http://127.0.0.1:9331/metrics' + - name: network_exporter_local + url: 'http://127.0.0.1:9332/metrics' + - name: vault_pki_exporter_local + url: 'http://127.0.0.1:9333/metrics' + - name: ejabberd_exporter_local + url: 'http://127.0.0.1:9334/metrics' + - name: nexsan_exporter_local + url: 'http://127.0.0.1:9335/metrics' + - name: mediacom_internet_usage_exporter_local + url: 'http://127.0.0.1:9336/metrics' + - name: mqttgateway_local + url: 'http://127.0.0.1:9337/metrics' + - name: aws_s3_exporter_local + url: 'http://127.0.0.1:9339/metrics' + - name: financial_quotes_exporter_local + url: 'http://127.0.0.1:9340/metrics' + - name: slurm_exporter_local + url: 'http://127.0.0.1:9341/metrics' + - name: frr_exporter_local + url: 'http://127.0.0.1:9342/metrics' + - name: gridserver_exporter_local + url: 'http://127.0.0.1:9343/metrics' + - name: mqtt_exporter_local + url: 'http://127.0.0.1:9344/metrics' + - name: ruckus_smartzone_exporter_local + url: 'http://127.0.0.1:9345/metrics' + - name: ping_exporter_local + url: 'http://127.0.0.1:9346/metrics' + - name: junos_exporter_local + url: 'http://127.0.0.1:9347/metrics' + - name: bigquery_exporter_local + url: 'http://127.0.0.1:9348/metrics' + - name: configurable_elasticsearch_query_exporter_local + url: 'http://127.0.0.1:9349/metrics' + - name: thousandeyes_exporter_local + url: 'http://127.0.0.1:9350/metrics' + - name: wal-e_wal-g_exporter_local + url: 'http://127.0.0.1:9351/metrics' + - name: nature_remo_exporter_local + url: 'http://127.0.0.1:9352/metrics' + - name: ceph_exporter_local + url: 'http://127.0.0.1:9353/metrics' + - name: deluge_exporter_local + url: 'http://127.0.0.1:9354/metrics' + - name: nightwatchjs_exporter_local + url: 'http://127.0.0.1:9355/metrics' + - name: pacemaker_exporter_local + url: 'http://127.0.0.1:9356/metrics' + - name: p1_exporter_local + url: 'http://127.0.0.1:9357/metrics' + - name: performance_counters_exporter_local + url: 'http://127.0.0.1:9358/metrics' + - name: sidekiq_prometheus_local + url: 'http://127.0.0.1:9359/metrics' + - name: powershell_exporter_local + url: 'http://127.0.0.1:9360/metrics' + - name: scaleway_sd_exporter_local + url: 'http://127.0.0.1:9361/metrics' + - name: cisco_exporter_local + url: 'http://127.0.0.1:9362/metrics' + - name: clickhouse_local + url: 'http://127.0.0.1:9363/metrics' + - name: continent8_exporter_local + url: 'http://127.0.0.1:9364/metrics' + - name: 
cumulus_linux_exporter_local + url: 'http://127.0.0.1:9365/metrics' + - name: haproxy_stick_table_exporter_local + url: 'http://127.0.0.1:9366/metrics' + - name: teamspeak3_exporter_local + url: 'http://127.0.0.1:9367/metrics' + - name: ethereum_client_exporter_local + url: 'http://127.0.0.1:9368/metrics' + - name: prometheus_pushprox_local + url: 'http://127.0.0.1:9369/metrics' + - name: u-bmc_local + url: 'http://127.0.0.1:9370/metrics' + - name: conntrack-stats-exporter_local + url: 'http://127.0.0.1:9371/metrics' + - name: appmetrics_prometheus_local + url: 'http://127.0.0.1:9372/metrics' + - name: gcp_service_discovery_local + url: 'http://127.0.0.1:9373/metrics' + - name: smokeping_prober_local + url: 'http://127.0.0.1:9374/metrics' + - name: particle_exporter_local + url: 'http://127.0.0.1:9375/metrics' + - name: falco_local + url: 'http://127.0.0.1:9376/metrics' + - name: cisco_aci_exporter_local + url: 'http://127.0.0.1:9377/metrics' + - name: etcd_grpc_proxy_exporter_local + url: 'http://127.0.0.1:9378/metrics' + - name: etcd_exporter_local + url: 'http://127.0.0.1:9379/metrics' + - name: mythtv_exporter_local + url: 'http://127.0.0.1:9380/metrics' + - name: kafka_zookeeper_exporter_local + url: 'http://127.0.0.1:9381/metrics' + - name: frrouting_exporter_local + url: 'http://127.0.0.1:9382/metrics' + - name: aws_health_exporter_local + url: 'http://127.0.0.1:9383/metrics' + - name: aws_sqs_exporter_local + url: 'http://127.0.0.1:9384/metrics' + - name: apcupsdexporter_local + url: 'http://127.0.0.1:9385/metrics' + - name: httpd-exporter_local + url: 'http://127.0.0.1:9386/metrics' + - name: tankerkönig_api_exporter_local + url: 'http://127.0.0.1:9386/metrics' + - name: sabnzbd_exporter_local + url: 'http://127.0.0.1:9387/metrics' + - name: linode_exporter_local + url: 'http://127.0.0.1:9388/metrics' + - name: scylla-cluster-tests_exporter_local + url: 'http://127.0.0.1:9389/metrics' + - name: kannel_exporter_local + url: 'http://127.0.0.1:9390/metrics' + - name: concourse_prometheus_metrics_local + url: 'http://127.0.0.1:9391/metrics' + - name: generic_command_line_output_exporter_local + url: 'http://127.0.0.1:9392/metrics' + - name: patroni_exporter_local + url: 'http://127.0.0.1:9393/metrics' + - name: alertmanager_github_webhook_receiver_local + url: 'http://127.0.0.1:9393/metrics' + - name: ruby_prometheus_exporter_local + url: 'http://127.0.0.1:9394/metrics' + - name: ldap_exporter_local + url: 'http://127.0.0.1:9395/metrics' + - name: monerod_exporter_local + url: 'http://127.0.0.1:9396/metrics' + - name: comap_local + url: 'http://127.0.0.1:9397/metrics' + - name: open_hardware_monitor_exporter_local + url: 'http://127.0.0.1:9398/metrics' + - name: prometheus_sql_exporter_local + url: 'http://127.0.0.1:9399/metrics' + - name: ripe_atlas_exporter_local + url: 'http://127.0.0.1:9400/metrics' + - name: 1-wire_exporter_local + url: 'http://127.0.0.1:9401/metrics' + - name: google_cloud_platform_exporter_local + url: 'http://127.0.0.1:9402/metrics' + - name: zerto_exporter_local + url: 'http://127.0.0.1:9403/metrics' + - name: jmx_exporter_local + url: 'http://127.0.0.1:9404/metrics' + - name: discourse_exporter_local + url: 'http://127.0.0.1:9405/metrics' + - name: hhvm_exporter_local + url: 'http://127.0.0.1:9406/metrics' + - name: obs_studio_exporter_local + url: 'http://127.0.0.1:9407/metrics' + - name: rds_enhanced_monitoring_exporter_local + url: 'http://127.0.0.1:9408/metrics' + - name: ovn-kubernetes_master_exporter_local + url: 'http://127.0.0.1:9409/metrics' + - 
name: ovn-kubernetes_node_exporter_local + url: 'http://127.0.0.1:9410/metrics' + - name: softether_exporter_local + url: 'http://127.0.0.1:9411/metrics' + - name: sentry_exporter_local + url: 'http://127.0.0.1:9412/metrics' + - name: mogilefs_exporter_local + url: 'http://127.0.0.1:9413/metrics' + - name: homey_exporter_local + url: 'http://127.0.0.1:9414/metrics' + - name: cloudwatch_read_adapter_local + url: 'http://127.0.0.1:9415/metrics' + - name: hp_ilo_metrics_exporter_local + url: 'http://127.0.0.1:9416/metrics' + - name: ethtool_exporter_local + url: 'http://127.0.0.1:9417/metrics' + - name: gearman_exporter_local + url: 'http://127.0.0.1:9418/metrics' + - name: rabbitmq_exporter_local + url: 'http://127.0.0.1:9419/metrics' + - name: couchbase_exporter_local + url: 'http://127.0.0.1:9420/metrics' + - name: apicast_local + url: 'http://127.0.0.1:9421/metrics' + - name: jolokia_exporter_local + url: 'http://127.0.0.1:9422/metrics' + - name: hp_raid_exporter_local + url: 'http://127.0.0.1:9423/metrics' + - name: influxdb_stats_exporter_local + url: 'http://127.0.0.1:9424/metrics' + - name: pachyderm_exporter_local + url: 'http://127.0.0.1:9425/metrics' + - name: vespa_engine_exporter_local + url: 'http://127.0.0.1:9426/metrics' + - name: ping_exporter_local + url: 'http://127.0.0.1:9427/metrics' + - name: ssh_exporter_local + url: 'http://127.0.0.1:9428/metrics' + - name: uptimerobot_exporter_local + url: 'http://127.0.0.1:9429/metrics' + - name: corerad_local + url: 'http://127.0.0.1:9430/metrics' + - name: hpfeeds_broker_exporter_local + url: 'http://127.0.0.1:9431/metrics' + - name: windows_perflib_exporter_local + url: 'http://127.0.0.1:9432/metrics' + - name: knot_exporter_local + url: 'http://127.0.0.1:9433/metrics' + - name: opensips_exporter_local + url: 'http://127.0.0.1:9434/metrics' + - name: ebpf_exporter_local + url: 'http://127.0.0.1:9435/metrics' + - name: mikrotik-exporter_local + url: 'http://127.0.0.1:9436/metrics' + - name: dell_emc_isilon_exporter_local + url: 'http://127.0.0.1:9437/metrics' + - name: dell_emc_ecs_exporter_local + url: 'http://127.0.0.1:9438/metrics' + - name: bitcoind_exporter_local + url: 'http://127.0.0.1:9439/metrics' + - name: ravendb_exporter_local + url: 'http://127.0.0.1:9440/metrics' + - name: nomad_exporter_local + url: 'http://127.0.0.1:9441/metrics' + - name: mcrouter_exporter_local + url: 'http://127.0.0.1:9442/metrics' + - name: foundationdb_exporter_local + url: 'http://127.0.0.1:9444/metrics' + - name: nvidia_gpu_exporter_local + url: 'http://127.0.0.1:9445/metrics' + - name: orange_livebox_dsl_modem_exporter_local + url: 'http://127.0.0.1:9446/metrics' + - name: resque_exporter_local + url: 'http://127.0.0.1:9447/metrics' + - name: eventstore_exporter_local + url: 'http://127.0.0.1:9448/metrics' + - name: omeroserver_exporter_local + url: 'http://127.0.0.1:9449/metrics' + - name: habitat_exporter_local + url: 'http://127.0.0.1:9450/metrics' + - name: reindexer_exporter_local + url: 'http://127.0.0.1:9451/metrics' + - name: freebsd_jail_exporter_local + url: 'http://127.0.0.1:9452/metrics' + - name: midonet-kubernetes_local + url: 'http://127.0.0.1:9453/metrics' + - name: nvidia_smi_exporter_local + url: 'http://127.0.0.1:9454/metrics' + - name: iptables_exporter_local + url: 'http://127.0.0.1:9455/metrics' + - name: aws_lambda_exporter_local + url: 'http://127.0.0.1:9456/metrics' + - name: files_content_exporter_local + url: 'http://127.0.0.1:9457/metrics' + - name: rocketchat_exporter_local + url: 'http://127.0.0.1:9458/metrics' 
+ - name: yarn_exporter_local + url: 'http://127.0.0.1:9459/metrics' + - name: hana_exporter_local + url: 'http://127.0.0.1:9460/metrics' + - name: aws_lambda_read_adapter_local + url: 'http://127.0.0.1:9461/metrics' + - name: php_opcache_exporter_local + url: 'http://127.0.0.1:9462/metrics' + - name: virgin_media_liberty_global_hub3_exporter_local + url: 'http://127.0.0.1:9463/metrics' + - name: opencensus-nodejs_prometheus_exporter_local + url: 'http://127.0.0.1:9464/metrics' + - name: hetzner_cloud_k8s_cloud_controller_manager_local + url: 'http://127.0.0.1:9465/metrics' + - name: mqtt_push_gateway_local + url: 'http://127.0.0.1:9466/metrics' + - name: nginx-prometheus-shiny-exporter_local + url: 'http://127.0.0.1:9467/metrics' + - name: nasa-swpc-exporter_local + url: 'http://127.0.0.1:9468/metrics' + - name: script_exporter_local + url: 'http://127.0.0.1:9469/metrics' + - name: cachet_exporter_local + url: 'http://127.0.0.1:9470/metrics' + - name: lxc-exporter_local + url: 'http://127.0.0.1:9471/metrics' + - name: hetzner_cloud_csi_driver_controller_local + url: 'http://127.0.0.1:9472/metrics' + - name: stellar-core-exporter_local + url: 'http://127.0.0.1:9473/metrics' + - name: libvirtd_exporter_local + url: 'http://127.0.0.1:9474/metrics' + - name: wgipamd_local + url: 'http://127.0.0.1:9475/metrics' + - name: ovn_metrics_exporter_local + url: 'http://127.0.0.1:9476/metrics' + - name: csp_violation_report_exporter_local + url: 'http://127.0.0.1:9477/metrics' + - name: sentinel_exporter_local + url: 'http://127.0.0.1:9478/metrics' + - name: elasticbeat_exporter_local + url: 'http://127.0.0.1:9479/metrics' + - name: brigade_exporter_local + url: 'http://127.0.0.1:9480/metrics' + - name: drbd9_exporter_local + url: 'http://127.0.0.1:9481/metrics' + - name: vector_packet_process_vpp_exporter_local + url: 'http://127.0.0.1:9482/metrics' + - name: ibm_app_connect_enterprise_exporter_local + url: 'http://127.0.0.1:9483/metrics' + - name: kubedex-exporter_local + url: 'http://127.0.0.1:9484/metrics' + - name: emarsys_exporter_local + url: 'http://127.0.0.1:9485/metrics' + - name: domoticz_exporter_local + url: 'http://127.0.0.1:9486/metrics' + - name: docker_stats_exporter_local + url: 'http://127.0.0.1:9487/metrics' + - name: bmw_connected_drive_exporter_local + url: 'http://127.0.0.1:9488/metrics' + - name: tezos_node_metrics_exporter_local + url: 'http://127.0.0.1:9489/metrics' + - name: exporter_for_docker_libnetwork_plugin_for_ovn_local + url: 'http://127.0.0.1:9490/metrics' + - name: docker_container_stats_exporter_docker_ps_local + url: 'http://127.0.0.1:9491/metrics' + - name: azure_exporter_monitor_and_usage_local + url: 'http://127.0.0.1:9492/metrics' + - name: prosafe_exporter_local + url: 'http://127.0.0.1:9493/metrics' + - name: kamailio_exporter_local + url: 'http://127.0.0.1:9494/metrics' + - name: ingestor_exporter_local + url: 'http://127.0.0.1:9495/metrics' + - name: 389ds_ipa_exporter_local + url: 'http://127.0.0.1:9496/metrics' + - name: immudb_exporter_local + url: 'http://127.0.0.1:9497/metrics' + - name: tp-link_hs110_exporter_local + url: 'http://127.0.0.1:9498/metrics' + - name: smartthings_exporter_local + url: 'http://127.0.0.1:9499/metrics' + - name: cassandra_exporter_local + url: 'http://127.0.0.1:9500/metrics' + - name: hetznercloud_exporter_local + url: 'http://127.0.0.1:9501/metrics' + - name: hetzner_exporter_local + url: 'http://127.0.0.1:9502/metrics' + - name: scaleway_exporter_local + url: 'http://127.0.0.1:9503/metrics' + - name: github_exporter_local 
+ url: 'http://127.0.0.1:9504/metrics' + - name: dockerhub_exporter_local + url: 'http://127.0.0.1:9505/metrics' + - name: jenkins_exporter_local + url: 'http://127.0.0.1:9506/metrics' + - name: owncloud_exporter_local + url: 'http://127.0.0.1:9507/metrics' + - name: ccache_exporter_local + url: 'http://127.0.0.1:9508/metrics' + - name: hetzner_storagebox_exporter_local + url: 'http://127.0.0.1:9509/metrics' + - name: dummy_exporter_local + url: 'http://127.0.0.1:9510/metrics' + - name: cloudera_exporter_local + url: 'http://127.0.0.1:9512/metrics' + - name: openconfig_streaming_telemetry_exporter_local + url: 'http://127.0.0.1:9513/metrics' + - name: app_stores_exporter_local + url: 'http://127.0.0.1:9514/metrics' + - name: swarm-exporter_local + url: 'http://127.0.0.1:9515/metrics' + - name: prometheus_speedtest_exporter_local + url: 'http://127.0.0.1:9516/metrics' + - name: matroschka_prober_local + url: 'http://127.0.0.1:9517/metrics' + - name: crypto_stock_exchanges_funds_exporter_local + url: 'http://127.0.0.1:9518/metrics' + - name: acurite_exporter_local + url: 'http://127.0.0.1:9519/metrics' + - name: swift_health_exporter_local + url: 'http://127.0.0.1:9520/metrics' + - name: ruuvi_exporter_local + url: 'http://127.0.0.1:9521/metrics' + - name: tftp_exporter_local + url: 'http://127.0.0.1:9522/metrics' + - name: 3cx_exporter_local + url: 'http://127.0.0.1:9523/metrics' + - name: loki_exporter_local + url: 'http://127.0.0.1:9524/metrics' + - name: alibaba_cloud_exporter_local + url: 'http://127.0.0.1:9525/metrics' + - name: kafka_lag_exporter_local + url: 'http://127.0.0.1:9526/metrics' + - name: netgear_cable_modem_exporter_local + url: 'http://127.0.0.1:9527/metrics' + - name: total_connect_comfort_exporter_local + url: 'http://127.0.0.1:9528/metrics' + - name: octoprint_exporter_local + url: 'http://127.0.0.1:9529/metrics' + - name: custom_prometheus_exporter_local + url: 'http://127.0.0.1:9530/metrics' + - name: jfrog_artifactory_exporter_local + url: 'http://127.0.0.1:9531/metrics' + - name: snyk_exporter_local + url: 'http://127.0.0.1:9532/metrics' + - name: network_exporter_for_cisco_api_local + url: 'http://127.0.0.1:9533/metrics' + - name: humio_exporter_local + url: 'http://127.0.0.1:9534/metrics' + - name: cron_exporter_local + url: 'http://127.0.0.1:9535/metrics' + - name: ipsec_exporter_local + url: 'http://127.0.0.1:9536/metrics' + - name: cri-o_local + url: 'http://127.0.0.1:9537/metrics' + - name: bull_queue_local + url: 'http://127.0.0.1:9538/metrics' + - name: modemmanager_exporter_local + url: 'http://127.0.0.1:9539/metrics' + - name: emq_exporter_local + url: 'http://127.0.0.1:9540/metrics' + - name: smartmon_exporter_local + url: 'http://127.0.0.1:9541/metrics' + - name: sakuracloud_exporter_local + url: 'http://127.0.0.1:9542/metrics' + - name: kube2iam_exporter_local + url: 'http://127.0.0.1:9543/metrics' + - name: pgio_exporter_local + url: 'http://127.0.0.1:9544/metrics' + - name: hp_ilo4_exporter_local + url: 'http://127.0.0.1:9545/metrics' + - name: pwrstat-exporter_local + url: 'http://127.0.0.1:9546/metrics' + - name: patroni_exporter_local + url: 'http://127.0.0.1:9547/metrics' + - name: trafficserver_exporter_local + url: 'http://127.0.0.1:9548/metrics' + - name: raspberry_exporter_local + url: 'http://127.0.0.1:9549/metrics' + - name: rtl_433_exporter_local + url: 'http://127.0.0.1:9550/metrics' + - name: hostapd_exporter_local + url: 'http://127.0.0.1:9551/metrics' + - name: alpine_apk_exporter_local + url: 'http://127.0.0.1:9552/metrics' + - name: 
aws_elastic_beanstalk_exporter_local + url: 'http://127.0.0.1:9552/metrics' + - name: apt_exporter_local + url: 'http://127.0.0.1:9553/metrics' + - name: acc_server_manager_exporter_local + url: 'http://127.0.0.1:9554/metrics' + - name: sona_exporter_local + url: 'http://127.0.0.1:9555/metrics' + - name: routinator_exporter_local + url: 'http://127.0.0.1:9556/metrics' + - name: mysql_count_exporter_local + url: 'http://127.0.0.1:9557/metrics' + - name: systemd_exporter_local + url: 'http://127.0.0.1:9558/metrics' + - name: ntp_exporter_local + url: 'http://127.0.0.1:9559/metrics' + - name: sql_queries_exporter_local + url: 'http://127.0.0.1:9560/metrics' + - name: qbittorrent_exporter_local + url: 'http://127.0.0.1:9561/metrics' + - name: ptv_xserver_exporter_local + url: 'http://127.0.0.1:9562/metrics' + - name: kibana_exporter_local + url: 'http://127.0.0.1:9563/metrics' + - name: purpleair_exporter_local + url: 'http://127.0.0.1:9564/metrics' + - name: bminer_exporter_local + url: 'http://127.0.0.1:9565/metrics' + - name: rabbitmq_cli_consumer_local + url: 'http://127.0.0.1:9566/metrics' + - name: alertsnitch_local + url: 'http://127.0.0.1:9567/metrics' + - name: dell_poweredge_ipmi_exporter_local + url: 'http://127.0.0.1:9568/metrics' + - name: hvpa_controller_local + url: 'http://127.0.0.1:9569/metrics' + - name: vpa_exporter_local + url: 'http://127.0.0.1:9570/metrics' + - name: helm_exporter_local + url: 'http://127.0.0.1:9571/metrics' + - name: ctld_exporter_local + url: 'http://127.0.0.1:9572/metrics' + - name: jkstatus_exporter_local + url: 'http://127.0.0.1:9573/metrics' + - name: opentracker_exporter_local + url: 'http://127.0.0.1:9574/metrics' + - name: poweradmin_server_monitor_exporter_local + url: 'http://127.0.0.1:9575/metrics' + - name: exabgp_exporter_local + url: 'http://127.0.0.1:9576/metrics' + - name: aria2_exporter_local + url: 'http://127.0.0.1:9578/metrics' + - name: iperf3_exporter_local + url: 'http://127.0.0.1:9579/metrics' + - name: azure_service_bus_exporter_local + url: 'http://127.0.0.1:9580/metrics' + - name: codenotary_vcn_exporter_local + url: 'http://127.0.0.1:9581/metrics' + - name: signatory_a_remote_operation_signer_for_tezos_local + url: 'http://127.0.0.1:9583/metrics' + - name: bunnycdn_exporter_local + url: 'http://127.0.0.1:9584/metrics' + - name: opvizor_performance_analyzer_process_exporter_local + url: 'http://127.0.0.1:9585/metrics' + - name: nfs-ganesha_exporter_local + url: 'http://127.0.0.1:9587/metrics' + - name: ltsv-tailer_exporter_local + url: 'http://127.0.0.1:9588/metrics' + - name: goflow_exporter_local + url: 'http://127.0.0.1:9589/metrics' + - name: flow_exporter_local + url: 'http://127.0.0.1:9590/metrics' + - name: srcds_exporter_local + url: 'http://127.0.0.1:9591/metrics' + - name: gcp_quota_exporter_local + url: 'http://127.0.0.1:9592/metrics' + - name: lighthouse_exporter_local + url: 'http://127.0.0.1:9593/metrics' + - name: plex_exporter_local + url: 'http://127.0.0.1:9594/metrics' + - name: netio_exporter_local + url: 'http://127.0.0.1:9595/metrics' + - name: azure_elastic_sql_exporter_local + url: 'http://127.0.0.1:9596/metrics' + - name: github_vulnerability_alerts_exporter_local + url: 'http://127.0.0.1:9597/metrics' + - name: pirograph_exporter_local + url: 'http://127.0.0.1:9599/metrics' + - name: circleci_exporter_local + url: 'http://127.0.0.1:9600/metrics' + - name: messagebird_exporter_local + url: 'http://127.0.0.1:9601/metrics' + - name: modbus_exporter_local + url: 'http://127.0.0.1:9602/metrics' + - name: 
xen_exporter_using_xenlight_local + url: 'http://127.0.0.1:9603/metrics' + - name: xmpp_blackbox_exporter_local + url: 'http://127.0.0.1:9604/metrics' + - name: fping-exporter_local + url: 'http://127.0.0.1:9605/metrics' + - name: ecr-exporter_local + url: 'http://127.0.0.1:9606/metrics' + - name: raspberry_pi_sense_hat_exporter_local + url: 'http://127.0.0.1:9607/metrics' + - name: ironic_prometheus_exporter_local + url: 'http://127.0.0.1:9608/metrics' + - name: netapp_exporter_local + url: 'http://127.0.0.1:9609/metrics' + - name: kubernetes_exporter_local + url: 'http://127.0.0.1:9610/metrics' + - name: speedport_exporter_local + url: 'http://127.0.0.1:9611/metrics' + - name: opflex-agent_exporter_local + url: 'http://127.0.0.1:9612/metrics' + - name: azure_health_exporter_local + url: 'http://127.0.0.1:9613/metrics' + - name: nut_upsc_exporter_local + url: 'http://127.0.0.1:9614/metrics' + - name: mellanox_mlx5_exporter_local + url: 'http://127.0.0.1:9615/metrics' + - name: mailgun_exporter_local + url: 'http://127.0.0.1:9616/metrics' + - name: pi-hole_exporter_local + url: 'http://127.0.0.1:9617/metrics' + - name: stellar-account-exporter_local + url: 'http://127.0.0.1:9618/metrics' + - name: stellar-horizon-exporter_local + url: 'http://127.0.0.1:9619/metrics' + - name: rundeck_exporter_local + url: 'http://127.0.0.1:9620/metrics' + - name: opennebula_exporter_local + url: 'http://127.0.0.1:9621/metrics' + - name: bmc_exporter_local + url: 'http://127.0.0.1:9622/metrics' + - name: tc4400_exporter_local + url: 'http://127.0.0.1:9623/metrics' + - name: pact_broker_exporter_local + url: 'http://127.0.0.1:9624/metrics' + - name: bareos_exporter_local + url: 'http://127.0.0.1:9625/metrics' + - name: hockeypuck_local + url: 'http://127.0.0.1:9626/metrics' + - name: artifactory_exporter_local + url: 'http://127.0.0.1:9627/metrics' + - name: solace_pubsub_plus_exporter_local + url: 'http://127.0.0.1:9628/metrics' + - name: prometheus_gitlab_notifier_local + url: 'http://127.0.0.1:9629/metrics' + - name: nftables_exporter_local + url: 'http://127.0.0.1:9630/metrics' + - name: a_op5_monitor_exporter_local + url: 'http://127.0.0.1:9631/metrics' + - name: opflex-server_exporter_local + url: 'http://127.0.0.1:9632/metrics' + - name: smartctl_exporter_local + url: 'http://127.0.0.1:9633/metrics' + - name: aerospike_ttl_exporter_local + url: 'http://127.0.0.1:9634/metrics' + - name: fail2ban_exporter_local + url: 'http://127.0.0.1:9635/metrics' + - name: exim4_exporter_local + url: 'http://127.0.0.1:9636/metrics' + - name: kubeversion_exporter_local + url: 'http://127.0.0.1:9637/metrics' + - name: a_icinga2_exporter_local + url: 'http://127.0.0.1:9638/metrics' + - name: scriptable_jmx_exporter_local + url: 'http://127.0.0.1:9639/metrics' + - name: logstash_output_exporter_local + url: 'http://127.0.0.1:9640/metrics' + - name: coturn_exporter_local + url: 'http://127.0.0.1:9641/metrics' + - name: bugsnag_exporter_local + url: 'http://127.0.0.1:9642/metrics' + - name: exporter_for_grouped_process_local + url: 'http://127.0.0.1:9644/metrics' + - name: burp_exporter_local + url: 'http://127.0.0.1:9645/metrics' + - name: locust_exporter_local + url: 'http://127.0.0.1:9646/metrics' + - name: docker_exporter_local + url: 'http://127.0.0.1:9647/metrics' + - name: ntpmon_exporter_local + url: 'http://127.0.0.1:9648/metrics' + - name: logstash_exporter_local + url: 'http://127.0.0.1:9649/metrics' + - name: keepalived_exporter_local + url: 'http://127.0.0.1:9650/metrics' + - name: storj_exporter_local + url: 
'http://127.0.0.1:9651/metrics' + - name: praefect_exporter_local + url: 'http://127.0.0.1:9652/metrics' + - name: jira_issues_exporter_local + url: 'http://127.0.0.1:9653/metrics' + - name: ansible_galaxy_exporter_local + url: 'http://127.0.0.1:9654/metrics' + - name: kube-netc_exporter_local + url: 'http://127.0.0.1:9655/metrics' + - name: matrix_local + url: 'http://127.0.0.1:9656/metrics' + - name: krill_exporter_local + url: 'http://127.0.0.1:9657/metrics' + - name: sap_hana_sql_exporter_local + url: 'http://127.0.0.1:9658/metrics' + - name: kaiterra_laser_egg_exporter_local + url: 'http://127.0.0.1:9660/metrics' + - name: hashpipe_exporter_local + url: 'http://127.0.0.1:9661/metrics' + - name: pms5003_particulate_matter_sensor_exporter_local + url: 'http://127.0.0.1:9662/metrics' + - name: sap_nwrfc_exporter_local + url: 'http://127.0.0.1:9663/metrics' + - name: linux_ha_clusterlabs_exporter_local + url: 'http://127.0.0.1:9664/metrics' + - name: senderscore_exporter_local + url: 'http://127.0.0.1:9665/metrics' + - name: alertmanager_silences_exporter_local + url: 'http://127.0.0.1:9666/metrics' + - name: smtpd_exporter_local + url: 'http://127.0.0.1:9667/metrics' + - name: suses_sap_hana_exporter_local + url: 'http://127.0.0.1:9668/metrics' + - name: panopticon_native_metrics_local + url: 'http://127.0.0.1:9669/metrics' + - name: flare_native_metrics_local + url: 'http://127.0.0.1:9670/metrics' + - name: aws_ec2_spot_exporter_local + url: 'http://127.0.0.1:9671/metrics' + - name: aircontrol_co2_exporter_local + url: 'http://127.0.0.1:9672/metrics' + - name: co2_monitor_exporter_local + url: 'http://127.0.0.1:9673/metrics' + - name: google_analytics_exporter_local + url: 'http://127.0.0.1:9674/metrics' + - name: docker_swarm_exporter_local + url: 'http://127.0.0.1:9675/metrics' + - name: hetzner_traffic_exporter_local + url: 'http://127.0.0.1:9676/metrics' + - name: aws_ecs_exporter_local + url: 'http://127.0.0.1:9677/metrics' + - name: ircd_user_exporter_local + url: 'http://127.0.0.1:9678/metrics' + - name: aws_health_exporter_local + url: 'http://127.0.0.1:9679/metrics' + - name: suses_sap_host_exporter_local + url: 'http://127.0.0.1:9680/metrics' + - name: myfitnesspal_exporter_local + url: 'http://127.0.0.1:9681/metrics' + - name: powder_monkey_local + url: 'http://127.0.0.1:9682/metrics' + - name: infiniband_exporter_local + url: 'http://127.0.0.1:9683/metrics' + - name: kibana_standalone_exporter_local + url: 'http://127.0.0.1:9684/metrics' + - name: eideticom_local + url: 'http://127.0.0.1:9685/metrics' + - name: aws_ec2_exporter_local + url: 'http://127.0.0.1:9686/metrics' + - name: gitaly_blackbox_exporter_local + url: 'http://127.0.0.1:9687/metrics' + - name: lan_server_modbus_exporter_local + url: 'http://127.0.0.1:9689/metrics' + - name: tcp_longterm_connection_exporter_local + url: 'http://127.0.0.1:9690/metrics' + - name: celery_redis_exporter_local + url: 'http://127.0.0.1:9691/metrics' + - name: gcp_gce_exporter_local + url: 'http://127.0.0.1:9692/metrics' + - name: sigma_air_manager_exporter_local + url: 'http://127.0.0.1:9693/metrics' + - name: per-user_usage_exporter_for_cisco_xe_lnss_local + url: 'http://127.0.0.1:9694/metrics' + - name: cifs_exporter_local + url: 'http://127.0.0.1:9695/metrics' + - name: jitsi_videobridge_exporter_local + url: 'http://127.0.0.1:9696/metrics' + - name: tendermint_blockchain_exporter_local + url: 'http://127.0.0.1:9697/metrics' + - name: integrated_dell_remote_access_controller_idrac_exporter_local + url: 
'http://127.0.0.1:9698/metrics' + - name: pyncette_exporter_local + url: 'http://127.0.0.1:9699/metrics' + - name: jitsi_meet_exporter_local + url: 'http://127.0.0.1:9700/metrics' + - name: workbook_exporter_local + url: 'http://127.0.0.1:9701/metrics' + - name: homeplug_plc_exporter_local + url: 'http://127.0.0.1:9702/metrics' + - name: vircadia_local + url: 'http://127.0.0.1:9703/metrics' + - name: linux_tc_exporter_local + url: 'http://127.0.0.1:9704/metrics' + - name: upc_connect_box_exporter_local + url: 'http://127.0.0.1:9705/metrics' + - name: postfix_exporter_local + url: 'http://127.0.0.1:9706/metrics' + - name: radarr_exporter_local + url: 'http://127.0.0.1:9707/metrics' + - name: sonarr_exporter_local + url: 'http://127.0.0.1:9708/metrics' + - name: hadoop_hdfs_fsimage_exporter_local + url: 'http://127.0.0.1:9709/metrics' + - name: nut-exporter_local + url: 'http://127.0.0.1:9710/metrics' + - name: cloudflare_flan_scan_report_exporter_local + url: 'http://127.0.0.1:9711/metrics' + - name: siemens_s7_plc_exporter_local + url: 'http://127.0.0.1:9712/metrics' + - name: glusterfs_exporter_local + url: 'http://127.0.0.1:9713/metrics' + - name: fritzbox_exporter_local + url: 'http://127.0.0.1:9714/metrics' + - name: twincat_ads_web_service_exporter_local + url: 'http://127.0.0.1:9715/metrics' + - name: signald_webhook_receiver_local + url: 'http://127.0.0.1:9716/metrics' + - name: tplink_easysmart_switch_exporter_local + url: 'http://127.0.0.1:9717/metrics' + - name: warp10_exporter_local + url: 'http://127.0.0.1:9718/metrics' + - name: pgpool-ii_exporter_local + url: 'http://127.0.0.1:9719/metrics' + - name: moodle_db_exporter_local + url: 'http://127.0.0.1:9720/metrics' + - name: gtp_exporter_local + url: 'http://127.0.0.1:9721/metrics' + - name: miele_exporter_local + url: 'http://127.0.0.1:9722/metrics' + - name: freeswitch_exporter_local + url: 'http://127.0.0.1:9724/metrics' + - name: sunnyboy_exporter_local + url: 'http://127.0.0.1:9725/metrics' + - name: python_rq_exporter_local + url: 'http://127.0.0.1:9726/metrics' + - name: ctdb_exporter_local + url: 'http://127.0.0.1:9727/metrics' + - name: nginx-rtmp_exporter_local + url: 'http://127.0.0.1:9728/metrics' + - name: libvirtd_exporter_local + url: 'http://127.0.0.1:9729/metrics' + - name: lynis_exporter_local + url: 'http://127.0.0.1:9730/metrics' + - name: nebula_mam_exporter_local + url: 'http://127.0.0.1:9731/metrics' + - name: nftables_exporter_local + url: 'http://127.0.0.1:9732/metrics' + - name: honeypot_exporter_local + url: 'http://127.0.0.1:9733/metrics' + - name: a10-networks_prometheus_exporter_local + url: 'http://127.0.0.1:9734/metrics' + - name: webweaver_local + url: 'http://127.0.0.1:9735/metrics' + - name: mongodb_query_exporter_local + url: 'http://127.0.0.1:9736/metrics' + - name: folding_home_exporter_local + url: 'http://127.0.0.1:9737/metrics' + - name: processor_counter_monitor_exporter_local + url: 'http://127.0.0.1:9738/metrics' + - name: kafka_consumer_lag_monitoring_local + url: 'http://127.0.0.1:9739/metrics' + - name: flightdeck_local + url: 'http://127.0.0.1:9740/metrics' + - name: ibm_spectrum_exporter_local + url: 'http://127.0.0.1:9741/metrics' + - name: transmission-exporter_local + url: 'http://127.0.0.1:9742/metrics' + - name: sma-exporter_local + url: 'http://127.0.0.1:9743/metrics' + - name: site24x7_exporter_local + url: 'http://127.0.0.1:9803/metrics' + - name: envoy_proxy_local + url: 'http://127.0.0.1:9901/metrics' + - name: nginx_vts_exporter_local + url: 
'http://127.0.0.1:9913/metrics' + - name: login_exporter_local + url: 'http://127.0.0.1:9980/metrics' + - name: filestat_exporter_local + url: 'http://127.0.0.1:9943/metrics' + - name: sia_exporter_local + url: 'http://127.0.0.1:9983/metrics' + - name: couchdb_exporter_local + url: 'http://127.0.0.1:9984/metrics' + - name: netapp_solidfire_exporter_local + url: 'http://127.0.0.1:9987/metrics' + - name: wildfly_exporter_local + url: 'http://127.0.0.1:9990/metrics' + - name: prometheus-jdbc-exporter_local + url: 'http://127.0.0.1:5555/metrics' + - name: midonet_agent_local + url: 'http://127.0.0.1:7300/metrics' + - name: traefik_local + url: 'http://127.0.0.1:8080/metrics' + expected_prefix: 'traefik_' + - name: trickster_local + url: 'http://127.0.0.1:8082/metrics' + - name: fawkes_local + url: 'http://127.0.0.1:8088/metrics' + - name: prom2teams_local + url: 'http://127.0.0.1:8089/metrics' + - name: phabricator_webhook_for_alertmanager_local + url: 'http://127.0.0.1:8292/metrics' + - name: ha_proxy_v2_plus_local + url: 'http://127.0.0.1:8404/metrics' + - name: rds_exporter_local + url: 'http://127.0.0.1:9042/metrics' + - name: telegram_bot_for_alertmanager_local + url: 'http://127.0.0.1:9087/metrics' + - name: jiralert_local + url: 'http://127.0.0.1:9097/metrics' + - name: storidge_exporter_local + url: 'http://127.0.0.1:16995/metrics' + - name: transmission_exporter_local + url: 'http://127.0.0.1:19091/metrics' + - name: fluent_plugin_for_prometheus_local + url: 'http://127.0.0.1:24231/metrics' + - name: proxysql_exporter_local + url: 'http://127.0.0.1:42004/metrics' + - name: pcp_exporter_local + url: 'http://127.0.0.1:44323/metrics' + - name: dcos_exporter_local + url: 'http://127.0.0.1:61091/metrics' + - name: caddy_local + url: 'http://localhost:2019/metrics' + expected_prefix: 'caddy_' + # Run Geth with --metrics flag. + # Docs: https://geth.ethereum.org/docs/interface/metrics + - name: geth_local + url: 'http://127.0.0.1:6060/debug/metrics/prometheus' + expected_prefix: 'eth_' + # Run OpenEthereum with --metrics flag. + # Docs: https://openethereum.github.io/Configuring-OpenEthereum.html?q=metrics-interface + - name: openethereum_local + url: 'http://127.0.0.1:3000/metrics' + expected_prefix: 'blockchaincache_' + - name: pushgateway_local + url: 'http://127.0.0.1:9091/metrics' + expected_prefix: 'pushgateway_' + selector: + allow: + - pushgateway_* + # Run Nethermind with --Metrics.Enabled true. + # Docs: https://docs.nethermind.io/nethermind/ethereum-client/metrics/setting-up-local-metrics-infrastracture + - name: nethermind_local + url: 'http://127.0.0.1:9091/metrics' + expected_prefix: 'nethermind_' + selector: + allow: + - nethermind* + # Run Besu with --metrics-enabled flag. 
+ # Docs: https://besu.hyperledger.org/en/stable/HowTo/Monitor/Metrics/ + - name: besu_local + url: '127.0.0.1:9545' + expected_prefix: 'besu_' + - name: crowdsec_local + url: http://127.0.0.1:6060/metrics + expected_prefix: 'cs_' diff --git a/src/go/collectors/go.d.plugin/config/go.d/proxysql.conf b/src/go/collectors/go.d.plugin/config/go.d/proxysql.conf new file mode 100644 index 00000000000000..100f21384035b5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/proxysql.conf @@ -0,0 +1,19 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/proxysql + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + # my.cnf + - name: local + my.cnf: '/etc/my.cnf' + + # stats + - name: local + dsn: stats:stats@tcp(127.0.0.1:6032)/ + + - name: local + dsn: stats:stats@tcp([::1]:6032)/ + diff --git a/src/go/collectors/go.d.plugin/config/go.d/pulsar.conf b/src/go/collectors/go.d.plugin/config/go.d/pulsar.conf new file mode 100644 index 00000000000000..147c8e1846b8cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/pulsar.conf @@ -0,0 +1,11 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/pulsar + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8080/metrics + diff --git a/src/go/collectors/go.d.plugin/config/go.d/rabbitmq.conf b/src/go/collectors/go.d.plugin/config/go.d/rabbitmq.conf new file mode 100644 index 00000000000000..9b1db9f5c99da7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/rabbitmq.conf @@ -0,0 +1,19 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/rabbitmq + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:15672 + username: guest + password: guest + collect_queues_metrics: no + + - name: local + url: http://127.0.0.1:15672 + username: guest + password: guest + collect_queues_metrics: no diff --git a/src/go/collectors/go.d.plugin/config/go.d/redis.conf b/src/go/collectors/go.d.plugin/config/go.d/redis.conf new file mode 100644 index 00000000000000..67ea04a8a49587 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/redis.conf @@ -0,0 +1,19 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/redis + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 'redis://@127.0.0.1:6379' + + - name: local + address: 'unix://@/tmp/redis.sock' + + - name: local + address: 'unix://@/var/run/redis/redis.sock' + + - name: local + address: 'unix://@/var/lib/redis/redis.sock' diff --git a/src/go/collectors/go.d.plugin/config/go.d/scaleio.conf b/src/go/collectors/go.d.plugin/config/go.d/scaleio.conf new file mode 100644 index 00000000000000..7206bab81ff720 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/scaleio.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio + +#update_every : 1 +#autodetection_retry : 0 +#priority : 70000 + +#jobs: +# - name : local +# url : https://127.0.0.1 +# username : admin +# password : password + +# - name : remote +# url : https://100.64.0.1 +# 
username : admin +# password : password diff --git a/src/go/collectors/go.d.plugin/config/go.d/snmp.conf b/src/go/collectors/go.d.plugin/config/go.d/snmp.conf new file mode 100644 index 00000000000000..dc4da60f6529cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/snmp.conf @@ -0,0 +1,52 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/snmp + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: switch +# update_every: 10 +# hostname: "192.0.2.1" +# community: public +# options: +# version: 2 +# user: +# name: "username" +# level: "authPriv" +# auth_proto: "sha256" +# auth_key: "auth_protocol_passphrase" +# priv_proto: "aes256" +# priv_key: "priv_protocol_passphrase" +# charts: +# - id: "bandwidth_port1" +# title: "Switch Bandwidth for port 1" +# units: "kilobits/s" +# type: "area" +# family: "ports" +# dimensions: +# - name: "in" +# oid: "1.3.6.1.2.1.2.2.1.10.1" +# algorithm: "incremental" +# multiplier: 8 +# divisor: 1000 +# - name: "out" +# oid: "1.3.6.1.2.1.2.2.1.16.1" +# multiplier: -8 +# divisor: 1000 +# - id: "bandwidth_port2" +# title: "Switch Bandwidth for port 2" +# units: "kilobits/s" +# type: "area" +# family: "ports" +# dimensions: +# - name: "in" +# oid: "1.3.6.1.2.1.2.2.1.10.2" +# algorithm: "incremental" +# multiplier: 8 +# divisor: 1000 +# - name: "out" +# oid: "1.3.6.1.2.1.2.2.1.16.2" +# multiplier: -8 +# divisor: 1000 diff --git a/src/go/collectors/go.d.plugin/config/go.d/solr.conf b/src/go/collectors/go.d.plugin/config/go.d/solr.conf new file mode 100644 index 00000000000000..c0cc7d095ef9d9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/solr.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/solr + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:8983 + + - name: local + url: http://127.0.0.1:8983 diff --git a/src/go/collectors/go.d.plugin/config/go.d/springboot2.conf b/src/go/collectors/go.d.plugin/config/go.d/springboot2.conf new file mode 100644 index 00000000000000..6328bcc579f614 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/springboot2.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2 + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost:8080/actuator/prometheus + + - name: local + url: http://127.0.0.1:8080/actuator/prometheus diff --git a/src/go/collectors/go.d.plugin/config/go.d/squidlog.conf b/src/go/collectors/go.d.plugin/config/go.d/squidlog.conf new file mode 100644 index 00000000000000..5f70ff5101addd --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/squidlog.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/squidlog + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: squidlog + path: /var/log/squid/access.log + + - name: squidlog + path: /var/log/squid3/access.log diff --git a/src/go/collectors/go.d.plugin/config/go.d/supervisord.conf b/src/go/collectors/go.d.plugin/config/go.d/supervisord.conf new file mode 100644 index 00000000000000..ef5e929fe7b842 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/config/go.d/supervisord.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/supervisord + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: 'http://127.0.0.1:9001/RPC2' + + - name: local + url: 'unix:///run/supervisor.sock' diff --git a/src/go/collectors/go.d.plugin/config/go.d/systemdunits.conf b/src/go/collectors/go.d.plugin/config/go.d/systemdunits.conf new file mode 100644 index 00000000000000..36507fd0545758 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/systemdunits.conf @@ -0,0 +1,20 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits + +#update_every: 10 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: service-units + include: + - '*.service' + +# - name: my-specific-service-unit +# include: +# - 'my-specific.service' +# + +# - name: socket-units +# include: +# - '*.socket' diff --git a/src/go/collectors/go.d.plugin/config/go.d/tengine.conf b/src/go/collectors/go.d.plugin/config/go.d/tengine.conf new file mode 100644 index 00000000000000..33bbdd6b6e9057 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/tengine.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/tengine + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://localhost/us + + - name: local + url: http://127.0.0.1/us diff --git a/src/go/collectors/go.d.plugin/config/go.d/traefik.conf b/src/go/collectors/go.d.plugin/config/go.d/traefik.conf new file mode 100644 index 00000000000000..f0be8baf72963b --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/traefik.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/traefik + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8082/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/unbound.conf b/src/go/collectors/go.d.plugin/config/go.d/unbound.conf new file mode 100644 index 00000000000000..ac3cd4042327fa --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/unbound.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/unbound + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 127.0.0.1:8953 + timeout: 1 + conf_path: /etc/unbound/unbound.conf + cumulative_stats: no + use_tls: yes + tls_skip_verify: yes + tls_cert: /etc/unbound/unbound_control.pem + tls_key: /etc/unbound/unbound_control.key diff --git a/src/go/collectors/go.d.plugin/config/go.d/upsd.conf b/src/go/collectors/go.d.plugin/config/go.d/upsd.conf new file mode 100644 index 00000000000000..87a5462003dd1d --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/upsd.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/upsd + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: upsd + address: 127.0.0.1:3493 diff --git 
a/src/go/collectors/go.d.plugin/config/go.d/vcsa.conf b/src/go/collectors/go.d.plugin/config/go.d/vcsa.conf new file mode 100644 index 00000000000000..0a7a2e55febd3e --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/vcsa.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/vcsa + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name : vcsa1 +# url : https://203.0.113.0 +# username : admin@vsphere.local +# password : somepassword +# +# - name : vcsa2 +# url : https://203.0.113.10 +# username : admin@vsphere.local +# password : somepassword diff --git a/src/go/collectors/go.d.plugin/config/go.d/vernemq.conf b/src/go/collectors/go.d.plugin/config/go.d/vernemq.conf new file mode 100644 index 00000000000000..55877f7076422c --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/vernemq.conf @@ -0,0 +1,10 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/vernemq + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + url: http://127.0.0.1:8888/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/vsphere.conf b/src/go/collectors/go.d.plugin/config/go.d/vsphere.conf new file mode 100644 index 00000000000000..e3a6c7f1a8882c --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/vsphere.conf @@ -0,0 +1,17 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/vsphere + +#update_every : 20 # do not decrease this value; VMware real-time stats are generated at 20-second granularity.
+#autodetection_retry : 0 +#priority : 70000 + +#jobs: +# - name : vcenter1 +# url : https://203.0.113.0 +# username : admin@vsphere.local +# password : somepassword +# +# - name : vcenter2 +# url : https://203.0.113.10 +# username : admin@vsphere.local +# password : somepassword diff --git a/src/go/collectors/go.d.plugin/config/go.d/web_log.conf b/src/go/collectors/go.d.plugin/config/go.d/web_log.conf new file mode 100644 index 00000000000000..60ea342ff0d2b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/web_log.conf @@ -0,0 +1,48 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/web_log + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + # NGINX + # debian, arch + - name: nginx + path: /var/log/nginx/access.log + + # gentoo + - name: nginx + path: /var/log/nginx/localhost.access_log + + # APACHE + # debian + - name: apache + path: /var/log/apache2/access.log + + # gentoo + - name: apache + path: /var/log/apache2/access_log + + # arch + - name: apache + path: /var/log/httpd/access_log + + # debian + - name: apache_vhosts + path: /var/log/apache2/other_vhosts_access.log + + # GUNICORN + - name: gunicorn + path: /var/log/gunicorn/access.log + + - name: gunicorn + path: /var/log/gunicorn/gunicorn-access.log + + # IIS + # This configuration assumes you are running netdata on WSL + - name: iis + path: /mnt/c/inetpub/logs/LogFiles/W3SVC1/u_ex*.log + log_type: csv + csv_config: + format: '- - $host $request_method $request_uri - $server_port - $remote_addr - - $status - - $request_time' diff --git a/src/go/collectors/go.d.plugin/config/go.d/whoisquery.conf b/src/go/collectors/go.d.plugin/config/go.d/whoisquery.conf new file mode 100644 index 00000000000000..47e1f0de61e32b --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/whoisquery.conf @@ -0,0 +1,11 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/whoisquery + +#update_every: 60 +#autodetection_retry: 0 +#priority: 70000 +# + +# jobs: +# - name: example +# source: example.org diff --git a/src/go/collectors/go.d.plugin/config/go.d/windows.conf b/src/go/collectors/go.d.plugin/config/go.d/windows.conf new file mode 100644 index 00000000000000..8a394f356e9f71 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/windows.conf @@ -0,0 +1,19 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/windows + +#update_every: 5 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: hostname.local + url: http://hostname.local:9182/metrics + + - name: hostname.local + url: http://127.0.0.1:9182/metrics + +# - name: win_server1 +# url: http://10.0.0.1:9182/metrics +# +# - name: win_server2 +# url: http://10.0.0.2:9182/metrics diff --git a/src/go/collectors/go.d.plugin/config/go.d/wireguard.conf b/src/go/collectors/go.d.plugin/config/go.d/wireguard.conf new file mode 100644 index 00000000000000..c58d846b2caa4f --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/wireguard.conf @@ -0,0 +1,9 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/wireguard + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: wireguard diff --git a/src/go/collectors/go.d.plugin/config/go.d/x509check.conf 
b/src/go/collectors/go.d.plugin/config/go.d/x509check.conf new file mode 100644 index 00000000000000..ba9538a3dadd17 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/x509check.conf @@ -0,0 +1,16 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/x509check + +#update_every: 60 +#autodetection_retry: 0 +#priority: 70000 + +#jobs: +# - name: my_site_cert +# source: https://my_site.org:443 +# +# - name: my_file_cert +# source: file:///home/me/cert.pem +# +# - name: my_smtp_cert +# source: smtp://smtp.my_mail.org:587 diff --git a/src/go/collectors/go.d.plugin/config/go.d/zookeeper.conf b/src/go/collectors/go.d.plugin/config/go.d/zookeeper.conf new file mode 100644 index 00000000000000..58607ecd9c91b5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/config/go.d/zookeeper.conf @@ -0,0 +1,13 @@ +## All available configuration options, their descriptions and default values: +## https://github.com/netdata/go.d.plugin/tree/master/modules/zookeeper + +#update_every: 1 +#autodetection_retry: 0 +#priority: 70000 + +jobs: + - name: local + address: 127.0.0.1:2181 + + - name: local + address: 127.0.0.1:2182 diff --git a/src/go/collectors/go.d.plugin/docker-compose.yml b/src/go/collectors/go.d.plugin/docker-compose.yml new file mode 100644 index 00000000000000..28a27ad1295abb --- /dev/null +++ b/src/go/collectors/go.d.plugin/docker-compose.yml @@ -0,0 +1,54 @@ +version: '3' +services: + netdata: + build: + context: . + dockerfile: Dockerfile.dev + ports: + - '19999:19999' + cap_add: + - SYS_PTRACE + security_opt: + - apparmor:unconfined + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - ./mocks/tmp:/usr/local/apache2/logs:ro + depends_on: + - springboot2 + - httpd + + springboot2: + build: mocks/springboot2 + ports: + - '8080:8080' + + logstash: + image: docker.elastic.co/logstash/logstash:7.3.1 + ports: + - '9600:9600' + + httpd: + image: httpd:2.4 + volumes: + - ./mocks/httpd/httpd.conf:/usr/local/apache2/conf/httpd.conf:ro + - ./mocks/tmp:/usr/local/apache2/logs:rw + + blackbox_probe: + build: mocks/blackbox + depends_on: + - springboot2 + - httpd + command: + - '/bin/sh' + - '-c' + - | + while :; do + curl -o /dev/null http://springboot2:8080/hello || : + curl -o /dev/null http://springboot2:8080/hello || : + curl -o /dev/null http://springboot2:8080/ || : + curl -o /dev/null http://httpd/ || : + curl -o /dev/null -XPOST http://httpd/post || : + curl -o /dev/null http://httpd/not_exist || : + sleep 0.7 + done diff --git a/src/go/collectors/go.d.plugin/docs/how-to-write-a-module.md b/src/go/collectors/go.d.plugin/docs/how-to-write-a-module.md new file mode 100644 index 00000000000000..4107a04f5ec10b --- /dev/null +++ b/src/go/collectors/go.d.plugin/docs/how-to-write-a-module.md @@ -0,0 +1,289 @@ +<!-- +title: "How to write a Netdata collector in Go" +description: "This guide will walk you through the technical implementation of writing a new Netdata collector in Golang, with tips on interfaces, structure, configuration files, and more." 
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/docs/how-to-write-a-module.md" +sidebar_label: "How to write a Netdata collector in Go" +learn_status: "Published" +learn_topic_type: "Tasks" +learn_rel_path: "Developers/External plugins/go.d.plugin" +sidebar_position: 20 +--> + +# How to write a Netdata collector in Go + +## Prerequisites + +- Take a look at our [contributing guidelines](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md). +- [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) this repository to your personal + GitHub account. +- [Clone](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/cloning-a-repository#:~:text=to%20GitHub%20Desktop-,On%20GitHub%2C%20navigate%20to%20the%20main%20page%20of%20the%20repository,Desktop%20to%20complete%20the%20clone.) + the **forked** repository locally (e.g. `git clone https://github.com/odyslam/go.d.plugin`). +- Using a terminal, `cd` into the directory (e.g. `cd go.d.plugin`). + + +## Write and test a simple collector + +> :exclamation: You can skip most of these steps if you first experiment directly with the existing +> [example module](https://github.com/netdata/go.d.plugin/tree/master/modules/example), which will +> give you an idea of how things work. + +Let's assume you want to write a collector named `example2`. + +The steps are: + +- Add the source code to [`modules/example2/`](https://github.com/netdata/go.d.plugin/tree/master/modules). + - [module interface](#module-interface). + - [suggested module layout](#module-layout). + - [helper packages](#helper-packages). +- Add the configuration to [`config/go.d/example2.conf`](https://github.com/netdata/go.d.plugin/tree/master/config/go.d). +- Add the module to [`config/go.d.conf`](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf). +- Import the module in [`modules/init.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/init.go) (see the sketch after this list). +- Update the [`available modules list`](https://github.com/netdata/go.d.plugin#available-modules). +- To build it, run `make` from the plugin root dir. This creates a new `go.d.plugin` binary that includes your newly + developed collector. It is placed into the `bin` directory (e.g. `go.d.plugin/bin`). +- Run it in debug mode: `bin/godplugin -d -m <MODULE_NAME>`. This prints the collector's output to `STDOUT`, the same + output that is sent to the Netdata Agent and transformed into charts. You can read more about this collector API in + our [documentation](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#external-plugins-api). +- If you want to test the collector with the actual Netdata Agent, you need to replace the `go.d.plugin` binary that + exists in the Netdata Agent installation directory with the one you just compiled. Once + you [restart](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) the Netdata Agent, it will detect and run + it, creating all the charts. It is advised not to remove the default `go.d.plugin` binary; simply rename it + to `go.d.plugin.old` so that the Agent doesn't run it, and rename it back once you are done. +- Run `make clean` when you are done with testing.
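+ +Before diving into the interface, here is a minimal sketch of the registration and import steps above. The `module.Register` call mirrors the bundled example module; the `example2` package path and the `New` constructor are illustrative assumptions. + +``` +// modules/example2/example2.go + +package example2 + +import "github.com/netdata/go.d.plugin/agent/module" + +func init() { + // Self-register the collector under the name used in config/go.d.conf. + module.Register("example2", module.Creator{ + Create: func() module.Module { return New() }, + }) +} +``` + +``` +// modules/init.go + +import ( + // ... + // Blank import, kept only for its init() side effect: it registers example2. + _ "github.com/netdata/go.d.plugin/modules/example2" +) +``` + +Without the blank import the package is never linked into the plugin binary, so its `init()` never runs and the module stays invisible.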
+ + ## Module Interface + +Every module should implement the following interface: + +``` +type Module interface { + Init() bool + Check() bool + Charts() *Charts + Collect() map[string]int64 + Cleanup() +} +``` + +### Init method + +- `Init` does module initialization. +- If it returns `false`, the job will be disabled. + +We propose to use the following template: + +``` +// example.go + +func (e *Example) Init() bool { + err := e.validateConfig() + if err != nil { + e.Errorf("config validation: %v", err) + return false + } + + someValue, err := e.initSomeValue() + if err != nil { + e.Errorf("someValue init: %v", err) + return false + } + e.someValue = someValue + + // ... + return true +} +``` + +Move specific initialization methods into the `init.go` file. See [suggested module layout](#module-layout). + +### Check method + +- `Check` returns whether the job is able to collect metrics. +- Called after `Init` and only if `Init` returned `true`. +- If it returns `false`, the job will be disabled. + +The simplest way to implement `Check` is to see if we are getting any metrics from `Collect`. Many modules use this +approach. + +``` +// example.go + +func (e *Example) Check() bool { + return len(e.Collect()) > 0 +} +``` + +### Charts method + +:exclamation: A Netdata module produces [`charts`](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart), not +raw metrics. + +Use the [`agent/module`](https://github.com/netdata/go.d.plugin/blob/master/agent/module/charts.go) package to create them; +it contains the chart and dimension structs. + +- `Charts` returns the [charts](https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/README.md#chart) (`*module.Charts`). +- Called after `Check` and only if `Check` returned `true`. +- If it returns `nil`, the job will be disabled. +- :warning: Make sure not to share the returned value between module instances (jobs). + +Usually charts are initialized in `Init`, and the `Charts` method just returns the charts instance: + +``` +// example.go + +func (e *Example) Charts() *Charts { + return e.charts +} +``` + +### Collect method + +- `Collect` collects metrics. +- Called only if `Check` returned `true`. +- Called every `update_every` seconds. +- The `map[string]int64` keys are chart dimension IDs. + +We propose to use the following template: + +``` +// example.go + +func (e *Example) Collect() map[string]int64 { + ms, err := e.collect() + if err != nil { + e.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} +``` + +Move metrics collection logic into the `collect.go` file. See [suggested module layout](#module-layout). + +### Cleanup method + +- `Cleanup` performs the job cleanup/teardown. +- Called if `Init` or `Check` fails, or when we want to stop the job after `Collect`. + +If you have nothing to clean up: + +``` +// example.go + +func (Example) Cleanup() {} +``` + +## Module Layout + +The general idea is not to put everything in a single file. + +We recommend using one file per logical area. This approach makes it easier to maintain the module. + +Suggested minimal layout: + +| Filename | Contains | +|---------------------------------------------------|--------------------------------------------------------| +| [`module_name.go`](#file-module_namego) | Module configuration, implementation and registration. | +| [`charts.go`](#file-chartsgo) | Charts, chart templates and constructor functions. | +| [`init.go`](#file-initgo) | Initialization methods.
| +| [`collect.go`](#file-collectgo) | Metrics collection implementation. | +| [`module_name_test.go`](#file-module_name_testgo) | Public methods/functions tests. | +| [`testdata/`](#directory-testdata) | Files containing sample data. | + +### File `module_name.go` + +> :exclamation: See the example [`example.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/example/example.go). + +Don't overload this file with implementation details. + +Usually it contains only: + +- module registration. +- module configuration. +- [module interface implementation](#module-interface). + +### File `charts.go` + +> :exclamation: See the example: [`charts.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/example/charts.go). + +Put charts, chart templates and chart constructor functions in this file. + +### File `init.go` + +> :exclamation: See the example: [`init.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/example/init.go). + +All the module initialization details should go in this file. + +- make a function for each value that needs to be initialized. +- a function should return value(s), not implicitly set/change any values in the main struct. + +``` +// init.go + +// Prefer this approach. +func (e Example) initSomeValue() (someValue, error) { + // ... + return someValue, nil +} + +// This approach is OK too, but we recommend not using it. +func (e *Example) initSomeValue() error { + // ... + e.someValue = someValue + return nil +} +``` + +### File `collect.go` + +> :exclamation: See the example: [`collect.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/example/collect.go). + +This file is the entry point for metrics collection. + +Feel free to split it into several files if you think it makes the code more readable. + +Use the `collect_` prefix for the filenames: `collect_this.go`, `collect_that.go`, etc. + +``` +// collect.go + +func (e *Example) collect() (map[string]int64, error) { + collected := make(map[string]int64) + // ... + // ... + // ... + return collected, nil +} +``` + +### File `module_name_test.go` + +> :exclamation: See the example: [`example_test.go`](https://github.com/netdata/go.d.plugin/blob/master/modules/example/example_test.go). + +> If you have no experience with testing, we recommend starting with the [testing package documentation](https://golang.org/pkg/testing/). + +> We use the `assert` and `require` packages from the [github.com/stretchr/testify](https://github.com/stretchr/testify) library; +> check [their documentation](https://pkg.go.dev/github.com/stretchr/testify). + +Testing is mandatory. + +- test only public functions and methods (`New`, `Init`, `Check`, `Charts`, `Cleanup`, `Collect`). +- do not create a test function per case; use [table-driven tests](https://github.com/golang/go/wiki/TableDrivenTests) (a minimal sketch follows at the end of this guide). Prefer `map[string]struct{ ... }` over `[]struct{ ... }`. +- use helper functions _to prepare_ test cases to keep them clean and readable. + +### Directory `testdata/` + +Put files with sample data in this directory if you need any. Its name should +be [`testdata`](https://golang.org/cmd/go/#hdr-Package_lists_and_patterns). + +> Directory and file names that begin with "." or "_" are ignored by the go tool, as are directories named "testdata". + +## Helper packages + +There are [some helper packages](https://github.com/netdata/go.d.plugin/tree/master/pkg) for writing a module.
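+ +To close, here is a minimal sketch of the table-driven test style referenced above, using `testify`. The `prepare` helper and the expected dimension values are illustrative assumptions, not the example module's real tests. + +``` +// module_name_test.go + +package example + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExample_Collect(t *testing.T) { + // One named case per map entry; add cases without touching the loop below. + tests := map[string]struct { + prepare func() *Example + want map[string]int64 + }{ + "collects metrics from a prepared module": { + prepare: func() *Example { return New() }, + want: map[string]int64{"random0": 1, "random1": 2}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + e := test.prepare() + require.True(t, e.Init()) + // Compare collected metrics against the expected map for this case. + assert.Equal(t, test.want, e.Collect()) + }) + } +} +```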
+ diff --git a/src/go/collectors/go.d.plugin/examples/simple/main.go b/src/go/collectors/go.d.plugin/examples/simple/main.go new file mode 100644 index 00000000000000..9982b91fce1dc5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/examples/simple/main.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package main + +import ( + "fmt" + "log/slog" + "math/rand" + "os" + "path" + + "github.com/netdata/go.d.plugin/agent" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/cli" + "github.com/netdata/go.d.plugin/logger" + "github.com/netdata/go.d.plugin/pkg/multipath" + + "github.com/jessevdk/go-flags" +) + +var version = "v0.0.1-example" + +type example struct{ module.Base } + +func (example) Cleanup() {} + +func (example) Init() bool { return true } + +func (example) Check() bool { return true } + +func (example) Charts() *module.Charts { + return &module.Charts{ + { + ID: "random", + Title: "A Random Number", Units: "random", Fam: "random", + Dims: module.Dims{ + {ID: "random0", Name: "random 0"}, + {ID: "random1", Name: "random 1"}, + }, + }, + } +} + +func (e *example) Collect() map[string]int64 { + return map[string]int64{ + "random0": rand.Int63n(100), + "random1": rand.Int63n(100), + } +} + +var ( + cd, _ = os.Getwd() + name = "goplugin" + userDir = os.Getenv("NETDATA_USER_CONFIG_DIR") + stockDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") +) + +func confDir(dirs []string) (mpath multipath.MultiPath) { + if len(dirs) > 0 { + return dirs + } + if userDir != "" && stockDir != "" { + return multipath.New( + userDir, + stockDir, + ) + } + return multipath.New( + path.Join(cd, "/../../../../etc/netdata"), + path.Join(cd, "/../../../../usr/lib/netdata/conf.d"), + ) +} + +func modulesConfDir(dirs []string) multipath.MultiPath { + if len(dirs) > 0 { + return dirs + } + if userDir != "" && stockDir != "" { + return multipath.New( + path.Join(userDir, name), + path.Join(stockDir, name), + ) + } + return multipath.New( + path.Join(cd, "/../../../../etc/netdata", name), + path.Join(cd, "/../../../../usr/lib/netdata/conf.d", name), + ) +} + +func main() { + opt := parseCLI() + + if opt.Debug { + logger.Level.Set(slog.LevelDebug) + } + if opt.Version { + fmt.Println(version) + os.Exit(0) + } + + module.Register("example", module.Creator{ + Create: func() module.Module { return &example{} }}, + ) + + p := agent.New(agent.Config{ + Name: name, + ConfDir: confDir(opt.ConfDir), + ModulesConfDir: modulesConfDir(opt.ConfDir), + ModulesSDConfPath: opt.WatchPath, + RunModule: opt.Module, + MinUpdateEvery: opt.UpdateEvery, + }) + + p.Run() +} + +func parseCLI() *cli.Option { + opt, err := cli.Parse(os.Args) + if err != nil { + if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { + os.Exit(0) + } + os.Exit(1) + } + return opt +} diff --git a/src/go/collectors/go.d.plugin/go.mod b/src/go/collectors/go.d.plugin/go.mod new file mode 100644 index 00000000000000..76b415b9684aab --- /dev/null +++ b/src/go/collectors/go.d.plugin/go.mod @@ -0,0 +1,142 @@ +module github.com/netdata/go.d.plugin + +go 1.21 + +replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.36.2 + +require ( + github.com/DATA-DOG/go-sqlmock v1.5.2 + github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Wing924/ltsv v0.3.1 + github.com/apparentlymart/go-cidr v1.1.0 + github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de + github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc + github.com/blang/semver/v4 v4.0.0 + 
github.com/bmatcuk/doublestar/v4 v4.6.1 + github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e + github.com/cloudflare/cfssl v1.6.4 + github.com/coreos/go-systemd/v22 v22.5.0 + github.com/docker/docker v24.0.7+incompatible + github.com/facebook/time v0.0.0-20230914161634-c95c229720fd + github.com/fsnotify/fsnotify v1.7.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/go-sql-driver/mysql v1.7.1 + github.com/godbus/dbus/v5 v5.1.0 + github.com/gofrs/flock v0.8.1 + github.com/golang/mock v1.6.0 + github.com/gosnmp/gosnmp v1.37.0 + github.com/ilyam8/hashstructure v1.1.0 + github.com/jackc/pgx/v4 v4.18.1 + github.com/jessevdk/go-flags v1.5.0 + github.com/likexian/whois v1.15.1 + github.com/likexian/whois-parser v1.24.10 + github.com/lmittmann/tint v1.0.4 + github.com/mattn/go-isatty v0.0.20 + github.com/mattn/go-xmlrpc v0.0.3 + github.com/miekg/dns v1.1.58 + github.com/mitchellh/go-homedir v1.1.0 + github.com/muesli/cancelreader v0.2.2 + github.com/prometheus-community/pro-bing v0.3.0 + github.com/prometheus/prometheus v2.5.0+incompatible + github.com/stretchr/testify v1.8.4 + github.com/tomasen/fcgi_client v0.0.0-20180423082037-2bb3d819fd19 + github.com/valyala/fastjson v1.6.4 + github.com/vmware/govmomi v0.34.2 + go.mongodb.org/mongo-driver v1.13.1 + golang.org/x/net v0.21.0 + golang.org/x/text v0.14.0 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b + gopkg.in/ini.v1 v1.67.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.29.1 + k8s.io/apimachinery v0.29.1 + k8s.io/client-go v0.29.1 + layeh.com/radius v0.0.0-20190322222518-890bc1058917 +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Microsoft/go-winio v0.5.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.1.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + 
github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/likexian/gokit v0.25.13 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mdlayher/genetlink v1.3.2 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.17.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/src/go/collectors/go.d.plugin/go.sum b/src/go/collectors/go.d.plugin/go.sum new file mode 100644 index 00000000000000..f83f2c6263bcad --- /dev/null +++ b/src/go/collectors/go.d.plugin/go.sum @@ -0,0 +1,524 @@ +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= 
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Wing924/ltsv v0.3.1 h1:hbjzQ6YuS/sOm7nQJG7ddT9ua1yYmcH25Q8lsuiQE0A= +github.com/Wing924/ltsv v0.3.1/go.mod h1:zl47wq7H23LocdDHg7yJAH/Qdc4MWHXu1Evx9Ahilmo= +github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= +github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA= +github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc h1:Keo7wQ7UODUaHcEi7ltENhbAK2VgZjfat6mLy03tQzo= +github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e h1:Iw4JdD/TlCUvlVWIjuV1M98rGNo/C+NxM6U5ghStom4= +github.com/clbanning/rfile/v2 v2.0.0-20231024120205-ac3fca974b0e/go.mod h1:Y53jAgtl30vLWEnRWkZFT+CpwLNsrQJb0F5AwHieNGs= +github.com/cloudflare/cfssl v1.6.4 h1:NMOvfrEjFfC63K3SGXgAnFdsgkmiq4kATme5BfcqrO8= +github.com/cloudflare/cfssl v1.6.4/go.mod h1:8b3CQMxfWPAeom3zBnGJ6sd+G1NkL5TXqmDXacb+1J0= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod 
h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebook/time v0.0.0-20230914161634-c95c229720fd h1:HLODj3PC4arOjLcAbTf7m9sqHniOALu52g5Wi4Wa8n4= +github.com/facebook/time v0.0.0-20230914161634-c95c229720fd/go.mod h1:dfouHrgxDA7FxAzPYOFIGHFcrFlG2trLpeLtA5+hs+Q= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 
h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY= +github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1 h1:K4bn56FHdjFCfjSo3wWaD6rJL8r9yvmmncJNMhdkKrw= +github.com/google/pprof v0.0.0-20220520215854-d04f2422c8a1/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gosnmp/gosnmp v1.37.0 h1:/Tf8D3b9wrnNuf/SfbvO+44mPrjVphBhRtcGg22V07Y= +github.com/gosnmp/gosnmp v1.37.0/go.mod h1:GDH9vNqpsD7f2HvZhKs5dlqSEcAS6s6Qp099oZRCR+M= +github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= +github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod 
h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/ilyam8/hashstructure v1.1.0 h1:N8t8hzzKLf2Da87XgC/DBYqXUmSbclgx+2cZxS5/klU= +github.com/ilyam8/hashstructure v1.1.0/go.mod h1:LoLuwBSNpZOi3eTMfAqe2i4oW9QkI08e6g1Pci9h7hs= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q= +github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= +github.com/jackc/pgproto3/v2 v2.3.2/go.mod 
h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= +github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/likexian/gokit v0.25.13 h1:p2Uw3+6fGG53CwdU2Dz0T6bOycdb2+bAFAa3ymwWVkM= +github.com/likexian/gokit v0.25.13/go.mod h1:qQhEWFBEfqLCO3/vOEo2EDKd+EycekVtUK4tex+l2H4= +github.com/likexian/whois v1.15.1 h1:6vTMI8n9s1eJdmcO4R9h1x99aQWIZZX1CD3am68gApU= +github.com/likexian/whois v1.15.1/go.mod h1:/nxmQ6YXvLz+qTxC/QFtEJNAt0zLuRxJrKiWpBJX8X0= +github.com/likexian/whois-parser v1.24.10 h1:Gfr+Q96PIo+HigM4r4rJ0SjN47h+URMRTdGcZ9jDXU4= +github.com/likexian/whois-parser v1.24.10/go.mod h1:b6STMHHDaSKbd4PzGrP50wWE5NzeBUETa/hT9gI0G9I= +github.com/lmittmann/tint v1.0.4 h1:LeYihpJ9hyGvE0w+K2okPTGUdVLfng1+nDNVR4vWISc= +github.com/lmittmann/tint v1.0.4/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-xmlrpc v0.0.3 h1:Y6WEMLEsqs3RviBrAa1/7qmbGB7DVD3brZIbqMbQdGY= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= +github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket 
v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= 
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEtqEfNAPp47RUON4= +github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/prometheus v0.36.2 h1:ZMqiEKdamv/YgI/7V5WtQGWbwEerCsXJ26CZgeXDUXM= +github.com/prometheus/prometheus v0.36.2/go.mod h1:GBcYMr17Nr2/iDIrWmiy9wC5GKl0NOQ5R9XynB1HAG8= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tomasen/fcgi_client v0.0.0-20180423082037-2bb3d819fd19 h1:ZCmSnT6CLGhfoQ2lPEhL4nsJstKDCw1F1RfN8/smTCU= +github.com/tomasen/fcgi_client v0.0.0-20180423082037-2bb3d819fd19/go.mod h1:SXTY+QvI+KTTKXQdg0zZ7nx0u94QWh8ZAwBQYsW9cqk= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/vmware/govmomi v0.34.2 h1:o6ydkTVITOkpQU6HAf6tP5GvHFCNJlNUNlMsvFK77X4= +github.com/vmware/govmomi v0.34.2/go.mod h1:qWWT6n9mdCr/T9vySsoUqcI04sSEj4CqHXxtk/Y+Los= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190510150013-5403a72a6aaf/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b h1:9JncmKXcUwE918my+H6xmjBdhK2jM/UTUNXxhRG1BAk= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20220504211119-3d4a969bb56b/go.mod h1:yp4gl6zOlnDGOZeWeDfMwQcsdOIQnMdhuPx9mwwWBL4= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= +layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/src/go/collectors/go.d.plugin/hack/go-build.sh b/src/go/collectors/go.d.plugin/hack/go-build.sh new file mode 100755 index 00000000000000..331b6bcff621ff --- /dev/null +++ b/src/go/collectors/go.d.plugin/hack/go-build.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +# SPDX-License-Identifier: GPL-3.0-or-later + +set -e + +PLATFORMS=( + darwin/amd64 + darwin/arm64 + freebsd/386 + freebsd/amd64 + freebsd/arm + freebsd/arm64 + linux/386 + linux/amd64 + linux/arm + linux/arm64 + linux/ppc64 + linux/ppc64le + linux/mips + linux/mipsle + linux/mips64 + linux/mips64le +) + +getos() { + local IFS=/ && read -ra array <<<"$1" && echo "${array[0]}" +} + +getarch() { + local IFS=/ && read -ra array <<<"$1" && echo "${array[1]}" +} + +WHICH="$1" + 
+VERSION="${TRAVIS_TAG:-$(git describe --tags --always --dirty)}" + +GOLDFLAGS=${GLDFLAGS:-} +GOLDFLAGS="$GOLDFLAGS -w -s -X main.version=$VERSION" + +build() { + echo "Building ${GOOS}/${GOARCH}" + CGO_ENABLED=0 GOOS="$1" GOARCH="$2" go build -ldflags "${GOLDFLAGS}" -o "$3" "github.com/netdata/go.d.plugin/cmd/godplugin" +} + +create_config_archives() { + mkdir -p bin + tar -zcvf "bin/config.tar.gz" -C config . + tar -zcvf "bin/go.d.plugin-config-${VERSION}.tar.gz" -C config . +} + +create_vendor_archives() { + mkdir -p bin + go mod vendor + tar -zc --transform "s:^:go.d.plugin-${VERSION#v}/:" -f "bin/vendor.tar.gz" vendor + tar -zc --transform "s:^:go.d.plugin-${VERSION#v}/:" -f "bin/go.d.plugin-vendor-${VERSION}.tar.gz" vendor +} + +build_all_platforms() { + for PLATFORM in "${PLATFORMS[@]}"; do + GOOS=$(getos "$PLATFORM") + GOARCH=$(getarch "$PLATFORM") + FILE="bin/go.d.plugin-${VERSION}.${GOOS}-${GOARCH}" + + build "$GOOS" "$GOARCH" "$FILE" + + ARCHIVE="${FILE}.tar.gz" + tar -C bin -cvzf "${ARCHIVE}" "${FILE/bin\//}" + rm "${FILE}" + done +} + +build_specific_platform() { + GOOS=$(getos "$1") + GOARCH=$(getarch "$1") + : "${GOARCH:=amd64}" + + build "$GOOS" "$GOARCH" bin/godplugin +} + +build_current_platform() { + eval "$(go env | grep -e "GOHOSTOS" -e "GOHOSTARCH")" + GOOS=${GOOS:-$GOHOSTOS} + GOARCH=${GOARCH:-$GOHOSTARCH} + + build "$GOOS" "$GOARCH" bin/godplugin +} + +if [[ "$WHICH" == "configs" ]]; then + echo "Creating config archives for version: $VERSION" + create_config_archives + exit 0 +fi + +if [[ "$WHICH" == "vendor" ]]; then + echo "Creating vendor archives for version: $VERSION" + create_vendor_archives + exit 0 +fi + +echo "Building binaries for version: $VERSION" + +if [[ "$WHICH" == "all" ]]; then + build_all_platforms +elif [[ -n "$WHICH" ]]; then + build_specific_platform "$WHICH" +else + build_current_platform +fi diff --git a/src/go/collectors/go.d.plugin/hack/go-fmt.sh b/src/go/collectors/go.d.plugin/hack/go-fmt.sh new file mode 100755 index 00000000000000..fcc9e2d5725547 --- /dev/null +++ b/src/go/collectors/go.d.plugin/hack/go-fmt.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# SPDX-License-Identifier: GPL-3.0-or-later + +for TARGET in "${@}"; do + find "${TARGET}" -name '*.go' -exec gofmt -s -w {} \+ +done +git diff --exit-code diff --git a/src/go/collectors/go.d.plugin/logger/default.go b/src/go/collectors/go.d.plugin/logger/default.go new file mode 100644 index 00000000000000..c8bfb4d4249851 --- /dev/null +++ b/src/go/collectors/go.d.plugin/logger/default.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logger + +import ( + "log/slog" + "os" + + "github.com/mattn/go-isatty" +) + +func newDefaultLogger() *Logger { + if isatty.IsTerminal(os.Stderr.Fd()) { + // skip 2 slog pkg calls, 3 this pkg calls + return &Logger{sl: slog.New(withCallDepth(5, newTerminalHandler()))} + } + return &Logger{sl: slog.New(newTextHandler()).With(pluginAttr)} +} + +var defaultLogger = newDefaultLogger() + +func Error(a ...any) { defaultLogger.Error(a...) } +func Warning(a ...any) { defaultLogger.Warning(a...) } +func Info(a ...any) { defaultLogger.Info(a...) } +func Debug(a ...any) { defaultLogger.Debug(a...) } +func Errorf(format string, a ...any) { defaultLogger.Errorf(format, a...) } +func Warningf(format string, a ...any) { defaultLogger.Warningf(format, a...) } +func Infof(format string, a ...any) { defaultLogger.Infof(format, a...) } +func Debugf(format string, a ...any) { defaultLogger.Debugf(format, a...) 
} +func With(args ...any) *Logger { return defaultLogger.With(args...) } diff --git a/src/go/collectors/go.d.plugin/logger/handler.go b/src/go/collectors/go.d.plugin/logger/handler.go new file mode 100644 index 00000000000000..5b300c0cdb5e76 --- /dev/null +++ b/src/go/collectors/go.d.plugin/logger/handler.go @@ -0,0 +1,77 @@ +package logger + +import ( + "context" + "log/slog" + "os" + "runtime" + "strings" + + "github.com/lmittmann/tint" +) + +func newTextHandler() slog.Handler { + return slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: Level.lvl, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.TimeKey && isJournal { + return slog.Attr{} + } + if a.Key == slog.LevelKey { + v := a.Value.Any().(slog.Level) + a.Value = slog.StringValue(strings.ToLower(v.String())) + } + return a + }, + }) +} + +func newTerminalHandler() slog.Handler { + return tint.NewHandler(os.Stderr, &tint.Options{ + AddSource: true, + Level: Level.lvl, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.TimeKey { + return slog.Attr{} + } + if a.Key == slog.SourceKey && !Level.Enabled(slog.LevelDebug) { + return slog.Attr{} + } + return a + }, + }) +} + +func withCallDepth(depth int, sh slog.Handler) slog.Handler { + if v, ok := sh.(*callDepthHandler); ok { + sh = v.sh + } + return &callDepthHandler{depth: depth, sh: sh} +} + +type callDepthHandler struct { + depth int + sh slog.Handler +} + +func (h *callDepthHandler) Enabled(ctx context.Context, level slog.Level) bool { + return h.sh.Enabled(ctx, level) +} + +func (h *callDepthHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return withCallDepth(h.depth, h.sh.WithAttrs(attrs)) +} + +func (h *callDepthHandler) WithGroup(name string) slog.Handler { + return withCallDepth(h.depth, h.sh.WithGroup(name)) +} + +func (h *callDepthHandler) Handle(ctx context.Context, r slog.Record) error { + // https://pkg.go.dev/log/slog#example-package-Wrapping + var pcs [1]uintptr + // skip Callers and this function + runtime.Callers(h.depth+2, pcs[:]) + r.PC = pcs[0] + + return h.sh.Handle(ctx, r) +} diff --git a/src/go/collectors/go.d.plugin/logger/level.go b/src/go/collectors/go.d.plugin/logger/level.go new file mode 100644 index 00000000000000..22f35f987b0e5d --- /dev/null +++ b/src/go/collectors/go.d.plugin/logger/level.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logger + +import ( + "log/slog" + "strings" +) + +var Level = &level{lvl: &slog.LevelVar{}} + +type level struct { + lvl *slog.LevelVar +} + +func (l *level) Enabled(level slog.Level) bool { + return level >= l.lvl.Level() +} + +func (l *level) Set(level slog.Level) { + l.lvl.Set(level) +} + +func (l *level) SetByName(level string) { + switch strings.ToLower(level) { + case "err", "error": + l.lvl.Set(slog.LevelError) + case "warn", "warning": + l.lvl.Set(slog.LevelWarn) + case "info": + l.lvl.Set(slog.LevelInfo) + case "debug": + l.lvl.Set(slog.LevelDebug) + } +} diff --git a/src/go/collectors/go.d.plugin/logger/logger.go b/src/go/collectors/go.d.plugin/logger/logger.go new file mode 100644 index 00000000000000..74782cfde83151 --- /dev/null +++ b/src/go/collectors/go.d.plugin/logger/logger.go @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logger + +import ( + "context" + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "sync/atomic" + "syscall" + + "github.com/netdata/go.d.plugin/agent/executable" + + "github.com/mattn/go-isatty" +) + +var isTerm = isatty.IsTerminal(os.Stderr.Fd()) + 
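+// isJournal is consulted by newTextHandler: when stderr is connected to the
+// systemd journal, the "time" attribute is dropped because journald
+// timestamps entries itself.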
+var isJournal = isStderrConnectedToJournal() + +var pluginAttr = slog.String("plugin", executable.Name) + +func New() *Logger { + if isTerm { + // skip 2 slog pkg calls, 2 this pkg calls + return &Logger{sl: slog.New(withCallDepth(4, newTerminalHandler()))} + } + return &Logger{sl: slog.New(newTextHandler()).With(pluginAttr)} +} + +type Logger struct { + muted atomic.Bool + sl *slog.Logger +} + +func (l *Logger) Error(a ...any) { l.log(slog.LevelError, fmt.Sprint(a...)) } +func (l *Logger) Warning(a ...any) { l.log(slog.LevelWarn, fmt.Sprint(a...)) } +func (l *Logger) Info(a ...any) { l.log(slog.LevelInfo, fmt.Sprint(a...)) } +func (l *Logger) Debug(a ...any) { l.log(slog.LevelDebug, fmt.Sprint(a...)) } +func (l *Logger) Errorf(format string, a ...any) { l.log(slog.LevelError, fmt.Sprintf(format, a...)) } +func (l *Logger) Warningf(format string, a ...any) { l.log(slog.LevelWarn, fmt.Sprintf(format, a...)) } +func (l *Logger) Infof(format string, a ...any) { l.log(slog.LevelInfo, fmt.Sprintf(format, a...)) } +func (l *Logger) Debugf(format string, a ...any) { l.log(slog.LevelDebug, fmt.Sprintf(format, a...)) } +func (l *Logger) Mute() { l.mute(true) } +func (l *Logger) Unmute() { l.mute(false) } + +func (l *Logger) With(args ...any) *Logger { + if l.isNil() { + return &Logger{sl: New().sl.With(args...)} + } + + ll := &Logger{sl: l.sl.With(args...)} + ll.muted.Store(l.muted.Load()) + + return ll +} + +func (l *Logger) log(level slog.Level, msg string) { + if l.isNil() { + nilLogger.sl.Log(context.Background(), level, msg) + return + } + + if !l.muted.Load() { + l.sl.Log(context.Background(), level, msg) + } +} + +func (l *Logger) mute(v bool) { + if l.isNil() || isTerm && Level.Enabled(slog.LevelDebug) { + return + } + l.muted.Store(v) +} + +func (l *Logger) isNil() bool { return l == nil || l.sl == nil } + +var nilLogger = New() + +func isStderrConnectedToJournal() bool { + stream := os.Getenv("JOURNAL_STREAM") + if stream == "" { + return false + } + + idx := strings.IndexByte(stream, ':') + if idx <= 0 { + return false + } + + dev, ino := stream[:idx], stream[idx+1:] + + var stat syscall.Stat_t + if err := syscall.Fstat(int(os.Stderr.Fd()), &stat); err != nil { + return false + } + + return dev == strconv.Itoa(int(stat.Dev)) && ino == strconv.FormatUint(stat.Ino, 10) +} diff --git a/src/go/collectors/go.d.plugin/logger/logger_test.go b/src/go/collectors/go.d.plugin/logger/logger_test.go new file mode 100644 index 00000000000000..df7049d0ad6ef4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/logger/logger_test.go @@ -0,0 +1,21 @@ +package logger + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + tests := map[string]*Logger{ + "default logger": New(), + "nil logger": nil, + } + + for name, logger := range tests { + t.Run(name, func(t *testing.T) { + f := func() { logger.Infof("test %s", "test") } + assert.NotPanics(t, f) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/mocks/blackbox/Dockerfile b/src/go/collectors/go.d.plugin/mocks/blackbox/Dockerfile new file mode 100644 index 00000000000000..23c5a80a093b1f --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/blackbox/Dockerfile @@ -0,0 +1,3 @@ +FROM alpine + +RUN apk add --no-cache curl \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d.conf new file mode 100644 index 00000000000000..7a179bd68c18a9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d.conf @@ -0,0 +1,16 
@@ +modules: + activemq: yes + apache: yes + consul: yes + dns_query: yes + example: no + freeradius: yes + httpcheck: yes + lighttpd: yes + mongodb: yes + nginx: yes + portcheck: yes + rabbitmq: yes + solr: yes + springboot2: yes + web_log: yes diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/apache.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/apache.conf new file mode 100644 index 00000000000000..a27444e1749fa8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/apache.conf @@ -0,0 +1,6 @@ +jobs: + - name: local + url: http://localhost/server-status?auto + + - name: local + url: http://httpd/server-status?auto diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/example.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/example.conf new file mode 100644 index 00000000000000..5d6472bba2d423 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/example.conf @@ -0,0 +1,2 @@ +jobs: +- name: example \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/logstash.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/logstash.conf new file mode 100644 index 00000000000000..f041a9768571a4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/logstash.conf @@ -0,0 +1,3 @@ +jobs: +- name: local + url: http://logstash:9600 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/mongodb.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/mongodb.conf new file mode 100644 index 00000000000000..a998fc1793748f --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/mongodb.conf @@ -0,0 +1,7 @@ +jobs: +- name: local + uri: "mongodb://mongo:27017" + timeout: 10 + databases: + includes: + - "* *" diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/springboot2.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/springboot2.conf new file mode 100644 index 00000000000000..da3d09233e61aa --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/springboot2.conf @@ -0,0 +1,8 @@ +jobs: +- name: local + url: http://springboot2:8080/actuator/prometheus +- name: filter + url: http://springboot2:8080/actuator/prometheus + uri_filter: + excludes: + - = /hello \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/web_log.conf b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/web_log.conf new file mode 100644 index 00000000000000..e378cfe7992c8f --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/conf.d/go.d/web_log.conf @@ -0,0 +1,30 @@ +jobs: +- name: httpd + path: /usr/local/apache2/logs/access_log + categories: + - name: status + match: ~ ^/server-status + histogram: [1, 10, 100, 1000] + +- name: httpd + path: ./mocks/tmp/access_log + categories: + - name: status + match: ~ ^/server-status + histogram: [1, 10, 100, 1000] + +- name: httpd2 + path: /usr/local/apache2/logs/access_log + aggregate_response_codes: true + categories: + - name: status + match: ~ ^/server-status + histogram: [1, 10, 100, 1000] + +- name: httpd2 + path: ./mocks/tmp/access_log + aggregate_response_codes: true + categories: + - name: status + match: ~ ^/server-status + histogram: [1, 10, 100, 1000] \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/httpd/httpd.conf b/src/go/collectors/go.d.plugin/mocks/httpd/httpd.conf new file mode 100644 index 00000000000000..c911bc1dfb5007 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/httpd/httpd.conf @@ -0,0 +1,92 @@ +ServerRoot "/usr/local/apache2" + 
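+# Minimal config for the go.d mock httpd container: mod_status answers on
+# /server-status (see the Location block at the bottom), and access logs are
+# written in the "combinedio" format that the web_log mock jobs parse.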
+Listen 80 +LoadModule mpm_event_module modules/mod_mpm_event.so +LoadModule authn_file_module modules/mod_authn_file.so +LoadModule authn_core_module modules/mod_authn_core.so +LoadModule authz_host_module modules/mod_authz_host.so +LoadModule authz_groupfile_module modules/mod_authz_groupfile.so +LoadModule authz_user_module modules/mod_authz_user.so +LoadModule authz_core_module modules/mod_authz_core.so +LoadModule access_compat_module modules/mod_access_compat.so +LoadModule auth_basic_module modules/mod_auth_basic.so +LoadModule reqtimeout_module modules/mod_reqtimeout.so +LoadModule filter_module modules/mod_filter.so +LoadModule mime_module modules/mod_mime.so +LoadModule log_config_module modules/mod_log_config.so +LoadModule logio_module modules/mod_logio.so +LoadModule env_module modules/mod_env.so +LoadModule headers_module modules/mod_headers.so +LoadModule setenvif_module modules/mod_setenvif.so +LoadModule version_module modules/mod_version.so +LoadModule unixd_module modules/mod_unixd.so +LoadModule status_module modules/mod_status.so +LoadModule autoindex_module modules/mod_autoindex.so +LoadModule dir_module modules/mod_dir.so +LoadModule alias_module modules/mod_alias.so + +User daemon +Group daemon + +ServerAdmin you@example.com + +<Directory /> + AllowOverride none + Require all denied +</Directory> + +DocumentRoot "/usr/local/apache2/htdocs" +<Directory "/usr/local/apache2/htdocs"> + Options Indexes FollowSymLinks + AllowOverride None + Require all granted +</Directory> + +<IfModule dir_module> + DirectoryIndex index.html +</IfModule> + +<Files ".ht*"> + Require all denied +</Files> + +ErrorLog /usr/local/apache2/logs/error_log +LogLevel warn + +<IfModule log_config_module> + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %{cookie}n %D %{Host}i \"%{X-Forwarded-For}i\"" onearm + LogFormat "%h %l %u %t \"%r\" %>s %b" common + + <IfModule logio_module> + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio + </IfModule> + + CustomLog /usr/local/apache2/logs/access_log combinedio + +</IfModule> + +<IfModule alias_module> + ScriptAlias /cgi-bin/ "/usr/local/apache2/cgi-bin/" + +</IfModule> + +<Directory "/usr/local/apache2/cgi-bin"> + AllowOverride None + Options None + Require all granted +</Directory> + +<IfModule headers_module> + RequestHeader unset Proxy early +</IfModule> + +<IfModule mime_module> + TypesConfig conf/mime.types + AddType application/x-compress .Z + AddType application/x-gzip .gz .tgz +</IfModule> + +<Location /server-status> + SetHandler server-status +</Location> diff --git a/src/go/collectors/go.d.plugin/mocks/netdata/netdata.conf b/src/go/collectors/go.d.plugin/mocks/netdata/netdata.conf new file mode 100644 index 00000000000000..9f169257269b68 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/netdata/netdata.conf @@ -0,0 +1,12 @@ +[plugins] + proc = no + diskspace = no + cgroups = no + tc = no + idlejitter = no + apps = no + python.d = no + charts.d = no + node.d = no + fping = no + go.d = yes \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/springboot2/.gitignore b/src/go/collectors/go.d.plugin/mocks/springboot2/.gitignore new file mode 100644 index 00000000000000..836ff4a65796f0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/.gitignore @@ -0,0 +1,4 @@ +.gradle/ +.idea/ +springboot2.* +/build \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/mocks/springboot2/Dockerfile b/src/go/collectors/go.d.plugin/mocks/springboot2/Dockerfile new file mode 100644 index 00000000000000..c7471dff8aaf99 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/Dockerfile @@ -0,0 +1,12 @@ +FROM gradle:5.0-jdk8-alpine as builder + +COPY --chown=gradle:gradle . /home/gradle/src +WORKDIR /home/gradle/src +RUN gradle build + +FROM openjdk:8-jre-alpine + +EXPOSE 8080 +COPY --from=builder /home/gradle/src/build/libs/springboot2-0.1.0.jar /app/ + +CMD ["java", "-jar", "/app/springboot2-0.1.0.jar"] \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/springboot2/build.gradle b/src/go/collectors/go.d.plugin/mocks/springboot2/build.gradle new file mode 100644 index 00000000000000..3cbcff14a22c76 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/build.gradle @@ -0,0 +1,33 @@ +buildscript { + repositories { + mavenCentral() + } + dependencies { + classpath("org.springframework.boot:spring-boot-gradle-plugin:2.0.3.RELEASE") + } +} + +apply plugin: 'java' +apply plugin: 'eclipse' +apply plugin: 'idea' +apply plugin: 'org.springframework.boot' +apply plugin: 'io.spring.dependency-management' + +bootJar { + baseName = 'springboot2' + version = '0.1.0' +} + +repositories { + mavenCentral() +} + +sourceCompatibility = 1.8 +targetCompatibility = 1.8 + +dependencies { + compile("org.springframework.boot:spring-boot-starter-web") + compile("org.springframework.boot:spring-boot-starter-actuator") + compile("io.micrometer:micrometer-registry-prometheus") + testCompile('org.springframework.boot:spring-boot-starter-test') +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/mocks/springboot2/settings.gradle b/src/go/collectors/go.d.plugin/mocks/springboot2/settings.gradle new file mode 100644 index 00000000000000..7fca39b723edda --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/settings.gradle @@ -0,0 +1,2 @@ +rootProject.name = 'springboot2' + diff --git a/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/java/hello/Main.java b/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/java/hello/Main.java new file mode 100644 index 00000000000000..524656de5a41fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/java/hello/Main.java @@ -0,0 +1,23 @@ +package hello; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.EnableAutoConfiguration; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.stereotype.Controller; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; + +@Controller +@SpringBootApplication +@EnableAutoConfiguration +public class Main { + public static void main(String[] args) { + SpringApplication.run(Main.class, args); + } + + @RequestMapping("/hello") + @ResponseBody + public String hello() { + return "Hello!"; + } +} diff --git a/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/resources/application.properties b/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/resources/application.properties new file mode 100644 index 00000000000000..821da092741ec8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/mocks/springboot2/src/main/resources/application.properties @@ -0,0 +1 @@ +management.endpoints.web.exposure.include=* diff --git a/web/rtc/README.md b/src/go/collectors/go.d.plugin/mocks/tmp/.gitkeep similarity index 100% 
rename from web/rtc/README.md rename to src/go/collectors/go.d.plugin/mocks/tmp/.gitkeep diff --git a/src/go/collectors/go.d.plugin/modules/activemq/README.md b/src/go/collectors/go.d.plugin/modules/activemq/README.md new file mode 120000 index 00000000000000..de893d1d064f2e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/README.md @@ -0,0 +1 @@ +integrations/activemq.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/activemq/activemq.go b/src/go/collectors/go.d.plugin/modules/activemq/activemq.go new file mode 100644 index 00000000000000..109c874dee1046 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/activemq.go @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + _ "embed" + "fmt" + "strings" + "time" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("activemq", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + keyQueues = "queues" + keyTopics = "topics" + keyAdvisory = "Advisory" +) + +var nameReplacer = strings.NewReplacer(".", "_", " ", "") + +const ( + defaultMaxQueues = 50 + defaultMaxTopics = 50 + defaultURL = "http://127.0.0.1:8161" + defaultHTTPTimeout = time.Second +) + +// New creates Example with default values. +func New() *ActiveMQ { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + + MaxQueues: defaultMaxQueues, + MaxTopics: defaultMaxTopics, + } + + return &ActiveMQ{ + Config: config, + charts: &Charts{}, + activeQueues: make(map[string]bool), + activeTopics: make(map[string]bool), + } +} + +// Config is the ActiveMQ module configuration. +type Config struct { + web.HTTP `yaml:",inline"` + Webadmin string `yaml:"webadmin"` + MaxQueues int `yaml:"max_queues"` + MaxTopics int `yaml:"max_topics"` + QueuesFilter string `yaml:"queues_filter"` + TopicsFilter string `yaml:"topics_filter"` +} + +// ActiveMQ ActiveMQ module. +type ActiveMQ struct { + module.Base + Config `yaml:",inline"` + + apiClient *apiClient + activeQueues map[string]bool + activeTopics map[string]bool + queuesFilter matcher.Matcher + topicsFilter matcher.Matcher + charts *Charts +} + +// Cleanup makes cleanup. +func (ActiveMQ) Cleanup() {} + +// Init makes initialization. +func (a *ActiveMQ) Init() bool { + if a.URL == "" { + a.Error("URL not set") + return false + } + + if a.Webadmin == "" { + a.Error("webadmin root path is not set") + return false + } + + if a.QueuesFilter != "" { + f, err := matcher.NewSimplePatternsMatcher(a.QueuesFilter) + if err != nil { + a.Errorf("error on creating queues filter : %v", err) + return false + } + a.queuesFilter = matcher.WithCache(f) + } + + if a.TopicsFilter != "" { + f, err := matcher.NewSimplePatternsMatcher(a.TopicsFilter) + if err != nil { + a.Errorf("error on creating topics filter : %v", err) + return false + } + a.topicsFilter = matcher.WithCache(f) + } + + client, err := web.NewHTTPClient(a.Client) + if err != nil { + a.Error(err) + return false + } + + a.apiClient = newAPIClient(client, a.Request, a.Webadmin) + + return true +} + +// Check makes check. +func (a *ActiveMQ) Check() bool { + return len(a.Collect()) > 0 +} + +// Charts creates Charts. 
+func (a ActiveMQ) Charts() *Charts { + return a.charts +} + +// Collect collects metrics. +func (a *ActiveMQ) Collect() map[string]int64 { + metrics := make(map[string]int64) + + var ( + queues *queues + topics *topics + err error + ) + + if queues, err = a.apiClient.getQueues(); err != nil { + a.Error(err) + return nil + } + + if topics, err = a.apiClient.getTopics(); err != nil { + a.Error(err) + return nil + } + + a.processQueues(queues, metrics) + a.processTopics(topics, metrics) + + return metrics +} + +func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) { + var ( + count = len(a.activeQueues) + updated = make(map[string]bool) + unp int + ) + + for _, q := range queues.Items { + if strings.Contains(q.Name, keyAdvisory) { + continue + } + + if !a.activeQueues[q.Name] { + if a.MaxQueues != 0 && count > a.MaxQueues { + unp++ + continue + } + + if !a.filterQueues(q.Name) { + continue + } + + a.activeQueues[q.Name] = true + a.addQueueTopicCharts(q.Name, keyQueues) + } + + rname := nameReplacer.Replace(q.Name) + + metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount + metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount + metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount + metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount + + updated[q.Name] = true + } + + for name := range a.activeQueues { + if !updated[name] { + delete(a.activeQueues, name) + a.removeQueueTopicCharts(name, keyQueues) + } + } + + if unp > 0 { + a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues) + } +} + +func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) { + var ( + count = len(a.activeTopics) + updated = make(map[string]bool) + unp int + ) + + for _, t := range topics.Items { + if strings.Contains(t.Name, keyAdvisory) { + continue + } + + if !a.activeTopics[t.Name] { + if a.MaxTopics != 0 && count > a.MaxTopics { + unp++ + continue + } + + if !a.filterTopics(t.Name) { + continue + } + + a.activeTopics[t.Name] = true + a.addQueueTopicCharts(t.Name, keyTopics) + } + + rname := nameReplacer.Replace(t.Name) + + metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount + metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount + metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount + metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount + + updated[t.Name] = true + } + + for name := range a.activeTopics { + if !updated[name] { + // TODO: delete after timeout? + delete(a.activeTopics, name) + a.removeQueueTopicCharts(name, keyTopics) + } + } + + if unp > 0 { + a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics) + } +} + +func (a ActiveMQ) filterQueues(line string) bool { + if a.queuesFilter == nil { + return true + } + return a.queuesFilter.MatchString(line) +} + +func (a ActiveMQ) filterTopics(line string) bool { + if a.topicsFilter == nil { + return true + } + return a.topicsFilter.MatchString(line) +} + +func (a *ActiveMQ) addQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + charts := charts.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, typ, rname) + chart.Title = fmt.Sprintf(chart.Title, name) + chart.Fam = typ + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, typ, rname) + } + } + + _ = a.charts.Add(*charts...) 
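+	// The copied templates carry two %s placeholders in their chart and dim
+	// IDs: the first is filled with the type ("queues" or "topics"), the
+	// second with the sanitized name, so the dim IDs line up with the
+	// metric keys built in processQueues/processTopics.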
+ +} + +func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() +} diff --git a/src/go/collectors/go.d.plugin/modules/activemq/activemq_test.go b/src/go/collectors/go.d.plugin/modules/activemq/activemq_test.go new file mode 100644 index 00000000000000..e45ceecd428fcd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/activemq_test.go @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + queuesData = []string{ + `<queues> +<queue name="sandra"> +<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/> +<feed> +<atom>queueBrowse/sandra?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/sandra?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +<queue name="Test"> +<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/> +<feed> +<atom>queueBrowse/Test?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/Test?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +</queues>`, + `<queues> +<queue name="sandra"> +<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/> +<feed> +<atom>queueBrowse/sandra?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/sandra?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +<queue name="Test"> +<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/> +<feed> +<atom>queueBrowse/Test?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/Test?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +<queue name="Test2"> +<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/> +<feed> +<atom>queueBrowse/Test?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/Test?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +</queues>`, + `<queues> +<queue name="sandra"> +<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/> +<feed> +<atom>queueBrowse/sandra?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/sandra?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +<queue name="Test"> +<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/> +<feed> +<atom>queueBrowse/Test?view=rss&feedType=atom_1.0</atom> +<rss>queueBrowse/Test?view=rss&feedType=rss_2.0</rss> +</feed> +</queue> +</queues>`, + } + + topicsData = []string{ + `<topics> +<topic name="ActiveMQ.Advisory.MasterBroker "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="AAA "> +<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/> +</topic> +<topic name="ActiveMQ.Advisory.Topic "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="ActiveMQ.Advisory.Queue "> +<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/> +</topic> +<topic name="AAAA "> +<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/> +</topic> +</topics>`, + `<topics> +<topic name="ActiveMQ.Advisory.MasterBroker "> +<stats size="0" consumerCount="0" 
enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="AAA "> +<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/> +</topic> +<topic name="ActiveMQ.Advisory.Topic "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="ActiveMQ.Advisory.Queue "> +<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/> +</topic> +<topic name="AAAA "> +<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/> +</topic> +<topic name="BBB "> +<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/> +</topic> +</topics>`, + `<topics> +<topic name="ActiveMQ.Advisory.MasterBroker "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="AAA "> +<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/> +</topic> +<topic name="ActiveMQ.Advisory.Topic "> +<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/> +</topic> +<topic name="ActiveMQ.Advisory.Queue "> +<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/> +</topic> +<topic name="AAAA "> +<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/> +</topic> +</topics>`, + } +) + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) + assert.Equal(t, defaultMaxQueues, job.MaxQueues) + assert.Equal(t, defaultMaxTopics, job.MaxTopics) +} + +func TestActiveMQ_Init(t *testing.T) { + job := New() + + // NG case + assert.False(t, job.Init()) + + // OK case + job.Webadmin = "webadmin" + assert.True(t, job.Init()) + assert.NotNil(t, job.apiClient) +} + +func TestActiveMQ_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/webadmin/xml/queues.jsp": + _, _ = w.Write([]byte(queuesData[0])) + case "/webadmin/xml/topics.jsp": + _, _ = w.Write([]byte(topicsData[0])) + } + })) + defer ts.Close() + + job := New() + job.HTTP.Request = web.Request{URL: ts.URL} + job.Webadmin = "webadmin" + + require.True(t, job.Init()) + require.True(t, job.Check()) +} + +func TestActiveMQ_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestActiveMQ_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestActiveMQ_Collect(t *testing.T) { + var collectNum int + getQueues := func() string { return queuesData[collectNum] } + getTopics := func() string { return topicsData[collectNum] } + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/webadmin/xml/queues.jsp": + _, _ = w.Write([]byte(getQueues())) + case "/webadmin/xml/topics.jsp": + _, _ = w.Write([]byte(getTopics())) + } + })) + defer ts.Close() + + job := New() + job.HTTP.Request = web.Request{URL: ts.URL} + job.Webadmin = "webadmin" + + require.True(t, job.Init()) + require.True(t, job.Check()) + + cases := []struct { + expected map[string]int64 + numQueues int + numTopics int + numCharts int + }{ + { + expected: map[string]int64{ + "queues_sandra_consumers": 1, + "queues_sandra_dequeued": 1, + "queues_Test_enqueued": 2, + "queues_Test_unprocessed": 1, + "topics_AAA_dequeued": 1, + "topics_AAAA_unprocessed": 1, + "queues_Test_dequeued": 1, + "topics_AAA_enqueued": 2, + "topics_AAA_unprocessed": 1, + "topics_AAAA_consumers": 1, + "topics_AAAA_dequeued": 1, + "queues_Test_consumers": 1, + 
"queues_sandra_enqueued": 2, + "queues_sandra_unprocessed": 1, + "topics_AAA_consumers": 1, + "topics_AAAA_enqueued": 2, + }, + numQueues: 2, + numTopics: 2, + numCharts: 12, + }, + { + expected: map[string]int64{ + "queues_sandra_enqueued": 3, + "queues_Test_enqueued": 3, + "queues_Test_unprocessed": 1, + "queues_Test2_dequeued": 0, + "topics_BBB_enqueued": 2, + "queues_sandra_dequeued": 2, + "queues_sandra_unprocessed": 1, + "queues_Test2_enqueued": 0, + "topics_AAAA_enqueued": 3, + "topics_AAAA_dequeued": 2, + "topics_BBB_unprocessed": 1, + "topics_AAA_dequeued": 2, + "topics_AAAA_unprocessed": 1, + "queues_Test_consumers": 2, + "queues_Test_dequeued": 2, + "queues_Test2_consumers": 0, + "queues_Test2_unprocessed": 0, + "topics_AAA_consumers": 2, + "topics_AAA_enqueued": 3, + "topics_BBB_dequeued": 1, + "queues_sandra_consumers": 2, + "topics_AAA_unprocessed": 1, + "topics_AAAA_consumers": 2, + "topics_BBB_consumers": 1, + }, + numQueues: 3, + numTopics: 3, + numCharts: 18, + }, + { + expected: map[string]int64{ + "queues_sandra_unprocessed": 1, + "queues_Test_unprocessed": 1, + "queues_sandra_consumers": 3, + "topics_AAAA_enqueued": 4, + "queues_sandra_dequeued": 3, + "queues_Test_consumers": 3, + "queues_Test_enqueued": 4, + "queues_Test_dequeued": 3, + "topics_AAA_consumers": 3, + "topics_AAA_unprocessed": 1, + "topics_AAAA_consumers": 3, + "topics_AAAA_unprocessed": 1, + "queues_sandra_enqueued": 4, + "topics_AAA_enqueued": 4, + "topics_AAA_dequeued": 3, + "topics_AAAA_dequeued": 3, + }, + numQueues: 2, + numTopics: 2, + numCharts: 18, + }, + } + + for _, c := range cases { + require.Equal(t, c.expected, job.Collect()) + assert.Len(t, job.activeQueues, c.numQueues) + assert.Len(t, job.activeTopics, c.numTopics) + assert.Len(t, *job.charts, c.numCharts) + collectNum++ + } +} + +func TestActiveMQ_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer ts.Close() + + job := New() + job.Webadmin = "webadmin" + job.HTTP.Request = web.Request{URL: ts.URL} + + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestActiveMQ_InvalidData(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye!")) + })) + defer ts.Close() + + mod := New() + mod.Webadmin = "webadmin" + mod.HTTP.Request = web.Request{URL: ts.URL} + + require.True(t, mod.Init()) + assert.False(t, mod.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/activemq/apiclient.go b/src/go/collectors/go.d.plugin/modules/activemq/apiclient.go new file mode 100644 index 00000000000000..6835fd5aabff94 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/apiclient.go @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "encoding/xml" + "fmt" + "github.com/netdata/go.d.plugin/pkg/web" + "io" + "net/http" + "net/url" + "path" +) + +type topics struct { + XMLName xml.Name `xml:"topics"` + Items []topic `xml:"topic"` +} + +type topic struct { + XMLName xml.Name `xml:"topic"` + Name string `xml:"name,attr"` + Stats stats `xml:"stats"` +} + +type queues struct { + XMLName xml.Name `xml:"queues"` + Items []queue `xml:"queue"` +} + +type queue struct { + XMLName xml.Name `xml:"queue"` + Name string `xml:"name,attr"` + Stats stats `xml:"stats"` +} + +type stats struct { + XMLName xml.Name `xml:"stats"` + Size int64 `xml:"size,attr"` + ConsumerCount int64 `xml:"consumerCount,attr"` + 
EnqueueCount int64 `xml:"enqueueCount,attr"` + DequeueCount int64 `xml:"dequeueCount,attr"` +} + +const pathStats = "/%s/xml/%s.jsp" + +func newAPIClient(client *http.Client, request web.Request, webadmin string) *apiClient { + return &apiClient{ + httpClient: client, + request: request, + webadmin: webadmin, + } +} + +type apiClient struct { + httpClient *http.Client + request web.Request + webadmin string +} + +func (a *apiClient) getQueues() (*queues, error) { + req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyQueues)) + if err != nil { + return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err) + } + + resp, err := a.doRequestOK(req) + + defer closeBody(resp) + + if err != nil { + return nil, err + } + + var queues queues + + if err := xml.NewDecoder(resp.Body).Decode(&queues); err != nil { + return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err) + } + + return &queues, nil +} + +func (a *apiClient) getTopics() (*topics, error) { + req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyTopics)) + if err != nil { + return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err) + } + + resp, err := a.doRequestOK(req) + + defer closeBody(resp) + + if err != nil { + return nil, err + } + + var topics topics + + if err := xml.NewDecoder(resp.Body).Decode(&topics); err != nil { + return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err) + } + + return &topics, nil +} + +func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { + resp, err := a.httpClient.Do(req) + if err != nil { + return resp, fmt.Errorf("error on request to %s : %v", req.URL, err) + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + return resp, err +} + +func (a apiClient) createRequest(urlPath string) (*http.Request, error) { + req := a.request.Copy() + u, err := url.Parse(req.URL) + if err != nil { + return nil, err + } + u.Path = path.Join(u.Path, urlPath) + req.URL = u.String() + return web.NewHTTPRequest(req) +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/activemq/charts.go b/src/go/collectors/go.d.plugin/modules/activemq/charts.go new file mode 100644 index 00000000000000..465cbce662c690 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/charts.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "%s_%s_messages", + Title: "%s Messages", + Units: "messages/s", + Fam: "", + Ctx: "activemq.messages", + Dims: Dims{ + {ID: "%s_%s_enqueued", Name: "enqueued", Algo: module.Incremental}, + {ID: "%s_%s_dequeued", Name: "dequeued", Algo: module.Incremental}, + }, + }, + { + ID: "%s_%s_unprocessed_messages", + Title: "%s Unprocessed Messages", + Units: "messages", + Fam: "", + Ctx: "activemq.unprocessed_messages", + Dims: Dims{ + {ID: "%s_%s_unprocessed", Name: "unprocessed"}, + }, + }, + { + ID: "%s_%s_consumers", + Title: "%s Consumers", + Units: "consumers", + Fam: "", + Ctx: "activemq.consumers", + Dims: Dims{ + {ID: "%s_%s_consumers", Name: "consumers"}, + }, + }, +} diff --git 
a/src/go/collectors/go.d.plugin/modules/activemq/config_schema.json b/src/go/collectors/go.d.plugin/modules/activemq/config_schema.json new file mode 100644 index 00000000000000..abefb5d2f9c412 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/config_schema.json @@ -0,0 +1,75 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/activemq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "webadmin": { + "type": "string" + }, + "max_queues": { + "type": "integer" + }, + "max_topics": { + "type": "integer" + }, + "queues_filter": { + "type": "string" + }, + "topics_filter": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url", + "webadmin" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/activemq/integrations/activemq.md b/src/go/collectors/go.d.plugin/modules/activemq/integrations/activemq.md new file mode 100644 index 00000000000000..3a4915d2229d59 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/integrations/activemq.md @@ -0,0 +1,233 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/activemq/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/activemq/metadata.yaml" +sidebar_label: "ActiveMQ" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ActiveMQ + + +<img src="https://netdata.cloud/img/activemq.png" width="150"/> + + +Plugin: go.d.plugin +Module: activemq + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors ActiveMQ queues and topics. + +It collects metrics by sending HTTP requests to the Web Console API. + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This collector discovers instances running on the local host that provide metrics on port 8161. +On startup, it tries to collect metrics from: + +- http://localhost:8161 + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ActiveMQ instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
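As a concrete companion to the overview above, here is a minimal, self-contained sketch of the request this collector performs, assuming the default URL (`http://localhost:8161`), the webadmin root path `admin`, and an unauthenticated console (add credentials if yours requires them). The XML structs mirror the ones the collector itself decodes:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
)

type stats struct {
	Size          int64 `xml:"size,attr"`
	ConsumerCount int64 `xml:"consumerCount,attr"`
	EnqueueCount  int64 `xml:"enqueueCount,attr"`
	DequeueCount  int64 `xml:"dequeueCount,attr"`
}

type queue struct {
	Name  string `xml:"name,attr"`
	Stats stats  `xml:"stats"`
}

type queues struct {
	Items []queue `xml:"queue"`
}

func main() {
	// Same endpoint the collector scrapes: <url>/<webadmin>/xml/queues.jsp
	resp, err := http.Get("http://localhost:8161/admin/xml/queues.jsp")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var qs queues
	if err := xml.NewDecoder(resp.Body).Decode(&qs); err != nil {
		panic(err)
	}

	for _, q := range qs.Items {
		fmt.Printf("%s: consumers=%d unprocessed=%d\n",
			q.Name, q.Stats.ConsumerCount, q.Stats.EnqueueCount-q.Stats.DequeueCount)
	}
}
```

The `unprocessed` figure is derived as `enqueueCount - dequeueCount`, which is exactly how the collector fills the `activemq.unprocessed_messages` dimension.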
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| activemq.messages | enqueued, dequeued | messages/s | +| activemq.unprocessed_messages | unprocessed | messages | +| activemq.consumers | consumers | consumers | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/activemq.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/activemq.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://localhost:8161 | yes | +| webadmin | Webadmin root path. | admin | yes | +| max_queues | Maximum number of concurrently collected queues. | 50 | no | +| max_topics | Maximum number of concurrently collected topics. | 50 | no | +| queues_filter | Queues filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| topics_filter | Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| timeout | HTTP request timeout. | 1 | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + username: foo + password: bar + +``` +</details> + +##### Filters and limits + +Using filters and limits for queues and topics. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + max_queues: 100 + max_topics: 100 + queues_filter: '!sandr* *' + topics_filter: '!sandr* *' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. 
+ +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + + - name: remote + url: http://192.0.2.1:8161 + webadmin: admin + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m activemq + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/activemq/metadata.yaml b/src/go/collectors/go.d.plugin/modules/activemq/metadata.yaml new file mode 100644 index 00000000000000..07d344a5089cdb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/activemq/metadata.yaml @@ -0,0 +1,230 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-activemq + module_name: activemq + plugin_name: go.d.plugin + monitored_instance: + categories: + - data-collection.message-brokers + icon_filename: activemq.png + name: ActiveMQ + link: https://activemq.apache.org/ + alternative_monitored_instances: [] + keywords: + - message broker + most_popular: false + info_provided_to_referring_integrations: + description: "" + related_resources: + integrations: + list: + - plugin_name: go.d.plugin + module_name: httpcheck + - plugin_name: apps.plugin + module_name: apps + overview: + data_collection: + metrics_description: This collector monitors ActiveMQ queues and topics. + method_description: It collects metrics by sending HTTP requests to the Web Console API. + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + This collector discovers instances running on the local host that provide metrics on port 8161. + On startup, it tries to collect metrics from: + + - http://localhost:8161 + limits: + description: "" + performance_impact: + description: "" + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/activemq.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://localhost:8161 + required: true + - name: webadmin + description: Webadmin root path. + default_value: admin + required: true + - name: max_queues + description: Maximum number of concurrently collected queues. + default_value: 50 + required: false + - name: max_topics + description: Maximum number of concurrently collected topics. + default_value: 50 + required: false + - name: queues_filter + description: | + Queues filter. 
Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). + default_value: "" + required: false + - name: topics_filter + description: | + Topics filter. Syntax is [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). + default_value: "" + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + username: foo + password: bar + - name: Filters and limits + description: Using filters and limits for queues and topics. + config: | + jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + max_queues: 100 + max_topics: 100 + queues_filter: '!sandr* *' + topics_filter: '!sandr* *' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8161 + webadmin: admin + + - name: remote + url: http://192.0.2.1:8161 + webadmin: admin + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: activemq.messages + availability: [] + description: Messaged + unit: messages/s + chart_type: line + dimensions: + - name: enqueued + - name: dequeued + - name: activemq.unprocessed_messages + availability: [] + description: Unprocessed Messages + unit: messages + chart_type: line + dimensions: + - name: unprocessed + - name: activemq.consumers + availability: [] + description: Consumers + unit: consumers + chart_type: line + dimensions: + - name: consumers diff --git a/src/go/collectors/go.d.plugin/modules/apache/README.md b/src/go/collectors/go.d.plugin/modules/apache/README.md new file mode 120000 index 00000000000000..066ee4162abb9a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/README.md @@ -0,0 +1 @@ +integrations/apache.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/apache/apache.go b/src/go/collectors/go.d.plugin/modules/apache/apache.go new file mode 100644 index 00000000000000..8b117463d9598f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/apache.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apache + +import ( + _ "embed" + "net/http" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("apache", module.Creator{ + Create: func() module.Module { return New() }, + JobConfigSchema: configSchema, + }) +} + +func New() *Apache { + return &Apache{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/server-status?auto", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 2}, + }, + }, + }, + charts: &module.Charts{}, + once: &sync.Once{}, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Apache struct { + module.Base + + Config `yaml:",inline"` + + charts *module.Charts + + httpClient *http.Client + once *sync.Once +} + +func (a *Apache) Init() bool { + if err := a.verifyConfig(); err != nil { + a.Errorf("config validation: %v", err) + return false + } + + httpClient, err := a.initHTTPClient() + if err != nil { + a.Errorf("init HTTP client: %v", err) + return false + } + a.httpClient = httpClient + + a.Debugf("using URL %s", a.URL) + a.Debugf("using timeout: %s", a.Timeout.Duration) + return true +} + +func (a *Apache) Check() bool { + return len(a.Collect()) > 0 +} + +func (a *Apache) Charts() *module.Charts { + return a.charts +} + +func (a *Apache) Collect() map[string]int64 { + mx, err := a.collect() + if err != nil { + a.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (a *Apache) Cleanup() { + if a.httpClient != nil { + a.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/apache/apache_test.go b/src/go/collectors/go.d.plugin/modules/apache/apache_test.go new file mode 100644 index 00000000000000..a507113f3555fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/apache_test.go @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apache + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataSimpleStatusMPMEvent, _ = os.ReadFile("testdata/simple-status-mpm-event.txt") + dataExtendedStatusMPMEvent, _ = os.ReadFile("testdata/extended-status-mpm-event.txt") + 
dataExtendedStatusMPMPrefork, _ = os.ReadFile("testdata/extended-status-mpm-prefork.txt") + dataLighttpdStatus, _ = os.ReadFile("testdata/lighttpd-status.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataSimpleStatusMPMEvent": dataSimpleStatusMPMEvent, + "dataExtendedStatusMPMEvent": dataExtendedStatusMPMEvent, + "dataExtendedStatusMPMPrefork": dataExtendedStatusMPMPrefork, + "dataLighttpdStatus": dataLighttpdStatus, + } { + require.NotNilf(t, data, name) + + } +} + +func TestApache_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + "fail when URL has no wantMetrics suffix": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apache := New() + apache.Config = test.config + + if test.wantFail { + assert.False(t, apache.Init()) + } else { + assert.True(t, apache.Init()) + } + }) + } +} + +func TestApache_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (apache *Apache, cleanup func()) + }{ + "success on simple status MPM Event": { + wantFail: false, + prepare: caseMPMEventSimpleStatus, + }, + "success on extended status MPM Event": { + wantFail: false, + prepare: caseMPMEventExtendedStatus, + }, + "success on extended status MPM Prefork": { + wantFail: false, + prepare: caseMPMPreforkExtendedStatus, + }, + "fail on Lighttpd response": { + wantFail: true, + prepare: caseLighttpdResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apache, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, apache.Check()) + } else { + assert.True(t, apache.Check()) + } + }) + } +} + +func TestApache_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestApache_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (apache *Apache, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on simple status MPM Event": { + prepare: caseMPMEventSimpleStatus, + wantNumOfCharts: len(baseCharts), + wantMetrics: map[string]int64{ + "busy_workers": 1, + "conns_async_closing": 0, + "conns_async_keep_alive": 0, + "conns_async_writing": 0, + "conns_total": 0, + "idle_workers": 74, + "scoreboard_closing": 0, + "scoreboard_dns_lookup": 0, + "scoreboard_finishing": 0, + "scoreboard_idle_cleanup": 0, + "scoreboard_keepalive": 0, + "scoreboard_logging": 0, + "scoreboard_open": 325, + "scoreboard_reading": 0, + "scoreboard_sending": 1, + "scoreboard_starting": 0, + "scoreboard_waiting": 74, + }, + }, + "success on extended status MPM Event": { + prepare: caseMPMEventExtendedStatus, + wantNumOfCharts: len(baseCharts) + len(extendedCharts), + wantMetrics: map[string]int64{ + "busy_workers": 1, + "bytes_per_req": 136533000, + "bytes_per_sec": 4800000, + "conns_async_closing": 0, + "conns_async_keep_alive": 0, + "conns_async_writing": 0, + 
"conns_total": 0, + "idle_workers": 99, + "req_per_sec": 3515, + "scoreboard_closing": 0, + "scoreboard_dns_lookup": 0, + "scoreboard_finishing": 0, + "scoreboard_idle_cleanup": 0, + "scoreboard_keepalive": 0, + "scoreboard_logging": 0, + "scoreboard_open": 300, + "scoreboard_reading": 0, + "scoreboard_sending": 1, + "scoreboard_starting": 0, + "scoreboard_waiting": 99, + "total_accesses": 9, + "total_kBytes": 12, + "uptime": 256, + }, + }, + "success on extended status MPM Prefork": { + prepare: caseMPMPreforkExtendedStatus, + wantNumOfCharts: len(baseCharts) + len(extendedCharts) - 2, + wantMetrics: map[string]int64{ + "busy_workers": 70, + "bytes_per_req": 3617880000, + "bytes_per_sec": 614250000000, + "idle_workers": 1037, + "req_per_sec": 16978100, + "scoreboard_closing": 0, + "scoreboard_dns_lookup": 0, + "scoreboard_finishing": 0, + "scoreboard_idle_cleanup": 0, + "scoreboard_keepalive": 0, + "scoreboard_logging": 0, + "scoreboard_open": 3, + "scoreboard_reading": 0, + "scoreboard_sending": 0, + "scoreboard_starting": 0, + "scoreboard_waiting": 3, + "total_accesses": 120358784, + "total_kBytes": 4252382776, + "uptime": 708904, + }, + }, + "fail on Lighttpd response": { + prepare: caseLighttpdResponse, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + apache, cleanup := test.prepare(t) + defer cleanup() + + _ = apache.Check() + + collected := apache.Collect() + + require.Equal(t, test.wantMetrics, collected) + assert.Equal(t, test.wantNumOfCharts, len(*apache.Charts())) + }) + } +} + +func caseMPMEventSimpleStatus(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataSimpleStatusMPMEvent) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} + +func caseMPMEventExtendedStatus(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataExtendedStatusMPMEvent) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} + +func caseMPMPreforkExtendedStatus(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataExtendedStatusMPMPrefork) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} + +func caseLighttpdResponse(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataLighttpdStatus) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n 
goodbye")) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*Apache, func()) { + t.Helper() + apache := New() + apache.URL = "http://127.0.0.1:65001/server-status?auto" + require.True(t, apache.Init()) + + return apache, func() {} +} + +func case404(t *testing.T) (*Apache, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + apache := New() + apache.URL = srv.URL + "/server-status?auto" + require.True(t, apache.Init()) + + return apache, srv.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/apache/charts.go b/src/go/collectors/go.d.plugin/modules/apache/charts.go new file mode 100644 index 00000000000000..1dcf77f512463f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/charts.go @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apache + +import "github.com/netdata/go.d.plugin/agent/module" + +const ( + prioRequests = module.Priority + iota + prioConnection + prioConnsAsync + prioScoreboard + prioNet + prioWorkers + prioReqPerSec + prioBytesPerSec + prioBytesPerReq + prioUptime +) + +var baseCharts = module.Charts{ + chartConnections.Copy(), + chartConnsAsync.Copy(), + chartWorkers.Copy(), + chartScoreboard.Copy(), +} + +var extendedCharts = module.Charts{ + chartRequests.Copy(), + chartBandwidth.Copy(), + chartReqPerSec.Copy(), + chartBytesPerSec.Copy(), + chartBytesPerReq.Copy(), + chartUptime.Copy(), +} + +func newCharts(s *serverStatus) *module.Charts { + charts := baseCharts.Copy() + + // ServerMPM: prefork + if s.Connections.Total == nil { + _ = charts.Remove(chartConnections.ID) + } + if s.Connections.Async.KeepAlive == nil { + _ = charts.Remove(chartConnsAsync.ID) + } + + if s.Total.Accesses != nil { + _ = charts.Add(*extendedCharts.Copy()...) 
+ } + + return charts +} + +// simple status +var ( + chartConnections = module.Chart{ + ID: "connections", + Title: "Connections", + Units: "connections", + Fam: "connections", + Ctx: "apache.connections", + Priority: prioConnection, + Dims: module.Dims{ + {ID: "conns_total", Name: "connections"}, + }, + } + chartConnsAsync = module.Chart{ + ID: "conns_async", + Title: "Async Connections", + Units: "connections", + Fam: "connections", + Ctx: "apache.conns_async", + Type: module.Stacked, + Priority: prioConnsAsync, + Dims: module.Dims{ + {ID: "conns_async_keep_alive", Name: "keepalive"}, + {ID: "conns_async_closing", Name: "closing"}, + {ID: "conns_async_writing", Name: "writing"}, + }, + } + chartWorkers = module.Chart{ + ID: "workers", + Title: "Workers Threads", + Units: "workers", + Fam: "workers", + Ctx: "apache.workers", + Type: module.Stacked, + Priority: prioWorkers, + Dims: module.Dims{ + {ID: "idle_workers", Name: "idle"}, + {ID: "busy_workers", Name: "busy"}, + }, + } + chartScoreboard = module.Chart{ + ID: "scoreboard", + Title: "Scoreboard", + Units: "connections", + Fam: "connections", + Ctx: "apache.scoreboard", + Priority: prioScoreboard, + Dims: module.Dims{ + {ID: "scoreboard_waiting", Name: "waiting"}, + {ID: "scoreboard_starting", Name: "starting"}, + {ID: "scoreboard_reading", Name: "reading"}, + {ID: "scoreboard_sending", Name: "sending"}, + {ID: "scoreboard_keepalive", Name: "keepalive"}, + {ID: "scoreboard_dns_lookup", Name: "dns_lookup"}, + {ID: "scoreboard_closing", Name: "closing"}, + {ID: "scoreboard_logging", Name: "logging"}, + {ID: "scoreboard_finishing", Name: "finishing"}, + {ID: "scoreboard_idle_cleanup", Name: "idle_cleanup"}, + {ID: "scoreboard_open", Name: "open"}, + }, + } +) + +// extended status +var ( + chartRequests = module.Chart{ + ID: "requests", + Title: "Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "apache.requests", + Priority: prioRequests, + Dims: module.Dims{ + {ID: "total_accesses", Name: "requests", Algo: module.Incremental}, + }, + } + chartBandwidth = module.Chart{ + ID: "net", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "apache.net", + Type: module.Area, + Priority: prioNet, + Dims: module.Dims{ + {ID: "total_kBytes", Name: "sent", Algo: module.Incremental, Mul: 8}, + }, + } + chartReqPerSec = module.Chart{ + ID: "reqpersec", + Title: "Lifetime Average Number Of Requests Per Second", + Units: "requests/s", + Fam: "statistics", + Ctx: "apache.reqpersec", + Type: module.Area, + Priority: prioReqPerSec, + Dims: module.Dims{ + {ID: "req_per_sec", Name: "requests", Div: 100000}, + }, + } + chartBytesPerSec = module.Chart{ + ID: "bytespersec", + Title: "Lifetime Average Number Of Bytes Served Per Second", + Units: "KiB/s", + Fam: "statistics", + Ctx: "apache.bytespersec", + Type: module.Area, + Priority: prioBytesPerSec, + Dims: module.Dims{ + {ID: "bytes_per_sec", Name: "served", Mul: 8, Div: 1024 * 100000}, + }, + } + chartBytesPerReq = module.Chart{ + ID: "bytesperreq", + Title: "Lifetime Average Response Size", + Units: "KiB", + Fam: "statistics", + Ctx: "apache.bytesperreq", + Type: module.Area, + Priority: prioBytesPerReq, + Dims: module.Dims{ + {ID: "bytes_per_req", Name: "size", Div: 1024 * 100000}, + }, + } + chartUptime = module.Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "availability", + Ctx: "apache.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "uptime"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/apache/collect.go 
b/src/go/collectors/go.d.plugin/modules/apache/collect.go new file mode 100644 index 00000000000000..5e9a6048ccbd3e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/collect.go @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apache + +import ( + "bufio" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (a *Apache) collect() (map[string]int64, error) { + status, err := a.scrapeStatus() + if err != nil { + return nil, err + } + + mx := stm.ToMap(status) + if len(mx) == 0 { + return nil, fmt.Errorf("nothing was collected from %s", a.URL) + } + + a.once.Do(func() { a.charts = newCharts(status) }) + + return mx, nil +} + +func (a *Apache) scrapeStatus() (*serverStatus, error) { + req, err := web.NewHTTPRequest(a.Request) + if err != nil { + return nil, err + } + + resp, err := a.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + return parseResponse(resp.Body) +} + +func parseResponse(r io.Reader) (*serverStatus, error) { + s := bufio.NewScanner(r) + var status serverStatus + + for s.Scan() { + parts := strings.Split(s.Text(), ":") + if len(parts) != 2 { + continue + } + + key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + + switch key { + default: + case "BusyServers", "IdleServers": + return nil, fmt.Errorf("found '%s', Lighttpd data", key) + case "BusyWorkers": + status.Workers.Busy = parseInt(value) + case "IdleWorkers": + status.Workers.Idle = parseInt(value) + case "ConnsTotal": + status.Connections.Total = parseInt(value) + case "ConnsAsyncWriting": + status.Connections.Async.Writing = parseInt(value) + case "ConnsAsyncKeepAlive": + status.Connections.Async.KeepAlive = parseInt(value) + case "ConnsAsyncClosing": + status.Connections.Async.Closing = parseInt(value) + case "Total Accesses": + status.Total.Accesses = parseInt(value) + case "Total kBytes": + status.Total.KBytes = parseInt(value) + case "Uptime": + status.Uptime = parseInt(value) + case "ReqPerSec": + status.Averages.ReqPerSec = parseFloat(value) + case "BytesPerSec": + status.Averages.BytesPerSec = parseFloat(value) + case "BytesPerReq": + status.Averages.BytesPerReq = parseFloat(value) + case "Scoreboard": + status.Scoreboard = parseScoreboard(value) + } + } + + return &status, nil +} + +func parseScoreboard(line string) *scoreboard { + // “_” Waiting for Connection + // “S” Starting up + // “R” Reading Request + // “W” Sending Reply + // “K” Keepalive (read) + // “D” DNS Lookup + // “C” Closing connection + // “L” Logging + // “G” Gracefully finishing + // “I” Idle cleanup of worker + // “.” Open slot with no current process + var sb scoreboard + for _, s := range strings.Split(line, "") { + switch s { + case "_": + sb.Waiting++ + case "S": + sb.Starting++ + case "R": + sb.Reading++ + case "W": + sb.Sending++ + case "K": + sb.KeepAlive++ + case "D": + sb.DNSLookup++ + case "C": + sb.Closing++ + case "L": + sb.Logging++ + case "G": + sb.Finishing++ + case "I": + sb.IdleCleanup++ + case ".": + sb.Open++ + } + } + return &sb +} + +func parseInt(value string) *int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil + } + return &v +} + +func parseFloat(value string) *float64 { + v, err := strconv.ParseFloat(value, 
64) + if err != nil { + return nil + } + return &v +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/apache/config_schema.json b/src/go/collectors/go.d.plugin/modules/apache/config_schema.json new file mode 100644 index 00000000000000..81ece2b67d2944 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/apache job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/apache/init.go b/src/go/collectors/go.d.plugin/modules/apache/init.go new file mode 100644 index 00000000000000..35599977077dc2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package apache + +import ( + "errors" + "net/http" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (a Apache) verifyConfig() error { + if a.URL == "" { + return errors.New("url not set") + } + if !strings.HasSuffix(a.URL, "?auto") { + return errors.New("invalid URL, should ends in '?auto'") + } + return nil +} + +func (a Apache) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(a.Client) +} diff --git a/src/go/collectors/go.d.plugin/modules/apache/integrations/apache.md b/src/go/collectors/go.d.plugin/modules/apache/integrations/apache.md new file mode 100644 index 00000000000000..6163ee5a9d0208 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/integrations/apache.md @@ -0,0 +1,238 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/apache/integrations/apache.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/apache/metadata.yaml" +sidebar_label: "Apache" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Apache + + +<img src="https://netdata.cloud/img/apache.svg" width="150"/> + + +Plugin: go.d.plugin +Module: apache + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more. + + +It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), +which is a built-in location that provides metrics about the Apache server. + + +This collector is supported on all platforms. 
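For reference, the `?auto` variant of `server-status` returns plain `Key: value` lines, which the collector scans one by one. The sketch below shows that parsing against a canned response; the field names are the ones Apache actually emits, everything else is illustrative:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Canned ?auto response; a live server returns the same "Key: value" shape.
	const status = "Total Accesses: 9\nBusyWorkers: 1\nIdleWorkers: 99\nScoreboard: _W__\n"

	mx := make(map[string]string)
	s := bufio.NewScanner(strings.NewReader(status))
	for s.Scan() {
		// Each metric is a "Key: value" line; anything else is skipped.
		parts := strings.SplitN(s.Text(), ":", 2)
		if len(parts) != 2 {
			continue
		}
		mx[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
	}

	fmt.Println(mx) // map[BusyWorkers:1 IdleWorkers:99 Scoreboard:_W__ Total Accesses:9]
}
```

The collector additionally rejects responses containing `BusyServers`/`IdleServers` keys, since those indicate a Lighttpd status page rather than Apache.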
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Apache instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://localhost/server-status?auto
+- http://127.0.0.1/server-status?auto
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+All metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+
+
+### Per Apache instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | Basic | Extended |
+|:------|:----------|:----|:---:|:---:|
+| apache.connections | connections | connections | • | • |
+| apache.conns_async | keepalive, closing, writing | connections | • | • |
+| apache.workers | idle, busy | workers | • | • |
+| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | • | • |
+| apache.requests | requests | requests/s | | • |
+| apache.net | sent | kilobit/s | | • |
+| apache.reqpersec | requests | requests/s | | • |
+| apache.bytespersec | served | KiB/s | | • |
+| apache.bytesperreq | size | KiB | | • |
+| apache.uptime | uptime | seconds | | • |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Apache status support
+
+- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set to `On` (enabled by default since Apache v2.3.6).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/apache.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/apache.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/server-status?auto | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body.
| | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Apache with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1/server-status?auto + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + + - name: remote + url: http://192.0.2.1/server-status?auto + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m apache + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/apache/integrations/httpd.md b/src/go/collectors/go.d.plugin/modules/apache/integrations/httpd.md new file mode 100644 index 00000000000000..fe2c92b464a92a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/integrations/httpd.md @@ -0,0 +1,238 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/apache/integrations/httpd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/apache/metadata.yaml" +sidebar_label: "HTTPD" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HTTPD + + +<img src="https://netdata.cloud/img/apache.svg" width="150"/> + + +Plugin: go.d.plugin +Module: apache + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more. + + +It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), +which is a built-in location that provides metrics about the Apache server. 
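+
+Each line of that report is a `Key: value` pair. The following is a simplified sketch of how such lines can be split (illustrative only, mirroring but not identical to the collector's parser):
+
+```go
+package main
+
+import (
+	"bufio"
+	"fmt"
+	"strings"
+)
+
+func main() {
+	// A few sample lines as they appear in the mod_status report.
+	report := "BusyWorkers: 1\nIdleWorkers: 99\nTotal Accesses: 9\n"
+
+	s := bufio.NewScanner(strings.NewReader(report))
+	for s.Scan() {
+		parts := strings.SplitN(s.Text(), ":", 2)
+		if len(parts) != 2 {
+			continue // skip lines that are not "Key: value" pairs
+		}
+		key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+		fmt.Printf("%s = %s\n", key, value)
+	}
+}
+```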
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Apache instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://localhost/server-status?auto
+- http://127.0.0.1/server-status?auto
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+All metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+
+
+### Per Apache instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | Basic | Extended |
+|:------|:----------|:----|:---:|:---:|
+| apache.connections | connections | connections | • | • |
+| apache.conns_async | keepalive, closing, writing | connections | • | • |
+| apache.workers | idle, busy | workers | • | • |
+| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | • | • |
+| apache.requests | requests | requests/s | | • |
+| apache.net | sent | kilobit/s | | • |
+| apache.reqpersec | requests | requests/s | | • |
+| apache.bytespersec | served | KiB/s | | • |
+| apache.bytesperreq | size | KiB | | • |
+| apache.uptime | uptime | seconds | | • |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Apache status support
+
+- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set to `On` (enabled by default since Apache v2.3.6).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/apache.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/apache.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/server-status?auto | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method.
| GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Apache with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1/server-status?auto + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + + - name: remote + url: http://192.0.2.1/server-status?auto + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m apache + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/apache/metadata.yaml b/src/go/collectors/go.d.plugin/modules/apache/metadata.yaml new file mode 100644 index 00000000000000..bfab73fcfe45a8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/metadata.yaml @@ -0,0 +1,302 @@ +plugin_name: go.d.plugin +modules: + - &module + meta: &meta + id: collector-go.d.plugin-apache + plugin_name: go.d.plugin + module_name: apache + monitored_instance: + name: Apache + link: https://httpd.apache.org/ + icon_filename: apache.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - webserver + related_resources: + integrations: + list: + - plugin_name: go.d.plugin + module_name: weblog + - plugin_name: go.d.plugin + module_name: httpcheck + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more. + method_description: | + It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html), + which is a built-in location that provides metrics about the Apache server. 
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            By default, it detects Apache instances running on localhost that are listening on port 80.
+            On startup, it tries to collect metrics from:
+
+            - http://localhost/server-status?auto
+            - http://127.0.0.1/server-status?auto
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Enable Apache status support
+            description: |
+              - Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+              - Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set to `On` (enabled by default since Apache v2.3.6).
+      configuration:
+        file:
+          name: go.d/apache.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1/server-status?auto
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/server-status?auto
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/server-status?auto
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: Apache with enabled HTTPS and self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1/server-status?auto
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/server-status?auto
+
+                  - name: remote
+                    url: http://192.0.2.1/server-status?auto
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: |
+        All metrics are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+      availability:
+        - Basic
+        - Extended
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: apache.connections
+              availability:
+                - Basic
+                - Extended
+              description: Connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: connections
+            - name: apache.conns_async
+              availability:
+                - Basic
+                - Extended
+              description: Active Connections
+              unit: connections
+              chart_type: stacked
+              dimensions:
+                - name: keepalive
+                - name: closing
+                - name: writing
+            - name: apache.workers
+              availability:
+                - Basic
+                - Extended
+              description: Workers Threads
+              unit: workers
+              chart_type: stacked
+              dimensions:
+                - name: idle
+                - name: busy
+            - name: apache.scoreboard
+              availability:
+                - Basic
+                - Extended
+              description: Scoreboard
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: waiting
+                - name: starting
+                - name: reading
+                - name: sending
+                - name: keepalive
+                - name: dns_lookup
+                - name: closing
+                - name: logging
+                - name: finishing
+                - name: idle_cleanup
+                - name: open
+            - name: apache.requests
+              availability:
+                - Extended
+              description: Requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: apache.net
+              availability:
+                - Extended
+              description: Bandwidth
+              unit: kilobit/s
+              chart_type: area
+              dimensions:
+                - name: sent
+            - name: apache.reqpersec
+              availability:
+                - Extended
+              description: Lifetime Average Number Of Requests Per Second
+              unit: requests/s
+              chart_type: area
+              dimensions:
+                - name: requests
+            - name: apache.bytespersec
+              availability:
+                - Extended
+              description: Lifetime Average Number Of Bytes Served Per Second
+              unit: KiB/s
+              chart_type: area
+              dimensions:
+                - name: served
+            - name: apache.bytesperreq
+              availability:
+                - Extended
+              description: Lifetime Average Response Size
+              unit: KiB
+              chart_type: area
+              dimensions:
+                - name: size
+            - name: apache.uptime
+              availability:
+                - Extended
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: uptime
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-httpd
+      monitored_instance:
+        name: HTTPD
+        link: https://httpd.apache.org/
+        icon_filename: apache.svg
+        categories:
+          - data-collection.web-servers-and-web-proxies
diff --git a/src/go/collectors/go.d.plugin/modules/apache/metrics.go b/src/go/collectors/go.d.plugin/modules/apache/metrics.go
new file mode 100644
index 00000000000000..953bd42c385a5a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/apache/metrics.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+type (
+	serverStatus struct {
+		// ExtendedStatus
+		Total struct {
+			// Total number of accesses.
+			Accesses *int64 `stm:"accesses"`
+			// Total number of kilobytes served.
+ // This metric reflects the bytes that should have been served, + // which is not necessarily equal to the bytes actually (successfully) served. + KBytes *int64 `stm:"kBytes"` + } `stm:"total"` + Averages struct { + //Average number of requests per second. + ReqPerSec *float64 `stm:"req_per_sec,100000,1"` + // Average number of bytes served per second. + BytesPerSec *float64 `stm:"bytes_per_sec,100000,1"` + // Average number of bytes per request. + BytesPerReq *float64 `stm:"bytes_per_req,100000,1"` + } `stm:""` + Uptime *int64 `stm:"uptime"` + + Workers struct { + // Total number of busy worker threads/processes. + // A worker is considered “busy” if it is in any of the following states: + // reading, writing, keep-alive, logging, closing, or gracefully finishing. + Busy *int64 `stm:"busy_workers"` + // Total number of idle worker threads/processes. + // An “idle” worker is not in any of the busy states. + Idle *int64 `stm:"idle_workers"` + } `stm:""` + Connections struct { + Total *int64 `stm:"total"` + Async struct { + // Number of async connections in writing state (only applicable to event MPM). + Writing *int64 `stm:"writing"` + // Number of async connections in keep-alive state (only applicable to event MPM). + KeepAlive *int64 `stm:"keep_alive"` + // Number of async connections in closing state (only applicable to event MPM). + Closing *int64 `stm:"closing"` + } `stm:"async"` + } `stm:"conns"` + Scoreboard *scoreboard `stm:"scoreboard"` + } + scoreboard struct { + Waiting int64 `stm:"waiting"` + Starting int64 `stm:"starting"` + Reading int64 `stm:"reading"` + Sending int64 `stm:"sending"` + KeepAlive int64 `stm:"keepalive"` + DNSLookup int64 `stm:"dns_lookup"` + Closing int64 `stm:"closing"` + Logging int64 `stm:"logging"` + Finishing int64 `stm:"finishing"` + IdleCleanup int64 `stm:"idle_cleanup"` + Open int64 `stm:"open"` + } +) diff --git a/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-event.txt b/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-event.txt new file mode 100644 index 00000000000000..136b69363c8a98 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-event.txt @@ -0,0 +1,39 @@ +127.0.0.1 +ServerVersion: Apache/2.4.37 (Unix) +ServerMPM: event +Server Built: Oct 23 2018 18:27:46 +CurrentTime: Sunday, 13-Jan-2019 20:39:30 MSK +RestartTime: Sunday, 13-Jan-2019 20:35:13 MSK +ParentServerConfigGeneration: 1 +ParentServerMPMGeneration: 0 +ServerUptimeSeconds: 256 +ServerUptime: 4 minutes 16 seconds +Load1: 1.02 +Load5: 1.30 +Load15: 1.41 +Total Accesses: 9 +Total kBytes: 12 +Total Duration: 1 +CPUUser: 0 +CPUSystem: .01 +CPUChildrenUser: 0 +CPUChildrenSystem: 0 +CPULoad: .00390625 +Uptime: 256 +ReqPerSec: .0351563 +BytesPerSec: 48 +BytesPerReq: 1365.33 +DurationPerReq: .111111 +BusyWorkers: 1 +IdleWorkers: 99 +Processes: 4 +Stopping: 0 +BusyWorkers: 1 +IdleWorkers: 99 +ConnsTotal: 0 +ConnsAsyncWriting: 0 +ConnsAsyncKeepAlive: 0 +ConnsAsyncClosing: 0 +Scoreboard: ____________________________________________________________W_______________________________________............................................................................................................................................................................................................................................................................................................ 
+Using GnuTLS version: 3.6.5 +Built against GnuTLS version: 3.5.19 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-prefork.txt b/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-prefork.txt new file mode 100644 index 00000000000000..eeafb4983f6655 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/testdata/extended-status-mpm-prefork.txt @@ -0,0 +1,48 @@ +some.host.name +ServerVersion: Apache/2.4.53 (Unix) OpenSSL/1.1.1 +ServerMPM: prefork +Server Built: Apr 6 2022 16:30:59 +CurrentTime: Monday, 18-Apr-2022 11:52:39 CEST +RestartTime: Sunday, 10-Apr-2022 06:57:34 CEST +ParentServerConfigGeneration: 9 +ParentServerMPMGeneration: 8 +ServerUptimeSeconds: 708904 +ServerUptime: 8 days 4 hours 55 minutes 4 seconds +Load1: 7.18 +Load5: 7.29 +Load15: 8.25 +Total Accesses: 120358784 +Total kBytes: 4252382776 +Total Duration: 35583107177 +CPUUser: 4549.96 +CPUSystem: 4142.92 +CPUChildrenUser: 776666 +CPUChildrenSystem: 609619 +CPULoad: 196.78 +Uptime: 708904 +ReqPerSec: 169.781 +BytesPerSec: 6142500 +BytesPerReq: 36178.8 +DurationPerReq: 295.642 +BusyWorkers: 70 +IdleWorkers: 1037 +Scoreboard: ___... +TLSSessionCacheStatus +CacheType: SHMCB +CacheSharedMemory: 512000 +CacheCurrentEntries: 1969 +CacheSubcaches: 32 +CacheIndexesPerSubcaches: 88 +CacheTimeLeftOldestAvg: 295 +CacheTimeLeftOldestMin: 295 +CacheTimeLeftOldestMax: 296 +CacheIndexUsage: 69% +CacheUsage: 99% +CacheStoreCount: 22984008 +CacheReplaceCount: 0 +CacheExpireCount: 0 +CacheDiscardCount: 22976594 +CacheRetrieveHitCount: 5501 +CacheRetrieveMissCount: 4630 +CacheRemoveHitCount: 5491 +CacheRemoveMissCount: 51 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/apache/testdata/lighttpd-status.txt b/src/go/collectors/go.d.plugin/modules/apache/testdata/lighttpd-status.txt new file mode 100644 index 00000000000000..07d8e06e83618c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/testdata/lighttpd-status.txt @@ -0,0 +1,6 @@ +Total Accesses: 12 +Total kBytes: 4 +Uptime: 11 +BusyServers: 3 +IdleServers: 125 +Scoreboard: khr_____________________________________________________________________________________________________________________________ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/apache/testdata/simple-status-mpm-event.txt b/src/go/collectors/go.d.plugin/modules/apache/testdata/simple-status-mpm-event.txt new file mode 100644 index 00000000000000..8093eacf917f30 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/apache/testdata/simple-status-mpm-event.txt @@ -0,0 +1,24 @@ +127.0.0.1 +ServerVersion: Apache/2.4.37 (Unix) +ServerMPM: event +Server Built: Oct 23 2018 18:27:46 +CurrentTime: Sunday, 13-Jan-2019 21:43:56 MSK +RestartTime: Sunday, 13-Jan-2019 21:43:53 MSK +ParentServerConfigGeneration: 1 +ParentServerMPMGeneration: 0 +ServerUptimeSeconds: 2 +ServerUptime: 2 seconds +Load1: 0.77 +Load5: 0.93 +Load15: 1.03 +BusyWorkers: 1 +IdleWorkers: 74 +Processes: 3 +Stopping: 0 +BusyWorkers: 1 +IdleWorkers: 74 +ConnsTotal: 0 +ConnsAsyncWriting: 0 +ConnsAsyncKeepAlive: 0 +ConnsAsyncClosing: 0 +Scoreboard: 
________________________________________________________________W__________.....................................................................................................................................................................................................................................................................................................................................
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/bind/README.md b/src/go/collectors/go.d.plugin/modules/bind/README.md
new file mode 100644
index 00000000000000..1240e547003552
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/bind/README.md
@@ -0,0 +1,117 @@
+<!--
+title: "Bind9 monitoring with Netdata"
+description: "Monitor the health and performance of Bind9 DNS servers with zero configuration, per-second metric granularity, and interactive visualizations."
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/bind/README.md"
+sidebar_label: "Bind9"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Webapps"
+-->
+
+# Bind9 collector
+
+[`Bind9`](https://www.isc.org/bind/) (or named) is a very flexible, full-featured DNS system.
+
+This module will monitor one or more `Bind9` servers, depending on your configuration.
+
+## Requirements
+
+- `bind` version 9.9+ with configured `statistics-channels`
+
+For detailed information on how to get your bind installation ready, please refer to the following articles:
+
+- [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/)
+- [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics)
+- [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+
+Normally, you will need something like this in your `named.conf.options`:
+
+```
+statistics-channels {
+    inet 127.0.0.1 port 8653 allow { 127.0.0.1; };
+    inet ::1 port 8653 allow { ::1; };
+};
+```
+
+## Charts
+
+It produces the following charts:
+
+- Global Received Requests by IP version (IPv4, IPv6) in `requests/s`
+- Global Successful Queries in `queries/s`
+- Global Recursive Clients in `clients`
+- Global Queries by IP Protocol (TCP, UDP) in `queries/s`
+- Global Queries Analysis in `queries/s`
+- Global Received Updates in `updates/s`
+- Global Query Failures in `failures/s`
+- Global Query Failures Analysis in `failures/s`
+- Global Server Statistics in `operations/s`
+- Global Incoming Requests by OpCode in `requests/s`
+- Global Incoming Requests by Query Type in `requests/s`
+
+Per View Statistics (the following set will be added for each bind view):
+
+- Resolver Active Queries in `queries`
+- Resolver Statistics in `operations/s`
+- Resolver Round Trip Time in `queries/s`
+- Resolver Requests by Query Type in `requests/s`
+- Resolver Cache Hits in `operations/s`
+
+## Configuration
+
+Edit the `go.d/bind.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/bind.conf
+```
+
+Only `url` is required.
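+
+To confirm the statistics channel answers before configuring the collector, a quick Go sketch can help (assuming the default `127.0.0.1:8653` channel from the snippet above; the collector queries the same `/server` endpoint):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Probe the JSON statistics endpoint exposed by statistics-channels.
+	resp, err := http.Get("http://127.0.0.1:8653/json/v1/server")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(resp.Status)
+	fmt.Println(len(body), "bytes of statistics")
+}
+```
+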
Here is an example for several servers: + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8653/json/v1 + + - name: local + url: http://127.0.0.1:8653/xml/v3 + + - name: remote + url: http://203.0.113.10:8653/xml/v3 + + - name: local_with_views + url: http://127.0.0.1:8653/json/v1 + permit_view: '!_* *' +``` + +View filter syntax: [simple patterns](https://docs.netdata.cloud/libnetdata/simple_pattern/). + +For all available options please see +module [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/bind.conf). + +## Troubleshooting + +To troubleshoot issues with the `bind` collector, run the `go.d.plugin` with the debug option enabled. The output should +give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m bind + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/bind/bind.go b/src/go/collectors/go.d.plugin/modules/bind/bind.go new file mode 100644 index 00000000000000..bcca0204e1b715 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/bind.go @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + _ "embed" + "fmt" + "strings" + "time" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("bind", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + defaultURL = "http://127.0.0.1:8653/json/v1" + defaultHTTPTimeout = time.Second * 2 +) + +// New creates Bind with default values. +func New() *Bind { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } + + return &Bind{ + Config: config, + charts: &Charts{}, + } +} + +type bindAPIClient interface { + serverStats() (*serverStats, error) +} + +// Config is the Bind module configuration. +type Config struct { + web.HTTP `yaml:",inline"` + PermitView string `yaml:"permit_view"` +} + +// Bind Bind module. +type Bind struct { + module.Base + Config `yaml:",inline"` + + bindAPIClient + permitView matcher.Matcher + charts *Charts +} + +// Cleanup makes cleanup. +func (Bind) Cleanup() {} + +// Init makes initialization. 
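+// It validates the configured URL, creates the HTTP client, selects the
+// JSON (/json/v1) or XML (/xml/v3) API client based on the URL suffix,
+// and compiles the optional permit_view simple-patterns matcher.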
+func (b *Bind) Init() bool { + if b.URL == "" { + b.Error("URL not set") + return false + } + + client, err := web.NewHTTPClient(b.Client) + if err != nil { + b.Errorf("error on creating http client : %v", err) + return false + } + + switch { + case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ + b.bindAPIClient = newXML3Client(client, b.Request) + case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ + b.bindAPIClient = newJSONClient(client, b.Request) + default: + b.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) + return false + } + + if b.PermitView != "" { + m, err := matcher.NewSimplePatternsMatcher(b.PermitView) + if err != nil { + b.Errorf("error on creating permitView matcher : %v", err) + return false + } + b.permitView = matcher.WithCache(m) + } + + return true +} + +// Check makes check. +func (b *Bind) Check() bool { + return len(b.Collect()) > 0 +} + +// Charts creates Charts. +func (b Bind) Charts() *Charts { + return b.charts +} + +// Collect collects metrics. +func (b *Bind) Collect() map[string]int64 { + metrics := make(map[string]int64) + + s, err := b.serverStats() + if err != nil { + b.Error(err) + return nil + } + b.collectServerStats(metrics, s) + + return metrics +} + +func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) { + var chart *Chart + + for k, v := range stats.NSStats { + var ( + algo = module.Incremental + dimName = k + chartID string + ) + switch { + default: + continue + case k == "RecursClients": + dimName = "clients" + chartID = keyRecursiveClients + algo = module.Absolute + case k == "Requestv4": + dimName = "IPv4" + chartID = keyReceivedRequests + case k == "Requestv6": + dimName = "IPv6" + chartID = keyReceivedRequests + case k == "QryFailure": + dimName = "failures" + chartID = keyQueryFailures + case k == "QryUDP": + dimName = "UDP" + chartID = keyProtocolsQueries + case k == "QryTCP": + dimName = "TCP" + chartID = keyProtocolsQueries + case k == "QrySuccess": + dimName = "queries" + chartID = keyQueriesSuccess + case strings.HasSuffix(k, "QryRej"): + chartID = keyQueryFailuresDetail + case strings.HasPrefix(k, "Qry"): + chartID = keyQueriesAnalysis + case strings.HasPrefix(k, "Update"): + chartID = keyReceivedUpdates + } + + if !b.charts.Has(chartID) { + _ = b.charts.Add(charts[chartID].Copy()) + } + + chart = b.charts.Get(chartID) + + if !chart.HasDim(k) { + _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + delete(stats.NSStats, k) + metrics[k] = v + } + + for _, v := range []struct { + item map[string]int64 + chartID string + }{ + {item: stats.NSStats, chartID: keyNSStats}, + {item: stats.OpCodes, chartID: keyInOpCodes}, + {item: stats.QTypes, chartID: keyInQTypes}, + {item: stats.SockStats, chartID: keyInSockStats}, + } { + if len(v.item) == 0 { + continue + } + + if !b.charts.Has(v.chartID) { + _ = b.charts.Add(charts[v.chartID].Copy()) + } + + chart = b.charts.Get(v.chartID) + + for key, val := range v.item { + if !chart.HasDim(key) { + _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + + metrics[key] = val + } + } + + if !(b.permitView != nil && len(stats.Views) > 0) { + return + } + + for name, view := range stats.Views { + if !b.permitView.MatchString(name) { + continue + } + r := view.Resolver + + delete(r.Stats, "BucketSize") + + for key, val := range r.Stats { + var ( + algo = module.Incremental + dimName = key + chartKey string + ) + + switch { + default: + chartKey = keyResolverStats + case key 
== "NumFetch": + chartKey = keyResolverNumFetch + dimName = "queries" + algo = module.Absolute + case strings.HasPrefix(key, "QryRTT"): + // TODO: not ordered + chartKey = keyResolverRTT + } + + chartID := fmt.Sprintf(chartKey, name) + + if !b.charts.Has(chartID) { + chart = charts[chartKey].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + dimID := fmt.Sprintf("%s_%s", name, key) + + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + metrics[dimID] = val + } + + if len(r.QTypes) > 0 { + chartID := fmt.Sprintf(keyResolverInQTypes, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverInQTypes].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + + for key, val := range r.QTypes { + dimID := fmt.Sprintf("%s_%s", name, key) + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + metrics[dimID] = val + } + } + + if len(r.CacheStats) > 0 { + chartID := fmt.Sprintf(keyResolverCacheHits, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverCacheHits].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"] + metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"] + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/bind/bind_test.go b/src/go/collectors/go.d.plugin/modules/bind/bind_test.go new file mode 100644 index 00000000000000..65ff36af0a7e75 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/bind_test.go @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + jsonServerData, _ = os.ReadFile("testdata/query-server.json") + xmlServerData, _ = os.ReadFile("testdata/query-server.xml") +) + +func TestNew(t *testing.T) { + job := New() + assert.IsType(t, (*Bind)(nil), job) + assert.NotNil(t, job.charts) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestBind_Cleanup(t *testing.T) { New().Cleanup() } + +func TestBind_Init(t *testing.T) { + // OK + job := New() + assert.True(t, job.Init()) + assert.NotNil(t, job.bindAPIClient) + + //NG + job = New() + job.URL = "" + assert.False(t, job.Init()) + job.URL = defaultURL[:len(defaultURL)-1] + assert.False(t, job.Init()) +} + +func TestBind_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/json/v1/server" { + _, _ = w.Write(jsonServerData) + } + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/json/v1" + + require.True(t, job.Init()) + require.True(t, job.Check()) +} + +func TestBind_CheckNG(t *testing.T) { + job := New() + + job.URL = "http://127.0.0.1:38001/xml/v3" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestBind_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestBind_CollectJSON(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == 
"/json/v1/server" { + _, _ = w.Write(jsonServerData) + } + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/json/v1" + job.PermitView = "*" + + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "_default_Queryv4": 4503685324, + "_default_NSEC": 53193, + "_default_NSEC3PARAM": 993, + "_default_ANY": 5149356, + "QryFORMERR": 8, + "CookieMatch": 125065, + "A6": 538255, + "MAILA": 44, + "ExpireOpt": 195, + "CNAME": 534171, + "TYPE115": 285, + "_default_RESERVED0": 19, + "_default_ClientCookieOut": 3790767469, + "_default_CookieClientOk": 297765763, + "QryFailure": 225786697, + "TYPE127": 1, + "_default_GlueFetchv4": 110619519, + "_default_Queryv6": 291939086, + "UPDATE": 18836, + "RESERVED0": 13705, + "_default_CacheHits": 405229520524, + "Requestv6": 155, + "QryTCP": 4226324, + "RESERVED15": 0, + "QUERY": 36766967932, + "EUI64": 627, + "_default_NXDOMAIN": 1245990908, + "_default_REFUSED": 106664780, + "_default_EUI64": 2087, + "QrySERVFAIL": 219515158, + "QryRecursion": 3666523564, + "MX": 1483690, + "DNSKEY": 143483, + "_default_TYPE115": 112, + "_default_Others": 813, + "_default_CacheMisses": 127371, + "RateDropped": 219, + "NAPTR": 109959, + "NSEC": 81, + "AAAA": 3304112238, + "_default_QryRTT500": 2071767970, + "_default_TYPE127": 2, + "_default_A6": 556692, + "QryAuthAns": 440508475, + "RecursClients": 74, + "XfrRej": 97, + "LOC": 52, + "CookieIn": 1217208, + "RRSIG": 25192, + "_default_LOC": 21, + "ReqBadEDNSVer": 450, + "MG": 4, + "_default_GlueFetchv6": 121100044, + "_default_HINFO": 1, + "IQUERY": 199, + "_default_BadCookieRcode": 14779, + "AuthQryRej": 148023, + "QrySuccess": 28766465065, + "SRV": 27637747, + "TYPE223": 2, + "CookieNew": 1058677, + "_default_QryRTT10": 628295, + "_default_ServerCookieOut": 364811250, + "RESERVED11": 3, + "_default_CookieIn": 298084581, + "_default_DS": 973892, + "_bind_CacheHits": 0, + "STATUS": 35546, + "TLSA": 297, + "_default_SERVFAIL": 6523360, + "_default_GlueFetchv4Fail": 3949012, + "_default_NULL": 3548, + "UpdateRej": 15661, + "RESERVED10": 5, + "_default_EDNS0Fail": 3982564, + "_default_DLV": 20418, + "ANY": 298451299, + "_default_GlueFetchv6Fail": 91728801, + "_default_RP": 134, + "_default_AAAA": 817525939, + "X25": 2, + "NS": 5537956, + "_default_NumFetch": 100, + "_default_DNSKEY": 182224, + "QryUDP": 36455909449, + "QryReferral": 1152155, + "QryNXDOMAIN": 5902446156, + "TruncatedResp": 25882799, + "DNAME": 1, + "DLV": 37676, + "_default_FORMERR": 3827518, + "_default_RRSIG": 191628, + "RecQryRej": 225638588, + "QryDropped": 52141050, + "Response": 36426730232, + "RESERVED14": 0, + "_default_SPF": 16521, + "_default_DNAME": 6, + "Requestv4": 36767496594, + "CookieNoMatch": 33466, + "RESERVED9": 0, + "_default_QryRTT800": 2709649, + "_default_QryRTT1600": 455315, + "_default_OtherError": 1426431, + "_default_MX": 1575795, + "QryNoauthAns": 35538538399, + "NSIDOpt": 81, + "ReqTCP": 4234792, + "SOA": 3860272, + "RESERVED8": 0, + "RESERVED13": 8, + "MAILB": 42, + "AXFR": 105, + "QryNxrrset": 1308983498, + "SPF": 2872, + "PTR": 693769261, + "_default_Responsev4": 4169576370, + "_default_QryRTT100": 2086168894, + "_default_Retry": 783763680, + "_default_SRV": 3848459, + "QryDuplicate": 288617636, + "ECSOpt": 8742938, + "A": 32327037206, + "DS": 1687895, + "RESERVED12": 1, + "_default_QryRTT1600+": 27639, + "_default_TXT": 43595113, + "_default_CDS": 251, + "RESERVED6": 7401, + "RESERVED3": 2, + "_default_Truncated": 14015078, + "_default_NextItem": 1788902, + "_default_Responsev6": 
151, + "_default_QueryTimeout": 335575100, + "_default_A": 3673673090, + "ReqEdns0": 532104182, + "OtherOpt": 3425542, + "NULL": 3604, + "HINFO": 9, + "_default_SOA": 1326766, + "_default_NAPTR": 30685, + "_default_PTR": 208067284, + "_default_CNAME": 38153754, + "RespEDNS0": 527991455, + "RESERVED7": 0, + "TXT": 100045556, + "_default_Lame": 1975334, + "_bind_CacheMisses": 509, + "IXFR": 33, + "_default_NS": 675609, + "_default_AFSDB": 5, + "NOTIFY": 390443, + "Others": 74006, + } + + assert.Equal(t, expected, job.Collect()) + assert.Len(t, *job.charts, 17) +} + +func TestBind_CollectXML3(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/xml/v3/server" { + _, _ = w.Write(xmlServerData) + } + })) + defer ts.Close() + + job := New() + job.PermitView = "*" + job.URL = ts.URL + "/xml/v3" + + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "_bind_CookieClientOk": 0, + "_bind_ValNegOk": 0, + "_bind_GlueFetchv4Fail": 0, + "_bind_ValFail": 0, + "RateSlipped": 0, + "_default_ValFail": 0, + "_default_TYPE127": 2, + "TLSA": 299, + "_default_FORMERR": 3831796, + "_default_ValNegOk": 0, + "_default_RRSIG": 191877, + "_default_CacheHits": 405816752908, + "CookieBadTime": 0, + "RESERVED14": 0, + "_default_SPF": 16563, + "RESERVED3": 2, + "NS": 5545011, + "QrySERVFAIL": 219790234, + "UPDATE": 18839, + "_default_NAPTR": 30706, + "RESERVED13": 8, + "_default_CookieIn": 298556974, + "_bind_Retry": 0, + "_default_SOA": 1327966, + "_bind_Truncated": 0, + "RESERVED6": 7401, + "_default_CookieClientOk": 298237641, + "_default_QueryTimeout": 336165169, + "SPF": 2887, + "_default_DNAME": 6, + "_bind_Lame": 0, + "QryUDP": 36511992002, + "NOTIFY": 390521, + "DNAME": 1, + "DS": 1688561, + "_default_OtherError": 1464741, + "_default_Retry": 784916992, + "_default_TXT": 43650696, + "QryBADCOOKIE": 0, + "RespEDNS0": 528451140, + "TXT": 100195931, + "OtherOpt": 3431439, + "_default_HINFO": 1, + "RESERVED0": 13705, + "_bind_CacheHits": 0, + "ReqTCP": 4241537, + "RespTSIG": 0, + "RESERVED11": 3, + "_default_QryRTT100": 2087797539, + "_default_REFUSED": 106782830, + "_bind_SERVFAIL": 0, + "X25": 2, + "_default_RP": 134, + "QryDuplicate": 289518897, + "CookieNoMatch": 34013, + "_default_BadCookieRcode": 15399, + "_default_CacheMisses": 127371, + "_bind_Mismatch": 0, + "_default_ServerCookieOut": 365308714, + "_bind_QryRTT500": 0, + "RPZRewrites": 0, + "A": 32377004350, + "_default_NextItem": 1790135, + "_default_MX": 1576150, + "_bind_REFUSED": 0, + "_bind_ZoneQuota": 0, + "_default_ServerQuota": 0, + "_default_ANY": 5149916, + "_default_EUI64": 2087, + "_default_QueryCurUDP": 0, + "RESERVED7": 0, + "IXFR": 33, + "_default_Queryv4": 4509791268, + "_default_GlueFetchv4": 110749701, + "_default_TYPE115": 112, + "_bind_QueryAbort": 0, + "UpdateReqFwd": 0, + "_default_NSEC3PARAM": 995, + "_bind_NextItem": 0, + "RecursClients": 64, + "QryReferral": 1152178, + "QryFORMERR": 8, + "CookieIn": 1220424, + "NSIDOpt": 81, + "MAILA": 44, + "TYPE223": 2, + "RRSIG": 25193, + "UpdateBadPrereq": 0, + "UpdateRej": 15661, + "QryAuthAns": 440885288, + "_default_PTR": 208337408, + "_default_Others": 813, + "_default_NS": 676773, + "_bind_GlueFetchv4": 0, + "QryNoauthAns": 35593104164, + "QryRecursion": 3671792792, + "_default_ClientCookieOut": 3795901994, + "_bind_BadEDNSVersion": 0, + "ReqEdns0": 532586114, + "RateDropped": 230, + "_default_ValOk": 0, + "CNAME": 535141, + "AuthQryRej": 148159, + "RESERVED10": 5, + 
"_default_QueryCurTCP": 0, + "_bind_Queryv4": 0, + "_bind_CacheMisses": 509, + "ExpireOpt": 195, + "XfrRej": 97, + "_default_DNSKEY": 182399, + "RecQryRej": 225832466, + "NSEC": 81, + "_default_Responsev4": 4175093103, + "_bind_ValOk": 0, + "_bind_QueryCurTCP": 0, + "Requestv4": 36823884979, + "DNSKEY": 143600, + "_default_LOC": 21, + "UpdateRespFwd": 0, + "AXFR": 105, + "_bind_CookieIn": 0, + "_default_QryRTT1600": 455849, + "_bind_BadCookieRcode": 0, + "QryNXDOMAIN": 5911582433, + "ReqSIG0": 0, + "QUERY": 36823356081, + "NULL": 3606, + "_default_Lame": 1979599, + "_default_DS": 974240, + "SRV": 27709732, + "_bind_QuerySockFail": 0, + "MG": 4, + "_default_QryRTT800": 2712733, + "_bind_QryRTT1600+": 0, + "DNS64": 0, + "_default_Truncated": 14028716, + "_default_QryRTT10": 629577, + "_default_SERVFAIL": 6533579, + "_default_AFSDB": 5, + "STATUS": 35585, + "Response": 36482142477, + "KeyTagOpt": 0, + "_default_Mismatch": 0, + "Requestv6": 156, + "LOC": 52, + "_bind_NXDOMAIN": 0, + "PTR": 694347710, + "_default_NSEC": 53712, + "_bind_QryRTT100": 0, + "RESERVED8": 0, + "DLV": 37712, + "HINFO": 9, + "_default_AAAA": 818803359, + "QryNXRedirRLookup": 0, + "TYPE127": 1, + "_default_EDNS0Fail": 3987571, + "_default_CDS": 251, + "_bind_ServerCookieOut": 0, + "_bind_QueryCurUDP": 0, + "_bind_GlueFetchv6Fail": 0, + "UpdateFail": 0, + "_default_ZoneQuota": 0, + "_default_QuerySockFail": 0, + "_default_GlueFetchv6Fail": 91852240, + "RespSIG0": 0, + "_default_GlueFetchv4Fail": 3964627, + "_bind_Responsev6": 0, + "_default_GlueFetchv6": 121268854, + "_default_Queryv6": 292282376, + "TruncatedResp": 25899017, + "ReqTSIG": 0, + "_default_BadEDNSVersion": 0, + "_bind_NumFetch": 0, + "RESERVED12": 1, + "_default_Responsev6": 152, + "_default_SRV": 3855156, + "ANY": 298567781, + "_default_CNAME": 38213966, + "_bind_ClientCookieOut": 0, + "NAPTR": 109998, + "_default_QryRTT500": 2075608518, + "_default_A6": 558874, + "_bind_OtherError": 0, + "CookieMatch": 125340, + "_default_QryRTT1600+": 27681, + "_default_DLV": 20468, + "_default_NULL": 3554, + "_bind_Queryv6": 0, + "_bind_QueryTimeout": 0, + "_bind_ValAttempt": 0, + "RESERVED9": 0, + "A6": 539773, + "MX": 1484497, + "QrySuccess": 28810069822, + "XfrReqDone": 0, + "RESERVED15": 0, + "MAILB": 42, + "Others": 74007, + "_bind_ServerQuota": 0, + "_bind_EDNS0Fail": 0, + "QryNxrrset": 1311185019, + "QryFailure": 225980711, + "ReqBadSIG": 0, + "UpdateFwdFail": 0, + "ECSOpt": 8743959, + "QryDropped": 52215943, + "EUI64": 627, + "_default_ValAttempt": 0, + "_default_A": 3678445415, + "_bind_QryRTT800": 0, + "_default_NXDOMAIN": 1247746765, + "_default_RESERVED0": 19, + "_default_NumFetch": 62, + "_bind_Responsev4": 0, + "_bind_QryRTT1600": 0, + "CookieNew": 1061071, + "ReqBadEDNSVer": 450, + "TYPE115": 285, + "_bind_FORMERR": 0, + "SOA": 3863889, + "_bind_QryRTT10": 0, + "CookieBadSize": 0, + "_bind_GlueFetchv6": 0, + "QryNXRedir": 0, + "AAAA": 3309600766, + "_default_QueryAbort": 0, + "QryTCP": 4233061, + "UpdateDone": 0, + "IQUERY": 199, + } + + assert.Equal(t, expected, job.Collect()) + assert.Len(t, *job.charts, 20) +} + +func TestBind_InvalidData(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("hello and goodbye")) })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/json/v1" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestBind_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 
w.WriteHeader(404) })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/json/v1" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/bind/charts.go b/src/go/collectors/go.d.plugin/modules/bind/charts.go new file mode 100644 index 00000000000000..d2e0cf00431bd9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/charts.go @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + // Charts is an alias for module.Charts. + Charts = module.Charts + // Chart is an alias for module.Chart. + Chart = module.Chart + // Dims is an alias for module.Dims. + Dims = module.Dims + // Dim is an alias for module.Dim. + Dim = module.Dim +) + +const ( + // TODO: add to orchestrator module + basePriority = 70000 + + keyReceivedRequests = "received_requests" + keyQueriesSuccess = "queries_success" + keyRecursiveClients = "recursive_clients" + keyProtocolsQueries = "protocols_queries" + keyQueriesAnalysis = "queries_analysis" + keyReceivedUpdates = "received_updates" + keyQueryFailures = "query_failures" + keyQueryFailuresDetail = "query_failures_detail" + keyNSStats = "nsstats" + keyInOpCodes = "in_opcodes" + keyInQTypes = "in_qtypes" + keyInSockStats = "in_sockstats" + + keyResolverStats = "view_resolver_stats_%s" + keyResolverRTT = "view_resolver_rtt_%s" + keyResolverInQTypes = "view_resolver_qtypes_%s" + keyResolverCacheHits = "view_resolver_cachehits_%s" + keyResolverNumFetch = "view_resolver_numfetch_%s" +) + +var charts = map[string]Chart{ + keyRecursiveClients: { + ID: keyRecursiveClients, + Title: "Global Recursive Clients", + Units: "clients", + Fam: "clients", + Ctx: "bind.recursive_clients", + Priority: basePriority + 1, + }, + keyReceivedRequests: { + ID: keyReceivedRequests, + Title: "Global Received Requests by IP version", + Units: "requests/s", + Fam: "requests", + Ctx: "bind.requests", + Type: module.Stacked, + Priority: basePriority + 2, + }, + keyQueriesSuccess: { + ID: keyQueriesSuccess, + Title: "Global Successful Queries", + Units: "queries/s", + Fam: "queries", + Ctx: "bind.queries_success", + Priority: basePriority + 3, + }, + keyProtocolsQueries: { + ID: keyProtocolsQueries, + Title: "Global Queries by IP Protocol", + Units: "queries/s", + Fam: "queries", + Ctx: "bind.protocol_queries", + Type: module.Stacked, + Priority: basePriority + 4, + }, + keyQueriesAnalysis: { + ID: keyQueriesAnalysis, + Title: "Global Queries Analysis", + Units: "queries/s", + Fam: "queries", + Ctx: "bind.global_queries", + Type: module.Stacked, + Priority: basePriority + 5, + }, + keyReceivedUpdates: { + ID: keyReceivedUpdates, + Title: "Global Received Updates", + Units: "updates/s", + Fam: "updates", + Ctx: "bind.global_updates", + Type: module.Stacked, + Priority: basePriority + 6, + }, + keyQueryFailures: { + ID: keyQueryFailures, + Title: "Global Query Failures", + Units: "failures/s", + Fam: "failures", + Ctx: "bind.global_failures", + Priority: basePriority + 7, + }, + keyQueryFailuresDetail: { + ID: keyQueryFailuresDetail, + Title: "Global Query Failures Analysis", + Units: "failures/s", + Fam: "failures", + Ctx: "bind.global_failures_detail", + Type: module.Stacked, + Priority: basePriority + 8, + }, + keyNSStats: { + ID: keyNSStats, + Title: "Global Server Statistics", + Units: "operations/s", + Fam: "other", + Ctx: "bind.nsstats", + Priority: basePriority + 9, + }, + keyInOpCodes: { + ID: keyInOpCodes, + Title: "Incoming 
Requests by OpCode", + Units: "requests/s", + Fam: "requests", + Ctx: "bind.in_opcodes", + Type: module.Stacked, + Priority: basePriority + 10, + }, + keyInQTypes: { + ID: keyInQTypes, + Title: "Incoming Requests by Query Type", + Units: "requests/s", + Fam: "requests", + Ctx: "bind.in_qtypes", + Type: module.Stacked, + Priority: basePriority + 11, + }, + keyInSockStats: { + ID: keyInSockStats, + Title: "Socket Statistics", + Units: "operations/s", + Fam: "sockets", + Ctx: "bind.in_sockstats", + Priority: basePriority + 12, + }, + + keyResolverRTT: { + ID: keyResolverRTT, + Title: "Resolver Round Trip Time", + Units: "queries/s", + Fam: "view %s", + Ctx: "bind.resolver_rtt", + Type: module.Stacked, + Priority: basePriority + 22, + }, + keyResolverStats: { + ID: keyResolverStats, + Title: "Resolver Statistics", + Units: "operations/s", + Fam: "view %s", + Ctx: "bind.resolver_stats", + Priority: basePriority + 23, + }, + keyResolverInQTypes: { + ID: keyResolverInQTypes, + Title: "Resolver Requests by Query Type", + Units: "requests/s", + Fam: "view %s", + Ctx: "bind.resolver_qtypes", + Type: module.Stacked, + Priority: basePriority + 24, + }, + keyResolverNumFetch: { + ID: keyResolverNumFetch, + Title: "Resolver Active Queries", + Units: "queries", + Fam: "view %s", + Ctx: "bind.resolver_active_queries", + Priority: basePriority + 25, + }, + keyResolverCacheHits: { + ID: keyResolverCacheHits, + Title: "Resolver Cache Hits", + Units: "operations/s", + Fam: "view %s", + Ctx: "bind.resolver_cachehits", + Type: module.Area, + Priority: basePriority + 26, + Dims: Dims{ + {ID: "%s_CacheHits", Name: "hits", Algo: module.Incremental}, + {ID: "%s_CacheMisses", Name: "misses", Algo: module.Incremental, Mul: -1}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/bind/config_schema.json b/src/go/collectors/go.d.plugin/modules/bind/config_schema.json new file mode 100644 index 00000000000000..042f47a1a2c57c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/config_schema.json @@ -0,0 +1,21 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/bind job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string", + "description": "BIND statistics-channels endpoint URL." + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": ["name", "url"] +}
 diff --git a/src/go/collectors/go.d.plugin/modules/bind/json_client.go b/src/go/collectors/go.d.plugin/modules/bind/json_client.go new file mode 100644 index 00000000000000..1537131aae2501 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/json_client.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +type serverStats = jsonServerStats + +type jsonServerStats struct { + OpCodes map[string]int64 + QTypes map[string]int64 + NSStats map[string]int64 + SockStats map[string]int64 + Views map[string]jsonView +} + +type jsonView struct { + Resolver jsonViewResolver +} + +type jsonViewResolver struct { + Stats map[string]int64 + QTypes map[string]int64 + CacheStats map[string]int64 +} + +func newJSONClient(client *http.Client, request web.Request) *jsonClient { + return &jsonClient{httpClient: client, request: request} +} + +type jsonClient struct { + httpClient *http.Client + request web.Request +} + +func (c jsonClient) serverStats() (*serverStats, error) { + req := c.request.Copy() + u, err := url.Parse(req.URL) + if err != nil { + return nil, fmt.Errorf("error on parsing URL: %v", err) + } + + u.Path = path.Join(u.Path, "/server") + req.URL = u.String() + + httpReq, err := web.NewHTTPRequest(req) + if err != nil { + return nil, fmt.Errorf("error on creating HTTP request: %v", err) + } + + resp, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("error on request : %v", err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode) + } + + stats := &jsonServerStats{} + if err = json.NewDecoder(resp.Body).Decode(stats); err != nil { + return nil, fmt.Errorf("error on decoding response from %s : %v", httpReq.URL, err) + } + return stats, nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.json b/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.json new file mode 100644 index 00000000000000..885a4e28e12175 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.json @@ -0,0 +1,302 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2018-04-26T08:27:05.582Z", + "config-time":"2019-02-05T21:24:44.108Z", + "current-time":"2019-02-06T07:01:27.538Z", + "version":"9.11.3-1~bpo9+1-Debian", + "opcodes":{ + "QUERY":36766967932, + "IQUERY":199, + "STATUS":35546, + "RESERVED3":2, + "NOTIFY":390443, + "UPDATE":18836, + "RESERVED6":7401, + "RESERVED7":0, + "RESERVED8":0, + "RESERVED9":0, + "RESERVED10":5, + "RESERVED11":3, + "RESERVED12":1, + "RESERVED13":8, + "RESERVED14":0, + "RESERVED15":0 + }, + "rcodes":{ + "NOERROR":30078966646, + "FORMERR":0, + "SERVFAIL":219515158, + "NXDOMAIN":5902446156, + "NOTIMP":0, + "REFUSED":225802272, + "YXDOMAIN":0, + "YXRRSET":0, + "NXRRSET":0, + "NOTAUTH":0, + "NOTZONE":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0, + "BADVERS":0, + "17":0, + "18":0, + "19":0, + "20":0, + "21":0, + "22":0, + "BADCOOKIE":0 + }, + "qtypes":{ + "RESERVED0":13705, + "A":32327037206, + "NS":5537956, +
"CNAME":534171, + "SOA":3860272, + "MG":4, + "NULL":3604, + "PTR":693769261, + "HINFO":9, + "MX":1483690, + "TXT":100045556, + "X25":2, + "AAAA":3304112238, + "LOC":52, + "SRV":27637747, + "NAPTR":109959, + "A6":538255, + "DNAME":1, + "DS":1687895, + "RRSIG":25192, + "NSEC":81, + "DNSKEY":143483, + "TLSA":297, + "SPF":2872, + "EUI64":627, + "TYPE115":285, + "TYPE127":1, + "TYPE223":2, + "IXFR":33, + "AXFR":105, + "MAILB":42, + "MAILA":44, + "ANY":298451299, + "DLV":37676, + "Others":74006 + }, + "nsstats":{ + "Requestv4":36767496594, + "Requestv6":155, + "ReqEdns0":532104182, + "ReqBadEDNSVer":450, + "ReqTCP":4234792, + "AuthQryRej":148023, + "RecQryRej":225638588, + "XfrRej":97, + "UpdateRej":15661, + "Response":36426730232, + "TruncatedResp":25882799, + "RespEDNS0":527991455, + "QrySuccess":28766465065, + "QryAuthAns":440508475, + "QryNoauthAns":35538538399, + "QryReferral":1152155, + "QryNxrrset":1308983498, + "QrySERVFAIL":219515158, + "QryFORMERR":8, + "QryNXDOMAIN":5902446156, + "QryRecursion":3666523564, + "QryDuplicate":288617636, + "QryDropped":52141050, + "QryFailure":225786697, + "RecursClients":74, + "RateDropped":219, + "QryUDP":36455909449, + "QryTCP":4226324, + "NSIDOpt":81, + "ExpireOpt":195, + "OtherOpt":3425542, + "CookieIn":1217208, + "CookieNew":1058677, + "CookieNoMatch":33466, + "CookieMatch":125065, + "ECSOpt":8742938 + }, + "zonestats":{ + "NotifyOutv4":992661, + "NotifyOutv6":691098, + "NotifyInv4":376341, + "NotifyRej":1, + "SOAOutv4":129981, + "AXFRReqv4":2044, + "IXFRReqv4":22794, + "XfrSuccess":50, + "XfrFail":25132 + }, + "resstats":{ + "Mismatch":20050151, + "QuerySockFail":341338, + "QueryCurUDP":91 + }, + "views":{ + "_default":{ + "resolver":{ + "stats":{ + "Queryv4":4503685324, + "Queryv6":291939086, + "Responsev4":4169576370, + "Responsev6":151, + "NXDOMAIN":1245990908, + "SERVFAIL":6523360, + "FORMERR":3827518, + "OtherError":1426431, + "EDNS0Fail":3982564, + "Truncated":14015078, + "Lame":1975334, + "Retry":783763680, + "QueryTimeout":335575100, + "GlueFetchv4":110619519, + "GlueFetchv6":121100044, + "GlueFetchv4Fail":3949012, + "GlueFetchv6Fail":91728801, + "QryRTT10":628295, + "QryRTT100":2086168894, + "QryRTT500":2071767970, + "QryRTT800":2709649, + "QryRTT1600":455315, + "QryRTT1600+":27639, + "NumFetch":100, + "BucketSize":31, + "REFUSED":106664780, + "ClientCookieOut":3790767469, + "ServerCookieOut":364811250, + "CookieIn":298084581, + "CookieClientOk":297765763, + "BadCookieRcode":14779, + "NextItem":1788902 + }, + "qtypes":{ + "RESERVED0":19, + "A":3673673090, + "NS":675609, + "CNAME":38153754, + "SOA":1326766, + "NULL":3548, + "PTR":208067284, + "HINFO":1, + "MX":1575795, + "TXT":43595113, + "RP":134, + "AFSDB":5, + "AAAA":817525939, + "LOC":21, + "SRV":3848459, + "NAPTR":30685, + "A6":556692, + "DNAME":6, + "DS":973892, + "RRSIG":191628, + "NSEC":53193, + "DNSKEY":182224, + "NSEC3PARAM":993, + "CDS":251, + "SPF":16521, + "EUI64":2087, + "TYPE115":112, + "TYPE127":2, + "ANY":5149356, + "DLV":20418, + "Others":813 + }, + "cache":{ + "A":169353, + "NS":307028, + "CNAME":37960, + "SOA":16, + "PTR":76913, + "MX":91, + "TXT":12499, + "AAAA":15550, + "SRV":42, + "DNAME":5, + "DS":3300, + "RRSIG":26832, + "NSEC":18379, + "DNSKEY":62, + "NSEC3PARAM":1, + "SPF":3, + "Others":1, + "!A":247, + "!NS":28, + "!SOA":6, + "!PTR":7, + "!MX":3, + "!TXT":247, + "!AAAA":22631, + "!SRV":72, + "!NAPTR":1, + "!A6":51, + "!DS":16, + "!SPF":1, + "NXDOMAIN":205872, + "#RRSIG":1, + "#NSEC":1 + }, + "cachestats":{ + "CacheHits":405229520524, + "CacheMisses":127371, + 
"QueryHits":171622440929, + "QueryMisses":5114505254, + "DeleteLRU":0, + "DeleteTTL":1673818609, + "CacheNodes":839357, + "CacheBuckets":532479, + "TreeMemTotal":1438467514974, + "TreeMemInUse":489426131, + "TreeMemMax":820437431, + "HeapMemTotal":455163904, + "HeapMemInUse":10855424, + "HeapMemMax":11527168 + }, + "adb":{ + "nentries":6143, + "entriescnt":47619, + "nnames":6143, + "namescnt":46743 + } + } + }, + "_bind":{ + "resolver":{ + "stats":{ + "BucketSize":31 + }, + "qtypes":{ + }, + "cache":{ + }, + "cachestats":{ + "CacheHits":0, + "CacheMisses":509, + "QueryHits":0, + "QueryMisses":509, + "DeleteLRU":0, + "DeleteTTL":0, + "CacheNodes":0, + "CacheBuckets":64, + "TreeMemTotal":287792, + "TreeMemInUse":29952, + "TreeMemMax":29952, + "HeapMemTotal":262144, + "HeapMemInUse":1024, + "HeapMemMax":1024 + }, + "adb":{ + "nentries":1021, + "nnames":1021 + } + } + } + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.xml b/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.xml new file mode 100644 index 00000000000000..515cdeabaf73a2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/testdata/query-server.xml @@ -0,0 +1,470 @@ +<?xml version="1.0" encoding="UTF-8"?> +<?xml-stylesheet type="text/xsl" href="/bind9.xsl"?> +<statistics version="3.8"> + <server> + <boot-time>2018-04-26T08:27:05.582Z</boot-time> + <config-time>2019-02-06T12:25:35.919Z</config-time> + <current-time>2019-02-06T12:29:31.168Z</current-time> + <version>9.11.3-1~bpo9+1-Debian</version> + <counters type="opcode"> + <counter name="QUERY">36823356081</counter> + <counter name="IQUERY">199</counter> + <counter name="STATUS">35585</counter> + <counter name="RESERVED3">2</counter> + <counter name="NOTIFY">390521</counter> + <counter name="UPDATE">18839</counter> + <counter name="RESERVED6">7401</counter> + <counter name="RESERVED7">0</counter> + <counter name="RESERVED8">0</counter> + <counter name="RESERVED9">0</counter> + <counter name="RESERVED10">5</counter> + <counter name="RESERVED11">3</counter> + <counter name="RESERVED12">1</counter> + <counter name="RESERVED13">8</counter> + <counter name="RESERVED14">0</counter> + <counter name="RESERVED15">0</counter> + </counters> + <counters type="rcode"> + <counter name="NOERROR">30124773524</counter> + <counter name="FORMERR">0</counter> + <counter name="SERVFAIL">219790234</counter> + <counter name="NXDOMAIN">5911582433</counter> + <counter name="NOTIMP">0</counter> + <counter name="REFUSED">225996286</counter> + <counter name="YXDOMAIN">0</counter> + <counter name="YXRRSET">0</counter> + <counter name="NXRRSET">0</counter> + <counter name="NOTAUTH">0</counter> + <counter name="NOTZONE">0</counter> + <counter name="RESERVED11">0</counter> + <counter name="RESERVED12">0</counter> + <counter name="RESERVED13">0</counter> + <counter name="RESERVED14">0</counter> + <counter name="RESERVED15">0</counter> + <counter name="BADVERS">0</counter> + <counter name="17">0</counter> + <counter name="18">0</counter> + <counter name="19">0</counter> + <counter name="20">0</counter> + <counter name="21">0</counter> + <counter name="22">0</counter> + <counter name="BADCOOKIE">0</counter> + </counters> + <counters type="qtype"> + <counter name="RESERVED0">13705</counter> + <counter name="A">32377004350</counter> + <counter name="NS">5545011</counter> + <counter name="CNAME">535141</counter> + <counter name="SOA">3863889</counter> + <counter name="MG">4</counter> + <counter name="NULL">3606</counter> + 
<counter name="PTR">694347710</counter> + <counter name="HINFO">9</counter> + <counter name="MX">1484497</counter> + <counter name="TXT">100195931</counter> + <counter name="X25">2</counter> + <counter name="AAAA">3309600766</counter> + <counter name="LOC">52</counter> + <counter name="SRV">27709732</counter> + <counter name="NAPTR">109998</counter> + <counter name="A6">539773</counter> + <counter name="DNAME">1</counter> + <counter name="DS">1688561</counter> + <counter name="RRSIG">25193</counter> + <counter name="NSEC">81</counter> + <counter name="DNSKEY">143600</counter> + <counter name="TLSA">299</counter> + <counter name="SPF">2887</counter> + <counter name="EUI64">627</counter> + <counter name="TYPE115">285</counter> + <counter name="TYPE127">1</counter> + <counter name="TYPE223">2</counter> + <counter name="IXFR">33</counter> + <counter name="AXFR">105</counter> + <counter name="MAILB">42</counter> + <counter name="MAILA">44</counter> + <counter name="ANY">298567781</counter> + <counter name="DLV">37712</counter> + <counter name="Others">74007</counter> + </counters> + <counters type="nsstat"> + <counter name="Requestv4">36823884979</counter> + <counter name="Requestv6">156</counter> + <counter name="ReqEdns0">532586114</counter> + <counter name="ReqBadEDNSVer">450</counter> + <counter name="ReqTSIG">0</counter> + <counter name="ReqSIG0">0</counter> + <counter name="ReqBadSIG">0</counter> + <counter name="ReqTCP">4241537</counter> + <counter name="AuthQryRej">148159</counter> + <counter name="RecQryRej">225832466</counter> + <counter name="XfrRej">97</counter> + <counter name="UpdateRej">15661</counter> + <counter name="Response">36482142477</counter> + <counter name="TruncatedResp">25899017</counter> + <counter name="RespEDNS0">528451140</counter> + <counter name="RespTSIG">0</counter> + <counter name="RespSIG0">0</counter> + <counter name="QrySuccess">28810069822</counter> + <counter name="QryAuthAns">440885288</counter> + <counter name="QryNoauthAns">35593104164</counter> + <counter name="QryReferral">1152178</counter> + <counter name="QryNxrrset">1311185019</counter> + <counter name="QrySERVFAIL">219790234</counter> + <counter name="QryFORMERR">8</counter> + <counter name="QryNXDOMAIN">5911582433</counter> + <counter name="QryRecursion">3671792792</counter> + <counter name="QryDuplicate">289518897</counter> + <counter name="QryDropped">52215943</counter> + <counter name="QryFailure">225980711</counter> + <counter name="XfrReqDone">0</counter> + <counter name="UpdateReqFwd">0</counter> + <counter name="UpdateRespFwd">0</counter> + <counter name="UpdateFwdFail">0</counter> + <counter name="UpdateDone">0</counter> + <counter name="UpdateFail">0</counter> + <counter name="UpdateBadPrereq">0</counter> + <counter name="RecursClients">64</counter> + <counter name="DNS64">0</counter> + <counter name="RateDropped">230</counter> + <counter name="RateSlipped">0</counter> + <counter name="RPZRewrites">0</counter> + <counter name="QryUDP">36511992002</counter> + <counter name="QryTCP">4233061</counter> + <counter name="NSIDOpt">81</counter> + <counter name="ExpireOpt">195</counter> + <counter name="OtherOpt">3431439</counter> + <counter name="CookieIn">1220424</counter> + <counter name="CookieNew">1061071</counter> + <counter name="CookieBadSize">0</counter> + <counter name="CookieBadTime">0</counter> + <counter name="CookieNoMatch">34013</counter> + <counter name="CookieMatch">125340</counter> + <counter name="ECSOpt">8743959</counter> + <counter name="QryNXRedir">0</counter> + <counter 
name="QryNXRedirRLookup">0</counter> + <counter name="QryBADCOOKIE">0</counter> + <counter name="KeyTagOpt">0</counter> + </counters> + <counters type="zonestat"> + <counter name="NotifyOutv4">992895</counter> + <counter name="NotifyOutv6">691254</counter> + <counter name="NotifyInv4">376354</counter> + <counter name="NotifyInv6">0</counter> + <counter name="NotifyRej">1</counter> + <counter name="SOAOutv4">130105</counter> + <counter name="SOAOutv6">0</counter> + <counter name="AXFRReqv4">2047</counter> + <counter name="AXFRReqv6">0</counter> + <counter name="IXFRReqv4">22814</counter> + <counter name="IXFRReqv6">0</counter> + <counter name="XfrSuccess">50</counter> + <counter name="XfrFail">25155</counter> + </counters> + <counters type="resstat"> + <counter name="Mismatch">20059475</counter> + <counter name="QuerySockFail">341338</counter> + <counter name="QueryCurUDP">58</counter> + <counter name="QueryCurTCP">1</counter> + </counters> + </server> + <views> + <view name="_default"> + <counters type="resqtype"> + <counter name="RESERVED0">19</counter> + <counter name="A">3678445415</counter> + <counter name="NS">676773</counter> + <counter name="CNAME">38213966</counter> + <counter name="SOA">1327966</counter> + <counter name="NULL">3554</counter> + <counter name="PTR">208337408</counter> + <counter name="HINFO">1</counter> + <counter name="MX">1576150</counter> + <counter name="TXT">43650696</counter> + <counter name="RP">134</counter> + <counter name="AFSDB">5</counter> + <counter name="AAAA">818803359</counter> + <counter name="LOC">21</counter> + <counter name="SRV">3855156</counter> + <counter name="NAPTR">30706</counter> + <counter name="A6">558874</counter> + <counter name="DNAME">6</counter> + <counter name="DS">974240</counter> + <counter name="RRSIG">191877</counter> + <counter name="NSEC">53712</counter> + <counter name="DNSKEY">182399</counter> + <counter name="NSEC3PARAM">995</counter> + <counter name="CDS">251</counter> + <counter name="SPF">16563</counter> + <counter name="EUI64">2087</counter> + <counter name="TYPE115">112</counter> + <counter name="TYPE127">2</counter> + <counter name="ANY">5149916</counter> + <counter name="DLV">20468</counter> + <counter name="Others">813</counter> + </counters> + <counters type="resstats"> + <counter name="Queryv4">4509791268</counter> + <counter name="Queryv6">292282376</counter> + <counter name="Responsev4">4175093103</counter> + <counter name="Responsev6">152</counter> + <counter name="NXDOMAIN">1247746765</counter> + <counter name="SERVFAIL">6533579</counter> + <counter name="FORMERR">3831796</counter> + <counter name="OtherError">1464741</counter> + <counter name="EDNS0Fail">3987571</counter> + <counter name="Mismatch">0</counter> + <counter name="Truncated">14028716</counter> + <counter name="Lame">1979599</counter> + <counter name="Retry">784916992</counter> + <counter name="QueryAbort">0</counter> + <counter name="QuerySockFail">0</counter> + <counter name="QueryCurUDP">0</counter> + <counter name="QueryCurTCP">0</counter> + <counter name="QueryTimeout">336165169</counter> + <counter name="GlueFetchv4">110749701</counter> + <counter name="GlueFetchv6">121268854</counter> + <counter name="GlueFetchv4Fail">3964627</counter> + <counter name="GlueFetchv6Fail">91852240</counter> + <counter name="ValAttempt">0</counter> + <counter name="ValOk">0</counter> + <counter name="ValNegOk">0</counter> + <counter name="ValFail">0</counter> + <counter name="QryRTT10">629577</counter> + <counter name="QryRTT100">2087797539</counter> + 
<counter name="QryRTT500">2075608518</counter> + <counter name="QryRTT800">2712733</counter> + <counter name="QryRTT1600">455849</counter> + <counter name="QryRTT1600+">27681</counter> + <counter name="NumFetch">62</counter> + <counter name="BucketSize">31</counter> + <counter name="REFUSED">106782830</counter> + <counter name="ClientCookieOut">3795901994</counter> + <counter name="ServerCookieOut">365308714</counter> + <counter name="CookieIn">298556974</counter> + <counter name="CookieClientOk">298237641</counter> + <counter name="BadEDNSVersion">0</counter> + <counter name="BadCookieRcode">15399</counter> + <counter name="ZoneQuota">0</counter> + <counter name="ServerQuota">0</counter> + <counter name="NextItem">1790135</counter> + </counters> + <cache name="_default"> + <rrset> + <name>A</name> + <counter>192185</counter> + </rrset> + <rrset> + <name>NS</name> + <counter>326554</counter> + </rrset> + <rrset> + <name>CNAME</name> + <counter>41900</counter> + </rrset> + <rrset> + <name>SOA</name> + <counter>15</counter> + </rrset> + <rrset> + <name>PTR</name> + <counter>82398</counter> + </rrset> + <rrset> + <name>MX</name> + <counter>80</counter> + </rrset> + <rrset> + <name>TXT</name> + <counter>11952</counter> + </rrset> + <rrset> + <name>AAAA</name> + <counter>16361</counter> + </rrset> + <rrset> + <name>SRV</name> + <counter>55</counter> + </rrset> + <rrset> + <name>NAPTR</name> + <counter>1</counter> + </rrset> + <rrset> + <name>DNAME</name> + <counter>1</counter> + </rrset> + <rrset> + <name>DS</name> + <counter>3760</counter> + </rrset> + <rrset> + <name>RRSIG</name> + <counter>28542</counter> + </rrset> + <rrset> + <name>NSEC</name> + <counter>19250</counter> + </rrset> + <rrset> + <name>DNSKEY</name> + <counter>57</counter> + </rrset> + <rrset> + <name>NSEC3PARAM</name> + <counter>1</counter> + </rrset> + <rrset> + <name>SPF</name> + <counter>4</counter> + </rrset> + <rrset> + <name>Others</name> + <counter>2</counter> + </rrset> + <rrset> + <name>!A</name> + <counter>287</counter> + </rrset> + <rrset> + <name>!NS</name> + <counter>42</counter> + </rrset> + <rrset> + <name>!SOA</name> + <counter>10</counter> + </rrset> + <rrset> + <name>!PTR</name> + <counter>6</counter> + </rrset> + <rrset> + <name>!MX</name> + <counter>2</counter> + </rrset> + <rrset> + <name>!TXT</name> + <counter>280</counter> + </rrset> + <rrset> + <name>!AAAA</name> + <counter>27381</counter> + </rrset> + <rrset> + <name>!SRV</name> + <counter>81</counter> + </rrset> + <rrset> + <name>!NAPTR</name> + <counter>2</counter> + </rrset> + <rrset> + <name>!A6</name> + <counter>38</counter> + </rrset> + <rrset> + <name>!DS</name> + <counter>20</counter> + </rrset> + <rrset> + <name>NXDOMAIN</name> + <counter>315286</counter> + </rrset> + </cache> + <counters type="adbstat"> + <counter name="nentries">2039</counter> + <counter name="entriescnt">14535</counter> + <counter name="nnames">2039</counter> + <counter name="namescnt">12286</counter> + </counters> + <counters type="cachestats"> + <counter name="CacheHits">405816752908</counter> + <counter name="CacheMisses">127371</counter> + <counter name="QueryHits">171876840110</counter> + <counter name="QueryMisses">5120854081</counter> + <counter name="DeleteLRU">0</counter> + <counter name="DeleteTTL">1675820766</counter> + <counter name="CacheNodes">1000477</counter> + <counter name="CacheBuckets">532479</counter> + <counter name="TreeMemTotal">1440529356195</counter> + <counter name="TreeMemInUse">642752571</counter> + <counter 
name="TreeMemMax">820437431</counter> + <counter name="HeapMemTotal">455163904</counter> + <counter name="HeapMemInUse">10855424</counter> + <counter name="HeapMemMax">11527168</counter> + </counters> + </view> + <view name="_bind"> + <counters type="resqtype" /> + <counters type="resstats"> + <counter name="Queryv4">0</counter> + <counter name="Queryv6">0</counter> + <counter name="Responsev4">0</counter> + <counter name="Responsev6">0</counter> + <counter name="NXDOMAIN">0</counter> + <counter name="SERVFAIL">0</counter> + <counter name="FORMERR">0</counter> + <counter name="OtherError">0</counter> + <counter name="EDNS0Fail">0</counter> + <counter name="Mismatch">0</counter> + <counter name="Truncated">0</counter> + <counter name="Lame">0</counter> + <counter name="Retry">0</counter> + <counter name="QueryAbort">0</counter> + <counter name="QuerySockFail">0</counter> + <counter name="QueryCurUDP">0</counter> + <counter name="QueryCurTCP">0</counter> + <counter name="QueryTimeout">0</counter> + <counter name="GlueFetchv4">0</counter> + <counter name="GlueFetchv6">0</counter> + <counter name="GlueFetchv4Fail">0</counter> + <counter name="GlueFetchv6Fail">0</counter> + <counter name="ValAttempt">0</counter> + <counter name="ValOk">0</counter> + <counter name="ValNegOk">0</counter> + <counter name="ValFail">0</counter> + <counter name="QryRTT10">0</counter> + <counter name="QryRTT100">0</counter> + <counter name="QryRTT500">0</counter> + <counter name="QryRTT800">0</counter> + <counter name="QryRTT1600">0</counter> + <counter name="QryRTT1600+">0</counter> + <counter name="NumFetch">0</counter> + <counter name="BucketSize">31</counter> + <counter name="REFUSED">0</counter> + <counter name="ClientCookieOut">0</counter> + <counter name="ServerCookieOut">0</counter> + <counter name="CookieIn">0</counter> + <counter name="CookieClientOk">0</counter> + <counter name="BadEDNSVersion">0</counter> + <counter name="BadCookieRcode">0</counter> + <counter name="ZoneQuota">0</counter> + <counter name="ServerQuota">0</counter> + <counter name="NextItem">0</counter> + </counters> + <cache name="_bind" /> + <counters type="adbstat"> + <counter name="nentries">1021</counter> + <counter name="entriescnt">0</counter> + <counter name="nnames">1021</counter> + <counter name="namescnt">0</counter> + </counters> + <counters type="cachestats"> + <counter name="CacheHits">0</counter> + <counter name="CacheMisses">509</counter> + <counter name="QueryHits">0</counter> + <counter name="QueryMisses">509</counter> + <counter name="DeleteLRU">0</counter> + <counter name="DeleteTTL">0</counter> + <counter name="CacheNodes">0</counter> + <counter name="CacheBuckets">64</counter> + <counter name="TreeMemTotal">287792</counter> + <counter name="TreeMemInUse">29952</counter> + <counter name="TreeMemMax">29952</counter> + <counter name="HeapMemTotal">262144</counter> + <counter name="HeapMemInUse">1024</counter> + <counter name="HeapMemMax">1024</counter> + </counters> + </view> + </views> +</statistics> \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/bind/xml3_client.go b/src/go/collectors/go.d.plugin/modules/bind/xml3_client.go new file mode 100644 index 00000000000000..b84cabbd80e5dd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/bind/xml3_client.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +type xml3Stats struct { + Server 
xml3Server `xml:"server"` + Views []xml3View `xml:"views>view"` +} + +type xml3Server struct { + CounterGroups []xml3CounterGroup `xml:"counters"` +} + +type xml3CounterGroup struct { + Type string `xml:"type,attr"` + Counters []struct { + Name string `xml:"name,attr"` + Value int64 `xml:",chardata"` + } `xml:"counter"` +} + +type xml3View struct { + Name string `xml:"name,attr"` + CounterGroups []xml3CounterGroup `xml:"counters"` +} + +func newXML3Client(client *http.Client, request web.Request) *xml3Client { + return &xml3Client{httpClient: client, request: request} +} + +type xml3Client struct { + httpClient *http.Client + request web.Request +} + +func (c xml3Client) serverStats() (*serverStats, error) { + req := c.request.Copy() + u, err := url.Parse(req.URL) + if err != nil { + return nil, fmt.Errorf("error on parsing URL: %v", err) + } + + u.Path = path.Join(u.Path, "/server") + req.URL = u.String() + + httpReq, err := web.NewHTTPRequest(req) + if err != nil { + return nil, fmt.Errorf("error on creating HTTP request: %v", err) + } + + resp, err := c.httpClient.Do(httpReq) + if err != nil { + return nil, fmt.Errorf("error on request : %v", err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode) + } + + stats := xml3Stats{} + if err = xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return nil, fmt.Errorf("error on decoding response from %s : %v", httpReq.URL, err) + } + return convertXML(stats), nil +} + +func convertXML(xmlStats xml3Stats) *serverStats { + stats := serverStats{ + OpCodes: make(map[string]int64), + NSStats: make(map[string]int64), + QTypes: make(map[string]int64), + SockStats: make(map[string]int64), + Views: make(map[string]jsonView), + } + + var m map[string]int64 + + for _, group := range xmlStats.Server.CounterGroups { + switch group.Type { + default: + continue + case "opcode": + m = stats.OpCodes + case "qtype": + m = stats.QTypes + case "nsstat": + m = stats.NSStats + case "sockstat": + m = stats.SockStats + } + + for _, v := range group.Counters { + m[v.Name] = v.Value + } + } + + for _, view := range xmlStats.Views { + stats.Views[view.Name] = jsonView{ + Resolver: jsonViewResolver{ + Stats: make(map[string]int64), + QTypes: make(map[string]int64), + CacheStats: make(map[string]int64), + }, + } + for _, viewGroup := range view.CounterGroups { + switch viewGroup.Type { + default: + continue + case "resqtype": + m = stats.Views[view.Name].Resolver.QTypes + case "resstats": + m = stats.Views[view.Name].Resolver.Stats + case "cachestats": + m = stats.Views[view.Name].Resolver.CacheStats + } + for _, viewCounter := range viewGroup.Counters { + m[viewCounter.Name] = viewCounter.Value + } + } + } + return &stats +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/README.md b/src/go/collectors/go.d.plugin/modules/cassandra/README.md new file mode 120000 index 00000000000000..99b5b9da502f12 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/README.md @@ -0,0 +1 @@ +integrations/cassandra.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/cassandra.go b/src/go/collectors/go.d.plugin/modules/cassandra/cassandra.go new file mode 100644 index 00000000000000..1e745fbd8a581e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/cassandra.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +import ( + _ "embed" + "time" + + 
"github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("cassandra", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Cassandra { + return &Cassandra{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:7072/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}, + }, + }, + }, + charts: baseCharts.Copy(), + validateMetrics: true, + mx: newCassandraMetrics(), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Cassandra struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + prom prometheus.Prometheus + + validateMetrics bool + mx *cassandraMetrics +} + +func (c *Cassandra) Init() bool { + if err := c.validateConfig(); err != nil { + c.Errorf("error on validating config: %v", err) + return false + } + + prom, err := c.initPrometheusClient() + if err != nil { + c.Errorf("error on init prometheus client: %v", err) + return false + } + c.prom = prom + + return true +} + +func (c *Cassandra) Check() bool { + return len(c.Collect()) > 0 +} + +func (c *Cassandra) Charts() *module.Charts { + return c.charts +} + +func (c *Cassandra) Collect() map[string]int64 { + mx, err := c.collect() + if err != nil { + c.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (c *Cassandra) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/cassandra_test.go b/src/go/collectors/go.d.plugin/modules/cassandra/cassandra_test.go new file mode 100644 index 00000000000000..4425de46e44835 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/cassandra_test.go @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataMetrics, _ = os.ReadFile("testdata/metrics.txt") +) + +func Test_TestData(t *testing.T) { + for name, data := range map[string][]byte{ + "dataMetrics": dataMetrics, + } { + assert.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*Cassandra)(nil), New()) +} + +func TestCassandra_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success if 'url' is set": { + config: Config{ + HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:7072"}}}, + }, + "success on default config": { + wantFail: false, + config: New().Config, + }, + "fails if 'url' is unset": { + wantFail: true, + config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := New() + c.Config = test.config + + if test.wantFail { + assert.False(t, c.Init()) + } else { + assert.True(t, c.Init()) + } + }) + } +} + +func TestCassandra_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (c *Cassandra, cleanup func()) + wantFail bool + }{ + "success on valid response": { + prepare: prepareCassandra, + }, + "fails if endpoint returns invalid data": { + wantFail: true, + prepare: prepareCassandraInvalidData, + }, + "fails on connection refused": { + wantFail: true, + prepare: 
prepareCassandraConnectionRefused, + }, + "fails on 404 response": { + wantFail: true, + prepare: prepareCassandraResponse404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c, cleanup := test.prepare() + defer cleanup() + + require.True(t, c.Init()) + + if test.wantFail { + assert.False(t, c.Check()) + } else { + assert.True(t, c.Check()) + } + }) + } +} + +func TestCassandra_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestCassandra_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (c *Cassandra, cleanup func()) + wantCollected map[string]int64 + }{ + "success on valid response": { + prepare: prepareCassandra, + wantCollected: map[string]int64{ + "client_request_failures_reads": 0, + "client_request_failures_writes": 0, + "client_request_latency_reads": 333316, + "client_request_latency_writes": 331841, + "client_request_read_latency_p50": 61, + "client_request_read_latency_p75": 88, + "client_request_read_latency_p95": 126, + "client_request_read_latency_p98": 182, + "client_request_read_latency_p99": 219, + "client_request_read_latency_p999": 454, + "client_request_timeouts_reads": 0, + "client_request_timeouts_writes": 0, + "client_request_total_latency_reads": 23688998, + "client_request_total_latency_writes": 14253267, + "client_request_unavailables_reads": 0, + "client_request_unavailables_writes": 0, + "client_request_write_latency_p50": 35, + "client_request_write_latency_p75": 61, + "client_request_write_latency_p95": 105, + "client_request_write_latency_p98": 126, + "client_request_write_latency_p99": 152, + "client_request_write_latency_p999": 315, + "compaction_bytes_compacted": 2532, + "compaction_completed_tasks": 1078, + "compaction_pending_tasks": 0, + "dropped_messages": 0, + "jvm_gc_cms_count": 1, + "jvm_gc_cms_time": 59, + "jvm_gc_parnew_count": 218, + "jvm_gc_parnew_time": 1617, + "jvm_memory_heap_used": 1134866288, + "jvm_memory_nonheap_used": 96565696, + "key_cache_hit_ratio": 87273, + "key_cache_hits": 1336427, + "key_cache_misses": 194890, + "key_cache_size": 196559936, + "key_cache_utilization": 20828, + "row_cache_hit_ratio": 0, + "row_cache_hits": 0, + "row_cache_misses": 0, + "row_cache_size": 0, + "row_cache_utilization": 0, + "storage_exceptions": 0, + "storage_load": 858272986, + "thread_pool_CacheCleanupExecutor_active_tasks": 0, + "thread_pool_CacheCleanupExecutor_blocked_tasks": 0, + "thread_pool_CacheCleanupExecutor_pending_tasks": 0, + "thread_pool_CacheCleanupExecutor_total_blocked_tasks": 0, + "thread_pool_CompactionExecutor_active_tasks": 0, + "thread_pool_CompactionExecutor_blocked_tasks": 0, + "thread_pool_CompactionExecutor_pending_tasks": 0, + "thread_pool_CompactionExecutor_total_blocked_tasks": 0, + "thread_pool_GossipStage_active_tasks": 0, + "thread_pool_GossipStage_blocked_tasks": 0, + "thread_pool_GossipStage_pending_tasks": 0, + "thread_pool_GossipStage_total_blocked_tasks": 0, + "thread_pool_HintsDispatcher_active_tasks": 0, + "thread_pool_HintsDispatcher_blocked_tasks": 0, + "thread_pool_HintsDispatcher_pending_tasks": 0, + "thread_pool_HintsDispatcher_total_blocked_tasks": 0, + "thread_pool_MemtableFlushWriter_active_tasks": 0, + "thread_pool_MemtableFlushWriter_blocked_tasks": 0, + "thread_pool_MemtableFlushWriter_pending_tasks": 0, + "thread_pool_MemtableFlushWriter_total_blocked_tasks": 0, + "thread_pool_MemtablePostFlush_active_tasks": 0, + "thread_pool_MemtablePostFlush_blocked_tasks": 0, + "thread_pool_MemtablePostFlush_pending_tasks": 0, + 
"thread_pool_MemtablePostFlush_total_blocked_tasks": 0, + "thread_pool_MemtableReclaimMemory_active_tasks": 0, + "thread_pool_MemtableReclaimMemory_blocked_tasks": 0, + "thread_pool_MemtableReclaimMemory_pending_tasks": 0, + "thread_pool_MemtableReclaimMemory_total_blocked_tasks": 0, + "thread_pool_MutationStage_active_tasks": 0, + "thread_pool_MutationStage_blocked_tasks": 0, + "thread_pool_MutationStage_pending_tasks": 0, + "thread_pool_MutationStage_total_blocked_tasks": 0, + "thread_pool_Native-Transport-Requests_active_tasks": 0, + "thread_pool_Native-Transport-Requests_blocked_tasks": 0, + "thread_pool_Native-Transport-Requests_pending_tasks": 0, + "thread_pool_Native-Transport-Requests_total_blocked_tasks": 0, + "thread_pool_PendingRangeCalculator_active_tasks": 0, + "thread_pool_PendingRangeCalculator_blocked_tasks": 0, + "thread_pool_PendingRangeCalculator_pending_tasks": 0, + "thread_pool_PendingRangeCalculator_total_blocked_tasks": 0, + "thread_pool_PerDiskMemtableFlushWriter_0_active_tasks": 0, + "thread_pool_PerDiskMemtableFlushWriter_0_blocked_tasks": 0, + "thread_pool_PerDiskMemtableFlushWriter_0_pending_tasks": 0, + "thread_pool_PerDiskMemtableFlushWriter_0_total_blocked_tasks": 0, + "thread_pool_ReadStage_active_tasks": 0, + "thread_pool_ReadStage_blocked_tasks": 0, + "thread_pool_ReadStage_pending_tasks": 0, + "thread_pool_ReadStage_total_blocked_tasks": 0, + "thread_pool_Sampler_active_tasks": 0, + "thread_pool_Sampler_blocked_tasks": 0, + "thread_pool_Sampler_pending_tasks": 0, + "thread_pool_Sampler_total_blocked_tasks": 0, + "thread_pool_SecondaryIndexManagement_active_tasks": 0, + "thread_pool_SecondaryIndexManagement_blocked_tasks": 0, + "thread_pool_SecondaryIndexManagement_pending_tasks": 0, + "thread_pool_SecondaryIndexManagement_total_blocked_tasks": 0, + "thread_pool_ValidationExecutor_active_tasks": 0, + "thread_pool_ValidationExecutor_blocked_tasks": 0, + "thread_pool_ValidationExecutor_pending_tasks": 0, + "thread_pool_ValidationExecutor_total_blocked_tasks": 0, + "thread_pool_ViewBuildExecutor_active_tasks": 0, + "thread_pool_ViewBuildExecutor_blocked_tasks": 0, + "thread_pool_ViewBuildExecutor_pending_tasks": 0, + "thread_pool_ViewBuildExecutor_total_blocked_tasks": 0, + }, + }, + "fails if endpoint returns invalid data": { + prepare: prepareCassandraInvalidData, + }, + "fails on connection refused": { + prepare: prepareCassandraConnectionRefused, + }, + "fails on 404 response": { + prepare: prepareCassandraResponse404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c, cleanup := test.prepare() + defer cleanup() + + require.True(t, c.Init()) + + mx := c.Collect() + + assert.Equal(t, test.wantCollected, mx) + }) + } +} + +func prepareCassandra() (c *Cassandra, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataMetrics) + })) + + c = New() + c.URL = ts.URL + return c, ts.Close +} + +func prepareCassandraInvalidData() (c *Cassandra, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + c = New() + c.URL = ts.URL + return c, ts.Close +} + +func prepareCassandraConnectionRefused() (c *Cassandra, cleanup func()) { + c = New() + c.URL = "http://127.0.0.1:38001" + return c, func() {} +} + +func prepareCassandraResponse404() (c *Cassandra, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r 
*http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + c = New() + c.URL = ts.URL + return c, ts.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/charts.go b/src/go/collectors/go.d.plugin/modules/cassandra/charts.go new file mode 100644 index 00000000000000..f175bf0700beaa --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/charts.go @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioClientRequestsRate = module.Priority + iota + + prioClientRequestReadLatency + prioClientRequestWriteLatency + prioClientRequestsLatency + + prioKeyCacheHitRatio + prioRowCacheHitRatio + prioKeyCacheHitRate + prioRowCacheHitRate + prioKeyCacheUtilization + prioRowCacheUtilization + prioKeyCacheSize + prioRowCacheSize + + prioStorageLiveDiskSpaceUsed + + prioCompactionCompletedTasksRate + prioCompactionPendingTasksCount + prioCompactionBytesCompactedRate + + prioThreadPoolActiveTasksCount + prioThreadPoolPendingTasksCount + prioThreadPoolBlockedTasksCount + prioThreadPoolBlockedTasksRate + + prioJVMMemoryUsed + prioJVMGCCount + prioJVMGCTime + + prioDroppedMessagesRate + prioRequestsTimeoutsRate + prioRequestsUnavailablesRate + prioRequestsFailuresRate + prioStorageExceptionsRate +) + +var baseCharts = module.Charts{ + chartClientRequestsRate.Copy(), + + chartClientRequestsLatency.Copy(), + chartClientRequestReadLatencyHistogram.Copy(), + chartClientRequestWriteLatencyHistogram.Copy(), + + chartKeyCacheHitRatio.Copy(), + chartRowCacheHitRatio.Copy(), + chartKeyCacheHitRate.Copy(), + chartRowCacheHitRate.Copy(), + chartKeyCacheUtilization.Copy(), + chartRowCacheUtilization.Copy(), + chartKeyCacheSize.Copy(), + chartRowCacheSize.Copy(), + + chartStorageLiveDiskSpaceUsed.Copy(), + + chartCompactionCompletedTasksRate.Copy(), + chartCompactionPendingTasksCount.Copy(), + chartCompactionBytesCompactedRate.Copy(), + + chartJVMMemoryUsed.Copy(), + chartJVMGCRate.Copy(), + chartJVMGCTime.Copy(), + + chartDroppedMessagesRate.Copy(), + chartClientRequestTimeoutsRate.Copy(), + chartClientRequestUnavailablesRate.Copy(), + chartClientRequestFailuresRate.Copy(), + chartStorageExceptionsRate.Copy(), +} + +var ( + chartClientRequestsRate = module.Chart{ + ID: "client_requests_rate", + Title: "Client requests rate", + Units: "requests/s", + Fam: "throughput", + Ctx: "cassandra.client_requests_rate", + Priority: prioClientRequestsRate, + Dims: module.Dims{ + {ID: "client_request_latency_reads", Name: "read", Algo: module.Incremental}, + {ID: "client_request_latency_writes", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } +) + +var ( + chartClientRequestReadLatencyHistogram = module.Chart{ + ID: "client_request_read_latency_histogram", + Title: "Client request read latency histogram", + Units: "seconds", + Fam: "latency", + Ctx: "cassandra.client_request_read_latency_histogram", + Priority: prioClientRequestReadLatency, + Dims: module.Dims{ + {ID: "client_request_read_latency_p50", Name: "p50", Div: 1e6}, + {ID: "client_request_read_latency_p75", Name: "p75", Div: 1e6}, + {ID: "client_request_read_latency_p95", Name: "p95", Div: 1e6}, + {ID: "client_request_read_latency_p98", Name: "p98", Div: 1e6}, + {ID: "client_request_read_latency_p99", Name: "p99", Div: 1e6}, + {ID: "client_request_read_latency_p999", Name: "p999", Div: 1e6}, + }, + } + chartClientRequestWriteLatencyHistogram = module.Chart{ + ID: "client_request_write_latency_histogram", + Title: 
"Client request write latency histogram", + Units: "seconds", + Fam: "latency", + Ctx: "cassandra.client_request_write_latency_histogram", + Priority: prioClientRequestWriteLatency, + Dims: module.Dims{ + {ID: "client_request_write_latency_p50", Name: "p50", Div: 1e6}, + {ID: "client_request_write_latency_p75", Name: "p75", Div: 1e6}, + {ID: "client_request_write_latency_p95", Name: "p95", Div: 1e6}, + {ID: "client_request_write_latency_p98", Name: "p98", Div: 1e6}, + {ID: "client_request_write_latency_p99", Name: "p99", Div: 1e6}, + {ID: "client_request_write_latency_p999", Name: "p999", Div: 1e6}, + }, + } + chartClientRequestsLatency = module.Chart{ + ID: "client_requests_latency", + Title: "Client requests total latency", + Units: "seconds", + Fam: "latency", + Ctx: "cassandra.client_requests_latency", + Priority: prioClientRequestsLatency, + Dims: module.Dims{ + {ID: "client_request_total_latency_reads", Name: "read", Algo: module.Incremental, Div: 1e6}, + {ID: "client_request_total_latency_writes", Name: "write", Algo: module.Incremental, Div: 1e6}, + }, + } +) + +var ( + chartKeyCacheHitRatio = module.Chart{ + ID: "key_cache_hit_ratio", + Title: "Key cache hit ratio", + Units: "percentage", + Fam: "cache", + Ctx: "cassandra.key_cache_hit_ratio", + Priority: prioKeyCacheHitRatio, + Dims: module.Dims{ + {ID: "key_cache_hit_ratio", Name: "hit_ratio", Div: 1000}, + }, + } + chartKeyCacheHitRate = module.Chart{ + ID: "key_cache_hit_rate", + Title: "Key cache hit rate", + Units: "events/s", + Fam: "cache", + Ctx: "cassandra.key_cache_hit_rate", + Priority: prioKeyCacheHitRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "key_cache_hits", Name: "hits", Algo: module.Incremental}, + {ID: "key_cache_misses", Name: "misses", Algo: module.Incremental}, + }, + } + chartKeyCacheUtilization = module.Chart{ + ID: "key_cache_utilization", + Title: "Key cache utilization", + Units: "percentage", + Fam: "cache", + Ctx: "cassandra.key_cache_utilization", + Priority: prioKeyCacheUtilization, + Dims: module.Dims{ + {ID: "key_cache_utilization", Name: "used", Div: 1000}, + }, + } + chartKeyCacheSize = module.Chart{ + ID: "key_cache_size", + Title: "Key cache size", + Units: "bytes", + Fam: "cache", + Ctx: "cassandra.key_cache_size", + Priority: prioKeyCacheSize, + Dims: module.Dims{ + {ID: "key_cache_size", Name: "size"}, + }, + } + + chartRowCacheHitRatio = module.Chart{ + ID: "row_cache_hit_ratio", + Title: "Row cache hit ratio", + Units: "percentage", + Fam: "cache", + Ctx: "cassandra.row_cache_hit_ratio", + Priority: prioRowCacheHitRatio, + Dims: module.Dims{ + {ID: "row_cache_hit_ratio", Name: "hit_ratio", Div: 1000}, + }, + } + chartRowCacheHitRate = module.Chart{ + ID: "row_cache_hit_rate", + Title: "Row cache hit rate", + Units: "events/s", + Fam: "cache", + Ctx: "cassandra.row_cache_hit_rate", + Priority: prioRowCacheHitRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "row_cache_hits", Name: "hits", Algo: module.Incremental}, + {ID: "row_cache_misses", Name: "misses", Algo: module.Incremental}, + }, + } + chartRowCacheUtilization = module.Chart{ + ID: "row_cache_utilization", + Title: "Row cache utilization", + Units: "percentage", + Fam: "cache", + Ctx: "cassandra.row_cache_utilization", + Priority: prioRowCacheUtilization, + Dims: module.Dims{ + {ID: "row_cache_utilization", Name: "used", Div: 1000}, + }, + } + chartRowCacheSize = module.Chart{ + ID: "row_cache_size", + Title: "Row cache size", + Units: "bytes", + Fam: "cache", + Ctx: "cassandra.row_cache_size", + Priority: 
prioRowCacheSize, + Dims: module.Dims{ + {ID: "row_cache_size", Name: "size"}, + }, + } +) + +var ( + chartStorageLiveDiskSpaceUsed = module.Chart{ + ID: "storage_live_disk_space_used", + Title: "Disk space used by live data", + Units: "bytes", + Fam: "disk usage", + Ctx: "cassandra.storage_live_disk_space_used", + Priority: prioStorageLiveDiskSpaceUsed, + Dims: module.Dims{ + {ID: "storage_load", Name: "used"}, + }, + } +) + +var ( + chartCompactionCompletedTasksRate = module.Chart{ + ID: "compaction_completed_tasks_rate", + Title: "Completed compactions rate", + Units: "tasks/s", + Fam: "compaction", + Ctx: "cassandra.compaction_completed_tasks_rate", + Priority: prioCompactionCompletedTasksRate, + Dims: module.Dims{ + {ID: "compaction_completed_tasks", Name: "completed", Algo: module.Incremental}, + }, + } + chartCompactionPendingTasksCount = module.Chart{ + ID: "compaction_pending_tasks_count", + Title: "Pending compactions", + Units: "tasks", + Fam: "compaction", + Ctx: "cassandra.compaction_pending_tasks_count", + Priority: prioCompactionPendingTasksCount, + Dims: module.Dims{ + {ID: "compaction_pending_tasks", Name: "pending"}, + }, + } + chartCompactionBytesCompactedRate = module.Chart{ + ID: "compaction_compacted_rate", + Title: "Compaction data rate", + Units: "bytes/s", + Fam: "compaction", + Ctx: "cassandra.compaction_compacted_rate", + Priority: prioCompactionBytesCompactedRate, + Dims: module.Dims{ + {ID: "compaction_bytes_compacted", Name: "compacted", Algo: module.Incremental}, + }, + } +) + +var ( + chartsTmplThreadPool = module.Charts{ + chartTmplThreadPoolActiveTasksCount.Copy(), + chartTmplThreadPoolPendingTasksCount.Copy(), + chartTmplThreadPoolBlockedTasksCount.Copy(), + chartTmplThreadPoolBlockedTasksRate.Copy(), + } + + chartTmplThreadPoolActiveTasksCount = module.Chart{ + ID: "thread_pool_%s_active_tasks_count", + Title: "Active tasks", + Units: "tasks", + Fam: "thread pools", + Ctx: "cassandra.thread_pool_active_tasks_count", + Priority: prioThreadPoolActiveTasksCount, + Dims: module.Dims{ + {ID: "thread_pool_%s_active_tasks", Name: "active"}, + }, + } + chartTmplThreadPoolPendingTasksCount = module.Chart{ + ID: "thread_pool_%s_pending_tasks_count", + Title: "Pending tasks", + Units: "tasks", + Fam: "thread pools", + Ctx: "cassandra.thread_pool_pending_tasks_count", + Priority: prioThreadPoolPendingTasksCount, + Dims: module.Dims{ + {ID: "thread_pool_%s_pending_tasks", Name: "pending"}, + }, + } + chartTmplThreadPoolBlockedTasksCount = module.Chart{ + ID: "thread_pool_%s_blocked_tasks_count", + Title: "Blocked tasks", + Units: "tasks", + Fam: "thread pools", + Ctx: "cassandra.thread_pool_blocked_tasks_count", + Priority: prioThreadPoolBlockedTasksCount, + Dims: module.Dims{ + {ID: "thread_pool_%s_blocked_tasks", Name: "blocked"}, + }, + } + chartTmplThreadPoolBlockedTasksRate = module.Chart{ + ID: "thread_pool_%s_blocked_tasks_rate", + Title: "Blocked tasks rate", + Units: "tasks/s", + Fam: "thread pools", + Ctx: "cassandra.thread_pool_blocked_tasks_rate", + Priority: prioThreadPoolBlockedTasksRate, + Dims: module.Dims{ + {ID: "thread_pool_%s_total_blocked_tasks", Name: "blocked", Algo: module.Incremental}, + }, + } +) + +var ( + chartJVMMemoryUsed = module.Chart{ + ID: "jvm_memory_used", + Title: "Memory used", + Units: "bytes", + Fam: "jvm runtime", + Ctx: "cassandra.jvm_memory_used", + Priority: prioJVMMemoryUsed, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "jvm_memory_heap_used", Name: "heap"}, + {ID: "jvm_memory_nonheap_used", Name: "nonheap"}, + }, + 
} + chartJVMGCRate = module.Chart{ + ID: "jvm_gc_rate", + Title: "Garbage collections rate", + Units: "gc/s", + Fam: "jvm runtime", + Ctx: "cassandra.jvm_gc_rate", + Priority: prioJVMGCCount, + Dims: module.Dims{ + {ID: "jvm_gc_parnew_count", Name: "parnew", Algo: module.Incremental}, + {ID: "jvm_gc_cms_count", Name: "cms", Algo: module.Incremental}, + }, + } + chartJVMGCTime = module.Chart{ + ID: "jvm_gc_time", + Title: "Garbage collection time", + Units: "seconds", + Fam: "jvm runtime", + Ctx: "cassandra.jvm_gc_time", + Priority: prioJVMGCTime, + Dims: module.Dims{ + {ID: "jvm_gc_parnew_time", Name: "parnew", Algo: module.Incremental, Div: 1e9}, + {ID: "jvm_gc_cms_time", Name: "cms", Algo: module.Incremental, Div: 1e9}, + }, + } +) + +var ( + chartDroppedMessagesRate = module.Chart{ + ID: "dropped_messages_rate", + Title: "Dropped messages rate", + Units: "messages/s", + Fam: "errors", + Ctx: "cassandra.dropped_messages_rate", + Priority: prioDroppedMessagesRate, + Dims: module.Dims{ + {ID: "dropped_messages", Name: "dropped"}, + }, + } + chartClientRequestTimeoutsRate = module.Chart{ + ID: "client_requests_timeouts_rate", + Title: "Client requests timeouts rate", + Units: "timeouts/s", + Fam: "errors", + Ctx: "cassandra.client_requests_timeouts_rate", + Priority: prioRequestsTimeoutsRate, + Dims: module.Dims{ + {ID: "client_request_timeouts_reads", Name: "read", Algo: module.Incremental}, + {ID: "client_request_timeouts_writes", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } + chartClientRequestUnavailablesRate = module.Chart{ + ID: "client_requests_unavailables_rate", + Title: "Client requests unavailable exceptions rate", + Units: "exceptions/s", + Fam: "errors", + Ctx: "cassandra.client_requests_unavailables_rate", + Priority: prioRequestsUnavailablesRate, + Dims: module.Dims{ + {ID: "client_request_unavailables_reads", Name: "read", Algo: module.Incremental}, + {ID: "client_request_unavailables_writes", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } + chartClientRequestFailuresRate = module.Chart{ + ID: "client_requests_failures_rate", + Title: "Client requests failures rate", + Units: "failures/s", + Fam: "errors", + Ctx: "cassandra.client_requests_failures_rate", + Priority: prioRequestsFailuresRate, + Dims: module.Dims{ + {ID: "client_request_failures_reads", Name: "read", Algo: module.Incremental}, + {ID: "client_request_failures_writes", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } + chartStorageExceptionsRate = module.Chart{ + ID: "storage_exceptions_rate", + Title: "Storage exceptions rate", + Units: "exceptions/s", + Fam: "errors", + Ctx: "cassandra.storage_exceptions_rate", + Priority: prioStorageExceptionsRate, + Dims: module.Dims{ + {ID: "storage_exceptions", Name: "storage", Algo: module.Incremental}, + }, + } +) + +func (c *Cassandra) addThreadPoolCharts(pool *threadPoolMetrics) { + charts := chartsTmplThreadPool.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, pool.name) + chart.Labels = []module.Label{ + {Key: "thread_pool", Value: pool.name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, pool.name) + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/collect.go b/src/go/collectors/go.d.plugin/modules/cassandra/collect.go new file mode 100644 index 00000000000000..c96a43947746dd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/collect.go @@ -0,0 +1,403 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "strings" +) + +const ( + suffixCount = "_count" + suffixValue = "_value" +) + +func (c *Cassandra) collect() (map[string]int64, error) { + pms, err := c.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if c.validateMetrics { + if !isCassandraMetrics(pms) { + return nil, errors.New("collected metrics aren't Cassandra metrics") + } + c.validateMetrics = false + } + + mx := make(map[string]int64) + + c.resetMetrics() + c.collectMetrics(pms) + c.processMetric(mx) + + return mx, nil +} + +func (c *Cassandra) resetMetrics() { + cm := newCassandraMetrics() + for key, p := range c.mx.threadPools { + cm.threadPools[key] = &threadPoolMetrics{ + name: p.name, + hasCharts: p.hasCharts, + } + } + c.mx = cm +} + +func (c *Cassandra) processMetric(mx map[string]int64) { + c.mx.clientReqTotalLatencyReads.write(mx, "client_request_total_latency_reads") + c.mx.clientReqTotalLatencyWrites.write(mx, "client_request_total_latency_writes") + c.mx.clientReqLatencyReads.write(mx, "client_request_latency_reads") + c.mx.clientReqLatencyWrites.write(mx, "client_request_latency_writes") + c.mx.clientReqTimeoutsReads.write(mx, "client_request_timeouts_reads") + c.mx.clientReqTimeoutsWrites.write(mx, "client_request_timeouts_writes") + c.mx.clientReqUnavailablesReads.write(mx, "client_request_unavailables_reads") + c.mx.clientReqUnavailablesWrites.write(mx, "client_request_unavailables_writes") + c.mx.clientReqFailuresReads.write(mx, "client_request_failures_reads") + c.mx.clientReqFailuresWrites.write(mx, "client_request_failures_writes") + + c.mx.clientReqReadLatencyP50.write(mx, "client_request_read_latency_p50") + c.mx.clientReqReadLatencyP75.write(mx, "client_request_read_latency_p75") + c.mx.clientReqReadLatencyP95.write(mx, "client_request_read_latency_p95") + c.mx.clientReqReadLatencyP98.write(mx, "client_request_read_latency_p98") + c.mx.clientReqReadLatencyP99.write(mx, "client_request_read_latency_p99") + c.mx.clientReqReadLatencyP999.write(mx, "client_request_read_latency_p999") + c.mx.clientReqWriteLatencyP50.write(mx, "client_request_write_latency_p50") + c.mx.clientReqWriteLatencyP75.write(mx, "client_request_write_latency_p75") + c.mx.clientReqWriteLatencyP95.write(mx, "client_request_write_latency_p95") + c.mx.clientReqWriteLatencyP98.write(mx, "client_request_write_latency_p98") + c.mx.clientReqWriteLatencyP99.write(mx, "client_request_write_latency_p99") + c.mx.clientReqWriteLatencyP999.write(mx, "client_request_write_latency_p999") + + c.mx.rowCacheHits.write(mx, "row_cache_hits") + c.mx.rowCacheMisses.write(mx, "row_cache_misses") + c.mx.rowCacheSize.write(mx, "row_cache_size") + if c.mx.rowCacheHits.isSet && c.mx.rowCacheMisses.isSet { + if s := c.mx.rowCacheHits.value + c.mx.rowCacheMisses.value; s > 0 { + mx["row_cache_hit_ratio"] = int64((c.mx.rowCacheHits.value * 100 / s) * 1000) + } else { + mx["row_cache_hit_ratio"] = 0 + } + } + if c.mx.rowCacheCapacity.isSet && c.mx.rowCacheSize.isSet { + if s := c.mx.rowCacheCapacity.value; s > 0 { + mx["row_cache_utilization"] = int64((c.mx.rowCacheSize.value * 100 / s) * 1000) + } else { + mx["row_cache_utilization"] = 0 + } + } + + c.mx.keyCacheHits.write(mx, "key_cache_hits") + c.mx.keyCacheMisses.write(mx, "key_cache_misses") + c.mx.keyCacheSize.write(mx, "key_cache_size") + if c.mx.keyCacheHits.isSet && c.mx.keyCacheMisses.isSet { + if s := c.mx.keyCacheHits.value + c.mx.keyCacheMisses.value; s > 0 
{ + mx["key_cache_hit_ratio"] = int64((c.mx.keyCacheHits.value * 100 / s) * 1000) + } else { + mx["key_cache_hit_ratio"] = 0 + } + } + if c.mx.keyCacheCapacity.isSet && c.mx.keyCacheSize.isSet { + if s := c.mx.keyCacheCapacity.value; s > 0 { + mx["key_cache_utilization"] = int64((c.mx.keyCacheSize.value * 100 / s) * 1000) + } else { + mx["key_cache_utilization"] = 0 + } + } + + c.mx.droppedMessages.write1k(mx, "dropped_messages") + + c.mx.storageLoad.write(mx, "storage_load") + c.mx.storageExceptions.write(mx, "storage_exceptions") + + c.mx.compactionBytesCompacted.write(mx, "compaction_bytes_compacted") + c.mx.compactionPendingTasks.write(mx, "compaction_pending_tasks") + c.mx.compactionCompletedTasks.write(mx, "compaction_completed_tasks") + + c.mx.jvmMemoryHeapUsed.write(mx, "jvm_memory_heap_used") + c.mx.jvmMemoryNonHeapUsed.write(mx, "jvm_memory_nonheap_used") + c.mx.jvmGCParNewCount.write(mx, "jvm_gc_parnew_count") + c.mx.jvmGCParNewTime.write1k(mx, "jvm_gc_parnew_time") + c.mx.jvmGCCMSCount.write(mx, "jvm_gc_cms_count") + c.mx.jvmGCCMSTime.write1k(mx, "jvm_gc_cms_time") + + for _, p := range c.mx.threadPools { + if !p.hasCharts { + p.hasCharts = true + c.addThreadPoolCharts(p) + } + + px := "thread_pool_" + p.name + "_" + p.activeTasks.write(mx, px+"active_tasks") + p.pendingTasks.write(mx, px+"pending_tasks") + p.blockedTasks.write(mx, px+"blocked_tasks") + p.totalBlockedTasks.write(mx, px+"total_blocked_tasks") + } +} + +func (c *Cassandra) collectMetrics(pms prometheus.Series) { + c.collectClientRequestMetrics(pms) + c.collectDroppedMessagesMetrics(pms) + c.collectThreadPoolsMetrics(pms) + c.collectStorageMetrics(pms) + c.collectCacheMetrics(pms) + c.collectJVMMetrics(pms) + c.collectCompactionMetrics(pms) +} + +func (c *Cassandra) collectClientRequestMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_clientrequest" + + var rw struct{ read, write *metricValue } + for _, pm := range pms.FindByName(metric + suffixCount) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + + switch name { + case "TotalLatency": + rw.read, rw.write = &c.mx.clientReqTotalLatencyReads, &c.mx.clientReqTotalLatencyWrites + case "Latency": + rw.read, rw.write = &c.mx.clientReqLatencyReads, &c.mx.clientReqLatencyWrites + case "Timeouts": + rw.read, rw.write = &c.mx.clientReqTimeoutsReads, &c.mx.clientReqTimeoutsWrites + case "Unavailables": + rw.read, rw.write = &c.mx.clientReqUnavailablesReads, &c.mx.clientReqUnavailablesWrites + case "Failures": + rw.read, rw.write = &c.mx.clientReqFailuresReads, &c.mx.clientReqFailuresWrites + default: + continue + } + + switch scope { + case "Read": + rw.read.add(pm.Value) + case "Write": + rw.write.add(pm.Value) + } + } + + rw = struct{ read, write *metricValue }{} + + for _, pm := range pms.FindByNames( + metric+"_50thpercentile", + metric+"_75thpercentile", + metric+"_95thpercentile", + metric+"_98thpercentile", + metric+"_99thpercentile", + metric+"_999thpercentile", + ) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + + if name != "Latency" { + continue + } + + switch { + case strings.HasSuffix(pm.Name(), "_50thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP50, &c.mx.clientReqWriteLatencyP50 + case strings.HasSuffix(pm.Name(), "_75thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP75, &c.mx.clientReqWriteLatencyP75 + case strings.HasSuffix(pm.Name(), "_95thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP95, &c.mx.clientReqWriteLatencyP95 + case 
strings.HasSuffix(pm.Name(), "_98thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP98, &c.mx.clientReqWriteLatencyP98 + case strings.HasSuffix(pm.Name(), "_99thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP99, &c.mx.clientReqWriteLatencyP99 + case strings.HasSuffix(pm.Name(), "_999thpercentile"): + rw.read, rw.write = &c.mx.clientReqReadLatencyP999, &c.mx.clientReqWriteLatencyP999 + default: + continue + } + + switch scope { + case "Read": + rw.read.add(pm.Value) + case "Write": + rw.write.add(pm.Value) + } + } +} + +func (c *Cassandra) collectCacheMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_cache" + + var hm struct{ hits, misses *metricValue } + for _, pm := range pms.FindByName(metric + suffixCount) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + + switch scope { + case "KeyCache": + hm.hits, hm.misses = &c.mx.keyCacheHits, &c.mx.keyCacheMisses + case "RowCache": + hm.hits, hm.misses = &c.mx.rowCacheHits, &c.mx.rowCacheMisses + default: + continue + } + + switch name { + case "Hits": + hm.hits.add(pm.Value) + case "Misses": + hm.misses.add(pm.Value) + } + } + + var cs struct{ cap, size *metricValue } + for _, pm := range pms.FindByName(metric + suffixValue) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + + switch scope { + case "KeyCache": + cs.cap, cs.size = &c.mx.keyCacheCapacity, &c.mx.keyCacheSize + case "RowCache": + cs.cap, cs.size = &c.mx.rowCacheCapacity, &c.mx.rowCacheSize + default: + continue + } + + switch name { + case "Capacity": + cs.cap.add(pm.Value) + case "Size": + cs.size.add(pm.Value) + } + } +} + +func (c *Cassandra) collectThreadPoolsMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_threadpools" + + for _, pm := range pms.FindByName(metric + suffixValue) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + pool := c.getThreadPoolMetrics(scope) + + switch name { + case "ActiveTasks": + pool.activeTasks.add(pm.Value) + case "PendingTasks": + pool.pendingTasks.add(pm.Value) + } + } + for _, pm := range pms.FindByName(metric + suffixCount) { + name := pm.Labels.Get("name") + scope := pm.Labels.Get("scope") + pool := c.getThreadPoolMetrics(scope) + + switch name { + case "TotalBlockedTasks": + pool.totalBlockedTasks.add(pm.Value) + case "CurrentlyBlockedTasks": + pool.blockedTasks.add(pm.Value) + } + } +} + +func (c *Cassandra) collectStorageMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_storage" + + for _, pm := range pms.FindByName(metric + suffixCount) { + name := pm.Labels.Get("name") + + switch name { + case "Load": + c.mx.storageLoad.add(pm.Value) + case "Exceptions": + c.mx.storageExceptions.add(pm.Value) + } + } +} + +func (c *Cassandra) collectDroppedMessagesMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_droppedmessage" + + for _, pm := range pms.FindByName(metric + suffixCount) { + c.mx.droppedMessages.add(pm.Value) + } +} + +func (c *Cassandra) collectJVMMetrics(pms prometheus.Series) { + const metricMemUsed = "jvm_memory_bytes_used" + const metricGC = "jvm_gc_collection_seconds" + + for _, pm := range pms.FindByName(metricMemUsed) { + area := pm.Labels.Get("area") + + switch area { + case "heap": + c.mx.jvmMemoryHeapUsed.add(pm.Value) + case "nonheap": + c.mx.jvmMemoryNonHeapUsed.add(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricGC + suffixCount) { + gc := 
pm.Labels.Get("gc") + + switch gc { + case "ParNew": + c.mx.jvmGCParNewCount.add(pm.Value) + case "ConcurrentMarkSweep": + c.mx.jvmGCCMSCount.add(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricGC + "_sum") { + gc := pm.Labels.Get("gc") + + switch gc { + case "ParNew": + c.mx.jvmGCParNewTime.add(pm.Value) + case "ConcurrentMarkSweep": + c.mx.jvmGCCMSTime.add(pm.Value) + } + } +} + +func (c *Cassandra) collectCompactionMetrics(pms prometheus.Series) { + const metric = "org_apache_cassandra_metrics_compaction" + + for _, pm := range pms.FindByName(metric + suffixValue) { + name := pm.Labels.Get("name") + + switch name { + case "CompletedTasks": + c.mx.compactionCompletedTasks.add(pm.Value) + case "PendingTasks": + c.mx.compactionPendingTasks.add(pm.Value) + } + } + for _, pm := range pms.FindByName(metric + suffixCount) { + name := pm.Labels.Get("name") + + switch name { + case "BytesCompacted": + c.mx.compactionBytesCompacted.add(pm.Value) + } + } +} + +func (c *Cassandra) getThreadPoolMetrics(name string) *threadPoolMetrics { + pool, ok := c.mx.threadPools[name] + if !ok { + pool = &threadPoolMetrics{name: name} + c.mx.threadPools[name] = pool + } + return pool +} + +func isCassandraMetrics(pms prometheus.Series) bool { + for _, pm := range pms { + if strings.HasPrefix(pm.Name(), "org_apache_cassandra_metrics") { + return true + } + } + return false +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/config_schema.json b/src/go/collectors/go.d.plugin/modules/cassandra/config_schema.json new file mode 100644 index 00000000000000..ff22764ecb53e7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/cassandra job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/init.go b/src/go/collectors/go.d.plugin/modules/cassandra/init.go new file mode 100644 index 00000000000000..92bb5e56d6862c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (c *Cassandra) validateConfig() error { + if c.URL == "" { + return errors.New("'url' is not set") + } + return nil +} + +func (c *Cassandra) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(c.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, c.Request), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/integrations/cassandra.md b/src/go/collectors/go.d.plugin/modules/cassandra/integrations/cassandra.md new file mode 100644 index 
00000000000000..8ab132c1a0b389 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/integrations/cassandra.md @@ -0,0 +1,278 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/cassandra/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/cassandra/metadata.yaml" +sidebar_label: "Cassandra" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cassandra + + +<img src="https://netdata.cloud/img/cassandra.svg" width="150"/> + + +Plugin: go.d.plugin +Module: cassandra + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector gathers metrics about client requests, cache hits, and more, while also providing metrics for each thread pool. + + +The [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This collector discovers instances running on the local host that provide metrics on port 7072. + +On startup, it tries to collect metrics from: + +- http://127.0.0.1:7072/metrics + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Cassandra instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
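The cache hit-ratio charts listed below are not exported by the JMX exporter directly; the collector derives them from the raw `Hits`/`Misses` counters it scrapes (see `collectCacheMetrics` and `processMetric` in `collect.go` above). A minimal sketch of that derivation, using only the `pkg/prometheus` series helpers the module already relies on; the `cacheHitRatio` helper itself is hypothetical and shown for illustration:

```go
package cassandra

import "github.com/netdata/go.d.plugin/pkg/prometheus"

// cacheHitRatio mirrors the collector's logic: sum the Hits/Misses counters
// for a cache scope ("KeyCache" or "RowCache") and derive a hit percentage.
// Like collect.go, it scales the result by 1000 (87.273% -> 87273), so the
// chart side is expected to divide the stored value back down for display.
func cacheHitRatio(pms prometheus.Series, scope string) (int64, bool) {
	var hits, misses float64
	for _, pm := range pms.FindByName("org_apache_cassandra_metrics_cache_count") {
		if pm.Labels.Get("scope") != scope {
			continue
		}
		switch pm.Labels.Get("name") {
		case "Hits":
			hits += pm.Value
		case "Misses":
			misses += pm.Value
		}
	}
	if sum := hits + misses; sum > 0 {
		return int64(hits * 100 / sum * 1000), true
	}
	return 0, false
}
```

With the sample series in `testdata/metrics.txt` (KeyCache: 1336427 hits, 194890 misses), this yields 87273, i.e. a hit ratio of roughly 87.27%.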
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cassandra.client_requests_rate | read, write | requests/s | +| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds | +| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds | +| cassandra.client_requests_latency | read, write | seconds | +| cassandra.row_cache_hit_ratio | hit_ratio | percentage | +| cassandra.row_cache_hit_rate | hits, misses | events/s | +| cassandra.row_cache_utilization | used | percentage | +| cassandra.row_cache_size | size | bytes | +| cassandra.key_cache_hit_ratio | hit_ratio | percentage | +| cassandra.key_cache_hit_rate | hits, misses | events/s | +| cassandra.key_cache_utilization | used | percentage | +| cassandra.key_cache_size | size | bytes | +| cassandra.storage_live_disk_space_used | used | bytes | +| cassandra.compaction_completed_tasks_rate | completed | tasks/s | +| cassandra.compaction_pending_tasks_count | pending | tasks | +| cassandra.compaction_compacted_rate | compacted | bytes/s | +| cassandra.jvm_memory_used | heap, nonheap | bytes | +| cassandra.jvm_gc_rate | parnew, cms | gc/s | +| cassandra.jvm_gc_time | parnew, cms | seconds | +| cassandra.dropped_messages_rate | dropped | messages/s | +| cassandra.client_requests_timeouts_rate | read, write | timeout/s | +| cassandra.client_requests_unavailables_rate | read, write | exceptions/s | +| cassandra.client_requests_failures_rate | read, write | failures/s | +| cassandra.storage_exceptions_rate | storage | exceptions/s | + +### Per thread pool + +Metrics related to Cassandra's thread pools. Each thread pool provides its own set of the following metrics. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thread_pool | thread pool name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cassandra.thread_pool_active_tasks_count | active | tasks | +| cassandra.thread_pool_pending_tasks_count | pending | tasks | +| cassandra.thread_pool_blocked_tasks_count | blocked | tasks | +| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure Cassandra with Prometheus JMX Exporter + +To configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter): + +> **Note**: paths may differ depending on your setup. + +- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) JAR file + and install it in a directory where Cassandra can access it. +- Add + the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml) + file to `/etc/cassandra`. +- Add the following line to `/etc/cassandra/cassandra-env.sh`: + ``` + JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml" + ``` +- Restart the Cassandra service. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/cassandra.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/cassandra.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:7072/metrics | yes | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 2 | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:7072/metrics + +``` + +##### HTTP authentication + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:7072/metrics + username: foo + password: bar + +``` +</details> + +##### HTTPS with self-signed certificate + +Local server with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:7072/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:7072/metrics + + - name: remote + url: http://192.0.2.1:7072/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m cassandra + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/jmx_exporter.yaml b/src/go/collectors/go.d.plugin/modules/cassandra/jmx_exporter.yaml new file mode 100644 index 00000000000000..983f6f9b2b8a77 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/jmx_exporter.yaml @@ -0,0 +1,31 @@ +lowercaseOutputLabelNames: true +lowercaseOutputName: true +whitelistObjectNames: ["org.apache.cassandra.metrics:*"] +blacklistObjectNames: + - "org.apache.cassandra.metrics:type=ColumnFamily,*" + - "org.apache.cassandra.metrics:type=Table,*" +rules: + # Throughput and Latency + - pattern: org.apache.cassandra.metrics<type=(ClientRequest), scope=(Write|Read), name=(TotalLatency|Latency|Timeouts|Unavailables|Failures)><>(Count) + - pattern: org.apache.cassandra.metrics<type=(ClientRequest), scope=(Write|Read), name=(Latency)><>(\S*Percentile) + + # Dropped messages + - pattern: org.apache.cassandra.metrics<type=(DroppedMessage), scope=(\S*), name=(Dropped)><>(Count) + + # Cache + - pattern: org.apache.cassandra.metrics<type=Cache, scope=(KeyCache|RowCache), name=(Hits|Misses)><>(Count) + - pattern: org.apache.cassandra.metrics<type=Cache, scope=(KeyCache|RowCache), name=(Capacity|Size)><>(Value) + + # Storage + - pattern: org.apache.cassandra.metrics<type=(Storage), name=(Load|Exceptions)><>(Count) + + # Tables + # - pattern: org.apache.cassandra.metrics<type=(Table), keyspace=(\S*), scope=(\S*), name=(TotalDiskSpaceUsed)><>(Count) + + # Compaction + - pattern: org.apache.cassandra.metrics<type=(Compaction), name=(CompletedTasks|PendingTasks)><>(Value) + - pattern: org.apache.cassandra.metrics<type=(Compaction), name=(BytesCompacted)><>(Count) + + # Thread Pools + - pattern: org.apache.cassandra.metrics<type=(ThreadPools), path=(\S*), scope=(\S*), name=(ActiveTasks|PendingTasks)><>(Value) + - pattern: org.apache.cassandra.metrics<type=(ThreadPools), path=(\S*), scope=(\S*), name=(CurrentlyBlockedTasks|TotalBlockedTasks)><>(Count) diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/metadata.yaml b/src/go/collectors/go.d.plugin/modules/cassandra/metadata.yaml new file mode 100644 index 00000000000000..ef9458c0329d9b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/metadata.yaml @@ -0,0 +1,410 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-cassandra + module_name: cassandra + plugin_name: go.d.plugin + monitored_instance: + categories: + - data-collection.database-servers + icon_filename: cassandra.svg + name: Cassandra + link: https://cassandra.apache.org/_/index.html + alternative_monitored_instances: [] + keywords: + - nosql + - dbms + - db + - database + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector gathers metrics about client requests, cache hits, and more, while also providing metrics for each thread pool. + method_description: | + The [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`. 
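Before pointing the collector at an endpoint, it can help to confirm that the exporter is actually serving Cassandra metrics. Below is a stdlib-only probe (a sketch, not part of the module) that checks for the same `org_apache_cassandra_metrics` prefix the collector's own validation step (`isCassandraMetrics` in `collect.go`) looks for; the URL is the module's default and may differ in your setup:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Default endpoint used by this module; adjust if your JMX exporter
	// agent listens on another host or port.
	resp, err := http.Get("http://127.0.0.1:7072/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// Same prefix the collector validates before accepting the target.
	if strings.Contains(string(body), "org_apache_cassandra_metrics") {
		fmt.Println("endpoint serves Cassandra metrics")
	} else {
		fmt.Println("no Cassandra metrics found; check the JMX exporter setup")
	}
}
```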
+ supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + This collector discovers instances running on the local host that provide metrics on port 7072. + + On startup, it tries to collect metrics from: + + - http://127.0.0.1:7072/metrics + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Configure Cassandra with Prometheus JMX Exporter + description: | + To configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter): + + > **Note**: paths may differ depending on your setup. + + - Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) JAR file + and install it in a directory where Cassandra can access it. + - Add + the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml) + file to `/etc/cassandra`. + - Add the following line to `/etc/cassandra/cassandra-env.sh`: + ``` + JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml" + ``` + - Restart the Cassandra service. + configuration: + file: + name: go.d/cassandra.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:7072/metrics + required: true + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:7072/metrics + - name: HTTP authentication + description: Local server with basic HTTP authentication. 
+ config: | + jobs: + - name: local + url: http://127.0.0.1:7072/metrics + username: foo + password: bar + - name: HTTPS with self-signed certificate + description: Local server with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1:7072/metrics + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:7072/metrics + + - name: remote + url: http://192.0.2.1:7072/metrics + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: cassandra.client_requests_rate + availability: [] + description: Client requests rate + unit: requests/s + chart_type: line + dimensions: + - name: read + - name: write + - name: cassandra.client_request_read_latency_histogram + availability: [] + description: Client request read latency histogram + unit: seconds + chart_type: line + dimensions: + - name: p50 + - name: p75 + - name: p95 + - name: p98 + - name: p99 + - name: p999 + - name: cassandra.client_request_write_latency_histogram + availability: [] + description: Client request write latency histogram + unit: seconds + chart_type: line + dimensions: + - name: p50 + - name: p75 + - name: p95 + - name: p98 + - name: p99 + - name: p999 + - name: cassandra.client_requests_latency + availability: [] + description: Client requests total latency + unit: seconds + chart_type: line + dimensions: + - name: read + - name: write + - name: cassandra.row_cache_hit_ratio + availability: [] + description: Row cache hit ratio + unit: percentage + chart_type: line + dimensions: + - name: hit_ratio + - name: cassandra.row_cache_hit_rate + availability: [] + description: Row cache hit rate + unit: events/s + chart_type: stacked + dimensions: + - name: hits + - name: misses + - name: cassandra.row_cache_utilization + availability: [] + description: Row cache utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: cassandra.row_cache_size + availability: [] + description: Row cache size + unit: bytes + chart_type: line + dimensions: + - name: size + - name: cassandra.key_cache_hit_ratio + availability: [] + description: Key cache hit ratio + unit: percentage + chart_type: line + dimensions: + - name: hit_ratio + - name: cassandra.key_cache_hit_rate + availability: [] + description: Key cache hit rate + unit: events/s + chart_type: stacked + dimensions: + - name: hits + - name: misses + - name: cassandra.key_cache_utilization + availability: [] + description: Key cache utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: cassandra.key_cache_size + availability: [] + description: Key cache size + unit: bytes + chart_type: line + dimensions: + - name: size + - name: cassandra.storage_live_disk_space_used + availability: [] + description: Disk space used by live data + unit: bytes + chart_type: line + dimensions: + - name: used + - name: cassandra.compaction_completed_tasks_rate + availability: [] + description: Completed compactions rate + unit: tasks/s + chart_type: line + dimensions: + - name: completed + - name: cassandra.compaction_pending_tasks_count + availability: [] + description: Pending 
compactions + unit: tasks + chart_type: line + dimensions: + - name: pending + - name: cassandra.compaction_compacted_rate + availability: [] + description: Compaction data rate + unit: bytes/s + chart_type: line + dimensions: + - name: compacted + - name: cassandra.jvm_memory_used + availability: [] + description: Memory used + unit: bytes + chart_type: stacked + dimensions: + - name: heap + - name: nonheap + - name: cassandra.jvm_gc_rate + availability: [] + description: Garbage collections rate + unit: gc/s + chart_type: line + dimensions: + - name: parnew + - name: cms + - name: cassandra.jvm_gc_time + availability: [] + description: Garbage collection time + unit: seconds + chart_type: line + dimensions: + - name: parnew + - name: cms + - name: cassandra.dropped_messages_rate + availability: [] + description: Dropped messages rate + unit: messages/s + chart_type: line + dimensions: + - name: dropped + - name: cassandra.client_requests_timeouts_rate + availability: [] + description: Client requests timeouts rate + unit: timeout/s + chart_type: line + dimensions: + - name: read + - name: write + - name: cassandra.client_requests_unavailables_rate + availability: [] + description: Client requests unavailable exceptions rate + unit: exceptions/s + chart_type: line + dimensions: + - name: read + - name: write + - name: cassandra.client_requests_failures_rate + availability: [] + description: Client requests failures rate + unit: failures/s + chart_type: line + dimensions: + - name: read + - name: write + - name: cassandra.storage_exceptions_rate + availability: [] + description: Storage exceptions rate + unit: exceptions/s + chart_type: line + dimensions: + - name: storage + - name: thread pool + description: Metrics related to Cassandra's thread pools. Each thread pool provides its own set of the following metrics. 
+ labels: + - name: thread_pool + description: thread pool name + metrics: + - name: cassandra.thread_pool_active_tasks_count + availability: [] + description: Active tasks + unit: tasks + chart_type: line + dimensions: + - name: active + - name: cassandra.thread_pool_pending_tasks_count + availability: [] + description: Pending tasks + unit: tasks + chart_type: line + dimensions: + - name: pending + - name: cassandra.thread_pool_blocked_tasks_count + availability: [] + description: Blocked tasks + unit: tasks + chart_type: line + dimensions: + - name: blocked + - name: cassandra.thread_pool_blocked_tasks_rate + availability: [] + description: Blocked tasks rate + unit: tasks/s + chart_type: line + dimensions: + - name: blocked diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/metrics.go b/src/go/collectors/go.d.plugin/modules/cassandra/metrics.go new file mode 100644 index 00000000000000..6533c694cabfc8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/metrics.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cassandra + +// https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#table-metrics +// https://www.datadoghq.com/blog/how-to-collect-cassandra-metrics/ +// https://docs.opennms.com/horizon/29/deployment/time-series-storage/newts/cassandra-jmx.html + +func newCassandraMetrics() *cassandraMetrics { + return &cassandraMetrics{ + threadPools: make(map[string]*threadPoolMetrics), + } +} + +type cassandraMetrics struct { + clientReqTotalLatencyReads metricValue + clientReqTotalLatencyWrites metricValue + clientReqLatencyReads metricValue + clientReqLatencyWrites metricValue + clientReqTimeoutsReads metricValue + clientReqTimeoutsWrites metricValue + clientReqUnavailablesReads metricValue + clientReqUnavailablesWrites metricValue + clientReqFailuresReads metricValue + clientReqFailuresWrites metricValue + + clientReqReadLatencyP50 metricValue + clientReqReadLatencyP75 metricValue + clientReqReadLatencyP95 metricValue + clientReqReadLatencyP98 metricValue + clientReqReadLatencyP99 metricValue + clientReqReadLatencyP999 metricValue + clientReqWriteLatencyP50 metricValue + clientReqWriteLatencyP75 metricValue + clientReqWriteLatencyP95 metricValue + clientReqWriteLatencyP98 metricValue + clientReqWriteLatencyP99 metricValue + clientReqWriteLatencyP999 metricValue + + rowCacheHits metricValue + rowCacheMisses metricValue + rowCacheCapacity metricValue + rowCacheSize metricValue + keyCacheHits metricValue + keyCacheMisses metricValue + keyCacheCapacity metricValue + keyCacheSize metricValue + + // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#dropped-metrics + droppedMessages metricValue + + // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#storage-metrics + storageLoad metricValue + storageExceptions metricValue + + // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#compaction-metrics + compactionBytesCompacted metricValue + compactionPendingTasks metricValue + compactionCompletedTasks metricValue + + // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#memory + jvmMemoryHeapUsed metricValue + jvmMemoryNonHeapUsed metricValue + // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#garbagecollector + jvmGCParNewCount metricValue + jvmGCParNewTime metricValue + jvmGCCMSCount metricValue + jvmGCCMSTime metricValue + + threadPools map[string]*threadPoolMetrics +} + +type threadPoolMetrics struct { + 
name string + hasCharts bool + + activeTasks metricValue + pendingTasks metricValue + blockedTasks metricValue + totalBlockedTasks metricValue +} + +type metricValue struct { + isSet bool + value float64 +} + +func (mv *metricValue) add(v float64) { + mv.isSet = true + mv.value += v +} + +func (mv *metricValue) write(mx map[string]int64, key string) { + if mv.isSet { + mx[key] = int64(mv.value) + } +} + +func (mv *metricValue) write1k(mx map[string]int64, key string) { + if mv.isSet { + mx[key] = int64(mv.value * 1000) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/cassandra/testdata/metrics.txt b/src/go/collectors/go.d.plugin/modules/cassandra/testdata/metrics.txt new file mode 100644 index 00000000000000..663a6808029a0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cassandra/testdata/metrics.txt @@ -0,0 +1,402 @@ +# HELP jvm_threads_current Current thread count of a JVM +# TYPE jvm_threads_current gauge +jvm_threads_current 93.0 +# HELP jvm_threads_daemon Daemon thread count of a JVM +# TYPE jvm_threads_daemon gauge +jvm_threads_daemon 82.0 +# HELP jvm_threads_peak Peak thread count of a JVM +# TYPE jvm_threads_peak gauge +jvm_threads_peak 94.0 +# HELP jvm_threads_started_total Started thread count of a JVM +# TYPE jvm_threads_started_total counter +jvm_threads_started_total 1860.0 +# HELP jvm_threads_deadlocked Cycles of JVM-threads that are in deadlock waiting to acquire object monitors or ownable synchronizers +# TYPE jvm_threads_deadlocked gauge +jvm_threads_deadlocked 0.0 +# HELP jvm_threads_deadlocked_monitor Cycles of JVM-threads that are in deadlock waiting to acquire object monitors +# TYPE jvm_threads_deadlocked_monitor gauge +jvm_threads_deadlocked_monitor 0.0 +# HELP jvm_threads_state Current count of threads by state +# TYPE jvm_threads_state gauge +jvm_threads_state{state="NEW",} 0.0 +jvm_threads_state{state="TERMINATED",} 0.0 +jvm_threads_state{state="RUNNABLE",} 16.0 +jvm_threads_state{state="BLOCKED",} 0.0 +jvm_threads_state{state="WAITING",} 46.0 +jvm_threads_state{state="TIMED_WAITING",} 31.0 +jvm_threads_state{state="UNKNOWN",} 0.0 +# HELP jvm_memory_pool_allocated_bytes_total Total bytes allocated in a given JVM memory pool. Only updated after GC, not continuously. +# TYPE jvm_memory_pool_allocated_bytes_total counter +jvm_memory_pool_allocated_bytes_total{pool="Par Survivor Space",} 1.52801872E8 +jvm_memory_pool_allocated_bytes_total{pool="CMS Old Gen",} 8.55035344E8 +jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'profiled nmethods'",} 2.4841216E7 +jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'non-profiled nmethods'",} 1.3023104E7 +jvm_memory_pool_allocated_bytes_total{pool="Compressed Class Space",} 6640584.0 +jvm_memory_pool_allocated_bytes_total{pool="Metaspace",} 5.3862968E7 +jvm_memory_pool_allocated_bytes_total{pool="Par Eden Space",} 7.3147804328E10 +jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'non-nmethods'",} 1530112.0 +# HELP jvm_gc_collection_seconds Time spent in a given JVM garbage collector in seconds. 
+# TYPE jvm_gc_collection_seconds summary +jvm_gc_collection_seconds_count{gc="ParNew",} 218.0 +jvm_gc_collection_seconds_sum{gc="ParNew",} 1.617 +jvm_gc_collection_seconds_count{gc="ConcurrentMarkSweep",} 1.0 +jvm_gc_collection_seconds_sum{gc="ConcurrentMarkSweep",} 0.059 +# HELP jvm_classes_currently_loaded The number of classes that are currently loaded in the JVM +# TYPE jvm_classes_currently_loaded gauge +jvm_classes_currently_loaded 9663.0 +# HELP jvm_classes_loaded_total The total number of classes that have been loaded since the JVM has started execution +# TYPE jvm_classes_loaded_total counter +jvm_classes_loaded_total 9663.0 +# HELP jvm_classes_unloaded_total The total number of classes that have been unloaded since the JVM has started execution +# TYPE jvm_classes_unloaded_total counter +jvm_classes_unloaded_total 0.0 +# HELP jmx_config_reload_success_total Number of times configuration have successfully been reloaded. +# TYPE jmx_config_reload_success_total counter +jmx_config_reload_success_total 0.0 +# HELP jmx_config_reload_failure_total Number of times configuration have failed to be reloaded. +# TYPE jmx_config_reload_failure_total counter +jmx_config_reload_failure_total 0.0 +# HELP org_apache_cassandra_metrics_clientrequest_50thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=50thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_50thpercentile untyped +org_apache_cassandra_metrics_clientrequest_50thpercentile{scope="Read",name="Latency",} 61.214 +org_apache_cassandra_metrics_clientrequest_50thpercentile{scope="Write",name="Latency",} 35.425000000000004 +# HELP org_apache_cassandra_metrics_clientrequest_95thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=95thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_95thpercentile untyped +org_apache_cassandra_metrics_clientrequest_95thpercentile{scope="Read",name="Latency",} 126.934 +org_apache_cassandra_metrics_clientrequest_95thpercentile{scope="Write",name="Latency",} 105.778 +# HELP org_apache_cassandra_metrics_cache_count Attribute exposed for management org.apache.cassandra.metrics:name=Misses,type=Cache,attribute=Count +# TYPE org_apache_cassandra_metrics_cache_count untyped +org_apache_cassandra_metrics_cache_count{scope="KeyCache",name="Misses",} 194890.0 +org_apache_cassandra_metrics_cache_count{scope="KeyCache",name="Hits",} 1336427.0 +org_apache_cassandra_metrics_cache_count{scope="RowCache",name="Hits",} 0.0 +org_apache_cassandra_metrics_cache_count{scope="RowCache",name="Misses",} 0.0 +# HELP org_apache_cassandra_metrics_storage_count Attribute exposed for management org.apache.cassandra.metrics:name=Exceptions,type=Storage,attribute=Count +# TYPE org_apache_cassandra_metrics_storage_count untyped +org_apache_cassandra_metrics_storage_count{name="Exceptions",} 0.0 +org_apache_cassandra_metrics_storage_count{name="Load",} 8.58272986E8 +# HELP org_apache_cassandra_metrics_compaction_count Attribute exposed for management org.apache.cassandra.metrics:name=BytesCompacted,type=Compaction,attribute=Count +# TYPE org_apache_cassandra_metrics_compaction_count untyped +org_apache_cassandra_metrics_compaction_count{name="BytesCompacted",} 2532.0 +# HELP org_apache_cassandra_metrics_clientrequest_count Attribute exposed for management org.apache.cassandra.metrics:name=Timeouts,type=ClientRequest,attribute=Count +# TYPE org_apache_cassandra_metrics_clientrequest_count 
untyped +org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Timeouts",} 0.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Latency",} 333316.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Unavailables",} 0.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="TotalLatency",} 1.4253267E7 +org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Timeouts",} 0.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Failures",} 0.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Latency",} 331841.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Failures",} 0.0 +org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="TotalLatency",} 2.3688998E7 +org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Unavailables",} 0.0 +# HELP org_apache_cassandra_metrics_cache_value Attribute exposed for management org.apache.cassandra.metrics:name=Size,type=Cache,attribute=Value +# TYPE org_apache_cassandra_metrics_cache_value untyped +org_apache_cassandra_metrics_cache_value{scope="RowCache",name="Size",} 0.0 +org_apache_cassandra_metrics_cache_value{scope="KeyCache",name="Size",} 1.96559936E8 +org_apache_cassandra_metrics_cache_value{scope="RowCache",name="Capacity",} 0.0 +org_apache_cassandra_metrics_cache_value{scope="KeyCache",name="Capacity",} 9.437184E8 +# HELP org_apache_cassandra_metrics_clientrequest_75thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=75thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_75thpercentile untyped +org_apache_cassandra_metrics_clientrequest_75thpercentile{scope="Read",name="Latency",} 88.148 +org_apache_cassandra_metrics_clientrequest_75thpercentile{scope="Write",name="Latency",} 61.214 +# HELP org_apache_cassandra_metrics_clientrequest_999thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=999thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_999thpercentile untyped +org_apache_cassandra_metrics_clientrequest_999thpercentile{scope="Read",name="Latency",} 454.826 +org_apache_cassandra_metrics_clientrequest_999thpercentile{scope="Write",name="Latency",} 315.85200000000003 +# HELP org_apache_cassandra_metrics_clientrequest_99thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=99thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_99thpercentile untyped +org_apache_cassandra_metrics_clientrequest_99thpercentile{scope="Read",name="Latency",} 219.342 +org_apache_cassandra_metrics_clientrequest_99thpercentile{scope="Write",name="Latency",} 152.321 +# HELP org_apache_cassandra_metrics_threadpools_value Attribute exposed for management org.apache.cassandra.metrics:name=ActiveTasks,type=ThreadPools,attribute=Value +# TYPE org_apache_cassandra_metrics_threadpools_value untyped +org_apache_cassandra_metrics_threadpools_value{path="transport",scope="Native-Transport-Requests",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="HintsDispatcher",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="transport",scope="Native-Transport-Requests",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableFlushWriter",name="PendingTasks",} 0.0 
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CompactionExecutor",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="Sampler",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableReclaimMemory",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ViewBuildExecutor",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableReclaimMemory",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PerDiskMemtableFlushWriter_0",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtablePostFlush",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="SecondaryIndexManagement",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ValidationExecutor",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="Sampler",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableFlushWriter",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ValidationExecutor",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="HintsDispatcher",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="SecondaryIndexManagement",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="request",scope="MutationStage",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="request",scope="ReadStage",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="GossipStage",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CacheCleanupExecutor",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CompactionExecutor",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="request",scope="MutationStage",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PendingRangeCalculator",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CacheCleanupExecutor",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtablePostFlush",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ViewBuildExecutor",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="request",scope="ReadStage",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PerDiskMemtableFlushWriter_0",name="PendingTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PendingRangeCalculator",name="ActiveTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_value{path="internal",scope="GossipStage",name="ActiveTasks",} 0.0 +# HELP org_apache_cassandra_metrics_droppedmessage_count Attribute exposed for management org.apache.cassandra.metrics:name=Dropped,type=DroppedMessage,attribute=Count +# TYPE org_apache_cassandra_metrics_droppedmessage_count untyped 
+org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_PROPOSE_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PULL_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="_TEST_2",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_COMMIT_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PROPOSE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_VERSION_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PING_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="VALIDATION_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_SYN",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="HINT_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_PROMISE_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_SHUTDOWN",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PROPOSE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_SLICE",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="REPAIR_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="_TRACE",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PING_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="CLEANUP_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="REQUEST_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="ECHO_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="STATUS_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="REPLICATION_DONE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="REQUEST_RESPONSE",name="Dropped",} 0.0 
+org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_CONSISTENT_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="_SAMPLE",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_VERSION_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="FAILURE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_ACK2",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SYNC_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="TRUNCATE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="UNUSED_CUSTOM_VERB",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="ECHO_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="FAILED_SESSION_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PREPARE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="STATUS_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="_TEST_1",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="HINT",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PUSH_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_CONSISTENT_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="REPLICATION_DONE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PULL_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_ACK",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_COMMIT_MSG",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SYNC_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="INTERNAL_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="TRUNCATE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_COMMIT_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="READ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE_RSP",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="HINT_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PREPARE_REQ",name="Dropped",} 0.0 +org_apache_cassandra_metrics_droppedmessage_count{scope="VALIDATION_RSP",name="Dropped",} 0.0 
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PUSH_RSP",name="Dropped",} 0.0 +# HELP org_apache_cassandra_metrics_clientrequest_98thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=98thPercentile +# TYPE org_apache_cassandra_metrics_clientrequest_98thpercentile untyped +org_apache_cassandra_metrics_clientrequest_98thpercentile{scope="Read",name="Latency",} 182.785 +org_apache_cassandra_metrics_clientrequest_98thpercentile{scope="Write",name="Latency",} 126.934 +# HELP org_apache_cassandra_metrics_threadpools_count Attribute exposed for management org.apache.cassandra.metrics:name=TotalBlockedTasks,type=ThreadPools,attribute=Count +# TYPE org_apache_cassandra_metrics_threadpools_count untyped +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="HintsDispatcher",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="request",scope="MutationStage",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="Sampler",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="GossipStage",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableFlushWriter",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="Sampler",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PerDiskMemtableFlushWriter_0",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableFlushWriter",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CacheCleanupExecutor",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ValidationExecutor",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableReclaimMemory",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="GossipStage",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtablePostFlush",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ViewBuildExecutor",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CacheCleanupExecutor",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PendingRangeCalculator",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableReclaimMemory",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="HintsDispatcher",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CompactionExecutor",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="transport",scope="Native-Transport-Requests",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="transport",scope="Native-Transport-Requests",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="SecondaryIndexManagement",name="CurrentlyBlockedTasks",} 0.0 
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtablePostFlush",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="request",scope="MutationStage",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ValidationExecutor",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PerDiskMemtableFlushWriter_0",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="request",scope="ReadStage",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ViewBuildExecutor",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="SecondaryIndexManagement",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PendingRangeCalculator",name="CurrentlyBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CompactionExecutor",name="TotalBlockedTasks",} 0.0 +org_apache_cassandra_metrics_threadpools_count{path="request",scope="ReadStage",name="TotalBlockedTasks",} 0.0 +# HELP org_apache_cassandra_metrics_compaction_value Attribute exposed for management org.apache.cassandra.metrics:name=CompletedTasks,type=Compaction,attribute=Value +# TYPE org_apache_cassandra_metrics_compaction_value untyped +org_apache_cassandra_metrics_compaction_value{name="CompletedTasks",} 1078.0 +org_apache_cassandra_metrics_compaction_value{name="PendingTasks",} 0.0 +# HELP jmx_scrape_duration_seconds Time this JMX scrape took, in seconds. +# TYPE jmx_scrape_duration_seconds gauge +jmx_scrape_duration_seconds 0.102931999 +# HELP jmx_scrape_error Non-zero if this scrape failed. +# TYPE jmx_scrape_error gauge +jmx_scrape_error 0.0 +# HELP jmx_scrape_cached_beans Number of beans with their matching rule cached +# TYPE jmx_scrape_cached_beans gauge +jmx_scrape_cached_beans 0.0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 155.0 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.666810482687E9 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 213.0 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 100000.0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 5.105344512E9 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 3.464957952E9 +# HELP jvm_memory_objects_pending_finalization The number of objects waiting in the finalizer queue. +# TYPE jvm_memory_objects_pending_finalization gauge +jvm_memory_objects_pending_finalization 0.0 +# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{area="heap",} 1.134866288E9 +jvm_memory_bytes_used{area="nonheap",} 9.6565696E7 +# HELP jvm_memory_bytes_committed Committed (bytes) of a given JVM memory area. 
+# TYPE jvm_memory_bytes_committed gauge +jvm_memory_bytes_committed{area="heap",} 2.0447232E9 +jvm_memory_bytes_committed{area="nonheap",} 1.01838848E8 +# HELP jvm_memory_bytes_max Max (bytes) of a given JVM memory area. +# TYPE jvm_memory_bytes_max gauge +jvm_memory_bytes_max{area="heap",} 2.0447232E9 +jvm_memory_bytes_max{area="nonheap",} -1.0 +# HELP jvm_memory_bytes_init Initial bytes of a given JVM memory area. +# TYPE jvm_memory_bytes_init gauge +jvm_memory_bytes_init{area="heap",} 2.08666624E9 +jvm_memory_bytes_init{area="nonheap",} 7667712.0 +# HELP jvm_memory_pool_bytes_used Used bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_used gauge +jvm_memory_pool_bytes_used{pool="CodeHeap 'non-nmethods'",} 1443712.0 +jvm_memory_pool_bytes_used{pool="Metaspace",} 5.386508E7 +jvm_memory_pool_bytes_used{pool="CodeHeap 'profiled nmethods'",} 2.2212992E7 +jvm_memory_pool_bytes_used{pool="Compressed Class Space",} 6640584.0 +jvm_memory_pool_bytes_used{pool="Par Eden Space",} 2.6869912E8 +jvm_memory_pool_bytes_used{pool="Par Survivor Space",} 1.1131824E7 +jvm_memory_pool_bytes_used{pool="CodeHeap 'non-profiled nmethods'",} 1.2403328E7 +jvm_memory_pool_bytes_used{pool="CMS Old Gen",} 8.55035344E8 +# HELP jvm_memory_pool_bytes_committed Committed bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_committed gauge +jvm_memory_pool_bytes_committed{pool="CodeHeap 'non-nmethods'",} 2555904.0 +jvm_memory_pool_bytes_committed{pool="Metaspace",} 5.574656E7 +jvm_memory_pool_bytes_committed{pool="CodeHeap 'profiled nmethods'",} 2.3724032E7 +jvm_memory_pool_bytes_committed{pool="Compressed Class Space",} 7360512.0 +jvm_memory_pool_bytes_committed{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_bytes_committed{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_bytes_committed{pool="CodeHeap 'non-profiled nmethods'",} 1.245184E7 +jvm_memory_pool_bytes_committed{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_memory_pool_bytes_max Max bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_max gauge +jvm_memory_pool_bytes_max{pool="CodeHeap 'non-nmethods'",} 5832704.0 +jvm_memory_pool_bytes_max{pool="Metaspace",} -1.0 +jvm_memory_pool_bytes_max{pool="CodeHeap 'profiled nmethods'",} 1.22912768E8 +jvm_memory_pool_bytes_max{pool="Compressed Class Space",} 1.073741824E9 +jvm_memory_pool_bytes_max{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_bytes_max{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_bytes_max{pool="CodeHeap 'non-profiled nmethods'",} 1.22912768E8 +jvm_memory_pool_bytes_max{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_memory_pool_bytes_init Initial bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_bytes_init gauge +jvm_memory_pool_bytes_init{pool="CodeHeap 'non-nmethods'",} 2555904.0 +jvm_memory_pool_bytes_init{pool="Metaspace",} 0.0 +jvm_memory_pool_bytes_init{pool="CodeHeap 'profiled nmethods'",} 2555904.0 +jvm_memory_pool_bytes_init{pool="Compressed Class Space",} 0.0 +jvm_memory_pool_bytes_init{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_bytes_init{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_bytes_init{pool="CodeHeap 'non-profiled nmethods'",} 2555904.0 +jvm_memory_pool_bytes_init{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_memory_pool_collection_used_bytes Used bytes after last collection of a given JVM memory pool. 
+# TYPE jvm_memory_pool_collection_used_bytes gauge +jvm_memory_pool_collection_used_bytes{pool="Par Eden Space",} 0.0 +jvm_memory_pool_collection_used_bytes{pool="Par Survivor Space",} 1.1131824E7 +jvm_memory_pool_collection_used_bytes{pool="CMS Old Gen",} 0.0 +# HELP jvm_memory_pool_collection_committed_bytes Committed after last collection bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_collection_committed_bytes gauge +jvm_memory_pool_collection_committed_bytes{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_collection_committed_bytes{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_collection_committed_bytes{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_memory_pool_collection_max_bytes Max bytes after last collection of a given JVM memory pool. +# TYPE jvm_memory_pool_collection_max_bytes gauge +jvm_memory_pool_collection_max_bytes{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_collection_max_bytes{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_collection_max_bytes{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_memory_pool_collection_init_bytes Initial after last collection bytes of a given JVM memory pool. +# TYPE jvm_memory_pool_collection_init_bytes gauge +jvm_memory_pool_collection_init_bytes{pool="Par Eden Space",} 3.3554432E8 +jvm_memory_pool_collection_init_bytes{pool="Par Survivor Space",} 4.194304E7 +jvm_memory_pool_collection_init_bytes{pool="CMS Old Gen",} 1.66723584E9 +# HELP jvm_info VM version info +# TYPE jvm_info gauge +jvm_info{runtime="OpenJDK Runtime Environment",vendor="Debian",version="11.0.16+8-post-Debian-1deb11u1",} 1.0 +# HELP jvm_buffer_pool_used_bytes Used bytes of a given JVM buffer pool. +# TYPE jvm_buffer_pool_used_bytes gauge +jvm_buffer_pool_used_bytes{pool="mapped",} 9.20360582E8 +jvm_buffer_pool_used_bytes{pool="direct",} 5.1679788E7 +# HELP jvm_buffer_pool_capacity_bytes Bytes capacity of a given JVM buffer pool. +# TYPE jvm_buffer_pool_capacity_bytes gauge +jvm_buffer_pool_capacity_bytes{pool="mapped",} 9.20360582E8 +jvm_buffer_pool_capacity_bytes{pool="direct",} 5.1679786E7 +# HELP jvm_buffer_pool_used_buffers Used buffers of a given JVM buffer pool. +# TYPE jvm_buffer_pool_used_buffers gauge +jvm_buffer_pool_used_buffers{pool="mapped",} 74.0 +jvm_buffer_pool_used_buffers{pool="direct",} 34.0 +# HELP jmx_exporter_build_info A metric with a constant '1' value labeled with the version of the JMX exporter. +# TYPE jmx_exporter_build_info gauge +jmx_exporter_build_info{version="0.17.2",name="jmx_prometheus_javaagent",} 1.0 +# HELP jmx_config_reload_failure_created Number of times configuration have failed to be reloaded. +# TYPE jmx_config_reload_failure_created gauge +jmx_config_reload_failure_created 1.666810482756E9 +# HELP jmx_config_reload_success_created Number of times configuration have successfully been reloaded. +# TYPE jmx_config_reload_success_created gauge +jmx_config_reload_success_created 1.666810482755E9 +# HELP jvm_memory_pool_allocated_bytes_created Total bytes allocated in a given JVM memory pool. Only updated after GC, not continuously. 
+# TYPE jvm_memory_pool_allocated_bytes_created gauge +jvm_memory_pool_allocated_bytes_created{pool="Par Survivor Space",} 1.666810483789E9 +jvm_memory_pool_allocated_bytes_created{pool="CMS Old Gen",} 1.666810484715E9 +jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'profiled nmethods'",} 1.666810483788E9 +jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'non-profiled nmethods'",} 1.666810483789E9 +jvm_memory_pool_allocated_bytes_created{pool="Compressed Class Space",} 1.666810483789E9 +jvm_memory_pool_allocated_bytes_created{pool="Metaspace",} 1.666810483789E9 +jvm_memory_pool_allocated_bytes_created{pool="Par Eden Space",} 1.666810483789E9 +jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'non-nmethods'",} 1.666810483789E9 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/chrony/README.md b/src/go/collectors/go.d.plugin/modules/chrony/README.md new file mode 120000 index 00000000000000..4a58f37332d1e3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/README.md @@ -0,0 +1 @@ +integrations/chrony.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/chrony/charts.go b/src/go/collectors/go.d.plugin/modules/chrony/charts.go new file mode 100644 index 00000000000000..92af5474448c79 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/charts.go @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +var charts = module.Charts{ + { + ID: "stratum", + Title: "Distance to the reference clock", + Units: "level", + Fam: "stratum", + Ctx: "chrony.stratum", + Dims: module.Dims{ + {ID: "stratum", Name: "stratum"}, + }, + }, + { + ID: "current_correction", + Title: "Current correction", + Units: "seconds", + Fam: "correction", + Ctx: "chrony.current_correction", + Dims: module.Dims{ + {ID: "current_correction", Div: scaleFactor}, + }, + }, + { + ID: "root_delay", + Title: "Network path delay to stratum-1", + Units: "seconds", + Fam: "root", + Ctx: "chrony.root_delay", + Dims: module.Dims{ + {ID: "root_delay", Div: scaleFactor}, + }, + }, + { + ID: "root_dispersion", + Title: "Dispersion accumulated back to stratum-1", + Units: "seconds", + Fam: "root", + Ctx: "chrony.root_dispersion", + Dims: module.Dims{ + {ID: "root_dispersion", Div: scaleFactor}, + }, + }, + { + ID: "last_offset", + Title: "Offset on the last clock update", + Units: "seconds", + Fam: "offset", + Ctx: "chrony.last_offset", + Dims: module.Dims{ + {ID: "last_offset", Name: "offset", Div: scaleFactor}, + }, + }, + { + ID: "rms_offset", + Title: "Long-term average of the offset value", + Units: "seconds", + Fam: "offset", + Ctx: "chrony.rms_offset", + Dims: module.Dims{ + {ID: "rms_offset", Name: "offset", Div: scaleFactor}, + }, + }, + { + ID: "frequency", + Title: "Frequency", + Units: "ppm", + Fam: "frequency", + Ctx: "chrony.frequency", + Dims: module.Dims{ + {ID: "frequency", Div: scaleFactor}, + }, + }, + { + ID: "residual_frequency", + Title: "Residual frequency", + Units: "ppm", + Fam: "frequency", + Ctx: "chrony.residual_frequency", + Dims: module.Dims{ + {ID: "residual_frequency", Div: scaleFactor}, + }, + }, + { + ID: "skew", + Title: "Skew", + Units: "ppm", + Fam: "frequency", + Ctx: "chrony.skew", + Dims: module.Dims{ + {ID: "skew", Div: scaleFactor}, + }, + }, + { + ID: "update_interval", + Title: "Interval between the last two clock updates", + Units: "seconds", + Fam: "updates", + Ctx: "chrony.update_interval", + Dims: 
module.Dims{ + {ID: "update_interval", Div: scaleFactor}, + }, + }, + { + ID: "ref_measurement_time", + Title: "Time since the last measurement", + Units: "seconds", + Fam: "updates", + Ctx: "chrony.ref_measurement_time", + Dims: module.Dims{ + {ID: "ref_measurement_time"}, + }, + }, + { + ID: "leap_status", + Title: "Leap status", + Units: "status", + Fam: "leap status", + Ctx: "chrony.leap_status", + Dims: module.Dims{ + {ID: "leap_status_normal", Name: "normal"}, + {ID: "leap_status_insert_second", Name: "insert_second"}, + {ID: "leap_status_delete_second", Name: "delete_second"}, + {ID: "leap_status_unsynchronised", Name: "unsynchronised"}, + }, + }, + { + ID: "activity", + Title: "Peers activity", + Units: "sources", + Fam: "activity", + Ctx: "chrony.activity", + Type: module.Stacked, + Dims: module.Dims{ + {ID: "online_sources", Name: "online"}, + {ID: "offline_sources", Name: "offline"}, + {ID: "burst_online_sources", Name: "burst_online"}, + {ID: "burst_offline_sources", Name: "burst_offline"}, + {ID: "unresolved_sources", Name: "unresolved"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/chrony/chrony.go b/src/go/collectors/go.d.plugin/modules/chrony/chrony.go new file mode 100644 index 00000000000000..9f12325b934e8b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/chrony.go @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + _ "embed" + "time" + + "github.com/facebook/time/ntp/chrony" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("chrony", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Chrony { + return &Chrony{ + Config: Config{ + Address: "127.0.0.1:323", + Timeout: web.Duration{Duration: time.Second}, + }, + charts: charts.Copy(), + newClient: newChronyClient, + } +} + +type Config struct { + Address string `yaml:"address"` + Timeout web.Duration `yaml:"timeout"` +} + +type ( + Chrony struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + newClient func(c Config) (chronyClient, error) + client chronyClient + } + chronyClient interface { + Tracking() (*chrony.ReplyTracking, error) + Activity() (*chrony.ReplyActivity, error) + Close() + } +) + +func (c *Chrony) Init() bool { + if err := c.validateConfig(); err != nil { + c.Errorf("config validation: %v", err) + return false + } + + return true +} + +func (c *Chrony) Check() bool { + return len(c.Collect()) > 0 +} + +func (c *Chrony) Charts() *module.Charts { + return c.charts +} + +func (c *Chrony) Collect() map[string]int64 { + mx, err := c.collect() + if err != nil { + c.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (c *Chrony) Cleanup() { + if c.client != nil { + c.client.Close() + c.client = nil + } +} diff --git a/src/go/collectors/go.d.plugin/modules/chrony/chrony_test.go b/src/go/collectors/go.d.plugin/modules/chrony/chrony_test.go new file mode 100644 index 00000000000000..a6568b234f1719 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/chrony_test.go @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + "errors" + "net" + "testing" + "time" + + "github.com/facebook/time/ntp/chrony" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestChrony_Init(t *testing.T) { + tests := 
map[string]struct {
+		config   Config
+		wantFail bool
+	}{
+		"default config": {
+			config: New().Config,
+		},
+		"unset 'address'": {
+			wantFail: true,
+			config: Config{
+				Address: "",
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			c := New()
+			c.Config = test.config
+
+			if test.wantFail {
+				assert.False(t, c.Init())
+			} else {
+				assert.True(t, c.Init())
+			}
+		})
+	}
+}
+
+func TestChrony_Check(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func() *Chrony
+		wantFail bool
+	}{
+		"tracking: success, activity: success": {
+			wantFail: false,
+			prepare:  func() *Chrony { return prepareChronyWithMock(&mockClient{}) },
+		},
+		"tracking: success, activity: fail": {
+			wantFail: false,
+			prepare:  func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) },
+		},
+		"tracking: fail, activity: success": {
+			wantFail: true,
+			prepare:  func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true}) },
+		},
+		"tracking: fail, activity: fail": {
+			wantFail: true,
+			prepare:  func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true, errOnActivity: true}) },
+		},
+		"fail on creating client": {
+			wantFail: true,
+			prepare:  func() *Chrony { return prepareChronyWithMock(nil) },
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			c := test.prepare()
+
+			require.True(t, c.Init())
+
+			if test.wantFail {
+				assert.False(t, c.Check())
+			} else {
+				assert.True(t, c.Check())
+			}
+		})
+	}
+}
+
+func TestChrony_Charts(t *testing.T) {
+	assert.Equal(t, len(charts), len(*New().Charts()))
+}
+
+func TestChrony_Cleanup(t *testing.T) {
+	tests := map[string]struct {
+		prepare   func(c *Chrony)
+		wantClose bool
+	}{
+		"after New": {
+			wantClose: false,
+			prepare:   func(c *Chrony) {},
+		},
+		"after Init": {
+			wantClose: false,
+			prepare:   func(c *Chrony) { c.Init() },
+		},
+		"after Check": {
+			wantClose: true,
+			prepare:   func(c *Chrony) { c.Init(); c.Check() },
+		},
+		"after Collect": {
+			wantClose: true,
+			prepare:   func(c *Chrony) { c.Init(); c.Collect() },
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			m := &mockClient{}
+			c := prepareChronyWithMock(m)
+			test.prepare(c)
+
+			require.NotPanics(t, c.Cleanup)
+
+			if test.wantClose {
+				assert.True(t, m.closeCalled)
+			} else {
+				assert.False(t, m.closeCalled)
+			}
+		})
+	}
+}
+
+func TestChrony_Collect(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func() *Chrony
+		expected map[string]int64
+	}{
+		"tracking: success, activity: success": {
+			prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) },
+			expected: map[string]int64{
+				"burst_offline_sources":      3,
+				"burst_online_sources":       4,
+				"current_correction":         154872,
+				"frequency":                  51051185607,
+				"last_offset":                3095,
+				"leap_status_delete_second":  0,
+				"leap_status_insert_second":  1,
+				"leap_status_normal":         0,
+				"leap_status_unsynchronised": 0,
+				"offline_sources":            2,
+				"online_sources":             8,
+				"ref_measurement_time":       63793323616,
+				"residual_frequency":         -571789,
+				"rms_offset":                 130089,
+				"root_delay":                 59576179,
+				"root_dispersion":            1089275,
+				"skew":                       41821926,
+				"stratum":                    4,
+				"unresolved_sources":         1,
+				"update_interval":            1044219238281,
+			},
+		},
+		"tracking: success, activity: fail": {
+			prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) },
+			expected: map[string]int64{
+				"current_correction":         154872,
+				"frequency":                  51051185607,
+				"last_offset":                3095,
+				"leap_status_delete_second":  0,
+				"leap_status_insert_second":  1,
"leap_status_normal": 0, + "leap_status_unsynchronised": 0, + "ref_measurement_time": 63793323586, + "residual_frequency": -571789, + "rms_offset": 130089, + "root_delay": 59576179, + "root_dispersion": 1089275, + "skew": 41821926, + "stratum": 4, + "update_interval": 1044219238281, + }, + }, + "tracking: fail, activity: success": { + prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true}) }, + expected: nil, + }, + "tracking: fail, activity: fail": { + prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true}) }, + expected: nil, + }, + "fail on creating client": { + prepare: func() *Chrony { return prepareChronyWithMock(nil) }, + expected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := test.prepare() + + require.True(t, c.Init()) + _ = c.Check() + + collected := c.Collect() + copyRefMeasurementTime(collected, test.expected) + + assert.Equal(t, test.expected, collected) + }) + } +} + +func prepareChronyWithMock(m *mockClient) *Chrony { + c := New() + if m == nil { + c.newClient = func(_ Config) (chronyClient, error) { return nil, errors.New("mock.newClient error") } + } else { + c.newClient = func(_ Config) (chronyClient, error) { return m, nil } + } + return c +} + +type mockClient struct { + errOnTracking bool + errOnActivity bool + closeCalled bool +} + +func (m mockClient) Tracking() (*chrony.ReplyTracking, error) { + if m.errOnTracking { + return nil, errors.New("mockClient.Tracking call error") + } + reply := chrony.ReplyTracking{ + Tracking: chrony.Tracking{ + RefID: 2728380539, + IPAddr: net.IP("192.0.2.0"), + Stratum: 4, + LeapStatus: 1, + RefTime: time.Time{}, + CurrentCorrection: 0.00015487267228309065, + LastOffset: 3.0953951863921247e-06, + RMSOffset: 0.00013008920359425247, + FreqPPM: -51.051185607910156, + ResidFreqPPM: -0.0005717896274290979, + SkewPPM: 0.0418219268321991, + RootDelay: 0.05957617983222008, + RootDispersion: 0.0010892755817621946, + LastUpdateInterval: 1044.21923828125, + }, + } + return &reply, nil +} + +func (m mockClient) Activity() (*chrony.ReplyActivity, error) { + if m.errOnActivity { + return nil, errors.New("mockClient.Activity call error") + } + reply := chrony.ReplyActivity{ + Activity: chrony.Activity{ + Online: 8, + Offline: 2, + BurstOnline: 4, + BurstOffline: 3, + Unresolved: 1, + }, + } + return &reply, nil +} + +func (m *mockClient) Close() { + m.closeCalled = true +} + +func copyRefMeasurementTime(dst, src map[string]int64) { + if _, ok := dst["ref_measurement_time"]; !ok { + return + } + if _, ok := src["ref_measurement_time"]; !ok { + return + } + dst["ref_measurement_time"] = src["ref_measurement_time"] +} diff --git a/src/go/collectors/go.d.plugin/modules/chrony/client.go b/src/go/collectors/go.d.plugin/modules/chrony/client.go new file mode 100644 index 00000000000000..caa219f3bb7fa5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/client.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + "fmt" + "net" + + "github.com/facebook/time/ntp/chrony" +) + +func newChronyClient(c Config) (chronyClient, error) { + conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + if err != nil { + return nil, err + } + + client := &simpleClient{ + conn: conn, + client: &chrony.Client{Connection: conn}, + } + return client, nil +} + +type simpleClient struct { + conn net.Conn + client *chrony.Client +} + +func (sc *simpleClient) Tracking() (*chrony.ReplyTracking, error) { + 
reply, err := sc.client.Communicate(chrony.NewTrackingPacket()) + if err != nil { + return nil, err + } + + tracking, ok := reply.(*chrony.ReplyTracking) + if !ok { + return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyTracking{}, reply) + } + return tracking, nil +} + +func (sc *simpleClient) Activity() (*chrony.ReplyActivity, error) { + reply, err := sc.client.Communicate(chrony.NewActivityPacket()) + if err != nil { + return nil, err + } + + activity, ok := reply.(*chrony.ReplyActivity) + if !ok { + return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyActivity{}, reply) + } + return activity, nil +} + +func (sc *simpleClient) Close() { + if sc.conn != nil { + _ = sc.conn.Close() + sc.conn = nil + } +} diff --git a/src/go/collectors/go.d.plugin/modules/chrony/collect.go b/src/go/collectors/go.d.plugin/modules/chrony/collect.go new file mode 100644 index 00000000000000..06a9ecc7909c28 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/chrony/collect.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package chrony + +import ( + "fmt" + "time" +) + +const scaleFactor = 1000000000 + +func (c *Chrony) collect() (map[string]int64, error) { + if c.client == nil { + client, err := c.newClient(c.Config) + if err != nil { + return nil, err + } + c.client = client + } + + mx := make(map[string]int64) + + if err := c.collectTracking(mx); err != nil { + return nil, err + } + if err := c.collectActivity(mx); err != nil { + return mx, err + } + + return mx, nil +} + +const ( + // https://github.com/mlichvar/chrony/blob/7daf34675a5a2487895c74d1578241ca91a4eb70/ntp.h#L70-L75 + leapStatusNormal = 0 + leapStatusInsertSecond = 1 + leapStatusDeleteSecond = 2 + leapStatusUnsynchronised = 3 +) + +func (c *Chrony) collectTracking(mx map[string]int64) error { + reply, err := c.client.Tracking() + if err != nil { + return fmt.Errorf("error on collecting tracking: %v", err) + } + + mx["stratum"] = int64(reply.Stratum) + mx["leap_status_normal"] = boolToInt(reply.LeapStatus == leapStatusNormal) + mx["leap_status_insert_second"] = boolToInt(reply.LeapStatus == leapStatusInsertSecond) + mx["leap_status_delete_second"] = boolToInt(reply.LeapStatus == leapStatusDeleteSecond) + mx["leap_status_unsynchronised"] = boolToInt(reply.LeapStatus == leapStatusUnsynchronised) + mx["root_delay"] = int64(reply.RootDelay * scaleFactor) + mx["root_dispersion"] = int64(reply.RootDispersion * scaleFactor) + mx["skew"] = int64(reply.SkewPPM * scaleFactor) + mx["last_offset"] = int64(reply.LastOffset * scaleFactor) + mx["rms_offset"] = int64(reply.RMSOffset * scaleFactor) + mx["update_interval"] = int64(reply.LastUpdateInterval * scaleFactor) + // handle chrony restarts + if reply.RefTime.Year() != 1970 { + mx["ref_measurement_time"] = time.Now().Unix() - reply.RefTime.Unix() + } + mx["residual_frequency"] = int64(reply.ResidFreqPPM * scaleFactor) + // https://github.com/mlichvar/chrony/blob/5b04f3ca902e5d10aa5948fb7587d30b43941049/client.c#L1706 + mx["current_correction"] = abs(int64(reply.CurrentCorrection * scaleFactor)) + mx["frequency"] = abs(int64(reply.FreqPPM * scaleFactor)) + + return nil +} + +func (c *Chrony) collectActivity(mx map[string]int64) error { + reply, err := c.client.Activity() + if err != nil { + return fmt.Errorf("error on collecting activity: %v", err) + } + + mx["online_sources"] = int64(reply.Online) + mx["offline_sources"] = int64(reply.Offline) + mx["burst_online_sources"] = int64(reply.BurstOnline) + mx["burst_offline_sources"] = 
int64(reply.BurstOffline)
+	mx["unresolved_sources"] = int64(reply.Unresolved)
+
+	return nil
+}
+
+func boolToInt(v bool) int64 {
+	if v {
+		return 1
+	}
+	return 0
+}
+
+func abs(v int64) int64 {
+	if v < 0 {
+		return -v
+	}
+	return v
+}
diff --git a/src/go/collectors/go.d.plugin/modules/chrony/config_schema.json b/src/go/collectors/go.d.plugin/modules/chrony/config_schema.json
new file mode 100644
index 00000000000000..105adaa799abda
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/chrony/config_schema.json
@@ -0,0 +1,23 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/chrony job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "address": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    }
+  },
+  "required": [
+    "name",
+    "address"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/chrony/init.go b/src/go/collectors/go.d.plugin/modules/chrony/init.go
new file mode 100644
index 00000000000000..70c8916f2d16fd
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/chrony/init.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+	"errors"
+)
+
+func (c Chrony) validateConfig() error {
+	if c.Address == "" {
+		return errors.New("empty 'address'")
+	}
+	return nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/chrony/integrations/chrony.md b/src/go/collectors/go.d.plugin/modules/chrony/integrations/chrony.md
new file mode 100644
index 00000000000000..52aa92d8047b00
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/chrony/integrations/chrony.md
@@ -0,0 +1,187 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/chrony/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/chrony/metadata.yaml"
+sidebar_label: "Chrony"
+learn_status: "Published"
+learn_rel_path: "Data Collection/System Clock and NTP"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Chrony
+
+
+<img src="https://netdata.cloud/img/chrony.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: chrony
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the system's clock performance and peers' activity status.
+
+It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector discovers the Chrony instance running on the local host and listening on port 323.
+On startup, it tries to collect metrics from:
+
+- 127.0.0.1:323
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Chrony instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| chrony.stratum | stratum | level |
+| chrony.current_correction | current_correction | seconds |
+| chrony.root_delay | root_delay | seconds |
+| chrony.root_dispersion | root_dispersion | seconds |
+| chrony.last_offset | offset | seconds |
+| chrony.rms_offset | offset | seconds |
+| chrony.frequency | frequency | ppm |
+| chrony.residual_frequency | residual_frequency | ppm |
+| chrony.skew | skew | ppm |
+| chrony.update_interval | update_interval | seconds |
+| chrony.ref_measurement_time | ref_measurement_time | seconds |
+| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |
+| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/chrony.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/chrony.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |
+| timeout | Connection timeout. Zero means no timeout. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1:323
+
+```
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1:323
+
+  - name: remote
+    address: 192.0.2.1:323
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m chrony
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/chrony/metadata.yaml b/src/go/collectors/go.d.plugin/modules/chrony/metadata.yaml
new file mode 100644
index 00000000000000..18f9152e628f66
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/chrony/metadata.yaml
@@ -0,0 +1,208 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-chrony
+      module_name: chrony
+      plugin_name: go.d.plugin
+      monitored_instance:
+        categories:
+          - data-collection.system-clock-and-ntp
+        icon_filename: chrony.jpg
+        name: Chrony
+        link: https://chrony.tuxfamily.org/
+      alternative_monitored_instances: []
+      keywords: []
+      info_provided_to_referring_integrations:
+        description: ""
+      related_resources:
+        integrations:
+          list: []
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: This collector monitors the system's clock performance and peers' activity status.
+        method_description: It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            This collector discovers the Chrony instance running on the local host and listening on port 323.
+            On startup, it tries to collect metrics from:

+            - 127.0.0.1:323
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/chrony.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: Server address. The format is IP:PORT.
+              default_value: 127.0.0.1:323
+              required: true
+            - name: timeout
+              description: Connection timeout. Zero means no timeout.
+              default_value: 1
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:323
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.

+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:323

+                  - name: remote
+                    address: 192.0.2.1:323
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: chrony.stratum
+              availability: []
+              description: Distance to the reference clock
+              unit: level
+              chart_type: line
+              dimensions:
+                - name: stratum
+            - name: chrony.current_correction
+              availability: []
+              description: Current correction
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: current_correction
+            - name: chrony.root_delay
+              availability: []
+              description: Network path delay to stratum-1
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: root_delay
+            - name: chrony.root_dispersion
+              availability: []
+              description: Dispersion accumulated back to stratum-1
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: root_dispersion
+            - name: chrony.last_offset
+              availability: []
+              description: Offset on the last clock update
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: offset
+            - name: chrony.rms_offset
+              availability: []
+              description: Long-term average of the offset value
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: offset
+            - name: chrony.frequency
+              availability: []
+              description: Frequency
+              unit: ppm
+              chart_type: line
+              dimensions:
+                - name: frequency
+            - name: chrony.residual_frequency
+              availability: []
+              description: Residual frequency
+              unit: ppm
+              chart_type: line
+              dimensions:
+                - name: residual_frequency
+            - name: chrony.skew
+              availability: []
+              description: Skew
+              unit: ppm
+              chart_type: line
+              dimensions:
+                - name: skew
+            - name: chrony.update_interval
+              availability: []
+              description: Interval between the last two clock updates
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: update_interval
+            - name: chrony.ref_measurement_time
+              availability: []
+              description: Time since the last measurement
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: ref_measurement_time
+            - name: chrony.leap_status
+              availability: []
+              description: Leap status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: normal
+                - name: insert_second
+                - name: delete_second
+                - name: unsynchronised
+            - name: chrony.activity
+              availability: []
+              description: Peers activity
+              unit: sources
+              chart_type: stacked
+              dimensions:
+                - name: online
+                - name: offline
+                - name: burst_online
+                - name: burst_offline
+                - name: unresolved
diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/README.md b/src/go/collectors/go.d.plugin/modules/cockroachdb/README.md
new file mode 120000
index 00000000000000..a8130f26245389
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/README.md
@@ -0,0 +1 @@
+integrations/cockroachdb.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/charts.go b/src/go/collectors/go.d.plugin/modules/cockroachdb/charts.go
new file mode 100644
index 00000000000000..20802e92850178
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/charts.go
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import "github.com/netdata/go.d.plugin/agent/module"
+
+type (
+	Charts = module.Charts
+	Chart  = module.Chart
+	Dims   = module.Dims
+	Vars   = module.Vars
+)
+
+var charts = Charts{
+	chartProcessCPUCombinedPercent.Copy(),
+	chartProcessCPUPercent.Copy(),
+	chartProcessCPUUsage.Copy(),
+	chartProcessMemory.Copy(),
+	chartProcessFDUsage.Copy(),
+	chartProcessUptime.Copy(),
+
+	chartHostDiskBandwidth.Copy(),
+	chartHostDiskOperations.Copy(),
+	chartHostDiskIOPS.Copy(),
+	chartHostNetworkBandwidth.Copy(),
+	chartHostNetworkPackets.Copy(),
+
+	chartLiveNodes.Copy(),
+	chartHeartBeats.Copy(),
+
+	chartCapacity.Copy(),
+	chartCapacityUsability.Copy(),
+	chartCapacityUsable.Copy(),
+	chartCapacityUsedPercentage.Copy(),
+
+	chartSQLConnections.Copy(),
+	chartSQLTraffic.Copy(),
+	chartSQLStatementsTotal.Copy(),
+	chartSQLErrors.Copy(),
+	chartSQLStartedDDLStatements.Copy(),
+	chartSQLExecutedDDLStatements.Copy(),
+	chartSQLStartedDMLStatements.Copy(),
+	chartSQLExecutedDMLStatements.Copy(),
+	chartSQLStartedTCLStatements.Copy(),
+	chartSQLExecutedTCLStatements.Copy(),
+	chartSQLActiveDistQueries.Copy(),
+	chartSQLActiveFlowsForDistQueries.Copy(),
+
+	chartUsedLiveData.Copy(),
+	chartLogicalData.Copy(),
+	chartLogicalDataCount.Copy(),
+
+	chartKVTransactions.Copy(),
+	chartKVTransactionsRestarts.Copy(),
+
+	chartRanges.Copy(),
+	chartRangesWithProblems.Copy(),
+	chartRangesEvents.Copy(),
+	chartRangesSnapshotEvents.Copy(),
+
+	chartRocksDBReadAmplification.Copy(),
+	chartRocksDBTableOperations.Copy(),
+	chartRocksDBCacheUsage.Copy(),
+	chartRocksDBCacheOperations.Copy(),
+	chartRocksDBCacheHitRate.Copy(),
+	chartRocksDBSSTables.Copy(),
+
+	chartReplicas.Copy(),
+	chartReplicasQuiescence.Copy(),
+	chartReplicasLeaders.Copy(),
+	chartReplicasLeaseHolder.Copy(),
+
+	chartQueuesProcessingFailures.Copy(),
+
+	chartRebalancingQueries.Copy(),
+	chartRebalancingWrites.Copy(),
+
+	chartTimeSeriesWrittenSamples.Copy(),
+	chartTimeSeriesWriteErrors.Copy(),
+	chartTimeSeriesWrittenBytes.Copy(),
+
+	chartSlowRequests.Copy(),
+
+	chartGoroutines.Copy(),
+	chartGoCgoHeapMemory.Copy(),
+	chartCGoCalls.Copy(),
+	chartGCRuns.Copy(),
+	chartGCPauseTime.Copy(),
+}
+
+// Process
+var (
+	chartProcessCPUCombinedPercent = Chart{
+		ID:    "process_cpu_time_combined_percentage",
+		Title: "Combined CPU Time Percentage, Normalized 0-1 by Number of Cores",
+		Units: "percentage",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_cpu_time_combined_percentage",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: metricSysCPUCombinedPercentNormalized, Name: "used", Div: precision},
+		},
+	}
+	chartProcessCPUPercent = Chart{
+		ID:    "process_cpu_time_percentage",
+		Title: "CPU Time Percentage",
+		Units: "percentage",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_cpu_time_percentage",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: metricSysCPUUserPercent, Name: "user", Div: precision},
+			{ID: metricSysCPUSysPercent, Name: "sys", Div: precision},
+		},
+	}
+	chartProcessCPUUsage = Chart{
+		ID:    "process_cpu_time",
+		Title: "CPU Time",
+		Units: "ms",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_cpu_time",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: metricSysCPUUserNs, Name: "user", Algo: module.Incremental, Div: 1e6},
+			{ID: metricSysCPUSysNs, Name: "sys", Algo: module.Incremental, Div: 1e6},
+		},
+	}
+	chartProcessMemory = Chart{
+		ID:    "process_memory",
+		Title: "Memory Usage",
+		Units: "KiB",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_memory",
+		Dims: Dims{
+			{ID: metricSysRSS, Name: "rss", Div: 1024},
+		},
+	}
+	chartProcessFDUsage = Chart{
+		ID:    "process_file_descriptors",
+		Title: "File Descriptors",
+		Units: "fd",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_file_descriptors",
+		Dims: Dims{
+			{ID: metricSysFDOpen, Name: "open"},
+		},
+		Vars: Vars{
+			{ID: metricSysFDSoftLimit},
+		},
+	}
+	chartProcessUptime = Chart{
+		ID:    "process_uptime",
+		Title: "Uptime",
+		Units: "seconds",
+		Fam:   "process",
+		Ctx:   "cockroachdb.process_uptime",
+		Dims: Dims{
+			{ID: metricSysUptime, Name: "uptime"},
+		},
+	}
+)
+
+// Host
+var (
+	chartHostDiskBandwidth = Chart{
+		ID:    "host_disk_bandwidth",
+		Title: "Host Disk Cumulative Bandwidth",
+
Units: "KiB", + Fam: "host", + Ctx: "cockroachdb.host_disk_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: metricSysHostDiskReadBytes, Name: "read", Div: 1024, Algo: module.Incremental}, + {ID: metricSysHostDiskWriteBytes, Name: "write", Div: -1024, Algo: module.Incremental}, + }, + } + chartHostDiskOperations = Chart{ + ID: "host_disk_operations", + Title: "Host Disk Cumulative Operations", + Units: "operations", + Fam: "host", + Ctx: "cockroachdb.host_disk_operations", + Dims: Dims{ + {ID: metricSysHostDiskReadCount, Name: "reads", Algo: module.Incremental}, + {ID: metricSysHostDiskWriteCount, Name: "writes", Mul: -1, Algo: module.Incremental}, + }, + } + chartHostDiskIOPS = Chart{ + ID: "host_disk_iops_in_progress", + Title: "Host Disk Cumulative IOPS In Progress", + Units: "iops", + Fam: "host", + Ctx: "cockroachdb.host_disk_iops_in_progress", + Dims: Dims{ + {ID: metricSysHostDiskIOPSInProgress, Name: "in progress"}, + }, + } + chartHostNetworkBandwidth = Chart{ + ID: "host_network_bandwidth", + Title: "Host Network Cumulative Bandwidth", + Units: "kilobits", + Fam: "host", + Ctx: "cockroachdb.host_network_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: metricSysHostNetRecvBytes, Name: "received", Div: 1000, Algo: module.Incremental}, + {ID: metricSysHostNetSendBytes, Name: "sent", Div: -1000, Algo: module.Incremental}, + }, + } + chartHostNetworkPackets = Chart{ + ID: "host_network_packets", + Title: "Host Network Cumulative Packets", + Units: "packets", + Fam: "host", + Ctx: "cockroachdb.host_network_packets", + Dims: Dims{ + {ID: metricSysHostNetRecvPackets, Name: "received", Algo: module.Incremental}, + {ID: metricSysHostNetSendPackets, Name: "sent", Mul: -1, Algo: module.Incremental}, + }, + } +) + +// Liveness +var ( + chartLiveNodes = Chart{ + ID: "live_nodes", + Title: "Live Nodes in the Cluster", + Units: "nodes", + Fam: "liveness", + Ctx: "cockroachdb.live_nodes", + Dims: Dims{ + {ID: metricLiveNodes, Name: "live nodes"}, + }, + } + chartHeartBeats = Chart{ + ID: "node_liveness_heartbeats", + Title: "Node Liveness Heartbeats", + Units: "heartbeats", + Fam: "liveness", + Ctx: "cockroachdb.node_liveness_heartbeats", + Type: module.Stacked, + Dims: Dims{ + {ID: metricHeartBeatSuccesses, Name: "successful", Algo: module.Incremental}, + {ID: metricHeartBeatFailures, Name: "failed", Algo: module.Incremental}, + }, + } +) + +// Capacity +var ( + chartCapacity = Chart{ + ID: "total_storage_capacity", + Title: "Total Storage Capacity", + Units: "KiB", + Fam: "capacity", + Ctx: "cockroachdb.total_storage_capacity", + Dims: Dims{ + {ID: metricCapacity, Name: "total", Div: 1024}, + }, + } + chartCapacityUsability = Chart{ + ID: "storage_capacity_usability", + Title: "Storage Capacity Usability", + Units: "KiB", + Fam: "capacity", + Ctx: "cockroachdb.storage_capacity_usability", + Type: module.Stacked, + Dims: Dims{ + {ID: metricCapacityUsable, Name: "usable", Div: 1024}, + {ID: metricCapacityUnusable, Name: "unusable", Div: 1024}, + }, + } + chartCapacityUsable = Chart{ + ID: "storage_usable_capacity", + Title: "Storage Usable Capacity", + Units: "KiB", + Fam: "capacity", + Ctx: "cockroachdb.storage_usable_capacity", + Type: module.Stacked, + Dims: Dims{ + {ID: metricCapacityAvailable, Name: "available", Div: 1024}, + {ID: metricCapacityUsed, Name: "used", Div: 1024}, + }, + } + chartCapacityUsedPercentage = Chart{ + ID: "storage_used_capacity_percentage", + Title: "Storage Used Capacity Utilization", + Units: "percentage", + Fam: "capacity", + Ctx: 
"cockroachdb.storage_used_capacity_percentage", + Dims: Dims{ + {ID: metricCapacityUsedPercentage, Name: "total", Div: precision}, + {ID: metricCapacityUsableUsedPercentage, Name: "usable", Div: precision}, + }, + } +) + +// SQL +var ( + chartSQLConnections = Chart{ + ID: "sql_connections", + Title: "Active SQL Connections", + Units: "connections", + Fam: "sql", + Ctx: "cockroachdb.sql_connections", + Dims: Dims{ + {ID: metricSQLConnections, Name: "active"}, + }, + } + chartSQLTraffic = Chart{ + ID: "sql_bandwidth", + Title: "SQL Bandwidth", + Units: "KiB", + Fam: "sql", + Ctx: "cockroachdb.sql_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: metricSQLBytesIn, Name: "received", Div: 1024, Algo: module.Incremental}, + {ID: metricSQLBytesOut, Name: "sent", Div: -1024, Algo: module.Incremental}, + }, + } + chartSQLStatementsTotal = Chart{ + ID: "sql_statements_total", + Title: "SQL Statements Total", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_statements_total", + Type: module.Area, + Dims: Dims{ + {ID: metricSQLQueryStartedCount, Name: "started", Algo: module.Incremental}, + {ID: metricSQLQueryCount, Name: "executed", Algo: module.Incremental}, + }, + } + chartSQLErrors = Chart{ + ID: "sql_errors", + Title: "SQL Statements and Transaction Errors", + Units: "errors", + Fam: "sql", + Ctx: "cockroachdb.sql_errors", + Dims: Dims{ + {ID: metricSQLFailureCount, Name: "statement", Algo: module.Incremental}, + {ID: metricSQLTXNAbortCount, Name: "transaction", Algo: module.Incremental}, + }, + } + chartSQLStartedDDLStatements = Chart{ + ID: "sql_started_ddl_statements", + Title: "SQL Started DDL Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_started_ddl_statements", + Dims: Dims{ + {ID: metricSQLDDLStartedCount, Name: "DDL"}, + }, + } + chartSQLExecutedDDLStatements = Chart{ + ID: "sql_executed_ddl_statements", + Title: "SQL Executed DDL Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_executed_ddl_statements", + Dims: Dims{ + {ID: metricSQLDDLCount, Name: "DDL"}, + }, + } + chartSQLStartedDMLStatements = Chart{ + ID: "sql_started_dml_statements", + Title: "SQL Started DML Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_started_dml_statements", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSQLSelectStartedCount, Name: "SELECT", Algo: module.Incremental}, + {ID: metricSQLUpdateStartedCount, Name: "UPDATE", Algo: module.Incremental}, + {ID: metricSQLInsertStartedCount, Name: "INSERT", Algo: module.Incremental}, + {ID: metricSQLDeleteStartedCount, Name: "DELETE", Algo: module.Incremental}, + }, + } + chartSQLExecutedDMLStatements = Chart{ + ID: "sql_executed_dml_statements", + Title: "SQL Executed DML Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_executed_dml_statements", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSQLSelectCount, Name: "SELECT", Algo: module.Incremental}, + {ID: metricSQLUpdateCount, Name: "UPDATE", Algo: module.Incremental}, + {ID: metricSQLInsertCount, Name: "INSERT", Algo: module.Incremental}, + {ID: metricSQLDeleteCount, Name: "DELETE", Algo: module.Incremental}, + }, + } + chartSQLStartedTCLStatements = Chart{ + ID: "sql_started_tcl_statements", + Title: "SQL Started TCL Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_started_tcl_statements", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSQLTXNBeginStartedCount, Name: "BEGIN", Algo: module.Incremental}, + {ID: metricSQLTXNCommitStartedCount, Name: "COMMIT", Algo: 
module.Incremental}, + {ID: metricSQLTXNRollbackStartedCount, Name: "ROLLBACK", Algo: module.Incremental}, + {ID: metricSQLSavepointStartedCount, Name: "SAVEPOINT", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointStartedCount, Name: "SAVEPOINT cockroach_restart", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointReleaseStartedCount, Name: "RELEASE SAVEPOINT cockroach_restart", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointRollbackStartedCount, Name: "ROLLBACK TO SAVEPOINT cockroach_restart", Algo: module.Incremental}, + }, + } + chartSQLExecutedTCLStatements = Chart{ + ID: "sql_executed_tcl_statements", + Title: "SQL Executed TCL Statements", + Units: "statements", + Fam: "sql", + Ctx: "cockroachdb.sql_executed_tcl_statements", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSQLTXNBeginCount, Name: "BEGIN", Algo: module.Incremental}, + {ID: metricSQLTXNCommitCount, Name: "COMMIT", Algo: module.Incremental}, + {ID: metricSQLTXNRollbackCount, Name: "ROLLBACK", Algo: module.Incremental}, + {ID: metricSQLSavepointCount, Name: "SAVEPOINT", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointCount, Name: "SAVEPOINT cockroach_restart", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointReleaseCount, Name: "RELEASE SAVEPOINT cockroach_restart", Algo: module.Incremental}, + {ID: metricSQLRestartSavepointRollbackCount, Name: "ROLLBACK TO SAVEPOINT cockroach_restart", Algo: module.Incremental}, + }, + } + chartSQLActiveDistQueries = Chart{ + ID: "sql_active_distributed_queries", + Title: "Active Distributed SQL Queries", + Units: "queries", + Fam: "sql", + Ctx: "cockroachdb.sql_active_distributed_queries", + Dims: Dims{ + {ID: metricSQLDistSQLQueriesActive, Name: "active"}, + }, + } + chartSQLActiveFlowsForDistQueries = Chart{ + ID: "sql_distributed_flows", + Title: "Distributed SQL Flows", + Units: "flows", + Fam: "sql", + Ctx: "cockroachdb.sql_distributed_flows", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSQLDistSQLFlowsActive, Name: "active"}, + {ID: metricSQLDistSQLFlowsQueued, Name: "queued"}, + }, + } +) + +// Storage +var ( + chartUsedLiveData = Chart{ + ID: "live_bytes", + Title: "Used Live Data", + Units: "KiB", + Fam: "storage", + Ctx: "cockroachdb.live_bytes", + Dims: Dims{ + {ID: metricLiveBytes, Name: "applications", Div: 1024}, + {ID: metricSysBytes, Name: "system", Div: 1024}, + }, + } + chartLogicalData = Chart{ + ID: "logical_data", + Title: "Logical Data", + Units: "KiB", + Fam: "storage", + Ctx: "cockroachdb.logical_data", + Type: module.Stacked, + Dims: Dims{ + {ID: metricKeyBytes, Name: "keys", Div: 1024}, + {ID: metricValBytes, Name: "values", Div: 1024}, + }, + } + chartLogicalDataCount = Chart{ + ID: "logical_data_count", + Title: "Logical Data Count", + Units: "num", + Fam: "storage", + Ctx: "cockroachdb.logical_data_count", + Type: module.Stacked, + Dims: Dims{ + {ID: metricKeyCount, Name: "keys"}, + {ID: metricValCount, Name: "values"}, + }, + } +) + +// KV Transactions +var ( + chartKVTransactions = Chart{ + ID: "kv_transactions", + Title: "KV Transactions", + Units: "transactions", + Fam: "kv transactions", + Ctx: "cockroachdb.kv_transactions", + Type: module.Area, + Dims: Dims{ + {ID: metricTxnCommits, Name: "committed", Algo: module.Incremental}, + {ID: metricTxnCommits1PC, Name: "fast-path_committed", Algo: module.Incremental}, + {ID: metricTxnAborts, Name: "aborted", Algo: module.Incremental}, + }, + } + chartKVTransactionsRestarts = Chart{ + ID: "kv_transaction_restarts", + Title: "KV Transaction Restarts", + 
Units: "restarts", + Fam: "kv transactions", + Ctx: "cockroachdb.kv_transaction_restarts", + Type: module.Stacked, + Dims: Dims{ + {ID: metricTxnRestartsWriteTooOld, Name: "write too old", Algo: module.Incremental}, + {ID: metricTxnRestartsWriteTooOldMulti, Name: "write too old (multiple)", Algo: module.Incremental}, + {ID: metricTxnRestartsSerializable, Name: "forwarded timestamp (iso=serializable)", Algo: module.Incremental}, + {ID: metricTxnRestartsPossibleReplay, Name: "possible reply", Algo: module.Incremental}, + {ID: metricTxnRestartsAsyncWriteFailure, Name: "async consensus failure", Algo: module.Incremental}, + {ID: metricTxnRestartsReadWithInUncertainty, Name: "read within uncertainty interval", Algo: module.Incremental}, + {ID: metricTxnRestartsTxnAborted, Name: "aborted", Algo: module.Incremental}, + {ID: metricTxnRestartsTxnPush, Name: "push failure", Algo: module.Incremental}, + {ID: metricTxnRestartsUnknown, Name: "unknown", Algo: module.Incremental}, + }, + } +) + +// Ranges +var ( + chartRanges = Chart{ + ID: "ranges", + Title: "Ranges", + Units: "ranges", + Fam: "ranges", + Ctx: "cockroachdb.ranges", + Dims: Dims{ + {ID: metricRanges, Name: "ranges"}, + }, + } + chartRangesWithProblems = Chart{ + ID: "ranges_replication_problem", + Title: "Ranges Replication Problems", + Units: "ranges", + Fam: "ranges", + Ctx: "cockroachdb.ranges_replication_problem", + Type: module.Stacked, + Dims: Dims{ + {ID: metricRangesUnavailable, Name: "unavailable"}, + {ID: metricRangesUnderReplicated, Name: "under_replicated"}, + {ID: metricRangesOverReplicated, Name: "over_replicated"}, + }, + } + chartRangesEvents = Chart{ + ID: "range_events", + Title: "Range Events", + Units: "events", + Fam: "ranges", + Ctx: "cockroachdb.range_events", + Type: module.Stacked, + Dims: Dims{ + {ID: metricRangeSplits, Name: "split", Algo: module.Incremental}, + {ID: metricRangeAdds, Name: "add", Algo: module.Incremental}, + {ID: metricRangeRemoves, Name: "remove", Algo: module.Incremental}, + {ID: metricRangeMerges, Name: "merge", Algo: module.Incremental}, + }, + } + chartRangesSnapshotEvents = Chart{ + ID: "range_snapshot_events", + Title: "Range Snapshot Events", + Units: "events", + Fam: "ranges", + Ctx: "cockroachdb.range_snapshot_events", + Type: module.Stacked, + Dims: Dims{ + {ID: metricRangeSnapshotsGenerated, Name: "generated", Algo: module.Incremental}, + {ID: metricRangeSnapshotsNormalApplied, Name: "applied (raft-initiated)", Algo: module.Incremental}, + {ID: metricRangeSnapshotsLearnerApplied, Name: "applied (learner)", Algo: module.Incremental}, + {ID: metricRangeSnapshotsPreemptiveApplied, Name: "applied (preemptive)", Algo: module.Incremental}, + }, + } +) + +// RocksDB +var ( + chartRocksDBReadAmplification = Chart{ + ID: "rocksdb_read_amplification", + Title: "RocksDB Read Amplification", + Units: "reads/query", + Fam: "rocksdb", + Ctx: "cockroachdb.rocksdb_read_amplification", + Dims: Dims{ + {ID: metricRocksDBReadAmplification, Name: "reads"}, + }, + } + chartRocksDBTableOperations = Chart{ + ID: "rocksdb_table_operations", + Title: "RocksDB Table Operations", + Units: "operations", + Fam: "rocksdb", + Ctx: "cockroachdb.rocksdb_table_operations", + Dims: Dims{ + {ID: metricRocksDBCompactions, Name: "compactions", Algo: module.Incremental}, + {ID: metricRocksDBFlushes, Name: "flushes", Algo: module.Incremental}, + }, + } + chartRocksDBCacheUsage = Chart{ + ID: "rocksdb_cache_usage", + Title: "RocksDB Block Cache Usage", + Units: "KiB", + Fam: "rocksdb", + Ctx: 
"cockroachdb.rocksdb_cache_usage", + Type: module.Area, + Dims: Dims{ + {ID: metricRocksDBBlockCacheUsage, Name: "used", Div: 1024}, + }, + } + chartRocksDBCacheOperations = Chart{ + ID: "rocksdb_cache_operations", + Title: "RocksDB Block Cache Operations", + Units: "operations", + Fam: "rocksdb", + Ctx: "cockroachdb.rocksdb_cache_operations", + Type: module.Stacked, + Dims: Dims{ + {ID: metricRocksDBBlockCacheHits, Name: "hits", Algo: module.Incremental}, + {ID: metricRocksDBBlockCacheMisses, Name: "misses", Algo: module.Incremental}, + }, + } + chartRocksDBCacheHitRage = Chart{ + ID: "rocksdb_cache_hit_rate", + Title: "RocksDB Block Cache Hit Rate", + Units: "percentage", + Fam: "rocksdb", + Ctx: "cockroachdb.rocksdb_cache_hit_rate", + Type: module.Area, + Dims: Dims{ + {ID: metricRocksDBBlockCacheHitRate, Name: "hit rate"}, + }, + } + chartRocksDBSSTables = Chart{ + ID: "rocksdb_sstables", + Title: "RocksDB SSTables", + Units: "sstables", + Fam: "rocksdb", + Ctx: "cockroachdb.rocksdb_sstables", + Dims: Dims{ + {ID: metricRocksDBNumSSTables, Name: "sstables"}, + }, + } +) + +// Replicas +var ( + chartReplicas = Chart{ + ID: "replicas", + Title: "Number of Replicas", + Units: "replicas", + Fam: "replication", + Ctx: "cockroachdb.replicas", + Dims: Dims{ + {ID: metricReplicas, Name: "replicas"}, + }, + } + chartReplicasQuiescence = Chart{ + ID: "replicas_quiescence", + Title: "Replicas Quiescence", + Units: "replicas", + Fam: "replication", + Ctx: "cockroachdb.replicas_quiescence", + Type: module.Stacked, + Dims: Dims{ + {ID: metricReplicasQuiescent, Name: "quiescent"}, + {ID: metricReplicasActive, Name: "active"}, + }, + } + chartReplicasLeaders = Chart{ + ID: "replicas_leaders", + Title: "Number of Raft Leaders", + Units: "replicas", + Fam: "replication", + Ctx: "cockroachdb.replicas_leaders", + Type: module.Area, + Dims: Dims{ + {ID: metricReplicasLeaders, Name: "leaders"}, + {ID: metricReplicasLeadersNotLeaseholders, Name: "not leaseholders"}, + }, + } + chartReplicasLeaseHolder = Chart{ + ID: "replicas_leaseholders", + Title: "Number of Leaseholders", + Units: "leaseholders", + Fam: "replication", + Ctx: "cockroachdb.replicas_leaseholders", + Dims: Dims{ + {ID: metricReplicasLeaseholders, Name: "leaseholders"}, + }, + } +) + +// Queues +var ( + chartQueuesProcessingFailures = Chart{ + ID: "queue_processing_failures", + Title: "Queues Processing Failures", + Units: "failures", + Fam: "queues", + Ctx: "cockroachdb.queue_processing_failures", + Type: module.Stacked, + Dims: Dims{ + {ID: metricQueueGCProcessFailure, Name: "gc", Algo: module.Incremental}, + {ID: metricQueueReplicaGCProcessFailure, Name: "replica gc", Algo: module.Incremental}, + {ID: metricQueueReplicateProcessFailure, Name: "replication", Algo: module.Incremental}, + {ID: metricQueueSplitProcessFailure, Name: "split", Algo: module.Incremental}, + {ID: metricQueueConsistencyProcessFailure, Name: "consistency", Algo: module.Incremental}, + {ID: metricQueueRaftLogProcessFailure, Name: "raft log", Algo: module.Incremental}, + {ID: metricQueueRaftSnapshotProcessFailure, Name: "raft snapshot", Algo: module.Incremental}, + {ID: metricQueueTSMaintenanceProcessFailure, Name: "time series maintenance", Algo: module.Incremental}, + }, + } +) + +// Rebalancing +var ( + chartRebalancingQueries = Chart{ + ID: "rebalancing_queries", + Title: "Rebalancing Average Queries", + Units: "queries/s", + Fam: "rebalancing", + Ctx: "cockroachdb.rebalancing_queries", + Dims: Dims{ + {ID: metricRebalancingQueriesPerSecond, Name: "avg", Div: 
precision}, + }, + } + chartRebalancingWrites = Chart{ + ID: "rebalancing_writes", + Title: "Rebalancing Average Writes", + Units: "writes/s", + Fam: "rebalancing", + Ctx: "cockroachdb.rebalancing_writes", + Dims: Dims{ + {ID: metricRebalancingWritesPerSecond, Name: "avg", Div: precision}, + }, + } +) + +// Time Series +var ( + chartTimeSeriesWrittenSamples = Chart{ + ID: "timeseries_samples", + Title: "Time Series Written Samples", + Units: "samples", + Fam: "time series", + Ctx: "cockroachdb.timeseries_samples", + Dims: Dims{ + {ID: metricTimeSeriesWriteSamples, Name: "written", Algo: module.Incremental}, + }, + } + chartTimeSeriesWriteErrors = Chart{ + ID: "timeseries_write_errors", + Title: "Time Series Write Errors", + Units: "errors", + Fam: "time series", + Ctx: "cockroachdb.timeseries_write_errors", + Dims: Dims{ + {ID: metricTimeSeriesWriteErrors, Name: "write", Algo: module.Incremental}, + }, + } + chartTimeSeriesWrittenBytes = Chart{ + ID: "timeseries_write_bytes", + Title: "Time Series Bytes Written", + Units: "KiB", + Fam: "time series", + Ctx: "cockroachdb.timeseries_write_bytes", + Dims: Dims{ + {ID: metricTimeSeriesWriteBytes, Name: "written", Algo: module.Incremental}, + }, + } +) + +// Slow Requests +var ( + chartSlowRequests = Chart{ + ID: "slow_requests", + Title: "Slow Requests", + Units: "requests", + Fam: "slow requests", + Ctx: "cockroachdb.slow_requests", + Type: module.Stacked, + Dims: Dims{ + {ID: metricRequestsSlowLatch, Name: "acquiring latches"}, + {ID: metricRequestsSlowLease, Name: "acquiring lease"}, + {ID: metricRequestsSlowRaft, Name: "in raft"}, + }, + } +) + +// Go/Cgo +var ( + chartGoCgoHeapMemory = Chart{ + ID: "code_heap_memory_usage", + Title: "Heap Memory Usage", + Units: "KiB", + Fam: "go/cgo", + Ctx: "cockroachdb.code_heap_memory_usage", + Type: module.Stacked, + Dims: Dims{ + {ID: metricSysGoAllocBytes, Name: "go", Div: 1024}, + {ID: metricSysCGoAllocBytes, Name: "cgo", Div: 1024}, + }, + } + chartGoroutines = Chart{ + ID: "goroutines_count", + Title: "Number of Goroutines", + Units: "goroutines", + Fam: "go/cgo", + Ctx: "cockroachdb.goroutines", + Dims: Dims{ + {ID: metricSysGoroutines, Name: "goroutines"}, + }, + } + chartGCRuns = Chart{ + ID: "gc_count", + Title: "GC Runs", + Units: "invokes", + Fam: "go/cgo", + Ctx: "cockroachdb.gc_count", + Dims: Dims{ + {ID: metricSysGCCount, Name: "gc", Algo: module.Incremental}, + }, + } + chartGCPauseTime = Chart{ + ID: "gc_pause", + Title: "GC Pause Time", + Units: "us", + Fam: "go/cgo", + Ctx: "cockroachdb.gc_pause", + Dims: Dims{ + {ID: metricSysGCPauseNs, Name: "pause", Algo: module.Incremental, Div: 1e3}, + }, + } + chartCGoCalls = Chart{ + ID: "cgo_calls", + Title: "Cgo Calls", + Units: "calls", + Fam: "go/cgo", + Ctx: "cockroachdb.cgo_calls", + Dims: Dims{ + {ID: metricSysCGoCalls, Name: "cgo", Algo: module.Incremental}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb.go b/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb.go new file mode 100644 index 00000000000000..0a862f97eae634 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +// DefaultMetricsSampleInterval hard coded to 10 +// 
https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58 +const cockroachDBSamplingInterval = 10 + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("cockroachdb", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: cockroachDBSamplingInterval, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *CockroachDB { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/_status/vars", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + + return &CockroachDB{ + Config: config, + charts: charts.Copy(), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every"` + } + + CockroachDB struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + charts *Charts + } +) + +func (c *CockroachDB) validateConfig() error { + if c.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (c *CockroachDB) initClient() error { + client, err := web.NewHTTPClient(c.Client) + if err != nil { + return err + } + + c.prom = prometheus.New(client, c.Request) + return nil +} + +func (c *CockroachDB) Init() bool { + if err := c.validateConfig(); err != nil { + c.Errorf("error on validating config: %v", err) + return false + } + if err := c.initClient(); err != nil { + c.Errorf("error on initializing client: %v", err) + return false + } + if c.UpdateEvery < cockroachDBSamplingInterval { + c.Warningf("'update_every'(%d) is lower than the CockroachDB default sampling interval (%d)", + c.UpdateEvery, cockroachDBSamplingInterval) + } + return true +} + +func (c *CockroachDB) Check() bool { + return len(c.Collect()) > 0 +} + +func (c *CockroachDB) Charts() *Charts { + return c.charts +} + +func (c *CockroachDB) Collect() map[string]int64 { + mx, err := c.collect() + if err != nil { + c.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (CockroachDB) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb_test.go b/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb_test.go new file mode 100644 index 00000000000000..88c3077165bb86 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/cockroachdb_test.go @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + metricsData, _ = os.ReadFile("testdata/metrics.txt") + wrongMetricsData, _ = os.ReadFile("testdata/non_cockroachdb.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, metricsData) + assert.NotNil(t, wrongMetricsData) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestCockroachDB_Init(t *testing.T) { + cdb := prepareCockroachDB() + + assert.True(t, cdb.Init()) +} + +func TestCockroachDB_Init_ReturnsFalseIfConfigURLIsNotSet(t *testing.T) { + cdb := prepareCockroachDB() + cdb.URL = "" + + assert.False(t, cdb.Init()) +} + +func TestCockroachDB_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { + cdb := prepareCockroachDB() + cdb.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, cdb.Init()) +} + +func TestCockroachDB_Check(t *testing.T) { + cdb, srv := 
prepareClientServer(t) + defer srv.Close() + + assert.True(t, cdb.Check()) +} + +func TestCockroachDB_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { + cdb := New() + cdb.URL = "http://127.0.0.1:38001/metrics" + require.True(t, cdb.Init()) + + assert.False(t, cdb.Check()) +} + +func TestCockroachDB_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestCockroachDB_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestCockroachDB_Collect(t *testing.T) { + cdb, srv := prepareClientServer(t) + defer srv.Close() + + expected := map[string]int64{ + "capacity": 64202351837184, + "capacity_available": 40402062147584, + "capacity_unusable": 23800157791684, + "capacity_usable": 40402194045500, + "capacity_usable_used_percent": 0, + "capacity_used": 131897916, + "capacity_used_percent": 37070, + "keybytes": 6730852, + "keycount": 119307, + "livebytes": 81979227, + "liveness_heartbeatfailures": 2, + "liveness_heartbeatsuccesses": 2720, + "liveness_livenodes": 3, + "queue_consistency_process_failure": 0, + "queue_gc_process_failure": 0, + "queue_raftlog_process_failure": 0, + "queue_raftsnapshot_process_failure": 0, + "queue_replicagc_process_failure": 0, + "queue_replicate_process_failure": 0, + "queue_split_process_failure": 0, + "queue_tsmaintenance_process_failure": 0, + "range_adds": 0, + "range_merges": 0, + "range_removes": 0, + "range_snapshots_generated": 0, + "range_snapshots_learner_applied": 0, + "range_snapshots_normal_applied": 0, + "range_snapshots_preemptive_applied": 0, + "range_splits": 0, + "ranges": 34, + "ranges_overreplicated": 0, + "ranges_unavailable": 0, + "ranges_underreplicated": 0, + "rebalancing_queriespersecond": 801, + "rebalancing_writespersecond": 213023, + "replicas": 34, + "replicas_active": 0, + "replicas_leaders": 7, + "replicas_leaders_not_leaseholders": 0, + "replicas_leaseholders": 7, + "replicas_quiescent": 34, + "requests_slow_latch": 0, + "requests_slow_lease": 0, + "requests_slow_raft": 0, + "rocksdb_block_cache_hit_rate": 92104, + "rocksdb_block_cache_hits": 94825, + "rocksdb_block_cache_misses": 8129, + "rocksdb_block_cache_usage": 39397184, + "rocksdb_compactions": 7, + "rocksdb_flushes": 13, + "rocksdb_num_sstables": 8, + "rocksdb_read_amplification": 1, + "sql_bytesin": 0, + "sql_bytesout": 0, + "sql_conns": 0, + "sql_ddl_count": 0, + "sql_ddl_started_count": 0, + "sql_delete_count": 0, + "sql_delete_started_count": 0, + "sql_distsql_flows_active": 0, + "sql_distsql_flows_queued": 0, + "sql_distsql_queries_active": 0, + "sql_failure_count": 0, + "sql_insert_count": 0, + "sql_insert_started_count": 0, + "sql_misc_count": 0, + "sql_misc_started_count": 0, + "sql_query_count": 0, + "sql_query_started_count": 0, + "sql_restart_savepoint_count": 0, + "sql_restart_savepoint_release_count": 0, + "sql_restart_savepoint_release_started_count": 0, + "sql_restart_savepoint_rollback_count": 0, + "sql_restart_savepoint_rollback_started_count": 0, + "sql_restart_savepoint_started_count": 0, + "sql_savepoint_count": 0, + "sql_savepoint_started_count": 0, + "sql_select_count": 0, + "sql_select_started_count": 0, + "sql_txn_abort_count": 0, + "sql_txn_begin_count": 0, + "sql_txn_begin_started_count": 0, + "sql_txn_commit_count": 0, + "sql_txn_commit_started_count": 0, + "sql_txn_rollback_count": 0, + "sql_txn_rollback_started_count": 0, + "sql_update_count": 0, + "sql_update_started_count": 0, + "sys_cgo_allocbytes": 63363512, + "sys_cgocalls": 577778, + "sys_cpu_combined_percent_normalized": 851, + "sys_cpu_sys_ns": 
154420000000, + "sys_cpu_sys_percent": 1403, + "sys_cpu_user_ns": 227620000000, + "sys_cpu_user_percent": 2004, + "sys_fd_open": 47, + "sys_fd_softlimit": 1048576, + "sys_gc_count": 279, + "sys_gc_pause_ns": 60700450, + "sys_go_allocbytes": 106576224, + "sys_goroutines": 235, + "sys_host_disk_iopsinprogress": 0, + "sys_host_disk_read_bytes": 43319296, + "sys_host_disk_read_count": 1176, + "sys_host_disk_write_bytes": 942080, + "sys_host_disk_write_count": 106, + "sys_host_net_recv_bytes": 234392325, + "sys_host_net_recv_packets": 593876, + "sys_host_net_send_bytes": 461746036, + "sys_host_net_send_packets": 644128, + "sys_rss": 314691584, + "sys_uptime": 12224, + "sysbytes": 13327, + "timeseries_write_bytes": 82810041, + "timeseries_write_errors": 0, + "timeseries_write_samples": 845784, + "txn_aborts": 1, + "txn_commits": 7472, + "txn_commits1PC": 3206, + "txn_restarts_asyncwritefailure": 0, + "txn_restarts_possiblereplay": 0, + "txn_restarts_readwithinuncertainty": 0, + "txn_restarts_serializable": 0, + "txn_restarts_txnaborted": 0, + "txn_restarts_txnpush": 0, + "txn_restarts_unknown": 0, + "txn_restarts_writetooold": 0, + "txn_restarts_writetoooldmulti": 0, + "valbytes": 75527718, + "valcount": 124081, + } + + collected := cdb.Collect() + assert.Equal(t, expected, collected) + testCharts(t, cdb, collected) +} + +func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) { + cdb, srv := prepareClientServerNotCockroachDBMetricResponse(t) + defer srv.Close() + + assert.Nil(t, cdb.Collect()) +} + +func TestCockroachDB_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { + cdb := prepareCockroachDB() + require.True(t, cdb.Init()) + + assert.Nil(t, cdb.Collect()) +} + +func TestCockroachDB_Collect_ReturnsNilIfReceiveInvalidResponse(t *testing.T) { + cdb, ts := prepareClientServerInvalidDataResponse(t) + defer ts.Close() + + assert.Nil(t, cdb.Collect()) +} + +func TestCockroachDB_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) { + cdb, ts := prepareClientServerResponse404(t) + defer ts.Close() + + assert.Nil(t, cdb.Collect()) +} + +func testCharts(t *testing.T, cdb *CockroachDB, collected map[string]int64) { + ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, c *CockroachDB, collected map[string]int64) { + for _, chart := range *c.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareCockroachDB() *CockroachDB { + cdb := New() + cdb.URL = "http://127.0.0.1:38001/metrics" + return cdb +} + +func prepareClientServer(t *testing.T) (*CockroachDB, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsData) + })) + + cdb := New() + cdb.URL = ts.URL + require.True(t, cdb.Init()) + + return cdb, ts +} + +func prepareClientServerNotCockroachDBMetricResponse(t *testing.T) (*CockroachDB, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(wrongMetricsData) + })) + + cdb := New() + cdb.URL = ts.URL + require.True(t, cdb.Init()) + + return cdb, ts +} + +func prepareClientServerInvalidDataResponse(t *testing.T) (*CockroachDB, 
*httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + cdb := New() + cdb.URL = ts.URL + require.True(t, cdb.Init()) + + return cdb, ts +} + +func prepareClientServerResponse404(t *testing.T) (*CockroachDB, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + cdb := New() + cdb.URL = ts.URL + require.True(t, cdb.Init()) + return cdb, ts +} diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/collect.go b/src/go/collectors/go.d.plugin/modules/cockroachdb/collect.go new file mode 100644 index 00000000000000..49c8bb78c124d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/collect.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func validCockroachDBMetrics(scraped prometheus.Series) bool { + return scraped.FindByName("sql_restart_savepoint_count_internal").Len() > 0 +} + +func (c *CockroachDB) collect() (map[string]int64, error) { + scraped, err := c.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if !validCockroachDBMetrics(scraped) { + return nil, errors.New("returned metrics aren't CockroachDB metrics") + } + + mx := collectScraped(scraped, metrics) + calcUsableCapacity(mx) + calcUnusableCapacity(mx) + calcTotalCapacityUsedPercentage(mx) + calcUsableCapacityUsedPercentage(mx) + calcRocksDBCacheHitRate(mx) + calcActiveReplicas(mx) + calcCPUUsagePercent(mx) + + return stm.ToMap(mx), nil +} + +const precision = 1000 + +func collectScraped(scraped prometheus.Series, metricList []string) map[string]float64 { + mx := make(map[string]float64) + for _, name := range metricList { + if ms := scraped.FindByName(name); ms.Len() == 1 { + if isMetricFloat(name) { + mx[name] = ms.Max() * precision + } else { + mx[name] = ms.Max() + } + } + } + return mx +} + +func calcUsableCapacity(mx map[string]float64) { + if !hasAll(mx, metricCapacityAvailable, metricCapacityUsed) { + return + } + available := mx[metricCapacityAvailable] + used := mx[metricCapacityUsed] + + mx[metricCapacityUsable] = available + used +} + +func calcUnusableCapacity(mx map[string]float64) { + if !hasAll(mx, metricCapacity, metricCapacityAvailable, metricCapacityUsed) { + return + } + total := mx[metricCapacity] + available := mx[metricCapacityAvailable] + used := mx[metricCapacityUsed] + + mx[metricCapacityUnusable] = total - (available + used) +} + +func calcTotalCapacityUsedPercentage(mx map[string]float64) { + if !hasAll(mx, metricCapacity, metricCapacityUnusable, metricCapacityUsed) { + return + } + total := mx[metricCapacity] + unusable := mx[metricCapacityUnusable] + used := mx[metricCapacityUsed] + + if mx[metricCapacity] == 0 { + mx[metricCapacityUsedPercentage] = 0 + } else { + mx[metricCapacityUsedPercentage] = (unusable + used) / total * 100 * precision + } +} + +func calcUsableCapacityUsedPercentage(mx map[string]float64) { + if !hasAll(mx, metricCapacityUsable, metricCapacityUsed) { + return + } + usable := mx[metricCapacityUsable] + used := mx[metricCapacityUsed] + + if usable == 0 { + mx[metricCapacityUsableUsedPercentage] = 0 + } else { + mx[metricCapacityUsableUsedPercentage] = used / usable * 100 * precision + } +} + +func calcRocksDBCacheHitRate(mx 
map[string]float64) { + if !hasAll(mx, metricRocksDBBlockCacheHits, metricRocksDBBlockCacheMisses) { + return + } + hits := mx[metricRocksDBBlockCacheHits] + misses := mx[metricRocksDBBlockCacheMisses] + + if sum := hits + misses; sum == 0 { + mx[metricRocksDBBlockCacheHitRate] = 0 + } else { + mx[metricRocksDBBlockCacheHitRate] = hits / sum * 100 * precision + } +} + +func calcActiveReplicas(mx map[string]float64) { + if !hasAll(mx, metricReplicas, metricReplicasQuiescent) { + return + } + total := mx[metricReplicas] + quiescent := mx[metricReplicasQuiescent] + + mx[metricReplicasActive] = total - quiescent +} + +func calcCPUUsagePercent(mx map[string]float64) { + if hasAll(mx, metricSysCPUUserPercent) { + mx[metricSysCPUUserPercent] *= 100 + } + if hasAll(mx, metricSysCPUSysPercent) { + mx[metricSysCPUSysPercent] *= 100 + } + if hasAll(mx, metricSysCPUCombinedPercentNormalized) { + mx[metricSysCPUCombinedPercentNormalized] *= 100 + } +} + +func isMetricFloat(name string) bool { + // only Float metrics (see NewGaugeFloat64 in the cockroach repo): + // - GcPausePercent, CPUUserPercent, CPUSysPercent, CPUCombinedPercentNorm, AverageQueriesPerSecond, AverageWritesPerSecond + switch name { + case metricSysCPUUserPercent, + metricSysCPUSysPercent, + metricSysCPUCombinedPercentNormalized, + metricRebalancingQueriesPerSecond, + metricRebalancingWritesPerSecond: + return true + } + return false +} + +func hasAll(mx map[string]float64, key string, rest ...string) bool { + _, ok := mx[key] + if len(rest) == 0 { + return ok + } + return ok && hasAll(mx, rest[0], rest[1:]...) +} diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/config_schema.json b/src/go/collectors/go.d.plugin/modules/cockroachdb/config_schema.json new file mode 100644 index 00000000000000..e732b99f641b16 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/cockroachdb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/integrations/cockroachdb.md b/src/go/collectors/go.d.plugin/modules/cockroachdb/integrations/cockroachdb.md new file mode 100644 index 00000000000000..39bbedcdfc8b1e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/integrations/cockroachdb.md @@ -0,0 +1,288 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/cockroachdb/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/cockroachdb/metadata.yaml" +sidebar_label: "CockroachDB" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CockroachDB + 
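A note on the arithmetic in `collect.go` above: ratio metrics are scaled by `precision` (1000) before the final conversion to `int64`, which is why the unit test expects `rocksdb_block_cache_hit_rate: 92104` for a hit rate of roughly 92.1%. A minimal, self-contained sketch of the same calculation (the `hitRate` helper and `main` wrapper are illustrative only, not part of the module):

```go
package main

import "fmt"

const precision = 1000 // the same scaling constant collect.go defines

// hitRate mirrors calcRocksDBCacheHitRate: guard against a zero
// denominator, then scale the percentage so it survives the int64 cast.
func hitRate(hits, misses float64) float64 {
	if sum := hits + misses; sum != 0 {
		return hits / sum * 100 * precision
	}
	return 0
}

func main() {
	// hits/misses values taken from testdata/metrics.txt used by the tests
	fmt.Println(int64(hitRate(94825, 8129))) // 92104, i.e. 92.104% at precision=1000
}
```

Scaling by 1000 preserves three decimal places of a percentage while the agent transports plain integers; chart dimensions with `Div: precision` (see the rebalancing charts above) divide the value back at display time.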
+<img src="https://netdata.cloud/img/cockroachdb.svg" width="150"/> + + +Plugin: go.d.plugin +Module: cockroachdb + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors CockroachDB servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per CockroachDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| cockroachdb.process_cpu_time_combined_percentage | used | percentage | +| cockroachdb.process_cpu_time_percentage | user, sys | percentage | +| cockroachdb.process_cpu_time | user, sys | ms | +| cockroachdb.process_memory | rss | KiB | +| cockroachdb.process_file_descriptors | open | fd | +| cockroachdb.process_uptime | uptime | seconds | +| cockroachdb.host_disk_bandwidth | read, write | KiB | +| cockroachdb.host_disk_operations | reads, writes | operations | +| cockroachdb.host_disk_iops_in_progress | in_progress | iops | +| cockroachdb.host_network_bandwidth | received, sent | kilobits | +| cockroachdb.host_network_packets | received, sent | packets | +| cockroachdb.live_nodes | live_nodes | nodes | +| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats | +| cockroachdb.total_storage_capacity | total | KiB | +| cockroachdb.storage_capacity_usability | usable, unusable | KiB | +| cockroachdb.storage_usable_capacity | available, used | KiB | +| cockroachdb.storage_used_capacity_percentage | total, usable | percentage | +| cockroachdb.sql_connections | active | connections | +| cockroachdb.sql_bandwidth | received, sent | KiB | +| cockroachdb.sql_statements_total | started, executed | statements | +| cockroachdb.sql_errors | statement, transaction | errors | +| cockroachdb.sql_started_ddl_statements | ddl | statements | +| cockroachdb.sql_executed_ddl_statements | ddl | statements | +| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements | +| cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements | +| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements | +| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements | +| cockroachdb.sql_active_distributed_queries | active | queries | +| cockroachdb.sql_distributed_flows | active, queued | flows | +| cockroachdb.live_bytes | applications, system | KiB | +| cockroachdb.logical_data | keys, values | KiB | +| cockroachdb.logical_data_count | keys, values | num | +| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions 
| +| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_replay, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts | +| cockroachdb.ranges | ranges | ranges | +| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges | +| cockroachdb.range_events | split, add, remove, merge | events | +| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events | +| cockroachdb.rocksdb_read_amplification | reads | reads/query | +| cockroachdb.rocksdb_table_operations | compactions, flushes | operations | +| cockroachdb.rocksdb_cache_usage | used | KiB | +| cockroachdb.rocksdb_cache_operations | hits, misses | operations | +| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage | +| cockroachdb.rocksdb_sstables | sstables | sstables | +| cockroachdb.replicas | replicas | replicas | +| cockroachdb.replicas_quiescence | quiescent, active | replicas | +| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas | +| cockroachdb.replicas_leaseholders | leaseholders | leaseholders | +| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures | +| cockroachdb.rebalancing_queries | avg | queries/s | +| cockroachdb.rebalancing_writes | avg | writes/s | +| cockroachdb.timeseries_samples | written | samples | +| cockroachdb.timeseries_write_errors | write | errors | +| cockroachdb.timeseries_write_bytes | written | KiB | +| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests | +| cockroachdb.code_heap_memory_usage | go, cgo | KiB | +| cockroachdb.goroutines | goroutines | goroutines | +| cockroachdb.gc_count | gc | invokes | +| cockroachdb.gc_pause | pause | us | +| cockroachdb.cgo_calls | cgo | calls | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization | +| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization | +| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum | +| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target | +| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/cockroachdb.conf`. 
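Each key in this file maps onto a field of the module's `Config` struct shown in `cockroachdb.go` above, via the embedded `web.HTTP` (`url` into `web.Request`, `timeout` into `web.Client`, and so on). As a hedged sketch of the same configuration done programmatically, e.g. when embedding the collector in a Go test harness (import paths follow this source tree; the 2-second timeout is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/netdata/go.d.plugin/modules/cockroachdb"
	"github.com/netdata/go.d.plugin/pkg/web"
)

func main() {
	c := cockroachdb.New() // defaults: http://127.0.0.1:8080/_status/vars, 1s timeout

	// Equivalent of the YAML keys `url`, `timeout`, and `update_every`.
	c.Request = web.Request{URL: "http://127.0.0.1:8080/_status/vars"}
	c.Client.Timeout = web.Duration{Duration: 2 * time.Second}
	c.UpdateEvery = 10 // keep >= CockroachDB's hard-coded 10s sampling interval

	if !c.Init() {
		panic("init failed")
	}
	fmt.Println(c.URL)
}
```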
+ + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/cockroachdb.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + +``` +</details> + +##### HTTP authentication + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +CockroachDB with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8080/_status/vars + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + + - name: remote + url: http://203.0.113.10:8080/_status/vars + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m cockroachdb + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/metadata.yaml b/src/go/collectors/go.d.plugin/modules/cockroachdb/metadata.yaml new file mode 100644 index 00000000000000..522f200ac880a9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/metadata.yaml @@ -0,0 +1,620 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-cockroachdb + plugin_name: go.d.plugin + module_name: cockroachdb + monitored_instance: + name: CockroachDB + link: https://www.cockroachlabs.com/ + icon_filename: cockroachdb.svg + categories: + - data-collection.database-servers + keywords: + - cockroachdb + - databases + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors CockroachDB servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/cockroachdb.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 10 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8080/_status/vars + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+ default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + - name: HTTP authentication + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + username: username + password: password + - name: HTTPS with self-signed certificate + description: CockroachDB with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1:8080/_status/vars + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080/_status/vars + + - name: remote + url: http://203.0.113.10:8080/_status/vars + troubleshooting: + problems: + list: [] + alerts: + - name: cockroachdb_used_storage_capacity + metric: cockroachdb.storage_used_capacity_percentage + info: storage capacity utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf + - name: cockroachdb_used_usable_storage_capacity + metric: cockroachdb.storage_used_capacity_percentage + info: storage usable space utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf + - name: cockroachdb_unavailable_ranges + metric: cockroachdb.ranges_replication_problem + info: number of ranges with fewer live replicas than needed for quorum + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf + - name: cockroachdb_underreplicated_ranges + metric: cockroachdb.ranges_replication_problem + info: number of ranges with fewer live replicas than the replication target + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf + - name: cockroachdb_open_file_descriptors_limit + metric: cockroachdb.process_file_descriptors + info: "open file descriptors utilization (against softlimit)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: cockroachdb.process_cpu_time_combined_percentage + description: Combined CPU Time Percentage, Normalized 0-1 by Number of Cores + unit: percentage + chart_type: line + dimensions: + - name: used + - name: cockroachdb.process_cpu_time_percentage + description: CPU Time Percentage + unit: percentage + chart_type: stacked + dimensions: + - name: user + - name: sys + - name: cockroachdb.process_cpu_time + description: CPU Time + unit: ms + chart_type: stacked + dimensions: + - name: user + - name: sys + - name: cockroachdb.process_memory + description: Memory Usage + unit: KiB + chart_type: line + dimensions: + - name: rss + - name: cockroachdb.process_file_descriptors + description: File Descriptors + unit: fd + chart_type: line + dimensions: + - name: open + - name: cockroachdb.process_uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: cockroachdb.host_disk_bandwidth + description: Host Disk Cumulative Bandwidth + unit: KiB + chart_type: area + dimensions: + - name: read + - name: write + - name: cockroachdb.host_disk_operations + description: Host Disk Cumulative Operations + unit: operations + chart_type: line + dimensions: + - name: reads + - name: writes + - name: cockroachdb.host_disk_iops_in_progress + description: Host Disk Cumulative IOPS In Progress + unit: iops + chart_type: line + dimensions: + - name: in_progress + - name: cockroachdb.host_network_bandwidth + description: Host Network Cumulative Bandwidth + unit: kilobits + chart_type: area + dimensions: + - name: received + - name: sent + - name: cockroachdb.host_network_packets + description: Host Network Cumulative Packets + unit: packets + chart_type: line + dimensions: + - name: received + - name: sent + - name: cockroachdb.live_nodes + description: Live Nodes in the Cluster + unit: nodes + chart_type: line + dimensions: + - name: live_nodes + - name: cockroachdb.node_liveness_heartbeats + description: Node Liveness Heartbeats + unit: heartbeats + chart_type: stacked + dimensions: + - name: successful + - name: failed + - name: cockroachdb.total_storage_capacity + description: Total Storage Capacity + unit: KiB + chart_type: line + dimensions: + - name: total + - name: cockroachdb.storage_capacity_usability + description: Storage Capacity Usability + unit: KiB + chart_type: stacked + dimensions: + - name: usable + - name: unusable + - name: cockroachdb.storage_usable_capacity + description: Storage Usable Capacity + unit: KiB + chart_type: stacked + dimensions: + - name: available + - name: used + - name: cockroachdb.storage_used_capacity_percentage + description: Storage Used Capacity Utilization + unit: percentage + chart_type: line + dimensions: + - name: total + - name: usable + - name: cockroachdb.sql_connections + description: Active SQL Connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: cockroachdb.sql_bandwidth + description: SQL Bandwidth + unit: KiB + chart_type: area + dimensions: + - name: received + - name: sent + - name: cockroachdb.sql_statements_total + description: SQL Statements Total + unit: statements + chart_type: area + dimensions: + - name: started + - name: executed + - name: cockroachdb.sql_errors + description: SQL Statements and Transaction Errors + unit: errors + chart_type: line + dimensions: + - name: statement + - name: transaction + - name: cockroachdb.sql_started_ddl_statements + description: SQL Started DDL Statements + unit: statements + chart_type: line + 
dimensions: + - name: ddl + - name: cockroachdb.sql_executed_ddl_statements + description: SQL Executed DDL Statements + unit: statements + chart_type: line + dimensions: + - name: ddl + - name: cockroachdb.sql_started_dml_statements + description: SQL Started DML Statements + unit: statements + chart_type: stacked + dimensions: + - name: select + - name: update + - name: delete + - name: insert + - name: cockroachdb.sql_executed_dml_statements + description: SQL Executed DML Statements + unit: statements + chart_type: stacked + dimensions: + - name: select + - name: update + - name: delete + - name: insert + - name: cockroachdb.sql_started_tcl_statements + description: SQL Started TCL Statements + unit: statements + chart_type: stacked + dimensions: + - name: begin + - name: commit + - name: rollback + - name: savepoint + - name: savepoint_cockroach_restart + - name: release_savepoint_cockroach_restart + - name: rollback_to_savepoint_cockroach_restart + - name: cockroachdb.sql_executed_tcl_statements + description: SQL Executed TCL Statements + unit: statements + chart_type: stacked + dimensions: + - name: begin + - name: commit + - name: rollback + - name: savepoint + - name: savepoint_cockroach_restart + - name: release_savepoint_cockroach_restart + - name: rollback_to_savepoint_cockroach_restart + - name: cockroachdb.sql_active_distributed_queries + description: Active Distributed SQL Queries + unit: queries + chart_type: line + dimensions: + - name: active + - name: cockroachdb.sql_distributed_flows + description: Distributed SQL Flows + unit: flows + chart_type: stacked + dimensions: + - name: active + - name: queued + - name: cockroachdb.live_bytes + description: Used Live Data + unit: KiB + chart_type: line + dimensions: + - name: applications + - name: system + - name: cockroachdb.logical_data + description: Logical Data + unit: KiB + chart_type: stacked + dimensions: + - name: keys + - name: values + - name: cockroachdb.logical_data_count + description: Logical Data Count + unit: num + chart_type: stacked + dimensions: + - name: keys + - name: values + - name: cockroachdb.kv_transactions + description: KV Transactions + unit: transactions + chart_type: area + dimensions: + - name: committed + - name: fast-path_committed + - name: aborted + - name: cockroachdb.kv_transaction_restarts + description: KV Transaction Restarts + unit: restarts + chart_type: stacked + dimensions: + - name: write_too_old + - name: write_too_old_multiple + - name: forwarded_timestamp + - name: possible_replay + - name: async_consensus_failure + - name: read_within_uncertainty_interval + - name: aborted + - name: push_failure + - name: unknown + - name: cockroachdb.ranges + description: Ranges + unit: ranges + chart_type: line + dimensions: + - name: ranges + - name: cockroachdb.ranges_replication_problem + description: Ranges Replication Problems + unit: ranges + chart_type: stacked + dimensions: + - name: unavailable + - name: under_replicated + - name: over_replicated + - name: cockroachdb.range_events + description: Range Events + unit: events + chart_type: stacked + dimensions: + - name: split + - name: add + - name: remove + - name: merge + - name: cockroachdb.range_snapshot_events + description: Range Snapshot Events + unit: events + chart_type: stacked + dimensions: + - name: generated + - name: applied_raft_initiated + - name: applied_learner + - name: applied_preemptive + - name: cockroachdb.rocksdb_read_amplification + description: RocksDB Read Amplification + unit: reads/query + chart_type: line 
+ dimensions: + - name: reads + - name: cockroachdb.rocksdb_table_operations + description: RocksDB Table Operations + unit: operations + chart_type: line + dimensions: + - name: compactions + - name: flushes + - name: cockroachdb.rocksdb_cache_usage + description: RocksDB Block Cache Usage + unit: KiB + chart_type: area + dimensions: + - name: used + - name: cockroachdb.rocksdb_cache_operations + description: RocksDB Block Cache Operations + unit: operations + chart_type: stacked + dimensions: + - name: hits + - name: misses + - name: cockroachdb.rocksdb_cache_hit_rate + description: RocksDB Block Cache Hit Rate + unit: percentage + chart_type: area + dimensions: + - name: hit_rate + - name: cockroachdb.rocksdb_sstables + description: RocksDB SSTables + unit: sstables + chart_type: line + dimensions: + - name: sstables + - name: cockroachdb.replicas + description: Number of Replicas + unit: replicas + chart_type: line + dimensions: + - name: replicas + - name: cockroachdb.replicas_quiescence + description: Replicas Quiescence + unit: replicas + chart_type: stacked + dimensions: + - name: quiescent + - name: active + - name: cockroachdb.replicas_leaders + description: Number of Raft Leaders + unit: replicas + chart_type: area + dimensions: + - name: leaders + - name: not_leaseholders + - name: cockroachdb.replicas_leaseholders + description: Number of Leaseholders + unit: leaseholders + chart_type: line + dimensions: + - name: leaseholders + - name: cockroachdb.queue_processing_failures + description: Queues Processing Failures + unit: failures + chart_type: stacked + dimensions: + - name: gc + - name: replica_gc + - name: replication + - name: split + - name: consistency + - name: raft_log + - name: raft_snapshot + - name: time_series_maintenance + - name: cockroachdb.rebalancing_queries + description: Rebalancing Average Queries + unit: queries/s + chart_type: line + dimensions: + - name: avg + - name: cockroachdb.rebalancing_writes + description: Rebalancing Average Writes + unit: writes/s + chart_type: line + dimensions: + - name: avg + - name: cockroachdb.timeseries_samples + description: Time Series Written Samples + unit: samples + chart_type: line + dimensions: + - name: written + - name: cockroachdb.timeseries_write_errors + description: Time Series Write Errors + unit: errors + chart_type: line + dimensions: + - name: write + - name: cockroachdb.timeseries_write_bytes + description: Time Series Bytes Written + unit: KiB + chart_type: line + dimensions: + - name: written + - name: cockroachdb.slow_requests + description: Slow Requests + unit: requests + chart_type: stacked + dimensions: + - name: acquiring_latches + - name: acquiring_lease + - name: in_raft + - name: cockroachdb.code_heap_memory_usage + description: Heap Memory Usage + unit: KiB + chart_type: stacked + dimensions: + - name: go + - name: cgo + - name: cockroachdb.goroutines + description: Number of Goroutines + unit: goroutines + chart_type: line + dimensions: + - name: goroutines + - name: cockroachdb.gc_count + description: GC Runs + unit: invokes + chart_type: line + dimensions: + - name: gc + - name: cockroachdb.gc_pause + description: GC Pause Time + unit: us + chart_type: line + dimensions: + - name: pause + - name: cockroachdb.cgo_calls + description: Cgo Calls + unit: calls + chart_type: line + dimensions: + - name: cgo diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/metrics.go b/src/go/collectors/go.d.plugin/modules/cockroachdb/metrics.go new file mode 100644 index 
00000000000000..fabd254990e65f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/metrics.go @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +// Architecture Overview +// https://www.cockroachlabs.com/docs/stable/architecture/overview.html + +// Web Dashboards +// https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/src/views/cluster/containers/nodeGraphs/dashboards + +// Process +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go + metricSysCPUUserNs = "sys_cpu_user_ns" + metricSysCPUSysNs = "sys_cpu_sys_ns" + metricSysCPUUserPercent = "sys_cpu_user_percent" + metricSysCPUSysPercent = "sys_cpu_sys_percent" + metricSysCPUCombinedPercentNormalized = "sys_cpu_combined_percent_normalized" + metricSysRSS = "sys_rss" + metricSysFDOpen = "sys_fd_open" + metricSysFDSoftLimit = "sys_fd_softlimit" + metricSysUptime = "sys_uptime" +) + +// Host Disk/Network Cumulative +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go + metricSysHostDiskReadBytes = "sys_host_disk_read_bytes" + metricSysHostDiskWriteBytes = "sys_host_disk_write_bytes" + metricSysHostDiskReadCount = "sys_host_disk_read_count" + metricSysHostDiskWriteCount = "sys_host_disk_write_count" + metricSysHostDiskIOPSInProgress = "sys_host_disk_iopsinprogress" + metricSysHostNetSendBytes = "sys_host_net_send_bytes" + metricSysHostNetRecvBytes = "sys_host_net_recv_bytes" + metricSysHostNetSendPackets = "sys_host_net_send_packets" + metricSysHostNetRecvPackets = "sys_host_net_recv_packets" +) + +// Liveness +const ( + //https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/node_liveness.go + metricLiveNodes = "liveness_livenodes" + metricHeartBeatSuccesses = "liveness_heartbeatsuccesses" + metricHeartBeatFailures = "liveness_heartbeatfailures" +) + +// Capacity +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricCapacity = "capacity" + metricCapacityAvailable = "capacity_available" + metricCapacityUsed = "capacity_used" + //metricCapacityReserved = "capacity_reserved" +) + +// SQL +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/server.go + metricSQLConnections = "sql_conns" + metricSQLBytesIn = "sql_bytesin" + metricSQLBytesOut = "sql_bytesout" + + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/exec_util.go + // Started Statements + metricSQLQueryStartedCount = "sql_query_started_count" // Cumulative (Statements + Transaction Statements) + metricSQLSelectStartedCount = "sql_select_started_count" + metricSQLUpdateStartedCount = "sql_update_started_count" + metricSQLInsertStartedCount = "sql_insert_started_count" + metricSQLDeleteStartedCount = "sql_delete_started_count" + metricSQLSavepointStartedCount = "sql_savepoint_started_count" + metricSQLRestartSavepointStartedCount = "sql_restart_savepoint_started_count" + metricSQLRestartSavepointReleaseStartedCount = "sql_restart_savepoint_release_started_count" + metricSQLRestartSavepointRollbackStartedCount = "sql_restart_savepoint_rollback_started_count" + metricSQLDDLStartedCount = "sql_ddl_started_count" + metricSQLMiscStartedCount = "sql_misc_started_count" + // Started Transaction Statements + metricSQLTXNBeginStartedCount = "sql_txn_begin_started_count" + metricSQLTXNCommitStartedCount = "sql_txn_commit_started_count" + metricSQLTXNRollbackStartedCount = "sql_txn_rollback_started_count" + + // Executed Statements + metricSQLQueryCount = 
"sql_query_count" // Cumulative (Statements + Transaction Statements) + metricSQLSelectCount = "sql_select_count" + metricSQLUpdateCount = "sql_update_count" + metricSQLInsertCount = "sql_insert_count" + metricSQLDeleteCount = "sql_delete_count" + metricSQLSavepointCount = "sql_savepoint_count" + metricSQLRestartSavepointCount = "sql_restart_savepoint_count" + metricSQLRestartSavepointReleaseCount = "sql_restart_savepoint_release_count" + metricSQLRestartSavepointRollbackCount = "sql_restart_savepoint_rollback_count" + metricSQLDDLCount = "sql_ddl_count" + metricSQLMiscCount = "sql_misc_count" + // Executed Transaction statements + metricSQLTXNBeginCount = "sql_txn_begin_count" + metricSQLTXNCommitCount = "sql_txn_commit_count" + metricSQLTXNRollbackCount = "sql_txn_rollback_count" + + // Statements Resulted In An Error + metricSQLFailureCount = "sql_failure_count" + // Transaction Resulted In Abort Errors + metricSQLTXNAbortCount = "sql_txn_abort_count" + + // Distributed SQL + // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/execinfra/metrics.go + metricSQLDistSQLQueriesActive = "sql_distsql_queries_active" + metricSQLDistSQLFlowsActive = "sql_distsql_flows_active" + metricSQLDistSQLFlowsQueued = "sql_distsql_flows_queued" +) + +// Storage +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricLiveBytes = "livebytes" + metricSysBytes = "sysbytes" + metricKeyBytes = "keybytes" + metricValBytes = "valbytes" + metricKeyCount = "keycount" + metricValCount = "valcount" +) + +// KV Transactions +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/kv/txn_metrics.go + metricTxnCommits = "txn_commits" + metricTxnCommits1PC = "txn_commits1PC" + metricTxnAborts = "txn_aborts" + metricTxnRestartsWriteTooOld = "txn_restarts_writetooold" + metricTxnRestartsWriteTooOldMulti = "txn_restarts_writetoooldmulti" + metricTxnRestartsSerializable = "txn_restarts_serializable" + metricTxnRestartsPossibleReplay = "txn_restarts_possiblereplay" + metricTxnRestartsAsyncWriteFailure = "txn_restarts_asyncwritefailure" + metricTxnRestartsReadWithInUncertainty = "txn_restarts_readwithinuncertainty" + metricTxnRestartsTxnAborted = "txn_restarts_txnaborted" + metricTxnRestartsTxnPush = "txn_restarts_txnpush" + metricTxnRestartsUnknown = "txn_restarts_unknown" +) + +// Ranges +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricRanges = "ranges" + metricRangesUnavailable = "ranges_unavailable" + metricRangesUnderReplicated = "ranges_underreplicated" + metricRangesOverReplicated = "ranges_overreplicated" + // Range Events Metrics + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricRangeSplits = "range_splits" + metricRangeAdds = "range_adds" + metricRangeRemoves = "range_removes" + metricRangeMerges = "range_merges" + metricRangeSnapshotsGenerated = "range_snapshots_generated" + metricRangeSnapshotsPreemptiveApplied = "range_snapshots_preemptive_applied" + metricRangeSnapshotsLearnerApplied = "range_snapshots_learner_applied" + metricRangeSnapshotsNormalApplied = "range_snapshots_normal_applied" +) + +// RocksDB +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricRocksDBReadAmplification = "rocksdb_read_amplification" + metricRocksDBNumSSTables = "rocksdb_num_sstables" + metricRocksDBBlockCacheUsage = "rocksdb_block_cache_usage" + metricRocksDBBlockCacheHits = "rocksdb_block_cache_hits" + metricRocksDBBlockCacheMisses = 
"rocksdb_block_cache_misses" + metricRocksDBCompactions = "rocksdb_compactions" + metricRocksDBFlushes = "rocksdb_flushes" +) + +// Replication +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricReplicas = "replicas" + // metricReplicasReserved = "replicas_reserved" + metricReplicasLeaders = "replicas_leaders" + metricReplicasLeadersNotLeaseholders = "replicas_leaders_not_leaseholders" + metricReplicasLeaseholders = "replicas_leaseholders" + metricReplicasQuiescent = "replicas_quiescent" +) + +// Queues +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricQueueGCProcessFailure = "queue_gc_process_failure" + metricQueueReplicaGCProcessFailure = "queue_replicagc_process_failure" + metricQueueReplicateProcessFailure = "queue_replicate_process_failure" + metricQueueSplitProcessFailure = "queue_split_process_failure" + metricQueueConsistencyProcessFailure = "queue_consistency_process_failure" + metricQueueRaftLogProcessFailure = "queue_raftlog_process_failure" + metricQueueRaftSnapshotProcessFailure = "queue_raftsnapshot_process_failure" + metricQueueTSMaintenanceProcessFailure = "queue_tsmaintenance_process_failure" +) + +// Rebalancing +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricRebalancingQueriesPerSecond = "rebalancing_queriespersecond" + metricRebalancingWritesPerSecond = "rebalancing_writespersecond" +) + +// Slow Requests +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go + metricRequestsSlowLease = "requests_slow_lease" + metricRequestsSlowLatch = "requests_slow_latch" + metricRequestsSlowRaft = "requests_slow_raft" +) + +// Time Series +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/ts/metrics.go + metricTimeSeriesWriteSamples = "timeseries_write_samples" + metricTimeSeriesWriteErrors = "timeseries_write_errors" + metricTimeSeriesWriteBytes = "timeseries_write_bytes" +) + +// Go/Cgo +const ( + // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go + metricSysGoAllocBytes = "sys_go_allocbytes" + metricSysCGoAllocBytes = "sys_cgo_allocbytes" + metricSysCGoCalls = "sys_cgocalls" + metricSysGoroutines = "sys_goroutines" + metricSysGCCount = "sys_gc_count" + metricSysGCPauseNs = "sys_gc_pause_ns" +) + +const ( + // Calculated Metrics + metricCapacityUsable = "capacity_usable" + metricCapacityUnusable = "capacity_unusable" + metricCapacityUsedPercentage = "capacity_used_percent" + metricCapacityUsableUsedPercentage = "capacity_usable_used_percent" + metricRocksDBBlockCacheHitRate = "rocksdb_block_cache_hit_rate" + metricReplicasActive = "replicas_active" +) + +var metrics = []string{ + metricSysCPUUserNs, + metricSysCPUSysNs, + metricSysCPUUserPercent, + metricSysCPUSysPercent, + metricSysCPUCombinedPercentNormalized, + metricSysRSS, + metricSysFDOpen, + metricSysFDSoftLimit, + metricSysUptime, + + metricSysHostDiskReadBytes, + metricSysHostDiskWriteBytes, + metricSysHostDiskReadCount, + metricSysHostDiskWriteCount, + metricSysHostDiskIOPSInProgress, + metricSysHostNetSendBytes, + metricSysHostNetRecvBytes, + metricSysHostNetSendPackets, + metricSysHostNetRecvPackets, + + metricLiveNodes, + metricHeartBeatSuccesses, + metricHeartBeatFailures, + + metricCapacity, + metricCapacityAvailable, + metricCapacityUsed, + + metricSQLConnections, + metricSQLBytesIn, + metricSQLBytesOut, + metricSQLQueryStartedCount, + metricSQLSelectStartedCount, + 
metricSQLUpdateStartedCount, + metricSQLInsertStartedCount, + metricSQLDeleteStartedCount, + metricSQLSavepointStartedCount, + metricSQLRestartSavepointStartedCount, + metricSQLRestartSavepointReleaseStartedCount, + metricSQLRestartSavepointRollbackStartedCount, + metricSQLDDLStartedCount, + metricSQLMiscStartedCount, + metricSQLTXNBeginStartedCount, + metricSQLTXNCommitStartedCount, + metricSQLTXNRollbackStartedCount, + metricSQLQueryCount, + metricSQLSelectCount, + metricSQLUpdateCount, + metricSQLInsertCount, + metricSQLDeleteCount, + metricSQLSavepointCount, + metricSQLRestartSavepointCount, + metricSQLRestartSavepointReleaseCount, + metricSQLRestartSavepointRollbackCount, + metricSQLDDLCount, + metricSQLMiscCount, + metricSQLTXNBeginCount, + metricSQLTXNCommitCount, + metricSQLTXNRollbackCount, + metricSQLFailureCount, + metricSQLTXNAbortCount, + metricSQLDistSQLQueriesActive, + metricSQLDistSQLFlowsActive, + metricSQLDistSQLFlowsQueued, + + metricLiveBytes, + metricSysBytes, + metricKeyBytes, + metricValBytes, + metricKeyCount, + metricValCount, + + metricTxnCommits, + metricTxnCommits1PC, + metricTxnAborts, + metricTxnRestartsWriteTooOld, + metricTxnRestartsWriteTooOldMulti, + metricTxnRestartsSerializable, + metricTxnRestartsPossibleReplay, + metricTxnRestartsAsyncWriteFailure, + metricTxnRestartsReadWithInUncertainty, + metricTxnRestartsTxnAborted, + metricTxnRestartsTxnPush, + metricTxnRestartsUnknown, + + metricRanges, + metricRangesUnavailable, + metricRangesUnderReplicated, + metricRangesOverReplicated, + metricRangeSplits, + metricRangeAdds, + metricRangeRemoves, + metricRangeMerges, + metricRangeSnapshotsGenerated, + metricRangeSnapshotsPreemptiveApplied, + metricRangeSnapshotsLearnerApplied, + metricRangeSnapshotsNormalApplied, + + metricRocksDBReadAmplification, + metricRocksDBNumSSTables, + metricRocksDBBlockCacheUsage, + metricRocksDBBlockCacheHits, + metricRocksDBBlockCacheMisses, + metricRocksDBCompactions, + metricRocksDBFlushes, + + metricReplicas, + metricReplicasLeaders, + metricReplicasLeadersNotLeaseholders, + metricReplicasLeaseholders, + metricReplicasQuiescent, + + metricQueueGCProcessFailure, + metricQueueReplicaGCProcessFailure, + metricQueueReplicateProcessFailure, + metricQueueSplitProcessFailure, + metricQueueConsistencyProcessFailure, + metricQueueRaftLogProcessFailure, + metricQueueRaftSnapshotProcessFailure, + metricQueueTSMaintenanceProcessFailure, + + metricRebalancingQueriesPerSecond, + metricRebalancingWritesPerSecond, + + metricTimeSeriesWriteSamples, + metricTimeSeriesWriteErrors, + metricTimeSeriesWriteBytes, + + metricRequestsSlowLease, + metricRequestsSlowLatch, + metricRequestsSlowRaft, + + metricSysGoAllocBytes, + metricSysCGoAllocBytes, + metricSysCGoCalls, + metricSysGoroutines, + metricSysGCCount, + metricSysGCPauseNs, +} diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/metrics.txt b/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/metrics.txt new file mode 100644 index 00000000000000..ca537e1010ee53 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/metrics.txt @@ -0,0 +1,2952 @@ +# HELP sql_distsql_flows_active Number of distributed SQL flows currently active +# TYPE sql_distsql_flows_active gauge +sql_distsql_flows_active 0.0 +# HELP queue_consistency_process_failure Number of replicas which failed processing in the consistency checker queue +# TYPE queue_consistency_process_failure counter +queue_consistency_process_failure{store="1"} 0.0 +# HELP 
queue_replicate_process_success Number of replicas successfully processed by the replicate queue +# TYPE queue_replicate_process_success counter +queue_replicate_process_success{store="1"} 0.0 +# HELP distsender_batches Number of batches processed +# TYPE distsender_batches counter +distsender_batches 56336.0 +# HELP changefeed_table_metadata_nanos Time blocked while verifying table metadata histories +# TYPE changefeed_table_metadata_nanos counter +changefeed_table_metadata_nanos 0.0 +# HELP sql_update_started_count Number of SQL UPDATE statements started +# TYPE sql_update_started_count counter +sql_update_started_count 0.0 +# HELP raft_process_handleready_latency Latency histogram for handling a Raft ready +# TYPE raft_process_handleready_latency histogram +raft_process_handleready_latency_bucket{store="1",le="671.0"} 3.0 +raft_process_handleready_latency_bucket{store="1",le="703.0"} 4.0 +raft_process_handleready_latency_bucket{store="1",le="735.0"} 5.0 +raft_process_handleready_latency_bucket{store="1",le="767.0"} 11.0 +raft_process_handleready_latency_bucket{store="1",le="799.0"} 14.0 +raft_process_handleready_latency_bucket{store="1",le="831.0"} 19.0 +raft_process_handleready_latency_bucket{store="1",le="863.0"} 27.0 +raft_process_handleready_latency_bucket{store="1",le="895.0"} 34.0 +raft_process_handleready_latency_bucket{store="1",le="927.0"} 48.0 +raft_process_handleready_latency_bucket{store="1",le="959.0"} 70.0 +raft_process_handleready_latency_bucket{store="1",le="991.0"} 85.0 +raft_process_handleready_latency_bucket{store="1",le="1023.0"} 110.0 +raft_process_handleready_latency_bucket{store="1",le="1087.0"} 153.0 +raft_process_handleready_latency_bucket{store="1",le="1151.0"} 222.0 +raft_process_handleready_latency_bucket{store="1",le="1215.0"} 326.0 +raft_process_handleready_latency_bucket{store="1",le="1279.0"} 439.0 +raft_process_handleready_latency_bucket{store="1",le="1343.0"} 537.0 +raft_process_handleready_latency_bucket{store="1",le="1407.0"} 649.0 +raft_process_handleready_latency_bucket{store="1",le="1471.0"} 784.0 +raft_process_handleready_latency_bucket{store="1",le="1535.0"} 889.0 +raft_process_handleready_latency_bucket{store="1",le="1599.0"} 996.0 +raft_process_handleready_latency_bucket{store="1",le="1663.0"} 1078.0 +raft_process_handleready_latency_bucket{store="1",le="1727.0"} 1153.0 +raft_process_handleready_latency_bucket{store="1",le="1791.0"} 1228.0 +raft_process_handleready_latency_bucket{store="1",le="1855.0"} 1301.0 +raft_process_handleready_latency_bucket{store="1",le="1919.0"} 1370.0 +raft_process_handleready_latency_bucket{store="1",le="1983.0"} 1434.0 +raft_process_handleready_latency_bucket{store="1",le="2047.0"} 1493.0 +raft_process_handleready_latency_bucket{store="1",le="2175.0"} 1605.0 +raft_process_handleready_latency_bucket{store="1",le="2303.0"} 1693.0 +raft_process_handleready_latency_bucket{store="1",le="2431.0"} 1746.0 +raft_process_handleready_latency_bucket{store="1",le="2559.0"} 1806.0 +raft_process_handleready_latency_bucket{store="1",le="2687.0"} 1861.0 +raft_process_handleready_latency_bucket{store="1",le="2815.0"} 1922.0 +raft_process_handleready_latency_bucket{store="1",le="2943.0"} 1977.0 +raft_process_handleready_latency_bucket{store="1",le="3071.0"} 2031.0 +raft_process_handleready_latency_bucket{store="1",le="3199.0"} 2087.0 +raft_process_handleready_latency_bucket{store="1",le="3327.0"} 2138.0 +raft_process_handleready_latency_bucket{store="1",le="3455.0"} 2215.0 
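+# NOTE: these comment lines are illustrative and not part of the captured server
+# output; plain `#` lines that are not HELP/TYPE are treated as comments and
+# ignored by Prometheus text-format parsers. Histogram buckets are cumulative:
+# the count for a single interval is the difference between adjacent buckets,
+# e.g. le="703.0" (4) minus le="671.0" (3) above leaves 1 observation in
+# (671, 703] ns, and the le="+Inf" bucket always equals the family's _count.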
+raft_process_handleready_latency_bucket{store="1",le="3583.0"} 2284.0 +raft_process_handleready_latency_bucket{store="1",le="3711.0"} 2365.0 +raft_process_handleready_latency_bucket{store="1",le="3839.0"} 2471.0 +raft_process_handleready_latency_bucket{store="1",le="3967.0"} 2571.0 +raft_process_handleready_latency_bucket{store="1",le="4095.0"} 2680.0 +raft_process_handleready_latency_bucket{store="1",le="4351.0"} 2916.0 +raft_process_handleready_latency_bucket{store="1",le="4607.0"} 3225.0 +raft_process_handleready_latency_bucket{store="1",le="4863.0"} 3662.0 +raft_process_handleready_latency_bucket{store="1",le="5119.0"} 4195.0 +raft_process_handleready_latency_bucket{store="1",le="5375.0"} 4922.0 +raft_process_handleready_latency_bucket{store="1",le="5631.0"} 5692.0 +raft_process_handleready_latency_bucket{store="1",le="5887.0"} 6311.0 +raft_process_handleready_latency_bucket{store="1",le="6143.0"} 6798.0 +raft_process_handleready_latency_bucket{store="1",le="6399.0"} 7181.0 +raft_process_handleready_latency_bucket{store="1",le="6655.0"} 7432.0 +raft_process_handleready_latency_bucket{store="1",le="6911.0"} 7638.0 +raft_process_handleready_latency_bucket{store="1",le="7167.0"} 7763.0 +raft_process_handleready_latency_bucket{store="1",le="7423.0"} 7843.0 +raft_process_handleready_latency_bucket{store="1",le="7679.0"} 7910.0 +raft_process_handleready_latency_bucket{store="1",le="7935.0"} 7961.0 +raft_process_handleready_latency_bucket{store="1",le="8191.0"} 8011.0 +raft_process_handleready_latency_bucket{store="1",le="8703.0"} 8058.0 +raft_process_handleready_latency_bucket{store="1",le="9215.0"} 8111.0 +raft_process_handleready_latency_bucket{store="1",le="9727.0"} 8151.0 +raft_process_handleready_latency_bucket{store="1",le="10239.0"} 8182.0 +raft_process_handleready_latency_bucket{store="1",le="10751.0"} 8213.0 +raft_process_handleready_latency_bucket{store="1",le="11263.0"} 8235.0 +raft_process_handleready_latency_bucket{store="1",le="11775.0"} 8266.0 +raft_process_handleready_latency_bucket{store="1",le="12287.0"} 8290.0 +raft_process_handleready_latency_bucket{store="1",le="12799.0"} 8316.0 +raft_process_handleready_latency_bucket{store="1",le="13311.0"} 8330.0 +raft_process_handleready_latency_bucket{store="1",le="13823.0"} 8347.0 +raft_process_handleready_latency_bucket{store="1",le="14335.0"} 8374.0 +raft_process_handleready_latency_bucket{store="1",le="14847.0"} 8398.0 +raft_process_handleready_latency_bucket{store="1",le="15359.0"} 8427.0 +raft_process_handleready_latency_bucket{store="1",le="15871.0"} 8450.0 +raft_process_handleready_latency_bucket{store="1",le="16383.0"} 8476.0 +raft_process_handleready_latency_bucket{store="1",le="17407.0"} 8518.0 +raft_process_handleready_latency_bucket{store="1",le="18431.0"} 8561.0 +raft_process_handleready_latency_bucket{store="1",le="19455.0"} 8585.0 +raft_process_handleready_latency_bucket{store="1",le="20479.0"} 8605.0 +raft_process_handleready_latency_bucket{store="1",le="21503.0"} 8630.0 +raft_process_handleready_latency_bucket{store="1",le="22527.0"} 8652.0 +raft_process_handleready_latency_bucket{store="1",le="23551.0"} 8664.0 +raft_process_handleready_latency_bucket{store="1",le="24575.0"} 8673.0 +raft_process_handleready_latency_bucket{store="1",le="25599.0"} 8681.0 +raft_process_handleready_latency_bucket{store="1",le="26623.0"} 8692.0 +raft_process_handleready_latency_bucket{store="1",le="27647.0"} 8696.0 +raft_process_handleready_latency_bucket{store="1",le="28671.0"} 8704.0 
+raft_process_handleready_latency_bucket{store="1",le="29695.0"} 8713.0 +raft_process_handleready_latency_bucket{store="1",le="30719.0"} 8727.0 +raft_process_handleready_latency_bucket{store="1",le="31743.0"} 8734.0 +raft_process_handleready_latency_bucket{store="1",le="32767.0"} 8744.0 +raft_process_handleready_latency_bucket{store="1",le="34815.0"} 8764.0 +raft_process_handleready_latency_bucket{store="1",le="36863.0"} 8776.0 +raft_process_handleready_latency_bucket{store="1",le="38911.0"} 8788.0 +raft_process_handleready_latency_bucket{store="1",le="40959.0"} 8796.0 +raft_process_handleready_latency_bucket{store="1",le="43007.0"} 8802.0 +raft_process_handleready_latency_bucket{store="1",le="45055.0"} 8812.0 +raft_process_handleready_latency_bucket{store="1",le="47103.0"} 8822.0 +raft_process_handleready_latency_bucket{store="1",le="49151.0"} 8828.0 +raft_process_handleready_latency_bucket{store="1",le="51199.0"} 8832.0 +raft_process_handleready_latency_bucket{store="1",le="53247.0"} 8836.0 +raft_process_handleready_latency_bucket{store="1",le="55295.0"} 8841.0 +raft_process_handleready_latency_bucket{store="1",le="57343.0"} 8844.0 +raft_process_handleready_latency_bucket{store="1",le="59391.0"} 8849.0 +raft_process_handleready_latency_bucket{store="1",le="61439.0"} 8857.0 +raft_process_handleready_latency_bucket{store="1",le="63487.0"} 8866.0 +raft_process_handleready_latency_bucket{store="1",le="65535.0"} 8871.0 +raft_process_handleready_latency_bucket{store="1",le="69631.0"} 8884.0 +raft_process_handleready_latency_bucket{store="1",le="73727.0"} 8894.0 +raft_process_handleready_latency_bucket{store="1",le="77823.0"} 8904.0 +raft_process_handleready_latency_bucket{store="1",le="81919.0"} 8909.0 +raft_process_handleready_latency_bucket{store="1",le="86015.0"} 8916.0 +raft_process_handleready_latency_bucket{store="1",le="90111.0"} 8926.0 +raft_process_handleready_latency_bucket{store="1",le="94207.0"} 8929.0 +raft_process_handleready_latency_bucket{store="1",le="98303.0"} 8930.0 +raft_process_handleready_latency_bucket{store="1",le="102399.0"} 8935.0 +raft_process_handleready_latency_bucket{store="1",le="106495.0"} 8940.0 +raft_process_handleready_latency_bucket{store="1",le="110591.0"} 8941.0 +raft_process_handleready_latency_bucket{store="1",le="114687.0"} 8943.0 +raft_process_handleready_latency_bucket{store="1",le="118783.0"} 8947.0 +raft_process_handleready_latency_bucket{store="1",le="122879.0"} 8948.0 +raft_process_handleready_latency_bucket{store="1",le="126975.0"} 8951.0 +raft_process_handleready_latency_bucket{store="1",le="131071.0"} 8952.0 +raft_process_handleready_latency_bucket{store="1",le="139263.0"} 8954.0 +raft_process_handleready_latency_bucket{store="1",le="147455.0"} 8959.0 +raft_process_handleready_latency_bucket{store="1",le="155647.0"} 8961.0 +raft_process_handleready_latency_bucket{store="1",le="163839.0"} 8962.0 +raft_process_handleready_latency_bucket{store="1",le="172031.0"} 8964.0 +raft_process_handleready_latency_bucket{store="1",le="188415.0"} 8965.0 +raft_process_handleready_latency_bucket{store="1",le="196607.0"} 8968.0 +raft_process_handleready_latency_bucket{store="1",le="204799.0"} 8969.0 +raft_process_handleready_latency_bucket{store="1",le="221183.0"} 8971.0 +raft_process_handleready_latency_bucket{store="1",le="237567.0"} 8972.0 +raft_process_handleready_latency_bucket{store="1",le="245759.0"} 8973.0 +raft_process_handleready_latency_bucket{store="1",le="253951.0"} 8974.0 +raft_process_handleready_latency_bucket{store="1",le="294911.0"} 8975.0 
+raft_process_handleready_latency_bucket{store="1",le="311295.0"} 8976.0 +raft_process_handleready_latency_bucket{store="1",le="327679.0"} 8981.0 +raft_process_handleready_latency_bucket{store="1",le="344063.0"} 8984.0 +raft_process_handleready_latency_bucket{store="1",le="360447.0"} 8989.0 +raft_process_handleready_latency_bucket{store="1",le="376831.0"} 8998.0 +raft_process_handleready_latency_bucket{store="1",le="393215.0"} 9013.0 +raft_process_handleready_latency_bucket{store="1",le="409599.0"} 9040.0 +raft_process_handleready_latency_bucket{store="1",le="425983.0"} 9074.0 +raft_process_handleready_latency_bucket{store="1",le="442367.0"} 9111.0 +raft_process_handleready_latency_bucket{store="1",le="458751.0"} 9167.0 +raft_process_handleready_latency_bucket{store="1",le="475135.0"} 9254.0 +raft_process_handleready_latency_bucket{store="1",le="491519.0"} 9336.0 +raft_process_handleready_latency_bucket{store="1",le="507903.0"} 9426.0 +raft_process_handleready_latency_bucket{store="1",le="524287.0"} 9528.0 +raft_process_handleready_latency_bucket{store="1",le="557055.0"} 9797.0 +raft_process_handleready_latency_bucket{store="1",le="589823.0"} 10152.0 +raft_process_handleready_latency_bucket{store="1",le="622591.0"} 10535.0 +raft_process_handleready_latency_bucket{store="1",le="655359.0"} 11015.0 +raft_process_handleready_latency_bucket{store="1",le="688127.0"} 11550.0 +raft_process_handleready_latency_bucket{store="1",le="720895.0"} 12107.0 +raft_process_handleready_latency_bucket{store="1",le="753663.0"} 12736.0 +raft_process_handleready_latency_bucket{store="1",le="786431.0"} 13366.0 +raft_process_handleready_latency_bucket{store="1",le="819199.0"} 14043.0 +raft_process_handleready_latency_bucket{store="1",le="851967.0"} 14742.0 +raft_process_handleready_latency_bucket{store="1",le="884735.0"} 15425.0 +raft_process_handleready_latency_bucket{store="1",le="917503.0"} 16120.0 +raft_process_handleready_latency_bucket{store="1",le="950271.0"} 16774.0 +raft_process_handleready_latency_bucket{store="1",le="983039.0"} 17410.0 +raft_process_handleready_latency_bucket{store="1",le="1.015807e+06"} 18030.0 +raft_process_handleready_latency_bucket{store="1",le="1.048575e+06"} 18574.0 +raft_process_handleready_latency_bucket{store="1",le="1.114111e+06"} 19559.0 +raft_process_handleready_latency_bucket{store="1",le="1.179647e+06"} 20407.0 +raft_process_handleready_latency_bucket{store="1",le="1.245183e+06"} 21059.0 +raft_process_handleready_latency_bucket{store="1",le="1.310719e+06"} 21649.0 +raft_process_handleready_latency_bucket{store="1",le="1.376255e+06"} 22120.0 +raft_process_handleready_latency_bucket{store="1",le="1.441791e+06"} 22513.0 +raft_process_handleready_latency_bucket{store="1",le="1.507327e+06"} 22863.0 +raft_process_handleready_latency_bucket{store="1",le="1.572863e+06"} 23168.0 +raft_process_handleready_latency_bucket{store="1",le="1.638399e+06"} 23475.0 +raft_process_handleready_latency_bucket{store="1",le="1.703935e+06"} 23751.0 +raft_process_handleready_latency_bucket{store="1",le="1.769471e+06"} 24004.0 +raft_process_handleready_latency_bucket{store="1",le="1.835007e+06"} 24246.0 +raft_process_handleready_latency_bucket{store="1",le="1.900543e+06"} 24494.0 +raft_process_handleready_latency_bucket{store="1",le="1.966079e+06"} 24695.0 +raft_process_handleready_latency_bucket{store="1",le="2.031615e+06"} 24883.0 +raft_process_handleready_latency_bucket{store="1",le="2.097151e+06"} 25036.0 +raft_process_handleready_latency_bucket{store="1",le="2.228223e+06"} 25278.0 
+raft_process_handleready_latency_bucket{store="1",le="2.359295e+06"} 25461.0 +raft_process_handleready_latency_bucket{store="1",le="2.490367e+06"} 25606.0 +raft_process_handleready_latency_bucket{store="1",le="2.621439e+06"} 25691.0 +raft_process_handleready_latency_bucket{store="1",le="2.752511e+06"} 25765.0 +raft_process_handleready_latency_bucket{store="1",le="2.883583e+06"} 25826.0 +raft_process_handleready_latency_bucket{store="1",le="3.014655e+06"} 25873.0 +raft_process_handleready_latency_bucket{store="1",le="3.145727e+06"} 25909.0 +raft_process_handleready_latency_bucket{store="1",le="3.276799e+06"} 25943.0 +raft_process_handleready_latency_bucket{store="1",le="3.407871e+06"} 25964.0 +raft_process_handleready_latency_bucket{store="1",le="3.538943e+06"} 25992.0 +raft_process_handleready_latency_bucket{store="1",le="3.670015e+06"} 26012.0 +raft_process_handleready_latency_bucket{store="1",le="3.801087e+06"} 26027.0 +raft_process_handleready_latency_bucket{store="1",le="3.932159e+06"} 26042.0 +raft_process_handleready_latency_bucket{store="1",le="4.063231e+06"} 26052.0 +raft_process_handleready_latency_bucket{store="1",le="4.194303e+06"} 26057.0 +raft_process_handleready_latency_bucket{store="1",le="4.456447e+06"} 26062.0 +raft_process_handleready_latency_bucket{store="1",le="4.718591e+06"} 26073.0 +raft_process_handleready_latency_bucket{store="1",le="4.980735e+06"} 26081.0 +raft_process_handleready_latency_bucket{store="1",le="5.242879e+06"} 26090.0 +raft_process_handleready_latency_bucket{store="1",le="5.505023e+06"} 26097.0 +raft_process_handleready_latency_bucket{store="1",le="5.767167e+06"} 26105.0 +raft_process_handleready_latency_bucket{store="1",le="6.029311e+06"} 26107.0 +raft_process_handleready_latency_bucket{store="1",le="6.291455e+06"} 26111.0 +raft_process_handleready_latency_bucket{store="1",le="6.553599e+06"} 26114.0 +raft_process_handleready_latency_bucket{store="1",le="6.815743e+06"} 26115.0 +raft_process_handleready_latency_bucket{store="1",le="7.077887e+06"} 26118.0 +raft_process_handleready_latency_bucket{store="1",le="7.340031e+06"} 26119.0 +raft_process_handleready_latency_bucket{store="1",le="7.602175e+06"} 26121.0 +raft_process_handleready_latency_bucket{store="1",le="7.864319e+06"} 26122.0 +raft_process_handleready_latency_bucket{store="1",le="8.126463e+06"} 26124.0 +raft_process_handleready_latency_bucket{store="1",le="8.388607e+06"} 26127.0 +raft_process_handleready_latency_bucket{store="1",le="9.437183e+06"} 26133.0 +raft_process_handleready_latency_bucket{store="1",le="9.961471e+06"} 26134.0 +raft_process_handleready_latency_bucket{store="1",le="1.0485759e+07"} 26135.0 +raft_process_handleready_latency_bucket{store="1",le="1.1010047e+07"} 26136.0 +raft_process_handleready_latency_bucket{store="1",le="1.2058623e+07"} 26137.0 +raft_process_handleready_latency_bucket{store="1",le="1.2582911e+07"} 26138.0 +raft_process_handleready_latency_bucket{store="1",le="1.3631487e+07"} 26139.0 +raft_process_handleready_latency_bucket{store="1",le="2.5165823e+07"} 26140.0 +raft_process_handleready_latency_bucket{store="1",le="3.1457279e+07"} 26141.0 +raft_process_handleready_latency_bucket{store="1",le="3.7748735e+07"} 26142.0 +raft_process_handleready_latency_bucket{store="1",le="4.1943039e+07"} 26143.0 +raft_process_handleready_latency_bucket{store="1",le="4.8234495e+07"} 26144.0 +raft_process_handleready_latency_bucket{store="1",le="9.05969663e+08"} 26145.0 +raft_process_handleready_latency_bucket{store="1",le="9.73078527e+08"} 26146.0 
+raft_process_handleready_latency_bucket{store="1",le="1.006632959e+09"} 26147.0 +raft_process_handleready_latency_bucket{store="1",le="1.040187391e+09"} 26148.0 +raft_process_handleready_latency_bucket{store="1",le="1.0200547327e+10"} 26149.0 +raft_process_handleready_latency_bucket{store="1",le="+Inf"} 26149.0 +raft_process_handleready_latency_sum{store="1"} 3.4720430875e+10 +raft_process_handleready_latency_count{store="1"} 26149.0 +# HELP txn_parallelcommits Number of KV transaction parallel commit attempts +# TYPE txn_parallelcommits counter +txn_parallelcommits 517.0 +# HELP txn_restarts_readwithinuncertainty Number of restarts due to reading a new value within the uncertainty interval +# TYPE txn_restarts_readwithinuncertainty counter +txn_restarts_readwithinuncertainty 0.0 +# HELP sys_host_net_send_packets Packets sent on all network interfaces since this process started +# TYPE sys_host_net_send_packets gauge +sys_host_net_send_packets 644128.0 +# HELP queue_merge_processingnanos Nanoseconds spent processing replicas in the merge queue +# TYPE queue_merge_processingnanos counter +queue_merge_processingnanos{store="1"} 0.0 +# HELP queue_raftlog_pending Number of pending replicas in the Raft log queue +# TYPE queue_raftlog_pending gauge +queue_raftlog_pending{store="1"} 0.0 +# HELP queue_split_processingnanos Nanoseconds spent processing replicas in the split queue +# TYPE queue_split_processingnanos counter +queue_split_processingnanos{store="1"} 0.0 +# HELP txnrecovery_attempts_total Number of transaction recovery attempts executed +# TYPE txnrecovery_attempts_total counter +txnrecovery_attempts_total{store="1"} 0.0 +# HELP gossip_connections_outgoing Number of active outgoing gossip connections +# TYPE gossip_connections_outgoing gauge +gossip_connections_outgoing 2.0 +# HELP sql_mem_sql_max Memory usage per sql statement for sql +# TYPE sql_mem_sql_max histogram +sql_mem_sql_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_max_sum 0.0 +sql_mem_sql_max_count 0.0 +sql_mem_sql_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_max_sum 0.0 +sql_mem_sql_max_count 0.0 +# HELP intents_resolve_attempts Count of (point or range) intent commit evaluation attempts +# TYPE intents_resolve_attempts counter +intents_resolve_attempts{store="1"} 4.0 +# HELP raft_rcvd_snap Number of MsgSnap messages received by this store +# TYPE raft_rcvd_snap counter +raft_rcvd_snap{store="1"} 0.0 +# HELP queue_raftlog_process_failure Number of replicas which failed processing in the Raft log queue +# TYPE queue_raftlog_process_failure counter +queue_raftlog_process_failure{store="1"} 0.0 +# HELP queue_gc_info_resolvetotal Number of attempted intent resolutions +# TYPE queue_gc_info_resolvetotal counter +queue_gc_info_resolvetotal{store="1"} 0.0 +# HELP sys_gc_pause_percent Current GC pause percentage +# TYPE sys_gc_pause_percent gauge +sys_gc_pause_percent 2.582156232137188e-06 +# HELP exec_error Number of batch KV requests that failed to execute on this node +# TYPE exec_error counter +exec_error 18.0 +# HELP rocksdb_read_amplification Number of disk reads per query +# TYPE rocksdb_read_amplification gauge +rocksdb_read_amplification{store="1"} 1.0 +# HELP raft_rcvd_timeoutnow Number of MsgTimeoutNow messages received by this store +# TYPE raft_rcvd_timeoutnow counter +raft_rcvd_timeoutnow{store="1"} 2.0 +# HELP queue_raftsnapshot_processingnanos Nanoseconds spent processing replicas in the Raft repair queue +# TYPE queue_raftsnapshot_processingnanos counter +queue_raftsnapshot_processingnanos{store="1"} 0.0 +# HELP 
queue_replicagc_process_success Number of replicas successfully processed by the replica GC queue +# TYPE queue_replicagc_process_success counter +queue_replicagc_process_success{store="1"} 9.0 +# HELP sql_mem_internal_session_current Current sql session memory usage for internal +# TYPE sql_mem_internal_session_current gauge +sql_mem_internal_session_current 0.0 +# HELP distsender_errors_notleaseholder Number of NotLeaseHolderErrors encountered +# TYPE distsender_errors_notleaseholder counter +distsender_errors_notleaseholder 15.0 +# HELP timeseries_write_errors Total errors encountered while attempting to write metrics to disk +# TYPE timeseries_write_errors counter +timeseries_write_errors 0.0 +# HELP sys_cgocalls Total number of cgo calls +# TYPE sys_cgocalls gauge +sys_cgocalls 577778.0 +# HELP exec_latency Latency of batch KV requests executed on this node +# TYPE exec_latency histogram +exec_latency_bucket{le="32767.0"} 1.0 +exec_latency_bucket{le="38911.0"} 2.0 +exec_latency_bucket{le="40959.0"} 5.0 +exec_latency_bucket{le="43007.0"} 7.0 +exec_latency_bucket{le="45055.0"} 8.0 +exec_latency_bucket{le="47103.0"} 11.0 +exec_latency_bucket{le="49151.0"} 14.0 +exec_latency_bucket{le="51199.0"} 18.0 +exec_latency_bucket{le="53247.0"} 19.0 +exec_latency_bucket{le="55295.0"} 20.0 +exec_latency_bucket{le="57343.0"} 23.0 +exec_latency_bucket{le="59391.0"} 26.0 +exec_latency_bucket{le="63487.0"} 32.0 +exec_latency_bucket{le="65535.0"} 35.0 +exec_latency_bucket{le="69631.0"} 43.0 +exec_latency_bucket{le="73727.0"} 60.0 +exec_latency_bucket{le="77823.0"} 80.0 +exec_latency_bucket{le="81919.0"} 102.0 +exec_latency_bucket{le="86015.0"} 118.0 +exec_latency_bucket{le="90111.0"} 147.0 +exec_latency_bucket{le="94207.0"} 170.0 +exec_latency_bucket{le="98303.0"} 199.0 +exec_latency_bucket{le="102399.0"} 227.0 +exec_latency_bucket{le="106495.0"} 255.0 +exec_latency_bucket{le="110591.0"} 289.0 +exec_latency_bucket{le="114687.0"} 327.0 +exec_latency_bucket{le="118783.0"} 369.0 +exec_latency_bucket{le="122879.0"} 412.0 +exec_latency_bucket{le="126975.0"} 460.0 +exec_latency_bucket{le="131071.0"} 497.0 +exec_latency_bucket{le="139263.0"} 562.0 +exec_latency_bucket{le="147455.0"} 633.0 +exec_latency_bucket{le="155647.0"} 700.0 +exec_latency_bucket{le="163839.0"} 792.0 +exec_latency_bucket{le="172031.0"} 862.0 +exec_latency_bucket{le="180223.0"} 948.0 +exec_latency_bucket{le="188415.0"} 1021.0 +exec_latency_bucket{le="196607.0"} 1065.0 +exec_latency_bucket{le="204799.0"} 1110.0 +exec_latency_bucket{le="212991.0"} 1148.0 +exec_latency_bucket{le="221183.0"} 1186.0 +exec_latency_bucket{le="229375.0"} 1227.0 +exec_latency_bucket{le="237567.0"} 1250.0 +exec_latency_bucket{le="245759.0"} 1280.0 +exec_latency_bucket{le="253951.0"} 1311.0 +exec_latency_bucket{le="262143.0"} 1333.0 +exec_latency_bucket{le="278527.0"} 1366.0 +exec_latency_bucket{le="294911.0"} 1396.0 +exec_latency_bucket{le="311295.0"} 1416.0 +exec_latency_bucket{le="327679.0"} 1439.0 +exec_latency_bucket{le="344063.0"} 1457.0 +exec_latency_bucket{le="360447.0"} 1473.0 +exec_latency_bucket{le="376831.0"} 1483.0 +exec_latency_bucket{le="393215.0"} 1493.0 +exec_latency_bucket{le="409599.0"} 1503.0 +exec_latency_bucket{le="425983.0"} 1514.0 +exec_latency_bucket{le="442367.0"} 1520.0 +exec_latency_bucket{le="458751.0"} 1526.0 +exec_latency_bucket{le="475135.0"} 1533.0 +exec_latency_bucket{le="491519.0"} 1538.0 +exec_latency_bucket{le="507903.0"} 1542.0 +exec_latency_bucket{le="524287.0"} 1549.0 +exec_latency_bucket{le="557055.0"} 1556.0 
+exec_latency_bucket{le="589823.0"} 1564.0 +exec_latency_bucket{le="622591.0"} 1568.0 +exec_latency_bucket{le="655359.0"} 1575.0 +exec_latency_bucket{le="688127.0"} 1578.0 +exec_latency_bucket{le="720895.0"} 1583.0 +exec_latency_bucket{le="753663.0"} 1589.0 +exec_latency_bucket{le="786431.0"} 1597.0 +exec_latency_bucket{le="819199.0"} 1599.0 +exec_latency_bucket{le="851967.0"} 1602.0 +exec_latency_bucket{le="884735.0"} 1606.0 +exec_latency_bucket{le="917503.0"} 1608.0 +exec_latency_bucket{le="950271.0"} 1609.0 +exec_latency_bucket{le="983039.0"} 1611.0 +exec_latency_bucket{le="1.015807e+06"} 1612.0 +exec_latency_bucket{le="1.048575e+06"} 1617.0 +exec_latency_bucket{le="1.114111e+06"} 1621.0 +exec_latency_bucket{le="1.179647e+06"} 1623.0 +exec_latency_bucket{le="1.245183e+06"} 1626.0 +exec_latency_bucket{le="1.310719e+06"} 1629.0 +exec_latency_bucket{le="1.376255e+06"} 1634.0 +exec_latency_bucket{le="1.441791e+06"} 1637.0 +exec_latency_bucket{le="1.507327e+06"} 1642.0 +exec_latency_bucket{le="1.572863e+06"} 1649.0 +exec_latency_bucket{le="1.638399e+06"} 1653.0 +exec_latency_bucket{le="1.703935e+06"} 1661.0 +exec_latency_bucket{le="1.769471e+06"} 1675.0 +exec_latency_bucket{le="1.835007e+06"} 1694.0 +exec_latency_bucket{le="1.900543e+06"} 1727.0 +exec_latency_bucket{le="1.966079e+06"} 1761.0 +exec_latency_bucket{le="2.031615e+06"} 1816.0 +exec_latency_bucket{le="2.097151e+06"} 1897.0 +exec_latency_bucket{le="2.228223e+06"} 2127.0 +exec_latency_bucket{le="2.359295e+06"} 2463.0 +exec_latency_bucket{le="2.490367e+06"} 2938.0 +exec_latency_bucket{le="2.621439e+06"} 3489.0 +exec_latency_bucket{le="2.752511e+06"} 4097.0 +exec_latency_bucket{le="2.883583e+06"} 4692.0 +exec_latency_bucket{le="3.014655e+06"} 5308.0 +exec_latency_bucket{le="3.145727e+06"} 5929.0 +exec_latency_bucket{le="3.276799e+06"} 6485.0 +exec_latency_bucket{le="3.407871e+06"} 6942.0 +exec_latency_bucket{le="3.538943e+06"} 7392.0 +exec_latency_bucket{le="3.670015e+06"} 7782.0 +exec_latency_bucket{le="3.801087e+06"} 8065.0 +exec_latency_bucket{le="3.932159e+06"} 8301.0 +exec_latency_bucket{le="4.063231e+06"} 8508.0 +exec_latency_bucket{le="4.194303e+06"} 8676.0 +exec_latency_bucket{le="4.456447e+06"} 8957.0 +exec_latency_bucket{le="4.718591e+06"} 9130.0 +exec_latency_bucket{le="4.980735e+06"} 9277.0 +exec_latency_bucket{le="5.242879e+06"} 9399.0 +exec_latency_bucket{le="5.505023e+06"} 9498.0 +exec_latency_bucket{le="5.767167e+06"} 9586.0 +exec_latency_bucket{le="6.029311e+06"} 9659.0 +exec_latency_bucket{le="6.291455e+06"} 9740.0 +exec_latency_bucket{le="6.553599e+06"} 9795.0 +exec_latency_bucket{le="6.815743e+06"} 9847.0 +exec_latency_bucket{le="7.077887e+06"} 9887.0 +exec_latency_bucket{le="7.340031e+06"} 9907.0 +exec_latency_bucket{le="7.602175e+06"} 9932.0 +exec_latency_bucket{le="7.864319e+06"} 9952.0 +exec_latency_bucket{le="8.126463e+06"} 9967.0 +exec_latency_bucket{le="8.388607e+06"} 9981.0 +exec_latency_bucket{le="8.912895e+06"} 10005.0 +exec_latency_bucket{le="9.437183e+06"} 10017.0 +exec_latency_bucket{le="9.961471e+06"} 10031.0 +exec_latency_bucket{le="1.0485759e+07"} 10035.0 +exec_latency_bucket{le="1.1010047e+07"} 10044.0 +exec_latency_bucket{le="1.1534335e+07"} 10050.0 +exec_latency_bucket{le="1.2058623e+07"} 10056.0 +exec_latency_bucket{le="1.2582911e+07"} 10058.0 +exec_latency_bucket{le="1.3107199e+07"} 10061.0 +exec_latency_bucket{le="1.3631487e+07"} 10065.0 +exec_latency_bucket{le="1.4155775e+07"} 10067.0 +exec_latency_bucket{le="1.5204351e+07"} 10068.0 +exec_latency_bucket{le="1.6252927e+07"} 10069.0 
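+# NOTE: illustrative comment, not part of the captured output. Each histogram
+# family closes with _sum and _count series (below), from which a mean can be
+# derived without walking the buckets: here exec_latency_sum / exec_latency_count
+# is roughly 5.1143858324e+10 / 10092 ≈ 5.07e+06 ns, about 5 ms per batch KV request.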
+exec_latency_bucket{le="1.7825791e+07"} 10070.0 +exec_latency_bucket{le="1.8874367e+07"} 10073.0 +exec_latency_bucket{le="1.9922943e+07"} 10074.0 +exec_latency_bucket{le="2.0971519e+07"} 10076.0 +exec_latency_bucket{le="2.8311551e+07"} 10077.0 +exec_latency_bucket{le="3.7748735e+07"} 10078.0 +exec_latency_bucket{le="3.9845887e+07"} 10079.0 +exec_latency_bucket{le="4.4040191e+07"} 10080.0 +exec_latency_bucket{le="1.04857599e+08"} 10081.0 +exec_latency_bucket{le="1.09051903e+08"} 10082.0 +exec_latency_bucket{le="4.19430399e+08"} 10083.0 +exec_latency_bucket{le="7.38197503e+08"} 10084.0 +exec_latency_bucket{le="8.38860799e+08"} 10085.0 +exec_latency_bucket{le="9.05969663e+08"} 10086.0 +exec_latency_bucket{le="9.73078527e+08"} 10087.0 +exec_latency_bucket{le="1.040187391e+09"} 10089.0 +exec_latency_bucket{le="1.342177279e+09"} 10090.0 +exec_latency_bucket{le="3.087007743e+09"} 10091.0 +exec_latency_bucket{le="1.0200547327e+10"} 10092.0 +exec_latency_bucket{le="+Inf"} 10092.0 +exec_latency_sum 5.1143858324e+10 +exec_latency_count 10092.0 +# HELP changefeed_flush_nanos Total time spent flushing all feeds +# TYPE changefeed_flush_nanos counter +changefeed_flush_nanos 0.0 +# HELP sql_restart_savepoint_release_started_count Number of `RELEASE SAVEPOINT cockroach_restart` statements started +# TYPE sql_restart_savepoint_release_started_count counter +sql_restart_savepoint_release_started_count 0.0 +# HELP sql_select_started_count_internal Number of SQL SELECT statements started (internal queries) +# TYPE sql_select_started_count_internal counter +sql_select_started_count_internal 1607.0 +# HELP gossip_bytes_sent Number of sent gossip bytes +# TYPE gossip_bytes_sent counter +gossip_bytes_sent 4462.0 +# HELP sql_txn_commit_count_internal Number of SQL transaction COMMIT statements successfully executed (internal queries) +# TYPE sql_txn_commit_count_internal counter +sql_txn_commit_count_internal 0.0 +# HELP leases_transfers_success Number of successful lease transfers +# TYPE leases_transfers_success counter +leases_transfers_success{store="1"} 0.0 +# HELP compactor_suggestionbytes_skipped Number of logical bytes in suggested compactions which were not compacted +# TYPE compactor_suggestionbytes_skipped counter +compactor_suggestionbytes_skipped{store="1"} 0.0 +# HELP sql_savepoint_started_count_internal Number of SQL SAVEPOINT statements started (internal queries) +# TYPE sql_savepoint_started_count_internal counter +sql_savepoint_started_count_internal 0.0 +# HELP sql_mem_admin_txn_current Current sql transaction memory usage for admin +# TYPE sql_mem_admin_txn_current gauge +sql_mem_admin_txn_current 0.0 +# HELP sql_optimizer_count Number of statements which ran with the cost-based optimizer +# TYPE sql_optimizer_count counter +sql_optimizer_count 0.0 +# HELP sql_restart_savepoint_started_count_internal Number of `SAVEPOINT cockroach_restart` statements started (internal queries) +# TYPE sql_restart_savepoint_started_count_internal counter +sql_restart_savepoint_started_count_internal 0.0 +# HELP sql_restart_savepoint_rollback_count_internal Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed (internal queries) +# TYPE sql_restart_savepoint_rollback_count_internal counter +sql_restart_savepoint_rollback_count_internal 0.0 +# HELP txn_restarts_possiblereplay Number of restarts due to possible replays of command batches at the storage layer +# TYPE txn_restarts_possiblereplay counter +txn_restarts_possiblereplay 0.0 +# HELP kv_closed_timestamp_max_behind_nanos 
Largest latency between realtime and replica max closed timestamp +# TYPE kv_closed_timestamp_max_behind_nanos gauge +kv_closed_timestamp_max_behind_nanos{store="1"} 1.2220867530922e+13 +# HELP queue_merge_purgatory Number of replicas in the merge queue's purgatory, waiting to become mergeable +# TYPE queue_merge_purgatory gauge +queue_merge_purgatory{store="1"} 0.0 +# HELP sys_cpu_user_ns Total user cpu time +# TYPE sys_cpu_user_ns gauge +sys_cpu_user_ns 2.2762e+11 +# HELP sql_service_latency Latency of SQL request execution +# TYPE sql_service_latency histogram +sql_service_latency_bucket{le="+Inf"} 0.0 +sql_service_latency_sum 0.0 +sql_service_latency_count 0.0 +# HELP raft_process_tickingnanos Nanoseconds spent in store.processRaft() processing replica.Tick() +# TYPE raft_process_tickingnanos counter +raft_process_tickingnanos{store="1"} 5.15943156e+08 +# HELP queue_raftsnapshot_process_failure Number of replicas which failed processing in the Raft repair queue +# TYPE queue_raftsnapshot_process_failure counter +queue_raftsnapshot_process_failure{store="1"} 0.0 +# HELP kv_rangefeed_catchup_scan_nanos Time spent in RangeFeed catchup scan +# TYPE kv_rangefeed_catchup_scan_nanos counter +kv_rangefeed_catchup_scan_nanos{store="1"} 0.0 +# HELP txn_commits1PC Number of KV transaction on-phase commit attempts +# TYPE txn_commits1PC counter +txn_commits1PC 3206.0 +# HELP sql_mem_conns_current Current sql statement memory usage for conns +# TYPE sql_mem_conns_current gauge +sql_mem_conns_current 0.0 +# HELP sql_txn_rollback_count Number of SQL transaction ROLLBACK statements successfully executed +# TYPE sql_txn_rollback_count counter +sql_txn_rollback_count 0.0 +# HELP sql_query_count_internal Number of SQL queries executed (internal queries) +# TYPE sql_query_count_internal counter +sql_query_count_internal 2650.0 +# HELP sql_exec_latency_internal Latency of SQL statement execution (internal queries) +# TYPE sql_exec_latency_internal histogram +sql_exec_latency_internal_bucket{le="139263.0"} 1.0 +sql_exec_latency_internal_bucket{le="147455.0"} 3.0 +sql_exec_latency_internal_bucket{le="163839.0"} 4.0 +sql_exec_latency_internal_bucket{le="172031.0"} 8.0 +sql_exec_latency_internal_bucket{le="188415.0"} 9.0 +sql_exec_latency_internal_bucket{le="204799.0"} 10.0 +sql_exec_latency_internal_bucket{le="212991.0"} 12.0 +sql_exec_latency_internal_bucket{le="237567.0"} 13.0 +sql_exec_latency_internal_bucket{le="245759.0"} 17.0 +sql_exec_latency_internal_bucket{le="253951.0"} 19.0 +sql_exec_latency_internal_bucket{le="262143.0"} 24.0 +sql_exec_latency_internal_bucket{le="278527.0"} 40.0 +sql_exec_latency_internal_bucket{le="294911.0"} 71.0 +sql_exec_latency_internal_bucket{le="311295.0"} 86.0 +sql_exec_latency_internal_bucket{le="327679.0"} 107.0 +sql_exec_latency_internal_bucket{le="344063.0"} 138.0 +sql_exec_latency_internal_bucket{le="360447.0"} 169.0 +sql_exec_latency_internal_bucket{le="376831.0"} 202.0 +sql_exec_latency_internal_bucket{le="393215.0"} 244.0 +sql_exec_latency_internal_bucket{le="409599.0"} 272.0 +sql_exec_latency_internal_bucket{le="425983.0"} 289.0 +sql_exec_latency_internal_bucket{le="442367.0"} 312.0 +sql_exec_latency_internal_bucket{le="458751.0"} 330.0 +sql_exec_latency_internal_bucket{le="475135.0"} 345.0 +sql_exec_latency_internal_bucket{le="491519.0"} 356.0 +sql_exec_latency_internal_bucket{le="507903.0"} 367.0 +sql_exec_latency_internal_bucket{le="524287.0"} 374.0 +sql_exec_latency_internal_bucket{le="557055.0"} 385.0 +sql_exec_latency_internal_bucket{le="589823.0"} 399.0 
+sql_exec_latency_internal_bucket{le="622591.0"} 409.0 +sql_exec_latency_internal_bucket{le="655359.0"} 416.0 +sql_exec_latency_internal_bucket{le="688127.0"} 424.0 +sql_exec_latency_internal_bucket{le="720895.0"} 431.0 +sql_exec_latency_internal_bucket{le="753663.0"} 437.0 +sql_exec_latency_internal_bucket{le="786431.0"} 442.0 +sql_exec_latency_internal_bucket{le="819199.0"} 446.0 +sql_exec_latency_internal_bucket{le="851967.0"} 447.0 +sql_exec_latency_internal_bucket{le="884735.0"} 455.0 +sql_exec_latency_internal_bucket{le="917503.0"} 459.0 +sql_exec_latency_internal_bucket{le="950271.0"} 463.0 +sql_exec_latency_internal_bucket{le="983039.0"} 470.0 +sql_exec_latency_internal_bucket{le="1.015807e+06"} 472.0 +sql_exec_latency_internal_bucket{le="1.048575e+06"} 479.0 +sql_exec_latency_internal_bucket{le="1.114111e+06"} 491.0 +sql_exec_latency_internal_bucket{le="1.179647e+06"} 502.0 +sql_exec_latency_internal_bucket{le="1.245183e+06"} 518.0 +sql_exec_latency_internal_bucket{le="1.310719e+06"} 532.0 +sql_exec_latency_internal_bucket{le="1.376255e+06"} 551.0 +sql_exec_latency_internal_bucket{le="1.441791e+06"} 567.0 +sql_exec_latency_internal_bucket{le="1.507327e+06"} 583.0 +sql_exec_latency_internal_bucket{le="1.572863e+06"} 598.0 +sql_exec_latency_internal_bucket{le="1.638399e+06"} 617.0 +sql_exec_latency_internal_bucket{le="1.703935e+06"} 634.0 +sql_exec_latency_internal_bucket{le="1.769471e+06"} 659.0 +sql_exec_latency_internal_bucket{le="1.835007e+06"} 676.0 +sql_exec_latency_internal_bucket{le="1.900543e+06"} 714.0 +sql_exec_latency_internal_bucket{le="1.966079e+06"} 754.0 +sql_exec_latency_internal_bucket{le="2.031615e+06"} 791.0 +sql_exec_latency_internal_bucket{le="2.097151e+06"} 840.0 +sql_exec_latency_internal_bucket{le="2.228223e+06"} 937.0 +sql_exec_latency_internal_bucket{le="2.359295e+06"} 1046.0 +sql_exec_latency_internal_bucket{le="2.490367e+06"} 1154.0 +sql_exec_latency_internal_bucket{le="2.621439e+06"} 1254.0 +sql_exec_latency_internal_bucket{le="2.752511e+06"} 1357.0 +sql_exec_latency_internal_bucket{le="2.883583e+06"} 1444.0 +sql_exec_latency_internal_bucket{le="3.014655e+06"} 1534.0 +sql_exec_latency_internal_bucket{le="3.145727e+06"} 1609.0 +sql_exec_latency_internal_bucket{le="3.276799e+06"} 1675.0 +sql_exec_latency_internal_bucket{le="3.407871e+06"} 1738.0 +sql_exec_latency_internal_bucket{le="3.538943e+06"} 1793.0 +sql_exec_latency_internal_bucket{le="3.670015e+06"} 1847.0 +sql_exec_latency_internal_bucket{le="3.801087e+06"} 1896.0 +sql_exec_latency_internal_bucket{le="3.932159e+06"} 1952.0 +sql_exec_latency_internal_bucket{le="4.063231e+06"} 1994.0 +sql_exec_latency_internal_bucket{le="4.194303e+06"} 2040.0 +sql_exec_latency_internal_bucket{le="4.456447e+06"} 2136.0 +sql_exec_latency_internal_bucket{le="4.718591e+06"} 2208.0 +sql_exec_latency_internal_bucket{le="4.980735e+06"} 2261.0 +sql_exec_latency_internal_bucket{le="5.242879e+06"} 2326.0 +sql_exec_latency_internal_bucket{le="5.505023e+06"} 2363.0 +sql_exec_latency_internal_bucket{le="5.767167e+06"} 2389.0 +sql_exec_latency_internal_bucket{le="6.029311e+06"} 2424.0 +sql_exec_latency_internal_bucket{le="6.291455e+06"} 2450.0 +sql_exec_latency_internal_bucket{le="6.553599e+06"} 2481.0 +sql_exec_latency_internal_bucket{le="6.815743e+06"} 2508.0 +sql_exec_latency_internal_bucket{le="7.077887e+06"} 2540.0 +sql_exec_latency_internal_bucket{le="7.340031e+06"} 2549.0 +sql_exec_latency_internal_bucket{le="7.602175e+06"} 2562.0 +sql_exec_latency_internal_bucket{le="7.864319e+06"} 2572.0 
+sql_exec_latency_internal_bucket{le="8.126463e+06"} 2577.0 +sql_exec_latency_internal_bucket{le="8.388607e+06"} 2582.0 +sql_exec_latency_internal_bucket{le="8.912895e+06"} 2596.0 +sql_exec_latency_internal_bucket{le="9.437183e+06"} 2608.0 +sql_exec_latency_internal_bucket{le="9.961471e+06"} 2616.0 +sql_exec_latency_internal_bucket{le="1.0485759e+07"} 2621.0 +sql_exec_latency_internal_bucket{le="1.1010047e+07"} 2625.0 +sql_exec_latency_internal_bucket{le="1.1534335e+07"} 2629.0 +sql_exec_latency_internal_bucket{le="1.2058623e+07"} 2630.0 +sql_exec_latency_internal_bucket{le="1.2582911e+07"} 2634.0 +sql_exec_latency_internal_bucket{le="1.4155775e+07"} 2635.0 +sql_exec_latency_internal_bucket{le="1.4680063e+07"} 2638.0 +sql_exec_latency_internal_bucket{le="1.6777215e+07"} 2639.0 +sql_exec_latency_internal_bucket{le="1.7825791e+07"} 2640.0 +sql_exec_latency_internal_bucket{le="1.8874367e+07"} 2642.0 +sql_exec_latency_internal_bucket{le="2.2020095e+07"} 2644.0 +sql_exec_latency_internal_bucket{le="2.3068671e+07"} 2645.0 +sql_exec_latency_internal_bucket{le="2.5165823e+07"} 2646.0 +sql_exec_latency_internal_bucket{le="2.9360127e+07"} 2647.0 +sql_exec_latency_internal_bucket{le="3.5651583e+07"} 2648.0 +sql_exec_latency_internal_bucket{le="4.1943039e+07"} 2649.0 +sql_exec_latency_internal_bucket{le="4.8234495e+07"} 2650.0 +sql_exec_latency_internal_bucket{le="1.25829119e+08"} 2651.0 +sql_exec_latency_internal_bucket{le="1.30023423e+08"} 2652.0 +sql_exec_latency_internal_bucket{le="2.18103807e+08"} 2653.0 +sql_exec_latency_internal_bucket{le="2.26492415e+08"} 2654.0 +sql_exec_latency_internal_bucket{le="5.20093695e+08"} 2655.0 +sql_exec_latency_internal_bucket{le="1.0200547327e+10"} 2656.0 +sql_exec_latency_internal_bucket{le="+Inf"} 2656.0 +sql_exec_latency_internal_sum 1.9847050656e+10 +sql_exec_latency_internal_count 2656.0 +# HELP rebalancing_queriespersecond Number of kv-level requests received per second by the store, averaged over a large time period as used in rebalancing decisions +# TYPE rebalancing_queriespersecond gauge +rebalancing_queriespersecond{store="1"} 0.8014446777604269 +# HELP raft_process_applycommitted_latency Latency histogram for applying all committed Raft commands in a Raft ready +# TYPE raft_process_applycommitted_latency histogram +raft_process_applycommitted_latency_bucket{store="1",le="59.0"} 4.0 +raft_process_applycommitted_latency_bucket{store="1",le="61.0"} 19.0 +raft_process_applycommitted_latency_bucket{store="1",le="63.0"} 57.0 +raft_process_applycommitted_latency_bucket{store="1",le="67.0"} 261.0 +raft_process_applycommitted_latency_bucket{store="1",le="71.0"} 1674.0 +raft_process_applycommitted_latency_bucket{store="1",le="75.0"} 4513.0 +raft_process_applycommitted_latency_bucket{store="1",le="79.0"} 7653.0 +raft_process_applycommitted_latency_bucket{store="1",le="83.0"} 10075.0 +raft_process_applycommitted_latency_bucket{store="1",le="87.0"} 12079.0 +raft_process_applycommitted_latency_bucket{store="1",le="91.0"} 14825.0 +raft_process_applycommitted_latency_bucket{store="1",le="95.0"} 17083.0 +raft_process_applycommitted_latency_bucket{store="1",le="99.0"} 18993.0 +raft_process_applycommitted_latency_bucket{store="1",le="103.0"} 20504.0 +raft_process_applycommitted_latency_bucket{store="1",le="107.0"} 21540.0 +raft_process_applycommitted_latency_bucket{store="1",le="111.0"} 22621.0 +raft_process_applycommitted_latency_bucket{store="1",le="115.0"} 23464.0 +raft_process_applycommitted_latency_bucket{store="1",le="119.0"} 24266.0 
+raft_process_applycommitted_latency_bucket{store="1",le="123.0"} 25183.0 +raft_process_applycommitted_latency_bucket{store="1",le="127.0"} 25896.0 +raft_process_applycommitted_latency_bucket{store="1",le="135.0"} 27600.0 +raft_process_applycommitted_latency_bucket{store="1",le="143.0"} 29871.0 +raft_process_applycommitted_latency_bucket{store="1",le="151.0"} 31645.0 +raft_process_applycommitted_latency_bucket{store="1",le="159.0"} 33100.0 +raft_process_applycommitted_latency_bucket{store="1",le="167.0"} 34182.0 +raft_process_applycommitted_latency_bucket{store="1",le="175.0"} 35102.0 +raft_process_applycommitted_latency_bucket{store="1",le="183.0"} 36118.0 +raft_process_applycommitted_latency_bucket{store="1",le="191.0"} 37125.0 +raft_process_applycommitted_latency_bucket{store="1",le="199.0"} 37989.0 +raft_process_applycommitted_latency_bucket{store="1",le="207.0"} 38819.0 +raft_process_applycommitted_latency_bucket{store="1",le="215.0"} 39480.0 +raft_process_applycommitted_latency_bucket{store="1",le="223.0"} 40029.0 +raft_process_applycommitted_latency_bucket{store="1",le="231.0"} 40456.0 +raft_process_applycommitted_latency_bucket{store="1",le="239.0"} 40788.0 +raft_process_applycommitted_latency_bucket{store="1",le="247.0"} 41080.0 +raft_process_applycommitted_latency_bucket{store="1",le="255.0"} 41298.0 +raft_process_applycommitted_latency_bucket{store="1",le="271.0"} 41598.0 +raft_process_applycommitted_latency_bucket{store="1",le="287.0"} 41781.0 +raft_process_applycommitted_latency_bucket{store="1",le="303.0"} 41898.0 +raft_process_applycommitted_latency_bucket{store="1",le="319.0"} 41964.0 +raft_process_applycommitted_latency_bucket{store="1",le="335.0"} 42029.0 +raft_process_applycommitted_latency_bucket{store="1",le="351.0"} 42086.0 +raft_process_applycommitted_latency_bucket{store="1",le="367.0"} 42128.0 +raft_process_applycommitted_latency_bucket{store="1",le="383.0"} 42159.0 +raft_process_applycommitted_latency_bucket{store="1",le="399.0"} 42182.0 +raft_process_applycommitted_latency_bucket{store="1",le="415.0"} 42212.0 +raft_process_applycommitted_latency_bucket{store="1",le="431.0"} 42231.0 +raft_process_applycommitted_latency_bucket{store="1",le="447.0"} 42255.0 +raft_process_applycommitted_latency_bucket{store="1",le="463.0"} 42274.0 +raft_process_applycommitted_latency_bucket{store="1",le="479.0"} 42284.0 +raft_process_applycommitted_latency_bucket{store="1",le="495.0"} 42299.0 +raft_process_applycommitted_latency_bucket{store="1",le="511.0"} 42308.0 +raft_process_applycommitted_latency_bucket{store="1",le="543.0"} 42324.0 +raft_process_applycommitted_latency_bucket{store="1",le="575.0"} 42335.0 +raft_process_applycommitted_latency_bucket{store="1",le="607.0"} 42347.0 +raft_process_applycommitted_latency_bucket{store="1",le="639.0"} 42353.0 +raft_process_applycommitted_latency_bucket{store="1",le="671.0"} 42361.0 +raft_process_applycommitted_latency_bucket{store="1",le="703.0"} 42365.0 +raft_process_applycommitted_latency_bucket{store="1",le="735.0"} 42369.0 +raft_process_applycommitted_latency_bucket{store="1",le="767.0"} 42375.0 +raft_process_applycommitted_latency_bucket{store="1",le="799.0"} 42381.0 +raft_process_applycommitted_latency_bucket{store="1",le="863.0"} 42386.0 +raft_process_applycommitted_latency_bucket{store="1",le="895.0"} 42390.0 +raft_process_applycommitted_latency_bucket{store="1",le="927.0"} 42397.0 +raft_process_applycommitted_latency_bucket{store="1",le="959.0"} 42405.0 +raft_process_applycommitted_latency_bucket{store="1",le="991.0"} 42412.0 
+raft_process_applycommitted_latency_bucket{store="1",le="1023.0"} 42421.0 +raft_process_applycommitted_latency_bucket{store="1",le="1087.0"} 42435.0 +raft_process_applycommitted_latency_bucket{store="1",le="1151.0"} 42442.0 +raft_process_applycommitted_latency_bucket{store="1",le="1215.0"} 42449.0 +raft_process_applycommitted_latency_bucket{store="1",le="1279.0"} 42458.0 +raft_process_applycommitted_latency_bucket{store="1",le="1343.0"} 42461.0 +raft_process_applycommitted_latency_bucket{store="1",le="1407.0"} 42466.0 +raft_process_applycommitted_latency_bucket{store="1",le="1471.0"} 42469.0 +raft_process_applycommitted_latency_bucket{store="1",le="1535.0"} 42472.0 +raft_process_applycommitted_latency_bucket{store="1",le="1599.0"} 42473.0 +raft_process_applycommitted_latency_bucket{store="1",le="1727.0"} 42474.0 +raft_process_applycommitted_latency_bucket{store="1",le="1791.0"} 42486.0 +raft_process_applycommitted_latency_bucket{store="1",le="1855.0"} 42503.0 +raft_process_applycommitted_latency_bucket{store="1",le="1919.0"} 42509.0 +raft_process_applycommitted_latency_bucket{store="1",le="1983.0"} 42514.0 +raft_process_applycommitted_latency_bucket{store="1",le="2047.0"} 42518.0 +raft_process_applycommitted_latency_bucket{store="1",le="2175.0"} 42522.0 +raft_process_applycommitted_latency_bucket{store="1",le="2303.0"} 42526.0 +raft_process_applycommitted_latency_bucket{store="1",le="2431.0"} 42529.0 +raft_process_applycommitted_latency_bucket{store="1",le="2559.0"} 42531.0 +raft_process_applycommitted_latency_bucket{store="1",le="2687.0"} 42533.0 +raft_process_applycommitted_latency_bucket{store="1",le="6911.0"} 42537.0 +raft_process_applycommitted_latency_bucket{store="1",le="7167.0"} 42540.0 +raft_process_applycommitted_latency_bucket{store="1",le="7423.0"} 42548.0 +raft_process_applycommitted_latency_bucket{store="1",le="7679.0"} 42553.0 +raft_process_applycommitted_latency_bucket{store="1",le="7935.0"} 42557.0 +raft_process_applycommitted_latency_bucket{store="1",le="8191.0"} 42562.0 +raft_process_applycommitted_latency_bucket{store="1",le="8703.0"} 42572.0 +raft_process_applycommitted_latency_bucket{store="1",le="9215.0"} 42576.0 +raft_process_applycommitted_latency_bucket{store="1",le="9727.0"} 42583.0 +raft_process_applycommitted_latency_bucket{store="1",le="10239.0"} 42588.0 +raft_process_applycommitted_latency_bucket{store="1",le="10751.0"} 42591.0 +raft_process_applycommitted_latency_bucket{store="1",le="11263.0"} 42594.0 +raft_process_applycommitted_latency_bucket{store="1",le="11775.0"} 42596.0 +raft_process_applycommitted_latency_bucket{store="1",le="12287.0"} 42598.0 +raft_process_applycommitted_latency_bucket{store="1",le="13311.0"} 42600.0 +raft_process_applycommitted_latency_bucket{store="1",le="13823.0"} 42601.0 +raft_process_applycommitted_latency_bucket{store="1",le="14335.0"} 42605.0 +raft_process_applycommitted_latency_bucket{store="1",le="14847.0"} 42608.0 +raft_process_applycommitted_latency_bucket{store="1",le="15359.0"} 42610.0 +raft_process_applycommitted_latency_bucket{store="1",le="15871.0"} 42616.0 +raft_process_applycommitted_latency_bucket{store="1",le="16383.0"} 42620.0 +raft_process_applycommitted_latency_bucket{store="1",le="17407.0"} 42634.0 +raft_process_applycommitted_latency_bucket{store="1",le="18431.0"} 42655.0 +raft_process_applycommitted_latency_bucket{store="1",le="19455.0"} 42678.0 +raft_process_applycommitted_latency_bucket{store="1",le="20479.0"} 42724.0 +raft_process_applycommitted_latency_bucket{store="1",le="21503.0"} 42784.0 
+raft_process_applycommitted_latency_bucket{store="1",le="22527.0"} 42869.0 +raft_process_applycommitted_latency_bucket{store="1",le="23551.0"} 42941.0 +raft_process_applycommitted_latency_bucket{store="1",le="24575.0"} 43041.0 +raft_process_applycommitted_latency_bucket{store="1",le="25599.0"} 43163.0 +raft_process_applycommitted_latency_bucket{store="1",le="26623.0"} 43320.0 +raft_process_applycommitted_latency_bucket{store="1",le="27647.0"} 43508.0 +raft_process_applycommitted_latency_bucket{store="1",le="28671.0"} 43746.0 +raft_process_applycommitted_latency_bucket{store="1",le="29695.0"} 44015.0 +raft_process_applycommitted_latency_bucket{store="1",le="30719.0"} 44324.0 +raft_process_applycommitted_latency_bucket{store="1",le="31743.0"} 44711.0 +raft_process_applycommitted_latency_bucket{store="1",le="32767.0"} 45084.0 +raft_process_applycommitted_latency_bucket{store="1",le="34815.0"} 45942.0 +raft_process_applycommitted_latency_bucket{store="1",le="36863.0"} 46940.0 +raft_process_applycommitted_latency_bucket{store="1",le="38911.0"} 47810.0 +raft_process_applycommitted_latency_bucket{store="1",le="40959.0"} 48543.0 +raft_process_applycommitted_latency_bucket{store="1",le="43007.0"} 49172.0 +raft_process_applycommitted_latency_bucket{store="1",le="45055.0"} 49712.0 +raft_process_applycommitted_latency_bucket{store="1",le="47103.0"} 50198.0 +raft_process_applycommitted_latency_bucket{store="1",le="49151.0"} 50691.0 +raft_process_applycommitted_latency_bucket{store="1",le="51199.0"} 51166.0 +raft_process_applycommitted_latency_bucket{store="1",le="53247.0"} 51579.0 +raft_process_applycommitted_latency_bucket{store="1",le="55295.0"} 51966.0 +raft_process_applycommitted_latency_bucket{store="1",le="57343.0"} 52361.0 +raft_process_applycommitted_latency_bucket{store="1",le="59391.0"} 52724.0 +raft_process_applycommitted_latency_bucket{store="1",le="61439.0"} 53065.0 +raft_process_applycommitted_latency_bucket{store="1",le="63487.0"} 53400.0 +raft_process_applycommitted_latency_bucket{store="1",le="65535.0"} 53701.0 +raft_process_applycommitted_latency_bucket{store="1",le="69631.0"} 54333.0 +raft_process_applycommitted_latency_bucket{store="1",le="73727.0"} 54926.0 +raft_process_applycommitted_latency_bucket{store="1",le="77823.0"} 55475.0 +raft_process_applycommitted_latency_bucket{store="1",le="81919.0"} 56020.0 +raft_process_applycommitted_latency_bucket{store="1",le="86015.0"} 56553.0 +raft_process_applycommitted_latency_bucket{store="1",le="90111.0"} 57025.0 +raft_process_applycommitted_latency_bucket{store="1",le="94207.0"} 57449.0 +raft_process_applycommitted_latency_bucket{store="1",le="98303.0"} 57837.0 +raft_process_applycommitted_latency_bucket{store="1",le="102399.0"} 58186.0 +raft_process_applycommitted_latency_bucket{store="1",le="106495.0"} 58530.0 +raft_process_applycommitted_latency_bucket{store="1",le="110591.0"} 58819.0 +raft_process_applycommitted_latency_bucket{store="1",le="114687.0"} 59126.0 +raft_process_applycommitted_latency_bucket{store="1",le="118783.0"} 59396.0 +raft_process_applycommitted_latency_bucket{store="1",le="122879.0"} 59649.0 +raft_process_applycommitted_latency_bucket{store="1",le="126975.0"} 59901.0 +raft_process_applycommitted_latency_bucket{store="1",le="131071.0"} 60181.0 +raft_process_applycommitted_latency_bucket{store="1",le="139263.0"} 60694.0 +raft_process_applycommitted_latency_bucket{store="1",le="147455.0"} 61214.0 +raft_process_applycommitted_latency_bucket{store="1",le="155647.0"} 61746.0 
+raft_process_applycommitted_latency_bucket{store="1",le="163839.0"} 62313.0 +raft_process_applycommitted_latency_bucket{store="1",le="172031.0"} 62819.0 +raft_process_applycommitted_latency_bucket{store="1",le="180223.0"} 63287.0 +raft_process_applycommitted_latency_bucket{store="1",le="188415.0"} 63745.0 +raft_process_applycommitted_latency_bucket{store="1",le="196607.0"} 64188.0 +raft_process_applycommitted_latency_bucket{store="1",le="204799.0"} 64599.0 +raft_process_applycommitted_latency_bucket{store="1",le="212991.0"} 65018.0 +raft_process_applycommitted_latency_bucket{store="1",le="221183.0"} 65424.0 +raft_process_applycommitted_latency_bucket{store="1",le="229375.0"} 65764.0 +raft_process_applycommitted_latency_bucket{store="1",le="237567.0"} 66116.0 +raft_process_applycommitted_latency_bucket{store="1",le="245759.0"} 66470.0 +raft_process_applycommitted_latency_bucket{store="1",le="253951.0"} 66796.0 +raft_process_applycommitted_latency_bucket{store="1",le="262143.0"} 67084.0 +raft_process_applycommitted_latency_bucket{store="1",le="278527.0"} 67681.0 +raft_process_applycommitted_latency_bucket{store="1",le="294911.0"} 68244.0 +raft_process_applycommitted_latency_bucket{store="1",le="311295.0"} 68719.0 +raft_process_applycommitted_latency_bucket{store="1",le="327679.0"} 69150.0 +raft_process_applycommitted_latency_bucket{store="1",le="344063.0"} 69558.0 +raft_process_applycommitted_latency_bucket{store="1",le="360447.0"} 69908.0 +raft_process_applycommitted_latency_bucket{store="1",le="376831.0"} 70250.0 +raft_process_applycommitted_latency_bucket{store="1",le="393215.0"} 70600.0 +raft_process_applycommitted_latency_bucket{store="1",le="409599.0"} 70894.0 +raft_process_applycommitted_latency_bucket{store="1",le="425983.0"} 71182.0 +raft_process_applycommitted_latency_bucket{store="1",le="442367.0"} 71428.0 +raft_process_applycommitted_latency_bucket{store="1",le="458751.0"} 71655.0 +raft_process_applycommitted_latency_bucket{store="1",le="475135.0"} 71882.0 +raft_process_applycommitted_latency_bucket{store="1",le="491519.0"} 72080.0 +raft_process_applycommitted_latency_bucket{store="1",le="507903.0"} 72286.0 +raft_process_applycommitted_latency_bucket{store="1",le="524287.0"} 72482.0 +raft_process_applycommitted_latency_bucket{store="1",le="557055.0"} 72854.0 +raft_process_applycommitted_latency_bucket{store="1",le="589823.0"} 73184.0 +raft_process_applycommitted_latency_bucket{store="1",le="622591.0"} 73492.0 +raft_process_applycommitted_latency_bucket{store="1",le="655359.0"} 73791.0 +raft_process_applycommitted_latency_bucket{store="1",le="688127.0"} 74038.0 +raft_process_applycommitted_latency_bucket{store="1",le="720895.0"} 74308.0 +raft_process_applycommitted_latency_bucket{store="1",le="753663.0"} 74528.0 +raft_process_applycommitted_latency_bucket{store="1",le="786431.0"} 74742.0 +raft_process_applycommitted_latency_bucket{store="1",le="819199.0"} 74970.0 +raft_process_applycommitted_latency_bucket{store="1",le="851967.0"} 75213.0 +raft_process_applycommitted_latency_bucket{store="1",le="884735.0"} 75428.0 +raft_process_applycommitted_latency_bucket{store="1",le="917503.0"} 75634.0 +raft_process_applycommitted_latency_bucket{store="1",le="950271.0"} 75848.0 +raft_process_applycommitted_latency_bucket{store="1",le="983039.0"} 76101.0 +raft_process_applycommitted_latency_bucket{store="1",le="1.015807e+06"} 76351.0 +raft_process_applycommitted_latency_bucket{store="1",le="1.048575e+06"} 76569.0 +raft_process_applycommitted_latency_bucket{store="1",le="1.114111e+06"} 76977.0 
+raft_process_applycommitted_latency_bucket{store="1",le="1.179647e+06"} 77355.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.245183e+06"} 77726.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.310719e+06"} 78102.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.376255e+06"} 78417.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.441791e+06"} 78707.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.507327e+06"} 78945.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.572863e+06"} 79194.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.638399e+06"} 79448.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.703935e+06"} 79678.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.769471e+06"} 79867.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.835007e+06"} 80072.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.900543e+06"} 80252.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.966079e+06"} 80430.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.031615e+06"} 80607.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.097151e+06"} 80786.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.228223e+06"} 81069.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.359295e+06"} 81293.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.490367e+06"} 81503.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.621439e+06"} 81702.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.752511e+06"} 81864.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.883583e+06"} 82021.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.014655e+06"} 82168.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.145727e+06"} 82302.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.276799e+06"} 82409.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.407871e+06"} 82513.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.538943e+06"} 82615.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.670015e+06"} 82703.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.801087e+06"} 82785.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.932159e+06"} 82869.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.063231e+06"} 82925.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.194303e+06"} 82992.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.456447e+06"} 83101.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.718591e+06"} 83194.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.980735e+06"} 83280.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.242879e+06"} 83342.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.505023e+06"} 83399.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.767167e+06"} 83454.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.029311e+06"} 83489.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.291455e+06"} 83517.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.553599e+06"} 83542.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.815743e+06"} 83569.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.077887e+06"} 83594.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.340031e+06"} 83613.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.602175e+06"} 83635.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.864319e+06"} 83650.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.126463e+06"} 83669.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.388607e+06"} 83677.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.912895e+06"} 83704.0
+raft_process_applycommitted_latency_bucket{store="1",le="9.437183e+06"} 83722.0
+raft_process_applycommitted_latency_bucket{store="1",le="9.961471e+06"} 83729.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.0485759e+07"} 83736.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.1010047e+07"} 83743.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.1534335e+07"} 83749.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.2058623e+07"} 83754.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.2582911e+07"} 83757.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.3107199e+07"} 83759.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.3631487e+07"} 83763.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.4155775e+07"} 83764.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.4680063e+07"} 83766.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.5204351e+07"} 83770.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.6252927e+07"} 83772.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.9922943e+07"} 83773.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.0971519e+07"} 83774.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.5165823e+07"} 83775.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.6214399e+07"} 83776.0
+raft_process_applycommitted_latency_bucket{store="1",le="+Inf"} 83776.0
+raft_process_applycommitted_latency_sum{store="1"} 2.5535355126e+10
+raft_process_applycommitted_latency_count{store="1"} 83776.0
+# HELP queue_replicate_processingnanos Nanoseconds spent processing replicas in the replicate queue
+# TYPE queue_replicate_processingnanos counter
+queue_replicate_processingnanos{store="1"} 0.0
+# HELP changefeed_poll_request_nanos Time spent fetching changes
+# TYPE changefeed_poll_request_nanos histogram
+changefeed_poll_request_nanos_bucket{le="+Inf"} 0.0
+changefeed_poll_request_nanos_sum 0.0
+changefeed_poll_request_nanos_count 0.0
+# HELP txnrecovery_attempts_pending Number of transaction recovery attempts currently in-flight
+# TYPE txnrecovery_attempts_pending gauge
+txnrecovery_attempts_pending{store="1"} 0.0
+# HELP requests_slow_lease Number of requests that have been stuck for a long time acquiring a lease
+# TYPE requests_slow_lease gauge
+requests_slow_lease{store="1"} 0.0
+# HELP sql_savepoint_started_count Number of SQL SAVEPOINT statements started
+# TYPE sql_savepoint_started_count counter
+sql_savepoint_started_count 0.0
+# HELP replicas_leaders Number of raft leaders
+# TYPE replicas_leaders gauge
+replicas_leaders{store="1"} 7.0
+# HELP raftlog_truncated Number of Raft log entries truncated
+# TYPE raftlog_truncated counter
+raftlog_truncated{store="1"} 19128.0
+# HELP queue_raftsnapshot_process_success Number of replicas successfully processed by the Raft repair queue
+# TYPE queue_raftsnapshot_process_success counter
+queue_raftsnapshot_process_success{store="1"} 0.0
+# HELP queue_tsmaintenance_process_success Number of replicas successfully processed by the time series maintenance queue
+# TYPE queue_tsmaintenance_process_success counter
+queue_tsmaintenance_process_success{store="1"} 0.0
+# HELP requests_slow_raft Number of requests that have been stuck for a long time in raft
+# TYPE requests_slow_raft gauge
+requests_slow_raft{store="1"} 0.0
+# HELP addsstable_delay_total Amount by which evaluation of AddSSTable requests was delayed
+# TYPE addsstable_delay_total counter
+addsstable_delay_total{store="1"} 0.0
+# HELP sys_cpu_sys_percent Current system cpu percentage
+# TYPE sys_cpu_sys_percent gauge
+sys_cpu_sys_percent 0.014030189881984257
+# HELP queue_replicate_rebalancereplica Number of replica rebalancer-initiated additions attempted by the replicate queue
+# TYPE queue_replicate_rebalancereplica counter
+queue_replicate_rebalancereplica{store="1"} 0.0
+# HELP gossip_bytes_received Number of received gossip bytes
+# TYPE gossip_bytes_received counter
+gossip_bytes_received 1817.0
+# HELP distsender_rpc_sent_local Number of local RPCs sent
+# TYPE distsender_rpc_sent_local counter
+distsender_rpc_sent_local 4533.0
+# HELP sys_host_net_recv_packets Packets received on all network interfaces since this process started
+# TYPE sys_host_net_recv_packets gauge
+sys_host_net_recv_packets 593876.0
+# HELP changefeed_processing_nanos Time spent processing KV changes into SQL rows
+# TYPE changefeed_processing_nanos counter
+changefeed_processing_nanos 0.0
+# HELP sql_mem_distsql_current Current sql statement memory usage for distsql
+# TYPE sql_mem_distsql_current gauge
+sql_mem_distsql_current 0.0
+# HELP leases_error Number of failed lease requests
+# TYPE leases_error counter
+leases_error{store="1"} 0.0
+# HELP capacity Total storage capacity
+# TYPE capacity gauge
+capacity{store="1"} 6.4202351837184e+13
+# HELP rpc_heartbeats_loops_exited Counter of the number of connection heartbeat loops which have exited with an error
+# TYPE rpc_heartbeats_loops_exited counter
+rpc_heartbeats_loops_exited 0.0
+# HELP sys_host_disk_iopsinprogress IO operations currently in progress on this host
+# TYPE sys_host_disk_iopsinprogress gauge
+sys_host_disk_iopsinprogress 0.0
+# HELP ranges_overreplicated Number of ranges with more live replicas than the replication target
+# TYPE ranges_overreplicated gauge
+ranges_overreplicated{store="1"} 0.0
+# HELP intents_poison_attempts Count of (point or range) poisoning intent abort evaluation attempts
+# TYPE intents_poison_attempts counter
+intents_poison_attempts{store="1"} 0.0
+# HELP sys_goroutines Current number of goroutines
+# TYPE sys_goroutines gauge
+sys_goroutines 235.0
+# HELP raft_enqueued_pending Number of pending outgoing messages in the Raft Transport queue
+# TYPE raft_enqueued_pending gauge
+raft_enqueued_pending{store="1"} 0.0
+# HELP sql_txn_begin_started_count_internal Number of SQL transaction BEGIN statements started (internal queries)
+# TYPE sql_txn_begin_started_count_internal counter
+sql_txn_begin_started_count_internal 0.0
+# HELP txn_commits Number of committed KV transactions (including 1PC)
+# TYPE txn_commits counter
+txn_commits 7472.0
+# HELP liveness_heartbeatsuccesses Number of successful node liveness heartbeats from this node
+# TYPE liveness_heartbeatsuccesses counter
+liveness_heartbeatsuccesses 2720.0
+# HELP sys_cgo_allocbytes Current bytes of memory allocated by cgo
+# TYPE sys_cgo_allocbytes gauge
+sys_cgo_allocbytes 6.3363512e+07
+# HELP sql_distsql_flows_queue_wait Duration of time flows spend waiting in the queue
+# TYPE sql_distsql_flows_queue_wait histogram
+sql_distsql_flows_queue_wait_bucket{le="+Inf"} 0.0
+sql_distsql_flows_queue_wait_sum 0.0
+sql_distsql_flows_queue_wait_count 0.0
+# HELP txnrecovery_failures Number of transaction recovery attempts that failed
+# TYPE txnrecovery_failures counter
+txnrecovery_failures{store="1"} 0.0
+# HELP rpc_heartbeats_nominal Gauge of current connections in the nominal state
+# TYPE rpc_heartbeats_nominal gauge
+rpc_heartbeats_nominal 7.0
+# HELP sys_host_disk_write_count Disk write operations across all disks since this process started
+# TYPE sys_host_disk_write_count gauge
+sys_host_disk_write_count 106.0
+# HELP sys_host_disk_write_time Time spent writing to all disks since this process started
+# TYPE sys_host_disk_write_time gauge
+sys_host_disk_write_time 1.02e+08
+# HELP ranges_underreplicated Number of ranges with fewer live replicas than the replication target
+# TYPE ranges_underreplicated gauge
+ranges_underreplicated{store="1"} 0.0
+# HELP rocksdb_num_sstables Number of rocksdb SSTables
+# TYPE rocksdb_num_sstables gauge
+rocksdb_num_sstables{store="1"} 8.0
+# HELP raft_commandsapplied Count of Raft commands applied
+# TYPE raft_commandsapplied counter
+raft_commandsapplied{store="1"} 0.0
+# HELP raftlog_behind Number of Raft log entries followers on other stores are behind
+# TYPE raftlog_behind gauge
+raftlog_behind{store="1"} 0.0
+# HELP queue_tsmaintenance_pending Number of pending replicas in the time series maintenance queue
+# TYPE queue_tsmaintenance_pending gauge
+queue_tsmaintenance_pending{store="1"} 0.0
+# HELP sql_mem_admin_txn_max Memory usage per sql transaction for admin
+# TYPE sql_mem_admin_txn_max histogram
+sql_mem_admin_txn_max_bucket{le="+Inf"} 0.0
+sql_mem_admin_txn_max_sum 0.0
+sql_mem_admin_txn_max_count 0.0
+# HELP txnwaitqueue_pusher_slow The total number of cases where a pusher waited more than the excessive wait threshold
+# TYPE txnwaitqueue_pusher_slow gauge
+txnwaitqueue_pusher_slow{store="1"} 0.0
+# HELP compactor_compactingnanos Number of nanoseconds spent compacting ranges
+# TYPE compactor_compactingnanos counter
+compactor_compactingnanos{store="1"} 0.0
+# HELP rebalancing_lease_transfers Number of lease transfers motivated by store-level load imbalances
+# TYPE rebalancing_lease_transfers counter
+rebalancing_lease_transfers{store="1"} 0.0
+# HELP requests_slow_latch Number of requests that have been stuck for a long time acquiring latches
+# TYPE requests_slow_latch gauge
+requests_slow_latch{store="1"} 0.0
+# HELP keycount Count of all keys
+# TYPE keycount gauge
+keycount{store="1"} 119307.0
+# HELP addsstable_delay_enginebackpressure Amount by which evaluation of AddSSTable requests was delayed by storage-engine backpressure
+# TYPE addsstable_delay_enginebackpressure counter
+addsstable_delay_enginebackpressure{store="1"} 0.0
+# HELP tscache_skl_write_pages Number of pages in the write timestamp cache
+# TYPE tscache_skl_write_pages gauge
+tscache_skl_write_pages{store="1"} 1.0
+# HELP sql_query_started_count Number of SQL queries started
+# TYPE sql_query_started_count counter
+sql_query_started_count 0.0
+# HELP sys_gc_count Total number of GC runs
+# TYPE sys_gc_count gauge
+sys_gc_count 279.0
+# HELP sys_host_disk_read_time Time spent reading from all disks since this process started
+# TYPE sys_host_disk_read_time gauge
+sys_host_disk_read_time 4.7e+08
+# HELP sql_mem_distsql_max Memory usage per sql statement for distsql
+# TYPE sql_mem_distsql_max histogram
+sql_mem_distsql_max_bucket{le="4011.0"} 86.0
+sql_mem_distsql_max_bucket{le="4311.0"} 107.0 +sql_mem_distsql_max_bucket{le="4615.0"} 126.0 +sql_mem_distsql_max_bucket{le="4967.0"} 127.0 +sql_mem_distsql_max_bucket{le="5503.0"} 129.0 +sql_mem_distsql_max_bucket{le="5803.0"} 130.0 +sql_mem_distsql_max_bucket{le="5831.0"} 132.0 +sql_mem_distsql_max_bucket{le="6127.0"} 133.0 +sql_mem_distsql_max_bucket{le="6423.0"} 134.0 +sql_mem_distsql_max_bucket{le="6431.0"} 135.0 +sql_mem_distsql_max_bucket{le="6727.0"} 136.0 +sql_mem_distsql_max_bucket{le="+Inf"} 136.0 +sql_mem_distsql_max_sum 582308.0 +sql_mem_distsql_max_count 136.0 +# HELP sql_new_conns Counter of the number of sql connections created +# TYPE sql_new_conns counter +sql_new_conns 0.0 +# HELP sql_optimizer_plan_cache_misses Number of non-prepared statements for which a cached plan was not used +# TYPE sql_optimizer_plan_cache_misses counter +sql_optimizer_plan_cache_misses 0.0 +# HELP raft_rcvd_vote Number of MsgVote messages received by this store +# TYPE raft_rcvd_vote counter +raft_rcvd_vote{store="1"} 31.0 +# HELP addsstable_applications Number of SSTable ingestions applied (i.e. applied by Replicas) +# TYPE addsstable_applications counter +addsstable_applications{store="1"} 0.0 +# HELP sql_mem_bulk_max Memory usage per sql statement for bulk operations +# TYPE sql_mem_bulk_max histogram +sql_mem_bulk_max_bucket{le="+Inf"} 0.0 +sql_mem_bulk_max_sum 0.0 +sql_mem_bulk_max_count 0.0 +# HELP sql_select_started_count Number of SQL SELECT statements started +# TYPE sql_select_started_count counter +sql_select_started_count 0.0 +# HELP sql_misc_count Number of other SQL statements successfully executed +# TYPE sql_misc_count counter +sql_misc_count 0.0 +# HELP sql_delete_count_internal Number of SQL DELETE statements successfully executed (internal queries) +# TYPE sql_delete_count_internal counter +sql_delete_count_internal 505.0 +# HELP sql_savepoint_count_internal Number of SQL SAVEPOINT statements successfully executed (internal queries) +# TYPE sql_savepoint_count_internal counter +sql_savepoint_count_internal 0.0 +# HELP ranges_unavailable Number of ranges with fewer live replicas than needed for quorum +# TYPE ranges_unavailable gauge +ranges_unavailable{store="1"} 0.0 +# HELP capacity_available Available storage capacity +# TYPE capacity_available gauge +capacity_available{store="1"} 4.0402062147584e+13 +# HELP queue_gc_info_transactionspangcstaging Number of GC'able entries corresponding to staging txns +# TYPE queue_gc_info_transactionspangcstaging counter +queue_gc_info_transactionspangcstaging{store="1"} 0.0 +# HELP txn_restarts_txnpush Number of restarts due to a transaction push failure +# TYPE txn_restarts_txnpush counter +txn_restarts_txnpush 0.0 +# HELP rocksdb_block_cache_misses Count of block cache misses +# TYPE rocksdb_block_cache_misses gauge +rocksdb_block_cache_misses{store="1"} 8129.0 +# HELP addsstable_copies number of SSTable ingestions that required copying files during application +# TYPE addsstable_copies counter +addsstable_copies{store="1"} 0.0 +# HELP txnwaitqueue_pushee_waiting Number of pushees on the txn wait queue +# TYPE txnwaitqueue_pushee_waiting gauge +txnwaitqueue_pushee_waiting{store="1"} 0.0 +# HELP sql_mem_sql_txn_current Current sql transaction memory usage for sql +# TYPE sql_mem_sql_txn_current gauge +sql_mem_sql_txn_current 0.0 +sql_mem_sql_txn_current 0.0 +# HELP sql_insert_count Number of SQL INSERT statements successfully executed +# TYPE sql_insert_count counter +sql_insert_count 0.0 +# HELP sql_txn_abort_count Number of SQL 
transaction abort errors +# TYPE sql_txn_abort_count counter +sql_txn_abort_count 0.0 +# HELP intentage Cumulative age of intents +# TYPE intentage gauge +intentage{store="1"} -16.0 +# HELP range_merges Number of range merges +# TYPE range_merges counter +range_merges{store="1"} 0.0 +# HELP queue_gc_info_abortspangcnum Number of AbortSpan entries fit for removal +# TYPE queue_gc_info_abortspangcnum counter +queue_gc_info_abortspangcnum{store="1"} 1.0 +# HELP gossip_infos_sent Number of sent gossip Info objects +# TYPE gossip_infos_sent counter +gossip_infos_sent 30.0 +# HELP sql_update_count Number of SQL UPDATE statements successfully executed +# TYPE sql_update_count counter +sql_update_count 0.0 +# HELP sql_txn_rollback_count_internal Number of SQL transaction ROLLBACK statements successfully executed (internal queries) +# TYPE sql_txn_rollback_count_internal counter +sql_txn_rollback_count_internal 0.0 +# HELP raft_process_workingnanos Nanoseconds spent in store.processRaft() working +# TYPE raft_process_workingnanos counter +raft_process_workingnanos{store="1"} 2.4058409967e+10 +# HELP queue_merge_pending Number of pending replicas in the merge queue +# TYPE queue_merge_pending gauge +queue_merge_pending{store="1"} 0.0 +# HELP txnwaitqueue_pusher_wait_time Histogram of durations spent in queue by pushers +# TYPE txnwaitqueue_pusher_wait_time histogram +txnwaitqueue_pusher_wait_time_bucket{store="1",le="1.769471e+06"} 1.0 +txnwaitqueue_pusher_wait_time_bucket{store="1",le="4.980735e+06"} 2.0 +txnwaitqueue_pusher_wait_time_bucket{store="1",le="+Inf"} 2.0 +txnwaitqueue_pusher_wait_time_sum{store="1"} 6.750206e+06 +txnwaitqueue_pusher_wait_time_count{store="1"} 2.0 +# HELP sys_cpu_combined_percent_normalized Current user+system cpu percentage, normalized 0-1 by number of cores +# TYPE sys_cpu_combined_percent_normalized gauge +sys_cpu_combined_percent_normalized 0.008518329571204727 +# HELP sql_mem_internal_max Memory usage per sql statement for internal +# TYPE sql_mem_internal_max histogram +sql_mem_internal_max_bucket{le="4011.0"} 2581.0 +sql_mem_internal_max_bucket{le="4311.0"} 2616.0 +sql_mem_internal_max_bucket{le="4487.0"} 2617.0 +sql_mem_internal_max_bucket{le="4855.0"} 2636.0 +sql_mem_internal_max_bucket{le="4967.0"} 2637.0 +sql_mem_internal_max_bucket{le="+Inf"} 2637.0 +sql_mem_internal_max_sum 1.0604975e+07 +sql_mem_internal_max_count 2637.0 +# HELP sql_mem_admin_max Memory usage per sql statement for admin +# TYPE sql_mem_admin_max histogram +sql_mem_admin_max_bucket{le="+Inf"} 0.0 +sql_mem_admin_max_sum 0.0 +sql_mem_admin_max_count 0.0 +# HELP sql_txn_rollback_started_count Number of SQL transaction ROLLBACK statements started +# TYPE sql_txn_rollback_started_count counter +sql_txn_rollback_started_count 0.0 +# HELP sql_insert_count_internal Number of SQL INSERT statements successfully executed (internal queries) +# TYPE sql_insert_count_internal counter +sql_insert_count_internal 516.0 +# HELP sql_distsql_service_latency_internal Latency of DistSQL request execution (internal queries) +# TYPE sql_distsql_service_latency_internal histogram +sql_distsql_service_latency_internal_bucket{le="2.883583e+06"} 2.0 +sql_distsql_service_latency_internal_bucket{le="3.407871e+06"} 4.0 +sql_distsql_service_latency_internal_bucket{le="3.538943e+06"} 5.0 +sql_distsql_service_latency_internal_bucket{le="3.670015e+06"} 9.0 +sql_distsql_service_latency_internal_bucket{le="3.801087e+06"} 13.0 +sql_distsql_service_latency_internal_bucket{le="3.932159e+06"} 20.0 
+sql_distsql_service_latency_internal_bucket{le="4.063231e+06"} 30.0 +sql_distsql_service_latency_internal_bucket{le="4.194303e+06"} 43.0 +sql_distsql_service_latency_internal_bucket{le="4.456447e+06"} 73.0 +sql_distsql_service_latency_internal_bucket{le="4.718591e+06"} 108.0 +sql_distsql_service_latency_internal_bucket{le="4.980735e+06"} 176.0 +sql_distsql_service_latency_internal_bucket{le="5.242879e+06"} 242.0 +sql_distsql_service_latency_internal_bucket{le="5.505023e+06"} 289.0 +sql_distsql_service_latency_internal_bucket{le="5.767167e+06"} 334.0 +sql_distsql_service_latency_internal_bucket{le="6.029311e+06"} 398.0 +sql_distsql_service_latency_internal_bucket{le="6.291455e+06"} 459.0 +sql_distsql_service_latency_internal_bucket{le="6.553599e+06"} 519.0 +sql_distsql_service_latency_internal_bucket{le="6.815743e+06"} 581.0 +sql_distsql_service_latency_internal_bucket{le="7.077887e+06"} 639.0 +sql_distsql_service_latency_internal_bucket{le="7.340031e+06"} 695.0 +sql_distsql_service_latency_internal_bucket{le="7.602175e+06"} 747.0 +sql_distsql_service_latency_internal_bucket{le="7.864319e+06"} 785.0 +sql_distsql_service_latency_internal_bucket{le="8.126463e+06"} 828.0 +sql_distsql_service_latency_internal_bucket{le="8.388607e+06"} 885.0 +sql_distsql_service_latency_internal_bucket{le="8.912895e+06"} 971.0 +sql_distsql_service_latency_internal_bucket{le="9.437183e+06"} 1037.0 +sql_distsql_service_latency_internal_bucket{le="9.961471e+06"} 1109.0 +sql_distsql_service_latency_internal_bucket{le="1.0485759e+07"} 1192.0 +sql_distsql_service_latency_internal_bucket{le="1.1010047e+07"} 1245.0 +sql_distsql_service_latency_internal_bucket{le="1.1534335e+07"} 1293.0 +sql_distsql_service_latency_internal_bucket{le="1.2058623e+07"} 1335.0 +sql_distsql_service_latency_internal_bucket{le="1.2582911e+07"} 1368.0 +sql_distsql_service_latency_internal_bucket{le="1.3107199e+07"} 1397.0 +sql_distsql_service_latency_internal_bucket{le="1.3631487e+07"} 1425.0 +sql_distsql_service_latency_internal_bucket{le="1.4155775e+07"} 1454.0 +sql_distsql_service_latency_internal_bucket{le="1.4680063e+07"} 1468.0 +sql_distsql_service_latency_internal_bucket{le="1.5204351e+07"} 1482.0 +sql_distsql_service_latency_internal_bucket{le="1.5728639e+07"} 1490.0 +sql_distsql_service_latency_internal_bucket{le="1.6252927e+07"} 1503.0 +sql_distsql_service_latency_internal_bucket{le="1.6777215e+07"} 1509.0 +sql_distsql_service_latency_internal_bucket{le="1.7825791e+07"} 1523.0 +sql_distsql_service_latency_internal_bucket{le="1.8874367e+07"} 1531.0 +sql_distsql_service_latency_internal_bucket{le="1.9922943e+07"} 1542.0 +sql_distsql_service_latency_internal_bucket{le="2.0971519e+07"} 1553.0 +sql_distsql_service_latency_internal_bucket{le="2.2020095e+07"} 1561.0 +sql_distsql_service_latency_internal_bucket{le="2.3068671e+07"} 1563.0 +sql_distsql_service_latency_internal_bucket{le="2.4117247e+07"} 1565.0 +sql_distsql_service_latency_internal_bucket{le="2.5165823e+07"} 1568.0 +sql_distsql_service_latency_internal_bucket{le="2.6214399e+07"} 1569.0 +sql_distsql_service_latency_internal_bucket{le="2.7262975e+07"} 1572.0 +sql_distsql_service_latency_internal_bucket{le="2.8311551e+07"} 1575.0 +sql_distsql_service_latency_internal_bucket{le="2.9360127e+07"} 1576.0 +sql_distsql_service_latency_internal_bucket{le="3.5651583e+07"} 1577.0 +sql_distsql_service_latency_internal_bucket{le="4.6137343e+07"} 1579.0 +sql_distsql_service_latency_internal_bucket{le="7.1303167e+07"} 1580.0 +sql_distsql_service_latency_internal_bucket{le="1.42606335e+08"} 
1581.0 +sql_distsql_service_latency_internal_bucket{le="1.040187391e+09"} 1582.0 +sql_distsql_service_latency_internal_bucket{le="1.0200547327e+10"} 1583.0 +sql_distsql_service_latency_internal_bucket{le="+Inf"} 1583.0 +sql_distsql_service_latency_internal_sum 2.5664813521e+10 +sql_distsql_service_latency_internal_count 1583.0 +# HELP liveness_epochincrements Number of times this node has incremented its liveness epoch +# TYPE liveness_epochincrements counter +liveness_epochincrements 0.0 +# HELP distsender_rangelookups Number of range lookups. +# TYPE distsender_rangelookups counter +distsender_rangelookups 11.0 +# HELP sys_fd_softlimit Process open FD soft limit +# TYPE sys_fd_softlimit gauge +sys_fd_softlimit 1.048576e+06 +# HELP sys_host_disk_weightedio_time Weighted time spent reading from or writing to to all disks since this process started +# TYPE sys_host_disk_weightedio_time gauge +sys_host_disk_weightedio_time 5.89e+08 +# HELP sql_delete_count Number of SQL DELETE statements successfully executed +# TYPE sql_delete_count counter +sql_delete_count 0.0 +# HELP sql_distsql_service_latency Latency of DistSQL request execution +# TYPE sql_distsql_service_latency histogram +sql_distsql_service_latency_bucket{le="+Inf"} 0.0 +sql_distsql_service_latency_sum 0.0 +sql_distsql_service_latency_count 0.0 +# HELP sql_delete_started_count_internal Number of SQL DELETE statements started (internal queries) +# TYPE sql_delete_started_count_internal counter +sql_delete_started_count_internal 505.0 +# HELP sql_restart_savepoint_count_internal Number of `SAVEPOINT cockroach_restart` statements successfully executed (internal queries) +# TYPE sql_restart_savepoint_count_internal counter +sql_restart_savepoint_count_internal 0.0 +# HELP rpc_heartbeats_failed Gauge of current connections in the failed state +# TYPE rpc_heartbeats_failed gauge +rpc_heartbeats_failed 0.0 +# HELP rocksdb_encryption_algorithm algorithm in use for encryption-at-rest, see ccl/storageccl/engineccl/enginepbccl/key_registry.proto +# TYPE rocksdb_encryption_algorithm gauge +rocksdb_encryption_algorithm{store="1"} 0.0 +# HELP queue_gc_info_pushtxn Number of attempted pushes +# TYPE queue_gc_info_pushtxn counter +queue_gc_info_pushtxn{store="1"} 0.0 +# HELP livecount Count of live keys +# TYPE livecount gauge +livecount{store="1"} 116757.0 +# HELP raft_entrycache_bytes Aggregate size of all Raft entries in the Raft entry cache +# TYPE raft_entrycache_bytes gauge +raft_entrycache_bytes{store="1"} 115690.0 +# HELP queue_replicagc_removereplica Number of replica removals attempted by the replica gc queue +# TYPE queue_replicagc_removereplica counter +queue_replicagc_removereplica{store="1"} 0.0 +# HELP sys_cgo_totalbytes Total bytes of memory allocated by cgo, but not released +# TYPE sys_cgo_totalbytes gauge +sys_cgo_totalbytes 8.1698816e+07 +# HELP sql_conns Number of active sql connections +# TYPE sql_conns gauge +sql_conns 0.0 +# HELP sql_mem_conns_max Memory usage per sql statement for conns +# TYPE sql_mem_conns_max histogram +sql_mem_conns_max_bucket{le="+Inf"} 0.0 +sql_mem_conns_max_sum 0.0 +sql_mem_conns_max_count 0.0 +# HELP sql_delete_started_count Number of SQL DELETE statements started +# TYPE sql_delete_started_count counter +sql_delete_started_count 0.0 +# HELP sql_failure_count Number of statements resulting in a planning or runtime error +# TYPE sql_failure_count counter +sql_failure_count 0.0 +# HELP node_id node ID with labels for advertised RPC and HTTP addresses +# TYPE node_id gauge 
+node_id{advertise_addr="roach1:26257",http_addr="roach1:8080",sql_addr="roach1:26257"} 1.0
+# HELP valcount Count of all values
+# TYPE valcount gauge
+valcount{store="1"} 124081.0
+# HELP range_snapshots_generated Number of generated snapshots
+# TYPE range_snapshots_generated counter
+range_snapshots_generated{store="1"} 0.0
+# HELP sys_host_net_send_bytes Bytes sent on all network interfaces since this process started
+# TYPE sys_host_net_send_bytes gauge
+sys_host_net_send_bytes 4.61746036e+08
+# HELP sql_insert_started_count_internal Number of SQL INSERT statements started (internal queries)
+# TYPE sql_insert_started_count_internal counter
+sql_insert_started_count_internal 516.0
+# HELP sql_service_latency_internal Latency of SQL request execution (internal queries)
+# TYPE sql_service_latency_internal histogram
+sql_service_latency_internal_bucket{le="2.752511e+06"} 1.0
+sql_service_latency_internal_bucket{le="2.883583e+06"} 4.0
+sql_service_latency_internal_bucket{le="3.014655e+06"} 6.0
+sql_service_latency_internal_bucket{le="3.145727e+06"} 8.0
+sql_service_latency_internal_bucket{le="3.276799e+06"} 15.0
+sql_service_latency_internal_bucket{le="3.407871e+06"} 24.0
+sql_service_latency_internal_bucket{le="3.538943e+06"} 31.0
+sql_service_latency_internal_bucket{le="3.670015e+06"} 45.0
+sql_service_latency_internal_bucket{le="3.801087e+06"} 59.0
+sql_service_latency_internal_bucket{le="3.932159e+06"} 76.0
+sql_service_latency_internal_bucket{le="4.063231e+06"} 103.0
+sql_service_latency_internal_bucket{le="4.194303e+06"} 127.0
+sql_service_latency_internal_bucket{le="4.456447e+06"} 190.0
+sql_service_latency_internal_bucket{le="4.718591e+06"} 249.0
+sql_service_latency_internal_bucket{le="4.980735e+06"} 342.0
+sql_service_latency_internal_bucket{le="5.242879e+06"} 438.0
+sql_service_latency_internal_bucket{le="5.505023e+06"} 520.0
+sql_service_latency_internal_bucket{le="5.767167e+06"} 585.0
+sql_service_latency_internal_bucket{le="6.029311e+06"} 685.0
+sql_service_latency_internal_bucket{le="6.291455e+06"} 777.0
+sql_service_latency_internal_bucket{le="6.553599e+06"} 866.0
+sql_service_latency_internal_bucket{le="6.815743e+06"} 955.0
+sql_service_latency_internal_bucket{le="7.077887e+06"} 1036.0
+sql_service_latency_internal_bucket{le="7.340031e+06"} 1116.0
+sql_service_latency_internal_bucket{le="7.602175e+06"} 1188.0
+sql_service_latency_internal_bucket{le="7.864319e+06"} 1246.0
+sql_service_latency_internal_bucket{le="8.126463e+06"} 1310.0
+sql_service_latency_internal_bucket{le="8.388607e+06"} 1380.0
+sql_service_latency_internal_bucket{le="8.912895e+06"} 1497.0
+sql_service_latency_internal_bucket{le="9.437183e+06"} 1593.0
+sql_service_latency_internal_bucket{le="9.961471e+06"} 1686.0
+sql_service_latency_internal_bucket{le="1.0485759e+07"} 1792.0
+sql_service_latency_internal_bucket{le="1.1010047e+07"} 1865.0
+sql_service_latency_internal_bucket{le="1.1534335e+07"} 1931.0
+sql_service_latency_internal_bucket{le="1.2058623e+07"} 1998.0
+sql_service_latency_internal_bucket{le="1.2582911e+07"} 2057.0
+sql_service_latency_internal_bucket{le="1.3107199e+07"} 2116.0
+sql_service_latency_internal_bucket{le="1.3631487e+07"} 2172.0
+sql_service_latency_internal_bucket{le="1.4155775e+07"} 2228.0
+sql_service_latency_internal_bucket{le="1.4680063e+07"} 2279.0
+sql_service_latency_internal_bucket{le="1.5204351e+07"} 2315.0
+sql_service_latency_internal_bucket{le="1.5728639e+07"} 2353.0
+sql_service_latency_internal_bucket{le="1.6252927e+07"} 2386.0
+sql_service_latency_internal_bucket{le="1.6777215e+07"} 2415.0 +sql_service_latency_internal_bucket{le="1.7825791e+07"} 2465.0 +sql_service_latency_internal_bucket{le="1.8874367e+07"} 2501.0 +sql_service_latency_internal_bucket{le="1.9922943e+07"} 2525.0 +sql_service_latency_internal_bucket{le="2.0971519e+07"} 2546.0 +sql_service_latency_internal_bucket{le="2.2020095e+07"} 2563.0 +sql_service_latency_internal_bucket{le="2.3068671e+07"} 2581.0 +sql_service_latency_internal_bucket{le="2.4117247e+07"} 2592.0 +sql_service_latency_internal_bucket{le="2.5165823e+07"} 2603.0 +sql_service_latency_internal_bucket{le="2.6214399e+07"} 2614.0 +sql_service_latency_internal_bucket{le="2.7262975e+07"} 2619.0 +sql_service_latency_internal_bucket{le="2.8311551e+07"} 2625.0 +sql_service_latency_internal_bucket{le="2.9360127e+07"} 2629.0 +sql_service_latency_internal_bucket{le="3.0408703e+07"} 2632.0 +sql_service_latency_internal_bucket{le="3.5651583e+07"} 2633.0 +sql_service_latency_internal_bucket{le="3.7748735e+07"} 2634.0 +sql_service_latency_internal_bucket{le="3.9845887e+07"} 2636.0 +sql_service_latency_internal_bucket{le="4.1943039e+07"} 2639.0 +sql_service_latency_internal_bucket{le="4.4040191e+07"} 2640.0 +sql_service_latency_internal_bucket{le="4.6137343e+07"} 2644.0 +sql_service_latency_internal_bucket{le="4.8234495e+07"} 2646.0 +sql_service_latency_internal_bucket{le="5.0331647e+07"} 2647.0 +sql_service_latency_internal_bucket{le="5.2428799e+07"} 2648.0 +sql_service_latency_internal_bucket{le="7.1303167e+07"} 2649.0 +sql_service_latency_internal_bucket{le="1.25829119e+08"} 2650.0 +sql_service_latency_internal_bucket{le="1.42606335e+08"} 2651.0 +sql_service_latency_internal_bucket{le="2.18103807e+08"} 2652.0 +sql_service_latency_internal_bucket{le="2.26492415e+08"} 2653.0 +sql_service_latency_internal_bucket{le="5.20093695e+08"} 2654.0 +sql_service_latency_internal_bucket{le="1.040187391e+09"} 2655.0 +sql_service_latency_internal_bucket{le="1.0200547327e+10"} 2656.0 +sql_service_latency_internal_bucket{le="+Inf"} 2656.0 +sql_service_latency_internal_sum 3.8702937504e+10 +sql_service_latency_internal_count 2656.0 +# HELP rocksdb_block_cache_usage Bytes used by the block cache +# TYPE rocksdb_block_cache_usage gauge +rocksdb_block_cache_usage{store="1"} 3.9397184e+07 +# HELP compactor_compactions_failure Number of failed compaction requests sent to the storage engine +# TYPE compactor_compactions_failure counter +compactor_compactions_failure{store="1"} 0.0 +# HELP sql_txn_begin_count Number of SQL transaction BEGIN statements successfully executed +# TYPE sql_txn_begin_count counter +sql_txn_begin_count 0.0 +# HELP sql_txn_commit_started_count Number of SQL transaction COMMIT statements started +# TYPE sql_txn_commit_started_count counter +sql_txn_commit_started_count 0.0 +# HELP range_snapshots_learner_applied Number of applied learner snapshots +# TYPE range_snapshots_learner_applied counter +range_snapshots_learner_applied{store="1"} 0.0 +# HELP raft_rcvd_heartbeat Number of (coalesced, if enabled) MsgHeartbeat messages received by this store +# TYPE raft_rcvd_heartbeat counter +raft_rcvd_heartbeat{store="1"} 9077.0 +# HELP queue_replicate_process_failure Number of replicas which failed processing in the replicate queue +# TYPE queue_replicate_process_failure counter +queue_replicate_process_failure{store="1"} 0.0 +# HELP txn_restarts_writetoooldmulti Number of restarts due to multiple concurrent writers committing first +# TYPE txn_restarts_writetoooldmulti counter 
+txn_restarts_writetoooldmulti 0.0
+# HELP sql_savepoint_count Number of SQL SAVEPOINT statements successfully executed
+# TYPE sql_savepoint_count counter
+sql_savepoint_count 0.0
+# HELP sql_update_started_count_internal Number of SQL UPDATE statements started (internal queries)
+# TYPE sql_update_started_count_internal counter
+sql_update_started_count_internal 16.0
+# HELP replicas_leaseholders Number of lease holders
+# TYPE replicas_leaseholders gauge
+replicas_leaseholders{store="1"} 7.0
+# HELP rocksdb_bloom_filter_prefix_checked Number of times the bloom filter was checked
+# TYPE rocksdb_bloom_filter_prefix_checked gauge
+rocksdb_bloom_filter_prefix_checked{store="1"} 27363.0
+# HELP queue_split_purgatory Number of replicas in the split queue's purgatory, waiting to become splittable
+# TYPE queue_split_purgatory gauge
+queue_split_purgatory{store="1"} 0.0
+# HELP queue_gc_info_resolvesuccess Number of successful intent resolutions
+# TYPE queue_gc_info_resolvesuccess counter
+queue_gc_info_resolvesuccess{store="1"} 0.0
+# HELP txnrecovery_successes_aborted Number of transaction recovery attempts that aborted a transaction
+# TYPE txnrecovery_successes_aborted counter
+txnrecovery_successes_aborted{store="1"} 0.0
+# HELP changefeed_max_behind_nanos Largest commit-to-emit duration of any running feed
+# TYPE changefeed_max_behind_nanos gauge
+changefeed_max_behind_nanos 0.0
+# HELP sql_misc_started_count_internal Number of other SQL statements started (internal queries)
+# TYPE sql_misc_started_count_internal counter
+sql_misc_started_count_internal 2.0
+# HELP rocksdb_block_cache_pinned_usage Bytes pinned by the block cache
+# TYPE rocksdb_block_cache_pinned_usage gauge
+rocksdb_block_cache_pinned_usage{store="1"} 0.0
+# HELP sql_bytesout Number of sql bytes sent
+# TYPE sql_bytesout counter
+sql_bytesout 0.0
+# HELP timeseries_write_bytes Total size in bytes of metric samples written to disk
+# TYPE timeseries_write_bytes counter
+timeseries_write_bytes 8.2810041e+07
+# HELP sql_txn_latency Latency of SQL transactions
+# TYPE sql_txn_latency histogram
+sql_txn_latency_bucket{le="+Inf"} 0.0
+sql_txn_latency_sum 0.0
+sql_txn_latency_count 0.0
+# HELP sql_optimizer_fallback_count_internal Number of statements which the cost-based optimizer was unable to plan (internal queries)
+# TYPE sql_optimizer_fallback_count_internal counter
+sql_optimizer_fallback_count_internal 0.0
+# HELP raft_rcvd_dropped Number of dropped incoming Raft messages
+# TYPE raft_rcvd_dropped counter
+raft_rcvd_dropped{store="1"} 0.0
+# HELP queue_tsmaintenance_processingnanos Nanoseconds spent processing replicas in the time series maintenance queue
+# TYPE queue_tsmaintenance_processingnanos counter
+queue_tsmaintenance_processingnanos{store="1"} 0.0
+# HELP queue_gc_info_numkeysaffected Number of keys with GC'able data
+# TYPE queue_gc_info_numkeysaffected counter
+queue_gc_info_numkeysaffected{store="1"} 50.0
+# HELP distsender_batches_partial Number of partial batches processed after being divided on range boundaries
+# TYPE distsender_batches_partial counter
+distsender_batches_partial 3848.0
+# HELP queue_gc_info_abortspanconsidered Number of AbortSpan entries old enough to be considered for removal
+# TYPE queue_gc_info_abortspanconsidered counter
+queue_gc_info_abortspanconsidered{store="1"} 0.0
+# HELP tscache_skl_read_pages Number of pages in the read timestamp cache
+# TYPE tscache_skl_read_pages gauge
+tscache_skl_read_pages{store="1"} 1.0
+# HELP txnwaitqueue_query_wait_time Histogram of durations spent in queue by queries
+# TYPE txnwaitqueue_query_wait_time histogram
+txnwaitqueue_query_wait_time_bucket{store="1",le="+Inf"} 0.0
+txnwaitqueue_query_wait_time_sum{store="1"} 0.0
+txnwaitqueue_query_wait_time_count{store="1"} 0.0
+# HELP sql_select_count_internal Number of SQL SELECT statements successfully executed (internal queries)
+# TYPE sql_select_count_internal counter
+sql_select_count_internal 1607.0
+# HELP liveness_livenodes Number of live nodes in the cluster (will be 0 if this node is not itself live)
+# TYPE liveness_livenodes gauge
+liveness_livenodes 3.0
+# HELP sql_query_count Number of SQL queries executed
+# TYPE sql_query_count counter
+sql_query_count 0.0
+# HELP sql_optimizer_plan_cache_hits_internal Number of non-prepared statements for which a cached plan was used (internal queries)
+# TYPE sql_optimizer_plan_cache_hits_internal counter
+sql_optimizer_plan_cache_hits_internal 2120.0
+# HELP leases_success Number of successful lease requests
+# TYPE leases_success counter
+leases_success{store="1"} 2260.0
+# HELP capacity_used Used storage capacity
+# TYPE capacity_used gauge
+capacity_used{store="1"} 1.31897916e+08
+# HELP compactor_compactions_success Number of successful compaction requests sent to the storage engine
+# TYPE compactor_compactions_success counter
+compactor_compactions_success{store="1"} 0.0
+# HELP txn_restarts_serializable Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE
+# TYPE txn_restarts_serializable counter
+txn_restarts_serializable 0.0
+# HELP queue_replicate_purgatory Number of replicas in the replicate queue's purgatory, awaiting allocation options
+# TYPE queue_replicate_purgatory gauge
+queue_replicate_purgatory{store="1"} 0.0
+# HELP queue_split_process_success Number of replicas successfully processed by the split queue
+# TYPE queue_split_process_success counter
+queue_split_process_success{store="1"} 0.0
+# HELP queue_merge_process_success Number of replicas successfully processed by the merge queue
+# TYPE queue_merge_process_success counter
+queue_merge_process_success{store="1"} 0.0
+# HELP changefeed_flushes Total flushes across all feeds
+# TYPE changefeed_flushes counter
+changefeed_flushes 0.0
+# HELP changefeed_buffer_entries_out Total entries leaving the buffer between raft and changefeed sinks
+# TYPE changefeed_buffer_entries_out counter
+changefeed_buffer_entries_out 0.0
+# HELP sys_host_disk_read_bytes Bytes read from all disks since this process started
+# TYPE sys_host_disk_read_bytes gauge
+sys_host_disk_read_bytes 4.3319296e+07
+# HELP sql_mem_internal_current Current sql statement memory usage for internal
+# TYPE sql_mem_internal_current gauge
+sql_mem_internal_current 0.0
+# HELP clock_offset_stddevnanos Stddev clock offset with other nodes
+# TYPE clock_offset_stddevnanos gauge
+clock_offset_stddevnanos 210665.0
+# HELP sql_misc_count_internal Number of other SQL statements successfully executed (internal queries)
+# TYPE sql_misc_count_internal counter
+sql_misc_count_internal 2.0
+# HELP sql_optimizer_count_internal Number of statements which ran with the cost-based optimizer (internal queries)
+# TYPE sql_optimizer_count_internal counter
+sql_optimizer_count_internal 4798.0
+# HELP intentcount Count of intent keys
+# TYPE intentcount gauge
+intentcount{store="1"} 0.0
+# HELP txnwaitqueue_pusher_waiting Number of pushers on the txn wait queue
+# TYPE txnwaitqueue_pusher_waiting gauge
+txnwaitqueue_pusher_waiting{store="1"} 0.0
+# HELP txn_restarts_unknown Number of restarts due to a unknown reasons
+# TYPE txn_restarts_unknown counter
+txn_restarts_unknown 0.0
+# HELP gossip_connections_incoming Number of active incoming gossip connections
+# TYPE gossip_connections_incoming gauge
+gossip_connections_incoming 0.0
+# HELP txn_restarts_txnaborted Number of restarts due to an abort by a concurrent transaction (usually due to deadlock)
+# TYPE txn_restarts_txnaborted counter
+txn_restarts_txnaborted 0.0
+# HELP clock_offset_meannanos Mean clock offset with other nodes
+# TYPE clock_offset_meannanos gauge
+clock_offset_meannanos -14326.0
+# HELP sys_host_disk_io_time Time spent reading from or writing to all disks since this process started
+# TYPE sys_host_disk_io_time gauge
+sys_host_disk_io_time 4.75e+08
+# HELP sql_ddl_started_count Number of SQL DDL statements started
+# TYPE sql_ddl_started_count counter
+sql_ddl_started_count 0.0
+# HELP sql_misc_started_count Number of other SQL statements started
+# TYPE sql_misc_started_count counter
+sql_misc_started_count 0.0
+# HELP sql_ddl_count_internal Number of SQL DDL statements successfully executed (internal queries)
+# TYPE sql_ddl_count_internal counter
+sql_ddl_count_internal 4.0
+# HELP rpc_heartbeats_initializing Gauge of current connections in the initializing state
+# TYPE rpc_heartbeats_initializing gauge
+rpc_heartbeats_initializing 0.0
+# HELP lastupdatenanos Timestamp at which bytes/keys/intents metrics were last updated
+# TYPE lastupdatenanos gauge
+lastupdatenanos{store="1"} 5.937496135985266e+18
+# HELP sql_mem_admin_session_current Current sql session memory usage for admin
+# TYPE sql_mem_admin_session_current gauge
+sql_mem_admin_session_current 0.0
+# HELP sql_distsql_queries_total Number of distributed SQL queries executed
+# TYPE sql_distsql_queries_total counter
+sql_distsql_queries_total 2660.0
+# HELP sql_optimizer_fallback_count Number of statements which the cost-based optimizer was unable to plan
+# TYPE sql_optimizer_fallback_count counter
+sql_optimizer_fallback_count 0.0
+# HELP replicas_quiescent Number of quiesced replicas
+# TYPE replicas_quiescent gauge
+replicas_quiescent{store="1"} 34.0
+# HELP rocksdb_compactions Number of table compactions
+# TYPE rocksdb_compactions gauge
+rocksdb_compactions{store="1"} 7.0
+# HELP raft_rcvd_app Number of MsgApp messages received by this store
+# TYPE raft_rcvd_app counter
+raft_rcvd_app{store="1"} 62111.0
+# HELP queue_gc_pending Number of pending replicas in the GC queue
+# TYPE queue_gc_pending gauge
+queue_gc_pending{store="1"} 0.0
+# HELP sql_mem_internal_session_max Memory usage per sql session for internal
+# TYPE sql_mem_internal_session_max histogram
+sql_mem_internal_session_max_bucket{le="4011.0"} 2123.0
+sql_mem_internal_session_max_bucket{le="4487.0"} 2142.0
+sql_mem_internal_session_max_bucket{le="+Inf"} 2142.0
+sql_mem_internal_session_max_sum 8.600606e+06
+sql_mem_internal_session_max_count 2142.0
+# HELP sql_mem_conns_session_current Current sql session memory usage for conns
+# TYPE sql_mem_conns_session_current gauge
+sql_mem_conns_session_current 0.0
+# HELP valbytes Number of bytes taken up by values
+# TYPE valbytes gauge
+valbytes{store="1"} 7.5527718e+07
+# HELP range_raftleadertransfers Number of raft leader transfers
+# TYPE range_raftleadertransfers counter
+range_raftleadertransfers{store="1"} 5.0
+# HELP gossip_infos_received Number of received gossip Info objects
+# TYPE gossip_infos_received counter
+gossip_infos_received 8.0
+# HELP sql_restart_savepoint_release_started_count_internal Number of `RELEASE SAVEPOINT cockroach_restart` statements started (internal queries)
+# TYPE sql_restart_savepoint_release_started_count_internal counter
+sql_restart_savepoint_release_started_count_internal 0.0
+# HELP sql_distsql_exec_latency_internal Latency of DistSQL statement execution (internal queries)
+# TYPE sql_distsql_exec_latency_internal histogram
+sql_distsql_exec_latency_internal_bucket{le="245759.0"} 3.0
+sql_distsql_exec_latency_internal_bucket{le="262143.0"} 8.0
+sql_distsql_exec_latency_internal_bucket{le="278527.0"} 23.0
+sql_distsql_exec_latency_internal_bucket{le="294911.0"} 53.0
+sql_distsql_exec_latency_internal_bucket{le="311295.0"} 68.0
+sql_distsql_exec_latency_internal_bucket{le="327679.0"} 89.0
+sql_distsql_exec_latency_internal_bucket{le="344063.0"} 120.0
+sql_distsql_exec_latency_internal_bucket{le="360447.0"} 149.0
+sql_distsql_exec_latency_internal_bucket{le="376831.0"} 181.0
+sql_distsql_exec_latency_internal_bucket{le="393215.0"} 223.0
+sql_distsql_exec_latency_internal_bucket{le="409599.0"} 250.0
+sql_distsql_exec_latency_internal_bucket{le="425983.0"} 266.0
+sql_distsql_exec_latency_internal_bucket{le="442367.0"} 287.0
+sql_distsql_exec_latency_internal_bucket{le="458751.0"} 304.0
+sql_distsql_exec_latency_internal_bucket{le="475135.0"} 318.0
+sql_distsql_exec_latency_internal_bucket{le="491519.0"} 329.0
+sql_distsql_exec_latency_internal_bucket{le="507903.0"} 340.0
+sql_distsql_exec_latency_internal_bucket{le="524287.0"} 347.0
+sql_distsql_exec_latency_internal_bucket{le="557055.0"} 358.0
+sql_distsql_exec_latency_internal_bucket{le="589823.0"} 369.0
+sql_distsql_exec_latency_internal_bucket{le="622591.0"} 378.0
+sql_distsql_exec_latency_internal_bucket{le="655359.0"} 383.0
+sql_distsql_exec_latency_internal_bucket{le="688127.0"} 389.0
+sql_distsql_exec_latency_internal_bucket{le="720895.0"} 394.0
+sql_distsql_exec_latency_internal_bucket{le="753663.0"} 397.0
+sql_distsql_exec_latency_internal_bucket{le="786431.0"} 402.0
+sql_distsql_exec_latency_internal_bucket{le="819199.0"} 405.0
+sql_distsql_exec_latency_internal_bucket{le="884735.0"} 408.0
+sql_distsql_exec_latency_internal_bucket{le="917503.0"} 409.0
+sql_distsql_exec_latency_internal_bucket{le="950271.0"} 411.0
+sql_distsql_exec_latency_internal_bucket{le="983039.0"} 412.0
+sql_distsql_exec_latency_internal_bucket{le="1.048575e+06"} 413.0
+sql_distsql_exec_latency_internal_bucket{le="1.114111e+06"} 416.0
+sql_distsql_exec_latency_internal_bucket{le="1.245183e+06"} 419.0
+sql_distsql_exec_latency_internal_bucket{le="1.310719e+06"} 420.0
+sql_distsql_exec_latency_internal_bucket{le="1.441791e+06"} 421.0
+sql_distsql_exec_latency_internal_bucket{le="1.507327e+06"} 422.0
+sql_distsql_exec_latency_internal_bucket{le="1.572863e+06"} 426.0
+sql_distsql_exec_latency_internal_bucket{le="1.638399e+06"} 427.0
+sql_distsql_exec_latency_internal_bucket{le="1.703935e+06"} 429.0
+sql_distsql_exec_latency_internal_bucket{le="1.769471e+06"} 439.0
+sql_distsql_exec_latency_internal_bucket{le="1.835007e+06"} 442.0
+sql_distsql_exec_latency_internal_bucket{le="1.900543e+06"} 460.0
+sql_distsql_exec_latency_internal_bucket{le="1.966079e+06"} 484.0
+sql_distsql_exec_latency_internal_bucket{le="2.031615e+06"} 510.0
+sql_distsql_exec_latency_internal_bucket{le="2.097151e+06"} 550.0
+sql_distsql_exec_latency_internal_bucket{le="2.228223e+06"} 612.0
+sql_distsql_exec_latency_internal_bucket{le="2.359295e+06"} 688.0
+sql_distsql_exec_latency_internal_bucket{le="2.490367e+06"} 766.0
+sql_distsql_exec_latency_internal_bucket{le="2.621439e+06"} 845.0 +sql_distsql_exec_latency_internal_bucket{le="2.752511e+06"} 913.0 +sql_distsql_exec_latency_internal_bucket{le="2.883583e+06"} 967.0 +sql_distsql_exec_latency_internal_bucket{le="3.014655e+06"} 1022.0 +sql_distsql_exec_latency_internal_bucket{le="3.145727e+06"} 1070.0 +sql_distsql_exec_latency_internal_bucket{le="3.276799e+06"} 1108.0 +sql_distsql_exec_latency_internal_bucket{le="3.407871e+06"} 1144.0 +sql_distsql_exec_latency_internal_bucket{le="3.538943e+06"} 1171.0 +sql_distsql_exec_latency_internal_bucket{le="3.670015e+06"} 1207.0 +sql_distsql_exec_latency_internal_bucket{le="3.801087e+06"} 1238.0 +sql_distsql_exec_latency_internal_bucket{le="3.932159e+06"} 1267.0 +sql_distsql_exec_latency_internal_bucket{le="4.063231e+06"} 1292.0 +sql_distsql_exec_latency_internal_bucket{le="4.194303e+06"} 1328.0 +sql_distsql_exec_latency_internal_bucket{le="4.456447e+06"} 1373.0 +sql_distsql_exec_latency_internal_bucket{le="4.718591e+06"} 1410.0 +sql_distsql_exec_latency_internal_bucket{le="4.980735e+06"} 1434.0 +sql_distsql_exec_latency_internal_bucket{le="5.242879e+06"} 1463.0 +sql_distsql_exec_latency_internal_bucket{le="5.505023e+06"} 1479.0 +sql_distsql_exec_latency_internal_bucket{le="5.767167e+06"} 1489.0 +sql_distsql_exec_latency_internal_bucket{le="6.029311e+06"} 1498.0 +sql_distsql_exec_latency_internal_bucket{le="6.291455e+06"} 1509.0 +sql_distsql_exec_latency_internal_bucket{le="6.553599e+06"} 1523.0 +sql_distsql_exec_latency_internal_bucket{le="6.815743e+06"} 1531.0 +sql_distsql_exec_latency_internal_bucket{le="7.077887e+06"} 1540.0 +sql_distsql_exec_latency_internal_bucket{le="7.340031e+06"} 1545.0 +sql_distsql_exec_latency_internal_bucket{le="7.602175e+06"} 1551.0 +sql_distsql_exec_latency_internal_bucket{le="7.864319e+06"} 1554.0 +sql_distsql_exec_latency_internal_bucket{le="8.126463e+06"} 1555.0 +sql_distsql_exec_latency_internal_bucket{le="8.388607e+06"} 1556.0 +sql_distsql_exec_latency_internal_bucket{le="8.912895e+06"} 1562.0 +sql_distsql_exec_latency_internal_bucket{le="9.437183e+06"} 1565.0 +sql_distsql_exec_latency_internal_bucket{le="9.961471e+06"} 1568.0 +sql_distsql_exec_latency_internal_bucket{le="1.0485759e+07"} 1571.0 +sql_distsql_exec_latency_internal_bucket{le="1.1534335e+07"} 1574.0 +sql_distsql_exec_latency_internal_bucket{le="1.2058623e+07"} 1575.0 +sql_distsql_exec_latency_internal_bucket{le="1.2582911e+07"} 1576.0 +sql_distsql_exec_latency_internal_bucket{le="1.8874367e+07"} 1578.0 +sql_distsql_exec_latency_internal_bucket{le="2.2020095e+07"} 1580.0 +sql_distsql_exec_latency_internal_bucket{le="3.5651583e+07"} 1581.0 +sql_distsql_exec_latency_internal_bucket{le="1.30023423e+08"} 1582.0 +sql_distsql_exec_latency_internal_bucket{le="1.0200547327e+10"} 1583.0 +sql_distsql_exec_latency_internal_bucket{le="+Inf"} 1583.0 +sql_distsql_exec_latency_internal_sum 1.4678473169e+10 +sql_distsql_exec_latency_internal_count 1583.0 +# HELP replicas_leaders_not_leaseholders Number of replicas that are Raft leaders whose range lease is held by another store +# TYPE replicas_leaders_not_leaseholders gauge +replicas_leaders_not_leaseholders{store="1"} 0.0 +# HELP capacity_reserved Capacity reserved for snapshots +# TYPE capacity_reserved gauge +capacity_reserved{store="1"} 0.0 +# HELP queue_merge_process_failure Number of replicas which failed processing in the merge queue +# TYPE queue_merge_process_failure counter +queue_merge_process_failure{store="1"} 0.0 +# HELP queue_raftlog_processingnanos Nanoseconds spent 
processing replicas in the Raft log queue +# TYPE queue_raftlog_processingnanos counter +queue_raftlog_processingnanos{store="1"} 9.05864517e+08 +# HELP queue_replicagc_process_failure Number of replicas which failed processing in the replica GC queue +# TYPE queue_replicagc_process_failure counter +queue_replicagc_process_failure{store="1"} 0.0 +# HELP sys_uptime Process uptime +# TYPE sys_uptime gauge +sys_uptime 12224.0 +# HELP tscache_skl_write_rotations Number of page rotations in the write timestamp cache +# TYPE tscache_skl_write_rotations counter +tscache_skl_write_rotations{store="1"} 0.0 +# HELP queue_replicate_removereplica Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition) +# TYPE queue_replicate_removereplica counter +queue_replicate_removereplica{store="1"} 0.0 +# HELP queue_split_process_failure Number of replicas which failed processing in the split queue +# TYPE queue_split_process_failure counter +queue_split_process_failure{store="1"} 0.0 +# HELP rocksdb_block_cache_hits Count of block cache hits +# TYPE rocksdb_block_cache_hits gauge +rocksdb_block_cache_hits{store="1"} 94825.0 +# HELP raft_rcvd_heartbeatresp Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store +# TYPE raft_rcvd_heartbeatresp counter +raft_rcvd_heartbeatresp{store="1"} 2091.0 +# HELP queue_replicagc_pending Number of pending replicas in the replica GC queue +# TYPE queue_replicagc_pending gauge +queue_replicagc_pending{store="1"} 0.0 +# HELP queue_tsmaintenance_process_failure Number of replicas which failed processing in the time series maintenance queue +# TYPE queue_tsmaintenance_process_failure counter +queue_tsmaintenance_process_failure{store="1"} 0.0 +# HELP intentresolver_async_throttled Number of intent resolution attempts not run asynchronously due to throttling +# TYPE intentresolver_async_throttled counter +intentresolver_async_throttled{store="1"} 0.0 +# HELP sql_txn_rollback_started_count_internal Number of SQL transaction ROLLBACK statements started (internal queries) +# TYPE sql_txn_rollback_started_count_internal counter +sql_txn_rollback_started_count_internal 0.0 +# HELP intentbytes Number of bytes in intent KV pairs +# TYPE intentbytes gauge +intentbytes{store="1"} 0.0 +# HELP rocksdb_memtable_total_size Current size of memtable in bytes +# TYPE rocksdb_memtable_total_size gauge +rocksdb_memtable_total_size{store="1"} 1.4375272e+07 +# HELP txnrecovery_successes_pending Number of transaction recovery attempts that left a transaction pending +# TYPE txnrecovery_successes_pending counter +txnrecovery_successes_pending{store="1"} 0.0 +# HELP txn_durations KV transaction durations +# TYPE txn_durations histogram +txn_durations_bucket{le="950271.0"} 1.0 +txn_durations_bucket{le="1.015807e+06"} 2.0 +txn_durations_bucket{le="1.114111e+06"} 5.0 +txn_durations_bucket{le="1.245183e+06"} 10.0 +txn_durations_bucket{le="1.310719e+06"} 19.0 +txn_durations_bucket{le="1.376255e+06"} 26.0 +txn_durations_bucket{le="1.441791e+06"} 37.0 +txn_durations_bucket{le="1.507327e+06"} 62.0 +txn_durations_bucket{le="1.572863e+06"} 99.0 +txn_durations_bucket{le="1.638399e+06"} 146.0 +txn_durations_bucket{le="1.703935e+06"} 200.0 +txn_durations_bucket{le="1.769471e+06"} 270.0 +txn_durations_bucket{le="1.835007e+06"} 356.0 +txn_durations_bucket{le="1.900543e+06"} 441.0 +txn_durations_bucket{le="1.966079e+06"} 549.0 +txn_durations_bucket{le="2.031615e+06"} 672.0 +txn_durations_bucket{le="2.097151e+06"} 785.0 
+txn_durations_bucket{le="2.228223e+06"} 993.0 +txn_durations_bucket{le="2.359295e+06"} 1210.0 +txn_durations_bucket{le="2.490367e+06"} 1430.0 +txn_durations_bucket{le="2.621439e+06"} 1627.0 +txn_durations_bucket{le="2.752511e+06"} 1852.0 +txn_durations_bucket{le="2.883583e+06"} 2073.0 +txn_durations_bucket{le="3.014655e+06"} 2318.0 +txn_durations_bucket{le="3.145727e+06"} 2541.0 +txn_durations_bucket{le="3.276799e+06"} 2796.0 +txn_durations_bucket{le="3.407871e+06"} 3039.0 +txn_durations_bucket{le="3.538943e+06"} 3283.0 +txn_durations_bucket{le="3.670015e+06"} 3508.0 +txn_durations_bucket{le="3.801087e+06"} 3731.0 +txn_durations_bucket{le="3.932159e+06"} 3942.0 +txn_durations_bucket{le="4.063231e+06"} 4114.0 +txn_durations_bucket{le="4.194303e+06"} 4281.0 +txn_durations_bucket{le="4.456447e+06"} 4572.0 +txn_durations_bucket{le="4.718591e+06"} 4809.0 +txn_durations_bucket{le="4.980735e+06"} 5010.0 +txn_durations_bucket{le="5.242879e+06"} 5187.0 +txn_durations_bucket{le="5.505023e+06"} 5351.0 +txn_durations_bucket{le="5.767167e+06"} 5492.0 +txn_durations_bucket{le="6.029311e+06"} 5627.0 +txn_durations_bucket{le="6.291455e+06"} 5743.0 +txn_durations_bucket{le="6.553599e+06"} 5858.0 +txn_durations_bucket{le="6.815743e+06"} 5975.0 +txn_durations_bucket{le="7.077887e+06"} 6082.0 +txn_durations_bucket{le="7.340031e+06"} 6167.0 +txn_durations_bucket{le="7.602175e+06"} 6242.0 +txn_durations_bucket{le="7.864319e+06"} 6304.0 +txn_durations_bucket{le="8.126463e+06"} 6356.0 +txn_durations_bucket{le="8.388607e+06"} 6399.0 +txn_durations_bucket{le="8.912895e+06"} 6499.0 +txn_durations_bucket{le="9.437183e+06"} 6572.0 +txn_durations_bucket{le="9.961471e+06"} 6658.0 +txn_durations_bucket{le="1.0485759e+07"} 6714.0 +txn_durations_bucket{le="1.1010047e+07"} 6785.0 +txn_durations_bucket{le="1.1534335e+07"} 6847.0 +txn_durations_bucket{le="1.2058623e+07"} 6899.0 +txn_durations_bucket{le="1.2582911e+07"} 6945.0 +txn_durations_bucket{le="1.3107199e+07"} 7001.0 +txn_durations_bucket{le="1.3631487e+07"} 7053.0 +txn_durations_bucket{le="1.4155775e+07"} 7109.0 +txn_durations_bucket{le="1.4680063e+07"} 7159.0 +txn_durations_bucket{le="1.5204351e+07"} 7183.0 +txn_durations_bucket{le="1.5728639e+07"} 7210.0 +txn_durations_bucket{le="1.6252927e+07"} 7239.0 +txn_durations_bucket{le="1.6777215e+07"} 7263.0 +txn_durations_bucket{le="1.7825791e+07"} 7302.0 +txn_durations_bucket{le="1.8874367e+07"} 7332.0 +txn_durations_bucket{le="1.9922943e+07"} 7357.0 +txn_durations_bucket{le="2.0971519e+07"} 7370.0 +txn_durations_bucket{le="2.2020095e+07"} 7389.0 +txn_durations_bucket{le="2.3068671e+07"} 7398.0 +txn_durations_bucket{le="2.4117247e+07"} 7409.0 +txn_durations_bucket{le="2.5165823e+07"} 7416.0 +txn_durations_bucket{le="2.6214399e+07"} 7423.0 +txn_durations_bucket{le="2.7262975e+07"} 7424.0 +txn_durations_bucket{le="2.8311551e+07"} 7430.0 +txn_durations_bucket{le="2.9360127e+07"} 7432.0 +txn_durations_bucket{le="3.0408703e+07"} 7435.0 +txn_durations_bucket{le="3.2505855e+07"} 7439.0 +txn_durations_bucket{le="3.3554431e+07"} 7440.0 +txn_durations_bucket{le="3.7748735e+07"} 7443.0 +txn_durations_bucket{le="3.9845887e+07"} 7447.0 +txn_durations_bucket{le="4.1943039e+07"} 7450.0 +txn_durations_bucket{le="4.6137343e+07"} 7452.0 +txn_durations_bucket{le="1.00663295e+08"} 7453.0 +txn_durations_bucket{le="1.04857599e+08"} 7454.0 +txn_durations_bucket{le="1.09051903e+08"} 7455.0 +txn_durations_bucket{le="1.17440511e+08"} 7456.0 +txn_durations_bucket{le="1.25829119e+08"} 7457.0 +txn_durations_bucket{le="1.34217727e+08"} 7458.0 
+txn_durations_bucket{le="2.18103807e+08"} 7459.0 +txn_durations_bucket{le="2.26492415e+08"} 7460.0 +txn_durations_bucket{le="5.20093695e+08"} 7461.0 +txn_durations_bucket{le="9.05969663e+08"} 7462.0 +txn_durations_bucket{le="1.006632959e+09"} 7463.0 +txn_durations_bucket{le="1.040187391e+09"} 7464.0 +txn_durations_bucket{le="4.563402751e+09"} 7465.0 +txn_durations_bucket{le="+Inf"} 7465.0 +txn_durations_sum 4.8816906967e+10 +txn_durations_count 7465.0 +# HELP sys_go_allocbytes Current bytes of memory allocated by go +# TYPE sys_go_allocbytes gauge +sys_go_allocbytes 1.06576224e+08 +# HELP sys_host_net_recv_bytes Bytes received on all network interfaces since this process started +# TYPE sys_host_net_recv_bytes gauge +sys_host_net_recv_bytes 2.34392325e+08 +# HELP raft_process_logcommit_latency Latency histogram for committing Raft log entries +# TYPE raft_process_logcommit_latency histogram +raft_process_logcommit_latency_bucket{store="1",le="229375.0"} 1.0 +raft_process_logcommit_latency_bucket{store="1",le="237567.0"} 3.0 +raft_process_logcommit_latency_bucket{store="1",le="245759.0"} 4.0 +raft_process_logcommit_latency_bucket{store="1",le="253951.0"} 6.0 +raft_process_logcommit_latency_bucket{store="1",le="262143.0"} 12.0 +raft_process_logcommit_latency_bucket{store="1",le="278527.0"} 19.0 +raft_process_logcommit_latency_bucket{store="1",le="294911.0"} 53.0 +raft_process_logcommit_latency_bucket{store="1",le="311295.0"} 106.0 +raft_process_logcommit_latency_bucket{store="1",le="327679.0"} 196.0 +raft_process_logcommit_latency_bucket{store="1",le="344063.0"} 323.0 +raft_process_logcommit_latency_bucket{store="1",le="360447.0"} 500.0 +raft_process_logcommit_latency_bucket{store="1",le="376831.0"} 713.0 +raft_process_logcommit_latency_bucket{store="1",le="393215.0"} 997.0 +raft_process_logcommit_latency_bucket{store="1",le="409599.0"} 1362.0 +raft_process_logcommit_latency_bucket{store="1",le="425983.0"} 1800.0 +raft_process_logcommit_latency_bucket{store="1",le="442367.0"} 2314.0 +raft_process_logcommit_latency_bucket{store="1",le="458751.0"} 2818.0 +raft_process_logcommit_latency_bucket{store="1",le="475135.0"} 3404.0 +raft_process_logcommit_latency_bucket{store="1",le="491519.0"} 4003.0 +raft_process_logcommit_latency_bucket{store="1",le="507903.0"} 4687.0 +raft_process_logcommit_latency_bucket{store="1",le="524287.0"} 5361.0 +raft_process_logcommit_latency_bucket{store="1",le="557055.0"} 6875.0 +raft_process_logcommit_latency_bucket{store="1",le="589823.0"} 8409.0 +raft_process_logcommit_latency_bucket{store="1",le="622591.0"} 10050.0 +raft_process_logcommit_latency_bucket{store="1",le="655359.0"} 11694.0 +raft_process_logcommit_latency_bucket{store="1",le="688127.0"} 13332.0 +raft_process_logcommit_latency_bucket{store="1",le="720895.0"} 15073.0 +raft_process_logcommit_latency_bucket{store="1",le="753663.0"} 16774.0 +raft_process_logcommit_latency_bucket{store="1",le="786431.0"} 18420.0 +raft_process_logcommit_latency_bucket{store="1",le="819199.0"} 19982.0 +raft_process_logcommit_latency_bucket{store="1",le="851967.0"} 21514.0 +raft_process_logcommit_latency_bucket{store="1",le="884735.0"} 22990.0 +raft_process_logcommit_latency_bucket{store="1",le="917503.0"} 24326.0 +raft_process_logcommit_latency_bucket{store="1",le="950271.0"} 25560.0 +raft_process_logcommit_latency_bucket{store="1",le="983039.0"} 26706.0 +raft_process_logcommit_latency_bucket{store="1",le="1.015807e+06"} 27822.0 +raft_process_logcommit_latency_bucket{store="1",le="1.048575e+06"} 28770.0 
+raft_process_logcommit_latency_bucket{store="1",le="1.114111e+06"} 30476.0 +raft_process_logcommit_latency_bucket{store="1",le="1.179647e+06"} 31927.0 +raft_process_logcommit_latency_bucket{store="1",le="1.245183e+06"} 33126.0 +raft_process_logcommit_latency_bucket{store="1",le="1.310719e+06"} 34230.0 +raft_process_logcommit_latency_bucket{store="1",le="1.376255e+06"} 35235.0 +raft_process_logcommit_latency_bucket{store="1",le="1.441791e+06"} 36152.0 +raft_process_logcommit_latency_bucket{store="1",le="1.507327e+06"} 36975.0 +raft_process_logcommit_latency_bucket{store="1",le="1.572863e+06"} 37751.0 +raft_process_logcommit_latency_bucket{store="1",le="1.638399e+06"} 38508.0 +raft_process_logcommit_latency_bucket{store="1",le="1.703935e+06"} 39195.0 +raft_process_logcommit_latency_bucket{store="1",le="1.769471e+06"} 39851.0 +raft_process_logcommit_latency_bucket{store="1",le="1.835007e+06"} 40441.0 +raft_process_logcommit_latency_bucket{store="1",le="1.900543e+06"} 40948.0 +raft_process_logcommit_latency_bucket{store="1",le="1.966079e+06"} 41384.0 +raft_process_logcommit_latency_bucket{store="1",le="2.031615e+06"} 41782.0 +raft_process_logcommit_latency_bucket{store="1",le="2.097151e+06"} 42108.0 +raft_process_logcommit_latency_bucket{store="1",le="2.228223e+06"} 42671.0 +raft_process_logcommit_latency_bucket{store="1",le="2.359295e+06"} 43132.0 +raft_process_logcommit_latency_bucket{store="1",le="2.490367e+06"} 43510.0 +raft_process_logcommit_latency_bucket{store="1",le="2.621439e+06"} 43807.0 +raft_process_logcommit_latency_bucket{store="1",le="2.752511e+06"} 44049.0 +raft_process_logcommit_latency_bucket{store="1",le="2.883583e+06"} 44270.0 +raft_process_logcommit_latency_bucket{store="1",le="3.014655e+06"} 44426.0 +raft_process_logcommit_latency_bucket{store="1",le="3.145727e+06"} 44569.0 +raft_process_logcommit_latency_bucket{store="1",le="3.276799e+06"} 44689.0 +raft_process_logcommit_latency_bucket{store="1",le="3.407871e+06"} 44794.0 +raft_process_logcommit_latency_bucket{store="1",le="3.538943e+06"} 44902.0 +raft_process_logcommit_latency_bucket{store="1",le="3.670015e+06"} 44988.0 +raft_process_logcommit_latency_bucket{store="1",le="3.801087e+06"} 45072.0 +raft_process_logcommit_latency_bucket{store="1",le="3.932159e+06"} 45158.0 +raft_process_logcommit_latency_bucket{store="1",le="4.063231e+06"} 45226.0 +raft_process_logcommit_latency_bucket{store="1",le="4.194303e+06"} 45274.0 +raft_process_logcommit_latency_bucket{store="1",le="4.456447e+06"} 45392.0 +raft_process_logcommit_latency_bucket{store="1",le="4.718591e+06"} 45477.0 +raft_process_logcommit_latency_bucket{store="1",le="4.980735e+06"} 45555.0 +raft_process_logcommit_latency_bucket{store="1",le="5.242879e+06"} 45619.0 +raft_process_logcommit_latency_bucket{store="1",le="5.505023e+06"} 45684.0 +raft_process_logcommit_latency_bucket{store="1",le="5.767167e+06"} 45723.0 +raft_process_logcommit_latency_bucket{store="1",le="6.029311e+06"} 45779.0 +raft_process_logcommit_latency_bucket{store="1",le="6.291455e+06"} 45817.0 +raft_process_logcommit_latency_bucket{store="1",le="6.553599e+06"} 45840.0 +raft_process_logcommit_latency_bucket{store="1",le="6.815743e+06"} 45875.0 +raft_process_logcommit_latency_bucket{store="1",le="7.077887e+06"} 45904.0 +raft_process_logcommit_latency_bucket{store="1",le="7.340031e+06"} 45919.0 +raft_process_logcommit_latency_bucket{store="1",le="7.602175e+06"} 45944.0 +raft_process_logcommit_latency_bucket{store="1",le="7.864319e+06"} 45962.0 
+raft_process_logcommit_latency_bucket{store="1",le="8.126463e+06"} 45972.0 +raft_process_logcommit_latency_bucket{store="1",le="8.388607e+06"} 45980.0 +raft_process_logcommit_latency_bucket{store="1",le="8.912895e+06"} 46004.0 +raft_process_logcommit_latency_bucket{store="1",le="9.437183e+06"} 46018.0 +raft_process_logcommit_latency_bucket{store="1",le="9.961471e+06"} 46029.0 +raft_process_logcommit_latency_bucket{store="1",le="1.0485759e+07"} 46038.0 +raft_process_logcommit_latency_bucket{store="1",le="1.1010047e+07"} 46044.0 +raft_process_logcommit_latency_bucket{store="1",le="1.1534335e+07"} 46049.0 +raft_process_logcommit_latency_bucket{store="1",le="1.2058623e+07"} 46058.0 +raft_process_logcommit_latency_bucket{store="1",le="1.2582911e+07"} 46060.0 +raft_process_logcommit_latency_bucket{store="1",le="1.3107199e+07"} 46066.0 +raft_process_logcommit_latency_bucket{store="1",le="1.3631487e+07"} 46068.0 +raft_process_logcommit_latency_bucket{store="1",le="1.4155775e+07"} 46070.0 +raft_process_logcommit_latency_bucket{store="1",le="1.4680063e+07"} 46071.0 +raft_process_logcommit_latency_bucket{store="1",le="1.5204351e+07"} 46072.0 +raft_process_logcommit_latency_bucket{store="1",le="1.5728639e+07"} 46073.0 +raft_process_logcommit_latency_bucket{store="1",le="1.6252927e+07"} 46076.0 +raft_process_logcommit_latency_bucket{store="1",le="1.7825791e+07"} 46079.0 +raft_process_logcommit_latency_bucket{store="1",le="1.8874367e+07"} 46083.0 +raft_process_logcommit_latency_bucket{store="1",le="1.9922943e+07"} 46084.0 +raft_process_logcommit_latency_bucket{store="1",le="2.0971519e+07"} 46086.0 +raft_process_logcommit_latency_bucket{store="1",le="2.2020095e+07"} 46087.0 +raft_process_logcommit_latency_bucket{store="1",le="2.4117247e+07"} 46088.0 +raft_process_logcommit_latency_bucket{store="1",le="2.5165823e+07"} 46089.0 +raft_process_logcommit_latency_bucket{store="1",le="3.0408703e+07"} 46090.0 +raft_process_logcommit_latency_bucket{store="1",le="3.1457279e+07"} 46091.0 +raft_process_logcommit_latency_bucket{store="1",le="3.7748735e+07"} 46093.0 +raft_process_logcommit_latency_bucket{store="1",le="4.1943039e+07"} 46094.0 +raft_process_logcommit_latency_bucket{store="1",le="4.6137343e+07"} 46095.0 +raft_process_logcommit_latency_bucket{store="1",le="4.8234495e+07"} 46096.0 +raft_process_logcommit_latency_bucket{store="1",le="5.0331647e+07"} 46097.0 +raft_process_logcommit_latency_bucket{store="1",le="7.5497471e+07"} 46098.0 +raft_process_logcommit_latency_bucket{store="1",le="2.09715199e+08"} 46099.0 +raft_process_logcommit_latency_bucket{store="1",le="2.18103807e+08"} 46101.0 +raft_process_logcommit_latency_bucket{store="1",le="4.19430399e+08"} 46102.0 +raft_process_logcommit_latency_bucket{store="1",le="6.37534207e+08"} 46103.0 +raft_process_logcommit_latency_bucket{store="1",le="9.05969663e+08"} 46104.0 +raft_process_logcommit_latency_bucket{store="1",le="9.73078527e+08"} 46105.0 +raft_process_logcommit_latency_bucket{store="1",le="1.006632959e+09"} 46106.0 +raft_process_logcommit_latency_bucket{store="1",le="1.040187391e+09"} 46108.0 +raft_process_logcommit_latency_bucket{store="1",le="1.0200547327e+10"} 46110.0 +raft_process_logcommit_latency_bucket{store="1",le="+Inf"} 46110.0 +raft_process_logcommit_latency_sum{store="1"} 8.2096278498e+10 +raft_process_logcommit_latency_count{store="1"} 46110.0 +# HELP queue_consistency_process_success Number of replicas successfully processed by the consistency checker queue +# TYPE queue_consistency_process_success counter 
+queue_consistency_process_success{store="1"} 7.0 +# HELP distsender_batches_async_throttled Number of partial batches not sent asynchronously due to throttling +# TYPE distsender_batches_async_throttled counter +distsender_batches_async_throttled 0.0 +# HELP liveness_heartbeatlatency Node liveness heartbeat latency +# TYPE liveness_heartbeatlatency histogram +liveness_heartbeatlatency_bucket{le="1.966079e+06"} 2.0 +liveness_heartbeatlatency_bucket{le="2.228223e+06"} 11.0 +liveness_heartbeatlatency_bucket{le="2.359295e+06"} 20.0 +liveness_heartbeatlatency_bucket{le="2.490367e+06"} 48.0 +liveness_heartbeatlatency_bucket{le="2.621439e+06"} 94.0 +liveness_heartbeatlatency_bucket{le="2.752511e+06"} 156.0 +liveness_heartbeatlatency_bucket{le="2.883583e+06"} 250.0 +liveness_heartbeatlatency_bucket{le="3.014655e+06"} 372.0 +liveness_heartbeatlatency_bucket{le="3.145727e+06"} 513.0 +liveness_heartbeatlatency_bucket{le="3.276799e+06"} 653.0 +liveness_heartbeatlatency_bucket{le="3.407871e+06"} 797.0 +liveness_heartbeatlatency_bucket{le="3.538943e+06"} 949.0 +liveness_heartbeatlatency_bucket{le="3.670015e+06"} 1110.0 +liveness_heartbeatlatency_bucket{le="3.801087e+06"} 1264.0 +liveness_heartbeatlatency_bucket{le="3.932159e+06"} 1399.0 +liveness_heartbeatlatency_bucket{le="4.063231e+06"} 1537.0 +liveness_heartbeatlatency_bucket{le="4.194303e+06"} 1648.0 +liveness_heartbeatlatency_bucket{le="4.456447e+06"} 1822.0 +liveness_heartbeatlatency_bucket{le="4.718591e+06"} 1987.0 +liveness_heartbeatlatency_bucket{le="4.980735e+06"} 2096.0 +liveness_heartbeatlatency_bucket{le="5.242879e+06"} 2191.0 +liveness_heartbeatlatency_bucket{le="5.505023e+06"} 2277.0 +liveness_heartbeatlatency_bucket{le="5.767167e+06"} 2330.0 +liveness_heartbeatlatency_bucket{le="6.029311e+06"} 2383.0 +liveness_heartbeatlatency_bucket{le="6.291455e+06"} 2436.0 +liveness_heartbeatlatency_bucket{le="6.553599e+06"} 2479.0 +liveness_heartbeatlatency_bucket{le="6.815743e+06"} 2519.0 +liveness_heartbeatlatency_bucket{le="7.077887e+06"} 2559.0 +liveness_heartbeatlatency_bucket{le="7.340031e+06"} 2596.0 +liveness_heartbeatlatency_bucket{le="7.602175e+06"} 2616.0 +liveness_heartbeatlatency_bucket{le="7.864319e+06"} 2635.0 +liveness_heartbeatlatency_bucket{le="8.126463e+06"} 2647.0 +liveness_heartbeatlatency_bucket{le="8.388607e+06"} 2657.0 +liveness_heartbeatlatency_bucket{le="8.912895e+06"} 2672.0 +liveness_heartbeatlatency_bucket{le="9.437183e+06"} 2687.0 +liveness_heartbeatlatency_bucket{le="9.961471e+06"} 2695.0 +liveness_heartbeatlatency_bucket{le="1.0485759e+07"} 2699.0 +liveness_heartbeatlatency_bucket{le="1.1010047e+07"} 2701.0 +liveness_heartbeatlatency_bucket{le="1.1534335e+07"} 2705.0 +liveness_heartbeatlatency_bucket{le="1.2058623e+07"} 2710.0 +liveness_heartbeatlatency_bucket{le="1.2582911e+07"} 2711.0 +liveness_heartbeatlatency_bucket{le="1.3631487e+07"} 2713.0 +liveness_heartbeatlatency_bucket{le="1.4155775e+07"} 2714.0 +liveness_heartbeatlatency_bucket{le="1.8874367e+07"} 2715.0 +liveness_heartbeatlatency_bucket{le="4.1943039e+07"} 2716.0 +liveness_heartbeatlatency_bucket{le="9.6468991e+07"} 2717.0 +liveness_heartbeatlatency_bucket{le="1.04857599e+08"} 2718.0 +liveness_heartbeatlatency_bucket{le="9.39524095e+08"} 2719.0 +liveness_heartbeatlatency_bucket{le="1.040187391e+09"} 2720.0 +liveness_heartbeatlatency_bucket{le="4.563402751e+09"} 2721.0 +liveness_heartbeatlatency_bucket{le="1.0200547327e+10"} 2722.0 +liveness_heartbeatlatency_bucket{le="+Inf"} 2722.0 +liveness_heartbeatlatency_sum 2.8913562974e+10 
+liveness_heartbeatlatency_count 2722.0 +# HELP sql_mem_internal_txn_current Current sql transaction memory usage for internal +# TYPE sql_mem_internal_txn_current gauge +sql_mem_internal_txn_current 0.0 +# HELP sql_exec_latency Latency of SQL statement execution +# TYPE sql_exec_latency histogram +sql_exec_latency_bucket{le="+Inf"} 0.0 +sql_exec_latency_sum 0.0 +sql_exec_latency_count 0.0 +# HELP sql_query_started_count_internal Number of SQL queries started (internal queries) +# TYPE sql_query_started_count_internal counter +sql_query_started_count_internal 2656.0 +# HELP queue_replicate_pending Number of pending replicas in the replicate queue +# TYPE queue_replicate_pending gauge +queue_replicate_pending{store="1"} 0.0 +# HELP queue_replicate_transferlease Number of range lease transfers attempted by the replicate queue +# TYPE queue_replicate_transferlease counter +queue_replicate_transferlease{store="1"} 0.0 +# HELP txn_autoretries Number of automatic retries to avoid serializable restarts +# TYPE txn_autoretries counter +txn_autoretries 0.0 +# HELP txn_aborts Number of aborted KV transactions +# TYPE txn_aborts counter +txn_aborts 1.0 +# HELP txn_restarts_writetooold Number of restarts due to a concurrent writer committing first +# TYPE txn_restarts_writetooold counter +txn_restarts_writetooold 0.0 +# HELP sys_cpu_user_percent Current user cpu percentage +# TYPE sys_cpu_user_percent gauge +sys_cpu_user_percent 0.02004312840283465 +# HELP sys_rss Current process RSS +# TYPE sys_rss gauge +sys_rss 3.14691584e+08 +# HELP changefeed_error_retries Total retryable errors encountered by all changefeeds +# TYPE changefeed_error_retries counter +changefeed_error_retries 0.0 +# HELP queue_raftsnapshot_pending Number of pending replicas in the Raft repair queue +# TYPE queue_raftsnapshot_pending gauge +queue_raftsnapshot_pending{store="1"} 0.0 +# HELP requests_backpressure_split Number of backpressured writes waiting on a Range split +# TYPE requests_backpressure_split gauge +requests_backpressure_split{store="1"} 0.0 +# HELP distsender_rpc_sent_nextreplicaerror Number of RPCs sent due to per-replica errors +# TYPE distsender_rpc_sent_nextreplicaerror counter +distsender_rpc_sent_nextreplicaerror 15.0 +# HELP sql_select_count Number of SQL SELECT statements successfully executed +# TYPE sql_select_count counter +sql_select_count 0.0 +# HELP leases_expiration Number of replica leaseholders using expiration-based leases +# TYPE leases_expiration gauge +leases_expiration{store="1"} 1.0 +# HELP queue_gc_info_transactionspanscanned Number of entries in transaction spans scanned from the engine +# TYPE queue_gc_info_transactionspanscanned counter +queue_gc_info_transactionspanscanned{store="1"} 0.0 +# HELP txnrecovery_successes_committed Number of transaction recovery attempts that committed a transaction +# TYPE txnrecovery_successes_committed counter +txnrecovery_successes_committed{store="1"} 0.0 +# HELP distsender_batches_async_sent Number of partial batches sent asynchronously +# TYPE distsender_batches_async_sent counter +distsender_batches_async_sent 1590.0 +# HELP sql_txn_commit_started_count_internal Number of SQL transaction COMMIT statements started (internal queries) +# TYPE sql_txn_commit_started_count_internal counter +sql_txn_commit_started_count_internal 0.0 +# HELP sql_restart_savepoint_release_count_internal Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed (internal queries) +# TYPE sql_restart_savepoint_release_count_internal counter 
+sql_restart_savepoint_release_count_internal 0.0 +# HELP syscount Count of system KV pairs +# TYPE syscount gauge +syscount{store="1"} 147.0 +# HELP rocksdb_bloom_filter_prefix_useful Number of times the bloom filter helped avoid iterator creation +# TYPE rocksdb_bloom_filter_prefix_useful gauge +rocksdb_bloom_filter_prefix_useful{store="1"} 11962.0 +# HELP rocksdb_estimated_pending_compaction Estimated pending compaction bytes +# TYPE rocksdb_estimated_pending_compaction gauge +rocksdb_estimated_pending_compaction{store="1"} 0.0 +# HELP queue_gc_info_intentsconsidered Number of 'old' intents +# TYPE queue_gc_info_intentsconsidered counter +queue_gc_info_intentsconsidered{store="1"} 0.0 +# HELP queue_gc_info_transactionspangcpending Number of GC'able entries corresponding to pending txns +# TYPE queue_gc_info_transactionspangcpending counter +queue_gc_info_transactionspangcpending{store="1"} 0.0 +# HELP exec_success Number of batch KV requests executed successfully on this node +# TYPE exec_success counter +exec_success 10074.0 +# HELP sys_host_disk_read_count Disk read operations across all disks since this process started +# TYPE sys_host_disk_read_count gauge +sys_host_disk_read_count 1176.0 +# HELP compactor_suggestionbytes_queued Number of logical bytes in suggested compactions in the queue +# TYPE compactor_suggestionbytes_queued gauge +compactor_suggestionbytes_queued{store="1"} 0.0 +# HELP txn_restarts_asyncwritefailure Number of restarts due to async consensus writes that failed to leave intents +# TYPE txn_restarts_asyncwritefailure counter +txn_restarts_asyncwritefailure 0.0 +# HELP sys_fd_open Process open file descriptors +# TYPE sys_fd_open gauge +sys_fd_open 47.0 +# HELP changefeed_emit_nanos Total time spent emitting all feeds +# TYPE changefeed_emit_nanos counter +changefeed_emit_nanos 0.0 +# HELP sql_mem_sql_session_current Current sql session memory usage for sql +# TYPE sql_mem_sql_session_current gauge +sql_mem_sql_session_current 0.0 +sql_mem_sql_session_current 0.0 +# HELP sql_mem_conns_txn_current Current sql transaction memory usage for conns +# TYPE sql_mem_conns_txn_current gauge +sql_mem_conns_txn_current 0.0 +# HELP txnwaitqueue_deadlocks_total Number of deadlocks detected by the txn wait queue +# TYPE txnwaitqueue_deadlocks_total counter +txnwaitqueue_deadlocks_total{store="1"} 0.0 +# HELP sql_mem_internal_txn_max Memory usage per sql transaction for internal +# TYPE sql_mem_internal_txn_max histogram +sql_mem_internal_txn_max_bucket{le="4011.0"} 1058.0 +sql_mem_internal_txn_max_bucket{le="4311.0"} 1060.0 +sql_mem_internal_txn_max_bucket{le="4615.0"} 1098.0 +sql_mem_internal_txn_max_bucket{le="4967.0"} 1100.0 +sql_mem_internal_txn_max_bucket{le="+Inf"} 1100.0 +sql_mem_internal_txn_max_sum 4.437564e+06 +sql_mem_internal_txn_max_count 1100.0 +# HELP sql_txn_abort_count_internal Number of SQL transaction abort errors (internal queries) +# TYPE sql_txn_abort_count_internal counter +sql_txn_abort_count_internal 0.0 +# HELP leases_epoch Number of replica leaseholders using epoch-based leases +# TYPE leases_epoch gauge +leases_epoch{store="1"} 6.0 +# HELP follower_reads_success_count Number of reads successfully processed by any replica +# TYPE follower_reads_success_count counter +follower_reads_success_count{store="1"} 2.0 +# HELP raft_ticks Number of Raft ticks queued +# TYPE raft_ticks counter +raft_ticks{store="1"} 61183.0 +# HELP queue_gc_info_abortspanscanned Number of transactions present in the AbortSpan scanned from the engine +# TYPE 
queue_gc_info_abortspanscanned counter +queue_gc_info_abortspanscanned{store="1"} 1.0 +# HELP raft_entrycache_hits Number of successful cache lookups in the Raft entry cache +# TYPE raft_entrycache_hits counter +raft_entrycache_hits{store="1"} 49670.0 +# HELP sql_mem_sql_session_max Memory usage per sql session for sql +# TYPE sql_mem_sql_session_max histogram +sql_mem_sql_session_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_session_max_sum 0.0 +sql_mem_sql_session_max_count 0.0 +sql_mem_sql_session_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_session_max_sum 0.0 +sql_mem_sql_session_max_count 0.0 +# HELP sql_restart_savepoint_rollback_started_count_internal Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started (internal queries) +# TYPE sql_restart_savepoint_rollback_started_count_internal counter +sql_restart_savepoint_rollback_started_count_internal 0.0 +# HELP sql_distsql_select_count_internal Number of DistSQL SELECT statements (internal queries) +# TYPE sql_distsql_select_count_internal counter +sql_distsql_select_count_internal 1583.0 +# HELP replicas_reserved Number of replicas reserved for snapshots +# TYPE replicas_reserved gauge +replicas_reserved{store="1"} 0.0 +# HELP livebytes Number of bytes of live data (keys plus values) +# TYPE livebytes gauge +livebytes{store="1"} 8.1979227e+07 +# HELP keybytes Number of bytes taken up by keys +# TYPE keybytes gauge +keybytes{store="1"} 6.730852e+06 +# HELP range_adds Number of range additions +# TYPE range_adds counter +range_adds{store="1"} 0.0 +# HELP range_snapshots_preemptive_applied Number of applied pre-emptive snapshots +# TYPE range_snapshots_preemptive_applied counter +range_snapshots_preemptive_applied{store="1"} 0.0 +# HELP changefeed_emitted_messages Messages emitted by all feeds +# TYPE changefeed_emitted_messages counter +changefeed_emitted_messages 0.0 +# HELP queue_gc_process_failure Number of replicas which failed processing in the GC queue +# TYPE queue_gc_process_failure counter +queue_gc_process_failure{store="1"} 0.0 +# HELP queue_gc_processingnanos Nanoseconds spent processing replicas in the GC queue +# TYPE queue_gc_processingnanos counter +queue_gc_processingnanos{store="1"} 1.21329751e+08 +# HELP raft_entrycache_accesses Number of cache lookups in the Raft entry cache +# TYPE raft_entrycache_accesses counter +raft_entrycache_accesses{store="1"} 49766.0 +# HELP txnwaitqueue_query_waiting Number of transaction status queries waiting for an updated transaction record +# TYPE txnwaitqueue_query_waiting gauge +txnwaitqueue_query_waiting{store="1"} 0.0 +# HELP queue_gc_process_success Number of replicas successfully processed by the GC queue +# TYPE queue_gc_process_success counter +queue_gc_process_success{store="1"} 9.0 +# HELP sql_mem_bulk_current Current sql statement memory usage for bulk operations +# TYPE sql_mem_bulk_current gauge +sql_mem_bulk_current 0.0 +# HELP sql_distsql_queries_active Number of distributed SQL queries currently active +# TYPE sql_distsql_queries_active gauge +sql_distsql_queries_active 0.0 +# HELP sql_restart_savepoint_started_count Number of `SAVEPOINT cockroach_restart` statements started +# TYPE sql_restart_savepoint_started_count counter +sql_restart_savepoint_started_count 0.0 +# HELP sql_txn_commit_count Number of SQL transaction COMMIT statements successfully executed +# TYPE sql_txn_commit_count counter +sql_txn_commit_count 0.0 +# HELP txn_restarts Number of restarted KV transactions +# TYPE txn_restarts histogram +txn_restarts_bucket{le="+Inf"} 0.0 +txn_restarts_sum 0.0 
+txn_restarts_count 0.0 +# HELP sql_bytesin Number of sql bytes received +# TYPE sql_bytesin counter +sql_bytesin 0.0 +# HELP sql_distsql_select_count Number of DistSQL SELECT statements +# TYPE sql_distsql_select_count counter +sql_distsql_select_count 0.0 +# HELP rocksdb_table_readers_mem_estimate Memory used by index and filter blocks +# TYPE rocksdb_table_readers_mem_estimate gauge +rocksdb_table_readers_mem_estimate{store="1"} 122624.0 +# HELP raft_rcvd_appresp Number of MsgAppResp messages received by this store +# TYPE raft_rcvd_appresp counter +raft_rcvd_appresp{store="1"} 67681.0 +# HELP sys_cpu_sys_ns Total system cpu time +# TYPE sys_cpu_sys_ns gauge +sys_cpu_sys_ns 1.5442e+11 +# HELP distsender_rpc_sent Number of RPCs sent +# TYPE distsender_rpc_sent counter +distsender_rpc_sent 58459.0 +# HELP sql_mem_admin_current Current sql statement memory usage for admin +# TYPE sql_mem_admin_current gauge +sql_mem_admin_current 0.0 +# HELP build_timestamp Build information +# TYPE build_timestamp gauge +build_timestamp{tag="v19.2.2",go_version="go1.12.12"} 1.576028023e+09 +# HELP sql_distsql_flows_queued Number of distributed SQL flows currently queued +# TYPE sql_distsql_flows_queued gauge +sql_distsql_flows_queued 0.0 +# HELP sql_mem_sql_current Current sql statement memory usage for sql +# TYPE sql_mem_sql_current gauge +sql_mem_sql_current 0.0 +sql_mem_sql_current 0.0 +# HELP sql_ddl_count Number of SQL DDL statements successfully executed +# TYPE sql_ddl_count counter +sql_ddl_count 0.0 +# HELP replicas Number of replicas +# TYPE replicas gauge +replicas{store="1"} 34.0 +# HELP rpc_heartbeats_loops_started Counter of the number of connection heartbeat loops which have been started +# TYPE rpc_heartbeats_loops_started counter +rpc_heartbeats_loops_started 7.0 +# HELP queue_gc_info_transactionspangccommitted Number of GC'able entries corresponding to committed txns +# TYPE queue_gc_info_transactionspangccommitted counter +queue_gc_info_transactionspangccommitted{store="1"} 0.0 +# HELP intents_abort_attempts Count of (point or range) non-poisoning intent abort evaluation attempts +# TYPE intents_abort_attempts counter +intents_abort_attempts{store="1"} 0.0 +# HELP sys_go_totalbytes Total bytes of memory allocated by go, but not released +# TYPE sys_go_totalbytes gauge +sys_go_totalbytes 1.97562616e+08 +# HELP engine_stalls Number of disk stalls detected on this node +# TYPE engine_stalls counter +engine_stalls 0.0 +# HELP sql_restart_savepoint_count Number of `SAVEPOINT cockroach_restart` statements successfully executed +# TYPE sql_restart_savepoint_count counter +sql_restart_savepoint_count 0.0 +# HELP sysbytes Number of bytes in system KV pairs +# TYPE sysbytes gauge +sysbytes{store="1"} 13327.0 +# HELP raft_rcvd_prevote Number of MsgPreVote messages received by this store +# TYPE raft_rcvd_prevote counter +raft_rcvd_prevote{store="1"} 32.0 +# HELP liveness_heartbeatfailures Number of failed node liveness heartbeats from this node +# TYPE liveness_heartbeatfailures counter +liveness_heartbeatfailures 2.0 +# HELP sql_ddl_started_count_internal Number of SQL DDL statements started (internal queries) +# TYPE sql_ddl_started_count_internal counter +sql_ddl_started_count_internal 10.0 +# HELP sql_txn_latency_internal Latency of SQL transactions (internal queries) +# TYPE sql_txn_latency_internal histogram +sql_txn_latency_internal_bucket{le="1.441791e+06"} 1.0 +sql_txn_latency_internal_bucket{le="1.572863e+06"} 5.0 +sql_txn_latency_internal_bucket{le="1.638399e+06"} 9.0 
+sql_txn_latency_internal_bucket{le="1.703935e+06"} 16.0 +sql_txn_latency_internal_bucket{le="1.769471e+06"} 26.0 +sql_txn_latency_internal_bucket{le="1.835007e+06"} 42.0 +sql_txn_latency_internal_bucket{le="1.900543e+06"} 56.0 +sql_txn_latency_internal_bucket{le="1.966079e+06"} 73.0 +sql_txn_latency_internal_bucket{le="2.031615e+06"} 97.0 +sql_txn_latency_internal_bucket{le="2.097151e+06"} 134.0 +sql_txn_latency_internal_bucket{le="2.228223e+06"} 196.0 +sql_txn_latency_internal_bucket{le="2.359295e+06"} 255.0 +sql_txn_latency_internal_bucket{le="2.490367e+06"} 293.0 +sql_txn_latency_internal_bucket{le="2.621439e+06"} 315.0 +sql_txn_latency_internal_bucket{le="2.752511e+06"} 329.0 +sql_txn_latency_internal_bucket{le="2.883583e+06"} 351.0 +sql_txn_latency_internal_bucket{le="3.014655e+06"} 363.0 +sql_txn_latency_internal_bucket{le="3.145727e+06"} 378.0 +sql_txn_latency_internal_bucket{le="3.276799e+06"} 401.0 +sql_txn_latency_internal_bucket{le="3.407871e+06"} 431.0 +sql_txn_latency_internal_bucket{le="3.538943e+06"} 458.0 +sql_txn_latency_internal_bucket{le="3.670015e+06"} 508.0 +sql_txn_latency_internal_bucket{le="3.801087e+06"} 561.0 +sql_txn_latency_internal_bucket{le="3.932159e+06"} 600.0 +sql_txn_latency_internal_bucket{le="4.063231e+06"} 660.0 +sql_txn_latency_internal_bucket{le="4.194303e+06"} 710.0 +sql_txn_latency_internal_bucket{le="4.456447e+06"} 806.0 +sql_txn_latency_internal_bucket{le="4.718591e+06"} 881.0 +sql_txn_latency_internal_bucket{le="4.980735e+06"} 944.0 +sql_txn_latency_internal_bucket{le="5.242879e+06"} 1018.0 +sql_txn_latency_internal_bucket{le="5.505023e+06"} 1088.0 +sql_txn_latency_internal_bucket{le="5.767167e+06"} 1158.0 +sql_txn_latency_internal_bucket{le="6.029311e+06"} 1212.0 +sql_txn_latency_internal_bucket{le="6.291455e+06"} 1258.0 +sql_txn_latency_internal_bucket{le="6.553599e+06"} 1309.0 +sql_txn_latency_internal_bucket{le="6.815743e+06"} 1361.0 +sql_txn_latency_internal_bucket{le="7.077887e+06"} 1422.0 +sql_txn_latency_internal_bucket{le="7.340031e+06"} 1470.0 +sql_txn_latency_internal_bucket{le="7.602175e+06"} 1511.0 +sql_txn_latency_internal_bucket{le="7.864319e+06"} 1544.0 +sql_txn_latency_internal_bucket{le="8.126463e+06"} 1584.0 +sql_txn_latency_internal_bucket{le="8.388607e+06"} 1620.0 +sql_txn_latency_internal_bucket{le="8.912895e+06"} 1692.0 +sql_txn_latency_internal_bucket{le="9.437183e+06"} 1749.0 +sql_txn_latency_internal_bucket{le="9.961471e+06"} 1806.0 +sql_txn_latency_internal_bucket{le="1.0485759e+07"} 1847.0 +sql_txn_latency_internal_bucket{le="1.1010047e+07"} 1887.0 +sql_txn_latency_internal_bucket{le="1.1534335e+07"} 1923.0 +sql_txn_latency_internal_bucket{le="1.2058623e+07"} 1949.0 +sql_txn_latency_internal_bucket{le="1.2582911e+07"} 1968.0 +sql_txn_latency_internal_bucket{le="1.3107199e+07"} 1982.0 +sql_txn_latency_internal_bucket{le="1.3631487e+07"} 1993.0 +sql_txn_latency_internal_bucket{le="1.4155775e+07"} 2008.0 +sql_txn_latency_internal_bucket{le="1.4680063e+07"} 2016.0 +sql_txn_latency_internal_bucket{le="1.5204351e+07"} 2020.0 +sql_txn_latency_internal_bucket{le="1.5728639e+07"} 2028.0 +sql_txn_latency_internal_bucket{le="1.6252927e+07"} 2032.0 +sql_txn_latency_internal_bucket{le="1.6777215e+07"} 2036.0 +sql_txn_latency_internal_bucket{le="1.7825791e+07"} 2044.0 +sql_txn_latency_internal_bucket{le="1.8874367e+07"} 2049.0 +sql_txn_latency_internal_bucket{le="1.9922943e+07"} 2052.0 +sql_txn_latency_internal_bucket{le="2.0971519e+07"} 2056.0 +sql_txn_latency_internal_bucket{le="2.2020095e+07"} 2060.0 
+sql_txn_latency_internal_bucket{le="2.3068671e+07"} 2064.0 +sql_txn_latency_internal_bucket{le="2.4117247e+07"} 2065.0 +sql_txn_latency_internal_bucket{le="2.5165823e+07"} 2066.0 +sql_txn_latency_internal_bucket{le="2.6214399e+07"} 2068.0 +sql_txn_latency_internal_bucket{le="2.8311551e+07"} 2069.0 +sql_txn_latency_internal_bucket{le="2.9360127e+07"} 2070.0 +sql_txn_latency_internal_bucket{le="3.0408703e+07"} 2072.0 +sql_txn_latency_internal_bucket{le="3.2505855e+07"} 2073.0 +sql_txn_latency_internal_bucket{le="3.5651583e+07"} 2074.0 +sql_txn_latency_internal_bucket{le="4.1943039e+07"} 2076.0 +sql_txn_latency_internal_bucket{le="4.8234495e+07"} 2077.0 +sql_txn_latency_internal_bucket{le="1.25829119e+08"} 2078.0 +sql_txn_latency_internal_bucket{le="1.34217727e+08"} 2079.0 +sql_txn_latency_internal_bucket{le="2.18103807e+08"} 2080.0 +sql_txn_latency_internal_bucket{le="2.26492415e+08"} 2081.0 +sql_txn_latency_internal_bucket{le="5.20093695e+08"} 2082.0 +sql_txn_latency_internal_bucket{le="1.0200547327e+10"} 2083.0 +sql_txn_latency_internal_bucket{le="+Inf"} 2083.0 +sql_txn_latency_internal_sum 2.4672466909e+10 +sql_txn_latency_internal_count 2083.0 +# HELP totalbytes Total number of bytes taken up by keys and values including non-live data +# TYPE totalbytes gauge +totalbytes{store="1"} 8.225857e+07 +# HELP gcbytesage Cumulative age of non-live data +# TYPE gcbytesage gauge +gcbytesage{store="1"} -6.30933145e+08 +# HELP raft_rcvd_prop Number of MsgProp messages received by this store +# TYPE raft_rcvd_prop counter +raft_rcvd_prop{store="1"} 13.0 +# HELP raft_rcvd_prevoteresp Number of MsgPreVoteResp messages received by this store +# TYPE raft_rcvd_prevoteresp counter +raft_rcvd_prevoteresp{store="1"} 20.0 +# HELP queue_raftlog_process_success Number of replicas successfully processed by the Raft log queue +# TYPE queue_raftlog_process_success counter +queue_raftlog_process_success{store="1"} 154.0 +# HELP sql_restart_savepoint_rollback_count Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed +# TYPE sql_restart_savepoint_rollback_count counter +sql_restart_savepoint_rollback_count 0.0 +# HELP queue_consistency_pending Number of pending replicas in the consistency checker queue +# TYPE queue_consistency_pending gauge +queue_consistency_pending{store="1"} 0.0 +# HELP sql_restart_savepoint_rollback_started_count Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started +# TYPE sql_restart_savepoint_rollback_started_count counter +sql_restart_savepoint_rollback_started_count 0.0 +# HELP sql_update_count_internal Number of SQL UPDATE statements successfully executed (internal queries) +# TYPE sql_update_count_internal counter +sql_update_count_internal 16.0 +# HELP addsstable_proposals Number of SSTable ingestions proposed (i.e. 
sent to Raft by lease holders) +# TYPE addsstable_proposals counter +addsstable_proposals{store="1"} 0.0 +# HELP queue_replicate_addreplica Number of replica additions attempted by the replicate queue +# TYPE queue_replicate_addreplica counter +queue_replicate_addreplica{store="1"} 0.0 +# HELP sql_mem_sql_txn_max Memory usage per sql transaction for sql +# TYPE sql_mem_sql_txn_max histogram +sql_mem_sql_txn_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_txn_max_sum 0.0 +sql_mem_sql_txn_max_count 0.0 +sql_mem_sql_txn_max_bucket{le="+Inf"} 0.0 +sql_mem_sql_txn_max_sum 0.0 +sql_mem_sql_txn_max_count 0.0 +# HELP sql_mem_conns_txn_max Memory usage per sql transaction for conns +# TYPE sql_mem_conns_txn_max histogram +sql_mem_conns_txn_max_bucket{le="+Inf"} 0.0 +sql_mem_conns_txn_max_sum 0.0 +sql_mem_conns_txn_max_count 0.0 +# HELP raft_process_commandcommit_latency Latency histogram for committing Raft commands +# TYPE raft_process_commandcommit_latency histogram +raft_process_commandcommit_latency_bucket{store="1",le="8703.0"} 2.0 +raft_process_commandcommit_latency_bucket{store="1",le="9215.0"} 5.0 +raft_process_commandcommit_latency_bucket{store="1",le="9727.0"} 23.0 +raft_process_commandcommit_latency_bucket{store="1",le="10239.0"} 64.0 +raft_process_commandcommit_latency_bucket{store="1",le="10751.0"} 119.0 +raft_process_commandcommit_latency_bucket{store="1",le="11263.0"} 215.0 +raft_process_commandcommit_latency_bucket{store="1",le="11775.0"} 298.0 +raft_process_commandcommit_latency_bucket{store="1",le="12287.0"} 415.0 +raft_process_commandcommit_latency_bucket{store="1",le="12799.0"} 517.0 +raft_process_commandcommit_latency_bucket{store="1",le="13311.0"} 608.0 +raft_process_commandcommit_latency_bucket{store="1",le="13823.0"} 674.0 +raft_process_commandcommit_latency_bucket{store="1",le="14335.0"} 748.0 +raft_process_commandcommit_latency_bucket{store="1",le="14847.0"} 809.0 +raft_process_commandcommit_latency_bucket{store="1",le="15359.0"} 863.0 +raft_process_commandcommit_latency_bucket{store="1",le="15871.0"} 916.0 +raft_process_commandcommit_latency_bucket{store="1",le="16383.0"} 977.0 +raft_process_commandcommit_latency_bucket{store="1",le="17407.0"} 1125.0 +raft_process_commandcommit_latency_bucket{store="1",le="18431.0"} 1295.0 +raft_process_commandcommit_latency_bucket{store="1",le="19455.0"} 1531.0 +raft_process_commandcommit_latency_bucket{store="1",le="20479.0"} 1788.0 +raft_process_commandcommit_latency_bucket{store="1",le="21503.0"} 2110.0 +raft_process_commandcommit_latency_bucket{store="1",le="22527.0"} 2513.0 +raft_process_commandcommit_latency_bucket{store="1",le="23551.0"} 2943.0 +raft_process_commandcommit_latency_bucket{store="1",le="24575.0"} 3527.0 +raft_process_commandcommit_latency_bucket{store="1",le="25599.0"} 4139.0 +raft_process_commandcommit_latency_bucket{store="1",le="26623.0"} 4886.0 +raft_process_commandcommit_latency_bucket{store="1",le="27647.0"} 5635.0 +raft_process_commandcommit_latency_bucket{store="1",le="28671.0"} 6427.0 +raft_process_commandcommit_latency_bucket{store="1",le="29695.0"} 7234.0 +raft_process_commandcommit_latency_bucket{store="1",le="30719.0"} 8064.0 +raft_process_commandcommit_latency_bucket{store="1",le="31743.0"} 8964.0 +raft_process_commandcommit_latency_bucket{store="1",le="32767.0"} 9885.0 +raft_process_commandcommit_latency_bucket{store="1",le="34815.0"} 11527.0 +raft_process_commandcommit_latency_bucket{store="1",le="36863.0"} 12928.0 +raft_process_commandcommit_latency_bucket{store="1",le="38911.0"} 14225.0 
+raft_process_commandcommit_latency_bucket{store="1",le="40959.0"} 15324.0 +raft_process_commandcommit_latency_bucket{store="1",le="43007.0"} 16255.0 +raft_process_commandcommit_latency_bucket{store="1",le="45055.0"} 17117.0 +raft_process_commandcommit_latency_bucket{store="1",le="47103.0"} 17895.0 +raft_process_commandcommit_latency_bucket{store="1",le="49151.0"} 18640.0 +raft_process_commandcommit_latency_bucket{store="1",le="51199.0"} 19281.0 +raft_process_commandcommit_latency_bucket{store="1",le="53247.0"} 19961.0 +raft_process_commandcommit_latency_bucket{store="1",le="55295.0"} 20546.0 +raft_process_commandcommit_latency_bucket{store="1",le="57343.0"} 21150.0 +raft_process_commandcommit_latency_bucket{store="1",le="59391.0"} 21736.0 +raft_process_commandcommit_latency_bucket{store="1",le="61439.0"} 22256.0 +raft_process_commandcommit_latency_bucket{store="1",le="63487.0"} 22783.0 +raft_process_commandcommit_latency_bucket{store="1",le="65535.0"} 23256.0 +raft_process_commandcommit_latency_bucket{store="1",le="69631.0"} 24251.0 +raft_process_commandcommit_latency_bucket{store="1",le="73727.0"} 25169.0 +raft_process_commandcommit_latency_bucket{store="1",le="77823.0"} 26004.0 +raft_process_commandcommit_latency_bucket{store="1",le="81919.0"} 26775.0 +raft_process_commandcommit_latency_bucket{store="1",le="86015.0"} 27489.0 +raft_process_commandcommit_latency_bucket{store="1",le="90111.0"} 28155.0 +raft_process_commandcommit_latency_bucket{store="1",le="94207.0"} 28752.0 +raft_process_commandcommit_latency_bucket{store="1",le="98303.0"} 29281.0 +raft_process_commandcommit_latency_bucket{store="1",le="102399.0"} 29838.0 +raft_process_commandcommit_latency_bucket{store="1",le="106495.0"} 30300.0 +raft_process_commandcommit_latency_bucket{store="1",le="110591.0"} 30725.0 +raft_process_commandcommit_latency_bucket{store="1",le="114687.0"} 31127.0 +raft_process_commandcommit_latency_bucket{store="1",le="118783.0"} 31498.0 +raft_process_commandcommit_latency_bucket{store="1",le="122879.0"} 31854.0 +raft_process_commandcommit_latency_bucket{store="1",le="126975.0"} 32163.0 +raft_process_commandcommit_latency_bucket{store="1",le="131071.0"} 32450.0 +raft_process_commandcommit_latency_bucket{store="1",le="139263.0"} 32990.0 +raft_process_commandcommit_latency_bucket{store="1",le="147455.0"} 33471.0 +raft_process_commandcommit_latency_bucket{store="1",le="155647.0"} 33830.0 +raft_process_commandcommit_latency_bucket{store="1",le="163839.0"} 34176.0 +raft_process_commandcommit_latency_bucket{store="1",le="172031.0"} 34434.0 +raft_process_commandcommit_latency_bucket{store="1",le="180223.0"} 34668.0 +raft_process_commandcommit_latency_bucket{store="1",le="188415.0"} 34893.0 +raft_process_commandcommit_latency_bucket{store="1",le="196607.0"} 35116.0 +raft_process_commandcommit_latency_bucket{store="1",le="204799.0"} 35301.0 +raft_process_commandcommit_latency_bucket{store="1",le="212991.0"} 35494.0 +raft_process_commandcommit_latency_bucket{store="1",le="221183.0"} 35659.0 +raft_process_commandcommit_latency_bucket{store="1",le="229375.0"} 35833.0 +raft_process_commandcommit_latency_bucket{store="1",le="237567.0"} 35992.0 +raft_process_commandcommit_latency_bucket{store="1",le="245759.0"} 36128.0 +raft_process_commandcommit_latency_bucket{store="1",le="253951.0"} 36269.0 +raft_process_commandcommit_latency_bucket{store="1",le="262143.0"} 36429.0 +raft_process_commandcommit_latency_bucket{store="1",le="278527.0"} 36660.0 +raft_process_commandcommit_latency_bucket{store="1",le="294911.0"} 36867.0 
+raft_process_commandcommit_latency_bucket{store="1",le="311295.0"} 37077.0 +raft_process_commandcommit_latency_bucket{store="1",le="327679.0"} 37288.0 +raft_process_commandcommit_latency_bucket{store="1",le="344063.0"} 37454.0 +raft_process_commandcommit_latency_bucket{store="1",le="360447.0"} 37621.0 +raft_process_commandcommit_latency_bucket{store="1",le="376831.0"} 37762.0 +raft_process_commandcommit_latency_bucket{store="1",le="393215.0"} 37920.0 +raft_process_commandcommit_latency_bucket{store="1",le="409599.0"} 38042.0 +raft_process_commandcommit_latency_bucket{store="1",le="425983.0"} 38168.0 +raft_process_commandcommit_latency_bucket{store="1",le="442367.0"} 38289.0 +raft_process_commandcommit_latency_bucket{store="1",le="458751.0"} 38379.0 +raft_process_commandcommit_latency_bucket{store="1",le="475135.0"} 38481.0 +raft_process_commandcommit_latency_bucket{store="1",le="491519.0"} 38564.0 +raft_process_commandcommit_latency_bucket{store="1",le="507903.0"} 38632.0 +raft_process_commandcommit_latency_bucket{store="1",le="524287.0"} 38714.0 +raft_process_commandcommit_latency_bucket{store="1",le="557055.0"} 38861.0 +raft_process_commandcommit_latency_bucket{store="1",le="589823.0"} 39013.0 +raft_process_commandcommit_latency_bucket{store="1",le="622591.0"} 39137.0 +raft_process_commandcommit_latency_bucket{store="1",le="655359.0"} 39263.0 +raft_process_commandcommit_latency_bucket{store="1",le="688127.0"} 39368.0 +raft_process_commandcommit_latency_bucket{store="1",le="720895.0"} 39459.0 +raft_process_commandcommit_latency_bucket{store="1",le="753663.0"} 39557.0 +raft_process_commandcommit_latency_bucket{store="1",le="786431.0"} 39638.0 +raft_process_commandcommit_latency_bucket{store="1",le="819199.0"} 39693.0 +raft_process_commandcommit_latency_bucket{store="1",le="851967.0"} 39770.0 +raft_process_commandcommit_latency_bucket{store="1",le="884735.0"} 39828.0 +raft_process_commandcommit_latency_bucket{store="1",le="917503.0"} 39883.0 +raft_process_commandcommit_latency_bucket{store="1",le="950271.0"} 39941.0 +raft_process_commandcommit_latency_bucket{store="1",le="983039.0"} 39996.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.015807e+06"} 40053.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.048575e+06"} 40103.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.114111e+06"} 40218.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.179647e+06"} 40312.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.245183e+06"} 40401.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.310719e+06"} 40515.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.376255e+06"} 40592.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.441791e+06"} 40706.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.507327e+06"} 40834.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.572863e+06"} 40973.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.638399e+06"} 41123.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.703935e+06"} 41275.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.769471e+06"} 41419.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.835007e+06"} 41557.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.900543e+06"} 41690.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.966079e+06"} 41837.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.031615e+06"} 41976.0 
+raft_process_commandcommit_latency_bucket{store="1",le="2.097151e+06"} 42105.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.228223e+06"} 42335.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.359295e+06"} 42526.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.490367e+06"} 42699.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.621439e+06"} 42848.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.752511e+06"} 42973.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.883583e+06"} 43080.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.014655e+06"} 43189.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.145727e+06"} 43286.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.276799e+06"} 43369.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.407871e+06"} 43444.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.538943e+06"} 43523.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.670015e+06"} 43590.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.801087e+06"} 43654.0 +raft_process_commandcommit_latency_bucket{store="1",le="3.932159e+06"} 43717.0 +raft_process_commandcommit_latency_bucket{store="1",le="4.063231e+06"} 43753.0 +raft_process_commandcommit_latency_bucket{store="1",le="4.194303e+06"} 43801.0 +raft_process_commandcommit_latency_bucket{store="1",le="4.456447e+06"} 43889.0 +raft_process_commandcommit_latency_bucket{store="1",le="4.718591e+06"} 43969.0 +raft_process_commandcommit_latency_bucket{store="1",le="4.980735e+06"} 44035.0 +raft_process_commandcommit_latency_bucket{store="1",le="5.242879e+06"} 44079.0 +raft_process_commandcommit_latency_bucket{store="1",le="5.505023e+06"} 44126.0 +raft_process_commandcommit_latency_bucket{store="1",le="5.767167e+06"} 44163.0 +raft_process_commandcommit_latency_bucket{store="1",le="6.029311e+06"} 44180.0 +raft_process_commandcommit_latency_bucket{store="1",le="6.291455e+06"} 44198.0 +raft_process_commandcommit_latency_bucket{store="1",le="6.553599e+06"} 44221.0 +raft_process_commandcommit_latency_bucket{store="1",le="6.815743e+06"} 44237.0 +raft_process_commandcommit_latency_bucket{store="1",le="7.077887e+06"} 44251.0 +raft_process_commandcommit_latency_bucket{store="1",le="7.340031e+06"} 44268.0 +raft_process_commandcommit_latency_bucket{store="1",le="7.602175e+06"} 44285.0 +raft_process_commandcommit_latency_bucket{store="1",le="7.864319e+06"} 44298.0 +raft_process_commandcommit_latency_bucket{store="1",le="8.126463e+06"} 44313.0 +raft_process_commandcommit_latency_bucket{store="1",le="8.388607e+06"} 44319.0 +raft_process_commandcommit_latency_bucket{store="1",le="8.912895e+06"} 44338.0 +raft_process_commandcommit_latency_bucket{store="1",le="9.437183e+06"} 44352.0 +raft_process_commandcommit_latency_bucket{store="1",le="9.961471e+06"} 44358.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.0485759e+07"} 44363.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.1010047e+07"} 44367.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.1534335e+07"} 44373.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.2058623e+07"} 44376.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.2582911e+07"} 44377.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.3107199e+07"} 44379.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.3631487e+07"} 44382.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.4680063e+07"} 44384.0 
+raft_process_commandcommit_latency_bucket{store="1",le="1.5204351e+07"} 44386.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.6252927e+07"} 44387.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.8874367e+07"} 44388.0 +raft_process_commandcommit_latency_bucket{store="1",le="1.9922943e+07"} 44390.0 +raft_process_commandcommit_latency_bucket{store="1",le="2.6214399e+07"} 44391.0 +raft_process_commandcommit_latency_bucket{store="1",le="+Inf"} 44391.0 +raft_process_commandcommit_latency_sum{store="1"} 1.6329882265e+10 +raft_process_commandcommit_latency_count{store="1"} 44391.0 +# HELP raft_rcvd_voteresp Number of MsgVoteResp messages received by this store +# TYPE raft_rcvd_voteresp counter +raft_rcvd_voteresp{store="1"} 24.0 +# HELP raft_entrycache_size Number of Raft entries in the Raft entry cache +# TYPE raft_entrycache_size gauge +raft_entrycache_size{store="1"} 417.0 +# HELP tscache_skl_read_rotations Number of page rotations in the read timestamp cache +# TYPE tscache_skl_read_rotations counter +tscache_skl_read_rotations{store="1"} 0.0 +# HELP round_trip_latency Distribution of round-trip latencies with other nodes +# TYPE round_trip_latency histogram +round_trip_latency_bucket{le="221183.0"} 1.0 +round_trip_latency_bucket{le="237567.0"} 2.0 +round_trip_latency_bucket{le="253951.0"} 4.0 +round_trip_latency_bucket{le="278527.0"} 10.0 +round_trip_latency_bucket{le="294911.0"} 14.0 +round_trip_latency_bucket{le="311295.0"} 25.0 +round_trip_latency_bucket{le="327679.0"} 51.0 +round_trip_latency_bucket{le="344063.0"} 69.0 +round_trip_latency_bucket{le="360447.0"} 100.0 +round_trip_latency_bucket{le="376831.0"} 128.0 +round_trip_latency_bucket{le="393215.0"} 171.0 +round_trip_latency_bucket{le="409599.0"} 225.0 +round_trip_latency_bucket{le="425983.0"} 287.0 +round_trip_latency_bucket{le="442367.0"} 378.0 +round_trip_latency_bucket{le="458751.0"} 475.0 +round_trip_latency_bucket{le="475135.0"} 584.0 +round_trip_latency_bucket{le="491519.0"} 710.0 +round_trip_latency_bucket{le="507903.0"} 863.0 +round_trip_latency_bucket{le="524287.0"} 1038.0 +round_trip_latency_bucket{le="557055.0"} 1475.0 +round_trip_latency_bucket{le="589823.0"} 1979.0 +round_trip_latency_bucket{le="622591.0"} 2622.0 +round_trip_latency_bucket{le="655359.0"} 3314.0 +round_trip_latency_bucket{le="688127.0"} 4064.0 +round_trip_latency_bucket{le="720895.0"} 4905.0 +round_trip_latency_bucket{le="753663.0"} 5812.0 +round_trip_latency_bucket{le="786431.0"} 6765.0 +round_trip_latency_bucket{le="819199.0"} 7791.0 +round_trip_latency_bucket{le="851967.0"} 8913.0 +round_trip_latency_bucket{le="884735.0"} 9981.0 +round_trip_latency_bucket{le="917503.0"} 11033.0 +round_trip_latency_bucket{le="950271.0"} 12068.0 +round_trip_latency_bucket{le="983039.0"} 13072.0 +round_trip_latency_bucket{le="1.015807e+06"} 14069.0 +round_trip_latency_bucket{le="1.048575e+06"} 15031.0 +round_trip_latency_bucket{le="1.114111e+06"} 16651.0 +round_trip_latency_bucket{le="1.179647e+06"} 18055.0 +round_trip_latency_bucket{le="1.245183e+06"} 19374.0 +round_trip_latency_bucket{le="1.310719e+06"} 20496.0 +round_trip_latency_bucket{le="1.376255e+06"} 21477.0 +round_trip_latency_bucket{le="1.441791e+06"} 22299.0 +round_trip_latency_bucket{le="1.507327e+06"} 23073.0 +round_trip_latency_bucket{le="1.572863e+06"} 23740.0 +round_trip_latency_bucket{le="1.638399e+06"} 24341.0 +round_trip_latency_bucket{le="1.703935e+06"} 24843.0 +round_trip_latency_bucket{le="1.769471e+06"} 25249.0 +round_trip_latency_bucket{le="1.835007e+06"} 25668.0 
+round_trip_latency_bucket{le="1.900543e+06"} 26007.0 +round_trip_latency_bucket{le="1.966079e+06"} 26344.0 +round_trip_latency_bucket{le="2.031615e+06"} 26597.0 +round_trip_latency_bucket{le="2.097151e+06"} 26801.0 +round_trip_latency_bucket{le="2.228223e+06"} 27159.0 +round_trip_latency_bucket{le="2.359295e+06"} 27448.0 +round_trip_latency_bucket{le="2.490367e+06"} 27652.0 +round_trip_latency_bucket{le="2.621439e+06"} 27822.0 +round_trip_latency_bucket{le="2.752511e+06"} 27959.0 +round_trip_latency_bucket{le="2.883583e+06"} 28063.0 +round_trip_latency_bucket{le="3.014655e+06"} 28123.0 +round_trip_latency_bucket{le="3.145727e+06"} 28185.0 +round_trip_latency_bucket{le="3.276799e+06"} 28243.0 +round_trip_latency_bucket{le="3.407871e+06"} 28281.0 +round_trip_latency_bucket{le="3.538943e+06"} 28332.0 +round_trip_latency_bucket{le="3.670015e+06"} 28358.0 +round_trip_latency_bucket{le="3.801087e+06"} 28377.0 +round_trip_latency_bucket{le="3.932159e+06"} 28399.0 +round_trip_latency_bucket{le="4.063231e+06"} 28416.0 +round_trip_latency_bucket{le="4.194303e+06"} 28426.0 +round_trip_latency_bucket{le="4.456447e+06"} 28446.0 +round_trip_latency_bucket{le="4.718591e+06"} 28460.0 +round_trip_latency_bucket{le="4.980735e+06"} 28469.0 +round_trip_latency_bucket{le="5.242879e+06"} 28478.0 +round_trip_latency_bucket{le="5.505023e+06"} 28484.0 +round_trip_latency_bucket{le="5.767167e+06"} 28489.0 +round_trip_latency_bucket{le="6.029311e+06"} 28493.0 +round_trip_latency_bucket{le="6.553599e+06"} 28494.0 +round_trip_latency_bucket{le="6.815743e+06"} 28497.0 +round_trip_latency_bucket{le="7.077887e+06"} 28498.0 +round_trip_latency_bucket{le="7.340031e+06"} 28500.0 +round_trip_latency_bucket{le="7.602175e+06"} 28501.0 +round_trip_latency_bucket{le="7.864319e+06"} 28502.0 +round_trip_latency_bucket{le="8.126463e+06"} 28505.0 +round_trip_latency_bucket{le="8.388607e+06"} 28507.0 +round_trip_latency_bucket{le="8.912895e+06"} 28509.0 +round_trip_latency_bucket{le="9.437183e+06"} 28510.0 +round_trip_latency_bucket{le="9.961471e+06"} 28511.0 +round_trip_latency_bucket{le="1.0485759e+07"} 28512.0 +round_trip_latency_bucket{le="1.1010047e+07"} 28513.0 +round_trip_latency_bucket{le="1.2582911e+07"} 28514.0 +round_trip_latency_bucket{le="1.5204351e+07"} 28515.0 +round_trip_latency_bucket{le="1.6252927e+07"} 28516.0 +round_trip_latency_bucket{le="1.7825791e+07"} 28518.0 +round_trip_latency_bucket{le="1.9922943e+07"} 28519.0 +round_trip_latency_bucket{le="2.2020095e+07"} 28520.0 +round_trip_latency_bucket{le="2.9360127e+07"} 28523.0 +round_trip_latency_bucket{le="3.1457279e+07"} 28524.0 +round_trip_latency_bucket{le="3.2505855e+07"} 28525.0 +round_trip_latency_bucket{le="5.2428799e+07"} 28526.0 +round_trip_latency_bucket{le="1.50994943e+08"} 28527.0 +round_trip_latency_bucket{le="3.52321535e+08"} 28528.0 +round_trip_latency_bucket{le="4.19430399e+08"} 28529.0 +round_trip_latency_bucket{le="6.71088639e+08"} 28530.0 +round_trip_latency_bucket{le="+Inf"} 28530.0 +round_trip_latency_sum 3.5795193998e+10 +round_trip_latency_count 28530.0 +# HELP sql_failure_count_internal Number of statements resulting in a planning or runtime error (internal queries) +# TYPE sql_failure_count_internal counter +sql_failure_count_internal 6.0 +# HELP raft_heartbeats_pending Number of pending heartbeats and responses waiting to be coalesced +# TYPE raft_heartbeats_pending gauge +raft_heartbeats_pending{store="1"} 0.0 +# HELP queue_replicate_removedeadreplica Number of dead replica removals attempted by the replicate queue (typically in response 
to a node outage) +# TYPE queue_replicate_removedeadreplica counter +queue_replicate_removedeadreplica{store="1"} 0.0 +# HELP sql_txn_begin_started_count Number of SQL transaction BEGIN statements started +# TYPE sql_txn_begin_started_count counter +sql_txn_begin_started_count 0.0 +# HELP timeseries_write_samples Total number of metric samples written to disk +# TYPE timeseries_write_samples counter +timeseries_write_samples 845784.0 +# HELP sys_gc_pause_ns Total GC pause +# TYPE sys_gc_pause_ns gauge +sys_gc_pause_ns 6.070045e+07 +# HELP sql_restart_savepoint_release_count Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed +# TYPE sql_restart_savepoint_release_count counter +sql_restart_savepoint_release_count 0.0 +# HELP range_splits Number of range splits +# TYPE range_splits counter +range_splits{store="1"} 0.0 +# HELP compactor_suggestionbytes_compacted Number of logical bytes compacted from suggested compactions +# TYPE compactor_suggestionbytes_compacted counter +compactor_suggestionbytes_compacted{store="1"} 0.0 +# HELP distsender_errors_inleasetransferbackoffs Number of times backed off due to NotLeaseHolderErrors during lease transfer. +# TYPE distsender_errors_inleasetransferbackoffs counter +distsender_errors_inleasetransferbackoffs 0.0 +# HELP sql_distsql_flows_total Number of distributed SQL flows executed +# TYPE sql_distsql_flows_total counter +sql_distsql_flows_total 1042.0 +# HELP sql_mem_conns_session_max Memory usage per sql session for conns +# TYPE sql_mem_conns_session_max histogram +sql_mem_conns_session_max_bucket{le="+Inf"} 0.0 +sql_mem_conns_session_max_sum 0.0 +sql_mem_conns_session_max_count 0.0 +# HELP sql_optimizer_plan_cache_hits Number of non-prepared statements for which a cached plan was used +# TYPE sql_optimizer_plan_cache_hits counter +sql_optimizer_plan_cache_hits 0.0 +# HELP leases_transfers_error Number of failed lease transfers +# TYPE leases_transfers_error counter +leases_transfers_error{store="1"} 0.0 +# HELP rebalancing_writespersecond Number of keys written (i.e. 
applied by raft) per second to the store, averaged over a large time period as used in rebalancing decisions +# TYPE rebalancing_writespersecond gauge +rebalancing_writespersecond{store="1"} 213.02361755221986 +# HELP rocksdb_flushes Number of table flushes +# TYPE rocksdb_flushes gauge +rocksdb_flushes{store="1"} 13.0 +# HELP changefeed_buffer_entries_in Total entries entering the buffer between raft and changefeed sinks +# TYPE changefeed_buffer_entries_in counter +changefeed_buffer_entries_in 0.0 +# HELP sys_host_disk_write_bytes Bytes written to all disks since this process started +# TYPE sys_host_disk_write_bytes gauge +sys_host_disk_write_bytes 942080.0 +# HELP changefeed_emitted_bytes Bytes emitted by all feeds +# TYPE changefeed_emitted_bytes counter +changefeed_emitted_bytes 0.0 +# HELP sql_insert_started_count Number of SQL INSERT statements started +# TYPE sql_insert_started_count counter +sql_insert_started_count 0.0 +# HELP sql_distsql_exec_latency Latency of DistSQL statement execution +# TYPE sql_distsql_exec_latency histogram +sql_distsql_exec_latency_bucket{le="+Inf"} 0.0 +sql_distsql_exec_latency_sum 0.0 +sql_distsql_exec_latency_count 0.0 +# HELP queue_replicagc_processingnanos Nanoseconds spent processing replicas in the replica GC queue +# TYPE queue_replicagc_processingnanos counter +queue_replicagc_processingnanos{store="1"} 3.60590602e+09 +# HELP queue_replicate_removelearnerreplica Number of learner replica removals attempted by the replicate queue (typically due to internal race conditions) +# TYPE queue_replicate_removelearnerreplica counter +queue_replicate_removelearnerreplica{store="1"} 0.0 +# HELP rebalancing_range_rebalances Number of range rebalance operations motivated by store-level load imbalances +# TYPE rebalancing_range_rebalances counter +rebalancing_range_rebalances{store="1"} 0.0 +# HELP sql_mem_admin_session_max Memory usage per sql session for admin +# TYPE sql_mem_admin_session_max histogram +sql_mem_admin_session_max_bucket{le="+Inf"} 0.0 +sql_mem_admin_session_max_sum 0.0 +sql_mem_admin_session_max_count 0.0 +# HELP sql_optimizer_plan_cache_misses_internal Number of non-prepared statements for which a cached plan was not used (internal queries) +# TYPE sql_optimizer_plan_cache_misses_internal counter +sql_optimizer_plan_cache_misses_internal 524.0 +# HELP range_removes Number of range removals +# TYPE range_removes counter +range_removes{store="1"} 0.0 +# HELP range_snapshots_normal_applied Number of applied snapshots +# TYPE range_snapshots_normal_applied counter +range_snapshots_normal_applied{store="1"} 0.0 +# HELP queue_consistency_processingnanos Nanoseconds spent processing replicas in the consistency checker queue +# TYPE queue_consistency_processingnanos counter +queue_consistency_processingnanos{store="1"} 1.11826751e+08 +# HELP queue_split_pending Number of pending replicas in the split queue +# TYPE queue_split_pending gauge +queue_split_pending{store="1"} 0.0 +# HELP queue_gc_info_intenttxns Number of associated distinct transactions +# TYPE queue_gc_info_intenttxns counter +queue_gc_info_intenttxns{store="1"} 0.0 +# HELP queue_gc_info_transactionspangcaborted Number of GC'able entries corresponding to aborted txns +# TYPE queue_gc_info_transactionspangcaborted counter +queue_gc_info_transactionspangcaborted{store="1"} 0.0 +# HELP sql_txn_begin_count_internal Number of SQL transaction BEGIN statements successfully executed (internal queries) +# TYPE sql_txn_begin_count_internal counter +sql_txn_begin_count_internal 0.0 +# HELP 
ranges Number of ranges +# TYPE ranges gauge +ranges{store="1"} 34.0 +# HELP raft_rcvd_transferleader Number of MsgTransferLeader messages received by this store +# TYPE raft_rcvd_transferleader counter +raft_rcvd_transferleader{store="1"} 0.0 +# HELP gossip_connections_refused Number of refused incoming gossip connections +# TYPE gossip_connections_refused counter +gossip_connections_refused 0.0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/non_cockroachdb.txt b/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/non_cockroachdb.txt new file mode 100644 index 00000000000000..f5f0ae082c69fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/cockroachdb/testdata/non_cockroachdb.txt @@ -0,0 +1,27 @@ +# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize +# TYPE wmi_os_process_memory_limix_bytes gauge +wmi_os_process_memory_limix_bytes 1.40737488224256e+14 +# HELP wmi_os_processes OperatingSystem.NumberOfProcesses +# TYPE wmi_os_processes gauge +wmi_os_processes 124 +# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses +# TYPE wmi_os_processes_limit gauge +wmi_os_processes_limit 4.294967295e+09 +# HELP wmi_os_time OperatingSystem.LocalDateTime +# TYPE wmi_os_time gauge +wmi_os_time 1.57804974e+09 +# HELP wmi_os_timezone OperatingSystem.LocalDateTime +# TYPE wmi_os_timezone gauge +wmi_os_timezone{timezone="MSK"} 1 +# HELP wmi_os_users OperatingSystem.NumberOfUsers +# TYPE wmi_os_users gauge +wmi_os_users 2 +# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize +# TYPE wmi_os_virtual_memory_bytes gauge +wmi_os_virtual_memory_bytes 5.770891264e+09 +# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory +# TYPE wmi_os_virtual_memory_free_bytes gauge +wmi_os_virtual_memory_free_bytes 3.76489984e+09 +# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize +# TYPE wmi_os_visible_memory_bytes gauge +wmi_os_visible_memory_bytes 4.294496256e+09 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/README.md b/src/go/collectors/go.d.plugin/modules/consul/README.md new file mode 120000 index 00000000000000..5e57e46dca2769 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/README.md @@ -0,0 +1 @@ +integrations/consul.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/charts.go b/src/go/collectors/go.d.plugin/modules/consul/charts.go new file mode 100644 index 00000000000000..42327cef404f69 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/charts.go @@ -0,0 +1,739 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "fmt" + + "github.com/blang/semver/v4" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + _ = module.Priority + iota + prioKVSApplyTime + prioKVSApplyOperations + prioTXNApplyTime + prioTXNApplyOperations + prioRaftCommitTime + prioRaftCommitsRate + + prioServerLeadershipStatus + prioRaftLeaderLastContactTime + prioRaftFollowerLastContactLeaderTime + prioRaftLeaderElections + prioRaftLeadershipTransitions + + prioAutopilotClusterHealthStatus + prioAutopilotFailureTolerance + prioAutopilotServerHealthStatus + prioAutopilotServerStableTime + prioAutopilotServerSerfStatus + prioAutopilotServerVoterStatus + + prioNetworkLanRTT + + prioRPCRequests + prioRPCRequestsExceeded + prioRPCRequestsFailed + + prioRaftThreadMainSaturation + prioRaftThreadFSMSaturation + + prioRaftFSMLastRestoreDuration + 
prioRaftLeaderOldestLogAge + prioRaftRPCInstallSnapshotTime + + prioBoltDBFreelistBytes + prioBoltDBLogsPerBatch + prioBoltDBStoreLogsTime + + prioMemoryAllocated + prioMemorySys + prioGCPauseTime + + prioServiceHealthCheckStatus + prioNodeHealthCheckStatus + + prioLicenseExpirationTime +) + +var ( + clientCharts = module.Charts{ + clientRPCRequestsRateChart.Copy(), + clientRPCRequestsExceededRateChart.Copy(), + clientRPCRequestsFailedRateChart.Copy(), + + memoryAllocatedChart.Copy(), + memorySysChart.Copy(), + gcPauseTimeChart.Copy(), + + licenseExpirationTimeChart.Copy(), + } + + serverLeaderCharts = module.Charts{ + raftCommitTimeChart.Copy(), + raftLeaderLastContactTimeChart.Copy(), + raftCommitsRateChart.Copy(), + raftLeaderOldestLogAgeChart.Copy(), + } + serverFollowerCharts = module.Charts{ + raftFollowerLastContactLeaderTimeChart.Copy(), + raftRPCInstallSnapshotTimeChart.Copy(), + } + serverAutopilotHealthCharts = module.Charts{ + autopilotServerHealthStatusChart.Copy(), + autopilotServerStableTimeChart.Copy(), + autopilotServerSerfStatusChart.Copy(), + autopilotServerVoterStatusChart.Copy(), + } + serverCommonCharts = module.Charts{ + kvsApplyTimeChart.Copy(), + kvsApplyOperationsRateChart.Copy(), + txnApplyTimeChart.Copy(), + txnApplyOperationsRateChart.Copy(), + + autopilotClusterHealthStatusChart.Copy(), + autopilotFailureTolerance.Copy(), + + raftLeaderElectionsRateChart.Copy(), + raftLeadershipTransitionsRateChart.Copy(), + serverLeadershipStatusChart.Copy(), + + networkLanRTTChart.Copy(), + + clientRPCRequestsRateChart.Copy(), + clientRPCRequestsExceededRateChart.Copy(), + clientRPCRequestsFailedRateChart.Copy(), + + raftThreadMainSaturationPercChart.Copy(), + raftThreadFSMSaturationPercChart.Copy(), + + raftFSMLastRestoreDurationChart.Copy(), + + raftBoltDBFreelistBytesChart.Copy(), + raftBoltDBLogsPerBatchChart.Copy(), + raftBoltDBStoreLogsTimeChart.Copy(), + + memoryAllocatedChart.Copy(), + memorySysChart.Copy(), + gcPauseTimeChart.Copy(), + + licenseExpirationTimeChart.Copy(), + } + + kvsApplyTimeChart = module.Chart{ + ID: "kvs_apply_time", + Title: "KVS apply time", + Units: "ms", + Fam: "transaction timing", + Ctx: "consul.kvs_apply_time", + Priority: prioKVSApplyTime, + Dims: module.Dims{ + {ID: "kvs_apply_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "kvs_apply_quantile=0.9", Name: "quantile_0.9", Div: precision * precision}, + {ID: "kvs_apply_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + kvsApplyOperationsRateChart = module.Chart{ + ID: "kvs_apply_operations_rate", + Title: "KVS apply operations", + Units: "ops/s", + Fam: "transaction timing", + Ctx: "consul.kvs_apply_operations_rate", + Priority: prioKVSApplyOperations, + Dims: module.Dims{ + {ID: "kvs_apply_count", Name: "kvs_apply"}, + }, + } + txnApplyTimeChart = module.Chart{ + ID: "txn_apply_time", + Title: "Transaction apply time", + Units: "ms", + Fam: "transaction timing", + Ctx: "consul.txn_apply_time", + Priority: prioTXNApplyTime, + Dims: module.Dims{ + {ID: "txn_apply_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "txn_apply_quantile=0.9", Name: "quantile_0.9", Div: precision * precision}, + {ID: "txn_apply_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + txnApplyOperationsRateChart = module.Chart{ + ID: "txn_apply_operations_rate", + Title: "Transaction apply operations", + Units: "ops/s", + Fam: "transaction timing", + Ctx: "consul.txn_apply_operations_rate", + Priority: 
prioTXNApplyOperations, + Dims: module.Dims{ + {ID: "txn_apply_count", Name: "txn_apply"}, + }, + } + + raftCommitTimeChart = module.Chart{ + ID: "raft_commit_time", + Title: "Raft commit time", + Units: "ms", + Fam: "transaction timing", + Ctx: "consul.raft_commit_time", + Priority: prioRaftCommitTime, + Dims: module.Dims{ + {ID: "raft_commitTime_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "raft_commitTime_quantile=0.9", Name: "quantile_0.9", Div: precision * precision}, + {ID: "raft_commitTime_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + raftCommitsRateChart = module.Chart{ + ID: "raft_commits_rate", + Title: "Raft commits rate", + Units: "commits/s", + Fam: "transaction timing", + Ctx: "consul.raft_commits_rate", + Priority: prioRaftCommitsRate, + Dims: module.Dims{ + {ID: "raft_apply", Name: "commits", Div: precision, Algo: module.Incremental}, + }, + } + + autopilotClusterHealthStatusChart = module.Chart{ + ID: "autopilot_health_status", + Title: "Autopilot cluster health status", + Units: "status", + Fam: "autopilot", + Ctx: "consul.autopilot_health_status", + Priority: prioAutopilotClusterHealthStatus, + Dims: module.Dims{ + {ID: "autopilot_healthy_yes", Name: "healthy"}, + {ID: "autopilot_healthy_no", Name: "unhealthy"}, + }, + } + autopilotFailureTolerance = module.Chart{ + ID: "autopilot_failure_tolerance", + Title: "Autopilot cluster failure tolerance", + Units: "servers", + Fam: "autopilot", + Ctx: "consul.autopilot_failure_tolerance", + Priority: prioAutopilotFailureTolerance, + Dims: module.Dims{ + {ID: "autopilot_failure_tolerance", Name: "failure_tolerance"}, + }, + } + autopilotServerHealthStatusChart = module.Chart{ + ID: "autopilot_server_health_status", + Title: "Autopilot server health status", + Units: "status", + Fam: "autopilot", + Ctx: "consul.autopilot_server_health_status", + Priority: prioAutopilotServerHealthStatus, + Dims: module.Dims{ + {ID: "autopilot_server_healthy_yes", Name: "healthy"}, + {ID: "autopilot_server_healthy_no", Name: "unhealthy"}, + }, + } + autopilotServerStableTimeChart = module.Chart{ + ID: "autopilot_server_stable_time", + Title: "Autopilot server stable time", + Units: "seconds", + Fam: "autopilot", + Ctx: "consul.autopilot_server_stable_time", + Priority: prioAutopilotServerStableTime, + Dims: module.Dims{ + {ID: "autopilot_server_stable_time", Name: "stable"}, + }, + } + autopilotServerSerfStatusChart = module.Chart{ + ID: "autopilot_server_serf_status", + Title: "Autopilot server Serf status", + Units: "status", + Fam: "autopilot", + Ctx: "consul.autopilot_server_serf_status", + Priority: prioAutopilotServerSerfStatus, + Dims: module.Dims{ + {ID: "autopilot_server_sefStatus_alive", Name: "alive"}, + {ID: "autopilot_server_sefStatus_failed", Name: "failed"}, + {ID: "autopilot_server_sefStatus_left", Name: "left"}, + {ID: "autopilot_server_sefStatus_none", Name: "none"}, + }, + } + autopilotServerVoterStatusChart = module.Chart{ + ID: "autopilot_server_voter_status", + Title: "Autopilot server Raft voting membership", + Units: "status", + Fam: "autopilot", + Ctx: "consul.autopilot_server_voter_status", + Priority: prioAutopilotServerVoterStatus, + Dims: module.Dims{ + {ID: "autopilot_server_voter_yes", Name: "voter"}, + {ID: "autopilot_server_voter_no", Name: "not_voter"}, + }, + } + + raftLeaderLastContactTimeChart = module.Chart{ + ID: "raft_leader_last_contact_time", + Title: "Raft leader last contact time", + Units: "ms", + Fam: "leadership changes", + Ctx:
"consul.raft_leader_last_contact_time", + Priority: prioRaftLeaderLastContactTime, + Dims: module.Dims{ + {ID: "raft_leader_lastContact_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "raft_leader_lastContact_quantile=0.9", Name: "quantile_0.9", Div: precision * precision}, + {ID: "raft_leader_lastContact_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + raftFollowerLastContactLeaderTimeChart = module.Chart{ + ID: "raft_follower_last_contact_leader_time", + Title: "Raft follower last contact with the leader time", + Units: "ms", + Fam: "leadership changes", + Ctx: "consul.raft_follower_last_contact_leader_time", + Priority: prioRaftFollowerLastContactLeaderTime, + Dims: module.Dims{ + {ID: "autopilot_server_lastContact_leader", Name: "leader_last_contact"}, + }, + } + raftLeaderElectionsRateChart = module.Chart{ + ID: "raft_leader_elections_rate", + Title: "Raft leader elections rate", + Units: "elections/s", + Fam: "leadership changes", + Ctx: "consul.raft_leader_elections_rate", + Priority: prioRaftLeaderElections, + Dims: module.Dims{ + {ID: "raft_state_candidate", Name: "leader", Algo: module.Incremental}, + }, + } + raftLeadershipTransitionsRateChart = module.Chart{ + ID: "raft_leadership_transitions_rate", + Title: "Raft leadership transitions rate", + Units: "transitions/s", + Fam: "leadership changes", + Ctx: "consul.raft_leadership_transitions_rate", + Priority: prioRaftLeadershipTransitions, + Dims: module.Dims{ + {ID: "raft_state_leader", Name: "leadership", Algo: module.Incremental}, + }, + } + serverLeadershipStatusChart = module.Chart{ + ID: "server_leadership_status", + Title: "Server leadership status", + Units: "status", + Fam: "leadership changes", + Ctx: "consul.server_leadership_status", + Priority: prioServerLeadershipStatus, + Dims: module.Dims{ + {ID: "server_isLeader_yes", Name: "leader"}, + {ID: "server_isLeader_no", Name: "not_leader"}, + }, + } + + networkLanRTTChart = module.Chart{ + ID: "network_lan_rtt", + Title: "Network lan RTT", + Units: "ms", + Fam: "network rtt", + Ctx: "consul.network_lan_rtt", + Type: module.Area, + Priority: prioNetworkLanRTT, + Dims: module.Dims{ + {ID: "network_lan_rtt_min", Name: "min", Div: 1e6}, + {ID: "network_lan_rtt_max", Name: "max", Div: 1e6}, + {ID: "network_lan_rtt_avg", Name: "avg", Div: 1e6}, + }, + } + + clientRPCRequestsRateChart = module.Chart{ + ID: "client_rpc_requests_rate", + Title: "Client RPC requests", + Units: "requests/s", + Fam: "rpc network activity", + Ctx: "consul.client_rpc_requests_rate", + Priority: prioRPCRequests, + Dims: module.Dims{ + {ID: "client_rpc", Name: "rpc", Algo: module.Incremental}, + }, + } + clientRPCRequestsExceededRateChart = module.Chart{ + ID: "client_rpc_requests_exceeded_rate", + Title: "Client rate-limited RPC requests", + Units: "requests/s", + Fam: "rpc network activity", + Ctx: "consul.client_rpc_requests_exceeded_rate", + Priority: prioRPCRequestsExceeded, + Dims: module.Dims{ + {ID: "client_rpc_exceeded", Name: "exceeded", Algo: module.Incremental}, + }, + } + clientRPCRequestsFailedRateChart = module.Chart{ + ID: "client_rpc_requests_failed_rate", + Title: "Client failed RPC requests", + Units: "requests/s", + Fam: "rpc network activity", + Ctx: "consul.client_rpc_requests_failed_rate", + Priority: prioRPCRequestsFailed, + Dims: module.Dims{ + {ID: "client_rpc_failed", Name: "failed", Algo: module.Incremental}, + }, + } + + raftThreadMainSaturationPercChart = module.Chart{ + ID: "raft_thread_main_saturation_perc", + 
Title: "Raft main thread saturation", + Units: "percentage", + Fam: "raft saturation", + Ctx: "consul.raft_thread_main_saturation_perc", + Priority: prioRaftThreadMainSaturation, + Dims: module.Dims{ + {ID: "raft_thread_main_saturation_quantile=0.5", Name: "quantile_0.5", Div: precision * 10}, + {ID: "raft_thread_main_saturation_quantile=0.9", Name: "quantile_0.9", Div: precision * 10}, + {ID: "raft_thread_main_saturation_quantile=0.99", Name: "quantile_0.99", Div: precision * 10}, + }, + } + raftThreadFSMSaturationPercChart = module.Chart{ + ID: "raft_thread_fsm_saturation_perc", + Title: "Raft FSM thread saturation", + Units: "percentage", + Fam: "raft saturation", + Ctx: "consul.raft_thread_fsm_saturation_perc", + Priority: prioRaftThreadFSMSaturation, + Dims: module.Dims{ + {ID: "raft_thread_fsm_saturation_quantile=0.5", Name: "quantile_0.5", Div: precision * 10}, + {ID: "raft_thread_fsm_saturation_quantile=0.9", Name: "quantile_0.9", Div: precision * 10}, + {ID: "raft_thread_fsm_saturation_quantile=0.99", Name: "quantile_0.99", Div: precision * 10}, + }, + } + + raftFSMLastRestoreDurationChart = module.Chart{ + ID: "raft_fsm_last_restore_duration", + Title: "Raft last restore duration", + Units: "ms", + Fam: "raft replication capacity", + Ctx: "consul.raft_fsm_last_restore_duration", + Priority: prioRaftFSMLastRestoreDuration, + Dims: module.Dims{ + {ID: "raft_fsm_lastRestoreDuration", Name: "last_restore_duration"}, + }, + } + raftLeaderOldestLogAgeChart = module.Chart{ + ID: "raft_leader_oldest_log_age", + Title: "Raft leader oldest log age", + Units: "seconds", + Fam: "raft replication capacity", + Ctx: "consul.raft_leader_oldest_log_age", + Priority: prioRaftLeaderOldestLogAge, + Dims: module.Dims{ + {ID: "raft_leader_oldestLogAge", Name: "oldest_log_age", Div: 1000}, + }, + } + raftRPCInstallSnapshotTimeChart = module.Chart{ + ID: "raft_rpc_install_snapshot_time", + Title: "Raft RPC install snapshot time", + Units: "ms", + Fam: "raft replication capacity", + Ctx: "consul.raft_rpc_install_snapshot_time", + Priority: prioRaftRPCInstallSnapshotTime, + Dims: module.Dims{ + {ID: "raft_rpc_installSnapshot_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "raft_rpc_installSnapshot_quantile=0.9", Name: "quantile_0.9", Div: precision * precision}, + {ID: "raft_rpc_installSnapshot_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + + raftBoltDBFreelistBytesChart = module.Chart{ + ID: "raft_boltdb_freelist_bytes", + Title: "Raft BoltDB freelist", + Units: "bytes", + Fam: "boltdb performance", + Ctx: "consul.raft_boltdb_freelist_bytes", + Priority: prioBoltDBFreelistBytes, + Dims: module.Dims{ + {ID: "raft_boltdb_freelistBytes", Name: "freelist"}, + }, + } + raftBoltDBLogsPerBatchChart = module.Chart{ + ID: "raft_boltdb_logs_per_batch_rate", + Title: "Raft BoltDB logs written per batch", + Units: "logs/s", + Fam: "boltdb performance", + Ctx: "consul.raft_boltdb_logs_per_batch_rate", + Priority: prioBoltDBLogsPerBatch, + Dims: module.Dims{ + {ID: "raft_boltdb_logsPerBatch_sum", Name: "written", Algo: module.Incremental}, + }, + } + + raftBoltDBStoreLogsTimeChart = module.Chart{ + ID: "raft_boltdb_store_logs_time", + Title: "Raft BoltDB store logs time", + Units: "ms", + Fam: "boltdb performance", + Ctx: "consul.raft_boltdb_store_logs_time", + Priority: prioBoltDBStoreLogsTime, + Dims: module.Dims{ + {ID: "raft_boltdb_storeLogs_quantile=0.5", Name: "quantile_0.5", Div: precision * precision}, + {ID: "raft_boltdb_storeLogs_quantile=0.9", Name: 
"quantile_0.9", Div: precision * precision}, + {ID: "raft_boltdb_storeLogs_quantile=0.99", Name: "quantile_0.99", Div: precision * precision}, + }, + } + + memoryAllocatedChart = module.Chart{ + ID: "memory_allocated", + Title: "Memory allocated by the Consul process", + Units: "bytes", + Fam: "memory", + Ctx: "consul.memory_allocated", + Priority: prioMemoryAllocated, + Dims: module.Dims{ + {ID: "runtime_alloc_bytes", Name: "allocated"}, + }, + } + memorySysChart = module.Chart{ + ID: "memory_sys", + Title: "Memory obtained from the OS", + Units: "bytes", + Fam: "memory", + Ctx: "consul.memory_sys", + Priority: prioMemorySys, + Dims: module.Dims{ + {ID: "runtime_sys_bytes", Name: "sys"}, + }, + } + + gcPauseTimeChart = module.Chart{ + ID: "gc_pause_time", + Title: "Garbage collection stop-the-world pause time", + Units: "seconds", + Fam: "garbage collection", + Ctx: "consul.gc_pause_time", + Priority: prioGCPauseTime, + Dims: module.Dims{ + {ID: "runtime_total_gc_pause_ns", Name: "gc_pause", Algo: module.Incremental, Div: 1e9}, + }, + } + + licenseExpirationTimeChart = module.Chart{ + ID: "license_expiration_time", + Title: "License expiration time", + Units: "seconds", + Fam: "license", + Ctx: "consul.license_expiration_time", + Priority: prioLicenseExpirationTime, + Dims: module.Dims{ + {ID: "system_licenseExpiration", Name: "license_expiration"}, + }, + } +) + +var ( + serviceHealthCheckStatusChartTmpl = module.Chart{ + ID: "health_check_%s_status", + Title: "Service health check status", + Units: "status", + Fam: "service health checks", + Ctx: "consul.service_health_check_status", + Priority: prioServiceHealthCheckStatus, + Dims: module.Dims{ + {ID: "health_check_%s_passing_status", Name: "passing"}, + {ID: "health_check_%s_critical_status", Name: "critical"}, + {ID: "health_check_%s_maintenance_status", Name: "maintenance"}, + {ID: "health_check_%s_warning_status", Name: "warning"}, + }, + } + nodeHealthCheckStatusChartTmpl = module.Chart{ + ID: "health_check_%s_status", + Title: "Node health check status", + Units: "status", + Fam: "node health checks", + Ctx: "consul.node_health_check_status", + Priority: prioNodeHealthCheckStatus, + Dims: module.Dims{ + {ID: "health_check_%s_passing_status", Name: "passing"}, + {ID: "health_check_%s_critical_status", Name: "critical"}, + {ID: "health_check_%s_maintenance_status", Name: "maintenance"}, + {ID: "health_check_%s_warning_status", Name: "warning"}, + }, + } +) + +func (c *Consul) addGlobalCharts() { + if !c.isTelemetryPrometheusEnabled() { + return + } + + var charts *module.Charts + + if !c.isServer() { + charts = clientCharts.Copy() + } else { + charts = serverCommonCharts.Copy() + + // can't really rely on checking if a response contains a metric due to retention of some metrics + // https://github.com/hashicorp/go-metrics/blob/b6d5c860c07ef6eeec89f4a662c7b452dd4d0c93/prometheus/prometheus.go#L75-L76 + if c.version != nil { + if c.version.LT(semver.Version{Major: 1, Minor: 13, Patch: 0}) { + _ = charts.Remove(raftThreadMainSaturationPercChart.ID) + _ = charts.Remove(raftThreadFSMSaturationPercChart.ID) + } + if c.version.LT(semver.Version{Major: 1, Minor: 11, Patch: 0}) { + _ = charts.Remove(kvsApplyTimeChart.ID) + _ = charts.Remove(kvsApplyOperationsRateChart.ID) + _ = charts.Remove(txnApplyTimeChart.ID) + _ = charts.Remove(txnApplyOperationsRateChart.ID) + _ = charts.Remove(raftBoltDBFreelistBytesChart.ID) + } + } + } + + if !c.hasLicense() { + _ = charts.Remove(licenseExpirationTimeChart.ID) + } + + for _, chart := range *charts 
{ + chart.Labels = []module.Label{ + {Key: "datacenter", Value: c.cfg.Config.Datacenter}, + {Key: "node_name", Value: c.cfg.Config.NodeName}, + } + } + + if err := c.Charts().Add(*charts.Copy()...); err != nil { + c.Warning(err) + } +} + +func (c *Consul) addServerAutopilotHealthCharts() { + charts := serverAutopilotHealthCharts.Copy() + + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "datacenter", Value: c.cfg.Config.Datacenter}, + {Key: "node_name", Value: c.cfg.Config.NodeName}, + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func newServiceHealthCheckChart(check *agentCheck) *module.Chart { + chart := serviceHealthCheckStatusChartTmpl.Copy() + chart.ID = fmt.Sprintf(chart.ID, check.CheckID) + chart.Labels = []module.Label{ + {Key: "node_name", Value: check.Node}, + {Key: "check_name", Value: check.Name}, + {Key: "service_name", Value: check.ServiceName}, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, check.CheckID) + } + return chart +} + +func newNodeHealthCheckChart(check *agentCheck) *module.Chart { + chart := nodeHealthCheckStatusChartTmpl.Copy() + chart.ID = fmt.Sprintf(chart.ID, check.CheckID) + chart.Labels = []module.Label{ + {Key: "node_name", Value: check.Node}, + {Key: "check_name", Value: check.Name}, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, check.CheckID) + } + return chart +} + +func (c *Consul) addHealthCheckCharts(check *agentCheck) { + var chart *module.Chart + + if check.ServiceName != "" { + chart = newServiceHealthCheckChart(check) + } else { + chart = newNodeHealthCheckChart(check) + } + + chart.Labels = append(chart.Labels, module.Label{ + Key: "datacenter", + Value: c.cfg.Config.Datacenter, + }) + + if err := c.Charts().Add(chart); err != nil { + c.Warning(err) + } +} + +func (c *Consul) removeHealthCheckCharts(checkID string) { + id := fmt.Sprintf("health_check_%s_status", checkID) + + chart := c.Charts().Get(id) + if chart == nil { + c.Warningf("failed to remove '%s' chart: the chart does not exist", id) + return + } + + chart.MarkRemove() + chart.MarkNotCreated() +} + +func (c *Consul) addLeaderCharts() { + charts := serverLeaderCharts.Copy() + + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "datacenter", Value: c.cfg.Config.Datacenter}, + {Key: "node_name", Value: c.cfg.Config.NodeName}, + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func (c *Consul) removeLeaderCharts() { + s := make(map[string]bool) + for _, v := range serverLeaderCharts { + s[v.ID] = true + } + + for _, v := range *c.Charts() { + if s[v.ID] { + v.MarkRemove() + v.MarkNotCreated() + } + } +} + +func (c *Consul) addFollowerCharts() { + charts := serverFollowerCharts.Copy() + if c.isCloudManaged() { + // 'autopilot_server_lastContact_leader' comes from 'operator/autopilot/health' which is disabled + _ = charts.Remove(raftFollowerLastContactLeaderTimeChart.ID) + } + + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "datacenter", Value: c.cfg.Config.Datacenter}, + {Key: "node_name", Value: c.cfg.Config.NodeName}, + } + } + + if err := c.Charts().Add(*charts...); err != nil { + c.Warning(err) + } +} + +func (c *Consul) removeFollowerCharts() { + s := make(map[string]bool) + for _, v := range serverFollowerCharts { + s[v.ID] = true + } + + for _, v := range *c.Charts() { + if s[v.ID] { + v.MarkRemove() + v.MarkNotCreated() + } + } +} diff --git 
a/src/go/collectors/go.d.plugin/modules/consul/collect.go b/src/go/collectors/go.d.plugin/modules/consul/collect.go new file mode 100644 index 00000000000000..2f4acbdfab12e9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + precision = 1000 +) + +func (c *Consul) collect() (map[string]int64, error) { + if c.cfg == nil { + if err := c.collectConfiguration(); err != nil { + return nil, err + } + + c.addGlobalChartsOnce.Do(c.addGlobalCharts) + } + + mx := make(map[string]int64) + + if err := c.collectChecks(mx); err != nil { + return nil, err + } + + if c.isServer() { + if !c.isCloudManaged() { + c.addServerAutopilotChartsOnce.Do(c.addServerAutopilotHealthCharts) + // 'operator/autopilot/health' is disabled in Cloud managed (403: Operation is not allowed in managed Consul clusters) + if err := c.collectAutopilotHealth(mx); err != nil { + return nil, err + } + } + if err := c.collectNetworkRTT(mx); err != nil { + return nil, err + } + } + + if c.isTelemetryPrometheusEnabled() { + if err := c.collectMetricsPrometheus(mx); err != nil { + return nil, err + } + } + + return mx, nil +} + +func (c *Consul) isTelemetryPrometheusEnabled() bool { + return c.cfg.DebugConfig.Telemetry.PrometheusOpts.Expiration != "0s" +} + +func (c *Consul) isCloudManaged() bool { + return c.cfg.DebugConfig.Cloud.ClientSecret != "" || c.cfg.DebugConfig.Cloud.ResourceID != "" +} + +func (c *Consul) hasLicense() bool { + return c.cfg.Stats.License.ID != "" +} + +func (c *Consul) isServer() bool { + return c.cfg.Config.Server +} + +func (c *Consul) doOKDecode(urlPath string, in interface{}, statusCodes ...int) error { + req, err := web.NewHTTPRequest(c.Request.Copy()) + if err != nil { + return fmt.Errorf("error on creating request: %v", err) + } + + req.URL.Path = urlPath + if c.ACLToken != "" { + req.Header.Set("X-Consul-Token", c.ACLToken) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on request to %s : %v", req.URL, err) + } + + defer closeBody(resp) + + codes := map[int]bool{http.StatusOK: true} + for _, v := range statusCodes { + codes[v] = true + } + + if !codes[resp.StatusCode] { + return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + if err = json.NewDecoder(resp.Body).Decode(&in); err != nil { + return fmt.Errorf("error on decoding response from %s : %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/collect_autopilot.go b/src/go/collectors/go.d.plugin/modules/consul/collect_autopilot.go new file mode 100644 index 00000000000000..c6055857f2f25e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect_autopilot.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "net/http" + "time" +) + +const ( + // https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health + urlPathOperationAutopilotHealth = "/v1/operator/autopilot/health" +) + +type autopilotHealth struct { + Servers []struct { + ID string + SerfStatus string + Leader bool + LastContact string + Healthy bool + Voter bool + 
StableSince time.Time + } +} + +func (c *Consul) collectAutopilotHealth(mx map[string]int64) error { + var health autopilotHealth + + // The HTTP status code will indicate the health of the cluster: 200 is healthy, 429 is unhealthy. + // https://github.com/hashicorp/consul/blob/c7ef04c5979dbc311ff3c67b7bf3028a93e8b0f1/agent/operator_endpoint.go#L325 + if err := c.doOKDecode(urlPathOperationAutopilotHealth, &health, http.StatusTooManyRequests); err != nil { + return err + } + + for _, srv := range health.Servers { + if srv.ID == c.cfg.Config.NodeID { + // SerfStatus: alive, left, failed or none: + // https://github.com/hashicorp/consul/blob/c7ef04c5979dbc311ff3c67b7bf3028a93e8b0f1/agent/consul/operator_autopilot_endpoint.go#L124-L133 + mx["autopilot_server_sefStatus_alive"] = boolToInt(srv.SerfStatus == "alive") + mx["autopilot_server_sefStatus_left"] = boolToInt(srv.SerfStatus == "left") + mx["autopilot_server_sefStatus_failed"] = boolToInt(srv.SerfStatus == "failed") + mx["autopilot_server_sefStatus_none"] = boolToInt(srv.SerfStatus == "none") + // https://github.com/hashicorp/raft-autopilot/blob/d936f51c374c3b7902d5e4fdafe9f7d8d199ea53/types.go#L110 + mx["autopilot_server_healthy_yes"] = boolToInt(srv.Healthy) + mx["autopilot_server_healthy_no"] = boolToInt(!srv.Healthy) + mx["autopilot_server_voter_yes"] = boolToInt(srv.Voter) + mx["autopilot_server_voter_no"] = boolToInt(!srv.Voter) + mx["autopilot_server_stable_time"] = int64(time.Since(srv.StableSince).Seconds()) + if !srv.Leader { + if v, err := time.ParseDuration(srv.LastContact); err == nil { + mx["autopilot_server_lastContact_leader"] = v.Milliseconds() + } + } + + break + } + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/collect_checks.go b/src/go/collectors/go.d.plugin/modules/consul/collect_checks.go new file mode 100644 index 00000000000000..88ea4612bb3ff1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect_checks.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +const ( + // https://www.consul.io/api-docs/agent/check#list-checks + urlPathAgentChecks = "/v1/agent/checks" +) + +type agentCheck struct { + Node string + CheckID string + Name string + Status string + ServiceID string + ServiceName string + ServiceTags []string +} + +func (c *Consul) collectChecks(mx map[string]int64) error { + var checks map[string]*agentCheck + + if err := c.doOKDecode(urlPathAgentChecks, &checks); err != nil { + return err + } + + for id, check := range checks { + if !c.checks[id] { + c.checks[id] = true + c.addHealthCheckCharts(check) + } + + mx["health_check_"+id+"_passing_status"] = boolToInt(check.Status == "passing") + mx["health_check_"+id+"_warning_status"] = boolToInt(check.Status == "warning") + mx["health_check_"+id+"_critical_status"] = boolToInt(check.Status == "critical") + mx["health_check_"+id+"_maintenance_status"] = boolToInt(check.Status == "maintenance") + } + + for id := range c.checks { + if _, ok := checks[id]; !ok { + delete(c.checks, id) + c.removeHealthCheckCharts(id) + } + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/collect_config.go b/src/go/collectors/go.d.plugin/modules/consul/collect_config.go new file mode 100644 index 00000000000000..14c77067f0b97a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect_config.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "github.com/blang/semver/v4" +) + +const ( + //
https://developer.hashicorp.com/consul/api-docs/agent#read-configuration + urlPathAgentSelf = "/v1/agent/self" +) + +type consulConfig struct { + Config struct { + Datacenter string + PrimaryDatacenter string + NodeName string + NodeID string + Server bool + Version string + } + DebugConfig struct { + Telemetry struct { + MetricsPrefix string + DisableHostname bool + PrometheusOpts struct { + Expiration string + Name string + } + } + Cloud struct { + AuthURL string + ClientID string + ClientSecret string + Hostname string + ResourceID string + ScadaAddress string + } + } + Stats struct { + License struct { + ID string `json:"id"` + } `json:"license"` + } +} + +func (c *Consul) collectConfiguration() error { + var cfg consulConfig + + if err := c.doOKDecode(urlPathAgentSelf, &cfg); err != nil { + return err + } + + c.cfg = &cfg + c.Debugf("consul config: %+v", cfg) + + if !c.isTelemetryPrometheusEnabled() { + c.Warning("export of Prometheus metrics is disabled") + } + + ver, err := semver.New(c.cfg.Config.Version) + if err != nil { + c.Warningf("error on parsing Consul version '%s': %v", c.cfg.Config.Version, err) + return nil + } + + c.version = ver + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/collect_metrics.go b/src/go/collectors/go.d.plugin/modules/consul/collect_metrics.go new file mode 100644 index 00000000000000..9e44e2b30b5fad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect_metrics.go @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (c *Consul) collectMetricsPrometheus(mx map[string]int64) error { + mfs, err := c.prom.Scrape() + if err != nil { + return err + } + + // Key Metrics (https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) + + // Prometheus metrics are messy: + // - if 'disable_hostname' is false (default): + // - consul_autopilot_failure_tolerance => consul_hostname_autopilot_failure_tolerance + // - both are exposed + // - only the one with the hostname has the correct value + // - 1.14.3 (this probably has something to do with the cloud-managed version): + // - runtime_sys_bytes => runtime_sys_bytes_sys_bytes; consul_autopilot_healthy => consul_autopilot_healthy_healthy + // - both are exposed + // - only the one with the double name has the correct value + + if c.isServer() { + c.collectSummary(mx, mfs, "raft_thread_main_saturation") + c.collectSummary(mx, mfs, "raft_thread_fsm_saturation") + c.collectSummary(mx, mfs, "raft_boltdb_logsPerBatch") + c.collectSummary(mx, mfs, "kvs_apply") + c.collectSummary(mx, mfs, "txn_apply") + c.collectSummary(mx, mfs, "raft_boltdb_storeLogs") + c.collectSummary(mx, mfs, "raft_rpc_installSnapshot") // makes sense for followers only + c.collectSummary(mx, mfs, "raft_commitTime") // makes sense for leader only + c.collectSummary(mx, mfs, "raft_leader_lastContact") // makes sense for leader only + + c.collectCounter(mx, mfs, "raft_apply", precision) // makes sense for leader only + c.collectCounter(mx, mfs, "raft_state_candidate", 1) + c.collectCounter(mx, mfs, "raft_state_leader", 1) + + c.collectGaugeBool(mx, mfs, "autopilot_healthy", "autopilot_healthy_healthy") + c.collectGaugeBool(mx, mfs, "server_isLeader", "server_isLeader_isLeader") + c.collectGauge(mx, mfs, "autopilot_failure_tolerance", 1, "autopilot_failure_tolerance_failure_tolerance") + c.collectGauge(mx, mfs, "raft_fsm_lastRestoreDuration", 1) +
c.collectGauge(mx, mfs, "raft_leader_oldestLogAge", 1, "raft_leader_oldestLogAge_oldestLogAge") + c.collectGauge(mx, mfs, "raft_boltdb_freelistBytes", 1, "raft_boltdb_freelistBytes_freelistBytes") + + if isLeader, ok := c.isLeader(mfs); ok { + if isLeader && !c.hasLeaderCharts { + c.addLeaderCharts() + c.hasLeaderCharts = true + } + if !isLeader && c.hasLeaderCharts { + c.removeLeaderCharts() + c.hasLeaderCharts = false + } + if !isLeader && !c.hasFollowerCharts { + c.addFollowerCharts() + c.hasFollowerCharts = true + } + if isLeader && c.hasFollowerCharts { + c.removeFollowerCharts() + c.hasFollowerCharts = false + } + } + } + + c.collectGauge(mx, mfs, "system_licenseExpiration", 3600, "system_licenseExpiration_licenseExpiration") + + c.collectCounter(mx, mfs, "client_rpc", 1) + c.collectCounter(mx, mfs, "client_rpc_exceeded", 1) + c.collectCounter(mx, mfs, "client_rpc_failed", 1) + + c.collectGauge(mx, mfs, "runtime_alloc_bytes", 1, "runtime_alloc_bytes_alloc_bytes") + c.collectGauge(mx, mfs, "runtime_sys_bytes", 1, "runtime_sys_bytes_sys_bytes") + c.collectGauge(mx, mfs, "runtime_total_gc_pause_ns", 1, "runtime_total_gc_pause_ns_total_gc_pause_ns") + + return nil +} + +func (c *Consul) isLeader(mfs prometheus.MetricFamilies) (bool, bool) { + var mf *prometheus.MetricFamily + for _, v := range []string{"server_isLeader_isLeader", "server_isLeader"} { + if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil { + break + } + if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil { + break + } + } + + if mf == nil { + return false, false + } + + return mf.Metrics()[0].Gauge().Value() == 1, true +} + +func (c *Consul) collectGauge(mx map[string]int64, mfs prometheus.MetricFamilies, name string, mul float64, aliases ...string) { + var mf *prometheus.MetricFamily + for _, v := range append(aliases, name) { + if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil { + break + } + if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil { + break + } + } + + if mf == nil { + return + } + + v := mf.Metrics()[0].Gauge().Value() + + if !math.IsNaN(v) { + mx[name] = int64(v * mul) + } +} + +func (c *Consul) collectGaugeBool(mx map[string]int64, mfs prometheus.MetricFamilies, name string, aliases ...string) { + var mf *prometheus.MetricFamily + for _, v := range append(aliases, name) { + if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil { + break + } + if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil { + break + } + } + + if mf == nil { + return + } + + v := mf.Metrics()[0].Gauge().Value() + + if !math.IsNaN(v) { + mx[name+"_yes"] = boolToInt(v == 1) + mx[name+"_no"] = boolToInt(v == 0) + } +} + +func (c *Consul) collectCounter(mx map[string]int64, mfs prometheus.MetricFamilies, name string, mul float64) { + mf := mfs.GetCounter(c.promMetricName(name)) + if mf == nil { + return + } + + v := mf.Metrics()[0].Counter().Value() + + if !math.IsNaN(v) { + mx[name] = int64(v * mul) + } +} + +func (c *Consul) collectSummary(mx map[string]int64, mfs prometheus.MetricFamilies, name string) { + mf := mfs.GetSummary(c.promMetricName(name)) + if mf == nil { + return + } + + m := mf.Metrics()[0] + + for _, q := range m.Summary().Quantiles() { + v := q.Value() + // MaxAge is 10 seconds (hardcoded) + // https://github.com/hashicorp/go-metrics/blob/b6d5c860c07ef6eeec89f4a662c7b452dd4d0c93/prometheus/prometheus.go#L227 + if math.IsNaN(v) { + v = 0 + } + + id := fmt.Sprintf("%s_quantile=%s", name, formatFloat(q.Quantile())) + mx[id] = int64(v * precision * precision) + } + + 
mx[name+"_sum"] = int64(m.Summary().Sum() * precision) + mx[name+"_count"] = int64(m.Summary().Count()) +} + +func (c *Consul) promMetricName(name string) string { + px := c.cfg.DebugConfig.Telemetry.MetricsPrefix + return px + "_" + name +} + +var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_", "=", "_", "-", "_", "/", "_") + +// controlled by 'disable_hostname' +// https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-disable_hostname +func (c *Consul) promMetricNameWithHostname(name string) string { + px := c.cfg.DebugConfig.Telemetry.MetricsPrefix + node := forbiddenCharsReplacer.Replace(c.cfg.Config.NodeName) + + return px + "_" + node + "_" + name +} + +func formatFloat(v float64) string { + return strconv.FormatFloat(v, 'f', -1, 64) +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/collect_net_rtt.go b/src/go/collectors/go.d.plugin/modules/consul/collect_net_rtt.go new file mode 100644 index 00000000000000..50825e55680e2c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/collect_net_rtt.go @@ -0,0 +1,75 @@ +package consul + +import ( + "math" + "time" + + "github.com/netdata/go.d.plugin/pkg/metrics" +) + +const ( + // https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes + urlPathCoordinateNodes = "/v1/coordinate/nodes" +) + +type nodeCoordinates struct { + Node string + Coord struct { + Vec []float64 + Error float64 + Adjustment float64 + Height float64 + } +} + +func (c *Consul) collectNetworkRTT(mx map[string]int64) error { + var coords []nodeCoordinates + + if err := c.doOKDecode(urlPathCoordinateNodes, &coords); err != nil { + return err + } + + var thisNode nodeCoordinates + var ok bool + + coords, thisNode, ok = removeNodeCoordinates(coords, c.cfg.Config.NodeName) + if !ok || len(coords) == 0 { + return nil + } + + sum := metrics.NewSummary() + for _, v := range coords { + d := calcDistance(thisNode, v) + sum.Observe(d.Seconds()) + } + sum.WriteTo(mx, "network_lan_rtt", 1e9, 1) + + return nil +} + +func calcDistance(a, b nodeCoordinates) time.Duration { + // https://developer.hashicorp.com/consul/docs/architecture/coordinates#working-with-coordinates + sum := 0.0 + for i := 0; i < len(a.Coord.Vec); i++ { + diff := a.Coord.Vec[i] - b.Coord.Vec[i] + sum += diff * diff + } + + rtt := math.Sqrt(sum) + a.Coord.Height + b.Coord.Height + + adjusted := rtt + a.Coord.Adjustment + b.Coord.Adjustment + if adjusted > 0.0 { + rtt = adjusted + } + + return time.Duration(rtt * 1e9) // nanoseconds +} + +func removeNodeCoordinates(coords []nodeCoordinates, node string) ([]nodeCoordinates, nodeCoordinates, bool) { + for i, v := range coords { + if v.Node == node { + return append(coords[:i], coords[i+1:]...), v, true + } + } + return coords, nodeCoordinates{}, false +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/config_schema.json b/src/go/collectors/go.d.plugin/modules/consul/config_schema.json new file mode 100644 index 00000000000000..a7172369678c12 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/config_schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/consul job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "acl_token": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": 
"string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/consul.go b/src/go/collectors/go.d.plugin/modules/consul/consul.go new file mode 100644 index 00000000000000..ebd10984a27bfd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/consul.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + _ "embed" + "net/http" + "sync" + "time" + + "github.com/blang/semver/v4" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("consul", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Consul { + return &Consul{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:8500"}, + Client: web.Client{Timeout: web.Duration{Duration: time.Second * 2}}, + }, + }, + charts: &module.Charts{}, + addGlobalChartsOnce: &sync.Once{}, + addServerAutopilotChartsOnce: &sync.Once{}, + checks: make(map[string]bool), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + + ACLToken string `yaml:"acl_token"` +} + +type Consul struct { + module.Base + + Config `yaml:",inline"` + + charts *module.Charts + addGlobalChartsOnce *sync.Once + addServerAutopilotChartsOnce *sync.Once + + httpClient *http.Client + prom prometheus.Prometheus + + cfg *consulConfig + version *semver.Version + + hasLeaderCharts bool + hasFollowerCharts bool + checks map[string]bool +} + +func (c *Consul) Init() bool { + if err := c.validateConfig(); err != nil { + c.Errorf("config validation: %v", err) + return false + } + + httpClient, err := c.initHTTPClient() + if err != nil { + c.Errorf("init HTTP client: %v", err) + return false + } + c.httpClient = httpClient + + prom, err := c.initPrometheusClient(httpClient) + if err != nil { + c.Errorf("init Prometheus client: %v", err) + return false + } + c.prom = prom + + return true +} + +func (c *Consul) Check() bool { + return len(c.Collect()) > 0 +} + +func (c *Consul) Charts() *module.Charts { + return c.charts +} + +func (c *Consul) Collect() map[string]int64 { + mx, err := c.collect() + if err != nil { + c.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (c *Consul) Cleanup() { + if c.httpClient != nil { + c.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/consul_test.go b/src/go/collectors/go.d.plugin/modules/consul/consul_test.go new file mode 100644 index 00000000000000..b8f9908933db8d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/consul_test.go @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +var ( + datav1132Checks, _ = 
os.ReadFile("testdata/v1.13.2/v1-agent-checks.json") + dataV1132ClientSelf, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-self.json") + dataV1132ClientPromMetrics, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-metrics.txt") + dataV1132ServerSelf, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self.json") + dataV1132ServerSelfDisabledPrometheus, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_disabled_prom.json") + dataV1132ServerSelfWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_with_hostname.json") + dataV1132ServerPromMetrics, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics.txt") + dataV1132ServerPromMetricsWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt") + dataV1132ServerOperatorAutopilotHealth, _ = os.ReadFile("testdata/v1.13.2/server_v1-operator-autopilot-health.json") + dataV1132ServerCoordinateNodes, _ = os.ReadFile("testdata/v1.13.2/server_v1-coordinate-nodes.json") + + dataV1143CloudServerPromMetrics, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-metrics.txt") + dataV1143CloudServerSelf, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-self.json") + dataV1143CloudServerCoordinateNodes, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json") + dataV1143CloudChecks, _ = os.ReadFile("testdata/v1.14.3-cloud/v1-agent-checks.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "datav1132Checks": datav1132Checks, + "dataV1132ClientSelf": dataV1132ClientSelf, + "dataV1132ClientPromMetrics": dataV1132ClientPromMetrics, + "dataV1132ServerSelf": dataV1132ServerSelf, + "dataV1132ServerSelfWithHostname": dataV1132ServerSelfWithHostname, + "dataV1132ServerSelfDisabledPrometheus": dataV1132ServerSelfDisabledPrometheus, + "dataV1132ServerPromMetrics": dataV1132ServerPromMetrics, + "dataV1132ServerPromMetricsWithHostname": dataV1132ServerPromMetricsWithHostname, + "dataV1132ServerOperatorAutopilotHealth": dataV1132ServerOperatorAutopilotHealth, + "dataV1132ServerCoordinateNodes": dataV1132ServerCoordinateNodes, + "dataV1143CloudServerPromMetrics": dataV1143CloudServerPromMetrics, + "dataV1143CloudServerSelf": dataV1143CloudServerSelf, + "dataV1143CloudServerCoordinateNodes": dataV1143CloudServerCoordinateNodes, + "dataV1143CloudChecks": dataV1143CloudChecks, + } { + require.NotNilf(t, data, name) + } +} + +func TestConsul_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + consul := New() + consul.Config = test.config + + if test.wantFail { + assert.False(t, consul.Init()) + } else { + assert.True(t, consul.Init()) + } + }) + } +} + +func TestConsul_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (consul *Consul, cleanup func()) + }{ + "success on response from Consul v1.13.2 server": { + wantFail: false, + prepare: caseConsulV1132ServerResponse, + }, + "success on response from Consul v1.14.3 server cloud managed": { + wantFail: false, + prepare: caseConsulV1143CloudServerResponse, + }, + "success on response from Consul v1.13.2 server with enabled hostname": { + wantFail: false, + prepare: caseConsulV1132ServerWithHostnameResponse, + }, + "success on 
response from Consul v1.13.2 server with disabled prometheus": { + wantFail: false, + prepare: caseConsulV1132ServerWithDisabledPrometheus, + }, + "success on response from Consul v1.13.2 client": { + wantFail: false, + prepare: caseConsulV1132ClientResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + consul, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, consul.Check()) + } else { + assert.True(t, consul.Check()) + } + }) + } +} + +func TestConsul_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (consul *Consul, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on response from Consul v1.13.2 server": { + prepare: caseConsulV1132ServerResponse, + // 3 node, 1 service check, no license + wantNumOfCharts: len(serverCommonCharts) + len(serverAutopilotHealthCharts) + len(serverLeaderCharts) + 3 + 1 - 1, + wantMetrics: map[string]int64{ + "autopilot_failure_tolerance": 1, + "autopilot_healthy_no": 0, + "autopilot_healthy_yes": 1, + "autopilot_server_healthy_no": 0, + "autopilot_server_healthy_yes": 1, + "autopilot_server_lastContact_leader": 13, + "autopilot_server_sefStatus_alive": 1, + "autopilot_server_sefStatus_failed": 0, + "autopilot_server_sefStatus_left": 0, + "autopilot_server_sefStatus_none": 0, + "autopilot_server_stable_time": 265849, + "autopilot_server_voter_no": 0, + "autopilot_server_voter_yes": 1, + "client_rpc": 6838, + "client_rpc_exceeded": 0, + "client_rpc_failed": 0, + "health_check_chk1_critical_status": 0, + "health_check_chk1_maintenance_status": 0, + "health_check_chk1_passing_status": 1, + "health_check_chk1_warning_status": 0, + "health_check_chk2_critical_status": 1, + "health_check_chk2_maintenance_status": 0, + "health_check_chk2_passing_status": 0, + "health_check_chk2_warning_status": 0, + "health_check_chk3_critical_status": 1, + "health_check_chk3_maintenance_status": 0, + "health_check_chk3_passing_status": 0, + "health_check_chk3_warning_status": 0, + "health_check_mysql_critical_status": 1, + "health_check_mysql_maintenance_status": 0, + "health_check_mysql_passing_status": 0, + "health_check_mysql_warning_status": 0, + "kvs_apply_count": 0, + "kvs_apply_quantile=0.5": 0, + "kvs_apply_quantile=0.9": 0, + "kvs_apply_quantile=0.99": 0, + "kvs_apply_sum": 0, + "network_lan_rtt_avg": 737592, + "network_lan_rtt_count": 2, + "network_lan_rtt_max": 991168, + "network_lan_rtt_min": 484017, + "network_lan_rtt_sum": 1475185, + "raft_apply": 10681000, + "raft_boltdb_freelistBytes": 11264, + "raft_boltdb_logsPerBatch_count": 12360, + "raft_boltdb_logsPerBatch_quantile=0.5": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.9": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.99": 1000000, + "raft_boltdb_logsPerBatch_sum": 12362000, + "raft_boltdb_storeLogs_count": 12360, + "raft_boltdb_storeLogs_quantile=0.5": 13176624, + "raft_boltdb_storeLogs_quantile=0.9": 13176624, + "raft_boltdb_storeLogs_quantile=0.99": 13176624, + "raft_boltdb_storeLogs_sum": 651888027, + "raft_commitTime_count": 12345, + "raft_commitTime_quantile=0.5": 41146488, + "raft_commitTime_quantile=0.9": 41146488, + "raft_commitTime_quantile=0.99": 41146488, + "raft_commitTime_sum": 955781149, + 
"raft_fsm_lastRestoreDuration": 2, + "raft_leader_lastContact_count": 80917, + "raft_leader_lastContact_quantile=0.5": 33000000, + "raft_leader_lastContact_quantile=0.9": 68000000, + "raft_leader_lastContact_quantile=0.99": 68000000, + "raft_leader_lastContact_sum": 3066900000, + "raft_leader_oldestLogAge": 166046464, + "raft_rpc_installSnapshot_count": 0, + "raft_rpc_installSnapshot_quantile=0.5": 0, + "raft_rpc_installSnapshot_quantile=0.9": 0, + "raft_rpc_installSnapshot_quantile=0.99": 0, + "raft_rpc_installSnapshot_sum": 0, + "raft_state_candidate": 1, + "raft_state_leader": 1, + "raft_thread_fsm_saturation_count": 11923, + "raft_thread_fsm_saturation_quantile=0.5": 0, + "raft_thread_fsm_saturation_quantile=0.9": 0, + "raft_thread_fsm_saturation_quantile=0.99": 0, + "raft_thread_fsm_saturation_sum": 90, + "raft_thread_main_saturation_count": 43067, + "raft_thread_main_saturation_quantile=0.5": 0, + "raft_thread_main_saturation_quantile=0.9": 0, + "raft_thread_main_saturation_quantile=0.99": 0, + "raft_thread_main_saturation_sum": 205409, + "runtime_alloc_bytes": 53065368, + "runtime_sys_bytes": 84955160, + "runtime_total_gc_pause_ns": 1372001280, + "server_isLeader_no": 0, + "server_isLeader_yes": 1, + "txn_apply_count": 0, + "txn_apply_quantile=0.5": 0, + "txn_apply_quantile=0.9": 0, + "txn_apply_quantile=0.99": 0, + "txn_apply_sum": 0, + }, + }, + "success on response from Consul v1.14.3 server cloud managed": { + prepare: caseConsulV1143CloudServerResponse, + // 3 node, 1 service check, license + wantNumOfCharts: len(serverCommonCharts) + len(serverLeaderCharts) + 3 + 1, + wantMetrics: map[string]int64{ + "autopilot_failure_tolerance": 0, + "autopilot_healthy_no": 0, + "autopilot_healthy_yes": 1, + "client_rpc": 438718, + "client_rpc_exceeded": 0, + "client_rpc_failed": 0, + "health_check_chk1_critical_status": 0, + "health_check_chk1_maintenance_status": 0, + "health_check_chk1_passing_status": 1, + "health_check_chk1_warning_status": 0, + "health_check_chk2_critical_status": 1, + "health_check_chk2_maintenance_status": 0, + "health_check_chk2_passing_status": 0, + "health_check_chk2_warning_status": 0, + "health_check_chk3_critical_status": 1, + "health_check_chk3_maintenance_status": 0, + "health_check_chk3_passing_status": 0, + "health_check_chk3_warning_status": 0, + "health_check_mysql_critical_status": 1, + "health_check_mysql_maintenance_status": 0, + "health_check_mysql_passing_status": 0, + "health_check_mysql_warning_status": 0, + "kvs_apply_count": 2, + "kvs_apply_quantile=0.5": 0, + "kvs_apply_quantile=0.9": 0, + "kvs_apply_quantile=0.99": 0, + "kvs_apply_sum": 18550, + "network_lan_rtt_avg": 1321107, + "network_lan_rtt_count": 1, + "network_lan_rtt_max": 1321107, + "network_lan_rtt_min": 1321107, + "network_lan_rtt_sum": 1321107, + "raft_apply": 115252000, + "raft_boltdb_freelistBytes": 26008, + "raft_boltdb_logsPerBatch_count": 122794, + "raft_boltdb_logsPerBatch_quantile=0.5": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.9": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.99": 1000000, + "raft_boltdb_logsPerBatch_sum": 122856000, + "raft_boltdb_storeLogs_count": 122794, + "raft_boltdb_storeLogs_quantile=0.5": 1673303, + "raft_boltdb_storeLogs_quantile=0.9": 2210979, + "raft_boltdb_storeLogs_quantile=0.99": 2210979, + "raft_boltdb_storeLogs_sum": 278437403, + "raft_commitTime_count": 122785, + "raft_commitTime_quantile=0.5": 1718204, + "raft_commitTime_quantile=0.9": 2262192, + "raft_commitTime_quantile=0.99": 2262192, + "raft_commitTime_sum": 284260428, + 
"raft_fsm_lastRestoreDuration": 0, + "raft_leader_lastContact_count": 19, + "raft_leader_lastContact_quantile=0.5": 0, + "raft_leader_lastContact_quantile=0.9": 0, + "raft_leader_lastContact_quantile=0.99": 0, + "raft_leader_lastContact_sum": 598000, + "raft_leader_oldestLogAge": 68835264, + "raft_rpc_installSnapshot_count": 1, + "raft_rpc_installSnapshot_quantile=0.5": 0, + "raft_rpc_installSnapshot_quantile=0.9": 0, + "raft_rpc_installSnapshot_quantile=0.99": 0, + "raft_rpc_installSnapshot_sum": 473038, + "raft_state_candidate": 1, + "raft_state_leader": 1, + "raft_thread_fsm_saturation_count": 44326, + "raft_thread_fsm_saturation_quantile=0.5": 0, + "raft_thread_fsm_saturation_quantile=0.9": 0, + "raft_thread_fsm_saturation_quantile=0.99": 0, + "raft_thread_fsm_saturation_sum": 729, + "raft_thread_main_saturation_count": 451221, + "raft_thread_main_saturation_quantile=0.5": 0, + "raft_thread_main_saturation_quantile=0.9": 0, + "raft_thread_main_saturation_quantile=0.99": 9999, + "raft_thread_main_saturation_sum": 213059, + "runtime_alloc_bytes": 51729856, + "runtime_sys_bytes": 160156960, + "runtime_total_gc_pause_ns": 832754048, + "server_isLeader_no": 0, + "server_isLeader_yes": 1, + "system_licenseExpiration": 2949945, + "txn_apply_count": 0, + "txn_apply_quantile=0.5": 0, + "txn_apply_quantile=0.9": 0, + "txn_apply_quantile=0.99": 0, + "txn_apply_sum": 0, + }, + }, + "success on response from Consul v1.13.2 server with enabled hostname": { + prepare: caseConsulV1132ServerResponse, + // 3 node, 1 service check, no license + wantNumOfCharts: len(serverCommonCharts) + len(serverAutopilotHealthCharts) + len(serverLeaderCharts) + 3 + 1 - 1, + wantMetrics: map[string]int64{ + "autopilot_failure_tolerance": 1, + "autopilot_healthy_no": 0, + "autopilot_healthy_yes": 1, + "autopilot_server_healthy_no": 0, + "autopilot_server_healthy_yes": 1, + "autopilot_server_lastContact_leader": 13, + "autopilot_server_sefStatus_alive": 1, + "autopilot_server_sefStatus_failed": 0, + "autopilot_server_sefStatus_left": 0, + "autopilot_server_sefStatus_none": 0, + "autopilot_server_stable_time": 265825, + "autopilot_server_voter_no": 0, + "autopilot_server_voter_yes": 1, + "client_rpc": 6838, + "client_rpc_exceeded": 0, + "client_rpc_failed": 0, + "health_check_chk1_critical_status": 0, + "health_check_chk1_maintenance_status": 0, + "health_check_chk1_passing_status": 1, + "health_check_chk1_warning_status": 0, + "health_check_chk2_critical_status": 1, + "health_check_chk2_maintenance_status": 0, + "health_check_chk2_passing_status": 0, + "health_check_chk2_warning_status": 0, + "health_check_chk3_critical_status": 1, + "health_check_chk3_maintenance_status": 0, + "health_check_chk3_passing_status": 0, + "health_check_chk3_warning_status": 0, + "health_check_mysql_critical_status": 1, + "health_check_mysql_maintenance_status": 0, + "health_check_mysql_passing_status": 0, + "health_check_mysql_warning_status": 0, + "kvs_apply_count": 0, + "kvs_apply_quantile=0.5": 0, + "kvs_apply_quantile=0.9": 0, + "kvs_apply_quantile=0.99": 0, + "kvs_apply_sum": 0, + "network_lan_rtt_avg": 737592, + "network_lan_rtt_count": 2, + "network_lan_rtt_max": 991168, + "network_lan_rtt_min": 484017, + "network_lan_rtt_sum": 1475185, + "raft_apply": 10681000, + "raft_boltdb_freelistBytes": 11264, + "raft_boltdb_logsPerBatch_count": 12360, + "raft_boltdb_logsPerBatch_quantile=0.5": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.9": 1000000, + "raft_boltdb_logsPerBatch_quantile=0.99": 1000000, + "raft_boltdb_logsPerBatch_sum": 
12362000, + "raft_boltdb_storeLogs_count": 12360, + "raft_boltdb_storeLogs_quantile=0.5": 13176624, + "raft_boltdb_storeLogs_quantile=0.9": 13176624, + "raft_boltdb_storeLogs_quantile=0.99": 13176624, + "raft_boltdb_storeLogs_sum": 651888027, + "raft_commitTime_count": 12345, + "raft_commitTime_quantile=0.5": 41146488, + "raft_commitTime_quantile=0.9": 41146488, + "raft_commitTime_quantile=0.99": 41146488, + "raft_commitTime_sum": 955781149, + "raft_fsm_lastRestoreDuration": 2, + "raft_leader_lastContact_count": 80917, + "raft_leader_lastContact_quantile=0.5": 33000000, + "raft_leader_lastContact_quantile=0.9": 68000000, + "raft_leader_lastContact_quantile=0.99": 68000000, + "raft_leader_lastContact_sum": 3066900000, + "raft_leader_oldestLogAge": 166046464, + "raft_rpc_installSnapshot_count": 0, + "raft_rpc_installSnapshot_quantile=0.5": 0, + "raft_rpc_installSnapshot_quantile=0.9": 0, + "raft_rpc_installSnapshot_quantile=0.99": 0, + "raft_rpc_installSnapshot_sum": 0, + "raft_state_candidate": 1, + "raft_state_leader": 1, + "raft_thread_fsm_saturation_count": 11923, + "raft_thread_fsm_saturation_quantile=0.5": 0, + "raft_thread_fsm_saturation_quantile=0.9": 0, + "raft_thread_fsm_saturation_quantile=0.99": 0, + "raft_thread_fsm_saturation_sum": 90, + "raft_thread_main_saturation_count": 43067, + "raft_thread_main_saturation_quantile=0.5": 0, + "raft_thread_main_saturation_quantile=0.9": 0, + "raft_thread_main_saturation_quantile=0.99": 0, + "raft_thread_main_saturation_sum": 205409, + "runtime_alloc_bytes": 53065368, + "runtime_sys_bytes": 84955160, + "runtime_total_gc_pause_ns": 1372001280, + "server_isLeader_no": 0, + "server_isLeader_yes": 1, + "txn_apply_count": 0, + "txn_apply_quantile=0.5": 0, + "txn_apply_quantile=0.9": 0, + "txn_apply_quantile=0.99": 0, + "txn_apply_sum": 0, + }, + }, + "success on response from Consul v1.13.2 server with disabled prometheus": { + prepare: caseConsulV1132ServerWithDisabledPrometheus, + // 3 node, 1 service check, no license + wantNumOfCharts: len(serverAutopilotHealthCharts) + 3 + 1, + wantMetrics: map[string]int64{ + "autopilot_server_healthy_no": 0, + "autopilot_server_healthy_yes": 1, + "autopilot_server_lastContact_leader": 13, + "autopilot_server_sefStatus_alive": 1, + "autopilot_server_sefStatus_failed": 0, + "autopilot_server_sefStatus_left": 0, + "autopilot_server_sefStatus_none": 0, + "autopilot_server_stable_time": 265805, + "autopilot_server_voter_no": 0, + "autopilot_server_voter_yes": 1, + "health_check_chk1_critical_status": 0, + "health_check_chk1_maintenance_status": 0, + "health_check_chk1_passing_status": 1, + "health_check_chk1_warning_status": 0, + "health_check_chk2_critical_status": 1, + "health_check_chk2_maintenance_status": 0, + "health_check_chk2_passing_status": 0, + "health_check_chk2_warning_status": 0, + "health_check_chk3_critical_status": 1, + "health_check_chk3_maintenance_status": 0, + "health_check_chk3_passing_status": 0, + "health_check_chk3_warning_status": 0, + "health_check_mysql_critical_status": 1, + "health_check_mysql_maintenance_status": 0, + "health_check_mysql_passing_status": 0, + "health_check_mysql_warning_status": 0, + "network_lan_rtt_avg": 737592, + "network_lan_rtt_count": 2, + "network_lan_rtt_max": 991168, + "network_lan_rtt_min": 484017, + "network_lan_rtt_sum": 1475185, + }, + }, + "success on response from Consul v1.13.2 client": { + prepare: caseConsulV1132ClientResponse, + // 3 node, 1 service check, no license + wantNumOfCharts: len(clientCharts) + 3 + 1 - 1, + wantMetrics: 
map[string]int64{ + "client_rpc": 34, + "client_rpc_exceeded": 0, + "client_rpc_failed": 0, + "health_check_chk1_critical_status": 0, + "health_check_chk1_maintenance_status": 0, + "health_check_chk1_passing_status": 1, + "health_check_chk1_warning_status": 0, + "health_check_chk2_critical_status": 1, + "health_check_chk2_maintenance_status": 0, + "health_check_chk2_passing_status": 0, + "health_check_chk2_warning_status": 0, + "health_check_chk3_critical_status": 1, + "health_check_chk3_maintenance_status": 0, + "health_check_chk3_passing_status": 0, + "health_check_chk3_warning_status": 0, + "health_check_mysql_critical_status": 1, + "health_check_mysql_maintenance_status": 0, + "health_check_mysql_passing_status": 0, + "health_check_mysql_warning_status": 0, + "runtime_alloc_bytes": 26333408, + "runtime_sys_bytes": 51201032, + "runtime_total_gc_pause_ns": 4182423, + }, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + consul, cleanup := test.prepare(t) + defer cleanup() + + mx := consul.Collect() + + delete(mx, "autopilot_server_stable_time") + delete(test.wantMetrics, "autopilot_server_stable_time") + + require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*consul.Charts())) + } + }) + } +} + +func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case r.URL.Path == urlPathAgentSelf: + _, _ = w.Write(dataV1143CloudServerSelf) + case r.URL.Path == urlPathAgentChecks: + _, _ = w.Write(dataV1143CloudChecks) + case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus": + _, _ = w.Write(dataV1143CloudServerPromMetrics) + case r.URL.Path == urlPathOperationAutopilotHealth: + w.WriteHeader(http.StatusForbidden) + case r.URL.Path == urlPathCoordinateNodes: + _, _ = w.Write(dataV1143CloudServerCoordinateNodes) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case r.URL.Path == urlPathAgentSelf: + _, _ = w.Write(dataV1132ServerSelf) + case r.URL.Path == urlPathAgentChecks: + _, _ = w.Write(datav1132Checks) + case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus": + _, _ = w.Write(dataV1132ServerPromMetrics) + case r.URL.Path == urlPathOperationAutopilotHealth: + _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth) + case r.URL.Path == urlPathCoordinateNodes: + _, _ = w.Write(dataV1132ServerCoordinateNodes) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case r.URL.Path 
== urlPathAgentSelf: + _, _ = w.Write(dataV1132ServerSelfWithHostname) + case r.URL.Path == urlPathAgentChecks: + _, _ = w.Write(datav1132Checks) + case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus": + _, _ = w.Write(dataV1132ServerPromMetricsWithHostname) + case r.URL.Path == urlPathOperationAutopilotHealth: + _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth) + case r.URL.Path == urlPathCoordinateNodes: + _, _ = w.Write(dataV1132ServerCoordinateNodes) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathAgentSelf: + _, _ = w.Write(dataV1132ServerSelfDisabledPrometheus) + case urlPathAgentChecks: + _, _ = w.Write(datav1132Checks) + case urlPathOperationAutopilotHealth: + _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth) + case urlPathCoordinateNodes: + _, _ = w.Write(dataV1132ServerCoordinateNodes) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case r.URL.Path == urlPathAgentSelf: + _, _ = w.Write(dataV1132ClientSelf) + case r.URL.Path == urlPathAgentChecks: + _, _ = w.Write(datav1132Checks) + case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus": + _, _ = w.Write(dataV1132ClientPromMetrics) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + consul := New() + consul.URL = srv.URL + + require.True(t, consul.Init()) + + return consul, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*Consul, func()) { + t.Helper() + consul := New() + consul.URL = "http://127.0.0.1:65535/" + require.True(t, consul.Init()) + + return consul, func() {} +} + +func case404(t *testing.T) (*Consul, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + consul := New() + consul.URL = srv.URL + require.True(t, consul.Init()) + + return consul, srv.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/init.go b/src/go/collectors/go.d.plugin/modules/consul/init.go new file mode 100644 index 00000000000000..11087ec64a0a8b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/init.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package consul + +import ( + "errors" + "net/http" + "net/url" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (c *Consul) validateConfig() error { + if c.URL == "" { + return errors.New("'url' not set") + } + return nil +} + +func (c *Consul) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(c.Client) +} + 
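+// urlPathAgentMetrics is the Consul agent telemetry endpoint. initPrometheusClient below +// points a copy of the configured HTTP request at this path with "format=prometheus" set, +// so the agent's response can be parsed as Prometheus text-format metrics.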
+const urlPathAgentMetrics = "/v1/agent/metrics" + +func (c *Consul) initPrometheusClient(httpClient *http.Client) (prometheus.Prometheus, error) { + r, err := web.NewHTTPRequest(c.Request.Copy()) + if err != nil { + return nil, err + } + r.URL.Path = urlPathAgentMetrics + r.URL.RawQuery = url.Values{ + "format": []string{"prometheus"}, + }.Encode() + + req := c.Request.Copy() + req.URL = r.URL.String() + + if c.ACLToken != "" { + if req.Headers == nil { + req.Headers = make(map[string]string) + } + req.Headers["X-Consul-Token"] = c.ACLToken + } + + return prometheus.New(httpClient, req), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/integrations/consul.md b/src/go/collectors/go.d.plugin/modules/consul/integrations/consul.md new file mode 100644 index 00000000000000..20d8018ee4a14d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/integrations/consul.md @@ -0,0 +1,324 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/consul/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/consul/metadata.yaml" +sidebar_label: "Consul" +learn_status: "Published" +learn_rel_path: "Data Collection/Service Discovery / Registry" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Consul + + +<img src="https://netdata.cloud/img/consul.svg" width="150"/> + + +Plugin: go.d.plugin +Module: consul + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more. + + +It periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs). + +Used endpoints: + +- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) +- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) +- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration) +- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics) +- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This collector discovers instances running on the local host that provide metrics on port 8500. + +On startup, it tries to collect metrics from: + +- http://localhost:8500 +- http://127.0.0.1:8500 + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent). + + +### Per Consul instance + +These metrics refer to the entire monitored application. + +This scope has no labels.
+ +Metrics: + +| Metric | Dimensions | Unit | Leader | Follower | Client | +|:------|:----------|:----|:---:|:---:|:---:| +| consul.client_rpc_requests_rate | rpc | requests/s | • | • | • | +| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | • | • | • | +| consul.client_rpc_requests_failed_rate | failed | requests/s | • | • | • | +| consul.memory_allocated | allocated | bytes | • | • | • | +| consul.memory_sys | sys | bytes | • | • | • | +| consul.gc_pause_time | gc_pause | seconds | • | • | • | +| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | | +| consul.kvs_apply_operations_rate | kvs_apply | ops/s | • | • | | +| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | | +| consul.txn_apply_operations_rate | txn_apply | ops/s | • | • | | +| consul.autopilot_health_status | healthy, unhealthy | status | • | • | | +| consul.autopilot_failure_tolerance | failure_tolerance | servers | • | • | | +| consul.autopilot_server_health_status | healthy, unhealthy | status | • | • | | +| consul.autopilot_server_stable_time | stable | seconds | • | • | | +| consul.autopilot_server_serf_status | active, failed, left, none | status | • | • | | +| consul.autopilot_server_voter_status | voter, not_voter | status | • | • | | +| consul.network_lan_rtt | min, max, avg | ms | • | • | | +| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | | | +| consul.raft_commits_rate | commits | commits/s | • | | | +| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | | | +| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | • | | | +| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | • | | +| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | • | | +| consul.raft_leader_elections_rate | leader | elections/s | • | • | | +| consul.raft_leadership_transitions_rate | leadership | transitions/s | • | • | | +| consul.server_leadership_status | leader, not_leader | status | • | • | | +| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | • | • | | +| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | • | • | | +| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | • | • | | +| consul.raft_boltdb_freelist_bytes | freelist | bytes | • | • | | +| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | • | • | | +| consul.raft_boltdb_store_logs_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | | +| consul.license_expiration_time | license_expiration | seconds | • | • | • | + +### Per node check + +Metrics about checks on Node level. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| datacenter | Datacenter Identifier | +| node_name | The node's name | +| check_name | The check's name | + +Metrics: + +| Metric | Dimensions | Unit | Leader | Follower | Client | +|:------|:----------|:----|:---:|:---:|:---:| +| consul.node_health_check_status | passing, maintenance, warning, critical | status | • | • | • | + +### Per service check + +Metrics about checks at a Service level. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| datacenter | Datacenter Identifier | +| node_name | The node's name | +| check_name | The check's name | +| service_name | The service's name | + +Metrics: + +| Metric | Dimensions | Unit | Leader | Follower | Client | +|:------|:----------|:----|:---:|:---:|:---:| +| consul.service_health_check_status | passing, maintenance, warning, critical | status | • | • | • | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} | +| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy | +| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes | +| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader | +| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} | +| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter 
${label:datacenter} | +| [ consul_license_expiration_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} | + + +## Setup + +### Prerequisites + +#### Enable Prometheus telemetry + +[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent by increasing the value of `prometheus_retention_time` from `0`. + + +#### Add required ACLs to Token + +Required **only if authentication is enabled**. + +| ACL | Endpoint | +|:---------------:|----------------------------------------------------------------------------------------------------| +| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) | +| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) | +| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) | + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/consul.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/consul.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>All options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://localhost:8500 | yes | +| acl_token | ACL token used in every request. | | no | +| max_checks | Checks processing/charting limit. | | no | +| max_filter | Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). | | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 1 | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates.
| | no | +| tls_cert | Client tls certificate. | | no | +| tls_key | Client tls key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + +``` +##### Basic HTTP auth + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + username: foo + password: bar + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + + - name: remote + url: http://203.0.113.10:8500 + acl_token: "ada7f751-f654-8872-7f93-498e799158b6" + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m consul + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/consul/metadata.yaml b/src/go/collectors/go.d.plugin/modules/consul/metadata.yaml new file mode 100644 index 00000000000000..470959139d2eb4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/metadata.yaml @@ -0,0 +1,599 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-consul + plugin_name: go.d.plugin + module_name: consul + monitored_instance: + name: Consul + link: https://www.consul.io/ + categories: + - data-collection.service-discovery-registry + icon_filename: consul.svg + alternative_monitored_instances: [] + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - service networking platform + - hashicorp + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more. + method_description: | + It periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs). 
+ + Used endpoints: + + - [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) + - [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) + - [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration) + - [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics) + - [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + This collector discovers instances running on the local host that provide metrics on port 8500. + + On startup, it tries to collect metrics from: + + - http://localhost:8500 + - http://127.0.0.1:8500 + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable Prometheus telemetry + description: | + [Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent by increasing the value of `prometheus_retention_time` from `0`. + - title: Add required ACLs to Token + description: | + Required **only if authentication is enabled**. + + | ACL | Endpoint | + |:---------------:|----------------------------------------------------------------------------------------------------| + | `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) | + | `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) | + | `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) | + configuration: + file: + name: go.d/consul.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: All options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://localhost:8500 + required: true + - name: acl_token + description: ACL token used in every request. + default_value: "" + required: false + - name: max_checks + description: Checks processing/charting limit. + default_value: "" + required: false + - name: max_filter + description: Checks processing/charting filter. Uses [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). + default_value: "" + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL.
+ default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client tls certificate. + default_value: "" + required: false + - name: tls_key + description: Client tls key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + folding: + enabled: false + config: | + jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + - name: Basic HTTP auth + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. 
+ config: | + jobs: + - name: local + url: http://127.0.0.1:8500 + acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7" + + - name: remote + url: http://203.0.113.10:8500 + acl_token: "ada7f751-f654-8872-7f93-498e799158b6" + troubleshooting: + problems: + list: [] + alerts: + - name: consul_node_health_check_status + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.node_health_check_status + info: node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} + - name: consul_service_health_check_status + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.service_health_check_status + info: service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} + - name: consul_client_rpc_requests_exceeded + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.client_rpc_requests_exceeded_rate + info: number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} + - name: consul_client_rpc_requests_failed + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.client_rpc_requests_failed_rate + info: number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} + - name: consul_gc_pause_time + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.gc_pause_time + info: time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} + - name: consul_autopilot_health_status + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.autopilot_health_status + info: datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} + - name: consul_autopilot_server_health_status + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.autopilot_server_health_status + info: server ${label:node_name} from datacenter ${label:datacenter} is unhealthy + - name: consul_raft_leader_last_contact_time + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.raft_leader_last_contact_time + info: median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes + - name: consul_raft_leadership_transitions + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.raft_leadership_transitions_rate + info: there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader + - name: consul_raft_thread_main_saturation + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.raft_thread_main_saturation_perc + info: average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} + - name: consul_raft_thread_fsm_saturation + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.raft_thread_fsm_saturation_perc + info: average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} + - name: consul_license_expiration_time + link: 
https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf + metric: consul.license_expiration_time + info: Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter} + metrics: + folding: + title: Metrics + enabled: false + description: | + The set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent). + availability: + - Leader + - Follower + - Client + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: consul.client_rpc_requests_rate + description: Client RPC requests + unit: requests/s + chart_type: line + dimensions: + - name: rpc + - name: consul.client_rpc_requests_exceeded_rate + description: Client rate-limited RPC requests + unit: requests/s + chart_type: line + dimensions: + - name: exceeded + - name: consul.client_rpc_requests_failed_rate + description: Client failed RPC requests + unit: requests/s + chart_type: line + dimensions: + - name: failed + - name: consul.memory_allocated + description: Memory allocated by the Consul process + unit: bytes + chart_type: line + dimensions: + - name: allocated + - name: consul.memory_sys + description: Memory obtained from the OS + unit: bytes + chart_type: line + dimensions: + - name: sys + - name: consul.gc_pause_time + description: Garbage collection stop-the-world pause time + unit: seconds + chart_type: line + dimensions: + - name: gc_pause + - name: consul.kvs_apply_time + description: KVS apply time + unit: ms + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.kvs_apply_operations_rate + description: KVS apply operations + unit: ops/s + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: kvs_apply + - name: consul.txn_apply_time + description: Transaction apply time + unit: ms + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.txn_apply_operations_rate + description: Transaction apply operations + unit: ops/s + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: txn_apply + - name: consul.autopilot_health_status + description: Autopilot cluster health status + unit: status + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: healthy + - name: unhealthy + - name: consul.autopilot_failure_tolerance + description: Autopilot cluster failure tolerance + unit: servers + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: failure_tolerance + - name: consul.autopilot_server_health_status + description: Autopilot server health status + unit: status + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: healthy + - name: unhealthy + - name: consul.autopilot_server_stable_time + description: Autopilot server stable time + unit: seconds + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: stable + - name: consul.autopilot_server_serf_status + description: Autopilot server Serf status + unit: status + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: active + - name: failed + - name: left + - name: none + - name: consul.autopilot_server_voter_status + description: Autopilot server Raft voting membership + unit: status + chart_type: 
line + availability: + - Leader + - Follower + dimensions: + - name: voter + - name: not_voter + - name: consul.network_lan_rtt + description: Network lan RTT + unit: ms + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: min + - name: max + - name: avg + - name: consul.raft_commit_time + description: Raft commit time + unit: ms + chart_type: line + availability: + - Leader + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.raft_commits_rate + description: Raft commits rate + unit: commits/s + chart_type: line + availability: + - Leader + dimensions: + - name: commits + - name: consul.raft_leader_last_contact_time + description: Raft leader last contact time + unit: ms + chart_type: line + availability: + - Leader + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.raft_leader_oldest_log_age + description: Raft leader oldest log age + unit: seconds + chart_type: line + availability: + - Leader + dimensions: + - name: oldest_log_age + - name: consul.raft_follower_last_contact_leader_time + description: Raft follower last contact with the leader time + unit: ms + chart_type: line + availability: + - Follower + dimensions: + - name: leader_last_contact + - name: consul.raft_rpc_install_snapshot_time + description: Raft RPC install snapshot time + unit: ms + chart_type: line + availability: + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.raft_leader_elections_rate + description: Raft leader elections rate + unit: elections/s + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: leader + - name: consul.raft_leadership_transitions_rate + description: Raft leadership transitions rate + unit: transitions/s + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: leadership + - name: consul.server_leadership_status + description: Server leadership status + unit: status + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: leader + - name: not_leader + - name: consul.raft_thread_main_saturation_perc + description: Raft main thread saturation + unit: percentage + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.raft_thread_fsm_saturation_perc + description: Raft FSM thread saturation + unit: percentage + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.raft_fsm_last_restore_duration + description: Raft last restore duration + unit: ms + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: last_restore_duration + - name: consul.raft_boltdb_freelist_bytes + description: Raft BoltDB freelist + unit: bytes + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: freelist + - name: consul.raft_boltdb_logs_per_batch_rate + description: Raft BoltDB logs written per batch + unit: logs/s + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: written + - name: consul.raft_boltdb_store_logs_time + description: Raft BoltDB store logs time + unit: ms + chart_type: line + availability: + - Leader + - Follower + dimensions: + - name: quantile_0.5 + - name: quantile_0.9 + - name: quantile_0.99 + - name: consul.license_expiration_time + 
description: License expiration time + unit: seconds + chart_type: line + dimensions: + - name: license_expiration + - name: node check + description: Metrics about checks on Node level. + labels: + - name: datacenter + description: Datacenter Identifier + - name: node_name + description: The node's name + - name: check_name + description: The check's name + metrics: + - name: consul.node_health_check_status + description: Node health check status + unit: status + chart_type: line + dimensions: + - name: passing + - name: maintenance + - name: warning + - name: critical + - name: service check + description: Metrics about checks at a Service level. + labels: + - name: datacenter + description: Datacenter Identifier + - name: node_name + description: The node's name + - name: check_name + description: The check's name + - name: service_name + description: The service's name + metrics: + - name: consul.service_health_check_status + description: Service health check status + unit: status + chart_type: line + dimensions: + - name: passing + - name: maintenance + - name: warning + - name: critical diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt new file mode 100644 index 00000000000000..e93e677d810edd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt @@ -0,0 +1,989 @@ +# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token. +# TYPE consul_acl_ResolveToken summary +consul_acl_ResolveToken{quantile="0.5"} NaN +consul_acl_ResolveToken{quantile="0.9"} NaN +consul_acl_ResolveToken{quantile="0.99"} NaN +consul_acl_ResolveToken_sum 0 +consul_acl_ResolveToken_count 0 +# HELP consul_acl_authmethod_delete +# TYPE consul_acl_authmethod_delete summary +consul_acl_authmethod_delete{quantile="0.5"} NaN +consul_acl_authmethod_delete{quantile="0.9"} NaN +consul_acl_authmethod_delete{quantile="0.99"} NaN +consul_acl_authmethod_delete_sum 0 +consul_acl_authmethod_delete_count 0 +# HELP consul_acl_authmethod_upsert +# TYPE consul_acl_authmethod_upsert summary +consul_acl_authmethod_upsert{quantile="0.5"} NaN +consul_acl_authmethod_upsert{quantile="0.9"} NaN +consul_acl_authmethod_upsert{quantile="0.99"} NaN +consul_acl_authmethod_upsert_sum 0 +consul_acl_authmethod_upsert_count 0 +# HELP consul_acl_bindingrule_delete +# TYPE consul_acl_bindingrule_delete summary +consul_acl_bindingrule_delete{quantile="0.5"} NaN +consul_acl_bindingrule_delete{quantile="0.9"} NaN +consul_acl_bindingrule_delete{quantile="0.99"} NaN +consul_acl_bindingrule_delete_sum 0 +consul_acl_bindingrule_delete_count 0 +# HELP consul_acl_bindingrule_upsert +# TYPE consul_acl_bindingrule_upsert summary +consul_acl_bindingrule_upsert{quantile="0.5"} NaN +consul_acl_bindingrule_upsert{quantile="0.9"} NaN +consul_acl_bindingrule_upsert{quantile="0.99"} NaN +consul_acl_bindingrule_upsert_sum 0 +consul_acl_bindingrule_upsert_count 0 +# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_deregistration counter +consul_acl_blocked_check_deregistration 0 +# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_registration counter +consul_acl_blocked_check_registration 0 +# HELP consul_acl_blocked_node_registration Increments 
whenever a registration fails for a node (blocked by an ACL) +# TYPE consul_acl_blocked_node_registration counter +consul_acl_blocked_node_registration 0 +# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL) +# TYPE consul_acl_blocked_service_deregistration counter +consul_acl_blocked_service_deregistration 0 +# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL) +# TYPE consul_acl_blocked_service_registration counter +consul_acl_blocked_service_registration 0 +# HELP consul_acl_login +# TYPE consul_acl_login summary +consul_acl_login{quantile="0.5"} NaN +consul_acl_login{quantile="0.9"} NaN +consul_acl_login{quantile="0.99"} NaN +consul_acl_login_sum 0 +consul_acl_login_count 0 +# HELP consul_acl_logout +# TYPE consul_acl_logout summary +consul_acl_logout{quantile="0.5"} NaN +consul_acl_logout{quantile="0.9"} NaN +consul_acl_logout{quantile="0.99"} NaN +consul_acl_logout_sum 0 +consul_acl_logout_count 0 +# HELP consul_acl_policy_delete +# TYPE consul_acl_policy_delete summary +consul_acl_policy_delete{quantile="0.5"} NaN +consul_acl_policy_delete{quantile="0.9"} NaN +consul_acl_policy_delete{quantile="0.99"} NaN +consul_acl_policy_delete_sum 0 +consul_acl_policy_delete_count 0 +# HELP consul_acl_policy_upsert +# TYPE consul_acl_policy_upsert summary +consul_acl_policy_upsert{quantile="0.5"} NaN +consul_acl_policy_upsert{quantile="0.9"} NaN +consul_acl_policy_upsert{quantile="0.99"} NaN +consul_acl_policy_upsert_sum 0 +consul_acl_policy_upsert_count 0 +# HELP consul_acl_role_delete +# TYPE consul_acl_role_delete summary +consul_acl_role_delete{quantile="0.5"} NaN +consul_acl_role_delete{quantile="0.9"} NaN +consul_acl_role_delete{quantile="0.99"} NaN +consul_acl_role_delete_sum 0 +consul_acl_role_delete_count 0 +# HELP consul_acl_role_upsert +# TYPE consul_acl_role_upsert summary +consul_acl_role_upsert{quantile="0.5"} NaN +consul_acl_role_upsert{quantile="0.9"} NaN +consul_acl_role_upsert{quantile="0.99"} NaN +consul_acl_role_upsert_sum 0 +consul_acl_role_upsert_count 0 +# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache. +# TYPE consul_acl_token_cache_hit counter +consul_acl_token_cache_hit 0 +# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache. +# TYPE consul_acl_token_cache_miss counter +consul_acl_token_cache_miss 0 +# HELP consul_acl_token_clone +# TYPE consul_acl_token_clone summary +consul_acl_token_clone{quantile="0.5"} NaN +consul_acl_token_clone{quantile="0.9"} NaN +consul_acl_token_clone{quantile="0.99"} NaN +consul_acl_token_clone_sum 0 +consul_acl_token_clone_count 0 +# HELP consul_acl_token_delete +# TYPE consul_acl_token_delete summary +consul_acl_token_delete{quantile="0.5"} NaN +consul_acl_token_delete{quantile="0.9"} NaN +consul_acl_token_delete{quantile="0.99"} NaN +consul_acl_token_delete_sum 0 +consul_acl_token_delete_count 0 +# HELP consul_acl_token_upsert +# TYPE consul_acl_token_upsert summary +consul_acl_token_upsert{quantile="0.5"} NaN +consul_acl_token_upsert{quantile="0.9"} NaN +consul_acl_token_upsert{quantile="0.99"} NaN +consul_acl_token_upsert_sum 0 +consul_acl_token_upsert_count 0 +# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. 
Updated every hour +# TYPE consul_agent_tls_cert_expiry gauge +consul_agent_tls_cert_expiry 0 +# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path. +# TYPE consul_api_http summary +consul_api_http{quantile="0.5"} NaN +consul_api_http{quantile="0.9"} NaN +consul_api_http{quantile="0.99"} NaN +consul_api_http_sum 0 +consul_api_http_count 0 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.12827900052070618 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.16961899399757385 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 0.16961899399757385 +consul_api_http_sum{method="GET",path="v1_agent_checks"} 72.76162604242563 +consul_api_http_count{method="GET",path="v1_agent_checks"} 430 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.21463799476623535 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 0.35256800055503845 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 0.35256800055503845 +consul_api_http_sum{method="GET",path="v1_agent_metrics"} 148.1220167428255 +consul_api_http_count{method="GET",path="v1_agent_metrics"} 438 +# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found. +# TYPE consul_catalog_connect_not_found counter +consul_catalog_connect_not_found 0 +# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service. +# TYPE consul_catalog_connect_query counter +consul_catalog_connect_query 0 +# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag. +# TYPE consul_catalog_connect_query_tag counter +consul_catalog_connect_query_tag 0 +# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags. +# TYPE consul_catalog_connect_query_tags counter +consul_catalog_connect_query_tags 0 +# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation. +# TYPE consul_catalog_deregister summary +consul_catalog_deregister{quantile="0.5"} NaN +consul_catalog_deregister{quantile="0.9"} NaN +consul_catalog_deregister{quantile="0.99"} NaN +consul_catalog_deregister_sum 0 +consul_catalog_deregister_count 0 +# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation. +# TYPE consul_catalog_register summary +consul_catalog_register{quantile="0.5"} NaN +consul_catalog_register{quantile="0.9"} NaN +consul_catalog_register{quantile="0.99"} NaN +consul_catalog_register_sum 0 +consul_catalog_register_count 0 +# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found. +# TYPE consul_catalog_service_not_found counter +consul_catalog_service_not_found 0 +# HELP consul_catalog_service_query Increments for each catalog query for the given service. +# TYPE consul_catalog_service_query counter +consul_catalog_service_query 0 +# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag. +# TYPE consul_catalog_service_query_tag counter +consul_catalog_service_query_tag 0 +# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags. 
+# TYPE consul_catalog_service_query_tags counter +consul_catalog_service_query_tags 0 +# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog. +# TYPE consul_client_api_catalog_datacenters counter +consul_client_api_catalog_datacenters 0 +# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request. +# TYPE consul_client_api_catalog_deregister counter +consul_client_api_catalog_deregister 0 +# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway. +# TYPE consul_client_api_catalog_gateway_services counter +consul_client_api_catalog_gateway_services 0 +# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services. +# TYPE consul_client_api_catalog_node_service_list counter +consul_client_api_catalog_node_service_list 0 +# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. +# TYPE consul_client_api_catalog_node_services counter +consul_client_api_catalog_node_services 0 +# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog. +# TYPE consul_client_api_catalog_nodes counter +consul_client_api_catalog_nodes 0 +# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request. +# TYPE consul_client_api_catalog_register counter +consul_client_api_catalog_register 0 +# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service. +# TYPE consul_client_api_catalog_service_nodes counter +consul_client_api_catalog_service_nodes 0 +# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog. +# TYPE consul_client_api_catalog_services counter +consul_client_api_catalog_services 0 +# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service. +# TYPE consul_client_api_error_catalog_service_nodes counter +consul_client_api_error_catalog_service_nodes 0 +# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters. +# TYPE consul_client_api_success_catalog_datacenters counter +consul_client_api_success_catalog_datacenters 0 +# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request. +# TYPE consul_client_api_success_catalog_deregister counter +consul_client_api_success_catalog_deregister 0 +# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. +# TYPE consul_client_api_success_catalog_gateway_services counter +consul_client_api_success_catalog_gateway_services 0 +# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter +consul_client_api_success_catalog_node_service_list 0 +# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node. +# TYPE consul_client_api_success_catalog_node_services counter +consul_client_api_success_catalog_node_services 0 +# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes. +# TYPE consul_client_api_success_catalog_nodes counter +consul_client_api_success_catalog_nodes 0 +# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request. +# TYPE consul_client_api_success_catalog_register counter +consul_client_api_success_catalog_register 0 +# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. +# TYPE consul_client_api_success_catalog_service_nodes counter +consul_client_api_success_catalog_service_nodes 0 +# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services. +# TYPE consul_client_api_success_catalog_services counter +consul_client_api_success_catalog_services 0 +# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. +# TYPE consul_client_rpc counter +consul_client_rpc 34 +# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters. +# TYPE consul_client_rpc_error_catalog_datacenters counter +consul_client_rpc_error_catalog_datacenters 0 +# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request. +# TYPE consul_client_rpc_error_catalog_deregister counter +consul_client_rpc_error_catalog_deregister 0 +# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. +# TYPE consul_client_rpc_error_catalog_gateway_services counter +consul_client_rpc_error_catalog_gateway_services 0 +# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for a request to list a node's registered services. +# TYPE consul_client_rpc_error_catalog_node_service_list counter +consul_client_rpc_error_catalog_node_service_list 0 +# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node. +# TYPE consul_client_rpc_error_catalog_node_services counter +consul_client_rpc_error_catalog_node_services 0 +# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes. +# TYPE consul_client_rpc_error_catalog_nodes counter +consul_client_rpc_error_catalog_nodes 0 +# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request. +# TYPE consul_client_rpc_error_catalog_register counter +consul_client_rpc_error_catalog_register 0 +# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter +consul_client_rpc_error_catalog_service_nodes 0 +# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services. +# TYPE consul_client_rpc_error_catalog_services counter +consul_client_rpc_error_catalog_services 0 +# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and gets rate limited by that agent's limits configuration. +# TYPE consul_client_rpc_exceeded counter +consul_client_rpc_exceeded 0 +# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. +# TYPE consul_client_rpc_failed counter +consul_client_rpc_failed 0 +# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided. +# TYPE consul_consul_cache_bypass counter +consul_consul_cache_bypass 0 +# HELP consul_consul_cache_entries_count Represents the number of entries in this cache. +# TYPE consul_consul_cache_entries_count gauge +consul_consul_cache_entries_count 0 +# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted. +# TYPE consul_consul_cache_evict_expired counter +consul_consul_cache_evict_expired 0 +# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache. +# TYPE consul_consul_cache_fetch_error counter +consul_consul_cache_fetch_error 0 +# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache. +# TYPE consul_consul_cache_fetch_success counter +consul_consul_cache_fetch_success 0 +# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead +# TYPE consul_consul_fsm_ca summary +consul_consul_fsm_ca{quantile="0.5"} NaN +consul_consul_fsm_ca{quantile="0.9"} NaN +consul_consul_fsm_ca{quantile="0.99"} NaN +consul_consul_fsm_ca_sum 0 +consul_consul_fsm_ca_count 0 +# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead +# TYPE consul_consul_fsm_intention summary +consul_consul_fsm_intention{quantile="0.5"} NaN +consul_consul_fsm_intention{quantile="0.9"} NaN +consul_consul_fsm_intention{quantile="0.99"} NaN +consul_consul_fsm_intention_sum 0 +consul_consul_fsm_intention_count 0 +# HELP consul_consul_intention_apply +# TYPE consul_consul_intention_apply summary +consul_consul_intention_apply{quantile="0.5"} NaN +consul_consul_intention_apply{quantile="0.9"} NaN +consul_consul_intention_apply{quantile="0.99"} NaN +consul_consul_intention_apply_sum 0 +consul_consul_intention_apply_count 0 +# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. +# TYPE consul_consul_members_clients gauge +consul_consul_members_clients 0 +# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. +# TYPE consul_consul_members_servers gauge +consul_consul_members_servers 0 +# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_config_entries gauge +consul_consul_state_config_entries 0 +# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4. +# TYPE consul_consul_state_connect_instances gauge +consul_consul_state_connect_instances 0 +# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. +# TYPE consul_consul_state_kv_entries gauge +consul_consul_state_kv_entries 0 +# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_consul_state_nodes gauge +consul_consul_state_nodes 0 +# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. +# TYPE consul_consul_state_peerings gauge +consul_consul_state_peerings 0 +# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_consul_state_service_instances gauge +consul_consul_state_service_instances 0 +# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_consul_state_services gauge +consul_consul_state_services 0 +# HELP consul_federation_state_apply +# TYPE consul_federation_state_apply summary +consul_federation_state_apply{quantile="0.5"} NaN +consul_federation_state_apply{quantile="0.9"} NaN +consul_federation_state_apply{quantile="0.99"} NaN +consul_federation_state_apply_sum 0 +consul_federation_state_apply_count 0 +# HELP consul_federation_state_get +# TYPE consul_federation_state_get summary +consul_federation_state_get{quantile="0.5"} NaN +consul_federation_state_get{quantile="0.9"} NaN +consul_federation_state_get{quantile="0.99"} NaN +consul_federation_state_get_sum 0 +consul_federation_state_get_count 0 +# HELP consul_federation_state_list +# TYPE consul_federation_state_list summary +consul_federation_state_list{quantile="0.5"} NaN +consul_federation_state_list{quantile="0.9"} NaN +consul_federation_state_list{quantile="0.99"} NaN +consul_federation_state_list_sum 0 +consul_federation_state_list_count 0 +# HELP consul_federation_state_list_mesh_gateways +# TYPE consul_federation_state_list_mesh_gateways summary +consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN +consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN +consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN +consul_federation_state_list_mesh_gateways_sum 0 +consul_federation_state_list_mesh_gateways_count 0 +# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM. +# TYPE consul_fsm_acl summary +consul_fsm_acl{quantile="0.5"} NaN +consul_fsm_acl{quantile="0.9"} NaN +consul_fsm_acl{quantile="0.99"} NaN +consul_fsm_acl_sum 0 +consul_fsm_acl_count 0 +# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM. 
+# TYPE consul_fsm_acl_authmethod summary +consul_fsm_acl_authmethod{quantile="0.5"} NaN +consul_fsm_acl_authmethod{quantile="0.9"} NaN +consul_fsm_acl_authmethod{quantile="0.99"} NaN +consul_fsm_acl_authmethod_sum 0 +consul_fsm_acl_authmethod_count 0 +# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM. +# TYPE consul_fsm_acl_bindingrule summary +consul_fsm_acl_bindingrule{quantile="0.5"} NaN +consul_fsm_acl_bindingrule{quantile="0.9"} NaN +consul_fsm_acl_bindingrule{quantile="0.99"} NaN +consul_fsm_acl_bindingrule_sum 0 +consul_fsm_acl_bindingrule_count 0 +# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM. +# TYPE consul_fsm_acl_policy summary +consul_fsm_acl_policy{quantile="0.5"} NaN +consul_fsm_acl_policy{quantile="0.9"} NaN +consul_fsm_acl_policy{quantile="0.99"} NaN +consul_fsm_acl_policy_sum 0 +consul_fsm_acl_policy_count 0 +# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM. +# TYPE consul_fsm_acl_token summary +consul_fsm_acl_token{quantile="0.5"} NaN +consul_fsm_acl_token{quantile="0.9"} NaN +consul_fsm_acl_token{quantile="0.99"} NaN +consul_fsm_acl_token_sum 0 +consul_fsm_acl_token_count 0 +# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM. +# TYPE consul_fsm_autopilot summary +consul_fsm_autopilot{quantile="0.5"} NaN +consul_fsm_autopilot{quantile="0.9"} NaN +consul_fsm_autopilot{quantile="0.99"} NaN +consul_fsm_autopilot_sum 0 +consul_fsm_autopilot_count 0 +# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM. +# TYPE consul_fsm_ca summary +consul_fsm_ca{quantile="0.5"} NaN +consul_fsm_ca{quantile="0.9"} NaN +consul_fsm_ca{quantile="0.99"} NaN +consul_fsm_ca_sum 0 +consul_fsm_ca_count 0 +# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate. +# TYPE consul_fsm_ca_leaf summary +consul_fsm_ca_leaf{quantile="0.5"} NaN +consul_fsm_ca_leaf{quantile="0.9"} NaN +consul_fsm_ca_leaf{quantile="0.99"} NaN +consul_fsm_ca_leaf_sum 0 +consul_fsm_ca_leaf_count 0 +# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM. +# TYPE consul_fsm_coordinate_batch_update summary +consul_fsm_coordinate_batch_update{quantile="0.5"} NaN +consul_fsm_coordinate_batch_update{quantile="0.9"} NaN +consul_fsm_coordinate_batch_update{quantile="0.99"} NaN +consul_fsm_coordinate_batch_update_sum 0 +consul_fsm_coordinate_batch_update_count 0 +# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM. +# TYPE consul_fsm_deregister summary +consul_fsm_deregister{quantile="0.5"} NaN +consul_fsm_deregister{quantile="0.9"} NaN +consul_fsm_deregister{quantile="0.99"} NaN +consul_fsm_deregister_sum 0 +consul_fsm_deregister_count 0 +# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM. +# TYPE consul_fsm_intention summary +consul_fsm_intention{quantile="0.5"} NaN +consul_fsm_intention{quantile="0.9"} NaN +consul_fsm_intention{quantile="0.99"} NaN +consul_fsm_intention_sum 0 +consul_fsm_intention_count 0 +# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM. 
+# TYPE consul_fsm_kvs summary +consul_fsm_kvs{quantile="0.5"} NaN +consul_fsm_kvs{quantile="0.9"} NaN +consul_fsm_kvs{quantile="0.99"} NaN +consul_fsm_kvs_sum 0 +consul_fsm_kvs_count 0 +# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM. +# TYPE consul_fsm_peering summary +consul_fsm_peering{quantile="0.5"} NaN +consul_fsm_peering{quantile="0.9"} NaN +consul_fsm_peering{quantile="0.99"} NaN +consul_fsm_peering_sum 0 +consul_fsm_peering_count 0 +# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot. +# TYPE consul_fsm_persist summary +consul_fsm_persist{quantile="0.5"} NaN +consul_fsm_persist{quantile="0.9"} NaN +consul_fsm_persist{quantile="0.99"} NaN +consul_fsm_persist_sum 0 +consul_fsm_persist_count 0 +# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM. +# TYPE consul_fsm_prepared_query summary +consul_fsm_prepared_query{quantile="0.5"} NaN +consul_fsm_prepared_query{quantile="0.9"} NaN +consul_fsm_prepared_query{quantile="0.99"} NaN +consul_fsm_prepared_query_sum 0 +consul_fsm_prepared_query_count 0 +# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM. +# TYPE consul_fsm_register summary +consul_fsm_register{quantile="0.5"} NaN +consul_fsm_register{quantile="0.9"} NaN +consul_fsm_register{quantile="0.99"} NaN +consul_fsm_register_sum 0 +consul_fsm_register_count 0 +# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM. +# TYPE consul_fsm_session summary +consul_fsm_session{quantile="0.5"} NaN +consul_fsm_session{quantile="0.9"} NaN +consul_fsm_session{quantile="0.99"} NaN +consul_fsm_session_sum 0 +consul_fsm_session_count 0 +# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM. +# TYPE consul_fsm_system_metadata summary +consul_fsm_system_metadata{quantile="0.5"} NaN +consul_fsm_system_metadata{quantile="0.9"} NaN +consul_fsm_system_metadata{quantile="0.99"} NaN +consul_fsm_system_metadata_sum 0 +consul_fsm_system_metadata_count 0 +# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM. +# TYPE consul_fsm_tombstone summary +consul_fsm_tombstone{quantile="0.5"} NaN +consul_fsm_tombstone{quantile="0.9"} NaN +consul_fsm_tombstone{quantile="0.99"} NaN +consul_fsm_tombstone_sum 0 +consul_fsm_tombstone_count 0 +# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM. +# TYPE consul_fsm_txn summary +consul_fsm_txn{quantile="0.5"} NaN +consul_fsm_txn{quantile="0.9"} NaN +consul_fsm_txn{quantile="0.99"} NaN +consul_fsm_txn_sum 0 +consul_fsm_txn_count 0 +# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server. +# TYPE consul_grpc_client_connection_count counter +consul_grpc_client_connection_count 2 +# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers. +# TYPE consul_grpc_client_connections gauge +consul_grpc_client_connections 1 +# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server. +# TYPE consul_grpc_client_request_count counter +consul_grpc_client_request_count 0 +# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server. 
+# TYPE consul_grpc_server_connection_count counter +consul_grpc_server_connection_count 0 +# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server. +# TYPE consul_grpc_server_connections gauge +consul_grpc_server_connections 0 +# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server. +# TYPE consul_grpc_server_request_count counter +consul_grpc_server_request_count 0 +# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server. +# TYPE consul_grpc_server_stream_count counter +consul_grpc_server_stream_count 0 +# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server. +# TYPE consul_grpc_server_streams gauge +consul_grpc_server_streams 0 +# HELP consul_intention_apply +# TYPE consul_intention_apply summary +consul_intention_apply{quantile="0.5"} NaN +consul_intention_apply{quantile="0.9"} NaN +consul_intention_apply{quantile="0.99"} NaN +consul_intention_apply_sum 0 +consul_intention_apply_count 0 +# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store. +# TYPE consul_kvs_apply summary +consul_kvs_apply{quantile="0.5"} NaN +consul_kvs_apply{quantile="0.9"} NaN +consul_kvs_apply{quantile="0.99"} NaN +consul_kvs_apply_sum 0 +consul_kvs_apply_count 0 +# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership. +# TYPE consul_leader_barrier summary +consul_leader_barrier{quantile="0.5"} NaN +consul_leader_barrier{quantile="0.9"} NaN +consul_leader_barrier{quantile="0.99"} NaN +consul_leader_barrier_sum 0 +consul_leader_barrier_count 0 +# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones. +# TYPE consul_leader_reapTombstones summary +consul_leader_reapTombstones{quantile="0.5"} NaN +consul_leader_reapTombstones{quantile="0.9"} NaN +consul_leader_reapTombstones{quantile="0.99"} NaN +consul_leader_reapTombstones_sum 0 +consul_leader_reapTombstones_count 0 +# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information. +# TYPE consul_leader_reconcile summary +consul_leader_reconcile{quantile="0.5"} NaN +consul_leader_reconcile{quantile="0.9"} NaN +consul_leader_reconcile{quantile="0.99"} NaN +consul_leader_reconcile_sum 0 +consul_leader_reconcile_count 0 +# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information. 
+# TYPE consul_leader_reconcileMember summary +consul_leader_reconcileMember{quantile="0.5"} NaN +consul_leader_reconcileMember{quantile="0.9"} NaN +consul_leader_reconcileMember{quantile="0.99"} NaN +consul_leader_reconcileMember_sum 0 +consul_leader_reconcileMember_count 0 +# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_policies_index gauge +consul_leader_replication_acl_policies_index 0 +# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader +# TYPE consul_leader_replication_acl_policies_status gauge +consul_leader_replication_acl_policies_status 0 +# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_roles_index gauge +consul_leader_replication_acl_roles_index 0 +# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader +# TYPE consul_leader_replication_acl_roles_status gauge +consul_leader_replication_acl_roles_status 0 +# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_tokens_index gauge +consul_leader_replication_acl_tokens_index 0 +# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader +# TYPE consul_leader_replication_acl_tokens_status gauge +consul_leader_replication_acl_tokens_status 0 +# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_config_entries_index gauge +consul_leader_replication_config_entries_index 0 +# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader +# TYPE consul_leader_replication_config_entries_status gauge +consul_leader_replication_config_entries_status 0 +# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_federation_state_index gauge +consul_leader_replication_federation_state_index 0 +# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader +# TYPE consul_leader_replication_federation_state_status gauge +consul_leader_replication_federation_state_status 0 +# HELP consul_leader_replication_namespaces_index Tracks the index of namespaces in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_namespaces_index gauge +consul_leader_replication_namespaces_index 0 +# HELP consul_leader_replication_namespaces_status Tracks the current health of namespace replication on the leader +# TYPE consul_leader_replication_namespaces_status gauge +consul_leader_replication_namespaces_status 0 +# HELP consul_memberlist_gossip consul_memberlist_gossip +# TYPE consul_memberlist_gossip summary +consul_memberlist_gossip{network="lan",quantile="0.5"} 0.02992900088429451 +consul_memberlist_gossip{network="lan",quantile="0.9"} 0.05322999879717827 +consul_memberlist_gossip{network="lan",quantile="0.99"} 0.09028899669647217
+consul_memberlist_gossip_sum{network="lan"} 72.09632398188114 +consul_memberlist_gossip_count{network="lan"} 2159 +# HELP consul_memberlist_msg_alive consul_memberlist_msg_alive +# TYPE consul_memberlist_msg_alive counter +consul_memberlist_msg_alive{network="lan"} 3 +# HELP consul_memberlist_probeNode consul_memberlist_probeNode +# TYPE consul_memberlist_probeNode summary +consul_memberlist_probeNode{network="lan",quantile="0.5"} 1.2391510009765625 +consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.470810055732727 +consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.470810055732727 +consul_memberlist_probeNode_sum{network="lan"} 550.6824030280113 +consul_memberlist_probeNode_count{network="lan"} 410 +# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode +# TYPE consul_memberlist_pushPullNode summary +consul_memberlist_pushPullNode{network="lan",quantile="0.5"} 1.6478170156478882 +consul_memberlist_pushPullNode{network="lan",quantile="0.9"} 1.6478170156478882 +consul_memberlist_pushPullNode{network="lan",quantile="0.99"} 1.6478170156478882 +consul_memberlist_pushPullNode_sum{network="lan"} 28.438491106033325 +consul_memberlist_pushPullNode_count{network="lan"} 17 +# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept +# TYPE consul_memberlist_tcp_accept counter +consul_memberlist_tcp_accept{network="lan"} 15 +# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect +# TYPE consul_memberlist_tcp_connect counter +consul_memberlist_tcp_connect{network="lan"} 18 +# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent +# TYPE consul_memberlist_tcp_sent counter +consul_memberlist_tcp_sent{network="lan"} 24679 +# HELP consul_memberlist_udp_received consul_memberlist_udp_received +# TYPE consul_memberlist_udp_received counter +consul_memberlist_udp_received{network="lan"} 117437 +# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent +# TYPE consul_memberlist_udp_sent counter +consul_memberlist_udp_sent{network="lan"} 118601 +# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update. +# TYPE consul_prepared_query_apply summary +consul_prepared_query_apply{quantile="0.5"} NaN +consul_prepared_query_apply{quantile="0.9"} NaN +consul_prepared_query_apply{quantile="0.99"} NaN +consul_prepared_query_apply_sum 0 +consul_prepared_query_apply_count 0 +# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request. +# TYPE consul_prepared_query_execute summary +consul_prepared_query_execute{quantile="0.5"} NaN +consul_prepared_query_execute{quantile="0.9"} NaN +consul_prepared_query_execute{quantile="0.99"} NaN +consul_prepared_query_execute_sum 0 +consul_prepared_query_execute_count 0 +# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter. +# TYPE consul_prepared_query_execute_remote summary +consul_prepared_query_execute_remote{quantile="0.5"} NaN +consul_prepared_query_execute_remote{quantile="0.9"} NaN +consul_prepared_query_execute_remote{quantile="0.99"} NaN +consul_prepared_query_execute_remote_sum 0 +consul_prepared_query_execute_remote_count 0 +# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request. 
+# TYPE consul_prepared_query_explain summary +consul_prepared_query_explain{quantile="0.5"} NaN +consul_prepared_query_explain{quantile="0.9"} NaN +consul_prepared_query_explain{quantile="0.99"} NaN +consul_prepared_query_explain_sum 0 +consul_prepared_query_explain_count 0 +# HELP consul_raft_applied_index Represents the raft applied index. +# TYPE consul_raft_applied_index gauge +consul_raft_applied_index 0 +# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval. +# TYPE consul_raft_apply counter +consul_raft_apply 0 +# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader. +# TYPE consul_raft_commitTime summary +consul_raft_commitTime{quantile="0.5"} NaN +consul_raft_commitTime{quantile="0.9"} NaN +consul_raft_commitTime{quantile="0.99"} NaN +consul_raft_commitTime_sum 0 +consul_raft_commitTime_count 0 +# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took. +# TYPE consul_raft_fsm_lastRestoreDuration gauge +consul_raft_fsm_lastRestoreDuration 0 +# HELP consul_raft_last_index Represents the raft last index. +# TYPE consul_raft_last_index gauge +consul_raft_last_index 0 +# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. +# TYPE consul_raft_leader_lastContact summary +consul_raft_leader_lastContact{quantile="0.5"} NaN +consul_raft_leader_lastContact{quantile="0.9"} NaN +consul_raft_leader_lastContact{quantile="0.99"} NaN +consul_raft_leader_lastContact_sum 0 +consul_raft_leader_lastContact_count 0 +# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is. +# TYPE consul_raft_leader_oldestLogAge gauge +consul_raft_leader_oldestLogAge 0 +# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster. +# TYPE consul_raft_rpc_installSnapshot summary +consul_raft_rpc_installSnapshot{quantile="0.5"} NaN +consul_raft_rpc_installSnapshot{quantile="0.9"} NaN +consul_raft_rpc_installSnapshot{quantile="0.99"} NaN +consul_raft_rpc_installSnapshot_sum 0 +consul_raft_rpc_installSnapshot_count 0 +# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk. +# TYPE consul_raft_snapshot_persist summary +consul_raft_snapshot_persist{quantile="0.5"} NaN +consul_raft_snapshot_persist{quantile="0.9"} NaN +consul_raft_snapshot_persist{quantile="0.99"} NaN +consul_raft_snapshot_persist_sum 0 +consul_raft_snapshot_persist_count 0 +# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election. +# TYPE consul_raft_state_candidate counter +consul_raft_state_candidate 0 +# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader. +# TYPE consul_raft_state_leader counter +consul_raft_state_leader 0 +# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection. +# TYPE consul_rpc_accept_conn counter +consul_rpc_accept_conn 0 +# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed. 
+# TYPE consul_rpc_consistentRead summary +consul_rpc_consistentRead{quantile="0.5"} NaN +consul_rpc_consistentRead{quantile="0.9"} NaN +consul_rpc_consistentRead{quantile="0.99"} NaN +consul_rpc_consistentRead_sum 0 +consul_rpc_consistentRead_count 0 +# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query. +# TYPE consul_rpc_cross_dc counter +consul_rpc_cross_dc 0 +# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling. +# TYPE consul_rpc_queries_blocking gauge +consul_rpc_queries_blocking 0 +# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries. +# TYPE consul_rpc_query counter +consul_rpc_query 0 +# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection. +# TYPE consul_rpc_raft_handoff counter +consul_rpc_raft_handoff 0 +# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request. +# TYPE consul_rpc_request counter +consul_rpc_request 0 +# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request. +# TYPE consul_rpc_request_error counter +consul_rpc_request_error 0 +# HELP consul_runtime_alloc_bytes consul_runtime_alloc_bytes +# TYPE consul_runtime_alloc_bytes gauge +consul_runtime_alloc_bytes 2.6333408e+07 +# HELP consul_runtime_free_count consul_runtime_free_count +# TYPE consul_runtime_free_count gauge +consul_runtime_free_count 674987 +# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns +# TYPE consul_runtime_gc_pause_ns summary +consul_runtime_gc_pause_ns{quantile="0.5"} NaN +consul_runtime_gc_pause_ns{quantile="0.9"} NaN +consul_runtime_gc_pause_ns{quantile="0.99"} NaN +consul_runtime_gc_pause_ns_sum 4.182423e+06 +consul_runtime_gc_pause_ns_count 17 +# HELP consul_runtime_heap_objects consul_runtime_heap_objects +# TYPE consul_runtime_heap_objects gauge +consul_runtime_heap_objects 63474 +# HELP consul_runtime_malloc_count consul_runtime_malloc_count +# TYPE consul_runtime_malloc_count gauge +consul_runtime_malloc_count 738461 +# HELP consul_runtime_num_goroutines consul_runtime_num_goroutines +# TYPE consul_runtime_num_goroutines gauge +consul_runtime_num_goroutines 53 +# HELP consul_runtime_sys_bytes consul_runtime_sys_bytes +# TYPE consul_runtime_sys_bytes gauge +consul_runtime_sys_bytes 5.1201032e+07 +# HELP consul_runtime_total_gc_pause_ns consul_runtime_total_gc_pause_ns +# TYPE consul_runtime_total_gc_pause_ns gauge +consul_runtime_total_gc_pause_ns 4.182423e+06 +# HELP consul_runtime_total_gc_runs consul_runtime_total_gc_runs +# TYPE consul_runtime_total_gc_runs gauge +consul_runtime_total_gc_runs 17 +# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms +# TYPE consul_serf_coordinate_adjustment_ms summary +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 1.9778540134429932 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 2.0611228942871094 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 2.0611228942871094 +consul_serf_coordinate_adjustment_ms_sum{network="lan"} 375.26442916691303 +consul_serf_coordinate_adjustment_ms_count{network="lan"} 410 +# HELP consul_serf_member_join consul_serf_member_join +# TYPE consul_serf_member_join counter +consul_serf_member_join{network="lan"} 3 +# HELP consul_serf_msgs_received consul_serf_msgs_received +# TYPE consul_serf_msgs_received summary 
+consul_serf_msgs_received{network="lan",quantile="0.5"} NaN +consul_serf_msgs_received{network="lan",quantile="0.9"} NaN +consul_serf_msgs_received{network="lan",quantile="0.99"} NaN +consul_serf_msgs_received_sum{network="lan"} 100 +consul_serf_msgs_received_count{network="lan"} 4 +# HELP consul_serf_msgs_sent consul_serf_msgs_sent +# TYPE consul_serf_msgs_sent summary +consul_serf_msgs_sent{network="lan",quantile="0.5"} NaN +consul_serf_msgs_sent{network="lan",quantile="0.9"} NaN +consul_serf_msgs_sent{network="lan",quantile="0.99"} NaN +consul_serf_msgs_sent_sum{network="lan"} 200 +consul_serf_msgs_sent_count{network="lan"} 8 +# HELP consul_serf_queue_Event consul_serf_queue_Event +# TYPE consul_serf_queue_Event summary +consul_serf_queue_Event{network="lan",quantile="0.5"} NaN +consul_serf_queue_Event{network="lan",quantile="0.9"} NaN +consul_serf_queue_Event{network="lan",quantile="0.99"} NaN +consul_serf_queue_Event_sum{network="lan"} 0 +consul_serf_queue_Event_count{network="lan"} 14 +# HELP consul_serf_queue_Intent consul_serf_queue_Intent +# TYPE consul_serf_queue_Intent summary +consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN +consul_serf_queue_Intent_sum{network="lan"} 0 +consul_serf_queue_Intent_count{network="lan"} 14 +# HELP consul_serf_queue_Query consul_serf_queue_Query +# TYPE consul_serf_queue_Query summary +consul_serf_queue_Query{network="lan",quantile="0.5"} NaN +consul_serf_queue_Query{network="lan",quantile="0.9"} NaN +consul_serf_queue_Query{network="lan",quantile="0.99"} NaN +consul_serf_queue_Query_sum{network="lan"} 0 +consul_serf_queue_Query_count{network="lan"} 14 +# HELP consul_serf_snapshot_appendLine consul_serf_snapshot_appendLine +# TYPE consul_serf_snapshot_appendLine summary +consul_serf_snapshot_appendLine{network="lan",quantile="0.5"} NaN +consul_serf_snapshot_appendLine{network="lan",quantile="0.9"} NaN +consul_serf_snapshot_appendLine{network="lan",quantile="0.99"} NaN +consul_serf_snapshot_appendLine_sum{network="lan"} 0.08486000122502446 +consul_serf_snapshot_appendLine_count{network="lan"} 4 +# HELP consul_server_isLeader Tracks if the server is a leader. +# TYPE consul_server_isLeader gauge +consul_server_isLeader 0 +# HELP consul_session_apply Measures the time spent applying a session update. +# TYPE consul_session_apply summary +consul_session_apply{quantile="0.5"} NaN +consul_session_apply{quantile="0.9"} NaN +consul_session_apply{quantile="0.99"} NaN +consul_session_apply_sum 0 +consul_session_apply_count 0 +# HELP consul_session_renew Measures the time spent renewing a session. +# TYPE consul_session_renew summary +consul_session_renew{quantile="0.5"} NaN +consul_session_renew{quantile="0.9"} NaN +consul_session_renew{quantile="0.99"} NaN +consul_session_renew_sum 0 +consul_session_renew_count 0 +# HELP consul_session_ttl_active Tracks the active number of sessions being tracked. +# TYPE consul_session_ttl_active gauge +consul_session_ttl_active 0 +# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session. +# TYPE consul_session_ttl_invalidate summary +consul_session_ttl_invalidate{quantile="0.5"} NaN +consul_session_ttl_invalidate{quantile="0.9"} NaN +consul_session_ttl_invalidate{quantile="0.99"} NaN +consul_session_ttl_invalidate_sum 0 +consul_session_ttl_invalidate_count 0 +# HELP consul_txn_apply Measures the time spent applying a transaction operation. 
+# TYPE consul_txn_apply summary +consul_txn_apply{quantile="0.5"} NaN +consul_txn_apply{quantile="0.9"} NaN +consul_txn_apply{quantile="0.99"} NaN +consul_txn_apply_sum 0 +consul_txn_apply_count 0 +# HELP consul_txn_read Measures the time spent returning a read transaction. +# TYPE consul_txn_read summary +consul_txn_read{quantile="0.5"} NaN +consul_txn_read{quantile="0.9"} NaN +consul_txn_read{quantile="0.99"} NaN +consul_txn_read_sum 0 +consul_txn_read_count 0 +# HELP consul_version Represents the Consul version. +# TYPE consul_version gauge +consul_version 0 +consul_version{pre_release="",version="1.13.2"} 1 +# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version. +# TYPE consul_xds_server_streams gauge +consul_xds_server_streams 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 1.9158e-05 +go_gc_duration_seconds{quantile="0.25"} 0.000109081 +go_gc_duration_seconds{quantile="0.5"} 0.000251188 +go_gc_duration_seconds{quantile="0.75"} 0.000417427 +go_gc_duration_seconds{quantile="1"} 0.000564015 +go_gc_duration_seconds_sum 0.004182423 +go_gc_duration_seconds_count 17 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 58 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.18.1"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 2.6578488e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.1175476e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.493307e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 675169 +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 3.182534545511277e-05 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 6.043992e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 2.6578488e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 1.2009472e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 2.8884992e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 64658 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 1.056768e+06 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. 
+# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 4.0894464e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.671442476091947e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 739827 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 265880 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 310080 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 3.547528e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.395013e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.048576e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.048576e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 5.1201032e+07 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 13 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 3.12 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1024 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 18 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 9.9598336e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.67144207026e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 8.133632e+08 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes -1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-self.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-self.json new file mode 100644 index 00000000000000..e5f75dc24dd28e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/client_v1-agent-self.json @@ -0,0 +1,50 @@ +{ + "Config": { + "Datacenter": "us-central", + "PrimaryDatacenter": "us-central", + "NodeName": "satya-vm", + "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Revision": "0e046bbb", + "Server": false, + "Version": "1.13.2", + "BuildDate": "2022-09-20T20:30:07Z" + }, + "DebugConfig": { + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": true, + "DogstatsdAddr": "", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "10m0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt new file mode 100644 index 00000000000000..63dbaddfcb5948 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt @@ -0,0 +1,1255 @@ +# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token. 
+# TYPE consul_acl_ResolveToken summary +consul_acl_ResolveToken{quantile="0.5"} NaN +consul_acl_ResolveToken{quantile="0.9"} NaN +consul_acl_ResolveToken{quantile="0.99"} NaN +consul_acl_ResolveToken_sum 0 +consul_acl_ResolveToken_count 0 +# HELP consul_acl_authmethod_delete +# TYPE consul_acl_authmethod_delete summary +consul_acl_authmethod_delete{quantile="0.5"} NaN +consul_acl_authmethod_delete{quantile="0.9"} NaN +consul_acl_authmethod_delete{quantile="0.99"} NaN +consul_acl_authmethod_delete_sum 0 +consul_acl_authmethod_delete_count 0 +# HELP consul_acl_authmethod_upsert +# TYPE consul_acl_authmethod_upsert summary +consul_acl_authmethod_upsert{quantile="0.5"} NaN +consul_acl_authmethod_upsert{quantile="0.9"} NaN +consul_acl_authmethod_upsert{quantile="0.99"} NaN +consul_acl_authmethod_upsert_sum 0 +consul_acl_authmethod_upsert_count 0 +# HELP consul_acl_bindingrule_delete +# TYPE consul_acl_bindingrule_delete summary +consul_acl_bindingrule_delete{quantile="0.5"} NaN +consul_acl_bindingrule_delete{quantile="0.9"} NaN +consul_acl_bindingrule_delete{quantile="0.99"} NaN +consul_acl_bindingrule_delete_sum 0 +consul_acl_bindingrule_delete_count 0 +# HELP consul_acl_bindingrule_upsert +# TYPE consul_acl_bindingrule_upsert summary +consul_acl_bindingrule_upsert{quantile="0.5"} NaN +consul_acl_bindingrule_upsert{quantile="0.9"} NaN +consul_acl_bindingrule_upsert{quantile="0.99"} NaN +consul_acl_bindingrule_upsert_sum 0 +consul_acl_bindingrule_upsert_count 0 +# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_deregistration counter +consul_acl_blocked_check_deregistration 0 +# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_registration counter +consul_acl_blocked_check_registration 0 +# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL) +# TYPE consul_acl_blocked_node_registration counter +consul_acl_blocked_node_registration 0 +# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL) +# TYPE consul_acl_blocked_service_deregistration counter +consul_acl_blocked_service_deregistration 0 +# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL) +# TYPE consul_acl_blocked_service_registration counter +consul_acl_blocked_service_registration 0 +# HELP consul_acl_login +# TYPE consul_acl_login summary +consul_acl_login{quantile="0.5"} NaN +consul_acl_login{quantile="0.9"} NaN +consul_acl_login{quantile="0.99"} NaN +consul_acl_login_sum 0 +consul_acl_login_count 0 +# HELP consul_acl_logout +# TYPE consul_acl_logout summary +consul_acl_logout{quantile="0.5"} NaN +consul_acl_logout{quantile="0.9"} NaN +consul_acl_logout{quantile="0.99"} NaN +consul_acl_logout_sum 0 +consul_acl_logout_count 0 +# HELP consul_acl_policy_delete +# TYPE consul_acl_policy_delete summary +consul_acl_policy_delete{quantile="0.5"} NaN +consul_acl_policy_delete{quantile="0.9"} NaN +consul_acl_policy_delete{quantile="0.99"} NaN +consul_acl_policy_delete_sum 0 +consul_acl_policy_delete_count 0 +# HELP consul_acl_policy_upsert +# TYPE consul_acl_policy_upsert summary +consul_acl_policy_upsert{quantile="0.5"} NaN +consul_acl_policy_upsert{quantile="0.9"} NaN +consul_acl_policy_upsert{quantile="0.99"} NaN +consul_acl_policy_upsert_sum 
0 +consul_acl_policy_upsert_count 0 +# HELP consul_acl_role_delete +# TYPE consul_acl_role_delete summary +consul_acl_role_delete{quantile="0.5"} NaN +consul_acl_role_delete{quantile="0.9"} NaN +consul_acl_role_delete{quantile="0.99"} NaN +consul_acl_role_delete_sum 0 +consul_acl_role_delete_count 0 +# HELP consul_acl_role_upsert +# TYPE consul_acl_role_upsert summary +consul_acl_role_upsert{quantile="0.5"} NaN +consul_acl_role_upsert{quantile="0.9"} NaN +consul_acl_role_upsert{quantile="0.99"} NaN +consul_acl_role_upsert_sum 0 +consul_acl_role_upsert_count 0 +# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache. +# TYPE consul_acl_token_cache_hit counter +consul_acl_token_cache_hit 0 +# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache. +# TYPE consul_acl_token_cache_miss counter +consul_acl_token_cache_miss 0 +# HELP consul_acl_token_clone +# TYPE consul_acl_token_clone summary +consul_acl_token_clone{quantile="0.5"} NaN +consul_acl_token_clone{quantile="0.9"} NaN +consul_acl_token_clone{quantile="0.99"} NaN +consul_acl_token_clone_sum 0 +consul_acl_token_clone_count 0 +# HELP consul_acl_token_delete +# TYPE consul_acl_token_delete summary +consul_acl_token_delete{quantile="0.5"} NaN +consul_acl_token_delete{quantile="0.9"} NaN +consul_acl_token_delete{quantile="0.99"} NaN +consul_acl_token_delete_sum 0 +consul_acl_token_delete_count 0 +# HELP consul_acl_token_upsert +# TYPE consul_acl_token_upsert summary +consul_acl_token_upsert{quantile="0.5"} NaN +consul_acl_token_upsert{quantile="0.9"} NaN +consul_acl_token_upsert{quantile="0.99"} NaN +consul_acl_token_upsert_sum 0 +consul_acl_token_upsert_count 0 +# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour +# TYPE consul_agent_tls_cert_expiry gauge +consul_agent_tls_cert_expiry 0 +# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path. +# TYPE consul_api_http summary +consul_api_http{quantile="0.5"} NaN +consul_api_http{quantile="0.9"} NaN +consul_api_http{quantile="0.99"} NaN +consul_api_http_sum 0 +consul_api_http_count 0 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.11646900326013565 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.3685469925403595 +consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 1.142822027206421 +consul_api_http_sum{method="GET",path="v1_agent_checks"} 24054.416150089353 +consul_api_http_count{method="GET",path="v1_agent_checks"} 99423 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.8454239964485168 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 4.116001129150391 +consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 6.345314025878906 +consul_api_http_sum{method="GET",path="v1_agent_metrics"} 169447.17186257243 +consul_api_http_count{method="GET",path="v1_agent_metrics"} 118670 +# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function. +# TYPE consul_autopilot_failure_tolerance gauge +consul_autopilot_failure_tolerance 1 +# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy. 
+# TYPE consul_autopilot_healthy gauge +consul_autopilot_healthy 1 +# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found. +# TYPE consul_catalog_connect_not_found counter +consul_catalog_connect_not_found 0 +# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service. +# TYPE consul_catalog_connect_query counter +consul_catalog_connect_query 0 +# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag. +# TYPE consul_catalog_connect_query_tag counter +consul_catalog_connect_query_tag 0 +# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags. +# TYPE consul_catalog_connect_query_tags counter +consul_catalog_connect_query_tags 0 +# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation. +# TYPE consul_catalog_deregister summary +consul_catalog_deregister{quantile="0.5"} NaN +consul_catalog_deregister{quantile="0.9"} NaN +consul_catalog_deregister{quantile="0.99"} NaN +consul_catalog_deregister_sum 0 +consul_catalog_deregister_count 0 +# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation. +# TYPE consul_catalog_register summary +consul_catalog_register{quantile="0.5"} NaN +consul_catalog_register{quantile="0.9"} NaN +consul_catalog_register{quantile="0.99"} NaN +consul_catalog_register_sum 15302.798070907593 +consul_catalog_register_count 193 +# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found. +# TYPE consul_catalog_service_not_found counter +consul_catalog_service_not_found 0 +# HELP consul_catalog_service_query Increments for each catalog query for the given service. +# TYPE consul_catalog_service_query counter +consul_catalog_service_query 0 +# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag. +# TYPE consul_catalog_service_query_tag counter +consul_catalog_service_query_tag 0 +# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags. +# TYPE consul_catalog_service_query_tags counter +consul_catalog_service_query_tags 0 +# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog. +# TYPE consul_client_api_catalog_datacenters counter +consul_client_api_catalog_datacenters 0 +# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request. +# TYPE consul_client_api_catalog_deregister counter +consul_client_api_catalog_deregister 0 +# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway. +# TYPE consul_client_api_catalog_gateway_services counter +consul_client_api_catalog_gateway_services 0 +# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services. +# TYPE consul_client_api_catalog_node_service_list counter +consul_client_api_catalog_node_service_list 0 +# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. 
+# TYPE consul_client_api_catalog_node_services counter +consul_client_api_catalog_node_services 0 +# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog. +# TYPE consul_client_api_catalog_nodes counter +consul_client_api_catalog_nodes 0 +# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request. +# TYPE consul_client_api_catalog_register counter +consul_client_api_catalog_register 0 +# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service. +# TYPE consul_client_api_catalog_service_nodes counter +consul_client_api_catalog_service_nodes 0 +# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog. +# TYPE consul_client_api_catalog_services counter +consul_client_api_catalog_services 0 +# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service. +# TYPE consul_client_api_error_catalog_service_nodes counter +consul_client_api_error_catalog_service_nodes 0 +# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters. +# TYPE consul_client_api_success_catalog_datacenters counter +consul_client_api_success_catalog_datacenters 0 +# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request. +# TYPE consul_client_api_success_catalog_deregister counter +consul_client_api_success_catalog_deregister 0 +# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway. +# TYPE consul_client_api_success_catalog_gateway_services counter +consul_client_api_success_catalog_gateway_services 0 +# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services. +# TYPE consul_client_api_success_catalog_node_service_list counter +consul_client_api_success_catalog_node_service_list 0 +# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node. +# TYPE consul_client_api_success_catalog_node_services counter +consul_client_api_success_catalog_node_services 0 +# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes. +# TYPE consul_client_api_success_catalog_nodes counter +consul_client_api_success_catalog_nodes 0 +# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request. +# TYPE consul_client_api_success_catalog_register counter +consul_client_api_success_catalog_register 0 +# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service. +# TYPE consul_client_api_success_catalog_service_nodes counter +consul_client_api_success_catalog_service_nodes 0 +# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services. 
+# TYPE consul_client_api_success_catalog_services counter +consul_client_api_success_catalog_services 0 +# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. +# TYPE consul_client_rpc counter +consul_client_rpc 6838 +# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters. +# TYPE consul_client_rpc_error_catalog_datacenters counter +consul_client_rpc_error_catalog_datacenters 0 +# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request. +# TYPE consul_client_rpc_error_catalog_deregister counter +consul_client_rpc_error_catalog_deregister 0 +# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway. +# TYPE consul_client_rpc_error_catalog_gateway_services counter +consul_client_rpc_error_catalog_gateway_services 0 +# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services. +# TYPE consul_client_rpc_error_catalog_node_service_list counter +consul_client_rpc_error_catalog_node_service_list 0 +# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node. +# TYPE consul_client_rpc_error_catalog_node_services counter +consul_client_rpc_error_catalog_node_services 0 +# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes. +# TYPE consul_client_rpc_error_catalog_nodes counter +consul_client_rpc_error_catalog_nodes 0 +# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request. +# TYPE consul_client_rpc_error_catalog_register counter +consul_client_rpc_error_catalog_register 0 +# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service. +# TYPE consul_client_rpc_error_catalog_service_nodes counter +consul_client_rpc_error_catalog_service_nodes 0 +# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services. +# TYPE consul_client_rpc_error_catalog_services counter +consul_client_rpc_error_catalog_services 0 +# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration. +# TYPE consul_client_rpc_exceeded counter +consul_client_rpc_exceeded 0 +# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. +# TYPE consul_client_rpc_failed counter +consul_client_rpc_failed 0 +# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided. +# TYPE consul_consul_cache_bypass counter +consul_consul_cache_bypass 0 +# HELP consul_consul_cache_entries_count Represents the number of entries in this cache. +# TYPE consul_consul_cache_entries_count gauge +consul_consul_cache_entries_count 0 +# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted. 
+# TYPE consul_consul_cache_evict_expired counter +consul_consul_cache_evict_expired 0 +# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache. +# TYPE consul_consul_cache_fetch_error counter +consul_consul_cache_fetch_error 0 +# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache. +# TYPE consul_consul_cache_fetch_success counter +consul_consul_cache_fetch_success 0 +# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead +# TYPE consul_consul_fsm_ca summary +consul_consul_fsm_ca{quantile="0.5"} NaN +consul_consul_fsm_ca{quantile="0.9"} NaN +consul_consul_fsm_ca{quantile="0.99"} NaN +consul_consul_fsm_ca_sum 0 +consul_consul_fsm_ca_count 0 +# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead +# TYPE consul_consul_fsm_intention summary +consul_consul_fsm_intention{quantile="0.5"} NaN +consul_consul_fsm_intention{quantile="0.9"} NaN +consul_consul_fsm_intention{quantile="0.99"} NaN +consul_consul_fsm_intention_sum 0 +consul_consul_fsm_intention_count 0 +# HELP consul_consul_intention_apply +# TYPE consul_consul_intention_apply summary +consul_consul_intention_apply{quantile="0.5"} NaN +consul_consul_intention_apply{quantile="0.9"} NaN +consul_consul_intention_apply{quantile="0.99"} NaN +consul_consul_intention_apply_sum 0 +consul_consul_intention_apply_count 0 +# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. +# TYPE consul_consul_members_clients gauge +consul_consul_members_clients 0 +consul_consul_members_clients{datacenter="us-central"} 0 +# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6. +# TYPE consul_consul_members_servers gauge +consul_consul_members_servers 0 +consul_consul_members_servers{datacenter="us-central"} 3 +# HELP consul_consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peering" and, for enterprise, "partition". We emit this metric every 9 seconds +# TYPE consul_consul_peering_exported_services gauge +consul_consul_peering_exported_services 0 +# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4. 
+# TYPE consul_consul_state_config_entries gauge +consul_consul_state_config_entries 0 +consul_consul_state_config_entries{datacenter="us-central",kind="exported-services"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="ingress-gateway"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="mesh"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="proxy-defaults"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="service-defaults"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="service-intentions"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="service-resolver"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="service-router"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="service-splitter"} 0 +consul_consul_state_config_entries{datacenter="us-central",kind="terminating-gateway"} 0 +# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4. +# TYPE consul_consul_state_connect_instances gauge +consul_consul_state_connect_instances 0 +consul_consul_state_connect_instances{datacenter="us-central",kind="connect-native"} 0 +consul_consul_state_connect_instances{datacenter="us-central",kind="connect-proxy"} 0 +consul_consul_state_connect_instances{datacenter="us-central",kind="ingress-gateway"} 0 +consul_consul_state_connect_instances{datacenter="us-central",kind="mesh-gateway"} 0 +consul_consul_state_connect_instances{datacenter="us-central",kind="terminating-gateway"} 0 +# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. +# TYPE consul_consul_state_kv_entries gauge +consul_consul_state_kv_entries 0 +consul_consul_state_kv_entries{datacenter="us-central"} 1 +# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_consul_state_nodes gauge +consul_consul_state_nodes 0 +consul_consul_state_nodes{datacenter="us-central"} 3 +# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. +# TYPE consul_consul_state_peerings gauge +consul_consul_state_peerings 0 +consul_consul_state_peerings{datacenter="us-central"} 0 +# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_consul_state_service_instances gauge +consul_consul_state_service_instances 0 +consul_consul_state_service_instances{datacenter="us-central"} 4 +# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. 
+# TYPE consul_consul_state_services gauge +consul_consul_state_services 0 +consul_consul_state_services{datacenter="us-central"} 2 +# HELP consul_federation_state_apply +# TYPE consul_federation_state_apply summary +consul_federation_state_apply{quantile="0.5"} NaN +consul_federation_state_apply{quantile="0.9"} NaN +consul_federation_state_apply{quantile="0.99"} NaN +consul_federation_state_apply_sum 0 +consul_federation_state_apply_count 0 +# HELP consul_federation_state_get +# TYPE consul_federation_state_get summary +consul_federation_state_get{quantile="0.5"} NaN +consul_federation_state_get{quantile="0.9"} NaN +consul_federation_state_get{quantile="0.99"} NaN +consul_federation_state_get_sum 0 +consul_federation_state_get_count 0 +# HELP consul_federation_state_list +# TYPE consul_federation_state_list summary +consul_federation_state_list{quantile="0.5"} NaN +consul_federation_state_list{quantile="0.9"} NaN +consul_federation_state_list{quantile="0.99"} NaN +consul_federation_state_list_sum 0 +consul_federation_state_list_count 0 +# HELP consul_federation_state_list_mesh_gateways +# TYPE consul_federation_state_list_mesh_gateways summary +consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN +consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN +consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN +consul_federation_state_list_mesh_gateways_sum 0 +consul_federation_state_list_mesh_gateways_count 0 +# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM. +# TYPE consul_fsm_acl summary +consul_fsm_acl{quantile="0.5"} NaN +consul_fsm_acl{quantile="0.9"} NaN +consul_fsm_acl{quantile="0.99"} NaN +consul_fsm_acl_sum 0 +consul_fsm_acl_count 0 +# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM. +# TYPE consul_fsm_acl_authmethod summary +consul_fsm_acl_authmethod{quantile="0.5"} NaN +consul_fsm_acl_authmethod{quantile="0.9"} NaN +consul_fsm_acl_authmethod{quantile="0.99"} NaN +consul_fsm_acl_authmethod_sum 0 +consul_fsm_acl_authmethod_count 0 +# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM. +# TYPE consul_fsm_acl_bindingrule summary +consul_fsm_acl_bindingrule{quantile="0.5"} NaN +consul_fsm_acl_bindingrule{quantile="0.9"} NaN +consul_fsm_acl_bindingrule{quantile="0.99"} NaN +consul_fsm_acl_bindingrule_sum 0 +consul_fsm_acl_bindingrule_count 0 +# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM. +# TYPE consul_fsm_acl_policy summary +consul_fsm_acl_policy{quantile="0.5"} NaN +consul_fsm_acl_policy{quantile="0.9"} NaN +consul_fsm_acl_policy{quantile="0.99"} NaN +consul_fsm_acl_policy_sum 0 +consul_fsm_acl_policy_count 0 +# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM. +# TYPE consul_fsm_acl_token summary +consul_fsm_acl_token{quantile="0.5"} NaN +consul_fsm_acl_token{quantile="0.9"} NaN +consul_fsm_acl_token{quantile="0.99"} NaN +consul_fsm_acl_token_sum 0 +consul_fsm_acl_token_count 0 +# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM. +# TYPE consul_fsm_autopilot summary +consul_fsm_autopilot{quantile="0.5"} NaN +consul_fsm_autopilot{quantile="0.9"} NaN +consul_fsm_autopilot{quantile="0.99"} NaN +consul_fsm_autopilot_sum 0 +consul_fsm_autopilot_count 0 +# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM. 
+# TYPE consul_fsm_ca summary +consul_fsm_ca{quantile="0.5"} NaN +consul_fsm_ca{quantile="0.9"} NaN +consul_fsm_ca{quantile="0.99"} NaN +consul_fsm_ca_sum 0 +consul_fsm_ca_count 0 +# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate. +# TYPE consul_fsm_ca_leaf summary +consul_fsm_ca_leaf{quantile="0.5"} NaN +consul_fsm_ca_leaf{quantile="0.9"} NaN +consul_fsm_ca_leaf{quantile="0.99"} NaN +consul_fsm_ca_leaf_sum 0 +consul_fsm_ca_leaf_count 0 +# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM. +# TYPE consul_fsm_coordinate_batch_update summary +consul_fsm_coordinate_batch_update{quantile="0.5"} 0.846472978591919 +consul_fsm_coordinate_batch_update{quantile="0.9"} 0.846472978591919 +consul_fsm_coordinate_batch_update{quantile="0.99"} 0.846472978591919 +consul_fsm_coordinate_batch_update_sum 1319.3496078031603 +consul_fsm_coordinate_batch_update_count 22753 +# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM. +# TYPE consul_fsm_deregister summary +consul_fsm_deregister{quantile="0.5"} NaN +consul_fsm_deregister{quantile="0.9"} NaN +consul_fsm_deregister{quantile="0.99"} NaN +consul_fsm_deregister_sum 7.263695985078812 +consul_fsm_deregister_count 25 +# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM. +# TYPE consul_fsm_intention summary +consul_fsm_intention{quantile="0.5"} NaN +consul_fsm_intention{quantile="0.9"} NaN +consul_fsm_intention{quantile="0.99"} NaN +consul_fsm_intention_sum 0 +consul_fsm_intention_count 0 +# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM. +# TYPE consul_fsm_kvs summary +consul_fsm_kvs{quantile="0.5"} NaN +consul_fsm_kvs{quantile="0.9"} NaN +consul_fsm_kvs{quantile="0.99"} NaN +consul_fsm_kvs_sum 0 +consul_fsm_kvs_count 0 +# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM. +# TYPE consul_fsm_peering summary +consul_fsm_peering{quantile="0.5"} NaN +consul_fsm_peering{quantile="0.9"} NaN +consul_fsm_peering{quantile="0.99"} NaN +consul_fsm_peering_sum 0 +consul_fsm_peering_count 0 +# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot. +# TYPE consul_fsm_persist summary +consul_fsm_persist{quantile="0.5"} NaN +consul_fsm_persist{quantile="0.9"} NaN +consul_fsm_persist{quantile="0.99"} NaN +consul_fsm_persist_sum 0.7345139980316162 +consul_fsm_persist_count 1 +# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM. +# TYPE consul_fsm_prepared_query summary +consul_fsm_prepared_query{quantile="0.5"} NaN +consul_fsm_prepared_query{quantile="0.9"} NaN +consul_fsm_prepared_query{quantile="0.99"} NaN +consul_fsm_prepared_query_sum 0 +consul_fsm_prepared_query_count 0 +# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM. +# TYPE consul_fsm_register summary +consul_fsm_register{quantile="0.5"} NaN +consul_fsm_register{quantile="0.9"} NaN +consul_fsm_register{quantile="0.99"} NaN +consul_fsm_register_sum 77.52807594463229 +consul_fsm_register_count 475 +# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM. 
+# TYPE consul_fsm_session summary +consul_fsm_session{quantile="0.5"} NaN +consul_fsm_session{quantile="0.9"} NaN +consul_fsm_session{quantile="0.99"} NaN +consul_fsm_session_sum 0 +consul_fsm_session_count 0 +# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM. +# TYPE consul_fsm_system_metadata summary +consul_fsm_system_metadata{quantile="0.5"} NaN +consul_fsm_system_metadata{quantile="0.9"} NaN +consul_fsm_system_metadata{quantile="0.99"} NaN +consul_fsm_system_metadata_sum 0 +consul_fsm_system_metadata_count 0 +# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM. +# TYPE consul_fsm_tombstone summary +consul_fsm_tombstone{quantile="0.5"} NaN +consul_fsm_tombstone{quantile="0.9"} NaN +consul_fsm_tombstone{quantile="0.99"} NaN +consul_fsm_tombstone_sum 0 +consul_fsm_tombstone_count 0 +# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM. +# TYPE consul_fsm_txn summary +consul_fsm_txn{quantile="0.5"} NaN +consul_fsm_txn{quantile="0.9"} NaN +consul_fsm_txn{quantile="0.99"} NaN +consul_fsm_txn_sum 0 +consul_fsm_txn_count 0 +# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server. +# TYPE consul_grpc_client_connection_count counter +consul_grpc_client_connection_count 875 +# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers. +# TYPE consul_grpc_client_connections gauge +consul_grpc_client_connections 1 +# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server. +# TYPE consul_grpc_client_request_count counter +consul_grpc_client_request_count 0 +# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server. +# TYPE consul_grpc_server_connection_count counter +consul_grpc_server_connection_count 853 +# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server. +# TYPE consul_grpc_server_connections gauge +consul_grpc_server_connections 1 +# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server. +# TYPE consul_grpc_server_request_count counter +consul_grpc_server_request_count 0 +# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server. +# TYPE consul_grpc_server_stream_count counter +consul_grpc_server_stream_count 0 +# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server. +# TYPE consul_grpc_server_streams gauge +consul_grpc_server_streams 0 +# HELP consul_intention_apply +# TYPE consul_intention_apply summary +consul_intention_apply{quantile="0.5"} NaN +consul_intention_apply{quantile="0.9"} NaN +consul_intention_apply{quantile="0.99"} NaN +consul_intention_apply_sum 0 +consul_intention_apply_count 0 +# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store. +# TYPE consul_kvs_apply summary +consul_kvs_apply{quantile="0.5"} NaN +consul_kvs_apply{quantile="0.9"} NaN +consul_kvs_apply{quantile="0.99"} NaN +consul_kvs_apply_sum 0 +consul_kvs_apply_count 0 +# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership. 
+# TYPE consul_leader_barrier summary +consul_leader_barrier{quantile="0.5"} NaN +consul_leader_barrier{quantile="0.9"} NaN +consul_leader_barrier{quantile="0.99"} NaN +consul_leader_barrier_sum 115364.21848773956 +consul_leader_barrier_count 1657 +# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones. +# TYPE consul_leader_reapTombstones summary +consul_leader_reapTombstones{quantile="0.5"} NaN +consul_leader_reapTombstones{quantile="0.9"} NaN +consul_leader_reapTombstones{quantile="0.99"} NaN +consul_leader_reapTombstones_sum 26.21475601196289 +consul_leader_reapTombstones_count 1 +# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information. +# TYPE consul_leader_reconcile summary +consul_leader_reconcile{quantile="0.5"} NaN +consul_leader_reconcile{quantile="0.9"} NaN +consul_leader_reconcile{quantile="0.99"} NaN +consul_leader_reconcile_sum 543.0488127619028 +consul_leader_reconcile_count 1657 +# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information. +# TYPE consul_leader_reconcileMember summary +consul_leader_reconcileMember{quantile="0.5"} NaN +consul_leader_reconcileMember{quantile="0.9"} NaN +consul_leader_reconcileMember{quantile="0.99"} NaN +consul_leader_reconcileMember_sum 511.33584634773433 +consul_leader_reconcileMember_count 4975 +# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_policies_index gauge +consul_leader_replication_acl_policies_index 0 +# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader +# TYPE consul_leader_replication_acl_policies_status gauge +consul_leader_replication_acl_policies_status 0 +# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_roles_index gauge +consul_leader_replication_acl_roles_index 0 +# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader +# TYPE consul_leader_replication_acl_roles_status gauge +consul_leader_replication_acl_roles_status 0 +# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_acl_tokens_index gauge +consul_leader_replication_acl_tokens_index 0 +# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader +# TYPE consul_leader_replication_acl_tokens_status gauge +consul_leader_replication_acl_tokens_status 0 +# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_config_entries_index gauge +consul_leader_replication_config_entries_index 0 +# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader +# TYPE consul_leader_replication_config_entries_status gauge +consul_leader_replication_config_entries_status 0 +# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_federation_state_index gauge 
+consul_leader_replication_federation_state_index 0 +# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader +# TYPE consul_leader_replication_federation_state_status gauge +consul_leader_replication_federation_state_status 0 +# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated +# TYPE consul_leader_replication_namespaces_index gauge +consul_leader_replication_namespaces_index 0 +# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader +# TYPE consul_leader_replication_namespaces_status gauge +consul_leader_replication_namespaces_status 0 +# HELP consul_memberlist_gossip consul_memberlist_gossip +# TYPE consul_memberlist_gossip summary +consul_memberlist_gossip{network="lan",quantile="0.5"} 0.02304000034928322 +consul_memberlist_gossip{network="lan",quantile="0.9"} 0.03136000037193298 +consul_memberlist_gossip{network="lan",quantile="0.99"} 0.0756089985370636 +consul_memberlist_gossip_sum{network="lan"} 10881.414362509036 +consul_memberlist_gossip_count{network="lan"} 497026 +consul_memberlist_gossip{network="wan",quantile="0.5"} 0.018719999119639397 +consul_memberlist_gossip{network="wan",quantile="0.9"} 0.029740000143647194 +consul_memberlist_gossip{network="wan",quantile="0.99"} 0.048298001289367676 +consul_memberlist_gossip_sum{network="wan"} 4231.353692025063 +consul_memberlist_gossip_count{network="wan"} 198810 +# HELP consul_memberlist_probeNode consul_memberlist_probeNode +# TYPE consul_memberlist_probeNode summary +consul_memberlist_probeNode{network="lan",quantile="0.5"} 0.8122829794883728 +consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.0762710571289062 +consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.0762710571289062 +consul_memberlist_probeNode_sum{network="lan"} 79954.1767796278 +consul_memberlist_probeNode_count{network="lan"} 94283 +consul_memberlist_probeNode{network="wan",quantile="0.5"} 0.8124139904975891 +consul_memberlist_probeNode{network="wan",quantile="0.9"} 0.9564329981803894 +consul_memberlist_probeNode{network="wan",quantile="0.99"} 0.9564329981803894 +consul_memberlist_probeNode_sum{network="wan"} 17170.356712043285 +consul_memberlist_probeNode_count{network="wan"} 18817 +# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode +# TYPE consul_memberlist_pushPullNode summary +consul_memberlist_pushPullNode{network="lan",quantile="0.5"} NaN +consul_memberlist_pushPullNode{network="lan",quantile="0.9"} NaN +consul_memberlist_pushPullNode{network="lan",quantile="0.99"} NaN +consul_memberlist_pushPullNode_sum{network="lan"} 6319.592049598694 +consul_memberlist_pushPullNode_count{network="lan"} 3316 +consul_memberlist_pushPullNode{network="wan",quantile="0.5"} NaN +consul_memberlist_pushPullNode{network="wan",quantile="0.9"} NaN +consul_memberlist_pushPullNode{network="wan",quantile="0.99"} NaN +consul_memberlist_pushPullNode_sum{network="wan"} 3150.5957354307175 +consul_memberlist_pushPullNode_count{network="wan"} 1657 +# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept +# TYPE consul_memberlist_tcp_accept counter +consul_memberlist_tcp_accept{network="lan"} 3327 +consul_memberlist_tcp_accept{network="wan"} 1661 +# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect +# TYPE consul_memberlist_tcp_connect counter +consul_memberlist_tcp_connect{network="lan"} 3316 
+consul_memberlist_tcp_connect{network="wan"} 1657 +# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent +# TYPE consul_memberlist_tcp_sent counter +consul_memberlist_tcp_sent{network="lan"} 5.728236e+06 +consul_memberlist_tcp_sent{network="wan"} 2.671365e+06 +# HELP consul_memberlist_udp_received consul_memberlist_udp_received +# TYPE consul_memberlist_udp_received counter +consul_memberlist_udp_received{network="lan"} 2.7072233e+07 +consul_memberlist_udp_received{network="wan"} 5.805281e+06 +# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent +# TYPE consul_memberlist_udp_sent counter +consul_memberlist_udp_sent{network="lan"} 2.7064743e+07 +consul_memberlist_udp_sent{network="wan"} 5.806099e+06 +# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour +# TYPE consul_mesh_active_root_ca_expiry gauge +consul_mesh_active_root_ca_expiry NaN +# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour +# TYPE consul_mesh_active_signing_ca_expiry gauge +consul_mesh_active_signing_ca_expiry NaN +# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update. +# TYPE consul_prepared_query_apply summary +consul_prepared_query_apply{quantile="0.5"} NaN +consul_prepared_query_apply{quantile="0.9"} NaN +consul_prepared_query_apply{quantile="0.99"} NaN +consul_prepared_query_apply_sum 0 +consul_prepared_query_apply_count 0 +# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request. +# TYPE consul_prepared_query_execute summary +consul_prepared_query_execute{quantile="0.5"} NaN +consul_prepared_query_execute{quantile="0.9"} NaN +consul_prepared_query_execute{quantile="0.99"} NaN +consul_prepared_query_execute_sum 0 +consul_prepared_query_execute_count 0 +# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter. +# TYPE consul_prepared_query_execute_remote summary +consul_prepared_query_execute_remote{quantile="0.5"} NaN +consul_prepared_query_execute_remote{quantile="0.9"} NaN +consul_prepared_query_execute_remote{quantile="0.99"} NaN +consul_prepared_query_execute_remote_sum 0 +consul_prepared_query_execute_remote_count 0 +# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request. +# TYPE consul_prepared_query_explain summary +consul_prepared_query_explain{quantile="0.5"} NaN +consul_prepared_query_explain{quantile="0.9"} NaN +consul_prepared_query_explain{quantile="0.99"} NaN +consul_prepared_query_explain_sum 0 +consul_prepared_query_explain_count 0 +# HELP consul_raft_applied_index Represents the raft applied index. +# TYPE consul_raft_applied_index gauge +consul_raft_applied_index 452955 +# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval. 
+# TYPE consul_raft_apply counter +consul_raft_apply 10681 +# HELP consul_raft_barrier consul_raft_barrier +# TYPE consul_raft_barrier counter +consul_raft_barrier 1657 +# HELP consul_raft_boltdb_freePageBytes consul_raft_boltdb_freePageBytes +# TYPE consul_raft_boltdb_freePageBytes gauge +consul_raft_boltdb_freePageBytes 5.758976e+06 +# HELP consul_raft_boltdb_freelistBytes consul_raft_boltdb_freelistBytes +# TYPE consul_raft_boltdb_freelistBytes gauge +consul_raft_boltdb_freelistBytes 11264 +# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog +# TYPE consul_raft_boltdb_getLog summary +consul_raft_boltdb_getLog{quantile="0.5"} 0.030570000410079956 +consul_raft_boltdb_getLog{quantile="0.9"} 0.030570000410079956 +consul_raft_boltdb_getLog{quantile="0.99"} 0.030570000410079956 +consul_raft_boltdb_getLog_sum 630.6968591569457 +consul_raft_boltdb_getLog_count 39046 +# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize +# TYPE consul_raft_boltdb_logBatchSize summary +consul_raft_boltdb_logBatchSize{quantile="0.5"} 243 +consul_raft_boltdb_logBatchSize{quantile="0.9"} 243 +consul_raft_boltdb_logBatchSize{quantile="0.99"} 243 +consul_raft_boltdb_logBatchSize_sum 3.567357e+06 +consul_raft_boltdb_logBatchSize_count 12360 +# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize +# TYPE consul_raft_boltdb_logSize summary +consul_raft_boltdb_logSize{quantile="0.5"} 243 +consul_raft_boltdb_logSize{quantile="0.9"} 243 +consul_raft_boltdb_logSize{quantile="0.99"} 243 +consul_raft_boltdb_logSize_sum 3.567357e+06 +consul_raft_boltdb_logSize_count 12362 +# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch +# TYPE consul_raft_boltdb_logsPerBatch summary +consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1 +consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1 +consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1 +consul_raft_boltdb_logsPerBatch_sum 12362 +consul_raft_boltdb_logsPerBatch_count 12360 +# HELP consul_raft_boltdb_numFreePages consul_raft_boltdb_numFreePages +# TYPE consul_raft_boltdb_numFreePages gauge +consul_raft_boltdb_numFreePages 1399 +# HELP consul_raft_boltdb_numPendingPages consul_raft_boltdb_numPendingPages +# TYPE consul_raft_boltdb_numPendingPages gauge +consul_raft_boltdb_numPendingPages 7 +# HELP consul_raft_boltdb_openReadTxn consul_raft_boltdb_openReadTxn +# TYPE consul_raft_boltdb_openReadTxn gauge +consul_raft_boltdb_openReadTxn 0 +# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs +# TYPE consul_raft_boltdb_storeLogs summary +consul_raft_boltdb_storeLogs{quantile="0.5"} 13.176624298095703 +consul_raft_boltdb_storeLogs{quantile="0.9"} 13.176624298095703 +consul_raft_boltdb_storeLogs{quantile="0.99"} 13.176624298095703 +consul_raft_boltdb_storeLogs_sum 651888.0279793739 +consul_raft_boltdb_storeLogs_count 12360 +# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn +# TYPE consul_raft_boltdb_totalReadTxn counter +consul_raft_boltdb_totalReadTxn 51200 +# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount +# TYPE consul_raft_boltdb_txstats_cursorCount counter +consul_raft_boltdb_txstats_cursorCount 139498 +# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount +# TYPE consul_raft_boltdb_txstats_nodeCount counter +consul_raft_boltdb_txstats_nodeCount 52400 +# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref +# TYPE consul_raft_boltdb_txstats_nodeDeref counter +consul_raft_boltdb_txstats_nodeDeref 0 +# HELP 
consul_raft_boltdb_txstats_pageAlloc consul_raft_boltdb_txstats_pageAlloc +# TYPE consul_raft_boltdb_txstats_pageAlloc gauge +consul_raft_boltdb_txstats_pageAlloc 4.38874112e+08 +# HELP consul_raft_boltdb_txstats_pageCount consul_raft_boltdb_txstats_pageCount +# TYPE consul_raft_boltdb_txstats_pageCount gauge +consul_raft_boltdb_txstats_pageCount 107147 +# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance +# TYPE consul_raft_boltdb_txstats_rebalance counter +consul_raft_boltdb_txstats_rebalance 5869 +# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime +# TYPE consul_raft_boltdb_txstats_rebalanceTime summary +consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0 +consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0 +consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0 +consul_raft_boltdb_txstats_rebalanceTime_sum 3.391055107116699 +consul_raft_boltdb_txstats_rebalanceTime_count 19882 +# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill +# TYPE consul_raft_boltdb_txstats_spill counter +consul_raft_boltdb_txstats_spill 51598 +# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime +# TYPE consul_raft_boltdb_txstats_spillTime summary +consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0 +consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.019670000299811363 +consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.019670000299811363 +consul_raft_boltdb_txstats_spillTime_sum 372.6177089449484 +consul_raft_boltdb_txstats_spillTime_count 19882 +# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split +# TYPE consul_raft_boltdb_txstats_split counter +consul_raft_boltdb_txstats_split 2154 +# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write +# TYPE consul_raft_boltdb_txstats_write counter +consul_raft_boltdb_txstats_write 76328 +# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime +# TYPE consul_raft_boltdb_txstats_writeTime summary +consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 0 +consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 13.529101371765137 +consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 13.529101371765137 +consul_raft_boltdb_txstats_writeTime_sum 649086.0377488136 +consul_raft_boltdb_txstats_writeTime_count 19882 +# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity +# TYPE consul_raft_boltdb_writeCapacity summary +consul_raft_boltdb_writeCapacity{quantile="0.5"} 76.11837005615234 +consul_raft_boltdb_writeCapacity{quantile="0.9"} 76.11837005615234 +consul_raft_boltdb_writeCapacity{quantile="0.99"} 76.11837005615234 +consul_raft_boltdb_writeCapacity_sum 1.1691283255012557e+06 +consul_raft_boltdb_writeCapacity_count 12360 +# HELP consul_raft_commitNumLogs consul_raft_commitNumLogs +# TYPE consul_raft_commitNumLogs gauge +consul_raft_commitNumLogs 1 +# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader. 
+# TYPE consul_raft_commitTime summary +consul_raft_commitTime{quantile="0.5"} 41.146488189697266 +consul_raft_commitTime{quantile="0.9"} 41.146488189697266 +consul_raft_commitTime{quantile="0.99"} 41.146488189697266 +consul_raft_commitTime_sum 955781.14939785 +consul_raft_commitTime_count 12345 +# HELP consul_raft_fsm_apply consul_raft_fsm_apply +# TYPE consul_raft_fsm_apply summary +consul_raft_fsm_apply{quantile="0.5"} 0.9867730140686035 +consul_raft_fsm_apply{quantile="0.9"} 0.9867730140686035 +consul_raft_fsm_apply{quantile="0.99"} 0.9867730140686035 +consul_raft_fsm_apply_sum 2157.036477720365 +consul_raft_fsm_apply_count 23257 +# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue +# TYPE consul_raft_fsm_enqueue summary +consul_raft_fsm_enqueue{quantile="0.5"} 0.01827000081539154 +consul_raft_fsm_enqueue{quantile="0.9"} 0.01827000081539154 +consul_raft_fsm_enqueue{quantile="0.99"} 0.01827000081539154 +consul_raft_fsm_enqueue_sum 312.4720518933609 +consul_raft_fsm_enqueue_count 12345 +# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took. +# TYPE consul_raft_fsm_lastRestoreDuration gauge +consul_raft_fsm_lastRestoreDuration 2 +# HELP consul_raft_last_index Represents the raft last index. +# TYPE consul_raft_last_index gauge +consul_raft_last_index 452955 +# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog +# TYPE consul_raft_leader_dispatchLog summary +consul_raft_leader_dispatchLog{quantile="0.5"} 13.253751754760742 +consul_raft_leader_dispatchLog{quantile="0.9"} 13.253751754760742 +consul_raft_leader_dispatchLog{quantile="0.99"} 13.253751754760742 +consul_raft_leader_dispatchLog_sum 652275.1332504749 +consul_raft_leader_dispatchLog_count 12345 +# HELP consul_raft_leader_dispatchNumLogs consul_raft_leader_dispatchNumLogs +# TYPE consul_raft_leader_dispatchNumLogs gauge +consul_raft_leader_dispatchNumLogs 1 +# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. +# TYPE consul_raft_leader_lastContact summary +consul_raft_leader_lastContact{quantile="0.5"} 33 +consul_raft_leader_lastContact{quantile="0.9"} 68 +consul_raft_leader_lastContact{quantile="0.99"} 68 +consul_raft_leader_lastContact_sum 3.0669e+06 +consul_raft_leader_lastContact_count 80917 +# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is. 
+# TYPE consul_raft_leader_oldestLogAge gauge +consul_raft_leader_oldestLogAge 1.66046464e+08 +# HELP consul_raft_replication_appendEntries_logs consul_raft_replication_appendEntries_logs +# TYPE consul_raft_replication_appendEntries_logs counter +consul_raft_replication_appendEntries_logs{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 12346 +consul_raft_replication_appendEntries_logs{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 12346 +# HELP consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae counter +consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae 12346 +# HELP consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 counter +consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 12346 +# HELP consul_raft_replication_appendEntries_rpc consul_raft_replication_appendEntries_rpc +# TYPE consul_raft_replication_appendEntries_rpc summary +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.7382550239562988 +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.030032992362976 +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 3.7775509357452393 +consul_raft_replication_appendEntries_rpc_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 3.243855408252254e+06 +consul_raft_replication_appendEntries_rpc_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 1.317473e+06 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6895250082015991 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9500619769096375 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 1.682297945022583 +consul_raft_replication_appendEntries_rpc_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 3.2418369520926476e+06 +consul_raft_replication_appendEntries_rpc_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 1.317366e+06 +# HELP consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.7751650214195251 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.1017019748687744 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 3.81791090965271 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 3.299558741098821e+06 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 1.317473e+06 +# HELP consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE 
consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 summary +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.7417550086975098 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.0077530145645142 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.726017951965332 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_sum 3.2977981372908056e+06 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_count 1.317366e+06 +# HELP consul_raft_replication_heartbeat consul_raft_replication_heartbeat +# TYPE consul_raft_replication_heartbeat summary +consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.5587760210037231 +consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.304479956626892 +consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 1.4234989881515503 +consul_raft_replication_heartbeat_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 103812.21699500084 +consul_raft_replication_heartbeat_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 132454 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.5443660020828247 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9280639886856079 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 1.0106929540634155 +consul_raft_replication_heartbeat_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 96512.05100156367 +consul_raft_replication_heartbeat_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 132488 +# HELP consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.5922750234603882 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.3319799900054932 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 1.454179048538208 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 108115.97687250376 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 132454 +# HELP consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 summary +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.5915359854698181 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 0.9649440050125122 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.0576729774475098 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_sum 100780.49696727097 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_count 132488 +# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster. 
+# TYPE consul_raft_rpc_installSnapshot summary +consul_raft_rpc_installSnapshot{quantile="0.5"} NaN +consul_raft_rpc_installSnapshot{quantile="0.9"} NaN +consul_raft_rpc_installSnapshot{quantile="0.99"} NaN +consul_raft_rpc_installSnapshot_sum 0 +consul_raft_rpc_installSnapshot_count 0 +# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk. +# TYPE consul_raft_snapshot_persist summary +consul_raft_snapshot_persist{quantile="0.5"} NaN +consul_raft_snapshot_persist{quantile="0.9"} NaN +consul_raft_snapshot_persist{quantile="0.99"} NaN +consul_raft_snapshot_persist_sum 0.7742639780044556 +consul_raft_snapshot_persist_count 1 +# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election. +# TYPE consul_raft_state_candidate counter +consul_raft_state_candidate 1 +# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader. +# TYPE consul_raft_state_leader counter +consul_raft_state_leader 1 +# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation +# TYPE consul_raft_thread_fsm_saturation summary +consul_raft_thread_fsm_saturation{quantile="0.5"} 0 +consul_raft_thread_fsm_saturation{quantile="0.9"} 0 +consul_raft_thread_fsm_saturation{quantile="0.99"} 0 +consul_raft_thread_fsm_saturation_sum 0.09000000357627869 +consul_raft_thread_fsm_saturation_count 11923 +# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation +# TYPE consul_raft_thread_main_saturation summary +consul_raft_thread_main_saturation{quantile="0.5"} 0 +consul_raft_thread_main_saturation{quantile="0.9"} 0 +consul_raft_thread_main_saturation{quantile="0.99"} 0 +consul_raft_thread_main_saturation_sum 205.40999860689044 +consul_raft_thread_main_saturation_count 43067 +# HELP consul_raft_verify_leader consul_raft_verify_leader +# TYPE consul_raft_verify_leader counter +consul_raft_verify_leader 364 +# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection. +# TYPE consul_rpc_accept_conn counter +consul_rpc_accept_conn 864 +# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed. +# TYPE consul_rpc_consistentRead summary +consul_rpc_consistentRead{quantile="0.5"} NaN +consul_rpc_consistentRead{quantile="0.9"} NaN +consul_rpc_consistentRead{quantile="0.99"} NaN +consul_rpc_consistentRead_sum 293.6328800059855 +consul_rpc_consistentRead_count 364 +# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query. +# TYPE consul_rpc_cross_dc counter +consul_rpc_cross_dc 0 +# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling. +# TYPE consul_rpc_queries_blocking gauge +consul_rpc_queries_blocking 1 +# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries. +# TYPE consul_rpc_query counter +consul_rpc_query 2559 +# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection. +# TYPE consul_rpc_raft_handoff counter +consul_rpc_raft_handoff 2 +# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request. +# TYPE consul_rpc_request counter +consul_rpc_request 159034 +# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request. 
+# TYPE consul_rpc_request_error counter +consul_rpc_request_error 0 +# HELP consul_runtime_alloc_bytes consul_runtime_alloc_bytes +# TYPE consul_runtime_alloc_bytes gauge +consul_runtime_alloc_bytes 5.3065368e+07 +# HELP consul_runtime_free_count consul_runtime_free_count +# TYPE consul_runtime_free_count gauge +consul_runtime_free_count 7.33623104e+08 +# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns +# TYPE consul_runtime_gc_pause_ns summary +consul_runtime_gc_pause_ns{quantile="0.5"} NaN +consul_runtime_gc_pause_ns{quantile="0.9"} NaN +consul_runtime_gc_pause_ns{quantile="0.99"} NaN +consul_runtime_gc_pause_ns_sum 1.372001275e+09 +consul_runtime_gc_pause_ns_count 3761 +# HELP consul_runtime_heap_objects consul_runtime_heap_objects +# TYPE consul_runtime_heap_objects gauge +consul_runtime_heap_objects 227577 +# HELP consul_runtime_malloc_count consul_runtime_malloc_count +# TYPE consul_runtime_malloc_count gauge +consul_runtime_malloc_count 7.33850688e+08 +# HELP consul_runtime_num_goroutines consul_runtime_num_goroutines +# TYPE consul_runtime_num_goroutines gauge +consul_runtime_num_goroutines 132 +# HELP consul_runtime_sys_bytes consul_runtime_sys_bytes +# TYPE consul_runtime_sys_bytes gauge +consul_runtime_sys_bytes 8.495516e+07 +# HELP consul_runtime_total_gc_pause_ns consul_runtime_total_gc_pause_ns +# TYPE consul_runtime_total_gc_pause_ns gauge +consul_runtime_total_gc_pause_ns 1.37200128e+09 +# HELP consul_runtime_total_gc_runs consul_runtime_total_gc_runs +# TYPE consul_runtime_total_gc_runs gauge +consul_runtime_total_gc_runs 3761 +# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms +# TYPE consul_serf_coordinate_adjustment_ms summary +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 0.1953909993171692 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 0.2344750016927719 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 0.2344750016927719 +consul_serf_coordinate_adjustment_ms_sum{network="lan"} 20281.621190846952 +consul_serf_coordinate_adjustment_ms_count{network="lan"} 94283 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.5"} 0.19766099750995636 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.9"} 0.20183999836444855 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.99"} 0.20183999836444855 +consul_serf_coordinate_adjustment_ms_sum{network="wan"} 4030.8057950612783 +consul_serf_coordinate_adjustment_ms_count{network="wan"} 18817 +# HELP consul_serf_queue_Event consul_serf_queue_Event +# TYPE consul_serf_queue_Event summary +consul_serf_queue_Event{network="lan",quantile="0.5"} NaN +consul_serf_queue_Event{network="lan",quantile="0.9"} NaN +consul_serf_queue_Event{network="lan",quantile="0.99"} NaN +consul_serf_queue_Event_sum{network="lan"} 0 +consul_serf_queue_Event_count{network="lan"} 3313 +consul_serf_queue_Event{network="wan",quantile="0.5"} NaN +consul_serf_queue_Event{network="wan",quantile="0.9"} NaN +consul_serf_queue_Event{network="wan",quantile="0.99"} NaN +consul_serf_queue_Event_sum{network="wan"} 0 +consul_serf_queue_Event_count{network="wan"} 3313 +# HELP consul_serf_queue_Intent consul_serf_queue_Intent +# TYPE consul_serf_queue_Intent summary +consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN +consul_serf_queue_Intent_sum{network="lan"} 0 +consul_serf_queue_Intent_count{network="lan"} 3313 
+consul_serf_queue_Intent{network="wan",quantile="0.5"} NaN +consul_serf_queue_Intent{network="wan",quantile="0.9"} NaN +consul_serf_queue_Intent{network="wan",quantile="0.99"} NaN +consul_serf_queue_Intent_sum{network="wan"} 0 +consul_serf_queue_Intent_count{network="wan"} 3313 +# HELP consul_serf_queue_Query consul_serf_queue_Query +# TYPE consul_serf_queue_Query summary +consul_serf_queue_Query{network="lan",quantile="0.5"} NaN +consul_serf_queue_Query{network="lan",quantile="0.9"} NaN +consul_serf_queue_Query{network="lan",quantile="0.99"} NaN +consul_serf_queue_Query_sum{network="lan"} 0 +consul_serf_queue_Query_count{network="lan"} 3313 +consul_serf_queue_Query{network="wan",quantile="0.5"} NaN +consul_serf_queue_Query{network="wan",quantile="0.9"} NaN +consul_serf_queue_Query{network="wan",quantile="0.99"} NaN +consul_serf_queue_Query_sum{network="wan"} 0 +consul_serf_queue_Query_count{network="wan"} 3313 +# HELP consul_server_isLeader Tracks if the server is a leader. +# TYPE consul_server_isLeader gauge +consul_server_isLeader 1 +# HELP consul_session_apply Measures the time spent applying a session update. +# TYPE consul_session_apply summary +consul_session_apply{quantile="0.5"} NaN +consul_session_apply{quantile="0.9"} NaN +consul_session_apply{quantile="0.99"} NaN +consul_session_apply_sum 0 +consul_session_apply_count 0 +# HELP consul_session_renew Measures the time spent renewing a session. +# TYPE consul_session_renew summary +consul_session_renew{quantile="0.5"} NaN +consul_session_renew{quantile="0.9"} NaN +consul_session_renew{quantile="0.99"} NaN +consul_session_renew_sum 0 +consul_session_renew_count 0 +# HELP consul_session_ttl_active Tracks the active number of sessions being tracked. +# TYPE consul_session_ttl_active gauge +consul_session_ttl_active 0 +# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session. +# TYPE consul_session_ttl_invalidate summary +consul_session_ttl_invalidate{quantile="0.5"} NaN +consul_session_ttl_invalidate{quantile="0.9"} NaN +consul_session_ttl_invalidate{quantile="0.99"} NaN +consul_session_ttl_invalidate_sum 0 +consul_session_ttl_invalidate_count 0 +# HELP consul_txn_apply Measures the time spent applying a transaction operation. +# TYPE consul_txn_apply summary +consul_txn_apply{quantile="0.5"} NaN +consul_txn_apply{quantile="0.9"} NaN +consul_txn_apply{quantile="0.99"} NaN +consul_txn_apply_sum 0 +consul_txn_apply_count 0 +# HELP consul_txn_read Measures the time spent returning a read transaction. +# TYPE consul_txn_read summary +consul_txn_read{quantile="0.5"} NaN +consul_txn_read{quantile="0.9"} NaN +consul_txn_read{quantile="0.99"} NaN +consul_txn_read_sum 0 +consul_txn_read_count 0 +# HELP consul_version Represents the Consul version. +# TYPE consul_version gauge +consul_version 0 +# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version. +# TYPE consul_xds_server_streams gauge +consul_xds_server_streams 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.9921e-05
+go_gc_duration_seconds{quantile="0.25"} 0.00010804
+go_gc_duration_seconds{quantile="0.5"} 0.00016214
+go_gc_duration_seconds{quantile="0.75"} 0.000549655
+go_gc_duration_seconds{quantile="1"} 0.003364656
+go_gc_duration_seconds_sum 1.3724735909999999
+go_gc_duration_seconds_count 3762
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 137
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.18.1"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 2.6647944e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 9.1199946672e+10
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 2.497531e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 7.33814669e+08
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 4.2228338057402265e-05
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 8.927624e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 2.6647944e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 3.3161216e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 3.620864e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 49363
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 9.936896e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.9369856e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6713685789559276e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 7.33864032e+08
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 413168
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 620160
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 5.3447888e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.591077e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.933312e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.933312e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 8.495516e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 15
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 3612.93
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1024
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 35
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 1.53645056e+08
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.67126917263e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 9.18421504e+08
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes -1
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt
new file mode 100644
index 00000000000000..a5df1d586d2dde
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt
@@ -0,0 +1,1509 @@
+# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token.
+# TYPE consul_acl_ResolveToken summary
+consul_acl_ResolveToken{quantile="0.5"} NaN
+consul_acl_ResolveToken{quantile="0.9"} NaN
+consul_acl_ResolveToken{quantile="0.99"} NaN
+consul_acl_ResolveToken_sum 0
+consul_acl_ResolveToken_count 0
+# HELP consul_acl_authmethod_delete
+# TYPE consul_acl_authmethod_delete summary
+consul_acl_authmethod_delete{quantile="0.5"} NaN
+consul_acl_authmethod_delete{quantile="0.9"} NaN
+consul_acl_authmethod_delete{quantile="0.99"} NaN
+consul_acl_authmethod_delete_sum 0
+consul_acl_authmethod_delete_count 0
+# HELP consul_acl_authmethod_upsert
+# TYPE consul_acl_authmethod_upsert summary
+consul_acl_authmethod_upsert{quantile="0.5"} NaN
+consul_acl_authmethod_upsert{quantile="0.9"} NaN
+consul_acl_authmethod_upsert{quantile="0.99"} NaN
+consul_acl_authmethod_upsert_sum 0
+consul_acl_authmethod_upsert_count 0
+# HELP consul_acl_bindingrule_delete
+# TYPE consul_acl_bindingrule_delete summary
+consul_acl_bindingrule_delete{quantile="0.5"} NaN
+consul_acl_bindingrule_delete{quantile="0.9"} NaN
+consul_acl_bindingrule_delete{quantile="0.99"} NaN
+consul_acl_bindingrule_delete_sum 0
+consul_acl_bindingrule_delete_count 0
+# HELP consul_acl_bindingrule_upsert
+# TYPE consul_acl_bindingrule_upsert summary
+consul_acl_bindingrule_upsert{quantile="0.5"} NaN
+consul_acl_bindingrule_upsert{quantile="0.9"} NaN
+consul_acl_bindingrule_upsert{quantile="0.99"} NaN
+consul_acl_bindingrule_upsert_sum 0
+consul_acl_bindingrule_upsert_count 0
+# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_deregistration counter
+consul_acl_blocked_check_deregistration 0
+# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_registration counter
+consul_acl_blocked_check_registration 0
+# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL)
+# TYPE consul_acl_blocked_node_registration counter
+consul_acl_blocked_node_registration 0
+# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_deregistration counter
+consul_acl_blocked_service_deregistration 0
+# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 0
+consul_acl_policy_delete_count 0
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 0
+consul_acl_policy_upsert_count 0
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 0
+consul_acl_token_delete_count 0
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 0
+consul_acl_token_upsert_count 0
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.10910899937152863
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.2985079884529114
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 0.2985079884529114
+consul_api_http_sum{method="GET",path="v1_agent_checks"} 85.87442895025015
+consul_api_http_count{method="GET",path="v1_agent_checks"} 588
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.5271260142326355
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 1.2289390563964844
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 1.2289390563964844
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 409.6580027639866
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 592
+# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function.
+# TYPE consul_autopilot_failure_tolerance gauge
+consul_autopilot_failure_tolerance 0
+# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.
+# TYPE consul_autopilot_healthy gauge
+consul_autopilot_healthy 0
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 0
+consul_catalog_deregister_count 0
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} NaN
+consul_catalog_register{quantile="0.9"} NaN
+consul_catalog_register{quantile="0.99"} NaN
+consul_catalog_register_sum 45.98546886444092
+consul_catalog_register_count 2
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 46
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 0
+# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+# HELP consul_consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peering" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_consul_peering_exported_services gauge
+consul_consul_peering_exported_services 0
+# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 0
+consul_fsm_autopilot_count 0
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} 0.10997900366783142
+consul_fsm_coordinate_batch_update{quantile="0.9"} 0.10997900366783142
+consul_fsm_coordinate_batch_update{quantile="0.99"} 0.10997900366783142
+consul_fsm_coordinate_batch_update_sum 240.22869294136763
+consul_fsm_coordinate_batch_update_count 11035
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 1.4027419984340668
+consul_fsm_deregister_count 3
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 0
+consul_fsm_persist_count 0
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} NaN
+consul_fsm_register{quantile="0.9"} NaN
+consul_fsm_register{quantile="0.99"} NaN
+consul_fsm_register_sum 20.184059869498014
+consul_fsm_register_count 210
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 9
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 0
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 6
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 0
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 0
+consul_kvs_apply_count 0
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 168.71699333190918
+consul_leader_barrier_count 8
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 0
+consul_leader_reapTombstones_count 0
+# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information.
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 2.5833420008420944
+consul_leader_reconcile_count 8
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 60.76006331667304
+consul_leader_reconcileMember_count 26
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_degraded_probe consul_memberlist_degraded_probe
+# TYPE consul_memberlist_degraded_probe counter
+consul_memberlist_degraded_probe{network="lan"} 1
+consul_memberlist_degraded_probe{network="wan"} 1
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="lan",quantile="0.5"} 0.019190000370144844 +consul_memberlist_gossip{network="lan",quantile="0.9"} 0.04447900131344795 +consul_memberlist_gossip{network="lan",quantile="0.99"} 0.06036800146102905 +consul_memberlist_gossip_sum{network="lan"} 72.94090104475617 +consul_memberlist_gossip_count{network="lan"} 2984 +consul_memberlist_gossip{network="wan",quantile="0.5"} 0.020829999819397926 +consul_memberlist_gossip{network="wan",quantile="0.9"} 0.04980999976396561 +consul_memberlist_gossip{network="wan",quantile="0.99"} 0.06190900132060051 +consul_memberlist_gossip_sum{network="wan"} 31.62462099501863 +consul_memberlist_gossip_count{network="wan"} 1193 +# HELP consul_memberlist_msg_alive consul_memberlist_msg_alive +# TYPE consul_memberlist_msg_alive counter +consul_memberlist_msg_alive{network="lan"} 5 +consul_memberlist_msg_alive{network="wan"} 5 +# HELP consul_memberlist_msg_dead consul_memberlist_msg_dead +# TYPE consul_memberlist_msg_dead counter +consul_memberlist_msg_dead{network="lan"} 2 +consul_memberlist_msg_dead{network="wan"} 2 +# HELP consul_memberlist_probeNode consul_memberlist_probeNode +# TYPE consul_memberlist_probeNode summary +consul_memberlist_probeNode{network="lan",quantile="0.5"} 0.9080119729042053 +consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.071262001991272 +consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.071262001991272 +consul_memberlist_probeNode_sum{network="lan"} 560.697409927845 +consul_memberlist_probeNode_count{network="lan"} 559 +consul_memberlist_probeNode{network="wan",quantile="0.5"} 0.7037429809570312 +consul_memberlist_probeNode{network="wan",quantile="0.9"} 1.0175219774246216 +consul_memberlist_probeNode{network="wan",quantile="0.99"} 1.0175219774246216 +consul_memberlist_probeNode_sum{network="wan"} 133.5382086634636 +consul_memberlist_probeNode_count{network="wan"} 112 +# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode +# TYPE consul_memberlist_pushPullNode summary +consul_memberlist_pushPullNode{network="lan",quantile="0.5"} NaN +consul_memberlist_pushPullNode{network="lan",quantile="0.9"} NaN +consul_memberlist_pushPullNode{network="lan",quantile="0.99"} NaN +consul_memberlist_pushPullNode_sum{network="lan"} 32.9423828125 +consul_memberlist_pushPullNode_count{network="lan"} 23 +consul_memberlist_pushPullNode{network="wan",quantile="0.5"} NaN +consul_memberlist_pushPullNode{network="wan",quantile="0.9"} NaN +consul_memberlist_pushPullNode{network="wan",quantile="0.99"} NaN +consul_memberlist_pushPullNode_sum{network="wan"} 14.840403079986572 +consul_memberlist_pushPullNode_count{network="wan"} 10 +# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept +# TYPE consul_memberlist_tcp_accept counter +consul_memberlist_tcp_accept{network="lan"} 23 +consul_memberlist_tcp_accept{network="wan"} 10 +# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect +# TYPE consul_memberlist_tcp_connect counter +consul_memberlist_tcp_connect{network="lan"} 23 +consul_memberlist_tcp_connect{network="wan"} 10 +# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent +# TYPE consul_memberlist_tcp_sent counter +consul_memberlist_tcp_sent{network="lan"} 39810 +consul_memberlist_tcp_sent{network="wan"} 15776 +# HELP consul_memberlist_udp_received consul_memberlist_udp_received +# TYPE consul_memberlist_udp_received counter +consul_memberlist_udp_received{network="lan"} 168805 +consul_memberlist_udp_received{network="wan"} 42596 +# HELP consul_memberlist_udp_sent 
consul_memberlist_udp_sent +# TYPE consul_memberlist_udp_sent counter +consul_memberlist_udp_sent{network="lan"} 168852 +consul_memberlist_udp_sent{network="wan"} 41510 +# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour +# TYPE consul_mesh_active_root_ca_expiry gauge +consul_mesh_active_root_ca_expiry 0 +# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour +# TYPE consul_mesh_active_signing_ca_expiry gauge +consul_mesh_active_signing_ca_expiry 0 +# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update. +# TYPE consul_prepared_query_apply summary +consul_prepared_query_apply{quantile="0.5"} NaN +consul_prepared_query_apply{quantile="0.9"} NaN +consul_prepared_query_apply{quantile="0.99"} NaN +consul_prepared_query_apply_sum 0 +consul_prepared_query_apply_count 0 +# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request. +# TYPE consul_prepared_query_execute summary +consul_prepared_query_execute{quantile="0.5"} NaN +consul_prepared_query_execute{quantile="0.9"} NaN +consul_prepared_query_execute{quantile="0.99"} NaN +consul_prepared_query_execute_sum 0 +consul_prepared_query_execute_count 0 +# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter. +# TYPE consul_prepared_query_execute_remote summary +consul_prepared_query_execute_remote{quantile="0.5"} NaN +consul_prepared_query_execute_remote{quantile="0.9"} NaN +consul_prepared_query_execute_remote{quantile="0.99"} NaN +consul_prepared_query_execute_remote_sum 0 +consul_prepared_query_execute_remote_count 0 +# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request. +# TYPE consul_prepared_query_explain summary +consul_prepared_query_explain{quantile="0.5"} NaN +consul_prepared_query_explain{quantile="0.9"} NaN +consul_prepared_query_explain{quantile="0.99"} NaN +consul_prepared_query_explain_sum 0 +consul_prepared_query_explain_count 0 +# HELP consul_raft_applied_index Represents the raft applied index. +# TYPE consul_raft_applied_index gauge +consul_raft_applied_index 0 +# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval. 
+# TYPE consul_raft_apply counter
+consul_raft_apply 52
+# HELP consul_raft_barrier consul_raft_barrier
+# TYPE consul_raft_barrier counter
+consul_raft_barrier 8
+# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog
+# TYPE consul_raft_boltdb_getLog summary
+consul_raft_boltdb_getLog{quantile="0.5"} 0.04751899838447571
+consul_raft_boltdb_getLog{quantile="0.9"} 0.04751899838447571
+consul_raft_boltdb_getLog{quantile="0.99"} 0.04751899838447571
+consul_raft_boltdb_getLog_sum 177.71392010012642
+consul_raft_boltdb_getLog_count 25884
+# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize
+# TYPE consul_raft_boltdb_logBatchSize summary
+consul_raft_boltdb_logBatchSize{quantile="0.5"} 414
+consul_raft_boltdb_logBatchSize{quantile="0.9"} 414
+consul_raft_boltdb_logBatchSize{quantile="0.99"} 414
+consul_raft_boltdb_logBatchSize_sum 32278
+consul_raft_boltdb_logBatchSize_count 93
+# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize
+# TYPE consul_raft_boltdb_logSize summary
+consul_raft_boltdb_logSize{quantile="0.5"} 414
+consul_raft_boltdb_logSize{quantile="0.9"} 414
+consul_raft_boltdb_logSize{quantile="0.99"} 414
+consul_raft_boltdb_logSize_sum 32278
+consul_raft_boltdb_logSize_count 95
+# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch
+# TYPE consul_raft_boltdb_logsPerBatch summary
+consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1
+consul_raft_boltdb_logsPerBatch_sum 95
+consul_raft_boltdb_logsPerBatch_count 93
+# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs
+# TYPE consul_raft_boltdb_storeLogs summary
+consul_raft_boltdb_storeLogs{quantile="0.5"} 17.80512237548828
+consul_raft_boltdb_storeLogs{quantile="0.9"} 17.80512237548828
+consul_raft_boltdb_storeLogs{quantile="0.99"} 17.80512237548828
+consul_raft_boltdb_storeLogs_sum 1006.1075472831726
+consul_raft_boltdb_storeLogs_count 93
+# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn
+# TYPE consul_raft_boltdb_totalReadTxn counter
+consul_raft_boltdb_totalReadTxn 25946
+# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount
+# TYPE consul_raft_boltdb_txstats_cursorCount counter
+consul_raft_boltdb_txstats_cursorCount 52198
+# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount
+# TYPE consul_raft_boltdb_txstats_nodeCount counter
+consul_raft_boltdb_txstats_nodeCount 386
+# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref
+# TYPE consul_raft_boltdb_txstats_nodeDeref counter
+consul_raft_boltdb_txstats_nodeDeref 0
+# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance
+# TYPE consul_raft_boltdb_txstats_rebalance counter
+consul_raft_boltdb_txstats_rebalance 0
+# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime
+# TYPE consul_raft_boltdb_txstats_rebalanceTime summary
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0
+consul_raft_boltdb_txstats_rebalanceTime_sum 0
+consul_raft_boltdb_txstats_rebalanceTime_count 120
+# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill
+# TYPE consul_raft_boltdb_txstats_spill counter
+consul_raft_boltdb_txstats_spill 398
+# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime
+# TYPE consul_raft_boltdb_txstats_spillTime summary
+consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0.018939999863505363
+consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.04575999826192856
+consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.04575999826192856
+consul_raft_boltdb_txstats_spillTime_sum 2.559216999274213
+consul_raft_boltdb_txstats_spillTime_count 120
+# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split
+# TYPE consul_raft_boltdb_txstats_split counter
+consul_raft_boltdb_txstats_split 19
+# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write
+# TYPE consul_raft_boltdb_txstats_write counter
+consul_raft_boltdb_txstats_write 600
+# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime
+# TYPE consul_raft_boltdb_txstats_writeTime summary
+consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 17.56859588623047
+consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 17.67194366455078
+consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 17.67194366455078
+consul_raft_boltdb_txstats_writeTime_sum 1048.4321446418762
+consul_raft_boltdb_txstats_writeTime_count 120
+# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity
+# TYPE consul_raft_boltdb_writeCapacity summary
+consul_raft_boltdb_writeCapacity{quantile="0.5"} 56.34065628051758
+consul_raft_boltdb_writeCapacity{quantile="0.9"} 56.34065628051758
+consul_raft_boltdb_writeCapacity{quantile="0.99"} 56.34065628051758
+consul_raft_boltdb_writeCapacity_sum 11092.64028930664
+consul_raft_boltdb_writeCapacity_count 93
+# HELP consul_raft_candidate_electSelf consul_raft_candidate_electSelf
+# TYPE consul_raft_candidate_electSelf summary
+consul_raft_candidate_electSelf{quantile="0.5"} NaN
+consul_raft_candidate_electSelf{quantile="0.9"} NaN
+consul_raft_candidate_electSelf{quantile="0.99"} NaN
+consul_raft_candidate_electSelf_sum 64.78176307678223
+consul_raft_candidate_electSelf_count 2
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary
+consul_raft_commitTime{quantile="0.5"} 58.47069549560547
+consul_raft_commitTime{quantile="0.9"} 58.47069549560547
+consul_raft_commitTime{quantile="0.99"} 58.47069549560547
+consul_raft_commitTime_sum 1418.8827295303345
+consul_raft_commitTime_count 64
+# HELP consul_raft_fsm_apply consul_raft_fsm_apply
+# TYPE consul_raft_fsm_apply summary
+consul_raft_fsm_apply{quantile="0.5"} 0.1474989950656891
+consul_raft_fsm_apply{quantile="0.9"} 0.1474989950656891
+consul_raft_fsm_apply{quantile="0.99"} 0.1474989950656891
+consul_raft_fsm_apply_sum 368.55326924845576
+consul_raft_fsm_apply_count 11248
+# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue
+# TYPE consul_raft_fsm_enqueue summary
+consul_raft_fsm_enqueue{quantile="0.5"} 0.01882000081241131
+consul_raft_fsm_enqueue{quantile="0.9"} 0.01882000081241131
+consul_raft_fsm_enqueue{quantile="0.99"} 0.01882000081241131
+consul_raft_fsm_enqueue_sum 1.6373119996860623
+consul_raft_fsm_enqueue_count 64
+# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took.
+# TYPE consul_raft_fsm_lastRestoreDuration gauge
+consul_raft_fsm_lastRestoreDuration 0
+# HELP consul_raft_fsm_restore consul_raft_fsm_restore
+# TYPE consul_raft_fsm_restore summary
+consul_raft_fsm_restore{quantile="0.5"} NaN
+consul_raft_fsm_restore{quantile="0.9"} NaN
+consul_raft_fsm_restore{quantile="0.99"} NaN
+consul_raft_fsm_restore_sum 2.6886210441589355
+consul_raft_fsm_restore_count 1
+# HELP consul_raft_last_index Represents the raft last index.
+# TYPE consul_raft_last_index gauge
+consul_raft_last_index 0
+# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog
+# TYPE consul_raft_leader_dispatchLog summary
+consul_raft_leader_dispatchLog{quantile="0.5"} 17.841020584106445
+consul_raft_leader_dispatchLog{quantile="0.9"} 17.841020584106445
+consul_raft_leader_dispatchLog{quantile="0.99"} 17.841020584106445
+consul_raft_leader_dispatchLog_sum 614.3611516952515
+consul_raft_leader_dispatchLog_count 64
+# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
+# TYPE consul_raft_leader_lastContact summary
+consul_raft_leader_lastContact{quantile="0.5"} 30
+consul_raft_leader_lastContact{quantile="0.9"} 67
+consul_raft_leader_lastContact{quantile="0.99"} 67
+consul_raft_leader_lastContact_sum 13872
+consul_raft_leader_lastContact_count 364
+# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is.
+# TYPE consul_raft_leader_oldestLogAge gauge
+consul_raft_leader_oldestLogAge 0
+# HELP consul_raft_net_getRPCType consul_raft_net_getRPCType
+# TYPE consul_raft_net_getRPCType summary
+consul_raft_net_getRPCType{quantile="0.5"} NaN
+consul_raft_net_getRPCType{quantile="0.9"} NaN
+consul_raft_net_getRPCType{quantile="0.99"} NaN
+consul_raft_net_getRPCType_sum 269090.0442453362
+consul_raft_net_getRPCType_count 2002
+# HELP consul_raft_net_rpcDecode consul_raft_net_rpcDecode
+# TYPE consul_raft_net_rpcDecode summary
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="AppendEntries"} 50.56464605871588
+consul_raft_net_rpcDecode_count{rpcType="AppendEntries"} 1811
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="Heartbeat"} 4.609708994626999
+consul_raft_net_rpcDecode_count{rpcType="Heartbeat"} 189
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="RequestVote"} 0.052609000355005264
+consul_raft_net_rpcDecode_count{rpcType="RequestVote"} 1
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="TimeoutNow"} 0.07034999877214432
+consul_raft_net_rpcDecode_count{rpcType="TimeoutNow"} 1
+# HELP consul_raft_net_rpcEnqueue consul_raft_net_rpcEnqueue
+# TYPE consul_raft_net_rpcEnqueue summary
+consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.5"} NaN
+consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.9"} NaN +consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.99"} NaN +consul_raft_net_rpcEnqueue_sum{rpcType="AppendEntries"} 61.944881823379546 +consul_raft_net_rpcEnqueue_count{rpcType="AppendEntries"} 1811 +consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.5"} NaN +consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.9"} NaN +consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.99"} NaN +consul_raft_net_rpcEnqueue_sum{rpcType="Heartbeat"} 4.966151000931859 +consul_raft_net_rpcEnqueue_count{rpcType="Heartbeat"} 189 +consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.5"} NaN +consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.9"} NaN +consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.99"} NaN +consul_raft_net_rpcEnqueue_sum{rpcType="RequestVote"} 0.012551000341773033 +consul_raft_net_rpcEnqueue_count{rpcType="RequestVote"} 1 +consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.5"} NaN +consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.9"} NaN +consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.99"} NaN +consul_raft_net_rpcEnqueue_sum{rpcType="TimeoutNow"} 0.021700000390410423 +consul_raft_net_rpcEnqueue_count{rpcType="TimeoutNow"} 1 +# HELP consul_raft_net_rpcRespond consul_raft_net_rpcRespond +# TYPE consul_raft_net_rpcRespond summary +consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.5"} NaN +consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.9"} NaN +consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.99"} NaN +consul_raft_net_rpcRespond_sum{rpcType="AppendEntries"} 632.5211075674742 +consul_raft_net_rpcRespond_count{rpcType="AppendEntries"} 1811 +consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.5"} NaN +consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.9"} NaN +consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.99"} NaN +consul_raft_net_rpcRespond_sum{rpcType="Heartbeat"} 2.6388960042968392 +consul_raft_net_rpcRespond_count{rpcType="Heartbeat"} 189 +consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.5"} NaN +consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.9"} NaN +consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.99"} NaN +consul_raft_net_rpcRespond_sum{rpcType="RequestVote"} 27.120553970336914 +consul_raft_net_rpcRespond_count{rpcType="RequestVote"} 1 +consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.5"} NaN +consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.9"} NaN +consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.99"} NaN +consul_raft_net_rpcRespond_sum{rpcType="TimeoutNow"} 0.18450799584388733 +consul_raft_net_rpcRespond_count{rpcType="TimeoutNow"} 1 +# HELP consul_raft_replication_appendEntries_logs consul_raft_replication_appendEntries_logs +# TYPE consul_raft_replication_appendEntries_logs counter +consul_raft_replication_appendEntries_logs{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 64 +consul_raft_replication_appendEntries_logs{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 64 +# HELP consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae counter +consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae 64 +# HELP 
consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 counter +consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 64 +# HELP consul_raft_replication_appendEntries_rpc consul_raft_replication_appendEntries_rpc +# TYPE consul_raft_replication_appendEntries_rpc summary +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.7193149924278259 +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.123671054840088 +consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 2.9677159786224365 +consul_raft_replication_appendEntries_rpc_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 5982.053934007883 +consul_raft_replication_appendEntries_rpc_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 6008 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6742749810218811 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 1.1206400394439697 +consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 4.632521152496338 +consul_raft_replication_appendEntries_rpc_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 5640.875204831362 +consul_raft_replication_appendEntries_rpc_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 6125 +# HELP consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.7773330211639404 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.177711009979248 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 3.0745749473571777 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 6255.161469876766 +consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 6008 +# HELP consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 summary +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.7206940054893494 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.1687090396881104 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 4.6782097816467285 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_sum 5913.810284465551 +consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_count 6125 +# HELP consul_raft_replication_heartbeat consul_raft_replication_heartbeat +# TYPE consul_raft_replication_heartbeat summary +consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.6244940161705017 
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.0416409969329834 +consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 1.4274380207061768 +consul_raft_replication_heartbeat_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 488.172178208828 +consul_raft_replication_heartbeat_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 601 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6106240153312683 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9524030089378357 +consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 0.9726319909095764 +consul_raft_replication_heartbeat_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 459.77358454465866 +consul_raft_replication_heartbeat_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 625 +# HELP consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae +# TYPE consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.65802401304245 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.0810810327529907 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 1.4524680376052856 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 509.27614790201187 +consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 601 +# HELP consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 +# TYPE consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 summary +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.6355040073394775 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.000391960144043 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.0161620378494263 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_sum 480.9242581129074 +consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_count 625 +# HELP consul_raft_rpc_appendEntries consul_raft_rpc_appendEntries +# TYPE consul_raft_rpc_appendEntries summary +consul_raft_rpc_appendEntries{quantile="0.5"} NaN +consul_raft_rpc_appendEntries{quantile="0.9"} NaN +consul_raft_rpc_appendEntries{quantile="0.99"} NaN +consul_raft_rpc_appendEntries_sum 573.4200130868703 +consul_raft_rpc_appendEntries_count 2000 +# HELP consul_raft_rpc_appendEntries_processLogs consul_raft_rpc_appendEntries_processLogs +# TYPE consul_raft_rpc_appendEntries_processLogs summary +consul_raft_rpc_appendEntries_processLogs{quantile="0.5"} NaN +consul_raft_rpc_appendEntries_processLogs{quantile="0.9"} NaN +consul_raft_rpc_appendEntries_processLogs{quantile="0.99"} NaN +consul_raft_rpc_appendEntries_processLogs_sum 148.3990723239258 +consul_raft_rpc_appendEntries_processLogs_count 28 +# HELP consul_raft_rpc_appendEntries_storeLogs consul_raft_rpc_appendEntries_storeLogs +# TYPE consul_raft_rpc_appendEntries_storeLogs summary +consul_raft_rpc_appendEntries_storeLogs{quantile="0.5"} NaN +consul_raft_rpc_appendEntries_storeLogs{quantile="0.9"} NaN 
+consul_raft_rpc_appendEntries_storeLogs{quantile="0.99"} NaN +consul_raft_rpc_appendEntries_storeLogs_sum 395.2212791442871 +consul_raft_rpc_appendEntries_storeLogs_count 29 +# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster. +# TYPE consul_raft_rpc_installSnapshot summary +consul_raft_rpc_installSnapshot{quantile="0.5"} NaN +consul_raft_rpc_installSnapshot{quantile="0.9"} NaN +consul_raft_rpc_installSnapshot{quantile="0.99"} NaN +consul_raft_rpc_installSnapshot_sum 0 +consul_raft_rpc_installSnapshot_count 0 +# HELP consul_raft_rpc_processHeartbeat consul_raft_rpc_processHeartbeat +# TYPE consul_raft_rpc_processHeartbeat summary +consul_raft_rpc_processHeartbeat{quantile="0.5"} NaN +consul_raft_rpc_processHeartbeat{quantile="0.9"} NaN +consul_raft_rpc_processHeartbeat{quantile="0.99"} NaN +consul_raft_rpc_processHeartbeat_sum 3.374873999040574 +consul_raft_rpc_processHeartbeat_count 189 +# HELP consul_raft_rpc_requestVote consul_raft_rpc_requestVote +# TYPE consul_raft_rpc_requestVote summary +consul_raft_rpc_requestVote{quantile="0.5"} NaN +consul_raft_rpc_requestVote{quantile="0.9"} NaN +consul_raft_rpc_requestVote{quantile="0.99"} NaN +consul_raft_rpc_requestVote_sum 27.062883377075195 +consul_raft_rpc_requestVote_count 1 +# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk. +# TYPE consul_raft_snapshot_persist summary +consul_raft_snapshot_persist{quantile="0.5"} NaN +consul_raft_snapshot_persist{quantile="0.9"} NaN +consul_raft_snapshot_persist{quantile="0.99"} NaN +consul_raft_snapshot_persist_sum 0 +consul_raft_snapshot_persist_count 0 +# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election. +# TYPE consul_raft_state_candidate counter +consul_raft_state_candidate 1 +# HELP consul_raft_state_follower consul_raft_state_follower +# TYPE consul_raft_state_follower counter +consul_raft_state_follower 1 +# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader. +# TYPE consul_raft_state_leader counter +consul_raft_state_leader 1 +# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation +# TYPE consul_raft_thread_fsm_saturation summary +consul_raft_thread_fsm_saturation{quantile="0.5"} 0 +consul_raft_thread_fsm_saturation{quantile="0.9"} 0 +consul_raft_thread_fsm_saturation{quantile="0.99"} 0 +consul_raft_thread_fsm_saturation_sum 0.14000000059604645 +consul_raft_thread_fsm_saturation_count 75 +# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation +# TYPE consul_raft_thread_main_saturation summary +consul_raft_thread_main_saturation{quantile="0.5"} 0 +consul_raft_thread_main_saturation{quantile="0.9"} 0.009999999776482582 +consul_raft_thread_main_saturation{quantile="0.99"} 0.009999999776482582 +consul_raft_thread_main_saturation_sum 0.9699999857693911 +consul_raft_thread_main_saturation_count 328 +# HELP consul_raft_verify_leader consul_raft_verify_leader +# TYPE consul_raft_verify_leader counter +consul_raft_verify_leader 6 +# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection. +# TYPE consul_rpc_accept_conn counter +consul_rpc_accept_conn 15 +# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed. 
+# TYPE consul_rpc_consistentRead summary +consul_rpc_consistentRead{quantile="0.5"} NaN +consul_rpc_consistentRead{quantile="0.9"} NaN +consul_rpc_consistentRead{quantile="0.99"} NaN +consul_rpc_consistentRead_sum 3.1557260155677795 +consul_rpc_consistentRead_count 6 +# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query. +# TYPE consul_rpc_cross_dc counter +consul_rpc_cross_dc 0 +# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling. +# TYPE consul_rpc_queries_blocking gauge +consul_rpc_queries_blocking 0 +# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries. +# TYPE consul_rpc_query counter +consul_rpc_query 19 +# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection. +# TYPE consul_rpc_raft_handoff counter +consul_rpc_raft_handoff 4 +# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request. +# TYPE consul_rpc_request counter +consul_rpc_request 936 +# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request. +# TYPE consul_rpc_request_error counter +consul_rpc_request_error 0 +# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns +# TYPE consul_runtime_gc_pause_ns summary +consul_runtime_gc_pause_ns{quantile="0.5"} NaN +consul_runtime_gc_pause_ns{quantile="0.9"} NaN +consul_runtime_gc_pause_ns{quantile="0.99"} NaN +consul_runtime_gc_pause_ns_sum 1.565053e+07 +consul_runtime_gc_pause_ns_count 42 +# HELP consul_satya_vm_autopilot_failure_tolerance consul_satya_vm_autopilot_failure_tolerance +# TYPE consul_satya_vm_autopilot_failure_tolerance gauge +consul_satya_vm_autopilot_failure_tolerance 1 +# HELP consul_satya_vm_autopilot_healthy consul_satya_vm_autopilot_healthy +# TYPE consul_satya_vm_autopilot_healthy gauge +consul_satya_vm_autopilot_healthy 1 +# HELP consul_satya_vm_consul_members_clients consul_satya_vm_consul_members_clients +# TYPE consul_satya_vm_consul_members_clients gauge +consul_satya_vm_consul_members_clients{datacenter="us-central"} 0 +# HELP consul_satya_vm_consul_members_servers consul_satya_vm_consul_members_servers +# TYPE consul_satya_vm_consul_members_servers gauge +consul_satya_vm_consul_members_servers{datacenter="us-central"} 3 +# HELP consul_satya_vm_consul_state_config_entries consul_satya_vm_consul_state_config_entries +# TYPE consul_satya_vm_consul_state_config_entries gauge +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="exported-services"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="ingress-gateway"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="mesh"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="proxy-defaults"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-defaults"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-intentions"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-resolver"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-router"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-splitter"} 0 +consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="terminating-gateway"} 0 +# HELP consul_satya_vm_consul_state_connect_instances 
consul_satya_vm_consul_state_connect_instances +# TYPE consul_satya_vm_consul_state_connect_instances gauge +consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="connect-native"} 0 +consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="connect-proxy"} 0 +consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="ingress-gateway"} 0 +consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="mesh-gateway"} 0 +consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="terminating-gateway"} 0 +# HELP consul_satya_vm_consul_state_kv_entries consul_satya_vm_consul_state_kv_entries +# TYPE consul_satya_vm_consul_state_kv_entries gauge +consul_satya_vm_consul_state_kv_entries{datacenter="us-central"} 1 +# HELP consul_satya_vm_consul_state_nodes consul_satya_vm_consul_state_nodes +# TYPE consul_satya_vm_consul_state_nodes gauge +consul_satya_vm_consul_state_nodes{datacenter="us-central"} 3 +# HELP consul_satya_vm_consul_state_peerings consul_satya_vm_consul_state_peerings +# TYPE consul_satya_vm_consul_state_peerings gauge +consul_satya_vm_consul_state_peerings{datacenter="us-central"} 0 +# HELP consul_satya_vm_consul_state_service_instances consul_satya_vm_consul_state_service_instances +# TYPE consul_satya_vm_consul_state_service_instances gauge +consul_satya_vm_consul_state_service_instances{datacenter="us-central"} 4 +# HELP consul_satya_vm_consul_state_services consul_satya_vm_consul_state_services +# TYPE consul_satya_vm_consul_state_services gauge +consul_satya_vm_consul_state_services{datacenter="us-central"} 2 +# HELP consul_satya_vm_grpc_client_connections consul_satya_vm_grpc_client_connections +# TYPE consul_satya_vm_grpc_client_connections gauge +consul_satya_vm_grpc_client_connections 1 +# HELP consul_satya_vm_grpc_server_connections consul_satya_vm_grpc_server_connections +# TYPE consul_satya_vm_grpc_server_connections gauge +consul_satya_vm_grpc_server_connections 0 +# HELP consul_satya_vm_memberlist_health_score consul_satya_vm_memberlist_health_score +# TYPE consul_satya_vm_memberlist_health_score gauge +consul_satya_vm_memberlist_health_score{network="lan"} 0 +consul_satya_vm_memberlist_health_score{network="wan"} 0 +# HELP consul_satya_vm_mesh_active_root_ca_expiry consul_satya_vm_mesh_active_root_ca_expiry +# TYPE consul_satya_vm_mesh_active_root_ca_expiry gauge +consul_satya_vm_mesh_active_root_ca_expiry NaN +# HELP consul_satya_vm_mesh_active_signing_ca_expiry consul_satya_vm_mesh_active_signing_ca_expiry +# TYPE consul_satya_vm_mesh_active_signing_ca_expiry gauge +consul_satya_vm_mesh_active_signing_ca_expiry NaN +# HELP consul_satya_vm_raft_applied_index consul_satya_vm_raft_applied_index +# TYPE consul_satya_vm_raft_applied_index gauge +consul_satya_vm_raft_applied_index 455437 +# HELP consul_satya_vm_raft_boltdb_freePageBytes consul_satya_vm_raft_boltdb_freePageBytes +# TYPE consul_satya_vm_raft_boltdb_freePageBytes gauge +consul_satya_vm_raft_boltdb_freePageBytes 3.960832e+06 +# HELP consul_satya_vm_raft_boltdb_freelistBytes consul_satya_vm_raft_boltdb_freelistBytes +# TYPE consul_satya_vm_raft_boltdb_freelistBytes gauge +consul_satya_vm_raft_boltdb_freelistBytes 7752 +# HELP consul_satya_vm_raft_boltdb_numFreePages consul_satya_vm_raft_boltdb_numFreePages +# TYPE consul_satya_vm_raft_boltdb_numFreePages gauge +consul_satya_vm_raft_boltdb_numFreePages 961 +# HELP consul_satya_vm_raft_boltdb_numPendingPages consul_satya_vm_raft_boltdb_numPendingPages +# TYPE 
consul_satya_vm_raft_boltdb_numPendingPages gauge +consul_satya_vm_raft_boltdb_numPendingPages 6 +# HELP consul_satya_vm_raft_boltdb_openReadTxn consul_satya_vm_raft_boltdb_openReadTxn +# TYPE consul_satya_vm_raft_boltdb_openReadTxn gauge +consul_satya_vm_raft_boltdb_openReadTxn 0 +# HELP consul_satya_vm_raft_boltdb_txstats_pageAlloc consul_satya_vm_raft_boltdb_txstats_pageAlloc +# TYPE consul_satya_vm_raft_boltdb_txstats_pageAlloc gauge +consul_satya_vm_raft_boltdb_txstats_pageAlloc 2.465792e+06 +# HELP consul_satya_vm_raft_boltdb_txstats_pageCount consul_satya_vm_raft_boltdb_txstats_pageCount +# TYPE consul_satya_vm_raft_boltdb_txstats_pageCount gauge +consul_satya_vm_raft_boltdb_txstats_pageCount 602 +# HELP consul_satya_vm_raft_commitNumLogs consul_satya_vm_raft_commitNumLogs +# TYPE consul_satya_vm_raft_commitNumLogs gauge +consul_satya_vm_raft_commitNumLogs 1 +# HELP consul_satya_vm_raft_fsm_lastRestoreDuration consul_satya_vm_raft_fsm_lastRestoreDuration +# TYPE consul_satya_vm_raft_fsm_lastRestoreDuration gauge +consul_satya_vm_raft_fsm_lastRestoreDuration 2 +# HELP consul_satya_vm_raft_last_index consul_satya_vm_raft_last_index +# TYPE consul_satya_vm_raft_last_index gauge +consul_satya_vm_raft_last_index 455437 +# HELP consul_satya_vm_raft_leader_dispatchNumLogs consul_satya_vm_raft_leader_dispatchNumLogs +# TYPE consul_satya_vm_raft_leader_dispatchNumLogs gauge +consul_satya_vm_raft_leader_dispatchNumLogs 1 +# HELP consul_satya_vm_raft_leader_oldestLogAge consul_satya_vm_raft_leader_oldestLogAge +# TYPE consul_satya_vm_raft_leader_oldestLogAge gauge +consul_satya_vm_raft_leader_oldestLogAge 1.86193632e+08 +# HELP consul_satya_vm_raft_peers consul_satya_vm_raft_peers +# TYPE consul_satya_vm_raft_peers gauge +consul_satya_vm_raft_peers 3 +# HELP consul_satya_vm_rpc_queries_blocking consul_satya_vm_rpc_queries_blocking +# TYPE consul_satya_vm_rpc_queries_blocking gauge +consul_satya_vm_rpc_queries_blocking 1 +# HELP consul_satya_vm_runtime_alloc_bytes consul_satya_vm_runtime_alloc_bytes +# TYPE consul_satya_vm_runtime_alloc_bytes gauge +consul_satya_vm_runtime_alloc_bytes 3.2406104e+07 +# HELP consul_satya_vm_runtime_free_count consul_satya_vm_runtime_free_count +# TYPE consul_satya_vm_runtime_free_count gauge +consul_satya_vm_runtime_free_count 8.260123e+06 +# HELP consul_satya_vm_runtime_heap_objects consul_satya_vm_runtime_heap_objects +# TYPE consul_satya_vm_runtime_heap_objects gauge +consul_satya_vm_runtime_heap_objects 118531 +# HELP consul_satya_vm_runtime_malloc_count consul_satya_vm_runtime_malloc_count +# TYPE consul_satya_vm_runtime_malloc_count gauge +consul_satya_vm_runtime_malloc_count 8.378654e+06 +# HELP consul_satya_vm_runtime_num_goroutines consul_satya_vm_runtime_num_goroutines +# TYPE consul_satya_vm_runtime_num_goroutines gauge +consul_satya_vm_runtime_num_goroutines 123 +# HELP consul_satya_vm_runtime_sys_bytes consul_satya_vm_runtime_sys_bytes +# TYPE consul_satya_vm_runtime_sys_bytes gauge +consul_satya_vm_runtime_sys_bytes 7.3614344e+07 +# HELP consul_satya_vm_runtime_total_gc_pause_ns consul_satya_vm_runtime_total_gc_pause_ns +# TYPE consul_satya_vm_runtime_total_gc_pause_ns gauge +consul_satya_vm_runtime_total_gc_pause_ns 1.565053e+07 +# HELP consul_satya_vm_runtime_total_gc_runs consul_satya_vm_runtime_total_gc_runs +# TYPE consul_satya_vm_runtime_total_gc_runs gauge +consul_satya_vm_runtime_total_gc_runs 42 +# HELP consul_satya_vm_server_isLeader consul_satya_vm_server_isLeader +# TYPE consul_satya_vm_server_isLeader gauge 
+consul_satya_vm_server_isLeader 1 +# HELP consul_satya_vm_session_ttl_active consul_satya_vm_session_ttl_active +# TYPE consul_satya_vm_session_ttl_active gauge +consul_satya_vm_session_ttl_active 0 +# HELP consul_satya_vm_version consul_satya_vm_version +# TYPE consul_satya_vm_version gauge +consul_satya_vm_version{pre_release="",version="1.13.2"} 1 +# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms +# TYPE consul_serf_coordinate_adjustment_ms summary +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 0.18447500467300415 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 0.23558799922466278 +consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 0.3543170094490051 +consul_serf_coordinate_adjustment_ms_sum{network="lan"} 127.64726796071045 +consul_serf_coordinate_adjustment_ms_count{network="lan"} 559 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.5"} 0.11145199835300446 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.9"} 0.12108899652957916 +consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.99"} 0.12108899652957916 +consul_serf_coordinate_adjustment_ms_sum{network="wan"} 29.19709792546928 +consul_serf_coordinate_adjustment_ms_count{network="wan"} 112 +# HELP consul_serf_events consul_serf_events +# TYPE consul_serf_events counter +consul_serf_events{network="lan"} 2 +# HELP consul_serf_events_consul:new_leader consul_serf_events_consul:new_leader +# TYPE consul_serf_events_consul:new_leader counter +consul_serf_events_consul:new_leader{network="lan"} 2 +# HELP consul_serf_member_join consul_serf_member_join +# TYPE consul_serf_member_join counter +consul_serf_member_join{network="lan"} 5 +consul_serf_member_join{network="wan"} 5 +# HELP consul_serf_member_left consul_serf_member_left +# TYPE consul_serf_member_left counter +consul_serf_member_left{network="lan"} 2 +consul_serf_member_left{network="wan"} 2 +# HELP consul_serf_msgs_received consul_serf_msgs_received +# TYPE consul_serf_msgs_received summary +consul_serf_msgs_received{network="lan",quantile="0.5"} NaN +consul_serf_msgs_received{network="lan",quantile="0.9"} NaN +consul_serf_msgs_received{network="lan",quantile="0.99"} NaN +consul_serf_msgs_received_sum{network="lan"} 1066 +consul_serf_msgs_received_count{network="lan"} 33 +consul_serf_msgs_received{network="wan",quantile="0.5"} NaN +consul_serf_msgs_received{network="wan",quantile="0.9"} NaN +consul_serf_msgs_received{network="wan",quantile="0.99"} NaN +consul_serf_msgs_received_sum{network="wan"} 909 +consul_serf_msgs_received_count{network="wan"} 23 +# HELP consul_serf_msgs_sent consul_serf_msgs_sent +# TYPE consul_serf_msgs_sent summary +consul_serf_msgs_sent{network="lan",quantile="0.5"} NaN +consul_serf_msgs_sent{network="lan",quantile="0.9"} NaN +consul_serf_msgs_sent{network="lan",quantile="0.99"} NaN +consul_serf_msgs_sent_sum{network="lan"} 1204 +consul_serf_msgs_sent_count{network="lan"} 36 +consul_serf_msgs_sent{network="wan",quantile="0.5"} NaN +consul_serf_msgs_sent{network="wan",quantile="0.9"} NaN +consul_serf_msgs_sent{network="wan",quantile="0.99"} NaN +consul_serf_msgs_sent_sum{network="wan"} 792 +consul_serf_msgs_sent_count{network="wan"} 20 +# HELP consul_serf_queue_Event consul_serf_queue_Event +# TYPE consul_serf_queue_Event summary +consul_serf_queue_Event{network="lan",quantile="0.5"} NaN +consul_serf_queue_Event{network="lan",quantile="0.9"} NaN +consul_serf_queue_Event{network="lan",quantile="0.99"} NaN 
+consul_serf_queue_Event_sum{network="lan"} 0 +consul_serf_queue_Event_count{network="lan"} 19 +consul_serf_queue_Event{network="wan",quantile="0.5"} NaN +consul_serf_queue_Event{network="wan",quantile="0.9"} NaN +consul_serf_queue_Event{network="wan",quantile="0.99"} NaN +consul_serf_queue_Event_sum{network="wan"} 0 +consul_serf_queue_Event_count{network="wan"} 19 +# HELP consul_serf_queue_Intent consul_serf_queue_Intent +# TYPE consul_serf_queue_Intent summary +consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN +consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN +consul_serf_queue_Intent_sum{network="lan"} 0 +consul_serf_queue_Intent_count{network="lan"} 19 +consul_serf_queue_Intent{network="wan",quantile="0.5"} NaN +consul_serf_queue_Intent{network="wan",quantile="0.9"} NaN +consul_serf_queue_Intent{network="wan",quantile="0.99"} NaN +consul_serf_queue_Intent_sum{network="wan"} 1 +consul_serf_queue_Intent_count{network="wan"} 19 +# HELP consul_serf_queue_Query consul_serf_queue_Query +# TYPE consul_serf_queue_Query summary +consul_serf_queue_Query{network="lan",quantile="0.5"} NaN +consul_serf_queue_Query{network="lan",quantile="0.9"} NaN +consul_serf_queue_Query{network="lan",quantile="0.99"} NaN +consul_serf_queue_Query_sum{network="lan"} 0 +consul_serf_queue_Query_count{network="lan"} 19 +consul_serf_queue_Query{network="wan",quantile="0.5"} NaN +consul_serf_queue_Query{network="wan",quantile="0.9"} NaN +consul_serf_queue_Query{network="wan",quantile="0.99"} NaN +consul_serf_queue_Query_sum{network="wan"} 0 +consul_serf_queue_Query_count{network="wan"} 19 +# HELP consul_serf_snapshot_appendLine consul_serf_snapshot_appendLine +# TYPE consul_serf_snapshot_appendLine summary +consul_serf_snapshot_appendLine{network="lan",quantile="0.5"} NaN +consul_serf_snapshot_appendLine{network="lan",quantile="0.9"} NaN +consul_serf_snapshot_appendLine{network="lan",quantile="0.99"} NaN +consul_serf_snapshot_appendLine_sum{network="lan"} 0.3810300036566332 +consul_serf_snapshot_appendLine_count{network="lan"} 15 +consul_serf_snapshot_appendLine{network="wan",quantile="0.5"} NaN +consul_serf_snapshot_appendLine{network="wan",quantile="0.9"} NaN +consul_serf_snapshot_appendLine{network="wan",quantile="0.99"} NaN +consul_serf_snapshot_appendLine_sum{network="wan"} 0.3907299981219694 +consul_serf_snapshot_appendLine_count{network="wan"} 13 +# HELP consul_server_isLeader Tracks if the server is a leader. +# TYPE consul_server_isLeader gauge +consul_server_isLeader 0 +# HELP consul_session_apply Measures the time spent applying a session update. +# TYPE consul_session_apply summary +consul_session_apply{quantile="0.5"} NaN +consul_session_apply{quantile="0.9"} NaN +consul_session_apply{quantile="0.99"} NaN +consul_session_apply_sum 0 +consul_session_apply_count 0 +# HELP consul_session_renew Measures the time spent renewing a session. +# TYPE consul_session_renew summary +consul_session_renew{quantile="0.5"} NaN +consul_session_renew{quantile="0.9"} NaN +consul_session_renew{quantile="0.99"} NaN +consul_session_renew_sum 0 +consul_session_renew_count 0 +# HELP consul_session_ttl_active Tracks the active number of sessions being tracked. +# TYPE consul_session_ttl_active gauge +consul_session_ttl_active 0 +# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session. 
+# TYPE consul_session_ttl_invalidate summary +consul_session_ttl_invalidate{quantile="0.5"} NaN +consul_session_ttl_invalidate{quantile="0.9"} NaN +consul_session_ttl_invalidate{quantile="0.99"} NaN +consul_session_ttl_invalidate_sum 0 +consul_session_ttl_invalidate_count 0 +# HELP consul_txn_apply Measures the time spent applying a transaction operation. +# TYPE consul_txn_apply summary +consul_txn_apply{quantile="0.5"} NaN +consul_txn_apply{quantile="0.9"} NaN +consul_txn_apply{quantile="0.99"} NaN +consul_txn_apply_sum 0 +consul_txn_apply_count 0 +# HELP consul_txn_read Measures the time spent returning a read transaction. +# TYPE consul_txn_read summary +consul_txn_read{quantile="0.5"} NaN +consul_txn_read{quantile="0.9"} NaN +consul_txn_read{quantile="0.99"} NaN +consul_txn_read_sum 0 +consul_txn_read_count 0 +# HELP consul_version Represents the Consul version. +# TYPE consul_version gauge +consul_version 0 +# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version. +# TYPE consul_xds_server_streams gauge +consul_xds_server_streams 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 5.3269e-05 +go_gc_duration_seconds{quantile="0.25"} 0.000130599 +go_gc_duration_seconds{quantile="0.5"} 0.000271028 +go_gc_duration_seconds{quantile="0.75"} 0.000362027 +go_gc_duration_seconds{quantile="1"} 0.002227924 +go_gc_duration_seconds_sum 0.01565053 +go_gc_duration_seconds_count 42 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 130 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.18.1"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 3.2922384e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 7.39548784e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.625099e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 8.260339e+06 +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 7.265691723511656e-05 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 6.583e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 3.2922384e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 2.3904256e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 3.72736e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 122074 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. 
+# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 1.6113664e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.1177856e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.6713887082058973e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 8.382413e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 375768 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 603840 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 4.5858448e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.872245e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.736704e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.736704e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.3614344e+07 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 14 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 20.7 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1024 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 33 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 1.22032128e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.67138812259e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 8.48359424e+08 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes -1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self.json new file mode 100644 index 00000000000000..0b11cda5307eab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self.json @@ -0,0 +1,50 @@ +{ + "Config": { + "Datacenter": "us-central", + "PrimaryDatacenter": "us-central", + "NodeName": "satya-vm", + "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Revision": "0e046bbb", + "Server": true, + "Version": "1.13.2", + "BuildDate": "2022-09-20T20:30:07Z" + }, + "DebugConfig": { + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": false, + "DogstatsdAddr": "", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "2m0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json new file mode 100644 index 00000000000000..0b11cda5307eab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json @@ -0,0 +1,50 @@ +{ + "Config": { + "Datacenter": "us-central", + "PrimaryDatacenter": "us-central", + "NodeName": "satya-vm", + "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Revision": "0e046bbb", + "Server": true, + "Version": "1.13.2", + "BuildDate": "2022-09-20T20:30:07Z" + }, + "DebugConfig": { + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": false, + "DogstatsdAddr": "", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "2m0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json new file mode 100644 index 
00000000000000..c964d10fe89fb7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json @@ -0,0 +1,50 @@ +{ + "Config": { + "Datacenter": "us-central", + "PrimaryDatacenter": "us-central", + "NodeName": "satya-vm", + "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Revision": "0e046bbb", + "Server": true, + "Version": "1.13.2", + "BuildDate": "2022-09-20T20:30:07Z" + }, + "DebugConfig": { + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": false, + "DogstatsdAddr": "", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json new file mode 100644 index 00000000000000..dfe37bcc097e28 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json @@ -0,0 +1,50 @@ +{ + "Config": { + "Datacenter": "us-central", + "PrimaryDatacenter": "us-central", + "NodeName": "satya-vm", + "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Revision": "0e046bbb", + "Server": false, + "Version": "1.13.2", + "BuildDate": "2022-09-20T20:30:07Z" + }, + "DebugConfig": { + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": false, + "DogstatsdAddr": "", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "10m0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json new file mode 100644 index 00000000000000..8f3f63839a5f1b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json @@ -0,0 +1,59 @@ +[ + { + "Node": "satya-vm", + "Segment": "", + "Coord": { + "Vec": [ + 0.014829503547751722, + 0.0072173849395880596, + 0.004329474334739038, + -0.0032798752739064438, + 
-0.010134170963372591, + -0.008257638503292454, + 0.00752142875530981, + 0.0017901665053347217 + ], + "Error": 0.493977389081921, + "Adjustment": 0.00017401717315766792, + "Height": 2.8272088782225915e-05 + } + }, + { + "Node": "satya-vm2", + "Segment": "", + "Coord": { + "Vec": [ + 0.01485399579339927, + 0.007233318963330601, + 0.004314864811042585, + -0.0032764668107421653, + -0.010133938771787391, + -0.008238915750721635, + 0.0075168683512753035, + 0.001776534386752108 + ], + "Error": 0.3003366063730667, + "Adjustment": 0.00019935098724887628, + "Height": 4.192904954404545e-05 + } + }, + { + "Node": "satya-vm3", + "Segment": "", + "Coord": { + "Vec": [ + 0.014782092899311995, + 0.007186516660508205, + 0.004357885422476095, + -0.003286526239099157, + -0.010134722455521066, + -0.008294075475167818, + 0.007530358624901773, + 0.0018166544975743123 + ], + "Error": 0.12048664650994341, + "Adjustment": 0.00014477073973997567, + "Height": 0.0005656138448826895 + } + } +] diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json new file mode 100644 index 00000000000000..4acee01eca9fe6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json @@ -0,0 +1,48 @@ +{ + "Healthy": true, + "FailureTolerance": 1, + "Servers": [ + { + "ID": "72849161-41cb-14df-fc9b-563ddff3bae7", + "Name": "satya-vm3", + "Address": "10.10.30.119:8300", + "SerfStatus": "alive", + "Version": "1.13.2", + "Leader": false, + "LastContact": "54.653679ms", + "LastTerm": 29, + "LastIndex": 486777, + "Healthy": true, + "Voter": true, + "StableSince": "2022-12-21T13:53:42Z" + }, + { + "ID": "3e75e0af-859b-83e8-779f-f3a6d12f02ae", + "Name": "satya-vm2", + "Address": "10.10.30.176:8300", + "SerfStatus": "alive", + "Version": "1.13.2", + "Leader": true, + "LastContact": "0ms", + "LastTerm": 29, + "LastIndex": 486777, + "Healthy": true, + "Voter": true, + "StableSince": "2022-12-21T13:53:46Z" + }, + { + "ID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d", + "Name": "satya-vm", + "Address": "10.10.30.177:8300", + "SerfStatus": "alive", + "Version": "1.13.2", + "Leader": false, + "LastContact": "13.211617ms", + "LastTerm": 29, + "LastIndex": 486777, + "Healthy": true, + "Voter": true, + "StableSince": "2022-12-20T09:55:28Z" + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/v1-agent-checks.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/v1-agent-checks.json new file mode 100644 index 00000000000000..b8967cb74b6fe7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.13.2/v1-agent-checks.json @@ -0,0 +1,68 @@ +{ + "chk1": { + "Node": "mysql1", + "CheckID": "chk1", + "Name": "ssh", + "Status": "passing", + "Notes": "", + "Output": "TCP connect 127.0.0.1:22: Success", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "chk2": { + "Node": "mysql1", + "CheckID": "chk2", + "Name": "telnet", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:23: connect: connection refused", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "chk3": { + "Node": "mysql1", + "CheckID": "chk3", + "Name": "telnet", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:23: 
connect: connection refused", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "mysql": { + "Node": "mysql1", + "CheckID": "mysql", + "Name": "MYSQL TCP on port 3336", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:3336: connect: connection refused", + "ServiceID": "mysql0", + "ServiceName": "mysql", + "ServiceTags": [ + "primary", + "secondary" + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt new file mode 100644 index 00000000000000..094f03508c5c24 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt @@ -0,0 +1,1502 @@ +# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token. +# TYPE consul_acl_ResolveToken summary +consul_acl_ResolveToken{quantile="0.5"} 0.05904199928045273 +consul_acl_ResolveToken{quantile="0.9"} 0.1010729968547821 +consul_acl_ResolveToken{quantile="0.99"} 0.18903599679470062 +consul_acl_ResolveToken_sum 59019.61223328998 +consul_acl_ResolveToken_count 863476 +# HELP consul_acl_authmethod_delete +# TYPE consul_acl_authmethod_delete summary +consul_acl_authmethod_delete{quantile="0.5"} NaN +consul_acl_authmethod_delete{quantile="0.9"} NaN +consul_acl_authmethod_delete{quantile="0.99"} NaN +consul_acl_authmethod_delete_sum 0 +consul_acl_authmethod_delete_count 0 +# HELP consul_acl_authmethod_upsert +# TYPE consul_acl_authmethod_upsert summary +consul_acl_authmethod_upsert{quantile="0.5"} NaN +consul_acl_authmethod_upsert{quantile="0.9"} NaN +consul_acl_authmethod_upsert{quantile="0.99"} NaN +consul_acl_authmethod_upsert_sum 0 +consul_acl_authmethod_upsert_count 0 +# HELP consul_acl_bindingrule_delete +# TYPE consul_acl_bindingrule_delete summary +consul_acl_bindingrule_delete{quantile="0.5"} NaN +consul_acl_bindingrule_delete{quantile="0.9"} NaN +consul_acl_bindingrule_delete{quantile="0.99"} NaN +consul_acl_bindingrule_delete_sum 0 +consul_acl_bindingrule_delete_count 0 +# HELP consul_acl_bindingrule_upsert +# TYPE consul_acl_bindingrule_upsert summary +consul_acl_bindingrule_upsert{quantile="0.5"} NaN +consul_acl_bindingrule_upsert{quantile="0.9"} NaN +consul_acl_bindingrule_upsert{quantile="0.99"} NaN +consul_acl_bindingrule_upsert_sum 0 +consul_acl_bindingrule_upsert_count 0 +# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_deregistration counter +consul_acl_blocked_check_deregistration 0 +# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL) +# TYPE consul_acl_blocked_check_registration counter +consul_acl_blocked_check_registration 0 +# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL) +# TYPE consul_acl_blocked_node_registration counter +consul_acl_blocked_node_registration 0 +# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL) +# TYPE consul_acl_blocked_service_deregistration counter +consul_acl_blocked_service_deregistration 0 +# HELP consul_acl_blocked_service_registration Increments whenever a 
registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 2.2944839000701904
+consul_acl_policy_delete_count 1
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 173.05634947121143
+consul_acl_policy_upsert_count 11
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 35.43468403816223
+consul_acl_token_delete_count 12
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 33.15468955039978
+consul_acl_token_upsert_count 9
+# HELP consul_agent_event consul_agent_event
+# TYPE consul_agent_event counter
+consul_agent_event 793609
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_agent_write_event consul_agent_write_event
+# TYPE consul_agent_write_event summary
+consul_agent_write_event{quantile="0.5"} 0.012071000412106514
+consul_agent_write_event{quantile="0.9"} 0.03231099992990494
+consul_agent_write_event{quantile="0.99"} 0.038460999727249146
+consul_agent_write_event_sum 17825.32184328325
+consul_agent_write_event_count 793609
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_policy_"} 0.3439910039305687
+consul_api_http_count{method="GET",path="v1_acl_policy_"} 2
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_policy_name_"} 0.2537579983472824
+consul_api_http_count{method="GET",path="v1_acl_policy_name_"} 2
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_token_"} 292.9099607616663
+consul_api_http_count{method="GET",path="v1_acl_token_"} 1447
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_members"} 1504.3780329823494
+consul_api_http_count{method="GET",path="v1_agent_members"} 15059
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 47773.76364764571
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 10129
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_self"} 9246.783903598785
+consul_api_http_count{method="GET",path="v1_agent_self"} 7567
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.5"} 0.8214660286903381
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.9"} 1.1057649850845337
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.99"} 1.1057649850845337
+consul_api_http_sum{method="GET",path="v1_catalog_node-services_"} 824.5040957331657
+consul_api_http_count{method="GET",path="v1_catalog_node-services_"} 1069
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_catalog_nodes"} 0.37226200103759766
+consul_api_http_count{method="GET",path="v1_catalog_nodes"} 1
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.5"} 0.538116991519928
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.9"} 0.6367400288581848
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.99"} 0.6367400288581848
+consul_api_http_sum{method="GET",path="v1_catalog_service_"} 43381.559261500835
+consul_api_http_count{method="GET",path="v1_catalog_service_"} 75066
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.5"} 0.2639490067958832
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.9"} 0.2639490067958832
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.99"} 0.2639490067958832
+consul_api_http_sum{method="GET",path="v1_internal_ui_catalog-overview"} 3496.612477712333
+consul_api_http_count{method="GET",path="v1_internal_ui_catalog-overview"} 14553
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.5"} 0.14019399881362915
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.9"} 0.29843899607658386
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.99"} 0.29843899607658386
+consul_api_http_sum{method="GET",path="v1_namespace_"} 6329.847745008767
+consul_api_http_count{method="GET",path="v1_namespace_"} 30022
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_operator_autopilot_health"} 1326.0989246219397
+consul_api_http_count{method="GET",path="v1_operator_autopilot_health"} 7747
+consul_api_http{method="GET",path="v1_partitions",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_partitions",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_partitions",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_partitions"} 3190.110695719719
+consul_api_http_count{method="GET",path="v1_partitions"} 4136
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.5"} 0.07637300342321396
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.9"} 0.07637300342321396
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.99"} 0.07637300342321396
+consul_api_http_sum{method="GET",path="v1_status_leader"} 4829.641642797738
+consul_api_http_count{method="GET",path="v1_status_leader"} 45620
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.5"} 2.291783094406128
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.9"} 2.9903249740600586
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.99"} 2.9903249740600586
+consul_api_http_sum{method="PUT",path="v1_catalog_register"} 284584.19143879414
+consul_api_http_count{method="PUT",path="v1_catalog_register"} 90170
+# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function.
+# TYPE consul_autopilot_failure_tolerance gauge
+consul_autopilot_failure_tolerance 0
+# HELP consul_autopilot_failure_tolerance_failure_tolerance consul_autopilot_failure_tolerance_failure_tolerance
+# TYPE consul_autopilot_failure_tolerance_failure_tolerance gauge
+consul_autopilot_failure_tolerance_failure_tolerance 0
+# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.
+# TYPE consul_autopilot_healthy gauge
+consul_autopilot_healthy 0
+# HELP consul_autopilot_healthy_healthy consul_autopilot_healthy_healthy
+# TYPE consul_autopilot_healthy_healthy gauge
+consul_autopilot_healthy_healthy 1
+# HELP consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_cache_bypass counter
+consul_cache_bypass 0
+# HELP consul_cache_connect_ca_leaf_fetch_success consul_cache_connect_ca_leaf_fetch_success
+# TYPE consul_cache_connect_ca_leaf_fetch_success counter
+consul_cache_connect_ca_leaf_fetch_success{result_not_modified="false"} 2
+# HELP consul_cache_connect_ca_root_fetch_success consul_cache_connect_ca_root_fetch_success
+# TYPE consul_cache_connect_ca_root_fetch_success counter
+consul_cache_connect_ca_root_fetch_success{result_not_modified="false"} 271
+# HELP consul_cache_connect_ca_root_hit consul_cache_connect_ca_root_hit
+# TYPE consul_cache_connect_ca_root_hit counter
+consul_cache_connect_ca_root_hit 2
+# HELP consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_cache_entries_count gauge
+consul_cache_entries_count 0
+# HELP consul_cache_entries_count_entries_count consul_cache_entries_count_entries_count
+# TYPE consul_cache_entries_count_entries_count gauge
+consul_cache_entries_count_entries_count 30
+# HELP consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_cache_evict_expired counter
+consul_cache_evict_expired 1
+# HELP consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_cache_fetch_error counter
+consul_cache_fetch_error 0
+# HELP consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_cache_fetch_success counter
+consul_cache_fetch_success 0
+consul_cache_fetch_success{result_not_modified="false"} 1381
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 221.93704390525818
+consul_catalog_deregister_count 55
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} 2.13044810295105
+consul_catalog_register{quantile="0.9"} 2.721796989440918
+consul_catalog_register{quantile="0.99"} 2.721796989440918
+consul_catalog_register_sum 265432.1276627779
+consul_catalog_register_count 90231
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+consul_catalog_service_query{service="consul-connect-injector-consul"} 15004
+consul_catalog_service_query{service="consul-ingress-gateway-consul"} 15009
+consul_catalog_service_query{service="kubelet-default"} 15009
+consul_catalog_service_query{service="kubernetes-default"} 15016
+consul_catalog_service_query{service="netdata-default"} 15009
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+consul_catalog_service_query_tags{service="consul-connect-injector-consul",tag="k8s"} 15003
+consul_catalog_service_query_tags{service="consul-ingress-gateway-consul",tag="k8s"} 15009
+consul_catalog_service_query_tags{service="kubelet-default",tag="k8s"} 15009
+consul_catalog_service_query_tags{service="kubernetes-default",tag="k8s"} 15014
+consul_catalog_service_query_tags{service="netdata-default",tag="k8s"} 15004
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+consul_client_api_catalog_node_service_list{node="ip-172-25-37-57",partition="default"} 1069
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+consul_client_api_catalog_nodes{node="ip-172-25-37-57",partition="default"} 1
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+consul_client_api_catalog_register{node="ip-172-25-37-57",partition="default"} 90170
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+consul_client_api_catalog_service_nodes{node="ip-172-25-37-57",partition="default"} 75066
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+consul_client_api_success_catalog_node_service_list{node="ip-172-25-37-57",partition="default"} 1069
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+consul_client_api_success_catalog_nodes{node="ip-172-25-37-57",partition="default"} 1
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+consul_client_api_success_catalog_register{node="ip-172-25-37-57",partition="default"} 90170
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+consul_client_api_success_catalog_service_nodes{node="ip-172-25-37-57",partition="default"} 75072
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 438718
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Deprecated - please use cache_bypass instead.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_connect_ca_leaf_fetch_success consul_consul_cache_connect_ca_leaf_fetch_success
+# TYPE consul_consul_cache_connect_ca_leaf_fetch_success counter
+consul_consul_cache_connect_ca_leaf_fetch_success{result_not_modified="false"} 2
+# HELP consul_consul_cache_connect_ca_root_fetch_success consul_consul_cache_connect_ca_root_fetch_success
+# TYPE consul_consul_cache_connect_ca_root_fetch_success counter
+consul_consul_cache_connect_ca_root_fetch_success{result_not_modified="false"} 271
+# HELP consul_consul_cache_connect_ca_root_hit consul_consul_cache_connect_ca_root_hit
+# TYPE consul_consul_cache_connect_ca_root_hit counter
+consul_consul_cache_connect_ca_root_hit 2
+# HELP consul_consul_cache_entries_count Deprecated - please use cache_entries_count instead.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_entries_count_entries_count consul_consul_cache_entries_count_entries_count
+# TYPE consul_consul_cache_entries_count_entries_count gauge
+consul_consul_cache_entries_count_entries_count 30
+# HELP consul_consul_cache_evict_expired Deprecated - please use cache_evict_expired instead.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 1
+# HELP consul_consul_cache_fetch_error Deprecated - please use cache_fetch_error instead.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Deprecated - please use cache_fetch_success instead.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+consul_consul_cache_fetch_success{result_not_modified="false"} 1381
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply Deprecated - please use intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_leader_reconcile consul_consul_leader_reconcile
+# TYPE consul_consul_leader_reconcile summary
+consul_consul_leader_reconcile{quantile="0.5"} NaN
+consul_consul_leader_reconcile{quantile="0.9"} NaN
+consul_consul_leader_reconcile{quantile="0.99"} NaN
+consul_consul_leader_reconcile_sum 1747.430968016386
+consul_consul_leader_reconcile_count 7530
+# HELP consul_consul_members_clients Deprecated - please use members_clients instead.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+# HELP consul_consul_members_clients_clients consul_consul_members_clients_clients
+# TYPE consul_consul_members_clients_clients gauge
+consul_consul_members_clients_clients{datacenter="consul-sandbox-cluster-0159c9d3"} 1
+consul_consul_members_clients_clients{datacenter="consul-sandbox-cluster-0159c9d3",partition="default",segment=""} 1
+# HELP consul_consul_members_servers Deprecated - please use members_servers instead.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+# HELP consul_consul_members_servers_servers consul_consul_members_servers_servers
+# TYPE consul_consul_members_servers_servers gauge
+consul_consul_members_servers_servers{datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# HELP consul_consul_peering_exported_services Deprecated - please use peering_exported_services
+# TYPE consul_consul_peering_exported_services gauge
+consul_consul_peering_exported_services 0
+# HELP consul_consul_peering_healthy Deprecated - please use peering_exported_services
+# TYPE consul_consul_peering_healthy gauge
+consul_consul_peering_healthy 0
+# HELP consul_consul_state_config_entries Deprecated - please use state_config_entries instead.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+# HELP consul_consul_state_config_entries_config_entries consul_consul_state_config_entries_config_entries
+# TYPE consul_consul_state_config_entries_config_entries gauge
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_connect_instances Deprecated - please use state_connect_instances instead.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+# HELP consul_consul_state_connect_instances_connect_instances consul_consul_state_connect_instances_connect_instances
+# TYPE consul_consul_state_connect_instances_connect_instances gauge
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="default",partition="default"} 1
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="default",partition="default"} 2
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_kv_entries Deprecated - please use kv_entries instead.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+# HELP consul_consul_state_kv_entries_kv_entries consul_consul_state_kv_entries_kv_entries
+# TYPE consul_consul_state_kv_entries_kv_entries gauge
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul"} 0
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default"} 0
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra"} 0
+# HELP consul_consul_state_nodes Deprecated - please use state_nodes instead.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+# HELP consul_consul_state_nodes_nodes consul_consul_state_nodes_nodes
+# TYPE consul_consul_state_nodes_nodes gauge
+consul_consul_state_nodes_nodes{datacenter="consul-sandbox-cluster-0159c9d3",partition="default"} 8
+# HELP consul_consul_state_peerings Deprecated - please use state_peerings instead.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+# HELP consul_consul_state_peerings_peerings consul_consul_state_peerings_peerings
+# TYPE consul_consul_state_peerings_peerings gauge
+consul_consul_state_peerings_peerings{datacenter="consul-sandbox-cluster-0159c9d3",partition="default"} 0
+# HELP consul_consul_state_service_instances Deprecated - please use state_service_instances instead.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+# HELP consul_consul_state_service_instances_service_instances consul_consul_state_service_instances_service_instances
+# TYPE consul_consul_state_service_instances_service_instances gauge
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul",partition="default"} 2
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default",partition="default"} 9
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_services Deprecated - please use state_services instead.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+# HELP consul_consul_state_services_services consul_consul_state_services_services
+# TYPE consul_consul_state_services_services gauge
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul",partition="default"} 2
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default",partition="default"} 7
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra",partition="default"} 0
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+consul_fsm_acl_token{op="upsert",quantile="0.5"} NaN
+consul_fsm_acl_token{op="upsert",quantile="0.9"} NaN
+consul_fsm_acl_token{op="upsert",quantile="0.99"} NaN
+consul_fsm_acl_token_sum{op="upsert"} 0.18545499444007874
+consul_fsm_acl_token_count{op="upsert"} 1
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 37.74536604247987
+consul_fsm_autopilot_count 753
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} 0.1002039983868599
+consul_fsm_coordinate_batch_update{quantile="0.9"} 0.1002039983868599
+consul_fsm_coordinate_batch_update{quantile="0.99"} 0.1002039983868599
+consul_fsm_coordinate_batch_update_sum 2816.718877375126
+consul_fsm_coordinate_batch_update_count 21979
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 81.9582624938339
+consul_fsm_deregister_count 56
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 361.0432777404785
+consul_fsm_persist_count 10
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} 0.15392500162124634
+consul_fsm_register{quantile="0.9"} 0.22902700304985046
+consul_fsm_register{quantile="0.99"} 0.22902700304985046
+consul_fsm_register_sum 17763.026295486838
+consul_fsm_register_count 90283
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 0
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 0
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+consul_grpc_client_request_count{server_type="internal"} 4136
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 0
+consul_grpc_server_connection_count{server_type="external"} 1
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 0
+# HELP consul_grpc_server_connections_connections consul_grpc_server_connections_connections
+# TYPE consul_grpc_server_connections_connections gauge
+consul_grpc_server_connections_connections{server_type="external"} 5
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+consul_grpc_server_request_count{server_type="external"} 49
+consul_grpc_server_request_count{server_type="internal"} 4139
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 18.550758838653564
+consul_kvs_apply_count 2
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 16746.72570502758
+consul_leader_barrier_count 7530
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 8.299793243408203
+consul_leader_reapTombstones_count 2
+# HELP consul_leader_reconcile consul_leader_reconcile
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 1640.2054885923862
+consul_leader_reconcile_count 7530
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 923.1838235380128
+consul_leader_reconcileMember_count 9879
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="wan",quantile="0.5"} 0.013411000370979309
+consul_memberlist_gossip{network="wan",quantile="0.9"} 0.01651100069284439
+consul_memberlist_gossip{network="wan",quantile="0.99"} 0.017091000452637672
+consul_memberlist_gossip_sum{network="wan"} 12186.142546130694
+consul_memberlist_gossip_count{network="wan"} 903629
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.5"} 0.01858999952673912
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.9"} 0.02322000078856945
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.99"} 0.03482099995017052
+consul_memberlist_gossip_sum{network="lan",partition="default",segment=""} 38046.85491481074
+consul_memberlist_gossip_count{network="lan",partition="default",segment=""} 2.259067e+06
+# HELP consul_memberlist_node_instances_instances consul_memberlist_node_instances_instances
+# TYPE consul_memberlist_node_instances_instances gauge
+consul_memberlist_node_instances_instances{network="lan",node_state="alive",partition="default",segment=""} 2
+consul_memberlist_node_instances_instances{network="lan",node_state="dead",partition="default",segment=""} 0
+consul_memberlist_node_instances_instances{network="lan",node_state="left",partition="default",segment=""} 0
+consul_memberlist_node_instances_instances{network="lan",node_state="suspect",partition="default",segment=""} 0
+# HELP consul_memberlist_probeNode consul_memberlist_probeNode
+# TYPE consul_memberlist_probeNode summary
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.5"} 1.3738830089569092
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.9"} 1.4592169523239136
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.99"} 1.4592169523239136
+consul_memberlist_probeNode_sum{network="lan",partition="default",segment=""} 44756.27836251259
+consul_memberlist_probeNode_count{network="lan",partition="default",segment=""} 30847
+# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode
+# TYPE consul_memberlist_pushPullNode summary
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.5"} 2.5498108863830566
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.9"} 2.5498108863830566
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.99"} 2.5498108863830566
+consul_memberlist_pushPullNode_sum{network="lan",partition="default",segment=""} 5021.0542075634
+consul_memberlist_pushPullNode_count{network="lan",partition="default",segment=""} 1773
+# HELP consul_memberlist_queue_broadcasts consul_memberlist_queue_broadcasts
+# TYPE consul_memberlist_queue_broadcasts summary
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.5"} 0
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.9"} 0
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.99"} 0
+consul_memberlist_queue_broadcasts_sum{network="wan"} 0
+consul_memberlist_queue_broadcasts_count{network="wan"} 15060
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.5"} 0
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.9"} 0
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.99"} 0
+consul_memberlist_queue_broadcasts_sum{network="lan",partition="default",segment=""} 0
+consul_memberlist_queue_broadcasts_count{network="lan",partition="default",segment=""} 15060
+# HELP consul_memberlist_size_local_local consul_memberlist_size_local_local
+# TYPE consul_memberlist_size_local_local gauge
+consul_memberlist_size_local_local{network="lan",partition="default",segment=""} 2.208582144e+09
+# HELP consul_memberlist_size_remote consul_memberlist_size_remote
+# TYPE consul_memberlist_size_remote summary
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.5"} 717
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.9"} 717
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.99"} 717
+consul_memberlist_size_remote_sum{network="lan",partition="default",segment=""} 2.538313e+06
+consul_memberlist_size_remote_count{network="lan",partition="default",segment=""} 3549
+# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept
+# TYPE consul_memberlist_tcp_accept counter
+consul_memberlist_tcp_accept{network="lan",partition="default",segment=""} 1776
+# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect
+# TYPE consul_memberlist_tcp_connect counter
+consul_memberlist_tcp_connect{network="lan",partition="default",segment=""} 1773
+# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent
+# TYPE consul_memberlist_tcp_sent counter
+consul_memberlist_tcp_sent{network="lan",partition="default",segment=""} 3.206921e+06
+# HELP consul_memberlist_udp_received consul_memberlist_udp_received
+# TYPE consul_memberlist_udp_received counter
+consul_memberlist_udp_received{network="lan"} 9.221042e+06
+# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent
+# TYPE consul_memberlist_udp_sent counter
+consul_memberlist_udp_sent{network="lan",partition="default",segment=""} 9.218109e+06
+# HELP consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_members_clients gauge
+consul_members_clients 0
+# HELP consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_members_servers gauge
+consul_members_servers 0
+# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour
+# TYPE consul_mesh_active_root_ca_expiry gauge
+consul_mesh_active_root_ca_expiry 0
+# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour
+# TYPE consul_mesh_active_signing_ca_expiry gauge
+consul_mesh_active_signing_ca_expiry 0
+# HELP consul_namespace_read consul_namespace_read
+# TYPE consul_namespace_read summary
+consul_namespace_read{quantile="0.5"} 0.06529200077056885
+consul_namespace_read{quantile="0.9"} 0.12670400738716125
+consul_namespace_read{quantile="0.99"} 0.12670400738716125
+consul_namespace_read_sum 2885.675253532827
+consul_namespace_read_count 30042
+# HELP consul_partition_list consul_partition_list
+# TYPE consul_partition_list summary
+consul_partition_list{quantile="0.5"} NaN
+consul_partition_list{quantile="0.9"} NaN
+consul_partition_list{quantile="0.99"} NaN
+consul_partition_list_sum 325.827104203403
+consul_partition_list_count 4138
+# HELP consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peer_name", "peer_id" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_peering_exported_services gauge
+consul_peering_exported_services 0
+# HELP consul_peering_healthy A gauge that tracks how if a peering is healthy (1) or not (0). The labels are "peer_name", "peer_id" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_peering_healthy gauge
+consul_peering_healthy 0
+# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update.
+# TYPE consul_prepared_query_apply summary
+consul_prepared_query_apply{quantile="0.5"} NaN
+consul_prepared_query_apply{quantile="0.9"} NaN
+consul_prepared_query_apply{quantile="0.99"} NaN
+consul_prepared_query_apply_sum 0
+consul_prepared_query_apply_count 0
+# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request.
+# TYPE consul_prepared_query_execute summary
+consul_prepared_query_execute{quantile="0.5"} NaN
+consul_prepared_query_execute{quantile="0.9"} NaN
+consul_prepared_query_execute{quantile="0.99"} NaN
+consul_prepared_query_execute_sum 0
+consul_prepared_query_execute_count 0
+# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.
+# TYPE consul_prepared_query_execute_remote summary
+consul_prepared_query_execute_remote{quantile="0.5"} NaN
+consul_prepared_query_execute_remote{quantile="0.9"} NaN
+consul_prepared_query_execute_remote{quantile="0.99"} NaN
+consul_prepared_query_execute_remote_sum 0
+consul_prepared_query_execute_remote_count 0
+# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request.
+# TYPE consul_prepared_query_explain summary
+consul_prepared_query_explain{quantile="0.5"} NaN
+consul_prepared_query_explain{quantile="0.9"} NaN
+consul_prepared_query_explain{quantile="0.99"} NaN
+consul_prepared_query_explain_sum 0
+consul_prepared_query_explain_count 0
+# HELP consul_raft_applied_index Represents the raft applied index.
+# TYPE consul_raft_applied_index gauge
+consul_raft_applied_index 0
+# HELP consul_raft_applied_index_applied_index consul_raft_applied_index_applied_index
+# TYPE consul_raft_applied_index_applied_index gauge
+consul_raft_applied_index_applied_index 145203
+# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval.
+# TYPE consul_raft_apply counter
+consul_raft_apply 115252
+# HELP consul_raft_barrier consul_raft_barrier
+# TYPE consul_raft_barrier counter
+consul_raft_barrier 7530
+# HELP consul_raft_boltdb_freePageBytes_freePageBytes consul_raft_boltdb_freePageBytes_freePageBytes
+# TYPE consul_raft_boltdb_freePageBytes_freePageBytes gauge
+consul_raft_boltdb_freePageBytes_freePageBytes 1.3307904e+07
+# HELP consul_raft_boltdb_freelistBytes_freelistBytes consul_raft_boltdb_freelistBytes_freelistBytes
+# TYPE consul_raft_boltdb_freelistBytes_freelistBytes gauge
+consul_raft_boltdb_freelistBytes_freelistBytes 26008
+# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog
+# TYPE consul_raft_boltdb_getLog summary
+consul_raft_boltdb_getLog{quantile="0.5"} 0.06123099848628044
+consul_raft_boltdb_getLog{quantile="0.9"} 0.06123099848628044
+consul_raft_boltdb_getLog{quantile="0.99"} 0.06123099848628044
+consul_raft_boltdb_getLog_sum 1990.6473612803966
+consul_raft_boltdb_getLog_count 45019
+# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize
+# TYPE consul_raft_boltdb_logBatchSize summary
+consul_raft_boltdb_logBatchSize{quantile="0.5"} 1109
+consul_raft_boltdb_logBatchSize{quantile="0.9"} 1167
+consul_raft_boltdb_logBatchSize{quantile="0.99"} 1167
+consul_raft_boltdb_logBatchSize_sum 1.05877264e+08
+consul_raft_boltdb_logBatchSize_count 122794
+# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize
+# TYPE consul_raft_boltdb_logSize summary
+consul_raft_boltdb_logSize{quantile="0.5"} 1109
+consul_raft_boltdb_logSize{quantile="0.9"} 1167
+consul_raft_boltdb_logSize{quantile="0.99"} 1167
+consul_raft_boltdb_logSize_sum 1.05877264e+08
+consul_raft_boltdb_logSize_count 122856
+# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch
+# TYPE consul_raft_boltdb_logsPerBatch summary
+consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1
+consul_raft_boltdb_logsPerBatch_sum 122856
+consul_raft_boltdb_logsPerBatch_count 122794
+# HELP consul_raft_boltdb_numFreePages_numFreePages consul_raft_boltdb_numFreePages_numFreePages
+# TYPE consul_raft_boltdb_numFreePages_numFreePages gauge
+consul_raft_boltdb_numFreePages_numFreePages 3238
+# HELP consul_raft_boltdb_numPendingPages_numPendingPages consul_raft_boltdb_numPendingPages_numPendingPages
+# TYPE consul_raft_boltdb_numPendingPages_numPendingPages gauge
+consul_raft_boltdb_numPendingPages_numPendingPages 11
+# HELP consul_raft_boltdb_openReadTxn_openReadTxn consul_raft_boltdb_openReadTxn_openReadTxn
+# TYPE consul_raft_boltdb_openReadTxn_openReadTxn gauge
+consul_raft_boltdb_openReadTxn_openReadTxn 0
+# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs
+# TYPE consul_raft_boltdb_storeLogs summary
+consul_raft_boltdb_storeLogs{quantile="0.5"} 1.6733039617538452
+consul_raft_boltdb_storeLogs{quantile="0.9"} 2.21097993850708
+consul_raft_boltdb_storeLogs{quantile="0.99"} 2.21097993850708
+consul_raft_boltdb_storeLogs_sum 278437.40395510197
+consul_raft_boltdb_storeLogs_count 122794
+# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn
+# TYPE consul_raft_boltdb_totalReadTxn counter
+consul_raft_boltdb_totalReadTxn 100198
+# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount
+# TYPE consul_raft_boltdb_txstats_cursorCount counter
+consul_raft_boltdb_txstats_cursorCount 568889
+# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount
+# TYPE consul_raft_boltdb_txstats_nodeCount counter
+consul_raft_boltdb_txstats_nodeCount 537103
+# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref
+# TYPE consul_raft_boltdb_txstats_nodeDeref counter
+consul_raft_boltdb_txstats_nodeDeref 136
+# HELP consul_raft_boltdb_txstats_pageAlloc_pageAlloc consul_raft_boltdb_txstats_pageAlloc_pageAlloc
+# TYPE consul_raft_boltdb_txstats_pageAlloc_pageAlloc gauge
+consul_raft_boltdb_txstats_pageAlloc_pageAlloc 5.955145728e+09
+# HELP consul_raft_boltdb_txstats_pageCount_pageCount consul_raft_boltdb_txstats_pageCount_pageCount
+# TYPE consul_raft_boltdb_txstats_pageCount_pageCount gauge
+consul_raft_boltdb_txstats_pageCount_pageCount 1.453893e+06
+# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance
+# TYPE consul_raft_boltdb_txstats_rebalance counter
+consul_raft_boltdb_txstats_rebalance 91912
+# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime
+# TYPE consul_raft_boltdb_txstats_rebalanceTime summary
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0
+consul_raft_boltdb_txstats_rebalanceTime_sum 61.22855579853058
+consul_raft_boltdb_txstats_rebalanceTime_count 90364
+# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill
+# TYPE consul_raft_boltdb_txstats_spill counter
+consul_raft_boltdb_txstats_spill 545942
+# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime
+# TYPE consul_raft_boltdb_txstats_spillTime summary
+consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.19511699676513672
+consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.19511699676513672
+consul_raft_boltdb_txstats_spillTime_sum 3640.070483505726
+consul_raft_boltdb_txstats_spillTime_count 90364
+# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split
+# TYPE consul_raft_boltdb_txstats_split counter
+consul_raft_boltdb_txstats_split 55070
+# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write
+# TYPE consul_raft_boltdb_txstats_write counter
+consul_raft_boltdb_txstats_write 791562
+# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime
+# TYPE consul_raft_boltdb_txstats_writeTime summary
+consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 11.23631763458252
+consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 11.23631763458252
+consul_raft_boltdb_txstats_writeTime_sum 254982.9575778246
+consul_raft_boltdb_txstats_writeTime_count 90364
+# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity
+# TYPE consul_raft_boltdb_writeCapacity summary
+consul_raft_boltdb_writeCapacity{quantile="0.5"} 601.9552612304688
+consul_raft_boltdb_writeCapacity{quantile="0.9"} 635.841064453125
+consul_raft_boltdb_writeCapacity{quantile="0.99"} 635.841064453125
+consul_raft_boltdb_writeCapacity_sum 6.307136215111172e+07
+consul_raft_boltdb_writeCapacity_count 122794
+# HELP consul_raft_commitNumLogs_commitNumLogs consul_raft_commitNumLogs_commitNumLogs
+# TYPE consul_raft_commitNumLogs_commitNumLogs gauge
+consul_raft_commitNumLogs_commitNumLogs 1
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary +consul_raft_commitTime{quantile="0.5"} 1.7182049751281738 +consul_raft_commitTime{quantile="0.9"} 2.2621920108795166 +consul_raft_commitTime{quantile="0.99"} 2.2621920108795166 +consul_raft_commitTime_sum 284260.4287290573 +consul_raft_commitTime_count 122785 +# HELP consul_raft_fsm_apply consul_raft_fsm_apply +# TYPE consul_raft_fsm_apply summary +consul_raft_fsm_apply{quantile="0.5"} 0.16612499952316284 +consul_raft_fsm_apply{quantile="0.9"} 0.2391670048236847 +consul_raft_fsm_apply{quantile="0.99"} 0.2391670048236847 +consul_raft_fsm_apply_sum 24152.752846952528 +consul_raft_fsm_apply_count 115317 +# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue +# TYPE consul_raft_fsm_enqueue summary +consul_raft_fsm_enqueue{quantile="0.5"} 0.015490000136196613 +consul_raft_fsm_enqueue{quantile="0.9"} 0.04627100005745888 +consul_raft_fsm_enqueue{quantile="0.99"} 0.04627100005745888 +consul_raft_fsm_enqueue_sum 3328.7210418977775 +consul_raft_fsm_enqueue_count 122763 +# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took. +# TYPE consul_raft_fsm_lastRestoreDuration gauge +consul_raft_fsm_lastRestoreDuration 0 +# HELP consul_raft_last_index Represents the raft last index. +# TYPE consul_raft_last_index gauge +consul_raft_last_index 0 +# HELP consul_raft_last_index_last_index consul_raft_last_index_last_index +# TYPE consul_raft_last_index_last_index gauge +consul_raft_last_index_last_index 145203 +# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog +# TYPE consul_raft_leader_dispatchLog summary +consul_raft_leader_dispatchLog{quantile="0.5"} 1.7106239795684814 +consul_raft_leader_dispatchLog{quantile="0.9"} 2.249191999435425 +consul_raft_leader_dispatchLog{quantile="0.99"} 2.249191999435425 +consul_raft_leader_dispatchLog_sum 282281.0580151081 +consul_raft_leader_dispatchLog_count 122780 +# HELP consul_raft_leader_dispatchNumLogs_dispatchNumLogs consul_raft_leader_dispatchNumLogs_dispatchNumLogs +# TYPE consul_raft_leader_dispatchNumLogs_dispatchNumLogs gauge +consul_raft_leader_dispatchNumLogs_dispatchNumLogs 1 +# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. +# TYPE consul_raft_leader_lastContact summary +consul_raft_leader_lastContact{quantile="0.5"} NaN +consul_raft_leader_lastContact{quantile="0.9"} NaN +consul_raft_leader_lastContact{quantile="0.99"} NaN +consul_raft_leader_lastContact_sum 598 +consul_raft_leader_lastContact_count 19 +# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is. +# TYPE consul_raft_leader_oldestLogAge gauge +consul_raft_leader_oldestLogAge 0 +# HELP consul_raft_leader_oldestLogAge_oldestLogAge consul_raft_leader_oldestLogAge_oldestLogAge +# TYPE consul_raft_leader_oldestLogAge_oldestLogAge gauge +consul_raft_leader_oldestLogAge_oldestLogAge 6.8835264e+07 +# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster. 
+# TYPE consul_raft_rpc_installSnapshot summary +consul_raft_rpc_installSnapshot{quantile="0.5"} NaN +consul_raft_rpc_installSnapshot{quantile="0.9"} NaN +consul_raft_rpc_installSnapshot{quantile="0.99"} NaN +consul_raft_rpc_installSnapshot_sum 473.0382385253906 +consul_raft_rpc_installSnapshot_count 1 +# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk. +# TYPE consul_raft_snapshot_persist summary +consul_raft_snapshot_persist{quantile="0.5"} NaN +consul_raft_snapshot_persist{quantile="0.9"} NaN +consul_raft_snapshot_persist{quantile="0.99"} NaN +consul_raft_snapshot_persist_sum 457.33628499507904 +consul_raft_snapshot_persist_count 10 +# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election. +# TYPE consul_raft_state_candidate counter +consul_raft_state_candidate 1 +# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader. +# TYPE consul_raft_state_leader counter +consul_raft_state_leader 1 +# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation +# TYPE consul_raft_thread_fsm_saturation summary +consul_raft_thread_fsm_saturation{quantile="0.5"} 0 +consul_raft_thread_fsm_saturation{quantile="0.9"} 0 +consul_raft_thread_fsm_saturation{quantile="0.99"} 0 +consul_raft_thread_fsm_saturation_sum 0.7299999818205833 +consul_raft_thread_fsm_saturation_count 44326 +# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation +# TYPE consul_raft_thread_main_saturation summary +consul_raft_thread_main_saturation{quantile="0.5"} 0 +consul_raft_thread_main_saturation{quantile="0.9"} 0 +consul_raft_thread_main_saturation{quantile="0.99"} 0.009999999776482582 +consul_raft_thread_main_saturation_sum 213.059995315969 +consul_raft_thread_main_saturation_count 451221 +# HELP consul_raft_verify_leader consul_raft_verify_leader +# TYPE consul_raft_verify_leader counter +consul_raft_verify_leader 2 +# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection. +# TYPE consul_rpc_accept_conn counter +consul_rpc_accept_conn 39 +# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed. +# TYPE consul_rpc_consistentRead summary +consul_rpc_consistentRead{quantile="0.5"} NaN +consul_rpc_consistentRead{quantile="0.9"} NaN +consul_rpc_consistentRead{quantile="0.99"} NaN +consul_rpc_consistentRead_sum 85.52406929805875 +consul_rpc_consistentRead_count 1600 +# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query. +# TYPE consul_rpc_cross_dc counter +consul_rpc_cross_dc 0 +# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling. +# TYPE consul_rpc_queries_blocking gauge +consul_rpc_queries_blocking 0 +# HELP consul_rpc_queries_blocking_queries_blocking consul_rpc_queries_blocking_queries_blocking +# TYPE consul_rpc_queries_blocking_queries_blocking gauge +consul_rpc_queries_blocking_queries_blocking 20 +# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries. +# TYPE consul_rpc_query counter +consul_rpc_query 261853 +# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection. +# TYPE consul_rpc_raft_handoff counter +consul_rpc_raft_handoff 3 +# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request. 
+# TYPE consul_rpc_request counter +consul_rpc_request 233395 +# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request. +# TYPE consul_rpc_request_error counter +consul_rpc_request_error 0 +# HELP consul_runtime_alloc_bytes_alloc_bytes consul_runtime_alloc_bytes_alloc_bytes +# TYPE consul_runtime_alloc_bytes_alloc_bytes gauge +consul_runtime_alloc_bytes_alloc_bytes 5.1729856e+07 +# HELP consul_runtime_free_count_free_count consul_runtime_free_count_free_count +# TYPE consul_runtime_free_count_free_count gauge +consul_runtime_free_count_free_count 1.513573888e+09 +# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns +# TYPE consul_runtime_gc_pause_ns summary +consul_runtime_gc_pause_ns{quantile="0.5"} NaN +consul_runtime_gc_pause_ns{quantile="0.9"} NaN +consul_runtime_gc_pause_ns{quantile="0.99"} NaN +consul_runtime_gc_pause_ns_sum 8.32754022e+08 +consul_runtime_gc_pause_ns_count 4172 +# HELP consul_runtime_heap_objects_heap_objects consul_runtime_heap_objects_heap_objects +# TYPE consul_runtime_heap_objects_heap_objects gauge +consul_runtime_heap_objects_heap_objects 309596 +# HELP consul_runtime_malloc_count_malloc_count consul_runtime_malloc_count_malloc_count +# TYPE consul_runtime_malloc_count_malloc_count gauge +consul_runtime_malloc_count_malloc_count 1.51388352e+09 +# HELP consul_runtime_num_goroutines_num_goroutines consul_runtime_num_goroutines_num_goroutines +# TYPE consul_runtime_num_goroutines_num_goroutines gauge +consul_runtime_num_goroutines_num_goroutines 305 +# HELP consul_runtime_sys_bytes_sys_bytes consul_runtime_sys_bytes_sys_bytes +# TYPE consul_runtime_sys_bytes_sys_bytes gauge +consul_runtime_sys_bytes_sys_bytes 1.6015696e+08 +# HELP consul_runtime_total_gc_pause_ns_total_gc_pause_ns consul_runtime_total_gc_pause_ns_total_gc_pause_ns +# TYPE consul_runtime_total_gc_pause_ns_total_gc_pause_ns gauge +consul_runtime_total_gc_pause_ns_total_gc_pause_ns 8.32754048e+08 +# HELP consul_runtime_total_gc_runs_total_gc_runs consul_runtime_total_gc_runs_total_gc_runs +# TYPE consul_runtime_total_gc_runs_total_gc_runs gauge +consul_runtime_total_gc_runs_total_gc_runs 4172 +# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms +# TYPE consul_serf_coordinate_adjustment_ms summary +consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.5"} 0.31390100717544556 +consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.9"} 0.31821900606155396 +consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.99"} 0.31821900606155396 +consul_serf_coordinate_adjustment_ms_sum{network="lan",partition="default",segment=""} 23996.035400994588 +consul_serf_coordinate_adjustment_ms_count{network="lan",partition="default",segment=""} 30847 +# HELP consul_serf_queue_Event consul_serf_queue_Event +# TYPE consul_serf_queue_Event summary +consul_serf_queue_Event{network="wan",quantile="0.5"} 0 +consul_serf_queue_Event{network="wan",quantile="0.9"} 0 +consul_serf_queue_Event{network="wan",quantile="0.99"} 0 +consul_serf_queue_Event_sum{network="wan"} 0 +consul_serf_queue_Event_count{network="wan"} 15060 +consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.5"} 0 +consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.9"} 0 +consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.99"} 0 
+consul_serf_queue_Event_sum{network="lan",partition="default",segment=""} 6429 +consul_serf_queue_Event_count{network="lan",partition="default",segment=""} 15060 +# HELP consul_serf_queue_Intent consul_serf_queue_Intent +# TYPE consul_serf_queue_Intent summary +consul_serf_queue_Intent{network="wan",quantile="0.5"} 0 +consul_serf_queue_Intent{network="wan",quantile="0.9"} 0 +consul_serf_queue_Intent{network="wan",quantile="0.99"} 0 +consul_serf_queue_Intent_sum{network="wan"} 0 +consul_serf_queue_Intent_count{network="wan"} 15060 +consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.5"} 0 +consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.9"} 0 +consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.99"} 0 +consul_serf_queue_Intent_sum{network="lan",partition="default",segment=""} 0 +consul_serf_queue_Intent_count{network="lan",partition="default",segment=""} 15060 +# HELP consul_serf_queue_Query consul_serf_queue_Query +# TYPE consul_serf_queue_Query summary +consul_serf_queue_Query{network="wan",quantile="0.5"} 0 +consul_serf_queue_Query{network="wan",quantile="0.9"} 0 +consul_serf_queue_Query{network="wan",quantile="0.99"} 0 +consul_serf_queue_Query_sum{network="wan"} 0 +consul_serf_queue_Query_count{network="wan"} 15060 +consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.5"} 0 +consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.9"} 0 +consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.99"} 0 +consul_serf_queue_Query_sum{network="lan",partition="default",segment=""} 0 +consul_serf_queue_Query_count{network="lan",partition="default",segment=""} 15060 +# HELP consul_server_isLeader Tracks if the server is a leader. +# TYPE consul_server_isLeader gauge +consul_server_isLeader 0 +# HELP consul_server_isLeader_isLeader consul_server_isLeader_isLeader +# TYPE consul_server_isLeader_isLeader gauge +consul_server_isLeader_isLeader 1 +# HELP consul_session_apply Measures the time spent applying a session update. +# TYPE consul_session_apply summary +consul_session_apply{quantile="0.5"} NaN +consul_session_apply{quantile="0.9"} NaN +consul_session_apply{quantile="0.99"} NaN +consul_session_apply_sum 0 +consul_session_apply_count 0 +# HELP consul_session_renew Measures the time spent renewing a session. +# TYPE consul_session_renew summary +consul_session_renew{quantile="0.5"} NaN +consul_session_renew{quantile="0.9"} NaN +consul_session_renew{quantile="0.99"} NaN +consul_session_renew_sum 0 +consul_session_renew_count 0 +# HELP consul_session_ttl_active Tracks the active number of sessions being tracked. +# TYPE consul_session_ttl_active gauge +consul_session_ttl_active 0 +# HELP consul_session_ttl_active_active consul_session_ttl_active_active +# TYPE consul_session_ttl_active_active gauge +consul_session_ttl_active_active 0 +# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session. +# TYPE consul_session_ttl_invalidate summary +consul_session_ttl_invalidate{quantile="0.5"} NaN +consul_session_ttl_invalidate{quantile="0.9"} NaN +consul_session_ttl_invalidate{quantile="0.99"} NaN +consul_session_ttl_invalidate_sum 0 +consul_session_ttl_invalidate_count 0 +# HELP consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4. 
+# TYPE consul_state_config_entries gauge +consul_state_config_entries 0 +# HELP consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4. +# TYPE consul_state_connect_instances gauge +consul_state_connect_instances 0 +# HELP consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3. +# TYPE consul_state_kv_entries gauge +consul_state_kv_entries 0 +# HELP consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_state_nodes gauge +consul_state_nodes 0 +# HELP consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0. +# TYPE consul_state_peerings gauge +consul_state_peerings 0 +# HELP consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_state_service_instances gauge +consul_state_service_instances 0 +# HELP consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0. +# TYPE consul_state_services gauge +consul_state_services 0 +# HELP consul_system_licenseExpiration Represents the number of hours until the current license is going to expire +# TYPE consul_system_licenseExpiration gauge +consul_system_licenseExpiration 0 +# HELP consul_system_licenseExpiration_licenseExpiration consul_system_licenseExpiration_licenseExpiration +# TYPE consul_system_licenseExpiration_licenseExpiration gauge +consul_system_licenseExpiration_licenseExpiration 819.429443359375 +# HELP consul_txn_apply Measures the time spent applying a transaction operation. +# TYPE consul_txn_apply summary +consul_txn_apply{quantile="0.5"} NaN +consul_txn_apply{quantile="0.9"} NaN +consul_txn_apply{quantile="0.99"} NaN +consul_txn_apply_sum 0 +consul_txn_apply_count 0 +# HELP consul_txn_read Measures the time spent returning a read transaction. +# TYPE consul_txn_read summary +consul_txn_read{quantile="0.5"} NaN +consul_txn_read{quantile="0.9"} NaN +consul_txn_read{quantile="0.99"} NaN +consul_txn_read_sum 0 +consul_txn_read_count 0 +# HELP consul_version Represents the Consul version. +# TYPE consul_version gauge +consul_version 0 +# HELP consul_xds_server_idealStreamsMax The maximum number of xDS streams per server, chosen to achieve a roughly even spread of load across servers. +# TYPE consul_xds_server_idealStreamsMax gauge +consul_xds_server_idealStreamsMax 0 +# HELP consul_xds_server_streamDrained Counts the number of xDS streams that are drained when rebalancing the load between servers. +# TYPE consul_xds_server_streamDrained counter +consul_xds_server_streamDrained 0 +# HELP consul_xds_server_streamStart Measures the time in milliseconds after an xDS stream is opened until xDS resources are first generated for the stream. 
+# TYPE consul_xds_server_streamStart summary +consul_xds_server_streamStart{quantile="0.5"} NaN +consul_xds_server_streamStart{quantile="0.9"} NaN +consul_xds_server_streamStart{quantile="0.99"} NaN +consul_xds_server_streamStart_sum 3501.488723754883 +consul_xds_server_streamStart_count 11 +# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version. +# TYPE consul_xds_server_streams gauge +consul_xds_server_streams 0 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 3.7191e-05 +go_gc_duration_seconds{quantile="0.25"} 6.1463e-05 +go_gc_duration_seconds{quantile="0.5"} 7.7062e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000115923 +go_gc_duration_seconds{quantile="1"} 0.001147196 +go_gc_duration_seconds_sum 0.832754027 +go_gc_duration_seconds_count 4172 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 313 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.19.4"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 5.195244e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 1.0251245704e+11 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 4.77878e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 1.51357406e+09 +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 2.663750489550345e-05 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 1.5347888e+07 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 5.195244e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 7.4121216e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 6.1472768e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 311688 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 5.914624e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 1.35593984e+08 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.6741251000160766e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 1.513885748e+09 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 2400 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 15600 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 712656 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 943776 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 7.2274088e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 658892 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 2.818048e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 2.818048e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 1.60156968e+08 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 10 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 4001.82 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 65536 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 45 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 1.30408448e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.67367331028e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.046990848e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
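Several families in this fixture are Prometheus summaries: per-quantile samples plus `_sum` and `_count` series, so the mean observation is simply `_sum / _count`. For `consul_raft_commitTime` above that is 284260.43 / 122785 ≈ 2.32 (milliseconds, per Consul's timing convention). A sketch using the same `pkg/prometheus` series API the collectors in this diff already use (`ScrapeSeries`, `FindByName`, `Max`); the `summaryMean` helper is an illustrative name, not part of this PR:

```go
// Sketch only: derive the mean observation of a Prometheus summary from
// its _sum and _count series.
package sketch

import "github.com/netdata/go.d.plugin/pkg/prometheus"

// summaryMean returns sum/count for the named summary family, e.g.
// summaryMean(raw, "consul_raft_commitTime") over the fixture above
// yields roughly 2.32.
func summaryMean(raw prometheus.Series, name string) float64 {
	count := raw.FindByName(name + "_count").Max()
	if count == 0 {
		return 0
	}
	return raw.FindByName(name+"_sum").Max() / count
}
```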
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes -1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json new file mode 100644 index 00000000000000..8a11b7d0e82df9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json @@ -0,0 +1,71 @@ +{ + "Config": { + "Datacenter": "consul-sandbox-cluster-0159c9d3", + "PrimaryDatacenter": "consul-sandbox-cluster-0159c9d3", + "NodeName": "ip-172-25-37-57", + "NodeID": "b1906d81-c585-7c2c-1236-a5404b7fa7ca", + "Revision": "55a184d3", + "Server": true, + "Version": "1.14.3+ent", + "BuildDate": "2022-12-13T17:12:10Z" + }, + "DebugConfig": { + "Cloud": { + "AuthURL": "", + "ClientID": "492e9e67-6386-4727-964f-8a41305f30a5", + "ClientSecret": "hidden", + "Hostname": "", + "ResourceID": "organization/1/project/2/hashicorp.consul.cluster/3", + "ScadaAddress": "" + }, + "Telemetry": { + "AllowedPrefixes": [], + "BlockedPrefixes": [ + "consul.rpc.server.call" + ], + "CirconusAPIApp": "", + "CirconusAPIToken": "hidden", + "CirconusAPIURL": "", + "CirconusBrokerID": "", + "CirconusBrokerSelectTag": "", + "CirconusCheckDisplayName": "", + "CirconusCheckForceMetricActivation": "", + "CirconusCheckID": "", + "CirconusCheckInstanceID": "", + "CirconusCheckSearchTag": "", + "CirconusCheckTags": "", + "CirconusSubmissionInterval": "", + "CirconusSubmissionURL": "", + "Disable": false, + "DisableHostname": false, + "DogstatsdAddr": "127.0.0.1:8125", + "DogstatsdTags": [], + "FilterDefault": true, + "MetricsPrefix": "consul", + "PrometheusOpts": { + "CounterDefinitions": [], + "Expiration": "5m0s", + "GaugeDefinitions": [], + "Name": "consul", + "Registerer": null, + "SummaryDefinitions": [] + }, + "RetryFailedConfiguration": true, + "StatsdAddr": "", + "StatsiteAddr": "" + } + }, + "Stats": { + "license": { + "customer": "a1c27ed4-43a4-4192-9f39-14e1166d2d2e", + "expiration_time": "2023-02-22 14:11:12.877172615 +0000 UTC", + "features": "Automated Backups, Automated Upgrades, Namespaces, SSO, Audit Logging, Admin Partitions", + "id": "492e9e67-6386-4727-964f-8a41305f30a5", + "install_id": "*", + "issue_time": "2023-01-15 14:11:12.877172615 +0000 UTC", + "modules": "Governance and Policy", + "product": "consul", + "start_time": "2023-01-15 14:11:12.877172615 +0000 UTC" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json new file mode 100644 index 00000000000000..bfe44c7fc38e2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json @@ -0,0 +1,42 @@ +[ + { + "Node": "ip-10-50-133-93", + "Segment": "", + "Partition": "default", + "Coord": { + "Vec": [ + -0.0005406415790908119, + -0.005125240204547753, + -0.0010556502711423538, + -0.00223296135134459, + 0.002051567080576126, + -0.004494795954099239, + -0.0010621855776488467, + 0.0013985871196457514 + ], + "Error": 0.056466891936309965, + "Adjustment": -0.0004925342111843478, + "Height": 0.00043853135504766936 + } + }, + { + "Node": "ip-172-25-37-57", + "Segment": "", + "Partition": "default", + "Coord": { + "Vec": [ + -0.00041456488713690183, + -0.0039300429073992685, + -0.0008094743964577936, + -0.001712238560569221, + 
0.0015731451331568297, + -0.00344661716784539, + -0.0008144857045591224, + 0.0010724389795601075 + ], + "Error": 0.0223287150164881, + "Adjustment": -0.0004893904130922427, + "Height": 5.5788597108650077e-05 + } + } +] diff --git a/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json new file mode 100644 index 00000000000000..0daa492c06172c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json @@ -0,0 +1,68 @@ +{ + "chk1": { + "Node": "mysql1", + "CheckID": "chk1", + "Name": "ssh", + "Status": "passing", + "Notes": "", + "Output": "TCP connect 127.0.0.1:22: Success", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "chk2": { + "Node": "mysql1", + "CheckID": "chk2", + "Name": "telnet", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:23: connect: connection refused", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "chk3": { + "Node": "mysql1", + "CheckID": "chk3", + "Name": "telnet", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:23: connect: connection refused", + "ServiceID": "", + "ServiceName": "", + "ServiceTags": [ + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + }, + "mysql": { + "Node": "mysql1", + "CheckID": "mysql", + "Name": "MYSQL TCP on port 3336", + "Status": "critical", + "Notes": "", + "Output": "dial tcp 127.0.0.1:3336: connect: connection refused", + "ServiceID": "mysql0", + "ServiceName": "mysql", + "ServiceTags": [ + "primary", + "secondary" + ], + "Definition": { + }, + "CreateIndex": 0, + "ModifyIndex": 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/README.md b/src/go/collectors/go.d.plugin/modules/coredns/README.md new file mode 120000 index 00000000000000..fcd7e55447531c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/README.md @@ -0,0 +1 @@ +integrations/coredns.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/coredns/charts.go b/src/go/collectors/go.d.plugin/modules/coredns/charts.go new file mode 100644 index 00000000000000..f4d4639f1d79b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/charts.go @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Chart is an alias for module.Chart + Chart = module.Chart + // Dims is an alias for module.Dims + Dims = module.Dims + // Dim is an alias for module.Dim + Dim = module.Dim +) + +var summaryCharts = Charts{ + { + ID: "dns_request_count_total", + Title: "Number Of DNS Requests", + Units: "requests/s", + Fam: "summary", + Ctx: "coredns.dns_request_count_total", + Dims: Dims{ + {ID: "request_total", Name: "requests", Algo: module.Incremental}, + }, + }, + { + ID: "dns_responses_count_total", + Title: "Number Of DNS Responses", + Units: "responses/s", + Fam: "summary", + Ctx: "coredns.dns_responses_count_total", + Dims: Dims{ + {ID: "response_total", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "dns_request_count_total_per_status", + Title: "Number Of Processed And Dropped DNS Requests", + Units: "requests/s", + Fam: "summary", + Ctx: 
"coredns.dns_request_count_total_per_status", + Type: module.Stacked, + Dims: Dims{ + {ID: "request_per_status_processed", Name: "processed", Algo: module.Incremental}, + {ID: "request_per_status_dropped", Name: "dropped", Algo: module.Incremental}, + }, + }, + { + ID: "dns_no_matching_zone_dropped_total", + Title: "Number Of Dropped DNS Requests Because Of No Matching Zone", + Units: "requests/s", + Fam: "summary", + Ctx: "coredns.dns_no_matching_zone_dropped_total", + Dims: Dims{ + {ID: "no_matching_zone_dropped_total", Name: "dropped", Algo: module.Incremental}, + }, + }, + { + ID: "dns_panic_count_total", + Title: "Number Of Panics", + Units: "panics/s", + Fam: "summary", + Ctx: "coredns.dns_panic_count_total", + Dims: Dims{ + {ID: "panic_total", Name: "panics", Algo: module.Incremental}, + }, + }, + { + ID: "dns_requests_count_total_per_proto", + Title: "Number Of DNS Requests Per Transport Protocol", + Units: "requests/s", + Fam: "summary", + Ctx: "coredns.dns_requests_count_total_per_proto", + Type: module.Stacked, + Dims: Dims{ + {ID: "request_per_proto_udp", Name: "udp", Algo: module.Incremental}, + {ID: "request_per_proto_tcp", Name: "tcp", Algo: module.Incremental}, + }, + }, + { + ID: "dns_requests_count_total_per_ip_family", + Title: "Number Of DNS Requests Per IP Family", + Units: "requests/s", + Fam: "summary", + Ctx: "coredns.dns_requests_count_total_per_ip_family", + Type: module.Stacked, + Dims: Dims{ + {ID: "request_per_ip_family_v4", Name: "v4", Algo: module.Incremental}, + {ID: "request_per_ip_family_v6", Name: "v6", Algo: module.Incremental}, + }, + }, + //{ + // ID: "dns_requests_duration_seconds", + // Title: "Number Of DNS Requests Per Bucket", + // Units: "requests/s", + // Fam: "summary", + // Ctx: "coredns.dns_requests_duration_seconds", + // Type: module.Stacked, + // Dims: Dims{ + // {ID: "request_duration_seconds_bucket_0.00025", Name: "0.00025s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.0005", Name: "0.0005s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.001", Name: "0.001s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.002", Name: "0.002s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.004", Name: "0.004s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.008", Name: "0.008s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.016", Name: "0.016s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.032", Name: "0.032s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.064", Name: "0.064s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.128", Name: "0.128s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.256", Name: "0.256s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_0.512", Name: "0.512s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_1.024", Name: "1.024s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_2.048", Name: "2.048s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_4.096", Name: "4.096s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_8.192", Name: "8.192s", Algo: module.Incremental}, + // {ID: "request_duration_seconds_bucket_+Inf", Name: "+Inf", Algo: module.Incremental}, + // }, + //}, + { + ID: "dns_requests_count_total_per_type", + Title: "Number Of DNS Requests Per Type", + 
Units: "requests/s", + Fam: "summary", + Ctx: "coredns.dns_requests_count_total_per_per_type", + Type: module.Stacked, + Dims: Dims{ + {ID: "request_per_type_A", Name: "A", Algo: module.Incremental}, + {ID: "request_per_type_AAAA", Name: "AAAA", Algo: module.Incremental}, + {ID: "request_per_type_MX", Name: "MX", Algo: module.Incremental}, + {ID: "request_per_type_SOA", Name: "SOA", Algo: module.Incremental}, + {ID: "request_per_type_CNAME", Name: "CNAME", Algo: module.Incremental}, + {ID: "request_per_type_PTR", Name: "PTR", Algo: module.Incremental}, + {ID: "request_per_type_TXT", Name: "TXT", Algo: module.Incremental}, + {ID: "request_per_type_NS", Name: "NS", Algo: module.Incremental}, + {ID: "request_per_type_DS", Name: "DS", Algo: module.Incremental}, + {ID: "request_per_type_DNSKEY", Name: "DNSKEY", Algo: module.Incremental}, + {ID: "request_per_type_RRSIG", Name: "RRSIG", Algo: module.Incremental}, + {ID: "request_per_type_NSEC", Name: "NSEC", Algo: module.Incremental}, + {ID: "request_per_type_NSEC3", Name: "NSEC3", Algo: module.Incremental}, + {ID: "request_per_type_IXFR", Name: "IXFR", Algo: module.Incremental}, + {ID: "request_per_type_ANY", Name: "ANY", Algo: module.Incremental}, + {ID: "request_per_type_other", Name: "other", Algo: module.Incremental}, + }, + }, + { + ID: "dns_responses_count_total_per_rcode", + Title: "Number Of DNS Responses Per Rcode", + Units: "responses/s", + Fam: "summary", + Ctx: "coredns.dns_responses_count_total_per_rcode", + Type: module.Stacked, + Dims: Dims{ + {ID: "response_per_rcode_NOERROR", Name: "NOERROR", Algo: module.Incremental}, + {ID: "response_per_rcode_FORMERR", Name: "FORMERR", Algo: module.Incremental}, + {ID: "response_per_rcode_SERVFAIL", Name: "SERVFAIL", Algo: module.Incremental}, + {ID: "response_per_rcode_NXDOMAIN", Name: "NXDOMAIN", Algo: module.Incremental}, + {ID: "response_per_rcode_NOTIMP", Name: "NOTIMP", Algo: module.Incremental}, + {ID: "response_per_rcode_REFUSED", Name: "REFUSED", Algo: module.Incremental}, + {ID: "response_per_rcode_YXDOMAIN", Name: "YXDOMAIN", Algo: module.Incremental}, + {ID: "response_per_rcode_YXRRSET", Name: "YXRRSET", Algo: module.Incremental}, + {ID: "response_per_rcode_NXRRSET", Name: "NXRRSET", Algo: module.Incremental}, + {ID: "response_per_rcode_NOTAUTH", Name: "NOTAUTH", Algo: module.Incremental}, + {ID: "response_per_rcode_NOTZONE", Name: "NOTZONE", Algo: module.Incremental}, + {ID: "response_per_rcode_BADSIG", Name: "BADSIG", Algo: module.Incremental}, + {ID: "response_per_rcode_BADKEY", Name: "BADKEY", Algo: module.Incremental}, + {ID: "response_per_rcode_BADTIME", Name: "BADTIME", Algo: module.Incremental}, + {ID: "response_per_rcode_BADMODE", Name: "BADMODE", Algo: module.Incremental}, + {ID: "response_per_rcode_BADNAME", Name: "BADNAME", Algo: module.Incremental}, + {ID: "response_per_rcode_BADALG", Name: "BADALG", Algo: module.Incremental}, + {ID: "response_per_rcode_BADTRUNC", Name: "BADTRUNC", Algo: module.Incremental}, + {ID: "response_per_rcode_BADCOOKIE", Name: "BADCOOKIE", Algo: module.Incremental}, + {ID: "response_per_rcode_other", Name: "other", Algo: module.Incremental}, + }, + }, +} + +var serverCharts = Charts{ + { + ID: "per_%s_%s_dns_request_count_total", + Title: "Number Of DNS Requests, %s %s", + Units: "requests/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_request_count_total", + Dims: Dims{ + {ID: "%s_request_total", Name: "requests", Algo: module.Incremental}, + }, + }, + { + ID: "per_%s_%s_dns_responses_count_total", + Title: "Number Of DNS Responses, %s %s", 
+ Units: "responses/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_responses_count_total", + Dims: Dims{ + {ID: "%s_response_total", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "per_%s_%s_dns_request_count_total_per_status", + Title: "Number Of Processed And Dropped DNS Requests, %s %s", + Units: "requests/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_request_count_total_per_status", + Type: module.Stacked, + Dims: Dims{ + {ID: "%s_request_per_status_processed", Name: "processed", Algo: module.Incremental}, + {ID: "%s_request_per_status_dropped", Name: "dropped", Algo: module.Incremental}, + }, + }, + { + ID: "per_%s_%s_dns_requests_count_total_per_proto", + Title: "Number Of DNS Requests Per Transport Protocol, %s %s", + Units: "requests/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_requests_count_total_per_proto", + Type: module.Stacked, + Dims: Dims{ + {ID: "%s_request_per_proto_udp", Name: "udp", Algo: module.Incremental}, + {ID: "%s_request_per_proto_tcp", Name: "tcp", Algo: module.Incremental}, + }, + }, + { + ID: "per_%s_%s_dns_requests_count_total_per_ip_family", + Title: "Number Of DNS Requests Per IP Family, %s %s", + Units: "requests/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_requests_count_total_per_ip_family", + Type: module.Stacked, + Dims: Dims{ + {ID: "%s_request_per_ip_family_v4", Name: "v4", Algo: module.Incremental}, + {ID: "%s_request_per_ip_family_v6", Name: "v6", Algo: module.Incremental}, + }, + }, + //{ + // ID: "per_%s_%s_dns_requests_duration_seconds", + // Title: "Number Of DNS Requests Per Bucket, %s %s", + // Units: "requests/s", + // Fam: "%s %s", + // Ctx: "coredns.server_dns_requests_duration_seconds", + // Type: module.Stacked, + // Dims: Dims{ + // {ID: "%s_request_duration_seconds_bucket_0.00025", Name: "0.00025s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.0005", Name: "0.0005s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.001", Name: "0.001s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.002", Name: "0.002s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.004", Name: "0.004s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.008", Name: "0.008s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.016", Name: "0.016s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.032", Name: "0.032s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.064", Name: "0.064s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.128", Name: "0.128s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.256", Name: "0.256s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_0.512", Name: "0.512s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_1.024", Name: "1.024s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_2.048", Name: "2.048s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_4.096", Name: "4.096s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_8.192", Name: "8.192s", Algo: module.Incremental}, + // {ID: "%s_request_duration_seconds_bucket_+Inf", Name: "+Inf", Algo: module.Incremental}, + // }, + //}, + { + ID: "per_%s_%s_dns_requests_count_total_per_type", + Title: "Number Of DNS Requests Per Type, %s %s", + Units: "requests/s", + Fam: "%s %s", + Ctx: 
"coredns.server_dns_requests_count_total_per_per_type", + Type: module.Stacked, + Dims: Dims{ + {ID: "%s_request_per_type_A", Name: "A", Algo: module.Incremental}, + {ID: "%s_request_per_type_AAAA", Name: "AAAA", Algo: module.Incremental}, + {ID: "%s_request_per_type_MX", Name: "MX", Algo: module.Incremental}, + {ID: "%s_request_per_type_SOA", Name: "SOA", Algo: module.Incremental}, + {ID: "%s_request_per_type_CNAME", Name: "CNAME", Algo: module.Incremental}, + {ID: "%s_request_per_type_PTR", Name: "PTR", Algo: module.Incremental}, + {ID: "%s_request_per_type_TXT", Name: "TXT", Algo: module.Incremental}, + {ID: "%s_request_per_type_NS", Name: "NS", Algo: module.Incremental}, + {ID: "%s_request_per_type_DS", Name: "DS", Algo: module.Incremental}, + {ID: "%s_request_per_type_DNSKEY", Name: "DNSKEY", Algo: module.Incremental}, + {ID: "%s_request_per_type_RRSIG", Name: "RRSIG", Algo: module.Incremental}, + {ID: "%s_request_per_type_NSEC", Name: "NSEC", Algo: module.Incremental}, + {ID: "%s_request_per_type_NSEC3", Name: "NSEC3", Algo: module.Incremental}, + {ID: "%s_request_per_type_IXFR", Name: "IXFR", Algo: module.Incremental}, + {ID: "%s_request_per_type_ANY", Name: "ANY", Algo: module.Incremental}, + {ID: "%s_request_per_type_other", Name: "other", Algo: module.Incremental}, + }, + }, + { + ID: "per_%s_%s_dns_responses_count_total_per_rcode", + Title: "Number Of DNS Responses Per Rcode, %s %s", + Units: "responses/s", + Fam: "%s %s", + Ctx: "coredns.server_dns_responses_count_total_per_rcode", + Type: module.Stacked, + Dims: Dims{ + {ID: "%s_response_per_rcode_NOERROR", Name: "NOERROR", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_FORMERR", Name: "FORMERR", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_SERVFAIL", Name: "SERVFAIL", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_NXDOMAIN", Name: "NXDOMAIN", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_NOTIMP", Name: "NOTIMP", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_REFUSED", Name: "REFUSED", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_YXDOMAIN", Name: "YXDOMAIN", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_YXRRSET", Name: "YXRRSET", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_NXRRSET", Name: "NXRRSET", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_NOTAUTH", Name: "NOTAUTH", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_NOTZONE", Name: "NOTZONE", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADSIG", Name: "BADSIG", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADKEY", Name: "BADKEY", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADTIME", Name: "BADTIME", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADMODE", Name: "BADMODE", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADNAME", Name: "BADNAME", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADALG", Name: "BADALG", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADTRUNC", Name: "BADTRUNC", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_BADCOOKIE", Name: "BADCOOKIE", Algo: module.Incremental}, + {ID: "%s_response_per_rcode_other", Name: "other", Algo: module.Incremental}, + }, + }, +} + +var zoneCharts = func() Charts { + c := serverCharts.Copy() + _ = c.Remove("per_%s_%s_dns_request_count_total_per_status") + return *c +}() diff --git a/src/go/collectors/go.d.plugin/modules/coredns/collect.go b/src/go/collectors/go.d.plugin/modules/coredns/collect.go new file mode 100644 index 
00000000000000..18d7226bb7c984 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/collect.go @@ -0,0 +1,713 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + "errors" + "fmt" + "strings" + + "github.com/blang/semver/v4" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +const ( + metricPanicCountTotal169orOlder = "coredns_panic_count_total" + metricRequestCountTotal169orOlder = "coredns_dns_request_count_total" + metricRequestTypeCountTotal169orOlder = "coredns_dns_request_type_count_total" + metricResponseRcodeCountTotal169orOlder = "coredns_dns_response_rcode_count_total" + + metricPanicCountTotal170orNewer = "coredns_panics_total" + metricRequestCountTotal170orNewer = "coredns_dns_requests_total" + metricRequestTypeCountTotal170orNewer = "coredns_dns_requests_total" + metricResponseRcodeCountTotal170orNewer = "coredns_dns_responses_total" +) + +var ( + empty = "" + dropped = "dropped" + emptyServerReplaceName = "empty" + rootZoneReplaceName = "root" + version169 = semver.MustParse("1.6.9") +) + +type requestMetricsNames struct { + panicCountTotal string + // true for all metrics below: + // - if none of server block matches 'server' tag is "", empty server has only one zone - dropped. + // example: + // coredns_dns_requests_total{family="1",proto="udp",server="",zone="dropped"} 1 for + // - dropped requests are added to both dropped and corresponding zone + // example: + // coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",zone="dropped"} 2 + // coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",zone="ya.ru."} 2 + requestCountTotal string + requestTypeCountTotal string + responseRcodeCountTotal string +} + +func (cd *CoreDNS) collect() (map[string]int64, error) { + raw, err := cd.prom.ScrapeSeries() + + if err != nil { + return nil, err + } + + mx := newMetrics() + + // some metric names are different depending on the version + // update them once + if !cd.skipVersionCheck { + cd.updateVersionDependentMetrics(raw) + cd.skipVersionCheck = true + } + + //we can only get these metrics if we know the server version + if cd.version == nil { + return nil, errors.New("unable to determine server version") + } + + cd.collectPanic(mx, raw) + cd.collectSummaryRequests(mx, raw) + cd.collectSummaryRequestsPerType(mx, raw) + cd.collectSummaryResponsesPerRcode(mx, raw) + + if cd.perServerMatcher != nil { + cd.collectPerServerRequests(mx, raw) + //cd.collectPerServerRequestsDuration(mx, raw) + cd.collectPerServerRequestPerType(mx, raw) + cd.collectPerServerResponsePerRcode(mx, raw) + } + + if cd.perZoneMatcher != nil { + cd.collectPerZoneRequests(mx, raw) + //cd.collectPerZoneRequestsDuration(mx, raw) + cd.collectPerZoneRequestsPerType(mx, raw) + cd.collectPerZoneResponsesPerRcode(mx, raw) + } + + return stm.ToMap(mx), nil +} + +func (cd *CoreDNS) updateVersionDependentMetrics(raw prometheus.Series) { + version := cd.parseVersion(raw) + if version == nil { + return + } + cd.version = version + if cd.version.LTE(version169) { + cd.metricNames.panicCountTotal = metricPanicCountTotal169orOlder + cd.metricNames.requestCountTotal = metricRequestCountTotal169orOlder + cd.metricNames.requestTypeCountTotal = metricRequestTypeCountTotal169orOlder + cd.metricNames.responseRcodeCountTotal = metricResponseRcodeCountTotal169orOlder + } else { + cd.metricNames.panicCountTotal = metricPanicCountTotal170orNewer + cd.metricNames.requestCountTotal = metricRequestCountTotal170orNewer + 
cd.metricNames.requestTypeCountTotal = metricRequestTypeCountTotal170orNewer + cd.metricNames.responseRcodeCountTotal = metricResponseRcodeCountTotal170orNewer + } +} + +func (cd *CoreDNS) parseVersion(raw prometheus.Series) *semver.Version { + var versionStr string + for _, metric := range raw.FindByName("coredns_build_info") { + versionStr = metric.Labels.Get("version") + } + if versionStr == "" { + cd.Error("cannot find version string in metrics") + return nil + } + + version, err := semver.Make(versionStr) + if err != nil { + cd.Errorf("failed to find server version: %v", err) + return nil + } + return &version +} + +func (cd *CoreDNS) collectPanic(mx *metrics, raw prometheus.Series) { + mx.Panic.Set(raw.FindByName(cd.metricNames.panicCountTotal).Max()) +} + +func (cd *CoreDNS) collectSummaryRequests(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) { + var ( + family = metric.Labels.Get("family") + proto = metric.Labels.Get("proto") + server = metric.Labels.Get("server") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if family == empty || proto == empty || zone == empty { + continue + } + + if server == empty { + mx.NoZoneDropped.Add(value) + } + + setRequestPerStatus(&mx.Summary.Request, value, server, zone) + + if zone == dropped && server != empty { + continue + } + + mx.Summary.Request.Total.Add(value) + setRequestPerIPFamily(&mx.Summary.Request, value, family) + setRequestPerProto(&mx.Summary.Request, value, proto) + } +} + +//func (cd *CoreDNS) collectSummaryRequestsDuration(mx *metrics, raw prometheus.Series) { +// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) { +// var ( +// server = metric.Labels.Get("server") +// zone = metric.Labels.Get("zone") +// le = metric.Labels.Get("le") +// value = metric.Value +// ) +// +// if zone == empty || zone == dropped && server != empty || le == empty { +// continue +// } +// +// setRequestDuration(&mx.Summary.Request, value, le) +// } +// processRequestDuration(&mx.Summary.Request) +//} + +func (cd *CoreDNS) collectSummaryRequestsPerType(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) { + var ( + server = metric.Labels.Get("server") + typ = metric.Labels.Get("type") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if typ == empty || zone == empty || zone == dropped && server != empty { + continue + } + + setRequestPerType(&mx.Summary.Request, value, typ) + } +} + +func (cd *CoreDNS) collectSummaryResponsesPerRcode(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) { + var ( + rcode = metric.Labels.Get("rcode") + server = metric.Labels.Get("server") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if rcode == empty || zone == empty || zone == dropped && server != empty { + continue + } + + setResponsePerRcode(&mx.Summary.Response, value, rcode) + } +} + +// Per Server + +func (cd *CoreDNS) collectPerServerRequests(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) { + var ( + family = metric.Labels.Get("family") + proto = metric.Labels.Get("proto") + server = metric.Labels.Get("server") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if family == empty || proto == empty || zone == empty { + continue + } + + if !cd.perServerMatcher.MatchString(server) { + continue + } + + if server == empty { 
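+			// An empty "server" label means the request matched no server
+			// block (see the requestMetricsNames comment above); substitute a
+			// placeholder name so the per-server chart IDs and families stay
+			// non-empty.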
+ server = emptyServerReplaceName + } + + if !cd.collectedServers[server] { + cd.addNewServerCharts(server) + cd.collectedServers[server] = true + } + + if _, ok := mx.PerServer[server]; !ok { + mx.PerServer[server] = &requestResponse{} + } + + srv := mx.PerServer[server] + + setRequestPerStatus(&srv.Request, value, server, zone) + + if zone == dropped && server != emptyServerReplaceName { + continue + } + + srv.Request.Total.Add(value) + setRequestPerIPFamily(&srv.Request, value, family) + setRequestPerProto(&srv.Request, value, proto) + } +} + +//func (cd *CoreDNS) collectPerServerRequestsDuration(mx *metrics, raw prometheus.Series) { +// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) { +// var ( +// server = metric.Labels.Get("server") +// zone = metric.Labels.Get("zone") +// le = metric.Labels.Get("le") +// value = metric.Value +// ) +// +// if zone == empty || zone == dropped && server != empty || le == empty { +// continue +// } +// +// if !cd.perServerMatcher.MatchString(server) { +// continue +// } +// +// if server == empty { +// server = emptyServerReplaceName +// } +// +// if !cd.collectedServers[server] { +// cd.addNewServerCharts(server) +// cd.collectedServers[server] = true +// } +// +// if _, ok := mx.PerServer[server]; !ok { +// mx.PerServer[server] = &requestResponse{} +// } +// +// setRequestDuration(&mx.PerServer[server].Request, value, le) +// } +// for _, s := range mx.PerServer { +// processRequestDuration(&s.Request) +// } +//} + +func (cd *CoreDNS) collectPerServerRequestPerType(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) { + var ( + server = metric.Labels.Get("server") + typ = metric.Labels.Get("type") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if typ == empty || zone == empty || zone == dropped && server != empty { + continue + } + + if !cd.perServerMatcher.MatchString(server) { + continue + } + + if server == empty { + server = emptyServerReplaceName + } + + if !cd.collectedServers[server] { + cd.addNewServerCharts(server) + cd.collectedServers[server] = true + } + + if _, ok := mx.PerServer[server]; !ok { + mx.PerServer[server] = &requestResponse{} + } + + setRequestPerType(&mx.PerServer[server].Request, value, typ) + } +} + +func (cd *CoreDNS) collectPerServerResponsePerRcode(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) { + var ( + rcode = metric.Labels.Get("rcode") + server = metric.Labels.Get("server") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if rcode == empty || zone == empty || zone == dropped && server != empty { + continue + } + + if !cd.perServerMatcher.MatchString(server) { + continue + } + + if server == empty { + server = emptyServerReplaceName + } + + if !cd.collectedServers[server] { + cd.addNewServerCharts(server) + cd.collectedServers[server] = true + } + + if _, ok := mx.PerServer[server]; !ok { + mx.PerServer[server] = &requestResponse{} + } + + setResponsePerRcode(&mx.PerServer[server].Response, value, rcode) + } +} + +// Per Zone + +func (cd *CoreDNS) collectPerZoneRequests(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) { + var ( + family = metric.Labels.Get("family") + proto = metric.Labels.Get("proto") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if family == empty || proto == empty || zone == empty { + continue + } + + if 
!cd.perZoneMatcher.MatchString(zone) { + continue + } + + if zone == "." { + zone = rootZoneReplaceName + } + + if !cd.collectedZones[zone] { + cd.addNewZoneCharts(zone) + cd.collectedZones[zone] = true + } + + if _, ok := mx.PerZone[zone]; !ok { + mx.PerZone[zone] = &requestResponse{} + } + + zoneMX := mx.PerZone[zone] + zoneMX.Request.Total.Add(value) + setRequestPerIPFamily(&zoneMX.Request, value, family) + setRequestPerProto(&zoneMX.Request, value, proto) + } +} + +//func (cd *CoreDNS) collectPerZoneRequestsDuration(mx *metrics, raw prometheus.Series) { +// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) { +// var ( +// zone = metric.Labels.Get("zone") +// le = metric.Labels.Get("le") +// value = metric.Value +// ) +// +// if zone == empty || le == empty { +// continue +// } +// +// if !cd.perZoneMatcher.MatchString(zone) { +// continue +// } +// +// if zone == "." { +// zone = rootZoneReplaceName +// } +// +// if !cd.collectedZones[zone] { +// cd.addNewZoneCharts(zone) +// cd.collectedZones[zone] = true +// } +// +// if _, ok := mx.PerZone[zone]; !ok { +// mx.PerZone[zone] = &requestResponse{} +// } +// +// setRequestDuration(&mx.PerZone[zone].Request, value, le) +// } +// for _, s := range mx.PerZone { +// processRequestDuration(&s.Request) +// } +//} + +func (cd *CoreDNS) collectPerZoneRequestsPerType(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) { + var ( + typ = metric.Labels.Get("type") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if typ == empty || zone == empty { + continue + } + + if !cd.perZoneMatcher.MatchString(zone) { + continue + } + + if zone == "." { + zone = rootZoneReplaceName + } + + if !cd.collectedZones[zone] { + cd.addNewZoneCharts(zone) + cd.collectedZones[zone] = true + } + + if _, ok := mx.PerZone[zone]; !ok { + mx.PerZone[zone] = &requestResponse{} + } + + setRequestPerType(&mx.PerZone[zone].Request, value, typ) + } +} + +func (cd *CoreDNS) collectPerZoneResponsesPerRcode(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) { + var ( + rcode = metric.Labels.Get("rcode") + zone = metric.Labels.Get("zone") + value = metric.Value + ) + + if rcode == empty || zone == empty { + continue + } + + if !cd.perZoneMatcher.MatchString(zone) { + continue + } + + if zone == "." 
{ + zone = rootZoneReplaceName + } + + if !cd.collectedZones[zone] { + cd.addNewZoneCharts(zone) + cd.collectedZones[zone] = true + } + + if _, ok := mx.PerZone[zone]; !ok { + mx.PerZone[zone] = &requestResponse{} + } + + setResponsePerRcode(&mx.PerZone[zone].Response, value, rcode) + } +} + +// --- + +func setRequestPerIPFamily(mx *request, value float64, family string) { + switch family { + case "1": + mx.PerIPFamily.IPv4.Add(value) + case "2": + mx.PerIPFamily.IPv6.Add(value) + } +} + +func setRequestPerProto(mx *request, value float64, proto string) { + switch proto { + case "udp": + mx.PerProto.UDP.Add(value) + case "tcp": + mx.PerProto.TCP.Add(value) + } +} + +func setRequestPerStatus(mx *request, value float64, server, zone string) { + switch zone { + default: + mx.PerStatus.Processed.Add(value) + case "dropped": + mx.PerStatus.Dropped.Add(value) + if server == empty || server == emptyServerReplaceName { + return + } + mx.PerStatus.Processed.Sub(value) + } +} + +func setRequestPerType(mx *request, value float64, typ string) { + switch typ { + default: + mx.PerType.Other.Add(value) + case "A": + mx.PerType.A.Add(value) + case "AAAA": + mx.PerType.AAAA.Add(value) + case "MX": + mx.PerType.MX.Add(value) + case "SOA": + mx.PerType.SOA.Add(value) + case "CNAME": + mx.PerType.CNAME.Add(value) + case "PTR": + mx.PerType.PTR.Add(value) + case "TXT": + mx.PerType.TXT.Add(value) + case "NS": + mx.PerType.NS.Add(value) + case "DS": + mx.PerType.DS.Add(value) + case "DNSKEY": + mx.PerType.DNSKEY.Add(value) + case "RRSIG": + mx.PerType.RRSIG.Add(value) + case "NSEC": + mx.PerType.NSEC.Add(value) + case "NSEC3": + mx.PerType.NSEC3.Add(value) + case "IXFR": + mx.PerType.IXFR.Add(value) + case "ANY": + mx.PerType.ANY.Add(value) + } +} + +func setResponsePerRcode(mx *response, value float64, rcode string) { + mx.Total.Add(value) + + switch rcode { + default: + mx.PerRcode.Other.Add(value) + case "NOERROR": + mx.PerRcode.NOERROR.Add(value) + case "FORMERR": + mx.PerRcode.FORMERR.Add(value) + case "SERVFAIL": + mx.PerRcode.SERVFAIL.Add(value) + case "NXDOMAIN": + mx.PerRcode.NXDOMAIN.Add(value) + case "NOTIMP": + mx.PerRcode.NOTIMP.Add(value) + case "REFUSED": + mx.PerRcode.REFUSED.Add(value) + case "YXDOMAIN": + mx.PerRcode.YXDOMAIN.Add(value) + case "YXRRSET": + mx.PerRcode.YXRRSET.Add(value) + case "NXRRSET": + mx.PerRcode.NXRRSET.Add(value) + case "NOTAUTH": + mx.PerRcode.NOTAUTH.Add(value) + case "NOTZONE": + mx.PerRcode.NOTZONE.Add(value) + case "BADSIG": + mx.PerRcode.BADSIG.Add(value) + case "BADKEY": + mx.PerRcode.BADKEY.Add(value) + case "BADTIME": + mx.PerRcode.BADTIME.Add(value) + case "BADMODE": + mx.PerRcode.BADMODE.Add(value) + case "BADNAME": + mx.PerRcode.BADNAME.Add(value) + case "BADALG": + mx.PerRcode.BADALG.Add(value) + case "BADTRUNC": + mx.PerRcode.BADTRUNC.Add(value) + case "BADCOOKIE": + mx.PerRcode.BADCOOKIE.Add(value) + } +} + +//func setRequestDuration(mx *request, value float64, le string) { +// switch le { +// case "0.00025": +// mx.Duration.LE000025.Add(value) +// case "0.0005": +// mx.Duration.LE00005.Add(value) +// case "0.001": +// mx.Duration.LE0001.Add(value) +// case "0.002": +// mx.Duration.LE0002.Add(value) +// case "0.004": +// mx.Duration.LE0004.Add(value) +// case "0.008": +// mx.Duration.LE0008.Add(value) +// case "0.016": +// mx.Duration.LE0016.Add(value) +// case "0.032": +// mx.Duration.LE0032.Add(value) +// case "0.064": +// mx.Duration.LE0064.Add(value) +// case "0.128": +// mx.Duration.LE0128.Add(value) +// case "0.256": +// 
mx.Duration.LE0256.Add(value) +// case "0.512": +// mx.Duration.LE0512.Add(value) +// case "1.024": +// mx.Duration.LE1024.Add(value) +// case "2.048": +// mx.Duration.LE2048.Add(value) +// case "4.096": +// mx.Duration.LE4096.Add(value) +// case "8.192": +// mx.Duration.LE8192.Add(value) +// case "+Inf": +// mx.Duration.LEInf.Add(value) +// } +//} + +//func processRequestDuration(mx *request) { +// mx.Duration.LEInf.Sub(mx.Duration.LE8192.Value()) +// mx.Duration.LE8192.Sub(mx.Duration.LE4096.Value()) +// mx.Duration.LE4096.Sub(mx.Duration.LE2048.Value()) +// mx.Duration.LE2048.Sub(mx.Duration.LE1024.Value()) +// mx.Duration.LE1024.Sub(mx.Duration.LE0512.Value()) +// mx.Duration.LE0512.Sub(mx.Duration.LE0256.Value()) +// mx.Duration.LE0256.Sub(mx.Duration.LE0128.Value()) +// mx.Duration.LE0128.Sub(mx.Duration.LE0064.Value()) +// mx.Duration.LE0064.Sub(mx.Duration.LE0032.Value()) +// mx.Duration.LE0032.Sub(mx.Duration.LE0016.Value()) +// mx.Duration.LE0016.Sub(mx.Duration.LE0008.Value()) +// mx.Duration.LE0008.Sub(mx.Duration.LE0004.Value()) +// mx.Duration.LE0004.Sub(mx.Duration.LE0002.Value()) +// mx.Duration.LE0002.Sub(mx.Duration.LE0001.Value()) +// mx.Duration.LE0001.Sub(mx.Duration.LE00005.Value()) +// mx.Duration.LE00005.Sub(mx.Duration.LE000025.Value()) +//} + +// --- + +func (cd *CoreDNS) addNewServerCharts(name string) { + charts := serverCharts.Copy() + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, "server", name) + chart.Title = fmt.Sprintf(chart.Title, "Server", name) + chart.Fam = fmt.Sprintf(chart.Fam, "server", name) + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + _ = cd.charts.Add(*charts...) +} + +func (cd *CoreDNS) addNewZoneCharts(name string) { + charts := zoneCharts.Copy() + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, "zone", name) + chart.Title = fmt.Sprintf(chart.Title, "Zone", name) + chart.Fam = fmt.Sprintf(chart.Fam, "zone", name) + chart.Ctx = strings.Replace(chart.Ctx, "coredns.server_", "coredns.zone_", 1) + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + _ = cd.charts.Add(*charts...) 
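+	// The returned error is ignored deliberately: chart IDs are unique per zone,
+	// and duplicate Add calls are prevented by the collectedZones guard in the caller.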
+} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/config_schema.json b/src/go/collectors/go.d.plugin/modules/coredns/config_schema.json new file mode 100644 index 00000000000000..70b9ef00139733 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/config_schema.json @@ -0,0 +1,93 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/coredns job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "per_server_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "per_zone_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/coredns.go b/src/go/collectors/go.d.plugin/modules/coredns/coredns.go new file mode 100644 index 00000000000000..18c92caf385066 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/coredns.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + _ "embed" + "time" + + "github.com/blang/semver/v4" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + defaultURL = "http://127.0.0.1:9153/metrics" + defaultHTTPTimeout = time.Second * 2 +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("coredns", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +// New creates CoreDNS with default values. +func New() *CoreDNS { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } + return &CoreDNS{ + Config: config, + charts: summaryCharts.Copy(), + collectedServers: make(map[string]bool), + collectedZones: make(map[string]bool), + } +} + +// Config is the CoreDNS module configuration. +type Config struct { + web.HTTP `yaml:",inline"` + PerServerStats matcher.SimpleExpr `yaml:"per_server_stats"` + PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats"` +} + +// CoreDNS CoreDNS module. +type CoreDNS struct { + module.Base + Config `yaml:",inline"` + charts *Charts + prom prometheus.Prometheus + perServerMatcher matcher.Matcher + perZoneMatcher matcher.Matcher + collectedServers map[string]bool + collectedZones map[string]bool + skipVersionCheck bool + version *semver.Version + metricNames requestMetricsNames +} + +// Cleanup makes cleanup. 
+func (CoreDNS) Cleanup() {} + +// Init makes initialization. +func (cd *CoreDNS) Init() bool { + if cd.URL == "" { + cd.Error("URL not set") + return false + } + + if !cd.PerServerStats.Empty() { + m, err := cd.PerServerStats.Parse() + if err != nil { + cd.Errorf("error on creating 'per_server_stats' matcher : %v", err) + return false + } + cd.perServerMatcher = matcher.WithCache(m) + } + + if !cd.PerZoneStats.Empty() { + m, err := cd.PerZoneStats.Parse() + if err != nil { + cd.Errorf("error on creating 'per_zone_stats' matcher : %v", err) + return false + } + cd.perZoneMatcher = matcher.WithCache(m) + } + + client, err := web.NewHTTPClient(cd.Client) + if err != nil { + cd.Errorf("error on creating http client : %v", err) + return false + } + + cd.prom = prometheus.New(client, cd.Request) + + return true +} + +// Check makes check. +func (cd *CoreDNS) Check() bool { + return len(cd.Collect()) > 0 +} + +// Charts creates Charts. +func (cd *CoreDNS) Charts() *Charts { + return cd.charts +} + +// Collect collects metrics. +func (cd *CoreDNS) Collect() map[string]int64 { + mx, err := cd.collect() + + if err != nil { + cd.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/coredns_test.go b/src/go/collectors/go.d.plugin/modules/coredns/coredns_test.go new file mode 100644 index 00000000000000..a6b77976a21c99 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/coredns_test.go @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testNoLoad169, _ = os.ReadFile("testdata/version169/no_load.txt") + testSomeLoad169, _ = os.ReadFile("testdata/version169/some_load.txt") + testNoLoad170, _ = os.ReadFile("testdata/version170/no_load.txt") + testSomeLoad170, _ = os.ReadFile("testdata/version170/some_load.txt") + testNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt") +) + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*CoreDNS)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestCoreDNS_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestCoreDNS_Cleanup(t *testing.T) { New().Cleanup() } + +func TestCoreDNS_Init(t *testing.T) { assert.True(t, New().Init()) } + +func TestCoreDNS_InitNG(t *testing.T) { + job := New() + job.URL = "" + assert.False(t, job.Init()) +} + +func TestCoreDNS_Check(t *testing.T) { + tests := []struct { + name string + data []byte + }{ + {"version 1.6.9", testNoLoad169}, + {"version 1.7.0", testNoLoad170}, + } + for _, testNoLoad := range tests { + t.Run(testNoLoad.name, func(t *testing.T) { + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNoLoad.data) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.True(t, job.Check()) + }) + } +} + +func TestCoreDNS_CheckNG(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestCoreDNS_Collect(t *testing.T) { + tests := []struct { + name string + data []byte + }{ + {"version 1.6.9", testSomeLoad169}, + {"version 1.7.0", testSomeLoad170}, + } + for _, testSomeLoad := range tests { + t.Run(testSomeLoad.name, func(t *testing.T) { + + ts := 
httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testSomeLoad.data) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + job.PerServerStats.Includes = []string{"glob:*"} + job.PerZoneStats.Includes = []string{"glob:*"} + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "coredns.io._request_per_ip_family_v4": 19, + "coredns.io._request_per_ip_family_v6": 0, + "coredns.io._request_per_proto_tcp": 0, + "coredns.io._request_per_proto_udp": 19, + "coredns.io._request_per_status_dropped": 0, + "coredns.io._request_per_status_processed": 0, + "coredns.io._request_per_type_A": 6, + "coredns.io._request_per_type_AAAA": 6, + "coredns.io._request_per_type_ANY": 0, + "coredns.io._request_per_type_CNAME": 0, + "coredns.io._request_per_type_DNSKEY": 0, + "coredns.io._request_per_type_DS": 0, + "coredns.io._request_per_type_IXFR": 0, + "coredns.io._request_per_type_MX": 7, + "coredns.io._request_per_type_NS": 0, + "coredns.io._request_per_type_NSEC": 0, + "coredns.io._request_per_type_NSEC3": 0, + "coredns.io._request_per_type_PTR": 0, + "coredns.io._request_per_type_RRSIG": 0, + "coredns.io._request_per_type_SOA": 0, + "coredns.io._request_per_type_SRV": 0, + "coredns.io._request_per_type_TXT": 0, + "coredns.io._request_per_type_other": 0, + "coredns.io._request_total": 19, + "coredns.io._response_per_rcode_BADALG": 0, + "coredns.io._response_per_rcode_BADCOOKIE": 0, + "coredns.io._response_per_rcode_BADKEY": 0, + "coredns.io._response_per_rcode_BADMODE": 0, + "coredns.io._response_per_rcode_BADNAME": 0, + "coredns.io._response_per_rcode_BADSIG": 0, + "coredns.io._response_per_rcode_BADTIME": 0, + "coredns.io._response_per_rcode_BADTRUNC": 0, + "coredns.io._response_per_rcode_FORMERR": 0, + "coredns.io._response_per_rcode_NOERROR": 19, + "coredns.io._response_per_rcode_NOTAUTH": 0, + "coredns.io._response_per_rcode_NOTIMP": 0, + "coredns.io._response_per_rcode_NOTZONE": 0, + "coredns.io._response_per_rcode_NXDOMAIN": 0, + "coredns.io._response_per_rcode_NXRRSET": 0, + "coredns.io._response_per_rcode_REFUSED": 0, + "coredns.io._response_per_rcode_SERVFAIL": 0, + "coredns.io._response_per_rcode_YXDOMAIN": 0, + "coredns.io._response_per_rcode_YXRRSET": 0, + "coredns.io._response_per_rcode_other": 0, + "coredns.io._response_total": 19, + "dns://:53_request_per_ip_family_v4": 15, + "dns://:53_request_per_ip_family_v6": 0, + "dns://:53_request_per_proto_tcp": 0, + "dns://:53_request_per_proto_udp": 15, + "dns://:53_request_per_status_dropped": 9, + "dns://:53_request_per_status_processed": 6, + "dns://:53_request_per_type_A": 5, + "dns://:53_request_per_type_AAAA": 5, + "dns://:53_request_per_type_ANY": 0, + "dns://:53_request_per_type_CNAME": 0, + "dns://:53_request_per_type_DNSKEY": 0, + "dns://:53_request_per_type_DS": 0, + "dns://:53_request_per_type_IXFR": 0, + "dns://:53_request_per_type_MX": 5, + "dns://:53_request_per_type_NS": 0, + "dns://:53_request_per_type_NSEC": 0, + "dns://:53_request_per_type_NSEC3": 0, + "dns://:53_request_per_type_PTR": 0, + "dns://:53_request_per_type_RRSIG": 0, + "dns://:53_request_per_type_SOA": 0, + "dns://:53_request_per_type_SRV": 0, + "dns://:53_request_per_type_TXT": 0, + "dns://:53_request_per_type_other": 0, + "dns://:53_request_total": 15, + "dns://:53_response_per_rcode_BADALG": 0, + "dns://:53_response_per_rcode_BADCOOKIE": 0, + "dns://:53_response_per_rcode_BADKEY": 0, + "dns://:53_response_per_rcode_BADMODE": 0, + 
"dns://:53_response_per_rcode_BADNAME": 0, + "dns://:53_response_per_rcode_BADSIG": 0, + "dns://:53_response_per_rcode_BADTIME": 0, + "dns://:53_response_per_rcode_BADTRUNC": 0, + "dns://:53_response_per_rcode_FORMERR": 0, + "dns://:53_response_per_rcode_NOERROR": 6, + "dns://:53_response_per_rcode_NOTAUTH": 0, + "dns://:53_response_per_rcode_NOTIMP": 0, + "dns://:53_response_per_rcode_NOTZONE": 0, + "dns://:53_response_per_rcode_NXDOMAIN": 0, + "dns://:53_response_per_rcode_NXRRSET": 0, + "dns://:53_response_per_rcode_REFUSED": 0, + "dns://:53_response_per_rcode_SERVFAIL": 9, + "dns://:53_response_per_rcode_YXDOMAIN": 0, + "dns://:53_response_per_rcode_YXRRSET": 0, + "dns://:53_response_per_rcode_other": 0, + "dns://:53_response_total": 15, + "dns://:54_request_per_ip_family_v4": 25, + "dns://:54_request_per_ip_family_v6": 0, + "dns://:54_request_per_proto_tcp": 0, + "dns://:54_request_per_proto_udp": 25, + "dns://:54_request_per_status_dropped": 12, + "dns://:54_request_per_status_processed": 13, + "dns://:54_request_per_type_A": 8, + "dns://:54_request_per_type_AAAA": 8, + "dns://:54_request_per_type_ANY": 0, + "dns://:54_request_per_type_CNAME": 0, + "dns://:54_request_per_type_DNSKEY": 0, + "dns://:54_request_per_type_DS": 0, + "dns://:54_request_per_type_IXFR": 0, + "dns://:54_request_per_type_MX": 9, + "dns://:54_request_per_type_NS": 0, + "dns://:54_request_per_type_NSEC": 0, + "dns://:54_request_per_type_NSEC3": 0, + "dns://:54_request_per_type_PTR": 0, + "dns://:54_request_per_type_RRSIG": 0, + "dns://:54_request_per_type_SOA": 0, + "dns://:54_request_per_type_SRV": 0, + "dns://:54_request_per_type_TXT": 0, + "dns://:54_request_per_type_other": 0, + "dns://:54_request_total": 25, + "dns://:54_response_per_rcode_BADALG": 0, + "dns://:54_response_per_rcode_BADCOOKIE": 0, + "dns://:54_response_per_rcode_BADKEY": 0, + "dns://:54_response_per_rcode_BADMODE": 0, + "dns://:54_response_per_rcode_BADNAME": 0, + "dns://:54_response_per_rcode_BADSIG": 0, + "dns://:54_response_per_rcode_BADTIME": 0, + "dns://:54_response_per_rcode_BADTRUNC": 0, + "dns://:54_response_per_rcode_FORMERR": 0, + "dns://:54_response_per_rcode_NOERROR": 13, + "dns://:54_response_per_rcode_NOTAUTH": 0, + "dns://:54_response_per_rcode_NOTIMP": 0, + "dns://:54_response_per_rcode_NOTZONE": 0, + "dns://:54_response_per_rcode_NXDOMAIN": 0, + "dns://:54_response_per_rcode_NXRRSET": 0, + "dns://:54_response_per_rcode_REFUSED": 0, + "dns://:54_response_per_rcode_SERVFAIL": 12, + "dns://:54_response_per_rcode_YXDOMAIN": 0, + "dns://:54_response_per_rcode_YXRRSET": 0, + "dns://:54_response_per_rcode_other": 0, + "dns://:54_response_total": 25, + "dropped_request_per_ip_family_v4": 42, + "dropped_request_per_ip_family_v6": 0, + "dropped_request_per_proto_tcp": 0, + "dropped_request_per_proto_udp": 42, + "dropped_request_per_status_dropped": 0, + "dropped_request_per_status_processed": 0, + "dropped_request_per_type_A": 14, + "dropped_request_per_type_AAAA": 14, + "dropped_request_per_type_ANY": 0, + "dropped_request_per_type_CNAME": 0, + "dropped_request_per_type_DNSKEY": 0, + "dropped_request_per_type_DS": 0, + "dropped_request_per_type_IXFR": 0, + "dropped_request_per_type_MX": 14, + "dropped_request_per_type_NS": 0, + "dropped_request_per_type_NSEC": 0, + "dropped_request_per_type_NSEC3": 0, + "dropped_request_per_type_PTR": 0, + "dropped_request_per_type_RRSIG": 0, + "dropped_request_per_type_SOA": 0, + "dropped_request_per_type_SRV": 0, + "dropped_request_per_type_TXT": 0, + "dropped_request_per_type_other": 0, + 
"dropped_request_total": 42, + "dropped_response_per_rcode_BADALG": 0, + "dropped_response_per_rcode_BADCOOKIE": 0, + "dropped_response_per_rcode_BADKEY": 0, + "dropped_response_per_rcode_BADMODE": 0, + "dropped_response_per_rcode_BADNAME": 0, + "dropped_response_per_rcode_BADSIG": 0, + "dropped_response_per_rcode_BADTIME": 0, + "dropped_response_per_rcode_BADTRUNC": 0, + "dropped_response_per_rcode_FORMERR": 0, + "dropped_response_per_rcode_NOERROR": 0, + "dropped_response_per_rcode_NOTAUTH": 0, + "dropped_response_per_rcode_NOTIMP": 0, + "dropped_response_per_rcode_NOTZONE": 0, + "dropped_response_per_rcode_NXDOMAIN": 0, + "dropped_response_per_rcode_NXRRSET": 0, + "dropped_response_per_rcode_REFUSED": 21, + "dropped_response_per_rcode_SERVFAIL": 21, + "dropped_response_per_rcode_YXDOMAIN": 0, + "dropped_response_per_rcode_YXRRSET": 0, + "dropped_response_per_rcode_other": 0, + "dropped_response_total": 42, + "empty_request_per_ip_family_v4": 21, + "empty_request_per_ip_family_v6": 0, + "empty_request_per_proto_tcp": 0, + "empty_request_per_proto_udp": 21, + "empty_request_per_status_dropped": 21, + "empty_request_per_status_processed": 0, + "empty_request_per_type_A": 7, + "empty_request_per_type_AAAA": 7, + "empty_request_per_type_ANY": 0, + "empty_request_per_type_CNAME": 0, + "empty_request_per_type_DNSKEY": 0, + "empty_request_per_type_DS": 0, + "empty_request_per_type_IXFR": 0, + "empty_request_per_type_MX": 7, + "empty_request_per_type_NS": 0, + "empty_request_per_type_NSEC": 0, + "empty_request_per_type_NSEC3": 0, + "empty_request_per_type_PTR": 0, + "empty_request_per_type_RRSIG": 0, + "empty_request_per_type_SOA": 0, + "empty_request_per_type_SRV": 0, + "empty_request_per_type_TXT": 0, + "empty_request_per_type_other": 0, + "empty_request_total": 21, + "empty_response_per_rcode_BADALG": 0, + "empty_response_per_rcode_BADCOOKIE": 0, + "empty_response_per_rcode_BADKEY": 0, + "empty_response_per_rcode_BADMODE": 0, + "empty_response_per_rcode_BADNAME": 0, + "empty_response_per_rcode_BADSIG": 0, + "empty_response_per_rcode_BADTIME": 0, + "empty_response_per_rcode_BADTRUNC": 0, + "empty_response_per_rcode_FORMERR": 0, + "empty_response_per_rcode_NOERROR": 0, + "empty_response_per_rcode_NOTAUTH": 0, + "empty_response_per_rcode_NOTIMP": 0, + "empty_response_per_rcode_NOTZONE": 0, + "empty_response_per_rcode_NXDOMAIN": 0, + "empty_response_per_rcode_NXRRSET": 0, + "empty_response_per_rcode_REFUSED": 21, + "empty_response_per_rcode_SERVFAIL": 0, + "empty_response_per_rcode_YXDOMAIN": 0, + "empty_response_per_rcode_YXRRSET": 0, + "empty_response_per_rcode_other": 0, + "empty_response_total": 21, + "no_matching_zone_dropped_total": 21, + "panic_total": 0, + "request_per_ip_family_v4": 61, + "request_per_ip_family_v6": 0, + "request_per_proto_tcp": 0, + "request_per_proto_udp": 61, + "request_per_status_dropped": 42, + "request_per_status_processed": 19, + "request_per_type_A": 20, + "request_per_type_AAAA": 20, + "request_per_type_ANY": 0, + "request_per_type_CNAME": 0, + "request_per_type_DNSKEY": 0, + "request_per_type_DS": 0, + "request_per_type_IXFR": 0, + "request_per_type_MX": 21, + "request_per_type_NS": 0, + "request_per_type_NSEC": 0, + "request_per_type_NSEC3": 0, + "request_per_type_PTR": 0, + "request_per_type_RRSIG": 0, + "request_per_type_SOA": 0, + "request_per_type_SRV": 0, + "request_per_type_TXT": 0, + "request_per_type_other": 0, + "request_total": 61, + "response_per_rcode_BADALG": 0, + "response_per_rcode_BADCOOKIE": 0, + "response_per_rcode_BADKEY": 0, + 
"response_per_rcode_BADMODE": 0, + "response_per_rcode_BADNAME": 0, + "response_per_rcode_BADSIG": 0, + "response_per_rcode_BADTIME": 0, + "response_per_rcode_BADTRUNC": 0, + "response_per_rcode_FORMERR": 0, + "response_per_rcode_NOERROR": 19, + "response_per_rcode_NOTAUTH": 0, + "response_per_rcode_NOTIMP": 0, + "response_per_rcode_NOTZONE": 0, + "response_per_rcode_NXDOMAIN": 0, + "response_per_rcode_NXRRSET": 0, + "response_per_rcode_REFUSED": 21, + "response_per_rcode_SERVFAIL": 21, + "response_per_rcode_YXDOMAIN": 0, + "response_per_rcode_YXRRSET": 0, + "response_per_rcode_other": 0, + "response_total": 61, + "ya.ru._request_per_ip_family_v4": 21, + "ya.ru._request_per_ip_family_v6": 0, + "ya.ru._request_per_proto_tcp": 0, + "ya.ru._request_per_proto_udp": 21, + "ya.ru._request_per_status_dropped": 0, + "ya.ru._request_per_status_processed": 0, + "ya.ru._request_per_type_A": 7, + "ya.ru._request_per_type_AAAA": 7, + "ya.ru._request_per_type_ANY": 0, + "ya.ru._request_per_type_CNAME": 0, + "ya.ru._request_per_type_DNSKEY": 0, + "ya.ru._request_per_type_DS": 0, + "ya.ru._request_per_type_IXFR": 0, + "ya.ru._request_per_type_MX": 7, + "ya.ru._request_per_type_NS": 0, + "ya.ru._request_per_type_NSEC": 0, + "ya.ru._request_per_type_NSEC3": 0, + "ya.ru._request_per_type_PTR": 0, + "ya.ru._request_per_type_RRSIG": 0, + "ya.ru._request_per_type_SOA": 0, + "ya.ru._request_per_type_SRV": 0, + "ya.ru._request_per_type_TXT": 0, + "ya.ru._request_per_type_other": 0, + "ya.ru._request_total": 21, + "ya.ru._response_per_rcode_BADALG": 0, + "ya.ru._response_per_rcode_BADCOOKIE": 0, + "ya.ru._response_per_rcode_BADKEY": 0, + "ya.ru._response_per_rcode_BADMODE": 0, + "ya.ru._response_per_rcode_BADNAME": 0, + "ya.ru._response_per_rcode_BADSIG": 0, + "ya.ru._response_per_rcode_BADTIME": 0, + "ya.ru._response_per_rcode_BADTRUNC": 0, + "ya.ru._response_per_rcode_FORMERR": 0, + "ya.ru._response_per_rcode_NOERROR": 0, + "ya.ru._response_per_rcode_NOTAUTH": 0, + "ya.ru._response_per_rcode_NOTIMP": 0, + "ya.ru._response_per_rcode_NOTZONE": 0, + "ya.ru._response_per_rcode_NXDOMAIN": 0, + "ya.ru._response_per_rcode_NXRRSET": 0, + "ya.ru._response_per_rcode_REFUSED": 0, + "ya.ru._response_per_rcode_SERVFAIL": 21, + "ya.ru._response_per_rcode_YXDOMAIN": 0, + "ya.ru._response_per_rcode_YXRRSET": 0, + "ya.ru._response_per_rcode_other": 0, + "ya.ru._response_total": 21, + } + + assert.Equal(t, expected, job.Collect()) + }) + } +} + +func TestCoreDNS_CollectNoLoad(t *testing.T) { + tests := []struct { + name string + data []byte + }{ + {"version 1.6.9", testNoLoad169}, + {"version 1.7.0", testNoLoad170}, + } + for _, testNoLoad := range tests { + t.Run(testNoLoad.name, func(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNoLoad.data) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + job.PerServerStats.Includes = []string{"glob:*"} + job.PerZoneStats.Includes = []string{"glob:*"} + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "no_matching_zone_dropped_total": 0, + "panic_total": 99, + "request_per_ip_family_v4": 0, + "request_per_ip_family_v6": 0, + "request_per_proto_tcp": 0, + "request_per_proto_udp": 0, + "request_per_status_dropped": 0, + "request_per_status_processed": 0, + "request_per_type_A": 0, + "request_per_type_AAAA": 0, + "request_per_type_ANY": 0, + "request_per_type_CNAME": 0, + "request_per_type_DNSKEY": 0, + "request_per_type_DS": 0, + 
"request_per_type_IXFR": 0, + "request_per_type_MX": 0, + "request_per_type_NS": 0, + "request_per_type_NSEC": 0, + "request_per_type_NSEC3": 0, + "request_per_type_PTR": 0, + "request_per_type_RRSIG": 0, + "request_per_type_SOA": 0, + "request_per_type_SRV": 0, + "request_per_type_TXT": 0, + "request_per_type_other": 0, + "request_total": 0, + "response_per_rcode_BADALG": 0, + "response_per_rcode_BADCOOKIE": 0, + "response_per_rcode_BADKEY": 0, + "response_per_rcode_BADMODE": 0, + "response_per_rcode_BADNAME": 0, + "response_per_rcode_BADSIG": 0, + "response_per_rcode_BADTIME": 0, + "response_per_rcode_BADTRUNC": 0, + "response_per_rcode_FORMERR": 0, + "response_per_rcode_NOERROR": 0, + "response_per_rcode_NOTAUTH": 0, + "response_per_rcode_NOTIMP": 0, + "response_per_rcode_NOTZONE": 0, + "response_per_rcode_NXDOMAIN": 0, + "response_per_rcode_NXRRSET": 0, + "response_per_rcode_REFUSED": 0, + "response_per_rcode_SERVFAIL": 0, + "response_per_rcode_YXDOMAIN": 0, + "response_per_rcode_YXRRSET": 0, + "response_per_rcode_other": 0, + "response_total": 0, + } + + assert.Equal(t, expected, job.Collect()) + }) + } + +} + +func TestCoreDNS_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestCoreDNS_404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestCoreDNS_CollectNoVersion(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNoLoadNoVersion) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + job.PerServerStats.Includes = []string{"glob:*"} + job.PerZoneStats.Includes = []string{"glob:*"} + require.True(t, job.Init()) + require.False(t, job.Check()) + + assert.Nil(t, job.Collect()) +} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/integrations/coredns.md b/src/go/collectors/go.d.plugin/modules/coredns/integrations/coredns.md new file mode 100644 index 00000000000000..d564c7cae550ad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/integrations/coredns.md @@ -0,0 +1,294 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/coredns/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/coredns/metadata.yaml" +sidebar_label: "CoreDNS" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CoreDNS + + +<img src="https://netdata.cloud/img/coredns.svg" width="150"/> + + +Plugin: go.d.plugin +Module: coredns + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors CoreDNS instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. 
+ +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per CoreDNS instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| coredns.dns_request_count_total | requests | requests/s | +| coredns.dns_responses_count_total | responses | responses/s | +| coredns.dns_request_count_total_per_status | processed, dropped | requests/s | +| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s | +| coredns.dns_panic_count_total | panics | panics/s | +| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s | +| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s | +| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s | +| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s | + +### Per server + +These metrics refer to the DNS server. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| server_name | Server name. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| coredns.server_dns_request_count_total | requests | requests/s | +| coredns.server_dns_responses_count_total | responses | responses/s | +| coredns.server_request_count_total_per_status | processed, dropped | requests/s | +| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s | +| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s | +| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s | +| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s | + +### Per zone + +These metrics refer to the DNS zone. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| zone_name | Zone name. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| coredns.zone_dns_request_count_total | requests | requests/s | +| coredns.zone_dns_responses_count_total | responses | responses/s | +| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s | +| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s | +| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s | +| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s | + + + +## Alerts + +There are no alerts configured by default for this integration. 
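+
+If you want to be alerted on these metrics, you can define a custom health entity against the collector's charts. A hypothetical sketch in Netdata's health configuration syntax, using the `coredns.dns_responses_count_total_per_rcode` context and its `servfail` dimension from the tables above (the lookup window and threshold are placeholders, not recommendations):
+
+```
+ template: coredns_servfail_responses
+       on: coredns.dns_responses_count_total_per_rcode
+   lookup: sum -1m unaligned of servfail
+    every: 10s
+     warn: $this > 0
+     info: number of SERVFAIL responses in the last minute
+```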
+ + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/coredns.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/coredns.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>All options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:9153/metrics | yes | +| per_server_stats | Server filter. | | no | +| per_zone_stats | Zone filter. | | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 2 | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client tls certificate. | | no | +| tls_key | Client tls key. | | no | + +##### per_server_stats + +Metrics of servers matching the selector will be collected. +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). +- Syntax: + +```yaml +per_server_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 +``` + + +##### per_zone_stats + +Metrics of zones matching the selector will be collected. +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). +- Syntax: + +```yaml +per_zone_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9153/metrics + +``` +</details> + +##### Basic HTTP auth + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9153/metrics + username: foo + password: bar + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9153/metrics + + - name: remote + url: http://203.0.113.10:9153/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m coredns + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/coredns/metadata.yaml b/src/go/collectors/go.d.plugin/modules/coredns/metadata.yaml new file mode 100644 index 00000000000000..699f190fe5831b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/metadata.yaml @@ -0,0 +1,459 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-coredns + plugin_name: go.d.plugin + module_name: coredns + monitored_instance: + name: CoreDNS + link: https://coredns.io/ + icon_filename: coredns.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - coredns + - dns + - kubernetes + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors CoreDNS instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/coredns.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: All options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:9153/metrics + required: true + - name: per_server_stats + description: Server filter. + default_value: "" + required: false + detailed_description: | + Metrics of servers matching the selector will be collected. + - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) + - Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). + - Syntax: + + ```yaml + per_server_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + - name: per_zone_stats + description: Zone filter. + default_value: "" + required: false + detailed_description: | + Metrics of zones matching the selector will be collected. + - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) + - Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). 
+ - Syntax: + + ```yaml + per_zone_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client tls certificate. + default_value: "" + required: false + - name: tls_key + description: Client tls key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:9153/metrics + - name: Basic HTTP auth + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:9153/metrics + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:9153/metrics + + - name: remote + url: http://203.0.113.10:9153/metrics + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: coredns.dns_request_count_total + description: Number Of DNS Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: coredns.dns_responses_count_total + description: Number Of DNS Responses + unit: responses/s + chart_type: line + dimensions: + - name: responses + - name: coredns.dns_request_count_total_per_status + description: Number Of Processed And Dropped DNS Requests + unit: requests/s + chart_type: stacked + dimensions: + - name: processed + - name: dropped + - name: coredns.dns_no_matching_zone_dropped_total + description: Number Of Dropped DNS Requests Because Of No Matching Zone + unit: requests/s + chart_type: line + dimensions: + - name: dropped + - name: coredns.dns_panic_count_total + description: Number Of Panics + unit: panics/s + chart_type: line + dimensions: + - name: panics + - name: coredns.dns_requests_count_total_per_proto + description: Number Of DNS Requests Per Transport Protocol + unit: requests/s + chart_type: stacked + dimensions: + - name: udp + - name: tcp + - name: coredns.dns_requests_count_total_per_ip_family + description: Number Of DNS Requests Per IP Family + unit: requests/s + chart_type: stacked + dimensions: + - name: v4 + - name: v6 + - name: coredns.dns_requests_count_total_per_per_type + description: Number Of DNS Requests Per Type + unit: requests/s + chart_type: stacked + dimensions: + - name: a + - name: aaaa + - name: mx + - name: soa + - name: cname + - name: ptr + - name: txt + - name: ns + - name: ds + - name: dnskey + - name: rrsig + - name: nsec + - name: nsec3 + - name: ixfr + - name: any + - name: other + - name: coredns.dns_responses_count_total_per_rcode + description: Number Of DNS Responses Per Rcode + unit: responses/s + chart_type: stacked + dimensions: + - name: noerror + - name: formerr + - name: servfail + - name: nxdomain + - name: notimp + - name: refused + - name: yxdomain + - name: yxrrset + - name: nxrrset + - name: notauth + - name: notzone + - name: badsig + - name: badkey + - name: badtime + - name: badmode + - name: badname + - name: badalg + - name: badtrunc + - name: badcookie + - name: other + - name: server + description: These metrics refer to the DNS server. + labels: + - name: server_name + description: Server name. 
+ metrics: + - name: coredns.server_dns_request_count_total + description: Number Of DNS Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: coredns.server_dns_responses_count_total + description: Number Of DNS Responses + unit: responses/s + chart_type: line + dimensions: + - name: responses + - name: coredns.server_request_count_total_per_status + description: Number Of Processed And Dropped DNS Requests + unit: requests/s + chart_type: stacked + dimensions: + - name: processed + - name: dropped + - name: coredns.server_requests_count_total_per_proto + description: Number Of DNS Requests Per Transport Protocol + unit: requests/s + chart_type: stacked + dimensions: + - name: udp + - name: tcp + - name: coredns.server_requests_count_total_per_ip_family + description: Number Of DNS Requests Per IP Family + unit: requests/s + chart_type: stacked + dimensions: + - name: v4 + - name: v6 + - name: coredns.server_requests_count_total_per_per_type + description: Number Of DNS Requests Per Type + unit: requests/s + chart_type: stacked + dimensions: + - name: a + - name: aaaa + - name: mx + - name: soa + - name: cname + - name: ptr + - name: txt + - name: ns + - name: ds + - name: dnskey + - name: rrsig + - name: nsec + - name: nsec3 + - name: ixfr + - name: any + - name: other + - name: coredns.server_responses_count_total_per_rcode + description: Number Of DNS Responses Per Rcode + unit: responses/s + chart_type: stacked + dimensions: + - name: noerror + - name: formerr + - name: servfail + - name: nxdomain + - name: notimp + - name: refused + - name: yxdomain + - name: yxrrset + - name: nxrrset + - name: notauth + - name: notzone + - name: badsig + - name: badkey + - name: badtime + - name: badmode + - name: badname + - name: badalg + - name: badtrunc + - name: badcookie + - name: other + - name: zone + description: These metrics refer to the DNS zone. + labels: + - name: zone_name + description: Zone name. 
+ metrics: + - name: coredns.zone_dns_request_count_total + description: Number Of DNS Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: coredns.zone_dns_responses_count_total + description: Number Of DNS Responses + unit: responses/s + chart_type: line + dimensions: + - name: responses + - name: coredns.zone_requests_count_total_per_proto + description: Number Of DNS Requests Per Transport Protocol + unit: requests/s + chart_type: stacked + dimensions: + - name: udp + - name: tcp + - name: coredns.zone_requests_count_total_per_ip_family + description: Number Of DNS Requests Per IP Family + unit: requests/s + chart_type: stacked + dimensions: + - name: v4 + - name: v6 + - name: coredns.zone_requests_count_total_per_per_type + description: Number Of DNS Requests Per Type + unit: requests/s + chart_type: stacked + dimensions: + - name: a + - name: aaaa + - name: mx + - name: soa + - name: cname + - name: ptr + - name: txt + - name: ns + - name: ds + - name: dnskey + - name: rrsig + - name: nsec + - name: nsec3 + - name: ixfr + - name: any + - name: other + - name: coredns.zone_responses_count_total_per_rcode + description: Number Of DNS Responses Per Rcode + unit: responses/s + chart_type: stacked + dimensions: + - name: noerror + - name: formerr + - name: servfail + - name: nxdomain + - name: notimp + - name: refused + - name: yxdomain + - name: yxrrset + - name: nxrrset + - name: notauth + - name: notzone + - name: badsig + - name: badkey + - name: badtime + - name: badmode + - name: badname + - name: badalg + - name: badtrunc + - name: badcookie + - name: other diff --git a/src/go/collectors/go.d.plugin/modules/coredns/metrics.go b/src/go/collectors/go.d.plugin/modules/coredns/metrics.go new file mode 100644 index 00000000000000..77061751e7b230 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/metrics.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + mtx "github.com/netdata/go.d.plugin/pkg/metrics" +) + +func newMetrics() *metrics { + mx := &metrics{} + mx.PerServer = make(map[string]*requestResponse) + mx.PerZone = make(map[string]*requestResponse) + + return mx +} + +type metrics struct { + Panic mtx.Gauge `stm:"panic_total"` + NoZoneDropped mtx.Gauge `stm:"no_matching_zone_dropped_total"` + Summary requestResponse `stm:""` + PerServer map[string]*requestResponse `stm:""` + PerZone map[string]*requestResponse `stm:""` +} + +type requestResponse struct { + Request request `stm:"request"` + Response response `stm:"response"` +} + +type request struct { + Total mtx.Gauge `stm:"total"` + PerStatus struct { + Processed mtx.Gauge `stm:"processed"` + Dropped mtx.Gauge `stm:"dropped"` + } `stm:"per_status"` + PerProto struct { + UDP mtx.Gauge `stm:"udp"` + TCP mtx.Gauge `stm:"tcp"` + } `stm:"per_proto"` + PerIPFamily struct { + IPv4 mtx.Gauge `stm:"v4"` + IPv6 mtx.Gauge `stm:"v6"` + } `stm:"per_ip_family"` + // https://github.com/coredns/coredns/blob/master/plugin/metrics/vars/report.go + PerType struct { + A mtx.Gauge `stm:"A"` + AAAA mtx.Gauge `stm:"AAAA"` + MX mtx.Gauge `stm:"MX"` + SOA mtx.Gauge `stm:"SOA"` + CNAME mtx.Gauge `stm:"CNAME"` + PTR mtx.Gauge `stm:"PTR"` + TXT mtx.Gauge `stm:"TXT"` + NS mtx.Gauge `stm:"NS"` + SRV mtx.Gauge `stm:"SRV"` + DS mtx.Gauge `stm:"DS"` + DNSKEY mtx.Gauge `stm:"DNSKEY"` + RRSIG mtx.Gauge `stm:"RRSIG"` + NSEC mtx.Gauge `stm:"NSEC"` + NSEC3 mtx.Gauge `stm:"NSEC3"` + IXFR mtx.Gauge `stm:"IXFR"` + ANY mtx.Gauge `stm:"ANY"` + Other mtx.Gauge `stm:"other"` 
+ } `stm:"per_type"` + //Duration struct { + // LE000025 mtx.Gauge `stm:"0.00025"` + // LE00005 mtx.Gauge `stm:"0.0005"` + // LE0001 mtx.Gauge `stm:"0.001"` + // LE0002 mtx.Gauge `stm:"0.002"` + // LE0004 mtx.Gauge `stm:"0.004"` + // LE0008 mtx.Gauge `stm:"0.008"` + // LE0016 mtx.Gauge `stm:"0.016"` + // LE0032 mtx.Gauge `stm:"0.032"` + // LE0064 mtx.Gauge `stm:"0.064"` + // LE0128 mtx.Gauge `stm:"0.128"` + // LE0256 mtx.Gauge `stm:"0.256"` + // LE0512 mtx.Gauge `stm:"0.512"` + // LE1024 mtx.Gauge `stm:"1.024"` + // LE2048 mtx.Gauge `stm:"2.048"` + // LE4096 mtx.Gauge `stm:"4.096"` + // LE8192 mtx.Gauge `stm:"8.192"` + // LEInf mtx.Gauge `stm:"+Inf"` + //} `stm:"duration_seconds_bucket"` +} + +// https://github.com/miekg/dns/blob/master/types.go +// https://github.com/miekg/dns/blob/master/msg.go#L169 +type response struct { + Total mtx.Gauge `stm:"total"` + PerRcode struct { + NOERROR mtx.Gauge `stm:"NOERROR"` + FORMERR mtx.Gauge `stm:"FORMERR"` + SERVFAIL mtx.Gauge `stm:"SERVFAIL"` + NXDOMAIN mtx.Gauge `stm:"NXDOMAIN"` + NOTIMP mtx.Gauge `stm:"NOTIMP"` + REFUSED mtx.Gauge `stm:"REFUSED"` + YXDOMAIN mtx.Gauge `stm:"YXDOMAIN"` + YXRRSET mtx.Gauge `stm:"YXRRSET"` + NXRRSET mtx.Gauge `stm:"NXRRSET"` + NOTAUTH mtx.Gauge `stm:"NOTAUTH"` + NOTZONE mtx.Gauge `stm:"NOTZONE"` + BADSIG mtx.Gauge `stm:"BADSIG"` + BADKEY mtx.Gauge `stm:"BADKEY"` + BADTIME mtx.Gauge `stm:"BADTIME"` + BADMODE mtx.Gauge `stm:"BADMODE"` + BADNAME mtx.Gauge `stm:"BADNAME"` + BADALG mtx.Gauge `stm:"BADALG"` + BADTRUNC mtx.Gauge `stm:"BADTRUNC"` + BADCOOKIE mtx.Gauge `stm:"BADCOOKIE"` + Other mtx.Gauge `stm:"other"` + } `stm:"per_rcode"` +} diff --git a/src/go/collectors/go.d.plugin/modules/coredns/testdata/no_version/no_load.txt b/src/go/collectors/go.d.plugin/modules/coredns/testdata/no_version/no_load.txt new file mode 100644 index 00000000000000..f0de841f05e100 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/testdata/no_version/no_load.txt @@ -0,0 +1,6 @@ +# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built. +# TYPE coredns_build_info gauge +coredns_build_info{goversion="go1.14.4",revision="f59c03d"} 1 +# HELP coredns_panics_total A metrics that counts the number of panics. +# TYPE coredns_panics_total counter +coredns_panics_total 99 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/no_load.txt b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/no_load.txt new file mode 100644 index 00000000000000..8fee1a73c717e1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/no_load.txt @@ -0,0 +1,6 @@ +# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built. +# TYPE coredns_build_info gauge +coredns_build_info{goversion="go1.14.1",revision="1766568",version="1.6.9"} 1 +# HELP coredns_panic_count_total A metrics that counts the number of panics. 
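+# CoreDNS 1.7.0 renamed this counter to coredns_panics_total; pre-1.7.0
+# releases, such as the 1.6.9 in this fixture, use coredns_panic_count_total,
+# which is why separate per-version fixtures exist.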
+# TYPE coredns_panic_count_total counter +coredns_panic_count_total 99 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/some_load.txt b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/some_load.txt new file mode 100644 index 00000000000000..15c4a57ec0b9b5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version169/some_load.txt @@ -0,0 +1,180 @@ +# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built. +# TYPE coredns_build_info gauge +coredns_build_info{goversion="go1.14.1",revision="1766568",version="1.6.9"} 1 +# HELP coredns_panic_count_total A metrics that counts the number of panics. +# TYPE coredns_panic_count_total counter +coredns_panic_count_total 0 +# HELP coredns_dns_request_count_total Counter of DNS requests made per zone, protocol and family. +# TYPE coredns_dns_request_count_total counter +coredns_dns_request_count_total{family="1",proto="udp",server="",zone="dropped"} 21 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="coredns.io."} 6 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="dropped"} 9 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="ya.ru."} 9 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="coredns.io."} 13 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="dropped"} 12 +coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="ya.ru."} 12 +# HELP coredns_dns_request_type_count_total Counter of DNS requests per type, per zone. +# TYPE coredns_dns_request_type_count_total counter +coredns_dns_request_type_count_total{server="",type="A",zone="dropped"} 7 +coredns_dns_request_type_count_total{server="",type="AAAA",zone="dropped"} 7 +coredns_dns_request_type_count_total{server="",type="MX",zone="dropped"} 7 +coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="coredns.io."} 2 +coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="dropped"} 3 +coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="ya.ru."} 3 +coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="coredns.io."} 2 +coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="dropped"} 3 +coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="ya.ru."} 3 +coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="coredns.io."} 2 +coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="dropped"} 3 +coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="ya.ru."} 3 +coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="coredns.io."} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="dropped"} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="ya.ru."} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="coredns.io."} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="dropped"} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="ya.ru."} 4 +coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="coredns.io."} 5 +coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="dropped"} 4 
+coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="ya.ru."} 4 +# HELP coredns_dns_response_rcode_count_total Counter of response status codes. +# TYPE coredns_dns_response_rcode_count_total counter +coredns_dns_response_rcode_count_total{rcode="NOERROR",server="dns://:53",zone="coredns.io."} 6 +coredns_dns_response_rcode_count_total{rcode="NOERROR",server="dns://:54",zone="coredns.io."} 13 +coredns_dns_response_rcode_count_total{rcode="REFUSED",server="",zone="dropped"} 21 +coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:53",zone="dropped"} 9 +coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:53",zone="ya.ru."} 9 +coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:54",zone="dropped"} 12 +coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:54",zone="ya.ru."} 12 +# HELP coredns_dns_request_duration_seconds Histogram of the time (in seconds) each request took. +# TYPE coredns_dns_request_duration_seconds histogram +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.00025"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.0005"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.001"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.002"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.004"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.008"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.016"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.032"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.064"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.128"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.256"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.512"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="1.024"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="2.048"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="4.096"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="8.192"} 21 +coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="+Inf"} 21 +coredns_dns_request_duration_seconds_sum{server="",zone="dropped"} 0.00015171000000000005 +coredns_dns_request_duration_seconds_count{server="",zone="dropped"} 21 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.00025"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.0005"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.001"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.002"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.004"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.008"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.016"} 1 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.032"} 1 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.064"} 5 
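+# Histogram buckets are cumulative: each `le` bucket counts every request
+# answered within that bound, so a series whose buckets all carry the same
+# value (like the "dropped" zone above) completed all of its requests within
+# the smallest bucket.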
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.128"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.256"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.512"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="1.024"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="2.048"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="4.096"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="8.192"} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="+Inf"} 6 +coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="coredns.io."} 0.278949832 +coredns_dns_request_duration_seconds_count{server="dns://:53",zone="coredns.io."} 6 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.00025"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.0005"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.001"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.002"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.004"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.008"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.016"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.032"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.064"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.128"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.256"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.512"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="1.024"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="2.048"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="4.096"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="8.192"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="+Inf"} 9 +coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="dropped"} 7.657700000000001e-05 +coredns_dns_request_duration_seconds_count{server="dns://:53",zone="dropped"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.00025"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.0005"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.001"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.002"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.004"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.008"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.016"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.032"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.064"} 9 
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.128"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.256"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.512"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="1.024"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="2.048"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="4.096"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="8.192"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="+Inf"} 9 +coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="ya.ru."} 0.001103838 +coredns_dns_request_duration_seconds_count{server="dns://:53",zone="ya.ru."} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.00025"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.0005"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.001"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.002"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.004"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.008"} 0 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.016"} 9 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.032"} 10 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.064"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.128"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.256"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.512"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="1.024"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="2.048"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="4.096"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="8.192"} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="+Inf"} 13 +coredns_dns_request_duration_seconds_sum{server="dns://:54",zone="coredns.io."} 0.25558616300000003 +coredns_dns_request_duration_seconds_count{server="dns://:54",zone="coredns.io."} 13 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.00025"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.0005"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.001"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.002"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.004"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.008"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.016"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.032"} 12 
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.064"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.128"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.256"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.512"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="1.024"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="2.048"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="4.096"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="8.192"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="+Inf"} 12 +coredns_dns_request_duration_seconds_sum{server="dns://:54",zone="dropped"} 9.260400000000001e-05 +coredns_dns_request_duration_seconds_count{server="dns://:54",zone="dropped"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.00025"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.0005"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.001"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.002"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.004"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.008"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.016"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.032"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.064"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.128"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.256"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.512"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="1.024"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="2.048"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="4.096"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="8.192"} 12 +coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="+Inf"} 12 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/no_load.txt b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/no_load.txt new file mode 100644 index 00000000000000..ba343ab57fe905 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/no_load.txt @@ -0,0 +1,6 @@ +# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built. +# TYPE coredns_build_info gauge +coredns_build_info{goversion="go1.14.4",revision="f59c03d",version="1.7.0"} 1 +# HELP coredns_panics_total A metrics that counts the number of panics. 
+# TYPE coredns_panics_total counter
+coredns_panics_total 99
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/some_load.txt b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/some_load.txt
new file mode 100644
index 00000000000000..34f0a9a221ffc1
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/coredns/testdata/version170/some_load.txt
@@ -0,0 +1,38 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.4",revision="f59c03d",version="1.7.0"} 1
+# HELP coredns_panics_total A metrics that counts the number of panics.
+# TYPE coredns_panics_total counter
+coredns_panics_total 0
+# HELP coredns_dns_requests_total Counter of DNS requests made per zone, protocol and family.
+# TYPE coredns_dns_requests_total counter
+coredns_dns_requests_total{family="1",proto="udp",server="",type="A",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="",type="AAAA",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="",type="MX",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="coredns.io."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="ya.ru."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="coredns.io."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="ya.ru."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="coredns.io."} 5
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="ya.ru."} 4
+# HELP coredns_dns_responses_total Counter of response status codes.
+# TYPE coredns_dns_responses_total counter
+coredns_dns_responses_total{rcode="NOERROR",server="dns://:53",zone="coredns.io."} 6
+coredns_dns_responses_total{rcode="NOERROR",server="dns://:54",zone="coredns.io."} 13
+coredns_dns_responses_total{rcode="REFUSED",server="",zone="dropped"} 21
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:53",zone="dropped"} 9
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:53",zone="ya.ru."} 9
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:54",zone="dropped"} 12
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:54",zone="ya.ru."} 12
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/README.md b/src/go/collectors/go.d.plugin/modules/couchbase/README.md
new file mode 120000
index 00000000000000..fa8d05e1c4fba2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/couchbase/README.md
@@ -0,0 +1 @@
+integrations/couchbase.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/charts.go b/src/go/collectors/go.d.plugin/modules/couchbase/charts.go
new file mode 100644
index 00000000000000..e54c04ab6524f0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/couchbase/charts.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+type (
+	Charts = module.Charts
+	Chart  = module.Chart
+	Dim    = module.Dim
+)
+
+var bucketQuotaPercentUsedChart = Chart{
+	ID:    "bucket_quota_percent_used",
+	Title: "Quota Percent Used Per Bucket",
+	Units: "%",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_quota_percent_used",
+}
+
+var bucketOpsPerSecChart = Chart{
+	ID:    "bucket_ops_per_sec",
+	Title: "Operations Per Second Per Bucket",
+	Units: "ops/s",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_ops_per_sec",
+	Type:  module.Stacked,
+}
+
+var bucketDiskFetchesChart = Chart{
+	ID:    "bucket_disk_fetches",
+	Title: "Disk Fetches Per Bucket",
+	Units: "fetches",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_disk_fetches",
+	Type:  module.Stacked,
+}
+
+var bucketItemCountChart = Chart{
+	ID:    "bucket_item_count",
+	Title: "Item Count Per Bucket",
+	Units: "items",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_item_count",
+	Type:  module.Stacked,
+}
+
+var bucketDiskUsedChart = Chart{
+	ID:    "bucket_disk_used_stats",
+	Title: "Disk Used Per Bucket",
+	Units: "bytes",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_disk_used_stats",
+	Type:  module.Stacked,
+}
+
+var bucketDataUsedChart = Chart{
+	ID:    "bucket_data_used",
+	Title: "Data Used Per Bucket",
+	Units: "bytes",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_data_used",
+	Type:  module.Stacked,
+}
+
+var bucketMemUsedChart = Chart{
+	ID:    "bucket_mem_used",
+	Title: "Memory Used Per Bucket",
+	Units: "bytes",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_mem_used",
+	Type:  module.Stacked,
+}
+
+var bucketVBActiveNumNonResidentChart = Chart{
+	ID:    "bucket_vb_active_num_non_resident_stats",
+	Title: "Number Of Non-Resident Items Per Bucket",
+	Units: "items",
+	Fam:   "buckets basic stats",
+	Ctx:   "couchbase.bucket_vb_active_num_non_resident",
+	Type:  module.Stacked,
+}
diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/collect.go b/src/go/collectors/go.d.plugin/modules/couchbase/collect.go
new file mode 100644
index 00000000000000..2b1672376cb09a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/couchbase/collect.go
@@ -0,0
+1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchbase + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathBucketsStats = "/pools/default/buckets" + + precision = 1000 +) + +func (cb *Couchbase) collect() (map[string]int64, error) { + ms, err := cb.scrapeCouchbase() + if err != nil { + return nil, fmt.Errorf("error on scraping couchbase: %v", err) + } + if ms.empty() { + return nil, nil + } + + collected := make(map[string]int64) + cb.collectBasicStats(collected, ms) + + return collected, nil +} + +func (cb *Couchbase) collectBasicStats(collected map[string]int64, ms *cbMetrics) { + for _, b := range ms.BucketsBasicStats { + + if !cb.collectedBuckets[b.Name] { + cb.collectedBuckets[b.Name] = true + cb.addBucketToCharts(b.Name) + } + + bs := b.BasicStats + collected[indexDimID(b.Name, "quota_percent_used")] = int64(bs.QuotaPercentUsed * precision) + collected[indexDimID(b.Name, "ops_per_sec")] = int64(bs.OpsPerSec * precision) + collected[indexDimID(b.Name, "disk_fetches")] = int64(bs.DiskFetches) + collected[indexDimID(b.Name, "item_count")] = int64(bs.ItemCount) + collected[indexDimID(b.Name, "disk_used")] = int64(bs.DiskUsed) + collected[indexDimID(b.Name, "data_used")] = int64(bs.DataUsed) + collected[indexDimID(b.Name, "mem_used")] = int64(bs.MemUsed) + collected[indexDimID(b.Name, "vb_active_num_non_resident")] = int64(bs.VbActiveNumNonResident) + } +} + +func (cb *Couchbase) addBucketToCharts(bucket string) { + cb.addDimToChart(bucketQuotaPercentUsedChart.ID, &module.Dim{ + ID: indexDimID(bucket, "quota_percent_used"), + Name: bucket, + Div: precision, + }) + + cb.addDimToChart(bucketOpsPerSecChart.ID, &module.Dim{ + ID: indexDimID(bucket, "ops_per_sec"), + Name: bucket, + Div: precision, + }) + + cb.addDimToChart(bucketDiskFetchesChart.ID, &module.Dim{ + ID: indexDimID(bucket, "disk_fetches"), + Name: bucket, + }) + + cb.addDimToChart(bucketItemCountChart.ID, &module.Dim{ + ID: indexDimID(bucket, "item_count"), + Name: bucket, + }) + + cb.addDimToChart(bucketDiskUsedChart.ID, &module.Dim{ + ID: indexDimID(bucket, "disk_used"), + Name: bucket, + }) + + cb.addDimToChart(bucketDataUsedChart.ID, &module.Dim{ + ID: indexDimID(bucket, "data_used"), + Name: bucket, + }) + + cb.addDimToChart(bucketMemUsedChart.ID, &module.Dim{ + ID: indexDimID(bucket, "mem_used"), + Name: bucket, + }) + + cb.addDimToChart(bucketVBActiveNumNonResidentChart.ID, &module.Dim{ + ID: indexDimID(bucket, "vb_active_num_non_resident"), + Name: bucket, + }) +} + +func (cb *Couchbase) addDimToChart(chartID string, dim *module.Dim) { + chart := cb.Charts().Get(chartID) + if chart == nil { + cb.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID) + return + } + if err := chart.AddDim(dim); err != nil { + cb.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (cb *Couchbase) scrapeCouchbase() (*cbMetrics, error) { + ms := &cbMetrics{} + req, _ := web.NewHTTPRequest(cb.Request) + req.URL.Path = urlPathBucketsStats + req.URL.RawQuery = url.Values{"skipMap": []string{"true"}}.Encode() + + if err := cb.doOKDecode(req, &ms.BucketsBasicStats); err != nil { + return nil, err + } + return ms, nil +} + +func (cb *Couchbase) doOKDecode(req *http.Request, in interface{}) error { + resp, err := cb.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if 
resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func indexDimID(name, metric string) string { + return fmt.Sprintf("bucket_%s_%s", name, metric) +} diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/config_schema.json b/src/go/collectors/go.d.plugin/modules/couchbase/config_schema.json new file mode 100644 index 00000000000000..307a1261b0d9fb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/couchbase job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/couchbase.go b/src/go/collectors/go.d.plugin/modules/couchbase/couchbase.go new file mode 100644 index 00000000000000..b92ec2d769f112 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/couchbase.go @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchbase + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("couchbase", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Couchbase { + return &Couchbase{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8091", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}, + }, + }, + }, + collectedBuckets: make(map[string]bool), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + } + Couchbase struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts + collectedBuckets map[string]bool + } +) + +func (cb *Couchbase) Cleanup() { + if cb.httpClient == nil { + return + } + cb.httpClient.CloseIdleConnections() +} + +func (cb *Couchbase) Init() bool { + err := cb.validateConfig() + if err != nil { + cb.Errorf("check configuration: %v", err) + return false + } + + httpClient, err := cb.initHTTPClient() + if err != nil { + cb.Errorf("init HTTP client: %v", err) + return false + } + cb.httpClient = httpClient + + charts, err := cb.initCharts() + if err != nil { + cb.Errorf("init charts: %v", err) + return false + } + + cb.charts = charts + return true +} + +func (cb 
*Couchbase) Check() bool { + return len(cb.Collect()) > 0 +} + +func (cb *Couchbase) Charts() *Charts { + return cb.charts +} + +func (cb *Couchbase) Collect() map[string]int64 { + mx, err := cb.collect() + if err != nil { + cb.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/couchbase_test.go b/src/go/collectors/go.d.plugin/modules/couchbase/couchbase_test.go new file mode 100644 index 00000000000000..da0fa4e6658fb6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/couchbase_test.go @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchbase + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v660BucketsBasicStats, _ = os.ReadFile("testdata/6.6.0/buckets_basic_stats.json") +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v660BucketsBasicStats": v660BucketsBasicStats, + } { + require.NotNilf(t, data, name) + } +} + +func TestCouchbase_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'URL'": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "", + }, + }, + }, + }, + "fails on invalid URL": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "127.0.0.1:9090", + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cb := New() + cb.Config = test.config + + if test.wantFail { + assert.False(t, cb.Init()) + } else { + assert.True(t, cb.Init()) + } + }) + } +} + +func TestCouchbase_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (cb *Couchbase, cleanup func()) + wantFail bool + }{ + "success on valid response v6.6.0": { + prepare: prepareCouchbaseV660, + }, + "fails on response with invalid data": { + wantFail: true, + prepare: prepareCouchbaseInvalidData, + }, + "fails on 404 response": { + wantFail: true, + prepare: prepareCouchbase404, + }, + "fails on connection refused": { + wantFail: true, + prepare: prepareCouchbaseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cb, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, cb.Check()) + } else { + assert.True(t, cb.Check()) + } + }) + } +} + +func TestCouchbase_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (cb *Couchbase, cleanup func()) + wantCollected map[string]int64 + }{ + "success on valid response v6.6.0": { + prepare: prepareCouchbaseV660, + wantCollected: map[string]int64{ + "bucket_beer-sample_data_used": 13990431, + "bucket_beer-sample_disk_fetches": 1, + "bucket_beer-sample_disk_used": 27690472, + "bucket_beer-sample_item_count": 7303, + "bucket_beer-sample_mem_used": 34294872, + "bucket_beer-sample_ops_per_sec": 1100, + "bucket_beer-sample_quota_percent_used": 32706, + "bucket_beer-sample_vb_active_num_non_resident": 1, + "bucket_gamesim-sample_data_used": 5371804, + "bucket_gamesim-sample_disk_fetches": 1, + "bucket_gamesim-sample_disk_used": 13821793, + 
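+				// Float gauges are scaled by the collector's precision
+				// constant (1000) before being stored as int64: the
+				// fixture's opsPerSec 1.1 becomes 1100 here, and
+				// quotaPercentUsed 28.216... becomes 28216.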
"bucket_gamesim-sample_item_count": 586, + "bucket_gamesim-sample_mem_used": 29586696, + "bucket_gamesim-sample_ops_per_sec": 1100, + "bucket_gamesim-sample_quota_percent_used": 28216, + "bucket_gamesim-sample_vb_active_num_non_resident": 1, + "bucket_travel-sample_data_used": 53865472, + "bucket_travel-sample_disk_fetches": 1, + "bucket_travel-sample_disk_used": 62244260, + "bucket_travel-sample_item_count": 31591, + "bucket_travel-sample_mem_used": 54318184, + "bucket_travel-sample_ops_per_sec": 1100, + "bucket_travel-sample_quota_percent_used": 51801, + "bucket_travel-sample_vb_active_num_non_resident": 1, + }, + }, + "fails on response with invalid data": { + prepare: prepareCouchbaseInvalidData, + }, + "fails on 404 response": { + prepare: prepareCouchbase404, + }, + "fails on connection refused": { + prepare: prepareCouchbaseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cb, cleanup := test.prepare(t) + defer cleanup() + + collected := cb.Collect() + + assert.Equal(t, test.wantCollected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, cb, collected) + }) + } +} + +func prepareCouchbaseV660(t *testing.T) (cb *Couchbase, cleanup func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(v660BucketsBasicStats) + })) + + cb = New() + cb.URL = srv.URL + require.True(t, cb.Init()) + + return cb, srv.Close +} + +func prepareCouchbaseInvalidData(t *testing.T) (*Couchbase, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + cb := New() + cb.URL = srv.URL + require.True(t, cb.Init()) + + return cb, srv.Close +} + +func prepareCouchbase404(t *testing.T) (*Couchbase, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + cb := New() + cb.URL = srv.URL + require.True(t, cb.Init()) + + return cb, srv.Close +} + +func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) { + t.Helper() + cb := New() + cb.URL = "http://127.0.0.1:38001" + require.True(t, cb.Init()) + + return cb, func() {} +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cb *Couchbase, collected map[string]int64) { + for _, chart := range *cb.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/init.go b/src/go/collectors/go.d.plugin/modules/couchbase/init.go new file mode 100644 index 00000000000000..c274ee572d3d7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/init.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchbase + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (cb *Couchbase) initCharts() (*Charts, error) { + var bucketCharts = module.Charts{ + bucketQuotaPercentUsedChart.Copy(), + bucketOpsPerSecChart.Copy(), + bucketDiskFetchesChart.Copy(), + bucketItemCountChart.Copy(), + bucketDiskUsedChart.Copy(), + bucketDataUsedChart.Copy(), + 
bucketMemUsedChart.Copy(), + bucketVBActiveNumNonResidentChart.Copy(), + } + return bucketCharts.Copy(), nil +} + +func (cb Couchbase) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(cb.Client) +} + +func (cb Couchbase) validateConfig() error { + if cb.URL == "" { + return errors.New("URL not set") + } + if _, err := web.NewHTTPRequest(cb.Request); err != nil { + return err + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/integrations/couchbase.md b/src/go/collectors/go.d.plugin/modules/couchbase/integrations/couchbase.md new file mode 100644 index 00000000000000..4a1e5b6d7f2527 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/integrations/couchbase.md @@ -0,0 +1,212 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/couchbase/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/couchbase/metadata.yaml" +sidebar_label: "Couchbase" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Couchbase + + +<img src="https://netdata.cloud/img/couchbase.svg" width="150"/> + + +Plugin: go.d.plugin +Module: couchbase + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Couchbase servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Couchbase instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage | +| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s | +| couchbase.bucket_disk_fetches | a dimension per bucket | fetches | +| couchbase.bucket_item_count | a dimension per bucket | items | +| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes | +| couchbase.bucket_data_used | a dimension per bucket | bytes | +| couchbase.bucket_mem_used | a dimension per bucket | bytes | +| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/couchbase.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/couchbase.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>All options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8091 | yes | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 2 | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client tls certificate. | | no | +| tls_key | Client tls key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8091 + +``` +</details> + +##### Basic HTTP auth + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8091 + username: foo + password: bar + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8091 + + - name: remote + url: http://203.0.113.0:8091 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m couchbase + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/metadata.yaml b/src/go/collectors/go.d.plugin/modules/couchbase/metadata.yaml new file mode 100644 index 00000000000000..de21e924dd097b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/metadata.yaml @@ -0,0 +1,214 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-couchbase + plugin_name: go.d.plugin + module_name: couchbase + monitored_instance: + name: Couchbase + link: https://www.couchbase.com/ + icon_filename: couchbase.svg + categories: + - data-collection.database-servers + keywords: + - couchbase + - databases + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Couchbase servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/couchbase.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: All options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8091 + required: true + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client tls certificate. + default_value: "" + required: false + - name: tls_key + description: Client tls key. 
+ default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8091 + - name: Basic HTTP auth + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8091 + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8091 + + - name: remote + url: http://203.0.113.0:8091 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: couchbase.bucket_quota_percent_used + description: Quota Percent Used Per Bucket + unit: percentage + chart_type: line + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_ops_per_sec + description: Operations Per Second Per Bucket + unit: ops/s + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_disk_fetches + description: Disk Fetches Per Bucket + unit: fetches + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_item_count + description: Item Count Per Bucket + unit: items + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_disk_used_stats + description: Disk Used Per Bucket + unit: bytes + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_data_used + description: Data Used Per Bucket + unit: bytes + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_mem_used + description: Memory Used Per Bucket + unit: bytes + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: couchbase.bucket_vb_active_num_non_resident + description: Number Of Non-Resident Items Per Bucket + unit: items + chart_type: stacked + dimensions: + - name: a dimension per bucket diff --git a/src/go/collectors/go.d.plugin/modules/couchbase/metrics.go b/src/go/collectors/go.d.plugin/modules/couchbase/metrics.go new file mode 100644 index 00000000000000..c4f23304ba9e9c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/metrics.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchbase + +type cbMetrics struct { + // https://developer.couchbase.com/resources/best-practice-guides/monitoring-guide.pdf + BucketsBasicStats []bucketsBasicStats +} + +func (m cbMetrics) empty() bool { + switch { + case m.hasBucketsStats(): + return false + } + return true +} + +func (m cbMetrics) hasBucketsStats() bool { return len(m.BucketsBasicStats) > 0 } + +type bucketsBasicStats struct { + Name string `json:"name"` + + BasicStats struct { + DataUsed float64 `json:"dataUsed"` + DiskFetches float64 `json:"diskFetches"` + ItemCount float64 `json:"itemCount"` + DiskUsed float64 `json:"diskUsed"` + MemUsed float64 `json:"memUsed"` + OpsPerSec float64 `json:"opsPerSec"` + QuotaPercentUsed float64 `json:"quotaPercentUsed"` + VbActiveNumNonResident float64 `json:"vbActiveNumNonResident"` + } `json:"basicStats"` +} diff --git 
a/src/go/collectors/go.d.plugin/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json b/src/go/collectors/go.d.plugin/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json new file mode 100644 index 00000000000000..3749add7902a9f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json @@ -0,0 +1,422 @@ +[ + { + "name": "beer-sample", + "uuid": "bf10ab11911f1c065db5fd58c5fbc0b6", + "bucketType": "membase", + "authType": "sasl", + "uri": "/pools/default/buckets/beer-sample?bucket_uuid=bf10ab11911f1c065db5fd58c5fbc0b6", + "streamingUri": "/pools/default/bucketsStreaming/beer-sample?bucket_uuid=bf10ab11911f1c065db5fd58c5fbc0b6", + "localRandomKeyUri": "/pools/default/buckets/beer-sample/localRandomKey", + "controllers": { + "compactAll": "/pools/default/buckets/beer-sample/controller/compactBucket", + "compactDB": "/pools/default/buckets/beer-sample/controller/compactDatabases", + "purgeDeletes": "/pools/default/buckets/beer-sample/controller/unsafePurgeBucket", + "startRecovery": "/pools/default/buckets/beer-sample/controller/startRecovery" + }, + "nodes": [ + { + "couchApiBaseHTTPS": "https://172.17.0.2:18092/beer-sample%2Bbf10ab11911f1c065db5fd58c5fbc0b6", + "couchApiBase": "http://172.17.0.2:8092/beer-sample%2Bbf10ab11911f1c065db5fd58c5fbc0b6", + "systemStats": { + "cpu_utilization_rate": 15.21035598705502, + "cpu_stolen_rate": 0, + "swap_total": 0, + "swap_used": 0, + "mem_total": 33587437568, + "mem_free": 30532227072, + "mem_limit": 33587437568, + "cpu_cores_available": 6, + "allocstall": 0 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 102960477, + "couch_docs_data_size": 72439963, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 796048, + "couch_views_data_size": 787744, + "curr_items": 39480, + "curr_items_tot": 39480, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 118199752, + "ops": 0, + "vb_active_num_non_resident": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "638", + "memoryTotal": 33587437568, + "memoryFree": 30532227072, + "mcdMemoryReserved": 25625, + "mcdMemoryAllocated": 25625, + "replication": 0, + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@cb.local", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500", + "clusterCompatibility": 393222, + "version": "6.6.0-7909-enterprise", + "os": "x86_64-unknown-linux-gnu", + "cpuCount": 6, + "ports": { + "direct": 11210, + "httpsCAPI": 18092, + "httpsMgmt": 18091, + "distTCP": 21100, + "distTLS": 21150 + }, + "services": [ + "cbas", + "eventing", + "fts", + "index", + "kv", + "n1ql" + ], + "nodeEncryption": false, + "configuredHostname": "127.0.0.1:8091", + "addressFamily": "inet", + "externalListeners": [ + { + "afamily": "inet", + "nodeEncryption": false + }, + { + "afamily": "inet6", + "nodeEncryption": false + } + ] + } + ], + "stats": { + "uri": "/pools/default/buckets/beer-sample/stats", + "directoryURI": "/pools/default/buckets/beer-sample/statsDirectory", + "nodeStatsListURI": "/pools/default/buckets/beer-sample/nodes" + }, + "nodeLocator": "vbucket", + "saslPassword": "47809efed0156c874b91bbdfeba89912", + "ddocs": { + "uri": "/pools/default/buckets/beer-sample/ddocs" + }, + "replicaIndex": true, + "autoCompactionSettings": false, + "maxTTL": 0, + "compressionMode": "passive", + "replicaNumber": 1, + "threadsNumber": 3, + "quota": { + "ram": 104857600, + "rawRAM": 104857600 + 
}, + "basicStats": { + "quotaPercentUsed": 32.70613861083984, + "opsPerSec": 1.1, + "diskFetches": 1, + "itemCount": 7303, + "diskUsed": 27690472, + "dataUsed": 13990431, + "memUsed": 34294872, + "vbActiveNumNonResident": 1 + }, + "evictionPolicy": "valueOnly", + "durabilityMinLevel": "none", + "conflictResolutionType": "seqno", + "bucketCapabilitiesVer": "", + "bucketCapabilities": [ + "durableWrite", + "tombstonedUserXAttrs", + "couchapi", + "dcp", + "cbhello", + "touch", + "cccp", + "xdcrCheckpointing", + "nodesExt", + "xattr" + ] + }, + { + "name": "gamesim-sample", + "uuid": "23ff61363bc4df9af4eb9c2198fc74d3", + "bucketType": "membase", + "authType": "sasl", + "uri": "/pools/default/buckets/gamesim-sample?bucket_uuid=23ff61363bc4df9af4eb9c2198fc74d3", + "streamingUri": "/pools/default/bucketsStreaming/gamesim-sample?bucket_uuid=23ff61363bc4df9af4eb9c2198fc74d3", + "localRandomKeyUri": "/pools/default/buckets/gamesim-sample/localRandomKey", + "controllers": { + "compactAll": "/pools/default/buckets/gamesim-sample/controller/compactBucket", + "compactDB": "/pools/default/buckets/gamesim-sample/controller/compactDatabases", + "purgeDeletes": "/pools/default/buckets/gamesim-sample/controller/unsafePurgeBucket", + "startRecovery": "/pools/default/buckets/gamesim-sample/controller/startRecovery" + }, + "nodes": [ + { + "couchApiBaseHTTPS": "https://172.17.0.2:18092/gamesim-sample%2B23ff61363bc4df9af4eb9c2198fc74d3", + "couchApiBase": "http://172.17.0.2:8092/gamesim-sample%2B23ff61363bc4df9af4eb9c2198fc74d3", + "systemStats": { + "cpu_utilization_rate": 15.21035598705502, + "cpu_stolen_rate": 0, + "swap_total": 0, + "swap_used": 0, + "mem_total": 33587437568, + "mem_free": 30532227072, + "mem_limit": 33587437568, + "cpu_cores_available": 6, + "allocstall": 0 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 102960477, + "couch_docs_data_size": 72439963, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 796048, + "couch_views_data_size": 787744, + "curr_items": 39480, + "curr_items_tot": 39480, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 118199752, + "ops": 0, + "vb_active_num_non_resident": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "638", + "memoryTotal": 33587437568, + "memoryFree": 30532227072, + "mcdMemoryReserved": 25625, + "mcdMemoryAllocated": 25625, + "replication": 0, + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@cb.local", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500", + "clusterCompatibility": 393222, + "version": "6.6.0-7909-enterprise", + "os": "x86_64-unknown-linux-gnu", + "cpuCount": 6, + "ports": { + "direct": 11210, + "httpsCAPI": 18092, + "httpsMgmt": 18091, + "distTCP": 21100, + "distTLS": 21150 + }, + "services": [ + "cbas", + "eventing", + "fts", + "index", + "kv", + "n1ql" + ], + "nodeEncryption": false, + "configuredHostname": "127.0.0.1:8091", + "addressFamily": "inet", + "externalListeners": [ + { + "afamily": "inet", + "nodeEncryption": false + }, + { + "afamily": "inet6", + "nodeEncryption": false + } + ] + } + ], + "stats": { + "uri": "/pools/default/buckets/gamesim-sample/stats", + "directoryURI": "/pools/default/buckets/gamesim-sample/statsDirectory", + "nodeStatsListURI": "/pools/default/buckets/gamesim-sample/nodes" + }, + "nodeLocator": "vbucket", + "saslPassword": "39cf71a1da3f298bed52d19973dce967", + "ddocs": { + "uri": 
"/pools/default/buckets/gamesim-sample/ddocs" + }, + "replicaIndex": true, + "autoCompactionSettings": false, + "maxTTL": 0, + "compressionMode": "passive", + "replicaNumber": 1, + "threadsNumber": 3, + "quota": { + "ram": 104857600, + "rawRAM": 104857600 + }, + "basicStats": { + "quotaPercentUsed": 28.21607208251953, + "opsPerSec": 1.1, + "diskFetches": 1, + "itemCount": 586, + "diskUsed": 13821793, + "dataUsed": 5371804, + "memUsed": 29586696, + "vbActiveNumNonResident": 1 + }, + "evictionPolicy": "valueOnly", + "durabilityMinLevel": "none", + "conflictResolutionType": "seqno", + "bucketCapabilitiesVer": "", + "bucketCapabilities": [ + "durableWrite", + "tombstonedUserXAttrs", + "couchapi", + "dcp", + "cbhello", + "touch", + "cccp", + "xdcrCheckpointing", + "nodesExt", + "xattr" + ] + }, + { + "name": "travel-sample", + "uuid": "68a336f9ec0e0d2150d56298c896d0a9", + "bucketType": "membase", + "authType": "sasl", + "uri": "/pools/default/buckets/travel-sample?bucket_uuid=68a336f9ec0e0d2150d56298c896d0a9", + "streamingUri": "/pools/default/bucketsStreaming/travel-sample?bucket_uuid=68a336f9ec0e0d2150d56298c896d0a9", + "localRandomKeyUri": "/pools/default/buckets/travel-sample/localRandomKey", + "controllers": { + "compactAll": "/pools/default/buckets/travel-sample/controller/compactBucket", + "compactDB": "/pools/default/buckets/travel-sample/controller/compactDatabases", + "purgeDeletes": "/pools/default/buckets/travel-sample/controller/unsafePurgeBucket", + "startRecovery": "/pools/default/buckets/travel-sample/controller/startRecovery" + }, + "nodes": [ + { + "couchApiBaseHTTPS": "https://172.17.0.2:18092/travel-sample%2B68a336f9ec0e0d2150d56298c896d0a9", + "couchApiBase": "http://172.17.0.2:8092/travel-sample%2B68a336f9ec0e0d2150d56298c896d0a9", + "systemStats": { + "cpu_utilization_rate": 15.21035598705502, + "cpu_stolen_rate": 0, + "swap_total": 0, + "swap_used": 0, + "mem_total": 33587437568, + "mem_free": 30532227072, + "mem_limit": 33587437568, + "cpu_cores_available": 6, + "allocstall": 0 + }, + "interestingStats": { + "cmd_get": 0, + "couch_docs_actual_disk_size": 102960477, + "couch_docs_data_size": 72439963, + "couch_spatial_data_size": 0, + "couch_spatial_disk_size": 0, + "couch_views_actual_disk_size": 796048, + "couch_views_data_size": 787744, + "curr_items": 39480, + "curr_items_tot": 39480, + "ep_bg_fetched": 0, + "get_hits": 0, + "mem_used": 118199752, + "ops": 0, + "vb_active_num_non_resident": 0, + "vb_replica_curr_items": 0 + }, + "uptime": "638", + "memoryTotal": 33587437568, + "memoryFree": 30532227072, + "mcdMemoryReserved": 25625, + "mcdMemoryAllocated": 25625, + "replication": 0, + "clusterMembership": "active", + "recoveryType": "none", + "status": "healthy", + "otpNode": "ns_1@cb.local", + "thisNode": true, + "hostname": "172.17.0.2:8091", + "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500", + "clusterCompatibility": 393222, + "version": "6.6.0-7909-enterprise", + "os": "x86_64-unknown-linux-gnu", + "cpuCount": 6, + "ports": { + "direct": 11210, + "httpsCAPI": 18092, + "httpsMgmt": 18091, + "distTCP": 21100, + "distTLS": 21150 + }, + "services": [ + "cbas", + "eventing", + "fts", + "index", + "kv", + "n1ql" + ], + "nodeEncryption": false, + "configuredHostname": "127.0.0.1:8091", + "addressFamily": "inet", + "externalListeners": [ + { + "afamily": "inet", + "nodeEncryption": false + }, + { + "afamily": "inet6", + "nodeEncryption": false + } + ] + } + ], + "stats": { + "uri": "/pools/default/buckets/travel-sample/stats", + "directoryURI": 
"/pools/default/buckets/travel-sample/statsDirectory", + "nodeStatsListURI": "/pools/default/buckets/travel-sample/nodes" + }, + "nodeLocator": "vbucket", + "saslPassword": "c6be6d9be723b8b1f8eac4edb84a06ed", + "ddocs": { + "uri": "/pools/default/buckets/travel-sample/ddocs" + }, + "replicaIndex": true, + "autoCompactionSettings": false, + "maxTTL": 0, + "compressionMode": "passive", + "replicaNumber": 1, + "threadsNumber": 3, + "quota": { + "ram": 104857600, + "rawRAM": 104857600 + }, + "basicStats": { + "quotaPercentUsed": 51.80185699462891, + "opsPerSec": 1.1, + "diskFetches": 1, + "itemCount": 31591, + "diskUsed": 62244260, + "dataUsed": 53865472, + "memUsed": 54318184, + "vbActiveNumNonResident": 1 + }, + "evictionPolicy": "valueOnly", + "durabilityMinLevel": "none", + "conflictResolutionType": "seqno", + "bucketCapabilitiesVer": "", + "bucketCapabilities": [ + "durableWrite", + "tombstonedUserXAttrs", + "couchapi", + "dcp", + "cbhello", + "touch", + "cccp", + "xdcrCheckpointing", + "nodesExt", + "xattr" + ] + } +] diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/README.md b/src/go/collectors/go.d.plugin/modules/couchdb/README.md new file mode 120000 index 00000000000000..14cff4d368025f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/README.md @@ -0,0 +1 @@ +integrations/couchdb.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/charts.go b/src/go/collectors/go.d.plugin/modules/couchdb/charts.go new file mode 100644 index 00000000000000..e1576b6649c856 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/charts.go @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + Charts = module.Charts + Dims = module.Dims + Vars = module.Vars +) + +var dbActivityCharts = Charts{ + { + ID: "activity", + Title: "Overall Activity", + Units: "requests/s", + Fam: "dbactivity", + Ctx: "couchdb.activity", + Type: module.Stacked, + Dims: Dims{ + {ID: "couchdb_database_reads", Name: "DB reads", Algo: module.Incremental}, + {ID: "couchdb_database_writes", Name: "DB writes", Algo: module.Incremental}, + {ID: "couchdb_httpd_view_reads", Name: "View reads", Algo: module.Incremental}, + }, + }, +} + +var httpTrafficBreakdownCharts = Charts{ + { + ID: "request_methods", + Title: "HTTP request methods", + Units: "requests/s", + Fam: "httptraffic", + Ctx: "couchdb.request_methods", + Type: module.Stacked, + Dims: Dims{ + {ID: "couchdb_httpd_request_methods_COPY", Name: "COPY", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_DELETE", Name: "DELETE", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_GET", Name: "GET", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_HEAD", Name: "HEAD", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_OPTIONS", Name: "OPTIONS", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_POST", Name: "POST", Algo: module.Incremental}, + {ID: "couchdb_httpd_request_methods_PUT", Name: "PUT", Algo: module.Incremental}, + }, + }, + { + ID: "response_codes", + Title: "HTTP response status codes", + Units: "responses/s", + Fam: "httptraffic", + Ctx: "couchdb.response_codes", + Type: module.Stacked, + Dims: Dims{ + {ID: "couchdb_httpd_status_codes_200", Name: "200 OK", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_201", Name: "201 Created", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_202", Name: "202 
Accepted", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_204", Name: "204 No Content", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_206", Name: "206 Partial Content", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_301", Name: "301 Moved Permanently", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_302", Name: "302 Found", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_304", Name: "304 Not Modified", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_400", Name: "400 Bad Request", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_401", Name: "401 Unauthorized", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_403", Name: "403 Forbidden", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_404", Name: "404 Not Found", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_406", Name: "406 Not Acceptable", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_409", Name: "409 Conflict", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_412", Name: "412 Precondition Failed", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_413", Name: "413 Request Entity Too Long", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_414", Name: "414 Request URI Too Long", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_415", Name: "415 Unsupported Media Type", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_416", Name: "416 Requested Range Not Satisfiable", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_417", Name: "417 Expectation Failed", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_500", Name: "500 Internal Server Error", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_501", Name: "501 Not Implemented", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_503", Name: "503 Service Unavailable", Algo: module.Incremental}, + }, + }, + { + ID: "response_code_classes", + Title: "HTTP response status code classes", + Units: "responses/s", + Fam: "httptraffic", + Ctx: "couchdb.response_code_classes", + Type: module.Stacked, + Dims: Dims{ + {ID: "couchdb_httpd_status_codes_2xx", Name: "2xx Success", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_3xx", Name: "3xx Redirection", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_4xx", Name: "4xx Client error", Algo: module.Incremental}, + {ID: "couchdb_httpd_status_codes_5xx", Name: "5xx Server error", Algo: module.Incremental}, + }, + }, +} + +var serverOperationsCharts = Charts{ + { + ID: "active_tasks", + Title: "Active task breakdown", + Units: "tasks", + Fam: "ops", + Ctx: "couchdb.active_tasks", + Type: module.Stacked, + Dims: Dims{ + {ID: "active_tasks_indexer", Name: "Indexer"}, + {ID: "active_tasks_database_compaction", Name: "DB Compaction"}, + {ID: "active_tasks_replication", Name: "Replication"}, + {ID: "active_tasks_view_compaction", Name: "View Compaction"}, + }, + }, + { + ID: "replicator_jobs", + Title: "Replicator job breakdown", + Units: "jobs", + Fam: "ops", + Ctx: "couchdb.replicator_jobs", + Type: module.Stacked, + Dims: Dims{ + {ID: "couch_replicator_jobs_running", Name: "Running"}, + {ID: "couch_replicator_jobs_pending", Name: "Pending"}, + {ID: "couch_replicator_jobs_crashed", Name: "Crashed"}, + {ID: "internal_replication_jobs", Name: "Internal replication jobs"}, + }, + }, + { + ID: "open_files", + Title: "Open files", + Units: "files", + Fam: "ops", + Ctx: 
"couchdb.open_files", + Dims: Dims{ + {ID: "couchdb_open_os_files", Name: "# files"}, + }, + }, +} + +var erlangStatisticsCharts = Charts{ + { + ID: "erlang_memory", + Title: "Erlang VM memory usage", + Units: "B", + Fam: "erlang", + Ctx: "couchdb.erlang_vm_memory", + Type: module.Stacked, + Dims: Dims{ + {ID: "memory_atom", Name: "atom"}, + {ID: "memory_binary", Name: "binaries"}, + {ID: "memory_code", Name: "code"}, + {ID: "memory_ets", Name: "ets"}, + {ID: "memory_processes", Name: "procs"}, + {ID: "memory_other", Name: "other"}, + }, + }, + { + ID: "erlang_proc_counts", + Title: "Process counts", + Units: "processes", + Fam: "erlang", + Ctx: "couchdb.proccounts", + Dims: Dims{ + {ID: "os_proc_count", Name: "OS procs"}, + {ID: "process_count", Name: "erl procs"}, + }, + }, + { + ID: "erlang_peak_msg_queue", + Title: "Peak message queue size", + Units: "messages", + Fam: "erlang", + Ctx: "couchdb.peakmsgqueue", + Dims: Dims{ + {ID: "peak_msg_queue", Name: "peak size"}, + }, + }, + { + ID: "erlang_reductions", + Title: "Erlang reductions", + Units: "reductions", + Fam: "erlang", + Ctx: "couchdb.reductions", + Type: module.Stacked, + Dims: Dims{ + {ID: "reductions", Name: "reductions", Algo: module.Incremental}, + }, + }, +} + +var ( + dbSpecificCharts = Charts{ + { + ID: "db_sizes_file", + Title: "Database sizes (file)", + Units: "KiB", + Fam: "perdbstats", + Ctx: "couchdb.db_sizes_file", + }, + { + ID: "db_sizes_external", + Title: "Database sizes (external)", + Units: "KiB", + Fam: "perdbstats", + Ctx: "couchdb.db_sizes_external", + }, + { + ID: "db_sizes_active", + Title: "Database sizes (active)", + Units: "KiB", + Fam: "perdbstats", + Ctx: "couchdb.db_sizes_active", + }, + { + ID: "db_doc_counts", + Title: "Database # of docs", + Units: "docs", + Fam: "perdbstats", + Ctx: "couchdb.db_doc_count", + }, + { + ID: "db_doc_del_counts", + Title: "Database # of deleted docs", + Units: "docs", + Fam: "perdbstats", + Ctx: "couchdb.db_doc_del_count", + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/collect.go b/src/go/collectors/go.d.plugin/modules/couchdb/collect.go new file mode 100644 index 00000000000000..9fd041800e8c5e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/collect.go @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "strings" + "sync" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathActiveTasks = "/_active_tasks" + urlPathOverviewStats = "/_node/%s/_stats" + urlPathSystemStats = "/_node/%s/_system" + urlPathDatabases = "/_dbs_info" + + httpStatusCodePrefix = "couchdb_httpd_status_codes_" + httpStatusCodePrefixLen = len(httpStatusCodePrefix) +) + +func (cdb *CouchDB) collect() (map[string]int64, error) { + ms := cdb.scrapeCouchDB() + if ms.empty() { + return nil, nil + } + + collected := make(map[string]int64) + cdb.collectNodeStats(collected, ms) + cdb.collectSystemStats(collected, ms) + cdb.collectActiveTasks(collected, ms) + cdb.collectDBStats(collected, ms) + + return collected, nil +} + +func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { + if !ms.hasNodeStats() { + return + } + + for metric, value := range stm.ToMap(ms.NodeStats) { + collected[metric] = value + if strings.HasPrefix(metric, httpStatusCodePrefix) { + code := metric[httpStatusCodePrefixLen:] + collected["couchdb_httpd_status_codes_"+string(code[0])+"xx"] += value 
+ } + } +} + +func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { + if !ms.hasNodeSystem() { + return + } + + for metric, value := range stm.ToMap(ms.NodeSystem) { + collected[metric] = value + } + + collected["peak_msg_queue"] = findMaxMQSize(ms.NodeSystem.MessageQueues) +} + +func (CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) { + collected["active_tasks_indexer"] = 0 + collected["active_tasks_database_compaction"] = 0 + collected["active_tasks_replication"] = 0 + collected["active_tasks_view_compaction"] = 0 + + if !ms.hasActiveTasks() { + return + } + + for _, task := range ms.ActiveTasks { + collected["active_tasks_"+task.Type]++ + } +} + +func (cdb *CouchDB) collectDBStats(collected map[string]int64, ms *cdbMetrics) { + if !ms.hasDBStats() { + return + } + + for _, dbStats := range ms.DBStats { + if dbStats.Error != "" { + cdb.Warning("database '", dbStats.Key, "' doesn't exist") + continue + } + merge(collected, stm.ToMap(dbStats.Info), "db_"+dbStats.Key) + } +} + +func (cdb *CouchDB) scrapeCouchDB() *cdbMetrics { + ms := &cdbMetrics{} + wg := &sync.WaitGroup{} + + wg.Add(1) + go func() { defer wg.Done(); cdb.scrapeNodeStats(ms) }() + + wg.Add(1) + go func() { defer wg.Done(); cdb.scrapeSystemStats(ms) }() + + wg.Add(1) + go func() { defer wg.Done(); cdb.scrapeActiveTasks(ms) }() + + if len(cdb.databases) > 0 { + wg.Add(1) + go func() { defer wg.Done(); cdb.scrapeDBStats(ms) }() + } + + wg.Wait() + return ms +} + +func (cdb *CouchDB) scrapeNodeStats(ms *cdbMetrics) { + req, _ := web.NewHTTPRequest(cdb.Request) + req.URL.Path = fmt.Sprintf(urlPathOverviewStats, cdb.Config.Node) + + var stats cdbNodeStats + if err := cdb.doOKDecode(req, &stats); err != nil { + cdb.Warning(err) + return + } + ms.NodeStats = &stats +} + +func (cdb *CouchDB) scrapeSystemStats(ms *cdbMetrics) { + req, _ := web.NewHTTPRequest(cdb.Request) + req.URL.Path = fmt.Sprintf(urlPathSystemStats, cdb.Config.Node) + + var stats cdbNodeSystem + if err := cdb.doOKDecode(req, &stats); err != nil { + cdb.Warning(err) + return + } + ms.NodeSystem = &stats +} + +func (cdb *CouchDB) scrapeActiveTasks(ms *cdbMetrics) { + req, _ := web.NewHTTPRequest(cdb.Request) + req.URL.Path = urlPathActiveTasks + + var stats []cdbActiveTask + if err := cdb.doOKDecode(req, &stats); err != nil { + cdb.Warning(err) + return + } + ms.ActiveTasks = stats +} + +func (cdb *CouchDB) scrapeDBStats(ms *cdbMetrics) { + req, _ := web.NewHTTPRequest(cdb.Request) + req.URL.Path = urlPathDatabases + req.Method = http.MethodPost + req.Header.Add("Accept", "application/json") + req.Header.Add("Content-Type", "application/json") + + var q struct { + Keys []string `json:"keys"` + } + q.Keys = cdb.databases + body, err := json.Marshal(q) + if err != nil { + cdb.Error(err) + return + } + req.Body = io.NopCloser(bytes.NewReader(body)) + + var stats []cdbDBStats + if err := cdb.doOKDecode(req, &stats); err != nil { + cdb.Warning(err) + return + } + ms.DBStats = stats +} + +func findMaxMQSize(MessageQueues map[string]interface{}) int64 { + var max float64 + for _, mq := range MessageQueues { + switch mqSize := mq.(type) { + case float64: + max = math.Max(max, mqSize) + case map[string]interface{}: + if v, ok := mqSize["count"].(float64); ok { + max = math.Max(max, v) + } + } + } + return int64(max) +} + +func (cdb *CouchDB) pingCouchDB() error { + req, _ := web.NewHTTPRequest(cdb.Request) + + var info struct{ Couchdb string } + if err := cdb.doOKDecode(req, &info); err != nil { + return err + } + + if 
info.Couchdb != "Welcome" { + return errors.New("not a CouchDB endpoint") + } + + return nil +} + +func (cdb *CouchDB) doOKDecode(req *http.Request, in interface{}) error { + resp, err := cdb.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + // TODO: read resp body, it contains reason + // ex.: {"error":"bad_request","reason":"`keys` member must exist."} (400) + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func merge(dst, src map[string]int64, prefix string) { + for k, v := range src { + dst[prefix+"_"+k] = v + } +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/config_schema.json b/src/go/collectors/go.d.plugin/modules/couchdb/config_schema.json new file mode 100644 index 00000000000000..e3a67e322378a2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/config_schema.json @@ -0,0 +1,65 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/couchdb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "node": { + "type": "string" + }, + "databases": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/couchdb.go b/src/go/collectors/go.d.plugin/modules/couchdb/couchdb.go new file mode 100644 index 00000000000000..3342b7b7f4fb02 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/couchdb.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +import ( + _ "embed" + "net/http" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("couchdb", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *CouchDB { + return &CouchDB{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:5984", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}, + }, + }, + Node: "_local", + }, + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + Node string `yaml:"node"` + Databases string `yaml:"databases"` + } + + CouchDB struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts + + databases []string + } +) + +func (cdb 
*CouchDB) Cleanup() { + if cdb.httpClient == nil { + return + } + cdb.httpClient.CloseIdleConnections() +} + +func (cdb *CouchDB) Init() bool { + err := cdb.validateConfig() + if err != nil { + cdb.Errorf("check configuration: %v", err) + return false + } + + cdb.databases = strings.Fields(cdb.Config.Databases) + + httpClient, err := cdb.initHTTPClient() + if err != nil { + cdb.Errorf("init HTTP client: %v", err) + return false + } + cdb.httpClient = httpClient + + charts, err := cdb.initCharts() + if err != nil { + cdb.Errorf("init charts: %v", err) + return false + } + cdb.charts = charts + + return true +} + +func (cdb *CouchDB) Check() bool { + if err := cdb.pingCouchDB(); err != nil { + cdb.Error(err) + return false + } + return len(cdb.Collect()) > 0 +} + +func (cdb *CouchDB) Charts() *Charts { + return cdb.charts +} + +func (cdb *CouchDB) Collect() map[string]int64 { + mx, err := cdb.collect() + if err != nil { + cdb.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/couchdb_test.go b/src/go/collectors/go.d.plugin/modules/couchdb/couchdb_test.go new file mode 100644 index 00000000000000..29b5b64af31900 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/couchdb_test.go @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v311Root, _ = os.ReadFile("testdata/v3.1.1/root.json") + v311ActiveTasks, _ = os.ReadFile("testdata/v3.1.1/active_tasks.json") + v311NodeStats, _ = os.ReadFile("testdata/v3.1.1/node_stats.json") + v311NodeSystem, _ = os.ReadFile("testdata/v3.1.1/node_system.json") + v311DbsInfo, _ = os.ReadFile("testdata/v3.1.1/dbs_info.json") +) + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v311Root": v311Root, + "v311ActiveTasks": v311ActiveTasks, + "v311NodeStats": v311NodeStats, + "v311NodeSystem": v311NodeSystem, + "v311DbsInfo": v311DbsInfo, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestCouchDB_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantNumOfCharts int + wantFail bool + }{ + "default": { + wantNumOfCharts: numOfCharts( + dbActivityCharts, + httpTrafficBreakdownCharts, + serverOperationsCharts, + erlangStatisticsCharts, + ), + config: New().Config, + }, + "URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }}, + }, + "invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + es := New() + es.Config = test.config + + if test.wantFail { + assert.False(t, es.Init()) + } else { + assert.True(t, es.Init()) + assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) + } + }) + } +} + +func TestCouchDB_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (cdb *CouchDB, cleanup func()) + wantFail bool + }{ + "valid data": {prepare: prepareCouchDBValidData}, + "invalid data": {prepare: prepareCouchDBInvalidData, wantFail: 
true}, + "404": {prepare: prepareCouchDB404, wantFail: true}, + "connection refused": {prepare: prepareCouchDBConnectionRefused, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cdb, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, cdb.Check()) + } else { + assert.True(t, cdb.Check()) + } + }) + } +} + +func TestCouchDB_Charts(t *testing.T) { + assert.Nil(t, New().Charts()) +} + +func TestCouchDB_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestCouchDB_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *CouchDB + wantCollected map[string]int64 + checkCharts bool + }{ + "all stats": { + prepare: func() *CouchDB { + cdb := New() + cdb.Config.Databases = "db1 db2" + return cdb + }, + wantCollected: map[string]int64{ + + // node stats + "couch_replicator_jobs_crashed": 1, + "couch_replicator_jobs_pending": 1, + "couch_replicator_jobs_running": 1, + "couchdb_database_reads": 1, + "couchdb_database_writes": 14, + "couchdb_httpd_request_methods_COPY": 1, + "couchdb_httpd_request_methods_DELETE": 1, + "couchdb_httpd_request_methods_GET": 75544, + "couchdb_httpd_request_methods_HEAD": 1, + "couchdb_httpd_request_methods_OPTIONS": 1, + "couchdb_httpd_request_methods_POST": 15, + "couchdb_httpd_request_methods_PUT": 3, + "couchdb_httpd_status_codes_200": 75294, + "couchdb_httpd_status_codes_201": 15, + "couchdb_httpd_status_codes_202": 1, + "couchdb_httpd_status_codes_204": 1, + "couchdb_httpd_status_codes_206": 1, + "couchdb_httpd_status_codes_301": 1, + "couchdb_httpd_status_codes_302": 1, + "couchdb_httpd_status_codes_304": 1, + "couchdb_httpd_status_codes_400": 1, + "couchdb_httpd_status_codes_401": 20, + "couchdb_httpd_status_codes_403": 1, + "couchdb_httpd_status_codes_404": 225, + "couchdb_httpd_status_codes_405": 1, + "couchdb_httpd_status_codes_406": 1, + "couchdb_httpd_status_codes_409": 1, + "couchdb_httpd_status_codes_412": 3, + "couchdb_httpd_status_codes_413": 1, + "couchdb_httpd_status_codes_414": 1, + "couchdb_httpd_status_codes_415": 1, + "couchdb_httpd_status_codes_416": 1, + "couchdb_httpd_status_codes_417": 1, + "couchdb_httpd_status_codes_500": 1, + "couchdb_httpd_status_codes_501": 1, + "couchdb_httpd_status_codes_503": 1, + "couchdb_httpd_status_codes_2xx": 75312, + "couchdb_httpd_status_codes_3xx": 3, + "couchdb_httpd_status_codes_4xx": 258, + "couchdb_httpd_status_codes_5xx": 3, + "couchdb_httpd_view_reads": 1, + "couchdb_open_os_files": 1, + + // node system + "context_switches": 22614499, + "ets_table_count": 116, + "internal_replication_jobs": 1, + "io_input": 49674812, + "io_output": 686400800, + "memory_atom_used": 488328, + "memory_atom": 504433, + "memory_binary": 297696, + "memory_code": 11252688, + "memory_ets": 1579120, + "memory_other": 20427855, + "memory_processes": 9161448, + "os_proc_count": 1, + "peak_msg_queue": 2, + "process_count": 296, + "reductions": 43211228312, + "run_queue": 1, + + // active tasks + "active_tasks_database_compaction": 1, + "active_tasks_indexer": 2, + "active_tasks_replication": 1, + "active_tasks_view_compaction": 1, + + // databases + "db_db1_db_doc_counts": 14, + "db_db1_db_doc_del_counts": 1, + "db_db1_db_sizes_active": 2818, + "db_db1_db_sizes_external": 588, + "db_db1_db_sizes_file": 74115, + + "db_db2_db_doc_counts": 15, + "db_db2_db_doc_del_counts": 1, + "db_db2_db_sizes_active": 1818, + "db_db2_db_sizes_external": 288, + "db_db2_db_sizes_file": 7415, + }, + checkCharts: true, + }, + "wrong node": { 
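+ // A node name that doesn't exist makes the /_node/<node>/_stats and /_node/<node>/_system
+ // requests fail, so only active-task and per-database metrics are expected below.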
+ prepare: func() *CouchDB { + cdb := New() + cdb.Config.Node = "bad_node@bad_host" + cdb.Config.Databases = "db1 db2" + return cdb + }, + wantCollected: map[string]int64{ + + // node stats + + // node system + + // active tasks + "active_tasks_database_compaction": 1, + "active_tasks_indexer": 2, + "active_tasks_replication": 1, + "active_tasks_view_compaction": 1, + + // databases + "db_db1_db_doc_counts": 14, + "db_db1_db_doc_del_counts": 1, + "db_db1_db_sizes_active": 2818, + "db_db1_db_sizes_external": 588, + "db_db1_db_sizes_file": 74115, + + "db_db2_db_doc_counts": 15, + "db_db2_db_doc_del_counts": 1, + "db_db2_db_sizes_active": 1818, + "db_db2_db_sizes_external": 288, + "db_db2_db_sizes_file": 7415, + }, + checkCharts: false, + }, + "wrong database": { + prepare: func() *CouchDB { + cdb := New() + cdb.Config.Databases = "bad_db db1 db2" + return cdb + }, + wantCollected: map[string]int64{ + + // node stats + "couch_replicator_jobs_crashed": 1, + "couch_replicator_jobs_pending": 1, + "couch_replicator_jobs_running": 1, + "couchdb_database_reads": 1, + "couchdb_database_writes": 14, + "couchdb_httpd_request_methods_COPY": 1, + "couchdb_httpd_request_methods_DELETE": 1, + "couchdb_httpd_request_methods_GET": 75544, + "couchdb_httpd_request_methods_HEAD": 1, + "couchdb_httpd_request_methods_OPTIONS": 1, + "couchdb_httpd_request_methods_POST": 15, + "couchdb_httpd_request_methods_PUT": 3, + "couchdb_httpd_status_codes_200": 75294, + "couchdb_httpd_status_codes_201": 15, + "couchdb_httpd_status_codes_202": 1, + "couchdb_httpd_status_codes_204": 1, + "couchdb_httpd_status_codes_206": 1, + "couchdb_httpd_status_codes_301": 1, + "couchdb_httpd_status_codes_302": 1, + "couchdb_httpd_status_codes_304": 1, + "couchdb_httpd_status_codes_400": 1, + "couchdb_httpd_status_codes_401": 20, + "couchdb_httpd_status_codes_403": 1, + "couchdb_httpd_status_codes_404": 225, + "couchdb_httpd_status_codes_405": 1, + "couchdb_httpd_status_codes_406": 1, + "couchdb_httpd_status_codes_409": 1, + "couchdb_httpd_status_codes_412": 3, + "couchdb_httpd_status_codes_413": 1, + "couchdb_httpd_status_codes_414": 1, + "couchdb_httpd_status_codes_415": 1, + "couchdb_httpd_status_codes_416": 1, + "couchdb_httpd_status_codes_417": 1, + "couchdb_httpd_status_codes_500": 1, + "couchdb_httpd_status_codes_501": 1, + "couchdb_httpd_status_codes_503": 1, + "couchdb_httpd_status_codes_2xx": 75312, + "couchdb_httpd_status_codes_3xx": 3, + "couchdb_httpd_status_codes_4xx": 258, + "couchdb_httpd_status_codes_5xx": 3, + "couchdb_httpd_view_reads": 1, + "couchdb_open_os_files": 1, + + // node system + "context_switches": 22614499, + "ets_table_count": 116, + "internal_replication_jobs": 1, + "io_input": 49674812, + "io_output": 686400800, + "memory_atom_used": 488328, + "memory_atom": 504433, + "memory_binary": 297696, + "memory_code": 11252688, + "memory_ets": 1579120, + "memory_other": 20427855, + "memory_processes": 9161448, + "os_proc_count": 1, + "peak_msg_queue": 2, + "process_count": 296, + "reductions": 43211228312, + "run_queue": 1, + + // active tasks + "active_tasks_database_compaction": 1, + "active_tasks_indexer": 2, + "active_tasks_replication": 1, + "active_tasks_view_compaction": 1, + + // databases + "db_db1_db_doc_counts": 14, + "db_db1_db_doc_del_counts": 1, + "db_db1_db_sizes_active": 2818, + "db_db1_db_sizes_external": 588, + "db_db1_db_sizes_file": 74115, + + "db_db2_db_doc_counts": 15, + "db_db2_db_doc_del_counts": 1, + "db_db2_db_sizes_active": 1818, + "db_db2_db_sizes_external": 288, + 
"db_db2_db_sizes_file": 7415, + }, + checkCharts: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cdb, cleanup := prepareCouchDB(t, test.prepare) + defer cleanup() + + var collected map[string]int64 + for i := 0; i < 10; i++ { + collected = cdb.Collect() + } + + assert.Equal(t, test.wantCollected, collected) + if test.checkCharts { + ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cdb *CouchDB, collected map[string]int64) { + for _, chart := range *cdb.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, cleanup func()) { + t.Helper() + cdb = createCDB() + srv := prepareCouchDBEndpoint() + cdb.URL = srv.URL + + require.True(t, cdb.Init()) + + return cdb, srv.Close +} + +func prepareCouchDBValidData(t *testing.T) (cdb *CouchDB, cleanup func()) { + return prepareCouchDB(t, New) +} + +func prepareCouchDBInvalidData(t *testing.T) (*CouchDB, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + cdb := New() + cdb.URL = srv.URL + require.True(t, cdb.Init()) + + return cdb, srv.Close +} + +func prepareCouchDB404(t *testing.T) (*CouchDB, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + cdb := New() + cdb.URL = srv.URL + require.True(t, cdb.Init()) + + return cdb, srv.Close +} + +func prepareCouchDBConnectionRefused(t *testing.T) (*CouchDB, func()) { + t.Helper() + cdb := New() + cdb.URL = "http://127.0.0.1:38001" + require.True(t, cdb.Init()) + + return cdb, func() {} +} + +func prepareCouchDBEndpoint() *httptest.Server { + return httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_node/_local/_stats": + _, _ = w.Write(v311NodeStats) + case "/_node/_local/_system": + _, _ = w.Write(v311NodeSystem) + case urlPathActiveTasks: + _, _ = w.Write(v311ActiveTasks) + case "/_dbs_info": + _, _ = w.Write(v311DbsInfo) + case "/": + _, _ = w.Write(v311Root) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} + +func numOfCharts(charts ...Charts) (num int) { + for _, v := range charts { + num += len(v) + } + return num +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/init.go b/src/go/collectors/go.d.plugin/modules/couchdb/init.go new file mode 100644 index 00000000000000..8de4cb8985613f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/init.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (cdb *CouchDB) validateConfig() error { + if cdb.URL == "" { + return errors.New("URL not set") + } + if cdb.Node == "" { + return errors.New("'node' not set") + } + if _, err := web.NewHTTPRequest(cdb.Request); err != nil { + return err + } + return nil +} + +func (cdb *CouchDB) initHTTPClient() 
(*http.Client, error) { + return web.NewHTTPClient(cdb.Client) +} + +func (cdb *CouchDB) initCharts() (*Charts, error) { + charts := module.Charts{} + + if err := charts.Add(*dbActivityCharts.Copy()...); err != nil { + return nil, err + } + if err := charts.Add(*httpTrafficBreakdownCharts.Copy()...); err != nil { + return nil, err + } + if err := charts.Add(*serverOperationsCharts.Copy()...); err != nil { + return nil, err + } + if len(cdb.databases) != 0 { + dbCharts := dbSpecificCharts.Copy() + + if err := charts.Add(*dbCharts...); err != nil { + return nil, err + } + + for _, chart := range *dbCharts { + for _, db := range cdb.databases { + if err := chart.AddDim(&module.Dim{ID: "db_" + db + "_" + chart.ID, Name: db}); err != nil { + return nil, err + } + } + } + + } + if err := charts.Add(*erlangStatisticsCharts.Copy()...); err != nil { + return nil, err + } + + if len(charts) == 0 { + return nil, errors.New("zero charts") + } + return &charts, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/integrations/couchdb.md b/src/go/collectors/go.d.plugin/modules/couchdb/integrations/couchdb.md new file mode 100644 index 00000000000000..a7fbec7efaaba3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/integrations/couchdb.md @@ -0,0 +1,225 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/couchdb/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/couchdb/metadata.yaml" +sidebar_label: "CouchDB" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CouchDB + + +<img src="https://netdata.cloud/img/couchdb.svg" width="150"/> + + +Plugin: go.d.plugin +Module: couchdb + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors CouchDB servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per CouchDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
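+
+> **Note**: The per-database charts (`couchdb.db_sizes_file`, `couchdb.db_sizes_external`, `couchdb.db_sizes_active`, `couchdb.db_doc_count`, `couchdb.db_doc_del_count`) are only created when the `databases` option is set.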
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| couchdb.activity | db_reads, db_writes, view_reads | requests/s | +| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s | +| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s | +| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s | +| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks | +| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs | +| couchdb.open_files | files | files | +| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B | +| couchdb.proccounts | os_procs, erl_procs | processes | +| couchdb.peakmsgqueue | peak_size | messages | +| couchdb.reductions | reductions | reductions | +| couchdb.db_sizes_file | a dimension per database | KiB | +| couchdb.db_sizes_external | a dimension per database | KiB | +| couchdb.db_sizes_active | a dimension per database | KiB | +| couchdb.db_doc_count | a dimension per database | docs | +| couchdb.db_doc_del_count | a dimension per database | docs | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/couchdb.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/couchdb.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:5984 | yes | +| node | CouchDB node name. Same as -name vm.args argument. | _local | no | +| databases | List of database names for which db-specific stats should be displayed, space separated. | | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 2 | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client tls certificate. | | no | +| tls_key | Client tls key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. 
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:5984 + +``` +</details> + +##### Basic HTTP auth + +Local server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:5984 + node: couchdb@127.0.0.1 + databases: my-db other-db + username: foo + password: bar + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:5984 + + - name: remote + url: http://203.0.113.0:5984 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m couchdb + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/metadata.yaml b/src/go/collectors/go.d.plugin/modules/couchdb/metadata.yaml new file mode 100644 index 00000000000000..2f0036db2b1e6a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/metadata.yaml @@ -0,0 +1,323 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-couchdb + plugin_name: go.d.plugin + module_name: couchdb + monitored_instance: + name: CouchDB + link: https://couchdb.apache.org/ + icon_filename: couchdb.svg + categories: + - data-collection.database-servers + keywords: + - couchdb + - databases + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors CouchDB servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/couchdb.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:5984 + required: true + - name: node + description: CouchDB node name. Same as -name vm.args argument. 
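+ # i.e. the NODENAME value from CouchDB's etc/vm.args (e.g. couchdb@127.0.0.1 for a single-node server)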
+ default_value: "_local" + required: false + - name: databases + description: List of database names for which db-specific stats should be displayed, space separated. + default_value: "" + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client tls certificate. + default_value: "" + required: false + - name: tls_key + description: Client tls key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + folding: + title: Example + enabled: true + config: | + jobs: + - name: local + url: http://127.0.0.1:5984 + - name: Basic HTTP auth + description: > + Local server with basic HTTP authentication, node name and multiple databases defined. + Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. + Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server. + config: | + jobs: + - name: local + url: http://127.0.0.1:5984 + node: couchdb@127.0.0.1 + databases: my-db other-db + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:5984 + + - name: remote + url: http://203.0.113.0:5984 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: couchdb.activity + description: Overall Activity + unit: requests/s + chart_type: stacked + dimensions: + - name: db_reads + - name: db_writes + - name: view_reads + - name: couchdb.request_methods + description: HTTP request methods + unit: requests/s + chart_type: stacked + dimensions: + - name: copy + - name: delete + - name: get + - name: head + - name: options + - name: post + - name: put + - name: couchdb.response_codes + description: HTTP response status codes + unit: responses/s + chart_type: stacked + dimensions: + - name: "200" + - name: "201" + - name: "202" + - name: "204" + - name: "206" + - name: "301" + - name: "302" + - name: "304" + - name: "400" + - name: "401" + - name: "403" + - name: "404" + - name: "406" + - name: "409" + - name: "412" + - name: "413" + - name: "414" + - name: "415" + - name: "416" + - name: "417" + - name: "500" + - name: "501" + - name: "503" + - name: couchdb.response_code_classes + description: HTTP response status code classes + unit: responses/s + chart_type: stacked + dimensions: + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: couchdb.active_tasks + description: Active task breakdown + unit: tasks + chart_type: stacked + dimensions: + - name: indexer + - name: db_compaction + - name: replication + - name: view_compaction + - name: couchdb.replicator_jobs + description: Replicator job breakdown + unit: jobs + chart_type: stacked + dimensions: + - name: running + - name: pending + - name: crashed + - name: internal_replication_jobs + - name: couchdb.open_files + description: Open files + unit: files + chart_type: line + dimensions: + - name: files + - name: couchdb.erlang_vm_memory + description: Erlang VM memory usage + unit: B + chart_type: stacked + dimensions: + - name: atom + - name: binaries + - name: code + - name: ets + - name: procs + - name: other + - name: couchdb.proccounts + description: Process counts + unit: processes + chart_type: line + dimensions: + - name: os_procs + - name: erl_procs + - name: couchdb.peakmsgqueue + description: Peak message queue size + unit: messages + chart_type: line + dimensions: + - name: peak_size + - name: couchdb.reductions + description: Erlang reductions + unit: reductions + chart_type: line + dimensions: + - name: reductions + - name: couchdb.db_sizes_file + description: Database sizes (file) + unit: KiB + chart_type: line + dimensions: + - name: a dimension per database + - name: couchdb.db_sizes_external + description: Database sizes (external) + unit: KiB + chart_type: line + dimensions: + - name: a dimension per database + - name: couchdb.db_sizes_active + description: Database sizes (active) + unit: KiB + chart_type: line + dimensions: + - name: a dimension per database + - name: couchdb.db_doc_count + description: 'Database # of docs' + unit: docs + chart_type: line + dimensions: + - name: a dimension per database + - name: couchdb.db_doc_del_count + description: 'Database # of deleted docs' + unit: docs + chart_type: line + dimensions: + - name: a dimension per database diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/metrics.go b/src/go/collectors/go.d.plugin/modules/couchdb/metrics.go new file mode 100644 index 00000000000000..4d2f02679f6a31 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/metrics.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package couchdb + +// https://docs.couchdb.org/en/stable/api/index.html + +type cdbMetrics struct { + // 
https://docs.couchdb.org/en/stable/api/server/common.html#active-tasks
+    ActiveTasks []cdbActiveTask
+    // https://docs.couchdb.org/en/stable/api/server/common.html#node-node-name-stats
+    NodeStats *cdbNodeStats
+    // https://docs.couchdb.org/en/stable/api/server/common.html#node-node-name-system
+    NodeSystem *cdbNodeSystem
+    // https://docs.couchdb.org/en/stable/api/database/common.html
+    DBStats []cdbDBStats
+}
+
+func (m cdbMetrics) empty() bool {
+    switch {
+    case m.hasActiveTasks(), m.hasNodeStats(), m.hasNodeSystem(), m.hasDBStats():
+        return false
+    }
+    return true
+}
+
+func (m cdbMetrics) hasActiveTasks() bool { return m.ActiveTasks != nil }
+func (m cdbMetrics) hasNodeStats() bool   { return m.NodeStats != nil }
+func (m cdbMetrics) hasNodeSystem() bool  { return m.NodeSystem != nil }
+func (m cdbMetrics) hasDBStats() bool     { return m.DBStats != nil }
+
+type cdbActiveTask struct {
+    Type string `json:"type"`
+}
+
+type cdbNodeStats struct {
+    CouchDB struct {
+        DatabaseReads struct {
+            Value float64 `stm:"" json:"value"`
+        } `stm:"database_reads" json:"database_reads"`
+        DatabaseWrites struct {
+            Value float64 `stm:"" json:"value"`
+        } `stm:"database_writes" json:"database_writes"`
+        HTTPd struct {
+            ViewReads struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"view_reads" json:"view_reads"`
+        } `stm:"httpd" json:"httpd"`
+        HTTPdRequestMethods struct {
+            Copy struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"COPY" json:"COPY"`
+            Delete struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"DELETE" json:"DELETE"`
+            Get struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"GET" json:"GET"`
+            Head struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"HEAD" json:"HEAD"`
+            Options struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"OPTIONS" json:"OPTIONS"`
+            Post struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"POST" json:"POST"`
+            Put struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"PUT" json:"PUT"`
+        } `stm:"httpd_request_methods" json:"httpd_request_methods"`
+        HTTPdStatusCodes struct {
+            Code200 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"200" json:"200"`
+            Code201 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"201" json:"201"`
+            Code202 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"202" json:"202"`
+            Code204 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"204" json:"204"`
+            Code206 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"206" json:"206"`
+            Code301 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"301" json:"301"`
+            Code302 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"302" json:"302"`
+            Code304 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"304" json:"304"`
+            Code400 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"400" json:"400"`
+            Code401 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"401" json:"401"`
+            Code403 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"403" json:"403"`
+            Code404 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"404" json:"404"`
+            Code405 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"405" json:"405"`
+            Code406 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"406" json:"406"`
+            Code409 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"409" json:"409"`
+            Code412 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"412" json:"412"`
+            Code413 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"413" json:"413"`
+            Code414 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"414" json:"414"`
+            Code415 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"415" json:"415"`
+            Code416 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"416" json:"416"`
+            Code417 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"417" json:"417"`
+            Code500 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"500" json:"500"`
+            Code501 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"501" json:"501"`
+            Code503 struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"503" json:"503"`
+        } `stm:"httpd_status_codes" json:"httpd_status_codes"`
+        OpenOSFiles struct {
+            Value float64 `stm:"" json:"value"`
+        } `stm:"open_os_files" json:"open_os_files"`
+    } `stm:"couchdb" json:"couchdb"`
+    CouchReplicator struct {
+        Jobs struct {
+            Running struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"running" json:"running"`
+            Pending struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"pending" json:"pending"`
+            Crashed struct {
+                Value float64 `stm:"" json:"value"`
+            } `stm:"crashed" json:"crashed"`
+        } `stm:"jobs" json:"jobs"`
+    } `stm:"couch_replicator" json:"couch_replicator"`
+}
+
+type cdbNodeSystem struct {
+    Memory struct {
+        Other     float64 `stm:"other" json:"other"`
+        Atom      float64 `stm:"atom" json:"atom"`
+        AtomUsed  float64 `stm:"atom_used" json:"atom_used"`
+        Processes float64 `stm:"processes" json:"processes"`
+        Binary    float64 `stm:"binary" json:"binary"`
+        Code      float64 `stm:"code" json:"code"`
+        Ets       float64 `stm:"ets" json:"ets"`
+    } `stm:"memory" json:"memory"`
+
+    RunQueue                float64 `stm:"run_queue" json:"run_queue"`
+    EtsTableCount           float64 `stm:"ets_table_count" json:"ets_table_count"`
+    ContextSwitches         float64 `stm:"context_switches" json:"context_switches"`
+    Reductions              float64 `stm:"reductions" json:"reductions"`
+    IOInput                 float64 `stm:"io_input" json:"io_input"`
+    IOOutput                float64 `stm:"io_output" json:"io_output"`
+    OSProcCount             float64 `stm:"os_proc_count" json:"os_proc_count"`
+    ProcessCount            float64 `stm:"process_count" json:"process_count"`
+    InternalReplicationJobs float64 `stm:"internal_replication_jobs" json:"internal_replication_jobs"`
+
+    MessageQueues map[string]interface{} `json:"message_queues"`
+}
+
+type cdbDBStats struct {
+    Key   string
+    Error string
+    Info  struct {
+        Sizes struct {
+            File     float64 `stm:"file" json:"file"`
+            External float64 `stm:"external" json:"external"`
+            Active   float64 `stm:"active" json:"active"`
+        } `stm:"db_sizes" json:"sizes"`
+        DocDelCount float64 `stm:"db_doc_del_counts" json:"doc_del_count"`
+        DocCount    float64 `stm:"db_doc_counts" json:"doc_count"`
+    }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/active_tasks.json b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/active_tasks.json
new file mode 100644
index 00000000000000..788fe5642d998e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/active_tasks.json
@@ -0,0 +1,63 @@
+[
+  {
+    "changes_done": 64438,
+    "database": "mailbox",
+    "pid": "<0.12986.1>",
+    "progress": 84,
+    "started_on": 1376116576,
+    "total_changes": 76215,
+    "type": "database_compaction",
+    "updated_on": 1376116619
+  },
+  {
+    "changes_done": 26534,
+    "database": "mailbox",
+    "pid": "<0.12943.2>",
+    "progress": 23,
+    "started_on": 1376116592,
+    "total_changes": 76215,
+    "type": "view_compaction",
+    "updated_on": 1376116637
+  },
+  {
+    "changes_done": 14443,
+    "database": "mailbox",
+    "design_document": "c9753817b3ba7c674d92361f24f59b9f",
+    "pid": "<0.10461.3>",
+    "progress": 18,
+    "started_on": 1376116621,
"total_changes": 76215, + "type": "indexer", + "updated_on": 1376116650 + }, + { + "changes_done": 5454, + "database": "mailbox", + "design_document": "_design/meta", + "pid": "<0.6838.4>", + "progress": 7, + "started_on": 1376116632, + "total_changes": 76215, + "type": "indexer", + "updated_on": 1376116651 + }, + { + "checkpointed_source_seq": 68585, + "continuous": false, + "doc_id": null, + "doc_write_failures": 1, + "docs_read": 4524, + "docs_written": 4524, + "missing_revisions_found": 4524, + "pid": "<0.1538.5>", + "progress": 44, + "replication_id": "9bc1727d74d49d9e157e260bb8bbd1d5", + "revisions_checked": 4524, + "source": "mailbox", + "source_seq": 154419, + "started_on": 1376116644, + "target": "http://mailsrv:5984/mailbox", + "type": "replication", + "updated_on": 1376116651 + } +] diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/dbs_info.json b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/dbs_info.json new file mode 100644 index 00000000000000..9ca43a53ce1a32 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/dbs_info.json @@ -0,0 +1,52 @@ +[ + { + "key": "db1", + "info": { + "db_name": "db1", + "purge_seq": "0-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEhlwqEtkSKqHKMgCAIT2GV4", + "update_seq": "14-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEjlxqEtkSKoHK2DNAgCGOxls", + "sizes": { + "file": 74115, + "external": 588, + "active": 2818 + }, + "props": {}, + "doc_del_count": 1, + "doc_count": 14, + "disk_format_version": 8, + "compact_running": false, + "cluster": { + "q": 2, + "n": 1, + "w": 1, + "r": 1 + }, + "instance_start_time": "0" + } + }, + { + "key": "db2", + "info": { + "db_name": "db2", + "purge_seq": "0-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEhlwqEtkSKqHKMgCAIT2GV5", + "update_seq": "14-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEjlxqEtkSKoHK2DNAgCGOxlt", + "sizes": { + "file": 7415, + "external": 288, + "active": 1818 + }, + "props": {}, + "doc_del_count": 1, + "doc_count": 15, + "disk_format_version": 8, + "compact_running": false, + "cluster": { + "q": 2, + "n": 1, + "w": 1, + "r": 1 + }, + "instance_start_time": "0" + } + } +] diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_stats.json b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_stats.json new file mode 100644 index 00000000000000..ae31366af2d590 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_stats.json @@ -0,0 +1,1651 @@ +{ + "global_changes": { + "db_writes": { + "value": 1, + "type": "counter", + "desc": "number of db writes performed by global changes" + }, + "event_doc_conflict": { + "value": 1, + "type": "counter", + "desc": "number of conflicted event docs encountered by global changes" + }, + "listener_pending_updates": { + "value": 1, + "type": "gauge", + "desc": "number of global changes updates pending writes in global_changes_listener" + }, + "rpcs": { + "value": 1, + "type": "counter", + "desc": "number of rpc operations performed by global_changes" + }, + "server_pending_updates": { + "value": 1, + "type": "gauge", + "desc": "number of global changes updates pending writes in global_changes_server" + } + }, + "couchdb": { + "httpd": { + "aborted_requests": { + "value": 1, + "type": "counter", + "desc": "number of aborted requests" + }, + "bulk_docs": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 
0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "distribution of the number of docs in _bulk_docs requests" + }, + "bulk_requests": { + "value": 1, + "type": "counter", + "desc": "number of bulk requests" + }, + "requests": { + "value": 75562, + "type": "counter", + "desc": "number of HTTP requests" + }, + "view_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of HTTP view timeouts" + }, + "find_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of HTTP find timeouts" + }, + "explain_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of HTTP _explain timeouts" + }, + "all_docs_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of HTTP all_docs timeouts" + }, + "partition_view_requests": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP view requests" + }, + "partition_find_requests": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP _find requests" + }, + "partition_explain_requests": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP _explain requests" + }, + "partition_all_docs_requests": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP _all_docs requests" + }, + "partition_view_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP view timeouts" + }, + "partition_find_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP find timeouts" + }, + "partition_explain_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP _explain timeouts" + }, + "partition_all_docs_timeouts": { + "value": 1, + "type": "counter", + "desc": "number of partition HTTP all_docs timeouts" + }, + "temporary_view_reads": { + "value": 1, + "type": "counter", + "desc": "number of temporary view reads" + }, + "view_reads": { + "value": 1, + "type": "counter", + "desc": "number of view reads" + }, + "clients_requesting_changes": { + "value": 1, + "type": "counter", + "desc": "number of clients for continuous _changes" + }, + "purge_requests": { + "value": 1, + "type": "counter", + "desc": "number of purge requests" + } + }, + "dbinfo": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "distribution of latencies for calls to retrieve DB info" + }, + "io_queue": { + "search": { + "value": 1, + "type": "counter", + "desc": "Search IO directly triggered by client requests" + } + }, + "io_queue2": { + "search": { + "count": { + "value": 1, + "type": "counter", + "desc": "Search IO directly triggered by client requests" + } + } + }, + "auth_cache_hits": { + "value": 1, + "type": "counter", + "desc": "number of authentication cache hits" + }, + "auth_cache_misses": { + "value": 2, + "type": "counter", + "desc": "number of authentication cache misses" + }, + "collect_results_time": { + "value": { + 
"min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "microsecond latency for calls to couch_db:collect_results/3" + }, + "database_writes": { + "value": 14, + "type": "counter", + "desc": "number of times a database was changed" + }, + "database_reads": { + "value": 1, + "type": "counter", + "desc": "number of times a document was read from a database" + }, + "database_purges": { + "value": 1, + "type": "counter", + "desc": "number of times a database was purged" + }, + "db_open_time": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "milliseconds required to open a database" + }, + "document_inserts": { + "value": 17, + "type": "counter", + "desc": "number of documents inserted" + }, + "document_writes": { + "value": 17, + "type": "counter", + "desc": "number of document write operations" + }, + "document_purges": { + "total": { + "value": 1, + "type": "counter", + "desc": "number of total document purge operations" + }, + "success": { + "value": 1, + "type": "counter", + "desc": "number of successful document purge operations" + }, + "failure": { + "value": 1, + "type": "counter", + "desc": "number of failed document purge operations" + } + }, + "local_document_writes": { + "value": 1, + "type": "counter", + "desc": "number of _local document write operations" + }, + "httpd_request_methods": { + "COPY": { + "value": 1, + "type": "counter", + "desc": "number of HTTP COPY requests" + }, + "DELETE": { + "value": 1, + "type": "counter", + "desc": "number of HTTP DELETE requests" + }, + "GET": { + "value": 75544, + "type": "counter", + "desc": "number of HTTP GET requests" + }, + "HEAD": { + "value": 1, + "type": "counter", + "desc": "number of HTTP HEAD requests" + }, + "OPTIONS": { + "value": 1, + "type": "counter", + "desc": "number of HTTP OPTIONS requests" + }, + "POST": { + "value": 15, + "type": "counter", + "desc": "number of HTTP POST requests" + }, + "PUT": { + "value": 3, + "type": "counter", + "desc": "number of HTTP PUT requests" + } + }, + "httpd_status_codes": { + "200": { + "value": 75294, + "type": "counter", + "desc": "number of HTTP 200 OK responses" + }, + "201": { + "value": 15, + "type": "counter", + "desc": "number of HTTP 201 Created responses" + }, + "202": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 202 Accepted responses" + }, + "204": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 204 No Content responses" + }, + "206": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 206 Partial Content" + }, + "301": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 301 Moved Permanently responses" + }, + "302": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 302 Found responses" + }, + "304": { + "value": 1, + "type": 
"counter", + "desc": "number of HTTP 304 Not Modified responses" + }, + "400": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 400 Bad Request responses" + }, + "401": { + "value": 20, + "type": "counter", + "desc": "number of HTTP 401 Unauthorized responses" + }, + "403": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 403 Forbidden responses" + }, + "404": { + "value": 225, + "type": "counter", + "desc": "number of HTTP 404 Not Found responses" + }, + "405": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 405 Method Not Allowed responses" + }, + "406": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 406 Not Acceptable responses" + }, + "409": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 409 Conflict responses" + }, + "412": { + "value": 3, + "type": "counter", + "desc": "number of HTTP 412 Precondition Failed responses" + }, + "413": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 413 Request Entity Too Long responses" + }, + "414": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 414 Request URI Too Long responses" + }, + "415": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 415 Unsupported Media Type responses" + }, + "416": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 416 Requested Range Not Satisfiable responses" + }, + "417": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 417 Expectation Failed responses" + }, + "500": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 500 Internal Server Error responses" + }, + "501": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 501 Not Implemented responses" + }, + "503": { + "value": 1, + "type": "counter", + "desc": "number of HTTP 503 Service unavailable responses" + } + }, + "open_databases": { + "value": 1, + "type": "counter", + "desc": "number of open databases" + }, + "open_os_files": { + "value": 1, + "type": "counter", + "desc": "number of file descriptors CouchDB has open" + }, + "request_time": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of a request inside CouchDB without MochiWeb" + }, + "couch_server": { + "lru_skip": { + "value": 1, + "type": "counter", + "desc": "number of couch_server LRU operations skipped" + } + }, + "query_server": { + "vdu_rejects": { + "value": 1, + "type": "counter", + "desc": "number of rejections by validate_doc_update function" + }, + "vdu_process_time": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "duration of validate_doc_update function calls" + } + }, + "mrview": { + "map_doc": { + "value": 1, + "type": "counter", + "desc": "number of documents mapped in the view server" + }, + "emits": { + "value": 1, 
+ "type": "counter", + "desc": "number of invocations of `emit' in map functions in the view server" + } + } + }, + "mem3": { + "shard_cache": { + "eviction": { + "value": 1, + "type": "counter", + "desc": "number of shard cache evictions" + }, + "hit": { + "value": 185, + "type": "counter", + "desc": "number of shard cache hits" + }, + "miss": { + "value": 252470, + "type": "counter", + "desc": "number of shard cache misses" + } + } + }, + "ddoc_cache": { + "hit": { + "value": 1, + "type": "counter", + "desc": "number of design doc cache hits" + }, + "miss": { + "value": 3, + "type": "counter", + "desc": "number of design doc cache misses" + }, + "recovery": { + "value": 1, + "type": "counter", + "desc": "number of design doc cache recoveries" + } + }, + "couch_log": { + "level": { + "alert": { + "value": 1, + "type": "counter", + "desc": "number of logged alert messages" + }, + "critical": { + "value": 1, + "type": "counter", + "desc": "number of logged critical messages" + }, + "debug": { + "value": 1, + "type": "counter", + "desc": "number of logged debug messages" + }, + "emergency": { + "value": 1, + "type": "counter", + "desc": "number of logged emergency messages" + }, + "error": { + "value": 2, + "type": "counter", + "desc": "number of logged error messages" + }, + "info": { + "value": 8, + "type": "counter", + "desc": "number of logged info messages" + }, + "notice": { + "value": 126250, + "type": "counter", + "desc": "number of logged notice messages" + }, + "warning": { + "value": 8, + "type": "counter", + "desc": "number of logged warning messages" + } + } + }, + "dreyfus": { + "httpd": { + "search": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "Distribution of overall search request latency as experienced by the end user" + } + }, + "rpc": { + "search": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of a search RPC worker" + }, + "group1": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of a group1 RPC worker" + }, + "group2": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 
0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of a group2 RPC worker" + }, + "info": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an info RPC worker" + } + }, + "index": { + "await": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an dreyfus_index await request" + }, + "search": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an dreyfus_index search request" + }, + "group1": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an dreyfus_index group1 request" + }, + "group2": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an dreyfus_index group2 request" + }, + "info": { + "value": { + "min": 0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of an dreyfus_index info request" + } + } + }, + "fabric": { + "worker": { + "timeouts": { + "value": 1, + "type": "counter", + "desc": "number of worker timeouts" + } + }, + "open_shard": { + "timeouts": { + "value": 1, + "type": "counter", + "desc": "number of open shard timeouts" + } + }, + "read_repairs": { + "success": { + 
"value": 1, + "type": "counter", + "desc": "number of successful read repair operations" + }, + "failure": { + "value": 1, + "type": "counter", + "desc": "number of failed read repair operations" + } + }, + "doc_update": { + "errors": { + "value": 1, + "type": "counter", + "desc": "number of document update errors" + }, + "mismatched_errors": { + "value": 1, + "type": "counter", + "desc": "number of document update errors with multiple error types" + }, + "write_quorum_errors": { + "value": 1, + "type": "counter", + "desc": "number of write quorum errors" + } + } + }, + "rexi": { + "buffered": { + "value": 1, + "type": "counter", + "desc": "number of rexi messages buffered" + }, + "down": { + "value": 1, + "type": "counter", + "desc": "number of rexi_DOWN messages handled" + }, + "dropped": { + "value": 1, + "type": "counter", + "desc": "number of rexi messages dropped from buffers" + }, + "streams": { + "timeout": { + "init_stream": { + "value": 1, + "type": "counter", + "desc": "number of rexi stream initialization timeouts" + }, + "stream": { + "value": 1, + "type": "counter", + "desc": "number of rexi stream timeouts" + }, + "wait_for_ack": { + "value": 1, + "type": "counter", + "desc": "number of rexi stream timeouts while waiting for acks" + } + } + } + }, + "couch_replicator": { + "changes_read_failures": { + "value": 1, + "type": "counter", + "desc": "number of failed replicator changes read failures" + }, + "changes_reader_deaths": { + "value": 1, + "type": "counter", + "desc": "number of failed replicator changes readers" + }, + "changes_manager_deaths": { + "value": 1, + "type": "counter", + "desc": "number of failed replicator changes managers" + }, + "changes_queue_deaths": { + "value": 1, + "type": "counter", + "desc": "number of failed replicator changes work queues" + }, + "checkpoints": { + "success": { + "value": 1, + "type": "counter", + "desc": "number of checkpoints successfully saves" + }, + "failure": { + "value": 1, + "type": "counter", + "desc": "number of failed checkpoint saves" + } + }, + "failed_starts": { + "value": 1, + "type": "counter", + "desc": "number of replications that have failed to start" + }, + "requests": { + "value": 1, + "type": "counter", + "desc": "number of HTTP requests made by the replicator" + }, + "responses": { + "failure": { + "value": 1, + "type": "counter", + "desc": "number of failed HTTP responses received by the replicator" + }, + "success": { + "value": 1, + "type": "counter", + "desc": "number of successful HTTP responses received by the replicator" + } + }, + "stream_responses": { + "failure": { + "value": 1, + "type": "counter", + "desc": "number of failed streaming HTTP responses received by the replicator" + }, + "success": { + "value": 1, + "type": "counter", + "desc": "number of successful streaming HTTP responses received by the replicator" + } + }, + "worker_deaths": { + "value": 1, + "type": "counter", + "desc": "number of failed replicator workers" + }, + "workers_started": { + "value": 1, + "type": "counter", + "desc": "number of replicator workers started" + }, + "cluster_is_stable": { + "value": 1, + "type": "gauge", + "desc": "1 if cluster is stable, 0 if unstable" + }, + "db_scans": { + "value": 1, + "type": "counter", + "desc": "number of times replicator db scans have been started" + }, + "docs": { + "dbs_created": { + "value": 1, + "type": "counter", + "desc": "number of db shard creations seen by replicator doc processor" + }, + "dbs_deleted": { + "value": 1, + "type": "counter", + "desc": "number of db shard 
deletions seen by replicator doc processor" + }, + "dbs_found": { + "value": 1, + "type": "counter", + "desc": "number of db shard found by replicator doc processor" + }, + "db_changes": { + "value": 1, + "type": "counter", + "desc": "number of db changes processed by replicator doc processor" + }, + "failed_state_updates": { + "value": 1, + "type": "counter", + "desc": "number of 'failed' state document updates" + }, + "completed_state_updates": { + "value": 1, + "type": "counter", + "desc": "number of 'completed' state document updates" + } + }, + "jobs": { + "adds": { + "value": 1, + "type": "counter", + "desc": "number of jobs added to replicator scheduler" + }, + "duplicate_adds": { + "value": 1, + "type": "counter", + "desc": "number of duplicate jobs added to replicator scheduler" + }, + "removes": { + "value": 1, + "type": "counter", + "desc": "number of jobs removed from replicator scheduler" + }, + "starts": { + "value": 1, + "type": "counter", + "desc": "number of jobs started by replicator scheduler" + }, + "stops": { + "value": 1, + "type": "counter", + "desc": "number of jobs stopped by replicator scheduler" + }, + "crashes": { + "value": 1, + "type": "counter", + "desc": "number of job crashed noticed by replicator scheduler" + }, + "running": { + "value": 1, + "type": "gauge", + "desc": "replicator scheduler running jobs" + }, + "pending": { + "value": 1, + "type": "gauge", + "desc": "replicator scheduler pending jobs" + }, + "crashed": { + "value": 1, + "type": "gauge", + "desc": "replicator scheduler crashed jobs" + }, + "total": { + "value": 1, + "type": "gauge", + "desc": "total number of replicator scheduler jobs" + } + }, + "connection": { + "acquires": { + "value": 1, + "type": "counter", + "desc": "number of times connections are shared" + }, + "creates": { + "value": 1, + "type": "counter", + "desc": "number of connections created" + }, + "releases": { + "value": 1, + "type": "counter", + "desc": "number of times ownership of a connection is released" + }, + "owner_crashes": { + "value": 1, + "type": "counter", + "desc": "number of times a connection owner crashes while owning at least one connection" + }, + "worker_crashes": { + "value": 1, + "type": "counter", + "desc": "number of times a worker unexpectedly terminates" + }, + "closes": { + "value": 1, + "type": "counter", + "desc": "number of times a worker is gracefully shut down" + } + } + }, + "pread": { + "exceed_eof": { + "value": 1, + "type": "counter", + "desc": "number of the attempts to read beyond end of db file" + }, + "exceed_limit": { + "value": 1, + "type": "counter", + "desc": "number of the attempts to read beyond set limit" + } + }, + "mango": { + "unindexed_queries": { + "value": 1, + "type": "counter", + "desc": "number of mango queries that could not use an index" + }, + "query_invalid_index": { + "value": 1, + "type": "counter", + "desc": "number of mango queries that generated an invalid index warning" + }, + "too_many_docs_scanned": { + "value": 1, + "type": "counter", + "desc": "number of mango queries that generated an index scan warning" + }, + "docs_examined": { + "value": 1, + "type": "counter", + "desc": "number of documents examined by mango queries coordinated by this node" + }, + "quorum_docs_examined": { + "value": 1, + "type": "counter", + "desc": "number of documents examined by mango queries, using cluster quorum" + }, + "results_returned": { + "value": 1, + "type": "counter", + "desc": "number of rows returned by mango queries" + }, + "query_time": { + "value": { + "min": 
0.0, + "max": 0.0, + "arithmetic_mean": 0.0, + "geometric_mean": 0.0, + "harmonic_mean": 0.0, + "median": 0.0, + "variance": 0.0, + "standard_deviation": 0.0, + "skewness": 0.0, + "kurtosis": 0.0, + "percentile": [ + [ + 50, + 0.0 + ], + [ + 75, + 0.0 + ], + [ + 90, + 0.0 + ], + [ + 95, + 0.0 + ], + [ + 99, + 0.0 + ], + [ + 999, + 0.0 + ] + ], + "histogram": [ + [ + 0, + 0 + ] + ], + "n": 0 + }, + "type": "histogram", + "desc": "length of time processing a mango query" + }, + "evaluate_selector": { + "value": 1, + "type": "counter", + "desc": "number of mango selector evaluations" + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_system.json b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_system.json new file mode 100644 index 00000000000000..7084645a405daf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/node_system.json @@ -0,0 +1,176 @@ +{ + "uptime": 253571, + "memory": { + "other": 20427855, + "atom": 504433, + "atom_used": 488328, + "processes": 9161448, + "processes_used": 9160864, + "binary": 297696, + "code": 11252688, + "ets": 1579120 + }, + "run_queue": 1, + "ets_table_count": 116, + "context_switches": 22614499, + "reductions": 43211228312, + "garbage_collection_count": 11416345, + "words_reclaimed": 20241272866, + "io_input": 49674812, + "io_output": 686400800, + "os_proc_count": 1, + "stale_proc_count": 1, + "process_count": 296, + "process_limit": 262144, + "message_queues": { + "couch_file": { + "count": 2, + "min": 1, + "max": 1, + "50": 1, + "90": 1, + "99": 1 + }, + "couch_db_updater": { + "count": 2, + "min": 1, + "max": 1, + "50": 1, + "90": 1, + "99": 1 + }, + "httpc_manager": 1, + "httpc_handler_sup": 1, + "ken_sup": 1, + "ken_server": 1, + "couch_replication": 1, + "standard_error_sup": 1, + "chttpd_auth_cache_lru": 1, + "couch_index_sup": 1, + "ioq_sup": 1, + "couch_index_server": 1, + "mem3_events": 1, + "jwtf_sup": 1, + "jwtf_keystore": 1, + "ioq": 1, + "couch_uuids": 1, + "ftp_sup": 1, + "ibrowse_sup": 1, + "couch_secondary_services": 1, + "couch_primary_services": 1, + "couch_task_status": 1, + "couch_sup": 1, + "global_changes_sup": 1, + "global_changes_server": 1, + "couch_server": 1, + "couch_epi_functions_gen_couch_index": 1, + "couch_plugin": 1, + "ibrowse": 1, + "config_event": 1, + "couch_epi_functions_gen_chttpd_auth": 1, + "chttpd_sup": 1, + "couch_epi_functions_gen_couch_db": 1, + "couch_epi_data_gen_flags_config": 1, + "couch_epi_functions_gen_global_changes": 1, + "couch_proc_manager": 1, + "release_handler": 1, + "sasl_sup": 1, + "couch_epi_functions_gen_chttpd_handlers": 1, + "couch_epi_functions_gen_feature_flags": 1, + "couch_epi_functions_gen_chttpd": 1, + "dreyfus_sup": 1, + "sasl_safe_sup": 1, + "couch_event_sup2": 1, + "alarm_handler": 1, + "couch_event_server": 1, + "dreyfus_index_manager": 1, + "timer_server": 1, + "runtime_tools_sup": 1, + "couch_httpd_vhost": 1, + "chttpd_auth_cache": 1, + "couch_stats_sup": 1, + "couch_stats_process_tracker": 1, + "chttpd": 1, + "kernel_safe_sup": 1, + "tftp_sup": 1, + "couch_stats_aggregator": 1, + "rex": 1, + "folsom_sup": 1, + "inet_gethost_native_sup": 1, + "kernel_sup": 1, + "ddoc_cache_sup": 1, + "global_name_server": 1, + "ddoc_cache_opener": 1, + "folsom_sample_slide_sup": 1, + "ddoc_cache_lru": 1, + "file_server_2": 1, + "standard_error": 1, + "rexi_buffer_nonode@nohost": 1, + "rexi_server_nonode@nohost": 1, + "couch_drv": 1, + "couch_peruser_sup": 1, + "tls_connection_sup": 1, + "couch_peruser": 1, + 
"folsom_metrics_histogram_ets": 1, + "couch_replicator_sup": 1, + "ssl_sup": 1, + "couch_replicator_scheduler_sup": 1, + "smoosh_sup": 1, + "folsom_meter_timer_server": 1, + "smoosh_server": 1, + "couch_replicator_scheduler": 1, + "couch_epi_data_gen_dreyfus_black_list": 1, + "mem3_sync_nodes": 1, + "couch_replicator_rate_limiter": 1, + "inet_gethost_native": 1, + "inets_sup": 1, + "setup_sup": 1, + "inet_db": 1, + "ssl_pem_cache": 1, + "mem3_sync": 1, + "ssl_manager": 1, + "mem3_sup": 1, + "ssl_listen_tracker_sup": 1, + "mem3_shards": 1, + "mem3_seeds": 1, + "httpd_sup": 1, + "couch_log_sup": 1, + "mem3_reshard_sup": 1, + "mango_sup": 1, + "couch_log_server": 1, + "mem3_reshard_job_sup": 1, + "erts_code_purger": 1, + "global_group": 1, + "error_logger": 1, + "couch_replicator_doc_processor": 1, + "ssl_connection_sup": 1, + "init": 1, + "mem3_reshard_dbdoc": 1, + "couch_replicator_connection": 1, + "erl_signal_server": 1, + "couch_replicator_clustering": 1, + "config": 1, + "mem3_reshard": 1, + "user": 1, + "couch_epi_sup": 1, + "mem3_nodes": 1, + "ssl_admin_sup": 1, + "mochiweb_clock": 1, + "rexi_buffer_mon": 1, + "dtls_udp_sup": 1, + "rexi_buffer_sup": 1, + "erl_prim_loader": 1, + "code_server": 1, + "httpc_sup": 1, + "rexi_sup": 1, + "dtls_connection_sup": 1, + "rexi_server_sup": 1, + "rexi_server_mon": 1, + "application_controller": 1, + "httpc_profile_sup": 1, + "config_sup": 1, + "rexi_server": 1 + }, + "internal_replication_jobs": 1, + "distribution": {} +} diff --git a/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/root.json b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/root.json new file mode 100644 index 00000000000000..e7feb41c7695c8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/couchdb/testdata/v3.1.1/root.json @@ -0,0 +1,16 @@ +{ + "couchdb": "Welcome", + "version": "3.1.1", + "git_sha": "ce596c65d", + "uuid": "d7bc2230b8e4de7f20680091bd7a21c7", + "features": [ + "access-ready", + "partitioned", + "pluggable-storage-engines", + "reshard", + "scheduler" + ], + "vendor": { + "name": "The Apache Software Foundation" + } +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/README.md b/src/go/collectors/go.d.plugin/modules/dnsdist/README.md new file mode 120000 index 00000000000000..c5fd71aa5d1847 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/README.md @@ -0,0 +1 @@ +integrations/dnsdist.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/charts.go b/src/go/collectors/go.d.plugin/modules/dnsdist/charts.go new file mode 100644 index 00000000000000..78d73603b07463 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/charts.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsdist + +import "github.com/netdata/go.d.plugin/agent/module" + +var charts = module.Charts{ + { + ID: "queries", + Title: "Client queries received", + Units: "queries/s", + Fam: "queries", + Ctx: "dnsdist.queries", + Dims: module.Dims{ + {ID: "queries", Name: "all", Algo: module.Incremental}, + {ID: "rdqueries", Name: "recursive", Algo: module.Incremental}, + {ID: "empty-queries", Name: "empty", Algo: module.Incremental}, + }, + }, + { + ID: "queries_dropped", + Title: "Client queries dropped", + Units: "queries/s", + Fam: "queries", + Ctx: "dnsdist.queries_dropped", + Dims: module.Dims{ + {ID: "rule-drop", Name: "rule drop", Algo: module.Incremental}, + {ID: "dyn-blocked", Name: "dynamic blocked", Algo: module.Incremental}, + {ID: "no-policy", Name: "no policy", 
Algo: module.Incremental}, + {ID: "noncompliant-queries", Name: "non queries", Algo: module.Incremental}, + }, + }, + { + ID: "packets_dropped", + Title: "Packets dropped", + Units: "packets/s", + Fam: "packets", + Ctx: "dnsdist.packets_dropped", + Dims: module.Dims{ + {ID: "acl-drops", Name: "acl", Algo: module.Incremental}, + }, + }, + { + ID: "answers", + Title: "Answers statistics", + Units: "answers/s", + Fam: "answers", + Ctx: "dnsdist.answers", + Dims: module.Dims{ + {ID: "self-answered", Name: "self answered", Algo: module.Incremental}, + {ID: "rule-nxdomain", Name: "nxdomain", Algo: module.Incremental, Mul: -1}, + {ID: "rule-refused", Name: "refused", Algo: module.Incremental, Mul: -1}, + {ID: "trunc-failures", Name: "trunc failures", Algo: module.Incremental, Mul: -1}, + }, + }, + { + ID: "backend_responses", + Title: "Backend responses", + Units: "responses/s", + Fam: "backends", + Ctx: "dnsdist.backend_responses", + Dims: module.Dims{ + {ID: "responses", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "backend_commerrors", + Title: "Backend communication errors", + Units: "errors/s", + Fam: "backends", + Ctx: "dnsdist.backend_commerrors", + Dims: module.Dims{ + {ID: "downstream-send-errors", Name: "send errors", Algo: module.Incremental}, + }, + }, + { + ID: "backend_errors", + Title: "Backend error responses", + Units: "responses/s", + Fam: "backends", + Ctx: "dnsdist.backend_errors", + Dims: module.Dims{ + {ID: "downstream-timeouts", Name: "timeouts", Algo: module.Incremental}, + {ID: "servfail-responses", Name: "servfail", Algo: module.Incremental}, + {ID: "noncompliant-responses", Name: "non compliant", Algo: module.Incremental}, + }, + }, + { + ID: "cache", + Title: "Cache performance", + Units: "answers/s", + Fam: "cache", + Ctx: "dnsdist.cache", + Dims: module.Dims{ + {ID: "cache-hits", Name: "hits", Algo: module.Incremental}, + {ID: "cache-misses", Name: "misses", Algo: module.Incremental, Mul: -1}, + }, + }, + { + ID: "servercpu", + Title: "DNSdist server CPU utilization", + Units: "ms/s", + Fam: "server", + Ctx: "dnsdist.servercpu", + Type: module.Stacked, + Dims: module.Dims{ + {ID: "cpu-sys-msec", Name: "system state", Algo: module.Incremental}, + {ID: "cpu-user-msec", Name: "user state", Algo: module.Incremental}, + }, + }, + { + ID: "servermem", + Title: "DNSdist server memory utilization", + Units: "MiB", + Fam: "server", + Ctx: "dnsdist.servermem", + Type: module.Area, + Dims: module.Dims{ + {ID: "real-memory-usage", Name: "memory usage", Div: 1 << 20}, + }, + }, + { + ID: "query_latency", + Title: "Query latency", + Units: "queries/s", + Fam: "latency", + Ctx: "dnsdist.query_latency", + Type: module.Stacked, + Dims: module.Dims{ + {ID: "latency0-1", Name: "1ms", Algo: module.Incremental}, + {ID: "latency1-10", Name: "10ms", Algo: module.Incremental}, + {ID: "latency10-50", Name: "50ms", Algo: module.Incremental}, + {ID: "latency50-100", Name: "100ms", Algo: module.Incremental}, + {ID: "latency100-1000", Name: "1sec", Algo: module.Incremental}, + {ID: "latency-slow", Name: "slow", Algo: module.Incremental}, + }, + }, + { + ID: "query_latency_avg", + Title: "Average latency for the last N queries", + Units: "microseconds", + Fam: "latency", + Ctx: "dnsdist.query_latency_avg", + Dims: module.Dims{ + {ID: "latency-avg100", Name: "100"}, + {ID: "latency-avg1000", Name: "1k"}, + {ID: "latency-avg10000", Name: "10k"}, + {ID: "latency-avg1000000", Name: "1000k"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/collect.go 
b/src/go/collectors/go.d.plugin/modules/dnsdist/collect.go new file mode 100644 index 00000000000000..53075a5add7084 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/collect.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsdist + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathJSONStat = "/jsonstat" +) + +func (d *DNSdist) collect() (map[string]int64, error) { + statistics, err := d.scrapeStatistics() + if err != nil { + return nil, err + } + + collected := make(map[string]int64) + d.collectStatistic(collected, statistics) + + return collected, nil +} + +func (d *DNSdist) collectStatistic(collected map[string]int64, statistics *statisticMetrics) { + for metric, value := range stm.ToMap(statistics) { + collected[metric] = value + } +} + +func (d *DNSdist) scrapeStatistics() (*statisticMetrics, error) { + req, _ := web.NewHTTPRequest(d.Request) + req.URL.Path = urlPathJSONStat + req.URL.RawQuery = url.Values{"command": []string{"stats"}}.Encode() + + var statistics statisticMetrics + if err := d.doOKDecode(req, &statistics); err != nil { + return nil, err + } + + return &statistics, nil +} + +func (d *DNSdist) doOKDecode(req *http.Request, in interface{}) error { + resp, err := d.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/config_schema.json b/src/go/collectors/go.d.plugin/modules/dnsdist/config_schema.json new file mode 100644 index 00000000000000..880190ce2c8c60 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsdist job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist.go b/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist.go new file mode 100644 index 00000000000000..0af2425344e585 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsdist + +import ( + _ "embed" + "net/http" + "time" + + 
"github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("dnsdist", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + }) +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type DNSdist struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func New() *DNSdist { + return &DNSdist{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8083", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + } +} + +func (d *DNSdist) Init() bool { + err := d.validateConfig() + if err != nil { + d.Errorf("config validation: %v", err) + return false + } + + client, err := d.initHTTPClient() + if err != nil { + d.Errorf("init HTTP client: %v", err) + return false + } + d.httpClient = client + + cs, err := d.initCharts() + if err != nil { + d.Errorf("init charts: %v", err) + return false + } + d.charts = cs + + return true +} + +func (d *DNSdist) Check() bool { + return len(d.Collect()) > 0 +} + +func (d *DNSdist) Charts() *module.Charts { + return d.charts +} + +func (d *DNSdist) Collect() map[string]int64 { + ms, err := d.collect() + if err != nil { + d.Error(err) + } + + if len(ms) == 0 { + return nil + } + + return ms +} + +func (d *DNSdist) Cleanup() { + if d.httpClient == nil { + return + } + + d.httpClient.CloseIdleConnections() +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist_test.go b/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist_test.go new file mode 100644 index 00000000000000..851d99016e22ad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/dnsdist_test.go @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsdist + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v151JSONStat, _ = os.ReadFile("testdata/v1.5.1/jsonstat.json") +) + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v151JSONStat": v151JSONStat, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*DNSdist)(nil), New()) +} + +func Test_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset URL": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:38001", + }, + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ns := New() + ns.Config = test.config + + if test.wantFail { + assert.False(t, ns.Init()) + } else { + assert.True(t, ns.Init()) + } + }) + } +} + +func Test_Charts(t *testing.T) { + dist := New() + require.True(t, dist.Init()) + assert.NotNil(t, dist.Charts()) +} + +func Test_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func Test_Check(t *testing.T) { + tests := 
map[string]struct {
+        prepare  func() (dist *DNSdist, cleanup func())
+        wantFail bool
+    }{
+        "success on valid response v1.5.1": {
+            prepare:  preparePowerDNSdistV151,
+            wantFail: false,
+        },
+        "fails on 404 response": {
+            prepare:  preparePowerDNSdist404,
+            wantFail: true,
+        },
+        "fails on connection refused": {
+            prepare:  preparePowerDNSdistConnectionRefused,
+            wantFail: true,
+        },
+        "fails with invalid data": {
+            prepare:  preparePowerDNSdistInvalidData,
+            wantFail: true,
+        },
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            dist, cleanup := test.prepare()
+            defer cleanup()
+            require.True(t, dist.Init())
+
+            if test.wantFail {
+                assert.False(t, dist.Check())
+            } else {
+                assert.True(t, dist.Check())
+            }
+        })
+    }
+}
+
+func Test_Collect(t *testing.T) {
+    tests := map[string]struct {
+        prepare       func() (dist *DNSdist, cleanup func())
+        wantCollected map[string]int64
+    }{
+        "success on valid response v1.5.1": {
+            prepare: preparePowerDNSdistV151,
+            wantCollected: map[string]int64{
+                "acl-drops":              1,
+                "cache-hits":             1,
+                "cache-misses":           1,
+                "cpu-sys-msec":           411,
+                "cpu-user-msec":          939,
+                "downstream-send-errors": 1,
+                "downstream-timeouts":    1,
+                "dyn-blocked":            1,
+                "empty-queries":          1,
+                "latency-avg100":         14237,
+                "latency-avg1000":        9728,
+                "latency-avg10000":       1514,
+                "latency-avg1000000":     15,
+                "latency-slow":           1,
+                "latency0-1":             1,
+                "latency1-10":            3,
+                "latency10-50":           996,
+                "latency100-1000":        4,
+                "latency50-100":          1,
+                "no-policy":              1,
+                "noncompliant-queries":   1,
+                "noncompliant-responses": 1,
+                "queries":                1003,
+                "rdqueries":              1003,
+                "real-memory-usage":      202125312,
+                "responses":              1003,
+                "rule-drop":              1,
+                "rule-nxdomain":          1,
+                "rule-refused":           1,
+                "self-answered":          1,
+                "servfail-responses":     1,
+                "trunc-failures":         1,
+            },
+        },
+        "fails on 404 response": {
+            prepare: preparePowerDNSdist404,
+        },
+        "fails on connection refused": {
+            prepare: preparePowerDNSdistConnectionRefused,
+        },
+        "fails with invalid data": {
+            prepare: preparePowerDNSdistInvalidData,
+        },
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            dist, cleanup := test.prepare()
+            defer cleanup()
+            require.True(t, dist.Init())
+
+            collected := dist.Collect()
+
+            assert.Equal(t, test.wantCollected, collected)
+            if len(test.wantCollected) > 0 {
+                ensureCollectedHasAllChartsDimsVarsIDs(t, dist, collected)
+            }
+        })
+    }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dist *DNSdist, collected map[string]int64) {
+    for _, chart := range *dist.Charts() {
+        if chart.Obsolete {
+            continue
+        }
+        for _, dim := range chart.Dims {
+            _, ok := collected[dim.ID]
+            assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+        }
+        for _, v := range chart.Vars {
+            _, ok := collected[v.ID]
+            assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+        }
+    }
+}
+
+func preparePowerDNSdistV151() (*DNSdist, func()) {
+    srv := preparePowerDNSDistEndpoint()
+    ns := New()
+    ns.URL = srv.URL
+
+    return ns, srv.Close
+}
+
+func preparePowerDNSdist404() (*DNSdist, func()) {
+    srv := httptest.NewServer(http.HandlerFunc(
+        func(w http.ResponseWriter, r *http.Request) {
+            w.WriteHeader(http.StatusNotFound)
+        }))
+    ns := New()
+    ns.URL = srv.URL
+
+    return ns, srv.Close
+}
+
+func preparePowerDNSdistConnectionRefused() (*DNSdist, func()) {
+    ns := New()
+    ns.URL = "http://127.0.0.1:38001"
+
+    return ns, func() {}
+}
+
+func preparePowerDNSdistInvalidData() (*DNSdist, func()) {
+    srv := httptest.NewServer(http.HandlerFunc(
+        func(w http.ResponseWriter, r *http.Request) {
+            _, _ = w.Write([]byte("hello and\n goodbye"))
+        }))
+    ns := New()
+    ns.URL = srv.URL
+
+    return ns, srv.Close
+}
+
+func preparePowerDNSDistEndpoint() *httptest.Server {
+    return httptest.NewServer(http.HandlerFunc(
+        func(w http.ResponseWriter, r *http.Request) {
+            switch r.URL.String() {
+            case "/jsonstat?command=stats":
+                _, _ = w.Write(v151JSONStat)
+            default:
+                w.WriteHeader(http.StatusNotFound)
+            }
+        }))
+}
diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/init.go b/src/go/collectors/go.d.plugin/modules/dnsdist/init.go
new file mode 100644
index 00000000000000..d5889168174b27
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dnsdist/init.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import (
+    "errors"
+    "net/http"
+
+    "github.com/netdata/go.d.plugin/agent/module"
+    "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (d DNSdist) validateConfig() error {
+    if d.URL == "" {
+        return errors.New("URL not set")
+    }
+
+    if _, err := web.NewHTTPRequest(d.Request); err != nil {
+        return err
+    }
+
+    return nil
+}
+
+func (d DNSdist) initHTTPClient() (*http.Client, error) {
+    return web.NewHTTPClient(d.Client)
+}
+
+func (d DNSdist) initCharts() (*module.Charts, error) {
+    return charts.Copy(), nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/integrations/dnsdist.md b/src/go/collectors/go.d.plugin/modules/dnsdist/integrations/dnsdist.md
new file mode 100644
index 00000000000000..26f7551b729c7d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dnsdist/integrations/dnsdist.md
@@ -0,0 +1,210 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsdist/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsdist/metadata.yaml"
+sidebar_label: "DNSdist"
+learn_status: "Published"
+learn_rel_path: "Data Collection/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DNSdist
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dnsdist
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors DNSdist servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per DNSdist instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
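+
+The dimensions in the table below map to counters returned by dnsdist's `/jsonstat?command=stats` endpoint, which is the same endpoint this collector scrapes (see `collect.go` in this change). As a quick sanity check you can query it directly; the URL and the `X-API-Key` value below are placeholders taken from the configuration examples later in this document:
+
+```bash
+curl -H 'X-API-Key: your-api-key' 'http://127.0.0.1:8083/jsonstat?command=stats'
+```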
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dnsdist.queries | all, recursive, empty | queries/s | +| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s | +| dnsdist.packets_dropped | acl | packets/s | +| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s | +| dnsdist.backend_responses | responses | responses/s | +| dnsdist.backend_commerrors | send_errors | errors/s | +| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s | +| dnsdist.cache | hits, misses | answers/s | +| dnsdist.servercpu | system_state, user_state | ms/s | +| dnsdist.servermem | memory_usage | MiB | +| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s | +| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable DNSdist built-in Webserver + +For collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/dnsdist.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/dnsdist.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8083 | yes | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| timeout | HTTP request timeout. | 1 | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client tls certificate. | | no | +| tls_key | Client tls key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8083 + headers: + X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key). + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8083 + headers: + X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key). + + - name: remote + url: http://203.0.113.0:8083 + headers: + X-API-Key: 'your-api-key' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m dnsdist + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/metadata.yaml b/src/go/collectors/go.d.plugin/modules/dnsdist/metadata.yaml new file mode 100644 index 00000000000000..4e7a45d39f8f07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/metadata.yaml @@ -0,0 +1,259 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-dnsdist + plugin_name: go.d.plugin + module_name: dnsdist + monitored_instance: + name: DNSdist + link: https://dnsdist.org/ + icon_filename: network-wired.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - dnsdist + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors DNSDist servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable DNSdist built-in Webserver + description: | + For collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html). + configuration: + file: + name: go.d/dnsdist.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8083 + required: true + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: timeout + description: HTTP request timeout. 
+ default_value: 1 + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client tls certificate. + default_value: "" + required: false + - name: tls_key + description: Client tls key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8083 + headers: + X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key). + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8083 + headers: + X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key). + + - name: remote + url: http://203.0.113.0:8083 + headers: + X-API-Key: 'your-api-key' + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: dnsdist.queries + description: Client queries received + unit: queries/s + chart_type: line + dimensions: + - name: all + - name: recursive + - name: empty + - name: dnsdist.queries_dropped + description: Client queries dropped + unit: queries/s + chart_type: line + dimensions: + - name: rule_drop + - name: dynamic_blocked + - name: no_policy + - name: non_queries + - name: dnsdist.packets_dropped + description: Packets dropped + unit: packets/s + chart_type: line + dimensions: + - name: acl + - name: dnsdist.answers + description: Answers statistics + unit: answers/s + chart_type: line + dimensions: + - name: self_answered + - name: nxdomain + - name: refused + - name: trunc_failures + - name: dnsdist.backend_responses + description: Backend responses + unit: responses/s + chart_type: line + dimensions: + - name: responses + - name: dnsdist.backend_commerrors + description: Backend communication errors + unit: errors/s + chart_type: line + dimensions: + - name: send_errors + - name: dnsdist.backend_errors + description: Backend error responses + unit: responses/s + chart_type: line + dimensions: + - name: timeouts + - name: servfail + - name: non_compliant + - name: dnsdist.cache + description: Cache performance + unit: answers/s + chart_type: line + dimensions: + - name: hits + - name: misses + - name: dnsdist.servercpu + description: DNSdist server CPU utilization + unit: ms/s + chart_type: stacked + dimensions: + - name: system_state + - name: user_state + - name: dnsdist.servermem + description: DNSdist server memory utilization + unit: MiB + chart_type: area + dimensions: + - name: memory_usage + - name: dnsdist.query_latency + description: Query latency + unit: queries/s + chart_type: stacked + dimensions: + - name: 1ms + - name: 10ms + - name: 50ms + - name: 100ms + - name: 1sec + - name: slow + - name: dnsdist.query_latency_avg + description: Average latency for the last N queries + unit: microseconds + chart_type: line + dimensions: + - name: "100" + - name: 1k + - name: 10k + - name: 1000k diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/metrics.go b/src/go/collectors/go.d.plugin/modules/dnsdist/metrics.go new file mode 100644 index 00000000000000..1de04319dfdea0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/metrics.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsdist + +// https://dnsdist.org/guides/webserver.html#get--jsonstat +// https://dnsdist.org/statistics.html + +type statisticMetrics struct { + AclDrops float64 `stm:"acl-drops" json:"acl-drops"` + CacheHits float64 `stm:"cache-hits" json:"cache-hits"` + CacheMisses float64 `stm:"cache-misses" json:"cache-misses"` + CPUSysMsec float64 `stm:"cpu-sys-msec" json:"cpu-sys-msec"` + CPUUserMsec float64 `stm:"cpu-user-msec" json:"cpu-user-msec"` + DownStreamSendErrors float64 `stm:"downstream-send-errors" json:"downstream-send-errors"` + DownStreamTimeout float64 `stm:"downstream-timeouts" json:"downstream-timeouts"` + DynBlocked float64 `stm:"dyn-blocked" json:"dyn-blocked"` + EmptyQueries float64 `stm:"empty-queries" json:"empty-queries"` + LatencyAvg100 float64 `stm:"latency-avg100" json:"latency-avg100"` + LatencyAvg1000 float64 `stm:"latency-avg1000" json:"latency-avg1000"` + LatencyAvg10000 float64 `stm:"latency-avg10000" json:"latency-avg10000"` + LatencyAvg1000000 float64 `stm:"latency-avg1000000" json:"latency-avg1000000"` + LatencySlow float64 `stm:"latency-slow" json:"latency-slow"` + Latency0 float64 `stm:"latency0-1" 
json:"latency0-1"` + Latency1 float64 `stm:"latency1-10" json:"latency1-10"` + Latency10 float64 `stm:"latency10-50" json:"latency10-50"` + Latency100 float64 `stm:"latency100-1000" json:"latency100-1000"` + Latency50 float64 `stm:"latency50-100" json:"latency50-100"` + NoPolicy float64 `stm:"no-policy" json:"no-policy"` + NonCompliantQueries float64 `stm:"noncompliant-queries" json:"noncompliant-queries"` + NonCompliantResponses float64 `stm:"noncompliant-responses" json:"noncompliant-responses"` + Queries float64 `stm:"queries" json:"queries"` + RdQueries float64 `stm:"rdqueries" json:"rdqueries"` + RealMemoryUsage float64 `stm:"real-memory-usage" json:"real-memory-usage"` + Responses float64 `stm:"responses" json:"responses"` + RuleDrop float64 `stm:"rule-drop" json:"rule-drop"` + RuleNxDomain float64 `stm:"rule-nxdomain" json:"rule-nxdomain"` + RuleRefused float64 `stm:"rule-refused" json:"rule-refused"` + SelfAnswered float64 `stm:"self-answered" json:"self-answered"` + ServFailResponses float64 `stm:"servfail-responses" json:"servfail-responses"` + TruncFailures float64 `stm:"trunc-failures" json:"trunc-failures"` +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsdist/testdata/v1.5.1/jsonstat.json b/src/go/collectors/go.d.plugin/modules/dnsdist/testdata/v1.5.1/jsonstat.json new file mode 100644 index 00000000000000..37b791e477ccfd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsdist/testdata/v1.5.1/jsonstat.json @@ -0,0 +1,56 @@ +{ + "acl-drops": 1, + "cache-hits": 1, + "cache-misses": 1, + "cpu-iowait": 39284, + "cpu-steal": 1, + "cpu-sys-msec": 411, + "cpu-user-msec": 939, + "doh-query-pipe-full": 1, + "doh-response-pipe-full": 1, + "downstream-send-errors": 1, + "downstream-timeouts": 1, + "dyn-block-nmg-size": 1, + "dyn-blocked": 1, + "empty-queries": 1, + "fd-usage": 22, + "frontend-noerror": 1003, + "frontend-nxdomain": 1, + "frontend-servfail": 1, + "latency-avg100": 14237.416845242331, + "latency-avg1000": 9728.0972656536997, + "latency-avg10000": 1514.0804874856037, + "latency-avg1000000": 15.0804874856037, + "latency-count": 1003, + "latency-slow": 1, + "latency-sum": 15474, + "latency0-1": 1, + "latency1-10": 3, + "latency10-50": 996, + "latency100-1000": 4, + "latency50-100": 1, + "no-policy": 1, + "noncompliant-queries": 1, + "noncompliant-responses": 1, + "over-capacity-drops": 1, + "packetcache-hits": 1, + "packetcache-misses": 1, + "queries": 1003, + "rdqueries": 1003, + "real-memory-usage": 202125312, + "responses": 1003, + "rule-drop": 1, + "rule-nxdomain": 1, + "rule-refused": 1, + "rule-servfail": 1, + "security-status": 1, + "self-answered": 1, + "servfail-responses": 1, + "too-old-drops": 1, + "trunc-failures": 1, + "udp-in-errors": 38, + "udp-noport-errors": 1102, + "udp-recvbuf-errors": 1, + "udp-sndbuf-errors": 179, + "uptime": 394 +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/README.md b/src/go/collectors/go.d.plugin/modules/dnsmasq/README.md new file mode 120000 index 00000000000000..a424dd9c6af3fb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/README.md @@ -0,0 +1 @@ +integrations/dnsmasq.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/charts.go b/src/go/collectors/go.d.plugin/modules/dnsmasq/charts.go new file mode 100644 index 00000000000000..8ae9908302d769 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/charts.go @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq + +import 
"github.com/netdata/go.d.plugin/agent/module" + +var cacheCharts = module.Charts{ + { + ID: "servers_queries", + Title: "Queries forwarded to the upstream servers", + Units: "queries/s", + Fam: "servers", + Ctx: "dnsmasq.servers_queries", + Dims: module.Dims{ + {ID: "queries", Name: "success", Algo: module.Incremental}, + {ID: "failed_queries", Name: "failed", Algo: module.Incremental}, + }, + }, + { + ID: "cache_performance", + Title: "Cache performance", + Units: "events/s", + Fam: "cache", + Ctx: "dnsmasq.cache_performance", + Dims: module.Dims{ + {ID: "hits", Algo: module.Incremental}, + {ID: "misses", Algo: module.Incremental}, + }, + }, + { + ID: "cache_operations", + Title: "Cache operations", + Units: "operations/s", + Fam: "cache", + Ctx: "dnsmasq.cache_operations", + Dims: module.Dims{ + {ID: "insertions", Algo: module.Incremental}, + {ID: "evictions", Algo: module.Incremental}, + }, + }, + { + ID: "cache_size", + Title: "Cache size", + Units: "entries", + Fam: "cache", + Ctx: "dnsmasq.cache_size", + Dims: module.Dims{ + {ID: "cachesize", Name: "size"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/collect.go b/src/go/collectors/go.d.plugin/modules/dnsmasq/collect.go new file mode 100644 index 00000000000000..2561688d709caf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/collect.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq + +import ( + "fmt" + "strconv" + "strings" + + "github.com/miekg/dns" +) + +func (d *Dnsmasq) collect() (map[string]int64, error) { + r, err := d.queryCacheStatistics() + if err != nil { + return nil, err + } + + ms := make(map[string]int64) + if err = d.collectResponse(ms, r); err != nil { + return nil, err + } + + return ms, nil +} + +func (d *Dnsmasq) collectResponse(ms map[string]int64, resp *dns.Msg) error { + /* + ;; flags: qr aa rd ra; QUERY: 7, ANSWER: 7, AUTHORITY: 0, ADDITIONAL: 0 + + ;; QUESTION SECTION: + ;cachesize.bind. CH TXT + ;insertions.bind. CH TXT + ;evictions.bind. CH TXT + ;hits.bind. CH TXT + ;misses.bind. CH TXT + ;auth.bind. CH TXT + ;servers.bind. CH TXT + + ;; ANSWER SECTION: + cachesize.bind. 0 CH TXT "150" + insertions.bind. 0 CH TXT "1" + evictions.bind. 0 CH TXT "0" + hits.bind. 0 CH TXT "176" + misses.bind. 0 CH TXT "4" + auth.bind. 0 CH TXT "0" + servers.bind. 
0 CH TXT "10.0.0.1#53 0 0" "1.1.1.1#53 4 3" "1.0.0.1#53 3 0" + */ + for _, a := range resp.Answer { + txt, ok := a.(*dns.TXT) + if !ok { + continue + } + + idx := strings.IndexByte(txt.Hdr.Name, '.') + if idx == -1 { + continue + } + + switch name := txt.Hdr.Name[:idx]; name { + case "servers": + for _, entry := range txt.Txt { + parts := strings.Fields(entry) + if len(parts) != 3 { + return fmt.Errorf("parse %s (%s): unexpected format", txt.Hdr.Name, entry) + } + queries, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, entry, err) + } + failedQueries, err := strconv.ParseFloat(parts[2], 64) + if err != nil { + return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, entry, err) + } + + ms["queries"] += int64(queries) + ms["failed_queries"] += int64(failedQueries) + } + case "cachesize", "insertions", "evictions", "hits", "misses", "auth": + if len(txt.Txt) != 1 { + return fmt.Errorf("parse '%s' (%v): unexpected format", txt.Hdr.Name, txt.Txt) + } + v, err := strconv.ParseFloat(txt.Txt[0], 64) + if err != nil { + return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, txt.Txt[0], err) + } + + ms[name] = int64(v) + } + } + return nil +} + +func (d *Dnsmasq) queryCacheStatistics() (*dns.Msg, error) { + msg := &dns.Msg{ + MsgHdr: dns.MsgHdr{ + Id: dns.Id(), + RecursionDesired: true, + }, + Question: []dns.Question{ + {Name: "cachesize.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + {Name: "insertions.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + {Name: "evictions.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + {Name: "hits.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + {Name: "misses.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + // TODO: collect auth.bind if available + // auth.bind query is only supported if dnsmasq has been built + // to support running as an authoritative name server. 
See https://github.com/netdata/netdata/issues/13766 + //{Name: "auth.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + {Name: "servers.bind.", Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS}, + }, + } + + r, _, err := d.dnsClient.Exchange(msg, d.Address) + if err != nil { + return nil, err + } + if r == nil { + return nil, fmt.Errorf("'%s' returned an empty response", d.Address) + } + if r.Rcode != dns.RcodeSuccess { + s := dns.RcodeToString[r.Rcode] + return nil, fmt.Errorf("'%s' returned '%s' (%d) response code", d.Address, s, r.Rcode) + } + return r, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/config_schema.json b/src/go/collectors/go.d.plugin/modules/dnsmasq/config_schema.json new file mode 100644 index 00000000000000..d0881991748501 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/config_schema.json @@ -0,0 +1,26 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsmasq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "protocol": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq.go b/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq.go new file mode 100644 index 00000000000000..33e252b09bf512 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/miekg/dns" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("dnsmasq", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Dnsmasq { + return &Dnsmasq{ + Config: Config{ + Protocol: "udp", + Address: "127.0.0.1:53", + Timeout: web.Duration{Duration: time.Second}, + }, + + newDNSClient: func(network string, timeout time.Duration) dnsClient { + return &dns.Client{ + Net: network, + Timeout: timeout, + } + }, + } +} + +type Config struct { + Protocol string `yaml:"protocol"` + Address string `yaml:"address"` + Timeout web.Duration `yaml:"timeout"` +} + +type ( + Dnsmasq struct { + module.Base + Config `yaml:",inline"` + + newDNSClient func(network string, timeout time.Duration) dnsClient + dnsClient dnsClient + + charts *module.Charts + } + + dnsClient interface { + Exchange(msg *dns.Msg, address string) (resp *dns.Msg, rtt time.Duration, err error) + } +) + +func (d *Dnsmasq) Init() bool { + err := d.validateConfig() + if err != nil { + d.Errorf("config validation: %v", err) + return false + } + + client, err := d.initDNSClient() + if err != nil { + d.Errorf("init DNS client: %v", err) + return false + } + d.dnsClient = client + + charts, err := d.initCharts() + if err != nil { + d.Errorf("init charts: %v", err) + return false + } + d.charts = charts + + return true +} + +func (d *Dnsmasq) Check() bool { + return len(d.Collect()) > 0 +} + +func (d *Dnsmasq) Charts() *module.Charts { + return d.charts +} + +func (d *Dnsmasq) Collect() map[string]int64 { + ms, err := d.collect() + if err != nil { + d.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (Dnsmasq) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq_test.go 
b/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq_test.go new file mode 100644 index 00000000000000..b4f0bb555c2029 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/dnsmasq_test.go @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.IsType(t, (*Dnsmasq)(nil), New()) +} + +func TestDnsmasq_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'address'": { + wantFail: true, + config: Config{ + Protocol: "udp", + Address: "", + }, + }, + "fails on unset 'protocol'": { + wantFail: true, + config: Config{ + Protocol: "", + Address: "127.0.0.1:53", + }, + }, + "fails on invalid 'protocol'": { + wantFail: true, + config: Config{ + Protocol: "http", + Address: "127.0.0.1:53", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ns := New() + ns.Config = test.config + + if test.wantFail { + assert.False(t, ns.Init()) + } else { + assert.True(t, ns.Init()) + } + }) + } +} + +func TestDnsmasq_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *Dnsmasq + wantFail bool + }{ + "success on valid response": { + prepare: prepareOKDnsmasq, + }, + "fails on error on cache stats query": { + wantFail: true, + prepare: prepareErrorOnExchangeDnsmasq, + }, + "fails on response rcode is not success": { + wantFail: true, + prepare: prepareRcodeServerFailureOnExchangeDnsmasq, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dnsmasq := test.prepare() + require.True(t, dnsmasq.Init()) + + if test.wantFail { + assert.False(t, dnsmasq.Check()) + } else { + assert.True(t, dnsmasq.Check()) + } + }) + } +} + +func TestDnsmasq_Charts(t *testing.T) { + dnsmasq := New() + require.True(t, dnsmasq.Init()) + assert.NotNil(t, dnsmasq.Charts()) +} + +func TestDnsmasq_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestDnsmasq_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *Dnsmasq + wantCollected map[string]int64 + }{ + "success on valid response": { + prepare: prepareOKDnsmasq, + wantCollected: map[string]int64{ + //"auth": 5, + "cachesize": 999, + "evictions": 5, + "failed_queries": 9, + "hits": 100, + "insertions": 10, + "misses": 50, + "queries": 17, + }, + }, + "fails on error on cache stats query": { + prepare: prepareErrorOnExchangeDnsmasq, + }, + "fails on response rcode is not success": { + prepare: prepareRcodeServerFailureOnExchangeDnsmasq, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dnsmasq := test.prepare() + require.True(t, dnsmasq.Init()) + + collected := dnsmasq.Collect() + + assert.Equal(t, test.wantCollected, collected) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, dnsmasq, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dnsmasq *Dnsmasq, collected map[string]int64) { + for _, chart := range *dnsmasq.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim 
in collected", v.ID, chart.ID) + } + } +} + +func prepareOKDnsmasq() *Dnsmasq { + dnsmasq := New() + dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient { + return &mockDNSClient{} + } + return dnsmasq +} + +func prepareErrorOnExchangeDnsmasq() *Dnsmasq { + dnsmasq := New() + dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient { + return &mockDNSClient{ + errOnExchange: true, + } + } + return dnsmasq +} + +func prepareRcodeServerFailureOnExchangeDnsmasq() *Dnsmasq { + dnsmasq := New() + dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient { + return &mockDNSClient{ + rcodeServerFailureOnExchange: true, + } + } + return dnsmasq +} + +type mockDNSClient struct { + errOnExchange bool + rcodeServerFailureOnExchange bool +} + +func (m mockDNSClient) Exchange(msg *dns.Msg, _ string) (*dns.Msg, time.Duration, error) { + if m.errOnExchange { + return nil, 0, errors.New("'Exchange' error") + } + if m.rcodeServerFailureOnExchange { + resp := &dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeServerFailure}} + return resp, 0, nil + } + + var answers []dns.RR + for _, q := range msg.Question { + a, err := prepareDNSAnswer(q) + if err != nil { + return nil, 0, err + } + answers = append(answers, a) + } + + resp := &dns.Msg{ + MsgHdr: dns.MsgHdr{ + Rcode: dns.RcodeSuccess, + }, + Answer: answers, + } + return resp, 0, nil +} + +func prepareDNSAnswer(q dns.Question) (dns.RR, error) { + if want, got := dns.TypeToString[dns.TypeTXT], dns.TypeToString[q.Qtype]; want != got { + return nil, fmt.Errorf("unexpected Qtype, want=%s, got=%s", want, got) + } + if want, got := dns.ClassToString[dns.ClassCHAOS], dns.ClassToString[q.Qclass]; want != got { + return nil, fmt.Errorf("unexpected Qclass, want=%s, got=%s", want, got) + } + + var txt []string + switch q.Name { + case "cachesize.bind.": + txt = []string{"999"} + case "insertions.bind.": + txt = []string{"10"} + case "evictions.bind.": + txt = []string{"5"} + case "hits.bind.": + txt = []string{"100"} + case "misses.bind.": + txt = []string{"50"} + case "auth.bind.": + txt = []string{"5"} + case "servers.bind.": + txt = []string{"10.0.0.1#53 10 5", "1.1.1.1#53 4 3", "1.0.0.1#53 3 1"} + default: + return nil, fmt.Errorf("unexpected question Name: %s", q.Name) + } + + rr := &dns.TXT{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeTXT, + Class: dns.ClassCHAOS, + }, + Txt: txt, + } + return rr, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/init.go b/src/go/collectors/go.d.plugin/modules/dnsmasq/init.go new file mode 100644 index 00000000000000..2ce4790ae133cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/init.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq + +import ( + "errors" + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (d Dnsmasq) validateConfig() error { + if d.Address == "" { + return errors.New("'address' parameter not set") + } + if !isProtocolValid(d.Protocol) { + return fmt.Errorf("'protocol' (%s) is not valid, expected one of %v", d.Protocol, validProtocols) + } + return nil +} + +func (d Dnsmasq) initDNSClient() (dnsClient, error) { + return d.newDNSClient(d.Protocol, d.Timeout.Duration), nil +} + +func (d Dnsmasq) initCharts() (*module.Charts, error) { + return cacheCharts.Copy(), nil +} + +func isProtocolValid(protocol string) bool { + for _, v := range validProtocols { + if protocol == v { + return true + } + } + return false +} + +var validProtocols = []string{ + "udp", + 
"tcp", + "tcp-tls", +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/integrations/dnsmasq.md b/src/go/collectors/go.d.plugin/modules/dnsmasq/integrations/dnsmasq.md new file mode 100644 index 00000000000000..3d3a3fda6d4bd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/integrations/dnsmasq.md @@ -0,0 +1,195 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsmasq/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsmasq/metadata.yaml" +sidebar_label: "Dnsmasq" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dnsmasq + + +<img src="https://netdata.cloud/img/dnsmasq.svg" width="150"/> + + +Plugin: go.d.plugin +Module: dnsmasq + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Dnsmasq servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Dnsmasq instance + +The metrics apply to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dnsmasq.servers_queries | success, failed | queries/s | +| dnsmasq.cache_performance | hist, misses | events/s | +| dnsmasq.cache_operations | insertions, evictions | operations/s | +| dnsmasq.cache_size | size | entries | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/dnsmasq.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/dnsmasq.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes | +| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no | +| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. 
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:53 + +``` +</details> + +##### Using TCP protocol + +Local server with specific DNS query transport protocol. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:53 + protocol: tcp + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:53 + + - name: remote + address: 203.0.113.0:53 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m dnsmasq + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq/metadata.yaml b/src/go/collectors/go.d.plugin/modules/dnsmasq/metadata.yaml new file mode 100644 index 00000000000000..6911a323a14b03 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq/metadata.yaml @@ -0,0 +1,144 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-dnsmasq + plugin_name: go.d.plugin + module_name: dnsmasq + monitored_instance: + name: Dnsmasq + link: https://thekelleys.org.uk/dnsmasq/doc.html + icon_filename: dnsmasq.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - dnsmasq + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Dnsmasq servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/dnsmasq.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: Server address in `ip:port` format. + default_value: 127.0.0.1:53 + required: true + - name: protocol + description: 'DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls.' + default_value: udp + required: false + - name: timeout + description: DNS query timeout (dial, write and read) in seconds. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. 
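+              # only 'name' and 'address' are required; all other options fall back to their defaults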
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:53
+            - name: Using TCP protocol
+              description: Local server with specific DNS query transport protocol.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:53
+                    protocol: tcp
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:53
+
+                  - name: remote
+                    address: 203.0.113.0:53
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: The metrics apply to the entire monitored application.
+          labels: []
+          metrics:
+            - name: dnsmasq.servers_queries
+              description: Queries forwarded to the upstream servers
+              unit: queries/s
+              chart_type: line
+              dimensions:
+                - name: success
+                - name: failed
+            - name: dnsmasq.cache_performance
+              description: Cache performance
+              unit: events/s
+              chart_type: line
+              dimensions:
+                - name: hits
+                - name: misses
+            - name: dnsmasq.cache_operations
+              description: Cache operations
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: insertions
+                - name: evictions
+            - name: dnsmasq.cache_size
+              description: Cache size
+              unit: entries
+              chart_type: line
+              dimensions:
+                - name: size
diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/README.md b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/README.md
new file mode 120000
index 00000000000000..ad22eb4eeb9f7c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/README.md
@@ -0,0 +1 @@
+integrations/dnsmasq_dhcp.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/charts.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/charts.go
new file mode 100644
index 00000000000000..1d622c7b470e25
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/charts.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+const (
+	prioDHCPRangeUtilization = module.Priority + iota
+	prioDHCPRangeAllocatesLeases
+	prioDHCPRanges
+	prioDHCPHosts
+)
+
+var charts = module.Charts{
+	{
+		ID:       "dhcp_ranges",
+		Title:    "Number of DHCP Ranges",
+		Units:    "ranges",
+		Fam:      "dhcp ranges",
+		Ctx:      "dnsmasq_dhcp.dhcp_ranges",
+		Type:     module.Stacked,
+		Priority: prioDHCPRanges,
+		Dims: module.Dims{
+			{ID: "ipv4_dhcp_ranges", Name: "ipv4"},
+			{ID: "ipv6_dhcp_ranges", Name: "ipv6"},
+		},
+	},
+	{
+		ID:       "dhcp_hosts",
+		Title:    "Number of DHCP Hosts",
+		Units:    "hosts",
+		Fam:      "dhcp hosts",
+		Ctx:      "dnsmasq_dhcp.dhcp_host",
+		Type:     module.Stacked,
+		Priority: prioDHCPHosts,
+		Dims: module.Dims{
+			{ID: "ipv4_dhcp_hosts", Name: "ipv4"},
+			{ID: "ipv6_dhcp_hosts", Name: "ipv6"},
+		},
+	},
+}
+
+var (
+	chartsTmpl = module.Charts{
+		chartTmplDHCPRangeUtilization.Copy(),
+		chartTmplDHCPRangeAllocatedLeases.Copy(),
+	}
+)
+
+var (
+	chartTmplDHCPRangeUtilization = module.Chart{
+		ID:       "dhcp_range_%s_utilization",
+		Title:    "DHCP Range utilization",
+		Units:    "percentage",
+		Fam:      "dhcp range utilization",
+		Ctx:      "dnsmasq_dhcp.dhcp_range_utilization",
+		Type:     module.Area,
+		Priority: prioDHCPRangeUtilization,
+		Dims: module.Dims{
+			{ID: "dhcp_range_%s_utilization", Name: "used"},
+		},
+	}
+	chartTmplDHCPRangeAllocatedLeases = module.Chart{
+		ID:       "dhcp_range_%s_allocated_leases",
+		Title:    "DHCP 
Range Allocated Leases", + Units: "leases", + Fam: "dhcp range leases", + Ctx: "dnsmasq_dhcp.dhcp_range_allocated_leases", + Priority: prioDHCPRangeAllocatesLeases, + Dims: module.Dims{ + {ID: "dhcp_range_%s_allocated_leases", Name: "leases"}, + }, + } +) + +func newDHCPRangeCharts(dhcpRange string) *module.Charts { + charts := chartsTmpl.Copy() + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, dhcpRange) + c.Labels = []module.Label{ + {Key: "dhcp_range", Value: dhcpRange}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, dhcpRange) + } + } + return charts +} + +func (d *DnsmasqDHCP) addDHCPRangeCharts(dhcpRange string) { + charts := newDHCPRangeCharts(dhcpRange) + if err := d.Charts().Add(*charts...); err != nil { + d.Warning(err) + } +} + +func (d *DnsmasqDHCP) removeDHCPRangeCharts(dhcpRange string) { + p := "dhcp_range_" + dhcpRange + for _, c := range *d.Charts() { + if strings.HasSuffix(c.ID, p) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/collect.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/collect.go new file mode 100644 index 00000000000000..77f174b762275a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/collect.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp + +import ( + "bufio" + "io" + "math" + "math/big" + "net" + "os" + "strings" + "time" + + "github.com/netdata/go.d.plugin/pkg/iprange" +) + +func (d *DnsmasqDHCP) collect() (map[string]int64, error) { + now := time.Now() + var updated bool + + if now.Sub(d.parseConfigTime) > d.parseConfigEvery { + d.parseConfigTime = now + + dhcpRanges, dhcpHosts := d.parseDnsmasqDHCPConfiguration() + d.dhcpRanges, d.dhcpHosts = dhcpRanges, dhcpHosts + updated = d.updateCharts() + + d.collectV4V6Stats() + } + + f, err := os.Open(d.LeasesPath) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + if !updated { + fi, err := f.Stat() + if err != nil { + return nil, err + } + + if d.leasesModTime.Equal(fi.ModTime()) { + d.Debug("lease database file modification time has not changed, old data is returned") + return d.mx, nil + } + + d.Debug("leases db file modification time has changed, reading it") + d.leasesModTime = fi.ModTime() + } + + leases := findLeases(f) + d.collectRangesStats(leases) + + return d.mx, nil +} + +func (d *DnsmasqDHCP) collectV4V6Stats() { + d.mx["ipv4_dhcp_ranges"], d.mx["ipv6_dhcp_ranges"] = 0, 0 + for _, r := range d.dhcpRanges { + if r.Family() == iprange.V6Family { + d.mx["ipv6_dhcp_ranges"]++ + } else { + d.mx["ipv4_dhcp_ranges"]++ + } + } + + d.mx["ipv4_dhcp_hosts"], d.mx["ipv6_dhcp_hosts"] = 0, 0 + for _, ip := range d.dhcpHosts { + if ip.To4() == nil { + d.mx["ipv6_dhcp_hosts"]++ + } else { + d.mx["ipv4_dhcp_hosts"]++ + } + } +} + +func (d *DnsmasqDHCP) collectRangesStats(leases []net.IP) { + for _, r := range d.dhcpRanges { + d.mx["dhcp_range_"+r.String()+"_allocated_leases"] = 0 + d.mx["dhcp_range_"+r.String()+"_utilization"] = 0 + } + + for _, ip := range leases { + for _, r := range d.dhcpRanges { + if r.Contains(ip) { + d.mx["dhcp_range_"+r.String()+"_allocated_leases"]++ + break + } + } + } + + for _, ip := range d.dhcpHosts { + for _, r := range d.dhcpRanges { + if r.Contains(ip) { + d.mx["dhcp_range_"+r.String()+"_allocated_leases"]++ + break + } + } + } + + for _, r := range d.dhcpRanges { + name := "dhcp_range_" + r.String() + "_allocated_leases" + numOfIps, ok := d.mx[name] + if !ok { + d.mx[name] = 0 + } + 
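+		// utilization: allocated leases as a percentage of the range size, rounded to a whole percent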
d.mx["dhcp_range_"+r.String()+"_utilization"] = int64(math.Round(calcPercent(numOfIps, r.Size()))) + } +} + +func (d *DnsmasqDHCP) updateCharts() bool { + var updated bool + seen := make(map[string]bool) + for _, r := range d.dhcpRanges { + seen[r.String()] = true + if !d.cacheDHCPRanges[r.String()] { + d.cacheDHCPRanges[r.String()] = true + d.addDHCPRangeCharts(r.String()) + updated = true + } + } + + for v := range d.cacheDHCPRanges { + if !seen[v] { + delete(d.cacheDHCPRanges, v) + d.removeDHCPRangeCharts(v) + updated = true + } + } + return updated +} + +func findLeases(r io.Reader) []net.IP { + /* + 1560300536 08:00:27:61:3c:ee 2.2.2.3 debian8 * + duid 00:01:00:01:24:90:cf:5b:08:00:27:61:2e:2c + 1560300414 660684014 1234::20b * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee + */ + var ips []net.IP + s := bufio.NewScanner(r) + + for s.Scan() { + parts := strings.Fields(s.Text()) + if len(parts) != 5 { + continue + } + + ip := net.ParseIP(parts[2]) + if ip == nil { + continue + } + ips = append(ips, ip) + } + + return ips +} + +func calcPercent(ips int64, hosts *big.Int) float64 { + h := hosts.Int64() + if ips == 0 || h == 0 || !hosts.IsInt64() { + return 0 + } + return float64(ips) * 100 / float64(h) +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/config_schema.json b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/config_schema.json new file mode 100644 index 00000000000000..bb9d768130098c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/config_schema.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dnsmasq_dhcp job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "leases_path": { + "type": "string" + }, + "conf_path": { + "type": "string" + }, + "conf_dir": { + "type": "string" + } + }, + "required": [ + "name", + "leases_path" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp.go new file mode 100644 index 00000000000000..ede8a8ee815518 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp + +import ( + _ "embed" + "net" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/iprange" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("dnsmasq_dhcp", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *DnsmasqDHCP { + config := Config{ + // debian defaults + LeasesPath: "/var/lib/misc/dnsmasq.leases", + ConfPath: "/etc/dnsmasq.conf", + ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new", + } + + return &DnsmasqDHCP{ + Config: config, + charts: charts.Copy(), + parseConfigEvery: time.Minute, + cacheDHCPRanges: make(map[string]bool), + mx: make(map[string]int64), + } +} + +type Config struct { + LeasesPath string `yaml:"leases_path"` + ConfPath string `yaml:"conf_path"` + ConfDir string `yaml:"conf_dir"` +} + +type DnsmasqDHCP struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + leasesModTime time.Time + + parseConfigTime time.Time + parseConfigEvery time.Duration + + dhcpRanges []iprange.Range + dhcpHosts []net.IP + + cacheDHCPRanges map[string]bool + + mx map[string]int64 +} + +func (d *DnsmasqDHCP) Init() bool { + if err := d.validateConfig(); err != nil { + 
d.Errorf("config validation: %v", err) + return false + } + if err := d.checkLeasesPath(); err != nil { + d.Errorf("leases path check: %v", err) + return false + } + + return true +} + +func (d *DnsmasqDHCP) Check() bool { + return len(d.Collect()) > 0 +} + +func (d *DnsmasqDHCP) Charts() *module.Charts { + return d.charts +} + +func (d *DnsmasqDHCP) Collect() map[string]int64 { + mx, err := d.collect() + if err != nil { + d.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (d *DnsmasqDHCP) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp_test.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp_test.go new file mode 100644 index 00000000000000..9e7693fa945e09 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/dhcp_test.go @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testLeasesPath = "testdata/dnsmasq.leases" + testConfPath = "testdata/dnsmasq.conf" + testConfDir = "testdata/dnsmasq.d" +) + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*DnsmasqDHCP)(nil), job) +} + +func TestDnsmasqDHCP_Init(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = testConfPath + job.ConfDir = testConfDir + + assert.True(t, job.Init()) +} + +func TestDnsmasqDHCP_InitEmptyLeasesPath(t *testing.T) { + job := New() + job.LeasesPath = "" + + assert.False(t, job.Init()) +} + +func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.LeasesPath += "!" + + assert.False(t, job.Init()) +} + +func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = "testdata/dnsmasq3.conf" + job.ConfDir = "" + + assert.True(t, job.Init()) +} + +func TestDnsmasqDHCP_Check(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = testConfPath + job.ConfDir = testConfDir + + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestDnsmasqDHCP_Charts(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = testConfPath + job.ConfDir = testConfDir + + require.True(t, job.Init()) + + assert.NotNil(t, job.Charts()) +} + +func TestDnsmasqDHCP_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestDnsmasqDHCP_Collect(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = testConfPath + job.ConfDir = testConfDir + + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "dhcp_range_1230::1-1230::64_allocated_leases": 7, + "dhcp_range_1230::1-1230::64_utilization": 7, + "dhcp_range_1231::1-1231::64_allocated_leases": 1, + "dhcp_range_1231::1-1231::64_utilization": 1, + "dhcp_range_1232::1-1232::64_allocated_leases": 1, + "dhcp_range_1232::1-1232::64_utilization": 1, + "dhcp_range_1233::1-1233::64_allocated_leases": 1, + "dhcp_range_1233::1-1233::64_utilization": 1, + "dhcp_range_1234::1-1234::64_allocated_leases": 1, + "dhcp_range_1234::1-1234::64_utilization": 1, + "dhcp_range_192.168.0.1-192.168.0.100_allocated_leases": 6, + "dhcp_range_192.168.0.1-192.168.0.100_utilization": 6, + "dhcp_range_192.168.1.1-192.168.1.100_allocated_leases": 5, + "dhcp_range_192.168.1.1-192.168.1.100_utilization": 5, + "dhcp_range_192.168.2.1-192.168.2.100_allocated_leases": 4, + 
"dhcp_range_192.168.2.1-192.168.2.100_utilization": 4, + "dhcp_range_192.168.200.1-192.168.200.100_allocated_leases": 1, + "dhcp_range_192.168.200.1-192.168.200.100_utilization": 1, + "dhcp_range_192.168.3.1-192.168.3.100_allocated_leases": 1, + "dhcp_range_192.168.3.1-192.168.3.100_utilization": 1, + "dhcp_range_192.168.4.1-192.168.4.100_allocated_leases": 1, + "dhcp_range_192.168.4.1-192.168.4.100_utilization": 1, + "ipv4_dhcp_hosts": 6, + "ipv4_dhcp_ranges": 6, + "ipv6_dhcp_hosts": 5, + "ipv6_dhcp_ranges": 5, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestDnsmasqDHCP_CollectFailedToOpenLeasesPath(t *testing.T) { + job := New() + job.LeasesPath = testLeasesPath + job.ConfPath = testConfPath + job.ConfDir = testConfDir + + require.True(t, job.Init()) + require.True(t, job.Check()) + + job.LeasesPath = "" + assert.Nil(t, job.Collect()) +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/init.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/init.go new file mode 100644 index 00000000000000..6c74674a3f1811 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/init.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp + +import "errors" + +func (d *DnsmasqDHCP) validateConfig() error { + if d.LeasesPath == "" { + return errors.New("empty 'leases_path'") + } + return nil +} + +func (d *DnsmasqDHCP) checkLeasesPath() error { + f, err := openFile(d.LeasesPath) + if err != nil { + return err + } + _ = f.Close() + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md new file mode 100644 index 00000000000000..617887bbd2c035 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md @@ -0,0 +1,205 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsmasq_dhcp/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsmasq_dhcp/metadata.yaml" +sidebar_label: "Dnsmasq DHCP" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dnsmasq DHCP + + +<img src="https://netdata.cloud/img/dnsmasq.svg" width="150"/> + + +Plugin: go.d.plugin +Module: dnsmasq_dhcp + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Dnsmasq DHCP leases databases, depending on your configuration. + +By default, it uses: + +- `/var/lib/misc/dnsmasq.leases` to read leases. +- `/etc/dnsmasq.conf` to detect dhcp-ranges. +- `/etc/dnsmasq.d` to find additional configurations. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +All configured dhcp-ranges are detected automatically + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
+ + + +### Per Dnsmasq DHCP instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges | +| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts | + +### Per dhcp range + +These metrics refer to the DHCP range. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| dhcp_range | DHCP range in `START_IP:END_IP` format | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dnsmasq_dhcp.dhcp_range_utilization | used | percentage | +| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/dnsmasq_dhcp.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no | +| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no | +| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: dnsmasq_dhcp + leases_path: /var/lib/misc/dnsmasq.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d + +``` +</details> + +##### Pi-hole + +Dnsmasq DHCP on Pi-hole. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: dnsmasq_dhcp + leases_path: /etc/pihole/dhcp.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m dnsmasq_dhcp + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/metadata.yaml b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/metadata.yaml new file mode 100644 index 00000000000000..13b73336cedec7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/metadata.yaml @@ -0,0 +1,151 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-dnsmasq_dhcp + plugin_name: go.d.plugin + module_name: dnsmasq_dhcp + monitored_instance: + name: Dnsmasq DHCP + link: https://www.thekelleys.org.uk/dnsmasq/doc.html + icon_filename: dnsmasq.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - dnsmasq + - dhcp + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Dnsmasq DHCP leases databases, depending on your configuration. + + By default, it uses: + + - `/var/lib/misc/dnsmasq.leases` to read leases. + - `/etc/dnsmasq.conf` to detect dhcp-ranges. + - `/etc/dnsmasq.d` to find additional configurations. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: false + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + All configured dhcp-ranges are detected automatically + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/dnsmasq_dhcp.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: leases_path + description: Path to dnsmasq DHCP leases file. + default_value: /var/lib/misc/dnsmasq.leases + required: false + - name: conf_path + description: Path to dnsmasq configuration file. + default_value: /etc/dnsmasq.conf + required: false + - name: conf_dir + description: Path to dnsmasq configuration directory. + default_value: /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: dnsmasq_dhcp + leases_path: /var/lib/misc/dnsmasq.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d + - name: Pi-hole + description: Dnsmasq DHCP on Pi-hole. + config: | + jobs: + - name: dnsmasq_dhcp + leases_path: /etc/pihole/dhcp.leases + conf_path: /etc/dnsmasq.conf + conf_dir: /etc/dnsmasq.d + troubleshooting: + problems: + list: [] + alerts: + - name: dnsmasq_dhcp_dhcp_range_utilization + metric: dnsmasq_dhcp.dhcp_range_utilization + info: DHCP range utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: dnsmasq_dhcp.dhcp_ranges + description: Number of DHCP Ranges + unit: ranges + chart_type: stacked + dimensions: + - name: ipv4 + - name: ipv6 + - name: dnsmasq_dhcp.dhcp_hosts + description: Number of DHCP Hosts + unit: hosts + chart_type: stacked + dimensions: + - name: ipv4 + - name: ipv6 + - name: dhcp range + description: These metrics refer to the DHCP range. + labels: + - name: dhcp_range + description: DHCP range in `START_IP:END_IP` format + metrics: + - name: dnsmasq_dhcp.dhcp_range_utilization + description: DHCP Range utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: dnsmasq_dhcp.dhcp_range_allocated_leases + description: DHCP Range Allocated Leases + unit: leases + chart_type: line + dimensions: + - name: allocated diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/parse_configuration.go b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/parse_configuration.go new file mode 100644 index 00000000000000..688939b5295370 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/parse_configuration.go @@ -0,0 +1,384 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsmasq_dhcp + +import ( + "bufio" + "fmt" + "net" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/netdata/go.d.plugin/pkg/iprange" +) + +func (d *DnsmasqDHCP) parseDnsmasqDHCPConfiguration() ([]iprange.Range, []net.IP) { + configs := findConfigurationFiles(d.ConfPath, d.ConfDir) + + dhcpRanges := d.getDHCPRanges(configs) + dhcpHosts := d.getDHCPHosts(configs) + + return dhcpRanges, dhcpHosts +} + +func (d *DnsmasqDHCP) getDHCPRanges(configs []*configFile) []iprange.Range { + var dhcpRanges []iprange.Range + var parsed string + seen := make(map[string]bool) + + for _, conf := range configs { + d.Debugf("looking in '%s'", conf.path) + + for _, value := range conf.get("dhcp-range") { + d.Debugf("found dhcp-range '%s'", value) + if parsed = parseDHCPRangeValue(value); parsed == "" || seen[parsed] { + continue + } + seen[parsed] = true + + r, err := iprange.ParseRange(parsed) + if r == nil || err != nil { + d.Warningf("error on parsing dhcp-range '%s', skipping it", parsed) + continue + } + + d.Debugf("adding dhcp-range '%s'", parsed) + dhcpRanges = append(dhcpRanges, r) + } + } + + // order: ipv4, ipv6 + sort.Slice(dhcpRanges, func(i, j int) bool { return dhcpRanges[i].Family() < dhcpRanges[j].Family() }) + + return dhcpRanges +} + +func (d *DnsmasqDHCP) getDHCPHosts(configs []*configFile) []net.IP { + var dhcpHosts []net.IP + seen := make(map[string]bool) + var parsed string + + for _, conf := range configs { + d.Debugf("looking in '%s'", conf.path) + + for _, value := range conf.get("dhcp-host") { + d.Debugf("found dhcp-host '%s'", value) + if parsed = parseDHCPHostValue(value); parsed == "" || seen[parsed] { + continue + } + seen[parsed] = true + + v := net.ParseIP(parsed) + if v == nil { + d.Warningf("error on parsing dhcp-host '%s', skipping it", parsed) + continue + } + + d.Debugf("adding dhcp-host '%s'", parsed) + dhcpHosts = append(dhcpHosts, v) + } + } + return dhcpHosts +} + +/* +Examples: + - 192.168.0.50,192.168.0.150,12h + - 192.168.0.50,192.168.0.150,255.255.255.0,12h + - set:red,1.1.1.50,1.1.2.150, 255.255.252.0 + - 192.168.0.0,static + - 1234::2,1234::500, 64, 12h + - 1234::2,1234::500 + - 1234::2,1234::500, slaac + - 1234::,ra-only + - 1234::,ra-names + - 1234::,ra-stateless +*/ +var reDHCPRange = regexp.MustCompile(`([0-9a-f.:]+),([0-9a-f.:]+)`) + +func 
parseDHCPRangeValue(s string) (r string) { + if strings.Contains(s, "ra-stateless") { + return + } + + match := reDHCPRange.FindStringSubmatch(s) + if match == nil { + return + } + + start, end := net.ParseIP(match[1]), net.ParseIP(match[2]) + if start == nil || end == nil { + return + } + + return fmt.Sprintf("%s-%s", start, end) +} + +/* +Examples: + - 11:22:33:44:55:66,192.168.0.60 + - 11:22:33:44:55:66,fred,192.168.0.60,45m + - 11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60 + - bert,192.168.0.70,infinite + - id:01:02:02:04,192.168.0.60 + - id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61 + - id:marjorie,192.168.0.60 + - id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] +*/ +var ( + reDHCPHostV4 = regexp.MustCompile(`(?:[0-9]{1,3}\.){3}[0-9]{1,3}`) + reDHCPHostV6 = regexp.MustCompile(`\[([0-9a-f.:]+)]`) +) + +func parseDHCPHostValue(s string) (r string) { + if strings.Contains(s, "[") { + return strings.Trim(reDHCPHostV6.FindString(s), "[]") + } + return reDHCPHostV4.FindString(s) +} + +type ( + extension string + + extensions []extension + + configDir struct { + path string + include extensions + exclude extensions + } +) + +func (e extension) match(filename string) bool { + return strings.HasSuffix(filename, string(e)) +} + +func (es extensions) match(filename string) bool { + for _, e := range es { + if e.match(filename) { + return true + } + } + return false +} + +func parseConfDir(confDirStr string) configDir { + // # Include all the files in a directory except those ending in .bak + //#conf-dir=/etc/dnsmasq.d,.bak + //# Include all files in a directory which end in .conf + //#conf-dir=/etc/dnsmasq.d/,*.conf + + parts := strings.Split(confDirStr, ",") + cd := configDir{path: parts[0]} + + for _, arg := range parts[1:] { + arg = strings.TrimSpace(arg) + if strings.HasPrefix(arg, "*") { + cd.include = append(cd.include, extension(arg[1:])) + } else { + cd.exclude = append(cd.exclude, extension(arg)) + } + } + return cd +} + +func (cd configDir) isValidFilename(filename string) bool { + switch { + default: + return true + case strings.HasPrefix(filename, "."): + case strings.HasPrefix(filename, "~"): + case strings.HasPrefix(filename, "#") && strings.HasSuffix(filename, "#"): + } + return false +} + +func (cd configDir) match(filename string) bool { + switch { + default: + return true + case !cd.isValidFilename(filename): + case len(cd.include) > 0 && !cd.include.match(filename): + case cd.exclude.match(filename): + } + return false +} + +func (cd configDir) findConfigs() ([]string, error) { + fis, err := os.ReadDir(cd.path) + if err != nil { + return nil, err + } + + var files []string + for _, fi := range fis { + info, err := fi.Info() + if err != nil { + return nil, err + } + if !info.Mode().IsRegular() || !cd.match(fi.Name()) { + continue + } + files = append(files, filepath.Join(cd.path, fi.Name())) + } + return files, nil +} + +func openFile(filepath string) (f *os.File, err error) { + defer func() { + if err != nil && f != nil { + _ = f.Close() + } + }() + + f, err = os.Open(filepath) + if err != nil { + return nil, err + } + + fi, err := f.Stat() + if err != nil { + return nil, err + } + + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("'%s' is not a regular file", filepath) + } + return f, nil +} + +type ( + configOption struct { + key, value string + } + + configFile struct { + path string + options []configOption + } +) + +func (cf *configFile) get(name string) []string { + var options []string + for _, o := range cf.options { 
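+		// a dnsmasq option may appear many times (e.g. repeated conf-file/conf-dir directives), so collect every matching value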
+ if o.key != name { + continue + } + options = append(options, o.value) + } + return options +} + +func parseConfFile(filename string) (*configFile, error) { + f, err := openFile(filename) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + cf := configFile{path: filename} + s := bufio.NewScanner(f) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "#") { + continue + } + + if !strings.Contains(line, "=") { + continue + } + + line = strings.ReplaceAll(line, " ", "") + parts := strings.Split(line, "=") + if len(parts) != 2 { + continue + } + + cf.options = append(cf.options, configOption{key: parts[0], value: parts[1]}) + } + return &cf, nil +} + +type ConfigFinder struct { + entryConfig string + entryDir string + visitedConfigs map[string]bool + visitedDirs map[string]bool +} + +func (f *ConfigFinder) find() []*configFile { + f.visitedConfigs = make(map[string]bool) + f.visitedDirs = make(map[string]bool) + + configs := f.recursiveFind(f.entryConfig) + + for _, file := range f.entryDirConfigs() { + configs = append(configs, f.recursiveFind(file)...) + } + return configs +} + +func (f *ConfigFinder) entryDirConfigs() []string { + if f.entryDir == "" { + return nil + } + files, err := parseConfDir(f.entryDir).findConfigs() + if err != nil { + return nil + } + return files +} + +func (f *ConfigFinder) recursiveFind(filename string) (configs []*configFile) { + if f.visitedConfigs[filename] { + return nil + } + + config, err := parseConfFile(filename) + if err != nil { + return nil + } + + files, dirs := config.get("conf-file"), config.get("conf-dir") + + f.visitedConfigs[filename] = true + configs = append(configs, config) + + for _, file := range files { + configs = append(configs, f.recursiveFind(file)...) + } + + for _, dir := range dirs { + if dir == "" { + continue + } + + d := parseConfDir(dir) + + if f.visitedDirs[d.path] { + continue + } + f.visitedDirs[d.path] = true + + files, err = d.findConfigs() + if err != nil { + continue + } + + for _, file := range files { + configs = append(configs, f.recursiveFind(file)...) + } + } + return configs +} + +func findConfigurationFiles(entryConfig string, entryDir string) []*configFile { + cf := ConfigFinder{ + entryConfig: entryConfig, + entryDir: entryDir, + } + return cf.find() +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.conf new file mode 100644 index 00000000000000..4cf77478e6ca69 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.conf @@ -0,0 +1,77 @@ +# Uncomment this to enable the integrated DHCP server, you need +# to supply the range of addresses available for lease and optionally +# a lease time. If you have more than one network, you will need to +# repeat this for each network on which you want to supply DHCP +# service. +#dhcp-range=192.168.0.50,192.168.0.150,12h + +# This is an example of a DHCP range where the netmask is given. This +# is needed for networks we reach the dnsmasq DHCP server via a relay +# agent. If you don't know what a DHCP relay agent is, you probably +# don't need to worry about this. +#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h + +# This is an example of a DHCP range which sets a tag, so that +# some DHCP options may be set only for this network. +#dhcp-range=set:red,192.168.0.50,192.168.0.150 + +# Use this DHCP range only when the tag "green" is set. 
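+# (a request acquires tags via other directives, e.g. dhcp-host or dhcp-vendorclass)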
+#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h + +# Specify a subnet which can't be used for dynamic address allocation, +# but is available for hosts with matching --dhcp-host lines. Note that +# dhcp-host declarations will be ignored unless there is a dhcp-range +# of some type for the subnet in question. +# In this case the netmask is implied (it comes from the network +# configuration on the machine running dnsmasq); it is possible to give +# an explicit netmask instead. +#dhcp-range=192.168.0.0,static + +# Enable DHCPv6. Note that the prefix-length does not need to be specified +# and defaults to 64 if missing. +#dhcp-range=1234::2, 1234::500, 64, 12h + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +#dhcp-range=1234::, ra-only + +# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and +# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack +# hosts. Use the DHCPv4 lease to derive the name, network segment and +# MAC address and assume that the host will also have an +# IPv6 address calculated using the SLAAC algorithm. +#dhcp-range=1234::, ra-names + +# Do Router Advertisements, BUT NOT DHCP for this subnet. +# Set the lifetime to 48 hours. (Note: minimum lifetime is 2 hours.) +#dhcp-range=1234::, ra-only, 48h + +# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA +# so that clients can use SLAAC addresses as well as DHCP ones. +#dhcp-range=1234::2, 1234::500, slaac + +# Do Router Advertisements and stateless DHCP for this subnet. Clients will +# not get addresses from DHCP, but they will get other configuration information. +# They will use SLAAC for addresses. +#dhcp-range=1234::, ra-stateless + +# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses +# from DHCPv4 leases. 
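+# NOTE: the collector's parser deliberately skips any dhcp-range containing "ra-stateless", since such ranges allocate no addresses to monitor.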
+#dhcp-range=1234::, ra-stateless, ra-names + +dhcp-range=192.168.0.1,192.168.0.100,12h +dhcp-range = 1230::1, 1230::64 + +dhcp-range = 1235::2, 1235::500, ra-stateless +dhcp-range=1234::, ra-stateless, ra-names +dhcp-range=1234::, ra-stateless +dhcp-range=1234::, ra-only, 48h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.99 +dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1230::63] + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf new file mode 100644 index 00000000000000..b9ca7821849fde --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf @@ -0,0 +1 @@ +dhcp-range=tag:green,192.168.11.1,192.168.11.100,12h \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any new file mode 100644 index 00000000000000..300faa28e74aeb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any @@ -0,0 +1,10 @@ +dhcp-range=tag:green,192.168.1.1,192.168.1.100,12h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.1.99 + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any new file mode 100644 index 00000000000000..414d6819fa92e9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any @@ -0,0 +1,10 @@ +dhcp-range = 1231::1, 1231::64 + +dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1231::63] + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any new file mode 100644 index 00000000000000..24a74279727fb1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any @@ -0,0 +1,10 @@ +dhcp-range=tag:green,192.168.2.1,192.168.2.100,12h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.2.99 + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any new file mode 100644 index 00000000000000..4ae70f0b2344dd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any @@ -0,0 +1,10 @@ +dhcp-range = 1232::1, 1232::64 + +dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1232::63] + 
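+# these fixtures re-include one another on purpose: the config finder keeps visited-file and visited-dir sets, so the recursive walk terminates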
+conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf new file mode 100644 index 00000000000000..dc58bf9d8d9ab3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf @@ -0,0 +1 @@ +dhcp-range=192.168.22.0,192.168.22.255,12h \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak new file mode 100644 index 00000000000000..c3897671a89ead --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak @@ -0,0 +1 @@ +dhcp-range=tag:green,192.168.33.1,192.168.33.100,12h \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any new file mode 100644 index 00000000000000..a55ac969a478d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any @@ -0,0 +1,10 @@ +dhcp-range=tag:green,192.168.3.1,192.168.3.100,12h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.3.99 + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any new file mode 100644 index 00000000000000..4bc6cf10f67973 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any @@ -0,0 +1,3 @@ +dhcp-range = 1233::1, 1233::64 + +dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1233::63] \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other new file mode 100644 index 00000000000000..18fe1ac53db36f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other @@ -0,0 +1 @@ +dhcp-range=tag:green,192.168.44.1,192.168.44.100,12h \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf new file mode 100644 index 00000000000000..1493b800912198 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf @@ -0,0 +1,10 @@ +dhcp-range=tag:green,192.168.4.1,192.168.4.100,12h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.4.99 + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf 
b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf new file mode 100644 index 00000000000000..389c2c95b911b0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf @@ -0,0 +1,10 @@ +dhcp-range = 1234::1, 1234::64 + +dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::63] + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf + +conf-dir=testdata/dnsmasq.d2 +conf-dir=testdata/dnsmasq.d3,.bak +conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.leases b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.leases new file mode 100644 index 00000000000000..606e74fbab6208 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq.leases @@ -0,0 +1,19 @@ +1560300536 08:00:27:61:3c:ee 192.168.0.1 * * +1560300536 08:00:27:61:3c:ee 192.168.0.2 * * +1560300536 08:00:27:61:3c:ee 192.168.0.3 * * +1560300536 08:00:27:61:3c:ee 192.168.0.4 * * +1560300536 08:00:27:61:3c:ee 192.168.0.5 * * +1560300536 08:00:27:61:3c:ee 192.168.1.1 * * +1560300536 08:00:27:61:3c:ee 192.168.1.2 * * +1560300536 08:00:27:61:3c:ee 192.168.1.3 * * +1560300536 08:00:27:61:3c:ee 192.168.1.4 * * +1560300536 08:00:27:61:3c:ee 192.168.2.1 * * +1560300536 08:00:27:61:3c:ee 192.168.2.2 * * +1560300536 08:00:27:61:3c:ee 192.168.2.3 * * +duid 00:01:00:01:24:90:cf:5b:08:00:27:61:2e:2c +1560300414 660684014 1230::1 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee +1560300414 660684014 1230::2 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee +1560300414 660684014 1230::3 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee +1560300414 660684014 1230::4 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee +1560300414 660684014 1230::5 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee +1560300414 660684014 1230::6 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf new file mode 100644 index 00000000000000..bd1766adba8d5a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf @@ -0,0 +1,6 @@ +dhcp-range=192.168.200.1,192.168.200.100,12h + +dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.200.99 + +conf-file=testdata/dnsmasq.conf +conf-file=testdata/dnsmasq2.conf \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf new file mode 100644 index 00000000000000..3475544b58ee0f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf @@ -0,0 +1,4 @@ +#dhcp-range=192.168.0.50,192.168.0.150,12h +#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h +#dhcp-range=set:red,192.168.0.50,192.168.0.150 +#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/README.md b/src/go/collectors/go.d.plugin/modules/dnsquery/README.md new file mode 120000 index 00000000000000..c5baa8254febfc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/README.md @@ -0,0 +1 @@ +integrations/dns_query.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/charts.go b/src/go/collectors/go.d.plugin/modules/dnsquery/charts.go new file mode 100644 index 
00000000000000..a56c9c0beccdf4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/charts.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsquery + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioDNSQueryStatus = module.Priority + iota + prioDNSQueryTime +) + +var ( + dnsChartsTmpl = module.Charts{ + dnsQueryStatusChartTmpl.Copy(), + dnsQueryTimeChartTmpl.Copy(), + } + dnsQueryStatusChartTmpl = module.Chart{ + ID: "server_%s_record_%s_query_status", + Title: "DNS Query Status", + Units: "status", + Fam: "query status", + Ctx: "dns_query.query_status", + Priority: prioDNSQueryStatus, + Dims: module.Dims{ + {ID: "server_%s_record_%s_query_status_success", Name: "success"}, + {ID: "server_%s_record_%s_query_status_network_error", Name: "network_error"}, + {ID: "server_%s_record_%s_query_status_dns_error", Name: "dns_error"}, + }, + } + dnsQueryTimeChartTmpl = module.Chart{ + ID: "server_%s_record_%s_query_time", + Title: "DNS Query Time", + Units: "seconds", + Fam: "query time", + Ctx: "dns_query.query_time", + Priority: prioDNSQueryTime, + Dims: module.Dims{ + {ID: "server_%s_record_%s_query_time", Name: "query_time", Div: 1e9}, + }, + } +) + +func newDNSServerCharts(server, network, rtype string) *module.Charts { + charts := dnsChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(server, ".", "_"), rtype) + chart.Labels = []module.Label{ + {Key: "server", Value: server}, + {Key: "network", Value: network}, + {Key: "record_type", Value: rtype}, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, server, rtype) + } + } + + return charts +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/collect.go b/src/go/collectors/go.d.plugin/modules/dnsquery/collect.go new file mode 100644 index 00000000000000..46104e94442d72 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/collect.go @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsquery + +import ( + "math/rand" + "net" + "strconv" + "sync" + "time" + + "github.com/miekg/dns" +) + +func (d *DNSQuery) collect() (map[string]int64, error) { + if d.dnsClient == nil { + d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration) + } + + mx := make(map[string]int64) + domain := randomDomain(d.Domains) + d.Debugf("current domain : %s", domain) + + var wg sync.WaitGroup + var mux sync.RWMutex + for _, srv := range d.Servers { + for rtypeName, rtype := range d.recordTypes { + wg.Add(1) + go func(srv, rtypeName string, rtype uint16, wg *sync.WaitGroup) { + defer wg.Done() + + msg := new(dns.Msg) + msg.SetQuestion(dns.Fqdn(domain), rtype) + address := net.JoinHostPort(srv, strconv.Itoa(d.Port)) + + resp, rtt, err := d.dnsClient.Exchange(msg, address) + + mux.Lock() + defer mux.Unlock() + + px := "server_" + srv + "_record_" + rtypeName + "_" + + mx[px+"query_status_success"] = 0 + mx[px+"query_status_network_error"] = 0 + mx[px+"query_status_dns_error"] = 0 + + if err != nil { + d.Debugf("error on querying %s after %s query for %s : %s", srv, rtypeName, domain, err) + mx[px+"query_status_network_error"] = 1 + return + } + + if resp != nil && resp.Rcode != dns.RcodeSuccess { + d.Debugf("invalid answer from %s after %s query for %s (rcode %d)", srv, rtypeName, domain, resp.Rcode) + mx[px+"query_status_dns_error"] = 1 + } else { + mx[px+"query_status_success"] = 1 + } + mx["server_"+srv+"_record_"+rtypeName+"_query_time"] = rtt.Nanoseconds() + + 
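+				// rtt is measured by Exchange; it is stored in nanoseconds, and the matching chart dimension divides by 1e9 to display seconds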
}(srv, rtypeName, rtype, &wg) + } + } + wg.Wait() + + return mx, nil +} + +func randomDomain(domains []string) string { + src := rand.NewSource(time.Now().UnixNano()) + r := rand.New(src) + return domains[r.Intn(len(domains))] +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/config_schema.json b/src/go/collectors/go.d.plugin/modules/dnsquery/config_schema.json new file mode 100644 index 00000000000000..4a7fa412ad774b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/config_schema.json @@ -0,0 +1,48 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/dns_query job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "domains": { + "type": "array", + "items": { + "type": "string" + } + }, + "servers": { + "type": "array", + "items": { + "type": "string" + } + }, + "network": { + "type": "string" + }, + "record_type": { + "type": "string" + }, + "record_types": { + "type": "array", + "items": { + "type": "string" + } + }, + "port": { + "type": "integer" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "domains", + "servers" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery.go b/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery.go new file mode 100644 index 00000000000000..dd1cd3c66fbad8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsquery + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/miekg/dns" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("dns_query", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *DNSQuery { + return &DNSQuery{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 2}, + Network: "udp", + RecordTypes: []string{"A"}, + Port: 53, + }, + newDNSClient: func(network string, timeout time.Duration) dnsClient { + return &dns.Client{ + Net: network, + ReadTimeout: timeout, + } + }, + } +} + +type Config struct { + Domains []string `yaml:"domains"` + Servers []string `yaml:"servers"` + Network string `yaml:"network"` + RecordType string `yaml:"record_type"` + RecordTypes []string `yaml:"record_types"` + Port int `yaml:"port"` + Timeout web.Duration `yaml:"timeout"` +} + +type ( + DNSQuery struct { + module.Base + + Config `yaml:",inline"` + + charts *module.Charts + + newDNSClient func(network string, duration time.Duration) dnsClient + recordTypes map[string]uint16 + + dnsClient dnsClient + } + + dnsClient interface { + Exchange(msg *dns.Msg, address string) (response *dns.Msg, rtt time.Duration, err error) + } +) + +func (d *DNSQuery) Init() bool { + if err := d.verifyConfig(); err != nil { + d.Errorf("config validation: %v", err) + return false + } + + rt, err := d.initRecordTypes() + if err != nil { + d.Errorf("init record type: %v", err) + return false + } + d.recordTypes = rt + + charts, err := d.initCharts() + if err != nil { + d.Errorf("init charts: %v", err) + return false + } + d.charts = charts + + return true +} + +func (d *DNSQuery) Check() bool { + return true +} + +func (d *DNSQuery) Charts() *module.Charts { + return d.charts +} + +func (d *DNSQuery) Collect() map[string]int64 { + mx, err := 
d.collect() + if err != nil { + d.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (d *DNSQuery) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery_test.go b/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery_test.go new file mode 100644 index 00000000000000..5ba8417314f544 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/dnsquery_test.go @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsquery + +import ( + "errors" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/miekg/dns" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestDNSQuery_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success when all set": { + wantFail: false, + config: Config{ + Domains: []string{"example.com"}, + Servers: []string{"192.0.2.0"}, + Network: "udp", + RecordTypes: []string{"A"}, + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + "success when using deprecated record_type": { + wantFail: false, + config: Config{ + Domains: []string{"example.com"}, + Servers: []string{"192.0.2.0"}, + Network: "udp", + RecordType: "A", + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + "fail with default": { + wantFail: true, + config: New().Config, + }, + "fail when domains not set": { + wantFail: true, + config: Config{ + Domains: nil, + Servers: []string{"192.0.2.0"}, + Network: "udp", + RecordTypes: []string{"A"}, + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + "fail when servers not set": { + wantFail: true, + config: Config{ + Domains: []string{"example.com"}, + Servers: nil, + Network: "udp", + RecordTypes: []string{"A"}, + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + "fail when network is invalid": { + wantFail: true, + config: Config{ + Domains: []string{"example.com"}, + Servers: []string{"192.0.2.0"}, + Network: "gcp", + RecordTypes: []string{"A"}, + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + "fail when record_type is invalid": { + wantFail: true, + config: Config{ + Domains: []string{"example.com"}, + Servers: []string{"192.0.2.0"}, + Network: "udp", + RecordTypes: []string{"B"}, + Port: 53, + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dq := New() + dq.Config = test.config + + if test.wantFail { + assert.False(t, dq.Init()) + } else { + assert.True(t, dq.Init()) + } + }) + } +} + +func TestDNSQuery_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func() *DNSQuery + }{ + "success when DNS query successful": { + wantFail: false, + prepare: caseDNSClientOK, + }, + "success when DNS query returns an error": { + wantFail: false, + prepare: caseDNSClientErr, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dq := test.prepare() + + require.True(t, dq.Init()) + + if test.wantFail { + assert.False(t, dq.Check()) + } else { + assert.True(t, dq.Check()) + } + }) + } +} + +func TestDNSQuery_Charts(t *testing.T) { + dq := New() + + dq.Domains = []string{"google.com"} + dq.Servers = []string{"192.0.2.0", "192.0.2.1"} + require.True(t, dq.Init()) + + assert.NotNil(t, dq.Charts()) + assert.Len(t, *dq.Charts(), 
len(dnsChartsTmpl)*len(dq.Servers)) +} + +func TestDNSQuery_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *DNSQuery + wantMetrics map[string]int64 + }{ + "success when DNS query successful": { + prepare: caseDNSClientOK, + wantMetrics: map[string]int64{ + "server_192.0.2.0_record_A_query_status_dns_error": 0, + "server_192.0.2.0_record_A_query_status_network_error": 0, + "server_192.0.2.0_record_A_query_status_success": 1, + "server_192.0.2.0_record_A_query_time": 1000000000, + "server_192.0.2.1_record_A_query_status_dns_error": 0, + "server_192.0.2.1_record_A_query_status_network_error": 0, + "server_192.0.2.1_record_A_query_status_success": 1, + "server_192.0.2.1_record_A_query_time": 1000000000, + }, + }, + "fail when DNS query returns an error": { + prepare: caseDNSClientErr, + wantMetrics: map[string]int64{ + "server_192.0.2.0_record_A_query_status_dns_error": 0, + "server_192.0.2.0_record_A_query_status_network_error": 1, + "server_192.0.2.0_record_A_query_status_success": 0, + "server_192.0.2.1_record_A_query_status_dns_error": 0, + "server_192.0.2.1_record_A_query_status_network_error": 1, + "server_192.0.2.1_record_A_query_status_success": 0, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dq := test.prepare() + + require.True(t, dq.Init()) + + mx := dq.Collect() + + require.Equal(t, test.wantMetrics, mx) + }) + } +} + +func caseDNSClientOK() *DNSQuery { + dq := New() + dq.Domains = []string{"example.com"} + dq.Servers = []string{"192.0.2.0", "192.0.2.1"} + dq.newDNSClient = func(_ string, _ time.Duration) dnsClient { + return mockDNSClient{errOnExchange: false} + } + return dq +} + +func caseDNSClientErr() *DNSQuery { + dq := New() + dq.Domains = []string{"example.com"} + dq.Servers = []string{"192.0.2.0", "192.0.2.1"} + dq.newDNSClient = func(_ string, _ time.Duration) dnsClient { + return mockDNSClient{errOnExchange: true} + } + return dq +} + +type mockDNSClient struct { + errOnExchange bool +} + +func (m mockDNSClient) Exchange(_ *dns.Msg, _ string) (response *dns.Msg, rtt time.Duration, err error) { + if m.errOnExchange { + return nil, time.Second, errors.New("mock.Exchange() error") + } + return nil, time.Second, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/init.go b/src/go/collectors/go.d.plugin/modules/dnsquery/init.go new file mode 100644 index 00000000000000..8e1e078c750cf7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/init.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dnsquery + +import ( + "errors" + "fmt" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/miekg/dns" +) + +func (d *DNSQuery) verifyConfig() error { + if len(d.Domains) == 0 { + return errors.New("no domains specified") + } + + if len(d.Servers) == 0 { + return errors.New("no servers specified") + } + + if !(d.Network == "" || d.Network == "udp" || d.Network == "tcp" || d.Network == "tcp-tls") { + return fmt.Errorf("wrong network transport : %s", d.Network) + } + + if d.RecordType != "" { + d.Warning("'record_type' config option is deprecated, use 'record_types' instead") + d.RecordTypes = append(d.RecordTypes, d.RecordType) + } + + if len(d.RecordTypes) == 0 { + return errors.New("no record types specified") + } + + return nil +} + +func (d *DNSQuery) initRecordTypes() (map[string]uint16, error) { + types := make(map[string]uint16) + for _, v := range d.RecordTypes { + rtype, err := parseRecordType(v) + if err != nil { + return nil, err + } + types[v] = 
rtype + + } + + return types, nil +} + +func (d *DNSQuery) initCharts() (*module.Charts, error) { + var charts module.Charts + + for _, srv := range d.Servers { + for _, rtype := range d.RecordTypes { + cs := newDNSServerCharts(srv, d.Network, rtype) + if err := charts.Add(*cs...); err != nil { + return nil, err + } + } + } + + return &charts, nil +} + +func parseRecordType(recordType string) (uint16, error) { + var rtype uint16 + + switch recordType { + case "A": + rtype = dns.TypeA + case "AAAA": + rtype = dns.TypeAAAA + case "ANY": + rtype = dns.TypeANY + case "CNAME": + rtype = dns.TypeCNAME + case "MX": + rtype = dns.TypeMX + case "NS": + rtype = dns.TypeNS + case "PTR": + rtype = dns.TypePTR + case "SOA": + rtype = dns.TypeSOA + case "SPF": + rtype = dns.TypeSPF + case "SRV": + rtype = dns.TypeSRV + case "TXT": + rtype = dns.TypeTXT + default: + return 0, fmt.Errorf("unknown record type : %s", recordType) + } + + return rtype, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/integrations/dns_query.md b/src/go/collectors/go.d.plugin/modules/dnsquery/integrations/dns_query.md new file mode 100644 index 00000000000000..f1e4c5b5b50756 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/integrations/dns_query.md @@ -0,0 +1,181 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsquery/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/dnsquery/metadata.yaml" +sidebar_label: "DNS query" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# DNS query + + +<img src="https://netdata.cloud/img/network-wired.svg" width="150"/> + + +Plugin: go.d.plugin +Module: dns_query + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This module monitors DNS query round-trip time (RTT). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per server + +These metrics refer to the DNS server. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| server | DNS server address. | +| network | Network protocol name (tcp, udp, tcp-tls). | +| record_type | DNS record type (e.g. A, AAAA, CNAME). 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| dns_query.query_status | success, network_error, dns_error | status | +| dns_query.query_time | query_time | seconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/dns_query.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/dns_query.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>All options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| domains | Domains or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes | +| servers | Servers to query. | | yes | +| port | DNS server port. | 53 | no | +| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no | +| record_types | Query record types. Available options: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. | A | no | +| timeout | Query read timeout. | 2 | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: job1 + record_types: + - A + - AAAA + domains: + - google.com + - github.com + - reddit.com + servers: + - 8.8.8.8 + - 8.8.4.4 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m dns_query + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/dnsquery/metadata.yaml b/src/go/collectors/go.d.plugin/modules/dnsquery/metadata.yaml new file mode 100644 index 00000000000000..8c199550fc86f6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dnsquery/metadata.yaml @@ -0,0 +1,142 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-dns_query + plugin_name: go.d.plugin + module_name: dns_query + monitored_instance: + name: DNS query + link: "" + icon_filename: network-wired.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This module monitors DNS query round-trip time (RTT). + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/dns_query.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: All options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: domains + description: Domains or subdomains to query. The collector will choose a random domain from the list on every iteration. + default_value: "" + required: true + - name: servers + description: Servers to query. + default_value: "" + required: true + - name: port + description: DNS server port. + default_value: 53 + required: false + - name: network + description: "Network protocol name. Available options: udp, tcp, tcp-tls." + default_value: udp + required: false + - name: record_types + description: "Query record types. Available options: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT." + default_value: A + required: false + - name: timeout + description: Query read timeout. + default_value: 2 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: job1 + record_types: + - A + - AAAA + domains: + - google.com + - github.com + - reddit.com + servers: + - 8.8.8.8 + - 8.8.4.4 + troubleshooting: + problems: + list: [] + alerts: + - name: dns_query_query_status + metric: dns_query.query_status + info: "DNS request type ${label:record_type} to server ${label:server} is unsuccessful" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: server + description: These metrics refer to the DNS server. + labels: + - name: server + description: DNS server address. + - name: network + description: Network protocol name (tcp, udp, tcp-tls). + - name: record_type + description: DNS record type (e.g. A, AAAA, CNAME). 
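+          # one query_status/query_time chart pair is created per (server, record_type) combination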
+ metrics: + - name: dns_query.query_status + description: DNS Query Status + unit: status + chart_type: line + dimensions: + - name: success + - name: network_error + - name: dns_error + - name: dns_query.query_time + description: DNS Query Time + unit: seconds + chart_type: line + dimensions: + - name: query_time diff --git a/src/go/collectors/go.d.plugin/modules/docker/README.md b/src/go/collectors/go.d.plugin/modules/docker/README.md new file mode 120000 index 00000000000000..b4804ee06c3081 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/README.md @@ -0,0 +1 @@ +integrations/docker.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/docker/charts.go b/src/go/collectors/go.d.plugin/modules/docker/charts.go new file mode 100644 index 00000000000000..051c0f405375b2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/charts.go @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioContainersState = module.Priority + iota + prioContainersHealthy + + prioContainerState + prioContainerHealthStatus + prioContainerWritableLayerSize + + prioImagesCount + prioImagesSize +) + +var summaryCharts = module.Charts{ + containersStateChart.Copy(), + containersHealthyChart.Copy(), + + imagesCountChart.Copy(), + imagesSizeChart.Copy(), +} + +var ( + containersStateChart = module.Chart{ + ID: "containers_state", + Title: "Total number of Docker containers in various states", + Units: "containers", + Fam: "containers", + Ctx: "docker.containers_state", + Priority: prioContainersState, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "containers_state_running", Name: "running"}, + {ID: "containers_state_paused", Name: "paused"}, + {ID: "containers_state_exited", Name: "exited"}, + }, + } + containersHealthyChart = module.Chart{ + ID: "healthy_containers", + Title: "Total number of Docker containers in various health states", + Units: "containers", + Fam: "containers", + Ctx: "docker.containers_health_status", + Priority: prioContainersHealthy, + Dims: module.Dims{ + {ID: "containers_health_status_healthy", Name: "healthy"}, + {ID: "containers_health_status_unhealthy", Name: "unhealthy"}, + {ID: "containers_health_status_not_running_unhealthy", Name: "not_running_unhealthy"}, + {ID: "containers_health_status_starting", Name: "starting"}, + {ID: "containers_health_status_none", Name: "no_healthcheck"}, + }, + } +) + +var ( + imagesCountChart = module.Chart{ + ID: "images_count", + Title: "Total number of Docker images in various states", + Units: "images", + Fam: "images", + Ctx: "docker.images", + Priority: prioImagesCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "images_active", Name: "active"}, + {ID: "images_dangling", Name: "dangling"}, + }, + } + imagesSizeChart = module.Chart{ + ID: "images_size", + Title: "Total size of all Docker images", + Units: "bytes", + Fam: "images", + Ctx: "docker.images_size", + Priority: prioImagesSize, + Dims: module.Dims{ + {ID: "images_size", Name: "size"}, + }, + } +) + +var ( + containerChartsTmpl = module.Charts{ + containerStateChartTmpl.Copy(), + containerHealthStatusChartTmpl.Copy(), + containerWritableLayerSizeChartTmpl.Copy(), + } + + containerStateChartTmpl = module.Chart{ + ID: "container_%s_state", + Title: "Docker container state", + Units: "state", + Fam: "containers", + Ctx: "docker.container_state", + Priority: prioContainerState, + Dims: module.Dims{ + {ID: 
"container_%s_state_running", Name: "running"}, + {ID: "container_%s_state_paused", Name: "paused"}, + {ID: "container_%s_state_exited", Name: "exited"}, + {ID: "container_%s_state_created", Name: "created"}, + {ID: "container_%s_state_restarting", Name: "restarting"}, + {ID: "container_%s_state_removing", Name: "removing"}, + {ID: "container_%s_state_dead", Name: "dead"}, + }, + } + containerHealthStatusChartTmpl = module.Chart{ + ID: "container_%s_health_status", + Title: "Docker container health status", + Units: "status", + Fam: "containers", + Ctx: "docker.container_health_status", + Priority: prioContainerHealthStatus, + Dims: module.Dims{ + {ID: "container_%s_health_status_healthy", Name: "healthy"}, + {ID: "container_%s_health_status_unhealthy", Name: "unhealthy"}, + {ID: "container_%s_health_status_not_running_unhealthy", Name: "not_running_unhealthy"}, + {ID: "container_%s_health_status_starting", Name: "starting"}, + {ID: "container_%s_health_status_none", Name: "no_healthcheck"}, + }, + } + containerWritableLayerSizeChartTmpl = module.Chart{ + ID: "container_%s_writable_layer_size", + Title: "Docker container writable layer size", + Units: "bytes", + Fam: "containers", + Ctx: "docker.container_writeable_layer_size", + Priority: prioContainerWritableLayerSize, + Dims: module.Dims{ + {ID: "container_%s_size_rw", Name: "writable_layer"}, + }, + } +) + +func (d *Docker) addContainerCharts(name, image string) { + charts := containerChartsTmpl.Copy() + if !d.CollectContainerSize { + _ = charts.Remove(containerWritableLayerSizeChartTmpl.ID) + } + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "container_name", Value: name}, + {Key: "image", Value: image}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := d.Charts().Add(*charts...); err != nil { + d.Warning(err) + } +} + +func (d *Docker) removeContainerCharts(name string) { + px := fmt.Sprintf("container_%s", name) + + for _, chart := range *d.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/docker/collect.go b/src/go/collectors/go.d.plugin/modules/docker/collect.go new file mode 100644 index 00000000000000..ceda40671849ed --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/collect.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker + +import ( + "context" + "fmt" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +func (d *Docker) collect() (map[string]int64, error) { + if d.client == nil { + client, err := d.newClient(d.Config) + if err != nil { + return nil, err + } + d.client = client + } + + if !d.verNegotiated { + d.verNegotiated = true + d.negotiateAPIVersion() + } + + defer func() { _ = d.client.Close() }() + + mx := make(map[string]int64) + + if err := d.collectInfo(mx); err != nil { + return nil, err + } + if err := d.collectImages(mx); err != nil { + return nil, err + } + if err := d.collectContainers(mx); err != nil { + return nil, err + } + + return mx, nil +} + +func (d *Docker) collectInfo(mx map[string]int64) error { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + + info, err := d.client.Info(ctx) + if err != nil { + return err + } + + mx["containers_state_running"] = int64(info.ContainersRunning) + mx["containers_state_paused"] = 
int64(info.ContainersPaused) + mx["containers_state_exited"] = int64(info.ContainersStopped) + + return nil +} + +func (d *Docker) collectImages(mx map[string]int64) error { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + + images, err := d.client.ImageList(ctx, types.ImageListOptions{}) + if err != nil { + return err + } + + mx["images_size"] = 0 + mx["images_dangling"] = 0 + mx["images_active"] = 0 + + for _, v := range images { + mx["images_size"] += v.Size + if v.Containers == 0 { + mx["images_dangling"]++ + } else { + mx["images_active"]++ + } + } + + return nil +} + +var ( + containerHealthStatuses = []string{ + types.Healthy, + types.Unhealthy, + types.Starting, + types.NoHealthcheck, + } + containerStates = []string{ + "created", + "running", + "paused", + "restarting", + "removing", + "exited", + "dead", + } +) + +func (d *Docker) collectContainers(mx map[string]int64) error { + containerSet := make(map[string][]types.Container) + + for _, status := range containerHealthStatuses { + if err := func() error { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + + v, err := d.client.ContainerList(ctx, types.ContainerListOptions{ + All: true, + Filters: filters.NewArgs(filters.KeyValuePair{Key: "health", Value: status}), + Size: d.CollectContainerSize, + }) + if err != nil { + return err + } + containerSet[status] = v + return nil + + }(); err != nil { + return err + } + } + + seen := make(map[string]bool) + + for _, s := range containerHealthStatuses { + mx["containers_health_status_"+s] = 0 + } + mx["containers_health_status_not_running_unhealthy"] = 0 + + for status, containers := range containerSet { + if status != types.Unhealthy { + mx["containers_health_status_"+status] = int64(len(containers)) + } + + for _, cntr := range containers { + if status == types.Unhealthy { + if cntr.State == "running" { + mx["containers_health_status_"+status] += 1 + } else { + mx["containers_health_status_not_running_unhealthy"] += 1 + } + } + + if len(cntr.Names) == 0 { + continue + } + + name := strings.TrimPrefix(cntr.Names[0], "/") + + seen[name] = true + + if !d.containers[name] { + d.containers[name] = true + d.addContainerCharts(name, cntr.Image) + } + + px := fmt.Sprintf("container_%s_", name) + + for _, s := range containerHealthStatuses { + mx[px+"health_status_"+s] = 0 + } + mx[px+"health_status_not_running_unhealthy"] = 0 + for _, s := range containerStates { + mx[px+"state_"+s] = 0 + } + + if status == types.Unhealthy && cntr.State != "running" { + mx[px+"health_status_not_running_unhealthy"] += 1 + } else { + mx[px+"health_status_"+status] = 1 + } + mx[px+"state_"+cntr.State] = 1 + mx[px+"size_rw"] = cntr.SizeRw + mx[px+"size_root_fs"] = cntr.SizeRootFs + } + } + + for name := range d.containers { + if !seen[name] { + delete(d.containers, name) + d.removeContainerCharts(name) + } + } + + return nil +} + +func (d *Docker) negotiateAPIVersion() { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + + d.client.NegotiateAPIVersion(ctx) +} diff --git a/src/go/collectors/go.d.plugin/modules/docker/config_schema.json b/src/go/collectors/go.d.plugin/modules/docker/config_schema.json new file mode 100644 index 00000000000000..b060da819fe5df --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/config_schema.json @@ -0,0 +1,26 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/docker job configuration schema.", + 
"type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_container_size": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/docker/docker.go b/src/go/collectors/go.d.plugin/modules/docker/docker.go new file mode 100644 index 00000000000000..1078de2fbca9ce --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/docker.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker + +import ( + "context" + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/docker/docker/api/types" + docker "github.com/docker/docker/client" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("docker", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Docker { + return &Docker{ + Config: Config{ + Address: docker.DefaultDockerHost, + Timeout: web.Duration{Duration: time.Second * 5}, + CollectContainerSize: false, + }, + + charts: summaryCharts.Copy(), + newClient: func(cfg Config) (dockerClient, error) { + return docker.NewClientWithOpts(docker.WithHost(cfg.Address)) + }, + containers: make(map[string]bool), + } +} + +type Config struct { + Timeout web.Duration `yaml:"timeout"` + Address string `yaml:"address"` + CollectContainerSize bool `yaml:"collect_container_size"` +} + +type ( + Docker struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + newClient func(Config) (dockerClient, error) + client dockerClient + verNegotiated bool + + containers map[string]bool + } + dockerClient interface { + NegotiateAPIVersion(context.Context) + Info(context.Context) (types.Info, error) + ImageList(context.Context, types.ImageListOptions) ([]types.ImageSummary, error) + ContainerList(context.Context, types.ContainerListOptions) ([]types.Container, error) + Close() error + } +) + +func (d *Docker) Init() bool { + return true +} + +func (d *Docker) Check() bool { + return len(d.Collect()) > 0 +} + +func (d *Docker) Charts() *module.Charts { + return d.charts +} + +func (d *Docker) Collect() map[string]int64 { + mx, err := d.collect() + if err != nil { + d.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (d *Docker) Cleanup() { + if d.client == nil { + return + } + if err := d.client.Close(); err != nil { + d.Warningf("error on closing docker client: %v", err) + } + d.client = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/docker/docker_test.go b/src/go/collectors/go.d.plugin/modules/docker/docker_test.go new file mode 100644 index 00000000000000..0a3711b4d53391 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/docker_test.go @@ -0,0 +1,828 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker + +import ( + "context" + "errors" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDocker_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default config": { + wantFail: false, + config: New().Config, + }, + "unset 'address'": { + wantFail: false, + config: Config{ + Address: "", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + d := New() + d.Config = 
test.config + + if test.wantFail { + assert.False(t, d.Init()) + } else { + assert.True(t, d.Init()) + } + }) + } +} + +func TestDocker_Charts(t *testing.T) { + assert.Equal(t, len(summaryCharts), len(*New().Charts())) +} + +func TestDocker_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func(d *Docker) + wantClose bool + }{ + "after New": { + wantClose: false, + prepare: func(d *Docker) {}, + }, + "after Init": { + wantClose: false, + prepare: func(d *Docker) { d.Init() }, + }, + "after Check": { + wantClose: true, + prepare: func(d *Docker) { d.Init(); d.Check() }, + }, + "after Collect": { + wantClose: true, + prepare: func(d *Docker) { d.Init(); d.Collect() }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := &mockClient{} + d := New() + d.newClient = prepareNewClientFunc(m) + + test.prepare(d) + + require.NotPanics(t, d.Cleanup) + + if test.wantClose { + assert.True(t, m.closeCalled) + } else { + assert.False(t, m.closeCalled) + } + }) + } +} + +func TestDocker_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *Docker + wantFail bool + }{ + "case success": { + wantFail: false, + prepare: func() *Docker { + return prepareCaseSuccess() + }, + }, + "case success without container size": { + wantFail: false, + prepare: func() *Docker { + return prepareCaseSuccessWithoutContainerSize() + }, + }, + "fail on case err on Info()": { + wantFail: true, + prepare: func() *Docker { + return prepareCaseErrOnInfo() + }, + }, + "fail on case err on ImageList()": { + wantFail: true, + prepare: func() *Docker { + return prepareCaseErrOnImageList() + }, + }, + "fail on case err on ContainerList()": { + wantFail: true, + prepare: func() *Docker { + return prepareCaseErrOnContainerList() + }, + }, + "fail on case err on creating Docker client": { + wantFail: true, + prepare: func() *Docker { + return prepareCaseErrCreatingClient() + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + d := test.prepare() + + require.True(t, d.Init()) + + if test.wantFail { + assert.False(t, d.Check()) + } else { + assert.True(t, d.Check()) + } + }) + } +} + +func TestDocker_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *Docker + expected map[string]int64 + }{ + "case success": { + prepare: func() *Docker { + return prepareCaseSuccess() + }, + expected: map[string]int64{ + "container_container10_health_status_healthy": 0, + "container_container10_health_status_none": 0, + "container_container10_health_status_not_running_unhealthy": 1, + "container_container10_health_status_starting": 0, + "container_container10_health_status_unhealthy": 0, + "container_container10_size_root_fs": 0, + "container_container10_size_rw": 0, + "container_container10_state_created": 0, + "container_container10_state_dead": 1, + "container_container10_state_exited": 0, + "container_container10_state_paused": 0, + "container_container10_state_removing": 0, + "container_container10_state_restarting": 0, + "container_container10_state_running": 0, + "container_container11_health_status_healthy": 0, + "container_container11_health_status_none": 0, + "container_container11_health_status_not_running_unhealthy": 0, + "container_container11_health_status_starting": 1, + "container_container11_health_status_unhealthy": 0, + "container_container11_size_root_fs": 0, + "container_container11_size_rw": 0, + "container_container11_state_created": 0, + "container_container11_state_dead": 0, + "container_container11_state_exited": 0, 
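+ // Sizes stay at zero even though this case enables collect_container_size:
+ // the mock's ContainerList ranges over value copies of the containers, so the
+ // SizeRw/SizeRootFs it assigns are lost before the slice is returned.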
+ "container_container11_state_paused": 0, + "container_container11_state_removing": 1, + "container_container11_state_restarting": 0, + "container_container11_state_running": 0, + "container_container12_health_status_healthy": 0, + "container_container12_health_status_none": 0, + "container_container12_health_status_not_running_unhealthy": 0, + "container_container12_health_status_starting": 1, + "container_container12_health_status_unhealthy": 0, + "container_container12_size_root_fs": 0, + "container_container12_size_rw": 0, + "container_container12_state_created": 0, + "container_container12_state_dead": 0, + "container_container12_state_exited": 1, + "container_container12_state_paused": 0, + "container_container12_state_removing": 0, + "container_container12_state_restarting": 0, + "container_container12_state_running": 0, + "container_container13_health_status_healthy": 0, + "container_container13_health_status_none": 0, + "container_container13_health_status_not_running_unhealthy": 0, + "container_container13_health_status_starting": 1, + "container_container13_health_status_unhealthy": 0, + "container_container13_size_root_fs": 0, + "container_container13_size_rw": 0, + "container_container13_state_created": 0, + "container_container13_state_dead": 0, + "container_container13_state_exited": 1, + "container_container13_state_paused": 0, + "container_container13_state_removing": 0, + "container_container13_state_restarting": 0, + "container_container13_state_running": 0, + "container_container14_health_status_healthy": 0, + "container_container14_health_status_none": 1, + "container_container14_health_status_not_running_unhealthy": 0, + "container_container14_health_status_starting": 0, + "container_container14_health_status_unhealthy": 0, + "container_container14_size_root_fs": 0, + "container_container14_size_rw": 0, + "container_container14_state_created": 0, + "container_container14_state_dead": 1, + "container_container14_state_exited": 0, + "container_container14_state_paused": 0, + "container_container14_state_removing": 0, + "container_container14_state_restarting": 0, + "container_container14_state_running": 0, + "container_container15_health_status_healthy": 0, + "container_container15_health_status_none": 1, + "container_container15_health_status_not_running_unhealthy": 0, + "container_container15_health_status_starting": 0, + "container_container15_health_status_unhealthy": 0, + "container_container15_size_root_fs": 0, + "container_container15_size_rw": 0, + "container_container15_state_created": 0, + "container_container15_state_dead": 1, + "container_container15_state_exited": 0, + "container_container15_state_paused": 0, + "container_container15_state_removing": 0, + "container_container15_state_restarting": 0, + "container_container15_state_running": 0, + "container_container16_health_status_healthy": 0, + "container_container16_health_status_none": 1, + "container_container16_health_status_not_running_unhealthy": 0, + "container_container16_health_status_starting": 0, + "container_container16_health_status_unhealthy": 0, + "container_container16_size_root_fs": 0, + "container_container16_size_rw": 0, + "container_container16_state_created": 0, + "container_container16_state_dead": 1, + "container_container16_state_exited": 0, + "container_container16_state_paused": 0, + "container_container16_state_removing": 0, + "container_container16_state_restarting": 0, + "container_container16_state_running": 0, + "container_container1_health_status_healthy": 1, + 
"container_container1_health_status_none": 0, + "container_container1_health_status_not_running_unhealthy": 0, + "container_container1_health_status_starting": 0, + "container_container1_health_status_unhealthy": 0, + "container_container1_size_root_fs": 0, + "container_container1_size_rw": 0, + "container_container1_state_created": 1, + "container_container1_state_dead": 0, + "container_container1_state_exited": 0, + "container_container1_state_paused": 0, + "container_container1_state_removing": 0, + "container_container1_state_restarting": 0, + "container_container1_state_running": 0, + "container_container2_health_status_healthy": 1, + "container_container2_health_status_none": 0, + "container_container2_health_status_not_running_unhealthy": 0, + "container_container2_health_status_starting": 0, + "container_container2_health_status_unhealthy": 0, + "container_container2_size_root_fs": 0, + "container_container2_size_rw": 0, + "container_container2_state_created": 0, + "container_container2_state_dead": 0, + "container_container2_state_exited": 0, + "container_container2_state_paused": 0, + "container_container2_state_removing": 0, + "container_container2_state_restarting": 0, + "container_container2_state_running": 1, + "container_container3_health_status_healthy": 1, + "container_container3_health_status_none": 0, + "container_container3_health_status_not_running_unhealthy": 0, + "container_container3_health_status_starting": 0, + "container_container3_health_status_unhealthy": 0, + "container_container3_size_root_fs": 0, + "container_container3_size_rw": 0, + "container_container3_state_created": 0, + "container_container3_state_dead": 0, + "container_container3_state_exited": 0, + "container_container3_state_paused": 0, + "container_container3_state_removing": 0, + "container_container3_state_restarting": 0, + "container_container3_state_running": 1, + "container_container4_health_status_healthy": 0, + "container_container4_health_status_none": 0, + "container_container4_health_status_not_running_unhealthy": 1, + "container_container4_health_status_starting": 0, + "container_container4_health_status_unhealthy": 0, + "container_container4_size_root_fs": 0, + "container_container4_size_rw": 0, + "container_container4_state_created": 1, + "container_container4_state_dead": 0, + "container_container4_state_exited": 0, + "container_container4_state_paused": 0, + "container_container4_state_removing": 0, + "container_container4_state_restarting": 0, + "container_container4_state_running": 0, + "container_container5_health_status_healthy": 0, + "container_container5_health_status_none": 0, + "container_container5_health_status_not_running_unhealthy": 0, + "container_container5_health_status_starting": 0, + "container_container5_health_status_unhealthy": 1, + "container_container5_size_root_fs": 0, + "container_container5_size_rw": 0, + "container_container5_state_created": 0, + "container_container5_state_dead": 0, + "container_container5_state_exited": 0, + "container_container5_state_paused": 0, + "container_container5_state_removing": 0, + "container_container5_state_restarting": 0, + "container_container5_state_running": 1, + "container_container6_health_status_healthy": 0, + "container_container6_health_status_none": 0, + "container_container6_health_status_not_running_unhealthy": 1, + "container_container6_health_status_starting": 0, + "container_container6_health_status_unhealthy": 0, + "container_container6_size_root_fs": 0, + "container_container6_size_rw": 0, + 
"container_container6_state_created": 0, + "container_container6_state_dead": 0, + "container_container6_state_exited": 0, + "container_container6_state_paused": 1, + "container_container6_state_removing": 0, + "container_container6_state_restarting": 0, + "container_container6_state_running": 0, + "container_container7_health_status_healthy": 0, + "container_container7_health_status_none": 0, + "container_container7_health_status_not_running_unhealthy": 1, + "container_container7_health_status_starting": 0, + "container_container7_health_status_unhealthy": 0, + "container_container7_size_root_fs": 0, + "container_container7_size_rw": 0, + "container_container7_state_created": 0, + "container_container7_state_dead": 0, + "container_container7_state_exited": 0, + "container_container7_state_paused": 0, + "container_container7_state_removing": 0, + "container_container7_state_restarting": 1, + "container_container7_state_running": 0, + "container_container8_health_status_healthy": 0, + "container_container8_health_status_none": 0, + "container_container8_health_status_not_running_unhealthy": 1, + "container_container8_health_status_starting": 0, + "container_container8_health_status_unhealthy": 0, + "container_container8_size_root_fs": 0, + "container_container8_size_rw": 0, + "container_container8_state_created": 0, + "container_container8_state_dead": 0, + "container_container8_state_exited": 0, + "container_container8_state_paused": 0, + "container_container8_state_removing": 1, + "container_container8_state_restarting": 0, + "container_container8_state_running": 0, + "container_container9_health_status_healthy": 0, + "container_container9_health_status_none": 0, + "container_container9_health_status_not_running_unhealthy": 1, + "container_container9_health_status_starting": 0, + "container_container9_health_status_unhealthy": 0, + "container_container9_size_root_fs": 0, + "container_container9_size_rw": 0, + "container_container9_state_created": 0, + "container_container9_state_dead": 0, + "container_container9_state_exited": 1, + "container_container9_state_paused": 0, + "container_container9_state_removing": 0, + "container_container9_state_restarting": 0, + "container_container9_state_running": 0, + "containers_health_status_healthy": 3, + "containers_health_status_none": 3, + "containers_health_status_not_running_unhealthy": 6, + "containers_health_status_starting": 3, + "containers_health_status_unhealthy": 1, + "containers_state_exited": 6, + "containers_state_paused": 5, + "containers_state_running": 4, + "images_active": 1, + "images_dangling": 1, + "images_size": 300, + }, + }, + "case success without container size": { + prepare: func() *Docker { + return prepareCaseSuccessWithoutContainerSize() + }, + expected: map[string]int64{ + "container_container10_health_status_healthy": 0, + "container_container10_health_status_none": 0, + "container_container10_health_status_not_running_unhealthy": 1, + "container_container10_health_status_starting": 0, + "container_container10_health_status_unhealthy": 0, + "container_container10_size_root_fs": 0, + "container_container10_size_rw": 0, + "container_container10_state_created": 0, + "container_container10_state_dead": 1, + "container_container10_state_exited": 0, + "container_container10_state_paused": 0, + "container_container10_state_removing": 0, + "container_container10_state_restarting": 0, + "container_container10_state_running": 0, + "container_container11_health_status_healthy": 0, + "container_container11_health_status_none": 0, 
+ "container_container11_health_status_not_running_unhealthy": 0, + "container_container11_health_status_starting": 1, + "container_container11_health_status_unhealthy": 0, + "container_container11_size_root_fs": 0, + "container_container11_size_rw": 0, + "container_container11_state_created": 0, + "container_container11_state_dead": 0, + "container_container11_state_exited": 0, + "container_container11_state_paused": 0, + "container_container11_state_removing": 1, + "container_container11_state_restarting": 0, + "container_container11_state_running": 0, + "container_container12_health_status_healthy": 0, + "container_container12_health_status_none": 0, + "container_container12_health_status_not_running_unhealthy": 0, + "container_container12_health_status_starting": 1, + "container_container12_health_status_unhealthy": 0, + "container_container12_size_root_fs": 0, + "container_container12_size_rw": 0, + "container_container12_state_created": 0, + "container_container12_state_dead": 0, + "container_container12_state_exited": 1, + "container_container12_state_paused": 0, + "container_container12_state_removing": 0, + "container_container12_state_restarting": 0, + "container_container12_state_running": 0, + "container_container13_health_status_healthy": 0, + "container_container13_health_status_none": 0, + "container_container13_health_status_not_running_unhealthy": 0, + "container_container13_health_status_starting": 1, + "container_container13_health_status_unhealthy": 0, + "container_container13_size_root_fs": 0, + "container_container13_size_rw": 0, + "container_container13_state_created": 0, + "container_container13_state_dead": 0, + "container_container13_state_exited": 1, + "container_container13_state_paused": 0, + "container_container13_state_removing": 0, + "container_container13_state_restarting": 0, + "container_container13_state_running": 0, + "container_container14_health_status_healthy": 0, + "container_container14_health_status_none": 1, + "container_container14_health_status_not_running_unhealthy": 0, + "container_container14_health_status_starting": 0, + "container_container14_health_status_unhealthy": 0, + "container_container14_size_root_fs": 0, + "container_container14_size_rw": 0, + "container_container14_state_created": 0, + "container_container14_state_dead": 1, + "container_container14_state_exited": 0, + "container_container14_state_paused": 0, + "container_container14_state_removing": 0, + "container_container14_state_restarting": 0, + "container_container14_state_running": 0, + "container_container15_health_status_healthy": 0, + "container_container15_health_status_none": 1, + "container_container15_health_status_not_running_unhealthy": 0, + "container_container15_health_status_starting": 0, + "container_container15_health_status_unhealthy": 0, + "container_container15_size_root_fs": 0, + "container_container15_size_rw": 0, + "container_container15_state_created": 0, + "container_container15_state_dead": 1, + "container_container15_state_exited": 0, + "container_container15_state_paused": 0, + "container_container15_state_removing": 0, + "container_container15_state_restarting": 0, + "container_container15_state_running": 0, + "container_container16_health_status_healthy": 0, + "container_container16_health_status_none": 1, + "container_container16_health_status_not_running_unhealthy": 0, + "container_container16_health_status_starting": 0, + "container_container16_health_status_unhealthy": 0, + "container_container16_size_root_fs": 0, + 
"container_container16_size_rw": 0, + "container_container16_state_created": 0, + "container_container16_state_dead": 1, + "container_container16_state_exited": 0, + "container_container16_state_paused": 0, + "container_container16_state_removing": 0, + "container_container16_state_restarting": 0, + "container_container16_state_running": 0, + "container_container1_health_status_healthy": 1, + "container_container1_health_status_none": 0, + "container_container1_health_status_not_running_unhealthy": 0, + "container_container1_health_status_starting": 0, + "container_container1_health_status_unhealthy": 0, + "container_container1_size_root_fs": 0, + "container_container1_size_rw": 0, + "container_container1_state_created": 1, + "container_container1_state_dead": 0, + "container_container1_state_exited": 0, + "container_container1_state_paused": 0, + "container_container1_state_removing": 0, + "container_container1_state_restarting": 0, + "container_container1_state_running": 0, + "container_container2_health_status_healthy": 1, + "container_container2_health_status_none": 0, + "container_container2_health_status_not_running_unhealthy": 0, + "container_container2_health_status_starting": 0, + "container_container2_health_status_unhealthy": 0, + "container_container2_size_root_fs": 0, + "container_container2_size_rw": 0, + "container_container2_state_created": 0, + "container_container2_state_dead": 0, + "container_container2_state_exited": 0, + "container_container2_state_paused": 0, + "container_container2_state_removing": 0, + "container_container2_state_restarting": 0, + "container_container2_state_running": 1, + "container_container3_health_status_healthy": 1, + "container_container3_health_status_none": 0, + "container_container3_health_status_not_running_unhealthy": 0, + "container_container3_health_status_starting": 0, + "container_container3_health_status_unhealthy": 0, + "container_container3_size_root_fs": 0, + "container_container3_size_rw": 0, + "container_container3_state_created": 0, + "container_container3_state_dead": 0, + "container_container3_state_exited": 0, + "container_container3_state_paused": 0, + "container_container3_state_removing": 0, + "container_container3_state_restarting": 0, + "container_container3_state_running": 1, + "container_container4_health_status_healthy": 0, + "container_container4_health_status_none": 0, + "container_container4_health_status_not_running_unhealthy": 1, + "container_container4_health_status_starting": 0, + "container_container4_health_status_unhealthy": 0, + "container_container4_size_root_fs": 0, + "container_container4_size_rw": 0, + "container_container4_state_created": 1, + "container_container4_state_dead": 0, + "container_container4_state_exited": 0, + "container_container4_state_paused": 0, + "container_container4_state_removing": 0, + "container_container4_state_restarting": 0, + "container_container4_state_running": 0, + "container_container5_health_status_healthy": 0, + "container_container5_health_status_none": 0, + "container_container5_health_status_not_running_unhealthy": 0, + "container_container5_health_status_starting": 0, + "container_container5_health_status_unhealthy": 1, + "container_container5_size_root_fs": 0, + "container_container5_size_rw": 0, + "container_container5_state_created": 0, + "container_container5_state_dead": 0, + "container_container5_state_exited": 0, + "container_container5_state_paused": 0, + "container_container5_state_removing": 0, + "container_container5_state_restarting": 0, + 
"container_container5_state_running": 1, + "container_container6_health_status_healthy": 0, + "container_container6_health_status_none": 0, + "container_container6_health_status_not_running_unhealthy": 1, + "container_container6_health_status_starting": 0, + "container_container6_health_status_unhealthy": 0, + "container_container6_size_root_fs": 0, + "container_container6_size_rw": 0, + "container_container6_state_created": 0, + "container_container6_state_dead": 0, + "container_container6_state_exited": 0, + "container_container6_state_paused": 1, + "container_container6_state_removing": 0, + "container_container6_state_restarting": 0, + "container_container6_state_running": 0, + "container_container7_health_status_healthy": 0, + "container_container7_health_status_none": 0, + "container_container7_health_status_not_running_unhealthy": 1, + "container_container7_health_status_starting": 0, + "container_container7_health_status_unhealthy": 0, + "container_container7_size_root_fs": 0, + "container_container7_size_rw": 0, + "container_container7_state_created": 0, + "container_container7_state_dead": 0, + "container_container7_state_exited": 0, + "container_container7_state_paused": 0, + "container_container7_state_removing": 0, + "container_container7_state_restarting": 1, + "container_container7_state_running": 0, + "container_container8_health_status_healthy": 0, + "container_container8_health_status_none": 0, + "container_container8_health_status_not_running_unhealthy": 1, + "container_container8_health_status_starting": 0, + "container_container8_health_status_unhealthy": 0, + "container_container8_size_root_fs": 0, + "container_container8_size_rw": 0, + "container_container8_state_created": 0, + "container_container8_state_dead": 0, + "container_container8_state_exited": 0, + "container_container8_state_paused": 0, + "container_container8_state_removing": 1, + "container_container8_state_restarting": 0, + "container_container8_state_running": 0, + "container_container9_health_status_healthy": 0, + "container_container9_health_status_none": 0, + "container_container9_health_status_not_running_unhealthy": 1, + "container_container9_health_status_starting": 0, + "container_container9_health_status_unhealthy": 0, + "container_container9_size_root_fs": 0, + "container_container9_size_rw": 0, + "container_container9_state_created": 0, + "container_container9_state_dead": 0, + "container_container9_state_exited": 1, + "container_container9_state_paused": 0, + "container_container9_state_removing": 0, + "container_container9_state_restarting": 0, + "container_container9_state_running": 0, + "containers_health_status_healthy": 3, + "containers_health_status_none": 3, + "containers_health_status_not_running_unhealthy": 6, + "containers_health_status_starting": 3, + "containers_health_status_unhealthy": 1, + "containers_state_exited": 6, + "containers_state_paused": 5, + "containers_state_running": 4, + "images_active": 1, + "images_dangling": 1, + "images_size": 300, + }, + }, + "fail on case err on Info()": { + prepare: func() *Docker { + return prepareCaseErrOnInfo() + }, + expected: nil, + }, + "fail on case err on ImageList()": { + prepare: func() *Docker { + return prepareCaseErrOnImageList() + }, + expected: nil, + }, + "fail on case err on ContainerList()": { + prepare: func() *Docker { + return prepareCaseErrOnContainerList() + }, + expected: nil, + }, + "fail on case err on creating Docker client": { + prepare: func() *Docker { + return prepareCaseErrCreatingClient() + }, + expected: 
nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + d := test.prepare() + + require.True(t, d.Init()) + + mx := d.Collect() + + require.Equal(t, test.expected, mx) + + if d.client != nil { + m, ok := d.client.(*mockClient) + require.True(t, ok) + require.True(t, m.negotiateAPIVersionCalled) + } + + }) + } +} + +func prepareCaseSuccess() *Docker { + d := New() + d.CollectContainerSize = true + d.newClient = prepareNewClientFunc(&mockClient{}) + return d +} + +func prepareCaseSuccessWithoutContainerSize() *Docker { + d := New() + d.CollectContainerSize = false + d.newClient = prepareNewClientFunc(&mockClient{}) + return d +} + +func prepareCaseErrOnInfo() *Docker { + d := New() + d.newClient = prepareNewClientFunc(&mockClient{errOnInfo: true}) + return d +} + +func prepareCaseErrOnImageList() *Docker { + d := New() + d.newClient = prepareNewClientFunc(&mockClient{errOnImageList: true}) + return d +} + +func prepareCaseErrOnContainerList() *Docker { + d := New() + d.newClient = prepareNewClientFunc(&mockClient{errOnContainerList: true}) + return d +} + +func prepareCaseErrCreatingClient() *Docker { + d := New() + d.newClient = prepareNewClientFunc(nil) + return d +} + +func prepareNewClientFunc(m *mockClient) func(_ Config) (dockerClient, error) { + if m == nil { + return func(_ Config) (dockerClient, error) { return nil, errors.New("mock.newClient() error") } + } + return func(_ Config) (dockerClient, error) { return m, nil } +} + +type mockClient struct { + errOnInfo bool + errOnImageList bool + errOnContainerList bool + negotiateAPIVersionCalled bool + closeCalled bool +} + +func (m *mockClient) Info(_ context.Context) (types.Info, error) { + if m.errOnInfo { + return types.Info{}, errors.New("mockClient.Info() error") + } + + return types.Info{ + ContainersRunning: 4, + ContainersPaused: 5, + ContainersStopped: 6, + }, nil +} + +func (m *mockClient) ContainerList(_ context.Context, opts types.ContainerListOptions) ([]types.Container, error) { + if m.errOnContainerList { + return nil, errors.New("mockClient.ContainerList() error") + } + + v := opts.Filters.Get("health") + + if len(v) == 0 { + return nil, errors.New("mockClient.ContainerList() error (expect 'health' filter)") + } + + var containers []types.Container + + switch v[0] { + case types.Healthy: + containers = []types.Container{ + {Names: []string{"container1"}, State: "created", Image: "example/example:v1"}, + {Names: []string{"container2"}, State: "running", Image: "example/example:v1"}, + {Names: []string{"container3"}, State: "running", Image: "example/example:v1"}, + } + case types.Unhealthy: + containers = []types.Container{ + {Names: []string{"container4"}, State: "created", Image: "example/example:v2"}, + {Names: []string{"container5"}, State: "running", Image: "example/example:v2"}, + {Names: []string{"container6"}, State: "paused", Image: "example/example:v2"}, + {Names: []string{"container7"}, State: "restarting", Image: "example/example:v2"}, + {Names: []string{"container8"}, State: "removing", Image: "example/example:v2"}, + {Names: []string{"container9"}, State: "exited", Image: "example/example:v2"}, + {Names: []string{"container10"}, State: "dead", Image: "example/example:v2"}, + } + case types.Starting: + containers = []types.Container{ + {Names: []string{"container11"}, State: "removing", Image: "example/example:v3"}, + {Names: []string{"container12"}, State: "exited", Image: "example/example:v3"}, + {Names: []string{"container13"}, State: "exited", Image: "example/example:v3"}, 
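+ // The "starting" containers are deliberately left in non-running states:
+ // unlike "unhealthy", the starting status is never re-bucketed by running state.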
+ } + case types.NoHealthcheck: + containers = []types.Container{ + {Names: []string{"container14"}, State: "dead", Image: "example/example:v4"}, + {Names: []string{"container15"}, State: "dead", Image: "example/example:v4"}, + {Names: []string{"container16"}, State: "dead", Image: "example/example:v4"}, + } + } + + if opts.Size { + for _, c := range containers { + c.SizeRw = 123 + c.SizeRootFs = 321 + } + } + + return containers, nil +} + +func (m *mockClient) ImageList(_ context.Context, _ types.ImageListOptions) ([]types.ImageSummary, error) { + if m.errOnImageList { + return nil, errors.New("mockClient.ImageList() error") + } + + return []types.ImageSummary{ + { + Containers: 0, + Size: 100, + }, + { + Containers: 1, + Size: 200, + }, + }, nil +} + +func (m *mockClient) NegotiateAPIVersion(_ context.Context) { + m.negotiateAPIVersionCalled = true +} + +func (m *mockClient) Close() error { + m.closeCalled = true + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/docker/integrations/docker.md b/src/go/collectors/go.d.plugin/modules/docker/integrations/docker.md new file mode 100644 index 00000000000000..b62ebf2fd7a88f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/integrations/docker.md @@ -0,0 +1,208 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/docker/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/docker/metadata.yaml" +sidebar_label: "Docker" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Docker + + +<img src="https://netdata.cloud/img/docker.svg" width="150"/> + + +Plugin: go.d.plugin +Module: docker + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Docker containers state, health status and more. + + +It connects to the Docker instance via a TCP or UNIX socket and executes the following commands: + +- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo). +- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList). +- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +Requires netdata user to be in the docker group. + +### Default Behavior + +#### Auto-Detection + +It discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Enabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine. + + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Docker instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
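The Overview above names three daemon endpoints. For orientation, here is a minimal, hypothetical sketch of those calls using the upstream Docker Go client (the same API surface this module's `dockerClient` interface declares); the socket path is the collector's documented default, and error handling is collapsed to panics for brevity:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	docker "github.com/docker/docker/client"
)

func main() {
	// Connect the same way the collector does by default: the local UNIX socket.
	cli, err := docker.NewClientWithOpts(docker.WithHost("unix:///var/run/docker.sock"))
	if err != nil {
		panic(err)
	}
	defer func() { _ = cli.Close() }()

	ctx := context.Background()
	cli.NegotiateAPIVersion(ctx) // agree on an API version with the daemon

	info, err := cli.Info(ctx) // "System info"
	if err != nil {
		panic(err)
	}

	images, err := cli.ImageList(ctx, types.ImageListOptions{}) // "List images"
	if err != nil {
		panic(err)
	}

	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true}) // "List containers"
	if err != nil {
		panic(err)
	}

	fmt.Printf("running: %d, images: %d, containers: %d\n",
		info.ContainersRunning, len(images), len(containers))
}
```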
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| docker.containers_state | running, paused, stopped | containers | +| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers | +| docker.images | active, dangling | images | +| docker.images_size | size | bytes | + +### Per container + +Metrics related to containers. Each container provides its own set of the following metrics. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| container_name | The container's name | +| image | The image name the container uses | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| docker.container_state | running, paused, exited, created, restarting, removing, dead | state | +| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status | +| docker.container_writeable_layer_size | writeable_layer | size | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/docker.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/docker.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes | +| timeout | Request timeout in seconds. | 1 | no | +| collect_container_size | Whether to collect container writable layer size. | no | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +```yaml +jobs: + - name: local + address: 'unix:///var/run/docker.sock' + +``` +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 'unix:///var/run/docker.sock' + + - name: remote + address: 'tcp://203.0.113.10:2375' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m docker + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/docker/metadata.yaml b/src/go/collectors/go.d.plugin/modules/docker/metadata.yaml new file mode 100644 index 00000000000000..408e84a4582660 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker/metadata.yaml @@ -0,0 +1,190 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-docker + plugin_name: go.d.plugin + module_name: docker + alternative_monitored_instances: [] + monitored_instance: + name: Docker + link: https://www.docker.com/ + categories: + - data-collection.containers-and-vms + icon_filename: docker.svg + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - container + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Docker containers state, health status and more. + method_description: | + It connects to the Docker instance via a TCP or UNIX socket and executes the following commands: + + - [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo). + - [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList). + - [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList). + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: Requires netdata user to be in the docker group. + default_behavior: + auto_detection: + description: | + It discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`. + limits: + description: "" + performance_impact: + description: | + Enabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine. + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/docker.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: 'Docker daemon''s listening address. When using a TCP socket, the format is: tcp://[ip]:[port]' + default_value: unix:///var/run/docker.sock + required: true + - name: timeout + description: Request timeout in seconds. + default_value: 1 + required: false + - name: collect_container_size + description: Whether to collect container writable layer size. + default_value: "no" + required: false + examples: + folding: + enabled: true + title: Config + list: + - name: Basic + description: An example configuration. + folding: + enabled: false + config: | + jobs: + - name: local + address: 'unix:///var/run/docker.sock' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. 
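+ # 203.0.113.10 is a documentation-range address; 2375 is Docker's conventional plain-TCP port (2376 when TLS is enabled).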
+ config: | + jobs: + - name: local + address: 'unix:///var/run/docker.sock' + + - name: remote + address: 'tcp://203.0.113.10:2375' + troubleshooting: + problems: + list: [] + alerts: + - name: docker_container_unhealthy + metric: docker.container_health_status + info: ${label:container_name} docker container health status is unhealthy + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: docker.containers_state + description: Total number of Docker containers in various states + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: paused + - name: stopped + - name: docker.containers_health_status + description: Total number of Docker containers in various health states + unit: containers + chart_type: line + dimensions: + - name: healthy + - name: unhealthy + - name: not_running_unhealthy + - name: starting + - name: no_healthcheck + - name: docker.images + description: Total number of Docker images in various states + unit: images + chart_type: stacked + dimensions: + - name: active + - name: dangling + - name: docker.images_size + description: Total size of all Docker images + unit: bytes + chart_type: line + dimensions: + - name: size + - name: container + description: Metrics related to containers. Each container provides its own set of the following metrics. + labels: + - name: container_name + description: The container's name + - name: image + description: The image name the container uses + metrics: + - name: docker.container_state + description: Docker container state + unit: state + chart_type: line + dimensions: + - name: running + - name: paused + - name: exited + - name: created + - name: restarting + - name: removing + - name: dead + - name: docker.container_health_status + description: Docker container health status + unit: status + chart_type: line + dimensions: + - name: healthy + - name: unhealthy + - name: not_running_unhealthy + - name: starting + - name: no_healthcheck + - name: docker.container_writeable_layer_size + description: Docker container writable layer size + unit: size + chart_type: line + dimensions: + - name: writeable_layer diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/README.md b/src/go/collectors/go.d.plugin/modules/docker_engine/README.md new file mode 120000 index 00000000000000..f00a4cd97925bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/README.md @@ -0,0 +1 @@ +integrations/docker_engine.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/charts.go b/src/go/collectors/go.d.plugin/modules/docker_engine/charts.go new file mode 100644 index 00000000000000..056cad0a138e0c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/charts.go @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + Charts = module.Charts + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "engine_daemon_container_actions", + Title: "Container Actions", + Units: "actions/s", + Fam: "containers", + Ctx: "docker_engine.engine_daemon_container_actions", + Type: module.Stacked, + Dims: Dims{ + {ID: "container_actions_changes", Name: "changes", Algo: module.Incremental}, + {ID: 
"container_actions_commit", Name: "commit", Algo: module.Incremental}, + {ID: "container_actions_create", Name: "create", Algo: module.Incremental}, + {ID: "container_actions_delete", Name: "delete", Algo: module.Incremental}, + {ID: "container_actions_start", Name: "start", Algo: module.Incremental}, + }, + }, + { + ID: "engine_daemon_container_states_containers", + Title: "Containers In Various States", + Units: "containers", + Fam: "containers", + Ctx: "docker_engine.engine_daemon_container_states_containers", + Type: module.Stacked, + Dims: Dims{ + {ID: "container_states_running", Name: "running"}, + {ID: "container_states_paused", Name: "paused"}, + {ID: "container_states_stopped", Name: "stopped"}, + }, + }, + { + ID: "builder_builds_failed_total", + Title: "Builder Builds Fails By Reason", + Units: "fails/s", + Fam: "builder", + Ctx: "docker_engine.builder_builds_failed_total", + Type: module.Stacked, + Dims: Dims{ + {ID: "builder_fails_build_canceled", Name: "build_canceled", Algo: module.Incremental}, + {ID: "builder_fails_build_target_not_reachable_error", Name: "build_target_not_reachable_error", Algo: module.Incremental}, + {ID: "builder_fails_command_not_supported_error", Name: "command_not_supported_error", Algo: module.Incremental}, + {ID: "builder_fails_dockerfile_empty_error", Name: "dockerfile_empty_error", Algo: module.Incremental}, + {ID: "builder_fails_dockerfile_syntax_error", Name: "dockerfile_syntax_error", Algo: module.Incremental}, + {ID: "builder_fails_error_processing_commands_error", Name: "error_processing_commands_error", Algo: module.Incremental}, + {ID: "builder_fails_missing_onbuild_arguments_error", Name: "missing_onbuild_arguments_error", Algo: module.Incremental}, + {ID: "builder_fails_unknown_instruction_error", Name: "unknown_instruction_error", Algo: module.Incremental}, + }, + }, + { + ID: "engine_daemon_health_checks_failed_total", + Title: "Health Checks", + Units: "events/s", + Fam: "health checks", + Ctx: "docker_engine.engine_daemon_health_checks_failed_total", + Dims: Dims{ + {ID: "health_checks_failed", Name: "fails", Algo: module.Incremental}, + }, + }, +} + +var swarmManagerCharts = Charts{ + { + ID: "swarm_manager_leader", + Title: "Swarm Manager Leader", + Units: "bool", + Fam: "swarm", + Ctx: "docker_engine.swarm_manager_leader", + Dims: Dims{ + {ID: "swarm_manager_leader", Name: "is_leader"}, + }, + }, + { + ID: "swarm_manager_object_store", + Title: "Swarm Manager Object Store", + Units: "objects", + Fam: "swarm", + Type: module.Stacked, + Ctx: "docker_engine.swarm_manager_object_store", + Dims: Dims{ + {ID: "swarm_manager_nodes_total", Name: "nodes"}, + {ID: "swarm_manager_services_total", Name: "services"}, + {ID: "swarm_manager_tasks_total", Name: "tasks"}, + {ID: "swarm_manager_networks_total", Name: "networks"}, + {ID: "swarm_manager_secrets_total", Name: "secrets"}, + {ID: "swarm_manager_configs_total", Name: "configs"}, + }, + }, + { + ID: "swarm_manager_nodes_per_state", + Title: "Swarm Manager Nodes Per State", + Units: "nodes", + Fam: "swarm", + Ctx: "docker_engine.swarm_manager_nodes_per_state", + Type: module.Stacked, + Dims: Dims{ + {ID: "swarm_manager_nodes_state_ready", Name: "ready"}, + {ID: "swarm_manager_nodes_state_down", Name: "down"}, + {ID: "swarm_manager_nodes_state_unknown", Name: "unknown"}, + {ID: "swarm_manager_nodes_state_disconnected", Name: "disconnected"}, + }, + }, + { + ID: "swarm_manager_tasks_per_state", + Title: "Swarm Manager Tasks Per State", + Units: "tasks", + Fam: "swarm", + Ctx: 
"docker_engine.swarm_manager_tasks_per_state", + Type: module.Stacked, + Dims: Dims{ + {ID: "swarm_manager_tasks_state_running", Name: "running"}, + {ID: "swarm_manager_tasks_state_failed", Name: "failed"}, + {ID: "swarm_manager_tasks_state_ready", Name: "ready"}, + {ID: "swarm_manager_tasks_state_rejected", Name: "rejected"}, + {ID: "swarm_manager_tasks_state_starting", Name: "starting"}, + {ID: "swarm_manager_tasks_state_shutdown", Name: "shutdown"}, + {ID: "swarm_manager_tasks_state_new", Name: "new"}, + {ID: "swarm_manager_tasks_state_orphaned", Name: "orphaned"}, + {ID: "swarm_manager_tasks_state_preparing", Name: "preparing"}, + {ID: "swarm_manager_tasks_state_pending", Name: "pending"}, + {ID: "swarm_manager_tasks_state_complete", Name: "complete"}, + {ID: "swarm_manager_tasks_state_remove", Name: "remove"}, + {ID: "swarm_manager_tasks_state_accepted", Name: "accepted"}, + {ID: "swarm_manager_tasks_state_assigned", Name: "assigned"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/collect.go b/src/go/collectors/go.d.plugin/modules/docker_engine/collect.go new file mode 100644 index 00000000000000..c259f3ae10a4cd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/collect.go @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func isDockerEngineMetrics(pms prometheus.Series) bool { + return pms.FindByName("engine_daemon_engine_info").Len() > 0 +} + +func (de *DockerEngine) collect() (map[string]int64, error) { + pms, err := de.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if !isDockerEngineMetrics(pms) { + return nil, fmt.Errorf("'%s' returned non docker engine metrics", de.URL) + } + + mx := de.collectMetrics(pms) + return stm.ToMap(mx), nil +} + +func (de *DockerEngine) collectMetrics(pms prometheus.Series) metrics { + var mx metrics + collectHealthChecks(&mx, pms) + collectContainerActions(&mx, pms) + collectBuilderBuildsFails(&mx, pms) + if hasContainerStates(pms) { + de.hasContainerStates = true + mx.Container.States = &containerStates{} + collectContainerStates(&mx, pms) + } + if isSwarmManager(pms) { + de.isSwarmManager = true + mx.SwarmManager = &swarmManager{} + collectSwarmManager(&mx, pms) + } + return mx +} + +func isSwarmManager(pms prometheus.Series) bool { + return pms.FindByName("swarm_node_manager").Max() == 1 +} + +func hasContainerStates(pms prometheus.Series) bool { + return pms.FindByName("engine_daemon_container_states_containers").Len() > 0 +} + +func collectHealthChecks(mx *metrics, raw prometheus.Series) { + v := raw.FindByName("engine_daemon_health_checks_failed_total").Max() + mx.HealthChecks.Failed = v +} + +func collectContainerActions(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName("engine_daemon_container_actions_seconds_count") { + action := metric.Labels.Get("action") + if action == "" { + continue + } + + v := metric.Value + switch action { + default: + case "changes": + mx.Container.Actions.Changes = v + case "commit": + mx.Container.Actions.Commit = v + case "create": + mx.Container.Actions.Create = v + case "delete": + mx.Container.Actions.Delete = v + case "start": + mx.Container.Actions.Start = v + } + } +} + +func collectContainerStates(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName("engine_daemon_container_states_containers") { + state := 
metric.Labels.Get("state") + if state == "" { + continue + } + + v := metric.Value + switch state { + default: + case "paused": + mx.Container.States.Paused = v + case "running": + mx.Container.States.Running = v + case "stopped": + mx.Container.States.Stopped = v + } + } +} + +func collectBuilderBuildsFails(mx *metrics, raw prometheus.Series) { + for _, metric := range raw.FindByName("builder_builds_failed_total") { + reason := metric.Labels.Get("reason") + if reason == "" { + continue + } + + v := metric.Value + switch reason { + default: + case "build_canceled": + mx.Builder.FailsByReason.BuildCanceled = v + case "build_target_not_reachable_error": + mx.Builder.FailsByReason.BuildTargetNotReachableError = v + case "command_not_supported_error": + mx.Builder.FailsByReason.CommandNotSupportedError = v + case "dockerfile_empty_error": + mx.Builder.FailsByReason.DockerfileEmptyError = v + case "dockerfile_syntax_error": + mx.Builder.FailsByReason.DockerfileSyntaxError = v + case "error_processing_commands_error": + mx.Builder.FailsByReason.ErrorProcessingCommandsError = v + case "missing_onbuild_arguments_error": + mx.Builder.FailsByReason.MissingOnbuildArgumentsError = v + case "unknown_instruction_error": + mx.Builder.FailsByReason.UnknownInstructionError = v + } + } +} + +func collectSwarmManager(mx *metrics, raw prometheus.Series) { + v := raw.FindByName("swarm_manager_configs_total").Max() + mx.SwarmManager.Configs = v + + v = raw.FindByName("swarm_manager_networks_total").Max() + mx.SwarmManager.Networks = v + + v = raw.FindByName("swarm_manager_secrets_total").Max() + mx.SwarmManager.Secrets = v + + v = raw.FindByName("swarm_manager_services_total").Max() + mx.SwarmManager.Services = v + + v = raw.FindByName("swarm_manager_leader").Max() + mx.SwarmManager.IsLeader = v + + for _, metric := range raw.FindByName("swarm_manager_nodes") { + state := metric.Labels.Get("state") + if state == "" { + continue + } + + v := metric.Value + switch state { + default: + case "disconnected": + mx.SwarmManager.Nodes.PerState.Disconnected = v + case "down": + mx.SwarmManager.Nodes.PerState.Down = v + case "ready": + mx.SwarmManager.Nodes.PerState.Ready = v + case "unknown": + mx.SwarmManager.Nodes.PerState.Unknown = v + } + mx.SwarmManager.Nodes.Total += v + } + + for _, metric := range raw.FindByName("swarm_manager_tasks_total") { + state := metric.Labels.Get("state") + if state == "" { + continue + } + + v := metric.Value + switch state { + default: + case "accepted": + mx.SwarmManager.Tasks.PerState.Accepted = v + case "assigned": + mx.SwarmManager.Tasks.PerState.Assigned = v + case "complete": + mx.SwarmManager.Tasks.PerState.Complete = v + case "failed": + mx.SwarmManager.Tasks.PerState.Failed = v + case "new": + mx.SwarmManager.Tasks.PerState.New = v + case "orphaned": + mx.SwarmManager.Tasks.PerState.Orphaned = v + case "pending": + mx.SwarmManager.Tasks.PerState.Pending = v + case "preparing": + mx.SwarmManager.Tasks.PerState.Preparing = v + case "ready": + mx.SwarmManager.Tasks.PerState.Ready = v + case "rejected": + mx.SwarmManager.Tasks.PerState.Rejected = v + case "remove": + mx.SwarmManager.Tasks.PerState.Remove = v + case "running": + mx.SwarmManager.Tasks.PerState.Running = v + case "shutdown": + mx.SwarmManager.Tasks.PerState.Shutdown = v + case "starting": + mx.SwarmManager.Tasks.PerState.Starting = v + } + mx.SwarmManager.Tasks.Total += v + } +} diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/config_schema.json 
b/src/go/collectors/go.d.plugin/modules/docker_engine/config_schema.json new file mode 100644 index 00000000000000..2b85056106e421 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/docker_engine job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine.go b/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine.go new file mode 100644 index 00000000000000..7c69daa29cbb57 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine.go @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("docker_engine", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *DockerEngine { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9323/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + return &DockerEngine{ + Config: config, + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + } + DockerEngine struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + isSwarmManager bool + hasContainerStates bool + } +) + +func (de DockerEngine) validateConfig() error { + if de.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (de *DockerEngine) initClient() error { + client, err := web.NewHTTPClient(de.Client) + if err != nil { + return err + } + + de.prom = prometheus.New(client, de.Request) + return nil +} + +func (de *DockerEngine) Init() bool { + if err := de.validateConfig(); err != nil { + de.Errorf("config validation: %v", err) + return false + } + if err := de.initClient(); err != nil { + de.Errorf("client initialization: %v", err) + return false + } + return true +} + +func (de *DockerEngine) Check() bool { + return len(de.Collect()) > 0 +} + +func (de DockerEngine) Charts() *Charts { + cs := charts.Copy() + if !de.hasContainerStates { + if err := cs.Remove("engine_daemon_container_states_containers"); err != nil { + de.Warning(err) + } + } + + if !de.isSwarmManager { + return cs + } + + if err := cs.Add(*swarmManagerCharts.Copy()...); err != nil { + de.Warning(err) + } + return cs +} + +func (de *DockerEngine) Collect() map[string]int64 { + mx, err := de.collect() + if err != nil { + de.Error(err) + return nil + } + + if len(mx) == 0 { 
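+ // an empty result is reported as nil so that Check(), which tests len(Collect()) > 0, treats it as a failure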
+ return nil + } + return mx +} + +func (DockerEngine) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine_test.go b/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine_test.go new file mode 100644 index 00000000000000..7ffc1ce5e4eb12 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/docker_engine_test.go @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + metricsNonDockerEngine, _ = os.ReadFile("testdata/non-docker-engine.txt") + metricsV17050CE, _ = os.ReadFile("testdata/v17.05.0-ce.txt") + metricsV18093CE, _ = os.ReadFile("testdata/v18.09.3-ce.txt") + metricsV18093CESwarm, _ = os.ReadFile("testdata/v18.09.3-ce-swarm.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, metricsNonDockerEngine) + assert.NotNil(t, metricsV17050CE) + assert.NotNil(t, metricsV18093CE) + assert.NotNil(t, metricsV18093CESwarm) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestDockerEngine_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestDockerEngine_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default": { + config: New().Config, + }, + "empty URL": { + config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + wantFail: true, + }, + "nonexistent TLS CA": { + config: Config{HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:9323/metrics"}, + Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, + wantFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dockerEngine := New() + dockerEngine.Config = test.config + + if test.wantFail { + assert.False(t, dockerEngine.Init()) + } else { + assert.True(t, dockerEngine.Init()) + } + }) + } +} + +func TestDockerEngine_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*DockerEngine, *httptest.Server) + wantFail bool + }{ + "v17.05.0-ce": {prepare: prepareClientServerV17050CE}, + "v18.09.3-ce": {prepare: prepareClientServerV18093CE}, + "v18.09.3-ce-swarm": {prepare: prepareClientServerV18093CESwarm}, + "non docker engine": {prepare: prepareClientServerNonDockerEngine, wantFail: true}, + "invalid data": {prepare: prepareClientServerInvalidData, wantFail: true}, + "404": {prepare: prepareClientServer404, wantFail: true}, + "connection refused": {prepare: prepareClientServerConnectionRefused, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dockerEngine, srv := test.prepare(t) + defer srv.Close() + + if test.wantFail { + assert.False(t, dockerEngine.Check()) + } else { + assert.True(t, dockerEngine.Check()) + } + }) + } +} + +func TestDockerEngine_Charts(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*DockerEngine, *httptest.Server) + wantNumCharts int + }{ + "v17.05.0-ce": {prepare: prepareClientServerV17050CE, wantNumCharts: len(charts) - 1}, // no container states chart + "v18.09.3-ce": {prepare: prepareClientServerV18093CE, wantNumCharts: len(charts)}, + "v18.09.3-ce-swarm": {prepare: prepareClientServerV18093CESwarm, wantNumCharts: len(charts) + 
len(swarmManagerCharts)}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dockerEngine, srv := test.prepare(t) + defer srv.Close() + + require.True(t, dockerEngine.Check()) + assert.Len(t, *dockerEngine.Charts(), test.wantNumCharts) + }) + } +} + +func TestDockerEngine_Collect_ReturnsNilOnErrors(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*DockerEngine, *httptest.Server) + }{ + "non docker engine": {prepare: prepareClientServerNonDockerEngine}, + "invalid data": {prepare: prepareClientServerInvalidData}, + "404": {prepare: prepareClientServer404}, + "connection refused": {prepare: prepareClientServerConnectionRefused}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dockerEngine, srv := test.prepare(t) + defer srv.Close() + + assert.Nil(t, dockerEngine.Collect()) + }) + } +} + +func TestDockerEngine_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*DockerEngine, *httptest.Server) + expected map[string]int64 + }{ + "v17.05.0-ce": { + prepare: prepareClientServerV17050CE, + expected: map[string]int64{ + "builder_fails_build_canceled": 1, + "builder_fails_build_target_not_reachable_error": 2, + "builder_fails_command_not_supported_error": 3, + "builder_fails_dockerfile_empty_error": 4, + "builder_fails_dockerfile_syntax_error": 5, + "builder_fails_error_processing_commands_error": 6, + "builder_fails_missing_onbuild_arguments_error": 7, + "builder_fails_unknown_instruction_error": 8, + "container_actions_changes": 1, + "container_actions_commit": 1, + "container_actions_create": 1, + "container_actions_delete": 1, + "container_actions_start": 1, + "health_checks_failed": 33, + }, + }, + "v18.09.3-ce": { + prepare: prepareClientServerV18093CE, + expected: map[string]int64{ + "builder_fails_build_canceled": 1, + "builder_fails_build_target_not_reachable_error": 2, + "builder_fails_command_not_supported_error": 3, + "builder_fails_dockerfile_empty_error": 4, + "builder_fails_dockerfile_syntax_error": 5, + "builder_fails_error_processing_commands_error": 6, + "builder_fails_missing_onbuild_arguments_error": 7, + "builder_fails_unknown_instruction_error": 8, + "container_actions_changes": 1, + "container_actions_commit": 1, + "container_actions_create": 1, + "container_actions_delete": 1, + "container_actions_start": 1, + "container_states_paused": 11, + "container_states_running": 12, + "container_states_stopped": 13, + "health_checks_failed": 33, + }, + }, + "v18.09.3-ce-swarm": { + prepare: prepareClientServerV18093CESwarm, + expected: map[string]int64{ + "builder_fails_build_canceled": 1, + "builder_fails_build_target_not_reachable_error": 2, + "builder_fails_command_not_supported_error": 3, + "builder_fails_dockerfile_empty_error": 4, + "builder_fails_dockerfile_syntax_error": 5, + "builder_fails_error_processing_commands_error": 6, + "builder_fails_missing_onbuild_arguments_error": 7, + "builder_fails_unknown_instruction_error": 8, + "container_actions_changes": 1, + "container_actions_commit": 1, + "container_actions_create": 1, + "container_actions_delete": 1, + "container_actions_start": 1, + "container_states_paused": 11, + "container_states_running": 12, + "container_states_stopped": 13, + "health_checks_failed": 33, + "swarm_manager_configs_total": 1, + "swarm_manager_leader": 1, + "swarm_manager_networks_total": 3, + "swarm_manager_nodes_state_disconnected": 1, + "swarm_manager_nodes_state_down": 2, + "swarm_manager_nodes_state_ready": 3, + 
"swarm_manager_nodes_state_unknown": 4, + "swarm_manager_nodes_total": 10, + "swarm_manager_secrets_total": 1, + "swarm_manager_services_total": 1, + "swarm_manager_tasks_state_accepted": 1, + "swarm_manager_tasks_state_assigned": 2, + "swarm_manager_tasks_state_complete": 3, + "swarm_manager_tasks_state_failed": 4, + "swarm_manager_tasks_state_new": 5, + "swarm_manager_tasks_state_orphaned": 6, + "swarm_manager_tasks_state_pending": 7, + "swarm_manager_tasks_state_preparing": 8, + "swarm_manager_tasks_state_ready": 9, + "swarm_manager_tasks_state_rejected": 10, + "swarm_manager_tasks_state_remove": 11, + "swarm_manager_tasks_state_running": 12, + "swarm_manager_tasks_state_shutdown": 13, + "swarm_manager_tasks_state_starting": 14, + "swarm_manager_tasks_total": 105, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pulsar, srv := test.prepare(t) + defer srv.Close() + + for i := 0; i < 10; i++ { + _ = pulsar.Collect() + } + collected := pulsar.Collect() + + require.NotNil(t, collected) + require.Equal(t, test.expected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, pulsar, collected) + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dockerEngine *DockerEngine, collected map[string]int64) { + t.Helper() + for _, chart := range *dockerEngine.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsV17050CE) + })) + + dockerEngine := New() + dockerEngine.URL = srv.URL + require.True(t, dockerEngine.Init()) + + return dockerEngine, srv +} + +func prepareClientServerV18093CE(t *testing.T) (*DockerEngine, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsV18093CE) + })) + + dockerEngine := New() + dockerEngine.URL = srv.URL + require.True(t, dockerEngine.Init()) + + return dockerEngine, srv +} + +func prepareClientServerV18093CESwarm(t *testing.T) (*DockerEngine, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsV18093CESwarm) + })) + + dockerEngine := New() + dockerEngine.URL = srv.URL + require.True(t, dockerEngine.Init()) + + return dockerEngine, srv +} + +func prepareClientServerNonDockerEngine(t *testing.T) (*DockerEngine, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsNonDockerEngine) + })) + + dockerEngine := New() + dockerEngine.URL = srv.URL + require.True(t, dockerEngine.Init()) + + return dockerEngine, srv +} + +func prepareClientServerInvalidData(t *testing.T) (*DockerEngine, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + dockerEngine := New() + dockerEngine.URL = srv.URL + require.True(t, dockerEngine.Init()) + + return dockerEngine, srv +} + +func prepareClientServer404(t 
+func prepareClientServer404(t *testing.T) (*DockerEngine, *httptest.Server) {
+	t.Helper()
+	srv := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusNotFound)
+		}))
+
+	dockerEngine := New()
+	dockerEngine.URL = srv.URL
+	require.True(t, dockerEngine.Init())
+
+	return dockerEngine, srv
+}
+
+func prepareClientServerConnectionRefused(t *testing.T) (*DockerEngine, *httptest.Server) {
+	t.Helper()
+	srv := httptest.NewServer(nil)
+
+	dockerEngine := New()
+	dockerEngine.URL = "http://127.0.0.1:38001/metrics"
+	require.True(t, dockerEngine.Init())
+
+	return dockerEngine, srv
+}
diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/integrations/docker_engine.md b/src/go/collectors/go.d.plugin/modules/docker_engine/integrations/docker_engine.md
new file mode 100644
index 00000000000000..78b46866dc4d39
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/docker_engine/integrations/docker_engine.md
@@ -0,0 +1,229 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/docker_engine/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/docker_engine/metadata.yaml"
+sidebar_label: "Docker Engine"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Docker Engine
+
+
+<img src="https://netdata.cloud/img/docker.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: docker_engine
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and health of Docker Engine and Docker Swarm.
+
+
+The [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It discovers instances running on localhost by attempting to connect to the Docker metrics endpoint at its default address: `http://127.0.0.1:9323/metrics`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Docker Engine instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |
+| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |
+| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |
+| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |
+| docker_engine.swarm_manager_leader | is_leader | bool |
+| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |
+| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |
+| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable built-in Prometheus exporter
+
+To enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).
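+
+As a quick reference, a minimal `daemon.json` (typically `/etc/docker/daemon.json`) that exposes the metrics endpoint on the default address might look like the sketch below; on older Docker releases the `experimental` flag may also be required, so consult the documentation for your Docker version:
+
+```json
+{
+  "metrics-addr": "127.0.0.1:9323"
+}
+```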
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/docker_engine.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/docker_engine.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9323/metrics | yes |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout, in seconds. | 1 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9323/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9323/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Configuration with HTTPS enabled and a self-signed certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9323/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9323/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9323/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m docker_engine
+  ```
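+
+- If the debug output suggests a connection problem, you can also fetch the exporter endpoint directly to verify it is reachable (assuming the default address):
+
+  ```bash
+  curl http://127.0.0.1:9323/metrics
+  ```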
+
diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/metadata.yaml b/src/go/collectors/go.d.plugin/modules/docker_engine/metadata.yaml
new file mode 100644
index 00000000000000..8f81d4e3531cb3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/docker_engine/metadata.yaml
@@ -0,0 +1,263 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-docker_engine
+      plugin_name: go.d.plugin
+      module_name: docker_engine
+      alternative_monitored_instances: []
+      monitored_instance:
+        name: Docker Engine
+        link: https://docs.docker.com/engine/
+        categories:
+          - data-collection.containers-and-vms
+        icon_filename: docker.svg
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      keywords:
+        - docker
+        - container
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the activity and health of Docker Engine and Docker Swarm.
+        method_description: |
+          The [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            It discovers instances running on localhost by attempting to connect to the Docker metrics endpoint at its default address: `http://127.0.0.1:9323/metrics`.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Enable built-in Prometheus exporter
+            description: |
+              To enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).
+      configuration:
+        file:
+          name: go.d/docker_engine.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency, in seconds.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:9323/metrics
+              required: true
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: timeout
+              description: HTTP request timeout, in seconds.
+              default_value: 1
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: GET
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: "no"
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: "no"
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              folding:
+                enabled: false
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9323/metrics
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9323/metrics
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: Configuration with HTTPS enabled and a self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:9323/metrics
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9323/metrics
+
+                  - name: remote
+                    url: http://192.0.2.1:9323/metrics
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+ labels: [] + metrics: + - name: docker_engine.engine_daemon_container_actions + description: Container Actions + unit: actions/s + chart_type: stacked + dimensions: + - name: changes + - name: commit + - name: create + - name: delete + - name: start + - name: docker_engine.engine_daemon_container_states_containers + description: Containers In Various States + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: paused + - name: stopped + - name: docker_engine.builder_builds_failed_total + description: Builder Builds Fails By Reason + unit: fails/s + chart_type: stacked + dimensions: + - name: build_canceled + - name: build_target_not_reachable_error + - name: command_not_supported_error + - name: dockerfile_empty_error + - name: dockerfile_syntax_error + - name: error_processing_commands_error + - name: missing_onbuild_arguments_error + - name: unknown_instruction_error + - name: docker_engine.engine_daemon_health_checks_failed_total + description: Health Checks + unit: events/s + chart_type: line + dimensions: + - name: fails + - name: docker_engine.swarm_manager_leader + description: Swarm Manager Leader + unit: bool + chart_type: line + dimensions: + - name: is_leader + - name: docker_engine.swarm_manager_object_store + description: Swarm Manager Object Store + unit: objects + chart_type: stacked + dimensions: + - name: nodes + - name: services + - name: tasks + - name: networks + - name: secrets + - name: configs + - name: docker_engine.swarm_manager_nodes_per_state + description: Swarm Manager Nodes Per State + unit: nodes + chart_type: stacked + dimensions: + - name: ready + - name: down + - name: unknown + - name: disconnected + - name: docker_engine.swarm_manager_tasks_per_state + description: Swarm Manager Tasks Per State + unit: tasks + chart_type: stacked + dimensions: + - name: running + - name: failed + - name: ready + - name: rejected + - name: starting + - name: shutdown + - name: new + - name: orphaned + - name: preparing + - name: pending + - name: complete + - name: remove + - name: accepted + - name: assigned diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/metrics.go b/src/go/collectors/go.d.plugin/modules/docker_engine/metrics.go new file mode 100644 index 00000000000000..4c84e839869aff --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/metrics.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +type metrics struct { + Container struct { + Actions struct { + Changes float64 `stm:"changes"` + Commit float64 `stm:"commit"` + Create float64 `stm:"create"` + Delete float64 `stm:"delete"` + Start float64 `stm:"start"` + } `stm:"actions"` + States *containerStates `stm:"states"` + } `stm:"container"` + Builder struct { + FailsByReason struct { + BuildCanceled float64 `stm:"build_canceled"` + BuildTargetNotReachableError float64 `stm:"build_target_not_reachable_error"` + CommandNotSupportedError float64 `stm:"command_not_supported_error"` + DockerfileEmptyError float64 `stm:"dockerfile_empty_error"` + DockerfileSyntaxError float64 `stm:"dockerfile_syntax_error"` + ErrorProcessingCommandsError float64 `stm:"error_processing_commands_error"` + MissingOnbuildArgumentsError float64 `stm:"missing_onbuild_arguments_error"` + UnknownInstructionError float64 `stm:"unknown_instruction_error"` + } `stm:"fails"` + } `stm:"builder"` + HealthChecks struct { + Failed float64 `stm:"failed"` + } `stm:"health_checks"` + SwarmManager *swarmManager `stm:"swarm_manager"` +} + +type 
containerStates struct { + Paused float64 `stm:"paused"` + Running float64 `stm:"running"` + Stopped float64 `stm:"stopped"` +} + +type swarmManager struct { + IsLeader float64 `stm:"leader"` + Configs float64 `stm:"configs_total"` + Networks float64 `stm:"networks_total"` + Secrets float64 `stm:"secrets_total"` + Services float64 `stm:"services_total"` + Nodes struct { + Total float64 `stm:"total"` + PerState struct { + Disconnected float64 `stm:"disconnected"` + Down float64 `stm:"down"` + Ready float64 `stm:"ready"` + Unknown float64 `stm:"unknown"` + } `stm:"state"` + } `stm:"nodes"` + Tasks struct { + Total float64 `stm:"total"` + PerState struct { + Accepted float64 `stm:"accepted"` + Assigned float64 `stm:"assigned"` + Complete float64 `stm:"complete"` + Failed float64 `stm:"failed"` + New float64 `stm:"new"` + Orphaned float64 `stm:"orphaned"` + Pending float64 `stm:"pending"` + Preparing float64 `stm:"preparing"` + Ready float64 `stm:"ready"` + Rejected float64 `stm:"rejected"` + Remove float64 `stm:"remove"` + Running float64 `stm:"running"` + Shutdown float64 `stm:"shutdown"` + Starting float64 `stm:"starting"` + } `stm:"state"` + } `stm:"tasks"` +} diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/non-docker-engine.txt b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/non-docker-engine.txt new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v17.05.0-ce.txt b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v17.05.0-ce.txt new file mode 100644 index 00000000000000..8d175a8e909478 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v17.05.0-ce.txt @@ -0,0 +1,460 @@ +# HELP builder_builds_failed_total Number of failed image builds +# TYPE builder_builds_failed_total counter +builder_builds_failed_total{reason="build_canceled"} 1 +builder_builds_failed_total{reason="build_target_not_reachable_error"} 2 +builder_builds_failed_total{reason="command_not_supported_error"} 3 +builder_builds_failed_total{reason="dockerfile_empty_error"} 4 +builder_builds_failed_total{reason="dockerfile_syntax_error"} 5 +builder_builds_failed_total{reason="error_processing_commands_error"} 6 +builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7 +builder_builds_failed_total{reason="unknown_instruction_error"} 8 +# HELP builder_builds_triggered_total Number of triggered image builds +# TYPE builder_builds_triggered_total counter +builder_builds_triggered_total 0 +# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action +# TYPE engine_daemon_container_actions_seconds histogram +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1 
+engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="changes"} 0 +engine_daemon_container_actions_seconds_count{action="changes"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="commit"} 0 +engine_daemon_container_actions_seconds_count{action="commit"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="create"} 0 +engine_daemon_container_actions_seconds_count{action="create"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="delete"} 0 +engine_daemon_container_actions_seconds_count{action="delete"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1 
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="start"} 0 +engine_daemon_container_actions_seconds_count{action="start"} 1 +# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has +# TYPE engine_daemon_engine_cpus_cpus gauge +engine_daemon_engine_cpus_cpus 4 +# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on +# TYPE engine_daemon_engine_info gauge +engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1 +# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has +# TYPE engine_daemon_engine_memory_bytes gauge +engine_daemon_engine_memory_bytes 2.5215361024e+10 +# HELP engine_daemon_events_subscribers_total The number of current subscribers to events +# TYPE engine_daemon_events_subscribers_total gauge +engine_daemon_events_subscribers_total 0 +# HELP engine_daemon_events_total The number of events logged +# TYPE engine_daemon_events_total counter +engine_daemon_events_total 0 +# HELP engine_daemon_health_checks_failed_total The total number of failed health checks +# TYPE engine_daemon_health_checks_failed_total counter +engine_daemon_health_checks_failed_total 33 +# HELP engine_daemon_health_checks_total The total number of health checks +# TYPE engine_daemon_health_checks_total counter +engine_daemon_health_checks_total 0 +# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot. 
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_sum 0 +etcd_debugging_snap_save_marshalling_duration_seconds_count 0 +# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot. +# TYPE etcd_debugging_snap_save_total_duration_seconds histogram +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_total_duration_seconds_sum 0 +etcd_debugging_snap_save_total_duration_seconds_count 0 +# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal. 
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0 +etcd_disk_wal_fsync_duration_seconds_sum 0 +etcd_disk_wal_fsync_duration_seconds_count 0 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 1.0085e-05 +go_gc_duration_seconds{quantile="0.25"} 3.1991e-05 +go_gc_duration_seconds{quantile="0.5"} 4.8062e-05 +go_gc_duration_seconds{quantile="0.75"} 9.067e-05 +go_gc_duration_seconds{quantile="1"} 0.000175239 +go_gc_duration_seconds_sum 0.000724173 +go_gc_duration_seconds_count 12 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 50 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.13368e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.7343352e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.454057e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 319815 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 2.398208e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 8.13368e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 5.5648256e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.0477568e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 114878 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 5.4738944e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.6125824e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. 
+# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.5528438390886765e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 434693 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 6944 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 159696 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 196608 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.5134512e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.112335e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 983040 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 983040 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.2286456e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="prometheus"} 0 +http_request_duration_microseconds_count{handler="prometheus"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="prometheus"} 0 +http_request_size_bytes_count{handler="prometheus"} 0 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="prometheus"} 0 +http_response_size_bytes_count{handler="prometheus"} 0 +# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer +# TYPE logger_log_entries_size_greater_than_buffer_total counter +logger_log_entries_size_greater_than_buffer_total 0 +# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed +# TYPE logger_log_read_operations_failed_total counter +logger_log_read_operations_failed_total 0 +# HELP logger_log_write_operations_failed_total Number of log write operations that failed +# TYPE logger_log_write_operations_failed_total counter +logger_log_write_operations_failed_total 0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 2.12 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 24 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 8.5929984e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.55284287673e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.257283584e+09 +# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state. 
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0 +swarm_dispatcher_scheduling_delay_seconds_sum 0 +swarm_dispatcher_scheduling_delay_seconds_count 0 +# HELP swarm_manager_configs_total The number of configs in the cluster object store +# TYPE swarm_manager_configs_total gauge +swarm_manager_configs_total 0 +# HELP swarm_manager_leader Indicates if this manager node is a leader +# TYPE swarm_manager_leader gauge +swarm_manager_leader 0 +# HELP swarm_manager_networks_total The number of networks in the cluster object store +# TYPE swarm_manager_networks_total gauge +swarm_manager_networks_total 0 +# HELP swarm_manager_nodes The number of nodes +# TYPE swarm_manager_nodes gauge +swarm_manager_nodes{state="disconnected"} 0 +swarm_manager_nodes{state="down"} 0 +swarm_manager_nodes{state="ready"} 0 +swarm_manager_nodes{state="unknown"} 0 +# HELP swarm_manager_secrets_total The number of secrets in the cluster object store +# TYPE swarm_manager_secrets_total gauge +swarm_manager_secrets_total 0 +# HELP swarm_manager_services_total The number of services in the cluster object store +# TYPE swarm_manager_services_total gauge +swarm_manager_services_total 0 +# HELP swarm_manager_tasks_total The number of tasks in the cluster object store +# TYPE swarm_manager_tasks_total gauge +swarm_manager_tasks_total{state="accepted"} 0 +swarm_manager_tasks_total{state="assigned"} 0 +swarm_manager_tasks_total{state="complete"} 0 +swarm_manager_tasks_total{state="failed"} 0 +swarm_manager_tasks_total{state="new"} 0 +swarm_manager_tasks_total{state="orphaned"} 0 +swarm_manager_tasks_total{state="pending"} 0 +swarm_manager_tasks_total{state="preparing"} 0 +swarm_manager_tasks_total{state="ready"} 0 +swarm_manager_tasks_total{state="rejected"} 0 +swarm_manager_tasks_total{state="remove"} 0 +swarm_manager_tasks_total{state="running"} 0 +swarm_manager_tasks_total{state="shutdown"} 0 +swarm_manager_tasks_total{state="starting"} 0 +# HELP swarm_node_manager Whether this node is a manager or not +# TYPE swarm_node_manager gauge +swarm_node_manager 0 +# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency. 
+# TYPE swarm_raft_snapshot_latency_seconds histogram +swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_snapshot_latency_seconds_sum 0 +swarm_raft_snapshot_latency_seconds_count 0 +# HELP swarm_raft_transaction_latency_seconds Raft transaction latency. +# TYPE swarm_raft_transaction_latency_seconds histogram +swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="10"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_transaction_latency_seconds_sum 0 +swarm_raft_transaction_latency_seconds_count 0 +# HELP swarm_store_batch_latency_seconds Raft store batch latency. +# TYPE swarm_store_batch_latency_seconds histogram +swarm_store_batch_latency_seconds_bucket{le="0.005"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.01"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.025"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.05"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.1"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.25"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="1"} 0 +swarm_store_batch_latency_seconds_bucket{le="2.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="5"} 0 +swarm_store_batch_latency_seconds_bucket{le="10"} 0 +swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_batch_latency_seconds_sum 0 +swarm_store_batch_latency_seconds_count 0 +# HELP swarm_store_lookup_latency_seconds Raft store read latency. 
+# TYPE swarm_store_lookup_latency_seconds histogram +swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="10"} 0 +swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_lookup_latency_seconds_sum 0 +swarm_store_lookup_latency_seconds_count 0 +# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held. +# TYPE swarm_store_memory_store_lock_duration_seconds histogram +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0 +swarm_store_memory_store_lock_duration_seconds_sum 0 +swarm_store_memory_store_lock_duration_seconds_count 0 +# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency. +# TYPE swarm_store_read_tx_latency_seconds histogram +swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="10"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_read_tx_latency_seconds_sum 0 +swarm_store_read_tx_latency_seconds_count 0 +# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency. 
+# TYPE swarm_store_write_tx_latency_seconds histogram +swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="1"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="10"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_write_tx_latency_seconds_sum 0 +swarm_store_write_tx_latency_seconds_count 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt new file mode 100644 index 00000000000000..edd69abee680b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt @@ -0,0 +1,468 @@ +# HELP builder_builds_failed_total Number of failed image builds +# TYPE builder_builds_failed_total counter +builder_builds_failed_total{reason="build_canceled"} 1 +builder_builds_failed_total{reason="build_target_not_reachable_error"} 2 +builder_builds_failed_total{reason="command_not_supported_error"} 3 +builder_builds_failed_total{reason="dockerfile_empty_error"} 4 +builder_builds_failed_total{reason="dockerfile_syntax_error"} 5 +builder_builds_failed_total{reason="error_processing_commands_error"} 6 +builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7 +builder_builds_failed_total{reason="unknown_instruction_error"} 8 +# HELP builder_builds_triggered_total Number of triggered image builds +# TYPE builder_builds_triggered_total counter +builder_builds_triggered_total 0 +# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action +# TYPE engine_daemon_container_actions_seconds histogram +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="changes"} 0 +engine_daemon_container_actions_seconds_count{action="changes"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1 
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="commit"} 0 +engine_daemon_container_actions_seconds_count{action="commit"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="create"} 0 +engine_daemon_container_actions_seconds_count{action="create"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="delete"} 0 +engine_daemon_container_actions_seconds_count{action="delete"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1 
+engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="start"} 0 +engine_daemon_container_actions_seconds_count{action="start"} 1 +# HELP engine_daemon_container_states_containers The count of containers in various states +# TYPE engine_daemon_container_states_containers gauge +engine_daemon_container_states_containers{state="paused"} 11 +engine_daemon_container_states_containers{state="running"} 12 +engine_daemon_container_states_containers{state="stopped"} 13 +# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has +# TYPE engine_daemon_engine_cpus_cpus gauge +engine_daemon_engine_cpus_cpus 4 +# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on +# TYPE engine_daemon_engine_info gauge +engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1 +# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has +# TYPE engine_daemon_engine_memory_bytes gauge +engine_daemon_engine_memory_bytes 2.5215361024e+10 +# HELP engine_daemon_events_subscribers_total The number of current subscribers to events +# TYPE engine_daemon_events_subscribers_total gauge +engine_daemon_events_subscribers_total 0 +# HELP engine_daemon_events_total The number of events logged +# TYPE engine_daemon_events_total counter +engine_daemon_events_total 0 +# HELP engine_daemon_health_checks_failed_total The total number of failed health checks +# TYPE engine_daemon_health_checks_failed_total counter +engine_daemon_health_checks_failed_total 33 +# HELP engine_daemon_health_checks_total The total number of health checks +# TYPE engine_daemon_health_checks_total counter +engine_daemon_health_checks_total 0 +# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot. 
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_sum 0 +etcd_debugging_snap_save_marshalling_duration_seconds_count 0 +# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot. +# TYPE etcd_debugging_snap_save_total_duration_seconds histogram +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_total_duration_seconds_sum 0 +etcd_debugging_snap_save_total_duration_seconds_count 0 +# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal. 
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0 +etcd_disk_wal_fsync_duration_seconds_sum 0 +etcd_disk_wal_fsync_duration_seconds_count 0 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 1.0085e-05 +go_gc_duration_seconds{quantile="0.25"} 3.1991e-05 +go_gc_duration_seconds{quantile="0.5"} 4.8062e-05 +go_gc_duration_seconds{quantile="0.75"} 9.067e-05 +go_gc_duration_seconds{quantile="1"} 0.000175239 +go_gc_duration_seconds_sum 0.000724173 +go_gc_duration_seconds_count 12 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 50 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.13368e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.7343352e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.454057e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 319815 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 2.398208e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 8.13368e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 5.5648256e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.0477568e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 114878 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 5.4738944e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.6125824e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. 
+# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.5528438390886765e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 434693 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 6944 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 159696 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 196608 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.5134512e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.112335e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 983040 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 983040 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.2286456e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="prometheus"} 0 +http_request_duration_microseconds_count{handler="prometheus"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="prometheus"} 0 +http_request_size_bytes_count{handler="prometheus"} 0 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="prometheus"} 0 +http_response_size_bytes_count{handler="prometheus"} 0 +# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer +# TYPE logger_log_entries_size_greater_than_buffer_total counter +logger_log_entries_size_greater_than_buffer_total 0 +# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed +# TYPE logger_log_read_operations_failed_total counter +logger_log_read_operations_failed_total 0 +# HELP logger_log_write_operations_failed_total Number of log write operations that failed +# TYPE logger_log_write_operations_failed_total counter +logger_log_write_operations_failed_total 0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 2.12 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 24 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 8.5929984e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.55284287673e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.257283584e+09 +# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state. 
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0 +swarm_dispatcher_scheduling_delay_seconds_sum 0 +swarm_dispatcher_scheduling_delay_seconds_count 0 +# HELP swarm_manager_configs_total The number of configs in the cluster object store +# TYPE swarm_manager_configs_total gauge +swarm_manager_configs_total 1 +# HELP swarm_manager_leader Indicates if this manager node is a leader +# TYPE swarm_manager_leader gauge +swarm_manager_leader 1 +# HELP swarm_manager_networks_total The number of networks in the cluster object store +# TYPE swarm_manager_networks_total gauge +swarm_manager_networks_total 3 +# HELP swarm_manager_nodes The number of nodes +# TYPE swarm_manager_nodes gauge +swarm_manager_nodes{state="disconnected"} 1 +swarm_manager_nodes{state="down"} 2 +swarm_manager_nodes{state="ready"} 3 +swarm_manager_nodes{state="unknown"} 4 +# HELP swarm_manager_secrets_total The number of secrets in the cluster object store +# TYPE swarm_manager_secrets_total gauge +swarm_manager_secrets_total 1 +# HELP swarm_manager_services_total The number of services in the cluster object store +# TYPE swarm_manager_services_total gauge +swarm_manager_services_total 1 +# HELP swarm_manager_tasks_total The number of tasks in the cluster object store +# TYPE swarm_manager_tasks_total gauge +swarm_manager_tasks_total{state="accepted"} 1 +swarm_manager_tasks_total{state="assigned"} 2 +swarm_manager_tasks_total{state="complete"} 3 +swarm_manager_tasks_total{state="failed"} 4 +swarm_manager_tasks_total{state="new"} 5 +swarm_manager_tasks_total{state="orphaned"} 6 +swarm_manager_tasks_total{state="pending"} 7 +swarm_manager_tasks_total{state="preparing"} 8 +swarm_manager_tasks_total{state="ready"} 9 +swarm_manager_tasks_total{state="rejected"} 10 +swarm_manager_tasks_total{state="remove"} 11 +swarm_manager_tasks_total{state="running"} 12 +swarm_manager_tasks_total{state="shutdown"} 13 +swarm_manager_tasks_total{state="starting"} 14 +# HELP swarm_node_info Information related to the swarm +# TYPE swarm_node_info gauge +swarm_node_info{node_id="193816ofdqsg9kkm0hkfladvo",swarm_id="k1a6iu49n97a1vej3u5pjgsbr"} 1 +# HELP swarm_node_manager Whether this node is a manager or not +# TYPE swarm_node_manager gauge +swarm_node_manager 1 +# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency. 
+# TYPE swarm_raft_snapshot_latency_seconds histogram +swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_snapshot_latency_seconds_sum 0 +swarm_raft_snapshot_latency_seconds_count 0 +# HELP swarm_raft_transaction_latency_seconds Raft transaction latency. +# TYPE swarm_raft_transaction_latency_seconds histogram +swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="10"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_transaction_latency_seconds_sum 0 +swarm_raft_transaction_latency_seconds_count 0 +# HELP swarm_store_batch_latency_seconds Raft store batch latency. +# TYPE swarm_store_batch_latency_seconds histogram +swarm_store_batch_latency_seconds_bucket{le="0.005"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.01"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.025"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.05"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.1"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.25"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="1"} 0 +swarm_store_batch_latency_seconds_bucket{le="2.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="5"} 0 +swarm_store_batch_latency_seconds_bucket{le="10"} 0 +swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_batch_latency_seconds_sum 0 +swarm_store_batch_latency_seconds_count 0 +# HELP swarm_store_lookup_latency_seconds Raft store read latency. 
+# TYPE swarm_store_lookup_latency_seconds histogram +swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="10"} 0 +swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_lookup_latency_seconds_sum 0 +swarm_store_lookup_latency_seconds_count 0 +# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held. +# TYPE swarm_store_memory_store_lock_duration_seconds histogram +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0 +swarm_store_memory_store_lock_duration_seconds_sum 0 +swarm_store_memory_store_lock_duration_seconds_count 0 +# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency. +# TYPE swarm_store_read_tx_latency_seconds histogram +swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="10"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_read_tx_latency_seconds_sum 0 +swarm_store_read_tx_latency_seconds_count 0 +# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency. 
+# TYPE swarm_store_write_tx_latency_seconds histogram +swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="1"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="5"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="10"} 0 +swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_write_tx_latency_seconds_sum 0 +swarm_store_write_tx_latency_seconds_count 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce.txt b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce.txt new file mode 100644 index 00000000000000..b545892102b936 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/docker_engine/testdata/v18.09.3-ce.txt @@ -0,0 +1,465 @@ +# HELP builder_builds_failed_total Number of failed image builds +# TYPE builder_builds_failed_total counter +builder_builds_failed_total{reason="build_canceled"} 1 +builder_builds_failed_total{reason="build_target_not_reachable_error"} 2 +builder_builds_failed_total{reason="command_not_supported_error"} 3 +builder_builds_failed_total{reason="dockerfile_empty_error"} 4 +builder_builds_failed_total{reason="dockerfile_syntax_error"} 5 +builder_builds_failed_total{reason="error_processing_commands_error"} 6 +builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7 +builder_builds_failed_total{reason="unknown_instruction_error"} 8 +# HELP builder_builds_triggered_total Number of triggered image builds +# TYPE builder_builds_triggered_total counter +builder_builds_triggered_total 0 +# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action +# TYPE engine_daemon_container_actions_seconds histogram +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="changes"} 0 +engine_daemon_container_actions_seconds_count{action="changes"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1 
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="commit"} 0 +engine_daemon_container_actions_seconds_count{action="commit"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="create"} 0 +engine_daemon_container_actions_seconds_count{action="create"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="delete"} 0 +engine_daemon_container_actions_seconds_count{action="delete"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1 
+engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1 +engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1 +engine_daemon_container_actions_seconds_sum{action="start"} 0 +engine_daemon_container_actions_seconds_count{action="start"} 1 +# HELP engine_daemon_container_states_containers The count of containers in various states +# TYPE engine_daemon_container_states_containers gauge +engine_daemon_container_states_containers{state="paused"} 11 +engine_daemon_container_states_containers{state="running"} 12 +engine_daemon_container_states_containers{state="stopped"} 13 +# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has +# TYPE engine_daemon_engine_cpus_cpus gauge +engine_daemon_engine_cpus_cpus 4 +# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on +# TYPE engine_daemon_engine_info gauge +engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1 +# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has +# TYPE engine_daemon_engine_memory_bytes gauge +engine_daemon_engine_memory_bytes 2.5215361024e+10 +# HELP engine_daemon_events_subscribers_total The number of current subscribers to events +# TYPE engine_daemon_events_subscribers_total gauge +engine_daemon_events_subscribers_total 0 +# HELP engine_daemon_events_total The number of events logged +# TYPE engine_daemon_events_total counter +engine_daemon_events_total 0 +# HELP engine_daemon_health_checks_failed_total The total number of failed health checks +# TYPE engine_daemon_health_checks_failed_total counter +engine_daemon_health_checks_failed_total 33 +# HELP engine_daemon_health_checks_total The total number of health checks +# TYPE engine_daemon_health_checks_total counter +engine_daemon_health_checks_total 0 +# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot. 
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_marshalling_duration_seconds_sum 0 +etcd_debugging_snap_save_marshalling_duration_seconds_count 0 +# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot. +# TYPE etcd_debugging_snap_save_total_duration_seconds histogram +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0 +etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0 +etcd_debugging_snap_save_total_duration_seconds_sum 0 +etcd_debugging_snap_save_total_duration_seconds_count 0 +# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal. 
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0 +etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0 +etcd_disk_wal_fsync_duration_seconds_sum 0 +etcd_disk_wal_fsync_duration_seconds_count 0 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 1.0085e-05 +go_gc_duration_seconds{quantile="0.25"} 3.1991e-05 +go_gc_duration_seconds{quantile="0.5"} 4.8062e-05 +go_gc_duration_seconds{quantile="0.75"} 9.067e-05 +go_gc_duration_seconds{quantile="1"} 0.000175239 +go_gc_duration_seconds_sum 0.000724173 +go_gc_duration_seconds_count 12 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 50 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.13368e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.7343352e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.454057e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 319815 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 2.398208e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 8.13368e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 5.5648256e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.0477568e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 114878 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 5.4738944e+07 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.6125824e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. 
+# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.5528438390886765e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 434693 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 6944 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 159696 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 196608 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.5134512e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 1.112335e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 983040 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 983040 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.2286456e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="prometheus"} 0 +http_request_duration_microseconds_count{handler="prometheus"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="prometheus"} 0 +http_request_size_bytes_count{handler="prometheus"} 0 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN +http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="prometheus"} 0 +http_response_size_bytes_count{handler="prometheus"} 0 +# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer +# TYPE logger_log_entries_size_greater_than_buffer_total counter +logger_log_entries_size_greater_than_buffer_total 0 +# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed +# TYPE logger_log_read_operations_failed_total counter +logger_log_read_operations_failed_total 0 +# HELP logger_log_write_operations_failed_total Number of log write operations that failed +# TYPE logger_log_write_operations_failed_total counter +logger_log_write_operations_failed_total 0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 2.12 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 24 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 8.5929984e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.55284287673e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.257283584e+09 +# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state. 
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0 +swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0 +swarm_dispatcher_scheduling_delay_seconds_sum 0 +swarm_dispatcher_scheduling_delay_seconds_count 0 +# HELP swarm_manager_configs_total The number of configs in the cluster object store +# TYPE swarm_manager_configs_total gauge +swarm_manager_configs_total 0 +# HELP swarm_manager_leader Indicates if this manager node is a leader +# TYPE swarm_manager_leader gauge +swarm_manager_leader 0 +# HELP swarm_manager_networks_total The number of networks in the cluster object store +# TYPE swarm_manager_networks_total gauge +swarm_manager_networks_total 0 +# HELP swarm_manager_nodes The number of nodes +# TYPE swarm_manager_nodes gauge +swarm_manager_nodes{state="disconnected"} 0 +swarm_manager_nodes{state="down"} 0 +swarm_manager_nodes{state="ready"} 0 +swarm_manager_nodes{state="unknown"} 0 +# HELP swarm_manager_secrets_total The number of secrets in the cluster object store +# TYPE swarm_manager_secrets_total gauge +swarm_manager_secrets_total 0 +# HELP swarm_manager_services_total The number of services in the cluster object store +# TYPE swarm_manager_services_total gauge +swarm_manager_services_total 0 +# HELP swarm_manager_tasks_total The number of tasks in the cluster object store +# TYPE swarm_manager_tasks_total gauge +swarm_manager_tasks_total{state="accepted"} 0 +swarm_manager_tasks_total{state="assigned"} 0 +swarm_manager_tasks_total{state="complete"} 0 +swarm_manager_tasks_total{state="failed"} 0 +swarm_manager_tasks_total{state="new"} 0 +swarm_manager_tasks_total{state="orphaned"} 0 +swarm_manager_tasks_total{state="pending"} 0 +swarm_manager_tasks_total{state="preparing"} 0 +swarm_manager_tasks_total{state="ready"} 0 +swarm_manager_tasks_total{state="rejected"} 0 +swarm_manager_tasks_total{state="remove"} 0 +swarm_manager_tasks_total{state="running"} 0 +swarm_manager_tasks_total{state="shutdown"} 0 +swarm_manager_tasks_total{state="starting"} 0 +# HELP swarm_node_manager Whether this node is a manager or not +# TYPE swarm_node_manager gauge +swarm_node_manager 0 +# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency. 
+# TYPE swarm_raft_snapshot_latency_seconds histogram +swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0 +swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_snapshot_latency_seconds_sum 0 +swarm_raft_snapshot_latency_seconds_count 0 +# HELP swarm_raft_transaction_latency_seconds Raft transaction latency. +# TYPE swarm_raft_transaction_latency_seconds histogram +swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="1"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="5"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="10"} 0 +swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0 +swarm_raft_transaction_latency_seconds_sum 0 +swarm_raft_transaction_latency_seconds_count 0 +# HELP swarm_store_batch_latency_seconds Raft store batch latency. +# TYPE swarm_store_batch_latency_seconds histogram +swarm_store_batch_latency_seconds_bucket{le="0.005"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.01"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.025"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.05"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.1"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.25"} 0 +swarm_store_batch_latency_seconds_bucket{le="0.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="1"} 0 +swarm_store_batch_latency_seconds_bucket{le="2.5"} 0 +swarm_store_batch_latency_seconds_bucket{le="5"} 0 +swarm_store_batch_latency_seconds_bucket{le="10"} 0 +swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_batch_latency_seconds_sum 0 +swarm_store_batch_latency_seconds_count 0 +# HELP swarm_store_lookup_latency_seconds Raft store read latency. 
+# TYPE swarm_store_lookup_latency_seconds histogram +swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0 +swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="1"} 0 +swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="5"} 0 +swarm_store_lookup_latency_seconds_bucket{le="10"} 0 +swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_lookup_latency_seconds_sum 0 +swarm_store_lookup_latency_seconds_count 0 +# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held. +# TYPE swarm_store_memory_store_lock_duration_seconds histogram +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0 +swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0 +swarm_store_memory_store_lock_duration_seconds_sum 0 +swarm_store_memory_store_lock_duration_seconds_count 0 +# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency. +# TYPE swarm_store_read_tx_latency_seconds histogram +swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="1"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="5"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="10"} 0 +swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0 +swarm_store_read_tx_latency_seconds_sum 0 +swarm_store_read_tx_latency_seconds_count 0 +# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency. 
+# TYPE swarm_store_write_tx_latency_seconds histogram
+swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_write_tx_latency_seconds_sum 0
+swarm_store_write_tx_latency_seconds_count 0
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/README.md b/src/go/collectors/go.d.plugin/modules/dockerhub/README.md
new file mode 120000
index 00000000000000..703add4ed1d8ab
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/README.md
@@ -0,0 +1 @@
+integrations/docker_hub_repository.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/apiclient.go b/src/go/collectors/go.d.plugin/modules/dockerhub/apiclient.go
new file mode 100644
index 00000000000000..a4d3497007dc82
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/apiclient.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+type repository struct {
+	User        string
+	Name        string
+	Status      int
+	StarCount   int    `json:"star_count"`
+	PullCount   int    `json:"pull_count"`
+	LastUpdated string `json:"last_updated"`
+}
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+	return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+	httpClient *http.Client
+	request    web.Request
+}
+
+func (a apiClient) getRepository(repoName string) (*repository, error) {
+	req, err := a.createRequest(repoName)
+	if err != nil {
+		return nil, fmt.Errorf("error creating HTTP request: %v", err)
+	}
+
+	resp, err := a.doRequestOK(req)
+	defer closeBody(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	var repo repository
+	if err := json.NewDecoder(resp.Body).Decode(&repo); err != nil {
+		return nil, fmt.Errorf("error parsing response from %s: %v", req.URL, err)
+	}
+
+	return &repo, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+	resp, err := a.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("error on request: %v", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+	}
+	return resp, nil
+}
+
+func (a apiClient) createRequest(urlPath string) (*http.Request, error) {
+	req := a.request.Copy()
+	u, err := url.Parse(req.URL)
+	if err != nil {
+		return nil, err
+	}
+
+	u.Path = path.Join(u.Path, urlPath)
+	req.URL = u.String()
+	return web.NewHTTPRequest(req)
+}
+
+func closeBody(resp *http.Response) {
+	if resp != nil && resp.Body != nil {
+		_, _ = io.Copy(io.Discard, resp.Body)
+		_ = resp.Body.Close()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/charts.go b/src/go/collectors/go.d.plugin/modules/dockerhub/charts.go
new file mode 100644
index 00000000000000..ba7adc930946f8
00000000000000..ba7adc930946f8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/charts.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dockerhub + +import ( + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims + // Dim is an alias for module.Dim + Dim = module.Dim +) + +var charts = Charts{ + { + ID: "pulls_sum", + Title: "Pulls Summary", + Units: "pulls", + Fam: "pulls", + Dims: Dims{ + {ID: "pull_sum", Name: "sum"}, + }, + }, + { + ID: "pulls", + Title: "Pulls", + Units: "pulls", + Fam: "pulls", + Type: module.Stacked, + }, + { + ID: "pulls_rate", + Title: "Pulls Rate", + Units: "pulls/s", + Fam: "pulls", + Type: module.Stacked, + }, + { + ID: "stars", + Title: "Stars", + Units: "stars", + Fam: "stars", + Type: module.Stacked, + }, + { + ID: "status", + Title: "Current Status", + Units: "status", + Fam: "status", + }, + { + ID: "last_updated", + Title: "Time Since Last Updated", + Units: "seconds", + Fam: "last updated", + }, +} + +func addReposToCharts(repositories []string, cs *Charts) { + for _, name := range repositories { + dimName := strings.Replace(name, "/", "_", -1) + _ = cs.Get("pulls").AddDim(&Dim{ + ID: "pull_count_" + name, + Name: dimName, + }) + _ = cs.Get("pulls_rate").AddDim(&Dim{ + ID: "pull_count_" + name, + Name: dimName, + Algo: module.Incremental, + }) + _ = cs.Get("stars").AddDim(&Dim{ + ID: "star_count_" + name, + Name: dimName, + }) + _ = cs.Get("status").AddDim(&Dim{ + ID: "status_" + name, + Name: dimName, + }) + _ = cs.Get("last_updated").AddDim(&Dim{ + ID: "last_updated_" + name, + Name: dimName, + }) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/collect.go b/src/go/collectors/go.d.plugin/modules/dockerhub/collect.go new file mode 100644 index 00000000000000..211c1ea7cac07b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/collect.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dockerhub + +import ( + "fmt" + "time" +) + +func (dh *DockerHub) collect() (map[string]int64, error) { + var ( + reposNum = len(dh.Repositories) + ch = make(chan *repository, reposNum) + mx = make(map[string]int64) + ) + + for _, name := range dh.Repositories { + go dh.collectRepo(name, ch) + } + + var ( + parsed int + pullSum int + ) + + for i := 0; i < reposNum; i++ { + repo := <-ch + if repo == nil { + continue + } + if err := parseRepoTo(repo, mx); err != nil { + dh.Errorf("error on parsing %s/%s : %v", repo.User, repo.Name, err) + continue + } + pullSum += repo.PullCount + parsed++ + } + close(ch) + + if parsed == reposNum { + mx["pull_sum"] = int64(pullSum) + } + + return mx, nil +} + +func (dh *DockerHub) collectRepo(repoName string, ch chan *repository) { + repo, err := dh.client.getRepository(repoName) + if err != nil { + dh.Error(err) + } + ch <- repo +} + +func parseRepoTo(repo *repository, mx map[string]int64) error { + t, err := time.Parse(time.RFC3339Nano, repo.LastUpdated) + if err != nil { + return err + } + mx[fmt.Sprintf("last_updated_%s/%s", repo.User, repo.Name)] = int64(time.Since(t).Seconds()) + mx[fmt.Sprintf("star_count_%s/%s", repo.User, repo.Name)] = int64(repo.StarCount) + mx[fmt.Sprintf("pull_count_%s/%s", repo.User, repo.Name)] = int64(repo.PullCount) + mx[fmt.Sprintf("status_%s/%s", repo.User, repo.Name)] = int64(repo.Status) + return nil +} diff --git 
a/src/go/collectors/go.d.plugin/modules/dockerhub/config_schema.json b/src/go/collectors/go.d.plugin/modules/dockerhub/config_schema.json
new file mode 100644
index 00000000000000..1be293e6f1fe71
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/config_schema.json
@@ -0,0 +1,65 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/dockerhub job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "url": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    },
+    "repositories": {
+      "type": "array",
+      "items": {
+        "type": "string"
+      }
+    },
+    "username": {
+      "type": "string"
+    },
+    "password": {
+      "type": "string"
+    },
+    "proxy_url": {
+      "type": "string"
+    },
+    "proxy_username": {
+      "type": "string"
+    },
+    "proxy_password": {
+      "type": "string"
+    },
+    "headers": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      }
+    },
+    "not_follow_redirects": {
+      "type": "boolean"
+    },
+    "tls_ca": {
+      "type": "string"
+    },
+    "tls_cert": {
+      "type": "string"
+    },
+    "tls_key": {
+      "type": "string"
+    },
+    "insecure_skip_verify": {
+      "type": "boolean"
+    }
+  },
+  "required": [
+    "name",
+    "repositories"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub.go b/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub.go
new file mode 100644
index 00000000000000..48836a6068f4f5
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+	_ "embed"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+const (
+	defaultURL         = "https://hub.docker.com/v2/repositories"
+	defaultHTTPTimeout = time.Second * 2
+
+	defaultUpdateEvery = 5
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("dockerhub", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			UpdateEvery: defaultUpdateEvery,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+// New creates DockerHub with default values.
+func New() *DockerHub {
+	config := Config{
+		HTTP: web.HTTP{
+			Request: web.Request{
+				URL: defaultURL,
+			},
+			Client: web.Client{
+				Timeout: web.Duration{Duration: defaultHTTPTimeout},
+			},
+		},
+	}
+	return &DockerHub{
+		Config: config,
+	}
+}
+
+// Config is the DockerHub module configuration.
+type Config struct {
+	web.HTTP     `yaml:",inline"`
+	Repositories []string
+}
+
+// DockerHub is the DockerHub module.
+type DockerHub struct {
+	module.Base
+	Config `yaml:",inline"`
+	client *apiClient
+}
+
+// Cleanup performs cleanup.
+func (DockerHub) Cleanup() {}
+
+// Init initializes the module.
+func (dh *DockerHub) Init() bool {
+	if dh.URL == "" {
+		dh.Error("URL not set")
+		return false
+	}
+
+	if len(dh.Repositories) == 0 {
+		dh.Error("repositories parameter is not set")
+		return false
+	}
+
+	client, err := web.NewHTTPClient(dh.Client)
+	if err != nil {
+		dh.Errorf("error on creating http client : %v", err)
+		return false
+	}
+	dh.client = newAPIClient(client, dh.Request)
+
+	return true
+}
+
+// Check checks if data collection is possible.
+func (dh DockerHub) Check() bool {
+	return len(dh.Collect()) > 0
+}
+
+// Charts returns the charts.
+func (dh DockerHub) Charts() *Charts {
+	cs := charts.Copy()
+	addReposToCharts(dh.Repositories, cs)
+	return cs
+}
+
+// Collect collects metrics.
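+// It returns nil if the collection attempt fails.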
+func (dh *DockerHub) Collect() map[string]int64 { + mx, err := dh.collect() + + if err != nil { + dh.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub_test.go b/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub_test.go new file mode 100644 index 00000000000000..350af1a539ebb2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/dockerhub_test.go @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dockerhub + +import ( + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + repo1Data, _ = os.ReadFile("testdata/repo1.txt") + repo2Data, _ = os.ReadFile("testdata/repo2.txt") + repo3Data, _ = os.ReadFile("testdata/repo3.txt") +) + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*DockerHub)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) + assert.Len(t, job.Repositories, 0) + assert.Nil(t, job.client) +} + +func TestDockerHub_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() } + +func TestDockerHub_Init(t *testing.T) { + job := New() + job.Repositories = []string{"name/repo"} + assert.True(t, job.Init()) + assert.NotNil(t, job.client) +} + +func TestDockerHub_InitNG(t *testing.T) { assert.False(t, New().Init()) } + +func TestDockerHub_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "name1/repo1"): + _, _ = w.Write(repo1Data) + case strings.HasSuffix(r.URL.Path, "name2/repo2"): + _, _ = w.Write(repo2Data) + case strings.HasSuffix(r.URL.Path, "name3/repo3"): + _, _ = w.Write(repo3Data) + } + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestDockerHub_CheckNG(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/metrics" + job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestDockerHub_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(r.URL.Path, "name1/repo1"): + _, _ = w.Write(repo1Data) + case strings.HasSuffix(r.URL.Path, "name2/repo2"): + _, _ = w.Write(repo2Data) + case strings.HasSuffix(r.URL.Path, "name3/repo3"): + _, _ = w.Write(repo3Data) + } + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "star_count_user1/name1": 45, + "pull_count_user1/name1": 18540191, + "status_user1/name1": 1, + "star_count_user2/name2": 45, + "pull_count_user2/name2": 18540192, + "status_user2/name2": 1, + "star_count_user3/name3": 45, + "pull_count_user3/name3": 18540193, + "status_user3/name3": 1, + "pull_sum": 55620576, + } + + collected := job.Collect() + + for k := range collected { + if strings.HasPrefix(k, "last") { + delete(collected, k) + } + } + assert.Equal(t, expected, collected) +} + +func TestDockerHub_InvalidData(t *testing.T) { + ts := httptest.NewServer( + 
http.HandlerFunc(
+			func(w http.ResponseWriter, r *http.Request) {
+				_, _ = w.Write([]byte("hello and goodbye"))
+			}))
+	defer ts.Close()
+
+	job := New()
+	job.URL = ts.URL
+	job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+	require.True(t, job.Init())
+	assert.False(t, job.Check())
+}
+
+func TestDockerHub_404(t *testing.T) {
+	ts := httptest.NewServer(
+		http.HandlerFunc(
+			func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusNotFound)
+			}))
+	defer ts.Close()
+
+	job := New()
+	job.URL = ts.URL
+	job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+	require.True(t, job.Init())
+	assert.False(t, job.Check())
+}
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/integrations/docker_hub_repository.md b/src/go/collectors/go.d.plugin/modules/dockerhub/integrations/docker_hub_repository.md
new file mode 100644
index 00000000000000..d846146743cf8e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/integrations/docker_hub_repository.md
@@ -0,0 +1,175 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/dockerhub/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/dockerhub/metadata.yaml"
+sidebar_label: "Docker Hub repository"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Docker Hub repository
+
+
+<img src="https://netdata.cloud/img/docker.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dockerhub
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector keeps track of DockerHub repository statistics, such as the number of stars, pulls, current status, and more.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Docker Hub repository instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dockerhub.pulls_sum | sum | pulls |
+| dockerhub.pulls | a dimension per repository | pulls |
+| dockerhub.pulls_rate | a dimension per repository | pulls/s |
+| dockerhub.stars | a dimension per repository | stars |
+| dockerhub.status | a dimension per repository | status |
+| dockerhub.last_updated | a dimension per repository | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dockerhub.conf`.
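+In a typical installation, the full path is `/etc/netdata/go.d/dockerhub.conf` (or `/opt/netdata/etc/netdata/go.d/dockerhub.conf` for static installs).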
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dockerhub.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |
+| repositories | List of repositories to monitor. | | yes |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: dockerhub
+    repositories:
+      - 'user1/name1'
+      - 'user2/name2'
+      - 'user3/name3'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m dockerhub
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/metadata.yaml b/src/go/collectors/go.d.plugin/modules/dockerhub/metadata.yaml
new file mode 100644
index 00000000000000..605d6c1cb9d736
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/dockerhub/metadata.yaml
@@ -0,0 +1,190 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-dockerhub
+      plugin_name: go.d.plugin
+      module_name: dockerhub
+      monitored_instance:
+        name: Docker Hub repository
+        link: https://hub.docker.com/
+        icon_filename: docker.svg
+        categories:
+          - data-collection.containers-and-vms # FIXME
+      keywords:
+        - dockerhub
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector keeps track of DockerHub repository statistics, such as the number of stars, pulls, current status, and more.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/dockerhub.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: DockerHub URL.
+              default_value: https://hub.docker.com/v2/repositories
+              required: true
+            - name: repositories
+              description: List of repositories to monitor.
+              default_value: ""
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 2
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: dockerhub + repositories: + - 'user1/name1' + - 'user2/name2' + - 'user3/name3' + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: dockerhub.pulls_sum + description: Pulls Summary + unit: pulls + chart_type: line + dimensions: + - name: sum + - name: dockerhub.pulls + description: Pulls + unit: pulls + chart_type: stacked + dimensions: + - name: a dimension per repository + - name: dockerhub.pulls_rate + description: Pulls Rate + unit: pulls/s + chart_type: stacked + dimensions: + - name: a dimension per repository + - name: dockerhub.stars + description: Stars + unit: stars + chart_type: stacked + dimensions: + - name: a dimension per repository + - name: dockerhub.status + description: Current Status + unit: status + chart_type: line + dimensions: + - name: a dimension per repository + - name: dockerhub.last_updated + description: Time Since Last Updated + unit: seconds + chart_type: line + dimensions: + - name: a dimension per repository diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo1.txt b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo1.txt new file mode 100644 index 00000000000000..b67e2f3825e615 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo1.txt @@ -0,0 +1,22 @@ +{ + "user": "user1", + "name": "name1", + "namespace": "namespace", + "repository_type": "image", + "status": 1, + "description": "Description.", + "is_private": false, + "is_automated": false, + "can_edit": false, + "star_count": 45, + "pull_count": 18540191, + "last_updated": "2019-03-28T21:26:05.527650Z", + "is_migrated": false, + "has_starred": false, + "affiliation": null, + "permissions": { + "read": true, + "write": false, + "admin": false + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo2.txt b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo2.txt new file mode 100644 index 00000000000000..e84ba989ba9a9a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo2.txt @@ -0,0 +1,22 @@ +{ + "user": "user2", + "name": "name2", + "namespace": "namespace", + "repository_type": "image", + "status": 1, + "description": "Description.", + "is_private": false, + "is_automated": false, + "can_edit": false, + "star_count": 45, + "pull_count": 18540192, + "last_updated": "2019-03-28T21:26:05.527650Z", + "is_migrated": false, + "has_starred": false, + "affiliation": null, + "permissions": { + "read": true, + "write": false, + "admin": false + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo3.txt b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo3.txt new file mode 100644 index 00000000000000..1fc64a9c38a660 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/dockerhub/testdata/repo3.txt @@ -0,0 +1,22 @@ +{ + "user": "user3", + "name": "name3", + "namespace": 
"namespace", + "repository_type": "image", + "status": 1, + "description": "Description.", + "is_private": false, + "is_automated": false, + "can_edit": false, + "star_count": 45, + "pull_count": 18540193, + "last_updated": "2019-03-28T21:26:05.527650Z", + "is_migrated": false, + "has_starred": false, + "affiliation": null, + "permissions": { + "read": true, + "write": false, + "admin": false + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/README.md b/src/go/collectors/go.d.plugin/modules/elasticsearch/README.md new file mode 120000 index 00000000000000..8951ff7b29405b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/README.md @@ -0,0 +1 @@ +integrations/elasticsearch.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/charts.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/charts.go new file mode 100644 index 00000000000000..4bfb40a198d1a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/charts.go @@ -0,0 +1,845 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioNodeIndicesIndexingOps = module.Priority + iota + prioNodeIndicesIndexingOpsCurrent + prioNodeIndicesIndexingOpsTime + prioNodeIndicesSearchOps + prioNodeIndicesSearchOpsCurrent + prioNodeIndicesSearchOpsTime + prioNodeIndicesRefreshOps + prioNodeIndicesRefreshOpsTime + prioNodeIndicesFlushOps + prioNodeIndicesFlushOpsTime + prioNodeIndicesFieldDataMemoryUsage + prioNodeIndicesFieldDataEvictions + prioNodeIndicesSegmentsCount + prioNodeIndicesSegmentsMemoryUsageTotal + prioNodeIndicesSegmentsMemoryUsage + prioNodeIndicesTransLogOps + prioNodeIndexTransLogSize + prioNodeFileDescriptors + prioNodeJVMMemHeap + prioNodeJVMMemHeapBytes + prioNodeJVMBufferPoolsCount + prioNodeJVMBufferPoolDirectMemory + prioNodeJVMBufferPoolMappedMemory + prioNodeJVMGCCount + prioNodeJVMGCTime + prioNodeThreadPoolQueued + prioNodeThreadPoolRejected + prioNodeClusterCommunicationPackets + prioNodeClusterCommunication + prioNodeHTTPConnections + prioNodeBreakersTrips + + prioClusterStatus + prioClusterNodesCount + prioClusterShardsCount + prioClusterPendingTasks + prioClusterInFlightFetchesCount + + prioClusterIndicesCount + prioClusterIndicesShardsCount + prioClusterIndicesDocsCount + prioClusterIndicesStoreSize + prioClusterIndicesQueryCache + prioClusterNodesByRoleCount + + prioNodeIndexHealth + prioNodeIndexShardsCount + prioNodeIndexDocsCount + prioNodeIndexStoreSize +) + +var nodeChartsTmpl = module.Charts{ + nodeIndicesIndexingOpsChartTmpl.Copy(), + nodeIndicesIndexingOpsCurrentChartTmpl.Copy(), + nodeIndicesIndexingOpsTimeChartTmpl.Copy(), + + nodeIndicesSearchOpsChartTmpl.Copy(), + nodeIndicesSearchOpsCurrentChartTmpl.Copy(), + nodeIndicesSearchOpsTimeChartTmpl.Copy(), + + nodeIndicesRefreshOpsChartTmpl.Copy(), + nodeIndicesRefreshOpsTimeChartTmpl.Copy(), + + nodeIndicesFlushOpsChartTmpl.Copy(), + nodeIndicesFlushOpsTimeChartTmpl.Copy(), + + nodeIndicesFieldDataMemoryUsageChartTmpl.Copy(), + nodeIndicesFieldDataEvictionsChartTmpl.Copy(), + + nodeIndicesSegmentsCountChartTmpl.Copy(), + nodeIndicesSegmentsMemoryUsageTotalChartTmpl.Copy(), + nodeIndicesSegmentsMemoryUsageChartTmpl.Copy(), + + nodeIndicesTransLogOpsChartTmpl.Copy(), + nodeIndexTransLogSizeChartTmpl.Copy(), + + nodeFileDescriptorsChartTmpl.Copy(), + + nodeJVMMemHeapChartTmpl.Copy(), + 
nodeJVMMemHeapBytesChartTmpl.Copy(), + nodeJVMBufferPoolsCountChartTmpl.Copy(), + nodeJVMBufferPoolDirectMemoryChartTmpl.Copy(), + nodeJVMBufferPoolMappedMemoryChartTmpl.Copy(), + nodeJVMGCCountChartTmpl.Copy(), + nodeJVMGCTimeChartTmpl.Copy(), + + nodeThreadPoolQueuedChartTmpl.Copy(), + nodeThreadPoolRejectedChartTmpl.Copy(), + + nodeClusterCommunicationPacketsChartTmpl.Copy(), + nodeClusterCommunicationChartTmpl.Copy(), + + nodeHTTPConnectionsChartTmpl.Copy(), + + nodeBreakersTripsChartTmpl.Copy(), +} + +var ( + nodeIndicesIndexingOpsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_indexing_operations", + Title: "Indexing Operations", + Units: "operations/s", + Fam: "indices indexing", + Ctx: "elasticsearch.node_indices_indexing", + Priority: prioNodeIndicesIndexingOps, + Dims: module.Dims{ + {ID: "node_%s_indices_indexing_index_total", Name: "index", Algo: module.Incremental}, + }, + } + nodeIndicesIndexingOpsCurrentChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_indexing_operations_current", + Title: "Indexing Operations Current", + Units: "operations", + Fam: "indices indexing", + Ctx: "elasticsearch.node_indices_indexing_current", + Priority: prioNodeIndicesIndexingOpsCurrent, + Dims: module.Dims{ + {ID: "node_%s_indices_indexing_index_current", Name: "index"}, + }, + } + nodeIndicesIndexingOpsTimeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_indexing_operations_time", + Title: "Time Spent On Indexing Operations", + Units: "milliseconds", + Fam: "indices indexing", + Ctx: "elasticsearch.node_indices_indexing_time", + Priority: prioNodeIndicesIndexingOpsTime, + Dims: module.Dims{ + {ID: "node_%s_indices_indexing_index_time_in_millis", Name: "index", Algo: module.Incremental}, + }, + } + + nodeIndicesSearchOpsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_search_operations", + Title: "Search Operations", + Units: "operations/s", + Fam: "indices search", + Ctx: "elasticsearch.node_indices_search", + Type: module.Stacked, + Priority: prioNodeIndicesSearchOps, + Dims: module.Dims{ + {ID: "node_%s_indices_search_query_total", Name: "queries", Algo: module.Incremental}, + {ID: "node_%s_indices_search_fetch_total", Name: "fetches", Algo: module.Incremental}, + }, + } + nodeIndicesSearchOpsCurrentChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_search_operations_current", + Title: "Search Operations Current", + Units: "operations", + Fam: "indices search", + Ctx: "elasticsearch.node_indices_search_current", + Type: module.Stacked, + Priority: prioNodeIndicesSearchOpsCurrent, + Dims: module.Dims{ + {ID: "node_%s_indices_search_query_current", Name: "queries"}, + {ID: "node_%s_indices_search_fetch_current", Name: "fetches"}, + }, + } + nodeIndicesSearchOpsTimeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_search_operations_time", + Title: "Time Spent On Search Operations", + Units: "milliseconds", + Fam: "indices search", + Ctx: "elasticsearch.node_indices_search_time", + Type: module.Stacked, + Priority: prioNodeIndicesSearchOpsTime, + Dims: module.Dims{ + {ID: "node_%s_indices_search_query_time_in_millis", Name: "query", Algo: module.Incremental}, + {ID: "node_%s_indices_search_fetch_time_in_millis", Name: "fetch", Algo: module.Incremental}, + }, + } + + nodeIndicesRefreshOpsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_refresh_operations", + Title: "Refresh Operations", + Units: "operations/s", + Fam: "indices refresh", + Ctx: "elasticsearch.node_indices_refresh", + Priority: prioNodeIndicesRefreshOps, + 
Dims: module.Dims{ + {ID: "node_%s_indices_refresh_total", Name: "refresh", Algo: module.Incremental}, + }, + } + nodeIndicesRefreshOpsTimeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_refresh_operations_time", + Title: "Time Spent On Refresh Operations", + Units: "milliseconds", + Fam: "indices refresh", + Ctx: "elasticsearch.node_indices_refresh_time", + Priority: prioNodeIndicesRefreshOpsTime, + Dims: module.Dims{ + {ID: "node_%s_indices_refresh_total_time_in_millis", Name: "refresh", Algo: module.Incremental}, + }, + } + + nodeIndicesFlushOpsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_flush_operations", + Title: "Flush Operations", + Units: "operations/s", + Fam: "indices flush", + Ctx: "elasticsearch.node_indices_flush", + Priority: prioNodeIndicesFlushOps, + Dims: module.Dims{ + {ID: "node_%s_indices_flush_total", Name: "flush", Algo: module.Incremental}, + }, + } + nodeIndicesFlushOpsTimeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_flush_operations_time", + Title: "Time Spent On Flush Operations", + Units: "milliseconds", + Fam: "indices flush", + Ctx: "elasticsearch.node_indices_flush_time", + Priority: prioNodeIndicesFlushOpsTime, + Dims: module.Dims{ + {ID: "node_%s_indices_flush_total_time_in_millis", Name: "flush", Algo: module.Incremental}, + }, + } + + nodeIndicesFieldDataMemoryUsageChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_fielddata_memory_usage", + Title: "Fielddata Cache Memory Usage", + Units: "bytes", + Fam: "indices fielddata", + Ctx: "elasticsearch.node_indices_fielddata_memory_usage", + Type: module.Area, + Priority: prioNodeIndicesFieldDataMemoryUsage, + Dims: module.Dims{ + {ID: "node_%s_indices_fielddata_memory_size_in_bytes", Name: "used"}, + }, + } + nodeIndicesFieldDataEvictionsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_fielddata_evictions", + Title: "Fielddata Evictions", + Units: "operations/s", + Fam: "indices fielddata", + Ctx: "elasticsearch.node_indices_fielddata_evictions", + Priority: prioNodeIndicesFieldDataEvictions, + Dims: module.Dims{ + {ID: "node_%s_indices_fielddata_evictions", Name: "evictions", Algo: module.Incremental}, + }, + } + + nodeIndicesSegmentsCountChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_segments_count", + Title: "Segments Count", + Units: "segments", + Fam: "indices segments", + Ctx: "elasticsearch.node_indices_segments_count", + Priority: prioNodeIndicesSegmentsCount, + Dims: module.Dims{ + {ID: "node_%s_indices_segments_count", Name: "segments"}, + }, + } + nodeIndicesSegmentsMemoryUsageTotalChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_segments_memory_usage_total", + Title: "Segments Memory Usage Total", + Units: "bytes", + Fam: "indices segments", + Ctx: "elasticsearch.node_indices_segments_memory_usage_total", + Priority: prioNodeIndicesSegmentsMemoryUsageTotal, + Dims: module.Dims{ + {ID: "node_%s_indices_segments_memory_in_bytes", Name: "used"}, + }, + } + nodeIndicesSegmentsMemoryUsageChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_segments_memory_usage", + Title: "Segments Memory Usage", + Units: "bytes", + Fam: "indices segments", + Ctx: "elasticsearch.node_indices_segments_memory_usage", + Type: module.Stacked, + Priority: prioNodeIndicesSegmentsMemoryUsage, + Dims: module.Dims{ + {ID: "node_%s_indices_segments_terms_memory_in_bytes", Name: "terms"}, + {ID: "node_%s_indices_segments_stored_fields_memory_in_bytes", Name: "stored_fields"}, + {ID: "node_%s_indices_segments_term_vectors_memory_in_bytes", 
Name: "term_vectors"}, + {ID: "node_%s_indices_segments_norms_memory_in_bytes", Name: "norms"}, + {ID: "node_%s_indices_segments_points_memory_in_bytes", Name: "points"}, + {ID: "node_%s_indices_segments_doc_values_memory_in_bytes", Name: "doc_values"}, + {ID: "node_%s_indices_segments_index_writer_memory_in_bytes", Name: "index_writer"}, + {ID: "node_%s_indices_segments_version_map_memory_in_bytes", Name: "version_map"}, + {ID: "node_%s_indices_segments_fixed_bit_set_memory_in_bytes", Name: "fixed_bit_set"}, + }, + } + + nodeIndicesTransLogOpsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_indices_translog_operations", + Title: "Translog Operations", + Units: "operations", + Fam: "indices translog", + Ctx: "elasticsearch.node_indices_translog_operations", + Type: module.Area, + Priority: prioNodeIndicesTransLogOps, + Dims: module.Dims{ + {ID: "node_%s_indices_translog_operations", Name: "total"}, + {ID: "node_%s_indices_translog_uncommitted_operations", Name: "uncommitted"}, + }, + } + nodeIndexTransLogSizeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_index_translog_size", + Title: "Translog Size", + Units: "bytes", + Fam: "indices translog", + Ctx: "elasticsearch.node_indices_translog_size", + Type: module.Area, + Priority: prioNodeIndexTransLogSize, + Dims: module.Dims{ + {ID: "node_%s_indices_translog_size_in_bytes", Name: "total"}, + {ID: "node_%s_indices_translog_uncommitted_size_in_bytes", Name: "uncommitted"}, + }, + } + + nodeFileDescriptorsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_file_descriptors", + Title: "Process File Descriptors", + Units: "fd", + Fam: "process", + Ctx: "elasticsearch.node_file_descriptors", + Priority: prioNodeFileDescriptors, + Dims: module.Dims{ + {ID: "node_%s_process_open_file_descriptors", Name: "open"}, + }, + } + + nodeJVMMemHeapChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_mem_heap", + Title: "JVM Heap Percentage Currently in Use", + Units: "percentage", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_heap", + Type: module.Area, + Priority: prioNodeJVMMemHeap, + Dims: module.Dims{ + {ID: "node_%s_jvm_mem_heap_used_percent", Name: "inuse"}, + }, + } + nodeJVMMemHeapBytesChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_mem_heap_bytes", + Title: "JVM Heap Commit And Usage", + Units: "bytes", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_heap_bytes", + Type: module.Area, + Priority: prioNodeJVMMemHeapBytes, + Dims: module.Dims{ + {ID: "node_%s_jvm_mem_heap_committed_in_bytes", Name: "committed"}, + {ID: "node_%s_jvm_mem_heap_used_in_bytes", Name: "used"}, + }, + } + nodeJVMBufferPoolsCountChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_buffer_pools_count", + Title: "JVM Buffer Pools Count", + Units: "pools", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_buffer_pools_count", + Priority: prioNodeJVMBufferPoolsCount, + Dims: module.Dims{ + {ID: "node_%s_jvm_buffer_pools_direct_count", Name: "direct"}, + {ID: "node_%s_jvm_buffer_pools_mapped_count", Name: "mapped"}, + }, + } + nodeJVMBufferPoolDirectMemoryChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_buffer_pool_direct_memory", + Title: "JVM Buffer Pool Direct Memory", + Units: "bytes", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_buffer_pool_direct_memory", + Type: module.Area, + Priority: prioNodeJVMBufferPoolDirectMemory, + Dims: module.Dims{ + {ID: "node_%s_jvm_buffer_pools_direct_total_capacity_in_bytes", Name: "total"}, + {ID: "node_%s_jvm_buffer_pools_direct_used_in_bytes", Name: "used"}, + }, + } + nodeJVMBufferPoolMappedMemoryChartTmpl = 
module.Chart{ + ID: "node_%s_cluster_%s_jvm_buffer_pool_mapped_memory", + Title: "JVM Buffer Pool Mapped Memory", + Units: "bytes", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_buffer_pool_mapped_memory", + Type: module.Area, + Priority: prioNodeJVMBufferPoolMappedMemory, + Dims: module.Dims{ + {ID: "node_%s_jvm_buffer_pools_mapped_total_capacity_in_bytes", Name: "total"}, + {ID: "node_%s_jvm_buffer_pools_mapped_used_in_bytes", Name: "used"}, + }, + } + nodeJVMGCCountChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_gc_count", + Title: "JVM Garbage Collections", + Units: "gc/s", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_gc_count", + Type: module.Stacked, + Priority: prioNodeJVMGCCount, + Dims: module.Dims{ + {ID: "node_%s_jvm_gc_collectors_young_collection_count", Name: "young", Algo: module.Incremental}, + {ID: "node_%s_jvm_gc_collectors_old_collection_count", Name: "old", Algo: module.Incremental}, + }, + } + nodeJVMGCTimeChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_jvm_gc_time", + Title: "JVM Time Spent On Garbage Collections", + Units: "milliseconds", + Fam: "jvm", + Ctx: "elasticsearch.node_jvm_gc_time", + Type: module.Stacked, + Priority: prioNodeJVMGCTime, + Dims: module.Dims{ + {ID: "node_%s_jvm_gc_collectors_young_collection_time_in_millis", Name: "young", Algo: module.Incremental}, + {ID: "node_%s_jvm_gc_collectors_old_collection_time_in_millis", Name: "old", Algo: module.Incremental}, + }, + } + + nodeThreadPoolQueuedChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_thread_pool_queued", + Title: "Thread Pool Queued Threads Count", + Units: "threads", + Fam: "thread pool", + Ctx: "elasticsearch.node_thread_pool_queued", + Type: module.Stacked, + Priority: prioNodeThreadPoolQueued, + Dims: module.Dims{ + {ID: "node_%s_thread_pool_generic_queue", Name: "generic"}, + {ID: "node_%s_thread_pool_search_queue", Name: "search"}, + {ID: "node_%s_thread_pool_search_throttled_queue", Name: "search_throttled"}, + {ID: "node_%s_thread_pool_get_queue", Name: "get"}, + {ID: "node_%s_thread_pool_analyze_queue", Name: "analyze"}, + {ID: "node_%s_thread_pool_write_queue", Name: "write"}, + {ID: "node_%s_thread_pool_snapshot_queue", Name: "snapshot"}, + {ID: "node_%s_thread_pool_warmer_queue", Name: "warmer"}, + {ID: "node_%s_thread_pool_refresh_queue", Name: "refresh"}, + {ID: "node_%s_thread_pool_listener_queue", Name: "listener"}, + {ID: "node_%s_thread_pool_fetch_shard_started_queue", Name: "fetch_shard_started"}, + {ID: "node_%s_thread_pool_fetch_shard_store_queue", Name: "fetch_shard_store"}, + {ID: "node_%s_thread_pool_flush_queue", Name: "flush"}, + {ID: "node_%s_thread_pool_force_merge_queue", Name: "force_merge"}, + {ID: "node_%s_thread_pool_management_queue", Name: "management"}, + }, + } + nodeThreadPoolRejectedChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_thread_pool_rejected", + Title: "Thread Pool Rejected Threads Count", + Units: "threads", + Fam: "thread pool", + Ctx: "elasticsearch.node_thread_pool_rejected", + Type: module.Stacked, + Priority: prioNodeThreadPoolRejected, + Dims: module.Dims{ + {ID: "node_%s_thread_pool_generic_rejected", Name: "generic"}, + {ID: "node_%s_thread_pool_search_rejected", Name: "search"}, + {ID: "node_%s_thread_pool_search_throttled_rejected", Name: "search_throttled"}, + {ID: "node_%s_thread_pool_get_rejected", Name: "get"}, + {ID: "node_%s_thread_pool_analyze_rejected", Name: "analyze"}, + {ID: "node_%s_thread_pool_write_rejected", Name: "write"}, + {ID: "node_%s_thread_pool_snapshot_rejected", Name: "snapshot"}, + {ID: 
"node_%s_thread_pool_warmer_rejected", Name: "warmer"}, + {ID: "node_%s_thread_pool_refresh_rejected", Name: "refresh"}, + {ID: "node_%s_thread_pool_listener_rejected", Name: "listener"}, + {ID: "node_%s_thread_pool_fetch_shard_started_rejected", Name: "fetch_shard_started"}, + {ID: "node_%s_thread_pool_fetch_shard_store_rejected", Name: "fetch_shard_store"}, + {ID: "node_%s_thread_pool_flush_rejected", Name: "flush"}, + {ID: "node_%s_thread_pool_force_merge_rejected", Name: "force_merge"}, + {ID: "node_%s_thread_pool_management_rejected", Name: "management"}, + }, + } + + nodeClusterCommunicationPacketsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_cluster_communication_packets", + Title: "Node Cluster Communication", + Units: "pps", + Fam: "transport", + Ctx: "elasticsearch.node_cluster_communication_packets", + Priority: prioNodeClusterCommunicationPackets, + Dims: module.Dims{ + {ID: "node_%s_transport_rx_count", Name: "received", Algo: module.Incremental}, + {ID: "node_%s_transport_tx_count", Name: "sent", Mul: -1, Algo: module.Incremental}, + }, + } + nodeClusterCommunicationChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_cluster_communication_traffic", + Title: "Cluster Communication Bandwidth", + Units: "bytes/s", + Fam: "transport", + Ctx: "elasticsearch.node_cluster_communication_traffic", + Priority: prioNodeClusterCommunication, + Dims: module.Dims{ + {ID: "node_%s_transport_rx_size_in_bytes", Name: "received", Algo: module.Incremental}, + {ID: "node_%s_transport_tx_size_in_bytes", Name: "sent", Mul: -1, Algo: module.Incremental}, + }, + } + + nodeHTTPConnectionsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_http_connections", + Title: "HTTP Connections", + Units: "connections", + Fam: "http", + Ctx: "elasticsearch.node_http_connections", + Priority: prioNodeHTTPConnections, + Dims: module.Dims{ + {ID: "node_%s_http_current_open", Name: "open"}, + }, + } + + nodeBreakersTripsChartTmpl = module.Chart{ + ID: "node_%s_cluster_%s_breakers_trips", + Title: "Circuit Breaker Trips Count", + Units: "trips/s", + Fam: "circuit breakers", + Ctx: "elasticsearch.node_breakers_trips", + Type: module.Stacked, + Priority: prioNodeBreakersTrips, + Dims: module.Dims{ + {ID: "node_%s_breakers_request_tripped", Name: "requests", Algo: module.Incremental}, + {ID: "node_%s_breakers_fielddata_tripped", Name: "fielddata", Algo: module.Incremental}, + {ID: "node_%s_breakers_in_flight_requests_tripped", Name: "in_flight_requests", Algo: module.Incremental}, + {ID: "node_%s_breakers_model_inference_tripped", Name: "model_inference", Algo: module.Incremental}, + {ID: "node_%s_breakers_accounting_tripped", Name: "accounting", Algo: module.Incremental}, + {ID: "node_%s_breakers_parent_tripped", Name: "parent", Algo: module.Incremental}, + }, + } +) + +var clusterHealthChartsTmpl = module.Charts{ + clusterStatusChartTmpl.Copy(), + clusterNodesCountChartTmpl.Copy(), + clusterShardsCountChartTmpl.Copy(), + clusterPendingTasksChartTmpl.Copy(), + clusterInFlightFetchesCountChartTmpl.Copy(), +} + +var ( + clusterStatusChartTmpl = module.Chart{ + ID: "cluster_%s_status", + Title: "Cluster Status", + Units: "status", + Fam: "cluster health", + Ctx: "elasticsearch.cluster_health_status", + Priority: prioClusterStatus, + Dims: module.Dims{ + {ID: "cluster_status_green", Name: "green"}, + {ID: "cluster_status_red", Name: "red"}, + {ID: "cluster_status_yellow", Name: "yellow"}, + }, + } + clusterNodesCountChartTmpl = module.Chart{ + ID: "cluster_%s_number_of_nodes", + Title: "Cluster Nodes Count", + 
Units: "nodes", + Fam: "cluster health", + Ctx: "elasticsearch.cluster_number_of_nodes", + Priority: prioClusterNodesCount, + Dims: module.Dims{ + {ID: "cluster_number_of_nodes", Name: "nodes"}, + {ID: "cluster_number_of_data_nodes", Name: "data_nodes"}, + }, + } + clusterShardsCountChartTmpl = module.Chart{ + ID: "cluster_%s_shards_count", + Title: "Cluster Shards Count", + Units: "shards", + Fam: "cluster health", + Ctx: "elasticsearch.cluster_shards_count", + Priority: prioClusterShardsCount, + Dims: module.Dims{ + {ID: "cluster_active_primary_shards", Name: "active_primary"}, + {ID: "cluster_active_shards", Name: "active"}, + {ID: "cluster_relocating_shards", Name: "relocating"}, + {ID: "cluster_initializing_shards", Name: "initializing"}, + {ID: "cluster_unassigned_shards", Name: "unassigned"}, + {ID: "cluster_delayed_unassigned_shards", Name: "delayed_unassigned"}, + }, + } + clusterPendingTasksChartTmpl = module.Chart{ + ID: "cluster_%s_pending_tasks", + Title: "Cluster Pending Tasks", + Units: "tasks", + Fam: "cluster health", + Ctx: "elasticsearch.cluster_pending_tasks", + Priority: prioClusterPendingTasks, + Dims: module.Dims{ + {ID: "cluster_number_of_pending_tasks", Name: "pending"}, + }, + } + clusterInFlightFetchesCountChartTmpl = module.Chart{ + ID: "cluster_%s_number_of_in_flight_fetch", + Title: "Cluster Unfinished Fetches", + Units: "fetches", + Fam: "cluster health", + Ctx: "elasticsearch.cluster_number_of_in_flight_fetch", + Priority: prioClusterInFlightFetchesCount, + Dims: module.Dims{ + {ID: "cluster_number_of_in_flight_fetch", Name: "in_flight_fetch"}, + }, + } +) + +var clusterStatsChartsTmpl = module.Charts{ + clusterIndicesCountChartTmpl.Copy(), + clusterIndicesShardsCountChartTmpl.Copy(), + clusterIndicesDocsCountChartTmpl.Copy(), + clusterIndicesStoreSizeChartTmpl.Copy(), + clusterIndicesQueryCacheChartTmpl.Copy(), + clusterNodesByRoleCountChartTmpl.Copy(), +} + +var ( + clusterIndicesCountChartTmpl = module.Chart{ + ID: "cluster_%s_indices_count", + Title: "Cluster Indices Count", + Units: "indices", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_indices_count", + Priority: prioClusterIndicesCount, + Dims: module.Dims{ + {ID: "cluster_indices_count", Name: "indices"}, + }, + } + clusterIndicesShardsCountChartTmpl = module.Chart{ + ID: "cluster_%s_indices_shards_count", + Title: "Cluster Indices Shards Count", + Units: "shards", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_indices_shards_count", + Priority: prioClusterIndicesShardsCount, + Dims: module.Dims{ + {ID: "cluster_indices_shards_total", Name: "total"}, + {ID: "cluster_indices_shards_primaries", Name: "primaries"}, + {ID: "cluster_indices_shards_replication", Name: "replication"}, + }, + } + clusterIndicesDocsCountChartTmpl = module.Chart{ + ID: "cluster_%s_indices_docs_count", + Title: "Cluster Indices Docs Count", + Units: "docs", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_indices_docs_count", + Priority: prioClusterIndicesDocsCount, + Dims: module.Dims{ + {ID: "cluster_indices_docs_count", Name: "docs"}, + }, + } + clusterIndicesStoreSizeChartTmpl = module.Chart{ + ID: "cluster_%s_indices_store_size", + Title: "Cluster Indices Store Size", + Units: "bytes", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_indices_store_size", + Priority: prioClusterIndicesStoreSize, + Dims: module.Dims{ + {ID: "cluster_indices_store_size_in_bytes", Name: "size"}, + }, + } + clusterIndicesQueryCacheChartTmpl = module.Chart{ + ID: "cluster_%s_indices_query_cache", + Title: "Cluster 
Indices Query Cache", + Units: "events/s", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_indices_query_cache", + Type: module.Stacked, + Priority: prioClusterIndicesQueryCache, + Dims: module.Dims{ + {ID: "cluster_indices_query_cache_hit_count", Name: "hit", Algo: module.Incremental}, + {ID: "cluster_indices_query_cache_miss_count", Name: "miss", Algo: module.Incremental}, + }, + } + clusterNodesByRoleCountChartTmpl = module.Chart{ + ID: "cluster_%s_nodes_by_role_count", + Title: "Cluster Nodes By Role Count", + Units: "nodes", + Fam: "cluster stats", + Ctx: "elasticsearch.cluster_nodes_by_role_count", + Priority: prioClusterNodesByRoleCount, + Dims: module.Dims{ + {ID: "cluster_nodes_count_coordinating_only", Name: "coordinating_only"}, + {ID: "cluster_nodes_count_data", Name: "data"}, + {ID: "cluster_nodes_count_data_cold", Name: "data_cold"}, + {ID: "cluster_nodes_count_data_content", Name: "data_content"}, + {ID: "cluster_nodes_count_data_frozen", Name: "data_frozen"}, + {ID: "cluster_nodes_count_data_hot", Name: "data_hot"}, + {ID: "cluster_nodes_count_data_warm", Name: "data_warm"}, + {ID: "cluster_nodes_count_ingest", Name: "ingest"}, + {ID: "cluster_nodes_count_master", Name: "master"}, + {ID: "cluster_nodes_count_ml", Name: "ml"}, + {ID: "cluster_nodes_count_remote_cluster_client", Name: "remote_cluster_client"}, + {ID: "cluster_nodes_count_voting_only", Name: "voting_only"}, + }, + } +) + +var nodeIndexChartsTmpl = module.Charts{ + nodeIndexHealthChartTmpl.Copy(), + nodeIndexShardsCountChartTmpl.Copy(), + nodeIndexDocsCountChartTmpl.Copy(), + nodeIndexStoreSizeChartTmpl.Copy(), +} + +var ( + nodeIndexHealthChartTmpl = module.Chart{ + ID: "node_index_%s_cluster_%s_health", + Title: "Index Health", + Units: "status", + Fam: "index stats", + Ctx: "elasticsearch.node_index_health", + Priority: prioNodeIndexHealth, + Dims: module.Dims{ + {ID: "node_index_%s_stats_health_green", Name: "green"}, + {ID: "node_index_%s_stats_health_red", Name: "red"}, + {ID: "node_index_%s_stats_health_yellow", Name: "yellow"}, + }, + } + nodeIndexShardsCountChartTmpl = module.Chart{ + ID: "node_index_%s_cluster_%s_shards_count", + Title: "Index Shards Count", + Units: "shards", + Fam: "index stats", + Ctx: "elasticsearch.node_index_shards_count", + Priority: prioNodeIndexShardsCount, + Dims: module.Dims{ + {ID: "node_index_%s_stats_shards_count", Name: "shards"}, + }, + } + nodeIndexDocsCountChartTmpl = module.Chart{ + ID: "node_index_%s_cluster_%s_docs_count", + Title: "Index Docs Count", + Units: "docs", + Fam: "index stats", + Ctx: "elasticsearch.node_index_docs_count", + Priority: prioNodeIndexDocsCount, + Dims: module.Dims{ + {ID: "node_index_%s_stats_docs_count", Name: "docs"}, + }, + } + nodeIndexStoreSizeChartTmpl = module.Chart{ + ID: "node_index_%s_cluster_%s_store_size", + Title: "Index Store Size", + Units: "bytes", + Fam: "index stats", + Ctx: "elasticsearch.node_index_store_size", + Priority: prioNodeIndexStoreSize, + Dims: module.Dims{ + {ID: "node_index_%s_stats_store_size_in_bytes", Name: "store_size"}, + }, + } +) + +func (es *Elasticsearch) addClusterStatsCharts() { + charts := clusterStatsChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, es.clusterName) + chart.Labels = []module.Label{ + {Key: "cluster_name", Value: es.clusterName}, + } + } + + if err := es.charts.Add(*charts...); err != nil { + es.Warning(err) + } +} + +func (es *Elasticsearch) addClusterHealthCharts() { + charts := clusterHealthChartsTmpl.Copy() + + for _, chart := 
range *charts { + chart.ID = fmt.Sprintf(chart.ID, es.clusterName) + chart.Labels = []module.Label{ + {Key: "cluster_name", Value: es.clusterName}, + } + } + + if err := es.charts.Add(*charts...); err != nil { + es.Warning(err) + } +} + +func (es *Elasticsearch) addNodeCharts(nodeID string, node *esNodeStats) { + charts := nodeChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, nodeID, es.clusterName) + chart.Labels = []module.Label{ + {Key: "cluster_name", Value: es.clusterName}, + {Key: "node_name", Value: node.Name}, + {Key: "host", Value: node.Host}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, nodeID) + } + } + + if err := es.Charts().Add(*charts...); err != nil { + es.Warning(err) + } +} + +func (es *Elasticsearch) removeNodeCharts(nodeID string) { + px := fmt.Sprintf("node_%s_cluster_%s_", nodeID, es.clusterName) + es.removeCharts(px) +} + +func (es *Elasticsearch) addIndexCharts(index string) { + charts := nodeIndexChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, index, es.clusterName) + chart.Labels = []module.Label{ + {Key: "cluster_name", Value: es.clusterName}, + {Key: "index", Value: index}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, index) + } + } + + if err := es.Charts().Add(*charts...); err != nil { + es.Warning(err) + } +} + +func (es *Elasticsearch) removeIndexCharts(index string) { + px := fmt.Sprintf("node_index_%s_cluster_%s_", index, es.clusterName) + es.removeCharts(px) +} + +func (es *Elasticsearch) removeCharts(prefix string) { + for _, chart := range *es.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/collect.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/collect.go new file mode 100644 index 00000000000000..f39c4cf560d9b4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/collect.go @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net/http" + "strconv" + "strings" + "sync" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathLocalNodeStats = "/_nodes/_local/stats" + urlPathNodesStats = "/_nodes/stats" + urlPathIndicesStats = "/_cat/indices" + urlPathClusterHealth = "/_cluster/health" + urlPathClusterStats = "/_cluster/stats" +) + +func (es *Elasticsearch) collect() (map[string]int64, error) { + if es.clusterName == "" { + name, err := es.getClusterName() + if err != nil { + return nil, err + } + es.clusterName = name + } + + ms := es.scrapeElasticsearch() + if ms.empty() { + return nil, nil + } + + mx := make(map[string]int64) + + es.collectNodesStats(mx, ms) + es.collectClusterHealth(mx, ms) + es.collectClusterStats(mx, ms) + es.collectLocalIndicesStats(mx, ms) + + return mx, nil +} + +func (es *Elasticsearch) collectNodesStats(mx map[string]int64, ms *esMetrics) { + if !ms.hasNodesStats() { + return + } + + seen := make(map[string]bool) + + for nodeID, node := range ms.NodesStats.Nodes { + seen[nodeID] = true + + if !es.nodes[nodeID] { + es.nodes[nodeID] = true + es.addNodeCharts(nodeID, node) + } + + merge(mx, stm.ToMap(node), "node_"+nodeID) + } + + for nodeID := range es.nodes { + if !seen[nodeID] { + delete(es.nodes, nodeID) + es.removeNodeCharts(nodeID) + } + } +} + +func (es *Elasticsearch) 
collectClusterHealth(mx map[string]int64, ms *esMetrics) { + if !ms.hasClusterHealth() { + return + } + + es.addClusterHealthChartsOnce.Do(es.addClusterHealthCharts) + + merge(mx, stm.ToMap(ms.ClusterHealth), "cluster") + + mx["cluster_status_green"] = boolToInt(ms.ClusterHealth.Status == "green") + mx["cluster_status_yellow"] = boolToInt(ms.ClusterHealth.Status == "yellow") + mx["cluster_status_red"] = boolToInt(ms.ClusterHealth.Status == "red") +} + +func (es *Elasticsearch) collectClusterStats(mx map[string]int64, ms *esMetrics) { + if !ms.hasClusterStats() { + return + } + + es.addClusterStatsChartsOnce.Do(es.addClusterStatsCharts) + + merge(mx, stm.ToMap(ms.ClusterStats), "cluster") +} + +func (es *Elasticsearch) collectLocalIndicesStats(mx map[string]int64, ms *esMetrics) { + if !ms.hasLocalIndicesStats() { + return + } + + seen := make(map[string]bool) + + for _, v := range ms.LocalIndicesStats { + seen[v.Index] = true + + if !es.indices[v.Index] { + es.indices[v.Index] = true + es.addIndexCharts(v.Index) + } + + px := fmt.Sprintf("node_index_%s_stats_", v.Index) + + mx[px+"health_green"] = boolToInt(v.Health == "green") + mx[px+"health_yellow"] = boolToInt(v.Health == "yellow") + mx[px+"health_red"] = boolToInt(v.Health == "red") + mx[px+"shards_count"] = strToInt(v.Rep) + mx[px+"docs_count"] = strToInt(v.DocsCount) + mx[px+"store_size_in_bytes"] = convertIndexStoreSizeToBytes(v.StoreSize) + } + + for index := range es.indices { + if !seen[index] { + delete(es.indices, index) + es.removeIndexCharts(index) + } + } +} + +func (es *Elasticsearch) scrapeElasticsearch() *esMetrics { + ms := &esMetrics{} + wg := &sync.WaitGroup{} + + if es.DoNodeStats { + wg.Add(1) + go func() { defer wg.Done(); es.scrapeNodesStats(ms) }() + } + if es.DoClusterHealth { + wg.Add(1) + go func() { defer wg.Done(); es.scrapeClusterHealth(ms) }() + } + if es.DoClusterStats { + wg.Add(1) + go func() { defer wg.Done(); es.scrapeClusterStats(ms) }() + } + if !es.ClusterMode && es.DoIndicesStats { + wg.Add(1) + go func() { defer wg.Done(); es.scrapeLocalIndicesStats(ms) }() + } + wg.Wait() + + return ms +} + +func (es *Elasticsearch) scrapeNodesStats(ms *esMetrics) { + req, _ := web.NewHTTPRequest(es.Request) + if es.ClusterMode { + req.URL.Path = urlPathNodesStats + } else { + req.URL.Path = urlPathLocalNodeStats + } + + var stats esNodesStats + if err := es.doOKDecode(req, &stats); err != nil { + es.Warning(err) + return + } + + ms.NodesStats = &stats +} + +func (es *Elasticsearch) scrapeClusterHealth(ms *esMetrics) { + req, _ := web.NewHTTPRequest(es.Request) + req.URL.Path = urlPathClusterHealth + + var health esClusterHealth + if err := es.doOKDecode(req, &health); err != nil { + es.Warning(err) + return + } + + ms.ClusterHealth = &health +} + +func (es *Elasticsearch) scrapeClusterStats(ms *esMetrics) { + req, _ := web.NewHTTPRequest(es.Request) + req.URL.Path = urlPathClusterStats + + var stats esClusterStats + if err := es.doOKDecode(req, &stats); err != nil { + es.Warning(err) + return + } + + ms.ClusterStats = &stats +} + +func (es *Elasticsearch) scrapeLocalIndicesStats(ms *esMetrics) { + req, _ := web.NewHTTPRequest(es.Request) + req.URL.Path = urlPathIndicesStats + req.URL.RawQuery = "local=true&format=json" + + var stats []esIndexStats + if err := es.doOKDecode(req, &stats); err != nil { + es.Warning(err) + return + } + + ms.LocalIndicesStats = removeSystemIndices(stats) +} + +func (es *Elasticsearch) getClusterName() (string, error) { + req, _ := web.NewHTTPRequest(es.Request) + + var info struct { 
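+		// the Elasticsearch root ("/") endpoint response includes "cluster_name"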
+ ClusterName string `json:"cluster_name"` + } + + if err := es.doOKDecode(req, &info); err != nil { + return "", err + } + + if info.ClusterName == "" { + return "", errors.New("empty cluster name") + } + + return info.ClusterName, nil +} + +func (es *Elasticsearch) doOKDecode(req *http.Request, in interface{}) error { + resp, err := es.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func convertIndexStoreSizeToBytes(size string) int64 { + var num float64 + switch { + case strings.HasSuffix(size, "kb"): + num, _ = strconv.ParseFloat(size[:len(size)-2], 64) + num *= math.Pow(1024, 1) + case strings.HasSuffix(size, "mb"): + num, _ = strconv.ParseFloat(size[:len(size)-2], 64) + num *= math.Pow(1024, 2) + case strings.HasSuffix(size, "gb"): + num, _ = strconv.ParseFloat(size[:len(size)-2], 64) + num *= math.Pow(1024, 3) + case strings.HasSuffix(size, "tb"): + num, _ = strconv.ParseFloat(size[:len(size)-2], 64) + num *= math.Pow(1024, 4) + case strings.HasSuffix(size, "b"): + num, _ = strconv.ParseFloat(size[:len(size)-1], 64) + } + return int64(num) +} + +func strToInt(s string) int64 { + v, _ := strconv.Atoi(s) + return int64(v) +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} + +func removeSystemIndices(indices []esIndexStats) []esIndexStats { + var i int + for _, index := range indices { + if strings.HasPrefix(index.Index, ".") { + continue + } + indices[i] = index + i++ + } + return indices[:i] +} + +func merge(dst, src map[string]int64, prefix string) { + for k, v := range src { + dst[prefix+"_"+k] = v + } +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/config_schema.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/config_schema.json new file mode 100644 index 00000000000000..f69eb6e43bd2d8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/config_schema.json @@ -0,0 +1,74 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/elasticsearch job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "cluster_mode": { + "type": "boolean" + }, + "collect_node_stats": { + "type": "boolean" + }, + "collect_cluster_health": { + "type": "boolean" + }, + "collect_cluster_stats": { + "type": "boolean" + }, + "collect_indices_stats": { + "type": "boolean" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git 
a/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch.go new file mode 100644 index 00000000000000..4b29a6cc85801a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +import ( + _ "embed" + "net/http" + "sync" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("elasticsearch", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Elasticsearch { + return &Elasticsearch{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9200", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}, + }, + }, + ClusterMode: false, + + DoNodeStats: true, + DoClusterStats: true, + DoClusterHealth: true, + DoIndicesStats: false, + }, + + charts: &module.Charts{}, + addClusterHealthChartsOnce: &sync.Once{}, + addClusterStatsChartsOnce: &sync.Once{}, + nodes: make(map[string]bool), + indices: make(map[string]bool), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + ClusterMode bool `yaml:"cluster_mode"` + DoNodeStats bool `yaml:"collect_node_stats"` + DoClusterHealth bool `yaml:"collect_cluster_health"` + DoClusterStats bool `yaml:"collect_cluster_stats"` + DoIndicesStats bool `yaml:"collect_indices_stats"` +} + +type Elasticsearch struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts + + clusterName string + + addClusterHealthChartsOnce *sync.Once + addClusterStatsChartsOnce *sync.Once + + nodes map[string]bool + indices map[string]bool +} + +func (es *Elasticsearch) Init() bool { + err := es.validateConfig() + if err != nil { + es.Errorf("check configuration: %v", err) + return false + } + + httpClient, err := es.initHTTPClient() + if err != nil { + es.Errorf("init HTTP client: %v", err) + return false + } + es.httpClient = httpClient + + return true +} + +func (es *Elasticsearch) Check() bool { + return len(es.Collect()) > 0 +} + +func (es *Elasticsearch) Charts() *module.Charts { + return es.charts +} + +func (es *Elasticsearch) Collect() map[string]int64 { + mx, err := es.collect() + if err != nil { + es.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (es *Elasticsearch) Cleanup() { + if es.httpClient != nil { + es.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch_test.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch_test.go new file mode 100644 index 00000000000000..d4f1628cdd903f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/elasticsearch_test.go @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v842NodesLocalStats, _ = os.ReadFile("testdata/v8.4.2/nodes_local_stats.json") + v842NodesStats, _ = os.ReadFile("testdata/v8.4.2/nodes_stats.json") + v842ClusterHealth, _ = 
os.ReadFile("testdata/v8.4.2/cluster_health.json") + v842ClusterStats, _ = os.ReadFile("testdata/v8.4.2/cluster_stats.json") + v842CatIndicesStats, _ = os.ReadFile("testdata/v8.4.2/cat_indices_stats.json") + v842Info, _ = os.ReadFile("testdata/v8.4.2/info.json") +) + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v842NodesLocalStats": v842NodesLocalStats, + "v842NodesStats": v842NodesStats, + "v842ClusterHealth": v842ClusterHealth, + "v842ClusterStats": v842ClusterStats, + "v842CatIndicesStats": v842CatIndicesStats, + "v842Info": v842Info, + } { + require.NotNilf(t, data, name) + } +} + +func TestElasticsearch_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default": { + config: New().Config, + }, + "all stats": { + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + DoNodeStats: true, + DoClusterHealth: true, + DoClusterStats: true, + DoIndicesStats: true, + }, + }, + "only node_stats": { + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + DoNodeStats: true, + DoClusterHealth: false, + DoClusterStats: false, + DoIndicesStats: false, + }, + }, + "URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }}, + }, + "invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }}, + }, + "all API calls are disabled": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + DoNodeStats: false, + DoClusterHealth: false, + DoClusterStats: false, + DoIndicesStats: false, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + es := New() + es.Config = test.config + + if test.wantFail { + assert.False(t, es.Init()) + } else { + assert.True(t, es.Init()) + } + }) + } +} + +func TestElasticsearch_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (es *Elasticsearch, cleanup func()) + wantFail bool + }{ + "valid data": {prepare: prepareElasticsearchValidData}, + "invalid data": {prepare: prepareElasticsearchInvalidData, wantFail: true}, + "404": {prepare: prepareElasticsearch404, wantFail: true}, + "connection refused": {prepare: prepareElasticsearchConnectionRefused, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + es, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, es.Check()) + } else { + assert.True(t, es.Check()) + } + }) + } +} + +func TestElasticsearch_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestElasticsearch_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestElasticsearch_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *Elasticsearch + wantCollected map[string]int64 + wantCharts int + }{ + "v842: all nodes stats": { + prepare: func() *Elasticsearch { + es := New() + es.ClusterMode = true + es.DoNodeStats = true + es.DoClusterHealth = false + es.DoClusterStats = false + es.DoIndicesStats = false + return es + }, + wantCharts: len(nodeChartsTmpl) * 3, + wantCollected: map[string]int64{ + "node_Klg1CjgMTouentQcJlRGuA_breakers_accounting_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_fielddata_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_in_flight_requests_tripped": 0, + 
"node_Klg1CjgMTouentQcJlRGuA_breakers_model_inference_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_parent_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_request_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_http_current_open": 75, + "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_evictions": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_memory_size_in_bytes": 600, + "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total": 35130, + "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total_time_in_millis": 22204637, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_current": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_time_in_millis": 1100012973, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_total": 3667364815, + "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total": 7720800, + "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total_time_in_millis": 94297737, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_current": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_time_in_millis": 21316723, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_total": 42642621, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_current": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_time_in_millis": 51262303, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_total": 166820275, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_count": 320, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_doc_values_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_fixed_bit_set_memory_in_bytes": 1904, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_index_writer_memory_in_bytes": 262022568, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_norms_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_points_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_stored_fields_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_term_vectors_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_terms_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_version_map_memory_in_bytes": 49200018, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_operations": 352376, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_size_in_bytes": 447695989, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_operations": 352376, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_size_in_bytes": 447695989, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_count": 94, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_total_capacity_in_bytes": 4654848, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_used_in_bytes": 4654850, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_count": 858, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_total_capacity_in_bytes": 103114998135, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_used_in_bytes": 103114998135, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_count": 0, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_time_in_millis": 0, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_count": 78652, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_time_in_millis": 6014274, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_committed_in_bytes": 7864320000, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_in_bytes": 5059735552, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_percent": 64, + 
"node_Klg1CjgMTouentQcJlRGuA_process_max_file_descriptors": 1048576, + "node_Klg1CjgMTouentQcJlRGuA_process_open_file_descriptors": 1156, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_transport_rx_count": 1300324276, + "node_Klg1CjgMTouentQcJlRGuA_transport_rx_size_in_bytes": 1789333458217, + "node_Klg1CjgMTouentQcJlRGuA_transport_tx_count": 1300324275, + "node_Klg1CjgMTouentQcJlRGuA_transport_tx_size_in_bytes": 2927487680282, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_accounting_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_fielddata_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_in_flight_requests_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_model_inference_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_parent_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_breakers_request_tripped": 0, + "node_k_AifYMWQTykjUq3pgE_-w_http_current_open": 14, + "node_k_AifYMWQTykjUq3pgE_-w_indices_fielddata_evictions": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_fielddata_memory_size_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_flush_total": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_flush_total_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_current": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_total": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_refresh_total": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_refresh_total_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_current": 0, + 
"node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_total": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_current": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_total": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_count": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_doc_values_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_fixed_bit_set_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_index_writer_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_norms_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_points_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_stored_fields_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_term_vectors_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_terms_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_version_map_memory_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_operations": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_size_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_uncommitted_operations": 0, + "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_uncommitted_size_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_count": 19, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_total_capacity_in_bytes": 2142214, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_used_in_bytes": 2142216, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_count": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_total_capacity_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_used_in_bytes": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_old_collection_count": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_old_collection_time_in_millis": 0, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_young_collection_count": 342994, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_young_collection_time_in_millis": 768917, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_committed_in_bytes": 281018368, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_used_in_bytes": 178362704, + "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_used_percent": 63, + "node_k_AifYMWQTykjUq3pgE_-w_process_max_file_descriptors": 1048576, + "node_k_AifYMWQTykjUq3pgE_-w_process_open_file_descriptors": 557, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_analyze_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_analyze_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_started_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_started_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_store_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_store_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_flush_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_flush_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_force_merge_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_force_merge_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_generic_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_generic_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_get_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_get_rejected": 0, + 
"node_k_AifYMWQTykjUq3pgE_-w_thread_pool_listener_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_listener_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_management_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_management_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_refresh_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_refresh_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_throttled_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_throttled_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_snapshot_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_snapshot_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_warmer_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_warmer_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_write_queue": 0, + "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_write_rejected": 0, + "node_k_AifYMWQTykjUq3pgE_-w_transport_rx_count": 107632996, + "node_k_AifYMWQTykjUq3pgE_-w_transport_rx_size_in_bytes": 180620082152, + "node_k_AifYMWQTykjUq3pgE_-w_transport_tx_count": 107633007, + "node_k_AifYMWQTykjUq3pgE_-w_transport_tx_size_in_bytes": 420999501235, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_accounting_tripped": 0, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_fielddata_tripped": 0, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_in_flight_requests_tripped": 0, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_model_inference_tripped": 0, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_parent_tripped": 93, + "node_tk_U7GMCRkCG4FoOvusrng_breakers_request_tripped": 1, + "node_tk_U7GMCRkCG4FoOvusrng_http_current_open": 84, + "node_tk_U7GMCRkCG4FoOvusrng_indices_fielddata_evictions": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_fielddata_memory_size_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_flush_total": 67895, + "node_tk_U7GMCRkCG4FoOvusrng_indices_flush_total_time_in_millis": 81917283, + "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_current": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_time_in_millis": 1244633519, + "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_total": 6550378755, + "node_tk_U7GMCRkCG4FoOvusrng_indices_refresh_total": 12359783, + "node_tk_U7GMCRkCG4FoOvusrng_indices_refresh_total_time_in_millis": 300152615, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_current": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_time_in_millis": 24517851, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_total": 25105951, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_current": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_time_in_millis": 158980385, + "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_total": 157912598, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_count": 291, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_doc_values_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_fixed_bit_set_memory_in_bytes": 55672, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_index_writer_memory_in_bytes": 57432664, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_norms_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_points_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_stored_fields_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_term_vectors_memory_in_bytes": 0, + 
"node_tk_U7GMCRkCG4FoOvusrng_indices_segments_terms_memory_in_bytes": 0, + "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_version_map_memory_in_bytes": 568, + "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_operations": 1449698, + "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_size_in_bytes": 1214204014, + "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_uncommitted_operations": 1449698, + "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_uncommitted_size_in_bytes": 1214204014, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_count": 90, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_total_capacity_in_bytes": 4571711, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_used_in_bytes": 4571713, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_count": 831, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_total_capacity_in_bytes": 99844219805, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_used_in_bytes": 99844219805, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_old_collection_count": 1, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_old_collection_time_in_millis": 796, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_young_collection_count": 139959, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_young_collection_time_in_millis": 3581668, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_committed_in_bytes": 7864320000, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_used_in_bytes": 1884124192, + "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_used_percent": 23, + "node_tk_U7GMCRkCG4FoOvusrng_process_max_file_descriptors": 1048576, + "node_tk_U7GMCRkCG4FoOvusrng_process_open_file_descriptors": 1180, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_analyze_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_analyze_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_started_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_started_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_store_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_store_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_flush_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_flush_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_force_merge_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_force_merge_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_generic_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_generic_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_get_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_get_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_listener_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_listener_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_management_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_management_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_refresh_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_refresh_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_throttled_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_throttled_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_snapshot_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_snapshot_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_warmer_queue": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_warmer_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_write_queue": 0, + 
"node_tk_U7GMCRkCG4FoOvusrng_thread_pool_write_rejected": 0, + "node_tk_U7GMCRkCG4FoOvusrng_transport_rx_count": 2167879292, + "node_tk_U7GMCRkCG4FoOvusrng_transport_rx_size_in_bytes": 4905919297323, + "node_tk_U7GMCRkCG4FoOvusrng_transport_tx_count": 2167879293, + "node_tk_U7GMCRkCG4FoOvusrng_transport_tx_size_in_bytes": 2964638852652, + }, + }, + "v842: local node stats": { + prepare: func() *Elasticsearch { + es := New() + es.DoNodeStats = true + es.DoClusterHealth = false + es.DoClusterStats = false + es.DoIndicesStats = false + return es + }, + wantCharts: len(nodeChartsTmpl), + wantCollected: map[string]int64{ + "node_Klg1CjgMTouentQcJlRGuA_breakers_accounting_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_fielddata_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_in_flight_requests_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_model_inference_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_parent_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_breakers_request_tripped": 0, + "node_Klg1CjgMTouentQcJlRGuA_http_current_open": 73, + "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_evictions": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_memory_size_in_bytes": 600, + "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total": 35134, + "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total_time_in_millis": 22213090, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_current": 1, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_time_in_millis": 1100149051, + "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_total": 3667793202, + "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total": 7721472, + "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total_time_in_millis": 94304142, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_current": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_time_in_millis": 21316820, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_total": 42645288, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_current": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_time_in_millis": 51265805, + "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_total": 166823028, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_count": 307, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_doc_values_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_fixed_bit_set_memory_in_bytes": 2008, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_index_writer_memory_in_bytes": 240481008, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_norms_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_points_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_stored_fields_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_term_vectors_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_terms_memory_in_bytes": 0, + "node_Klg1CjgMTouentQcJlRGuA_indices_segments_version_map_memory_in_bytes": 44339216, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_operations": 362831, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_size_in_bytes": 453491882, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_operations": 362831, + "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_size_in_bytes": 453491882, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_count": 94, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_total_capacity_in_bytes": 4654848, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_used_in_bytes": 
4654850, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_count": 844, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_total_capacity_in_bytes": 103411995802, + "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_used_in_bytes": 103411995802, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_count": 0, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_time_in_millis": 0, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_count": 78661, + "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_time_in_millis": 6014901, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_committed_in_bytes": 7864320000, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_in_bytes": 4337402488, + "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_percent": 55, + "node_Klg1CjgMTouentQcJlRGuA_process_max_file_descriptors": 1048576, + "node_Klg1CjgMTouentQcJlRGuA_process_open_file_descriptors": 1149, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_queue": 0, + "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_rejected": 0, + "node_Klg1CjgMTouentQcJlRGuA_transport_rx_count": 1300468666, + "node_Klg1CjgMTouentQcJlRGuA_transport_rx_size_in_bytes": 1789647854011, + "node_Klg1CjgMTouentQcJlRGuA_transport_tx_count": 1300468665, + "node_Klg1CjgMTouentQcJlRGuA_transport_tx_size_in_bytes": 2927853534431, + }, + }, + "v842: only cluster_health": { + prepare: func() *Elasticsearch { + es := New() + es.DoNodeStats = false + es.DoClusterHealth = true + es.DoClusterStats = false + es.DoIndicesStats = false + return es + }, + wantCharts: len(clusterHealthChartsTmpl), + wantCollected: map[string]int64{ + "cluster_active_primary_shards": 97, + "cluster_active_shards": 194, + 
"cluster_active_shards_percent_as_number": 100, + "cluster_delayed_unassigned_shards": 0, + "cluster_initializing_shards": 0, + "cluster_number_of_data_nodes": 2, + "cluster_number_of_in_flight_fetch": 0, + "cluster_number_of_nodes": 3, + "cluster_number_of_pending_tasks": 0, + "cluster_relocating_shards": 0, + "cluster_status_green": 1, + "cluster_status_red": 0, + "cluster_status_yellow": 0, + "cluster_unassigned_shards": 0, + }, + }, + "v842: only cluster_stats": { + prepare: func() *Elasticsearch { + es := New() + es.DoNodeStats = false + es.DoClusterHealth = false + es.DoClusterStats = true + es.DoIndicesStats = false + return es + }, + wantCharts: len(clusterStatsChartsTmpl), + wantCollected: map[string]int64{ + "cluster_indices_count": 97, + "cluster_indices_docs_count": 402750703, + "cluster_indices_query_cache_hit_count": 96838726, + "cluster_indices_query_cache_miss_count": 587768226, + "cluster_indices_shards_primaries": 97, + "cluster_indices_shards_replication": 1, + "cluster_indices_shards_total": 194, + "cluster_indices_store_size_in_bytes": 380826136962, + "cluster_nodes_count_coordinating_only": 0, + "cluster_nodes_count_data": 0, + "cluster_nodes_count_data_cold": 0, + "cluster_nodes_count_data_content": 2, + "cluster_nodes_count_data_frozen": 0, + "cluster_nodes_count_data_hot": 2, + "cluster_nodes_count_data_warm": 0, + "cluster_nodes_count_ingest": 2, + "cluster_nodes_count_master": 3, + "cluster_nodes_count_ml": 0, + "cluster_nodes_count_remote_cluster_client": 2, + "cluster_nodes_count_total": 3, + "cluster_nodes_count_transform": 2, + "cluster_nodes_count_voting_only": 1, + }, + }, + "v842: only indices_stats": { + prepare: func() *Elasticsearch { + es := New() + es.DoNodeStats = false + es.DoClusterHealth = false + es.DoClusterStats = false + es.DoIndicesStats = true + return es + }, + wantCharts: len(nodeIndexChartsTmpl) * 3, + wantCollected: map[string]int64{ + "node_index_my-index-000001_stats_docs_count": 1, + "node_index_my-index-000001_stats_health_green": 0, + "node_index_my-index-000001_stats_health_red": 0, + "node_index_my-index-000001_stats_health_yellow": 1, + "node_index_my-index-000001_stats_shards_count": 1, + "node_index_my-index-000001_stats_store_size_in_bytes": 208, + "node_index_my-index-000002_stats_docs_count": 1, + "node_index_my-index-000002_stats_health_green": 0, + "node_index_my-index-000002_stats_health_red": 0, + "node_index_my-index-000002_stats_health_yellow": 1, + "node_index_my-index-000002_stats_shards_count": 1, + "node_index_my-index-000002_stats_store_size_in_bytes": 208, + "node_index_my-index-000003_stats_docs_count": 1, + "node_index_my-index-000003_stats_health_green": 0, + "node_index_my-index-000003_stats_health_red": 0, + "node_index_my-index-000003_stats_health_yellow": 1, + "node_index_my-index-000003_stats_shards_count": 1, + "node_index_my-index-000003_stats_store_size_in_bytes": 208, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + es, cleanup := prepareElasticsearch(t, test.prepare) + defer cleanup() + + var mx map[string]int64 + for i := 0; i < 10; i++ { + mx = es.Collect() + } + + //m := mx + //l := make([]string, 0) + //for k := range m { + // l = append(l, k) + //} + //sort.Strings(l) + //for _, value := range l { + // fmt.Println(fmt.Sprintf("\"%s\": %d,", value, m[value])) + //} + //return + + assert.Equal(t, test.wantCollected, mx) + assert.Len(t, *es.Charts(), test.wantCharts) + ensureCollectedHasAllChartsDimsVarsIDs(t, es, mx) + }) + } +} + +func 
ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, es *Elasticsearch, collected map[string]int64) { + for _, chart := range *es.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Elasticsearch, cleanup func()) { + t.Helper() + srv := prepareElasticsearchEndpoint() + + es = createES() + es.URL = srv.URL + require.True(t, es.Init()) + + return es, srv.Close +} + +func prepareElasticsearchValidData(t *testing.T) (es *Elasticsearch, cleanup func()) { + return prepareElasticsearch(t, New) +} + +func prepareElasticsearchInvalidData(t *testing.T) (*Elasticsearch, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + es := New() + es.URL = srv.URL + require.True(t, es.Init()) + + return es, srv.Close +} + +func prepareElasticsearch404(t *testing.T) (*Elasticsearch, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + es := New() + es.URL = srv.URL + require.True(t, es.Init()) + + return es, srv.Close +} + +func prepareElasticsearchConnectionRefused(t *testing.T) (*Elasticsearch, func()) { + t.Helper() + es := New() + es.URL = "http://127.0.0.1:38001" + require.True(t, es.Init()) + + return es, func() {} +} + +func prepareElasticsearchEndpoint() *httptest.Server { + return httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathNodesStats: + _, _ = w.Write(v842NodesStats) + case urlPathLocalNodeStats: + _, _ = w.Write(v842NodesLocalStats) + case urlPathClusterHealth: + _, _ = w.Write(v842ClusterHealth) + case urlPathClusterStats: + _, _ = w.Write(v842ClusterStats) + case urlPathIndicesStats: + _, _ = w.Write(v842CatIndicesStats) + case "/": + _, _ = w.Write(v842Info) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/init.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/init.go new file mode 100644 index 00000000000000..764c45e4a8c3f5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/init.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (es *Elasticsearch) validateConfig() error { + if es.URL == "" { + return errors.New("URL not set") + } + if !(es.DoNodeStats || es.DoClusterHealth || es.DoClusterStats || es.DoIndicesStats) { + return errors.New("all API calls are disabled") + } + if _, err := web.NewHTTPRequest(es.Request); err != nil { + return err + } + return nil +} + +func (es *Elasticsearch) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(es.Client) +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/elasticsearch.md b/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/elasticsearch.md new file mode 100644 index 00000000000000..a75c4eac83170c --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/elasticsearch.md @@ -0,0 +1,343 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/elasticsearch/integrations/elasticsearch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/elasticsearch/metadata.yaml" +sidebar_label: "Elasticsearch" +learn_status: "Published" +learn_rel_path: "Data Collection/Search Engines" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Elasticsearch + + +<img src="https://netdata.cloud/img/elasticsearch.svg" width="150"/> + + +Plugin: go.d.plugin +Module: elasticsearch + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance and health of the Elasticsearch cluster. + + +It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics. + +Used endpoints: + +| Endpoint | Description | API | +|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------| +| `/` | Node info | | +| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | +| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | +| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) | +| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) | + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by attempting to connect to port 9200: + +- http://127.0.0.1:9200 +- https://127.0.0.1:9200 + + +#### Limits + +By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`. + + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per node + +These metrics refer to the cluster node. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). | +| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). | +| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.node_indices_indexing | index | operations/s | +| elasticsearch.node_indices_indexing_current | index | operations | +| elasticsearch.node_indices_indexing_time | index | milliseconds | +| elasticsearch.node_indices_search | queries, fetches | operations/s | +| elasticsearch.node_indices_search_current | queries, fetches | operations | +| elasticsearch.node_indices_search_time | queries, fetches | milliseconds | +| elasticsearch.node_indices_refresh | refresh | operations/s | +| elasticsearch.node_indices_refresh_time | refresh | milliseconds | +| elasticsearch.node_indices_flush | flush | operations/s | +| elasticsearch.node_indices_flush_time | flush | milliseconds | +| elasticsearch.node_indices_fielddata_memory_usage | used | bytes | +| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s | +| elasticsearch.node_indices_segments_count | segments | segments | +| elasticsearch.node_indices_segments_memory_usage_total | used | bytes | +| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes | +| elasticsearch.node_indices_translog_operations | total, uncommitted | operations | +| elasticsearch.node_indices_translog_size | total, uncommitted | bytes | +| elasticsearch.node_file_descriptors | open | fd | +| elasticsearch.node_jvm_heap | inuse | percentage | +| elasticsearch.node_jvm_heap_bytes | committed, used | bytes | +| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools | +| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes | +| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes | +| elasticsearch.node_jvm_gc_count | young, old | gc/s | +| elasticsearch.node_jvm_gc_time | young, old | milliseconds | +| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads | +| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads | +| elasticsearch.node_cluster_communication_packets | received, sent | pps | +| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s | +| elasticsearch.node_http_connections | open | connections | +| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s | + +### Per cluster + +These metrics refer to the cluster. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
| + Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.cluster_health_status | green, yellow, red | status | +| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes | +| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unassigned | shards | +| elasticsearch.cluster_pending_tasks | pending | tasks | +| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches | +| elasticsearch.cluster_indices_count | indices | indices | +| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards | +| elasticsearch.cluster_indices_docs_count | docs | docs | +| elasticsearch.cluster_indices_store_size | size | bytes | +| elasticsearch.cluster_indices_query_cache | hit, miss | events/s | +| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes | + +### Per index + +These metrics refer to the index. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). | +| index | Name of the index. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.node_index_health | green, yellow, red | status | +| elasticsearch.node_index_shards_count | shards | shards | +| elasticsearch.node_index_docs_count | docs | docs | +| elasticsearch.node_index_store_size | store_size | bytes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. | +| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. | +| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. | +| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. | +| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/elasticsearch.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/elasticsearch.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry.
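As a quick illustration of how the global options interact with per-job settings, here is a minimal sketch of a `go.d/elasticsearch.conf` layout (the values are examples, not recommendations; a per-job `update_every` overrides the global one):

```yaml
# Global options: apply to every job defined in this file.
update_every: 5          # matches this module's default collection frequency
autodetection_retry: 60  # retry failed auto-detection every 60 seconds

jobs:
  - name: local
    url: http://127.0.0.1:9200
    update_every: 10     # per-job override takes precedence over the global value
```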
+ + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:9200 | yes | +| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no | +| collect_node_stats | Controls whether to collect nodes metrics. | true | no | +| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no | +| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no | +| collect_indices_stats | Controls whether to collect indices metrics. | false | no | +| timeout | HTTP request timeout. | 5 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic single node mode + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9200 + +``` +##### Cluster mode + +Cluster mode example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9200 + cluster_mode: yes + +``` +</details> + +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9200 + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Elasticsearch with HTTPS enabled and a self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9200 + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9200 + + - name: remote + url: http://192.0.2.1:9200 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m elasticsearch + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/opensearch.md b/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/opensearch.md new file mode 100644 index 00000000000000..b19c15e42a65e1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/integrations/opensearch.md @@ -0,0 +1,343 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/elasticsearch/integrations/opensearch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/elasticsearch/metadata.yaml" +sidebar_label: "OpenSearch" +learn_status: "Published" +learn_rel_path: "Data Collection/Search Engines" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenSearch + + +<img src="https://netdata.cloud/img/opensearch.svg" width="150"/> + + +Plugin: go.d.plugin +Module: elasticsearch + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance and health of the Elasticsearch cluster. + + +It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics. + +Used endpoints: + +| Endpoint | Description | API | +|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------| +| `/` | Node info | | +| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | +| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | +| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) | +| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) | + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by attempting to connect to port 9200: + +- http://127.0.0.1:9200 +- https://127.0.0.1:9200 + + +#### Limits + +By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`. + + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per node + +These metrics refer to the cluster node. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). | +| node_name | Human-readable identifier for the node. 
Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). | +| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.node_indices_indexing | index | operations/s | +| elasticsearch.node_indices_indexing_current | index | operations | +| elasticsearch.node_indices_indexing_time | index | milliseconds | +| elasticsearch.node_indices_search | queries, fetches | operations/s | +| elasticsearch.node_indices_search_current | queries, fetches | operations | +| elasticsearch.node_indices_search_time | queries, fetches | milliseconds | +| elasticsearch.node_indices_refresh | refresh | operations/s | +| elasticsearch.node_indices_refresh_time | refresh | milliseconds | +| elasticsearch.node_indices_flush | flush | operations/s | +| elasticsearch.node_indices_flush_time | flush | milliseconds | +| elasticsearch.node_indices_fielddata_memory_usage | used | bytes | +| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s | +| elasticsearch.node_indices_segments_count | segments | segments | +| elasticsearch.node_indices_segments_memory_usage_total | used | bytes | +| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes | +| elasticsearch.node_indices_translog_operations | total, uncommitted | operations | +| elasticsearch.node_indices_translog_size | total, uncommitted | bytes | +| elasticsearch.node_file_descriptors | open | fd | +| elasticsearch.node_jvm_heap | inuse | percentage | +| elasticsearch.node_jvm_heap_bytes | committed, used | bytes | +| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools | +| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes | +| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes | +| elasticsearch.node_jvm_gc_count | young, old | gc/s | +| elasticsearch.node_jvm_gc_time | young, old | milliseconds | +| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads | +| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads | +| elasticsearch.node_cluster_communication_packets | received, sent | pps | +| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s | +| elasticsearch.node_http_connections | open | connections | +| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s | + +### Per cluster + +These metrics refer to the cluster. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.cluster_health_status | green, yellow, red | status | +| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes | +| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards | +| elasticsearch.cluster_pending_tasks | pending | tasks | +| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches | +| elasticsearch.cluster_indices_count | indices | indices | +| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards | +| elasticsearch.cluster_indices_docs_count | docs | docs | +| elasticsearch.cluster_indices_store_size | size | bytes | +| elasticsearch.cluster_indices_query_cache | hit, miss | events/s | +| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes | + +### Per index + +These metrics refer to the index. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). | +| index | Name of the index. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| elasticsearch.node_index_health | green, yellow, red | status | +| elasticsearch.node_index_shards_count | shards | shards | +| elasticsearch.node_index_docs_count | docs | docs | +| elasticsearch.node_index_store_size | store_size | bytes | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. | +| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. | +| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. | +| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. | +| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/elasticsearch.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/elasticsearch.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. 
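+
+For instance, a minimal sketch of how the global and per-job levels interact (the values shown are illustrative, not recommendations):
+
+```yaml
+# Top-level settings apply to every job unless a job overrides them.
+update_every: 5
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9200
+    update_every: 10 # overrides the global value for this job only
+```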
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9200 | yes |
+| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |
+| collect_node_stats | Controls whether to collect node metrics. | true | no |
+| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |
+| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |
+| collect_indices_stats | Controls whether to collect index metrics. | false | no |
+| timeout | HTTP request timeout. | 5 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic single node mode
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9200
+
+```
+##### Cluster mode
+
+Cluster mode example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9200
+    cluster_mode: yes
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9200
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Elasticsearch with HTTPS enabled and a self-signed certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9200
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9200
+
+  - name: remote
+    url: http://192.0.2.1:9200
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m elasticsearch + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/metadata.yaml b/src/go/collectors/go.d.plugin/modules/elasticsearch/metadata.yaml new file mode 100644 index 00000000000000..f8458e3f1557f4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/metadata.yaml @@ -0,0 +1,634 @@ +plugin_name: go.d.plugin +modules: + - &module + meta: &meta + id: collector-go.d.plugin-elasticsearch + module_name: elasticsearch + plugin_name: go.d.plugin + monitored_instance: + name: Elasticsearch + link: https://www.elastic.co/elasticsearch/ + icon_filename: elasticsearch.svg + categories: + - data-collection.search-engines + keywords: + - elastic + - elasticsearch + - opensearch + - search engine + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + - plugin_name: cgroups.plugin + module_name: cgroups + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors the performance and health of the Elasticsearch cluster. + method_description: | + It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics. + + Used endpoints: + + | Endpoint | Description | API | + |------------------------|----------------------|-------------------------------------------------------------------------------------------------------------| + | `/` | Node info | | + | `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | + | `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) | + | `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) | + | `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) | + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects instances running on localhost by attempting to connect to port 9200: + + - http://127.0.0.1:9200 + - https://127.0.0.1:9200 + limits: + description: | + By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`. + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: "go.d/elasticsearch.conf" + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:9200 + required: true + - name: cluster_mode + description: Controls whether to collect metrics for all nodes in the cluster or only for the local node. 
+              default_value: "false"
+              required: false
+            - name: collect_node_stats
+              description: Controls whether to collect node metrics.
+              default_value: "true"
+              required: false
+            - name: collect_cluster_health
+              description: Controls whether to collect cluster health metrics.
+              default_value: "true"
+              required: false
+            - name: collect_cluster_stats
+              description: Controls whether to collect cluster stats metrics.
+              default_value: "true"
+              required: false
+            - name: collect_indices_stats
+              description: Controls whether to collect index metrics.
+              default_value: "false"
+              required: false
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 5
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic single node mode
+              description: A basic example configuration.
+              folding:
+                enabled: false
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9200
+            - name: Cluster mode
+              description: Cluster mode example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9200
+                    cluster_mode: yes
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9200
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: Elasticsearch with HTTPS enabled and a self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:9200
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+ config: | + jobs: + - name: local + url: http://127.0.0.1:9200 + + - name: remote + url: http://192.0.2.1:9200 + troubleshooting: + problems: + list: [] + alerts: + - name: elasticsearch_node_indices_search_time_query + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf + metric: elasticsearch.node_indices_search_time + info: search performance is degraded, queries run slowly. + - name: elasticsearch_node_indices_search_time_fetch + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf + metric: elasticsearch.node_indices_search_time + info: search performance is degraded, fetches run slowly. + - name: elasticsearch_cluster_health_status_red + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf + metric: elasticsearch.cluster_health_status + info: cluster health status is red. + - name: elasticsearch_cluster_health_status_yellow + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf + metric: elasticsearch.cluster_health_status + info: cluster health status is yellow. + - name: elasticsearch_node_index_health_red + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf + metric: elasticsearch.node_index_health + info: node index $label:index health status is red. + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: node + description: These metrics refer to the cluster node. + labels: + - name: cluster_name + description: | + Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + - name: node_name + description: | + Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). + - name: host + description: | + Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). 
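+          # Each entry under "metrics" below defines one chart for this scope;
+          # its "dimensions" are the lines drawn on that chart.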
+ metrics: + - name: elasticsearch.node_indices_indexing + description: Indexing Operations + unit: operations/s + chart_type: line + dimensions: + - name: index + - name: elasticsearch.node_indices_indexing_current + description: Indexing Operations Current + unit: operations + chart_type: line + dimensions: + - name: index + - name: elasticsearch.node_indices_indexing_time + description: Time Spent On Indexing Operations + unit: milliseconds + chart_type: line + dimensions: + - name: index + - name: elasticsearch.node_indices_search + description: Search Operations + unit: operations/s + chart_type: stacked + dimensions: + - name: queries + - name: fetches + - name: elasticsearch.node_indices_search_current + description: Search Operations Current + unit: operations + chart_type: stacked + dimensions: + - name: queries + - name: fetches + - name: elasticsearch.node_indices_search_time + description: node_indices_search_time + unit: milliseconds + chart_type: stacked + dimensions: + - name: queries + - name: fetches + - name: elasticsearch.node_indices_refresh + description: Refresh Operations + unit: operations/s + chart_type: line + dimensions: + - name: refresh + - name: elasticsearch.node_indices_refresh_time + description: Time Spent On Refresh Operations + unit: milliseconds + chart_type: line + dimensions: + - name: refresh + - name: elasticsearch.node_indices_flush + description: Flush Operations + unit: operations/s + chart_type: line + dimensions: + - name: flush + - name: elasticsearch.node_indices_flush_time + description: Time Spent On Flush Operations + unit: milliseconds + chart_type: line + dimensions: + - name: flush + - name: elasticsearch.node_indices_fielddata_memory_usage + description: Fielddata Cache Memory Usage + unit: bytes + chart_type: area + dimensions: + - name: used + - name: elasticsearch.node_indices_fielddata_evictions + description: Fielddata Evictions + unit: operations/s + chart_type: line + dimensions: + - name: evictions + - name: elasticsearch.node_indices_segments_count + description: Segments Count + unit: segments + chart_type: line + dimensions: + - name: segments + - name: elasticsearch.node_indices_segments_memory_usage_total + description: Segments Memory Usage Total + unit: bytes + chart_type: line + dimensions: + - name: used + - name: elasticsearch.node_indices_segments_memory_usage + description: Segments Memory Usage + unit: bytes + chart_type: stacked + dimensions: + - name: terms + - name: stored_fields + - name: term_vectors + - name: norms + - name: points + - name: doc_values + - name: index_writer + - name: version_map + - name: fixed_bit_set + - name: elasticsearch.node_indices_translog_operations + description: Translog Operations + unit: operations + chart_type: area + dimensions: + - name: total + - name: uncommitted + - name: elasticsearch.node_indices_translog_size + description: Translog Size + unit: bytes + chart_type: area + dimensions: + - name: total + - name: uncommitted + - name: elasticsearch.node_file_descriptors + description: Process File Descriptors + unit: fd + chart_type: line + dimensions: + - name: open + - name: elasticsearch.node_jvm_heap + description: JVM Heap Percentage Currently in Use + unit: percentage + chart_type: area + dimensions: + - name: inuse + - name: elasticsearch.node_jvm_heap_bytes + description: JVM Heap Commit And Usage + unit: bytes + chart_type: area + dimensions: + - name: committed + - name: used + - name: elasticsearch.node_jvm_buffer_pools_count + description: JVM Buffer Pools Count 
+ unit: pools + chart_type: line + dimensions: + - name: direct + - name: mapped + - name: elasticsearch.node_jvm_buffer_pool_direct_memory + description: JVM Buffer Pool Direct Memory + unit: bytes + chart_type: area + dimensions: + - name: total + - name: used + - name: elasticsearch.node_jvm_buffer_pool_mapped_memory + description: JVM Buffer Pool Mapped Memory + unit: bytes + chart_type: area + dimensions: + - name: total + - name: used + - name: elasticsearch.node_jvm_gc_count + description: JVM Garbage Collections + unit: gc/s + chart_type: stacked + dimensions: + - name: young + - name: old + - name: elasticsearch.node_jvm_gc_time + description: JVM Time Spent On Garbage Collections + unit: milliseconds + chart_type: stacked + dimensions: + - name: young + - name: old + - name: elasticsearch.node_thread_pool_queued + description: Thread Pool Queued Threads Count + unit: threads + chart_type: stacked + dimensions: + - name: generic + - name: search + - name: search_throttled + - name: get + - name: analyze + - name: write + - name: snapshot + - name: warmer + - name: refresh + - name: listener + - name: fetch_shard_started + - name: fetch_shard_store + - name: flush + - name: force_merge + - name: management + - name: elasticsearch.node_thread_pool_rejected + description: Thread Pool Rejected Threads Count + unit: threads + chart_type: stacked + dimensions: + - name: generic + - name: search + - name: search_throttled + - name: get + - name: analyze + - name: write + - name: snapshot + - name: warmer + - name: refresh + - name: listener + - name: fetch_shard_started + - name: fetch_shard_store + - name: flush + - name: force_merge + - name: management + - name: elasticsearch.node_cluster_communication_packets + description: Cluster Communication + unit: pps + chart_type: line + dimensions: + - name: received + - name: sent + - name: elasticsearch.node_cluster_communication_traffic + description: Cluster Communication Bandwidth + unit: bytes/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: elasticsearch.node_http_connections + description: HTTP Connections + unit: connections + chart_type: line + dimensions: + - name: open + - name: elasticsearch.node_breakers_trips + description: Circuit Breaker Trips Count + unit: trips/s + chart_type: stacked + dimensions: + - name: requests + - name: fielddata + - name: in_flight_requests + - name: model_inference + - name: accounting + - name: parent + - name: cluster + description: These metrics refer to the cluster. + labels: + - name: cluster_name + description: | + Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). 
+ metrics: + - name: elasticsearch.cluster_health_status + description: Cluster Status + unit: status + chart_type: line + dimensions: + - name: green + - name: yellow + - name: red + - name: elasticsearch.cluster_number_of_nodes + description: Cluster Nodes Count + unit: nodes + chart_type: line + dimensions: + - name: nodes + - name: data_nodes + - name: elasticsearch.cluster_shards_count + description: Cluster Shards Count + unit: shards + chart_type: line + dimensions: + - name: active_primary + - name: active + - name: relocating + - name: initializing + - name: unassigned + - name: delayed_unaasigned + - name: elasticsearch.cluster_pending_tasks + description: Cluster Pending Tasks + unit: tasks + chart_type: line + dimensions: + - name: pending + - name: elasticsearch.cluster_number_of_in_flight_fetch + description: Cluster Unfinished Fetches + unit: fetches + chart_type: line + dimensions: + - name: in_flight_fetch + - name: elasticsearch.cluster_indices_count + description: Cluster Indices Count + unit: indices + chart_type: line + dimensions: + - name: indices + - name: elasticsearch.cluster_indices_shards_count + description: Cluster Indices Shards Count + unit: shards + chart_type: line + dimensions: + - name: total + - name: primaries + - name: replication + - name: elasticsearch.cluster_indices_docs_count + description: Cluster Indices Docs Count + unit: docs + chart_type: line + dimensions: + - name: docs + - name: elasticsearch.cluster_indices_store_size + description: Cluster Indices Store Size + unit: bytes + chart_type: line + dimensions: + - name: size + - name: elasticsearch.cluster_indices_query_cache + description: Cluster Indices Query Cache + unit: events/s + chart_type: line + dimensions: + - name: hit + - name: miss + - name: elasticsearch.cluster_nodes_by_role_count + description: Cluster Nodes By Role Count + unit: nodes + chart_type: line + dimensions: + - name: coordinating_only + - name: data + - name: data_cold + - name: data_content + - name: data_frozen + - name: data_hot + - name: data_warm + - name: ingest + - name: master + - name: ml + - name: remote_cluster_client + - name: voting_only + - name: index + description: These metrics refer to the index. + labels: + - name: cluster_name + description: | + Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). + - name: index + description: Name of the index. 
+ metrics: + - name: elasticsearch.node_index_health + description: Index Health + unit: status + chart_type: line + dimensions: + - name: green + - name: yellow + - name: red + - name: elasticsearch.node_index_shards_count + description: Index Shards Count + unit: shards + chart_type: line + dimensions: + - name: shards + - name: elasticsearch.node_index_docs_count + description: Index Docs Count + unit: docs + chart_type: line + dimensions: + - name: docs + - name: elasticsearch.node_index_store_size + description: Index Store Size + unit: bytes + chart_type: line + dimensions: + - name: store_size + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-opensearch + monitored_instance: + name: OpenSearch + link: https://opensearch.org/ + icon_filename: opensearch.svg + categories: + - data-collection.search-engines diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/metrics.go b/src/go/collectors/go.d.plugin/modules/elasticsearch/metrics.go new file mode 100644 index 00000000000000..e838dc643edaf1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/metrics.go @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package elasticsearch + +// https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html + +type esMetrics struct { + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html + NodesStats *esNodesStats + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html + ClusterHealth *esClusterHealth + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html + ClusterStats *esClusterStats + // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html + LocalIndicesStats []esIndexStats +} + +func (m esMetrics) empty() bool { + switch { + case m.hasNodesStats(), m.hasClusterHealth(), m.hasClusterStats(), m.hasLocalIndicesStats(): + return false + } + return true +} + +func (m esMetrics) hasNodesStats() bool { return m.NodesStats != nil && len(m.NodesStats.Nodes) > 0 } +func (m esMetrics) hasClusterHealth() bool { return m.ClusterHealth != nil } +func (m esMetrics) hasClusterStats() bool { return m.ClusterStats != nil } +func (m esMetrics) hasLocalIndicesStats() bool { return len(m.LocalIndicesStats) > 0 } + +type ( + esNodesStats struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*esNodeStats `json:"nodes"` + } + esNodeStats struct { + Name string + Host string + Indices struct { + Indexing struct { + IndexTotal float64 `stm:"index_total" json:"index_total"` + IndexCurrent float64 `stm:"index_current" json:"index_current"` + IndexTimeInMillis float64 `stm:"index_time_in_millis" json:"index_time_in_millis"` + } `stm:"indexing"` + Search struct { + FetchTotal float64 `stm:"fetch_total" json:"fetch_total"` + FetchCurrent float64 `stm:"fetch_current" json:"fetch_current"` + FetchTimeInMillis float64 `stm:"fetch_time_in_millis" json:"fetch_time_in_millis"` + QueryTotal float64 `stm:"query_total" json:"query_total"` + QueryCurrent float64 `stm:"query_current" json:"query_current"` + QueryTimeInMillis float64 `stm:"query_time_in_millis" json:"query_time_in_millis"` + } `stm:"search"` + Refresh struct { + Total float64 `stm:"total"` + TimeInMillis float64 `stm:"total_time_in_millis" json:"total_time_in_millis"` + } `stm:"refresh"` + Flush struct { + Total float64 `stm:"total"` + TimeInMillis float64 `stm:"total_time_in_millis" json:"total_time_in_millis"` + } `stm:"flush"` + FieldData struct { + 
MemorySizeInBytes float64 `stm:"memory_size_in_bytes" json:"memory_size_in_bytes"` + Evictions float64 `stm:"evictions"` + } `stm:"fielddata"` + Segments struct { + Count float64 `stm:"count" json:"count"` + MemoryInBytes float64 `stm:"memory_in_bytes" json:"memory_in_bytes"` + TermsMemoryInBytes float64 `stm:"terms_memory_in_bytes" json:"terms_memory_in_bytes"` + StoredFieldsMemoryInBytes float64 `stm:"stored_fields_memory_in_bytes" json:"stored_fields_memory_in_bytes"` + TermVectorsMemoryInBytes float64 `stm:"term_vectors_memory_in_bytes" json:"term_vectors_memory_in_bytes"` + NormsMemoryInBytes float64 `stm:"norms_memory_in_bytes" json:"norms_memory_in_bytes"` + PointsMemoryInBytes float64 `stm:"points_memory_in_bytes" json:"points_memory_in_bytes"` + DocValuesMemoryInBytes float64 `stm:"doc_values_memory_in_bytes" json:"doc_values_memory_in_bytes"` + IndexWriterMemoryInBytes float64 `stm:"index_writer_memory_in_bytes" json:"index_writer_memory_in_bytes"` + VersionMapMemoryInBytes float64 `stm:"version_map_memory_in_bytes" json:"version_map_memory_in_bytes"` + FixedBitSetMemoryInBytes float64 `stm:"fixed_bit_set_memory_in_bytes" json:"fixed_bit_set_memory_in_bytes"` + } `stm:"segments"` + Translog struct { + Operations float64 `stm:"operations"` + SizeInBytes float64 `stm:"size_in_bytes" json:"size_in_bytes"` + UncommittedOperations float64 `stm:"uncommitted_operations" json:"uncommitted_operations"` + UncommittedSizeInBytes float64 `stm:"uncommitted_size_in_bytes" json:"uncommitted_size_in_bytes"` + } `stm:"translog"` + } `stm:"indices"` + Process struct { + OpenFileDescriptors float64 `stm:"open_file_descriptors" json:"open_file_descriptors"` + MaxFileDescriptors float64 `stm:"max_file_descriptors" json:"max_file_descriptors"` + } `stm:"process"` + JVM struct { + Mem struct { + HeapUsedPercent float64 `stm:"heap_used_percent" json:"heap_used_percent"` + HeapUsedInBytes float64 `stm:"heap_used_in_bytes" json:"heap_used_in_bytes"` + HeapCommittedInBytes float64 `stm:"heap_committed_in_bytes" json:"heap_committed_in_bytes"` + } `stm:"mem"` + GC struct { + Collectors struct { + Young struct { + CollectionCount float64 `stm:"collection_count" json:"collection_count"` + CollectionTimeInMillis float64 `stm:"collection_time_in_millis" json:"collection_time_in_millis"` + } `stm:"young"` + Old struct { + CollectionCount float64 `stm:"collection_count" json:"collection_count"` + CollectionTimeInMillis float64 `stm:"collection_time_in_millis" json:"collection_time_in_millis"` + } `stm:"old"` + } `stm:"collectors"` + } `stm:"gc"` + BufferPools struct { + Mapped struct { + Count float64 `stm:"count"` + UsedInBytes float64 `stm:"used_in_bytes" json:"used_in_bytes"` + TotalCapacityInBytes float64 `stm:"total_capacity_in_bytes" json:"total_capacity_in_bytes"` + } `stm:"mapped"` + Direct struct { + Count float64 `stm:"count"` + UsedInBytes float64 `stm:"used_in_bytes" json:"used_in_bytes"` + TotalCapacityInBytes float64 `stm:"total_capacity_in_bytes" json:"total_capacity_in_bytes"` + } `stm:"direct"` + } `stm:"buffer_pools" json:"buffer_pools"` + } `stm:"jvm"` + // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html + ThreadPool struct { + Generic struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"generic"` + Search struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"search"` + SearchThrottled struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"search_throttled" 
json:"search_throttled"` + Get struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"get"` + Analyze struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"analyze"` + Write struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"write"` + Snapshot struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"snapshot"` + Warmer struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"warmer"` + Refresh struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"refresh"` + Listener struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"listener"` + FetchShardStarted struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"fetch_shard_started" json:"fetch_shard_started"` + FetchShardStore struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"fetch_shard_store" json:"fetch_shard_store"` + Flush struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"flush"` + ForceMerge struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"force_merge" json:"force_merge"` + Management struct { + Queue float64 `stm:"queue"` + Rejected float64 `stm:"rejected"` + } `stm:"management"` + } `stm:"thread_pool" json:"thread_pool"` + Transport struct { + RxCount float64 `stm:"rx_count" json:"rx_count"` + RxSizeInBytes float64 `stm:"rx_size_in_bytes" json:"rx_size_in_bytes"` + TxCount float64 `stm:"tx_count" json:"tx_count"` + TxSizeInBytes float64 `stm:"tx_size_in_bytes" json:"tx_size_in_bytes"` + } `stm:"transport"` + HTTP struct { + CurrentOpen float64 `stm:"current_open" json:"current_open"` + } `stm:"http"` + Breakers struct { + Request struct { + Tripped float64 `stm:"tripped"` + } `stm:"request"` + FieldData struct { + Tripped float64 `stm:"tripped"` + } `stm:"fielddata"` + InFlightRequests struct { + Tripped float64 `stm:"tripped"` + } `stm:"in_flight_requests" json:"in_flight_requests"` + ModelInference struct { + Tripped float64 `stm:"tripped"` + } `stm:"model_inference" json:"model_inference"` + Accounting struct { + Tripped float64 `stm:"tripped"` + } `stm:"accounting"` + Parent struct { + Tripped float64 `stm:"tripped"` + } `stm:"parent"` + } `stm:"breakers"` + } +) + +type esClusterHealth struct { + ClusterName string `json:"cluster_name"` + Status string + NumOfNodes float64 `stm:"number_of_nodes" json:"number_of_nodes"` + NumOfDataNodes float64 `stm:"number_of_data_nodes" json:"number_of_data_nodes"` + ActivePrimaryShards float64 `stm:"active_primary_shards" json:"active_primary_shards"` + ActiveShards float64 `stm:"active_shards" json:"active_shards"` + RelocatingShards float64 `stm:"relocating_shards" json:"relocating_shards"` + InitializingShards float64 `stm:"initializing_shards" json:"initializing_shards"` + UnassignedShards float64 `stm:"unassigned_shards" json:"unassigned_shards"` + DelayedUnassignedShards float64 `stm:"delayed_unassigned_shards" json:"delayed_unassigned_shards"` + NumOfPendingTasks float64 `stm:"number_of_pending_tasks" json:"number_of_pending_tasks"` + NumOfInFlightFetch float64 `stm:"number_of_in_flight_fetch" json:"number_of_in_flight_fetch"` + ActiveShardsPercentAsNumber float64 `stm:"active_shards_percent_as_number" json:"active_shards_percent_as_number"` +} + +type esClusterStats struct { + ClusterName string `json:"cluster_name"` + Nodes struct { + Count struct 
{ + Total float64 `stm:"total"` + CoordinatingOnly float64 `stm:"coordinating_only" json:"coordinating_only"` + Data float64 `stm:"data"` + DataCold float64 `stm:"data_cold" json:"data_cold"` + DataContent float64 `stm:"data_content" json:"data_content"` + DataFrozen float64 `stm:"data_frozen" json:"data_frozen"` + DataHot float64 `stm:"data_hot" json:"data_hot"` + DataWarm float64 `stm:"data_warm" json:"data_warm"` + Ingest float64 `stm:"ingest"` + Master float64 `stm:"master"` + ML float64 `stm:"ml"` + RemoteClusterClient float64 `stm:"remote_cluster_client" json:"remote_cluster_client"` + Transform float64 `stm:"transform"` + VotingOnly float64 `stm:"voting_only" json:"voting_only"` + } `stm:"count"` + } `stm:"nodes"` + Indices struct { + Count float64 `stm:"count"` + Shards struct { + Total float64 `stm:"total"` + Primaries float64 `stm:"primaries"` + Replication float64 `stm:"replication"` + } `stm:"shards"` + Docs struct { + Count float64 `stm:"count"` + } `stm:"docs"` + Store struct { + SizeInBytes float64 `stm:"size_in_bytes" json:"size_in_bytes"` + } `stm:"store"` + QueryCache struct { + HitCount float64 `stm:"hit_count" json:"hit_count"` + MissCount float64 `stm:"miss_count" json:"miss_count"` + } `stm:"query_cache" json:"query_cache"` + } `stm:"indices"` +} + +type esIndexStats struct { + Index string + Health string + Rep string + DocsCount string `json:"docs.count"` + StoreSize string `json:"store.size"` +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json new file mode 100644 index 00000000000000..f46794cc46a59b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json @@ -0,0 +1,50 @@ +[ + { + "health": "yellow", + "status": "open", + "index": "my-index-000003", + "uuid": "Clrvbw-AQ5CB3xWI3MUXFg", + "pri": "1", + "rep": "1", + "docs.count": "1", + "docs.deleted": "1", + "store.size": "208b", + "pri.store.size": "208b" + }, + { + "health": "yellow", + "status": "open", + "index": "my-index-000002", + "uuid": "z7cy4d2PQYSSJDhi8dIjWg", + "pri": "1", + "rep": "1", + "docs.count": "1", + "docs.deleted": "1", + "store.size": "208b", + "pri.store.size": "208b" + }, + { + "health": "yellow", + "status": "open", + "index": "my-index-000001", + "uuid": "08YTiZfmQUiO67VOGZOfVg", + "pri": "1", + "rep": "1", + "docs.count": "1", + "docs.deleted": "1", + "store.size": "208b", + "pri.store.size": "208b" + }, + { + "health": "yellow", + "status": "open", + "index": ".my-system-index-000001", + "uuid": "08YTiZfmQUiO67VOGZOfVg", + "pri": "1", + "rep": "1", + "docs.count": "1", + "docs.deleted": "1", + "store.size": "208b", + "pri.store.size": "208b" + } +] diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_health.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_health.json new file mode 100644 index 00000000000000..0fdc0de49099b3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_health.json @@ -0,0 +1,17 @@ +{ + "cluster_name": "36928dce44074ceba64d7b3d698443a7", + "status": "green", + "timed_out": false, + "number_of_nodes": 3, + "number_of_data_nodes": 2, + "active_primary_shards": 97, + "active_shards": 194, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0, + "delayed_unassigned_shards": 0, + "number_of_pending_tasks": 0, + 
"number_of_in_flight_fetch": 0, + "task_max_waiting_in_queue_millis": 0, + "active_shards_percent_as_number": 100 +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json new file mode 100644 index 00000000000000..53bea1b3465d4c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json @@ -0,0 +1,377 @@ +{ + "_nodes": { + "total": 3, + "successful": 3, + "failed": 0 + }, + "cluster_name": "36928dce44074ceba64d7b3d698443a7", + "cluster_uuid": "5jO2X31FQ32kJAWoCsp3Vw", + "timestamp": 1687866240414, + "status": "green", + "indices": { + "count": 97, + "shards": { + "total": 194, + "primaries": 97, + "replication": 1, + "index": { + "shards": { + "min": 2, + "max": 2, + "avg": 2 + }, + "primaries": { + "min": 1, + "max": 1, + "avg": 1 + }, + "replication": { + "min": 1, + "max": 1, + "avg": 1 + } + } + }, + "docs": { + "count": 402750703, + "deleted": 1603 + }, + "store": { + "size_in_bytes": 380826136962, + "total_data_set_size_in_bytes": 380826136962, + "reserved_in_bytes": 0 + }, + "fielddata": { + "memory_size_in_bytes": 600, + "evictions": 0 + }, + "query_cache": { + "memory_size_in_bytes": 37465951, + "total_count": 684606952, + "hit_count": 96838726, + "miss_count": 587768226, + "cache_size": 22571, + "cache_count": 91319, + "evictions": 68748 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 614, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 0, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 0, + "index_writer_memory_in_bytes": 368167356, + "version_map_memory_in_bytes": 54470768, + "fixed_bit_set_memory_in_bytes": 57736, + "max_unsafe_auto_id_timestamp": 1679747033889, + "file_sizes": {} + }, + "mappings": { + "total_field_count": 10467, + "total_deduplicated_field_count": 2070, + "total_deduplicated_mapping_size_in_bytes": 26441, + "field_types": [ + { + "name": "alias", + "count": 1, + "index_count": 1, + "script_count": 0 + }, + { + "name": "boolean", + "count": 77, + "index_count": 37, + "script_count": 0 + }, + { + "name": "constant_keyword", + "count": 10, + "index_count": 4, + "script_count": 0 + }, + { + "name": "date", + "count": 763, + "index_count": 86, + "script_count": 0 + }, + { + "name": "flattened", + "count": 27, + "index_count": 27, + "script_count": 0 + }, + { + "name": "float", + "count": 8, + "index_count": 4, + "script_count": 0 + }, + { + "name": "integer", + "count": 279, + "index_count": 70, + "script_count": 0 + }, + { + "name": "ip", + "count": 4, + "index_count": 4, + "script_count": 0 + }, + { + "name": "keyword", + "count": 4345, + "index_count": 86, + "script_count": 0 + }, + { + "name": "long", + "count": 1143, + "index_count": 79, + "script_count": 0 + }, + { + "name": "match_only_text", + "count": 1170, + "index_count": 69, + "script_count": 0 + }, + { + "name": "nested", + "count": 4, + "index_count": 4, + "script_count": 0 + }, + { + "name": "object", + "count": 2583, + "index_count": 85, + "script_count": 0 + }, + { + "name": "text", + "count": 49, + "index_count": 17, + "script_count": 0 + }, + { + "name": "version", + "count": 4, + "index_count": 4, + "script_count": 0 + } + ], + "runtime_field_types": [] + }, + "analysis": { + "char_filter_types": [], + "tokenizer_types": [], + "filter_types": [], + "analyzer_types": 
[], + "built_in_char_filters": [], + "built_in_tokenizers": [], + "built_in_filters": [], + "built_in_analyzers": [] + }, + "versions": [ + { + "version": "8.4.2", + "index_count": 97, + "primary_shard_count": 97, + "total_primary_bytes": 189671468048 + } + ] + }, + "nodes": { + "count": { + "total": 3, + "coordinating_only": 0, + "data": 0, + "data_cold": 0, + "data_content": 2, + "data_frozen": 0, + "data_hot": 2, + "data_warm": 0, + "ingest": 2, + "master": 3, + "ml": 0, + "remote_cluster_client": 2, + "transform": 2, + "voting_only": 1 + }, + "versions": [ + "8.4.2" + ], + "os": { + "available_processors": 8, + "allocated_processors": 8, + "names": [ + { + "name": "Linux", + "count": 3 + } + ], + "pretty_names": [ + { + "pretty_name": "Ubuntu 20.04.5 LTS", + "count": 3 + } + ], + "architectures": [ + { + "arch": "amd64", + "count": 3 + } + ], + "mem": { + "total_in_bytes": 33285996544, + "adjusted_total_in_bytes": 32153534464, + "free_in_bytes": 1732333568, + "used_in_bytes": 31553662976, + "free_percent": 5, + "used_percent": 95 + } + }, + "process": { + "cpu": { + "percent": 26 + }, + "open_file_descriptors": { + "min": 557, + "max": 1185, + "avg": 968 + } + }, + "jvm": { + "max_uptime_in_millis": 23671188288, + "versions": [ + { + "version": "18.0.2.1", + "vm_name": "OpenJDK 64-Bit Server VM", + "vm_version": "18.0.2.1+1-1", + "vm_vendor": "Oracle Corporation", + "bundled_jdk": true, + "using_bundled_jdk": true, + "count": 3 + } + ], + "mem": { + "heap_used_in_bytes": 8044798544, + "heap_max_in_bytes": 16009658368 + }, + "threads": 272 + }, + "fs": { + "total_in_bytes": 979252543488, + "free_in_bytes": 595738775552, + "available_in_bytes": 595738775552 + }, + "plugins": [], + "network_types": { + "transport_types": { + "security4": 3 + }, + "http_types": { + "security4": 3 + } + }, + "discovery_types": { + "multi-node": 3 + }, + "packaging_types": [ + { + "flavor": "default", + "type": "docker", + "count": 3 + } + ], + "ingest": { + "number_of_pipelines": 20, + "processor_stats": { + "conditional": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "date": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "geoip": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "pipeline": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "remove": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "rename": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "script": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "set": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + }, + "set_security_user": { + "count": 0, + "failed": 0, + "current": 0, + "time_in_millis": 0 + } + } + }, + "indexing_pressure": { + "memory": { + "current": { + "combined_coordinating_and_primary_in_bytes": 0, + "coordinating_in_bytes": 0, + "primary_in_bytes": 0, + "replica_in_bytes": 0, + "all_in_bytes": 0 + }, + "total": { + "combined_coordinating_and_primary_in_bytes": 0, + "coordinating_in_bytes": 0, + "primary_in_bytes": 0, + "replica_in_bytes": 0, + "all_in_bytes": 0, + "coordinating_rejections": 0, + "primary_rejections": 0, + "replica_rejections": 0 + }, + "limit_in_bytes": 0 + } + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/info.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/info.json new file mode 100644 index 00000000000000..23e3f15963e2ce --- 
/dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/info.json @@ -0,0 +1,17 @@ +{ + "name": "instance-0000000006", + "cluster_name": "36928dce44074ceba64d7b3d698443a7", + "cluster_uuid": "5jO2X31FQ32kJAWoCsp3Vw", + "version": { + "number": "8.4.2", + "build_flavor": "default", + "build_type": "docker", + "build_hash": "89f8c6d8429db93b816403ee75e5c270b43a940a", + "build_date": "2022-09-14T16:26:04.382547801Z", + "build_snapshot": false, + "lucene_version": "9.3.0", + "minimum_wire_compatibility_version": "7.17.0", + "minimum_index_compatibility_version": "7.0.0" + }, + "tagline": "You Know, for Search" +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json new file mode 100644 index 00000000000000..77e0ad0ba53a27 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json @@ -0,0 +1,867 @@ +{ + "_nodes": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "cluster_name": "36928dce44074ceba64d7b3d698443a7", + "nodes": { + "Klg1CjgMTouentQcJlRGuA": { + "timestamp": 1687867033043, + "name": "instance-0000000006", + "transport_address": "172.25.238.204:19349", + "host": "172.25.238.204", + "ip": "172.25.238.204:19349", + "roles": [ + "data_content", + "data_hot", + "ingest", + "master", + "remote_cluster_client", + "transform" + ], + "attributes": { + "xpack.installed": "true", + "logical_availability_zone": "zone-0", + "availability_zone": "us-east-1a", + "region": "us-east-1", + "instance_configuration": "aws.es.datahot.i3", + "server_name": "instance-0000000006.36928dce44074ceba64d7b3d698443a7", + "data": "hot" + }, + "indices": { + "docs": { + "count": 403212527, + "deleted": 2287 + }, + "shard_stats": { + "total_count": 97 + }, + "store": { + "size_in_bytes": 189816312947, + "total_data_set_size_in_bytes": 189816312947, + "reserved_in_bytes": 0 + }, + "indexing": { + "index_total": 3667793202, + "index_time_in_millis": 1100149051, + "index_current": 1, + "index_failed": 149288, + "delete_total": 13333, + "delete_time_in_millis": 1883, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 7502889, + "time_in_millis": 747395, + "exists_total": 7411696, + "exists_time_in_millis": 741794, + "missing_total": 91193, + "missing_time_in_millis": 5601, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 166823028, + "query_time_in_millis": 51265805, + "query_current": 0, + "fetch_total": 42645288, + "fetch_time_in_millis": 21316820, + "fetch_current": 0, + "scroll_total": 13037388, + "scroll_time_in_millis": 138762688, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 912669, + "total_time_in_millis": 1022950085, + "total_docs": 12230404828, + "total_size_in_bytes": 5503526044088, + "total_stopped_time_in_millis": 3959107, + "total_throttled_time_in_millis": 747116999, + "total_auto_throttle_in_bytes": 3674596384 + }, + "refresh": { + "total": 7721472, + "total_time_in_millis": 94304142, + "external_total": 7659770, + "external_total_time_in_millis": 100804787, + "listeners": 0 + }, + "flush": { + "total": 35134, + "periodic": 34985, + "total_time_in_millis": 22213090 + }, + "warmer": { + "current": 0, + "total": 6096195, + 
"total_time_in_millis": 1439617 + }, + "query_cache": { + "memory_size_in_bytes": 18034237, + "total_count": 274407233, + "hit_count": 45114414, + "miss_count": 229292819, + "cache_size": 11302, + "cache_count": 46210, + "evictions": 34908 + }, + "fielddata": { + "memory_size_in_bytes": 600, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 307, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 0, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 0, + "index_writer_memory_in_bytes": 240481008, + "version_map_memory_in_bytes": 44339216, + "fixed_bit_set_memory_in_bytes": 2008, + "max_unsafe_auto_id_timestamp": 1679747033889, + "file_sizes": {} + }, + "translog": { + "operations": 362831, + "size_in_bytes": 453491882, + "uncommitted_operations": 362831, + "uncommitted_size_in_bytes": 453491882, + "earliest_last_modified_age": 8 + }, + "request_cache": { + "memory_size_in_bytes": 6779720, + "evictions": 0, + "hit_count": 10885151, + "miss_count": 8798 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 5718894 + }, + "bulk": { + "total_operations": 465694640, + "total_time_in_millis": 1118684280, + "total_size_in_bytes": 3998536502390, + "avg_time_in_millis": 0, + "avg_size_in_bytes": 8526 + } + }, + "os": { + "timestamp": 1687867033054, + "cpu": { + "percent": 11, + "load_average": { + "1m": 1.24, + "5m": 2.15, + "15m": 2.39 + } + }, + "mem": { + "total_in_bytes": 16106127360, + "adjusted_total_in_bytes": 15728640000, + "free_in_bytes": 517578752, + "used_in_bytes": 15588548608, + "free_percent": 3, + "used_percent": 97 + }, + "swap": { + "total_in_bytes": 0, + "free_in_bytes": 0, + "used_in_bytes": 0 + }, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 2633246338856561 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": 206897, + "stat": { + "number_of_elapsed_periods": 110099433, + "number_of_times_throttled": 389045, + "time_throttled_nanos": 34502349002867 + } + }, + "memory": { + "control_group": "/", + "limit_in_bytes": "16106127360", + "usage_in_bytes": "15588548608" + } + } + }, + "process": { + "timestamp": 1687867033054, + "open_file_descriptors": 1149, + "max_file_descriptors": 1048576, + "cpu": { + "percent": 11, + "total_in_millis": 2576219400 + }, + "mem": { + "total_virtual_in_bytes": 117744459776 + } + }, + "jvm": { + "timestamp": 1687867033055, + "uptime_in_millis": 11286453256, + "mem": { + "heap_used_in_bytes": 4337402488, + "heap_used_percent": 55, + "heap_committed_in_bytes": 7864320000, + "heap_max_in_bytes": 7864320000, + "non_heap_used_in_bytes": 343633376, + "non_heap_committed_in_bytes": 350355456, + "pools": { + "young": { + "used_in_bytes": 2654994432, + "max_in_bytes": 0, + "peak_used_in_bytes": 4718592000, + "peak_max_in_bytes": 0 + }, + "old": { + "used_in_bytes": 1413394432, + "max_in_bytes": 7864320000, + "peak_used_in_bytes": 2444862976, + "peak_max_in_bytes": 7864320000 + }, + "survivor": { + "used_in_bytes": 269013624, + "max_in_bytes": 0, + "peak_used_in_bytes": 591396864, + "peak_max_in_bytes": 0 + } + } + }, + "threads": { + "count": 112, + "peak_count": 117 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 78661, + "collection_time_in_millis": 6014901 + }, + "old": { + "collection_count": 0, + "collection_time_in_millis": 0 + } + } + }, + "buffer_pools": { + 
"mapped": { + "count": 844, + "used_in_bytes": 103411995802, + "total_capacity_in_bytes": 103411995802 + }, + "direct": { + "count": 94, + "used_in_bytes": 4654850, + "total_capacity_in_bytes": 4654848 + }, + "mapped - 'non-volatile memory'": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + }, + "classes": { + "current_loaded_count": 36006, + "total_loaded_count": 37829, + "total_unloaded_count": 1823 + } + }, + "thread_pool": { + "analyze": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "auto_complete": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "azure_event_loop": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ccr": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "cluster_coordination": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 1130226 + }, + "fetch_shard_started": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 6, + "completed": 38 + }, + "flush": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 89892 + }, + "force_merge": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 143 + }, + "generic": { + "threads": 46, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 46, + "completed": 89722038 + }, + "get": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "management": { + "threads": 3, + "queue": 0, + "active": 1, + "rejected": 0, + "largest": 3, + "completed": 416796779 + }, + "ml_datafeed": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_job_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_native_inference_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_utility": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 22545252 + }, + "refresh": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 885152069 + }, + "repository_azure": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "rollup_indexing": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search": { + "threads": 5, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 5, + "completed": 167558865 + }, + "search_coordination": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 14101096 + }, + "search_throttled": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_fetch_async": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_prewarming": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-crypto": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, 
+ "completed": 0 + }, + "security-token-key": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "snapshot": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 806551 + }, + "snapshot_meta": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "system_critical_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 2350943 + }, + "system_critical_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 7637 + }, + "system_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 31143771 + }, + "system_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 7401359 + }, + "vector_tile_generation": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "warmer": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 36139188 + }, + "watcher": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "write": { + "threads": 3, + "queue": 0, + "active": 2, + "rejected": 0, + "largest": 3, + "completed": 575385289 + } + }, + "fs": { + "timestamp": 1687867033056, + "total": { + "total_in_bytes": 483183820800, + "free_in_bytes": 292670836736, + "available_in_bytes": 292670836736 + }, + "data": [ + { + "path": "/app/data", + "mount": "/app (/dev/mapper/lxc-data)", + "type": "xfs", + "total_in_bytes": 483183820800, + "free_in_bytes": 292670836736, + "available_in_bytes": 292670836736 + } + ], + "io_stats": { + "devices": [ + { + "device_name": "dm-1", + "operations": 6160920260, + "read_operations": 376565165, + "write_operations": 5784355095, + "read_kilobytes": 31265075012, + "write_kilobytes": 100985041837, + "io_time_in_millis": 184335640 + } + ], + "total": { + "operations": 6160920260, + "read_operations": 376565165, + "write_operations": 5784355095, + "read_kilobytes": 31265075012, + "write_kilobytes": 100985041837, + "io_time_in_millis": 184335640 + } + } + }, + "transport": { + "server_open": 24, + "total_outbound_connections": 11, + "rx_count": 1300468666, + "rx_size_in_bytes": 1789647854011, + "tx_count": 1300468665, + "tx_size_in_bytes": 2927853534431, + "inbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 1256244956 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 202091898 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 3242593 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 454964 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 173349 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 39048 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 14155 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 75267 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 1534 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 76 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 3 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 
65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ], + "outbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 1128511214 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 161858180 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 6819172 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 2563797 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 445824 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 122462 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 95822 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 49986 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 1931 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 250 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 27 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ] + }, + "http": { + "current_open": 73, + "total_opened": 779388 + }, + "breakers": { + "fielddata": { + "limit_size_in_bytes": 3145728000, + "limit_size": "2.9gb", + "estimated_size_in_bytes": 600, + "estimated_size": "600b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 4718592000, + "limit_size": "4.3gb", + "estimated_size_in_bytes": 16440, + "estimated_size": "16kb", + "overhead": 1, + "tripped": 0 + }, + "inflight_requests": { + "limit_size_in_bytes": 7864320000, + "limit_size": "7.3gb", + "estimated_size_in_bytes": 56628, + "estimated_size": "55.3kb", + "overhead": 2, + "tripped": 0 + }, + "model_inference": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "eql_sequence": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 7471104000, + "limit_size": "6.9gb", + "estimated_size_in_bytes": 4341596792, + "estimated_size": "4gb", + "overhead": 1, + "tripped": 0 + } + } + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json new file mode 100644 index 00000000000000..6e6b21b916c07e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json @@ -0,0 +1,2577 @@ +{ + "_nodes": { + "total": 3, + "successful": 3, + "failed": 0 + }, + "cluster_name": "36928dce44074ceba64d7b3d698443a7", + "nodes": { + "tk_U7GMCRkCG4FoOvusrng": { + "timestamp": 1687866153482, + "name": "instance-0000000005", + "transport_address": "172.22.146.77:19280", + "host": "172.22.146.77", + "ip": "172.22.146.77:19280", + "roles": [ + "data_content", + "data_hot", + "ingest", + "master", + "remote_cluster_client", + "transform" + ], + "attributes": { + "instance_configuration": "aws.es.datahot.i3", + "server_name": "instance-0000000005.36928dce44074ceba64d7b3d698443a7", + "data": "hot", + "xpack.installed": "true", + "logical_availability_zone": "zone-1", + "availability_zone": "us-east-1e", + "region": "us-east-1" + }, + "indices": { + 
"docs": { + "count": 403028528, + "deleted": 430916 + }, + "shard_stats": { + "total_count": 97 + }, + "store": { + "size_in_bytes": 190773977702, + "total_data_set_size_in_bytes": 190773977702, + "reserved_in_bytes": 0 + }, + "indexing": { + "index_total": 6550378755, + "index_time_in_millis": 1244633519, + "index_current": 0, + "index_failed": 3425, + "delete_total": 422502, + "delete_time_in_millis": 12139, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 1673415, + "time_in_millis": 176085, + "exists_total": 1505245, + "exists_time_in_millis": 164637, + "missing_total": 168170, + "missing_time_in_millis": 11448, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 157912598, + "query_time_in_millis": 158980385, + "query_current": 0, + "fetch_total": 25105951, + "fetch_time_in_millis": 24517851, + "fetch_current": 0, + "scroll_total": 4428540, + "scroll_time_in_millis": 153962443, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 1, + "current_docs": 1768114, + "current_size_in_bytes": 954513675, + "total": 1494757, + "total_time_in_millis": 1621446531, + "total_docs": 21027016560, + "total_size_in_bytes": 8884898196658, + "total_stopped_time_in_millis": 4962617, + "total_throttled_time_in_millis": 1169888193, + "total_auto_throttle_in_bytes": 4651560300 + }, + "refresh": { + "total": 12359783, + "total_time_in_millis": 300152615, + "external_total": 12278845, + "external_total_time_in_millis": 311222562, + "listeners": 0 + }, + "flush": { + "total": 67895, + "periodic": 67579, + "total_time_in_millis": 81917283 + }, + "warmer": { + "current": 0, + "total": 6153265, + "total_time_in_millis": 1348469 + }, + "query_cache": { + "memory_size_in_bytes": 19433507, + "total_count": 410202459, + "hit_count": 51724734, + "miss_count": 358477725, + "cache_size": 11311, + "cache_count": 45151, + "evictions": 33840 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 291, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 0, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 0, + "index_writer_memory_in_bytes": 57432664, + "version_map_memory_in_bytes": 568, + "fixed_bit_set_memory_in_bytes": 55672, + "max_unsafe_auto_id_timestamp": 1676581446329, + "file_sizes": {} + }, + "translog": { + "operations": 1449698, + "size_in_bytes": 1214204014, + "uncommitted_operations": 1449698, + "uncommitted_size_in_bytes": 1214204014, + "earliest_last_modified_age": 14453 + }, + "request_cache": { + "memory_size_in_bytes": 6178272, + "evictions": 0, + "hit_count": 7403041, + "miss_count": 10622 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 48470343 + }, + "bulk": { + "total_operations": 783008788, + "total_time_in_millis": 1265052645, + "total_size_in_bytes": 6949964886117, + "avg_time_in_millis": 0, + "avg_size_in_bytes": 8635 + } + }, + "os": { + "timestamp": 1687866153489, + "cpu": { + "percent": 9, + "load_average": { + "1m": 0.83, + "5m": 1.1, + "15m": 1.3 + } + }, + "mem": { + "total_in_bytes": 16106127360, + "adjusted_total_in_bytes": 15728640000, + "free_in_bytes": 1425637376, + "used_in_bytes": 14680489984, + "free_percent": 9, + "used_percent": 91 + }, + "swap": { 
+ "total_in_bytes": 0, + "free_in_bytes": 0, + "used_in_bytes": 0 + }, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 4328157929052960 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": 206897, + "stat": { + "number_of_elapsed_periods": 198258313, + "number_of_times_throttled": 619367, + "time_throttled_nanos": 45229163024496 + } + }, + "memory": { + "control_group": "/", + "limit_in_bytes": "16106127360", + "usage_in_bytes": "14680489984" + } + } + }, + "process": { + "timestamp": 1687866153489, + "open_file_descriptors": 1180, + "max_file_descriptors": 1048576, + "cpu": { + "percent": 9, + "total_in_millis": 3994216500 + }, + "mem": { + "total_virtual_in_bytes": 114185707520 + } + }, + "jvm": { + "timestamp": 1687866153490, + "uptime_in_millis": 20231050756, + "mem": { + "heap_used_in_bytes": 1884124192, + "heap_used_percent": 23, + "heap_committed_in_bytes": 7864320000, + "heap_max_in_bytes": 7864320000, + "non_heap_used_in_bytes": 376433344, + "non_heap_committed_in_bytes": 385548288, + "pools": { + "young": { + "used_in_bytes": 385875968, + "max_in_bytes": 0, + "peak_used_in_bytes": 4714397696, + "peak_max_in_bytes": 0 + }, + "old": { + "used_in_bytes": 1399682080, + "max_in_bytes": 7864320000, + "peak_used_in_bytes": 7851651072, + "peak_max_in_bytes": 7864320000 + }, + "survivor": { + "used_in_bytes": 98566144, + "max_in_bytes": 0, + "peak_used_in_bytes": 591396864, + "peak_max_in_bytes": 0 + } + } + }, + "threads": { + "count": 115, + "peak_count": 126 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 139959, + "collection_time_in_millis": 3581668 + }, + "old": { + "collection_count": 1, + "collection_time_in_millis": 796 + } + } + }, + "buffer_pools": { + "mapped": { + "count": 831, + "used_in_bytes": 99844219805, + "total_capacity_in_bytes": 99844219805 + }, + "direct": { + "count": 90, + "used_in_bytes": 4571713, + "total_capacity_in_bytes": 4571711 + }, + "mapped - 'non-volatile memory'": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + }, + "classes": { + "current_loaded_count": 38122, + "total_loaded_count": 40402, + "total_unloaded_count": 2280 + } + }, + "thread_pool": { + "analyze": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "auto_complete": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 1 + }, + "azure_event_loop": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ccr": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "cluster_coordination": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 4427981 + }, + "fetch_shard_started": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 6, + "completed": 72 + }, + "flush": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 166429 + }, + "force_merge": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 205 + }, + "generic": { + "threads": 40, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 40, + "completed": 171078109 + }, + "get": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + 
"completed": 0 + }, + "management": { + "threads": 3, + "queue": 0, + "active": 1, + "rejected": 0, + "largest": 3, + "completed": 761997145 + }, + "ml_datafeed": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_job_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_native_inference_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_utility": { + "threads": 3, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 3, + "completed": 40979576 + }, + "refresh": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 1224783637 + }, + "repository_azure": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "rollup_indexing": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search": { + "threads": 5, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 5, + "completed": 191798560 + }, + "search_coordination": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 18868632 + }, + "search_throttled": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_fetch_async": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_prewarming": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-crypto": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-token-key": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "snapshot": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 1757953 + }, + "snapshot_meta": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 9, + "completed": 700327 + }, + "system_critical_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 11110320 + }, + "system_critical_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 14932 + }, + "system_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 39897928 + }, + "system_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 13382379 + }, + "vector_tile_generation": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "warmer": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 85786496 + }, + "watcher": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "write": { + "threads": 3, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 3, + "completed": 980512922 + } + }, + "fs": { + "timestamp": 1687866153490, + "total": { + "total_in_bytes": 483183820800, + "free_in_bytes": 290682736640, + "available_in_bytes": 290682736640 + }, + "data": [ + { + "path": "/app/data", + "mount": "/app (/dev/mapper/lxc-data)", + "type": "xfs", + "total_in_bytes": 483183820800, + "free_in_bytes": 290682736640, + "available_in_bytes": 
290682736640 + } + ], + "io_stats": { + "devices": [ + { + "device_name": "dm-1", + "operations": 5478832410, + "read_operations": 89263106, + "write_operations": 5389569304, + "read_kilobytes": 9500415196, + "write_kilobytes": 67144441274, + "io_time_in_millis": 271723584 + } + ], + "total": { + "operations": 5478832410, + "read_operations": 89263106, + "write_operations": 5389569304, + "read_kilobytes": 9500415196, + "write_kilobytes": 67144441274, + "io_time_in_millis": 271723584 + } + } + }, + "transport": { + "server_open": 24, + "total_outbound_connections": 9, + "rx_count": 2167879292, + "rx_size_in_bytes": 4905919297323, + "tx_count": 2167879293, + "tx_size_in_bytes": 2964638852652, + "inbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 2149806152 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 350125308 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 6237311 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 3462010 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 1695688 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 446932 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 34053 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 124821 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 1034 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 47 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 7 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 1 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 2 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ], + "outbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 1911876454 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 246835312 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 5928518 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 2342608 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 566388 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 164795 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 91456 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 68952 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 3952 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 772 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 51 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 25 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 10 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ] + }, + "http": { + "current_open": 84, + "total_opened": 1793320 + }, + "breakers": { + "model_inference": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "eql_sequence": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "fielddata": { + "limit_size_in_bytes": 3145728000, + "limit_size": 
"2.9gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 4718592000, + "limit_size": "4.3gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 1 + }, + "inflight_requests": { + "limit_size_in_bytes": 7864320000, + "limit_size": "7.3gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 2, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 7471104000, + "limit_size": "6.9gb", + "estimated_size_in_bytes": 1884124192, + "estimated_size": "1.7gb", + "overhead": 1, + "tripped": 93 + } + } + }, + "Klg1CjgMTouentQcJlRGuA": { + "timestamp": 1687866153482, + "name": "instance-0000000006", + "transport_address": "172.25.238.204:19349", + "host": "172.25.238.204", + "ip": "172.25.238.204:19349", + "roles": [ + "data_content", + "data_hot", + "ingest", + "master", + "remote_cluster_client", + "transform" + ], + "attributes": { + "logical_availability_zone": "zone-0", + "availability_zone": "us-east-1a", + "server_name": "instance-0000000006.36928dce44074ceba64d7b3d698443a7", + "xpack.installed": "true", + "data": "hot", + "instance_configuration": "aws.es.datahot.i3", + "region": "us-east-1" + }, + "indices": { + "docs": { + "count": 402750701, + "deleted": 1501 + }, + "shard_stats": { + "total_count": 97 + }, + "store": { + "size_in_bytes": 189584860329, + "total_data_set_size_in_bytes": 189584860329, + "reserved_in_bytes": 0 + }, + "indexing": { + "index_total": 3667364815, + "index_time_in_millis": 1100012973, + "index_current": 0, + "index_failed": 149288, + "delete_total": 13333, + "delete_time_in_millis": 1883, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 7502285, + "time_in_millis": 747339, + "exists_total": 7411100, + "exists_time_in_millis": 741739, + "missing_total": 91185, + "missing_time_in_millis": 5600, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 166820275, + "query_time_in_millis": 51262303, + "query_current": 0, + "fetch_total": 42642621, + "fetch_time_in_millis": 21316723, + "fetch_current": 0, + "scroll_total": 13036366, + "scroll_time_in_millis": 138752334, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 912589, + "total_time_in_millis": 1022946643, + "total_docs": 12230248422, + "total_size_in_bytes": 5503433306347, + "total_stopped_time_in_millis": 3959107, + "total_throttled_time_in_millis": 747116999, + "total_auto_throttle_in_bytes": 3674596384 + }, + "refresh": { + "total": 7720800, + "total_time_in_millis": 94297737, + "external_total": 7659102, + "external_total_time_in_millis": 100797967, + "listeners": 0 + }, + "flush": { + "total": 35130, + "periodic": 34981, + "total_time_in_millis": 22204637 + }, + "warmer": { + "current": 0, + "total": 6095530, + "total_time_in_millis": 1439528 + }, + "query_cache": { + "memory_size_in_bytes": 18032444, + "total_count": 274404002, + "hit_count": 45113976, + "miss_count": 229290026, + "cache_size": 11260, + "cache_count": 46168, + "evictions": 34908 + }, + "fielddata": { + "memory_size_in_bytes": 600, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 320, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 0, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 
0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 0, + "index_writer_memory_in_bytes": 262022568, + "version_map_memory_in_bytes": 49200018, + "fixed_bit_set_memory_in_bytes": 1904, + "max_unsafe_auto_id_timestamp": 1679747033889, + "file_sizes": {} + }, + "translog": { + "operations": 352376, + "size_in_bytes": 447695989, + "uncommitted_operations": 352376, + "uncommitted_size_in_bytes": 447695989, + "earliest_last_modified_age": 233 + }, + "request_cache": { + "memory_size_in_bytes": 6779128, + "evictions": 0, + "hit_count": 10884306, + "miss_count": 8796 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 5718894 + }, + "bulk": { + "total_operations": 465641149, + "total_time_in_millis": 1118546460, + "total_size_in_bytes": 3998028967189, + "avg_time_in_millis": 0, + "avg_size_in_bytes": 8613 + } + }, + "os": { + "timestamp": 1687866153492, + "cpu": { + "percent": 10, + "load_average": { + "1m": 2.38, + "5m": 2.74, + "15m": 2.45 + } + }, + "mem": { + "total_in_bytes": 16106127360, + "adjusted_total_in_bytes": 15728640000, + "free_in_bytes": 765980672, + "used_in_bytes": 15340146688, + "free_percent": 5, + "used_percent": 95 + }, + "swap": { + "total_in_bytes": 0, + "free_in_bytes": 0, + "used_in_bytes": 0 + }, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 2632999205547019 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": 206897, + "stat": { + "number_of_elapsed_periods": 110090960, + "number_of_times_throttled": 389008, + "time_throttled_nanos": 34498461943176 + } + }, + "memory": { + "control_group": "/", + "limit_in_bytes": "16106127360", + "usage_in_bytes": "15340146688" + } + } + }, + "process": { + "timestamp": 1687866153493, + "open_file_descriptors": 1156, + "max_file_descriptors": 1048576, + "cpu": { + "percent": 10, + "total_in_millis": 2575977020 + }, + "mem": { + "total_virtual_in_bytes": 117447507968 + } + }, + "jvm": { + "timestamp": 1687866153494, + "uptime_in_millis": 11285573694, + "mem": { + "heap_used_in_bytes": 5059735552, + "heap_used_percent": 64, + "heap_committed_in_bytes": 7864320000, + "heap_max_in_bytes": 7864320000, + "non_heap_used_in_bytes": 343633376, + "non_heap_committed_in_bytes": 350355456, + "pools": { + "young": { + "used_in_bytes": 3351248896, + "max_in_bytes": 0, + "peak_used_in_bytes": 4718592000, + "peak_max_in_bytes": 0 + }, + "old": { + "used_in_bytes": 1354067968, + "max_in_bytes": 7864320000, + "peak_used_in_bytes": 2444862976, + "peak_max_in_bytes": 7864320000 + }, + "survivor": { + "used_in_bytes": 354418688, + "max_in_bytes": 0, + "peak_used_in_bytes": 591396864, + "peak_max_in_bytes": 0 + } + } + }, + "threads": { + "count": 112, + "peak_count": 117 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 78652, + "collection_time_in_millis": 6014274 + }, + "old": { + "collection_count": 0, + "collection_time_in_millis": 0 + } + } + }, + "buffer_pools": { + "mapped": { + "count": 858, + "used_in_bytes": 103114998135, + "total_capacity_in_bytes": 103114998135 + }, + "direct": { + "count": 94, + "used_in_bytes": 4654850, + "total_capacity_in_bytes": 4654848 + }, + "mapped - 'non-volatile memory'": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + }, + "classes": { + "current_loaded_count": 36006, + "total_loaded_count": 37829, + "total_unloaded_count": 1823 + } + }, + "thread_pool": { + "analyze": { + "threads": 0, + "queue": 0, + "active": 0, + 
"rejected": 0, + "largest": 0, + "completed": 0 + }, + "auto_complete": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "azure_event_loop": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ccr": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "cluster_coordination": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 1130214 + }, + "fetch_shard_started": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 6, + "completed": 38 + }, + "flush": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 89882 + }, + "force_merge": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 143 + }, + "generic": { + "threads": 46, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 46, + "completed": 89714323 + }, + "get": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "management": { + "threads": 3, + "queue": 0, + "active": 1, + "rejected": 0, + "largest": 3, + "completed": 416760833 + }, + "ml_datafeed": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_job_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_native_inference_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_utility": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 22543494 + }, + "refresh": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 885068032 + }, + "repository_azure": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "rollup_indexing": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search": { + "threads": 5, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 5, + "completed": 167558078 + }, + "search_coordination": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 14101082 + }, + "search_throttled": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_fetch_async": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_prewarming": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-crypto": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-token-key": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "snapshot": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 806551 + }, + "snapshot_meta": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "system_critical_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 2350761 + 
}, + "system_critical_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 7635 + }, + "system_read": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 31141408 + }, + "system_write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 7400801 + }, + "vector_tile_generation": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "warmer": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 36136481 + }, + "watcher": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "write": { + "threads": 3, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 3, + "completed": 575332197 + } + }, + "fs": { + "timestamp": 1687866153494, + "total": { + "total_in_bytes": 483183820800, + "free_in_bytes": 292886683648, + "available_in_bytes": 292886683648 + }, + "data": [ + { + "path": "/app/data", + "mount": "/app (/dev/mapper/lxc-data)", + "type": "xfs", + "total_in_bytes": 483183820800, + "free_in_bytes": 292886683648, + "available_in_bytes": 292886683648 + } + ], + "io_stats": { + "devices": [ + { + "device_name": "dm-1", + "operations": 6160354146, + "read_operations": 376563348, + "write_operations": 5783790798, + "read_kilobytes": 31264865276, + "write_kilobytes": 100978561519, + "io_time_in_millis": 183984060 + } + ], + "total": { + "operations": 6160354146, + "read_operations": 376563348, + "write_operations": 5783790798, + "read_kilobytes": 31264865276, + "write_kilobytes": 100978561519, + "io_time_in_millis": 183984060 + } + } + }, + "transport": { + "server_open": 24, + "total_outbound_connections": 11, + "rx_count": 1300324276, + "rx_size_in_bytes": 1789333458217, + "tx_count": 1300324275, + "tx_size_in_bytes": 2927487680282, + "inbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 1256115237 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 202073370 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 3242412 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 454921 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 173321 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 39045 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 14154 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 75261 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 1534 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 76 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 3 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ], + "outbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 1128384926 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 161841158 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 6818465 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 2563517 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 445765 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 122453 + }, + { + "ge_millis": 32, + "lt_millis": 
64, + "count": 95805 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 49979 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 1930 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 250 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 27 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ] + }, + "http": { + "current_open": 75, + "total_opened": 779352 + }, + "breakers": { + "fielddata": { + "limit_size_in_bytes": 3145728000, + "limit_size": "2.9gb", + "estimated_size_in_bytes": 600, + "estimated_size": "600b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 4718592000, + "limit_size": "4.3gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "inflight_requests": { + "limit_size_in_bytes": 7864320000, + "limit_size": "7.3gb", + "estimated_size_in_bytes": 1464, + "estimated_size": "1.4kb", + "overhead": 2, + "tripped": 0 + }, + "model_inference": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "eql_sequence": { + "limit_size_in_bytes": 3932160000, + "limit_size": "3.6gb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 7471104000, + "limit_size": "6.9gb", + "estimated_size_in_bytes": 5059735552, + "estimated_size": "4.7gb", + "overhead": 1, + "tripped": 0 + } + } + }, + "k_AifYMWQTykjUq3pgE_-w": { + "timestamp": 1687866153482, + "name": "tiebreaker-0000000002", + "transport_address": "172.25.242.111:19393", + "host": "172.25.242.111", + "ip": "172.25.242.111:19393", + "roles": [ + "master", + "voting_only" + ], + "attributes": { + "logical_availability_zone": "tiebreaker", + "availability_zone": "us-east-1b", + "server_name": "tiebreaker-0000000002.36928dce44074ceba64d7b3d698443a7", + "xpack.installed": "true", + "data": "hot", + "instance_configuration": "aws.es.master.c5d", + "region": "us-east-1" + }, + "indices": { + "docs": { + "count": 0, + "deleted": 0 + }, + "shard_stats": { + "total_count": 0 + }, + "store": { + "size_in_bytes": 0, + "total_data_set_size_in_bytes": 0, + "reserved_in_bytes": 0 + }, + "indexing": { + "index_total": 0, + "index_time_in_millis": 0, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + 
"total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 0 + }, + "refresh": { + "total": 0, + "total_time_in_millis": 0, + "external_total": 0, + "external_total_time_in_millis": 0, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 0, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 0, + "memory_in_bytes": 0, + "terms_memory_in_bytes": 0, + "stored_fields_memory_in_bytes": 0, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 0, + "points_memory_in_bytes": 0, + "doc_values_memory_in_bytes": 0, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -9223372036854776000, + "file_sizes": {} + }, + "translog": { + "operations": 0, + "size_in_bytes": 0, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 0, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "bulk": { + "total_operations": 0, + "total_time_in_millis": 0, + "total_size_in_bytes": 0, + "avg_time_in_millis": 0, + "avg_size_in_bytes": 0 + } + }, + "os": { + "timestamp": 1687866153483, + "cpu": { + "percent": 0, + "load_average": { + "1m": 3.18, + "5m": 2.94, + "15m": 2.54 + } + }, + "mem": { + "total_in_bytes": 1073741824, + "adjusted_total_in_bytes": 696254464, + "free_in_bytes": 101437440, + "used_in_bytes": 972304384, + "free_percent": 9, + "used_percent": 91 + }, + "swap": { + "total_in_bytes": 536870912, + "free_in_bytes": 536870912, + "used_in_bytes": 0 + }, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 281986757031142 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": 847058, + "stat": { + "number_of_elapsed_periods": 133754533, + "number_of_times_throttled": 226, + "time_throttled_nanos": 6732992268 + } + }, + "memory": { + "control_group": "/", + "limit_in_bytes": "1073741824", + "usage_in_bytes": "972304384" + } + } + }, + "process": { + "timestamp": 1687866153483, + "open_file_descriptors": 557, + "max_file_descriptors": 1048576, + "cpu": { + "percent": 0, + "total_in_millis": 182462990 + }, + "mem": { + "total_virtual_in_bytes": 6049042432 + } + }, + "jvm": { + "timestamp": 1687866153484, + "uptime_in_millis": 23671101768, + "mem": { + "heap_used_in_bytes": 178362704, + "heap_used_percent": 63, + "heap_committed_in_bytes": 281018368, + "heap_max_in_bytes": 281018368, + "non_heap_used_in_bytes": 221757752, + "non_heap_committed_in_bytes": 231145472, + "pools": { + "young": { + "used_in_bytes": 71303168, + "max_in_bytes": 0, + "peak_used_in_bytes": 163577856, + "peak_max_in_bytes": 0 + }, + "old": { + "used_in_bytes": 106872320, + "max_in_bytes": 281018368, + "peak_used_in_bytes": 246953424, + "peak_max_in_bytes": 281018368 + }, + "survivor": { + "used_in_bytes": 187216, + "max_in_bytes": 0, + "peak_used_in_bytes": 20971520, + "peak_max_in_bytes": 0 + } + } + }, + "threads": { + 
"count": 45, + "peak_count": 47 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 342994, + "collection_time_in_millis": 768917 + }, + "old": { + "collection_count": 0, + "collection_time_in_millis": 0 + } + } + }, + "buffer_pools": { + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + }, + "direct": { + "count": 19, + "used_in_bytes": 2142216, + "total_capacity_in_bytes": 2142214 + }, + "mapped - 'non-volatile memory'": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + }, + "classes": { + "current_loaded_count": 29581, + "total_loaded_count": 31244, + "total_unloaded_count": 1663 + } + }, + "thread_pool": { + "analyze": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "auto_complete": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "azure_event_loop": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ccr": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "cluster_coordination": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 1708790 + }, + "fetch_shard_started": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "fetch_shard_store": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "flush": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "force_merge": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "generic": { + "threads": 9, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 9, + "completed": 78631938 + }, + "get": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "management": { + "threads": 2, + "queue": 0, + "active": 1, + "rejected": 0, + "largest": 2, + "completed": 86206936 + }, + "ml_datafeed": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_job_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_native_inference_comms": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "ml_utility": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 47308828 + }, + "refresh": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "repository_azure": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "rollup_indexing": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search_coordination": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "search_throttled": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_fetch_async": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "searchable_snapshots_cache_prewarming": { + 
"threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-crypto": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "security-token-key": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "snapshot": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "snapshot_meta": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "system_critical_read": { + "threads": 1, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 1, + "completed": 1 + }, + "system_critical_write": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "system_read": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "system_write": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "vector_tile_generation": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "warmer": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "watcher": { + "threads": 0, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 0, + "completed": 0 + }, + "write": { + "threads": 2, + "queue": 0, + "active": 0, + "rejected": 0, + "largest": 2, + "completed": 2 + } + }, + "fs": { + "timestamp": 1687866153484, + "total": { + "total_in_bytes": 12884901888, + "free_in_bytes": 12789022720, + "available_in_bytes": 12789022720 + }, + "data": [ + { + "path": "/app/data", + "mount": "/app (/dev/mapper/lxc-data)", + "type": "xfs", + "total_in_bytes": 12884901888, + "free_in_bytes": 12789022720, + "available_in_bytes": 12789022720 + } + ], + "io_stats": { + "devices": [ + { + "device_name": "dm-1", + "operations": 1025442756, + "read_operations": 12887271, + "write_operations": 1012555485, + "read_kilobytes": 666215440, + "write_kilobytes": 20200424566, + "io_time_in_millis": 547217376 + } + ], + "total": { + "operations": 1025442756, + "read_operations": 12887271, + "write_operations": 1012555485, + "read_kilobytes": 666215440, + "write_kilobytes": 20200424566, + "io_time_in_millis": 547217376 + } + } + }, + "transport": { + "server_open": 26, + "total_outbound_connections": 20, + "rx_count": 107632996, + "rx_size_in_bytes": 180620082152, + "tx_count": 107633007, + "tx_size_in_bytes": 420999501235, + "inbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 146874447 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 16292686 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 50826 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 1965 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 187 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 84 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 2 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 65800 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 14 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 0 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 0 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + 
"count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ], + "outbound_handling_time_histogram": [ + { + "lt_millis": 1, + "count": 97208157 + }, + { + "ge_millis": 1, + "lt_millis": 2, + "count": 10385725 + }, + { + "ge_millis": 2, + "lt_millis": 4, + "count": 28647 + }, + { + "ge_millis": 4, + "lt_millis": 8, + "count": 6334 + }, + { + "ge_millis": 8, + "lt_millis": 16, + "count": 1042 + }, + { + "ge_millis": 16, + "lt_millis": 32, + "count": 818 + }, + { + "ge_millis": 32, + "lt_millis": 64, + "count": 1556 + }, + { + "ge_millis": 64, + "lt_millis": 128, + "count": 725 + }, + { + "ge_millis": 128, + "lt_millis": 256, + "count": 3 + }, + { + "ge_millis": 256, + "lt_millis": 512, + "count": 0 + }, + { + "ge_millis": 512, + "lt_millis": 1024, + "count": 0 + }, + { + "ge_millis": 1024, + "lt_millis": 2048, + "count": 0 + }, + { + "ge_millis": 2048, + "lt_millis": 4096, + "count": 0 + }, + { + "ge_millis": 4096, + "lt_millis": 8192, + "count": 0 + }, + { + "ge_millis": 8192, + "lt_millis": 16384, + "count": 0 + }, + { + "ge_millis": 16384, + "lt_millis": 32768, + "count": 0 + }, + { + "ge_millis": 32768, + "lt_millis": 65536, + "count": 0 + }, + { + "ge_millis": 65536, + "count": 0 + } + ] + }, + "http": { + "current_open": 14, + "total_opened": 13364 + }, + "breakers": { + "model_inference": { + "limit_size_in_bytes": 140509184, + "limit_size": "134mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "eql_sequence": { + "limit_size_in_bytes": 140509184, + "limit_size": "134mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "fielddata": { + "limit_size_in_bytes": 112407347, + "limit_size": "107.1mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 168611020, + "limit_size": "160.7mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1, + "tripped": 0 + }, + "inflight_requests": { + "limit_size_in_bytes": 281018368, + "limit_size": "268mb", + "estimated_size_in_bytes": 1464, + "estimated_size": "1.4kb", + "overhead": 2, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 266967449, + "limit_size": "254.5mb", + "estimated_size_in_bytes": 178362704, + "estimated_size": "170mb", + "overhead": 1, + "tripped": 0 + } + } + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/README.md b/src/go/collectors/go.d.plugin/modules/energid/README.md new file mode 120000 index 00000000000000..894468aae8dbee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/README.md @@ -0,0 +1 @@ +integrations/energi_core_wallet.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/energid/charts.go b/src/go/collectors/go.d.plugin/modules/energid/charts.go new file mode 100644 index 00000000000000..3dcc252af3d37e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/charts.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package energid + +import "github.com/netdata/go.d.plugin/agent/module" + +var charts = module.Charts{ + // getblockchaininfo (blockchain processing) + { + ID: "blockindex", + Title: "Blockchain index", + Units: "count", + Fam: "blockchain", + Ctx: "energid.blockindex", + Type: module.Area, + Dims: module.Dims{ + {ID: "blockchain_blocks", Name: "blocks"}, + {ID: 
"blockchain_headers", Name: "headers"}, + }, + }, + { + ID: "difficulty", + Title: "Blockchain difficulty", + Units: "difficulty", + Fam: "blockchain", + Ctx: "energid.difficulty", + Dims: module.Dims{ + {ID: "blockchain_difficulty", Name: "difficulty", Div: 1000}, + }, + }, + + // getmempoolinfo (state of the TX memory pool) + { + ID: "mempool", + Title: "Memory pool", + Units: "bytes", + Fam: "memory", + Ctx: "energid.mempool", + Type: module.Area, + Dims: module.Dims{ + {ID: "mempool_max", Name: "max"}, + {ID: "mempool_current", Name: "usage"}, + {ID: "mempool_txsize", Name: "tx_size"}, + }, + }, + + // getmemoryinfo + { + ID: "secmem", + Title: "Secure memory", + Units: "bytes", + Fam: "memory", + Ctx: "energid.secmem", + Type: module.Area, + Dims: module.Dims{ + {ID: "secmem_total", Name: "total"}, + {ID: "secmem_used", Name: "used"}, + {ID: "secmem_free", Name: "free"}, + {ID: "secmem_locked", Name: "locked"}, + }, + }, + + // getnetworkinfo (P2P networking) + { + ID: "network", + Title: "Network", + Units: "connections", + Fam: "network", + Ctx: "energid.network", + Dims: module.Dims{ + {ID: "network_connections", Name: "connections"}, + }, + }, + { + ID: "timeoffset", + Title: "Network time offset", + Units: "seconds", + Fam: "network", + Ctx: "energid.timeoffset", + Dims: module.Dims{ + {ID: "network_timeoffset", Name: "timeoffset"}, + }, + }, + + // gettxoutsetinfo (unspent transaction output set) + { + ID: "utxo_transactions", + Title: "Transactions", + Units: "transactions", + Fam: "utxo", + Ctx: "energid.utxo_transactions", + Dims: module.Dims{ + {ID: "utxo_transactions", Name: "transactions"}, + {ID: "utxo_output_transactions", Name: "output_transactions"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/collect.go b/src/go/collectors/go.d.plugin/modules/energid/collect.go new file mode 100644 index 00000000000000..965ee4b366a33c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/collect.go @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package energid + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + jsonRPCVersion = "1.1" + + methodGetBlockchainInfo = "getblockchaininfo" + methodGetMemPoolInfo = "getmempoolinfo" + methodGetNetworkInfo = "getnetworkinfo" + methodGetTXOutSetInfo = "gettxoutsetinfo" + methodGetMemoryInfo = "getmemoryinfo" +) + +var infoRequests = rpcRequests{ + {JSONRPC: jsonRPCVersion, ID: 1, Method: methodGetBlockchainInfo}, + {JSONRPC: jsonRPCVersion, ID: 2, Method: methodGetMemPoolInfo}, + {JSONRPC: jsonRPCVersion, ID: 3, Method: methodGetNetworkInfo}, + {JSONRPC: jsonRPCVersion, ID: 4, Method: methodGetTXOutSetInfo}, + {JSONRPC: jsonRPCVersion, ID: 5, Method: methodGetMemoryInfo}, +} + +func (e *Energid) collect() (map[string]int64, error) { + responses, err := e.scrapeEnergid(infoRequests) + if err != nil { + return nil, err + } + + info, err := e.collectInfoResponse(infoRequests, responses) + if err != nil { + return nil, err + } + + return stm.ToMap(info), nil +} + +func (e *Energid) collectInfoResponse(requests rpcRequests, responses rpcResponses) (*energidInfo, error) { + var info energidInfo + for _, req := range requests { + resp := responses.getByID(req.ID) + if resp == nil { + e.Warningf("method '%s' (id %d) not in responses", req.Method, req.ID) + continue + } + + if resp.Error != nil { + e.Warningf("server returned an error on method '%s': %v", req.Method, 
resp.Error) + continue + } + + var err error + switch req.Method { + case methodGetBlockchainInfo: + info.Blockchain, err = parseBlockchainInfo(resp.Result) + case methodGetMemPoolInfo: + info.MemPool, err = parseMemPoolInfo(resp.Result) + case methodGetNetworkInfo: + info.Network, err = parseNetworkInfo(resp.Result) + case methodGetTXOutSetInfo: + info.TxOutSet, err = parseTXOutSetInfo(resp.Result) + case methodGetMemoryInfo: + info.Memory, err = parseMemoryInfo(resp.Result) + } + if err != nil { + return nil, fmt.Errorf("parse '%s' method result: %v", req.Method, err) + } + } + + return &info, nil +} + +func parseBlockchainInfo(result []byte) (*blockchainInfo, error) { + var m blockchainInfo + if err := json.Unmarshal(result, &m); err != nil { + return nil, err + } + return &m, nil +} + +func parseMemPoolInfo(result []byte) (*memPoolInfo, error) { + var m memPoolInfo + if err := json.Unmarshal(result, &m); err != nil { + return nil, err + } + return &m, nil +} + +func parseNetworkInfo(result []byte) (*networkInfo, error) { + var m networkInfo + if err := json.Unmarshal(result, &m); err != nil { + return nil, err + } + return &m, nil +} + +func parseTXOutSetInfo(result []byte) (*txOutSetInfo, error) { + var m txOutSetInfo + if err := json.Unmarshal(result, &m); err != nil { + return nil, err + } + return &m, nil +} + +func parseMemoryInfo(result []byte) (*memoryInfo, error) { + var m memoryInfo + if err := json.Unmarshal(result, &m); err != nil { + return nil, err + } + return &m, nil +} + +func (e *Energid) scrapeEnergid(requests rpcRequests) (rpcResponses, error) { + req, _ := web.NewHTTPRequest(e.Request) + req.Method = http.MethodPost + req.Header.Set("Content-Type", "application/json") + body, _ := json.Marshal(requests) + req.Body = io.NopCloser(bytes.NewReader(body)) + + var resp rpcResponses + if err := e.doOKDecode(req, &resp); err != nil { + return nil, err + } + + return resp, nil +} + +func (e *Energid) doOKDecode(req *http.Request, in interface{}) error { + resp, err := e.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/config_schema.json b/src/go/collectors/go.d.plugin/modules/energid/config_schema.json new file mode 100644 index 00000000000000..20f4ec9f8201c9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/energid job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + 
"tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/energid.go b/src/go/collectors/go.d.plugin/modules/energid/energid.go new file mode 100644 index 00000000000000..fcffe50d861f8c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/energid.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package energid + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("energid", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Energid struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func New() *Energid { + return &Energid{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9796", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + } +} + +func (e *Energid) Init() bool { + err := e.validateConfig() + if err != nil { + e.Errorf("config validation: %v", err) + return false + } + + client, err := e.initHTTPClient() + if err != nil { + e.Errorf("init HTTP client: %v", err) + return false + } + e.httpClient = client + + cs, err := e.initCharts() + if err != nil { + e.Errorf("init charts: %v", err) + return false + } + e.charts = cs + + return true +} + +func (e *Energid) Check() bool { + return len(e.Collect()) > 0 +} + +func (e *Energid) Charts() *module.Charts { + return e.charts +} + +func (e *Energid) Collect() map[string]int64 { + ms, err := e.collect() + if err != nil { + e.Error(err) + } + + if len(ms) == 0 { + return nil + } + + return ms +} + +func (e *Energid) Cleanup() { + if e.httpClient == nil { + return + } + e.httpClient.CloseIdleConnections() +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/energid_test.go b/src/go/collectors/go.d.plugin/modules/energid/energid_test.go new file mode 100644 index 00000000000000..ab0e2f24e1880d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/energid_test.go @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package energid + +import ( + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v241GetBlockchainInfo, _ = os.ReadFile("testdata/v2.4.1/getblockchaininfo.json") + v241GetMemPoolInfo, _ = os.ReadFile("testdata/v2.4.1/getmempoolinfo.json") + v241GetNetworkInfo, _ = os.ReadFile("testdata/v2.4.1/getnetworkinfo.json") + v241GetTXOutSetInfo, _ = os.ReadFile("testdata/v2.4.1/gettxoutsetinfo.json") + v241GetMemoryInfo, _ = os.ReadFile("testdata/v2.4.1/getmemoryinfo.json") +) + +func Test_Testdata(t *testing.T) { + for name, data := range map[string][]byte{ + "v241GetBlockchainInfo": v241GetBlockchainInfo, + "v241GetMemPoolInfo": v241GetMemPoolInfo, + "v241GetNetworkInfo": v241GetNetworkInfo, + "v241GetTXOutSetInfo": v241GetTXOutSetInfo, + "v241GetMemoryInfo": v241GetMemoryInfo, + } 
{
+		require.NotNilf(t, data, name)
+	}
+}
+
+func TestNew(t *testing.T) {
+	assert.IsType(t, (*Energid)(nil), New())
+}
+
+func Test_Init(t *testing.T) {
+	tests := map[string]struct {
+		config   Config
+		wantFail bool
+	}{
+		"success on default config": {
+			config: New().Config,
+		},
+		"fails on unset URL": {
+			wantFail: true,
+			config: Config{
+				HTTP: web.HTTP{
+					Request: web.Request{URL: ""},
+				},
+			},
+		},
+		"fails on invalid TLSCA": {
+			wantFail: true,
+			config: Config{
+				HTTP: web.HTTP{
+					Request: web.Request{
+						URL: "http://127.0.0.1:38001",
+					},
+					Client: web.Client{
+						TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+					},
+				},
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			energid := New()
+			energid.Config = test.config
+
+			if test.wantFail {
+				assert.False(t, energid.Init())
+			} else {
+				assert.True(t, energid.Init())
+			}
+		})
+	}
+}
+
+func Test_Charts(t *testing.T) {
+	energid := New()
+	require.True(t, energid.Init())
+	assert.NotNil(t, energid.Charts())
+}
+
+func Test_Cleanup(t *testing.T) {
+	assert.NotPanics(t, New().Cleanup)
+}
+
+func Test_Check(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func() (energid *Energid, cleanup func())
+		wantFail bool
+	}{
+		"success on valid v2.4.1 response": {
+			prepare: prepareEnergidV241,
+		},
+		"fails on 404 response": {
+			wantFail: true,
+			prepare:  prepareEnergid404,
+		},
+		"fails on connection refused": {
+			wantFail: true,
+			prepare:  prepareEnergidConnectionRefused,
+		},
+		"fails on response with invalid data": {
+			wantFail: true,
+			prepare:  prepareEnergidInvalidData,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			energid, cleanup := test.prepare()
+			defer cleanup()
+
+			require.True(t, energid.Init())
+
+			if test.wantFail {
+				assert.False(t, energid.Check())
+			} else {
+				assert.True(t, energid.Check())
+			}
+		})
+	}
+}
+
+func Test_Collect(t *testing.T) {
+	tests := map[string]struct {
+		prepare       func() (energid *Energid, cleanup func())
+		wantCollected map[string]int64
+	}{
+		"success on valid v2.4.1 response": {
+			prepare: prepareEnergidV241,
+			wantCollected: map[string]int64{
+				"blockchain_blocks":        1,
+				"blockchain_difficulty":    0,
+				"blockchain_headers":       1,
+				"mempool_current":          1,
+				"mempool_max":              300000000,
+				"mempool_txsize":           1,
+				"network_connections":      1,
+				"network_timeoffset":       1,
+				"secmem_free":              65248,
+				"secmem_locked":            65536,
+				"secmem_total":             65536,
+				"secmem_used":              288,
+				"utxo_output_transactions": 1,
+				"utxo_transactions":        1,
+			},
+		},
+		"fails on 404 response": {
+			prepare: prepareEnergid404,
+		},
+		"fails on connection refused": {
+			prepare: prepareEnergidConnectionRefused,
+		},
+		"fails on response with invalid data": {
+			prepare: prepareEnergidInvalidData,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			energid, cleanup := test.prepare()
+			defer cleanup()
+			require.True(t, energid.Init())
+
+			collected := energid.Collect()
+
+			assert.Equal(t, test.wantCollected, collected)
+			if len(test.wantCollected) > 0 {
+				ensureCollectedHasAllChartsDimsVarsIDs(t, energid, collected)
+			}
+		})
+	}
+}
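+
+// ensureCollectedHasAllChartsDimsVarsIDs cross-checks the collected map against
+// every chart dimension and variable ID. The flat keys (e.g. "blockchain_blocks",
+// "mempool_current") are produced by stm.ToMap from the `stm` struct tags in
+// metrics.go, so a failure here usually means a chart dim ID and a struct tag
+// have drifted apart.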
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, energid *Energid, ms map[string]int64) {
+	for _, chart := range *energid.Charts() {
+		if chart.Obsolete {
+			continue
+		}
+		for _, dim := range chart.Dims {
+			_, ok := ms[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+		}
+		for _, v := range chart.Vars {
+			_, ok := ms[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+		}
+	}
+}
+
+func prepareEnergidV241() (*Energid, func()) {
+	srv := prepareEnergidEndPoint()
+	energid := New()
+	energid.URL = srv.URL
+
+	return energid, srv.Close
+}
+
+func prepareEnergidInvalidData() (*Energid, func()) {
+	srv := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			_, _ = w.Write([]byte("Hello world!"))
+		}))
+	energid := New()
+	energid.URL = srv.URL
+
+	return energid, srv.Close
+}
+
+func prepareEnergid404() (*Energid, func()) {
+	srv := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusNotFound)
+		}))
+	energid := New()
+	energid.URL = srv.URL
+
+	return energid, srv.Close
+}
+
+func prepareEnergidConnectionRefused() (*Energid, func()) {
+	energid := New()
+	energid.URL = "http://127.0.0.1:38001"
+
+	return energid, func() {}
+}
+
+func prepareEnergidEndPoint() *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			if r.Method != http.MethodPost {
+				w.WriteHeader(http.StatusMethodNotAllowed)
+				return
+			}
+
+			body, _ := io.ReadAll(r.Body)
+			var requests rpcRequests
+			if err := json.Unmarshal(body, &requests); err != nil || len(requests) == 0 {
+				w.WriteHeader(http.StatusInternalServerError)
+				return
+			}
+
+			var responses rpcResponses
+			for _, req := range requests {
+				resp := rpcResponse{JSONRPC: jsonRPCVersion, ID: req.ID}
+				switch req.Method {
+				case methodGetBlockchainInfo:
+					resp.Result = prepareResult(v241GetBlockchainInfo)
+				case methodGetMemPoolInfo:
+					resp.Result = prepareResult(v241GetMemPoolInfo)
+				case methodGetNetworkInfo:
+					resp.Result = prepareResult(v241GetNetworkInfo)
+				case methodGetTXOutSetInfo:
+					resp.Result = prepareResult(v241GetTXOutSetInfo)
+				case methodGetMemoryInfo:
+					resp.Result = prepareResult(v241GetMemoryInfo)
+				default:
+					resp.Error = &rpcError{Code: -32601, Message: "Method not found"}
+				}
+				responses = append(responses, resp)
+			}
+
+			bs, _ := json.Marshal(responses)
+			_, _ = w.Write(bs)
+		}))
+}
+
+func prepareResult(resp []byte) json.RawMessage {
+	var r rpcResponse
+	_ = json.Unmarshal(resp, &r)
+	return r.Result
+}
diff --git a/src/go/collectors/go.d.plugin/modules/energid/init.go b/src/go/collectors/go.d.plugin/modules/energid/init.go
new file mode 100644
index 00000000000000..3b7b7fb9e8b694
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/energid/init.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package energid
+
+import (
+	"errors"
+	"net/http"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (e Energid) validateConfig() error {
+	if e.URL == "" {
+		return errors.New("URL not set")
+	}
+
+	if _, err := web.NewHTTPRequest(e.Request); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (e Energid) initHTTPClient() (*http.Client, error) {
+	return web.NewHTTPClient(e.Client)
+}
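+
+// initCharts returns a fresh copy of the package-level chart definitions, so
+// that each job mutates its own *module.Charts value rather than the shared
+// template.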
"https://github.com/netdata/go.d.plugin/edit/master/modules/energid/metadata.yaml" +sidebar_label: "Energi Core Wallet" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Energi Core Wallet + + +<img src="https://netdata.cloud/img/energi.png" width="150"/> + + +Plugin: go.d.plugin +Module: apache + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This module monitors Energi Core Wallet instances. +Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Energi Core Wallet instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| energid.blockindex | blocks, headers | count | +| energid.difficulty | difficulty | difficulty | +| energid.mempool | max, usage, tx_size | bytes | +| energid.secmem | total, used, free, locked | bytes | +| energid.network | connections | connections | +| energid.timeoffset | timeoffset | seconds | +| energid.utxo_transactions | transactions, output_transactions | transactions | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/energid.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/energid.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:9796 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. 
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9796
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9796
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9796
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9796
+
+  - name: remote
+    url: http://192.0.2.1:9796
+
+```
+</details>
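+
+##### Checking the endpoint by hand
+
+Not a job configuration, but a quick way to confirm that the wallet's JSON-RPC
+endpoint is reachable before pointing the collector at it. This is an illustrative
+command; adjust the port and credentials to your setup:
+
+```bash
+curl --user username:password \
+  -H 'Content-Type: application/json' \
+  --data '{"jsonrpc": "1.1", "method": "getblockchaininfo", "id": 1}' \
+  http://127.0.0.1:9796
+```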
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `energid` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m energid
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/energid/jsonrpc.go b/src/go/collectors/go.d.plugin/modules/energid/jsonrpc.go
new file mode 100644
index 00000000000000..c3a80e9b012792
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/energid/jsonrpc.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package energid
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// https://www.jsonrpc.org/specification#request_object
+type (
+	rpcRequest struct {
+		JSONRPC string `json:"jsonrpc"`
+		Method  string `json:"method"`
+		ID      int    `json:"id"`
+	}
+	rpcRequests []rpcRequest
+)
+
+// https://www.jsonrpc.org/specification#response_object
+type (
+	rpcResponse struct {
+		JSONRPC string          `json:"jsonrpc"`
+		Result  json.RawMessage `json:"result"`
+		Error   *rpcError       `json:"error"`
+		ID      int             `json:"id"`
+	}
+	rpcResponses []rpcResponse
+)
+
+func (rs rpcResponses) getByID(id int) *rpcResponse {
+	for _, r := range rs {
+		if r.ID == id {
+			return &r
+		}
+	}
+	return nil
+}
+
+// https://www.jsonrpc.org/specification#error_object
+type rpcError struct {
+	Code    int64  `json:"code"`
+	Message string `json:"message"`
+}
+
+func (e rpcError) String() string {
+	return fmt.Sprintf("%s (code %d)", e.Message, e.Code)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/energid/metadata.yaml b/src/go/collectors/go.d.plugin/modules/energid/metadata.yaml
new file mode 100644
index 00000000000000..c32f7cb579dc5e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/energid/metadata.yaml
@@ -0,0 +1,225 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-energid
+      module_name: energid
+      plugin_name: go.d.plugin
+      monitored_instance:
+        name: Energi Core Wallet
+        link: ""
+        icon_filename: energi.png
+        categories:
+          - data-collection.blockchain-servers
+      keywords:
+        - energid
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This module monitors Energi Core Wallet instances.
+          Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/energid.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:9796
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+ default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:9796 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:9796 + username: username + password: password + - name: HTTPS with self-signed certificate + description: | + Do not validate server certificate chain and hostname. + config: | + jobs: + - name: local + url: https://127.0.0.1:9796 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:9796 + + - name: remote + url: http://192.0.2.1:9796 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: energid.blockindex + description: Blockchain index + unit: count + chart_type: area + dimensions: + - name: blocks + - name: headers + - name: energid.difficulty + description: Blockchain difficulty + unit: difficulty + chart_type: line + dimensions: + - name: difficulty + - name: energid.mempool + description: Memory pool + unit: bytes + chart_type: area + dimensions: + - name: max + - name: usage + - name: tx_size + - name: energid.secmem + description: Secure memory + unit: bytes + chart_type: area + dimensions: + - name: total + - name: used + - name: free + - name: locked + - name: energid.network + description: Network + unit: connections + chart_type: line + dimensions: + - name: connections + - name: energid.timeoffset + description: Network time offset + unit: seconds + chart_type: line + dimensions: + - name: timeoffset + - name: energid.utxo_transactions + description: Transactions + unit: transactions + chart_type: line + dimensions: + - name: transactions + - name: output_transactions diff --git a/src/go/collectors/go.d.plugin/modules/energid/metrics.go b/src/go/collectors/go.d.plugin/modules/energid/metrics.go new file mode 100644 index 00000000000000..2e77edf917d4d8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/metrics.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package energid + +// API docs: https://github.com/energicryptocurrency/core-api-documentation + +type energidInfo struct { + Blockchain *blockchainInfo `stm:"blockchain"` + MemPool *memPoolInfo `stm:"mempool"` + Network *networkInfo `stm:"network"` + TxOutSet *txOutSetInfo `stm:"utxo"` + Memory *memoryInfo `stm:"secmem"` +} + +// https://github.com/energicryptocurrency/core-api-documentation#getblockchaininfo +type blockchainInfo struct { + Blocks float64 `stm:"blocks" json:"blocks"` + Headers float64 `stm:"headers" json:"headers"` + Difficulty float64 `stm:"difficulty,1000,1" json:"difficulty"` +} + +// https://github.com/energicryptocurrency/core-api-documentation#getmempoolinfo +type memPoolInfo struct { + Bytes float64 `stm:"txsize" json:"bytes"` + Usage float64 `stm:"current" json:"usage"` + MaxMemPool float64 `stm:"max" json:"maxmempool"` +} + +// https://github.com/energicryptocurrency/core-api-documentation#getnetworkinfo +type networkInfo struct { + TimeOffset float64 `stm:"timeoffset" json:"timeoffset"` + Connections float64 `stm:"connections" json:"connections"` +} + +// https://github.com/energicryptocurrency/core-api-documentation#gettxoutsetinfo +type txOutSetInfo struct { + Transactions float64 `stm:"transactions" json:"transactions"` + TxOuts float64 `stm:"output_transactions" json:"txouts"` +} + +// undocumented +type memoryInfo struct { + Locked struct { + Used float64 `stm:"used" json:"used"` + Free float64 `stm:"free" json:"free"` + Total float64 `stm:"total" json:"total"` + Locked float64 `stm:"locked" json:"locked"` + } `stm:"" json:"locked"` +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getblockchaininfo.json b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getblockchaininfo.json new file mode 100644 index 00000000000000..7d194d62a79dc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getblockchaininfo.json @@ -0,0 +1,66 @@ +{ + "result": { + "chain": "test", + "blocks": 1, + "headers": 1, + "bestblockhash": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74", + "difficulty": 4.656542373906925e-10, + "mediantime": 
1524344801, + "verificationprogress": 3.57591520058473e-07, + "chainwork": "0000000000000000000000000000000000000000000000000000000000000002", + "pruned": false, + "pos": false, + "posv2": false, + "softforks": [ + { + "id": "bip34", + "version": 2, + "reject": { + "status": false + } + }, + { + "id": "bip66", + "version": 3, + "reject": { + "status": false + } + }, + { + "id": "bip65", + "version": 4, + "reject": { + "status": false + } + } + ], + "bip9_softforks": { + "csv": { + "status": "defined", + "startTime": 1486252800, + "timeout": 1549328400, + "since": 1 + }, + "dip0001": { + "status": "defined", + "startTime": 1505692800, + "timeout": 1549328400, + "since": 1 + }, + "bip147": { + "status": "defined", + "startTime": 1546300800, + "timeout": 1549328400, + "since": 1 + }, + "spork17": { + "status": "defined", + "startTime": 1566129600, + "timeout": 1577793600, + "since": 1 + } + } + }, + "error": null, + "id": "1" +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmemoryinfo.json b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmemoryinfo.json new file mode 100644 index 00000000000000..9fdece550ad7be --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmemoryinfo.json @@ -0,0 +1,14 @@ +{ + "result": { + "locked": { + "used": 288, + "free": 65248, + "total": 65536, + "locked": 65536, + "chunks_used": 4, + "chunks_free": 2 + } + }, + "error": null, + "id": "1" +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmempoolinfo.json b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmempoolinfo.json new file mode 100644 index 00000000000000..8845555b1f536a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getmempoolinfo.json @@ -0,0 +1,11 @@ +{ + "result": { + "size": 1, + "bytes": 1, + "usage": 1, + "maxmempool": 300000000, + "mempoolminfee": 1 + }, + "error": null, + "id": "1" +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getnetworkinfo.json b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getnetworkinfo.json new file mode 100644 index 00000000000000..59df2c5adc69dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/getnetworkinfo.json @@ -0,0 +1,41 @@ +{ + "result": { + "version": 2040100, + "subversion": "/Energi Core:2.4.1/", + "protocolversion": 70213, + "localservices": "0000000000000005", + "localrelay": true, + "timeoffset": 1, + "networkactive": true, + "connections": 1, + "networks": [ + { + "name": "ipv4", + "limited": false, + "reachable": true, + "proxy": "", + "proxy_randomize_credentials": false + }, + { + "name": "ipv6", + "limited": false, + "reachable": true, + "proxy": "", + "proxy_randomize_credentials": false + }, + { + "name": "onion", + "limited": true, + "reachable": false, + "proxy": "", + "proxy_randomize_credentials": false + } + ], + "relayfee": 1e-05, + "incrementalfee": 1e-05, + "localaddresses": [], + "warnings": "" + }, + "error": null, + "id": "1" +} diff --git a/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json new file mode 100644 index 00000000000000..5bc606f57cd67e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json @@ -0,0 +1,13 @@ +{ + "result": { + "height": 1, + "bestblock": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74", + 
"transactions": 1, + "txouts": 1, + "hash_serialized_2": "ba3631e5919f37c8f542658238de0516612a7063fbd6143ef813a4e1cc4548e1", + "disk_size": 1, + "total_amount": 1 + }, + "error": null, + "id": "1" +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/README.md b/src/go/collectors/go.d.plugin/modules/envoy/README.md new file mode 120000 index 00000000000000..a0d3a2a2cea144 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/README.md @@ -0,0 +1 @@ +integrations/envoy.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/envoy/charts.go b/src/go/collectors/go.d.plugin/modules/envoy/charts.go new file mode 100644 index 00000000000000..36fa301ac859a6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/charts.go @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package envoy + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/prometheus/prometheus/model/labels" +) + +const ( + prioServerState = module.Priority + iota + prioServerMemoryAllocatedSize + prioServerMemoryHeapSize + prioServerMemoryPhysicalSize + prioServerConnectionsCount + prioServerParentConnectionsCount + + prioClusterManagerClustersCount + prioClusterManagerClusterChangesRate + prioClusterManagerClusterUpdatesRate + prioClusterManagerClusterUpdatesVieMergeRate + prioClusterManagerClusterUpdatesMergeCancelledRate + prioClusterManagerClusterUpdatesOufOfMergeWindowsRate + + prioClusterMembershipEndpointsCount + prioClusterMembershipChangesRate + prioClusterMembershipUpdatesRate + + prioClusterUpstreamActiveConnectionsCount + prioClusterUpstreamConnectionsRate + prioClusterUpstreamHTTPConnectionsRate + prioClusterUpstreamDestroyedConnectionsRate + prioClusterUpstreamFailedConnectionsRate + prioClusterUpstreamTimedOutConnectionsRate + prioClusterUpstreamTrafficRate + prioClusterUpstreamBufferedSize + + prioClusterUpstreamActiveRequestsCount + prioClusterUpstreamRequestsRate + prioClusterUpstreamFailedRequestsRate + prioClusterUpstreamActivePendingRequestsCount + prioClusterUpstreamPendingRequestsRate + prioClusterUpstreamPendingFailedRequestsRate + prioClusterUpstreamRequestRetriesRate + prioClusterUpstreamRequestSuccessRetriesRate + prioClusterUpstreamRequestBackoffRetriesRate + + prioListenerManagerListenerCount + prioListenerManagerListenerChangesRate + prioListenerManagerListenerObjectEventsRate + + prioListenerAdminDownstreamActiveConnectionsCount + prioListenerAdminDownstreamConnectionsRate + prioListenerAdminDownstreamDestroyedConnectionsRate + prioListenerAdminDownstreamTimedOutConnectionsRate + prioListenerAdminDownstreamRejectedConnectionsRate + prioListenerAdminDownstreamFilterClosedByRemoteConnectionsRate + prioListenerAdminDownstreamFilterReadErrorsRate + prioListenerAdminDownstreamActiveSocketsCount + prioListenerAdminDownstreamTimedOutSocketsRate + + prioListenerDownstreamActiveConnectionsCount + prioListenerDownstreamConnectionsRate + prioListenerDownstreamDestroyedConnectionsRate + prioListenerDownstreamTimedOutConnectionsRate + prioListenerDownstreamRejectedConnectionsRate + prioListenerDownstreamFilterClosedByRemoteConnectionsRate + prioListenerDownstreamFilterReadErrorsRate + prioListenerDownstreamActiveSocketsCount + prioListenerDownstreamTimedOutSocketsRate + + prioServerUptime +) + +var ( + serverChartsTmpl = module.Charts{ + serverStateChartTmpl.Copy(), + + serverMemoryAllocatedSizeChartTmpl.Copy(), + serverMemoryHeapSizeChartTmpl.Copy(), + serverMemoryPhysicalSizeChartTmpl.Copy(), + + 
serverConnectionsCountChartTmpl.Copy(), + serverParentConnectionsCountChartTmpl.Copy(), + + serverUptimeChartTmpl.Copy(), + } + serverStateChartTmpl = module.Chart{ + ID: "server_state_%s", + Title: "Server current state", + Units: "state", + Fam: "server", + Ctx: "envoy.server_state", + Priority: prioServerState, + Dims: module.Dims{ + {ID: "envoy_server_state_live_%s", Name: "live"}, + {ID: "envoy_server_state_draining_%s", Name: "draining"}, + {ID: "envoy_server_state_pre_initializing_%s", Name: "pre_initializing"}, + {ID: "envoy_server_state_initializing_%s", Name: "initializing"}, + }, + } + serverConnectionsCountChartTmpl = module.Chart{ + ID: "server_connections_%s", + Title: "Server current connections", + Units: "connections", + Fam: "server", + Ctx: "envoy.server_connections_count", + Priority: prioServerConnectionsCount, + Dims: module.Dims{ + {ID: "envoy_server_total_connections_%s", Name: "connections"}, + }, + } + serverParentConnectionsCountChartTmpl = module.Chart{ + ID: "server_parent_connections_%s", + Title: "Server current parent connections", + Units: "connections", + Fam: "server", + Ctx: "envoy.server_parent_connections_count", + Priority: prioServerParentConnectionsCount, + Dims: module.Dims{ + {ID: "envoy_server_parent_connections_%s", Name: "connections"}, + }, + } + serverMemoryAllocatedSizeChartTmpl = module.Chart{ + ID: "server_memory_allocated_size_%s", + Title: "Server memory allocated size", + Units: "bytes", + Fam: "server", + Ctx: "envoy.server_memory_allocated_size", + Priority: prioServerMemoryAllocatedSize, + Dims: module.Dims{ + {ID: "envoy_server_memory_allocated_%s", Name: "allocated"}, + }, + } + serverMemoryHeapSizeChartTmpl = module.Chart{ + ID: "server_memory_heap_size_%s", + Title: "Server memory heap size", + Units: "bytes", + Fam: "server", + Ctx: "envoy.server_memory_heap_size", + Priority: prioServerMemoryHeapSize, + Dims: module.Dims{ + {ID: "envoy_server_memory_heap_size_%s", Name: "heap"}, + }, + } + serverMemoryPhysicalSizeChartTmpl = module.Chart{ + ID: "server_memory_physical_size_%s", + Title: "Server memory physical size", + Units: "bytes", + Fam: "server", + Ctx: "envoy.server_memory_physical_size", + Priority: prioServerMemoryPhysicalSize, + Dims: module.Dims{ + {ID: "envoy_server_memory_physical_size_%s", Name: "physical"}, + }, + } + serverUptimeChartTmpl = module.Chart{ + ID: "server_uptime_%s", + Title: "Server uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "envoy.server_uptime", + Priority: prioServerUptime, + Dims: module.Dims{ + {ID: "envoy_server_uptime_%s", Name: "uptime"}, + }, + } +) + +var ( + clusterManagerChartsTmpl = module.Charts{ + clusterManagerClusterCountChartTmpl.Copy(), + clusterManagerClusterChangesRateChartTmpl.Copy(), + clusterManagerClusterUpdatesRateChartTmpl.Copy(), + clusterManagerClusterUpdatesViaMergeRateChartTmpl.Copy(), + clusterManagerClusterUpdatesMergeCancelledRateChartTmpl.Copy(), + clusterManagerClusterUpdatesOutOfMergeWindowRateChartTmpl.Copy(), + } + clusterManagerClusterCountChartTmpl = module.Chart{ + ID: "cluster_manager_cluster_count_%s", + Title: "Cluster manager current clusters", + Units: "clusters", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_cluster_count", + Priority: prioClusterManagerClustersCount, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_active_clusters_%s", Name: "active"}, + {ID: "envoy_cluster_manager_warming_clusters_%s", Name: "not_active"}, + }, + } + clusterManagerClusterChangesRateChartTmpl = module.Chart{ + ID: 
"cluster_manager_cluster_changes_%s", + Title: "Cluster manager cluster changes", + Units: "clusters/s", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_cluster_changes_rate", + Priority: prioClusterManagerClusterChangesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_cluster_added_%s", Name: "added", Algo: module.Incremental}, + {ID: "envoy_cluster_manager_cluster_modified_%s", Name: "modified", Algo: module.Incremental}, + {ID: "envoy_cluster_manager_cluster_removed_%s", Name: "removed", Algo: module.Incremental}, + }, + } + clusterManagerClusterUpdatesRateChartTmpl = module.Chart{ + ID: "cluster_manager_cluster_updates_%s", + Title: "Cluster manager updates", + Units: "updates/s", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_cluster_updates_rate", + Priority: prioClusterManagerClusterUpdatesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_cluster_updated_%s", Name: "cluster", Algo: module.Incremental}, + }, + } + clusterManagerClusterUpdatesViaMergeRateChartTmpl = module.Chart{ + ID: "cluster_manager_cluster_updated_via_merge_%s", + Title: "Cluster manager updates applied as merged updates", + Units: "updates/s", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_cluster_updated_via_merge_rate", + Priority: prioClusterManagerClusterUpdatesVieMergeRate, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_cluster_updated_via_merge_%s", Name: "via_merge", Algo: module.Incremental}, + }, + } + clusterManagerClusterUpdatesMergeCancelledRateChartTmpl = module.Chart{ + ID: "cluster_manager_update_merge_cancelled_%s", + Title: "Cluster manager cancelled merged updates", + Units: "updates/s", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_update_merge_cancelled_rate", + Priority: prioClusterManagerClusterUpdatesMergeCancelledRate, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_update_merge_cancelled_%s", Name: "merge_cancelled", Algo: module.Incremental}, + }, + } + clusterManagerClusterUpdatesOutOfMergeWindowRateChartTmpl = module.Chart{ + ID: "cluster_manager_update_out_of_merge_window_%s", + Title: "Cluster manager out of a merge window updates", + Units: "updates/s", + Fam: "cluster mgr", + Ctx: "envoy.cluster_manager_update_out_of_merge_window_rate", + Priority: prioClusterManagerClusterUpdatesOufOfMergeWindowsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_manager_update_out_of_merge_window_%s", Name: "out_of_merge_window", Algo: module.Incremental}, + }, + } +) + +var ( + clusterUpstreamChartsTmpl = module.Charts{ + clusterUpstreamActiveConnectionsCountChartTmpl.Copy(), + clusterUpstreamConnectionsRateChartTmpl.Copy(), + clusterUpstreamHTTPConnectionsRateChartTmpl.Copy(), + clusterUpstreamDestroyedConnectionsRateChartTmpl.Copy(), + clusterUpstreamFailedConnectionsRateChartTmpl.Copy(), + clusterUpstreamTimedOutConnectionsRateChartTmpl.Copy(), + clusterUpstreamTrafficRateChartTmpl.Copy(), + clusterUpstreamBufferedSizeChartTmpl.Copy(), + + clusterUpstreamActiveRequestsCountChartTmpl.Copy(), + clusterUpstreamRequestsRateChartTmpl.Copy(), + clusterUpstreamFailedRequestsRateChartTmpl.Copy(), + clusterUpstreamActivePendingRequestsCountChartTmpl.Copy(), + clusterUpstreamPendingRequestsRateChartTmpl.Copy(), + clusterUpstreamPendingFailedRequestsRateChartTmpl.Copy(), + clusterUpstreamRequestRetriesRateChartTmpl.Copy(), + clusterUpstreamRequestSuccessRetriesRateChartTmpl.Copy(), + clusterUpstreamRequestRetriesBackoffRateChartTmpl.Copy(), + + clusterMembershipEndpointsCountChartTmpl.Copy(), + clusterMembershipChangesRateChartTmpl.Copy(), + 
clusterMembershipUpdatesRateChartTmpl.Copy(), + } + + clusterUpstreamActiveConnectionsCountChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_active_%s", + Title: "Cluster upstream current active connections", + Units: "connections", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_active_count", + Priority: prioClusterUpstreamActiveConnectionsCount, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_active_%s", Name: "active"}, + }, + } + clusterUpstreamConnectionsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_total_%s", + Title: "Cluster upstream connections", + Units: "connections/s", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_rate", + Priority: prioClusterUpstreamConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_total_%s", Name: "created", Algo: module.Incremental}, + }, + } + clusterUpstreamHTTPConnectionsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_http_total_%s", + Title: "Cluster upstream connections by HTTP version", + Units: "connections/s", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_http_rate", + Priority: prioClusterUpstreamHTTPConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_http1_total_%s", Name: "http1", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_cx_http2_total_%s", Name: "http2", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_cx_http3_total_%s", Name: "http3", Algo: module.Incremental}, + }, + } + clusterUpstreamDestroyedConnectionsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_destroy_%s", + Title: "Cluster upstream destroyed connections", + Units: "connections/s", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_destroy_rate", + Priority: prioClusterUpstreamDestroyedConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_destroy_local_%s", Name: "local", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_cx_destroy_remote_%s", Name: "remote", Algo: module.Incremental}, + }, + } + clusterUpstreamFailedConnectionsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_connect_fail_%s", + Title: "Cluster upstream failed connections", + Units: "connections/s", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_connect_fail_rate", + Priority: prioClusterUpstreamFailedConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_connect_fail_%s", Name: "failed", Algo: module.Incremental}, + }, + } + clusterUpstreamTimedOutConnectionsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_connect_timeout_%s", + Title: "Cluster upstream timed out connections", + Units: "connections/s", + Fam: "upstream conns", + Ctx: "envoy.cluster_upstream_cx_connect_timeout_rate", + Priority: prioClusterUpstreamTimedOutConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_connect_timeout_%s", Name: "timeout", Algo: module.Incremental}, + }, + } + clusterUpstreamTrafficRateChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_bytes_total_%s", + Title: "Cluster upstream connection traffic", + Units: "bytes/s", + Fam: "upstream traffic", + Ctx: "envoy.cluster_upstream_cx_bytes_rate", + Priority: prioClusterUpstreamTrafficRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_rx_bytes_total_%s", Name: "received", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_cx_tx_bytes_total_%s", Name: "sent", Algo: module.Incremental}, + }, + } + clusterUpstreamBufferedSizeChartTmpl = module.Chart{ + ID: "cluster_upstream_cx_bytes_buffered_%s", + Title: "Cluster upstream current 
connection buffered size", + Units: "bytes", + Fam: "upstream traffic", + Ctx: "envoy.cluster_upstream_cx_bytes_buffered_size", + Priority: prioClusterUpstreamBufferedSize, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_cx_rx_bytes_buffered_%s", Name: "received"}, + {ID: "envoy_cluster_upstream_cx_tx_bytes_buffered_%s", Name: "send"}, + }, + } + + clusterUpstreamActiveRequestsCountChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_active_%s", + Title: "Cluster upstream current active requests", + Units: "requests", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_active_count", + Priority: prioClusterUpstreamActiveRequestsCount, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_active_%s", Name: "active"}, + }, + } + clusterUpstreamRequestsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_total_%s", + Title: "Cluster upstream requests", + Units: "requests/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_rate", + Priority: prioClusterUpstreamRequestsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_total_%s", Name: "requests", Algo: module.Incremental}, + }, + } + clusterUpstreamFailedRequestsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_failed_total_%s", + Title: "Cluster upstream failed requests", + Units: "requests/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_failed_rate", + Priority: prioClusterUpstreamFailedRequestsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_cancelled_%s", Name: "cancelled", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_maintenance_mode_%s", Name: "maintenance_mode", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_timeout_%s", Name: "timeout", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_max_duration_reached_%s", Name: "max_duration_reached", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_per_try_timeout_%s", Name: "per_try_timeout", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_rx_reset_%s", Name: "reset_local", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_tx_reset_%s", Name: "reset_remote", Algo: module.Incremental}, + }, + } + clusterUpstreamActivePendingRequestsCountChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_pending_active_%s", + Title: "Cluster upstream current active pending requests", + Units: "requests", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_pending_active_count", + Priority: prioClusterUpstreamActivePendingRequestsCount, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_pending_active_%s", Name: "active_pending"}, + }, + } + clusterUpstreamPendingRequestsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_pending_total_%s", + Title: "Cluster upstream pending requests", + Units: "requests/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_pending_rate", + Priority: prioClusterUpstreamPendingRequestsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_pending_total_%s", Name: "pending", Algo: module.Incremental}, + }, + } + clusterUpstreamPendingFailedRequestsRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_pending_failed_total_%s", + Title: "Cluster upstream failed pending requests", + Units: "requests/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_pending_failed_rate", + Priority: prioClusterUpstreamPendingFailedRequestsRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_pending_overflow_%s", Name: "overflow", Algo: module.Incremental}, + {ID: 
"envoy_cluster_upstream_rq_pending_failure_eject_%s", Name: "failure_eject", Algo: module.Incremental}, + }, + } + clusterUpstreamRequestRetriesRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_retry_%s", + Title: "Cluster upstream request retries", + Units: "retries/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_retry_rate", + Priority: prioClusterUpstreamRequestRetriesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_retry_%s", Name: "request", Algo: module.Incremental}, + }, + } + clusterUpstreamRequestSuccessRetriesRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_retry_success_%s", + Title: "Cluster upstream request successful retries", + Units: "retries/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_retry_success_rate", + Priority: prioClusterUpstreamRequestSuccessRetriesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_retry_success_%s", Name: "success", Algo: module.Incremental}, + }, + } + clusterUpstreamRequestRetriesBackoffRateChartTmpl = module.Chart{ + ID: "cluster_upstream_rq_retry_backoff_%s", + Title: "Cluster upstream request backoff retries", + Units: "retries/s", + Fam: "upstream requests", + Ctx: "envoy.cluster_upstream_rq_retry_backoff_rate", + Priority: prioClusterUpstreamRequestBackoffRetriesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_upstream_rq_retry_backoff_exponential_%s", Name: "exponential", Algo: module.Incremental}, + {ID: "envoy_cluster_upstream_rq_retry_backoff_ratelimited_%s", Name: "ratelimited", Algo: module.Incremental}, + }, + } + + clusterMembershipEndpointsCountChartTmpl = module.Chart{ + ID: "cluster_membership_endpoints_count_%s", + Title: "Cluster membership current endpoints", + Units: "endpoints", + Fam: "cluster membership", + Ctx: "envoy.cluster_membership_endpoints_count", + Priority: prioClusterMembershipEndpointsCount, + Dims: module.Dims{ + {ID: "envoy_cluster_membership_healthy_%s", Name: "healthy"}, + {ID: "envoy_cluster_membership_degraded_%s", Name: "degraded"}, + {ID: "envoy_cluster_membership_excluded_%s", Name: "excluded"}, + }, + } + clusterMembershipChangesRateChartTmpl = module.Chart{ + ID: "cluster_membership_change_%s", + Title: "Cluster membership changes", + Units: "changes/s", + Fam: "cluster membership", + Ctx: "envoy.cluster_membership_changes_rate", + Priority: prioClusterMembershipChangesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_membership_change_%s", Name: "membership", Algo: module.Incremental}, + }, + } + clusterMembershipUpdatesRateChartTmpl = module.Chart{ + ID: "cluster_membership_updates_%s", + Title: "Cluster membership updates", + Units: "updates/s", + Fam: "cluster membership", + Ctx: "envoy.cluster_membership_updates_rate", + Priority: prioClusterMembershipUpdatesRate, + Dims: module.Dims{ + {ID: "envoy_cluster_update_success_%s", Name: "success", Algo: module.Incremental}, + {ID: "envoy_cluster_update_failure_%s", Name: "failure", Algo: module.Incremental}, + {ID: "envoy_cluster_update_empty_%s", Name: "empty", Algo: module.Incremental}, + {ID: "envoy_cluster_update_no_rebuild_%s", Name: "no_rebuild", Algo: module.Incremental}, + }, + } +) + +var ( + listenerManagerChartsTmpl = module.Charts{ + listenerManagerListenersByStateCountChartTmpl.Copy(), + listenerManagerListenerChangesRateChartTmpl.Copy(), + listenerManagerListenerObjectEventsRateChartTmpl.Copy(), + } + listenerManagerListenersByStateCountChartTmpl = module.Chart{ + ID: "listener_manager_listeners_count_%s", + Title: "Listener manager current listeners", + Units: 
"listeners", + Fam: "downstream mgr", + Ctx: "envoy.listener_manager_listeners_count", + Priority: prioListenerManagerListenerCount, + Dims: module.Dims{ + {ID: "envoy_listener_manager_total_listeners_active_%s", Name: "active"}, + {ID: "envoy_listener_manager_total_listeners_warming_%s", Name: "warming"}, + {ID: "envoy_listener_manager_total_listeners_draining_%s", Name: "draining"}, + }, + } + listenerManagerListenerChangesRateChartTmpl = module.Chart{ + ID: "listener_manager_listener_changes_%s", + Title: "Listener manager listener changes", + Units: "listeners/s", + Fam: "downstream mgr", + Ctx: "envoy.listener_manager_listener_changes_rate", + Priority: prioListenerManagerListenerChangesRate, + Dims: module.Dims{ + {ID: "envoy_listener_manager_listener_added_%s", Name: "added", Algo: module.Incremental}, + {ID: "envoy_listener_manager_listener_modified_%s", Name: "modified", Algo: module.Incremental}, + {ID: "envoy_listener_manager_listener_removed_%s", Name: "removed", Algo: module.Incremental}, + {ID: "envoy_listener_manager_listener_stopped_%s", Name: "stopped", Algo: module.Incremental}, + }, + } + listenerManagerListenerObjectEventsRateChartTmpl = module.Chart{ + ID: "listener_manager_listener_object_events_%s", + Title: "Listener manager listener object events", + Units: "objects/s", + Fam: "downstream mgr", + Ctx: "envoy.listener_manager_listener_object_events_rate", + Priority: prioListenerManagerListenerObjectEventsRate, + Dims: module.Dims{ + {ID: "envoy_listener_manager_listener_create_success_%s", Name: "create_success", Algo: module.Incremental}, + {ID: "envoy_listener_manager_listener_create_failure_%s", Name: "create_failure", Algo: module.Incremental}, + {ID: "envoy_listener_manager_listener_in_place_updated_%s", Name: "in_place_updated", Algo: module.Incremental}, + }, + } +) + +var ( + listenerAdminDownstreamChartsTmpl = module.Charts{ + listenerAdminDownstreamActiveConnectionsCountChartTmpl.Copy(), + listenerAdminDownstreamConnectionsRateChartTmpl.Copy(), + listenerAdminDownstreamDestroyedConnectionsRateChartTmpl.Copy(), + listenerAdminDownstreamTimedOutConnectionsRateChartTmpl.Copy(), + listenerAdminDownstreamRejectedConnectionsRateChartTmpl.Copy(), + listenerAdminDownstreamFilterClosedByRemoteConnectionsRateChartTmpl.Copy(), + listenerAdminDownstreamFilterReadErrorsRateChartTmpl.Copy(), + + listenerAdminDownstreamActiveSocketsCountChartTmpl.Copy(), + listenerAdminDownstreamTimedOutSocketsRateChartTmpl.Copy(), + } + + listenerAdminDownstreamActiveConnectionsCountChartTmpl = module.Chart{ + ID: "listener_admin_downstream_cx_active_%s", + Title: "Listener admin downstream current active connections", + Units: "connections", + Fam: "downstream adm conns", + Ctx: "envoy.listener_admin_downstream_cx_active_count", + Priority: prioListenerAdminDownstreamActiveConnectionsCount, + Dims: module.Dims{ + {ID: "envoy_listener_admin_downstream_cx_active_%s", Name: "active"}, + }, + } + listenerAdminDownstreamConnectionsRateChartTmpl = module.Chart{ + ID: "listener_admin_downstream_cx_total_%s", + Title: "Listener admin downstream connections", + Units: "connections/s", + Fam: "downstream adm conns", + Ctx: "envoy.listener_admin_downstream_cx_rate", + Priority: prioListenerAdminDownstreamConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_admin_downstream_cx_total_%s", Name: "created", Algo: module.Incremental}, + }, + } + listenerAdminDownstreamDestroyedConnectionsRateChartTmpl = module.Chart{ + ID: "listener_admin_downstream_cx_destroy_%s", + Title: "Listener admin 
downstream destroyed connections",
+		Units:    "connections/s",
+		Fam:      "downstream adm conns",
+		Ctx:      "envoy.listener_admin_downstream_cx_destroy_rate",
+		Priority: prioListenerAdminDownstreamDestroyedConnectionsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_cx_destroy_%s", Name: "destroyed", Algo: module.Incremental},
+		},
+	}
+	listenerAdminDownstreamTimedOutConnectionsRateChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_cx_transport_socket_connect_timeout_%s",
+		Title:    "Listener admin downstream timed out connections",
+		Units:    "connections/s",
+		Fam:      "downstream adm conns",
+		Ctx:      "envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate",
+		Priority: prioListenerAdminDownstreamTimedOutConnectionsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout_%s", Name: "timeout", Algo: module.Incremental},
+		},
+	}
+	listenerAdminDownstreamRejectedConnectionsRateChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_cx_rejected_%s",
+		Title:    "Listener admin downstream rejected connections",
+		Units:    "connections/s",
+		Fam:      "downstream adm conns",
+		Ctx:      "envoy.listener_admin_downstream_cx_rejected_rate",
+		Priority: prioListenerAdminDownstreamRejectedConnectionsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_cx_overflow_%s", Name: "overflow", Algo: module.Incremental},
+			{ID: "envoy_listener_admin_downstream_cx_overload_reject_%s", Name: "overload", Algo: module.Incremental},
+			{ID: "envoy_listener_admin_downstream_global_cx_overflow_%s", Name: "global_overflow", Algo: module.Incremental},
+		},
+	}
+	listenerAdminDownstreamFilterClosedByRemoteConnectionsRateChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_listener_filter_remote_close_%s",
+		Title:    "Listener admin downstream connections closed by remote when peeking data for listener filters",
+		Units:    "connections/s",
+		Fam:      "downstream adm conns",
+		Ctx:      "envoy.listener_admin_downstream_listener_filter_remote_close_rate",
+		Priority: prioListenerAdminDownstreamFilterClosedByRemoteConnectionsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_listener_filter_remote_close_%s", Name: "closed", Algo: module.Incremental},
+		},
+	}
+	listenerAdminDownstreamFilterReadErrorsRateChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_listener_filter_error_%s",
+		Title:    "Listener admin downstream read errors when peeking data for listener filters",
+		Units:    "errors/s",
+		Fam:      "downstream adm conns",
+		Ctx:      "envoy.listener_admin_downstream_listener_filter_error_rate",
+		Priority: prioListenerAdminDownstreamFilterReadErrorsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_listener_filter_error_%s", Name: "read", Algo: module.Incremental},
+		},
+	}
+
+	listenerAdminDownstreamActiveSocketsCountChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_pre_cx_active_%s",
+		Title:    "Listener admin downstream current active sockets",
+		Units:    "sockets",
+		Fam:      "downstream adm sockets",
+		Ctx:      "envoy.listener_admin_downstream_pre_cx_active_count",
+		Priority: prioListenerAdminDownstreamActiveSocketsCount,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_pre_cx_active_%s", Name: "active"},
+		},
+	}
+	listenerAdminDownstreamTimedOutSocketsRateChartTmpl = module.Chart{
+		ID:       "listener_admin_downstream_pre_cx_timeout_%s",
+		Title:    "Listener admin downstream timed out sockets",
+		Units:    "sockets/s",
+		Fam:      "downstream adm sockets",
+		Ctx:      "envoy.listener_admin_downstream_pre_cx_timeout_rate",
+		Priority: prioListenerAdminDownstreamTimedOutSocketsRate,
+		Dims: module.Dims{
+			{ID: "envoy_listener_admin_downstream_pre_cx_timeout_%s", Name: "timeout", Algo: module.Incremental},
+		},
+	}
+)
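+
+// The non-admin listener templates below mirror the admin ones above; only the
+// metric name prefix differs (envoy_listener_downstream_* vs
+// envoy_listener_admin_downstream_*). The trailing "_%s" in chart and dim IDs
+// is filled in with the per-listener id by addCharts at the bottom of this
+// file.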
Priority: prioListenerAdminDownstreamTimedOutSocketsRate, + Dims: module.Dims{ + {ID: "envoy_listener_admin_downstream_pre_cx_timeout_%s", Name: "timeout", Algo: module.Incremental}, + }, + } +) + +var ( + listenerDownstreamChartsTmpl = module.Charts{ + listenerDownstreamActiveConnectionsCountChartTmpl.Copy(), + listenerDownstreamConnectionsRateChartTmpl.Copy(), + listenerDownstreamDestroyedConnectionsRateChartTmpl.Copy(), + listenerDownstreamTimedOutConnectionsRateChartTmpl.Copy(), + listenerDownstreamRejectedConnectionsRateChartTmpl.Copy(), + listenerDownstreamFilterClosedByRemoteConnectionsRateChartTmpl.Copy(), + listenerDownstreamFilterReadErrorsRateChartTmpl.Copy(), + + listenerDownstreamActiveSocketsCountChartTmpl.Copy(), + listenerDownstreamTimedOutSocketsRateChartTmpl.Copy(), + } + + listenerDownstreamActiveConnectionsCountChartTmpl = module.Chart{ + ID: "listener_downstream_cx_active_%s", + Title: "Listener downstream current active connections", + Units: "connections", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_cx_active_count", + Priority: prioListenerDownstreamActiveConnectionsCount, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_cx_active_%s", Name: "active"}, + }, + } + listenerDownstreamConnectionsRateChartTmpl = module.Chart{ + ID: "listener_downstream_cx_total_%s", + Title: "Listener downstream connections", + Units: "connections/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_cx_rate", + Priority: prioListenerDownstreamConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_cx_total_%s", Name: "created", Algo: module.Incremental}, + }, + } + listenerDownstreamDestroyedConnectionsRateChartTmpl = module.Chart{ + ID: "listener_downstream_cx_destroy_%s", + Title: "Listener downstream destroyed connections", + Units: "connections/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_cx_destroy_rate", + Priority: prioListenerDownstreamDestroyedConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_cx_destroy_%s", Name: "destroyed", Algo: module.Incremental}, + }, + } + listenerDownstreamTimedOutConnectionsRateChartTmpl = module.Chart{ + ID: "listener_downstream_cx_transport_socket_connect_timeout_%s", + Title: "Listener downstream timed out connections", + Units: "connections/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_cx_transport_socket_connect_timeout_rate", + Priority: prioListenerDownstreamTimedOutConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_cx_transport_socket_connect_timeout_%s", Name: "timeout", Algo: module.Incremental}, + }, + } + listenerDownstreamRejectedConnectionsRateChartTmpl = module.Chart{ + ID: "listener_downstream_cx_rejected_%s", + Title: "Listener downstream rejected connections", + Units: "connections/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_cx_rejected_rate", + Priority: prioListenerDownstreamRejectedConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_cx_overflow_%s", Name: "overflow", Algo: module.Incremental}, + {ID: "envoy_listener_downstream_cx_overload_reject_%s", Name: "overload", Algo: module.Incremental}, + {ID: "envoy_listener_downstream_global_cx_overflow_%s", Name: "global_overflow", Algo: module.Incremental}, + }, + } + listenerDownstreamFilterClosedByRemoteConnectionsRateChartTmpl = module.Chart{ + ID: "listener_downstream_listener_filter_remote_close_%s", + Title: "Listener downstream connections closed by remote when peeking data for listener filters", + Units: 
"connections/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_listener_filter_remote_close_rate", + Priority: prioListenerDownstreamFilterClosedByRemoteConnectionsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_listener_filter_remote_close_%s", Name: "closed", Algo: module.Incremental}, + }, + } + listenerDownstreamFilterReadErrorsRateChartTmpl = module.Chart{ + ID: "listener_downstream_listener_filter_error_%s", + Title: "Listener downstream read errors when peeking data for listener filters", + Units: "errors/s", + Fam: "downstream conns", + Ctx: "envoy.listener_downstream_listener_filter_error_rate", + Priority: prioListenerDownstreamFilterReadErrorsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_listener_filter_error_%s", Name: "read", Algo: module.Incremental}, + }, + } + + listenerDownstreamActiveSocketsCountChartTmpl = module.Chart{ + ID: "listener_downstream_pre_cx_active_%s", + Title: "Listener downstream current active sockets", + Units: "sockets", + Fam: "downstream sockets", + Ctx: "envoy.listener_downstream_pre_cx_active_count", + Priority: prioListenerDownstreamActiveSocketsCount, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_pre_cx_active_%s", Name: "active"}, + }, + } + listenerDownstreamTimedOutSocketsRateChartTmpl = module.Chart{ + ID: "listener_downstream_pre_cx_timeout_%s", + Title: "Listener downstream timed out sockets", + Units: "sockets/s", + Fam: "downstream sockets", + Ctx: "envoy.listener_downstream_pre_cx_timeout_rate", + Priority: prioListenerDownstreamTimedOutSocketsRate, + Dims: module.Dims{ + {ID: "envoy_listener_downstream_pre_cx_timeout_%s", Name: "timeout", Algo: module.Incremental}, + }, + } +) + +func (e *Envoy) addServerCharts(id string, labels labels.Labels) { + e.addCharts(serverChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addClusterManagerCharts(id string, labels labels.Labels) { + e.addCharts(clusterManagerChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addClusterUpstreamCharts(id string, labels labels.Labels) { + e.addCharts(clusterUpstreamChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addListenerManagerCharts(id string, labels labels.Labels) { + e.addCharts(listenerManagerChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addListenerAdminDownstreamCharts(id string, labels labels.Labels) { + e.addCharts(listenerAdminDownstreamChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addListenerDownstreamCharts(id string, labels labels.Labels) { + e.addCharts(listenerDownstreamChartsTmpl.Copy(), id, labels) +} + +func (e *Envoy) addCharts(charts *module.Charts, id string, labels labels.Labels) { + charts = charts.Copy() + + for _, chart := range *charts { + if id == "" { + chart.ID = strings.Replace(chart.ID, "_%s", "", 1) + for _, dim := range chart.Dims { + dim.ID = strings.Replace(dim.ID, "_%s", "", 1) + } + } else { + chart.ID = fmt.Sprintf(chart.ID, dotReplacer.Replace(id)) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + } + + for _, lbl := range labels { + chart.Labels = append(chart.Labels, module.Label{Key: lbl.Name, Value: lbl.Value}) + } + } + + if err := e.Charts().Add(*charts...); err != nil { + e.Warning(err) + } +} + +func (e *Envoy) removeCharts(id string) { + if id == "" { + return + } + + id = dotReplacer.Replace(id) + for _, chart := range *e.Charts() { + if strings.HasSuffix(chart.ID, id) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +var dotReplacer = strings.NewReplacer(".", "_") diff --git 
a/src/go/collectors/go.d.plugin/modules/envoy/collect.go b/src/go/collectors/go.d.plugin/modules/envoy/collect.go new file mode 100644 index 00000000000000..43ec1975a747ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/collect.go @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package envoy + +import ( + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + + "github.com/prometheus/prometheus/model/labels" +) + +// Server stats: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/statistics# +// Server state: https://www.envoyproxy.io/docs/envoy/latest/api-v3/admin/v3/server_info.proto#enum-admin-v3-serverinfo-state +// Listener stats: https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/stats + +func (e *Envoy) collect() (map[string]int64, error) { + mfs, err := e.prom.Scrape() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + e.collectServerStats(mx, mfs) + e.collectClusterManagerStats(mx, mfs) + e.collectClusterUpstreamStats(mx, mfs) + e.collectListenerManagerStats(mx, mfs) + e.collectListenerAdminDownstreamStats(mx, mfs) + e.collectListenerDownstreamStats(mx, mfs) + + return mx, nil +} + +func (e *Envoy) collectServerStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_server_uptime", + "envoy_server_memory_allocated", + "envoy_server_memory_heap_size", + "envoy_server_memory_physical_size", + "envoy_server_parent_connections", + "envoy_server_total_connections", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.servers[id] { + e.servers[id] = true + e.addServerCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + e.collectGauge(mfs, "envoy_server_state", func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + for _, v := range []string{"live", "draining", "pre_initializing", "initializing"} { + mx[join(name, v, id)] = 0 + } + + switch m.Gauge().Value() { + case 0: + mx[join(name, "live", id)] = 1 + case 1: + mx[join(name, "draining", id)] = 1 + case 2: + mx[join(name, "pre_initializing", id)] = 1 + case 3: + mx[join(name, "initializing", id)] = 1 + } + }) + + for id := range e.servers { + if id != "" && !seen[id] { + delete(e.servers, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectClusterManagerStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_cluster_manager_cluster_added", + "envoy_cluster_manager_cluster_modified", + "envoy_cluster_manager_cluster_removed", + "envoy_cluster_manager_cluster_updated", + "envoy_cluster_manager_cluster_updated_via_merge", + "envoy_cluster_manager_update_merge_cancelled", + "envoy_cluster_manager_update_out_of_merge_window", + } { + e.collectCounter(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.clusterMgrs[id] { + e.clusterMgrs[id] = true + e.addClusterManagerCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Counter().Value()) + }) + } + + for _, n := range []string{ + "envoy_cluster_manager_active_clusters", + "envoy_cluster_manager_warming_clusters", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + for id := range e.clusterMgrs { + if 
id != "" && !seen[id] { + delete(e.clusterMgrs, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectListenerAdminDownstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_listener_admin_downstream_cx_total", + "envoy_listener_admin_downstream_cx_destroy", + "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout", + "envoy_listener_admin_downstream_cx_overflow", + "envoy_listener_admin_downstream_cx_overload_reject", + "envoy_listener_admin_downstream_global_cx_overflow", + "envoy_listener_admin_downstream_pre_cx_timeout", + "envoy_listener_admin_downstream_listener_filter_remote_close", + "envoy_listener_admin_downstream_listener_filter_error", + } { + e.collectCounter(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.listenerAdminDownstream[id] { + e.listenerAdminDownstream[id] = true + e.addListenerAdminDownstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Counter().Value()) + }) + } + for _, n := range []string{ + "envoy_listener_admin_downstream_cx_active", + "envoy_listener_admin_downstream_pre_cx_active", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.listenerAdminDownstream[id] { + e.listenerAdminDownstream[id] = true + e.addListenerAdminDownstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + for id := range e.listenerAdminDownstream { + if id != "" && !seen[id] { + delete(e.listenerAdminDownstream, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectListenerDownstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_listener_downstream_cx_total", + "envoy_listener_downstream_cx_destroy", + "envoy_listener_downstream_cx_transport_socket_connect_timeout", + "envoy_listener_downstream_cx_overflow", + "envoy_listener_downstream_cx_overload_reject", + "envoy_listener_downstream_global_cx_overflow", + "envoy_listener_downstream_pre_cx_timeout", + "envoy_listener_downstream_listener_filter_remote_close", + "envoy_listener_downstream_listener_filter_error", + } { + e.collectCounter(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.listenerDownstream[id] { + e.listenerDownstream[id] = true + e.addListenerDownstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Counter().Value()) + }) + } + for _, n := range []string{ + "envoy_listener_downstream_cx_active", + "envoy_listener_downstream_pre_cx_active", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.listenerDownstream[id] { + e.listenerDownstream[id] = true + e.addListenerDownstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + for id := range e.listenerDownstream { + if id != "" && !seen[id] { + delete(e.listenerDownstream, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectClusterUpstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_cluster_upstream_cx_total", + "envoy_cluster_upstream_cx_http1_total", + "envoy_cluster_upstream_cx_http2_total", + "envoy_cluster_upstream_cx_http3_total", + "envoy_cluster_upstream_cx_http3_total", + 
"envoy_cluster_upstream_cx_connect_fail", + "envoy_cluster_upstream_cx_connect_timeout", + "envoy_cluster_upstream_cx_idle_timeout", + "envoy_cluster_upstream_cx_max_duration_reached", + "envoy_cluster_upstream_cx_connect_attempts_exceeded", + "envoy_cluster_upstream_cx_overflow", + "envoy_cluster_upstream_cx_destroy", + "envoy_cluster_upstream_cx_destroy_local", + "envoy_cluster_upstream_cx_destroy_remote", + "envoy_cluster_upstream_cx_rx_bytes_total", + "envoy_cluster_upstream_cx_tx_bytes_total", + "envoy_cluster_upstream_rq_total", + "envoy_cluster_upstream_rq_pending_total", + "envoy_cluster_upstream_rq_pending_overflow", + "envoy_cluster_upstream_rq_pending_failure_eject", + "envoy_cluster_upstream_rq_cancelled", + "envoy_cluster_upstream_rq_maintenance_mode", + "envoy_cluster_upstream_rq_timeout", + "envoy_cluster_upstream_rq_max_duration_reached", + "envoy_cluster_upstream_rq_per_try_timeout", + "envoy_cluster_upstream_rq_rx_reset", + "envoy_cluster_upstream_rq_tx_reset", + "envoy_cluster_upstream_rq_retry", + "envoy_cluster_upstream_rq_retry_backoff_exponential", + "envoy_cluster_upstream_rq_retry_backoff_ratelimited", + "envoy_cluster_upstream_rq_retry_success", + "envoy_cluster_membership_change", + "envoy_cluster_update_success", + "envoy_cluster_update_failure", + "envoy_cluster_update_empty", + "envoy_cluster_update_no_rebuild", + } { + e.collectCounter(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.clusterUpstream[id] { + e.clusterUpstream[id] = true + e.addClusterUpstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Counter().Value()) + }) + } + + for _, n := range []string{ + "envoy_cluster_upstream_cx_active", + "envoy_cluster_upstream_cx_rx_bytes_buffered", + "envoy_cluster_upstream_cx_tx_bytes_buffered", + "envoy_cluster_upstream_rq_active", + "envoy_cluster_upstream_rq_pending_active", + "envoy_cluster_membership_healthy", + "envoy_cluster_membership_degraded", + "envoy_cluster_membership_excluded", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.clusterUpstream[id] { + e.clusterUpstream[id] = true + e.addClusterUpstreamCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + for id := range e.clusterUpstream { + if id != "" && !seen[id] { + delete(e.clusterUpstream, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectListenerManagerStats(mx map[string]int64, mfs prometheus.MetricFamilies) { + seen := make(map[string]bool) + for _, n := range []string{ + "envoy_listener_manager_listener_added", + "envoy_listener_manager_listener_modified", + "envoy_listener_manager_listener_removed", + "envoy_listener_manager_listener_stopped", + "envoy_listener_manager_listener_create_success", + "envoy_listener_manager_listener_create_failure", + "envoy_listener_manager_listener_in_place_updated", + } { + e.collectCounter(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + seen[id] = true + + if !e.listenerMgrs[id] { + e.listenerMgrs[id] = true + e.addListenerManagerCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Counter().Value()) + }) + } + + for _, n := range []string{ + "envoy_listener_manager_total_listeners_warming", + "envoy_listener_manager_total_listeners_active", + "envoy_listener_manager_total_listeners_draining", + } { + e.collectGauge(mfs, n, func(name string, m prometheus.Metric) { + id := e.joinLabels(m.Labels()) + 
seen[id] = true + + if !e.listenerMgrs[id] { + e.listenerMgrs[id] = true + e.addListenerManagerCharts(id, m.Labels()) + } + + mx[join(name, id)] += int64(m.Gauge().Value()) + }) + } + + for id := range e.listenerMgrs { + if id != "" && !seen[id] { + delete(e.listenerMgrs, id) + e.removeCharts(id) + } + } +} + +func (e *Envoy) collectGauge(mfs prometheus.MetricFamilies, metric string, process func(name string, m prometheus.Metric)) { + if mf := mfs.GetGauge(metric); mf != nil { + for _, m := range mf.Metrics() { + process(mf.Name(), m) + } + } +} + +func (e *Envoy) collectCounter(mfs prometheus.MetricFamilies, metric string, process func(name string, m prometheus.Metric)) { + if mf := mfs.GetCounter(metric); mf != nil { + for _, m := range mf.Metrics() { + process(mf.Name(), m) + } + } +} + +func (e *Envoy) joinLabels(labels labels.Labels) string { + var buf strings.Builder + first := true + for _, lbl := range labels { + v := lbl.Value + if v == "" { + continue + } + if strings.IndexByte(v, ' ') != -1 { + v = spaceReplacer.Replace(v) + } + if strings.IndexByte(v, '\\') != -1 { + if v = decodeLabelValue(v); strings.IndexByte(v, '\\') != -1 { + v = backslashReplacer.Replace(v) + } + } + if first { + buf.WriteString(v) + first = false + } else { + buf.WriteString("_" + v) + } + } + return buf.String() +} + +var ( + spaceReplacer = strings.NewReplacer(" ", "_") + backslashReplacer = strings.NewReplacer(`\`, "_") +) + +func decodeLabelValue(value string) string { + v, err := strconv.Unquote("\"" + value + "\"") + if err != nil { + return value + } + return v +} + +func join(name string, elems ...string) string { + for _, v := range elems { + if v != "" { + name += "_" + v + } + } + return name +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/config_schema.json b/src/go/collectors/go.d.plugin/modules/envoy/config_schema.json new file mode 100644 index 00000000000000..48b3c947892da0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/envoy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/envoy.go b/src/go/collectors/go.d.plugin/modules/envoy/envoy.go new file mode 100644 index 00000000000000..de9efa13dc36c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/envoy.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package envoy + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("envoy", module.Creator{ + JobConfigSchema: configSchema, + Create: func() 
module.Module { return New() }, + }) +} + +func New() *Envoy { + return &Envoy{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9091/stats/prometheus", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 2}, + }, + }, + }, + + charts: &module.Charts{}, + + servers: make(map[string]bool), + clusterMgrs: make(map[string]bool), + clusterUpstream: make(map[string]bool), + listenerMgrs: make(map[string]bool), + listenerAdminDownstream: make(map[string]bool), + listenerDownstream: make(map[string]bool), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Envoy struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + + charts *module.Charts + + servers map[string]bool + clusterMgrs map[string]bool + clusterUpstream map[string]bool + listenerMgrs map[string]bool + listenerAdminDownstream map[string]bool + listenerDownstream map[string]bool +} + +func (e *Envoy) Init() bool { + if err := e.validateConfig(); err != nil { + e.Errorf("config validation: %v", err) + return false + } + + prom, err := e.initPrometheusClient() + if err != nil { + e.Errorf("init Prometheus client: %v", err) + return false + } + e.prom = prom + + return true +} + +func (e *Envoy) Check() bool { + return len(e.Collect()) > 0 +} + +func (e *Envoy) Charts() *module.Charts { + return e.charts +} + +func (e *Envoy) Collect() map[string]int64 { + mx, err := e.collect() + if err != nil { + e.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (e *Envoy) Cleanup() { + if e.prom == nil || e.prom.HTTPClient() == nil { + return + } + + e.prom.HTTPClient().CloseIdleConnections() +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/envoy_test.go b/src/go/collectors/go.d.plugin/modules/envoy/envoy_test.go new file mode 100644 index 00000000000000..3bdd82cb1a5fc5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/envoy_test.go @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package envoy + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataEnvoyConsulDataplane, _ = os.ReadFile("testdata/consul-dataplane.txt") + dataEnvoy, _ = os.ReadFile("testdata/envoy.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataEnvoyConsulDataplane": dataEnvoyConsulDataplane, + "dataEnvoy": dataEnvoy, + } { + require.NotNilf(t, data, name) + } +} + +func TestEnvoy_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + envoy := New() + envoy.Config = test.config + + if test.wantFail { + assert.False(t, envoy.Init()) + } else { + assert.True(t, envoy.Init()) + } + }) + } + +} + +func TestEnvoy_Cleanup(t *testing.T) { + envoy := New() + assert.NotPanics(t, envoy.Cleanup) + + require.True(t, envoy.Init()) + assert.NotPanics(t, envoy.Cleanup) +} + +func TestEnvoy_Charts(t *testing.T) { + envoy, cleanup := prepareCaseEnvoyStats() + defer cleanup() + + require.Empty(t, *envoy.Charts()) + + require.True(t, envoy.Init()) + _ = envoy.Collect() + require.NotEmpty(t, *envoy.Charts()) +} + 
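+// TestEnvoy_Check exercises the table-driven cases below: healthy endpoints
+// serving valid Prometheus text (Envoy and consul-dataplane fixtures), an
+// invalid payload, and an HTTP 404 response.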
+func TestEnvoy_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (envoy *Envoy, cleanup func()) + wantFail bool + }{ + "case envoy consul dataplane": { + wantFail: false, + prepare: prepareCaseEnvoyConsulDataplaneStats, + }, + "case envoy": { + wantFail: false, + prepare: prepareCaseEnvoyStats, + }, + "case invalid data response": { + wantFail: true, + prepare: prepareCaseInvalidDataResponse, + }, + "case 404": { + wantFail: true, + prepare: prepareCase404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + envoy, cleanup := test.prepare() + defer cleanup() + + require.True(t, envoy.Init()) + + if test.wantFail { + assert.False(t, envoy.Check()) + } else { + assert.True(t, envoy.Check()) + } + }) + } +} + +func TestEnvoy_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (envoy *Envoy, cleanup func()) + wantMetrics map[string]int64 + }{ + "case envoy consul dataplane": { + prepare: prepareCaseEnvoyConsulDataplaneStats, + wantMetrics: map[string]int64{ + "envoy_cluster_manager_active_clusters_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 4, + "envoy_cluster_manager_cluster_added_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 4, + "envoy_cluster_manager_cluster_modified_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_manager_cluster_removed_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_manager_cluster_updated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 2, + "envoy_cluster_manager_cluster_updated_via_merge_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_manager_update_merge_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_manager_update_out_of_merge_window_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_manager_warming_clusters_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1, + "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 2, + "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1, + "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + 
"envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1, + "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1, + "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2, + "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + 
"envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507, + "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1, + "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507, + "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1, + "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2, + "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + 
"envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 17, + "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 102618, + "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 3853, + "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 8645645, + "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 724779, + "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507, + "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1, + 
"envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2, + "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 114982, + "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1240, + "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 732, + "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1, + "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 4749, + "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + 
"envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507, + "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1, + "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2, + "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + 
"envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1, + "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1758, + "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1, + "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 3, + "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0, + "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0, + "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0, + "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0, + "envoy_listener_admin_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 1, + "envoy_listener_admin_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 2, + "envoy_listener_admin_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3, + 
"envoy_listener_admin_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_admin_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 1, + "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 3, + "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 6507, + "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 1, + "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 4, + "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 6507, + "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 1, + "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + 
"envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0, + "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0, + "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0, + "envoy_listener_manager_listener_added_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3, + "envoy_listener_manager_listener_create_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_listener_create_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 6, + "envoy_listener_manager_listener_in_place_updated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_listener_modified_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_listener_removed_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_listener_stopped_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_total_listeners_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3, + "envoy_listener_manager_total_listeners_draining_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_listener_manager_total_listeners_warming_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_memory_allocated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 7742368, + "envoy_server_memory_heap_size_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 14680064, + "envoy_server_memory_physical_size_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 19175778, + "envoy_server_parent_connections_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_state_draining_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_state_initializing_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_state_live_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 1, + 
"envoy_server_state_pre_initializing_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_total_connections_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0, + "envoy_server_uptime_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 32527, + }, + }, + "case envoy": { + prepare: prepareCaseEnvoyStats, + wantMetrics: map[string]int64{ + "envoy_cluster_manager_active_clusters": 1, + "envoy_cluster_manager_cluster_added": 1, + "envoy_cluster_manager_cluster_modified": 0, + "envoy_cluster_manager_cluster_removed": 0, + "envoy_cluster_manager_cluster_updated": 0, + "envoy_cluster_manager_cluster_updated_via_merge": 0, + "envoy_cluster_manager_update_merge_cancelled": 0, + "envoy_cluster_manager_update_out_of_merge_window": 0, + "envoy_cluster_manager_warming_clusters": 0, + "envoy_cluster_membership_change_service_envoyproxy_io": 1, + "envoy_cluster_membership_degraded_service_envoyproxy_io": 0, + "envoy_cluster_membership_excluded_service_envoyproxy_io": 0, + "envoy_cluster_membership_healthy_service_envoyproxy_io": 1, + "envoy_cluster_update_empty_service_envoyproxy_io": 0, + "envoy_cluster_update_failure_service_envoyproxy_io": 0, + "envoy_cluster_update_no_rebuild_service_envoyproxy_io": 0, + "envoy_cluster_update_success_service_envoyproxy_io": 1242, + "envoy_cluster_upstream_cx_active_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_connect_attempts_exceeded_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_connect_fail_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_connect_timeout_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_destroy_local_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_destroy_remote_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_destroy_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_http1_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_http2_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_http3_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_idle_timeout_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_max_duration_reached_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_overflow_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_rx_bytes_buffered_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_rx_bytes_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_tx_bytes_buffered_service_envoyproxy_io": 0, + "envoy_cluster_upstream_cx_tx_bytes_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_active_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_cancelled_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_maintenance_mode_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_max_duration_reached_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_pending_active_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_pending_failure_eject_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_pending_overflow_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_pending_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_per_try_timeout_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_retry_backoff_exponential_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_retry_backoff_ratelimited_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_retry_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_retry_success_service_envoyproxy_io": 0, + 
"envoy_cluster_upstream_rq_rx_reset_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_timeout_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_total_service_envoyproxy_io": 0, + "envoy_cluster_upstream_rq_tx_reset_service_envoyproxy_io": 0, + "envoy_listener_admin_downstream_cx_active": 2, + "envoy_listener_admin_downstream_cx_destroy": 4, + "envoy_listener_admin_downstream_cx_overflow": 0, + "envoy_listener_admin_downstream_cx_overload_reject": 0, + "envoy_listener_admin_downstream_cx_total": 6, + "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout": 0, + "envoy_listener_admin_downstream_global_cx_overflow": 0, + "envoy_listener_admin_downstream_listener_filter_error": 0, + "envoy_listener_admin_downstream_listener_filter_remote_close": 0, + "envoy_listener_admin_downstream_pre_cx_active": 0, + "envoy_listener_admin_downstream_pre_cx_timeout": 0, + "envoy_listener_downstream_cx_active_0.0.0.0_10000": 0, + "envoy_listener_downstream_cx_destroy_0.0.0.0_10000": 0, + "envoy_listener_downstream_cx_overflow_0.0.0.0_10000": 0, + "envoy_listener_downstream_cx_overload_reject_0.0.0.0_10000": 0, + "envoy_listener_downstream_cx_total_0.0.0.0_10000": 0, + "envoy_listener_downstream_cx_transport_socket_connect_timeout_0.0.0.0_10000": 0, + "envoy_listener_downstream_global_cx_overflow_0.0.0.0_10000": 0, + "envoy_listener_downstream_listener_filter_error_0.0.0.0_10000": 0, + "envoy_listener_downstream_listener_filter_remote_close_0.0.0.0_10000": 0, + "envoy_listener_downstream_pre_cx_active_0.0.0.0_10000": 0, + "envoy_listener_downstream_pre_cx_timeout_0.0.0.0_10000": 0, + "envoy_listener_manager_listener_added": 1, + "envoy_listener_manager_listener_create_failure": 0, + "envoy_listener_manager_listener_create_success": 16, + "envoy_listener_manager_listener_in_place_updated": 0, + "envoy_listener_manager_listener_modified": 0, + "envoy_listener_manager_listener_removed": 0, + "envoy_listener_manager_listener_stopped": 0, + "envoy_listener_manager_total_listeners_active": 1, + "envoy_listener_manager_total_listeners_draining": 0, + "envoy_listener_manager_total_listeners_warming": 0, + "envoy_server_memory_allocated": 7630184, + "envoy_server_memory_heap_size": 16777216, + "envoy_server_memory_physical_size": 28426958, + "envoy_server_parent_connections": 0, + "envoy_server_state_draining": 0, + "envoy_server_state_initializing": 0, + "envoy_server_state_live": 1, + "envoy_server_state_pre_initializing": 0, + "envoy_server_total_connections": 0, + "envoy_server_uptime": 6225, + }, + }, + "case invalid data response": { + prepare: prepareCaseInvalidDataResponse, + wantMetrics: nil, + }, + "case 404": { + prepare: prepareCase404, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + envoy, cleanup := test.prepare() + defer cleanup() + + require.True(t, envoy.Init()) + + mx := envoy.Collect() + + require.Equal(t, test.wantMetrics, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, envoy, mx) + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, envoy *Envoy, mx map[string]int64) { + for _, chart := range *envoy.Charts() { + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := mx[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareCaseEnvoyConsulDataplaneStats() (*Envoy, func()) { + srv := 
httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataEnvoyConsulDataplane) + })) + envoy := New() + envoy.URL = srv.URL + + return envoy, srv.Close +} + +func prepareCaseEnvoyStats() (*Envoy, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(dataEnvoy) + })) + envoy := New() + envoy.URL = srv.URL + + return envoy, srv.Close +} + +func prepareCaseInvalidDataResponse() (*Envoy, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + envoy := New() + envoy.URL = srv.URL + + return envoy, srv.Close +} + +func prepareCase404() (*Envoy, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + envoy := New() + envoy.URL = srv.URL + + return envoy, srv.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/init.go b/src/go/collectors/go.d.plugin/modules/envoy/init.go new file mode 100644 index 00000000000000..96d73545b952cd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package envoy + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (e *Envoy) validateConfig() error { + if e.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (e *Envoy) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(e.Client) + if err != nil { + return nil, err + } + + return prometheus.New(httpClient, e.Request), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/envoy/integrations/envoy.md b/src/go/collectors/go.d.plugin/modules/envoy/integrations/envoy.md new file mode 100644 index 00000000000000..3702f7abe36b6a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/integrations/envoy.md @@ -0,0 +1,271 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/envoy/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/envoy/metadata.yaml" +sidebar_label: "Envoy" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Envoy + + +<img src="https://netdata.cloud/img/envoy.svg" width="150"/> + + +Plugin: go.d.plugin +Module: envoy + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Envoy proxies. It collects server, cluster, and listener metrics. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects Envoy instances running on localhost. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
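+The collector's tests make this concrete: each unique label set on a scraped series produces its own flattened metric key, with the label values appended to the metric name. Below is a minimal sketch of that naming convention, assuming a hypothetical `flattenKey` helper and a label order inferred from the expected keys; it is an illustration, not the module's actual implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// flattenKey is a hypothetical helper, not the module's real code: it shows
+// how the test expectations derive one key per unique label set by joining
+// the label values onto the Prometheus metric name with underscores.
+func flattenKey(name string, labelValues ...string) string {
+	if len(labelValues) == 0 {
+		return name
+	}
+	return name + "_" + strings.Join(labelValues, "_")
+}
+
+func main() {
+	// Label values as they appear on the series in testdata/consul-dataplane.txt
+	// (datacenter, partition, namespace, local cluster, service); the order is
+	// inferred from the expected keys, not taken from the implementation.
+	fmt.Println(flattenKey("envoy_listener_manager_listener_added",
+		"consul-sandbox-cluster-0159c9d3", "default", "default", "mynginx", "mynginx"))
+}
+```
+
+Run as-is, this prints `envoy_listener_manager_listener_added_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx`, the key asserted in the Consul Dataplane test expectations above, which is why each metrics table row below can appear once per unique label set.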
+ + + +### Per Envoy instance + +Envoy exposes metrics in Prometheus format. All metric labels are added to charts. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| envoy.server_state | live, draining, pre_initializing, initializing | state | +| envoy.server_connections_count | connections | connections | +| envoy.server_parent_connections_count | connections | connections | +| envoy.server_memory_allocated_size | allocated | bytes | +| envoy.server_memory_heap_size | heap | bytes | +| envoy.server_memory_physical_size | physical | bytes | +| envoy.server_uptime | uptime | seconds | +| envoy.cluster_manager_cluster_count | active, not_active | clusters | +| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s | +| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s | +| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s | +| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s | +| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s | +| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints | +| envoy.cluster_membership_changes_rate | membership | changes/s | +| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s | +| envoy.cluster_upstream_cx_active_count | active | connections | +| envoy.cluster_upstream_cx_rate | created | connections/s | +| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s | +| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s | +| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s | +| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s | +| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s | +| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes | +| envoy.cluster_upstream_rq_active_count | active | requests | +| envoy.cluster_upstream_rq_rate | requests | requests/s | +| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s | +| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests | +| envoy.cluster_upstream_rq_pending_rate | pending | requests/s | +| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s | +| envoy.cluster_upstream_rq_retry_rate | request | retries/s | +| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s | +| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s | +| envoy.listener_manager_listeners_count | active, warming, draining | listeners | +| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s | +| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s | +| envoy.listener_admin_downstream_cx_active_count | active | connections | +| envoy.listener_admin_downstream_cx_rate | created | connections/s | +| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s | +| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s | +| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s | +| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s | 
+| envoy.listener_admin_downstream_listener_filter_error_rate | read | errors/s | +| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets | +| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s | +| envoy.listener_downstream_cx_active_count | active | connections | +| envoy.listener_downstream_cx_rate | created | connections/s | +| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s | +| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s | +| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s | +| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s | +| envoy.listener_downstream_listener_filter_error_rate | read | errors/s | +| envoy.listener_downstream_pre_cx_active_count | active | sockets | +| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/envoy.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/envoy.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9901/stats/prometheus + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + + - name: remote + url: http://192.0.2.1:9901/stats/prometheus + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m envoy + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/envoy/metadata.yaml b/src/go/collectors/go.d.plugin/modules/envoy/metadata.yaml new file mode 100644 index 00000000000000..def9e726a0062d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/metadata.yaml @@ -0,0 +1,538 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-envoy + plugin_name: go.d.plugin + module_name: envoy + monitored_instance: + name: Envoy + link: https://www.envoyproxy.io/ + icon_filename: envoy.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - envoy + - proxy + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Envoy proxies. It collects server, cluster, and listener metrics. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects Envoy instances running on localhost. + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/envoy.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:9091/stats/prometheus + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. 
+ default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + username: username + password: password + - name: HTTPS with self-signed certificate + description: | + Do not validate server certificate chain and hostname. + config: | + jobs: + - name: local + url: https://127.0.0.1:9901/stats/prometheus + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:9901/stats/prometheus + + - name: remote + url: http://192.0.2.1:9901/stats/prometheus + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: Envoy exposes metrics in Prometheus format. All metric labels are added to charts. 
+ labels: [] + metrics: + - name: envoy.server_state + description: Server current state + unit: state + chart_type: line + dimensions: + - name: live + - name: draining + - name: pre_initializing + - name: initializing + - name: envoy.server_connections_count + description: Server current connections + unit: connections + chart_type: line + dimensions: + - name: connections + - name: envoy.server_parent_connections_count + description: Server current parent connections + unit: connections + chart_type: line + dimensions: + - name: connections + - name: envoy.server_memory_allocated_size + description: Server memory allocated size + unit: bytes + chart_type: line + dimensions: + - name: allocated + - name: envoy.server_memory_heap_size + description: Server memory heap size + unit: bytes + chart_type: line + dimensions: + - name: heap + - name: envoy.server_memory_physical_size + description: Server memory physical size + unit: bytes + chart_type: line + dimensions: + - name: physical + - name: envoy.server_uptime + description: Server uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: envoy.cluster_manager_cluster_count + description: Cluster manager current clusters + unit: clusters + chart_type: line + dimensions: + - name: active + - name: not_active + - name: envoy.cluster_manager_cluster_changes_rate + description: Cluster manager cluster changes + unit: clusters/s + chart_type: line + dimensions: + - name: added + - name: modified + - name: removed + - name: envoy.cluster_manager_cluster_updates_rate + description: Cluster manager updates + unit: updates/s + chart_type: line + dimensions: + - name: cluster + - name: envoy.cluster_manager_cluster_updated_via_merge_rate + description: Cluster manager updates applied as merged updates + unit: updates/s + chart_type: line + dimensions: + - name: via_merge + - name: envoy.cluster_manager_update_merge_cancelled_rate + description: Cluster manager cancelled merged updates + unit: updates/s + chart_type: line + dimensions: + - name: merge_cancelled + - name: envoy.cluster_manager_update_out_of_merge_window_rate + description: Cluster manager out-of-merge-window updates + unit: updates/s + chart_type: line + dimensions: + - name: out_of_merge_window + - name: envoy.cluster_membership_endpoints_count + description: Cluster membership current endpoints + unit: endpoints + chart_type: line + dimensions: + - name: healthy + - name: degraded + - name: excluded + - name: envoy.cluster_membership_changes_rate + description: Cluster membership changes + unit: changes/s + chart_type: line + dimensions: + - name: membership + - name: envoy.cluster_membership_updates_rate + description: Cluster membership updates + unit: updates/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: empty + - name: no_rebuild + - name: envoy.cluster_upstream_cx_active_count + description: Cluster upstream current active connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: envoy.cluster_upstream_cx_rate + description: Cluster upstream connections + unit: connections/s + chart_type: line + dimensions: + - name: created + - name: envoy.cluster_upstream_cx_http_rate + description: Cluster upstream connections by HTTP version + unit: connections/s + chart_type: line + dimensions: + - name: http1 + - name: http2 + - name: http3 + - name: envoy.cluster_upstream_cx_destroy_rate + description: Cluster upstream destroyed connections + unit: connections/s + chart_type: line +
dimensions: + - name: local + - name: remote + - name: envoy.cluster_upstream_cx_connect_fail_rate + description: Cluster upstream failed connections + unit: connections/s + chart_type: line + dimensions: + - name: failed + - name: envoy.cluster_upstream_cx_connect_timeout_rate + description: Cluster upstream timed out connections + unit: connections/s + chart_type: line + dimensions: + - name: timeout + - name: envoy.cluster_upstream_cx_bytes_rate + description: Cluster upstream connection traffic + unit: bytes/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: envoy.cluster_upstream_cx_bytes_buffered_size + description: Cluster upstream current connection buffered size + unit: bytes + chart_type: line + dimensions: + - name: received + - name: send + - name: envoy.cluster_upstream_rq_active_count + description: Cluster upstream current active requests + unit: requests + chart_type: line + dimensions: + - name: active + - name: envoy.cluster_upstream_rq_rate + description: Cluster upstream requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: envoy.cluster_upstream_rq_failed_rate + description: Cluster upstream failed requests + unit: requests/s + chart_type: line + dimensions: + - name: cancelled + - name: maintenance_mode + - name: timeout + - name: max_duration_reached + - name: per_try_timeout + - name: reset_local + - name: reset_remote + - name: envoy.cluster_upstream_rq_pending_active_count + description: Cluster upstream current active pending requests + unit: requests + chart_type: line + dimensions: + - name: active_pending + - name: envoy.cluster_upstream_rq_pending_rate + description: Cluster upstream pending requests + unit: requests/s + chart_type: line + dimensions: + - name: pending + - name: envoy.cluster_upstream_rq_pending_failed_rate + description: Cluster upstream failed pending requests + unit: requests/s + chart_type: line + dimensions: + - name: overflow + - name: failure_eject + - name: envoy.cluster_upstream_rq_retry_rate + description: Cluster upstream request retries + unit: retries/s + chart_type: line + dimensions: + - name: request + - name: envoy.cluster_upstream_rq_retry_success_rate + description: Cluster upstream request successful retries + unit: retries/s + chart_type: line + dimensions: + - name: success + - name: envoy.cluster_upstream_rq_retry_backoff_rate + description: Cluster upstream request backoff retries + unit: retries/s + chart_type: line + dimensions: + - name: exponential + - name: ratelimited + - name: envoy.listener_manager_listeners_count + description: Listener manager current listeners + unit: listeners + chart_type: line + dimensions: + - name: active + - name: warming + - name: draining + - name: envoy.listener_manager_listener_changes_rate + description: Listener manager listener changes + unit: listeners/s + chart_type: line + dimensions: + - name: added + - name: modified + - name: removed + - name: stopped + - name: envoy.listener_manager_listener_object_events_rate + description: Listener manager listener object events + unit: objects/s + chart_type: line + dimensions: + - name: create_success + - name: create_failure + - name: in_place_updated + - name: envoy.listener_admin_downstream_cx_active_count + description: Listener admin downstream current active connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: envoy.listener_admin_downstream_cx_rate + description: Listener admin downstream connections + unit: connections/s + 
chart_type: line + dimensions: + - name: created + - name: envoy.listener_admin_downstream_cx_destroy_rate + description: Listener admin downstream destroyed connections + unit: connections/s + chart_type: line + dimensions: + - name: destroyed + - name: envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate + description: Listener admin downstream timed out connections + unit: connections/s + chart_type: line + dimensions: + - name: timeout + - name: envoy.listener_admin_downstream_cx_rejected_rate + description: Listener admin downstream rejected connections + unit: connections/s + chart_type: line + dimensions: + - name: overflow + - name: overload + - name: global_overflow + - name: envoy.listener_admin_downstream_listener_filter_remote_close_rate + description: Listener admin downstream connections closed by remote when peeking data for listener filters + unit: connections/s + chart_type: line + dimensions: + - name: closed + - name: envoy.listener_admin_downstream_listener_filter_error_rate + description: Listener admin downstream read errors when peeking data for listener filters + unit: errors/s + chart_type: line + dimensions: + - name: read + - name: envoy.listener_admin_downstream_pre_cx_active_count + description: Listener admin downstream current active sockets + unit: sockets + chart_type: line + dimensions: + - name: active + - name: envoy.listener_admin_downstream_pre_cx_timeout_rate + description: Listener admin downstream timed out sockets + unit: sockets/s + chart_type: line + dimensions: + - name: timeout + - name: envoy.listener_downstream_cx_active_count + description: Listener downstream current active connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: envoy.listener_downstream_cx_rate + description: Listener downstream connections + unit: connections/s + chart_type: line + dimensions: + - name: created + - name: envoy.listener_downstream_cx_destroy_rate + description: Listener downstream destroyed connections + unit: connections/s + chart_type: line + dimensions: + - name: destroyed + - name: envoy.listener_downstream_cx_transport_socket_connect_timeout_rate + description: Listener downstream timed out connections + unit: connections/s + chart_type: line + dimensions: + - name: timeout + - name: envoy.listener_downstream_cx_rejected_rate + description: Listener downstream rejected connections + unit: connections/s + chart_type: line + dimensions: + - name: overflow + - name: overload + - name: global_overflow + - name: envoy.listener_downstream_listener_filter_remote_close_rate + description: Listener downstream connections closed by remote when peeking data for listener filters + unit: connections/s + chart_type: line + dimensions: + - name: closed + - name: envoy.listener_downstream_listener_filter_error_rate + description: Listener downstream read errors when peeking data for listener filters + unit: errors/s + chart_type: line + dimensions: + - name: read + - name: envoy.listener_downstream_pre_cx_active_count + description: Listener downstream current active sockets + unit: sockets + chart_type: line + dimensions: + - name: active + - name: envoy.listener_downstream_pre_cx_timeout_rate + description: Listener downstream timed out sockets + unit: sockets/s + chart_type: line + dimensions: + - name: timeout diff --git a/src/go/collectors/go.d.plugin/modules/envoy/testdata/consul-dataplane.txt b/src/go/collectors/go.d.plugin/modules/envoy/testdata/consul-dataplane.txt new file mode 100644 index
00000000000000..2dbb91856cdb61 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/testdata/consul-dataplane.txt @@ -0,0 +1,1786 @@ +# HELP consul_dataplane_connect_duration This will be a sample of the time it takes to get connected to a server. This duration will cover everything from making the server features request all the way through to opening an xDS session with a server +# TYPE consul_dataplane_connect_duration summary +consul_dataplane_connect_duration{quantile="0.5"} NaN +consul_dataplane_connect_duration{quantile="0.9"} NaN +consul_dataplane_connect_duration{quantile="0.99"} NaN +consul_dataplane_connect_duration_sum 321.85443115234375 +consul_dataplane_connect_duration_count 1 +# HELP consul_dataplane_connection_errors This will track the number of errors encountered during the stream connection +# TYPE consul_dataplane_connection_errors gauge +consul_dataplane_connection_errors 0 +# HELP consul_dataplane_consul_connected This will either be 0 or 1 depending on whether the dataplane is currently connected to a Consul server. +# TYPE consul_dataplane_consul_connected gauge +consul_dataplane_consul_connected 1 +# HELP consul_dataplane_discover_servers_duration This will be a sample of the time it takes to discover Consul server IPs. +# TYPE consul_dataplane_discover_servers_duration summary +consul_dataplane_discover_servers_duration{quantile="0.5"} NaN +consul_dataplane_discover_servers_duration{quantile="0.9"} NaN +consul_dataplane_discover_servers_duration{quantile="0.99"} NaN +consul_dataplane_discover_servers_duration_sum 0.6415159702301025 +consul_dataplane_discover_servers_duration_count 1 +# HELP consul_dataplane_envoy_connected This will either be 0 or 1 depending on whether Envoy is currently running and connected to the local xDS listeners. +# TYPE consul_dataplane_envoy_connected gauge +consul_dataplane_envoy_connected 1 +# HELP consul_dataplane_go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE consul_dataplane_go_gc_duration_seconds summary +consul_dataplane_go_gc_duration_seconds{quantile="0"} 2.194e-05 +consul_dataplane_go_gc_duration_seconds{quantile="0.25"} 3.592e-05 +consul_dataplane_go_gc_duration_seconds{quantile="0.5"} 5.2941e-05 +consul_dataplane_go_gc_duration_seconds{quantile="0.75"} 6.61e-05 +consul_dataplane_go_gc_duration_seconds{quantile="1"} 0.000139612 +consul_dataplane_go_gc_duration_seconds_sum 0.014481198 +consul_dataplane_go_gc_duration_seconds_count 273 +# HELP consul_dataplane_go_goroutines Number of goroutines that currently exist. +# TYPE consul_dataplane_go_goroutines gauge +consul_dataplane_go_goroutines 41 +# HELP consul_dataplane_go_info Information about the Go environment. +# TYPE consul_dataplane_go_info gauge +consul_dataplane_go_info{version="go1.19.1"} 1 +# HELP consul_dataplane_go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE consul_dataplane_go_memstats_alloc_bytes gauge +consul_dataplane_go_memstats_alloc_bytes 2.543784e+06 +# HELP consul_dataplane_go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE consul_dataplane_go_memstats_alloc_bytes_total counter +consul_dataplane_go_memstats_alloc_bytes_total 4.6530512e+07 +# HELP consul_dataplane_go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE consul_dataplane_go_memstats_buck_hash_sys_bytes gauge +consul_dataplane_go_memstats_buck_hash_sys_bytes 4700 +# HELP consul_dataplane_go_memstats_frees_total Total number of frees. 
+# TYPE consul_dataplane_go_memstats_frees_total counter +consul_dataplane_go_memstats_frees_total 1.356599e+06 +# HELP consul_dataplane_go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE consul_dataplane_go_memstats_gc_sys_bytes gauge +consul_dataplane_go_memstats_gc_sys_bytes 9.370488e+06 +# HELP consul_dataplane_go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE consul_dataplane_go_memstats_heap_alloc_bytes gauge +consul_dataplane_go_memstats_heap_alloc_bytes 2.543784e+06 +# HELP consul_dataplane_go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE consul_dataplane_go_memstats_heap_idle_bytes gauge +consul_dataplane_go_memstats_heap_idle_bytes 3.137536e+06 +# HELP consul_dataplane_go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE consul_dataplane_go_memstats_heap_inuse_bytes gauge +consul_dataplane_go_memstats_heap_inuse_bytes 4.46464e+06 +# HELP consul_dataplane_go_memstats_heap_objects Number of allocated objects. +# TYPE consul_dataplane_go_memstats_heap_objects gauge +consul_dataplane_go_memstats_heap_objects 5982 +# HELP consul_dataplane_go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE consul_dataplane_go_memstats_heap_released_bytes gauge +consul_dataplane_go_memstats_heap_released_bytes 2.940928e+06 +# HELP consul_dataplane_go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE consul_dataplane_go_memstats_heap_sys_bytes gauge +consul_dataplane_go_memstats_heap_sys_bytes 7.602176e+06 +# HELP consul_dataplane_go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE consul_dataplane_go_memstats_last_gc_time_seconds gauge +consul_dataplane_go_memstats_last_gc_time_seconds 1.678889049944119e+09 +# HELP consul_dataplane_go_memstats_lookups_total Total number of pointer lookups. +# TYPE consul_dataplane_go_memstats_lookups_total counter +consul_dataplane_go_memstats_lookups_total 0 +# HELP consul_dataplane_go_memstats_mallocs_total Total number of mallocs. +# TYPE consul_dataplane_go_memstats_mallocs_total counter +consul_dataplane_go_memstats_mallocs_total 1.362581e+06 +# HELP consul_dataplane_go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE consul_dataplane_go_memstats_mcache_inuse_bytes gauge +consul_dataplane_go_memstats_mcache_inuse_bytes 4800 +# HELP consul_dataplane_go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE consul_dataplane_go_memstats_mcache_sys_bytes gauge +consul_dataplane_go_memstats_mcache_sys_bytes 15600 +# HELP consul_dataplane_go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE consul_dataplane_go_memstats_mspan_inuse_bytes gauge +consul_dataplane_go_memstats_mspan_inuse_bytes 80920 +# HELP consul_dataplane_go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE consul_dataplane_go_memstats_mspan_sys_bytes gauge +consul_dataplane_go_memstats_mspan_sys_bytes 81600 +# HELP consul_dataplane_go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE consul_dataplane_go_memstats_next_gc_bytes gauge +consul_dataplane_go_memstats_next_gc_bytes 5.238856e+06 +# HELP consul_dataplane_go_memstats_other_sys_bytes Number of bytes used for other system allocations. 
+# TYPE consul_dataplane_go_memstats_other_sys_bytes gauge +consul_dataplane_go_memstats_other_sys_bytes 1.258124e+06 +# HELP consul_dataplane_go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE consul_dataplane_go_memstats_stack_inuse_bytes gauge +consul_dataplane_go_memstats_stack_inuse_bytes 786432 +# HELP consul_dataplane_go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE consul_dataplane_go_memstats_stack_sys_bytes gauge +consul_dataplane_go_memstats_stack_sys_bytes 786432 +# HELP consul_dataplane_go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE consul_dataplane_go_memstats_sys_bytes gauge +consul_dataplane_go_memstats_sys_bytes 1.911912e+07 +# HELP consul_dataplane_go_threads Number of OS threads created. +# TYPE consul_dataplane_go_threads gauge +consul_dataplane_go_threads 10 +# HELP consul_dataplane_login_duration This will be a sample of the time it takes to login to Consul. +# TYPE consul_dataplane_login_duration summary +consul_dataplane_login_duration{quantile="0.5"} NaN +consul_dataplane_login_duration{quantile="0.9"} NaN +consul_dataplane_login_duration{quantile="0.99"} NaN +consul_dataplane_login_duration_sum 18.53141975402832 +consul_dataplane_login_duration_count 1 +# HELP consul_dataplane_runtime_alloc_bytes runtime_alloc_bytes +# TYPE consul_dataplane_runtime_alloc_bytes gauge +consul_dataplane_runtime_alloc_bytes 2.526696e+06 +# HELP consul_dataplane_runtime_free_count runtime_free_count +# TYPE consul_dataplane_runtime_free_count gauge +consul_dataplane_runtime_free_count 1.356599e+06 +# HELP consul_dataplane_runtime_gc_pause_ns runtime_gc_pause_ns +# TYPE consul_dataplane_runtime_gc_pause_ns summary +consul_dataplane_runtime_gc_pause_ns{quantile="0.5"} 55990 +consul_dataplane_runtime_gc_pause_ns{quantile="0.9"} 55990 +consul_dataplane_runtime_gc_pause_ns{quantile="0.99"} 55990 +consul_dataplane_runtime_gc_pause_ns_sum 55990 +consul_dataplane_runtime_gc_pause_ns_count 1 +# HELP consul_dataplane_runtime_heap_objects runtime_heap_objects +# TYPE consul_dataplane_runtime_heap_objects gauge +consul_dataplane_runtime_heap_objects 5978 +# HELP consul_dataplane_runtime_malloc_count runtime_malloc_count +# TYPE consul_dataplane_runtime_malloc_count gauge +consul_dataplane_runtime_malloc_count 1.362577e+06 +# HELP consul_dataplane_runtime_num_goroutines runtime_num_goroutines +# TYPE consul_dataplane_runtime_num_goroutines gauge +consul_dataplane_runtime_num_goroutines 35 +# HELP consul_dataplane_runtime_sys_bytes runtime_sys_bytes +# TYPE consul_dataplane_runtime_sys_bytes gauge +consul_dataplane_runtime_sys_bytes 1.911912e+07 +# HELP consul_dataplane_runtime_total_gc_pause_ns runtime_total_gc_pause_ns +# TYPE consul_dataplane_runtime_total_gc_pause_ns gauge +consul_dataplane_runtime_total_gc_pause_ns 1.4481198e+07 +# HELP consul_dataplane_runtime_total_gc_runs runtime_total_gc_runs +# TYPE consul_dataplane_runtime_total_gc_runs gauge +consul_dataplane_runtime_total_gc_runs 273 +# TYPE envoy_cluster_assignment_stale counter +envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 
+envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_assignment_timeout_received counter +envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_bind_errors counter +envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_default_total_match_count counter +envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1 
+envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1 +envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1 +# TYPE envoy_cluster_external_upstream_rq counter +envoy_cluster_external_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3 +# TYPE envoy_cluster_external_upstream_rq_completed counter +envoy_cluster_external_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3 +# TYPE envoy_cluster_external_upstream_rq_xx counter +envoy_cluster_external_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3 +# TYPE envoy_cluster_http1_dropped_headers_with_underscores counter +envoy_cluster_http1_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_http1_metadata_not_supported_error counter +envoy_cluster_http1_metadata_not_supported_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_http1_requests_rejected_with_underscores_in_headers counter +envoy_cluster_http1_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_http1_response_flood counter +envoy_cluster_http1_response_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_http2_dropped_headers_with_underscores counter +envoy_cluster_http2_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_header_overflow counter 
+envoy_cluster_http2_header_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_headers_cb_no_stream counter +envoy_cluster_http2_headers_cb_no_stream{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_inbound_empty_frames_flood counter +envoy_cluster_http2_inbound_empty_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_inbound_priority_frames_flood counter +envoy_cluster_http2_inbound_priority_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_inbound_window_update_frames_flood counter +envoy_cluster_http2_inbound_window_update_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_keepalive_timeout counter +envoy_cluster_http2_keepalive_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_metadata_empty_frames counter +envoy_cluster_http2_metadata_empty_frames{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_outbound_control_flood counter +envoy_cluster_http2_outbound_control_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_outbound_flood counter +envoy_cluster_http2_outbound_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_requests_rejected_with_underscores_in_headers counter +envoy_cluster_http2_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_rx_messaging_error counter 
+envoy_cluster_http2_rx_messaging_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_rx_reset counter
+envoy_cluster_http2_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_stream_refused_errors counter
+envoy_cluster_http2_stream_refused_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_trailers counter
+envoy_cluster_http2_trailers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_tx_flush_timeout counter
+envoy_cluster_http2_tx_flush_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_tx_reset counter
+envoy_cluster_http2_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_internal_upstream_rq counter
+envoy_cluster_internal_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_internal_upstream_rq_completed counter
+envoy_cluster_internal_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_internal_upstream_rq_xx counter
+envoy_cluster_internal_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_lb_healthy_panic counter
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_local_cluster_not_ok counter
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_recalculate_zone_structures counter
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_created counter
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_fallback counter
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_fallback_panic counter
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_removed counter
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_selected counter
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_cluster_too_small counter
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_no_capacity_left counter
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_number_differs counter
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_all_directly counter
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_cross_zone counter
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_sampled counter
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_membership_change counter
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 2
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_original_dst_host_invalid counter
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_retry_or_shadow_abandoned counter
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_attempt counter
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_empty counter
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_failure counter
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_no_rebuild counter
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_success counter
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_close_notify counter
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_attempts_exceeded counter
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_fail counter
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_timeout counter
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_with_0_rtt counter
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy counter
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local counter
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote counter
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_http1_total counter
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_http2_total counter
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_http3_total counter
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_idle_timeout counter
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_max_duration_reached counter
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_max_requests counter
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_none_healthy counter
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_overflow counter
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_pool_overflow counter
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_protocol_error counter
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_rx_bytes_total counter
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 3853
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 8645645
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 724779
+# TYPE envoy_cluster_upstream_cx_total counter
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_tx_bytes_total counter
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 114982
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1240
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 732
+# TYPE envoy_cluster_upstream_flow_control_backed_up_total counter
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_drained_total counter
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_paused_reading_total counter
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_resumed_reading_total counter
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_http3_broken counter
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_failed_total counter
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_succeeded_total counter
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq counter
+envoy_cluster_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_upstream_rq_0rtt counter
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_cancelled counter
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 4749
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_completed counter
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_upstream_rq_maintenance_mode counter
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_max_duration_reached counter
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_failure_eject counter
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_overflow counter
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_total counter
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2 +# TYPE envoy_cluster_upstream_rq_per_try_idle_timeout counter +envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_per_try_timeout counter +envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry counter +envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 
+envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_exponential counter +envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_ratelimited counter +envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry_limit_exceeded counter +envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 
+envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry_overflow counter +envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_retry_success counter +envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_rx_reset counter +envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 
+envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_timeout counter +envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_total counter +envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758 +envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1 +envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3 +# TYPE envoy_cluster_upstream_rq_tx_reset counter +envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_xx 
counter +envoy_cluster_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3 +# TYPE envoy_cluster_manager_cds_init_fetch_timeout counter +envoy_cluster_manager_cds_init_fetch_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_cds_update_attempt counter +envoy_cluster_manager_cds_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_cluster_manager_cds_update_failure counter +envoy_cluster_manager_cds_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_cds_update_rejected counter +envoy_cluster_manager_cds_update_rejected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_cds_update_success counter +envoy_cluster_manager_cds_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_cluster_manager_cluster_added counter +envoy_cluster_manager_cluster_added{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 4 +# TYPE envoy_cluster_manager_cluster_modified counter +envoy_cluster_manager_cluster_modified{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_cluster_removed counter +envoy_cluster_manager_cluster_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_cluster_updated counter +envoy_cluster_manager_cluster_updated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_cluster_manager_cluster_updated_via_merge counter +envoy_cluster_manager_cluster_updated_via_merge{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_update_merge_cancelled counter 
+envoy_cluster_manager_update_merge_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_manager_update_out_of_merge_window counter +envoy_cluster_manager_update_out_of_merge_window{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_connect_authzrbac_allowed counter +envoy_connect_authzrbac_allowed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_connect_authzrbac_denied counter +envoy_connect_authzrbac_denied{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_connect_authzrbac_shadow_allowed counter +envoy_connect_authzrbac_shadow_allowed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_connect_authzrbac_shadow_denied counter +envoy_connect_authzrbac_shadow_denied{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_control_plane_rate_limit_enforced counter +envoy_control_plane_rate_limit_enforced{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_dns_cares_get_addr_failure counter +envoy_dns_cares_get_addr_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_dns_cares_not_found counter +envoy_dns_cares_not_found{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_dns_cares_resolve_total counter +envoy_dns_cares_resolve_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_dns_cares_timeouts counter +envoy_dns_cares_timeouts{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_envoy_overload_actions_reset_high_memory_stream_count counter +envoy_envoy_overload_actions_reset_high_memory_stream_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_filesystem_flushed_by_timer counter +envoy_filesystem_flushed_by_timer{} 3253 +# TYPE envoy_filesystem_reopen_failed counter +envoy_filesystem_reopen_failed{} 0 +# TYPE envoy_filesystem_write_buffered counter +envoy_filesystem_write_buffered{} 3 +# TYPE 
envoy_filesystem_write_completed counter +envoy_filesystem_write_completed{} 3 +# TYPE envoy_filesystem_write_failed counter +envoy_filesystem_write_failed{} 0 +# TYPE envoy_http_downstream_cx_delayed_close_timeout counter +envoy_http_downstream_cx_delayed_close_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_delayed_close_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_destroy counter +envoy_http_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2 +envoy_http_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3 +# TYPE envoy_http_downstream_cx_destroy_active_rq counter +envoy_http_downstream_cx_destroy_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_destroy_local counter +envoy_http_downstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_destroy_local_active_rq counter +envoy_http_downstream_cx_destroy_local_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_local_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_destroy_remote counter +envoy_http_downstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2 
+envoy_http_downstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3 +# TYPE envoy_http_downstream_cx_destroy_remote_active_rq counter +envoy_http_downstream_cx_destroy_remote_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_remote_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_drain_close counter +envoy_http_downstream_cx_drain_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_drain_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_http1_total counter +envoy_http_downstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_downstream_cx_http2_total counter +envoy_http_downstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_http3_total counter +envoy_http_downstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_idle_timeout counter 
+envoy_http_downstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_max_duration_reached counter +envoy_http_downstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_max_requests_reached counter +envoy_http_downstream_cx_max_requests_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_max_requests_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_overload_disable_keepalive counter +envoy_http_downstream_cx_overload_disable_keepalive{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_overload_disable_keepalive{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_protocol_error counter +envoy_http_downstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_rx_bytes_total counter +envoy_http_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 448 
+envoy_http_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 353 +# TYPE envoy_http_downstream_cx_ssl_total counter +envoy_http_downstream_cx_ssl_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_ssl_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_total counter +envoy_http_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_downstream_cx_tx_bytes_total counter +envoy_http_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1035762 +envoy_http_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 725008 +# TYPE envoy_http_downstream_cx_upgrades_total counter +envoy_http_downstream_cx_upgrades_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_upgrades_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_flow_control_paused_reading_total counter +envoy_http_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_flow_control_resumed_reading_total counter 
+envoy_http_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_completed counter +envoy_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_downstream_rq_failed_path_normalization counter +envoy_http_downstream_rq_failed_path_normalization{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_failed_path_normalization{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_header_timeout counter +envoy_http_downstream_rq_header_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_header_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_http1_total counter +envoy_http_downstream_rq_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 4 +envoy_http_downstream_rq_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_downstream_rq_http2_total counter +envoy_http_downstream_rq_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 
+envoy_http_downstream_rq_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_http3_total counter +envoy_http_downstream_rq_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_idle_timeout counter +envoy_http_downstream_rq_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_max_duration_reached counter +envoy_http_downstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_non_relative_path counter +envoy_http_downstream_rq_non_relative_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_non_relative_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_overload_close counter +envoy_http_downstream_rq_overload_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_overload_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_redirected_with_normalized_path counter 
+envoy_http_downstream_rq_redirected_with_normalized_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_redirected_with_normalized_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_rejected_via_ip_detection counter +envoy_http_downstream_rq_rejected_via_ip_detection{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_rejected_via_ip_detection{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_response_before_rq_complete counter +envoy_http_downstream_rq_response_before_rq_complete{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_response_before_rq_complete{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_rx_reset counter +envoy_http_downstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_timeout counter +envoy_http_downstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_too_large counter +envoy_http_downstream_rq_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 
+envoy_http_downstream_rq_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_total counter +envoy_http_downstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 4 +envoy_http_downstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_downstream_rq_tx_reset counter +envoy_http_downstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_ws_on_non_ws_route counter +envoy_http_downstream_rq_ws_on_non_ws_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_ws_on_non_ws_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_xx counter +envoy_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 
+envoy_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3 +envoy_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1 +envoy_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_no_cluster counter +envoy_http_no_cluster{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_no_cluster{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_no_route counter +envoy_http_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_passthrough_internal_redirect_bad_location counter +envoy_http_passthrough_internal_redirect_bad_location{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_bad_location{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_passthrough_internal_redirect_no_route counter 
+envoy_http_passthrough_internal_redirect_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_passthrough_internal_redirect_predicate counter +envoy_http_passthrough_internal_redirect_predicate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_predicate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_passthrough_internal_redirect_too_many_redirects counter +envoy_http_passthrough_internal_redirect_too_many_redirects{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_too_many_redirects{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_passthrough_internal_redirect_unsafe_scheme counter +envoy_http_passthrough_internal_redirect_unsafe_scheme{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_unsafe_scheme{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_rq_direct_response counter +envoy_http_rq_direct_response{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_direct_response{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1 +# TYPE envoy_http_rq_redirect counter +envoy_http_rq_redirect{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 
+envoy_http_rq_redirect{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_rq_reset_after_downstream_response_started counter +envoy_http_rq_reset_after_downstream_response_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_reset_after_downstream_response_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_rq_total counter +envoy_http_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 1 +envoy_http_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4 +# TYPE envoy_http_rs_too_large counter +envoy_http_rs_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_rs_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_tracing_client_enabled counter +envoy_http_tracing_client_enabled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_tracing_health_check counter +envoy_http_tracing_health_check{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_tracing_not_traceable counter +envoy_http_tracing_not_traceable{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_tracing_random_sampling counter +envoy_http_tracing_random_sampling{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_tracing_service_forced counter 
+envoy_http_tracing_service_forced{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http1_dropped_headers_with_underscores counter +envoy_http1_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_http1_metadata_not_supported_error counter +envoy_http1_metadata_not_supported_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_http1_requests_rejected_with_underscores_in_headers counter +envoy_http1_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_http1_response_flood counter +envoy_http1_response_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_cx_destroy counter +envoy_listener_admin_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_listener_admin_downstream_cx_overflow counter +envoy_listener_admin_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_cx_overload_reject counter +envoy_listener_admin_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_cx_total counter +envoy_listener_admin_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3 +# TYPE envoy_listener_admin_downstream_cx_transport_socket_connect_timeout counter +envoy_listener_admin_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_global_cx_overflow counter +envoy_listener_admin_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_listener_filter_error counter 
+envoy_listener_admin_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_listener_filter_remote_close counter +envoy_listener_admin_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_downstream_pre_cx_timeout counter +envoy_listener_admin_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_http_downstream_rq_completed counter +envoy_listener_admin_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +# TYPE envoy_listener_admin_http_downstream_rq_xx counter +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +# TYPE envoy_listener_admin_main_thread_downstream_cx_total counter +envoy_listener_admin_main_thread_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3 +# TYPE envoy_listener_admin_no_filter_chain_match counter +envoy_listener_admin_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_downstream_cx_destroy counter 
+envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 3 +envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6507 +envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1 +# TYPE envoy_listener_downstream_cx_overflow counter +envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_cx_overload_reject counter +envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_cx_total counter +envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 4 +envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6507 +envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1 +# TYPE envoy_listener_downstream_cx_transport_socket_connect_timeout counter 
+envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_global_cx_overflow counter +envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_listener_filter_error counter +envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_listener_filter_remote_close counter +envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 
+envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_pre_cx_timeout counter +envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_extension_config_missing counter +envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_http_downstream_rq_completed counter +envoy_listener_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 4 +# TYPE envoy_listener_http_downstream_rq_xx counter +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 3 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0 
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 1 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0 +# TYPE envoy_listener_no_filter_chain_match counter +envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_server_ssl_socket_factory_downstream_context_secrets_not_ready counter +envoy_listener_server_ssl_socket_factory_downstream_context_secrets_not_ready{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_server_ssl_socket_factory_ssl_context_update_by_sds counter +envoy_listener_server_ssl_socket_factory_ssl_context_update_by_sds{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_server_ssl_socket_factory_upstream_context_secrets_not_ready counter +envoy_listener_server_ssl_socket_factory_upstream_context_secrets_not_ready{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_connection_error counter +envoy_listener_ssl_connection_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_fail_verify_cert_hash counter +envoy_listener_ssl_fail_verify_cert_hash{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_fail_verify_error counter 
+envoy_listener_ssl_fail_verify_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_fail_verify_no_cert counter +envoy_listener_ssl_fail_verify_no_cert{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_fail_verify_san counter +envoy_listener_ssl_fail_verify_san{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_handshake counter +envoy_listener_ssl_handshake{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_no_certificate counter +envoy_listener_ssl_no_certificate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_ocsp_staple_failed counter +envoy_listener_ssl_ocsp_staple_failed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_ocsp_staple_omitted counter +envoy_listener_ssl_ocsp_staple_omitted{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_ocsp_staple_requests counter +envoy_listener_ssl_ocsp_staple_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_ocsp_staple_responses counter +envoy_listener_ssl_ocsp_staple_responses{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_ssl_session_reused counter +envoy_listener_ssl_session_reused{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +# TYPE envoy_listener_worker_downstream_cx_total counter +envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 2 
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 2 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 3169 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 3338 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_manager_lds_init_fetch_timeout counter +envoy_listener_manager_lds_init_fetch_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_lds_update_attempt counter +envoy_listener_manager_lds_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_listener_manager_lds_update_failure counter +envoy_listener_manager_lds_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_lds_update_rejected counter +envoy_listener_manager_lds_update_rejected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_lds_update_success counter +envoy_listener_manager_lds_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_listener_manager_listener_added counter +envoy_listener_manager_listener_added{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3 +# TYPE envoy_listener_manager_listener_create_failure counter +envoy_listener_manager_listener_create_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_listener_create_success counter 
+envoy_listener_manager_listener_create_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 6 +# TYPE envoy_listener_manager_listener_in_place_updated counter +envoy_listener_manager_listener_in_place_updated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_listener_modified counter +envoy_listener_manager_listener_modified{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_listener_removed counter +envoy_listener_manager_listener_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_listener_stopped counter +envoy_listener_manager_listener_stopped{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_main_thread_watchdog_mega_miss counter +envoy_main_thread_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_main_thread_watchdog_miss counter +envoy_main_thread_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_deprecated_feature_use counter +envoy_runtime_deprecated_feature_use{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_load_error counter +envoy_runtime_load_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_load_success counter +envoy_runtime_load_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_runtime_override_dir_exists counter +envoy_runtime_override_dir_exists{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_override_dir_not_exists counter +envoy_runtime_override_dir_not_exists{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_server_debug_assertion_failures counter +envoy_server_debug_assertion_failures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_dropped_stat_flushes counter +envoy_server_dropped_stat_flushes{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_dynamic_unknown_fields counter +envoy_server_dynamic_unknown_fields{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_envoy_bug_failures counter +envoy_server_envoy_bug_failures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_main_thread_watchdog_mega_miss counter +envoy_server_main_thread_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_main_thread_watchdog_miss counter +envoy_server_main_thread_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_static_unknown_fields counter +envoy_server_static_unknown_fields{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_wip_protos counter +envoy_server_wip_protos{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_worker_watchdog_mega_miss counter +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_worker_watchdog_miss counter +envoy_server_worker_watchdog_miss{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_tcp_downstream_cx_no_route counter +envoy_tcp_downstream_cx_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_downstream_cx_rx_bytes_total counter
+envoy_tcp_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_downstream_cx_total counter +envoy_tcp_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 6507 +# TYPE envoy_tcp_downstream_cx_tx_bytes_total counter +envoy_tcp_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_downstream_flow_control_paused_reading_total counter +envoy_tcp_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_downstream_flow_control_resumed_reading_total counter +envoy_tcp_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_idle_timeout counter +envoy_tcp_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_max_downstream_connection_duration counter +envoy_tcp_max_downstream_connection_duration{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_original_destination_downstream_cx_no_route counter +envoy_tcp_original_destination_downstream_cx_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_downstream_cx_rx_bytes_total counter +envoy_tcp_original_destination_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 1240 +# TYPE envoy_tcp_original_destination_downstream_cx_total counter +envoy_tcp_original_destination_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 1 +# TYPE envoy_tcp_original_destination_downstream_cx_tx_bytes_total counter 
+envoy_tcp_original_destination_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 8645645 +# TYPE envoy_tcp_original_destination_downstream_flow_control_paused_reading_total counter +envoy_tcp_original_destination_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_downstream_flow_control_resumed_reading_total counter +envoy_tcp_original_destination_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_idle_timeout counter +envoy_tcp_original_destination_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_max_downstream_connection_duration counter +envoy_tcp_original_destination_max_downstream_connection_duration{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_upstream_flush_total counter +envoy_tcp_original_destination_upstream_flush_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_upstream_flush_total counter +envoy_tcp_upstream_flush_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_retry counter +envoy_vhost_vcluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_retry_limit_exceeded counter +envoy_vhost_vcluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_retry_overflow counter +envoy_vhost_vcluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_retry_success counter 
+envoy_vhost_vcluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_timeout counter +envoy_vhost_vcluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_vhost_vcluster_upstream_rq_total counter +envoy_vhost_vcluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0 +# TYPE envoy_workers_watchdog_mega_miss counter +envoy_workers_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_workers_watchdog_miss counter +envoy_workers_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_open gauge +envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_pool_open gauge +envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_open gauge +envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_pending_open gauge +envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_retry_open gauge +envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_open gauge +envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_pool_open gauge +envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_open gauge +envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 
+envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_pending_open gauge +envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_retry_open gauge +envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_http2_deferred_stream_close gauge +envoy_cluster_http2_deferred_stream_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_pending_send_bytes gauge +envoy_cluster_http2_pending_send_bytes{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +# TYPE envoy_cluster_http2_streams_active gauge +envoy_cluster_http2_streams_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 
1 +# TYPE envoy_cluster_lb_subsets_active gauge +envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_max_host_weight gauge +envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_membership_degraded gauge +envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_membership_excluded gauge +envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 
+envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_membership_healthy gauge +envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1 +envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1 +# TYPE envoy_cluster_membership_total gauge +envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1 +envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1 +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 
+envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2 +# TYPE envoy_cluster_upstream_cx_rx_bytes_buffered gauge +envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 17 +envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 102618 +# TYPE envoy_cluster_upstream_cx_tx_bytes_buffered gauge +envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 
+envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1 +# TYPE envoy_cluster_upstream_rq_pending_active gauge +envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_version gauge +envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0 +envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0 +envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0 +envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0 +# TYPE envoy_cluster_manager_active_clusters gauge +envoy_cluster_manager_active_clusters{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 4 +# TYPE envoy_cluster_manager_cds_update_time gauge +envoy_cluster_manager_cds_update_time{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1678856528260 +# TYPE envoy_cluster_manager_cds_version gauge +envoy_cluster_manager_cds_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 17241709254077376921 +# TYPE 
envoy_cluster_manager_warming_clusters gauge +envoy_cluster_manager_warming_clusters{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_control_plane_connected_state gauge +envoy_control_plane_connected_state{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_control_plane_pending_requests gauge +envoy_control_plane_pending_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_dns_cares_pending_resolutions gauge +envoy_dns_cares_pending_resolutions{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_filesystem_write_total_buffered gauge +envoy_filesystem_write_total_buffered{} 0 +# TYPE envoy_http_downstream_cx_active gauge +envoy_http_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1 +envoy_http_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1 +# TYPE envoy_http_downstream_cx_http1_active gauge +envoy_http_downstream_cx_http1_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1 +envoy_http_downstream_cx_http1_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1 +# TYPE envoy_http_downstream_cx_http2_active gauge +envoy_http_downstream_cx_http2_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http2_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_http3_active gauge +envoy_http_downstream_cx_http3_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 
+envoy_http_downstream_cx_http3_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_rx_bytes_buffered gauge +envoy_http_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 112 +envoy_http_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 86 +# TYPE envoy_http_downstream_cx_ssl_active gauge +envoy_http_downstream_cx_ssl_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_ssl_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_tx_bytes_buffered gauge +envoy_http_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_cx_upgrades_active gauge +envoy_http_downstream_cx_upgrades_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_upgrades_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0 +# TYPE envoy_http_downstream_rq_active gauge +envoy_http_downstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1 +envoy_http_downstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1 +# TYPE envoy_listener_admin_downstream_cx_active gauge 
+envoy_listener_admin_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_listener_admin_downstream_pre_cx_active gauge +envoy_listener_admin_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_admin_main_thread_downstream_cx_active gauge +envoy_listener_admin_main_thread_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_listener_downstream_cx_active gauge +envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 1 +envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_downstream_pre_cx_active gauge +envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_worker_downstream_cx_active gauge +envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 1 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0 +# TYPE envoy_listener_manager_lds_update_time gauge +envoy_listener_manager_lds_update_time{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1678856528268 +# TYPE envoy_listener_manager_lds_version gauge +envoy_listener_manager_lds_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 17241709254077376921 +# TYPE envoy_listener_manager_total_filter_chains_draining gauge +envoy_listener_manager_total_filter_chains_draining{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_total_listeners_active gauge +envoy_listener_manager_total_listeners_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3 +# TYPE envoy_listener_manager_total_listeners_draining gauge +envoy_listener_manager_total_listeners_draining{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_total_listeners_warming gauge +envoy_listener_manager_total_listeners_warming{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_listener_manager_workers_started gauge +envoy_listener_manager_workers_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_runtime_admin_overrides_active gauge +envoy_runtime_admin_overrides_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_deprecated_feature_seen_since_process_start gauge +envoy_runtime_deprecated_feature_seen_since_process_start{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_runtime_num_keys gauge 
+envoy_runtime_num_keys{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_runtime_num_layers gauge +envoy_runtime_num_layers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_server_compilation_settings_fips_mode gauge +envoy_server_compilation_settings_fips_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_concurrency gauge +envoy_server_concurrency{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_server_days_until_first_cert_expiring gauge +envoy_server_days_until_first_cert_expiring{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_server_hot_restart_epoch gauge +envoy_server_hot_restart_epoch{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_live gauge +envoy_server_live{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_server_memory_allocated gauge +envoy_server_memory_allocated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 7742368 +# TYPE envoy_server_memory_heap_size gauge +envoy_server_memory_heap_size{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 14680064 +# TYPE envoy_server_memory_physical_size gauge +envoy_server_memory_physical_size{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 19175778 +# TYPE envoy_server_parent_connections gauge +envoy_server_parent_connections{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_seconds_until_first_ocsp_response_expiring gauge +envoy_server_seconds_until_first_ocsp_response_expiring{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_state gauge +envoy_server_state{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_stats_recent_lookups gauge 
+envoy_server_stats_recent_lookups{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 11362 +# TYPE envoy_server_total_connections gauge +envoy_server_total_connections{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0 +# TYPE envoy_server_uptime gauge +envoy_server_uptime{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 32527 +# TYPE envoy_server_version gauge +envoy_server_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1424117 +# TYPE envoy_tcp_downstream_cx_rx_bytes_buffered gauge +envoy_tcp_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_downstream_cx_tx_bytes_buffered gauge +envoy_tcp_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_tcp_original_destination_downstream_cx_rx_bytes_buffered gauge +envoy_tcp_original_destination_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_downstream_cx_tx_bytes_buffered gauge +envoy_tcp_original_destination_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_original_destination_upstream_flush_active gauge +envoy_tcp_original_destination_upstream_flush_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0 +# TYPE envoy_tcp_upstream_flush_active gauge +envoy_tcp_upstream_flush_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0 +# TYPE envoy_cluster_external_upstream_rq_time histogram +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0 
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2 
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2 +envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2 +envoy_cluster_external_upstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 15.1000000000000014210854715202 +envoy_cluster_external_upstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2 +# TYPE envoy_cluster_upstream_cx_connect_ms histogram +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="0.5"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5"} 0 
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="25"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="50"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="100"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="250"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="500"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="2500"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="30000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="60000"} 1 
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="300000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="600000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1800000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="3600000"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="+Inf"} 1 +envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 8.0500000000000007105427357601002 +envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="0.5"} 1757 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1"} 1757 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5"} 1757 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10"} 1757 +envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="25"} 1757 
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="50"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="100"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="250"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="500"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="2500"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="30000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="60000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="300000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="600000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1800000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="3600000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="+Inf"} 1757
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="0.5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="25"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="50"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="100"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="250"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="2500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="30000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="60000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="300000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1800000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="3600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="+Inf"} 1
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 2.049999999999999822364316059975
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1.0500000000000000444089209850063
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_length_ms histogram
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="2500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="30000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="60000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="300000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1800000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="3600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="+Inf"} 0
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="0.5"} 6502
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1"} 6502
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="25"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="50"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="100"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="250"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="500"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="2500"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="30000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="60000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="300000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="600000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1800000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="3600000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="+Inf"} 6505
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 10.1499999999999985789145284798
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="2500"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="30000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="60000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="300000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="600000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1800000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="3600000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="+Inf"} 1
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 855
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 0
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_time histogram
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2
+envoy_cluster_upstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 15.1000000000000014210854715202
+envoy_cluster_upstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_manager_cds_update_duration histogram
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1
+envoy_cluster_manager_cds_update_duration_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 34.5
+envoy_cluster_manager_cds_update_duration_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_http_downstream_cx_length_ms histogram
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="0.5"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="25"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="50"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="100"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="250"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="500"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="2500"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="30000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="60000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="300000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="600000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1800000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="3600000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="+Inf"} 2
+envoy_http_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 181000
+envoy_http_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="0.5"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="25"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="50"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="100"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="250"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="500"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="2500"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="30000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="60000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="300000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="600000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1800000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="3600000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="+Inf"} 3
+envoy_http_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 23
+envoy_http_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+# TYPE envoy_http_downstream_rq_time histogram
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="0.5"} 0
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1"} 0
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="25"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="50"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="100"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="250"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="2500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="30000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="60000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="300000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="600000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1800000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="3600000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="+Inf"} 3 +envoy_http_downstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 12.1499999999999985789145284798 +envoy_http_downstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="0.5"} 1 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1"} 1 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5"} 1 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10"} 2 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="25"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="50"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="100"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="250"} 3 
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="500"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="2500"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="30000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="60000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="300000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="600000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1800000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="3600000"} 3 +envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="+Inf"} 3 
+envoy_http_downstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 17.5500000000000007105427357601 +envoy_http_downstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3 +# TYPE envoy_listener_admin_downstream_cx_length_ms histogram +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 0 
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 2 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 2 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 2 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 2 +envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 2 +envoy_listener_admin_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 181000 +envoy_listener_admin_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2 +# TYPE envoy_listener_downstream_cx_length_ms histogram +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="0.5"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1"} 1 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="5"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="10"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="25"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="50"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="100"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="250"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="500"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="2500"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="5000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="10000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="30000"} 3 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="60000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="300000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="600000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1800000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="3600000"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="+Inf"} 3 +envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 23 +envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 3 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="0.5"} 6502 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1"} 6502 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="5"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="10"} 6505 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="25"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="50"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="100"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="250"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="500"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="2500"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="5000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="10000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="30000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="60000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="300000"} 6505 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="600000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1800000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="3600000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="+Inf"} 6505 +envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 10.1499999999999985789145284798 +envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6505 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="0.5"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="5"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="10"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="25"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="50"} 0 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="100"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="250"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="500"} 0 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="2500"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="5000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="10000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="30000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="60000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="300000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="600000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1800000"} 1 
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="3600000"} 1 +envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="+Inf"} 1 +envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 855 +envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1 +# TYPE envoy_listener_manager_lds_update_duration histogram +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 1 
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1 +envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1 +envoy_listener_manager_lds_update_duration_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 5.049999999999999822364316059975 +envoy_listener_manager_lds_update_duration_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 +# TYPE envoy_server_initialization_time_ms histogram +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0 
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 0 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1 
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1 +envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1 +envoy_server_initialization_time_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1150 +envoy_server_initialization_time_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1 diff --git a/src/go/collectors/go.d.plugin/modules/envoy/testdata/envoy.txt b/src/go/collectors/go.d.plugin/modules/envoy/testdata/envoy.txt new file mode 100644 index 00000000000000..1102c4c0d1df0f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/envoy/testdata/envoy.txt @@ -0,0 +1,929 @@ +# TYPE envoy_cluster_assignment_stale counter +envoy_cluster_assignment_stale{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_assignment_timeout_received counter +envoy_cluster_assignment_timeout_received{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_bind_errors counter +envoy_cluster_bind_errors{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_client_ssl_socket_factory_downstream_context_secrets_not_ready counter +envoy_cluster_client_ssl_socket_factory_downstream_context_secrets_not_ready{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_client_ssl_socket_factory_ssl_context_update_by_sds counter +envoy_cluster_client_ssl_socket_factory_ssl_context_update_by_sds{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_client_ssl_socket_factory_upstream_context_secrets_not_ready counter +envoy_cluster_client_ssl_socket_factory_upstream_context_secrets_not_ready{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_default_total_match_count counter +envoy_cluster_default_total_match_count{envoy_cluster_name="service_envoyproxy_io"} 1 +# TYPE envoy_cluster_lb_healthy_panic counter +envoy_cluster_lb_healthy_panic{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_local_cluster_not_ok counter +envoy_cluster_lb_local_cluster_not_ok{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_recalculate_zone_structures counter +envoy_cluster_lb_recalculate_zone_structures{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE 
envoy_cluster_lb_subsets_created counter +envoy_cluster_lb_subsets_created{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_subsets_fallback counter +envoy_cluster_lb_subsets_fallback{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_subsets_fallback_panic counter +envoy_cluster_lb_subsets_fallback_panic{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_subsets_removed counter +envoy_cluster_lb_subsets_removed{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_subsets_selected counter +envoy_cluster_lb_subsets_selected{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_cluster_too_small counter +envoy_cluster_lb_zone_cluster_too_small{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_no_capacity_left counter +envoy_cluster_lb_zone_no_capacity_left{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_number_differs counter +envoy_cluster_lb_zone_number_differs{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_routing_all_directly counter +envoy_cluster_lb_zone_routing_all_directly{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_routing_cross_zone counter +envoy_cluster_lb_zone_routing_cross_zone{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_zone_routing_sampled counter +envoy_cluster_lb_zone_routing_sampled{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_membership_change counter +envoy_cluster_membership_change{envoy_cluster_name="service_envoyproxy_io"} 1 +# TYPE envoy_cluster_original_dst_host_invalid counter +envoy_cluster_original_dst_host_invalid{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_retry_or_shadow_abandoned counter +envoy_cluster_retry_or_shadow_abandoned{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_connection_error counter +envoy_cluster_ssl_connection_error{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_fail_verify_cert_hash counter +envoy_cluster_ssl_fail_verify_cert_hash{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_fail_verify_error counter +envoy_cluster_ssl_fail_verify_error{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_fail_verify_no_cert counter +envoy_cluster_ssl_fail_verify_no_cert{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_fail_verify_san counter +envoy_cluster_ssl_fail_verify_san{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_handshake counter +envoy_cluster_ssl_handshake{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_no_certificate counter +envoy_cluster_ssl_no_certificate{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_ocsp_staple_failed counter +envoy_cluster_ssl_ocsp_staple_failed{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_ocsp_staple_omitted counter +envoy_cluster_ssl_ocsp_staple_omitted{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_ocsp_staple_requests counter +envoy_cluster_ssl_ocsp_staple_requests{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_ocsp_staple_responses counter +envoy_cluster_ssl_ocsp_staple_responses{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_ssl_session_reused counter +envoy_cluster_ssl_session_reused{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE 
envoy_cluster_update_attempt counter +envoy_cluster_update_attempt{envoy_cluster_name="service_envoyproxy_io"} 1242 +# TYPE envoy_cluster_update_empty counter +envoy_cluster_update_empty{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_update_failure counter +envoy_cluster_update_failure{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_update_no_rebuild counter +envoy_cluster_update_no_rebuild{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_update_success counter +envoy_cluster_update_success{envoy_cluster_name="service_envoyproxy_io"} 1242 +# TYPE envoy_cluster_upstream_cx_close_notify counter +envoy_cluster_upstream_cx_close_notify{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_connect_attempts_exceeded counter +envoy_cluster_upstream_cx_connect_attempts_exceeded{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_connect_fail counter +envoy_cluster_upstream_cx_connect_fail{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_connect_timeout counter +envoy_cluster_upstream_cx_connect_timeout{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_connect_with_0_rtt counter +envoy_cluster_upstream_cx_connect_with_0_rtt{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy counter +envoy_cluster_upstream_cx_destroy{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_local counter +envoy_cluster_upstream_cx_destroy_local{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_local_with_active_rq counter +envoy_cluster_upstream_cx_destroy_local_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_remote counter +envoy_cluster_upstream_cx_destroy_remote{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_remote_with_active_rq counter +envoy_cluster_upstream_cx_destroy_remote_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_destroy_with_active_rq counter +envoy_cluster_upstream_cx_destroy_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_http1_total counter +envoy_cluster_upstream_cx_http1_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_http2_total counter +envoy_cluster_upstream_cx_http2_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_http3_total counter +envoy_cluster_upstream_cx_http3_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_idle_timeout counter +envoy_cluster_upstream_cx_idle_timeout{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_max_duration_reached counter +envoy_cluster_upstream_cx_max_duration_reached{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_max_requests counter +envoy_cluster_upstream_cx_max_requests{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_none_healthy counter +envoy_cluster_upstream_cx_none_healthy{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_overflow counter +envoy_cluster_upstream_cx_overflow{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_pool_overflow counter +envoy_cluster_upstream_cx_pool_overflow{envoy_cluster_name="service_envoyproxy_io"} 0 
+# TYPE envoy_cluster_upstream_cx_protocol_error counter +envoy_cluster_upstream_cx_protocol_error{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_rx_bytes_total counter +envoy_cluster_upstream_cx_rx_bytes_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_total counter +envoy_cluster_upstream_cx_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_tx_bytes_total counter +envoy_cluster_upstream_cx_tx_bytes_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_flow_control_backed_up_total counter +envoy_cluster_upstream_flow_control_backed_up_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_flow_control_drained_total counter +envoy_cluster_upstream_flow_control_drained_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_flow_control_paused_reading_total counter +envoy_cluster_upstream_flow_control_paused_reading_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_flow_control_resumed_reading_total counter +envoy_cluster_upstream_flow_control_resumed_reading_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_http3_broken counter +envoy_cluster_upstream_http3_broken{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_internal_redirect_failed_total counter +envoy_cluster_upstream_internal_redirect_failed_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_internal_redirect_succeeded_total counter +envoy_cluster_upstream_internal_redirect_succeeded_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_0rtt counter +envoy_cluster_upstream_rq_0rtt{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_cancelled counter +envoy_cluster_upstream_rq_cancelled{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_completed counter +envoy_cluster_upstream_rq_completed{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_maintenance_mode counter +envoy_cluster_upstream_rq_maintenance_mode{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_max_duration_reached counter +envoy_cluster_upstream_rq_max_duration_reached{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_pending_failure_eject counter +envoy_cluster_upstream_rq_pending_failure_eject{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_pending_overflow counter +envoy_cluster_upstream_rq_pending_overflow{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_pending_total counter +envoy_cluster_upstream_rq_pending_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_per_try_idle_timeout counter +envoy_cluster_upstream_rq_per_try_idle_timeout{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_per_try_timeout counter +envoy_cluster_upstream_rq_per_try_timeout{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry counter +envoy_cluster_upstream_rq_retry{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_exponential counter +envoy_cluster_upstream_rq_retry_backoff_exponential{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry_backoff_ratelimited counter 
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry_limit_exceeded counter +envoy_cluster_upstream_rq_retry_limit_exceeded{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry_overflow counter +envoy_cluster_upstream_rq_retry_overflow{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_retry_success counter +envoy_cluster_upstream_rq_retry_success{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_rx_reset counter +envoy_cluster_upstream_rq_rx_reset{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_timeout counter +envoy_cluster_upstream_rq_timeout{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_total counter +envoy_cluster_upstream_rq_total{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_tx_reset counter +envoy_cluster_upstream_rq_tx_reset{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_manager_cluster_added counter +envoy_cluster_manager_cluster_added{} 1 +# TYPE envoy_cluster_manager_cluster_modified counter +envoy_cluster_manager_cluster_modified{} 0 +# TYPE envoy_cluster_manager_cluster_removed counter +envoy_cluster_manager_cluster_removed{} 0 +# TYPE envoy_cluster_manager_cluster_updated counter +envoy_cluster_manager_cluster_updated{} 0 +# TYPE envoy_cluster_manager_cluster_updated_via_merge counter +envoy_cluster_manager_cluster_updated_via_merge{} 0 +# TYPE envoy_cluster_manager_update_merge_cancelled counter +envoy_cluster_manager_update_merge_cancelled{} 0 +# TYPE envoy_cluster_manager_update_out_of_merge_window counter +envoy_cluster_manager_update_out_of_merge_window{} 0 +# TYPE envoy_dns_cares_get_addr_failure counter +envoy_dns_cares_get_addr_failure{} 0 +# TYPE envoy_dns_cares_not_found counter +envoy_dns_cares_not_found{} 0 +# TYPE envoy_dns_cares_resolve_total counter +envoy_dns_cares_resolve_total{} 1242 +# TYPE envoy_dns_cares_timeouts counter +envoy_dns_cares_timeouts{} 0 +# TYPE envoy_envoy_overload_actions_reset_high_memory_stream_count counter +envoy_envoy_overload_actions_reset_high_memory_stream_count{} 0 +# TYPE envoy_filesystem_flushed_by_timer counter +envoy_filesystem_flushed_by_timer{} 0 +# TYPE envoy_filesystem_reopen_failed counter +envoy_filesystem_reopen_failed{} 0 +# TYPE envoy_filesystem_write_buffered counter +envoy_filesystem_write_buffered{} 0 +# TYPE envoy_filesystem_write_completed counter +envoy_filesystem_write_completed{} 0 +# TYPE envoy_filesystem_write_failed counter +envoy_filesystem_write_failed{} 0 +# TYPE envoy_http_downstream_cx_delayed_close_timeout counter +envoy_http_downstream_cx_delayed_close_timeout{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_delayed_close_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy counter +envoy_http_downstream_cx_destroy{envoy_http_conn_manager_prefix="admin"} 4 +envoy_http_downstream_cx_destroy{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy_active_rq counter +envoy_http_downstream_cx_destroy_active_rq{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy_local counter +envoy_http_downstream_cx_destroy_local{envoy_http_conn_manager_prefix="admin"} 0 
+envoy_http_downstream_cx_destroy_local{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy_local_active_rq counter +envoy_http_downstream_cx_destroy_local_active_rq{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_local_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy_remote counter +envoy_http_downstream_cx_destroy_remote{envoy_http_conn_manager_prefix="admin"} 4 +envoy_http_downstream_cx_destroy_remote{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_destroy_remote_active_rq counter +envoy_http_downstream_cx_destroy_remote_active_rq{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_destroy_remote_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_drain_close counter +envoy_http_downstream_cx_drain_close{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_drain_close{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http1_total counter +envoy_http_downstream_cx_http1_total{envoy_http_conn_manager_prefix="admin"} 6 +envoy_http_downstream_cx_http1_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http2_total counter +envoy_http_downstream_cx_http2_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http2_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http3_total counter +envoy_http_downstream_cx_http3_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http3_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_idle_timeout counter +envoy_http_downstream_cx_idle_timeout{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_idle_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_max_duration_reached counter +envoy_http_downstream_cx_max_duration_reached{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_max_duration_reached{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_max_requests_reached counter +envoy_http_downstream_cx_max_requests_reached{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_max_requests_reached{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_overload_disable_keepalive counter +envoy_http_downstream_cx_overload_disable_keepalive{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_overload_disable_keepalive{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_protocol_error counter +envoy_http_downstream_cx_protocol_error{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_protocol_error{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_rx_bytes_total counter +envoy_http_downstream_cx_rx_bytes_total{envoy_http_conn_manager_prefix="admin"} 678 +envoy_http_downstream_cx_rx_bytes_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_ssl_total counter +envoy_http_downstream_cx_ssl_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_ssl_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_total counter +envoy_http_downstream_cx_total{envoy_http_conn_manager_prefix="admin"} 6 
+envoy_http_downstream_cx_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_tx_bytes_total counter +envoy_http_downstream_cx_tx_bytes_total{envoy_http_conn_manager_prefix="admin"} 212404 +envoy_http_downstream_cx_tx_bytes_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_upgrades_total counter +envoy_http_downstream_cx_upgrades_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_upgrades_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_flow_control_paused_reading_total counter +envoy_http_downstream_flow_control_paused_reading_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_flow_control_paused_reading_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_flow_control_resumed_reading_total counter +envoy_http_downstream_flow_control_resumed_reading_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_flow_control_resumed_reading_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_completed counter +envoy_http_downstream_rq_completed{envoy_http_conn_manager_prefix="admin"} 5 +envoy_http_downstream_rq_completed{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_failed_path_normalization counter +envoy_http_downstream_rq_failed_path_normalization{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_failed_path_normalization{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_header_timeout counter +envoy_http_downstream_rq_header_timeout{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_header_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_http1_total counter +envoy_http_downstream_rq_http1_total{envoy_http_conn_manager_prefix="admin"} 6 +envoy_http_downstream_rq_http1_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_http2_total counter +envoy_http_downstream_rq_http2_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_http2_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_http3_total counter +envoy_http_downstream_rq_http3_total{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_http3_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_idle_timeout counter +envoy_http_downstream_rq_idle_timeout{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_idle_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_max_duration_reached counter +envoy_http_downstream_rq_max_duration_reached{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_max_duration_reached{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_non_relative_path counter +envoy_http_downstream_rq_non_relative_path{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_non_relative_path{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_overload_close counter +envoy_http_downstream_rq_overload_close{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_overload_close{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_redirected_with_normalized_path counter 
+envoy_http_downstream_rq_redirected_with_normalized_path{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_redirected_with_normalized_path{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_rejected_via_ip_detection counter +envoy_http_downstream_rq_rejected_via_ip_detection{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_rejected_via_ip_detection{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_response_before_rq_complete counter +envoy_http_downstream_rq_response_before_rq_complete{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_response_before_rq_complete{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_rx_reset counter +envoy_http_downstream_rq_rx_reset{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_rx_reset{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_timeout counter +envoy_http_downstream_rq_timeout{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_too_large counter +envoy_http_downstream_rq_too_large{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_too_large{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_total counter +envoy_http_downstream_rq_total{envoy_http_conn_manager_prefix="admin"} 6 +envoy_http_downstream_rq_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_tx_reset counter +envoy_http_downstream_rq_tx_reset{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_tx_reset{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_ws_on_non_ws_route counter +envoy_http_downstream_rq_ws_on_non_ws_route{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_ws_on_non_ws_route{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_xx counter +envoy_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="admin"} 3 +envoy_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="admin"} 2 +envoy_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_no_cluster counter +envoy_http_no_cluster{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_no_cluster{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_no_route counter +envoy_http_no_route{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_no_route{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_passthrough_internal_redirect_bad_location counter 
+envoy_http_passthrough_internal_redirect_bad_location{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_bad_location{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_passthrough_internal_redirect_no_route counter +envoy_http_passthrough_internal_redirect_no_route{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_no_route{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_passthrough_internal_redirect_predicate counter +envoy_http_passthrough_internal_redirect_predicate{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_predicate{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_passthrough_internal_redirect_too_many_redirects counter +envoy_http_passthrough_internal_redirect_too_many_redirects{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_too_many_redirects{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_passthrough_internal_redirect_unsafe_scheme counter +envoy_http_passthrough_internal_redirect_unsafe_scheme{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_passthrough_internal_redirect_unsafe_scheme{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_rq_direct_response counter +envoy_http_rq_direct_response{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_direct_response{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_rq_redirect counter +envoy_http_rq_redirect{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_redirect{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_rq_reset_after_downstream_response_started counter +envoy_http_rq_reset_after_downstream_response_started{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_reset_after_downstream_response_started{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_rq_total counter +envoy_http_rq_total{envoy_http_conn_manager_prefix="async-client"} 0 +envoy_http_rq_total{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_rs_too_large counter +envoy_http_rs_too_large{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_rs_too_large{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_tracing_client_enabled counter +envoy_http_tracing_client_enabled{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_tracing_health_check counter +envoy_http_tracing_health_check{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_tracing_not_traceable counter +envoy_http_tracing_not_traceable{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_tracing_random_sampling counter +envoy_http_tracing_random_sampling{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_tracing_service_forced counter +envoy_http_tracing_service_forced{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http1_dropped_headers_with_underscores counter +envoy_http1_dropped_headers_with_underscores{} 0 +# TYPE envoy_http1_metadata_not_supported_error counter +envoy_http1_metadata_not_supported_error{} 0 +# TYPE envoy_http1_requests_rejected_with_underscores_in_headers counter +envoy_http1_requests_rejected_with_underscores_in_headers{} 0 +# TYPE envoy_http1_response_flood counter +envoy_http1_response_flood{} 0 +# TYPE envoy_listener_admin_downstream_cx_destroy counter +envoy_listener_admin_downstream_cx_destroy{} 4 +# TYPE 
envoy_listener_admin_downstream_cx_overflow counter +envoy_listener_admin_downstream_cx_overflow{} 0 +# TYPE envoy_listener_admin_downstream_cx_overload_reject counter +envoy_listener_admin_downstream_cx_overload_reject{} 0 +# TYPE envoy_listener_admin_downstream_cx_total counter +envoy_listener_admin_downstream_cx_total{} 6 +# TYPE envoy_listener_admin_downstream_cx_transport_socket_connect_timeout counter +envoy_listener_admin_downstream_cx_transport_socket_connect_timeout{} 0 +# TYPE envoy_listener_admin_downstream_global_cx_overflow counter +envoy_listener_admin_downstream_global_cx_overflow{} 0 +# TYPE envoy_listener_admin_downstream_listener_filter_error counter +envoy_listener_admin_downstream_listener_filter_error{} 0 +# TYPE envoy_listener_admin_downstream_listener_filter_remote_close counter +envoy_listener_admin_downstream_listener_filter_remote_close{} 0 +# TYPE envoy_listener_admin_downstream_pre_cx_timeout counter +envoy_listener_admin_downstream_pre_cx_timeout{} 0 +# TYPE envoy_listener_admin_http_downstream_rq_completed counter +envoy_listener_admin_http_downstream_rq_completed{envoy_http_conn_manager_prefix="admin"} 5 +# TYPE envoy_listener_admin_http_downstream_rq_xx counter +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="admin"} 0 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="admin"} 3 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="admin"} 0 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="admin"} 2 +envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="admin"} 0 +# TYPE envoy_listener_admin_main_thread_downstream_cx_total counter +envoy_listener_admin_main_thread_downstream_cx_total{} 6 +# TYPE envoy_listener_admin_no_filter_chain_match counter +envoy_listener_admin_no_filter_chain_match{} 0 +# TYPE envoy_listener_downstream_cx_destroy counter +envoy_listener_downstream_cx_destroy{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_cx_overflow counter +envoy_listener_downstream_cx_overflow{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_cx_overload_reject counter +envoy_listener_downstream_cx_overload_reject{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_cx_total counter +envoy_listener_downstream_cx_total{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_cx_transport_socket_connect_timeout counter +envoy_listener_downstream_cx_transport_socket_connect_timeout{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_global_cx_overflow counter +envoy_listener_downstream_global_cx_overflow{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_listener_filter_error counter +envoy_listener_downstream_listener_filter_error{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_listener_filter_remote_close counter +envoy_listener_downstream_listener_filter_remote_close{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_pre_cx_timeout counter +envoy_listener_downstream_pre_cx_timeout{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_extension_config_missing counter +envoy_listener_extension_config_missing{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE 
envoy_listener_http_downstream_rq_completed counter +envoy_listener_http_downstream_rq_completed{envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_http_downstream_rq_xx counter +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_no_filter_chain_match counter +envoy_listener_no_filter_chain_match{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_worker_downstream_cx_total counter +envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="10",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="11",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="12",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="13",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="14",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="15",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="2",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="3",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="4",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="5",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="6",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="7",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="8",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_total{envoy_worker_id="9",envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_manager_listener_added counter +envoy_listener_manager_listener_added{} 1 +# TYPE envoy_listener_manager_listener_create_failure counter +envoy_listener_manager_listener_create_failure{} 0 +# TYPE envoy_listener_manager_listener_create_success counter +envoy_listener_manager_listener_create_success{} 16 +# TYPE envoy_listener_manager_listener_in_place_updated counter +envoy_listener_manager_listener_in_place_updated{} 0 +# TYPE envoy_listener_manager_listener_modified counter +envoy_listener_manager_listener_modified{} 0 +# TYPE envoy_listener_manager_listener_removed counter +envoy_listener_manager_listener_removed{} 0 +# TYPE 
envoy_listener_manager_listener_stopped counter +envoy_listener_manager_listener_stopped{} 0 +# TYPE envoy_main_thread_watchdog_mega_miss counter +envoy_main_thread_watchdog_mega_miss{} 0 +# TYPE envoy_main_thread_watchdog_miss counter +envoy_main_thread_watchdog_miss{} 0 +# TYPE envoy_runtime_deprecated_feature_use counter +envoy_runtime_deprecated_feature_use{} 0 +# TYPE envoy_runtime_load_error counter +envoy_runtime_load_error{} 0 +# TYPE envoy_runtime_load_success counter +envoy_runtime_load_success{} 1 +# TYPE envoy_runtime_override_dir_exists counter +envoy_runtime_override_dir_exists{} 0 +# TYPE envoy_runtime_override_dir_not_exists counter +envoy_runtime_override_dir_not_exists{} 1 +# TYPE envoy_server_debug_assertion_failures counter +envoy_server_debug_assertion_failures{} 0 +# TYPE envoy_server_dropped_stat_flushes counter +envoy_server_dropped_stat_flushes{} 0 +# TYPE envoy_server_dynamic_unknown_fields counter +envoy_server_dynamic_unknown_fields{} 0 +# TYPE envoy_server_envoy_bug_failures counter +envoy_server_envoy_bug_failures{} 0 +# TYPE envoy_server_main_thread_watchdog_mega_miss counter +envoy_server_main_thread_watchdog_mega_miss{} 0 +# TYPE envoy_server_main_thread_watchdog_miss counter +envoy_server_main_thread_watchdog_miss{} 0 +# TYPE envoy_server_static_unknown_fields counter +envoy_server_static_unknown_fields{} 0 +# TYPE envoy_server_wip_protos counter +envoy_server_wip_protos{} 0 +# TYPE envoy_server_worker_watchdog_mega_miss counter +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="0"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="1"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="10"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="11"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="12"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="13"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="14"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="15"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="2"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="3"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="4"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="5"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="6"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="7"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="8"} 0 +envoy_server_worker_watchdog_mega_miss{envoy_worker_id="9"} 0 +# TYPE envoy_server_worker_watchdog_miss counter +envoy_server_worker_watchdog_miss{envoy_worker_id="0"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="1"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="10"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="11"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="12"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="13"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="14"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="15"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="2"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="3"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="4"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="5"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="6"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="7"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="8"} 0 +envoy_server_worker_watchdog_miss{envoy_worker_id="9"} 0 +# TYPE envoy_workers_watchdog_mega_miss counter 
+envoy_workers_watchdog_mega_miss{} 0 +# TYPE envoy_workers_watchdog_miss counter +envoy_workers_watchdog_miss{} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_open gauge +envoy_cluster_circuit_breakers_default_cx_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_default_cx_pool_open gauge +envoy_cluster_circuit_breakers_default_cx_pool_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_open gauge +envoy_cluster_circuit_breakers_default_rq_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_pending_open gauge +envoy_cluster_circuit_breakers_default_rq_pending_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_default_rq_retry_open gauge +envoy_cluster_circuit_breakers_default_rq_retry_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_open gauge +envoy_cluster_circuit_breakers_high_cx_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_high_cx_pool_open gauge +envoy_cluster_circuit_breakers_high_cx_pool_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_open gauge +envoy_cluster_circuit_breakers_high_rq_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_pending_open gauge +envoy_cluster_circuit_breakers_high_rq_pending_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_circuit_breakers_high_rq_retry_open gauge +envoy_cluster_circuit_breakers_high_rq_retry_open{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_lb_subsets_active gauge +envoy_cluster_lb_subsets_active{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_max_host_weight gauge +envoy_cluster_max_host_weight{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_membership_degraded gauge +envoy_cluster_membership_degraded{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_membership_excluded gauge +envoy_cluster_membership_excluded{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_membership_healthy gauge +envoy_cluster_membership_healthy{envoy_cluster_name="service_envoyproxy_io"} 1 +# TYPE envoy_cluster_membership_total gauge +envoy_cluster_membership_total{envoy_cluster_name="service_envoyproxy_io"} 1 +# TYPE envoy_cluster_upstream_cx_active gauge +envoy_cluster_upstream_cx_active{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_rx_bytes_buffered gauge +envoy_cluster_upstream_cx_rx_bytes_buffered{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_tx_bytes_buffered gauge +envoy_cluster_upstream_cx_tx_bytes_buffered{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_active gauge +envoy_cluster_upstream_rq_active{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_rq_pending_active gauge +envoy_cluster_upstream_rq_pending_active{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_version gauge +envoy_cluster_version{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_manager_active_clusters gauge +envoy_cluster_manager_active_clusters{} 1 +# TYPE envoy_cluster_manager_warming_clusters gauge +envoy_cluster_manager_warming_clusters{} 0 +# TYPE envoy_dns_cares_pending_resolutions gauge +envoy_dns_cares_pending_resolutions{} 0 +# 
TYPE envoy_filesystem_write_total_buffered gauge +envoy_filesystem_write_total_buffered{} 0 +# TYPE envoy_http_downstream_cx_active gauge +envoy_http_downstream_cx_active{envoy_http_conn_manager_prefix="admin"} 2 +envoy_http_downstream_cx_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http1_active gauge +envoy_http_downstream_cx_http1_active{envoy_http_conn_manager_prefix="admin"} 2 +envoy_http_downstream_cx_http1_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http2_active gauge +envoy_http_downstream_cx_http2_active{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http2_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_http3_active gauge +envoy_http_downstream_cx_http3_active{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_http3_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_rx_bytes_buffered gauge +envoy_http_downstream_cx_rx_bytes_buffered{envoy_http_conn_manager_prefix="admin"} 245 +envoy_http_downstream_cx_rx_bytes_buffered{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_ssl_active gauge +envoy_http_downstream_cx_ssl_active{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_ssl_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_tx_bytes_buffered gauge +envoy_http_downstream_cx_tx_bytes_buffered{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_tx_bytes_buffered{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_cx_upgrades_active gauge +envoy_http_downstream_cx_upgrades_active{envoy_http_conn_manager_prefix="admin"} 0 +envoy_http_downstream_cx_upgrades_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_active gauge +envoy_http_downstream_rq_active{envoy_http_conn_manager_prefix="admin"} 1 +envoy_http_downstream_rq_active{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_listener_admin_downstream_cx_active gauge +envoy_listener_admin_downstream_cx_active{} 2 +# TYPE envoy_listener_admin_downstream_pre_cx_active gauge +envoy_listener_admin_downstream_pre_cx_active{} 0 +# TYPE envoy_listener_admin_main_thread_downstream_cx_active gauge +envoy_listener_admin_main_thread_downstream_cx_active{} 2 +# TYPE envoy_listener_downstream_cx_active gauge +envoy_listener_downstream_cx_active{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_downstream_pre_cx_active gauge +envoy_listener_downstream_pre_cx_active{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_worker_downstream_cx_active gauge +envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="10",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="11",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="12",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="13",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="14",envoy_listener_address="0.0.0.0_10000"} 0 
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="15",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="2",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="3",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="4",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="5",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="6",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="7",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="8",envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_worker_downstream_cx_active{envoy_worker_id="9",envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_listener_manager_total_filter_chains_draining gauge +envoy_listener_manager_total_filter_chains_draining{} 0 +# TYPE envoy_listener_manager_total_listeners_active gauge +envoy_listener_manager_total_listeners_active{} 1 +# TYPE envoy_listener_manager_total_listeners_draining gauge +envoy_listener_manager_total_listeners_draining{} 0 +# TYPE envoy_listener_manager_total_listeners_warming gauge +envoy_listener_manager_total_listeners_warming{} 0 +# TYPE envoy_listener_manager_workers_started gauge +envoy_listener_manager_workers_started{} 1 +# TYPE envoy_runtime_admin_overrides_active gauge +envoy_runtime_admin_overrides_active{} 0 +# TYPE envoy_runtime_deprecated_feature_seen_since_process_start gauge +envoy_runtime_deprecated_feature_seen_since_process_start{} 0 +# TYPE envoy_runtime_num_keys gauge +envoy_runtime_num_keys{} 0 +# TYPE envoy_runtime_num_layers gauge +envoy_runtime_num_layers{} 0 +# TYPE envoy_server_compilation_settings_fips_mode gauge +envoy_server_compilation_settings_fips_mode{} 0 +# TYPE envoy_server_concurrency gauge +envoy_server_concurrency{} 16 +# TYPE envoy_server_days_until_first_cert_expiring gauge +envoy_server_days_until_first_cert_expiring{} 4294967295 +# TYPE envoy_server_hot_restart_epoch gauge +envoy_server_hot_restart_epoch{} 0 +# TYPE envoy_server_hot_restart_generation gauge +envoy_server_hot_restart_generation{} 1 +# TYPE envoy_server_live gauge +envoy_server_live{} 1 +# TYPE envoy_server_memory_allocated gauge +envoy_server_memory_allocated{} 7630184 +# TYPE envoy_server_memory_heap_size gauge +envoy_server_memory_heap_size{} 16777216 +# TYPE envoy_server_memory_physical_size gauge +envoy_server_memory_physical_size{} 28426958 +# TYPE envoy_server_parent_connections gauge +envoy_server_parent_connections{} 0 +# TYPE envoy_server_seconds_until_first_ocsp_response_expiring gauge +envoy_server_seconds_until_first_ocsp_response_expiring{} 0 +# TYPE envoy_server_state gauge +envoy_server_state{} 0 +# TYPE envoy_server_stats_recent_lookups gauge +envoy_server_stats_recent_lookups{} 1763 +# TYPE envoy_server_total_connections gauge +envoy_server_total_connections{} 0 +# TYPE envoy_server_uptime gauge +envoy_server_uptime{} 6225 +# TYPE envoy_server_version gauge +envoy_server_version{} 9993205 +# TYPE envoy_cluster_upstream_cx_connect_ms histogram +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="0.5"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1"} 0 
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="25"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="50"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="100"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="250"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="500"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="2500"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="30000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="60000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="300000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="600000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1800000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="3600000"} 0 +envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="+Inf"} 0 +envoy_cluster_upstream_cx_connect_ms_sum{envoy_cluster_name="service_envoyproxy_io"} 0 +envoy_cluster_upstream_cx_connect_ms_count{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_cluster_upstream_cx_length_ms histogram +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="0.5"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="25"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="50"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="100"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="250"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="500"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="2500"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="30000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="60000"} 0 
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="300000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="600000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1800000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="3600000"} 0 +envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="+Inf"} 0 +envoy_cluster_upstream_cx_length_ms_sum{envoy_cluster_name="service_envoyproxy_io"} 0 +envoy_cluster_upstream_cx_length_ms_count{envoy_cluster_name="service_envoyproxy_io"} 0 +# TYPE envoy_http_downstream_cx_length_ms histogram +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="0.5"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="5"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="10"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="25"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="50"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="100"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="250"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="500"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1000"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="2500"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="5000"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="10000"} 3 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="30000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="60000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="300000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="600000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1800000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="3600000"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="+Inf"} 4 +envoy_http_downstream_cx_length_ms_sum{envoy_http_conn_manager_prefix="admin"} 17506.150000000001455191522836685 +envoy_http_downstream_cx_length_ms_count{envoy_http_conn_manager_prefix="admin"} 4 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="0.5"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="25"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="50"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="100"} 0 
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="250"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="500"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="2500"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="30000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="60000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="300000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="600000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1800000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="3600000"} 0 +envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="+Inf"} 0 +envoy_http_downstream_cx_length_ms_sum{envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_cx_length_ms_count{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_http_downstream_rq_time histogram +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="0.5"} 2 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1"} 2 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="5"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="10"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="25"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="50"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="100"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="250"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="500"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="2500"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="5000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="10000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="30000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="60000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="300000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="600000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1800000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="3600000"} 5 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="+Inf"} 5 +envoy_http_downstream_rq_time_sum{envoy_http_conn_manager_prefix="admin"} 3.1500000000000003552713678800501 +envoy_http_downstream_rq_time_count{envoy_http_conn_manager_prefix="admin"} 5 
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="0.5"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="25"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="50"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="100"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="250"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="500"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="2500"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="30000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="60000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="300000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="600000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1800000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="3600000"} 0 +envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="+Inf"} 0 +envoy_http_downstream_rq_time_sum{envoy_http_conn_manager_prefix="ingress_http"} 0 +envoy_http_downstream_rq_time_count{envoy_http_conn_manager_prefix="ingress_http"} 0 +# TYPE envoy_listener_admin_downstream_cx_length_ms histogram +envoy_listener_admin_downstream_cx_length_ms_bucket{le="0.5"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="1"} 0 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="5"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="10"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="25"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="50"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="100"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="250"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="500"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="1000"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="2500"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="5000"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="10000"} 3 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="30000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="60000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="300000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="600000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="1800000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="3600000"} 4 +envoy_listener_admin_downstream_cx_length_ms_bucket{le="+Inf"} 4 +envoy_listener_admin_downstream_cx_length_ms_sum{} 
17506.150000000001455191522836685 +envoy_listener_admin_downstream_cx_length_ms_count{} 4 +# TYPE envoy_listener_downstream_cx_length_ms histogram +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="0.5"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="5"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="10"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="25"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="50"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="100"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="250"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="500"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="2500"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="5000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="10000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="30000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="60000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="300000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="600000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1800000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="3600000"} 0 +envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="+Inf"} 0 +envoy_listener_downstream_cx_length_ms_sum{envoy_listener_address="0.0.0.0_10000"} 0 +envoy_listener_downstream_cx_length_ms_count{envoy_listener_address="0.0.0.0_10000"} 0 +# TYPE envoy_server_initialization_time_ms histogram +envoy_server_initialization_time_ms_bucket{le="0.5"} 0 +envoy_server_initialization_time_ms_bucket{le="1"} 0 +envoy_server_initialization_time_ms_bucket{le="5"} 0 +envoy_server_initialization_time_ms_bucket{le="10"} 0 +envoy_server_initialization_time_ms_bucket{le="25"} 0 +envoy_server_initialization_time_ms_bucket{le="50"} 0 +envoy_server_initialization_time_ms_bucket{le="100"} 1 +envoy_server_initialization_time_ms_bucket{le="250"} 1 +envoy_server_initialization_time_ms_bucket{le="500"} 1 +envoy_server_initialization_time_ms_bucket{le="1000"} 1 +envoy_server_initialization_time_ms_bucket{le="2500"} 1 +envoy_server_initialization_time_ms_bucket{le="5000"} 1 +envoy_server_initialization_time_ms_bucket{le="10000"} 1 +envoy_server_initialization_time_ms_bucket{le="30000"} 1 +envoy_server_initialization_time_ms_bucket{le="60000"} 1 +envoy_server_initialization_time_ms_bucket{le="300000"} 1 +envoy_server_initialization_time_ms_bucket{le="600000"} 1 +envoy_server_initialization_time_ms_bucket{le="1800000"} 1 +envoy_server_initialization_time_ms_bucket{le="3600000"} 1 +envoy_server_initialization_time_ms_bucket{le="+Inf"} 1 +envoy_server_initialization_time_ms_sum{} 76.5 
+envoy_server_initialization_time_ms_count{} 1
diff --git a/src/go/collectors/go.d.plugin/modules/example/README.md b/src/go/collectors/go.d.plugin/modules/example/README.md
new file mode 100644
index 00000000000000..f6c80ea62ad713
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/example/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "Example module"
+description: "Use this example data collection module, which produces example charts with random values, to better understand how to build your own collector in Go."
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/example/README.md"
+sidebar_label: "Example module in Go"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Mock Collectors"
+-->
+
+# Example module
+
+An example data collection module. Use it as a reference when writing a new module.
+
+## Charts
+
+This module produces example charts with random values. The number of charts, dimensions, and the chart type are configurable.
+
+## Configuration
+
+Edit the `go.d/example.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/example.conf
+```
+
+The module is disabled by default. It must be explicitly enabled
+in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf).
+
+```yaml
+# go.d.conf
+modules:
+  example: yes
+```
+
+Here is an example configuration with several jobs:
+
+```yaml
+jobs:
+  - name: example
+    charts:
+      num: 3
+      dimensions: 5
+
+  - name: hidden_example
+    hidden_charts:
+      num: 3
+      dimensions: 5
+```
+
+---
+
+For all available options, see the Example
+collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/example.conf).
+
+## Troubleshooting
+
+To troubleshoot issues with the `example` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m example + ``` diff --git a/src/go/collectors/go.d.plugin/modules/example/charts.go b/src/go/collectors/go.d.plugin/modules/example/charts.go new file mode 100644 index 00000000000000..85604d13a351bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/charts.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package example + +import ( + "fmt" + "github.com/netdata/go.d.plugin/agent/module" +) + +var chartTemplate = module.Chart{ + ID: "random_%d", + Title: "A Random Number", + Units: "random", + Fam: "random", + Ctx: "example.random", +} + +var hiddenChartTemplate = module.Chart{ + ID: "hidden_random_%d", + Title: "A Random Number", + Units: "random", + Fam: "random", + Ctx: "example.random", + Opts: module.Opts{ + Hidden: true, + }, +} + +func newChart(num, ctx, labels int, typ module.ChartType) *module.Chart { + chart := chartTemplate.Copy() + chart.ID = fmt.Sprintf(chart.ID, num) + chart.Type = typ + if ctx > 0 { + chart.Ctx += fmt.Sprintf("_%d", ctx) + } + for i := 0; i < labels; i++ { + chart.Labels = append(chart.Labels, module.Label{ + Key: fmt.Sprintf("example_name_%d", i), + Value: fmt.Sprintf("example_value_%d_%d", num, i), + }) + } + return chart +} + +func newHiddenChart(num, ctx, labels int, typ module.ChartType) *module.Chart { + chart := hiddenChartTemplate.Copy() + chart.ID = fmt.Sprintf(chart.ID, num) + chart.Type = typ + if ctx > 0 { + chart.Ctx += fmt.Sprintf("_%d", ctx) + } + for i := 0; i < labels; i++ { + chart.Labels = append(chart.Labels, module.Label{ + Key: fmt.Sprintf("example_name_%d", i), + Value: fmt.Sprintf("example_value_%d_%d", num, i), + }) + } + return chart +} diff --git a/src/go/collectors/go.d.plugin/modules/example/collect.go b/src/go/collectors/go.d.plugin/modules/example/collect.go new file mode 100644 index 00000000000000..b0a9b9d7bb626e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/collect.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package example + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (e *Example) collect() (map[string]int64, error) { + collected := make(map[string]int64) + + for _, chart := range *e.Charts() { + e.collectChart(collected, chart) + } + return collected, nil +} + +func (e *Example) collectChart(collected map[string]int64, chart *module.Chart) { + var num int + if chart.Opts.Hidden { + num = e.Config.HiddenCharts.Dims + } else { + num = e.Config.Charts.Dims + } + + for i := 0; i < num; i++ { + name := fmt.Sprintf("random%d", i) + id := fmt.Sprintf("%s_%s", chart.ID, name) + + if !e.collectedDims[id] { + e.collectedDims[id] = true + + dim := &module.Dim{ID: id, Name: name} + if err := chart.AddDim(dim); err != nil { + e.Warning(err) + } + chart.MarkNotCreated() + } + if i%2 == 0 { + collected[id] = e.randInt() + } else { + collected[id] = -e.randInt() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/example/config_schema.json b/src/go/collectors/go.d.plugin/modules/example/config_schema.json new file mode 100644 index 00000000000000..852b39b1cbdaed --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/config_schema.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/example job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "charts": { + "type": "object", + "properties": { 
+ "type": { + "type": "string" + }, + "num": { + "type": "integer" + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer" + }, + "labels": { + "type": "integer" + } + }, + "required": [ + "type", + "num", + "contexts", + "dimensions", + "labels" + ] + }, + "hidden_charts": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "num": { + "type": "integer" + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer" + }, + "labels": { + "type": "integer" + } + }, + "required": [ + "type", + "num", + "contexts", + "dimensions", + "labels" + ] + } + }, + "required": [ + "name", + "charts" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/example/example.go b/src/go/collectors/go.d.plugin/modules/example/example.go new file mode 100644 index 00000000000000..fe24bcc3e4c0ce --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/example.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package example + +import ( + _ "embed" + "math/rand" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("example", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: module.UpdateEvery, + AutoDetectionRetry: module.AutoDetectionRetry, + Priority: module.Priority, + Disabled: true, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Example { + return &Example{ + Config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 4, + }, + HiddenCharts: ConfigCharts{ + Num: 0, + Dims: 4, + }, + }, + + randInt: func() int64 { return rand.Int63n(100) }, + collectedDims: make(map[string]bool), + } +} + +type ( + Config struct { + Charts ConfigCharts `yaml:"charts"` + HiddenCharts ConfigCharts `yaml:"hidden_charts"` + } + ConfigCharts struct { + Type string `yaml:"type"` + Num int `yaml:"num"` + Contexts int `yaml:"contexts"` + Dims int `yaml:"dimensions"` + Labels int `yaml:"labels"` + } +) + +type Example struct { + module.Base // should be embedded by every module + Config `yaml:",inline"` + + randInt func() int64 + charts *module.Charts + collectedDims map[string]bool +} + +func (e *Example) Init() bool { + err := e.validateConfig() + if err != nil { + e.Errorf("config validation: %v", err) + return false + } + + charts, err := e.initCharts() + if err != nil { + e.Errorf("charts init: %v", err) + return false + } + e.charts = charts + return true +} + +func (e *Example) Check() bool { + return len(e.Collect()) > 0 +} + +func (e *Example) Charts() *module.Charts { + return e.charts +} + +func (e *Example) Collect() map[string]int64 { + mx, err := e.collect() + if err != nil { + e.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (e *Example) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/example/example_test.go b/src/go/collectors/go.d.plugin/modules/example/example_test.go new file mode 100644 index 00000000000000..47cc51a2f50fc6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/example_test.go @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package example + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + // We want to ensure that module is a reference type, nothing more. 
+ + assert.IsType(t, (*Example)(nil), New()) +} + +func TestExample_Init(t *testing.T) { + // 'Init() bool' initializes the module with an appropriate config, so to test it we need: + // - provide the config. + // - set module.Config field with the config. + // - call Init() and compare its return value with the expected value. + + // 'test' map contains different test cases. + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "success when only 'charts' set": { + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "success when only 'hidden_charts' set": { + config: Config{ + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "success when 'charts' and 'hidden_charts' set": { + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 2, + }, + }, + }, + "fails when 'charts' and 'hidden_charts' set, but 'num' == 0": { + wantFail: true, + config: Config{ + Charts: ConfigCharts{ + Num: 0, + Dims: 2, + }, + HiddenCharts: ConfigCharts{ + Num: 0, + Dims: 2, + }, + }, + }, + "fails when only 'charts' set, 'num' > 0, but 'dimensions' == 0": { + wantFail: true, + config: Config{ + Charts: ConfigCharts{ + Num: 1, + Dims: 0, + }, + }, + }, + "fails when only 'hidden_charts' set, 'num' > 0, but 'dimensions' == 0": { + wantFail: true, + config: Config{ + HiddenCharts: ConfigCharts{ + Num: 1, + Dims: 0, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + example := New() + example.Config = test.config + + if test.wantFail { + assert.False(t, example.Init()) + } else { + assert.True(t, example.Init()) + } + }) + } +} + +func TestExample_Check(t *testing.T) { + // 'Check() bool' reports whether the module is able to collect any data, so to test it we need: + // - provide the module with a specific config. + // - initialize the module (call Init()). + // - call Check() and compare its return value with the expected value. + + // 'test' map contains different test cases. + tests := map[string]struct { + prepare func() *Example + wantFail bool + }{ + "success on default": {prepare: prepareExampleDefault}, + "success when only 'charts' set": {prepare: prepareExampleOnlyCharts}, + "success when only 'hidden_charts' set": {prepare: prepareExampleOnlyHiddenCharts}, + "success when 'charts' and 'hidden_charts' set": {prepare: prepareExampleChartsAndHiddenCharts}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + example := test.prepare() + require.True(t, example.Init()) + + if test.wantFail { + assert.False(t, example.Check()) + } else { + assert.True(t, example.Check()) + } + }) + } +} + +func TestExample_Charts(t *testing.T) { + // We want to ensure that initialized module does not return 'nil'. + // If it is not 'nil' we are ok. + + // 'test' map contains different test cases. 
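+    // Charts() simply returns the charts field, which stays nil until a
+    // successful Init() builds the chart set; the "not initialized" case
+    // below therefore expects nil.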
+ tests := map[string]struct { + prepare func(t *testing.T) *Example + wantNil bool + }{ + "not initialized collector": { + wantNil: true, + prepare: func(t *testing.T) *Example { + return New() + }, + }, + "initialized collector": { + prepare: func(t *testing.T) *Example { + example := New() + require.True(t, example.Init()) + return example + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + example := test.prepare(t) + + if test.wantNil { + assert.Nil(t, example.Charts()) + } else { + assert.NotNil(t, example.Charts()) + } + }) + } +} + +func TestExample_Cleanup(t *testing.T) { + // Since this module has nothing to clean up, + // we want just to ensure that Cleanup() not panics. + + assert.NotPanics(t, New().Cleanup) +} + +func TestExample_Collect(t *testing.T) { + // 'Collect() map[string]int64' returns collected data, so to test it we need: + // - provide the module with a specific config. + // - initialize the module (call Init()). + // - call Collect() and compare its return value with the expected value. + + // 'test' map contains different test cases. + tests := map[string]struct { + prepare func() *Example + wantCollected map[string]int64 + }{ + "default config": { + prepare: prepareExampleDefault, + wantCollected: map[string]int64{ + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + }, + }, + "only 'charts' set": { + prepare: prepareExampleOnlyCharts, + wantCollected: map[string]int64{ + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + "random_0_random4": 1, + "random_1_random0": 1, + "random_1_random1": -1, + "random_1_random2": 1, + "random_1_random3": -1, + "random_1_random4": 1, + }, + }, + "only 'hidden_charts' set": { + prepare: prepareExampleOnlyHiddenCharts, + wantCollected: map[string]int64{ + "hidden_random_0_random0": 1, + "hidden_random_0_random1": -1, + "hidden_random_0_random2": 1, + "hidden_random_0_random3": -1, + "hidden_random_0_random4": 1, + "hidden_random_1_random0": 1, + "hidden_random_1_random1": -1, + "hidden_random_1_random2": 1, + "hidden_random_1_random3": -1, + "hidden_random_1_random4": 1, + }, + }, + "'charts' and 'hidden_charts' set": { + prepare: prepareExampleChartsAndHiddenCharts, + wantCollected: map[string]int64{ + "hidden_random_0_random0": 1, + "hidden_random_0_random1": -1, + "hidden_random_0_random2": 1, + "hidden_random_0_random3": -1, + "hidden_random_0_random4": 1, + "hidden_random_1_random0": 1, + "hidden_random_1_random1": -1, + "hidden_random_1_random2": 1, + "hidden_random_1_random3": -1, + "hidden_random_1_random4": 1, + "random_0_random0": 1, + "random_0_random1": -1, + "random_0_random2": 1, + "random_0_random3": -1, + "random_0_random4": 1, + "random_1_random0": 1, + "random_1_random1": -1, + "random_1_random2": 1, + "random_1_random3": -1, + "random_1_random4": 1, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + example := test.prepare() + require.True(t, example.Init()) + + collected := example.Collect() + + assert.Equal(t, test.wantCollected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, example, collected) + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, e *Example, collected map[string]int64) { + for _, chart := range *e.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, + "collected metrics has no data for dim '%s' chart '%s'", 
dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, + "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareExampleDefault() *Example { + return prepareExample(New().Config) +} + +func prepareExampleOnlyCharts() *Example { + return prepareExample(Config{ + Charts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareExampleOnlyHiddenCharts() *Example { + return prepareExample(Config{ + HiddenCharts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareExampleChartsAndHiddenCharts() *Example { + return prepareExample(Config{ + Charts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + HiddenCharts: ConfigCharts{ + Num: 2, + Dims: 5, + }, + }) +} + +func prepareExample(cfg Config) *Example { + example := New() + example.Config = cfg + example.randInt = func() int64 { return 1 } + return example +} diff --git a/src/go/collectors/go.d.plugin/modules/example/init.go b/src/go/collectors/go.d.plugin/modules/example/init.go new file mode 100644 index 00000000000000..fdfbcd80723ca8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/example/init.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package example + +import ( + "errors" + "github.com/netdata/go.d.plugin/agent/module" +) + +func (e *Example) validateConfig() error { + if e.Config.Charts.Num <= 0 && e.Config.HiddenCharts.Num <= 0 { + return errors.New("'charts->num' or `hidden_charts->num` must be > 0") + } + if e.Config.Charts.Num > 0 && e.Config.Charts.Dims <= 0 { + return errors.New("'charts->dimensions' must be > 0") + } + if e.Config.HiddenCharts.Num > 0 && e.Config.HiddenCharts.Dims <= 0 { + return errors.New("'hidden_charts->dimensions' must be > 0") + } + return nil +} + +func (e *Example) initCharts() (*module.Charts, error) { + charts := &module.Charts{} + + var ctx int + v := calcContextEvery(e.Config.Charts.Num, e.Config.Charts.Contexts) + for i := 0; i < e.Config.Charts.Num; i++ { + if i != 0 && v != 0 && ctx < (e.Config.Charts.Contexts-1) && i%v == 0 { + ctx++ + } + chart := newChart(i, ctx, e.Config.Charts.Labels, module.ChartType(e.Config.Charts.Type)) + + if err := charts.Add(chart); err != nil { + return nil, err + } + } + + ctx = 0 + v = calcContextEvery(e.Config.HiddenCharts.Num, e.Config.HiddenCharts.Contexts) + for i := 0; i < e.Config.HiddenCharts.Num; i++ { + if i != 0 && v != 0 && ctx < (e.Config.HiddenCharts.Contexts-1) && i%v == 0 { + ctx++ + } + chart := newHiddenChart(i, ctx, e.Config.HiddenCharts.Labels, module.ChartType(e.Config.HiddenCharts.Type)) + + if err := charts.Add(chart); err != nil { + return nil, err + } + } + + return charts, nil +} + +func calcContextEvery(charts, contexts int) int { + if contexts <= 1 { + return 0 + } + if contexts > charts { + return 1 + } + return charts / contexts +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/README.md b/src/go/collectors/go.d.plugin/modules/filecheck/README.md new file mode 120000 index 00000000000000..24dc78d8d60b88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/README.md @@ -0,0 +1 @@ +integrations/files_and_directories.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/charts.go b/src/go/collectors/go.d.plugin/modules/filecheck/charts.go new file mode 100644 index 00000000000000..c00f1fda100e0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/charts.go @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package 
filecheck + +import "github.com/netdata/go.d.plugin/agent/module" + +var ( + fileCharts = module.Charts{ + fileExistenceChart.Copy(), + fileModTimeAgoChart.Copy(), + fileSizeChart.Copy(), + } + + fileExistenceChart = module.Chart{ + ID: "file_existence", + Title: "File Existence (0: not exists, 1: exists)", + Units: "boolean", + Fam: "files", + Ctx: "filecheck.file_existence", + Vars: module.Vars{ + {ID: "num_of_files"}, + }, + } + fileModTimeAgoChart = module.Chart{ + ID: "file_mtime_ago", + Title: "File Time Since the Last Modification", + Units: "seconds", + Fam: "files", + Ctx: "filecheck.file_mtime_ago", + } + fileSizeChart = module.Chart{ + ID: "file_size", + Title: "File Size", + Units: "bytes", + Fam: "files", + Ctx: "filecheck.file_size", + } +) + +var ( + dirCharts = module.Charts{ + dirExistenceChart.Copy(), + dirModTimeChart.Copy(), + dirNumOfFilesChart.Copy(), + dirSizeChart.Copy(), + } + + dirExistenceChart = module.Chart{ + ID: "dir_existence", + Title: "Dir Existence (0: not exists, 1: exists)", + Units: "boolean", + Fam: "dirs", + Ctx: "filecheck.dir_existence", + Vars: module.Vars{ + {ID: "num_of_dirs"}, + }, + } + dirModTimeChart = module.Chart{ + ID: "dir_mtime_ago", + Title: "Dir Time Since the Last Modification", + Units: "seconds", + Fam: "dirs", + Ctx: "filecheck.dir_mtime_ago", + } + dirNumOfFilesChart = module.Chart{ + ID: "dir_num_of_files", + Title: "Dir Number of Files", + Units: "files", + Fam: "dirs", + Ctx: "filecheck.dir_num_of_files", + } + dirSizeChart = module.Chart{ + ID: "dir_size", + Title: "Dir Size", + Units: "bytes", + Fam: "dirs", + Ctx: "filecheck.dir_size", + } +) diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/collect.go b/src/go/collectors/go.d.plugin/modules/filecheck/collect.go new file mode 100644 index 00000000000000..921846a7581a14 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/collect.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + "regexp" + "runtime" + "strings" +) + +func (fc *Filecheck) collect() (map[string]int64, error) { + ms := make(map[string]int64) + + fc.collectFiles(ms) + fc.collectDirs(ms) + + return ms, nil +} + +func hasMeta(path string) bool { + magicChars := `*?[` + if runtime.GOOS != "windows" { + magicChars = `*?[\` + } + return strings.ContainsAny(path, magicChars) +} + +func removeDuplicates(s []string) []string { + set := make(map[string]bool, len(s)) + uniq := s[:0] + for _, v := range s { + if !set[v] { + set[v] = true + uniq = append(uniq, v) + } + } + return uniq +} + +var reSpace = regexp.MustCompile(`\s`) diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/collect_dirs.go b/src/go/collectors/go.d.plugin/modules/filecheck/collect_dirs.go new file mode 100644 index 00000000000000..32861c0e0263e3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/collect_dirs.go @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (fc *Filecheck) collectDirs(ms map[string]int64) { + curTime := time.Now() + if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration { + fc.lastDiscoveryDirs = curTime + fc.curDirs = fc.discoveryDirs() + fc.updateDirsCharts(fc.curDirs) + } + + for _, path := range fc.curDirs { + fc.collectDir(ms, path, curTime) + } + ms["num_of_dirs"] = int64(len(fc.curDirs)) +} + +func (fc *Filecheck) collectDir(ms map[string]int64, path 
string, curTime time.Time) { + info, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + ms[dirDimID(path, "exists")] = 0 + } else { + ms[dirDimID(path, "exists")] = 1 + } + fc.Debug(err) + return + } + + if !info.IsDir() { + return + } + + ms[dirDimID(path, "exists")] = 1 + ms[dirDimID(path, "mtime_ago")] = int64(curTime.Sub(info.ModTime()).Seconds()) + if num, err := calcDirNumOfFiles(path); err == nil { + ms[dirDimID(path, "num_of_files")] = int64(num) + } + if fc.Dirs.CollectDirSize { + if size, err := calcDirSize(path); err == nil { + ms[dirDimID(path, "size_bytes")] = size + } + } +} + +func (fc Filecheck) discoveryDirs() (dirs []string) { + for _, path := range fc.Dirs.Include { + if hasMeta(path) { + continue + } + dirs = append(dirs, path) + } + + for _, path := range fc.Dirs.Include { + if !hasMeta(path) { + continue + } + matches, _ := filepath.Glob(path) + for _, v := range matches { + fi, err := os.Lstat(v) + if err == nil && fi.IsDir() { + dirs = append(dirs, v) + } + } + } + return removeDuplicates(dirs) +} + +func (fc *Filecheck) updateDirsCharts(dirs []string) { + set := make(map[string]bool, len(dirs)) + for _, path := range dirs { + set[path] = true + if !fc.collectedDirs[path] { + fc.collectedDirs[path] = true + fc.addDirToCharts(path) + } + } + for path := range fc.collectedDirs { + if !set[path] { + delete(fc.collectedDirs, path) + fc.removeDirFromCharts(path) + } + } +} + +func (fc *Filecheck) addDirToCharts(path string) { + for _, chart := range *fc.Charts() { + if !strings.HasPrefix(chart.ID, "dir_") { + continue + } + + var id string + switch chart.ID { + case dirExistenceChart.ID: + id = dirDimID(path, "exists") + case dirModTimeChart.ID: + id = dirDimID(path, "mtime_ago") + case dirNumOfFilesChart.ID: + id = dirDimID(path, "num_of_files") + case dirSizeChart.ID: + id = dirDimID(path, "size_bytes") + default: + fc.Warningf("add dimension: couldn't dim id for '%s' chart (dir '%s')", chart.ID, path) + continue + } + + dim := &module.Dim{ID: id, Name: reSpace.ReplaceAllString(path, "_")} + + if err := chart.AddDim(dim); err != nil { + fc.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func (fc *Filecheck) removeDirFromCharts(path string) { + for _, chart := range *fc.Charts() { + if !strings.HasPrefix(chart.ID, "dir_") { + continue + } + + var id string + switch chart.ID { + case dirExistenceChart.ID: + id = dirDimID(path, "exists") + case dirModTimeChart.ID: + id = dirDimID(path, "mtime_ago") + case dirNumOfFilesChart.ID: + id = dirDimID(path, "num_of_files") + case dirSizeChart.ID: + id = dirDimID(path, "size_bytes") + default: + fc.Warningf("remove dimension: couldn't dim id for '%s' chart (dir '%s')", chart.ID, path) + continue + } + + if err := chart.MarkDimRemove(id, true); err != nil { + fc.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func dirDimID(path, metric string) string { + return fmt.Sprintf("dir_%s_%s", reSpace.ReplaceAllString(path, "_"), metric) +} + +func calcDirNumOfFiles(dirpath string) (int, error) { + f, err := os.Open(dirpath) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + // TODO: include dirs? 
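+    // Readdirnames(-1) returns the names of all remaining directory entries
+    // in a single slice (with a nil error at EOF), so the reported count
+    // also includes subdirectory entries, not only regular files.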
+ names, err := f.Readdirnames(-1) + return len(names), err +} + +func calcDirSize(dirpath string) (int64, error) { + var size int64 + err := filepath.Walk(dirpath, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return nil + }) + return size, err +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/collect_files.go b/src/go/collectors/go.d.plugin/modules/filecheck/collect_files.go new file mode 100644 index 00000000000000..25568473f5dff5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/collect_files.go @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (fc *Filecheck) collectFiles(ms map[string]int64) { + curTime := time.Now() + if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration { + fc.lastDiscoveryFiles = curTime + fc.curFiles = fc.discoveryFiles() + fc.updateFilesCharts(fc.curFiles) + } + + for _, path := range fc.curFiles { + fc.collectFile(ms, path, curTime) + } + ms["num_of_files"] = int64(len(fc.curFiles)) +} + +func (fc *Filecheck) collectFile(ms map[string]int64, path string, curTime time.Time) { + info, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + ms[fileDimID(path, "exists")] = 0 + } else { + ms[fileDimID(path, "exists")] = 1 + } + fc.Debug(err) + return + } + + if info.IsDir() { + return + } + + ms[fileDimID(path, "exists")] = 1 + ms[fileDimID(path, "size_bytes")] = info.Size() + ms[fileDimID(path, "mtime_ago")] = int64(curTime.Sub(info.ModTime()).Seconds()) +} + +func (fc Filecheck) discoveryFiles() (files []string) { + for _, path := range fc.Files.Include { + if hasMeta(path) { + continue + } + files = append(files, path) + } + + for _, path := range fc.Files.Include { + if !hasMeta(path) { + continue + } + matches, _ := filepath.Glob(path) + for _, v := range matches { + fi, err := os.Lstat(v) + if err == nil && fi.Mode().IsRegular() { + files = append(files, v) + } + } + } + return removeDuplicates(files) +} + +func (fc *Filecheck) updateFilesCharts(files []string) { + set := make(map[string]bool, len(files)) + for _, path := range files { + set[path] = true + if !fc.collectedFiles[path] { + fc.collectedFiles[path] = true + fc.addFileToCharts(path) + } + } + for path := range fc.collectedFiles { + if !set[path] { + delete(fc.collectedFiles, path) + fc.removeFileFromCharts(path) + } + } +} + +func (fc *Filecheck) addFileToCharts(path string) { + for _, chart := range *fc.Charts() { + if !strings.HasPrefix(chart.ID, "file_") { + continue + } + + var id string + switch chart.ID { + case fileExistenceChart.ID: + id = fileDimID(path, "exists") + case fileModTimeAgoChart.ID: + id = fileDimID(path, "mtime_ago") + case fileSizeChart.ID: + id = fileDimID(path, "size_bytes") + default: + fc.Warningf("add dimension: couldn't dim id for '%s' chart (file '%s')", chart.ID, path) + continue + } + + dim := &module.Dim{ID: id, Name: reSpace.ReplaceAllString(path, "_")} + + if err := chart.AddDim(dim); err != nil { + fc.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func (fc *Filecheck) removeFileFromCharts(path string) { + for _, chart := range *fc.Charts() { + if !strings.HasPrefix(chart.ID, "file_") { + continue + } + + var id string + switch chart.ID { + case fileExistenceChart.ID: + id = fileDimID(path, "exists") + case fileModTimeAgoChart.ID: + id = 
fileDimID(path, "mtime_ago") + case fileSizeChart.ID: + id = fileDimID(path, "size_bytes") + default: + fc.Warningf("remove dimension: couldn't dim id for '%s' chart (file '%s')", chart.ID, path) + continue + } + + if err := chart.MarkDimRemove(id, true); err != nil { + fc.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func fileDimID(path, metric string) string { + return fmt.Sprintf("file_%s_%s", reSpace.ReplaceAllString(path, "_"), metric) +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/config_schema.json b/src/go/collectors/go.d.plugin/modules/filecheck/config_schema.json new file mode 100644 index 00000000000000..a6b0efca96892a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/config_schema.json @@ -0,0 +1,75 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/filecheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "discovery_every": { + "type": [ + "string", + "integer" + ] + }, + "files": { + "type": "object", + "properties": { + "include": { + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "include", + "exclude" + ] + }, + "dirs": { + "type": "object", + "properties": { + "include": { + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "type": "array", + "items": { + "type": "string" + } + }, + "collect_dir_size": { + "type": "boolean" + } + }, + "required": [ + "include", + "exclude" + ] + } + }, + "oneOf": [ + { + "required": [ + "name", + "files" + ] + }, + { + "required": [ + "name", + "dirs" + ] + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/filecheck.go b/src/go/collectors/go.d.plugin/modules/filecheck/filecheck.go new file mode 100644 index 00000000000000..e1369bc1c3d070 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/filecheck.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("filecheck", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Filecheck { + return &Filecheck{ + Config: Config{ + DiscoveryEvery: web.Duration{Duration: time.Second * 30}, + Files: filesConfig{}, + Dirs: dirsConfig{ + CollectDirSize: true, + }, + }, + collectedFiles: make(map[string]bool), + collectedDirs: make(map[string]bool), + } +} + +type ( + Config struct { + DiscoveryEvery web.Duration `yaml:"discovery_every"` + Files filesConfig `yaml:"files"` + Dirs dirsConfig `yaml:"dirs"` + } + filesConfig struct { + Include []string `yaml:"include"` + Exclude []string `yaml:"exclude"` + } + dirsConfig struct { + Include []string `yaml:"include"` + Exclude []string `yaml:"exclude"` + CollectDirSize bool `yaml:"collect_dir_size"` + } +) + +type Filecheck struct { + module.Base + Config `yaml:",inline"` + + lastDiscoveryFiles time.Time + curFiles []string + collectedFiles map[string]bool + + lastDiscoveryDirs time.Time + curDirs []string + collectedDirs map[string]bool + + charts *module.Charts +} + +func (Filecheck) Cleanup() { +} + +func (fc *Filecheck) Init() bool { + err := fc.validateConfig() + if err != nil { + 
fc.Errorf("error on validating config: %v", err) + return false + } + + charts, err := fc.initCharts() + if err != nil { + fc.Errorf("error on charts initialization: %v", err) + return false + } + fc.charts = charts + + fc.Debugf("monitored files: %v", fc.Files.Include) + fc.Debugf("monitored dirs: %v", fc.Dirs.Include) + return true +} + +func (fc Filecheck) Check() bool { + return true +} + +func (fc *Filecheck) Charts() *module.Charts { + return fc.charts +} + +func (fc *Filecheck) Collect() map[string]int64 { + ms, err := fc.collect() + if err != nil { + fc.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/filecheck_test.go b/src/go/collectors/go.d.plugin/modules/filecheck/filecheck_test.go new file mode 100644 index 00000000000000..5024f646032119 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/filecheck_test.go @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + "strings" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestFilecheck_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestFilecheck_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantNumOfCharts int + wantFail bool + }{ + "default": { + config: New().Config, + wantFail: true, + }, + "empty files->include and dirs->include": { + config: Config{ + Files: filesConfig{}, + Dirs: dirsConfig{}, + }, + wantFail: true, + }, + "files->include and dirs->include": { + config: Config{ + Files: filesConfig{ + Include: []string{ + "/path/to/file1", + "/path/to/file2", + }, + }, + Dirs: dirsConfig{ + Include: []string{ + "/path/to/dir1", + "/path/to/dir2", + }, + CollectDirSize: true, + }, + }, + wantNumOfCharts: len(fileCharts) + len(dirCharts), + }, + "only files->include": { + config: Config{ + Files: filesConfig{ + Include: []string{ + "/path/to/file1", + "/path/to/file2", + }, + }, + }, + wantNumOfCharts: len(fileCharts), + }, + "only dirs->include": { + config: Config{ + Dirs: dirsConfig{ + Include: []string{ + "/path/to/dir1", + "/path/to/dir2", + }, + CollectDirSize: true, + }, + }, + wantNumOfCharts: len(dirCharts), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + fc := New() + fc.Config = test.config + + if test.wantFail { + assert.False(t, fc.Init()) + } else { + require.True(t, fc.Init()) + assert.Equal(t, test.wantNumOfCharts, len(*fc.Charts())) + } + }) + } +} + +func TestFilecheck_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *Filecheck + }{ + "collect files": {prepare: prepareFilecheckFiles}, + "collect files filepath pattern": {prepare: prepareFilecheckGlobFiles}, + "collect only non existent files": {prepare: prepareFilecheckNonExistentFiles}, + "collect dirs": {prepare: prepareFilecheckDirs}, + "collect dirs filepath pattern": {prepare: prepareFilecheckGlobDirs}, + "collect only non existent dirs": {prepare: prepareFilecheckNonExistentDirs}, + "collect files and dirs": {prepare: prepareFilecheckFilesDirs}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + fc := test.prepare() + require.True(t, fc.Init()) + + assert.True(t, fc.Check()) + }) + } +} + +func TestFilecheck_Collect(t *testing.T) { + // TODO: should use TEMP dir and create files/dirs dynamically 
during a test case + tests := map[string]struct { + prepare func() *Filecheck + wantCollected map[string]int64 + }{ + "collect files": { + prepare: prepareFilecheckFiles, + wantCollected: map[string]int64{ + "file_testdata/empty_file.log_exists": 1, + "file_testdata/empty_file.log_mtime_ago": 5081, + "file_testdata/empty_file.log_size_bytes": 0, + "file_testdata/file.log_exists": 1, + "file_testdata/file.log_mtime_ago": 4161, + "file_testdata/file.log_size_bytes": 5707, + "file_testdata/non_existent_file.log_exists": 0, + "num_of_files": 3, + "num_of_dirs": 0, + }, + }, + "collect files filepath pattern": { + prepare: prepareFilecheckGlobFiles, + wantCollected: map[string]int64{ + "file_testdata/empty_file.log_exists": 1, + "file_testdata/empty_file.log_mtime_ago": 5081, + "file_testdata/empty_file.log_size_bytes": 0, + "file_testdata/file.log_exists": 1, + "file_testdata/file.log_mtime_ago": 4161, + "file_testdata/file.log_size_bytes": 5707, + "num_of_files": 2, + "num_of_dirs": 0, + }, + }, + "collect only non existent files": { + prepare: prepareFilecheckNonExistentFiles, + wantCollected: map[string]int64{ + "file_testdata/non_existent_file.log_exists": 0, + "num_of_files": 1, + "num_of_dirs": 0, + }, + }, + "collect dirs": { + prepare: prepareFilecheckDirs, + wantCollected: map[string]int64{ + "dir_testdata/dir_exists": 1, + "dir_testdata/dir_mtime_ago": 4087, + "dir_testdata/dir_num_of_files": 3, + "dir_testdata/dir_size_bytes": 8160, + "dir_testdata/non_existent_dir_exists": 0, + "num_of_files": 0, + "num_of_dirs": 2, + }, + }, + "collect dirs filepath pattern": { + prepare: prepareFilecheckGlobDirs, + wantCollected: map[string]int64{ + "dir_testdata/dir_exists": 1, + "dir_testdata/dir_mtime_ago": 4087, + "dir_testdata/dir_num_of_files": 3, + "dir_testdata/dir_size_bytes": 8160, + "dir_testdata/non_existent_dir_exists": 0, + "num_of_files": 0, + "num_of_dirs": 2, + }, + }, + "collect dirs w/o size": { + prepare: prepareFilecheckDirsWithoutSize, + wantCollected: map[string]int64{ + "dir_testdata/dir_exists": 1, + "dir_testdata/dir_mtime_ago": 4087, + "dir_testdata/dir_num_of_files": 3, + "dir_testdata/non_existent_dir_exists": 0, + "num_of_files": 0, + "num_of_dirs": 2, + }, + }, + "collect only non existent dirs": { + prepare: prepareFilecheckNonExistentDirs, + wantCollected: map[string]int64{ + "dir_testdata/non_existent_dir_exists": 0, + "num_of_files": 0, + "num_of_dirs": 1, + }, + }, + "collect files and dirs": { + prepare: prepareFilecheckFilesDirs, + wantCollected: map[string]int64{ + "dir_testdata/dir_exists": 1, + "dir_testdata/dir_mtime_ago": 4120, + "dir_testdata/dir_num_of_files": 3, + "dir_testdata/dir_size_bytes": 8160, + "dir_testdata/non_existent_dir_exists": 0, + "file_testdata/empty_file.log_exists": 1, + "file_testdata/empty_file.log_mtime_ago": 5176, + "file_testdata/empty_file.log_size_bytes": 0, + "file_testdata/file.log_exists": 1, + "file_testdata/file.log_mtime_ago": 4256, + "file_testdata/file.log_size_bytes": 5707, + "file_testdata/non_existent_file.log_exists": 0, + "num_of_files": 3, + "num_of_dirs": 2, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + fc := test.prepare() + require.True(t, fc.Init()) + + collected := fc.Collect() + + copyModTime(test.wantCollected, collected) + assert.Equal(t, test.wantCollected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, fc, collected) + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, fc *Filecheck, collected map[string]int64) { + // TODO: check 
other charts + for _, chart := range *fc.Charts() { + if chart.Obsolete { + continue + } + switch chart.ID { + case fileExistenceChart.ID, dirExistenceChart.ID: + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } + } +} + +func prepareFilecheckFiles() *Filecheck { + fc := New() + fc.Config.Files.Include = []string{ + "testdata/empty_file.log", + "testdata/file.log", + "testdata/non_existent_file.log", + } + return fc +} + +func prepareFilecheckGlobFiles() *Filecheck { + fc := New() + fc.Config.Files.Include = []string{ + "testdata/*.log", + } + return fc +} + +func prepareFilecheckNonExistentFiles() *Filecheck { + fc := New() + fc.Config.Files.Include = []string{ + "testdata/non_existent_file.log", + } + return fc +} + +func prepareFilecheckDirs() *Filecheck { + fc := New() + fc.Config.Dirs.Include = []string{ + "testdata/dir", + "testdata/non_existent_dir", + } + return fc +} + +func prepareFilecheckGlobDirs() *Filecheck { + fc := New() + fc.Config.Dirs.Include = []string{ + "testdata/*ir", + "testdata/non_existent_dir", + } + return fc +} + +func prepareFilecheckDirsWithoutSize() *Filecheck { + fc := New() + fc.Config.Dirs.Include = []string{ + "testdata/dir", + "testdata/non_existent_dir", + } + fc.Config.Dirs.CollectDirSize = false + return fc +} + +func prepareFilecheckNonExistentDirs() *Filecheck { + fc := New() + fc.Config.Dirs.Include = []string{ + "testdata/non_existent_dir", + } + return fc +} + +func prepareFilecheckFilesDirs() *Filecheck { + fc := New() + fc.Config.Files.Include = []string{ + "testdata/empty_file.log", + "testdata/file.log", + "testdata/non_existent_file.log", + } + fc.Config.Dirs.Include = []string{ + "testdata/dir", + "testdata/non_existent_dir", + } + return fc +} + +func copyModTime(dst, src map[string]int64) { + if src == nil || dst == nil { + return + } + for key := range src { + if strings.Contains(key, "mtime") { + dst[key] = src[key] + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/init.go b/src/go/collectors/go.d.plugin/modules/filecheck/init.go new file mode 100644 index 00000000000000..858e3e503bd823 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/init.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package filecheck + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (fc Filecheck) validateConfig() error { + if len(fc.Files.Include) == 0 && len(fc.Dirs.Include) == 0 { + return errors.New("both 'files->include' and 'dirs->include' are empty") + } + return nil +} + +func (fc Filecheck) initCharts() (*module.Charts, error) { + charts := &module.Charts{} + + if len(fc.Files.Include) > 0 { + if err := charts.Add(*fileCharts.Copy()...); err != nil { + return nil, err + } + } + + if len(fc.Dirs.Include) > 0 { + if err := charts.Add(*dirCharts.Copy()...); err != nil { + return nil, err + } + if !fc.Dirs.CollectDirSize { + if err := charts.Remove(dirSizeChart.ID); err != nil { + return nil, err + } + } + } + + if len(*charts) == 0 { + return nil, errors.New("empty charts") + } + return charts, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/integrations/files_and_directories.md b/src/go/collectors/go.d.plugin/modules/filecheck/integrations/files_and_directories.md new file 
mode 100644 index 00000000000000..4daf317e9b2d6a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/integrations/files_and_directories.md @@ -0,0 +1,226 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/filecheck/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/filecheck/metadata.yaml" +sidebar_label: "Files and directories" +learn_status: "Published" +learn_rel_path: "Data Collection/Linux Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Files and directories + + +<img src="https://netdata.cloud/img/filesystem.svg" width="150"/> + + +Plugin: go.d.plugin +Module: filecheck + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors files and directories. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +This collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Files and directories instance + +TBD + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| filecheck.file_existence | a dimension per file | boolean | +| filecheck.file_mtime_ago | a dimension per file | seconds | +| filecheck.file_size | a dimension per file | bytes | +| filecheck.dir_existence | a dimension per directory | boolean | +| filecheck.dir_mtime_ago | a dimension per directory | seconds | +| filecheck.dir_num_of_files | a dimension per directory | files | +| filecheck.dir_size | a dimension per directory | bytes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/filecheck.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/filecheck.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| files | List of files to monitor. | | yes | +| dirs | List of directories to monitor. | | yes | +| discovery_every | Files and directories discovery interval. 
| 60 | no | + +##### files + +Files matching the selector will be monitored. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) +- Syntax: + +```yaml +files: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 +``` + + +##### dirs + +Directories matching the selector will be monitored. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) +- Syntax: + +```yaml +dirs: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 +``` + + +</details> + +#### Examples + +##### Files + +Files monitoring example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: files_example + files: + include: + - '/path/to/file1' + - '/path/to/file2' + - '/path/to/*.log' + +``` +</details> + +##### Directories + +Directories monitoring example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: files_example + dirs: + collect_dir_size: no + include: + - '/path/to/dir1' + - '/path/to/dir2' + - '/path/to/dir3*' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m filecheck + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/filecheck/metadata.yaml b/src/go/collectors/go.d.plugin/modules/filecheck/metadata.yaml new file mode 100644 index 00000000000000..d4e78cea15287e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/filecheck/metadata.yaml @@ -0,0 +1,188 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-filecheck + plugin_name: go.d.plugin + module_name: filecheck + monitored_instance: + name: Files and directories + link: "" + icon_filename: filesystem.svg + categories: + - data-collection.linux-systems + keywords: + - files + - directories + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors files and directories. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: | + This collector requires the DAC_READ_SEARCH capability, but it is set automatically during installation, so no manual configuration is needed. + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/filecheck.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. 
+ default_value: 10 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: files + description: List of files to monitor. + default_value: "" + required: true + detailed_description: | + Files matching the selector will be monitored. + + - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) + - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) + - Syntax: + + ```yaml + files: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + - name: dirs + description: List of directories to monitor. + default_value: "" + required: true + detailed_description: | + Directories matching the selector will be monitored. + + - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) + - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) + - Syntax: + + ```yaml + dirs: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + - name: discovery_every + description: Files and directories discovery interval. + default_value: 60 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Files + description: Files monitoring example configuration. + config: | + jobs: + - name: files_example + files: + include: + - '/path/to/file1' + - '/path/to/file2' + - '/path/to/*.log' + - name: Directories + description: Directories monitoring example configuration. + config: | + jobs: + - name: files_example + dirs: + collect_dir_size: no + include: + - '/path/to/dir1' + - '/path/to/dir2' + - '/path/to/dir3*' + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: TBD + labels: [] + metrics: + - name: filecheck.file_existence + description: 'File Existence (0: not exists, 1: exists)' + unit: boolean + chart_type: line + dimensions: + - name: a dimension per file + - name: filecheck.file_mtime_ago + description: File Time Since the Last Modification + unit: seconds + chart_type: line + dimensions: + - name: a dimension per file + - name: filecheck.file_size + description: File Size + unit: bytes + chart_type: line + dimensions: + - name: a dimension per file + - name: filecheck.dir_existence + description: 'Dir Existence (0: not exists, 1: exists)' + unit: boolean + chart_type: line + dimensions: + - name: a dimension per directory + - name: filecheck.dir_mtime_ago + description: Dir Time Since the Last Modification + unit: seconds + chart_type: line + dimensions: + - name: a dimension per directory + - name: filecheck.dir_num_of_files + description: Dir Number of Files + unit: files + chart_type: line + dimensions: + - name: a dimension per directory + - name: filecheck.dir_size + description: Dir Size + unit: bytes + chart_type: line + dimensions: + - name: a dimension per directory diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/README.md b/src/go/collectors/go.d.plugin/modules/fluentd/README.md new file mode 120000 index 00000000000000..96241702f7a421 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/README.md @@ -0,0 +1 @@ +integrations/fluentd.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/apiclient.go b/src/go/collectors/go.d.plugin/modules/fluentd/apiclient.go new file mode 100644 index 00000000000000..01773976a1d841 --- /dev/null 
+++ b/src/go/collectors/go.d.plugin/modules/fluentd/apiclient.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const pluginsPath = "/api/plugins.json" + +type pluginsInfo struct { + Payload []pluginData `json:"plugins"` +} + +type pluginData struct { + ID string `json:"plugin_id"` + Type string `json:"type"` + Category string `json:"plugin_category"` + RetryCount *int64 `json:"retry_count"` + BufferTotalQueuedSize *int64 `json:"buffer_total_queued_size"` + BufferQueueLength *int64 `json:"buffer_queue_length"` +} + +func (p pluginData) hasCategory() bool { + return p.RetryCount != nil +} + +func (p pluginData) hasBufferQueueLength() bool { + return p.BufferQueueLength != nil +} + +func (p pluginData) hasBufferTotalQueuedSize() bool { + return p.BufferTotalQueuedSize != nil +} + +func newAPIClient(client *http.Client, request web.Request) *apiClient { + return &apiClient{httpClient: client, request: request} +} + +type apiClient struct { + httpClient *http.Client + request web.Request +} + +func (a apiClient) getPluginsInfo() (*pluginsInfo, error) { + req, err := a.createRequest(pluginsPath) + if err != nil { + return nil, fmt.Errorf("error on creating request : %v", err) + } + + resp, err := a.doRequestOK(req) + defer closeBody(resp) + if err != nil { + return nil, err + } + + var info pluginsInfo + if err = json.NewDecoder(resp.Body).Decode(&info); err != nil { + return nil, fmt.Errorf("error on decoding response from %s : %v", req.URL, err) + } + + return &info, nil +} + +func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { + resp, err := a.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("error on request: %v", err) + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + return resp, nil +} + +func (a apiClient) createRequest(urlPath string) (*http.Request, error) { + req := a.request.Copy() + u, err := url.Parse(req.URL) + if err != nil { + return nil, err + } + + u.Path = path.Join(u.Path, urlPath) + req.URL = u.String() + return web.NewHTTPRequest(req) +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/charts.go b/src/go/collectors/go.d.plugin/modules/fluentd/charts.go new file mode 100644 index 00000000000000..24b8b21aca8dbe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/charts.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dim is an alias for module.Dim + Dim = module.Dim +) + +// TODO: units for buffer charts +var charts = Charts{ + { + ID: "retry_count", + Title: "Plugin Retry Count", + Units: "count", + Fam: "retry count", + Ctx: "fluentd.retry_count", + }, + { + ID: "buffer_queue_length", + Title: "Plugin Buffer Queue Length", + Units: "queue length", + Fam: "buffer", + Ctx: "fluentd.buffer_queue_length", + }, + { + ID: "buffer_total_queued_size", + Title: "Plugin Buffer Total Size", + Units: "buffer total size", + Fam: "buffer", + Ctx: "fluentd.buffer_total_queued_size", + }, +} diff --git 
a/src/go/collectors/go.d.plugin/modules/fluentd/config_schema.json b/src/go/collectors/go.d.plugin/modules/fluentd/config_schema.json new file mode 100644 index 00000000000000..f5bfe30477e603 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/config_schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/fluentd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "permit_plugin_id": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/fluentd.go b/src/go/collectors/go.d.plugin/modules/fluentd/fluentd.go new file mode 100644 index 00000000000000..5b627b7b44bf6a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/fluentd.go @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import ( + _ "embed" + "fmt" + "time" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("fluentd", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + defaultURL = "http://127.0.0.1:24220" + defaultHTTPTimeout = time.Second * 2 +) + +// New creates Fluentd with default values. +func New() *Fluentd { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }} + + return &Fluentd{ + Config: config, + activePlugins: make(map[string]bool), + charts: charts.Copy(), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + PermitPlugin string `yaml:"permit_plugin_id"` +} + +// Fluentd Fluentd module. +type Fluentd struct { + module.Base + Config `yaml:",inline"` + + permitPlugin matcher.Matcher + apiClient *apiClient + activePlugins map[string]bool + charts *Charts +} + +// Cleanup makes cleanup. +func (Fluentd) Cleanup() {} + +// Init makes initialization. +func (f *Fluentd) Init() bool { + if f.URL == "" { + f.Error("URL not set") + return false + } + + if f.PermitPlugin != "" { + m, err := matcher.NewSimplePatternsMatcher(f.PermitPlugin) + if err != nil { + f.Errorf("error on creating permit_plugin matcher : %v", err) + return false + } + f.permitPlugin = matcher.WithCache(m) + } + + client, err := web.NewHTTPClient(f.Client) + if err != nil { + f.Errorf("error on creating client : %v", err) + return false + } + + f.apiClient = newAPIClient(client, f.Request) + + f.Debugf("using URL %s", f.URL) + f.Debugf("using timeout: %s", f.Timeout.Duration) + + return true +} + +// Check makes check. +func (f Fluentd) Check() bool { return len(f.Collect()) > 0 } + +// Charts creates Charts. 
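+// The chart set is copied from the package-level chart templates in New;
+// per-plugin dimensions are added to it lazily by Collect (via
+// addPluginToCharts) as new plugins are discovered.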
+func (f Fluentd) Charts() *Charts { return f.charts } + +// Collect collects metrics. +func (f *Fluentd) Collect() map[string]int64 { + info, err := f.apiClient.getPluginsInfo() + + if err != nil { + f.Error(err) + return nil + } + + metrics := make(map[string]int64) + + for _, p := range info.Payload { + // TODO: if p.Category == "input" ? + if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() { + continue + } + + if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) { + f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category) + continue + } + + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + metrics[id+"_retry_count"] = *p.RetryCount + } + if p.hasBufferQueueLength() { + metrics[id+"_buffer_queue_length"] = *p.BufferQueueLength + } + if p.hasBufferTotalQueuedSize() { + metrics[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize + } + + if !f.activePlugins[id] { + f.activePlugins[id] = true + f.addPluginToCharts(p) + } + + } + + return metrics +} + +func (f *Fluentd) addPluginToCharts(p pluginData) { + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + chart := f.charts.Get("retry_count") + _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferQueueLength() { + chart := f.charts.Get("buffer_queue_length") + _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferTotalQueuedSize() { + chart := f.charts.Get("buffer_total_queued_size") + _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID}) + chart.MarkNotCreated() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/fluentd_test.go b/src/go/collectors/go.d.plugin/modules/fluentd/fluentd_test.go new file mode 100644 index 00000000000000..492e2ebaae47a4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/fluentd_test.go @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testDataPlugins, _ = os.ReadFile("testdata/plugins.json") + +func TestNew(t *testing.T) { + job := New() + assert.IsType(t, (*Fluentd)(nil), job) + assert.NotNil(t, job.charts) + assert.NotNil(t, job.activePlugins) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestFluentd_Init(t *testing.T) { + // OK + job := New() + assert.True(t, job.Init()) + assert.NotNil(t, job.apiClient) + + //NG + job = New() + job.URL = "" + assert.False(t, job.Init()) +} + +func TestFluentd_Check(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testDataPlugins) + })) + defer ts.Close() + + // OK + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + // NG + job = New() + job.URL = "http://127.0.0.1:38001/api/plugins.json" + require.True(t, job.Init()) + require.False(t, job.Check()) +} + +func TestFluentd_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestFluentd_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestFluentd_Collect(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testDataPlugins) + })) + defer ts.Close() + + job := New() + 
job.URL = ts.URL + + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "output_stdout_stdout_output_retry_count": 0, + "output_td_tdlog_output_retry_count": 0, + "output_td_tdlog_output_buffer_queue_length": 0, + "output_td_tdlog_output_buffer_total_queued_size": 0, + } + assert.Equal(t, expected, job.Collect()) + assert.Len(t, job.charts.Get("retry_count").Dims, 2) + assert.Len(t, job.charts.Get("buffer_queue_length").Dims, 1) + assert.Len(t, job.charts.Get("buffer_total_queued_size").Dims, 1) +} + +func TestFluentd_InvalidData(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestFluentd_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/integrations/fluentd.md b/src/go/collectors/go.d.plugin/modules/fluentd/integrations/fluentd.md new file mode 100644 index 00000000000000..fd11351eeb6152 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/integrations/fluentd.md @@ -0,0 +1,221 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/fluentd/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/fluentd/metadata.yaml" +sidebar_label: "Fluentd" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Fluentd + + +<img src="https://netdata.cloud/img/fluentd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: fluentd + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Fluentd servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Fluentd instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| fluentd.retry_count | a dimension per plugin | count | +| fluentd.buffer_queue_length | a dimension per plugin | queue_length | +| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable monitor agent + +To enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api). 
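+
+For example, a minimal `monitor_agent` source section in the Fluentd configuration looks like this (a sketch based on the official docs; the bind address and port are your choice, but the collector's default URL expects port 24220):
+
+```text
+<source>
+  @type monitor_agent
+  bind 0.0.0.0
+  port 24220
+</source>
+```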
+ + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/fluentd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/fluentd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:24220 | yes | +| timeout | HTTP request timeout. | 2 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:24220 + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:24220 + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Fluentd with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:24220 + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:24220 + + - name: remote + url: http://192.0.2.1:24220 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m fluentd + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/metadata.yaml b/src/go/collectors/go.d.plugin/modules/fluentd/metadata.yaml new file mode 100644 index 00000000000000..99e85da1a31048 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/metadata.yaml @@ -0,0 +1,192 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-fluentd + plugin_name: go.d.plugin + module_name: fluentd + monitored_instance: + name: Fluentd + link: https://www.fluentd.org/ + icon_filename: fluentd.svg + categories: + - data-collection.logs-servers + keywords: + - fluentd + - logging + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Fluentd servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable monitor agent + description: | + To enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api). + configuration: + file: + name: go.d/fluentd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:24220 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+ default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:24220 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:24220 + username: username + password: password + - name: HTTPS with self-signed certificate + description: Fluentd with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1:24220 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:24220 + + - name: remote + url: http://192.0.2.1:24220 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: fluentd.retry_count + description: Plugin Retry Count + unit: count + chart_type: line + dimensions: + - name: a dimension per plugin + - name: fluentd.buffer_queue_length + description: Plugin Buffer Queue Length + unit: queue_length + chart_type: line + dimensions: + - name: a dimension per plugin + - name: fluentd.buffer_total_queued_size + description: Plugin Buffer Total Size + unit: queued_size + chart_type: line + dimensions: + - name: a dimension per plugin diff --git a/src/go/collectors/go.d.plugin/modules/fluentd/testdata/plugins.json b/src/go/collectors/go.d.plugin/modules/fluentd/testdata/plugins.json new file mode 100644 index 00000000000000..1fd921f7c60497 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/fluentd/testdata/plugins.json @@ -0,0 +1,101 @@ +{ + "plugins": [ + { + "plugin_id": "input_forward", + "plugin_category": "input", + "type": "forward", + "config": { + "@type": "forward", + "@id": "input_forward" + }, + "output_plugin": false, + "retry_count": null + }, + { + "plugin_id": "input_http", + "plugin_category": "input", + "type": "http", + "config": { + "@type": "http", + "@id": "input_http", + "port": "8888" + }, + "output_plugin": false, + "retry_count": null + }, + { + "plugin_id": "input_debug_agent", + "plugin_category": "input", + "type": "debug_agent", + "config": { + "@type": "debug_agent", + "@id": "input_debug_agent", + "bind": "127.0.0.1", + "port": "24230" + }, + "output_plugin": false, + "retry_count": null + }, + { + "plugin_id": "object:3f7e4d08e3e0", + "plugin_category": "input", + "type": "monitor_agent", + "config": { + "@type": "monitor_agent", + "bind": "0.0.0.0", + "port": "24220" + }, + "output_plugin": false, + "retry_count": null + }, + { + "plugin_id": "output_td", + "plugin_category": "output", + "type": "tdlog", + "config": { + "@type": "tdlog", + "@id": "output_td", + "apikey": "xxxxxx", + "auto_create_table": "" + }, + "output_plugin": true, + "buffer_queue_length": 0, + "buffer_total_queued_size": 0, + "retry_count": 0, + "retry": {} + }, + { + "plugin_id": "output_stdout", + "plugin_category": "output", + "type": "stdout", + "config": { + "@type": "stdout", + "@id": "output_stdout" + }, + "output_plugin": true, + "retry_count": 0, + "retry": {} + }, + { + "plugin_id": "object:3f7e4b836770", + "plugin_category": "filter", + "type": 
"grep", + "config": { + "@type": "grep", + "regexp1": "message cool" + }, + "output_plugin": false, + "retry_count": null + }, + { + "plugin_id": "object:3f7e4bbe5a38", + "plugin_category": "filter", + "type": "record_transformer", + "config": { + "@type": "record_transformer" + }, + "output_plugin": false, + "retry_count": null + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/README.md b/src/go/collectors/go.d.plugin/modules/freeradius/README.md new file mode 120000 index 00000000000000..66deefdb74b7bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/README.md @@ -0,0 +1 @@ +integrations/freeradius.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/api/client.go b/src/go/collectors/go.d.plugin/modules/freeradius/api/client.go new file mode 100644 index 00000000000000..01f784c17d026f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/api/client.go @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package api + +import ( + "context" + "crypto/hmac" + "crypto/md5" + "fmt" + "net" + "strconv" + "time" + + "layeh.com/radius" + "layeh.com/radius/rfc2869" +) + +type Status struct { + AccessRequests int64 `stm:"access-requests"` + AccessAccepts int64 `stm:"access-accepts"` + AccessRejects int64 `stm:"access-rejects"` + AccessChallenges int64 `stm:"access-challenges"` + AuthResponses int64 `stm:"auth-responses"` + AuthDuplicateRequests int64 `stm:"auth-duplicate-requests"` + AuthMalformedRequests int64 `stm:"auth-malformed-requests"` + AuthInvalidRequests int64 `stm:"auth-invalid-requests"` + AuthDroppedRequests int64 `stm:"auth-dropped-requests"` + AuthUnknownTypes int64 `stm:"auth-unknown-types"` + + AccountingRequests int64 `stm:"accounting-requests"` + AccountingResponses int64 `stm:"accounting-responses"` + AcctDuplicateRequests int64 `stm:"acct-duplicate-requests"` + AcctMalformedRequests int64 `stm:"acct-malformed-requests"` + AcctInvalidRequests int64 `stm:"acct-invalid-requests"` + AcctDroppedRequests int64 `stm:"acct-dropped-requests"` + AcctUnknownTypes int64 `stm:"acct-unknown-types"` + + ProxyAccessRequests int64 `stm:"proxy-access-requests"` + ProxyAccessAccepts int64 `stm:"proxy-access-accepts"` + ProxyAccessRejects int64 `stm:"proxy-access-rejects"` + ProxyAccessChallenges int64 `stm:"proxy-access-challenges"` + ProxyAuthResponses int64 `stm:"proxy-auth-responses"` + ProxyAuthDuplicateRequests int64 `stm:"proxy-auth-duplicate-requests"` + ProxyAuthMalformedRequests int64 `stm:"proxy-auth-malformed-requests"` + ProxyAuthInvalidRequests int64 `stm:"proxy-auth-invalid-requests"` + ProxyAuthDroppedRequests int64 `stm:"proxy-auth-dropped-requests"` + ProxyAuthUnknownTypes int64 `stm:"proxy-auth-unknown-types"` + + ProxyAccountingRequests int64 `stm:"proxy-accounting-requests"` + ProxyAccountingResponses int64 `stm:"proxy-accounting-responses"` + ProxyAcctDuplicateRequests int64 `stm:"proxy-acct-duplicate-requests"` + ProxyAcctMalformedRequests int64 `stm:"proxy-acct-malformed-requests"` + ProxyAcctInvalidRequests int64 `stm:"proxy-acct-invalid-requests"` + ProxyAcctDroppedRequests int64 `stm:"proxy-acct-dropped-requests"` + ProxyAcctUnknownTypes int64 `stm:"proxy-acct-unknown-types"` +} + +type ( + radiusClient interface { + Exchange(ctx context.Context, packet *radius.Packet, address string) (*radius.Packet, error) + } + Config struct { + Address string + Port int + Secret string + Timeout time.Duration + } + Client struct { + address 
string + secret string + timeout time.Duration + radiusClient + } +) + +func New(conf Config) *Client { + return &Client{ + address: net.JoinHostPort(conf.Address, strconv.Itoa(conf.Port)), + secret: conf.Secret, + timeout: conf.Timeout, + radiusClient: &radius.Client{Retry: time.Second, MaxPacketErrors: 10}, + } +} + +func (c Client) Status() (*Status, error) { + packet, err := newStatusServerPacket(c.secret) + if err != nil { + return nil, fmt.Errorf("error on creating StatusServer packet: %v", err) + } + + resp, err := c.queryServer(packet) + if err != nil { + return nil, fmt.Errorf("error on request to '%s': %v", c.address, err) + } + + return decodeResponse(resp), nil +} + +func (c Client) queryServer(packet *radius.Packet) (*radius.Packet, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + resp, err := c.Exchange(ctx, packet, c.address) + if err != nil { + return nil, err + } + + if resp.Code != radius.CodeAccessAccept { + return nil, fmt.Errorf("'%s' returned response code %d", c.address, resp.Code) + } + return resp, nil +} + +func newStatusServerPacket(secret string) (*radius.Packet, error) { + // https://wiki.freeradius.org/config/Status#status-of-freeradius-server + packet := radius.New(radius.CodeStatusServer, []byte(secret)) + if err := FreeRADIUSStatisticsType_Set(packet, FreeRADIUSStatisticsType_Value_All); err != nil { + return nil, err + } + if err := rfc2869.MessageAuthenticator_Set(packet, make([]byte, 16)); err != nil { + return nil, err + } + hash := hmac.New(md5.New, packet.Secret) + encode, err := packet.Encode() + if err != nil { + return nil, err + } + if _, err := hash.Write(encode); err != nil { + return nil, err + } + if err := rfc2869.MessageAuthenticator_Set(packet, hash.Sum(nil)); err != nil { + return nil, err + } + return packet, nil +} + +func decodeResponse(resp *radius.Packet) *Status { + return &Status{ + AccessRequests: int64(FreeRADIUSTotalAccessRequests_Get(resp)), + AccessAccepts: int64(FreeRADIUSTotalAccessAccepts_Get(resp)), + AccessRejects: int64(FreeRADIUSTotalAccessRejects_Get(resp)), + AccessChallenges: int64(FreeRADIUSTotalAccessChallenges_Get(resp)), + AuthResponses: int64(FreeRADIUSTotalAuthResponses_Get(resp)), + AuthDuplicateRequests: int64(FreeRADIUSTotalAuthDuplicateRequests_Get(resp)), + AuthMalformedRequests: int64(FreeRADIUSTotalAuthMalformedRequests_Get(resp)), + AuthInvalidRequests: int64(FreeRADIUSTotalAuthInvalidRequests_Get(resp)), + AuthDroppedRequests: int64(FreeRADIUSTotalAuthDroppedRequests_Get(resp)), + AuthUnknownTypes: int64(FreeRADIUSTotalAuthUnknownTypes_Get(resp)), + AccountingRequests: int64(FreeRADIUSTotalAccountingRequests_Get(resp)), + AccountingResponses: int64(FreeRADIUSTotalAccountingResponses_Get(resp)), + AcctDuplicateRequests: int64(FreeRADIUSTotalAcctDuplicateRequests_Get(resp)), + AcctMalformedRequests: int64(FreeRADIUSTotalAcctMalformedRequests_Get(resp)), + AcctInvalidRequests: int64(FreeRADIUSTotalAcctInvalidRequests_Get(resp)), + AcctDroppedRequests: int64(FreeRADIUSTotalAcctDroppedRequests_Get(resp)), + AcctUnknownTypes: int64(FreeRADIUSTotalAcctUnknownTypes_Get(resp)), + ProxyAccessRequests: int64(FreeRADIUSTotalProxyAccessRequests_Get(resp)), + ProxyAccessAccepts: int64(FreeRADIUSTotalProxyAccessAccepts_Get(resp)), + ProxyAccessRejects: int64(FreeRADIUSTotalProxyAccessRejects_Get(resp)), + ProxyAccessChallenges: int64(FreeRADIUSTotalProxyAccessChallenges_Get(resp)), + ProxyAuthResponses: int64(FreeRADIUSTotalProxyAuthResponses_Get(resp)), + 
ProxyAuthDuplicateRequests: int64(FreeRADIUSTotalProxyAuthDuplicateRequests_Get(resp)), + ProxyAuthMalformedRequests: int64(FreeRADIUSTotalProxyAuthMalformedRequests_Get(resp)), + ProxyAuthInvalidRequests: int64(FreeRADIUSTotalProxyAuthInvalidRequests_Get(resp)), + ProxyAuthDroppedRequests: int64(FreeRADIUSTotalProxyAuthDroppedRequests_Get(resp)), + ProxyAuthUnknownTypes: int64(FreeRADIUSTotalProxyAuthUnknownTypes_Get(resp)), + ProxyAccountingRequests: int64(FreeRADIUSTotalProxyAccountingRequests_Get(resp)), + ProxyAccountingResponses: int64(FreeRADIUSTotalProxyAccountingResponses_Get(resp)), + ProxyAcctDuplicateRequests: int64(FreeRADIUSTotalProxyAcctDuplicateRequests_Get(resp)), + ProxyAcctMalformedRequests: int64(FreeRADIUSTotalProxyAcctMalformedRequests_Get(resp)), + ProxyAcctInvalidRequests: int64(FreeRADIUSTotalProxyAcctInvalidRequests_Get(resp)), + ProxyAcctDroppedRequests: int64(FreeRADIUSTotalProxyAcctDroppedRequests_Get(resp)), + ProxyAcctUnknownTypes: int64(FreeRADIUSTotalProxyAcctUnknownTypes_Get(resp)), + } +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/api/client_test.go b/src/go/collectors/go.d.plugin/modules/freeradius/api/client_test.go new file mode 100644 index 00000000000000..9323aa9920922b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/api/client_test.go @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package api + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "layeh.com/radius" +) + +func TestNew(t *testing.T) { + assert.NotNil(t, New(Config{})) +} + +func TestClient_Status(t *testing.T) { + var c Client + c.radiusClient = newOKMockFreeRADIUSClient() + + expected := Status{ + AccessRequests: 1, + AccessAccepts: 2, + AccessRejects: 3, + AccessChallenges: 4, + AuthResponses: 5, + AuthDuplicateRequests: 6, + AuthMalformedRequests: 7, + AuthInvalidRequests: 8, + AuthDroppedRequests: 9, + AuthUnknownTypes: 10, + AccountingRequests: 11, + AccountingResponses: 12, + AcctDuplicateRequests: 13, + AcctMalformedRequests: 14, + AcctInvalidRequests: 15, + AcctDroppedRequests: 16, + AcctUnknownTypes: 17, + ProxyAccessRequests: 18, + ProxyAccessAccepts: 19, + ProxyAccessRejects: 20, + ProxyAccessChallenges: 21, + ProxyAuthResponses: 22, + ProxyAuthDuplicateRequests: 23, + ProxyAuthMalformedRequests: 24, + ProxyAuthInvalidRequests: 25, + ProxyAuthDroppedRequests: 26, + ProxyAuthUnknownTypes: 27, + ProxyAccountingRequests: 28, + ProxyAccountingResponses: 29, + ProxyAcctDuplicateRequests: 30, + ProxyAcctMalformedRequests: 31, + ProxyAcctInvalidRequests: 32, + ProxyAcctDroppedRequests: 33, + ProxyAcctUnknownTypes: 34, + } + + s, err := c.Status() + + require.NoError(t, err) + assert.Equal(t, expected, *s) +} + +func TestClient_Status_ReturnsErrorIfClientExchangeReturnsError(t *testing.T) { + var c Client + c.radiusClient = newErrorMockFreeRADIUSClient() + + s, err := c.Status() + + assert.Nil(t, s) + assert.Error(t, err) +} + +func TestClient_Status_ReturnsErrorIfServerResponseHasBadStatus(t *testing.T) { + var c Client + c.radiusClient = newBadRespCodeMockFreeRADIUSClient() + + s, err := c.Status() + + assert.Nil(t, s) + assert.Error(t, err) +} + +type mockFreeRADIUSClient struct { + errOnExchange bool + badRespCode bool +} + +func newOKMockFreeRADIUSClient() *mockFreeRADIUSClient { + return &mockFreeRADIUSClient{} +} + +func newErrorMockFreeRADIUSClient() *mockFreeRADIUSClient { + return &mockFreeRADIUSClient{errOnExchange: true} +} + +func 
newBadRespCodeMockFreeRADIUSClient() *mockFreeRADIUSClient { + return &mockFreeRADIUSClient{badRespCode: true} +} + +func (m mockFreeRADIUSClient) Exchange(_ context.Context, _ *radius.Packet, _ string) (*radius.Packet, error) { + if m.errOnExchange { + return nil, errors.New("mock Exchange error") + } + resp := radius.New(radius.CodeAccessAccept, []byte("secret")) + if m.badRespCode { + resp.Code = radius.CodeAccessReject + } else { + resp.Code = radius.CodeAccessAccept + } + addValues(resp) + return resp, nil +} + +func addValues(resp *radius.Packet) { + _ = FreeRADIUSTotalAccessRequests_Add(resp, 1) + _ = FreeRADIUSTotalAccessAccepts_Add(resp, 2) + _ = FreeRADIUSTotalAccessRejects_Add(resp, 3) + _ = FreeRADIUSTotalAccessChallenges_Add(resp, 4) + _ = FreeRADIUSTotalAuthResponses_Add(resp, 5) + _ = FreeRADIUSTotalAuthDuplicateRequests_Add(resp, 6) + _ = FreeRADIUSTotalAuthMalformedRequests_Add(resp, 7) + _ = FreeRADIUSTotalAuthInvalidRequests_Add(resp, 8) + _ = FreeRADIUSTotalAuthDroppedRequests_Add(resp, 9) + _ = FreeRADIUSTotalAuthUnknownTypes_Add(resp, 10) + _ = FreeRADIUSTotalAccountingRequests_Add(resp, 11) + _ = FreeRADIUSTotalAccountingResponses_Add(resp, 12) + _ = FreeRADIUSTotalAcctDuplicateRequests_Add(resp, 13) + _ = FreeRADIUSTotalAcctMalformedRequests_Add(resp, 14) + _ = FreeRADIUSTotalAcctInvalidRequests_Add(resp, 15) + _ = FreeRADIUSTotalAcctDroppedRequests_Add(resp, 16) + _ = FreeRADIUSTotalAcctUnknownTypes_Add(resp, 17) + _ = FreeRADIUSTotalProxyAccessRequests_Add(resp, 18) + _ = FreeRADIUSTotalProxyAccessAccepts_Add(resp, 19) + _ = FreeRADIUSTotalProxyAccessRejects_Add(resp, 20) + _ = FreeRADIUSTotalProxyAccessChallenges_Add(resp, 21) + _ = FreeRADIUSTotalProxyAuthResponses_Add(resp, 22) + _ = FreeRADIUSTotalProxyAuthDuplicateRequests_Add(resp, 23) + _ = FreeRADIUSTotalProxyAuthMalformedRequests_Add(resp, 24) + _ = FreeRADIUSTotalProxyAuthInvalidRequests_Add(resp, 25) + _ = FreeRADIUSTotalProxyAuthDroppedRequests_Add(resp, 26) + _ = FreeRADIUSTotalProxyAuthUnknownTypes_Add(resp, 27) + _ = FreeRADIUSTotalProxyAccountingRequests_Add(resp, 28) + _ = FreeRADIUSTotalProxyAccountingResponses_Add(resp, 29) + _ = FreeRADIUSTotalProxyAcctDuplicateRequests_Add(resp, 30) + _ = FreeRADIUSTotalProxyAcctMalformedRequests_Add(resp, 31) + _ = FreeRADIUSTotalProxyAcctInvalidRequests_Add(resp, 32) + _ = FreeRADIUSTotalProxyAcctDroppedRequests_Add(resp, 33) + _ = FreeRADIUSTotalProxyAcctUnknownTypes_Add(resp, 34) +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/api/dictionary.go b/src/go/collectors/go.d.plugin/modules/freeradius/api/dictionary.go new file mode 100644 index 00000000000000..0ed348ae3924c0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/api/dictionary.go @@ -0,0 +1,2683 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package api + +import ( + "strconv" + "time" + + "layeh.com/radius" + "layeh.com/radius/rfc2865" +) + +/* +The response from the freeradius 3.0.12+dfsg-5+deb9u1 + +Sent Status-Server Id 23 from 0.0.0.0:37131 to 127.0.0.1:18121 length 50 + Message-Authenticator = 0x00 + FreeRADIUS-Statistics-Type = All +Received Access-Accept Id 23 from 127.0.0.1:18121 to 0.0.0.0:0 length 536 + FreeRADIUS-Total-Access-Requests = 3 + FreeRADIUS-Total-Access-Accepts = 0 + FreeRADIUS-Total-Access-Rejects = 0 + FreeRADIUS-Total-Access-Challenges = 0 + FreeRADIUS-Total-Auth-Responses = 0 + FreeRADIUS-Total-Auth-Duplicate-Requests = 0 + FreeRADIUS-Total-Auth-Malformed-Requests = 0 + FreeRADIUS-Total-Auth-Invalid-Requests = 0 + 
FreeRADIUS-Total-Auth-Dropped-Requests = 0 + FreeRADIUS-Total-Auth-Unknown-Types = 0 + FreeRADIUS-Total-Accounting-Requests = 0 + FreeRADIUS-Total-Accounting-Responses = 0 + FreeRADIUS-Total-Acct-Duplicate-Requests = 0 + FreeRADIUS-Total-Acct-Malformed-Requests = 0 + FreeRADIUS-Total-Acct-Invalid-Requests = 0 + FreeRADIUS-Total-Acct-Dropped-Requests = 0 + FreeRADIUS-Total-Acct-Unknown-Types = 0 + FreeRADIUS-Total-Proxy-Access-Requests = 0 + FreeRADIUS-Total-Proxy-Access-Accepts = 0 + FreeRADIUS-Total-Proxy-Access-Rejects = 0 + FreeRADIUS-Total-Proxy-Access-Challenges = 0 + FreeRADIUS-Total-Proxy-Auth-Responses = 0 + FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = 0 + FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = 0 + FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = 0 + FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = 0 + FreeRADIUS-Total-Proxy-Auth-Unknown-Types = 0 + FreeRADIUS-Total-Proxy-Accounting-Requests = 0 + FreeRADIUS-Total-Proxy-Accounting-Responses = 0 + FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = 0 + FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = 0 + FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = 0 + FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = 0 + FreeRADIUS-Total-Proxy-Acct-Unknown-Types = 0 + FreeRADIUS-Stats-Start-Time = "Dec 27 2018 05:08:02 +09" + FreeRADIUS-Stats-HUP-Time = "Dec 27 2018 05:08:02 +09" + FreeRADIUS-Queue-Len-Internal = 0 + FreeRADIUS-Queue-Len-Proxy = 0 + FreeRADIUS-Queue-Len-Auth = 0 + FreeRADIUS-Queue-Len-Acct = 0 + FreeRADIUS-Queue-Len-Detail = 0 + FreeRADIUS-Queue-PPS-In = 0 + FreeRADIUS-Queue-PPS-Out = 0 +*/ + +// Code generation tool https://github.com/layeh/radius/tree/master/cmd/radius-dict-gen. +// Used dictionary: dictionary.freeradius from /usr/share/freeradius/ directory (freeradius 3.0.12+dfsg-5+deb9u1) +// All attributes that are not in response were removed to reduce the amount of generated code. + +// Code generated by radius-dict-gen. DO NOT EDIT. 
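+//
+// Each statistic below is carried as a FreeRADIUS Vendor-Specific Attribute
+// (vendor ID 11344). Inside the RFC 2865 Vendor-Specific attribute, the vendor
+// data is a sequence of [type, length, value] triples, where the length byte
+// covers the whole triple and the value is a 32-bit big-endian integer; the
+// unexported helpers below add, look up, set, and delete those triples.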
+ +const ( + _FreeRADIUS_VendorID = 11344 +) + +func _FreeRADIUS_AddVendor(p *radius.Packet, typ byte, attr radius.Attribute) (err error) { + var vsa radius.Attribute + vendor := make(radius.Attribute, 2+len(attr)) + vendor[0] = typ + vendor[1] = byte(len(vendor)) + copy(vendor[2:], attr) + vsa, err = radius.NewVendorSpecific(_FreeRADIUS_VendorID, vendor) + if err != nil { + return + } + p.Add(rfc2865.VendorSpecific_Type, vsa) + return +} + +func _FreeRADIUS_GetsVendor(p *radius.Packet, typ byte) (values []radius.Attribute) { + for _, attr := range p.Attributes[rfc2865.VendorSpecific_Type] { + vendorID, vsa, err := radius.VendorSpecific(attr) + if err != nil || vendorID != _FreeRADIUS_VendorID { + continue + } + for len(vsa) >= 3 { + vsaTyp, vsaLen := vsa[0], vsa[1] + if int(vsaLen) > len(vsa) || vsaLen < 3 { + break + } + if vsaTyp == typ { + values = append(values, vsa[2:int(vsaLen)]) + } + vsa = vsa[int(vsaLen):] + } + } + return +} + +func _FreeRADIUS_LookupVendor(p *radius.Packet, typ byte) (attr radius.Attribute, ok bool) { + for _, a := range p.Attributes[rfc2865.VendorSpecific_Type] { + vendorID, vsa, err := radius.VendorSpecific(a) + if err != nil || vendorID != _FreeRADIUS_VendorID { + continue + } + for len(vsa) >= 3 { + vsaTyp, vsaLen := vsa[0], vsa[1] + if int(vsaLen) > len(vsa) || vsaLen < 3 { + break + } + if vsaTyp == typ { + return vsa[2:int(vsaLen)], true + } + vsa = vsa[int(vsaLen):] + } + } + return +} + +func _FreeRADIUS_SetVendor(p *radius.Packet, typ byte, attr radius.Attribute) (err error) { + for i := 0; i < len(p.Attributes[rfc2865.VendorSpecific_Type]); { + vendorID, vsa, err := radius.VendorSpecific(p.Attributes[rfc2865.VendorSpecific_Type][i]) + if err != nil || vendorID != _FreeRADIUS_VendorID { + i++ + continue + } + for j := 0; len(vsa[j:]) >= 3; { + vsaTyp, vsaLen := vsa[j], vsa[j+1] + if int(vsaLen) > len(vsa[j:]) || vsaLen < 3 { + i++ + break + } + if vsaTyp == typ { + vsa = append(vsa[:j], vsa[j+int(vsaLen):]...) + } + j += int(vsaLen) + } + if len(vsa) > 0 { + copy(p.Attributes[rfc2865.VendorSpecific_Type][i][4:], vsa) + i++ + } else { + p.Attributes[rfc2865.VendorSpecific_Type] = append(p.Attributes[rfc2865.VendorSpecific_Type][:i], p.Attributes[rfc2865.VendorSpecific_Type][i+1:]...) + } + } + return _FreeRADIUS_AddVendor(p, typ, attr) +} + +func _FreeRADIUS_DelVendor(p *radius.Packet, typ byte) { +vsaLoop: + for i := 0; i < len(p.Attributes[rfc2865.VendorSpecific_Type]); { + attr := p.Attributes[rfc2865.VendorSpecific_Type][i] + vendorID, vsa, err := radius.VendorSpecific(attr) + if err != nil || vendorID != _FreeRADIUS_VendorID { + continue + } + offset := 0 + for len(vsa[offset:]) >= 3 { + vsaTyp, vsaLen := vsa[offset], vsa[offset+1] + if int(vsaLen) > len(vsa) || vsaLen < 3 { + continue vsaLoop + } + if vsaTyp == typ { + copy(vsa[offset:], vsa[offset+int(vsaLen):]) + vsa = vsa[:len(vsa)-int(vsaLen)] + } else { + offset += int(vsaLen) + } + } + if offset == 0 { + p.Attributes[rfc2865.VendorSpecific_Type] = append(p.Attributes[rfc2865.VendorSpecific_Type][:i], p.Attributes[rfc2865.VendorSpecific_Type][i+1:]...)
+ } else { + i++ + } + } + return +} + +type FreeRADIUSStatisticsType uint32 + +const ( + FreeRADIUSStatisticsType_Value_All FreeRADIUSStatisticsType = 31 +) + +var FreeRADIUSStatisticsType_Strings = map[FreeRADIUSStatisticsType]string{ + FreeRADIUSStatisticsType_Value_All: "All", +} + +func (a FreeRADIUSStatisticsType) String() string { + if str, ok := FreeRADIUSStatisticsType_Strings[a]; ok { + return str + } + return "FreeRADIUSStatisticsType(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSStatisticsType_Add(p *radius.Packet, value FreeRADIUSStatisticsType) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 127, a) +} + +func FreeRADIUSStatisticsType_Get(p *radius.Packet) (value FreeRADIUSStatisticsType) { + value, _ = FreeRADIUSStatisticsType_Lookup(p) + return +} + +func FreeRADIUSStatisticsType_Gets(p *radius.Packet) (values []FreeRADIUSStatisticsType, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 127) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSStatisticsType(i)) + } + return +} + +func FreeRADIUSStatisticsType_Lookup(p *radius.Packet) (value FreeRADIUSStatisticsType, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 127) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSStatisticsType(i) + return +} + +func FreeRADIUSStatisticsType_Set(p *radius.Packet, value FreeRADIUSStatisticsType) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 127, a) +} + +func FreeRADIUSStatisticsType_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 127) +} + +type FreeRADIUSTotalAccessRequests uint32 + +var FreeRADIUSTotalAccessRequests_Strings = map[FreeRADIUSTotalAccessRequests]string{} + +func (a FreeRADIUSTotalAccessRequests) String() string { + if str, ok := FreeRADIUSTotalAccessRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAccessRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccessRequests_Add(p *radius.Packet, value FreeRADIUSTotalAccessRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 128, a) +} + +func FreeRADIUSTotalAccessRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAccessRequests) { + value, _ = FreeRADIUSTotalAccessRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAccessRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 128) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccessRequests(i)) + } + return +} + +func FreeRADIUSTotalAccessRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 128) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccessRequests(i) + return +} + +func FreeRADIUSTotalAccessRequests_Set(p *radius.Packet, value FreeRADIUSTotalAccessRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 128, a) +} + +func FreeRADIUSTotalAccessRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 128) +} + +type FreeRADIUSTotalAccessAccepts uint32 + +var FreeRADIUSTotalAccessAccepts_Strings = 
map[FreeRADIUSTotalAccessAccepts]string{} + +func (a FreeRADIUSTotalAccessAccepts) String() string { + if str, ok := FreeRADIUSTotalAccessAccepts_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAccessAccepts(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccessAccepts_Add(p *radius.Packet, value FreeRADIUSTotalAccessAccepts) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 129, a) +} + +func FreeRADIUSTotalAccessAccepts_Get(p *radius.Packet) (value FreeRADIUSTotalAccessAccepts) { + value, _ = FreeRADIUSTotalAccessAccepts_Lookup(p) + return +} + +func FreeRADIUSTotalAccessAccepts_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessAccepts, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 129) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccessAccepts(i)) + } + return +} + +func FreeRADIUSTotalAccessAccepts_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessAccepts, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 129) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccessAccepts(i) + return +} + +func FreeRADIUSTotalAccessAccepts_Set(p *radius.Packet, value FreeRADIUSTotalAccessAccepts) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 129, a) +} + +func FreeRADIUSTotalAccessAccepts_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 129) +} + +type FreeRADIUSTotalAccessRejects uint32 + +var FreeRADIUSTotalAccessRejects_Strings = map[FreeRADIUSTotalAccessRejects]string{} + +func (a FreeRADIUSTotalAccessRejects) String() string { + if str, ok := FreeRADIUSTotalAccessRejects_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAccessRejects(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccessRejects_Add(p *radius.Packet, value FreeRADIUSTotalAccessRejects) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 130, a) +} + +func FreeRADIUSTotalAccessRejects_Get(p *radius.Packet) (value FreeRADIUSTotalAccessRejects) { + value, _ = FreeRADIUSTotalAccessRejects_Lookup(p) + return +} + +func FreeRADIUSTotalAccessRejects_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessRejects, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 130) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccessRejects(i)) + } + return +} + +func FreeRADIUSTotalAccessRejects_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessRejects, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 130) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccessRejects(i) + return +} + +func FreeRADIUSTotalAccessRejects_Set(p *radius.Packet, value FreeRADIUSTotalAccessRejects) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 130, a) +} + +func FreeRADIUSTotalAccessRejects_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 130) +} + +type FreeRADIUSTotalAccessChallenges uint32 + +var FreeRADIUSTotalAccessChallenges_Strings = map[FreeRADIUSTotalAccessChallenges]string{} + +func (a FreeRADIUSTotalAccessChallenges) String() string { + if str, ok := FreeRADIUSTotalAccessChallenges_Strings[a]; ok { + return str + } + return 
"FreeRADIUSTotalAccessChallenges(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccessChallenges_Add(p *radius.Packet, value FreeRADIUSTotalAccessChallenges) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 131, a) +} + +func FreeRADIUSTotalAccessChallenges_Get(p *radius.Packet) (value FreeRADIUSTotalAccessChallenges) { + value, _ = FreeRADIUSTotalAccessChallenges_Lookup(p) + return +} + +func FreeRADIUSTotalAccessChallenges_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessChallenges, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 131) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccessChallenges(i)) + } + return +} + +func FreeRADIUSTotalAccessChallenges_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessChallenges, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 131) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccessChallenges(i) + return +} + +func FreeRADIUSTotalAccessChallenges_Set(p *radius.Packet, value FreeRADIUSTotalAccessChallenges) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 131, a) +} + +func FreeRADIUSTotalAccessChallenges_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 131) +} + +type FreeRADIUSTotalAuthResponses uint32 + +var FreeRADIUSTotalAuthResponses_Strings = map[FreeRADIUSTotalAuthResponses]string{} + +func (a FreeRADIUSTotalAuthResponses) String() string { + if str, ok := FreeRADIUSTotalAuthResponses_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthResponses(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthResponses_Add(p *radius.Packet, value FreeRADIUSTotalAuthResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 132, a) +} + +func FreeRADIUSTotalAuthResponses_Get(p *radius.Packet) (value FreeRADIUSTotalAuthResponses) { + value, _ = FreeRADIUSTotalAuthResponses_Lookup(p) + return +} + +func FreeRADIUSTotalAuthResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthResponses, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 132) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthResponses(i)) + } + return +} + +func FreeRADIUSTotalAuthResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthResponses, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 132) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthResponses(i) + return +} + +func FreeRADIUSTotalAuthResponses_Set(p *radius.Packet, value FreeRADIUSTotalAuthResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 132, a) +} + +func FreeRADIUSTotalAuthResponses_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 132) +} + +type FreeRADIUSTotalAuthDuplicateRequests uint32 + +var FreeRADIUSTotalAuthDuplicateRequests_Strings = map[FreeRADIUSTotalAuthDuplicateRequests]string{} + +func (a FreeRADIUSTotalAuthDuplicateRequests) String() string { + if str, ok := FreeRADIUSTotalAuthDuplicateRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthDuplicateRequests_Add(p 
*radius.Packet, value FreeRADIUSTotalAuthDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 133, a) +} + +func FreeRADIUSTotalAuthDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthDuplicateRequests) { + value, _ = FreeRADIUSTotalAuthDuplicateRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAuthDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthDuplicateRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 133) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthDuplicateRequests(i)) + } + return +} + +func FreeRADIUSTotalAuthDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthDuplicateRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 133) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthDuplicateRequests(i) + return +} + +func FreeRADIUSTotalAuthDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 133, a) +} + +func FreeRADIUSTotalAuthDuplicateRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 133) +} + +type FreeRADIUSTotalAuthMalformedRequests uint32 + +var FreeRADIUSTotalAuthMalformedRequests_Strings = map[FreeRADIUSTotalAuthMalformedRequests]string{} + +func (a FreeRADIUSTotalAuthMalformedRequests) String() string { + if str, ok := FreeRADIUSTotalAuthMalformedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 134, a) +} + +func FreeRADIUSTotalAuthMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthMalformedRequests) { + value, _ = FreeRADIUSTotalAuthMalformedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAuthMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthMalformedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 134) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthMalformedRequests(i)) + } + return +} + +func FreeRADIUSTotalAuthMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthMalformedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 134) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthMalformedRequests(i) + return +} + +func FreeRADIUSTotalAuthMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 134, a) +} + +func FreeRADIUSTotalAuthMalformedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 134) +} + +type FreeRADIUSTotalAuthInvalidRequests uint32 + +var FreeRADIUSTotalAuthInvalidRequests_Strings = map[FreeRADIUSTotalAuthInvalidRequests]string{} + +func (a FreeRADIUSTotalAuthInvalidRequests) String() string { + if str, ok := FreeRADIUSTotalAuthInvalidRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthInvalidRequests(" + 
strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 135, a) +} + +func FreeRADIUSTotalAuthInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthInvalidRequests) { + value, _ = FreeRADIUSTotalAuthInvalidRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAuthInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthInvalidRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 135) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthInvalidRequests(i)) + } + return +} + +func FreeRADIUSTotalAuthInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthInvalidRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 135) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthInvalidRequests(i) + return +} + +func FreeRADIUSTotalAuthInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 135, a) +} + +func FreeRADIUSTotalAuthInvalidRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 135) +} + +type FreeRADIUSTotalAuthDroppedRequests uint32 + +var FreeRADIUSTotalAuthDroppedRequests_Strings = map[FreeRADIUSTotalAuthDroppedRequests]string{} + +func (a FreeRADIUSTotalAuthDroppedRequests) String() string { + if str, ok := FreeRADIUSTotalAuthDroppedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 136, a) +} + +func FreeRADIUSTotalAuthDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthDroppedRequests) { + value, _ = FreeRADIUSTotalAuthDroppedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAuthDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthDroppedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 136) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthDroppedRequests(i)) + } + return +} + +func FreeRADIUSTotalAuthDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthDroppedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 136) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthDroppedRequests(i) + return +} + +func FreeRADIUSTotalAuthDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 136, a) +} + +func FreeRADIUSTotalAuthDroppedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 136) +} + +type FreeRADIUSTotalAuthUnknownTypes uint32 + +var FreeRADIUSTotalAuthUnknownTypes_Strings = map[FreeRADIUSTotalAuthUnknownTypes]string{} + +func (a FreeRADIUSTotalAuthUnknownTypes) String() string { + if str, ok := FreeRADIUSTotalAuthUnknownTypes_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAuthUnknownTypes(" + 
strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAuthUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalAuthUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 137, a) +} + +func FreeRADIUSTotalAuthUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalAuthUnknownTypes) { + value, _ = FreeRADIUSTotalAuthUnknownTypes_Lookup(p) + return +} + +func FreeRADIUSTotalAuthUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthUnknownTypes, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 137) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAuthUnknownTypes(i)) + } + return +} + +func FreeRADIUSTotalAuthUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthUnknownTypes, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 137) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAuthUnknownTypes(i) + return +} + +func FreeRADIUSTotalAuthUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalAuthUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 137, a) +} + +func FreeRADIUSTotalAuthUnknownTypes_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 137) +} + +type FreeRADIUSTotalProxyAccessRequests uint32 + +var FreeRADIUSTotalProxyAccessRequests_Strings = map[FreeRADIUSTotalProxyAccessRequests]string{} + +func (a FreeRADIUSTotalProxyAccessRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAccessRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccessRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAccessRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 138, a) +} + +func FreeRADIUSTotalProxyAccessRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRequests) { + value, _ = FreeRADIUSTotalProxyAccessRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccessRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 138) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccessRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAccessRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 138) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccessRequests(i) + return +} + +func FreeRADIUSTotalProxyAccessRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 138, a) +} + +func FreeRADIUSTotalProxyAccessRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 138) +} + +type FreeRADIUSTotalProxyAccessAccepts uint32 + +var FreeRADIUSTotalProxyAccessAccepts_Strings = map[FreeRADIUSTotalProxyAccessAccepts]string{} + +func (a FreeRADIUSTotalProxyAccessAccepts) String() string { + if str, ok := FreeRADIUSTotalProxyAccessAccepts_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccessAccepts(" + strconv.FormatUint(uint64(a), 10) + 
")" +} + +func FreeRADIUSTotalProxyAccessAccepts_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessAccepts) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 139, a) +} + +func FreeRADIUSTotalProxyAccessAccepts_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessAccepts) { + value, _ = FreeRADIUSTotalProxyAccessAccepts_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccessAccepts_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessAccepts, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 139) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccessAccepts(i)) + } + return +} + +func FreeRADIUSTotalProxyAccessAccepts_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessAccepts, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 139) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccessAccepts(i) + return +} + +func FreeRADIUSTotalProxyAccessAccepts_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessAccepts) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 139, a) +} + +func FreeRADIUSTotalProxyAccessAccepts_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 139) +} + +type FreeRADIUSTotalProxyAccessRejects uint32 + +var FreeRADIUSTotalProxyAccessRejects_Strings = map[FreeRADIUSTotalProxyAccessRejects]string{} + +func (a FreeRADIUSTotalProxyAccessRejects) String() string { + if str, ok := FreeRADIUSTotalProxyAccessRejects_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccessRejects(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAccessRejects_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessRejects) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 140, a) +} + +func FreeRADIUSTotalProxyAccessRejects_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRejects) { + value, _ = FreeRADIUSTotalProxyAccessRejects_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccessRejects_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessRejects, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 140) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccessRejects(i)) + } + return +} + +func FreeRADIUSTotalProxyAccessRejects_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRejects, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 140) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccessRejects(i) + return +} + +func FreeRADIUSTotalProxyAccessRejects_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessRejects) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 140, a) +} + +func FreeRADIUSTotalProxyAccessRejects_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 140) +} + +type FreeRADIUSTotalProxyAccessChallenges uint32 + +var FreeRADIUSTotalProxyAccessChallenges_Strings = map[FreeRADIUSTotalProxyAccessChallenges]string{} + +func (a FreeRADIUSTotalProxyAccessChallenges) String() string { + if str, ok := FreeRADIUSTotalProxyAccessChallenges_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccessChallenges(" + strconv.FormatUint(uint64(a), 10) + ")" +} + 
+func FreeRADIUSTotalProxyAccessChallenges_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessChallenges) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 141, a) +} + +func FreeRADIUSTotalProxyAccessChallenges_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessChallenges) { + value, _ = FreeRADIUSTotalProxyAccessChallenges_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccessChallenges_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessChallenges, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 141) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccessChallenges(i)) + } + return +} + +func FreeRADIUSTotalProxyAccessChallenges_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessChallenges, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 141) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccessChallenges(i) + return +} + +func FreeRADIUSTotalProxyAccessChallenges_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessChallenges) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 141, a) +} + +func FreeRADIUSTotalProxyAccessChallenges_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 141) +} + +type FreeRADIUSTotalProxyAuthResponses uint32 + +var FreeRADIUSTotalProxyAuthResponses_Strings = map[FreeRADIUSTotalProxyAuthResponses]string{} + +func (a FreeRADIUSTotalProxyAuthResponses) String() string { + if str, ok := FreeRADIUSTotalProxyAuthResponses_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAuthResponses(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthResponses_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 142, a) +} + +func FreeRADIUSTotalProxyAuthResponses_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthResponses) { + value, _ = FreeRADIUSTotalProxyAuthResponses_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthResponses, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 142) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthResponses(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthResponses, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 142) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthResponses(i) + return +} + +func FreeRADIUSTotalProxyAuthResponses_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 142, a) +} + +func FreeRADIUSTotalProxyAuthResponses_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 142) +} + +type FreeRADIUSTotalProxyAuthDuplicateRequests uint32 + +var FreeRADIUSTotalProxyAuthDuplicateRequests_Strings = map[FreeRADIUSTotalProxyAuthDuplicateRequests]string{} + +func (a FreeRADIUSTotalProxyAuthDuplicateRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAuthDuplicateRequests_Strings[a]; ok { + return str + } + return 
"FreeRADIUSTotalProxyAuthDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 143, a) +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDuplicateRequests) { + value, _ = FreeRADIUSTotalProxyAuthDuplicateRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthDuplicateRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 143) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthDuplicateRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDuplicateRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 143) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthDuplicateRequests(i) + return +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 143, a) +} + +func FreeRADIUSTotalProxyAuthDuplicateRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 143) +} + +type FreeRADIUSTotalProxyAuthMalformedRequests uint32 + +var FreeRADIUSTotalProxyAuthMalformedRequests_Strings = map[FreeRADIUSTotalProxyAuthMalformedRequests]string{} + +func (a FreeRADIUSTotalProxyAuthMalformedRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAuthMalformedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAuthMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 144, a) +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthMalformedRequests) { + value, _ = FreeRADIUSTotalProxyAuthMalformedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthMalformedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 144) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthMalformedRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthMalformedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 144) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthMalformedRequests(i) + return +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 144, a) +} + +func FreeRADIUSTotalProxyAuthMalformedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 144) +} + +type FreeRADIUSTotalProxyAuthInvalidRequests 
uint32 + +var FreeRADIUSTotalProxyAuthInvalidRequests_Strings = map[FreeRADIUSTotalProxyAuthInvalidRequests]string{} + +func (a FreeRADIUSTotalProxyAuthInvalidRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAuthInvalidRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAuthInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 145, a) +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthInvalidRequests) { + value, _ = FreeRADIUSTotalProxyAuthInvalidRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthInvalidRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 145) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthInvalidRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthInvalidRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 145) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthInvalidRequests(i) + return +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 145, a) +} + +func FreeRADIUSTotalProxyAuthInvalidRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 145) +} + +type FreeRADIUSTotalProxyAuthDroppedRequests uint32 + +var FreeRADIUSTotalProxyAuthDroppedRequests_Strings = map[FreeRADIUSTotalProxyAuthDroppedRequests]string{} + +func (a FreeRADIUSTotalProxyAuthDroppedRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAuthDroppedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAuthDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 146, a) +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDroppedRequests) { + value, _ = FreeRADIUSTotalProxyAuthDroppedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthDroppedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 146) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthDroppedRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDroppedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 146) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthDroppedRequests(i) + return +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthDroppedRequests) (err error) { + a := 
radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 146, a) +} + +func FreeRADIUSTotalProxyAuthDroppedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 146) +} + +type FreeRADIUSTotalProxyAuthUnknownTypes uint32 + +var FreeRADIUSTotalProxyAuthUnknownTypes_Strings = map[FreeRADIUSTotalProxyAuthUnknownTypes]string{} + +func (a FreeRADIUSTotalProxyAuthUnknownTypes) String() string { + if str, ok := FreeRADIUSTotalProxyAuthUnknownTypes_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAuthUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 147, a) +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthUnknownTypes) { + value, _ = FreeRADIUSTotalProxyAuthUnknownTypes_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthUnknownTypes, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 147) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAuthUnknownTypes(i)) + } + return +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthUnknownTypes, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 147) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAuthUnknownTypes(i) + return +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 147, a) +} + +func FreeRADIUSTotalProxyAuthUnknownTypes_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 147) +} + +type FreeRADIUSTotalAccountingRequests uint32 + +var FreeRADIUSTotalAccountingRequests_Strings = map[FreeRADIUSTotalAccountingRequests]string{} + +func (a FreeRADIUSTotalAccountingRequests) String() string { + if str, ok := FreeRADIUSTotalAccountingRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAccountingRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccountingRequests_Add(p *radius.Packet, value FreeRADIUSTotalAccountingRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 148, a) +} + +func FreeRADIUSTotalAccountingRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAccountingRequests) { + value, _ = FreeRADIUSTotalAccountingRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAccountingRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccountingRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 148) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccountingRequests(i)) + } + return +} + +func FreeRADIUSTotalAccountingRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccountingRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 148) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccountingRequests(i) + return +} + +func FreeRADIUSTotalAccountingRequests_Set(p *radius.Packet, value 
FreeRADIUSTotalAccountingRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 148, a) +} + +func FreeRADIUSTotalAccountingRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 148) +} + +type FreeRADIUSTotalAccountingResponses uint32 + +var FreeRADIUSTotalAccountingResponses_Strings = map[FreeRADIUSTotalAccountingResponses]string{} + +func (a FreeRADIUSTotalAccountingResponses) String() string { + if str, ok := FreeRADIUSTotalAccountingResponses_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAccountingResponses(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAccountingResponses_Add(p *radius.Packet, value FreeRADIUSTotalAccountingResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 149, a) +} + +func FreeRADIUSTotalAccountingResponses_Get(p *radius.Packet) (value FreeRADIUSTotalAccountingResponses) { + value, _ = FreeRADIUSTotalAccountingResponses_Lookup(p) + return +} + +func FreeRADIUSTotalAccountingResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccountingResponses, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 149) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAccountingResponses(i)) + } + return +} + +func FreeRADIUSTotalAccountingResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccountingResponses, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 149) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAccountingResponses(i) + return +} + +func FreeRADIUSTotalAccountingResponses_Set(p *radius.Packet, value FreeRADIUSTotalAccountingResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 149, a) +} + +func FreeRADIUSTotalAccountingResponses_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 149) +} + +type FreeRADIUSTotalAcctDuplicateRequests uint32 + +var FreeRADIUSTotalAcctDuplicateRequests_Strings = map[FreeRADIUSTotalAcctDuplicateRequests]string{} + +func (a FreeRADIUSTotalAcctDuplicateRequests) String() string { + if str, ok := FreeRADIUSTotalAcctDuplicateRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAcctDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAcctDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 150, a) +} + +func FreeRADIUSTotalAcctDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctDuplicateRequests) { + value, _ = FreeRADIUSTotalAcctDuplicateRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAcctDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctDuplicateRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 150) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAcctDuplicateRequests(i)) + } + return +} + +func FreeRADIUSTotalAcctDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctDuplicateRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 150) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAcctDuplicateRequests(i) + return +} + +func 
FreeRADIUSTotalAcctDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 150, a) +} + +func FreeRADIUSTotalAcctDuplicateRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 150) +} + +type FreeRADIUSTotalAcctMalformedRequests uint32 + +var FreeRADIUSTotalAcctMalformedRequests_Strings = map[FreeRADIUSTotalAcctMalformedRequests]string{} + +func (a FreeRADIUSTotalAcctMalformedRequests) String() string { + if str, ok := FreeRADIUSTotalAcctMalformedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAcctMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAcctMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 151, a) +} + +func FreeRADIUSTotalAcctMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctMalformedRequests) { + value, _ = FreeRADIUSTotalAcctMalformedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAcctMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctMalformedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 151) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAcctMalformedRequests(i)) + } + return +} + +func FreeRADIUSTotalAcctMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctMalformedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 151) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAcctMalformedRequests(i) + return +} + +func FreeRADIUSTotalAcctMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 151, a) +} + +func FreeRADIUSTotalAcctMalformedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 151) +} + +type FreeRADIUSTotalAcctInvalidRequests uint32 + +var FreeRADIUSTotalAcctInvalidRequests_Strings = map[FreeRADIUSTotalAcctInvalidRequests]string{} + +func (a FreeRADIUSTotalAcctInvalidRequests) String() string { + if str, ok := FreeRADIUSTotalAcctInvalidRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAcctInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAcctInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 152, a) +} + +func FreeRADIUSTotalAcctInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctInvalidRequests) { + value, _ = FreeRADIUSTotalAcctInvalidRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAcctInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctInvalidRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 152) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAcctInvalidRequests(i)) + } + return +} + +func FreeRADIUSTotalAcctInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctInvalidRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 152) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = 
FreeRADIUSTotalAcctInvalidRequests(i) + return +} + +func FreeRADIUSTotalAcctInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 152, a) +} + +func FreeRADIUSTotalAcctInvalidRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 152) +} + +type FreeRADIUSTotalAcctDroppedRequests uint32 + +var FreeRADIUSTotalAcctDroppedRequests_Strings = map[FreeRADIUSTotalAcctDroppedRequests]string{} + +func (a FreeRADIUSTotalAcctDroppedRequests) String() string { + if str, ok := FreeRADIUSTotalAcctDroppedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAcctDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAcctDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 153, a) +} + +func FreeRADIUSTotalAcctDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctDroppedRequests) { + value, _ = FreeRADIUSTotalAcctDroppedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalAcctDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctDroppedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 153) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAcctDroppedRequests(i)) + } + return +} + +func FreeRADIUSTotalAcctDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctDroppedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 153) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAcctDroppedRequests(i) + return +} + +func FreeRADIUSTotalAcctDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 153, a) +} + +func FreeRADIUSTotalAcctDroppedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 153) +} + +type FreeRADIUSTotalAcctUnknownTypes uint32 + +var FreeRADIUSTotalAcctUnknownTypes_Strings = map[FreeRADIUSTotalAcctUnknownTypes]string{} + +func (a FreeRADIUSTotalAcctUnknownTypes) String() string { + if str, ok := FreeRADIUSTotalAcctUnknownTypes_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalAcctUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalAcctUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalAcctUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 154, a) +} + +func FreeRADIUSTotalAcctUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalAcctUnknownTypes) { + value, _ = FreeRADIUSTotalAcctUnknownTypes_Lookup(p) + return +} + +func FreeRADIUSTotalAcctUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctUnknownTypes, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 154) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalAcctUnknownTypes(i)) + } + return +} + +func FreeRADIUSTotalAcctUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctUnknownTypes, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 154) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalAcctUnknownTypes(i) + 
return +} + +func FreeRADIUSTotalAcctUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalAcctUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 154, a) +} + +func FreeRADIUSTotalAcctUnknownTypes_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 154) +} + +type FreeRADIUSTotalProxyAccountingRequests uint32 + +var FreeRADIUSTotalProxyAccountingRequests_Strings = map[FreeRADIUSTotalProxyAccountingRequests]string{} + +func (a FreeRADIUSTotalProxyAccountingRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAccountingRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccountingRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAccountingRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccountingRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 155, a) +} + +func FreeRADIUSTotalProxyAccountingRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingRequests) { + value, _ = FreeRADIUSTotalProxyAccountingRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccountingRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccountingRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 155) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccountingRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAccountingRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 155) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccountingRequests(i) + return +} + +func FreeRADIUSTotalProxyAccountingRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccountingRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 155, a) +} + +func FreeRADIUSTotalProxyAccountingRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 155) +} + +type FreeRADIUSTotalProxyAccountingResponses uint32 + +var FreeRADIUSTotalProxyAccountingResponses_Strings = map[FreeRADIUSTotalProxyAccountingResponses]string{} + +func (a FreeRADIUSTotalProxyAccountingResponses) String() string { + if str, ok := FreeRADIUSTotalProxyAccountingResponses_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAccountingResponses(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAccountingResponses_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccountingResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 156, a) +} + +func FreeRADIUSTotalProxyAccountingResponses_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingResponses) { + value, _ = FreeRADIUSTotalProxyAccountingResponses_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAccountingResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccountingResponses, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 156) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAccountingResponses(i)) + } + return +} + +func FreeRADIUSTotalProxyAccountingResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingResponses, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 156) + if !ok { + err 
= radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAccountingResponses(i) + return +} + +func FreeRADIUSTotalProxyAccountingResponses_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccountingResponses) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 156, a) +} + +func FreeRADIUSTotalProxyAccountingResponses_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 156) +} + +type FreeRADIUSTotalProxyAcctDuplicateRequests uint32 + +var FreeRADIUSTotalProxyAcctDuplicateRequests_Strings = map[FreeRADIUSTotalProxyAcctDuplicateRequests]string{} + +func (a FreeRADIUSTotalProxyAcctDuplicateRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAcctDuplicateRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAcctDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 157, a) +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDuplicateRequests) { + value, _ = FreeRADIUSTotalProxyAcctDuplicateRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctDuplicateRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 157) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAcctDuplicateRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDuplicateRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 157) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAcctDuplicateRequests(i) + return +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctDuplicateRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 157, a) +} + +func FreeRADIUSTotalProxyAcctDuplicateRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 157) +} + +type FreeRADIUSTotalProxyAcctMalformedRequests uint32 + +var FreeRADIUSTotalProxyAcctMalformedRequests_Strings = map[FreeRADIUSTotalProxyAcctMalformedRequests]string{} + +func (a FreeRADIUSTotalProxyAcctMalformedRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAcctMalformedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAcctMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 158, a) +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctMalformedRequests) { + value, _ = FreeRADIUSTotalProxyAcctMalformedRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctMalformedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 158) { + i, err = radius.Integer(attr) + if err != nil { + return + } + 
values = append(values, FreeRADIUSTotalProxyAcctMalformedRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctMalformedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 158) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAcctMalformedRequests(i) + return +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctMalformedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 158, a) +} + +func FreeRADIUSTotalProxyAcctMalformedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 158) +} + +type FreeRADIUSTotalProxyAcctInvalidRequests uint32 + +var FreeRADIUSTotalProxyAcctInvalidRequests_Strings = map[FreeRADIUSTotalProxyAcctInvalidRequests]string{} + +func (a FreeRADIUSTotalProxyAcctInvalidRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAcctInvalidRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAcctInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 159, a) +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctInvalidRequests) { + value, _ = FreeRADIUSTotalProxyAcctInvalidRequests_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctInvalidRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 159) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAcctInvalidRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctInvalidRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 159) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAcctInvalidRequests(i) + return +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctInvalidRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 159, a) +} + +func FreeRADIUSTotalProxyAcctInvalidRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 159) +} + +type FreeRADIUSTotalProxyAcctDroppedRequests uint32 + +var FreeRADIUSTotalProxyAcctDroppedRequests_Strings = map[FreeRADIUSTotalProxyAcctDroppedRequests]string{} + +func (a FreeRADIUSTotalProxyAcctDroppedRequests) String() string { + if str, ok := FreeRADIUSTotalProxyAcctDroppedRequests_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAcctDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAcctDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 160, a) +} + +func FreeRADIUSTotalProxyAcctDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDroppedRequests) { + value, _ = FreeRADIUSTotalProxyAcctDroppedRequests_Lookup(p) + return +} + +func 
FreeRADIUSTotalProxyAcctDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctDroppedRequests, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 160) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAcctDroppedRequests(i)) + } + return +} + +func FreeRADIUSTotalProxyAcctDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDroppedRequests, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 160) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAcctDroppedRequests(i) + return +} + +func FreeRADIUSTotalProxyAcctDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctDroppedRequests) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 160, a) +} + +func FreeRADIUSTotalProxyAcctDroppedRequests_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 160) +} + +type FreeRADIUSTotalProxyAcctUnknownTypes uint32 + +var FreeRADIUSTotalProxyAcctUnknownTypes_Strings = map[FreeRADIUSTotalProxyAcctUnknownTypes]string{} + +func (a FreeRADIUSTotalProxyAcctUnknownTypes) String() string { + if str, ok := FreeRADIUSTotalProxyAcctUnknownTypes_Strings[a]; ok { + return str + } + return "FreeRADIUSTotalProxyAcctUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 161, a) +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctUnknownTypes) { + value, _ = FreeRADIUSTotalProxyAcctUnknownTypes_Lookup(p) + return +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctUnknownTypes, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 161) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSTotalProxyAcctUnknownTypes(i)) + } + return +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctUnknownTypes, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 161) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSTotalProxyAcctUnknownTypes(i) + return +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctUnknownTypes) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 161, a) +} + +func FreeRADIUSTotalProxyAcctUnknownTypes_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 161) +} + +type FreeRADIUSQueueLenInternal uint32 + +var FreeRADIUSQueueLenInternal_Strings = map[FreeRADIUSQueueLenInternal]string{} + +func (a FreeRADIUSQueueLenInternal) String() string { + if str, ok := FreeRADIUSQueueLenInternal_Strings[a]; ok { + return str + } + return "FreeRADIUSQueueLenInternal(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueueLenInternal_Add(p *radius.Packet, value FreeRADIUSQueueLenInternal) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 162, a) +} + +func FreeRADIUSQueueLenInternal_Get(p *radius.Packet) (value FreeRADIUSQueueLenInternal) { + value, _ = FreeRADIUSQueueLenInternal_Lookup(p) + return +} 
+ +func FreeRADIUSQueueLenInternal_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenInternal, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 162) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueueLenInternal(i)) + } + return +} + +func FreeRADIUSQueueLenInternal_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenInternal, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 162) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueueLenInternal(i) + return +} + +func FreeRADIUSQueueLenInternal_Set(p *radius.Packet, value FreeRADIUSQueueLenInternal) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 162, a) +} + +func FreeRADIUSQueueLenInternal_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 162) +} + +type FreeRADIUSQueueLenProxy uint32 + +var FreeRADIUSQueueLenProxy_Strings = map[FreeRADIUSQueueLenProxy]string{} + +func (a FreeRADIUSQueueLenProxy) String() string { + if str, ok := FreeRADIUSQueueLenProxy_Strings[a]; ok { + return str + } + return "FreeRADIUSQueueLenProxy(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueueLenProxy_Add(p *radius.Packet, value FreeRADIUSQueueLenProxy) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 163, a) +} + +func FreeRADIUSQueueLenProxy_Get(p *radius.Packet) (value FreeRADIUSQueueLenProxy) { + value, _ = FreeRADIUSQueueLenProxy_Lookup(p) + return +} + +func FreeRADIUSQueueLenProxy_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenProxy, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 163) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueueLenProxy(i)) + } + return +} + +func FreeRADIUSQueueLenProxy_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenProxy, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 163) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueueLenProxy(i) + return +} + +func FreeRADIUSQueueLenProxy_Set(p *radius.Packet, value FreeRADIUSQueueLenProxy) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 163, a) +} + +func FreeRADIUSQueueLenProxy_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 163) +} + +type FreeRADIUSQueueLenAuth uint32 + +var FreeRADIUSQueueLenAuth_Strings = map[FreeRADIUSQueueLenAuth]string{} + +func (a FreeRADIUSQueueLenAuth) String() string { + if str, ok := FreeRADIUSQueueLenAuth_Strings[a]; ok { + return str + } + return "FreeRADIUSQueueLenAuth(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueueLenAuth_Add(p *radius.Packet, value FreeRADIUSQueueLenAuth) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 164, a) +} + +func FreeRADIUSQueueLenAuth_Get(p *radius.Packet) (value FreeRADIUSQueueLenAuth) { + value, _ = FreeRADIUSQueueLenAuth_Lookup(p) + return +} + +func FreeRADIUSQueueLenAuth_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenAuth, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 164) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueueLenAuth(i)) + } + return +} + +func FreeRADIUSQueueLenAuth_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenAuth, err error) { + a, ok 
:= _FreeRADIUS_LookupVendor(p, 164) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueueLenAuth(i) + return +} + +func FreeRADIUSQueueLenAuth_Set(p *radius.Packet, value FreeRADIUSQueueLenAuth) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 164, a) +} + +func FreeRADIUSQueueLenAuth_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 164) +} + +type FreeRADIUSQueueLenAcct uint32 + +var FreeRADIUSQueueLenAcct_Strings = map[FreeRADIUSQueueLenAcct]string{} + +func (a FreeRADIUSQueueLenAcct) String() string { + if str, ok := FreeRADIUSQueueLenAcct_Strings[a]; ok { + return str + } + return "FreeRADIUSQueueLenAcct(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueueLenAcct_Add(p *radius.Packet, value FreeRADIUSQueueLenAcct) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 165, a) +} + +func FreeRADIUSQueueLenAcct_Get(p *radius.Packet) (value FreeRADIUSQueueLenAcct) { + value, _ = FreeRADIUSQueueLenAcct_Lookup(p) + return +} + +func FreeRADIUSQueueLenAcct_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenAcct, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 165) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueueLenAcct(i)) + } + return +} + +func FreeRADIUSQueueLenAcct_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenAcct, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 165) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueueLenAcct(i) + return +} + +func FreeRADIUSQueueLenAcct_Set(p *radius.Packet, value FreeRADIUSQueueLenAcct) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 165, a) +} + +func FreeRADIUSQueueLenAcct_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 165) +} + +type FreeRADIUSQueueLenDetail uint32 + +var FreeRADIUSQueueLenDetail_Strings = map[FreeRADIUSQueueLenDetail]string{} + +func (a FreeRADIUSQueueLenDetail) String() string { + if str, ok := FreeRADIUSQueueLenDetail_Strings[a]; ok { + return str + } + return "FreeRADIUSQueueLenDetail(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueueLenDetail_Add(p *radius.Packet, value FreeRADIUSQueueLenDetail) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 166, a) +} + +func FreeRADIUSQueueLenDetail_Get(p *radius.Packet) (value FreeRADIUSQueueLenDetail) { + value, _ = FreeRADIUSQueueLenDetail_Lookup(p) + return +} + +func FreeRADIUSQueueLenDetail_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenDetail, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 166) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueueLenDetail(i)) + } + return +} + +func FreeRADIUSQueueLenDetail_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenDetail, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 166) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueueLenDetail(i) + return +} + +func FreeRADIUSQueueLenDetail_Set(p *radius.Packet, value FreeRADIUSQueueLenDetail) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 166, a) +} + +func 
FreeRADIUSQueueLenDetail_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 166) +} + +func FreeRADIUSStatsStartTime_Add(p *radius.Packet, value time.Time) (err error) { + var a radius.Attribute + a, err = radius.NewDate(value) + if err != nil { + return + } + return _FreeRADIUS_AddVendor(p, 176, a) +} + +func FreeRADIUSStatsStartTime_Get(p *radius.Packet) (value time.Time) { + value, _ = FreeRADIUSStatsStartTime_Lookup(p) + return +} + +func FreeRADIUSStatsStartTime_Gets(p *radius.Packet) (values []time.Time, err error) { + var i time.Time + for _, attr := range _FreeRADIUS_GetsVendor(p, 176) { + i, err = radius.Date(attr) + if err != nil { + return + } + values = append(values, i) + } + return +} + +func FreeRADIUSStatsStartTime_Lookup(p *radius.Packet) (value time.Time, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 176) + if !ok { + err = radius.ErrNoAttribute + return + } + value, err = radius.Date(a) + return +} + +func FreeRADIUSStatsStartTime_Set(p *radius.Packet, value time.Time) (err error) { + var a radius.Attribute + a, err = radius.NewDate(value) + if err != nil { + return + } + return _FreeRADIUS_SetVendor(p, 176, a) +} + +func FreeRADIUSStatsStartTime_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 176) +} + +func FreeRADIUSStatsHUPTime_Add(p *radius.Packet, value time.Time) (err error) { + var a radius.Attribute + a, err = radius.NewDate(value) + if err != nil { + return + } + return _FreeRADIUS_AddVendor(p, 177, a) +} + +func FreeRADIUSStatsHUPTime_Get(p *radius.Packet) (value time.Time) { + value, _ = FreeRADIUSStatsHUPTime_Lookup(p) + return +} + +func FreeRADIUSStatsHUPTime_Gets(p *radius.Packet) (values []time.Time, err error) { + var i time.Time + for _, attr := range _FreeRADIUS_GetsVendor(p, 177) { + i, err = radius.Date(attr) + if err != nil { + return + } + values = append(values, i) + } + return +} + +func FreeRADIUSStatsHUPTime_Lookup(p *radius.Packet) (value time.Time, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 177) + if !ok { + err = radius.ErrNoAttribute + return + } + value, err = radius.Date(a) + return +} + +func FreeRADIUSStatsHUPTime_Set(p *radius.Packet, value time.Time) (err error) { + var a radius.Attribute + a, err = radius.NewDate(value) + if err != nil { + return + } + return _FreeRADIUS_SetVendor(p, 177, a) +} + +func FreeRADIUSStatsHUPTime_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 177) +} + +type FreeRADIUSQueuePPSIn uint32 + +var FreeRADIUSQueuePPSIn_Strings = map[FreeRADIUSQueuePPSIn]string{} + +func (a FreeRADIUSQueuePPSIn) String() string { + if str, ok := FreeRADIUSQueuePPSIn_Strings[a]; ok { + return str + } + return "FreeRADIUSQueuePPSIn(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueuePPSIn_Add(p *radius.Packet, value FreeRADIUSQueuePPSIn) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 181, a) +} + +func FreeRADIUSQueuePPSIn_Get(p *radius.Packet) (value FreeRADIUSQueuePPSIn) { + value, _ = FreeRADIUSQueuePPSIn_Lookup(p) + return +} + +func FreeRADIUSQueuePPSIn_Gets(p *radius.Packet) (values []FreeRADIUSQueuePPSIn, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 181) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueuePPSIn(i)) + } + return +} + +func FreeRADIUSQueuePPSIn_Lookup(p *radius.Packet) (value FreeRADIUSQueuePPSIn, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 181) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = 
radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueuePPSIn(i) + return +} + +func FreeRADIUSQueuePPSIn_Set(p *radius.Packet, value FreeRADIUSQueuePPSIn) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 181, a) +} + +func FreeRADIUSQueuePPSIn_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 181) +} + +type FreeRADIUSQueuePPSOut uint32 + +var FreeRADIUSQueuePPSOut_Strings = map[FreeRADIUSQueuePPSOut]string{} + +func (a FreeRADIUSQueuePPSOut) String() string { + if str, ok := FreeRADIUSQueuePPSOut_Strings[a]; ok { + return str + } + return "FreeRADIUSQueuePPSOut(" + strconv.FormatUint(uint64(a), 10) + ")" +} + +func FreeRADIUSQueuePPSOut_Add(p *radius.Packet, value FreeRADIUSQueuePPSOut) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_AddVendor(p, 182, a) +} + +func FreeRADIUSQueuePPSOut_Get(p *radius.Packet) (value FreeRADIUSQueuePPSOut) { + value, _ = FreeRADIUSQueuePPSOut_Lookup(p) + return +} + +func FreeRADIUSQueuePPSOut_Gets(p *radius.Packet) (values []FreeRADIUSQueuePPSOut, err error) { + var i uint32 + for _, attr := range _FreeRADIUS_GetsVendor(p, 182) { + i, err = radius.Integer(attr) + if err != nil { + return + } + values = append(values, FreeRADIUSQueuePPSOut(i)) + } + return +} + +func FreeRADIUSQueuePPSOut_Lookup(p *radius.Packet) (value FreeRADIUSQueuePPSOut, err error) { + a, ok := _FreeRADIUS_LookupVendor(p, 182) + if !ok { + err = radius.ErrNoAttribute + return + } + var i uint32 + i, err = radius.Integer(a) + if err != nil { + return + } + value = FreeRADIUSQueuePPSOut(i) + return +} + +func FreeRADIUSQueuePPSOut_Set(p *radius.Packet, value FreeRADIUSQueuePPSOut) (err error) { + a := radius.NewInteger(uint32(value)) + return _FreeRADIUS_SetVendor(p, 182, a) +} + +func FreeRADIUSQueuePPSOut_Del(p *radius.Packet) { + _FreeRADIUS_DelVendor(p, 182) +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/charts.go b/src/go/collectors/go.d.plugin/modules/freeradius/charts.go new file mode 100644 index 00000000000000..6f84d5cf3b13a7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/charts.go @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "authentication", + Title: "Authentication", + Units: "packets/s", + Fam: "authentication", + Ctx: "freeradius.authentication", + Dims: Dims{ + {ID: "access-requests", Name: "requests", Algo: module.Incremental}, + {ID: "auth-responses", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "authentication_responses", + Title: "Authentication Responses", + Units: "packets/s", + Fam: "authentication", + Ctx: "freeradius.authentication_access_responses", + Dims: Dims{ + {ID: "access-accepts", Name: "accepts", Algo: module.Incremental}, + {ID: "access-rejects", Name: "rejects", Algo: module.Incremental}, + {ID: "access-challenges", Name: "challenges", Algo: module.Incremental}, + }, + }, + { + ID: "bad_authentication_requests", + Title: "Bad Authentication Requests", + Units: "packets/s", + Fam: "authentication", + Ctx: "freeradius.bad_authentication", + Dims: Dims{ + {ID: "auth-dropped-requests", Name: "dropped", Algo: module.Incremental}, + {ID: "auth-duplicate-requests", Name: "duplicate", Algo: module.Incremental}, + {ID: "auth-invalid-requests", Name: 
"invalid", Algo: module.Incremental}, + {ID: "auth-malformed-requests", Name: "malformed", Algo: module.Incremental}, + {ID: "auth-unknown-types", Name: "unknown-types", Algo: module.Incremental}, + }, + }, + { + ID: "proxy_authentication", + Title: "Authentication", + Units: "packets/s", + Fam: "proxy authentication", + Ctx: "freeradius.proxy_authentication", + Dims: Dims{ + {ID: "proxy-access-requests", Name: "requests", Algo: module.Incremental}, + {ID: "proxy-auth-responses", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "proxy_authentication_responses", + Title: "Authentication Responses", + Units: "packets/s", + Fam: "proxy authentication", + Ctx: "freeradius.proxy_authentication_access_responses", + Dims: Dims{ + {ID: "proxy-access-accepts", Name: "accepts", Algo: module.Incremental}, + {ID: "proxy-access-rejects", Name: "rejects", Algo: module.Incremental}, + {ID: "proxy-access-challenges", Name: "challenges", Algo: module.Incremental}, + }, + }, + { + ID: "bad_proxy_authentication_requests", + Title: "Bad Authentication Requests", + Units: "packets/s", + Fam: "proxy authentication", + Ctx: "freeradius.proxy_bad_authentication", + Dims: Dims{ + {ID: "proxy-auth-dropped-requests", Name: "dropped", Algo: module.Incremental}, + {ID: "proxy-auth-duplicate-requests", Name: "duplicate", Algo: module.Incremental}, + {ID: "proxy-auth-invalid-requests", Name: "invalid", Algo: module.Incremental}, + {ID: "proxy-auth-malformed-requests", Name: "malformed", Algo: module.Incremental}, + {ID: "proxy-auth-unknown-types", Name: "unknown-types", Algo: module.Incremental}, + }, + }, + { + ID: "accounting", + Title: "Accounting", + Units: "packets/s", + Fam: "accounting", + Ctx: "freeradius.accounting", + Dims: Dims{ + {ID: "accounting-requests", Name: "requests", Algo: module.Incremental}, + {ID: "accounting-responses", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "bad_accounting_requests", + Title: "Bad Accounting Requests", + Units: "packets/s", + Fam: "accounting", + Ctx: "freeradius.bad_accounting", + Dims: Dims{ + {ID: "acct-dropped-requests", Name: "dropped", Algo: module.Incremental}, + {ID: "acct-duplicate-requests", Name: "duplicate", Algo: module.Incremental}, + {ID: "acct-invalid-requests", Name: "invalid", Algo: module.Incremental}, + {ID: "acct-malformed-requests", Name: "malformed", Algo: module.Incremental}, + {ID: "acct-unknown-types", Name: "unknown-types", Algo: module.Incremental}, + }, + }, + { + ID: "proxy_accounting", + Title: "Accounting", + Units: "packets/s", + Fam: "proxy accounting", + Ctx: "freeradius.proxy_accounting", + Dims: Dims{ + {ID: "proxy-accounting-requests", Name: "requests", Algo: module.Incremental}, + {ID: "proxy-accounting-responses", Name: "responses", Algo: module.Incremental}, + }, + }, + { + ID: "bad_proxy_accounting_requests", + Title: "Bad Accounting Requests", + Units: "packets/s", + Fam: "proxy accounting", + Ctx: "freeradius.proxy_bad_accounting", + Dims: Dims{ + {ID: "proxy-acct-dropped-requests", Name: "dropped", Algo: module.Incremental}, + {ID: "proxy-acct-duplicate-requests", Name: "duplicate", Algo: module.Incremental}, + {ID: "proxy-acct-invalid-requests", Name: "invalid", Algo: module.Incremental}, + {ID: "proxy-acct-malformed-requests", Name: "malformed", Algo: module.Incremental}, + {ID: "proxy-acct-unknown-types", Name: "unknown-types", Algo: module.Incremental}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/collect.go 
b/src/go/collectors/go.d.plugin/modules/freeradius/collect.go new file mode 100644 index 00000000000000..44cc44769dd0a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/collect.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import ( + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func (f *FreeRADIUS) collect() (map[string]int64, error) { + status, err := f.client.Status() + if err != nil { + return nil, err + } + + return stm.ToMap(status), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/config_schema.json b/src/go/collectors/go.d.plugin/modules/freeradius/config_schema.json new file mode 100644 index 00000000000000..b8bd25fa962105 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/config_schema.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/freeradius job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "secret": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "address", + "port", + "secret" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/freeradius.go b/src/go/collectors/go.d.plugin/modules/freeradius/freeradius.go new file mode 100644 index 00000000000000..5897917cf7be23 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/freeradius.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/go.d.plugin/modules/freeradius/api" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("freeradius", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *FreeRADIUS { + cfg := Config{ + Address: "127.0.0.1", + Port: 18121, + Secret: "adminsecret", + Timeout: web.Duration{Duration: time.Second}, + } + return &FreeRADIUS{ + Config: cfg, + } +} + +type ( + client interface { + Status() (*api.Status, error) + } + Config struct { + Address string + Port int + Secret string + Timeout web.Duration + } + FreeRADIUS struct { + module.Base + Config `yaml:",inline"` + client + } +) + +func (f FreeRADIUS) validateConfig() error { + if f.Address == "" { + return errors.New("address not set") + } + if f.Port == 0 { + return errors.New("port not set") + } + if f.Secret == "" { + return errors.New("secret not set") + } + return nil +} + +func (f *FreeRADIUS) initClient() { + f.client = api.New(api.Config{ + Address: f.Address, + Port: f.Port, + Secret: f.Secret, + Timeout: f.Timeout.Duration, + }) +} + +func (f *FreeRADIUS) Init() bool { + err := f.validateConfig() + if err != nil { + f.Errorf("error on validating config: %v", err) + return false + } + + f.initClient() + return true +} + +func (f FreeRADIUS) Check() bool { + return len(f.Collect()) > 0 +} + +func (FreeRADIUS) Charts() *Charts { + return charts.Copy() +} + +func (f *FreeRADIUS) Collect() map[string]int64 { + mx, err := f.collect() + if err != nil { + f.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (FreeRADIUS) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/freeradius_test.go 
b/src/go/collectors/go.d.plugin/modules/freeradius/freeradius_test.go new file mode 100644 index 00000000000000..b9432ec96a4f45 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/freeradius/freeradius_test.go @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import ( + "errors" + "testing" + + "github.com/netdata/go.d.plugin/modules/freeradius/api" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestFreeRADIUS_Init(t *testing.T) { + freeRADIUS := New() + + assert.True(t, freeRADIUS.Init()) +} + +func TestFreeRADIUS_Init_ReturnsFalseIfAddressNotSet(t *testing.T) { + freeRADIUS := New() + freeRADIUS.Address = "" + + assert.False(t, freeRADIUS.Init()) +} + +func TestFreeRADIUS_Init_ReturnsFalseIfPortNotSet(t *testing.T) { + freeRADIUS := New() + freeRADIUS.Port = 0 + + assert.False(t, freeRADIUS.Init()) +} + +func TestFreeRADIUS_Init_ReturnsFalseIfSecretNotSet(t *testing.T) { + freeRADIUS := New() + freeRADIUS.Secret = "" + + assert.False(t, freeRADIUS.Init()) +} + +func TestFreeRADIUS_Check(t *testing.T) { + freeRADIUS := New() + freeRADIUS.client = newOKMockClient() + + assert.True(t, freeRADIUS.Check()) +} + +func TestFreeRADIUS_Check_ReturnsFalseIfClientStatusReturnsError(t *testing.T) { + freeRADIUS := New() + freeRADIUS.client = newErrorMockClient() + + assert.False(t, freeRADIUS.Check()) +} + +func TestFreeRADIUS_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestFreeRADIUS_Collect(t *testing.T) { + freeRADIUS := New() + freeRADIUS.client = newOKMockClient() + + expected := map[string]int64{ + "access-requests": 1, + "access-accepts": 2, + "access-rejects": 3, + "access-challenges": 4, + "auth-responses": 5, + "auth-duplicate-requests": 6, + "auth-malformed-requests": 7, + "auth-invalid-requests": 8, + "auth-dropped-requests": 9, + "auth-unknown-types": 10, + "accounting-requests": 11, + "accounting-responses": 12, + "acct-duplicate-requests": 13, + "acct-malformed-requests": 14, + "acct-invalid-requests": 15, + "acct-dropped-requests": 16, + "acct-unknown-types": 17, + "proxy-access-requests": 18, + "proxy-access-accepts": 19, + "proxy-access-rejects": 20, + "proxy-access-challenges": 21, + "proxy-auth-responses": 22, + "proxy-auth-duplicate-requests": 23, + "proxy-auth-malformed-requests": 24, + "proxy-auth-invalid-requests": 25, + "proxy-auth-dropped-requests": 26, + "proxy-auth-unknown-types": 27, + "proxy-accounting-requests": 28, + "proxy-accounting-responses": 29, + "proxy-acct-duplicate-requests": 30, + "proxy-acct-malformed-requests": 31, + "proxy-acct-invalid-requests": 32, + "proxy-acct-dropped-requests": 33, + "proxy-acct-unknown-types": 34, + } + collected := freeRADIUS.Collect() + + assert.Equal(t, expected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, freeRADIUS, collected) +} + +func TestFreeRADIUS_Collect_ReturnsNilIfClientStatusReturnsError(t *testing.T) { + freeRADIUS := New() + freeRADIUS.client = newErrorMockClient() + + assert.Nil(t, freeRADIUS.Collect()) +} + +func TestFreeRADIUS_Cleanup(t *testing.T) { + New().Cleanup() +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, f *FreeRADIUS, collected map[string]int64) { + for _, chart := range *f.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range 
chart.Vars {
+			_, ok := collected[v.ID]
+			assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+		}
+	}
+}
+
+func newOKMockClient() *mockClient {
+	return &mockClient{}
+}
+
+func newErrorMockClient() *mockClient {
+	return &mockClient{errOnStatus: true}
+}
+
+type mockClient struct {
+	errOnStatus bool
+}
+
+func (m mockClient) Status() (*api.Status, error) {
+	if m.errOnStatus {
+		return nil, errors.New("mock Status error")
+	}
+
+	status := &api.Status{
+		AccessRequests: 1,
+		AccessAccepts: 2,
+		AccessRejects: 3,
+		AccessChallenges: 4,
+		AuthResponses: 5,
+		AuthDuplicateRequests: 6,
+		AuthMalformedRequests: 7,
+		AuthInvalidRequests: 8,
+		AuthDroppedRequests: 9,
+		AuthUnknownTypes: 10,
+		AccountingRequests: 11,
+		AccountingResponses: 12,
+		AcctDuplicateRequests: 13,
+		AcctMalformedRequests: 14,
+		AcctInvalidRequests: 15,
+		AcctDroppedRequests: 16,
+		AcctUnknownTypes: 17,
+		ProxyAccessRequests: 18,
+		ProxyAccessAccepts: 19,
+		ProxyAccessRejects: 20,
+		ProxyAccessChallenges: 21,
+		ProxyAuthResponses: 22,
+		ProxyAuthDuplicateRequests: 23,
+		ProxyAuthMalformedRequests: 24,
+		ProxyAuthInvalidRequests: 25,
+		ProxyAuthDroppedRequests: 26,
+		ProxyAuthUnknownTypes: 27,
+		ProxyAccountingRequests: 28,
+		ProxyAccountingResponses: 29,
+		ProxyAcctDuplicateRequests: 30,
+		ProxyAcctMalformedRequests: 31,
+		ProxyAcctInvalidRequests: 32,
+		ProxyAcctDroppedRequests: 33,
+		ProxyAcctUnknownTypes: 34,
+	}
+	return status, nil
+}
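The six accessors generated per vendor attribute above follow one fixed pattern: `_Add` appends a value, `_Get` returns the zero value when the attribute is absent, `_Gets` collects every occurrence, `_Lookup` reports absence via `radius.ErrNoAttribute`, and `_Set`/`_Del` replace or remove it. A minimal sketch of how they compose, assuming the helpers are exported from the `freeradius/api` package (the import path and secret here are illustrative, not taken from this diff):

```go
package main

import (
	"fmt"

	"layeh.com/radius"

	"github.com/netdata/go.d.plugin/modules/freeradius/api"
)

func main() {
	// FreeRADIUS statistics travel as vendor attributes on a Status-Server packet.
	p := radius.New(radius.CodeStatusServer, []byte("adminsecret"))

	// Each helper pins the vendor attribute type (e.g. 181 for Queue-PPS-In),
	// so callers never deal with raw attribute bytes.
	if err := api.FreeRADIUSQueuePPSIn_Add(p, 42); err != nil {
		panic(err)
	}
	fmt.Println(api.FreeRADIUSQueuePPSIn_Get(p)) // FreeRADIUSQueuePPSIn(42)

	// _Lookup distinguishes "attribute missing" from a genuine zero value.
	if _, err := api.FreeRADIUSQueuePPSOut_Lookup(p); err == radius.ErrNoAttribute {
		fmt.Println("Queue-PPS-Out is not present in the packet")
	}
}
```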
diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/integrations/freeradius.md b/src/go/collectors/go.d.plugin/modules/freeradius/integrations/freeradius.md
new file mode 100644
index 00000000000000..014a0577ce0e17
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/freeradius/integrations/freeradius.md
@@ -0,0 +1,199 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/freeradius/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/freeradius/metadata.yaml"
+sidebar_label: "FreeRADIUS"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# FreeRADIUS
+
+
+<img src="https://netdata.cloud/img/freeradius.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: freeradius
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors FreeRADIUS servers.
+
+It collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It automatically detects FreeRADIUS instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per FreeRADIUS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| freeradius.authentication | requests, responses | packets/s |
+| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |
+| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.proxy_authentication | requests, responses | packets/s |
+| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |
+| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.accounting | requests, responses | packets/s |
+| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.proxy_accounting | requests, responses | packets/s |
+| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable status server
+
+To enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/freeradius.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/freeradius.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address. | 127.0.0.1 | yes |
+| port | Server port. | 18121 | no |
+| secret | FreeRADIUS secret. | adminsecret | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1
+    port: 18121
+    secret: adminsecret
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1
+    port: 18121
+    secret: adminsecret
+
+  - name: remote
+    address: 192.0.2.1
+    port: 18121
+    secret: adminsecret
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m freeradius
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/freeradius/metadata.yaml b/src/go/collectors/go.d.plugin/modules/freeradius/metadata.yaml
new file mode 100644
index 00000000000000..5ecdcf41726e7b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/freeradius/metadata.yaml
@@ -0,0 +1,206 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-freeradius
+      plugin_name: go.d.plugin
+      module_name: freeradius
+      monitored_instance:
+        name: FreeRADIUS
+        link: https://freeradius.org/
+        categories:
+          - data-collection.authentication-and-authorization
+        icon_filename: freeradius.svg
+      keywords:
+        - freeradius
+        - radius
+      most_popular: false
+      info_provided_to_referring_integrations:
+        description: ""
+      related_resources:
+        integrations:
+          list: []
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors FreeRADIUS servers.
+
+          It collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.
+        method_description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            It automatically detects FreeRADIUS instances running on localhost.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list:
+          - title: Enable status server
+            description: |
+              To enable status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).
+      configuration:
+        file:
+          name: go.d/freeradius.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: Server address.
+              default_value: 127.0.0.1
+              required: true
+            - name: port
+              description: Server port.
+              default_value: 18121
+              required: false
+            - name: secret
+              description: FreeRADIUS secret.
+              default_value: adminsecret
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1
+                    port: 18121
+                    secret: adminsecret
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1
+                    port: 18121
+                    secret: adminsecret
+
+                  - name: remote
+                    address: 192.0.2.1
+                    port: 18121
+                    secret: adminsecret
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+ labels: [] + metrics: + - name: freeradius.authentication + description: Authentication + unit: packets/s + chart_type: line + dimensions: + - name: requests + - name: responses + - name: freeradius.authentication_access_responses + description: Authentication Responses + unit: packets/s + chart_type: line + dimensions: + - name: accepts + - name: rejects + - name: challenges + - name: freeradius.bad_authentication + description: Bad Authentication Requests + unit: packets/s + chart_type: line + dimensions: + - name: dropped + - name: duplicate + - name: invalid + - name: malformed + - name: unknown-types + - name: freeradius.proxy_authentication + description: Authentication + unit: packets/s + chart_type: line + dimensions: + - name: requests + - name: responses + - name: freeradius.proxy_authentication_access_responses + description: Authentication Responses + unit: packets/s + chart_type: line + dimensions: + - name: accepts + - name: rejects + - name: challenges + - name: freeradius.proxy_bad_authentication + description: Bad Authentication Requests + unit: packets/s + chart_type: line + dimensions: + - name: dropped + - name: duplicate + - name: invalid + - name: malformed + - name: unknown-types + - name: freeradius.accounting + description: Accounting + unit: packets/s + chart_type: line + dimensions: + - name: requests + - name: responses + - name: freeradius.bad_accounting + description: Bad Accounting Requests + unit: packets/s + chart_type: line + dimensions: + - name: dropped + - name: duplicate + - name: invalid + - name: malformed + - name: unknown-types + - name: freeradius.proxy_accounting + description: Accounting + unit: packets/s + chart_type: line + dimensions: + - name: requests + - name: responses + - name: freeradius.proxy_bad_accounting + description: Bad Accounting Requests + unit: packets/s + chart_type: line + dimensions: + - name: dropped + - name: duplicate + - name: invalid + - name: malformed + - name: unknown-types diff --git a/src/go/collectors/go.d.plugin/modules/geth/README.md b/src/go/collectors/go.d.plugin/modules/geth/README.md new file mode 120000 index 00000000000000..3a8eb0b689defe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/README.md @@ -0,0 +1 @@ +integrations/go-ethereum.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/geth/charts.go b/src/go/collectors/go.d.plugin/modules/geth/charts.go new file mode 100644 index 00000000000000..d0058e19dafbe4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/charts.go @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package geth + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + Charts = module.Charts + Chart = module.Chart + Dims = module.Dims + Dim = module.Dim +) + +var charts = Charts{ + chartAncientChainData.Copy(), + chartChaindataDisk.Copy(), + chartAncientChainDataRate.Copy(), + chartChaindataDiskRate.Copy(), + chartChainDataSize.Copy(), + chartChainHead.Copy(), + chartP2PNetwork.Copy(), + chartNumberOfPeers.Copy(), + chartp2pDialsServes.Copy(), + chartReorgs.Copy(), + chartReorgsBlocks.Copy(), + chartGoRoutines.Copy(), + chartTxPoolCurrent.Copy(), + chartTxPoolQueued.Copy(), + chartTxPoolPending.Copy(), + chartRpcInformation.Copy(), +} + +var ( + chartAncientChainDataRate = Chart{ + ID: "chaindata_ancient_rate", + Title: "Ancient Chaindata rate", + Units: "bytes/s", + Fam: "chaindata", + Ctx: "geth.eth_db_chaindata_ancient_io_rate", + Dims: Dims{ + {ID: ethDbChainDataAncientRead, Name: "reads", Algo: 
"incremental"}, + {ID: ethDbChainDataAncientWrite, Name: "writes", Mul: -1, Algo: "incremental"}, + }, + } + + chartAncientChainData = Chart{ + ID: "chaindata_ancient", + Title: "Session ancient Chaindata", + Units: "bytes", + Fam: "chaindata", + Ctx: "geth.eth_db_chaindata_ancient_io", + Dims: Dims{ + {ID: ethDbChainDataAncientRead, Name: "reads"}, + {ID: ethDbChainDataAncientWrite, Name: "writes", Mul: -1}, + }, + } + chartChaindataDisk = Chart{ + ID: "chaindata_disk", + Title: "Session chaindata on disk", + Units: "bytes", + Fam: "chaindata", + Ctx: "geth.eth_db_chaindata_disk_io", + Dims: Dims{ + {ID: ethDbChaindataDiskRead, Name: "reads"}, + {ID: ethDbChainDataDiskWrite, Name: "writes", Mul: -1}, + }, + } + chartGoRoutines = Chart{ + ID: "goroutines", + Title: "Number of goroutines", + Units: "goroutines", + Fam: "goroutines", + Ctx: "geth.goroutines", + Dims: Dims{ + {ID: goRoutines, Name: "goroutines"}, + }, + } + chartChaindataDiskRate = Chart{ + ID: "chaindata_disk_date", + Title: "On disk Chaindata rate", + Units: "bytes/s", + Fam: "chaindata", + Ctx: "geth.eth_db_chaindata_disk_io_rate", + Dims: Dims{ + {ID: ethDbChaindataDiskRead, Name: "reads", Algo: "incremental"}, + {ID: ethDbChainDataDiskWrite, Name: "writes", Mul: -1, Algo: "incremental"}, + }, + } + chartChainDataSize = Chart{ + ID: "chaindata_db_size", + Title: "Chaindata Size", + Units: "bytes", + Fam: "chaindata", + Ctx: "geth.chaindata_db_size", + Dims: Dims{ + {ID: ethDbChainDataDiskSize, Name: "levelDB"}, + {ID: ethDbChainDataAncientSize, Name: "ancientDB"}, + }, + } + chartChainHead = Chart{ + ID: "chainhead_overall", + Title: "Chainhead", + Units: "block", + Fam: "chainhead", + Ctx: "geth.chainhead", + Dims: Dims{ + {ID: chainHeadBlock, Name: "block"}, + {ID: chainHeadReceipt, Name: "receipt"}, + {ID: chainHeadHeader, Name: "header"}, + }, + } + chartTxPoolPending = Chart{ + ID: "txpoolpending", + Title: "Pending Transaction Pool", + Units: "transactions", + Fam: "tx_pool", + Ctx: "geth.tx_pool_pending", + Dims: Dims{ + {ID: txPoolInvalid, Name: "invalid"}, + {ID: txPoolPending, Name: "pending"}, + {ID: txPoolLocal, Name: "local"}, + {ID: txPoolPendingDiscard, Name: " discard"}, + {ID: txPoolNofunds, Name: "no funds"}, + {ID: txPoolPendingRatelimit, Name: "ratelimit"}, + {ID: txPoolPendingReplace, Name: "replace"}, + }, + } + chartTxPoolCurrent = Chart{ + ID: "txpoolcurrent", + Title: "Transaction Pool", + Units: "transactions", + Fam: "tx_pool", + Ctx: "geth.tx_pool_current", + Dims: Dims{ + {ID: txPoolInvalid, Name: "invalid"}, + {ID: txPoolPending, Name: "pending"}, + {ID: txPoolLocal, Name: "local"}, + {ID: txPoolNofunds, Name: "pool"}, + }, + } + chartTxPoolQueued = Chart{ + ID: "txpoolqueued", + Title: "Queued Transaction Pool", + Units: "transactions", + Fam: "tx_pool", + Ctx: "geth.tx_pool_queued", + Dims: Dims{ + {ID: txPoolQueuedDiscard, Name: "discard"}, + {ID: txPoolQueuedEviction, Name: "eviction"}, + {ID: txPoolQueuedNofunds, Name: "no_funds"}, + {ID: txPoolQueuedRatelimit, Name: "ratelimit"}, + }, + } + chartP2PNetwork = Chart{ + ID: "p2p_network", + Title: "P2P bandwidth", + Units: "bytes/s", + Fam: "p2p_bandwidth", + Ctx: "geth.p2p_bandwidth", + Dims: Dims{ + {ID: p2pIngress, Name: "ingress", Algo: "incremental"}, + {ID: p2pEgress, Name: "egress", Mul: -1, Algo: "incremental"}, + }, + } + chartReorgs = Chart{ + ID: "reorgs_executed", + Title: "Executed Reorgs", + Units: "reorgs", + Fam: "reorgs", + Ctx: "geth.reorgs", + Dims: Dims{ + {ID: reorgsExecuted, Name: "executed"}, + }, + } + 
+	chartReorgsBlocks = Chart{
+		ID: "reorgs_blocks",
+		Title: "Blocks Added/Removed from Reorg",
+		Units: "blocks",
+		Fam: "reorgs",
+		Ctx: "geth.reorgs_blocks",
+		Dims: Dims{
+			{ID: reorgsAdd, Name: "added"},
+			{ID: reorgsDropped, Name: "dropped"},
+		},
+	}
+
+	chartNumberOfPeers = Chart{
+		ID: "p2p_peers_number",
+		Title: "Number of Peers",
+		Units: "peers",
+		Fam: "p2p_peers",
+		Ctx: "geth.p2p_peers",
+		Dims: Dims{
+			{ID: p2pPeers, Name: "peers"},
+		},
+	}
+
+	chartp2pDialsServes = Chart{
+		ID: "p2p_dials_serves",
+		Title: "P2P Serves and Dials",
+		Units: "calls/s",
+		Fam: "p2p_peers",
+		Ctx: "geth.p2p_peers_calls",
+		Dims: Dims{
+			{ID: p2pDials, Name: "dials", Algo: "incremental"},
+			{ID: p2pServes, Name: "serves", Algo: "incremental"},
+		},
+	}
+	chartRpcInformation = Chart{
+		ID: "rpc_calls",
+		Title: "rpc calls",
+		Units: "calls/s",
+		Fam: "rpc",
+		Ctx: "geth.rpc_calls",
+		Dims: Dims{
+			{ID: rpcFailure, Name: "failed", Algo: "incremental"},
+			{ID: rpcSuccess, Name: "successful", Algo: "incremental"},
+		},
+	}
+)
diff --git a/src/go/collectors/go.d.plugin/modules/geth/collect.go b/src/go/collectors/go.d.plugin/modules/geth/collect.go
new file mode 100644
index 00000000000000..446155bd596771
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/geth/collect.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/stm"
+)
+
+func (g *Geth) collect() (map[string]int64, error) {
+	pms, err := g.prom.ScrapeSeries()
+	if err != nil {
+		return nil, err
+	}
+	mx := g.collectGeth(pms)
+
+	return stm.ToMap(mx), nil
+}
+
+func (g *Geth) collectGeth(pms prometheus.Series) map[string]float64 {
+	mx := make(map[string]float64)
+	g.collectChainData(mx, pms)
+	g.collectP2P(mx, pms)
+	g.collectTxPool(mx, pms)
+	g.collectRpc(mx, pms)
+	return mx
+}
+
+func (g *Geth) collectChainData(mx map[string]float64, pms prometheus.Series) {
+	pms = pms.FindByNames(
+		chainValidation,
+		chainWrite,
+		ethDbChainDataAncientRead,
+		ethDbChainDataAncientWrite,
+		ethDbChaindataDiskRead,
+		ethDbChainDataDiskWrite,
+		chainHeadBlock,
+		chainHeadHeader,
+		chainHeadReceipt,
+		ethDbChainDataAncientSize,
+		ethDbChainDataDiskSize,
+		reorgsAdd,
+		reorgsDropped,
+		reorgsExecuted,
+		goRoutines,
+	)
+	g.collectEth(mx, pms)
+
+}
+
+func (g *Geth) collectRpc(mx map[string]float64, pms prometheus.Series) {
+	pms = pms.FindByNames(
+		rpcRequests,
+		rpcSuccess,
+		rpcFailure,
+	)
+	g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectTxPool(mx map[string]float64, pms prometheus.Series) {
+	pms = pms.FindByNames(
+		txPoolInvalid,
+		txPoolPending,
+		txPoolLocal,
+		txPoolPendingDiscard,
+		txPoolNofunds,
+		txPoolPendingRatelimit,
+		txPoolPendingReplace,
+		txPoolQueuedDiscard,
+		txPoolQueuedEviction,
+		txPoolQueuedNofunds,
+		txPoolQueuedRatelimit,
+	)
+	g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectP2P(mx map[string]float64, pms prometheus.Series) {
+	pms = pms.FindByNames(
+		p2pDials,
+		p2pEgress,
+		p2pIngress,
+		p2pPeers,
+		p2pServes,
+	)
+	g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectEth(mx map[string]float64, pms prometheus.Series) {
+	for _, pm := range pms {
+		mx[pm.Name()] += pm.Value
+	}
+}
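All of the collect helpers above funnel into `collectEth`, which drops labels and sums every series by its bare metric name, so label variants of the same metric collapse into one value. A small self-contained sketch of that aggregation rule (the types are local stand-ins for illustration, not the actual go.d `prometheus` package):

```go
package main

import "fmt"

// series is a stand-in for one scraped Prometheus sample.
type series struct {
	name  string
	value float64
}

// collectByName mirrors collectEth: accumulate every sample under its bare name.
func collectByName(mx map[string]float64, sms []series) {
	for _, s := range sms {
		mx[s.name] += s.value
	}
}

func main() {
	mx := make(map[string]float64)
	collectByName(mx, []series{
		{name: "rpc_success", value: 10},
		{name: "rpc_success", value: 5}, // same metric from another label set
		{name: "rpc_failure", value: 1},
	})
	fmt.Println(mx["rpc_success"], mx["rpc_failure"]) // 15 1
}
```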
"http://json-schema.org/draft-07/schema#", + "title": "go.d/geth job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/geth/geth.go b/src/go/collectors/go.d.plugin/modules/geth/geth.go new file mode 100644 index 00000000000000..fe6b2bd9608997 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/geth.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package geth + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("geth", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Geth { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:6060/debug/metrics/prometheus", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + + return &Geth{ + Config: config, + charts: charts.Copy(), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + } + + Geth struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + charts *Charts + } +) + +func (g Geth) validateConfig() error { + if g.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (g *Geth) initClient() error { + client, err := web.NewHTTPClient(g.Client) + if err != nil { + return err + } + + g.prom = prometheus.New(client, g.Request) + return nil +} + +func (g *Geth) Init() bool { + if err := g.validateConfig(); err != nil { + g.Errorf("error on validating config: %g", err) + return false + } + if err := g.initClient(); err != nil { + g.Errorf("error on initializing client: %g", err) + return false + } + return true +} + +func (g *Geth) Check() bool { + return len(g.Collect()) > 0 +} + +func (g *Geth) Charts() *Charts { + return g.charts +} + +func (g *Geth) Collect() map[string]int64 { + mx, err := g.collect() + if err != nil { + g.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (Geth) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/geth/integrations/go-ethereum.md b/src/go/collectors/go.d.plugin/modules/geth/integrations/go-ethereum.md new file mode 100644 index 00000000000000..d0245834cf6e57 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/integrations/go-ethereum.md @@ -0,0 +1,217 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/geth/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/geth/metadata.yaml" +sidebar_label: "Go-ethereum" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain 
Servers" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Go-ethereum + + +<img src="https://netdata.cloud/img/geth.png" width="150"/> + + +Plugin: go.d.plugin +Module: geth + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Go-ethereum instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects Go-ethereum instances running on localhost. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Go-ethereum instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s | +| geth.eth_db_chaindata_ancient_io | reads, writes | bytes | +| geth.eth_db_chaindata_disk_io | reads, writes | bytes | +| geth.goroutines | goroutines | goroutines | +| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s | +| geth.chaindata_db_size | level_db, ancient_db | bytes | +| geth.chainhead | block, receipt, header | block | +| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions | +| geth.tx_pool_current | invalid, pending, local, pool | transactions | +| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions | +| geth.p2p_bandwidth | ingress, egress | bytes/s | +| geth.reorgs | executed | reorgs | +| geth.reorgs_blocks | added, dropped | blocks | +| geth.p2p_peers | peers | peers | +| geth.p2p_peers_calls | dials, serves | calls/s | +| geth.rpc_calls | failed, successful | calls/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/geth.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/geth.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + username: username + password: password + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + + - name: remote + url: http://192.0.2.1:6060/debug/metrics/prometheus + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m geth + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/geth/metadata.yaml b/src/go/collectors/go.d.plugin/modules/geth/metadata.yaml new file mode 100644 index 00000000000000..ef131776a70880 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/metadata.yaml @@ -0,0 +1,291 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-geth + plugin_name: go.d.plugin + module_name: geth + monitored_instance: + name: Go-ethereum + link: https://github.com/ethereum/go-ethereum + icon_filename: geth.png + categories: + - data-collection.blockchain-servers + keywords: + - geth + - ethereum + - blockchain + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Go-ethereum instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects Go-ethereum instances running on localhost. 
+ limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/geth.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:6060/debug/metrics/prometheus + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + username: username + password: password + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:6060/debug/metrics/prometheus + + - name: remote + url: http://192.0.2.1:6060/debug/metrics/prometheus + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: geth.eth_db_chaindata_ancient_io_rate + description: Ancient Chaindata rate + unit: bytes/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: geth.eth_db_chaindata_ancient_io + description: Session ancient Chaindata + unit: bytes + chart_type: line + dimensions: + - name: reads + - name: writes + - name: geth.eth_db_chaindata_disk_io + description: Session chaindata on disk + unit: bytes + chart_type: line + dimensions: + - name: reads + - name: writes + - name: geth.goroutines + description: Number of goroutines + unit: goroutines + chart_type: line + dimensions: + - name: goroutines + - name: geth.eth_db_chaindata_disk_io_rate + description: On disk Chaindata rate + unit: bytes/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: geth.chaindata_db_size + description: Chaindata Size + unit: bytes + chart_type: line + dimensions: + - name: level_db + - name: ancient_db + - name: geth.chainhead + description: Chainhead + unit: block + chart_type: line + dimensions: + - name: block + - name: receipt + - name: header + - name: geth.tx_pool_pending + description: Pending Transaction Pool + unit: transactions + chart_type: line + dimensions: + - name: invalid + - name: pending + - name: local + - name: discard + - name: no_funds + - name: ratelimit + - name: replace + - name: geth.tx_pool_current + description: Transaction Pool + unit: transactions + chart_type: line + dimensions: + - name: invalid + - name: pending + - name: local + - name: pool + - name: geth.tx_pool_queued + description: Queued Transaction Pool + unit: transactions + chart_type: line + dimensions: + - name: discard + - name: eviction + - name: no_funds + - name: ratelimit + - name: geth.p2p_bandwidth + description: P2P bandwidth + unit: bytes/s + chart_type: line + dimensions: + - name: ingress + - name: egress + - name: geth.reorgs + description: Executed Reorgs + unit: reorgs + chart_type: line + dimensions: + - name: executed + - name: geth.reorgs_blocks + description: Blocks Added/Removed from Reorg + unit: blocks + chart_type: line + dimensions: + - name: added + - name: dropped + - name: geth.p2p_peers + description: Number of Peers + unit: peers + chart_type: line + dimensions: + - name: peers + - name: geth.p2p_peers_calls + description: P2P Serves and Dials + unit: calls/s + chart_type: line + dimensions: + - name: dials + - name: serves + - name: geth.rpc_calls + description: rpc calls + unit: calls/s + chart_type: line + dimensions: + - name: failed + - name: successful diff --git a/src/go/collectors/go.d.plugin/modules/geth/metrics.go b/src/go/collectors/go.d.plugin/modules/geth/metrics.go new file mode 100644 index 00000000000000..642973d694ccb1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/metrics.go @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package geth + +// summary +const ( + chainValidation = "chain_validation" + chainWrite = "chain_write" + chainHeadBlock = "chain_head_block" + chainHeadHeader = "chain_head_header" + chainHeadReceipt = "chain_head_receipt" +) + +// + rate +const ( + ethDbChainDataAncientRead = "eth_db_chaindata_ancient_read" + ethDbChainDataAncientWrite = "eth_db_chaindata_ancient_write" + ethDbChaindataDiskRead = "eth_db_chaindata_disk_read" + ethDbChainDataDiskWrite = "eth_db_chaindata_disk_write" + ethDbChainDataDiskSize = "eth_db_chaindata_disk_size" + ethDbChainDataAncientSize = "eth_db_chaindata_ancient_size" + + txPoolInvalid = "txpool_invalid" + 
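// Note: these literals must match the metric names that geth exposes at
+	// /debug/metrics/prometheus: collect.go filters series with FindByNames
+	// and sums them by exact name, so a mismatched constant leaves the
+	// corresponding chart dimension empty.
+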
txPoolPending = "txpool_pending" + txPoolLocal = "txpool_local" + txPoolPendingDiscard = "txpool_pending_discard" + txPoolNofunds = "txpool_pending_nofunds" + txPoolPendingRatelimit = "txpool_pending_ratelimit" + txPoolPendingReplace = "txpool_pending_replace" + txPoolQueuedDiscard = "txpool_queued_discard" + txPoolQueuedEviction = "txpool_queued_eviction" + txPoolQueuedNofunds = "txpool_queued_nofunds" + txPoolQueuedRatelimit = "txpool_queued_ratelimit" +) + +const ( + // gauge + p2pEgress = "p2p_egress" + p2pIngress = "p2p_ingress" + + p2pPeers = "p2p_peers" + p2pServes = "p2p_serves" + p2pDials = "p2p_dials" + + rpcRequests = "rpc_requests" + rpcSuccess = "rpc_success" + rpcFailure = "rpc_failure" + + reorgsAdd = "chain_reorg_add" + reorgsExecuted = "chain_reorg_executes" + reorgsDropped = "chain_reorg_drop" + + goRoutines = "system_cpu_goroutines" +) diff --git a/src/go/collectors/go.d.plugin/modules/geth/testdata/metrics_geth.txt b/src/go/collectors/go.d.plugin/modules/geth/testdata/metrics_geth.txt new file mode 100644 index 00000000000000..055fea8933d4f4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/geth/testdata/metrics_geth.txt @@ -0,0 +1,1569 @@ +# TYPE chain_account_commits_count counter +chain_account_commits_count 0 + +# TYPE chain_account_commits summary IMP +chain_account_commits {quantile="0.5"} 0 +chain_account_commits {quantile="0.75"} 0 +chain_account_commits {quantile="0.95"} 0 +chain_account_commits {quantile="0.99"} 0 +chain_account_commits {quantile="0.999"} 0 +chain_account_commits {quantile="0.9999"} 0 + +# TYPE chain_account_hashes_count counter +chain_account_hashes_count 0 + +# TYPE chain_account_hashes summary +chain_account_hashes {quantile="0.5"} 0 +chain_account_hashes {quantile="0.75"} 0 +chain_account_hashes {quantile="0.95"} 0 +chain_account_hashes {quantile="0.99"} 0 +chain_account_hashes {quantile="0.999"} 0 +chain_account_hashes {quantile="0.9999"} 0 + +# TYPE chain_account_reads_count counter +chain_account_reads_count 0 + +# TYPE chain_account_reads summary IMP +chain_account_reads {quantile="0.5"} 0 +chain_account_reads {quantile="0.75"} 0 +chain_account_reads {quantile="0.95"} 0 +chain_account_reads {quantile="0.99"} 0 +chain_account_reads {quantile="0.999"} 0 +chain_account_reads {quantile="0.9999"} 0 + +# TYPE chain_account_updates_count counter +chain_account_updates_count 0 + +# TYPE chain_account_updates summary IMP +chain_account_updates {quantile="0.5"} 0 +chain_account_updates {quantile="0.75"} 0 +chain_account_updates {quantile="0.95"} 0 +chain_account_updates {quantile="0.99"} 0 +chain_account_updates {quantile="0.999"} 0 +chain_account_updates {quantile="0.9999"} 0 + +# TYPE chain_execution_count counter +chain_execution_count 0 + +# TYPE chain_execution summary IMP +chain_execution {quantile="0.5"} 0 +chain_execution {quantile="0.75"} 0 +chain_execution {quantile="0.95"} 0 +chain_execution {quantile="0.99"} 0 +chain_execution {quantile="0.999"} 0 +chain_execution {quantile="0.9999"} 0 +#--- +# TYPE chain_head_block gauge IMP +chain_head_block 0 + +# TYPE chain_head_header gauge IMP +chain_head_header 24576 + +# TYPE chain_head_receipt gauge IMP +chain_head_receipt 24576 +#--- +# TYPE chain_inserts_count counter +chain_inserts_count 0 + +# TYPE chain_inserts summary +chain_inserts {quantile="0.5"} 0 +chain_inserts {quantile="0.75"} 0 +chain_inserts {quantile="0.95"} 0 +chain_inserts {quantile="0.99"} 0 +chain_inserts {quantile="0.999"} 0 +chain_inserts {quantile="0.9999"} 0 + +# TYPE chain_prefetch_executes_count counter 
+chain_prefetch_executes_count 0 + +# TYPE chain_prefetch_executes summary +chain_prefetch_executes {quantile="0.5"} 0 +chain_prefetch_executes {quantile="0.75"} 0 +chain_prefetch_executes {quantile="0.95"} 0 +chain_prefetch_executes {quantile="0.99"} 0 +chain_prefetch_executes {quantile="0.999"} 0 +chain_prefetch_executes {quantile="0.9999"} 0 + +# TYPE chain_prefetch_interrupts gauge +chain_prefetch_interrupts 0 + +# TYPE chain_reorg_add gauge +chain_reorg_add 0 + +# TYPE chain_reorg_drop gauge +chain_reorg_drop 0 + +# TYPE chain_reorg_executes gauge +chain_reorg_executes 0 + +# TYPE chain_reorg_invalidTx gauge +chain_reorg_invalidTx 0 + +# TYPE chain_snapshot_account_reads_count counter +chain_snapshot_account_reads_count 0 + +# TYPE chain_snapshot_account_reads summary +chain_snapshot_account_reads {quantile="0.5"} 0 +chain_snapshot_account_reads {quantile="0.75"} 0 +chain_snapshot_account_reads {quantile="0.95"} 0 +chain_snapshot_account_reads {quantile="0.99"} 0 +chain_snapshot_account_reads {quantile="0.999"} 0 +chain_snapshot_account_reads {quantile="0.9999"} 0 + +# TYPE chain_snapshot_commits_count counter +chain_snapshot_commits_count 0 + +# TYPE chain_snapshot_commits summary +chain_snapshot_commits {quantile="0.5"} 0 +chain_snapshot_commits {quantile="0.75"} 0 +chain_snapshot_commits {quantile="0.95"} 0 +chain_snapshot_commits {quantile="0.99"} 0 +chain_snapshot_commits {quantile="0.999"} 0 +chain_snapshot_commits {quantile="0.9999"} 0 + +# TYPE chain_snapshot_storage_reads_count counter +chain_snapshot_storage_reads_count 0 + +# TYPE chain_snapshot_storage_reads summary IMP +chain_snapshot_storage_reads {quantile="0.5"} 0 +chain_snapshot_storage_reads {quantile="0.75"} 0 +chain_snapshot_storage_reads {quantile="0.95"} 0 +chain_snapshot_storage_reads {quantile="0.99"} 0 +chain_snapshot_storage_reads {quantile="0.999"} 0 +chain_snapshot_storage_reads {quantile="0.9999"} 0 + +# TYPE chain_storage_commits_count counter +chain_storage_commits_count 0 + +# TYPE chain_storage_commits summary IMP +chain_storage_commits {quantile="0.5"} 0 +chain_storage_commits {quantile="0.75"} 0 +chain_storage_commits {quantile="0.95"} 0 +chain_storage_commits {quantile="0.99"} 0 +chain_storage_commits {quantile="0.999"} 0 +chain_storage_commits {quantile="0.9999"} 0 + +# TYPE chain_storage_hashes_count counter +chain_storage_hashes_count 0 + +# TYPE chain_storage_hashes summary IMP +chain_storage_hashes {quantile="0.5"} 0 +chain_storage_hashes {quantile="0.75"} 0 +chain_storage_hashes {quantile="0.95"} 0 +chain_storage_hashes {quantile="0.99"} 0 +chain_storage_hashes {quantile="0.999"} 0 +chain_storage_hashes {quantile="0.9999"} 0 + +# TYPE chain_storage_reads_count counter +chain_storage_reads_count 0 + +# TYPE chain_storage_reads summary +chain_storage_reads {quantile="0.5"} 0 +chain_storage_reads {quantile="0.75"} 0 +chain_storage_reads {quantile="0.95"} 0 +chain_storage_reads {quantile="0.99"} 0 +chain_storage_reads {quantile="0.999"} 0 +chain_storage_reads {quantile="0.9999"} 0 + +# TYPE chain_storage_updates_count counter +chain_storage_updates_count 0 + +# TYPE chain_storage_updates summary IMP +chain_storage_updates {quantile="0.5"} 0 +chain_storage_updates {quantile="0.75"} 0 +chain_storage_updates {quantile="0.95"} 0 +chain_storage_updates {quantile="0.99"} 0 +chain_storage_updates {quantile="0.999"} 0 +chain_storage_updates {quantile="0.9999"} 0 + +# TYPE chain_validation_count counter +chain_validation_count 0 + +# TYPE chain_validation summary IMP +chain_validation {quantile="0.5"} 0 
+chain_validation {quantile="0.75"} 0 +chain_validation {quantile="0.95"} 0 +chain_validation {quantile="0.99"} 0 +chain_validation {quantile="0.999"} 0 +chain_validation {quantile="0.9999"} 0 + +# TYPE chain_write_count counter +chain_write_count 0 + +# TYPE chain_write summary IMP +chain_write {quantile="0.5"} 0 +chain_write {quantile="0.75"} 0 +chain_write {quantile="0.95"} 0 +chain_write {quantile="0.99"} 0 +chain_write {quantile="0.999"} 0 +chain_write {quantile="0.9999"} 0 + +# TYPE db_preimage_hits gauge +db_preimage_hits 8893 + +# TYPE db_preimage_total gauge +db_preimage_total 8893 + +# TYPE eth_db_chaindata_ancient_read gauge IMP + rate +eth_db_chaindata_ancient_read 954 + +# TYPE eth_db_chaindata_ancient_size gauge +eth_db_chaindata_ancient_size 9901428 + +# TYPE eth_db_chaindata_ancient_write gauge IMP + rate +eth_db_chaindata_ancient_write 7256150 + +# TYPE eth_db_chaindata_compact_input gauge +eth_db_chaindata_compact_input 0 + +# TYPE eth_db_chaindata_compact_level0 gauge +eth_db_chaindata_compact_level0 0 + +# TYPE eth_db_chaindata_compact_memory gauge +eth_db_chaindata_compact_memory 0 + +# TYPE eth_db_chaindata_compact_nonlevel0 gauge +eth_db_chaindata_compact_nonlevel0 0 + +# TYPE eth_db_chaindata_compact_output gauge +eth_db_chaindata_compact_output 0 + +# TYPE eth_db_chaindata_compact_seek gauge +eth_db_chaindata_compact_seek 0 + +# TYPE eth_db_chaindata_compact_time gauge +eth_db_chaindata_compact_time 0 + +# TYPE eth_db_chaindata_compact_writedelay_counter gauge +eth_db_chaindata_compact_writedelay_counter 0 + +# TYPE eth_db_chaindata_compact_writedelay_duration gauge +eth_db_chaindata_compact_writedelay_duration 0 + +# TYPE eth_db_chaindata_disk_read gauge IMP + rate +eth_db_chaindata_disk_read 0 + +# TYPE eth_db_chaindata_disk_size gauge +eth_db_chaindata_disk_size 0 + +# TYPE eth_db_chaindata_disk_write gauge IMP + rate +eth_db_chaindata_disk_write 10028946 + +# TYPE eth_downloader_bodies_drop gauge +eth_downloader_bodies_drop 0 + +# TYPE eth_downloader_bodies_in gauge +eth_downloader_bodies_in 2061 + +# TYPE eth_downloader_bodies_req_count counter +eth_downloader_bodies_req_count 100 + +# TYPE eth_downloader_bodies_req summary +eth_downloader_bodies_req {quantile="0.5"} 1.73698035e+07 +eth_downloader_bodies_req {quantile="0.75"} 2.534998e+07 +eth_downloader_bodies_req {quantile="0.95"} 2.806964048999994e+08 +eth_downloader_bodies_req {quantile="0.99"} 7.47070292879998e+08 +eth_downloader_bodies_req {quantile="0.999"} 7.51141436e+08 +eth_downloader_bodies_req {quantile="0.9999"} 7.51141436e+08 + +# TYPE eth_downloader_bodies_timeout gauge +eth_downloader_bodies_timeout 0 + +# TYPE eth_downloader_headers_drop gauge +eth_downloader_headers_drop 0 + +# TYPE eth_downloader_headers_in gauge +eth_downloader_headers_in 20133 + +# TYPE eth_downloader_headers_req_count counter +eth_downloader_headers_req_count 129 + +# TYPE eth_downloader_headers_req summary +eth_downloader_headers_req {quantile="0.5"} 4.0981132e+07 +eth_downloader_headers_req {quantile="0.75"} 4.5769116e+07 +eth_downloader_headers_req {quantile="0.95"} 2.53663427e+08 +eth_downloader_headers_req {quantile="0.99"} 6.901528164999979e+08 +eth_downloader_headers_req {quantile="0.999"} 7.45691875e+08 +eth_downloader_headers_req {quantile="0.9999"} 7.45691875e+08 + +# TYPE eth_downloader_headers_timeout gauge +eth_downloader_headers_timeout 0 + +# TYPE eth_downloader_receipts_drop gauge +eth_downloader_receipts_drop 0 + +# TYPE eth_downloader_receipts_in gauge +eth_downloader_receipts_in 0 + +# TYPE 
eth_downloader_receipts_req_count counter +eth_downloader_receipts_req_count 0 + +# TYPE eth_downloader_receipts_req summary +eth_downloader_receipts_req {quantile="0.5"} 0 +eth_downloader_receipts_req {quantile="0.75"} 0 +eth_downloader_receipts_req {quantile="0.95"} 0 +eth_downloader_receipts_req {quantile="0.99"} 0 +eth_downloader_receipts_req {quantile="0.999"} 0 +eth_downloader_receipts_req {quantile="0.9999"} 0 + +# TYPE eth_downloader_receipts_timeout gauge +eth_downloader_receipts_timeout 0 + +# TYPE eth_downloader_states_drop gauge +eth_downloader_states_drop 0 + +# TYPE eth_downloader_states_in gauge +eth_downloader_states_in 0 + +# TYPE eth_downloader_throttle gauge +eth_downloader_throttle 0 + +# TYPE eth_fetcher_block_announces_dos gauge +eth_fetcher_block_announces_dos 0 + +# TYPE eth_fetcher_block_announces_drop gauge +eth_fetcher_block_announces_drop 0 + +# TYPE eth_fetcher_block_announces_in gauge +eth_fetcher_block_announces_in 0 + +# TYPE eth_fetcher_block_announces_out_count counter +eth_fetcher_block_announces_out_count 0 + +# TYPE eth_fetcher_block_announces_out summary +eth_fetcher_block_announces_out {quantile="0.5"} 0 +eth_fetcher_block_announces_out {quantile="0.75"} 0 +eth_fetcher_block_announces_out {quantile="0.95"} 0 +eth_fetcher_block_announces_out {quantile="0.99"} 0 +eth_fetcher_block_announces_out {quantile="0.999"} 0 +eth_fetcher_block_announces_out {quantile="0.9999"} 0 + +# TYPE eth_fetcher_block_bodies gauge +eth_fetcher_block_bodies 0 + +# TYPE eth_fetcher_block_broadcasts_dos gauge +eth_fetcher_block_broadcasts_dos 0 + +# TYPE eth_fetcher_block_broadcasts_drop gauge +eth_fetcher_block_broadcasts_drop 0 + +# TYPE eth_fetcher_block_broadcasts_in gauge +eth_fetcher_block_broadcasts_in 0 + +# TYPE eth_fetcher_block_broadcasts_out_count counter +eth_fetcher_block_broadcasts_out_count 0 + +# TYPE eth_fetcher_block_broadcasts_out summary +eth_fetcher_block_broadcasts_out {quantile="0.5"} 0 +eth_fetcher_block_broadcasts_out {quantile="0.75"} 0 +eth_fetcher_block_broadcasts_out {quantile="0.95"} 0 +eth_fetcher_block_broadcasts_out {quantile="0.99"} 0 +eth_fetcher_block_broadcasts_out {quantile="0.999"} 0 +eth_fetcher_block_broadcasts_out {quantile="0.9999"} 0 + +# TYPE eth_fetcher_block_filter_bodies_in gauge +eth_fetcher_block_filter_bodies_in 2061 + +# TYPE eth_fetcher_block_filter_bodies_out gauge +eth_fetcher_block_filter_bodies_out 2061 + +# TYPE eth_fetcher_block_filter_headers_in gauge +eth_fetcher_block_filter_headers_in 23 + +# TYPE eth_fetcher_block_filter_headers_out gauge +eth_fetcher_block_filter_headers_out 23 + +# TYPE eth_fetcher_block_headers gauge +eth_fetcher_block_headers 0 + +# TYPE eth_fetcher_transaction_announces_dos gauge +eth_fetcher_transaction_announces_dos 0 + +# TYPE eth_fetcher_transaction_announces_in gauge +eth_fetcher_transaction_announces_in 0 + +# TYPE eth_fetcher_transaction_announces_known gauge +eth_fetcher_transaction_announces_known 0 + +# TYPE eth_fetcher_transaction_announces_underpriced gauge +eth_fetcher_transaction_announces_underpriced 0 + +# TYPE eth_fetcher_transaction_broadcasts_in gauge +eth_fetcher_transaction_broadcasts_in 0 + +# TYPE eth_fetcher_transaction_broadcasts_known gauge +eth_fetcher_transaction_broadcasts_known 0 + +# TYPE eth_fetcher_transaction_broadcasts_otherreject gauge +eth_fetcher_transaction_broadcasts_otherreject 0 + +# TYPE eth_fetcher_transaction_broadcasts_underpriced gauge +eth_fetcher_transaction_broadcasts_underpriced 0 + +# TYPE eth_fetcher_transaction_fetching_hashes gauge 
+eth_fetcher_transaction_fetching_hashes 0 + +# TYPE eth_fetcher_transaction_fetching_peers gauge +eth_fetcher_transaction_fetching_peers 0 + +# TYPE eth_fetcher_transaction_queueing_hashes gauge +eth_fetcher_transaction_queueing_hashes 0 + +# TYPE eth_fetcher_transaction_queueing_peers gauge +eth_fetcher_transaction_queueing_peers 0 + +# TYPE eth_fetcher_transaction_replies_in gauge +eth_fetcher_transaction_replies_in 0 + +# TYPE eth_fetcher_transaction_replies_known gauge +eth_fetcher_transaction_replies_known 0 + +# TYPE eth_fetcher_transaction_replies_otherreject gauge +eth_fetcher_transaction_replies_otherreject 0 + +# TYPE eth_fetcher_transaction_replies_underpriced gauge +eth_fetcher_transaction_replies_underpriced 0 + +# TYPE eth_fetcher_transaction_request_done gauge +eth_fetcher_transaction_request_done 0 + +# TYPE eth_fetcher_transaction_request_fail gauge +eth_fetcher_transaction_request_fail 0 + +# TYPE eth_fetcher_transaction_request_out gauge +eth_fetcher_transaction_request_out 0 + +# TYPE eth_fetcher_transaction_request_timeout gauge +eth_fetcher_transaction_request_timeout 0 + +# TYPE eth_fetcher_transaction_waiting_hashes gauge +eth_fetcher_transaction_waiting_hashes 0 + +# TYPE eth_fetcher_transaction_waiting_peers gauge +eth_fetcher_transaction_waiting_peers 0 + +# TYPE les_client_req_rtt_count counter +les_client_req_rtt_count 0 + +# TYPE les_client_req_rtt summary +les_client_req_rtt {quantile="0.5"} 0 +les_client_req_rtt {quantile="0.75"} 0 +les_client_req_rtt {quantile="0.95"} 0 +les_client_req_rtt {quantile="0.99"} 0 +les_client_req_rtt {quantile="0.999"} 0 +les_client_req_rtt {quantile="0.9999"} 0 + +# TYPE les_client_req_sendDelay_count counter +les_client_req_sendDelay_count 0 + +# TYPE les_client_req_sendDelay summary +les_client_req_sendDelay {quantile="0.5"} 0 +les_client_req_sendDelay {quantile="0.75"} 0 +les_client_req_sendDelay {quantile="0.95"} 0 +les_client_req_sendDelay {quantile="0.99"} 0 +les_client_req_sendDelay {quantile="0.999"} 0 +les_client_req_sendDelay {quantile="0.9999"} 0 + +# TYPE les_client_serverPool_connected gauge +les_client_serverPool_connected 0 + +# TYPE les_client_serverPool_dialed gauge +les_client_serverPool_dialed 0 + +# TYPE les_client_serverPool_selectable gauge +les_client_serverPool_selectable 0 + +# TYPE les_client_serverPool_sessionValue gauge +les_client_serverPool_sessionValue 0 + +# TYPE les_client_serverPool_timeout gauge +les_client_serverPool_timeout 0 + +# TYPE les_client_serverPool_totalValue gauge +les_client_serverPool_totalValue 0 + +# TYPE les_connection_duration_count counter +les_connection_duration_count 0 + +# TYPE les_connection_duration summary +les_connection_duration {quantile="0.5"} 0 +les_connection_duration {quantile="0.75"} 0 +les_connection_duration {quantile="0.95"} 0 +les_connection_duration {quantile="0.99"} 0 +les_connection_duration {quantile="0.999"} 0 +les_connection_duration {quantile="0.9999"} 0 + +# TYPE les_connection_server gauge +les_connection_server 0 + +# TYPE les_misc_in_packets_body gauge +les_misc_in_packets_body 0 + +# TYPE les_misc_in_packets_code gauge +les_misc_in_packets_code 0 + +# TYPE les_misc_in_packets_header gauge +les_misc_in_packets_header 0 + +# TYPE les_misc_in_packets_helperTrie gauge +les_misc_in_packets_helperTrie 0 + +# TYPE les_misc_in_packets_proof gauge +les_misc_in_packets_proof 0 + +# TYPE les_misc_in_packets_receipt gauge +les_misc_in_packets_receipt 0 + +# TYPE les_misc_in_packets_total gauge +les_misc_in_packets_total 0 + +# TYPE 
les_misc_in_packets_txStatus gauge +les_misc_in_packets_txStatus 0 + +# TYPE les_misc_in_packets_txs gauge +les_misc_in_packets_txs 0 + +# TYPE les_misc_in_traffic_body gauge +les_misc_in_traffic_body 0 + +# TYPE les_misc_in_traffic_code gauge +les_misc_in_traffic_code 0 + +# TYPE les_misc_in_traffic_header gauge +les_misc_in_traffic_header 0 + +# TYPE les_misc_in_traffic_helperTrie gauge +les_misc_in_traffic_helperTrie 0 + +# TYPE les_misc_in_traffic_proof gauge +les_misc_in_traffic_proof 0 + +# TYPE les_misc_in_traffic_receipt gauge +les_misc_in_traffic_receipt 0 + +# TYPE les_misc_in_traffic_total gauge +les_misc_in_traffic_total 0 + +# TYPE les_misc_in_traffic_txStatus gauge +les_misc_in_traffic_txStatus 0 + +# TYPE les_misc_in_traffic_txs gauge +les_misc_in_traffic_txs 0 + +# TYPE les_misc_out_packets_body gauge +les_misc_out_packets_body 0 + +# TYPE les_misc_out_packets_code gauge +les_misc_out_packets_code 0 + +# TYPE les_misc_out_packets_header gauge +les_misc_out_packets_header 0 + +# TYPE les_misc_out_packets_helperTrie gauge +les_misc_out_packets_helperTrie 0 + +# TYPE les_misc_out_packets_proof gauge +les_misc_out_packets_proof 0 + +# TYPE les_misc_out_packets_receipt gauge +les_misc_out_packets_receipt 0 + +# TYPE les_misc_out_packets_total gauge +les_misc_out_packets_total 0 + +# TYPE les_misc_out_packets_txStatus gauge +les_misc_out_packets_txStatus 0 + +# TYPE les_misc_out_packets_txs gauge +les_misc_out_packets_txs 0 + +# TYPE les_misc_out_traffic_body gauge +les_misc_out_traffic_body 0 + +# TYPE les_misc_out_traffic_code gauge +les_misc_out_traffic_code 0 + +# TYPE les_misc_out_traffic_header gauge +les_misc_out_traffic_header 0 + +# TYPE les_misc_out_traffic_helperTrie gauge +les_misc_out_traffic_helperTrie 0 + +# TYPE les_misc_out_traffic_proof gauge +les_misc_out_traffic_proof 0 + +# TYPE les_misc_out_traffic_receipt gauge +les_misc_out_traffic_receipt 0 + +# TYPE les_misc_out_traffic_total gauge +les_misc_out_traffic_total 0 + +# TYPE les_misc_out_traffic_txStatus gauge +les_misc_out_traffic_txStatus 0 + +# TYPE les_misc_out_traffic_txs gauge +les_misc_out_traffic_txs 0 + +# TYPE les_misc_serve_body_count counter +les_misc_serve_body_count 0 + +# TYPE les_misc_serve_body summary +les_misc_serve_body {quantile="0.5"} 0 +les_misc_serve_body {quantile="0.75"} 0 +les_misc_serve_body {quantile="0.95"} 0 +les_misc_serve_body {quantile="0.99"} 0 +les_misc_serve_body {quantile="0.999"} 0 +les_misc_serve_body {quantile="0.9999"} 0 + +# TYPE les_misc_serve_code_count counter +les_misc_serve_code_count 0 + +# TYPE les_misc_serve_code summary +les_misc_serve_code {quantile="0.5"} 0 +les_misc_serve_code {quantile="0.75"} 0 +les_misc_serve_code {quantile="0.95"} 0 +les_misc_serve_code {quantile="0.99"} 0 +les_misc_serve_code {quantile="0.999"} 0 +les_misc_serve_code {quantile="0.9999"} 0 + +# TYPE les_misc_serve_header_count counter +les_misc_serve_header_count 0 + +# TYPE les_misc_serve_header summary +les_misc_serve_header {quantile="0.5"} 0 +les_misc_serve_header {quantile="0.75"} 0 +les_misc_serve_header {quantile="0.95"} 0 +les_misc_serve_header {quantile="0.99"} 0 +les_misc_serve_header {quantile="0.999"} 0 +les_misc_serve_header {quantile="0.9999"} 0 + +# TYPE les_misc_serve_helperTrie_count counter +les_misc_serve_helperTrie_count 0 + +# TYPE les_misc_serve_helperTrie summary +les_misc_serve_helperTrie {quantile="0.5"} 0 +les_misc_serve_helperTrie {quantile="0.75"} 0 +les_misc_serve_helperTrie {quantile="0.95"} 0 +les_misc_serve_helperTrie {quantile="0.99"} 0 
+les_misc_serve_helperTrie {quantile="0.999"} 0 +les_misc_serve_helperTrie {quantile="0.9999"} 0 + +# TYPE les_misc_serve_proof_count counter +les_misc_serve_proof_count 0 + +# TYPE les_misc_serve_proof summary +les_misc_serve_proof {quantile="0.5"} 0 +les_misc_serve_proof {quantile="0.75"} 0 +les_misc_serve_proof {quantile="0.95"} 0 +les_misc_serve_proof {quantile="0.99"} 0 +les_misc_serve_proof {quantile="0.999"} 0 +les_misc_serve_proof {quantile="0.9999"} 0 + +# TYPE les_misc_serve_receipt_count counter +les_misc_serve_receipt_count 0 + +# TYPE les_misc_serve_receipt summary +les_misc_serve_receipt {quantile="0.5"} 0 +les_misc_serve_receipt {quantile="0.75"} 0 +les_misc_serve_receipt {quantile="0.95"} 0 +les_misc_serve_receipt {quantile="0.99"} 0 +les_misc_serve_receipt {quantile="0.999"} 0 +les_misc_serve_receipt {quantile="0.9999"} 0 + +# TYPE les_misc_serve_txStatus_count counter +les_misc_serve_txStatus_count 0 + +# TYPE les_misc_serve_txStatus summary +les_misc_serve_txStatus {quantile="0.5"} 0 +les_misc_serve_txStatus {quantile="0.75"} 0 +les_misc_serve_txStatus {quantile="0.95"} 0 +les_misc_serve_txStatus {quantile="0.99"} 0 +les_misc_serve_txStatus {quantile="0.999"} 0 +les_misc_serve_txStatus {quantile="0.9999"} 0 + +# TYPE les_misc_serve_txs_count counter +les_misc_serve_txs_count 0 + +# TYPE les_misc_serve_txs summary +les_misc_serve_txs {quantile="0.5"} 0 +les_misc_serve_txs {quantile="0.75"} 0 +les_misc_serve_txs {quantile="0.95"} 0 +les_misc_serve_txs {quantile="0.99"} 0 +les_misc_serve_txs {quantile="0.999"} 0 +les_misc_serve_txs {quantile="0.9999"} 0 + +# TYPE les_server_blockProcessingTime_count counter +les_server_blockProcessingTime_count 0 + +# TYPE les_server_blockProcessingTime summary IMP +les_server_blockProcessingTime {quantile="0.5"} 0 +les_server_blockProcessingTime {quantile="0.75"} 0 +les_server_blockProcessingTime {quantile="0.95"} 0 +les_server_blockProcessingTime {quantile="0.99"} 0 +les_server_blockProcessingTime {quantile="0.999"} 0 +les_server_blockProcessingTime {quantile="0.9999"} 0 + +# TYPE les_server_clientEvent_error gauge +les_server_clientEvent_error 0 + +# TYPE les_server_clientEvent_freeze gauge +les_server_clientEvent_freeze 0 + +# TYPE les_server_globalFactor gauge +les_server_globalFactor 0 + +# TYPE les_server_recentRequestEstimated gauge +les_server_recentRequestEstimated 0 + +# TYPE les_server_recentRequestServed gauge +les_server_recentRequestServed 0 + +# TYPE les_server_req_avgEstimatedTime gauge +les_server_req_avgEstimatedTime 0 + +# TYPE les_server_req_avgServedTime gauge +les_server_req_avgServedTime 0 + +# TYPE les_server_req_estimatedTime_count counter +les_server_req_estimatedTime_count 0 + +# TYPE les_server_req_estimatedTime summary +les_server_req_estimatedTime {quantile="0.5"} 0 +les_server_req_estimatedTime {quantile="0.75"} 0 +les_server_req_estimatedTime {quantile="0.95"} 0 +les_server_req_estimatedTime {quantile="0.99"} 0 +les_server_req_estimatedTime {quantile="0.999"} 0 +les_server_req_estimatedTime {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_count counter +les_server_req_relative_count 0 + +# TYPE les_server_req_relative summary +les_server_req_relative {quantile="0.5"} 0 +les_server_req_relative {quantile="0.75"} 0 +les_server_req_relative {quantile="0.95"} 0 +les_server_req_relative {quantile="0.99"} 0 +les_server_req_relative {quantile="0.999"} 0 +les_server_req_relative {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_body_count counter +les_server_req_relative_body_count 0 + +# TYPE 
les_server_req_relative_body summary +les_server_req_relative_body {quantile="0.5"} 0 +les_server_req_relative_body {quantile="0.75"} 0 +les_server_req_relative_body {quantile="0.95"} 0 +les_server_req_relative_body {quantile="0.99"} 0 +les_server_req_relative_body {quantile="0.999"} 0 +les_server_req_relative_body {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_code_count counter +les_server_req_relative_code_count 0 + +# TYPE les_server_req_relative_code summary +les_server_req_relative_code {quantile="0.5"} 0 +les_server_req_relative_code {quantile="0.75"} 0 +les_server_req_relative_code {quantile="0.95"} 0 +les_server_req_relative_code {quantile="0.99"} 0 +les_server_req_relative_code {quantile="0.999"} 0 +les_server_req_relative_code {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_header_count counter +les_server_req_relative_header_count 0 + +# TYPE les_server_req_relative_header summary +les_server_req_relative_header {quantile="0.5"} 0 +les_server_req_relative_header {quantile="0.75"} 0 +les_server_req_relative_header {quantile="0.95"} 0 +les_server_req_relative_header {quantile="0.99"} 0 +les_server_req_relative_header {quantile="0.999"} 0 +les_server_req_relative_header {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_helperTrie_count counter +les_server_req_relative_helperTrie_count 0 + +# TYPE les_server_req_relative_helperTrie summary +les_server_req_relative_helperTrie {quantile="0.5"} 0 +les_server_req_relative_helperTrie {quantile="0.75"} 0 +les_server_req_relative_helperTrie {quantile="0.95"} 0 +les_server_req_relative_helperTrie {quantile="0.99"} 0 +les_server_req_relative_helperTrie {quantile="0.999"} 0 +les_server_req_relative_helperTrie {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_proof_count counter +les_server_req_relative_proof_count 0 + +# TYPE les_server_req_relative_proof summary +les_server_req_relative_proof {quantile="0.5"} 0 +les_server_req_relative_proof {quantile="0.75"} 0 +les_server_req_relative_proof {quantile="0.95"} 0 +les_server_req_relative_proof {quantile="0.99"} 0 +les_server_req_relative_proof {quantile="0.999"} 0 +les_server_req_relative_proof {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_receipt_count counter +les_server_req_relative_receipt_count 0 + +# TYPE les_server_req_relative_receipt summary +les_server_req_relative_receipt {quantile="0.5"} 0 +les_server_req_relative_receipt {quantile="0.75"} 0 +les_server_req_relative_receipt {quantile="0.95"} 0 +les_server_req_relative_receipt {quantile="0.99"} 0 +les_server_req_relative_receipt {quantile="0.999"} 0 +les_server_req_relative_receipt {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_txStatus_count counter +les_server_req_relative_txStatus_count 0 + +# TYPE les_server_req_relative_txStatus summary +les_server_req_relative_txStatus {quantile="0.5"} 0 +les_server_req_relative_txStatus {quantile="0.75"} 0 +les_server_req_relative_txStatus {quantile="0.95"} 0 +les_server_req_relative_txStatus {quantile="0.99"} 0 +les_server_req_relative_txStatus {quantile="0.999"} 0 +les_server_req_relative_txStatus {quantile="0.9999"} 0 + +# TYPE les_server_req_relative_txs_count counter +les_server_req_relative_txs_count 0 + +# TYPE les_server_req_relative_txs summary +les_server_req_relative_txs {quantile="0.5"} 0 +les_server_req_relative_txs {quantile="0.75"} 0 +les_server_req_relative_txs {quantile="0.95"} 0 +les_server_req_relative_txs {quantile="0.99"} 0 +les_server_req_relative_txs {quantile="0.999"} 0 +les_server_req_relative_txs {quantile="0.9999"} 0 
+ +# TYPE les_server_req_servedTime_count counter +les_server_req_servedTime_count 0 + +# TYPE les_server_req_servedTime summary +les_server_req_servedTime {quantile="0.5"} 0 +les_server_req_servedTime {quantile="0.75"} 0 +les_server_req_servedTime {quantile="0.95"} 0 +les_server_req_servedTime {quantile="0.99"} 0 +les_server_req_servedTime {quantile="0.999"} 0 +les_server_req_servedTime {quantile="0.9999"} 0 + +# TYPE les_server_servingQueue_queued gauge +les_server_servingQueue_queued 0 + +# TYPE les_server_servingQueue_served gauge +les_server_servingQueue_served 0 + +# TYPE les_server_totalCapacity gauge +les_server_totalCapacity 0 + +# TYPE les_server_totalRecharge gauge +les_server_totalRecharge 0 + +# TYPE p2p_dials gauge IMP +p2p_dials 69 + +#IMP all egress + +# TYPE p2p_egress gauge IMP +p2p_egress 134834 + +# TYPE p2p_egress_eth_65_0x00 gauge +p2p_egress_eth_65_0x00 177 + +# TYPE p2p_egress_eth_65_0x00_packets gauge +p2p_egress_eth_65_0x00_packets 3 + +# TYPE p2p_egress_eth_65_0x03 gauge +p2p_egress_eth_65_0x03 1315 + +# TYPE p2p_egress_eth_65_0x03_packets gauge +p2p_egress_eth_65_0x03_packets 132 + +# TYPE p2p_egress_eth_65_0x04 gauge +p2p_egress_eth_65_0x04 3 + +# TYPE p2p_egress_eth_65_0x04_packets gauge +p2p_egress_eth_65_0x04_packets 1 + +# TYPE p2p_egress_eth_65_0x05 gauge +p2p_egress_eth_65_0x05 68658 + +# TYPE p2p_egress_eth_65_0x05_packets gauge +p2p_egress_eth_65_0x05_packets 83 + +# TYPE p2p_egress_eth_66_0x00 gauge +p2p_egress_eth_66_0x00 327 + +# TYPE p2p_egress_eth_66_0x00_packets gauge +p2p_egress_eth_66_0x00_packets 5 + +# TYPE p2p_egress_eth_66_0x03 gauge +p2p_egress_eth_66_0x03 20 + +# TYPE p2p_egress_eth_66_0x03_packets gauge +p2p_egress_eth_66_0x03_packets 1 + +# TYPE p2p_egress_eth_66_0x04 gauge +p2p_egress_eth_66_0x04 0 + +# TYPE p2p_egress_eth_66_0x04_packets gauge +p2p_egress_eth_66_0x04_packets 0 + +# TYPE p2p_egress_eth_66_0x05 gauge +p2p_egress_eth_66_0x05 0 + +# TYPE p2p_egress_eth_66_0x05_packets gauge +p2p_egress_eth_66_0x05_packets 0 + +# TYPE p2p_egress_snap_1_0x00 gauge +p2p_egress_snap_1_0x00 0 + +# TYPE p2p_egress_snap_1_0x00_packets gauge +p2p_egress_snap_1_0x00_packets 0 + +# TYPE p2p_handle_eth_65_0x01_count counter +p2p_handle_eth_65_0x01_count 1 + +# TYPE p2p_handle_eth_65_0x01 summary +p2p_handle_eth_65_0x01 {quantile="0.5"} 185 +p2p_handle_eth_65_0x01 {quantile="0.75"} 185 +p2p_handle_eth_65_0x01 {quantile="0.95"} 185 +p2p_handle_eth_65_0x01 {quantile="0.99"} 185 +p2p_handle_eth_65_0x01 {quantile="0.999"} 185 +p2p_handle_eth_65_0x01 {quantile="0.9999"} 185 + +# TYPE p2p_handle_eth_65_0x03_count counter +p2p_handle_eth_65_0x03_count 1 + +# TYPE p2p_handle_eth_65_0x03 summary +p2p_handle_eth_65_0x03 {quantile="0.5"} 126 +p2p_handle_eth_65_0x03 {quantile="0.75"} 126 +p2p_handle_eth_65_0x03 {quantile="0.95"} 126 +p2p_handle_eth_65_0x03 {quantile="0.99"} 126 +p2p_handle_eth_65_0x03 {quantile="0.999"} 126 +p2p_handle_eth_65_0x03 {quantile="0.9999"} 126 + +# TYPE p2p_handle_eth_65_0x04_count counter +p2p_handle_eth_65_0x04_count 154 + +# TYPE p2p_handle_eth_65_0x04 summary +p2p_handle_eth_65_0x04 {quantile="0.5"} 855 +p2p_handle_eth_65_0x04 {quantile="0.75"} 1172 +p2p_handle_eth_65_0x04 {quantile="0.95"} 1673.5 +p2p_handle_eth_65_0x04 {quantile="0.99"} 8296.449999999888 +p2p_handle_eth_65_0x04 {quantile="0.999"} 13775 +p2p_handle_eth_65_0x04 {quantile="0.9999"} 13775 + +# TYPE p2p_handle_eth_65_0x06_count counter +p2p_handle_eth_65_0x06_count 99 + +# TYPE p2p_handle_eth_65_0x06 summary +p2p_handle_eth_65_0x06 {quantile="0.5"} 180 
+p2p_handle_eth_65_0x06 {quantile="0.75"} 250 +p2p_handle_eth_65_0x06 {quantile="0.95"} 2105 +p2p_handle_eth_65_0x06 {quantile="0.99"} 7910 +p2p_handle_eth_65_0x06 {quantile="0.999"} 7910 +p2p_handle_eth_65_0x06 {quantile="0.9999"} 7910 + +# TYPE p2p_handle_eth_65_0x08_count counter +p2p_handle_eth_65_0x08_count 17 + +# TYPE p2p_handle_eth_65_0x08 summary +p2p_handle_eth_65_0x08 {quantile="0.5"} 5 +p2p_handle_eth_65_0x08 {quantile="0.75"} 7 +p2p_handle_eth_65_0x08 {quantile="0.95"} 87 +p2p_handle_eth_65_0x08 {quantile="0.99"} 87 +p2p_handle_eth_65_0x08 {quantile="0.999"} 87 +p2p_handle_eth_65_0x08 {quantile="0.9999"} 87 + +# TYPE p2p_handle_eth_66_0x03_count counter +p2p_handle_eth_66_0x03_count 1 + +# TYPE p2p_handle_eth_66_0x03 summary +p2p_handle_eth_66_0x03 {quantile="0.5"} 405 +p2p_handle_eth_66_0x03 {quantile="0.75"} 405 +p2p_handle_eth_66_0x03 {quantile="0.95"} 405 +p2p_handle_eth_66_0x03 {quantile="0.99"} 405 +p2p_handle_eth_66_0x03 {quantile="0.999"} 405 +p2p_handle_eth_66_0x03 {quantile="0.9999"} 405 + +# TYPE p2p_handle_eth_66_0x04_count counter +p2p_handle_eth_66_0x04_count 2 + +# TYPE p2p_handle_eth_66_0x04 summary +p2p_handle_eth_66_0x04 {quantile="0.5"} 595.5 +p2p_handle_eth_66_0x04 {quantile="0.75"} 1091 +p2p_handle_eth_66_0x04 {quantile="0.95"} 1091 +p2p_handle_eth_66_0x04 {quantile="0.99"} 1091 +p2p_handle_eth_66_0x04 {quantile="0.999"} 1091 +p2p_handle_eth_66_0x04 {quantile="0.9999"} 1091 + +# TYPE p2p_handle_eth_66_0x06_count counter +p2p_handle_eth_66_0x06_count 1 + +# TYPE p2p_handle_eth_66_0x06 summary +p2p_handle_eth_66_0x06 {quantile="0.5"} 1309 +p2p_handle_eth_66_0x06 {quantile="0.75"} 1309 +p2p_handle_eth_66_0x06 {quantile="0.95"} 1309 +p2p_handle_eth_66_0x06 {quantile="0.99"} 1309 +p2p_handle_eth_66_0x06 {quantile="0.999"} 1309 +p2p_handle_eth_66_0x06 {quantile="0.9999"} 1309 + +# TYPE p2p_handle_eth_66_0x08_count counter +p2p_handle_eth_66_0x08_count 2 + +# TYPE p2p_handle_eth_66_0x08 summary +p2p_handle_eth_66_0x08 {quantile="0.5"} 16 +p2p_handle_eth_66_0x08 {quantile="0.75"} 28 +p2p_handle_eth_66_0x08 {quantile="0.95"} 28 +p2p_handle_eth_66_0x08 {quantile="0.99"} 28 +p2p_handle_eth_66_0x08 {quantile="0.999"} 28 +p2p_handle_eth_66_0x08 {quantile="0.9999"} 28 + +# TYPE p2p_handle_snap_1_0x01_count counter +p2p_handle_snap_1_0x01_count 1 + +# TYPE p2p_handle_snap_1_0x01 summary +p2p_handle_snap_1_0x01 {quantile="0.5"} 375 +p2p_handle_snap_1_0x01 {quantile="0.75"} 375 +p2p_handle_snap_1_0x01 {quantile="0.95"} 375 +p2p_handle_snap_1_0x01 {quantile="0.99"} 375 +p2p_handle_snap_1_0x01 {quantile="0.999"} 375 +p2p_handle_snap_1_0x01 {quantile="0.9999"} 375 + +# TYPE p2p_ingress gauge #IMP +p2p_ingress 3918214 + +# TYPE p2p_ingress_eth_65_0x00 gauge #IMP +p2p_ingress_eth_65_0x00 271 + +# TYPE p2p_ingress_eth_65_0x00_packets gauge IMP +p2p_ingress_eth_65_0x00_packets 3 + +# TYPE p2p_ingress_eth_65_0x01 gauge IMP +p2p_ingress_eth_65_0x01 0 + +# TYPE p2p_ingress_eth_65_0x01_packets gauge IMP +p2p_ingress_eth_65_0x01_packets 0 + +# TYPE p2p_ingress_eth_65_0x03 gauge IMP +p2p_ingress_eth_65_0x03 10 + +# TYPE p2p_ingress_eth_65_0x03_packets gauge IMP +p2p_ingress_eth_65_0x03_packets 1 + +# TYPE p2p_ingress_eth_65_0x04 gauge IMP +p2p_ingress_eth_65_0x04 3362209 + +# TYPE p2p_ingress_eth_65_0x04_packets gauge IMP +p2p_ingress_eth_65_0x04_packets 131 + +# TYPE p2p_ingress_eth_65_0x06 gauge IMP +p2p_ingress_eth_65_0x06 383458 + +# TYPE p2p_ingress_eth_65_0x06_packets gauge IMP +p2p_ingress_eth_65_0x06_packets 83 + +# TYPE p2p_ingress_eth_65_0x08 gauge +p2p_ingress_eth_65_0x08 
96828 + +# TYPE p2p_ingress_eth_65_0x08_packets gauge +p2p_ingress_eth_65_0x08_packets 9 + +# TYPE p2p_ingress_eth_66_0x00 gauge +p2p_ingress_eth_66_0x00 436 + +# TYPE p2p_ingress_eth_66_0x00_packets gauge +p2p_ingress_eth_66_0x00_packets 5 + +# TYPE p2p_ingress_eth_66_0x03 gauge +p2p_ingress_eth_66_0x03 0 + +# TYPE p2p_ingress_eth_66_0x03_packets gauge +p2p_ingress_eth_66_0x03_packets 0 + +# TYPE p2p_ingress_eth_66_0x04 gauge +p2p_ingress_eth_66_0x04 0 + +# TYPE p2p_ingress_eth_66_0x04_packets gauge +p2p_ingress_eth_66_0x04_packets 0 + +# TYPE p2p_ingress_eth_66_0x06 gauge +p2p_ingress_eth_66_0x06 0 + +# TYPE p2p_ingress_eth_66_0x06_packets gauge +p2p_ingress_eth_66_0x06_packets 0 + +# TYPE p2p_ingress_eth_66_0x08 gauge +p2p_ingress_eth_66_0x08 0 + +# TYPE p2p_ingress_eth_66_0x08_packets gauge +p2p_ingress_eth_66_0x08_packets 0 + +# TYPE p2p_ingress_snap_1_0x01 gauge +p2p_ingress_snap_1_0x01 0 + +# TYPE p2p_ingress_snap_1_0x01_packets gauge +p2p_ingress_snap_1_0x01_packets 0 + +# TYPE p2p_peers gauge IMP +p2p_peers 8 + +# TYPE p2p_serves gauge IMP +p2p_serves 70 + +# TYPE p2p_tracked_eth_66_0x03 gauge +p2p_tracked_eth_66_0x03 2 + +# TYPE p2p_tracked_eth_66_0x05 gauge +p2p_tracked_eth_66_0x05 0 + +# TYPE p2p_tracked_snap_1_0x00 gauge +p2p_tracked_snap_1_0x00 0 + +# TYPE p2p_wait_eth_66_0x03_count counter +p2p_wait_eth_66_0x03_count 2 + +# TYPE p2p_wait_eth_66_0x03 summary +p2p_wait_eth_66_0x03 {quantile="0.5"} 567440.5 +p2p_wait_eth_66_0x03 {quantile="0.75"} 574606 +p2p_wait_eth_66_0x03 {quantile="0.95"} 574606 +p2p_wait_eth_66_0x03 {quantile="0.99"} 574606 +p2p_wait_eth_66_0x03 {quantile="0.999"} 574606 +p2p_wait_eth_66_0x03 {quantile="0.9999"} 574606 + +# TYPE p2p_wait_eth_66_0x05_count counter +p2p_wait_eth_66_0x05_count 1 + +# TYPE p2p_wait_eth_66_0x05 summary +p2p_wait_eth_66_0x05 {quantile="0.5"} 212272 +p2p_wait_eth_66_0x05 {quantile="0.75"} 212272 +p2p_wait_eth_66_0x05 {quantile="0.95"} 212272 +p2p_wait_eth_66_0x05 {quantile="0.99"} 212272 +p2p_wait_eth_66_0x05 {quantile="0.999"} 212272 +p2p_wait_eth_66_0x05 {quantile="0.9999"} 212272 + +# TYPE p2p_wait_snap_1_0x00_count counter +p2p_wait_snap_1_0x00_count 1 + +# TYPE p2p_wait_snap_1_0x00 summary +p2p_wait_snap_1_0x00 {quantile="0.5"} 574823 +p2p_wait_snap_1_0x00 {quantile="0.75"} 574823 +p2p_wait_snap_1_0x00 {quantile="0.95"} 574823 +p2p_wait_snap_1_0x00 {quantile="0.99"} 574823 +p2p_wait_snap_1_0x00 {quantile="0.999"} 574823 +p2p_wait_snap_1_0x00 {quantile="0.9999"} 574823 + +# TYPE rpc_duration_all_count counter +rpc_duration_all_count 0 + +# TYPE rpc_duration_all summary +rpc_duration_all {quantile="0.5"} 0 +rpc_duration_all {quantile="0.75"} 0 +rpc_duration_all {quantile="0.95"} 0 +rpc_duration_all {quantile="0.99"} 0 +rpc_duration_all {quantile="0.999"} 0 +rpc_duration_all {quantile="0.9999"} 0 + +# TYPE rpc_failure gauge +rpc_failure 0 + +# TYPE rpc_requests gauge +rpc_requests 0 + +# TYPE rpc_success gauge +rpc_success 0 + +# TYPE state_snapshot_bloom_account_falsehit gauge +state_snapshot_bloom_account_falsehit 0 + +# TYPE state_snapshot_bloom_account_miss gauge +state_snapshot_bloom_account_miss 0 + +# TYPE state_snapshot_bloom_account_truehit gauge +state_snapshot_bloom_account_truehit 0 + +# TYPE state_snapshot_bloom_error gauge +state_snapshot_bloom_error 0 + +# TYPE state_snapshot_bloom_storage_falsehit gauge +state_snapshot_bloom_storage_falsehit 0 + +# TYPE state_snapshot_bloom_storage_miss gauge +state_snapshot_bloom_storage_miss 0 + +# TYPE state_snapshot_bloom_storage_truehit gauge 
+state_snapshot_bloom_storage_truehit 0 + +# TYPE state_snapshot_clean_account_hit gauge +state_snapshot_clean_account_hit 0 + +# TYPE state_snapshot_clean_account_inex gauge +state_snapshot_clean_account_inex 0 + +# TYPE state_snapshot_clean_account_miss gauge +state_snapshot_clean_account_miss 0 + +# TYPE state_snapshot_clean_account_read gauge +state_snapshot_clean_account_read 0 + +# TYPE state_snapshot_clean_account_write gauge +state_snapshot_clean_account_write 0 + +# TYPE state_snapshot_clean_storage_hit gauge +state_snapshot_clean_storage_hit 0 + +# TYPE state_snapshot_clean_storage_inex gauge +state_snapshot_clean_storage_inex 0 + +# TYPE state_snapshot_clean_storage_miss gauge +state_snapshot_clean_storage_miss 0 + +# TYPE state_snapshot_clean_storage_read gauge +state_snapshot_clean_storage_read 0 + +# TYPE state_snapshot_clean_storage_write gauge +state_snapshot_clean_storage_write 0 + +# TYPE state_snapshot_dirty_account_hit gauge +state_snapshot_dirty_account_hit 0 + +# TYPE state_snapshot_dirty_account_hit_depth_count counter +state_snapshot_dirty_account_hit_depth_count 0 + +# TYPE state_snapshot_dirty_account_hit_depth summary +state_snapshot_dirty_account_hit_depth {quantile="0.5"} 0 +state_snapshot_dirty_account_hit_depth {quantile="0.75"} 0 +state_snapshot_dirty_account_hit_depth {quantile="0.95"} 0 +state_snapshot_dirty_account_hit_depth {quantile="0.99"} 0 +state_snapshot_dirty_account_hit_depth {quantile="0.999"} 0 +state_snapshot_dirty_account_hit_depth {quantile="0.9999"} 0 + +# TYPE state_snapshot_dirty_account_inex gauge +state_snapshot_dirty_account_inex 0 + +# TYPE state_snapshot_dirty_account_miss gauge +state_snapshot_dirty_account_miss 0 + +# TYPE state_snapshot_dirty_account_read gauge +state_snapshot_dirty_account_read 0 + +# TYPE state_snapshot_dirty_account_write gauge +state_snapshot_dirty_account_write 0 + +# TYPE state_snapshot_dirty_storage_hit gauge +state_snapshot_dirty_storage_hit 0 + +# TYPE state_snapshot_dirty_storage_hit_depth_count counter +state_snapshot_dirty_storage_hit_depth_count 0 + +# TYPE state_snapshot_dirty_storage_hit_depth summary +state_snapshot_dirty_storage_hit_depth {quantile="0.5"} 0 +state_snapshot_dirty_storage_hit_depth {quantile="0.75"} 0 +state_snapshot_dirty_storage_hit_depth {quantile="0.95"} 0 +state_snapshot_dirty_storage_hit_depth {quantile="0.99"} 0 +state_snapshot_dirty_storage_hit_depth {quantile="0.999"} 0 +state_snapshot_dirty_storage_hit_depth {quantile="0.9999"} 0 + +# TYPE state_snapshot_dirty_storage_inex gauge +state_snapshot_dirty_storage_inex 0 + +# TYPE state_snapshot_dirty_storage_miss gauge +state_snapshot_dirty_storage_miss 0 + +# TYPE state_snapshot_dirty_storage_read gauge +state_snapshot_dirty_storage_read 0 + +# TYPE state_snapshot_dirty_storage_write gauge +state_snapshot_dirty_storage_write 0 + +# TYPE state_snapshot_flush_account_item gauge +state_snapshot_flush_account_item 0 + +# TYPE state_snapshot_flush_account_size gauge +state_snapshot_flush_account_size 0 + +# TYPE state_snapshot_flush_storage_item gauge +state_snapshot_flush_storage_item 0 + +# TYPE state_snapshot_flush_storage_size gauge +state_snapshot_flush_storage_size 0 + +# TYPE state_snapshot_generation_account_generated gauge +state_snapshot_generation_account_generated 8893 + +# TYPE state_snapshot_generation_account_missall gauge +state_snapshot_generation_account_missall 1 + +# TYPE state_snapshot_generation_account_recovered gauge +state_snapshot_generation_account_recovered 0 + +# TYPE 
state_snapshot_generation_account_wiped gauge +state_snapshot_generation_account_wiped 0 + +# TYPE state_snapshot_generation_duration_account_prove gauge +state_snapshot_generation_duration_account_prove 16221 + +# TYPE state_snapshot_generation_duration_account_snapread gauge +state_snapshot_generation_duration_account_snapread 89448 + +# TYPE state_snapshot_generation_duration_account_trieread gauge +state_snapshot_generation_duration_account_trieread 78590307 + +# TYPE state_snapshot_generation_duration_account_write gauge +state_snapshot_generation_duration_account_write 84327092 + +# TYPE state_snapshot_generation_duration_storage_prove gauge +state_snapshot_generation_duration_storage_prove 0 + +# TYPE state_snapshot_generation_duration_storage_snapread gauge +state_snapshot_generation_duration_storage_snapread 0 + +# TYPE state_snapshot_generation_duration_storage_trieread gauge +state_snapshot_generation_duration_storage_trieread 0 + +# TYPE state_snapshot_generation_duration_storage_write gauge +state_snapshot_generation_duration_storage_write 0 + +# TYPE state_snapshot_generation_proof_failure gauge +state_snapshot_generation_proof_failure 1 + +# TYPE state_snapshot_generation_proof_success gauge +state_snapshot_generation_proof_success 0 + +# TYPE state_snapshot_generation_storage_generated gauge +state_snapshot_generation_storage_generated 0 + +# TYPE state_snapshot_generation_storage_missall gauge +state_snapshot_generation_storage_missall 0 + +# TYPE state_snapshot_generation_storage_recovered gauge +state_snapshot_generation_storage_recovered 0 + +# TYPE state_snapshot_generation_storage_wiped gauge +state_snapshot_generation_storage_wiped 0 + +# TYPE system_cpu_goroutines gauge +system_cpu_goroutines 129 + +# TYPE system_cpu_procload gauge +system_cpu_procload 47 + +# TYPE system_cpu_sysload gauge +system_cpu_sysload 215 + +# TYPE system_cpu_syswait gauge +system_cpu_syswait 25 + +# TYPE system_cpu_threads gauge +system_cpu_threads 13 + +# TYPE system_disk_readbytes gauge +system_disk_readbytes 5017534 + +# TYPE system_disk_readcount gauge +system_disk_readcount 913 + +# TYPE system_disk_readdata gauge +system_disk_readdata 1777439 + +# TYPE system_disk_writebytes gauge +system_disk_writebytes 36555070 + +# TYPE system_disk_writecount gauge +system_disk_writecount 72172 + +# TYPE system_disk_writedata gauge +system_disk_writedata 13225794 + +# TYPE system_memory_allocs gauge +system_memory_allocs 2144962 + +# TYPE system_memory_frees gauge +system_memory_frees 1268637 + +# TYPE system_memory_held gauge +system_memory_held 728506368 + +# TYPE system_memory_pauses gauge +system_memory_pauses 4199764 + +# TYPE system_memory_used gauge +system_memory_used 577212048 + +# TYPE trie_bloom_add gauge +trie_bloom_add 0 + +# TYPE trie_bloom_error gauge +trie_bloom_error 0 + +# TYPE trie_bloom_fault gauge +trie_bloom_fault 2 + +# TYPE trie_bloom_load gauge +trie_bloom_load 0 + +# TYPE trie_bloom_miss gauge +trie_bloom_miss 0 + +# TYPE trie_bloom_test gauge +trie_bloom_test 0 + +# TYPE trie_memcache_clean_hit gauge +trie_memcache_clean_hit 6 + +# TYPE trie_memcache_clean_miss gauge +trie_memcache_clean_miss 12356 + +# TYPE trie_memcache_clean_read gauge +trie_memcache_clean_read 2679 + +# TYPE trie_memcache_clean_write gauge +trie_memcache_clean_write 1483023 + +# TYPE trie_memcache_commit_nodes gauge +trie_memcache_commit_nodes 12356 + +# TYPE trie_memcache_commit_size gauge +trie_memcache_commit_size 1869429 + +# TYPE trie_memcache_dirty_hit gauge +trie_memcache_dirty_hit 0 + +# TYPE 
trie_memcache_dirty_miss gauge
+trie_memcache_dirty_miss 12356
+
+# TYPE trie_memcache_dirty_read gauge
+trie_memcache_dirty_read 0
+
+# TYPE trie_memcache_dirty_write gauge
+trie_memcache_dirty_write 1474037
+
+# TYPE trie_memcache_flush_nodes gauge
+trie_memcache_flush_nodes 0
+
+# TYPE trie_memcache_flush_size gauge
+trie_memcache_flush_size 0
+
+# TYPE trie_memcache_gc_nodes gauge
+trie_memcache_gc_nodes 0
+
+# TYPE trie_memcache_gc_size gauge
+trie_memcache_gc_size 0
+
+# TYPE trie_prefetch_miner_account_dup gauge
+trie_prefetch_miner_account_dup 0
+
+# TYPE trie_prefetch_miner_account_load gauge
+trie_prefetch_miner_account_load 0
+
+# TYPE trie_prefetch_miner_account_skip gauge
+trie_prefetch_miner_account_skip 0
+
+# TYPE trie_prefetch_miner_account_waste gauge
+trie_prefetch_miner_account_waste 0
+
+# TYPE trie_prefetch_miner_deliverymiss gauge
+trie_prefetch_miner_deliverymiss 1
+
+# TYPE trie_prefetch_miner_storage_dup gauge
+trie_prefetch_miner_storage_dup 0
+
+# TYPE trie_prefetch_miner_storage_load gauge
+trie_prefetch_miner_storage_load 0
+
+# TYPE trie_prefetch_miner_storage_skip gauge
+trie_prefetch_miner_storage_skip 0
+
+# TYPE trie_prefetch_miner_storage_waste gauge
+trie_prefetch_miner_storage_waste 0
+
+# TYPE txpool_invalid gauge IMP + rate
+txpool_invalid 0
+
+# TYPE txpool_known gauge
+txpool_known 0
+
+
+# TYPE txpool_overflowed gauge
+txpool_overflowed 0
+
+#---
+# TYPE txpool_pending gauge IMP
+txpool_pending 0
+
+# TYPE txpool_local gauge IMP
+txpool_local 0
+
+# TYPE txpool_queued gauge
+txpool_queued 0
+#---
+
+# TYPE txpool_pending_discard gauge IMP + rate
+txpool_pending_discard 0
+
+# TYPE txpool_pending_nofunds gauge IMP + rate
+txpool_pending_nofunds 0
+
+# TYPE txpool_pending_ratelimit gauge IMP + rate
+txpool_pending_ratelimit 0
+
+# TYPE txpool_pending_replace gauge IMP + rate
+txpool_pending_replace 0
+
+
+
+# TYPE txpool_queued_discard gauge IMP + rate
+txpool_queued_discard 0
+
+# TYPE txpool_queued_eviction gauge IMP + rate
+txpool_queued_eviction 0
+
+# TYPE txpool_queued_nofunds gauge IMP + rate
+txpool_queued_nofunds 0
+
+# TYPE txpool_queued_ratelimit gauge IMP + rate
+txpool_queued_ratelimit 0
+
+# TYPE txpool_queued_replace gauge IMP + rate
+txpool_queued_replace 0
+
+# TYPE txpool_reheap_count counter
+txpool_reheap_count 0
+
+# TYPE txpool_reheap summary
+txpool_reheap {quantile="0.5"} 0
+txpool_reheap {quantile="0.75"} 0
+txpool_reheap {quantile="0.95"} 0
+txpool_reheap {quantile="0.99"} 0
+txpool_reheap {quantile="0.999"} 0
+txpool_reheap {quantile="0.9999"} 0
+
+# TYPE txpool_slots gauge
+txpool_slots 0
+
+# TYPE txpool_underpriced gauge IMP + rate
+txpool_underpriced 0
+
+# TYPE txpool_valid gauge IMP + rate
+txpool_valid 0
+
+# TYPE vflux_server_capQueryNonZero gauge
+vflux_server_capQueryNonZero 0
+
+# TYPE vflux_server_capQueryZero gauge
+vflux_server_capQueryZero 0
+
+# TYPE vflux_server_clientEvent_activated gauge
+vflux_server_clientEvent_activated 0
+
+# TYPE vflux_server_clientEvent_connected gauge
+vflux_server_clientEvent_connected 0
+
+# TYPE vflux_server_clientEvent_deactivated gauge
+vflux_server_clientEvent_deactivated 0
+
+# TYPE vflux_server_clientEvent_disconnected gauge
+vflux_server_clientEvent_disconnected 0
+
+# TYPE vflux_server_totalConnected gauge
+vflux_server_totalConnected 0
+
diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/README.md b/src/go/collectors/go.d.plugin/modules/haproxy/README.md
new file mode 120000
index 00000000000000..2f52cf84638b5d
--- /dev/null
+++ 
b/src/go/collectors/go.d.plugin/modules/haproxy/README.md @@ -0,0 +1 @@ +integrations/haproxy.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/charts.go b/src/go/collectors/go.d.plugin/modules/haproxy/charts.go new file mode 100644 index 00000000000000..8c5f6bef64e03e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/charts.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package haproxy + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +var charts = module.Charts{ + chartBackendCurrentSessions.Copy(), + chartBackendSessions.Copy(), + + chartBackendResponseTimeAverage.Copy(), + + chartBackendQueueTimeAverage.Copy(), + chartBackendCurrentQueue.Copy(), +} + +var ( + chartBackendCurrentSessions = module.Chart{ + ID: "backend_current_sessions", + Title: "Current number of active sessions", + Units: "sessions", + Fam: "backend sessions", + Ctx: "haproxy.backend_current_sessions", + } + chartBackendSessions = module.Chart{ + ID: "backend_sessions", + Title: "Sessions rate", + Units: "sessions/s", + Fam: "backend sessions", + Ctx: "haproxy.backend_sessions", + } +) + +var ( + chartBackendResponseTimeAverage = module.Chart{ + ID: "backend_response_time_average", + Title: "Average response time for last 1024 successful connections", + Units: "milliseconds", + Fam: "backend responses", + Ctx: "haproxy.backend_response_time_average", + } + chartTemplateBackendHTTPResponses = module.Chart{ + ID: "backend_http_responses_proxy_%s", + Title: "HTTP responses by code class for <code>%s</code> proxy", + Units: "responses/s", + Fam: "backend responses", + Ctx: "haproxy.backend_http_responses", + Type: module.Stacked, + Dims: module.Dims{ + {ID: "haproxy_backend_http_responses_1xx_proxy_%s", Name: "1xx", Algo: module.Incremental}, + {ID: "haproxy_backend_http_responses_2xx_proxy_%s", Name: "2xx", Algo: module.Incremental}, + {ID: "haproxy_backend_http_responses_3xx_proxy_%s", Name: "3xx", Algo: module.Incremental}, + {ID: "haproxy_backend_http_responses_4xx_proxy_%s", Name: "4xx", Algo: module.Incremental}, + {ID: "haproxy_backend_http_responses_5xx_proxy_%s", Name: "5xx", Algo: module.Incremental}, + {ID: "haproxy_backend_http_responses_other_proxy_%s", Name: "other", Algo: module.Incremental}, + }, + } +) + +var ( + chartBackendQueueTimeAverage = module.Chart{ + ID: "backend_queue_time_average", + Title: "Average queue time for last 1024 successful connections", + Units: "milliseconds", + Fam: "backend queue", + Ctx: "haproxy.backend_queue_time_average", + } + chartBackendCurrentQueue = module.Chart{ + ID: "backend_current_queue", + Title: "Current number of queued requests", + Units: "requests", + Fam: "backend queue", + Ctx: "haproxy.backend_current_queue", + } +) + +var ( + chartTemplateBackendNetworkIO = module.Chart{ + ID: "backend_network_io_proxy_%s", + Title: "Network traffic for <code>%s</code> proxy", + Units: "bytes/s", + Fam: "backend network", + Ctx: "haproxy.backend_network_io", + Type: module.Area, + Dims: module.Dims{ + {ID: "haproxy_backend_bytes_in_proxy_%s", Name: "in", Algo: module.Incremental}, + {ID: "haproxy_backend_bytes_out_proxy_%s", Name: "out", Algo: module.Incremental, Mul: -1}, + }, + } +) + +func newChartBackendHTTPResponses(proxy string) *module.Chart { + return newBackendChartFromTemplate(chartTemplateBackendHTTPResponses, proxy) +} + +func newChartBackendNetworkIO(proxy string) *module.Chart { + return newBackendChartFromTemplate(chartTemplateBackendNetworkIO, proxy) +} + 
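+// newBackendChartFromTemplate copies one of the per-proxy chart templates
+// above and substitutes the proxy name into the chart ID, title, and every
+// dimension ID, giving each discovered proxy its own chart instance.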
+func newBackendChartFromTemplate(tpl module.Chart, proxy string) *module.Chart { + c := tpl.Copy() + c.ID = fmt.Sprintf(c.ID, proxy) + c.Title = fmt.Sprintf(c.Title, proxy) + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, proxy) + } + return c +} diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/collect.go b/src/go/collectors/go.d.plugin/modules/haproxy/collect.go new file mode 100644 index 00000000000000..10d24a088bd7f3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/collect.go @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package haproxy + +import ( + "errors" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricBackendSessionsTotal = "haproxy_backend_sessions_total" + metricBackendCurrentSessions = "haproxy_backend_current_sessions" + metricBackendHTTPResponsesTotal = "haproxy_backend_http_responses_total" + metricBackendResponseTimeAverageSeconds = "haproxy_backend_response_time_average_seconds" + metricBackendCurrentQueue = "haproxy_backend_current_queue" + metricBackendQueueTimeAverageSeconds = "haproxy_backend_queue_time_average_seconds" + metricBackendBytesInTotal = "haproxy_backend_bytes_in_total" + metricBackendBytesOutTotal = "haproxy_backend_bytes_out_total" +) + +func isHaproxyMetrics(pms prometheus.Series) bool { + for _, pm := range pms { + if strings.HasPrefix(pm.Name(), "haproxy_") { + return true + } + } + return false +} + +func (h *Haproxy) collect() (map[string]int64, error) { + pms, err := h.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if h.validateMetrics && !isHaproxyMetrics(pms) { + return nil, errors.New("unexpected metrics (not HAProxy)") + } + h.validateMetrics = false + + mx := make(map[string]int64) + for _, pm := range pms { + proxy := pm.Labels.Get("proxy") + if proxy == "" { + continue + } + + if !h.proxies[proxy] { + h.proxies[proxy] = true + h.addProxyToCharts(proxy) + } + + mx[dimID(pm)] = int64(pm.Value * multiplier(pm)) + } + + return mx, nil +} + +func (h *Haproxy) addProxyToCharts(proxy string) { + h.addDimToChart(chartBackendCurrentSessions.ID, &module.Dim{ + ID: proxyDimID(metricBackendCurrentSessions, proxy), + Name: proxy, + }) + h.addDimToChart(chartBackendSessions.ID, &module.Dim{ + ID: proxyDimID(metricBackendSessionsTotal, proxy), + Name: proxy, + Algo: module.Incremental, + }) + + h.addDimToChart(chartBackendResponseTimeAverage.ID, &module.Dim{ + ID: proxyDimID(metricBackendResponseTimeAverageSeconds, proxy), + Name: proxy, + }) + if err := h.Charts().Add(newChartBackendHTTPResponses(proxy)); err != nil { + h.Warning(err) + } + + h.addDimToChart(chartBackendCurrentQueue.ID, &module.Dim{ + ID: proxyDimID(metricBackendCurrentQueue, proxy), + Name: proxy, + }) + h.addDimToChart(chartBackendQueueTimeAverage.ID, &module.Dim{ + ID: proxyDimID(metricBackendQueueTimeAverageSeconds, proxy), + Name: proxy, + }) + + if err := h.Charts().Add(newChartBackendNetworkIO(proxy)); err != nil { + h.Warning(err) + } +} + +func (h *Haproxy) addDimToChart(chartID string, dim *module.Dim) { + chart := h.Charts().Get(chartID) + if chart == nil { + h.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID) + return + } + if err := chart.AddDim(dim); err != nil { + h.Warning(err) + return + } + chart.MarkNotCreated() +} + +func multiplier(pm prometheus.SeriesSample) float64 { + switch pm.Name() { + case metricBackendResponseTimeAverageSeconds, + metricBackendQueueTimeAverageSeconds: + 
// to milliseconds + return 1000 + } + return 1 +} + +func dimID(pm prometheus.SeriesSample) string { + proxy := pm.Labels.Get("proxy") + if proxy == "" { + return "" + } + + name := cleanMetricName(pm.Name()) + if pm.Name() == metricBackendHTTPResponsesTotal { + name += "_" + pm.Labels.Get("code") + } + return proxyDimID(name, proxy) +} + +func proxyDimID(metric, proxy string) string { + return cleanMetricName(metric) + "_proxy_" + proxy +} + +func cleanMetricName(name string) string { + if strings.HasSuffix(name, "_total") { + return name[:len(name)-6] + } + if strings.HasSuffix(name, "_seconds") { + return name[:len(name)-8] + } + return name +} diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/config_schema.json b/src/go/collectors/go.d.plugin/modules/haproxy/config_schema.json new file mode 100644 index 00000000000000..9fa8cd11146daa --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/haproxy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/haproxy.go b/src/go/collectors/go.d.plugin/modules/haproxy/haproxy.go new file mode 100644 index 00000000000000..ffc936711d73f7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/haproxy.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package haproxy + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("haproxy", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Haproxy { + return &Haproxy{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8404/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + + charts: charts.Copy(), + proxies: make(map[string]bool), + validateMetrics: true, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Haproxy struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + prom prometheus.Prometheus + validateMetrics bool + proxies map[string]bool +} + +func (h *Haproxy) Init() bool { + if err := h.validateConfig(); err != nil { + h.Errorf("config validation: %v", err) + return false + } + + prom, err := h.initPrometheusClient() + if err != nil { + h.Errorf("prometheus client initialization: %v", err) + return false + } + h.prom = prom + + return true +} + +func (h *Haproxy) Check() bool { + return len(h.Collect()) > 0 +} + +func (h *Haproxy) Charts() *module.Charts { + return h.charts +} 
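+
+// Collect runs a single data collection cycle and returns the gathered
+// values keyed by dimension ID; a nil return is treated as a failed cycle.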
+func (h *Haproxy) Collect() map[string]int64 {
+    ms, err := h.collect()
+    if err != nil {
+        h.Error(err)
+        return nil
+    }
+
+    if len(ms) == 0 {
+        return nil
+    }
+    return ms
+}
+
+func (Haproxy) Cleanup() {
+    // TODO: close http idle connections
+}
diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/haproxy_test.go b/src/go/collectors/go.d.plugin/modules/haproxy/haproxy_test.go
new file mode 100644
index 00000000000000..c881c19f37d2d4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/haproxy/haproxy_test.go
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+    "net/http"
+    "net/http/httptest"
+    "os"
+    "testing"
+
+    "github.com/netdata/go.d.plugin/pkg/tlscfg"
+    "github.com/netdata/go.d.plugin/pkg/web"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+var (
+    v2310Metrics, _ = os.ReadFile("testdata/v2.3.10/metrics.txt")
+)
+
+func Test_Testdata(t *testing.T) {
+    for name, data := range map[string][]byte{
+        "v2310Metrics": v2310Metrics,
+    } {
+        require.NotNilf(t, data, name)
+    }
+}
+
+func TestNew(t *testing.T) {
+    assert.IsType(t, (*Haproxy)(nil), New())
+}
+
+func TestHaproxy_Init(t *testing.T) {
+    tests := map[string]struct {
+        config   Config
+        wantFail bool
+    }{
+        "success on default config": {
+            config: New().Config,
+        },
+        "fails on unset 'url'": {
+            wantFail: true,
+            config: Config{HTTP: web.HTTP{
+                Request: web.Request{},
+            }},
+        },
+        "fails on invalid TLSCA": {
+            wantFail: true,
+            config: Config{
+                HTTP: web.HTTP{
+                    Client: web.Client{
+                        TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+                    },
+                }},
+        },
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            h := New()
+            h.Config = test.config
+
+            if test.wantFail {
+                assert.False(t, h.Init())
+            } else {
+                assert.True(t, h.Init())
+            }
+        })
+    }
+}
+
+func TestHaproxy_Charts(t *testing.T) {
+    assert.NotNil(t, New().Charts())
+}
+
+func TestHaproxy_Cleanup(t *testing.T) {
+    assert.NotPanics(t, New().Cleanup)
+}
+
+func TestHaproxy_Check(t *testing.T) {
+    tests := map[string]struct {
+        wantFail bool
+        prepare  func(t *testing.T) (h *Haproxy, cleanup func())
+    }{
+        "success on valid response v2.3.10": {
+            wantFail: false,
+            prepare:  prepareCaseHaproxyV231Metrics,
+        },
+        "fails on response with unexpected metrics (not HAProxy)": {
+            wantFail: true,
+            prepare:  prepareCaseNotHaproxyMetrics,
+        },
+        "fails on 404 response": {
+            wantFail: true,
+            prepare:  prepareCase404Response,
+        },
+        "fails on connection refused": {
+            wantFail: true,
+            prepare:  prepareCaseConnectionRefused,
+        },
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            h, cleanup := test.prepare(t)
+            defer cleanup()
+
+            if test.wantFail {
+                assert.False(t, h.Check())
+            } else {
+                assert.True(t, h.Check())
+            }
+        })
+    }
+}
+
+func TestHaproxy_Collect(t *testing.T) {
+    tests := map[string]struct {
+        prepare       func(t *testing.T) (h *Haproxy, cleanup func())
+        wantCollected map[string]int64
+    }{
+        "success on valid response v2.3.10": {
+            prepare: prepareCaseHaproxyV231Metrics,
+            wantCollected: map[string]int64{
+                "haproxy_backend_bytes_in_proxy_proxy1": 21057046294,
+                "haproxy_backend_bytes_in_proxy_proxy2": 2493759083896,
+                "haproxy_backend_bytes_out_proxy_proxy1": 41352782609,
+                "haproxy_backend_bytes_out_proxy_proxy2": 5131407558,
+                "haproxy_backend_current_queue_proxy_proxy1": 1,
+                "haproxy_backend_current_queue_proxy_proxy2": 1,
+                "haproxy_backend_current_sessions_proxy_proxy1": 1,
+                "haproxy_backend_current_sessions_proxy_proxy2": 1322,
+ "haproxy_backend_http_responses_1xx_proxy_proxy1": 1, + "haproxy_backend_http_responses_1xx_proxy_proxy2": 4130401, + "haproxy_backend_http_responses_2xx_proxy_proxy1": 21338013, + "haproxy_backend_http_responses_2xx_proxy_proxy2": 1, + "haproxy_backend_http_responses_3xx_proxy_proxy1": 10004, + "haproxy_backend_http_responses_3xx_proxy_proxy2": 1, + "haproxy_backend_http_responses_4xx_proxy_proxy1": 10170758, + "haproxy_backend_http_responses_4xx_proxy_proxy2": 1, + "haproxy_backend_http_responses_5xx_proxy_proxy1": 3075, + "haproxy_backend_http_responses_5xx_proxy_proxy2": 1, + "haproxy_backend_http_responses_other_proxy_proxy1": 5657, + "haproxy_backend_http_responses_other_proxy_proxy2": 1, + "haproxy_backend_queue_time_average_proxy_proxy1": 0, + "haproxy_backend_queue_time_average_proxy_proxy2": 0, + "haproxy_backend_response_time_average_proxy_proxy1": 52, + "haproxy_backend_response_time_average_proxy_proxy2": 1, + "haproxy_backend_sessions_proxy_proxy1": 31527507, + "haproxy_backend_sessions_proxy_proxy2": 4131723, + }, + }, + "fails on response with unexpected metrics (not HAProxy)": { + prepare: prepareCaseNotHaproxyMetrics, + }, + "fails on 404 response": { + prepare: prepareCase404Response, + }, + "fails on connection refused": { + prepare: prepareCaseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + h, cleanup := test.prepare(t) + defer cleanup() + + ms := h.Collect() + + assert.Equal(t, test.wantCollected, ms) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, h, ms) + } + }) + } +} + +func prepareCaseHaproxyV231Metrics(t *testing.T) (*Haproxy, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(v2310Metrics) + })) + h := New() + h.URL = srv.URL + require.True(t, h.Init()) + + return h, srv.Close +} + +func prepareCaseNotHaproxyMetrics(t *testing.T) (*Haproxy, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` +# HELP haproxy_backend_http_responses_total Total number of HTTP responses. 
+# TYPE haproxy_backend_http_responses_total counter
+application_backend_http_responses_total{proxy="infra-traefik-web",code="1xx"} 0
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="1xx"} 4130401
+application_backend_http_responses_total{proxy="infra-traefik-web",code="2xx"} 21338013
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="2xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="3xx"} 10004
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="3xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="4xx"} 10170758
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="4xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="5xx"} 3075
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="5xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="other"} 5657
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} 0
+`))
+        }))
+    h := New()
+    h.URL = srv.URL
+    require.True(t, h.Init())
+
+    return h, srv.Close
+}
+
+func prepareCase404Response(t *testing.T) (*Haproxy, func()) {
+    t.Helper()
+    srv := httptest.NewServer(http.HandlerFunc(
+        func(w http.ResponseWriter, r *http.Request) {
+            w.WriteHeader(http.StatusNotFound)
+        }))
+    h := New()
+    h.URL = srv.URL
+    require.True(t, h.Init())
+
+    return h, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) {
+    t.Helper()
+    h := New()
+    h.URL = "http://127.0.0.1:38001"
+    require.True(t, h.Init())
+
+    return h, func() {}
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, h *Haproxy, ms map[string]int64) {
+    for _, chart := range *h.Charts() {
+        if chart.Obsolete {
+            continue
+        }
+        for _, dim := range chart.Dims {
+            _, ok := ms[dim.ID]
+            assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+        }
+        for _, v := range chart.Vars {
+            _, ok := ms[v.ID]
+            assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+        }
+    }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/init.go b/src/go/collectors/go.d.plugin/modules/haproxy/init.go
new file mode 100644
index 00000000000000..30e0c456041436
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/haproxy/init.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+    "errors"
+
+    "github.com/netdata/go.d.plugin/pkg/prometheus"
+    "github.com/netdata/go.d.plugin/pkg/prometheus/selector"
+    "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (h Haproxy) validateConfig() error {
+    if h.URL == "" {
+        return errors.New("'url' is not set")
+    }
+    if _, err := web.NewHTTPRequest(h.Request); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (h Haproxy) initPrometheusClient() (prometheus.Prometheus, error) {
+    httpClient, err := web.NewHTTPClient(h.Client)
+    if err != nil {
+        return nil, err
+    }
+
+    prom := prometheus.NewWithSelector(httpClient, h.Request, sr)
+    return prom, nil
+}
+
+var sr, _ = selector.Expr{
+    Allow: []string{
+        metricBackendHTTPResponsesTotal,
+        metricBackendCurrentQueue,
+        metricBackendQueueTimeAverageSeconds,
+        metricBackendBytesInTotal,
+        metricBackendResponseTimeAverageSeconds,
+        metricBackendSessionsTotal,
+        metricBackendCurrentSessions,
+        metricBackendBytesOutTotal,
+    },
+}.Parse()
diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/integrations/haproxy.md 
b/src/go/collectors/go.d.plugin/modules/haproxy/integrations/haproxy.md new file mode 100644 index 00000000000000..d4cf4146fc88b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/integrations/haproxy.md @@ -0,0 +1,241 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/haproxy/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/haproxy/metadata.yaml" +sidebar_label: "HAProxy" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HAProxy + + +<img src="https://netdata.cloud/img/haproxy.svg" width="150"/> + + +Plugin: go.d.plugin +Module: haproxy + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors HAProxy servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per HAProxy instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| haproxy.backend_current_sessions | a dimension per proxy | sessions | +| haproxy.backend_sessions | a dimension per proxy | sessions/s | +| haproxy.backend_response_time_average | a dimension per proxy | milliseconds | +| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds | +| haproxy.backend_current_queue | a dimension per proxy | requests | + +### Per proxy + +These metrics refer to the Proxy. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s | +| haproxy.backend_network_io | in, out | bytes/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable PROMEX addon. + +To enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/haproxy.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/haproxy.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. 
| 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8404/metrics + +``` +</details> + +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8404/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +HAProxy with HTTPS enabled and a self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8404/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8404/metrics + + - name: remote + url: http://192.0.2.1:8404/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m haproxy + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/metadata.yaml b/src/go/collectors/go.d.plugin/modules/haproxy/metadata.yaml new file mode 100644 index 00000000000000..adc8796020f8ba --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/metadata.yaml @@ -0,0 +1,231 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-haproxy + plugin_name: go.d.plugin + module_name: haproxy + monitored_instance: + name: HAProxy + link: https://www.haproxy.org/ + icon_filename: haproxy.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - haproxy + - web + - webserver + - http + - proxy + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors HAProxy servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable PROMEX addon. + description: | + To enable PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex). + configuration: + file: + name: go.d/haproxy.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. 
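+ # note: integrations/haproxy.md is generated from this metadata.yaml file, as its "DO NOT EDIT" banner states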
+ default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8404/metrics + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8404/metrics + username: username + password: password + - name: HTTPS with self-signed certificate + description: HAProxy with HTTPS enabled and a self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1:8404/metrics + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8404/metrics + + - name: remote + url: http://192.0.2.1:8404/metrics + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: haproxy.backend_current_sessions + description: Current number of active sessions + unit: sessions + chart_type: line + dimensions: + - name: a dimension per proxy + - name: haproxy.backend_sessions + description: Sessions rate + unit: sessions/s + chart_type: line + dimensions: + - name: a dimension per proxy + - name: haproxy.backend_response_time_average + description: Average response time for last 1024 successful connections + unit: milliseconds + chart_type: line + dimensions: + - name: a dimension per proxy + - name: haproxy.backend_queue_time_average + description: Average queue time for last 1024 successful connections + unit: milliseconds + chart_type: line + dimensions: + - name: a dimension per proxy + - name: haproxy.backend_current_queue + description: Current number of queued requests + unit: requests + chart_type: line + dimensions: + - name: a dimension per proxy + - name: proxy + description: These metrics refer to the Proxy. + labels: [] + metrics: + - name: haproxy.backend_http_responses + description: HTTP responses by code class + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: other + - name: haproxy.backend_network_io + description: Network traffic + unit: bytes/s + chart_type: area + dimensions: + - name: in + - name: out diff --git a/src/go/collectors/go.d.plugin/modules/haproxy/testdata/v2.3.10/metrics.txt b/src/go/collectors/go.d.plugin/modules/haproxy/testdata/v2.3.10/metrics.txt new file mode 100644 index 00000000000000..a156485d96cfd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/haproxy/testdata/v2.3.10/metrics.txt @@ -0,0 +1,382 @@ +# HELP haproxy_frontend_status Current status of the service (frontend: 0=STOP, 1=UP - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB). +# TYPE haproxy_frontend_status gauge +haproxy_frontend_status{proxy="healthz"} 1 +haproxy_frontend_status{proxy="http"} 1 +haproxy_frontend_status{proxy="https"} 1 +haproxy_frontend_status{proxy="stats"} 1 +# HELP haproxy_frontend_current_sessions Current number of active sessions.
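+# NOTE: the collector's selector (see init.go above) allows only haproxy_backend_* series, so the haproxy_frontend_* series in this fixture are dropped at scrape time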
+# TYPE haproxy_frontend_current_sessions gauge +haproxy_frontend_current_sessions{proxy="healthz"} 1 +haproxy_frontend_current_sessions{proxy="http"} 1 +haproxy_frontend_current_sessions{proxy="https"} 1348 +haproxy_frontend_current_sessions{proxy="stats"} 2 +# HELP haproxy_frontend_max_sessions Maximum observed number of active sessions. +# TYPE haproxy_frontend_max_sessions gauge +haproxy_frontend_max_sessions{proxy="healthz"} 10 +haproxy_frontend_max_sessions{proxy="http"} 5 +haproxy_frontend_max_sessions{proxy="https"} 1389 +haproxy_frontend_max_sessions{proxy="stats"} 8 +# HELP haproxy_frontend_limit_sessions Configured session limit. +# TYPE haproxy_frontend_limit_sessions gauge +haproxy_frontend_limit_sessions{proxy="healthz"} 524181 +haproxy_frontend_limit_sessions{proxy="http"} 524181 +haproxy_frontend_limit_sessions{proxy="https"} 524181 +haproxy_frontend_limit_sessions{proxy="stats"} 524181 +# HELP haproxy_frontend_sessions_total Total number of sessions. +# TYPE haproxy_frontend_sessions_total counter +haproxy_frontend_sessions_total{proxy="healthz"} 723971 +haproxy_frontend_sessions_total{proxy="http"} 1392 +haproxy_frontend_sessions_total{proxy="https"} 23433914 +haproxy_frontend_sessions_total{proxy="stats"} 4207 +# HELP haproxy_frontend_limit_session_rate Configured limit on new sessions per second. +# TYPE haproxy_frontend_limit_session_rate gauge +haproxy_frontend_limit_session_rate{proxy="healthz"} 1 +haproxy_frontend_limit_session_rate{proxy="http"} 1 +haproxy_frontend_limit_session_rate{proxy="https"} 1 +haproxy_frontend_limit_session_rate{proxy="stats"} 1 +# HELP haproxy_frontend_max_session_rate Maximum observed number of sessions per second. +# TYPE haproxy_frontend_max_session_rate gauge +haproxy_frontend_max_session_rate{proxy="healthz"} 1 +haproxy_frontend_max_session_rate{proxy="http"} 12 +haproxy_frontend_max_session_rate{proxy="https"} 96 +haproxy_frontend_max_session_rate{proxy="stats"} 2 +# HELP haproxy_frontend_connections_rate_max Maximum observed number of connections per second. +# TYPE haproxy_frontend_connections_rate_max gauge +haproxy_frontend_connections_rate_max{proxy="healthz"} 1 +haproxy_frontend_connections_rate_max{proxy="http"} 12 +haproxy_frontend_connections_rate_max{proxy="https"} 85 +haproxy_frontend_connections_rate_max{proxy="stats"} 2 +# HELP haproxy_frontend_connections_total Total number of connections. +# TYPE haproxy_frontend_connections_total counter +haproxy_frontend_connections_total{proxy="healthz"} 723971 +haproxy_frontend_connections_total{proxy="http"} 1392 +haproxy_frontend_connections_total{proxy="https"} 23476808 +haproxy_frontend_connections_total{proxy="stats"} 4207 +# HELP haproxy_frontend_bytes_in_total Current total of incoming bytes. +# TYPE haproxy_frontend_bytes_in_total counter +haproxy_frontend_bytes_in_total{proxy="healthz"} 79636810 +haproxy_frontend_bytes_in_total{proxy="http"} 73990 +haproxy_frontend_bytes_in_total{proxy="https"} 2514816135823 +haproxy_frontend_bytes_in_total{proxy="stats"} 14694474 +# HELP haproxy_frontend_bytes_out_total Current total of outgoing bytes. +# TYPE haproxy_frontend_bytes_out_total counter +haproxy_frontend_bytes_out_total{proxy="healthz"} 112215505 +haproxy_frontend_bytes_out_total{proxy="http"} 260431 +haproxy_frontend_bytes_out_total{proxy="https"} 46485344378 +haproxy_frontend_bytes_out_total{proxy="stats"} 23646727611 +# HELP haproxy_frontend_requests_denied_total Total number of denied requests. 
+# TYPE haproxy_frontend_requests_denied_total counter +haproxy_frontend_requests_denied_total{proxy="healthz"} 1 +haproxy_frontend_requests_denied_total{proxy="http"} 1 +haproxy_frontend_requests_denied_total{proxy="https"} 1 +haproxy_frontend_requests_denied_total{proxy="stats"} 1 +# HELP haproxy_frontend_responses_denied_total Total number of denied responses. +# TYPE haproxy_frontend_responses_denied_total counter +haproxy_frontend_responses_denied_total{proxy="healthz"} 1 +haproxy_frontend_responses_denied_total{proxy="http"} 1 +haproxy_frontend_responses_denied_total{proxy="https"} 1 +haproxy_frontend_responses_denied_total{proxy="stats"} 1 +# HELP haproxy_frontend_request_errors_total Total number of request errors. +# TYPE haproxy_frontend_request_errors_total counter +haproxy_frontend_request_errors_total{proxy="healthz"} 1 +haproxy_frontend_request_errors_total{proxy="http"} 1107 +haproxy_frontend_request_errors_total{proxy="https"} 5922 +haproxy_frontend_request_errors_total{proxy="stats"} 12 +# HELP haproxy_frontend_denied_connections_total Total number of requests denied by "tcp-request connection" rules. +# TYPE haproxy_frontend_denied_connections_total counter +haproxy_frontend_denied_connections_total{proxy="healthz"} 1 +haproxy_frontend_denied_connections_total{proxy="http"} 1 +haproxy_frontend_denied_connections_total{proxy="https"} 1 +haproxy_frontend_denied_connections_total{proxy="stats"} 1 +# HELP haproxy_frontend_denied_sessions_total Total number of requests denied by "tcp-request session" rules. +# TYPE haproxy_frontend_denied_sessions_total counter +haproxy_frontend_denied_sessions_total{proxy="healthz"} 1 +haproxy_frontend_denied_sessions_total{proxy="http"} 1 +haproxy_frontend_denied_sessions_total{proxy="https"} 1 +haproxy_frontend_denied_sessions_total{proxy="stats"} 1 +# HELP haproxy_frontend_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE haproxy_frontend_failed_header_rewriting_total counter +haproxy_frontend_failed_header_rewriting_total{proxy="healthz"} 1 +haproxy_frontend_failed_header_rewriting_total{proxy="http"} 1 +haproxy_frontend_failed_header_rewriting_total{proxy="https"} 1 +haproxy_frontend_failed_header_rewriting_total{proxy="stats"} 1 +# HELP haproxy_frontend_internal_errors_total Total number of internal errors. +# TYPE haproxy_frontend_internal_errors_total counter +haproxy_frontend_internal_errors_total{proxy="healthz"} 1 +haproxy_frontend_internal_errors_total{proxy="http"} 1 +haproxy_frontend_internal_errors_total{proxy="https"} 1 +haproxy_frontend_internal_errors_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_requests_rate_max Maximum observed number of HTTP requests per second. +# TYPE haproxy_frontend_http_requests_rate_max gauge +haproxy_frontend_http_requests_rate_max{proxy="healthz"} 1 +haproxy_frontend_http_requests_rate_max{proxy="http"} 12 +haproxy_frontend_http_requests_rate_max{proxy="https"} 101 +haproxy_frontend_http_requests_rate_max{proxy="stats"} 2 +# HELP haproxy_frontend_http_requests_total Total number of HTTP requests received. +# TYPE haproxy_frontend_http_requests_total counter +haproxy_frontend_http_requests_total{proxy="healthz"} 723971 +haproxy_frontend_http_requests_total{proxy="http"} 1402 +haproxy_frontend_http_requests_total{proxy="https"} 35664484 +haproxy_frontend_http_requests_total{proxy="stats"} 60011 +# HELP haproxy_frontend_http_responses_total Total number of HTTP responses. 
+# TYPE haproxy_frontend_http_responses_total counter +haproxy_frontend_http_responses_total{proxy="healthz",code="1xx"} 1 +haproxy_frontend_http_responses_total{proxy="http",code="1xx"} 1 +haproxy_frontend_http_responses_total{proxy="https",code="1xx"} 4130401 +haproxy_frontend_http_responses_total{proxy="stats",code="1xx"} 1 +haproxy_frontend_http_responses_total{proxy="healthz",code="2xx"} 723971 +haproxy_frontend_http_responses_total{proxy="http",code="2xx"} 1 +haproxy_frontend_http_responses_total{proxy="https",code="2xx"} 21338013 +haproxy_frontend_http_responses_total{proxy="stats",code="2xx"} 59998 +haproxy_frontend_http_responses_total{proxy="healthz",code="3xx"} 1 +haproxy_frontend_http_responses_total{proxy="http",code="3xx"} 147 +haproxy_frontend_http_responses_total{proxy="https",code="3xx"} 10004 +haproxy_frontend_http_responses_total{proxy="stats",code="3xx"} 1 +haproxy_frontend_http_responses_total{proxy="healthz",code="4xx"} 1 +haproxy_frontend_http_responses_total{proxy="http",code="4xx"} 1107 +haproxy_frontend_http_responses_total{proxy="https",code="4xx"} 10175979 +haproxy_frontend_http_responses_total{proxy="stats",code="4xx"} 12 +haproxy_frontend_http_responses_total{proxy="healthz",code="5xx"} 1 +haproxy_frontend_http_responses_total{proxy="http",code="5xx"} 148 +haproxy_frontend_http_responses_total{proxy="https",code="5xx"} 3108 +haproxy_frontend_http_responses_total{proxy="stats",code="5xx"} 1 +haproxy_frontend_http_responses_total{proxy="healthz",code="other"} 1 +haproxy_frontend_http_responses_total{proxy="http",code="other"} 1 +haproxy_frontend_http_responses_total{proxy="https",code="other"} 5657 +haproxy_frontend_http_responses_total{proxy="stats",code="other"} 1 +# HELP haproxy_frontend_intercepted_requests_total Total number of intercepted HTTP requests. +# TYPE haproxy_frontend_intercepted_requests_total counter +haproxy_frontend_intercepted_requests_total{proxy="healthz"} 723971 +haproxy_frontend_intercepted_requests_total{proxy="http"} 147 +haproxy_frontend_intercepted_requests_total{proxy="https"} 1 +haproxy_frontend_intercepted_requests_total{proxy="stats"} 59999 +# HELP haproxy_frontend_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE haproxy_frontend_http_cache_lookups_total counter +haproxy_frontend_http_cache_lookups_total{proxy="healthz"} 1 +haproxy_frontend_http_cache_lookups_total{proxy="http"} 1 +haproxy_frontend_http_cache_lookups_total{proxy="https"} 1 +haproxy_frontend_http_cache_lookups_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_cache_hits_total Total number of HTTP cache hits. +# TYPE haproxy_frontend_http_cache_hits_total counter +haproxy_frontend_http_cache_hits_total{proxy="healthz"} 1 +haproxy_frontend_http_cache_hits_total{proxy="http"} 1 +haproxy_frontend_http_cache_hits_total{proxy="https"} 1 +haproxy_frontend_http_cache_hits_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE haproxy_frontend_http_comp_bytes_in_total counter +haproxy_frontend_http_comp_bytes_in_total{proxy="healthz"} 1 +haproxy_frontend_http_comp_bytes_in_total{proxy="http"} 1 +haproxy_frontend_http_comp_bytes_in_total{proxy="https"} 1 +haproxy_frontend_http_comp_bytes_in_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. 
+# TYPE haproxy_frontend_http_comp_bytes_out_total counter +haproxy_frontend_http_comp_bytes_out_total{proxy="healthz"} 1 +haproxy_frontend_http_comp_bytes_out_total{proxy="http"} 1 +haproxy_frontend_http_comp_bytes_out_total{proxy="https"} 1 +haproxy_frontend_http_comp_bytes_out_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). +# TYPE haproxy_frontend_http_comp_bytes_bypassed_total counter +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="healthz"} 1 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="http"} 1 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="https"} 1 +haproxy_frontend_http_comp_bytes_bypassed_total{proxy="stats"} 1 +# HELP haproxy_frontend_http_comp_responses_total Total number of HTTP responses that were compressed. +# TYPE haproxy_frontend_http_comp_responses_total counter +haproxy_frontend_http_comp_responses_total{proxy="healthz"} 1 +haproxy_frontend_http_comp_responses_total{proxy="http"} 1 +haproxy_frontend_http_comp_responses_total{proxy="https"} 1 +haproxy_frontend_http_comp_responses_total{proxy="stats"} 1 +# HELP haproxy_backend_status Current status of the service (frontend: 0=STOP, 1=UP - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB). +# TYPE haproxy_backend_status gauge +haproxy_backend_status{proxy="proxy1"} 1 +haproxy_backend_status{proxy="proxy2"} 1 +# HELP haproxy_backend_current_sessions Current number of active sessions. +# TYPE haproxy_backend_current_sessions gauge +haproxy_backend_current_sessions{proxy="proxy1"} 1 +haproxy_backend_current_sessions{proxy="proxy2"} 1322 +# HELP haproxy_backend_max_sessions Maximum observed number of active sessions. +# TYPE haproxy_backend_max_sessions gauge +haproxy_backend_max_sessions{proxy="proxy1"} 112 +haproxy_backend_max_sessions{proxy="proxy2"} 1367 +# HELP haproxy_backend_limit_sessions Configured session limit. +# TYPE haproxy_backend_limit_sessions gauge +haproxy_backend_limit_sessions{proxy="proxy1"} 1 +haproxy_backend_limit_sessions{proxy="proxy2"} 1 +# HELP haproxy_backend_sessions_total Total number of sessions. +# TYPE haproxy_backend_sessions_total counter +haproxy_backend_sessions_total{proxy="proxy1"} 31527507 +haproxy_backend_sessions_total{proxy="proxy2"} 4131723 +# HELP haproxy_backend_max_session_rate Maximum observed number of sessions per second. +# TYPE haproxy_backend_max_session_rate gauge +haproxy_backend_max_session_rate{proxy="proxy1"} 82 +haproxy_backend_max_session_rate{proxy="proxy2"} 41 +# HELP haproxy_backend_last_session_seconds Number of seconds since last session assigned to server/backend. +# TYPE haproxy_backend_last_session_seconds gauge +haproxy_backend_last_session_seconds{proxy="proxy1"} 1 +haproxy_backend_last_session_seconds{proxy="proxy2"} 3 +# HELP haproxy_backend_current_queue Current number of queued requests. +# TYPE haproxy_backend_current_queue gauge +haproxy_backend_current_queue{proxy="proxy1"} 1 +haproxy_backend_current_queue{proxy="proxy2"} 1 +# HELP haproxy_backend_max_queue Maximum observed number of queued requests. +# TYPE haproxy_backend_max_queue gauge +haproxy_backend_max_queue{proxy="proxy1"} 1 +haproxy_backend_max_queue{proxy="proxy2"} 1 +# HELP haproxy_backend_connection_attempts_total Total number of connection establishment attempts. 
+# TYPE haproxy_backend_connection_attempts_total counter +haproxy_backend_connection_attempts_total{proxy="proxy1"} 19864884 +haproxy_backend_connection_attempts_total{proxy="proxy2"} 4131723 +# HELP haproxy_backend_connection_reuses_total Total number of connection reuses. +# TYPE haproxy_backend_connection_reuses_total counter +haproxy_backend_connection_reuses_total{proxy="proxy1"} 11661922 +haproxy_backend_connection_reuses_total{proxy="proxy2"} 1 +# HELP haproxy_backend_bytes_in_total Current total of incoming bytes. +# TYPE haproxy_backend_bytes_in_total counter +haproxy_backend_bytes_in_total{proxy="proxy1"} 21057046294 +haproxy_backend_bytes_in_total{proxy="proxy2"} 2493759083896 +# HELP haproxy_backend_bytes_out_total Current total of outgoing bytes. +# TYPE haproxy_backend_bytes_out_total counter +haproxy_backend_bytes_out_total{proxy="proxy1"} 41352782609 +haproxy_backend_bytes_out_total{proxy="proxy2"} 5131407558 +# HELP haproxy_backend_queue_time_average_seconds Avg. queue time for last 1024 successful connections. +# TYPE haproxy_backend_queue_time_average_seconds gauge +haproxy_backend_queue_time_average_seconds{proxy="proxy1"} 0.000000 +haproxy_backend_queue_time_average_seconds{proxy="proxy2"} 0.000000 +# HELP haproxy_backend_connect_time_average_seconds Avg. connect time for last 1024 successful connections. +# TYPE haproxy_backend_connect_time_average_seconds gauge +haproxy_backend_connect_time_average_seconds{proxy="proxy1"} 0.000000 +haproxy_backend_connect_time_average_seconds{proxy="proxy2"} 0.001000 +# HELP haproxy_backend_response_time_average_seconds Avg. response time for last 1024 successful connections. +# TYPE haproxy_backend_response_time_average_seconds gauge +haproxy_backend_response_time_average_seconds{proxy="proxy1"} 0.052000 +haproxy_backend_response_time_average_seconds{proxy="proxy2"} 0.001000 +# HELP haproxy_backend_total_time_average_seconds Avg. total time for last 1024 successful connections. +# TYPE haproxy_backend_total_time_average_seconds gauge +haproxy_backend_total_time_average_seconds{proxy="proxy1"} 1.746000 +haproxy_backend_total_time_average_seconds{proxy="proxy2"} 198.639000 +# HELP haproxy_backend_max_queue_time_seconds Maximum observed time spent in the queue +# TYPE haproxy_backend_max_queue_time_seconds gauge +haproxy_backend_max_queue_time_seconds{proxy="proxy1"} 0.000000 +haproxy_backend_max_queue_time_seconds{proxy="proxy2"} 0.000000 +# HELP haproxy_backend_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete +# TYPE haproxy_backend_max_connect_time_seconds gauge +haproxy_backend_max_connect_time_seconds{proxy="proxy1"} 1.063000 +haproxy_backend_max_connect_time_seconds{proxy="proxy2"} 1.061000 +# HELP haproxy_backend_max_response_time_seconds Maximum observed time spent waiting for a server response +# TYPE haproxy_backend_max_response_time_seconds gauge +haproxy_backend_max_response_time_seconds{proxy="proxy1"} 74.050000 +haproxy_backend_max_response_time_seconds{proxy="proxy2"} 1.396000 +# HELP haproxy_backend_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing) +# TYPE haproxy_backend_max_total_time_seconds gauge +haproxy_backend_max_total_time_seconds{proxy="proxy1"} 331.297000 +haproxy_backend_max_total_time_seconds{proxy="proxy2"} 3116820.243000 +# HELP haproxy_backend_requests_denied_total Total number of denied requests. 
+# TYPE haproxy_backend_requests_denied_total counter +haproxy_backend_requests_denied_total{proxy="proxy1"} 1 +haproxy_backend_requests_denied_total{proxy="proxy2"} 1 +# HELP haproxy_backend_responses_denied_total Total number of denied responses. +# TYPE haproxy_backend_responses_denied_total counter +haproxy_backend_responses_denied_total{proxy="proxy1"} 1 +haproxy_backend_responses_denied_total{proxy="proxy2"} 1 +# HELP haproxy_backend_connection_errors_total Total number of connection errors. +# TYPE haproxy_backend_connection_errors_total counter +haproxy_backend_connection_errors_total{proxy="proxy1"} 1 +haproxy_backend_connection_errors_total{proxy="proxy2"} 1 +# HELP haproxy_backend_response_errors_total Total number of response errors. +# TYPE haproxy_backend_response_errors_total counter +haproxy_backend_response_errors_total{proxy="proxy1"} 13 +haproxy_backend_response_errors_total{proxy="proxy2"} 4122625 +# HELP haproxy_backend_retry_warnings_total Total number of retry warnings. +# TYPE haproxy_backend_retry_warnings_total counter +haproxy_backend_retry_warnings_total{proxy="proxy1"} 1 +haproxy_backend_retry_warnings_total{proxy="proxy2"} 1 +# HELP haproxy_backend_redispatch_warnings_total Total number of redispatch warnings. +# TYPE haproxy_backend_redispatch_warnings_total counter +haproxy_backend_redispatch_warnings_total{proxy="proxy1"} 1 +haproxy_backend_redispatch_warnings_total{proxy="proxy2"} 1 +# HELP haproxy_backend_failed_header_rewriting_total Total number of failed header rewriting warnings. +# TYPE haproxy_backend_failed_header_rewriting_total counter +haproxy_backend_failed_header_rewriting_total{proxy="proxy1"} 1 +haproxy_backend_failed_header_rewriting_total{proxy="proxy2"} 1 +# HELP haproxy_backend_internal_errors_total Total number of internal errors. +# TYPE haproxy_backend_internal_errors_total counter +haproxy_backend_internal_errors_total{proxy="proxy1"} 1 +haproxy_backend_internal_errors_total{proxy="proxy2"} 1 +# HELP haproxy_backend_client_aborts_total Total number of data transfers aborted by the client. +# TYPE haproxy_backend_client_aborts_total counter +haproxy_backend_client_aborts_total{proxy="proxy1"} 27231 +haproxy_backend_client_aborts_total{proxy="proxy2"} 7777 +# HELP haproxy_backend_server_aborts_total Total number of data transfers aborted by the server. +# TYPE haproxy_backend_server_aborts_total counter +haproxy_backend_server_aborts_total{proxy="proxy1"} 1 +haproxy_backend_server_aborts_total{proxy="proxy2"} 4122625 +# HELP haproxy_backend_weight Service weight. +# TYPE haproxy_backend_weight gauge +haproxy_backend_weight{proxy="proxy1"} 256 +haproxy_backend_weight{proxy="proxy2"} 640 +# HELP haproxy_backend_active_servers Current number of active servers. +# TYPE haproxy_backend_active_servers gauge +haproxy_backend_active_servers{proxy="proxy1"} 2 +haproxy_backend_active_servers{proxy="proxy2"} 5 +# HELP haproxy_backend_backup_servers Current number of backup servers. +# TYPE haproxy_backend_backup_servers gauge +haproxy_backend_backup_servers{proxy="proxy1"} 1 +haproxy_backend_backup_servers{proxy="proxy2"} 1 +# HELP haproxy_backend_check_up_down_total Total number of UP->DOWN transitions. +# TYPE haproxy_backend_check_up_down_total counter +haproxy_backend_check_up_down_total{proxy="proxy1"} 1 +haproxy_backend_check_up_down_total{proxy="proxy2"} 1 +# HELP haproxy_backend_check_last_change_seconds Number of seconds since the last UP<->DOWN transition. 
+# TYPE haproxy_backend_check_last_change_seconds gauge +haproxy_backend_check_last_change_seconds{proxy="proxy1"} 3619864 +haproxy_backend_check_last_change_seconds{proxy="proxy2"} 3619864 +# HELP haproxy_backend_downtime_seconds_total Total downtime (in seconds) for the service. +# TYPE haproxy_backend_downtime_seconds_total counter +haproxy_backend_downtime_seconds_total{proxy="proxy1"} 1 +haproxy_backend_downtime_seconds_total{proxy="proxy2"} 1 +# HELP haproxy_backend_loadbalanced_total Total number of times a service was selected, either for new sessions, or when redispatching. +# TYPE haproxy_backend_loadbalanced_total counter +haproxy_backend_loadbalanced_total{proxy="proxy1"} 31526806 +haproxy_backend_loadbalanced_total{proxy="proxy2"} 4131723 +# HELP haproxy_backend_http_requests_total Total number of HTTP requests received. +# TYPE haproxy_backend_http_requests_total counter +haproxy_backend_http_requests_total{proxy="proxy1"} 31527507 +haproxy_backend_http_requests_total{proxy="proxy2"} 4130401 +# HELP haproxy_backend_http_responses_total Total number of HTTP responses. +# TYPE haproxy_backend_http_responses_total counter +haproxy_backend_http_responses_total{proxy="proxy1",code="1xx"} 1 +haproxy_backend_http_responses_total{proxy="proxy2",code="1xx"} 4130401 +haproxy_backend_http_responses_total{proxy="proxy1",code="2xx"} 21338013 +haproxy_backend_http_responses_total{proxy="proxy2",code="2xx"} 1 +haproxy_backend_http_responses_total{proxy="proxy1",code="3xx"} 10004 +haproxy_backend_http_responses_total{proxy="proxy2",code="3xx"} 1 +haproxy_backend_http_responses_total{proxy="proxy1",code="4xx"} 10170758 +haproxy_backend_http_responses_total{proxy="proxy2",code="4xx"} 1 +haproxy_backend_http_responses_total{proxy="proxy1",code="5xx"} 3075 +haproxy_backend_http_responses_total{proxy="proxy2",code="5xx"} 1 +haproxy_backend_http_responses_total{proxy="proxy1",code="other"} 5657 +haproxy_backend_http_responses_total{proxy="proxy2",code="other"} 1 +# HELP haproxy_backend_http_cache_lookups_total Total number of HTTP cache lookups. +# TYPE haproxy_backend_http_cache_lookups_total counter +haproxy_backend_http_cache_lookups_total{proxy="proxy1"} 1 +haproxy_backend_http_cache_lookups_total{proxy="proxy2"} 1 +# HELP haproxy_backend_http_cache_hits_total Total number of HTTP cache hits. +# TYPE haproxy_backend_http_cache_hits_total counter +haproxy_backend_http_cache_hits_total{proxy="proxy1"} 1 +haproxy_backend_http_cache_hits_total{proxy="proxy2"} 1 +# HELP haproxy_backend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor. +# TYPE haproxy_backend_http_comp_bytes_in_total counter +haproxy_backend_http_comp_bytes_in_total{proxy="proxy1"} 1 +haproxy_backend_http_comp_bytes_in_total{proxy="proxy2"} 1 +# HELP haproxy_backend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor. +# TYPE haproxy_backend_http_comp_bytes_out_total counter +haproxy_backend_http_comp_bytes_out_total{proxy="proxy1"} 1 +haproxy_backend_http_comp_bytes_out_total{proxy="proxy2"} 1 +# HELP haproxy_backend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit). +# TYPE haproxy_backend_http_comp_bytes_bypassed_total counter +haproxy_backend_http_comp_bytes_bypassed_total{proxy="proxy1"} 1 +haproxy_backend_http_comp_bytes_bypassed_total{proxy="proxy2"} 1 +# HELP haproxy_backend_http_comp_responses_total Total number of HTTP responses that were compressed. 
+# TYPE haproxy_backend_http_comp_responses_total counter +haproxy_backend_http_comp_responses_total{proxy="proxy1"} 1 +haproxy_backend_http_comp_responses_total{proxy="proxy2"} 1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/README.md b/src/go/collectors/go.d.plugin/modules/hdfs/README.md new file mode 120000 index 00000000000000..38f428a068a3c8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/README.md @@ -0,0 +1 @@ +integrations/hadoop_distributed_file_system_hdfs.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/charts.go b/src/go/collectors/go.d.plugin/modules/hdfs/charts.go new file mode 100644 index 00000000000000..77e4b09c89bad7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/charts.go @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + Charts = module.Charts + Dims = module.Dims + Vars = module.Vars +) + +var jvmCharts = Charts{ + { + ID: "jvm_heap_memory", + Title: "Heap Memory", + Units: "MiB", + Fam: "jvm", + Ctx: "hdfs.heap_memory", + Type: module.Area, + Dims: Dims{ + {ID: "jvm_mem_heap_committed", Name: "committed", Div: 1000}, + {ID: "jvm_mem_heap_used", Name: "used", Div: 1000}, + }, + Vars: Vars{ + {ID: "jvm_mem_heap_max"}, + }, + }, + { + ID: "jvm_gc_count_total", + Title: "GC Events", + Units: "events/s", + Fam: "jvm", + Ctx: "hdfs.gc_count_total", + Dims: Dims{ + {ID: "jvm_gc_count", Name: "gc", Algo: module.Incremental}, + }, + }, + { + ID: "jvm_gc_time_total", + Title: "GC Time", + Units: "ms", + Fam: "jvm", + Ctx: "hdfs.gc_time_total", + Dims: Dims{ + {ID: "jvm_gc_time_millis", Name: "time", Algo: module.Incremental}, + }, + }, + { + ID: "jvm_gc_threshold", + Title: "Number of Times That the GC Threshold is Exceeded", + Units: "events/s", + Fam: "jvm", + Ctx: "hdfs.gc_threshold", + Dims: Dims{ + {ID: "jvm_gc_num_info_threshold_exceeded", Name: "info", Algo: module.Incremental}, + {ID: "jvm_gc_num_warn_threshold_exceeded", Name: "warn", Algo: module.Incremental}, + }, + }, + { + ID: "jvm_threads", + Title: "Number of Threads", + Units: "num", + Fam: "jvm", + Ctx: "hdfs.threads", + Type: module.Stacked, + Dims: Dims{ + {ID: "jvm_threads_new", Name: "new"}, + {ID: "jvm_threads_runnable", Name: "runnable"}, + {ID: "jvm_threads_blocked", Name: "blocked"}, + {ID: "jvm_threads_waiting", Name: "waiting"}, + {ID: "jvm_threads_timed_waiting", Name: "timed_waiting"}, + {ID: "jvm_threads_terminated", Name: "terminated"}, + }, + }, + { + ID: "jvm_logs_total", + Title: "Number of Logs", + Units: "logs/s", + Fam: "jvm", + Ctx: "hdfs.logs_total", + Type: module.Stacked, + Dims: Dims{ + {ID: "jvm_log_info", Name: "info", Algo: module.Incremental}, + {ID: "jvm_log_error", Name: "error", Algo: module.Incremental}, + {ID: "jvm_log_warn", Name: "warn", Algo: module.Incremental}, + {ID: "jvm_log_fatal", Name: "fatal", Algo: module.Incremental}, + }, + }, +} + +var rpcActivityCharts = Charts{ + { + ID: "rpc_bandwidth", + Title: "RPC Bandwidth", + Units: "kilobits/s", + Fam: "rpc", + Ctx: "hdfs.rpc_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: "rpc_received_bytes", Name: "received", Div: 1000, Algo: module.Incremental}, + {ID: "rpc_sent_bytes", Name: "sent", Div: -1000, Algo: module.Incremental}, + }, + }, + { + ID: "rpc_calls", + Title: "RPC Calls", + Units: "calls/s", + Fam: "rpc", + Ctx: "hdfs.rpc_calls", + Dims: Dims{ + {ID: "rpc_queue_time_num_ops", Name: "calls", Algo: 
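+ // Incremental: netdata charts this monotonically growing counter as a per-second rate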
module.Incremental}, + }, + }, + { + ID: "rpc_open_connections", + Title: "RPC Open Connections", + Units: "connections", + Fam: "rpc", + Ctx: "hdfs.open_connections", + Dims: Dims{ + {ID: "rpc_num_open_connections", Name: "open"}, + }, + }, + { + ID: "rpc_call_queue_length", + Title: "RPC Call Queue Length", + Units: "num", + Fam: "rpc", + Ctx: "hdfs.call_queue_length", + Dims: Dims{ + {ID: "rpc_call_queue_length", Name: "length"}, + }, + }, + { + ID: "rpc_avg_queue_time", + Title: "RPC Avg Queue Time", + Units: "ms", + Fam: "rpc", + Ctx: "hdfs.avg_queue_time", + Dims: Dims{ + {ID: "rpc_queue_time_avg_time", Name: "time", Div: 1000}, + }, + }, + { + ID: "rpc_avg_processing_time", + Title: "RPC Avg Processing Time", + Units: "ms", + Fam: "rpc", + Ctx: "hdfs.avg_processing_time", + Dims: Dims{ + {ID: "rpc_processing_time_avg_time", Name: "time", Div: 1000}, + }, + }, +} + +var fsNameSystemCharts = Charts{ + { + ID: "fs_name_system_capacity", + Title: "Capacity Across All Datanodes", + Units: "KiB", + Fam: "fs name system", + Ctx: "hdfs.capacity", + Type: module.Stacked, + Dims: Dims{ + {ID: "fsns_capacity_remaining", Name: "remaining", Div: 1024}, + {ID: "fsns_capacity_used", Name: "used", Div: 1024}, + }, + Vars: Vars{ + {ID: "fsns_capacity_total"}, + }, + }, + { + ID: "fs_name_system_used_capacity", + Title: "Used Capacity Across All Datanodes", + Units: "KiB", + Fam: "fs name system", + Ctx: "hdfs.used_capacity", + Type: module.Stacked, + Dims: Dims{ + {ID: "fsns_capacity_used_dfs", Name: "dfs", Div: 1024}, + {ID: "fsns_capacity_used_non_dfs", Name: "non_dfs", Div: 1024}, + }, + }, + { + ID: "fs_name_system_load", + Title: "Number of Concurrent File Accesses (read/write) Across All DataNodes", + Units: "load", + Fam: "fs name system", + Ctx: "hdfs.load", + Dims: Dims{ + {ID: "fsns_total_load", Name: "load"}, + }, + }, + { + ID: "fs_name_system_volume_failures_total", + Title: "Number of Volume Failures Across All Datanodes", + Units: "events/s", + Fam: "fs name system", + Ctx: "hdfs.volume_failures_total", + Dims: Dims{ + {ID: "fsns_volume_failures_total", Name: "failures", Algo: module.Incremental}, + }, + }, + { + ID: "fs_files_total", + Title: "Number of Tracked Files", + Units: "num", + Fam: "fs name system", + Ctx: "hdfs.files_total", + Dims: Dims{ + {ID: "fsns_files_total", Name: "files"}, + }, + }, + { + ID: "fs_blocks_total", + Title: "Number of Allocated Blocks in the System", + Units: "num", + Fam: "fs name system", + Ctx: "hdfs.blocks_total", + Dims: Dims{ + {ID: "fsns_blocks_total", Name: "blocks"}, + }, + }, + { + ID: "fs_problem_blocks", + Title: "Number of Problem Blocks (can point to an unhealthy cluster)", + Units: "num", + Fam: "fs name system", + Ctx: "hdfs.blocks", + Dims: Dims{ + {ID: "fsns_corrupt_blocks", Name: "corrupt"}, + {ID: "fsns_missing_blocks", Name: "missing"}, + {ID: "fsns_under_replicated_blocks", Name: "under_replicated"}, + }, + }, + { + ID: "fs_name_system_data_nodes", + Title: "Number of Data Nodes By Status", + Units: "num", + Fam: "fs name system", + Ctx: "hdfs.data_nodes", + Type: module.Stacked, + Dims: Dims{ + {ID: "fsns_num_live_data_nodes", Name: "live"}, + {ID: "fsns_num_dead_data_nodes", Name: "dead"}, + {ID: "fsns_stale_data_nodes", Name: "stale"}, + }, + }, +} + +var fsDatasetStateCharts = Charts{ + { + ID: "fs_dataset_state_capacity", + Title: "Capacity", + Units: "KiB", + Fam: "fs dataset", + Ctx: "hdfs.datanode_capacity", + Type: module.Stacked, + Dims: Dims{ + {ID: "fsds_capacity_remaining", Name: "remaining", Div: 1024}, + {ID: 
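+ // fsds_capacity_used is not reported by the node directly; collectFSDatasetState derives it as capacity total minus remaining: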
"fsds_capacity_used", Name: "used", Div: 1024}, + }, + Vars: Vars{ + {ID: "fsds_capacity_total"}, + }, + }, + { + ID: "fs_dataset_state_used_capacity", + Title: "Used Capacity", + Units: "KiB", + Fam: "fs dataset", + Ctx: "hdfs.datanode_used_capacity", + Type: module.Stacked, + Dims: Dims{ + {ID: "fsds_capacity_used_dfs", Name: "dfs", Div: 1024}, + {ID: "fsds_capacity_used_non_dfs", Name: "non_dfs", Div: 1024}, + }, + }, + { + ID: "fs_dataset_state_num_failed_volumes", + Title: "Number of Failed Volumes", + Units: "num", + Fam: "fs dataset", + Ctx: "hdfs.datanode_failed_volumes", + Dims: Dims{ + {ID: "fsds_num_failed_volumes", Name: "failed volumes"}, + }, + }, +} + +var fsDataNodeActivityCharts = Charts{ + { + ID: "dna_bandwidth", + Title: "Bandwidth", + Units: "KiB/s", + Fam: "activity", + Ctx: "hdfs.datanode_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: "dna_bytes_read", Name: "reads", Div: 1024, Algo: module.Incremental}, + {ID: "dna_bytes_written", Name: "writes", Div: -1024, Algo: module.Incremental}, + }, + }, +} + +func dataNodeCharts() *Charts { + charts := Charts{} + panicIfError(charts.Add(*jvmCharts.Copy()...)) + panicIfError(charts.Add(*rpcActivityCharts.Copy()...)) + panicIfError(charts.Add(*fsDatasetStateCharts.Copy()...)) + panicIfError(charts.Add(*fsDataNodeActivityCharts.Copy()...)) + return &charts +} + +func nameNodeCharts() *Charts { + charts := Charts{} + panicIfError(charts.Add(*jvmCharts.Copy()...)) + panicIfError(charts.Add(*rpcActivityCharts.Copy()...)) + panicIfError(charts.Add(*fsNameSystemCharts.Copy()...)) + return &charts +} + +func panicIfError(err error) { + if err != nil { + panic(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/client.go b/src/go/collectors/go.d.plugin/modules/hdfs/client.go new file mode 100644 index 00000000000000..bf46c5ddd7aebc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/client.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func newClient(httpClient *http.Client, request web.Request) *client { + return &client{ + httpClient: httpClient, + request: request, + } +} + +type client struct { + httpClient *http.Client + request web.Request +} + +func (c *client) do() (*http.Response, error) { + req, err := web.NewHTTPRequest(c.request) + if err != nil { + return nil, fmt.Errorf("error on creating http request to %s : %v", c.request.URL, err) + } + + // req.Header.Add("Accept-Encoding", "gzip") + // req.Header.Set("User-Agent", "netdata/go.d.plugin") + + return c.httpClient.Do(req) +} + +func (c *client) doOK() (*http.Response, error) { + resp, err := c.do() + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned %d", c.request.URL, resp.StatusCode) + } + return resp, nil +} + +func (c *client) doOKWithDecodeJSON(dst interface{}) error { + resp, err := c.doOK() + defer closeBody(resp) + if err != nil { + return err + } + + err = json.NewDecoder(resp.Body).Decode(dst) + if err != nil { + return fmt.Errorf("error on decoding response from %s : %v", c.request.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/collect.go b/src/go/collectors/go.d.plugin/modules/hdfs/collect.go new file mode 100644 index 
00000000000000..9879787cd400c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/collect.go @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + rawData map[string]json.RawMessage + rawJMX struct { + Beans []rawData + } +) + +func (r rawJMX) isEmpty() bool { + return len(r.Beans) == 0 +} + +func (r rawJMX) find(f func(rawData) bool) rawData { + for _, v := range r.Beans { + if f(v) { + return v + } + } + return nil +} + +func (r rawJMX) findJvm() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" } + return r.find(f) +} + +func (r rawJMX) findRPCActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") } + return r.find(f) +} + +func (r rawJMX) findFSNameSystem() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" } + return r.find(f) +} + +func (r rawJMX) findFSDatasetState() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" } + return r.find(f) +} + +func (r rawJMX) findDataNodeActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") } + return r.find(f) +} + +func (h *HDFS) collect() (map[string]int64, error) { + var raw rawJMX + err := h.client.doOKWithDecodeJSON(&raw) + if err != nil { + return nil, err + } + + if raw.isEmpty() { + return nil, errors.New("empty response") + } + + mx := h.collectRawJMX(raw) + + return stm.ToMap(mx), nil +} + +func (h HDFS) collectRawJMX(raw rawJMX) *metrics { + var mx metrics + switch h.nodeType { + default: + panic(fmt.Sprintf("unsupported node type : '%s'", h.nodeType)) + case nameNodeType: + h.collectNameNode(&mx, raw) + case dataNodeType: + h.collectDataNode(&mx, raw) + } + return &mx +} + +func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) { + err := h.collectJVM(mx, raw) + if err != nil { + h.Debugf("error on collecting jvm : %v", err) + } + + err = h.collectRPCActivity(mx, raw) + if err != nil { + h.Debugf("error on collecting rpc activity : %v", err) + } + + err = h.collectFSNameSystem(mx, raw) + if err != nil { + h.Debugf("error on collecting fs name system : %v", err) + } +} + +func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) { + err := h.collectJVM(mx, raw) + if err != nil { + h.Debugf("error on collecting jvm : %v", err) + } + + err = h.collectRPCActivity(mx, raw) + if err != nil { + h.Debugf("error on collecting rpc activity : %v", err) + } + + err = h.collectFSDatasetState(mx, raw) + if err != nil { + h.Debugf("error on collecting fs dataset state : %v", err) + } + + err = h.collectDataNodeActivity(mx, raw) + if err != nil { + h.Debugf("error on collecting datanode activity state : %v", err) + } +} + +func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error { + v := raw.findJvm() + if v == nil { + return nil + } + + var jvm jvmMetrics + err := writeJSONTo(&jvm, v) + if err != nil { + return err + } + + mx.Jvm = &jvm + return nil +} + +func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { + v := raw.findRPCActivity() + if v == nil { + return nil + } + + var rpc rpcActivityMetrics + err := writeJSONTo(&rpc, v) + if err != nil { + return err + } + + mx.Rpc = &rpc + return nil +} + +func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { + v := 
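+ // the FSNamesystem bean exists only on NameNodes; a nil result means this section is simply skipped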
raw.findFSNameSystem() + if v == nil { + return nil + } + + var fs fsNameSystemMetrics + err := writeJSONTo(&fs, v) + if err != nil { + return err + } + + fs.CapacityUsed = fs.CapacityDfsUsed + fs.CapacityUsedNonDFS + + mx.FSNameSystem = &fs + return nil +} + +func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { + v := raw.findFSDatasetState() + if v == nil { + return nil + } + + var fs fsDatasetStateMetrics + err := writeJSONTo(&fs, v) + if err != nil { + return err + } + + fs.CapacityUsed = fs.Capacity - fs.Remaining + fs.CapacityUsedNonDFS = fs.CapacityUsed - fs.DfsUsed + + mx.FSDatasetState = &fs + return nil +} + +func (h HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { + v := raw.findDataNodeActivity() + if v == nil { + return nil + } + + var dna dataNodeActivityMetrics + err := writeJSONTo(&dna, v) + if err != nil { + return err + } + + mx.DataNodeActivity = &dna + return nil +} + +func writeJSONTo(dst interface{}, src interface{}) error { + b, err := json.Marshal(src) + if err != nil { + return err + } + return json.Unmarshal(b, dst) +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/config_schema.json b/src/go/collectors/go.d.plugin/modules/hdfs/config_schema.json new file mode 100644 index 00000000000000..483c493016f1bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/hdfs job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/hdfs.go b/src/go/collectors/go.d.plugin/modules/hdfs/hdfs.go new file mode 100644 index 00000000000000..aa0b2efe28a3a9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/hdfs.go @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + _ "embed" + "errors" + "strings" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("hdfs", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +// New creates HDFS with default values. +func New() *HDFS { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:50070/jmx", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}}, + }, + } + + return &HDFS{ + Config: config, + } +} + +type nodeType string + +const ( + dataNodeType nodeType = "DataNode" + nameNodeType nodeType = "NameNode" +) + +// Config is the HDFS module configuration. +type Config struct { + web.HTTP `yaml:",inline"` +} + +// HDFS HDFS module. 
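+// The node type (NameNode or DataNode) is not configured by the user: Check autodetects it +// from the JvmMetrics bean's "tag.ProcessName" (see determineNodeType below).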
+type HDFS struct { + module.Base + Config `yaml:",inline"` + + nodeType + client *client +} + +// Cleanup makes cleanup. +func (HDFS) Cleanup() {} + +func (h HDFS) createClient() (*client, error) { + httpClient, err := web.NewHTTPClient(h.Client) + if err != nil { + return nil, err + } + + return newClient(httpClient, h.Request), nil +} + +func (h HDFS) determineNodeType() (nodeType, error) { + var raw rawJMX + err := h.client.doOKWithDecodeJSON(&raw) + if err != nil { + return "", err + } + + if raw.isEmpty() { + return "", errors.New("empty response") + } + + jvm := raw.findJvm() + if jvm == nil { + return "", errors.New("couldn't find jvm in response") + } + + v, ok := jvm["tag.ProcessName"] + if !ok { + return "", errors.New("couldn't find process name in JvmMetrics") + } + + t := nodeType(strings.Trim(string(v), "\"")) + if t == nameNodeType || t == dataNodeType { + return t, nil + } + return "", errors.New("unknown node type") +} + +// Init makes initialization. +func (h *HDFS) Init() bool { + cl, err := h.createClient() + if err != nil { + h.Errorf("error on creating client : %v", err) + return false + } + h.client = cl + + return true +} + +// Check makes check. +func (h *HDFS) Check() bool { + t, err := h.determineNodeType() + if err != nil { + h.Errorf("error on node type determination : %v", err) + return false + } + h.nodeType = t + + return len(h.Collect()) > 0 +} + +// Charts returns Charts. +func (h HDFS) Charts() *Charts { + switch h.nodeType { + default: + return nil + case nameNodeType: + return nameNodeCharts() + case dataNodeType: + return dataNodeCharts() + } +} + +// Collect collects metrics. +func (h *HDFS) Collect() map[string]int64 { + mx, err := h.collect() + + if err != nil { + h.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/hdfs_test.go b/src/go/collectors/go.d.plugin/modules/hdfs/hdfs_test.go new file mode 100644 index 00000000000000..dc5b7cf0e74cfc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/hdfs_test.go @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testUnknownNodeData, _ = os.ReadFile("testdata/unknownnode.json") + testDataNodeData, _ = os.ReadFile("testdata/datanode.json") + testNameNodeData, _ = os.ReadFile("testdata/namenode.json") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, testUnknownNodeData) + assert.NotNil(t, testDataNodeData) + assert.NotNil(t, testNameNodeData) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestHDFS_Init(t *testing.T) { + job := New() + + assert.True(t, job.Init()) +} + +func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { + job := New() + job.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, job.Init()) +} + +func TestHDFS_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNameNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.True(t, job.Check()) + assert.NotZero(t, job.nodeType) +} + +func TestHDFS_CheckDataNode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = 
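+ // serve the DataNode JMX fixture so that Check autodetects dataNodeType below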
w.Write(testDataNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.True(t, job.Check()) + assert.Equal(t, dataNodeType, job.nodeType) +} + +func TestHDFS_CheckNameNode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNameNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.True(t, job.Check()) + assert.Equal(t, nameNodeType, job.nodeType) +} + +func TestHDFS_CheckErrorOnNodeTypeDetermination(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testUnknownNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.False(t, job.Check()) +} + +func TestHDFS_CheckNoResponse(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/jmx" + require.True(t, job.Init()) + + assert.False(t, job.Check()) +} + +func TestHDFS_Charts(t *testing.T) { + assert.Nil(t, New().Charts()) +} + +func TestHDFS_ChartsUnknownNode(t *testing.T) { + job := New() + + assert.Nil(t, job.Charts()) +} + +func TestHDFS_ChartsDataNode(t *testing.T) { + job := New() + job.nodeType = dataNodeType + + assert.Equal(t, dataNodeCharts(), job.Charts()) +} + +func TestHDFS_ChartsNameNode(t *testing.T) { + job := New() + job.nodeType = nameNodeType + + assert.Equal(t, nameNodeCharts(), job.Charts()) +} + +func TestHDFS_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestHDFS_CollectDataNode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testDataNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "dna_bytes_read": 80689178, + "dna_bytes_written": 500960407, + "fsds_capacity_remaining": 32920760320, + "fsds_capacity_total": 53675536384, + "fsds_capacity_used": 20754776064, + "fsds_capacity_used_dfs": 1186058240, + "fsds_capacity_used_non_dfs": 19568717824, + "fsds_num_failed_volumes": 0, + "jvm_gc_count": 155, + "jvm_gc_num_info_threshold_exceeded": 0, + "jvm_gc_num_warn_threshold_exceeded": 0, + "jvm_gc_time_millis": 672, + "jvm_gc_total_extra_sleep_time": 8783, + "jvm_log_error": 1, + "jvm_log_fatal": 0, + "jvm_log_info": 257, + "jvm_log_warn": 2, + "jvm_mem_heap_committed": 60500, + "jvm_mem_heap_max": 843, + "jvm_mem_heap_used": 18885, + "jvm_threads_blocked": 0, + "jvm_threads_new": 0, + "jvm_threads_runnable": 11, + "jvm_threads_terminated": 0, + "jvm_threads_timed_waiting": 25, + "jvm_threads_waiting": 11, + "rpc_call_queue_length": 0, + "rpc_num_open_connections": 0, + "rpc_processing_time_avg_time": 0, + "rpc_queue_time_avg_time": 0, + "rpc_queue_time_num_ops": 0, + "rpc_received_bytes": 7, + "rpc_sent_bytes": 187, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestHDFS_CollectNameNode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testNameNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "fsns_blocks_total": 15, + "fsns_capacity_remaining": 65861697536, + "fsns_capacity_total": 107351072768, + "fsns_capacity_used": 41489375232, + "fsns_capacity_used_dfs": 2372116480, + 
"fsns_capacity_used_non_dfs": 39117258752, + "fsns_corrupt_blocks": 0, + "fsns_files_total": 12, + "fsns_missing_blocks": 0, + "fsns_num_dead_data_nodes": 0, + "fsns_num_live_data_nodes": 2, + "fsns_stale_data_nodes": 0, + "fsns_total_load": 2, + "fsns_under_replicated_blocks": 0, + "fsns_volume_failures_total": 0, + "jvm_gc_count": 1699, + "jvm_gc_num_info_threshold_exceeded": 0, + "jvm_gc_num_warn_threshold_exceeded": 0, + "jvm_gc_time_millis": 3483, + "jvm_gc_total_extra_sleep_time": 1944, + "jvm_log_error": 0, + "jvm_log_fatal": 0, + "jvm_log_info": 3382077, + "jvm_log_warn": 3378983, + "jvm_mem_heap_committed": 67000, + "jvm_mem_heap_max": 843, + "jvm_mem_heap_used": 26603, + "jvm_threads_blocked": 0, + "jvm_threads_new": 0, + "jvm_threads_runnable": 7, + "jvm_threads_terminated": 0, + "jvm_threads_timed_waiting": 34, + "jvm_threads_waiting": 6, + "rpc_call_queue_length": 0, + "rpc_num_open_connections": 2, + "rpc_processing_time_avg_time": 0, + "rpc_queue_time_avg_time": 58, + "rpc_queue_time_num_ops": 585402, + "rpc_received_bytes": 240431351, + "rpc_sent_bytes": 25067414, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestHDFS_CollectUnknownNode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testUnknownNodeData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.Panics(t, func() { _ = job.Collect() }) +} + +func TestHDFS_CollectNoResponse(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/jmx" + require.True(t, job.Init()) + + assert.Nil(t, job.Collect()) +} + +func TestHDFS_CollectReceiveInvalidResponse(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\ngoodbye!\n")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.Nil(t, job.Collect()) +} + +func TestHDFS_CollectReceive404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + assert.Nil(t, job.Collect()) +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md b/src/go/collectors/go.d.plugin/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md new file mode 100644 index 00000000000000..c19268625f81cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md @@ -0,0 +1,251 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/hdfs/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/hdfs/metadata.yaml" +sidebar_label: "Hadoop Distributed File System (HDFS)" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Hadoop Distributed File System (HDFS) + + +<img src="https://netdata.cloud/img/hadoop.svg" width="150"/> + + +Plugin: go.d.plugin +Module: hfs + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors HDFS nodes. 
+ +Netdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Hadoop Distributed File System (HDFS) instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | DataNode | NameNode | +|:------|:----------|:----|:---:|:---:| +| hdfs.heap_memory | committed, used | MiB | • | • | +| hdfs.gc_count_total | gc | events/s | • | • | +| hdfs.gc_time_total | ms | ms | • | • | +| hdfs.gc_threshold | info, warn | events/s | • | • | +| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | • | • | +| hdfs.logs_total | info, error, warn, fatal | logs/s | • | • | +| hdfs.rpc_bandwidth | received, sent | kilobits/s | • | • | +| hdfs.rpc_calls | calls | calls/s | • | • | +| hdfs.open_connections | open | connections | • | • | +| hdfs.call_queue_length | length | num | • | • | +| hdfs.avg_queue_time | time | ms | • | • | +| hdfs.avg_processing_time | time | ms | • | • | +| hdfs.capacity | remaining, used | KiB | | • | +| hdfs.used_capacity | dfs, non_dfs | KiB | | • | +| hdfs.load | load | load | | • | +| hdfs.volume_failures_total | failures | events/s | | • | +| hdfs.files_total | files | num | | • | +| hdfs.blocks_total | blocks | num | | • | +| hdfs.blocks | corrupt, missing, under_replicated | num | | • | +| hdfs.data_nodes | live, dead, stale | num | | • | +| hdfs.datanode_capacity | remaining, used | KiB | • | | +| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | • | | +| hdfs.datanode_failed_volumes | failed volumes | num | • | | +| hdfs.datanode_bandwidth | reads, writes | KiB/s | • | | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization | +| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks | +| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat | +| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead | +| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/hdfs.conf`. 
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/hdfs.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9870/jmx | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9870/jmx
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9870/jmx
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9870/jmx
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9870/jmx
+
+  - name: remote
+    url: http://192.0.2.1:9870/jmx
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `hdfs` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m hdfs
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/metadata.yaml b/src/go/collectors/go.d.plugin/modules/hdfs/metadata.yaml
new file mode 100644
index 00000000000000..694868e01a0977
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/hdfs/metadata.yaml
@@ -0,0 +1,388 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-hdfs
+      plugin_name: go.d.plugin
+      module_name: hdfs
+      monitored_instance:
+        name: Hadoop Distributed File System (HDFS)
+        link: https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html
+        icon_filename: hadoop.svg
+      categories:
+        - data-collection.storage-mount-points-and-filesystems
+      keywords:
+        - hdfs
+        - hadoop
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors HDFS nodes.
+
+          Netdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/hdfs.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:9870/jmx
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+ default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:9870/jmx + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:9870/jmx + username: username + password: password + - name: HTTPS with self-signed certificate + description: | + Do not validate server certificate chain and hostname. + config: | + jobs: + - name: local + url: https://127.0.0.1:9870/jmx + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:9870/jmx + + - name: remote + url: http://192.0.2.1:9870/jmx + troubleshooting: + problems: + list: [] + alerts: + - name: hdfs_capacity_usage + metric: hdfs.capacity + info: summary datanodes space capacity utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf + - name: hdfs_missing_blocks + metric: hdfs.blocks + info: number of missing blocks + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf + - name: hdfs_stale_nodes + metric: hdfs.data_nodes + info: number of datanodes marked stale due to delayed heartbeat + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf + - name: hdfs_dead_nodes + metric: hdfs.data_nodes + info: number of datanodes which are currently dead + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf + - name: hdfs_num_failed_volumes + metric: hdfs.num_failed_volumes + info: number of failed volumes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: + - DataNode + - NameNode + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: hdfs.heap_memory + description: Heap Memory + unit: MiB + chart_type: area + dimensions: + - name: committed + - name: used + - name: hdfs.gc_count_total + description: GC Events + unit: events/s + chart_type: line + dimensions: + - name: gc + - name: hdfs.gc_time_total + description: GC Time + unit: ms + chart_type: line + dimensions: + - name: ms + - name: hdfs.gc_threshold + description: Number of Times That the GC Threshold is Exceeded + unit: events/s + chart_type: line + dimensions: + - name: info + - name: warn + - name: hdfs.threads + description: Number of Threads + unit: num + chart_type: stacked + dimensions: + - name: new + - name: runnable + - name: blocked + - name: waiting + - name: timed_waiting + - name: terminated + - name: hdfs.logs_total + description: Number of Logs + unit: logs/s + chart_type: stacked + dimensions: + - name: info + - name: error + - name: warn + - name: fatal + - name: hdfs.rpc_bandwidth + description: RPC Bandwidth + unit: kilobits/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: hdfs.rpc_calls + description: RPC Calls + unit: calls/s + chart_type: line + dimensions: + - name: calls + - name: hdfs.open_connections + description: RPC Open Connections + unit: connections + chart_type: line + dimensions: + - name: open + - name: hdfs.call_queue_length + description: RPC Call Queue Length + unit: num + chart_type: line + dimensions: + - name: length + - name: hdfs.avg_queue_time + description: RPC Avg Queue Time + unit: ms + chart_type: line + dimensions: + - name: time + - name: hdfs.avg_processing_time + description: RPC Avg Processing Time + unit: ms + chart_type: line + dimensions: + - name: time + - name: hdfs.capacity + description: Capacity Across All Datanodes + unit: KiB + chart_type: stacked + availability: + - NameNode + dimensions: + - name: remaining + - name: used + - name: hdfs.used_capacity + description: Used Capacity Across All Datanodes + unit: KiB + chart_type: stacked + availability: + - NameNode + dimensions: + - name: dfs + - name: non_dfs + - name: hdfs.load + description: Number of Concurrent File Accesses (read/write) Across All DataNodes + unit: load + chart_type: line + availability: + - NameNode + dimensions: + - name: load + - name: hdfs.volume_failures_total + description: Number of Volume Failures Across All Datanodes + unit: events/s + chart_type: line + availability: + - NameNode + dimensions: + - name: failures + - name: hdfs.files_total + description: Number of Tracked Files + unit: num + chart_type: line + availability: + - NameNode + dimensions: + - name: files + - name: hdfs.blocks_total + description: Number of Allocated Blocks in the System + unit: num + chart_type: line + availability: + - NameNode + dimensions: + - name: blocks + - name: hdfs.blocks + description: Number of Problem Blocks (can point to an unhealthy cluster) + unit: num + chart_type: line + availability: + - NameNode + dimensions: + - name: corrupt + - name: missing + - name: under_replicated + - name: hdfs.data_nodes + description: Number of Data Nodes By Status + unit: num + chart_type: stacked + availability: + - NameNode + dimensions: + - name: live + - name: dead + - name: stale + - name: hdfs.datanode_capacity + description: Capacity + unit: KiB + chart_type: stacked + availability: + - DataNode + dimensions: + - name: remaining + - name: used + - name: hdfs.datanode_used_capacity + description: Used Capacity + unit: KiB + chart_type: stacked + availability: + - DataNode + 
dimensions: + - name: dfs + - name: non_dfs + - name: hdfs.datanode_failed_volumes + description: Number of Failed Volumes + unit: num + chart_type: line + availability: + - DataNode + dimensions: + - name: failed volumes + - name: hdfs.datanode_bandwidth + description: Bandwidth + unit: KiB/s + chart_type: area + availability: + - DataNode + dimensions: + - name: reads + - name: writes diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/metrics.go b/src/go/collectors/go.d.plugin/modules/hdfs/metrics.go new file mode 100644 index 00000000000000..972436a5d260c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/metrics.go @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +// HDFS Architecture +// https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html#NameNode+and+DataNodes + +// Metrics description +// https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Metrics.html + +// Good article +// https://www.datadoghq.com/blog/monitor-hadoop-metrics/#hdfs-metrics + +type metrics struct { + Jvm *jvmMetrics `stm:"jvm"` // both + Rpc *rpcActivityMetrics `stm:"rpc"` // both + FSNameSystem *fsNameSystemMetrics `stm:"fsns"` // namenode + FSDatasetState *fsDatasetStateMetrics `stm:"fsds"` // datanode + DataNodeActivity *dataNodeActivityMetrics `stm:"dna"` // datanode +} + +type jvmMetrics struct { + ProcessName string `json:"tag.ProcessName"` + HostName string `json:"tag.Hostname"` + //MemNonHeapUsedM float64 `stm:"mem_non_heap_used,1000,1"` + //MemNonHeapCommittedM float64 `stm:"mem_non_heap_committed,1000,1"` + //MemNonHeapMaxM float64 `stm:"mem_non_heap_max"` + MemHeapUsedM float64 `stm:"mem_heap_used,1000,1"` + MemHeapCommittedM float64 `stm:"mem_heap_committed,1000,1"` + MemHeapMaxM float64 `stm:"mem_heap_max"` + //MemMaxM float64 `stm:"mem_max"` + GcCount float64 `stm:"gc_count"` + GcTimeMillis float64 `stm:"gc_time_millis"` + GcNumWarnThresholdExceeded float64 `stm:"gc_num_warn_threshold_exceeded"` + GcNumInfoThresholdExceeded float64 `stm:"gc_num_info_threshold_exceeded"` + GcTotalExtraSleepTime float64 `stm:"gc_total_extra_sleep_time"` + ThreadsNew float64 `stm:"threads_new"` + ThreadsRunnable float64 `stm:"threads_runnable"` + ThreadsBlocked float64 `stm:"threads_blocked"` + ThreadsWaiting float64 `stm:"threads_waiting"` + ThreadsTimedWaiting float64 `stm:"threads_timed_waiting"` + ThreadsTerminated float64 `stm:"threads_terminated"` + LogFatal float64 `stm:"log_fatal"` + LogError float64 `stm:"log_error"` + LogWarn float64 `stm:"log_warn"` + LogInfo float64 `stm:"log_info"` +} + +type rpcActivityMetrics struct { + ReceivedBytes float64 `stm:"received_bytes"` + SentBytes float64 `stm:"sent_bytes"` + RpcQueueTimeNumOps float64 `stm:"queue_time_num_ops"` + RpcQueueTimeAvgTime float64 `stm:"queue_time_avg_time,1000,1"` + //RpcProcessingTimeNumOps float64 + RpcProcessingTimeAvgTime float64 `stm:"processing_time_avg_time,1000,1"` + //DeferredRpcProcessingTimeNumOps float64 + //DeferredRpcProcessingTimeAvgTime float64 + //RpcAuthenticationFailures float64 + //RpcAuthenticationSuccesses float64 + //RpcAuthorizationFailures float64 + //RpcAuthorizationSuccesses float64 + //RpcClientBackoff float64 + //RpcSlowCalls float64 + NumOpenConnections float64 `stm:"num_open_connections"` + CallQueueLength float64 `stm:"call_queue_length"` + //NumDroppedConnections float64 +} + +type fsNameSystemMetrics struct { + HostName string `json:"tag.Hostname"` + HAState string `json:"tag.HAState"` + //TotalSyncTimes float64 
`json:"tag.tag.TotalSyncTimes"` + MissingBlocks float64 `stm:"missing_blocks"` + //MissingReplOneBlocks float64 `stm:"missing_repl_one_blocks"` + //ExpiredHeartbeats float64 `stm:"expired_heartbeats"` + //TransactionsSinceLastCheckpoint float64 `stm:"transactions_since_last_checkpoint"` + //TransactionsSinceLastLogRoll float64 `stm:"transactions_since_last_log_roll"` + //LastWrittenTransactionId float64 `stm:"last_written_transaction_id"` + //LastCheckpointTime float64 `stm:"last_checkpoint_time"` + CapacityTotal float64 `stm:"capacity_total"` + //CapacityTotalGB float64 `stm:"capacity_total_gb"` + CapacityDfsUsed float64 `json:"CapacityUsed" stm:"capacity_used_dfs"` + //CapacityUsedGB float64 `stm:"capacity_used_gb"` + CapacityRemaining float64 `stm:"capacity_remaining"` + //ProvidedCapacityTotal float64 `stm:"provided_capacity_total"` + //CapacityRemainingGB float64 `stm:"capacity_remaining_gb"` + CapacityUsedNonDFS float64 `stm:"capacity_used_non_dfs"` + TotalLoad float64 `stm:"total_load"` + //SnapshottableDirectories float64 `stm:"snapshottable_directories"` + //Snapshots float64 `stm:"snapshots"` + //NumEncryptionZones float64 `stm:"num_encryption_zones"` + //LockQueueLength float64 `stm:"lock_queue_length"` + BlocksTotal float64 `stm:"blocks_total"` + //NumFilesUnderConstruction float64 `stm:"num_files_under_construction"` + //NumActiveClients float64 `stm:"num_active_clients"` + FilesTotal float64 `stm:"files_total"` + //PendingReplicationBlocks float64 `stm:"pending_replication_blocks"` + //PendingReconstructionBlocks float64 `stm:"pending_reconstruction_blocks"` + UnderReplicatedBlocks float64 `stm:"under_replicated_blocks"` + //LowRedundancyBlocks float64 `stm:"low_redundancy_blocks"` + CorruptBlocks float64 `stm:"corrupt_blocks"` + //ScheduledReplicationBlocks float64 `stm:"scheduled_replication_blocks"` + //PendingDeletionBlocks float64 `stm:"pending_deletion_blocks"` + //LowRedundancyReplicatedBlocks float64 `stm:"low_redundancy_replicated_blocks"` + //CorruptReplicatedBlocks float64 `stm:"corrupt_replicated_blocks"` + //MissingReplicatedBlocks float64 `stm:"missing_replicated_blocks"` + //MissingReplicationOneBlocks float64 `stm:"missing_replication_one_blocks"` + //HighestPriorityLowRedundancyReplicatedBlocks float64 `stm:"highest_priority_low_redundancy_replicated_blocks"` + //HighestPriorityLowRedundancyECBlocks float64 `stm:"highest_priority_low_redundancy_ec_blocks"` + //BytesInFutureReplicatedBlocks float64 `stm:"bytes_in_future_replicated_blocks"` + //PendingDeletionReplicatedBlocks float64 `stm:"pending_deletion_replicated_blocks"` + //TotalReplicatedBlocks float64 `stm:"total_replicated_blocks"` + //LowRedundancyECBlockGroups float64 `stm:"low_redundancy_ec_block_groups"` + //CorruptECBlockGroups float64 `stm:"corrupt_ec_block_groups"` + //MissingECBlockGroups float64 `stm:"missing_ec_block_groups"` + //BytesInFutureECBlockGroups float64 `stm:"bytes_in_future_ec_block_groups"` + //PendingDeletionECBlocks float64 `stm:"pending_deletion_ec_blocks"` + //TotalECBlockGroups float64 `stm:"total_ec_block_groups"` + //ExcessBlocks float64 `stm:"excess_blocks"` + //NumTimedOutPendingReconstructions float64 `stm:"num_timed_out_pending_reconstructions"` + //PostponedMisreplicatedBlocks float64 `stm:"postponed_misreplicated_blocks"` + //PendingDataNodeMessageCount float64 `stm:"pending_data_node_message_count"` + //MillisSinceLastLoadedEdits float64 `stm:"millis_since_last_loaded_edits"` + //BlockCapacity float64 `stm:"block_capacity"` + NumLiveDataNodes float64 
`stm:"num_live_data_nodes"` + NumDeadDataNodes float64 `stm:"num_dead_data_nodes"` + //NumDecomLiveDataNodes float64 `stm:"num_decom_live_data_nodes"` + //NumDecomDeadDataNodes float64 `stm:"num_decom_dead_data_nodes"` + VolumeFailuresTotal float64 `stm:"volume_failures_total"` + //EstimatedCapacityLostTotal float64 `stm:"estimated_capacity_lost_total"` + //NumDecommissioningDataNodes float64 `stm:"num_decommissioning_data_nodes"` + StaleDataNodes float64 `stm:"stale_data_nodes"` + //NumStaleStorages float64 `stm:"num_stale_storages"` + //TotalSyncCount float64 `stm:"total_sync_count"` + //NumInMaintenanceLiveDataNodes float64 `stm:"num_in_maintenance_live_data_nodes"` + //NumInMaintenanceDeadDataNodes float64 `stm:"num_in_maintenance_dead_data_nodes"` + //NumEnteringMaintenanceDataNodes float64 `stm:"num_entering_maintenance_data_nodes"` + + // custom attributes + CapacityUsed float64 `json:"-" stm:"capacity_used"` +} + +type fsDatasetStateMetrics struct { + HostName string `json:"tag.Hostname"` + Capacity float64 `stm:"capacity_total"` + DfsUsed float64 `stm:"capacity_used_dfs"` + Remaining float64 `stm:"capacity_remaining"` + NumFailedVolumes float64 `stm:"num_failed_volumes"` + //LastVolumeFailureDate float64 `stm:"LastVolumeFailureDate"` + //EstimatedCapacityLostTotal float64 `stm:"EstimatedCapacityLostTotal"` + //CacheUsed float64 `stm:"CacheUsed"` + //CacheCapacity float64 `stm:"CacheCapacity"` + //NumBlocksCached float64 `stm:"NumBlocksCached"` + //NumBlocksFailedToCache float64 `stm:"NumBlocksFailedToCache"` + //NumBlocksFailedToUnCache float64 `stm:"NumBlocksFailedToUnCache"` + + // custom attributes + CapacityUsedNonDFS float64 `stm:"capacity_used_non_dfs"` + CapacityUsed float64 `stm:"capacity_used"` +} + +type dataNodeActivityMetrics struct { + HostName string `json:"tag.Hostname"` + BytesWritten float64 `stm:"bytes_written"` + //TotalWriteTime float64 + BytesRead float64 `stm:"bytes_read"` + //TotalReadTime float64 + //BlocksWritten float64 + //BlocksRead float64 + //BlocksReplicated float64 + //BlocksRemoved float64 + //BlocksVerified float64 + //BlockVerificationFailures float64 + //BlocksCached float64 + //BlocksUncached float64 + //ReadsFromLocalClient float64 + //ReadsFromRemoteClient float64 + //WritesFromLocalClient float64 + //WritesFromRemoteClient float64 + //BlocksGetLocalPathInfo float64 + //RemoteBytesRead float64 + //RemoteBytesWritten float64 + //RamDiskBlocksWrite float64 + //RamDiskBlocksWriteFallback float64 + //RamDiskBytesWrite float64 + //RamDiskBlocksReadHits float64 + //RamDiskBlocksEvicted float64 + //RamDiskBlocksEvictedWithoutRead float64 + //RamDiskBlocksEvictionWindowMsNumOps float64 + //RamDiskBlocksEvictionWindowMsAvgTime float64 + //RamDiskBlocksLazyPersisted float64 + //RamDiskBlocksDeletedBeforeLazyPersisted float64 + //RamDiskBytesLazyPersisted float64 + //RamDiskBlocksLazyPersistWindowMsNumOps float64 + //RamDiskBlocksLazyPersistWindowMsAvgTime float64 + //FsyncCount float64 + //VolumeFailures float64 + //DatanodeNetworkErrors float64 + //DataNodeActiveXceiversCount float64 + //ReadBlockOpNumOps float64 + //ReadBlockOpAvgTime float64 + //WriteBlockOpNumOps float64 + //WriteBlockOpAvgTime float64 + //BlockChecksumOpNumOps float64 + //BlockChecksumOpAvgTime float64 + //CopyBlockOpNumOps float64 + //CopyBlockOpAvgTime float64 + //ReplaceBlockOpNumOps float64 + //ReplaceBlockOpAvgTime float64 + //HeartbeatsNumOps float64 + //HeartbeatsAvgTime float64 + //HeartbeatsTotalNumOps float64 + //HeartbeatsTotalAvgTime float64 + //LifelinesNumOps float64 
+ //LifelinesAvgTime float64 + //BlockReportsNumOps float64 + //BlockReportsAvgTime float64 + //IncrementalBlockReportsNumOps float64 + //IncrementalBlockReportsAvgTime float64 + //CacheReportsNumOps float64 + //CacheReportsAvgTime float64 + //PacketAckRoundTripTimeNanosNumOps float64 + //PacketAckRoundTripTimeNanosAvgTime float64 + //FlushNanosNumOps float64 + //FlushNanosAvgTime float64 + //FsyncNanosNumOps float64 + //FsyncNanosAvgTime float64 + //SendDataPacketBlockedOnNetworkNanosNumOps float64 + //SendDataPacketBlockedOnNetworkNanosAvgTime float64 + //SendDataPacketTransferNanosNumOps float64 + //SendDataPacketTransferNanosAvgTime float64 + //BlocksInPendingIBR float64 + //BlocksReceivingInPendingIBR float64 + //BlocksReceivedInPendingIBR float64 + //BlocksDeletedInPendingIBR float64 + //EcReconstructionTasks float64 + //EcFailedReconstructionTasks float64 + //EcDecodingTimeNanos float64 + //EcReconstructionBytesRead float64 + //EcReconstructionBytesWritten float64 + //EcReconstructionRemoteBytesRead float64 + //EcReconstructionReadTimeMillis float64 + //EcReconstructionDecodingTimeMillis float64 + //EcReconstructionWriteTimeMillis float64 +} diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/testdata/datanode.json b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/datanode.json new file mode 100644 index 00000000000000..0f657d5602c94b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/datanode.json @@ -0,0 +1,165 @@ +{ + "beans":[ + { + "name":"Hadoop:service=DataNode,name=JvmMetrics", + "modelerType":"JvmMetrics", + "tag.Context":"jvm", + "tag.ProcessName":"DataNode", + "tag.SessionId":null, + "tag.Hostname":"dev-slave-01.dev.local", + "MemNonHeapUsedM":53.67546, + "MemNonHeapCommittedM":54.9375, + "MemNonHeapMaxM":-1.0, + "MemHeapUsedM":18.885735, + "MemHeapCommittedM":60.5, + "MemHeapMaxM":843.0, + "MemMaxM":843.0, + "GcCount":155, + "GcTimeMillis":672, + "GcNumWarnThresholdExceeded":0, + "GcNumInfoThresholdExceeded":0, + "GcTotalExtraSleepTime":8783, + "ThreadsNew":0, + "ThreadsRunnable":11, + "ThreadsBlocked":0, + "ThreadsWaiting":11, + "ThreadsTimedWaiting":25, + "ThreadsTerminated":0, + "LogFatal":0, + "LogError":1, + "LogWarn":2, + "LogInfo":257 + }, + { + "name":"Hadoop:service=DataNode,name=FSDatasetState", + "modelerType":"FSDatasetState", + "tag.Context":"FSDatasetState", + "tag.StorageInfo":"FSDataset{dirpath='[/data/hdfs/data]'}", + "tag.Hostname":"dev-slave-01.dev.local", + "Capacity":53675536384, + "DfsUsed":1186058240, + "Remaining":32920760320, + "NumFailedVolumes":0, + "LastVolumeFailureDate":0, + "EstimatedCapacityLostTotal":0, + "CacheUsed":0, + "CacheCapacity":0, + "NumBlocksCached":0, + "NumBlocksFailedToCache":0, + "NumBlocksFailedToUnCache":4 + }, + { + "name":"Hadoop:service=DataNode,name=DataNodeActivity-dev-slave-01.dev.local-9866", + "modelerType":"DataNodeActivity-dev-slave-01.dev.local-9866", + "tag.SessionId":null, + "tag.Context":"dfs", + "tag.Hostname":"dev-slave-01.dev.local", + "BytesWritten":500960407, + "TotalWriteTime":463, + "BytesRead":80689178, + "TotalReadTime":41203, + "BlocksWritten":16, + "BlocksRead":16, + "BlocksReplicated":4, + "BlocksRemoved":4, + "BlocksVerified":0, + "BlockVerificationFailures":0, + "BlocksCached":0, + "BlocksUncached":0, + "ReadsFromLocalClient":0, + "ReadsFromRemoteClient":16, + "WritesFromLocalClient":0, + "WritesFromRemoteClient":12, + "BlocksGetLocalPathInfo":0, + "RemoteBytesRead":80689178, + "RemoteBytesWritten":97283223, + "RamDiskBlocksWrite":0, + 
"RamDiskBlocksWriteFallback":0, + "RamDiskBytesWrite":0, + "RamDiskBlocksReadHits":0, + "RamDiskBlocksEvicted":0, + "RamDiskBlocksEvictedWithoutRead":0, + "RamDiskBlocksEvictionWindowMsNumOps":0, + "RamDiskBlocksEvictionWindowMsAvgTime":0.0, + "RamDiskBlocksLazyPersisted":0, + "RamDiskBlocksDeletedBeforeLazyPersisted":0, + "RamDiskBytesLazyPersisted":0, + "RamDiskBlocksLazyPersistWindowMsNumOps":0, + "RamDiskBlocksLazyPersistWindowMsAvgTime":0.0, + "FsyncCount":0, + "VolumeFailures":0, + "DatanodeNetworkErrors":7, + "DataNodeActiveXceiversCount":0, + "ReadBlockOpNumOps":16, + "ReadBlockOpAvgTime":2258.2, + "WriteBlockOpNumOps":12, + "WriteBlockOpAvgTime":12640.666666666666, + "BlockChecksumOpNumOps":0, + "BlockChecksumOpAvgTime":0.0, + "CopyBlockOpNumOps":0, + "CopyBlockOpAvgTime":0.0, + "ReplaceBlockOpNumOps":0, + "ReplaceBlockOpAvgTime":0.0, + "HeartbeatsNumOps":285073, + "HeartbeatsAvgTime":1.2035398230088497, + "HeartbeatsTotalNumOps":285073, + "HeartbeatsTotalAvgTime":1.2035398230088497, + "LifelinesNumOps":0, + "LifelinesAvgTime":0.0, + "BlockReportsNumOps":41, + "BlockReportsAvgTime":2.0, + "IncrementalBlockReportsNumOps":20, + "IncrementalBlockReportsAvgTime":1.2, + "CacheReportsNumOps":0, + "CacheReportsAvgTime":0.0, + "PacketAckRoundTripTimeNanosNumOps":603, + "PacketAckRoundTripTimeNanosAvgTime":1733672.0, + "FlushNanosNumOps":7660, + "FlushNanosAvgTime":3988.858108108108, + "FsyncNanosNumOps":0, + "FsyncNanosAvgTime":0.0, + "SendDataPacketBlockedOnNetworkNanosNumOps":7091, + "SendDataPacketBlockedOnNetworkNanosAvgTime":2.4469053762711864E7, + "SendDataPacketTransferNanosNumOps":7091, + "SendDataPacketTransferNanosAvgTime":37130.05084745763, + "BlocksInPendingIBR":0, + "BlocksReceivingInPendingIBR":0, + "BlocksReceivedInPendingIBR":0, + "BlocksDeletedInPendingIBR":0, + "EcReconstructionTasks":0, + "EcFailedReconstructionTasks":0, + "EcDecodingTimeNanos":0, + "EcReconstructionBytesRead":0, + "EcReconstructionBytesWritten":0, + "EcReconstructionRemoteBytesRead":0, + "EcReconstructionReadTimeMillis":0, + "EcReconstructionDecodingTimeMillis":0, + "EcReconstructionWriteTimeMillis":0 + }, + { + "name":"Hadoop:service=DataNode,name=RpcActivityForPort9867", + "modelerType":"RpcActivityForPort9867", + "tag.port":"9867", + "tag.Context":"rpc", + "tag.NumOpenConnectionsPerUser":"{}", + "tag.Hostname":"dev-slave-01.dev.local", + "ReceivedBytes":7, + "SentBytes":187, + "RpcQueueTimeNumOps":0, + "RpcQueueTimeAvgTime":0.0, + "RpcProcessingTimeNumOps":0, + "RpcProcessingTimeAvgTime":0.0, + "DeferredRpcProcessingTimeNumOps":0, + "DeferredRpcProcessingTimeAvgTime":0.0, + "RpcAuthenticationFailures":0, + "RpcAuthenticationSuccesses":0, + "RpcAuthorizationFailures":0, + "RpcAuthorizationSuccesses":0, + "RpcClientBackoff":0, + "RpcSlowCalls":0, + "NumOpenConnections":0, + "CallQueueLength":0, + "NumDroppedConnections":0 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/testdata/namenode.json b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/namenode.json new file mode 100644 index 00000000000000..2d33d32f368acd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/namenode.json @@ -0,0 +1,132 @@ +{ + "beans":[ + { + "name":"Hadoop:service=NameNode,name=JvmMetrics", + "modelerType":"JvmMetrics", + "tag.Context":"jvm", + "tag.ProcessName":"NameNode", + "tag.SessionId":null, + "tag.Hostname":"dev-master-02.dev.local", + "MemNonHeapUsedM":66.170395, + "MemNonHeapCommittedM":67.75, + "MemNonHeapMaxM":-1.0, + "MemHeapUsedM":26.603287, + 
"MemHeapCommittedM":67.0, + "MemHeapMaxM":843.0, + "MemMaxM":843.0, + "GcCount":1699, + "GcTimeMillis":3483, + "GcNumWarnThresholdExceeded":0, + "GcNumInfoThresholdExceeded":0, + "GcTotalExtraSleepTime":1944, + "ThreadsNew":0, + "ThreadsRunnable":7, + "ThreadsBlocked":0, + "ThreadsWaiting":6, + "ThreadsTimedWaiting":34, + "ThreadsTerminated":0, + "LogFatal":0, + "LogError":0, + "LogWarn":3378983, + "LogInfo":3382077 + }, + { + "name":"Hadoop:service=NameNode,name=FSNamesystem", + "modelerType":"FSNamesystem", + "tag.Context":"dfs", + "tag.HAState":"active", + "tag.TotalSyncTimes":"98 ", + "tag.Hostname":"dev-master-02.dev.local", + "MissingBlocks":0, + "MissingReplOneBlocks":0, + "ExpiredHeartbeats":0, + "TransactionsSinceLastCheckpoint":1, + "TransactionsSinceLastLogRoll":1, + "LastWrittenTransactionId":624, + "LastCheckpointTime":1566814983890, + "CapacityTotal":107351072768, + "CapacityTotalGB":100.0, + "CapacityUsed":2372116480, + "CapacityUsedGB":2.0, + "CapacityRemaining":65861697536, + "ProvidedCapacityTotal":0, + "CapacityRemainingGB":61.0, + "CapacityUsedNonDFS":39117258752, + "TotalLoad":2, + "SnapshottableDirectories":0, + "Snapshots":0, + "NumEncryptionZones":0, + "LockQueueLength":0, + "BlocksTotal":15, + "NumFilesUnderConstruction":0, + "NumActiveClients":0, + "FilesTotal":12, + "PendingReplicationBlocks":0, + "PendingReconstructionBlocks":0, + "UnderReplicatedBlocks":0, + "LowRedundancyBlocks":0, + "CorruptBlocks":0, + "ScheduledReplicationBlocks":0, + "PendingDeletionBlocks":0, + "LowRedundancyReplicatedBlocks":0, + "CorruptReplicatedBlocks":0, + "MissingReplicatedBlocks":0, + "MissingReplicationOneBlocks":0, + "HighestPriorityLowRedundancyReplicatedBlocks":0, + "HighestPriorityLowRedundancyECBlocks":0, + "BytesInFutureReplicatedBlocks":0, + "PendingDeletionReplicatedBlocks":0, + "TotalReplicatedBlocks":15, + "LowRedundancyECBlockGroups":0, + "CorruptECBlockGroups":0, + "MissingECBlockGroups":0, + "BytesInFutureECBlockGroups":0, + "PendingDeletionECBlocks":0, + "TotalECBlockGroups":0, + "ExcessBlocks":0, + "NumTimedOutPendingReconstructions":0, + "PostponedMisreplicatedBlocks":0, + "PendingDataNodeMessageCount":0, + "MillisSinceLastLoadedEdits":0, + "BlockCapacity":2097152, + "NumLiveDataNodes":2, + "NumDeadDataNodes":0, + "NumDecomLiveDataNodes":0, + "NumDecomDeadDataNodes":0, + "VolumeFailuresTotal":0, + "EstimatedCapacityLostTotal":0, + "NumDecommissioningDataNodes":0, + "StaleDataNodes":0, + "NumStaleStorages":0, + "TotalSyncCount":2, + "NumInMaintenanceLiveDataNodes":0, + "NumInMaintenanceDeadDataNodes":0, + "NumEnteringMaintenanceDataNodes":0 + }, + { + "name":"Hadoop:service=NameNode,name=RpcActivityForPort9000", + "modelerType":"RpcActivityForPort9000", + "tag.port":"9000", + "tag.Context":"rpc", + "tag.NumOpenConnectionsPerUser":"{\"hadoop\":2}", + "tag.Hostname":"dev-master-02.dev.local", + "ReceivedBytes":240431351, + "SentBytes":25067414, + "RpcQueueTimeNumOps":585402, + "RpcQueueTimeAvgTime":0.05813953488372093, + "RpcProcessingTimeNumOps":585402, + "RpcProcessingTimeAvgTime":0.0, + "DeferredRpcProcessingTimeNumOps":0, + "DeferredRpcProcessingTimeAvgTime":0.0, + "RpcAuthenticationFailures":0, + "RpcAuthenticationSuccesses":0, + "RpcAuthorizationFailures":0, + "RpcAuthorizationSuccesses":14327, + "RpcClientBackoff":0, + "RpcSlowCalls":0, + "NumOpenConnections":2, + "CallQueueLength":0, + "NumDroppedConnections":0 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/hdfs/testdata/unknownnode.json 
b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/unknownnode.json new file mode 100644 index 00000000000000..7370a7a37b0a00 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/hdfs/testdata/unknownnode.json @@ -0,0 +1,34 @@ +{ + "beans":[ + { + "name":"Hadoop:service=UnknownNode,name=JvmMetrics", + "modelerType":"JvmMetrics", + "tag.Context":"jvm", + "tag.ProcessName":"UnknownNode", + "tag.SessionId":null, + "tag.Hostname":"dev-slave-01.dev.local", + "MemNonHeapUsedM":53.67546, + "MemNonHeapCommittedM":54.9375, + "MemNonHeapMaxM":-1.0, + "MemHeapUsedM":18.885735, + "MemHeapCommittedM":60.5, + "MemHeapMaxM":843.0, + "MemMaxM":843.0, + "GcCount":155, + "GcTimeMillis":672, + "GcNumWarnThresholdExceeded":0, + "GcNumInfoThresholdExceeded":0, + "GcTotalExtraSleepTime":8783, + "ThreadsNew":1, + "ThreadsRunnable":2, + "ThreadsBlocked":3, + "ThreadsWaiting":4, + "ThreadsTimedWaiting":5, + "ThreadsTerminated":6, + "LogFatal":10, + "LogError":11, + "LogWarn":12, + "LogInfo":13 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/README.md b/src/go/collectors/go.d.plugin/modules/httpcheck/README.md new file mode 120000 index 00000000000000..69f0561371d92d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/README.md @@ -0,0 +1 @@ +integrations/http_endpoints.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/charts.go b/src/go/collectors/go.d.plugin/modules/httpcheck/charts.go new file mode 100644 index 00000000000000..c0ae78c22ee378 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/charts.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioResponseTime = module.Priority + iota + prioResponseLength + prioResponseStatus + prioResponseInStatusDuration +) + +var httpCheckCharts = module.Charts{ + responseTimeChart.Copy(), + responseLengthChart.Copy(), + responseStatusChart.Copy(), + responseInStatusDurationChart.Copy(), +} + +var responseTimeChart = module.Chart{ + ID: "response_time", + Title: "HTTP Response Time", + Units: "ms", + Fam: "response", + Ctx: "httpcheck.response_time", + Priority: prioResponseTime, + Dims: module.Dims{ + {ID: "time"}, + }, +} + +var responseLengthChart = module.Chart{ + ID: "response_length", + Title: "HTTP Response Body Length", + Units: "characters", + Fam: "response", + Ctx: "httpcheck.response_length", + Priority: prioResponseLength, + Dims: module.Dims{ + {ID: "length"}, + }, +} + +var responseStatusChart = module.Chart{ + ID: "request_status", + Title: "HTTP Check Status", + Units: "boolean", + Fam: "status", + Ctx: "httpcheck.status", + Priority: prioResponseStatus, + Dims: module.Dims{ + {ID: "success"}, + {ID: "no_connection"}, + {ID: "timeout"}, + {ID: "redirect"}, + {ID: "bad_content"}, + {ID: "bad_status"}, + {ID: "bad_header"}, + }, +} + +var responseInStatusDurationChart = module.Chart{ + ID: "current_state_duration", + Title: "HTTP Current State Duration", + Units: "seconds", + Fam: "status", + Ctx: "httpcheck.in_state", + Priority: prioResponseInStatusDuration, + Dims: module.Dims{ + {ID: "in_state", Name: "time"}, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/collect.go b/src/go/collectors/go.d.plugin/modules/httpcheck/collect.go new file mode 100644 index 00000000000000..813a343d53f6c7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/collect.go @@ -0,0 +1,189 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/stm"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+type reqErrCode int
+
+const (
+	codeTimeout reqErrCode = iota
+	codeRedirect
+	codeNoConnection
+)
+
+func (hc *HTTPCheck) collect() (map[string]int64, error) {
+	req, err := web.NewHTTPRequest(hc.Request)
+	if err != nil {
+		return nil, fmt.Errorf("error on creating HTTP request to %s: %v", hc.Request.URL, err)
+	}
+
+	if hc.CookieFile != "" {
+		if err := hc.readCookieFile(); err != nil {
+			return nil, fmt.Errorf("error on reading cookie file '%s': %v", hc.CookieFile, err)
+		}
+	}
+
+	start := time.Now()
+	resp, err := hc.httpClient.Do(req)
+	dur := time.Since(start)
+
+	defer closeBody(resp)
+
+	var mx metrics
+
+	if hc.isError(err, resp) {
+		hc.Debug(err)
+		hc.collectErrResponse(&mx, err)
+	} else {
+		mx.ResponseTime = durationToMs(dur)
+		hc.collectOKResponse(&mx, resp)
+	}
+
+	if hc.metrics.Status != mx.Status {
+		mx.InState = hc.UpdateEvery
+	} else {
+		mx.InState = hc.metrics.InState + hc.UpdateEvery
+	}
+	hc.metrics = mx
+
+	return stm.ToMap(mx), nil
+}
+
+func (hc *HTTPCheck) isError(err error, resp *http.Response) bool {
+	return err != nil && !(errors.Is(err, web.ErrRedirectAttempted) && hc.acceptedStatuses[resp.StatusCode])
+}
+
+func (hc *HTTPCheck) collectErrResponse(mx *metrics, err error) {
+	switch code := decodeReqError(err); code {
+	case codeNoConnection:
+		mx.Status.NoConnection = true
+	case codeTimeout:
+		mx.Status.Timeout = true
+	case codeRedirect:
+		mx.Status.Redirect = true
+	default:
+		panic(fmt.Sprintf("unknown request error code: %d", code))
+	}
+}
+
+func (hc *HTTPCheck) collectOKResponse(mx *metrics, resp *http.Response) {
+	hc.Debugf("endpoint '%s' returned %d (%s) HTTP status code", hc.URL, resp.StatusCode, resp.Status)
+
+	if !hc.acceptedStatuses[resp.StatusCode] {
+		mx.Status.BadStatusCode = true
+		return
+	}
+
+	bs, err := io.ReadAll(resp.Body)
+	// golang net/http closes body on redirect
+	if err != nil && !errors.Is(err, io.EOF) && !strings.Contains(err.Error(), "read on closed response body") {
+		hc.Warningf("error on reading body: %v", err)
+		mx.Status.BadContent = true
+		return
+	}
+
+	mx.ResponseLength = len(bs)
+
+	if hc.reResponse != nil && !hc.reResponse.Match(bs) {
+		mx.Status.BadContent = true
+		return
+	}
+
+	if ok := hc.checkHeader(resp); !ok {
+		mx.Status.BadHeader = true
+		return
+	}
+
+	mx.Status.Success = true
+}
+
+func (hc *HTTPCheck) checkHeader(resp *http.Response) bool {
+	for _, m := range hc.headerMatch {
+		value := resp.Header.Get(m.key)
+
+		var ok bool
+		switch {
+		case value == "":
+			ok = m.exclude
+		case m.valMatcher == nil:
+			ok = !m.exclude
+		default:
+			ok = m.valMatcher.MatchString(value)
+		}
+
+		if !ok {
+			hc.Debugf("header match: bad header: exclude '%v' key '%s' value '%s'", m.exclude, m.key, value)
+			return false
+		}
+	}
+
+	return true
+}
+
+func decodeReqError(err error) reqErrCode {
+	if err == nil {
+		panic("nil error")
+	}
+
+	if errors.Is(err, web.ErrRedirectAttempted) {
+		return codeRedirect
+	}
+	var v net.Error
+	if errors.As(err, &v) && v.Timeout() {
+		return codeTimeout
+	}
+	return codeNoConnection
+}
+
+func (hc *HTTPCheck) readCookieFile() error {
+	if hc.CookieFile == "" {
+		return nil
+	}
+
+	fi, err := os.Stat(hc.CookieFile)
+	if err != nil {
+		return err
+	}
+
+	if hc.cookieFileModTime.Equal(fi.ModTime()) {
+		hc.Debugf("cookie file '%s' modification time has
not changed, using previously read data", hc.CookieFile) + return nil + } + + hc.Debugf("reading cookie file '%s'", hc.CookieFile) + + jar, err := loadCookieJar(hc.CookieFile) + if err != nil { + return err + } + + hc.httpClient.Jar = jar + hc.cookieFileModTime = fi.ModTime() + + return nil +} + +func closeBody(resp *http.Response) { + if resp == nil || resp.Body == nil { + return + } + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() +} + +func durationToMs(duration time.Duration) int { + return int(duration) / (int(time.Millisecond) / int(time.Nanosecond)) +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/config_schema.json b/src/go/collectors/go.d.plugin/modules/httpcheck/config_schema.json new file mode 100644 index 00000000000000..d344853f71f8dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/config_schema.json @@ -0,0 +1,71 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/httpcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "accepted_statuses": { + "type": "array", + "items": { + "type": "integer" + } + }, + "response_match": { + "type": "string" + }, + "cookie_file": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/cookiejar.go b/src/go/collectors/go.d.plugin/modules/httpcheck/cookiejar.go new file mode 100644 index 00000000000000..628867caa404d7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/cookiejar.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +import ( + "bufio" + "fmt" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "strconv" + "strings" + "time" + + "golang.org/x/net/publicsuffix" +) + +// TODO: implement proper cookie auth support +// relevant forum topic: https://community.netdata.cloud/t/howto-http-endpoint-collector-with-cookie-and-user-pass/3981/5?u=ilyam8 + +// cookie file format: https://everything.curl.dev/http/cookies/fileformat +func loadCookieJar(path string) (http.CookieJar, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer func() { _ = file.Close() }() + + jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return nil, err + } + + sc := bufio.NewScanner(file) + + for sc.Scan() { + line, httpOnly := strings.CutPrefix(strings.TrimSpace(sc.Text()), "#HttpOnly_") + + if strings.HasPrefix(line, "#") || line == "" { + continue + } + + parts := strings.Fields(line) + if len(parts) != 6 && len(parts) != 7 { + return nil, fmt.Errorf("got %d fields in line '%s', want 6 or 7", len(parts), line) + } + + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + cookie := &http.Cookie{ + Domain: parts[0], + Path: parts[2], + Name: parts[5], + HttpOnly: httpOnly, + } + cookie.Secure, 
err = strconv.ParseBool(parts[3]) + if err != nil { + return nil, err + } + expires, err := strconv.ParseInt(parts[4], 10, 64) + if err != nil { + return nil, err + } + if expires > 0 { + cookie.Expires = time.Unix(expires, 0) + } + if len(parts) == 7 { + cookie.Value = parts[6] + } + + scheme := "http" + if cookie.Secure { + scheme = "https" + } + cookieURL := &url.URL{ + Scheme: scheme, + Host: cookie.Domain, + } + + cookies := jar.Cookies(cookieURL) + cookies = append(cookies, cookie) + jar.SetCookies(cookieURL, cookies) + } + + return jar, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck.go b/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck.go new file mode 100644 index 00000000000000..abb2c821e428c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck.go @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +import ( + _ "embed" + "net/http" + "regexp" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("httpcheck", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *HTTPCheck { + return &HTTPCheck{ + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + AcceptedStatuses: []int{200}, + }, + acceptedStatuses: make(map[int]bool), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every"` + AcceptedStatuses []int `yaml:"status_accepted"` + ResponseMatch string `yaml:"response_match"` + CookieFile string `yaml:"cookie_file"` + HeaderMatch []HeaderMatchConfig `yaml:"header_match"` + } + HeaderMatchConfig struct { + Exclude bool `yaml:"exclude"` + Key string `yaml:"key"` + Value string `yaml:"value"` + } +) + +type HTTPCheck struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + + charts *module.Charts + + acceptedStatuses map[int]bool + reResponse *regexp.Regexp + headerMatch []headerMatch + + cookieFileModTime time.Time + + metrics metrics +} + +func (hc *HTTPCheck) Init() bool { + if err := hc.validateConfig(); err != nil { + hc.Errorf("config validation: %v", err) + return false + } + + hc.charts = hc.initCharts() + + httpClient, err := hc.initHTTPClient() + if err != nil { + hc.Errorf("init HTTP client: %v", err) + return false + } + hc.httpClient = httpClient + + re, err := hc.initResponseMatchRegexp() + if err != nil { + hc.Errorf("init response match regexp: %v", err) + return false + } + hc.reResponse = re + + hm, err := hc.initHeaderMatch() + if err != nil { + hc.Errorf("init header match: %v", err) + return false + } + hc.headerMatch = hm + + for _, v := range hc.AcceptedStatuses { + hc.acceptedStatuses[v] = true + } + + hc.Debugf("using URL %s", hc.URL) + hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration) + hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses) + if hc.reResponse != nil { + hc.Debugf("using response match regexp %s", hc.reResponse) + } + + return true +} + +func (hc *HTTPCheck) Check() bool { + return len(hc.Collect()) > 0 +} + +func (hc *HTTPCheck) Charts() *module.Charts { + return hc.charts +} + +func (hc *HTTPCheck) Collect() map[string]int64 { + mx, err := hc.collect() + if err != nil { + hc.Error(err) + } + + if len(mx) == 0 { + return nil + } + return 
mx +} + +func (hc *HTTPCheck) Cleanup() { + if hc.httpClient != nil { + hc.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck_test.go b/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck_test.go new file mode 100644 index 00000000000000..9d866e093800e2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/httpcheck_test.go @@ -0,0 +1,584 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHTTPCheck_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success if url set": { + wantFail: false, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + }, + }, + "fail with default": { + wantFail: true, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + "fail if wrong response regex": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:38001"}, + }, + ResponseMatch: "(?:qwe))", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + httpCheck := New() + httpCheck.Config = test.config + + if test.wantFail { + assert.False(t, httpCheck.Init()) + } else { + assert.True(t, httpCheck.Init()) + } + }) + } +} + +func TestHTTPCheck_Charts(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *HTTPCheck + wantCharts bool + }{ + "no charts if not inited": { + wantCharts: false, + prepare: func(t *testing.T) *HTTPCheck { + return New() + }, + }, + "charts if inited": { + wantCharts: true, + prepare: func(t *testing.T) *HTTPCheck { + httpCheck := New() + httpCheck.URL = "http://127.0.0.1:38001" + require.True(t, httpCheck.Init()) + + return httpCheck + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + httpCheck := test.prepare(t) + + if test.wantCharts { + assert.NotNil(t, httpCheck.Charts()) + } else { + assert.Nil(t, httpCheck.Charts()) + } + }) + } +} + +func TestHTTPCheck_Cleanup(t *testing.T) { + httpCheck := New() + assert.NotPanics(t, httpCheck.Cleanup) + + httpCheck.URL = "http://127.0.0.1:38001" + require.True(t, httpCheck.Init()) + assert.NotPanics(t, httpCheck.Cleanup) +} + +func TestHTTPCheck_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (httpCheck *HTTPCheck, cleanup func()) + wantFail bool + }{ + "success case": {wantFail: false, prepare: prepareSuccessCase}, + "timeout case": {wantFail: false, prepare: prepareTimeoutCase}, + "redirect success": {wantFail: false, prepare: prepareRedirectSuccessCase}, + "redirect fail": {wantFail: false, prepare: prepareRedirectFailCase}, + "bad status case": {wantFail: false, prepare: prepareBadStatusCase}, + "bad content case": {wantFail: false, prepare: prepareBadContentCase}, + "no connection case": {wantFail: false, prepare: prepareNoConnectionCase}, + "cookie auth case": {wantFail: false, prepare: prepareCookieAuthCase}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + httpCheck, cleanup := test.prepare() + defer cleanup() + + require.True(t, httpCheck.Init()) + + if test.wantFail { + assert.False(t, httpCheck.Check()) + } else { + assert.True(t, httpCheck.Check()) + } + }) + } 
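+
+	// Note: every case above expects Check() to succeed. Check() only verifies
+	// that Collect() returned metrics, and the collector still reports status
+	// metrics (bad_status, timeout, no_connection, ...) when the probe itself fails.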
+ +} + +func TestHTTPCheck_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (httpCheck *HTTPCheck, cleanup func()) + update func(check *HTTPCheck) + wantMetrics map[string]int64 + }{ + "success case": { + prepare: prepareSuccessCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "timeout case": { + prepare: prepareTimeoutCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 0, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 1, + }, + }, + "redirect success case": { + prepare: prepareRedirectSuccessCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 0, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "redirect fail case": { + prepare: prepareRedirectFailCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 0, + "no_connection": 0, + "redirect": 1, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "bad status case": { + prepare: prepareBadStatusCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 1, + "in_state": 2, + "length": 0, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "bad content case": { + prepare: prepareBadContentCase, + wantMetrics: map[string]int64{ + "bad_content": 1, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 17, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "no connection case": { + prepare: prepareNoConnectionCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 0, + "no_connection": 1, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "header match include no value success case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Key: "header-key2"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "header match include with value success case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Key: "header-key2", Value: "= header-value"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "header match include no value bad headers case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Key: "header-key99"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 1, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "header match include with value bad headers case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + 
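+				// prepareSuccessCase serves 'header-key2: header-value', so the
+				// '= header-value99' rule below cannot match and bad_header is expected.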
httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Key: "header-key2", Value: "= header-value99"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 1, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "header match exclude no value success case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Exclude: true, Key: "header-key99"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "header match exclude with value success case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Exclude: true, Key: "header-key2", Value: "= header-value99"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + "header match exclude no value bad headers case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Exclude: true, Key: "header-key2"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 1, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "header match exclude with value bad headers case": { + prepare: prepareSuccessCase, + update: func(httpCheck *HTTPCheck) { + httpCheck.HeaderMatch = []HeaderMatchConfig{ + {Exclude: true, Key: "header-key2", Value: "= header-value"}, + } + }, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 1, + "bad_status": 0, + "in_state": 2, + "length": 5, + "no_connection": 0, + "redirect": 0, + "success": 0, + "time": 0, + "timeout": 0, + }, + }, + "cookie auth case": { + prepare: prepareCookieAuthCase, + wantMetrics: map[string]int64{ + "bad_content": 0, + "bad_header": 0, + "bad_status": 0, + "in_state": 2, + "length": 0, + "no_connection": 0, + "redirect": 0, + "success": 1, + "time": 0, + "timeout": 0, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + httpCheck, cleanup := test.prepare() + defer cleanup() + + if test.update != nil { + test.update(httpCheck) + } + + require.True(t, httpCheck.Init()) + + var mx map[string]int64 + + for i := 0; i < 2; i++ { + mx = httpCheck.Collect() + time.Sleep(time.Duration(httpCheck.UpdateEvery) * time.Second) + } + + copyResponseTime(test.wantMetrics, mx) + + require.Equal(t, test.wantMetrics, mx) + }) + } +} + +func prepareSuccessCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.ResponseMatch = "match" + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("header-key1", "header-value") + w.Header().Set("header-key2", "header-value") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("match")) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareTimeoutCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.Timeout.Duration = time.Millisecond * 100 + + srv := httptest.NewServer(http.HandlerFunc( + func(w 
http.ResponseWriter, r *http.Request) { + time.Sleep(httpCheck.Timeout.Duration + time.Millisecond*100) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareRedirectSuccessCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.NotFollowRedirect = true + httpCheck.AcceptedStatuses = []int{301} + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "https://example.com", http.StatusMovedPermanently) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareRedirectFailCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.NotFollowRedirect = true + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "https://example.com", http.StatusMovedPermanently) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareBadStatusCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadGateway) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareBadContentCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.ResponseMatch = "no match" + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("hello and goodbye")) + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func prepareNoConnectionCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.URL = "http://127.0.0.1:38001" + + return httpCheck, func() {} +} + +func prepareCookieAuthCase() (*HTTPCheck, func()) { + httpCheck := New() + httpCheck.UpdateEvery = 1 + httpCheck.CookieFile = "testdata/cookie.txt" + + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if _, err := r.Cookie("JSESSIONID"); err != nil { + w.WriteHeader(http.StatusUnauthorized) + } else { + w.WriteHeader(http.StatusOK) + } + })) + + httpCheck.URL = srv.URL + + return httpCheck, srv.Close +} + +func copyResponseTime(dst, src map[string]int64) { + if v, ok := src["time"]; ok { + if _, ok := dst["time"]; ok { + dst["time"] = v + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/init.go b/src/go/collectors/go.d.plugin/modules/httpcheck/init.go new file mode 100644 index 00000000000000..f5b31ad630e885 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/init.go @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +import ( + "errors" + "fmt" + "net/http" + "regexp" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" +) + +type headerMatch struct { + exclude bool + key string + valMatcher matcher.Matcher +} + +func (hc *HTTPCheck) validateConfig() error { + if hc.URL == "" { + return errors.New("'url' not set") + } + return nil +} + +func (hc *HTTPCheck) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(hc.Client) +} + +func (hc *HTTPCheck) initResponseMatchRegexp() (*regexp.Regexp, error) { + if hc.ResponseMatch == "" { + return nil, nil + } + return regexp.Compile(hc.ResponseMatch) +} + +func (hc *HTTPCheck) initHeaderMatch() 
([]headerMatch, error) {
+	if len(hc.HeaderMatch) == 0 {
+		return nil, nil
+	}
+
+	var hms []headerMatch
+
+	for _, v := range hc.HeaderMatch {
+		if v.Key == "" {
+			continue
+		}
+
+		hm := headerMatch{
+			exclude:    v.Exclude,
+			key:        v.Key,
+			valMatcher: nil,
+		}
+
+		if v.Value != "" {
+			m, err := matcher.Parse(v.Value)
+			if err != nil {
+				return nil, fmt.Errorf("parse key '%s' value '%s': %v", v.Key, v.Value, err)
+			}
+			if v.Exclude {
+				m = matcher.Not(m)
+			}
+			hm.valMatcher = m
+		}
+
+		hms = append(hms, hm)
+	}
+
+	return hms, nil
+}
+
+func (hc *HTTPCheck) initCharts() *module.Charts {
+	charts := httpCheckCharts.Copy()
+
+	for _, chart := range *charts {
+		chart.Labels = []module.Label{
+			{Key: "url", Value: hc.URL},
+		}
+	}
+
+	return charts
+}
diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/integrations/http_endpoints.md b/src/go/collectors/go.d.plugin/modules/httpcheck/integrations/http_endpoints.md
new file mode 100644
index 00000000000000..e436828a9c65cf
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/httpcheck/integrations/http_endpoints.md
@@ -0,0 +1,317 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/httpcheck/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/httpcheck/metadata.yaml"
+sidebar_label: "HTTP Endpoints"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Synthetic Checks"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HTTP Endpoints
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: httpcheck
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the availability and response time of HTTP servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per target
+
+The metrics refer to the monitored target.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| url | url value that is set in the configuration file. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| httpcheck.response_time | time | ms |
+| httpcheck.response_length | length | characters |
+| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |
+| httpcheck.in_state | time | boolean |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/httpcheck.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/httpcheck.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |
+| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |
+| header_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |
+| header_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |
+| header_match.key | The exact name of the HTTP header to check for. | | yes |
+| header_match.value | The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header. | | no |
+| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080
+
+```
+</details>
+
+##### With HTTP request headers
+
+Configuration with HTTP request headers that will be sent by the client.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080
+    headers:
+      Host: localhost:8080
+      User-Agent: netdata/go.d.plugin
+      Accept: */*
+
+```
+</details>
+
+##### With `status_accepted`
+
+A basic example configuration with non-default status_accepted.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080
+    status_accepted:
+      - 200
+      - 204
+
+```
+</details>
+
+##### With `header_match`
+
+Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  # The "X-Robots-Tag" header must be present in the HTTP response header,
+  # but the value of the header does not matter.
+  # This config checks for the presence of the header regardless of its value.
+  - name: local
+    url: http://127.0.0.1:8080
+    header_match:
+      - key: X-Robots-Tag
+
+  # The "X-Robots-Tag" header must be present in the HTTP response header
+  # and its value must be equal to "noindex, nofollow".
+  # This config checks both the presence of the header and its value.
+  - name: local
+    url: http://127.0.0.1:8080
+    header_match:
+      - key: X-Robots-Tag
+        value: '= noindex,nofollow'
+
+  # The "X-Robots-Tag" header must not be present in the HTTP response header,
+  # but the value of the header does not matter.
+  # This config checks for the absence of the header regardless of its value.
+  - name: local
+    url: http://127.0.0.1:8080
+    header_match:
+      - key: X-Robots-Tag
+        exclude: yes
+
+  # The "X-Robots-Tag" header must not be present in the HTTP response header
+  # with a value equal to "noindex, nofollow".
+  # This config checks both the presence of the header and its value.
+  - name: local
+    url: http://127.0.0.1:8080
+    header_match:
+      - key: X-Robots-Tag
+        exclude: yes
+        value: '= noindex,nofollow'
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:8080
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080
+
+  - name: remote
+    url: http://192.0.2.1:8080
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m httpcheck
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/metadata.yaml b/src/go/collectors/go.d.plugin/modules/httpcheck/metadata.yaml
new file mode 100644
index 00000000000000..65833f5aa3d16c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/httpcheck/metadata.yaml
@@ -0,0 +1,291 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-httpcheck
+      plugin_name: go.d.plugin
+      module_name: httpcheck
+      monitored_instance:
+        name: HTTP Endpoints
+        link: ""
+        icon_filename: globe.svg
+        categories:
+          - data-collection.synthetic-checks
+      keywords:
+        - webserver
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the availability and response time of HTTP servers.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/httpcheck.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: ""
+              required: true
+            - name: status_accepted
+              description: "HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart."
+              default_value: "[200]"
+              required: false
+            - name: response_match
+              description: If the status code is accepted, the content of the response will be matched against this regular expression.
+              default_value: ""
+              required: false
+            - name: header_match
+              description: "This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response."
+              default_value: "[]"
+              required: false
+            - name: header_match.exclude
+              description: "This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it."
+              default_value: false
+              required: false
+            - name: header_match.key
+              description: "The exact name of the HTTP header to check for."
+              default_value: ""
+              required: true
+            - name: header_match.value
+              description: "The [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) to match against the value of the specified header."
+              default_value: ""
+              required: false
+            - name: cookie_file
+              description: Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat).
+              default_value: ""
+              required: false
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080
+            - name: With HTTP request headers
+              description: Configuration with HTTP request headers that will be sent by the client.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    headers:
+                      Host: localhost:8080
+                      User-Agent: netdata/go.d.plugin
+                      Accept: */*
+            - name: With `status_accepted`
+              description: A basic example configuration with non-default status_accepted.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    status_accepted:
+                      - 200
+                      - 204
+            - name: With `header_match`
+              description: Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format) syntax.
+              config: |
+                jobs:
+                  # The "X-Robots-Tag" header must be present in the HTTP response header,
+                  # but the value of the header does not matter.
+                  # This config checks for the presence of the header regardless of its value.
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    header_match:
+                      - key: X-Robots-Tag
+
+                  # The "X-Robots-Tag" header must be present in the HTTP response header
+                  # and its value must be equal to "noindex, nofollow".
+                  # This config checks both the presence of the header and its value.
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    header_match:
+                      - key: X-Robots-Tag
+                        value: '= noindex,nofollow'
+
+                  # The "X-Robots-Tag" header must not be present in the HTTP response header,
+                  # but the value of the header does not matter.
+                  # This config checks for the absence of the header regardless of its value.
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    header_match:
+                      - key: X-Robots-Tag
+                        exclude: yes
+
+                  # The "X-Robots-Tag" header must not be present in the HTTP response header
+                  # with a value equal to "noindex, nofollow".
+                  # This config checks both the presence of the header and its value.
+                  - name: local
+                    url: http://127.0.0.1:8080
+                    header_match:
+                      - key: X-Robots-Tag
+                        exclude: yes
+                        value: '= noindex,nofollow'
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+ config: | + jobs: + - name: local + url: http://127.0.0.1:8080 + username: username + password: password + - name: HTTPS with self-signed certificate + description: | + Do not validate server certificate chain and hostname. + config: | + jobs: + - name: local + url: https://127.0.0.1:8080 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080 + + - name: remote + url: http://192.0.2.1:8080 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: target + description: The metrics refer to the monitored target. + labels: + - name: url + description: url value that is set in the configuration file. + metrics: + - name: httpcheck.response_time + description: HTTP Response Time + unit: ms + chart_type: line + dimensions: + - name: time + - name: httpcheck.response_length + description: HTTP Response Body Length + unit: characters + chart_type: line + dimensions: + - name: length + - name: httpcheck.status + description: HTTP Check Status + unit: boolean + chart_type: line + dimensions: + - name: success + - name: timeout + - name: redirect + - name: no_connection + - name: bad_content + - name: bad_header + - name: bad_status + - name: httpcheck.in_state + description: HTTP Current State Duration + unit: boolean + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/metrics.go b/src/go/collectors/go.d.plugin/modules/httpcheck/metrics.go new file mode 100644 index 00000000000000..676346fa0f76bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/metrics.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package httpcheck + +type metrics struct { + Status status `stm:""` + InState int `stm:"in_state"` + ResponseTime int `stm:"time"` + ResponseLength int `stm:"length"` +} + +type status struct { + Success bool `stm:"success"` // No error on request, body reading and checking its content + Timeout bool `stm:"timeout"` + Redirect bool `stm:"redirect"` + BadContent bool `stm:"bad_content"` + BadStatusCode bool `stm:"bad_status"` + BadHeader bool `stm:"bad_header"` + NoConnection bool `stm:"no_connection"` // All other errors basically +} diff --git a/src/go/collectors/go.d.plugin/modules/httpcheck/testdata/cookie.txt b/src/go/collectors/go.d.plugin/modules/httpcheck/testdata/cookie.txt new file mode 100644 index 00000000000000..2504c6ffac04fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/httpcheck/testdata/cookie.txt @@ -0,0 +1,5 @@ +# HTTP Cookie File +# Generated by Wget on 2023-03-20 21:38:07. +# Edit at your own risk. 
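+#
+# Each data line below carries the seven whitespace-separated fields of the
+# curl/Netscape cookie file format (linked from the collector's cookie_file option):
+# domain  include-subdomains  path  secure  expiry (unix time, 0 = session)  name  value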
+ +127.0.0.1 FALSE / FALSE 0 JSESSIONID 23B508B767344EA167A4EB9B4DA4E59F \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/init.go b/src/go/collectors/go.d.plugin/modules/init.go new file mode 100644 index 00000000000000..9e44cf98a53e83 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/init.go @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package modules + +import ( + _ "github.com/netdata/go.d.plugin/modules/activemq" + _ "github.com/netdata/go.d.plugin/modules/apache" + _ "github.com/netdata/go.d.plugin/modules/bind" + _ "github.com/netdata/go.d.plugin/modules/cassandra" + _ "github.com/netdata/go.d.plugin/modules/chrony" + _ "github.com/netdata/go.d.plugin/modules/cockroachdb" + _ "github.com/netdata/go.d.plugin/modules/consul" + _ "github.com/netdata/go.d.plugin/modules/coredns" + _ "github.com/netdata/go.d.plugin/modules/couchbase" + _ "github.com/netdata/go.d.plugin/modules/couchdb" + _ "github.com/netdata/go.d.plugin/modules/dnsdist" + _ "github.com/netdata/go.d.plugin/modules/dnsmasq" + _ "github.com/netdata/go.d.plugin/modules/dnsmasq_dhcp" + _ "github.com/netdata/go.d.plugin/modules/dnsquery" + _ "github.com/netdata/go.d.plugin/modules/docker" + _ "github.com/netdata/go.d.plugin/modules/docker_engine" + _ "github.com/netdata/go.d.plugin/modules/dockerhub" + _ "github.com/netdata/go.d.plugin/modules/elasticsearch" + _ "github.com/netdata/go.d.plugin/modules/energid" + _ "github.com/netdata/go.d.plugin/modules/envoy" + _ "github.com/netdata/go.d.plugin/modules/example" + _ "github.com/netdata/go.d.plugin/modules/filecheck" + _ "github.com/netdata/go.d.plugin/modules/fluentd" + _ "github.com/netdata/go.d.plugin/modules/freeradius" + _ "github.com/netdata/go.d.plugin/modules/geth" + _ "github.com/netdata/go.d.plugin/modules/haproxy" + _ "github.com/netdata/go.d.plugin/modules/hdfs" + _ "github.com/netdata/go.d.plugin/modules/httpcheck" + _ "github.com/netdata/go.d.plugin/modules/isc_dhcpd" + _ "github.com/netdata/go.d.plugin/modules/k8s_kubelet" + _ "github.com/netdata/go.d.plugin/modules/k8s_kubeproxy" + _ "github.com/netdata/go.d.plugin/modules/k8s_state" + _ "github.com/netdata/go.d.plugin/modules/lighttpd" + _ "github.com/netdata/go.d.plugin/modules/logind" + _ "github.com/netdata/go.d.plugin/modules/logstash" + _ "github.com/netdata/go.d.plugin/modules/mongodb" + _ "github.com/netdata/go.d.plugin/modules/mysql" + _ "github.com/netdata/go.d.plugin/modules/nginx" + _ "github.com/netdata/go.d.plugin/modules/nginxplus" + _ "github.com/netdata/go.d.plugin/modules/nginxvts" + _ "github.com/netdata/go.d.plugin/modules/ntpd" + _ "github.com/netdata/go.d.plugin/modules/nvidia_smi" + _ "github.com/netdata/go.d.plugin/modules/nvme" + _ "github.com/netdata/go.d.plugin/modules/openvpn" + _ "github.com/netdata/go.d.plugin/modules/openvpn_status_log" + _ "github.com/netdata/go.d.plugin/modules/pgbouncer" + _ "github.com/netdata/go.d.plugin/modules/phpdaemon" + _ "github.com/netdata/go.d.plugin/modules/phpfpm" + _ "github.com/netdata/go.d.plugin/modules/pihole" + _ "github.com/netdata/go.d.plugin/modules/pika" + _ "github.com/netdata/go.d.plugin/modules/ping" + _ "github.com/netdata/go.d.plugin/modules/portcheck" + _ "github.com/netdata/go.d.plugin/modules/postgres" + _ "github.com/netdata/go.d.plugin/modules/powerdns" + _ "github.com/netdata/go.d.plugin/modules/powerdns_recursor" + _ "github.com/netdata/go.d.plugin/modules/prometheus" + _ "github.com/netdata/go.d.plugin/modules/proxysql" + _ 
"github.com/netdata/go.d.plugin/modules/pulsar" + _ "github.com/netdata/go.d.plugin/modules/rabbitmq" + _ "github.com/netdata/go.d.plugin/modules/redis" + _ "github.com/netdata/go.d.plugin/modules/scaleio" + _ "github.com/netdata/go.d.plugin/modules/snmp" + _ "github.com/netdata/go.d.plugin/modules/solr" + _ "github.com/netdata/go.d.plugin/modules/springboot2" + _ "github.com/netdata/go.d.plugin/modules/squidlog" + _ "github.com/netdata/go.d.plugin/modules/supervisord" + _ "github.com/netdata/go.d.plugin/modules/systemdunits" + _ "github.com/netdata/go.d.plugin/modules/tengine" + _ "github.com/netdata/go.d.plugin/modules/traefik" + _ "github.com/netdata/go.d.plugin/modules/unbound" + _ "github.com/netdata/go.d.plugin/modules/upsd" + _ "github.com/netdata/go.d.plugin/modules/vcsa" + _ "github.com/netdata/go.d.plugin/modules/vernemq" + _ "github.com/netdata/go.d.plugin/modules/vsphere" + _ "github.com/netdata/go.d.plugin/modules/weblog" + _ "github.com/netdata/go.d.plugin/modules/whoisquery" + _ "github.com/netdata/go.d.plugin/modules/windows" + _ "github.com/netdata/go.d.plugin/modules/wireguard" + _ "github.com/netdata/go.d.plugin/modules/x509check" + _ "github.com/netdata/go.d.plugin/modules/zookeeper" +) diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/README.md b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/README.md new file mode 120000 index 00000000000000..3385a00a4551b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/README.md @@ -0,0 +1 @@ +integrations/isc_dhcp.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/charts.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/charts.go new file mode 100644 index 00000000000000..07d8ccf9061e5e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/charts.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +var activeLeasesTotalChart = module.Chart{ + ID: "active_leases_total", + Title: "Active Leases Total", + Units: "leases", + Fam: "summary", + Ctx: "isc_dhcpd.active_leases_total", + Dims: module.Dims{ + {ID: "active_leases_total", Name: "active"}, + }, +} + +var ( + poolActiveLeasesChart = module.Chart{ + ID: "pool_active_leases", + Title: "Pool Active Leases", + Units: "leases", + Fam: "pools", + Ctx: "isc_dhcpd.pool_active_leases", + } + poolUtilizationChart = module.Chart{ + ID: "pool_utilization", + Title: "Pool Utilization", + Units: "percentage", + Fam: "pools", + Ctx: "isc_dhcpd.pool_utilization", + } +) diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/collect.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/collect.go new file mode 100644 index 00000000000000..6930e8770f493e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/collect.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd + +import ( + "os" +) + +/* +dhcpd.leases db (file), see details: https://kb.isc.org/docs/en/isc-dhcp-44-manual-pages-dhcpdleases#dhcpdleases + +Every time a lease is acquired, renewed or released, its new value is recorded at the end of the lease file. +So if more than one declaration appears for a given lease, the last one in the file is the current one. + +In order to prevent the lease database from growing without bound, the file is rewritten from time to time. +First, a temporary lease database is created and all known leases are dumped to it. 
+Then, the old lease database is renamed DBDIR/dhcpd.leases~.
+Finally, the newly written lease database is moved into place.
+
+In order to process both DHCPv4 and DHCPv6 messages you will need to run two separate instances of the dhcpd process.
+Each of these instances will need its own lease file.
+*/
+
+func (d *DHCPd) collect() (map[string]int64, error) {
+	fi, err := os.Stat(d.LeasesPath)
+	if err != nil {
+		return nil, err
+	}
+
+	if d.leasesModTime.Equal(fi.ModTime()) {
+		d.Debugf("leases file is not modified, returning cached metrics ('%s')", d.LeasesPath)
+		return d.collected, nil
+	}
+
+	d.leasesModTime = fi.ModTime()
+
+	leases, err := parseDHCPdLeasesFile(d.LeasesPath)
+	if err != nil {
+		return nil, err
+	}
+
+	activeLeases := removeInactiveLeases(leases)
+	d.Debugf("found total/active %d/%d leases ('%s')", len(leases), len(activeLeases), d.LeasesPath)
+
+	for _, pool := range d.pools {
+		collectPool(d.collected, pool, activeLeases)
+	}
+	d.collected["active_leases_total"] = int64(len(activeLeases))
+
+	return d.collected, nil
+}
+
+// precision scales utilization percentages before they are stored as integers;
+// the chart dimensions divide by it again, preserving two decimal places.
+const precision = 100
+
+func collectPool(collected map[string]int64, pool ipPool, leases []leaseEntry) {
+	n := calcPoolActiveLeases(pool, leases)
+	collected["pool_"+pool.name+"_active_leases"] = n
+	collected["pool_"+pool.name+"_utilization"] = int64(calcPoolUtilizationPercentage(pool, n) * precision)
+}
+
+func calcPoolActiveLeases(pool ipPool, leases []leaseEntry) (num int64) {
+	for _, l := range leases {
+		if pool.addresses.Contains(l.ip) {
+			num++
+		}
+	}
+	return num
+}
+
+func calcPoolUtilizationPercentage(pool ipPool, leases int64) float64 {
+	size := pool.addresses.Size()
+	if leases == 0 || !size.IsInt64() {
+		return 0
+	}
+	if size.Int64() == 0 {
+		return 100
+	}
+	return float64(leases) / float64(size.Int64()) * 100
+}
+
+func removeInactiveLeases(leases []leaseEntry) (active []leaseEntry) {
+	active = leases[:0]
+	for _, l := range leases {
+		if l.bindingState == "active" {
+			active = append(active, l)
+		}
+	}
+	return active
+}
diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/config_schema.json b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/config_schema.json
new file mode 100644
index 00000000000000..ed860cbeb66380
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/config_schema.json
@@ -0,0 +1,36 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/isc_dhcpd job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "leases_path": {
+      "type": "string"
+    },
+    "pools": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "name": {
+            "type": "string"
+          },
+          "networks": {
+            "type": "string"
+          }
+        },
+        "required": [
+          "name",
+          "networks"
+        ]
+      }
+    }
+  },
+  "required": [
+    "name",
+    "leases_path",
+    "pools"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/init.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/init.go
new file mode 100644
index 00000000000000..847a4590b1192b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/init.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/iprange"
+)
+
+type ipPool struct {
+	name      string
+	addresses iprange.Pool
+}
+
+func (d DHCPd) validateConfig() error {
+	if d.Config.LeasesPath == "" {
+		return errors.New("'leases_path' parameter not set")
+	}
+	if len(d.Config.Pools) == 0 {
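+		// Pools are mandatory: every per-pool chart dimension is built from this
+		// list (see initCharts), so an empty list would yield charts without dimensions.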
return errors.New("'pools' parameter not set") + } + for i, cfg := range d.Config.Pools { + if cfg.Name == "" { + return fmt.Errorf("'pools[%d]->pool.name' parameter not set", i+1) + } + if cfg.Networks == "" { + return fmt.Errorf("'pools[%d]->pool.networks' parameter not set", i+1) + } + } + return nil +} + +func (d DHCPd) initPools() ([]ipPool, error) { + var pools []ipPool + for i, cfg := range d.Pools { + rs, err := iprange.ParseRanges(cfg.Networks) + if err != nil { + return nil, fmt.Errorf("parse pools[%d]->pool.networks '%s' ('%s'): %v", i+1, cfg.Name, cfg.Networks, err) + } + if len(rs) != 0 { + pools = append(pools, ipPool{ + name: cfg.Name, + addresses: rs, + }) + } + } + return pools, nil +} + +func (d DHCPd) initCharts(pools []ipPool) (*module.Charts, error) { + charts := &module.Charts{} + + if err := charts.Add(activeLeasesTotalChart.Copy()); err != nil { + return nil, err + } + + chart := poolActiveLeasesChart.Copy() + if err := charts.Add(chart); err != nil { + return nil, err + } + for _, pool := range pools { + dim := &module.Dim{ + ID: "pool_" + pool.name + "_active_leases", + Name: pool.name, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + + chart = poolUtilizationChart.Copy() + if err := charts.Add(chart); err != nil { + return nil, err + } + for _, pool := range pools { + dim := &module.Dim{ + ID: "pool_" + pool.name + "_utilization", + Name: pool.name, + Div: precision, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + + return charts, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/integrations/isc_dhcp.md b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/integrations/isc_dhcp.md new file mode 100644 index 00000000000000..563ab0203ad711 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/integrations/isc_dhcp.md @@ -0,0 +1,178 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/isc_dhcpd/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/isc_dhcpd/metadata.yaml" +sidebar_label: "ISC DHCP" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ISC DHCP + + +<img src="https://netdata.cloud/img/isc.png" width="150"/> + + +Plugin: go.d.plugin +Module: isc_dhcpd + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ISC DHCP instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
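+
+As a worked example of the `isc_dhcpd.pool_utilization` metric listed below: assuming a
+pool with 126 assignable addresses and 2 active leases, the collector reports a pool
+utilization of 2/126 × 100 ≈ 1.59%.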
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| isc_dhcpd.active_leases_total | active | leases | +| isc_dhcpd.pool_active_leases | a dimension per DHCP pool | leases | +| isc_dhcpd.pool_utilization | a dimension per DHCP pool | percentage | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/isc_dhcpd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/isc_dhcpd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| leases_path | Path to DHCP client lease database. | /var/lib/dhcp/dhcpd.leases | no | +| pools | List of IP pools to monitor. | | yes | + +##### pools + +List of IP pools to monitor. + +- IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats). +- Syntax: + +```yaml +pools: + - name: "POOL_NAME1" + networks: "SPACE SEPARATED LIST OF IP RANGES" + - name: "POOL_NAME2" + networks: "SPACE SEPARATED LIST OF IP RANGES" +``` + + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + pools: + - name: lan + networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24" + - name: wifi + networks: "10.0.0.0/24" + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m isc_dhcpd + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd.go new file mode 100644 index 00000000000000..e1f4e5764c21c5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("isc_dhcpd", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 1, + }, + Create: func() module.Module { return New() }, + }) +} + +type ( + Config struct { + LeasesPath string `yaml:"leases_path"` + Pools []PoolConfig `yaml:"pools"` + } + PoolConfig struct { + Name string `yaml:"name"` + Networks string `yaml:"networks"` + } +) + +type DHCPd struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + pools []ipPool + leasesModTime time.Time + collected map[string]int64 +} + +func New() *DHCPd { + return &DHCPd{ + Config: Config{ + LeasesPath: "/var/lib/dhcp/dhcpd.leases", + }, + + collected: make(map[string]int64), + } +} + +func (DHCPd) Cleanup() {} + +func (d *DHCPd) Init() bool { + err := d.validateConfig() + if err != nil { + d.Errorf("config validation: %v", err) + return false + } + + pools, err := d.initPools() + if err != nil { + d.Errorf("ip pools init: %v", err) + return false + } + d.pools = pools + + charts, err := d.initCharts(pools) + if err != nil { + d.Errorf("charts init: %v", err) + return false + } + d.charts = charts + + d.Debugf("monitoring leases file: %v", d.Config.LeasesPath) + d.Debugf("monitoring ip pools: %v", d.Config.Pools) + return true +} + +func (d *DHCPd) Check() bool { + return len(d.Collect()) > 0 +} + +func (d *DHCPd) Charts() *module.Charts { + return d.charts +} + +func (d *DHCPd) Collect() map[string]int64 { + mx, err := d.collect() + if err != nil { + d.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd_test.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd_test.go new file mode 100644 index 00000000000000..72980e4694c188 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/isc_dhcpd_test.go @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestDHCPd_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestDHCPd_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default": { + wantFail: true, + config: New().Config, + }, + "'leases_path' not set": { + wantFail: true, + config: Config{ + LeasesPath: "", + Pools: []PoolConfig{ + {Name: "test", Networks: "10.220.252.0/24"}, + }, + }, + }, + "'pools' not set": { + wantFail: true, + config: Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4", + }, + }, + "'pools->pool.networks' invalid syntax": { + wantFail: true, + config: Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4", + Pools: []PoolConfig{ + {Name: "test", 
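+					// Deliberately malformed CIDR (missing its last octet):
+					// iprange.ParseRanges fails on it, so Init must fail.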
Networks: "10.220.252./24"}, + }, + }}, + "ok config ('leases_path' and 'pools' are set)": { + config: Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4", + Pools: []PoolConfig{ + {Name: "test", Networks: "10.220.252.0/24"}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dhcpd := New() + dhcpd.Config = test.config + + if test.wantFail { + assert.False(t, dhcpd.Init()) + } else { + assert.True(t, dhcpd.Init()) + } + }) + } +} + +func TestDHCPd_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *DHCPd + wantFail bool + }{ + "lease db not exists": {prepare: prepareDHCPdLeasesNotExists, wantFail: true}, + "lease db is an empty file": {prepare: prepareDHCPdLeasesEmpty}, + "lease db ipv4": {prepare: prepareDHCPdLeasesIPv4}, + "lease db ipv4 with only inactive leases": {prepare: prepareDHCPdLeasesIPv4Inactive}, + "lease db ipv4 with backup leases": {prepare: prepareDHCPdLeasesIPv4Backup}, + "lease db ipv6": {prepare: prepareDHCPdLeasesIPv6}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dhcpd := test.prepare() + require.True(t, dhcpd.Init()) + + if test.wantFail { + assert.False(t, dhcpd.Check()) + } else { + assert.True(t, dhcpd.Check()) + } + }) + } +} + +func TestDHCPd_Charts(t *testing.T) { + dhcpd := New() + dhcpd.LeasesPath = "leases_path" + dhcpd.Pools = []PoolConfig{ + {Name: "name", Networks: "192.0.2.0/24"}, + } + require.True(t, dhcpd.Init()) + + assert.NotNil(t, dhcpd.Charts()) +} + +func TestDHCPd_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *DHCPd + wantCollected map[string]int64 + }{ + "lease db not exists": { + prepare: prepareDHCPdLeasesNotExists, + wantCollected: nil, + }, + "lease db is an empty file": { + prepare: prepareDHCPdLeasesEmpty, + wantCollected: map[string]int64{ + "active_leases_total": 0, + "pool_net1_active_leases": 0, + "pool_net1_utilization": 0, + "pool_net2_active_leases": 0, + "pool_net2_utilization": 0, + "pool_net3_active_leases": 0, + "pool_net3_utilization": 0, + "pool_net4_active_leases": 0, + "pool_net4_utilization": 0, + "pool_net5_active_leases": 0, + "pool_net5_utilization": 0, + "pool_net6_active_leases": 0, + "pool_net6_utilization": 0, + }, + }, + "lease db ipv4": { + prepare: prepareDHCPdLeasesIPv4, + wantCollected: map[string]int64{ + "active_leases_total": 5, + "pool_net1_active_leases": 2, + "pool_net1_utilization": 158, + "pool_net2_active_leases": 1, + "pool_net2_utilization": 39, + "pool_net3_active_leases": 0, + "pool_net3_utilization": 0, + "pool_net4_active_leases": 1, + "pool_net4_utilization": 79, + "pool_net5_active_leases": 0, + "pool_net5_utilization": 0, + "pool_net6_active_leases": 1, + "pool_net6_utilization": 39, + }, + }, + "lease db ipv4 with only inactive leases": { + prepare: prepareDHCPdLeasesIPv4Inactive, + wantCollected: map[string]int64{ + "active_leases_total": 0, + "pool_net1_active_leases": 0, + "pool_net1_utilization": 0, + "pool_net2_active_leases": 0, + "pool_net2_utilization": 0, + "pool_net3_active_leases": 0, + "pool_net3_utilization": 0, + "pool_net4_active_leases": 0, + "pool_net4_utilization": 0, + "pool_net5_active_leases": 0, + "pool_net5_utilization": 0, + "pool_net6_active_leases": 0, + "pool_net6_utilization": 0, + }, + }, + "lease db ipv4 with backup leases": { + prepare: prepareDHCPdLeasesIPv4Backup, + wantCollected: map[string]int64{ + "active_leases_total": 2, + "pool_net1_active_leases": 1, + "pool_net1_utilization": 79, + "pool_net2_active_leases": 0, + "pool_net2_utilization": 
0, + "pool_net3_active_leases": 0, + "pool_net3_utilization": 0, + "pool_net4_active_leases": 1, + "pool_net4_utilization": 79, + "pool_net5_active_leases": 0, + "pool_net5_utilization": 0, + "pool_net6_active_leases": 0, + "pool_net6_utilization": 0, + }, + }, + "lease db ipv6": { + prepare: prepareDHCPdLeasesIPv6, + wantCollected: map[string]int64{ + "active_leases_total": 6, + "pool_net1_active_leases": 6, + "pool_net1_utilization": 5454, + "pool_net2_active_leases": 0, + "pool_net2_utilization": 0, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + dhcpd := test.prepare() + require.True(t, dhcpd.Init()) + + collected := dhcpd.Collect() + + assert.Equal(t, test.wantCollected, collected) + if len(collected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, dhcpd, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dhcpd *DHCPd, collected map[string]int64) { + for _, chart := range *dhcpd.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareDHCPdLeasesNotExists() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_not_exists", + Pools: []PoolConfig{ + {Name: "net1", Networks: "192.168.3.0/25"}, + {Name: "net2", Networks: "10.254.251.0/24"}, + {Name: "net3", Networks: "10.254.252.0/24"}, + {Name: "net4", Networks: "10.254.253.0/25"}, + {Name: "net5", Networks: "10.254.254.0/25"}, + {Name: "net6", Networks: "10.254.255.0/24"}, + }, + } + return dhcpd +} + +func prepareDHCPdLeasesEmpty() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_empty", + Pools: []PoolConfig{ + {Name: "net1", Networks: "192.168.3.0/25"}, + {Name: "net2", Networks: "10.254.251.0/24"}, + {Name: "net3", Networks: "10.254.252.0/24"}, + {Name: "net4", Networks: "10.254.253.0/25"}, + {Name: "net5", Networks: "10.254.254.0/25"}, + {Name: "net6", Networks: "10.254.255.0/24"}, + }, + } + return dhcpd +} + +func prepareDHCPdLeasesIPv4() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4", + Pools: []PoolConfig{ + {Name: "net1", Networks: "192.168.3.0/25"}, + {Name: "net2", Networks: "10.254.251.0/24"}, + {Name: "net3", Networks: "10.254.252.0/24"}, + {Name: "net4", Networks: "10.254.253.0/25"}, + {Name: "net5", Networks: "10.254.254.0/25"}, + {Name: "net6", Networks: "10.254.255.0/24"}, + }, + } + return dhcpd +} + +func prepareDHCPdLeasesIPv4Backup() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4_backup", + Pools: []PoolConfig{ + {Name: "net1", Networks: "192.168.3.0/25"}, + {Name: "net2", Networks: "10.254.251.0/24"}, + {Name: "net3", Networks: "10.254.252.0/24"}, + {Name: "net4", Networks: "10.254.253.0/25"}, + {Name: "net5", Networks: "10.254.254.0/25"}, + {Name: "net6", Networks: "10.254.255.0/24"}, + }, + } + return dhcpd +} + +func prepareDHCPdLeasesIPv4Inactive() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_ipv4_inactive", + Pools: []PoolConfig{ + {Name: "net1", Networks: "192.168.3.0/25"}, + {Name: "net2", Networks: "10.254.251.0/24"}, + {Name: "net3", Networks: "10.254.252.0/24"}, + {Name: "net4", Networks: 
"10.254.253.0/25"}, + {Name: "net5", Networks: "10.254.254.0/25"}, + {Name: "net6", Networks: "10.254.255.0/24"}, + }, + } + return dhcpd +} + +func prepareDHCPdLeasesIPv6() *DHCPd { + dhcpd := New() + dhcpd.Config = Config{ + LeasesPath: "testdata/dhcpd.leases_ipv6", + Pools: []PoolConfig{ + {Name: "net1", Networks: "2001:db8::-2001:db8::a"}, + {Name: "net2", Networks: "2001:db8:0:1::-2001:db8:0:1::a"}, + }, + } + return dhcpd +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/metadata.yaml b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/metadata.yaml new file mode 100644 index 00000000000000..c77370c7c9e0a2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/metadata.yaml @@ -0,0 +1,129 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-isc_dhcpd + plugin_name: go.d.plugin + module_name: isc_dhcpd + monitored_instance: + name: ISC DHCP + link: https://www.isc.org/dhcp/ + categories: + - data-collection.dns-and-dhcp-servers + icon_filename: isc.png + keywords: + - dhcpd + - dhcp + most_popular: false + info_provided_to_referring_integrations: + description: "" + related_resources: + integrations: + list: [] + overview: + data_collection: + metrics_description: | + This collector monitors ISC DHCP lease usage by reading the DHCP client lease database (dhcpd.leases). + method_description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/isc_dhcpd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: leases_path + description: Path to DHCP client lease database. + default_value: /var/lib/dhcp/dhcpd.leases + required: false + - name: pools + description: List of IP pools to monitor. + default_value: "" + required: true + detailed_description: | + List of IP pools to monitor. + + - IP range syntax: see [supported formats](https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats). + - Syntax: + + ```yaml + pools: + - name: "POOL_NAME1" + networks: "SPACE SEPARATED LIST OF IP RANGES" + - name: "POOL_NAME2" + networks: "SPACE SEPARATED LIST OF IP RANGES" + ``` + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + pools: + - name: lan + networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24" + - name: wifi + networks: "10.0.0.0/24" + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: isc_dhcpd.active_leases_total + description: Active Leases Total + unit: leases + chart_type: line + dimensions: + - name: active + - name: isc_dhcpd.pool_active_leases + description: Pool Active Leases + unit: leases + chart_type: line + dimensions: + - name: a dimension per DHCP pool + - name: isc_dhcpd.pool_utilization + description: Pool Utilization + unit: percentage + chart_type: line + dimensions: + - name: a dimension per DHCP pool diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/parse.go b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/parse.go new file mode 100644 index 00000000000000..cb4161745b52bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/parse.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package isc_dhcpd + +import ( + "bufio" + "bytes" + "net" + "os" +) + +/* +Documentation (v4.4): https://kb.isc.org/docs/en/isc-dhcp-44-manual-pages-dhcpdleases + +DHCPv4 lease declaration: + lease ip-address { + statements... + } + +DHCPv6 lease declaration: + ia_ta IAID_DUID { + cltt date; + iaaddr ipv6-address { + statements... + } + } + ia_na IAID_DUID { + cltt date; + iaaddr ipv6-address { + statements... + } + } + ia_pd IAID_DUID { + cltt date; + iaprefix ipv6-address/prefix-length { + statements... + } + } +*/ + +type leaseEntry struct { + ip net.IP + bindingState string +} + +func (l leaseEntry) hasIP() bool { return l.ip != nil } +func (l leaseEntry) hasBindingState() bool { return l.bindingState != "" } + +func parseDHCPdLeasesFile(filepath string) ([]leaseEntry, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + leasesSet := make(map[string]leaseEntry) + l := leaseEntry{} + sc := bufio.NewScanner(f) + + for sc.Scan() { + bs := bytes.TrimSpace(sc.Bytes()) + switch { + case !l.hasIP() && bytes.HasPrefix(bs, []byte("lease")): + // "lease 192.168.0.1 {" => "192.168.0.1" + s := string(bs) + l.ip = net.ParseIP(s[6 : len(s)-2]) + case !l.hasIP() && bytes.HasPrefix(bs, []byte("iaaddr")): + // "iaaddr 1985:470:1f0b:c9a::001 {" => "1985:470:1f0b:c9a::001" + s := string(bs) + l.ip = net.ParseIP(s[7 : len(s)-2]) + case l.hasIP() && !l.hasBindingState() && bytes.HasPrefix(bs, []byte("binding state")): + // "binding state active;" => "active" + s := string(bs) + l.bindingState = s[14 : len(s)-1] + case bytes.HasPrefix(bs, []byte("}")): + if l.hasIP() && l.hasBindingState() { + leasesSet[l.ip.String()] = l + } + l = leaseEntry{} + } + } + + if len(leasesSet) == 0 { + return nil, nil + } + + leases := make([]leaseEntry, 0, len(leasesSet)) + for _, l := range leasesSet { + leases = append(leases, l) + } + return leases, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_empty b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_empty new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4 b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4 new file mode 100644 index 00000000000000..08e0e3f2098469 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4 @@ -0,0 +1,370 @@ +# The format of this file is documented in the dhcpd.leases(5) manual page. 
+# This lease file was written by isc-dhcp-4.3.1 + +lease 10.254.252.2 { + starts 3 2014/07/23 07:32:16; + ends 3 2014/07/23 09:12:16; + tstp 3 2014/07/23 09:12:16; + cltt 3 2014/07/23 07:32:16; + binding state free; + hardware ethernet f0:de:f1:89:24:1f; + uid "\001\360\336\361\211$\037"; +} +lease 10.254.252.3 { + starts 5 2014/11/28 05:49:01; + ends 5 2014/11/28 07:29:01; + tstp 5 2014/11/28 07:29:01; + cltt 5 2014/11/28 05:49:01; + binding state free; + hardware ethernet c0:4a:00:00:f5:fa; + uid "\001\300J\000\000\365\372"; +} +lease 10.254.252.4 { + starts 5 2016/03/11 01:03:59; + ends 5 2016/03/11 02:33:20; + tstp 5 2016/03/11 02:33:20; + cltt 5 2016/03/11 01:12:33; + binding state free; + hardware ethernet 00:1c:c0:7a:38:3f; + uid "\001\000\034\300z8?"; + set vendor-class-identifier = "MSFT 5.0"; +} +lease 10.254.252.5 { + starts 1 2016/09/05 23:53:19; + ends 2 2016/09/06 01:33:19; + tstp 2 2016/09/06 01:33:19; + cltt 1 2016/09/05 23:53:19; + binding state free; + hardware ethernet 28:28:5d:65:30:ef; + uid "\001((]e0\357"; +} +lease 10.254.252.6 { + starts 4 2016/09/29 01:41:23; + ends 4 2016/09/29 03:21:23; + tstp 4 2016/09/29 03:21:23; + cltt 4 2016/09/29 01:41:23; + binding state free; + hardware ethernet 04:bf:6d:94:1b:0d; + uid "\001\004\277m\224\033\015"; +} +lease 10.254.252.7 { + starts 1 2016/10/03 08:23:14; + ends 1 2016/10/03 10:03:14; + tstp 1 2016/10/03 10:03:14; + cltt 1 2016/10/03 08:23:14; + binding state free; + hardware ethernet ec:22:80:f7:3f:44; + uid "\001\354\"\200\367?D"; +} +lease 10.254.252.8 { + starts 5 2016/10/07 05:43:11; + ends 5 2016/10/07 05:58:31; + tstp 5 2016/10/07 05:58:31; + cltt 5 2016/10/07 05:43:11; + binding state free; + hardware ethernet 70:62:b8:bf:b5:b3; + uid "\001pb\270\277\265\263"; +} +lease 192.168.3.15 { + starts 2 2019/01/08 06:29:58; + ends 2 2019/01/08 08:09:58; + tstp 2 2019/01/08 08:09:58; + cltt 2 2019/01/08 06:29:58; + binding state free; + hardware ethernet a8:f9:4b:20:99:9c; + uid "\001\250\371K \231\234"; +} +lease 192.168.3.18 { + starts 2 2020/03/10 01:46:07; + ends 2 2020/03/10 03:22:21; + tstp 2 2020/03/10 03:22:21; + cltt 2 2020/03/10 01:46:08; + binding state free; + hardware ethernet 04:bf:6d:0d:e2:35; + uid "\001\004\277m\015\3425"; + set vendor-class-identifier = "ndhcpc"; +} +lease 192.168.3.11 { + starts 6 2020/10/03 07:52:36; + ends 6 2020/10/03 09:32:36; + cltt 6 2020/10/03 07:52:36; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 60:a4:4c:3f:6e:78; + uid "\001`\244L?nx"; +} +lease 192.168.3.10 { + starts 6 2020/10/03 08:18:50; + ends 6 2020/10/03 09:58:50; + cltt 6 2020/10/03 08:18:50; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 70:62:b8:bf:b5:b3; + uid "\001pb\270\277\265\263"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.251.101 { + starts 0 2017/03/12 22:11:59; + ends 0 2017/03/12 23:51:58; + tstp 0 2017/03/12 23:51:58; + cltt 0 2017/03/12 22:11:59; + binding state free; + hardware ethernet b4:ce:f6:01:83:73; + set vendor-class-identifier = "dhcpcd-5.5.6"; +} +lease 10.254.251.102 { + starts 5 2017/05/19 06:07:39; + ends 5 2017/05/19 07:47:39; + tstp 5 2017/05/19 07:47:39; + cltt 5 2017/05/19 06:07:39; + binding state free; + hardware ethernet 34:51:c9:4c:40:c9; + uid "\0014Q\311L@\311"; +} +lease 10.254.251.103 { + starts 2 2018/04/24 13:18:00; + ends 2 2018/04/24 14:58:00; + tstp 2 2018/04/24 14:58:00; + cltt 2 2018/04/24 13:18:00; + binding state free; + hardware ethernet 
70:8a:09:da:74:d0; + set vendor-class-identifier = "dhcpcd-5.5.6"; +} +lease 10.254.251.104 { + starts 2 2018/04/24 12:54:27; + ends 3 2018/04/25 06:47:20; + tstp 3 2018/04/25 06:47:20; + cltt 2 2018/04/24 12:54:28; + binding state free; + hardware ethernet 78:a3:e4:e8:12:1f; + uid "\001x\243\344\350\022\037"; +} +lease 10.254.251.100 { + starts 6 2020/10/03 07:58:45; + ends 6 2020/10/03 09:38:45; + cltt 6 2020/10/03 07:58:45; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 74:ea:3a:a6:a9:c7; + uid "\001t\352:\246\251\307"; + set vendor-class-identifier = "MSFT 5.0"; + client-hostname "TL-WR741N"; +} +lease 10.254.255.104 { + starts 1 2017/07/10 09:35:24; + ends 1 2017/07/10 09:37:24; + tstp 1 2017/07/10 09:37:24; + cltt 1 2017/07/10 09:35:24; + binding state free; + hardware ethernet 50:85:69:11:b6:ff; + uid "\001P\205i\021\266\377"; +} +lease 10.254.255.102 { + starts 3 2017/08/16 22:01:09; + ends 3 2017/08/16 23:41:09; + tstp 3 2017/08/16 23:41:09; + cltt 3 2017/08/16 22:01:09; + binding state free; + hardware ethernet c8:d3:a3:54:31:3a; + uid "\001\310\323\243T1:"; +} +lease 10.254.255.103 { + starts 0 2018/12/16 00:54:07; + ends 0 2018/12/16 02:34:07; + tstp 0 2018/12/16 02:34:07; + cltt 0 2018/12/16 00:54:07; + binding state free; + hardware ethernet 08:c6:b3:01:e8:18; + uid "\001\010\306\263\001\350\030"; + set vendor-class-identifier = "QTCH-QBR1041WUV2"; +} +lease 10.254.255.100 { + starts 2 2018/12/18 09:21:24; + ends 2 2018/12/18 10:32:36; + tstp 2 2018/12/18 10:32:36; + cltt 2 2018/12/18 09:21:30; + binding state free; + hardware ethernet 70:62:b8:c3:51:a3; + uid "\001pb\270\303Q\243"; +} +lease 10.254.255.105 { + starts 5 2019/03/22 07:42:55; + ends 5 2019/03/22 09:22:55; + tstp 5 2019/03/22 09:22:55; + cltt 5 2019/03/22 07:42:55; + binding state free; + hardware ethernet 58:d5:6e:95:88:30; + uid "\001X\325n\225\2100"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.255.101 { + starts 6 2020/10/03 07:29:24; + ends 6 2020/10/03 09:09:24; + cltt 6 2020/10/03 07:29:24; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 28:3b:82:58:f4:58; + uid "\001(;\202X\364X"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.253.104 { + starts 4 2018/03/15 12:01:12; + ends 4 2018/03/15 12:34:35; + tstp 4 2018/03/15 12:34:35; + cltt 4 2018/03/15 12:02:58; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3d; + uid "\001Pd+O\375="; + set vendor-class-identifier = "udhcp 1.19.4"; +} +lease 10.254.253.105 { + starts 4 2018/03/15 12:39:46; + ends 4 2018/03/15 14:17:39; + tstp 4 2018/03/15 14:17:39; + cltt 4 2018/03/15 12:39:47; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3d; + set vendor-class-identifier = "udhcp 1.19.4"; +} +lease 10.254.253.101 { + starts 5 2018/03/16 11:00:43; + ends 5 2018/03/16 12:40:15; + tstp 5 2018/03/16 12:40:15; + cltt 5 2018/03/16 11:00:43; + binding state free; + hardware ethernet d0:66:7b:8b:e5:ff; + uid "\001\320f{\213\345\377"; + set vendor-class-identifier = "udhcp 1.14.3-VD Linux VDLinux.1.2.1.x"; +} +lease 10.254.253.102 { + starts 5 2018/03/16 11:26:21; + ends 5 2018/03/16 13:06:21; + tstp 5 2018/03/16 13:06:21; + cltt 5 2018/03/16 11:26:21; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3f; + uid "\001Pd+O\375?"; +} +lease 10.254.253.100 { + starts 2 2018/08/21 05:48:43; + ends 2 2018/08/21 07:23:13; + tstp 2 2018/08/21 07:23:13; + cltt 2 2018/08/21 05:48:44; + binding state free; + hardware ethernet 
20:cf:30:ef:8e:a4; + uid "\001 \3170\357\216\244"; + set vendor-class-identifier = "udhcp 0.9.8-asus"; +} +lease 10.254.253.103 { + starts 6 2020/10/03 08:07:02; + ends 6 2020/10/03 09:47:02; + cltt 6 2020/10/03 08:07:02; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 34:ce:00:03:08:57; + uid "\0014\316\000\003\010W"; + set vendor-class-identifier = "udhcp 1.24.2"; +} +lease 10.254.254.103 { + starts 3 2015/11/11 09:03:11; + ends 3 2015/11/11 09:05:11; + tstp 3 2015/11/11 09:05:11; + cltt 3 2015/11/11 09:03:11; + binding state free; + hardware ethernet 74:d0:2b:0e:9b:d6; +} +lease 10.254.254.104 { + starts 0 2017/12/03 15:57:29; + ends 0 2017/12/03 17:37:29; + tstp 0 2017/12/03 17:37:29; + cltt 0 2017/12/03 15:57:29; + binding state free; + hardware ethernet ac:22:0b:78:00:78; + uid "\377\3139\012\307\000\002\000\000\253\021(CC\252e\021\000\017"; +} +lease 10.254.254.105 { + starts 2 2018/06/26 12:30:04; + ends 2 2018/06/26 13:09:10; + tstp 2 2018/06/26 13:09:10; + cltt 2 2018/06/26 12:30:04; + binding state free; + hardware ethernet cc:2d:e0:3f:bc:5c; + uid "\001\314-\340?\274\\"; +} +lease 10.254.254.101 { + starts 3 2018/07/25 09:33:10; + ends 3 2018/07/25 11:13:10; + tstp 3 2018/07/25 11:13:10; + cltt 3 2018/07/25 09:33:10; + binding state free; + hardware ethernet 74:d0:2b:0e:9b:d6; + uid "\001t\320+\016\233\326"; + set vendor-class-identifier = "MSFT 5.0"; +} +lease 10.254.254.100 { + starts 2 2020/09/22 11:19:29; + ends 2 2020/09/22 11:21:29; + cltt 2 2020/09/22 11:19:29; + binding state free; + hardware ethernet 30:45:96:6a:f3:de; + uid "\0010E\226j\363\336"; + client-hostname "Honor_7C-bb23201389a3c44"; +} +lease 10.254.254.102 { + starts 2 2020/09/22 11:25:14; + ends 2 2020/09/22 11:27:14; + cltt 2 2020/09/22 11:25:14; + binding state free; + hardware ethernet c8:3d:dc:be:d2:cf; + uid "\001\310=\334\276\322\317"; + client-hostname "Redmi7A-Redmi"; +} +lease 10.254.255.101 { + starts 6 2020/10/03 08:19:24; + ends 6 2020/10/03 09:59:24; + cltt 6 2020/10/03 08:19:24; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 28:3b:82:58:f4:58; + uid "\001(;\202X\364X"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.251.100 { + starts 6 2020/10/03 08:48:45; + ends 6 2020/10/03 10:28:45; + cltt 6 2020/10/03 08:48:45; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 74:ea:3a:a6:a9:c7; + uid "\001t\352:\246\251\307"; + set vendor-class-identifier = "MSFT 5.0"; + client-hostname "TL-WR741N"; +} +lease 10.254.253.103 { + starts 6 2020/10/03 08:57:02; + ends 6 2020/10/03 10:37:02; + cltt 6 2020/10/03 08:57:02; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 34:ce:00:03:08:57; + uid "\0014\316\000\003\010W"; + set vendor-class-identifier = "udhcp 1.24.2"; +} +lease 192.168.3.11 { + starts 6 2020/10/03 09:01:22; + ends 6 2020/10/03 10:41:22; + cltt 6 2020/10/03 09:01:22; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 60:a4:4c:3f:6e:78; + uid "\001`\244L?nx"; +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup new file mode 100644 index 00000000000000..e822ca8466299a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup @@ -0,0 +1,39 @@ +# The format of 
this file is documented in the dhcpd.leases(5) manual page. +# This lease file was written by isc-dhcp-4.4.2 + +# authoring-byte-order entry is generated, DO NOT DELETE +authoring-byte-order little-endian; + +lease 10.254.253.103 { + starts 6 2020/10/03 08:57:02; + ends 6 2020/10/03 10:37:02; + cltt 6 2020/10/03 08:57:02; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 34:ce:00:03:08:57; + uid "\0014\316\000\003\010W"; + set vendor-class-identifier = "udhcp 1.24.2"; +} +lease 192.168.3.1 { + starts 6 2018/02/17 01:13:21; + tsfp 6 2018/02/17 01:13:21; + atsfp 6 2018/02/17 01:13:21; + binding state backup; +} +lease 192.168.3.11 { + starts 6 2020/10/03 09:01:22; + ends 6 2020/10/03 10:41:22; + cltt 6 2020/10/03 09:01:22; + binding state active; + next binding state free; + rewind binding state free; + hardware ethernet 60:a4:4c:3f:6e:78; + uid "\001`\244L?nx"; +} +lease 192.168.3.2 { + starts 6 2018/02/17 01:13:21; + tsfp 6 2018/02/17 01:13:21; + atsfp 6 2018/02/17 01:13:21; + binding state backup; +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive new file mode 100644 index 00000000000000..c5aed080f8d50c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive @@ -0,0 +1,370 @@ +# The format of this file is documented in the dhcpd.leases(5) manual page. +# This lease file was written by isc-dhcp-4.3.1 + +lease 10.254.252.2 { + starts 3 2014/07/23 07:32:16; + ends 3 2014/07/23 09:12:16; + tstp 3 2014/07/23 09:12:16; + cltt 3 2014/07/23 07:32:16; + binding state free; + hardware ethernet f0:de:f1:89:24:1f; + uid "\001\360\336\361\211$\037"; +} +lease 10.254.252.3 { + starts 5 2014/11/28 05:49:01; + ends 5 2014/11/28 07:29:01; + tstp 5 2014/11/28 07:29:01; + cltt 5 2014/11/28 05:49:01; + binding state free; + hardware ethernet c0:4a:00:00:f5:fa; + uid "\001\300J\000\000\365\372"; +} +lease 10.254.252.4 { + starts 5 2016/03/11 01:03:59; + ends 5 2016/03/11 02:33:20; + tstp 5 2016/03/11 02:33:20; + cltt 5 2016/03/11 01:12:33; + binding state free; + hardware ethernet 00:1c:c0:7a:38:3f; + uid "\001\000\034\300z8?"; + set vendor-class-identifier = "MSFT 5.0"; +} +lease 10.254.252.5 { + starts 1 2016/09/05 23:53:19; + ends 2 2016/09/06 01:33:19; + tstp 2 2016/09/06 01:33:19; + cltt 1 2016/09/05 23:53:19; + binding state free; + hardware ethernet 28:28:5d:65:30:ef; + uid "\001((]e0\357"; +} +lease 10.254.252.6 { + starts 4 2016/09/29 01:41:23; + ends 4 2016/09/29 03:21:23; + tstp 4 2016/09/29 03:21:23; + cltt 4 2016/09/29 01:41:23; + binding state free; + hardware ethernet 04:bf:6d:94:1b:0d; + uid "\001\004\277m\224\033\015"; +} +lease 10.254.252.7 { + starts 1 2016/10/03 08:23:14; + ends 1 2016/10/03 10:03:14; + tstp 1 2016/10/03 10:03:14; + cltt 1 2016/10/03 08:23:14; + binding state free; + hardware ethernet ec:22:80:f7:3f:44; + uid "\001\354\"\200\367?D"; +} +lease 10.254.252.8 { + starts 5 2016/10/07 05:43:11; + ends 5 2016/10/07 05:58:31; + tstp 5 2016/10/07 05:58:31; + cltt 5 2016/10/07 05:43:11; + binding state free; + hardware ethernet 70:62:b8:bf:b5:b3; + uid "\001pb\270\277\265\263"; +} +lease 192.168.3.15 { + starts 2 2019/01/08 06:29:58; + ends 2 2019/01/08 08:09:58; + tstp 2 2019/01/08 08:09:58; + cltt 2 2019/01/08 06:29:58; + binding state free; + hardware ethernet a8:f9:4b:20:99:9c; + uid "\001\250\371K \231\234"; +} +lease 192.168.3.18 { + starts 2 
2020/03/10 01:46:07; + ends 2 2020/03/10 03:22:21; + tstp 2 2020/03/10 03:22:21; + cltt 2 2020/03/10 01:46:08; + binding state free; + hardware ethernet 04:bf:6d:0d:e2:35; + uid "\001\004\277m\015\3425"; + set vendor-class-identifier = "ndhcpc"; +} +lease 192.168.3.11 { + starts 6 2020/10/03 07:52:36; + ends 6 2020/10/03 09:32:36; + cltt 6 2020/10/03 07:52:36; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 60:a4:4c:3f:6e:78; + uid "\001`\244L?nx"; +} +lease 192.168.3.10 { + starts 6 2020/10/03 08:18:50; + ends 6 2020/10/03 09:58:50; + cltt 6 2020/10/03 08:18:50; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 70:62:b8:bf:b5:b3; + uid "\001pb\270\277\265\263"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.251.101 { + starts 0 2017/03/12 22:11:59; + ends 0 2017/03/12 23:51:58; + tstp 0 2017/03/12 23:51:58; + cltt 0 2017/03/12 22:11:59; + binding state free; + hardware ethernet b4:ce:f6:01:83:73; + set vendor-class-identifier = "dhcpcd-5.5.6"; +} +lease 10.254.251.102 { + starts 5 2017/05/19 06:07:39; + ends 5 2017/05/19 07:47:39; + tstp 5 2017/05/19 07:47:39; + cltt 5 2017/05/19 06:07:39; + binding state free; + hardware ethernet 34:51:c9:4c:40:c9; + uid "\0014Q\311L@\311"; +} +lease 10.254.251.103 { + starts 2 2018/04/24 13:18:00; + ends 2 2018/04/24 14:58:00; + tstp 2 2018/04/24 14:58:00; + cltt 2 2018/04/24 13:18:00; + binding state free; + hardware ethernet 70:8a:09:da:74:d0; + set vendor-class-identifier = "dhcpcd-5.5.6"; +} +lease 10.254.251.104 { + starts 2 2018/04/24 12:54:27; + ends 3 2018/04/25 06:47:20; + tstp 3 2018/04/25 06:47:20; + cltt 2 2018/04/24 12:54:28; + binding state free; + hardware ethernet 78:a3:e4:e8:12:1f; + uid "\001x\243\344\350\022\037"; +} +lease 10.254.251.100 { + starts 6 2020/10/03 07:58:45; + ends 6 2020/10/03 09:38:45; + cltt 6 2020/10/03 07:58:45; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 74:ea:3a:a6:a9:c7; + uid "\001t\352:\246\251\307"; + set vendor-class-identifier = "MSFT 5.0"; + client-hostname "TL-WR741N"; +} +lease 10.254.255.104 { + starts 1 2017/07/10 09:35:24; + ends 1 2017/07/10 09:37:24; + tstp 1 2017/07/10 09:37:24; + cltt 1 2017/07/10 09:35:24; + binding state free; + hardware ethernet 50:85:69:11:b6:ff; + uid "\001P\205i\021\266\377"; +} +lease 10.254.255.102 { + starts 3 2017/08/16 22:01:09; + ends 3 2017/08/16 23:41:09; + tstp 3 2017/08/16 23:41:09; + cltt 3 2017/08/16 22:01:09; + binding state free; + hardware ethernet c8:d3:a3:54:31:3a; + uid "\001\310\323\243T1:"; +} +lease 10.254.255.103 { + starts 0 2018/12/16 00:54:07; + ends 0 2018/12/16 02:34:07; + tstp 0 2018/12/16 02:34:07; + cltt 0 2018/12/16 00:54:07; + binding state free; + hardware ethernet 08:c6:b3:01:e8:18; + uid "\001\010\306\263\001\350\030"; + set vendor-class-identifier = "QTCH-QBR1041WUV2"; +} +lease 10.254.255.100 { + starts 2 2018/12/18 09:21:24; + ends 2 2018/12/18 10:32:36; + tstp 2 2018/12/18 10:32:36; + cltt 2 2018/12/18 09:21:30; + binding state free; + hardware ethernet 70:62:b8:c3:51:a3; + uid "\001pb\270\303Q\243"; +} +lease 10.254.255.105 { + starts 5 2019/03/22 07:42:55; + ends 5 2019/03/22 09:22:55; + tstp 5 2019/03/22 09:22:55; + cltt 5 2019/03/22 07:42:55; + binding state free; + hardware ethernet 58:d5:6e:95:88:30; + uid "\001X\325n\225\2100"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.255.101 { + starts 6 2020/10/03 07:29:24; + ends 6 2020/10/03 09:09:24; + 
cltt 6 2020/10/03 07:29:24; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 28:3b:82:58:f4:58; + uid "\001(;\202X\364X"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.253.104 { + starts 4 2018/03/15 12:01:12; + ends 4 2018/03/15 12:34:35; + tstp 4 2018/03/15 12:34:35; + cltt 4 2018/03/15 12:02:58; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3d; + uid "\001Pd+O\375="; + set vendor-class-identifier = "udhcp 1.19.4"; +} +lease 10.254.253.105 { + starts 4 2018/03/15 12:39:46; + ends 4 2018/03/15 14:17:39; + tstp 4 2018/03/15 14:17:39; + cltt 4 2018/03/15 12:39:47; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3d; + set vendor-class-identifier = "udhcp 1.19.4"; +} +lease 10.254.253.101 { + starts 5 2018/03/16 11:00:43; + ends 5 2018/03/16 12:40:15; + tstp 5 2018/03/16 12:40:15; + cltt 5 2018/03/16 11:00:43; + binding state free; + hardware ethernet d0:66:7b:8b:e5:ff; + uid "\001\320f{\213\345\377"; + set vendor-class-identifier = "udhcp 1.14.3-VD Linux VDLinux.1.2.1.x"; +} +lease 10.254.253.102 { + starts 5 2018/03/16 11:26:21; + ends 5 2018/03/16 13:06:21; + tstp 5 2018/03/16 13:06:21; + cltt 5 2018/03/16 11:26:21; + binding state free; + hardware ethernet 50:64:2b:4f:fd:3f; + uid "\001Pd+O\375?"; +} +lease 10.254.253.100 { + starts 2 2018/08/21 05:48:43; + ends 2 2018/08/21 07:23:13; + tstp 2 2018/08/21 07:23:13; + cltt 2 2018/08/21 05:48:44; + binding state free; + hardware ethernet 20:cf:30:ef:8e:a4; + uid "\001 \3170\357\216\244"; + set vendor-class-identifier = "udhcp 0.9.8-asus"; +} +lease 10.254.253.103 { + starts 6 2020/10/03 08:07:02; + ends 6 2020/10/03 09:47:02; + cltt 6 2020/10/03 08:07:02; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 34:ce:00:03:08:57; + uid "\0014\316\000\003\010W"; + set vendor-class-identifier = "udhcp 1.24.2"; +} +lease 10.254.254.103 { + starts 3 2015/11/11 09:03:11; + ends 3 2015/11/11 09:05:11; + tstp 3 2015/11/11 09:05:11; + cltt 3 2015/11/11 09:03:11; + binding state free; + hardware ethernet 74:d0:2b:0e:9b:d6; +} +lease 10.254.254.104 { + starts 0 2017/12/03 15:57:29; + ends 0 2017/12/03 17:37:29; + tstp 0 2017/12/03 17:37:29; + cltt 0 2017/12/03 15:57:29; + binding state free; + hardware ethernet ac:22:0b:78:00:78; + uid "\377\3139\012\307\000\002\000\000\253\021(CC\252e\021\000\017"; +} +lease 10.254.254.105 { + starts 2 2018/06/26 12:30:04; + ends 2 2018/06/26 13:09:10; + tstp 2 2018/06/26 13:09:10; + cltt 2 2018/06/26 12:30:04; + binding state free; + hardware ethernet cc:2d:e0:3f:bc:5c; + uid "\001\314-\340?\274\\"; +} +lease 10.254.254.101 { + starts 3 2018/07/25 09:33:10; + ends 3 2018/07/25 11:13:10; + tstp 3 2018/07/25 11:13:10; + cltt 3 2018/07/25 09:33:10; + binding state free; + hardware ethernet 74:d0:2b:0e:9b:d6; + uid "\001t\320+\016\233\326"; + set vendor-class-identifier = "MSFT 5.0"; +} +lease 10.254.254.100 { + starts 2 2020/09/22 11:19:29; + ends 2 2020/09/22 11:21:29; + cltt 2 2020/09/22 11:19:29; + binding state free; + hardware ethernet 30:45:96:6a:f3:de; + uid "\0010E\226j\363\336"; + client-hostname "Honor_7C-bb23201389a3c44"; +} +lease 10.254.254.102 { + starts 2 2020/09/22 11:25:14; + ends 2 2020/09/22 11:27:14; + cltt 2 2020/09/22 11:25:14; + binding state free; + hardware ethernet c8:3d:dc:be:d2:cf; + uid "\001\310=\334\276\322\317"; + client-hostname "Redmi7A-Redmi"; +} +lease 10.254.255.101 { + starts 6 2020/10/03 08:19:24; + ends 6 2020/10/03 09:59:24; + cltt 6 2020/10/03 
08:19:24; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 28:3b:82:58:f4:58; + uid "\001(;\202X\364X"; + set vendor-class-identifier = "dslforum.org"; +} +lease 10.254.251.100 { + starts 6 2020/10/03 08:48:45; + ends 6 2020/10/03 10:28:45; + cltt 6 2020/10/03 08:48:45; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 74:ea:3a:a6:a9:c7; + uid "\001t\352:\246\251\307"; + set vendor-class-identifier = "MSFT 5.0"; + client-hostname "TL-WR741N"; +} +lease 10.254.253.103 { + starts 6 2020/10/03 08:57:02; + ends 6 2020/10/03 10:37:02; + cltt 6 2020/10/03 08:57:02; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 34:ce:00:03:08:57; + uid "\0014\316\000\003\010W"; + set vendor-class-identifier = "udhcp 1.24.2"; +} +lease 192.168.3.11 { + starts 6 2020/10/03 09:01:22; + ends 6 2020/10/03 10:41:22; + cltt 6 2020/10/03 09:01:22; + binding state free; + next binding state free; + rewind binding state free; + hardware ethernet 60:a4:4c:3f:6e:78; + uid "\001`\244L?nx"; +} diff --git a/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6 b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6 new file mode 100644 index 00000000000000..3a4f1520eda2ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6 @@ -0,0 +1,67 @@ +# The format of this file is documented in the dhcpd.leases(5) manual page. +# This lease file was written by isc-dhcp-4.3.6b1 + +# authoring-byte-order entry is generated, DO NOT DELETE +authoring-byte-order little-endian; + +server-duid "\000\001\002\003!\004\005\006\007\008)^6\257"; + +ia-na "'\000\010\016\000\001\000\001!\320\263\003\010\000'\327\337\354" { + cltt 0 2017/12/24 10:53:29; + iaaddr 2001:db8:: { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 10:53:29; + } +} + +ia-na "#\2340\000\000\000\000\000!\300\021]0\234#e\212\261" { + cltt 6 2017/12/23 23:59:58; + iaaddr 2001:db8::1 { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 23:59:58; + } +} + +ia-na "\000\000\000\000\000\001\000\000 \000\301\267xOCl\313\310" { + cltt 0 2017/12/24 02:11:08; + iaaddr 2001:db8::2 { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 02:11:08; + } +} + +ia-na "'\000\000\000\000\000\000\001\027.\010\225\010\000'C8\353" { + cltt 0 2017/12/24 00:48:39; + iaaddr 2001:db8::3 { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 18:48:39; + } +} + +ia-na "\000\000\000\000\000\000\000\265H\006n\305F\351\270i\014\326q\023J\347" { + cltt 0 2017/12/24 01:53:15; + iaaddr 2001:db8::4 { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 14:53:15; + } +} + +ia-na "\000\000\000\000\000\000\000\000 \010\351\267xOCl\313\310" { + cltt 0 2017/12/24 11:33:17; + iaaddr 2001:db8::5 { + binding state active; + preferred-life 604800; + max-life 2592000; + ends 2 2020/09/30 11:33:17; + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/README.md b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/README.md new file mode 120000 index 00000000000000..036630b3e4f5b1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/README.md @@ -0,0 +1 @@ +integrations/kubelet.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/charts.go 
b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/charts.go new file mode 100644 index 00000000000000..6c80376fcd95d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/charts.go @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubelet + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Chart is an alias for module.Chart + Chart = module.Chart + // Dims is an alias for module.Dims + Dims = module.Dims + // Dim is an alias for module.Dim + Dim = module.Dim +) + +var charts = Charts{ + { + ID: "apiserver_audit_requests_rejected_total", + Title: "API Server Audit Requests", + Units: "requests/s", + Fam: "api server", + Ctx: "k8s_kubelet.apiserver_audit_requests_rejected", + Dims: Dims{ + {ID: "apiserver_audit_requests_rejected_total", Name: "rejected", Algo: module.Incremental}, + }, + }, + { + ID: "apiserver_storage_data_key_generation_failures_total", + Title: "API Server Failed Data Encryption Key(DEK) Generation Operations", + Units: "events/s", + Fam: "api server", + Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_failures", + Dims: Dims{ + {ID: "apiserver_storage_data_key_generation_failures_total", Name: "failures", Algo: module.Incremental}, + }, + }, + { + ID: "apiserver_storage_data_key_generation_latencies", + Title: "API Server Latencies Of Data Encryption Key(DEK) Generation Operations", + Units: "observes/s", + Fam: "api server", + Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_latencies", + Type: module.Stacked, + Dims: Dims{ + {ID: "apiserver_storage_data_key_generation_bucket_5", Name: "5 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_10", Name: "10 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_20", Name: "20 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_40", Name: "40 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_80", Name: "80 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_160", Name: "160 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_320", Name: "320 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_640", Name: "640 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_1280", Name: "1280 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_2560", Name: "2560 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_5120", Name: "5120 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_10240", Name: "10240 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_20480", Name: "20480 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_40960", Name: "40960 µs", Algo: module.Incremental}, + {ID: "apiserver_storage_data_key_generation_bucket_+Inf", Name: "+Inf", Algo: module.Incremental}, + }, + }, + { + ID: "apiserver_storage_data_key_generation_latencies_percentage", + Title: "API Server Latencies Of Data Encryption Key(DEK) Generation Operations Percentage", + Units: "%", + Fam: "api server", + Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent", + Type: module.Stacked, + Dims: Dims{ + {ID: "apiserver_storage_data_key_generation_bucket_5", Name: "5 µs", Algo: 
module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_10", Name: "10 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_20", Name: "20 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_40", Name: "40 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_80", Name: "80 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_160", Name: "160 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_320", Name: "320 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_640", Name: "640 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_1280", Name: "1280 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_2560", Name: "2560 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_5120", Name: "5120 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_10240", Name: "10240 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_20480", Name: "20480 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_40960", Name: "40960 µs", Algo: module.PercentOfIncremental}, + {ID: "apiserver_storage_data_key_generation_bucket_+Inf", Name: "+Inf", Algo: module.PercentOfIncremental}, + }, + }, + { + ID: "apiserver_storage_envelope_transformation_cache_misses_total", + Title: "API Server Storage Envelope Transformation Cache Misses", + Units: "events/s", + Fam: "api server", + Ctx: "k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses", + Dims: Dims{ + {ID: "apiserver_storage_envelope_transformation_cache_misses_total", Name: "cache misses", Algo: module.Incremental}, + }, + }, + { + ID: "kubelet_containers_running", + Title: "Number Of Containers Currently Running", + Units: "running containers", + Fam: "containers", + Ctx: "k8s_kubelet.kubelet_containers_running", + Dims: Dims{ + {ID: "kubelet_running_container", Name: "total"}, + }, + }, + { + ID: "kubelet_pods_running", + Title: "Number Of Pods Currently Running", + Units: "running pods", + Fam: "pods", + Ctx: "k8s_kubelet.kubelet_pods_running", + Dims: Dims{ + {ID: "kubelet_running_pod", Name: "total"}, + }, + }, + { + ID: "kubelet_pods_log_filesystem_used_bytes", + Title: "Bytes Used By The Pod Logs On The Filesystem", + Units: "B", + Fam: "pods", + Ctx: "k8s_kubelet.kubelet_pods_log_filesystem_used_bytes", + Type: module.Stacked, + }, + { + ID: "kubelet_runtime_operations", + Title: "Runtime Operations By Type", + Units: "operations/s", + Fam: "operations", + Ctx: "k8s_kubelet.kubelet_runtime_operations", + Type: module.Stacked, + }, + { + ID: "kubelet_runtime_operations_errors", + Title: "Runtime Operations Errors By Type", + Units: "errors/s", + Fam: "operations", + Ctx: "k8s_kubelet.kubelet_runtime_operations_errors", + Type: module.Stacked, + }, + { + ID: "kubelet_docker_operations", + Title: "Docker Operations By Type", + Units: "operations/s", + Fam: "operations", + Ctx: "k8s_kubelet.kubelet_docker_operations", + Type: module.Stacked, + }, + { + ID: "kubelet_docker_operations_errors", + Title: "Docker Operations Errors By Type", + Units: "errors/s", + Fam: "operations", + Ctx: "k8s_kubelet.kubelet_docker_operations_errors", + Type: 
module.Stacked, + }, + { + ID: "kubelet_node_config_error", + Title: "Node Configuration-Related Error", + Units: "bool", + Fam: "config error", + Ctx: "k8s_kubelet.kubelet_node_config_error", + Dims: Dims{ + {ID: "kubelet_node_config_error", Name: "experiencing_error"}, + }, + }, + { + ID: "kubelet_pleg_relist_interval_microseconds", + Title: "PLEG Relisting Interval Summary", + Units: "microseconds", + Fam: "pleg relisting", + Ctx: "k8s_kubelet.kubelet_pleg_relist_interval_microseconds", + Type: module.Stacked, + Dims: Dims{ + {ID: "kubelet_pleg_relist_interval_05", Name: "0.5"}, + {ID: "kubelet_pleg_relist_interval_09", Name: "0.9"}, + {ID: "kubelet_pleg_relist_interval_099", Name: "0.99"}, + }, + }, + { + ID: "kubelet_pleg_relist_latency_microseconds", + Title: "PLEG Relisting Latency Summary", + Units: "microseconds", + Fam: "pleg relisting", + Ctx: "k8s_kubelet.kubelet_pleg_relist_latency_microseconds", + Type: module.Stacked, + Dims: Dims{ + {ID: "kubelet_pleg_relist_latency_05", Name: "0.5"}, + {ID: "kubelet_pleg_relist_latency_09", Name: "0.9"}, + {ID: "kubelet_pleg_relist_latency_099", Name: "0.99"}, + }, + }, + { + ID: "kubelet_token_requests", + Title: "Token() Requests To The Alternate Token Source", + Units: "token requests/s", + Fam: "token", + Ctx: "k8s_kubelet.kubelet_token_requests", + Dims: Dims{ + {ID: "token_count", Name: "total", Algo: module.Incremental}, + {ID: "token_fail_count", Name: "failed", Algo: module.Incremental}, + }, + }, + { + ID: "rest_client_requests_by_code", + Title: "HTTP Requests By Status Code", + Units: "requests/s", + Fam: "rest client", + Ctx: "k8s_kubelet.rest_client_requests_by_code", + Type: module.Stacked, + }, + { + ID: "rest_client_requests_by_method", + Title: "HTTP Requests By Method", + Units: "requests/s", + Fam: "rest client", + Ctx: "k8s_kubelet.rest_client_requests_by_method", + Type: module.Stacked, + }, +} + +func newVolumeManagerChart(name string) *Chart { + return &Chart{ + ID: "volume_manager_total_volumes_" + name, + Title: "Volume Manager State Of The World, Plugin " + name, + Units: "state", + Fam: "volume manager", + Ctx: "k8s_kubelet.volume_manager_total_volumes", + Dims: Dims{ + {ID: "volume_manager_plugin_" + name + "_state_actual", Name: "actual"}, + {ID: "volume_manager_plugin_" + name + "_state_desired", Name: "desired"}, + }, + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/collect.go b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/collect.go new file mode 100644 index 00000000000000..6aad42715d4f2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/collect.go @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubelet + +import ( + "math" + + mtx "github.com/netdata/go.d.plugin/pkg/metrics" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (k *Kubelet) collect() (map[string]int64, error) { + raw, err := k.prom.ScrapeSeries() + + if err != nil { + return nil, err + } + + mx := newMetrics() + + k.collectToken(raw, mx) + k.collectRESTClientHTTPRequests(raw, mx) + k.collectAPIServer(raw, mx) + k.collectKubelet(raw, mx) + k.collectVolumeManager(raw, mx) + + return stm.ToMap(mx), nil +} + +func (k *Kubelet) collectLogsUsagePerPod(raw prometheus.Series, mx *metrics) { + chart := k.charts.Get("kubelet_pods_log_filesystem_used_bytes") + seen := make(map[string]bool) + + for _, metric := range 
raw.FindByName("kubelet_container_log_filesystem_used_bytes") { + pod := metric.Labels.Get("pod") + namespace := metric.Labels.Get("namespace") + + if pod == "" || namespace == "" { + continue + } + + key := namespace + "_" + pod + dimID := "kubelet_log_file_system_usage_" + key + + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: pod}) + chart.MarkNotCreated() + } + + seen[dimID] = true + v := mx.Kubelet.PodLogFileSystemUsage[key] + v.Add(metric.Value) + mx.Kubelet.PodLogFileSystemUsage[key] = v + } + + for _, dim := range chart.Dims { + if seen[dim.ID] { + continue + } + _ = chart.MarkDimRemove(dim.ID, false) + chart.MarkNotCreated() + } +} + +func (k *Kubelet) collectVolumeManager(raw prometheus.Series, mx *metrics) { + vmPlugins := make(map[string]*volumeManagerPlugin) + + for _, metric := range raw.FindByName("volume_manager_total_volumes") { + pluginName := metric.Labels.Get("plugin_name") + state := metric.Labels.Get("state") + + if !k.collectedVMPlugins[pluginName] { + _ = k.charts.Add(newVolumeManagerChart(pluginName)) + k.collectedVMPlugins[pluginName] = true + } + if _, ok := vmPlugins[pluginName]; !ok { + vmPlugins[pluginName] = &volumeManagerPlugin{} + } + + switch state { + case "actual_state_of_world": + vmPlugins[pluginName].State.Actual.Set(metric.Value) + case "desired_state_of_world": + vmPlugins[pluginName].State.Desired.Set(metric.Value) + } + } + + mx.VolumeManager.Plugins = vmPlugins +} + +func (k *Kubelet) collectKubelet(raw prometheus.Series, mx *metrics) { + value := raw.FindByName("kubelet_node_config_error").Max() + mx.Kubelet.NodeConfigError.Set(value) + + /* + # HELP kubelet_running_containers [ALPHA] Number of containers currently running + # TYPE kubelet_running_containers gauge + kubelet_running_containers{container_state="created"} 1 + kubelet_running_containers{container_state="exited"} 13 + kubelet_running_containers{container_state="running"} 42 + kubelet_running_containers{container_state="unknown"} 1 + */ + + ms := raw.FindByName("kubelet_running_container_count") + value = ms.Max() + if ms.Len() == 0 { + for _, m := range raw.FindByName("kubelet_running_containers") { + if m.Labels.Get("container_state") == "running" { + value = m.Value + break + } + } + } + mx.Kubelet.RunningContainerCount.Set(value) + + /* + # HELP kubelet_running_pods [ALPHA] Number of pods currently running + # TYPE kubelet_running_pods gauge + kubelet_running_pods 37 + */ + value = raw.FindByNames("kubelet_running_pod_count", "kubelet_running_pods").Max() + mx.Kubelet.RunningPodCount.Set(value) + + k.collectRuntimeOperations(raw, mx) + k.collectRuntimeOperationsErrors(raw, mx) + k.collectDockerOperations(raw, mx) + k.collectDockerOperationsErrors(raw, mx) + k.collectPLEGRelisting(raw, mx) + k.collectLogsUsagePerPod(raw, mx) +} + +func (k *Kubelet) collectAPIServer(raw prometheus.Series, mx *metrics) { + value := raw.FindByName("apiserver_audit_requests_rejected_total").Max() + mx.APIServer.Audit.Requests.Rejected.Set(value) + + value = raw.FindByName("apiserver_storage_data_key_generation_failures_total").Max() + mx.APIServer.Storage.DataKeyGeneration.Failures.Set(value) + + value = raw.FindByName("apiserver_storage_envelope_transformation_cache_misses_total").Max() + mx.APIServer.Storage.EnvelopeTransformation.CacheMisses.Set(value) + + k.collectStorageDataKeyGenerationLatencies(raw, mx) +} + +func (k *Kubelet) collectToken(raw prometheus.Series, mx *metrics) { + value := raw.FindByName("get_token_count").Max() + mx.Token.Count.Set(value) + + value = 
raw.FindByName("get_token_fail_count").Max() + mx.Token.FailCount.Set(value) +} + +func (k *Kubelet) collectPLEGRelisting(raw prometheus.Series, mx *metrics) { + // Summary + for _, metric := range raw.FindByName("kubelet_pleg_relist_interval_microseconds") { + if math.IsNaN(metric.Value) { + continue + } + quantile := metric.Labels.Get("quantile") + switch quantile { + case "0.5": + mx.Kubelet.PLEG.Relist.Interval.Quantile05.Set(metric.Value) + case "0.9": + mx.Kubelet.PLEG.Relist.Interval.Quantile09.Set(metric.Value) + case "0.99": + mx.Kubelet.PLEG.Relist.Interval.Quantile099.Set(metric.Value) + } + } + for _, metric := range raw.FindByName("kubelet_pleg_relist_latency_microseconds") { + if math.IsNaN(metric.Value) { + continue + } + quantile := metric.Labels.Get("quantile") + switch quantile { + case "0.5": + mx.Kubelet.PLEG.Relist.Latency.Quantile05.Set(metric.Value) + case "0.9": + mx.Kubelet.PLEG.Relist.Latency.Quantile09.Set(metric.Value) + case "0.99": + mx.Kubelet.PLEG.Relist.Latency.Quantile099.Set(metric.Value) + } + } +} + +func (k *Kubelet) collectStorageDataKeyGenerationLatencies(raw prometheus.Series, mx *metrics) { + latencies := &mx.APIServer.Storage.DataKeyGeneration.Latencies + metricName := "apiserver_storage_data_key_generation_latencies_microseconds_bucket" + + for _, metric := range raw.FindByName(metricName) { + value := metric.Value + bucket := metric.Labels.Get("le") + switch bucket { + case "5": + latencies.LE5.Set(value) + case "10": + latencies.LE10.Set(value) + case "20": + latencies.LE20.Set(value) + case "40": + latencies.LE40.Set(value) + case "80": + latencies.LE80.Set(value) + case "160": + latencies.LE160.Set(value) + case "320": + latencies.LE320.Set(value) + case "640": + latencies.LE640.Set(value) + case "1280": + latencies.LE1280.Set(value) + case "2560": + latencies.LE2560.Set(value) + case "5120": + latencies.LE5120.Set(value) + case "10240": + latencies.LE10240.Set(value) + case "20480": + latencies.LE20480.Set(value) + case "40960": + latencies.LE40960.Set(value) + case "+Inf": + latencies.LEInf.Set(value) + } + } + + latencies.LEInf.Sub(latencies.LE40960.Value()) + latencies.LE40960.Sub(latencies.LE20480.Value()) + latencies.LE20480.Sub(latencies.LE10240.Value()) + latencies.LE10240.Sub(latencies.LE5120.Value()) + latencies.LE5120.Sub(latencies.LE2560.Value()) + latencies.LE2560.Sub(latencies.LE1280.Value()) + latencies.LE1280.Sub(latencies.LE640.Value()) + latencies.LE640.Sub(latencies.LE320.Value()) + latencies.LE320.Sub(latencies.LE160.Value()) + latencies.LE160.Sub(latencies.LE80.Value()) + latencies.LE80.Sub(latencies.LE40.Value()) + latencies.LE40.Sub(latencies.LE20.Value()) + latencies.LE20.Sub(latencies.LE10.Value()) + latencies.LE10.Sub(latencies.LE5.Value()) +} + +func (k *Kubelet) collectRESTClientHTTPRequests(raw prometheus.Series, mx *metrics) { + metricName := "rest_client_requests_total" + chart := k.charts.Get("rest_client_requests_by_code") + + for _, metric := range raw.FindByName(metricName) { + code := metric.Labels.Get("code") + if code == "" { + continue + } + dimID := "rest_client_requests_" + code + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: code, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.RESTClient.Requests.ByStatusCode[code] = mtx.Gauge(metric.Value) + } + + chart = k.charts.Get("rest_client_requests_by_method") + + for _, metric := range raw.FindByName(metricName) { + method := metric.Labels.Get("method") + if method == "" { + continue + } + dimID := "rest_client_requests_" + 
method + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: method, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.RESTClient.Requests.ByMethod[method] = mtx.Gauge(metric.Value) + } +} + +func (k *Kubelet) collectRuntimeOperations(raw prometheus.Series, mx *metrics) { + chart := k.charts.Get("kubelet_runtime_operations") + + // kubelet_runtime_operations_total + for _, metric := range raw.FindByNames("kubelet_runtime_operations", "kubelet_runtime_operations_total") { + opType := metric.Labels.Get("operation_type") + if opType == "" { + continue + } + dimID := "kubelet_runtime_operations_" + opType + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.Kubelet.Runtime.Operations[opType] = mtx.Gauge(metric.Value) + } +} + +func (k *Kubelet) collectRuntimeOperationsErrors(raw prometheus.Series, mx *metrics) { + chart := k.charts.Get("kubelet_runtime_operations_errors") + + // kubelet_runtime_operations_errors_total + for _, metric := range raw.FindByNames("kubelet_runtime_operations_errors", "kubelet_runtime_operations_errors_total") { + opType := metric.Labels.Get("operation_type") + if opType == "" { + continue + } + dimID := "kubelet_runtime_operations_errors_" + opType + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.Kubelet.Runtime.OperationsErrors[opType] = mtx.Gauge(metric.Value) + } +} + +func (k *Kubelet) collectDockerOperations(raw prometheus.Series, mx *metrics) { + chart := k.charts.Get("kubelet_docker_operations") + + // kubelet_docker_operations_total + for _, metric := range raw.FindByNames("kubelet_docker_operations", "kubelet_docker_operations_total") { + opType := metric.Labels.Get("operation_type") + if opType == "" { + continue + } + dimID := "kubelet_docker_operations_" + opType + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.Kubelet.Docker.Operations[opType] = mtx.Gauge(metric.Value) + } +} + +func (k *Kubelet) collectDockerOperationsErrors(raw prometheus.Series, mx *metrics) { + chart := k.charts.Get("kubelet_docker_operations_errors") + + // kubelet_docker_operations_errors_total + for _, metric := range raw.FindByNames("kubelet_docker_operations_errors", "kubelet_docker_operations_errors_total") { + opType := metric.Labels.Get("operation_type") + if opType == "" { + continue + } + dimID := "kubelet_docker_operations_errors_" + opType + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.Kubelet.Docker.OperationsErrors[opType] = mtx.Gauge(metric.Value) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/config_schema.json b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/config_schema.json new file mode 100644 index 00000000000000..6e42187f2e0a53 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/config_schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/k8s_kubelet job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "token_path": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": 
"string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/integrations/kubelet.md b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/integrations/kubelet.md new file mode 100644 index 00000000000000..fdd1a6329f8bc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/integrations/kubelet.md @@ -0,0 +1,219 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_kubelet/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_kubelet/metadata.yaml" +sidebar_label: "Kubelet" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kubelet + + +<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/> + + +Plugin: go.d.plugin +Module: k8s_kubelet + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Kubelet instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Kubelet instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_µs, 10_µs, 20_µs, 40_µs, 80_µs, 160_µs, 320_µs, 640_µs, 1280_µs, 2560_µs, 5120_µs, 10240_µs, 20480_µs, 40960_µs, +Inf | observes/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_µs, 10_µs, 20_µs, 40_µs, 80_µs, 160_µs, 320_µs, 640_µs, 1280_µs, 2560_µs, 5120_µs, 10240_µs, 20480_µs, 40960_µs, +Inf | percentage |
+| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |
+| k8s_kubelet.kubelet_containers_running | total | running_containers |
+| k8s_kubelet.kubelet_pods_running | total | running_pods |
+| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |
+| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |
+| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |
+| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |
+| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |
+| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |
+| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |
+| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |
+| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |
+| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |
+| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |
+
+### Per volume manager
+
+These metrics refer to the Volume Manager.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |
+| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |
+| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/k8s_kubelet.conf`.
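+
+The collector also reads the Kubernetes service account token from
+`/var/run/secrets/kubernetes.io/serviceaccount/token` (configurable via the `token_path` option) and, when the file is
+readable, sends it as an `Authorization: Bearer` header with every request. A minimal sketch that overrides the token
+location (the path shown is hypothetical, for illustration only):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:10255/metrics
+    token_path: /path/to/serviceaccount/token # hypothetical path
+```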
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/k8s_kubelet.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:10255/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:10255/metrics
+
+```
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:10250/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m k8s_kubelet
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet.go b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet.go
new file mode 100644
index 00000000000000..7f62c9f30410b6
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+	_ "embed"
+	"os"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("k8s_kubelet", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			// NETDATA_CHART_PRIO_CGROUPS_CONTAINERS        40000
+			Priority: 50000,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+// New creates Kubelet with default values.
+func New() *Kubelet {
+	config := Config{
+		HTTP: web.HTTP{
+			Request: web.Request{
+				URL:     "http://127.0.0.1:10255/metrics",
+				Headers: make(map[string]string),
+			},
+			Client: web.Client{
+				Timeout: web.Duration{Duration: time.Second},
+			},
+		},
+		TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token",
+	}
+
+	return &Kubelet{
+		Config:             config,
+		charts:             charts.Copy(),
+		collectedVMPlugins: make(map[string]bool),
+	}
+}
+
+type (
+	Config struct {
+		web.HTTP  `yaml:",inline"`
+		TokenPath string `yaml:"token_path"`
+	}
+
+	Kubelet struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		prom   prometheus.Prometheus
+		charts *Charts
+		// tracks plugins already seen on the volume_manager_total_volumes metric
+		collectedVMPlugins map[string]bool
+	}
+)
+
+// Cleanup performs cleanup.
+func (Kubelet) Cleanup() {}
+
+// Init performs initialization: it loads the service account token
+// (when readable) and creates the HTTP client.
+func (k *Kubelet) Init() bool {
+	b, err := os.ReadFile(k.TokenPath)
+	if err != nil {
+		k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err)
+	} else {
+		k.Request.Headers["Authorization"] = "Bearer " + string(b)
+	}
+
+	client, err := web.NewHTTPClient(k.Client)
+	if err != nil {
+		k.Errorf("error on creating http client: %v", err)
+		return false
+	}
+
+	k.prom = prometheus.New(client, k.Request)
+	return true
+}
+
+// Check reports whether metrics can be collected.
+func (k *Kubelet) Check() bool {
+	return len(k.Collect()) > 0
+}
+
+// Charts returns the charts.
+func (k Kubelet) Charts() *Charts {
+	return k.charts
+}
+
+// Collect collects metrics.
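+// On a scrape or parse error it logs the failure and returns nil,
+// so no values are reported for that data collection cycle.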
+func (k *Kubelet) Collect() map[string]int64 { + mx, err := k.collect() + + if err != nil { + k.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet_test.go b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet_test.go new file mode 100644 index 00000000000000..a69a0724b30e1f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/kubelet_test.go @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubelet + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testMetricsData, _ = os.ReadFile("testdata/metrics.txt") + testTokenData, _ = os.ReadFile("testdata/token.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, testMetricsData) + assert.NotNil(t, testTokenData) +} + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*Kubelet)(nil), job) +} + +func TestKubelet_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestKubelet_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestKubelet_Init(t *testing.T) { + assert.True(t, New().Init()) +} + +func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) { + job := New() + job.TokenPath = "testdata/token.txt" + + assert.True(t, job.Init()) + assert.Equal(t, "Bearer "+string(testTokenData), job.Request.Headers["Authorization"]) +} + +func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { + job := New() + job.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, job.Init()) +} + +func TestKubelet_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testMetricsData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestKubelet_Check_ConnectionRefused(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestKubelet_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testMetricsData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "apiserver_audit_requests_rejected_total": 0, + "apiserver_storage_data_key_generation_bucket_+Inf": 1, + "apiserver_storage_data_key_generation_bucket_10": 1, + "apiserver_storage_data_key_generation_bucket_10240": 1, + "apiserver_storage_data_key_generation_bucket_1280": 1, + "apiserver_storage_data_key_generation_bucket_160": 1, + "apiserver_storage_data_key_generation_bucket_20": 1, + "apiserver_storage_data_key_generation_bucket_20480": 1, + "apiserver_storage_data_key_generation_bucket_2560": 1, + "apiserver_storage_data_key_generation_bucket_320": 1, + "apiserver_storage_data_key_generation_bucket_40": 1, + "apiserver_storage_data_key_generation_bucket_40960": 1, + "apiserver_storage_data_key_generation_bucket_5": 6, + "apiserver_storage_data_key_generation_bucket_5120": 1, + "apiserver_storage_data_key_generation_bucket_640": 1, + "apiserver_storage_data_key_generation_bucket_80": 1, + "apiserver_storage_data_key_generation_failures_total": 0, + "apiserver_storage_envelope_transformation_cache_misses_total": 0, + 
"kubelet_docker_operations_create_container": 19, + "kubelet_docker_operations_errors_inspect_container": 14, + "kubelet_docker_operations_errors_remove_container": 4, + "kubelet_docker_operations_info": 2, + "kubelet_docker_operations_inspect_container": 223, + "kubelet_docker_operations_inspect_image": 110, + "kubelet_docker_operations_list_containers": 5157, + "kubelet_docker_operations_list_images": 195, + "kubelet_docker_operations_remove_container": 23, + "kubelet_docker_operations_start_container": 19, + "kubelet_docker_operations_stop_container": 23, + "kubelet_docker_operations_version": 472, + "kubelet_log_file_system_usage_kube-system_coredns-86c58d9df4-d22hv": 28672, + "kubelet_log_file_system_usage_kube-system_coredns-86c58d9df4-ks5dj": 28672, + "kubelet_log_file_system_usage_kube-system_etcd-minikube": 36864, + "kubelet_log_file_system_usage_kube-system_kube-addon-manager-minikube": 45056, + "kubelet_log_file_system_usage_kube-system_kube-apiserver-minikube": 36864, + "kubelet_log_file_system_usage_kube-system_kube-controller-manager-minikube": 57344, + "kubelet_log_file_system_usage_kube-system_kube-proxy-q2fvs": 28672, + "kubelet_log_file_system_usage_kube-system_kube-scheduler-minikube": 40960, + "kubelet_log_file_system_usage_kube-system_storage-provisioner": 24576, + "kubelet_node_config_error": 1, + "kubelet_pleg_relist_interval_05": 1013125, + "kubelet_pleg_relist_interval_09": 1016820, + "kubelet_pleg_relist_interval_099": 1032022, + "kubelet_pleg_relist_latency_05": 12741, + "kubelet_pleg_relist_latency_09": 16211, + "kubelet_pleg_relist_latency_099": 31234, + "kubelet_running_container": 9, + "kubelet_running_pod": 9, + "kubelet_runtime_operations_container_status": 90, + "kubelet_runtime_operations_create_container": 10, + "kubelet_runtime_operations_errors_container_status": 14, + "kubelet_runtime_operations_errors_remove_container": 4, + "kubelet_runtime_operations_exec_sync": 138, + "kubelet_runtime_operations_image_status": 25, + "kubelet_runtime_operations_list_containers": 2586, + "kubelet_runtime_operations_list_images": 195, + "kubelet_runtime_operations_list_podsandbox": 2562, + "kubelet_runtime_operations_podsandbox_status": 77, + "kubelet_runtime_operations_remove_container": 14, + "kubelet_runtime_operations_run_podsandbox": 9, + "kubelet_runtime_operations_start_container": 10, + "kubelet_runtime_operations_status": 279, + "kubelet_runtime_operations_stop_podsandbox": 14, + "kubelet_runtime_operations_version": 190, + "rest_client_requests_200": 177, + "rest_client_requests_201": 43, + "rest_client_requests_403": 2, + "rest_client_requests_409": 1, + "rest_client_requests_<error>": 8, + "rest_client_requests_GET": 37, + "rest_client_requests_PATCH": 177, + "rest_client_requests_POST": 8, + "token_count": 0, + "token_fail_count": 0, + "volume_manager_plugin_kubernetes.io/configmap_state_actual": 3, + "volume_manager_plugin_kubernetes.io/configmap_state_desired": 3, + "volume_manager_plugin_kubernetes.io/host-path_state_actual": 15, + "volume_manager_plugin_kubernetes.io/host-path_state_desired": 15, + "volume_manager_plugin_kubernetes.io/secret_state_actual": 4, + "volume_manager_plugin_kubernetes.io/secret_state_desired": 4, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestKubelet_Collect_ReceiveInvalidResponse(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + 
"/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestKubelet_Collect_Receive404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metadata.yaml b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metadata.yaml new file mode 100644 index 00000000000000..0d5229bb572c91 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metadata.yaml @@ -0,0 +1,331 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-k8s_kubelet + plugin_name: go.d.plugin + module_name: k8s_kubelet + monitored_instance: + name: Kubelet + link: https://kubernetes.io/docs/concepts/overview/components/#kubelet + icon_filename: kubernetes.svg + categories: + - data-collection.kubernetes + keywords: + - kubelet + - kubernetes + - k8s + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Kubelet instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/k8s_kubelet.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:10255/metrics + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: "GET" + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. 
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:10255/metrics
+            - name: HTTPS with self-signed certificate
+              description: |
+                Do not validate server certificate chain and hostname.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:10250/metrics
+                    tls_skip_verify: yes
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: kubelet_node_config_error
+        metric: k8s_kubelet.kubelet_node_config_error
+        info: "the node is experiencing a configuration-related error (0: false, 1: true)"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+      - name: kubelet_token_requests
+        metric: k8s_kubelet.kubelet_token_requests
+        info: "number of failed Token() requests to the alternate token source"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+      - name: kubelet_operations_error
+        metric: k8s_kubelet.kubelet_operations_errors
+        info: number of Docker or runtime operation errors
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+ labels: [] + metrics: + - name: k8s_kubelet.apiserver_audit_requests_rejected + description: API Server Audit Requests + unit: requests/s + chart_type: line + dimensions: + - name: rejected + - name: k8s_kubelet.apiserver_storage_data_key_generation_failures + description: API Server Failed Data Encryption Key(DEK) Generation Operations + unit: events/s + chart_type: line + dimensions: + - name: failures + - name: k8s_kubelet.apiserver_storage_data_key_generation_latencies + description: API Server Latencies Of Data Encryption Key(DEK) Generation Operations + unit: observes/s + chart_type: stacked + dimensions: + - name: 5_µs + - name: 10_µs + - name: 20_µs + - name: 40_µs + - name: 80_µs + - name: 160_µs + - name: 320_µs + - name: 640_µs + - name: 1280_µs + - name: 2560_µs + - name: 5120_µs + - name: 10240_µs + - name: 20480_µs + - name: 40960_µs + - name: +Inf + - name: k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent + description: API Server Latencies Of Data Encryption Key(DEK) Generation Operations Percentage + unit: percentage + chart_type: stacked + dimensions: + - name: 5_µs + - name: 10_µs + - name: 20_µs + - name: 40_µs + - name: 80_µs + - name: 160_µs + - name: 320_µs + - name: 640_µs + - name: 1280_µs + - name: 2560_µs + - name: 5120_µs + - name: 10240_µs + - name: 20480_µs + - name: 40960_µs + - name: +Inf + - name: k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses + description: API Server Storage Envelope Transformation Cache Misses + unit: events/s + chart_type: line + dimensions: + - name: cache misses + - name: k8s_kubelet.kubelet_containers_running + description: Number Of Containers Currently Running + unit: running_containers + chart_type: line + dimensions: + - name: total + - name: k8s_kubelet.kubelet_pods_running + description: Number Of Pods Currently Running + unit: running_pods + chart_type: line + dimensions: + - name: total + - name: k8s_kubelet.kubelet_pods_log_filesystem_used_bytes + description: Bytes Used By The Pod Logs On The Filesystem + unit: B + chart_type: stacked + dimensions: + - name: a dimension per namespace and pod + - name: k8s_kubelet.kubelet_runtime_operations + description: Runtime Operations By Type + unit: operations/s + chart_type: stacked + dimensions: + - name: a dimension per operation type + - name: k8s_kubelet.kubelet_runtime_operations_errors + description: Runtime Operations Errors By Type + unit: errors/s + chart_type: stacked + dimensions: + - name: a dimension per operation type + - name: k8s_kubelet.kubelet_docker_operations + description: Docker Operations By Type + unit: operations/s + chart_type: stacked + dimensions: + - name: a dimension per operation type + - name: k8s_kubelet.kubelet_docker_operations_errors + description: Docker Operations Errors By Type + unit: errors/s + chart_type: stacked + dimensions: + - name: a dimension per operation type + - name: k8s_kubelet.kubelet_node_config_error + description: Node Configuration-Related Error + unit: bool + chart_type: line + dimensions: + - name: experiencing_error + - name: k8s_kubelet.kubelet_pleg_relist_interval_microseconds + description: PLEG Relisting Interval Summary + unit: microseconds + chart_type: stacked + dimensions: + - name: "0.5" + - name: "0.9" + - name: "0.99" + - name: k8s_kubelet.kubelet_pleg_relist_latency_microseconds + description: PLEG Relisting Latency Summary + unit: microseconds + chart_type: stacked + dimensions: + - name: "0.5" + - name: "0.9" + - name: "0.99" + - name: k8s_kubelet.kubelet_token_requests 
+              description: Token() Requests To The Alternate Token Source
+              unit: token_requests/s
+              chart_type: line
+              dimensions:
+                - name: total
+                - name: failed
+            - name: k8s_kubelet.rest_client_requests_by_code
+              description: HTTP Requests By Status Code
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per HTTP status code
+            - name: k8s_kubelet.rest_client_requests_by_method
+              description: HTTP Requests By Method
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per HTTP method
+        - name: volume manager
+          description: These metrics refer to the Volume Manager.
+          labels: []
+          metrics:
+            - name: k8s_kubelet.volume_manager_total_volumes
+              description: Volume Manager State Of The World
+              unit: state
+              chart_type: line
+              dimensions:
+                - name: actual
+                - name: desired
diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metrics.go b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metrics.go
new file mode 100644
index 00000000000000..9ccd84d1d1f9ed
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/metrics.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+	mtx "github.com/netdata/go.d.plugin/pkg/metrics"
+)
+
+func newMetrics() *metrics {
+	var mx metrics
+	mx.RESTClient.Requests.ByStatusCode = make(map[string]mtx.Gauge)
+	mx.RESTClient.Requests.ByMethod = make(map[string]mtx.Gauge)
+	mx.Kubelet.Runtime.Operations = make(map[string]mtx.Gauge)
+	mx.Kubelet.Runtime.OperationsErrors = make(map[string]mtx.Gauge)
+	mx.Kubelet.Docker.Operations = make(map[string]mtx.Gauge)
+	mx.Kubelet.Docker.OperationsErrors = make(map[string]mtx.Gauge)
+	mx.Kubelet.PodLogFileSystemUsage = make(map[string]mtx.Gauge)
+
+	return &mx
+}
+
+type metrics struct {
+	Token         tokenMetrics         `stm:"token"`
+	RESTClient    restClientMetrics    `stm:"rest_client"`
+	APIServer     apiServerMetrics     `stm:"apiserver"`
+	Kubelet       kubeletMetrics       `stm:"kubelet"`
+	VolumeManager volumeManagerMetrics `stm:"volume_manager"`
+}
+
+type tokenMetrics struct {
+	Count     mtx.Gauge `stm:"count"`
+	FailCount mtx.Gauge `stm:"fail_count"`
+}
+
+type restClientMetrics struct {
+	Requests struct {
+		ByStatusCode map[string]mtx.Gauge `stm:""`
+		ByMethod     map[string]mtx.Gauge `stm:""`
+	} `stm:"requests"`
+}
+
+type apiServerMetrics struct {
+	Audit struct {
+		Requests struct {
+			Rejected mtx.Gauge `stm:"rejected_total"`
+		} `stm:"requests"`
+	} `stm:"audit"`
+	Storage struct {
+		EnvelopeTransformation struct {
+			CacheMisses mtx.Gauge `stm:"cache_misses_total"`
+		} `stm:"envelope_transformation"`
+		DataKeyGeneration struct {
+			Failures  mtx.Gauge `stm:"failures_total"`
+			Latencies struct {
+				LE5     mtx.Gauge `stm:"5"`
+				LE10    mtx.Gauge `stm:"10"`
+				LE20    mtx.Gauge `stm:"20"`
+				LE40    mtx.Gauge `stm:"40"`
+				LE80    mtx.Gauge `stm:"80"`
+				LE160   mtx.Gauge `stm:"160"`
+				LE320   mtx.Gauge `stm:"320"`
+				LE640   mtx.Gauge `stm:"640"`
+				LE1280  mtx.Gauge `stm:"1280"`
+				LE2560  mtx.Gauge `stm:"2560"`
+				LE5120  mtx.Gauge `stm:"5120"`
+				LE10240 mtx.Gauge `stm:"10240"`
+				LE20480 mtx.Gauge `stm:"20480"`
+				LE40960 mtx.Gauge `stm:"40960"`
+				LEInf   mtx.Gauge `stm:"+Inf"`
+			} `stm:"bucket"`
+		} `stm:"data_key_generation"`
+	} `stm:"storage"`
+}
+
+type kubeletMetrics struct {
+	NodeConfigError       mtx.Gauge `stm:"node_config_error"`
+	RunningContainerCount mtx.Gauge `stm:"running_container"`
+	RunningPodCount       mtx.Gauge `stm:"running_pod"`
+	PLEG struct {
+		Relist struct {
+			Interval struct {
+				Quantile05  mtx.Gauge `stm:"05"`
+				Quantile09  mtx.Gauge `stm:"09"`
+				Quantile099
mtx.Gauge `stm:"099"` + } `stm:"interval"` + Latency struct { + Quantile05 mtx.Gauge `stm:"05"` + Quantile09 mtx.Gauge `stm:"09"` + Quantile099 mtx.Gauge `stm:"099"` + } `stm:"latency"` + } `stm:"relist"` + } `stm:"pleg"` + Runtime struct { + Operations map[string]mtx.Gauge `stm:"operations"` + OperationsErrors map[string]mtx.Gauge `stm:"operations_errors"` + } `stm:"runtime"` + Docker struct { + Operations map[string]mtx.Gauge `stm:"operations"` + OperationsErrors map[string]mtx.Gauge `stm:"operations_errors"` + } `stm:"docker"` + PodLogFileSystemUsage map[string]mtx.Gauge `stm:"log_file_system_usage"` +} + +type volumeManagerMetrics struct { + Plugins map[string]*volumeManagerPlugin `stm:"plugin"` +} + +type volumeManagerPlugin struct { + State struct { + Actual mtx.Gauge `stm:"actual"` + Desired mtx.Gauge `stm:"desired"` + } `stm:"state"` +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/metrics.txt b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/metrics.txt new file mode 100644 index 00000000000000..47b63bd5542e3f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/metrics.txt @@ -0,0 +1,574 @@ +# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. +# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request. +# TYPE apiserver_client_certificate_expiration_seconds histogram +apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 2 +apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 2 +apiserver_client_certificate_expiration_seconds_sum 6.198359653913356e+07 +apiserver_client_certificate_expiration_seconds_count 2 +# HELP apiserver_storage_data_key_generation_failures_total Total number of failed data encryption key(DEK) generation operations. +# TYPE apiserver_storage_data_key_generation_failures_total counter +apiserver_storage_data_key_generation_failures_total 0 +# HELP apiserver_storage_data_key_generation_latencies_microseconds Latencies in microseconds of data encryption key(DEK) generation operations. 
+# TYPE apiserver_storage_data_key_generation_latencies_microseconds histogram +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="5"} 6 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="10"} 7 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="20"} 8 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="40"} 9 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="80"} 10 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="160"} 11 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="320"} 12 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="640"} 13 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="1280"} 14 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="2560"} 15 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="5120"} 16 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="10240"} 17 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="20480"} 18 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="40960"} 19 +apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="+Inf"} 20 +apiserver_storage_data_key_generation_latencies_microseconds_sum 0 +apiserver_storage_data_key_generation_latencies_microseconds_count 0 +# HELP apiserver_storage_envelope_transformation_cache_misses_total Total number of cache misses while accessing key decryption key(KEK). +# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter +apiserver_storage_envelope_transformation_cache_misses_total 0 +# HELP get_token_count Counter of total Token() requests to the alternate token source +# TYPE get_token_count counter +get_token_count 0 +# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source +# TYPE get_token_fail_count counter +get_token_fail_count 0 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 3.1848e-05 +go_gc_duration_seconds{quantile="0.25"} 6.1739e-05 +go_gc_duration_seconds{quantile="0.5"} 9.1641e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000143403 +go_gc_duration_seconds{quantile="1"} 0.003400982 +go_gc_duration_seconds_sum 0.041302468 +go_gc_duration_seconds_count 252 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 282 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 2.2614512e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.851571192e+09 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.81591e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 1.9710993e+07 +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. +# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 0.0005851177440973569 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. 
+# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 2.41664e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 2.2614512e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 3.8526976e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 2.5796608e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 114479 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.4323584e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.552938975118211e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 1.9825472e+07 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 3456 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 361304 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 409600 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 2.612264e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 517010 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 2.78528e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 2.78528e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.2284408e+07 +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 19 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 4933.921 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 4933.921 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 4933.921 +http_request_duration_microseconds_sum{handler="prometheus"} 283201.29 +http_request_duration_microseconds_count{handler="prometheus"} 31 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="prometheus",quantile="0.5"} 423 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 423 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 423 +http_request_size_bytes_sum{handler="prometheus"} 11711 +http_request_size_bytes_count{handler="prometheus"} 31 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="prometheus",method="get"} 31 +# HELP http_response_size_bytes The HTTP response sizes in bytes. +# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="prometheus",quantile="0.5"} 5678 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 5678 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 5678 +http_response_size_bytes_sum{handler="prometheus"} 178006 +http_response_size_bytes_count{handler="prometheus"} 31 +# HELP kubelet_cgroup_manager_latency_microseconds Latency in microseconds for cgroup manager operations. Broken down by method. +# TYPE kubelet_cgroup_manager_latency_microseconds summary +kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.5"} NaN +kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.9"} NaN +kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.99"} NaN +kubelet_cgroup_manager_latency_microseconds_sum{operation_type="create"} 96365 +kubelet_cgroup_manager_latency_microseconds_count{operation_type="create"} 12 +kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.5"} 91 +kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.9"} 193 +kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.99"} 208 +kubelet_cgroup_manager_latency_microseconds_sum{operation_type="update"} 12921 +kubelet_cgroup_manager_latency_microseconds_count{operation_type="update"} 79 +# HELP kubelet_container_log_filesystem_used_bytes Bytes used by the container's logs on the filesystem. 
+# TYPE kubelet_container_log_filesystem_used_bytes gauge +kubelet_container_log_filesystem_used_bytes{container="coredns",namespace="kube-system",pod="coredns-86c58d9df4-d22hv"} 28672 +kubelet_container_log_filesystem_used_bytes{container="coredns",namespace="kube-system",pod="coredns-86c58d9df4-ks5dj"} 28672 +kubelet_container_log_filesystem_used_bytes{container="etcd",namespace="kube-system",pod="etcd-minikube"} 36864 +kubelet_container_log_filesystem_used_bytes{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 45056 +kubelet_container_log_filesystem_used_bytes{container="kube-apiserver",namespace="kube-system",pod="kube-apiserver-minikube"} 36864 +kubelet_container_log_filesystem_used_bytes{container="kube-controller-manager",namespace="kube-system",pod="kube-controller-manager-minikube"} 57344 +kubelet_container_log_filesystem_used_bytes{container="kube-proxy",namespace="kube-system",pod="kube-proxy-q2fvs"} 28672 +kubelet_container_log_filesystem_used_bytes{container="kube-scheduler",namespace="kube-system",pod="kube-scheduler-minikube"} 40960 +kubelet_container_log_filesystem_used_bytes{container="storage-provisioner",namespace="kube-system",pod="storage-provisioner"} 24576 +# HELP kubelet_containers_per_pod_count The number of containers per pod. +# TYPE kubelet_containers_per_pod_count summary +kubelet_containers_per_pod_count{quantile="0.5"} NaN +kubelet_containers_per_pod_count{quantile="0.9"} NaN +kubelet_containers_per_pod_count{quantile="0.99"} NaN +kubelet_containers_per_pod_count_sum 9 +kubelet_containers_per_pod_count_count 9 +# HELP kubelet_docker_operations Cumulative number of Docker operations by operation type. +# TYPE kubelet_docker_operations counter +kubelet_docker_operations{operation_type="create_container"} 19 +kubelet_docker_operations{operation_type="info"} 2 +kubelet_docker_operations{operation_type="inspect_container"} 223 +kubelet_docker_operations{operation_type="inspect_image"} 110 +kubelet_docker_operations{operation_type="list_containers"} 5157 +kubelet_docker_operations{operation_type="list_images"} 195 +kubelet_docker_operations{operation_type="remove_container"} 23 +kubelet_docker_operations{operation_type="start_container"} 19 +kubelet_docker_operations{operation_type="stop_container"} 23 +kubelet_docker_operations{operation_type="version"} 472 +# HELP kubelet_docker_operations_errors Cumulative number of Docker operation errors by operation type. +# TYPE kubelet_docker_operations_errors counter +kubelet_docker_operations_errors{operation_type="inspect_container"} 14 +kubelet_docker_operations_errors{operation_type="remove_container"} 4 +# HELP kubelet_docker_operations_latency_microseconds Latency in microseconds of Docker operations. Broken down by operation type. 
+# TYPE kubelet_docker_operations_latency_microseconds summary +kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.5"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="create_container"} 1.157649e+07 +kubelet_docker_operations_latency_microseconds_count{operation_type="create_container"} 19 +kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.5"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="info"} 15754 +kubelet_docker_operations_latency_microseconds_count{operation_type="info"} 2 +kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.5"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="inspect_container"} 6.320335e+06 +kubelet_docker_operations_latency_microseconds_count{operation_type="inspect_container"} 223 +kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.5"} 1112 +kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.9"} 1112 +kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.99"} 1112 +kubelet_docker_operations_latency_microseconds_sum{operation_type="inspect_image"} 276071 +kubelet_docker_operations_latency_microseconds_count{operation_type="inspect_image"} 110 +kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.5"} 3368 +kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.9"} 9003 +kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.99"} 16951 +kubelet_docker_operations_latency_microseconds_sum{operation_type="list_containers"} 2.2912964e+07 +kubelet_docker_operations_latency_microseconds_count{operation_type="list_containers"} 5157 +kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.5"} 3579 +kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.9"} 5431 +kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.99"} 7136 +kubelet_docker_operations_latency_microseconds_sum{operation_type="list_images"} 798789 +kubelet_docker_operations_latency_microseconds_count{operation_type="list_images"} 195 +kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.5"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="remove_container"} 5.297973e+06 +kubelet_docker_operations_latency_microseconds_count{operation_type="remove_container"} 23 +kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.5"} NaN 
+kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="start_container"} 1.5755618e+07 +kubelet_docker_operations_latency_microseconds_count{operation_type="start_container"} 19 +kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.5"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.9"} NaN +kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.99"} NaN +kubelet_docker_operations_latency_microseconds_sum{operation_type="stop_container"} 18810 +kubelet_docker_operations_latency_microseconds_count{operation_type="stop_container"} 23 +kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.5"} 869 +kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.9"} 1482 +kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.99"} 2426 +kubelet_docker_operations_latency_microseconds_sum{operation_type="version"} 455522 +kubelet_docker_operations_latency_microseconds_count{operation_type="version"} 472 +# HELP kubelet_network_plugin_operations_latency_microseconds Latency in microseconds of network plugin operations. Broken down by operation type. +# TYPE kubelet_network_plugin_operations_latency_microseconds summary +kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.5"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.9"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.99"} NaN +kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="get_pod_network_status"} 47 +kubelet_network_plugin_operations_latency_microseconds_count{operation_type="get_pod_network_status"} 11 +kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.5"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.9"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.99"} NaN +kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="set_up_pod"} 23 +kubelet_network_plugin_operations_latency_microseconds_count{operation_type="set_up_pod"} 2 +kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.5"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.9"} NaN +kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.99"} NaN +kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="tear_down_pod"} 29 +kubelet_network_plugin_operations_latency_microseconds_count{operation_type="tear_down_pod"} 4 +# HELP kubelet_node_config_error This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise. +# TYPE kubelet_node_config_error gauge +kubelet_node_config_error 1 +# HELP kubelet_pleg_relist_interval_microseconds Interval in microseconds between relisting in PLEG. 
+# TYPE kubelet_pleg_relist_interval_microseconds summary +kubelet_pleg_relist_interval_microseconds{quantile="0.5"} 1.013125e+06 +kubelet_pleg_relist_interval_microseconds{quantile="0.9"} 1.01682e+06 +kubelet_pleg_relist_interval_microseconds{quantile="0.99"} 1.032022e+06 +kubelet_pleg_relist_interval_microseconds_sum 1.392954348e+09 +kubelet_pleg_relist_interval_microseconds_count 1368 +# HELP kubelet_pleg_relist_latency_microseconds Latency in microseconds for relisting pods in PLEG. +# TYPE kubelet_pleg_relist_latency_microseconds summary +kubelet_pleg_relist_latency_microseconds{quantile="0.5"} 12741 +kubelet_pleg_relist_latency_microseconds{quantile="0.9"} 16211 +kubelet_pleg_relist_latency_microseconds{quantile="0.99"} 31234 +kubelet_pleg_relist_latency_microseconds_sum 2.4227856e+07 +kubelet_pleg_relist_latency_microseconds_count 1369 +# HELP kubelet_pod_start_latency_microseconds Latency in microseconds for a single pod to go from pending to running. +# TYPE kubelet_pod_start_latency_microseconds summary +kubelet_pod_start_latency_microseconds{quantile="0.5"} NaN +kubelet_pod_start_latency_microseconds{quantile="0.9"} NaN +kubelet_pod_start_latency_microseconds{quantile="0.99"} NaN +kubelet_pod_start_latency_microseconds_sum 2.884769e+06 +kubelet_pod_start_latency_microseconds_count 9 +# HELP kubelet_pod_worker_latency_microseconds Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync +# TYPE kubelet_pod_worker_latency_microseconds summary +kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.5"} NaN +kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.9"} NaN +kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.99"} NaN +kubelet_pod_worker_latency_microseconds_sum{operation_type="sync"} 412 +kubelet_pod_worker_latency_microseconds_count{operation_type="sync"} 1 +# HELP kubelet_pod_worker_start_latency_microseconds Latency in microseconds from seeing a pod to starting a worker. +# TYPE kubelet_pod_worker_start_latency_microseconds summary +kubelet_pod_worker_start_latency_microseconds{quantile="0.5"} NaN +kubelet_pod_worker_start_latency_microseconds{quantile="0.9"} NaN +kubelet_pod_worker_start_latency_microseconds{quantile="0.99"} NaN +kubelet_pod_worker_start_latency_microseconds_sum 2.85589e+06 +kubelet_pod_worker_start_latency_microseconds_count 9 +# HELP kubelet_running_container_count Number of containers currently running +# TYPE kubelet_running_container_count gauge +kubelet_running_container_count 9 +# HELP kubelet_running_pod_count Number of pods currently running +# TYPE kubelet_running_pod_count gauge +kubelet_running_pod_count 9 +# HELP kubelet_runtime_operations Cumulative number of runtime operations by operation type. 
+# TYPE kubelet_runtime_operations counter +kubelet_runtime_operations{operation_type="container_status"} 90 +kubelet_runtime_operations{operation_type="create_container"} 10 +kubelet_runtime_operations{operation_type="exec_sync"} 138 +kubelet_runtime_operations{operation_type="image_status"} 25 +kubelet_runtime_operations{operation_type="list_containers"} 2586 +kubelet_runtime_operations{operation_type="list_images"} 195 +kubelet_runtime_operations{operation_type="list_podsandbox"} 2562 +kubelet_runtime_operations{operation_type="podsandbox_status"} 77 +kubelet_runtime_operations{operation_type="remove_container"} 14 +kubelet_runtime_operations{operation_type="run_podsandbox"} 9 +kubelet_runtime_operations{operation_type="start_container"} 10 +kubelet_runtime_operations{operation_type="status"} 279 +kubelet_runtime_operations{operation_type="stop_podsandbox"} 14 +kubelet_runtime_operations{operation_type="version"} 190 +# HELP kubelet_runtime_operations_errors Cumulative number of runtime operation errors by operation type. +# TYPE kubelet_runtime_operations_errors counter +kubelet_runtime_operations_errors{operation_type="container_status"} 14 +kubelet_runtime_operations_errors{operation_type="remove_container"} 4 +# HELP kubelet_runtime_operations_latency_microseconds Latency in microseconds of runtime operations. Broken down by operation type. +# TYPE kubelet_runtime_operations_latency_microseconds summary +kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="container_status"} 5.830434e+06 +kubelet_runtime_operations_latency_microseconds_count{operation_type="container_status"} 90 +kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="create_container"} 6.237513e+06 +kubelet_runtime_operations_latency_microseconds_count{operation_type="create_container"} 10 +kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.5"} 77674 +kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.9"} 84801 +kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.99"} 91057 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="exec_sync"} 1.1581846e+07 +kubelet_runtime_operations_latency_microseconds_count{operation_type="exec_sync"} 138 +kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.5"} 1379 +kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.9"} 1379 +kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.99"} 1379 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="image_status"} 84242 +kubelet_runtime_operations_latency_microseconds_count{operation_type="image_status"} 25 +kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.5"} 2860 
+kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.9"} 5131 +kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.99"} 15491 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_containers"} 8.583973e+06 +kubelet_runtime_operations_latency_microseconds_count{operation_type="list_containers"} 2586 +kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.5"} 4206 +kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.9"} 6102 +kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.99"} 7592 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_images"} 915822 +kubelet_runtime_operations_latency_microseconds_count{operation_type="list_images"} 195 +kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.5"} 6645 +kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.9"} 11038 +kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.99"} 21220 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_podsandbox"} 1.7650737e+07 +kubelet_runtime_operations_latency_microseconds_count{operation_type="list_podsandbox"} 2562 +kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="podsandbox_status"} 673056 +kubelet_runtime_operations_latency_microseconds_count{operation_type="podsandbox_status"} 77 +kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="remove_container"} 1.781569e+06 +kubelet_runtime_operations_latency_microseconds_count{operation_type="remove_container"} 14 +kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="run_podsandbox"} 9.284403e+06 +kubelet_runtime_operations_latency_microseconds_count{operation_type="run_podsandbox"} 9 +kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="start_container"} 1.1913088e+07 +kubelet_runtime_operations_latency_microseconds_count{operation_type="start_container"} 10 +kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.5"} 1555 
+kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.9"} 2438 +kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.99"} 4376 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="status"} 496865 +kubelet_runtime_operations_latency_microseconds_count{operation_type="status"} 279 +kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.5"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.9"} NaN +kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.99"} NaN +kubelet_runtime_operations_latency_microseconds_sum{operation_type="stop_podsandbox"} 41502 +kubelet_runtime_operations_latency_microseconds_count{operation_type="stop_podsandbox"} 14 +kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.5"} 933 +kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.9"} 1515 +kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.99"} 1674 +kubelet_runtime_operations_latency_microseconds_sum{operation_type="version"} 216328 +kubelet_runtime_operations_latency_microseconds_count{operation_type="version"} 190 +# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. +# TYPE kubernetes_build_info gauge +kubernetes_build_info{buildDate="2019-02-28T13:35:32Z",compiler="gc",gitCommit="c27b913fddd1a6c480c229191a087698aa92f0b1",gitTreeState="clean",gitVersion="v1.13.4",goVersion="go1.11.5",major="1",minor="13",platform="linux/amd64"} 1 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 44.55 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 33 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 9.2401664e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.55293758654e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.379233792e+09 +# HELP rest_client_request_latency_seconds Request latency in seconds. Broken down by verb and URL. 
+# TYPE rest_client_request_latency_seconds histogram +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.001"} 44 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.002"} 124 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.004"} 181 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.008"} 183 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.016"} 190 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.032"} 195 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.064"} 195 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.128"} 199 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.256"} 199 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.512"} 199 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="+Inf"} 202 +rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="GET"} 24.538311267 +rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="GET"} 202 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.001"} 0 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.002"} 0 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.004"} 23 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.008"} 160 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.016"} 172 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.032"} 175 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.064"} 176 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.128"} 177 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.256"} 177 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.512"} 177 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="+Inf"} 177 +rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH"} 1.1527289999999994 +rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH"} 177 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.001"} 8 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.002"} 10 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.004"} 17 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.008"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.016"} 49 
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.032"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.064"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.128"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.256"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.512"} 49 +rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="+Inf"} 52 +rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="POST"} 17.43416557 +rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="POST"} 52 +# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host. +# TYPE rest_client_requests_total counter +rest_client_requests_total{code="200",host="localhost:8443",method="GET"} 191 +rest_client_requests_total{code="200",host="localhost:8443",method="PATCH"} 177 +rest_client_requests_total{code="201",host="localhost:8443",method="POST"} 43 +rest_client_requests_total{code="403",host="localhost:8443",method="GET"} 2 +rest_client_requests_total{code="409",host="localhost:8443",method="POST"} 1 +rest_client_requests_total{code="<error>",host="localhost:8443",method="GET"} 37 +rest_client_requests_total{code="<error>",host="localhost:8443",method="POST"} 8 +# HELP storage_operation_duration_seconds Storage operation duration +# TYPE storage_operation_duration_seconds histogram +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.1"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.25"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.5"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="1"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="2.5"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="5"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="10"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="15"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="25"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="50"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="+Inf"} 3 +storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap"} 0.00147889 
+storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap"} 3 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.1"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.25"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.5"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="1"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="2.5"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="5"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="10"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="15"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="25"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="50"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="+Inf"} 15 +storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path"} 0.002347783 +storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path"} 15 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.1"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.25"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.5"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="1"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="2.5"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="5"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="10"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="15"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="25"} 4 +storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="50"} 4 
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="+Inf"} 4 +storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret"} 0.001769817 +storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret"} 4 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.1"} 59 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.25"} 60 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.5"} 60 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="1"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="2.5"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="5"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="10"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="15"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="25"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="50"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="+Inf"} 62 +storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap"} 2.039342002999999 +storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap"} 62 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.1"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.25"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.5"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="1"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="2.5"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="5"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="10"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="15"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="25"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="50"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="+Inf"} 15 +storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path"} 0.006827130000000001 
+storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path"} 15 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.1"} 83 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.25"} 83 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.5"} 83 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="1"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="2.5"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="5"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="10"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="15"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="25"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="50"} 85 +storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="+Inf"} 85 +storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/secret"} 1.9201849530000006 +storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/secret"} 85 +# HELP volume_manager_total_volumes Number of volumes in Volume Manager +# TYPE volume_manager_total_volumes gauge +volume_manager_total_volumes{plugin_name="kubernetes.io/configmap",state="actual_state_of_world"} 3 +volume_manager_total_volumes{plugin_name="kubernetes.io/configmap",state="desired_state_of_world"} 3 +volume_manager_total_volumes{plugin_name="kubernetes.io/host-path",state="actual_state_of_world"} 15 +volume_manager_total_volumes{plugin_name="kubernetes.io/host-path",state="desired_state_of_world"} 15 +volume_manager_total_volumes{plugin_name="kubernetes.io/secret",state="actual_state_of_world"} 4 +volume_manager_total_volumes{plugin_name="kubernetes.io/secret",state="desired_state_of_world"} 4 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/token.txt b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/token.txt new file mode 100644 index 00000000000000..e769c538e1df2b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubelet/testdata/token.txt @@ -0,0 +1 @@ +8zU5Emm58tPGShVkwTK3ZLn0d4I \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/README.md b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/README.md new file mode 120000 index 00000000000000..020405250aa36a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/README.md @@ -0,0 +1 @@ +integrations/kubeproxy.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/charts.go b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/charts.go new file mode 100644 index 00000000000000..47cbcbecd2c69b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/charts.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubeproxy + +import 
"github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims + // Dim is an alias for module.Dim + Dim = module.Dim +) + +var charts = Charts{ + { + ID: "kubeproxy_sync_proxy_rules", + Title: "Sync Proxy Rules", + Units: "events/s", + Fam: "sync proxy rules", + Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules", + Dims: Dims{ + {ID: "sync_proxy_rules_count", Name: "sync proxy rules", Algo: module.Incremental}, + }, + }, + { + ID: "kubeproxy_sync_proxy_rules_latency", + Title: "Sync Proxy Rules Latency", + Units: "observes/s", + Fam: "sync proxy rules", + Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds", + Type: module.Stacked, + Dims: Dims{ + {ID: "sync_proxy_rules_bucket_1000", Name: "0.001 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_2000", Name: "0.002 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_4000", Name: "0.004 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_8000", Name: "0.008 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_16000", Name: "0.016 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_32000", Name: "0.032 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_64000", Name: "0.064 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_128000", Name: "0.128 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_256000", Name: "0.256 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_512000", Name: "0.512 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_1024000", Name: "1.024 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_2048000", Name: "2.048 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_4096000", Name: "4.096 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_8192000", Name: "8.192 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_16384000", Name: "16.384 sec", Algo: module.Incremental}, + {ID: "sync_proxy_rules_bucket_+Inf", Name: "+Inf", Algo: module.Incremental}, + }, + }, + { + ID: "kubeproxy_sync_proxy_rules_latency_percentage", + Title: "Sync Proxy Rules Latency Percentage", + Units: "%", + Fam: "sync proxy rules", + Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency", + Type: module.Stacked, + Dims: Dims{ + {ID: "sync_proxy_rules_bucket_1000", Name: "0.001 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_2000", Name: "0.002 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_4000", Name: "0.004 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_8000", Name: "0.008 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_16000", Name: "0.016 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_32000", Name: "0.032 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_64000", Name: "0.064 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_128000", Name: "0.128 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_256000", Name: "0.256 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_512000", Name: "0.512 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_1024000", Name: "1.024 sec", Algo: module.PercentOfIncremental}, + {ID: "sync_proxy_rules_bucket_2048000", Name: "2.048 sec", Algo: 
module.PercentOfIncremental},
+			{ID: "sync_proxy_rules_bucket_4096000", Name: "4.096 sec", Algo: module.PercentOfIncremental},
+			{ID: "sync_proxy_rules_bucket_8192000", Name: "8.192 sec", Algo: module.PercentOfIncremental},
+			{ID: "sync_proxy_rules_bucket_16384000", Name: "16.384 sec", Algo: module.PercentOfIncremental},
+			{ID: "sync_proxy_rules_bucket_+Inf", Name: "+Inf", Algo: module.PercentOfIncremental},
+		},
+	},
+	{
+		ID:    "rest_client_requests_by_code",
+		Title: "HTTP Requests By Status Code",
+		Units: "requests/s",
+		Fam:   "rest client",
+		Ctx:   "k8s_kubeproxy.rest_client_requests_by_code",
+		Type:  module.Stacked,
+	},
+	{
+		ID:    "rest_client_requests_by_method",
+		Title: "HTTP Requests By Method",
+		Units: "requests/s",
+		Fam:   "rest client",
+		Ctx:   "k8s_kubeproxy.rest_client_requests_by_method",
+		Type:  module.Stacked,
+	},
+	{
+		ID:    "http_request_duration",
+		Title: "HTTP Request Duration",
+		Units: "microseconds",
+		Fam:   "http",
+		Ctx:   "k8s_kubeproxy.http_request_duration",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: "http_request_duration_05", Name: "0.5"},
+			{ID: "http_request_duration_09", Name: "0.9"},
+			{ID: "http_request_duration_099", Name: "0.99"},
+		},
+	},
+}
diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/collect.go b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/collect.go
new file mode 100644
index 00000000000000..14f838ff05fb1e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/collect.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+	"math"
+
+	mtx "github.com/netdata/go.d.plugin/pkg/metrics"
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/stm"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+func (kp *KubeProxy) collect() (map[string]int64, error) {
+	raw, err := kp.prom.ScrapeSeries()
+
+	if err != nil {
+		return nil, err
+	}
+
+	mx := newMetrics()
+
+	kp.collectSyncProxyRules(raw, mx)
+	kp.collectRESTClientHTTPRequests(raw, mx)
+	kp.collectHTTPRequestDuration(raw, mx)
+
+	return stm.ToMap(mx), nil
+}
+
+func (kp *KubeProxy) collectSyncProxyRules(raw prometheus.Series, mx *metrics) {
+	m := raw.FindByName("kubeproxy_sync_proxy_rules_latency_microseconds_count")
+	mx.SyncProxyRules.Count.Set(m.Max())
+	kp.collectSyncProxyRulesLatency(raw, mx)
+}
+
+func (kp *KubeProxy) collectSyncProxyRulesLatency(raw prometheus.Series, mx *metrics) {
+	metricName := "kubeproxy_sync_proxy_rules_latency_microseconds_bucket"
+	latency := &mx.SyncProxyRules.Latency
+
+	for _, metric := range raw.FindByName(metricName) {
+		bucket := metric.Labels.Get("le")
+		value := metric.Value
+		switch bucket {
+		case "1000":
+			latency.LE1000.Set(value)
+		case "2000":
+			latency.LE2000.Set(value)
+		case "4000":
+			latency.LE4000.Set(value)
+		case "8000":
+			latency.LE8000.Set(value)
+		case "16000":
+			latency.LE16000.Set(value)
+		case "32000":
+			latency.LE32000.Set(value)
+		case "64000":
+			latency.LE64000.Set(value)
+		case "128000":
+			latency.LE128000.Set(value)
+		case "256000":
+			latency.LE256000.Set(value)
+		case "512000":
+			latency.LE512000.Set(value)
+		case "1.024e+06":
+			latency.LE1024000.Set(value)
+		case "2.048e+06":
+			latency.LE2048000.Set(value)
+		case "4.096e+06":
+			latency.LE4096000.Set(value)
+		case "8.192e+06":
+			latency.LE8192000.Set(value)
+		case "1.6384e+07":
+			latency.LE16384000.Set(value)
+		case "+Inf":
+			latency.Inf.Set(value)
+		}
+	}
+
+	// Prometheus histogram buckets are cumulative: each "le" bucket also counts
+	// every observation below it. Subtracting the next lower bucket, top-down,
+	// leaves each chart dimension with the count for its own interval only.
+	latency.Inf.Sub(latency.LE16384000.Value())
+	latency.LE16384000.Sub(latency.LE8192000.Value())
+ latency.LE8192000.Sub(latency.LE4096000.Value()) + latency.LE4096000.Sub(latency.LE2048000.Value()) + latency.LE2048000.Sub(latency.LE1024000.Value()) + latency.LE1024000.Sub(latency.LE512000.Value()) + latency.LE512000.Sub(latency.LE256000.Value()) + latency.LE256000.Sub(latency.LE128000.Value()) + latency.LE128000.Sub(latency.LE64000.Value()) + latency.LE64000.Sub(latency.LE32000.Value()) + latency.LE32000.Sub(latency.LE16000.Value()) + latency.LE16000.Sub(latency.LE8000.Value()) + latency.LE8000.Sub(latency.LE4000.Value()) + latency.LE4000.Sub(latency.LE2000.Value()) + latency.LE2000.Sub(latency.LE1000.Value()) +} + +func (kp *KubeProxy) collectRESTClientHTTPRequests(raw prometheus.Series, mx *metrics) { + metricName := "rest_client_requests_total" + chart := kp.charts.Get("rest_client_requests_by_code") + + for _, metric := range raw.FindByName(metricName) { + code := metric.Labels.Get("code") + if code == "" { + continue + } + dimID := "rest_client_requests_" + code + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: code, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.RESTClient.Requests.ByStatusCode[code] = mtx.Gauge(metric.Value) + } + + chart = kp.charts.Get("rest_client_requests_by_method") + + for _, metric := range raw.FindByName(metricName) { + method := metric.Labels.Get("method") + if method == "" { + continue + } + dimID := "rest_client_requests_" + method + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: method, Algo: module.Incremental}) + chart.MarkNotCreated() + } + mx.RESTClient.Requests.ByMethod[method] = mtx.Gauge(metric.Value) + } +} + +func (kp *KubeProxy) collectHTTPRequestDuration(raw prometheus.Series, mx *metrics) { + // Summary + for _, metric := range raw.FindByName("http_request_duration_microseconds") { + if math.IsNaN(metric.Value) { + continue + } + quantile := metric.Labels.Get("quantile") + switch quantile { + case "0.5": + mx.HTTP.Request.Duration.Quantile05.Set(metric.Value) + case "0.9": + mx.HTTP.Request.Duration.Quantile09.Set(metric.Value) + case "0.99": + mx.HTTP.Request.Duration.Quantile099.Set(metric.Value) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/config_schema.json b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/config_schema.json new file mode 100644 index 00000000000000..c26231397c2d83 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/k8s_kubeproxy job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/integrations/kubeproxy.md b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/integrations/kubeproxy.md new file mode 100644 index 
00000000000000..c90013f97f7f05
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/integrations/kubeproxy.md
@@ -0,0 +1,186 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_kubeproxy/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_kubeproxy/metadata.yaml"
+sidebar_label: "Kubeproxy"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Kubernetes"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubeproxy
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: k8s_kubeproxy
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Kubeproxy instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Kubeproxy instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |
+| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |
+| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |
+| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/k8s_kubeproxy.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>

+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL.
| http://127.0.0.1:10249/metrics | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:10249/metrics + +``` +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:10249/metrics + tls_skip_verify: yes + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m k8s_kubeproxy + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy.go b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy.go new file mode 100644 index 00000000000000..a681619c441ac4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubeproxy + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + defaultURL = "http://127.0.0.1:10249/metrics" + defaultHTTPTimeout = time.Second * 2 +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("k8s_kubeproxy", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + // NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 + Priority: 50000, + }, + Create: func() module.Module { return New() }, + }) +} + +// New creates KubeProxy with default values. +func New() *KubeProxy { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } + return &KubeProxy{ + Config: config, + charts: charts.Copy(), + } +} + +// Config is the KubeProxy module configuration. +type Config struct { + web.HTTP `yaml:",inline"` +} + +// KubeProxy is KubeProxy module. 
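+// It scrapes the kube-proxy /metrics endpoint with the configured prometheus
+// client and turns the scraped series into the charts defined in charts.go.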
+type KubeProxy struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + charts *Charts +} + +// Cleanup makes cleanup. +func (KubeProxy) Cleanup() {} + +// Init makes initialization. +func (kp *KubeProxy) Init() bool { + if kp.URL == "" { + kp.Error("URL not set") + return false + } + + client, err := web.NewHTTPClient(kp.Client) + if err != nil { + kp.Errorf("error on creating http client : %v", err) + return false + } + + kp.prom = prometheus.New(client, kp.Request) + + return true +} + +// Check makes check. +func (kp *KubeProxy) Check() bool { + return len(kp.Collect()) > 0 +} + +// Charts creates Charts. +func (kp KubeProxy) Charts() *Charts { + return kp.charts +} + +// Collect collects metrics. +func (kp *KubeProxy) Collect() map[string]int64 { + mx, err := kp.collect() + + if err != nil { + kp.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy_test.go b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy_test.go new file mode 100644 index 00000000000000..4c1831a998fc39 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/kubeproxy_test.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubeproxy + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testMetrics, _ = os.ReadFile("testdata/metrics.txt") + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*KubeProxy)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestKubeProxy_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestKubeProxy_Cleanup(t *testing.T) { New().Cleanup() } + +func TestKubeProxy_Init(t *testing.T) { assert.True(t, New().Init()) } + +func TestKubeProxy_InitNG(t *testing.T) { + job := New() + job.URL = "" + assert.False(t, job.Init()) +} + +func TestKubeProxy_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testMetrics) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestKubeProxy_CheckNG(t *testing.T) { + job := New() + job.URL = "http://127.0.0.1:38001/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestKubeProxy_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testMetrics) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "sync_proxy_rules_count": 2669, + "sync_proxy_rules_bucket_1000": 1, + "sync_proxy_rules_bucket_2000": 0, + "sync_proxy_rules_bucket_4000": 0, + "sync_proxy_rules_bucket_8000": 0, + "sync_proxy_rules_bucket_16000": 23, + "sync_proxy_rules_bucket_32000": 2510, + "sync_proxy_rules_bucket_64000": 126, + "sync_proxy_rules_bucket_128000": 8, + "sync_proxy_rules_bucket_256000": 0, + "sync_proxy_rules_bucket_512000": 1, + "sync_proxy_rules_bucket_1024000": 0, + "sync_proxy_rules_bucket_4096000": 0, + "sync_proxy_rules_bucket_8192000": 0, + "sync_proxy_rules_bucket_2048000": 0, + "sync_proxy_rules_bucket_16384000": 0, + "sync_proxy_rules_bucket_+Inf": 0, + "rest_client_requests_201": 1, + "rest_client_requests_200": 362, + 
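+		// Status-code and method dimensions are created dynamically in
+		// collect.go, one per label value seen in rest_client_requests_total.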
"rest_client_requests_GET": 362, + "rest_client_requests_POST": 1, + "http_request_duration_05": 1515, + "http_request_duration_09": 3939, + "http_request_duration_099": 9464, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestKubeProxy_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestKubeProxy_404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/metrics" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metadata.yaml b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metadata.yaml new file mode 100644 index 00000000000000..0f8d0d72ab5833 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metadata.yaml @@ -0,0 +1,227 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-k8s_kubeproxy + plugin_name: go.d.plugin + module_name: k8s_kubeproxy + monitored_instance: + name: Kubeproxy + link: https://kubernetes.io/docs/concepts/overview/components/#kube-proxy + icon_filename: kubernetes.svg + categories: + - data-collection.kubernetes + keywords: + - kubeproxy + - kubernetes + - k8s + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Kubeproxy instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/k8s_kubeproxy.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:10249/metrics + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. 
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:10249/metrics
+            - name: HTTPS with self-signed certificate
+              description: |
+                Do not validate server certificate chain and hostname.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:10249/metrics
+                    tls_skip_verify: yes
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules
+              description: Sync Proxy Rules
+              unit: events/s
+              chart_type: line
+              dimensions:
+                - name: sync_proxy_rules
+            - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds
+              description: Sync Proxy Rules Latency
+              unit: observes/s
+              chart_type: stacked
+              dimensions:
+                - name: "0.001"
+                - name: "0.002"
+                - name: "0.004"
+                - name: "0.008"
+                - name: "0.016"
+                - name: "0.032"
+                - name: "0.064"
+                - name: "0.128"
+                - name: "0.256"
+                - name: "0.512"
+                - name: "1.024"
+                - name: "2.048"
+                - name: "4.096"
+                - name: "8.192"
+                - name: "16.384"
+                - name: +Inf
+            - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency
+              description: Sync Proxy Rules Latency Percentage
+              unit: percentage
+              chart_type: stacked
+              dimensions:
+                - name: "0.001"
+                - name: "0.002"
+                - name: "0.004"
+                - name: "0.008"
+                - name: "0.016"
+                - name: "0.032"
+                - name: "0.064"
+                - name: "0.128"
+                - name: "0.256"
+                - name: "0.512"
+                - name: "1.024"
+                - name: "2.048"
+                - name: "4.096"
+                - name: "8.192"
+                - name: "16.384"
+                - name: +Inf
+            - name: k8s_kubeproxy.rest_client_requests_by_code
+              description: HTTP Requests By Status Code
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per HTTP status code
+            - name: k8s_kubeproxy.rest_client_requests_by_method
+              description: HTTP Requests By Method
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per HTTP method
+            - name: k8s_kubeproxy.http_request_duration
+              description: HTTP Request Duration
+              unit: microseconds
+              chart_type: stacked
+              dimensions:
+                - name: "0.5"
+                - name: "0.9"
+                - name: "0.99"
diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metrics.go b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metrics.go
new file mode 100644
index 00000000000000..500ebf1a462640
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/metrics.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
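+	// Fields of the metrics struct below carry `stm` tags; stm.ToMap in
+	// collect.go joins nested tags with "_" to build the flat metric keys
+	// the charts reference (e.g. "sync_proxy_rules_bucket_1000").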
mtx "github.com/netdata/go.d.plugin/pkg/metrics" +) + +func newMetrics() *metrics { + var mx metrics + mx.RESTClient.Requests.ByStatusCode = make(map[string]mtx.Gauge) + mx.RESTClient.Requests.ByMethod = make(map[string]mtx.Gauge) + + return &mx +} + +type metrics struct { + SyncProxyRules struct { + Count mtx.Gauge `stm:"count"` + Latency struct { + LE1000 mtx.Gauge `stm:"1000"` + LE2000 mtx.Gauge `stm:"2000"` + LE4000 mtx.Gauge `stm:"4000"` + LE8000 mtx.Gauge `stm:"8000"` + LE16000 mtx.Gauge `stm:"16000"` + LE32000 mtx.Gauge `stm:"32000"` + LE64000 mtx.Gauge `stm:"64000"` + LE128000 mtx.Gauge `stm:"128000"` + LE256000 mtx.Gauge `stm:"256000"` + LE512000 mtx.Gauge `stm:"512000"` + LE1024000 mtx.Gauge `stm:"1024000"` + LE2048000 mtx.Gauge `stm:"2048000"` + LE4096000 mtx.Gauge `stm:"4096000"` + LE8192000 mtx.Gauge `stm:"8192000"` + LE16384000 mtx.Gauge `stm:"16384000"` + Inf mtx.Gauge `stm:"+Inf"` + } `stm:"bucket"` + } `stm:"sync_proxy_rules"` + RESTClient struct { + Requests struct { + ByStatusCode map[string]mtx.Gauge `stm:""` + ByMethod map[string]mtx.Gauge `stm:""` + } `stm:"requests"` + } `stm:"rest_client"` + HTTP struct { + Request struct { + Duration struct { + Quantile05 mtx.Gauge `stm:"05"` + Quantile09 mtx.Gauge `stm:"09"` + Quantile099 mtx.Gauge `stm:"099"` + } `stm:"duration"` + } `stm:"request"` + } `stm:"http"` +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/testdata/metrics.txt b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/testdata/metrics.txt new file mode 100644 index 00000000000000..7a10d847743c1d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_kubeproxy/testdata/metrics.txt @@ -0,0 +1,190 @@ +# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend. +# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 2.2652e-05 +go_gc_duration_seconds{quantile="0.25"} 5.9037e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000113147 +go_gc_duration_seconds{quantile="0.75"} 0.000232939 +go_gc_duration_seconds{quantile="1"} 0.009002756 +go_gc_duration_seconds_sum 0.294305823 +go_gc_duration_seconds_count 755 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 46 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 6.14748e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 9.53406048e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.535744e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 8.247964e+06 +# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. 
+# TYPE go_memstats_gc_cpu_fraction gauge +go_memstats_gc_cpu_fraction 7.826953112615371e-06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 2.387968e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 6.14748e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 5.8466304e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 7.82336e+06 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 29543 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 6.6289664e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.5530903816542802e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 8.277507e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 3456 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 89832 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 114688 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 7.132208e+06 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 596472 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 819200 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 819200 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 7.176012e+07 +# HELP go_threads Number of OS threads created +# TYPE go_threads gauge +go_threads 10 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1515.864 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 3939.871 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 9464.15 +http_request_duration_microseconds_sum{handler="prometheus"} 837819.5429999996 +http_request_duration_microseconds_count{handler="prometheus"} 378 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="prometheus",quantile="0.5"} 377 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 377 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 377 +http_request_size_bytes_sum{handler="prometheus"} 142462 +http_request_size_bytes_count{handler="prometheus"} 378 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="prometheus",method="get"} 378 +# HELP http_response_size_bytes The HTTP response sizes in bytes. +# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="prometheus",quantile="0.5"} 2414 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 2419 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 2423 +http_response_size_bytes_sum{handler="prometheus"} 911969 +http_response_size_bytes_count{handler="prometheus"} 378 +# HELP kubeproxy_sync_proxy_rules_latency_microseconds SyncProxyRules latency +# TYPE kubeproxy_sync_proxy_rules_latency_microseconds histogram +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1000"} 1 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="2000"} 1 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="4000"} 1 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="8000"} 1 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="16000"} 24 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="32000"} 2534 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="64000"} 2660 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="128000"} 2668 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="256000"} 2668 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="512000"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1.024e+06"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="2.048e+06"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="4.096e+06"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="8.192e+06"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1.6384e+07"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="+Inf"} 2669 +kubeproxy_sync_proxy_rules_latency_microseconds_sum 6.2885705e+07 +kubeproxy_sync_proxy_rules_latency_microseconds_count 2669 +# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. +# TYPE kubernetes_build_info gauge +kubernetes_build_info{buildDate="2019-02-28T13:35:32Z",compiler="gc",gitCommit="c27b913fddd1a6c480c229191a087698aa92f0b1",gitTreeState="clean",gitVersion="v1.13.4",goVersion="go1.11.5",major="1",minor="13",platform="linux/amd64"} 1 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 156.15 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 11 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 3.5467264e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.5530103809e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.4047232e+08 +# HELP rest_client_request_latency_seconds Request latency in seconds. Broken down by verb and URL. +# TYPE rest_client_request_latency_seconds histogram +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.001"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.002"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.004"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.008"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.016"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.032"} 2 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.064"} 2 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.128"} 2 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.256"} 3 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.512"} 3 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="+Inf"} 3 +rest_client_request_latency_seconds_sum{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET"} 0.28126861 +rest_client_request_latency_seconds_count{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET"} 3 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.001"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.002"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.004"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.008"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.016"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.032"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.064"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.128"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.256"} 0 
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.512"} 0 +rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="+Inf"} 1 +rest_client_request_latency_seconds_sum{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST"} 4.008446017 +rest_client_request_latency_seconds_count{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST"} 1 +# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host. +# TYPE rest_client_requests_total counter +rest_client_requests_total{code="200",host="192.168.99.124:8443",method="GET"} 362 +rest_client_requests_total{code="201",host="192.168.99.124:8443",method="POST"} 1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/README.md b/src/go/collectors/go.d.plugin/modules/k8s_state/README.md new file mode 120000 index 00000000000000..72c4e5cab31c9b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/README.md @@ -0,0 +1 @@ +integrations/kubernetes_cluster_state.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/charts.go b/src/go/collectors/go.d.plugin/modules/k8s_state/charts.go new file mode 100644 index 00000000000000..5380a7bcf0bebd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/charts.go @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "fmt" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +// NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000 +const prioDiscoveryDiscovererState = 50999 + +const ( + prioNodeAllocatableCPURequestsUtil = 50100 + iota + prioNodeAllocatableCPURequestsUsed + prioNodeAllocatableCPULimitsUtil + prioNodeAllocatableCPULimitsUsed + prioNodeAllocatableMemRequestsUtil + prioNodeAllocatableMemRequestsUsed + prioNodeAllocatableMemLimitsUtil + prioNodeAllocatableMemLimitsUsed + prioNodeAllocatablePodsUtil + prioNodeAllocatablePodsUsage + prioNodeConditions + prioNodeSchedulability + prioNodePodsReadiness + prioNodePodsReadinessState + prioNodePodsCondition + prioNodePodsPhase + prioNodeContainersCount + prioNodeContainersState + prioNodeInitContainersState + prioNodeAge +) + +const ( + prioPodCPURequestsUsed = 50300 + iota + prioPodCPULimitsUsed + prioPodMemRequestsUsed + prioPodMemLimitsUsed + prioPodCondition + prioPodPhase + prioPodAge + prioPodContainersCount + prioPodContainersState + prioPodInitContainersState + prioPodContainerReadinessState + prioPodContainerRestarts + prioPodContainerState + prioPodContainerWaitingStateReason + prioPodContainerTerminatedStateReason +) + +const ( + labelKeyPrefix = "k8s_" + //labelKeyLabelPrefix = labelKeyPrefix + "label_" + //labelKeyAnnotationPrefix = labelKeyPrefix + "annotation_" + labelKeyClusterID = labelKeyPrefix + "cluster_id" + labelKeyClusterName = labelKeyPrefix + "cluster_name" + labelKeyNamespace = labelKeyPrefix + "namespace" + labelKeyKind = labelKeyPrefix + "kind" + labelKeyPodName = labelKeyPrefix + "pod_name" + labelKeyNodeName = labelKeyPrefix + "node_name" + labelKeyPodUID = labelKeyPrefix + "pod_uid" + labelKeyControllerKind = labelKeyPrefix + "controller_kind" + labelKeyControllerName = labelKeyPrefix + "controller_name" + labelKeyContainerName = labelKeyPrefix + "container_name" + labelKeyContainerID = labelKeyPrefix + "container_id" + labelKeyQoSClass = labelKeyPrefix + "qos_class" +) + +var baseCharts = module.Charts{ + 
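// Only the hidden discovery-status chart is registered up front; node, pod, and container charts are added dynamically as resources are discovered. +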
discoveryStatusChart.Copy(), +} + +var nodeChartsTmpl = module.Charts{ + nodeAllocatableCPURequestsUtilChartTmpl.Copy(), + nodeAllocatableCPURequestsUsedChartTmpl.Copy(), + nodeAllocatableCPULimitsUtilChartTmpl.Copy(), + nodeAllocatableCPULimitsUsedChartTmpl.Copy(), + nodeAllocatableMemRequestsUtilChartTmpl.Copy(), + nodeAllocatableMemRequestsUsedChartTmpl.Copy(), + nodeAllocatableMemLimitsUtilChartTmpl.Copy(), + nodeAllocatableMemLimitsUsedChartTmpl.Copy(), + nodeAllocatablePodsUtilizationChartTmpl.Copy(), + nodeAllocatablePodsUsageChartTmpl.Copy(), + nodeConditionsChartTmpl.Copy(), + nodeSchedulabilityChartTmpl.Copy(), + nodePodsReadinessChartTmpl.Copy(), + nodePodsReadinessStateChartTmpl.Copy(), + nodePodsConditionChartTmpl.Copy(), + nodePodsPhaseChartTmpl.Copy(), + nodeContainersChartTmpl.Copy(), + nodeContainersStateChartTmpl.Copy(), + nodeInitContainersStateChartTmpl.Copy(), + nodeAgeChartTmpl.Copy(), +} + +var podChartsTmpl = module.Charts{ + podCPURequestsUsedChartTmpl.Copy(), + podCPULimitsUsedChartTmpl.Copy(), + podMemRequestsUsedChartTmpl.Copy(), + podMemLimitsUsedChartTmpl.Copy(), + podConditionChartTmpl.Copy(), + podPhaseChartTmpl.Copy(), + podAgeChartTmpl.Copy(), + podContainersCountChartTmpl.Copy(), + podContainersStateChartTmpl.Copy(), + podInitContainersStateChartTmpl.Copy(), +} + +var containerChartsTmpl = module.Charts{ + containerReadinessStateChartTmpl.Copy(), + containerRestartsChartTmpl.Copy(), + containersStateChartTmpl.Copy(), + containersStateWaitingChartTmpl.Copy(), + containersStateTerminatedChartTmpl.Copy(), +} + +var ( + // CPU resource + nodeAllocatableCPURequestsUtilChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_cpu_requests_utilization", + Title: "CPU requests utilization", + Units: "%", + Fam: "node cpu resource", + Ctx: "k8s_state.node_allocatable_cpu_requests_utilization", + Priority: prioNodeAllocatableCPURequestsUtil, + Dims: module.Dims{ + {ID: "node_%s_alloc_cpu_requests_util", Name: "requests", Div: precision}, + }, + } + nodeAllocatableCPURequestsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_cpu_requests_used", + Title: "CPU requests used", + Units: "millicpu", + Fam: "node cpu resource", + Ctx: "k8s_state.node_allocatable_cpu_requests_used", + Priority: prioNodeAllocatableCPURequestsUsed, + Dims: module.Dims{ + {ID: "node_%s_alloc_cpu_requests_used", Name: "requests"}, + }, + } + nodeAllocatableCPULimitsUtilChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_cpu_limits_utilization", + Title: "CPU limits utilization", + Units: "%", + Fam: "node cpu resource", + Ctx: "k8s_state.node_allocatable_cpu_limits_utilization", + Priority: prioNodeAllocatableCPULimitsUtil, + Dims: module.Dims{ + {ID: "node_%s_alloc_cpu_limits_util", Name: "limits", Div: precision}, + }, + } + nodeAllocatableCPULimitsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_cpu_limits_used", + Title: "CPU limits used", + Units: "millicpu", + Fam: "node cpu resource", + Ctx: "k8s_state.node_allocatable_cpu_limits_used", + Priority: prioNodeAllocatableCPULimitsUsed, + Dims: module.Dims{ + {ID: "node_%s_alloc_cpu_limits_used", Name: "limits"}, + }, + } + // memory resource + nodeAllocatableMemRequestsUtilChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_mem_requests_utilization", + Title: "Memory requests utilization", + Units: "%", + Fam: "node mem resource", + Ctx: "k8s_state.node_allocatable_mem_requests_utilization", + Priority: prioNodeAllocatableMemRequestsUtil, + Dims: 
module.Dims{ + {ID: "node_%s_alloc_mem_requests_util", Name: "requests", Div: precision}, + }, + } + nodeAllocatableMemRequestsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_mem_requests_used", + Title: "Memory requests used", + Units: "bytes", + Fam: "node mem resource", + Ctx: "k8s_state.node_allocatable_mem_requests_used", + Priority: prioNodeAllocatableMemRequestsUsed, + Dims: module.Dims{ + {ID: "node_%s_alloc_mem_requests_used", Name: "requests"}, + }, + } + nodeAllocatableMemLimitsUtilChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_mem_limits_utilization", + Title: "Memory limits utilization", + Units: "%", + Fam: "node mem resource", + Ctx: "k8s_state.node_allocatable_mem_limits_utilization", + Priority: prioNodeAllocatableMemLimitsUtil, + Dims: module.Dims{ + {ID: "node_%s_alloc_mem_limits_util", Name: "limits", Div: precision}, + }, + } + nodeAllocatableMemLimitsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_mem_limits_used", + Title: "Memory limits used", + Units: "bytes", + Fam: "node mem resource", + Ctx: "k8s_state.node_allocatable_mem_limits_used", + Priority: prioNodeAllocatableMemLimitsUsed, + Dims: module.Dims{ + {ID: "node_%s_alloc_mem_limits_used", Name: "limits"}, + }, + } + // pods resource + nodeAllocatablePodsUtilizationChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocatable_pods_utilization", + Title: "Pods resource utilization", + Units: "%", + Fam: "node pods resource", + Ctx: "k8s_state.node_allocatable_pods_utilization", + Priority: prioNodeAllocatablePodsUtil, + Dims: module.Dims{ + {ID: "node_%s_alloc_pods_util", Name: "allocated", Div: precision}, + }, + } + nodeAllocatablePodsUsageChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.allocated_pods_usage", + Title: "Pods resource usage", + Units: "pods", + Fam: "node pods resource", + Ctx: "k8s_state.node_allocatable_pods_usage", + Type: module.Stacked, + Priority: prioNodeAllocatablePodsUsage, + Dims: module.Dims{ + {ID: "node_%s_alloc_pods_available", Name: "available"}, + {ID: "node_%s_alloc_pods_allocated", Name: "allocated"}, + }, + } + // condition + nodeConditionsChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.condition_status", + Title: "Condition status", + Units: "status", + Fam: "node condition", + Ctx: "k8s_state.node_condition", + Priority: prioNodeConditions, + } + nodeSchedulabilityChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.schedulability", + Title: "Schedulability", + Units: "state", + Fam: "node schedulability", + Ctx: "k8s_state.node_schedulability", + Priority: prioNodeSchedulability, + Dims: module.Dims{ + {ID: "node_%s_schedulability_schedulable", Name: "schedulable"}, + {ID: "node_%s_schedulability_unschedulable", Name: "unschedulable"}, + }, + } + // pods readiness + nodePodsReadinessChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.pods_readiness", + Title: "Pods readiness", + Units: "%", + Fam: "node pods readiness", + Ctx: "k8s_state.node_pods_readiness", + Priority: prioNodePodsReadiness, + Dims: module.Dims{ + {ID: "node_%s_pods_readiness", Name: "ready", Div: precision}, + }, + } + nodePodsReadinessStateChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.pods_readiness_state", + Title: "Pods readiness state", + Units: "pods", + Fam: "node pods readiness", + Ctx: "k8s_state.node_pods_readiness_state", + Type: module.Stacked, + Priority: prioNodePodsReadinessState, + Dims: module.Dims{ + {ID: "node_%s_pods_readiness_ready", Name: "ready"}, + {ID: 
"node_%s_pods_readiness_unready", Name: "unready"}, + }, + } + // pods condition + nodePodsConditionChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.pods_condition", + Title: "Pods condition", + Units: "pods", + Fam: "node pods condition", + Ctx: "k8s_state.node_pods_condition", + Priority: prioNodePodsCondition, + Dims: module.Dims{ + {ID: "node_%s_pods_cond_podready", Name: "pod_ready"}, + {ID: "node_%s_pods_cond_podscheduled", Name: "pod_scheduled"}, + {ID: "node_%s_pods_cond_podinitialized", Name: "pod_initialized"}, + {ID: "node_%s_pods_cond_containersready", Name: "containers_ready"}, + }, + } + // pods phase + nodePodsPhaseChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.pods_phase", + Title: "Pods phase", + Units: "pods", + Fam: "node pods phase", + Ctx: "k8s_state.node_pods_phase", + Type: module.Stacked, + Priority: prioNodePodsPhase, + Dims: module.Dims{ + {ID: "node_%s_pods_phase_running", Name: "running"}, + {ID: "node_%s_pods_phase_failed", Name: "failed"}, + {ID: "node_%s_pods_phase_succeeded", Name: "succeeded"}, + {ID: "node_%s_pods_phase_pending", Name: "pending"}, + }, + } + // containers + nodeContainersChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.containers", + Title: "Containers", + Units: "containers", + Fam: "node containers", + Ctx: "k8s_state.node_containers", + Priority: prioNodeContainersCount, + Dims: module.Dims{ + {ID: "node_%s_containers", Name: "containers"}, + {ID: "node_%s_init_containers", Name: "init_containers"}, + }, + } + nodeContainersStateChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.containers_state", + Title: "Containers state", + Units: "containers", + Fam: "node containers", + Ctx: "k8s_state.node_containers_state", + Type: module.Stacked, + Priority: prioNodeContainersState, + Dims: module.Dims{ + {ID: "node_%s_containers_state_running", Name: "running"}, + {ID: "node_%s_containers_state_waiting", Name: "waiting"}, + {ID: "node_%s_containers_state_terminated", Name: "terminated"}, + }, + } + nodeInitContainersStateChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.init_containers_state", + Title: "Init containers state", + Units: "containers", + Fam: "node containers", + Ctx: "k8s_state.node_init_containers_state", + Type: module.Stacked, + Priority: prioNodeInitContainersState, + Dims: module.Dims{ + {ID: "node_%s_init_containers_state_running", Name: "running"}, + {ID: "node_%s_init_containers_state_waiting", Name: "waiting"}, + {ID: "node_%s_init_containers_state_terminated", Name: "terminated"}, + }, + } + // age + nodeAgeChartTmpl = module.Chart{ + IDSep: true, + ID: "node_%s.age", + Title: "Age", + Units: "seconds", + Fam: "node age", + Ctx: "k8s_state.node_age", + Priority: prioNodeAge, + Dims: module.Dims{ + {ID: "node_%s_age", Name: "age"}, + }, + } +) + +func (ks *KubeState) newNodeCharts(ns *nodeState) *module.Charts { + cs := nodeChartsTmpl.Copy() + for _, c := range *cs { + c.ID = fmt.Sprintf(c.ID, replaceDots(ns.id())) + c.Labels = ks.newNodeChartLabels(ns) + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, ns.id()) + } + } + return cs +} + +func (ks *KubeState) newNodeChartLabels(ns *nodeState) []module.Label { + labels := []module.Label{ + {Key: labelKeyNodeName, Value: ns.name, Source: module.LabelSourceK8s}, + {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s}, + {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s}, + } + return labels +} + +func (ks *KubeState) addNodeCharts(ns *nodeState) { + cs := ks.newNodeCharts(ns) 
+ if err := ks.Charts().Add(*cs...); err != nil { + ks.Warning(err) + } +} + +func (ks *KubeState) removeNodeCharts(ns *nodeState) { + prefix := fmt.Sprintf("node_%s", replaceDots(ns.id())) + for _, c := range *ks.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +func (ks *KubeState) addNodeConditionToCharts(ns *nodeState, cond string) { + id := fmt.Sprintf(nodeConditionsChartTmpl.ID, replaceDots(ns.id())) + c := ks.Charts().Get(id) + if c == nil { + ks.Warningf("chart '%s' does not exist", id) + return + } + dim := &module.Dim{ + ID: fmt.Sprintf("node_%s_cond_%s", ns.id(), strings.ToLower(cond)), + Name: cond, + } + if err := c.AddDim(dim); err != nil { + ks.Warning(err) + return + } + c.MarkNotCreated() +} + +var ( + podCPURequestsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.cpu_requests_used", + Title: "CPU requests used", + Units: "millicpu", + Fam: "pod allocated cpu", + Ctx: "k8s_state.pod_cpu_requests_used", + Priority: prioPodCPURequestsUsed, + Dims: module.Dims{ + {ID: "pod_%s_cpu_requests_used", Name: "requests"}, + }, + } + podCPULimitsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.cpu_limits_used", + Title: "CPU limits used", + Units: "millicpu", + Fam: "pod allocated cpu", + Ctx: "k8s_state.pod_cpu_limits_used", + Priority: prioPodCPULimitsUsed, + Dims: module.Dims{ + {ID: "pod_%s_cpu_limits_used", Name: "limits"}, + }, + } + podMemRequestsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.mem_requests_used", + Title: "Memory requests used", + Units: "bytes", + Fam: "pod allocated mem", + Ctx: "k8s_state.pod_mem_requests_used", + Priority: prioPodMemRequestsUsed, + Dims: module.Dims{ + {ID: "pod_%s_mem_requests_used", Name: "requests"}, + }, + } + podMemLimitsUsedChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.mem_limits_used", + Title: "Memory limits used", + Units: "bytes", + Fam: "pod allocated mem", + Ctx: "k8s_state.pod_mem_limits_used", + Priority: prioPodMemLimitsUsed, + Dims: module.Dims{ + {ID: "pod_%s_mem_limits_used", Name: "limits"}, + }, + } + podConditionChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.condition", + Title: "Condition", + Units: "state", + Fam: "pod condition", + Ctx: "k8s_state.pod_condition", + Priority: prioPodCondition, + Dims: module.Dims{ + {ID: "pod_%s_cond_podready", Name: "pod_ready"}, + {ID: "pod_%s_cond_podscheduled", Name: "pod_scheduled"}, + {ID: "pod_%s_cond_podinitialized", Name: "pod_initialized"}, + {ID: "pod_%s_cond_containersready", Name: "containers_ready"}, + }, + } + podPhaseChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.phase", + Title: "Phase", + Units: "state", + Fam: "pod phase", + Ctx: "k8s_state.pod_phase", + Priority: prioPodPhase, + Dims: module.Dims{ + {ID: "pod_%s_phase_running", Name: "running"}, + {ID: "pod_%s_phase_failed", Name: "failed"}, + {ID: "pod_%s_phase_succeeded", Name: "succeeded"}, + {ID: "pod_%s_phase_pending", Name: "pending"}, + }, + } + podAgeChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.age", + Title: "Age", + Units: "seconds", + Fam: "pod age", + Ctx: "k8s_state.pod_age", + Priority: prioPodAge, + Dims: module.Dims{ + {ID: "pod_%s_age", Name: "age"}, + }, + } + podContainersCountChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.containers_count", + Title: "Containers", + Units: "containers", + Fam: "pod containers", + Ctx: "k8s_state.pod_containers", + Priority: prioPodContainersCount, + Dims: module.Dims{ + {ID: "pod_%s_containers", Name: "containers"}, + {ID: 
"pod_%s_init_containers", Name: "init_containers"}, + }, + } + podContainersStateChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.containers_state", + Title: "Containers state", + Units: "containers", + Fam: "pod containers", + Ctx: "k8s_state.pod_containers_state", + Type: module.Stacked, + Priority: prioPodContainersState, + Dims: module.Dims{ + {ID: "pod_%s_containers_state_running", Name: "running"}, + {ID: "pod_%s_containers_state_waiting", Name: "waiting"}, + {ID: "pod_%s_containers_state_terminated", Name: "terminated"}, + }, + } + podInitContainersStateChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s.init_containers_state", + Title: "Init containers state", + Units: "containers", + Fam: "pod containers", + Ctx: "k8s_state.pod_init_containers_state", + Type: module.Stacked, + Priority: prioPodInitContainersState, + Dims: module.Dims{ + {ID: "pod_%s_init_containers_state_running", Name: "running"}, + {ID: "pod_%s_init_containers_state_waiting", Name: "waiting"}, + {ID: "pod_%s_init_containers_state_terminated", Name: "terminated"}, + }, + } +) + +func (ks *KubeState) newPodCharts(ps *podState) *module.Charts { + charts := podChartsTmpl.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, replaceDots(ps.id())) + c.Labels = ks.newPodChartLabels(ps) + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, ps.id()) + } + } + return charts +} + +func (ks *KubeState) newPodChartLabels(ps *podState) []module.Label { + labels := []module.Label{ + {Key: labelKeyNamespace, Value: ps.namespace, Source: module.LabelSourceK8s}, + {Key: labelKeyPodName, Value: ps.name, Source: module.LabelSourceK8s}, + {Key: labelKeyNodeName, Value: ps.nodeName, Source: module.LabelSourceK8s}, + {Key: labelKeyQoSClass, Value: ps.qosClass, Source: module.LabelSourceK8s}, + {Key: labelKeyControllerKind, Value: ps.controllerKind, Source: module.LabelSourceK8s}, + {Key: labelKeyControllerName, Value: ps.controllerName, Source: module.LabelSourceK8s}, + {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s}, + {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s}, + } + return labels +} + +func (ks *KubeState) addPodCharts(ps *podState) { + charts := ks.newPodCharts(ps) + if err := ks.Charts().Add(*charts...); err != nil { + ks.Warning(err) + } +} + +func (ks *KubeState) updatePodChartsNodeLabel(ps *podState) { + prefix := fmt.Sprintf("pod_%s", replaceDots(ps.id())) + for _, c := range *ks.Charts() { + if strings.HasPrefix(c.ID, prefix) { + updateNodeLabel(c, ps.nodeName) + c.MarkNotCreated() + } + } +} + +func updateNodeLabel(c *module.Chart, nodeName string) { + for i, l := range c.Labels { + if l.Key == labelKeyNodeName { + c.Labels[i].Value = nodeName + break + } + } +} + +func (ks *KubeState) removePodCharts(ps *podState) { + prefix := fmt.Sprintf("pod_%s", replaceDots(ps.id())) + for _, c := range *ks.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +var ( + containerReadinessStateChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.readiness_state", + Title: "Readiness state", + Units: "state", + Fam: "container readiness", + Ctx: "k8s_state.pod_container_readiness_state", + Priority: prioPodContainerReadinessState, + Dims: module.Dims{ + {ID: "pod_%s_container_%s_readiness", Name: "ready"}, + }, + } + containerRestartsChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.restarts", + Title: "Restarts", + Units: "restarts", + Fam: "container restarts", + 
Ctx: "k8s_state.pod_container_restarts", + Priority: prioPodContainerRestarts, + Dims: module.Dims{ + {ID: "pod_%s_container_%s_restarts", Name: "restarts"}, + }, + } + containersStateChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.state", + Title: "Container state", + Units: "state", + Fam: "container state", + Ctx: "k8s_state.pod_container_state", + Priority: prioPodContainerState, + Dims: module.Dims{ + {ID: "pod_%s_container_%s_state_running", Name: "running"}, + {ID: "pod_%s_container_%s_state_waiting", Name: "waiting"}, + {ID: "pod_%s_container_%s_state_terminated", Name: "terminated"}, + }, + } + containersStateWaitingChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.state_waiting_reason", + Title: "Container waiting state reason", + Units: "state", + Fam: "container waiting reason", + Ctx: "k8s_state.pod_container_waiting_state_reason", + Priority: prioPodContainerWaitingStateReason, + } + containersStateTerminatedChartTmpl = module.Chart{ + IDSep: true, + ID: "pod_%s_container_%s.state_terminated_reason", + Title: "Container terminated state reason", + Units: "state", + Fam: "container terminated reason", + Ctx: "k8s_state.pod_container_terminated_state_reason", + Priority: prioPodContainerTerminatedStateReason, + } +) + +func (ks *KubeState) newContainerCharts(ps *podState, cs *containerState) *module.Charts { + charts := containerChartsTmpl.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, replaceDots(ps.id()), cs.name) + c.Labels = ks.newContainerChartLabels(ps, cs) + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, ps.id(), cs.name) + } + } + return charts +} + +func (ks *KubeState) newContainerChartLabels(ps *podState, cs *containerState) []module.Label { + labels := ks.newPodChartLabels(ps) + labels = append( + labels, module.Label{Key: labelKeyContainerName, Value: cs.name, Source: module.LabelSourceK8s}, + ) + return labels +} + +func (ks *KubeState) addContainerCharts(ps *podState, cs *containerState) { + charts := ks.newContainerCharts(ps, cs) + if err := ks.Charts().Add(*charts...); err != nil { + ks.Warning(err) + } +} + +func (ks *KubeState) addContainerWaitingStateReasonToChart(ps *podState, cs *containerState, reason string) { + id := fmt.Sprintf(containersStateWaitingChartTmpl.ID, replaceDots(ps.id()), cs.name) + c := ks.Charts().Get(id) + if c == nil { + ks.Warningf("chart '%s' does not exist", id) + return + } + dim := &module.Dim{ + ID: fmt.Sprintf("pod_%s_container_%s_state_waiting_reason_%s", ps.id(), cs.name, reason), + Name: reason, + } + if err := c.AddDim(dim); err != nil { + ks.Warning(err) + return + } + c.MarkNotCreated() +} + +func (ks *KubeState) addContainerTerminatedStateReasonToChart(ps *podState, cs *containerState, reason string) { + id := fmt.Sprintf(containersStateTerminatedChartTmpl.ID, replaceDots(ps.id()), cs.name) + c := ks.Charts().Get(id) + if c == nil { + ks.Warningf("chart '%s' does not exist", id) + return + } + dim := &module.Dim{ + ID: fmt.Sprintf("pod_%s_container_%s_state_terminated_reason_%s", ps.id(), cs.name, reason), + Name: reason, + } + if err := c.AddDim(dim); err != nil { + ks.Warning(err) + return + } + c.MarkNotCreated() +} + +var discoveryStatusChart = module.Chart{ + ID: "discovery_discoverers_state", + Title: "Running discoverers state", + Units: "state", + Fam: "discovery", + Ctx: "k8s_state.discovery_discoverers_state", + Priority: prioDiscoveryDiscovererState, + Opts: module.Opts{Hidden: true}, + Dims: module.Dims{ + {ID: "discovery_node_discoverer_state", 
Name: "node"}, + {ID: "discovery_pod_discoverer_state", Name: "pod"}, + }, +} + +var reDots = regexp.MustCompile(`\.`) + +func replaceDots(v string) string { + return reDots.ReplaceAllString(v, "-") +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/client.go b/src/go/collectors/go.d.plugin/modules/k8s_state/client.go new file mode 100644 index 00000000000000..315e823feeec2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/client.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "errors" + "os" + "path/filepath" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + "github.com/mattn/go-isatty" +) + +const ( + envKubeServiceHost = "KUBERNETES_SERVICE_HOST" + envKubeServicePort = "KUBERNETES_SERVICE_PORT" +) + +// newKubeClient prefers the in-cluster configuration (the Kubernetes service env vars are set) and falls back to ~/.kube/config only when running interactively. +func newKubeClient() (kubernetes.Interface, error) { + if os.Getenv(envKubeServiceHost) != "" && os.Getenv(envKubeServicePort) != "" { + return newKubeClientInCluster() + } + if isatty.IsTerminal(os.Stdout.Fd()) { + return newKubeClientOutOfCluster() + } + return nil, errors.New("cannot create Kubernetes client: not inside a cluster") +} + +func newKubeClientInCluster() (*kubernetes.Clientset, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + config.UserAgent = "Netdata/kube-state" + return kubernetes.NewForConfig(config) +} + +func newKubeClientOutOfCluster() (*kubernetes.Clientset, error) { + home := homeDir() + if home == "" { + return nil, errors.New("couldn't find home directory") + } + + configPath := filepath.Join(home, ".kube", "config") + config, err := clientcmd.BuildConfigFromFlags("", configPath) + if err != nil { + return nil, err + } + + config.UserAgent = "Netdata/kube-state" + return kubernetes.NewForConfig(config) +} + +func homeDir() string { + if h := os.Getenv("HOME"); h != "" { + return h + } + return os.Getenv("USERPROFILE") // Windows +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/cluster_meta.go b/src/go/collectors/go.d.plugin/modules/k8s_state/cluster_meta.go new file mode 100644 index 00000000000000..e7eb809cc147d8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/cluster_meta.go @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "fmt" + "io" + "net/http" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (ks *KubeState) getKubeClusterID() string { + ns, err := ks.client.CoreV1().Namespaces().Get(ks.ctx, "kube-system", metav1.GetOptions{}) + if err != nil { + ks.Warningf("error on getting 'kube-system' namespace UID: %v", err) + return "" + } + return string(ns.UID) +} + +func (ks *KubeState) getKubeClusterName() string { + client := http.Client{Timeout: time.Second} + n, err := getGKEKubeClusterName(client) + if err != nil { + ks.Debugf("error on getting GKE cluster name: %v", err) + } + return n +} + +func getGKEKubeClusterName(client http.Client) (string, error) { + id, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/project/project-id") + if err != nil { + return "", err + } + loc, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/instance/attributes/cluster-location") + if err != nil { + return "", err + } + name, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/instance/attributes/cluster-name") + if err != nil { + return "", err + } + + return fmt.Sprintf("gke_%s_%s_%s", id, 
loc, name), nil +} + +func doMetaGKEHTTPReq(client http.Client, url string) (string, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return "", err + } + + req.Header.Add("Metadata-Flavor", "Google") + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer closeHTTPRespBody(resp) + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("'%s' returned HTTP status code %d", url, resp.StatusCode) + } + + bs, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + s := string(bs) + if s == "" { + return "", fmt.Errorf("an empty response from '%s'", url) + } + + return s, nil +} + +func closeHTTPRespBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/collect.go b/src/go/collectors/go.d.plugin/modules/k8s_state/collect.go new file mode 100644 index 00000000000000..baf853867e7909 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/collect.go @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + + corev1 "k8s.io/api/core/v1" +) + +const precision = 1000 + +func (ks *KubeState) collect() (map[string]int64, error) { + if ks.discoverer == nil { + return nil, errors.New("nil discoverer") + } + + ks.once.Do(func() { + ks.startTime = time.Now() + in := make(chan resource) + + ks.wg.Add(1) + go func() { defer ks.wg.Done(); ks.runUpdateState(in) }() + + ks.wg.Add(1) + go func() { defer ks.wg.Done(); ks.discoverer.run(ks.ctx, in) }() + + ks.kubeClusterID = ks.getKubeClusterID() + ks.kubeClusterName = ks.getKubeClusterName() + if chart := ks.Charts().Get(discoveryStatusChart.ID); chart != nil { + chart.Labels = []module.Label{ + {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s}, + {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s}, + } + } + }) + + mx := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + } + + if !ks.discoverer.ready() || time.Since(ks.startTime) < ks.initDelay { + return mx, nil + } + + ks.state.Lock() + defer ks.state.Unlock() + + ks.collectKubeState(mx) + + return mx, nil +} + +func (ks *KubeState) collectKubeState(mx map[string]int64) { + for _, ns := range ks.state.nodes { + ns.resetStats() + } + ks.collectPodsState(mx) + ks.collectNodesState(mx) +} + +func (ks *KubeState) collectPodsState(mx map[string]int64) { + now := time.Now() + for _, ps := range ks.state.pods { + if ps.deleted { + delete(ks.state.pods, podSource(ps.namespace, ps.name)) + ks.removePodCharts(ps) + continue + } + if ps.new { + ps.new = false + ks.addPodCharts(ps) + ps.unscheduled = ps.nodeName == "" + } else if ps.unscheduled && ps.nodeName != "" { + ps.unscheduled = false + ks.updatePodChartsNodeLabel(ps) + } + + ns := ks.state.nodes[nodeSource(ps.nodeName)] + if ns != nil { + ns.stats.pods++ + ns.stats.reqCPU += ps.reqCPU + ns.stats.limitCPU += ps.limitCPU + ns.stats.reqMem += ps.reqMem + ns.stats.limitMem += ps.limitMem + ns.stats.podsCondPodReady += condStatusToInt(ps.condPodReady) + ns.stats.podsCondPodScheduled += condStatusToInt(ps.condPodScheduled) + ns.stats.podsCondPodInitialized += condStatusToInt(ps.condPodInitialized) + ns.stats.podsCondContainersReady += condStatusToInt(ps.condContainersReady) + 
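// Roll this pod's readiness, phase, and container-state counts into its node's aggregate stats. +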
ns.stats.podsReadinessReady += boolToInt(ps.condPodReady == corev1.ConditionTrue) + ns.stats.podsReadinessUnready += boolToInt(ps.condPodReady != corev1.ConditionTrue) + ns.stats.podsPhasePending += boolToInt(ps.phase == corev1.PodPending) + ns.stats.podsPhaseRunning += boolToInt(ps.phase == corev1.PodRunning) + ns.stats.podsPhaseSucceeded += boolToInt(ps.phase == corev1.PodSucceeded) + ns.stats.podsPhaseFailed += boolToInt(ps.phase == corev1.PodFailed) + for _, cs := range ps.initContainers { + ns.stats.initContainers++ + ns.stats.initContStateRunning += boolToInt(cs.stateRunning) + ns.stats.initContStateWaiting += boolToInt(cs.stateWaiting) + ns.stats.initContStateTerminated += boolToInt(cs.stateTerminated) + } + for _, cs := range ps.containers { + ns.stats.containers++ + ns.stats.contStateRunning += boolToInt(cs.stateRunning) + ns.stats.contStateWaiting += boolToInt(cs.stateWaiting) + ns.stats.contStateTerminated += boolToInt(cs.stateTerminated) + } + } + + px := fmt.Sprintf("pod_%s_", ps.id()) + + mx[px+"cond_podready"] = condStatusToInt(ps.condPodReady) + mx[px+"cond_podscheduled"] = condStatusToInt(ps.condPodScheduled) + mx[px+"cond_podinitialized"] = condStatusToInt(ps.condPodInitialized) + mx[px+"cond_containersready"] = condStatusToInt(ps.condContainersReady) + mx[px+"phase_running"] = boolToInt(ps.phase == corev1.PodRunning) + mx[px+"phase_failed"] = boolToInt(ps.phase == corev1.PodFailed) + mx[px+"phase_succeeded"] = boolToInt(ps.phase == corev1.PodSucceeded) + mx[px+"phase_pending"] = boolToInt(ps.phase == corev1.PodPending) + mx[px+"age"] = int64(now.Sub(ps.creationTime).Seconds()) + mx[px+"cpu_requests_used"] = ps.reqCPU + mx[px+"cpu_limits_used"] = ps.limitCPU + mx[px+"mem_requests_used"] = ps.reqMem + mx[px+"mem_limits_used"] = ps.limitMem + + mx[px+"init_containers"] = int64(len(ps.initContainers)) + mx[px+"containers"] = int64(len(ps.containers)) + + mx[px+"init_containers_state_running"] = 0 + mx[px+"init_containers_state_waiting"] = 0 + mx[px+"init_containers_state_terminated"] = 0 + for _, cs := range ps.initContainers { + mx[px+"init_containers_state_running"] += boolToInt(cs.stateRunning) + mx[px+"init_containers_state_waiting"] += boolToInt(cs.stateWaiting) + mx[px+"init_containers_state_terminated"] += boolToInt(cs.stateTerminated) + } + mx[px+"containers_state_running"] = 0 + mx[px+"containers_state_waiting"] = 0 + mx[px+"containers_state_terminated"] = 0 + for _, cs := range ps.containers { + if cs.new { + cs.new = false + ks.addContainerCharts(ps, cs) + } + mx[px+"containers_state_running"] += boolToInt(cs.stateRunning) + mx[px+"containers_state_waiting"] += boolToInt(cs.stateWaiting) + mx[px+"containers_state_terminated"] += boolToInt(cs.stateTerminated) + + ppx := fmt.Sprintf("%scontainer_%s_", px, cs.name) + mx[ppx+"state_running"] = boolToInt(cs.stateRunning) + mx[ppx+"state_waiting"] = boolToInt(cs.stateWaiting) + mx[ppx+"state_terminated"] = boolToInt(cs.stateTerminated) + mx[ppx+"readiness"] = boolToInt(cs.ready) + mx[ppx+"restarts"] = cs.restarts + for _, r := range cs.stateWaitingReasons { + if r.new { + r.new = false + ks.addContainerWaitingStateReasonToChart(ps, cs, r.reason) + } + mx[ppx+"state_waiting_reason_"+r.reason] = boolToInt(r.active) + } + for _, r := range cs.stateTerminatedReasons { + if r.new { + r.new = false + ks.addContainerTerminatedStateReasonToChart(ps, cs, r.reason) + } + mx[ppx+"state_terminated_reason_"+r.reason] = boolToInt(r.active) + } + } + } +} + +func (ks *KubeState) collectNodesState(mx map[string]int64) { + now := 
time.Now() + for _, ns := range ks.state.nodes { + if ns.deleted { + delete(ks.state.nodes, nodeSource(ns.name)) + ks.removeNodeCharts(ns) + continue + } + if ns.new { + ns.new = false + ks.addNodeCharts(ns) + } + + px := fmt.Sprintf("node_%s_", ns.id()) + + for typ, cond := range ns.conditions { + if cond.new { + cond.new = false + ks.addNodeConditionToCharts(ns, typ) + } + mx[px+"cond_"+strings.ToLower(typ)] = condStatusToInt(cond.status) + } + + mx[px+"age"] = int64(now.Sub(ns.creationTime).Seconds()) + mx[px+"alloc_pods_util"] = calcPercentage(ns.stats.pods, ns.allocatablePods) + mx[px+"pods_readiness_ready"] = ns.stats.podsReadinessReady + mx[px+"pods_readiness_unready"] = ns.stats.podsReadinessUnready + mx[px+"pods_readiness"] = calcPercentage(ns.stats.podsReadinessReady, ns.stats.pods) + mx[px+"pods_phase_running"] = ns.stats.podsPhaseRunning + mx[px+"pods_phase_failed"] = ns.stats.podsPhaseFailed + mx[px+"pods_phase_succeeded"] = ns.stats.podsPhaseSucceeded + mx[px+"pods_phase_pending"] = ns.stats.podsPhasePending + mx[px+"pods_cond_podready"] = ns.stats.podsCondPodReady + mx[px+"pods_cond_podscheduled"] = ns.stats.podsCondPodScheduled + mx[px+"pods_cond_podinitialized"] = ns.stats.podsCondPodInitialized + mx[px+"pods_cond_containersready"] = ns.stats.podsCondContainersReady + mx[px+"schedulability_schedulable"] = boolToInt(!ns.unSchedulable) + mx[px+"schedulability_unschedulable"] = boolToInt(ns.unSchedulable) + mx[px+"alloc_pods_available"] = ns.allocatablePods - ns.stats.pods + mx[px+"alloc_pods_allocated"] = ns.stats.pods + mx[px+"alloc_cpu_requests_util"] = calcPercentage(ns.stats.reqCPU, ns.allocatableCPU) + mx[px+"alloc_cpu_limits_util"] = calcPercentage(ns.stats.limitCPU, ns.allocatableCPU) + mx[px+"alloc_mem_requests_util"] = calcPercentage(ns.stats.reqMem, ns.allocatableMem) + mx[px+"alloc_mem_limits_util"] = calcPercentage(ns.stats.limitMem, ns.allocatableMem) + mx[px+"alloc_cpu_requests_used"] = ns.stats.reqCPU + mx[px+"alloc_cpu_limits_used"] = ns.stats.limitCPU + mx[px+"alloc_mem_requests_used"] = ns.stats.reqMem + mx[px+"alloc_mem_limits_used"] = ns.stats.limitMem + mx[px+"init_containers"] = ns.stats.initContainers + mx[px+"containers"] = ns.stats.containers + mx[px+"containers_state_running"] = ns.stats.contStateRunning + mx[px+"containers_state_waiting"] = ns.stats.contStateWaiting + mx[px+"containers_state_terminated"] = ns.stats.contStateTerminated + mx[px+"init_containers_state_running"] = ns.stats.initContStateRunning + mx[px+"init_containers_state_waiting"] = ns.stats.initContStateWaiting + mx[px+"init_containers_state_terminated"] = ns.stats.initContStateTerminated + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} + +func condStatusToInt(cs corev1.ConditionStatus) int64 { + switch cs { + case corev1.ConditionFalse: + return 0 + case corev1.ConditionTrue: + return 1 + case corev1.ConditionUnknown: + return 0 + default: + return 0 + } +} + +// calcPercentage returns value/total as a percentage in fixed-point form, scaled by 'precision'; chart dimensions divide by 'precision' to recover the fractional value. +func calcPercentage(value, total int64) int64 { + if total == 0 { + return 0 + } + return int64(float64(value) / float64(total) * 100 * precision) +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/config_schema.json b/src/go/collectors/go.d.plugin/modules/k8s_state/config_schema.json new file mode 100644 index 00000000000000..42b6b0fd686ca5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/config_schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": 
"go.d/k8s_state job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/discover_kubernetes.go b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_kubernetes.go new file mode 100644 index 00000000000000..a3cf90d6097fc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_kubernetes.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "context" + "os" + "sync" + "time" + + "github.com/netdata/go.d.plugin/logger" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +func newKubeDiscovery(client kubernetes.Interface, l *logger.Logger) *kubeDiscovery { + return &kubeDiscovery{ + client: client, + Logger: l, + readyCh: make(chan struct{}), + stopCh: make(chan struct{}), + } +} + +type kubeDiscovery struct { + *logger.Logger + client kubernetes.Interface + discoverers []discoverer + readyCh chan struct{} + stopCh chan struct{} +} + +func (d *kubeDiscovery) run(ctx context.Context, in chan<- resource) { + d.Info("kube_discoverer is started") + defer func() { close(d.stopCh); d.Info("kube_discoverer is stopped") }() + + d.discoverers = d.setupDiscoverers(ctx) + + var wg sync.WaitGroup + updates := make(chan resource) + + for _, dd := range d.discoverers { + wg.Add(1) + go func(dd discoverer) { defer wg.Done(); dd.run(ctx, updates) }(dd) + } + + wg.Add(1) + go func() { defer wg.Done(); d.runDiscover(ctx, updates, in) }() + + close(d.readyCh) + wg.Wait() + <-ctx.Done() +} + +func (d *kubeDiscovery) ready() bool { + if !isChanClosed(d.readyCh) { + return false + } + for _, dd := range d.discoverers { + if !dd.ready() { + return false + } + } + return true +} + +func (d *kubeDiscovery) stopped() bool { + if !isChanClosed(d.stopCh) { + return false + } + for _, dd := range d.discoverers { + if !dd.stopped() { + return false + } + } + return true +} + +func (d *kubeDiscovery) runDiscover(ctx context.Context, updates chan resource, in chan<- resource) { + for { + select { + case <-ctx.Done(): + return + case r := <-updates: + select { + case <-ctx.Done(): + return + case in <- r: + } + } + } +} + +const resyncPeriod = 10 * time.Minute + +var ( + myNodeName = os.Getenv("MY_NODE_NAME") +) + +func (d *kubeDiscovery) setupDiscoverers(ctx context.Context) []discoverer { + node := d.client.CoreV1().Nodes() + nodeWatcher := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return node.List(ctx, options) }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return node.Watch(ctx, options) }, + } + + pod := d.client.CoreV1().Pods(corev1.NamespaceAll) + podWatcher := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if myNodeName != "" { + options.FieldSelector = "spec.nodeName=" + myNodeName + } + return pod.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if myNodeName != "" { + options.FieldSelector = "spec.nodeName=" + myNodeName + } + return pod.Watch(ctx, options) + }, + } + + return []discoverer{ + newNodeDiscoverer(cache.NewSharedInformer(nodeWatcher, &corev1.Node{}, resyncPeriod), d.Logger), + 
newPodDiscoverer(cache.NewSharedInformer(podWatcher, &corev1.Pod{}, resyncPeriod), d.Logger), + } +} + +func enqueue(queue *workqueue.Type, obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + return + } + queue.Add(key) +} + +func send(ctx context.Context, in chan<- resource, r resource) { + if r == nil { + return + } + select { + case <-ctx.Done(): + case in <- r: + } +} + +func isChanClosed(ch chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/discover_node.go b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_node.go new file mode 100644 index 00000000000000..f8030bfe544b6c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_node.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "context" + + "github.com/netdata/go.d.plugin/logger" + + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +func newNodeDiscoverer(si cache.SharedInformer, l *logger.Logger) *nodeDiscoverer { + if si == nil { + panic("nil node shared informer") + } + + queue := workqueue.NewNamed("node") + si.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, + DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + }) + + return &nodeDiscoverer{ + Logger: l, + informer: si, + queue: queue, + readyCh: make(chan struct{}), + stopCh: make(chan struct{}), + } +} + +type nodeResource struct { + src string + val interface{} +} + +func (r nodeResource) source() string { return r.src } +func (r nodeResource) kind() kubeResourceKind { return kubeResourceNode } +func (r nodeResource) value() interface{} { return r.val } + +type nodeDiscoverer struct { + *logger.Logger + informer cache.SharedInformer + queue *workqueue.Type + readyCh chan struct{} + stopCh chan struct{} +} + +func (d *nodeDiscoverer) run(ctx context.Context, in chan<- resource) { + d.Info("node_discoverer is started") + defer func() { close(d.stopCh); d.Info("node_discoverer is stopped") }() + + defer d.queue.ShutDown() + + go d.informer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), d.informer.HasSynced) { + return + } + + go d.runDiscover(ctx, in) + close(d.readyCh) + + <-ctx.Done() +} + +func (d *nodeDiscoverer) ready() bool { return isChanClosed(d.readyCh) } +func (d *nodeDiscoverer) stopped() bool { return isChanClosed(d.stopCh) } + +func (d *nodeDiscoverer) runDiscover(ctx context.Context, in chan<- resource) { + for { + item, shutdown := d.queue.Get() + if shutdown { + return + } + + func() { + defer d.queue.Done(item) + + key := item.(string) + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + item, exists, err := d.informer.GetStore().GetByKey(key) + if err != nil { + return + } + + r := &nodeResource{src: nodeSource(name)} + if exists { + r.val = item + } + send(ctx, in, r) + }() + } +} + +func nodeSource(name string) string { + return "k8s/node/" + name +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/discover_pod.go b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_pod.go new file mode 100644 index 00000000000000..aa44795f3c83d9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/discover_pod.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + 
"context" + + "github.com/netdata/go.d.plugin/logger" + + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +func newPodDiscoverer(si cache.SharedInformer, l *logger.Logger) *podDiscoverer { + if si == nil { + panic("nil pod shared informer") + } + + queue := workqueue.NewNamed("pod") + si.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { enqueue(queue, obj) }, + UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) }, + DeleteFunc: func(obj interface{}) { enqueue(queue, obj) }, + }) + + return &podDiscoverer{ + Logger: l, + informer: si, + queue: queue, + readyCh: make(chan struct{}), + stopCh: make(chan struct{}), + } +} + +type podResource struct { + src string + val interface{} +} + +func (r podResource) source() string { return r.src } +func (r podResource) kind() kubeResourceKind { return kubeResourcePod } +func (r podResource) value() interface{} { return r.val } + +type podDiscoverer struct { + *logger.Logger + informer cache.SharedInformer + queue *workqueue.Type + readyCh chan struct{} + stopCh chan struct{} +} + +func (d *podDiscoverer) run(ctx context.Context, in chan<- resource) { + d.Info("pod_discoverer is started") + defer func() { close(d.stopCh); d.Info("pod_discoverer is stopped") }() + + defer d.queue.ShutDown() + + go d.informer.Run(ctx.Done()) + + if !cache.WaitForCacheSync(ctx.Done(), d.informer.HasSynced) { + return + } + + go d.runDiscover(ctx, in) + close(d.readyCh) + + <-ctx.Done() +} + +func (d *podDiscoverer) ready() bool { return isChanClosed(d.readyCh) } +func (d *podDiscoverer) stopped() bool { return isChanClosed(d.stopCh) } + +func (d *podDiscoverer) runDiscover(ctx context.Context, in chan<- resource) { + for { + item, shutdown := d.queue.Get() + if shutdown { + return + } + + func() { + defer d.queue.Done(item) + + key := item.(string) + ns, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return + } + + item, exists, err := d.informer.GetStore().GetByKey(key) + if err != nil { + return + } + + r := &podResource{src: podSource(ns, name)} + if exists { + r.val = item + } + send(ctx, in, r) + }() + } +} + +func podSource(namespace, name string) string { + return "k8s/pod/" + namespace + "/" + name +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/init.go b/src/go/collectors/go.d.plugin/modules/k8s_state/init.go new file mode 100644 index 00000000000000..2892da1c60a299 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/init.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "k8s.io/client-go/kubernetes" +) + +func (ks KubeState) initClient() (kubernetes.Interface, error) { + return ks.newKubeClient() +} + +func (ks *KubeState) initDiscoverer(client kubernetes.Interface) discoverer { + return newKubeDiscovery(client, ks.Logger) +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/integrations/kubernetes_cluster_state.md b/src/go/collectors/go.d.plugin/modules/k8s_state/integrations/kubernetes_cluster_state.md new file mode 100644 index 00000000000000..ad1acd511c0971 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/integrations/kubernetes_cluster_state.md @@ -0,0 +1,218 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_state/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/k8s_state/metadata.yaml" +sidebar_label: "Kubernetes Cluster State" +learn_status: "Published" +learn_rel_path: "Data 
Collection/Kubernetes" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kubernetes Cluster State + + +<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/> + + +Plugin: go.d.plugin +Module: k8s_state + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Kubernetes Nodes, Pods, and Containers. + + + + +This collector is supported on all platforms. + +This collector only supports collecting metrics from a single instance of this integration. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per node + +These metrics refer to the Node. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. | +| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. | +| k8s_node_name | Node name. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| k8s_state.node_allocatable_cpu_requests_utilization | requests | % | +| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu | +| k8s_state.node_allocatable_cpu_limits_utilization | limits | % | +| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu | +| k8s_state.node_allocatable_mem_requests_utilization | requests | % | +| k8s_state.node_allocatable_mem_requests_used | requests | bytes | +| k8s_state.node_allocatable_mem_limits_utilization | limits | % | +| k8s_state.node_allocatable_mem_limits_used | limits | bytes | +| k8s_state.node_allocatable_pods_utilization | allocated | % | +| k8s_state.node_allocatable_pods_usage | available, allocated | pods | +| k8s_state.node_condition | a dimension per condition | status | +| k8s_state.node_schedulability | schedulable, unschedulable | state | +| k8s_state.node_pods_readiness | ready | % | +| k8s_state.node_pods_readiness_state | ready, unready | pods | +| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods | +| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods | +| k8s_state.node_containers | containers, init_containers | containers | +| k8s_state.node_containers_state | running, waiting, terminated | containers | +| k8s_state.node_init_containers_state | running, waiting, terminated | containers | +| k8s_state.node_age | age | seconds | + +### Per pod + +These metrics refer to the Pod. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. | +| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. | +| k8s_node_name | Node name. | +| k8s_namespace | Namespace. | +| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). | +| k8s_controller_name | Controller name. | +| k8s_pod_name | Pod name. | +| k8s_qos_class | Pod QoS class (burstable, guaranteed, besteffort). 
| + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| k8s_state.pod_cpu_requests_used | requests | millicpu | +| k8s_state.pod_cpu_limits_used | limits | millicpu | +| k8s_state.pod_mem_requests_used | requests | bytes | +| k8s_state.pod_mem_limits_used | limits | bytes | +| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state | +| k8s_state.pod_phase | running, failed, succeeded, pending | state | +| k8s_state.pod_age | age | seconds | +| k8s_state.pod_containers | containers, init_containers | containers | +| k8s_state.pod_containers_state | running, waiting, terminated | containers | +| k8s_state.pod_init_containers_state | running, waiting, terminated | containers | + +### Per container + +These metrics refer to the Pod container. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. | +| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. | +| k8s_node_name | Node name. | +| k8s_namespace | Namespace. | +| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). | +| k8s_controller_name | Controller name. | +| k8s_pod_name | Pod name. | +| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). | +| k8s_container_name | Container name. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| k8s_state.pod_container_readiness_state | ready | state | +| k8s_state.pod_container_restarts | restarts | restarts | +| k8s_state.pod_container_state | running, waiting, terminated | state | +| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state | +| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/k8s_state.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/k8s_state.conf +``` +#### Options + + + +There are no configuration options. + +#### Examples +There are no configuration examples. + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
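+  The Agent runs the plugin as the `netdata` user, so debugging under the same account reproduces the permissions and environment the collector normally has.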
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m k8s_state + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state.go b/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state.go new file mode 100644 index 00000000000000..3a3046e47d26eb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "context" + _ "embed" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + + "k8s.io/client-go/kubernetes" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("k8s_state", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + Disabled: true, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *KubeState { + return &KubeState{ + initDelay: time.Second * 3, + newKubeClient: newKubeClient, + charts: baseCharts.Copy(), + once: &sync.Once{}, + wg: &sync.WaitGroup{}, + state: newKubeState(), + } +} + +type ( + discoverer interface { + run(ctx context.Context, in chan<- resource) + ready() bool + stopped() bool + } + + KubeState struct { + module.Base + + newKubeClient func() (kubernetes.Interface, error) + + startTime time.Time + initDelay time.Duration + + charts *module.Charts + + client kubernetes.Interface + once *sync.Once + wg *sync.WaitGroup + discoverer discoverer + ctx context.Context + ctxCancel context.CancelFunc + state *kubeState + + kubeClusterID string + kubeClusterName string + } +) + +func (ks *KubeState) Init() bool { + client, err := ks.initClient() + if err != nil { + ks.Errorf("client initialization: %v", err) + return false + } + ks.client = client + + ks.ctx, ks.ctxCancel = context.WithCancel(context.Background()) + + ks.discoverer = ks.initDiscoverer(ks.client) + + return true +} + +func (ks *KubeState) Check() bool { + if ks.client == nil || ks.discoverer == nil { + ks.Error("not initialized job") + return false + } + + ver, err := ks.client.Discovery().ServerVersion() + if err != nil { + ks.Errorf("failed to connect to the Kubernetes API server: %v", err) + return false + } + + ks.Infof("successfully connected to the Kubernetes API server '%s'", ver) + return true +} + +func (ks *KubeState) Charts() *module.Charts { + return ks.charts +} + +func (ks *KubeState) Collect() map[string]int64 { + ms, err := ks.collect() + if err != nil { + ks.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (ks *KubeState) Cleanup() { + if ks.ctxCancel == nil { + return + } + ks.ctxCancel() + + c := make(chan struct{}) + go func() { defer close(c); ks.wg.Wait() }() + + t := time.NewTimer(time.Second * 3) + defer t.Stop() + + select { + case <-c: + return + case <-t.C: + return + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state_test.go b/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state_test.go new file mode 100644 index 00000000000000..451028532b4ce7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/kube_state_test.go @@ -0,0 +1,844 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apiresource "k8s.io/apimachinery/pkg/api/resource" + 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestKubeState_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func() *KubeState + }{ + "success when no error on initializing K8s client": { + wantFail: false, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + return ks + }, + }, + "fail when get an error on initializing K8s client": { + wantFail: true, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return nil, errors.New("newKubeClient() error") } + return ks + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ks := test.prepare() + + if test.wantFail { + assert.False(t, ks.Init()) + } else { + assert.True(t, ks.Init()) + } + }) + } +} + +func TestKubeState_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func() *KubeState + }{ + "success when connected to the K8s API": { + wantFail: false, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + return ks + }, + }, + "fail when not connected to the K8s API": { + wantFail: true, + prepare: func() *KubeState { + ks := New() + client := &brokenInfoKubeClient{fake.NewSimpleClientset()} + ks.newKubeClient = func() (kubernetes.Interface, error) { return client, nil } + return ks + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ks := test.prepare() + require.True(t, ks.Init()) + + if test.wantFail { + assert.False(t, ks.Check()) + } else { + assert.True(t, ks.Check()) + } + }) + } +} + +func TestKubeState_Charts(t *testing.T) { + ks := New() + + assert.NotEmpty(t, *ks.Charts()) +} + +func TestKubeState_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func() *KubeState + doInit bool + doCollect bool + }{ + "before init": { + doInit: false, + doCollect: false, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + return ks + }, + }, + "after init": { + doInit: true, + doCollect: false, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + return ks + }, + }, + "after collect": { + doInit: true, + doCollect: true, + prepare: func() *KubeState { + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil } + return ks + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ks := test.prepare() + + if test.doInit { + _ = ks.Init() + } + if test.doCollect { + _ = ks.Collect() + time.Sleep(ks.initDelay) + } + + assert.NotPanics(t, ks.Cleanup) + time.Sleep(time.Second) + if test.doCollect { + assert.True(t, ks.discoverer.stopped()) + } + }) + } +} + +func TestKubeState_Collect(t *testing.T) { + type ( + testCaseStep func(t *testing.T, ks *KubeState) + testCase struct { + client kubernetes.Interface + steps []testCaseStep + } + ) + + tests := map[string]struct { + create func(t *testing.T) testCase + }{ + "Node only": { + create: func(t *testing.T) testCase { + client := 
fake.NewSimpleClientset( + newNode("node01"), + ) + + step1 := func(t *testing.T, ks *KubeState) { + mx := ks.Collect() + expected := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 3, + "node_node01_alloc_cpu_limits_used": 0, + "node_node01_alloc_cpu_limits_util": 0, + "node_node01_alloc_cpu_requests_used": 0, + "node_node01_alloc_cpu_requests_util": 0, + "node_node01_alloc_mem_limits_used": 0, + "node_node01_alloc_mem_limits_util": 0, + "node_node01_alloc_mem_requests_used": 0, + "node_node01_alloc_mem_requests_util": 0, + "node_node01_alloc_pods_allocated": 0, + "node_node01_alloc_pods_available": 110, + "node_node01_alloc_pods_util": 0, + "node_node01_cond_diskpressure": 0, + "node_node01_cond_memorypressure": 0, + "node_node01_cond_networkunavailable": 0, + "node_node01_cond_pidpressure": 0, + "node_node01_cond_ready": 1, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "node_node01_containers": 0, + "node_node01_containers_state_running": 0, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 0, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 0, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 0, + "node_node01_pods_cond_podinitialized": 0, + "node_node01_pods_cond_podready": 0, + "node_node01_pods_cond_podscheduled": 0, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 0, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 0, + "node_node01_pods_readiness_ready": 0, + "node_node01_pods_readiness_unready": 0, + } + copyAge(expected, mx) + assert.Equal(t, expected, mx) + assert.Equal(t, + len(nodeChartsTmpl)+len(baseCharts), + len(*ks.Charts()), + ) + } + + return testCase{ + client: client, + steps: []testCaseStep{step1}, + } + }, + }, + "Pod only": { + create: func(t *testing.T) testCase { + pod := newPod("node01", "pod01") + client := fake.NewSimpleClientset( + pod, + ) + + step1 := func(t *testing.T, ks *KubeState) { + mx := ks.Collect() + expected := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "pod_default_pod01_age": 3, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_mem_limits_used": 419430400, + "pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_cond_containersready": 1, + "pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + "pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + "pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + "pod_default_pod01_containers_state_waiting": 0, + 
"pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + } + copyAge(expected, mx) + + assert.Equal(t, expected, mx) + assert.Equal(t, + len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts), + len(*ks.Charts()), + ) + } + + return testCase{ + client: client, + steps: []testCaseStep{step1}, + } + }, + }, + "Nodes and Pods": { + create: func(t *testing.T) testCase { + node := newNode("node01") + pod := newPod(node.Name, "pod01") + client := fake.NewSimpleClientset( + node, + pod, + ) + + step1 := func(t *testing.T, ks *KubeState) { + mx := ks.Collect() + expected := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 3, + "node_node01_alloc_cpu_limits_used": 400, + "node_node01_alloc_cpu_limits_util": 11428, + "node_node01_alloc_cpu_requests_used": 200, + "node_node01_alloc_cpu_requests_util": 5714, + "node_node01_alloc_mem_limits_used": 419430400, + "node_node01_alloc_mem_limits_util": 11428, + "node_node01_alloc_mem_requests_used": 209715200, + "node_node01_alloc_mem_requests_util": 5714, + "node_node01_alloc_pods_allocated": 1, + "node_node01_alloc_pods_available": 109, + "node_node01_alloc_pods_util": 909, + "node_node01_cond_diskpressure": 0, + "node_node01_cond_memorypressure": 0, + "node_node01_cond_networkunavailable": 0, + "node_node01_cond_pidpressure": 0, + "node_node01_cond_ready": 1, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "node_node01_containers": 2, + "node_node01_containers_state_running": 2, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 1, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 1, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 1, + "node_node01_pods_cond_podinitialized": 1, + "node_node01_pods_cond_podready": 1, + "node_node01_pods_cond_podscheduled": 1, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 1, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 100000, + "node_node01_pods_readiness_ready": 1, + "node_node01_pods_readiness_unready": 0, + "pod_default_pod01_age": 3, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_mem_limits_used": 419430400, + "pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_cond_containersready": 1, + "pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + "pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + 
"pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + "pod_default_pod01_containers_state_waiting": 0, + "pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + } + copyAge(expected, mx) + + assert.Equal(t, expected, mx) + assert.Equal(t, + len(nodeChartsTmpl)+len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts), + len(*ks.Charts()), + ) + } + + return testCase{ + client: client, + steps: []testCaseStep{step1}, + } + }, + }, + "delete a Pod in runtime": { + create: func(t *testing.T) testCase { + ctx := context.Background() + node := newNode("node01") + pod := newPod(node.Name, "pod01") + client := fake.NewSimpleClientset( + node, + pod, + ) + step1 := func(t *testing.T, ks *KubeState) { + _ = ks.Collect() + _ = client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) + } + + step2 := func(t *testing.T, ks *KubeState) { + mx := ks.Collect() + expected := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 4, + "node_node01_alloc_cpu_limits_used": 0, + "node_node01_alloc_cpu_limits_util": 0, + "node_node01_alloc_cpu_requests_used": 0, + "node_node01_alloc_cpu_requests_util": 0, + "node_node01_alloc_mem_limits_used": 0, + "node_node01_alloc_mem_limits_util": 0, + "node_node01_alloc_mem_requests_used": 0, + "node_node01_alloc_mem_requests_util": 0, + "node_node01_alloc_pods_allocated": 0, + "node_node01_alloc_pods_available": 110, + "node_node01_alloc_pods_util": 0, + "node_node01_cond_diskpressure": 0, + "node_node01_cond_memorypressure": 0, + "node_node01_cond_networkunavailable": 0, + "node_node01_cond_pidpressure": 0, + "node_node01_cond_ready": 1, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "node_node01_containers": 0, + "node_node01_containers_state_running": 0, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 0, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 0, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 0, + "node_node01_pods_cond_podinitialized": 0, + "node_node01_pods_cond_podready": 0, + "node_node01_pods_cond_podscheduled": 0, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 0, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 0, + "node_node01_pods_readiness_ready": 0, + "node_node01_pods_readiness_unready": 0, + } + copyAge(expected, mx) + + assert.Equal(t, expected, mx) + assert.Equal(t, + len(nodeChartsTmpl)+len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts), + len(*ks.Charts()), + ) + assert.Equal(t, + len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers), + calcObsoleteCharts(*ks.Charts()), + ) + } + + return testCase{ + client: client, + steps: []testCaseStep{step1, 
step2}, + } + }, + }, + "slow spec.NodeName set": { + create: func(t *testing.T) testCase { + ctx := context.Background() + node := newNode("node01") + podOrig := newPod(node.Name, "pod01") + podOrig.Spec.NodeName = "" + client := fake.NewSimpleClientset( + node, + podOrig, + ) + podUpdated := newPod(node.Name, "pod01") // with set Spec.NodeName + + step1 := func(t *testing.T, ks *KubeState) { + _ = ks.Collect() + for _, c := range *ks.Charts() { + if strings.HasPrefix(c.ID, "pod_") { + ok := isLabelValueSet(c, labelKeyNodeName) + assert.Falsef(t, ok, "chart '%s' has not empty %s label", c.ID, labelKeyNodeName) + } + } + } + step2 := func(t *testing.T, ks *KubeState) { + _, _ = client.CoreV1().Pods(podOrig.Namespace).Update(ctx, podUpdated, metav1.UpdateOptions{}) + time.Sleep(time.Millisecond * 50) + _ = ks.Collect() + + for _, c := range *ks.Charts() { + if strings.HasPrefix(c.ID, "pod_") { + ok := isLabelValueSet(c, labelKeyNodeName) + assert.Truef(t, ok, "chart '%s' has empty %s label", c.ID, labelKeyNodeName) + } + } + } + + return testCase{ + client: client, + steps: []testCaseStep{step1, step2}, + } + }, + }, + "add a Pod in runtime": { + create: func(t *testing.T) testCase { + ctx := context.Background() + node := newNode("node01") + pod1 := newPod(node.Name, "pod01") + pod2 := newPod(node.Name, "pod02") + client := fake.NewSimpleClientset( + node, + pod1, + ) + step1 := func(t *testing.T, ks *KubeState) { + _ = ks.Collect() + _, _ = client.CoreV1().Pods(pod1.Namespace).Create(ctx, pod2, metav1.CreateOptions{}) + } + + step2 := func(t *testing.T, ks *KubeState) { + mx := ks.Collect() + expected := map[string]int64{ + "discovery_node_discoverer_state": 1, + "discovery_pod_discoverer_state": 1, + "node_node01_age": 4, + "node_node01_alloc_cpu_limits_used": 800, + "node_node01_alloc_cpu_limits_util": 22857, + "node_node01_alloc_cpu_requests_used": 400, + "node_node01_alloc_cpu_requests_util": 11428, + "node_node01_alloc_mem_limits_used": 838860800, + "node_node01_alloc_mem_limits_util": 22857, + "node_node01_alloc_mem_requests_used": 419430400, + "node_node01_alloc_mem_requests_util": 11428, + "node_node01_alloc_pods_allocated": 2, + "node_node01_alloc_pods_available": 108, + "node_node01_alloc_pods_util": 1818, + "node_node01_cond_diskpressure": 0, + "node_node01_cond_memorypressure": 0, + "node_node01_cond_networkunavailable": 0, + "node_node01_cond_pidpressure": 0, + "node_node01_cond_ready": 1, + "node_node01_schedulability_schedulable": 1, + "node_node01_schedulability_unschedulable": 0, + "node_node01_containers": 4, + "node_node01_containers_state_running": 4, + "node_node01_containers_state_terminated": 0, + "node_node01_containers_state_waiting": 0, + "node_node01_init_containers": 2, + "node_node01_init_containers_state_running": 0, + "node_node01_init_containers_state_terminated": 2, + "node_node01_init_containers_state_waiting": 0, + "node_node01_pods_cond_containersready": 2, + "node_node01_pods_cond_podinitialized": 2, + "node_node01_pods_cond_podready": 2, + "node_node01_pods_cond_podscheduled": 2, + "node_node01_pods_phase_failed": 0, + "node_node01_pods_phase_pending": 0, + "node_node01_pods_phase_running": 2, + "node_node01_pods_phase_succeeded": 0, + "node_node01_pods_readiness": 100000, + "node_node01_pods_readiness_ready": 2, + "node_node01_pods_readiness_unready": 0, + "pod_default_pod01_age": 4, + "pod_default_pod01_cpu_limits_used": 400, + "pod_default_pod01_cpu_requests_used": 200, + "pod_default_pod01_mem_limits_used": 419430400, + 
"pod_default_pod01_mem_requests_used": 209715200, + "pod_default_pod01_cond_containersready": 1, + "pod_default_pod01_cond_podinitialized": 1, + "pod_default_pod01_cond_podready": 1, + "pod_default_pod01_cond_podscheduled": 1, + "pod_default_pod01_container_container1_readiness": 1, + "pod_default_pod01_container_container1_restarts": 0, + "pod_default_pod01_container_container1_state_running": 1, + "pod_default_pod01_container_container1_state_terminated": 0, + "pod_default_pod01_container_container1_state_waiting": 0, + "pod_default_pod01_container_container2_readiness": 1, + "pod_default_pod01_container_container2_restarts": 0, + "pod_default_pod01_container_container2_state_running": 1, + "pod_default_pod01_container_container2_state_terminated": 0, + "pod_default_pod01_container_container2_state_waiting": 0, + "pod_default_pod01_containers": 2, + "pod_default_pod01_containers_state_running": 2, + "pod_default_pod01_containers_state_terminated": 0, + "pod_default_pod01_containers_state_waiting": 0, + "pod_default_pod01_init_containers": 1, + "pod_default_pod01_init_containers_state_running": 0, + "pod_default_pod01_init_containers_state_terminated": 1, + "pod_default_pod01_init_containers_state_waiting": 0, + "pod_default_pod01_phase_failed": 0, + "pod_default_pod01_phase_pending": 0, + "pod_default_pod01_phase_running": 1, + "pod_default_pod01_phase_succeeded": 0, + "pod_default_pod02_age": 4, + "pod_default_pod02_cpu_limits_used": 400, + "pod_default_pod02_cpu_requests_used": 200, + "pod_default_pod02_mem_limits_used": 419430400, + "pod_default_pod02_mem_requests_used": 209715200, + "pod_default_pod02_cond_containersready": 1, + "pod_default_pod02_cond_podinitialized": 1, + "pod_default_pod02_cond_podready": 1, + "pod_default_pod02_cond_podscheduled": 1, + "pod_default_pod02_container_container1_readiness": 1, + "pod_default_pod02_container_container1_restarts": 0, + "pod_default_pod02_container_container1_state_running": 1, + "pod_default_pod02_container_container1_state_terminated": 0, + "pod_default_pod02_container_container1_state_waiting": 0, + "pod_default_pod02_container_container2_readiness": 1, + "pod_default_pod02_container_container2_restarts": 0, + "pod_default_pod02_container_container2_state_running": 1, + "pod_default_pod02_container_container2_state_terminated": 0, + "pod_default_pod02_container_container2_state_waiting": 0, + "pod_default_pod02_containers": 2, + "pod_default_pod02_containers_state_running": 2, + "pod_default_pod02_containers_state_terminated": 0, + "pod_default_pod02_containers_state_waiting": 0, + "pod_default_pod02_init_containers": 1, + "pod_default_pod02_init_containers_state_running": 0, + "pod_default_pod02_init_containers_state_terminated": 1, + "pod_default_pod02_init_containers_state_waiting": 0, + "pod_default_pod02_phase_failed": 0, + "pod_default_pod02_phase_pending": 0, + "pod_default_pod02_phase_running": 1, + "pod_default_pod02_phase_succeeded": 0, + } + copyAge(expected, mx) + + assert.Equal(t, expected, mx) + assert.Equal(t, + len(nodeChartsTmpl)+ + len(podChartsTmpl)*2+ + len(containerChartsTmpl)*len(pod1.Spec.Containers)+ + len(containerChartsTmpl)*len(pod2.Spec.Containers)+ + len(baseCharts), + len(*ks.Charts()), + ) + } + + return testCase{ + client: client, + steps: []testCaseStep{step1, step2}, + } + }, + }, + } + + for name, creator := range tests { + t.Run(name, func(t *testing.T) { + test := creator.create(t) + + ks := New() + ks.newKubeClient = func() (kubernetes.Interface, error) { return test.client, nil } + + 
require.True(t, ks.Init()) + require.True(t, ks.Check()) + defer ks.Cleanup() + + for i, executeStep := range test.steps { + if i == 0 { + _ = ks.Collect() + time.Sleep(ks.initDelay) + } else { + time.Sleep(time.Second) + } + executeStep(t, ks) + } + }) + } +} + +func newNode(name string) *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + CreationTimestamp: metav1.Time{Time: time.Now()}, + }, + Status: corev1.NodeStatus{ + Capacity: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("4000m"), + corev1.ResourceMemory: mustQuantity("4000Mi"), + "pods": mustQuantity("110"), + }, + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("3500m"), + corev1.ResourceMemory: mustQuantity("3500Mi"), + "pods": mustQuantity("110"), + }, + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + {Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse}, + {Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse}, + {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse}, + {Type: corev1.NodeNetworkUnavailable, Status: corev1.ConditionFalse}, + }, + }, + } +} + +func newPod(nodeName, name string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: corev1.NamespaceDefault, + CreationTimestamp: metav1.Time{Time: time.Now()}, + }, + Spec: corev1.PodSpec{ + NodeName: nodeName, + InitContainers: []corev1.Container{ + { + Name: "init-container1", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("50m"), + corev1.ResourceMemory: mustQuantity("50Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("10m"), + corev1.ResourceMemory: mustQuantity("10Mi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "container1", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("200m"), + corev1.ResourceMemory: mustQuantity("200Mi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("100m"), + corev1.ResourceMemory: mustQuantity("100Mi"), + }, + }, + }, + { + Name: "container2", + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("200m"), + corev1.ResourceMemory: mustQuantity("200Mi")}, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: mustQuantity("100m"), + corev1.ResourceMemory: mustQuantity("100Mi"), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + {Type: corev1.PodReady, Status: corev1.ConditionTrue}, + {Type: corev1.PodScheduled, Status: corev1.ConditionTrue}, + {Type: corev1.PodInitialized, Status: corev1.ConditionTrue}, + {Type: corev1.ContainersReady, Status: corev1.ConditionTrue}, + }, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container1", + State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{}}, + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + { + Name: "container2", + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + } +} + +type brokenInfoKubeClient struct { + kubernetes.Interface +} + +func (kc *brokenInfoKubeClient) Discovery() discovery.DiscoveryInterface { + return &brokenInfoDiscovery{kc.Interface.Discovery()} +} + 
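+// brokenInfoDiscovery embeds a working DiscoveryInterface but overrides
+// ServerVersion to always fail, simulating an unreachable Kubernetes API
+// server so tests can exercise the Check() error path.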
+type brokenInfoDiscovery struct { + discovery.DiscoveryInterface +} + +func (d *brokenInfoDiscovery) ServerVersion() (*version.Info, error) { + return nil, errors.New("brokenInfoDiscovery.ServerVersion() error") +} + +func calcObsoleteCharts(charts module.Charts) (num int) { + for _, c := range charts { + if c.Obsolete { + num++ + } + } + return num +} + +func mustQuantity(s string) apiresource.Quantity { + q, err := apiresource.ParseQuantity(s) + if err != nil { + panic(fmt.Sprintf("fail to create resource quantity: %v", err)) + } + return q +} + +func copyAge(dst, src map[string]int64) { + for k, v := range src { + if !strings.HasSuffix(k, "_age") { + continue + } + if _, ok := dst[k]; ok { + dst[k] = v + } + } +} + +func isLabelValueSet(c *module.Chart, name string) bool { + for _, l := range c.Labels { + if l.Key == name { + return l.Value != "" + } + } + return false +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/metadata.yaml b/src/go/collectors/go.d.plugin/modules/k8s_state/metadata.yaml new file mode 100644 index 00000000000000..7617b297f40c9b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/metadata.yaml @@ -0,0 +1,356 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-k8s_state + plugin_name: go.d.plugin + module_name: k8s_state + monitored_instance: + name: Kubernetes Cluster State + link: https://kubernetes.io/ + icon_filename: kubernetes.svg + categories: + - data-collection.kubernetes + keywords: + - kubernetes + - k8s + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Kubernetes Nodes, Pods and Containers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: false + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/k8s_state.conf + options: + description: "" + folding: + title: Config options + enabled: true + list: [] + examples: + folding: + title: Config + enabled: true + list: [] + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: node + description: These metrics refer to the Node. + labels: + - name: k8s_cluster_id + description: Cluster ID. This is equal to the kube-system namespace UID. + - name: k8s_cluster_name + description: Cluster name. Cluster name discovery only works in GKE. + - name: k8s_node_name + description: Node name. 
+ metrics: + - name: k8s_state.node_allocatable_cpu_requests_utilization + description: CPU requests utilization + unit: '%' + chart_type: line + dimensions: + - name: requests + - name: k8s_state.node_allocatable_cpu_requests_used + description: CPU requests used + unit: millicpu + chart_type: line + dimensions: + - name: requests + - name: k8s_state.node_allocatable_cpu_limits_utilization + description: CPU limits utilization + unit: '%' + chart_type: line + dimensions: + - name: limits + - name: k8s_state.node_allocatable_cpu_limits_used + description: CPU limits used + unit: millicpu + chart_type: line + dimensions: + - name: limits + - name: k8s_state.node_allocatable_mem_requests_utilization + description: Memory requests utilization + unit: '%' + chart_type: line + dimensions: + - name: requests + - name: k8s_state.node_allocatable_mem_requests_used + description: Memory requests used + unit: bytes + chart_type: line + dimensions: + - name: requests + - name: k8s_state.node_allocatable_mem_limits_utilization + description: Memory limits utilization + unit: '%' + chart_type: line + dimensions: + - name: limits + - name: k8s_state.node_allocatable_mem_limits_used + description: Memory limits used + unit: bytes + chart_type: line + dimensions: + - name: limits + - name: k8s_state.node_allocatable_pods_utilization + description: Pods resource utilization + unit: '%' + chart_type: line + dimensions: + - name: allocated + - name: k8s_state.node_allocatable_pods_usage + description: Pods resource usage + unit: pods + chart_type: stacked + dimensions: + - name: available + - name: allocated + - name: k8s_state.node_condition + description: Condition status + unit: status + chart_type: line + dimensions: + - name: a dimension per condition + - name: k8s_state.node_schedulability + description: Schedulability + unit: state + chart_type: line + dimensions: + - name: schedulable + - name: unschedulable + - name: k8s_state.node_pods_readiness + description: Pods readiness + unit: '%' + chart_type: line + dimensions: + - name: ready + - name: k8s_state.node_pods_readiness_state + description: Pods readiness state + unit: pods + chart_type: line + dimensions: + - name: ready + - name: unready + - name: k8s_state.node_pods_condition + description: Pods condition + unit: pods + chart_type: line + dimensions: + - name: pod_ready + - name: pod_scheduled + - name: pod_initialized + - name: containers_ready + - name: k8s_state.node_pods_phase + description: Pods phase + unit: pods + chart_type: stacked + dimensions: + - name: running + - name: failed + - name: succeeded + - name: pending + - name: k8s_state.node_containers + description: Containers + unit: containers + chart_type: line + dimensions: + - name: containers + - name: init_containers + - name: k8s_state.node_containers_state + description: Containers state + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: waiting + - name: terminated + - name: k8s_state.node_init_containers_state + description: Init containers state + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: waiting + - name: terminated + - name: k8s_state.node_age + description: Age + unit: seconds + chart_type: line + dimensions: + - name: age + - name: pod + description: These metrics refer to the Pod. + labels: + - name: k8s_cluster_id + description: Cluster ID. This is equal to the kube-system namespace UID. + - name: k8s_cluster_name + description: Cluster name. Cluster name discovery only works in GKE. 
+ - name: k8s_node_name + description: Node name. + - name: k8s_namespace + description: Namespace. + - name: k8s_controller_kind + description: Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). + - name: k8s_controller_name + description: Controller name. + - name: k8s_pod_name + description: Pod name. + - name: k8s_qos_class + description: Pod QOS class (burstable, guaranteed, besteffort). + metrics: + - name: k8s_state.pod_cpu_requests_used + description: CPU requests used + unit: millicpu + chart_type: line + dimensions: + - name: requests + - name: k8s_state.pod_cpu_limits_used + description: CPU limits used + unit: millicpu + chart_type: line + dimensions: + - name: limits + - name: k8s_state.pod_mem_requests_used + description: Memory requests used + unit: bytes + chart_type: line + dimensions: + - name: requests + - name: k8s_state.pod_mem_limits_used + description: Memory limits used + unit: bytes + chart_type: line + dimensions: + - name: limits + - name: k8s_state.pod_condition + description: Condition + unit: state + chart_type: line + dimensions: + - name: pod_ready + - name: pod_scheduled + - name: pod_initialized + - name: containers_ready + - name: k8s_state.pod_phase + description: Phase + unit: state + chart_type: line + dimensions: + - name: running + - name: failed + - name: succeeded + - name: pending + - name: k8s_state.pod_age + description: Age + unit: seconds + chart_type: line + dimensions: + - name: age + - name: k8s_state.pod_containers + description: Containers + unit: containers + chart_type: line + dimensions: + - name: containers + - name: init_containers + - name: k8s_state.pod_containers_state + description: Containers state + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: waiting + - name: terminated + - name: k8s_state.pod_init_containers_state + description: Init containers state + unit: containers + chart_type: stacked + dimensions: + - name: running + - name: waiting + - name: terminated + - name: container + description: These metrics refer to the Pod container. + labels: + - name: k8s_cluster_id + description: Cluster ID. This is equal to the kube-system namespace UID. + - name: k8s_cluster_name + description: Cluster name. Cluster name discovery only works in GKE. + - name: k8s_node_name + description: Node name. + - name: k8s_namespace + description: Namespace. + - name: k8s_controller_kind + description: Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). + - name: k8s_controller_name + description: Controller name. + - name: k8s_pod_name + description: Pod name. + - name: k8s_qos_class + description: Pod QOS class (burstable, guaranteed, besteffort). + - name: k8s_container_name + description: Container name. 
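+          # Each chart below is instantiated once per container in every discovered pod.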
+ metrics: + - name: k8s_state.pod_container_readiness_state + description: Readiness state + unit: state + chart_type: line + dimensions: + - name: ready + - name: k8s_state.pod_container_restarts + description: Restarts + unit: restarts + chart_type: line + dimensions: + - name: restarts + - name: k8s_state.pod_container_state + description: Container state + unit: state + chart_type: line + dimensions: + - name: running + - name: waiting + - name: terminated + - name: k8s_state.pod_container_waiting_state_reason + description: Container waiting state reason + unit: state + chart_type: line + dimensions: + - name: a dimension per reason + - name: k8s_state.pod_container_terminated_state_reason + description: Container terminated state reason + unit: state + chart_type: line + dimensions: + - name: a dimension per reason diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/resource.go b/src/go/collectors/go.d.plugin/modules/k8s_state/resource.go new file mode 100644 index 00000000000000..cabd41a67e84d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/resource.go @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +type resource interface { + source() string + kind() kubeResourceKind + value() interface{} +} + +type kubeResourceKind uint8 + +const ( + kubeResourceNode kubeResourceKind = iota + 1 + kubeResourcePod +) + +func toNode(i interface{}) (*corev1.Node, error) { + switch v := i.(type) { + case *corev1.Node: + return v, nil + case resource: + return toNode(v.value()) + default: + return nil, fmt.Errorf("unexpected type: %T (expected %T or %T)", v, &corev1.Node{}, resource(nil)) + } +} + +func toPod(i interface{}) (*corev1.Pod, error) { + switch v := i.(type) { + case *corev1.Pod: + return v, nil + case resource: + return toPod(v.value()) + default: + return nil, fmt.Errorf("unexpected type: %T (expected %T or %T)", v, &corev1.Pod{}, resource(nil)) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/state.go b/src/go/collectors/go.d.plugin/modules/k8s_state/state.go new file mode 100644 index 00000000000000..1d39df10e9fdf3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/state.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "sync" + "time" + + corev1 "k8s.io/api/core/v1" +) + +func newKubeState() *kubeState { + return &kubeState{ + Mutex: &sync.Mutex{}, + nodes: make(map[string]*nodeState), + pods: make(map[string]*podState), + } +} + +func newNodeState() *nodeState { + return &nodeState{ + new: true, + labels: make(map[string]string), + conditions: make(map[string]*nodeStateCondition), + } +} + +func newPodState() *podState { + return &podState{ + new: true, + labels: make(map[string]string), + initContainers: make(map[string]*containerState), + containers: make(map[string]*containerState), + } +} + +func newContainerState() *containerState { + return &containerState{ + new: true, + stateWaitingReasons: make(map[string]*containerStateReason), + stateTerminatedReasons: make(map[string]*containerStateReason), + } +} + +type kubeState struct { + *sync.Mutex + nodes map[string]*nodeState + pods map[string]*podState +} + +type ( + nodeState struct { + new bool + deleted bool + + name string + unSchedulable bool + labels map[string]string + creationTime time.Time + allocatableCPU int64 + allocatableMem int64 + allocatablePods int64 + conditions map[string]*nodeStateCondition + + stats 
nodeStateStats + } + nodeStateCondition struct { + new bool + // https://kubernetes.io/docs/concepts/architecture/nodes/#condition + //typ corev1.NodeConditionType + status corev1.ConditionStatus + } + nodeStateStats struct { + reqCPU int64 + limitCPU int64 + reqMem int64 + limitMem int64 + pods int64 + + podsCondPodReady int64 + podsCondPodScheduled int64 + podsCondPodInitialized int64 + podsCondContainersReady int64 + + podsReadinessReady int64 + podsReadinessUnready int64 + + podsPhaseRunning int64 + podsPhaseFailed int64 + podsPhaseSucceeded int64 + podsPhasePending int64 + + containers int64 + initContainers int64 + initContStateRunning int64 + initContStateWaiting int64 + initContStateTerminated int64 + contStateRunning int64 + contStateWaiting int64 + contStateTerminated int64 + } +) + +func (ns nodeState) id() string { return ns.name } +func (ns *nodeState) resetStats() { ns.stats = nodeStateStats{} } + +type ( + podState struct { + new bool + deleted bool + unscheduled bool + + name string + nodeName string + namespace string + uid string + labels map[string]string + controllerKind string + controllerName string + qosClass string + creationTime time.Time + reqCPU int64 + reqMem int64 + limitCPU int64 + limitMem int64 + // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions + condPodScheduled corev1.ConditionStatus + condContainersReady corev1.ConditionStatus + condPodInitialized corev1.ConditionStatus + condPodReady corev1.ConditionStatus + // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + phase corev1.PodPhase + + initContainers map[string]*containerState + containers map[string]*containerState + } +) + +func (ps podState) id() string { return ps.namespace + "_" + ps.name } + +type ( + containerState struct { + new bool + + name string + uid string + + podName string + nodeName string + namespace string + + ready bool + restarts int64 + stateRunning bool + stateWaiting bool + stateTerminated bool + stateWaitingReasons map[string]*containerStateReason + stateTerminatedReasons map[string]*containerStateReason + } + containerStateReason struct { + new bool + reason string + active bool + } +) diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/update_node_state.go b/src/go/collectors/go.d.plugin/modules/k8s_state/update_node_state.go new file mode 100644 index 00000000000000..80f5c26c841445 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/update_node_state.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +func (ks *KubeState) updateNodeState(r resource) { + if r.value() == nil { + if ns, ok := ks.state.nodes[r.source()]; ok { + ns.deleted = true + } + return + } + + node, err := toNode(r) + if err != nil { + ks.Warning(err) + return + } + + if myNodeName != "" && node.Name != myNodeName { + return + } + + ns, ok := ks.state.nodes[r.source()] + if !ok { + ns = newNodeState() + ks.state.nodes[r.source()] = ns + } + + if !ok { + ns.name = node.Name + ns.creationTime = node.CreationTimestamp.Time + ns.allocatableCPU = int64(node.Status.Allocatable.Cpu().AsApproximateFloat64() * 1000) + ns.allocatableMem = node.Status.Allocatable.Memory().Value() + ns.allocatablePods = node.Status.Allocatable.Pods().Value() + copyLabels(ns.labels, node.Labels) + } + + ns.unSchedulable = node.Spec.Unschedulable + + for _, c := range node.Status.Conditions { + if v, ok := ns.conditions[string(c.Type)]; !ok { + ns.conditions[string(c.Type)] = &nodeStateCondition{new: true, status: 
c.Status} + } else { + v.status = c.Status + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/update_pod_state.go b/src/go/collectors/go.d.plugin/modules/k8s_state/update_pod_state.go new file mode 100644 index 00000000000000..22ef0f7fcb6791 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/update_pod_state.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" +) + +func (ks *KubeState) updatePodState(r resource) { + if r.value() == nil { + if ps, ok := ks.state.pods[r.source()]; ok { + ps.deleted = true + } + return + } + + pod, err := toPod(r) + if err != nil { + ks.Warning(err) + return + } + + ps, ok := ks.state.pods[r.source()] + if !ok { + ps = newPodState() + ks.state.pods[r.source()] = ps + } + + if !ok { + ps.name = pod.Name + ps.nodeName = pod.Spec.NodeName + ps.namespace = pod.Namespace + ps.creationTime = pod.CreationTimestamp.Time + ps.uid = string(pod.UID) + ps.qosClass = strings.ToLower(string(pod.Status.QOSClass)) + copyLabels(ps.labels, pod.Labels) + for _, ref := range pod.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + ps.controllerKind = ref.Kind + ps.controllerName = ref.Name + } + } + var res struct{ rCPU, lCPU, rMem, lMem, irCPU, ilCPU, irMem, ilMem int64 } + for _, cntr := range pod.Spec.Containers { + res.rCPU += int64(cntr.Resources.Requests.Cpu().AsApproximateFloat64() * 1000) + res.lCPU += int64(cntr.Resources.Limits.Cpu().AsApproximateFloat64() * 1000) + res.rMem += cntr.Resources.Requests.Memory().Value() + res.lMem += cntr.Resources.Limits.Memory().Value() + } + for _, cntr := range pod.Spec.InitContainers { + res.irCPU += int64(cntr.Resources.Requests.Cpu().AsApproximateFloat64() * 1000) + res.ilCPU += int64(cntr.Resources.Limits.Cpu().AsApproximateFloat64() * 1000) + res.irMem += cntr.Resources.Requests.Memory().Value() + res.ilMem += cntr.Resources.Limits.Memory().Value() + } + ps.reqCPU = max(res.rCPU, res.irCPU) + ps.limitCPU = max(res.lCPU, res.ilCPU) + ps.reqMem = max(res.rMem, res.irMem) + ps.limitMem = max(res.lMem, res.ilMem) + } + if ps.nodeName == "" { + ps.nodeName = pod.Spec.NodeName + } + + for _, c := range pod.Status.Conditions { + switch c.Type { + case corev1.ContainersReady: + ps.condContainersReady = c.Status + case corev1.PodInitialized: + ps.condPodInitialized = c.Status + case corev1.PodReady: + ps.condPodReady = c.Status + case corev1.PodScheduled: + ps.condPodScheduled = c.Status + } + } + + ps.phase = pod.Status.Phase + + for _, cs := range ps.containers { + for _, r := range cs.stateWaitingReasons { + r.active = false + } + for _, r := range cs.stateTerminatedReasons { + r.active = false + } + } + + for _, cntr := range pod.Status.ContainerStatuses { + cs, ok := ps.containers[cntr.Name] + if !ok { + cs = newContainerState() + ps.containers[cntr.Name] = cs + } + if !ok { + cs.name = cntr.Name + cs.podName = pod.Name + cs.namespace = pod.Namespace + cs.nodeName = pod.Spec.NodeName + cs.uid = extractContainerID(cntr.ContainerID) + } + cs.ready = cntr.Ready + cs.restarts = int64(cntr.RestartCount) + cs.stateRunning = cntr.State.Running != nil + cs.stateWaiting = cntr.State.Waiting != nil + cs.stateTerminated = cntr.State.Terminated != nil + + if cntr.State.Waiting != nil { + reason := cntr.State.Waiting.Reason + r, ok := cs.stateWaitingReasons[reason] + if !ok { + r = &containerStateReason{new: true, reason: reason} + cs.stateWaitingReasons[reason] = r + } + r.active = true + } + + if 
cntr.State.Terminated != nil { + reason := cntr.State.Terminated.Reason + r, ok := cs.stateTerminatedReasons[reason] + if !ok { + r = &containerStateReason{new: true, reason: reason} + cs.stateTerminatedReasons[reason] = r + } + r.active = true + } + } + + for _, cntr := range pod.Status.InitContainerStatuses { + cs, ok := ps.initContainers[cntr.Name] + if !ok { + cs = newContainerState() + ps.initContainers[cntr.Name] = cs + } + if !ok { + cs.name = cntr.Name + cs.podName = pod.Name + cs.namespace = pod.Namespace + cs.nodeName = pod.Spec.NodeName + cs.uid = extractContainerID(cntr.ContainerID) + } + cs.ready = cntr.Ready + cs.restarts = int64(cntr.RestartCount) + cs.stateRunning = cntr.State.Running != nil + cs.stateWaiting = cntr.State.Waiting != nil + cs.stateTerminated = cntr.State.Terminated != nil + } +} + +func max(a, b int64) int64 { + if a < b { + return b + } + return a +} + +func extractContainerID(id string) string { + // docker://d98... + if i := strings.LastIndexByte(id, '/'); i != -1 { + id = id[i+1:] + } + return id +} diff --git a/src/go/collectors/go.d.plugin/modules/k8s_state/update_state.go b/src/go/collectors/go.d.plugin/modules/k8s_state/update_state.go new file mode 100644 index 00000000000000..88f3272c109b45 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/k8s_state/update_state.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_state + +func (ks *KubeState) runUpdateState(in <-chan resource) { + for { + select { + case <-ks.ctx.Done(): + return + case r := <-in: + ks.state.Lock() + switch r.kind() { + case kubeResourceNode: + ks.updateNodeState(r) + case kubeResourcePod: + ks.updatePodState(r) + } + ks.state.Unlock() + } + } +} + +func copyLabels(dst, src map[string]string) { + for k, v := range src { + dst[k] = v + } +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/README.md b/src/go/collectors/go.d.plugin/modules/lighttpd/README.md new file mode 120000 index 00000000000000..b0d3613bf901d5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/README.md @@ -0,0 +1 @@ +integrations/lighttpd.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/apiclient.go b/src/go/collectors/go.d.plugin/modules/lighttpd/apiclient.go new file mode 100644 index 00000000000000..5e65f1f4d735e5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/apiclient.go @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "bufio" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + busyWorkers = "BusyWorkers" + idleWorkers = "IdleWorkers" + + busyServers = "BusyServers" + idleServers = "IdleServers" + totalAccesses = "Total Accesses" + totalkBytes = "Total kBytes" + uptime = "Uptime" + scoreBoard = "Scoreboard" +) + +func newAPIClient(client *http.Client, request web.Request) *apiClient { + return &apiClient{httpClient: client, request: request} +} + +type apiClient struct { + httpClient *http.Client + request web.Request +} + +func (a apiClient) getServerStatus() (*serverStatus, error) { + req, err := web.NewHTTPRequest(a.request) + + if err != nil { + return nil, fmt.Errorf("error on creating request : %v", err) + } + + resp, err := a.doRequestOK(req) + + defer closeBody(resp) + + if err != nil { + return nil, err + } + + status, err := parseResponse(resp.Body) + + if err != nil { + return nil, fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + } + + return 
status, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+	resp, err := a.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("error on request : %v", err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+	}
+	return resp, nil
+}
+
+func parseResponse(r io.Reader) (*serverStatus, error) {
+	s := bufio.NewScanner(r)
+	var status serverStatus
+
+	for s.Scan() {
+		parts := strings.Split(s.Text(), ":")
+		if len(parts) != 2 {
+			continue
+		}
+		key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+
+		switch key {
+		default:
+		case busyWorkers, idleWorkers:
+			return nil, fmt.Errorf("found '%s', apache data", key)
+		case busyServers:
+			status.Servers.Busy = mustParseInt(value)
+		case idleServers:
+			status.Servers.Idle = mustParseInt(value)
+		case totalAccesses:
+			status.Total.Accesses = mustParseInt(value)
+		case totalkBytes:
+			status.Total.KBytes = mustParseInt(value)
+		case uptime:
+			status.Uptime = mustParseInt(value)
+		case scoreBoard:
+			status.Scoreboard = parseScoreboard(value)
+		}
+	}
+
+	return &status, nil
+}
+
+func parseScoreboard(value string) *scoreboard {
+	// Descriptions from https://blog.serverdensity.com/monitor-lighttpd/
+	//
+	// “.” = Opening the TCP connection (connect)
+	// “C” = Closing the TCP connection if no other HTTP request will use it (close)
+	// “E” = hard error
+	// “k” = Keeping the TCP connection open for more HTTP requests from the same client to avoid the TCP handling overhead (keep-alive)
+	// “r” = Read the content of the HTTP request (read)
+	// “R” = Read the content of the HTTP request (read-POST)
+	// “W” = Write the HTTP response to the socket (write)
+	// “h” = Decide action to take with the request (handle-request)
+	// “q” = Start of HTTP request (request-start)
+	// “Q” = End of HTTP request (request-end)
+	// “s” = Start of the HTTP request response (response-start)
+	// “S” = End of the HTTP request response (response-end)
+	// “_” = Waiting for Connection (NOTE: not sure, copied the description from the Apache scoreboard)
+
+	var sb scoreboard
+	for _, s := range strings.Split(value, "") {
+		switch s {
+		case "_":
+			sb.Waiting++
+		case ".":
+			sb.Open++
+		case "C":
+			sb.Close++
+		case "E":
+			sb.HardError++
+		case "k":
+			sb.KeepAlive++
+		case "r":
+			sb.Read++
+		case "R":
+			sb.ReadPost++
+		case "W":
+			sb.Write++
+		case "h":
+			sb.HandleRequest++
+		case "q":
+			sb.RequestStart++
+		case "Q":
+			sb.RequestEnd++
+		case "s":
+			sb.ResponseStart++
+		case "S":
+			sb.ResponseEnd++
+		}
+	}
+
+	return &sb
+}
+
+func mustParseInt(value string) *int64 {
+	v, err := strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	return &v
+}
+
+func closeBody(resp *http.Response) {
+	if resp != nil && resp.Body != nil {
+		_, _ = io.Copy(io.Discard, resp.Body)
+		_ = resp.Body.Close()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/charts.go b/src/go/collectors/go.d.plugin/modules/lighttpd/charts.go
new file mode 100644
index 00000000000000..bacea70a171a73
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/lighttpd/charts.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import "github.com/netdata/go.d.plugin/agent/module"
+
+type (
+	// Charts is an alias for module.Charts
+	Charts = module.Charts
+	// Dims is an alias for module.Dims
+	Dims = module.Dims
+)
+
+var charts = Charts{
+	{
+		ID:    "requests",
+		Title: "Requests",
+		Units: "requests/s",
+		Fam:   
"requests", + Ctx: "lighttpd.requests", + Dims: Dims{ + {ID: "total_accesses", Name: "requests", Algo: module.Incremental}, + }, + }, + { + ID: "net", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "lighttpd.net", + Type: module.Area, + Dims: Dims{ + {ID: "total_kBytes", Name: "sent", Algo: module.Incremental, Mul: 8}, + }, + }, + { + ID: "servers", + Title: "Servers", + Units: "servers", + Fam: "servers", + Ctx: "lighttpd.workers", + Type: module.Stacked, + Dims: Dims{ + {ID: "idle_servers", Name: "idle"}, + {ID: "busy_servers", Name: "busy"}, + }, + }, + { + ID: "scoreboard", + Title: "ScoreBoard", + Units: "connections", + Fam: "connections", + Ctx: "lighttpd.scoreboard", + Dims: Dims{ + {ID: "scoreboard_waiting", Name: "waiting"}, + {ID: "scoreboard_open", Name: "open"}, + {ID: "scoreboard_close", Name: "close"}, + {ID: "scoreboard_hard_error", Name: "hard error"}, + {ID: "scoreboard_keepalive", Name: "keepalive"}, + {ID: "scoreboard_read", Name: "read"}, + {ID: "scoreboard_read_post", Name: "read post"}, + {ID: "scoreboard_write", Name: "write"}, + {ID: "scoreboard_handle_request", Name: "handle request"}, + {ID: "scoreboard_request_start", Name: "request start"}, + {ID: "scoreboard_request_end", Name: "request end"}, + {ID: "scoreboard_response_start", Name: "response start"}, + {ID: "scoreboard_response_end", Name: "response end"}, + }, + }, + { + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "lighttpd.uptime", + Dims: Dims{ + {ID: "uptime"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/collect.go b/src/go/collectors/go.d.plugin/modules/lighttpd/collect.go new file mode 100644 index 00000000000000..609a701059b8d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/collect.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func (l *Lighttpd) collect() (map[string]int64, error) { + status, err := l.apiClient.getServerStatus() + + if err != nil { + return nil, err + } + + mx := stm.ToMap(status) + + if len(mx) == 0 { + return nil, fmt.Errorf("nothing was collected from %s", l.URL) + } + + return mx, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/config_schema.json b/src/go/collectors/go.d.plugin/modules/lighttpd/config_schema.json new file mode 100644 index 00000000000000..c1b51d0658c308 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/lighttpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/integrations/lighttpd.md 
b/src/go/collectors/go.d.plugin/modules/lighttpd/integrations/lighttpd.md new file mode 100644 index 00000000000000..091010ebbc66a4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/integrations/lighttpd.md @@ -0,0 +1,231 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/lighttpd/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/lighttpd/metadata.yaml" +sidebar_label: "Lighttpd" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Lighttpd + + +<img src="https://netdata.cloud/img/lighttpd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: lighttpd + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more. + + +It sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), +which is a built-in location that provides metrics about the Lighttpd server. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects Lighttpd instances running on localhost that are listening on port 80. +On startup, it tries to collect metrics from: + +- http://localhost/server-status?auto +- http://127.0.0.1/server-status?auto + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Lighttpd instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| lighttpd.requests | requests | requests/s | +| lighttpd.net | sent | kilobits/s | +| lighttpd.workers | idle, busy | servers | +| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections | +| lighttpd.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable Lighttpd status support + +To enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/lighttpd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/lighttpd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. 
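+
+For illustration only, a minimal sketch of `go.d/lighttpd.conf` with these globals set (the values shown are arbitrary examples, not defaults):
+
+```yaml
+# global options apply to every job unless a job overrides them
+update_every: 5
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1/server-status?auto
+```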
+ + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1/server-status?auto | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Lighttpd with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1/server-status?auto + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/server-status?auto + + - name: remote + url: http://192.0.2.1/server-status?auto + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m lighttpd
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd.go b/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd.go
new file mode 100644
index 00000000000000..2f98a96bf92a8c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+	_ "embed"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("lighttpd", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+const (
+	defaultURL         = "http://127.0.0.1/server-status?auto"
+	defaultHTTPTimeout = time.Second * 2
+)
+
+// New creates Lighttpd with default values.
+func New() *Lighttpd {
+	config := Config{
+		HTTP: web.HTTP{
+			Request: web.Request{
+				URL: defaultURL,
+			},
+			Client: web.Client{
+				Timeout: web.Duration{Duration: defaultHTTPTimeout},
+			},
+		},
+	}
+	return &Lighttpd{Config: config}
+}
+
+// Config is the Lighttpd module configuration.
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+type Lighttpd struct {
+	module.Base
+	Config    `yaml:",inline"`
+	apiClient *apiClient
+}
+
+// Cleanup makes cleanup.
+func (Lighttpd) Cleanup() {}
+
+// Init makes initialization.
+func (l *Lighttpd) Init() bool {
+	if l.URL == "" {
+		l.Error("URL not set")
+		return false
+	}
+
+	if !strings.HasSuffix(l.URL, "?auto") {
+		l.Errorf("bad URL '%s', should end in '?auto'", l.URL)
+		return false
+	}
+
+	client, err := web.NewHTTPClient(l.Client)
+	if err != nil {
+		l.Errorf("error on creating http client : %v", err)
+		return false
+	}
+	l.apiClient = newAPIClient(client, l.Request)
+
+	l.Debugf("using URL %s", l.URL)
+	l.Debugf("using timeout: %s", l.Timeout.Duration)
+
+	return true
+}
+
+// Check makes check.
+func (l *Lighttpd) Check() bool { return len(l.Collect()) > 0 }
+
+// Charts returns Charts.
+func (l Lighttpd) Charts() *Charts { return charts.Copy() }
+
+// Collect collects metrics.
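+// It returns nil when collection fails, so callers such as Check can
+// treat an empty result as a failed data collection.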
+func (l *Lighttpd) Collect() map[string]int64 { + mx, err := l.collect() + + if err != nil { + l.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd_test.go b/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd_test.go new file mode 100644 index 00000000000000..e6a7b016e591e6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/lighttpd_test.go @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testStatusData, _ = os.ReadFile("testdata/status.txt") + testApacheStatusData, _ = os.ReadFile("testdata/apache-status.txt") +) + +func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() } + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestLighttpd_Init(t *testing.T) { + job := New() + + require.True(t, job.Init()) + assert.NotNil(t, job.apiClient) +} + +func TestLighttpd_InitNG(t *testing.T) { + job := New() + + job.URL = "" + assert.False(t, job.Init()) +} + +func TestLighttpd_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/server-status?auto" + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestLighttpd_CheckNG(t *testing.T) { + job := New() + + job.URL = "http://127.0.0.1:38001/server-status?auto" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestLighttpd_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestLighttpd_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/server-status?auto" + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "scoreboard_waiting": 125, + "scoreboard_request_end": 0, + "busy_servers": 3, + "scoreboard_keepalive": 1, + "scoreboard_read": 1, + "scoreboard_request_start": 0, + "scoreboard_response_start": 0, + "scoreboard_close": 0, + "scoreboard_open": 0, + "scoreboard_hard_error": 0, + "scoreboard_handle_request": 1, + "idle_servers": 125, + "total_kBytes": 4, + "uptime": 11, + "scoreboard_read_post": 0, + "scoreboard_write": 0, + "scoreboard_response_end": 0, + "total_accesses": 12, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestLighttpd_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/server-status?auto" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestLighttpd_ApacheData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testApacheStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/server-status?auto" + require.True(t, job.Init()) + require.False(t, job.Check()) +} + +func TestLighttpd_404(t *testing.T) { + ts := 
httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + "/server-status?auto" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/metadata.yaml b/src/go/collectors/go.d.plugin/modules/lighttpd/metadata.yaml new file mode 100644 index 00000000000000..a90ac05edcc937 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/metadata.yaml @@ -0,0 +1,231 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-lighttpd + plugin_name: go.d.plugin + module_name: lighttpd + monitored_instance: + name: Lighttpd + link: https://www.lighttpd.net/ + icon_filename: lighttpd.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - webserver + related_resources: + integrations: + list: + - plugin_name: go.d.plugin + module_name: weblog + - plugin_name: go.d.plugin + module_name: httpcheck + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more. + method_description: | + It sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status), + which is a built-in location that provides metrics about the Lighttpd server. + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects Lighttpd instances running on localhost that are listening on port 80. + On startup, it tries to collect metrics from: + + - http://localhost/server-status?auto + - http://127.0.0.1/server-status?auto + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable Lighttpd status support + description: | + To enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status). + configuration: + file: + name: go.d/lighttpd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1/server-status?auto + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. 
+ default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: "GET" + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1/server-status?auto + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1/server-status?auto + username: username + password: password + - name: HTTPS with self-signed certificate + description: Lighttpd with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1/server-status?auto + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1/server-status?auto + + - name: remote + url: http://192.0.2.1/server-status?auto + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: lighttpd.requests + description: Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: lighttpd.net + description: Bandwidth + unit: kilobits/s + chart_type: area + dimensions: + - name: sent + - name: lighttpd.workers + description: Servers + unit: servers + chart_type: stacked + dimensions: + - name: idle + - name: busy + - name: lighttpd.scoreboard + description: ScoreBoard + unit: connections + chart_type: line + dimensions: + - name: waiting + - name: open + - name: close + - name: hard_error + - name: keepalive + - name: read + - name: read_post + - name: write + - name: handle_request + - name: request_start + - name: request_end + - name: lighttpd.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/metrics.go b/src/go/collectors/go.d.plugin/modules/lighttpd/metrics.go new file mode 100644 index 00000000000000..6c39d2d06ac450 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/metrics.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +type ( + serverStatus struct { + Total struct { + Accesses *int64 `stm:"accesses"` + KBytes *int64 `stm:"kBytes"` + } `stm:"total"` + Servers struct { + Busy *int64 `stm:"busy_servers"` + Idle *int64 `stm:"idle_servers"` + } `stm:""` + Uptime *int64 `stm:"uptime"` + Scoreboard *scoreboard `stm:"scoreboard"` + } + scoreboard struct { + Waiting int64 `stm:"waiting"` + Open int64 `stm:"open"` + Close int64 `stm:"close"` + HardError int64 `stm:"hard_error"` + KeepAlive int64 `stm:"keepalive"` + Read int64 `stm:"read"` + ReadPost int64 `stm:"read_post"` + Write int64 `stm:"write"` + HandleRequest int64 `stm:"handle_request"` + RequestStart int64 `stm:"request_start"` + RequestEnd int64 `stm:"request_end"` + ResponseStart int64 `stm:"response_start"` + ResponseEnd int64 `stm:"response_end"` + } +) diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/apache-status.txt b/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/apache-status.txt new file mode 100644 index 00000000000000..136b69363c8a98 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/apache-status.txt @@ -0,0 +1,39 @@ +127.0.0.1 +ServerVersion: Apache/2.4.37 (Unix) +ServerMPM: event +Server Built: Oct 23 2018 18:27:46 +CurrentTime: Sunday, 13-Jan-2019 20:39:30 MSK +RestartTime: Sunday, 13-Jan-2019 20:35:13 MSK +ParentServerConfigGeneration: 1 +ParentServerMPMGeneration: 0 +ServerUptimeSeconds: 256 +ServerUptime: 4 minutes 16 seconds +Load1: 1.02 +Load5: 1.30 +Load15: 1.41 +Total Accesses: 9 +Total kBytes: 12 +Total Duration: 1 +CPUUser: 0 +CPUSystem: .01 +CPUChildrenUser: 0 +CPUChildrenSystem: 0 +CPULoad: .00390625 +Uptime: 256 +ReqPerSec: .0351563 +BytesPerSec: 48 +BytesPerReq: 1365.33 +DurationPerReq: .111111 +BusyWorkers: 1 +IdleWorkers: 99 +Processes: 4 +Stopping: 0 +BusyWorkers: 1 +IdleWorkers: 99 +ConnsTotal: 0 +ConnsAsyncWriting: 0 +ConnsAsyncKeepAlive: 0 +ConnsAsyncClosing: 0 +Scoreboard: ____________________________________________________________W_______________________________________............................................................................................................................................................................................................................................................................................................ 
+Using GnuTLS version: 3.6.5 +Built against GnuTLS version: 3.5.19 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/status.txt b/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/status.txt new file mode 100644 index 00000000000000..07d8e06e83618c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/lighttpd/testdata/status.txt @@ -0,0 +1,6 @@ +Total Accesses: 12 +Total kBytes: 4 +Uptime: 11 +BusyServers: 3 +IdleServers: 125 +Scoreboard: khr_____________________________________________________________________________________________________________________________ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/logind/README.md b/src/go/collectors/go.d.plugin/modules/logind/README.md new file mode 120000 index 00000000000000..22c20d70504725 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/README.md @@ -0,0 +1 @@ +integrations/systemd-logind_users.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/logind/charts.go b/src/go/collectors/go.d.plugin/modules/logind/charts.go new file mode 100644 index 00000000000000..a7ba42ff15d927 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/charts.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package logind + +import "github.com/netdata/go.d.plugin/agent/module" + +const ( + prioSessions = module.Priority + iota + prioSessionsType + prioSessionsState + prioUsersState +) + +var charts = module.Charts{ + sessionsChart.Copy(), + sessionsTypeChart.Copy(), + sessionsStateChart.Copy(), + usersStateChart.Copy(), +} + +var sessionsChart = module.Chart{ + ID: "sessions", + Title: "Logind Sessions", + Units: "sessions", + Fam: "sessions", + Ctx: "logind.sessions", + Priority: prioSessions, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "sessions_remote", Name: "remote"}, + {ID: "sessions_local", Name: "local"}, + }, +} + +var sessionsTypeChart = module.Chart{ + ID: "sessions_type", + Title: "Logind Sessions By Type", + Units: "sessions", + Fam: "sessions", + Ctx: "logind.sessions_type", + Priority: prioSessionsType, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "sessions_type_console", Name: "console"}, + {ID: "sessions_type_graphical", Name: "graphical"}, + {ID: "sessions_type_other", Name: "other"}, + }, +} + +var sessionsStateChart = module.Chart{ + ID: "sessions_state", + Title: "Logind Sessions By State", + Units: "sessions", + Fam: "sessions", + Ctx: "logind.sessions_state", + Priority: prioSessionsState, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "sessions_state_online", Name: "online"}, + {ID: "sessions_state_closing", Name: "closing"}, + {ID: "sessions_state_active", Name: "active"}, + }, +} + +var usersStateChart = module.Chart{ + ID: "users_state", + Title: "Logind Users By State", + Units: "users", + Fam: "users", + Ctx: "logind.users_state", + Priority: prioUsersState, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "users_state_offline", Name: "offline"}, + {ID: "users_state_closing", Name: "closing"}, + {ID: "users_state_online", Name: "online"}, + {ID: "users_state_lingering", Name: "lingering"}, + {ID: "users_state_active", Name: "active"}, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/logind/collect.go b/src/go/collectors/go.d.plugin/modules/logind/collect.go new file mode 100644 index 00000000000000..1f22478b183eba --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/collect.go @@ -0,0 
+1,130 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package logind + +import ( + "strings" +) + +func (l *Logind) collect() (map[string]int64, error) { + if l.conn == nil { + conn, err := l.newLogindConn(l.Config) + if err != nil { + return nil, err + } + l.conn = conn + } + + mx := make(map[string]int64) + + // https://www.freedesktop.org/wiki/Software/systemd/logind/ (Session Objects) + if err := l.collectSessions(mx); err != nil { + return nil, err + } + // https://www.freedesktop.org/wiki/Software/systemd/logind/ (User Objects) + if err := l.collectUsers(mx); err != nil { + return nil, err + } + + return mx, nil +} + +func (l *Logind) collectSessions(mx map[string]int64) error { + sessions, err := l.conn.ListSessions() + if err != nil { + return err + } + + mx["sessions_remote"] = 0 + mx["sessions_local"] = 0 + mx["sessions_type_graphical"] = 0 + mx["sessions_type_console"] = 0 + mx["sessions_type_other"] = 0 + mx["sessions_state_online"] = 0 + mx["sessions_state_active"] = 0 + mx["sessions_state_closing"] = 0 + + for _, session := range sessions { + props, err := l.conn.GetSessionProperties(session.Path) + if err != nil { + return err + } + + if v, ok := props["Remote"]; ok && v.String() == "true" { + mx["sessions_remote"]++ + } else { + mx["sessions_local"]++ + } + + if v, ok := props["Type"]; ok { + typ := strings.Trim(v.String(), "\"") + switch typ { + case "x11", "mir", "wayland": + mx["sessions_type_graphical"]++ + case "tty": + mx["sessions_type_console"]++ + case "unspecified": + mx["sessions_type_other"]++ + default: + l.Debugf("unknown session type '%s' for session '%s/%s'", typ, session.User, session.ID) + mx["sessions_type_other"]++ + } + } + + if v, ok := props["State"]; ok { + state := strings.Trim(v.String(), "\"") + switch state { + case "online": + mx["sessions_state_online"]++ + case "active": + mx["sessions_state_active"]++ + case "closing": + mx["sessions_state_closing"]++ + default: + l.Debugf("unknown session state '%s' for session '%s/%s'", state, session.User, session.ID) + } + } + } + return nil +} + +func (l *Logind) collectUsers(mx map[string]int64) error { + users, err := l.conn.ListUsers() + if err != nil { + return err + } + + // https://www.freedesktop.org/software/systemd/man/sd_uid_get_state.html + mx["users_state_offline"] = 0 + mx["users_state_lingering"] = 0 + mx["users_state_online"] = 0 + mx["users_state_active"] = 0 + mx["users_state_closing"] = 0 + + for _, user := range users { + v, err := l.conn.GetUserProperty(user.Path, "State") + if err != nil { + return err + } + + state := strings.Trim(v.String(), "\"") + switch state { + case "offline": + mx["users_state_offline"]++ + case "lingering": + mx["users_state_lingering"]++ + case "online": + mx["users_state_online"]++ + case "active": + mx["users_state_active"]++ + case "closing": + mx["users_state_closing"]++ + default: + l.Debugf("unknown user state '%s' for user '%s/%d'", state, user.Name, user.UID) + } + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/logind/config_schema.json b/src/go/collectors/go.d.plugin/modules/logind/config_schema.json new file mode 100644 index 00000000000000..b7ad53e9a19adc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/config_schema.json @@ -0,0 +1,19 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/logind job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + 
"integer" + ] + } + }, + "required": [ + "name" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/logind/connection.go b/src/go/collectors/go.d.plugin/modules/logind/connection.go new file mode 100644 index 00000000000000..b97387acf06a0e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/connection.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package logind + +import ( + "context" + "time" + + "github.com/coreos/go-systemd/v22/login1" + "github.com/godbus/dbus/v5" +) + +type logindConnection interface { + Close() + + ListSessions() ([]login1.Session, error) + GetSessionProperties(dbus.ObjectPath) (map[string]dbus.Variant, error) + + ListUsers() ([]login1.User, error) + GetUserProperty(dbus.ObjectPath, string) (*dbus.Variant, error) +} + +func newLogindConnection(timeout time.Duration) (logindConnection, error) { + conn, err := login1.New() + if err != nil { + return nil, err + } + return &logindDBusConnection{ + conn: conn, + timeout: timeout, + }, nil +} + +type logindDBusConnection struct { + conn *login1.Conn + timeout time.Duration +} + +func (c *logindDBusConnection) Close() { + if c.conn != nil { + c.conn.Close() + c.conn = nil + } +} + +func (c *logindDBusConnection) ListSessions() ([]login1.Session, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + return c.conn.ListSessionsContext(ctx) +} + +func (c *logindDBusConnection) ListUsers() ([]login1.User, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + return c.conn.ListUsersContext(ctx) +} + +func (c *logindDBusConnection) GetSessionProperties(path dbus.ObjectPath) (map[string]dbus.Variant, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + return c.conn.GetSessionPropertiesContext(ctx, path) +} + +func (c *logindDBusConnection) GetUserProperty(path dbus.ObjectPath, property string) (*dbus.Variant, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.timeout) + defer cancel() + + return c.conn.GetUserPropertyContext(ctx, path, property) +} diff --git a/src/go/collectors/go.d.plugin/modules/logind/doc.go b/src/go/collectors/go.d.plugin/modules/logind/doc.go new file mode 100644 index 00000000000000..90aa8b4ef38551 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/doc.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logind diff --git a/src/go/collectors/go.d.plugin/modules/logind/integrations/systemd-logind_users.md b/src/go/collectors/go.d.plugin/modules/logind/integrations/systemd-logind_users.md new file mode 100644 index 00000000000000..a22a42a695f230 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/integrations/systemd-logind_users.md @@ -0,0 +1,135 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/logind/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/logind/metadata.yaml" +sidebar_label: "systemd-logind users" +learn_status: "Published" +learn_rel_path: "Data Collection/Systemd" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# systemd-logind users + + +<img src="https://netdata.cloud/img/users.svg" width="150"/> + + +Plugin: go.d.plugin +Module: logind + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector 
monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per systemd-logind users instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logind.sessions | remote, local | sessions |
+| logind.sessions_type | console, graphical, other | sessions |
+| logind.sessions_state | online, closing, active | sessions |
+| logind.users_state | offline, closing, online, lingering, active | users |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/logind.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/logind.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+#### Examples
+There are no configuration examples.
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m logind + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/logind/logind.go b/src/go/collectors/go.d.plugin/modules/logind/logind.go new file mode 100644 index 00000000000000..456217e9f64bf7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/logind.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package logind + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("logind", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + Priority: 59999, // copied from the python collector + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Logind { + return &Logind{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 2}, + }, + newLogindConn: func(cfg Config) (logindConnection, error) { + return newLogindConnection(cfg.Timeout.Duration) + }, + charts: charts.Copy(), + } +} + +type Config struct { + Timeout web.Duration `yaml:"timeout"` +} + +type Logind struct { + module.Base + Config `yaml:",inline"` + + newLogindConn func(config Config) (logindConnection, error) + conn logindConnection + charts *module.Charts +} + +func (l *Logind) Init() bool { + return true +} + +func (l *Logind) Check() bool { + return len(l.Collect()) > 0 +} + +func (l *Logind) Charts() *module.Charts { + return l.charts +} + +func (l *Logind) Collect() map[string]int64 { + mx, err := l.collect() + if err != nil { + l.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (l *Logind) Cleanup() { + if l.conn != nil { + l.conn.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/logind/logind_test.go b/src/go/collectors/go.d.plugin/modules/logind/logind_test.go new file mode 100644 index 00000000000000..07b00c16843937 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logind/logind_test.go @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package logind + +import ( + "errors" + "testing" + + "github.com/coreos/go-systemd/v22/login1" + "github.com/godbus/dbus/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLogind_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default config": { + wantFail: false, + config: New().Config, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + l := New() + l.Config = test.config + + if test.wantFail { + assert.False(t, l.Init()) + } else { + assert.True(t, l.Init()) + } + }) + } +} + +func TestLogind_Charts(t *testing.T) { + assert.Equal(t, len(charts), len(*New().Charts())) +} + +func TestLogind_Cleanup(t *testing.T) { + tests := map[string]struct { + wantClose bool + prepare func(l *Logind) + }{ + "after New": { + wantClose: false, + prepare: func(l *Logind) {}, + }, + "after Init": { + wantClose: false, + prepare: func(l *Logind) { l.Init() }, + }, + "after Check": { + wantClose: true, + prepare: func(l *Logind) { l.Init(); l.Check() }, + }, + "after Collect": { + wantClose: true, + prepare: func(l *Logind) { l.Init(); l.Collect() }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + l := New() + m := prepareConnOK() + 
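+			// swap in the mock connection factory so the test never opens a real D-Bus connection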
l.newLogindConn = func(Config) (logindConnection, error) { return m, nil } + test.prepare(l) + + require.NotPanics(t, l.Cleanup) + + if test.wantClose { + assert.True(t, m.closeCalled) + } else { + assert.False(t, m.closeCalled) + } + }) + } +} + +func TestLogind_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func() *mockConn + }{ + "success when response contains sessions and users": { + wantFail: false, + prepare: prepareConnOK, + }, + "success when response does not contain sessions and users": { + wantFail: false, + prepare: prepareConnOKNoSessionsNoUsers, + }, + "fail when error on list sessions": { + wantFail: true, + prepare: prepareConnErrOnListSessions, + }, + "fail when error on get session properties": { + wantFail: true, + prepare: prepareConnErrOnGetSessionProperties, + }, + "fail when error on list users": { + wantFail: true, + prepare: prepareConnErrOnListUsers, + }, + "fail when error on get user property": { + wantFail: true, + prepare: prepareConnErrOnGetUserProperty, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + l := New() + require.True(t, l.Init()) + l.conn = test.prepare() + + if test.wantFail { + assert.False(t, l.Check()) + } else { + assert.True(t, l.Check()) + } + }) + } +} + +func TestLogind_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *mockConn + expected map[string]int64 + }{ + "success when response contains sessions and users": { + prepare: prepareConnOK, + expected: map[string]int64{ + "sessions_local": 3, + "sessions_remote": 0, + "sessions_state_active": 0, + "sessions_state_closing": 0, + "sessions_state_online": 3, + "sessions_type_console": 3, + "sessions_type_graphical": 0, + "sessions_type_other": 0, + "users_state_active": 3, + "users_state_closing": 0, + "users_state_lingering": 0, + "users_state_offline": 0, + "users_state_online": 0, + }, + }, + "success when response does not contain sessions and users": { + prepare: prepareConnOKNoSessionsNoUsers, + expected: map[string]int64{ + "sessions_local": 0, + "sessions_remote": 0, + "sessions_state_active": 0, + "sessions_state_closing": 0, + "sessions_state_online": 0, + "sessions_type_console": 0, + "sessions_type_graphical": 0, + "sessions_type_other": 0, + "users_state_active": 0, + "users_state_closing": 0, + "users_state_lingering": 0, + "users_state_offline": 0, + "users_state_online": 0, + }, + }, + "fail when error on list sessions": { + prepare: prepareConnErrOnListSessions, + expected: map[string]int64(nil), + }, + "fail when error on get session properties": { + prepare: prepareConnErrOnGetSessionProperties, + expected: map[string]int64(nil), + }, + "fail when error on list users": { + prepare: prepareConnErrOnListUsers, + expected: map[string]int64(nil), + }, + "fail when error on get user property": { + prepare: prepareConnErrOnGetUserProperty, + expected: map[string]int64(nil), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + l := New() + require.True(t, l.Init()) + l.conn = test.prepare() + + mx := l.Collect() + + assert.Equal(t, test.expected, mx) + }) + } +} + +func prepareConnOK() *mockConn { + return &mockConn{ + sessions: []login1.Session{ + {Path: "/org/freedesktop/login1/session/_3156", User: "user1", ID: "123"}, + {Path: "/org/freedesktop/login1/session/_3157", User: "user2", ID: "124"}, + {Path: "/org/freedesktop/login1/session/_3158", User: "user3", ID: "125"}, + }, + users: []login1.User{ + {Path: "/org/freedesktop/login1/user/_1000", Name: "user1", 
UID: 123},
+			{Path: "/org/freedesktop/login1/user/_1001", Name: "user2", UID: 124},
+			{Path: "/org/freedesktop/login1/user/_1002", Name: "user3", UID: 125},
+		},
+		errOnListSessions:         false,
+		errOnGetSessionProperties: false,
+		errOnListUsers:            false,
+		errOnGetUserProperty:      false,
+		closeCalled:               false,
+	}
+}
+
+func prepareConnOKNoSessionsNoUsers() *mockConn {
+	conn := prepareConnOK()
+	conn.sessions = nil
+	conn.users = nil
+	return conn
+}
+
+func prepareConnErrOnListSessions() *mockConn {
+	conn := prepareConnOK()
+	conn.errOnListSessions = true
+	return conn
+}
+
+func prepareConnErrOnGetSessionProperties() *mockConn {
+	conn := prepareConnOK()
+	conn.errOnGetSessionProperties = true
+	return conn
+}
+
+func prepareConnErrOnListUsers() *mockConn {
+	conn := prepareConnOK()
+	conn.errOnListUsers = true
+	return conn
+}
+
+func prepareConnErrOnGetUserProperty() *mockConn {
+	conn := prepareConnOK()
+	conn.errOnGetUserProperty = true
+	return conn
+}
+
+type mockConn struct {
+	sessions []login1.Session
+	users    []login1.User
+
+	errOnListSessions         bool
+	errOnGetSessionProperties bool
+	errOnListUsers            bool
+	errOnGetUserProperty      bool
+	closeCalled               bool
+}
+
+func (m *mockConn) Close() {
+	m.closeCalled = true
+}
+
+func (m *mockConn) ListSessions() ([]login1.Session, error) {
+	if m.errOnListSessions {
+		return nil, errors.New("mock.ListSessions() error")
+	}
+	return m.sessions, nil
+}
+
+func (m *mockConn) GetSessionProperties(path dbus.ObjectPath) (map[string]dbus.Variant, error) {
+	if m.errOnGetSessionProperties {
+		return nil, errors.New("mock.GetSessionProperties() error")
+	}
+
+	var found bool
+	for _, s := range m.sessions {
+		if s.Path == path {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		return nil, errors.New("mock.GetSessionProperties(): session is not found")
+	}
+
+	return map[string]dbus.Variant{
+		"Remote": dbus.MakeVariant("true"),
+		"Type":   dbus.MakeVariant("tty"),
+		"State":  dbus.MakeVariant("online"),
+	}, nil
+}
+
+func (m *mockConn) ListUsers() ([]login1.User, error) {
+	if m.errOnListUsers {
+		return nil, errors.New("mock.ListUsers() error")
+	}
+	return m.users, nil
+}
+
+func (m *mockConn) GetUserProperty(path dbus.ObjectPath, _ string) (*dbus.Variant, error) {
+	if m.errOnGetUserProperty {
+		return nil, errors.New("mock.GetUserProperty() error")
+	}
+
+	var found bool
+	for _, u := range m.users {
+		if u.Path == path {
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		return nil, errors.New("mock.GetUserProperty(): user is not found")
+	}
+
+	v := dbus.MakeVariant("active")
+	return &v, nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/logind/metadata.yaml b/src/go/collectors/go.d.plugin/modules/logind/metadata.yaml
new file mode 100644
index 00000000000000..792a515fe15ffc
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/logind/metadata.yaml
@@ -0,0 +1,105 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-logind
+      plugin_name: go.d.plugin
+      module_name: logind
+      monitored_instance:
+        name: systemd-logind users
+        link: https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html
+        icon_filename: users.svg
+      categories:
+        - data-collection.systemd
+      keywords:
+        - logind
+        - systemd
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.
+ method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/logind.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: [] + examples: + folding: + title: Config + enabled: true + list: [] + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: logind.sessions + description: Logind Sessions + unit: sessions + chart_type: stacked + dimensions: + - name: remote + - name: local + - name: logind.sessions_type + description: Logind Sessions By Type + unit: sessions + chart_type: stacked + dimensions: + - name: console + - name: graphical + - name: other + - name: logind.sessions_state + description: Logind Sessions By State + unit: sessions + chart_type: stacked + dimensions: + - name: online + - name: closing + - name: active + - name: logind.users_state + description: Logind Users By State + unit: users + chart_type: stacked + dimensions: + - name: offline + - name: closing + - name: online + - name: lingering + - name: active diff --git a/src/go/collectors/go.d.plugin/modules/logstash/README.md b/src/go/collectors/go.d.plugin/modules/logstash/README.md new file mode 120000 index 00000000000000..7a35ae8ff0dacc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/README.md @@ -0,0 +1 @@ +integrations/logstash.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/logstash/charts.go b/src/go/collectors/go.d.plugin/modules/logstash/charts.go new file mode 100644 index 00000000000000..c6173e3caee446 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/charts.go @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logstash + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioJVMThreads = module.Priority + iota + prioJVMMemHeapUsed + prioJVMMemHeap + prioJVMMemPoolsEden + prioJVMMemPoolsSurvivor + prioJVMMemPoolsOld + prioJVMGCCollectorCount + prioJVMGCCollectorTime + prioOpenFileDescriptors + prioEvent + prioEventDuration + prioPipelineEvent + prioPipelineEventDurations + prioUptime +) + +var charts = module.Charts{ + // thread + { + ID: "jvm_threads", + Title: "JVM Threads", + Units: "count", + Fam: "threads", + Ctx: "logstash.jvm_threads", + Priority: prioJVMThreads, + Dims: module.Dims{ + {ID: "jvm_threads_count", Name: "threads"}, + }, + }, + // memory + { + ID: "jvm_mem_heap_used", + Title: "JVM Heap Memory Percentage", + Units: "percentage", + Fam: "memory", + Ctx: "logstash.jvm_mem_heap_used", + Priority: prioJVMMemHeapUsed, + Dims: module.Dims{ + {ID: "jvm_mem_heap_used_percent", Name: "in use"}, + }, + }, + { + ID: "jvm_mem_heap", + Title: "JVM Heap Memory", + Units: "KiB", + Fam: "memory", + Ctx: "logstash.jvm_mem_heap", + Type: module.Area, + Priority: prioJVMMemHeap, + Dims: module.Dims{ + {ID: "jvm_mem_heap_committed_in_bytes", Name: "committed", Div: 1024}, + {ID: "jvm_mem_heap_used_in_bytes", Name: "used", Div: 1024}, + }, + }, + { + ID: 
"jvm_mem_pools_eden", + Title: "JVM Pool Eden Memory", + Units: "KiB", + Fam: "memory", + Ctx: "logstash.jvm_mem_pools_eden", + Type: module.Area, + Priority: prioJVMMemPoolsEden, + Dims: module.Dims{ + {ID: "jvm_mem_pools_eden_committed_in_bytes", Name: "committed", Div: 1024}, + {ID: "jvm_mem_pools_eden_used_in_bytes", Name: "used", Div: 1024}, + }, + }, + { + ID: "jvm_mem_pools_survivor", + Title: "JVM Pool Survivor Memory", + Units: "KiB", + Fam: "memory", + Ctx: "logstash.jvm_mem_pools_survivor", + Type: module.Area, + Priority: prioJVMMemPoolsSurvivor, + Dims: module.Dims{ + {ID: "jvm_mem_pools_survivor_committed_in_bytes", Name: "committed", Div: 1024}, + {ID: "jvm_mem_pools_survivor_used_in_bytes", Name: "used", Div: 1024}, + }, + }, + { + ID: "jvm_mem_pools_old", + Title: "JVM Pool Old Memory", + Units: "KiB", + Fam: "memory", + Ctx: "logstash.jvm_mem_pools_old", + Type: module.Area, + Priority: prioJVMMemPoolsOld, + Dims: module.Dims{ + {ID: "jvm_mem_pools_old_committed_in_bytes", Name: "committed", Div: 1024}, + {ID: "jvm_mem_pools_old_used_in_bytes", Name: "used", Div: 1024}, + }, + }, + // garbage collection + { + ID: "jvm_gc_collector_count", + Title: "Garbage Collection Count", + Units: "counts/s", + Fam: "garbage collection", + Ctx: "logstash.jvm_gc_collector_count", + Priority: prioJVMGCCollectorCount, + Dims: module.Dims{ + {ID: "jvm_gc_collectors_eden_collection_count", Name: "eden", Algo: module.Incremental}, + {ID: "jvm_gc_collectors_old_collection_count", Name: "old", Algo: module.Incremental}, + }, + }, + { + ID: "jvm_gc_collector_time", + Title: "Time Spent On Garbage Collection", + Units: "ms", + Fam: "garbage collection", + Ctx: "logstash.jvm_gc_collector_time", + Priority: prioJVMGCCollectorTime, + Dims: module.Dims{ + {ID: "jvm_gc_collectors_eden_collection_time_in_millis", Name: "eden", Algo: module.Incremental}, + {ID: "jvm_gc_collectors_old_collection_time_in_millis", Name: "old", Algo: module.Incremental}, + }, + }, + // processes + { + ID: "open_file_descriptors", + Title: "Open File Descriptors", + Units: "fd", + Fam: "processes", + Ctx: "logstash.open_file_descriptors", + Priority: prioOpenFileDescriptors, + Dims: module.Dims{ + {ID: "process_open_file_descriptors", Name: "open"}, + }, + }, + // events + { + ID: "event", + Title: "Events Overview", + Units: "events/s", + Fam: "events", + Ctx: "logstash.event", + Priority: prioEvent, + Dims: module.Dims{ + {ID: "event_in", Name: "in", Algo: module.Incremental}, + {ID: "event_filtered", Name: "filtered", Algo: module.Incremental}, + {ID: "event_out", Name: "out", Algo: module.Incremental}, + }, + }, + { + ID: "event_duration", + Title: "Events Duration", + Units: "seconds", + Fam: "events", + Ctx: "logstash.event_duration", + Priority: prioEventDuration, + Dims: module.Dims{ + {ID: "event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental}, + {ID: "event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental}, + }, + }, + // uptime + { + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "logstash.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "jvm_uptime_in_millis", Name: "uptime", Div: 1000}, + }, + }, +} + +var pipelineChartsTmpl = module.Charts{ + { + ID: "pipeline_%s_event", + Title: "Pipeline Events", + Units: "events/s", + Fam: "pipeline events", + Ctx: "logstash.pipeline_event", + Priority: prioPipelineEvent, + Dims: module.Dims{ + {ID: "pipelines_%s_event_in", Name: "in", Algo: module.Incremental}, + {ID: 
"pipelines_%s_event_filtered", Name: "filtered", Algo: module.Incremental}, + {ID: "pipelines_%s_event_out", Name: "out", Algo: module.Incremental}, + }, + }, + { + ID: "pipeline_%s_event_duration", + Title: "Pipeline Events Duration", + Units: "seconds", + Fam: "pipeline events duration", + Ctx: "logstash.pipeline_event_duration", + Priority: prioPipelineEventDurations, + Dims: module.Dims{ + {ID: "pipelines_%s_event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental}, + {ID: "pipelines_%s_event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental}, + }, + }, +} + +func (l *Logstash) addPipelineCharts(id string) { + charts := pipelineChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, id) + chart.Labels = []module.Label{ + {Key: "pipeline", Value: id}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + } + + if err := l.Charts().Add(*charts...); err != nil { + l.Warning(err) + } +} + +func (l *Logstash) removePipelineCharts(id string) { + for _, chart := range *l.Charts() { + if strings.HasPrefix(chart.ID, "pipeline_"+id) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/collect.go b/src/go/collectors/go.d.plugin/modules/logstash/collect.go new file mode 100644 index 00000000000000..144113a4874883 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/collect.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logstash + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const urlPathNodeStatsAPI = "/_node/stats" + +func (l *Logstash) collect() (map[string]int64, error) { + stats, err := l.queryNodeStats() + if err != nil { + return nil, err + } + + l.updateCharts(stats.Pipelines) + + return stm.ToMap(stats), nil +} + +func (l *Logstash) updateCharts(pipelines map[string]pipelineStats) { + seen := make(map[string]bool) + + for id := range pipelines { + seen[id] = true + if !l.pipelines[id] { + l.pipelines[id] = true + l.addPipelineCharts(id) + } + } + + for id := range l.pipelines { + if !seen[id] { + delete(l.pipelines, id) + l.removePipelineCharts(id) + } + } +} + +func (l *Logstash) queryNodeStats() (*nodeStats, error) { + req, _ := web.NewHTTPRequest(l.Request.Copy()) + req.URL.Path = urlPathNodeStatsAPI + + var stats nodeStats + + if err := l.doWithDecode(&stats, req); err != nil { + return nil, err + } + + return &stats, nil +} + +func (l *Logstash) doWithDecode(dst interface{}, req *http.Request) error { + l.Debugf("executing %s '%s'", req.Method, req.URL) + resp, err := l.httpClient.Do(req) + if err != nil { + return err + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status) + } + + content, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error on reading response from %s : %v", req.URL, err) + } + + if err := json.Unmarshal(content, dst); err != nil { + return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/config_schema.json 
b/src/go/collectors/go.d.plugin/modules/logstash/config_schema.json new file mode 100644 index 00000000000000..9e4d596426c7e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/logstash job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/integrations/logstash.md b/src/go/collectors/go.d.plugin/modules/logstash/integrations/logstash.md new file mode 100644 index 00000000000000..7497b842cc57a2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/integrations/logstash.md @@ -0,0 +1,248 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/logstash/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/logstash/metadata.yaml" +sidebar_label: "Logstash" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Logstash + + +<img src="https://netdata.cloud/img/elastic-logstash.svg" width="150"/> + + +Plugin: go.d.plugin +Module: logstash + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Logstash instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Logstash instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
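+ +These metrics come from the Logstash [node stats API](https://www.elastic.co/guide/en/logstash/current/node-stats-api.html) (`/_node/stats`), which the collector queries on every update. As a quick sanity check, you can query the endpoint directly (assuming the monitoring API listens on the default `http://localhost:9600`): + +```bash +curl http://localhost:9600/_node/stats +```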
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| logstash.jvm_threads | threads | count | +| logstash.jvm_mem_heap_used | in_use | percentage | +| logstash.jvm_mem_heap | committed, used | KiB | +| logstash.jvm_mem_pools_eden | committed, used | KiB | +| logstash.jvm_mem_pools_survivor | committed, used | KiB | +| logstash.jvm_mem_pools_old | committed, used | KiB | +| logstash.jvm_gc_collector_count | eden, old | counts/s | +| logstash.jvm_gc_collector_time | eden, old | ms | +| logstash.open_file_descriptors | open | fd | +| logstash.event | in, filtered, out | events/s | +| logstash.event_duration | event, queue | seconds | +| logstash.uptime | uptime | seconds | + +### Per pipeline + +These metrics refer to the pipeline. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| pipeline | pipeline name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| logstash.pipeline_event | in, filtered, out | events/s | +| logstash.pipeline_event_duration | event, queue | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/logstash.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/logstash.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://localhost:9600 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://localhost:9600 + +``` +</details> + +##### HTTP authentication + +HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://localhost:9600 + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +HTTPS and self-signed certificate.
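+ +With `tls_skip_verify: yes` the client accepts the server certificate without verifying it. A sketch of a stricter alternative, assuming the self-signed certificate is available as a local file (the `/path/to/ca.pem` path below is hypothetical), is to leave verification enabled and point `tls_ca` at that file: + +```yaml +jobs: + - name: local + url: https://localhost:9600 + tls_ca: /path/to/ca.pem +```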
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://localhost:9600 + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://localhost:9600 + + - name: remote + url: http://192.0.2.1:9600 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m logstash + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/logstash/logstash.go b/src/go/collectors/go.d.plugin/modules/logstash/logstash.go new file mode 100644 index 00000000000000..72826729408f04 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/logstash.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logstash + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("logstash", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Logstash { + return &Logstash{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://localhost:9600", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + charts: charts.Copy(), + pipelines: make(map[string]bool), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Logstash struct { + module.Base + Config `yaml:",inline"` + httpClient *http.Client + charts *module.Charts + pipelines map[string]bool +} + +func (l *Logstash) Init() bool { + if l.URL == "" { + l.Error("config validation: 'url' cannot be empty") + return false + } + + httpClient, err := web.NewHTTPClient(l.Client) + if err != nil { + l.Errorf("init HTTP client: %v", err) + return false + } + l.httpClient = httpClient + + l.Debugf("using URL %s", l.URL) + l.Debugf("using timeout: %s", l.Timeout.Duration) + return true +} + +func (l *Logstash) Check() bool { + return len(l.Collect()) > 0 +} + +func (l *Logstash) Charts() *module.Charts { + return l.charts +} + +func (l *Logstash) Collect() map[string]int64 { + mx, err := l.collect() + if err != nil { + l.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (l *Logstash) Cleanup() { + if l.httpClient != nil { + l.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/logstash_test.go b/src/go/collectors/go.d.plugin/modules/logstash/logstash_test.go new file mode 100644 index 00000000000000..2b5fd32d5dbd77 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/logstash_test.go @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logstash + +import ( + "net/http" + 
"net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + nodeStataData, _ = os.ReadFile("testdata/stats.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "nodeStataData": nodeStataData, + } { + require.NotNilf(t, data, name) + + } +} + +func TestLogstash_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ls := New() + ls.Config = test.config + + if test.wantFail { + assert.False(t, ls.Init()) + } else { + assert.True(t, ls.Init()) + } + }) + } +} + +func TestLogstash_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestLogstash_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestLogstash_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (ls *Logstash, cleanup func()) + }{ + "success on valid response": { + wantFail: false, + prepare: caseValidResponse, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + "fail on 404 response": { + wantFail: true, + prepare: case404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ls, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, ls.Check()) + } else { + assert.True(t, ls.Check()) + } + }) + } +} + +func TestLogstash_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (ls *Logstash, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success on valid response": { + prepare: caseValidResponse, + wantNumOfCharts: len(charts) + len(pipelineChartsTmpl), + wantMetrics: map[string]int64{ + "event_duration_in_millis": 0, + "event_filtered": 0, + "event_in": 0, + "event_out": 0, + "event_queue_push_duration_in_millis": 0, + "jvm_gc_collectors_eden_collection_count": 5796, + "jvm_gc_collectors_eden_collection_time_in_millis": 45008, + "jvm_gc_collectors_old_collection_count": 7, + "jvm_gc_collectors_old_collection_time_in_millis": 3263, + "jvm_mem_heap_committed_in_bytes": 528154624, + "jvm_mem_heap_used_in_bytes": 189973480, + "jvm_mem_heap_used_percent": 35, + "jvm_mem_pools_eden_committed_in_bytes": 69795840, + "jvm_mem_pools_eden_used_in_bytes": 2600120, + "jvm_mem_pools_old_committed_in_bytes": 449642496, + "jvm_mem_pools_old_used_in_bytes": 185944824, + "jvm_mem_pools_survivor_committed_in_bytes": 8716288, + "jvm_mem_pools_survivor_used_in_bytes": 1428536, + "jvm_threads_count": 28, + "jvm_uptime_in_millis": 699809475, + "pipelines_pipeline-1_event_duration_in_millis": 5027018, + "pipelines_pipeline-1_event_filtered": 567639, + "pipelines_pipeline-1_event_in": 567639, + "pipelines_pipeline-1_event_out": 567639, + "pipelines_pipeline-1_event_queue_push_duration_in_millis": 84241, + "process_open_file_descriptors": 101, + }, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + 
wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on 404 response": { + prepare: case404, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ls, cleanup := test.prepare(t) + defer cleanup() + + mx := ls.Collect() + + require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { + assert.Equal(t, test.wantNumOfCharts, len(*ls.Charts())) + ensureCollectedHasAllChartsDimsVarsIDs(t, ls, mx) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ls *Logstash, mx map[string]int64) { + for _, chart := range *ls.Charts() { + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := mx[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func caseValidResponse(t *testing.T) (*Logstash, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathNodeStatsAPI: + _, _ = w.Write(nodeStataData) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + ls := New() + ls.URL = srv.URL + require.True(t, ls.Init()) + + return ls, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + ls := New() + ls.URL = srv.URL + require.True(t, ls.Init()) + + return ls, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*Logstash, func()) { + t.Helper() + ls := New() + ls.URL = "http://127.0.0.1:65001" + require.True(t, ls.Init()) + + return ls, func() {} +} + +func case404(t *testing.T) (*Logstash, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + ls := New() + ls.URL = srv.URL + require.True(t, ls.Init()) + + return ls, srv.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/metadata.yaml b/src/go/collectors/go.d.plugin/modules/logstash/metadata.yaml new file mode 100644 index 00000000000000..2b3ddfd27d4193 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/metadata.yaml @@ -0,0 +1,274 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-logstash + plugin_name: go.d.plugin + module_name: logstash + monitored_instance: + name: Logstash + link: https://www.elastic.co/products/logstash + icon_filename: elastic-logstash.svg + categories: + - data-collection.logs-servers + keywords: + - logstash + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Logstash instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/logstash.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. 
+ folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://localhost:9600 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://localhost:9600 + - name: HTTP authentication + description: HTTP authentication. + config: | + jobs: + - name: local + url: http://localhost:9600 + username: username + password: password + - name: HTTPS with self-signed certificate + description: HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://localhost:9600 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://localhost:9600 + + - name: remote + url: http://192.0.2.1:9600 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: logstash.jvm_threads + description: JVM Threads + unit: count + chart_type: line + dimensions: + - name: threads + - name: logstash.jvm_mem_heap_used + description: JVM Heap Memory Percentage + unit: percentage + chart_type: line + dimensions: + - name: in_use + - name: logstash.jvm_mem_heap + description: JVM Heap Memory + unit: KiB + chart_type: area + dimensions: + - name: committed + - name: used + - name: logstash.jvm_mem_pools_eden + description: JVM Pool Eden Memory + unit: KiB + chart_type: area + dimensions: + - name: committed + - name: used + - name: logstash.jvm_mem_pools_survivor + description: JVM Pool Survivor Memory + unit: KiB + chart_type: area + dimensions: + - name: committed + - name: used + - name: logstash.jvm_mem_pools_old + description: JVM Pool Old Memory + unit: KiB + chart_type: area + dimensions: + - name: committed + - name: used + - name: logstash.jvm_gc_collector_count + description: Garbage Collection Count + unit: counts/s + chart_type: line + dimensions: + - name: eden + - name: old + - name: logstash.jvm_gc_collector_time + description: Time Spent On Garbage Collection + unit: ms + chart_type: line + dimensions: + - name: eden + - name: old + - name: logstash.open_file_descriptors + description: Open File Descriptors + unit: fd + chart_type: line + dimensions: + - name: open + - name: logstash.event + description: Events Overview + unit: events/s + chart_type: line + dimensions: + - name: in + - name: filtered + - name: out + - name: logstash.event_duration + description: Events Duration + unit: seconds + chart_type: line + dimensions: + - name: event + - name: queue + - name: logstash.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: pipeline + description: These metrics refer to the pipeline. 
+ labels: + - name: pipeline + description: pipeline name + metrics: + - name: logstash.pipeline_event + description: Pipeline Events + unit: events/s + chart_type: line + dimensions: + - name: in + - name: filtered + - name: out + - name: logstash.pipeline_event_duration + description: Pipeline Events Duration + unit: seconds + chart_type: line + dimensions: + - name: event + - name: queue diff --git a/src/go/collectors/go.d.plugin/modules/logstash/node_stats.go b/src/go/collectors/go.d.plugin/modules/logstash/node_stats.go new file mode 100644 index 00000000000000..1687f333de1ad7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/node_stats.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logstash + +// https://www.elastic.co/guide/en/logstash/current/node-stats-api.html + +type nodeStats struct { + JVM jvmStats `json:"jvm" stm:"jvm"` + Process processStats `json:"process" stm:"process"` + Event eventsStats `json:"events" stm:"event"` + Pipelines map[string]pipelineStats `json:"pipelines" stm:"pipelines"` +} + +type pipelineStats struct { + Event eventsStats `json:"events" stm:"event"` +} + +type eventsStats struct { + In int `json:"in" stm:"in"` + Filtered int `json:"filtered" stm:"filtered"` + Out int `json:"out" stm:"out"` + DurationInMillis int `json:"duration_in_millis" stm:"duration_in_millis"` + QueuePushDurationInMillis int `json:"queue_push_duration_in_millis" stm:"queue_push_duration_in_millis"` +} + +type processStats struct { + OpenFileDescriptors int `json:"open_file_descriptors" stm:"open_file_descriptors"` +} + +type jvmStats struct { + Threads struct { + Count int `stm:"count"` + } `stm:"threads"` + Mem jvmMemStats `stm:"mem"` + GC jvmGCStats `stm:"gc"` + UptimeInMillis int `json:"uptime_in_millis" stm:"uptime_in_millis"` +} + +type jvmMemStats struct { + HeapUsedPercent int `json:"heap_used_percent" stm:"heap_used_percent"` + HeapCommittedInBytes int `json:"heap_committed_in_bytes" stm:"heap_committed_in_bytes"` + HeapUsedInBytes int `json:"heap_used_in_bytes" stm:"heap_used_in_bytes"` + Pools struct { + Survivor jvmPoolStats `stm:"survivor"` + Old jvmPoolStats `stm:"old"` + Young jvmPoolStats `stm:"eden"` + } `stm:"pools"` +} + +type jvmPoolStats struct { + UsedInBytes int `json:"used_in_bytes" stm:"used_in_bytes"` + CommittedInBytes int `json:"committed_in_bytes" stm:"committed_in_bytes"` +} + +type jvmGCStats struct { + Collectors struct { + Old gcCollectorStats `stm:"old"` + Young gcCollectorStats `stm:"eden"` + } `stm:"collectors"` +} + +type gcCollectorStats struct { + CollectionTimeInMillis int `json:"collection_time_in_millis" stm:"collection_time_in_millis"` + CollectionCount int `json:"collection_count" stm:"collection_count"` +} diff --git a/src/go/collectors/go.d.plugin/modules/logstash/testdata/stats.json b/src/go/collectors/go.d.plugin/modules/logstash/testdata/stats.json new file mode 100644 index 00000000000000..50fd7b071c6084 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/logstash/testdata/stats.json @@ -0,0 +1,252 @@ +{ + "host" : "<replaced>", + "version" : "7.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "<replaced>", + "name" : "<replaced>", + "ephemeral_id" : "339d4ddb-8a6e-4ddc-b843-efd4abf4bf73", + "status" : "green", + "snapshot" : false, + "pipeline" : { + "workers" : 1, + "batch_size" : 125, + "batch_delay" : 50 + }, + "jvm" : { + "threads" : { + "count" : 28, + "peak_count" : 32 + }, + "mem" : { + "heap_used_percent" : 35, + "heap_committed_in_bytes" : 528154624, + "heap_max_in_bytes" : 
528154624, + "heap_used_in_bytes" : 189973480, + "non_heap_used_in_bytes" : 178053280, + "non_heap_committed_in_bytes" : 235200512, + "pools" : { + "young" : { + "committed_in_bytes" : 69795840, + "peak_max_in_bytes" : 69795840, + "max_in_bytes" : 69795840, + "peak_used_in_bytes" : 69795840, + "used_in_bytes" : 2600120 + }, + "old" : { + "committed_in_bytes" : 449642496, + "peak_max_in_bytes" : 449642496, + "max_in_bytes" : 449642496, + "peak_used_in_bytes" : 185944824, + "used_in_bytes" : 185944824 + }, + "survivor" : { + "committed_in_bytes" : 8716288, + "peak_max_in_bytes" : 8716288, + "max_in_bytes" : 8716288, + "peak_used_in_bytes" : 8716288, + "used_in_bytes" : 1428536 + } + } + }, + "gc" : { + "collectors" : { + "young" : { + "collection_count" : 5796, + "collection_time_in_millis" : 45008 + }, + "old" : { + "collection_count" : 7, + "collection_time_in_millis" : 3263 + } + } + }, + "uptime_in_millis" : 699809475 + }, + "process" : { + "open_file_descriptors" : 101, + "peak_open_file_descriptors" : 105, + "max_file_descriptors" : 1048576, + "mem" : { + "total_virtual_in_bytes" : 5074657280 + }, + "cpu" : { + "total_in_millis" : 7304550, + "percent" : 0, + "load_average" : { + "1m" : 0.73, + "5m" : 1.13, + "15m" : 1.06 + } + } + }, + "events" : { + "in" : 567639, + "filtered" : 567639, + "out" : 567639, + "duration_in_millis" : 5027018, + "queue_push_duration_in_millis" : 84241 + }, + "pipelines" : { + "pipeline-1" : { + "events" : { + "queue_push_duration_in_millis" : 84241, + "filtered" : 567639, + "duration_in_millis" : 5027018, + "in" : 567639, + "out" : 567639 + }, + "plugins" : { + "inputs" : [ { + "id" : "kafka input", + "events" : { + "queue_push_duration_in_millis" : 84241, + "out" : 567639 + }, + "name" : "kafka" + } ], + "codecs" : [ { + "id" : "json_9562e6c4-7a1a-4c18-919f-f012e58923dd", + "decode" : { + "writes_in" : 567639, + "duration_in_millis" : 86778, + "out" : 567639 + }, + "name" : "json", + "encode" : { + "writes_in" : 0, + "duration_in_millis" : 0 + } + }, { + "id" : "plain_13e28721-e681-43ec-aa2c-c0a4d856b9ed", + "decode" : { + "writes_in" : 0, + "duration_in_millis" : 0, + "out" : 0 + }, + "name" : "plain", + "encode" : { + "writes_in" : 0, + "duration_in_millis" : 0 + } + } ], + "filters" : [ { + "id" : "set default timezone", + "events" : { + "duration_in_millis" : 340, + "in" : 326901, + "out" : 326901 + }, + "name" : "mutate" + }, { + "id" : "assign index (filebeat)", + "events" : { + "duration_in_millis" : 858, + "in" : 567639, + "out" : 567639 + }, + "name" : "mutate" + }, { + "id" : "parse JSON", + "events" : { + "duration_in_millis" : 112, + "in" : 0, + "out" : 0 + }, + "name" : "json" + }, { + "id" : "parse LTSV", + "events" : { + "duration_in_millis" : 130, + "in" : 0, + "out" : 0 + }, + "name" : "kv" + }, { + "id" : "assign document_id", + "events" : { + "duration_in_millis" : 2406, + "in" : 567639, + "out" : 567639 + }, + "name" : "fingerprint" + }, { + "id" : "assign index (fluentd)", + "events" : { + "duration_in_millis" : 140, + "in" : 0, + "out" : 0 + }, + "name" : "mutate" + }, { + "id" : "parse timestamp", + "events" : { + "duration_in_millis" : 7261, + "in" : 326901, + "out" : 326901 + }, + "name" : "date", + "failures" : 1, + "matches" : 326900 + } ], + "outputs" : [ { + "id" : "0f72afb28c5ff3a3897d87b04fc1b0a5fe8358cb55bbc29b995056fd868e612b", + "events" : { + "duration_in_millis" : 4063485, + "in" : 567639, + "out" : 567639 + }, + "name" : "elasticsearch", + "documents" : { + "successes" : 567639 + }, + "bulk_requests" : { + "responses" : 
{ + "200" : 50735 + }, + "successes" : 50735 + } + } ] + }, + "reloads" : { + "last_error" : null, + "last_failure_timestamp" : null, + "last_success_timestamp" : null, + "failures" : 0, + "successes" : 0 + }, + "queue" : { + "type" : "persisted", + "events_count" : 0, + "queue_size_in_bytes" : 45085456, + "max_queue_size_in_bytes" : 1073741824 + }, + "hash" : "46f5c757f55a52d08ed841e9f51698653cf228ff9be41b7372f20a1b699bf129", + "ephemeral_id" : "c43b3a8e-882c-4e3a-a2f2-8515a5ef4ecc" + } + }, + "reloads" : { + "failures" : 0, + "successes" : 0 + }, + "os" : { + "cgroup" : { + "cpuacct" : { + "control_group" : "/", + "usage_nanos" : 7304416115351 + }, + "cpu" : { + "control_group" : "/", + "cfs_quota_micros" : 100000, + "cfs_period_micros" : 100000, + "stat" : { + "time_throttled_nanos" : 124716913549, + "number_of_elapsed_periods" : 5875889, + "number_of_times_throttled" : 1219 + } + } + } + }, + "queue" : { + "events_count" : 0 + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/README.md b/src/go/collectors/go.d.plugin/modules/mongodb/README.md new file mode 120000 index 00000000000000..a28253054e1b69 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/README.md @@ -0,0 +1 @@ +integrations/mongodb.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/charts.go b/src/go/collectors/go.d.plugin/modules/mongodb/charts.go new file mode 100644 index 00000000000000..9852541d422204 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/charts.go @@ -0,0 +1,1036 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioOperationsRate = module.Priority + iota + prioOperationsLatencyTime + prioOperationsByTypeRate + prioDocumentOperationsRate + prioScannedIndexesRate + prioScannedDocumentsRate + + prioActiveClientsCount + prioQueuedOperationsCount + + prioGlobalLockAcquisitionsRate + prioDatabaseLockAcquisitionsRate + prioCollectionLockAcquisitionsRate + prioMutexLockAcquisitionsRate + prioMetadataLockAcquisitionsRate + prioOpLogLockAcquisitionsRate + + prioCursorsOpenCount + prioCursorsOpenNoTimeoutCount + prioCursorsOpenedRate + prioTimedOutCursorsRate + prioCursorsByLifespanCount + + prioTransactionsCount + prioTransactionsRate + prioTransactionsNoShardsCommitsRate + prioTransactionsNoShardsCommitsDurationTime + prioTransactionsSingleShardCommitsRate + prioTransactionsSingleShardCommitsDurationTime + prioTransactionsSingleWriteShardCommitsRate + prioTransactionsSingleWriteShardCommitsDurationTime + prioTransactionsReadOnlyCommitsRate + prioTransactionsReadOnlyCommitsDurationTime + prioTransactionsTwoPhaseCommitCommitsRate + prioTransactionsTwoPhaseCommitCommitsDurationTime + prioTransactionsRecoverWithTokenCommitsRate + prioTransactionsRecoverWithTokenCommitsDurationTime + + prioConnectionsUsage + prioConnectionsByStateCount + prioConnectionsRate + + prioAssertsRate + + prioNetworkTrafficRate + prioNetworkRequestsRate + prioNetworkSlowDNSResolutionsRate + prioNetworkSlowSSLHandshakesRate + + prioMemoryResidentSize + prioMemoryVirtualSize + prioMemoryPageFaultsRate + prioMemoryTCMallocStats + + prioWiredTigerConcurrentReadTransactionsUsage + prioWiredTigerConcurrentWriteTransactionsUsage + prioWiredTigerCacheUsage + prioWiredTigerCacheDirtySpaceSize + prioWiredTigerCacheIORate + prioWiredTigerCacheEvictionsRate + + prioDatabaseCollectionsCount + prioDatabaseIndexesCount + prioDatabaseViewsCount + 
prioDatabaseDocumentsCount + prioDatabaseDataSize + prioDatabaseStorageSize + prioDatabaseIndexSize + + prioReplSetMemberState + prioReplSetMemberHealthStatus + prioReplSetMemberReplicationLagTime + prioReplSetMemberHeartbeatLatencyTime + prioReplSetMemberPingRTTTime + prioReplSetMemberUptime + + prioShardingNodesCount + prioShardingShardedDatabasesCount + prioShardingShardedCollectionsCount + prioShardChunks +) + +const ( + chartPxDatabase = "database_" + chartPxReplSetMember = "replica_set_member_" + chartPxShard = "sharding_shard_" +) + +// these charts are expected to be available in many versions +var chartsServerStatus = module.Charts{ + chartOperationsByTypeRate.Copy(), + chartDocumentOperationsRate.Copy(), + chartScannedIndexesRate.Copy(), + chartScannedDocumentsRate.Copy(), + + chartConnectionsUsage.Copy(), + chartConnectionsByStateCount.Copy(), + chartConnectionsRate.Copy(), + + chartNetworkTrafficRate.Copy(), + chartNetworkRequestsRate.Copy(), + + chartMemoryResidentSize.Copy(), + chartMemoryVirtualSize.Copy(), + chartMemoryPageFaultsRate.Copy(), + + chartAssertsRate.Copy(), +} + +var chartsTmplDatabase = module.Charts{ + chartTmplDatabaseCollectionsCount.Copy(), + chartTmplDatabaseIndexesCount.Copy(), + chartTmplDatabaseViewsCount.Copy(), + chartTmplDatabaseDocumentsCount.Copy(), + chartTmplDatabaseDataSize.Copy(), + chartTmplDatabaseStorageSize.Copy(), + chartTmplDatabaseIndexSize.Copy(), +} + +var chartsTmplReplSetMember = module.Charts{ + chartTmplReplSetMemberState.Copy(), + chartTmplReplSetMemberHealthStatus.Copy(), + chartTmplReplSetMemberReplicationLagTime.Copy(), + chartTmplReplSetMemberHeartbeatLatencyTime.Copy(), + chartTmplReplSetMemberPingRTTTime.Copy(), + chartTmplReplSetMemberUptime.Copy(), +} + +var chartsSharding = module.Charts{ + chartShardingNodesCount.Copy(), + chartShardingShardedDatabases.Copy(), + chartShardingShardedCollectionsCount.Copy(), +} + +var chartsTmplShardingShard = module.Charts{ + chartTmplShardChunks.Copy(), +} + +var ( + chartOperationsRate = module.Chart{ + ID: "operations_rate", + Title: "Operations rate", + Units: "operations/s", + Fam: "operations", + Ctx: "mongodb.operations_rate", + Priority: prioOperationsRate, + Dims: module.Dims{ + {ID: "operations_latencies_reads_ops", Name: "reads", Algo: module.Incremental}, + {ID: "operations_latencies_writes_ops", Name: "writes", Algo: module.Incremental}, + {ID: "operations_latencies_commands_ops", Name: "commands", Algo: module.Incremental}, + }, + } + chartOperationsLatencyTime = module.Chart{ + ID: "operations_latency_time", + Title: "Operations Latency", + Units: "milliseconds", + Fam: "operations", + Ctx: "mongodb.operations_latency_time", + Priority: prioOperationsLatencyTime, + Dims: module.Dims{ + {ID: "operations_latencies_reads_latency", Name: "reads", Algo: module.Incremental, Div: 1000}, + {ID: "operations_latencies_writes_latency", Name: "writes", Algo: module.Incremental, Div: 1000}, + {ID: "operations_latencies_commands_latency", Name: "commands", Algo: module.Incremental, Div: 1000}, + }, + } + chartOperationsByTypeRate = module.Chart{ + ID: "operations_by_type_rate", + Title: "Operations by type", + Units: "operations/s", + Fam: "operations", + Ctx: "mongodb.operations_by_type_rate", + Priority: prioOperationsByTypeRate, + Dims: module.Dims{ + {ID: "operations_insert", Name: "insert", Algo: module.Incremental}, + {ID: "operations_query", Name: "query", Algo: module.Incremental}, + {ID: "operations_update", Name: "update", Algo: module.Incremental}, + {ID: "operations_delete", 
Name: "delete", Algo: module.Incremental}, + {ID: "operations_getmore", Name: "getmore", Algo: module.Incremental}, + {ID: "operations_command", Name: "command", Algo: module.Incremental}, + }, + } + chartDocumentOperationsRate = module.Chart{ + ID: "document_operations_rate", + Title: "Document operations", + Units: "operations/s", + Fam: "operations", + Ctx: "mongodb.document_operations_rate", + Type: module.Stacked, + Priority: prioDocumentOperationsRate, + Dims: module.Dims{ + {ID: "metrics_document_inserted", Name: "inserted", Algo: module.Incremental}, + {ID: "metrics_document_deleted", Name: "deleted", Algo: module.Incremental}, + {ID: "metrics_document_returned", Name: "returned", Algo: module.Incremental}, + {ID: "metrics_document_updated", Name: "updated", Algo: module.Incremental}, + }, + } + chartScannedIndexesRate = module.Chart{ + ID: "scanned_indexes_rate", + Title: "Scanned indexes", + Units: "indexes/s", + Fam: "operations", + Ctx: "mongodb.scanned_indexes_rate", + Priority: prioScannedIndexesRate, + Dims: module.Dims{ + {ID: "metrics_query_executor_scanned", Name: "scanned", Algo: module.Incremental}, + }, + } + chartScannedDocumentsRate = module.Chart{ + ID: "scanned_documents_rate", + Title: "Scanned documents", + Units: "documents/s", + Fam: "operations", + Ctx: "mongodb.scanned_documents_rate", + Priority: prioScannedDocumentsRate, + Dims: module.Dims{ + {ID: "metrics_query_executor_scanned_objects", Name: "scanned", Algo: module.Incremental}, + }, + } + + chartGlobalLockActiveClientsCount = module.Chart{ + ID: "active_clients_count", + Title: "Connected clients", + Units: "clients", + Fam: "clients", + Ctx: "mongodb.active_clients_count", + Priority: prioActiveClientsCount, + Dims: module.Dims{ + {ID: "global_lock_active_clients_readers", Name: "readers"}, + {ID: "global_lock_active_clients_writers", Name: "writers"}, + }, + } + chartGlobalLockCurrentQueueCount = module.Chart{ + ID: "queued_operations", + Title: "Queued operations because of a lock", + Units: "operations", + Fam: "clients", + Ctx: "mongodb.queued_operations_count", + Priority: prioQueuedOperationsCount, + Dims: module.Dims{ + {ID: "global_lock_current_queue_readers", Name: "readers"}, + {ID: "global_lock_current_queue_writers", Name: "writers"}, + }, + } + + chartConnectionsUsage = module.Chart{ + ID: "connections_usage", + Title: "Connections usage", + Units: "connections", + Fam: "connections", + Ctx: "mongodb.connections_usage", + Type: module.Stacked, + Priority: prioConnectionsUsage, + Dims: module.Dims{ + {ID: "connections_available", Name: "available"}, + {ID: "connections_current", Name: "used"}, + }, + } + chartConnectionsByStateCount = module.Chart{ + ID: "connections_by_state_count", + Title: "Connections By State", + Units: "connections", + Fam: "connections", + Ctx: "mongodb.connections_by_state_count", + Priority: prioConnectionsByStateCount, + Dims: module.Dims{ + {ID: "connections_active", Name: "active"}, + {ID: "connections_threaded", Name: "threaded"}, + {ID: "connections_exhaust_is_master", Name: "exhaust_is_master"}, + {ID: "connections_exhaust_hello", Name: "exhaust_hello"}, + {ID: "connections_awaiting_topology_changes", Name: "awaiting_topology_changes"}, + }, + } + chartConnectionsRate = module.Chart{ + ID: "connections_rate", + Title: "Connections Rate", + Units: "connections/s", + Fam: "connections", + Ctx: "mongodb.connections_rate", + Priority: prioConnectionsRate, + Dims: module.Dims{ + {ID: "connections_total_created", Name: "created", Algo: module.Incremental}, + }, + 
} + + chartNetworkTrafficRate = module.Chart{ + ID: "network_traffic", + Title: "Network traffic", + Units: "bytes/s", + Fam: "network", + Ctx: "mongodb.network_traffic_rate", + Priority: prioNetworkTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "network_bytes_in", Name: "in", Algo: module.Incremental}, + {ID: "network_bytes_out", Name: "out", Algo: module.Incremental}, + }, + } + chartNetworkRequestsRate = module.Chart{ + ID: "network_requests_rate", + Title: "Network Requests", + Units: "requests/s", + Fam: "network", + Ctx: "mongodb.network_requests_rate", + Priority: prioNetworkRequestsRate, + Dims: module.Dims{ + {ID: "network_requests", Name: "requests", Algo: module.Incremental}, + }, + } + chartNetworkSlowDNSResolutionsRate = module.Chart{ + ID: "network_slow_dns_resolutions_rate", + Title: "Slow DNS resolution operations", + Units: "resolutions/s", + Fam: "network", + Ctx: "mongodb.network_slow_dns_resolutions_rate", + Priority: prioNetworkSlowDNSResolutionsRate, + Dims: module.Dims{ + {ID: "network_slow_dns_operations", Name: "slow_dns", Algo: module.Incremental}, + }, + } + chartNetworkSlowSSLHandshakesRate = module.Chart{ + ID: "network_slow_ssl_handshakes_rate", + Title: "Slow SSL handshake operations", + Units: "handshakes/s", + Fam: "network", + Ctx: "mongodb.network_slow_ssl_handshakes_rate", + Priority: prioNetworkSlowSSLHandshakesRate, + Dims: module.Dims{ + {ID: "network_slow_ssl_operations", Name: "slow_ssl", Algo: module.Incremental}, + }, + } + + chartMemoryResidentSize = module.Chart{ + ID: "memory_resident_size", + Title: "Used resident memory", + Units: "bytes", + Fam: "memory", + Ctx: "mongodb.memory_resident_size", + Priority: prioMemoryResidentSize, + Dims: module.Dims{ + {ID: "memory_resident", Name: "used"}, + }, + } + chartMemoryVirtualSize = module.Chart{ + ID: "memory_virtual_size", + Title: "Used virtual memory", + Units: "bytes", + Fam: "memory", + Ctx: "mongodb.memory_virtual_size", + Priority: prioMemoryVirtualSize, + Dims: module.Dims{ + {ID: "memory_virtual", Name: "used"}, + }, + } + chartMemoryPageFaultsRate = module.Chart{ + ID: "memory_page_faults", + Title: "Memory page faults", + Units: "pgfaults/s", + Fam: "memory", + Ctx: "mongodb.memory_page_faults_rate", + Priority: prioMemoryPageFaultsRate, + Dims: module.Dims{ + {ID: "extra_info_page_faults", Name: "pgfaults", Algo: module.Incremental}, + }, + } + chartMemoryTCMallocStatsChart = module.Chart{ + ID: "memory_tcmalloc_stats", + Title: "TCMalloc statistics", + Units: "bytes", + Fam: "memory", + Ctx: "mongodb.memory_tcmalloc_stats", + Priority: prioMemoryTCMallocStats, + Dims: module.Dims{ + {ID: "tcmalloc_generic_current_allocated_bytes", Name: "allocated"}, + {ID: "tcmalloc_central_cache_free_bytes", Name: "central_cache_freelist"}, + {ID: "tcmalloc_transfer_cache_free_bytes", Name: "transfer_cache_freelist"}, + {ID: "tcmalloc_thread_cache_free_bytes", Name: "thread_cache_freelists"}, + {ID: "tcmalloc_pageheap_free_bytes", Name: "pageheap_freelist"}, + {ID: "tcmalloc_pageheap_unmapped_bytes", Name: "pageheap_unmapped"}, + }, + } + + chartAssertsRate = module.Chart{ + ID: "asserts_rate", + Title: "Raised assertions", + Units: "asserts/s", + Fam: "asserts", + Ctx: "mongodb.asserts_rate", + Type: module.Stacked, + Priority: prioAssertsRate, + Dims: module.Dims{ + {ID: "asserts_regular", Name: "regular", Algo: module.Incremental}, + {ID: "asserts_warning", Name: "warning", Algo: module.Incremental}, + {ID: "asserts_msg", Name: "msg", Algo: module.Incremental}, + {ID: 
"asserts_user", Name: "user", Algo: module.Incremental}, + {ID: "asserts_tripwire", Name: "tripwire", Algo: module.Incremental}, + {ID: "asserts_rollovers", Name: "rollovers", Algo: module.Incremental}, + }, + } + + chartTransactionsCount = module.Chart{ + ID: "transactions_count", + Title: "Current transactions", + Units: "transactions", + Fam: "transactions", + Ctx: "mongodb.transactions_count", + Priority: prioTransactionsCount, + Dims: module.Dims{ + {ID: "txn_active", Name: "active"}, + {ID: "txn_inactive", Name: "inactive"}, + {ID: "txn_open", Name: "open"}, + {ID: "txn_prepared", Name: "prepared"}, + }, + } + chartTransactionsRate = module.Chart{ + ID: "transactions_rate", + Title: "Transactions rate", + Units: "transactions/s", + Fam: "transactions", + Ctx: "mongodb.transactions_rate", + Priority: prioTransactionsRate, + Dims: module.Dims{ + {ID: "txn_total_started", Name: "started", Algo: module.Incremental}, + {ID: "txn_total_aborted", Name: "aborted", Algo: module.Incremental}, + {ID: "txn_total_committed", Name: "committed", Algo: module.Incremental}, + {ID: "txn_total_prepared", Name: "prepared", Algo: module.Incremental}, + }, + } + chartTransactionsNoShardsCommitsRate = module.Chart{ + ID: "transactions_no_shards_commits_rate", + Title: "Transactions commits", + Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsNoShardsCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "noShards"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_no_shards_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_no_shards_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsNoShardsCommitsDurationTime = module.Chart{ + ID: "transactions_no_shards_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsNoShardsCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "noShards"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_no_shards_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + chartTransactionsSingleShardCommitsRate = module.Chart{ + ID: "transactions_single_shard_commits_rate", + Title: "Transactions commits", + Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsSingleShardCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "singleShard"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_single_shard_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_single_shard_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsSingleShardCommitsDurationTime = module.Chart{ + ID: "transactions_single_shard_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsSingleShardCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "singleShard"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_single_shard_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + chartTransactionsSingleWriteShardCommitsRate = module.Chart{ + ID: "transactions_single_write_shard_commits_rate", + 
Title: "Transactions commits", + Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsSingleWriteShardCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "singleWriteShard"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_single_write_shard_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_single_write_shard_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsSingleWriteShardCommitsDurationTime = module.Chart{ + ID: "transactions_single_write_shard_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsSingleWriteShardCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "singleWriteShard"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_single_write_shard_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + chartTransactionsReadOnlyCommitsRate = module.Chart{ + ID: "transactions_read_only_commits_rate", + Title: "Transactions commits", + Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsReadOnlyCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "readOnly"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_read_only_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_read_only_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsReadOnlyCommitsDurationTime = module.Chart{ + ID: "transactions_read_only_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsReadOnlyCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "readOnly"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_read_only_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + chartTransactionsTwoPhaseCommitCommitsRate = module.Chart{ + ID: "transactions_two_phase_commit_commits_rate", + Title: "Transactions commits", + Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsTwoPhaseCommitCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "twoPhaseCommit"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_two_phase_commit_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_two_phase_commit_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsTwoPhaseCommitCommitsDurationTime = module.Chart{ + ID: "transactions_two_phase_commit_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsTwoPhaseCommitCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "twoPhaseCommit"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_two_phase_commit_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + chartTransactionsRecoverWithTokenCommitsRate = module.Chart{ + ID: "transactions_recover_with_token_commits_rate", + Title: "Transactions commits", + 
Units: "commits/s", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_rate", + Priority: prioTransactionsRecoverWithTokenCommitsRate, + Type: module.Stacked, + Labels: []module.Label{{Key: "commit_type", Value: "recoverWithToken"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_recover_with_token_successful", Name: "success", Algo: module.Incremental}, + {ID: "txn_commit_types_recover_with_token_unsuccessful", Name: "fail", Algo: module.Incremental}, + }, + } + chartTransactionsRecoverWithTokenCommitsDurationTime = module.Chart{ + ID: "transactions_recover_with_token_commits_duration_time", + Title: "Transactions successful commits duration", + Units: "milliseconds", + Fam: "transactions", + Ctx: "mongodb.transactions_commits_duration_time", + Priority: prioTransactionsRecoverWithTokenCommitsDurationTime, + Labels: []module.Label{{Key: "commit_type", Value: "recoverWithToken"}}, + Dims: module.Dims{ + {ID: "txn_commit_types_recover_with_token_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000}, + }, + } + + chartGlobalLockAcquisitionsRate = module.Chart{ + ID: "global_lock_acquisitions_rate", + Title: "Global lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioGlobalLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: "global"}}, + Dims: module.Dims{ + {ID: "locks_global_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_global_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_global_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_global_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + chartDatabaseLockAcquisitionsRate = module.Chart{ + ID: "database_lock_acquisitions_rate", + Title: "Database lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioDatabaseLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: "database"}}, + Dims: module.Dims{ + {ID: "locks_database_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_database_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_database_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_database_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + chartCollectionLockAcquisitionsRate = module.Chart{ + ID: "collection_lock_acquisitions_rate", + Title: "Collection lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioCollectionLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: "collection"}}, + Dims: module.Dims{ + {ID: "locks_collection_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_collection_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_collection_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_collection_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + chartMutexLockAcquisitionsRate = module.Chart{ + ID: "mutex_lock_acquisitions_rate", + Title: "Mutex lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioMutexLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: 
"mutex"}}, + Dims: module.Dims{ + {ID: "locks_mutex_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_mutex_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_mutex_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_mutex_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + chartMetadataLockAcquisitionsRate = module.Chart{ + ID: "metadata_lock_acquisitions_rate", + Title: "Metadata lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioMetadataLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: "metadata"}}, + Dims: module.Dims{ + {ID: "locks_metadata_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_metadata_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_metadata_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_metadata_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + chartOpLogLockAcquisitionsRate = module.Chart{ + ID: "oplog_lock_acquisitions_rate", + Title: "Operations log lock acquisitions", + Units: "acquisitions/s", + Fam: "locks", + Ctx: "mongodb.lock_acquisitions_rate", + Priority: prioOpLogLockAcquisitionsRate, + Labels: []module.Label{{Key: "lock_type", Value: "oplog"}}, + Dims: module.Dims{ + {ID: "locks_oplog_acquire_shared", Name: "shared", Algo: module.Incremental}, + {ID: "locks_oplog_acquire_exclusive", Name: "exclusive", Algo: module.Incremental}, + {ID: "locks_oplog_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental}, + {ID: "locks_oplog_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental}, + }, + } + + chartCursorsOpenCount = module.Chart{ + ID: "cursors_open_count", + Title: "Open cursors", + Units: "cursors", + Fam: "cursors", + Ctx: "mongodb.cursors_open_count", + Priority: prioCursorsOpenCount, + Dims: module.Dims{ + {ID: "metrics_cursor_open_total", Name: "open"}, + }, + } + chartCursorsOpenNoTimeoutCount = module.Chart{ + ID: "cursors_open_no_timeout_count", + Title: "Open cursors with disabled timeout", + Units: "cursors", + Fam: "cursors", + Ctx: "mongodb.cursors_open_no_timeout_count", + Priority: prioCursorsOpenNoTimeoutCount, + Dims: module.Dims{ + {ID: "metrics_cursor_open_no_timeout", Name: "open_no_timeout"}, + }, + } + chartCursorsOpenedRate = module.Chart{ + ID: "cursors_opened_rate", + Title: "Opened cursors rate", + Units: "cursors/s", + Fam: "cursors", + Ctx: "mongodb.cursors_opened_rate", + Priority: prioCursorsOpenedRate, + Dims: module.Dims{ + {ID: "metrics_cursor_total_opened", Name: "opened"}, + }, + } + chartCursorsTimedOutRate = module.Chart{ + ID: "cursors_timed_out_rate", + Title: "Timed-out cursors", + Units: "cursors/s", + Fam: "cursors", + Ctx: "mongodb.cursors_timed_out_rate", + Priority: prioTimedOutCursorsRate, + Dims: module.Dims{ + {ID: "metrics_cursor_timed_out", Name: "timed_out"}, + }, + } + chartCursorsByLifespanCount = module.Chart{ + ID: "cursors_by_lifespan_count", + Title: "Cursors lifespan", + Units: "cursors", + Fam: "cursors", + Ctx: "mongodb.cursors_by_lifespan_count", + Priority: prioCursorsByLifespanCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "metrics_cursor_lifespan_less_than_1_second", Name: "le_1s"}, + {ID: "metrics_cursor_lifespan_less_than_5_seconds", Name: "1s_5s"}, + {ID: 
"metrics_cursor_lifespan_less_than_15_seconds", Name: "5s_15s"}, + {ID: "metrics_cursor_lifespan_less_than_30_seconds", Name: "15s_30s"}, + {ID: "metrics_cursor_lifespan_less_than_1_minute", Name: "30s_1m"}, + {ID: "metrics_cursor_lifespan_less_than_10_minutes", Name: "1m_10m"}, + {ID: "metrics_cursor_lifespan_greater_than_or_equal_10_minutes", Name: "ge_10m"}, + }, + } + + chartWiredTigerConcurrentReadTransactionsUsage = module.Chart{ + ID: "wiredtiger_concurrent_read_transactions_usage", + Title: "Wired Tiger concurrent read transactions usage", + Units: "transactions", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_concurrent_read_transactions_usage", + Priority: prioWiredTigerConcurrentReadTransactionsUsage, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "wiredtiger_concurrent_txn_read_available", Name: "available"}, + {ID: "wiredtiger_concurrent_txn_read_out", Name: "used"}, + }, + } + chartWiredTigerConcurrentWriteTransactionsUsage = module.Chart{ + ID: "wiredtiger_concurrent_write_transactions_usage", + Title: "Wired Tiger concurrent write transactions usage", + Units: "transactions", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_concurrent_write_transactions_usage", + Priority: prioWiredTigerConcurrentWriteTransactionsUsage, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "wiredtiger_concurrent_txn_write_available", Name: "available"}, + {ID: "wiredtiger_concurrent_txn_write_out", Name: "used"}, + }, + } + chartWiredTigerCacheUsage = module.Chart{ + ID: "wiredtiger_cache_usage", + Title: "Wired Tiger cache usage", + Units: "bytes", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_cache_usage", + Priority: prioWiredTigerCacheUsage, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "wiredtiger_cache_currently_in_cache_bytes", Name: "used"}, + }, + } + chartWiredTigerCacheDirtySpaceSize = module.Chart{ + ID: "wiredtiger_cache_dirty_space_size", + Title: "Wired Tiger cache dirty space size", + Units: "bytes", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_cache_dirty_space_size", + Priority: prioWiredTigerCacheDirtySpaceSize, + Dims: module.Dims{ + {ID: "wiredtiger_cache_tracked_dirty_in_the_cache_bytes", Name: "dirty"}, + }, + } + chartWiredTigerCacheIORate = module.Chart{ + ID: "wiredtiger_cache_io_rate", + Title: "Wired Tiger IO activity", + Units: "pages/s", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_cache_io_rate", + Priority: prioWiredTigerCacheIORate, + Dims: module.Dims{ + {ID: "wiredtiger_cache_read_into_cache_pages", Name: "read", Algo: module.Incremental}, + {ID: "wiredtiger_cache_written_from_cache_pages", Name: "written", Algo: module.Incremental}, + }, + } + chartWiredTigerCacheEvictionsRate = module.Chart{ + ID: "wiredtiger_cache_eviction_rate", + Title: "Wired Tiger cache evictions", + Units: "pages/s", + Fam: "wiredtiger", + Ctx: "mongodb.wiredtiger_cache_evictions_rate", + Type: module.Stacked, + Priority: prioWiredTigerCacheEvictionsRate, + Dims: module.Dims{ + {ID: "wiredtiger_cache_unmodified_evicted_pages", Name: "unmodified", Algo: module.Incremental}, + {ID: "wiredtiger_cache_modified_evicted_pages", Name: "modified", Algo: module.Incremental}, + }, + } +) + +var ( + chartTmplDatabaseCollectionsCount = &module.Chart{ + ID: chartPxDatabase + "%s_collections_count", + Title: "Database collections", + Units: "collections", + Fam: "databases", + Ctx: "mongodb.database_collections_count", + Priority: prioDatabaseCollectionsCount, + Dims: module.Dims{ + {ID: "database_%s_collections", Name: "collections"}, + }, + } + chartTmplDatabaseIndexesCount = 
&module.Chart{ + ID: chartPxDatabase + "%s_indexes_count", + Title: "Database indexes", + Units: "indexes", + Fam: "databases", + Ctx: "mongodb.database_indexes_count", + Priority: prioDatabaseIndexesCount, + Dims: module.Dims{ + {ID: "database_%s_indexes", Name: "indexes"}, + }, + } + chartTmplDatabaseViewsCount = &module.Chart{ + ID: chartPxDatabase + "%s_views_count", + Title: "Database views", + Units: "views", + Fam: "databases", + Ctx: "mongodb.database_views_count", + Priority: prioDatabaseViewsCount, + Dims: module.Dims{ + {ID: "database_%s_views", Name: "views"}, + }, + } + chartTmplDatabaseDocumentsCount = &module.Chart{ + ID: chartPxDatabase + "%s_documents_count", + Title: "Database documents", + Units: "documents", + Fam: "databases", + Ctx: "mongodb.database_documents_count", + Priority: prioDatabaseDocumentsCount, + Dims: module.Dims{ + {ID: "database_%s_documents", Name: "documents"}, + }, + } + chartTmplDatabaseDataSize = &module.Chart{ + ID: chartPxDatabase + "%s_data_size", + Title: "Database data size", + Units: "bytes", + Fam: "databases", + Ctx: "mongodb.database_data_size", + Priority: prioDatabaseDataSize, + Dims: module.Dims{ + {ID: "database_%s_data_size", Name: "data_size"}, + }, + } + chartTmplDatabaseStorageSize = &module.Chart{ + ID: chartPxDatabase + "%s_storage_size", + Title: "Database storage size", + Units: "bytes", + Fam: "databases", + Ctx: "mongodb.database_storage_size", + Priority: prioDatabaseStorageSize, + Dims: module.Dims{ + {ID: "database_%s_storage_size", Name: "storage_size"}, + }, + } + chartTmplDatabaseIndexSize = &module.Chart{ + ID: chartPxDatabase + "%s_index_size", + Title: "Database index size", + Units: "bytes", + Fam: "databases", + Ctx: "mongodb.database_index_size", + Priority: prioDatabaseIndexSize, + Dims: module.Dims{ + {ID: "database_%s_index_size", Name: "index_size"}, + }, + } +) + +var ( + chartTmplReplSetMemberState = &module.Chart{ + ID: chartPxReplSetMember + "%s_state", + Title: "Replica Set member state", + Units: "state", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_state", + Priority: prioReplSetMemberState, + Dims: module.Dims{ + {ID: "repl_set_member_%s_state_primary", Name: "primary"}, + {ID: "repl_set_member_%s_state_startup", Name: "startup"}, + {ID: "repl_set_member_%s_state_secondary", Name: "secondary"}, + {ID: "repl_set_member_%s_state_recovering", Name: "recovering"}, + {ID: "repl_set_member_%s_state_startup2", Name: "startup2"}, + {ID: "repl_set_member_%s_state_unknown", Name: "unknown"}, + {ID: "repl_set_member_%s_state_arbiter", Name: "arbiter"}, + {ID: "repl_set_member_%s_state_down", Name: "down"}, + {ID: "repl_set_member_%s_state_rollback", Name: "rollback"}, + {ID: "repl_set_member_%s_state_removed", Name: "removed"}, + }, + } + chartTmplReplSetMemberHealthStatus = &module.Chart{ + ID: chartPxReplSetMember + "%s_health_status", + Title: "Replica Set member health status", + Units: "status", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_health_status", + Priority: prioReplSetMemberHealthStatus, + Dims: module.Dims{ + {ID: "repl_set_member_%s_health_status_up", Name: "up"}, + {ID: "repl_set_member_%s_health_status_down", Name: "down"}, + }, + } + chartTmplReplSetMemberReplicationLagTime = &module.Chart{ + ID: chartPxReplSetMember + "%s_replication_lag_time", + Title: "Replica Set member replication lag", + Units: "milliseconds", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_replication_lag_time", + Priority: prioReplSetMemberReplicationLagTime, + Dims: module.Dims{ + {ID: 
"repl_set_member_%s_replication_lag", Name: "replication_lag"}, + }, + } + chartTmplReplSetMemberHeartbeatLatencyTime = &module.Chart{ + ID: chartPxReplSetMember + "%s_heartbeat_latency_time", + Title: "Replica Set member heartbeat latency", + Units: "milliseconds", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_heartbeat_latency_time", + Priority: prioReplSetMemberHeartbeatLatencyTime, + Dims: module.Dims{ + {ID: "repl_set_member_%s_heartbeat_latency", Name: "heartbeat_latency"}, + }, + } + chartTmplReplSetMemberPingRTTTime = &module.Chart{ + ID: chartPxReplSetMember + "%s_ping_rtt_time", + Title: "Replica Set member ping RTT", + Units: "milliseconds", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_ping_rtt_time", + Priority: prioReplSetMemberPingRTTTime, + Dims: module.Dims{ + {ID: "repl_set_member_%s_ping_rtt", Name: "ping_rtt"}, + }, + } + chartTmplReplSetMemberUptime = &module.Chart{ + ID: chartPxReplSetMember + "%s_uptime", + Title: "Replica Set member uptime", + Units: "seconds", + Fam: "replica sets", + Ctx: "mongodb.repl_set_member_uptime", + Priority: prioReplSetMemberUptime, + Dims: module.Dims{ + {ID: "repl_set_member_%s_uptime", Name: "uptime"}, + }, + } +) + +var ( + chartShardingNodesCount = &module.Chart{ + ID: "sharding_nodes_count", + Title: "Sharding Nodes", + Units: "nodes", + Fam: "sharding", + Ctx: "mongodb.sharding_nodes_count", + Type: module.Stacked, + Priority: prioShardingNodesCount, + Dims: module.Dims{ + {ID: "shard_nodes_aware", Name: "shard_aware"}, + {ID: "shard_nodes_unaware", Name: "shard_unaware"}, + }, + } + chartShardingShardedDatabases = &module.Chart{ + ID: "sharding_sharded_databases_count", + Title: "Sharded databases", + Units: "databases", + Fam: "sharding", + Ctx: "mongodb.sharding_sharded_databases_count", + Type: module.Stacked, + Priority: prioShardingShardedDatabasesCount, + Dims: module.Dims{ + {ID: "shard_databases_partitioned", Name: "partitioned"}, + {ID: "shard_databases_unpartitioned", Name: "unpartitioned"}, + }, + } + + chartShardingShardedCollectionsCount = &module.Chart{ + ID: "sharding_sharded_collections_count", + Title: "Sharded collections", + Units: "collections", + Fam: "sharding", + Ctx: "mongodb.sharding_sharded_collections_count", + Type: module.Stacked, + Priority: prioShardingShardedCollectionsCount, + Dims: module.Dims{ + {ID: "shard_collections_partitioned", Name: "partitioned"}, + {ID: "shard_collections_unpartitioned", Name: "unpartitioned"}, + }, + } + + chartTmplShardChunks = &module.Chart{ + ID: chartPxShard + "%s_chunks", + Title: "Shard chunks", + Units: "chunks", + Fam: "sharding", + Ctx: "mongodb.sharding_shard_chunks_count", + Priority: prioShardChunks, + Dims: module.Dims{ + {ID: "shard_id_%s_chunks", Name: "chunks"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/client.go b/src/go/collectors/go.d.plugin/modules/mongodb/client.go new file mode 100644 index 00000000000000..4da13eebd383c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/client.go @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "context" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + mongos = "mongos" +) + +type mongoConn interface { + serverStatus() (*documentServerStatus, error) + listDatabaseNames() ([]string, error) + dbStats(name string) (*documentDBStats, error) + isReplicaSet() bool + isMongos() bool + replSetGetStatus() 
(*documentReplSetStatus, error) + shardNodes() (*documentShardNodesResult, error) + shardDatabasesPartitioning() (*documentPartitionedResult, error) + shardCollectionsPartitioning() (*documentPartitionedResult, error) + shardChunks() (map[string]int64, error) + initClient(uri string, timeout time.Duration) error + close() error +} + +type mongoClient struct { + client *mongo.Client + timeout time.Duration + replicaSetFlag *bool + mongosFlag *bool +} + +func (c *mongoClient) serverStatus() (*documentServerStatus, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + cmd := bson.D{ + {Key: "serverStatus", Value: 1}, + {Key: "repl", Value: 1}, + {Key: "metrics", + Value: bson.D{ + {Key: "document", Value: true}, + {Key: "cursor", Value: true}, + {Key: "queryExecutor", Value: true}, + {Key: "apiVersions", Value: false}, + {Key: "aggStageCounters", Value: false}, + {Key: "commands", Value: false}, + {Key: "dotsAndDollarsFields", Value: false}, + {Key: "getLastError", Value: false}, + {Key: "mongos", Value: false}, + {Key: "operation", Value: false}, + {Key: "operatorCounters", Value: false}, + {Key: "query", Value: false}, + {Key: "record", Value: false}, + {Key: "repl", Value: false}, + {Key: "storage", Value: false}, + {Key: "ttl", Value: false}, + }, + }, + } + var status *documentServerStatus + + err := c.client.Database("admin").RunCommand(ctx, cmd).Decode(&status) + if err != nil { + return nil, err + } + + isReplSet := status.Repl != nil + c.replicaSetFlag = &isReplSet + + isMongos := status.Process == mongos + c.mongosFlag = &isMongos + + return status, err +} + +func (c *mongoClient) listDatabaseNames() ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + return c.client.ListDatabaseNames(ctx, bson.M{}) +} + +func (c *mongoClient) dbStats(name string) (*documentDBStats, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + cmd := bson.M{"dbStats": 1} + var stats documentDBStats + + if err := c.client.Database(name).RunCommand(ctx, cmd).Decode(&stats); err != nil { + return nil, err + } + + return &stats, nil +} + +func (c *mongoClient) isReplicaSet() bool { + if c.replicaSetFlag != nil { + return *c.replicaSetFlag + } + + status, err := c.serverStatus() + if err != nil { + return false + } + + return status.Repl != nil +} + +func (c *mongoClient) isMongos() bool { + if c.mongosFlag != nil { + return *c.mongosFlag + } + + status, err := c.serverStatus() + if err != nil { + return false + } + + return status.Process == mongos +} + +func (c *mongoClient) replSetGetStatus() (*documentReplSetStatus, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + var status *documentReplSetStatus + cmd := bson.M{"replSetGetStatus": 1} + + err := c.client.Database("admin").RunCommand(ctx, cmd).Decode(&status) + if err != nil { + return nil, err + } + + return status, err +} + +func (c *mongoClient) shardNodes() (*documentShardNodesResult, error) { + collection := "shards" + groupStage := bson.D{{Key: "$sortByCount", Value: "$state"}} + + nodesByState, err := c.shardCollectAggregation(collection, []bson.D{groupStage}) + if err != nil { + return nil, err + } + + return &documentShardNodesResult{nodesByState.True, nodesByState.False}, nil +} + +func (c *mongoClient) shardDatabasesPartitioning() (*documentPartitionedResult, error) { + collection := "databases" + 
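+ // $sortByCount groups the documents in config.databases by the value of
+ // their "partitioned" field and emits one {_id, count} row per distinct
+ // value; shardCollectAggregation folds those rows into true/false totals.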
groupStage := bson.D{{Key: "$sortByCount", Value: "$partitioned"}} + + partitioning, err := c.shardCollectAggregation(collection, []bson.D{groupStage}) + if err != nil { + return nil, err + } + + return &documentPartitionedResult{partitioning.True, partitioning.False}, nil +} + +func (c *mongoClient) shardCollectionsPartitioning() (*documentPartitionedResult, error) { + collection := "collections" + matchStage := bson.D{{Key: "$match", Value: bson.D{{Key: "dropped", Value: false}}}} + countStage := bson.D{{Key: "$sortByCount", Value: bson.D{{Key: "$eq", Value: bson.A{"$distributionMode", "sharded"}}}}} + + partitioning, err := c.shardCollectAggregation(collection, []bson.D{matchStage, countStage}) + if err != nil { + return nil, err + } + + return &documentPartitionedResult{partitioning.True, partitioning.False}, nil +} + +func (c *mongoClient) shardCollectAggregation(collection string, aggr []bson.D) (*documentAggrResult, error) { + rows, err := c.dbAggregate(collection, aggr) + if err != nil { + return nil, err + } + + result := &documentAggrResult{} + + for _, row := range rows { + if row.Bool { + result.True = row.Count + } else { + result.False = row.Count + } + } + + return result, err +} + +func (c *mongoClient) shardChunks() (map[string]int64, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + col := c.client.Database("config").Collection("chunks") + + cursor, err := col.Aggregate(ctx, mongo.Pipeline{bson.D{{Key: "$sortByCount", Value: "$shard"}}}) + if err != nil { + return nil, err + } + + var shards []bson.M + if err = cursor.All(ctx, &shards); err != nil { + return nil, err + } + + defer func() { _ = cursor.Close(ctx) }() + + result := map[string]int64{} + + for _, row := range shards { + k, ok := row["_id"].(string) + if !ok { + return nil, fmt.Errorf("shard name is not a string: %v", row["_id"]) + } + v, ok := row["count"].(int32) + if !ok { + return nil, fmt.Errorf("shard chunk count is not an int32: %v", row["count"]) + } + result[k] = int64(v) + } + + return result, err +} + +func (c *mongoClient) initClient(uri string, timeout time.Duration) error { + if c.client != nil { + return nil + } + + c.timeout = timeout + + client, err := mongo.NewClient(options.Client().ApplyURI(uri)) + if err != nil { + return err + } + + ctxConn, cancelConn := context.WithTimeout(context.Background(), c.timeout*time.Second) + defer cancelConn() + + if err := client.Connect(ctxConn); err != nil { + return err + } + + ctxPing, cancelPing := context.WithTimeout(context.Background(), c.timeout*time.Second) + defer cancelPing() + + if err := client.Ping(ctxPing, nil); err != nil { + return err + } + + c.client = client + + return nil +} + +func (c *mongoClient) close() error { + if c.client == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), c.timeout*time.Second) + defer cancel() + + if err := c.client.Disconnect(ctx); err != nil { + return err + } + + c.client = nil + + return nil +} + +func (c *mongoClient) dbAggregate(collection string, aggr []bson.D) ([]documentAggrResults, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*c.timeout) + defer cancel() + + cursor, err := c.client.Database("config").Collection(collection).Aggregate(ctx, aggr) + if err != nil { + return nil, err + } + + defer func() { _ = cursor.Close(ctx) }() + + var rows []documentAggrResults + if err := cursor.All(ctx, &rows); err != nil { + return nil, err + } + + return rows, nil +} diff --git 
a/src/go/collectors/go.d.plugin/modules/mongodb/collect.go b/src/go/collectors/go.d.plugin/modules/mongodb/collect.go new file mode 100644 index 00000000000000..a050d217fd6a83 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/collect.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import "fmt" + +func (m *Mongo) collect() (map[string]int64, error) { + if err := m.conn.initClient(m.URI, m.Timeout); err != nil { + return nil, fmt.Errorf("init mongo conn: %v", err) + } + + mx := make(map[string]int64) + + if err := m.collectServerStatus(mx); err != nil { + return nil, fmt.Errorf("couldn't collect server status metrics: %v", err) + } + + if err := m.collectDbStats(mx); err != nil { + return mx, fmt.Errorf("couldn't collect dbstats metrics: %v", err) + } + + if m.conn.isReplicaSet() { + if err := m.collectReplSetStatus(mx); err != nil { + return mx, fmt.Errorf("couldn't collect documentReplSetStatus metrics: %v", err) + } + } + + if m.conn.isMongos() { + m.addShardingChartsOnce.Do(m.addShardingCharts) + if err := m.collectSharding(mx); err != nil { + return mx, fmt.Errorf("couldn't collect sharding metrics: %v", err) + } + } + + return mx, nil +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/collect_dbstats.go b/src/go/collectors/go.d.plugin/modules/mongodb/collect_dbstats.go new file mode 100644 index 00000000000000..bd08ba9a23be95 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/collect_dbstats.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (m *Mongo) collectDbStats(mx map[string]int64) error { + if m.dbSelector == nil { + m.Debug("'database' selector not set, skip collecting database statistics") + return nil + } + + allDBs, err := m.conn.listDatabaseNames() + if err != nil { + return fmt.Errorf("cannot get database names: %v", err) + } + + m.Debugf("all databases on the server: '%v'", allDBs) + + var dbs []string + for _, db := range allDBs { + if m.dbSelector.MatchString(db) { + dbs = append(dbs, db) + } + } + + if len(allDBs) != len(dbs) { + m.Debugf("databases remaining after filtering: %v", dbs) + } + + seen := make(map[string]bool) + for _, db := range dbs { + s, err := m.conn.dbStats(db) + if err != nil { + return fmt.Errorf("dbStats command failed: %v", err) + } + + seen[db] = true + + mx["database_"+db+"_collections"] = s.Collections + mx["database_"+db+"_views"] = s.Views + mx["database_"+db+"_indexes"] = s.Indexes + mx["database_"+db+"_documents"] = s.Objects + mx["database_"+db+"_data_size"] = s.DataSize + mx["database_"+db+"_index_size"] = s.IndexSize + mx["database_"+db+"_storage_size"] = s.StorageSize + } + + for db := range seen { + if !m.databases[db] { + m.databases[db] = true + m.Debugf("new database '%s': creating charts", db) + m.addDatabaseCharts(db) + } + } + + for db := range m.databases { + if !seen[db] { + delete(m.databases, db) + m.Debugf("stale database '%s': removing charts", db) + m.removeDatabaseCharts(db) + } + } + + return nil +} + +func (m *Mongo) addDatabaseCharts(name string) { + charts := chartsTmplDatabase.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "database", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := 
m.Charts().Add(*charts...); err != nil { + m.Warning(err) + } +} + +func (m *Mongo) removeDatabaseCharts(name string) { + px := fmt.Sprintf("%s%s_", chartPxDatabase, name) + + for _, chart := range *m.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/collect_replsetgetstatus.go b/src/go/collectors/go.d.plugin/modules/mongodb/collect_replsetgetstatus.go new file mode 100644 index 00000000000000..d94c8020839db1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/collect_replsetgetstatus.go @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +// https://www.mongodb.com/docs/manual/reference/replica-states/#replica-set-member-states +var replicaSetMemberStates = map[string]int{ + "startup": 0, + "primary": 1, + "secondary": 2, + "recovering": 3, + "startup2": 5, + "unknown": 6, + "arbiter": 7, + "down": 8, + "rollback": 9, + "removed": 10, +} + +// TODO: deal with duplicates if we collect metrics from all cluster nodes +// should we only collect ReplSetStatus (at least by default) from primary nodes? (db.runCommand( { isMaster: 1 } )) +func (m *Mongo) collectReplSetStatus(mx map[string]int64) error { + s, err := m.conn.replSetGetStatus() + if err != nil { + return fmt.Errorf("replSetGetStatus command failed: %s", err) + } + + seen := make(map[string]documentReplSetMember) + + for _, member := range s.Members { + seen[member.Name] = member + + px := fmt.Sprintf("repl_set_member_%s_", member.Name) + + mx[px+"replication_lag"] = s.Date.Sub(member.OptimeDate).Milliseconds() + + for k, v := range replicaSetMemberStates { + mx[px+"state_"+k] = boolToInt(member.State == v) + } + + mx[px+"health_status_up"] = boolToInt(member.Health == 1) + mx[px+"health_status_down"] = boolToInt(member.Health == 0) + + if member.Self == nil { + mx[px+"uptime"] = member.Uptime + if v := member.LastHeartbeatRecv; v != nil && !v.IsZero() { + mx[px+"heartbeat_latency"] = s.Date.Sub(*v).Milliseconds() + } + if v := member.PingMs; v != nil { + mx[px+"ping_rtt"] = *v + } + } + } + + for name, member := range seen { + if !m.replSetMembers[name] { + m.replSetMembers[name] = true + m.Debugf("new replica set member '%s': adding charts", name) + m.addReplSetMemberCharts(member) + } + } + + for name := range m.replSetMembers { + if _, ok := seen[name]; !ok { + delete(m.replSetMembers, name) + m.Debugf("stale replica set member '%s': removing charts", name) + m.removeReplSetMemberCharts(name) + } + } + + return nil +} + +func (m *Mongo) addReplSetMemberCharts(v documentReplSetMember) { + charts := chartsTmplReplSetMember.Copy() + + if v.Self != nil { + _ = charts.Remove(chartTmplReplSetMemberHeartbeatLatencyTime.ID) + _ = charts.Remove(chartTmplReplSetMemberPingRTTTime.ID) + _ = charts.Remove(chartTmplReplSetMemberUptime.ID) + } + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, v.Name) + chart.Labels = []module.Label{ + {Key: "repl_set_member", Value: v.Name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, v.Name) + } + } + + if err := m.Charts().Add(*charts...); err != nil { + m.Warning(err) + } +} + +func (m *Mongo) removeReplSetMemberCharts(name string) { + px := fmt.Sprintf("%s%s_", chartPxReplSetMember, name) + + for _, chart := range *m.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + 
chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/collect_serverstatus.go b/src/go/collectors/go.d.plugin/modules/mongodb/collect_serverstatus.go new file mode 100644 index 00000000000000..738e12392f5b5d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/collect_serverstatus.go @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "fmt" + "reflect" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +// collectServerStatus builds the map[string]int64 of available dims. +// nil values are ignored and not added to the map, so the corresponding metrics do not appear on the dashboard. +// Because MongoDB reports a metric only after it first appears, some dims might take a while to show up. +// For example, to report the number of create commands, a document must be created first. +func (m *Mongo) collectServerStatus(mx map[string]int64) error { + s, err := m.conn.serverStatus() + if err != nil { + return fmt.Errorf("serverStatus command failed: %s", err) + } + + m.addOptionalCharts(s) + + for k, v := range stm.ToMap(s) { + mx[k] = v + } + + if s.Transactions != nil && s.Transactions.CommitTypes != nil { + px := "txn_commit_types_" + v := s.Transactions.CommitTypes + mx[px+"no_shards_unsuccessful"] = v.NoShards.Initiated - v.NoShards.Successful + mx[px+"single_shard_unsuccessful"] = v.SingleShard.Initiated - v.SingleShard.Successful + mx[px+"single_write_shard_unsuccessful"] = v.SingleWriteShard.Initiated - v.SingleWriteShard.Successful + mx[px+"read_only_unsuccessful"] = v.ReadOnly.Initiated - v.ReadOnly.Successful + mx[px+"two_phase_commit_unsuccessful"] = v.TwoPhaseCommit.Initiated - v.TwoPhaseCommit.Successful + mx[px+"recover_with_token_unsuccessful"] = v.RecoverWithToken.Initiated - v.RecoverWithToken.Successful + } + + return nil +} + +func (m *Mongo) addOptionalCharts(s *documentServerStatus) { + m.addOptionalChart(s.OpLatencies, + &chartOperationsRate, + &chartOperationsLatencyTime, + ) + m.addOptionalChart(s.WiredTiger, + &chartWiredTigerConcurrentReadTransactionsUsage, + &chartWiredTigerConcurrentWriteTransactionsUsage, + &chartWiredTigerCacheUsage, + &chartWiredTigerCacheDirtySpaceSize, + &chartWiredTigerCacheIORate, + &chartWiredTigerCacheEvictionsRate, + ) + m.addOptionalChart(s.Tcmalloc, + &chartMemoryTCMallocStatsChart, + ) + m.addOptionalChart(s.GlobalLock, + &chartGlobalLockActiveClientsCount, + &chartGlobalLockCurrentQueueCount, + ) + m.addOptionalChart(s.Network.NumSlowDNSOperations, + &chartNetworkSlowDNSResolutionsRate, + ) + m.addOptionalChart(s.Network.NumSlowSSLOperations, + &chartNetworkSlowSSLHandshakesRate, + ) + m.addOptionalChart(s.Metrics.Cursor.TotalOpened, + &chartCursorsOpenedRate, + ) + m.addOptionalChart(s.Metrics.Cursor.TimedOut, + &chartCursorsTimedOutRate, + ) + m.addOptionalChart(s.Metrics.Cursor.Open.Total, + &chartCursorsOpenCount, + ) + m.addOptionalChart(s.Metrics.Cursor.Open.NoTimeout, + &chartCursorsOpenNoTimeoutCount, + ) + m.addOptionalChart(s.Metrics.Cursor.Lifespan, + &chartCursorsByLifespanCount, + ) + + if s.Transactions != nil { + m.addOptionalChart(s.Transactions, + &chartTransactionsCount, + &chartTransactionsRate, + ) + m.addOptionalChart(s.Transactions.CommitTypes, + &chartTransactionsNoShardsCommitsRate, + &chartTransactionsNoShardsCommitsDurationTime, + &chartTransactionsSingleShardCommitsRate, + &chartTransactionsSingleShardCommitsDurationTime, + 
&chartTransactionsSingleWriteShardCommitsRate, + &chartTransactionsSingleWriteShardCommitsDurationTime, + &chartTransactionsReadOnlyCommitsRate, + &chartTransactionsReadOnlyCommitsDurationTime, + &chartTransactionsTwoPhaseCommitCommitsRate, + &chartTransactionsTwoPhaseCommitCommitsDurationTime, + &chartTransactionsRecoverWithTokenCommitsRate, + &chartTransactionsRecoverWithTokenCommitsDurationTime, + ) + } + if s.Locks != nil { + m.addOptionalChart(s.Locks.Global, &chartGlobalLockAcquisitionsRate) + m.addOptionalChart(s.Locks.Database, &chartDatabaseLockAcquisitionsRate) + m.addOptionalChart(s.Locks.Collection, &chartCollectionLockAcquisitionsRate) + m.addOptionalChart(s.Locks.Mutex, &chartMutexLockAcquisitionsRate) + m.addOptionalChart(s.Locks.Metadata, &chartMetadataLockAcquisitionsRate) + m.addOptionalChart(s.Locks.Oplog, &chartOpLogLockAcquisitionsRate) + } +} + +func (m *Mongo) addOptionalChart(iface any, charts ...*module.Chart) { + if reflect.ValueOf(iface).IsNil() { + return + } + for _, chart := range charts { + if m.optionalCharts[chart.ID] { + continue + } + m.optionalCharts[chart.ID] = true + + if err := m.charts.Add(chart.Copy()); err != nil { + m.Warning(err) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/collect_sharding.go b/src/go/collectors/go.d.plugin/modules/mongodb/collect_sharding.go new file mode 100644 index 00000000000000..c2da45c63985dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/collect_sharding.go @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (m *Mongo) collectSharding(mx map[string]int64) error { + nodes, err := m.conn.shardNodes() + if err != nil { + return err + } + + mx["shard_nodes_aware"] = nodes.ShardAware + mx["shard_nodes_unaware"] = nodes.ShardUnaware + + dbPart, err := m.conn.shardDatabasesPartitioning() + if err != nil { + return err + } + + mx["shard_databases_partitioned"] = dbPart.Partitioned + mx["shard_databases_unpartitioned"] = dbPart.UnPartitioned + + collPart, err := m.conn.shardCollectionsPartitioning() + if err != nil { + return err + } + + mx["shard_collections_partitioned"] = collPart.Partitioned + mx["shard_collections_unpartitioned"] = collPart.UnPartitioned + + chunksPerShard, err := m.conn.shardChunks() + if err != nil { + return err + } + + seen := make(map[string]bool) + + for shard, count := range chunksPerShard { + seen[shard] = true + mx["shard_id_"+shard+"_chunks"] = count + } + + for id := range seen { + if !m.shards[id] { + m.shards[id] = true + m.addShardCharts(id) + } + } + + for id := range m.shards { + if !seen[id] { + delete(m.shards, id) + m.removeShardCharts(id) + } + } + + return nil +} + +func (m *Mongo) addShardCharts(id string) { + charts := chartsTmplShardingShard.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, id) + chart.Labels = []module.Label{ + {Key: "shard_id", Value: id}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, id) + } + } + + if err := m.Charts().Add(*charts...); err != nil { + m.Warning(err) + } + +} + +func (m *Mongo) removeShardCharts(id string) { + px := fmt.Sprintf("%s%s_", chartPxShard, id) + + for _, chart := range *m.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func (m *Mongo) addShardingCharts() { + charts := chartsSharding.Copy() + + if err := m.Charts().Add(*charts...); err != nil { + m.Warning(err) + } 
+} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/config_schema.json b/src/go/collectors/go.d.plugin/modules/mongodb/config_schema.json new file mode 100644 index 00000000000000..48afef5840c8c3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/config_schema.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mongodb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uri": { + "type": "string" + }, + "timeout": { + "type": "number" + }, + "databases": { + "type": "string" + } + }, + "required": [ + "name", + "uri" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/documents.go b/src/go/collectors/go.d.plugin/modules/mongodb/documents.go new file mode 100644 index 00000000000000..5c95e952ee9b52 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/documents.go @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import "time" + +// https://www.mongodb.com/docs/manual/reference/command/serverStatus +type documentServerStatus struct { + Process string `bson:"process"` // mongod|mongos + OpCounters documentOpCounters `bson:"opcounters" stm:"operations"` + OpLatencies *documentOpLatencies `bson:"opLatencies" stm:"operations_latencies"` // mongod only + Connections documentConnections `bson:"connections" stm:"connections"` + Network documentNetwork `bson:"network" stm:"network"` + Memory documentMemory `bson:"mem" stm:"memory"` + Metrics documentMetrics `bson:"metrics" stm:"metrics"` + ExtraInfo documentExtraInfo `bson:"extra_info" stm:"extra_info"` + Asserts documentAsserts `bson:"asserts" stm:"asserts"` + Transactions *documentTransactions `bson:"transactions" stm:"txn"` // mongod in 3.6.3+ and on mongos in 4.2+ + GlobalLock *documentGlobalLock `bson:"globalLock" stm:"global_lock"` + Tcmalloc *documentTCMallocStatus `bson:"tcmalloc" stm:"tcmalloc"` + Locks *documentLocks `bson:"locks" stm:"locks"` + WiredTiger *documentWiredTiger `bson:"wiredTiger" stm:"wiredtiger"` + Repl interface{} `bson:"repl"` +} + +type ( + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#opcounters + documentOpCounters struct { + Insert int64 `bson:"insert" stm:"insert"` + Query int64 `bson:"query" stm:"query"` + Update int64 `bson:"update" stm:"update"` + Delete int64 `bson:"delete" stm:"delete"` + GetMore int64 `bson:"getmore" stm:"getmore"` + Command int64 `bson:"command" stm:"command"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#oplatencies + documentOpLatencies struct { + Reads documentLatencyStats `bson:"reads" stm:"reads"` + Writes documentLatencyStats `bson:"writes" stm:"writes"` + Commands documentLatencyStats `bson:"commands" stm:"commands"` + } + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/collStats/#latencystats-document + documentLatencyStats struct { + Latency int64 `bson:"latency" stm:"latency"` + Ops int64 `bson:"ops" stm:"ops"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#connections + documentConnections struct { + Current int64 `bson:"current" stm:"current"` + Available int64 `bson:"available" stm:"available"` + TotalCreated int64 `bson:"totalCreated" stm:"total_created"` + Active *int64 `bson:"active" stm:"active"` + Threaded *int64 `bson:"threaded" stm:"threaded"` + ExhaustIsMaster *int64 `bson:"exhaustIsMaster" stm:"exhaust_is_master"` + ExhaustHello *int64 `bson:"exhaustHello" stm:"exhaust_hello"` + 
AwaitingTopologyChanges *int64 `bson:"awaitingTopologyChanges" stm:"awaiting_topology_changes"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#network + documentNetwork struct { + BytesIn int64 `bson:"bytesIn" stm:"bytes_in"` + BytesOut int64 `bson:"bytesOut" stm:"bytes_out"` + NumRequests int64 `bson:"numRequests" stm:"requests"` + NumSlowDNSOperations *int64 `bson:"numSlowDNSOperations" stm:"slow_dns_operations"` // 4.4+ + NumSlowSSLOperations *int64 `bson:"numSlowSSLOperations" stm:"slow_ssl_operations"` // 4.4+ + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mem + documentMemory struct { + Resident int64 `bson:"resident" stm:"resident,1048576,1"` + Virtual int64 `bson:"virtual" stm:"virtual,1048576,1"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#extra_info + documentExtraInfo struct { + PageFaults int64 `bson:"page_faults" stm:"page_faults"` + } + // Values: + // - mongodb: https://github.com/mongodb/mongo/blob/54e1be7d98aa154e1676d6d652b4d2d1a1073b07/src/mongo/util/tcmalloc_server_status_section.cpp#L88 + // - tcmalloc: https://github.com/google/tcmalloc/blob/927c1433141daa1f0bcf920e6d71bf64795cc2c2/tcmalloc/global_stats.cc#L582 + // formattedString: + // - https://github.com/google/tcmalloc/blob/master/docs/stats.md + // - https://github.com/google/tcmalloc/blob/927c1433141daa1f0bcf920e6d71bf64795cc2c2/tcmalloc/global_stats.cc#L208 + documentTCMallocStatus struct { + Generic *struct { + CurrentAllocatedBytes int64 `bson:"current_allocated_bytes" stm:"current_allocated_bytes"` + HeapSize int64 `bson:"heap_size" stm:"heap_size"` + } `bson:"generic" stm:"generic"` + Tcmalloc *struct { + PageheapFreeBytes int64 `bson:"pageheap_free_bytes" stm:"pageheap_free_bytes"` + PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes" stm:"pageheap_unmapped_bytes"` + MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes" stm:"max_total_thread_cache_bytes"` + CurrentTotalThreadCacheBytes int64 `bson:"current_total_thread_cache_bytes" stm:"current_total_thread_cache_bytes"` + TotalFreeBytes int64 `bson:"total_free_bytes" stm:"total_free_bytes"` + CentralCacheFreeBytes int64 `bson:"central_cache_free_bytes" stm:"central_cache_free_bytes"` + TransferCacheFreeBytes int64 `bson:"transfer_cache_free_bytes" stm:"transfer_cache_free_bytes"` + ThreadCacheFreeBytes int64 `bson:"thread_cache_free_bytes" stm:"thread_cache_free_bytes"` + AggressiveMemoryDecommit int64 `bson:"aggressive_memory_decommit" stm:"aggressive_memory_decommit"` + PageheapCommittedBytes int64 `bson:"pageheap_committed_bytes" stm:"pageheap_committed_bytes"` + PageheapScavengeBytes int64 `bson:"pageheap_scavenge_bytes" stm:"pageheap_scavenge_bytes"` + PageheapCommitCount int64 `bson:"pageheap_commit_count" stm:"pageheap_commit_count"` + PageheapTotalCommitBytes int64 `bson:"pageheap_total_commit_bytes" stm:"pageheap_total_commit_bytes"` + PageheapDecommitCount int64 `bson:"pageheap_decommit_count" stm:"pageheap_decommit_count"` + PageheapTotalDecommitBytes int64 `bson:"pageheap_total_decommit_bytes" stm:"pageheap_total_decommit_bytes"` + PageheapReserveCount int64 `bson:"pageheap_reserve_count" stm:"pageheap_reserve_count"` + PageheapTotalReserveBytes int64 `bson:"pageheap_total_reserve_bytes" stm:"pageheap_total_reserve_bytes"` + SpinlockTotalDelayNs int64 `bson:"spinlock_total_delay_ns" stm:"spinlock_total_delay_ns"` + } `bson:"tcmalloc" stm:""` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#metrics + 
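+ // Note: pointer fields in these documents mark metrics that exist only on some
+ // MongoDB versions or configurations; they stay nil when the server does not
+ // report them, and addOptionalChart skips creating charts for nil values.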
documentMetrics struct { + Cursor struct { + TotalOpened *int64 `bson:"totalOpened" stm:"total_opened"` + TimedOut *int64 `bson:"timedOut" stm:"timed_out"` + Open struct { + NoTimeout *int64 `bson:"noTimeout" stm:"no_timeout"` + Total *int64 `bson:"total" stm:"total"` + } `bson:"open" stm:"open"` + Lifespan *struct { + GreaterThanOrEqual10Minutes int64 `bson:"greaterThanOrEqual10Minutes" stm:"greater_than_or_equal_10_minutes"` + LessThan10Minutes int64 `bson:"lessThan10Minutes" stm:"less_than_10_minutes"` + LessThan15Seconds int64 `bson:"lessThan15Seconds" stm:"less_than_15_seconds"` + LessThan1Minute int64 `bson:"lessThan1Minute" stm:"less_than_1_minute"` + LessThan1Second int64 `bson:"lessThan1Second" stm:"less_than_1_second"` + LessThan30Seconds int64 `bson:"lessThan30Seconds" stm:"less_than_30_seconds"` + LessThan5Seconds int64 `bson:"lessThan5Seconds" stm:"less_than_5_seconds"` + } `bson:"lifespan" stm:"lifespan"` + } `bson:"cursor" stm:"cursor"` + Document struct { + Deleted int64 `bson:"deleted" stm:"deleted"` + Inserted int64 `bson:"inserted" stm:"inserted"` + Returned int64 `bson:"returned" stm:"returned"` + Updated int64 `bson:"updated" stm:"updated"` + } `bson:"document" stm:"document"` + QueryExecutor struct { + Scanned int64 `bson:"scanned" stm:"scanned"` + ScannedObjects int64 `bson:"scannedObjects" stm:"scanned_objects"` + } `bson:"queryExecutor" stm:"query_executor"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#asserts + documentAsserts struct { + Regular int64 `bson:"regular" stm:"regular"` + Warning int64 `bson:"warning" stm:"warning"` + Msg int64 `bson:"msg" stm:"msg"` + User int64 `bson:"user" stm:"user"` + Tripwire int64 `bson:"tripwire" stm:"tripwire"` + Rollovers int64 `bson:"rollovers" stm:"rollovers"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#transactions + documentTransactions struct { + CurrentActive *int64 `bson:"currentActive" stm:"active"` // mongod in 4.0.2+ and mongos in 4.2.1+ + CurrentInactive *int64 `bson:"currentInactive" stm:"inactive"` // mongod in 4.0.2+ and mongos in 4.2.1+ + CurrentOpen *int64 `bson:"currentOpen" stm:"open"` // mongod in 4.0.2+ and mongos in 4.2.1+ + CurrentPrepared *int64 `bson:"currentPrepared" stm:"prepared"` // 4.2+ mongod only + TotalAborted *int64 `bson:"totalAborted" stm:"total_aborted"` // mongod in 4.0.2+ and mongos in 4.2+ + TotalCommitted *int64 `bson:"totalCommitted" stm:"total_committed"` // mongod in 4.0.2+ and mongos in 4.2+ + TotalStarted *int64 `bson:"totalStarted" stm:"total_started"` // mongod in 4.0.2+ and mongos in 4.2+ + TotalPrepared *int64 `bson:"totalPrepared" stm:"total_prepared"` // mongod in 4.0.2+ and mongos in 4.2+ + CommitTypes *documentTransactionsCommitTypes `bson:"commitTypes" stm:"commit_types"` // mongos only + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mongodb-serverstatus-serverstatus.transactions.commitTypes + documentTransactionsCommitTypes struct { + NoShards documentTransactionsCommitType `bson:"noShards" stm:"no_shards"` + SingleShard documentTransactionsCommitType `bson:"singleShard" stm:"single_shard"` + SingleWriteShard documentTransactionsCommitType `bson:"singleWriteShard" stm:"single_write_shard"` + ReadOnly documentTransactionsCommitType `bson:"readOnly" stm:"read_only"` + TwoPhaseCommit documentTransactionsCommitType `bson:"twoPhaseCommit" stm:"two_phase_commit"` + RecoverWithToken documentTransactionsCommitType `bson:"recoverWithToken" stm:"recover_with_token"` + } + 
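+ // The stm tags determine the metric keys produced by stm.ToMap: nested struct
+ // tags are joined with "_", so Initiated below surfaces as keys such as
+ // "txn_commit_types_no_shards_initiated" ("txn" + "commit_types" + "no_shards" + "initiated").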
documentTransactionsCommitType struct { + Initiated int64 `json:"initiated" stm:"initiated"` + Successful int64 `json:"successful" stm:"successful"` + SuccessfulDurationMicros int64 `json:"successfulDurationMicros" stm:"successful_duration_micros"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#globallock + documentGlobalLock struct { + CurrentQueue *struct { + Readers int64 `bson:"readers" stm:"readers"` + Writers int64 `bson:"writers" stm:"writers"` + } `bson:"currentQueue" stm:"current_queue"` + ActiveClients *struct { + Readers int64 `bson:"readers" stm:"readers"` + Writers int64 `bson:"writers" stm:"writers"` + } `bson:"activeClients" stm:"active_clients"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mongodb-serverstatus-serverstatus.locks + documentLocks struct { + Global *documentLockType `bson:"Global" stm:"global"` + Database *documentLockType `bson:"Database" stm:"database"` + Collection *documentLockType `bson:"Collection" stm:"collection"` + Mutex *documentLockType `bson:"Mutex" stm:"mutex"` + Metadata *documentLockType `bson:"Metadata" stm:"metadata"` + Oplog *documentLockType `bson:"oplog" stm:"oplog"` + } + documentLockType struct { + AcquireCount documentLockModes `bson:"acquireCount" stm:"acquire"` + } + documentLockModes struct { + Shared int64 `bson:"R" stm:"shared"` + Exclusive int64 `bson:"W" stm:"exclusive"` + IntentShared int64 `bson:"r" stm:"intent_shared"` + IntentExclusive int64 `bson:"w" stm:"intent_exclusive"` + } + // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#wiredtiger + documentWiredTiger struct { + ConcurrentTransaction struct { + Write struct { + Out int `bson:"out" stm:"out"` + Available int `bson:"available" stm:"available"` + } `bson:"write" stm:"write"` + Read struct { + Out int `bson:"out" stm:"out"` + Available int `bson:"available" stm:"available"` + } `bson:"read" stm:"read"` + } `bson:"concurrentTransactions" stm:"concurrent_txn"` + Cache struct { + BytesCurrentlyInCache int `bson:"bytes currently in the cache" stm:"currently_in_cache_bytes"` + MaximumBytesConfigured int `bson:"maximum bytes configured" stm:"maximum_configured_bytes"` + TrackedDirtyBytesInCache int `bson:"tracked dirty bytes in the cache" stm:"tracked_dirty_in_the_cache_bytes"` + UnmodifiedPagesEvicted int `bson:"unmodified pages evicted" stm:"unmodified_evicted_pages"` + ModifiedPagesEvicted int `bson:"modified pages evicted" stm:"modified_evicted_pages"` + PagesReadIntoCache int `bson:"pages read into cache" stm:"read_into_cache_pages"` + PagesWrittenFromCache int `bson:"pages written from cache" stm:"written_from_cache_pages"` + } `bson:"cache" stm:"cache"` + } +) + +// https://www.mongodb.com/docs/manual/reference/command/dbStats/ +type documentDBStats struct { + Collections int64 `bson:"collections"` + Views int64 `bson:"views"` + Indexes int64 `bson:"indexes"` + Objects int64 `bson:"objects"` + DataSize int64 `bson:"dataSize"` + IndexSize int64 `bson:"indexSize"` + StorageSize int64 `bson:"storageSize"` +} + +// https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/ +type documentReplSetStatus struct { + Date time.Time `bson:"date"` + Members []documentReplSetMember `bson:"members"` +} + +type ( + documentReplSetMember struct { + Name string `bson:"name"` + Self *bool `bson:"self"` + State int `bson:"state"` + Health int `bson:"health"` + OptimeDate time.Time `bson:"optimeDate"` + LastHeartbeat *time.Time `bson:"lastHeartbeat"` + LastHeartbeatRecv *time.Time 
`bson:"lastHeartbeatRecv"` + PingMs *int64 `bson:"pingMs"` + Uptime int64 `bson:"uptime"` + } +) + +type documentAggrResults struct { + Bool bool `bson:"_id"` + Count int64 `bson:"count"` +} + +type ( + documentAggrResult struct { + True int64 + False int64 + } +) + +type documentPartitionedResult struct { + Partitioned int64 + UnPartitioned int64 +} + +type documentShardNodesResult struct { + ShardAware int64 + ShardUnaware int64 +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/init.go b/src/go/collectors/go.d.plugin/modules/mongodb/init.go new file mode 100644 index 00000000000000..b881e8711f856e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mongo + +import ( + "errors" +) + +func (m *Mongo) verifyConfig() error { + if m.URI == "" { + return errors.New("connection URI is empty") + } + + return nil +} + +func (m *Mongo) initDatabaseSelector() error { + if m.Databases.Empty() { + return nil + } + + sr, err := m.Databases.Parse() + if err != nil { + return err + } + m.dbSelector = sr + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/integrations/mongodb.md b/src/go/collectors/go.d.plugin/modules/mongodb/integrations/mongodb.md new file mode 100644 index 00000000000000..c60bafe7caaf53 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/integrations/mongodb.md @@ -0,0 +1,356 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/mongodb/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/mongodb/metadata.yaml" +sidebar_label: "MongoDB" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MongoDB + + +<img src="https://netdata.cloud/img/mongodb.svg" width="150"/> + + +Plugin: go.d.plugin +Module: mongodb + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors MongoDB servers. + +Executed queries: + +- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/) +- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/) +- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/) + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the + storage engine. +- Sharding metrics are available only for + [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/). + + +### Per MongoDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels.
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.operations_rate | reads, writes, commands | operations/s | +| mongodb.operations_latency_time | reads, writes, commands | milliseconds | +| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s | +| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s | +| mongodb.scanned_indexes_rate | scanned | indexes/s | +| mongodb.scanned_documents_rate | scanned | documents/s | +| mongodb.active_clients_count | readers, writers | clients | +| mongodb.queued_operations_count | reads, writes | operations | +| mongodb.cursors_open_count | open | cursors | +| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors | +| mongodb.cursors_opened_rate | opened | cursors/s | +| mongodb.cursors_timed_out_rate | timed_out | cursors/s | +| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors | +| mongodb.transactions_count | active, inactive, open, prepared | transactions | +| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s | +| mongodb.connections_usage | available, used | connections | +| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections | +| mongodb.connections_rate | created | connections/s | +| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s | +| mongodb.network_traffic_rate | in, out | bytes/s | +| mongodb.network_requests_rate | requests | requests/s | +| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s | +| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s | +| mongodb.memory_resident_size | used | bytes | +| mongodb.memory_virtual_size | used | bytes | +| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s | +| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes | +| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions | +| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions | +| mongodb.wiredtiger_cache_usage | used | bytes | +| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes | +| mongodb.wiredtiger_cache_io_rate | read, written | pages/s | +| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s | +| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes | +| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases | +| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections | + +### Per lock type + +These metrics refer to the lock type. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| lock_type | lock type (e.g. global, database, collection, mutex) | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s | + +### Per commit type + +These metrics refer to the commit type. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| commit_type | commit type (e.g. 
noShards, singleShard, singleWriteShard) | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.transactions_commits_rate | success, fail | commits/s | +| mongodb.transactions_commits_duration_time | commits | milliseconds | + +### Per database + +These metrics refer to the database. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| database | database name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.database_collections_count | collections | collections | +| mongodb.database_indexes_count | indexes | indexes | +| mongodb.database_views_count | views | views | +| mongodb.database_documents_count | documents | documents | +| mongodb.database_data_size | data_size | bytes | +| mongodb.database_storage_size | storage_size | bytes | +| mongodb.database_index_size | index_size | bytes | + +### Per replica set member + +These metrics refer to the replica set member. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| repl_set_member | replica set member name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state | +| mongodb.repl_set_member_health_status | up, down | status | +| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds | +| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds | +| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds | +| mongodb.repl_set_member_uptime | uptime | seconds | + +### Per shard + +These metrics refer to the shard. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| shard_id | shard id | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mongodb.sharding_shard_chunks_count | chunks | chunks | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Create a read-only user + +Create a read-only user for Netdata in the admin database. + +- Authenticate as the admin user: + + ```js + use admin + db.auth("admin", "<MONGODB_ADMIN_PASSWORD>") + ``` + +- Create a user: + + ```js + db.createUser({ + "user":"netdata", + "pwd": "<UNIQUE_PASSWORD>", + "roles" : [ + {role: 'read', db: 'admin' }, + {role: 'clusterMonitor', db: 'admin'}, + {role: 'read', db: 'local' } + ] + }) + ``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/mongodb.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/mongodb.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes | +| timeout | Query timeout in seconds. 
| 2 | no | +| databases | Databases selector. Determines which database metrics will be collected. | | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + uri: mongodb://netdata:password@localhost:27017 + +``` +</details> + +##### With databases metrics + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + uri: mongodb://netdata:password@localhost:27017 + databases: + includes: + - "* *" + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + uri: mongodb://netdata:password@localhost:27017 + + - name: remote + uri: mongodb://netdata:password@203.0.113.0:27017 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m mongodb + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml b/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml new file mode 100644 index 00000000000000..20630e6ab13115 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml @@ -0,0 +1,580 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-mongodb + plugin_name: go.d.plugin + module_name: mongodb + monitored_instance: + name: MongoDB + link: https://www.mongodb.com/ + icon_filename: mongodb.svg + categories: + - data-collection.database-servers + keywords: + - mongodb + - databases + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors MongoDB servers. + + Executed queries: + + - [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/) + - [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/) + - [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/) + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Create a read-only user + description: | + Create a read-only user for Netdata in the admin database. 
diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml b/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml
new file mode 100644
index 00000000000000..20630e6ab13115
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/mongodb/metadata.yaml
@@ -0,0 +1,582 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-mongodb
+      plugin_name: go.d.plugin
+      module_name: mongodb
+      monitored_instance:
+        name: MongoDB
+        link: https://www.mongodb.com/
+        icon_filename: mongodb.svg
+        categories:
+          - data-collection.database-servers
+      keywords:
+        - mongodb
+        - databases
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors MongoDB servers.
+
+          Executed queries:
+
+          - [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)
+          - [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)
+          - [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Create a read-only user
+            description: |
+              Create a read-only user for Netdata in the admin database.
+
+              - Authenticate as the admin user:
+
+                ```bash
+                use admin
+                db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+                ```
+
+              - Create a user:
+
+                ```bash
+                db.createUser({
+                  "user":"netdata",
+                  "pwd": "<UNIQUE_PASSWORD>",
+                  "roles" : [
+                    {role: 'read', db: 'admin' },
+                    {role: 'clusterMonitor', db: 'admin'},
+                    {role: 'read', db: 'local' }
+                  ]
+                })
+                ```
+      configuration:
+        file:
+          name: go.d/mongodb.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: uri
+              description: MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/).
+              default_value: mongodb://localhost:27017
+              required: true
+            - name: timeout
+              description: Query timeout in seconds.
+              default_value: 2
+              required: false
+            - name: databases
+              description: Databases selector. Determines which database metrics will be collected.
+              default_value: ""
+              required: false
+              details: |
+                Metrics of databases matching the selector will be collected.
+
+                - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+                - Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format).
+                - Syntax:
+
+                  ```yaml
+                  databases:
+                    includes:
+                      - pattern1
+                      - pattern2
+                    excludes:
+                      - pattern3
+                      - pattern4
+                  ```
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: TCP socket
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    uri: mongodb://netdata:password@localhost:27017
+            - name: With databases metrics
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    uri: mongodb://netdata:password@localhost:27017
+                    databases:
+                      includes:
+                        - "* *"
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    uri: mongodb://netdata:password@localhost:27017
+
+                  - name: remote
+                    uri: mongodb://netdata:password@203.0.113.0:27017
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      availability: []
+      description: |
+        - WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the
+          storage engine.
+        - Sharding metrics are available only
+          for [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/).
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
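+          # note: the WiredTiger and sharding charts in this scope are created only when available (see the description above)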
+ labels: [] + metrics: + - name: mongodb.operations_rate + description: Operations rate + unit: operations/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: commands + - name: mongodb.operations_latency_time + description: Operations Latency + unit: milliseconds + chart_type: line + dimensions: + - name: reads + - name: writes + - name: commands + - name: mongodb.operations_by_type_rate + description: Operations by type + unit: operations/s + chart_type: line + dimensions: + - name: insert + - name: query + - name: update + - name: delete + - name: getmore + - name: command + - name: mongodb.document_operations_rate + description: Document operations + unit: operations/s + chart_type: stacked + dimensions: + - name: inserted + - name: deleted + - name: returned + - name: updated + - name: mongodb.scanned_indexes_rate + description: Scanned indexes + unit: indexes/s + chart_type: line + dimensions: + - name: scanned + - name: mongodb.scanned_documents_rate + description: Scanned documents + unit: documents/s + chart_type: line + dimensions: + - name: scanned + - name: mongodb.active_clients_count + description: Connected clients + unit: clients + chart_type: line + dimensions: + - name: readers + - name: writers + - name: mongodb.queued_operations_count + description: Queued operations because of a lock + unit: operations + chart_type: line + dimensions: + - name: reads + - name: writes + - name: mongodb.cursors_open_count + description: Open cursors + unit: cursors + chart_type: line + dimensions: + - name: open + - name: mongodb.cursors_open_no_timeout_count + description: Open cursors with disabled timeout + unit: cursors + chart_type: line + dimensions: + - name: open_no_timeout + - name: mongodb.cursors_opened_rate + description: Opened cursors rate + unit: cursors/s + chart_type: line + dimensions: + - name: opened + - name: mongodb.cursors_timed_out_rate + description: Timed-out cursors + unit: cursors/s + chart_type: line + dimensions: + - name: timed_out + - name: mongodb.cursors_by_lifespan_count + description: Cursors lifespan + unit: cursors + chart_type: stacked + dimensions: + - name: le_1s + - name: 1s_5s + - name: 5s_15s + - name: 15s_30s + - name: 30s_1m + - name: 1m_10m + - name: ge_10m + - name: mongodb.transactions_count + description: Current transactions + unit: transactions + chart_type: line + dimensions: + - name: active + - name: inactive + - name: open + - name: prepared + - name: mongodb.transactions_rate + description: Transactions rate + unit: transactions/s + chart_type: line + dimensions: + - name: started + - name: aborted + - name: committed + - name: prepared + - name: mongodb.connections_usage + description: Connections usage + unit: connections + chart_type: stacked + dimensions: + - name: available + - name: used + - name: mongodb.connections_by_state_count + description: Connections By State + unit: connections + chart_type: line + dimensions: + - name: active + - name: threaded + - name: exhaust_is_master + - name: exhaust_hello + - name: awaiting_topology_changes + - name: mongodb.connections_rate + description: Connections Rate + unit: connections/s + chart_type: line + dimensions: + - name: created + - name: mongodb.asserts_rate + description: Raised assertions + unit: asserts/s + chart_type: stacked + dimensions: + - name: regular + - name: warning + - name: msg + - name: user + - name: tripwire + - name: rollovers + - name: mongodb.network_traffic_rate + description: Network traffic + unit: bytes/s + chart_type: 
stacked + dimensions: + - name: in + - name: out + - name: mongodb.network_requests_rate + description: Network Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: mongodb.network_slow_dns_resolutions_rate + description: Slow DNS resolution operations + unit: resolutions/s + chart_type: line + dimensions: + - name: slow_dns + - name: mongodb.network_slow_ssl_handshakes_rate + description: Slow SSL handshake operations + unit: handshakes/s + chart_type: line + dimensions: + - name: slow_ssl + - name: mongodb.memory_resident_size + description: Used resident memory + unit: bytes + chart_type: line + dimensions: + - name: used + - name: mongodb.memory_virtual_size + description: Used virtual memory + unit: bytes + chart_type: line + dimensions: + - name: used + - name: mongodb.memory_page_faults_rate + description: Memory page faults + unit: pgfaults/s + chart_type: line + dimensions: + - name: pgfaults + - name: mongodb.memory_tcmalloc_stats + description: TCMalloc statistics + unit: bytes + chart_type: line + dimensions: + - name: allocated + - name: central_cache_freelist + - name: transfer_cache_freelist + - name: thread_cache_freelists + - name: pageheap_freelist + - name: pageheap_unmapped + - name: mongodb.wiredtiger_concurrent_read_transactions_usage + description: Wired Tiger concurrent read transactions usage + unit: transactions + chart_type: stacked + dimensions: + - name: available + - name: used + - name: mongodb.wiredtiger_concurrent_write_transactions_usage + description: Wired Tiger concurrent write transactions usage + unit: transactions + chart_type: stacked + dimensions: + - name: available + - name: used + - name: mongodb.wiredtiger_cache_usage + description: Wired Tiger cache usage + unit: bytes + chart_type: line + dimensions: + - name: used + - name: mongodb.wiredtiger_cache_dirty_space_size + description: Wired Tiger cache dirty space size + unit: bytes + chart_type: line + dimensions: + - name: dirty + - name: mongodb.wiredtiger_cache_io_rate + description: Wired Tiger IO activity + unit: pages/s + chart_type: line + dimensions: + - name: read + - name: written + - name: mongodb.wiredtiger_cache_evictions_rate + description: Wired Tiger cache evictions + unit: pages/s + chart_type: stacked + dimensions: + - name: unmodified + - name: modified + - name: mongodb.sharding_nodes_count + description: Sharding Nodes + unit: nodes + chart_type: stacked + dimensions: + - name: shard_aware + - name: shard_unaware + - name: mongodb.sharding_sharded_databases_count + description: Sharded databases + unit: databases + chart_type: stacked + dimensions: + - name: partitioned + - name: unpartitioned + - name: mongodb.sharding_sharded_collections_count + description: Sharded collections + unit: collections + chart_type: stacked + dimensions: + - name: partitioned + - name: unpartitioned + - name: lock type + description: These metrics refer to the lock type. + labels: + - name: lock_type + description: lock type (e.g. global, database, collection, mutex) + metrics: + - name: mongodb.lock_acquisitions_rate + description: Lock acquisitions + unit: acquisitions/s + chart_type: line + dimensions: + - name: shared + - name: exclusive + - name: intent_shared + - name: intent_exclusive + - name: commit type + description: These metrics refer to the commit type. + labels: + - name: commit_type + description: commit type (e.g. 
noShards, singleShard, singleWriteShard) + metrics: + - name: mongodb.transactions_commits_rate + description: Transactions commits + unit: commits/s + chart_type: line + dimensions: + - name: success + - name: fail + - name: mongodb.transactions_commits_duration_time + description: Transactions successful commits duration + unit: milliseconds + chart_type: line + dimensions: + - name: commits + - name: database + description: These metrics refer to the database. + labels: + - name: database + description: database name + metrics: + - name: mongodb.database_collection_count + description: Database collections + unit: collections + chart_type: line + dimensions: + - name: collections + - name: mongodb.database_indexes_count + description: Database indexes + unit: indexes + chart_type: line + dimensions: + - name: indexes + - name: mongodb.database_views_count + description: Database views + unit: views + chart_type: line + dimensions: + - name: views + - name: mongodb.database_documents_count + description: Database documents + unit: documents + chart_type: line + dimensions: + - name: documents + - name: mongodb.database_data_size + description: Database data size + unit: bytes + chart_type: line + dimensions: + - name: data_size + - name: mongodb.database_storage_size + description: Database storage size + unit: bytes + chart_type: line + dimensions: + - name: storage_size + - name: mongodb.database_index_size + description: Database index size + unit: bytes + chart_type: line + dimensions: + - name: index_size + - name: replica set member + description: These metrics refer to the replica set member. + labels: + - name: repl_set_member + description: replica set member name + metrics: + - name: mongodb.repl_set_member_state + description: Replica Set member state + unit: state + chart_type: line + dimensions: + - name: primary + - name: startup + - name: secondary + - name: recovering + - name: startup2 + - name: unknown + - name: arbiter + - name: down + - name: rollback + - name: removed + - name: mongodb.repl_set_member_health_status + description: Replica Set member health status + unit: status + chart_type: line + dimensions: + - name: up + - name: down + - name: mongodb.repl_set_member_replication_lag_time + description: Replica Set member replication lag + unit: milliseconds + chart_type: line + dimensions: + - name: replication_lag + - name: mongodb.repl_set_member_heartbeat_latency_time + description: Replica Set member heartbeat latency + unit: milliseconds + chart_type: line + dimensions: + - name: heartbeat_latency + - name: mongodb.repl_set_member_ping_rtt_time + description: Replica Set member ping RTT + unit: milliseconds + chart_type: line + dimensions: + - name: ping_rtt + - name: mongodb.repl_set_member_uptime + description: Replica Set member uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: shard + description: These metrics refer to the shard. 
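+          # note: this scope is collected only when the monitored instance is a mongos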
+          labels:
+            - name: shard_id
+              description: shard id
+          metrics:
+            - name: mongodb.sharding_shard_chunks_count
+              description: Shard chunks
+              unit: chunks
+              chart_type: line
+              dimensions:
+                - name: chunks
diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/mongodb.go b/src/go/collectors/go.d.plugin/modules/mongodb/mongodb.go
new file mode 100644
index 00000000000000..522acbaa0db787
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/mongodb/mongodb.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+	_ "embed"
+	"sync"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/matcher"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("mongodb", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+// New creates Mongo with default configuration values.
+func New() *Mongo {
+	return &Mongo{
+		Config: Config{
+			Timeout: 2,
+			URI:     "mongodb://localhost:27017",
+			Databases: matcher.SimpleExpr{
+				Includes: []string{},
+				Excludes: []string{},
+			},
+		},
+
+		conn: &mongoClient{},
+
+		charts:                chartsServerStatus.Copy(),
+		addShardingChartsOnce: &sync.Once{},
+
+		optionalCharts: make(map[string]bool),
+		replSetMembers: make(map[string]bool),
+		databases:      make(map[string]bool),
+		shards:         make(map[string]bool),
+	}
+}
+
+type Config struct {
+	URI       string             `yaml:"uri"`
+	Timeout   time.Duration      `yaml:"timeout"`
+	Databases matcher.SimpleExpr `yaml:"databases"`
+}
+
+type Mongo struct {
+	module.Base
+	Config `yaml:",inline"`
+
+	charts *module.Charts
+
+	conn mongoConn
+
+	dbSelector matcher.Matcher
+
+	addShardingChartsOnce *sync.Once
+
+	optionalCharts map[string]bool
+	databases      map[string]bool
+	replSetMembers map[string]bool
+	shards         map[string]bool
+}
+
+// Init validates the configuration and compiles the database selector; it returns false if either step fails.
+func (m *Mongo) Init() bool {
+	if err := m.verifyConfig(); err != nil {
+		m.Errorf("config validation: %v", err)
+		return false
+	}
+
+	if err := m.initDatabaseSelector(); err != nil {
+		m.Errorf("init database selector: %v", err)
+		return false
+	}
+
+	return true
+}
+
+// Check collects metrics once and reports whether anything was collected.
+func (m *Mongo) Check() bool {
+	return len(m.Collect()) > 0
+}
+
+// Charts returns the collector's chart set.
+func (m *Mongo) Charts() *module.Charts {
+	return m.charts
+}
+
+// Collect gathers metrics; it returns nil when no values were collected.
+func (m *Mongo) Collect() map[string]int64 {
+	mx, err := m.collect()
+	if err != nil {
+		m.Error(err)
+	}
+
+	if len(mx) == 0 {
+		m.Warning("no values collected")
+		return nil
+	}
+
+	return mx
+}
+
+// Cleanup closes the connection to MongoDB, if one was established.
+func (m *Mongo) Cleanup() {
+	if m.conn == nil {
+		return
+	}
+	if err := m.conn.close(); err != nil {
+		m.Warningf("cleanup: error on closing mongo conn: %v", err)
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/mongodb_test.go b/src/go/collectors/go.d.plugin/modules/mongodb/mongodb_test.go
new file mode 100644
index 00000000000000..37da851edb3a2d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/mongodb/mongodb_test.go
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+	"encoding/json"
+	"errors"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/netdata/go.d.plugin/pkg/matcher"
+)
+
+var (
+	dataV6MongodServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongod-serverStatus.json")
+	dataV6MongosServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongos-serverStatus.json")
+	dataV6DbStats, _            = os.ReadFile("testdata/v6.0.3/dbStats.json")
+	dataV6ReplSetGetStatus, _   = os.ReadFile("testdata/v6.0.3/replSetGetStatus.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+	for name, data :=
range map[string][]byte{ + "dataV6MongodServerStatus": dataV6MongodServerStatus, + "dataV6MongosServerStatus": dataV6MongosServerStatus, + "dataV6DbStats": dataV6DbStats, + "dataV6ReplSetGetStatus": dataV6ReplSetGetStatus, + } { + require.NotNilf(t, data, name) + } +} + +func TestMongo_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + wantFail: false, + config: New().Config, + }, + "fails on unset 'address'": { + wantFail: true, + config: Config{ + URI: "", + }, + }, + "fails on invalid database selector": { + wantFail: true, + config: Config{ + URI: "mongodb://localhost:27017", + Databases: matcher.SimpleExpr{ + Includes: []string{"!@#"}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mongo := New() + mongo.Config = test.config + + if test.wantFail { + assert.False(t, mongo.Init()) + } else { + assert.True(t, mongo.Init()) + } + }) + } +} + +func TestMongo_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestMongo_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Mongo + wantClose bool + }{ + "client not initialized": { + wantClose: false, + prepare: func(t *testing.T) *Mongo { + return New() + }, + }, + "client initialized": { + wantClose: true, + prepare: func(t *testing.T) *Mongo { + mongo := New() + mongo.conn = caseMongod() + _ = mongo.conn.initClient("", 0) + + return mongo + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mongo := test.prepare(t) + + require.NotPanics(t, mongo.Cleanup) + if test.wantClose { + mock, ok := mongo.conn.(*mockMongoClient) + require.True(t, ok) + assert.True(t, mock.closeCalled) + } + }) + } +} + +func TestMongo_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *mockMongoClient + wantFail bool + }{ + "success on Mongod (v6)": { + wantFail: false, + prepare: caseMongod, + }, + "success on Mongod Replicas Set(v6)": { + wantFail: false, + prepare: caseMongodReplicaSet, + }, + "success on Mongos (v6)": { + wantFail: false, + prepare: caseMongos, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mongo := prepareMongo() + defer mongo.Cleanup() + mongo.conn = test.prepare() + + require.True(t, mongo.Init()) + + if test.wantFail { + assert.False(t, mongo.Check()) + } else { + assert.True(t, mongo.Check()) + } + }) + } +} + +func TestMongo_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *mockMongoClient + wantCollected map[string]int64 + }{ + "success on Mongod (v6)": { + prepare: caseMongod, + wantCollected: map[string]int64{ + "asserts_msg": 0, + "asserts_regular": 0, + "asserts_rollovers": 0, + "asserts_tripwire": 0, + "asserts_user": 246, + "asserts_warning": 0, + "connections_active": 7, + "connections_available": 838841, + "connections_awaiting_topology_changes": 5, + "connections_current": 19, + "connections_exhaust_hello": 2, + "connections_exhaust_is_master": 1, + "connections_threaded": 19, + "connections_total_created": 77, + "database_admin_collections": 3, + "database_admin_data_size": 796, + "database_admin_documents": 5, + "database_admin_index_size": 81920, + "database_admin_indexes": 4, + "database_admin_storage_size": 61440, + "database_admin_views": 0, + "database_config_collections": 3, + "database_config_data_size": 796, + "database_config_documents": 5, + "database_config_index_size": 81920, + "database_config_indexes": 4, + "database_config_storage_size": 
61440, + "database_config_views": 0, + "database_local_collections": 3, + "database_local_data_size": 796, + "database_local_documents": 5, + "database_local_index_size": 81920, + "database_local_indexes": 4, + "database_local_storage_size": 61440, + "database_local_views": 0, + "extra_info_page_faults": 0, + "global_lock_active_clients_readers": 0, + "global_lock_active_clients_writers": 0, + "global_lock_current_queue_readers": 0, + "global_lock_current_queue_writers": 0, + "locks_collection_acquire_exclusive": 6, + "locks_collection_acquire_intent_exclusive": 172523, + "locks_collection_acquire_intent_shared": 336370, + "locks_collection_acquire_shared": 0, + "locks_database_acquire_exclusive": 3, + "locks_database_acquire_intent_exclusive": 172539, + "locks_database_acquire_intent_shared": 50971, + "locks_database_acquire_shared": 0, + "locks_global_acquire_exclusive": 6, + "locks_global_acquire_intent_exclusive": 174228, + "locks_global_acquire_intent_shared": 437905, + "locks_global_acquire_shared": 0, + "locks_mutex_acquire_exclusive": 0, + "locks_mutex_acquire_intent_exclusive": 0, + "locks_mutex_acquire_intent_shared": 245077, + "locks_mutex_acquire_shared": 0, + "locks_oplog_acquire_exclusive": 0, + "locks_oplog_acquire_intent_exclusive": 1, + "locks_oplog_acquire_intent_shared": 16788, + "locks_oplog_acquire_shared": 0, + "memory_resident": 193986560, + "memory_virtual": 3023044608, + "metrics_cursor_lifespan_greater_than_or_equal_10_minutes": 0, + "metrics_cursor_lifespan_less_than_10_minutes": 0, + "metrics_cursor_lifespan_less_than_15_seconds": 0, + "metrics_cursor_lifespan_less_than_1_minute": 0, + "metrics_cursor_lifespan_less_than_1_second": 0, + "metrics_cursor_lifespan_less_than_30_seconds": 0, + "metrics_cursor_lifespan_less_than_5_seconds": 0, + "metrics_cursor_open_no_timeout": 0, + "metrics_cursor_open_total": 1, + "metrics_cursor_timed_out": 0, + "metrics_cursor_total_opened": 1, + "metrics_document_deleted": 7, + "metrics_document_inserted": 0, + "metrics_document_returned": 1699, + "metrics_document_updated": 52, + "metrics_query_executor_scanned": 61, + "metrics_query_executor_scanned_objects": 1760, + "network_bytes_in": 38851356, + "network_bytes_out": 706335836, + "network_requests": 130530, + "network_slow_dns_operations": 0, + "network_slow_ssl_operations": 0, + "operations_command": 125531, + "operations_delete": 7, + "operations_getmore": 5110, + "operations_insert": 0, + "operations_latencies_commands_latency": 46432082, + "operations_latencies_commands_ops": 125412, + "operations_latencies_reads_latency": 1009868, + "operations_latencies_reads_ops": 5111, + "operations_latencies_writes_latency": 0, + "operations_latencies_writes_ops": 0, + "operations_query": 76, + "operations_update": 59, + "tcmalloc_aggressive_memory_decommit": 0, + "tcmalloc_central_cache_free_bytes": 406680, + "tcmalloc_current_total_thread_cache_bytes": 2490832, + "tcmalloc_generic_current_allocated_bytes": 109050648, + "tcmalloc_generic_heap_size": 127213568, + "tcmalloc_max_total_thread_cache_bytes": 1073741824, + "tcmalloc_pageheap_commit_count": 376, + "tcmalloc_pageheap_committed_bytes": 127086592, + "tcmalloc_pageheap_decommit_count": 122, + "tcmalloc_pageheap_free_bytes": 13959168, + "tcmalloc_pageheap_reserve_count": 60, + "tcmalloc_pageheap_scavenge_bytes": 0, + "tcmalloc_pageheap_total_commit_bytes": 229060608, + "tcmalloc_pageheap_total_decommit_bytes": 101974016, + "tcmalloc_pageheap_total_reserve_bytes": 127213568, + "tcmalloc_pageheap_unmapped_bytes": 126976, + 
"tcmalloc_spinlock_total_delay_ns": 33426251, + "tcmalloc_thread_cache_free_bytes": 2490832, + "tcmalloc_total_free_bytes": 4076776, + "tcmalloc_transfer_cache_free_bytes": 1179264, + "txn_active": 0, + "txn_inactive": 0, + "txn_open": 0, + "txn_prepared": 0, + "txn_total_aborted": 0, + "txn_total_committed": 0, + "txn_total_prepared": 0, + "txn_total_started": 0, + "wiredtiger_cache_currently_in_cache_bytes": 814375, + "wiredtiger_cache_maximum_configured_bytes": 7854882816, + "wiredtiger_cache_modified_evicted_pages": 0, + "wiredtiger_cache_read_into_cache_pages": 108, + "wiredtiger_cache_tracked_dirty_in_the_cache_bytes": 456446, + "wiredtiger_cache_unmodified_evicted_pages": 0, + "wiredtiger_cache_written_from_cache_pages": 3177, + "wiredtiger_concurrent_txn_read_available": 128, + "wiredtiger_concurrent_txn_read_out": 0, + "wiredtiger_concurrent_txn_write_available": 128, + "wiredtiger_concurrent_txn_write_out": 0, + }, + }, + "success on Mongod Replica Set (v6)": { + prepare: caseMongodReplicaSet, + wantCollected: map[string]int64{ + "asserts_msg": 0, + "asserts_regular": 0, + "asserts_rollovers": 0, + "asserts_tripwire": 0, + "asserts_user": 246, + "asserts_warning": 0, + "connections_active": 7, + "connections_available": 838841, + "connections_awaiting_topology_changes": 5, + "connections_current": 19, + "connections_exhaust_hello": 2, + "connections_exhaust_is_master": 1, + "connections_threaded": 19, + "connections_total_created": 77, + "database_admin_collections": 3, + "database_admin_data_size": 796, + "database_admin_documents": 5, + "database_admin_index_size": 81920, + "database_admin_indexes": 4, + "database_admin_storage_size": 61440, + "database_admin_views": 0, + "database_config_collections": 3, + "database_config_data_size": 796, + "database_config_documents": 5, + "database_config_index_size": 81920, + "database_config_indexes": 4, + "database_config_storage_size": 61440, + "database_config_views": 0, + "database_local_collections": 3, + "database_local_data_size": 796, + "database_local_documents": 5, + "database_local_index_size": 81920, + "database_local_indexes": 4, + "database_local_storage_size": 61440, + "database_local_views": 0, + "extra_info_page_faults": 0, + "global_lock_active_clients_readers": 0, + "global_lock_active_clients_writers": 0, + "global_lock_current_queue_readers": 0, + "global_lock_current_queue_writers": 0, + "locks_collection_acquire_exclusive": 6, + "locks_collection_acquire_intent_exclusive": 172523, + "locks_collection_acquire_intent_shared": 336370, + "locks_collection_acquire_shared": 0, + "locks_database_acquire_exclusive": 3, + "locks_database_acquire_intent_exclusive": 172539, + "locks_database_acquire_intent_shared": 50971, + "locks_database_acquire_shared": 0, + "locks_global_acquire_exclusive": 6, + "locks_global_acquire_intent_exclusive": 174228, + "locks_global_acquire_intent_shared": 437905, + "locks_global_acquire_shared": 0, + "locks_mutex_acquire_exclusive": 0, + "locks_mutex_acquire_intent_exclusive": 0, + "locks_mutex_acquire_intent_shared": 245077, + "locks_mutex_acquire_shared": 0, + "locks_oplog_acquire_exclusive": 0, + "locks_oplog_acquire_intent_exclusive": 1, + "locks_oplog_acquire_intent_shared": 16788, + "locks_oplog_acquire_shared": 0, + "memory_resident": 193986560, + "memory_virtual": 3023044608, + "metrics_cursor_lifespan_greater_than_or_equal_10_minutes": 0, + "metrics_cursor_lifespan_less_than_10_minutes": 0, + "metrics_cursor_lifespan_less_than_15_seconds": 0, + 
"metrics_cursor_lifespan_less_than_1_minute": 0, + "metrics_cursor_lifespan_less_than_1_second": 0, + "metrics_cursor_lifespan_less_than_30_seconds": 0, + "metrics_cursor_lifespan_less_than_5_seconds": 0, + "metrics_cursor_open_no_timeout": 0, + "metrics_cursor_open_total": 1, + "metrics_cursor_timed_out": 0, + "metrics_cursor_total_opened": 1, + "metrics_document_deleted": 7, + "metrics_document_inserted": 0, + "metrics_document_returned": 1699, + "metrics_document_updated": 52, + "metrics_query_executor_scanned": 61, + "metrics_query_executor_scanned_objects": 1760, + "network_bytes_in": 38851356, + "network_bytes_out": 706335836, + "network_requests": 130530, + "network_slow_dns_operations": 0, + "network_slow_ssl_operations": 0, + "operations_command": 125531, + "operations_delete": 7, + "operations_getmore": 5110, + "operations_insert": 0, + "operations_latencies_commands_latency": 46432082, + "operations_latencies_commands_ops": 125412, + "operations_latencies_reads_latency": 1009868, + "operations_latencies_reads_ops": 5111, + "operations_latencies_writes_latency": 0, + "operations_latencies_writes_ops": 0, + "operations_query": 76, + "operations_update": 59, + "repl_set_member_mongodb-primary:27017_health_status_down": 0, + "repl_set_member_mongodb-primary:27017_health_status_up": 1, + "repl_set_member_mongodb-primary:27017_replication_lag": 4572, + "repl_set_member_mongodb-primary:27017_state_arbiter": 0, + "repl_set_member_mongodb-primary:27017_state_down": 0, + "repl_set_member_mongodb-primary:27017_state_primary": 1, + "repl_set_member_mongodb-primary:27017_state_recovering": 0, + "repl_set_member_mongodb-primary:27017_state_removed": 0, + "repl_set_member_mongodb-primary:27017_state_rollback": 0, + "repl_set_member_mongodb-primary:27017_state_secondary": 0, + "repl_set_member_mongodb-primary:27017_state_startup": 0, + "repl_set_member_mongodb-primary:27017_state_startup2": 0, + "repl_set_member_mongodb-primary:27017_state_unknown": 0, + "repl_set_member_mongodb-secondary:27017_health_status_down": 0, + "repl_set_member_mongodb-secondary:27017_health_status_up": 1, + "repl_set_member_mongodb-secondary:27017_heartbeat_latency": 1359, + "repl_set_member_mongodb-secondary:27017_ping_rtt": 0, + "repl_set_member_mongodb-secondary:27017_replication_lag": 4572, + "repl_set_member_mongodb-secondary:27017_state_arbiter": 0, + "repl_set_member_mongodb-secondary:27017_state_down": 0, + "repl_set_member_mongodb-secondary:27017_state_primary": 0, + "repl_set_member_mongodb-secondary:27017_state_recovering": 0, + "repl_set_member_mongodb-secondary:27017_state_removed": 0, + "repl_set_member_mongodb-secondary:27017_state_rollback": 0, + "repl_set_member_mongodb-secondary:27017_state_secondary": 1, + "repl_set_member_mongodb-secondary:27017_state_startup": 0, + "repl_set_member_mongodb-secondary:27017_state_startup2": 0, + "repl_set_member_mongodb-secondary:27017_state_unknown": 0, + "repl_set_member_mongodb-secondary:27017_uptime": 192370, + "tcmalloc_aggressive_memory_decommit": 0, + "tcmalloc_central_cache_free_bytes": 406680, + "tcmalloc_current_total_thread_cache_bytes": 2490832, + "tcmalloc_generic_current_allocated_bytes": 109050648, + "tcmalloc_generic_heap_size": 127213568, + "tcmalloc_max_total_thread_cache_bytes": 1073741824, + "tcmalloc_pageheap_commit_count": 376, + "tcmalloc_pageheap_committed_bytes": 127086592, + "tcmalloc_pageheap_decommit_count": 122, + "tcmalloc_pageheap_free_bytes": 13959168, + "tcmalloc_pageheap_reserve_count": 60, + "tcmalloc_pageheap_scavenge_bytes": 0, + 
"tcmalloc_pageheap_total_commit_bytes": 229060608, + "tcmalloc_pageheap_total_decommit_bytes": 101974016, + "tcmalloc_pageheap_total_reserve_bytes": 127213568, + "tcmalloc_pageheap_unmapped_bytes": 126976, + "tcmalloc_spinlock_total_delay_ns": 33426251, + "tcmalloc_thread_cache_free_bytes": 2490832, + "tcmalloc_total_free_bytes": 4076776, + "tcmalloc_transfer_cache_free_bytes": 1179264, + "txn_active": 0, + "txn_inactive": 0, + "txn_open": 0, + "txn_prepared": 0, + "txn_total_aborted": 0, + "txn_total_committed": 0, + "txn_total_prepared": 0, + "txn_total_started": 0, + "wiredtiger_cache_currently_in_cache_bytes": 814375, + "wiredtiger_cache_maximum_configured_bytes": 7854882816, + "wiredtiger_cache_modified_evicted_pages": 0, + "wiredtiger_cache_read_into_cache_pages": 108, + "wiredtiger_cache_tracked_dirty_in_the_cache_bytes": 456446, + "wiredtiger_cache_unmodified_evicted_pages": 0, + "wiredtiger_cache_written_from_cache_pages": 3177, + "wiredtiger_concurrent_txn_read_available": 128, + "wiredtiger_concurrent_txn_read_out": 0, + "wiredtiger_concurrent_txn_write_available": 128, + "wiredtiger_concurrent_txn_write_out": 0, + }, + }, + "success on Mongos (v6)": { + prepare: caseMongos, + wantCollected: map[string]int64{ + "asserts_msg": 0, + "asserts_regular": 0, + "asserts_rollovers": 0, + "asserts_tripwire": 0, + "asserts_user": 352, + "asserts_warning": 0, + "connections_active": 5, + "connections_available": 838842, + "connections_awaiting_topology_changes": 4, + "connections_current": 18, + "connections_exhaust_hello": 3, + "connections_exhaust_is_master": 0, + "connections_threaded": 18, + "connections_total_created": 89, + "database_admin_collections": 3, + "database_admin_data_size": 796, + "database_admin_documents": 5, + "database_admin_index_size": 81920, + "database_admin_indexes": 4, + "database_admin_storage_size": 61440, + "database_admin_views": 0, + "database_config_collections": 3, + "database_config_data_size": 796, + "database_config_documents": 5, + "database_config_index_size": 81920, + "database_config_indexes": 4, + "database_config_storage_size": 61440, + "database_config_views": 0, + "database_local_collections": 3, + "database_local_data_size": 796, + "database_local_documents": 5, + "database_local_index_size": 81920, + "database_local_indexes": 4, + "database_local_storage_size": 61440, + "database_local_views": 0, + "extra_info_page_faults": 526, + "memory_resident": 84934656, + "memory_virtual": 2596274176, + "metrics_document_deleted": 0, + "metrics_document_inserted": 0, + "metrics_document_returned": 0, + "metrics_document_updated": 0, + "metrics_query_executor_scanned": 0, + "metrics_query_executor_scanned_objects": 0, + "network_bytes_in": 57943348, + "network_bytes_out": 247343709, + "network_requests": 227310, + "network_slow_dns_operations": 0, + "network_slow_ssl_operations": 0, + "operations_command": 227283, + "operations_delete": 0, + "operations_getmore": 0, + "operations_insert": 0, + "operations_query": 10, + "operations_update": 0, + "shard_collections_partitioned": 1, + "shard_collections_unpartitioned": 1, + "shard_databases_partitioned": 1, + "shard_databases_unpartitioned": 1, + "shard_id_shard0_chunks": 1, + "shard_id_shard1_chunks": 1, + "shard_nodes_aware": 1, + "shard_nodes_unaware": 1, + "tcmalloc_aggressive_memory_decommit": 0, + "tcmalloc_central_cache_free_bytes": 736960, + "tcmalloc_current_total_thread_cache_bytes": 1638104, + "tcmalloc_generic_current_allocated_bytes": 13519784, + "tcmalloc_generic_heap_size": 24576000, + 
"tcmalloc_max_total_thread_cache_bytes": 1042284544, + "tcmalloc_pageheap_commit_count": 480, + "tcmalloc_pageheap_committed_bytes": 24518656, + "tcmalloc_pageheap_decommit_count": 127, + "tcmalloc_pageheap_free_bytes": 5697536, + "tcmalloc_pageheap_reserve_count": 15, + "tcmalloc_pageheap_scavenge_bytes": 0, + "tcmalloc_pageheap_total_commit_bytes": 84799488, + "tcmalloc_pageheap_total_decommit_bytes": 60280832, + "tcmalloc_pageheap_total_reserve_bytes": 24576000, + "tcmalloc_pageheap_unmapped_bytes": 57344, + "tcmalloc_spinlock_total_delay_ns": 96785212, + "tcmalloc_thread_cache_free_bytes": 1638104, + "tcmalloc_total_free_bytes": 5301336, + "tcmalloc_transfer_cache_free_bytes": 2926272, + "txn_active": 0, + "txn_commit_types_no_shards_initiated": 0, + "txn_commit_types_no_shards_successful": 0, + "txn_commit_types_no_shards_successful_duration_micros": 0, + "txn_commit_types_no_shards_unsuccessful": 0, + "txn_commit_types_read_only_initiated": 0, + "txn_commit_types_read_only_successful": 0, + "txn_commit_types_read_only_successful_duration_micros": 0, + "txn_commit_types_read_only_unsuccessful": 0, + "txn_commit_types_recover_with_token_initiated": 0, + "txn_commit_types_recover_with_token_successful": 0, + "txn_commit_types_recover_with_token_successful_duration_micros": 0, + "txn_commit_types_recover_with_token_unsuccessful": 0, + "txn_commit_types_single_shard_initiated": 0, + "txn_commit_types_single_shard_successful": 0, + "txn_commit_types_single_shard_successful_duration_micros": 0, + "txn_commit_types_single_shard_unsuccessful": 0, + "txn_commit_types_single_write_shard_initiated": 0, + "txn_commit_types_single_write_shard_successful": 0, + "txn_commit_types_single_write_shard_successful_duration_micros": 0, + "txn_commit_types_single_write_shard_unsuccessful": 0, + "txn_commit_types_two_phase_commit_initiated": 0, + "txn_commit_types_two_phase_commit_successful": 0, + "txn_commit_types_two_phase_commit_successful_duration_micros": 0, + "txn_commit_types_two_phase_commit_unsuccessful": 0, + "txn_inactive": 0, + "txn_open": 0, + "txn_total_aborted": 0, + "txn_total_committed": 0, + "txn_total_started": 0, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mongo := prepareMongo() + defer mongo.Cleanup() + mongo.conn = test.prepare() + + require.True(t, mongo.Init()) + + mx := mongo.Collect() + + assert.Equal(t, test.wantCollected, mx) + }) + } +} + +func prepareMongo() *Mongo { + m := New() + m.Databases = matcher.SimpleExpr{Includes: []string{"* *"}} + return m +} + +func caseMongodReplicaSet() *mockMongoClient { + return &mockMongoClient{replicaSet: true} +} + +func caseMongod() *mockMongoClient { + return &mockMongoClient{} +} + +func caseMongos() *mockMongoClient { + return &mockMongoClient{mongos: true} +} + +type mockMongoClient struct { + replicaSet bool + mongos bool + errOnServerStatus bool + errOnListDatabaseNames bool + errOnDbStats bool + errOnReplSetGetStatus bool + errOnShardNodes bool + errOnShardDatabasesPartitioning bool + errOnShardCollectionsPartitioning bool + errOnShardChunks bool + errOnInitClient bool + clientInited bool + closeCalled bool +} + +func (m *mockMongoClient) serverStatus() (*documentServerStatus, error) { + if !m.clientInited { + return nil, errors.New("mock.serverStatus() error: mongo client not inited") + } + if m.errOnServerStatus { + return nil, errors.New("mock.serverStatus() error") + } + + data := dataV6MongodServerStatus + if m.mongos { + data = dataV6MongosServerStatus + } + + var s 
documentServerStatus
+	if err := json.Unmarshal(data, &s); err != nil {
+		return nil, err
+	}
+
+	return &s, nil
+}
+
+func (m *mockMongoClient) listDatabaseNames() ([]string, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.listDatabaseNames() error: mongo client not inited")
+	}
+	if m.errOnListDatabaseNames {
+		return nil, errors.New("mock.listDatabaseNames() error")
+	}
+	return []string{"admin", "config", "local"}, nil
+}
+
+func (m *mockMongoClient) dbStats(_ string) (*documentDBStats, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.dbStats() error: mongo client not inited")
+	}
+	if m.errOnDbStats {
+		return nil, errors.New("mock.dbStats() error")
+	}
+
+	var s documentDBStats
+	if err := json.Unmarshal(dataV6DbStats, &s); err != nil {
+		return nil, err
+	}
+
+	return &s, nil
+}
+
+func (m *mockMongoClient) isReplicaSet() bool {
+	return m.replicaSet
+}
+
+func (m *mockMongoClient) isMongos() bool {
+	return m.mongos
+}
+
+func (m *mockMongoClient) replSetGetStatus() (*documentReplSetStatus, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.replSetGetStatus() error: mongo client not inited")
+	}
+	if m.mongos {
+		return nil, errors.New("mock.replSetGetStatus() error: shouldn't be called for mongos")
+	}
+	if !m.replicaSet {
+		return nil, errors.New("mock.replSetGetStatus() error: should be called for replica set")
+	}
+	if m.errOnReplSetGetStatus {
+		return nil, errors.New("mock.replSetGetStatus() error")
+	}
+
+	var s documentReplSetStatus
+	if err := json.Unmarshal(dataV6ReplSetGetStatus, &s); err != nil {
+		return nil, err
+	}
+
+	return &s, nil
+}
+
+func (m *mockMongoClient) shardNodes() (*documentShardNodesResult, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.shardNodes() error: mongo client not inited")
+	}
+	if m.replicaSet {
+		return nil, errors.New("mock.shardNodes() error: shouldn't be called for replica set")
+	}
+	if !m.mongos {
+		return nil, errors.New("mock.shardNodes() error: should be called for mongos")
+	}
+	if m.errOnShardNodes {
+		return nil, errors.New("mock.shardNodes() error")
+	}
+
+	return &documentShardNodesResult{
+		ShardAware:   1,
+		ShardUnaware: 1,
+	}, nil
+}
+
+func (m *mockMongoClient) shardDatabasesPartitioning() (*documentPartitionedResult, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.shardDatabasesPartitioning() error: mongo client not inited")
+	}
+	if m.replicaSet {
+		return nil, errors.New("mock.shardDatabasesPartitioning() error: shouldn't be called for replica set")
+	}
+	if !m.mongos {
+		return nil, errors.New("mock.shardDatabasesPartitioning() error: should be called for mongos")
+	}
+	if m.errOnShardDatabasesPartitioning {
+		return nil, errors.New("mock.shardDatabasesPartitioning() error")
+	}
+
+	return &documentPartitionedResult{
+		Partitioned:   1,
+		UnPartitioned: 1,
+	}, nil
+}
+
+func (m *mockMongoClient) shardCollectionsPartitioning() (*documentPartitionedResult, error) {
+	if !m.clientInited {
+		return nil, errors.New("mock.shardCollectionsPartitioning() error: mongo client not inited")
+	}
+	if m.replicaSet {
+		return nil, errors.New("mock.shardCollectionsPartitioning() error: shouldn't be called for replica set")
+	}
+	if !m.mongos {
+		return nil, errors.New("mock.shardCollectionsPartitioning() error: should be called for mongos")
+	}
+	if m.errOnShardCollectionsPartitioning {
+		return nil, errors.New("mock.shardCollectionsPartitioning() error")
+	}
+
+	return &documentPartitionedResult{
+		Partitioned:   1,
+		UnPartitioned: 1,
+	}, nil
+}
+
+func (m
*mockMongoClient) shardChunks() (map[string]int64, error) { + if !m.clientInited { + return nil, errors.New("mock.shardChunks() error: mongo client not inited") + } + if m.replicaSet { + return nil, errors.New("mock.shardChunks() error: shouldn't be called for replica set") + } + if !m.mongos { + return nil, errors.New("mock.shardChunks() error: should be called for mongos") + } + if m.errOnShardChunks { + return nil, errors.New("mock.shardChunks() error") + } + + return map[string]int64{ + "shard0": 1, + "shard1": 1, + }, nil +} + +func (m *mockMongoClient) initClient(_ string, _ time.Duration) error { + if m.errOnInitClient { + return errors.New("mock.initClient() error") + } + m.clientInited = true + return nil +} + +func (m *mockMongoClient) close() error { + if m.clientInited { + m.closeCalled = true + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/dbStats.json b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/dbStats.json new file mode 100644 index 00000000000000..52a51320347af1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/dbStats.json @@ -0,0 +1,9 @@ +{ + "Collections": 3, + "Views": 0, + "Indexes": 4, + "Objects": 5, + "DataSize": 796, + "IndexSize": 81920, + "StorageSize": 61440 +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json new file mode 100644 index 00000000000000..77f083923b4478 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json @@ -0,0 +1,497 @@ +{ + "Process": "mongod", + "OpCounters": { + "Insert": 0, + "Query": 76, + "Update": 59, + "Delete": 7, + "GetMore": 5110, + "Command": 125531 + }, + "OpLatencies": { + "Reads": { + "Latency": 1009868, + "Ops": 5111 + }, + "Writes": { + "Latency": 0, + "Ops": 0 + }, + "Commands": { + "Latency": 46432082, + "Ops": 125412 + } + }, + "Connections": { + "Current": 19, + "Available": 838841, + "TotalCreated": 77, + "Active": 7, + "Threaded": 19, + "ExhaustIsMaster": 1, + "ExhaustHello": 2, + "AwaitingTopologyChanges": 5 + }, + "Network": { + "BytesIn": 38851356, + "BytesOut": 706335836, + "NumRequests": 130530, + "NumSlowDNSOperations": 0, + "NumSlowSSLOperations": 0 + }, + "Memory": { + "Resident": 185, + "Virtual": 2883 + }, + "Metrics": { + "Cursor": { + "TotalOpened": 1, + "TimedOut": 0, + "Open": { + "NoTimeout": 0, + "Total": 1 + }, + "Lifespan": { + "GreaterThanOrEqual10Minutes": 0, + "LessThan10Minutes": 0, + "LessThan15Seconds": 0, + "LessThan1Minute": 0, + "LessThan1Second": 0, + "LessThan30Seconds": 0, + "LessThan5Seconds": 0 + } + }, + "Document": { + "Deleted": 7, + "Inserted": 0, + "Returned": 1699, + "Updated": 52 + }, + "QueryExecutor": { + "Scanned": 61, + "ScannedObjects": 1760 + } + }, + "ExtraInfo": { + "PageFaults": 0 + }, + "Asserts": { + "Regular": 0, + "Warning": 0, + "Msg": 0, + "User": 246, + "Tripwire": 0, + "Rollovers": 0 + }, + "Transactions": { + "CurrentActive": 0, + "CurrentInactive": 0, + "CurrentOpen": 0, + "CurrentPrepared": 0, + "TotalAborted": 0, + "TotalCommitted": 0, + "TotalStarted": 0, + "TotalPrepared": 0, + "CommitTypes": null + }, + "GlobalLock": { + "CurrentQueue": { + "Readers": 0, + "Writers": 0 + }, + "ActiveClients": { + "Readers": 0, + "Writers": 0 + } + }, + "Tcmalloc": { + "Generic": { + "CurrentAllocatedBytes": 109050648, + "HeapSize": 127213568 + }, + "Tcmalloc": { + "PageheapFreeBytes": 
13959168, + "PageheapUnmappedBytes": 126976, + "MaxTotalThreadCacheBytes": 1073741824, + "CurrentTotalThreadCacheBytes": 2490832, + "TotalFreeBytes": 4076776, + "CentralCacheFreeBytes": 406680, + "TransferCacheFreeBytes": 1179264, + "ThreadCacheFreeBytes": 2490832, + "AggressiveMemoryDecommit": 0, + "PageheapCommittedBytes": 127086592, + "PageheapScavengeBytes": 0, + "PageheapCommitCount": 376, + "PageheapTotalCommitBytes": 229060608, + "PageheapDecommitCount": 122, + "PageheapTotalDecommitBytes": 101974016, + "PageheapReserveCount": 60, + "PageheapTotalReserveBytes": 127213568, + "SpinlockTotalDelayNs": 33426251 + } + }, + "Locks": { + "Global": { + "AcquireCount": { + "Shared": 0, + "Exclusive": 6, + "IntentShared": 437905, + "IntentExclusive": 174228 + } + }, + "Database": { + "AcquireCount": { + "Shared": 0, + "Exclusive": 3, + "IntentShared": 50971, + "IntentExclusive": 172539 + } + }, + "Collection": { + "AcquireCount": { + "Shared": 0, + "Exclusive": 6, + "IntentShared": 336370, + "IntentExclusive": 172523 + } + }, + "Mutex": { + "AcquireCount": { + "Shared": 0, + "Exclusive": 0, + "IntentShared": 245077, + "IntentExclusive": 0 + } + }, + "Metadata": null, + "Oplog": { + "AcquireCount": { + "Shared": 0, + "Exclusive": 0, + "IntentShared": 16788, + "IntentExclusive": 1 + } + } + }, + "WiredTiger": { + "ConcurrentTransaction": { + "Write": { + "Out": 0, + "Available": 128 + }, + "Read": { + "Out": 0, + "Available": 128 + } + }, + "Cache": { + "BytesCurrentlyInCache": 814375, + "MaximumBytesConfigured": 7854882816, + "TrackedDirtyBytesInCache": 456446, + "UnmodifiedPagesEvicted": 0, + "ModifiedPagesEvicted": 0, + "PagesReadIntoCache": 108, + "PagesWrittenFromCache": 3177 + } + }, + "Repl": [ + { + "Key": "topologyVersion", + "Value": [ + { + "Key": "processId", + "Value": "63b043be562288304ad3b4fe" + }, + { + "Key": "counter", + "Value": 7 + } + ] + }, + { + "Key": "hosts", + "Value": [ + "mongodb-primary:27017", + "mongodb-secondary:27017" + ] + }, + { + "Key": "setName", + "Value": "replicaset" + }, + { + "Key": "setVersion", + "Value": 4 + }, + { + "Key": "isWritablePrimary", + "Value": true + }, + { + "Key": "secondary", + "Value": false + }, + { + "Key": "primary", + "Value": "mongodb-primary:27017" + }, + { + "Key": "me", + "Value": "mongodb-primary:27017" + }, + { + "Key": "electionId", + "Value": "7fffffff0000000000000006" + }, + { + "Key": "lastWrite", + "Value": [ + { + "Key": "opTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "lastWriteDate", + "Value": "2022-12-31T20:54:44+02:00" + }, + { + "Key": "majorityOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "majorityWriteDate", + "Value": "2022-12-31T20:54:44+02:00" + } + ] + }, + { + "Key": "replicationProgress", + "Value": [ + [ + { + "Key": "host", + "Value": "mongodb-primary:27017" + }, + { + "Key": "optime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "lastAppliedOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "heartbeatAppliedOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 0, + "I": 0 + } + }, + { + "Key": "t", + "Value": -1 + } + ] + }, + { + "Key": "heartbeatDurableOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + 
"T": 0, + "I": 0 + } + }, + { + "Key": "t", + "Value": -1 + } + ] + }, + { + "Key": "memberId", + "Value": 0 + } + ], + [ + { + "Key": "host", + "Value": "mongodb-secondary:27017" + }, + { + "Key": "optime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "lastAppliedOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "heartbeatAppliedOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "heartbeatDurableOpTime", + "Value": [ + { + "Key": "ts", + "Value": { + "T": 1672512884, + "I": 1 + } + }, + { + "Key": "t", + "Value": 6 + } + ] + }, + { + "Key": "memberId", + "Value": 1 + } + ] + ] + }, + { + "Key": "primaryOnlyServices", + "Value": [ + { + "Key": "ShardSplitDonorService", + "Value": [ + { + "Key": "state", + "Value": "running" + }, + { + "Key": "numInstances", + "Value": 0 + } + ] + }, + { + "Key": "TenantMigrationRecipientService", + "Value": [ + { + "Key": "state", + "Value": "running" + }, + { + "Key": "numInstances", + "Value": 0 + } + ] + }, + { + "Key": "TenantMigrationDonorService", + "Value": [ + { + "Key": "state", + "Value": "running" + }, + { + "Key": "numInstances", + "Value": 0 + } + ] + } + ] + }, + { + "Key": "rbid", + "Value": 2 + }, + { + "Key": "userWriteBlockMode", + "Value": 1 + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json new file mode 100644 index 00000000000000..ecf766715a5f2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json @@ -0,0 +1,129 @@ +{ + "Process": "mongos", + "OpCounters": { + "Insert": 0, + "Query": 10, + "Update": 0, + "Delete": 0, + "GetMore": 0, + "Command": 227283 + }, + "OpLatencies": null, + "Connections": { + "Current": 18, + "Available": 838842, + "TotalCreated": 89, + "Active": 5, + "Threaded": 18, + "ExhaustIsMaster": 0, + "ExhaustHello": 3, + "AwaitingTopologyChanges": 4 + }, + "Network": { + "BytesIn": 57943348, + "BytesOut": 247343709, + "NumRequests": 227310, + "NumSlowDNSOperations": 0, + "NumSlowSSLOperations": 0 + }, + "Memory": { + "Resident": 81, + "Virtual": 2476 + }, + "Metrics": { + "Cursor": {}, + "Document": { + "Deleted": 0, + "Inserted": 0, + "Returned": 0, + "Updated": 0 + }, + "QueryExecutor": { + "Scanned": 0, + "ScannedObjects": 0 + } + }, + "ExtraInfo": { + "PageFaults": 526 + }, + "Asserts": { + "Regular": 0, + "Warning": 0, + "Msg": 0, + "User": 352, + "Tripwire": 0, + "Rollovers": 0 + }, + "Transactions": { + "CurrentActive": 0, + "CurrentInactive": 0, + "CurrentOpen": 0, + "CurrentPrepared": null, + "TotalAborted": 0, + "TotalCommitted": 0, + "TotalStarted": 0, + "TotalPrepared": null, + "CommitTypes": { + "NoShards": { + "initiated": 0, + "successful": 0, + "successfulDurationMicros": 0 + }, + "SingleShard": { + "initiated": 0, + "successful": 0, + "successfulDurationMicros": 0 + }, + "SingleWriteShard": { + "initiated": 0, + "successful": 0, + "successfulDurationMicros": 0 + }, + "ReadOnly": { + "initiated": 0, + "successful": 0, + "successfulDurationMicros": 0 + }, + "TwoPhaseCommit": { + "initiated": 0, + "successful": 0, + "successfulDurationMicros": 0 + }, + "RecoverWithToken": { + "initiated": 0, + "successful": 0, + 
"successfulDurationMicros": 0 + } + } + }, + "GlobalLock": null, + "Tcmalloc": { + "Generic": { + "CurrentAllocatedBytes": 13519784, + "HeapSize": 24576000 + }, + "Tcmalloc": { + "PageheapFreeBytes": 5697536, + "PageheapUnmappedBytes": 57344, + "MaxTotalThreadCacheBytes": 1042284544, + "CurrentTotalThreadCacheBytes": 1638104, + "TotalFreeBytes": 5301336, + "CentralCacheFreeBytes": 736960, + "TransferCacheFreeBytes": 2926272, + "ThreadCacheFreeBytes": 1638104, + "AggressiveMemoryDecommit": 0, + "PageheapCommittedBytes": 24518656, + "PageheapScavengeBytes": 0, + "PageheapCommitCount": 480, + "PageheapTotalCommitBytes": 84799488, + "PageheapDecommitCount": 127, + "PageheapTotalDecommitBytes": 60280832, + "PageheapReserveCount": 15, + "PageheapTotalReserveBytes": 24576000, + "SpinlockTotalDelayNs": 96785212 + } + }, + "Locks": null, + "WiredTiger": null, + "Repl": null +} diff --git a/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json new file mode 100644 index 00000000000000..c97a77f318e885 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json @@ -0,0 +1,27 @@ +{ + "Date": "2022-12-30T22:19:29.572Z", + "Members": [ + { + "Name": "mongodb-primary:27017", + "Self": true, + "State": 1, + "Health": 1, + "OptimeDate": "2022-12-30T22:19:25Z", + "LastHeartbeat": null, + "LastHeartbeatRecv": null, + "PingMs": null, + "Uptime": 192588 + }, + { + "Name": "mongodb-secondary:27017", + "Self": null, + "State": 2, + "Health": 1, + "OptimeDate": "2022-12-30T22:19:25Z", + "LastHeartbeat": "2022-12-30T22:19:28.214Z", + "LastHeartbeatRecv": "2022-12-30T22:19:28.213Z", + "PingMs": 0, + "Uptime": 192370 + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/README.md b/src/go/collectors/go.d.plugin/modules/mysql/README.md new file mode 120000 index 00000000000000..edf116deef1e66 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/README.md @@ -0,0 +1 @@ +integrations/mysql.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/charts.go b/src/go/collectors/go.d.plugin/modules/mysql/charts.go new file mode 100644 index 00000000000000..c5d017ca477545 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/charts.go @@ -0,0 +1,1239 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioBandwidth = module.Priority + iota + prioQueries + prioQueriesType + prioHandlers + prioTableOpenCacheOverflows + prioTableLocks + prioTableJoinIssues + prioTableSortIssues + prioTmpOperations + prioConnections + prioActiveConnections + prioBinlogCache + prioBinlogStatementCache + prioThreads + prioThreadsCreated + prioThreadCacheMisses + prioInnoDBIO + prioInnoDBIOOperations + prioInnoDBIOPendingOperations + prioInnoDBLog + prioInnoDBOSLog + prioInnoDBOSLogFsyncWrites + prioInnoDBOSLogIO + prioInnoDBCurRowLock + prioInnoDBRows + prioInnoDBBufferPoolPages + prioInnoDBBufferPoolPagesFlushed + prioInnoDBBufferPoolBytes + prioInnoDBBufferPoolReadAhead + prioInnoDBBufferPoolReadAheadRnd + prioInnoDBBufferPoolOperations + prioMyISAMKeyBlocks + prioMyISAMKeyRequests + prioMyISAMKeyDiskOperations + prioOpenFiles + prioOpenFilesRate + prioConnectionErrors + prioOpenedTables + prioOpenTables + prioProcessListFetchQueryDuration + prioProcessListQueries + prioProcessListLongestQueryDuration + 
prioInnoDBDeadlocks + prioQCacheOperations + prioQCacheQueries + prioQCacheFreeMem + prioQCacheMemBlocks + prioGaleraWriteSets + prioGaleraBytes + prioGaleraQueue + prioGaleraConflicts + prioGaleraFlowControl + prioGaleraClusterStatus + prioGaleraClusterState + prioGaleraClusterSize + prioGaleraClusterWeight + prioGaleraClusterConnectionStatus + prioGaleraReadinessState + prioGaleraOpenTransactions + prioGaleraThreadCount + prioSlaveSecondsBehindMaster + prioSlaveSQLIOThreadRunningState + prioUserStatsCPUTime + prioUserStatsRows + prioUserStatsCommands + prioUserStatsDeniedCommands + prioUserStatsTransactions + prioUserStatsBinlogWritten + prioUserStatsEmptyQueries + prioUserStatsConnections + prioUserStatsLostConnections + prioUserStatsDeniedConnections +) + +var baseCharts = module.Charts{ + chartBandwidth.Copy(), + chartQueries.Copy(), + chartQueriesType.Copy(), + chartHandlers.Copy(), + chartTableLocks.Copy(), + chartTableJoinIssues.Copy(), + chartTableSortIssues.Copy(), + chartTmpOperations.Copy(), + chartConnections.Copy(), + chartActiveConnections.Copy(), + chartThreads.Copy(), + chartThreadCreationRate.Copy(), + chartThreadsCacheMisses.Copy(), + chartInnoDBIO.Copy(), + chartInnoDBIOOperations.Copy(), + chartInnoDBPendingIOOperations.Copy(), + chartInnoDBLogOperations.Copy(), + chartInnoDBCurrentRowLocks.Copy(), + chartInnoDBRowsOperations.Copy(), + chartInnoDBBufferPoolPages.Copy(), + chartInnoDBBufferPoolPagesFlushed.Copy(), + chartInnoDBBufferPoolBytes.Copy(), + chartInnoDBBufferPoolReadAhead.Copy(), + chartInnoDBBufferPoolReadAheadRnd.Copy(), + chartInnoDBBufferPoolOperations.Copy(), + chartOpenFiles.Copy(), + chartOpenedFilesRate.Copy(), + chartConnectionErrors.Copy(), + chartOpenedTables.Copy(), + chartOpenTables.Copy(), + chartProcessListFetchQueryDuration.Copy(), + chartProcessListQueries.Copy(), + chartProcessListLongestQueryDuration.Copy(), +} + +var ( + chartBandwidth = module.Chart{ + ID: "net", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "mysql.net", + Type: module.Area, + Priority: prioBandwidth, + Dims: module.Dims{ + {ID: "bytes_received", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "bytes_sent", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000}, + }, + } + chartQueries = module.Chart{ + ID: "queries", + Title: "Queries", + Units: "queries/s", + Fam: "queries", + Ctx: "mysql.queries", + Priority: prioQueries, + Dims: module.Dims{ + {ID: "queries", Name: "queries", Algo: module.Incremental}, + {ID: "questions", Name: "questions", Algo: module.Incremental}, + {ID: "slow_queries", Name: "slow_queries", Algo: module.Incremental}, + }, + } + chartQueriesType = module.Chart{ + ID: "queries_type", + Title: "Queries By Type", + Units: "queries/s", + Fam: "queries", + Ctx: "mysql.queries_type", + Type: module.Stacked, + Priority: prioQueriesType, + Dims: module.Dims{ + {ID: "com_select", Name: "select", Algo: module.Incremental}, + {ID: "com_delete", Name: "delete", Algo: module.Incremental}, + {ID: "com_update", Name: "update", Algo: module.Incremental}, + {ID: "com_insert", Name: "insert", Algo: module.Incremental}, + {ID: "com_replace", Name: "replace", Algo: module.Incremental}, + }, + } + chartHandlers = module.Chart{ + ID: "handlers", + Title: "Handlers", + Units: "handlers/s", + Fam: "handlers", + Ctx: "mysql.handlers", + Priority: prioHandlers, + Dims: module.Dims{ + {ID: "handler_commit", Name: "commit", Algo: module.Incremental}, + {ID: "handler_delete", Name: "delete", Algo: module.Incremental}, + {ID: 
"handler_prepare", Name: "prepare", Algo: module.Incremental}, + {ID: "handler_read_first", Name: "read first", Algo: module.Incremental}, + {ID: "handler_read_key", Name: "read key", Algo: module.Incremental}, + {ID: "handler_read_next", Name: "read next", Algo: module.Incremental}, + {ID: "handler_read_prev", Name: "read prev", Algo: module.Incremental}, + {ID: "handler_read_rnd", Name: "read rnd", Algo: module.Incremental}, + {ID: "handler_read_rnd_next", Name: "read rnd next", Algo: module.Incremental}, + {ID: "handler_rollback", Name: "rollback", Algo: module.Incremental}, + {ID: "handler_savepoint", Name: "savepoint", Algo: module.Incremental}, + {ID: "handler_savepoint_rollback", Name: "savepointrollback", Algo: module.Incremental}, + {ID: "handler_update", Name: "update", Algo: module.Incremental}, + {ID: "handler_write", Name: "write", Algo: module.Incremental}, + }, + } + chartTableOpenCacheOverflows = module.Chart{ + ID: "table_open_cache_overflows", + Title: "Table open cache overflows", + Units: "overflows/s", + Fam: "open cache", + Ctx: "mysql.table_open_cache_overflows", + Priority: prioTableOpenCacheOverflows, + Dims: module.Dims{ + {ID: "table_open_cache_overflows", Name: "open_cache", Algo: module.Incremental}, + }, + } + chartTableLocks = module.Chart{ + ID: "table_locks", + Title: "Table Locks", + Units: "locks/s", + Fam: "locks", + Ctx: "mysql.table_locks", + Priority: prioTableLocks, + Dims: module.Dims{ + {ID: "table_locks_immediate", Name: "immediate", Algo: module.Incremental}, + {ID: "table_locks_waited", Name: "waited", Algo: module.Incremental, Mul: -1}, + }, + } + chartTableJoinIssues = module.Chart{ + ID: "join_issues", + Title: "Table Select Join Issues", + Units: "joins/s", + Fam: "issues", + Ctx: "mysql.join_issues", + Priority: prioTableJoinIssues, + Dims: module.Dims{ + {ID: "select_full_join", Name: "full join", Algo: module.Incremental}, + {ID: "select_full_range_join", Name: "full range join", Algo: module.Incremental}, + {ID: "select_range", Name: "range", Algo: module.Incremental}, + {ID: "select_range_check", Name: "range check", Algo: module.Incremental}, + {ID: "select_scan", Name: "scan", Algo: module.Incremental}, + }, + } + chartTableSortIssues = module.Chart{ + ID: "sort_issues", + Title: "Table Sort Issues", + Units: "issues/s", + Fam: "issues", + Ctx: "mysql.sort_issues", + Priority: prioTableSortIssues, + Dims: module.Dims{ + {ID: "sort_merge_passes", Name: "merge passes", Algo: module.Incremental}, + {ID: "sort_range", Name: "range", Algo: module.Incremental}, + {ID: "sort_scan", Name: "scan", Algo: module.Incremental}, + }, + } + chartTmpOperations = module.Chart{ + ID: "tmp", + Title: "Tmp Operations", + Units: "events/s", + Fam: "temporaries", + Ctx: "mysql.tmp", + Priority: prioTmpOperations, + Dims: module.Dims{ + {ID: "created_tmp_disk_tables", Name: "disk tables", Algo: module.Incremental}, + {ID: "created_tmp_files", Name: "files", Algo: module.Incremental}, + {ID: "created_tmp_tables", Name: "tables", Algo: module.Incremental}, + }, + } + chartConnections = module.Chart{ + ID: "connections", + Title: "Connections", + Units: "connections/s", + Fam: "connections", + Ctx: "mysql.connections", + Priority: prioConnections, + Dims: module.Dims{ + {ID: "connections", Name: "all", Algo: module.Incremental}, + {ID: "aborted_connects", Name: "aborted", Algo: module.Incremental}, + }, + } + chartActiveConnections = module.Chart{ + ID: "connections_active", + Title: "Active Connections", + Units: "connections", + Fam: "connections", + Ctx: 
"mysql.connections_active", + Priority: prioActiveConnections, + Dims: module.Dims{ + {ID: "threads_connected", Name: "active"}, + {ID: "max_connections", Name: "limit"}, + {ID: "max_used_connections", Name: "max active"}, + }, + } + chartThreads = module.Chart{ + ID: "threads", + Title: "Threads", + Units: "threads", + Fam: "threads", + Ctx: "mysql.threads", + Priority: prioThreads, + Dims: module.Dims{ + {ID: "threads_connected", Name: "connected"}, + {ID: "threads_cached", Name: "cached", Mul: -1}, + {ID: "threads_running", Name: "running"}, + }, + } + chartThreadCreationRate = module.Chart{ + ID: "threads_creation_rate", + Title: "Threads Creation Rate", + Units: "threads/s", + Fam: "threads", + Ctx: "mysql.threads_created", + Priority: prioThreadsCreated, + Dims: module.Dims{ + {ID: "threads_created", Name: "created", Algo: module.Incremental}, + }, + } + chartThreadsCacheMisses = module.Chart{ + ID: "thread_cache_misses", + Title: "Threads Cache Misses", + Units: "misses", + Fam: "threads", + Ctx: "mysql.thread_cache_misses", + Type: module.Area, + Priority: prioThreadCacheMisses, + Dims: module.Dims{ + {ID: "thread_cache_misses", Name: "misses", Div: 100}, + }, + } + chartInnoDBIO = module.Chart{ + ID: "innodb_io", + Title: "InnoDB I/O Bandwidth", + Units: "KiB/s", + Fam: "innodb", + Ctx: "mysql.innodb_io", + Type: module.Area, + Priority: prioInnoDBIO, + Dims: module.Dims{ + {ID: "innodb_data_read", Name: "read", Algo: module.Incremental, Div: 1024}, + {ID: "innodb_data_written", Name: "write", Algo: module.Incremental, Div: 1024}, + }, + } + chartInnoDBIOOperations = module.Chart{ + ID: "innodb_io_ops", + Title: "InnoDB I/O Operations", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_io_ops", + Priority: prioInnoDBIOOperations, + Dims: module.Dims{ + {ID: "innodb_data_reads", Name: "reads", Algo: module.Incremental}, + {ID: "innodb_data_writes", Name: "writes", Algo: module.Incremental, Mul: -1}, + {ID: "innodb_data_fsyncs", Name: "fsyncs", Algo: module.Incremental}, + }, + } + chartInnoDBPendingIOOperations = module.Chart{ + ID: "innodb_io_pending_ops", + Title: "InnoDB Pending I/O Operations", + Units: "operations", + Fam: "innodb", + Ctx: "mysql.innodb_io_pending_ops", + Priority: prioInnoDBIOPendingOperations, + Dims: module.Dims{ + {ID: "innodb_data_pending_reads", Name: "reads"}, + {ID: "innodb_data_pending_writes", Name: "writes", Mul: -1}, + {ID: "innodb_data_pending_fsyncs", Name: "fsyncs"}, + }, + } + chartInnoDBLogOperations = module.Chart{ + ID: "innodb_log", + Title: "InnoDB Log Operations", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_log", + Priority: prioInnoDBLog, + Dims: module.Dims{ + {ID: "innodb_log_waits", Name: "waits", Algo: module.Incremental}, + {ID: "innodb_log_write_requests", Name: "write requests", Algo: module.Incremental, Mul: -1}, + {ID: "innodb_log_writes", Name: "writes", Algo: module.Incremental, Mul: -1}, + }, + } + chartInnoDBCurrentRowLocks = module.Chart{ + ID: "innodb_cur_row_lock", + Title: "InnoDB Current Row Locks", + Units: "operations", + Fam: "innodb", + Ctx: "mysql.innodb_cur_row_lock", + Type: module.Area, + Priority: prioInnoDBCurRowLock, + Dims: module.Dims{ + {ID: "innodb_row_lock_current_waits", Name: "current waits"}, + }, + } + chartInnoDBRowsOperations = module.Chart{ + ID: "innodb_rows", + Title: "InnoDB Row Operations", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_rows", + Type: module.Area, + Priority: prioInnoDBRows, + Dims: module.Dims{ + {ID: "innodb_rows_inserted", 
Name: "inserted", Algo: module.Incremental}, + {ID: "innodb_rows_read", Name: "read", Algo: module.Incremental}, + {ID: "innodb_rows_updated", Name: "updated", Algo: module.Incremental}, + {ID: "innodb_rows_deleted", Name: "deleted", Algo: module.Incremental, Mul: -1}, + }, + } + chartInnoDBBufferPoolPages = module.Chart{ + ID: "innodb_buffer_pool_pages", + Title: "InnoDB Buffer Pool Pages", + Units: "pages", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_pages", + Priority: prioInnoDBBufferPoolPages, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_pages_data", Name: "data"}, + {ID: "innodb_buffer_pool_pages_dirty", Name: "dirty", Mul: -1}, + {ID: "innodb_buffer_pool_pages_free", Name: "free"}, + {ID: "innodb_buffer_pool_pages_misc", Name: "misc", Mul: -1}, + {ID: "innodb_buffer_pool_pages_total", Name: "total"}, + }, + } + chartInnoDBBufferPoolPagesFlushed = module.Chart{ + ID: "innodb_buffer_pool_flush_pages_requests", + Title: "InnoDB Buffer Pool Flush Pages Requests", + Units: "requests/s", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_pages_flushed", + Priority: prioInnoDBBufferPoolPagesFlushed, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_pages_flushed", Name: "flush pages", Algo: module.Incremental}, + }, + } + chartInnoDBBufferPoolBytes = module.Chart{ + ID: "innodb_buffer_pool_bytes", + Title: "InnoDB Buffer Pool Bytes", + Units: "MiB", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_bytes", + Type: module.Area, + Priority: prioInnoDBBufferPoolBytes, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_bytes_data", Name: "data", Div: 1024 * 1024}, + {ID: "innodb_buffer_pool_bytes_dirty", Name: "dirty", Mul: -1, Div: 1024 * 1024}, + }, + } + chartInnoDBBufferPoolReadAhead = module.Chart{ + ID: "innodb_buffer_pool_read_ahead", + Title: "InnoDB Buffer Pool Read Pages", + Units: "pages/s", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_read_ahead", + Type: module.Area, + Priority: prioInnoDBBufferPoolReadAhead, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_read_ahead", Name: "all", Algo: module.Incremental}, + {ID: "innodb_buffer_pool_read_ahead_evicted", Name: "evicted", Algo: module.Incremental, Mul: -1}, + }, + } + chartInnoDBBufferPoolReadAheadRnd = module.Chart{ + ID: "innodb_buffer_pool_read_ahead_rnd", + Title: "InnoDB Buffer Pool Random Read-Aheads", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_read_ahead_rnd", + Priority: prioInnoDBBufferPoolReadAheadRnd, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_read_ahead_rnd", Name: "read-ahead", Algo: module.Incremental}, + }, + } + chartInnoDBBufferPoolOperations = module.Chart{ + ID: "innodb_buffer_pool_ops", + Title: "InnoDB Buffer Pool Operations", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_buffer_pool_ops", + Type: module.Area, + Priority: prioInnoDBBufferPoolOperations, + Dims: module.Dims{ + {ID: "innodb_buffer_pool_reads", Name: "disk reads", Algo: module.Incremental}, + {ID: "innodb_buffer_pool_wait_free", Name: "wait free", Algo: module.Incremental, Mul: -1, Div: 1}, + }, + } + chartOpenFiles = module.Chart{ + ID: "files", + Title: "Open Files", + Units: "files", + Fam: "files", + Ctx: "mysql.files", + Priority: prioOpenFiles, + Dims: module.Dims{ + {ID: "open_files", Name: "files"}, + }, + } + chartOpenedFilesRate = module.Chart{ + ID: "files_rate", + Title: "Opened Files Rate", + Units: "files/s", + Fam: "files", + Ctx: "mysql.files_rate", + Priority: prioOpenFilesRate, + Dims: module.Dims{ + {ID: "opened_files", Name: "files", Algo: module.Incremental}, + }, 
+ } + chartConnectionErrors = module.Chart{ + ID: "connection_errors", + Title: "Connection Errors", + Units: "errors/s", + Fam: "connections", + Ctx: "mysql.connection_errors", + Priority: prioConnectionErrors, + Dims: module.Dims{ + {ID: "connection_errors_accept", Name: "accept", Algo: module.Incremental}, + {ID: "connection_errors_internal", Name: "internal", Algo: module.Incremental}, + {ID: "connection_errors_max_connections", Name: "max", Algo: module.Incremental}, + {ID: "connection_errors_peer_address", Name: "peer addr", Algo: module.Incremental}, + {ID: "connection_errors_select", Name: "select", Algo: module.Incremental}, + {ID: "connection_errors_tcpwrap", Name: "tcpwrap", Algo: module.Incremental}, + }, + } + chartOpenedTables = module.Chart{ + ID: "opened_tables", + Title: "Opened Tables", + Units: "tables/s", + Fam: "open tables", + Ctx: "mysql.opened_tables", + Priority: prioOpenedTables, + Dims: module.Dims{ + {ID: "opened_tables", Name: "tables", Algo: module.Incremental}, + }, + } + chartOpenTables = module.Chart{ + ID: "open_tables", + Title: "Open Tables", + Units: "tables", + Fam: "open tables", + Ctx: "mysql.open_tables", + Type: module.Area, + Priority: prioOpenTables, + Dims: module.Dims{ + {ID: "table_open_cache", Name: "cache"}, + {ID: "open_tables", Name: "tables"}, + }, + } + chartProcessListFetchQueryDuration = module.Chart{ + ID: "process_list_fetch_duration", + Title: "Process List Fetch Duration", + Units: "milliseconds", + Fam: "process list", + Ctx: "mysql.process_list_fetch_query_duration", + Priority: prioProcessListFetchQueryDuration, + Dims: module.Dims{ + {ID: "process_list_fetch_query_duration", Name: "duration"}, + }, + } + chartProcessListQueries = module.Chart{ + ID: "process_list_queries_count", + Title: "Queries Count", + Units: "queries", + Fam: "process list", + Ctx: "mysql.process_list_queries_count", + Type: module.Stacked, + Priority: prioProcessListQueries, + Dims: module.Dims{ + {ID: "process_list_queries_count_system", Name: "system"}, + {ID: "process_list_queries_count_user", Name: "user"}, + }, + } + chartProcessListLongestQueryDuration = module.Chart{ + ID: "process_list_longest_query_duration", + Title: "Longest Query Duration", + Units: "seconds", + Fam: "process list", + Ctx: "mysql.process_list_longest_query_duration", + Priority: prioProcessListLongestQueryDuration, + Dims: module.Dims{ + {ID: "process_list_longest_query_duration", Name: "duration"}, + }, + } +) + +var chartsInnoDBOSLog = module.Charts{ + chartInnoDBOSLogPendingOperations.Copy(), + chartInnoDBOSLogOperations.Copy(), + chartInnoDBOSLogIO.Copy(), +} + +var ( + chartInnoDBOSLogPendingOperations = module.Chart{ + ID: "innodb_os_log", + Title: "InnoDB OS Log Pending Operations", + Units: "operations", + Fam: "innodb", + Ctx: "mysql.innodb_os_log", + Priority: prioInnoDBOSLog, + Dims: module.Dims{ + {ID: "innodb_os_log_pending_fsyncs", Name: "fsyncs"}, + {ID: "innodb_os_log_pending_writes", Name: "writes", Mul: -1}, + }, + } + chartInnoDBOSLogOperations = module.Chart{ + ID: "innodb_os_log_fsync_writes", + Title: "InnoDB OS Log Operations", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_os_log_fsync_writes", + Priority: prioInnoDBOSLogFsyncWrites, + Dims: module.Dims{ + {ID: "innodb_os_log_fsyncs", Name: "fsyncs", Algo: module.Incremental}, + }, + } + chartInnoDBOSLogIO = module.Chart{ + ID: "innodb_os_log_io", + Title: "InnoDB OS Log Bandwidth", + Units: "KiB/s", + Fam: "innodb", + Ctx: "mysql.innodb_os_log_io", + Type: module.Area, + Priority: 
prioInnoDBOSLogIO, + Dims: module.Dims{ + {ID: "innodb_os_log_written", Name: "write", Algo: module.Incremental, Mul: -1, Div: 1024}, + }, + } +) + +var chartInnoDBDeadlocks = module.Chart{ + ID: "innodb_deadlocks", + Title: "InnoDB Deadlocks", + Units: "operations/s", + Fam: "innodb", + Ctx: "mysql.innodb_deadlocks", + Type: module.Area, + Priority: prioInnoDBDeadlocks, + Dims: module.Dims{ + {ID: "innodb_deadlocks", Name: "deadlocks", Algo: module.Incremental}, + }, +} + +var chartsQCache = module.Charts{ + chartQCacheOperations.Copy(), + chartQCacheQueries.Copy(), + chartQCacheFreeMemory.Copy(), + chartQCacheMemoryBlocks.Copy(), +} + +var ( + chartQCacheOperations = module.Chart{ + ID: "qcache_ops", + Title: "QCache Operations", + Units: "queries/s", + Fam: "qcache", + Ctx: "mysql.qcache_ops", + Priority: prioQCacheOperations, + Dims: module.Dims{ + {ID: "qcache_hits", Name: "hits", Algo: module.Incremental}, + {ID: "qcache_lowmem_prunes", Name: "lowmem prunes", Algo: module.Incremental, Mul: -1}, + {ID: "qcache_inserts", Name: "inserts", Algo: module.Incremental}, + {ID: "qcache_not_cached", Name: "not cached", Algo: module.Incremental, Mul: -1}, + }, + } + chartQCacheQueries = module.Chart{ + ID: "qcache", + Title: "QCache Queries in Cache", + Units: "queries", + Fam: "qcache", + Ctx: "mysql.qcache", + Priority: prioQCacheQueries, + Dims: module.Dims{ + {ID: "qcache_queries_in_cache", Name: "queries", Algo: module.Absolute}, + }, + } + chartQCacheFreeMemory = module.Chart{ + ID: "qcache_freemem", + Title: "QCache Free Memory", + Units: "MiB", + Fam: "qcache", + Ctx: "mysql.qcache_freemem", + Type: module.Area, + Priority: prioQCacheFreeMem, + Dims: module.Dims{ + {ID: "qcache_free_memory", Name: "free", Div: 1024 * 1024}, + }, + } + chartQCacheMemoryBlocks = module.Chart{ + ID: "qcache_memblocks", + Title: "QCache Memory Blocks", + Units: "blocks", + Fam: "qcache", + Ctx: "mysql.qcache_memblocks", + Priority: prioQCacheMemBlocks, + Dims: module.Dims{ + {ID: "qcache_free_blocks", Name: "free"}, + {ID: "qcache_total_blocks", Name: "total"}, + }, + } +) + +var chartsGalera = module.Charts{ + chartGaleraWriteSets.Copy(), + chartGaleraBytes.Copy(), + chartGaleraQueue.Copy(), + chartGaleraConflicts.Copy(), + chartGaleraFlowControl.Copy(), + chartGaleraClusterStatus.Copy(), + chartGaleraClusterState.Copy(), + chartGaleraClusterSize.Copy(), + chartGaleraClusterWeight.Copy(), + chartGaleraClusterConnectionStatus.Copy(), + chartGaleraReadinessState.Copy(), + chartGaleraOpenTransactions.Copy(), + chartGaleraThreads.Copy(), +} +var ( + chartGaleraWriteSets = module.Chart{ + ID: "galera_writesets", + Title: "Replicated Writesets", + Units: "writesets/s", + Fam: "galera", + Ctx: "mysql.galera_writesets", + Priority: prioGaleraWriteSets, + Dims: module.Dims{ + {ID: "wsrep_received", Name: "rx", Algo: module.Incremental}, + {ID: "wsrep_replicated", Name: "tx", Algo: module.Incremental, Mul: -1}, + }, + } + chartGaleraBytes = module.Chart{ + ID: "galera_bytes", + Title: "Replicated Bytes", + Units: "KiB/s", + Fam: "galera", + Ctx: "mysql.galera_bytes", + Type: module.Area, + Priority: prioGaleraBytes, + Dims: module.Dims{ + {ID: "wsrep_received_bytes", Name: "rx", Algo: module.Incremental, Div: 1024}, + {ID: "wsrep_replicated_bytes", Name: "tx", Algo: module.Incremental, Mul: -1, Div: 1024}, + }, + } + chartGaleraQueue = module.Chart{ + ID: "galera_queue", + Title: "Galera Queue", + Units: "writesets", + Fam: "galera", + Ctx: "mysql.galera_queue", + Priority: prioGaleraQueue, + Dims: module.Dims{ + 
{ID: "wsrep_local_recv_queue", Name: "rx"}, + {ID: "wsrep_local_send_queue", Name: "tx", Mul: -1}, + }, + } + chartGaleraConflicts = module.Chart{ + ID: "galera_conflicts", + Title: "Replication Conflicts", + Units: "transactions", + Fam: "galera", + Ctx: "mysql.galera_conflicts", + Type: module.Area, + Priority: prioGaleraConflicts, + Dims: module.Dims{ + {ID: "wsrep_local_bf_aborts", Name: "bf aborts", Algo: module.Incremental}, + {ID: "wsrep_local_cert_failures", Name: "cert fails", Algo: module.Incremental, Mul: -1}, + }, + } + chartGaleraFlowControl = module.Chart{ + ID: "galera_flow_control", + Title: "Flow Control", + Units: "ms", + Fam: "galera", + Ctx: "mysql.galera_flow_control", + Type: module.Area, + Priority: prioGaleraFlowControl, + Dims: module.Dims{ + {ID: "wsrep_flow_control_paused_ns", Name: "paused", Algo: module.Incremental, Div: 1000000}, + }, + } + chartGaleraClusterStatus = module.Chart{ + ID: "galera_cluster_status", + Title: "Cluster Component Status", + Units: "status", + Fam: "galera", + Ctx: "mysql.galera_cluster_status", + Priority: prioGaleraClusterStatus, + Dims: module.Dims{ + {ID: "wsrep_cluster_status_primary", Name: "primary"}, + {ID: "wsrep_cluster_status_non_primary", Name: "non_primary"}, + {ID: "wsrep_cluster_status_disconnected", Name: "disconnected"}, + }, + } + chartGaleraClusterState = module.Chart{ + ID: "galera_cluster_state", + Title: "Cluster Component State", + Units: "state", + Fam: "galera", + Ctx: "mysql.galera_cluster_state", + Priority: prioGaleraClusterState, + Dims: module.Dims{ + {ID: "wsrep_local_state_undefined", Name: "undefined"}, + {ID: "wsrep_local_state_joiner", Name: "joining"}, + {ID: "wsrep_local_state_donor", Name: "donor"}, + {ID: "wsrep_local_state_joined", Name: "joined"}, + {ID: "wsrep_local_state_synced", Name: "synced"}, + {ID: "wsrep_local_state_error", Name: "error"}, + }, + } + chartGaleraClusterSize = module.Chart{ + ID: "galera_cluster_size", + Title: "Number of Nodes in the Cluster", + Units: "nodes", + Fam: "galera", + Ctx: "mysql.galera_cluster_size", + Priority: prioGaleraClusterSize, + Dims: module.Dims{ + {ID: "wsrep_cluster_size", Name: "nodes"}, + }, + } + chartGaleraClusterWeight = module.Chart{ + ID: "galera_cluster_weight", + Title: "The Total Weight of the Current Members in the Cluster", + Units: "weight", + Fam: "galera", + Ctx: "mysql.galera_cluster_weight", + Priority: prioGaleraClusterWeight, + Dims: module.Dims{ + {ID: "wsrep_cluster_weight", Name: "weight"}, + }, + } + chartGaleraClusterConnectionStatus = module.Chart{ + ID: "galera_connected", + Title: "Cluster Connection Status", + Units: "boolean", + Fam: "galera", + Ctx: "mysql.galera_connected", + Priority: prioGaleraClusterConnectionStatus, + Dims: module.Dims{ + {ID: "wsrep_connected", Name: "connected"}, + }, + } + chartGaleraReadinessState = module.Chart{ + ID: "galera_ready", + Title: "Accept Queries Readiness Status", + Units: "boolean", + Fam: "galera", + Ctx: "mysql.galera_ready", + Priority: prioGaleraReadinessState, + Dims: module.Dims{ + {ID: "wsrep_ready", Name: "ready"}, + }, + } + chartGaleraOpenTransactions = module.Chart{ + ID: "galera_open_transactions", + Title: "Open Transactions", + Units: "transactions", + Fam: "galera", + Ctx: "mysql.galera_open_transactions", + Priority: prioGaleraOpenTransactions, + Dims: module.Dims{ + {ID: "wsrep_open_transactions", Name: "open"}, + }, + } + chartGaleraThreads = module.Chart{ + ID: "galera_thread_count", + Title: "Total Number of WSRep (applier/rollbacker) Threads", + Units: 
"threads", + Fam: "galera", + Ctx: "mysql.galera_thread_count", + Priority: prioGaleraThreadCount, + Dims: module.Dims{ + {ID: "wsrep_thread_count", Name: "threads"}, + }, + } +) + +var chartsMyISAM = module.Charts{ + chartMyISAMKeyCacheBlocks.Copy(), + chartMyISAMKeyCacheRequests.Copy(), + chartMyISAMKeyCacheDiskOperations.Copy(), +} +var ( + chartMyISAMKeyCacheBlocks = module.Chart{ + ID: "key_blocks", + Title: "MyISAM Key Cache Blocks", + Units: "blocks", + Fam: "myisam", + Ctx: "mysql.key_blocks", + Priority: prioMyISAMKeyBlocks, + Dims: module.Dims{ + {ID: "key_blocks_unused", Name: "unused"}, + {ID: "key_blocks_used", Name: "used", Mul: -1}, + {ID: "key_blocks_not_flushed", Name: "not flushed"}, + }, + } + chartMyISAMKeyCacheRequests = module.Chart{ + ID: "key_requests", + Title: "MyISAM Key Cache Requests", + Units: "requests/s", + Fam: "myisam", + Ctx: "mysql.key_requests", + Type: module.Area, + Priority: prioMyISAMKeyRequests, + Dims: module.Dims{ + {ID: "key_read_requests", Name: "reads", Algo: module.Incremental}, + {ID: "key_write_requests", Name: "writes", Algo: module.Incremental, Mul: -1}, + }, + } + chartMyISAMKeyCacheDiskOperations = module.Chart{ + ID: "key_disk_ops", + Title: "MyISAM Key Cache Disk Operations", + Units: "operations/s", + Fam: "myisam", + Ctx: "mysql.key_disk_ops", + Priority: prioMyISAMKeyDiskOperations, + Type: module.Area, + Dims: module.Dims{ + {ID: "key_reads", Name: "reads", Algo: module.Incremental}, + {ID: "key_writes", Name: "writes", Algo: module.Incremental, Mul: -1}, + }, + } +) + +var chartsBinlog = module.Charts{ + chartBinlogCache.Copy(), + chartBinlogStatementCache.Copy(), +} + +var ( + chartBinlogCache = module.Chart{ + ID: "binlog_cache", + Title: "Binlog Cache", + Units: "transactions/s", + Fam: "binlog", + Ctx: "mysql.binlog_cache", + Priority: prioBinlogCache, + Dims: module.Dims{ + {ID: "binlog_cache_disk_use", Name: "disk", Algo: module.Incremental}, + {ID: "binlog_cache_use", Name: "all", Algo: module.Incremental}, + }, + } + chartBinlogStatementCache = module.Chart{ + ID: "binlog_stmt_cache", + Title: "Binlog Statement Cache", + Units: "statements/s", + Fam: "binlog", + Ctx: "mysql.binlog_stmt_cache", + Priority: prioBinlogStatementCache, + Dims: module.Dims{ + {ID: "binlog_stmt_cache_disk_use", Name: "disk", Algo: module.Incremental}, + {ID: "binlog_stmt_cache_use", Name: "all", Algo: module.Incremental}, + }, + } +) + +var ( + chartsSlaveReplication = module.Charts{ + chartSlaveBehindSeconds.Copy(), + chartSlaveSQLIOThreadRunningState.Copy(), + } + + chartSlaveBehindSeconds = module.Chart{ + ID: "slave_behind", + Title: "Slave Behind Seconds", + Units: "seconds", + Fam: "slave", + Ctx: "mysql.slave_behind", + Priority: prioSlaveSecondsBehindMaster, + Dims: module.Dims{ + {ID: "seconds_behind_master", Name: "seconds"}, + }, + } + chartSlaveSQLIOThreadRunningState = module.Chart{ + ID: "slave_thread_running", + Title: "I/O / SQL Thread Running State", + Units: "boolean", + Fam: "slave", + Ctx: "mysql.slave_status", + Priority: prioSlaveSQLIOThreadRunningState, + Dims: module.Dims{ + {ID: "slave_sql_running", Name: "sql_running"}, + {ID: "slave_io_running", Name: "io_running"}, + }, + } +) + +func newSlaveReplConnCharts(conn string) *module.Charts { + orig := conn + conn = strings.ToLower(conn) + cs := chartsSlaveReplication.Copy() + for _, chart := range *cs { + chart.ID += "_" + conn + chart.Title += " Connection " + orig + for _, dim := range chart.Dims { + dim.ID += "_" + conn + } + } + return cs +} + +func 
newMariaDBUserStatisticsCharts(user string) *module.Charts { + lcUser := strings.ToLower(user) + charts := chartsTmplUserStats.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, lcUser) + c.Labels = []module.Label{ + {Key: "user", Value: user}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, lcUser) + } + } + return charts +} + +func newPerconaUserStatisticsCharts(user string) *module.Charts { + lcUser := strings.ToLower(user) + charts := chartsTmplPerconaUserStats.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, lcUser) + c.Labels = []module.Label{ + {Key: "user", Value: user}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, lcUser) + } + } + return charts +} + +var ( + chartsTmplUserStats = module.Charts{ + chartUserStatsCPU.Copy(), + chartTmplUserStatsRowsOperations.Copy(), + chartTmplUserStatsCommands.Copy(), + chartTmplUserStatsDeniedCommands.Copy(), + chartTmplUserStatsTransactions.Copy(), + chartTmplUserStatsBinlogWritten.Copy(), + chartTmplUserStatsEmptyQueries.Copy(), + chartTmplUserStatsCreatedConnections.Copy(), + chartTmplUserStatsLostConnections.Copy(), + chartTmplUserStatsDeniedConnections.Copy(), + } + chartsTmplPerconaUserStats = module.Charts{ + chartUserStatsCPU.Copy(), + chartTmplPerconaUserStatsRowsOperations.Copy(), + chartTmplUserStatsCommands.Copy(), + chartTmplUserStatsDeniedCommands.Copy(), + chartTmplUserStatsTransactions.Copy(), + chartTmplUserStatsBinlogWritten.Copy(), + chartTmplUserStatsEmptyQueries.Copy(), + chartTmplUserStatsCreatedConnections.Copy(), + chartTmplUserStatsLostConnections.Copy(), + chartTmplUserStatsDeniedConnections.Copy(), + } + + chartUserStatsCPU = module.Chart{ + ID: "userstats_cpu_%s", + Title: "User CPU Time", + Units: "percentage", + Fam: "user cpu time", + Ctx: "mysql.userstats_cpu", + Priority: prioUserStatsCPUTime, + Dims: module.Dims{ + {ID: "userstats_%s_cpu_time", Name: "used", Mul: 100, Div: 1000, Algo: module.Incremental}, + }, + } + chartTmplUserStatsRowsOperations = module.Chart{ + ID: "userstats_rows_%s", + Title: "User Rows Operations", + Units: "operations/s", + Fam: "user operations", + Ctx: "mysql.userstats_rows", + Type: module.Stacked, + Priority: prioUserStatsRows, + Dims: module.Dims{ + {ID: "userstats_%s_rows_read", Name: "read", Algo: module.Incremental}, + {ID: "userstats_%s_rows_sent", Name: "sent", Algo: module.Incremental}, + {ID: "userstats_%s_rows_updated", Name: "updated", Algo: module.Incremental}, + {ID: "userstats_%s_rows_inserted", Name: "inserted", Algo: module.Incremental}, + {ID: "userstats_%s_rows_deleted", Name: "deleted", Algo: module.Incremental}, + }, + } + chartTmplPerconaUserStatsRowsOperations = module.Chart{ + ID: "userstats_rows_%s", + Title: "User Rows Operations", + Units: "operations/s", + Fam: "user operations", + Ctx: "mysql.userstats_rows", + Type: module.Stacked, + Priority: prioUserStatsRows, + Dims: module.Dims{ + {ID: "userstats_%s_rows_fetched", Name: "fetched", Algo: module.Incremental}, + {ID: "userstats_%s_rows_updated", Name: "updated", Algo: module.Incremental}, + }, + } + chartTmplUserStatsCommands = module.Chart{ + ID: "userstats_commands_%s", + Title: "User Commands", + Units: "commands/s", + Fam: "user commands", + Ctx: "mysql.userstats_commands", + Type: module.Stacked, + Priority: prioUserStatsCommands, + Dims: module.Dims{ + {ID: "userstats_%s_select_commands", Name: "select", Algo: module.Incremental}, + {ID: "userstats_%s_update_commands", Name: "update", Algo: module.Incremental}, + {ID: 
"userstats_%s_other_commands", Name: "other", Algo: module.Incremental}, + }, + } + chartTmplUserStatsDeniedCommands = module.Chart{ + ID: "userstats_denied_commands_%s", + Title: "User Denied Commands", + Units: "commands/s", + Fam: "user commands denied", + Ctx: "mysql.userstats_denied_commands", + Priority: prioUserStatsDeniedCommands, + Dims: module.Dims{ + {ID: "userstats_%s_access_denied", Name: "denied", Algo: module.Incremental}, + }, + } + chartTmplUserStatsTransactions = module.Chart{ + ID: "userstats_transactions_%s", + Title: "User Transactions", + Units: "transactions/s", + Fam: "user transactions", + Ctx: "mysql.userstats_created_transactions", + Type: module.Area, + Priority: prioUserStatsTransactions, + Dims: module.Dims{ + {ID: "userstats_%s_commit_transactions", Name: "commit", Algo: module.Incremental}, + {ID: "userstats_%s_rollback_transactions", Name: "rollback", Algo: module.Incremental}, + }, + } + chartTmplUserStatsBinlogWritten = module.Chart{ + ID: "userstats_binlog_written_%s", + Title: "User Binlog Written", + Units: "B/s", + Fam: "user binlog written", + Ctx: "mysql.userstats_binlog_written", + Priority: prioUserStatsBinlogWritten, + Dims: module.Dims{ + {ID: "userstats_%s_binlog_bytes_written", Name: "written", Algo: module.Incremental}, + }, + } + chartTmplUserStatsEmptyQueries = module.Chart{ + ID: "userstats_empty_queries_%s", + Title: "User Empty Queries", + Units: "queries/s", + Fam: "user empty queries", + Ctx: "mysql.userstats_empty_queries", + Priority: prioUserStatsEmptyQueries, + Dims: module.Dims{ + {ID: "userstats_%s_empty_queries", Name: "empty", Algo: module.Incremental}, + }, + } + chartTmplUserStatsCreatedConnections = module.Chart{ + ID: "userstats_connections_%s", + Title: "User Created Connections", + Units: "connections/s", + Fam: "user connections created ", + Ctx: "mysql.userstats_connections", + Priority: prioUserStatsConnections, + Dims: module.Dims{ + {ID: "userstats_%s_total_connections", Name: "created", Algo: module.Incremental}, + }, + } + chartTmplUserStatsLostConnections = module.Chart{ + ID: "userstats_lost_connections_%s", + Title: "User Lost Connections", + Units: "connections/s", + Fam: "user connections lost", + Ctx: "mysql.userstats_lost_connections", + Priority: prioUserStatsLostConnections, + Dims: module.Dims{ + {ID: "userstats_%s_lost_connections", Name: "lost", Algo: module.Incremental}, + }, + } + chartTmplUserStatsDeniedConnections = module.Chart{ + ID: "userstats_denied_connections_%s", + Title: "User Denied Connections", + Units: "connections/s", + Fam: "user connections denied", + Ctx: "mysql.userstats_denied_connections", + Priority: prioUserStatsDeniedConnections, + Dims: module.Dims{ + {ID: "userstats_%s_denied_connections", Name: "denied", Algo: module.Incremental}, + }, + } +) + +func (m *MySQL) addSlaveReplicationConnCharts(conn string) { + var charts *module.Charts + if conn == "" { + charts = chartsSlaveReplication.Copy() + } else { + charts = newSlaveReplConnCharts(conn) + } + if err := m.Charts().Add(*charts...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addUserStatisticsCharts(user string) { + if m.isPercona { + if err := m.Charts().Add(*newPerconaUserStatisticsCharts(user)...); err != nil { + m.Warning(err) + } + } else { + if err := m.Charts().Add(*newMariaDBUserStatisticsCharts(user)...); err != nil { + m.Warning(err) + } + } +} + +func (m *MySQL) addInnoDBOSLogCharts() { + if err := m.Charts().Add(*chartsInnoDBOSLog.Copy()...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) 
addMyISAMCharts() { + if err := m.Charts().Add(*chartsMyISAM.Copy()...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addBinlogCharts() { + if err := m.Charts().Add(*chartsBinlog.Copy()...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addInnodbDeadlocksChart() { + if err := m.Charts().Add(chartInnoDBDeadlocks.Copy()); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addQCacheCharts() { + if err := m.Charts().Add(*chartsQCache.Copy()...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addGaleraCharts() { + if err := m.Charts().Add(*chartsGalera.Copy()...); err != nil { + m.Warning(err) + } +} + +func (m *MySQL) addTableOpenCacheOverflowChart() { + if err := m.Charts().Add(chartTableOpenCacheOverflows.Copy()); err != nil { + m.Warning(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect.go b/src/go/collectors/go.d.plugin/modules/mysql/collect.go new file mode 100644 index 00000000000000..3ff0882ad1e005 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/blang/semver/v4" +) + +func (m *MySQL) collect() (map[string]int64, error) { + if m.db == nil { + if err := m.openConnection(); err != nil { + return nil, err + } + } + if m.version == nil { + if err := m.collectVersion(); err != nil { + return nil, fmt.Errorf("error on collecting version: %v", err) + } + // https://mariadb.com/kb/en/user-statistics/ + m.doUserStatistics = m.isPercona || m.isMariaDB && m.version.GTE(semver.Version{Major: 10, Minor: 1, Patch: 1}) + } + + mx := make(map[string]int64) + + if err := m.collectGlobalStatus(mx); err != nil { + return nil, fmt.Errorf("error on collecting global status: %v", err) + } + + if hasInnodbOSLog(mx) { + m.addInnoDBOSLogOnce.Do(m.addInnoDBOSLogCharts) + } + if hasInnodbDeadlocks(mx) { + m.addInnodbDeadlocksOnce.Do(m.addInnodbDeadlocksChart) + } + if hasQCacheMetrics(mx) { + m.addQCacheOnce.Do(m.addQCacheCharts) + } + if hasGaleraMetrics(mx) { + m.addGaleraOnce.Do(m.addGaleraCharts) + } + if hasTableOpenCacheOverflowsMetrics(mx) { + m.addTableOpenCacheOverflowsOnce.Do(m.addTableOpenCacheOverflowChart) + } + + now := time.Now() + if now.Sub(m.recheckGlobalVarsTime) > m.recheckGlobalVarsEvery { + if err := m.collectGlobalVariables(); err != nil { + return nil, fmt.Errorf("error on collecting global variables: %v", err) + } + } + mx["max_connections"] = m.varMaxConns + mx["table_open_cache"] = m.varTableOpenCache + + if m.isMariaDB || !strings.Contains(m.varDisabledStorageEngine, "MyISAM") { + m.addMyISAMOnce.Do(m.addMyISAMCharts) + } + if m.varLogBin != "OFF" { + m.addBinlogOnce.Do(m.addBinlogCharts) + } + + // TODO: perhaps make a decision based on privileges?
(SHOW GRANTS FOR CURRENT_USER();) + if m.doSlaveStatus { + if err := m.collectSlaveStatus(mx); err != nil { + m.Warningf("error on collecting slave status: %v", err) + m.doSlaveStatus = errors.Is(err, context.DeadlineExceeded) + } + } + + if m.doUserStatistics { + if err := m.collectUserStatistics(mx); err != nil { + m.Warningf("error on collecting user statistics: %v", err) + m.doUserStatistics = errors.Is(err, context.DeadlineExceeded) + } + } + + if err := m.collectProcessListStatistics(mx); err != nil { + m.Errorf("error on collecting process list statistics: %v", err) + } + + calcThreadCacheMisses(mx) + return mx, nil +} + +func (m *MySQL) openConnection() error { + db, err := sql.Open("mysql", m.DSN) + if err != nil { + return fmt.Errorf("error on opening a connection with the mysql database [%s]: %v", m.safeDSN, err) + } + + db.SetConnMaxLifetime(10 * time.Minute) + + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + _ = db.Close() + return fmt.Errorf("error on pinging the mysql database [%s]: %v", m.safeDSN, err) + } + + m.db = db + return nil +} + +func calcThreadCacheMisses(collected map[string]int64) { + threads, cons := collected["threads_created"], collected["connections"] + if threads == 0 || cons == 0 { + collected["thread_cache_misses"] = 0 + } else { + collected["thread_cache_misses"] = int64(float64(threads) / float64(cons) * 10000) + } +} + +func hasInnodbOSLog(collected map[string]int64) bool { + // removed in MariaDB 10.8 (https://mariadb.com/kb/en/innodb-status-variables/#innodb_os_log_fsyncs) + _, ok := collected["innodb_os_log_fsyncs"] + return ok +} + +func hasInnodbDeadlocks(collected map[string]int64) bool { + _, ok := collected["innodb_deadlocks"] + return ok +} + +func hasGaleraMetrics(collected map[string]int64) bool { + _, ok := collected["wsrep_received"] + return ok +} + +func hasQCacheMetrics(collected map[string]int64) bool { + _, ok := collected["qcache_hits"] + return ok +} + +func hasTableOpenCacheOverflowsMetrics(collected map[string]int64) bool { + _, ok := collected["table_open_cache_overflows"] + return ok +} + +func (m *MySQL) collectQuery(query string, assign func(column, value string, lineEnd bool)) (duration int64, err error) { + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + defer cancel() + + s := time.Now() + rows, err := m.db.QueryContext(ctx, query) + if err != nil { + return 0, err + } + duration = time.Since(s).Milliseconds() + defer func() { _ = rows.Close() }() + + columns, err := rows.Columns() + if err != nil { + return duration, err + } + + vs := makeValues(len(columns)) + for rows.Next() { + if err := rows.Scan(vs...); err != nil { + return duration, err + } + for i, l := 0, len(vs); i < l; i++ { + assign(columns[i], valueToString(vs[i]), i == l-1) + } + } + return duration, rows.Err() +} + +func makeValues(size int) []any { + vs := make([]any, size) + for i := range vs { + vs[i] = &sql.NullString{} + } + return vs +} + +func valueToString(value any) string { + v, ok := value.(*sql.NullString) + if !ok || !v.Valid { + return "" + } + return v.String +} + +func parseInt(s string) int64 { + v, _ := strconv.ParseInt(s, 10, 64) + return v +} + +func parseFloat(s string) float64 { + v, _ := strconv.ParseFloat(s, 64) + return v +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_global_status.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_global_status.go new file mode 100644 index 
00000000000000..c6dff9e933d507 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_global_status.go @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "strings" +) + +const queryShowGlobalStatus = "SHOW GLOBAL STATUS;" + +func (m *MySQL) collectGlobalStatus(mx map[string]int64) error { + // MariaDB: https://mariadb.com/kb/en/server-status-variables/ + // MySQL: https://dev.mysql.com/doc/refman/8.0/en/server-status-variable-reference.html + q := queryShowGlobalStatus + m.Debugf("executing query: '%s'", q) + + var name string + _, err := m.collectQuery(q, func(column, value string, _ bool) { + switch column { + case "Variable_name": + name = value + case "Value": + if !globalStatusKeys[name] { + return + } + switch name { + case "wsrep_connected": + mx[name] = parseInt(convertWsrepConnected(value)) + case "wsrep_ready": + mx[name] = parseInt(convertWsrepReady(value)) + case "wsrep_local_state": + // https://mariadb.com/kb/en/galera-cluster-status-variables/#wsrep_local_state + // https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h#L256 + mx[name+"_undefined"] = boolToInt(value == "0") + mx[name+"_joiner"] = boolToInt(value == "1") + mx[name+"_donor"] = boolToInt(value == "2") + mx[name+"_joined"] = boolToInt(value == "3") + mx[name+"_synced"] = boolToInt(value == "4") + mx[name+"_error"] = boolToInt(parseInt(value) >= 5) + case "wsrep_cluster_status": + // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_cluster_status + // https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h + // https://github.com/codership/wsrep-API/blob/f71cd270414ee70dde839cfc59c1731eea4230ea/examples/node/wsrep.c#L80 + value = strings.ToUpper(value) + mx[name+"_primary"] = boolToInt(value == "PRIMARY") + mx[name+"_non_primary"] = boolToInt(value == "NON-PRIMARY") + mx[name+"_disconnected"] = boolToInt(value == "DISCONNECTED") + default: + mx[strings.ToLower(name)] = parseInt(value) + } + } + }) + return err +} + +func convertWsrepConnected(val string) string { + // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_connected + switch val { + case "OFF": + return "0" + case "ON": + return "1" + default: + return "-1" + } +} + +func convertWsrepReady(val string) string { + // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_ready + switch val { + case "OFF": + return "0" + case "ON": + return "1" + default: + return "-1" + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} + +var globalStatusKeys = map[string]bool{ + "Bytes_received": true, + "Bytes_sent": true, + "Queries": true, + "Questions": true, + "Slow_queries": true, + "Handler_commit": true, + "Handler_delete": true, + "Handler_prepare": true, + "Handler_read_first": true, + "Handler_read_key": true, + "Handler_read_next": true, + "Handler_read_prev": true, + "Handler_read_rnd": true, + "Handler_read_rnd_next": true, + "Handler_rollback": true, + "Handler_savepoint": true, + "Handler_savepoint_rollback": true, + "Handler_update": true, + "Handler_write": true, + "Table_locks_immediate": true, + "Table_locks_waited": true, + "Table_open_cache_overflows": true, + "Select_full_join": true, + "Select_full_range_join": true, + "Select_range": true, + "Select_range_check": true, + "Select_scan": true, + "Sort_merge_passes": true, + "Sort_range": true, + "Sort_scan": true, + 
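+ // the Created_tmp_* counters below feed the mysql.tmp chart (objects created while executing statements)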
"Created_tmp_disk_tables": true, + "Created_tmp_files": true, + "Created_tmp_tables": true, + "Connections": true, + "Aborted_connects": true, + "Max_used_connections": true, + "Binlog_cache_disk_use": true, + "Binlog_cache_use": true, + "Threads_connected": true, + "Threads_created": true, + "Threads_cached": true, + "Threads_running": true, + "Thread_cache_misses": true, + "Innodb_data_read": true, + "Innodb_data_written": true, + "Innodb_data_reads": true, + "Innodb_data_writes": true, + "Innodb_data_fsyncs": true, + "Innodb_data_pending_reads": true, + "Innodb_data_pending_writes": true, + "Innodb_data_pending_fsyncs": true, + "Innodb_log_waits": true, + "Innodb_log_write_requests": true, + "Innodb_log_writes": true, + "Innodb_os_log_fsyncs": true, + "Innodb_os_log_pending_fsyncs": true, + "Innodb_os_log_pending_writes": true, + "Innodb_os_log_written": true, + "Innodb_row_lock_current_waits": true, + "Innodb_rows_inserted": true, + "Innodb_rows_read": true, + "Innodb_rows_updated": true, + "Innodb_rows_deleted": true, + "Innodb_buffer_pool_pages_data": true, + "Innodb_buffer_pool_pages_dirty": true, + "Innodb_buffer_pool_pages_free": true, + "Innodb_buffer_pool_pages_flushed": true, + "Innodb_buffer_pool_pages_misc": true, + "Innodb_buffer_pool_pages_total": true, + "Innodb_buffer_pool_bytes_data": true, + "Innodb_buffer_pool_bytes_dirty": true, + "Innodb_buffer_pool_read_ahead": true, + "Innodb_buffer_pool_read_ahead_evicted": true, + "Innodb_buffer_pool_read_ahead_rnd": true, + "Innodb_buffer_pool_read_requests": true, + "Innodb_buffer_pool_write_requests": true, + "Innodb_buffer_pool_reads": true, + "Innodb_buffer_pool_wait_free": true, + "Innodb_deadlocks": true, + "Qcache_hits": true, + "Qcache_lowmem_prunes": true, + "Qcache_inserts": true, + "Qcache_not_cached": true, + "Qcache_queries_in_cache": true, + "Qcache_free_memory": true, + "Qcache_free_blocks": true, + "Qcache_total_blocks": true, + "Key_blocks_unused": true, + "Key_blocks_used": true, + "Key_blocks_not_flushed": true, + "Key_read_requests": true, + "Key_write_requests": true, + "Key_reads": true, + "Key_writes": true, + "Open_files": true, + "Opened_files": true, + "Binlog_stmt_cache_disk_use": true, + "Binlog_stmt_cache_use": true, + "Connection_errors_accept": true, + "Connection_errors_internal": true, + "Connection_errors_max_connections": true, + "Connection_errors_peer_address": true, + "Connection_errors_select": true, + "Connection_errors_tcpwrap": true, + "Com_delete": true, + "Com_insert": true, + "Com_select": true, + "Com_update": true, + "Com_replace": true, + "Opened_tables": true, + "Open_tables": true, + "wsrep_local_recv_queue": true, + "wsrep_local_send_queue": true, + "wsrep_received": true, + "wsrep_replicated": true, + "wsrep_received_bytes": true, + "wsrep_replicated_bytes": true, + "wsrep_local_bf_aborts": true, + "wsrep_local_cert_failures": true, + "wsrep_flow_control_paused_ns": true, + "wsrep_cluster_weight": true, + "wsrep_cluster_size": true, + "wsrep_local_state": true, + "wsrep_open_transactions": true, + "wsrep_thread_count": true, + "wsrep_connected": true, + "wsrep_ready": true, + "wsrep_cluster_status": true, +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_global_vars.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_global_vars.go new file mode 100644 index 00000000000000..ae6278088bdb91 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_global_vars.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + 
+const ( + queryShowGlobalVariables = ` +SHOW GLOBAL VARIABLES +WHERE + Variable_name LIKE 'max_connections' + OR Variable_name LIKE 'table_open_cache' + OR Variable_name LIKE 'disabled_storage_engines' + OR Variable_name LIKE 'log_bin' + OR Variable_name LIKE 'performance_schema';` +) + +func (m *MySQL) collectGlobalVariables() error { + // MariaDB: https://mariadb.com/kb/en/server-system-variables/ + // MySQL: https://dev.mysql.com/doc/refman/8.0/en/server-system-variable-reference.html + q := queryShowGlobalVariables + m.Debugf("executing query: '%s'", q) + + var name string + _, err := m.collectQuery(q, func(column, value string, _ bool) { + switch column { + case "Variable_name": + name = value + case "Value": + switch name { + case "disabled_storage_engines": + m.varDisabledStorageEngine = value + case "log_bin": + m.varLogBin = value + case "max_connections": + m.varMaxConns = parseInt(value) + case "performance_schema": + m.varPerformanceSchema = value + case "table_open_cache": + m.varTableOpenCache = parseInt(value) + } + } + }) + return err +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_process_list.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_process_list.go new file mode 100644 index 00000000000000..08c08c6d57e5fe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_process_list.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "github.com/blang/semver/v4" +) + +// Table Schema: +// (MariaDB) https://mariadb.com/kb/en/information-schema-processlist-table/ +// (MySQL) https://dev.mysql.com/doc/refman/5.7/en/information-schema-processlist-table.html +const ( + queryShowProcessList = ` +SELECT + time, + user +FROM + information_schema.processlist +WHERE + info IS NOT NULL + AND info NOT LIKE '%PROCESSLIST%' +ORDER BY + time;` +) + +// Performance Schema +// (MySQL) https://dev.mysql.com/doc/refman/8.0/en/performance-schema-processlist-table.html +const ( + queryShowProcessListPS = ` +SELECT + time, + user +FROM + performance_schema.processlist +WHERE + info IS NOT NULL + AND info NOT LIKE '%PROCESSLIST%' +ORDER BY + time;` +) + +func (m *MySQL) collectProcessListStatistics(mx map[string]int64) error { + var q string + mysqlMinVer := semver.Version{Major: 8, Minor: 0, Patch: 22} + if !m.isMariaDB && m.version.GTE(mysqlMinVer) && m.varPerformanceSchema == "ON" { + q = queryShowProcessListPS + } else { + q = queryShowProcessList + } + m.Debugf("executing query: '%s'", q) + + var maxTime int64 // slowest query time in seconds from the process list + + duration, err := m.collectQuery(q, func(column, value string, _ bool) { + switch column { + case "time": + maxTime = parseInt(value) + case "user": + // system user refers to non-client threads + // event_scheduler is the thread used to monitor scheduled events + // system user and event_scheduler threads are grouped as system/database threads + // authenticated and unauthenticated user are grouped as users + // please see USER section in + // https://dev.mysql.com/doc/refman/8.0/en/information-schema-processlist-table.html + switch value { + case "system user", "event_scheduler": + mx["process_list_queries_count_system"] += 1 + default: + mx["process_list_queries_count_user"] += 1 + } + } + }) + if err != nil { + return err + } + + if _, ok := mx["process_list_queries_count_system"]; !ok { + mx["process_list_queries_count_system"] = 0 + } + if _, ok := mx["process_list_queries_count_user"]; !ok { + mx["process_list_queries_count_user"] = 0 + } +
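+ // duration is the wall-clock time of the processlist query itself (milliseconds); maxTime keeps the last row seen, which is the slowest one because the query orders by time ascending.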
mx["process_list_fetch_query_duration"] = duration + mx["process_list_longest_query_duration"] = maxTime + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_slave_status.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_slave_status.go new file mode 100644 index 00000000000000..37d4bf59b54d34 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_slave_status.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "strings" + + "github.com/blang/semver/v4" +) + +const ( + queryShowReplicaStatus = "SHOW REPLICA STATUS;" + queryShowSlaveStatus = "SHOW SLAVE STATUS;" + queryShowAllSlavesStatus = "SHOW ALL SLAVES STATUS;" +) + +func (m *MySQL) collectSlaveStatus(mx map[string]int64) error { + // https://mariadb.com/docs/reference/es/sql-statements/SHOW_ALL_SLAVES_STATUS/ + mariaDBMinVer := semver.Version{Major: 10, Minor: 2, Patch: 0} + mysqlMinVer := semver.Version{Major: 8, Minor: 0, Patch: 22} + var q string + if m.isMariaDB && m.version.GTE(mariaDBMinVer) { + q = queryShowAllSlavesStatus + } else if !m.isMariaDB && m.version.GTE(mysqlMinVer) { + q = queryShowReplicaStatus + } else { + q = queryShowSlaveStatus + } + m.Debugf("executing query: '%s'", q) + + v := struct { + name string + behindMaster int64 + sqlRunning int64 + ioRunning int64 + }{} + + _, err := m.collectQuery(q, func(column, value string, lineEnd bool) { + switch column { + case "Connection_name", "Channel_Name": + v.name = value + case "Seconds_Behind_Master", "Seconds_Behind_Source": + v.behindMaster = parseInt(value) + case "Slave_SQL_Running", "Replica_SQL_Running": + v.sqlRunning = parseInt(convertSlaveSQLRunning(value)) + case "Slave_IO_Running", "Replica_IO_Running": + v.ioRunning = parseInt(convertSlaveIORunning(value)) + } + if lineEnd { + if !m.collectedReplConns[v.name] { + m.collectedReplConns[v.name] = true + m.addSlaveReplicationConnCharts(v.name) + } + s := strings.ToLower(slaveMetricSuffix(v.name)) + mx["seconds_behind_master"+s] = v.behindMaster + mx["slave_sql_running"+s] = v.sqlRunning + mx["slave_io_running"+s] = v.ioRunning + } + }) + return err +} + +func convertSlaveSQLRunning(value string) string { + switch value { + case "Yes": + return "1" + default: + return "0" + } +} + +func convertSlaveIORunning(value string) string { + // NOTE: There is 'Connecting' state and probably others + switch value { + case "Yes": + return "1" + default: + return "0" + } +} + +func slaveMetricSuffix(conn string) string { + if conn == "" { + return "" + } + return "_" + conn +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_user_statistics.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_user_statistics.go new file mode 100644 index 00000000000000..b00703a46d3a5f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_user_statistics.go @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "strings" +) + +const queryShowUserStatistics = "SHOW USER_STATISTICS;" + +func (m *MySQL) collectUserStatistics(mx map[string]int64) error { + // https://mariadb.com/kb/en/user-statistics/ + // https://mariadb.com/kb/en/information-schema-user_statistics-table/ + q := queryShowUserStatistics + m.Debugf("executing query: '%s'", q) + + var user, prefix string + _, err := m.collectQuery(q, func(column, value string, _ bool) { + switch column { + case "User": + user = value + prefix = "userstats_" + user + "_" + if !m.collectedUsers[user] { + 
m.collectedUsers[user] = true + m.addUserStatisticsCharts(user) + } + case "Cpu_time": + mx[strings.ToLower(prefix+column)] = int64(parseFloat(value) * 1000) + case + "Total_connections", + "Lost_connections", + "Denied_connections", + "Empty_queries", + "Binlog_bytes_written", + "Rows_read", + "Rows_sent", + "Rows_deleted", + "Rows_inserted", + "Rows_updated", + "Rows_fetched", // Percona + "Select_commands", + "Update_commands", + "Other_commands", + "Access_denied", + "Commit_transactions", + "Rollback_transactions": + mx[strings.ToLower(prefix+column)] = parseInt(value) + } + }) + return err +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/collect_version.go b/src/go/collectors/go.d.plugin/modules/mysql/collect_version.go new file mode 100644 index 00000000000000..b85922e2c6e133 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/collect_version.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "fmt" + "regexp" + "strings" + + "github.com/blang/semver/v4" +) + +const queryShowVersion = ` +SHOW GLOBAL VARIABLES +WHERE + Variable_name LIKE 'version' + OR Variable_name LIKE 'version_comment';` + +var reVersionCore = regexp.MustCompile(`^\d+\.\d+\.\d+`) + +func (m *MySQL) collectVersion() error { + // https://mariadb.com/kb/en/version/ + q := queryShowVersion + m.Debugf("executing query: '%s'", queryShowVersion) + + var name, version, versionComment string + _, err := m.collectQuery(q, func(column, value string, _ bool) { + switch column { + case "Variable_name": + name = value + case "Value": + switch name { + case "version": + version = value + case "version_comment": + versionComment = value + } + } + }) + if err != nil { + return err + } + + m.Infof("application version: '%s', version_comment: '%s'", version, versionComment) + + // version string is not always valid semver (ex.: 8.0.22-0ubuntu0.20.04.2) + s := reVersionCore.FindString(version) + if s == "" { + return fmt.Errorf("couldn't parse version string '%s'", version) + } + + ver, err := semver.New(s) + if err != nil { + return fmt.Errorf("couldn't parse version string '%s': %v", s, err) + } + + m.version = ver + m.isMariaDB = strings.Contains(version, "MariaDB") || strings.Contains(versionComment, "mariadb") + m.isPercona = strings.Contains(versionComment, "Percona") + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/config_schema.json b/src/go/collectors/go.d.plugin/modules/mysql/config_schema.json new file mode 100644 index 00000000000000..1db919824e769c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/config_schema.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mysql job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "update_every": { + "type": "integer" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "dsn" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/integrations/mariadb.md b/src/go/collectors/go.d.plugin/modules/mysql/integrations/mariadb.md new file mode 100644 index 00000000000000..2005779e1d0925 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/integrations/mariadb.md @@ -0,0 +1,370 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/integrations/mariadb.md" +meta_yaml: 
"https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/metadata.yaml" +sidebar_label: "MariaDB" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MariaDB + + +<img src="https://netdata.cloud/img/mariadb.svg" width="150"/> + + +Plugin: go.d.plugin +Module: mysql + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics. + + +It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands: + +Executed queries: + +- `SELECT VERSION();` +- `SHOW GLOBAL STATUS;` +- `SHOW GLOBAL VARIABLES;` +- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+) +- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+) +- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;` + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets: + +- /var/run/mysqld/mysqld.sock +- /var/run/mysqld/mysql.sock +- /var/lib/mysql/mysql.sock +- /tmp/mysql.sock +- 127.0.0.1:3306 +- "[::1]:3306" + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MariaDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.net | in, out | kilobits/s | • | • | • | +| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • | +| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • | +| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • | +| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • | +| mysql.table_locks | immediate, waited | locks/s | • | • | • | +| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • | +| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • | +| mysql.tmp | disk_tables, files, tables | events/s | • | • | • | +| mysql.connections | all, aborted | connections/s | • | • | • | +| mysql.connections_active | active, limit, max_active | connections | • | • | • | +| mysql.threads | connected, cached, running | threads | • | • | • | +| mysql.threads_created | created | threads/s | • | • | • | +| mysql.thread_cache_misses | misses | misses | • | • | • | +| mysql.innodb_io | read, write | KiB/s | • | • | • | +| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • | +| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • | +| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • | +| mysql.innodb_cur_row_lock | current waits | operations | • | • | • | +| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • | +| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • | +| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • | +| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • | +| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • | +| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • | +| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • | +| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • | +| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • | +| mysql.innodb_os_log_io | write | KiB/s | • | • | • | +| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • | +| mysql.files | files | files | • | • | • | +| mysql.files_rate | files | files/s | • | • | • | +| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • | +| mysql.opened_tables | tables | tables/s | • | • | • | +| mysql.open_tables | cache, tables | tables | • | • | • | +| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • | +| mysql.process_list_queries_count | system, user | queries | • | • | • | +| mysql.process_list_longest_query_duration | duration | seconds | • | • | • | +| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • | +| mysql.qcache | queries | queries | • | • | • | +| mysql.qcache_freemem | free | MiB | • | • | • | +| mysql.qcache_memblocks | free, total | blocks | • | • | • | +| mysql.galera_writesets | rx, tx | writesets/s | • | • | • | +| mysql.galera_bytes | rx, tx | KiB/s | • | • | • | +| mysql.galera_queue | rx, tx | writesets | • | • | • | +| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • | +| mysql.galera_flow_control | 
paused | ms | • | • | • | +| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • | +| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • | +| mysql.galera_cluster_size | nodes | nodes | • | • | • | +| mysql.galera_cluster_weight | weight | weight | • | • | • | +| mysql.galera_connected | connected | boolean | • | • | • | +| mysql.galera_ready | ready | boolean | • | • | • | +| mysql.galera_open_transactions | open | transactions | • | • | • | +| mysql.galera_thread_count | threads | threads | • | • | • | +| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • | +| mysql.key_requests | reads, writes | requests/s | • | • | • | +| mysql.key_disk_ops | reads, writes | operations/s | • | • | • | +| mysql.binlog_cache | disk, all | transactions/s | • | • | • | +| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • | + +### Per connection + +These metrics refer to the replication connection. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.slave_behind | seconds | seconds | • | • | • | +| mysql.slave_status | sql_running, io_running | boolean | • | • | • | + +### Per user + +These metrics refer to the MySQL user. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| user | username | + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.userstats_cpu | used | percentage | | • | • | +| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • | +| mysql.userstats_commands | select, update, other | commands/s | | • | • | +| mysql.userstats_denied_commands | denied | commands/s | | • | • | +| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • | +| mysql.userstats_binlog_written | written | B/s | | • | • | +| mysql.userstats_empty_queries | empty | queries/s | | • | • | +| mysql.userstats_connections | created | connections/s | | • | • | +| mysql.userstats_lost_connections | lost | connections/s | | • | • | +| mysql.userstats_denied_connections | denied | connections/s | | • | • | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds | +| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds | +| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds | +| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds | +| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization | +| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) | +| [ mysql_replication_lag 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master | +| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago | +| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes | +| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined | +| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error | +| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. | + + +## Setup + +### Prerequisites + +#### Create netdata user + +A user account should have the +following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html): + +- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage) +- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client) +- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process) + +To create the `netdata` user with these permissions, execute the following in the MySQL shell: + +```mysql +CREATE USER 'netdata'@'localhost'; +GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; +FLUSH PRIVILEGES; +``` + +The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only +be able to gather statistics without being able to alter or affect operations in any way. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/mysql.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/mysql.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes | +| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no | +| timeout | Query timeout in seconds. 
| 1 | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### Unix socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@unix(/var/lib/mysql/mysql.sock)/ + +``` +</details> + +##### Connection with password + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netconfig:password@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### my.cnf + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + my.cnf: '/etc/my.cnf' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + + - name: remote + dsn: netconfig:password@tcp(203.0.113.0:3306)/ + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m mysql + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/mysql/integrations/mysql.md b/src/go/collectors/go.d.plugin/modules/mysql/integrations/mysql.md new file mode 100644 index 00000000000000..9710326db12634 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/integrations/mysql.md @@ -0,0 +1,370 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/integrations/mysql.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/metadata.yaml" +sidebar_label: "MySQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MySQL + + +<img src="https://netdata.cloud/img/mysql.svg" width="150"/> + + +Plugin: go.d.plugin +Module: mysql + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics. + + +It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands: + +Executed queries: + +- `SELECT VERSION();` +- `SHOW GLOBAL STATUS;` +- `SHOW GLOBAL VARIABLES;` +- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+) +- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+) +- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;` + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
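+
+As a rough illustration of the connection pattern described above, here is a minimal standalone sketch (not the collector's actual code) that assumes the `github.com/go-sql-driver/mysql` driver, which uses the same DSN syntax the collector accepts:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
+)
+
+func main() {
+	// Same DSN syntax the collector uses, e.g. user@tcp(host:port)/ or user@unix(socket)/.
+	db, err := sql.Open("mysql", "netdata@tcp(127.0.0.1:3306)/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	var version string
+	// One of the queries listed above.
+	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("server version:", version)
+}
+```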
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets: + +- /var/run/mysqld/mysqld.sock +- /var/run/mysqld/mysql.sock +- /var/lib/mysql/mysql.sock +- /tmp/mysql.sock +- 127.0.0.1:3306 +- "[::1]:3306" + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MariaDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.net | in, out | kilobits/s | • | • | • | +| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • | +| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • | +| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • | +| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • | +| mysql.table_locks | immediate, waited | locks/s | • | • | • | +| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • | +| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • | +| mysql.tmp | disk_tables, files, tables | events/s | • | • | • | +| mysql.connections | all, aborted | connections/s | • | • | • | +| mysql.connections_active | active, limit, max_active | connections | • | • | • | +| mysql.threads | connected, cached, running | threads | • | • | • | +| mysql.threads_created | created | threads/s | • | • | • | +| mysql.thread_cache_misses | misses | misses | • | • | • | +| mysql.innodb_io | read, write | KiB/s | • | • | • | +| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • | +| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • | +| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • | +| mysql.innodb_cur_row_lock | current waits | operations | • | • | • | +| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • | +| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • | +| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • | +| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • | +| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • | +| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • | +| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • | +| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • | +| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • | +| mysql.innodb_os_log_io | write | KiB/s | • | • | • | +| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • | +| mysql.files | files | files | • | • | • | +| mysql.files_rate | files | files/s | • | • | • | +| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • | +| 
mysql.opened_tables | tables | tables/s | • | • | • | +| mysql.open_tables | cache, tables | tables | • | • | • | +| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • | +| mysql.process_list_queries_count | system, user | queries | • | • | • | +| mysql.process_list_longest_query_duration | duration | seconds | • | • | • | +| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • | +| mysql.qcache | queries | queries | • | • | • | +| mysql.qcache_freemem | free | MiB | • | • | • | +| mysql.qcache_memblocks | free, total | blocks | • | • | • | +| mysql.galera_writesets | rx, tx | writesets/s | • | • | • | +| mysql.galera_bytes | rx, tx | KiB/s | • | • | • | +| mysql.galera_queue | rx, tx | writesets | • | • | • | +| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • | +| mysql.galera_flow_control | paused | ms | • | • | • | +| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • | +| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • | +| mysql.galera_cluster_size | nodes | nodes | • | • | • | +| mysql.galera_cluster_weight | weight | weight | • | • | • | +| mysql.galera_connected | connected | boolean | • | • | • | +| mysql.galera_ready | ready | boolean | • | • | • | +| mysql.galera_open_transactions | open | transactions | • | • | • | +| mysql.galera_thread_count | threads | threads | • | • | • | +| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • | +| mysql.key_requests | reads, writes | requests/s | • | • | • | +| mysql.key_disk_ops | reads, writes | operations/s | • | • | • | +| mysql.binlog_cache | disk, all | transactions/s | • | • | • | +| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • | + +### Per connection + +These metrics refer to the replication connection. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.slave_behind | seconds | seconds | • | • | • | +| mysql.slave_status | sql_running, io_running | boolean | • | • | • | + +### Per user + +These metrics refer to the MySQL user. 
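+
+Note that the server only populates these statistics when user statistics collection is enabled; on MariaDB and Percona Server this is controlled by the `userstat` system variable (verify the exact setting for your server version):
+
+```mysql
+SET GLOBAL userstat = 1;
+```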
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| user | username | + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.userstats_cpu | used | percentage | | • | • | +| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • | +| mysql.userstats_commands | select, update, other | commands/s | | • | • | +| mysql.userstats_denied_commands | denied | commands/s | | • | • | +| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • | +| mysql.userstats_binlog_written | written | B/s | | • | • | +| mysql.userstats_empty_queries | empty | queries/s | | • | • | +| mysql.userstats_connections | created | connections/s | | • | • | +| mysql.userstats_lost_connections | lost | connections/s | | • | • | +| mysql.userstats_denied_connections | denied | connections/s | | • | • | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds | +| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds | +| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds | +| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds | +| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization | +| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) | +| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master | +| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago | +| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes | +| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined | +| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error | +| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. 
This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. | + + +## Setup + +### Prerequisites + +#### Create netdata user + +A user account should have the +following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html): + +- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage) +- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client) +- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process) + +To create the `netdata` user with these permissions, execute the following in the MySQL shell: + +```mysql +CREATE USER 'netdata'@'localhost'; +GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; +FLUSH PRIVILEGES; +``` + +The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only +be able to gather statistics without being able to alter or affect operations in any way. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/mysql.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/mysql.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes | +| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no | +| timeout | Query timeout in seconds. | 1 | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### Unix socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@unix(/var/lib/mysql/mysql.sock)/ + +``` +</details> + +##### Connection with password + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netconfig:password@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### my.cnf + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + my.cnf: '/etc/my.cnf' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + + - name: remote + dsn: netconfig:password@tcp(203.0.113.0:3306)/ + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m mysql + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/mysql/integrations/percona_mysql.md b/src/go/collectors/go.d.plugin/modules/mysql/integrations/percona_mysql.md new file mode 100644 index 00000000000000..4539290939a81d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/integrations/percona_mysql.md @@ -0,0 +1,370 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/integrations/percona_mysql.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/mysql/metadata.yaml" +sidebar_label: "Percona MySQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Percona MySQL + + +<img src="https://netdata.cloud/img/percona.svg" width="150"/> + + +Plugin: go.d.plugin +Module: mysql + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics. + + +It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands: + +Executed queries: + +- `SELECT VERSION();` +- `SHOW GLOBAL STATUS;` +- `SHOW GLOBAL VARIABLES;` +- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+) +- `SHOW USER_STATISTICS;` (MariaDBv10.1.1+) +- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;` + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets: + +- /var/run/mysqld/mysqld.sock +- /var/run/mysqld/mysql.sock +- /var/lib/mysql/mysql.sock +- /tmp/mysql.sock +- 127.0.0.1:3306 +- "[::1]:3306" + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per MariaDB instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.net | in, out | kilobits/s | • | • | • | +| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • | +| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • | +| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • | +| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • | +| mysql.table_locks | immediate, waited | locks/s | • | • | • | +| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • | +| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • | +| mysql.tmp | disk_tables, files, tables | events/s | • | • | • | +| mysql.connections | all, aborted | connections/s | • | • | • | +| mysql.connections_active | active, limit, max_active | connections | • | • | • | +| mysql.threads | connected, cached, running | threads | • | • | • | +| mysql.threads_created | created | threads/s | • | • | • | +| mysql.thread_cache_misses | misses | misses | • | • | • | +| mysql.innodb_io | read, write | KiB/s | • | • | • | +| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • | +| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • | +| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • | +| mysql.innodb_cur_row_lock | current waits | operations | • | • | • | +| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • | +| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • | +| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • | +| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • | +| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • | +| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • | +| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • | +| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • | +| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • | +| mysql.innodb_os_log_io | write | KiB/s | • | • | • | +| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • | +| mysql.files | files | files | • | • | • | +| mysql.files_rate | files | files/s | • | • | • | +| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • | +| mysql.opened_tables | tables | tables/s | • | • | • | +| mysql.open_tables | cache, tables | tables | • | • | • | +| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • | +| mysql.process_list_queries_count | system, user | queries | • | • | • | +| mysql.process_list_longest_query_duration | duration | seconds | • | • | • | +| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • | +| mysql.qcache | queries | queries | • | • | • | +| mysql.qcache_freemem | free | MiB | • | • | • | +| mysql.qcache_memblocks | free, total | blocks | • | • | • | +| mysql.galera_writesets | rx, tx | writesets/s | • | • | • | +| mysql.galera_bytes | rx, tx | KiB/s | • | • | • | +| mysql.galera_queue | rx, tx | writesets | • | • | • | +| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • | +| mysql.galera_flow_control | 
paused | ms | • | • | • | +| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • | +| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • | +| mysql.galera_cluster_size | nodes | nodes | • | • | • | +| mysql.galera_cluster_weight | weight | weight | • | • | • | +| mysql.galera_connected | connected | boolean | • | • | • | +| mysql.galera_ready | ready | boolean | • | • | • | +| mysql.galera_open_transactions | open | transactions | • | • | • | +| mysql.galera_thread_count | threads | threads | • | • | • | +| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • | +| mysql.key_requests | reads, writes | requests/s | • | • | • | +| mysql.key_disk_ops | reads, writes | operations/s | • | • | • | +| mysql.binlog_cache | disk, all | transactions/s | • | • | • | +| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • | + +### Per connection + +These metrics refer to the replication connection. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.slave_behind | seconds | seconds | • | • | • | +| mysql.slave_status | sql_running, io_running | boolean | • | • | • | + +### Per user + +These metrics refer to the MySQL user. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| user | username | + +Metrics: + +| Metric | Dimensions | Unit | MySQL | MariaDB | Percona | +|:------|:----------|:----|:---:|:---:|:---:| +| mysql.userstats_cpu | used | percentage | | • | • | +| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • | +| mysql.userstats_commands | select, update, other | commands/s | | • | • | +| mysql.userstats_denied_commands | denied | commands/s | | • | • | +| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • | +| mysql.userstats_binlog_written | written | B/s | | • | • | +| mysql.userstats_empty_queries | empty | queries/s | | • | • | +| mysql.userstats_connections | created | connections/s | | • | • | +| mysql.userstats_lost_connections | lost | connections/s | | • | • | +| mysql.userstats_denied_connections | denied | connections/s | | • | • | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds | +| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds | +| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds | +| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds | +| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization | +| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) | +| [ mysql_replication_lag 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master | +| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago | +| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes | +| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined | +| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error | +| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. | + + +## Setup + +### Prerequisites + +#### Create netdata user + +A user account should have the +following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html): + +- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage) +- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client) +- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process) + +To create the `netdata` user with these permissions, execute the following in the MySQL shell: + +```mysql +CREATE USER 'netdata'@'localhost'; +GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; +FLUSH PRIVILEGES; +``` + +The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only +be able to gather statistics without being able to alter or affect operations in any way. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/mysql.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/mysql.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes | +| my.cnf | Specifies the my.cnf file to read the connection settings from the [client] section. | | no | +| timeout | Query timeout in seconds. 
| 1 | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### Unix socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@unix(/var/lib/mysql/mysql.sock)/ + +``` +</details> + +##### Connection with password + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netconfig:password@tcp(127.0.0.1:3306)/ + +``` +</details> + +##### my.cnf + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + my.cnf: '/etc/my.cnf' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + + - name: remote + dsn: netconfig:password@tcp(203.0.113.0:3306)/ + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m mysql + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml b/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml new file mode 100644 index 00000000000000..1bc1332389cfb3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/metadata.yaml @@ -0,0 +1,806 @@ +plugin_name: go.d.plugin +modules: + - &module + meta: &meta + id: collector-go.d.plugin-mysql + plugin_name: go.d.plugin + module_name: mysql + monitored_instance: + name: MySQL + link: https://www.mysql.com/ + categories: + - data-collection.database-servers + icon_filename: mysql.svg + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + - plugin_name: cgroups.plugin + module_name: cgroups + info_provided_to_referring_integrations: + description: "" + keywords: + - "db" + - "database" + - "mysql" + - "maria" + - "mariadb" + - "sql" + most_popular: true + overview: + multi_instance: true + data_collection: + metrics_description: | + This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics. 
+ method_description: | + It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands: + + Executed queries: + + - `SELECT VERSION();` + - `SHOW GLOBAL STATUS;` + - `SHOW GLOBAL VARIABLES;` + - `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDBv10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+) + - `SHOW USER_STATISTICS;` (MariaDBv10.1.1+) + - `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;` + default_behavior: + auto_detection: + description: | + By default, it detects instances running on localhost by trying to connect as root and netdata using known MySQL TCP and UNIX sockets: + + - /var/run/mysqld/mysqld.sock + - /var/run/mysqld/mysql.sock + - /var/lib/mysql/mysql.sock + - /tmp/mysql.sock + - 127.0.0.1:3306 + - "[::1]:3306" + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: Create netdata user + description: | + A user account should have the + following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html): + + - [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage) + - [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client) + - [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process) + + To create the `netdata` user with these permissions, execute the following in the MySQL shell: + + ```mysql + CREATE USER 'netdata'@'localhost'; + GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost'; + FLUSH PRIVILEGES; + ``` + + The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only + be able to gather statistics without being able to alter or affect operations in any way. + configuration: + file: + name: go.d/mysql.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: dsn + description: MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). + default_value: root@tcp(localhost:3306)/ + required: true + - name: my.cnf + description: Specifies the my.cnf file to read the connection settings from the [client] section. + default_value: "" + required: false + - name: timeout + description: Query timeout in seconds. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: TCP socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + - name: Unix socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: netdata@unix(/var/lib/mysql/mysql.sock)/ + - name: Connection with password + description: An example configuration. + config: | + jobs: + - name: local + dsn: netconfig:password@tcp(127.0.0.1:3306)/ + - name: my.cnf + description: An example configuration. 
+ config: | + jobs: + - name: local + my.cnf: '/etc/my.cnf' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + dsn: netdata@tcp(127.0.0.1:3306)/ + + - name: remote + dsn: netconfig:password@tcp(203.0.113.0:3306)/ + troubleshooting: + problems: + list: [] + alerts: + - name: mysql_10s_slow_queries + metric: mysql.queries + info: number of slow queries in the last 10 seconds + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_10s_table_locks_immediate + metric: mysql.table_locks + info: number of table immediate locks in the last 10 seconds + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_10s_table_locks_waited + metric: mysql.table_locks + info: number of table waited locks in the last 10 seconds + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_10s_waited_locks_ratio + metric: mysql.table_locks + info: ratio of waited table locks over the last 10 seconds + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_connections + metric: mysql.connections_active + info: client connections utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_replication + metric: mysql.slave_status + info: "replication status (0: stopped, 1: working)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_replication_lag + metric: mysql.slave_behind + info: difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_galera_cluster_size_max_2m + metric: mysql.galera_cluster_size + info: maximum galera cluster size in the last 2 minutes starting one minute ago + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_galera_cluster_size + metric: mysql.galera_cluster_size + info: current galera cluster size, compared to the maximum size in the last 2 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_galera_cluster_state_warn + metric: mysql.galera_cluster_state + info: galera node state is either Donor/Desynced or Joined + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_galera_cluster_state_crit + metric: mysql.galera_cluster_state + info: galera node state is either Undefined or Joining or Error + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + - name: mysql_galera_cluster_status + metric: mysql.galera_cluster_status + info: galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: + - MySQL + - MariaDB + - Percona + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: mysql.net + description: Bandwidth + unit: kilobits/s + chart_type: area + dimensions: + - name: in + - name: out + - name: mysql.queries + description: Queries + unit: queries/s + chart_type: line + dimensions: + - name: queries + - name: questions + - name: slow_queries + - name: mysql.queries_type + description: Queries By Type + unit: queries/s + chart_type: stacked + dimensions: + - name: select + - name: delete + - name: update + - name: insert + - name: replace + - name: mysql.handlers + description: Handlers + unit: handlers/s + chart_type: line + dimensions: + - name: commit + - name: delete + - name: prepare + - name: read_first + - name: read_key + - name: read_next + - name: read_prev + - name: read_rnd + - name: read_rnd_next + - name: rollback + - name: savepoint + - name: savepointrollback + - name: update + - name: write + - name: mysql.table_open_cache_overflows + description: Table open cache overflows + unit: overflows/s + chart_type: line + dimensions: + - name: open_cache + - name: mysql.table_locks + description: Table Locks + unit: locks/s + chart_type: line + dimensions: + - name: immediate + - name: waited + - name: mysql.join_issues + description: Table Select Join Issues + unit: joins/s + chart_type: line + dimensions: + - name: full_join + - name: full_range_join + - name: range + - name: range_check + - name: scan + - name: mysql.sort_issues + description: Table Sort Issues + unit: issues/s + chart_type: line + dimensions: + - name: merge_passes + - name: range + - name: scan + - name: mysql.tmp + description: Tmp Operations + unit: events/s + chart_type: line + dimensions: + - name: disk_tables + - name: files + - name: tables + - name: mysql.connections + description: Connections + unit: connections/s + chart_type: line + dimensions: + - name: all + - name: aborted + - name: mysql.connections_active + description: Active Connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: limit + - name: max_active + - name: mysql.threads + description: Threads + unit: threads + chart_type: line + dimensions: + - name: connected + - name: cached + - name: running + - name: mysql.threads_created + description: Threads Creation Rate + unit: threads/s + chart_type: line + dimensions: + - name: created + - name: mysql.thread_cache_misses + description: Threads Cache Misses + unit: misses + chart_type: line + dimensions: + - name: misses + - name: mysql.innodb_io + description: InnoDB I/O Bandwidth + unit: KiB/s + chart_type: line + dimensions: + - name: read + - name: write + - name: mysql.innodb_io_ops + description: InnoDB I/O Operations + unit: operations/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: fsyncs + - name: mysql.innodb_io_pending_ops + description: InnoDB Pending I/O Operations + unit: operations + chart_type: line + dimensions: + - name: reads + - name: writes + - name: fsyncs + - name: mysql.innodb_log + description: InnoDB Log Operations + unit: operations/s + chart_type: line + dimensions: + - name: waits + - name: write_requests + - name: writes + - name: mysql.innodb_cur_row_lock + description: InnoDB Current Row Locks + unit: operations + chart_type: line + dimensions: + - name: current waits + - name: mysql.innodb_rows + description: InnoDB Row Operations + unit: operations/s + chart_type: area + dimensions: + - name: inserted + - name: read + - name: updated + - name: deleted + - name: mysql.innodb_buffer_pool_pages + description: InnoDB Buffer Pool 
Pages + unit: pages + chart_type: line + dimensions: + - name: data + - name: dirty + - name: free + - name: misc + - name: total + - name: mysql.innodb_buffer_pool_pages_flushed + description: InnoDB Buffer Pool Flush Pages Requests + unit: requests/s + chart_type: line + dimensions: + - name: flush_pages + - name: mysql.innodb_buffer_pool_bytes + description: InnoDB Buffer Pool Bytes + unit: MiB + chart_type: line + dimensions: + - name: data + - name: dirty + - name: mysql.innodb_buffer_pool_read_ahead + description: InnoDB Buffer Pool Read Pages + unit: pages/s + chart_type: line + dimensions: + - name: all + - name: evicted + - name: mysql.innodb_buffer_pool_read_ahead_rnd + description: InnoDB Buffer Pool Random Read-Aheads + unit: operations/s + chart_type: line + dimensions: + - name: read-ahead + - name: mysql.innodb_buffer_pool_ops + description: InnoDB Buffer Pool Operations + unit: operations/s + chart_type: area + dimensions: + - name: disk_reads + - name: wait_free + - name: mysql.innodb_os_log + description: InnoDB OS Log Pending Operations + unit: operations + chart_type: line + dimensions: + - name: fsyncs + - name: writes + - name: mysql.innodb_os_log_fsync_writes + description: InnoDB OS Log Operations + unit: operations/s + chart_type: line + dimensions: + - name: fsyncs + - name: mysql.innodb_os_log_io + description: InnoDB OS Log Bandwidth + unit: KiB/s + chart_type: area + dimensions: + - name: write + - name: mysql.innodb_deadlocks + description: InnoDB Deadlocks + unit: operations/s + chart_type: area + dimensions: + - name: deadlocks + - name: mysql.files + description: Open Files + unit: files + chart_type: line + dimensions: + - name: files + - name: mysql.files_rate + description: Opened Files Rate + unit: files/s + chart_type: line + dimensions: + - name: files + - name: mysql.connection_errors + description: Connection Errors + unit: errors/s + chart_type: line + dimensions: + - name: accept + - name: internal + - name: max + - name: peer_addr + - name: select + - name: tcpwrap + - name: mysql.opened_tables + description: Opened Tables + unit: tables/s + chart_type: line + dimensions: + - name: tables + - name: mysql.open_tables + description: Open Tables + unit: tables + chart_type: area + dimensions: + - name: cache + - name: tables + - name: mysql.process_list_fetch_query_duration + description: Process List Fetch Duration + unit: milliseconds + chart_type: line + dimensions: + - name: duration + - name: mysql.process_list_queries_count + description: Queries Count + unit: queries + chart_type: stacked + dimensions: + - name: system + - name: user + - name: mysql.process_list_longest_query_duration + description: Longest Query Duration + unit: seconds + chart_type: line + dimensions: + - name: duration + - name: mysql.qcache_ops + description: QCache Operations + unit: queries/s + chart_type: line + dimensions: + - name: hits + - name: lowmem_prunes + - name: inserts + - name: not_cached + - name: mysql.qcache + description: QCache Queries in Cache + unit: queries + chart_type: line + dimensions: + - name: queries + - name: mysql.qcache_freemem + description: QCache Free Memory + unit: MiB + chart_type: area + dimensions: + - name: free + - name: mysql.qcache_memblocks + description: QCache Memory Blocks + unit: blocks + chart_type: line + dimensions: + - name: free + - name: total + - name: mysql.galera_writesets + description: Replicated Writesets + unit: writesets/s + chart_type: line + dimensions: + - name: rx + - name: tx + - name: mysql.galera_bytes 
+ description: Replicated Bytes + unit: KiB/s + chart_type: area + dimensions: + - name: rx + - name: tx + - name: mysql.galera_queue + description: Galera Queue + unit: writesets + chart_type: line + dimensions: + - name: rx + - name: tx + - name: mysql.galera_conflicts + description: Replication Conflicts + unit: transactions + chart_type: area + dimensions: + - name: bf_aborts + - name: cert_fails + - name: mysql.galera_flow_control + description: Flow Control + unit: ms + chart_type: area + dimensions: + - name: paused + - name: mysql.galera_cluster_status + description: Cluster Component Status + unit: status + chart_type: line + dimensions: + - name: primary + - name: non_primary + - name: disconnected + - name: mysql.galera_cluster_state + description: Cluster Component State + unit: state + chart_type: line + dimensions: + - name: undefined + - name: joining + - name: donor + - name: joined + - name: synced + - name: error + - name: mysql.galera_cluster_size + description: Number of Nodes in the Cluster + unit: nodes + chart_type: line + dimensions: + - name: nodes + - name: mysql.galera_cluster_weight + description: The Total Weight of the Current Members in the Cluster + unit: weight + chart_type: line + dimensions: + - name: weight + - name: mysql.galera_connected + description: Cluster Connection Status + unit: boolean + chart_type: line + dimensions: + - name: connected + - name: mysql.galera_ready + description: Accept Queries Readiness Status + unit: boolean + chart_type: line + dimensions: + - name: ready + - name: mysql.galera_open_transactions + description: Open Transactions + unit: transactions + chart_type: line + dimensions: + - name: open + - name: mysql.galera_thread_count + description: Total Number of WSRep (applier/rollbacker) Threads + unit: threads + chart_type: line + dimensions: + - name: threads + - name: mysql.key_blocks + description: MyISAM Key Cache Blocks + unit: blocks + chart_type: line + dimensions: + - name: unused + - name: used + - name: not_flushed + - name: mysql.key_requests + description: MyISAM Key Cache Requests + unit: requests/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: mysql.key_disk_ops + description: MyISAM Key Cache Disk Operations + unit: operations/s + chart_type: area + dimensions: + - name: reads + - name: writes + - name: mysql.binlog_cache + description: Binlog Cache + unit: transactions/s + chart_type: line + dimensions: + - name: disk + - name: all + - name: mysql.binlog_stmt_cache + description: Binlog Statement Cache + unit: statements/s + chart_type: line + dimensions: + - name: disk + - name: all + - name: connection + description: These metrics refer to the replication connection. + labels: [] + metrics: + - name: mysql.slave_behind + description: Slave Behind Seconds + unit: seconds + chart_type: line + dimensions: + - name: seconds + - name: mysql.slave_status + description: I/O / SQL Thread Running State + unit: boolean + chart_type: line + dimensions: + - name: sql_running + - name: io_running + - name: user + description: These metrics refer to the MySQL user. 
+ labels: + - name: user + description: username + metrics: + - name: mysql.userstats_cpu + description: User CPU Time + unit: percentage + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: used + - name: mysql.userstats_rows + description: User Rows Operations + unit: operations/s + chart_type: stacked + availability: + - MariaDB + - Percona + dimensions: + - name: read + - name: sent + - name: updated + - name: inserted + - name: deleted + - name: mysql.userstats_commands + description: User Commands + unit: commands/s + chart_type: stacked + availability: + - MariaDB + - Percona + dimensions: + - name: select + - name: update + - name: other + - name: mysql.userstats_denied_commands + description: User Denied Commands + unit: commands/s + chart_type: stacked + availability: + - MariaDB + - Percona + dimensions: + - name: denied + - name: mysql.userstats_created_transactions + description: User Transactions + unit: transactions/s + chart_type: area + availability: + - MariaDB + - Percona + dimensions: + - name: commit + - name: rollback + - name: mysql.userstats_binlog_written + description: User Binlog Written + unit: B/s + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: written + - name: mysql.userstats_empty_queries + description: User Empty Queries + unit: queries/s + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: empty + - name: mysql.userstats_connections + description: User Created Connections + unit: connections/s + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: created + - name: mysql.userstats_lost_connections + description: User Lost Connections + unit: connections/s + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: lost + - name: mysql.userstats_denied_connections + description: User Denied Connections + unit: connections/s + chart_type: line + availability: + - MariaDB + - Percona + dimensions: + - name: denied + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-mariadb + monitored_instance: + name: MariaDB + link: https://mariadb.org/ + icon_filename: mariadb.svg + categories: + - data-collection.database-servers + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-percona_mysql + most_popular: false + monitored_instance: + name: Percona MySQL + link: https://www.percona.com/software/mysql-database/percona-server + icon_filename: percona.svg + categories: + - data-collection.database-servers diff --git a/src/go/collectors/go.d.plugin/modules/mysql/mycnf.go b/src/go/collectors/go.d.plugin/modules/mysql/mycnf.go new file mode 100644 index 00000000000000..2069af80da559a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/mycnf.go @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "fmt" + "os" + + "gopkg.in/ini.v1" +) + +func dsnFromFile(filename string) (string, error) { + f, err := ini.Load(filename) + if err != nil { + return "", err + } + + section, err := f.GetSection("client") + if err != nil { + return "", err + } + + defaultUser := getUser() + defaultHost := "localhost" + defaultPort := "3306" + + user := section.Key("user").String() + password := section.Key("password").String() + socket := section.Key("socket").String() + host := section.Key("host").String() + port := section.Key("port").String() + database := section.Key("database").String() + + var dsn string + + if user != "" { + dsn = user + } else { + dsn = defaultUser + } + 
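+ // A sketch of the DSNs this produces, assuming a typical my.cnf
+ // (go-sql-driver grammar: [user[:password]@][net[(addr)]]/dbname):
+ //   socket=/run/mysqld/mysqld.sock -> user@unix(/run/mysqld/mysqld.sock)/
+ //   host=10.0.0.1, port=3307       -> user@tcp(10.0.0.1:3307)/
+ // The switch below prefers the socket over host/port when both are set.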
+ if password != "" { + dsn += ":" + password + } + + switch { + case socket != "": + dsn += fmt.Sprintf("@unix(%s)/", socket) + case host != "" && port != "": + dsn += fmt.Sprintf("@tcp(%s:%s)/", host, port) + case host != "": + dsn += fmt.Sprintf("@tcp(%s:%s)/", host, defaultPort) + case port != "": + dsn += fmt.Sprintf("@tcp(%s:%s)/", defaultHost, port) + default: + dsn += "@/" + } + + if database != "" { + dsn += database + } + return dsn, nil +} + +func getUser() (user string) { + if user = os.Getenv("LOGNAME"); user != "" { + return user + } + if user = os.Getenv("USER"); user != "" { + return user + } + if user = os.Getenv("LNAME"); user != "" { + return user + } + if user = os.Getenv("USERNAME"); user != "" { + return user + } + return "" +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/mycnf_test.go b/src/go/collectors/go.d.plugin/modules/mysql/mycnf_test.go new file mode 100644 index 00000000000000..f68680272905f2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/mycnf_test.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_dsnFromFile(t *testing.T) { + user := getUser() + tests := map[string]struct { + config string + expectedDSN string + wantErr bool + }{ + "socket": { + config: ` +[client] +socket=/opt/bitnami/mariadb/tmp/mysql.sock +`, + expectedDSN: user + "@unix(/opt/bitnami/mariadb/tmp/mysql.sock)/", + }, + "socket, host, port": { + config: ` +[client] +host=10.0.0.0 +port=3307 +socket=/opt/bitnami/mariadb/tmp/mysql.sock +`, + expectedDSN: user + "@unix(/opt/bitnami/mariadb/tmp/mysql.sock)/", + }, + "host, port": { + config: ` +[client] +host=10.0.0.0 +port=3307 +`, + expectedDSN: user + "@tcp(10.0.0.0:3307)/", + }, + "only host": { + config: ` +[client] +host=10.0.0.0 +`, + expectedDSN: user + "@tcp(10.0.0.0:3306)/", + }, + "only port": { + config: ` +[client] +port=3307 +`, + expectedDSN: user + "@tcp(localhost:3307)/", + }, + "user, password": { + config: ` +[client] +user=user +password=password +`, + expectedDSN: "user:password@/", + }, + "empty": { + config: ` +[client] +`, + expectedDSN: user + "@/", + }, + "no client section": { + config: ` +[no_client] +`, + wantErr: true, + }, + } + pattern := "netdata-godplugin-mysql-dsnFromFile-*" + dir, err := os.MkdirTemp(os.TempDir(), pattern) + require.NoError(t, err) + defer func() { _ = os.RemoveAll(dir) }() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + f, err := os.CreateTemp(dir, name) + require.NoError(t, err) + _ = f.Close() + defer func() { _ = os.Remove(f.Name()) }() + _ = os.WriteFile(f.Name(), []byte(test.config), 0644) + + if dsn, err := dsnFromFile(f.Name()); test.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedDSN, dsn) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/mysql.go b/src/go/collectors/go.d.plugin/modules/mysql/mysql.go new file mode 100644 index 00000000000000..c7016098f4f049 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/mysql.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "database/sql" + _ "embed" + "strings" + "sync" + "time" + + "github.com/blang/semver/v4" + "github.com/go-sql-driver/mysql" + _ "github.com/go-sql-driver/mysql" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed 
"config_schema.json" +var configSchema string + +func init() { + module.Register("mysql", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *MySQL { + return &MySQL{ + Config: Config{ + DSN: "root@tcp(localhost:3306)/", + Timeout: web.Duration{Duration: time.Second}, + }, + + charts: baseCharts.Copy(), + addInnoDBOSLogOnce: &sync.Once{}, + addBinlogOnce: &sync.Once{}, + addMyISAMOnce: &sync.Once{}, + addInnodbDeadlocksOnce: &sync.Once{}, + addGaleraOnce: &sync.Once{}, + addQCacheOnce: &sync.Once{}, + addTableOpenCacheOverflowsOnce: &sync.Once{}, + doSlaveStatus: true, + doUserStatistics: true, + collectedReplConns: make(map[string]bool), + collectedUsers: make(map[string]bool), + + recheckGlobalVarsEvery: time.Minute * 10, + } +} + +type Config struct { + DSN string `yaml:"dsn"` + MyCNF string `yaml:"my.cnf"` + UpdateEvery int `yaml:"update_every"` + Timeout web.Duration `yaml:"timeout"` +} + +type MySQL struct { + module.Base + Config `yaml:",inline"` + + db *sql.DB + safeDSN string + version *semver.Version + isMariaDB bool + isPercona bool + + charts *module.Charts + + addInnoDBOSLogOnce *sync.Once + addBinlogOnce *sync.Once + addMyISAMOnce *sync.Once + addInnodbDeadlocksOnce *sync.Once + addGaleraOnce *sync.Once + addQCacheOnce *sync.Once + addTableOpenCacheOverflowsOnce *sync.Once + + doSlaveStatus bool + collectedReplConns map[string]bool + doUserStatistics bool + collectedUsers map[string]bool + + recheckGlobalVarsTime time.Time + recheckGlobalVarsEvery time.Duration + varMaxConns int64 + varTableOpenCache int64 + varDisabledStorageEngine string + varLogBin string + varPerformanceSchema string +} + +func (m *MySQL) Init() bool { + if m.MyCNF != "" { + dsn, err := dsnFromFile(m.MyCNF) + if err != nil { + m.Error(err) + return false + } + m.DSN = dsn + } + + if m.DSN == "" { + m.Error("DSN not set") + return false + } + + cfg, err := mysql.ParseDSN(m.DSN) + if err != nil { + m.Errorf("error on parsing DSN: %v", err) + return false + } + + cfg.Passwd = strings.Repeat("*", len(cfg.Passwd)) + m.safeDSN = cfg.FormatDSN() + + m.Debugf("using DSN [%s]", m.DSN) + return true +} + +func (m *MySQL) Check() bool { + return len(m.Collect()) > 0 +} + +func (m *MySQL) Charts() *module.Charts { + return m.charts +} + +func (m *MySQL) Collect() map[string]int64 { + mx, err := m.collect() + if err != nil { + m.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (m *MySQL) Cleanup() { + if m.db == nil { + return + } + if err := m.db.Close(); err != nil { + m.Errorf("cleanup: error on closing the mysql database [%s]: %v", m.safeDSN, err) + } + m.db = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/mysql_test.go b/src/go/collectors/go.d.plugin/modules/mysql/mysql_test.go new file mode 100644 index 00000000000000..283b137702812b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/mysql_test.go @@ -0,0 +1,1713 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package mysql + +import ( + "bufio" + "bytes" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/blang/semver/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataMySQLV8030Version, _ = os.ReadFile("testdata/mysql/v8.0.30/version.txt") + dataMySQLV8030GlobalStatus, _ = os.ReadFile("testdata/mysql/v8.0.30/global_status.txt") + dataMySQLV8030GlobalVariables, _ 
= os.ReadFile("testdata/mysql/v8.0.30/global_variables.txt") + dataMySQLV8030ReplicaStatusMultiSource, _ = os.ReadFile("testdata/mysql/v8.0.30/replica_status_multi_source.txt") + dataMySQLV8030ProcessList, _ = os.ReadFile("testdata/mysql/v8.0.30/process_list.txt") + + dataPerconaV8029Version, _ = os.ReadFile("testdata/percona/v8.0.29/version.txt") + dataPerconaV8029GlobalStatus, _ = os.ReadFile("testdata/percona/v8.0.29/global_status.txt") + dataPerconaV8029GlobalVariables, _ = os.ReadFile("testdata/percona/v8.0.29/global_variables.txt") + dataPerconaV8029UserStatistics, _ = os.ReadFile("testdata/percona/v8.0.29/user_statistics.txt") + dataPerconaV8029ProcessList, _ = os.ReadFile("testdata/percona/v8.0.29/process_list.txt") + + dataMariaV5564Version, _ = os.ReadFile("testdata/mariadb/v5.5.64/version.txt") + dataMariaV5564GlobalStatus, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_status.txt") + dataMariaV5564GlobalVariables, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_variables.txt") + dataMariaV5564ProcessList, _ = os.ReadFile("testdata/mariadb/v5.5.64/process_list.txt") + + dataMariaV1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4/version.txt") + dataMariaV1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_status.txt") + dataMariaV1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_variables.txt") + dataMariaV1084AllSlavesStatusSingleSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt") + dataMariaV1084AllSlavesStatusMultiSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt") + dataMariaV1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4/user_statistics.txt") + dataMariaV1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4/process_list.txt") + + dataMariaGaleraClusterV1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/version.txt") + dataMariaGaleraClusterV1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_status.txt") + dataMariaGaleraClusterV1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt") + dataMariaGaleraClusterV1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt") + dataMariaGaleraClusterV1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/process_list.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataMySQLV8030Version": dataMySQLV8030Version, + "dataMySQLV8030GlobalStatus": dataMySQLV8030GlobalStatus, + "dataMySQLV8030GlobalVariables": dataMySQLV8030GlobalVariables, + "dataMySQLV8030ReplicaStatusMultiSource": dataMySQLV8030ReplicaStatusMultiSource, + "dataMySQLV8030ProcessList": dataMySQLV8030ProcessList, + + "dataPerconaV8029Version": dataPerconaV8029Version, + "dataPerconaV8029GlobalStatus": dataPerconaV8029GlobalStatus, + "dataPerconaV8029GlobalVariables": dataPerconaV8029GlobalVariables, + "dataPerconaV8029UserStatistics": dataPerconaV8029UserStatistics, + "dataPerconaV8029ProcessList": dataPerconaV8029ProcessList, + + "dataMariaV5564Version": dataMariaV5564Version, + "dataMariaV5564GlobalStatus": dataMariaV5564GlobalStatus, + "dataMariaV5564GlobalVariables": dataMariaV5564GlobalVariables, + "dataMariaV5564ProcessList": dataMariaV5564ProcessList, + + "dataMariaV1084Version": dataMariaV1084Version, + "dataMariaV1084GlobalStatus": dataMariaV1084GlobalStatus, + "dataMariaV1084GlobalVariables": dataMariaV1084GlobalVariables, + 
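+ // note: the v10.8.4 fixtures carry SHOW ALL SLAVES STATUS output in two
+ // variants, so both single-source and multi-source replication are covered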
"dataMariaV1084AllSlavesStatusSingleSource": dataMariaV1084AllSlavesStatusSingleSource, + "dataMariaV1084AllSlavesStatusMultiSource": dataMariaV1084AllSlavesStatusMultiSource, + "dataMariaV1084UserStatistics": dataMariaV1084UserStatistics, + "dataMariaV1084ProcessList": dataMariaV1084ProcessList, + + "dataMariaGaleraClusterV1084Version": dataMariaGaleraClusterV1084Version, + "dataMariaGaleraClusterV1084GlobalStatus": dataMariaGaleraClusterV1084GlobalStatus, + "dataMariaGaleraClusterV1084GlobalVariables": dataMariaGaleraClusterV1084GlobalVariables, + "dataMariaGaleraClusterV1084UserStatistics": dataMariaGaleraClusterV1084UserStatistics, + "dataMariaGaleraClusterV1084ProcessList": dataMariaGaleraClusterV1084ProcessList, + } { + require.NotNilf(t, data, fmt.Sprintf("read data: %s", name)) + _, err := prepareMockRows(data) + require.NoErrorf(t, err, fmt.Sprintf("prepare mock rows: %s", name)) + } +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestMySQL_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "empty DSN": { + config: Config{DSN: ""}, + wantFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mySQL := New() + mySQL.Config = test.config + + if test.wantFail { + assert.False(t, mySQL.Init()) + } else { + assert.True(t, mySQL.Init()) + } + }) + } +} + +func TestMySQL_Cleanup(t *testing.T) { + tests := map[string]func(t *testing.T) (mySQL *MySQL, cleanup func()){ + "db connection not initialized": func(t *testing.T) (mySQL *MySQL, cleanup func()) { + return New(), func() {} + }, + "db connection initialized": func(t *testing.T) (mySQL *MySQL, cleanup func()) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + + mock.ExpectClose() + mySQL = New() + mySQL.db = db + cleanup = func() { _ = db.Close() } + + return mySQL, cleanup + }, + } + + for name, prepare := range tests { + t.Run(name, func(t *testing.T) { + mySQL, cleanup := prepare(t) + defer cleanup() + + assert.NotPanics(t, mySQL.Cleanup) + assert.Nil(t, mySQL.db) + }) + } +} + +func TestMySQL_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestMySQL_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + wantFail bool + }{ + "success on all queries": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + }, + "fails when error on querying version": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpectErr(m, queryShowVersion) + }, + }, + "fails when error on querying global status": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpectErr(m, queryShowGlobalStatus) + }, + }, + "fails when error on querying global variables": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpectErr(m, queryShowGlobalStatus) + }, + }, + "success when error on querying 
slave status": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpectErr(m, queryShowAllSlavesStatus) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + }, + "success when error on querying user statistics": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource) + mockExpectErr(m, queryShowUserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + }, + "success when error on querying process list": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpectErr(m, queryShowProcessList) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + my := New() + my.db = db + defer func() { _ = db.Close() }() + + require.True(t, my.Init()) + + test.prepareMock(t, mock) + + if test.wantFail { + assert.False(t, my.Check()) + } else { + assert.True(t, my.Check()) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func TestMySQL_Collect(t *testing.T) { + type testCaseStep struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + check func(t *testing.T, my *MySQL) + } + tests := map[string][]testCaseStep{ + "MariaDB-Standalone[v5.5.46]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV5564Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV5564GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV5564GlobalVariables) + mockExpect(t, m, queryShowSlaveStatus, nil) + mockExpect(t, m, queryShowProcessList, dataMariaV5564ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 0, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 639, + "bytes_sent": 41620, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 4, + "com_update": 0, + "connections": 4, + "created_tmp_disk_tables": 0, + "created_tmp_files": 6, + "created_tmp_tables": 5, + "handler_commit": 0, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 0, + "handler_read_key": 0, + "handler_read_next": 0, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 1264, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 0, + "handler_write": 0, 
+ "innodb_buffer_pool_bytes_data": 2342912, + "innodb_buffer_pool_bytes_dirty": 0, + "innodb_buffer_pool_pages_data": 143, + "innodb_buffer_pool_pages_dirty": 0, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 16240, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 16383, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 459, + "innodb_buffer_pool_reads": 144, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 0, + "innodb_data_fsyncs": 3, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 4542976, + "innodb_data_reads": 155, + "innodb_data_writes": 3, + "innodb_data_written": 1536, + "innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 0, + "innodb_log_writes": 1, + "innodb_os_log_fsyncs": 3, + "innodb_os_log_pending_fsyncs": 0, + "innodb_os_log_pending_writes": 0, + "innodb_os_log_written": 512, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107171, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 100, + "max_used_connections": 1, + "open_files": 21, + "open_tables": 26, + "opened_files": 84, + "opened_tables": 0, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 67091120, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 4, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 12, + "questions": 11, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 5, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 36, + "table_locks_waited": 0, + "table_open_cache": 400, + "thread_cache_misses": 2500, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 1, + "threads_running": 1, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MariaDB-Standalone[v10.8.4]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, nil) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + + "aborted_connects": 2, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 81392, + "bytes_sent": 56794, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 6, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + 
"connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 12, + "created_tmp_disk_tables": 0, + "created_tmp_files": 5, + "created_tmp_tables": 2, + "handler_commit": 30, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 7, + "handler_read_key": 7, + "handler_read_next": 3, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 626, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 3, + "handler_write": 13, + "innodb_buffer_pool_bytes_data": 5062656, + "innodb_buffer_pool_bytes_dirty": 475136, + "innodb_buffer_pool_pages_data": 309, + "innodb_buffer_pool_pages_dirty": 29, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 7755, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 8064, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 1911, + "innodb_buffer_pool_reads": 171, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 148, + "innodb_data_fsyncs": 17, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 2801664, + "innodb_data_reads": 185, + "innodb_data_writes": 16, + "innodb_data_written": 0, + "innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 109, + "innodb_log_writes": 15, + "innodb_os_log_written": 6097, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107163, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 1, + "open_files": 29, + "open_tables": 10, + "opened_files": 100, + "opened_tables": 16, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 1031272, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 0, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 33, + "questions": 24, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 2, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 60, + "table_locks_waited": 0, + "table_open_cache": 2000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 1666, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 2, + "threads_running": 3, + "userstats_netdata_access_denied": 33, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 77, + "userstats_netdata_denied_connections": 49698, + "userstats_netdata_empty_queries": 66, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 0, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_deleted": 0, + "userstats_netdata_rows_inserted": 0, + "userstats_netdata_rows_read": 0, + "userstats_netdata_rows_sent": 99, + "userstats_netdata_rows_updated": 0, + 
"userstats_netdata_select_commands": 33, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + "userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 0, + "userstats_root_denied_connections": 0, + "userstats_root_empty_queries": 0, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 0, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_deleted": 0, + "userstats_root_rows_inserted": 0, + "userstats_root_rows_read": 0, + "userstats_root_rows_sent": 2, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 0, + "userstats_root_total_connections": 1, + "userstats_root_update_commands": 0, + "wsrep_cluster_size": 0, + "wsrep_cluster_status_disconnected": 1, + "wsrep_cluster_status_non_primary": 0, + "wsrep_cluster_status_primary": 0, + "wsrep_connected": 0, + "wsrep_local_bf_aborts": 0, + "wsrep_ready": 0, + "wsrep_thread_count": 0, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MariaDB-SingleSourceReplication[v10.8.4]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusSingleSource) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 2, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 81392, + "bytes_sent": 56794, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 6, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 12, + "created_tmp_disk_tables": 0, + "created_tmp_files": 5, + "created_tmp_tables": 2, + "handler_commit": 30, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 7, + "handler_read_key": 7, + "handler_read_next": 3, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 626, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 3, + "handler_write": 13, + "innodb_buffer_pool_bytes_data": 5062656, + "innodb_buffer_pool_bytes_dirty": 475136, + "innodb_buffer_pool_pages_data": 309, + "innodb_buffer_pool_pages_dirty": 29, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 7755, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 8064, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 1911, + "innodb_buffer_pool_reads": 171, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 148, + "innodb_data_fsyncs": 17, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + 
"innodb_data_pending_writes": 0, + "innodb_data_read": 2801664, + "innodb_data_reads": 185, + "innodb_data_writes": 16, + "innodb_data_written": 0, + "innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 109, + "innodb_log_writes": 15, + "innodb_os_log_written": 6097, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107163, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 1, + "open_files": 29, + "open_tables": 10, + "opened_files": 100, + "opened_tables": 16, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 1031272, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 0, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 33, + "questions": 24, + "seconds_behind_master": 0, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 2, + "slave_io_running": 1, + "slave_sql_running": 1, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 60, + "table_locks_waited": 0, + "table_open_cache": 2000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 1666, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 2, + "threads_running": 3, + "userstats_netdata_access_denied": 33, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 77, + "userstats_netdata_denied_connections": 49698, + "userstats_netdata_empty_queries": 66, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 0, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_deleted": 0, + "userstats_netdata_rows_inserted": 0, + "userstats_netdata_rows_read": 0, + "userstats_netdata_rows_sent": 99, + "userstats_netdata_rows_updated": 0, + "userstats_netdata_select_commands": 33, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + "userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 0, + "userstats_root_denied_connections": 0, + "userstats_root_empty_queries": 0, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 0, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_deleted": 0, + "userstats_root_rows_inserted": 0, + "userstats_root_rows_read": 0, + "userstats_root_rows_sent": 2, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 0, + "userstats_root_total_connections": 1, + "userstats_root_update_commands": 0, + "wsrep_cluster_size": 0, + "wsrep_cluster_status_disconnected": 1, + "wsrep_cluster_status_non_primary": 0, + "wsrep_cluster_status_primary": 0, + "wsrep_connected": 0, + "wsrep_local_bf_aborts": 0, + "wsrep_ready": 0, + "wsrep_thread_count": 0, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MariaDB-MultiSourceReplication[v10.8.4]: success on all queries": { 
+ { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 2, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 81392, + "bytes_sent": 56794, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 6, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 12, + "created_tmp_disk_tables": 0, + "created_tmp_files": 5, + "created_tmp_tables": 2, + "handler_commit": 30, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 7, + "handler_read_key": 7, + "handler_read_next": 3, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 626, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 3, + "handler_write": 13, + "innodb_buffer_pool_bytes_data": 5062656, + "innodb_buffer_pool_bytes_dirty": 475136, + "innodb_buffer_pool_pages_data": 309, + "innodb_buffer_pool_pages_dirty": 29, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 7755, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 8064, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 1911, + "innodb_buffer_pool_reads": 171, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 148, + "innodb_data_fsyncs": 17, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 2801664, + "innodb_data_reads": 185, + "innodb_data_writes": 16, + "innodb_data_written": 0, + "innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 109, + "innodb_log_writes": 15, + "innodb_os_log_written": 6097, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107163, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 1, + "open_files": 29, + "open_tables": 10, + "opened_files": 100, + "opened_tables": 16, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 1031272, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 0, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 33, + "questions": 24, + "seconds_behind_master_master1": 0, + "seconds_behind_master_master2": 0, + 
"select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 2, + "slave_io_running_master1": 1, + "slave_io_running_master2": 1, + "slave_sql_running_master1": 1, + "slave_sql_running_master2": 1, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 60, + "table_locks_waited": 0, + "table_open_cache": 2000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 1666, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 2, + "threads_running": 3, + "userstats_netdata_access_denied": 33, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 77, + "userstats_netdata_denied_connections": 49698, + "userstats_netdata_empty_queries": 66, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 0, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_deleted": 0, + "userstats_netdata_rows_inserted": 0, + "userstats_netdata_rows_read": 0, + "userstats_netdata_rows_sent": 99, + "userstats_netdata_rows_updated": 0, + "userstats_netdata_select_commands": 33, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + "userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 0, + "userstats_root_denied_connections": 0, + "userstats_root_empty_queries": 0, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 0, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_deleted": 0, + "userstats_root_rows_inserted": 0, + "userstats_root_rows_read": 0, + "userstats_root_rows_sent": 2, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 0, + "userstats_root_total_connections": 1, + "userstats_root_update_commands": 0, + "wsrep_cluster_size": 0, + "wsrep_cluster_status_disconnected": 1, + "wsrep_cluster_status_non_primary": 0, + "wsrep_cluster_status_primary": 0, + "wsrep_connected": 0, + "wsrep_local_bf_aborts": 0, + "wsrep_ready": 0, + "wsrep_thread_count": 0, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MariaDB-MultiSourceReplication[v10.8.4]: error on slaves status (no permissions)": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables) + mockExpectErr(m, queryShowAllSlavesStatus) + mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 2, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 81392, + "bytes_sent": 56794, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 6, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 12, + "created_tmp_disk_tables": 0, + 
"created_tmp_files": 5, + "created_tmp_tables": 2, + "handler_commit": 30, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 7, + "handler_read_key": 7, + "handler_read_next": 3, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 626, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 3, + "handler_write": 13, + "innodb_buffer_pool_bytes_data": 5062656, + "innodb_buffer_pool_bytes_dirty": 475136, + "innodb_buffer_pool_pages_data": 309, + "innodb_buffer_pool_pages_dirty": 29, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 7755, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 8064, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 1911, + "innodb_buffer_pool_reads": 171, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 148, + "innodb_data_fsyncs": 17, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 2801664, + "innodb_data_reads": 185, + "innodb_data_writes": 16, + "innodb_data_written": 0, + "innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 109, + "innodb_log_writes": 15, + "innodb_os_log_written": 6097, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107163, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 1, + "open_files": 29, + "open_tables": 10, + "opened_files": 100, + "opened_tables": 16, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 1031272, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 0, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 33, + "questions": 24, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 2, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 60, + "table_locks_waited": 0, + "table_open_cache": 2000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 1666, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 2, + "threads_running": 3, + "userstats_netdata_access_denied": 33, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 77, + "userstats_netdata_denied_connections": 49698, + "userstats_netdata_empty_queries": 66, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 0, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_deleted": 0, + "userstats_netdata_rows_inserted": 0, + "userstats_netdata_rows_read": 0, + "userstats_netdata_rows_sent": 99, + "userstats_netdata_rows_updated": 0, + "userstats_netdata_select_commands": 33, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + 
"userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 0, + "userstats_root_denied_connections": 0, + "userstats_root_empty_queries": 0, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 0, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_deleted": 0, + "userstats_root_rows_inserted": 0, + "userstats_root_rows_read": 0, + "userstats_root_rows_sent": 2, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 0, + "userstats_root_total_connections": 1, + "userstats_root_update_commands": 0, + "wsrep_cluster_size": 0, + "wsrep_cluster_status_disconnected": 1, + "wsrep_cluster_status_non_primary": 0, + "wsrep_cluster_status_primary": 0, + "wsrep_connected": 0, + "wsrep_local_bf_aborts": 0, + "wsrep_ready": 0, + "wsrep_thread_count": 0, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MariaDB-GaleraCluster[v10.8.4]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMariaGaleraClusterV1084Version) + mockExpect(t, m, queryShowGlobalStatus, dataMariaGaleraClusterV1084GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMariaGaleraClusterV1084GlobalVariables) + mockExpect(t, m, queryShowAllSlavesStatus, nil) + mockExpect(t, m, queryShowUserStatistics, dataMariaGaleraClusterV1084UserStatistics) + mockExpect(t, m, queryShowProcessList, dataMariaGaleraClusterV1084ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 0, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 3009, + "bytes_sent": 228856, + "com_delete": 6, + "com_insert": 0, + "com_replace": 0, + "com_select": 12, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 15, + "created_tmp_disk_tables": 4, + "created_tmp_files": 5, + "created_tmp_tables": 17, + "handler_commit": 37, + "handler_delete": 7, + "handler_prepare": 0, + "handler_read_first": 3, + "handler_read_key": 9, + "handler_read_next": 1, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 6222, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 0, + "handler_write": 9, + "innodb_buffer_pool_bytes_data": 5193728, + "innodb_buffer_pool_bytes_dirty": 2260992, + "innodb_buffer_pool_pages_data": 317, + "innodb_buffer_pool_pages_dirty": 138, + "innodb_buffer_pool_pages_flushed": 0, + "innodb_buffer_pool_pages_free": 7747, + "innodb_buffer_pool_pages_misc": 0, + "innodb_buffer_pool_pages_total": 8064, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 2298, + "innodb_buffer_pool_reads": 184, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 203, + "innodb_data_fsyncs": 15, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 3014656, + "innodb_data_reads": 201, + "innodb_data_writes": 14, + "innodb_data_written": 0, + 
"innodb_deadlocks": 0, + "innodb_log_waits": 0, + "innodb_log_write_requests": 65, + "innodb_log_writes": 13, + "innodb_os_log_written": 4785, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 107163, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 1, + "open_files": 7, + "open_tables": 0, + "opened_files": 125, + "opened_tables": 24, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "qcache_free_blocks": 1, + "qcache_free_memory": 1031272, + "qcache_hits": 0, + "qcache_inserts": 0, + "qcache_lowmem_prunes": 0, + "qcache_not_cached": 0, + "qcache_queries_in_cache": 0, + "qcache_total_blocks": 1, + "queries": 75, + "questions": 62, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 17, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 17, + "table_locks_waited": 0, + "table_open_cache": 2000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 4000, + "threads_cached": 0, + "threads_connected": 1, + "threads_created": 6, + "threads_running": 1, + "userstats_netdata_access_denied": 33, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 77, + "userstats_netdata_denied_connections": 49698, + "userstats_netdata_empty_queries": 66, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 0, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_deleted": 0, + "userstats_netdata_rows_inserted": 0, + "userstats_netdata_rows_read": 0, + "userstats_netdata_rows_sent": 99, + "userstats_netdata_rows_updated": 0, + "userstats_netdata_select_commands": 33, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + "userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 0, + "userstats_root_denied_connections": 0, + "userstats_root_empty_queries": 0, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 0, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_deleted": 0, + "userstats_root_rows_inserted": 0, + "userstats_root_rows_read": 0, + "userstats_root_rows_sent": 2, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 0, + "userstats_root_total_connections": 1, + "userstats_root_update_commands": 0, + "wsrep_cluster_size": 3, + "wsrep_cluster_status_disconnected": 0, + "wsrep_cluster_status_non_primary": 0, + "wsrep_cluster_status_primary": 1, + "wsrep_cluster_weight": 3, + "wsrep_connected": 1, + "wsrep_flow_control_paused_ns": 0, + "wsrep_local_bf_aborts": 0, + "wsrep_local_cert_failures": 0, + "wsrep_local_recv_queue": 0, + "wsrep_local_send_queue": 0, + "wsrep_local_state_donor": 0, + "wsrep_local_state_error": 0, + "wsrep_local_state_joined": 0, + "wsrep_local_state_joiner": 0, + "wsrep_local_state_synced": 1, + "wsrep_local_state_undefined": 0, + "wsrep_open_transactions": 0, + "wsrep_ready": 1, + "wsrep_received": 11, + "wsrep_received_bytes": 1410, + "wsrep_replicated": 0, + 
"wsrep_replicated_bytes": 0, + "wsrep_thread_count": 5, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "MySQL-MultiSourceReplication[v8.0.30]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataMySQLV8030Version) + mockExpect(t, m, queryShowGlobalStatus, dataMySQLV8030GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataMySQLV8030GlobalVariables) + mockExpect(t, m, queryShowReplicaStatus, dataMySQLV8030ReplicaStatusMultiSource) + mockExpect(t, m, queryShowProcessListPS, dataMySQLV8030ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 0, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 6, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 5584, + "bytes_sent": 70700, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 2, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 25, + "created_tmp_disk_tables": 0, + "created_tmp_files": 5, + "created_tmp_tables": 6, + "handler_commit": 720, + "handler_delete": 8, + "handler_prepare": 24, + "handler_read_first": 50, + "handler_read_key": 1914, + "handler_read_next": 4303, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 4723, + "handler_rollback": 1, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 373, + "handler_write": 1966, + "innodb_buffer_pool_bytes_data": 17121280, + "innodb_buffer_pool_bytes_dirty": 0, + "innodb_buffer_pool_pages_data": 1045, + "innodb_buffer_pool_pages_dirty": 0, + "innodb_buffer_pool_pages_flushed": 361, + "innodb_buffer_pool_pages_free": 7143, + "innodb_buffer_pool_pages_misc": 4, + "innodb_buffer_pool_pages_total": 8192, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 16723, + "innodb_buffer_pool_reads": 878, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 2377, + "innodb_data_fsyncs": 255, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 14453760, + "innodb_data_reads": 899, + "innodb_data_writes": 561, + "innodb_data_written": 6128128, + "innodb_log_waits": 0, + "innodb_log_write_requests": 1062, + "innodb_log_writes": 116, + "innodb_os_log_fsyncs": 69, + "innodb_os_log_pending_fsyncs": 0, + "innodb_os_log_pending_writes": 0, + "innodb_os_log_written": 147968, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 0, + "innodb_rows_read": 0, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 6698, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 2, + "open_files": 8, + "open_tables": 127, + "opened_files": 8, + "opened_tables": 208, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "queries": 27, + "questions": 15, + 
"seconds_behind_master_master1": 0, + "seconds_behind_master_master2": 0, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 12, + "slave_io_running_master1": 1, + "slave_io_running_master2": 1, + "slave_sql_running_master1": 1, + "slave_sql_running_master2": 1, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 0, + "table_locks_immediate": 6, + "table_locks_waited": 0, + "table_open_cache": 4000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 800, + "threads_cached": 1, + "threads_connected": 1, + "threads_created": 2, + "threads_running": 2, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + "Percona-Standalone[v8.0.29]: success on all queries": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataPerconaV8029Version) + mockExpect(t, m, queryShowGlobalStatus, dataPerconaV8029GlobalStatus) + mockExpect(t, m, queryShowGlobalVariables, dataPerconaV8029GlobalVariables) + mockExpect(t, m, queryShowReplicaStatus, nil) + mockExpect(t, m, queryShowUserStatistics, dataPerconaV8029UserStatistics) + mockExpect(t, m, queryShowProcessListPS, dataPerconaV8029ProcessList) + }, + check: func(t *testing.T, my *MySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "aborted_connects": 1, + "binlog_cache_disk_use": 0, + "binlog_cache_use": 0, + "binlog_stmt_cache_disk_use": 0, + "binlog_stmt_cache_use": 0, + "bytes_received": 682970, + "bytes_sent": 33668405, + "com_delete": 0, + "com_insert": 0, + "com_replace": 0, + "com_select": 1687, + "com_update": 0, + "connection_errors_accept": 0, + "connection_errors_internal": 0, + "connection_errors_max_connections": 0, + "connection_errors_peer_address": 0, + "connection_errors_select": 0, + "connection_errors_tcpwrap": 0, + "connections": 13, + "created_tmp_disk_tables": 1683, + "created_tmp_files": 5, + "created_tmp_tables": 5054, + "handler_commit": 576, + "handler_delete": 0, + "handler_prepare": 0, + "handler_read_first": 1724, + "handler_read_key": 3439, + "handler_read_next": 4147, + "handler_read_prev": 0, + "handler_read_rnd": 0, + "handler_read_rnd_next": 2983285, + "handler_rollback": 0, + "handler_savepoint": 0, + "handler_savepoint_rollback": 0, + "handler_update": 317, + "handler_write": 906501, + "innodb_buffer_pool_bytes_data": 18399232, + "innodb_buffer_pool_bytes_dirty": 49152, + "innodb_buffer_pool_pages_data": 1123, + "innodb_buffer_pool_pages_dirty": 3, + "innodb_buffer_pool_pages_flushed": 205, + "innodb_buffer_pool_pages_free": 7064, + "innodb_buffer_pool_pages_misc": 5, + "innodb_buffer_pool_pages_total": 8192, + "innodb_buffer_pool_read_ahead": 0, + "innodb_buffer_pool_read_ahead_evicted": 0, + "innodb_buffer_pool_read_ahead_rnd": 0, + "innodb_buffer_pool_read_requests": 109817, + "innodb_buffer_pool_reads": 978, + "innodb_buffer_pool_wait_free": 0, + "innodb_buffer_pool_write_requests": 77412, + "innodb_data_fsyncs": 50, + "innodb_data_pending_fsyncs": 0, + "innodb_data_pending_reads": 0, + "innodb_data_pending_writes": 0, + "innodb_data_read": 16094208, + "innodb_data_reads": 1002, + "innodb_data_writes": 288, + "innodb_data_written": 3420160, + "innodb_log_waits": 0, + "innodb_log_write_requests": 651, + "innodb_log_writes": 47, + "innodb_os_log_fsyncs": 13, + "innodb_os_log_pending_fsyncs": 0, + "innodb_os_log_pending_writes": 0, + "innodb_os_log_written": 
45568, + "innodb_row_lock_current_waits": 0, + "innodb_rows_deleted": 0, + "innodb_rows_inserted": 5055, + "innodb_rows_read": 5055, + "innodb_rows_updated": 0, + "key_blocks_not_flushed": 0, + "key_blocks_unused": 6698, + "key_blocks_used": 0, + "key_read_requests": 0, + "key_reads": 0, + "key_write_requests": 0, + "key_writes": 0, + "max_connections": 151, + "max_used_connections": 3, + "open_files": 2, + "open_tables": 77, + "opened_files": 2, + "opened_tables": 158, + "process_list_fetch_query_duration": 0, + "process_list_longest_query_duration": 9, + "process_list_queries_count_system": 0, + "process_list_queries_count_user": 2, + "queries": 6748, + "questions": 6746, + "select_full_join": 0, + "select_full_range_join": 0, + "select_range": 0, + "select_range_check": 0, + "select_scan": 8425, + "slow_queries": 0, + "sort_merge_passes": 0, + "sort_range": 0, + "sort_scan": 1681, + "table_locks_immediate": 3371, + "table_locks_waited": 0, + "table_open_cache": 4000, + "table_open_cache_overflows": 0, + "thread_cache_misses": 2307, + "threads_cached": 1, + "threads_connected": 2, + "threads_created": 3, + "threads_running": 2, + "userstats_netdata_access_denied": 0, + "userstats_netdata_binlog_bytes_written": 0, + "userstats_netdata_commit_transactions": 0, + "userstats_netdata_cpu_time": 0, + "userstats_netdata_denied_connections": 0, + "userstats_netdata_empty_queries": 0, + "userstats_netdata_lost_connections": 0, + "userstats_netdata_other_commands": 1, + "userstats_netdata_rollback_transactions": 0, + "userstats_netdata_rows_fetched": 1, + "userstats_netdata_rows_updated": 0, + "userstats_netdata_select_commands": 1, + "userstats_netdata_total_connections": 1, + "userstats_netdata_update_commands": 0, + "userstats_root_access_denied": 0, + "userstats_root_binlog_bytes_written": 0, + "userstats_root_commit_transactions": 0, + "userstats_root_cpu_time": 151, + "userstats_root_denied_connections": 1, + "userstats_root_empty_queries": 36, + "userstats_root_lost_connections": 0, + "userstats_root_other_commands": 110, + "userstats_root_rollback_transactions": 0, + "userstats_root_rows_fetched": 1, + "userstats_root_rows_updated": 0, + "userstats_root_select_commands": 37, + "userstats_root_total_connections": 2, + "userstats_root_update_commands": 0, + } + + copyProcessListQueryDuration(mx, expected) + require.Equal(t, expected, mx) + ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + my := New() + my.db = db + defer func() { _ = db.Close() }() + + require.True(t, my.Init()) + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepareMock(t, mock) + step.check(t, my) + }) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, mySQL *MySQL, collected map[string]int64) { + for _, chart := range *mySQL.Charts() { + if mySQL.isMariaDB { + // https://mariadb.com/kb/en/server-status-variables/#connection_errors_accept + if mySQL.version.LT(semver.Version{Major: 10, Minor: 0, Patch: 4}) && chart.ID == "connection_errors" { + continue + } + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + 
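// chart variables, like dimensions, must have a corresponding value in the collected map +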
assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func copyProcessListQueryDuration(dst, src map[string]int64) { + if _, ok := dst["process_list_fetch_query_duration"]; !ok { + return + } + if _, ok := src["process_list_fetch_query_duration"]; !ok { + return + } + dst["process_list_fetch_query_duration"] = src["process_list_fetch_query_duration"] +} + +func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows { + rows, err := prepareMockRows(data) + require.NoError(t, err) + return rows +} + +func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) { + mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed() +} + +func mockExpectErr(mock sqlmock.Sqlmock, query string) { + mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query)) +} + +func prepareMockRows(data []byte) (*sqlmock.Rows, error) { + if len(data) == 0 { + return sqlmock.NewRows(nil), nil + } + + r := bytes.NewReader(data) + sc := bufio.NewScanner(r) + + var numColumns int + var rows *sqlmock.Rows + + for sc.Scan() { + s := strings.TrimSpace(strings.Trim(sc.Text(), "|")) + switch { + case s == "", + strings.HasPrefix(s, "+"), + strings.HasPrefix(s, "ft_boolean_syntax"): + continue + } + + parts := strings.Split(s, "|") + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + if rows == nil { + numColumns = len(parts) + rows = sqlmock.NewRows(parts) + continue + } + + if len(parts) != numColumns { + return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts)) + } + + values := make([]driver.Value, len(parts)) + for i, v := range parts { + values[i] = v + } + rows.AddRow(values...) + } + + if rows == nil { + return nil, errors.New("prepareMockRows(): nil rows result") + } + + return rows, sc.Err() +} diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt new file mode 100644 index 00000000000000..8a6b691cd5cbd2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt @@ -0,0 +1,621 @@ ++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ +| Variable_name | Value | ++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ +| Aborted_clients | 0 | +| Aborted_connects | 0 | +| Aborted_connects_preauth | 0 | +| Access_denied_errors | 0 | +| Acl_column_grants | 0 | +| Acl_database_grants | 3 | +| Acl_function_grants | 0 | +| Acl_procedure_grants | 0 | +| Acl_package_spec_grants | 0 | +| Acl_package_body_grants | 0 | +| Acl_proxy_users | 1 | +| Acl_role_grants | 0 | +| Acl_roles | 0 | +| Acl_table_grants | 1 | +| Acl_users | 6 | +| Aria_pagecache_blocks_not_flushed | 0 | +| Aria_pagecache_blocks_unused | 15647 | +| Aria_pagecache_blocks_used | 13 | +| Aria_pagecache_read_requests | 306 | +| Aria_pagecache_reads | 17 | +| Aria_pagecache_write_requests | 8 | +| Aria_pagecache_writes | 8 | +| Aria_transaction_log_syncs | 0 | +| Binlog_commits | 0 | +| Binlog_group_commits | 0 | +| Binlog_group_commit_trigger_count | 0 | +| 
Binlog_group_commit_trigger_lock_wait | 0 | +| Binlog_group_commit_trigger_timeout | 0 | +| Binlog_snapshot_file | mysql-bin.000005 | +| Binlog_snapshot_position | 385 | +| Binlog_bytes_written | 183 | +| Binlog_cache_disk_use | 0 | +| Binlog_cache_use | 0 | +| Binlog_stmt_cache_disk_use | 0 | +| Binlog_stmt_cache_use | 0 | +| Busy_time | 0.000000 | +| Bytes_received | 3009 | +| Bytes_sent | 228856 | +| Column_compressions | 0 | +| Column_decompressions | 0 | +| Com_admin_commands | 0 | +| Com_alter_db | 0 | +| Com_alter_db_upgrade | 0 | +| Com_alter_event | 0 | +| Com_alter_function | 0 | +| Com_alter_procedure | 0 | +| Com_alter_server | 0 | +| Com_alter_sequence | 0 | +| Com_alter_table | 3 | +| Com_alter_user | 0 | +| Com_analyze | 0 | +| Com_assign_to_keycache | 0 | +| Com_backup | 6 | +| Com_backup_lock | 0 | +| Com_begin | 0 | +| Com_binlog | 0 | +| Com_call_procedure | 0 | +| Com_change_db | 0 | +| Com_change_master | 0 | +| Com_check | 0 | +| Com_checksum | 0 | +| Com_commit | 0 | +| Com_compound_sql | 0 | +| Com_create_db | 1 | +| Com_create_event | 0 | +| Com_create_function | 0 | +| Com_create_index | 0 | +| Com_create_package | 0 | +| Com_create_package_body | 0 | +| Com_create_procedure | 0 | +| Com_create_role | 0 | +| Com_create_sequence | 0 | +| Com_create_server | 0 | +| Com_create_table | 3 | +| Com_create_temporary_table | 0 | +| Com_create_trigger | 0 | +| Com_create_udf | 0 | +| Com_create_user | 0 | +| Com_create_view | 0 | +| Com_dealloc_sql | 0 | +| Com_delete | 6 | +| Com_delete_multi | 0 | +| Com_do | 0 | +| Com_drop_db | 0 | +| Com_drop_event | 0 | +| Com_drop_function | 0 | +| Com_drop_index | 0 | +| Com_drop_procedure | 0 | +| Com_drop_package | 0 | +| Com_drop_package_body | 0 | +| Com_drop_role | 0 | +| Com_drop_server | 0 | +| Com_drop_sequence | 0 | +| Com_drop_table | 0 | +| Com_drop_temporary_table | 0 | +| Com_drop_trigger | 0 | +| Com_drop_user | 0 | +| Com_drop_view | 0 | +| Com_empty_query | 0 | +| Com_execute_immediate | 0 | +| Com_execute_sql | 0 | +| Com_flush | 4 | +| Com_get_diagnostics | 0 | +| Com_grant | 0 | +| Com_grant_role | 0 | +| Com_ha_close | 0 | +| Com_ha_open | 0 | +| Com_ha_read | 0 | +| Com_help | 0 | +| Com_insert | 0 | +| Com_insert_select | 0 | +| Com_install_plugin | 0 | +| Com_kill | 0 | +| Com_load | 0 | +| Com_lock_tables | 0 | +| Com_optimize | 0 | +| Com_preload_keys | 0 | +| Com_prepare_sql | 0 | +| Com_purge | 0 | +| Com_purge_before_date | 0 | +| Com_release_savepoint | 0 | +| Com_rename_table | 0 | +| Com_rename_user | 0 | +| Com_repair | 0 | +| Com_replace | 0 | +| Com_replace_select | 0 | +| Com_reset | 0 | +| Com_resignal | 0 | +| Com_revoke | 0 | +| Com_revoke_all | 0 | +| Com_revoke_role | 0 | +| Com_rollback | 0 | +| Com_rollback_to_savepoint | 0 | +| Com_savepoint | 0 | +| Com_select | 12 | +| Com_set_option | 6 | +| Com_show_authors | 0 | +| Com_show_binlog_events | 0 | +| Com_show_binlogs | 0 | +| Com_show_charsets | 0 | +| Com_show_collations | 0 | +| Com_show_contributors | 0 | +| Com_show_create_db | 0 | +| Com_show_create_event | 0 | +| Com_show_create_func | 0 | +| Com_show_create_package | 0 | +| Com_show_create_package_body | 0 | +| Com_show_create_proc | 0 | +| Com_show_create_table | 0 | +| Com_show_create_trigger | 0 | +| Com_show_create_user | 0 | +| Com_show_databases | 1 | +| Com_show_engine_logs | 0 | +| Com_show_engine_mutex | 0 | +| Com_show_engine_status | 2 | +| Com_show_errors | 0 | +| Com_show_events | 0 | +| Com_show_explain | 0 | +| Com_show_fields | 0 | +| Com_show_function_status | 0 | 
+| Com_show_generic | 0 | +| Com_show_grants | 2 | +| Com_show_keys | 0 | +| Com_show_binlog_status | 6 | +| Com_show_open_tables | 0 | +| Com_show_package_status | 0 | +| Com_show_package_body_status | 0 | +| Com_show_plugins | 0 | +| Com_show_privileges | 0 | +| Com_show_procedure_status | 0 | +| Com_show_processlist | 0 | +| Com_show_profile | 0 | +| Com_show_profiles | 0 | +| Com_show_relaylog_events | 0 | +| Com_show_slave_hosts | 0 | +| Com_show_slave_status | 6 | +| Com_show_status | 6 | +| Com_show_storage_engines | 0 | +| Com_show_table_status | 0 | +| Com_show_tables | 0 | +| Com_show_triggers | 0 | +| Com_show_variables | 6 | +| Com_show_warnings | 0 | +| Com_shutdown | 0 | +| Com_signal | 0 | +| Com_start_all_slaves | 0 | +| Com_start_slave | 0 | +| Com_stmt_close | 0 | +| Com_stmt_execute | 0 | +| Com_stmt_fetch | 0 | +| Com_stmt_prepare | 0 | +| Com_stmt_reprepare | 0 | +| Com_stmt_reset | 0 | +| Com_stmt_send_long_data | 0 | +| Com_stop_all_slaves | 0 | +| Com_stop_slave | 0 | +| Com_truncate | 0 | +| Com_uninstall_plugin | 0 | +| Com_unlock_tables | 0 | +| Com_update | 0 | +| Com_update_multi | 0 | +| Com_xa_commit | 0 | +| Com_xa_end | 0 | +| Com_xa_prepare | 0 | +| Com_xa_recover | 0 | +| Com_xa_rollback | 0 | +| Com_xa_start | 0 | +| Compression | OFF | +| Connection_errors_accept | 0 | +| Connection_errors_internal | 0 | +| Connection_errors_max_connections | 0 | +| Connection_errors_peer_address | 0 | +| Connection_errors_select | 0 | +| Connection_errors_tcpwrap | 0 | +| Connections | 15 | +| Cpu_time | 0.000000 | +| Created_tmp_disk_tables | 4 | +| Created_tmp_files | 5 | +| Created_tmp_tables | 17 | +| Delayed_errors | 0 | +| Delayed_insert_threads | 0 | +| Delayed_writes | 0 | +| Delete_scan | 6 | +| Empty_queries | 2 | +| Executed_events | 0 | +| Executed_triggers | 0 | +| Feature_application_time_periods | 0 | +| Feature_check_constraint | 1 | +| Feature_custom_aggregate_functions | 0 | +| Feature_delay_key_write | 0 | +| Feature_dynamic_columns | 0 | +| Feature_fulltext | 0 | +| Feature_gis | 0 | +| Feature_insert_returning | 0 | +| Feature_invisible_columns | 0 | +| Feature_json | 1 | +| Feature_locale | 0 | +| Feature_subquery | 0 | +| Feature_system_versioning | 0 | +| Feature_timezone | 0 | +| Feature_trigger | 0 | +| Feature_window_functions | 0 | +| Feature_xml | 0 | +| Handler_commit | 37 | +| Handler_delete | 7 | +| Handler_discover | 0 | +| Handler_external_lock | 0 | +| Handler_icp_attempts | 0 | +| Handler_icp_match | 0 | +| Handler_mrr_init | 0 | +| Handler_mrr_key_refills | 0 | +| Handler_mrr_rowid_refills | 0 | +| Handler_prepare | 0 | +| Handler_read_first | 3 | +| Handler_read_key | 9 | +| Handler_read_last | 0 | +| Handler_read_next | 1 | +| Handler_read_prev | 0 | +| Handler_read_retry | 0 | +| Handler_read_rnd | 0 | +| Handler_read_rnd_deleted | 0 | +| Handler_read_rnd_next | 6222 | +| Handler_rollback | 0 | +| Handler_savepoint | 0 | +| Handler_savepoint_rollback | 0 | +| Handler_tmp_delete | 0 | +| Handler_tmp_update | 0 | +| Handler_tmp_write | 6165 | +| Handler_update | 0 | +| Handler_write | 9 | +| Innodb_adaptive_hash_hash_searches | 0 | +| Innodb_adaptive_hash_non_hash_searches | 0 | +| Innodb_background_log_sync | 896 | +| Innodb_buffer_pool_dump_status | | +| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220817 19:46:29 | +| Innodb_buffer_pool_resize_status | | +| Innodb_buffer_pool_load_incomplete | OFF | +| Innodb_buffer_pool_pages_data | 317 | +| Innodb_buffer_pool_bytes_data | 5193728 | +| 
Innodb_buffer_pool_pages_dirty | 138 | +| Innodb_buffer_pool_bytes_dirty | 2260992 | +| Innodb_buffer_pool_pages_flushed | 0 | +| Innodb_buffer_pool_pages_free | 7747 | +| Innodb_buffer_pool_pages_made_not_young | 0 | +| Innodb_buffer_pool_pages_made_young | 0 | +| Innodb_buffer_pool_pages_misc | 0 | +| Innodb_buffer_pool_pages_old | 0 | +| Innodb_buffer_pool_pages_total | 8064 | +| Innodb_buffer_pool_pages_lru_flushed | 0 | +| Innodb_buffer_pool_pages_lru_freed | 0 | +| Innodb_buffer_pool_read_ahead_rnd | 0 | +| Innodb_buffer_pool_read_ahead | 0 | +| Innodb_buffer_pool_read_ahead_evicted | 0 | +| Innodb_buffer_pool_read_requests | 2298 | +| Innodb_buffer_pool_reads | 184 | +| Innodb_buffer_pool_wait_free | 0 | +| Innodb_buffer_pool_write_requests | 203 | +| Innodb_checkpoint_age | 4785 | +| Innodb_checkpoint_max_age | 80819529 | +| Innodb_data_fsyncs | 15 | +| Innodb_data_pending_fsyncs | 0 | +| Innodb_data_pending_reads | 0 | +| Innodb_data_pending_writes | 0 | +| Innodb_data_read | 3014656 | +| Innodb_data_reads | 201 | +| Innodb_data_writes | 14 | +| Innodb_data_written | 0 | +| Innodb_dblwr_pages_written | 0 | +| Innodb_dblwr_writes | 0 | +| Innodb_deadlocks | 0 | +| Innodb_history_list_length | 1 | +| Innodb_ibuf_discarded_delete_marks | 0 | +| Innodb_ibuf_discarded_deletes | 0 | +| Innodb_ibuf_discarded_inserts | 0 | +| Innodb_ibuf_free_list | 0 | +| Innodb_ibuf_merged_delete_marks | 0 | +| Innodb_ibuf_merged_deletes | 0 | +| Innodb_ibuf_merged_inserts | 0 | +| Innodb_ibuf_merges | 0 | +| Innodb_ibuf_segment_size | 2 | +| Innodb_ibuf_size | 1 | +| Innodb_log_waits | 0 | +| Innodb_log_write_requests | 65 | +| Innodb_log_writes | 13 | +| Innodb_lsn_current | 73172 | +| Innodb_lsn_flushed | 73172 | +| Innodb_lsn_last_checkpoint | 68387 | +| Innodb_master_thread_active_loops | 0 | +| Innodb_master_thread_idle_loops | 896 | +| Innodb_max_trx_id | 38 | +| Innodb_mem_adaptive_hash | 0 | +| Innodb_mem_dictionary | 862248 | +| Innodb_os_log_written | 4785 | +| Innodb_page_size | 16384 | +| Innodb_pages_created | 133 | +| Innodb_pages_read | 184 | +| Innodb_pages_written | 0 | +| Innodb_row_lock_current_waits | 0 | +| Innodb_row_lock_time | 0 | +| Innodb_row_lock_time_avg | 0 | +| Innodb_row_lock_time_max | 0 | +| Innodb_row_lock_waits | 0 | +| Innodb_rows_deleted | 0 | +| Innodb_rows_inserted | 0 | +| Innodb_rows_read | 0 | +| Innodb_rows_updated | 0 | +| Innodb_system_rows_deleted | 7 | +| Innodb_system_rows_inserted | 9 | +| Innodb_system_rows_read | 15 | +| Innodb_system_rows_updated | 0 | +| Innodb_num_open_files | 9 | +| Innodb_truncated_status_writes | 0 | +| Innodb_available_undo_logs | 128 | +| Innodb_undo_truncations | 0 | +| Innodb_page_compression_saved | 0 | +| Innodb_num_pages_page_compressed | 0 | +| Innodb_num_page_compressed_trim_op | 0 | +| Innodb_num_pages_page_decompressed | 0 | +| Innodb_num_pages_page_compression_error | 0 | +| Innodb_num_pages_encrypted | 0 | +| Innodb_num_pages_decrypted | 0 | +| Innodb_have_lz4 | OFF | +| Innodb_have_lzo | OFF | +| Innodb_have_lzma | OFF | +| Innodb_have_bzip2 | OFF | +| Innodb_have_snappy | OFF | +| Innodb_have_punch_hole | ON | +| Innodb_defragment_compression_failures | 0 | +| Innodb_defragment_failures | 0 | +| Innodb_defragment_count | 0 | +| Innodb_instant_alter_column | 0 | +| Innodb_onlineddl_rowlog_rows | 0 | +| Innodb_onlineddl_rowlog_pct_used | 0 | +| Innodb_onlineddl_pct_progress | 0 | +| Innodb_secondary_index_triggered_cluster_reads | 0 | +| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 | +| 
Innodb_encryption_rotation_pages_read_from_cache | 0 | +| Innodb_encryption_rotation_pages_read_from_disk | 0 | +| Innodb_encryption_rotation_pages_modified | 0 | +| Innodb_encryption_rotation_pages_flushed | 0 | +| Innodb_encryption_rotation_estimated_iops | 0 | +| Innodb_encryption_n_merge_blocks_encrypted | 0 | +| Innodb_encryption_n_merge_blocks_decrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_encrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_decrypted | 0 | +| Innodb_encryption_n_temp_blocks_encrypted | 0 | +| Innodb_encryption_n_temp_blocks_decrypted | 0 | +| Innodb_encryption_num_key_requests | 0 | +| Key_blocks_not_flushed | 0 | +| Key_blocks_unused | 107163 | +| Key_blocks_used | 0 | +| Key_blocks_warm | 0 | +| Key_read_requests | 0 | +| Key_reads | 0 | +| Key_write_requests | 0 | +| Key_writes | 0 | +| Last_query_cost | 0.000000 | +| Master_gtid_wait_count | 0 | +| Master_gtid_wait_time | 0 | +| Master_gtid_wait_timeouts | 0 | +| Max_statement_time_exceeded | 0 | +| Max_used_connections | 1 | +| Memory_used | 35590104 | +| Memory_used_initial | 35583712 | +| Not_flushed_delayed_rows | 0 | +| Open_files | 7 | +| Open_streams | 4 | +| Open_table_definitions | 0 | +| Open_tables | 0 | +| Opened_files | 125 | +| Opened_plugin_libraries | 1 | +| Opened_table_definitions | 27 | +| Opened_tables | 24 | +| Opened_views | 0 | +| Performance_schema_accounts_lost | 0 | +| Performance_schema_cond_classes_lost | 0 | +| Performance_schema_cond_instances_lost | 0 | +| Performance_schema_digest_lost | 0 | +| Performance_schema_file_classes_lost | 0 | +| Performance_schema_file_handles_lost | 0 | +| Performance_schema_file_instances_lost | 0 | +| Performance_schema_hosts_lost | 0 | +| Performance_schema_index_stat_lost | 0 | +| Performance_schema_locker_lost | 0 | +| Performance_schema_memory_classes_lost | 0 | +| Performance_schema_metadata_lock_lost | 0 | +| Performance_schema_mutex_classes_lost | 0 | +| Performance_schema_mutex_instances_lost | 0 | +| Performance_schema_nested_statement_lost | 0 | +| Performance_schema_prepared_statements_lost | 0 | +| Performance_schema_program_lost | 0 | +| Performance_schema_rwlock_classes_lost | 0 | +| Performance_schema_rwlock_instances_lost | 0 | +| Performance_schema_session_connect_attrs_lost | 0 | +| Performance_schema_socket_classes_lost | 0 | +| Performance_schema_socket_instances_lost | 0 | +| Performance_schema_stage_classes_lost | 0 | +| Performance_schema_statement_classes_lost | 0 | +| Performance_schema_table_handles_lost | 0 | +| Performance_schema_table_instances_lost | 0 | +| Performance_schema_table_lock_stat_lost | 0 | +| Performance_schema_thread_classes_lost | 0 | +| Performance_schema_thread_instances_lost | 0 | +| Performance_schema_users_lost | 0 | +| Prepared_stmt_count | 0 | +| Qcache_free_blocks | 1 | +| Qcache_free_memory | 1031272 | +| Qcache_hits | 0 | +| Qcache_inserts | 0 | +| Qcache_lowmem_prunes | 0 | +| Qcache_not_cached | 0 | +| Qcache_queries_in_cache | 0 | +| Qcache_total_blocks | 1 | +| Queries | 75 | +| Questions | 62 | +| Resultset_metadata_skipped | 0 | +| Rows_read | 27 | +| Rows_sent | 5888 | +| Rows_tmp_read | 6162 | +| Rpl_semi_sync_master_clients | 0 | +| Rpl_semi_sync_master_get_ack | 0 | +| Rpl_semi_sync_master_net_avg_wait_time | 0 | +| Rpl_semi_sync_master_net_wait_time | 0 | +| Rpl_semi_sync_master_net_waits | 0 | +| Rpl_semi_sync_master_no_times | 0 | +| Rpl_semi_sync_master_no_tx | 0 | +| Rpl_semi_sync_master_request_ack | 0 | +| Rpl_semi_sync_master_status | OFF | +| 
Rpl_semi_sync_master_timefunc_failures | 0 | +| Rpl_semi_sync_master_tx_avg_wait_time | 0 | +| Rpl_semi_sync_master_tx_wait_time | 0 | +| Rpl_semi_sync_master_tx_waits | 0 | +| Rpl_semi_sync_master_wait_pos_backtraverse | 0 | +| Rpl_semi_sync_master_wait_sessions | 0 | +| Rpl_semi_sync_master_yes_tx | 0 | +| Rpl_semi_sync_slave_send_ack | 0 | +| Rpl_semi_sync_slave_status | OFF | +| Rpl_status | AUTH_MASTER | +| Rpl_transactions_multi_engine | 0 | +| Select_full_join | 0 | +| Select_full_range_join | 0 | +| Select_range | 0 | +| Select_range_check | 0 | +| Select_scan | 17 | +| Slave_connections | 0 | +| Slave_heartbeat_period | 0.000 | +| Slave_open_temp_tables | 0 | +| Slave_received_heartbeats | 0 | +| Slave_retried_transactions | 0 | +| Slave_running | OFF | +| Slave_skipped_errors | 0 | +| Slaves_connected | 0 | +| Slaves_running | 0 | +| Slow_launch_threads | 0 | +| Slow_queries | 0 | +| Sort_merge_passes | 0 | +| Sort_priority_queue_sorts | 0 | +| Sort_range | 0 | +| Sort_rows | 0 | +| Sort_scan | 0 | +| Ssl_accept_renegotiates | 0 | +| Ssl_accepts | 0 | +| Ssl_callback_cache_hits | 0 | +| Ssl_cipher | | +| Ssl_cipher_list | | +| Ssl_client_connects | 0 | +| Ssl_connect_renegotiates | 0 | +| Ssl_ctx_verify_depth | 0 | +| Ssl_ctx_verify_mode | 0 | +| Ssl_default_timeout | 0 | +| Ssl_finished_accepts | 0 | +| Ssl_finished_connects | 0 | +| Ssl_server_not_after | | +| Ssl_server_not_before | | +| Ssl_session_cache_hits | 0 | +| Ssl_session_cache_misses | 0 | +| Ssl_session_cache_mode | NONE | +| Ssl_session_cache_overflows | 0 | +| Ssl_session_cache_size | 0 | +| Ssl_session_cache_timeouts | 0 | +| Ssl_sessions_reused | 0 | +| Ssl_used_session_cache_entries | 0 | +| Ssl_verify_depth | 0 | +| Ssl_verify_mode | 0 | +| Ssl_version | | +| Subquery_cache_hit | 0 | +| Subquery_cache_miss | 0 | +| Syncs | 87 | +| Table_locks_immediate | 17 | +| Table_locks_waited | 0 | +| Table_open_cache_active_instances | 1 | +| Table_open_cache_hits | 16 | +| Table_open_cache_misses | 24 | +| Table_open_cache_overflows | 0 | +| Tc_log_max_pages_used | 0 | +| Tc_log_page_size | 0 | +| Tc_log_page_waits | 0 | +| Threadpool_idle_threads | 0 | +| Threadpool_threads | 0 | +| Threads_cached | 0 | +| Threads_connected | 1 | +| Threads_created | 6 | +| Threads_running | 1 | +| Transactions_gtid_foreign_engine | 0 | +| Transactions_multi_engine | 0 | +| Update_scan | 0 | +| Uptime | 895 | +| Uptime_since_flush_status | 895 | +| wsrep_local_state_uuid | 479ce105-1e65-11ed-b2c3-8ac44f1dd1c9 | +| wsrep_protocol_version | 10 | +| wsrep_last_committed | 18 | +| wsrep_replicated | 0 | +| wsrep_replicated_bytes | 0 | +| wsrep_repl_keys | 0 | +| wsrep_repl_keys_bytes | 0 | +| wsrep_repl_data_bytes | 0 | +| wsrep_repl_other_bytes | 0 | +| wsrep_received | 11 | +| wsrep_received_bytes | 1410 | +| wsrep_local_commits | 0 | +| wsrep_local_cert_failures | 0 | +| wsrep_local_replays | 0 | +| wsrep_local_send_queue | 0 | +| wsrep_local_send_queue_max | 2 | +| wsrep_local_send_queue_min | 0 | +| wsrep_local_send_queue_avg | 0.25 | +| wsrep_local_recv_queue | 0 | +| wsrep_local_recv_queue_max | 1 | +| wsrep_local_recv_queue_min | 0 | +| wsrep_local_recv_queue_avg | 0 | +| wsrep_local_cached_downto | 1 | +| wsrep_flow_control_paused_ns | 0 | +| wsrep_flow_control_paused | 0 | +| wsrep_flow_control_sent | 0 | +| wsrep_flow_control_recv | 0 | +| wsrep_flow_control_active | false | +| wsrep_flow_control_requested | false | +| wsrep_cert_deps_distance | 1 | +| wsrep_apply_oooe | 0 | +| wsrep_apply_oool | 0 | +| wsrep_apply_window | 1 | 
+| wsrep_apply_waits | 0 | +| wsrep_commit_oooe | 0 | +| wsrep_commit_oool | 0 | +| wsrep_commit_window | 1 | +| wsrep_local_state | 4 | +| wsrep_local_state_comment | Synced | +| wsrep_cert_index_size | 1 | +| wsrep_causal_reads | 0 | +| wsrep_cert_interval | 0 | +| wsrep_open_transactions | 0 | +| wsrep_open_connections | 0 | +| wsrep_incoming_addresses | 172.17.0.2:3306,172.17.0.4:3306,172.17.0.3:3306 | +| wsrep_cluster_weight | 3 | +| wsrep_desync_count | 0 | +| wsrep_evs_delayed | | +| wsrep_evs_evict_list | | +| wsrep_evs_repl_latency | 0.000200973/0.125339/1.00029/0.330702/8 | +| wsrep_evs_state | OPERATIONAL | +| wsrep_gcomm_uuid | 49826f19-1e65-11ed-8435-a308cd7c3ccc | +| wsrep_gmcast_segment | 0 | +| wsrep_applier_thread_count | 4 | +| wsrep_cluster_capabilities | | +| wsrep_cluster_conf_id | 3 | +| wsrep_cluster_size | 3 | +| wsrep_cluster_state_uuid | 479ce105-1e65-11ed-b2c3-8ac44f1dd1c9 | +| wsrep_cluster_status | Primary | +| wsrep_connected | ON | +| wsrep_local_bf_aborts | 0 | +| wsrep_local_index | 0 | +| wsrep_provider_capabilities | :MULTI_MASTER:CERTIFICATION:PARALLEL_APPLYING:TRX_REPLAY:ISOLATION:PAUSE:CAUSAL_READS:INCREMENTAL_WRITESET:UNORDERED:PREORDERED:STREAMING:NBO: | +| wsrep_provider_name | Galera | +| wsrep_provider_vendor | Codership Oy <info@codership.com> | +| wsrep_provider_version | 4.12(r6311685) | +| wsrep_ready | ON | +| wsrep_rollbacker_thread_count | 1 | +| wsrep_thread_count | 5 | ++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt new file mode 100644 index 00000000000000..96591afdf7f811 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt @@ -0,0 +1,8 @@ ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| log_bin | ON | +| max_connections | 151 | +| performance_schema | ON | +| table_open_cache | 2000 | ++--------------------+-------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt new file mode 100644 index 00000000000000..a44ce5e70f7e13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt @@ -0,0 +1,6 @@ ++------+---------+ +| time | user | ++------+---------+ +| 1 | netdata | +| 9 | root | ++------+---------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt new file mode 100644 index 00000000000000..7a44b8b5ad5838 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt @@ -0,0 +1,6 @@ 
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ +| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_read | Rows_sent | Rows_deleted | Rows_inserted | Rows_updated | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections | Max_statement_time_exceeded | ++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ +| root | 1 | 0 | 9 | 0.000156 | 0.0001541 | 25 | 2799 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| netdata | 1 | 0 | 32 | 0.09262200000000004 | 0.07723410000000001 | 13440 | 105432 | 0 | 0 | 99 | 0 | 0 | 0 | 33 | 0 | 0 | 0 | 0 | 49698 | 0 | 33 | 66 | 0 | 0 | ++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt new file mode 100644 index 00000000000000..ee5e77d9aae0e3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt @@ -0,0 +1,6 @@ ++-----------------+---------------------+ +| Variable_name | Value | ++-----------------+---------------------+ +| version | 10.8.4-MariaDB-log | +| version_comment | Source distribution | ++-----------------+---------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt new file mode 100644 index 00000000000000..b117cb6c7dae98 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt @@ -0,0 +1,6 @@ 
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ +| Connection_name | Slave_SQL_State | Slave_IO_State | Master_Host | Master_User | Master_Port | Connect_Retry | Master_Log_File | Read_Master_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Master_Log_File | Slave_IO_Running | Slave_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | Exec_Master_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Master_SSL_Allowed | Master_SSL_CA_File | Master_SSL_CA_Path | Master_SSL_Cert | Master_SSL_Cipher | Master_SSL_Key | Seconds_Behind_Master | Master_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | Last_SQL_Error | Replicate_Ignore_Server_Ids | Master_Server_Id | Master_SSL_Crl | Master_SSL_Crlpath | Using_Gtid | Gtid_IO_Pos | Replicate_Do_Domain_Ids | Replicate_Ignore_Domain_Ids | Parallel_Mode | SQL_Delay | SQL_Remaining_Delay | Slave_SQL_Running_State | Slave_DDL_Groups | Slave_Non_Transactional_Groups | Slave_Transactional_Groups | Retried_transactions | Max_relay_log_size | Executed_log_entries | Slave_received_heartbeats | Slave_heartbeat_period | Gtid_Slave_Pos | 
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ +| Master1 | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 | +| Master2 | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 | 
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt new file mode 100644 index 00000000000000..61428f084aa00a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt @@ -0,0 +1,5 @@ ++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ +| Connection_name | Slave_SQL_State | Slave_IO_State | Master_Host | Master_User | Master_Port | Connect_Retry | Master_Log_File | Read_Master_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Master_Log_File | Slave_IO_Running | Slave_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | 
Exec_Master_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Master_SSL_Allowed | Master_SSL_CA_File | Master_SSL_CA_Path | Master_SSL_Cert | Master_SSL_Cipher | Master_SSL_Key | Seconds_Behind_Master | Master_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | Last_SQL_Error | Replicate_Ignore_Server_Ids | Master_Server_Id | Master_SSL_Crl | Master_SSL_Crlpath | Using_Gtid | Gtid_IO_Pos | Replicate_Do_Domain_Ids | Replicate_Ignore_Domain_Ids | Parallel_Mode | SQL_Delay | SQL_Remaining_Delay | Slave_SQL_Running_State | Slave_DDL_Groups | Slave_Non_Transactional_Groups | Slave_Transactional_Groups | Retried_transactions | Max_relay_log_size | Executed_log_entries | Slave_received_heartbeats | Slave_heartbeat_period | Gtid_Slave_Pos | ++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ +| | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 | 
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt new file mode 100644 index 00000000000000..c82531c74ef389 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt @@ -0,0 +1,569 @@ ++--------------------------------------------------------+--------------------------------------------------+ +| Variable_name | Value | ++--------------------------------------------------------+--------------------------------------------------+ +| Aborted_clients | 1 | +| Aborted_connects | 2 | +| Aborted_connects_preauth | 0 | +| Access_denied_errors | 2 | +| Acl_column_grants | 0 | +| Acl_database_grants | 3 | +| Acl_function_grants | 0 | +| Acl_procedure_grants | 0 | +| Acl_package_spec_grants | 0 | +| Acl_package_body_grants | 0 | +| Acl_proxy_users | 1 | +| Acl_role_grants | 0 | +| Acl_roles | 0 | +| Acl_table_grants | 1 | +| Acl_users | 4 | +| Aria_pagecache_blocks_not_flushed | 7 | +| Aria_pagecache_blocks_unused | 15630 | +| Aria_pagecache_blocks_used | 17 | +| Aria_pagecache_read_requests | 78 | +| Aria_pagecache_reads | 19 | +| Aria_pagecache_write_requests | 8 | +| Aria_pagecache_writes | 0 | +| Aria_transaction_log_syncs | 6 | +| Binlog_commits | 0 | +| Binlog_group_commits | 0 | +| Binlog_group_commit_trigger_count | 0 | +| Binlog_group_commit_trigger_lock_wait | 0 | +| Binlog_group_commit_trigger_timeout | 0 | +| Binlog_snapshot_file | mysql-bin.000002 | +| Binlog_snapshot_position | 1806 | +| Binlog_bytes_written | 1842 | +| Binlog_cache_disk_use | 0 | +| Binlog_cache_use | 0 | +| Binlog_stmt_cache_disk_use | 0 | +| Binlog_stmt_cache_use | 0 | +| Busy_time | 0.000000 | +| Bytes_received | 81392 | +| Bytes_sent | 56794 | +| Column_compressions | 0 | +| Column_decompressions | 0 | +| Com_admin_commands | 0 | +| Com_alter_db | 0 | +| Com_alter_db_upgrade | 0 | +| Com_alter_event | 0 | +| Com_alter_function | 0 | +| Com_alter_procedure | 0 | +| Com_alter_server | 0 | +| Com_alter_sequence | 0 | +| Com_alter_table | 0 | +| Com_alter_user | 0 | +| Com_analyze | 0 | +| 
Com_assign_to_keycache | 0 | +| Com_backup | 0 | +| Com_backup_lock | 0 | +| Com_begin | 0 | +| Com_binlog | 0 | +| Com_call_procedure | 0 | +| Com_change_db | 0 | +| Com_change_master | 0 | +| Com_check | 0 | +| Com_checksum | 0 | +| Com_commit | 0 | +| Com_compound_sql | 0 | +| Com_create_db | 1 | +| Com_create_event | 0 | +| Com_create_function | 0 | +| Com_create_index | 0 | +| Com_create_package | 0 | +| Com_create_package_body | 0 | +| Com_create_procedure | 0 | +| Com_create_role | 0 | +| Com_create_sequence | 0 | +| Com_create_server | 0 | +| Com_create_table | 0 | +| Com_create_temporary_table | 0 | +| Com_create_trigger | 0 | +| Com_create_udf | 0 | +| Com_create_user | 3 | +| Com_create_view | 0 | +| Com_dealloc_sql | 0 | +| Com_delete | 0 | +| Com_delete_multi | 0 | +| Com_do | 0 | +| Com_drop_db | 0 | +| Com_drop_event | 0 | +| Com_drop_function | 0 | +| Com_drop_index | 0 | +| Com_drop_procedure | 0 | +| Com_drop_package | 0 | +| Com_drop_package_body | 0 | +| Com_drop_role | 0 | +| Com_drop_server | 0 | +| Com_drop_sequence | 0 | +| Com_drop_table | 0 | +| Com_drop_temporary_table | 0 | +| Com_drop_trigger | 0 | +| Com_drop_user | 0 | +| Com_drop_view | 0 | +| Com_empty_query | 0 | +| Com_execute_immediate | 0 | +| Com_execute_sql | 0 | +| Com_flush | 2 | +| Com_get_diagnostics | 0 | +| Com_grant | 3 | +| Com_grant_role | 0 | +| Com_ha_close | 0 | +| Com_ha_open | 0 | +| Com_ha_read | 0 | +| Com_help | 0 | +| Com_insert | 0 | +| Com_insert_select | 0 | +| Com_install_plugin | 0 | +| Com_kill | 0 | +| Com_load | 0 | +| Com_lock_tables | 0 | +| Com_optimize | 0 | +| Com_preload_keys | 0 | +| Com_prepare_sql | 0 | +| Com_purge | 0 | +| Com_purge_before_date | 0 | +| Com_release_savepoint | 0 | +| Com_rename_table | 0 | +| Com_rename_user | 0 | +| Com_repair | 0 | +| Com_replace | 0 | +| Com_replace_select | 0 | +| Com_reset | 0 | +| Com_resignal | 0 | +| Com_revoke | 0 | +| Com_revoke_all | 0 | +| Com_revoke_role | 0 | +| Com_rollback | 0 | +| Com_rollback_to_savepoint | 0 | +| Com_savepoint | 0 | +| Com_select | 6 | +| Com_set_option | 0 | +| Com_show_authors | 0 | +| Com_show_binlog_events | 0 | +| Com_show_binlogs | 0 | +| Com_show_charsets | 0 | +| Com_show_collations | 0 | +| Com_show_contributors | 0 | +| Com_show_create_db | 0 | +| Com_show_create_event | 0 | +| Com_show_create_func | 0 | +| Com_show_create_package | 0 | +| Com_show_create_package_body | 0 | +| Com_show_create_proc | 0 | +| Com_show_create_table | 0 | +| Com_show_create_trigger | 0 | +| Com_show_create_user | 0 | +| Com_show_databases | 0 | +| Com_show_engine_logs | 0 | +| Com_show_engine_mutex | 0 | +| Com_show_engine_status | 0 | +| Com_show_errors | 0 | +| Com_show_events | 0 | +| Com_show_explain | 0 | +| Com_show_fields | 0 | +| Com_show_function_status | 0 | +| Com_show_generic | 0 | +| Com_show_grants | 0 | +| Com_show_keys | 0 | +| Com_show_binlog_status | 0 | +| Com_show_open_tables | 0 | +| Com_show_package_status | 0 | +| Com_show_package_body_status | 0 | +| Com_show_plugins | 0 | +| Com_show_privileges | 0 | +| Com_show_procedure_status | 0 | +| Com_show_processlist | 0 | +| Com_show_profile | 0 | +| Com_show_profiles | 0 | +| Com_show_relaylog_events | 0 | +| Com_show_slave_hosts | 0 | +| Com_show_slave_status | 14 | +| Com_show_status | 2 | +| Com_show_storage_engines | 0 | +| Com_show_table_status | 0 | +| Com_show_tables | 0 | +| Com_show_triggers | 0 | +| Com_show_variables | 0 | +| Com_show_warnings | 0 | +| Com_shutdown | 0 | +| Com_signal | 0 | +| Com_start_all_slaves | 0 | +| 
Com_start_slave | 0 | +| Com_stmt_close | 0 | +| Com_stmt_execute | 0 | +| Com_stmt_fetch | 0 | +| Com_stmt_prepare | 0 | +| Com_stmt_reprepare | 0 | +| Com_stmt_reset | 0 | +| Com_stmt_send_long_data | 0 | +| Com_stop_all_slaves | 0 | +| Com_stop_slave | 0 | +| Com_truncate | 0 | +| Com_uninstall_plugin | 0 | +| Com_unlock_tables | 0 | +| Com_update | 0 | +| Com_update_multi | 0 | +| Com_xa_commit | 0 | +| Com_xa_end | 0 | +| Com_xa_prepare | 0 | +| Com_xa_recover | 0 | +| Com_xa_rollback | 0 | +| Com_xa_start | 0 | +| Compression | OFF | +| Connection_errors_accept | 0 | +| Connection_errors_internal | 0 | +| Connection_errors_max_connections | 0 | +| Connection_errors_peer_address | 0 | +| Connection_errors_select | 0 | +| Connection_errors_tcpwrap | 0 | +| Connections | 12 | +| Cpu_time | 0.000000 | +| Created_tmp_disk_tables | 0 | +| Created_tmp_files | 5 | +| Created_tmp_tables | 2 | +| Delayed_errors | 0 | +| Delayed_insert_threads | 0 | +| Delayed_writes | 0 | +| Delete_scan | 0 | +| Empty_queries | 0 | +| Executed_events | 0 | +| Executed_triggers | 0 | +| Feature_application_time_periods | 0 | +| Feature_check_constraint | 1 | +| Feature_custom_aggregate_functions | 0 | +| Feature_delay_key_write | 0 | +| Feature_dynamic_columns | 0 | +| Feature_fulltext | 0 | +| Feature_gis | 0 | +| Feature_insert_returning | 0 | +| Feature_invisible_columns | 0 | +| Feature_json | 1 | +| Feature_locale | 0 | +| Feature_subquery | 0 | +| Feature_system_versioning | 0 | +| Feature_timezone | 0 | +| Feature_trigger | 0 | +| Feature_window_functions | 0 | +| Feature_xml | 0 | +| Handler_commit | 30 | +| Handler_delete | 0 | +| Handler_discover | 0 | +| Handler_external_lock | 0 | +| Handler_icp_attempts | 0 | +| Handler_icp_match | 0 | +| Handler_mrr_init | 0 | +| Handler_mrr_key_refills | 0 | +| Handler_mrr_rowid_refills | 0 | +| Handler_prepare | 0 | +| Handler_read_first | 7 | +| Handler_read_key | 7 | +| Handler_read_last | 0 | +| Handler_read_next | 3 | +| Handler_read_prev | 0 | +| Handler_read_retry | 0 | +| Handler_read_rnd | 0 | +| Handler_read_rnd_deleted | 0 | +| Handler_read_rnd_next | 626 | +| Handler_rollback | 0 | +| Handler_savepoint | 0 | +| Handler_savepoint_rollback | 0 | +| Handler_tmp_delete | 0 | +| Handler_tmp_update | 0 | +| Handler_tmp_write | 568 | +| Handler_update | 3 | +| Handler_write | 13 | +| Innodb_adaptive_hash_hash_searches | 0 | +| Innodb_adaptive_hash_non_hash_searches | 0 | +| Innodb_background_log_sync | 52300 | +| Innodb_buffer_pool_dump_status | | +| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220817 21:14:57 | +| Innodb_buffer_pool_resize_status | | +| Innodb_buffer_pool_load_incomplete | OFF | +| Innodb_buffer_pool_pages_data | 309 | +| Innodb_buffer_pool_bytes_data | 5062656 | +| Innodb_buffer_pool_pages_dirty | 29 | +| Innodb_buffer_pool_bytes_dirty | 475136 | +| Innodb_buffer_pool_pages_flushed | 0 | +| Innodb_buffer_pool_pages_free | 7755 | +| Innodb_buffer_pool_pages_made_not_young | 0 | +| Innodb_buffer_pool_pages_made_young | 0 | +| Innodb_buffer_pool_pages_misc | 0 | +| Innodb_buffer_pool_pages_old | 0 | +| Innodb_buffer_pool_pages_total | 8064 | +| Innodb_buffer_pool_pages_lru_flushed | 0 | +| Innodb_buffer_pool_pages_lru_freed | 0 | +| Innodb_buffer_pool_read_ahead_rnd | 0 | +| Innodb_buffer_pool_read_ahead | 0 | +| Innodb_buffer_pool_read_ahead_evicted | 0 | +| Innodb_buffer_pool_read_requests | 1911 | +| Innodb_buffer_pool_reads | 171 | +| Innodb_buffer_pool_wait_free | 0 | +| Innodb_buffer_pool_write_requests | 148 | +| 
Innodb_checkpoint_age | 6097 | +| Innodb_checkpoint_max_age | 80819529 | +| Innodb_data_fsyncs | 17 | +| Innodb_data_pending_fsyncs | 0 | +| Innodb_data_pending_reads | 0 | +| Innodb_data_pending_writes | 0 | +| Innodb_data_read | 2801664 | +| Innodb_data_reads | 185 | +| Innodb_data_writes | 16 | +| Innodb_data_written | 0 | +| Innodb_dblwr_pages_written | 0 | +| Innodb_dblwr_writes | 0 | +| Innodb_deadlocks | 0 | +| Innodb_history_list_length | 0 | +| Innodb_ibuf_discarded_delete_marks | 0 | +| Innodb_ibuf_discarded_deletes | 0 | +| Innodb_ibuf_discarded_inserts | 0 | +| Innodb_ibuf_free_list | 0 | +| Innodb_ibuf_merged_delete_marks | 0 | +| Innodb_ibuf_merged_deletes | 0 | +| Innodb_ibuf_merged_inserts | 0 | +| Innodb_ibuf_merges | 0 | +| Innodb_ibuf_segment_size | 2 | +| Innodb_ibuf_size | 1 | +| Innodb_log_waits | 0 | +| Innodb_log_write_requests | 109 | +| Innodb_log_writes | 15 | +| Innodb_lsn_current | 52826 | +| Innodb_lsn_flushed | 52826 | +| Innodb_lsn_last_checkpoint | 46729 | +| Innodb_master_thread_active_loops | 0 | +| Innodb_master_thread_idle_loops | 52301 | +| Innodb_max_trx_id | 37 | +| Innodb_mem_adaptive_hash | 0 | +| Innodb_mem_dictionary | 855336 | +| Innodb_os_log_written | 6097 | +| Innodb_page_size | 16384 | +| Innodb_pages_created | 138 | +| Innodb_pages_read | 171 | +| Innodb_pages_written | 0 | +| Innodb_row_lock_current_waits | 0 | +| Innodb_row_lock_time | 0 | +| Innodb_row_lock_time_avg | 0 | +| Innodb_row_lock_time_max | 0 | +| Innodb_row_lock_waits | 0 | +| Innodb_rows_deleted | 0 | +| Innodb_rows_inserted | 0 | +| Innodb_rows_read | 0 | +| Innodb_rows_updated | 0 | +| Innodb_system_rows_deleted | 0 | +| Innodb_system_rows_inserted | 9 | +| Innodb_system_rows_read | 0 | +| Innodb_system_rows_updated | 0 | +| Innodb_num_open_files | 6 | +| Innodb_truncated_status_writes | 0 | +| Innodb_available_undo_logs | 128 | +| Innodb_undo_truncations | 0 | +| Innodb_page_compression_saved | 0 | +| Innodb_num_pages_page_compressed | 0 | +| Innodb_num_page_compressed_trim_op | 0 | +| Innodb_num_pages_page_decompressed | 0 | +| Innodb_num_pages_page_compression_error | 0 | +| Innodb_num_pages_encrypted | 0 | +| Innodb_num_pages_decrypted | 0 | +| Innodb_have_lz4 | OFF | +| Innodb_have_lzo | OFF | +| Innodb_have_lzma | OFF | +| Innodb_have_bzip2 | OFF | +| Innodb_have_snappy | OFF | +| Innodb_have_punch_hole | ON | +| Innodb_defragment_compression_failures | 0 | +| Innodb_defragment_failures | 0 | +| Innodb_defragment_count | 0 | +| Innodb_instant_alter_column | 0 | +| Innodb_onlineddl_rowlog_rows | 0 | +| Innodb_onlineddl_rowlog_pct_used | 0 | +| Innodb_onlineddl_pct_progress | 0 | +| Innodb_secondary_index_triggered_cluster_reads | 0 | +| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 | +| Innodb_encryption_rotation_pages_read_from_cache | 0 | +| Innodb_encryption_rotation_pages_read_from_disk | 0 | +| Innodb_encryption_rotation_pages_modified | 0 | +| Innodb_encryption_rotation_pages_flushed | 0 | +| Innodb_encryption_rotation_estimated_iops | 0 | +| Innodb_encryption_n_merge_blocks_encrypted | 0 | +| Innodb_encryption_n_merge_blocks_decrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_encrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_decrypted | 0 | +| Innodb_encryption_n_temp_blocks_encrypted | 0 | +| Innodb_encryption_n_temp_blocks_decrypted | 0 | +| Innodb_encryption_num_key_requests | 0 | +| Key_blocks_not_flushed | 0 | +| Key_blocks_unused | 107163 | +| Key_blocks_used | 0 | +| Key_blocks_warm | 0 | +| Key_read_requests | 0 | +| Key_reads | 0 | +| 
Key_write_requests | 0 | +| Key_writes | 0 | +| Last_query_cost | 0.000000 | +| Master_gtid_wait_count | 0 | +| Master_gtid_wait_time | 0 | +| Master_gtid_wait_timeouts | 0 | +| Max_statement_time_exceeded | 0 | +| Max_used_connections | 1 | +| Memory_used | 35982280 | +| Memory_used_initial | 35441456 | +| Not_flushed_delayed_rows | 0 | +| Open_files | 29 | +| Open_streams | 4 | +| Open_table_definitions | 17 | +| Open_tables | 10 | +| Opened_files | 100 | +| Opened_plugin_libraries | 0 | +| Opened_table_definitions | 16 | +| Opened_tables | 16 | +| Opened_views | 0 | +| Performance_schema_accounts_lost | 0 | +| Performance_schema_cond_classes_lost | 0 | +| Performance_schema_cond_instances_lost | 0 | +| Performance_schema_digest_lost | 0 | +| Performance_schema_file_classes_lost | 0 | +| Performance_schema_file_handles_lost | 0 | +| Performance_schema_file_instances_lost | 0 | +| Performance_schema_hosts_lost | 0 | +| Performance_schema_index_stat_lost | 0 | +| Performance_schema_locker_lost | 0 | +| Performance_schema_memory_classes_lost | 0 | +| Performance_schema_metadata_lock_lost | 0 | +| Performance_schema_mutex_classes_lost | 0 | +| Performance_schema_mutex_instances_lost | 0 | +| Performance_schema_nested_statement_lost | 0 | +| Performance_schema_prepared_statements_lost | 0 | +| Performance_schema_program_lost | 0 | +| Performance_schema_rwlock_classes_lost | 0 | +| Performance_schema_rwlock_instances_lost | 0 | +| Performance_schema_session_connect_attrs_lost | 0 | +| Performance_schema_socket_classes_lost | 0 | +| Performance_schema_socket_instances_lost | 0 | +| Performance_schema_stage_classes_lost | 0 | +| Performance_schema_statement_classes_lost | 0 | +| Performance_schema_table_handles_lost | 0 | +| Performance_schema_table_instances_lost | 0 | +| Performance_schema_table_lock_stat_lost | 0 | +| Performance_schema_thread_classes_lost | 0 | +| Performance_schema_thread_instances_lost | 0 | +| Performance_schema_users_lost | 0 | +| Prepared_stmt_count | 0 | +| Qcache_free_blocks | 1 | +| Qcache_free_memory | 1031272 | +| Qcache_hits | 0 | +| Qcache_inserts | 0 | +| Qcache_lowmem_prunes | 0 | +| Qcache_not_cached | 0 | +| Qcache_queries_in_cache | 0 | +| Qcache_total_blocks | 1 | +| Queries | 33 | +| Questions | 24 | +| Resultset_metadata_skipped | 0 | +| Rows_read | 36 | +| Rows_sent | 571 | +| Rows_tmp_read | 565 | +| Rpl_semi_sync_master_clients | 0 | +| Rpl_semi_sync_master_get_ack | 0 | +| Rpl_semi_sync_master_net_avg_wait_time | 0 | +| Rpl_semi_sync_master_net_wait_time | 0 | +| Rpl_semi_sync_master_net_waits | 0 | +| Rpl_semi_sync_master_no_times | 0 | +| Rpl_semi_sync_master_no_tx | 0 | +| Rpl_semi_sync_master_request_ack | 0 | +| Rpl_semi_sync_master_status | OFF | +| Rpl_semi_sync_master_timefunc_failures | 0 | +| Rpl_semi_sync_master_tx_avg_wait_time | 0 | +| Rpl_semi_sync_master_tx_wait_time | 0 | +| Rpl_semi_sync_master_tx_waits | 0 | +| Rpl_semi_sync_master_wait_pos_backtraverse | 0 | +| Rpl_semi_sync_master_wait_sessions | 0 | +| Rpl_semi_sync_master_yes_tx | 0 | +| Rpl_semi_sync_slave_send_ack | 0 | +| Rpl_semi_sync_slave_status | OFF | +| Rpl_status | AUTH_MASTER | +| Rpl_transactions_multi_engine | 0 | +| Select_full_join | 0 | +| Select_full_range_join | 0 | +| Select_range | 0 | +| Select_range_check | 0 | +| Select_scan | 2 | +| Slave_connections | 0 | +| Slave_heartbeat_period | 30.000 | +| Slave_open_temp_tables | 0 | +| Slave_received_heartbeats | 1743 | +| Slave_retried_transactions | 0 | +| Slave_running | ON | +| Slave_skipped_errors | 0 | +| 
Slaves_connected | 0 | +| Slaves_running | 1 | +| Slow_launch_threads | 0 | +| Slow_queries | 0 | +| Sort_merge_passes | 0 | +| Sort_priority_queue_sorts | 0 | +| Sort_range | 0 | +| Sort_rows | 0 | +| Sort_scan | 0 | +| Ssl_accept_renegotiates | 0 | +| Ssl_accepts | 0 | +| Ssl_callback_cache_hits | 0 | +| Ssl_cipher | | +| Ssl_cipher_list | | +| Ssl_client_connects | 0 | +| Ssl_connect_renegotiates | 0 | +| Ssl_ctx_verify_depth | 0 | +| Ssl_ctx_verify_mode | 0 | +| Ssl_default_timeout | 0 | +| Ssl_finished_accepts | 0 | +| Ssl_finished_connects | 0 | +| Ssl_server_not_after | | +| Ssl_server_not_before | | +| Ssl_session_cache_hits | 0 | +| Ssl_session_cache_misses | 0 | +| Ssl_session_cache_mode | NONE | +| Ssl_session_cache_overflows | 0 | +| Ssl_session_cache_size | 0 | +| Ssl_session_cache_timeouts | 0 | +| Ssl_sessions_reused | 0 | +| Ssl_used_session_cache_entries | 0 | +| Ssl_verify_depth | 0 | +| Ssl_verify_mode | 0 | +| Ssl_version | | +| Subquery_cache_hit | 0 | +| Subquery_cache_miss | 0 | +| Syncs | 56 | +| Table_locks_immediate | 60 | +| Table_locks_waited | 0 | +| Table_open_cache_active_instances | 1 | +| Table_open_cache_hits | 54 | +| Table_open_cache_misses | 16 | +| Table_open_cache_overflows | 0 | +| Tc_log_max_pages_used | 0 | +| Tc_log_page_size | 0 | +| Tc_log_page_waits | 0 | +| Threadpool_idle_threads | 0 | +| Threadpool_threads | 0 | +| Threads_cached | 0 | +| Threads_connected | 1 | +| Threads_created | 2 | +| Threads_running | 3 | +| Transactions_gtid_foreign_engine | 0 | +| Transactions_multi_engine | 0 | +| Update_scan | 0 | +| Uptime | 52310 | +| Uptime_since_flush_status | 52310 | +| wsrep | 0 | +| wsrep_applier_thread_count | 0 | +| wsrep_cluster_capabilities | | +| wsrep_cluster_conf_id | 18446744073709551615 | +| wsrep_cluster_size | 0 | +| wsrep_cluster_state_uuid | | +| wsrep_cluster_status | Disconnected | +| wsrep_connected | OFF | +| wsrep_local_bf_aborts | 0 | +| wsrep_local_index | 18446744073709551615 | +| wsrep_provider_capabilities | | +| wsrep_provider_name | | +| wsrep_provider_vendor | | +| wsrep_provider_version | | +| wsrep_ready | OFF | +| wsrep_rollbacker_thread_count | 0 | +| wsrep_thread_count | 0 | ++--------------------------------------------------------+--------------------------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt new file mode 100644 index 00000000000000..96591afdf7f811 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt @@ -0,0 +1,8 @@ ++--------------------+-------+ +| Variable_name | Value | ++--------------------+-------+ +| log_bin | ON | +| max_connections | 151 | +| performance_schema | ON | +| table_open_cache | 2000 | ++--------------------+-------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt new file mode 100644 index 00000000000000..a44ce5e70f7e13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt @@ -0,0 +1,6 @@ ++------+---------+ +| time | user | ++------+---------+ +| 1 | netdata | +| 9 | root | ++------+---------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt 
b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt new file mode 100644 index 00000000000000..7a44b8b5ad5838 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt @@ -0,0 +1,6 @@ ++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ +| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_read | Rows_sent | Rows_deleted | Rows_inserted | Rows_updated | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections | Max_statement_time_exceeded | ++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ +| root | 1 | 0 | 9 | 0.000156 | 0.0001541 | 25 | 2799 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| netdata | 1 | 0 | 32 | 0.09262200000000004 | 0.07723410000000001 | 13440 | 105432 | 0 | 0 | 99 | 0 | 0 | 0 | 33 | 0 | 0 | 0 | 0 | 49698 | 0 | 33 | 66 | 0 | 0 | ++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/version.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/version.txt new file mode 100644 index 00000000000000..2e7ca5b027e001 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v10.8.4/version.txt @@ -0,0 +1,6 @@ ++-----------------+---------------------+ +| Variable_name | Value | ++-----------------+---------------------+ +| version | 10.8.4-MariaDB | +| version_comment | Source distribution | ++-----------------+---------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt new file mode 100644 index 00000000000000..7c75f061931d09 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt @@ -0,0 +1,423 @@ ++------------------------------------------+-------------+ +| Variable_name | Value | 
++------------------------------------------+-------------+ +| Aborted_clients | 0 | +| Aborted_connects | 0 | +| Access_denied_errors | 0 | +| Aria_pagecache_blocks_not_flushed | 0 | +| Aria_pagecache_blocks_unused | 15737 | +| Aria_pagecache_blocks_used | 0 | +| Aria_pagecache_read_requests | 0 | +| Aria_pagecache_reads | 0 | +| Aria_pagecache_write_requests | 0 | +| Aria_pagecache_writes | 0 | +| Aria_transaction_log_syncs | 0 | +| Binlog_commits | 0 | +| Binlog_group_commits | 0 | +| Binlog_snapshot_file | | +| Binlog_snapshot_position | 0 | +| Binlog_bytes_written | 0 | +| Binlog_cache_disk_use | 0 | +| Binlog_cache_use | 0 | +| Binlog_stmt_cache_disk_use | 0 | +| Binlog_stmt_cache_use | 0 | +| Busy_time | 0.000000 | +| Bytes_received | 639 | +| Bytes_sent | 41620 | +| Com_admin_commands | 0 | +| Com_alter_db | 0 | +| Com_alter_db_upgrade | 0 | +| Com_alter_event | 0 | +| Com_alter_function | 0 | +| Com_alter_procedure | 0 | +| Com_alter_server | 0 | +| Com_alter_table | 0 | +| Com_alter_tablespace | 0 | +| Com_analyze | 0 | +| Com_assign_to_keycache | 0 | +| Com_begin | 0 | +| Com_binlog | 0 | +| Com_call_procedure | 0 | +| Com_change_db | 0 | +| Com_change_master | 0 | +| Com_check | 0 | +| Com_checksum | 0 | +| Com_commit | 0 | +| Com_create_db | 0 | +| Com_create_event | 0 | +| Com_create_function | 0 | +| Com_create_index | 0 | +| Com_create_procedure | 0 | +| Com_create_server | 0 | +| Com_create_table | 0 | +| Com_create_trigger | 0 | +| Com_create_udf | 0 | +| Com_create_user | 0 | +| Com_create_view | 0 | +| Com_dealloc_sql | 0 | +| Com_delete | 0 | +| Com_delete_multi | 0 | +| Com_do | 0 | +| Com_drop_db | 0 | +| Com_drop_event | 0 | +| Com_drop_function | 0 | +| Com_drop_index | 0 | +| Com_drop_procedure | 0 | +| Com_drop_server | 0 | +| Com_drop_table | 0 | +| Com_drop_trigger | 0 | +| Com_drop_user | 0 | +| Com_drop_view | 0 | +| Com_empty_query | 0 | +| Com_execute_sql | 0 | +| Com_flush | 0 | +| Com_grant | 0 | +| Com_ha_close | 0 | +| Com_ha_open | 0 | +| Com_ha_read | 0 | +| Com_help | 0 | +| Com_insert | 0 | +| Com_insert_select | 0 | +| Com_install_plugin | 0 | +| Com_kill | 0 | +| Com_load | 0 | +| Com_lock_tables | 0 | +| Com_optimize | 0 | +| Com_preload_keys | 0 | +| Com_prepare_sql | 0 | +| Com_purge | 0 | +| Com_purge_before_date | 0 | +| Com_release_savepoint | 0 | +| Com_rename_table | 0 | +| Com_rename_user | 0 | +| Com_repair | 0 | +| Com_replace | 0 | +| Com_replace_select | 0 | +| Com_reset | 0 | +| Com_resignal | 0 | +| Com_revoke | 0 | +| Com_revoke_all | 0 | +| Com_rollback | 0 | +| Com_rollback_to_savepoint | 0 | +| Com_savepoint | 0 | +| Com_select | 4 | +| Com_set_option | 0 | +| Com_show_authors | 0 | +| Com_show_binlog_events | 0 | +| Com_show_binlogs | 0 | +| Com_show_charsets | 0 | +| Com_show_client_statistics | 0 | +| Com_show_collations | 0 | +| Com_show_contributors | 0 | +| Com_show_create_db | 0 | +| Com_show_create_event | 0 | +| Com_show_create_func | 0 | +| Com_show_create_proc | 0 | +| Com_show_create_table | 0 | +| Com_show_create_trigger | 0 | +| Com_show_databases | 0 | +| Com_show_engine_logs | 0 | +| Com_show_engine_mutex | 0 | +| Com_show_engine_status | 0 | +| Com_show_errors | 0 | +| Com_show_events | 0 | +| Com_show_fields | 0 | +| Com_show_function_status | 0 | +| Com_show_grants | 0 | +| Com_show_index_statistics | 0 | +| Com_show_keys | 0 | +| Com_show_master_status | 0 | +| Com_show_open_tables | 0 | +| Com_show_plugins | 0 | +| Com_show_privileges | 0 | +| Com_show_procedure_status | 0 | +| Com_show_processlist | 0 | +| 
Com_show_profile | 0 | +| Com_show_profiles | 0 | +| Com_show_relaylog_events | 0 | +| Com_show_slave_hosts | 0 | +| Com_show_slave_status | 0 | +| Com_show_status | 1 | +| Com_show_storage_engines | 0 | +| Com_show_table_statistics | 0 | +| Com_show_table_status | 0 | +| Com_show_tables | 0 | +| Com_show_triggers | 0 | +| Com_show_user_statistics | 0 | +| Com_show_variables | 4 | +| Com_show_warnings | 0 | +| Com_signal | 0 | +| Com_slave_start | 0 | +| Com_slave_stop | 0 | +| Com_stmt_close | 0 | +| Com_stmt_execute | 0 | +| Com_stmt_fetch | 0 | +| Com_stmt_prepare | 0 | +| Com_stmt_reprepare | 0 | +| Com_stmt_reset | 0 | +| Com_stmt_send_long_data | 0 | +| Com_truncate | 0 | +| Com_uninstall_plugin | 0 | +| Com_unlock_tables | 0 | +| Com_update | 0 | +| Com_update_multi | 0 | +| Com_xa_commit | 0 | +| Com_xa_end | 0 | +| Com_xa_prepare | 0 | +| Com_xa_recover | 0 | +| Com_xa_rollback | 0 | +| Com_xa_start | 0 | +| Compression | OFF | +| Connections | 4 | +| Cpu_time | 0.000000 | +| Created_tmp_disk_tables | 0 | +| Created_tmp_files | 6 | +| Created_tmp_tables | 5 | +| Delayed_errors | 0 | +| Delayed_insert_threads | 0 | +| Delayed_writes | 0 | +| Empty_queries | 0 | +| Executed_events | 0 | +| Executed_triggers | 0 | +| Feature_dynamic_columns | 0 | +| Feature_fulltext | 0 | +| Feature_gis | 0 | +| Feature_locale | 0 | +| Feature_subquery | 0 | +| Feature_timezone | 0 | +| Feature_trigger | 0 | +| Feature_xml | 0 | +| Flush_commands | 2 | +| Handler_commit | 0 | +| Handler_delete | 0 | +| Handler_discover | 0 | +| Handler_icp_attempts | 0 | +| Handler_icp_match | 0 | +| Handler_mrr_init | 0 | +| Handler_mrr_key_refills | 0 | +| Handler_mrr_rowid_refills | 0 | +| Handler_prepare | 0 | +| Handler_read_first | 0 | +| Handler_read_key | 0 | +| Handler_read_last | 0 | +| Handler_read_next | 0 | +| Handler_read_prev | 0 | +| Handler_read_rnd | 0 | +| Handler_read_rnd_deleted | 0 | +| Handler_read_rnd_next | 1264 | +| Handler_rollback | 0 | +| Handler_savepoint | 0 | +| Handler_savepoint_rollback | 0 | +| Handler_tmp_update | 0 | +| Handler_tmp_write | 1260 | +| Handler_update | 0 | +| Handler_write | 0 | +| Innodb_adaptive_hash_cells | 553229 | +| Innodb_adaptive_hash_hash_searches | 0 | +| Innodb_adaptive_hash_heap_buffers | 0 | +| Innodb_adaptive_hash_non_hash_searches | 19 | +| Innodb_background_log_sync | 1 | +| Innodb_buffer_pool_bytes_data | 2342912 | +| Innodb_buffer_pool_bytes_dirty | 0 | +| Innodb_buffer_pool_pages_data | 143 | +| Innodb_buffer_pool_pages_dirty | 0 | +| Innodb_buffer_pool_pages_flushed | 0 | +| Innodb_buffer_pool_pages_free | 16240 | +| Innodb_buffer_pool_pages_LRU_flushed | 0 | +| Innodb_buffer_pool_pages_made_not_young | 0 | +| Innodb_buffer_pool_pages_made_young | 0 | +| Innodb_buffer_pool_pages_misc | 0 | +| Innodb_buffer_pool_pages_old | 0 | +| Innodb_buffer_pool_pages_total | 16383 | +| Innodb_buffer_pool_read_ahead | 0 | +| Innodb_buffer_pool_read_ahead_evicted | 0 | +| Innodb_buffer_pool_read_ahead_rnd | 0 | +| Innodb_buffer_pool_read_requests | 459 | +| Innodb_buffer_pool_reads | 144 | +| Innodb_buffer_pool_wait_free | 0 | +| Innodb_buffer_pool_write_requests | 0 | +| Innodb_checkpoint_age | 0 | +| Innodb_checkpoint_max_age | 7782360 | +| Innodb_checkpoint_target_age | 7539162 | +| Innodb_current_row_locks | 0 | +| Innodb_data_fsyncs | 3 | +| Innodb_data_pending_fsyncs | 0 | +| Innodb_data_pending_reads | 0 | +| Innodb_data_pending_writes | 0 | +| Innodb_data_read | 4542976 | +| Innodb_data_reads | 155 | +| Innodb_data_writes | 3 | +| Innodb_data_written | 
1536 | +| Innodb_dblwr_pages_written | 0 | +| Innodb_dblwr_writes | 0 | +| Innodb_deadlocks | 0 | +| Innodb_descriptors_memory | 8000 | +| Innodb_dict_tables | 8 | +| Innodb_have_atomic_builtins | ON | +| Innodb_history_list_length | 0 | +| Innodb_ibuf_discarded_delete_marks | 0 | +| Innodb_ibuf_discarded_deletes | 0 | +| Innodb_ibuf_discarded_inserts | 0 | +| Innodb_ibuf_free_list | 0 | +| Innodb_ibuf_merged_delete_marks | 0 | +| Innodb_ibuf_merged_deletes | 0 | +| Innodb_ibuf_merged_inserts | 0 | +| Innodb_ibuf_merges | 0 | +| Innodb_ibuf_segment_size | 2 | +| Innodb_ibuf_size | 1 | +| Innodb_log_waits | 0 | +| Innodb_log_write_requests | 0 | +| Innodb_log_writes | 1 | +| Innodb_lsn_current | 1597945 | +| Innodb_lsn_flushed | 1597945 | +| Innodb_lsn_last_checkpoint | 1597945 | +| Innodb_master_thread_1_second_loops | 1 | +| Innodb_master_thread_10_second_loops | 0 | +| Innodb_master_thread_background_loops | 1 | +| Innodb_master_thread_main_flush_loops | 1 | +| Innodb_master_thread_sleeps | 1 | +| Innodb_max_trx_id | 1280 | +| Innodb_mem_adaptive_hash | 4430048 | +| Innodb_mem_dictionary | 1146964 | +| Innodb_mem_total | 275513344 | +| Innodb_mutex_os_waits | 0 | +| Innodb_mutex_spin_rounds | 2 | +| Innodb_mutex_spin_waits | 1 | +| Innodb_oldest_view_low_limit_trx_id | 1280 | +| Innodb_os_log_fsyncs | 3 | +| Innodb_os_log_pending_fsyncs | 0 | +| Innodb_os_log_pending_writes | 0 | +| Innodb_os_log_written | 512 | +| Innodb_page_size | 16384 | +| Innodb_pages_created | 0 | +| Innodb_pages_read | 143 | +| Innodb_pages_written | 0 | +| Innodb_purge_trx_id | 0 | +| Innodb_purge_undo_no | 0 | +| Innodb_read_views_memory | 88 | +| Innodb_row_lock_current_waits | 0 | +| Innodb_row_lock_time | 0 | +| Innodb_row_lock_time_avg | 0 | +| Innodb_row_lock_time_max | 0 | +| Innodb_row_lock_waits | 0 | +| Innodb_rows_deleted | 0 | +| Innodb_rows_inserted | 0 | +| Innodb_rows_read | 0 | +| Innodb_rows_updated | 0 | +| Innodb_s_lock_os_waits | 2 | +| Innodb_s_lock_spin_rounds | 60 | +| Innodb_s_lock_spin_waits | 2 | +| Innodb_truncated_status_writes | 0 | +| Innodb_x_lock_os_waits | 0 | +| Innodb_x_lock_spin_rounds | 0 | +| Innodb_x_lock_spin_waits | 0 | +| Key_blocks_not_flushed | 0 | +| Key_blocks_unused | 107171 | +| Key_blocks_used | 0 | +| Key_blocks_warm | 0 | +| Key_read_requests | 0 | +| Key_reads | 0 | +| Key_write_requests | 0 | +| Key_writes | 0 | +| Last_query_cost | 0.000000 | +| Max_used_connections | 1 | +| Not_flushed_delayed_rows | 0 | +| Open_files | 21 | +| Open_streams | 0 | +| Open_table_definitions | 33 | +| Open_tables | 26 | +| Opened_files | 84 | +| Opened_table_definitions | 0 | +| Opened_tables | 0 | +| Opened_views | 0 | +| Performance_schema_cond_classes_lost | 0 | +| Performance_schema_cond_instances_lost | 0 | +| Performance_schema_file_classes_lost | 0 | +| Performance_schema_file_handles_lost | 0 | +| Performance_schema_file_instances_lost | 0 | +| Performance_schema_locker_lost | 0 | +| Performance_schema_mutex_classes_lost | 0 | +| Performance_schema_mutex_instances_lost | 0 | +| Performance_schema_rwlock_classes_lost | 0 | +| Performance_schema_rwlock_instances_lost | 0 | +| Performance_schema_table_handles_lost | 0 | +| Performance_schema_table_instances_lost | 0 | +| Performance_schema_thread_classes_lost | 0 | +| Performance_schema_thread_instances_lost | 0 | +| Prepared_stmt_count | 0 | +| Qcache_free_blocks | 1 | +| Qcache_free_memory | 67091120 | +| Qcache_hits | 0 | +| Qcache_inserts | 0 | +| Qcache_lowmem_prunes | 0 | +| Qcache_not_cached | 4 | +| 
Qcache_queries_in_cache | 0 | +| Qcache_total_blocks | 1 | +| Queries | 12 | +| Questions | 11 | +| Rows_read | 0 | +| Rows_sent | 1264 | +| Rows_tmp_read | 1260 | +| Rpl_status | AUTH_MASTER | +| Select_full_join | 0 | +| Select_full_range_join | 0 | +| Select_range | 0 | +| Select_range_check | 0 | +| Select_scan | 5 | +| Slave_heartbeat_period | 0.000 | +| Slave_open_temp_tables | 0 | +| Slave_received_heartbeats | 0 | +| Slave_retried_transactions | 0 | +| Slave_running | OFF | +| Slow_launch_threads | 0 | +| Slow_queries | 0 | +| Sort_merge_passes | 0 | +| Sort_range | 0 | +| Sort_rows | 0 | +| Sort_scan | 0 | +| Sphinx_error | | +| Sphinx_time | | +| Sphinx_total | | +| Sphinx_total_found | | +| Sphinx_word_count | | +| Sphinx_words | | +| Ssl_accept_renegotiates | 0 | +| Ssl_accepts | 0 | +| Ssl_callback_cache_hits | 0 | +| Ssl_cipher | | +| Ssl_cipher_list | | +| Ssl_client_connects | 0 | +| Ssl_connect_renegotiates | 0 | +| Ssl_ctx_verify_depth | 0 | +| Ssl_ctx_verify_mode | 0 | +| Ssl_default_timeout | 0 | +| Ssl_finished_accepts | 0 | +| Ssl_finished_connects | 0 | +| Ssl_session_cache_hits | 0 | +| Ssl_session_cache_misses | 0 | +| Ssl_session_cache_mode | NONE | +| Ssl_session_cache_overflows | 0 | +| Ssl_session_cache_size | 0 | +| Ssl_session_cache_timeouts | 0 | +| Ssl_sessions_reused | 0 | +| Ssl_used_session_cache_entries | 0 | +| Ssl_verify_depth | 0 | +| Ssl_verify_mode | 0 | +| Ssl_version | | +| Subquery_cache_hit | 0 | +| Subquery_cache_miss | 0 | +| Syncs | 0 | +| Table_locks_immediate | 36 | +| Table_locks_waited | 0 | +| Tc_log_max_pages_used | 0 | +| Tc_log_page_size | 0 | +| Tc_log_page_waits | 0 | +| Threadpool_idle_threads | 0 | +| Threadpool_threads | 0 | +| Threads_cached | 0 | +| Threads_connected | 1 | +| Threads_created | 1 | +| Threads_running | 1 | +| Uptime | 113 | +| Uptime_since_flush_status | 113 | ++------------------------------------------+-------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt new file mode 100644 index 00000000000000..5f0906eedfef7e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt @@ -0,0 +1,7 @@ ++------------------+-------+ +| Variable_name | Value | ++------------------+-------+ +| log_bin | OFF | +| max_connections | 100 | +| table_open_cache | 400 | ++------------------+-------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt new file mode 100644 index 00000000000000..a44ce5e70f7e13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt @@ -0,0 +1,6 @@ ++------+---------+ +| time | user | ++------+---------+ +| 1 | netdata | +| 9 | root | ++------+---------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/version.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/version.txt new file mode 100644 index 00000000000000..de684279d94db7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mariadb/v5.5.64/version.txt @@ -0,0 +1,6 @@ ++-----------------+---------------------------------+ +| Variable_name | Value | ++-----------------+---------------------------------+ +| 
version | 5.5.64-MariaDB-1~trusty | +| version_comment | mariadb.org binary distribution | ++-----------------+---------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_status.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_status.txt new file mode 100644 index 00000000000000..a4b2f2f93bb7cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_status.txt @@ -0,0 +1,490 @@ ++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Variable_name | Value | ++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Aborted_clients | 0 | +| Aborted_connects | 0 | +| Acl_cache_items_count | 0 | +| Binlog_cache_disk_use | 0 | +| Binlog_cache_use | 6 | +| Binlog_stmt_cache_disk_use | 0 | +| Binlog_stmt_cache_use | 0 | +| Bytes_received | 5584 | +| Bytes_sent | 70700 | +| Com_admin_commands | 5 | +| Com_assign_to_keycache | 0 | +| Com_alter_db | 0 | +| Com_alter_event | 0 | +| Com_alter_function | 0 | +| Com_alter_instance | 0 | +| Com_alter_procedure | 0 | +| Com_alter_resource_group | 0 | +| Com_alter_server | 0 | +| Com_alter_table | 0 | +| Com_alter_tablespace | 0 | +| Com_alter_user | 2 | +| Com_alter_user_default_role | 0 | +| Com_analyze | 0 | +| Com_begin | 0 | +| Com_binlog | 0 | +| Com_call_procedure | 0 | +| Com_change_db | 1 | +| Com_change_master | 0 | +| Com_change_repl_filter | 0 | +| Com_change_replication_source | 0 | +| Com_check | 0 | +| Com_checksum | 0 | +| Com_clone | 0 | +| Com_commit | 0 | +| Com_create_db | 1 | +| Com_create_event | 0 | +| Com_create_function | 0 | +| Com_create_index | 0 | +| Com_create_procedure | 0 | +| Com_create_role | 0 | +| Com_create_server | 0 | +| Com_create_table | 35 | +| Com_create_resource_group | 0 | +| Com_create_trigger | 0 | +| Com_create_udf | 0 | +| Com_create_user | 2 | +| Com_create_view | 0 | +| Com_create_spatial_reference_system | 0 | +| Com_dealloc_sql | 0 | +| Com_delete | 0 | +| Com_delete_multi | 0 | +| Com_do | 0 | +| Com_drop_db | 0 | +| Com_drop_event | 0 | +| Com_drop_function | 0 | +| Com_drop_index | 0 | +| Com_drop_procedure | 0 | +| Com_drop_resource_group | 0 | +| Com_drop_role | 0 | +| Com_drop_server | 0 | +| Com_drop_spatial_reference_system | 0 | +| Com_drop_table | 0 | +| Com_drop_trigger | 0 | +| Com_drop_user | 0 | +| Com_drop_view | 0 | +| Com_empty_query | 0 | +| Com_execute_sql | 0 | +| Com_explain_other | 0 | +| Com_flush | 1 | +| Com_get_diagnostics | 0 | +| Com_grant | 2 | +| Com_grant_roles | 0 | +| Com_ha_close | 0 | +| Com_ha_open | 0 | +| 
Com_ha_read | 0 | +| Com_help | 0 | +| Com_import | 0 | +| Com_insert | 0 | +| Com_insert_select | 0 | +| Com_install_component | 0 | +| Com_install_plugin | 0 | +| Com_kill | 0 | +| Com_load | 0 | +| Com_lock_instance | 0 | +| Com_lock_tables | 0 | +| Com_optimize | 0 | +| Com_preload_keys | 0 | +| Com_prepare_sql | 0 | +| Com_purge | 0 | +| Com_purge_before_date | 0 | +| Com_release_savepoint | 0 | +| Com_rename_table | 0 | +| Com_rename_user | 0 | +| Com_repair | 0 | +| Com_replace | 0 | +| Com_replace_select | 0 | +| Com_reset | 0 | +| Com_resignal | 0 | +| Com_restart | 0 | +| Com_revoke | 0 | +| Com_revoke_all | 0 | +| Com_revoke_roles | 0 | +| Com_rollback | 0 | +| Com_rollback_to_savepoint | 0 | +| Com_savepoint | 0 | +| Com_select | 2 | +| Com_set_option | 4 | +| Com_set_password | 0 | +| Com_set_resource_group | 0 | +| Com_set_role | 0 | +| Com_signal | 0 | +| Com_show_binlog_events | 0 | +| Com_show_binlogs | 0 | +| Com_show_charsets | 0 | +| Com_show_collations | 0 | +| Com_show_create_db | 0 | +| Com_show_create_event | 0 | +| Com_show_create_func | 0 | +| Com_show_create_proc | 0 | +| Com_show_create_table | 0 | +| Com_show_create_trigger | 0 | +| Com_show_databases | 0 | +| Com_show_engine_logs | 0 | +| Com_show_engine_mutex | 0 | +| Com_show_engine_status | 0 | +| Com_show_events | 0 | +| Com_show_errors | 0 | +| Com_show_fields | 0 | +| Com_show_function_code | 0 | +| Com_show_function_status | 0 | +| Com_show_grants | 0 | +| Com_show_keys | 0 | +| Com_show_master_status | 0 | +| Com_show_open_tables | 0 | +| Com_show_plugins | 0 | +| Com_show_privileges | 0 | +| Com_show_procedure_code | 0 | +| Com_show_procedure_status | 0 | +| Com_show_processlist | 0 | +| Com_show_profile | 0 | +| Com_show_profiles | 0 | +| Com_show_relaylog_events | 0 | +| Com_show_replicas | 0 | +| Com_show_slave_hosts | 0 | +| Com_show_replica_status | 2 | +| Com_show_slave_status | 2 | +| Com_show_status | 5 | +| Com_show_storage_engines | 0 | +| Com_show_table_status | 0 | +| Com_show_tables | 0 | +| Com_show_triggers | 0 | +| Com_show_variables | 1 | +| Com_show_warnings | 0 | +| Com_show_create_user | 0 | +| Com_shutdown | 0 | +| Com_replica_start | 0 | +| Com_slave_start | 0 | +| Com_replica_stop | 0 | +| Com_slave_stop | 0 | +| Com_group_replication_start | 0 | +| Com_group_replication_stop | 0 | +| Com_stmt_execute | 0 | +| Com_stmt_close | 0 | +| Com_stmt_fetch | 0 | +| Com_stmt_prepare | 0 | +| Com_stmt_reset | 0 | +| Com_stmt_send_long_data | 0 | +| Com_truncate | 0 | +| Com_uninstall_component | 0 | +| Com_uninstall_plugin | 0 | +| Com_unlock_instance | 0 | +| Com_unlock_tables | 0 | +| Com_update | 0 | +| Com_update_multi | 0 | +| Com_xa_commit | 0 | +| Com_xa_end | 0 | +| Com_xa_prepare | 0 | +| Com_xa_recover | 0 | +| Com_xa_rollback | 0 | +| Com_xa_start | 0 | +| Com_stmt_reprepare | 0 | +| Connection_errors_accept | 0 | +| Connection_errors_internal | 0 | +| Connection_errors_max_connections | 0 | +| Connection_errors_peer_address | 0 | +| Connection_errors_select | 0 | +| Connection_errors_tcpwrap | 0 | +| Connections | 25 | +| Created_tmp_disk_tables | 0 | +| Created_tmp_files | 5 | +| Created_tmp_tables | 6 | +| Current_tls_ca | ca.pem | +| Current_tls_capath | | +| Current_tls_cert | server-cert.pem | +| Current_tls_cipher | | +| Current_tls_ciphersuites | | +| Current_tls_crl | | +| Current_tls_crlpath | | +| Current_tls_key | server-key.pem | +| Current_tls_version | TLSv1.2,TLSv1.3 | +| Delayed_errors | 0 | +| Delayed_insert_threads | 0 | +| Delayed_writes | 0 | +| 
Error_log_buffered_bytes | 2752 | +| Error_log_buffered_events | 15 | +| Error_log_expired_events | 0 | +| Error_log_latest_write | 1660827046947930 | +| Flush_commands | 3 | +| Global_connection_memory | 0 | +| Handler_commit | 720 | +| Handler_delete | 8 | +| Handler_discover | 0 | +| Handler_external_lock | 6779 | +| Handler_mrr_init | 0 | +| Handler_prepare | 24 | +| Handler_read_first | 50 | +| Handler_read_key | 1914 | +| Handler_read_last | 0 | +| Handler_read_next | 4303 | +| Handler_read_prev | 0 | +| Handler_read_rnd | 0 | +| Handler_read_rnd_next | 4723 | +| Handler_rollback | 1 | +| Handler_savepoint | 0 | +| Handler_savepoint_rollback | 0 | +| Handler_update | 373 | +| Handler_write | 1966 | +| Innodb_buffer_pool_dump_status | Dumping of buffer pool not started | +| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220818 12:50:46 | +| Innodb_buffer_pool_resize_status | | +| Innodb_buffer_pool_pages_data | 1045 | +| Innodb_buffer_pool_bytes_data | 17121280 | +| Innodb_buffer_pool_pages_dirty | 0 | +| Innodb_buffer_pool_bytes_dirty | 0 | +| Innodb_buffer_pool_pages_flushed | 361 | +| Innodb_buffer_pool_pages_free | 7143 | +| Innodb_buffer_pool_pages_misc | 4 | +| Innodb_buffer_pool_pages_total | 8192 | +| Innodb_buffer_pool_read_ahead_rnd | 0 | +| Innodb_buffer_pool_read_ahead | 0 | +| Innodb_buffer_pool_read_ahead_evicted | 0 | +| Innodb_buffer_pool_read_requests | 16723 | +| Innodb_buffer_pool_reads | 878 | +| Innodb_buffer_pool_wait_free | 0 | +| Innodb_buffer_pool_write_requests | 2377 | +| Innodb_data_fsyncs | 255 | +| Innodb_data_pending_fsyncs | 0 | +| Innodb_data_pending_reads | 0 | +| Innodb_data_pending_writes | 0 | +| Innodb_data_read | 14453760 | +| Innodb_data_reads | 899 | +| Innodb_data_writes | 561 | +| Innodb_data_written | 6128128 | +| Innodb_dblwr_pages_written | 220 | +| Innodb_dblwr_writes | 58 | +| Innodb_redo_log_read_only | OFF | +| Innodb_redo_log_uuid | 1075899837 | +| Innodb_redo_log_checkpoint_lsn | 31758453 | +| Innodb_redo_log_current_lsn | 31758453 | +| Innodb_redo_log_flushed_to_disk_lsn | 31758453 | +| Innodb_redo_log_logical_size | 512 | +| Innodb_redo_log_physical_size | 3276800 | +| Innodb_redo_log_capacity_resized | 104857600 | +| Innodb_redo_log_resize_status | OK | +| Innodb_log_waits | 0 | +| Innodb_log_write_requests | 1062 | +| Innodb_log_writes | 116 | +| Innodb_os_log_fsyncs | 69 | +| Innodb_os_log_pending_fsyncs | 0 | +| Innodb_os_log_pending_writes | 0 | +| Innodb_os_log_written | 147968 | +| Innodb_page_size | 16384 | +| Innodb_pages_created | 168 | +| Innodb_pages_read | 877 | +| Innodb_pages_written | 361 | +| Innodb_redo_log_enabled | ON | +| Innodb_row_lock_current_waits | 0 | +| Innodb_row_lock_time | 0 | +| Innodb_row_lock_time_avg | 0 | +| Innodb_row_lock_time_max | 0 | +| Innodb_row_lock_waits | 0 | +| Innodb_rows_deleted | 0 | +| Innodb_rows_inserted | 0 | +| Innodb_rows_read | 0 | +| Innodb_rows_updated | 0 | +| Innodb_system_rows_deleted | 8 | +| Innodb_system_rows_inserted | 12 | +| Innodb_system_rows_read | 5134 | +| Innodb_system_rows_updated | 373 | +| Innodb_sampled_pages_read | 0 | +| Innodb_sampled_pages_skipped | 0 | +| Innodb_num_open_files | 15 | +| Innodb_truncated_status_writes | 0 | +| Innodb_undo_tablespaces_total | 2 | +| Innodb_undo_tablespaces_implicit | 2 | +| Innodb_undo_tablespaces_explicit | 0 | +| Innodb_undo_tablespaces_active | 2 | +| Key_blocks_not_flushed | 0 | +| Key_blocks_unused | 6698 | +| Key_blocks_used | 0 | +| Key_read_requests | 0 | +| Key_reads | 0 | +| 
Key_write_requests | 0 | +| Key_writes | 0 | +| Locked_connects | 0 | +| Max_execution_time_exceeded | 0 | +| Max_execution_time_set | 0 | +| Max_execution_time_set_failed | 0 | +| Max_used_connections | 2 | +| Max_used_connections_time | 2022-08-18 12:51:46 | +| Mysqlx_aborted_clients | 0 | +| Mysqlx_address | :: | +| Mysqlx_bytes_received | 0 | +| Mysqlx_bytes_received_compressed_payload | 0 | +| Mysqlx_bytes_received_uncompressed_frame | 0 | +| Mysqlx_bytes_sent | 0 | +| Mysqlx_bytes_sent_compressed_payload | 0 | +| Mysqlx_bytes_sent_uncompressed_frame | 0 | +| Mysqlx_compression_algorithm | | +| Mysqlx_compression_level | | +| Mysqlx_connection_accept_errors | 0 | +| Mysqlx_connection_errors | 0 | +| Mysqlx_connections_accepted | 0 | +| Mysqlx_connections_closed | 0 | +| Mysqlx_connections_rejected | 0 | +| Mysqlx_crud_create_view | 0 | +| Mysqlx_crud_delete | 0 | +| Mysqlx_crud_drop_view | 0 | +| Mysqlx_crud_find | 0 | +| Mysqlx_crud_insert | 0 | +| Mysqlx_crud_modify_view | 0 | +| Mysqlx_crud_update | 0 | +| Mysqlx_cursor_close | 0 | +| Mysqlx_cursor_fetch | 0 | +| Mysqlx_cursor_open | 0 | +| Mysqlx_errors_sent | 0 | +| Mysqlx_errors_unknown_message_type | 0 | +| Mysqlx_expect_close | 0 | +| Mysqlx_expect_open | 0 | +| Mysqlx_init_error | 0 | +| Mysqlx_messages_sent | 0 | +| Mysqlx_notice_global_sent | 0 | +| Mysqlx_notice_other_sent | 0 | +| Mysqlx_notice_warning_sent | 0 | +| Mysqlx_notified_by_group_replication | 0 | +| Mysqlx_port | 33060 | +| Mysqlx_prep_deallocate | 0 | +| Mysqlx_prep_execute | 0 | +| Mysqlx_prep_prepare | 0 | +| Mysqlx_rows_sent | 0 | +| Mysqlx_sessions | 0 | +| Mysqlx_sessions_accepted | 0 | +| Mysqlx_sessions_closed | 0 | +| Mysqlx_sessions_fatal_error | 0 | +| Mysqlx_sessions_killed | 0 | +| Mysqlx_sessions_rejected | 0 | +| Mysqlx_socket | /var/run/mysqld/mysqlx.sock | +| Mysqlx_ssl_accepts | 0 | +| Mysqlx_ssl_active | | +| Mysqlx_ssl_cipher | | +| Mysqlx_ssl_cipher_list | | +| Mysqlx_ssl_ctx_verify_depth | 18446744073709551615 | +| Mysqlx_ssl_ctx_verify_mode | 5 | +| Mysqlx_ssl_finished_accepts | 0 | +| Mysqlx_ssl_server_not_after | Aug 15 12:43:39 2032 GMT | +| Mysqlx_ssl_server_not_before | Aug 18 12:43:39 2022 GMT | +| Mysqlx_ssl_verify_depth | | +| Mysqlx_ssl_verify_mode | | +| Mysqlx_ssl_version | | +| Mysqlx_stmt_create_collection | 0 | +| Mysqlx_stmt_create_collection_index | 0 | +| Mysqlx_stmt_disable_notices | 0 | +| Mysqlx_stmt_drop_collection | 0 | +| Mysqlx_stmt_drop_collection_index | 0 | +| Mysqlx_stmt_enable_notices | 0 | +| Mysqlx_stmt_ensure_collection | 0 | +| Mysqlx_stmt_execute_mysqlx | 0 | +| Mysqlx_stmt_execute_sql | 0 | +| Mysqlx_stmt_execute_xplugin | 0 | +| Mysqlx_stmt_get_collection_options | 0 | +| Mysqlx_stmt_kill_client | 0 | +| Mysqlx_stmt_list_clients | 0 | +| Mysqlx_stmt_list_notices | 0 | +| Mysqlx_stmt_list_objects | 0 | +| Mysqlx_stmt_modify_collection_options | 0 | +| Mysqlx_stmt_ping | 0 | +| Mysqlx_worker_threads | 2 | +| Mysqlx_worker_threads_active | 0 | +| Not_flushed_delayed_rows | 0 | +| Ongoing_anonymous_transaction_count | 0 | +| Open_files | 8 | +| Open_streams | 0 | +| Open_table_definitions | 48 | +| Open_tables | 127 | +| Opened_files | 8 | +| Opened_table_definitions | 77 | +| Opened_tables | 208 | +| Performance_schema_accounts_lost | 0 | +| Performance_schema_cond_classes_lost | 0 | +| Performance_schema_cond_instances_lost | 0 | +| Performance_schema_digest_lost | 0 | +| Performance_schema_file_classes_lost | 0 | +| Performance_schema_file_handles_lost | 0 | +| Performance_schema_file_instances_lost | 0 
| +| Performance_schema_hosts_lost | 0 | +| Performance_schema_index_stat_lost | 0 | +| Performance_schema_locker_lost | 0 | +| Performance_schema_memory_classes_lost | 0 | +| Performance_schema_metadata_lock_lost | 0 | +| Performance_schema_mutex_classes_lost | 0 | +| Performance_schema_mutex_instances_lost | 0 | +| Performance_schema_nested_statement_lost | 0 | +| Performance_schema_prepared_statements_lost | 0 | +| Performance_schema_program_lost | 0 | +| Performance_schema_rwlock_classes_lost | 0 | +| Performance_schema_rwlock_instances_lost | 0 | +| Performance_schema_session_connect_attrs_longest_seen | 112 | +| Performance_schema_session_connect_attrs_lost | 0 | +| Performance_schema_socket_classes_lost | 0 | +| Performance_schema_socket_instances_lost | 0 | +| Performance_schema_stage_classes_lost | 0 | +| Performance_schema_statement_classes_lost | 0 | +| Performance_schema_table_handles_lost | 0 | +| Performance_schema_table_instances_lost | 0 | +| Performance_schema_table_lock_stat_lost | 0 | +| Performance_schema_thread_classes_lost | 0 | +| Performance_schema_thread_instances_lost | 0 | +| Performance_schema_users_lost | 0 | +| Prepared_stmt_count | 0 | +| Queries | 27 | +| Questions | 15 | +| Replica_open_temp_tables | 0 | +| Secondary_engine_execution_count | 0 | +| Select_full_join | 0 | +| Select_full_range_join | 0 | +| Select_range | 0 | +| Select_range_check | 0 | +| Select_scan | 12 | +| Slave_open_temp_tables | 0 | +| Slow_launch_threads | 0 | +| Slow_queries | 0 | +| Sort_merge_passes | 0 | +| Sort_range | 0 | +| Sort_rows | 0 | +| Sort_scan | 0 | +| Ssl_accept_renegotiates | 0 | +| Ssl_accepts | 0 | +| Ssl_callback_cache_hits | 0 | +| Ssl_cipher | | +| Ssl_cipher_list | | +| Ssl_client_connects | 0 | +| Ssl_connect_renegotiates | 0 | +| Ssl_ctx_verify_depth | 18446744073709551615 | +| Ssl_ctx_verify_mode | 5 | +| Ssl_default_timeout | 0 | +| Ssl_finished_accepts | 0 | +| Ssl_finished_connects | 0 | +| Ssl_server_not_after | Aug 15 12:43:39 2032 GMT | +| Ssl_server_not_before | Aug 18 12:43:39 2022 GMT | +| Ssl_session_cache_hits | 0 | +| Ssl_session_cache_misses | 0 | +| Ssl_session_cache_mode | SERVER | +| Ssl_session_cache_overflows | 0 | +| Ssl_session_cache_size | 128 | +| Ssl_session_cache_timeout | 300 | +| Ssl_session_cache_timeouts | 0 | +| Ssl_sessions_reused | 0 | +| Ssl_used_session_cache_entries | 0 | +| Ssl_verify_depth | 0 | +| Ssl_verify_mode | 0 | +| Ssl_version | | +| Table_locks_immediate | 6 | +| Table_locks_waited | 0 | +| Table_open_cache_hits | 3182 | +| Table_open_cache_misses | 208 | +| Table_open_cache_overflows | 0 | +| Tc_log_max_pages_used | 0 | +| Tc_log_page_size | 0 | +| Tc_log_page_waits | 0 | +| Threads_cached | 1 | +| Threads_connected | 1 | +| Threads_created | 2 | +| Threads_running | 2 | +| Tls_library_version | OpenSSL 1.1.1k FIPS 25 Mar 2021 | +| Uptime | 152 | +| Uptime_since_flush_status | 152 | ++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt 
b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt new file mode 100644 index 00000000000000..02be0ae8ebf74a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt @@ -0,0 +1,9 @@ ++--------------------------+-------+ +| Variable_name | Value | ++--------------------------+-------+ +| disabled_storage_engines | | +| log_bin | ON | +| max_connections | 151 | +| performance_schema | ON | +| table_open_cache | 4000 | ++--------------------------+-------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/process_list.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/process_list.txt new file mode 100644 index 00000000000000..a44ce5e70f7e13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/process_list.txt @@ -0,0 +1,6 @@ ++------+---------+ +| time | user | ++------+---------+ +| 1 | netdata | +| 9 | root | ++------+---------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt new file mode 100644 index 00000000000000..8a5e06836b84f4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt @@ -0,0 +1,6 @@ ++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+ +| Replica_IO_State | Source_Host | Source_User | Source_Port | Connect_Retry | Source_Log_File | Read_Source_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Source_Log_File | Replica_IO_Running | Replica_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | Exec_Source_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Source_SSL_Allowed | Source_SSL_CA_File | Source_SSL_CA_Path | Source_SSL_Cert | Source_SSL_Cipher | Source_SSL_Key | Seconds_Behind_Source | Source_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | 
Last_SQL_Error | Replicate_Ignore_Server_Ids | Source_Server_Id | Master_UUID | Source_Info_File | SQL_Delay | SQL_Remaining_Delay | Replica_SQL_Running_State | Source_Retry_Count | Source_Bind | Last_IO_Error_Timestamp | Last_SQL_Error_Timestamp | Source_SSL_Crl | Source_SSL_Crlpath | Retrieved_Gtid_Set | Executed_Gtid_Set | Auto_Position | Replicate_Rewrite_DB | Channel_Name | Source_TLS_Version | Source_public_key_path | Get_Source_public_key | Network_Namespace | ++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+ +| Waiting for source to send event | mysql-master1 | repl1 | 3306 | 60 | mysql-bin-1.000003 | 975 | mysql-slave-relay-bin-master1.000003 | 1195 | mysql-bin-1.000003 | Yes | Yes | | | | | | | 0 | | 0 | 975 | 1599 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 1 | 61221e31-1ef3-11ed-a56a-0242ac120002 | mysql.slave_master_info | 0 | NULL | Replica has read all relay log; waiting for more updates | 86400 | | | | | | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3 | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3,6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 1 | | master1 | | | 0 | | +| Waiting for source to send event | mysql-master2 | repl2 | 3306 | 60 | mysql-bin-1.000003 | 974 | mysql-slave-relay-bin-master2.000003 | 1194 | mysql-bin-1.000003 | Yes | Yes | | | | | | | 0 | | 0 | 974 | 1598 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 2 | 6151d979-1ef3-11ed-a509-0242ac120003 | mysql.slave_master_info | 0 | NULL | Replica has read all relay log; waiting for more updates | 86400 | | | | | | 6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3,6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 1 | | master2 | | | 0 | | 
++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/version.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/version.txt new file mode 100644 index 00000000000000..5c553b1adfd1eb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/mysql/v8.0.30/version.txt @@ -0,0 +1,6 @@ ++-----------------+------------------------------+ +| Variable_name | Value | ++-----------------+------------------------------+ +| version | 8.0.30 | +| version_comment | MySQL Community Server - GPL | ++-----------------+------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_status.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_status.txt new file mode 100644 index 00000000000000..d7ee5741aa524f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_status.txt @@ -0,0 +1,533 @@ ++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Variable_name | Value | ++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Aborted_clients | 0 | +| Aborted_connects | 1 | +| Acl_cache_items_count | 0 | +| 
Binlog_snapshot_file | | +| Binlog_snapshot_position | 0 | +| Binlog_cache_disk_use | 0 | +| Binlog_cache_use | 0 | +| Binlog_snapshot_gtid_executed | not-in-consistent-snapshot | +| Binlog_stmt_cache_disk_use | 0 | +| Binlog_stmt_cache_use | 0 | +| Bytes_received | 682970 | +| Bytes_sent | 33668405 | +| Com_admin_commands | 1 | +| Com_assign_to_keycache | 0 | +| Com_alter_db | 0 | +| Com_alter_event | 0 | +| Com_alter_function | 0 | +| Com_alter_instance | 0 | +| Com_alter_procedure | 0 | +| Com_alter_resource_group | 0 | +| Com_alter_server | 0 | +| Com_alter_table | 0 | +| Com_alter_tablespace | 0 | +| Com_alter_user | 0 | +| Com_alter_user_default_role | 0 | +| Com_analyze | 0 | +| Com_begin | 0 | +| Com_binlog | 0 | +| Com_call_procedure | 0 | +| Com_change_db | 1 | +| Com_change_master | 0 | +| Com_change_repl_filter | 0 | +| Com_change_replication_source | 0 | +| Com_check | 0 | +| Com_checksum | 0 | +| Com_clone | 0 | +| Com_commit | 0 | +| Com_create_compression_dictionary | 0 | +| Com_create_db | 1 | +| Com_create_event | 0 | +| Com_create_function | 0 | +| Com_create_index | 0 | +| Com_create_procedure | 0 | +| Com_create_role | 0 | +| Com_create_server | 0 | +| Com_create_table | 34 | +| Com_create_resource_group | 0 | +| Com_create_trigger | 0 | +| Com_create_udf | 0 | +| Com_create_user | 0 | +| Com_create_view | 0 | +| Com_create_spatial_reference_system | 0 | +| Com_dealloc_sql | 0 | +| Com_delete | 0 | +| Com_delete_multi | 0 | +| Com_do | 0 | +| Com_drop_compression_dictionary | 0 | +| Com_drop_db | 0 | +| Com_drop_event | 0 | +| Com_drop_function | 0 | +| Com_drop_index | 0 | +| Com_drop_procedure | 0 | +| Com_drop_resource_group | 0 | +| Com_drop_role | 0 | +| Com_drop_server | 0 | +| Com_drop_spatial_reference_system | 0 | +| Com_drop_table | 0 | +| Com_drop_trigger | 0 | +| Com_drop_user | 0 | +| Com_drop_view | 0 | +| Com_empty_query | 0 | +| Com_execute_sql | 0 | +| Com_explain_other | 0 | +| Com_flush | 1 | +| Com_get_diagnostics | 0 | +| Com_grant | 0 | +| Com_grant_roles | 0 | +| Com_ha_close | 0 | +| Com_ha_open | 0 | +| Com_ha_read | 0 | +| Com_help | 0 | +| Com_import | 0 | +| Com_insert | 0 | +| Com_insert_select | 0 | +| Com_install_component | 0 | +| Com_install_plugin | 0 | +| Com_kill | 0 | +| Com_load | 0 | +| Com_lock_instance | 0 | +| Com_lock_tables | 0 | +| Com_lock_tables_for_backup | 0 | +| Com_optimize | 0 | +| Com_preload_keys | 0 | +| Com_prepare_sql | 0 | +| Com_purge | 0 | +| Com_purge_before_date | 0 | +| Com_release_savepoint | 0 | +| Com_rename_table | 0 | +| Com_rename_user | 0 | +| Com_repair | 0 | +| Com_replace | 0 | +| Com_replace_select | 0 | +| Com_reset | 0 | +| Com_resignal | 0 | +| Com_restart | 0 | +| Com_revoke | 0 | +| Com_revoke_all | 0 | +| Com_revoke_roles | 0 | +| Com_rollback | 0 | +| Com_rollback_to_savepoint | 0 | +| Com_savepoint | 0 | +| Com_select | 1687 | +| Com_set_option | 4 | +| Com_set_password | 0 | +| Com_set_resource_group | 0 | +| Com_set_role | 0 | +| Com_signal | 0 | +| Com_show_binlog_events | 0 | +| Com_show_binlogs | 0 | +| Com_show_charsets | 0 | +| Com_show_client_statistics | 0 | +| Com_show_collations | 0 | +| Com_show_create_db | 0 | +| Com_show_create_event | 0 | +| Com_show_create_func | 0 | +| Com_show_create_proc | 0 | +| Com_show_create_table | 0 | +| Com_show_create_trigger | 0 | +| Com_show_databases | 0 | +| Com_show_engine_logs | 0 | +| Com_show_engine_mutex | 0 | +| Com_show_engine_status | 0 | +| Com_show_events | 0 | +| Com_show_errors | 0 | +| Com_show_fields | 0 | +| 
Com_show_function_code | 0 | +| Com_show_function_status | 0 | +| Com_show_grants | 0 | +| Com_show_index_statistics | 0 | +| Com_show_keys | 0 | +| Com_show_master_status | 0 | +| Com_show_open_tables | 0 | +| Com_show_plugins | 2 | +| Com_show_privileges | 0 | +| Com_show_procedure_code | 0 | +| Com_show_procedure_status | 0 | +| Com_show_processlist | 0 | +| Com_show_profile | 0 | +| Com_show_profiles | 0 | +| Com_show_relaylog_events | 0 | +| Com_show_replicas | 0 | +| Com_show_slave_hosts | 0 | +| Com_show_replica_status | 1681 | +| Com_show_slave_status | 1681 | +| Com_show_status | 1682 | +| Com_show_storage_engines | 0 | +| Com_show_table_statistics | 0 | +| Com_show_table_status | 0 | +| Com_show_tables | 0 | +| Com_show_thread_statistics | 0 | +| Com_show_triggers | 0 | +| Com_show_user_statistics | 0 | +| Com_show_variables | 1689 | +| Com_show_warnings | 0 | +| Com_show_create_user | 0 | +| Com_shutdown | 0 | +| Com_replica_start | 0 | +| Com_slave_start | 0 | +| Com_replica_stop | 0 | +| Com_slave_stop | 0 | +| Com_group_replication_start | 0 | +| Com_group_replication_stop | 0 | +| Com_stmt_execute | 0 | +| Com_stmt_close | 0 | +| Com_stmt_fetch | 0 | +| Com_stmt_prepare | 0 | +| Com_stmt_reset | 0 | +| Com_stmt_send_long_data | 0 | +| Com_truncate | 0 | +| Com_uninstall_component | 0 | +| Com_uninstall_plugin | 0 | +| Com_unlock_instance | 0 | +| Com_unlock_tables | 0 | +| Com_update | 0 | +| Com_update_multi | 0 | +| Com_xa_commit | 0 | +| Com_xa_end | 0 | +| Com_xa_prepare | 0 | +| Com_xa_recover | 0 | +| Com_xa_rollback | 0 | +| Com_xa_start | 0 | +| Com_stmt_reprepare | 0 | +| Connection_errors_accept | 0 | +| Connection_errors_internal | 0 | +| Connection_errors_max_connections | 0 | +| Connection_errors_peer_address | 0 | +| Connection_errors_select | 0 | +| Connection_errors_tcpwrap | 0 | +| Connections | 13 | +| Created_tmp_disk_tables | 1683 | +| Created_tmp_files | 5 | +| Created_tmp_tables | 5054 | +| Current_tls_ca | ca.pem | +| Current_tls_capath | | +| Current_tls_cert | server-cert.pem | +| Current_tls_cipher | | +| Current_tls_ciphersuites | | +| Current_tls_crl | | +| Current_tls_crlpath | | +| Current_tls_key | server-key.pem | +| Current_tls_version | TLSv1.2,TLSv1.3 | +| Delayed_errors | 0 | +| Delayed_insert_threads | 0 | +| Delayed_writes | 0 | +| Error_log_buffered_bytes | 1304 | +| Error_log_buffered_events | 9 | +| Error_log_expired_events | 0 | +| Error_log_latest_write | 1660920303043759 | +| Flush_commands | 3 | +| Global_connection_memory | 0 | +| Handler_commit | 576 | +| Handler_delete | 0 | +| Handler_discover | 0 | +| Handler_external_lock | 13215 | +| Handler_mrr_init | 0 | +| Handler_prepare | 0 | +| Handler_read_first | 1724 | +| Handler_read_key | 3439 | +| Handler_read_last | 0 | +| Handler_read_next | 4147 | +| Handler_read_prev | 0 | +| Handler_read_rnd | 0 | +| Handler_read_rnd_next | 2983285 | +| Handler_rollback | 0 | +| Handler_savepoint | 0 | +| Handler_savepoint_rollback | 0 | +| Handler_update | 317 | +| Handler_write | 906501 | +| Innodb_background_log_sync | 0 | +| Innodb_buffer_pool_dump_status | Dumping of buffer pool not started | +| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220819 14:45:02 | +| Innodb_buffer_pool_resize_status | | +| Innodb_buffer_pool_pages_data | 1123 | +| Innodb_buffer_pool_bytes_data | 18399232 | +| Innodb_buffer_pool_pages_dirty | 3 | +| Innodb_buffer_pool_bytes_dirty | 49152 | +| Innodb_buffer_pool_pages_flushed | 205 | +| Innodb_buffer_pool_pages_free | 7064 | +| 
Innodb_buffer_pool_pages_LRU_flushed | 0 | +| Innodb_buffer_pool_pages_made_not_young | 27 | +| Innodb_buffer_pool_pages_made_young | 6342 | +| Innodb_buffer_pool_pages_misc | 5 | +| Innodb_buffer_pool_pages_old | 421 | +| Innodb_buffer_pool_pages_total | 8192 | +| Innodb_buffer_pool_read_ahead_rnd | 0 | +| Innodb_buffer_pool_read_ahead | 0 | +| Innodb_buffer_pool_read_ahead_evicted | 0 | +| Innodb_buffer_pool_read_requests | 109817 | +| Innodb_buffer_pool_reads | 978 | +| Innodb_buffer_pool_wait_free | 0 | +| Innodb_buffer_pool_write_requests | 77412 | +| Innodb_checkpoint_age | 0 | +| Innodb_checkpoint_max_age | 80576000 | +| Innodb_data_fsyncs | 50 | +| Innodb_data_pending_fsyncs | 0 | +| Innodb_data_pending_reads | 0 | +| Innodb_data_pending_writes | 0 | +| Innodb_data_read | 16094208 | +| Innodb_data_reads | 1002 | +| Innodb_data_writes | 288 | +| Innodb_data_written | 3420160 | +| Innodb_dblwr_pages_written | 30 | +| Innodb_dblwr_writes | 8 | +| Innodb_ibuf_free_list | 0 | +| Innodb_ibuf_segment_size | 2 | +| Innodb_log_waits | 0 | +| Innodb_log_write_requests | 651 | +| Innodb_log_writes | 47 | +| Innodb_lsn_current | 31778525 | +| Innodb_lsn_flushed | 31778525 | +| Innodb_lsn_last_checkpoint | 31778525 | +| Innodb_master_thread_active_loops | 1674 | +| Innodb_master_thread_idle_loops | 36 | +| Innodb_max_trx_id | 1803 | +| Innodb_oldest_view_low_limit_trx_id | 0 | +| Innodb_os_log_fsyncs | 13 | +| Innodb_os_log_pending_fsyncs | 0 | +| Innodb_os_log_pending_writes | 0 | +| Innodb_os_log_written | 45568 | +| Innodb_page_size | 16384 | +| Innodb_pages_created | 155 | +| Innodb_pages_read | 977 | +| Innodb_pages0_read | 7 | +| Innodb_pages_written | 205 | +| Innodb_purge_trx_id | 1801 | +| Innodb_purge_undo_no | 0 | +| Innodb_redo_log_enabled | ON | +| Innodb_row_lock_current_waits | 0 | +| Innodb_row_lock_time | 0 | +| Innodb_row_lock_time_avg | 0 | +| Innodb_row_lock_time_max | 0 | +| Innodb_row_lock_waits | 0 | +| Innodb_rows_deleted | 0 | +| Innodb_rows_inserted | 5055 | +| Innodb_rows_read | 5055 | +| Innodb_rows_updated | 0 | +| Innodb_system_rows_deleted | 0 | +| Innodb_system_rows_inserted | 0 | +| Innodb_system_rows_read | 4881 | +| Innodb_system_rows_updated | 317 | +| Innodb_sampled_pages_read | 0 | +| Innodb_sampled_pages_skipped | 0 | +| Innodb_num_open_files | 17 | +| Innodb_truncated_status_writes | 0 | +| Innodb_undo_tablespaces_total | 2 | +| Innodb_undo_tablespaces_implicit | 2 | +| Innodb_undo_tablespaces_explicit | 0 | +| Innodb_undo_tablespaces_active | 2 | +| Innodb_secondary_index_triggered_cluster_reads | 2098 | +| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 | +| Innodb_buffered_aio_submitted | 0 | +| Innodb_scan_pages_contiguous | 0 | +| Innodb_scan_pages_disjointed | 0 | +| Innodb_scan_pages_total_seek_distance | 0 | +| Innodb_scan_data_size | 0 | +| Innodb_scan_deleted_recs_size | 0 | +| Innodb_scrub_log | 0 | +| Innodb_scrub_background_page_reorganizations | 0 | +| Innodb_scrub_background_page_splits | 0 | +| Innodb_scrub_background_page_split_failures_underflow | 0 | +| Innodb_scrub_background_page_split_failures_out_of_filespace | 0 | +| Innodb_scrub_background_page_split_failures_missing_index | 0 | +| Innodb_scrub_background_page_split_failures_unknown | 0 | +| Innodb_encryption_n_merge_blocks_encrypted | 0 | +| Innodb_encryption_n_merge_blocks_decrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_encrypted | 0 | +| Innodb_encryption_n_rowlog_blocks_decrypted | 0 | +| Innodb_encryption_redo_key_version | 0 | +| Key_blocks_not_flushed | 0 | 
+| Key_blocks_unused | 6698 | +| Key_blocks_used | 0 | +| Key_read_requests | 0 | +| Key_reads | 0 | +| Key_write_requests | 0 | +| Key_writes | 0 | +| Locked_connects | 0 | +| Max_execution_time_exceeded | 0 | +| Max_execution_time_set | 0 | +| Max_execution_time_set_failed | 0 | +| Max_used_connections | 3 | +| Max_used_connections_time | 2022-08-19 15:05:34 | +| Mysqlx_aborted_clients | 0 | +| Mysqlx_address | :: | +| Mysqlx_bytes_received | 0 | +| Mysqlx_bytes_received_compressed_payload | 0 | +| Mysqlx_bytes_received_uncompressed_frame | 0 | +| Mysqlx_bytes_sent | 0 | +| Mysqlx_bytes_sent_compressed_payload | 0 | +| Mysqlx_bytes_sent_uncompressed_frame | 0 | +| Mysqlx_compression_algorithm | | +| Mysqlx_compression_level | | +| Mysqlx_connection_accept_errors | 0 | +| Mysqlx_connection_errors | 0 | +| Mysqlx_connections_accepted | 0 | +| Mysqlx_connections_closed | 0 | +| Mysqlx_connections_rejected | 0 | +| Mysqlx_crud_create_view | 0 | +| Mysqlx_crud_delete | 0 | +| Mysqlx_crud_drop_view | 0 | +| Mysqlx_crud_find | 0 | +| Mysqlx_crud_insert | 0 | +| Mysqlx_crud_modify_view | 0 | +| Mysqlx_crud_update | 0 | +| Mysqlx_cursor_close | 0 | +| Mysqlx_cursor_fetch | 0 | +| Mysqlx_cursor_open | 0 | +| Mysqlx_errors_sent | 0 | +| Mysqlx_errors_unknown_message_type | 0 | +| Mysqlx_expect_close | 0 | +| Mysqlx_expect_open | 0 | +| Mysqlx_init_error | 0 | +| Mysqlx_messages_sent | 0 | +| Mysqlx_notice_global_sent | 0 | +| Mysqlx_notice_other_sent | 0 | +| Mysqlx_notice_warning_sent | 0 | +| Mysqlx_notified_by_group_replication | 0 | +| Mysqlx_port | 33060 | +| Mysqlx_prep_deallocate | 0 | +| Mysqlx_prep_execute | 0 | +| Mysqlx_prep_prepare | 0 | +| Mysqlx_rows_sent | 0 | +| Mysqlx_sessions | 0 | +| Mysqlx_sessions_accepted | 0 | +| Mysqlx_sessions_closed | 0 | +| Mysqlx_sessions_fatal_error | 0 | +| Mysqlx_sessions_killed | 0 | +| Mysqlx_sessions_rejected | 0 | +| Mysqlx_socket | /var/lib/mysql/mysqlx.sock | +| Mysqlx_ssl_accepts | 0 | +| Mysqlx_ssl_active | | +| Mysqlx_ssl_cipher | | +| Mysqlx_ssl_cipher_list | | +| Mysqlx_ssl_ctx_verify_depth | 18446744073709551615 | +| Mysqlx_ssl_ctx_verify_mode | 5 | +| Mysqlx_ssl_finished_accepts | 0 | +| Mysqlx_ssl_server_not_after | Aug 16 14:44:56 2032 GMT | +| Mysqlx_ssl_server_not_before | Aug 19 14:44:56 2022 GMT | +| Mysqlx_ssl_verify_depth | | +| Mysqlx_ssl_verify_mode | | +| Mysqlx_ssl_version | | +| Mysqlx_stmt_create_collection | 0 | +| Mysqlx_stmt_create_collection_index | 0 | +| Mysqlx_stmt_disable_notices | 0 | +| Mysqlx_stmt_drop_collection | 0 | +| Mysqlx_stmt_drop_collection_index | 0 | +| Mysqlx_stmt_enable_notices | 0 | +| Mysqlx_stmt_ensure_collection | 0 | +| Mysqlx_stmt_execute_mysqlx | 0 | +| Mysqlx_stmt_execute_sql | 0 | +| Mysqlx_stmt_execute_xplugin | 0 | +| Mysqlx_stmt_get_collection_options | 0 | +| Mysqlx_stmt_kill_client | 0 | +| Mysqlx_stmt_list_clients | 0 | +| Mysqlx_stmt_list_notices | 0 | +| Mysqlx_stmt_list_objects | 0 | +| Mysqlx_stmt_modify_collection_options | 0 | +| Mysqlx_stmt_ping | 0 | +| Mysqlx_worker_threads | 2 | +| Mysqlx_worker_threads_active | 0 | +| Net_buffer_length | 32768 | +| Not_flushed_delayed_rows | 0 | +| Ongoing_anonymous_transaction_count | 0 | +| Open_files | 2 | +| Open_streams | 0 | +| Open_table_definitions | 44 | +| Open_tables | 77 | +| Opened_files | 2 | +| Opened_table_definitions | 73 | +| Opened_tables | 158 | +| Performance_schema_accounts_lost | 0 | +| Performance_schema_cond_classes_lost | 0 | +| Performance_schema_cond_instances_lost | 0 | +| Performance_schema_digest_lost | 0 | +| 
Performance_schema_file_classes_lost | 0 | +| Performance_schema_file_handles_lost | 0 | +| Performance_schema_file_instances_lost | 0 | +| Performance_schema_hosts_lost | 0 | +| Performance_schema_index_stat_lost | 0 | +| Performance_schema_locker_lost | 0 | +| Performance_schema_memory_classes_lost | 0 | +| Performance_schema_metadata_lock_lost | 0 | +| Performance_schema_mutex_classes_lost | 0 | +| Performance_schema_mutex_instances_lost | 0 | +| Performance_schema_nested_statement_lost | 0 | +| Performance_schema_prepared_statements_lost | 0 | +| Performance_schema_program_lost | 0 | +| Performance_schema_rwlock_classes_lost | 0 | +| Performance_schema_rwlock_instances_lost | 0 | +| Performance_schema_session_connect_attrs_longest_seen | 117 | +| Performance_schema_session_connect_attrs_lost | 0 | +| Performance_schema_socket_classes_lost | 0 | +| Performance_schema_socket_instances_lost | 0 | +| Performance_schema_stage_classes_lost | 0 | +| Performance_schema_statement_classes_lost | 0 | +| Performance_schema_table_handles_lost | 0 | +| Performance_schema_table_instances_lost | 0 | +| Performance_schema_table_lock_stat_lost | 0 | +| Performance_schema_thread_classes_lost | 0 | +| Performance_schema_thread_instances_lost | 0 | +| Performance_schema_users_lost | 0 | +| Prepared_stmt_count | 0 | +| Queries | 6748 | +| Questions | 6746 | +| Replica_open_temp_tables | 0 | +| Secondary_engine_execution_count | 0 | +| Select_full_join | 0 | +| Select_full_range_join | 0 | +| Select_range | 0 | +| Select_range_check | 0 | +| Select_scan | 8425 | +| Slave_open_temp_tables | 0 | +| Slow_launch_threads | 0 | +| Slow_queries | 0 | +| Sort_merge_passes | 0 | +| Sort_range | 0 | +| Sort_rows | 0 | +| Sort_scan | 1681 | +| Ssl_accept_renegotiates | 0 | +| Ssl_accepts | 0 | +| Ssl_callback_cache_hits | 0 | +| Ssl_cipher | | +| Ssl_cipher_list | | +| Ssl_client_connects | 0 | +| Ssl_connect_renegotiates | 0 | +| Ssl_ctx_verify_depth | 18446744073709551615 | +| Ssl_ctx_verify_mode | 5 | +| Ssl_default_timeout | 0 | +| Ssl_finished_accepts | 0 | +| Ssl_finished_connects | 0 | +| Ssl_server_not_after | Aug 16 14:44:56 2032 GMT | +| Ssl_server_not_before | Aug 19 14:44:56 2022 GMT | +| Ssl_session_cache_hits | 0 | +| Ssl_session_cache_misses | 0 | +| Ssl_session_cache_mode | SERVER | +| Ssl_session_cache_overflows | 0 | +| Ssl_session_cache_size | 128 | +| Ssl_session_cache_timeout | 300 | +| Ssl_session_cache_timeouts | 0 | +| Ssl_sessions_reused | 0 | +| Ssl_used_session_cache_entries | 0 | +| Ssl_verify_depth | 0 | +| Ssl_verify_mode | 0 | +| Ssl_version | | +| Table_locks_immediate | 3371 | +| Table_locks_waited | 0 | +| Table_open_cache_hits | 6450 | +| Table_open_cache_misses | 158 | +| Table_open_cache_overflows | 0 | +| Tc_log_max_pages_used | 0 | +| Tc_log_page_size | 0 | +| Tc_log_page_waits | 0 | +| Threadpool_idle_threads | 0 | +| Threadpool_threads | 0 | +| Threads_cached | 1 | +| Threads_connected | 2 | +| Threads_created | 3 | +| Threads_running | 2 | +| Uptime | 1711 | +| Uptime_since_flush_status | 1711 | 
++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_variables.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_variables.txt new file mode 100644 index 00000000000000..02be0ae8ebf74a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/global_variables.txt @@ -0,0 +1,9 @@ ++--------------------------+-------+ +| Variable_name | Value | ++--------------------------+-------+ +| disabled_storage_engines | | +| log_bin | ON | +| max_connections | 151 | +| performance_schema | ON | +| table_open_cache | 4000 | ++--------------------------+-------+ diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/process_list.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/process_list.txt new file mode 100644 index 00000000000000..a44ce5e70f7e13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/process_list.txt @@ -0,0 +1,6 @@ ++------+---------+ +| time | user | ++------+---------+ +| 1 | netdata | +| 9 | root | ++------+---------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt new file mode 100644 index 00000000000000..d7c206e474c0a6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt @@ -0,0 +1,6 @@ ++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+ +| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_fetched | Rows_updated | Table_rows_read | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections | ++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+ +| netdata | 1 | 0 | 7.6873109 | 0.000136 | 0.000141228 | 71 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| root | 2 | 0 | 1843013485340.5564 | 0.15132199999999996 | 0.15179981700000006 | 14681 | 573440 | 0 | 1 | 0 | 114633 | 
37 | 0 | 110 | 0 | 0 | 1 | 0 | 0 | 36 | 0 | ++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/version.txt b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/version.txt new file mode 100644 index 00000000000000..dede361efb938e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/mysql/testdata/percona/v8.0.29/version.txt @@ -0,0 +1,6 @@ ++-----------------+--------------------------------------------------------+ +| Variable_name | Value | ++-----------------+--------------------------------------------------------+ +| version | 8.0.29-21 | +| version_comment | Percona Server (GPL), Release 21, Revision c59f87d2854 | ++-----------------+--------------------------------------------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nginx/README.md b/src/go/collectors/go.d.plugin/modules/nginx/README.md new file mode 120000 index 00000000000000..7b19fe44f27ff8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/README.md @@ -0,0 +1 @@ +integrations/nginx.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nginx/apiclient.go b/src/go/collectors/go.d.plugin/modules/nginx/apiclient.go new file mode 100644 index 00000000000000..0e3d09e037e276 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/apiclient.go @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginx + +import ( + "bufio" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + connActive = "connActive" + connAccepts = "connAccepts" + connHandled = "connHandled" + requests = "requests" + requestTime = "requestTime" + connReading = "connReading" + connWriting = "connWriting" + connWaiting = "connWaiting" +) + +var ( + nginxSeq = []string{ + connActive, + connAccepts, + connHandled, + requests, + connReading, + connWriting, + connWaiting, + } + tengineSeq = []string{ + connActive, + connAccepts, + connHandled, + requests, + requestTime, + connReading, + connWriting, + connWaiting, + } + + reStatus = regexp.MustCompile(`^Active connections: ([0-9]+)\n[^\d]+([0-9]+) ([0-9]+) ([0-9]+) ?([0-9]+)?\nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)`) +) + +func newAPIClient(client *http.Client, request web.Request) *apiClient { + return &apiClient{httpClient: client, request: request} +} + +type apiClient struct { + httpClient *http.Client + request web.Request +} + +func (a apiClient) getStubStatus() (*stubStatus, error) { + req, err := web.NewHTTPRequest(a.request) + if err != nil { + return nil, fmt.Errorf("error on creating request : %v", err) + } + + resp, err := a.doRequestOK(req) + defer closeBody(resp) + if err != nil { + return nil, err + } + + status, err := parseStubStatus(resp.Body) + if err != nil { + return nil, fmt.Errorf("error on parsing response : %v", err) + } + + return status, nil +} + +func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { + resp, err := a.httpClient.Do(req) + if err != nil { + return resp, 
fmt.Errorf("error on request : %v", err) + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + return resp, err +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func parseStubStatus(r io.Reader) (*stubStatus, error) { + sc := bufio.NewScanner(r) + var lines []string + + for sc.Scan() { + lines = append(lines, strings.Trim(sc.Text(), "\r\n ")) + } + + parsed := reStatus.FindStringSubmatch(strings.Join(lines, "\n")) + + if len(parsed) == 0 { + return nil, fmt.Errorf("can't parse '%v'", lines) + } + + parsed = parsed[1:] + + var ( + seq []string + status stubStatus + ) + + switch len(parsed) { + default: + return nil, fmt.Errorf("invalid number of fields, got %d, expect %d or %d", len(parsed), len(nginxSeq), len(tengineSeq)) + case len(nginxSeq): + seq = nginxSeq + case len(tengineSeq): + seq = tengineSeq + } + + for i, key := range seq { + strValue := parsed[i] + if strValue == "" { + continue + } + value := mustParseInt(strValue) + switch key { + default: + return nil, fmt.Errorf("unknown key in seq : %s", key) + case connActive: + status.Connections.Active = value + case connAccepts: + status.Connections.Accepts = value + case connHandled: + status.Connections.Handled = value + case requests: + status.Requests.Total = value + case connReading: + status.Connections.Reading = value + case connWriting: + status.Connections.Writing = value + case connWaiting: + status.Connections.Waiting = value + case requestTime: + status.Requests.Time = &value + } + } + + return &status, nil +} + +func mustParseInt(value string) int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + panic(err) + } + return v +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/charts.go b/src/go/collectors/go.d.plugin/modules/nginx/charts.go new file mode 100644 index 00000000000000..b1e76996f4e71a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/charts.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginx + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "connections", + Title: "Active Client Connections Including Waiting Connections", + Units: "connections", + Fam: "connections", + Ctx: "nginx.connections", + Dims: Dims{ + {ID: "active"}, + }, + }, + { + ID: "connections_statuses", + Title: "Active Connections Per Status", + Units: "connections", + Fam: "connections", + Ctx: "nginx.connections_status", + Dims: Dims{ + {ID: "reading"}, + {ID: "writing"}, + {ID: "waiting", Name: "idle"}, + }, + }, + { + ID: "connections_accepted_handled", + Title: "Accepted And Handled Connections", + Units: "connections/s", + Fam: "connections", + Ctx: "nginx.connections_accepted_handled", + Dims: Dims{ + {ID: "accepts", Name: "accepted", Algo: module.Incremental}, + {ID: "handled", Algo: module.Incremental}, + }, + }, + { + ID: "requests", + Title: "Client Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "nginx.requests", + Dims: Dims{ + {ID: "requests", Algo: module.Incremental}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/collect.go b/src/go/collectors/go.d.plugin/modules/nginx/collect.go new file mode 100644 index 00000000000000..351317393b1129 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/nginx/collect.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginx + +import ( + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func (n *Nginx) collect() (map[string]int64, error) { + status, err := n.apiClient.getStubStatus() + + if err != nil { + return nil, err + } + + return stm.ToMap(status), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/config_schema.json b/src/go/collectors/go.d.plugin/modules/nginx/config_schema.json new file mode 100644 index 00000000000000..58a6865da171bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginx job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/integrations/nginx.md b/src/go/collectors/go.d.plugin/modules/nginx/integrations/nginx.md new file mode 100644 index 00000000000000..779cfee04d4fe9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/integrations/nginx.md @@ -0,0 +1,232 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginx/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginx/metadata.yaml" +sidebar_label: "NGINX" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NGINX + + +<img src="https://netdata.cloud/img/nginx.svg" width="150"/> + + +Plugin: go.d.plugin +Module: nginx + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests. + + +It sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects NGINX instances running on localhost that are listening on port 80. +On startup, it tries to collect metrics from: + +- http://127.0.0.1/basic_status +- http://localhost/stub_status +- http://127.0.0.1/stub_status +- http://127.0.0.1/nginx_status +- http://127.0.0.1/status + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
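As a quick way to sanity-check a stub_status endpoint before pointing the collector at it, here is a minimal Go sketch; it assumes an endpoint at `http://127.0.0.1/stub_status`, and its regex is a simplified stand-in for the module's `reStatus` (it ignores tengine's extra `request_time` field).

```go
// Illustrative sketch only: fetch a stub_status page and print the seven
// counters the collector reads. The endpoint URL is an assumption.
package main

import (
	"fmt"
	"io"
	"net/http"
	"regexp"
)

// Simplified stand-in for the module's reStatus regex (no tengine request_time).
var reStub = regexp.MustCompile(
	`Active connections: (\d+)\s+server accepts handled requests\s+(\d+) (\d+) (\d+)\s+Reading: (\d+) Writing: (\d+) Waiting: (\d+)`)

func main() {
	resp, err := http.Get("http://127.0.0.1/stub_status") // assumed endpoint
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	m := reStub.FindStringSubmatch(string(body))
	if m == nil {
		panic("response does not look like stub_status output")
	}
	fmt.Printf("active=%s accepted=%s handled=%s requests=%s reading=%s writing=%s waiting=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6], m[7])
}
```

The printed counters map one-to-one onto the dimensions of the `nginx.*` charts listed below.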
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NGINX instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginx.connections | active | connections |
+| nginx.connections_status | reading, writing, idle | connections |
+| nginx.connections_accepted_handled | accepted, handled | connections/s |
+| nginx.requests | requests | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable status support
+
+Configure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nginx.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nginx.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/stub_status | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/stub_status
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/stub_status
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+NGINX with HTTPS enabled and a self-signed certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1/stub_status
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+ +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1/stub_status + + - name: remote + url: http://192.0.2.1/stub_status + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m nginx + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/nginx/metadata.yaml b/src/go/collectors/go.d.plugin/modules/nginx/metadata.yaml new file mode 100644 index 00000000000000..49b12c4eccc18c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/metadata.yaml @@ -0,0 +1,226 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-nginx + plugin_name: go.d.plugin + module_name: nginx + monitored_instance: + name: NGINX + link: https://www.nginx.com/ + categories: + - data-collection.web-servers-and-web-proxies + icon_filename: nginx.svg + related_resources: + integrations: + list: + - plugin_name: go.d.plugin + module_name: httpcheck + - plugin_name: go.d.plugin + module_name: web_log + - plugin_name: apps.plugin + module_name: apps + - plugin_name: cgroups.plugin + module_name: cgroups + alternative_monitored_instances: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - nginx + - web + - webserver + - http + - proxy + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests. + method_description: | + It sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server. + default_behavior: + auto_detection: + description: | + By default, it detects NGINX instances running on localhost that are listening on port 80. + On startup, it tries to collect metrics from: + + - http://127.0.0.1/basic_status + - http://localhost/stub_status + - http://127.0.0.1/stub_status + - http://127.0.0.1/nginx_status + - http://127.0.0.1/status + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: Enable status support + description: | + Configure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html). + configuration: + file: + name: go.d/nginx.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. 
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1/stub_status
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: GET
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: false
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              folding:
+                enabled: false
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/stub_status
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/stub_status
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: NGINX with HTTPS enabled and a self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1/stub_status
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/stub_status
+
+                  - name: remote
+                    url: http://192.0.2.1/stub_status
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: nginx.connections
+              description: Active Client Connections Including Waiting Connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: nginx.connections_status
+              description: Active Connections Per Status
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: reading
+                - name: writing
+                - name: idle
+            - name: nginx.connections_accepted_handled
+              description: Accepted And Handled Connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: accepted
+                - name: handled
+            - name: nginx.requests
+              description: Client Requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
diff --git a/src/go/collectors/go.d.plugin/modules/nginx/metrics.go b/src/go/collectors/go.d.plugin/modules/nginx/metrics.go
new file mode 100644
index 00000000000000..66e6a160ed3a5f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginx/metrics.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+type stubStatus struct {
+	Connections struct {
+		// The current number of active client connections including Waiting connections.
+		Active int64 `stm:"active"`
+
+		// The total number of accepted client connections.
+		Accepts int64 `stm:"accepts"`
+
+		// The total number of handled connections.
+		// Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+		Handled int64 `stm:"handled"`
+
+		// The current number of connections where nginx is reading the request header.
+		Reading int64 `stm:"reading"`
+
+		// The current number of connections where nginx is writing the response back to the client.
+		Writing int64 `stm:"writing"`
+
+		// The current number of idle client connections waiting for a request.
+		Waiting int64 `stm:"waiting"`
+	} `stm:""`
+	Requests struct {
+		// The total number of client requests.
+		Total int64 `stm:"requests"`
+
+		// Note: tengine specific.
+		// The total response time of all requests, in milliseconds.
+		Time *int64 `stm:"request_time"`
+	} `stm:""`
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nginx/nginx.go b/src/go/collectors/go.d.plugin/modules/nginx/nginx.go
new file mode 100644
index 00000000000000..9acf1e72b34c7e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginx/nginx.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import (
+	_ "embed"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("nginx", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+const (
+	defaultURL         = "http://127.0.0.1/stub_status"
+	defaultHTTPTimeout = time.Second
+)
+
+// New creates Nginx with default values.
+func New() *Nginx {
+	config := Config{
+		HTTP: web.HTTP{
+			Request: web.Request{
+				URL: defaultURL,
+			},
+			Client: web.Client{
+				Timeout: web.Duration{Duration: defaultHTTPTimeout},
+			},
+		},
+	}
+
+	return &Nginx{Config: config}
+}
+
+// Config is the Nginx module configuration.
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+// Nginx nginx module.
+type Nginx struct {
+	module.Base
+	Config `yaml:",inline"`
+
+	apiClient *apiClient
+}
+
+// Cleanup makes cleanup.
+func (Nginx) Cleanup() {}
+
+// Init makes initialization.
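+// It fails if the URL is not set or the HTTP client cannot be created; on success it builds the API client used by Collect.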
+func (n *Nginx) Init() bool { + if n.URL == "" { + n.Error("URL not set") + return false + } + + client, err := web.NewHTTPClient(n.Client) + if err != nil { + n.Error(err) + return false + } + + n.apiClient = newAPIClient(client, n.Request) + + n.Debugf("using URL %s", n.URL) + n.Debugf("using timeout: %s", n.Timeout.Duration) + + return true +} + +// Check makes check. +func (n *Nginx) Check() bool { return len(n.Collect()) > 0 } + +// Charts creates Charts. +func (Nginx) Charts() *Charts { return charts.Copy() } + +// Collect collects metrics. +func (n *Nginx) Collect() map[string]int64 { + mx, err := n.collect() + + if err != nil { + n.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/nginx_test.go b/src/go/collectors/go.d.plugin/modules/nginx/nginx_test.go new file mode 100644 index 00000000000000..ef115482ed28f7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/nginx_test.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginx + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testStatusData, _ = os.ReadFile("testdata/status.txt") + testTengineStatusData, _ = os.ReadFile("testdata/tengine-status.txt") +) + +func TestNginx_Cleanup(t *testing.T) { New().Cleanup() } + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestNginx_Init(t *testing.T) { + job := New() + + require.True(t, job.Init()) + assert.NotNil(t, job.apiClient) +} + +func TestNginx_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestNginx_CheckNG(t *testing.T) { + job := New() + + job.URL = "http://127.0.0.1:38001/us" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestNginx_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestNginx_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "accepts": 36, + "active": 1, + "handled": 36, + "reading": 0, + "requests": 126, + "waiting": 0, + "writing": 1, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestNginx_CollectTengine(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testTengineStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "accepts": 1140, + "active": 1, + "handled": 1140, + "reading": 0, + "request_time": 75806, + "requests": 1140, + "waiting": 0, + "writing": 1, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestNginx_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + 
job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestNginx_404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/nginx/testdata/status.txt b/src/go/collectors/go.d.plugin/modules/nginx/testdata/status.txt new file mode 100644 index 00000000000000..f4835bef4f4ac3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/testdata/status.txt @@ -0,0 +1,4 @@ +Active connections: 1 +server accepts handled requests +36 36 126 +Reading: 0 Writing: 1 Waiting: 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nginx/testdata/tengine-status.txt b/src/go/collectors/go.d.plugin/modules/nginx/testdata/tengine-status.txt new file mode 100644 index 00000000000000..1e6a62c211b94c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginx/testdata/tengine-status.txt @@ -0,0 +1,4 @@ +Active connections: 1 +server accepts handled requests request_time +1140 1140 1140 75806 +Reading: 0 Writing: 1 Waiting: 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/README.md b/src/go/collectors/go.d.plugin/modules/nginxplus/README.md new file mode 120000 index 00000000000000..16cb6c1b704e42 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/README.md @@ -0,0 +1 @@ +integrations/nginx_plus.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/cache.go b/src/go/collectors/go.d.plugin/modules/nginxplus/cache.go new file mode 100644 index 00000000000000..af58f3a55104e6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/cache.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +func newCache() *cache { + return &cache{ + httpCaches: make(map[string]*cacheHTTPCacheEntry), + httpServerZones: make(map[string]*cacheZoneEntry), + httpLocationZones: make(map[string]*cacheZoneEntry), + httpUpstreams: make(map[string]*cacheUpstreamEntry), + httpUpstreamServers: make(map[string]*cacheUpstreamServerEntry), + streamServerZones: make(map[string]*cacheZoneEntry), + streamUpstreams: make(map[string]*cacheUpstreamEntry), + streamUpstreamServers: make(map[string]*cacheUpstreamServerEntry), + resolvers: make(map[string]*cacheResolverEntry), + } +} + +type ( + cache struct { + httpCaches map[string]*cacheHTTPCacheEntry + httpServerZones map[string]*cacheZoneEntry + httpLocationZones map[string]*cacheZoneEntry + httpUpstreams map[string]*cacheUpstreamEntry + httpUpstreamServers map[string]*cacheUpstreamServerEntry + streamServerZones map[string]*cacheZoneEntry + streamUpstreams map[string]*cacheUpstreamEntry + streamUpstreamServers map[string]*cacheUpstreamServerEntry + resolvers map[string]*cacheResolverEntry + } + cacheEntry struct { + hasCharts bool + updated bool + notSeenTimes int + } + cacheHTTPCacheEntry struct { + name string + cacheEntry + } + cacheResolverEntry struct { + zone string + cacheEntry + } + cacheZoneEntry struct { + zone string + cacheEntry + } + cacheUpstreamEntry struct { + name string + zone string + cacheEntry + } + cacheUpstreamServerEntry struct { + name string + zone string + serverAddr string + serverName string + cacheEntry + } +) + +func (c *cache) resetUpdated() { + for _, v := range c.httpCaches { 
+ v.updated = false + } + for _, v := range c.httpServerZones { + v.updated = false + } + for _, v := range c.httpLocationZones { + v.updated = false + } + for _, v := range c.httpUpstreams { + v.updated = false + } + for _, v := range c.httpUpstreamServers { + v.updated = false + } + for _, v := range c.streamServerZones { + v.updated = false + } + for _, v := range c.streamUpstreams { + v.updated = false + } + for _, v := range c.streamUpstreamServers { + v.updated = false + } + for _, v := range c.resolvers { + v.updated = false + } +} + +func (c *cache) putHTTPCache(cache string) { + v, ok := c.httpCaches[cache] + if !ok { + v = &cacheHTTPCacheEntry{name: cache} + c.httpCaches[cache] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putHTTPServerZone(zone string) { + v, ok := c.httpServerZones[zone] + if !ok { + v = &cacheZoneEntry{zone: zone} + c.httpServerZones[zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putHTTPLocationZone(zone string) { + v, ok := c.httpLocationZones[zone] + if !ok { + v = &cacheZoneEntry{zone: zone} + c.httpLocationZones[zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putHTTPUpstream(name, zone string) { + v, ok := c.httpUpstreams[name+"_"+zone] + if !ok { + v = &cacheUpstreamEntry{name: name, zone: zone} + c.httpUpstreams[name+"_"+zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putHTTPUpstreamServer(name, serverAddr, serverName, zone string) { + v, ok := c.httpUpstreamServers[name+"_"+serverAddr+"_"+zone] + if !ok { + v = &cacheUpstreamServerEntry{name: name, zone: zone, serverAddr: serverAddr, serverName: serverName} + c.httpUpstreamServers[name+"_"+serverAddr+"_"+zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putStreamServerZone(zone string) { + v, ok := c.streamServerZones[zone] + if !ok { + v = &cacheZoneEntry{zone: zone} + c.streamServerZones[zone] = v + } + v.updated, v.notSeenTimes = true, 0 + +} + +func (c *cache) putStreamUpstream(name, zone string) { + v, ok := c.streamUpstreams[name+"_"+zone] + if !ok { + v = &cacheUpstreamEntry{name: name, zone: zone} + c.streamUpstreams[name+"_"+zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putStreamUpstreamServer(name, serverAddr, serverName, zone string) { + v, ok := c.streamUpstreamServers[name+"_"+serverAddr+"_"+zone] + if !ok { + v = &cacheUpstreamServerEntry{name: name, zone: zone, serverAddr: serverAddr, serverName: serverName} + c.streamUpstreamServers[name+"_"+serverAddr+"_"+zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} + +func (c *cache) putResolver(zone string) { + v, ok := c.resolvers[zone] + if !ok { + v = &cacheResolverEntry{zone: zone} + c.resolvers[zone] = v + } + v.updated, v.notSeenTimes = true, 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/charts.go b/src/go/collectors/go.d.plugin/modules/nginxplus/charts.go new file mode 100644 index 00000000000000..1195b930c61f40 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/charts.go @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioClientConnectionsRate = module.Priority + iota + prioClientConnectionsCount + + prioSSLHandshakesRate + prioSSLHandshakesFailuresRate + prioSSLVerificationErrorsRate + prioSSLSessionReusesRate + + prioHTTPRequestsRate + prioHTTPRequestsCount + prioHTTPServerZoneRequestsRate + 
prioHTTPLocationZoneRequestsRate + prioHTTPServerZoneRequestsProcessingCount + prioHTTPServerZoneRequestsDiscardedRate + prioHTTPLocationZoneRequestsDiscardedRate + + prioHTTPServerZoneResponsesPerCodeClassRate + prioHTTPLocationZoneResponsesPerCodeClassRate + + prioHTTPServerZoneTrafficRate + prioHTTPLocationZoneTrafficRate + + prioHTTPUpstreamPeersCount + prioHTTPUpstreamZombiesCount + prioHTTPUpstreamKeepaliveCount + + prioHTTPUpstreamServerState + prioHTTPUpstreamServerDowntime + + prioHTTPUpstreamServerConnectionsCount + + prioHTTPUpstreamServerRequestsRate + + prioHTTPUpstreamServerResponsesPerCodeClassRate + + prioHTTPUpstreamServerResponseTime + prioHTTPUpstreamServerResponseHeaderTime + + prioHTTPUpstreamServerTrafficRate + + prioHTTPCacheState + prioHTTPCacheIOPS + prioHTTPCacheIO + prioHTTPCacheSize + + prioStreamServerZoneConnectionsRate + prioStreamServerZoneConnectionsProcessingCount + prioStreamServerZoneConnectionsDiscardedRate + + prioStreamServerZoneSessionsPerCodeClassRate + + prioStreamServerZoneTrafficRate + + prioStreamUpstreamPeersCount + prioStreamUpstreamZombiesCount + + prioStreamUpstreamServerState + prioStreamUpstreamServerDowntime + + prioStreamUpstreamServerConnectionsRate + prioStreamUpstreamServerConnectionsCount + + prioStreamUpstreamServerTrafficRate + + prioResolverZoneRequestsRate + prioResolverZoneResponsesRate + + prioUptime +) + +var ( + baseCharts = module.Charts{ + clientConnectionsRateChart.Copy(), + clientConnectionsCountChart.Copy(), + sslHandshakesRateChart.Copy(), + sslHandshakesFailuresRateChart.Copy(), + sslVerificationErrorsRateChart.Copy(), + sslSessionReusesRateChart.Copy(), + httpRequestsRateChart.Copy(), + httpRequestsCountChart.Copy(), + uptimeChart.Copy(), + } + + clientConnectionsRateChart = module.Chart{ + ID: "client_connections_rate", + Title: "Client connections rate", + Units: "connections/s", + Fam: "connections", + Ctx: "nginxplus.client_connections_rate", + Priority: prioClientConnectionsRate, + Dims: module.Dims{ + {ID: "connections_accepted", Name: "accepted", Algo: module.Incremental}, + {ID: "connections_dropped", Name: "dropped", Algo: module.Incremental}, + }, + } + clientConnectionsCountChart = module.Chart{ + ID: "client_connections_count", + Title: "Client connections", + Units: "connections", + Fam: "connections", + Ctx: "nginxplus.client_connections_count", + Priority: prioClientConnectionsCount, + Dims: module.Dims{ + {ID: "connections_active", Name: "active"}, + {ID: "connections_idle", Name: "idle"}, + }, + } + sslHandshakesRateChart = module.Chart{ + ID: "ssl_handshakes_rate", + Title: "SSL handshakes rate", + Units: "handshakes/s", + Fam: "ssl", + Ctx: "nginxplus.ssl_handshakes_rate", + Priority: prioSSLHandshakesRate, + Dims: module.Dims{ + {ID: "ssl_handshakes", Name: "successful", Algo: module.Incremental}, + {ID: "ssl_handshakes_failed", Name: "failed", Algo: module.Incremental}, + }, + } + sslHandshakesFailuresRateChart = module.Chart{ + ID: "ssl_handshakes_failures_rate", + Title: "SSL handshakes failures rate", + Units: "failures/s", + Fam: "ssl", + Ctx: "nginxplus.ssl_handshakes_failures_rate", + Priority: prioSSLHandshakesFailuresRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "ssl_no_common_protocol", Name: "no_common_protocol", Algo: module.Incremental}, + {ID: "ssl_no_common_cipher", Name: "no_common_cipher", Algo: module.Incremental}, + {ID: "ssl_handshake_timeout", Name: "timeout", Algo: module.Incremental}, + {ID: "ssl_peer_rejected_cert", Name: "peer_rejected_cert", Algo: 
module.Incremental}, + }, + } + sslVerificationErrorsRateChart = module.Chart{ + ID: "ssl_verification_errors_rate", + Title: "SSL verification errors rate", + Units: "errors/s", + Fam: "ssl", + Ctx: "nginxplus.ssl_verification_errors_rate", + Priority: prioSSLVerificationErrorsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "ssl_verify_failures_no_cert", Name: "no_cert", Algo: module.Incremental}, + {ID: "ssl_verify_failures_expired_cert", Name: "expired_cert", Algo: module.Incremental}, + {ID: "ssl_verify_failures_revoked_cert", Name: "revoked_cert", Algo: module.Incremental}, + {ID: "ssl_verify_failures_hostname_mismatch", Name: "hostname_mismatch", Algo: module.Incremental}, + {ID: "ssl_verify_failures_other", Name: "other", Algo: module.Incremental}, + }, + } + sslSessionReusesRateChart = module.Chart{ + ID: "ssl_session_reuses_rate", + Title: "Session reuses during SSL handshake", + Units: "reuses/s", + Fam: "ssl", + Ctx: "nginxplus.ssl_session_reuses_rate", + Priority: prioSSLSessionReusesRate, + Dims: module.Dims{ + {ID: "ssl_session_reuses", Name: "ssl_session", Algo: module.Incremental}, + }, + } + httpRequestsRateChart = module.Chart{ + ID: "http_requests_rate", + Title: "HTTP requests rate", + Units: "requests/s", + Fam: "http requests", + Ctx: "nginxplus.http_requests_rate", + Priority: prioHTTPRequestsRate, + Dims: module.Dims{ + {ID: "http_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + httpRequestsCountChart = module.Chart{ + ID: "http_requests_count", + Title: "HTTP requests", + Units: "requests", + Fam: "http requests", + Ctx: "nginxplus.http_requests_count", + Priority: prioHTTPRequestsCount, + Dims: module.Dims{ + {ID: "http_requests_current", Name: "requests"}, + }, + } + uptimeChart = module.Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "nginxplus.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "uptime", Name: "uptime"}, + }, + } +) + +var ( + httpServerZoneChartsTmpl = module.Charts{ + httpServerZoneRequestsRateChartTmpl.Copy(), + httpServerZoneResponsesPerCodeClassRateChartTmpl.Copy(), + httpServerZoneTrafficRateChartTmpl.Copy(), + httpServerZoneRequestsProcessingCountChartTmpl.Copy(), + httpServerZoneRequestsDiscardedRateChartTmpl.Copy(), + } + httpServerZoneRequestsRateChartTmpl = module.Chart{ + ID: "http_server_zone_%s_requests_rate", + Title: "HTTP Server Zone requests rate", + Units: "requests/s", + Fam: "http requests", + Ctx: "nginxplus.http_server_zone_requests_rate", + Priority: prioHTTPServerZoneRequestsRate, + Dims: module.Dims{ + {ID: "http_server_zone_%s_requests", Name: "requests", Algo: module.Incremental}, + }, + } + httpServerZoneResponsesPerCodeClassRateChartTmpl = module.Chart{ + ID: "http_server_zone_%s_responses_per_code_class_rate", + Title: "HTTP Server Zone responses rate", + Units: "responses/s", + Fam: "http responses", + Ctx: "nginxplus.http_server_zone_responses_per_code_class_rate", + Priority: prioHTTPServerZoneResponsesPerCodeClassRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "http_server_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental}, + {ID: "http_server_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "http_server_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "http_server_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "http_server_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental}, + }, + } + httpServerZoneTrafficRateChartTmpl = 
module.Chart{ + ID: "http_server_zone_%s_traffic_rate", + Title: "HTTP Server Zone traffic", + Units: "bytes/s", + Fam: "http traffic", + Ctx: "nginxplus.http_server_zone_traffic_rate", + Priority: prioHTTPServerZoneTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "http_server_zone_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "http_server_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + httpServerZoneRequestsProcessingCountChartTmpl = module.Chart{ + ID: "http_server_zone_%s_requests_processing_count", + Title: "HTTP Server Zone currently processed requests", + Units: "requests", + Fam: "http requests", + Ctx: "nginxplus.http_server_zone_requests_processing_count", + Priority: prioHTTPServerZoneRequestsProcessingCount, + Dims: module.Dims{ + {ID: "http_server_zone_%s_requests_processing", Name: "processing"}, + }, + } + httpServerZoneRequestsDiscardedRateChartTmpl = module.Chart{ + ID: "http_server_zone_%s_requests_discarded_rate", + Title: "HTTP Server Zone requests discarded rate", + Units: "requests/s", + Fam: "http requests", + Ctx: "nginxplus.http_server_zone_requests_discarded_rate", + Priority: prioHTTPServerZoneRequestsDiscardedRate, + Dims: module.Dims{ + {ID: "http_server_zone_%s_requests_discarded", Name: "discarded", Algo: module.Incremental}, + }, + } +) + +var ( + httpLocationZoneChartsTmpl = module.Charts{ + httpLocationZoneRequestsRateChartTmpl.Copy(), + httpLocationZoneRequestsDiscardedRateChartTmpl.Copy(), + httpLocationZoneTrafficRateChartTmpl.Copy(), + httpLocationZoneResponsesPerCodeClassRateChartTmpl.Copy(), + } + httpLocationZoneRequestsRateChartTmpl = module.Chart{ + ID: "http_location_zone_%s_requests_rate", + Title: "HTTP Location Zone requests rate", + Units: "requests/s", + Fam: "http requests", + Ctx: "nginxplus.http_location_zone_requests_rate", + Priority: prioHTTPLocationZoneRequestsRate, + Dims: module.Dims{ + {ID: "http_location_zone_%s_requests", Name: "requests", Algo: module.Incremental}, + }, + } + httpLocationZoneResponsesPerCodeClassRateChartTmpl = module.Chart{ + ID: "http_location_zone_%s_responses_per_code_class_rate", + Title: "HTTP Location Zone responses rate", + Units: "responses/s", + Fam: "http responses", + Ctx: "nginxplus.http_location_zone_responses_per_code_class_rate", + Priority: prioHTTPLocationZoneResponsesPerCodeClassRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "http_location_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental}, + {ID: "http_location_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "http_location_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "http_location_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "http_location_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental}, + }, + } + httpLocationZoneTrafficRateChartTmpl = module.Chart{ + ID: "http_location_zone_%s_traffic_rate", + Title: "HTTP Location Zone traffic rate", + Units: "bytes/s", + Fam: "http traffic", + Ctx: "nginxplus.http_location_zone_traffic_rate", + Priority: prioHTTPLocationZoneTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "http_location_zone_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "http_location_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + httpLocationZoneRequestsDiscardedRateChartTmpl = module.Chart{ + ID: "http_location_zone_%s_requests_discarded_rate", + Title: "HTTP Location Zone requests 
discarded rate", + Units: "requests/s", + Fam: "http requests", + Ctx: "nginxplus.http_location_zone_requests_discarded_rate", + Priority: prioHTTPLocationZoneRequestsDiscardedRate, + Dims: module.Dims{ + {ID: "http_location_zone_%s_requests_discarded", Name: "discarded", Algo: module.Incremental}, + }, + } +) + +var ( + httpUpstreamChartsTmpl = module.Charts{ + httpUpstreamPeersCountChartTmpl.Copy(), + httpUpstreamZombiesCountChartTmpl.Copy(), + httpUpstreamKeepaliveCountChartTmpl.Copy(), + } + httpUpstreamPeersCountChartTmpl = module.Chart{ + ID: "http_upstream_%s_zone_%s_peers_count", + Title: "HTTP Upstream peers", + Units: "peers", + Fam: "http upstream", + Ctx: "nginxplus.http_upstream_peers_count", + Priority: prioHTTPUpstreamPeersCount, + Dims: module.Dims{ + {ID: "http_upstream_%s_zone_%s_peers", Name: "peers"}, + }, + } + httpUpstreamZombiesCountChartTmpl = module.Chart{ + ID: "http_upstream_%s_zone_%s_zombies_count", + Title: "HTTP Upstream zombies", + Units: "servers", + Fam: "http upstream", + Ctx: "nginxplus.http_upstream_zombies_count", + Priority: prioHTTPUpstreamZombiesCount, + Dims: module.Dims{ + {ID: "http_upstream_%s_zone_%s_zombies", Name: "zombie"}, + }, + } + httpUpstreamKeepaliveCountChartTmpl = module.Chart{ + ID: "http_upstream_%s_zone_%s_keepalive_count", + Title: "HTTP Upstream keepalive", + Units: "connections", + Fam: "http upstream", + Ctx: "nginxplus.http_upstream_keepalive_count", + Priority: prioHTTPUpstreamKeepaliveCount, + Dims: module.Dims{ + {ID: "http_upstream_%s_zone_%s_keepalive", Name: "keepalive"}, + }, + } + + httpUpstreamServerChartsTmpl = module.Charts{ + httpUpstreamServerRequestsRateChartTmpl.Copy(), + httpUpstreamServerResponsesPerCodeClassRateChartTmpl.Copy(), + httpUpstreamServerResponseTimeChartTmpl.Copy(), + httpUpstreamServerResponseHeaderTimeChartTmpl.Copy(), + httpUpstreamServerTrafficRateChartTmpl.Copy(), + httpUpstreamServerStateChartTmpl.Copy(), + httpUpstreamServerDowntimeChartTmpl.Copy(), + httpUpstreamServerConnectionsCountChartTmpl.Copy(), + } + httpUpstreamServerRequestsRateChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_requests_rate", + Title: "HTTP Upstream Server requests", + Units: "requests/s", + Fam: "http upstream requests", + Ctx: "nginxplus.http_upstream_server_requests_rate", + Priority: prioHTTPUpstreamServerRequestsRate, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_requests", Name: "requests", Algo: module.Incremental}, + }, + } + httpUpstreamServerResponsesPerCodeClassRateChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_responses_per_code_class_rate", + Title: "HTTP Upstream Server responses", + Units: "responses/s", + Fam: "http upstream responses", + Ctx: "nginxplus.http_upstream_server_responses_per_code_class_rate", + Priority: prioHTTPUpstreamServerResponsesPerCodeClassRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental}, + {ID: "http_upstream_%s_server_%s_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "http_upstream_%s_server_%s_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "http_upstream_%s_server_%s_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "http_upstream_%s_server_%s_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental}, + }, + } + httpUpstreamServerResponseTimeChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_response_time", + Title: "HTTP Upstream 
Server average response time", + Units: "milliseconds", + Fam: "http upstream response time", + Ctx: "nginxplus.http_upstream_server_response_time", + Priority: prioHTTPUpstreamServerResponseTime, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_response_time", Name: "response"}, + }, + } + httpUpstreamServerResponseHeaderTimeChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_response_header_time", + Title: "HTTP Upstream Server average response header time", + Units: "milliseconds", + Fam: "http upstream response time", + Ctx: "nginxplus.http_upstream_server_response_header_time", + Priority: prioHTTPUpstreamServerResponseHeaderTime, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_header_time", Name: "header"}, + }, + } + httpUpstreamServerTrafficRateChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_traffic_rate", + Title: "HTTP Upstream Server traffic rate", + Units: "bytes/s", + Fam: "http upstream traffic", + Ctx: "nginxplus.http_upstream_server_traffic_rate", + Priority: prioHTTPUpstreamServerTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "http_upstream_%s_server_%s_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + httpUpstreamServerStateChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_state", + Title: "HTTP Upstream Server state", + Units: "state", + Fam: "http upstream state", + Ctx: "nginxplus.http_upstream_server_state", + Priority: prioHTTPUpstreamServerState, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_state_up", Name: "up"}, + {ID: "http_upstream_%s_server_%s_zone_%s_state_down", Name: "down"}, + {ID: "http_upstream_%s_server_%s_zone_%s_state_draining", Name: "draining"}, + {ID: "http_upstream_%s_server_%s_zone_%s_state_unavail", Name: "unavail"}, + {ID: "http_upstream_%s_server_%s_zone_%s_state_checking", Name: "checking"}, + {ID: "http_upstream_%s_server_%s_zone_%s_state_unhealthy", Name: "unhealthy"}, + }, + } + httpUpstreamServerConnectionsCountChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_connection_count", + Title: "HTTP Upstream Server connections", + Units: "connections", + Fam: "http upstream connections", + Ctx: "nginxplus.http_upstream_server_connections_count", + Priority: prioHTTPUpstreamServerConnectionsCount, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_active", Name: "active"}, + }, + } + httpUpstreamServerDowntimeChartTmpl = module.Chart{ + ID: "http_upstream_%s_server_%s_zone_%s_downtime", + Title: "HTTP Upstream Server downtime", + Units: "seconds", + Fam: "http upstream state", + Ctx: "nginxplus.http_upstream_server_downtime", + Priority: prioHTTPUpstreamServerDowntime, + Dims: module.Dims{ + {ID: "http_upstream_%s_server_%s_zone_%s_downtime", Name: "downtime"}, + }, + } +) + +var ( + httpCacheChartsTmpl = module.Charts{ + httpCacheStateChartTmpl.Copy(), + httpCacheIOPSChartTmpl.Copy(), + httpCacheIOChartTmpl.Copy(), + httpCacheSizeChartTmpl.Copy(), + } + httpCacheStateChartTmpl = module.Chart{ + ID: "http_cache_%s_state", + Title: "HTTP Cache state", + Units: "state", + Fam: "http cache", + Ctx: "nginxplus.http_cache_state", + Priority: prioHTTPCacheState, + Dims: module.Dims{ + {ID: "http_cache_%s_state_warm", Name: "warm"}, + {ID: "http_cache_%s_state_cold", Name: "cold"}, + }, + } + httpCacheSizeChartTmpl = module.Chart{ + ID: "http_cache_%s_size", + Title: 
"HTTP Cache size", + Units: "bytes", + Fam: "http cache", + Ctx: "nginxplus.http_cache_size", + Priority: prioHTTPCacheSize, + Dims: module.Dims{ + {ID: "http_cache_%s_size", Name: "size"}, + }, + } + httpCacheIOPSChartTmpl = module.Chart{ + ID: "http_cache_%s_iops", + Title: "HTTP Cache IOPS", + Units: "responses/s", + Fam: "http cache", + Ctx: "nginxplus.http_cache_iops", + Priority: prioHTTPCacheIOPS, + Dims: module.Dims{ + {ID: "http_cache_%s_served_responses", Name: "served", Algo: module.Incremental}, + {ID: "http_cache_%s_written_responses", Name: "written", Algo: module.Incremental}, + {ID: "http_cache_%s_bypassed_responses", Name: "bypassed", Algo: module.Incremental}, + }, + } + httpCacheIOChartTmpl = module.Chart{ + ID: "http_cache_%s_io", + Title: "HTTP Cache IO", + Units: "bytes/s", + Fam: "http cache", + Ctx: "nginxplus.http_cache_io", + Priority: prioHTTPCacheIO, + Dims: module.Dims{ + {ID: "http_cache_%s_served_bytes", Name: "served", Algo: module.Incremental}, + {ID: "http_cache_%s_written_bytes", Name: "written", Algo: module.Incremental}, + {ID: "http_cache_%s_bypassed_bytes", Name: "bypassed", Algo: module.Incremental}, + }, + } +) + +var ( + streamServerZoneChartsTmpl = module.Charts{ + streamServerZoneConnectionsRateChartTmpl.Copy(), + streamServerZoneTrafficRateChartTmpl.Copy(), + streamServerZoneSessionsPerCodeClassRateChartTmpl.Copy(), + streamServerZoneConnectionsProcessingCountRateChartTmpl.Copy(), + streamServerZoneConnectionsDiscardedRateChartTmpl.Copy(), + } + streamServerZoneConnectionsRateChartTmpl = module.Chart{ + ID: "stream_server_zone_%s_connections_rate", + Title: "Stream Server Zone connections rate", + Units: "connections/s", + Fam: "stream connections", + Ctx: "nginxplus.stream_server_zone_connections_rate", + Priority: prioStreamServerZoneConnectionsRate, + Dims: module.Dims{ + {ID: "stream_server_zone_%s_connections", Name: "accepted", Algo: module.Incremental}, + }, + } + streamServerZoneSessionsPerCodeClassRateChartTmpl = module.Chart{ + ID: "stream_server_zone_%s_sessions_per_code_class_rate", + Title: "Stream Server Zone sessions rate", + Units: "sessions/s", + Fam: "stream sessions", + Ctx: "nginxplus.stream_server_zone_sessions_per_code_class_rate", + Priority: prioStreamServerZoneSessionsPerCodeClassRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "stream_server_zone_%s_sessions_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "stream_server_zone_%s_sessions_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "stream_server_zone_%s_sessions_5xx", Name: "5xx", Algo: module.Incremental}, + }, + } + streamServerZoneTrafficRateChartTmpl = module.Chart{ + ID: "stream_server_zone_%s_traffic_rate", + Title: "Stream Server Zone traffic rate", + Units: "bytes/s", + Fam: "stream traffic", + Ctx: "nginxplus.stream_server_zone_traffic_rate", + Priority: prioStreamServerZoneTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "stream_server_zone_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "stream_server_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + streamServerZoneConnectionsProcessingCountRateChartTmpl = module.Chart{ + ID: "stream_server_zone_%s_connections_processing_count", + Title: "Stream Server Zone connections processed", + Units: "connections", + Fam: "stream connections", + Ctx: "nginxplus.stream_server_zone_connections_processing_count", + Priority: prioStreamServerZoneConnectionsProcessingCount, + Dims: module.Dims{ + {ID: 
"stream_server_zone_%s_connections_processing", Name: "processing"}, + }, + } + streamServerZoneConnectionsDiscardedRateChartTmpl = module.Chart{ + ID: "stream_server_zone_%s_connections_discarded_rate", + Title: "Stream Server Zone connections discarded", + Units: "connections/s", + Fam: "stream connections", + Ctx: "nginxplus.stream_server_zone_connections_discarded_rate", + Priority: prioStreamServerZoneConnectionsDiscardedRate, + Dims: module.Dims{ + {ID: "stream_server_zone_%s_connections_discarded", Name: "discarded", Algo: module.Incremental}, + }, + } +) + +var ( + streamUpstreamChartsTmpl = module.Charts{ + streamUpstreamPeersCountChartTmpl.Copy(), + streamUpstreamZombiesCountChartTmpl.Copy(), + } + streamUpstreamPeersCountChartTmpl = module.Chart{ + ID: "stream_upstream_%s_zone_%s_peers_count", + Title: "Stream Upstream peers", + Units: "peers", + Fam: "stream upstream", + Ctx: "nginxplus.stream_upstream_peers_count", + Priority: prioStreamUpstreamPeersCount, + Dims: module.Dims{ + {ID: "stream_upstream_%s_zone_%s_peers", Name: "peers"}, + }, + } + streamUpstreamZombiesCountChartTmpl = module.Chart{ + ID: "stream_upstream_%s_zone_%s_zombies_count", + Title: "Stream Upstream zombies", + Units: "servers", + Fam: "stream upstream", + Ctx: "nginxplus.stream_upstream_zombies_count", + Priority: prioStreamUpstreamZombiesCount, + Dims: module.Dims{ + {ID: "stream_upstream_%s_zone_%s_zombies", Name: "zombie"}, + }, + } + + streamUpstreamServerChartsTmpl = module.Charts{ + streamUpstreamServerConnectionsRateChartTmpl.Copy(), + streamUpstreamServerTrafficRateChartTmpl.Copy(), + streamUpstreamServerConnectionsCountChartTmpl.Copy(), + streamUpstreamServerStateChartTmpl.Copy(), + streamUpstreamServerDowntimeChartTmpl.Copy(), + } + streamUpstreamServerConnectionsRateChartTmpl = module.Chart{ + ID: "stream_upstream_%s_server_%s_zone_%s_connection_rate", + Title: "Stream Upstream Server connections", + Units: "connections/s", + Fam: "stream upstream connections", + Ctx: "nginxplus.stream_upstream_server_connections_rate", + Priority: prioStreamUpstreamServerConnectionsRate, + Dims: module.Dims{ + {ID: "stream_upstream_%s_server_%s_zone_%s_connections", Name: "forwarded", Algo: module.Incremental}, + }, + } + streamUpstreamServerTrafficRateChartTmpl = module.Chart{ + ID: "stream_upstream_%s_server_%s_zone_%s_traffic_rate", + Title: "Stream Upstream Server traffic rate", + Units: "bytes/s", + Fam: "stream upstream traffic", + Ctx: "nginxplus.stream_upstream_server_traffic_rate", + Priority: prioStreamUpstreamServerTrafficRate, + Type: module.Area, + Dims: module.Dims{ + {ID: "stream_upstream_%s_server_%s_zone_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "stream_upstream_%s_server_%s_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + streamUpstreamServerStateChartTmpl = module.Chart{ + ID: "stream_upstream_%s_server_%s_zone_%s_state", + Title: "Stream Upstream Server state", + Units: "state", + Fam: "stream upstream state", + Ctx: "nginxplus.stream_upstream_server_state", + Priority: prioStreamUpstreamServerState, + Dims: module.Dims{ + {ID: "stream_upstream_%s_server_%s_zone_%s_state_up", Name: "up"}, + {ID: "stream_upstream_%s_server_%s_zone_%s_state_down", Name: "down"}, + {ID: "stream_upstream_%s_server_%s_zone_%s_state_unavail", Name: "unavail"}, + {ID: "stream_upstream_%s_server_%s_zone_%s_state_checking", Name: "checking"}, + {ID: "stream_upstream_%s_server_%s_zone_%s_state_unhealthy", Name: "unhealthy"}, + }, + } + 
streamUpstreamServerDowntimeChartTmpl = module.Chart{ + ID: "stream_upstream_%s_server_%s_zone_%s_downtime", + Title: "Stream Upstream Server downtime", + Units: "seconds", + Fam: "stream upstream state", + Ctx: "nginxplus.stream_upstream_server_downtime", + Priority: prioStreamUpstreamServerDowntime, + Dims: module.Dims{ + {ID: "stream_upstream_%s_server_%s_zone_%s_downtime", Name: "downtime"}, + }, + } + streamUpstreamServerConnectionsCountChartTmpl = module.Chart{ + ID: "stream_upstream_%s_server_%s_zone_%s_connection_count", + Title: "Stream Upstream Server connections", + Units: "connections", + Fam: "stream upstream connections", + Ctx: "nginxplus.stream_upstream_server_connections_count", + Priority: prioStreamUpstreamServerConnectionsCount, + Dims: module.Dims{ + {ID: "stream_upstream_%s_server_%s_zone_%s_active", Name: "active"}, + }, + } +) + +var ( + resolverZoneChartsTmpl = module.Charts{ + resolverZoneRequestsRateChartTmpl.Copy(), + resolverZoneResponsesRateChartTmpl.Copy(), + } + resolverZoneRequestsRateChartTmpl = module.Chart{ + ID: "resolver_zone_%s_requests_rate", + Title: "Resolver requests rate", + Units: "requests/s", + Fam: "resolver requests", + Ctx: "nginxplus.resolver_zone_requests_rate", + Priority: prioResolverZoneRequestsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "resolver_zone_%s_requests_name", Name: "name", Algo: module.Incremental}, + {ID: "resolver_zone_%s_requests_srv", Name: "srv", Algo: module.Incremental}, + {ID: "resolver_zone_%s_requests_addr", Name: "addr", Algo: module.Incremental}, + }, + } + resolverZoneResponsesRateChartTmpl = module.Chart{ + ID: "resolver_zone_%s_responses_rate", + Title: "Resolver responses rate", + Units: "responses/s", + Fam: "resolver responses", + Ctx: "nginxplus.resolver_zone_responses_rate", + Priority: prioResolverZoneResponsesRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "resolver_zone_%s_responses_noerror", Name: "noerror", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_formerr", Name: "formerr", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_servfail", Name: "servfail", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_nxdomain", Name: "nxdomain", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_notimp", Name: "notimp", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_refused", Name: "refused", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_timedout", Name: "timedout", Algo: module.Incremental}, + {ID: "resolver_zone_%s_responses_unknown", Name: "unknown", Algo: module.Incremental}, + }, + } +) + +func (n *NginxPlus) addHTTPCacheCharts(name string) { + charts := httpCacheChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "http_cache", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeHTTPCacheCharts(name string) { + px := fmt.Sprintf("http_cache_%s_", name) + n.removeCharts(px) +} + +func (n *NginxPlus) addHTTPServerZoneCharts(zone string) { + charts := httpServerZoneChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, zone) + chart.Labels = []module.Label{ + {Key: "http_server_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + 
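// Add can fail, e.g. if a chart with the same ID was already added; log a warning and keep collecting. + 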
n.Warning(err) + } +} + +func (n *NginxPlus) removeHTTPServerZoneCharts(zone string) { + px := fmt.Sprintf("http_server_zone_%s_", zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addHTTPLocationZoneCharts(zone string) { + charts := httpLocationZoneChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, zone) + chart.Labels = []module.Label{ + {Key: "http_location_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeHTTPLocationZoneCharts(zone string) { + px := fmt.Sprintf("http_location_zone_%s_", zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addHTTPUpstreamCharts(name, zone string) { + charts := httpUpstreamChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name, zone) + chart.Labels = []module.Label{ + {Key: "http_upstream_name", Value: name}, + {Key: "http_upstream_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeHTTPUpstreamCharts(name, zone string) { + px := fmt.Sprintf("http_upstream_%s_zone_%s", name, zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addHTTPUpstreamServerCharts(name, serverAddr, serverName, zone string) { + charts := httpUpstreamServerChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name, serverAddr, zone) + chart.Labels = []module.Label{ + {Key: "http_upstream_name", Value: name}, + {Key: "http_upstream_zone", Value: zone}, + {Key: "http_upstream_server_address", Value: serverAddr}, + {Key: "http_upstream_server_name", Value: serverName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name, serverAddr, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeHTTPUpstreamServerCharts(name, serverAddr, zone string) { + px := fmt.Sprintf("http_upstream_%s_server_%s_zone_%s_", name, serverAddr, zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addStreamServerZoneCharts(zone string) { + charts := streamServerZoneChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, zone) + chart.Labels = []module.Label{ + {Key: "stream_server_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeStreamServerZoneCharts(zone string) { + px := fmt.Sprintf("stream_server_zone_%s_", zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addStreamUpstreamCharts(name, zone string) { + charts := streamUpstreamChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name, zone) + chart.Labels = []module.Label{ + {Key: "stream_upstream_name", Value: name}, + {Key: "stream_upstream_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeStreamUpstreamCharts(name, zone string) { + px := fmt.Sprintf("stream_upstream_%s_zone_%s_", name, zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addStreamUpstreamServerCharts(name, serverAddr, serverName, zone string) { + charts := 
streamUpstreamServerChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name, serverAddr, zone) + chart.Labels = []module.Label{ + {Key: "stream_upstream_name", Value: name}, + {Key: "stream_upstream_zone", Value: zone}, + {Key: "stream_upstream_server_address", Value: serverAddr}, + {Key: "stream_upstream_server_name", Value: serverName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name, serverAddr, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeStreamUpstreamServerCharts(name, serverAddr, zone string) { + px := fmt.Sprintf("stream_upstream_%s_server_%s_zone_%s", name, serverAddr, zone) + n.removeCharts(px) +} + +func (n *NginxPlus) addResolverZoneCharts(zone string) { + charts := resolverZoneChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, zone) + chart.Labels = []module.Label{ + {Key: "resolver_zone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, zone) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NginxPlus) removeResolverZoneCharts(zone string) { + px := fmt.Sprintf("resolver_zone_%s_", zone) + n.removeCharts(px) +} + +func (n *NginxPlus) removeCharts(prefix string) { + for _, chart := range *n.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/collect.go b/src/go/collectors/go.d.plugin/modules/nginxplus/collect.go new file mode 100644 index 00000000000000..f986778ba5f217 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/collect.go @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import ( + "errors" + "fmt" + "time" +) + +func (n *NginxPlus) collect() (map[string]int64, error) { + if n.apiVersion == 0 { + v, err := n.queryAPIVersion() + if err != nil { + return nil, err + } + n.apiVersion = v + } + + now := time.Now() + if now.Sub(n.queryEndpointsTime) > n.queryEndpointsEvery { + n.queryEndpointsTime = now + if err := n.queryAvailableEndpoints(); err != nil { + return nil, err + } + } + + ms := n.queryMetrics() + if ms.empty() { + return nil, errors.New("no metrics collected") + } + + mx := make(map[string]int64) + n.cache.resetUpdated() + n.collectInfo(mx, ms) + n.collectConnections(mx, ms) + n.collectSSL(mx, ms) + n.collectHTTPRequests(mx, ms) + n.collectHTTPCache(mx, ms) + n.collectHTTPServerZones(mx, ms) + n.collectHTTPLocationZones(mx, ms) + n.collectHTTPUpstreams(mx, ms) + n.collectStreamServerZones(mx, ms) + n.collectStreamUpstreams(mx, ms) + n.collectResolvers(mx, ms) + n.updateCharts() + + return mx, nil +} + +func (n *NginxPlus) collectInfo(mx map[string]int64, ms *nginxMetrics) { + if ms.info == nil { + return + } + mx["uptime"] = int64(ms.info.Timestamp.Sub(ms.info.LoadTimestamp).Seconds()) +} + +func (n *NginxPlus) collectConnections(mx map[string]int64, ms *nginxMetrics) { + if ms.connections == nil { + return + } + mx["connections_accepted"] = ms.connections.Accepted + mx["connections_dropped"] = ms.connections.Dropped + mx["connections_active"] = ms.connections.Active + mx["connections_idle"] = ms.connections.Idle +} + +func (n *NginxPlus) collectSSL(mx map[string]int64, ms *nginxMetrics) { + if ms.ssl == nil { + return + } + mx["ssl_handshakes"] = ms.ssl.Handshakes + mx["ssl_handshakes_failed"] = ms.ssl.HandshakesFailed + 
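// Every key written into mx must match a dim ID declared in charts.go; that is how these flat values end up on the SSL charts. + 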
mx["ssl_session_reuses"] = ms.ssl.SessionReuses + mx["ssl_no_common_protocol"] = ms.ssl.NoCommonProtocol + mx["ssl_no_common_cipher"] = ms.ssl.NoCommonCipher + mx["ssl_handshake_timeout"] = ms.ssl.HandshakeTimeout + mx["ssl_peer_rejected_cert"] = ms.ssl.PeerRejectedCert + mx["ssl_verify_failures_no_cert"] = ms.ssl.VerifyFailures.NoCert + mx["ssl_verify_failures_expired_cert"] = ms.ssl.VerifyFailures.ExpiredCert + mx["ssl_verify_failures_revoked_cert"] = ms.ssl.VerifyFailures.RevokedCert + mx["ssl_verify_failures_hostname_mismatch"] = ms.ssl.VerifyFailures.HostnameMismatch + mx["ssl_verify_failures_other"] = ms.ssl.VerifyFailures.Other +} + +func (n *NginxPlus) collectHTTPRequests(mx map[string]int64, ms *nginxMetrics) { + if ms.httpRequests == nil { + return + } + mx["http_requests_total"] = ms.httpRequests.Total + mx["http_requests_current"] = ms.httpRequests.Current +} + +func (n *NginxPlus) collectHTTPCache(mx map[string]int64, ms *nginxMetrics) { + if ms.httpCaches == nil { + return + } + for name, cache := range *ms.httpCaches { + n.cache.putHTTPCache(name) + px := fmt.Sprintf("http_cache_%s_", name) + mx[px+"state_cold"] = boolToInt(cache.Cold) + mx[px+"state_warm"] = boolToInt(!cache.Cold) + mx[px+"size"] = cache.Size + mx[px+"served_responses"] = cache.Hit.Responses + cache.Stale.Responses + cache.Updating.Responses + cache.Revalidated.Responses + mx[px+"written_responses"] = cache.Miss.ResponsesWritten + cache.Expired.ResponsesWritten + cache.Bypass.ResponsesWritten + mx[px+"bypassed_responses"] = cache.Miss.Responses + cache.Expired.Responses + cache.Bypass.Responses + mx[px+"served_bytes"] = cache.Hit.Bytes + cache.Stale.Bytes + cache.Updating.Bytes + cache.Revalidated.Bytes + mx[px+"written_bytes"] = cache.Miss.BytesWritten + cache.Expired.BytesWritten + cache.Bypass.BytesWritten + mx[px+"bypassed_bytes"] = cache.Miss.Bytes + cache.Expired.Bytes + cache.Bypass.Bytes + } +} + +func (n *NginxPlus) collectHTTPServerZones(mx map[string]int64, ms *nginxMetrics) { + if ms.httpServerZones == nil { + return + } + for name, zone := range *ms.httpServerZones { + n.cache.putHTTPServerZone(name) + + px := fmt.Sprintf("http_server_zone_%s_", name) + mx[px+"requests_processing"] = zone.Processing + mx[px+"requests"] = zone.Requests + mx[px+"requests_discarded"] = zone.Discarded + mx[px+"bytes_received"] = zone.Received + mx[px+"bytes_sent"] = zone.Sent + mx[px+"responses"] = zone.Responses.Total + mx[px+"responses_1xx"] = zone.Responses.Class1xx + mx[px+"responses_2xx"] = zone.Responses.Class2xx + mx[px+"responses_3xx"] = zone.Responses.Class3xx + mx[px+"responses_4xx"] = zone.Responses.Class4xx + mx[px+"responses_5xx"] = zone.Responses.Class5xx + } +} + +func (n *NginxPlus) collectHTTPLocationZones(mx map[string]int64, ms *nginxMetrics) { + if ms.httpLocationZones == nil { + return + } + for name, zone := range *ms.httpLocationZones { + n.cache.putHTTPLocationZone(name) + + px := fmt.Sprintf("http_location_zone_%s_", name) + mx[px+"requests"] = zone.Requests + mx[px+"requests_discarded"] = zone.Discarded + mx[px+"bytes_received"] = zone.Received + mx[px+"bytes_sent"] = zone.Sent + mx[px+"responses"] = zone.Responses.Total + mx[px+"responses_1xx"] = zone.Responses.Class1xx + mx[px+"responses_2xx"] = zone.Responses.Class2xx + mx[px+"responses_3xx"] = zone.Responses.Class3xx + mx[px+"responses_4xx"] = zone.Responses.Class4xx + mx[px+"responses_5xx"] = zone.Responses.Class5xx + } +} + +func (n *NginxPlus) collectHTTPUpstreams(mx map[string]int64, ms *nginxMetrics) { + if ms.httpUpstreams == nil 
{ + return + } + for name, upstream := range *ms.httpUpstreams { + n.cache.putHTTPUpstream(name, upstream.Zone) + + px := fmt.Sprintf("http_upstream_%s_zone_%s_", name, upstream.Zone) + mx[px+"zombies"] = upstream.Zombies + mx[px+"keepalive"] = upstream.Keepalive + mx[px+"peers"] = int64(len(upstream.Peers)) + + for _, peer := range upstream.Peers { + n.cache.putHTTPUpstreamServer(name, peer.Server, peer.Name, upstream.Zone) + + px = fmt.Sprintf("http_upstream_%s_server_%s_zone_%s_", name, peer.Server, upstream.Zone) + mx[px+"active"] = peer.Active + mx[px+"state_up"] = boolToInt(peer.State == "up") + mx[px+"state_down"] = boolToInt(peer.State == "down") + mx[px+"state_draining"] = boolToInt(peer.State == "draining") + mx[px+"state_unavail"] = boolToInt(peer.State == "unavail") + mx[px+"state_checking"] = boolToInt(peer.State == "checking") + mx[px+"state_unhealthy"] = boolToInt(peer.State == "unhealthy") + mx[px+"bytes_received"] = peer.Received + mx[px+"bytes_sent"] = peer.Sent + mx[px+"requests"] = peer.Requests + mx[px+"responses"] = peer.Responses.Total + mx[px+"responses_1xx"] = peer.Responses.Class1xx + mx[px+"responses_2xx"] = peer.Responses.Class2xx + mx[px+"responses_3xx"] = peer.Responses.Class3xx + mx[px+"responses_4xx"] = peer.Responses.Class4xx + mx[px+"responses_5xx"] = peer.Responses.Class5xx + mx[px+"response_time"] = peer.ResponseTime + mx[px+"header_time"] = peer.HeaderTime + mx[px+"downtime"] = peer.Downtime / 1000 + } + } +} + +func (n *NginxPlus) collectStreamServerZones(mx map[string]int64, ms *nginxMetrics) { + if ms.streamServerZones == nil { + return + } + for name, zone := range *ms.streamServerZones { + n.cache.putStreamServerZone(name) + + px := fmt.Sprintf("stream_server_zone_%s_", name) + mx[px+"connections"] = zone.Connections + mx[px+"connections_processing"] = zone.Processing + mx[px+"connections_discarded"] = zone.Discarded + mx[px+"bytes_received"] = zone.Received + mx[px+"bytes_sent"] = zone.Sent + mx[px+"sessions"] = zone.Sessions.Total + mx[px+"sessions_2xx"] = zone.Sessions.Class2xx + mx[px+"sessions_4xx"] = zone.Sessions.Class4xx + mx[px+"sessions_5xx"] = zone.Sessions.Class5xx + } +} + +func (n *NginxPlus) collectStreamUpstreams(mx map[string]int64, ms *nginxMetrics) { + if ms.streamUpstreams == nil { + return + } + for name, upstream := range *ms.streamUpstreams { + n.cache.putStreamUpstream(name, upstream.Zone) + + px := fmt.Sprintf("stream_upstream_%s_zone_%s_", name, upstream.Zone) + mx[px+"zombies"] = upstream.Zombies + mx[px+"peers"] = int64(len(upstream.Peers)) + + for _, peer := range upstream.Peers { + n.cache.putStreamUpstreamServer(name, peer.Server, peer.Name, upstream.Zone) + + px = fmt.Sprintf("stream_upstream_%s_server_%s_zone_%s_", name, peer.Server, upstream.Zone) + mx[px+"active"] = peer.Active + mx[px+"connections"] = peer.Connections + mx[px+"state_up"] = boolToInt(peer.State == "up") + mx[px+"state_down"] = boolToInt(peer.State == "down") + mx[px+"state_unavail"] = boolToInt(peer.State == "unavail") + mx[px+"state_checking"] = boolToInt(peer.State == "checking") + mx[px+"state_unhealthy"] = boolToInt(peer.State == "unhealthy") + mx[px+"bytes_received"] = peer.Received + mx[px+"bytes_sent"] = peer.Sent + mx[px+"downtime"] = peer.Downtime / 1000 + } + } +} + +func (n *NginxPlus) collectResolvers(mx map[string]int64, ms *nginxMetrics) { + if ms.resolvers == nil { + return + } + for name, zone := range *ms.resolvers { + n.cache.putResolver(name) + + px := fmt.Sprintf("resolver_zone_%s_", name) + mx[px+"requests_name"] = 
zone.Requests.Name + mx[px+"requests_srv"] = zone.Requests.Srv + mx[px+"requests_addr"] = zone.Requests.Addr + mx[px+"responses_noerror"] = zone.Responses.NoError + mx[px+"responses_formerr"] = zone.Responses.Formerr + mx[px+"responses_servfail"] = zone.Responses.Servfail + mx[px+"responses_nxdomain"] = zone.Responses.Nxdomain + mx[px+"responses_notimp"] = zone.Responses.Notimp + mx[px+"responses_refused"] = zone.Responses.Refused + mx[px+"responses_timedout"] = zone.Responses.TimedOut + mx[px+"responses_unknown"] = zone.Responses.Unknown + } +} + +func (n *NginxPlus) updateCharts() { + const notSeenLimit = 3 + + for key, v := range n.cache.httpCaches { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addHTTPCacheCharts(v.name) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.httpCaches, key) + n.removeHTTPCacheCharts(v.name) + } + } + } + for key, v := range n.cache.httpServerZones { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addHTTPServerZoneCharts(v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.httpServerZones, key) + n.removeHTTPServerZoneCharts(v.zone) + } + } + } + for key, v := range n.cache.httpLocationZones { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addHTTPLocationZoneCharts(v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.httpLocationZones, key) + n.removeHTTPLocationZoneCharts(v.zone) + } + } + } + for key, v := range n.cache.httpUpstreams { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addHTTPUpstreamCharts(v.name, v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.httpUpstreams, key) + n.removeHTTPUpstreamCharts(v.name, v.zone) + } + } + } + for key, v := range n.cache.httpUpstreamServers { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addHTTPUpstreamServerCharts(v.name, v.serverAddr, v.serverName, v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.httpUpstreamServers, key) + n.removeHTTPUpstreamServerCharts(v.name, v.serverAddr, v.zone) + } + } + } + for key, v := range n.cache.streamServerZones { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addStreamServerZoneCharts(v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.streamServerZones, key) + n.removeStreamServerZoneCharts(v.zone) + } + } + } + for key, v := range n.cache.streamUpstreams { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addStreamUpstreamCharts(v.name, v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.streamUpstreams, key) + n.removeStreamUpstreamCharts(v.name, v.zone) + } + } + } + for key, v := range n.cache.streamUpstreamServers { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addStreamUpstreamServerCharts(v.name, v.serverAddr, v.serverName, v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + delete(n.cache.streamUpstreamServers, key) + n.removeStreamUpstreamServerCharts(v.name, v.serverAddr, v.zone) + } + } + } + for key, v := range n.cache.resolvers { + if v.updated && !v.hasCharts { + v.hasCharts = true + n.addResolverZoneCharts(v.zone) + continue + } + if !v.updated { + if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit { + 
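// The zone produced no data for notSeenLimit consecutive collections: drop it from the cache and retire its charts. + 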
delete(n.cache.resolvers, key) + n.removeResolverZoneCharts(v.zone) + } + } + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/config_schema.json b/src/go/collectors/go.d.plugin/modules/nginxplus/config_schema.json new file mode 100644 index 00000000000000..c1457d2d7608d0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nginxplus job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/integrations/nginx_plus.md b/src/go/collectors/go.d.plugin/modules/nginxplus/integrations/nginx_plus.md new file mode 100644 index 00000000000000..44dad3051c88a7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/integrations/nginx_plus.md @@ -0,0 +1,413 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginxplus/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginxplus/metadata.yaml" +sidebar_label: "NGINX Plus" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NGINX Plus + + +<img src="https://netdata.cloud/img/nginxplus.svg" width="150"/> + + +Plugin: go.d.plugin +Module: nginxplus + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors NGINX Plus servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per NGINX Plus instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.client_connections_rate | accepted, dropped | connections/s | +| nginxplus.client_connections_count | active, idle | connections | +| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s | +| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s | +| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s | +| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s | +| nginxplus.http_requests_rate | requests | requests/s | +| nginxplus.http_requests_count | requests | requests | +| nginxplus.uptime | uptime | seconds | + +### Per http server zone + +These metrics refer to the HTTP server zone. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| http_server_zone | HTTP server zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.http_server_zone_requests_rate | requests | requests/s | +| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s | +| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s | +| nginxplus.http_server_zone_requests_processing_count | processing | requests | +| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s | + +### Per http location zone + +These metrics refer to the HTTP location zone. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| http_location_zone | HTTP location zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.http_location_zone_requests_rate | requests | requests/s | +| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s | +| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s | +| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s | + +### Per http upstream + +These metrics refer to the HTTP upstream. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| http_upstream_name | HTTP upstream name | +| http_upstream_zone | HTTP upstream zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.http_upstream_peers_count | peers | peers | +| nginxplus.http_upstream_zombies_count | zombie | servers | +| nginxplus.http_upstream_keepalive_count | keepalive | connections | + +### Per http upstream server + +These metrics refer to the HTTP upstream server. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| http_upstream_name | HTTP upstream name | +| http_upstream_zone | HTTP upstream zone name | +| http_upstream_server_address | HTTP upstream server address (e.g. 
127.0.0.1:81) | +| http_upstream_server_name | HTTP upstream server name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.http_upstream_server_requests_rate | requests | requests/s | +| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s | +| nginxplus.http_upstream_server_response_time | response | milliseconds | +| nginxplus.http_upstream_server_response_header_time | header | milliseconds | +| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s | +| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state | +| nginxplus.http_upstream_server_connections_count | active | connections | +| nginxplus.http_upstream_server_downtime | downtime | seconds | + +### Per http cache + +These metrics refer to the HTTP cache. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| http_cache | HTTP cache name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.http_cache_state | warm, cold | state | +| nginxplus.http_cache_iops | served, written, bypassed | responses/s | +| nginxplus.http_cache_io | served, written, bypassed | bytes/s | +| nginxplus.http_cache_size | size | bytes | + +### Per stream server zone + +These metrics refer to the Stream server zone. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| stream_server_zone | Stream server zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.stream_server_zone_connections_rate | accepted | connections/s | +| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s | +| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s | +| nginxplus.stream_server_zone_connections_processing_count | processing | connections | +| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s | + +### Per stream upstream + +These metrics refer to the Stream upstream. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| stream_upstream_name | Stream upstream name | +| stream_upstream_zone | Stream upstream zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.stream_upstream_peers_count | peers | peers | +| nginxplus.stream_upstream_zombies_count | zombie | servers | + +### Per stream upstream server + +These metrics refer to the Stream upstream server. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| stream_upstream_name | Stream upstream name | +| stream_upstream_zone | Stream upstream zone name | +| stream_upstream_server_address | Stream upstream server address (e.g. 127.0.0.1:12346) | +| stream_upstream_server_name | Stream upstream server name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s | +| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s | +| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state | +| nginxplus.stream_upstream_server_downtime | downtime | seconds | +| nginxplus.stream_upstream_server_connections_count | active | connections | + +### Per resolver zone + +These metrics refer to the resolver zone. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| resolver_zone | resolver zone name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s | +| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Config API + +To configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/nginxplus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/nginxplus.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1 + +``` +</details> + +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1 + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +NGINX Plus with enabled HTTPS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1 + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1 + + - name: remote + url: http://192.0.2.1 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m nginxplus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/metadata.yaml b/src/go/collectors/go.d.plugin/modules/nginxplus/metadata.yaml new file mode 100644 index 00000000000000..6bc3a29bdd02c5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/metadata.yaml @@ -0,0 +1,584 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-nginxplus + plugin_name: go.d.plugin + module_name: nginxplus + monitored_instance: + name: NGINX Plus + link: https://www.nginx.com/products/nginx/ + icon_filename: nginxplus.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - nginxplus + - nginx + - web + - webserver + - http + - proxy + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors NGINX Plus servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Config API + description: | + To configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api). + configuration: + file: + name: go.d/nginxplus.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. 
+ default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1 + username: username + password: password + - name: HTTPS with self-signed certificate + description: NGINX Plus with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1 + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1 + + - name: remote + url: http://192.0.2.1 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: nginxplus.client_connections_rate + description: Client connections rate + unit: connections/s + chart_type: line + dimensions: + - name: accepted + - name: dropped + - name: nginxplus.client_connections_count + description: Client connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: idle + - name: nginxplus.ssl_handshakes_rate + description: SSL handshakes rate + unit: handshakes/s + chart_type: line + dimensions: + - name: successful + - name: failed + - name: nginxplus.ssl_handshakes_failures_rate + description: SSL handshakes failures rate + unit: failures/s + chart_type: stacked + dimensions: + - name: no_common_protocol + - name: no_common_cipher + - name: timeout + - name: peer_rejected_cert + - name: nginxplus.ssl_verification_errors_rate + description: SSL verification errors rate + unit: errors/s + chart_type: stacked + dimensions: + - name: no_cert + - name: expired_cert + - name: revoked_cert + - name: hostname_mismatch + - name: other + - name: nginxplus.ssl_session_reuses_rate + description: Session reuses during SSL handshake + unit: reuses/s + chart_type: line + dimensions: + - name: ssl_session + - name: nginxplus.http_requests_rate + description: HTTP requests rate + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: nginxplus.http_requests_count + description: HTTP requests + unit: requests + chart_type: line + dimensions: + - name: requests + - name: nginxplus.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: http server zone + description: These metrics refer to the HTTP server zone. + labels: + - name: http_server_zone + description: HTTP server zone name + metrics: + - name: nginxplus.http_server_zone_requests_rate + description: HTTP Server Zone requests rate + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: nginxplus.http_server_zone_responses_per_code_class_rate + description: HTTP Server Zone responses rate + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: nginxplus.http_server_zone_traffic_rate + description: HTTP Server Zone traffic + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: nginxplus.http_server_zone_requests_processing_count + description: HTTP Server Zone currently processed requests + unit: requests + chart_type: line + dimensions: + - name: processing + - name: nginxplus.http_server_zone_requests_discarded_rate + description: HTTP Server Zone requests discarded rate + unit: requests/s + chart_type: line + dimensions: + - name: discarded + - name: http location zone + description: These metrics refer to the HTTP location zone. 
+ labels: + - name: http_location_zone + description: HTTP location zone name + metrics: + - name: nginxplus.http_location_zone_requests_rate + description: HTTP Location Zone requests rate + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: nginxplus.http_location_zone_responses_per_code_class_rate + description: HTTP Location Zone responses rate + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: nginxplus.http_location_zone_traffic_rate + description: HTTP Location Zone traffic rate + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: nginxplus.http_location_zone_requests_discarded_rate + description: HTTP Location Zone requests discarded rate + unit: requests/s + chart_type: line + dimensions: + - name: discarded + - name: http upstream + description: These metrics refer to the HTTP upstream. + labels: + - name: http_upstream_name + description: HTTP upstream name + - name: http_upstream_zone + description: HTTP upstream zone name + metrics: + - name: nginxplus.http_upstream_peers_count + description: HTTP Upstream peers + unit: peers + chart_type: line + dimensions: + - name: peers + - name: nginxplus.http_upstream_zombies_count + description: HTTP Upstream zombies + unit: servers + chart_type: line + dimensions: + - name: zombie + - name: nginxplus.http_upstream_keepalive_count + description: HTTP Upstream keepalive + unit: connections + chart_type: line + dimensions: + - name: keepalive + - name: http upstream server + description: These metrics refer to the HTTP upstream server. + labels: + - name: http_upstream_name + description: HTTP upstream name + - name: http_upstream_zone + description: HTTP upstream zone name + - name: http_upstream_server_address + description: HTTP upstream server address (e.g. 
127.0.0.1:81) + - name: http_upstream_server_name + description: HTTP upstream server name + metrics: + - name: nginxplus.http_upstream_server_requests_rate + description: HTTP Upstream Server requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: nginxplus.http_upstream_server_responses_per_code_class_rate + description: HTTP Upstream Server responses + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: nginxplus.http_upstream_server_response_time + description: HTTP Upstream Server average response time + unit: milliseconds + chart_type: line + dimensions: + - name: response + - name: nginxplus.http_upstream_server_response_header_time + description: HTTP Upstream Server average response header time + unit: milliseconds + chart_type: line + dimensions: + - name: header + - name: nginxplus.http_upstream_server_traffic_rate + description: HTTP Upstream Server traffic rate + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: nginxplus.http_upstream_server_state + description: HTTP Upstream Server state + unit: state + chart_type: line + dimensions: + - name: up + - name: down + - name: draining + - name: unavail + - name: checking + - name: unhealthy + - name: nginxplus.http_upstream_server_connections_count + description: HTTP Upstream Server connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: nginxplus.http_upstream_server_downtime + description: HTTP Upstream Server downtime + unit: seconds + chart_type: line + dimensions: + - name: downtime + - name: http cache + description: These metrics refer to the HTTP cache. + labels: + - name: http_cache + description: HTTP cache name + metrics: + - name: nginxplus.http_cache_state + description: HTTP Cache state + unit: state + chart_type: line + dimensions: + - name: warm + - name: cold + - name: nginxplus.http_cache_iops + description: HTTP Cache IOPS + unit: responses/s + chart_type: line + dimensions: + - name: served + - name: written + - name: bypass + - name: nginxplus.http_cache_io + description: HTTP Cache IO + unit: bytes/s + chart_type: line + dimensions: + - name: served + - name: written + - name: bypass + - name: nginxplus.http_cache_size + description: HTTP Cache size + unit: bytes + chart_type: line + dimensions: + - name: size + - name: stream server zone + description: These metrics refer to the Stream server zone.
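+ # Stream server zones appear in the API only when the stream server{} blocks set status_zone (per NGINX Plus docs).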
+ labels: + - name: stream_server_zone + description: Stream server zone name + metrics: + - name: nginxplus.stream_server_zone_connections_rate + description: Stream Server Zone connections rate + unit: connections/s + chart_type: line + dimensions: + - name: accepted + - name: nginxplus.stream_server_zone_sessions_per_code_class_rate + description: Stream Server Zone sessions rate + unit: sessions/s + chart_type: stacked + dimensions: + - name: 2xx + - name: 4xx + - name: 5xx + - name: nginxplus.stream_server_zone_traffic_rate + description: Stream Server Zone traffic rate + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: nginxplus.stream_server_zone_connections_processing_count + description: Stream Server Zone connections processed + unit: connections + chart_type: line + dimensions: + - name: processing + - name: nginxplus.stream_server_zone_connections_discarded_rate + description: Stream Server Zone connections discarded + unit: connections/s + chart_type: line + dimensions: + - name: discarded + - name: stream upstream + description: These metrics refer to the Stream upstream. + labels: + - name: stream_upstream_name + description: Stream upstream name + - name: stream_upstream_zone + description: Stream upstream zone name + metrics: + - name: nginxplus.stream_upstream_peers_count + description: Stream Upstream peers + unit: peers + chart_type: line + dimensions: + - name: peers + - name: nginxplus.stream_upstream_zombies_count + description: Stream Upstream zombies + unit: servers + chart_type: line + dimensions: + - name: zombie + - name: stream upstream server + description: These metrics refer to the Stream upstream server. + labels: + - name: stream_upstream_name + description: Stream upstream name + - name: stream_upstream_zone + description: Stream upstream zone name + - name: stream_upstream_server_address + description: Stream upstream server address (e.g. 127.0.0.1:12346) + - name: stream_upstream_server_name + description: Stream upstream server name + metrics: + - name: nginxplus.stream_upstream_server_connections_rate + description: Stream Upstream Server connections + unit: connections/s + chart_type: line + dimensions: + - name: forwarded + - name: nginxplus.stream_upstream_server_traffic_rate + description: Stream Upstream Server traffic rate + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: nginxplus.stream_upstream_server_state + description: Stream Upstream Server state + unit: state + chart_type: line + dimensions: + - name: up + - name: down + - name: unavail + - name: checking + - name: unhealthy + - name: nginxplus.stream_upstream_server_downtime + description: Stream Upstream Server downtime + unit: seconds + chart_type: line + dimensions: + - name: downtime + - name: nginxplus.stream_upstream_server_connections_count + description: Stream Upstream Server connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: resolver zone + description: These metrics refer to the resolver zone. 
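+ # Resolver zones appear only for resolver directives configured with the status_zone parameter (per NGINX Plus docs).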
+ labels: + - name: resolver_zone + description: resolver zone name + metrics: + - name: nginxplus.resolver_zone_requests_rate + description: Resolver requests rate + unit: requests/s + chart_type: stacked + dimensions: + - name: name + - name: srv + - name: addr + - name: nginxplus.resolver_zone_responses_rate + description: Resolver responses rate + unit: responses/s + chart_type: stacked + dimensions: + - name: noerror + - name: formerr + - name: servfail + - name: nxdomain + - name: notimp + - name: refused + - name: timedout + - name: unknown diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api.go b/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api.go new file mode 100644 index 00000000000000..0f7999ac514792 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api.go @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import "time" + +// https://demo.nginx.com/dashboard.html +// https://demo.nginx.com/swagger-ui/ +// http://nginx.org/en/docs/http/ngx_http_api_module.html + +type nginxAPIVersions []int64 + +type ( + nginxInfo struct { + Version string `json:"version"` + Build string `json:"build"` + Address string `json:"address"` + Generation int `json:"generation"` + LoadTimestamp time.Time `json:"load_timestamp"` + Timestamp time.Time `json:"timestamp"` + } + nginxConnections struct { + Accepted int64 `json:"accepted"` + Dropped int64 `json:"dropped"` + Active int64 `json:"active"` + Idle int64 `json:"idle"` + } + nginxSSL struct { + Handshakes int64 `json:"handshakes"` + HandshakesFailed int64 `json:"handshakes_failed"` + SessionReuses int64 `json:"session_reuses"` + NoCommonProtocol int64 `json:"no_common_protocol"` + NoCommonCipher int64 `json:"no_common_cipher"` + HandshakeTimeout int64 `json:"handshake_timeout"` + PeerRejectedCert int64 `json:"peer_rejected_cert"` + VerifyFailures struct { + NoCert int64 `json:"no_cert"` + ExpiredCert int64 `json:"expired_cert"` + RevokedCert int64 `json:"revoked_cert"` + HostnameMismatch int64 `json:"hostname_mismatch"` + Other int64 `json:"other"` + } `json:"verify_failures"` + } +) + +type ( + nginxHTTPRequests struct { + Total int64 `json:"total"` + Current int64 `json:"current"` + } + nginxHTTPServerZones map[string]struct { + Processing int64 `json:"processing"` + Requests int64 `json:"requests"` + Responses struct { + Class1xx int64 `json:"1xx"` + Class2xx int64 `json:"2xx"` + Class3xx int64 `json:"3xx"` + Class4xx int64 `json:"4xx"` + Class5xx int64 `json:"5xx"` + Total int64 + } `json:"responses"` + Discarded int64 `json:"discarded"` + Received int64 `json:"received"` + Sent int64 `json:"sent"` + } + nginxHTTPLocationZones map[string]struct { + Requests int64 `json:"requests"` + Responses struct { + Class1xx int64 `json:"1xx"` + Class2xx int64 `json:"2xx"` + Class3xx int64 `json:"3xx"` + Class4xx int64 `json:"4xx"` + Class5xx int64 `json:"5xx"` + Total int64 + } `json:"responses"` + Discarded int64 `json:"discarded"` + Received int64 `json:"received"` + Sent int64 `json:"sent"` + } + nginxHTTPUpstreams map[string]struct { + Peers []struct { + Id int64 `json:"id"` + Server string `json:"server"` + Name string `json:"name"` + Backup bool `json:"backup"` + Weight int64 `json:"weight"` + State string `json:"state"` + Active int64 `json:"active"` + Requests int64 `json:"requests"` + HeaderTime int64 `json:"header_time"` + ResponseTime int64 `json:"response_time"` + Responses struct { + Class1xx int64 `json:"1xx"` + Class2xx int64 `json:"2xx"` + 
Class3xx int64 `json:"3xx"` + Class4xx int64 `json:"4xx"` + Class5xx int64 `json:"5xx"` + Total int64 + } `json:"responses"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks struct { + Checks int64 `json:"checks"` + Fails int64 `json:"fails"` + Unhealthy int64 `json:"unhealthy"` + } `json:"health_checks"` + Downtime int64 `json:"downtime"` + Selected time.Time `json:"selected"` + } `json:"peers"` + Keepalive int64 `json:"keepalive"` + Zombies int64 `json:"zombies"` + Zone string `json:"zone"` + } + nginxHTTPCaches map[string]struct { + Size int64 `json:"size"` + Cold bool `json:"cold"` + Hit struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + } `json:"hit"` + Stale struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + } `json:"stale"` + Updating struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + } `json:"updating"` + Revalidated struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + } `json:"revalidated"` + Miss struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + ResponsesWritten int64 `json:"responses_written"` + BytesWritten int64 `json:"bytes_written"` + } `json:"miss"` + Expired struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + ResponsesWritten int64 `json:"responses_written"` + BytesWritten int64 `json:"bytes_written"` + } `json:"expired"` + Bypass struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` + ResponsesWritten int64 `json:"responses_written"` + BytesWritten int64 `json:"bytes_written"` + } `json:"bypass"` + } +) + +type ( + nginxStreamServerZones map[string]struct { + Processing int64 `json:"processing"` + Connections int64 `json:"connections"` + Sessions struct { + Class2xx int64 `json:"2xx"` + Class4xx int64 `json:"4xx"` + Class5xx int64 `json:"5xx"` + Total int64 `json:"total"` + } `json:"sessions"` + Discarded int64 `json:"discarded"` + Received int64 `json:"received"` + Sent int64 `json:"sent"` + } + nginxStreamUpstreams map[string]struct { + Peers []struct { + Id int64 `json:"id"` + Server string `json:"server"` + Name string `json:"name"` + Backup bool `json:"backup"` + Weight int64 `json:"weight"` + State string `json:"state"` + Active int64 `json:"active"` + Connections int64 `json:"connections"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks struct { + Checks int64 `json:"checks"` + Fails int64 `json:"fails"` + Unhealthy int64 `json:"unhealthy"` + } `json:"health_checks"` + Downtime int64 `json:"downtime"` + } `json:"peers"` + Zombies int64 `json:"zombies"` + Zone string `json:"zone"` + } +) + +type nginxResolvers map[string]struct { + Requests struct { + Name int64 `json:"name"` + Srv int64 `json:"srv"` + Addr int64 `json:"addr"` + } `json:"requests"` + Responses struct { + NoError int64 `json:"noerror"` + Formerr int64 `json:"formerr"` + Servfail int64 `json:"servfail"` + Nxdomain int64 `json:"nxdomain"` + Notimp int64 `json:"notimp"` + Refused int64 `json:"refused"` + TimedOut int64 `json:"timedout"` + Unknown int64 `json:"unknown"` + } `json:"responses"` +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api_query.go b/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api_query.go new file mode 100644 index 00000000000000..48c31d0eaf106d --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/nginxplus/nginx_http_api_query.go @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "sync" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathAPIVersions = "/api/" + urlPathAPIEndpointsRoot = "/api/%d" + urlPathAPINginx = "/api/%d/nginx" + urlPathAPIEndpointsHTTP = "/api/%d/http" + urlPathAPIEndpointsStream = "/api/%d/stream" + urlPathAPIConnections = "/api/%d/connections" + urlPathAPISSL = "/api/%d/ssl" + urlPathAPIResolvers = "/api/%d/resolvers" + urlPathAPIHTTPRequests = "/api/%d/http/requests" + urlPathAPIHTTPServerZones = "/api/%d/http/server_zones" + urlPathAPIHTTPLocationZones = "/api/%d/http/location_zones" + urlPathAPIHTTPUpstreams = "/api/%d/http/upstreams" + urlPathAPIHTTPCaches = "/api/%d/http/caches" + urlPathAPIStreamServerZones = "/api/%d/stream/server_zones" + urlPathAPIStreamUpstreams = "/api/%d/stream/upstreams" +) + +type nginxMetrics struct { + info *nginxInfo + connections *nginxConnections + ssl *nginxSSL + httpRequests *nginxHTTPRequests + httpServerZones *nginxHTTPServerZones + httpLocationZones *nginxHTTPLocationZones + httpUpstreams *nginxHTTPUpstreams + httpCaches *nginxHTTPCaches + streamServerZones *nginxStreamServerZones + streamUpstreams *nginxStreamUpstreams + resolvers *nginxResolvers +} + +func (n *NginxPlus) queryAPIVersion() (int64, error) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = urlPathAPIVersions + + var versions nginxAPIVersions + if err := n.doWithDecode(&versions, req); err != nil { + return 0, err + } + + if len(versions) == 0 { + return 0, fmt.Errorf("'%s' returned no data", req.URL) + } + + return versions[len(versions)-1], nil +} + +func (n *NginxPlus) queryAvailableEndpoints() error { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIEndpointsRoot, n.apiVersion) + + var endpoints []string + if err := n.doWithDecode(&endpoints, req); err != nil { + return err + } + + n.Debugf("discovered root endpoints: %v", endpoints) + var hasHTTP, hasStream bool + for _, v := range endpoints { + switch v { + case "nginx": + n.endpoints.nginx = true + case "connections": + n.endpoints.connections = true + case "ssl": + n.endpoints.ssl = true + case "resolvers": + n.endpoints.resolvers = true + case "http": + hasHTTP = true + case "stream": + hasStream = true + } + } + + if hasHTTP { + endpoints = endpoints[:0] + req, _ = web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIEndpointsHTTP, n.apiVersion) + + if err := n.doWithDecode(&endpoints, req); err != nil { + return err + } + + n.Debugf("discovered http endpoints: %v", endpoints) + for _, v := range endpoints { + switch v { + case "requests": + n.endpoints.httpRequest = true + case "server_zones": + n.endpoints.httpServerZones = true + case "location_zones": + n.endpoints.httpLocationZones = true + case "caches": + n.endpoints.httpCaches = true + case "upstreams": + n.endpoints.httpUpstreams = true + } + } + } + + if hasStream { + endpoints = endpoints[:0] + req, _ = web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIEndpointsStream, n.apiVersion) + + if err := n.doWithDecode(&endpoints, req); err != nil { + return err + } + + n.Debugf("discovered stream endpoints: %v", endpoints) + for _, v := range endpoints { + switch v { + case "server_zones": + n.endpoints.streamServerZones = true + case "upstreams": + n.endpoints.streamUpstreams = 
true + } + } + } + + return nil +} + +func (n *NginxPlus) queryMetrics() *nginxMetrics { + ms := &nginxMetrics{} + wg := &sync.WaitGroup{} + + for _, task := range []struct { + do bool + fn func(*nginxMetrics) + }{ + {do: n.endpoints.nginx, fn: n.queryNginxInfo}, + {do: n.endpoints.connections, fn: n.queryConnections}, + {do: n.endpoints.ssl, fn: n.querySSL}, + {do: n.endpoints.httpRequest, fn: n.queryHTTPRequests}, + {do: n.endpoints.httpServerZones, fn: n.queryHTTPServerZones}, + {do: n.endpoints.httpLocationZones, fn: n.queryHTTPLocationZones}, + {do: n.endpoints.httpUpstreams, fn: n.queryHTTPUpstreams}, + {do: n.endpoints.httpCaches, fn: n.queryHTTPCaches}, + {do: n.endpoints.streamServerZones, fn: n.queryStreamServerZones}, + {do: n.endpoints.streamUpstreams, fn: n.queryStreamUpstreams}, + {do: n.endpoints.resolvers, fn: n.queryResolvers}, + } { + task := task + if task.do { + wg.Add(1) + go func() { task.fn(ms); wg.Done() }() + } + } + + wg.Wait() + + return ms +} + +func (n *NginxPlus) queryNginxInfo(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPINginx, n.apiVersion) + + var v nginxInfo + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.nginx = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.info = &v +} + +func (n *NginxPlus) queryConnections(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIConnections, n.apiVersion) + + var v nginxConnections + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.connections = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.connections = &v +} + +func (n *NginxPlus) querySSL(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPISSL, n.apiVersion) + + var v nginxSSL + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.ssl = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.ssl = &v +} + +func (n *NginxPlus) queryHTTPRequests(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIHTTPRequests, n.apiVersion) + + var v nginxHTTPRequests + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.httpRequest = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.httpRequests = &v +} + +func (n *NginxPlus) queryHTTPServerZones(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIHTTPServerZones, n.apiVersion) + + var v nginxHTTPServerZones + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.httpServerZones = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.httpServerZones = &v +} + +func (n *NginxPlus) queryHTTPLocationZones(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIHTTPLocationZones, n.apiVersion) + + var v nginxHTTPLocationZones + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.httpLocationZones = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.httpLocationZones = &v +} + +func (n *NginxPlus) queryHTTPUpstreams(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIHTTPUpstreams, n.apiVersion) + + var v nginxHTTPUpstreams + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.httpUpstreams = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + 
} + + ms.httpUpstreams = &v +} + +func (n *NginxPlus) queryHTTPCaches(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIHTTPCaches, n.apiVersion) + + var v nginxHTTPCaches + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.httpCaches = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.httpCaches = &v +} + +func (n *NginxPlus) queryStreamServerZones(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIStreamServerZones, n.apiVersion) + + var v nginxStreamServerZones + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.streamServerZones = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.streamServerZones = &v +} + +func (n *NginxPlus) queryStreamUpstreams(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIStreamUpstreams, n.apiVersion) + + var v nginxStreamUpstreams + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.streamUpstreams = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.streamUpstreams = &v +} + +func (n *NginxPlus) queryResolvers(ms *nginxMetrics) { + req, _ := web.NewHTTPRequest(n.Request.Copy()) + req.URL.Path = fmt.Sprintf(urlPathAPIResolvers, n.apiVersion) + + var v nginxResolvers + + if err := n.doWithDecode(&v, req); err != nil { + n.endpoints.resolvers = !errors.Is(err, errPathNotFound) + n.Warning(err) + return + } + + ms.resolvers = &v +} + +var ( + errPathNotFound = errors.New("path not found") +) + +func (n *NginxPlus) doWithDecode(dst interface{}, req *http.Request) error { + n.Debugf("executing %s '%s'", req.Method, req.URL) + resp, err := n.httpClient.Do(req) + if err != nil { + return err + } + defer closeBody(resp) + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("%s returned %d status code (%w)", req.URL, resp.StatusCode, errPathNotFound) + } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status) + } + + content, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error on reading response from %s : %v", req.URL, err) + } + + if err := json.Unmarshal(content, dst); err != nil { + return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +// empty reports whether collection returned only the basic nginx info and no metric groups. +func (n *nginxMetrics) empty() bool { + return n.info != nil && + n.connections == nil && + n.ssl == nil && + n.httpRequests == nil && + n.httpServerZones == nil && + n.httpLocationZones == nil && + n.httpUpstreams == nil && + n.httpCaches == nil && + n.streamServerZones == nil && + n.streamUpstreams == nil && + n.resolvers == nil +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus.go b/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus.go new file mode 100644 index 00000000000000..ba82242f89a5bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus.go @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("nginxplus", module.Creator{ + 
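+ // JobConfigSchema holds the embedded config_schema.json used to validate job settings.
+ // A minimal job config sketch (assuming go.d conventions; the file name and job name below are illustrative):
+ //
+ //   # go.d/nginxplus.conf
+ //   jobs:
+ //     - name: local
+ //       url: http://127.0.0.1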
JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *NginxPlus { + return &NginxPlus{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 1}, + }, + }, + }, + charts: baseCharts.Copy(), + queryEndpointsEvery: time.Minute, + cache: newCache(), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type NginxPlus struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + httpClient *http.Client + + apiVersion int64 + + endpoints struct { + nginx bool + connections bool + ssl bool + httpCaches bool + httpRequest bool + httpServerZones bool + httpLocationZones bool + httpUpstreams bool + streamServerZones bool + streamUpstreams bool + resolvers bool + } + queryEndpointsTime time.Time + queryEndpointsEvery time.Duration + + cache *cache +} + +func (n *NginxPlus) Init() bool { + if n.URL == "" { + n.Error("config validation: 'url' can not be empty") + return false + } + + client, err := web.NewHTTPClient(n.Client) + if err != nil { + n.Errorf("init HTTP client: %v", err) + return false + } + n.httpClient = client + + return true +} + +func (n *NginxPlus) Check() bool { + return len(n.Collect()) > 0 +} + +func (n *NginxPlus) Charts() *module.Charts { + return n.charts +} + +func (n *NginxPlus) Collect() map[string]int64 { + mx, err := n.collect() + + if err != nil { + n.Error(err) + return nil + } + + return mx +} + +func (n *NginxPlus) Cleanup() { + if n.httpClient != nil { + n.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus_test.go b/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus_test.go new file mode 100644 index 00000000000000..7bbe8955776b90 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/nginxplus_test.go @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxplus + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataAPI8APIVersions, _ = os.ReadFile("testdata/api-8/api_versions.json") + dataAPI8Connections, _ = os.ReadFile("testdata/api-8/connections.json") + dataAPI8EndpointsHTTP, _ = os.ReadFile("testdata/api-8/endpoints_http.json") + dataAPI8EndpointsRoot, _ = os.ReadFile("testdata/api-8/endpoints_root.json") + dataAPI8EndpointsStream, _ = os.ReadFile("testdata/api-8/endpoints_stream.json") + dataAPI8HTTPCaches, _ = os.ReadFile("testdata/api-8/http_caches.json") + dataAPI8HTTPLocationZones, _ = os.ReadFile("testdata/api-8/http_location_zones.json") + dataAPI8HTTPRequests, _ = os.ReadFile("testdata/api-8/http_requests.json") + dataAPI8HTTPServerZones, _ = os.ReadFile("testdata/api-8/http_server_zones.json") + dataAPI8HTTPUpstreams, _ = os.ReadFile("testdata/api-8/http_upstreams.json") + dataAPI8SSL, _ = os.ReadFile("testdata/api-8/ssl.json") + dataAPI8StreamServerZones, _ = os.ReadFile("testdata/api-8/stream_server_zones.json") + dataAPI8StreamUpstreams, _ = os.ReadFile("testdata/api-8/stream_upstreams.json") + dataAPI8Resolvers, _ = os.ReadFile("testdata/api-8/resolvers.json") + data404, _ = os.ReadFile("testdata/404.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataAPI8APIVersions": dataAPI8APIVersions, + "dataAPI8Connections": dataAPI8Connections, + 
"dataAPI8EndpointsHTTP": dataAPI8EndpointsHTTP, + "dataAPI8EndpointsRoot": dataAPI8EndpointsRoot, + "dataAPI8EndpointsStream": dataAPI8EndpointsStream, + "dataAPI8HTTPCaches": dataAPI8HTTPCaches, + "dataAPI8HTTPLocationZones": dataAPI8HTTPLocationZones, + "dataAPI8HTTPRequests": dataAPI8HTTPRequests, + "dataAPI8HTTPServerZones": dataAPI8HTTPServerZones, + "dataAPI8HTTPUpstreams": dataAPI8HTTPUpstreams, + "dataAPI8SSL": dataAPI8SSL, + "dataAPI8StreamServerZones": dataAPI8StreamServerZones, + "dataAPI8StreamUpstreams": dataAPI8StreamUpstreams, + "dataAPI8Resolvers": dataAPI8Resolvers, + "data404": data404, + } { + require.NotNilf(t, data, name) + } +} + +func TestNginxPlus_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nginx := New() + nginx.Config = test.config + + if test.wantFail { + assert.False(t, nginx.Init()) + } else { + assert.True(t, nginx.Init()) + } + }) + } +} + +func TestNginxPlus_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (nginx *NginxPlus, cleanup func()) + }{ + "success when all requests OK": { + wantFail: false, + prepare: caseAPI8AllRequestsOK, + }, + "success when all requests except stream OK": { + wantFail: false, + prepare: caseAPI8AllRequestsExceptStreamOK, + }, + "fail on invalid data response": { + wantFail: true, + prepare: caseInvalidDataResponse, + }, + "fail on connection refused": { + wantFail: true, + prepare: caseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nginx, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, nginx.Check()) + } else { + assert.True(t, nginx.Check()) + } + }) + } +} + +func TestNginxPlus_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (nginx *NginxPlus, cleanup func()) + wantNumOfCharts int + wantMetrics map[string]int64 + }{ + "success when all requests OK": { + prepare: caseAPI8AllRequestsOK, + wantNumOfCharts: len(baseCharts) + + len(httpCacheChartsTmpl) + + len(httpServerZoneChartsTmpl) + + len(httpLocationZoneChartsTmpl)*2 + + len(httpUpstreamChartsTmpl) + + len(httpUpstreamServerChartsTmpl)*2 + + len(streamServerZoneChartsTmpl) + + len(streamUpstreamChartsTmpl) + + len(streamUpstreamServerChartsTmpl)*2 + + len(resolverZoneChartsTmpl)*2, + wantMetrics: map[string]int64{ + "connections_accepted": 6079, + "connections_active": 1, + "connections_dropped": 0, + "connections_idle": 8, + "http_cache_cache_backend_bypassed_bytes": 67035, + "http_cache_cache_backend_bypassed_responses": 109, + "http_cache_cache_backend_served_bytes": 0, + "http_cache_cache_backend_served_responses": 0, + "http_cache_cache_backend_size": 0, + "http_cache_cache_backend_state_cold": 0, + "http_cache_cache_backend_state_warm": 1, + "http_cache_cache_backend_written_bytes": 0, + "http_cache_cache_backend_written_responses": 0, + "http_location_zone_server_api_bytes_received": 1854427, + "http_location_zone_server_api_bytes_sent": 4668778, + "http_location_zone_server_api_requests": 9188, + "http_location_zone_server_api_requests_discarded": 0, + "http_location_zone_server_api_responses": 9188, + "http_location_zone_server_api_responses_1xx": 0, + 
"http_location_zone_server_api_responses_2xx": 9187, + "http_location_zone_server_api_responses_3xx": 0, + "http_location_zone_server_api_responses_4xx": 1, + "http_location_zone_server_api_responses_5xx": 0, + "http_location_zone_server_dashboard_bytes_received": 0, + "http_location_zone_server_dashboard_bytes_sent": 0, + "http_location_zone_server_dashboard_requests": 0, + "http_location_zone_server_dashboard_requests_discarded": 0, + "http_location_zone_server_dashboard_responses": 0, + "http_location_zone_server_dashboard_responses_1xx": 0, + "http_location_zone_server_dashboard_responses_2xx": 0, + "http_location_zone_server_dashboard_responses_3xx": 0, + "http_location_zone_server_dashboard_responses_4xx": 0, + "http_location_zone_server_dashboard_responses_5xx": 0, + "http_requests_current": 1, + "http_requests_total": 8363, + "http_server_zone_server_backend_bytes_received": 1773834, + "http_server_zone_server_backend_bytes_sent": 4585734, + "http_server_zone_server_backend_requests": 8962, + "http_server_zone_server_backend_requests_discarded": 0, + "http_server_zone_server_backend_requests_processing": 1, + "http_server_zone_server_backend_responses": 8961, + "http_server_zone_server_backend_responses_1xx": 0, + "http_server_zone_server_backend_responses_2xx": 8960, + "http_server_zone_server_backend_responses_3xx": 0, + "http_server_zone_server_backend_responses_4xx": 1, + "http_server_zone_server_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_active": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_received": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_sent": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_downtime": 1020, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_header_time": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_requests": 26, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_response_time": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_1xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_2xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_3xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_4xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_checking": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_down": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_draining": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unavail": 1, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unhealthy": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_up": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_active": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_received": 86496, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_sent": 9180, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_downtime": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_header_time": 1, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_requests": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_response_time": 1, 
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_1xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_2xx": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_3xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_4xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_checking": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_down": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_draining": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unavail": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unhealthy": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_up": 1, + "http_upstream_backend_zone_http_backend_keepalive": 0, + "http_upstream_backend_zone_http_backend_peers": 2, + "http_upstream_backend_zone_http_backend_zombies": 0, + "resolver_zone_resolver-http_requests_addr": 0, + "resolver_zone_resolver-http_requests_name": 0, + "resolver_zone_resolver-http_requests_srv": 2939408, + "resolver_zone_resolver-http_responses_formerr": 0, + "resolver_zone_resolver-http_responses_noerror": 0, + "resolver_zone_resolver-http_responses_notimp": 0, + "resolver_zone_resolver-http_responses_nxdomain": 2939404, + "resolver_zone_resolver-http_responses_refused": 0, + "resolver_zone_resolver-http_responses_servfail": 0, + "resolver_zone_resolver-http_responses_timedout": 4, + "resolver_zone_resolver-http_responses_unknown": 0, + "resolver_zone_resolver-stream_requests_addr": 0, + "resolver_zone_resolver-stream_requests_name": 638797, + "resolver_zone_resolver-stream_requests_srv": 0, + "resolver_zone_resolver-stream_responses_formerr": 0, + "resolver_zone_resolver-stream_responses_noerror": 433136, + "resolver_zone_resolver-stream_responses_notimp": 0, + "resolver_zone_resolver-stream_responses_nxdomain": 40022, + "resolver_zone_resolver-stream_responses_refused": 165639, + "resolver_zone_resolver-stream_responses_servfail": 0, + "resolver_zone_resolver-stream_responses_timedout": 0, + "resolver_zone_resolver-stream_responses_unknown": 0, + "ssl_handshake_timeout": 4, + "ssl_handshakes": 15804607, + "ssl_handshakes_failed": 37862, + "ssl_no_common_cipher": 24, + "ssl_no_common_protocol": 16648, + "ssl_peer_rejected_cert": 0, + "ssl_session_reuses": 13096060, + "ssl_verify_failures_expired_cert": 0, + "ssl_verify_failures_hostname_mismatch": 0, + "ssl_verify_failures_other": 0, + "ssl_verify_failures_no_cert": 0, + "ssl_verify_failures_revoked_cert": 0, + "stream_server_zone_tcp_server_bytes_received": 0, + "stream_server_zone_tcp_server_bytes_sent": 0, + "stream_server_zone_tcp_server_connections": 0, + "stream_server_zone_tcp_server_connections_discarded": 0, + "stream_server_zone_tcp_server_connections_processing": 0, + "stream_server_zone_tcp_server_sessions": 0, + "stream_server_zone_tcp_server_sessions_2xx": 0, + "stream_server_zone_tcp_server_sessions_4xx": 0, + "stream_server_zone_tcp_server_sessions_5xx": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_active": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_bytes_received": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_bytes_sent": 0, + 
"stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_connections": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_downtime": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_checking": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_down": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_unavail": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_unhealthy": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_up": 1, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_active": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_bytes_received": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_bytes_sent": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_connections": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_downtime": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_checking": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_down": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_unavail": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_unhealthy": 0, + "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_up": 1, + "stream_upstream_stream_backend_zone_tcp_servers_peers": 2, + "stream_upstream_stream_backend_zone_tcp_servers_zombies": 0, + }, + }, + "success when all requests except stream OK": { + prepare: caseAPI8AllRequestsExceptStreamOK, + wantNumOfCharts: len(baseCharts) + + len(httpCacheChartsTmpl) + + len(httpServerZoneChartsTmpl) + + len(httpLocationZoneChartsTmpl)*2 + + len(httpUpstreamChartsTmpl) + + len(httpUpstreamServerChartsTmpl)*2 + + len(resolverZoneChartsTmpl)*2, + wantMetrics: map[string]int64{ + "connections_accepted": 6079, + "connections_active": 1, + "connections_dropped": 0, + "connections_idle": 8, + "http_cache_cache_backend_bypassed_bytes": 67035, + "http_cache_cache_backend_bypassed_responses": 109, + "http_cache_cache_backend_served_bytes": 0, + "http_cache_cache_backend_served_responses": 0, + "http_cache_cache_backend_size": 0, + "http_cache_cache_backend_state_cold": 0, + "http_cache_cache_backend_state_warm": 1, + "http_cache_cache_backend_written_bytes": 0, + "http_cache_cache_backend_written_responses": 0, + "http_location_zone_server_api_bytes_received": 1854427, + "http_location_zone_server_api_bytes_sent": 4668778, + "http_location_zone_server_api_requests": 9188, + "http_location_zone_server_api_requests_discarded": 0, + "http_location_zone_server_api_responses": 9188, + "http_location_zone_server_api_responses_1xx": 0, + "http_location_zone_server_api_responses_2xx": 9187, + "http_location_zone_server_api_responses_3xx": 0, + "http_location_zone_server_api_responses_4xx": 1, + "http_location_zone_server_api_responses_5xx": 0, + "http_location_zone_server_dashboard_bytes_received": 0, + "http_location_zone_server_dashboard_bytes_sent": 0, + "http_location_zone_server_dashboard_requests": 0, + "http_location_zone_server_dashboard_requests_discarded": 0, + "http_location_zone_server_dashboard_responses": 0, + "http_location_zone_server_dashboard_responses_1xx": 0, + "http_location_zone_server_dashboard_responses_2xx": 0, + 
"http_location_zone_server_dashboard_responses_3xx": 0, + "http_location_zone_server_dashboard_responses_4xx": 0, + "http_location_zone_server_dashboard_responses_5xx": 0, + "http_requests_current": 1, + "http_requests_total": 8363, + "http_server_zone_server_backend_bytes_received": 1773834, + "http_server_zone_server_backend_bytes_sent": 4585734, + "http_server_zone_server_backend_requests": 8962, + "http_server_zone_server_backend_requests_discarded": 0, + "http_server_zone_server_backend_requests_processing": 1, + "http_server_zone_server_backend_responses": 8961, + "http_server_zone_server_backend_responses_1xx": 0, + "http_server_zone_server_backend_responses_2xx": 8960, + "http_server_zone_server_backend_responses_3xx": 0, + "http_server_zone_server_backend_responses_4xx": 1, + "http_server_zone_server_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_active": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_received": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_sent": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_downtime": 1020, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_header_time": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_requests": 26, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_response_time": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_1xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_2xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_3xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_4xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_checking": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_down": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_draining": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unavail": 1, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unhealthy": 0, + "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_up": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_active": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_received": 86496, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_sent": 9180, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_downtime": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_header_time": 1, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_requests": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_response_time": 1, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_1xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_2xx": 102, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_3xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_4xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_5xx": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_checking": 0, + 
"http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_down": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_draining": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unavail": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unhealthy": 0, + "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_up": 1, + "http_upstream_backend_zone_http_backend_keepalive": 0, + "http_upstream_backend_zone_http_backend_peers": 2, + "http_upstream_backend_zone_http_backend_zombies": 0, + "resolver_zone_resolver-http_requests_addr": 0, + "resolver_zone_resolver-http_requests_name": 0, + "resolver_zone_resolver-http_requests_srv": 2939408, + "resolver_zone_resolver-http_responses_formerr": 0, + "resolver_zone_resolver-http_responses_noerror": 0, + "resolver_zone_resolver-http_responses_notimp": 0, + "resolver_zone_resolver-http_responses_nxdomain": 2939404, + "resolver_zone_resolver-http_responses_refused": 0, + "resolver_zone_resolver-http_responses_servfail": 0, + "resolver_zone_resolver-http_responses_timedout": 4, + "resolver_zone_resolver-http_responses_unknown": 0, + "resolver_zone_resolver-stream_requests_addr": 0, + "resolver_zone_resolver-stream_requests_name": 638797, + "resolver_zone_resolver-stream_requests_srv": 0, + "resolver_zone_resolver-stream_responses_formerr": 0, + "resolver_zone_resolver-stream_responses_noerror": 433136, + "resolver_zone_resolver-stream_responses_notimp": 0, + "resolver_zone_resolver-stream_responses_nxdomain": 40022, + "resolver_zone_resolver-stream_responses_refused": 165639, + "resolver_zone_resolver-stream_responses_servfail": 0, + "resolver_zone_resolver-stream_responses_timedout": 0, + "resolver_zone_resolver-stream_responses_unknown": 0, + "ssl_handshake_timeout": 4, + "ssl_handshakes": 15804607, + "ssl_handshakes_failed": 37862, + "ssl_no_common_cipher": 24, + "ssl_no_common_protocol": 16648, + "ssl_peer_rejected_cert": 0, + "ssl_session_reuses": 13096060, + "ssl_verify_failures_expired_cert": 0, + "ssl_verify_failures_hostname_mismatch": 0, + "ssl_verify_failures_other": 0, + "ssl_verify_failures_no_cert": 0, + "ssl_verify_failures_revoked_cert": 0, + }, + }, + "fail on invalid data response": { + prepare: caseInvalidDataResponse, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + "fail on connection refused": { + prepare: caseConnectionRefused, + wantNumOfCharts: 0, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nginx, cleanup := test.prepare(t) + defer cleanup() + + mx := nginx.Collect() + + require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { + assert.Equalf(t, test.wantNumOfCharts, len(*nginx.Charts()), "number of charts") + ensureCollectedHasAllChartsDimsVarsIDs(t, nginx, mx) + } + }) + } +} + +func caseAPI8AllRequestsOK(t *testing.T) (*NginxPlus, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathAPIVersions: + _, _ = w.Write(dataAPI8APIVersions) + case fmt.Sprintf(urlPathAPIEndpointsRoot, 8): + _, _ = w.Write(dataAPI8EndpointsRoot) + case fmt.Sprintf(urlPathAPIEndpointsHTTP, 8): + _, _ = w.Write(dataAPI8EndpointsHTTP) + case fmt.Sprintf(urlPathAPIEndpointsStream, 8): + _, _ = w.Write(dataAPI8EndpointsStream) + case fmt.Sprintf(urlPathAPIConnections, 8): + _, _ = w.Write(dataAPI8Connections) + case fmt.Sprintf(urlPathAPISSL, 8): + _, _ = w.Write(dataAPI8SSL) + case 
fmt.Sprintf(urlPathAPIHTTPRequests, 8): + _, _ = w.Write(dataAPI8HTTPRequests) + case fmt.Sprintf(urlPathAPIHTTPServerZones, 8): + _, _ = w.Write(dataAPI8HTTPServerZones) + case fmt.Sprintf(urlPathAPIHTTPLocationZones, 8): + _, _ = w.Write(dataAPI8HTTPLocationZones) + case fmt.Sprintf(urlPathAPIHTTPUpstreams, 8): + _, _ = w.Write(dataAPI8HTTPUpstreams) + case fmt.Sprintf(urlPathAPIHTTPCaches, 8): + _, _ = w.Write(dataAPI8HTTPCaches) + case fmt.Sprintf(urlPathAPIStreamServerZones, 8): + _, _ = w.Write(dataAPI8StreamServerZones) + case fmt.Sprintf(urlPathAPIStreamUpstreams, 8): + _, _ = w.Write(dataAPI8StreamUpstreams) + case fmt.Sprintf(urlPathAPIResolvers, 8): + _, _ = w.Write(dataAPI8Resolvers) + default: + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(data404) + + } + })) + nginx := New() + nginx.URL = srv.URL + require.True(t, nginx.Init()) + + return nginx, srv.Close +} + +func caseAPI8AllRequestsExceptStreamOK(t *testing.T) (*NginxPlus, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathAPIVersions: + _, _ = w.Write(dataAPI8APIVersions) + case fmt.Sprintf(urlPathAPIEndpointsRoot, 8): + _, _ = w.Write(dataAPI8EndpointsRoot) + case fmt.Sprintf(urlPathAPIEndpointsHTTP, 8): + _, _ = w.Write(dataAPI8EndpointsHTTP) + case fmt.Sprintf(urlPathAPIEndpointsStream, 8): + _, _ = w.Write(dataAPI8EndpointsStream) + case fmt.Sprintf(urlPathAPIConnections, 8): + _, _ = w.Write(dataAPI8Connections) + case fmt.Sprintf(urlPathAPISSL, 8): + _, _ = w.Write(dataAPI8SSL) + case fmt.Sprintf(urlPathAPIHTTPRequests, 8): + _, _ = w.Write(dataAPI8HTTPRequests) + case fmt.Sprintf(urlPathAPIHTTPServerZones, 8): + _, _ = w.Write(dataAPI8HTTPServerZones) + case fmt.Sprintf(urlPathAPIHTTPLocationZones, 8): + _, _ = w.Write(dataAPI8HTTPLocationZones) + case fmt.Sprintf(urlPathAPIHTTPUpstreams, 8): + _, _ = w.Write(dataAPI8HTTPUpstreams) + case fmt.Sprintf(urlPathAPIHTTPCaches, 8): + _, _ = w.Write(dataAPI8HTTPCaches) + case fmt.Sprintf(urlPathAPIResolvers, 8): + _, _ = w.Write(dataAPI8Resolvers) + default: + w.WriteHeader(http.StatusNotFound) + _, _ = w.Write(data404) + + } + })) + nginx := New() + nginx.URL = srv.URL + require.True(t, nginx.Init()) + + return nginx, srv.Close +} + +func caseInvalidDataResponse(t *testing.T) (*NginxPlus, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + nginx := New() + nginx.URL = srv.URL + require.True(t, nginx.Init()) + + return nginx, srv.Close +} + +func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) { + t.Helper() + nginx := New() + nginx.URL = "http://127.0.0.1:65001" + require.True(t, nginx.Init()) + + return nginx, func() {} +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, n *NginxPlus, mx map[string]int64) { + for _, chart := range *n.Charts() { + if chart.ID == uptimeChart.ID { + continue + } + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := mx[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/404.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/404.json new file mode 100644 index 00000000000000..d2ed8c9a85bb28 --- /dev/null 
+++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/404.json @@ -0,0 +1,9 @@ +{ + "error": { + "status": 404, + "text": "path not found", + "code": "PathNotFound" + }, + "request_id": "f0d20aca461d043e787ebaa52f018cb2", + "href": "https://nginx.org/en/docs/http/ngx_http_api_module.html" +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/api_versions.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/api_versions.json new file mode 100644 index 00000000000000..9ffc33973ae0c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/api_versions.json @@ -0,0 +1,10 @@ +[ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 +] diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/connections.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/connections.json new file mode 100644 index 00000000000000..490ca13fc4e3bc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/connections.json @@ -0,0 +1,6 @@ +{ + "accepted": 6079, + "dropped": 0, + "active": 1, + "idle": 8 +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_http.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_http.json new file mode 100644 index 00000000000000..57c4e4aa212c6f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_http.json @@ -0,0 +1,10 @@ +[ + "requests", + "server_zones", + "location_zones", + "caches", + "limit_conns", + "limit_reqs", + "keyvals", + "upstreams" +] diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_root.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_root.json new file mode 100644 index 00000000000000..b185c55f2e47c7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_root.json @@ -0,0 +1,10 @@ +[ + "nginx", + "processes", + "connections", + "slabs", + "http", + "stream", + "resolvers", + "ssl" +] diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_stream.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_stream.json new file mode 100644 index 00000000000000..0da092376d2a68 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/endpoints_stream.json @@ -0,0 +1,6 @@ +[ + "server_zones", + "limit_conns", + "keyvals", + "upstreams" +] diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_caches.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_caches.json new file mode 100644 index 00000000000000..dd2d03adffbd70 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_caches.json @@ -0,0 +1,40 @@ +{ + "cache_backend": { + "size": 0, + "cold": false, + "hit": { + "responses": 0, + "bytes": 0 + }, + "stale": { + "responses": 0, + "bytes": 0 + }, + "updating": { + "responses": 0, + "bytes": 0 + }, + "revalidated": { + "responses": 0, + "bytes": 0 + }, + "miss": { + "responses": 109, + "bytes": 67035, + "responses_written": 0, + "bytes_written": 0 + }, + "expired": { + "responses": 0, + "bytes": 0, + "responses_written": 0, + "bytes_written": 0 + }, + "bypass": { + "responses": 0, + "bytes": 0, + "responses_written": 0, + "bytes_written": 0 + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_location_zones.json 
b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_location_zones.json new file mode 100644 index 00000000000000..8812e6dffb1ad3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_location_zones.json @@ -0,0 +1,35 @@ +{ + "server_api": { + "requests": 9188, + "responses": { + "1xx": 0, + "2xx": 9187, + "3xx": 0, + "4xx": 1, + "5xx": 0, + "codes": { + "200": 9187, + "404": 1 + }, + "total": 9188 + }, + "discarded": 0, + "received": 1854427, + "sent": 4668778 + }, + "server_dashboard": { + "requests": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "codes": {}, + "total": 0 + }, + "discarded": 0, + "received": 0, + "sent": 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_requests.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_requests.json new file mode 100644 index 00000000000000..0c2a175037326f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_requests.json @@ -0,0 +1,4 @@ +{ + "total": 8363, + "current": 1 +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_server_zones.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_server_zones.json new file mode 100644 index 00000000000000..c253892106cecb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_server_zones.json @@ -0,0 +1,21 @@ +{ + "server_backend": { + "processing": 1, + "requests": 8962, + "responses": { + "1xx": 0, + "2xx": 8960, + "3xx": 0, + "4xx": 1, + "5xx": 0, + "codes": { + "200": 8960, + "404": 1 + }, + "total": 8961 + }, + "discarded": 0, + "received": 1773834, + "sent": 4585734 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_upstreams.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_upstreams.json new file mode 100644 index 00000000000000..0f7ba713566700 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/http_upstreams.json @@ -0,0 +1,76 @@ +{ + "backend": { + "peers": [ + { + "id": 0, + "server": "127.0.0.1:81", + "name": "127.0.0.1:81", + "backup": false, + "weight": 5, + "state": "unavail", + "active": 0, + "requests": 26, + "header_time": 0, + "response_time": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "codes": {}, + "total": 0 + }, + "sent": 0, + "received": 0, + "fails": 26, + "unavail": 1, + "health_checks": { + "checks": 0, + "fails": 0, + "unhealthy": 0 + }, + "downtime": 1020702, + "downstart": "2022-11-18T19:17:09.258Z", + "selected": "2022-11-18T19:33:50Z" + }, + { + "id": 1, + "server": "127.0.0.1:82", + "name": "127.0.0.1:82", + "backup": false, + "weight": 1, + "state": "up", + "active": 0, + "requests": 102, + "header_time": 1, + "response_time": 1, + "responses": { + "1xx": 0, + "2xx": 102, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "codes": { + "200": 102 + }, + "total": 102 + }, + "sent": 9180, + "received": 86496, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 0, + "fails": 0, + "unhealthy": 0 + }, + "downtime": 0, + "selected": "2022-11-18T19:34:00Z" + } + ], + "keepalive": 0, + "zombies": 0, + "zone": "http_backend" + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/nginx.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/nginx.json new file mode 100644 index 00000000000000..4480c2bcce1710 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/nginx.json @@ -0,0 +1,10 @@ +{ + "version": "1.21.6", + "build": "nginx-plus-r27-p1", + "address": "127.0.0.1", + "generation": 1, + "load_timestamp": "2022-11-19T14:38:38.676Z", + "timestamp": "2022-11-19T14:38:57.031Z", + "pid": 2254633, + "ppid": 2254629 +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/resolvers.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/resolvers.json new file mode 100644 index 00000000000000..ad66f558427135 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/resolvers.json @@ -0,0 +1,36 @@ +{ + "resolver-http": { + "requests": { + "name": 0, + "srv": 2939408, + "addr": 0 + }, + "responses": { + "noerror": 0, + "formerr": 0, + "servfail": 0, + "nxdomain": 2939404, + "notimp": 0, + "refused": 0, + "timedout": 4, + "unknown": 0 + } + }, + "resolver-stream": { + "requests": { + "name": 638797, + "srv": 0, + "addr": 0 + }, + "responses": { + "noerror": 433136, + "formerr": 0, + "servfail": 0, + "nxdomain": 40022, + "notimp": 0, + "refused": 165639, + "timedout": 0, + "unknown": 0 + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/ssl.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/ssl.json new file mode 100644 index 00000000000000..2ca8a6a3e3e83b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/ssl.json @@ -0,0 +1,16 @@ +{ + "handshakes": 15804607, + "session_reuses": 13096060, + "handshakes_failed": 37862, + "no_common_protocol": 16648, + "no_common_cipher": 24, + "handshake_timeout": 4, + "peer_rejected_cert": 0, + "verify_failures": { + "no_cert": 0, + "expired_cert": 0, + "revoked_cert": 0, + "hostname_mismatch": 0, + "other": 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_server_zones.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_server_zones.json new file mode 100644 index 00000000000000..0c7df7873cfc81 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_server_zones.json @@ -0,0 +1,15 @@ +{ + "tcp_server": { + "processing": 0, + "connections": 0, + "sessions": { + "2xx": 0, + "4xx": 0, + "5xx": 0, + "total": 0 + }, + "discarded": 0, + "received": 0, + "sent": 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_upstreams.json b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_upstreams.json new file mode 100644 index 00000000000000..707ad4db785517 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxplus/testdata/api-8/stream_upstreams.json @@ -0,0 +1,48 @@ +{ + "stream_backend": { + "peers": [ + { + "id": 0, + "server": "127.0.0.1:12346", + "name": "127.0.0.1:12346", + "backup": false, + "weight": 1, + "state": "up", + "active": 0, + "connections": 0, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 0, + "fails": 0, + "unhealthy": 0 + }, + "downtime": 0 + }, + { + "id": 1, + "server": "127.0.0.1:12347", + "name": "127.0.0.1:12347", + "backup": false, + "weight": 1, + "state": "up", + "active": 0, + "connections": 0, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 0, + "fails": 0, + "unhealthy": 0 + }, + "downtime": 0 + } + ], + "zombies": 0, + "zone": "tcp_servers" + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/README.md 
b/src/go/collectors/go.d.plugin/modules/nginxvts/README.md
new file mode 120000
index 00000000000000..e185fa81b4be20
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/README.md
@@ -0,0 +1 @@
+integrations/nginx_vts.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/charts.go b/src/go/collectors/go.d.plugin/modules/nginxvts/charts.go
new file mode 100644
index 00000000000000..367a534e6669d4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/charts.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import "github.com/netdata/go.d.plugin/agent/module"
+
+var mainCharts = module.Charts{
+	{
+		ID:    "requests",
+		Title: "Total requests",
+		Units: "requests/s",
+		Fam:   "requests",
+		Ctx:   "nginxvts.requests_total",
+		Dims: module.Dims{
+			{ID: "connections_requests", Name: "requests", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "active_connections",
+		Title: "Active connections",
+		Units: "connections",
+		Fam:   "connections",
+		Ctx:   "nginxvts.active_connections",
+		Dims: module.Dims{
+			{ID: "connections_active", Name: "active"},
+		},
+	},
+	{
+		ID:    "connections",
+		Title: "Total connections",
+		Units: "connections/s",
+		Fam:   "connections",
+		Ctx:   "nginxvts.connections_total",
+		Dims: module.Dims{
+			{ID: "connections_reading", Name: "reading", Algo: module.Incremental},
+			{ID: "connections_writing", Name: "writing", Algo: module.Incremental},
+			{ID: "connections_waiting", Name: "waiting", Algo: module.Incremental},
+			{ID: "connections_accepted", Name: "accepted", Algo: module.Incremental},
+			{ID: "connections_handled", Name: "handled", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "uptime",
+		Title: "Uptime",
+		Units: "seconds",
+		Fam:   "uptime",
+		Ctx:   "nginxvts.uptime",
+		Dims: module.Dims{
+			{ID: "uptime", Name: "uptime"},
+		},
+	},
+}
+var sharedZonesCharts = module.Charts{
+	{
+		ID:    "shared_memory_size",
+		Title: "Shared memory size",
+		Units: "bytes",
+		Fam:   "shared memory",
+		Ctx:   "nginxvts.shm_usage",
+		Dims: module.Dims{
+			{ID: "sharedzones_maxsize", Name: "max"},
+			{ID: "sharedzones_usedsize", Name: "used"},
+		},
+	},
+	{
+		ID:    "shared_memory_used_node",
+		Title: "Number of nodes using shared memory",
+		Units: "nodes",
+		Fam:   "shared memory",
+		Ctx:   "nginxvts.shm_used_node",
+		Dims: module.Dims{
+			{ID: "sharedzones_usednode", Name: "used"},
+		},
+	},
+}
+
+var serverZonesCharts = module.Charts{
+	{
+		ID:    "server_requests_total",
+		Title: "Total number of client requests",
+		Units: "requests/s",
+		Fam:   "serverzones",
+		Ctx:   "nginxvts.server_requests_total",
+		Dims: module.Dims{
+			{ID: "total_requestcounter", Name: "requests", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "server_responses_total",
+		Title: "Total number of responses by code class",
+		Units: "responses/s",
+		Fam:   "serverzones",
+		Ctx:   "nginxvts.server_responses_total",
+		Dims: module.Dims{
+			{ID: "total_responses_1xx", Name: "1xx", Algo: module.Incremental},
+			{ID: "total_responses_2xx", Name: "2xx", Algo: module.Incremental},
+			{ID: "total_responses_3xx", Name: "3xx", Algo: module.Incremental},
+			{ID: "total_responses_4xx", Name: "4xx", Algo: module.Incremental},
+			{ID: "total_responses_5xx", Name: "5xx", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "server_traffic_total",
+		Title: "Total amount of data transferred to and from the server",
+		Units: "bytes/s",
+		Fam:   "serverzones",
+		Ctx:   "nginxvts.server_traffic_total",
+		Dims: module.Dims{
+			{ID: "total_inbytes", Name: "in", Algo: module.Incremental},
+			{ID: "total_outbytes", Name: "out", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "server_cache_total",
+		Title: "Total server cache",
+		Units: "events/s",
+		Fam:   "serverzones",
+		Ctx:   "nginxvts.server_cache_total",
+		Dims: module.Dims{
+			{ID: "total_cache_miss", Name: "miss", Algo: module.Incremental},
+			{ID: "total_cache_bypass", Name: "bypass", Algo: module.Incremental},
+			{ID: "total_cache_expired", Name: "expired", Algo: module.Incremental},
+			{ID: "total_cache_stale", Name: "stale", Algo: module.Incremental},
+			{ID: "total_cache_updating", Name: "updating", Algo: module.Incremental},
+			{ID: "total_cache_revalidated", Name: "revalidated", Algo: module.Incremental},
+			{ID: "total_cache_hit", Name: "hit", Algo: module.Incremental},
+			{ID: "total_cache_scarce", Name: "scarce", Algo: module.Incremental},
+		},
+	},
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/collect.go b/src/go/collectors/go.d.plugin/modules/nginxvts/collect.go
new file mode 100644
index 00000000000000..c3613e7abd412d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/collect.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/netdata/go.d.plugin/pkg/stm"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (vts *NginxVTS) collect() (map[string]int64, error) {
+	ms, err := vts.scrapeVTS()
+	if err != nil {
+		// the error was already logged as a warning in scrapeVTS
+		return nil, nil
+	}
+
+	collected := make(map[string]interface{})
+	vts.collectMain(collected, ms)
+	vts.collectSharedZones(collected, ms)
+	vts.collectServerZones(collected, ms)
+
+	return stm.ToMap(collected), nil
+}
+
+func (vts *NginxVTS) collectMain(collected map[string]interface{}, ms *vtsMetrics) {
+	collected["uptime"] = (ms.NowMsec - ms.LoadMsec) / 1000
+	collected["connections"] = ms.Connections
+}
+
+func (vts *NginxVTS) collectSharedZones(collected map[string]interface{}, ms *vtsMetrics) {
+	collected["sharedzones"] = ms.SharedZones
+}
+
+func (vts *NginxVTS) collectServerZones(collected map[string]interface{}, ms *vtsMetrics) {
+	if !ms.hasServerZones() {
+		return
+	}
+
+	// "*" means all servers
+	collected["total"] = ms.ServerZones["*"]
+}
+
+func (vts *NginxVTS) scrapeVTS() (*vtsMetrics, error) {
+	req, _ := web.NewHTTPRequest(vts.Request)
+
+	var total vtsMetrics
+
+	if err := vts.doOKDecode(req, &total); err != nil {
+		vts.Warning(err)
+		return nil, err
+	}
+	return &total, nil
+}
+
+func (vts *NginxVTS) doOKDecode(req *http.Request, in interface{}) error {
+	resp, err := vts.httpClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+	}
+	defer closeBody(resp)
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+	}
+
+	if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+		return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+	}
+	return nil
+}
+
+func closeBody(resp *http.Response) {
+	if resp != nil && resp.Body != nil {
+		_, _ = io.Copy(io.Discard, resp.Body)
+		_ = resp.Body.Close()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/config_schema.json b/src/go/collectors/go.d.plugin/modules/nginxvts/config_schema.json
new file mode 100644
index 00000000000000..a4b44429f15621
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/config_schema.json
@@ -0,0 +1,59 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/nginxvts job configuration schema.",
+  "type":
"object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/init.go b/src/go/collectors/go.d.plugin/modules/nginxvts/init.go new file mode 100644 index 00000000000000..7ebf049abbe7c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxvts/init.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nginxvts + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (vts NginxVTS) validateConfig() error { + if vts.URL == "" { + return errors.New("URL not set") + } + + if _, err := web.NewHTTPRequest(vts.Request); err != nil { + return err + } + return nil +} + +func (vts NginxVTS) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(vts.Client) +} + +func (vts NginxVTS) initCharts() (*module.Charts, error) { + charts := module.Charts{} + + if err := charts.Add(*mainCharts.Copy()...); err != nil { + return nil, err + } + + if err := charts.Add(*sharedZonesCharts.Copy()...); err != nil { + return nil, err + } + + if err := charts.Add(*serverZonesCharts.Copy()...); err != nil { + return nil, err + } + + if len(charts) == 0 { + return nil, errors.New("zero charts") + } + return &charts, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/integrations/nginx_vts.md b/src/go/collectors/go.d.plugin/modules/nginxvts/integrations/nginx_vts.md new file mode 100644 index 00000000000000..7415d0adb53853 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxvts/integrations/nginx_vts.md @@ -0,0 +1,233 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginxvts/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/nginxvts/metadata.yaml" +sidebar_label: "NGINX VTS" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NGINX VTS + + +<img src="https://netdata.cloud/img/nginx.svg" width="150"/> + + +Plugin: go.d.plugin +Module: nginxvts + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts). + + +It sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis), +which is a built-in location that provides metrics about the NGINX VTS server. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects NGINX instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NGINX VTS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxvts.requests_total | requests | requests/s |
+| nginxvts.active_connections | active | connections |
+| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |
+| nginxvts.uptime | uptime | seconds |
+| nginxvts.shm_usage | max, used | bytes |
+| nginxvts.shm_used_node | used | nodes |
+| nginxvts.server_requests_total | requests | requests/s |
+| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| nginxvts.server_traffic_total | in, out | bytes/s |
+| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure nginx-vts module
+
+To configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nginxvts.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nginxvts.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/status/format/json | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/status/format/json
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/status/format/json
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1/status/format/json
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/status/format/json
+
+  - name: remote
+    url: http://192.0.2.1/status/format/json
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m nginxvts
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/metadata.yaml b/src/go/collectors/go.d.plugin/modules/nginxvts/metadata.yaml
new file mode 100644
index 00000000000000..bb602863b0fe0a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/metadata.yaml
@@ -0,0 +1,264 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-nginxvts
+      plugin_name: go.d.plugin
+      module_name: nginxvts
+      monitored_instance:
+        name: NGINX VTS
+        link: https://www.nginx.com/
+        icon_filename: nginx.svg
+        categories:
+          - data-collection.web-servers-and-web-proxies
+      keywords:
+        - webserver
+      related_resources:
+        integrations:
+          list:
+            - plugin_name: go.d.plugin
+              module_name: weblog
+            - plugin_name: go.d.plugin
+              module_name: httpcheck
+            - plugin_name: apps.plugin
+              module_name: apps
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors NGINX servers with [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).
+        method_description: |
+          It sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis),
+          which is a built-in location that provides metrics about the NGINX VTS server.
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            By default, it detects NGINX instances running on localhost.
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Configure nginx-vts module
+            description: |
+              To configure nginx-vts, see the [installation instructions](https://github.com/vozlt/nginx-module-vts#installation).
+      configuration:
+        file:
+          name: go.d/nginxvts.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1/status/format/json
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: GET
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/status/format/json
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/status/format/json
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: |
+                Do not validate server certificate chain and hostname.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1/status/format/json
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1/status/format/json
+
+                  - name: remote
+                    url: http://192.0.2.1/status/format/json
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: nginxvts.requests_total
+              description: Total requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: nginxvts.active_connections
+              description: Active connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: nginxvts.connections_total
+              description: Total connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: reading
+                - name: writing
+                - name: waiting
+                - name: accepted
+                - name: handled
+            - name: nginxvts.uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: uptime
+            - name: nginxvts.shm_usage
+              description: Shared memory size
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: max
+                - name: used
+            - name: nginxvts.shm_used_node
+              description: Number of nodes using shared memory
+              unit: nodes
+              chart_type: line
+              dimensions:
+                - name: used
+            - name: nginxvts.server_requests_total
+              description: Total number of client requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: nginxvts.server_responses_total
+              description: Total number of responses by code class
+              unit: responses/s
+              chart_type: line
+              dimensions:
+                - name: 1xx
+                - name: 2xx
+                - name: 3xx
+                - name: 4xx
+                - name: 5xx
+            - name: nginxvts.server_traffic_total
+              description: Total amount of data transferred to and from the server
+              unit: bytes/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: nginxvts.server_cache_total
+              description: Total server cache
+              unit: events/s
+              chart_type: line
+              dimensions:
+                - name: miss
+                - name: bypass
+                - name: expired
+                - name: stale
+                - name: updating
+                - name: revalidated
+                - name: hit
+                - name: scarce
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/metrics.go b/src/go/collectors/go.d.plugin/modules/nginxvts/metrics.go
new file mode 100644
index 00000000000000..2674d4bbe40ebd
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/metrics.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+// NginxVTS metrics: https://github.com/vozlt/nginx-module-vts#json
+
+type vtsMetrics struct {
+	// HostName string
+	// NginxVersion string
+	LoadMsec    int64
+	NowMsec     int64
+	Uptime      int64
+	Connections struct {
+		Active   int64 `stm:"active"`
+		Reading  int64 `stm:"reading"`
+		Writing  int64 `stm:"writing"`
+		Waiting  int64 `stm:"waiting"`
+		Accepted int64 `stm:"accepted"`
+		Handled  int64 `stm:"handled"`
+		Requests int64 `stm:"requests"`
+	} `stm:"connections"`
+	SharedZones struct {
+		// Name string
+		MaxSize  int64 `stm:"maxsize"`
+		UsedSize int64 `stm:"usedsize"`
+		UsedNode int64 `stm:"usednode"`
+	}
+	ServerZones map[string]Server
+}
+
+func (m vtsMetrics) hasServerZones() bool { return m.ServerZones != nil }
+
+// Server is for total Nginx server
+type Server struct {
+	RequestCounter int64 `stm:"requestcounter"`
+	InBytes        int64 `stm:"inbytes"`
+	OutBytes       int64 `stm:"outbytes"`
+	Responses      struct {
+		Resp1xx     int64 `stm:"responses_1xx" json:"1xx"`
+		Resp2xx     int64 `stm:"responses_2xx" json:"2xx"`
+		Resp3xx     int64 `stm:"responses_3xx" json:"3xx"`
+		Resp4xx     int64 `stm:"responses_4xx" json:"4xx"`
+		Resp5xx     int64 `stm:"responses_5xx" json:"5xx"`
+		Miss        int64 `stm:"cache_miss"`
+		Bypass      int64 `stm:"cache_bypass"`
+		Expired     int64 `stm:"cache_expired"`
+		Stale       int64 `stm:"cache_stale"`
+		Updating    int64 `stm:"cache_updating"`
+		Revalidated int64 `stm:"cache_revalidated"`
+		Hit         int64 `stm:"cache_hit"`
+		Scarce      int64 `stm:"cache_scarce"`
+	} `stm:""`
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts.go b/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts.go
new file mode 100644
index 00000000000000..1cc3a601416ad4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+	_ "embed"
+	"net/http"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("nginxvts", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			UpdateEvery: 1,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+func New() *NginxVTS {
+	return &NginxVTS{
+		Config: Config{
+			HTTP: web.HTTP{
+				Request: web.Request{
+					URL: "http://localhost/status/format/json",
+				},
+				Client: web.Client{
+					Timeout: web.Duration{Duration: time.Second},
+				},
+			},
+		},
+	}
+}
+
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+type NginxVTS struct {
+	module.Base
+	Config `yaml:",inline"`
+
+	httpClient *http.Client
+	charts     *module.Charts
+}
+
+func (vts *NginxVTS) Cleanup() {
+	if vts.httpClient == nil {
+		return
+	}
+	vts.httpClient.CloseIdleConnections()
+}
+
+func (vts *NginxVTS) Init() bool {
+	err := vts.validateConfig()
+	if err != nil {
+		vts.Errorf("check configuration: %v", err)
+		return false
+	}
+
+	httpClient, err := vts.initHTTPClient()
+	if err != nil {
+		vts.Errorf("init HTTP client: %v", err)
+		return false
+	}
+	vts.httpClient = httpClient
+
+	charts, err := vts.initCharts()
+	if err != nil {
+		vts.Errorf("init charts: %v", err)
+		return false
+	}
+	vts.charts = charts
+
+	return true
+}
+
+func (vts *NginxVTS) Check() bool {
+	return len(vts.Collect()) > 0
+}
+
+func (vts *NginxVTS) Charts() *module.Charts {
+	return vts.charts
+}
+
+func (vts *NginxVTS) Collect() map[string]int64 {
+	mx, err := vts.collect()
+	if err != nil {
+		vts.Error(err)
+		return nil
+	}
+	if len(mx) == 0 {
+		return nil
+	}
+	return mx
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts_test.go b/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts_test.go
new file mode 100644
index 00000000000000..ef204ad7561842
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nginxvts/nginxvts_test.go
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/tlscfg"
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	v0118Response, _ = os.ReadFile("testdata/vts-v0.1.18.json")
+)
+
+func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+	for name, data := range map[string][]byte{
+		"v0118Response": v0118Response,
+	} {
+		require.NotNilf(t, data, name)
+	}
+}
+
+func TestNew(t *testing.T) {
+	assert.Implements(t, (*module.Module)(nil), New())
+}
+
+func TestNginxVTS_Init(t *testing.T) {
+	tests := map[string]struct {
+		config          Config
+		wantNumOfCharts int
+		wantFail        bool
+	}{
+		"default": {
+			wantNumOfCharts: numOfCharts(
+				mainCharts,
+				sharedZonesCharts,
+				serverZonesCharts,
+			),
+			config: New().Config,
+		},
+		"URL not set": {
+			wantFail: true,
+			config: Config{
+				HTTP: web.HTTP{
+					Request: web.Request{URL: ""},
+				}},
+		},
+		"invalid TLSCA": {
+			wantFail: true,
+			config: Config{
+				HTTP:
web.HTTP{ + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + es := New() + es.Config = test.config + + if test.wantFail { + assert.False(t, es.Init()) + } else { + assert.True(t, es.Init()) + assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) + } + }) + } +} + +func TestNginxVTS_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (vts *NginxVTS, cleanup func()) + wantFail bool + }{ + "valid data": {prepare: prepareNginxVTSValidData}, + "invalid data": {prepare: prepareNginxVTSInvalidData, wantFail: true}, + "404": {prepare: prepareNginxVTS404, wantFail: true}, + "connection refused": {prepare: prepareNginxVTSConnectionRefused, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + vts, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, vts.Check()) + } else { + assert.True(t, vts.Check()) + } + }) + } +} + +func TestNginxVTS_Charts(t *testing.T) { + assert.Nil(t, New().Charts()) +} + +func TestNginxVTS_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestNginxVTS_Collect(t *testing.T) { + tests := map[string]struct { + // prepare func() *NginxVTS + prepare func(t *testing.T) (vts *NginxVTS, cleanup func()) + wantCollected map[string]int64 + checkCharts bool + }{ + "right metrics": { + prepare: prepareNginxVTSValidData, + wantCollected: map[string]int64{ + // Nginx running time + "uptime": 319, + // Nginx connections + "connections_active": 2, + "connections_reading": 0, + "connections_writing": 1, + "connections_waiting": 1, + "connections_accepted": 12, + "connections_handled": 12, + "connections_requests": 17, + // Nginx shared memory + "sharedzones_maxsize": 1048575, + "sharedzones_usedsize": 45799, + "sharedzones_usednode": 13, + // Nginx traffic + "total_requestcounter": 2, + "total_inbytes": 156, + "total_outbytes": 692, + // Nginx response code + "total_responses_1xx": 1, + "total_responses_2xx": 2, + "total_responses_3xx": 3, + "total_responses_4xx": 4, + "total_responses_5xx": 5, + // Nginx cache + "total_cache_miss": 2, + "total_cache_bypass": 4, + "total_cache_expired": 6, + "total_cache_stale": 8, + "total_cache_updating": 10, + "total_cache_revalidated": 12, + "total_cache_hit": 14, + "total_cache_scarce": 16, + }, + checkCharts: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + vts, cleanup := test.prepare(t) + defer cleanup() + + collected := vts.Collect() + + assert.Equal(t, test.wantCollected, collected) + if test.checkCharts { + ensureCollectedHasAllChartsDimsVarsIDs(t, vts, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vts *NginxVTS, collected map[string]int64) { + for _, chart := range *vts.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxVTS, cleanup func()) { + t.Helper() + vts = createNginxVTS() + srv := prepareNginxVTSEndpoint() + vts.URL = srv.URL + + require.True(t, vts.Init()) + + return vts, srv.Close +} + +func 
prepareNginxVTSValidData(t *testing.T) (vts *NginxVTS, cleanup func()) { + return prepareNginxVTS(t, New) +} + +func prepareNginxVTSInvalidData(t *testing.T) (*NginxVTS, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + vts := New() + vts.URL = srv.URL + require.True(t, vts.Init()) + + return vts, srv.Close +} + +func prepareNginxVTS404(t *testing.T) (*NginxVTS, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + vts := New() + vts.URL = srv.URL + require.True(t, vts.Init()) + + return vts, srv.Close +} + +func prepareNginxVTSConnectionRefused(t *testing.T) (*NginxVTS, func()) { + t.Helper() + vts := New() + vts.URL = "http://127.0.0.1:18080" + require.True(t, vts.Init()) + + return vts, func() {} +} + +func prepareNginxVTSEndpoint() *httptest.Server { + return httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/": + _, _ = w.Write(v0118Response) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} + +func numOfCharts(charts ...module.Charts) (num int) { + for _, v := range charts { + num += len(v) + } + return num +} diff --git a/src/go/collectors/go.d.plugin/modules/nginxvts/testdata/vts-v0.1.18.json b/src/go/collectors/go.d.plugin/modules/nginxvts/testdata/vts-v0.1.18.json new file mode 100644 index 00000000000000..cdc331d5f061c7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nginxvts/testdata/vts-v0.1.18.json @@ -0,0 +1,44 @@ +{ + "hostName": "Web", + "nginxVersion": "1.18.0", + "loadMsec": 1606489796895, + "nowMsec": 1606490116734, + "connections": { + "active": 2, + "reading": 0, + "writing": 1, + "waiting": 1, + "accepted": 12, + "handled": 12, + "requests": 17 + }, + "sharedZones": { + "name": "ngx_http_vhost_traffic_status", + "maxSize": 1048575, + "usedSize": 45799, + "usedNode": 13 + }, + "serverZones": { + "*": { + "requestCounter": 2, + "inBytes": 156, + "outBytes": 692, + "responses": { + "1xx": 1, + "2xx": 2, + "3xx": 3, + "4xx": 4, + "5xx": 5, + "miss": 2, + "bypass": 4, + "expired": 6, + "stale": 8, + "updating": 10, + "revalidated": 12, + "hit": 14, + "scarce": 16 + } + } + } +} + diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/README.md b/src/go/collectors/go.d.plugin/modules/ntpd/README.md new file mode 120000 index 00000000000000..bad92b03a2613a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/README.md @@ -0,0 +1 @@ +integrations/ntpd.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/charts.go b/src/go/collectors/go.d.plugin/modules/ntpd/charts.go new file mode 100644 index 00000000000000..dc9d183d04ddee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/charts.go @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ntpd + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioSystemOffset = module.Priority + iota + prioSystemJitter + prioSystemFrequency + prioSystemWander + prioSystemRootDelay + prioSystemRootDispersion + prioSystemStratum + prioSystemTimeConstant + prioSystemPrecision + + prioPeerOffset + prioPeerDelay + prioPeerDispersion + prioPeerJitter + prioPeerXleave + prioPeerRootDelay + prioPeerRootDispersion + prioPeerStratum + prioPeerHostMode + prioPeerPeerMode + prioPeerHostPoll + prioPeerPeerPoll + 
prioPeerPrecision +) + +var ( + systemCharts = module.Charts{ + systemOffsetChart.Copy(), + systemJitterChart.Copy(), + systemFrequencyChart.Copy(), + systemWanderChart.Copy(), + systemRootDelayChart.Copy(), + systemRootDispersionChart.Copy(), + systemStratumChart.Copy(), + systemTimeConstantChart.Copy(), + systemPrecisionChart.Copy(), + } + systemOffsetChart = module.Chart{ + ID: "sys_offset", + Title: "Combined offset of server relative to this host", + Units: "milliseconds", + Fam: "system", + Ctx: "ntpd.sys_offset", + Type: module.Area, + Priority: prioSystemOffset, + Dims: module.Dims{ + {ID: "offset", Name: "offset", Div: precision}, + }, + } + systemJitterChart = module.Chart{ + ID: "sys_jitter", + Title: "Combined system jitter and clock jitter", + Units: "milliseconds", + Fam: "system", + Ctx: "ntpd.sys_jitter", + Priority: prioSystemJitter, + Dims: module.Dims{ + {ID: "sys_jitter", Name: "system", Div: precision}, + {ID: "clk_jitter", Name: "clock", Div: precision}, + }, + } + systemFrequencyChart = module.Chart{ + ID: "sys_frequency", + Title: "Frequency offset relative to hardware clock", + Units: "ppm", + Fam: "system", + Ctx: "ntpd.sys_frequency", + Type: module.Area, + Priority: prioSystemFrequency, + Dims: module.Dims{ + {ID: "frequency", Name: "frequency", Div: precision}, + }, + } + systemWanderChart = module.Chart{ + ID: "sys_wander", + Title: "Clock frequency wander", + Units: "ppm", + Fam: "system", + Ctx: "ntpd.sys_wander", + Type: module.Area, + Priority: prioSystemWander, + Dims: module.Dims{ + {ID: "clk_wander", Name: "clock", Div: precision}, + }, + } + systemRootDelayChart = module.Chart{ + ID: "sys_rootdelay", + Title: "Total roundtrip delay to the primary reference clock", + Units: "milliseconds", + Fam: "system", + Ctx: "ntpd.sys_rootdelay", + Type: module.Area, + Priority: prioSystemRootDelay, + Dims: module.Dims{ + {ID: "rootdelay", Name: "delay", Div: precision}, + }, + } + systemRootDispersionChart = module.Chart{ + ID: "sys_rootdisp", + Title: "Total root dispersion to the primary reference clock", + Units: "milliseconds", + Fam: "system", + Ctx: "ntpd.sys_rootdisp", + Type: module.Area, + Priority: prioSystemRootDispersion, + Dims: module.Dims{ + {ID: "rootdisp", Name: "dispersion", Div: precision}, + }, + } + systemStratumChart = module.Chart{ + ID: "sys_stratum", + Title: "Stratum", + Units: "stratum", + Fam: "system", + Ctx: "ntpd.sys_stratum", + Priority: prioSystemStratum, + Dims: module.Dims{ + {ID: "stratum", Name: "stratum", Div: precision}, + }, + } + systemTimeConstantChart = module.Chart{ + ID: "sys_tc", + Title: "Time constant and poll exponent", + Units: "log2", + Fam: "system", + Ctx: "ntpd.sys_tc", + Priority: prioSystemTimeConstant, + Dims: module.Dims{ + {ID: "tc", Name: "current", Div: precision}, + {ID: "mintc", Name: "minimum", Div: precision}, + }, + } + systemPrecisionChart = module.Chart{ + ID: "sys_precision", + Title: "Precision", + Units: "log2", + Fam: "system", + Ctx: "ntpd.sys_precision", + Priority: prioSystemPrecision, + Dims: module.Dims{ + {ID: "precision", Name: "precision", Div: precision}, + }, + } +) + +var ( + peerChartsTmpl = module.Charts{ + peerOffsetChartTmpl.Copy(), + peerDelayChartTmpl.Copy(), + peerDispersionChartTmpl.Copy(), + peerJitterChartTmpl.Copy(), + peerXleaveChartTmpl.Copy(), + peerRootDelayChartTmpl.Copy(), + peerRootDispersionChartTmpl.Copy(), + peerStratumChartTmpl.Copy(), + peerHostModeChartTmpl.Copy(), + peerPeerModeChartTmpl.Copy(), + peerHostPollChartTmpl.Copy(), + peerPeerPollChartTmpl.Copy(), 
+		peerPrecisionChartTmpl.Copy(),
+	}
+	peerOffsetChartTmpl = module.Chart{
+		ID:       "peer_%s_offset",
+		Title:    "Peer offset",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_offset",
+		Priority: prioPeerOffset,
+		Dims: module.Dims{
+			{ID: "peer_%s_offset", Name: "offset", Div: precision},
+		},
+	}
+	peerDelayChartTmpl = module.Chart{
+		ID:       "peer_%s_delay",
+		Title:    "Peer delay",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_delay",
+		Priority: prioPeerDelay,
+		Dims: module.Dims{
+			{ID: "peer_%s_delay", Name: "delay", Div: precision},
+		},
+	}
+	peerDispersionChartTmpl = module.Chart{
+		ID:       "peer_%s_dispersion",
+		Title:    "Peer dispersion",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_dispersion",
+		Priority: prioPeerDispersion,
+		Dims: module.Dims{
+			{ID: "peer_%s_dispersion", Name: "dispersion", Div: precision},
+		},
+	}
+	peerJitterChartTmpl = module.Chart{
+		ID:       "peer_%s_jitter",
+		Title:    "Peer jitter",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_jitter",
+		Priority: prioPeerJitter,
+		Dims: module.Dims{
+			{ID: "peer_%s_jitter", Name: "jitter", Div: precision},
+		},
+	}
+	peerXleaveChartTmpl = module.Chart{
+		ID:       "peer_%s_xleave",
+		Title:    "Peer interleave delay",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_xleave",
+		Priority: prioPeerXleave,
+		Dims: module.Dims{
+			{ID: "peer_%s_xleave", Name: "xleave", Div: precision},
+		},
+	}
+	peerRootDelayChartTmpl = module.Chart{
+		ID:       "peer_%s_rootdelay",
+		Title:    "Peer roundtrip delay to the primary reference clock",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_rootdelay",
+		Priority: prioPeerRootDelay,
+		Dims: module.Dims{
+			{ID: "peer_%s_rootdelay", Name: "rootdelay", Div: precision},
+		},
+	}
+	peerRootDispersionChartTmpl = module.Chart{
+		ID:       "peer_%s_rootdisp",
+		Title:    "Peer root dispersion to the primary reference clock",
+		Units:    "milliseconds",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_rootdisp",
+		Priority: prioPeerRootDispersion,
+		Dims: module.Dims{
+			{ID: "peer_%s_rootdisp", Name: "dispersion", Div: precision},
+		},
+	}
+	peerStratumChartTmpl = module.Chart{
+		ID:       "peer_%s_stratum",
+		Title:    "Peer stratum",
+		Units:    "stratum",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_stratum",
+		Priority: prioPeerStratum,
+		Dims: module.Dims{
+			{ID: "peer_%s_stratum", Name: "stratum", Div: precision},
+		},
+	}
+	peerHostModeChartTmpl = module.Chart{
+		ID:       "peer_%s_hmode",
+		Title:    "Peer host mode",
+		Units:    "hmode",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_hmode",
+		Priority: prioPeerHostMode,
+		Dims: module.Dims{
+			{ID: "peer_%s_hmode", Name: "hmode", Div: precision},
+		},
+	}
+	peerPeerModeChartTmpl = module.Chart{
+		ID:       "peer_%s_pmode",
+		Title:    "Peer mode",
+		Units:    "pmode",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_pmode",
+		Priority: prioPeerPeerMode,
+		Dims: module.Dims{
+			{ID: "peer_%s_pmode", Name: "pmode", Div: precision},
+		},
+	}
+	peerHostPollChartTmpl = module.Chart{
+		ID:       "peer_%s_hpoll",
+		Title:    "Peer host poll exponent",
+		Units:    "log2",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_hpoll",
+		Priority: prioPeerHostPoll,
+		Dims: module.Dims{
+			{ID: "peer_%s_hpoll", Name: "hpoll", Div: precision},
+		},
+	}
+	peerPeerPollChartTmpl = module.Chart{
+		ID:       "peer_%s_ppoll",
+		Title:    "Peer poll exponent",
+		Units:    "log2",
+		Fam:      "peers",
+		Ctx:      "ntpd.peer_ppoll",
+		Priority: prioPeerPeerPoll,
+		Dims: module.Dims{
+			{ID: "peer_%s_ppoll", Name: "ppoll", Div: precision},
+		},
+	}
+	peerPrecisionChartTmpl = module.Chart{
+		ID:       "peer_%s_precision",
+		Title:    "Peer precision",
+		Units:    "log2",
+		Fam:      "peers",
Ctx: "ntpd.peer_precision", + Priority: prioPeerPrecision, + Dims: module.Dims{ + {ID: "peer_%s_precision", Name: "precision", Div: precision}, + }, + } +) + +func (n *NTPd) addPeerCharts(addr string) { + charts := peerChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(addr, ".", "_")) + chart.Labels = []module.Label{ + {Key: "peer_address", Value: addr}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, addr) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NTPd) removePeerCharts(addr string) { + px := fmt.Sprintf("peer_%s", strings.ReplaceAll(addr, ".", "_")) + + for _, chart := range *n.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/client.go b/src/go/collectors/go.d.plugin/modules/ntpd/client.go new file mode 100644 index 00000000000000..5164c80e8b998d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/client.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ntpd + +import ( + "net" + "time" + + "github.com/facebook/time/ntp/control" +) + +func newNTPClient(c Config) (ntpConn, error) { + conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + if err != nil { + return nil, err + } + + client := &ntpClient{ + conn: conn, + timeout: c.Timeout.Duration, + client: &control.NTPClient{Connection: conn}, + } + + return client, nil +} + +type ntpClient struct { + conn net.Conn + timeout time.Duration + client *control.NTPClient +} + +func (c *ntpClient) systemInfo() (map[string]string, error) { + return c.peerInfo(0) +} + +func (c *ntpClient) peerInfo(id uint16) (map[string]string, error) { + msg := &control.NTPControlMsgHead{ + VnMode: control.MakeVnMode(2, control.Mode), + REMOp: control.OpReadVariables, + AssociationID: id, + } + + if err := c.conn.SetDeadline(time.Now().Add(c.timeout)); err != nil { + return nil, err + } + + resp, err := c.client.Communicate(msg) + if err != nil { + return nil, err + } + + return resp.GetAssociationInfo() +} + +func (c *ntpClient) peerIDs() ([]uint16, error) { + msg := &control.NTPControlMsgHead{ + VnMode: control.MakeVnMode(2, control.Mode), + REMOp: control.OpReadStatus, + } + + if err := c.conn.SetDeadline(time.Now().Add(c.timeout)); err != nil { + return nil, err + } + + resp, err := c.client.Communicate(msg) + if err != nil { + return nil, err + } + + peers, err := resp.GetAssociations() + if err != nil { + return nil, err + } + + var ids []uint16 + for id := range peers { + ids = append(ids, id) + } + + return ids, nil +} + +func (c *ntpClient) close() { + if c.conn != nil { + _ = c.conn.Close() + c.conn = nil + } +} diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/collect.go b/src/go/collectors/go.d.plugin/modules/ntpd/collect.go new file mode 100644 index 00000000000000..09553a65cfab03 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/collect.go @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ntpd + +import ( + "fmt" + "net" + "strconv" + "time" +) + +const ( + precision = 1000000 +) + +func (n *NTPd) collect() (map[string]int64, error) { + if n.client == nil { + client, err := n.newClient(n.Config) + if err != nil { + return nil, fmt.Errorf("creating NTP client: %v", err) + } + n.client = client + } + + mx := make(map[string]int64) + + if err := n.collectInfo(mx); err != nil { + return nil, err + } + + if 
n.CollectPeers { + if now := time.Now(); now.Sub(n.findPeersTime) > n.findPeersEvery { + n.findPeersTime = now + if err := n.findPeers(); err != nil { + n.Warning(err) + } + } + n.collectPeersInfo(mx) + } + + return mx, nil +} + +func (n *NTPd) collectInfo(mx map[string]int64) error { + info, err := n.client.systemInfo() + if err != nil { + return fmt.Errorf("error on querying system info: %v", err) + } + + for k, v := range info { + switch k { + case + "offset", + "sys_jitter", + "clk_jitter", + "frequency", + "clk_wander", + "rootdelay", + "rootdisp", + "stratum", + "tc", + "mintc", + "precision": + if val, err := strconv.ParseFloat(v, 64); err == nil { + mx[k] = int64(val * precision) + } + } + } + return nil +} + +func (n *NTPd) collectPeersInfo(mx map[string]int64) { + for _, id := range n.peerIDs { + info, err := n.client.peerInfo(id) + if err != nil { + n.Warningf("error on querying NTP peer info id='%d': %v", id, err) + continue + } + + addr, ok := info["srcadr"] + if !ok { + continue + } + + for k, v := range info { + switch k { + case + "offset", + "delay", + "dispersion", + "jitter", + "xleave", + "rootdelay", + "rootdisp", + "stratum", + "hmode", + "pmode", + "hpoll", + "ppoll", + "precision": + if val, err := strconv.ParseFloat(v, 64); err == nil { + mx["peer_"+addr+"_"+k] = int64(val * precision) + } + } + } + } +} + +func (n *NTPd) findPeers() error { + n.peerIDs = n.peerIDs[:0] + + n.Debug("querying NTP peers") + peers, err := n.client.peerIDs() + if err != nil { + return fmt.Errorf("querying NTP peers: %v", err) + } + + n.Debugf("found %d NTP peers (ids: %v)", len(peers), peers) + seen := make(map[string]bool) + + for _, id := range peers { + info, err := n.client.peerInfo(id) + if err != nil { + n.Debugf("error on querying NTP peer info id='%d': %v", id, err) + continue + } + + addr, ok := info["srcadr"] + if ip := net.ParseIP(addr); !ok || ip == nil || n.peerIPAddrFilter.Contains(ip) { + n.Debugf("skipping NTP peer id='%d', srcadr='%s'", id, addr) + continue + } + + seen[addr] = true + + if !n.peerAddr[addr] { + n.peerAddr[addr] = true + n.Debugf("new NTP peer id='%d', srcadr='%s': creating charts", id, addr) + n.addPeerCharts(addr) + } + + n.peerIDs = append(n.peerIDs, id) + } + + for addr := range n.peerAddr { + if !seen[addr] { + delete(n.peerAddr, addr) + n.Debugf("stale NTP peer srcadr='%s': removing charts", addr) + n.removePeerCharts(addr) + } + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/config_schema.json b/src/go/collectors/go.d.plugin/modules/ntpd/config_schema.json new file mode 100644 index 00000000000000..ef360a7f95eb69 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/config_schema.json @@ -0,0 +1,26 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/ntpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_peers": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/integrations/ntpd.md b/src/go/collectors/go.d.plugin/modules/ntpd/integrations/ntpd.md new file mode 100644 index 00000000000000..be765ae18903a8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/integrations/ntpd.md @@ -0,0 +1,228 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/ntpd/README.md" +meta_yaml: 
"https://github.com/netdata/go.d.plugin/edit/master/modules/ntpd/metadata.yaml" +sidebar_label: "NTPd" +learn_status: "Published" +learn_rel_path: "Data Collection/System Clock and NTP" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NTPd + + +<img src="https://netdata.cloud/img/ntp.png" width="150"/> + + +Plugin: go.d.plugin +Module: ntpd + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per NTPd instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ntpd.sys_offset | offset | milliseconds | +| ntpd.sys_jitter | system, clock | milliseconds | +| ntpd.sys_frequency | frequency | ppm | +| ntpd.sys_wander | clock | ppm | +| ntpd.sys_rootdelay | delay | milliseconds | +| ntpd.sys_rootdisp | dispersion | milliseconds | +| ntpd.sys_stratum | stratum | stratum | +| ntpd.sys_tc | current, minimum | log2 | +| ntpd.sys_precision | precision | log2 | + +### Per peer + +These metrics refer to the NTPd peer. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| peer_address | peer's source IP address | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| ntpd.peer_offset | offset | milliseconds | +| ntpd.peer_delay | delay | milliseconds | +| ntpd.peer_dispersion | dispersion | milliseconds | +| ntpd.peer_jitter | jitter | milliseconds | +| ntpd.peer_xleave | xleave | milliseconds | +| ntpd.peer_rootdelay | rootdelay | milliseconds | +| ntpd.peer_rootdisp | dispersion | milliseconds | +| ntpd.peer_stratum | stratum | stratum | +| ntpd.peer_hmode | hmode | hmode | +| ntpd.peer_pmode | pmode | pmode | +| ntpd.peer_hpoll | hpoll | log2 | +| ntpd.peer_ppoll | ppoll | log2 | +| ntpd.peer_precision | precision | log2 | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/ntpd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). 
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/ntpd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes | +| timeout | Connection/read/write timeout. | 3 | no | +| collect_peers | Determines whether peer metrics will be collected. | no | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:123 + +``` +</details> + +##### With peers metrics + +Collect peers metrics. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:123 + collect_peers: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:123 + + - name: remote + address: 203.0.113.0:123 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m ntpd + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/metadata.yaml b/src/go/collectors/go.d.plugin/modules/ntpd/metadata.yaml new file mode 100644 index 00000000000000..3b968f20c89395 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/metadata.yaml @@ -0,0 +1,260 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-ntpd + plugin_name: go.d.plugin + module_name: ntpd + monitored_instance: + name: NTPd + link: https://www.ntp.org/documentation/4.2.8-series/ntpd + icon_filename: ntp.png + categories: + - data-collection.system-clock-and-ntp + keywords: + - ntpd + - ntp + - time + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: > + This collector monitors the system variables of the local `ntpd` daemon (optional incl. variables of the polled peers) + using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, + the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html). 
+ method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/ntpd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: Server address in IP:PORT format. + default_value: 127.0.0.1:123 + required: true + - name: timeout + description: Connection/read/write timeout. + default_value: 3 + required: false + - name: collect_peers + description: Determines whether peer metrics will be collected. + default_value: false + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + address: 127.0.0.1:123 + - name: With peers metrics + description: Collect peers metrics. + config: | + jobs: + - name: local + address: 127.0.0.1:123 + collect_peers: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:123 + + - name: remote + address: 203.0.113.0:123 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: ntpd.sys_offset + description: Combined offset of server relative to this host + unit: milliseconds + chart_type: area + dimensions: + - name: offset + - name: ntpd.sys_jitter + description: Combined system jitter and clock jitter + unit: milliseconds + chart_type: line + dimensions: + - name: system + - name: clock + - name: ntpd.sys_frequency + description: Frequency offset relative to hardware clock + unit: ppm + chart_type: area + dimensions: + - name: frequency + - name: ntpd.sys_wander + description: Clock frequency wander + unit: ppm + chart_type: area + dimensions: + - name: clock + - name: ntpd.sys_rootdelay + description: Total roundtrip delay to the primary reference clock + unit: milliseconds + chart_type: area + dimensions: + - name: delay + - name: ntpd.sys_rootdisp + description: Total root dispersion to the primary reference clock + unit: milliseconds + chart_type: area + dimensions: + - name: dispersion + - name: ntpd.sys_stratum + description: Stratum + unit: stratum + chart_type: line + dimensions: + - name: stratum + - name: ntpd.sys_tc + description: Time constant and poll exponent + unit: log2 + chart_type: line + dimensions: + - name: current + - name: minimum + - name: ntpd.sys_precision + description: Precision + unit: log2 + chart_type: line + dimensions: + - name: precision + - name: peer + description: These metrics refer to the NTPd peer. 
+ labels: + - name: peer_address + description: peer's source IP address + metrics: + - name: ntpd.peer_offset + description: Peer offset + unit: milliseconds + chart_type: line + dimensions: + - name: offset + - name: ntpd.peer_delay + description: Peer delay + unit: milliseconds + chart_type: line + dimensions: + - name: delay + - name: ntpd.peer_dispersion + description: Peer dispersion + unit: milliseconds + chart_type: line + dimensions: + - name: dispersion + - name: ntpd.peer_jitter + description: Peer jitter + unit: milliseconds + chart_type: line + dimensions: + - name: jitter + - name: ntpd.peer_xleave + description: Peer interleave delay + unit: milliseconds + chart_type: line + dimensions: + - name: xleave + - name: ntpd.peer_rootdelay + description: Peer roundtrip delay to the primary reference clock + unit: milliseconds + chart_type: line + dimensions: + - name: rootdelay + - name: ntpd.peer_rootdisp + description: Peer root dispersion to the primary reference clock + unit: milliseconds + chart_type: line + dimensions: + - name: dispersion + - name: ntpd.peer_stratum + description: Peer stratum + unit: stratum + chart_type: line + dimensions: + - name: stratum + - name: ntpd.peer_hmode + description: Peer host mode + unit: hmode + chart_type: line + dimensions: + - name: hmode + - name: ntpd.peer_pmode + description: Peer mode + unit: pmode + chart_type: line + dimensions: + - name: pmode + - name: ntpd.peer_hpoll + description: Peer host poll exponent + unit: log2 + chart_type: line + dimensions: + - name: hpoll + - name: ntpd.peer_ppoll + description: Peer poll exponent + unit: log2 + chart_type: line + dimensions: + - name: ppoll + - name: ntpd.peer_precision + description: Peer precision + unit: log2 + chart_type: line + dimensions: + - name: precision diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/ntpd.go b/src/go/collectors/go.d.plugin/modules/ntpd/ntpd.go new file mode 100644 index 00000000000000..8bbc0ba4f3aa00 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/ntpd.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ntpd + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/iprange" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("ntpd", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *NTPd { + return &NTPd{ + Config: Config{ + Address: "127.0.0.1:123", + Timeout: web.Duration{Duration: time.Second * 3}, + CollectPeers: false, + }, + charts: systemCharts.Copy(), + newClient: newNTPClient, + findPeersEvery: time.Minute * 3, + peerAddr: make(map[string]bool), + } +} + +type Config struct { + Address string `yaml:"address"` + Timeout web.Duration `yaml:"timeout"` + CollectPeers bool `yaml:"collect_peers"` +} + +type ( + NTPd struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + newClient func(c Config) (ntpConn, error) + client ntpConn + + findPeersTime time.Time + findPeersEvery time.Duration + peerAddr map[string]bool + peerIDs []uint16 + peerIPAddrFilter iprange.Pool + } + ntpConn interface { + systemInfo() (map[string]string, error) + peerInfo(id uint16) (map[string]string, error) + peerIDs() ([]uint16, error) + close() + } +) + +func (n *NTPd) Init() bool { + if n.Address == "" { + n.Error("config validation: 'address' can not be empty") + return false + } + + txt := 
"0.0.0.0 127.0.0.0/8" + r, err := iprange.ParseRanges(txt) + if err != nil { + n.Errorf("error on parse ip range '%s': %v", txt, err) + return false + } + + n.peerIPAddrFilter = r + + return true +} + +func (n *NTPd) Check() bool { + return len(n.Collect()) > 0 +} + +func (n *NTPd) Charts() *module.Charts { + return n.charts +} + +func (n *NTPd) Collect() map[string]int64 { + mx, err := n.collect() + if err != nil { + n.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (n *NTPd) Cleanup() { + if n.client != nil { + n.client.close() + n.client = nil + } +} diff --git a/src/go/collectors/go.d.plugin/modules/ntpd/ntpd_test.go b/src/go/collectors/go.d.plugin/modules/ntpd/ntpd_test.go new file mode 100644 index 00000000000000..481d2d7e954f37 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ntpd/ntpd_test.go @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ntpd + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNTPd_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default config": { + config: New().Config, + }, + "unset 'address'": { + wantFail: true, + config: Config{ + Address: "", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + n := New() + n.Config = test.config + + if test.wantFail { + assert.False(t, n.Init()) + } else { + assert.True(t, n.Init()) + } + }) + } +} + +func TestNTPd_Charts(t *testing.T) { + assert.Equal(t, len(systemCharts), len(*New().Charts())) +} + +func TestNTPd_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func(*NTPd) + wantClose bool + }{ + "after New": { + wantClose: false, + prepare: func(*NTPd) {}, + }, + "after Init": { + wantClose: false, + prepare: func(n *NTPd) { n.Init() }, + }, + "after Check": { + wantClose: true, + prepare: func(n *NTPd) { n.Init(); n.Check() }, + }, + "after Collect": { + wantClose: true, + prepare: func(n *NTPd) { n.Init(); n.Collect() }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := &mockClient{} + n := prepareNTPdWithMock(m, true) + test.prepare(n) + + require.NotPanics(t, n.Cleanup) + + if test.wantClose { + assert.True(t, m.closeCalled) + } else { + assert.False(t, m.closeCalled) + } + }) + } +} + +func TestNTPd_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *NTPd + wantFail bool + }{ + "system: success, peers: success": { + wantFail: false, + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{}, true) }, + }, + "system: success, list peers: fails": { + wantFail: false, + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerIDs: true}, true) }, + }, + "system: success, peers info: fails": { + wantFail: false, + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerInfo: true}, true) }, + }, + "system: fails": { + wantFail: true, + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnSystemInfo: true}, true) }, + }, + "fail on creating client": { + wantFail: true, + prepare: func() *NTPd { return prepareNTPdWithMock(nil, true) }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + n := test.prepare() + + require.True(t, n.Init()) + + if test.wantFail { + assert.False(t, n.Check()) + } else { + assert.True(t, n.Check()) + } + }) + } + +} + +func TestNTPd_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *NTPd + 
expected map[string]int64 + expectedCharts int + }{ + "system: success, peers: success": { + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{}, true) }, + expected: map[string]int64{ + "clk_jitter": 626000, + "clk_wander": 81000, + "mintc": 3000000, + "offset": -149638, + "peer_203.0.113.1_delay": 10464000, + "peer_203.0.113.1_dispersion": 5376000, + "peer_203.0.113.1_hmode": 3000000, + "peer_203.0.113.1_hpoll": 7000000, + "peer_203.0.113.1_jitter": 5204000, + "peer_203.0.113.1_offset": 312000, + "peer_203.0.113.1_pmode": 4000000, + "peer_203.0.113.1_ppoll": 7000000, + "peer_203.0.113.1_precision": -21000000, + "peer_203.0.113.1_rootdelay": 198000, + "peer_203.0.113.1_rootdisp": 14465000, + "peer_203.0.113.1_stratum": 2000000, + "peer_203.0.113.1_xleave": 95000, + "peer_203.0.113.2_delay": 10464000, + "peer_203.0.113.2_dispersion": 5376000, + "peer_203.0.113.2_hmode": 3000000, + "peer_203.0.113.2_hpoll": 7000000, + "peer_203.0.113.2_jitter": 5204000, + "peer_203.0.113.2_offset": 312000, + "peer_203.0.113.2_pmode": 4000000, + "peer_203.0.113.2_ppoll": 7000000, + "peer_203.0.113.2_precision": -21000000, + "peer_203.0.113.2_rootdelay": 198000, + "peer_203.0.113.2_rootdisp": 14465000, + "peer_203.0.113.2_stratum": 2000000, + "peer_203.0.113.2_xleave": 95000, + "peer_203.0.113.3_delay": 10464000, + "peer_203.0.113.3_dispersion": 5376000, + "peer_203.0.113.3_hmode": 3000000, + "peer_203.0.113.3_hpoll": 7000000, + "peer_203.0.113.3_jitter": 5204000, + "peer_203.0.113.3_offset": 312000, + "peer_203.0.113.3_pmode": 4000000, + "peer_203.0.113.3_ppoll": 7000000, + "peer_203.0.113.3_precision": -21000000, + "peer_203.0.113.3_rootdelay": 198000, + "peer_203.0.113.3_rootdisp": 14465000, + "peer_203.0.113.3_stratum": 2000000, + "peer_203.0.113.3_xleave": 95000, + "precision": -24000000, + "rootdelay": 10385000, + "rootdisp": 23404000, + "stratum": 2000000, + "sys_jitter": 1648010, + "tc": 7000000, + }, + expectedCharts: len(systemCharts) + len(peerChartsTmpl)*3, + }, + "system: success, list peers: fails": { + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerIDs: true}, true) }, + expected: map[string]int64{ + "clk_jitter": 626000, + "clk_wander": 81000, + "mintc": 3000000, + "offset": -149638, + "precision": -24000000, + "rootdelay": 10385000, + "rootdisp": 23404000, + "stratum": 2000000, + "sys_jitter": 1648010, + "tc": 7000000, + }, + expectedCharts: len(systemCharts), + }, + "system: success, peers info: fails": { + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerInfo: true}, true) }, + expected: map[string]int64{ + "clk_jitter": 626000, + "clk_wander": 81000, + "mintc": 3000000, + "offset": -149638, + "precision": -24000000, + "rootdelay": 10385000, + "rootdisp": 23404000, + "stratum": 2000000, + "sys_jitter": 1648010, + "tc": 7000000, + }, + expectedCharts: len(systemCharts), + }, + "system: fails": { + prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnSystemInfo: true}, true) }, + expected: nil, + expectedCharts: len(systemCharts), + }, + "fail on creating client": { + prepare: func() *NTPd { return prepareNTPdWithMock(nil, true) }, + expected: nil, + expectedCharts: len(systemCharts), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + n := test.prepare() + + require.True(t, n.Init()) + _ = n.Check() + + mx := n.Collect() + + assert.Equal(t, test.expected, mx) + assert.Equal(t, test.expectedCharts, len(*n.Charts())) + }) + } +} + +func prepareNTPdWithMock(m *mockClient, collectPeers bool) 
*NTPd { + n := New() + n.CollectPeers = collectPeers + if m == nil { + n.newClient = func(_ Config) (ntpConn, error) { return nil, errors.New("mock.newClient error") } + } else { + n.newClient = func(_ Config) (ntpConn, error) { return m, nil } + } + return n +} + +type mockClient struct { + errOnSystemInfo bool + errOnPeerInfo bool + errOnPeerIDs bool + closeCalled bool +} + +func (m *mockClient) systemInfo() (map[string]string, error) { + if m.errOnSystemInfo { + return nil, errors.New("mockClient.info() error") + } + + info := map[string]string{ + "rootdelay": "10.385", + "tc": "7", + "mintc": "3", + "processor": "x86_64", + "refid": "194.177.210.54", + "reftime": "0xe7504a10.74414244", + "clock": "0xe7504e80.8c46aa3f", + "peer": "14835", + "sys_jitter": "1.648010", + "leapsec": "201701010000", + "expire": "202306280000", + "leap": "0", + "stratum": "2", + "precision": "-24", + "offset": "-0.149638", + "frequency": "- 7.734", + "clk_wander": "0.081", + "tai": "37", + "version": "ntpd 4.2.8p15@1.3728-o Wed Sep 23 11:46:38 UTC 2020 (1)", + "rootdisp": "23.404", + "clk_jitter": "0.626", + "system": "Linux/5.10.0-19-amd64", + } + + return info, nil +} + +func (m *mockClient) peerInfo(id uint16) (map[string]string, error) { + if m.errOnPeerInfo { + return nil, errors.New("mockClient.peerInfo() error") + } + + info := map[string]string{ + "delay": "10.464", + "dispersion": "5.376", + "dstadr": "10.10.10.20", + "dstport": "123", + "filtdelay": "11.34 10.53 10.49 10.46 10.92 10.56 10.69 37.99", + "filtdisp": "0.00 2.01 4.01 5.93 7.89 9.84 11.81 13.73", + "filtoffset": "0.66 0.32 0.18 0.31 0.33 0.10 0.34 14.07", + "flash": "0x0", + "headway": "0", + "hmode": "3", + "hpoll": "7", + "jitter": "5.204", + "keyid": "0", + "leap": "0", + "offset": "0.312", + "pmode": "4", + "ppoll": "7", + "precision": "-21", + "reach": "0xff", + "rec": "0xe7504df8.74802284", + "refid": "193.93.164.193", + "reftime": "0xe7504b8b.0c98a518", + "rootdelay": "0.198", + "rootdisp": "14.465", + "srcadr": fmt.Sprintf("203.0.113.%d", id), + "srcport": "123", + "stratum": "2", + "unreach": "0", + "xleave": "0.095", + } + + return info, nil +} + +func (m *mockClient) peerIDs() ([]uint16, error) { + if m.errOnPeerIDs { + return nil, errors.New("mockClient.peerIDs() error") + } + return []uint16{1, 2, 3}, nil +} + +func (m *mockClient) close() { + m.closeCalled = true +} diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/README.md b/src/go/collectors/go.d.plugin/modules/nvidia_smi/README.md new file mode 120000 index 00000000000000..3527bdb4b2a4e2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/README.md @@ -0,0 +1 @@ +integrations/nvidia_gpu.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/charts.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/charts.go new file mode 100644 index 00000000000000..f8017f82f31a8b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/charts.go @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvidia_smi + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioGPUPCIBandwidthUsage = module.Priority + iota + prioGPUPCIBandwidthUtilization + prioGPUFanSpeed + prioGPUUtilization + prioGPUMemUtilization + prioGPUDecoderUtilization + prioGPUEncoderUtilization + prioGPUMIGModeStatus + prioGPUMIGDevicesCount + prioGPUFBMemoryUsage + prioGPUMIGFBMemoryUsage + prioGPUBAR1MemoryUsage + prioGPUMIGBAR1MemoryUsage + prioGPUTemperatureChart + 
prioGPUVoltageChart + prioGPUClockFreq + prioGPUPowerDraw + prioGPUPerformanceState +) + +var ( + gpuXMLCharts = module.Charts{ + gpuPCIBandwidthUsageChartTmpl.Copy(), + gpuPCIBandwidthUtilizationChartTmpl.Copy(), + gpuFanSpeedPercChartTmpl.Copy(), + gpuUtilizationChartTmpl.Copy(), + gpuMemUtilizationChartTmpl.Copy(), + gpuDecoderUtilizationChartTmpl.Copy(), + gpuEncoderUtilizationChartTmpl.Copy(), + gpuMIGModeCurrentStatusChartTmpl.Copy(), + gpuMIGDevicesCountChartTmpl.Copy(), + gpuFrameBufferMemoryUsageChartTmpl.Copy(), + gpuBAR1MemoryUsageChartTmpl.Copy(), + gpuVoltageChartTmpl.Copy(), + gpuTemperatureChartTmpl.Copy(), + gpuClockFreqChartTmpl.Copy(), + gpuPowerDrawChartTmpl.Copy(), + gpuPerformanceStateChartTmpl.Copy(), + } + migDeviceXMLCharts = module.Charts{ + migDeviceFrameBufferMemoryUsageChartTmpl.Copy(), + migDeviceBAR1MemoryUsageChartTmpl.Copy(), + } + gpuCSVCharts = module.Charts{ + gpuFanSpeedPercChartTmpl.Copy(), + gpuUtilizationChartTmpl.Copy(), + gpuMemUtilizationChartTmpl.Copy(), + gpuFrameBufferMemoryUsageChartTmpl.Copy(), + gpuTemperatureChartTmpl.Copy(), + gpuClockFreqChartTmpl.Copy(), + gpuPowerDrawChartTmpl.Copy(), + gpuPerformanceStateChartTmpl.Copy(), + } +) + +var ( + gpuPCIBandwidthUsageChartTmpl = module.Chart{ + ID: "gpu_%s_pcie_bandwidth_usage", + Title: "PCI Express Bandwidth Usage", + Units: "B/s", + Fam: "pcie bandwidth", + Ctx: "nvidia_smi.gpu_pcie_bandwidth_usage", + Type: module.Area, + Priority: prioGPUPCIBandwidthUsage, + Dims: module.Dims{ + {ID: "gpu_%s_pcie_bandwidth_usage_rx", Name: "rx"}, + {ID: "gpu_%s_pcie_bandwidth_usage_tx", Name: "tx", Mul: -1}, + }, + } + gpuPCIBandwidthUtilizationChartTmpl = module.Chart{ + ID: "gpu_%s_pcie_bandwidth_utilization", + Title: "PCI Express Bandwidth Utilization", + Units: "percentage", + Fam: "pcie bandwidth", + Ctx: "nvidia_smi.gpu_pcie_bandwidth_utilization", + Priority: prioGPUPCIBandwidthUtilization, + Dims: module.Dims{ + {ID: "gpu_%s_pcie_bandwidth_utilization_rx", Name: "rx", Div: 100}, + {ID: "gpu_%s_pcie_bandwidth_utilization_tx", Name: "tx", Div: 100}, + }, + } + gpuFanSpeedPercChartTmpl = module.Chart{ + ID: "gpu_%s_fan_speed_perc", + Title: "Fan speed", + Units: "%", + Fam: "fan speed", + Ctx: "nvidia_smi.gpu_fan_speed_perc", + Priority: prioGPUFanSpeed, + Dims: module.Dims{ + {ID: "gpu_%s_fan_speed_perc", Name: "fan_speed"}, + }, + } + gpuUtilizationChartTmpl = module.Chart{ + ID: "gpu_%s_gpu_utilization", + Title: "GPU utilization", + Units: "%", + Fam: "gpu utilization", + Ctx: "nvidia_smi.gpu_utilization", + Priority: prioGPUUtilization, + Dims: module.Dims{ + {ID: "gpu_%s_gpu_utilization", Name: "gpu"}, + }, + } + gpuMemUtilizationChartTmpl = module.Chart{ + ID: "gpu_%s_memory_utilization", + Title: "Memory utilization", + Units: "%", + Fam: "mem utilization", + Ctx: "nvidia_smi.gpu_memory_utilization", + Priority: prioGPUMemUtilization, + Dims: module.Dims{ + {ID: "gpu_%s_mem_utilization", Name: "memory"}, + }, + } + gpuDecoderUtilizationChartTmpl = module.Chart{ + ID: "gpu_%s_decoder_utilization", + Title: "Decoder utilization", + Units: "%", + Fam: "dec utilization", + Ctx: "nvidia_smi.gpu_decoder_utilization", + Priority: prioGPUDecoderUtilization, + Dims: module.Dims{ + {ID: "gpu_%s_decoder_utilization", Name: "decoder"}, + }, + } + gpuEncoderUtilizationChartTmpl = module.Chart{ + ID: "gpu_%s_encoder_utilization", + Title: "Encoder utilization", + Units: "%", + Fam: "enc utilization", + Ctx: "nvidia_smi.gpu_encoder_utilization", + Priority: prioGPUEncoderUtilization, + Dims: module.Dims{ + 
{ID: "gpu_%s_encoder_utilization", Name: "encoder"}, + }, + } + gpuMIGModeCurrentStatusChartTmpl = module.Chart{ + ID: "gpu_%s_mig_mode_current_status", + Title: "MIG current mode", + Units: "status", + Fam: "mig", + Ctx: "nvidia_smi.gpu_mig_mode_current_status", + Priority: prioGPUMIGModeStatus, + Dims: module.Dims{ + {ID: "gpu_%s_mig_current_mode_enabled", Name: "enabled"}, + {ID: "gpu_%s_mig_current_mode_disabled", Name: "disabled"}, + }, + } + gpuMIGDevicesCountChartTmpl = module.Chart{ + ID: "gpu_%s_mig_devices_count", + Title: "MIG devices", + Units: "devices", + Fam: "mig", + Ctx: "nvidia_smi.gpu_mig_devices_count", + Priority: prioGPUMIGDevicesCount, + Dims: module.Dims{ + {ID: "gpu_%s_mig_devices_count", Name: "mig"}, + }, + } + gpuFrameBufferMemoryUsageChartTmpl = module.Chart{ + ID: "gpu_%s_frame_buffer_memory_usage", + Title: "Frame buffer memory usage", + Units: "B", + Fam: "fb mem usage", + Ctx: "nvidia_smi.gpu_frame_buffer_memory_usage", + Type: module.Stacked, + Priority: prioGPUFBMemoryUsage, + Dims: module.Dims{ + {ID: "gpu_%s_frame_buffer_memory_usage_free", Name: "free"}, + {ID: "gpu_%s_frame_buffer_memory_usage_used", Name: "used"}, + {ID: "gpu_%s_frame_buffer_memory_usage_reserved", Name: "reserved"}, + }, + } + gpuBAR1MemoryUsageChartTmpl = module.Chart{ + ID: "gpu_%s_bar1_memory_usage", + Title: "BAR1 memory usage", + Units: "B", + Fam: "bar1 mem usage", + Ctx: "nvidia_smi.gpu_bar1_memory_usage", + Type: module.Stacked, + Priority: prioGPUBAR1MemoryUsage, + Dims: module.Dims{ + {ID: "gpu_%s_bar1_memory_usage_free", Name: "free"}, + {ID: "gpu_%s_bar1_memory_usage_used", Name: "used"}, + }, + } + gpuTemperatureChartTmpl = module.Chart{ + ID: "gpu_%s_temperature", + Title: "Temperature", + Units: "Celsius", + Fam: "temperature", + Ctx: "nvidia_smi.gpu_temperature", + Priority: prioGPUTemperatureChart, + Dims: module.Dims{ + {ID: "gpu_%s_temperature", Name: "temperature"}, + }, + } + gpuVoltageChartTmpl = module.Chart{ + ID: "gpu_%s_voltage", + Title: "Voltage", + Units: "V", + Fam: "voltage", + Ctx: "nvidia_smi.gpu_voltage", + Priority: prioGPUVoltageChart, + Dims: module.Dims{ + {ID: "gpu_%s_voltage", Name: "voltage", Div: 1000}, // mV => V + }, + } + gpuClockFreqChartTmpl = module.Chart{ + ID: "gpu_%s_clock_freq", + Title: "Clock current frequency", + Units: "MHz", + Fam: "clocks", + Ctx: "nvidia_smi.gpu_clock_freq", + Priority: prioGPUClockFreq, + Dims: module.Dims{ + {ID: "gpu_%s_graphics_clock", Name: "graphics"}, + {ID: "gpu_%s_video_clock", Name: "video"}, + {ID: "gpu_%s_sm_clock", Name: "sm"}, + {ID: "gpu_%s_mem_clock", Name: "mem"}, + }, + } + gpuPowerDrawChartTmpl = module.Chart{ + ID: "gpu_%s_power_draw", + Title: "Power draw", + Units: "Watts", + Fam: "power draw", + Ctx: "nvidia_smi.gpu_power_draw", + Priority: prioGPUPowerDraw, + Dims: module.Dims{ + {ID: "gpu_%s_power_draw", Name: "power_draw"}, + }, + } + gpuPerformanceStateChartTmpl = module.Chart{ + ID: "gpu_%s_performance_state", + Title: "Performance state", + Units: "state", + Fam: "performance state", + Ctx: "nvidia_smi.gpu_performance_state", + Priority: prioGPUPerformanceState, + Dims: module.Dims{ + {ID: "gpu_%s_performance_state_P0", Name: "P0"}, + {ID: "gpu_%s_performance_state_P1", Name: "P1"}, + {ID: "gpu_%s_performance_state_P2", Name: "P2"}, + {ID: "gpu_%s_performance_state_P3", Name: "P3"}, + {ID: "gpu_%s_performance_state_P4", Name: "P4"}, + {ID: "gpu_%s_performance_state_P5", Name: "P5"}, + {ID: "gpu_%s_performance_state_P6", Name: "P6"}, + {ID: "gpu_%s_performance_state_P7", Name: 
"P7"}, + {ID: "gpu_%s_performance_state_P8", Name: "P8"}, + {ID: "gpu_%s_performance_state_P9", Name: "P9"}, + {ID: "gpu_%s_performance_state_P10", Name: "P10"}, + {ID: "gpu_%s_performance_state_P11", Name: "P11"}, + {ID: "gpu_%s_performance_state_P12", Name: "P12"}, + {ID: "gpu_%s_performance_state_P13", Name: "P13"}, + {ID: "gpu_%s_performance_state_P14", Name: "P14"}, + {ID: "gpu_%s_performance_state_P15", Name: "P15"}, + }, + } +) + +func (nv *NvidiaSMI) addGPUXMLCharts(gpu xmlGPUInfo) { + charts := gpuXMLCharts.Copy() + + if !isValidValue(gpu.Utilization.GpuUtil) { + _ = charts.Remove(gpuUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.Utilization.MemoryUtil) { + _ = charts.Remove(gpuMemUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.Utilization.DecoderUtil) { + _ = charts.Remove(gpuDecoderUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.Utilization.EncoderUtil) { + _ = charts.Remove(gpuEncoderUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.MIGMode.CurrentMIG) { + _ = charts.Remove(gpuMIGModeCurrentStatusChartTmpl.ID) + _ = charts.Remove(gpuMIGDevicesCountChartTmpl.ID) + } + if !isValidValue(gpu.FanSpeed) { + _ = charts.Remove(gpuFanSpeedPercChartTmpl.ID) + } + if (gpu.PowerReadings == nil || !isValidValue(gpu.PowerReadings.PowerDraw)) && + (gpu.GPUPowerReadings == nil || !isValidValue(gpu.GPUPowerReadings.PowerDraw)) { + _ = charts.Remove(gpuPowerDrawChartTmpl.ID) + } + if !isValidValue(gpu.Voltage.GraphicsVolt) { + _ = charts.Remove(gpuVoltageChartTmpl.ID) + } + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, strings.ToLower(gpu.UUID)) + c.Labels = []module.Label{ + // csv output has no 'product_brand' + {Key: "uuid", Value: gpu.UUID}, + {Key: "product_name", Value: gpu.ProductName}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, gpu.UUID) + } + } + + if err := nv.Charts().Add(*charts...); err != nil { + nv.Warning(err) + } +} + +func (nv *NvidiaSMI) addGPUCSVCharts(gpu csvGPUInfo) { + charts := gpuCSVCharts.Copy() + + if !isValidValue(gpu.utilizationGPU) { + _ = charts.Remove(gpuUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.utilizationMemory) { + _ = charts.Remove(gpuMemUtilizationChartTmpl.ID) + } + if !isValidValue(gpu.fanSpeed) { + _ = charts.Remove(gpuFanSpeedPercChartTmpl.ID) + } + if !isValidValue(gpu.powerDraw) { + _ = charts.Remove(gpuPowerDrawChartTmpl.ID) + } + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, strings.ToLower(gpu.uuid)) + c.Labels = []module.Label{ + {Key: "product_name", Value: gpu.name}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, gpu.uuid) + } + } + + if err := nv.Charts().Add(*charts...); err != nil { + nv.Warning(err) + } +} + +var ( + migDeviceFrameBufferMemoryUsageChartTmpl = module.Chart{ + ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage", + Title: "MIG Frame buffer memory usage", + Units: "B", + Fam: "fb mem usage", + Ctx: "nvidia_smi.gpu_mig_frame_buffer_memory_usage", + Type: module.Stacked, + Priority: prioGPUMIGFBMemoryUsage, + Dims: module.Dims{ + {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_free", Name: "free"}, + {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_used", Name: "used"}, + {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_reserved", Name: "reserved"}, + }, + } + migDeviceBAR1MemoryUsageChartTmpl = module.Chart{ + ID: "mig_instance_%s_gpu_%s_bar1_memory_usage", + Title: "MIG BAR1 memory usage", + Units: "B", + Fam: "bar1 mem usage", + Ctx: "nvidia_smi.gpu_mig_bar1_memory_usage", + Type: module.Stacked, + Priority: 
prioGPUMIGBAR1MemoryUsage, + Dims: module.Dims{ + {ID: "mig_instance_%s_gpu_%s_bar1_memory_usage_free", Name: "free"}, + {ID: "mig_instance_%s_gpu_%s_bar1_memory_usage_used", Name: "used"}, + }, + } +) + +func (nv *NvidiaSMI) addMIGDeviceXMLCharts(gpu xmlGPUInfo, mig xmlMIGDeviceInfo) { + charts := migDeviceXMLCharts.Copy() + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, strings.ToLower(mig.GPUInstanceID), strings.ToLower(gpu.UUID)) + c.Labels = []module.Label{ + {Key: "gpu_uuid", Value: gpu.UUID}, + {Key: "gpu_product_name", Value: gpu.ProductName}, + {Key: "gpu_instance_id", Value: mig.GPUInstanceID}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, mig.GPUInstanceID, gpu.UUID) + } + } + + if err := nv.Charts().Add(*charts...); err != nil { + nv.Warning(err) + } +} + +func (nv *NvidiaSMI) removeCharts(prefix string) { + prefix = strings.ToLower(prefix) + + for _, c := range *nv.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect.go new file mode 100644 index 00000000000000..0830b54a361525 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvidia_smi + +import ( + "errors" + "strconv" + "strings" +) + +func (nv *NvidiaSMI) collect() (map[string]int64, error) { + if nv.exec == nil { + return nil, errors.New("nvidia-smi exec is not initialized") + } + + mx := make(map[string]int64) + + if err := nv.collectGPUInfo(mx); err != nil { + return nil, err + } + + return mx, nil +} + +func (nv *NvidiaSMI) collectGPUInfo(mx map[string]int64) error { + if nv.UseCSVFormat { + return nv.collectGPUInfoCSV(mx) + } + return nv.collectGPUInfoXML(mx) +} + +func addMetric(mx map[string]int64, key, value string, mul int) { + if !isValidValue(value) { + return + } + + value = removeUnits(value) + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return + } + + if mul > 0 { + v *= float64(mul) + } + + mx[key] = int64(v) +} + +func isValidValue(v string) bool { + return v != "" && v != "N/A" && v != "[N/A]" +} + +func parseFloat(s string) float64 { + v, _ := strconv.ParseFloat(removeUnits(s), 64) + return v +} + +func removeUnits(s string) string { + if i := strings.IndexByte(s, ' '); i != -1 { + s = s[:i] + } + return s +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_csv.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_csv.go new file mode 100644 index 00000000000000..2584aaffe37ae3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_csv.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvidia_smi + +import ( + "bufio" + "bytes" + "encoding/csv" + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" +) + +// use of property aliases is not implemented ('"<property>" or "<alias>"' in help-query-gpu) +var knownProperties = map[string]bool{ + "uuid": true, + "name": true, + "fan.speed": true, + "pstate": true, + "utilization.gpu": true, + "utilization.memory": true, + "memory.used": true, + "memory.free": true, + "memory.reserved": true, + "temperature.gpu": true, + "clocks.current.graphics": true, + "clocks.current.video": true, + "clocks.current.sm": true, + "clocks.current.memory": true, + "power.draw": true, +} + +var reHelpProperty = 
regexp.MustCompile(`"([a-zA-Z_.]+)"`) + +func (nv *NvidiaSMI) collectGPUInfoCSV(mx map[string]int64) error { + if len(nv.gpuQueryProperties) == 0 { + bs, err := nv.exec.queryHelpQueryGPU() + if err != nil { + return err + } + + sc := bufio.NewScanner(bytes.NewBuffer(bs)) + + for sc.Scan() { + if !strings.HasPrefix(sc.Text(), "\"") { + continue + } + matches := reHelpProperty.FindAllString(sc.Text(), -1) + if len(matches) == 0 { + continue + } + for _, v := range matches { + if v = strings.Trim(v, "\""); knownProperties[v] { + nv.gpuQueryProperties = append(nv.gpuQueryProperties, v) + } + } + } + nv.Debugf("found query GPU properties: %v", nv.gpuQueryProperties) + } + + bs, err := nv.exec.queryGPUInfoCSV(nv.gpuQueryProperties) + if err != nil { + return err + } + + nv.Debugf("GPU info:\n%s", bs) + + r := csv.NewReader(bytes.NewBuffer(bs)) + r.Comma = ',' + r.ReuseRecord = true + r.TrimLeadingSpace = true + + // skip headers + if _, err := r.Read(); err != nil && err != io.EOF { + return err + } + + var gpusInfo []csvGPUInfo + for { + record, err := r.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + if len(record) != len(nv.gpuQueryProperties) { + return fmt.Errorf("record values (%d) != queried properties (%d)", len(record), len(nv.gpuQueryProperties)) + } + + var gpu csvGPUInfo + for i, v := range record { + switch nv.gpuQueryProperties[i] { + case "uuid": + gpu.uuid = v + case "name": + gpu.name = v + case "fan.speed": + gpu.fanSpeed = v + case "pstate": + gpu.pstate = v + case "utilization.gpu": + gpu.utilizationGPU = v + case "utilization.memory": + gpu.utilizationMemory = v + case "memory.used": + gpu.memoryUsed = v + case "memory.free": + gpu.memoryFree = v + case "memory.reserved": + gpu.memoryReserved = v + case "temperature.gpu": + gpu.temperatureGPU = v + case "clocks.current.graphics": + gpu.clocksCurrentGraphics = v + case "clocks.current.video": + gpu.clocksCurrentVideo = v + case "clocks.current.sm": + gpu.clocksCurrentSM = v + case "clocks.current.memory": + gpu.clocksCurrentMemory = v + case "power.draw": + gpu.powerDraw = v + } + } + gpusInfo = append(gpusInfo, gpu) + } + + seen := make(map[string]bool) + + for _, gpu := range gpusInfo { + if !isValidValue(gpu.uuid) || !isValidValue(gpu.name) { + continue + } + + px := "gpu_" + gpu.uuid + "_" + + seen[px] = true + + if !nv.gpus[px] { + nv.gpus[px] = true + nv.addGPUCSVCharts(gpu) + } + + addMetric(mx, px+"fan_speed_perc", gpu.fanSpeed, 0) + addMetric(mx, px+"gpu_utilization", gpu.utilizationGPU, 0) + addMetric(mx, px+"mem_utilization", gpu.utilizationMemory, 0) + addMetric(mx, px+"frame_buffer_memory_usage_free", gpu.memoryFree, 1024*1024) // MiB => bytes + addMetric(mx, px+"frame_buffer_memory_usage_used", gpu.memoryUsed, 1024*1024) // MiB => bytes + addMetric(mx, px+"frame_buffer_memory_usage_reserved", gpu.memoryReserved, 1024*1024) // MiB => bytes + addMetric(mx, px+"temperature", gpu.temperatureGPU, 0) + addMetric(mx, px+"graphics_clock", gpu.clocksCurrentGraphics, 0) + addMetric(mx, px+"video_clock", gpu.clocksCurrentVideo, 0) + addMetric(mx, px+"sm_clock", gpu.clocksCurrentSM, 0) + addMetric(mx, px+"mem_clock", gpu.clocksCurrentMemory, 0) + addMetric(mx, px+"power_draw", gpu.powerDraw, 0) + for i := 0; i < 16; i++ { + if s := "P" + strconv.Itoa(i); gpu.pstate == s { + mx[px+"performance_state_"+s] = 1 + } else { + mx[px+"performance_state_"+s] = 0 + } + } + } + + for px := range nv.gpus { + if !seen[px] { + delete(nv.gpus, px) + nv.removeCharts(px) + } + } + + return nil +} + 
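+// Note: the parsing above assumes CSV output shaped roughly like the
+// following (an illustrative sketch, not verbatim nvidia-smi output):
+//
+//	uuid, name, utilization.gpu
+//	GPU-aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee, Tesla P100-PCIE-16GB, 0
+//
+// The first record is the header and is skipped; every subsequent record
+// must contain exactly one value per queried property, in query order.
+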
+type (
+	csvGPUInfo struct {
+		uuid                  string
+		name                  string
+		fanSpeed              string
+		pstate                string
+		utilizationGPU        string
+		utilizationMemory     string
+		memoryUsed            string
+		memoryFree            string
+		memoryReserved        string
+		temperatureGPU        string
+		clocksCurrentGraphics string
+		clocksCurrentVideo    string
+		clocksCurrentSM       string
+		clocksCurrentMemory   string
+		powerDraw             string
+	}
+)
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_xml.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_xml.go
new file mode 100644
index 00000000000000..41713f2444dca3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/collect_xml.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+func (nv *NvidiaSMI) collectGPUInfoXML(mx map[string]int64) error {
+	bs, err := nv.exec.queryGPUInfoXML()
+	if err != nil {
+		return fmt.Errorf("error on querying XML GPU info: %v", err)
+	}
+
+	info := &xmlInfo{}
+	if err := xml.Unmarshal(bs, info); err != nil {
+		return fmt.Errorf("error on unmarshaling XML GPU info response: %v", err)
+	}
+
+	seenGPU := make(map[string]bool)
+	seenMIG := make(map[string]bool)
+
+	for _, gpu := range info.GPUs {
+		if !isValidValue(gpu.UUID) {
+			continue
+		}
+
+		px := "gpu_" + gpu.UUID + "_"
+
+		seenGPU[px] = true
+
+		if !nv.gpus[px] {
+			nv.gpus[px] = true
+			nv.addGPUXMLCharts(gpu)
+		}
+
+		addMetric(mx, px+"pcie_bandwidth_usage_rx", gpu.PCI.RxUtil, 1024) // KB => bytes
+		addMetric(mx, px+"pcie_bandwidth_usage_tx", gpu.PCI.TxUtil, 1024) // KB => bytes
+		if max := calcMaxPCIEBandwidth(gpu); max > 0 {
+			rx := parseFloat(gpu.PCI.RxUtil) * 1024 // KB => bytes
+			tx := parseFloat(gpu.PCI.TxUtil) * 1024 // KB => bytes
+			mx[px+"pcie_bandwidth_utilization_rx"] = int64((rx * 100 / max) * 100)
+			mx[px+"pcie_bandwidth_utilization_tx"] = int64((tx * 100 / max) * 100)
+		}
+		addMetric(mx, px+"fan_speed_perc", gpu.FanSpeed, 0)
+		addMetric(mx, px+"gpu_utilization", gpu.Utilization.GpuUtil, 0)
+		addMetric(mx, px+"mem_utilization", gpu.Utilization.MemoryUtil, 0)
+		addMetric(mx, px+"decoder_utilization", gpu.Utilization.DecoderUtil, 0)
+		addMetric(mx, px+"encoder_utilization", gpu.Utilization.EncoderUtil, 0)
+		addMetric(mx, px+"frame_buffer_memory_usage_free", gpu.FBMemoryUsage.Free, 1024*1024)         // MiB => bytes
+		addMetric(mx, px+"frame_buffer_memory_usage_used", gpu.FBMemoryUsage.Used, 1024*1024)         // MiB => bytes
+		addMetric(mx, px+"frame_buffer_memory_usage_reserved", gpu.FBMemoryUsage.Reserved, 1024*1024) // MiB => bytes
+		addMetric(mx, px+"bar1_memory_usage_free", gpu.Bar1MemoryUsage.Free, 1024*1024)               // MiB => bytes
+		addMetric(mx, px+"bar1_memory_usage_used", gpu.Bar1MemoryUsage.Used, 1024*1024)               // MiB => bytes
+		addMetric(mx, px+"temperature", gpu.Temperature.GpuTemp, 0)
+		addMetric(mx, px+"graphics_clock", gpu.Clocks.GraphicsClock, 0)
+		addMetric(mx, px+"video_clock", gpu.Clocks.VideoClock, 0)
+		addMetric(mx, px+"sm_clock", gpu.Clocks.SmClock, 0)
+		addMetric(mx, px+"mem_clock", gpu.Clocks.MemClock, 0)
+		if gpu.PowerReadings != nil {
+			addMetric(mx, px+"power_draw", gpu.PowerReadings.PowerDraw, 0)
+		} else if gpu.GPUPowerReadings != nil {
+			addMetric(mx, px+"power_draw", gpu.GPUPowerReadings.PowerDraw, 0)
+		}
+		addMetric(mx, px+"voltage", gpu.Voltage.GraphicsVolt, 0)
+		for i := 0; i < 16; i++ {
+			s := "P" + strconv.Itoa(i)
+			mx[px+"performance_state_"+s] = boolToInt(gpu.PerformanceState == s)
+		}
+		if isValidValue(gpu.MIGMode.CurrentMIG) {
+			mode := strings.ToLower(gpu.MIGMode.CurrentMIG)
+			mx[px+"mig_current_mode_enabled"] = boolToInt(mode == "enabled")
+			mx[px+"mig_current_mode_disabled"] = boolToInt(mode == "disabled")
+			mx[px+"mig_devices_count"] = int64(len(gpu.MIGDevices.MIGDevice))
+		}
+
+		for _, mig := range gpu.MIGDevices.MIGDevice {
+			if !isValidValue(mig.GPUInstanceID) {
+				continue
+			}
+
+			px := "mig_instance_" + mig.GPUInstanceID + "_" + px
+
+			seenMIG[px] = true
+
+			if !nv.migs[px] {
+				nv.migs[px] = true
+				nv.addMIGDeviceXMLCharts(gpu, mig)
+			}
+
+			addMetric(mx, px+"ecc_error_sram_uncorrectable", mig.ECCErrorCount.VolatileCount.SRAMUncorrectable, 0)
+			addMetric(mx, px+"frame_buffer_memory_usage_free", mig.FBMemoryUsage.Free, 1024*1024)         // MiB => bytes
+			addMetric(mx, px+"frame_buffer_memory_usage_used", mig.FBMemoryUsage.Used, 1024*1024)         // MiB => bytes
+			addMetric(mx, px+"frame_buffer_memory_usage_reserved", mig.FBMemoryUsage.Reserved, 1024*1024) // MiB => bytes
+			addMetric(mx, px+"bar1_memory_usage_free", mig.BAR1MemoryUsage.Free, 1024*1024)               // MiB => bytes
+			addMetric(mx, px+"bar1_memory_usage_used", mig.BAR1MemoryUsage.Used, 1024*1024)               // MiB => bytes
+		}
+	}
+
+	for px := range nv.gpus {
+		if !seenGPU[px] {
+			delete(nv.gpus, px)
+			nv.removeCharts(px)
+		}
+	}
+
+	for px := range nv.migs {
+		if !seenMIG[px] {
+			delete(nv.migs, px)
+			nv.removeCharts(px)
+		}
+	}
+
+	return nil
+}
+
+func calcMaxPCIEBandwidth(gpu xmlGPUInfo) float64 {
+	gen := gpu.PCI.PCIGPULinkInfo.PCIEGen.MaxLinkGen
+	width := strings.TrimSuffix(gpu.PCI.PCIGPULinkInfo.LinkWidths.MaxLinkWidth, "x")
+
+	if !isValidValue(gen) || !isValidValue(width) {
+		return 0
+	}
+
+	// https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
+	var speed, enc float64
+	switch gen {
+	case "1":
+		speed, enc = 2.5, 1.0/5 // 8b/10b encoding
+	case "2":
+		speed, enc = 5, 1.0/5 // 8b/10b encoding
+	case "3":
+		speed, enc = 8, 2.0/130 // 128b/130b encoding
+	case "4":
+		speed, enc = 16, 2.0/130
+	case "5":
+		speed, enc = 32, 2.0/130
+	default:
+		return 0
+	}
+
+	// Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s
+	return (speed*parseFloat(width)*(1-enc) - 1) * 1e9 / 8 // Gb/s => bytes
+}
+
+type (
+	xmlInfo struct {
+		GPUs []xmlGPUInfo `xml:"gpu"`
+	}
+	xmlGPUInfo struct {
+		ID                  string `xml:"id,attr"`
+		ProductName         string `xml:"product_name"`
+		ProductBrand        string `xml:"product_brand"`
+		ProductArchitecture string `xml:"product_architecture"`
+		UUID                string `xml:"uuid"`
+		FanSpeed            string `xml:"fan_speed"`
+		PerformanceState    string `xml:"performance_state"`
+		MIGMode             struct {
+			CurrentMIG string `xml:"current_mig"`
+		} `xml:"mig_mode"`
+		MIGDevices struct {
+			MIGDevice []xmlMIGDeviceInfo `xml:"mig_device"`
+		} `xml:"mig_devices"`
+		PCI struct {
+			TxUtil         string `xml:"tx_util"`
+			RxUtil         string `xml:"rx_util"`
+			PCIGPULinkInfo struct {
+				PCIEGen struct {
+					MaxLinkGen string `xml:"max_link_gen"`
+				} `xml:"pcie_gen"`
+				LinkWidths struct {
+					MaxLinkWidth string `xml:"max_link_width"`
+				} `xml:"link_widths"`
+			} `xml:"pci_gpu_link_info"`
+		} `xml:"pci"`
+		Utilization struct {
+			GpuUtil     string `xml:"gpu_util"`
+			MemoryUtil  string `xml:"memory_util"`
+			EncoderUtil string `xml:"encoder_util"`
+			DecoderUtil string `xml:"decoder_util"`
+		} `xml:"utilization"`
+		FBMemoryUsage struct {
+			Total    string `xml:"total"`
+			Reserved string `xml:"reserved"`
+			Used     string `xml:"used"`
+			Free     string `xml:"free"`
+		} `xml:"fb_memory_usage"`
+		Bar1MemoryUsage struct {
+			Total string `xml:"total"`
+			Used  string `xml:"used"`
+			Free  string `xml:"free"`
+		} `xml:"bar1_memory_usage"`
+		Temperature struct {
+			GpuTemp                string `xml:"gpu_temp"`
+			GpuTempMaxThreshold    string `xml:"gpu_temp_max_threshold"`
+			GpuTempSlowThreshold   string `xml:"gpu_temp_slow_threshold"`
+			GpuTempMaxGpuThreshold string `xml:"gpu_temp_max_gpu_threshold"`
+			GpuTargetTemperature   string `xml:"gpu_target_temperature"`
+			MemoryTemp             string `xml:"memory_temp"`
+			GpuTempMaxMemThreshold string `xml:"gpu_temp_max_mem_threshold"`
+		} `xml:"temperature"`
+		Clocks struct {
+			GraphicsClock string `xml:"graphics_clock"`
+			SmClock       string `xml:"sm_clock"`
+			MemClock      string `xml:"mem_clock"`
+			VideoClock    string `xml:"video_clock"`
+		} `xml:"clocks"`
+		PowerReadings    *xmlPowerReadings `xml:"power_readings"`
+		GPUPowerReadings *xmlPowerReadings `xml:"gpu_power_readings"`
+		Voltage          struct {
+			GraphicsVolt string `xml:"graphics_volt"`
+		} `xml:"voltage"`
+		Processes struct {
+			ProcessInfo []struct {
+				PID         string `xml:"pid"`
+				ProcessName string `xml:"process_name"`
+				UsedMemory  string `xml:"used_memory"`
+			} `xml:"process_info"`
+		} `xml:"processes"`
+	}
+
+	xmlPowerReadings struct {
+		//PowerState string `xml:"power_state"`
+		//PowerManagement string `xml:"power_management"`
+		PowerDraw string `xml:"power_draw"`
+		//PowerLimit string `xml:"power_limit"`
+		//DefaultPowerLimit string `xml:"default_power_limit"`
+		//EnforcedPowerLimit string `xml:"enforced_power_limit"`
+		//MinPowerLimit string `xml:"min_power_limit"`
+		//MaxPowerLimit string `xml:"max_power_limit"`
+	}
+
+	xmlMIGDeviceInfo struct {
+		Index             string `xml:"index"`
+		GPUInstanceID     string `xml:"gpu_instance_id"`
+		ComputeInstanceID string `xml:"compute_instance_id"`
+		DeviceAttributes  struct {
+			Shared struct {
+				MultiprocessorCount string `xml:"multiprocessor_count"`
+				CopyEngineCount     string `xml:"copy_engine_count"`
+				EncoderCount        string `xml:"encoder_count"`
+				DecoderCount        string `xml:"decoder_count"`
+				OFACount            string `xml:"ofa_count"`
+				JPGCount            string `xml:"jpg_count"`
+			} `xml:"shared"`
+		} `xml:"device_attributes"`
+		ECCErrorCount struct {
+			VolatileCount struct {
+				SRAMUncorrectable string `xml:"sram_uncorrectable"`
+			} `xml:"volatile_count"`
+		} `xml:"ecc_error_count"`
+		FBMemoryUsage struct {
+			Free     string `xml:"free"`
+			Used     string `xml:"used"`
+			Reserved string `xml:"reserved"`
+		} `xml:"fb_memory_usage"`
+		BAR1MemoryUsage struct {
+			Free string `xml:"free"`
+			Used string `xml:"used"`
+		} `xml:"bar1_memory_usage"`
+	}
+)
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/config_schema.json b/src/go/collectors/go.d.plugin/modules/nvidia_smi/config_schema.json
new file mode 100644
index 00000000000000..fc5b38e085b9c4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/config_schema.json
@@ -0,0 +1,25 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/nvidia_smi job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    },
+    "binary_path": {
+      "type": "string"
+    },
+    "use_csv_format": {
+      "type": "boolean"
+    }
+  },
+  "required": [
+    "name"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/exec.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/exec.go
new file mode 100644
index 00000000000000..93e23057b5e25b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/exec.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/logger"
+)
+
+func newNvidiaSMIExec(path string, cfg Config, log *logger.Logger) (*nvidiaSMIExec, error) {
+	return &nvidiaSMIExec{
+		binPath: path,
+		timeout: cfg.Timeout.Duration,
+		Logger:  log,
+	}, nil
+}
+
+type nvidiaSMIExec struct {
+	binPath string
+	timeout time.Duration
+	*logger.Logger
+}
+
+func (e *nvidiaSMIExec) queryGPUInfoXML() ([]byte, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, e.binPath, "-q", "-x")
+
+	e.Debugf("executing '%s'", cmd)
+	bs, err := cmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+	}
+
+	return bs, nil
+}
+
+func (e *nvidiaSMIExec) queryGPUInfoCSV(properties []string) ([]byte, error) {
+	if len(properties) == 0 {
+		return nil, errors.New("cannot query CSV GPU info without properties")
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, e.binPath, "--query-gpu="+strings.Join(properties, ","), "--format=csv,nounits")
+
+	e.Debugf("executing '%s'", cmd)
+
+	bs, err := cmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+	}
+
+	return bs, nil
+}
+
+func (e *nvidiaSMIExec) queryHelpQueryGPU() ([]byte, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, e.binPath, "--help-query-gpu")
+
+	e.Debugf("executing '%s'", cmd)
+	bs, err := cmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+	}
+
+	return bs, err
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/init.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/init.go
new file mode 100644
index 00000000000000..d8a815bb44327e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/init.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+)
+
+func (nv *NvidiaSMI) initNvidiaSMIExec() (nvidiaSMI, error) {
+	binPath := nv.BinaryPath
+	if _, err := os.Stat(binPath); os.IsNotExist(err) {
+		path, err := exec.LookPath(nv.binName)
+		if err != nil {
+			return nil, fmt.Errorf("error on lookup '%s': %v", nv.binName, err)
+		}
+		binPath = path
+	}
+
+	return newNvidiaSMIExec(binPath, nv.Config, nv.Logger)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/integrations/nvidia_gpu.md b/src/go/collectors/go.d.plugin/modules/nvidia_smi/integrations/nvidia_gpu.md
new file mode 100644
index 00000000000000..0b49d21d243e05
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/integrations/nvidia_gpu.md
@@ -0,0 +1,217 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/nvidia_smi/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/nvidia_smi/metadata.yaml"
+sidebar_label: "Nvidia GPU"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Nvidia GPU
+
+
+<img src="https://netdata.cloud/img/nvidia.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nvidia_smi
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors GPU performance metrics using
+the [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.
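+
+Under the hood, the collector shells out to `nvidia-smi`. To verify that the tool works on your node, you can run the same kinds of queries manually (a sketch; the CSV property list below is an illustrative subset, the collector discovers the real one at runtime):
+
+```bash
+# XML mode: one full report for all GPUs
+nvidia-smi -q -x
+
+# CSV mode: only the selected properties
+nvidia-smi --query-gpu=uuid,name,utilization.gpu --format=csv,nounits
+```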
+ +> **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per gpu + +These metrics refer to the GPU. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| uuid | GPU id (e.g. 00000000:00:04.0) | +| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) | + +Metrics: + +| Metric | Dimensions | Unit | XML | CSV | +|:------|:----------|:----|:---:|:---:| +| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s | • | | +| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % | • | | +| nvidia_smi.gpu_fan_speed_perc | fan_speed | % | • | • | +| nvidia_smi.gpu_utilization | gpu | % | • | • | +| nvidia_smi.gpu_memory_utilization | memory | % | • | • | +| nvidia_smi.gpu_decoder_utilization | decoder | % | • | | +| nvidia_smi.gpu_encoder_utilization | encoder | % | • | | +| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B | • | • | +| nvidia_smi.gpu_bar1_memory_usage | free, used | B | • | | +| nvidia_smi.gpu_temperature | temperature | Celsius | • | • | +| nvidia_smi.gpu_voltage | voltage | V | • | | +| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz | • | • | +| nvidia_smi.gpu_power_draw | power_draw | Watts | • | • | +| nvidia_smi.gpu_performance_state | P0-P15 | state | • | • | +| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status | • | | +| nvidia_smi.gpu_mig_devices_count | mig | devices | • | | + +### Per mig + +These metrics refer to the Multi-Instance GPU (MIG). + +Labels: + +| Label | Description | +|:-----------|:----------------| +| uuid | GPU id (e.g. 00000000:00:04.0) | +| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) | +| gpu_instance_id | GPU instance id (e.g. 1) | + +Metrics: + +| Metric | Dimensions | Unit | XML | CSV | +|:------|:----------|:----|:---:|:---:| +| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B | • | | +| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B | • | | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable in go.d.conf. + +This collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/nvidia_smi.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/nvidia_smi.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. 
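+
+For example, placing them at the top level of `go.d/nvidia_smi.conf` applies them to every job defined in the file (a minimal sketch; the values shown are illustrative, not recommendations):
+
+```yaml
+# global options, inherited by all jobs below unless overridden per job
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: nvidia_smi
+```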
+ + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| binary_path | Path to nvidia_smi binary. The default is "nvidia_smi" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia_smi | no | +| timeout | nvidia_smi binary execution timeout. | 2 | no | +| use_csv_format | Used format when requesting GPU information. XML is used if set to 'no'. | yes | no | + +</details> + +#### Examples + +##### XML format + +Use XML format when requesting GPU information. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: nvidia_smi + use_csv_format: no + +``` +</details> + +##### Custom binary path + +The executable is not in the directories specified in the PATH environment variable. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: nvidia_smi + binary_path: /usr/local/sbin/nvidia_smi + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m nvidia_smi + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/metadata.yaml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/metadata.yaml new file mode 100644 index 00000000000000..e18370bafd97bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/metadata.yaml @@ -0,0 +1,296 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-nvidia_smi + plugin_name: go.d.plugin + module_name: nvidia_smi + monitored_instance: + name: Nvidia GPU + link: https://www.nvidia.com/en-us/ + icon_filename: nvidia.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: + - nvidia + - gpu + - hardware + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors GPUs performance metrics using + the [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool. + + > **Warning**: under development, [loop mode](https://github.com/netdata/netdata/issues/14522) not implemented yet. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable in go.d.conf. + description: | + This collector is disabled by default. You need to explicitly enable it in the `go.d.conf` file. + configuration: + file: + name: go.d/nvidia_smi.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. 
+ folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 10 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: binary_path + description: Path to nvidia_smi binary. The default is "nvidia_smi" and the executable is looked for in the directories specified in the PATH environment variable. + default_value: nvidia_smi + required: false + - name: timeout + description: nvidia_smi binary execution timeout. + default_value: 2 + required: false + - name: use_csv_format + description: Used format when requesting GPU information. XML is used if set to 'no'. + default_value: true + required: false + details: | + This module supports data collection in CSV and XML formats. The default is CSV. + + - XML provides more metrics, but requesting GPU information consumes more CPU, especially if there are multiple GPUs in the system. + - CSV provides fewer metrics, but is much lighter than XML in terms of CPU usage. + examples: + folding: + title: Config + enabled: true + list: + - name: XML format + description: Use XML format when requesting GPU information. + config: | + jobs: + - name: nvidia_smi + use_csv_format: no + - name: Custom binary path + description: The executable is not in the directories specified in the PATH environment variable. + config: | + jobs: + - name: nvidia_smi + binary_path: /usr/local/sbin/nvidia_smi + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: + - XML + - CSV + scopes: + - name: gpu + description: These metrics refer to the GPU. + labels: + - name: uuid + description: GPU id (e.g. 00000000:00:04.0) + - name: product_name + description: GPU product name (e.g. 
NVIDIA A100-SXM4-40GB) + metrics: + - name: nvidia_smi.gpu_pcie_bandwidth_usage + availability: + - XML + description: PCI Express Bandwidth Usage + unit: B/s + chart_type: line + dimensions: + - name: rx + - name: tx + - name: nvidia_smi.gpu_pcie_bandwidth_utilization + availability: + - XML + description: PCI Express Bandwidth Utilization + unit: '%' + chart_type: line + dimensions: + - name: rx + - name: tx + - name: nvidia_smi.gpu_fan_speed_perc + availability: + - XML + - CSV + description: Fan speed + unit: '%' + chart_type: line + dimensions: + - name: fan_speed + - name: nvidia_smi.gpu_utilization + availability: + - XML + - CSV + description: GPU utilization + unit: '%' + chart_type: line + dimensions: + - name: gpu + - name: nvidia_smi.gpu_memory_utilization + availability: + - XML + - CSV + description: Memory utilization + unit: '%' + chart_type: line + dimensions: + - name: memory + - name: nvidia_smi.gpu_decoder_utilization + availability: + - XML + description: Decoder utilization + unit: '%' + chart_type: line + dimensions: + - name: decoder + - name: nvidia_smi.gpu_encoder_utilization + availability: + - XML + description: Encoder utilization + unit: '%' + chart_type: line + dimensions: + - name: encoder + - name: nvidia_smi.gpu_frame_buffer_memory_usage + availability: + - XML + - CSV + description: Frame buffer memory usage + unit: B + chart_type: stacked + dimensions: + - name: free + - name: used + - name: reserved + - name: nvidia_smi.gpu_bar1_memory_usage + availability: + - XML + description: BAR1 memory usage + unit: B + chart_type: stacked + dimensions: + - name: free + - name: used + - name: nvidia_smi.gpu_temperature + availability: + - XML + - CSV + description: Temperature + unit: Celsius + chart_type: line + dimensions: + - name: temperature + - name: nvidia_smi.gpu_voltage + availability: + - XML + description: Voltage + unit: V + chart_type: line + dimensions: + - name: voltage + - name: nvidia_smi.gpu_clock_freq + availability: + - XML + - CSV + description: Clock current frequency + unit: MHz + chart_type: line + dimensions: + - name: graphics + - name: video + - name: sm + - name: mem + - name: nvidia_smi.gpu_power_draw + availability: + - XML + - CSV + description: Power draw + unit: Watts + chart_type: line + dimensions: + - name: power_draw + - name: nvidia_smi.gpu_performance_state + availability: + - XML + - CSV + description: Performance state + unit: state + chart_type: line + dimensions: + - name: P0-P15 + - name: nvidia_smi.gpu_mig_mode_current_status + availability: + - XML + description: MIG current mode + unit: status + chart_type: line + dimensions: + - name: enabled + - name: disabled + - name: nvidia_smi.gpu_mig_devices_count + availability: + - XML + description: MIG devices + unit: devices + chart_type: line + dimensions: + - name: mig + - name: mig + description: These metrics refer to the Multi-Instance GPU (MIG). + labels: + - name: uuid + description: GPU id (e.g. 00000000:00:04.0) + - name: product_name + description: GPU product name (e.g. NVIDIA A100-SXM4-40GB) + - name: gpu_instance_id + description: GPU instance id (e.g. 
1) + metrics: + - name: nvidia_smi.gpu_mig_frame_buffer_memory_usage + availability: + - XML + description: Frame buffer memory usage + unit: B + chart_type: stacked + dimensions: + - name: free + - name: used + - name: reserved + - name: nvidia_smi.gpu_mig_bar1_memory_usage + availability: + - XML + description: BAR1 memory usage + unit: B + chart_type: stacked + dimensions: + - name: free + - name: used diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi.go new file mode 100644 index 00000000000000..1370b433563f09 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi.go @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvidia_smi + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("nvidia_smi", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + Disabled: true, + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *NvidiaSMI { + return &NvidiaSMI{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 10}, + UseCSVFormat: true, + }, + binName: "nvidia-smi", + charts: &module.Charts{}, + gpus: make(map[string]bool), + migs: make(map[string]bool), + } + +} + +type Config struct { + Timeout web.Duration + BinaryPath string `yaml:"binary_path"` + UseCSVFormat bool `yaml:"use_csv_format"` +} + +type ( + NvidiaSMI struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + binName string + exec nvidiaSMI + + gpuQueryProperties []string + + gpus map[string]bool + migs map[string]bool + } + nvidiaSMI interface { + queryGPUInfoXML() ([]byte, error) + queryGPUInfoCSV(properties []string) ([]byte, error) + queryHelpQueryGPU() ([]byte, error) + } +) + +func (nv *NvidiaSMI) Init() bool { + if nv.exec == nil { + smi, err := nv.initNvidiaSMIExec() + if err != nil { + nv.Error(err) + return false + } + nv.exec = smi + } + + return true +} + +func (nv *NvidiaSMI) Check() bool { + return len(nv.Collect()) > 0 +} + +func (nv *NvidiaSMI) Charts() *module.Charts { + return nv.charts +} + +func (nv *NvidiaSMI) Collect() map[string]int64 { + mx, err := nv.collect() + if err != nil { + nv.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (nv *NvidiaSMI) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi_test.go b/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi_test.go new file mode 100644 index 00000000000000..cdd7742fd46ac1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/nvidia_smi_test.go @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvidia_smi + +import ( + "errors" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataXMLRTX2080Win, _ = os.ReadFile("testdata/rtx-2080-win.xml") + dataXMLRTX4090Driver535, _ = os.ReadFile("testdata/rtx-4090-driver-535.xml") + dataXMLRTX3060, _ = os.ReadFile("testdata/rtx-3060.xml") + dataXMLTeslaP100, _ = os.ReadFile("testdata/tesla-p100.xml") + + dataXMLA100SXM4MIG, _ = os.ReadFile("testdata/a100-sxm4-mig.xml") + + dataHelpQueryGPU, _ = os.ReadFile("testdata/help-query-gpu.txt") + dataCSVTeslaP100, _ = os.ReadFile("testdata/tesla-p100.csv") +) + +func Test_testDataIsValid(t 
*testing.T) {
+	for name, data := range map[string][]byte{
+		"dataXMLRTX2080Win":       dataXMLRTX2080Win,
+		"dataXMLRTX4090Driver535": dataXMLRTX4090Driver535,
+		"dataXMLRTX3060":          dataXMLRTX3060,
+		"dataXMLTeslaP100":        dataXMLTeslaP100,
+
+		"dataXMLA100SXM4MIG": dataXMLA100SXM4MIG,
+
+		"dataHelpQueryGPU": dataHelpQueryGPU,
+		"dataCSVTeslaP100": dataCSVTeslaP100,
+	} {
+		require.NotNilf(t, data, name)
+	}
+}
+
+func TestNvidiaSMI_Init(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func(nv *NvidiaSMI)
+		wantFail bool
+	}{
+		"fails if can't locate nvidia-smi": {
+			wantFail: true,
+			prepare: func(nv *NvidiaSMI) {
+				nv.binName += "!!!"
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			nv := New()
+
+			test.prepare(nv)
+
+			if test.wantFail {
+				assert.False(t, nv.Init())
+			} else {
+				assert.True(t, nv.Init())
+			}
+		})
+	}
+}
+
+func TestNvidiaSMI_Charts(t *testing.T) {
+	assert.NotNil(t, New().Charts())
+}
+
+func TestNvidiaSMI_Check(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func(nv *NvidiaSMI)
+		wantFail bool
+	}{
+		"success A100-SXM4 MIG [XML]": {
+			wantFail: false,
+			prepare:  prepareCaseMIGA100formatXML,
+		},
+		"success RTX 3060 [XML]": {
+			wantFail: false,
+			prepare:  prepareCaseRTX3060formatXML,
+		},
+		"success Tesla P100 [XML]": {
+			wantFail: false,
+			prepare:  prepareCaseTeslaP100formatXML,
+		},
+		"success Tesla P100 [CSV]": {
+			wantFail: false,
+			prepare:  prepareCaseTeslaP100formatCSV,
+		},
+		"success RTX 2080 Win [XML]": {
+			wantFail: false,
+			prepare:  prepareCaseRTX2080WinFormatXML,
+		},
+		"fail on queryGPUInfoXML error": {
+			wantFail: true,
+			prepare:  prepareCaseErrOnQueryGPUInfoXML,
+		},
+		"fail on queryGPUInfoCSV error": {
+			wantFail: true,
+			prepare:  prepareCaseErrOnQueryGPUInfoCSV,
+		},
+		"fail on queryHelpQueryGPU error": {
+			wantFail: true,
+			prepare:  prepareCaseErrOnQueryHelpQueryGPU,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			nv := New()
+
+			test.prepare(nv)
+
+			if test.wantFail {
+				assert.False(t, nv.Check())
+			} else {
+				assert.True(t, nv.Check())
+			}
+		})
+	}
+}
+
+func TestNvidiaSMI_Collect(t *testing.T) {
+	type testCaseStep struct {
+		prepare func(nv *NvidiaSMI)
+		check   func(t *testing.T, nv *NvidiaSMI)
+	}
+	tests := map[string][]testCaseStep{
+		"success A100-SXM4 MIG [XML]": {
+			{
+				prepare: prepareCaseMIGA100formatXML,
+				check: func(t *testing.T, nv *NvidiaSMI) {
+					mx := nv.Collect()
+
+					expected := map[string]int64{
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 68718428160,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 1048576,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 42273341440,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 634388480,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 39845888,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_graphics_clock": 1410,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mem_clock": 1215,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_current_mode_disabled": 0,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_current_mode_enabled": 1,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_devices_count": 2,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_usage_rx": 0,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_usage_tx": 0,
+						"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_utilization_rx": 0,
+						
"gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_utilization_tx": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P0": 1, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P1": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P10": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P11": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P12": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P13": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P14": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P15": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P2": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P3": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P4": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P5": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P6": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P7": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P8": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P9": 0, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_power_draw": 66, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_sm_clock": 1410, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_temperature": 36, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_video_clock": 1275, + "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_voltage": 881, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 34358689792, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 0, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_ecc_error_sram_uncorrectable": 0, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 20916994048, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 0, + "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 19922944, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 34358689792, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 0, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_ecc_error_sram_uncorrectable": 0, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 20916994048, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 0, + "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 19922944, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success RTX 4090 Driver 535 [XML]": { + { + prepare: prepareCaseRTX4090Driver535formatXML, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + expected := map[string]int64{ + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_bar1_memory_usage_free": 267386880, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_bar1_memory_usage_used": 1048576, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_decoder_utilization": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_encoder_utilization": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_fan_speed_perc": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_free": 25390219264, + 
"gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_reserved": 362807296, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_used": 2097152, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_gpu_utilization": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_graphics_clock": 210, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_mem_clock": 405, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_mem_utilization": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_usage_rx": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_usage_tx": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_utilization_rx": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_utilization_tx": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P0": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P1": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P10": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P11": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P12": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P13": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P14": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P15": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P2": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P3": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P4": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P5": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P6": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P7": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P8": 1, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P9": 0, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_power_draw": 26, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_sm_clock": 210, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_temperature": 40, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_video_clock": 1185, + "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_voltage": 880, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success RTX 3060 [XML]": { + { + prepare: prepareCaseRTX3060formatXML, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + expected := map[string]int64{ + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_bar1_memory_usage_free": 8586788864, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_bar1_memory_usage_used": 3145728, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_decoder_utilization": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_encoder_utilization": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_free": 6228541440, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_reserved": 206569472, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_used": 5242880, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_gpu_utilization": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_graphics_clock": 210, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_mem_clock": 405, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_mem_utilization": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_usage_rx": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_usage_tx": 0, + 
"gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_utilization_rx": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_utilization_tx": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P0": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P1": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P10": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P11": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P12": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P13": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P14": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P15": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P2": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P3": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P4": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P5": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P6": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P7": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P8": 1, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P9": 0, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_power_draw": 8, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_sm_clock": 210, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_temperature": 45, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_video_clock": 555, + "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_voltage": 631, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success Tesla P100 [XML]": { + { + prepare: prepareCaseTeslaP100formatXML, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + expected := map[string]int64{ + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_bar1_memory_usage_free": 17177772032, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_bar1_memory_usage_used": 2097152, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_decoder_utilization": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_encoder_utilization": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_free": 17070817280, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_reserved": 108003328, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_used": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_gpu_utilization": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_graphics_clock": 405, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_mem_clock": 715, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_mem_utilization": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_usage_rx": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_usage_tx": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_utilization_rx": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_utilization_tx": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P0": 1, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P1": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P10": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P11": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P12": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P13": 0, + 
"gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P14": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P15": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P2": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P3": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P4": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P5": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P6": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P7": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P8": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P9": 0, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_power_draw": 26, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_sm_clock": 405, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_temperature": 38, + "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_video_clock": 835, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success Tesla P100 [CSV]": { + { + prepare: prepareCaseTeslaP100formatCSV, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + expected := map[string]int64{ + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_frame_buffer_memory_usage_free": 17070817280, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_frame_buffer_memory_usage_reserved": 108003328, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_frame_buffer_memory_usage_used": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_gpu_utilization": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_graphics_clock": 405, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_mem_clock": 715, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_mem_utilization": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P0": 1, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P1": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P10": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P11": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P12": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P13": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P14": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P15": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P2": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P3": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P4": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P5": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P6": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P7": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P8": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_performance_state_P9": 0, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_power_draw": 28, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_sm_clock": 405, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_temperature": 37, + "gpu_GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6_video_clock": 835, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success RTX 2080 Win [XML]": { + { + prepare: prepareCaseRTX2080WinFormatXML, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + expected := map[string]int64{ + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_bar1_memory_usage_free": 266338304, + 
"gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_bar1_memory_usage_used": 2097152, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_decoder_utilization": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_encoder_utilization": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_fan_speed_perc": 37, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_free": 7494172672, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_reserved": 190840832, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_used": 903872512, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_gpu_utilization": 2, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_graphics_clock": 193, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_mem_clock": 403, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_mem_utilization": 7, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_usage_rx": 93184000, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_usage_tx": 13312000, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_utilization_rx": 58, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_utilization_tx": 8, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P0": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P1": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P10": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P11": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P12": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P13": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P14": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P15": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P2": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P3": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P4": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P5": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P6": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P7": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P8": 1, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P9": 0, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_power_draw": 14, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_sm_clock": 193, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_temperature": 29, + "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_video_clock": 539, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "fail on queryGPUInfoXML error [XML]": { + { + prepare: prepareCaseErrOnQueryGPUInfoXML, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + assert.Equal(t, map[string]int64(nil), mx) + }, + }, + }, + "fail on queryGPUInfoCSV error [CSV]": { + { + prepare: prepareCaseErrOnQueryGPUInfoCSV, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + assert.Equal(t, map[string]int64(nil), mx) + }, + }, + }, + "fail on queryHelpQueryGPU error": { + { + prepare: prepareCaseErrOnQueryHelpQueryGPU, + check: func(t *testing.T, nv *NvidiaSMI) { + mx := nv.Collect() + + assert.Equal(t, map[string]int64(nil), mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nv := New() + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepare(nv) + 
step.check(t, nv) + }) + } + }) + } +} + +type mockNvidiaSMI struct { + gpuInfoXML []byte + errOnQueryGPUInfoXML bool + + gpuInfoCSV []byte + errOnQueryGPUInfoCSV bool + + helpQueryGPU []byte + errOnQueryHelpQueryGPU bool +} + +func (m *mockNvidiaSMI) queryGPUInfoXML() ([]byte, error) { + if m.errOnQueryGPUInfoXML { + return nil, errors.New("error on mock.queryGPUInfoXML()") + } + return m.gpuInfoXML, nil +} + +func (m *mockNvidiaSMI) queryGPUInfoCSV(_ []string) ([]byte, error) { + if m.errOnQueryGPUInfoCSV { + return nil, errors.New("error on mock.queryGPUInfoCSV()") + } + return m.gpuInfoCSV, nil +} + +func (m *mockNvidiaSMI) queryHelpQueryGPU() ([]byte, error) { + if m.errOnQueryHelpQueryGPU { + return nil, errors.New("error on mock.queryHelpQueryGPU()") + } + return m.helpQueryGPU, nil +} + +func prepareCaseMIGA100formatXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{gpuInfoXML: dataXMLA100SXM4MIG} +} + +func prepareCaseRTX3060formatXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{gpuInfoXML: dataXMLRTX3060} +} + +func prepareCaseRTX4090Driver535formatXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{gpuInfoXML: dataXMLRTX4090Driver535} +} + +func prepareCaseTeslaP100formatXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{gpuInfoXML: dataXMLTeslaP100} +} + +func prepareCaseRTX2080WinFormatXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{gpuInfoXML: dataXMLRTX2080Win} +} + +func prepareCaseErrOnQueryGPUInfoXML(nv *NvidiaSMI) { + nv.UseCSVFormat = false + nv.exec = &mockNvidiaSMI{errOnQueryGPUInfoXML: true} +} + +func prepareCaseTeslaP100formatCSV(nv *NvidiaSMI) { + nv.UseCSVFormat = true + nv.exec = &mockNvidiaSMI{helpQueryGPU: dataHelpQueryGPU, gpuInfoCSV: dataCSVTeslaP100} +} + +func prepareCaseErrOnQueryHelpQueryGPU(nv *NvidiaSMI) { + nv.UseCSVFormat = true + nv.exec = &mockNvidiaSMI{errOnQueryHelpQueryGPU: true} +} + +func prepareCaseErrOnQueryGPUInfoCSV(nv *NvidiaSMI) { + nv.UseCSVFormat = true + nv.exec = &mockNvidiaSMI{helpQueryGPU: dataHelpQueryGPU, errOnQueryGPUInfoCSV: true} +} diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/a100-sxm4-mig.xml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/a100-sxm4-mig.xml new file mode 100644 index 00000000000000..74146ac7874a8a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/a100-sxm4-mig.xml @@ -0,0 +1,359 @@ +<?xml version="1.0" ?> +<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd"> +<nvidia_smi_log> + <timestamp>Fri Jan 27 11:32:31 2023</timestamp> + <driver_version>510.47.03</driver_version> + <cuda_version>11.6</cuda_version> + <attached_gpus>1</attached_gpus> + <gpu id="00000000:00:04.0"> + <product_name>NVIDIA A100-SXM4-40GB</product_name> + <product_brand>NVIDIA</product_brand> + <product_architecture>Ampere</product_architecture> + <display_mode>Enabled</display_mode> + <display_active>Disabled</display_active> + <persistence_mode>Disabled</persistence_mode> + <mig_mode> + <current_mig>Enabled</current_mig> + <pending_mig>Enabled</pending_mig> + </mig_mode> + <mig_devices> + <mig_device> + <index>0</index> + <gpu_instance_id>1</gpu_instance_id> + <compute_instance_id>0</compute_instance_id> + <device_attributes> + <shared> + <multiprocessor_count>42</multiprocessor_count> + <copy_engine_count>3</copy_engine_count> + <encoder_count>0</encoder_count> + <decoder_count>2</decoder_count> + <ofa_count>0</ofa_count> + <jpg_count>0</jpg_count> + 
</shared> + </device_attributes> + <ecc_error_count> + <volatile_count> + <sram_uncorrectable>0</sram_uncorrectable> + </volatile_count> + </ecc_error_count> + <fb_memory_usage> + <total>19968 MiB</total> + <reserved>0 MiB</reserved> + <used>19 MiB</used> + <free>19948 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>32767 MiB</total> + <used>0 MiB</used> + <free>32767 MiB</free> + </bar1_memory_usage> + </mig_device> + <mig_device> + <index>1</index> + <gpu_instance_id>2</gpu_instance_id> + <compute_instance_id>0</compute_instance_id> + <device_attributes> + <shared> + <multiprocessor_count>42</multiprocessor_count> + <copy_engine_count>3</copy_engine_count> + <encoder_count>0</encoder_count> + <decoder_count>2</decoder_count> + <ofa_count>0</ofa_count> + <jpg_count>0</jpg_count> + </shared> + </device_attributes> + <ecc_error_count> + <volatile_count> + <sram_uncorrectable>0</sram_uncorrectable> + </volatile_count> + </ecc_error_count> + <fb_memory_usage> + <total>19968 MiB</total> + <reserved>0 MiB</reserved> + <used>19 MiB</used> + <free>19948 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>32767 MiB</total> + <used>0 MiB</used> + <free>32767 MiB</free> + </bar1_memory_usage> + </mig_device> + </mig_devices> + <accounting_mode>Disabled</accounting_mode> + <accounting_mode_buffer_size>4000</accounting_mode_buffer_size> + <driver_model> + <current_dm>N/A</current_dm> + <pending_dm>N/A</pending_dm> + </driver_model> + <serial>1324321002473</serial> + <uuid>GPU-27b94a00-ed54-5c24-b1fd-1054085de32a</uuid> + <minor_number>0</minor_number> + <vbios_version>92.00.45.00.03</vbios_version> + <multigpu_board>No</multigpu_board> + <board_id>0x4</board_id> + <gpu_part_number>692-2G506-0200-003</gpu_part_number> + <gpu_module_id>3</gpu_module_id> + <inforom_version> + <img_version>G506.0200.00.04</img_version> + <oem_object>2.0</oem_object> + <ecc_object>6.16</ecc_object> + <pwr_object>N/A</pwr_object> + </inforom_version> + <gpu_operation_mode> + <current_gom>N/A</current_gom> + <pending_gom>N/A</pending_gom> + </gpu_operation_mode> + <gsp_firmware_version>510.47.03</gsp_firmware_version> + <gpu_virtualization_mode> + <virtualization_mode>Pass-Through</virtualization_mode> + <host_vgpu_mode>N/A</host_vgpu_mode> + </gpu_virtualization_mode> + <ibmnpu> + <relaxed_ordering_mode>N/A</relaxed_ordering_mode> + </ibmnpu> + <pci> + <pci_bus>00</pci_bus> + <pci_device>04</pci_device> + <pci_domain>0000</pci_domain> + <pci_device_id>20B010DE</pci_device_id> + <pci_bus_id>00000000:00:04.0</pci_bus_id> + <pci_sub_system_id>134F10DE</pci_sub_system_id> + <pci_gpu_link_info> + <pcie_gen> + <max_link_gen>4</max_link_gen> + <current_link_gen>4</current_link_gen> + </pcie_gen> + <link_widths> + <max_link_width>16x</max_link_width> + <current_link_width>16x</current_link_width> + </link_widths> + </pci_gpu_link_info> + <pci_bridge_chip> + <bridge_chip_type>N/A</bridge_chip_type> + <bridge_chip_fw>N/A</bridge_chip_fw> + </pci_bridge_chip> + <replay_counter>0</replay_counter> + <replay_rollover_counter>0</replay_rollover_counter> + <tx_util>0 KB/s</tx_util> + <rx_util>0 KB/s</rx_util> + </pci> + <fan_speed>N/A</fan_speed> + <performance_state>P0</performance_state> + <clocks_throttle_reasons> + <clocks_throttle_reason_gpu_idle>Not Active</clocks_throttle_reason_gpu_idle> + <clocks_throttle_reason_applications_clocks_setting>Not Active + </clocks_throttle_reason_applications_clocks_setting> + <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap> + 
<clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown> + <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown> + <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown> + <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost> + <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown> + <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting> + </clocks_throttle_reasons> + <fb_memory_usage> + <total>40960 MiB</total> + <reserved>605 MiB</reserved> + <used>38 MiB</used> + <free>40315 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>65536 MiB</total> + <used>1 MiB</used> + <free>65535 MiB</free> + </bar1_memory_usage> + <compute_mode>Default</compute_mode> + <utilization> + <gpu_util>N/A</gpu_util> + <memory_util>N/A</memory_util> + <encoder_util>N/A</encoder_util> + <decoder_util>N/A</decoder_util> + </utilization> + <encoder_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </encoder_stats> + <fbc_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </fbc_stats> + <ecc_mode> + <current_ecc>Enabled</current_ecc> + <pending_ecc>Enabled</pending_ecc> + </ecc_mode> + <ecc_errors> + <volatile> + <sram_correctable>N/A</sram_correctable> + <sram_uncorrectable>N/A</sram_uncorrectable> + <dram_correctable>N/A</dram_correctable> + <dram_uncorrectable>N/A</dram_uncorrectable> + </volatile> + <aggregate> + <sram_correctable>0</sram_correctable> + <sram_uncorrectable>0</sram_uncorrectable> + <dram_correctable>0</dram_correctable> + <dram_uncorrectable>0</dram_uncorrectable> + </aggregate> + </ecc_errors> + <retired_pages> + <multiple_single_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </multiple_single_bit_retirement> + <double_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </double_bit_retirement> + <pending_blacklist>N/A</pending_blacklist> + <pending_retirement>N/A</pending_retirement> + </retired_pages> + <remapped_rows>N/A</remapped_rows> + <temperature> + <gpu_temp>36 C</gpu_temp> + <gpu_temp_max_threshold>92 C</gpu_temp_max_threshold> + <gpu_temp_slow_threshold>89 C</gpu_temp_slow_threshold> + <gpu_temp_max_gpu_threshold>85 C</gpu_temp_max_gpu_threshold> + <gpu_target_temperature>N/A</gpu_target_temperature> + <memory_temp>44 C</memory_temp> + <gpu_temp_max_mem_threshold>95 C</gpu_temp_max_mem_threshold> + </temperature> + <supported_gpu_target_temp> + <gpu_target_temp_min>N/A</gpu_target_temp_min> + <gpu_target_temp_max>N/A</gpu_target_temp_max> + </supported_gpu_target_temp> + <power_readings> + <power_state>P0</power_state> + <power_management>Supported</power_management> + <power_draw>66.92 W</power_draw> + <power_limit>400.00 W</power_limit> + <default_power_limit>400.00 W</default_power_limit> + <enforced_power_limit>400.00 W</enforced_power_limit> + <min_power_limit>100.00 W</min_power_limit> + <max_power_limit>400.00 W</max_power_limit> + </power_readings> + <clocks> + <graphics_clock>1410 MHz</graphics_clock> + <sm_clock>1410 MHz</sm_clock> + <mem_clock>1215 MHz</mem_clock> + <video_clock>1275 MHz</video_clock> + </clocks> + <applications_clocks> + <graphics_clock>1095 MHz</graphics_clock> + <mem_clock>1215 
MHz</mem_clock> + </applications_clocks> + <default_applications_clocks> + <graphics_clock>1095 MHz</graphics_clock> + <mem_clock>1215 MHz</mem_clock> + </default_applications_clocks> + <max_clocks> + <graphics_clock>1410 MHz</graphics_clock> + <sm_clock>1410 MHz</sm_clock> + <mem_clock>1215 MHz</mem_clock> + <video_clock>1290 MHz</video_clock> + </max_clocks> + <max_customer_boost_clocks> + <graphics_clock>1410 MHz</graphics_clock> + </max_customer_boost_clocks> + <clock_policy> + <auto_boost>N/A</auto_boost> + <auto_boost_default>N/A</auto_boost_default> + </clock_policy> + <voltage> + <graphics_volt>881.250 mV</graphics_volt> + </voltage> + <supported_clocks> + <supported_mem_clock> + <value>1215 MHz</value> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + <supported_graphics_clock>1245 MHz</supported_graphics_clock> + <supported_graphics_clock>1230 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1200 MHz</supported_graphics_clock> + <supported_graphics_clock>1185 MHz</supported_graphics_clock> + <supported_graphics_clock>1170 MHz</supported_graphics_clock> + <supported_graphics_clock>1155 MHz</supported_graphics_clock> + <supported_graphics_clock>1140 MHz</supported_graphics_clock> + <supported_graphics_clock>1125 MHz</supported_graphics_clock> + <supported_graphics_clock>1110 MHz</supported_graphics_clock> + <supported_graphics_clock>1095 MHz</supported_graphics_clock> + <supported_graphics_clock>1080 MHz</supported_graphics_clock> + <supported_graphics_clock>1065 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1035 MHz</supported_graphics_clock> + <supported_graphics_clock>1020 MHz</supported_graphics_clock> + <supported_graphics_clock>1005 MHz</supported_graphics_clock> + <supported_graphics_clock>990 MHz</supported_graphics_clock> + <supported_graphics_clock>975 MHz</supported_graphics_clock> + <supported_graphics_clock>960 MHz</supported_graphics_clock> + <supported_graphics_clock>945 MHz</supported_graphics_clock> + <supported_graphics_clock>930 MHz</supported_graphics_clock> + <supported_graphics_clock>915 MHz</supported_graphics_clock> + <supported_graphics_clock>900 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>870 MHz</supported_graphics_clock> + <supported_graphics_clock>855 MHz</supported_graphics_clock> + <supported_graphics_clock>840 MHz</supported_graphics_clock> + <supported_graphics_clock>825 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>795 MHz</supported_graphics_clock> + <supported_graphics_clock>780 MHz</supported_graphics_clock> + <supported_graphics_clock>765 MHz</supported_graphics_clock> + <supported_graphics_clock>750 
MHz</supported_graphics_clock> + <supported_graphics_clock>735 MHz</supported_graphics_clock> + <supported_graphics_clock>720 MHz</supported_graphics_clock> + <supported_graphics_clock>705 MHz</supported_graphics_clock> + <supported_graphics_clock>690 MHz</supported_graphics_clock> + <supported_graphics_clock>675 MHz</supported_graphics_clock> + <supported_graphics_clock>660 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + <supported_graphics_clock>285 MHz</supported_graphics_clock> + <supported_graphics_clock>270 MHz</supported_graphics_clock> + <supported_graphics_clock>255 MHz</supported_graphics_clock> + <supported_graphics_clock>240 MHz</supported_graphics_clock> + <supported_graphics_clock>225 MHz</supported_graphics_clock> + <supported_graphics_clock>210 MHz</supported_graphics_clock> + </supported_mem_clock> + </supported_clocks> + <processes> + </processes> + <accounted_processes> + </accounted_processes> + </gpu> + +</nvidia_smi_log> diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/help-query-gpu.txt b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/help-query-gpu.txt new file mode 100644 index 00000000000000..2dd3285e1a6394 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/help-query-gpu.txt @@ -0,0 +1,414 @@ +List of valid properties to query for the switch "--query-gpu=": + +"timestamp" +The timestamp of when the query was made in format "YYYY/MM/DD HH:MM:SS.msec". + +"driver_version" +The version of the installed NVIDIA display driver. This is an alphanumeric string. + +"count" +The number of NVIDIA GPUs in the system. + +"name" or "gpu_name" +The official product name of the GPU. This is an alphanumeric string. For all products. + +"serial" or "gpu_serial" +This number matches the serial number physically printed on each board. It is a globally unique immutable alphanumeric value. + +"uuid" or "gpu_uuid" +This value is the globally unique immutable alphanumeric identifier of the GPU. It does not correspond to any physical label on the board. 
+ +"pci.bus_id" or "gpu_bus_id" +PCI bus id as "domain:bus:device.function", in hex. + +"pci.domain" +PCI domain number, in hex. + +"pci.bus" +PCI bus number, in hex. + +"pci.device" +PCI device number, in hex. + +"pci.device_id" +PCI vendor device id, in hex + +"pci.sub_device_id" +PCI Sub System id, in hex + +"pcie.link.gen.current" +The current PCI-E link generation. These may be reduced when the GPU is not in use. + +"pcie.link.gen.max" +The maximum PCI-E link generation possible with this GPU and system configuration. For example, if the GPU supports a higher PCIe generation than the system supports then this reports the system PCIe generation. + +"pcie.link.width.current" +The current PCI-E link width. These may be reduced when the GPU is not in use. + +"pcie.link.width.max" +The maximum PCI-E link width possible with this GPU and system configuration. For example, if the GPU supports a higher PCIe generation than the system supports then this reports the system PCIe generation. + +"index" +Zero based index of the GPU. Can change at each boot. + +"display_mode" +A flag that indicates whether a physical display (e.g. monitor) is currently connected to any of the GPU's connectors. "Enabled" indicates an attached display. "Disabled" indicates otherwise. + +"display_active" +A flag that indicates whether a display is initialized on the GPU's (e.g. memory is allocated on the device for display). Display can be active even when no monitor is physically attached. "Enabled" indicates an active display. "Disabled" indicates otherwise. + +"persistence_mode" +A flag that indicates whether persistence mode is enabled for the GPU. Value is either "Enabled" or "Disabled". When persistence mode is enabled the NVIDIA driver remains loaded even when no active clients, such as X11 or nvidia-smi, exist. This minimizes the driver load latency associated with running dependent apps, such as CUDA programs. Linux only. + +"accounting.mode" +A flag that indicates whether accounting mode is enabled for the GPU. Value is either "Enabled" or "Disabled". When accounting is enabled statistics are calculated for each compute process running on the GPU.Statistics can be queried during the lifetime or after termination of the process.The execution time of process is reported as 0 while the process is in running state and updated to actualexecution time after the process has terminated. See --help-query-accounted-apps for more info. + +"accounting.buffer_size" +The size of the circular buffer that holds list of processes that can be queried for accounting stats. This is the maximum number of processes that accounting information will be stored for before information about oldest processes will get overwritten by information about new processes. + +Section about driver_model properties +On Windows, the TCC and WDDM driver models are supported. The driver model can be changed with the (-dm) or (-fdm) flags. The TCC driver model is optimized for compute applications. I.E. kernel launch times will be quicker with TCC. The WDDM driver model is designed for graphics applications and is not recommended for compute applications. Linux does not support multiple driver models, and will always have the value of "N/A". Only for selected products. Please see feature matrix in NVML documentation. + +"driver_model.current" +The driver model currently in use. Always "N/A" on Linux. + +"driver_model.pending" +The driver model that will be used on the next reboot. Always "N/A" on Linux. + +"vbios_version" +The BIOS of the GPU board. 
+ +Section about inforom properties +Version numbers for each object in the GPU board's inforom storage. The inforom is a small, persistent store of configuration and state data for the GPU. All inforom version fields are numerical. It can be useful to know these version numbers because some GPU features are only available with inforoms of a certain version or higher. + +"inforom.img" or "inforom.image" +Global version of the infoROM image. Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board in contrast to infoROM object version which is only an indicator of supported features. + +"inforom.oem" +Version for the OEM configuration data. + +"inforom.ecc" +Version for the ECC recording data. + +"inforom.pwr" or "inforom.power" +Version for the power management data. + +Section about gom properties +GOM allows to reduce power usage and optimize GPU throughput by disabling GPU features. Each GOM is designed to meet specific user needs. +In "All On" mode everything is enabled and running at full speed. +The "Compute" mode is designed for running only compute tasks. Graphics operations are not allowed. +The "Low Double Precision" mode is designed for running graphics applications that don't require high bandwidth double precision. +GOM can be changed with the (--gom) flag. + +"gom.current" or "gpu_operation_mode.current" +The GOM currently in use. + +"gom.pending" or "gpu_operation_mode.pending" +The GOM that will be used on the next reboot. + +"fan.speed" +The fan speed value is the percent of the product's maximum noise tolerance fan speed that the device's fan is currently intended to run at. This value may exceed 100% in certain cases. Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, this output will not match the actual fan speed. Many parts do not report fan speeds because they rely on cooling via fans in the surrounding enclosure. + +"pstate" +The current performance state for the GPU. States range from P0 (maximum performance) to P12 (minimum performance). + +Section about clocks_throttle_reasons properties +Retrieves information about factors that are reducing the frequency of clocks. If all throttle reasons are returned as "Not Active" it means that clocks are running as high as possible. + +"clocks_throttle_reasons.supported" +Bitmask of supported clock throttle reasons. See nvml.h for more details. + +"clocks_throttle_reasons.active" +Bitmask of active clock throttle reasons. See nvml.h for more details. + +"clocks_throttle_reasons.gpu_idle" +Nothing is running on the GPU and the clocks are dropping to Idle state. This limiter may be removed in a later release. + +"clocks_throttle_reasons.applications_clocks_setting" +GPU clocks are limited by applications clocks setting. E.g. can be changed by nvidia-smi --applications-clocks= + +"clocks_throttle_reasons.sw_power_cap" +SW Power Scaling algorithm is reducing the clocks below requested clocks because the GPU is consuming too much power. E.g. SW power cap limit can be changed with nvidia-smi --power-limit= + +"clocks_throttle_reasons.hw_slowdown" +HW Slowdown (reducing the core clocks by a factor of 2 or more) is engaged. This is an indicator of: + HW Thermal Slowdown: temperature being too high + HW Power Brake Slowdown: External Power Brake Assertion is triggered (e.g. 
by the system power supply) + * Power draw is too high and Fast Trigger protection is reducing the clocks + * May be also reported during PState or clock change + * This behavior may be removed in a later release + +"clocks_throttle_reasons.hw_thermal_slowdown" +HW Thermal Slowdown (reducing the core clocks by a factor of 2 or more) is engaged. This is an indicator of temperature being too high + +"clocks_throttle_reasons.hw_power_brake_slowdown" +HW Power Brake Slowdown (reducing the core clocks by a factor of 2 or more) is engaged. This is an indicator of External Power Brake Assertion being triggered (e.g. by the system power supply) + +"clocks_throttle_reasons.sw_thermal_slowdown" +SW Thermal capping algorithm is reducing clocks below requested clocks because GPU temperature is higher than Max Operating Temp. + +"clocks_throttle_reasons.sync_boost" +Sync Boost This GPU has been added to a Sync boost group with nvidia-smi or DCGM in + * order to maximize performance per watt. All GPUs in the sync boost group + * will boost to the minimum possible clocks across the entire group. Look at + * the throttle reasons for other GPUs in the system to see why those GPUs are + * holding this one at lower clocks. + +Section about memory properties +On-board memory information. Reported total memory is affected by ECC state. If ECC is enabled the total available memory is decreased by several percent, due to the requisite parity bits. The driver may also reserve a small amount of memory for internal use, even without active work on the GPU. + +"memory.total" +Total installed GPU memory. + +"memory.reserved" +Total memory reserved by the NVIDIA driver and firmware. + +"memory.used" +Total memory allocated by active contexts. + +"memory.free" +Total free memory. + +"compute_mode" +The compute mode flag indicates whether individual or multiple compute applications may run on the GPU. +"0: Default" means multiple contexts are allowed per device. +"1: Exclusive_Thread", deprecated, use Exclusive_Process instead +"2: Prohibited" means no contexts are allowed per device (no compute apps). +"3: Exclusive_Process" means only one context is allowed per device, usable from multiple threads at a time. + +"compute_cap" +The CUDA Compute Capability, represented as Major DOT Minor. + +Section about utilization properties +Utilization rates report how busy each GPU is over time, and can be used to determine how much an application is using the GPUs in the system. + +"utilization.gpu" +Percent of time over the past sample period during which one or more kernels was executing on the GPU. +The sample period may be between 1 second and 1/6 second depending on the product. + +"utilization.memory" +Percent of time over the past sample period during which global (device) memory was being read or written. +The sample period may be between 1 second and 1/6 second depending on the product. + +Section about encoder.stats properties +Encoder stats report number of encoder sessions, average FPS and average latency in us for given GPUs in the system. + +"encoder.stats.sessionCount" +Number of encoder sessions running on the GPU. + +"encoder.stats.averageFps" +Average FPS of all sessions running on the GPU. + +"encoder.stats.averageLatency" +Average latency in microseconds of all sessions running on the GPU. + +Section about ecc.mode properties +A flag that indicates whether ECC support is enabled. May be either "Enabled" or "Disabled". Changes to ECC mode require a reboot. Requires Inforom ECC object version 1.0 or higher. 
+ +"ecc.mode.current" +The ECC mode that the GPU is currently operating under. + +"ecc.mode.pending" +The ECC mode that the GPU will operate under after the next reboot. + +Section about ecc.errors properties +NVIDIA GPUs can provide error counts for various types of ECC errors. Some ECC errors are either single or double bit, where single bit errors are corrected and double bit errors are uncorrectable. Texture memory errors may be correctable via resend or uncorrectable if the resend fails. These errors are available across two timescales (volatile and aggregate). Single bit ECC errors are automatically corrected by the HW and do not result in data corruption. Double bit errors are detected but not corrected. Please see the ECC documents on the web for information on compute application behavior when double bit errors occur. Volatile error counters track the number of errors detected since the last driver load. Aggregate error counts persist indefinitely and thus act as a lifetime counter. + +"ecc.errors.corrected.volatile.device_memory" +Errors detected in global device memory. + +"ecc.errors.corrected.volatile.dram" +Errors detected in global device memory. + +"ecc.errors.corrected.volatile.register_file" +Errors detected in register file memory. + +"ecc.errors.corrected.volatile.l1_cache" +Errors detected in the L1 cache. + +"ecc.errors.corrected.volatile.l2_cache" +Errors detected in the L2 cache. + +"ecc.errors.corrected.volatile.texture_memory" +Parity errors detected in texture memory. + +"ecc.errors.corrected.volatile.cbu" +Parity errors detected in CBU. + +"ecc.errors.corrected.volatile.sram" +Errors detected in global SRAMs. + +"ecc.errors.corrected.volatile.total" +Total errors detected across entire chip. + +"ecc.errors.corrected.aggregate.device_memory" +Errors detected in global device memory. + +"ecc.errors.corrected.aggregate.dram" +Errors detected in global device memory. + +"ecc.errors.corrected.aggregate.register_file" +Errors detected in register file memory. + +"ecc.errors.corrected.aggregate.l1_cache" +Errors detected in the L1 cache. + +"ecc.errors.corrected.aggregate.l2_cache" +Errors detected in the L2 cache. + +"ecc.errors.corrected.aggregate.texture_memory" +Parity errors detected in texture memory. + +"ecc.errors.corrected.aggregate.cbu" +Parity errors detected in CBU. + +"ecc.errors.corrected.aggregate.sram" +Errors detected in global SRAMs. + +"ecc.errors.corrected.aggregate.total" +Total errors detected across entire chip. + +"ecc.errors.uncorrected.volatile.device_memory" +Errors detected in global device memory. + +"ecc.errors.uncorrected.volatile.dram" +Errors detected in global device memory. + +"ecc.errors.uncorrected.volatile.register_file" +Errors detected in register file memory. + +"ecc.errors.uncorrected.volatile.l1_cache" +Errors detected in the L1 cache. + +"ecc.errors.uncorrected.volatile.l2_cache" +Errors detected in the L2 cache. + +"ecc.errors.uncorrected.volatile.texture_memory" +Parity errors detected in texture memory. + +"ecc.errors.uncorrected.volatile.cbu" +Parity errors detected in CBU. + +"ecc.errors.uncorrected.volatile.sram" +Errors detected in global SRAMs. + +"ecc.errors.uncorrected.volatile.total" +Total errors detected across entire chip. + +"ecc.errors.uncorrected.aggregate.device_memory" +Errors detected in global device memory. + +"ecc.errors.uncorrected.aggregate.dram" +Errors detected in global device memory. + +"ecc.errors.uncorrected.aggregate.register_file" +Errors detected in register file memory. 
+ +"ecc.errors.uncorrected.aggregate.l1_cache" +Errors detected in the L1 cache. + +"ecc.errors.uncorrected.aggregate.l2_cache" +Errors detected in the L2 cache. + +"ecc.errors.uncorrected.aggregate.texture_memory" +Parity errors detected in texture memory. + +"ecc.errors.uncorrected.aggregate.cbu" +Parity errors detected in CBU. + +"ecc.errors.uncorrected.aggregate.sram" +Errors detected in global SRAMs. + +"ecc.errors.uncorrected.aggregate.total" +Total errors detected across entire chip. + +Section about retired_pages properties +NVIDIA GPUs can retire pages of GPU device memory when they become unreliable. This can happen when multiple single bit ECC errors occur for the same page, or on a double bit ECC error. When a page is retired, the NVIDIA driver will hide it such that no driver, or application memory allocations can access it. + +"retired_pages.single_bit_ecc.count" or "retired_pages.sbe" +The number of GPU device memory pages that have been retired due to multiple single bit ECC errors. + +"retired_pages.double_bit.count" or "retired_pages.dbe" +The number of GPU device memory pages that have been retired due to a double bit ECC error. + +"retired_pages.pending" +Checks if any GPU device memory pages are pending retirement on the next reboot. Pages that are pending retirement can still be allocated, and may cause further reliability issues. + +"temperature.gpu" + Core GPU temperature. in degrees C. + +"temperature.memory" + HBM memory temperature. in degrees C. + +"power.management" +A flag that indicates whether power management is enabled. Either "Supported" or "[Not Supported]". Requires Inforom PWR object version 3.0 or higher or Kepler device. + +"power.draw" +The last measured power draw for the entire board, in watts. Only available if power management is supported. This reading is accurate to within +/- 5 watts. + +"power.limit" +The software power limit in watts. Set by software like nvidia-smi. On Kepler devices Power Limit can be adjusted using [-pl | --power-limit=] switches. + +"enforced.power.limit" +The power management algorithm's power ceiling, in watts. Total board power draw is manipulated by the power management algorithm such that it stays under this value. This value is the minimum of various power limiters. + +"power.default_limit" +The default power management algorithm's power ceiling, in watts. Power Limit will be set back to Default Power Limit after driver unload. + +"power.min_limit" +The minimum value in watts that power limit can be set to. + +"power.max_limit" +The maximum value in watts that power limit can be set to. + +"clocks.current.graphics" or "clocks.gr" +Current frequency of graphics (shader) clock. + +"clocks.current.sm" or "clocks.sm" +Current frequency of SM (Streaming Multiprocessor) clock. + +"clocks.current.memory" or "clocks.mem" +Current frequency of memory clock. + +"clocks.current.video" or "clocks.video" +Current frequency of video encoder/decoder clock. + +Section about clocks.applications properties +User specified frequency at which applications will be running at. Can be changed with [-ac | --applications-clocks] switches. + +"clocks.applications.graphics" or "clocks.applications.gr" +User specified frequency of graphics (shader) clock. + +"clocks.applications.memory" or "clocks.applications.mem" +User specified frequency of memory clock. + +Section about clocks.default_applications properties +Default frequency at which applications will be running at. 
+Section about clocks.applications properties
+User-specified frequency at which applications will run. Can be changed with the [-ac | --applications-clocks] switches.
+
+"clocks.applications.graphics" or "clocks.applications.gr"
+User-specified frequency of the graphics (shader) clock.
+
+"clocks.applications.memory" or "clocks.applications.mem"
+User-specified frequency of the memory clock.
+
+Section about clocks.default_applications properties
+Default frequency at which applications will run. Application clocks can be changed with the [-ac | --applications-clocks] switches. Application clocks can be set back to default using the [-rac | --reset-applications-clocks] switches.
+
+"clocks.default_applications.graphics" or "clocks.default_applications.gr"
+Default frequency of the applications graphics (shader) clock.
+
+"clocks.default_applications.memory" or "clocks.default_applications.mem"
+Default frequency of the applications memory clock.
+
+Section about clocks.max properties
+Maximum frequency at which parts of the GPU are designed to run.
+
+"clocks.max.graphics" or "clocks.max.gr"
+Maximum frequency of the graphics (shader) clock.
+
+"clocks.max.sm"
+Maximum frequency of the SM (Streaming Multiprocessor) clock.
+
+"clocks.max.memory" or "clocks.max.mem"
+Maximum frequency of the memory clock.
+
+Section about mig.mode properties
+A flag that indicates whether MIG mode is enabled. May be either "Enabled" or "Disabled". Changes to MIG mode require a GPU reset.
+
+"mig.mode.current"
+The MIG mode that the GPU is currently operating under.
+
+"mig.mode.pending"
+The MIG mode that the GPU will operate under after reset.
+
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-2080-win.xml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-2080-win.xml
new file mode 100644
index 00000000000000..9bc0d222020234
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-2080-win.xml
@@ -0,0 +1,776 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd">
+<nvidia_smi_log>
+    <timestamp>Tue Sep 20 14:07:39 2022</timestamp>
+    <driver_version>516.59</driver_version>
+    <cuda_version>11.7</cuda_version>
+    <attached_gpus>1</attached_gpus>
+    <gpu id="00000000:0A:00.0">
+        <product_name>NVIDIA GeForce RTX 2080</product_name>
+        <product_brand>GeForce</product_brand>
+        <product_architecture>Turing</product_architecture>
+        <display_mode>Enabled</display_mode>
+        <display_active>Enabled</display_active>
+        <persistence_mode>N/A</persistence_mode>
+        <mig_mode>
+            <current_mig>N/A</current_mig>
+            <pending_mig>N/A</pending_mig>
+        </mig_mode>
+        <mig_devices>
+            None
+        </mig_devices>
+        <accounting_mode>Disabled</accounting_mode>
+        <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+        <driver_model>
+            <current_dm>WDDM</current_dm>
+            <pending_dm>WDDM</pending_dm>
+        </driver_model>
+        <serial>N/A</serial>
+        <uuid>GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3</uuid>
+        <minor_number>N/A</minor_number>
+        <vbios_version>90.04.23.00.db</vbios_version>
+        <multigpu_board>No</multigpu_board>
+        <board_id>0xa00</board_id>
+        <gpu_part_number>N/A</gpu_part_number>
+        <gpu_module_id>0</gpu_module_id>
+        <inforom_version>
+            <img_version>G001.0000.02.04</img_version>
+            <oem_object>1.1</oem_object>
+            <ecc_object>N/A</ecc_object>
+            <pwr_object>N/A</pwr_object>
+        </inforom_version>
+        <gpu_operation_mode>
+            <current_gom>N/A</current_gom>
+            <pending_gom>N/A</pending_gom>
+        </gpu_operation_mode>
+        <gsp_firmware_version>N/A</gsp_firmware_version>
+        <gpu_virtualization_mode>
+            <virtualization_mode>None</virtualization_mode>
+            <host_vgpu_mode>N/A</host_vgpu_mode>
+        </gpu_virtualization_mode>
+        <ibmnpu>
+            <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+        </ibmnpu>
+        <pci>
+            <pci_bus>0A</pci_bus>
+            <pci_device>00</pci_device>
+            <pci_domain>0000</pci_domain>
+            <pci_device_id>1E8710DE</pci_device_id>
+            <pci_bus_id>00000000:0A:00.0</pci_bus_id>
+            <pci_sub_system_id>37AF1458</pci_sub_system_id>
+            <pci_gpu_link_info>
+                <pcie_gen>
+                    <max_link_gen>3</max_link_gen>
+
<current_link_gen>3</current_link_gen> + </pcie_gen> + <link_widths> + <max_link_width>16x</max_link_width> + <current_link_width>8x</current_link_width> + </link_widths> + </pci_gpu_link_info> + <pci_bridge_chip> + <bridge_chip_type>N/A</bridge_chip_type> + <bridge_chip_fw>N/A</bridge_chip_fw> + </pci_bridge_chip> + <replay_counter>0</replay_counter> + <replay_rollover_counter>0</replay_rollover_counter> + <tx_util>13000 KB/s</tx_util> + <rx_util>91000 KB/s</rx_util> + </pci> + <fan_speed>37 %</fan_speed> + <performance_state>P8</performance_state> + <clocks_throttle_reasons> + <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle> + <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting> + <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap> + <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown> + <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown> + <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown> + <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost> + <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown> + <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting> + </clocks_throttle_reasons> + <fb_memory_usage> + <total>8192 MiB</total> + <reserved>182 MiB</reserved> + <used>862 MiB</used> + <free>7147 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>256 MiB</total> + <used>2 MiB</used> + <free>254 MiB</free> + </bar1_memory_usage> + <compute_mode>Default</compute_mode> + <utilization> + <gpu_util>2 %</gpu_util> + <memory_util>7 %</memory_util> + <encoder_util>0 %</encoder_util> + <decoder_util>0 %</decoder_util> + </utilization> + <encoder_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </encoder_stats> + <fbc_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </fbc_stats> + <ecc_mode> + <current_ecc>N/A</current_ecc> + <pending_ecc>N/A</pending_ecc> + </ecc_mode> + <ecc_errors> + <volatile> + <sram_correctable>N/A</sram_correctable> + <sram_uncorrectable>N/A</sram_uncorrectable> + <dram_correctable>N/A</dram_correctable> + <dram_uncorrectable>N/A</dram_uncorrectable> + </volatile> + <aggregate> + <sram_correctable>N/A</sram_correctable> + <sram_uncorrectable>N/A</sram_uncorrectable> + <dram_correctable>N/A</dram_correctable> + <dram_uncorrectable>N/A</dram_uncorrectable> + </aggregate> + </ecc_errors> + <retired_pages> + <multiple_single_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </multiple_single_bit_retirement> + <double_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </double_bit_retirement> + <pending_blacklist>N/A</pending_blacklist> + <pending_retirement>N/A</pending_retirement> + </retired_pages> + <remapped_rows>N/A</remapped_rows> + <temperature> + <gpu_temp>29 C</gpu_temp> + <gpu_temp_max_threshold>100 C</gpu_temp_max_threshold> + <gpu_temp_slow_threshold>97 C</gpu_temp_slow_threshold> + <gpu_temp_max_gpu_threshold>88 C</gpu_temp_max_gpu_threshold> + <gpu_target_temperature>83 C</gpu_target_temperature> + <memory_temp>N/A</memory_temp> + 
<gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold> + </temperature> + <supported_gpu_target_temp> + <gpu_target_temp_min>65 C</gpu_target_temp_min> + <gpu_target_temp_max>88 C</gpu_target_temp_max> + </supported_gpu_target_temp> + <power_readings> + <power_state>P8</power_state> + <power_management>Supported</power_management> + <power_draw>14.50 W</power_draw> + <power_limit>275.00 W</power_limit> + <default_power_limit>275.00 W</default_power_limit> + <enforced_power_limit>275.00 W</enforced_power_limit> + <min_power_limit>105.00 W</min_power_limit> + <max_power_limit>350.00 W</max_power_limit> + </power_readings> + <clocks> + <graphics_clock>193 MHz</graphics_clock> + <sm_clock>193 MHz</sm_clock> + <mem_clock>403 MHz</mem_clock> + <video_clock>539 MHz</video_clock> + </clocks> + <applications_clocks> + <graphics_clock>N/A</graphics_clock> + <mem_clock>N/A</mem_clock> + </applications_clocks> + <default_applications_clocks> + <graphics_clock>N/A</graphics_clock> + <mem_clock>N/A</mem_clock> + </default_applications_clocks> + <max_clocks> + <graphics_clock>3060 MHz</graphics_clock> + <sm_clock>3060 MHz</sm_clock> + <mem_clock>7560 MHz</mem_clock> + <video_clock>1950 MHz</video_clock> + </max_clocks> + <max_customer_boost_clocks> + <graphics_clock>N/A</graphics_clock> + </max_customer_boost_clocks> + <clock_policy> + <auto_boost>N/A</auto_boost> + <auto_boost_default>N/A</auto_boost_default> + </clock_policy> + <voltage> + <graphics_volt>N/A</graphics_volt> + </voltage> + <supported_clocks> + <supported_mem_clock> + <value>7560 MHz</value> + <supported_graphics_clock>2220 MHz</supported_graphics_clock> + <supported_graphics_clock>2205 MHz</supported_graphics_clock> + <supported_graphics_clock>2190 MHz</supported_graphics_clock> + <supported_graphics_clock>2175 MHz</supported_graphics_clock> + <supported_graphics_clock>2160 MHz</supported_graphics_clock> + <supported_graphics_clock>2145 MHz</supported_graphics_clock> + <supported_graphics_clock>2130 MHz</supported_graphics_clock> + <supported_graphics_clock>2115 MHz</supported_graphics_clock> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 
MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>7360 MHz</value> + <supported_graphics_clock>2220 MHz</supported_graphics_clock> + <supported_graphics_clock>2205 MHz</supported_graphics_clock> + <supported_graphics_clock>2190 MHz</supported_graphics_clock> + <supported_graphics_clock>2175 MHz</supported_graphics_clock> + <supported_graphics_clock>2160 MHz</supported_graphics_clock> + <supported_graphics_clock>2145 MHz</supported_graphics_clock> + <supported_graphics_clock>2130 MHz</supported_graphics_clock> + <supported_graphics_clock>2115 MHz</supported_graphics_clock> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 
MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>5000 MHz</value> + <supported_graphics_clock>2220 MHz</supported_graphics_clock> + <supported_graphics_clock>2205 MHz</supported_graphics_clock> + <supported_graphics_clock>2190 MHz</supported_graphics_clock> + <supported_graphics_clock>2175 MHz</supported_graphics_clock> + <supported_graphics_clock>2160 MHz</supported_graphics_clock> + <supported_graphics_clock>2145 MHz</supported_graphics_clock> + <supported_graphics_clock>2130 MHz</supported_graphics_clock> + <supported_graphics_clock>2115 MHz</supported_graphics_clock> + <supported_graphics_clock>2100 
MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + 
<supported_graphics_clock>1260 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>810 MHz</value> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + 
<supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + <supported_graphics_clock>1245 MHz</supported_graphics_clock> + <supported_graphics_clock>1230 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1200 MHz</supported_graphics_clock> + <supported_graphics_clock>1185 MHz</supported_graphics_clock> + <supported_graphics_clock>1170 MHz</supported_graphics_clock> + <supported_graphics_clock>1155 MHz</supported_graphics_clock> + <supported_graphics_clock>1140 MHz</supported_graphics_clock> + <supported_graphics_clock>1125 MHz</supported_graphics_clock> + <supported_graphics_clock>1110 MHz</supported_graphics_clock> + <supported_graphics_clock>1095 MHz</supported_graphics_clock> + <supported_graphics_clock>1080 MHz</supported_graphics_clock> + <supported_graphics_clock>1065 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1035 MHz</supported_graphics_clock> + <supported_graphics_clock>1020 MHz</supported_graphics_clock> + <supported_graphics_clock>1005 MHz</supported_graphics_clock> + <supported_graphics_clock>990 MHz</supported_graphics_clock> + <supported_graphics_clock>975 MHz</supported_graphics_clock> + <supported_graphics_clock>960 MHz</supported_graphics_clock> + <supported_graphics_clock>945 MHz</supported_graphics_clock> + <supported_graphics_clock>930 MHz</supported_graphics_clock> + <supported_graphics_clock>915 MHz</supported_graphics_clock> + <supported_graphics_clock>900 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>870 MHz</supported_graphics_clock> + <supported_graphics_clock>855 MHz</supported_graphics_clock> + <supported_graphics_clock>840 MHz</supported_graphics_clock> + <supported_graphics_clock>825 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>795 MHz</supported_graphics_clock> + <supported_graphics_clock>780 MHz</supported_graphics_clock> + <supported_graphics_clock>765 MHz</supported_graphics_clock> + <supported_graphics_clock>750 MHz</supported_graphics_clock> + <supported_graphics_clock>735 MHz</supported_graphics_clock> + <supported_graphics_clock>720 MHz</supported_graphics_clock> + <supported_graphics_clock>705 MHz</supported_graphics_clock> + <supported_graphics_clock>690 MHz</supported_graphics_clock> + <supported_graphics_clock>675 MHz</supported_graphics_clock> + <supported_graphics_clock>660 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + 
<supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>405 MHz</value> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + </supported_mem_clock> + </supported_clocks> + <processes> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>7724</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.YourPhone_1.22062.543.0_x64__8wekyb3d8bbwe\PhoneExperienceHost.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>10808</pid> + <type>C+G</type> + <process_name></process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>11556</pid> + <type>C+G</type> + <process_name>C:\Windows\SystemApps\ShellExperienceHost_cw5n1h2txyewy\ShellExperienceHost.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>12452</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.SkypeApp_15.88.3401.0_x86__kzf8qxf38zg5c\Skype\Skype.exe</process_name> + 
<used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>12636</pid> + <type>C+G</type> + <process_name></process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>13120</pid> + <type>C+G</type> + <process_name></process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>14296</pid> + <type>C+G</type> + <process_name>C:\Windows\explorer.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>16508</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.549981C3F5F10_4.2204.13303.0_x64__8wekyb3d8bbwe\Cortana.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>16592</pid> + <type>C+G</type> + <process_name>C:\ProgramData\Logishrd\LogiOptions\Software\Current\LogiOptionsMgr.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>20920</pid> + <type>C+G</type> + <process_name>C:\Windows\SystemApps\Microsoft.LockApp_cw5n1h2txyewy\LockApp.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21004</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.Windows.Photos_2022.31070.26005.0_x64__8wekyb3d8bbwe\Microsoft.Photos.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21036</pid> + <type>C+G</type> + <process_name>C:\Program Files (x86)\Garmin\Express\CefSharp.BrowserSubprocess.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21048</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\91750D7E.Slack_4.28.171.0_x64__8she8kybcnzg4\app\Slack.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21104</pid> + <type>C+G</type> + <process_name>C:\Windows\SystemApps\MicrosoftWindows.Client.CBS_cw5n1h2txyewy\TextInputHost.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21292</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.ZuneVideo_10.22041.10091.0_x64__8wekyb3d8bbwe\Video.UI.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21472</pid> + <type>C+G</type> + <process_name>C:\Program Files (x86)\Google\Chrome\Application\chrome.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + 
<process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>21852</pid> + <type>C+G</type> + <process_name>C:\ProgramData\Logishrd\LogiOptions\Software\Current\LogiOverlay.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>22600</pid> + <type>C+G</type> + <process_name></process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>23652</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\microsoft.windowscommunicationsapps_16005.14326.20970.0_x64__8wekyb3d8bbwe\HxOutlook.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>25436</pid> + <type>C+G</type> + <process_name>C:\Windows\SystemApps\MicrosoftWindows.Client.CBS_cw5n1h2txyewy\SearchHost.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>25520</pid> + <type>C+G</type> + <process_name></process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>25696</pid> + <type>C+G</type> + <process_name>C:\Users\Vlad\AppData\Local\Viber\Viber.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>26972</pid> + <type>C+G</type> + <process_name>C:\Windows\SystemApps\Microsoft.Windows.StartMenuExperienceHost_cw5n1h2txyewy\StartMenuExperienceHost.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>27148</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\Microsoft.Office.OneNote_16001.14326.21090.0_x64__8wekyb3d8bbwe\onenoteim.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>27628</pid> + <type>C+G</type> + <process_name>C:\Program Files\WindowsApps\49297T.Partl.ClockOut_2.9.9.0_x64__jr9bq2af9farr\WorkingHours.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + <process_info> + <gpu_instance_id>N/A</gpu_instance_id> + <compute_instance_id>N/A</compute_instance_id> + <pid>30212</pid> + <type>C+G</type> + <process_name>C:\Program Files (x86)\Microsoft\EdgeWebView\Application\105.0.1343.42\msedgewebview2.exe</process_name> + <used_memory>N/A</used_memory> + </process_info> + </processes> + <accounted_processes> + </accounted_processes> + </gpu> + +</nvidia_smi_log> \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-3060.xml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-3060.xml new file mode 100644 index 00000000000000..ad63fd51b8df04 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-3060.xml @@ -0,0 +1,917 @@ +<?xml version="1.0" ?> +<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd"> +<nvidia_smi_log> + <timestamp>Tue Sep 20 
15:21:01 2022</timestamp> + <driver_version>515.65.01</driver_version> + <cuda_version>11.7</cuda_version> + <attached_gpus>1</attached_gpus> + <gpu id="00000000:01:00.0"> + <product_name>NVIDIA GeForce RTX 3060 Laptop GPU</product_name> + <product_brand>GeForce</product_brand> + <product_architecture>Ampere</product_architecture> + <display_mode>Disabled</display_mode> + <display_active>Disabled</display_active> + <persistence_mode>Disabled</persistence_mode> + <mig_mode> + <current_mig>N/A</current_mig> + <pending_mig>N/A</pending_mig> + </mig_mode> + <mig_devices> + None + </mig_devices> + <accounting_mode>Disabled</accounting_mode> + <accounting_mode_buffer_size>4000</accounting_mode_buffer_size> + <driver_model> + <current_dm>N/A</current_dm> + <pending_dm>N/A</pending_dm> + </driver_model> + <serial>N/A</serial> + <uuid>GPU-473d8d0f-d462-185c-6b36-6fc23e23e571</uuid> + <minor_number>0</minor_number> + <vbios_version>94.06.19.00.51</vbios_version> + <multigpu_board>No</multigpu_board> + <board_id>0x100</board_id> + <gpu_part_number>N/A</gpu_part_number> + <gpu_module_id>0</gpu_module_id> + <inforom_version> + <img_version>G001.0000.03.03</img_version> + <oem_object>2.0</oem_object> + <ecc_object>N/A</ecc_object> + <pwr_object>N/A</pwr_object> + </inforom_version> + <gpu_operation_mode> + <current_gom>N/A</current_gom> + <pending_gom>N/A</pending_gom> + </gpu_operation_mode> + <gsp_firmware_version>N/A</gsp_firmware_version> + <gpu_virtualization_mode> + <virtualization_mode>None</virtualization_mode> + <host_vgpu_mode>N/A</host_vgpu_mode> + </gpu_virtualization_mode> + <ibmnpu> + <relaxed_ordering_mode>N/A</relaxed_ordering_mode> + </ibmnpu> + <pci> + <pci_bus>01</pci_bus> + <pci_device>00</pci_device> + <pci_domain>0000</pci_domain> + <pci_device_id>252010DE</pci_device_id> + <pci_bus_id>00000000:01:00.0</pci_bus_id> + <pci_sub_system_id>0A831028</pci_sub_system_id> + <pci_gpu_link_info> + <pcie_gen> + <max_link_gen>4</max_link_gen> + <current_link_gen>1</current_link_gen> + </pcie_gen> + <link_widths> + <max_link_width>16x</max_link_width> + <current_link_width>8x</current_link_width> + </link_widths> + </pci_gpu_link_info> + <pci_bridge_chip> + <bridge_chip_type>N/A</bridge_chip_type> + <bridge_chip_fw>N/A</bridge_chip_fw> + </pci_bridge_chip> + <replay_counter>0</replay_counter> + <replay_rollover_counter>0</replay_rollover_counter> + <tx_util>0 KB/s</tx_util> + <rx_util>0 KB/s</rx_util> + </pci> + <fan_speed>N/A</fan_speed> + <performance_state>P8</performance_state> + <clocks_throttle_reasons> + <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle> + <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting> + <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap> + <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown> + <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown> + <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown> + <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost> + <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown> + <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting> + </clocks_throttle_reasons> + <fb_memory_usage> + <total>6144 MiB</total> + <reserved>197 MiB</reserved> + <used>5 MiB</used> + 
<free>5940 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>8192 MiB</total> + <used>3 MiB</used> + <free>8189 MiB</free> + </bar1_memory_usage> + <compute_mode>Default</compute_mode> + <utilization> + <gpu_util>0 %</gpu_util> + <memory_util>0 %</memory_util> + <encoder_util>0 %</encoder_util> + <decoder_util>0 %</decoder_util> + </utilization> + <encoder_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </encoder_stats> + <fbc_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </fbc_stats> + <ecc_mode> + <current_ecc>N/A</current_ecc> + <pending_ecc>N/A</pending_ecc> + </ecc_mode> + <ecc_errors> + <volatile> + <sram_correctable>N/A</sram_correctable> + <sram_uncorrectable>N/A</sram_uncorrectable> + <dram_correctable>N/A</dram_correctable> + <dram_uncorrectable>N/A</dram_uncorrectable> + </volatile> + <aggregate> + <sram_correctable>N/A</sram_correctable> + <sram_uncorrectable>N/A</sram_uncorrectable> + <dram_correctable>N/A</dram_correctable> + <dram_uncorrectable>N/A</dram_uncorrectable> + </aggregate> + </ecc_errors> + <retired_pages> + <multiple_single_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </multiple_single_bit_retirement> + <double_bit_retirement> + <retired_count>N/A</retired_count> + <retired_pagelist>N/A</retired_pagelist> + </double_bit_retirement> + <pending_blacklist>N/A</pending_blacklist> + <pending_retirement>N/A</pending_retirement> + </retired_pages> + <remapped_rows>N/A</remapped_rows> + <temperature> + <gpu_temp>45 C</gpu_temp> + <gpu_temp_max_threshold>105 C</gpu_temp_max_threshold> + <gpu_temp_slow_threshold>102 C</gpu_temp_slow_threshold> + <gpu_temp_max_gpu_threshold>75 C</gpu_temp_max_gpu_threshold> + <gpu_target_temperature>N/A</gpu_target_temperature> + <memory_temp>N/A</memory_temp> + <gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold> + </temperature> + <supported_gpu_target_temp> + <gpu_target_temp_min>N/A</gpu_target_temp_min> + <gpu_target_temp_max>N/A</gpu_target_temp_max> + </supported_gpu_target_temp> + <power_readings> + <power_state>P8</power_state> + <power_management>N/A</power_management> + <power_draw>8.70 W</power_draw> + <power_limit>N/A</power_limit> + <default_power_limit>N/A</default_power_limit> + <enforced_power_limit>N/A</enforced_power_limit> + <min_power_limit>N/A</min_power_limit> + <max_power_limit>N/A</max_power_limit> + </power_readings> + <clocks> + <graphics_clock>210 MHz</graphics_clock> + <sm_clock>210 MHz</sm_clock> + <mem_clock>405 MHz</mem_clock> + <video_clock>555 MHz</video_clock> + </clocks> + <applications_clocks> + <graphics_clock>N/A</graphics_clock> + <mem_clock>N/A</mem_clock> + </applications_clocks> + <default_applications_clocks> + <graphics_clock>N/A</graphics_clock> + <mem_clock>N/A</mem_clock> + </default_applications_clocks> + <max_clocks> + <graphics_clock>2100 MHz</graphics_clock> + <sm_clock>2100 MHz</sm_clock> + <mem_clock>6001 MHz</mem_clock> + <video_clock>1950 MHz</video_clock> + </max_clocks> + <max_customer_boost_clocks> + <graphics_clock>N/A</graphics_clock> + </max_customer_boost_clocks> + <clock_policy> + <auto_boost>N/A</auto_boost> + <auto_boost_default>N/A</auto_boost_default> + </clock_policy> + <voltage> + <graphics_volt>631.250 mV</graphics_volt> + </voltage> + <supported_clocks> + <supported_mem_clock> + <value>6001 MHz</value> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> 
+ <supported_graphics_clock>2092 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2077 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2062 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2047 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2032 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2017 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>2002 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1987 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1972 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1957 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1942 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1927 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1912 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1897 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1882 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1867 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1852 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1837 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1822 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1807 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1792 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1777 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1762 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1747 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1732 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1717 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1702 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1687 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 
MHz</supported_graphics_clock> + <supported_graphics_clock>1672 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1657 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1642 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1627 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1612 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1597 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1582 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1567 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1552 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1537 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1522 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1507 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1492 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1477 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1462 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1447 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1432 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1417 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1402 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1387 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1372 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1357 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1342 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1327 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1312 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1297 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1282 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1267 MHz</supported_graphics_clock> + 
+                <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+                <supported_graphics_clock>997 MHz</supported_graphics_clock>
+                <supported_graphics_clock>990 MHz</supported_graphics_clock>
+                <supported_graphics_clock>982 MHz</supported_graphics_clock>
+                <supported_graphics_clock>975 MHz</supported_graphics_clock>
+                <supported_graphics_clock>967 MHz</supported_graphics_clock>
+                <supported_graphics_clock>960 MHz</supported_graphics_clock>
+                <supported_graphics_clock>952 MHz</supported_graphics_clock>
+                <supported_graphics_clock>945 MHz</supported_graphics_clock>
+                <supported_graphics_clock>937 MHz</supported_graphics_clock>
+                <supported_graphics_clock>930 MHz</supported_graphics_clock>
+                <supported_graphics_clock>922 MHz</supported_graphics_clock>
+                <supported_graphics_clock>915 MHz</supported_graphics_clock>
+                <supported_graphics_clock>907 MHz</supported_graphics_clock>
+                <supported_graphics_clock>900 MHz</supported_graphics_clock>
+                <supported_graphics_clock>892 MHz</supported_graphics_clock>
+                <supported_graphics_clock>885 MHz</supported_graphics_clock>
+                <supported_graphics_clock>877 MHz</supported_graphics_clock>
+                <supported_graphics_clock>870 MHz</supported_graphics_clock>
+                <supported_graphics_clock>862 MHz</supported_graphics_clock>
+                <supported_graphics_clock>855 MHz</supported_graphics_clock>
+                <supported_graphics_clock>847 MHz</supported_graphics_clock>
+                <supported_graphics_clock>840 MHz</supported_graphics_clock>
+                <supported_graphics_clock>832 MHz</supported_graphics_clock>
+                <supported_graphics_clock>825 MHz</supported_graphics_clock>
+                <supported_graphics_clock>817 MHz</supported_graphics_clock>
+                <supported_graphics_clock>810 MHz</supported_graphics_clock>
+                <supported_graphics_clock>802 MHz</supported_graphics_clock>
+                <supported_graphics_clock>795 MHz</supported_graphics_clock>
+                <supported_graphics_clock>787 MHz</supported_graphics_clock>
+                <supported_graphics_clock>780 MHz</supported_graphics_clock>
+                <supported_graphics_clock>772 MHz</supported_graphics_clock>
+                <supported_graphics_clock>765 MHz</supported_graphics_clock>
+                <supported_graphics_clock>757 MHz</supported_graphics_clock>
+                <supported_graphics_clock>750 MHz</supported_graphics_clock>
+                <supported_graphics_clock>742 MHz</supported_graphics_clock>
+                <supported_graphics_clock>735 MHz</supported_graphics_clock>
+                <supported_graphics_clock>727 MHz</supported_graphics_clock>
+                <supported_graphics_clock>720 MHz</supported_graphics_clock>
+                <supported_graphics_clock>712 MHz</supported_graphics_clock>
+                <supported_graphics_clock>705 MHz</supported_graphics_clock>
+                <supported_graphics_clock>697 MHz</supported_graphics_clock>
+                <supported_graphics_clock>690 MHz</supported_graphics_clock>
+                <supported_graphics_clock>682 MHz</supported_graphics_clock>
+                <supported_graphics_clock>675 MHz</supported_graphics_clock>
+                <supported_graphics_clock>667 MHz</supported_graphics_clock>
+                <supported_graphics_clock>660 MHz</supported_graphics_clock>
+                <supported_graphics_clock>652 MHz</supported_graphics_clock>
+                <supported_graphics_clock>645 MHz</supported_graphics_clock>
+                <supported_graphics_clock>637 MHz</supported_graphics_clock>
+                <supported_graphics_clock>630 MHz</supported_graphics_clock>
+                <supported_graphics_clock>622 MHz</supported_graphics_clock>
+                <supported_graphics_clock>615 MHz</supported_graphics_clock>
+                <supported_graphics_clock>607 MHz</supported_graphics_clock>
+                <supported_graphics_clock>600 MHz</supported_graphics_clock>
+                <supported_graphics_clock>592 MHz</supported_graphics_clock>
+                <supported_graphics_clock>585 MHz</supported_graphics_clock>
+                <supported_graphics_clock>577 MHz</supported_graphics_clock>
+                <supported_graphics_clock>570 MHz</supported_graphics_clock>
+                <supported_graphics_clock>562 MHz</supported_graphics_clock>
+                <supported_graphics_clock>555 MHz</supported_graphics_clock>
+                <supported_graphics_clock>547 MHz</supported_graphics_clock>
+                <supported_graphics_clock>540 MHz</supported_graphics_clock>
+                <supported_graphics_clock>532 MHz</supported_graphics_clock>
+                <supported_graphics_clock>525 MHz</supported_graphics_clock>
+                <supported_graphics_clock>517 MHz</supported_graphics_clock>
+                <supported_graphics_clock>510 MHz</supported_graphics_clock>
+                <supported_graphics_clock>502 MHz</supported_graphics_clock>
+                <supported_graphics_clock>495 MHz</supported_graphics_clock>
+                <supported_graphics_clock>487 MHz</supported_graphics_clock>
+                <supported_graphics_clock>480 MHz</supported_graphics_clock>
+                <supported_graphics_clock>472 MHz</supported_graphics_clock>
+                <supported_graphics_clock>465 MHz</supported_graphics_clock>
+                <supported_graphics_clock>457 MHz</supported_graphics_clock>
+                <supported_graphics_clock>450 MHz</supported_graphics_clock>
+                <supported_graphics_clock>442 MHz</supported_graphics_clock>
+                <supported_graphics_clock>435 MHz</supported_graphics_clock>
+                <supported_graphics_clock>427 MHz</supported_graphics_clock>
+                <supported_graphics_clock>420 MHz</supported_graphics_clock>
+                <supported_graphics_clock>412 MHz</supported_graphics_clock>
+                <supported_graphics_clock>405 MHz</supported_graphics_clock>
+            </supported_mem_clock>
+            <supported_mem_clock>
+                <value>5501 MHz</value>
+                <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2092 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2077 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2062 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2047 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2032 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2017 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2002 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1987 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1972 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1957 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1942 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1927 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1912 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1897 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1882 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1867 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1852 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1837 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1822 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1807 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1792 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1777 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1762 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1747 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1732 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1717 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1702 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1687 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1672 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1657 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1642 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1627 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1612 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1597 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1582 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1567 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1552 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1537 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1522 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1507 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1492 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1477 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1462 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1447 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1432 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1417 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1402 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1387 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1372 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1357 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1342 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1327 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1312 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1297 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1282 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1267 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+                <supported_graphics_clock>997 MHz</supported_graphics_clock>
+                <supported_graphics_clock>990 MHz</supported_graphics_clock>
+                <supported_graphics_clock>982 MHz</supported_graphics_clock>
+                <supported_graphics_clock>975 MHz</supported_graphics_clock>
+                <supported_graphics_clock>967 MHz</supported_graphics_clock>
+                <supported_graphics_clock>960 MHz</supported_graphics_clock>
+                <supported_graphics_clock>952 MHz</supported_graphics_clock>
+                <supported_graphics_clock>945 MHz</supported_graphics_clock>
+                <supported_graphics_clock>937 MHz</supported_graphics_clock>
+                <supported_graphics_clock>930 MHz</supported_graphics_clock>
+                <supported_graphics_clock>922 MHz</supported_graphics_clock>
+                <supported_graphics_clock>915 MHz</supported_graphics_clock>
+                <supported_graphics_clock>907 MHz</supported_graphics_clock>
+                <supported_graphics_clock>900 MHz</supported_graphics_clock>
+                <supported_graphics_clock>892 MHz</supported_graphics_clock>
+                <supported_graphics_clock>885 MHz</supported_graphics_clock>
+                <supported_graphics_clock>877 MHz</supported_graphics_clock>
+                <supported_graphics_clock>870 MHz</supported_graphics_clock>
+                <supported_graphics_clock>862 MHz</supported_graphics_clock>
+                <supported_graphics_clock>855 MHz</supported_graphics_clock>
+                <supported_graphics_clock>847 MHz</supported_graphics_clock>
+                <supported_graphics_clock>840 MHz</supported_graphics_clock>
+                <supported_graphics_clock>832 MHz</supported_graphics_clock>
+                <supported_graphics_clock>825 MHz</supported_graphics_clock>
+                <supported_graphics_clock>817 MHz</supported_graphics_clock>
+                <supported_graphics_clock>810 MHz</supported_graphics_clock>
+                <supported_graphics_clock>802 MHz</supported_graphics_clock>
+                <supported_graphics_clock>795 MHz</supported_graphics_clock>
+                <supported_graphics_clock>787 MHz</supported_graphics_clock>
+                <supported_graphics_clock>780 MHz</supported_graphics_clock>
+                <supported_graphics_clock>772 MHz</supported_graphics_clock>
+                <supported_graphics_clock>765 MHz</supported_graphics_clock>
+                <supported_graphics_clock>757 MHz</supported_graphics_clock>
+                <supported_graphics_clock>750 MHz</supported_graphics_clock>
+                <supported_graphics_clock>742 MHz</supported_graphics_clock>
+                <supported_graphics_clock>735 MHz</supported_graphics_clock>
+                <supported_graphics_clock>727 MHz</supported_graphics_clock>
+                <supported_graphics_clock>720 MHz</supported_graphics_clock>
+                <supported_graphics_clock>712 MHz</supported_graphics_clock>
+                <supported_graphics_clock>705 MHz</supported_graphics_clock>
+                <supported_graphics_clock>697 MHz</supported_graphics_clock>
+                <supported_graphics_clock>690 MHz</supported_graphics_clock>
+                <supported_graphics_clock>682 MHz</supported_graphics_clock>
+                <supported_graphics_clock>675 MHz</supported_graphics_clock>
+                <supported_graphics_clock>667 MHz</supported_graphics_clock>
+                <supported_graphics_clock>660 MHz</supported_graphics_clock>
+                <supported_graphics_clock>652 MHz</supported_graphics_clock>
+                <supported_graphics_clock>645 MHz</supported_graphics_clock>
+                <supported_graphics_clock>637 MHz</supported_graphics_clock>
+                <supported_graphics_clock>630 MHz</supported_graphics_clock>
+                <supported_graphics_clock>622 MHz</supported_graphics_clock>
+                <supported_graphics_clock>615 MHz</supported_graphics_clock>
+                <supported_graphics_clock>607 MHz</supported_graphics_clock>
+                <supported_graphics_clock>600 MHz</supported_graphics_clock>
+                <supported_graphics_clock>592 MHz</supported_graphics_clock>
+                <supported_graphics_clock>585 MHz</supported_graphics_clock>
+                <supported_graphics_clock>577 MHz</supported_graphics_clock>
+                <supported_graphics_clock>570 MHz</supported_graphics_clock>
+                <supported_graphics_clock>562 MHz</supported_graphics_clock>
+                <supported_graphics_clock>555 MHz</supported_graphics_clock>
+                <supported_graphics_clock>547 MHz</supported_graphics_clock>
+                <supported_graphics_clock>540 MHz</supported_graphics_clock>
+                <supported_graphics_clock>532 MHz</supported_graphics_clock>
+                <supported_graphics_clock>525 MHz</supported_graphics_clock>
+                <supported_graphics_clock>517 MHz</supported_graphics_clock>
+                <supported_graphics_clock>510 MHz</supported_graphics_clock>
+                <supported_graphics_clock>502 MHz</supported_graphics_clock>
+                <supported_graphics_clock>495 MHz</supported_graphics_clock>
+                <supported_graphics_clock>487 MHz</supported_graphics_clock>
+                <supported_graphics_clock>480 MHz</supported_graphics_clock>
+                <supported_graphics_clock>472 MHz</supported_graphics_clock>
+                <supported_graphics_clock>465 MHz</supported_graphics_clock>
+                <supported_graphics_clock>457 MHz</supported_graphics_clock>
+                <supported_graphics_clock>450 MHz</supported_graphics_clock>
+                <supported_graphics_clock>442 MHz</supported_graphics_clock>
+                <supported_graphics_clock>435 MHz</supported_graphics_clock>
+                <supported_graphics_clock>427 MHz</supported_graphics_clock>
+                <supported_graphics_clock>420 MHz</supported_graphics_clock>
+                <supported_graphics_clock>412 MHz</supported_graphics_clock>
+                <supported_graphics_clock>405 MHz</supported_graphics_clock>
+            </supported_mem_clock>
+            <supported_mem_clock>
+                <value>810 MHz</value>
+                <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2092 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2077 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2062 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2047 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2032 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2017 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2002 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1987 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1972 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1957 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1942 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1927 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1912 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1897 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1882 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1867 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1852 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1837 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1822 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1807 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1792 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1777 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1762 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1747 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1732 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1717 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1702 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1687 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1672 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1657 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1642 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1627 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1612 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1597 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1582 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1567 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1552 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1537 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1522 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1507 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1492 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1477 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1462 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1447 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1432 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1417 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1402 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1387 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1372 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1357 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1342 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1327 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1312 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1297 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1282 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1267 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+                <supported_graphics_clock>997 MHz</supported_graphics_clock>
+                <supported_graphics_clock>990 MHz</supported_graphics_clock>
+                <supported_graphics_clock>982 MHz</supported_graphics_clock>
+                <supported_graphics_clock>975 MHz</supported_graphics_clock>
+                <supported_graphics_clock>967 MHz</supported_graphics_clock>
+                <supported_graphics_clock>960 MHz</supported_graphics_clock>
+                <supported_graphics_clock>952 MHz</supported_graphics_clock>
+                <supported_graphics_clock>945 MHz</supported_graphics_clock>
+                <supported_graphics_clock>937 MHz</supported_graphics_clock>
+                <supported_graphics_clock>930 MHz</supported_graphics_clock>
+                <supported_graphics_clock>922 MHz</supported_graphics_clock>
+                <supported_graphics_clock>915 MHz</supported_graphics_clock>
+                <supported_graphics_clock>907 MHz</supported_graphics_clock>
+                <supported_graphics_clock>900 MHz</supported_graphics_clock>
+                <supported_graphics_clock>892 MHz</supported_graphics_clock>
+                <supported_graphics_clock>885 MHz</supported_graphics_clock>
+                <supported_graphics_clock>877 MHz</supported_graphics_clock>
+                <supported_graphics_clock>870 MHz</supported_graphics_clock>
+                <supported_graphics_clock>862 MHz</supported_graphics_clock>
+                <supported_graphics_clock>855 MHz</supported_graphics_clock>
+                <supported_graphics_clock>847 MHz</supported_graphics_clock>
+                <supported_graphics_clock>840 MHz</supported_graphics_clock>
+                <supported_graphics_clock>832 MHz</supported_graphics_clock>
+                <supported_graphics_clock>825 MHz</supported_graphics_clock>
+                <supported_graphics_clock>817 MHz</supported_graphics_clock>
+                <supported_graphics_clock>810 MHz</supported_graphics_clock>
+                <supported_graphics_clock>802 MHz</supported_graphics_clock>
+                <supported_graphics_clock>795 MHz</supported_graphics_clock>
+                <supported_graphics_clock>787 MHz</supported_graphics_clock>
+                <supported_graphics_clock>780 MHz</supported_graphics_clock>
+                <supported_graphics_clock>772 MHz</supported_graphics_clock>
+                <supported_graphics_clock>765 MHz</supported_graphics_clock>
+                <supported_graphics_clock>757 MHz</supported_graphics_clock>
+                <supported_graphics_clock>750 MHz</supported_graphics_clock>
+                <supported_graphics_clock>742 MHz</supported_graphics_clock>
+                <supported_graphics_clock>735 MHz</supported_graphics_clock>
+                <supported_graphics_clock>727 MHz</supported_graphics_clock>
+                <supported_graphics_clock>720 MHz</supported_graphics_clock>
+                <supported_graphics_clock>712 MHz</supported_graphics_clock>
+                <supported_graphics_clock>705 MHz</supported_graphics_clock>
+                <supported_graphics_clock>697 MHz</supported_graphics_clock>
+                <supported_graphics_clock>690 MHz</supported_graphics_clock>
+                <supported_graphics_clock>682 MHz</supported_graphics_clock>
+                <supported_graphics_clock>675 MHz</supported_graphics_clock>
+                <supported_graphics_clock>667 MHz</supported_graphics_clock>
+                <supported_graphics_clock>660 MHz</supported_graphics_clock>
+                <supported_graphics_clock>652 MHz</supported_graphics_clock>
+                <supported_graphics_clock>645 MHz</supported_graphics_clock>
+                <supported_graphics_clock>637 MHz</supported_graphics_clock>
+                <supported_graphics_clock>630 MHz</supported_graphics_clock>
+                <supported_graphics_clock>622 MHz</supported_graphics_clock>
+                <supported_graphics_clock>615 MHz</supported_graphics_clock>
+                <supported_graphics_clock>607 MHz</supported_graphics_clock>
+                <supported_graphics_clock>600 MHz</supported_graphics_clock>
+                <supported_graphics_clock>592 MHz</supported_graphics_clock>
+                <supported_graphics_clock>585 MHz</supported_graphics_clock>
+                <supported_graphics_clock>577 MHz</supported_graphics_clock>
+                <supported_graphics_clock>570 MHz</supported_graphics_clock>
+                <supported_graphics_clock>562 MHz</supported_graphics_clock>
+                <supported_graphics_clock>555 MHz</supported_graphics_clock>
+                <supported_graphics_clock>547 MHz</supported_graphics_clock>
+                <supported_graphics_clock>540 MHz</supported_graphics_clock>
+                <supported_graphics_clock>532 MHz</supported_graphics_clock>
+                <supported_graphics_clock>525 MHz</supported_graphics_clock>
+                <supported_graphics_clock>517 MHz</supported_graphics_clock>
+                <supported_graphics_clock>510 MHz</supported_graphics_clock>
+                <supported_graphics_clock>502 MHz</supported_graphics_clock>
+                <supported_graphics_clock>495 MHz</supported_graphics_clock>
+                <supported_graphics_clock>487 MHz</supported_graphics_clock>
+                <supported_graphics_clock>480 MHz</supported_graphics_clock>
+                <supported_graphics_clock>472 MHz</supported_graphics_clock>
+                <supported_graphics_clock>465 MHz</supported_graphics_clock>
+                <supported_graphics_clock>457 MHz</supported_graphics_clock>
+                <supported_graphics_clock>450 MHz</supported_graphics_clock>
+                <supported_graphics_clock>442 MHz</supported_graphics_clock>
+                <supported_graphics_clock>435 MHz</supported_graphics_clock>
+                <supported_graphics_clock>427 MHz</supported_graphics_clock>
+                <supported_graphics_clock>420 MHz</supported_graphics_clock>
+                <supported_graphics_clock>412 MHz</supported_graphics_clock>
+                <supported_graphics_clock>405 MHz</supported_graphics_clock>
+            </supported_mem_clock>
+            <supported_mem_clock>
+                <value>405 MHz</value>
+                <supported_graphics_clock>420 MHz</supported_graphics_clock>
+                <supported_graphics_clock>412 MHz</supported_graphics_clock>
+                <supported_graphics_clock>405 MHz</supported_graphics_clock>
+            </supported_mem_clock>
+        </supported_clocks>
+        <processes>
+            <process_info>
+                <gpu_instance_id>N/A</gpu_instance_id>
+                <compute_instance_id>N/A</compute_instance_id>
+                <pid>28543</pid>
+                <type>G</type>
+                <process_name>/usr/libexec/Xorg</process_name>
+                <used_memory>4 MiB</used_memory>
+            </process_info>
+        </processes>
+        <accounted_processes>
+        </accounted_processes>
+    </gpu>
+
+</nvidia_smi_log>
diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml
new file mode 100644
index 00000000000000..c3c253ffa23644
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml
@@ -0,0 +1,1082 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v12.dtd">
+<nvidia_smi_log>
+    <timestamp>Mon Aug 7 11:10:06 2023</timestamp>
+    <driver_version>535.86.05</driver_version>
+    <cuda_version>12.2</cuda_version>
+    <attached_gpus>1</attached_gpus>
+    <gpu id="00000000:84:00.0">
+        <product_name>NVIDIA GeForce RTX 4090</product_name>
+        <product_brand>GeForce</product_brand>
+        <product_architecture>Ada Lovelace</product_architecture>
+        <display_mode>Disabled</display_mode>
+        <display_active>Disabled</display_active>
+        <persistence_mode>Enabled</persistence_mode>
+        <addressing_mode>None</addressing_mode>
+        <mig_mode>
+            <current_mig>N/A</current_mig>
+            <pending_mig>N/A</pending_mig>
+        </mig_mode>
+        <mig_devices>
+            None
+        </mig_devices>
+        <accounting_mode>Disabled</accounting_mode>
+        <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+        <driver_model>
+            <current_dm>N/A</current_dm>
+            <pending_dm>N/A</pending_dm>
+        </driver_model>
+        <serial>N/A</serial>
+        <uuid>GPU-71d1acc2-662d-2166-bf9f-65272d2fc437</uuid>
+        <minor_number>0</minor_number>
+        <vbios_version>95.02.18.80.5F</vbios_version>
+        <multigpu_board>No</multigpu_board>
+        <board_id>0x8400</board_id>
+        <board_part_number>N/A</board_part_number>
+        <gpu_part_number>2684-300-A1</gpu_part_number>
+        <gpu_fru_part_number>N/A</gpu_fru_part_number>
+        <gpu_module_id>1</gpu_module_id>
+        <inforom_version>
+            <img_version>G002.0000.00.03</img_version>
+            <oem_object>2.0</oem_object>
+            <ecc_object>6.16</ecc_object>
+            <pwr_object>N/A</pwr_object>
+        </inforom_version>
+        <gpu_operation_mode>
+            <current_gom>N/A</current_gom>
+            <pending_gom>N/A</pending_gom>
+        </gpu_operation_mode>
+        <gsp_firmware_version>N/A</gsp_firmware_version>
+        <gpu_virtualization_mode>
+            <virtualization_mode>None</virtualization_mode>
+            <host_vgpu_mode>N/A</host_vgpu_mode>
+        </gpu_virtualization_mode>
+        <gpu_reset_status>
+            <reset_required>No</reset_required>
+            <drain_and_reset_recommended>N/A</drain_and_reset_recommended>
+        </gpu_reset_status>
+        <ibmnpu>
+            <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+        </ibmnpu>
+        <pci>
+            <pci_bus>84</pci_bus>
+            <pci_device>00</pci_device>
+            <pci_domain>0000</pci_domain>
+            <pci_device_id>268410DE</pci_device_id>
+            <pci_bus_id>00000000:84:00.0</pci_bus_id>
+            <pci_sub_system_id>40BF1458</pci_sub_system_id>
+            <pci_gpu_link_info>
+                <pcie_gen>
+                    <max_link_gen>3</max_link_gen>
+                    <current_link_gen>1</current_link_gen>
+                    <device_current_link_gen>1</device_current_link_gen>
+                    <max_device_link_gen>4</max_device_link_gen>
+                    <max_host_link_gen>3</max_host_link_gen>
+                </pcie_gen>
+                <link_widths>
+                    <max_link_width>16x</max_link_width>
+                    <current_link_width>16x</current_link_width>
+                </link_widths>
+            </pci_gpu_link_info>
+            <pci_bridge_chip>
+                <bridge_chip_type>N/A</bridge_chip_type>
+                <bridge_chip_fw>N/A</bridge_chip_fw>
+            </pci_bridge_chip>
+            <replay_counter>0</replay_counter>
+            <replay_rollover_counter>0</replay_rollover_counter>
+            <tx_util>0 KB/s</tx_util>
+            <rx_util>0 KB/s</rx_util>
+            <atomic_caps_inbound>N/A</atomic_caps_inbound>
+            <atomic_caps_outbound>N/A</atomic_caps_outbound>
+        </pci>
+        <fan_speed>0 %</fan_speed>
+        <performance_state>P8</performance_state>
+        <clocks_event_reasons>
+            <clocks_event_reason_gpu_idle>Active</clocks_event_reason_gpu_idle>
+            <clocks_event_reason_applications_clocks_setting>Not Active
+            </clocks_event_reason_applications_clocks_setting>
+            <clocks_event_reason_sw_power_cap>Not Active</clocks_event_reason_sw_power_cap>
+            <clocks_event_reason_hw_slowdown>Not Active</clocks_event_reason_hw_slowdown>
+            <clocks_event_reason_hw_thermal_slowdown>Not Active</clocks_event_reason_hw_thermal_slowdown>
+            <clocks_event_reason_hw_power_brake_slowdown>Not Active</clocks_event_reason_hw_power_brake_slowdown>
+            <clocks_event_reason_sync_boost>Not Active</clocks_event_reason_sync_boost>
+            <clocks_event_reason_sw_thermal_slowdown>Not Active</clocks_event_reason_sw_thermal_slowdown>
+            <clocks_event_reason_display_clocks_setting>Not Active</clocks_event_reason_display_clocks_setting>
+        </clocks_event_reasons>
+        <fb_memory_usage>
+            <total>24564 MiB</total>
+            <reserved>346 MiB</reserved>
+            <used>2 MiB</used>
+            <free>24214 MiB</free>
+        </fb_memory_usage>
+        <bar1_memory_usage>
+            <total>256 MiB</total>
+            <used>1 MiB</used>
+            <free>255 MiB</free>
+        </bar1_memory_usage>
+        <cc_protected_memory_usage>
+            <total>0 MiB</total>
+            <used>0 MiB</used>
+            <free>0 MiB</free>
+        </cc_protected_memory_usage>
+        <compute_mode>Default</compute_mode>
+        <utilization>
+            <gpu_util>0 %</gpu_util>
+            <memory_util>0 %</memory_util>
+            <encoder_util>0 %</encoder_util>
+            <decoder_util>0 %</decoder_util>
+            <jpeg_util>0 %</jpeg_util>
+            <ofa_util>0 %</ofa_util>
+        </utilization>
+        <encoder_stats>
+            <session_count>0</session_count>
+            <average_fps>0</average_fps>
+            <average_latency>0</average_latency>
+        </encoder_stats>
+        <fbc_stats>
+            <session_count>0</session_count>
+            <average_fps>0</average_fps>
+            <average_latency>0</average_latency>
+        </fbc_stats>
+        <ecc_mode>
+            <current_ecc>Disabled</current_ecc>
+            <pending_ecc>Disabled</pending_ecc>
+        </ecc_mode>
+        <ecc_errors>
+            <volatile>
+                <sram_correctable>N/A</sram_correctable>
+                <sram_uncorrectable>N/A</sram_uncorrectable>
+                <dram_correctable>N/A</dram_correctable>
+                <dram_uncorrectable>N/A</dram_uncorrectable>
+            </volatile>
+            <aggregate>
+                <sram_correctable>N/A</sram_correctable>
+                <sram_uncorrectable>N/A</sram_uncorrectable>
+                <dram_correctable>N/A</dram_correctable>
+                <dram_uncorrectable>N/A</dram_uncorrectable>
+            </aggregate>
+        </ecc_errors>
+        <retired_pages>
+            <multiple_single_bit_retirement>
+                <retired_count>N/A</retired_count>
+                <retired_pagelist>N/A</retired_pagelist>
+            </multiple_single_bit_retirement>
+            <double_bit_retirement>
+                <retired_count>N/A</retired_count>
+                <retired_pagelist>N/A</retired_pagelist>
+            </double_bit_retirement>
+            <pending_blacklist>N/A</pending_blacklist>
+            <pending_retirement>N/A</pending_retirement>
+        </retired_pages>
+        <remapped_rows>
+            <remapped_row_corr>0</remapped_row_corr>
+            <remapped_row_unc>0</remapped_row_unc>
+            <remapped_row_pending>No</remapped_row_pending>
+            <remapped_row_failure>No</remapped_row_failure>
+            <row_remapper_histogram>
+                <row_remapper_histogram_max>192 bank(s)</row_remapper_histogram_max>
+                <row_remapper_histogram_high>0 bank(s)</row_remapper_histogram_high>
+                <row_remapper_histogram_partial>0 bank(s)</row_remapper_histogram_partial>
+                <row_remapper_histogram_low>0 bank(s)</row_remapper_histogram_low>
+                <row_remapper_histogram_none>0 bank(s)</row_remapper_histogram_none>
+            </row_remapper_histogram>
+        </remapped_rows>
+        <temperature>
+            <gpu_temp>40 C</gpu_temp>
+            <gpu_temp_tlimit>43 C</gpu_temp_tlimit>
+            <gpu_temp_max_tlimit_threshold>-7 C</gpu_temp_max_tlimit_threshold>
+            <gpu_temp_slow_tlimit_threshold>-2 C</gpu_temp_slow_tlimit_threshold>
+            <gpu_temp_max_gpu_tlimit_threshold>0 C</gpu_temp_max_gpu_tlimit_threshold>
+            <gpu_target_temperature>84 C</gpu_target_temperature>
+            <memory_temp>N/A</memory_temp>
+            <gpu_temp_max_mem_tlimit_threshold>N/A</gpu_temp_max_mem_tlimit_threshold>
+        </temperature>
+        <supported_gpu_target_temp>
+            <gpu_target_temp_min>65 C</gpu_target_temp_min>
+            <gpu_target_temp_max>88 C</gpu_target_temp_max>
+        </supported_gpu_target_temp>
+        <gpu_power_readings>
+            <power_state>P8</power_state>
+            <power_draw>26.84 W</power_draw>
+            <current_power_limit>450.00 W</current_power_limit>
+            <requested_power_limit>450.00 W</requested_power_limit>
+            <default_power_limit>450.00 W</default_power_limit>
+            <min_power_limit>10.00 W</min_power_limit>
+            <max_power_limit>600.00 W</max_power_limit>
+        </gpu_power_readings>
+        <module_power_readings>
+            <power_state>P8</power_state>
+            <power_draw>N/A</power_draw>
+            <current_power_limit>N/A</current_power_limit>
+            <requested_power_limit>N/A</requested_power_limit>
+            <default_power_limit>N/A</default_power_limit>
+            <min_power_limit>N/A</min_power_limit>
+            <max_power_limit>N/A</max_power_limit>
+        </module_power_readings>
+        <clocks>
+            <graphics_clock>210 MHz</graphics_clock>
+            <sm_clock>210 MHz</sm_clock>
+            <mem_clock>405 MHz</mem_clock>
+            <video_clock>1185 MHz</video_clock>
+        </clocks>
+        <applications_clocks>
+            <graphics_clock>N/A</graphics_clock>
+            <mem_clock>N/A</mem_clock>
+        </applications_clocks>
+        <default_applications_clocks>
+            <graphics_clock>N/A</graphics_clock>
+            <mem_clock>N/A</mem_clock>
+        </default_applications_clocks>
+        <deferred_clocks>
+            <mem_clock>N/A</mem_clock>
+        </deferred_clocks>
+        <max_clocks>
+            <graphics_clock>3120 MHz</graphics_clock>
+            <sm_clock>3120 MHz</sm_clock>
+            <mem_clock>10501 MHz</mem_clock>
+            <video_clock>2415 MHz</video_clock>
+        </max_clocks>
+        <max_customer_boost_clocks>
+            <graphics_clock>N/A</graphics_clock>
+        </max_customer_boost_clocks>
+        <clock_policy>
+            <auto_boost>N/A</auto_boost>
+            <auto_boost_default>N/A</auto_boost_default>
+        </clock_policy>
+        <voltage>
+            <graphics_volt>880.000 mV</graphics_volt>
+        </voltage>
+        <fabric>
+            <state>N/A</state>
+            <status>N/A</status>
+        </fabric>
+        <supported_clocks>
+            <supported_mem_clock>
+                <value>10501 MHz</value>
+                <supported_graphics_clock>3120 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+                <supported_graphics_clock>990 MHz</supported_graphics_clock>
+                <supported_graphics_clock>975 MHz</supported_graphics_clock>
+                <supported_graphics_clock>960 MHz</supported_graphics_clock>
+                <supported_graphics_clock>945 MHz</supported_graphics_clock>
+                <supported_graphics_clock>930 MHz</supported_graphics_clock>
+                <supported_graphics_clock>915 MHz</supported_graphics_clock>
+                <supported_graphics_clock>900 MHz</supported_graphics_clock>
+                <supported_graphics_clock>885 MHz</supported_graphics_clock>
+                <supported_graphics_clock>870 MHz</supported_graphics_clock>
+                <supported_graphics_clock>855 MHz</supported_graphics_clock>
+                <supported_graphics_clock>840 MHz</supported_graphics_clock>
+                <supported_graphics_clock>825 MHz</supported_graphics_clock>
+                <supported_graphics_clock>810 MHz</supported_graphics_clock>
+                <supported_graphics_clock>795 MHz</supported_graphics_clock>
+                <supported_graphics_clock>780 MHz</supported_graphics_clock>
+                <supported_graphics_clock>765 MHz</supported_graphics_clock>
+                <supported_graphics_clock>750 MHz</supported_graphics_clock>
+                <supported_graphics_clock>735 MHz</supported_graphics_clock>
+                <supported_graphics_clock>720 MHz</supported_graphics_clock>
+                <supported_graphics_clock>705 MHz</supported_graphics_clock>
+                <supported_graphics_clock>690 MHz</supported_graphics_clock>
+                <supported_graphics_clock>675 MHz</supported_graphics_clock>
+                <supported_graphics_clock>660 MHz</supported_graphics_clock>
+                <supported_graphics_clock>645 MHz</supported_graphics_clock>
+                <supported_graphics_clock>630 MHz</supported_graphics_clock>
+                <supported_graphics_clock>615 MHz</supported_graphics_clock>
+                <supported_graphics_clock>600 MHz</supported_graphics_clock>
+                <supported_graphics_clock>585 MHz</supported_graphics_clock>
+                <supported_graphics_clock>570 MHz</supported_graphics_clock>
+                <supported_graphics_clock>555 MHz</supported_graphics_clock>
+                <supported_graphics_clock>540 MHz</supported_graphics_clock>
+                <supported_graphics_clock>525 MHz</supported_graphics_clock>
+                <supported_graphics_clock>510 MHz</supported_graphics_clock>
+                <supported_graphics_clock>495 MHz</supported_graphics_clock>
+                <supported_graphics_clock>480 MHz</supported_graphics_clock>
+                <supported_graphics_clock>465 MHz</supported_graphics_clock>
+                <supported_graphics_clock>450 MHz</supported_graphics_clock>
+                <supported_graphics_clock>435 MHz</supported_graphics_clock>
+                <supported_graphics_clock>420 MHz</supported_graphics_clock>
+                <supported_graphics_clock>405 MHz</supported_graphics_clock>
+                <supported_graphics_clock>390 MHz</supported_graphics_clock>
+                <supported_graphics_clock>375 MHz</supported_graphics_clock>
+                <supported_graphics_clock>360 MHz</supported_graphics_clock>
+                <supported_graphics_clock>345 MHz</supported_graphics_clock>
+                <supported_graphics_clock>330 MHz</supported_graphics_clock>
+                <supported_graphics_clock>315 MHz</supported_graphics_clock>
+                <supported_graphics_clock>300 MHz</supported_graphics_clock>
+                <supported_graphics_clock>285 MHz</supported_graphics_clock>
+                <supported_graphics_clock>270 MHz</supported_graphics_clock>
+                <supported_graphics_clock>255 MHz</supported_graphics_clock>
+                <supported_graphics_clock>240 MHz</supported_graphics_clock>
+                <supported_graphics_clock>225 MHz</supported_graphics_clock>
+            </supported_mem_clock>
+            <supported_mem_clock>
+                <value>10251 MHz</value>
+                <supported_graphics_clock>3120 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+                <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+                <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+                <supported_graphics_clock>1995 MHz</supported_graphics_clock>
<supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + <supported_graphics_clock>1245 MHz</supported_graphics_clock> + <supported_graphics_clock>1230 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1200 MHz</supported_graphics_clock> + <supported_graphics_clock>1185 MHz</supported_graphics_clock> + <supported_graphics_clock>1170 MHz</supported_graphics_clock> + <supported_graphics_clock>1155 
MHz</supported_graphics_clock> + <supported_graphics_clock>1140 MHz</supported_graphics_clock> + <supported_graphics_clock>1125 MHz</supported_graphics_clock> + <supported_graphics_clock>1110 MHz</supported_graphics_clock> + <supported_graphics_clock>1095 MHz</supported_graphics_clock> + <supported_graphics_clock>1080 MHz</supported_graphics_clock> + <supported_graphics_clock>1065 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1035 MHz</supported_graphics_clock> + <supported_graphics_clock>1020 MHz</supported_graphics_clock> + <supported_graphics_clock>1005 MHz</supported_graphics_clock> + <supported_graphics_clock>990 MHz</supported_graphics_clock> + <supported_graphics_clock>975 MHz</supported_graphics_clock> + <supported_graphics_clock>960 MHz</supported_graphics_clock> + <supported_graphics_clock>945 MHz</supported_graphics_clock> + <supported_graphics_clock>930 MHz</supported_graphics_clock> + <supported_graphics_clock>915 MHz</supported_graphics_clock> + <supported_graphics_clock>900 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>870 MHz</supported_graphics_clock> + <supported_graphics_clock>855 MHz</supported_graphics_clock> + <supported_graphics_clock>840 MHz</supported_graphics_clock> + <supported_graphics_clock>825 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>795 MHz</supported_graphics_clock> + <supported_graphics_clock>780 MHz</supported_graphics_clock> + <supported_graphics_clock>765 MHz</supported_graphics_clock> + <supported_graphics_clock>750 MHz</supported_graphics_clock> + <supported_graphics_clock>735 MHz</supported_graphics_clock> + <supported_graphics_clock>720 MHz</supported_graphics_clock> + <supported_graphics_clock>705 MHz</supported_graphics_clock> + <supported_graphics_clock>690 MHz</supported_graphics_clock> + <supported_graphics_clock>675 MHz</supported_graphics_clock> + <supported_graphics_clock>660 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 
MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + <supported_graphics_clock>285 MHz</supported_graphics_clock> + <supported_graphics_clock>270 MHz</supported_graphics_clock> + <supported_graphics_clock>255 MHz</supported_graphics_clock> + <supported_graphics_clock>240 MHz</supported_graphics_clock> + <supported_graphics_clock>225 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>5001 MHz</value> + <supported_graphics_clock>3120 MHz</supported_graphics_clock> + <supported_graphics_clock>3105 MHz</supported_graphics_clock> + <supported_graphics_clock>3090 MHz</supported_graphics_clock> + <supported_graphics_clock>3075 MHz</supported_graphics_clock> + <supported_graphics_clock>3060 MHz</supported_graphics_clock> + <supported_graphics_clock>3045 MHz</supported_graphics_clock> + <supported_graphics_clock>3030 MHz</supported_graphics_clock> + <supported_graphics_clock>3015 MHz</supported_graphics_clock> + <supported_graphics_clock>3000 MHz</supported_graphics_clock> + <supported_graphics_clock>2985 MHz</supported_graphics_clock> + <supported_graphics_clock>2970 MHz</supported_graphics_clock> + <supported_graphics_clock>2955 MHz</supported_graphics_clock> + <supported_graphics_clock>2940 MHz</supported_graphics_clock> + <supported_graphics_clock>2925 MHz</supported_graphics_clock> + <supported_graphics_clock>2910 MHz</supported_graphics_clock> + <supported_graphics_clock>2895 MHz</supported_graphics_clock> + <supported_graphics_clock>2880 MHz</supported_graphics_clock> + <supported_graphics_clock>2865 MHz</supported_graphics_clock> + <supported_graphics_clock>2850 MHz</supported_graphics_clock> + <supported_graphics_clock>2835 MHz</supported_graphics_clock> + <supported_graphics_clock>2820 MHz</supported_graphics_clock> + <supported_graphics_clock>2805 MHz</supported_graphics_clock> + <supported_graphics_clock>2790 MHz</supported_graphics_clock> + <supported_graphics_clock>2775 MHz</supported_graphics_clock> + <supported_graphics_clock>2760 MHz</supported_graphics_clock> + <supported_graphics_clock>2745 MHz</supported_graphics_clock> + <supported_graphics_clock>2730 MHz</supported_graphics_clock> + <supported_graphics_clock>2715 MHz</supported_graphics_clock> + <supported_graphics_clock>2700 MHz</supported_graphics_clock> + <supported_graphics_clock>2685 MHz</supported_graphics_clock> + <supported_graphics_clock>2670 MHz</supported_graphics_clock> + <supported_graphics_clock>2655 MHz</supported_graphics_clock> + <supported_graphics_clock>2640 MHz</supported_graphics_clock> + <supported_graphics_clock>2625 MHz</supported_graphics_clock> + <supported_graphics_clock>2610 MHz</supported_graphics_clock> + <supported_graphics_clock>2595 MHz</supported_graphics_clock> + <supported_graphics_clock>2580 MHz</supported_graphics_clock> + <supported_graphics_clock>2565 MHz</supported_graphics_clock> + <supported_graphics_clock>2550 MHz</supported_graphics_clock> + <supported_graphics_clock>2535 MHz</supported_graphics_clock> + <supported_graphics_clock>2520 MHz</supported_graphics_clock> + <supported_graphics_clock>2505 MHz</supported_graphics_clock> + <supported_graphics_clock>2490 MHz</supported_graphics_clock> + <supported_graphics_clock>2475 MHz</supported_graphics_clock> + <supported_graphics_clock>2460 MHz</supported_graphics_clock> + <supported_graphics_clock>2445 MHz</supported_graphics_clock> + <supported_graphics_clock>2430 MHz</supported_graphics_clock> + <supported_graphics_clock>2415 
MHz</supported_graphics_clock> + <supported_graphics_clock>2400 MHz</supported_graphics_clock> + <supported_graphics_clock>2385 MHz</supported_graphics_clock> + <supported_graphics_clock>2370 MHz</supported_graphics_clock> + <supported_graphics_clock>2355 MHz</supported_graphics_clock> + <supported_graphics_clock>2340 MHz</supported_graphics_clock> + <supported_graphics_clock>2325 MHz</supported_graphics_clock> + <supported_graphics_clock>2310 MHz</supported_graphics_clock> + <supported_graphics_clock>2295 MHz</supported_graphics_clock> + <supported_graphics_clock>2280 MHz</supported_graphics_clock> + <supported_graphics_clock>2265 MHz</supported_graphics_clock> + <supported_graphics_clock>2250 MHz</supported_graphics_clock> + <supported_graphics_clock>2235 MHz</supported_graphics_clock> + <supported_graphics_clock>2220 MHz</supported_graphics_clock> + <supported_graphics_clock>2205 MHz</supported_graphics_clock> + <supported_graphics_clock>2190 MHz</supported_graphics_clock> + <supported_graphics_clock>2175 MHz</supported_graphics_clock> + <supported_graphics_clock>2160 MHz</supported_graphics_clock> + <supported_graphics_clock>2145 MHz</supported_graphics_clock> + <supported_graphics_clock>2130 MHz</supported_graphics_clock> + <supported_graphics_clock>2115 MHz</supported_graphics_clock> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + 
<supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + <supported_graphics_clock>1245 MHz</supported_graphics_clock> + <supported_graphics_clock>1230 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1200 MHz</supported_graphics_clock> + <supported_graphics_clock>1185 MHz</supported_graphics_clock> + <supported_graphics_clock>1170 MHz</supported_graphics_clock> + <supported_graphics_clock>1155 MHz</supported_graphics_clock> + <supported_graphics_clock>1140 MHz</supported_graphics_clock> + <supported_graphics_clock>1125 MHz</supported_graphics_clock> + <supported_graphics_clock>1110 MHz</supported_graphics_clock> + <supported_graphics_clock>1095 MHz</supported_graphics_clock> + <supported_graphics_clock>1080 MHz</supported_graphics_clock> + <supported_graphics_clock>1065 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1035 MHz</supported_graphics_clock> + <supported_graphics_clock>1020 MHz</supported_graphics_clock> + <supported_graphics_clock>1005 MHz</supported_graphics_clock> + <supported_graphics_clock>990 MHz</supported_graphics_clock> + <supported_graphics_clock>975 MHz</supported_graphics_clock> + <supported_graphics_clock>960 MHz</supported_graphics_clock> + <supported_graphics_clock>945 MHz</supported_graphics_clock> + <supported_graphics_clock>930 MHz</supported_graphics_clock> + <supported_graphics_clock>915 MHz</supported_graphics_clock> + <supported_graphics_clock>900 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>870 MHz</supported_graphics_clock> + <supported_graphics_clock>855 MHz</supported_graphics_clock> + <supported_graphics_clock>840 MHz</supported_graphics_clock> + <supported_graphics_clock>825 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>795 MHz</supported_graphics_clock> + <supported_graphics_clock>780 MHz</supported_graphics_clock> + <supported_graphics_clock>765 MHz</supported_graphics_clock> + <supported_graphics_clock>750 
MHz</supported_graphics_clock> + <supported_graphics_clock>735 MHz</supported_graphics_clock> + <supported_graphics_clock>720 MHz</supported_graphics_clock> + <supported_graphics_clock>705 MHz</supported_graphics_clock> + <supported_graphics_clock>690 MHz</supported_graphics_clock> + <supported_graphics_clock>675 MHz</supported_graphics_clock> + <supported_graphics_clock>660 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + <supported_graphics_clock>285 MHz</supported_graphics_clock> + <supported_graphics_clock>270 MHz</supported_graphics_clock> + <supported_graphics_clock>255 MHz</supported_graphics_clock> + <supported_graphics_clock>240 MHz</supported_graphics_clock> + <supported_graphics_clock>225 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>810 MHz</value> + <supported_graphics_clock>3105 MHz</supported_graphics_clock> + <supported_graphics_clock>3090 MHz</supported_graphics_clock> + <supported_graphics_clock>3075 MHz</supported_graphics_clock> + <supported_graphics_clock>3060 MHz</supported_graphics_clock> + <supported_graphics_clock>3045 MHz</supported_graphics_clock> + <supported_graphics_clock>3030 MHz</supported_graphics_clock> + <supported_graphics_clock>3015 MHz</supported_graphics_clock> + <supported_graphics_clock>3000 MHz</supported_graphics_clock> + <supported_graphics_clock>2985 MHz</supported_graphics_clock> + <supported_graphics_clock>2970 MHz</supported_graphics_clock> + <supported_graphics_clock>2955 MHz</supported_graphics_clock> + <supported_graphics_clock>2940 MHz</supported_graphics_clock> + <supported_graphics_clock>2925 MHz</supported_graphics_clock> + <supported_graphics_clock>2910 MHz</supported_graphics_clock> + <supported_graphics_clock>2895 MHz</supported_graphics_clock> + <supported_graphics_clock>2880 MHz</supported_graphics_clock> + <supported_graphics_clock>2865 MHz</supported_graphics_clock> + <supported_graphics_clock>2850 MHz</supported_graphics_clock> + <supported_graphics_clock>2835 MHz</supported_graphics_clock> + 
<supported_graphics_clock>2820 MHz</supported_graphics_clock> + <supported_graphics_clock>2805 MHz</supported_graphics_clock> + <supported_graphics_clock>2790 MHz</supported_graphics_clock> + <supported_graphics_clock>2775 MHz</supported_graphics_clock> + <supported_graphics_clock>2760 MHz</supported_graphics_clock> + <supported_graphics_clock>2745 MHz</supported_graphics_clock> + <supported_graphics_clock>2730 MHz</supported_graphics_clock> + <supported_graphics_clock>2715 MHz</supported_graphics_clock> + <supported_graphics_clock>2700 MHz</supported_graphics_clock> + <supported_graphics_clock>2685 MHz</supported_graphics_clock> + <supported_graphics_clock>2670 MHz</supported_graphics_clock> + <supported_graphics_clock>2655 MHz</supported_graphics_clock> + <supported_graphics_clock>2640 MHz</supported_graphics_clock> + <supported_graphics_clock>2625 MHz</supported_graphics_clock> + <supported_graphics_clock>2610 MHz</supported_graphics_clock> + <supported_graphics_clock>2595 MHz</supported_graphics_clock> + <supported_graphics_clock>2580 MHz</supported_graphics_clock> + <supported_graphics_clock>2565 MHz</supported_graphics_clock> + <supported_graphics_clock>2550 MHz</supported_graphics_clock> + <supported_graphics_clock>2535 MHz</supported_graphics_clock> + <supported_graphics_clock>2520 MHz</supported_graphics_clock> + <supported_graphics_clock>2505 MHz</supported_graphics_clock> + <supported_graphics_clock>2490 MHz</supported_graphics_clock> + <supported_graphics_clock>2475 MHz</supported_graphics_clock> + <supported_graphics_clock>2460 MHz</supported_graphics_clock> + <supported_graphics_clock>2445 MHz</supported_graphics_clock> + <supported_graphics_clock>2430 MHz</supported_graphics_clock> + <supported_graphics_clock>2415 MHz</supported_graphics_clock> + <supported_graphics_clock>2400 MHz</supported_graphics_clock> + <supported_graphics_clock>2385 MHz</supported_graphics_clock> + <supported_graphics_clock>2370 MHz</supported_graphics_clock> + <supported_graphics_clock>2355 MHz</supported_graphics_clock> + <supported_graphics_clock>2340 MHz</supported_graphics_clock> + <supported_graphics_clock>2325 MHz</supported_graphics_clock> + <supported_graphics_clock>2310 MHz</supported_graphics_clock> + <supported_graphics_clock>2295 MHz</supported_graphics_clock> + <supported_graphics_clock>2280 MHz</supported_graphics_clock> + <supported_graphics_clock>2265 MHz</supported_graphics_clock> + <supported_graphics_clock>2250 MHz</supported_graphics_clock> + <supported_graphics_clock>2235 MHz</supported_graphics_clock> + <supported_graphics_clock>2220 MHz</supported_graphics_clock> + <supported_graphics_clock>2205 MHz</supported_graphics_clock> + <supported_graphics_clock>2190 MHz</supported_graphics_clock> + <supported_graphics_clock>2175 MHz</supported_graphics_clock> + <supported_graphics_clock>2160 MHz</supported_graphics_clock> + <supported_graphics_clock>2145 MHz</supported_graphics_clock> + <supported_graphics_clock>2130 MHz</supported_graphics_clock> + <supported_graphics_clock>2115 MHz</supported_graphics_clock> + <supported_graphics_clock>2100 MHz</supported_graphics_clock> + <supported_graphics_clock>2085 MHz</supported_graphics_clock> + <supported_graphics_clock>2070 MHz</supported_graphics_clock> + <supported_graphics_clock>2055 MHz</supported_graphics_clock> + <supported_graphics_clock>2040 MHz</supported_graphics_clock> + <supported_graphics_clock>2025 MHz</supported_graphics_clock> + <supported_graphics_clock>2010 MHz</supported_graphics_clock> + <supported_graphics_clock>1995 
MHz</supported_graphics_clock> + <supported_graphics_clock>1980 MHz</supported_graphics_clock> + <supported_graphics_clock>1965 MHz</supported_graphics_clock> + <supported_graphics_clock>1950 MHz</supported_graphics_clock> + <supported_graphics_clock>1935 MHz</supported_graphics_clock> + <supported_graphics_clock>1920 MHz</supported_graphics_clock> + <supported_graphics_clock>1905 MHz</supported_graphics_clock> + <supported_graphics_clock>1890 MHz</supported_graphics_clock> + <supported_graphics_clock>1875 MHz</supported_graphics_clock> + <supported_graphics_clock>1860 MHz</supported_graphics_clock> + <supported_graphics_clock>1845 MHz</supported_graphics_clock> + <supported_graphics_clock>1830 MHz</supported_graphics_clock> + <supported_graphics_clock>1815 MHz</supported_graphics_clock> + <supported_graphics_clock>1800 MHz</supported_graphics_clock> + <supported_graphics_clock>1785 MHz</supported_graphics_clock> + <supported_graphics_clock>1770 MHz</supported_graphics_clock> + <supported_graphics_clock>1755 MHz</supported_graphics_clock> + <supported_graphics_clock>1740 MHz</supported_graphics_clock> + <supported_graphics_clock>1725 MHz</supported_graphics_clock> + <supported_graphics_clock>1710 MHz</supported_graphics_clock> + <supported_graphics_clock>1695 MHz</supported_graphics_clock> + <supported_graphics_clock>1680 MHz</supported_graphics_clock> + <supported_graphics_clock>1665 MHz</supported_graphics_clock> + <supported_graphics_clock>1650 MHz</supported_graphics_clock> + <supported_graphics_clock>1635 MHz</supported_graphics_clock> + <supported_graphics_clock>1620 MHz</supported_graphics_clock> + <supported_graphics_clock>1605 MHz</supported_graphics_clock> + <supported_graphics_clock>1590 MHz</supported_graphics_clock> + <supported_graphics_clock>1575 MHz</supported_graphics_clock> + <supported_graphics_clock>1560 MHz</supported_graphics_clock> + <supported_graphics_clock>1545 MHz</supported_graphics_clock> + <supported_graphics_clock>1530 MHz</supported_graphics_clock> + <supported_graphics_clock>1515 MHz</supported_graphics_clock> + <supported_graphics_clock>1500 MHz</supported_graphics_clock> + <supported_graphics_clock>1485 MHz</supported_graphics_clock> + <supported_graphics_clock>1470 MHz</supported_graphics_clock> + <supported_graphics_clock>1455 MHz</supported_graphics_clock> + <supported_graphics_clock>1440 MHz</supported_graphics_clock> + <supported_graphics_clock>1425 MHz</supported_graphics_clock> + <supported_graphics_clock>1410 MHz</supported_graphics_clock> + <supported_graphics_clock>1395 MHz</supported_graphics_clock> + <supported_graphics_clock>1380 MHz</supported_graphics_clock> + <supported_graphics_clock>1365 MHz</supported_graphics_clock> + <supported_graphics_clock>1350 MHz</supported_graphics_clock> + <supported_graphics_clock>1335 MHz</supported_graphics_clock> + <supported_graphics_clock>1320 MHz</supported_graphics_clock> + <supported_graphics_clock>1305 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1275 MHz</supported_graphics_clock> + <supported_graphics_clock>1260 MHz</supported_graphics_clock> + <supported_graphics_clock>1245 MHz</supported_graphics_clock> + <supported_graphics_clock>1230 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1200 MHz</supported_graphics_clock> + <supported_graphics_clock>1185 MHz</supported_graphics_clock> + <supported_graphics_clock>1170 MHz</supported_graphics_clock> + 
<supported_graphics_clock>1155 MHz</supported_graphics_clock> + <supported_graphics_clock>1140 MHz</supported_graphics_clock> + <supported_graphics_clock>1125 MHz</supported_graphics_clock> + <supported_graphics_clock>1110 MHz</supported_graphics_clock> + <supported_graphics_clock>1095 MHz</supported_graphics_clock> + <supported_graphics_clock>1080 MHz</supported_graphics_clock> + <supported_graphics_clock>1065 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1035 MHz</supported_graphics_clock> + <supported_graphics_clock>1020 MHz</supported_graphics_clock> + <supported_graphics_clock>1005 MHz</supported_graphics_clock> + <supported_graphics_clock>990 MHz</supported_graphics_clock> + <supported_graphics_clock>975 MHz</supported_graphics_clock> + <supported_graphics_clock>960 MHz</supported_graphics_clock> + <supported_graphics_clock>945 MHz</supported_graphics_clock> + <supported_graphics_clock>930 MHz</supported_graphics_clock> + <supported_graphics_clock>915 MHz</supported_graphics_clock> + <supported_graphics_clock>900 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>870 MHz</supported_graphics_clock> + <supported_graphics_clock>855 MHz</supported_graphics_clock> + <supported_graphics_clock>840 MHz</supported_graphics_clock> + <supported_graphics_clock>825 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>795 MHz</supported_graphics_clock> + <supported_graphics_clock>780 MHz</supported_graphics_clock> + <supported_graphics_clock>765 MHz</supported_graphics_clock> + <supported_graphics_clock>750 MHz</supported_graphics_clock> + <supported_graphics_clock>735 MHz</supported_graphics_clock> + <supported_graphics_clock>720 MHz</supported_graphics_clock> + <supported_graphics_clock>705 MHz</supported_graphics_clock> + <supported_graphics_clock>690 MHz</supported_graphics_clock> + <supported_graphics_clock>675 MHz</supported_graphics_clock> + <supported_graphics_clock>660 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + 
<supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + <supported_graphics_clock>285 MHz</supported_graphics_clock> + <supported_graphics_clock>270 MHz</supported_graphics_clock> + <supported_graphics_clock>255 MHz</supported_graphics_clock> + <supported_graphics_clock>240 MHz</supported_graphics_clock> + <supported_graphics_clock>225 MHz</supported_graphics_clock> + <supported_graphics_clock>210 MHz</supported_graphics_clock> + </supported_mem_clock> + <supported_mem_clock> + <value>405 MHz</value> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>630 MHz</supported_graphics_clock> + <supported_graphics_clock>615 MHz</supported_graphics_clock> + <supported_graphics_clock>600 MHz</supported_graphics_clock> + <supported_graphics_clock>585 MHz</supported_graphics_clock> + <supported_graphics_clock>570 MHz</supported_graphics_clock> + <supported_graphics_clock>555 MHz</supported_graphics_clock> + <supported_graphics_clock>540 MHz</supported_graphics_clock> + <supported_graphics_clock>525 MHz</supported_graphics_clock> + <supported_graphics_clock>510 MHz</supported_graphics_clock> + <supported_graphics_clock>495 MHz</supported_graphics_clock> + <supported_graphics_clock>480 MHz</supported_graphics_clock> + <supported_graphics_clock>465 MHz</supported_graphics_clock> + <supported_graphics_clock>450 MHz</supported_graphics_clock> + <supported_graphics_clock>435 MHz</supported_graphics_clock> + <supported_graphics_clock>420 MHz</supported_graphics_clock> + <supported_graphics_clock>405 MHz</supported_graphics_clock> + <supported_graphics_clock>390 MHz</supported_graphics_clock> + <supported_graphics_clock>375 MHz</supported_graphics_clock> + <supported_graphics_clock>360 MHz</supported_graphics_clock> + <supported_graphics_clock>345 MHz</supported_graphics_clock> + <supported_graphics_clock>330 MHz</supported_graphics_clock> + <supported_graphics_clock>315 MHz</supported_graphics_clock> + <supported_graphics_clock>300 MHz</supported_graphics_clock> + <supported_graphics_clock>285 MHz</supported_graphics_clock> + <supported_graphics_clock>270 MHz</supported_graphics_clock> + <supported_graphics_clock>255 MHz</supported_graphics_clock> + <supported_graphics_clock>240 MHz</supported_graphics_clock> + <supported_graphics_clock>225 MHz</supported_graphics_clock> + <supported_graphics_clock>210 MHz</supported_graphics_clock> + </supported_mem_clock> + </supported_clocks> + <processes> + </processes> + <accounted_processes> + </accounted_processes> + </gpu> + +</nvidia_smi_log> diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.csv b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.csv new file mode 100644 index 00000000000000..9a4c1e1a9dcd7d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.csv @@ -0,0 +1,2 @@ +name, uuid, fan.speed [%], pstate, memory.reserved [MiB], memory.used [MiB], memory.free [MiB], utilization.gpu [%], utilization.memory [%], temperature.gpu, power.draw [W], clocks.current.graphics [MHz], clocks.current.sm [MHz], clocks.current.memory [MHz], clocks.current.video [MHz] +Tesla P100-PCIE-16GB, GPU-ef1b2c9b-38d8-2090-2bd1-f567a3eb42a6, [N/A], P0, 103, 0, 16280, 0, 0, 37, 28.16, 405, 405, 715, 835 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.xml b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.xml 
new file mode 100644 index 00000000000000..4c43125f9f0c1f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvidia_smi/testdata/tesla-p100.xml @@ -0,0 +1,313 @@ +<?xml version="1.0" ?> +<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd"> +<nvidia_smi_log> + <timestamp>Sat Sep 17 17:06:50 2022</timestamp> + <driver_version>510.47.03</driver_version> + <cuda_version>11.6</cuda_version> + <attached_gpus>1</attached_gpus> + <gpu id="00000000:00:04.0"> + <product_name>Tesla P100-PCIE-16GB</product_name> + <product_brand>Tesla</product_brand> + <product_architecture>Pascal</product_architecture> + <display_mode>Enabled</display_mode> + <display_active>Disabled</display_active> + <persistence_mode>Disabled</persistence_mode> + <mig_mode> + <current_mig>N/A</current_mig> + <pending_mig>N/A</pending_mig> + </mig_mode> + <mig_devices> + None + </mig_devices> + <accounting_mode>Disabled</accounting_mode> + <accounting_mode_buffer_size>4000</accounting_mode_buffer_size> + <driver_model> + <current_dm>N/A</current_dm> + <pending_dm>N/A</pending_dm> + </driver_model> + <serial>0324217145110</serial> + <uuid>GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e</uuid> + <minor_number>0</minor_number> + <vbios_version>86.00.52.00.02</vbios_version> + <multigpu_board>No</multigpu_board> + <board_id>0x4</board_id> + <gpu_part_number>900-2H400-6300-031</gpu_part_number> + <gpu_module_id>0</gpu_module_id> + <inforom_version> + <img_version>H400.0201.00.08</img_version> + <oem_object>1.1</oem_object> + <ecc_object>4.1</ecc_object> + <pwr_object>N/A</pwr_object> + </inforom_version> + <gpu_operation_mode> + <current_gom>N/A</current_gom> + <pending_gom>N/A</pending_gom> + </gpu_operation_mode> + <gsp_firmware_version>N/A</gsp_firmware_version> + <gpu_virtualization_mode> + <virtualization_mode>Pass-Through</virtualization_mode> + <host_vgpu_mode>N/A</host_vgpu_mode> + </gpu_virtualization_mode> + <ibmnpu> + <relaxed_ordering_mode>N/A</relaxed_ordering_mode> + </ibmnpu> + <pci> + <pci_bus>00</pci_bus> + <pci_device>04</pci_device> + <pci_domain>0000</pci_domain> + <pci_device_id>15F810DE</pci_device_id> + <pci_bus_id>00000000:00:04.0</pci_bus_id> + <pci_sub_system_id>118F10DE</pci_sub_system_id> + <pci_gpu_link_info> + <pcie_gen> + <max_link_gen>3</max_link_gen> + <current_link_gen>3</current_link_gen> + </pcie_gen> + <link_widths> + <max_link_width>16x</max_link_width> + <current_link_width>16x</current_link_width> + </link_widths> + </pci_gpu_link_info> + <pci_bridge_chip> + <bridge_chip_type>N/A</bridge_chip_type> + <bridge_chip_fw>N/A</bridge_chip_fw> + </pci_bridge_chip> + <replay_counter>0</replay_counter> + <replay_rollover_counter>0</replay_rollover_counter> + <tx_util>0 KB/s</tx_util> + <rx_util>0 KB/s</rx_util> + </pci> + <fan_speed>N/A</fan_speed> + <performance_state>P0</performance_state> + <clocks_throttle_reasons> + <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle> + <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting> + <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap> + <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown> + <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown> + <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown> + <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost> + 
<clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown> + <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting> + </clocks_throttle_reasons> + <fb_memory_usage> + <total>16384 MiB</total> + <reserved>103 MiB</reserved> + <used>0 MiB</used> + <free>16280 MiB</free> + </fb_memory_usage> + <bar1_memory_usage> + <total>16384 MiB</total> + <used>2 MiB</used> + <free>16382 MiB</free> + </bar1_memory_usage> + <compute_mode>Default</compute_mode> + <utilization> + <gpu_util>0 %</gpu_util> + <memory_util>0 %</memory_util> + <encoder_util>0 %</encoder_util> + <decoder_util>0 %</decoder_util> + </utilization> + <encoder_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </encoder_stats> + <fbc_stats> + <session_count>0</session_count> + <average_fps>0</average_fps> + <average_latency>0</average_latency> + </fbc_stats> + <ecc_mode> + <current_ecc>Enabled</current_ecc> + <pending_ecc>Enabled</pending_ecc> + </ecc_mode> + <ecc_errors> + <volatile> + <single_bit> + <device_memory>0</device_memory> + <register_file>0</register_file> + <l1_cache>N/A</l1_cache> + <l2_cache>0</l2_cache> + <texture_memory>0</texture_memory> + <texture_shm>0</texture_shm> + <cbu>N/A</cbu> + <total>0</total> + </single_bit> + <double_bit> + <device_memory>0</device_memory> + <register_file>0</register_file> + <l1_cache>N/A</l1_cache> + <l2_cache>0</l2_cache> + <texture_memory>0</texture_memory> + <texture_shm>0</texture_shm> + <cbu>N/A</cbu> + <total>0</total> + </double_bit> + </volatile> + <aggregate> + <single_bit> + <device_memory>3</device_memory> + <register_file>0</register_file> + <l1_cache>N/A</l1_cache> + <l2_cache>0</l2_cache> + <texture_memory>0</texture_memory> + <texture_shm>0</texture_shm> + <cbu>N/A</cbu> + <total>3</total> + </single_bit> + <double_bit> + <device_memory>0</device_memory> + <register_file>0</register_file> + <l1_cache>N/A</l1_cache> + <l2_cache>0</l2_cache> + <texture_memory>0</texture_memory> + <texture_shm>0</texture_shm> + <cbu>N/A</cbu> + <total>0</total> + </double_bit> + </aggregate> + </ecc_errors> + <retired_pages> + <multiple_single_bit_retirement> + <retired_count>0</retired_count> + <retired_pagelist> + </retired_pagelist> + </multiple_single_bit_retirement> + <double_bit_retirement> + <retired_count>0</retired_count> + <retired_pagelist> + </retired_pagelist> + </double_bit_retirement> + <pending_blacklist>No</pending_blacklist> + <pending_retirement>No</pending_retirement> + </retired_pages> + <remapped_rows>N/A</remapped_rows> + <temperature> + <gpu_temp>38 C</gpu_temp> + <gpu_temp_max_threshold>85 C</gpu_temp_max_threshold> + <gpu_temp_slow_threshold>82 C</gpu_temp_slow_threshold> + <gpu_temp_max_gpu_threshold>N/A</gpu_temp_max_gpu_threshold> + <gpu_target_temperature>N/A</gpu_target_temperature> + <memory_temp>N/A</memory_temp> + <gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold> + </temperature> + <supported_gpu_target_temp> + <gpu_target_temp_min>N/A</gpu_target_temp_min> + <gpu_target_temp_max>N/A</gpu_target_temp_max> + </supported_gpu_target_temp> + <power_readings> + <power_state>P0</power_state> + <power_management>Supported</power_management> + <power_draw>26.16 W</power_draw> + <power_limit>250.00 W</power_limit> + <default_power_limit>250.00 W</default_power_limit> + <enforced_power_limit>250.00 W</enforced_power_limit> + <min_power_limit>125.00 W</min_power_limit> + <max_power_limit>250.00 
W</max_power_limit> + </power_readings> + <clocks> + <graphics_clock>405 MHz</graphics_clock> + <sm_clock>405 MHz</sm_clock> + <mem_clock>715 MHz</mem_clock> + <video_clock>835 MHz</video_clock> + </clocks> + <applications_clocks> + <graphics_clock>1189 MHz</graphics_clock> + <mem_clock>715 MHz</mem_clock> + </applications_clocks> + <default_applications_clocks> + <graphics_clock>1189 MHz</graphics_clock> + <mem_clock>715 MHz</mem_clock> + </default_applications_clocks> + <max_clocks> + <graphics_clock>1328 MHz</graphics_clock> + <sm_clock>1328 MHz</sm_clock> + <mem_clock>715 MHz</mem_clock> + <video_clock>1328 MHz</video_clock> + </max_clocks> + <max_customer_boost_clocks> + <graphics_clock>1328 MHz</graphics_clock> + </max_customer_boost_clocks> + <clock_policy> + <auto_boost>N/A</auto_boost> + <auto_boost_default>N/A</auto_boost_default> + </clock_policy> + <voltage> + <graphics_volt>N/A</graphics_volt> + </voltage> + <supported_clocks> + <supported_mem_clock> + <value>715 MHz</value> + <supported_graphics_clock>1328 MHz</supported_graphics_clock> + <supported_graphics_clock>1316 MHz</supported_graphics_clock> + <supported_graphics_clock>1303 MHz</supported_graphics_clock> + <supported_graphics_clock>1290 MHz</supported_graphics_clock> + <supported_graphics_clock>1278 MHz</supported_graphics_clock> + <supported_graphics_clock>1265 MHz</supported_graphics_clock> + <supported_graphics_clock>1252 MHz</supported_graphics_clock> + <supported_graphics_clock>1240 MHz</supported_graphics_clock> + <supported_graphics_clock>1227 MHz</supported_graphics_clock> + <supported_graphics_clock>1215 MHz</supported_graphics_clock> + <supported_graphics_clock>1202 MHz</supported_graphics_clock> + <supported_graphics_clock>1189 MHz</supported_graphics_clock> + <supported_graphics_clock>1177 MHz</supported_graphics_clock> + <supported_graphics_clock>1164 MHz</supported_graphics_clock> + <supported_graphics_clock>1151 MHz</supported_graphics_clock> + <supported_graphics_clock>1139 MHz</supported_graphics_clock> + <supported_graphics_clock>1126 MHz</supported_graphics_clock> + <supported_graphics_clock>1113 MHz</supported_graphics_clock> + <supported_graphics_clock>1101 MHz</supported_graphics_clock> + <supported_graphics_clock>1088 MHz</supported_graphics_clock> + <supported_graphics_clock>1075 MHz</supported_graphics_clock> + <supported_graphics_clock>1063 MHz</supported_graphics_clock> + <supported_graphics_clock>1050 MHz</supported_graphics_clock> + <supported_graphics_clock>1037 MHz</supported_graphics_clock> + <supported_graphics_clock>1025 MHz</supported_graphics_clock> + <supported_graphics_clock>1012 MHz</supported_graphics_clock> + <supported_graphics_clock>999 MHz</supported_graphics_clock> + <supported_graphics_clock>987 MHz</supported_graphics_clock> + <supported_graphics_clock>974 MHz</supported_graphics_clock> + <supported_graphics_clock>961 MHz</supported_graphics_clock> + <supported_graphics_clock>949 MHz</supported_graphics_clock> + <supported_graphics_clock>936 MHz</supported_graphics_clock> + <supported_graphics_clock>923 MHz</supported_graphics_clock> + <supported_graphics_clock>911 MHz</supported_graphics_clock> + <supported_graphics_clock>898 MHz</supported_graphics_clock> + <supported_graphics_clock>885 MHz</supported_graphics_clock> + <supported_graphics_clock>873 MHz</supported_graphics_clock> + <supported_graphics_clock>860 MHz</supported_graphics_clock> + <supported_graphics_clock>847 MHz</supported_graphics_clock> + <supported_graphics_clock>835 MHz</supported_graphics_clock> + 
<supported_graphics_clock>822 MHz</supported_graphics_clock> + <supported_graphics_clock>810 MHz</supported_graphics_clock> + <supported_graphics_clock>797 MHz</supported_graphics_clock> + <supported_graphics_clock>784 MHz</supported_graphics_clock> + <supported_graphics_clock>772 MHz</supported_graphics_clock> + <supported_graphics_clock>759 MHz</supported_graphics_clock> + <supported_graphics_clock>746 MHz</supported_graphics_clock> + <supported_graphics_clock>734 MHz</supported_graphics_clock> + <supported_graphics_clock>721 MHz</supported_graphics_clock> + <supported_graphics_clock>708 MHz</supported_graphics_clock> + <supported_graphics_clock>696 MHz</supported_graphics_clock> + <supported_graphics_clock>683 MHz</supported_graphics_clock> + <supported_graphics_clock>670 MHz</supported_graphics_clock> + <supported_graphics_clock>658 MHz</supported_graphics_clock> + <supported_graphics_clock>645 MHz</supported_graphics_clock> + <supported_graphics_clock>632 MHz</supported_graphics_clock> + <supported_graphics_clock>620 MHz</supported_graphics_clock> + <supported_graphics_clock>607 MHz</supported_graphics_clock> + <supported_graphics_clock>594 MHz</supported_graphics_clock> + <supported_graphics_clock>582 MHz</supported_graphics_clock> + <supported_graphics_clock>569 MHz</supported_graphics_clock> + <supported_graphics_clock>556 MHz</supported_graphics_clock> + <supported_graphics_clock>544 MHz</supported_graphics_clock> + </supported_mem_clock> + </supported_clocks> + <processes> + </processes> + <accounted_processes> + </accounted_processes> + </gpu> + +</nvidia_smi_log> \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvme/README.md b/src/go/collectors/go.d.plugin/modules/nvme/README.md new file mode 120000 index 00000000000000..ca657b905e47c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/README.md @@ -0,0 +1 @@ +integrations/nvme_devices.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvme/charts.go b/src/go/collectors/go.d.plugin/modules/nvme/charts.go new file mode 100644 index 00000000000000..9218c27a19f10d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/charts.go @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + _ = 2050 + iota // right after Disks section + prioDeviceEstimatedEndurancePerc + prioDeviceAvailableSparePerc + prioDeviceCompositeTemperature + prioDeviceIOTransferredCount + prioDevicePowerCyclesCount + prioDevicePowerOnTime + prioDeviceUnsafeShutdownsCount + prioDeviceCriticalWarningsState + prioDeviceMediaErrorsRate + prioDeviceErrorLogEntriesRate + prioDeviceWarningCompositeTemperatureTime + prioDeviceCriticalCompositeTemperatureTime + prioDeviceThmTemp1TransitionsCount + prioDeviceThmTemp2TransitionsRate + prioDeviceThmTemp1Time + prioDeviceThmTemp2Time +) + +var deviceChartsTmpl = module.Charts{ + deviceEstimatedEndurancePercChartTmpl.Copy(), + deviceAvailableSparePercChartTmpl.Copy(), + deviceCompositeTemperatureChartTmpl.Copy(), + deviceIOTransferredCountChartTmpl.Copy(), + devicePowerCyclesCountChartTmpl.Copy(), + devicePowerOnTimeChartTmpl.Copy(), + deviceUnsafeShutdownsCountChartTmpl.Copy(), + deviceCriticalWarningsStateChartTmpl.Copy(), + deviceMediaErrorsRateChartTmpl.Copy(), + deviceErrorLogEntriesRateChartTmpl.Copy(), + deviceWarnCompositeTemperatureTimeChartTmpl.Copy(), + deviceCritCompositeTemperatureTimeChartTmpl.Copy(), + 
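// thermal management chart templates follow: transition counters and cumulative time +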
deviceThmTemp1TransitionsRateChartTmpl.Copy(), + deviceThmTemp2TransitionsRateChartTmpl.Copy(), + deviceThmTemp1TimeChartTmpl.Copy(), + deviceThmTemp2TimeChartTmpl.Copy(), +} + +var deviceEstimatedEndurancePercChartTmpl = module.Chart{ + ID: "device_%s_estimated_endurance_perc", + Title: "Estimated endurance", + Units: "percentage", + Fam: "endurance", + Ctx: "nvme.device_estimated_endurance_perc", + Priority: prioDeviceEstimatedEndurancePerc, + Dims: module.Dims{ + {ID: "device_%s_percentage_used", Name: "used"}, + }, +} +var deviceAvailableSparePercChartTmpl = module.Chart{ + ID: "device_%s_available_spare_perc", + Title: "Remaining spare capacity", + Units: "percentage", + Fam: "spare", + Ctx: "nvme.device_available_spare_perc", + Priority: prioDeviceAvailableSparePerc, + Dims: module.Dims{ + {ID: "device_%s_available_spare", Name: "spare"}, + }, +} +var deviceCompositeTemperatureChartTmpl = module.Chart{ + ID: "device_%s_temperature", + Title: "Composite temperature", + Units: "celsius", + Fam: "temperature", + Ctx: "nvme.device_composite_temperature", + Priority: prioDeviceCompositeTemperature, + Dims: module.Dims{ + {ID: "device_%s_temperature", Name: "temperature"}, + }, +} +var deviceIOTransferredCountChartTmpl = module.Chart{ + ID: "device_%s_io_transferred_count", + Title: "Amount of data transferred to and from device", + Units: "bytes", + Fam: "transferred data", + Ctx: "nvme.device_io_transferred_count", + Priority: prioDeviceIOTransferredCount, + Type: module.Area, + Dims: module.Dims{ + {ID: "device_%s_data_units_read", Name: "read"}, + {ID: "device_%s_data_units_written", Name: "written", Mul: -1}, + }, +} + +var devicePowerCyclesCountChartTmpl = module.Chart{ + ID: "device_%s_power_cycles_count", + Title: "Power cycles", + Units: "cycles", + Fam: "power cycles", + Ctx: "nvme.device_power_cycles_count", + Priority: prioDevicePowerCyclesCount, + Dims: module.Dims{ + {ID: "device_%s_power_cycles", Name: "power"}, + }, +} +var devicePowerOnTimeChartTmpl = module.Chart{ + ID: "device_%s_power_on_time", + Title: "Power-on time", + Units: "seconds", + Fam: "power-on time", + Ctx: "nvme.device_power_on_time", + Priority: prioDevicePowerOnTime, + Dims: module.Dims{ + {ID: "device_%s_power_on_time", Name: "power-on"}, + }, +} +var deviceCriticalWarningsStateChartTmpl = module.Chart{ + ID: "device_%s_critical_warnings_state", + Title: "Critical warnings state", + Units: "state", + Fam: "critical warnings", + Ctx: "nvme.device_critical_warnings_state", + Priority: prioDeviceCriticalWarningsState, + Dims: module.Dims{ + {ID: "device_%s_critical_warning_available_spare", Name: "available_spare"}, + {ID: "device_%s_critical_warning_temp_threshold", Name: "temp_threshold"}, + {ID: "device_%s_critical_warning_nvm_subsystem_reliability", Name: "nvm_subsystem_reliability"}, + {ID: "device_%s_critical_warning_read_only", Name: "read_only"}, + {ID: "device_%s_critical_warning_volatile_mem_backup_failed", Name: "volatile_mem_backup_failed"}, + {ID: "device_%s_critical_warning_persistent_memory_read_only", Name: "persistent_memory_read_only"}, + }, +} +var deviceUnsafeShutdownsCountChartTmpl = module.Chart{ + ID: "device_%s_unsafe_shutdowns_count", + Title: "Unsafe shutdowns", + Units: "shutdowns", + Fam: "shutdowns", + Ctx: "nvme.device_unsafe_shutdowns_count", + Priority: prioDeviceUnsafeShutdownsCount, + Dims: module.Dims{ + {ID: "device_%s_unsafe_shutdowns", Name: "unsafe"}, + }, +} +var deviceMediaErrorsRateChartTmpl = module.Chart{ + ID: "device_%s_media_errors_rate", + Title: "Media and 
data integrity errors", + Units: "errors/s", + Fam: "media errors", + Ctx: "nvme.device_media_errors_rate", + Priority: prioDeviceMediaErrorsRate, + Dims: module.Dims{ + {ID: "device_%s_media_errors", Name: "media", Algo: module.Incremental}, + }, +} +var deviceErrorLogEntriesRateChartTmpl = module.Chart{ + ID: "device_%s_error_log_entries_rate", + Title: "Error log entries", + Units: "entries/s", + Fam: "error log", + Ctx: "nvme.device_error_log_entries_rate", + Priority: prioDeviceErrorLogEntriesRate, + Dims: module.Dims{ + {ID: "device_%s_num_err_log_entries", Name: "error_log", Algo: module.Incremental}, + }, +} +var deviceWarnCompositeTemperatureTimeChartTmpl = module.Chart{ + ID: "device_%s_warning_composite_temperature_time", + Title: "Warning composite temperature time", + Units: "seconds", + Fam: "warn temp time", + Ctx: "nvme.device_warning_composite_temperature_time", + Priority: prioDeviceWarningCompositeTemperatureTime, + Dims: module.Dims{ + {ID: "device_%s_warning_temp_time", Name: "wctemp"}, + }, +} +var deviceCritCompositeTemperatureTimeChartTmpl = module.Chart{ + ID: "device_%s_critical_composite_temperature_time", + Title: "Critical composite temperature time", + Units: "seconds", + Fam: "crit temp time", + Ctx: "nvme.device_critical_composite_temperature_time", + Priority: prioDeviceCriticalCompositeTemperatureTime, + Dims: module.Dims{ + {ID: "device_%s_critical_comp_time", Name: "cctemp"}, + }, +} +var ( + deviceThmTemp1TransitionsRateChartTmpl = module.Chart{ + ID: "device_%s_thm_temp1_transitions_rate", + Title: "Thermal management temp1 transitions", + Units: "transitions/s", + Fam: "thermal mgmt transitions", + Ctx: "nvme.device_thermal_mgmt_temp1_transitions_rate", + Priority: prioDeviceThmTemp1TransitionsCount, + Dims: module.Dims{ + {ID: "device_%s_thm_temp1_trans_count", Name: "temp1", Algo: module.Incremental}, + }, + } + deviceThmTemp2TransitionsRateChartTmpl = module.Chart{ + ID: "device_%s_thm_temp2_transitions_rate", + Title: "Thermal management temp2 transitions", + Units: "transitions/s", + Fam: "thermal mgmt transitions", + Ctx: "nvme.device_thermal_mgmt_temp2_transitions_rate", + Priority: prioDeviceThmTemp2TransitionsRate, + Dims: module.Dims{ + {ID: "device_%s_thm_temp2_trans_count", Name: "temp2", Algo: module.Incremental}, + }, + } +) +var ( + deviceThmTemp1TimeChartTmpl = module.Chart{ + ID: "device_%s_thm_temp1_time", + Title: "Thermal management temp1 time", + Units: "seconds", + Fam: "thermal mgmt time", + Ctx: "nvme.device_thermal_mgmt_temp1_time", + Priority: prioDeviceThmTemp1Time, + Dims: module.Dims{ + {ID: "device_%s_thm_temp1_total_time", Name: "temp1"}, + }, + } + deviceThmTemp2TimeChartTmpl = module.Chart{ + ID: "device_%s_thm_temp2_time", + Title: "Thermal management temp1 time", + Units: "seconds", + Fam: "thermal mgmt time", + Ctx: "nvme.device_thermal_mgmt_temp2_time", + Priority: prioDeviceThmTemp2Time, + Dims: module.Dims{ + {ID: "device_%s_thm_temp2_total_time", Name: "temp2"}, + }, + } +) + +func (n *NVMe) addDeviceCharts(device string) { + charts := deviceChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, device) + chart.Labels = []module.Label{ + {Key: "device", Value: device}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, device) + } + } + + if err := n.Charts().Add(*charts...); err != nil { + n.Warning(err) + } +} + +func (n *NVMe) removeDeviceCharts(device string) { + px := fmt.Sprintf("device_%s", device) + + for _, chart := range *n.Charts() { + if 
strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/collect.go b/src/go/collectors/go.d.plugin/modules/nvme/collect.go new file mode 100644 index 00000000000000..1cc94239597bd1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/collect.go @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + "errors" + "fmt" + "path/filepath" + "strconv" + "time" +) + +func (n *NVMe) collect() (map[string]int64, error) { + if n.exec == nil { + return nil, errors.New("nvme-cli is not initialized (nil)") + } + + now := time.Now() + if n.forceListDevices || now.Sub(n.listDevicesTime) > n.listDevicesEvery { + n.forceListDevices = false + n.listDevicesTime = now + if err := n.listNVMeDevices(); err != nil { + return nil, err + } + } + + mx := make(map[string]int64) + + for path := range n.devicePaths { + if err := n.collectNVMeDevice(mx, path); err != nil { + n.Error(err) + n.forceListDevices = true + continue + } + } + + return mx, nil +} + +func (n *NVMe) collectNVMeDevice(mx map[string]int64, devicePath string) error { + stats, err := n.exec.smartLog(devicePath) + if err != nil { + return fmt.Errorf("exec nvme smart-log for '%s': %v", devicePath, err) + } + + device := extractDeviceFromPath(devicePath) + + mx["device_"+device+"_temperature"] = int64(float64(parseValue(stats.Temperature)) - 273.15) // Kelvin => Celsius + mx["device_"+device+"_percentage_used"] = parseValue(stats.PercentUsed) + mx["device_"+device+"_available_spare"] = parseValue(stats.AvailSpare) + mx["device_"+device+"_data_units_read"] = parseValue(stats.DataUnitsRead) * 1000 * 512 // units => bytes + mx["device_"+device+"_data_units_written"] = parseValue(stats.DataUnitsWritten) * 1000 * 512 // units => bytes + mx["device_"+device+"_host_read_commands"] = parseValue(stats.HostReadCommands) + mx["device_"+device+"_host_write_commands"] = parseValue(stats.HostWriteCommands) + mx["device_"+device+"_power_cycles"] = parseValue(stats.PowerCycles) + mx["device_"+device+"_power_on_time"] = parseValue(stats.PowerOnHours) * 3600 // hours => seconds + mx["device_"+device+"_unsafe_shutdowns"] = parseValue(stats.UnsafeShutdowns) + mx["device_"+device+"_media_errors"] = parseValue(stats.MediaErrors) + mx["device_"+device+"_num_err_log_entries"] = parseValue(stats.NumErrLogEntries) + mx["device_"+device+"_controller_busy_time"] = parseValue(stats.ControllerBusyTime) * 60 // minutes => seconds + mx["device_"+device+"_warning_temp_time"] = parseValue(stats.WarningTempTime) * 60 // minutes => seconds + mx["device_"+device+"_critical_comp_time"] = parseValue(stats.CriticalCompTime) * 60 // minutes => seconds + mx["device_"+device+"_thm_temp1_trans_count"] = parseValue(stats.ThmTemp1TransCount) + mx["device_"+device+"_thm_temp2_trans_count"] = parseValue(stats.ThmTemp2TransCount) + mx["device_"+device+"_thm_temp1_total_time"] = parseValue(stats.ThmTemp1TotalTime) // seconds + mx["device_"+device+"_thm_temp2_total_time"] = parseValue(stats.ThmTemp2TotalTime) // seconds + + mx["device_"+device+"_critical_warning_available_spare"] = boolToInt(parseValue(stats.CriticalWarning)&1 != 0) + mx["device_"+device+"_critical_warning_temp_threshold"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<1) != 0) + mx["device_"+device+"_critical_warning_nvm_subsystem_reliability"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<2) != 0) + mx["device_"+device+"_critical_warning_read_only"] = 
boolToInt(parseValue(stats.CriticalWarning)&(1<<3) != 0) + mx["device_"+device+"_critical_warning_volatile_mem_backup_failed"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<4) != 0) + mx["device_"+device+"_critical_warning_persistent_memory_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<5) != 0) + + return nil +} + +func (n *NVMe) listNVMeDevices() error { + devices, err := n.exec.list() + if err != nil { + return fmt.Errorf("exec nvme list: %v", err) + } + + seen := make(map[string]bool) + for _, v := range devices.Devices { + device := extractDeviceFromPath(v.DevicePath) + seen[device] = true + + if !n.devicePaths[v.DevicePath] { + n.devicePaths[v.DevicePath] = true + n.addDeviceCharts(device) + } + } + for path := range n.devicePaths { + device := extractDeviceFromPath(path) + if !seen[device] { + delete(n.devicePaths, path) // delete by full path: the map is keyed by device paths, not device names + n.removeDeviceCharts(device) + } + } + + return nil +} + +func extractDeviceFromPath(devicePath string) string { + _, name := filepath.Split(devicePath) + return name +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} + +func parseValue(s nvmeNumber) int64 { + v, _ := strconv.ParseFloat(string(s), 64) + return int64(v) +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/config_schema.json b/src/go/collectors/go.d.plugin/modules/nvme/config_schema.json new file mode 100644 index 00000000000000..fcd2869d6bef03 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/config_schema.json @@ -0,0 +1,22 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nvme job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "binary_path": { + "type": "string" + } + }, + "required": [ + "name" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/exec.go b/src/go/collectors/go.d.plugin/modules/nvme/exec.go new file mode 100644 index 00000000000000..2e7687a87eae1d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/exec.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + "bytes" + "context" + "encoding/json" + "os/exec" + "time" +) + +type nvmeDeviceList struct { + Devices []struct { + DevicePath string `json:"DevicePath"` + UsedBytes nvmeNumber `json:"UsedBytes"` + PhysicalSize nvmeNumber `json:"PhysicalSize"` + SectorSize nvmeNumber `json:"SectorSize"` + } +} + +// See "Health Information Log Page" in the Current Specification Version +// https://nvmexpress.org/developers/nvme-specification/ +type nvmeDeviceSmartLog struct { + CriticalWarning nvmeNumber `json:"critical_warning"` + Temperature nvmeNumber `json:"temperature"` + AvailSpare nvmeNumber `json:"avail_spare"` + SpareThresh nvmeNumber `json:"spare_thresh"` + PercentUsed nvmeNumber `json:"percent_used"` + DataUnitsRead nvmeNumber `json:"data_units_read"` + DataUnitsWritten nvmeNumber `json:"data_units_written"` + HostReadCommands nvmeNumber `json:"host_read_commands"` + HostWriteCommands nvmeNumber `json:"host_write_commands"` + ControllerBusyTime nvmeNumber `json:"controller_busy_time"` + PowerCycles nvmeNumber `json:"power_cycles"` + PowerOnHours nvmeNumber `json:"power_on_hours"` + UnsafeShutdowns nvmeNumber `json:"unsafe_shutdowns"` + MediaErrors nvmeNumber `json:"media_errors"` + NumErrLogEntries nvmeNumber `json:"num_err_log_entries"` + WarningTempTime nvmeNumber `json:"warning_temp_time"` + CriticalCompTime nvmeNumber `json:"critical_comp_time"` + 
ThmTemp1TransCount nvmeNumber `json:"thm_temp1_trans_count"` + ThmTemp2TransCount nvmeNumber `json:"thm_temp2_trans_count"` + ThmTemp1TotalTime nvmeNumber `json:"thm_temp1_total_time"` + ThmTemp2TotalTime nvmeNumber `json:"thm_temp2_total_time"` +} + +// nvme-cli 2.1.1 exposes some values as strings +type nvmeNumber string + +func (n *nvmeNumber) UnmarshalJSON(b []byte) error { + *n = nvmeNumber(bytes.Trim(b, "\"")) + return nil +} + +type nvmeCLIExec struct { + sudoPath string + nvmePath string + ndsudoPath string + timeout time.Duration +} + +func (n *nvmeCLIExec) list() (*nvmeDeviceList, error) { + var data []byte + var err error + + if n.ndsudoPath != "" { + data, err = n.executeNdSudo("nvme-list") + } else { + data, err = n.execute("list", "--output-format=json") + } + if err != nil { + return nil, err + } + + var v nvmeDeviceList + if err := json.Unmarshal(data, &v); err != nil { + return nil, err + } + + return &v, nil +} + +func (n *nvmeCLIExec) smartLog(devicePath string) (*nvmeDeviceSmartLog, error) { + var data []byte + var err error + + if n.ndsudoPath != "" { + data, err = n.executeNdSudo("nvme-smart-log", "--device", devicePath) + } else { + data, err = n.execute("smart-log", devicePath, "--output-format=json") + } + if err != nil { + return nil, err + } + + var v nvmeDeviceSmartLog + if err := json.Unmarshal(data, &v); err != nil { + return nil, err + } + + return &v, nil +} + +func (n *nvmeCLIExec) execute(arg ...string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), n.timeout) + defer cancel() + + if n.sudoPath != "" { + args := append([]string{"-n", n.nvmePath}, arg...) + return exec.CommandContext(ctx, n.sudoPath, args...).Output() + } + + return exec.CommandContext(ctx, n.nvmePath, arg...).Output() +} + +func (n *nvmeCLIExec) executeNdSudo(arg ...string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), n.timeout) + defer cancel() + + return exec.CommandContext(ctx, n.ndsudoPath, arg...).Output() +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/init.go b/src/go/collectors/go.d.plugin/modules/nvme/init.go new file mode 100644 index 00000000000000..70988031cdf89b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/init.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" +) + +func (n *NVMe) validateConfig() error { + if n.BinaryPath == "" { + return errors.New("'binary_path' can not be empty") + } + + return nil +} + +func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { + if exePath, err := os.Executable(); err == nil { + ndsudoPath := filepath.Join(filepath.Dir(exePath), "ndsudo") + + if fi, err := os.Stat(ndsudoPath); err == nil { + // executable by owner or group + if fi.Mode().Perm()&0110 != 0 { + n.Debug("using ndsudo") + return &nvmeCLIExec{ + ndsudoPath: ndsudoPath, + timeout: n.Timeout.Duration, + }, nil + } + } + } + + // TODO: remove after next minor release of Netdata (latest is v1.44.0) + // can't remove now because it will break "from source + stable channel" installations + nvmePath, err := exec.LookPath(n.BinaryPath) + if err != nil { + return nil, err + } + + var sudoPath string + if os.Getuid() != 0 { + sudoPath, err = exec.LookPath("sudo") + if err != nil { + return nil, err + } + } + + if sudoPath != "" { + ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration) + defer cancel1() + + if _, err := exec.CommandContext(ctx1, sudoPath, "-n", 
"-v").Output(); err != nil { + return nil, fmt.Errorf("can not run sudo on this host: %v", err) + } + + ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration) + defer cancel2() + + if _, err := exec.CommandContext(ctx2, sudoPath, "-n", "-l", nvmePath).Output(); err != nil { + return nil, fmt.Errorf("can not run '%s' with sudo: %v", n.BinaryPath, err) + } + } + + return &nvmeCLIExec{ + sudoPath: sudoPath, + nvmePath: nvmePath, + timeout: n.Timeout.Duration, + }, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/integrations/nvme_devices.md b/src/go/collectors/go.d.plugin/modules/nvme/integrations/nvme_devices.md new file mode 100644 index 00000000000000..ce55310e6c15d7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/integrations/nvme_devices.md @@ -0,0 +1,193 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/nvme/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/nvme/metadata.yaml" +sidebar_label: "NVMe devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NVMe devices + + +<img src="https://netdata.cloud/img/nvme.svg" width="150"/> + + +Plugin: go.d.plugin +Module: nvme + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the health of NVMe devices using the command line tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. It uses `sudo` and assumes it is set up so that the netdata user can execute `nvme` as root without a password. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per device + +These metrics refer to the NVME device. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | NVMe device name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| nvme.device_estimated_endurance_perc | used | % | +| nvme.device_available_spare_perc | spare | % | +| nvme.device_composite_temperature | temperature | celsius | +| nvme.device_io_transferred_count | read, written | bytes | +| nvme.device_power_cycles_count | power | cycles | +| nvme.device_power_on_time | power-on | seconds | +| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state | +| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns | +| nvme.device_media_errors_rate | media | errors/s | +| nvme.device_error_log_entries_rate | error_log | entries/s | +| nvme.device_warning_composite_temperature_time | wctemp | seconds | +| nvme.device_critical_composite_temperature_time | cctemp | seconds | +| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s | +| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s | +| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds | +| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings | + + +## Setup + +### Prerequisites + +#### Install nvme-cli + +See [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager. + + +#### Allow netdata to execute nvme + +Add the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary): + +```bash +netdata ALL=(root) NOPASSWD: /usr/sbin/nvme +``` + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/nvme.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/nvme.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| binary_path | Path to nvme binary. The default is "nvme" and the executable is looked for in the directories specified in the PATH environment variable. | nvme | no | +| timeout | nvme binary execution timeout. | 2 | no | + +</details> + +#### Examples + +##### Custom binary path + +The executable is not in the directories specified in the PATH environment variable. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: nvme + binary_path: /usr/local/sbin/nvme + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. 
The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m nvme + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/nvme/metadata.yaml b/src/go/collectors/go.d.plugin/modules/nvme/metadata.yaml new file mode 100644 index 00000000000000..71a5be2e7cb266 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/metadata.yaml @@ -0,0 +1,213 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-nvme + plugin_name: go.d.plugin + module_name: nvme + monitored_instance: + name: NVMe devices + link: "" + icon_filename: nvme.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - nvme + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: > + This collector monitors the health of NVMe devices using the command line + tool [nvme](https://github.com/linux-nvme/nvme-cli#nvme-cli), which can only be run by the root user. It uses `sudo` and + assumes it is set up so that the netdata user can execute `nvme` as root without a password. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Install nvme-cli + description: | + See [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager. + - title: Allow netdata to execute nvme + description: | + Add the netdata user to `/etc/sudoers` (use `which nvme` to find the full path to the binary): + + ```bash + netdata ALL=(root) NOPASSWD: /usr/sbin/nvme + ``` + configuration: + file: + name: go.d/nvme.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 10 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: binary_path + description: Path to nvme binary. The default is "nvme" and the executable is looked for in the directories specified in the PATH environment variable. + default_value: nvme + required: false + - name: timeout + description: nvme binary execution timeout. + default_value: 2 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Custom binary path + description: The executable is not in the directories specified in the PATH environment variable. 
+ config: | + jobs: + - name: nvme + binary_path: /usr/local/sbin/nvme + troubleshooting: + problems: + list: [] + alerts: + - name: nvme_device_critical_warnings_state + metric: nvme.device_critical_warnings_state + info: "NVMe device ${label:device} has critical warnings" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: device + description: These metrics refer to the NVME device. + labels: + - name: device + description: NVMe device name + metrics: + - name: nvme.device_estimated_endurance_perc + description: Estimated endurance + unit: '%' + chart_type: line + dimensions: + - name: used + - name: nvme.device_available_spare_perc + description: Remaining spare capacity + unit: '%' + chart_type: line + dimensions: + - name: spare + - name: nvme.device_composite_temperature + description: Composite temperature + unit: celsius + chart_type: line + dimensions: + - name: temperature + - name: nvme.device_io_transferred_count + description: Amount of data transferred to and from device + unit: bytes + chart_type: area + dimensions: + - name: read + - name: written + - name: nvme.device_power_cycles_count + description: Power cycles + unit: cycles + chart_type: line + dimensions: + - name: power + - name: nvme.device_power_on_time + description: Power-on time + unit: seconds + chart_type: line + dimensions: + - name: power-on + - name: nvme.device_critical_warnings_state + description: Critical warnings state + unit: state + chart_type: line + dimensions: + - name: available_spare + - name: temp_threshold + - name: nvm_subsystem_reliability + - name: read_only + - name: volatile_mem_backup_failed + - name: persistent_memory_read_only + - name: nvme.device_unsafe_shutdowns_count + description: Unsafe shutdowns + unit: shutdowns + chart_type: line + dimensions: + - name: unsafe + - name: nvme.device_media_errors_rate + description: Media and data integrity errors + unit: errors/s + chart_type: line + dimensions: + - name: media + - name: nvme.device_error_log_entries_rate + description: Error log entries + unit: entries/s + chart_type: line + dimensions: + - name: error_log + - name: nvme.device_warning_composite_temperature_time + description: Warning composite temperature time + unit: seconds + chart_type: line + dimensions: + - name: wctemp + - name: nvme.device_critical_composite_temperature_time + description: Critical composite temperature time + unit: seconds + chart_type: line + dimensions: + - name: cctemp + - name: nvme.device_thermal_mgmt_temp1_transitions_rate + description: Thermal management temp1 transitions + unit: transitions/s + chart_type: line + dimensions: + - name: temp1 + - name: nvme.device_thermal_mgmt_temp2_transitions_rate + description: Thermal management temp2 transitions + unit: transitions/s + chart_type: line + dimensions: + - name: temp2 + - name: nvme.device_thermal_mgmt_temp1_time + description: Thermal management temp1 time + unit: seconds + chart_type: line + dimensions: + - name: temp1 + - name: nvme.device_thermal_mgmt_temp2_time + description: Thermal management temp2 time + unit: seconds + chart_type: line + dimensions: + - name: temp2 diff --git a/src/go/collectors/go.d.plugin/modules/nvme/nvme.go b/src/go/collectors/go.d.plugin/modules/nvme/nvme.go new file mode 100644 index 00000000000000..d8f86869a6a9f7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/nvme.go @@ -0,0 +1,100 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("nvme", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *NVMe { + return &NVMe{ + Config: Config{ + BinaryPath: "nvme", + Timeout: web.Duration{Duration: time.Second * 2}, + }, + charts: &module.Charts{}, + devicePaths: make(map[string]bool), + listDevicesEvery: time.Minute * 10, + } + +} + +type Config struct { + Timeout web.Duration `yaml:"timeout"` + BinaryPath string `yaml:"binary_path"` +} + +type ( + NVMe struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + exec nvmeCLI + + devicePaths map[string]bool + listDevicesTime time.Time + listDevicesEvery time.Duration + forceListDevices bool + } + nvmeCLI interface { + list() (*nvmeDeviceList, error) + smartLog(devicePath string) (*nvmeDeviceSmartLog, error) + } +) + +func (n *NVMe) Init() bool { + if err := n.validateConfig(); err != nil { + n.Errorf("config validation: %v", err) + return false + } + + v, err := n.initNVMeCLIExec() + if err != nil { + n.Errorf("init nvme-cli exec: %v", err) + return false + } + n.exec = v + + return true +} + +func (n *NVMe) Check() bool { + return len(n.Collect()) > 0 +} + +func (n *NVMe) Charts() *module.Charts { + return n.charts +} + +func (n *NVMe) Collect() map[string]int64 { + mx, err := n.collect() + if err != nil { + n.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (n *NVMe) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/nvme_test.go b/src/go/collectors/go.d.plugin/modules/nvme/nvme_test.go new file mode 100644 index 00000000000000..26c55182b1b8ee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/nvme_test.go @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package nvme + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataNVMeListJSON, _ = os.ReadFile("testdata/nvme-list.json") + dataNVMeListEmptyJSON, _ = os.ReadFile("testdata/nvme-list-empty.json") + dataNVMeSmartLogJSON, _ = os.ReadFile("testdata/nvme-smart-log.json") + dataNVMeSmartLogStringJSON, _ = os.ReadFile("testdata/nvme-smart-log-string.json") + dataNVMeSmartLogFloatJSON, _ = os.ReadFile("testdata/nvme-smart-log-float.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataNVMeListJSON": dataNVMeListJSON, + "dataNVMeListEmptyJSON": dataNVMeListEmptyJSON, + "dataNVMeSmartLogJSON": dataNVMeSmartLogJSON, + "dataNVMeSmartLogStringJSON": dataNVMeSmartLogStringJSON, + "dataNVMeSmartLogFloatJSON": dataNVMeSmartLogFloatJSON, + } { + require.NotNilf(t, data, name) + } +} + +func TestNVMe_Init(t *testing.T) { + tests := map[string]struct { + prepare func(n *NVMe) + wantFail bool + }{ + "fails if 'binary_path' not set": { + wantFail: true, + prepare: func(n *NVMe) { + n.BinaryPath = "" + }, + }, + "fails if can't locate nvme-cli": { + wantFail: true, + prepare: func(n *NVMe) { + n.BinaryPath += "!!!" 
+ }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + nv := New() + + test.prepare(nv) + + if test.wantFail { + assert.False(t, nv.Init()) + } else { + assert.True(t, nv.Init()) + } + }) + } +} + +func TestNVMe_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestNVMe_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestNVMe_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(n *NVMe) + }{ + "success if all calls successful": { + wantFail: false, + prepare: prepareCaseOK, + }, + "fails if 'nvme list' returns an empty list": { + wantFail: true, + prepare: prepareCaseEmptyList, + }, + "fails if 'nvme list' returns an error": { + wantFail: true, + prepare: prepareCaseErrOnList, + }, + "fails if 'nvme smart-log' returns an error": { + wantFail: true, + prepare: prepareCaseErrOnSmartLog, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + n := New() + + test.prepare(n) + + if test.wantFail { + assert.False(t, n.Check()) + } else { + assert.True(t, n.Check()) + } + }) + } +} + +func TestNVMe_Collect(t *testing.T) { + type testCaseStep struct { + prepare func(n *NVMe) + check func(t *testing.T, n *NVMe) + } + + tests := map[string][]testCaseStep{ + "success if all calls successful": { + { + prepare: prepareCaseOK, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + expected := map[string]int64{ + "device_nvme0n1_available_spare": 100, + "device_nvme0n1_controller_busy_time": 497040, + "device_nvme0n1_critical_comp_time": 0, + "device_nvme0n1_critical_warning_available_spare": 0, + "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme0n1_critical_warning_persistent_memory_read_only": 0, + "device_nvme0n1_critical_warning_read_only": 0, + "device_nvme0n1_critical_warning_temp_threshold": 0, + "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme0n1_data_units_read": 5068041216000, + "device_nvme0n1_data_units_written": 69712734208000, + "device_nvme0n1_host_read_commands": 313528805, + "device_nvme0n1_host_write_commands": 1928062610, + "device_nvme0n1_media_errors": 0, + "device_nvme0n1_num_err_log_entries": 110, + "device_nvme0n1_percentage_used": 2, + "device_nvme0n1_power_cycles": 64, + "device_nvme0n1_power_on_time": 17906400, + "device_nvme0n1_temperature": 36, + "device_nvme0n1_thm_temp1_total_time": 0, + "device_nvme0n1_thm_temp1_trans_count": 0, + "device_nvme0n1_thm_temp2_total_time": 0, + "device_nvme0n1_thm_temp2_trans_count": 0, + "device_nvme0n1_unsafe_shutdowns": 39, + "device_nvme0n1_warning_temp_time": 0, + "device_nvme1n1_available_spare": 100, + "device_nvme1n1_controller_busy_time": 497040, + "device_nvme1n1_critical_comp_time": 0, + "device_nvme1n1_critical_warning_available_spare": 0, + "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme1n1_critical_warning_persistent_memory_read_only": 0, + "device_nvme1n1_critical_warning_read_only": 0, + "device_nvme1n1_critical_warning_temp_threshold": 0, + "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme1n1_data_units_read": 5068041216000, + "device_nvme1n1_data_units_written": 69712734208000, + "device_nvme1n1_host_read_commands": 313528805, + "device_nvme1n1_host_write_commands": 1928062610, + "device_nvme1n1_media_errors": 0, + "device_nvme1n1_num_err_log_entries": 110, + "device_nvme1n1_percentage_used": 2, + "device_nvme1n1_power_cycles": 64, + "device_nvme1n1_power_on_time": 
17906400, + "device_nvme1n1_temperature": 36, + "device_nvme1n1_thm_temp1_total_time": 0, + "device_nvme1n1_thm_temp1_trans_count": 0, + "device_nvme1n1_thm_temp2_total_time": 0, + "device_nvme1n1_thm_temp2_trans_count": 0, + "device_nvme1n1_unsafe_shutdowns": 39, + "device_nvme1n1_warning_temp_time": 0, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success if all calls successful with string values": { + { + prepare: prepareCaseStringValuesOK, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + expected := map[string]int64{ + "device_nvme0n1_available_spare": 100, + "device_nvme0n1_controller_busy_time": 497040, + "device_nvme0n1_critical_comp_time": 0, + "device_nvme0n1_critical_warning_available_spare": 0, + "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme0n1_critical_warning_persistent_memory_read_only": 0, + "device_nvme0n1_critical_warning_read_only": 0, + "device_nvme0n1_critical_warning_temp_threshold": 0, + "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme0n1_data_units_read": 5068041216000, + "device_nvme0n1_data_units_written": 69712734208000, + "device_nvme0n1_host_read_commands": 313528805, + "device_nvme0n1_host_write_commands": 1928062610, + "device_nvme0n1_media_errors": 0, + "device_nvme0n1_num_err_log_entries": 110, + "device_nvme0n1_percentage_used": 2, + "device_nvme0n1_power_cycles": 64, + "device_nvme0n1_power_on_time": 17906400, + "device_nvme0n1_temperature": 36, + "device_nvme0n1_thm_temp1_total_time": 0, + "device_nvme0n1_thm_temp1_trans_count": 0, + "device_nvme0n1_thm_temp2_total_time": 0, + "device_nvme0n1_thm_temp2_trans_count": 0, + "device_nvme0n1_unsafe_shutdowns": 39, + "device_nvme0n1_warning_temp_time": 0, + "device_nvme1n1_available_spare": 100, + "device_nvme1n1_controller_busy_time": 497040, + "device_nvme1n1_critical_comp_time": 0, + "device_nvme1n1_critical_warning_available_spare": 0, + "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme1n1_critical_warning_persistent_memory_read_only": 0, + "device_nvme1n1_critical_warning_read_only": 0, + "device_nvme1n1_critical_warning_temp_threshold": 0, + "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme1n1_data_units_read": 5068041216000, + "device_nvme1n1_data_units_written": 69712734208000, + "device_nvme1n1_host_read_commands": 313528805, + "device_nvme1n1_host_write_commands": 1928062610, + "device_nvme1n1_media_errors": 0, + "device_nvme1n1_num_err_log_entries": 110, + "device_nvme1n1_percentage_used": 2, + "device_nvme1n1_power_cycles": 64, + "device_nvme1n1_power_on_time": 17906400, + "device_nvme1n1_temperature": 36, + "device_nvme1n1_thm_temp1_total_time": 0, + "device_nvme1n1_thm_temp1_trans_count": 0, + "device_nvme1n1_thm_temp2_total_time": 0, + "device_nvme1n1_thm_temp2_trans_count": 0, + "device_nvme1n1_unsafe_shutdowns": 39, + "device_nvme1n1_warning_temp_time": 0, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "success if all calls successful with float values": { + { + prepare: prepareCaseFloatValuesOK, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + expected := map[string]int64{ + "device_nvme0n1_available_spare": 100, + "device_nvme0n1_controller_busy_time": 497040, + "device_nvme0n1_critical_comp_time": 0, + "device_nvme0n1_critical_warning_available_spare": 0, + "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme0n1_critical_warning_persistent_memory_read_only": 0, + 
"device_nvme0n1_critical_warning_read_only": 0, + "device_nvme0n1_critical_warning_temp_threshold": 0, + "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme0n1_data_units_read": 5068041216000, + "device_nvme0n1_data_units_written": 69712734208000, + "device_nvme0n1_host_read_commands": 313528805, + "device_nvme0n1_host_write_commands": 1928062610, + "device_nvme0n1_media_errors": 0, + "device_nvme0n1_num_err_log_entries": 110, + "device_nvme0n1_percentage_used": 2, + "device_nvme0n1_power_cycles": 64, + "device_nvme0n1_power_on_time": 17906400, + "device_nvme0n1_temperature": 36, + "device_nvme0n1_thm_temp1_total_time": 0, + "device_nvme0n1_thm_temp1_trans_count": 0, + "device_nvme0n1_thm_temp2_total_time": 0, + "device_nvme0n1_thm_temp2_trans_count": 0, + "device_nvme0n1_unsafe_shutdowns": 39, + "device_nvme0n1_warning_temp_time": 0, + "device_nvme1n1_available_spare": 100, + "device_nvme1n1_controller_busy_time": 497040, + "device_nvme1n1_critical_comp_time": 0, + "device_nvme1n1_critical_warning_available_spare": 0, + "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0, + "device_nvme1n1_critical_warning_persistent_memory_read_only": 0, + "device_nvme1n1_critical_warning_read_only": 0, + "device_nvme1n1_critical_warning_temp_threshold": 0, + "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0, + "device_nvme1n1_data_units_read": 5068041216000, + "device_nvme1n1_data_units_written": 69712734208000, + "device_nvme1n1_host_read_commands": 313528805, + "device_nvme1n1_host_write_commands": 1928062610, + "device_nvme1n1_media_errors": 0, + "device_nvme1n1_num_err_log_entries": 110, + "device_nvme1n1_percentage_used": 2, + "device_nvme1n1_power_cycles": 64, + "device_nvme1n1_power_on_time": 17906400, + "device_nvme1n1_temperature": 36, + "device_nvme1n1_thm_temp1_total_time": 0, + "device_nvme1n1_thm_temp1_trans_count": 0, + "device_nvme1n1_thm_temp2_total_time": 0, + "device_nvme1n1_thm_temp2_trans_count": 0, + "device_nvme1n1_unsafe_shutdowns": 39, + "device_nvme1n1_warning_temp_time": 0, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "fail if 'nvme list' returns an empty list": { + { + prepare: prepareCaseEmptyList, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + assert.Equal(t, (map[string]int64)(nil), mx) + }, + }, + }, + "fail if 'nvme list' returns an error": { + { + prepare: prepareCaseErrOnList, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + assert.Equal(t, (map[string]int64)(nil), mx) + }, + }, + }, + "fail if 'nvme smart-log' returns an error": { + { + prepare: prepareCaseErrOnSmartLog, + check: func(t *testing.T, n *NVMe) { + mx := n.Collect() + + assert.Equal(t, (map[string]int64)(nil), mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + n := New() + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepare(n) + step.check(t, n) + }) + } + }) + } +} + +func prepareCaseOK(n *NVMe) { + n.exec = &mockNVMeCLIExec{} +} + +func prepareCaseStringValuesOK(n *NVMe) { + n.exec = &mockNVMeCLIExec{smartLogString: true} +} + +func prepareCaseFloatValuesOK(n *NVMe) { + n.exec = &mockNVMeCLIExec{smartLogFloat: true} +} + +func prepareCaseEmptyList(n *NVMe) { + n.exec = &mockNVMeCLIExec{emptyList: true} +} + +func prepareCaseErrOnList(n *NVMe) { + n.exec = &mockNVMeCLIExec{errOnList: true} +} + +func prepareCaseErrOnSmartLog(n *NVMe) { + n.exec = &mockNVMeCLIExec{errOnSmartLog: true} +} + +type mockNVMeCLIExec struct { + 
errOnList bool + errOnSmartLog bool + emptyList bool + smartLogString bool + smartLogFloat bool +} + +func (m *mockNVMeCLIExec) list() (*nvmeDeviceList, error) { + if m.errOnList { + return nil, errors.New("mock.list() error") + } + + data := dataNVMeListJSON + if m.emptyList { + data = dataNVMeListEmptyJSON + } + + var v nvmeDeviceList + if err := json.Unmarshal(data, &v); err != nil { + return nil, err + } + + return &v, nil +} + +func (m *mockNVMeCLIExec) smartLog(_ string) (*nvmeDeviceSmartLog, error) { + if m.errOnSmartLog { + return nil, errors.New("mock.smartLog() error") + } + if m.emptyList { + return nil, errors.New("mock.smartLog() no devices error") + } + + data := dataNVMeSmartLogJSON + if m.smartLogString { + data = dataNVMeSmartLogStringJSON + } + if m.smartLogFloat { + data = dataNVMeSmartLogFloatJSON + } + + var v nvmeDeviceSmartLog + if err := json.Unmarshal(data, &v); err != nil { + return nil, err + } + + return &v, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list-empty.json b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list-empty.json new file mode 100644 index 00000000000000..e8da2407f6f96f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list-empty.json @@ -0,0 +1,4 @@ +{ + "Devices": [ + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list.json b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list.json new file mode 100644 index 00000000000000..6bf159c4f6fe2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-list.json @@ -0,0 +1,30 @@ +{ + "Devices": [ + { + "NameSpace": 1, + "DevicePath": "/dev/nvme0n1", + "Firmware": "SU6SM001", + "Index": 0, + "ModelNumber": "Seagate FireCuda 530 ZP4000GM30023", + "ProductName": "Non-Volatile memory controller: Seagate Technology PLC Device 0x5018", + "SerialNumber": "7VS00KNX", + "UsedBytes": 4000787030016, + "MaximumLBA": 7814037168, + "PhysicalSize": 4000787030016, + "SectorSize": 512 + }, + { + "NameSpace": 1, + "DevicePath": "/dev/nvme1n1", + "Firmware": "SU6SM001", + "Index": 1, + "ModelNumber": "Seagate FireCuda 530 ZP4000GM30023", + "ProductName": "Non-Volatile memory controller: Seagate Technology PLC Device 0x5018", + "SerialNumber": "7VS00J76", + "UsedBytes": 4000787030016, + "MaximumLBA": 7814037168, + "PhysicalSize": 4000787030016, + "SectorSize": 512 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-float.json b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-float.json new file mode 100644 index 00000000000000..f63dd977269068 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-float.json @@ -0,0 +1,24 @@ +{ + "critical_warning": 0, + "temperature": 310.0, + "avail_spare": 100.0, + "spare_thresh": 5.0, + "percent_used": 2.0, + "endurance_grp_critical_warning_summary": 0, + "data_units_read": 9898518.0, + "data_units_written": 136157684.0, + "host_read_commands": 313528805.0, + "host_write_commands": 1928062610.0, + "controller_busy_time": 8284.0, + "power_cycles": 64.0, + "power_on_hours": 4974.0, + "unsafe_shutdowns": 39.0, + "media_errors": 0, + "num_err_log_entries": 110.0, + "warning_temp_time": 0, + "critical_comp_time": 0, + "thm_temp1_trans_count": 0, + "thm_temp2_trans_count": 0, + "thm_temp1_total_time": 0, + "thm_temp2_total_time": 0 +} diff --git 
a/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-string.json b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-string.json new file mode 100644 index 00000000000000..f582e748587c0c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log-string.json @@ -0,0 +1,24 @@ +{ + "critical_warning": "0", + "temperature": "310", + "avail_spare": "100", + "spare_thresh": "5", + "percent_used": "2", + "endurance_grp_critical_warning_summary": "0", + "data_units_read": "9898518", + "data_units_written": "136157684", + "host_read_commands": "313528805", + "host_write_commands": "1928062610", + "controller_busy_time": "8284", + "power_cycles": "64", + "power_on_hours": "4974", + "unsafe_shutdowns": "39", + "media_errors": "0", + "num_err_log_entries": "110", + "warning_temp_time": "0", + "critical_comp_time": "0", + "thm_temp1_trans_count": "0", + "thm_temp2_trans_count": "0", + "thm_temp1_total_time": "0", + "thm_temp2_total_time": "0" +} diff --git a/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log.json b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log.json new file mode 100644 index 00000000000000..cbd0e4c7d48271 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/nvme/testdata/nvme-smart-log.json @@ -0,0 +1,24 @@ +{ + "critical_warning": 0, + "temperature": 310, + "avail_spare": 100, + "spare_thresh": 5, + "percent_used": 2, + "endurance_grp_critical_warning_summary": 0, + "data_units_read": 9898518, + "data_units_written": 136157684, + "host_read_commands": 313528805, + "host_write_commands": 1928062610, + "controller_busy_time": 8284, + "power_cycles": 64, + "power_on_hours": 4974, + "unsafe_shutdowns": 39, + "media_errors": 0, + "num_err_log_entries": 110, + "warning_temp_time": 0, + "critical_comp_time": 0, + "thm_temp1_trans_count": 0, + "thm_temp2_trans_count": 0, + "thm_temp1_total_time": 0, + "thm_temp2_total_time": 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/README.md b/src/go/collectors/go.d.plugin/modules/openvpn/README.md new file mode 120000 index 00000000000000..020da3ac666af1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/README.md @@ -0,0 +1 @@ +integrations/openvpn.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/charts.go b/src/go/collectors/go.d.plugin/modules/openvpn/charts.go new file mode 100644 index 00000000000000..4228d5df62c7b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/charts.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "active_clients", + Title: "Total Number Of Active Clients", + Units: "clients", + Fam: "clients", + Ctx: "openvpn.active_clients", + Dims: Dims{ + {ID: "clients"}, + }, + }, + { + ID: "total_traffic", + Title: "Total Traffic", + Units: "kilobits/s", + Fam: "traffic", + Ctx: "openvpn.total_traffic", + Type: module.Area, + Dims: Dims{ + {ID: "bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "bytes_out", Name: "out", Algo: module.Incremental, Mul: 8, Div: -1000}, + }, + }, +} + +var userCharts = Charts{ + { + ID: "%s_user_traffic", + Title: "User Traffic", + Units: "kilobits/s", + Fam: "user %s", + Ctx: "openvpn.user_traffic", + Type: module.Area, + Dims: 
Dims{ + {ID: "%s_bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1000}, + }, + }, + { + ID: "%s_user_connection_time", + Title: "User Connection Time", + Units: "seconds", + Fam: "user %s", + Ctx: "openvpn.user_connection_time", + Dims: Dims{ + {ID: "%s_connection_time", Name: "time"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/client.go b/src/go/collectors/go.d.plugin/modules/openvpn/client/client.go new file mode 100644 index 00000000000000..12309de09bd090 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/client.go @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/socket" +) + +var ( + reLoadStats = regexp.MustCompile(`^SUCCESS: nclients=([0-9]+),bytesin=([0-9]+),bytesout=([0-9]+)`) + reVersion = regexp.MustCompile(`^OpenVPN Version: OpenVPN ([0-9]+)\.([0-9]+)\.([0-9]+) .+Management Version: ([0-9])`) +) + +const maxLinesToRead = 500 + +// New creates new OpenVPN client. +func New(config socket.Config) *Client { + return &Client{Client: socket.New(config)} +} + +// Client represents OpenVPN client. +type Client struct { + socket.Client +} + +// Users Users. +func (c *Client) Users() (Users, error) { + lines, err := c.get(commandStatus3, readUntilEND) + if err != nil { + return nil, err + } + return decodeUsers(lines) +} + +// LoadStats LoadStats. +func (c *Client) LoadStats() (*LoadStats, error) { + lines, err := c.get(commandLoadStats, readOneLine) + if err != nil { + return nil, err + } + return decodeLoadStats(lines) +} + +// Version Version. +func (c *Client) Version() (*Version, error) { + lines, err := c.get(commandVersion, readUntilEND) + if err != nil { + return nil, err + } + return decodeVersion(lines) +} + +func (c *Client) get(command string, stopRead stopReadFunc) (output []string, err error) { + var num int + var maxLinesErr error + err = c.Command(command, func(bytes []byte) bool { + line := string(bytes) + num++ + if num > maxLinesToRead { + maxLinesErr = fmt.Errorf("read line limit exceeded (%d)", maxLinesToRead) + return false + } + + // skip real-time messages + if strings.HasPrefix(line, ">") { + return true + } + + line = strings.Trim(line, "\r\n ") + output = append(output, line) + if stopRead != nil && stopRead(line) { + return false + } + return true + }) + if maxLinesErr != nil { + return nil, maxLinesErr + } + return output, err +} + +type stopReadFunc func(string) bool + +func readOneLine(_ string) bool { return true } + +func readUntilEND(s string) bool { return strings.HasSuffix(s, "END") } + +func decodeLoadStats(src []string) (*LoadStats, error) { + m := reLoadStats.FindStringSubmatch(strings.Join(src, " ")) + if len(m) == 0 { + return nil, fmt.Errorf("parse failed : %v", src) + } + return &LoadStats{ + NumOfClients: mustParseInt(m[1]), + BytesIn: mustParseInt(m[2]), + BytesOut: mustParseInt(m[3]), + }, nil +} + +func decodeVersion(src []string) (*Version, error) { + m := reVersion.FindStringSubmatch(strings.Join(src, " ")) + if len(m) == 0 { + return nil, fmt.Errorf("parse failed : %v", src) + } + return &Version{ + Major: mustParseInt(m[1]), + Minor: mustParseInt(m[2]), + Patch: mustParseInt(m[3]), + Management: mustParseInt(m[4]), + }, nil +} + +// works only for `status 3\n` +func decodeUsers(src []string) (Users, error) { + var users Users + + // [CLIENT_LIST 
common_name 178.66.34.194:54200 10.9.0.5 9319 8978 Thu May 9 05:01:44 2019 1557345704 username] + for _, v := range src { + if !strings.HasPrefix(v, "CLIENT_LIST") { + continue + } + parts := strings.Fields(v) + // Right after the connection there are no virtual ip, and both common name and username UNDEF + // CLIENT_LIST UNDEF 178.70.95.93:39324 1411 3474 Fri May 10 07:41:54 2019 1557441714 UNDEF + if len(parts) != 13 { + continue + } + u := User{ + CommonName: parts[1], + RealAddress: parts[2], + VirtualAddress: parts[3], + BytesReceived: mustParseInt(parts[4]), + BytesSent: mustParseInt(parts[5]), + ConnectedSince: mustParseInt(parts[11]), + Username: parts[12], + } + users = append(users, u) + } + return users, nil +} + +func mustParseInt(str string) int64 { + v, err := strconv.ParseInt(str, 10, 64) + if err != nil { + panic(err) + } + return v +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/client_test.go b/src/go/collectors/go.d.plugin/modules/openvpn/client/client_test.go new file mode 100644 index 00000000000000..685d77c274e363 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/client_test.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/stretchr/testify/assert" +) + +var ( + testLoadStatsData, _ = os.ReadFile("testdata/load-stats.txt") + testVersionData, _ = os.ReadFile("testdata/version.txt") + testStatus3Data, _ = os.ReadFile("testdata/status3.txt") + testMaxLinesExceededData = strings.Repeat(">CLIENT:ESTABLISHED,0\n", 501) +) + +func TestNew(t *testing.T) { assert.IsType(t, (*Client)(nil), New(socket.Config{})) } + +func TestClient_GetVersion(t *testing.T) { + client := Client{Client: &mockSocketClient{}} + ver, err := client.Version() + assert.NoError(t, err) + expected := &Version{Major: 2, Minor: 3, Patch: 4, Management: 1} + assert.Equal(t, expected, ver) +} + +func TestClient_GetLoadStats(t *testing.T) { + client := Client{Client: &mockSocketClient{}} + stats, err := client.LoadStats() + assert.NoError(t, err) + expected := &LoadStats{NumOfClients: 1, BytesIn: 7811, BytesOut: 7667} + assert.Equal(t, expected, stats) +} + +func TestClient_GetUsers(t *testing.T) { + client := Client{ + Client: &mockSocketClient{}, + } + users, err := client.Users() + assert.NoError(t, err) + expected := Users{{ + CommonName: "pepehome", + RealAddress: "1.2.3.4:44347", + VirtualAddress: "10.9.0.5", + BytesReceived: 6043, + BytesSent: 5661, + ConnectedSince: 1555439465, + Username: "pepe", + }} + assert.Equal(t, expected, users) +} + +func TestClient_MaxLineExceeded(t *testing.T) { + client := Client{ + Client: &mockSocketClient{maxLineExceeded: true}, + } + _, err := client.Users() + assert.Error(t, err) +} + +type mockSocketClient struct { + maxLineExceeded bool +} + +func (m *mockSocketClient) Connect() error { return nil } + +func (m *mockSocketClient) Disconnect() error { return nil } + +func (m *mockSocketClient) Command(command string, process socket.Processor) error { + var s *bufio.Scanner + switch command { + default: + return fmt.Errorf("unknown command : %s", command) + case commandExit: + case commandVersion: + s = bufio.NewScanner(bytes.NewReader(testVersionData)) + case commandStatus3: + if m.maxLineExceeded { + s = bufio.NewScanner(strings.NewReader(testMaxLinesExceededData)) + break + } + s = bufio.NewScanner(bytes.NewReader(testStatus3Data)) + case commandLoadStats: + 
s = bufio.NewScanner(bytes.NewReader(testLoadStatsData)) + } + + for s.Scan() { + process(s.Bytes()) + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/commands.go b/src/go/collectors/go.d.plugin/modules/openvpn/client/commands.go new file mode 100644 index 00000000000000..f06b05c906fbea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/commands.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +/* +https://openvpn.net/community-resources/management-interface/ + +OUTPUT FORMAT +------------- + +(1) Command success/failure indicated by "SUCCESS: [text]" or + "ERROR: [text]". + +(2) For commands which print multiple lines of output, + the last line will be "END". + +(3) Real-time messages will be in the form ">[source]:[text]", + where source is "CLIENT", "ECHO", "FATAL", "HOLD", "INFO", "LOG", + "NEED-OK", "PASSWORD", or "STATE". +*/ + +var ( + // Close the management session, and resume listening on the + // management port for connections from other clients. Currently, + // the OpenVPN daemon can at most support a single management client + // any one time. + commandExit = "exit\n" + + // Show current daemon status information, in the same format as + // that produced by the OpenVPN --status directive. + commandStatus3 = "status 3\n" + + // no description in docs ¯\(°_o)/¯ + commandLoadStats = "load-stats\n" + + // Show the current OpenVPN and Management Interface versions. + commandVersion = "version\n" +) diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/load-stats.txt b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/load-stats.txt new file mode 100644 index 00000000000000..39c19ac5b4bb89 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/load-stats.txt @@ -0,0 +1 @@ +SUCCESS: nclients=1,bytesin=7811,bytesout=7667 diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/status3.txt b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/status3.txt new file mode 100644 index 00000000000000..1986703d2f59d6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/status3.txt @@ -0,0 +1,77 @@ +>CLIENT:ESTABLISHED,0 +>CLIENT:ENV,n_clients=1 +>CLIENT:ENV,ifconfig_pool_local_ip=10.9.0.6 +>CLIENT:ENV,ifconfig_pool_remote_ip=10.9.0.5 +>CLIENT:ENV,time_unix=1555439465 +>CLIENT:ENV,time_ascii=Wed Apr 17 03:31:05 2019 +>CLIENT:ENV,trusted_port=44347 +>CLIENT:ENV,trusted_ip=1.2.3.4 +>CLIENT:ENV,common_name=pepehome +>CLIENT:ENV,auth_control_file=/tmp/openvpn_acf_ae7f48d495d3d4cfb3065763b916d9ab.tmp +>CLIENT:ENV,untrusted_port=44347 +>CLIENT:ENV,untrusted_ip=1.2.3.4 +>CLIENT:ENV,username=pepe +>CLIENT:ENV,tls_serial_hex_0=04 +>CLIENT:ENV,tls_serial_0=4 +>CLIENT:ENV,tls_digest_0=be:83:8c:95:21:bf:f3:87:1a:35:86:d9:2e:f3:f5:d7:08:a9:db:7e +>CLIENT:ENV,tls_id_0=C=RU, ST=AM, L=Blagoveshchensk, O=L2ISBAD, OU=MyOrganizationalUnit, CN=pepehome, name=EasyRSA, emailAddress=me@myhost.mydomain +>CLIENT:ENV,X509_0_emailAddress=me@myhost.mydomain +>CLIENT:ENV,X509_0_name=EasyRSA +>CLIENT:ENV,X509_0_CN=pepehome +>CLIENT:ENV,X509_0_OU=MyOrganizationalUnit +>CLIENT:ENV,X509_0_O=L2ISBAD +>CLIENT:ENV,X509_0_L=Blagoveshchensk +>CLIENT:ENV,X509_0_ST=AM +>CLIENT:ENV,X509_0_C=RU +>CLIENT:ENV,tls_serial_hex_1=ad:4c:1e:65:e8:3c:ec:6f +>CLIENT:ENV,tls_serial_1=12487389289828379759 +>CLIENT:ENV,tls_digest_1=52:e2:1d:41:3f:34:09:70:4c:2d:71:8c:a7:28:fa:6b:66:2b:28:6e +>CLIENT:ENV,tls_id_1=C=RU, ST=AM, 
L=Blagoveshchensk, O=L2ISBAD, OU=MyOrganizationalUnit, CN=L2ISBAD CA, name=EasyRSA, emailAddress=me@myhost.mydomain +>CLIENT:ENV,X509_1_emailAddress=me@myhost.mydomain +>CLIENT:ENV,X509_1_name=EasyRSA +>CLIENT:ENV,X509_1_CN=L2ISBAD CA +>CLIENT:ENV,X509_1_OU=MyOrganizationalUnit +>CLIENT:ENV,X509_1_O=L2ISBAD +>CLIENT:ENV,X509_1_L=Blagoveshchensk +>CLIENT:ENV,X509_1_ST=AM +>CLIENT:ENV,X509_1_C=RU +>CLIENT:ENV,remote_port_1=1194 +>CLIENT:ENV,local_port_1=1194 +>CLIENT:ENV,proto_1=udp +>CLIENT:ENV,daemon_pid=4237 +>CLIENT:ENV,daemon_start_time=1555439449 +>CLIENT:ENV,daemon_log_redirect=0 +>CLIENT:ENV,daemon=1 +>CLIENT:ENV,verb=3 +>CLIENT:ENV,config=/etc/openvpn/server.conf +>CLIENT:ENV,ifconfig_local=10.8.0.1 +>CLIENT:ENV,ifconfig_remote=10.8.0.2 +>CLIENT:ENV,route_net_gateway=188.168.142.252 +>CLIENT:ENV,route_vpn_gateway=10.8.0.2 +>CLIENT:ENV,route_network_1=10.9.0.1 +>CLIENT:ENV,route_netmask_1=255.255.255.255 +>CLIENT:ENV,route_gateway_1=10.8.0.2 +>CLIENT:ENV,route_network_2=10.9.0.5 +>CLIENT:ENV,route_netmask_2=255.255.255.255 +>CLIENT:ENV,route_gateway_2=10.8.0.2 +>CLIENT:ENV,route_network_3=10.9.0.9 +>CLIENT:ENV,route_netmask_3=255.255.255.255 +>CLIENT:ENV,route_gateway_3=10.8.0.2 +>CLIENT:ENV,route_network_4=10.8.0.0 +>CLIENT:ENV,route_netmask_4=255.255.255.0 +>CLIENT:ENV,route_gateway_4=10.8.0.2 +>CLIENT:ENV,script_context=init +>CLIENT:ENV,tun_mtu=1500 +>CLIENT:ENV,link_mtu=1558 +>CLIENT:ENV,dev=tun99 +>CLIENT:ENV,dev_type=tun +>CLIENT:ENV,redirect_gateway=0 +>CLIENT:ENV,END +TITLE OpenVPN 2.3.4 i586-pc-linux-gnu [SSL (OpenSSL)] [LZO] [EPOLL] [PKCS11] [MH] [IPv6] built on Jun 26 2017 +TIME Wed Apr 17 03:31:06 2019 1555439466 +HEADER CLIENT_LIST Common Name Real Address Virtual Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username +CLIENT_LIST pepehome 1.2.3.4:44347 10.9.0.5 6043 5661 Wed Apr 17 03:31:05 2019 1555439465 pepe +HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t) +ROUTING_TABLE 10.9.0.5 pepehome 1.2.3.4:44347 Wed Apr 17 03:31:06 2019 1555439466 +GLOBAL_STATS Max bcast/mcast queue length 0 +END diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/version.txt b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/version.txt new file mode 100644 index 00000000000000..e525876d8fbce1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/testdata/version.txt @@ -0,0 +1,3 @@ +OpenVPN Version: OpenVPN 2.3.4 i586-pc-linux-gnu [SSL (OpenSSL)] [LZO] [EPOLL] [PKCS11] [MH] [IPv6] built on Jun 26 2017 +Management Version: 1 +END diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/client/types.go b/src/go/collectors/go.d.plugin/modules/openvpn/client/types.go new file mode 100644 index 00000000000000..a0a2830285b561 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/client/types.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +type LoadStats struct { + NumOfClients int64 + BytesIn int64 + BytesOut int64 +} + +type Version struct { + Major int64 + Minor int64 + Patch int64 + Management int64 +} + +type Users []User + +type User struct { + CommonName string + RealAddress string + VirtualAddress string + BytesReceived int64 + BytesSent int64 + ConnectedSince int64 + Username string +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/collect.go b/src/go/collectors/go.d.plugin/modules/openvpn/collect.go new file mode 100644 index 00000000000000..180fae3bd986b2 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/openvpn/collect.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import ( + "fmt" + "time" +) + +func (o *OpenVPN) collect() (map[string]int64, error) { + var err error + + if err := o.client.Connect(); err != nil { + return nil, err + } + defer func() { _ = o.client.Disconnect() }() + + mx := make(map[string]int64) + + if err = o.collectLoadStats(mx); err != nil { + return nil, err + } + + if o.perUserMatcher != nil { + if err = o.collectUsers(mx); err != nil { + return nil, err + } + } + + return mx, nil +} + +func (o *OpenVPN) collectLoadStats(mx map[string]int64) error { + stats, err := o.client.LoadStats() + if err != nil { + return err + } + + mx["clients"] = stats.NumOfClients + mx["bytes_in"] = stats.BytesIn + mx["bytes_out"] = stats.BytesOut + return nil +} + +func (o *OpenVPN) collectUsers(mx map[string]int64) error { + users, err := o.client.Users() + if err != nil { + return err + } + + now := time.Now().Unix() + var name string + + for _, user := range users { + if user.Username == "UNDEF" { + name = user.CommonName + } else { + name = user.Username + } + + if !o.perUserMatcher.MatchString(name) { + continue + } + if !o.collectedUsers[name] { + o.collectedUsers[name] = true + if err := o.addUserCharts(name); err != nil { + o.Warning(err) + } + } + mx[name+"_bytes_received"] = user.BytesReceived + mx[name+"_bytes_sent"] = user.BytesSent + mx[name+"_connection_time"] = now - user.ConnectedSince + } + return nil +} + +func (o *OpenVPN) addUserCharts(userName string) error { + cs := userCharts.Copy() + + for _, chart := range *cs { + chart.ID = fmt.Sprintf(chart.ID, userName) + chart.Fam = fmt.Sprintf(chart.Fam, userName) + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, userName) + } + chart.MarkNotCreated() + } + return o.charts.Add(*cs...) 
+} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/config_schema.json b/src/go/collectors/go.d.plugin/modules/openvpn/config_schema.json new file mode 100644 index 00000000000000..db6442db92d854 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/config_schema.json @@ -0,0 +1,52 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "connect_timeout": { + "type": [ + "string", + "integer" + ] + }, + "read_timeout": { + "type": [ + "string", + "integer" + ] + }, + "write_timeout": { + "type": [ + "string", + "integer" + ] + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/integrations/openvpn.md b/src/go/collectors/go.d.plugin/modules/openvpn/integrations/openvpn.md new file mode 100644 index 00000000000000..9b32bc277bd4a0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/integrations/openvpn.md @@ -0,0 +1,225 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/openvpn/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/openvpn/metadata.yaml" +sidebar_label: "OpenVPN" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenVPN + + +<img src="https://netdata.cloud/img/openvpn.svg" width="150"/> + + +Plugin: go.d.plugin +Module: openvpn + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors OpenVPN servers. + +It uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per OpenVPN instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openvpn.active_clients | clients | clients | +| openvpn.total_traffic | in, out | kilobits/s | + +### Per user + +These metrics refer to the VPN user. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| username | VPN username | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openvpn.user_traffic | in, out | kilobits/s | +| openvpn.user_connection_time | time | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. 
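
For context on where these metrics come from: the collector reads replies to management interface commands such as `status 3`, whose `CLIENT_LIST` rows appear in the testdata earlier in this diff. The sketch below is a minimal, hypothetical illustration (not the collector's actual client code) of decoding one such row into the `User` shape defined in `types.go`, assuming the tab-separated, 9-field status version 3 layout of the OpenVPN 2.3.4 testdata; newer servers emit more columns. The name `parseClientList` is illustrative only.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// User mirrors the fields of client.User from types.go.
type User struct {
	CommonName     string
	RealAddress    string
	VirtualAddress string
	BytesReceived  int64
	BytesSent      int64
	ConnectedSince int64
	Username       string
}

// parseClientList is a hypothetical sketch, not the collector's parser.
// It decodes one tab-separated CLIENT_LIST row from `status 3` output,
// assuming the 2.3.4-era column layout shown in the testdata.
func parseClientList(line string) (*User, error) {
	parts := strings.Split(line, "\t")
	if len(parts) != 9 || parts[0] != "CLIENT_LIST" {
		return nil, fmt.Errorf("unexpected CLIENT_LIST line: %q", line)
	}
	rx, err := strconv.ParseInt(parts[4], 10, 64) // Bytes Received
	if err != nil {
		return nil, err
	}
	tx, err := strconv.ParseInt(parts[5], 10, 64) // Bytes Sent
	if err != nil {
		return nil, err
	}
	since, err := strconv.ParseInt(parts[7], 10, 64) // Connected Since (time_t)
	if err != nil {
		return nil, err
	}
	return &User{
		CommonName:     parts[1],
		RealAddress:    parts[2],
		VirtualAddress: parts[3],
		BytesReceived:  rx,
		BytesSent:      tx,
		ConnectedSince: since,
		Username:       parts[8],
	}, nil
}

func main() {
	line := "CLIENT_LIST\tpepehome\t1.2.3.4:44347\t10.9.0.5\t6043\t5661\tWed Apr 17 03:31:05 2019\t1555439465\tpepe"
	u, err := parseClientList(line)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *u)
}
```

Decoded this way, `BytesReceived`, `BytesSent`, and `ConnectedSince` map directly onto the `<name>_bytes_received`, `<name>_bytes_sent`, and connection-time values that `collect.go` exports.
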
+ + +## Setup + +### Prerequisites + +#### Enable in go.d.conf. + +This collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf). + +From the documentation for the OpenVPN Management Interface: +> Currently, the OpenVPN daemon can at most support a single management client any one time. + +It is disabled by default so that it does not break other tools which use the `Management Interface`. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/openvpn.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/openvpn.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes | +| per_user_stats | User selector. Determines which user metrics will be collected. | | no | +| connect_timeout | Connection timeout in seconds. The timeout includes name resolution, if required. | 2 | no | +| read_timeout | Read timeout in seconds. Sets deadline for read calls. | 2 | no | +| write_timeout | Write timeout in seconds. Sets deadline for write calls. | 2 | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:7505 + +``` +</details> + +##### With user metrics + +Collect metrics of all users. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:7505 + per_user_stats: + includes: + - "* *" + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:7505 + + - name: remote + address: 203.0.113.0:7505 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m openvpn + ``` + +
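
When the collector cannot connect, it can also help to probe the management interface directly. The following is a minimal, hypothetical Go probe (not part of the collector) that dials the documented default address `127.0.0.1:7505` with the default 2-second timeouts, issues `load-stats`, and prints the reply. The command and the `SUCCESS: nclients=...,bytesin=...,bytesout=...` reply format come from the OpenVPN management interface documentation, so treat the exact details as assumptions.

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"time"
)

func main() {
	// Defaults mirror the collector's configuration: address 127.0.0.1:7505,
	// 2-second connect and I/O timeouts.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:7505", 2*time.Second)
	if err != nil {
		panic(err)
	}
	defer func() { _ = conn.Close() }()

	_ = conn.SetDeadline(time.Now().Add(2 * time.Second))

	// Ask the management interface for its load statistics.
	if _, err := fmt.Fprint(conn, "load-stats\n"); err != nil {
		panic(err)
	}

	sc := bufio.NewScanner(conn)
	for sc.Scan() {
		line := sc.Text()
		fmt.Println(line)
		// Assumed reply format per the OpenVPN docs, e.g.
		// "SUCCESS: nclients=1,bytesin=7,bytesout=42"; an ERROR line
		// also terminates the exchange.
		if strings.HasPrefix(line, "SUCCESS:") || strings.HasPrefix(line, "ERROR:") {
			break
		}
	}
}
```
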
diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/metadata.yaml b/src/go/collectors/go.d.plugin/modules/openvpn/metadata.yaml new file mode 100644 index 00000000000000..9d3e2e330deb4d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/metadata.yaml @@ -0,0 +1,185 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-openvpn + plugin_name: go.d.plugin + module_name: openvpn + monitored_instance: + name: OpenVPN + link: https://openvpn.net/ + icon_filename: openvpn.svg + categories: + - data-collection.vpns + keywords: + - openvpn + - vpn + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors OpenVPN servers. + + It uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable in go.d.conf. + description: | + This collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf). + + From the documentation for the OpenVPN Management Interface: + > Currently, the OpenVPN daemon can at most support a single management client any one time. + + It is disabled by default so that it does not break other tools which use the `Management Interface`. + configuration: + file: + name: go.d/openvpn.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: Server address in IP:PORT format. + default_value: 127.0.0.1:7505 + required: true + - name: per_user_stats + description: User selector. Determines which user metrics will be collected. + default_value: "" + required: false + details: | + Metrics of users matching the selector will be collected. + + - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4) + - Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). + - Syntax: + + ```yaml + per_user_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + - name: connect_timeout + description: Connection timeout in seconds. The timeout includes name resolution, if required. + default_value: 2 + required: false + - name: read_timeout + description: Read timeout in seconds. Sets deadline for read calls. + default_value: 2 + required: false + - name: write_timeout + description: Write timeout in seconds. Sets deadline for write calls. + default_value: 2 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration.
+ config: | + jobs: + - name: local + address: 127.0.0.1:7505 + - name: With user metrics + description: Collect metrics of all users. + config: | + jobs: + - name: local + address: 127.0.0.1:7505 + per_user_stats: + includes: + - "* *" + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:7505 + + - name: remote + address: 203.0.113.0:7505 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: openvpn.active_clients + description: Total Number Of Active Clients + unit: clients + chart_type: line + dimensions: + - name: clients + - name: openvpn.total_traffic + description: Total Traffic + unit: kilobits/s + chart_type: area + dimensions: + - name: in + - name: out + - name: user + description: These metrics refer to the VPN user. + labels: + - name: username + description: VPN username + metrics: + - name: openvpn.user_traffic + description: User Traffic + unit: kilobits/s + chart_type: area + dimensions: + - name: in + - name: out + - name: openvpn.user_connection_time + description: User Connection Time + unit: seconds + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/openvpn.go b/src/go/collectors/go.d.plugin/modules/openvpn/openvpn.go new file mode 100644 index 00000000000000..0a6ccbb81b4c4c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/openvpn.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/modules/openvpn/client" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + defaultAddress = "127.0.0.1:7505" + defaultConnectTimeout = time.Second * 2 + defaultReadTimeout = time.Second * 2 + defaultWriteTimeout = time.Second * 2 +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("openvpn", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + Disabled: true, + }, + Create: func() module.Module { return New() }, + }) +} + +// New creates OpenVPN with default values. +func New() *OpenVPN { + config := Config{ + Address: defaultAddress, + ConnectTimeout: web.Duration{Duration: defaultConnectTimeout}, + ReadTimeout: web.Duration{Duration: defaultReadTimeout}, + WriteTimeout: web.Duration{Duration: defaultWriteTimeout}, + } + return &OpenVPN{ + Config: config, + charts: charts.Copy(), + collectedUsers: make(map[string]bool), + } +} + +// Config is the OpenVPN module configuration. +type Config struct { + Address string + ConnectTimeout web.Duration `yaml:"connect_timeout"` + ReadTimeout web.Duration `yaml:"read_timeout"` + WriteTimeout web.Duration `yaml:"write_timeout"` + PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"` +} + +type openVPNClient interface { + socket.Client + Version() (*client.Version, error) + LoadStats() (*client.LoadStats, error) + Users() (client.Users, error) +} + +// OpenVPN OpenVPN module. 
+type OpenVPN struct { + module.Base + Config `yaml:",inline"` + client openVPNClient + charts *Charts + collectedUsers map[string]bool + perUserMatcher matcher.Matcher +} + +// Cleanup makes cleanup. +func (o *OpenVPN) Cleanup() { + if o.client == nil { + return + } + _ = o.client.Disconnect() +} + +// Init makes initialization. +func (o *OpenVPN) Init() bool { + if !o.PerUserStats.Empty() { + m, err := o.PerUserStats.Parse() + if err != nil { + o.Errorf("error on creating per user stats matcher: %v", err) + return false + } + o.perUserMatcher = matcher.WithCache(m) + } + + config := socket.Config{ + Address: o.Address, + ConnectTimeout: o.ConnectTimeout.Duration, + ReadTimeout: o.ReadTimeout.Duration, + WriteTimeout: o.WriteTimeout.Duration, + } + o.client = &client.Client{Client: socket.New(config)} + + o.Infof("using address: %s, connect timeout: %s, read timeout: %s, write timeout: %s", + o.Address, o.ConnectTimeout.Duration, o.ReadTimeout.Duration, o.WriteTimeout.Duration) + + return true +} + +// Check makes check. +func (o *OpenVPN) Check() bool { + if err := o.client.Connect(); err != nil { + o.Error(err) + return false + } + defer func() { _ = o.client.Disconnect() }() + + ver, err := o.client.Version() + if err != nil { + o.Error(err) + o.Cleanup() + return false + } + + o.Infof("connected to OpenVPN v%d.%d.%d, Management v%d", ver.Major, ver.Minor, ver.Patch, ver.Management) + return true +} + +// Charts creates Charts. +func (o OpenVPN) Charts() *Charts { return o.charts } + +// Collect collects metrics. +func (o *OpenVPN) Collect() map[string]int64 { + mx, err := o.collect() + if err != nil { + o.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn/openvpn_test.go b/src/go/collectors/go.d.plugin/modules/openvpn/openvpn_test.go new file mode 100644 index 00000000000000..02fa1a602c3f70 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn/openvpn_test.go @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import ( + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/modules/openvpn/client" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testVersion = client.Version{Major: 1, Minor: 1, Patch: 1, Management: 1} + testLoadStats = client.LoadStats{NumOfClients: 1, BytesIn: 1, BytesOut: 1} + testUsers = client.Users{{ + CommonName: "common_name", + RealAddress: "1.2.3.4:4321", + VirtualAddress: "1.2.3.4", + BytesReceived: 1, + BytesSent: 2, + ConnectedSince: 3, + Username: "name", + }} + testUsersUNDEF = client.Users{{ + CommonName: "common_name", + RealAddress: "1.2.3.4:4321", + VirtualAddress: "1.2.3.4", + BytesReceived: 1, + BytesSent: 2, + ConnectedSince: 3, + Username: "UNDEF", + }} +) + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultAddress, job.Address) + assert.Equal(t, defaultConnectTimeout, job.ConnectTimeout.Duration) + assert.Equal(t, defaultReadTimeout, job.ReadTimeout.Duration) + assert.Equal(t, defaultWriteTimeout, job.WriteTimeout.Duration) + assert.NotNil(t, job.charts) + assert.NotNil(t, job.collectedUsers) +} + +func TestOpenVPN_Init(t *testing.T) { + assert.True(t, New().Init()) +} + +func TestOpenVPN_Check(t *testing.T) { + job := New() + + require.True(t, job.Init()) + job.client =
prepareMockOpenVPNClient() + require.True(t, job.Check()) +} + +func TestOpenVPN_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestOpenVPN_Cleanup(t *testing.T) { + job := New() + + assert.NotPanics(t, job.Cleanup) + require.True(t, job.Init()) + job.client = prepareMockOpenVPNClient() + require.True(t, job.Check()) + job.Cleanup() +} + +func TestOpenVPN_Collect(t *testing.T) { + job := New() + + require.True(t, job.Init()) + job.perUserMatcher = matcher.TRUE() + job.client = prepareMockOpenVPNClient() + require.True(t, job.Check()) + + expected := map[string]int64{ + "bytes_in": 1, + "bytes_out": 1, + "clients": 1, + "name_bytes_received": 1, + "name_bytes_sent": 2, + } + + mx := job.Collect() + require.NotNil(t, mx) + delete(mx, "name_connection_time") + assert.Equal(t, expected, mx) +} + +func TestOpenVPN_Collect_UNDEFUsername(t *testing.T) { + job := New() + + require.True(t, job.Init()) + job.perUserMatcher = matcher.TRUE() + cl := prepareMockOpenVPNClient() + cl.users = testUsersUNDEF + job.client = cl + require.True(t, job.Check()) + + expected := map[string]int64{ + "bytes_in": 1, + "bytes_out": 1, + "clients": 1, + "common_name_bytes_received": 1, + "common_name_bytes_sent": 2, + } + + mx := job.Collect() + require.NotNil(t, mx) + delete(mx, "common_name_connection_time") + assert.Equal(t, expected, mx) +} + +func prepareMockOpenVPNClient() *mockOpenVPNClient { + return &mockOpenVPNClient{ + version: testVersion, + loadStats: testLoadStats, + users: testUsers, + } +} + +type mockOpenVPNClient struct { + version client.Version + loadStats client.LoadStats + users client.Users +} + +func (m *mockOpenVPNClient) Connect() error { return nil } +func (m *mockOpenVPNClient) Disconnect() error { return nil } +func (m mockOpenVPNClient) Version() (*client.Version, error) { return &m.version, nil } +func (m mockOpenVPNClient) LoadStats() (*client.LoadStats, error) { return &m.loadStats, nil } +func (m mockOpenVPNClient) Users() (client.Users, error) { return m.users, nil } +func (m *mockOpenVPNClient) Command(_ string, _ socket.Processor) error { + // mocks are done on the individual commands. e.g. 
in Version() above. + panic("Command should not be called directly: the individual commands are mocked") +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/README.md b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/README.md new file mode 120000 index 00000000000000..603c8249beee6d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/README.md @@ -0,0 +1 @@ +integrations/openvpn_status_log.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/charts.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/charts.go new file mode 100644 index 00000000000000..da7d58aaf8f4f2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/charts.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +var charts = module.Charts{ + { + ID: "active_clients", + Title: "Active Clients", + Units: "active clients", + Fam: "active_clients", + Ctx: "openvpn.active_clients", + Dims: module.Dims{ + {ID: "clients"}, + }, + }, + { + ID: "traffic", + Title: "Traffic", + Units: "kilobits/s", + Fam: "traffic", + Ctx: "openvpn.total_traffic", + Type: module.Area, + Dims: module.Dims{ + {ID: "bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "bytes_out", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000}, + }, + }, +} + +var userCharts = module.Charts{ + { + ID: "%s_user_traffic", + Title: "User Traffic", + Units: "kilobits/s", + Fam: "user stats", + Ctx: "openvpn.user_traffic", + Type: module.Area, + Dims: module.Dims{ + {ID: "%s_bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "%s_bytes_out", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000}, + }, + }, + { + ID: "%s_user_connection_time", + Title: "User Connection Time", + Units: "seconds", + Fam: "user stats", + Ctx: "openvpn.user_connection_time", + Dims: module.Dims{ + {ID: "%s_connection_time", Name: "time"}, + }, + }, +} + +func (o *OpenVPNStatusLog) addUserCharts(userName string) error { + cs := userCharts.Copy() + + for _, chart := range *cs { + chart.ID = fmt.Sprintf(chart.ID, userName) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, userName) + } + chart.MarkNotCreated() + } + return o.charts.Add(*cs...)
+} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/collect.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/collect.go new file mode 100644 index 00000000000000..f6a442fd53c30c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/collect.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + "time" +) + +func (o *OpenVPNStatusLog) collect() (map[string]int64, error) { + clients, err := parse(o.LogPath) + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + collectTotalStats(mx, clients) + + if o.perUserMatcher != nil && numOfClients(clients) > 0 { + o.collectUsers(mx, clients) + } + + return mx, nil +} + +func collectTotalStats(mx map[string]int64, clients []clientInfo) { + var in, out int64 + for _, c := range clients { + in += c.bytesReceived + out += c.bytesSent + } + mx["clients"] = numOfClients(clients) + mx["bytes_in"] = in + mx["bytes_out"] = out +} + +func (o *OpenVPNStatusLog) collectUsers(mx map[string]int64, clients []clientInfo) { + now := time.Now().Unix() + + for _, user := range clients { + name := user.commonName + if !o.perUserMatcher.MatchString(name) { + continue + } + if !o.collectedUsers[name] { + o.collectedUsers[name] = true + if err := o.addUserCharts(name); err != nil { + o.Warning(err) + } + } + mx[name+"_bytes_in"] = user.bytesReceived + mx[name+"_bytes_out"] = user.bytesSent + mx[name+"_connection_time"] = now - user.connectedSince + } +} + +func numOfClients(clients []clientInfo) int64 { + var num int64 + for _, v := range clients { + if v.commonName != "" && v.commonName != "UNDEF" { + num++ + } + } + return num +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/config_schema.json b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/config_schema.json new file mode 100644 index 00000000000000..904da56c0eb327 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/config_schema.json @@ -0,0 +1,34 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn_status_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "log_path": { + "type": "string" + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "required": [ + "name", + "log_path" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/init.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/init.go new file mode 100644 index 00000000000000..9bd34a5105ddd7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/init.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (o OpenVPNStatusLog) validateConfig() error { + if o.LogPath == "" { + return errors.New("empty 'log_path'") + } + return nil +} + +func (o OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) { + if o.PerUserStats.Empty() { + return nil, nil + } + m, err := o.PerUserStats.Parse() + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/integrations/openvpn_status_log.md 
b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/integrations/openvpn_status_log.md new file mode 100644 index 00000000000000..e7cb58658c831b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/integrations/openvpn_status_log.md @@ -0,0 +1,178 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/openvpn_status_log/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/openvpn_status_log/metadata.yaml" +sidebar_label: "OpenVPN status log" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenVPN status log + + +<img src="https://netdata.cloud/img/openvpn.svg" width="150"/> + + +Plugin: go.d.plugin +Module: openvpn_status_log + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors OpenVPN servers. + +It parses server log files and provides summary and per-user metrics. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per OpenVPN status log instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openvpn.active_clients | clients | clients | +| openvpn.total_traffic | in, out | kilobits/s | + +### Per user + +These metrics refer to the VPN user. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| username | VPN username | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| openvpn.user_traffic | in, out | kilobits/s | +| openvpn.user_connection_time | time | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/openvpn_status_log.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/openvpn_status_log.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| log_path | Path to status log. | /var/log/openvpn/status.log | yes | +| per_user_stats | User selector. Determines which user metrics will be collected. | | no | + +</details> + +#### Examples + +##### With user metrics + +Collect metrics of all users. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + per_user_stats: + includes: + - "* *" + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m openvpn_status_log + ``` + +
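
The `per_user_stats` option above is an include/exclude selector: a user is picked up when at least one include pattern matches and no exclude pattern does, as spelled out in the metadata that follows. The toy sketch below models only those semantics; the plugin's own matcher package supports richer pattern formats, `path.Match` globs and the `selector` type are illustrative stand-ins, and the rule that empty includes match everything is an assumption.

```go
package main

import (
	"fmt"
	"path"
)

// selector is a toy model of per_user_stats:
// matched = (include1 OR include2 ...) AND NOT (exclude1 OR exclude2 ...).
type selector struct {
	includes []string
	excludes []string
}

func (s selector) matches(name string) bool {
	// Assumption: no include patterns means "include everything".
	included := len(s.includes) == 0
	for _, p := range s.includes {
		if ok, _ := path.Match(p, name); ok {
			included = true
			break
		}
	}
	if !included {
		return false
	}
	for _, p := range s.excludes {
		if ok, _ := path.Match(p, name); ok {
			return false
		}
	}
	return true
}

func main() {
	s := selector{
		includes: []string{"vpnclient*"},
		excludes: []string{"vpnclient2"},
	}
	for _, name := range []string{"vpnclient", "vpnclient2", "admin"} {
		fmt.Printf("%-10s -> %v\n", name, s.matches(name))
	}
	// vpnclient matches an include and no exclude: selected.
	// vpnclient2 matches an include but also an exclude: rejected.
	// admin matches no include: rejected.
}
```
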
diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/metadata.yaml b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/metadata.yaml new file mode 100644 index 00000000000000..868c3592701f9d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/metadata.yaml @@ -0,0 +1,144 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-openvpn_status_log + plugin_name: go.d.plugin + module_name: openvpn_status_log + monitored_instance: + name: OpenVPN status log + link: https://openvpn.net/ + icon_filename: openvpn.svg + categories: + - data-collection.vpns + keywords: + - openvpn + - vpn + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors OpenVPN servers. + + It parses server log files and provides summary and per-user metrics. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/openvpn_status_log.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: log_path + description: Path to status log. + default_value: /var/log/openvpn/status.log + required: true + - name: per_user_stats + description: User selector. Determines which user metrics will be collected. + default_value: "" + required: false + details: | + Metrics of users matching the selector will be collected. + - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4) + - Pattern syntax: [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). + - Syntax: + ```yaml + per_user_stats: + includes: + - pattern1 + - pattern2 + excludes: + - pattern3 + - pattern4 + ``` + examples: + folding: + title: Config + enabled: true + list: + - name: With user metrics + description: Collect metrics of all users.
+ config: | + jobs: + - name: local + per_user_stats: + includes: + - "* *" + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: openvpn.active_clients + description: Total Number Of Active Clients + unit: clients + chart_type: line + dimensions: + - name: clients + - name: openvpn.total_traffic + description: Total Traffic + unit: kilobits/s + chart_type: area + dimensions: + - name: in + - name: out + - name: user + description: These metrics refer to the VPN user. + labels: + - name: username + description: VPN username + metrics: + - name: openvpn.user_traffic + description: User Traffic + unit: kilobits/s + chart_type: area + dimensions: + - name: in + - name: out + - name: openvpn.user_connection_time + description: User Connection Time + unit: seconds + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn.go new file mode 100644 index 00000000000000..dc9e7340b3f97a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + _ "embed" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("openvpn_status_log", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *OpenVPNStatusLog { + config := Config{ + LogPath: "/var/log/openvpn/status.log", + } + return &OpenVPNStatusLog{ + Config: config, + charts: charts.Copy(), + collectedUsers: make(map[string]bool), + } +} + +type Config struct { + LogPath string `yaml:"log_path"` + PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"` +} + +type OpenVPNStatusLog struct { + module.Base + + Config `yaml:",inline"` + + charts *module.Charts + + collectedUsers map[string]bool + perUserMatcher matcher.Matcher +} + +func (o *OpenVPNStatusLog) Init() bool { + if err := o.validateConfig(); err != nil { + o.Errorf("error on validating config: %v", err) + return false + } + + m, err := o.initPerUserStatsMatcher() + if err != nil { + o.Errorf("error on creating 'per_user_stats' matcher: %v", err) + return false + } + + if m != nil { + o.perUserMatcher = m + } + + return true +} + +func (o *OpenVPNStatusLog) Check() bool { + return len(o.Collect()) > 0 +} + +func (o OpenVPNStatusLog) Charts() *module.Charts { + return o.charts +} + +func (o *OpenVPNStatusLog) Collect() map[string]int64 { + mx, err := o.collect() + if err != nil { + o.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (o *OpenVPNStatusLog) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn_test.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn_test.go new file mode 100644 index 00000000000000..d54d278248f94f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/openvpn_test.go @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +const ( + pathNonExistentFile = "testdata/v2.5.1/non-existent.txt" + pathEmptyFile = "testdata/v2.5.1/empty.txt" + pathStaticKey = "testdata/v2.5.1/static-key.txt" + pathStatusVersion1 = "testdata/v2.5.1/version1.txt" + pathStatusVersion1NoClients = "testdata/v2.5.1/version1-no-clients.txt" + pathStatusVersion2 = "testdata/v2.5.1/version2.txt" + pathStatusVersion2NoClients = "testdata/v2.5.1/version2-no-clients.txt" + pathStatusVersion3 = "testdata/v2.5.1/version3.txt" + pathStatusVersion3NoClients = "testdata/v2.5.1/version3-no-clients.txt" +) + +func TestNew(t *testing.T) { +} + +func TestOpenVPNStatusLog_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default config": { + config: New().Config, + }, + "unset 'log_path'": { + wantFail: true, + config: Config{ + LogPath: "", + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ovpn := New() + ovpn.Config = test.config + + if test.wantFail { + assert.False(t, ovpn.Init()) + } else { + assert.True(t, ovpn.Init()) + } + }) + } +} + +func TestOpenVPNStatusLog_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *OpenVPNStatusLog + wantFail bool + }{ + "status version 1": {prepare: prepareCaseStatusVersion1}, + "status version 1 with no clients": {prepare: prepareCaseStatusVersion1NoClients}, + "status version 2": {prepare: prepareCaseStatusVersion2}, + "status version 2 with no clients": {prepare: prepareCaseStatusVersion2NoClients}, + "status version 3": {prepare: prepareCaseStatusVersion3}, + "status version 3 with no clients": {prepare: prepareCaseStatusVersion3NoClients}, + "empty file": {prepare: prepareCaseEmptyFile, wantFail: true}, + "non-existent file": {prepare: prepareCaseNonExistentFile, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ovpn := test.prepare() + + require.True(t, ovpn.Init()) + + if test.wantFail { + assert.False(t, ovpn.Check()) + } else { + assert.True(t, ovpn.Check()) + } + }) + } +} + +func TestOpenVPNStatusLog_Charts(t *testing.T) { + tests := map[string]struct { + prepare func() *OpenVPNStatusLog + wantNumCharts int + }{ + "status version 1 with user stats": { + prepare: prepareCaseStatusVersion1WithUserStats, + wantNumCharts: len(charts) + len(userCharts)*2, + }, + "status version 2 with user stats": { + prepare: prepareCaseStatusVersion2WithUserStats, + wantNumCharts: len(charts) + len(userCharts)*2, + }, + "status version 3 with user stats": { + prepare: prepareCaseStatusVersion2WithUserStats, + wantNumCharts: len(charts) + len(userCharts)*2, + }, + "status version with static key": { + prepare: prepareCaseStatusStaticKey, + wantNumCharts: len(charts), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ovpn := test.prepare() + + require.True(t, ovpn.Init()) + _ = ovpn.Check() + _ = ovpn.Collect() + + assert.Equal(t, test.wantNumCharts, len(*ovpn.Charts())) + }) + } +} + +func TestOpenVPNStatusLog_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *OpenVPNStatusLog + expected map[string]int64 + }{ + "status version 1": { + prepare: prepareCaseStatusVersion1, + expected: map[string]int64{ + "bytes_in": 6168, + "bytes_out": 6369, + "clients": 2, + }, + }, + "status version 1 with user stats": { + prepare: prepareCaseStatusVersion1WithUserStats, + expected: map[string]int64{ + "bytes_in": 6168, + "bytes_out": 6369, + "clients": 2, + 
"vpnclient2_bytes_in": 3084, + "vpnclient2_bytes_out": 3184, + "vpnclient2_connection_time": 63793143069, + "vpnclient_bytes_in": 3084, + "vpnclient_bytes_out": 3185, + "vpnclient_connection_time": 63793143069, + }, + }, + "status version 1 with no clients": { + prepare: prepareCaseStatusVersion1NoClients, + expected: map[string]int64{ + "bytes_in": 0, + "bytes_out": 0, + "clients": 0, + }, + }, + "status version 2": { + prepare: prepareCaseStatusVersion2, + expected: map[string]int64{ + "bytes_in": 6241, + "bytes_out": 6369, + "clients": 2, + }, + }, + "status version 2 with user stats": { + prepare: prepareCaseStatusVersion2WithUserStats, + expected: map[string]int64{ + "bytes_in": 6241, + "bytes_out": 6369, + "clients": 2, + "vpnclient2_bytes_in": 3157, + "vpnclient2_bytes_out": 3184, + "vpnclient2_connection_time": 264610, + "vpnclient_bytes_in": 3084, + "vpnclient_bytes_out": 3185, + "vpnclient_connection_time": 264609, + }, + }, + "status version 2 with no clients": { + prepare: prepareCaseStatusVersion2NoClients, + expected: map[string]int64{ + "bytes_in": 0, + "bytes_out": 0, + "clients": 0, + }, + }, + "status version 3": { + prepare: prepareCaseStatusVersion3, + expected: map[string]int64{ + "bytes_in": 7308, + "bytes_out": 7235, + "clients": 2, + }, + }, + "status version 3 with user stats": { + prepare: prepareCaseStatusVersion3WithUserStats, + expected: map[string]int64{ + "bytes_in": 7308, + "bytes_out": 7235, + "clients": 2, + "vpnclient2_bytes_in": 3654, + "vpnclient2_bytes_out": 3617, + "vpnclient2_connection_time": 265498, + "vpnclient_bytes_in": 3654, + "vpnclient_bytes_out": 3618, + "vpnclient_connection_time": 265496, + }, + }, + "status version 3 with no clients": { + prepare: prepareCaseStatusVersion3NoClients, + expected: map[string]int64{ + "bytes_in": 0, + "bytes_out": 0, + "clients": 0, + }, + }, + "status with static key": { + prepare: prepareCaseStatusStaticKey, + expected: map[string]int64{ + "bytes_in": 19265, + "bytes_out": 261631, + "clients": 0, + }, + }, + "empty file": { + prepare: prepareCaseEmptyFile, + expected: nil, + }, + "non-existent file": { + prepare: prepareCaseNonExistentFile, + expected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ovpn := test.prepare() + + require.True(t, ovpn.Init()) + _ = ovpn.Check() + + collected := ovpn.Collect() + + copyConnTime(collected, test.expected) + assert.Equal(t, test.expected, collected) + }) + } +} + +func prepareCaseStatusVersion1() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion1 + return ovpn +} + +func prepareCaseStatusVersion1WithUserStats() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion1 + ovpn.PerUserStats = matcher.SimpleExpr{ + Includes: []string{"* *"}, + } + return ovpn +} + +func prepareCaseStatusVersion1NoClients() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion1NoClients + return ovpn +} + +func prepareCaseStatusVersion2() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion2 + return ovpn +} + +func prepareCaseStatusVersion2WithUserStats() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion2 + ovpn.PerUserStats = matcher.SimpleExpr{ + Includes: []string{"* *"}, + } + return ovpn +} + +func prepareCaseStatusVersion2NoClients() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion2NoClients + return ovpn +} + +func prepareCaseStatusVersion3() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion3 + return ovpn +} + 
+func prepareCaseStatusVersion3WithUserStats() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion3 + ovpn.PerUserStats = matcher.SimpleExpr{ + Includes: []string{"* *"}, + } + return ovpn +} + +func prepareCaseStatusVersion3NoClients() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStatusVersion3NoClients + return ovpn +} + +func prepareCaseStatusStaticKey() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathStaticKey + return ovpn +} + +func prepareCaseEmptyFile() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathEmptyFile + return ovpn +} + +func prepareCaseNonExistentFile() *OpenVPNStatusLog { + ovpn := New() + ovpn.LogPath = pathNonExistentFile + return ovpn +} + +func copyConnTime(dst, src map[string]int64) { + for k, v := range src { + if !strings.HasSuffix(k, "connection_time") { + continue + } + if _, ok := dst[k]; !ok { + continue + } + dst[k] = v + } +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/parser.go b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/parser.go new file mode 100644 index 00000000000000..c734fd5fb1a493 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/parser.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn_status_log + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + "time" +) + +type clientInfo struct { + commonName string + bytesReceived int64 + bytesSent int64 + connectedSince int64 +} + +func parse(path string) ([]clientInfo, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + sc := bufio.NewScanner(f) + _ = sc.Scan() + line := sc.Text() + + if line == "OpenVPN CLIENT LIST" { + return parseV1(sc), nil + } + if strings.HasPrefix(line, "TITLE,OpenVPN") || strings.HasPrefix(line, "TITLE\tOpenVPN") { + return parseV2V3(sc), nil + } + if line == "OpenVPN STATISTICS" { + return parseStaticKey(sc), nil + } + return nil, fmt.Errorf("the status log file is invalid (%s)", path) +} + +func parseV1(sc *bufio.Scanner) []clientInfo { + // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/multi.c#L836 + var clients []clientInfo + + for sc.Scan() { + if !strings.HasPrefix(sc.Text(), "Common Name") { + continue + } + for sc.Scan() && !strings.HasPrefix(sc.Text(), "ROUTING TABLE") { + parts := strings.Split(sc.Text(), ",") + if len(parts) != 5 { + continue + } + + name := parts[0] + bytesRx, _ := strconv.ParseInt(parts[2], 10, 64) + bytesTx, _ := strconv.ParseInt(parts[3], 10, 64) + connSince, _ := time.Parse("Mon Jan 2 15:04:05 2006", parts[4]) + + clients = append(clients, clientInfo{ + commonName: name, + bytesReceived: bytesRx, + bytesSent: bytesTx, + connectedSince: connSince.Unix(), + }) + } + break + } + return clients +} + +func parseV2V3(sc *bufio.Scanner) []clientInfo { + // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/multi.c#L901 + var clients []clientInfo + var sep string + if strings.IndexByte(sc.Text(), '\t') != -1 { + sep = "\t" + } else { + sep = "," + } + + for sc.Scan() { + line := sc.Text() + if !strings.HasPrefix(line, "CLIENT_LIST") { + continue + } + parts := strings.Split(line, sep) + if len(parts) != 13 { + continue + } + + name := parts[1] + bytesRx, _ := strconv.ParseInt(parts[5], 10, 64) + bytesTx, _ := strconv.ParseInt(parts[6], 10, 64) + connSince, _ := strconv.ParseInt(parts[8], 10, 64) + + clients = append(clients, clientInfo{ + commonName: 
name, + bytesReceived: bytesRx, + bytesSent: bytesTx, + connectedSince: connSince, + }) + } + return clients +} + +func parseStaticKey(sc *bufio.Scanner) []clientInfo { + // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/sig.c#L283 + var info clientInfo + for sc.Scan() { + line := sc.Text() + if !strings.HasPrefix(line, "TCP/UDP") { + continue + } + i := strings.IndexByte(line, ',') + if i == -1 || len(line) == i { + continue + } + bytes, _ := strconv.ParseInt(line[i+1:], 10, 64) + switch line[:i] { + case "TCP/UDP read bytes": + info.bytesReceived += bytes + case "TCP/UDP write bytes": + info.bytesSent += bytes + } + } + return []clientInfo{info} +} diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/empty.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/empty.txt new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt new file mode 100644 index 00000000000000..64b691fcd1f38d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt @@ -0,0 +1,8 @@ +OpenVPN STATISTICS +Updated,2022-05-05 12:35:47 +TUN/TAP read bytes,123 +TUN/TAP write bytes,1155 +TCP/UDP read bytes,19265 +TCP/UDP write bytes,261631 +Auth read bytes,0 +END diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt new file mode 100644 index 00000000000000..34d7a748f9ac3f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt @@ -0,0 +1,8 @@ +OpenVPN CLIENT LIST +Updated,2022-07-08 15:05:57 +Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since +ROUTING TABLE +Virtual Address,Common Name,Real Address,Last Ref +GLOBAL STATS +Max bcast/mcast queue length,0 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1.txt new file mode 100644 index 00000000000000..0d2f33ba557718 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version1.txt @@ -0,0 +1,12 @@ +OpenVPN CLIENT LIST +Updated,2022-07-08 15:14:45 +Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since +vpnclient,10.10.10.107:46195,3084,3185,2022-07-08 15:14:42 +vpnclient2,10.10.10.50:51275,3084,3184,2022-07-08 15:14:41 +ROUTING TABLE +Virtual Address,Common Name,Real Address,Last Ref +10.8.0.10,vpnclient,10.10.10.107:46195,2022-07-08 15:14:42 +10.8.0.6,vpnclient2,10.10.10.50:51275,2022-07-08 15:14:41 +GLOBAL STATS +Max bcast/mcast queue length,0 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt new file mode 100644 index 00000000000000..6d1ea1e3252eef --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt @@ -0,0 +1,6 @@ +TITLE,OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021 
+TIME,2022-07-08 15:04:54,1657281894 +HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Virtual IPv6 Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username,Client ID,Peer ID,Data Channel Cipher +HEADER,ROUTING_TABLE,Virtual Address,Common Name,Real Address,Last Ref,Last Ref (time_t) +GLOBAL_STATS,Max bcast/mcast queue length,0 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2.txt new file mode 100644 index 00000000000000..d0f4ac8e393500 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version2.txt @@ -0,0 +1,10 @@ +TITLE,OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021 +TIME,2022-07-08 15:05:14,1657281914 +HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Virtual IPv6 Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username,Client ID,Peer ID,Data Channel Cipher +CLIENT_LIST,vpnclient2,10.10.10.50:38535,10.8.0.6,,3157,3184,2022-07-08 15:05:09,1657281909,UNDEF,0,0,AES-256-GCM +CLIENT_LIST,vpnclient,10.10.10.107:50026,10.8.0.10,,3084,3185,2022-07-08 15:05:10,1657281910,UNDEF,1,1,AES-256-GCM +HEADER,ROUTING_TABLE,Virtual Address,Common Name,Real Address,Last Ref,Last Ref (time_t) +ROUTING_TABLE,10.8.0.6,vpnclient2,10.10.10.50:38535,2022-07-08 15:05:09,1657281909 +ROUTING_TABLE,10.8.0.10,vpnclient,10.10.10.107:50026,2022-07-08 15:05:10,1657281910 +GLOBAL_STATS,Max bcast/mcast queue length,0 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt new file mode 100644 index 00000000000000..6ab671f200d6d6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt @@ -0,0 +1,6 @@ +TITLE OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021 +TIME 2022-07-08 15:02:27 1657281747 +HEADER CLIENT_LIST Common Name Real Address Virtual Address Virtual IPv6 Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username Client ID Peer ID Data Channel Cipher +HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t) +GLOBAL_STATS Max bcast/mcast queue length 2 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3.txt b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3.txt new file mode 100644 index 00000000000000..7d732042e4454b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/openvpn_status_log/testdata/v2.5.1/version3.txt @@ -0,0 +1,10 @@ +TITLE OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021 +TIME 2022-07-08 14:53:40 1657281220 +HEADER CLIENT_LIST Common Name Real Address Virtual Address Virtual IPv6 Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username Client ID Peer ID Data Channel Cipher +CLIENT_LIST vpnclient2 10.10.10.50:53856 10.8.0.6 3654 3617 2022-07-08 14:50:56 1657281056 UNDEF 0 0 AES-256-GCM +CLIENT_LIST vpnclient 10.10.10.107:42132 10.8.0.10 3654 3618 2022-07-08 14:50:58 
1657281058 UNDEF 1 1 AES-256-GCM +HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t) +ROUTING_TABLE 10.8.0.6 vpnclient2 10.10.10.50:53856 2022-07-08 14:50:56 1657281056 +ROUTING_TABLE 10.8.0.10 vpnclient 10.10.10.107:42132 2022-07-08 14:50:58 1657281058 +GLOBAL_STATS Max bcast/mcast queue length 2 +END \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/README.md b/src/go/collectors/go.d.plugin/modules/pgbouncer/README.md new file mode 120000 index 00000000000000..3bfcaba0b332ee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/README.md @@ -0,0 +1 @@ +integrations/pgbouncer.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/charts.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/charts.go new file mode 100644 index 00000000000000..6a4277244d2263 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/charts.go @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioClientConnectionsUtilization = module.Priority + iota + prioDBClientConnections + prioDBServerConnections + prioDBServerConnectionsUtilization + prioDBClientsWaitTime + prioDBClientsWaitMaxTime + prioDBTransactions + prioDBTransactionsTime + prioDBTransactionsAvgTime + prioDBQueries + prioDBQueriesTime + prioDBQueryAvgTime + prioDBNetworkIO +) + +var ( + globalCharts = module.Charts{ + clientConnectionsUtilization.Copy(), + } + + clientConnectionsUtilization = module.Chart{ + ID: "client_connections_utilization", + Title: "Client connections utilization", + Units: "percentage", + Fam: "client connections", + Ctx: "pgbouncer.client_connections_utilization", + Priority: prioClientConnectionsUtilization, + Dims: module.Dims{ + {ID: "cl_conns_utilization", Name: "used"}, + }, + } +) + +var ( + dbChartsTmpl = module.Charts{ + dbClientConnectionsTmpl.Copy(), + + dbServerConnectionsUtilizationTmpl.Copy(), + dbServerConnectionsTmpl.Copy(), + + dbClientsWaitTimeChartTmpl.Copy(), + dbClientMaxWaitTimeChartTmpl.Copy(), + + dbTransactionsChartTmpl.Copy(), + dbTransactionsTimeChartTmpl.Copy(), + dbTransactionAvgTimeChartTmpl.Copy(), + + dbQueriesChartTmpl.Copy(), + dbQueriesTimeChartTmpl.Copy(), + dbQueryAvgTimeChartTmpl.Copy(), + + dbNetworkIOChartTmpl.Copy(), + } + + dbClientConnectionsTmpl = module.Chart{ + ID: "db_%s_client_connections", + Title: "Database client connections", + Units: "connections", + Fam: "client connections", + Ctx: "pgbouncer.db_client_connections", + Priority: prioDBClientConnections, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "db_%s_cl_active", Name: "active"}, + {ID: "db_%s_cl_waiting", Name: "waiting"}, + {ID: "db_%s_cl_cancel_req", Name: "cancel_req"}, + }, + } + + dbServerConnectionsTmpl = module.Chart{ + ID: "db_%s_server_connections", + Title: "Database server connections", + Units: "connections", + Fam: "server connections", + Ctx: "pgbouncer.db_server_connections", + Priority: prioDBServerConnections, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "db_%s_sv_active", Name: "active"}, + {ID: "db_%s_sv_idle", Name: "idle"}, + {ID: "db_%s_sv_used", Name: "used"}, + {ID: "db_%s_sv_tested", Name: "tested"}, + {ID: "db_%s_sv_login", Name: "login"}, + }, + } + + dbServerConnectionsUtilizationTmpl = module.Chart{ + ID: "db_%s_server_connections_utilization", + Title: "Database server connections utilization", + Units: 
"percentage", + Fam: "server connections limit", + Ctx: "pgbouncer.db_server_connections_utilization", + Priority: prioDBServerConnectionsUtilization, + Dims: module.Dims{ + {ID: "db_%s_sv_conns_utilization", Name: "used"}, + }, + } + + dbClientsWaitTimeChartTmpl = module.Chart{ + ID: "db_%s_clients_wait_time", + Title: "Database clients wait time", + Units: "seconds", + Fam: "clients wait time", + Ctx: "pgbouncer.db_clients_wait_time", + Priority: prioDBClientsWaitTime, + Dims: module.Dims{ + {ID: "db_%s_total_wait_time", Name: "time", Algo: module.Incremental, Div: 1e6}, + }, + } + dbClientMaxWaitTimeChartTmpl = module.Chart{ + ID: "db_%s_client_max_wait_time", + Title: "Database client max wait time", + Units: "seconds", + Fam: "client max wait time", + Ctx: "pgbouncer.db_client_max_wait_time", + Priority: prioDBClientsWaitMaxTime, + Dims: module.Dims{ + {ID: "db_%s_maxwait", Name: "time", Div: 1e6}, + }, + } + + dbTransactionsChartTmpl = module.Chart{ + ID: "db_%s_transactions", + Title: "Database pooled SQL transactions", + Units: "transactions/s", + Fam: "transactions", + Ctx: "pgbouncer.db_transactions", + Priority: prioDBTransactions, + Dims: module.Dims{ + {ID: "db_%s_total_xact_count", Name: "transactions", Algo: module.Incremental}, + }, + } + dbTransactionsTimeChartTmpl = module.Chart{ + ID: "db_%s_transactions_time", + Title: "Database transactions time", + Units: "seconds", + Fam: "transactions time", + Ctx: "pgbouncer.db_transactions_time", + Priority: prioDBTransactionsTime, + Dims: module.Dims{ + {ID: "db_%s_total_xact_time", Name: "time", Algo: module.Incremental, Div: 1e6}, + }, + } + dbTransactionAvgTimeChartTmpl = module.Chart{ + ID: "db_%s_transactions_average_time", + Title: "Database transaction average time", + Units: "seconds", + Fam: "transaction avg time", + Ctx: "pgbouncer.db_transaction_avg_time", + Priority: prioDBTransactionsAvgTime, + Dims: module.Dims{ + {ID: "db_%s_avg_xact_time", Name: "time", Algo: module.Incremental, Div: 1e6}, + }, + } + + dbQueriesChartTmpl = module.Chart{ + ID: "db_%s_queries", + Title: "Database pooled SQL queries", + Units: "queries/s", + Fam: "queries", + Ctx: "pgbouncer.db_queries", + Priority: prioDBQueries, + Dims: module.Dims{ + {ID: "db_%s_total_query_count", Name: "queries", Algo: module.Incremental}, + }, + } + dbQueriesTimeChartTmpl = module.Chart{ + ID: "db_%s_queries_time", + Title: "Database queries time", + Units: "seconds", + Fam: "queries time", + Ctx: "pgbouncer.db_queries_time", + Priority: prioDBQueriesTime, + Dims: module.Dims{ + {ID: "db_%s_total_query_time", Name: "time", Algo: module.Incremental, Div: 1e6}, + }, + } + dbQueryAvgTimeChartTmpl = module.Chart{ + ID: "db_%s_query_average_time", + Title: "Database query average time", + Units: "seconds", + Fam: "query avg time", + Ctx: "pgbouncer.db_query_avg_time", + Priority: prioDBQueryAvgTime, + Dims: module.Dims{ + {ID: "db_%s_avg_query_time", Name: "time", Algo: module.Incremental, Div: 1e6}, + }, + } + + dbNetworkIOChartTmpl = module.Chart{ + ID: "db_%s_network_io", + Title: "Database traffic", + Units: "B/s", + Fam: "traffic", + Ctx: "pgbouncer.db_network_io", + Priority: prioDBNetworkIO, + Type: module.Area, + Dims: module.Dims{ + {ID: "db_%s_total_received", Name: "received", Algo: module.Incremental}, + {ID: "db_%s_total_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } +) + +func newDatabaseCharts(dbname, pgDBName string) *module.Charts { + charts := dbChartsTmpl.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, dbname) + 
c.Labels = []module.Label{ + {Key: "database", Value: dbname}, + {Key: "postgres_database", Value: pgDBName}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, dbname) + } + } + return charts +} + +func (p *PgBouncer) addNewDatabaseCharts(dbname, pgDBName string) { + charts := newDatabaseCharts(dbname, pgDBName) + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *PgBouncer) removeDatabaseCharts(dbname string) { + prefix := fmt.Sprintf("db_%s_", dbname) + for _, c := range *p.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/collect.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/collect.go new file mode 100644 index 00000000000000..40dbddb9f03a8c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/collect.go @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +import ( + "context" + "database/sql" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/blang/semver/v4" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" +) + +// 'SHOW STATS;' response was changed significantly in v1.8.0 +// v1.8.0 was released in 2015 - no need to complicate the code to support the old version. +var minSupportedVersion = semver.Version{Major: 1, Minor: 8, Patch: 0} + +const ( + queryShowVersion = "SHOW VERSION;" + queryShowConfig = "SHOW CONFIG;" + queryShowDatabases = "SHOW DATABASES;" + queryShowStats = "SHOW STATS;" + queryShowPools = "SHOW POOLS;" +) + +func (p *PgBouncer) collect() (map[string]int64, error) { + if p.db == nil { + if err := p.openConnection(); err != nil { + return nil, err + } + } + if p.version == nil { + ver, err := p.queryVersion() + if err != nil { + return nil, err + } + p.Debugf("connected to PgBouncer v%s", ver) + if ver.LE(minSupportedVersion) { + return nil, fmt.Errorf("unsupported version: v%s, required v%s+", ver, minSupportedVersion) + } + p.version = ver + } + + now := time.Now() + if now.Sub(p.recheckSettingsTime) > p.recheckSettingsEvery { + v, err := p.queryMaxClientConn() + if err != nil { + return nil, err + } + p.maxClientConn = v + } + + // http://www.pgbouncer.org/usage.html + + p.resetMetrics() + + if err := p.collectDatabases(); err != nil { + return nil, err + } + if err := p.collectStats(); err != nil { + return nil, err + } + if err := p.collectPools(); err != nil { + return nil, err + } + + mx := make(map[string]int64) + p.collectMetrics(mx) + + return mx, nil +} + +func (p *PgBouncer) collectMetrics(mx map[string]int64) { + var clientConns int64 + for name, db := range p.metrics.dbs { + if !db.updated { + delete(p.metrics.dbs, name) + p.removeDatabaseCharts(name) + continue + } + if !db.hasCharts { + db.hasCharts = true + p.addNewDatabaseCharts(name, db.pgDBName) + } + + mx["db_"+name+"_total_xact_count"] = db.totalXactCount + mx["db_"+name+"_total_xact_time"] = db.totalXactTime + mx["db_"+name+"_avg_xact_time"] = db.avgXactTime + + mx["db_"+name+"_total_query_count"] = db.totalQueryCount + mx["db_"+name+"_total_query_time"] = db.totalQueryTime + mx["db_"+name+"_avg_query_time"] = db.avgQueryTime + + mx["db_"+name+"_total_wait_time"] = db.totalWaitTime + mx["db_"+name+"_maxwait"] = db.maxWait*1e6 + db.maxWaitUS + + mx["db_"+name+"_cl_active"] = db.clActive + mx["db_"+name+"_cl_waiting"] = db.clWaiting + mx["db_"+name+"_cl_cancel_req"] = db.clCancelReq + clientConns += db.clActive + db.clWaiting + db.clCancelReq + + 
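+ // the cl_* dims above are client-side pool counters; the sv_* dims below describe the server connections PgBouncer holds open to PostgreSQL (both come from 'SHOW POOLS')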
mx["db_"+name+"_sv_active"] = db.svActive + mx["db_"+name+"_sv_idle"] = db.svIdle + mx["db_"+name+"_sv_used"] = db.svUsed + mx["db_"+name+"_sv_tested"] = db.svTested + mx["db_"+name+"_sv_login"] = db.svLogin + + mx["db_"+name+"_total_received"] = db.totalReceived + mx["db_"+name+"_total_sent"] = db.totalSent + + mx["db_"+name+"_sv_conns_utilization"] = calcPercentage(db.currentConnections, db.maxConnections) + } + + mx["cl_conns_utilization"] = calcPercentage(clientConns, p.maxClientConn) +} + +func (p *PgBouncer) collectDatabases() error { + q := queryShowDatabases + p.Debugf("executing query: %v", q) + + var db string + return p.collectQuery(q, func(column, value string) { + switch column { + case "name": + db = value + p.getDBMetrics(db).updated = true + case "database": + p.getDBMetrics(db).pgDBName = value + case "max_connections": + p.getDBMetrics(db).maxConnections = parseInt(value) + case "current_connections": + p.getDBMetrics(db).currentConnections = parseInt(value) + case "paused": + p.getDBMetrics(db).paused = parseInt(value) + case "disabled": + p.getDBMetrics(db).disabled = parseInt(value) + } + }) +} + +func (p *PgBouncer) collectStats() error { + q := queryShowStats + p.Debugf("executing query: %v", q) + + var db string + return p.collectQuery(q, func(column, value string) { + switch column { + case "database": + db = value + p.getDBMetrics(db).updated = true + case "total_xact_count": + p.getDBMetrics(db).totalXactCount = parseInt(value) + case "total_query_count": + p.getDBMetrics(db).totalQueryCount = parseInt(value) + case "total_received": + p.getDBMetrics(db).totalReceived = parseInt(value) + case "total_sent": + p.getDBMetrics(db).totalSent = parseInt(value) + case "total_xact_time": + p.getDBMetrics(db).totalXactTime = parseInt(value) + case "total_query_time": + p.getDBMetrics(db).totalQueryTime = parseInt(value) + case "total_wait_time": + p.getDBMetrics(db).totalWaitTime = parseInt(value) + case "avg_xact_time": + p.getDBMetrics(db).avgXactTime = parseInt(value) + case "avg_query_time": + p.getDBMetrics(db).avgQueryTime = parseInt(value) + } + }) +} + +func (p *PgBouncer) collectPools() error { + q := queryShowPools + p.Debugf("executing query: %v", q) + + // an entry is made for each couple of (database, user). 
+ var db string + return p.collectQuery(q, func(column, value string) { + switch column { + case "database": + db = value + p.getDBMetrics(db).updated = true + case "cl_active": + p.getDBMetrics(db).clActive += parseInt(value) + case "cl_waiting": + p.getDBMetrics(db).clWaiting += parseInt(value) + case "cl_cancel_req": + p.getDBMetrics(db).clCancelReq += parseInt(value) + case "sv_active": + p.getDBMetrics(db).svActive += parseInt(value) + case "sv_idle": + p.getDBMetrics(db).svIdle += parseInt(value) + case "sv_used": + p.getDBMetrics(db).svUsed += parseInt(value) + case "sv_tested": + p.getDBMetrics(db).svTested += parseInt(value) + case "sv_login": + p.getDBMetrics(db).svLogin += parseInt(value) + case "maxwait": + p.getDBMetrics(db).maxWait += parseInt(value) + case "maxwait_us": + p.getDBMetrics(db).maxWaitUS += parseInt(value) + } + }) +} + +func (p *PgBouncer) queryMaxClientConn() (int64, error) { + q := queryShowConfig + p.Debugf("executing query: %v", q) + + var v int64 + var key string + err := p.collectQuery(q, func(column, value string) { + switch column { + case "key": + key = value + case "value": + if key == "max_client_conn" { + v = parseInt(value) + } + } + }) + return v, err +} + +var reVersion = regexp.MustCompile(`\d+\.\d+\.\d+`) + +func (p *PgBouncer) queryVersion() (*semver.Version, error) { + q := queryShowVersion + p.Debugf("executing query: %v", q) + + var resp string + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + if err := p.db.QueryRowContext(ctx, q).Scan(&resp); err != nil { + return nil, err + } + + if !strings.Contains(resp, "PgBouncer") { + return nil, fmt.Errorf("not PgBouncer instance: version response: %s", resp) + } + + ver := reVersion.FindString(resp) + if ver == "" { + return nil, fmt.Errorf("couldn't parse version string '%s' (expected pattern '%s')", resp, reVersion) + } + + v, err := semver.New(ver) + if err != nil { + return nil, fmt.Errorf("couldn't parse version string '%s': %v", ver, err) + } + + return v, nil +} + +func (p *PgBouncer) openConnection() error { + cfg, err := pgx.ParseConfig(p.DSN) + if err != nil { + return err + } + cfg.PreferSimpleProtocol = true + + db, err := sql.Open("pgx", stdlib.RegisterConnConfig(cfg)) + if err != nil { + return fmt.Errorf("error on opening a connection with the PgBouncer database [%s]: %v", p.DSN, err) + } + + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(10 * time.Minute) + + p.db = db + + return nil +} + +func (p *PgBouncer) collectQuery(query string, assign func(column, value string)) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + rows, err := p.db.QueryContext(ctx, query) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() + + columns, err := rows.Columns() + if err != nil { + return err + } + + values := makeNullStrings(len(columns)) + for rows.Next() { + if err := rows.Scan(values...); err != nil { + return err + } + for i, v := range values { + assign(columns[i], valueToString(v)) + } + } + return rows.Err() +} + +func (p *PgBouncer) getDBMetrics(dbname string) *dbMetrics { + db, ok := p.metrics.dbs[dbname] + if !ok { + db = &dbMetrics{name: dbname} + p.metrics.dbs[dbname] = db + } + return db +} + +func (p *PgBouncer) resetMetrics() { + for name, db := range p.metrics.dbs { + p.metrics.dbs[name] = &dbMetrics{ + name: db.name, + pgDBName: db.pgDBName, + hasCharts: db.hasCharts, + } + } +} + +func valueToString(value any) string { + v, ok := 
value.(*sql.NullString) + if !ok || !v.Valid { + return "" + } + return v.String +} + +func makeNullStrings(size int) []any { + vs := make([]any, size) + for i := range vs { + vs[i] = &sql.NullString{} + } + return vs +} + +func parseInt(s string) int64 { + v, _ := strconv.ParseInt(s, 10, 64) + return v +} + +func calcPercentage(value, total int64) int64 { + if total == 0 { + return 0 + } + return value * 100 / total +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/config_schema.json b/src/go/collectors/go.d.plugin/modules/pgbouncer/config_schema.json new file mode 100644 index 00000000000000..16cf22ecbd5ad1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/config_schema.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pgbouncer job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "dsn" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/init.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/init.go new file mode 100644 index 00000000000000..14633508577c3a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/init.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +import "errors" + +func (p *PgBouncer) validateConfig() error { + if p.DSN == "" { + return errors.New("DSN not set") + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/integrations/pgbouncer.md b/src/go/collectors/go.d.plugin/modules/pgbouncer/integrations/pgbouncer.md new file mode 100644 index 00000000000000..fceca13dfb4605 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/integrations/pgbouncer.md @@ -0,0 +1,254 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/pgbouncer/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/pgbouncer/metadata.yaml" +sidebar_label: "PgBouncer" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# PgBouncer + + +<img src="https://netdata.cloud/img/postgres.svg" width="150"/> + + +Plugin: go.d.plugin +Module: pgbouncer + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors PgBouncer servers. + +Executed queries: + +- `SHOW VERSION;` +- `SHOW CONFIG;` +- `SHOW DATABASES;` +- `SHOW STATS;` +- `SHOW POOLS;` + +Information about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. 
+ + + +### Per PgBouncer instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| pgbouncer.client_connections_utilization | used | percentage | + +### Per database + +These metrics refer to the database. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| database | database name | +| postgres_database | Postgres database name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| pgbouncer.db_client_connections | active, waiting, cancel_req | connections | +| pgbouncer.db_server_connections | active, idle, used, tested, login | connections | +| pgbouncer.db_server_connections_utilization | used | percentage | +| pgbouncer.db_clients_wait_time | time | seconds | +| pgbouncer.db_client_max_wait_time | time | seconds | +| pgbouncer.db_transactions | transactions | transactions/s | +| pgbouncer.db_transactions_time | time | seconds | +| pgbouncer.db_transaction_avg_time | time | seconds | +| pgbouncer.db_queries | queries | queries/s | +| pgbouncer.db_queries_time | time | seconds | +| pgbouncer.db_query_avg_time | time | seconds | +| pgbouncer.db_network_io | received, sent | B/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Create netdata user + +Create a user with `stats_users` permissions to query your PgBouncer instance. + +To create the `netdata` user: + +- Add `netdata` user to the `pgbouncer.ini` file: + + ```text + stats_users = netdata + ``` + +- Add a password for the `netdata` user to the `userlist.txt` file: + + ```text + "netdata" "<PASSWORD>" + ``` + +- To verify the credentials, run the following command + + ```bash + psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL + ``` + + When it prompts for a password, enter the password you added to `userlist.txt`. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/pgbouncer.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/pgbouncer.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes | +| timeout | Query timeout in seconds. | 1 | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer' + +``` +</details> + +##### Unix socket + +An example configuration. 
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer' + + - name: remote + dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m pgbouncer + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/metadata.yaml b/src/go/collectors/go.d.plugin/modules/pgbouncer/metadata.yaml new file mode 100644 index 00000000000000..e4a098bc2e87bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/metadata.yaml @@ -0,0 +1,239 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-pgbouncer + plugin_name: go.d.plugin + module_name: pgbouncer + monitored_instance: + name: PgBouncer + link: https://www.pgbouncer.org/ + icon_filename: postgres.svg + categories: + - data-collection.database-servers + keywords: + - pgbouncer + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors PgBouncer servers. + + Executed queries: + + - `SHOW VERSION;` + - `SHOW CONFIG;` + - `SHOW DATABASES;` + - `SHOW STATS;` + - `SHOW POOLS;` + + Information about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html). + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Create netdata user + description: | + Create a user with `stats_users` permissions to query your PgBouncer instance. + + To create the `netdata` user: + + - Add `netdata` user to the `pgbouncer.ini` file: + + ```text + stats_users = netdata + ``` + + - Add a password for the `netdata` user to the `userlist.txt` file: + + ```text + "netdata" "<PASSWORD>" + ``` + + - To verify the credentials, run the following command + + ```bash + psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL + ``` + + When it prompts for a password, enter the password you added to `userlist.txt`. + configuration: + file: + name: go.d/pgbouncer.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. 
+ default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: dsn + description: PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). + default_value: postgres://postgres:postgres@127.0.0.1:6432/pgbouncer + required: true + - name: timeout + description: Query timeout in seconds. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: TCP socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer' + - name: Unix socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer' + + - name: remote + dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer' + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: pgbouncer.client_connections_utilization + description: Client connections utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: database + description: These metrics refer to the database. + labels: + - name: database + description: database name + - name: postgres_database + description: Postgres database name + metrics: + - name: pgbouncer.db_client_connections + description: Database client connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: waiting + - name: cancel_req + - name: pgbouncer.db_server_connections + description: Database server connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: idle + - name: used + - name: tested + - name: login + - name: pgbouncer.db_server_connections_utilization + description: Database server connections utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: pgbouncer.db_clients_wait_time + description: Database clients wait time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: pgbouncer.db_client_max_wait_time + description: Database client max wait time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: pgbouncer.db_transactions + description: Database pooled SQL transactions + unit: transactions/s + chart_type: line + dimensions: + - name: transactions + - name: pgbouncer.db_transactions_time + description: Database transactions time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: pgbouncer.db_transaction_avg_time + description: Database transaction average time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: pgbouncer.db_queries + description: Database pooled SQL queries + unit: queries/s + chart_type: line + dimensions: + - name: queries + - name: pgbouncer.db_queries_time + description: Database queries time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: 
pgbouncer.db_query_avg_time + description: Database query average time + unit: seconds + chart_type: line + dimensions: + - name: time + - name: pgbouncer.db_network_io + description: Database traffic + unit: B/s + chart_type: area + dimensions: + - name: received + - name: sent diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/metrics.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/metrics.go new file mode 100644 index 00000000000000..eaac52771973e8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/metrics.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +type metrics struct { + dbs map[string]*dbMetrics +} + +// dbMetrics represents PgBouncer database (not the PostgreSQL database of the outgoing connection). +type dbMetrics struct { + name string + pgDBName string + + updated bool + hasCharts bool + + // command 'SHOW DATABASES;' + maxConnections int64 + currentConnections int64 + paused int64 + disabled int64 + + // command 'SHOW STATS;' + // https://github.com/pgbouncer/pgbouncer/blob/9a346b0e451d842d7202abc3eccf0ff5a66b2dd6/src/stats.c#L76 + totalXactCount int64 // v1.8+ + totalQueryCount int64 // v1.8+ + totalReceived int64 + totalSent int64 + totalXactTime int64 // v1.8+ + totalQueryTime int64 + totalWaitTime int64 // v1.8+ + avgXactTime int64 // v1.8+ + avgQueryTime int64 + + // command 'SHOW POOLS;' + // https://github.com/pgbouncer/pgbouncer/blob/9a346b0e451d842d7202abc3eccf0ff5a66b2dd6/src/admin.c#L804 + clActive int64 + clWaiting int64 + clCancelReq int64 + svActive int64 + svIdle int64 + svUsed int64 + svTested int64 + svLogin int64 + maxWait int64 + maxWaitUS int64 // v1.8+ +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer.go new file mode 100644 index 00000000000000..ebb11327b17c51 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +import ( + "database/sql" + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/blang/semver/v4" + _ "github.com/jackc/pgx/v4/stdlib" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("pgbouncer", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *PgBouncer { + return &PgBouncer{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second}, + DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer", + }, + charts: globalCharts.Copy(), + recheckSettingsEvery: time.Minute * 5, + metrics: &metrics{ + dbs: make(map[string]*dbMetrics), + }, + } +} + +type Config struct { + DSN string `yaml:"dsn"` + Timeout web.Duration `yaml:"timeout"` +} + +type PgBouncer struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + db *sql.DB + version *semver.Version + + recheckSettingsTime time.Time + recheckSettingsEvery time.Duration + maxClientConn int64 + + metrics *metrics +} + +func (p *PgBouncer) Init() bool { + err := p.validateConfig() + if err != nil { + p.Errorf("config validation: %v", err) + return false + } + + return true +} + +func (p *PgBouncer) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *PgBouncer) Charts() *module.Charts { + return p.charts +} + +func (p *PgBouncer) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + 
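+ // a failed collection is only logged; nil is returned below when nothing was gathered, which in turn makes Check() fail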
p.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (p *PgBouncer) Cleanup() { + if p.db == nil { + return + } + if err := p.db.Close(); err != nil { + p.Warningf("cleanup: error on closing the PgBouncer database [%s]: %v", p.DSN, err) + } + p.db = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer_test.go b/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer_test.go new file mode 100644 index 00000000000000..e1e0695dd739e6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/pgbouncer_test.go @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pgbouncer + +import ( + "bufio" + "bytes" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataV170Version, _ = os.ReadFile("testdata/v1.7.0/version.txt") + dataV1170Version, _ = os.ReadFile("testdata/v1.17.0/version.txt") + dataV1170Config, _ = os.ReadFile("testdata/v1.17.0/config.txt") + dataV1170Databases, _ = os.ReadFile("testdata/v1.17.0/databases.txt") + dataV1170Pools, _ = os.ReadFile("testdata/v1.17.0/pools.txt") + dataV1170Stats, _ = os.ReadFile("testdata/v1.17.0/stats.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataV170Version": dataV170Version, + "dataV1170Version": dataV1170Version, + "dataV1170Config": dataV1170Config, + "dataV1170Databases": dataV1170Databases, + "dataV1170Pools": dataV1170Pools, + "dataV1170Stats": dataV1170Stats, + } { + require.NotNilf(t, data, name) + } +} + +func TestPgBouncer_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "Success with default": { + wantFail: false, + config: New().Config, + }, + "Fail when DSN not set": { + wantFail: true, + config: Config{DSN: ""}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + p := New() + p.Config = test.config + + if test.wantFail { + assert.False(t, p.Init()) + } else { + assert.True(t, p.Init()) + } + }) + } +} + +func TestPgBouncer_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestPgBouncer_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + wantFail bool + }{ + "Success when all queries are successful (v1.17.0)": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV1170Version) + mockExpect(t, m, queryShowConfig, dataV1170Config) + mockExpect(t, m, queryShowDatabases, dataV1170Databases) + mockExpect(t, m, queryShowStats, dataV1170Stats) + mockExpect(t, m, queryShowPools, dataV1170Pools) + }, + }, + "Fail when querying version returns an error": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpectErr(m, queryShowVersion) + }, + }, + "Fail when querying version returns unsupported version": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV170Version) + }, + }, + "Fail when querying config returns an error": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV1170Version) + mockExpectErr(m, queryShowConfig) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + 
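+ // QueryMatcherEqual makes the mock match expected queries by exact string comparison instead of the default regular-expression matching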
require.NoError(t, err) + p := New() + p.db = db + defer func() { _ = db.Close() }() + + require.True(t, p.Init()) + + test.prepareMock(t, mock) + + if test.wantFail { + assert.False(t, p.Check()) + } else { + assert.True(t, p.Check()) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func TestPgBouncer_Collect(t *testing.T) { + type testCaseStep struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + check func(t *testing.T, p *PgBouncer) + } + tests := map[string][]testCaseStep{ + "Success on all queries (v1.17.0)": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV1170Version) + mockExpect(t, m, queryShowConfig, dataV1170Config) + mockExpect(t, m, queryShowDatabases, dataV1170Databases) + mockExpect(t, m, queryShowStats, dataV1170Stats) + mockExpect(t, m, queryShowPools, dataV1170Pools) + }, + check: func(t *testing.T, p *PgBouncer) { + mx := p.Collect() + + expected := map[string]int64{ + "cl_conns_utilization": 47, + "db_myprod1_avg_query_time": 575, + "db_myprod1_avg_xact_time": 575, + "db_myprod1_cl_active": 15, + "db_myprod1_cl_cancel_req": 0, + "db_myprod1_cl_waiting": 0, + "db_myprod1_maxwait": 0, + "db_myprod1_sv_active": 15, + "db_myprod1_sv_conns_utilization": 0, + "db_myprod1_sv_idle": 5, + "db_myprod1_sv_login": 0, + "db_myprod1_sv_tested": 0, + "db_myprod1_sv_used": 0, + "db_myprod1_total_query_count": 12683170, + "db_myprod1_total_query_time": 7223566620, + "db_myprod1_total_received": 809093651, + "db_myprod1_total_sent": 1990971542, + "db_myprod1_total_wait_time": 1029555, + "db_myprod1_total_xact_count": 12683170, + "db_myprod1_total_xact_time": 7223566620, + "db_myprod2_avg_query_time": 581, + "db_myprod2_avg_xact_time": 581, + "db_myprod2_cl_active": 12, + "db_myprod2_cl_cancel_req": 0, + "db_myprod2_cl_waiting": 0, + "db_myprod2_maxwait": 0, + "db_myprod2_sv_active": 11, + "db_myprod2_sv_conns_utilization": 0, + "db_myprod2_sv_idle": 9, + "db_myprod2_sv_login": 0, + "db_myprod2_sv_tested": 0, + "db_myprod2_sv_used": 0, + "db_myprod2_total_query_count": 12538544, + "db_myprod2_total_query_time": 7144226450, + "db_myprod2_total_received": 799867464, + "db_myprod2_total_sent": 1968267687, + "db_myprod2_total_wait_time": 993313, + "db_myprod2_total_xact_count": 12538544, + "db_myprod2_total_xact_time": 7144226450, + "db_pgbouncer_avg_query_time": 0, + "db_pgbouncer_avg_xact_time": 0, + "db_pgbouncer_cl_active": 2, + "db_pgbouncer_cl_cancel_req": 0, + "db_pgbouncer_cl_waiting": 0, + "db_pgbouncer_maxwait": 0, + "db_pgbouncer_sv_active": 0, + "db_pgbouncer_sv_conns_utilization": 0, + "db_pgbouncer_sv_idle": 0, + "db_pgbouncer_sv_login": 0, + "db_pgbouncer_sv_tested": 0, + "db_pgbouncer_sv_used": 0, + "db_pgbouncer_total_query_count": 45, + "db_pgbouncer_total_query_time": 0, + "db_pgbouncer_total_received": 0, + "db_pgbouncer_total_sent": 0, + "db_pgbouncer_total_wait_time": 0, + "db_pgbouncer_total_xact_count": 45, + "db_pgbouncer_total_xact_time": 0, + "db_postgres_avg_query_time": 2790, + "db_postgres_avg_xact_time": 2790, + "db_postgres_cl_active": 18, + "db_postgres_cl_cancel_req": 0, + "db_postgres_cl_waiting": 0, + "db_postgres_maxwait": 0, + "db_postgres_sv_active": 18, + "db_postgres_sv_conns_utilization": 0, + "db_postgres_sv_idle": 2, + "db_postgres_sv_login": 0, + "db_postgres_sv_tested": 0, + "db_postgres_sv_used": 0, + "db_postgres_total_query_count": 25328823, + "db_postgres_total_query_time": 72471882827, + "db_postgres_total_received": 1615791619, + "db_postgres_total_sent": 
3976053858, + "db_postgres_total_wait_time": 50439622253, + "db_postgres_total_xact_count": 25328823, + "db_postgres_total_xact_time": 72471882827, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying version returns an error": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpectErr(m, queryShowVersion) + }, + check: func(t *testing.T, p *PgBouncer) { + mx := p.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying version returns unsupported version": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV170Version) + }, + check: func(t *testing.T, p *PgBouncer) { + mx := p.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying config returns an error": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryShowVersion, dataV1170Version) + mockExpectErr(m, queryShowConfig) + }, + check: func(t *testing.T, p *PgBouncer) { + mx := p.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + p := New() + p.db = db + defer func() { _ = db.Close() }() + + require.True(t, p.Init()) + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepareMock(t, mock) + step.check(t, p) + }) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) { + mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed() +} + +func mockExpectErr(mock sqlmock.Sqlmock, query string) { + mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query)) +} + +func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows { + rows, err := prepareMockRows(data) + require.NoError(t, err) + return rows +} + +func prepareMockRows(data []byte) (*sqlmock.Rows, error) { + r := bytes.NewReader(data) + sc := bufio.NewScanner(r) + + var numColumns int + var rows *sqlmock.Rows + + for sc.Scan() { + s := strings.TrimSpace(sc.Text()) + if s == "" || strings.HasPrefix(s, "---") { + continue + } + + parts := strings.Split(s, "|") + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + if rows == nil { + numColumns = len(parts) + rows = sqlmock.NewRows(parts) + continue + } + + if len(parts) != numColumns { + return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts)) + } + + values := make([]driver.Value, len(parts)) + for i, v := range parts { + values[i] = v + } + rows.AddRow(values...) 
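+ // all cells are added as plain strings; this mirrors the collector, which scans every column into a sql.NullString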
+ } + + if rows == nil { + return nil, errors.New("prepareMockRows(): nil rows result") + } + + return rows, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/config.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/config.txt new file mode 100644 index 00000000000000..da1aba6098fdf6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/config.txt @@ -0,0 +1,86 @@ + key | value | default | changeable +---------------------------+--------------------------------------------------------+--------------------------------------------------------+------------ + admin_users | postgres | | yes + application_name_add_host | 0 | 0 | yes + auth_file | /etc/pgbouncer/userlist.txt | | yes + auth_hba_file | | | yes + auth_query | SELECT usename, passwd FROM pg_shadow WHERE usename=$1 | SELECT usename, passwd FROM pg_shadow WHERE usename=$1 | yes + auth_type | md5 | md5 | yes + auth_user | | | yes + autodb_idle_timeout | 3600 | 3600 | yes + client_idle_timeout | 0 | 0 | yes + client_login_timeout | 60 | 60 | yes + client_tls_ca_file | | | yes + client_tls_cert_file | | | yes + client_tls_ciphers | fast | fast | yes + client_tls_dheparams | auto | auto | yes + client_tls_ecdhcurve | auto | auto | yes + client_tls_key_file | | | yes + client_tls_protocols | secure | secure | yes + client_tls_sslmode | disable | disable | yes + conffile | /etc/pgbouncer/pgbouncer.ini | | yes + default_pool_size | 20 | 20 | yes + disable_pqexec | 0 | 0 | no + dns_max_ttl | 15 | 15 | yes + dns_nxdomain_ttl | 15 | 15 | yes + dns_zone_check_period | 0 | 0 | yes + idle_transaction_timeout | 0 | 0 | yes + ignore_startup_parameters | extra_float_digits | | yes + job_name | pgbouncer | pgbouncer | no + listen_addr | 0.0.0.0 | | no + listen_backlog | 128 | 128 | no + listen_port | 6432 | 6432 | no + log_connections | 1 | 1 | yes + log_disconnections | 1 | 1 | yes + log_pooler_errors | 1 | 1 | yes + log_stats | 1 | 1 | yes + logfile | | | yes + max_client_conn | 100 | 100 | yes + max_db_connections | 0 | 0 | yes + max_packet_size | 2147483647 | 2147483647 | yes + max_user_connections | 0 | 0 | yes + min_pool_size | 0 | 0 | yes + pidfile | | | no + pkt_buf | 4096 | 4096 | no + pool_mode | session | session | yes + query_timeout | 0 | 0 | yes + query_wait_timeout | 120 | 120 | yes + reserve_pool_size | 0 | 0 | yes + reserve_pool_timeout | 5 | 5 | yes + resolv_conf | | | no + sbuf_loopcnt | 5 | 5 | yes + server_check_delay | 30 | 30 | yes + server_check_query | select 1 | select 1 | yes + server_connect_timeout | 15 | 15 | yes + server_fast_close | 0 | 0 | yes + server_idle_timeout | 600 | 600 | yes + server_lifetime | 3600 | 3600 | yes + server_login_retry | 15 | 15 | yes + server_reset_query | DISCARD ALL | DISCARD ALL | yes + server_reset_query_always | 0 | 0 | yes + server_round_robin | 0 | 0 | yes + server_tls_ca_file | | | yes + server_tls_cert_file | | | yes + server_tls_ciphers | fast | fast | yes + server_tls_key_file | | | yes + server_tls_protocols | secure | secure | yes + server_tls_sslmode | disable | disable | yes + so_reuseport | 0 | 0 | no + stats_period | 60 | 60 | yes + stats_users | | | yes + suspend_timeout | 10 | 10 | yes + syslog | 0 | 0 | yes + syslog_facility | daemon | daemon | yes + syslog_ident | pgbouncer | pgbouncer | yes + tcp_defer_accept | 1 | 1 | yes + tcp_keepalive | 1 | 1 | yes + tcp_keepcnt | 0 | 0 | yes + tcp_keepidle | 0 | 0 | yes + tcp_keepintvl | 0 | 0 | yes + tcp_socket_buffer | 0 | 0 | yes + tcp_user_timeout 
| 0 | 0 | yes + unix_socket_dir | | /tmp | no + unix_socket_group | | | no + unix_socket_mode | 511 | 0777 | no + user | postgres | | no + verbose | 0 | | yes \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/databases.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/databases.txt new file mode 100644 index 00000000000000..9e8f14695076ef --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/databases.txt @@ -0,0 +1,6 @@ + name | host | port | database | force_user | pool_size | min_pool_size | reserve_pool | pool_mode | max_connections | current_connections | paused | disabled +-----------+-----------+------+-----------+------------+-----------+---------------+--------------+-----------+-----------------+---------------------+--------+---------- + myprod1 | 127.0.0.1 | 5432 | myprod1 | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0 + myprod2 | 127.0.0.1 | 5432 | myprod2 | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0 + pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | 0 | statement | 0 | 0 | 0 | 0 + postgres | 127.0.0.1 | 5432 | postgres | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/pools.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/pools.txt new file mode 100644 index 00000000000000..dec3326adf0fdf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/pools.txt @@ -0,0 +1,6 @@ + database | user | cl_active | cl_waiting | cl_cancel_req | sv_active | sv_idle | sv_used | sv_tested | sv_login | maxwait | maxwait_us | pool_mode +-----------+-----------+-----------+------------+---------------+-----------+---------+---------+-----------+----------+---------+------------+----------- + myprod1 | postgres | 15 | 0 | 0 | 15 | 5 | 0 | 0 | 0 | 0 | 0 | session + myprod2 | postgres | 12 | 0 | 0 | 11 | 9 | 0 | 0 | 0 | 0 | 0 | session + pgbouncer | pgbouncer | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | statement + postgres | postgres | 18 | 0 | 0 | 18 | 2 | 0 | 0 | 0 | 0 | 0 | session \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/stats.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/stats.txt new file mode 100644 index 00000000000000..3b66fc3237b8bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/stats.txt @@ -0,0 +1,6 @@ + database | total_xact_count | total_query_count | total_received | total_sent | total_xact_time | total_query_time | total_wait_time | avg_xact_count | avg_query_count | avg_recv | avg_sent | avg_xact_time | avg_query_time | avg_wait_time +-----------+------------------+-------------------+----------------+------------+-----------------+------------------+-----------------+----------------+-----------------+----------+----------+---------------+----------------+--------------- + myprod1 | 12683170 | 12683170 | 809093651 | 1990971542 | 7223566620 | 7223566620 | 1029555 | 900 | 900 | 57434 | 141358 | 575 | 575 | 3 + myprod2 | 12538544 | 12538544 | 799867464 | 1968267687 | 7144226450 | 7144226450 | 993313 | 885 | 885 | 56511 | 139050 | 581 | 581 | 14 + pgbouncer | 45 | 45 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + postgres | 25328823 | 25328823 | 1615791619 | 3976053858 | 72471882827 | 72471882827 | 50439622253 | 1901 | 1901 | 121329 | 298556 | 2790 | 2790 | 3641761 \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/version.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/version.txt new file mode 100644 index 00000000000000..fa2c806a2805fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.17.0/version.txt @@ -0,0 +1,3 @@ + version +------------------ + PgBouncer 1.17.0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.7.0/version.txt b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.7.0/version.txt new file mode 100644 index 00000000000000..ff0fd70a89d090 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pgbouncer/testdata/v1.7.0/version.txt @@ -0,0 +1,3 @@ + version +------------------ + PgBouncer 1.7.0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/README.md b/src/go/collectors/go.d.plugin/modules/phpdaemon/README.md new file mode 120000 index 00000000000000..2f2fca9f19e354 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/README.md @@ -0,0 +1 @@ +integrations/phpdaemon.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/charts.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/charts.go new file mode 100644 index 00000000000000..c821208a2ac200 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/charts.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Chart is an alias for module.Chart + Chart = module.Chart + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "workers", + Title: "Workers", + Units: "workers", + Fam: "workers", + Ctx: "phpdaemon.workers", + Type: module.Stacked, + Dims: Dims{ + {ID: "alive"}, + {ID: "shutdown"}, + }, + }, + { + ID: "alive_workers", + Title: "Alive Workers State", + Units: "workers", + Fam: "workers", + Ctx: "phpdaemon.alive_workers", + Type: module.Stacked, + Dims: Dims{ + {ID: "idle"}, + {ID: "busy"}, + {ID: "reloading"}, + }, + }, + { + ID: "idle_workers", + Title: "Idle Workers State", + Units: "workers", + Fam: "workers", + Ctx: "phpdaemon.idle_workers", + Type: module.Stacked, + Dims: Dims{ + {ID: "preinit"}, + {ID: "init"}, + {ID: "initialized"}, + }, + }, +} + +var uptimeChart = Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "phpdaemon.uptime", + Dims: Dims{ + {ID: "uptime", Name: "time"}, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/client.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/client.go new file mode 100644 index 00000000000000..11b44524d667a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/client.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +type decodeFunc func(dst interface{}, reader io.Reader) error + +func decodeJson(dst interface{}, reader io.Reader) error { return json.NewDecoder(reader).Decode(dst) } + +func newAPIClient(httpClient *http.Client, request web.Request) *client { + return &client{ + httpClient: httpClient, + request: request, + } +} + +type client struct { + httpClient *http.Client + request web.Request +} + +func (c *client) queryFullStatus() (*FullStatus, error) { + var 
status FullStatus + err := c.doWithDecode(&status, decodeJson, c.request) + if err != nil { + return nil, err + } + + return &status, nil +} + +func (c *client) doWithDecode(dst interface{}, decode decodeFunc, request web.Request) error { + req, err := web.NewHTTPRequest(request) + if err != nil { + return fmt.Errorf("error on creating http request to %s : %v", request.URL, err) + } + + resp, err := c.doOK(req) + defer closeBody(resp) + if err != nil { + return err + } + + if err = decode(dst, resp.Body); err != nil { + return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + } + + return nil +} + +func (c client) doOK(req *http.Request) (*http.Response, error) { + resp, err := c.httpClient.Do(req) + if err != nil { + return resp, fmt.Errorf("error on request : %v", err) + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + return resp, err +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/collect.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/collect.go new file mode 100644 index 00000000000000..c31c10c8b3b6f1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/collect.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import "github.com/netdata/go.d.plugin/pkg/stm" + +func (p *PHPDaemon) collect() (map[string]int64, error) { + s, err := p.client.queryFullStatus() + + if err != nil { + return nil, err + } + + // https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php + // see getStateOfWorkers() + s.Initialized = s.Idle - (s.Init + s.Preinit) + + return stm.ToMap(s), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/config_schema.json b/src/go/collectors/go.d.plugin/modules/phpdaemon/config_schema.json new file mode 100644 index 00000000000000..c200d437b6c881 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/phpdaemon job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/integrations/phpdaemon.md b/src/go/collectors/go.d.plugin/modules/phpdaemon/integrations/phpdaemon.md new file mode 100644 index 00000000000000..6f2ee1cb12c961 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/integrations/phpdaemon.md @@ -0,0 +1,298 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/phpdaemon/README.md" +meta_yaml: 
"https://github.com/netdata/go.d.plugin/edit/master/modules/phpdaemon/metadata.yaml" +sidebar_label: "phpDaemon" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# phpDaemon + + +<img src="https://netdata.cloud/img/php.svg" width="150"/> + + +Plugin: go.d.plugin +Module: phpdaemon + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors phpDaemon instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per phpDaemon instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| phpdaemon.workers | alive, shutdown | workers | +| phpdaemon.alive_workers | idle, busy, reloading | workers | +| phpdaemon.idle_workers | preinit, init, initialized | workers | +| phpdaemon.uptime | time | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable phpDaemon's HTTP server + +Statistics expected to be in JSON format. + +<details> +<summary>phpDaemon configuration</summary> + +Instruction from [@METAJIJI](https://github.com/METAJIJI). + +To enable `phpd` statistics on http, you must enable the http server and write an application. +Application is important, because standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in html format and unusable for `netdata`. 
+ +```php +// /opt/phpdaemon/conf/phpd.conf + +path /opt/phpdaemon/conf/AppResolver.php; +Pool:HTTPServer { + privileged; + listen '127.0.0.1'; + port 8509; +} +``` + +```php +// /opt/phpdaemon/conf/AppResolver.php + +<?php + +class MyAppResolver extends \PHPDaemon\Core\AppResolver { + public function getRequestRoute($req, $upstream) { + if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) { + return $m[1]; + } + } +} + +return new MyAppResolver; +``` + +```php +/opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php + +<?php +namespace PHPDaemon\Applications; + +class FullStatus extends \PHPDaemon\Core\AppInstance { + public function beginRequest($req, $upstream) { + return new FullStatusRequest($this, $upstream, $req); + } +} +``` + +```php +// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatusRequest.php + +<?php +namespace PHPDaemon\Applications; + +use PHPDaemon\Core\Daemon; +use PHPDaemon\HTTPRequest\Generic; + +class FullStatusRequest extends Generic { + public function run() { + $stime = microtime(true); + $this->header('Content-Type: application/javascript; charset=utf-8'); + + $stat = Daemon::getStateOfWorkers(); + $stat['uptime'] = time() - Daemon::$startTime; + echo json_encode($stat); + } +} +``` + +</details> + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/phpdaemon.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/phpdaemon.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes | +| timeout | HTTP request timeout. | 2 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8509/FullStatus + +``` +</details> + +##### HTTP authentication + +HTTP authentication. 
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8509/FullStatus
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+HTTPS with self-signed certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8509/FullStatus
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8509/FullStatus
+
+  - name: remote
+    url: http://192.0.2.1:8509/FullStatus
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m phpdaemon
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/metadata.yaml b/src/go/collectors/go.d.plugin/modules/phpdaemon/metadata.yaml
new file mode 100644
index 00000000000000..bd3ae8e5757332
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/metadata.yaml
@@ -0,0 +1,276 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-phpdaemon
+      plugin_name: go.d.plugin
+      module_name: phpdaemon
+      monitored_instance:
+        name: phpDaemon
+        link: https://github.com/kakserpom/phpdaemon
+        icon_filename: php.svg
+        categories:
+          - data-collection.apm
+      keywords:
+        - phpdaemon
+        - php
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors phpDaemon instances.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Enable phpDaemon's HTTP server
+            description: |
+              Statistics are expected to be in JSON format.
+
+              <details>
+              <summary>phpDaemon configuration</summary>
+
+              Instructions from [@METAJIJI](https://github.com/METAJIJI).
+
+              To enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.
+              The application is important, because the standalone application [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) provides statistics in HTML format, which is unusable for `netdata`.
+ + ```php + // /opt/phpdaemon/conf/phpd.conf + + path /opt/phpdaemon/conf/AppResolver.php; + Pool:HTTPServer { + privileged; + listen '127.0.0.1'; + port 8509; + } + ``` + + ```php + // /opt/phpdaemon/conf/AppResolver.php + + <?php + + class MyAppResolver extends \PHPDaemon\Core\AppResolver { + public function getRequestRoute($req, $upstream) { + if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) { + return $m[1]; + } + } + } + + return new MyAppResolver; + ``` + + ```php + /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php + + <?php + namespace PHPDaemon\Applications; + + class FullStatus extends \PHPDaemon\Core\AppInstance { + public function beginRequest($req, $upstream) { + return new FullStatusRequest($this, $upstream, $req); + } + } + ``` + + ```php + // /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatusRequest.php + + <?php + namespace PHPDaemon\Applications; + + use PHPDaemon\Core\Daemon; + use PHPDaemon\HTTPRequest\Generic; + + class FullStatusRequest extends Generic { + public function run() { + $stime = microtime(true); + $this->header('Content-Type: application/javascript; charset=utf-8'); + + $stat = Daemon::getStateOfWorkers(); + $stat['uptime'] = time() - Daemon::$startTime; + echo json_encode($stat); + } + } + ``` + + </details> + configuration: + file: + name: go.d/phpdaemon.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8509/FullStatus + required: true + - name: timeout + description: HTTP request timeout. + default_value: 2 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8509/FullStatus
+            - name: HTTP authentication
+              description: HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8509/FullStatus
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: HTTPS with self-signed certificate.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8509/FullStatus
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8509/FullStatus
+
+                  - name: remote
+                    url: http://192.0.2.1:8509/FullStatus
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: phpdaemon.workers
+              description: Workers
+              unit: workers
+              chart_type: line
+              dimensions:
+                - name: alive
+                - name: shutdown
+            - name: phpdaemon.alive_workers
+              description: Alive Workers State
+              unit: workers
+              chart_type: line
+              dimensions:
+                - name: idle
+                - name: busy
+                - name: reloading
+            - name: phpdaemon.idle_workers
+              description: Idle Workers State
+              unit: workers
+              chart_type: line
+              dimensions:
+                - name: preinit
+                - name: init
+                - name: initialized
+            - name: phpdaemon.uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: time
diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/metrics.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/metrics.go
new file mode 100644
index 00000000000000..1be3c0be3999a4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/metrics.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+// https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php
+// see getStateOfWorkers()
+
+// WorkerState represents phpdaemon worker state.
+type WorkerState struct {
+    // Alive is the sum of Idle, Busy and Reloading.
+    Alive    int64 `stm:"alive"`
+    Shutdown int64 `stm:"shutdown"`
+
+    // Idle means that the worker is not in the middle of executing a valuable callback (e.g. a request) at this moment.
+    // It does not mean that the worker has no pending operations.
+    // Idle is the sum of Preinit, Init and Initialized.
+    Idle int64 `stm:"idle"`
+    // Busy means that the worker is in the middle of executing a valuable callback.
+    Busy      int64 `stm:"busy"`
+    Reloading int64 `stm:"reloading"`
+
+    Preinit int64 `stm:"preinit"`
+    // Init means that the worker is starting right now.
+    Init int64 `stm:"init"`
+    // Initialized means that the worker is in the Idle state.
+    Initialized int64 `stm:"initialized"`
+}
+
+// FullStatus represents the response of phpdaemon's getStateOfWorkers().
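+// The struct is flattened via the stm struct tags into the collected metrics map.
+// Uptime is a pointer so that a missing field can be told apart from a zero value;
+// the uptime chart is only added once the field is seen (see Check in phpdaemon.go).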
+type FullStatus struct { + WorkerState `stm:""` + Uptime *int64 `stm:"uptime"` +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon.go new file mode 100644 index 00000000000000..506892cfe050e9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("phpdaemon", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + defaultURL = "http://127.0.0.1:8509/FullStatus" + defaultHTTPTimeout = time.Second * 2 +) + +// New creates PHPDaemon with default values. +func New() *PHPDaemon { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } + + return &PHPDaemon{ + Config: config, + charts: charts.Copy(), + } +} + +// Config is the PHPDaemon module configuration. +type Config struct { + web.HTTP `yaml:",inline"` +} + +// PHPDaemon PHPDaemon module. +type PHPDaemon struct { + module.Base + Config `yaml:",inline"` + + client *client + charts *Charts +} + +// Cleanup makes cleanup. +func (PHPDaemon) Cleanup() {} + +// Init makes initialization. +func (p *PHPDaemon) Init() bool { + httpClient, err := web.NewHTTPClient(p.Client) + if err != nil { + p.Errorf("error on creating http client : %v", err) + return false + } + + _, err = web.NewHTTPRequest(p.Request) + if err != nil { + p.Errorf("error on creating http request to %s : %v", p.URL, err) + return false + } + + p.client = newAPIClient(httpClient, p.Request) + + p.Debugf("using URL %s", p.URL) + p.Debugf("using timeout: %s", p.Timeout.Duration) + + return true +} + +// Check makes check. +func (p *PHPDaemon) Check() bool { + mx := p.Collect() + + if len(mx) == 0 { + return false + } + if _, ok := mx["uptime"]; ok { + // TODO: remove panic + panicIf(p.charts.Add(uptimeChart.Copy())) + } + + return true +} + +// Charts creates Charts. +func (p PHPDaemon) Charts() *Charts { return p.charts } + +// Collect collects metrics. 
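+// It returns nil on error; Check treats an empty result as a failed check.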
+func (p *PHPDaemon) Collect() map[string]int64 { + mx, err := p.collect() + + if err != nil { + p.Error(err) + return nil + } + + return mx +} + +func panicIf(err error) { + if err == nil { + return + } + panic(err) +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon_test.go b/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon_test.go new file mode 100644 index 00000000000000..0634e6ec481725 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/phpdaemon_test.go @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testURL = "http://127.0.0.1:38001" +) + +var testFullStatusData, _ = os.ReadFile("testdata/fullstatus.json") + +func Test_testData(t *testing.T) { + assert.NotEmpty(t, testFullStatusData) +} + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestPHPDaemon_Init(t *testing.T) { + job := New() + + require.True(t, job.Init()) + assert.NotNil(t, job.client) +} + +func TestPHPDaemon_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testFullStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestPHPDaemon_CheckNG(t *testing.T) { + job := New() + job.URL = testURL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestPHPDaemon_Charts(t *testing.T) { + job := New() + + assert.NotNil(t, job.Charts()) + assert.False(t, job.charts.Has(uptimeChart.ID)) + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testFullStatusData) + })) + defer ts.Close() + + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) + assert.True(t, job.charts.Has(uptimeChart.ID)) +} + +func TestPHPDaemon_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestPHPDaemon_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testFullStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) + + expected := map[string]int64{ + "alive": 350, + "busy": 200, + "idle": 50, + "init": 20, + "initialized": 10, + "preinit": 20, + "reloading": 100, + "shutdown": 500, + "uptime": 15765, + } + + assert.Equal(t, expected, job.Collect()) + +} + +func TestPHPDaemon_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestPHPDaemon_404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/phpdaemon/testdata/fullstatus.json 
b/src/go/collectors/go.d.plugin/modules/phpdaemon/testdata/fullstatus.json new file mode 100644 index 00000000000000..b7d2a5e7771b24 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpdaemon/testdata/fullstatus.json @@ -0,0 +1,10 @@ +{ + "idle": 50, + "busy": 200, + "alive": 350, + "shutdown": 500, + "preinit": 20, + "init": 20, + "reloading": 100, + "uptime": 15765 +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/README.md b/src/go/collectors/go.d.plugin/modules/phpfpm/README.md new file mode 120000 index 00000000000000..2953ff4df826b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/README.md @@ -0,0 +1 @@ +integrations/php-fpm.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/charts.go b/src/go/collectors/go.d.plugin/modules/phpfpm/charts.go new file mode 100644 index 00000000000000..bf264ae9f9c98f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/charts.go @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "connections", + Title: "Active Connections", + Units: "connections", + Fam: "active connections", + Ctx: "phpfpm.connections", + Dims: Dims{ + {ID: "active"}, + {ID: "maxActive", Name: "max active"}, + {ID: "idle"}, + }, + }, + { + ID: "requests", + Title: "Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "phpfpm.requests", + Dims: Dims{ + {ID: "requests", Algo: module.Incremental}, + }, + }, + { + ID: "performance", + Title: "Performance", + Units: "status", + Fam: "performance", + Ctx: "phpfpm.performance", + Dims: Dims{ + {ID: "reached", Name: "max children reached"}, + {ID: "slow", Name: "slow requests"}, + }, + }, + { + ID: "request_duration", + Title: "Requests Duration Among All Idle Processes", + Units: "milliseconds", + Fam: "request duration", + Ctx: "phpfpm.request_duration", + Dims: Dims{ + {ID: "minReqDur", Name: "min", Div: 1000}, + {ID: "maxReqDur", Name: "max", Div: 1000}, + {ID: "avgReqDur", Name: "avg", Div: 1000}, + }, + }, + { + ID: "request_cpu", + Title: "Last Request CPU Usage Among All Idle Processes", + Units: "percentage", + Fam: "request CPU", + Ctx: "phpfpm.request_cpu", + Dims: Dims{ + {ID: "minReqCpu", Name: "min"}, + {ID: "maxReqCpu", Name: "max"}, + {ID: "avgReqCpu", Name: "avg"}, + }, + }, + { + ID: "request_mem", + Title: "Last Request Memory Usage Among All Idle Processes", + Units: "KB", + Fam: "request memory", + Ctx: "phpfpm.request_mem", + Dims: Dims{ + {ID: "minReqMem", Name: "min", Div: 1024}, + {ID: "maxReqMem", Name: "max", Div: 1024}, + {ID: "avgReqMem", Name: "avg", Div: 1024}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/client.go b/src/go/collectors/go.d.plugin/modules/phpfpm/client.go new file mode 100644 index 00000000000000..aa3eb636812969 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/client.go @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + fcgiclient "github.com/tomasen/fcgi_client" +) + +type ( + status struct { + Active int64 `json:"active processes" stm:"active"` + MaxActive int64 `json:"max active processes" stm:"maxActive"` + 
Idle int64 `json:"idle processes" stm:"idle"`
+        Requests  int64  `json:"accepted conn" stm:"requests"`
+        Reached   int64  `json:"max children reached" stm:"reached"`
+        Slow      int64  `json:"slow requests" stm:"slow"`
+        Processes []proc `json:"processes"`
+    }
+    proc struct {
+        PID      int64           `json:"pid"`
+        State    string          `json:"state"`
+        Duration requestDuration `json:"request duration"`
+        CPU      float64         `json:"last request cpu"`
+        Memory   int64           `json:"last request memory"`
+    }
+    requestDuration int64
+)
+
+// UnmarshalJSON implements custom decoding for the request duration field.
+// php-fpm can report values that do not fit into int64 (see the testdata fixtures);
+// such values fall back to 0 instead of failing the decode.
+func (rd *requestDuration) UnmarshalJSON(b []byte) error {
+    if rdc, err := strconv.Atoi(string(b)); err != nil {
+        *rd = 0
+    } else {
+        *rd = requestDuration(rdc)
+    }
+    return nil
+}
+
+type client interface {
+    getStatus() (*status, error)
+}
+
+type httpClient struct {
+    client *http.Client
+    req    web.Request
+    dec    decoder
+}
+
+func newHTTPClient(c *http.Client, r web.Request) (*httpClient, error) {
+    u, err := url.Parse(r.URL)
+    if err != nil {
+        return nil, err
+    }
+
+    dec := decodeText
+    if _, ok := u.Query()["json"]; ok {
+        dec = decodeJSON
+    }
+    return &httpClient{
+        client: c,
+        req:    r,
+        dec:    dec,
+    }, nil
+}
+
+func (c *httpClient) getStatus() (*status, error) {
+    req, err := web.NewHTTPRequest(c.req)
+    if err != nil {
+        return nil, fmt.Errorf("error on creating HTTP request: %v", err)
+    }
+
+    resp, err := c.client.Do(req)
+    if err != nil {
+        return nil, fmt.Errorf("error on HTTP request to '%s': %v", req.URL, err)
+    }
+    defer func() {
+        _, _ = io.Copy(io.Discard, resp.Body)
+        _ = resp.Body.Close()
+    }()
+
+    if resp.StatusCode != http.StatusOK {
+        return nil, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+    }
+
+    st := &status{}
+    if err := c.dec(resp.Body, st); err != nil {
+        return nil, fmt.Errorf("error parsing HTTP response from '%s': %v", req.URL, err)
+    }
+    return st, nil
+}
+
+type socketClient struct {
+    socket  string
+    timeout time.Duration
+    env     map[string]string
+}
+
+func newSocketClient(socket string, timeout time.Duration, fcgiPath string) *socketClient {
+    return &socketClient{
+        socket:  socket,
+        timeout: timeout,
+        env: map[string]string{
+            "SCRIPT_NAME":     fcgiPath,
+            "SCRIPT_FILENAME": fcgiPath,
+            "SERVER_SOFTWARE": "go / fcgiclient ",
+            "REMOTE_ADDR":     "127.0.0.1",
+            "QUERY_STRING":    "json&full",
+            "REQUEST_METHOD":  "GET",
+            "CONTENT_TYPE":    "application/json",
+        },
+    }
+}
+
+func (c *socketClient) getStatus() (*status, error) {
+    socket, err := fcgiclient.DialTimeout("unix", c.socket, c.timeout)
+    if err != nil {
+        return nil, fmt.Errorf("error on connecting to socket '%s': %v", c.socket, err)
+    }
+    defer socket.Close()
+
+    resp, err := socket.Get(c.env)
+    if err != nil {
+        return nil, fmt.Errorf("error on getting data from socket '%s': %v", c.socket, err)
+    }
+
+    content, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return nil, fmt.Errorf("error on reading response from socket '%s': %v", c.socket, err)
+    }
+
+    st := &status{}
+    if err := json.Unmarshal(content, st); err != nil {
+        return nil, fmt.Errorf("error on decoding response from socket '%s': %v", c.socket, err)
+    }
+    return st, nil
+}
+
+type tcpClient struct {
+    address string
+    timeout time.Duration
+    env     map[string]string
+}
+
+func newTcpClient(address string, timeout time.Duration, fcgiPath string) *tcpClient {
+    return &tcpClient{
+        address: address,
+        timeout: timeout,
+        env: map[string]string{
+            "SCRIPT_NAME":     fcgiPath,
+            "SCRIPT_FILENAME": fcgiPath,
+            "SERVER_SOFTWARE": "go / fcgiclient ",
+            "REMOTE_ADDR":     "127.0.0.1",
+            "QUERY_STRING":
"json&full", + "REQUEST_METHOD": "GET", + "CONTENT_TYPE": "application/json", + }, + } +} + +func (c *tcpClient) getStatus() (*status, error) { + client, err := fcgiclient.DialTimeout("tcp", c.address, c.timeout) + if err != nil { + return nil, fmt.Errorf("error on connecting to address '%s': %v", c.address, err) + } + defer client.Close() + + resp, err := client.Get(c.env) + if err != nil { + return nil, fmt.Errorf("error on getting data from address '%s': %v", c.address, err) + } + + content, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error on reading response from address '%s': %v", c.address, err) + } + + st := &status{} + if err := json.Unmarshal(content, st); err != nil { + return nil, fmt.Errorf("error on decoding response from address '%s': %v", c.address, err) + } + return st, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/collect.go b/src/go/collectors/go.d.plugin/modules/phpfpm/collect.go new file mode 100644 index 00000000000000..c1bc877a860382 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/collect.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import ( + "math" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func (p *Phpfpm) collect() (map[string]int64, error) { + st, err := p.client.getStatus() + if err != nil { + return nil, err + } + + mx := stm.ToMap(st) + if !hasIdleProcesses(st.Processes) { + return mx, nil + } + + calcIdleProcessesRequestsDuration(mx, st.Processes) + calcIdleProcessesLastRequestCPU(mx, st.Processes) + calcIdleProcessesLastRequestMemory(mx, st.Processes) + return mx, nil +} + +func calcIdleProcessesRequestsDuration(mx map[string]int64, processes []proc) { + statProcesses(mx, processes, "ReqDur", func(p proc) int64 { return int64(p.Duration) }) +} + +func calcIdleProcessesLastRequestCPU(mx map[string]int64, processes []proc) { + statProcesses(mx, processes, "ReqCpu", func(p proc) int64 { return int64(p.CPU) }) +} + +func calcIdleProcessesLastRequestMemory(mx map[string]int64, processes []proc) { + statProcesses(mx, processes, "ReqMem", func(p proc) int64 { return p.Memory }) +} + +func hasIdleProcesses(processes []proc) bool { + for _, p := range processes { + if p.State == "Idle" { + return true + } + } + return false +} + +type accessor func(p proc) int64 + +func statProcesses(m map[string]int64, processes []proc, met string, acc accessor) { + var sum, count, min, max int64 + for _, proc := range processes { + if proc.State != "Idle" { + continue + } + + val := acc(proc) + sum += val + count += 1 + if count == 1 { + min, max = val, val + continue + } + min = int64(math.Min(float64(min), float64(val))) + max = int64(math.Max(float64(max), float64(val))) + } + + m["min"+met] = min + m["max"+met] = max + m["avg"+met] = sum / count +} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/config_schema.json b/src/go/collectors/go.d.plugin/modules/phpfpm/config_schema.json new file mode 100644 index 00000000000000..a6b0140f37fcab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/config_schema.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/phpfpm job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "socket": { + "type": "string" + }, + "address": { + "type": "string" + }, + "fcgi_path": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": 
"string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "oneOf": [ + { + "required": [ + "name", + "url" + ] + }, + { + "required": [ + "name", + "socket" + ] + }, + { + "required": [ + "name", + "address" + ] + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/decode.go b/src/go/collectors/go.d.plugin/modules/phpfpm/decode.go new file mode 100644 index 00000000000000..021e1fb4c5d121 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/decode.go @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import ( + "bufio" + "encoding/json" + "errors" + "io" + "strconv" + "strings" +) + +type decoder func(r io.Reader, s *status) error + +func decodeJSON(r io.Reader, s *status) error { + return json.NewDecoder(r).Decode(s) +} + +func decodeText(r io.Reader, s *status) error { + parts := readParts(r) + if len(parts) == 0 { + return errors.New("invalid text format") + } + + part, parts := parts[0], parts[1:] + if err := readStatus(part, s); err != nil { + return err + } + + return readProcesses(parts, s) +} + +func readParts(r io.Reader) [][]string { + sc := bufio.NewScanner(r) + + var parts [][]string + var lines []string + for sc.Scan() { + line := strings.Trim(sc.Text(), "\r\n ") + // Split parts by star border + if strings.HasPrefix(line, "***") { + parts = append(parts, lines) + lines = []string{} + continue + } + // Skip empty lines + if line == "" { + continue + } + lines = append(lines, line) + } + + if len(lines) > 0 { + parts = append(parts, lines) + } + return parts +} + +func readStatus(data []string, s *status) error { + for _, line := range data { + key, val, err := parseLine(line) + if err != nil { + return err + } + + switch key { + case "active processes": + s.Active = parseInt(val) + case "max active processes": + s.MaxActive = parseInt(val) + case "idle processes": + s.Idle = parseInt(val) + case "accepted conn": + s.Requests = parseInt(val) + case "max children reached": + s.Reached = parseInt(val) + case "slow requests": + s.Slow = parseInt(val) + } + } + return nil +} + +func readProcesses(procs [][]string, s *status) error { + for _, part := range procs { + var proc proc + for _, line := range part { + key, val, err := parseLine(line) + if err != nil { + return err + } + + switch key { + case "state": + proc.State = val + case "request duration": + proc.Duration = requestDuration(parseInt(val)) + case "last request cpu": + proc.CPU = parseFloat(val) + case "last request memory": + proc.Memory = parseInt(val) + } + } + s.Processes = append(s.Processes, proc) + } + return nil +} + +func parseLine(s string) (string, string, error) { + kv := strings.SplitN(s, ":", 2) + if len(kv) != 2 { + return "", "", errors.New("invalid text format line") + } + return strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]), nil +} + +func parseInt(s string) int64 { + val, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64) + if err != nil { + return 0 + } + return val +} + +func parseFloat(s string) float64 { + val, err := strconv.ParseFloat(strings.TrimSpace(s), 64) + if err != nil { + return 0 
+    }
+    return val
+}
diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/init.go b/src/go/collectors/go.d.plugin/modules/phpfpm/init.go
new file mode 100644
index 00000000000000..0e764cbe039d27
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/phpfpm/init.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+    "errors"
+    "fmt"
+    "os"
+
+    "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (p Phpfpm) initClient() (client, error) {
+    if p.Socket != "" {
+        return p.initSocketClient()
+    }
+    if p.Address != "" {
+        return p.initTcpClient()
+    }
+    if p.URL != "" {
+        return p.initHTTPClient()
+    }
+    return nil, errors.New("none of 'socket', 'address', or 'url' is set")
+}
+
+func (p Phpfpm) initHTTPClient() (*httpClient, error) {
+    c, err := web.NewHTTPClient(p.Client)
+    if err != nil {
+        return nil, fmt.Errorf("create HTTP client: %v", err)
+    }
+    p.Debugf("using HTTP client, URL: %s", p.URL)
+    p.Debugf("using timeout: %s", p.Timeout.Duration)
+    return newHTTPClient(c, p.Request)
+}
+
+func (p Phpfpm) initSocketClient() (*socketClient, error) {
+    if _, err := os.Stat(p.Socket); err != nil {
+        return nil, fmt.Errorf("the socket '%s' does not exist: %v", p.Socket, err)
+    }
+    p.Debugf("using socket client: %s", p.Socket)
+    p.Debugf("using timeout: %s", p.Timeout.Duration)
+    p.Debugf("using fcgi path: %s", p.FcgiPath)
+    return newSocketClient(p.Socket, p.Timeout.Duration, p.FcgiPath), nil
+}
+
+func (p Phpfpm) initTcpClient() (*tcpClient, error) {
+    p.Debugf("using tcp client: %s", p.Address)
+    p.Debugf("using timeout: %s", p.Timeout.Duration)
+    p.Debugf("using fcgi path: %s", p.FcgiPath)
+    return newTcpClient(p.Address, p.Timeout.Duration, p.FcgiPath), nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/integrations/php-fpm.md b/src/go/collectors/go.d.plugin/modules/phpfpm/integrations/php-fpm.md
new file mode 100644
index 00000000000000..3fbae9719303b2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/phpfpm/integrations/php-fpm.md
@@ -0,0 +1,229 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/phpfpm/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/phpfpm/metadata.yaml"
+sidebar_label: "PHP-FPM"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PHP-FPM
+
+
+<img src="https://netdata.cloud/img/php.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: phpfpm
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors PHP-FPM instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PHP-FPM instance
+
+These metrics refer to the entire monitored application.
+ +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| phpfpm.connections | active, max_active, idle | connections | +| phpfpm.requests | requests | requests/s | +| phpfpm.performance | max_children_reached, slow_requests | status | +| phpfpm.request_duration | min, max, avg | milliseconds | +| phpfpm.request_cpu | min, max, avg | percentage | +| phpfpm.request_mem | min, max, avg | KB | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable status page + +Uncomment the `pm.status_path = /status` variable in the `php-fpm` config file. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/phpfpm.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/phpfpm.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1/status?full&json | yes | +| socket | Server Unix socket. | | no | +| address | Server address in IP:PORT format. | | no | +| fcgi_path | Status path. | /status | no | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### HTTP + +Collecting data from a local instance over HTTP. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://localhost/status?full&json + +``` +</details> + +##### Unix socket + +Collecting data from a local instance over Unix socket. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + socket: '/tmp/php-fpm.sock' + +``` +</details> + +##### TCP socket + +Collecting data from a local instance over TCP socket. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:9000 + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://localhost/status?full&json + + - name: remote + url: http://203.0.113.10/status?full&json + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m phpfpm + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/metadata.yaml b/src/go/collectors/go.d.plugin/modules/phpfpm/metadata.yaml new file mode 100644 index 00000000000000..739e7b7b8a7009 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/metadata.yaml @@ -0,0 +1,230 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-phpfpm + plugin_name: go.d.plugin + module_name: phpfpm + monitored_instance: + name: PHP-FPM + link: https://php-fpm.org/ + icon_filename: php.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - phpfpm + - php + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors PHP-FPM instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable status page + description: | + Uncomment the `pm.status_path = /status` variable in the `php-fpm` config file. + configuration: + file: + name: go.d/phpfpm.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1/status?full&json + required: true + - name: socket + description: Server Unix socket. + default_value: "" + required: false + - name: address + description: Server address in IP:PORT format. + default_value: "" + required: false + - name: fcgi_path + description: Status path. + default_value: /status + required: false + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. 
+ default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: HTTP + description: Collecting data from a local instance over HTTP. + config: | + jobs: + - name: local + url: http://localhost/status?full&json + - name: Unix socket + description: Collecting data from a local instance over Unix socket. + config: | + jobs: + - name: local + socket: '/tmp/php-fpm.sock' + - name: TCP socket + description: Collecting data from a local instance over TCP socket. + config: | + jobs: + - name: local + address: 127.0.0.1:9000 + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://localhost/status?full&json + + - name: remote + url: http://203.0.113.10/status?full&json + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: phpfpm.connections + description: Active Connections + unit: connections + chart_type: line + dimensions: + - name: active + - name: max_active + - name: idle + - name: phpfpm.requests + description: Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: phpfpm.performance + description: Performance + unit: status + chart_type: line + dimensions: + - name: max_children_reached + - name: slow_requests + - name: phpfpm.request_duration + description: Requests Duration Among All Idle Processes + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: phpfpm.request_cpu + description: Last Request CPU Usage Among All Idle Processes + unit: percentage + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: phpfpm.request_mem + description: Last Request Memory Usage Among All Idle Processes + unit: KB + chart_type: line + dimensions: + - name: min + - name: max + - name: avg diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm.go b/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm.go new file mode 100644 index 00000000000000..a61827929137ff --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm.go @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("phpfpm", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Phpfpm { + return &Phpfpm{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/status?full&json", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + FcgiPath: "/status", + }, + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + Socket string `yaml:"socket"` + Address string `yaml:"address"` + FcgiPath string `yaml:"fcgi_path"` + } + Phpfpm struct { + module.Base + Config `yaml:",inline"` + + client client + } +) + +func (p *Phpfpm) Init() bool { + c, err := p.initClient() + if err != nil { + p.Errorf("init client: %v", err) + return false + } + p.client = c + return true +} + +func (p *Phpfpm) Check() bool { + return len(p.Collect()) > 0 +} + +func (Phpfpm) Charts() *Charts { + return charts.Copy() +} + +func (p *Phpfpm) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (Phpfpm) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm_test.go b/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm_test.go new file mode 100644 index 00000000000000..5b9ecd23651217 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/phpfpm_test.go @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpfpm + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testStatusJSON, _ = os.ReadFile("testdata/status.json") + testStatusFullJSON, _ = os.ReadFile("testdata/status-full.json") + testStatusFullNoIdleJSON, _ = os.ReadFile("testdata/status-full-no-idle.json") + testStatusText, _ = os.ReadFile("testdata/status.txt") + 
testStatusFullText, _ = os.ReadFile("testdata/status-full.txt")
+)
+
+func Test_readTestData(t *testing.T) {
+    assert.NotNil(t, testStatusJSON)
+    assert.NotNil(t, testStatusFullJSON)
+    assert.NotNil(t, testStatusFullNoIdleJSON)
+    assert.NotNil(t, testStatusText)
+    assert.NotNil(t, testStatusFullText)
+}
+
+func TestNew(t *testing.T) {
+    job := New()
+
+    assert.Implements(t, (*module.Module)(nil), job)
+}
+
+func TestPhpfpm_Init(t *testing.T) {
+    job := New()
+
+    got := job.Init()
+
+    require.True(t, got)
+    assert.NotNil(t, job.client)
+}
+
+func TestPhpfpm_Check(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusText)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL
+    require.True(t, job.Init())
+
+    got := job.Check()
+
+    assert.True(t, got)
+}
+
+func TestPhpfpm_CheckReturnsFalseOnFailure(t *testing.T) {
+    job := New()
+    job.URL = "http://127.0.0.1:38001/us"
+    require.True(t, job.Init())
+
+    got := job.Check()
+
+    assert.False(t, got)
+}
+
+func TestPhpfpm_Charts(t *testing.T) {
+    job := New()
+
+    got := job.Charts()
+
+    assert.NotNil(t, got)
+}
+
+func TestPhpfpm_CollectJSON(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusJSON)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL + "/?json"
+    require.True(t, job.Init())
+
+    got := job.Collect()
+
+    want := map[string]int64{
+        "active":    1,
+        "idle":      1,
+        "maxActive": 1,
+        "reached":   0,
+        "requests":  21,
+        "slow":      0,
+    }
+    assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectJSONFull(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusFullJSON)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL + "/?json"
+    require.True(t, job.Init())
+
+    got := job.Collect()
+
+    want := map[string]int64{
+        "active":    1,
+        "idle":      1,
+        "maxActive": 1,
+        "reached":   0,
+        "requests":  22,
+        "slow":      0,
+        "minReqCpu": 0,
+        "maxReqCpu": 10,
+        "avgReqCpu": 5,
+        "minReqDur": 0,
+        "maxReqDur": 919,
+        "avgReqDur": 459,
+        "minReqMem": 2093045,
+        "maxReqMem": 2097152,
+        "avgReqMem": 2095098,
+    }
+    assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectNoIdleProcessesJSONFull(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusFullNoIdleJSON)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL + "/?json"
+    require.True(t, job.Init())
+
+    got := job.Collect()
+
+    want := map[string]int64{
+        "active":    1,
+        "idle":      1,
+        "maxActive": 1,
+        "reached":   0,
+        "requests":  22,
+        "slow":      0,
+    }
+    assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectText(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusText)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL
+    require.True(t, job.Init())
+
+    got := job.Collect()
+
+    want := map[string]int64{
+        "active":    1,
+        "idle":      1,
+        "maxActive": 1,
+        "reached":   0,
+        "requests":  19,
+        "slow":      0,
+    }
+    assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectTextFull(t *testing.T) {
+    ts := httptest.NewServer(
+        http.HandlerFunc(
+            func(w http.ResponseWriter, r *http.Request) {
+                _, _ = w.Write(testStatusFullText)
+            }))
+    defer ts.Close()
+
+    job := New()
+    job.URL = ts.URL
+    require.True(t, job.Init())
+
+    got := job.Collect()
+
+    want :=
map[string]int64{ + "active": 1, + "idle": 1, + "maxActive": 1, + "reached": 0, + "requests": 20, + "slow": 0, + "minReqCpu": 0, + "maxReqCpu": 10, + "avgReqCpu": 5, + "minReqDur": 0, + "maxReqDur": 536, + "avgReqDur": 268, + "minReqMem": 2093045, + "maxReqMem": 2097152, + "avgReqMem": 2095098, + } + assert.Equal(t, want, got) +} + +func TestPhpfpm_CollectReturnsNothingWhenInvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye\nfrom someone\nfoobar")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + got := job.Collect() + + assert.Len(t, got, 0) +} + +func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte{}) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + got := job.Collect() + + assert.Len(t, got, 0) +} + +func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + + got := job.Collect() + + assert.Len(t, got, 0) +} + +func TestPhpfpm_Cleanup(t *testing.T) { + New().Cleanup() +} diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full-no-idle.json b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full-no-idle.json new file mode 100644 index 00000000000000..e5b63accd8887e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full-no-idle.json @@ -0,0 +1,63 @@ +{ + "pool": "www", + "process manager": "dynamic", + "start time": 1566371090, + "start since": 1119, + "accepted conn": 22, + "listen queue": 0, + "max listen queue": 0, + "listen queue len": 0, + "idle processes": 1, + "active processes": 1, + "total processes": 2, + "max active processes": 1, + "max children reached": 0, + "slow requests": 0, + "processes": [ + { + "pid": 67858, + "state": "Running", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 834, + "request method": "GET", + "request uri": "/status?json&full", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 0, + "last request memory": 0 + }, + { + "pid": 67859, + "state": "Running", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 919, + "request method": "GET", + "request uri": "/status?json", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 0, + "last request memory": 2097152 + }, + { + "pid": 67860, + "state": "Running", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 18446744073709551227, + "request method": "GET", + "request uri": "/status?json&full", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 10.0, + "last request memory": 2093045 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.json b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.json new file mode 100644 index 00000000000000..456f6253e1fbd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.json @@ -0,0 +1,63 @@ +{ + "pool": "www", + "process manager": 
"dynamic", + "start time": 1566371090, + "start since": 1119, + "accepted conn": 22, + "listen queue": 0, + "max listen queue": 0, + "listen queue len": 0, + "idle processes": 1, + "active processes": 1, + "total processes": 2, + "max active processes": 1, + "max children reached": 0, + "slow requests": 0, + "processes": [ + { + "pid": 67858, + "state": "Running", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 834, + "request method": "GET", + "request uri": "/status?json&full", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 0, + "last request memory": 0 + }, + { + "pid": 67859, + "state": "Idle", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 919, + "request method": "GET", + "request uri": "/status?json", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 0, + "last request memory": 2097152 + }, + { + "pid": 67860, + "state": "Idle", + "start time": 1566371090, + "start since": 1119, + "requests": 11, + "request duration": 18446744073709551227, + "request method": "GET", + "request uri": "/status?json&full", + "content length": 0, + "user": "-", + "script": "-", + "last request cpu": 10.0, + "last request memory": 2093045 + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.txt b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.txt new file mode 100644 index 00000000000000..a5e90987c0af9d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status-full.txt @@ -0,0 +1,59 @@ +pool: www +process manager: dynamic +start time: 21/Aug/2019:09:04:50 +0200 +start since: 1079 +accepted conn: 20 +listen queue: 0 +max listen queue: 0 +listen queue len: 0 +idle processes: 1 +active processes: 1 +total processes: 2 +max active processes: 1 +max children reached: 0 +slow requests: 0 + +************************ +pid: 67858 +state: Running +start time: 21/Aug/2019:09:04:50 +0200 +start since: 1079 +requests: 10 +request duration: 697 +request method: GET +request URI: /status?full +content length: 0 +user: - +script: - +last request cpu: 0.00 +last request memory: 0 + +************************ +pid: 67859 +state: Idle +start time: 21/Aug/2019:09:04:50 +0200 +start since: 1079 +requests: 10 +request duration: 536 +request method: GET +request URI: /status +content length: 0 +user: - +script: - +last request cpu: 0.00 +last request memory: 2097152 + +************************ +pid: 67860 +state: Idle +start time: 21/Aug/2019:09:04:50 +0200 +start since: 1079 +requests: 10 +request duration: 18446744073709551227 +request method: GET +request URI: /status?full +content length: 0 +user: - +script: - +last request cpu: 10.00 +last request memory: 2093045 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.json b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.json new file mode 100644 index 00000000000000..80af3e0bcc30dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.json @@ -0,0 +1,16 @@ +{ + "pool": "www", + "process manager": "dynamic", + "start time": 1566371090, + "start since": 1088, + "accepted conn": 21, + "listen queue": 0, + "max listen queue": 0, + "listen queue len": 0, + "idle processes": 1, + "active processes": 1, + "total processes": 2, + "max active processes": 1, + "max children reached": 0, + "slow requests": 0 +} \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.txt b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.txt new file mode 100644 index 00000000000000..08dc158fb06f6d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/phpfpm/testdata/status.txt @@ -0,0 +1,14 @@ +pool: www +process manager: dynamic +start time: 21/Aug/2019:09:04:50 +0200 +start since: 1066 +accepted conn: 19 +listen queue: 0 +max listen queue: 0 +listen queue len: 0 +idle processes: 1 +active processes: 1 +total processes: 2 +max active processes: 1 +max children reached: 0 +slow requests: 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pihole/README.md b/src/go/collectors/go.d.plugin/modules/pihole/README.md new file mode 120000 index 00000000000000..b8d3a7b4080ce5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/README.md @@ -0,0 +1 @@ +integrations/pi-hole.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pihole/charts.go b/src/go/collectors/go.d.plugin/modules/pihole/charts.go new file mode 100644 index 00000000000000..3a340bc07d2f41 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/charts.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioDNSQueriesTotal = module.Priority + iota + prioDNSQueries + prioDNSQueriesPerc + prioUniqueClients + prioDomainsOnBlocklist + prioBlocklistLastUpdate + prioUnwantedDomainsBlockingStatus + + prioDNSQueriesTypes + prioDNSQueriesForwardedDestination +) + +var baseCharts = module.Charts{ + chartDNSQueriesTotal.Copy(), + chartDNSQueries.Copy(), + chartDNSQueriesPerc.Copy(), + chartUniqueClients.Copy(), + chartDomainsOnBlocklist.Copy(), + chartBlocklistLastUpdate.Copy(), + chartUnwantedDomainsBlockingStatus.Copy(), +} + +var ( + chartDNSQueriesTotal = module.Chart{ + ID: "dns_queries_total", + Title: "DNS Queries Total (Cached, Blocked and Forwarded)", + Units: "queries", + Fam: "queries", + Ctx: "pihole.dns_queries_total", + Priority: prioDNSQueriesTotal, + Dims: module.Dims{ + {ID: "dns_queries_today", Name: "queries"}, + }, + } + chartDNSQueries = module.Chart{ + ID: "dns_queries", + Title: "DNS Queries", + Units: "queries", + Fam: "queries", + Ctx: "pihole.dns_queries", + Type: module.Stacked, + Priority: prioDNSQueries, + Dims: module.Dims{ + {ID: "queries_cached", Name: "cached"}, + {ID: "ads_blocked_today", Name: "blocked"}, + {ID: "queries_forwarded", Name: "forwarded"}, + }, + } + chartDNSQueriesPerc = module.Chart{ + ID: "dns_queries_percentage", + Title: "DNS Queries Percentage", + Units: "percentage", + Fam: "queries", + Ctx: "pihole.dns_queries_percentage", + Type: module.Stacked, + Priority: prioDNSQueriesPerc, + Dims: module.Dims{ + {ID: "queries_cached_perc", Name: "cached", Div: precision}, + {ID: "ads_blocked_today_perc", Name: "blocked", Div: precision}, + {ID: "queries_forwarded_perc", Name: "forwarded", Div: precision}, + }, + } + chartUniqueClients = module.Chart{ + ID: "unique_clients", + Title: "Unique Clients", + Units: "clients", + Fam: "clients", + Ctx: "pihole.unique_clients", + Priority: prioUniqueClients, + Dims: module.Dims{ + {ID: "unique_clients", Name: "unique"}, + }, + } + chartDomainsOnBlocklist = module.Chart{ + ID: "domains_on_blocklist", + Title: "Domains On Blocklist", + Units: "domains", + Fam: "blocklist", + Ctx: "pihole.domains_on_blocklist", + Priority: prioDomainsOnBlocklist, + Dims: module.Dims{ + {ID: 
"domains_being_blocked", Name: "blocklist"}, + }, + } + chartBlocklistLastUpdate = module.Chart{ + ID: "blocklist_last_update", + Title: "Blocklist Last Update", + Units: "seconds", + Fam: "blocklist", + Ctx: "pihole.blocklist_last_update", + Priority: prioBlocklistLastUpdate, + Dims: module.Dims{ + {ID: "blocklist_last_update", Name: "ago"}, + }, + } + chartUnwantedDomainsBlockingStatus = module.Chart{ + ID: "unwanted_domains_blocking_status", + Title: "Unwanted Domains Blocking Status", + Units: "status", + Fam: "status", + Ctx: "pihole.unwanted_domains_blocking_status", + Priority: prioUnwantedDomainsBlockingStatus, + Dims: module.Dims{ + {ID: "blocking_status_enabled", Name: "enabled"}, + {ID: "blocking_status_disabled", Name: "disabled"}, + }, + } +) + +var ( + chartDNSQueriesTypes = module.Chart{ + ID: "dns_queries_types", + Title: "DNS Queries Per Type", + Units: "percentage", + Fam: "doQuery types", + Ctx: "pihole.dns_queries_types", + Type: module.Stacked, + Priority: prioDNSQueriesTypes, + Dims: module.Dims{ + {ID: "A", Div: 100}, + {ID: "AAAA", Div: 100}, + {ID: "ANY", Div: 100}, + {ID: "PTR", Div: 100}, + {ID: "SOA", Div: 100}, + {ID: "SRV", Div: 100}, + {ID: "TXT", Div: 100}, + }, + } + chartDNSQueriesForwardedDestination = module.Chart{ + ID: "dns_queries_forwarded_destination", + Title: "DNS Queries Per Destination", + Units: "percentage", + Fam: "queries answered by", + Ctx: "pihole.dns_queries_forwarded_destination", + Type: module.Stacked, + Priority: prioDNSQueriesForwardedDestination, + Dims: module.Dims{ + {ID: "destination_cached", Name: "cached", Div: 100}, + {ID: "destination_blocked", Name: "blocked", Div: 100}, + {ID: "destination_other", Name: "other", Div: 100}, + }, + } +) + +func (p *Pihole) addChartDNSQueriesType() { + chart := chartDNSQueriesTypes.Copy() + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} + +func (p *Pihole) addChartDNSQueriesForwardedDestinations() { + chart := chartDNSQueriesForwardedDestination.Copy() + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/collect.go b/src/go/collectors/go.d.plugin/modules/pihole/collect.go new file mode 100644 index 00000000000000..f8a3d760da259d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/collect.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const wantAPIVersion = 3 + +const ( + urlPathAPI = "/admin/api.php" + urlQueryKeyAuth = "auth" + urlQueryKeyAPIVersion = "version" + urlQueryKeySummaryRaw = "summaryRaw" + urlQueryKeyGetQueryTypes = "getQueryTypes" // need auth + urlQueryKeyGetForwardDestinations = "getForwardDestinations" // need auth +) + +const ( + precision = 1000 +) + +func (p *Pihole) collect() (map[string]int64, error) { + if p.checkVersion { + ver, err := p.queryAPIVersion() + if err != nil { + return nil, err + } + if ver != wantAPIVersion { + return nil, fmt.Errorf("API version: %d, supported version: %d", ver, wantAPIVersion) + } + p.checkVersion = false + } + + pmx := new(piholeMetrics) + p.queryMetrics(pmx, true) + + if pmx.hasQueryTypes() { + p.addQueriesTypesOnce.Do(p.addChartDNSQueriesType) + } + if pmx.hasForwarders() { + p.addFwsDestinationsOnce.Do(p.addChartDNSQueriesForwardedDestinations) + } + + mx := make(map[string]int64) + p.collectMetrics(mx, pmx) + + return mx, nil +} + 
+func (p *Pihole) collectMetrics(mx map[string]int64, pmx *piholeMetrics) { + if pmx.hasSummary() { + mx["ads_blocked_today"] = pmx.summary.AdsBlockedToday + mx["ads_percentage_today"] = int64(pmx.summary.AdsPercentageToday * 100) + mx["domains_being_blocked"] = pmx.summary.DomainsBeingBlocked + // GravityLastUpdated.Absolute is <nil> if the file does not exist (deleted/moved) + if pmx.summary.GravityLastUpdated.Absolute != nil { + mx["blocklist_last_update"] = time.Now().Unix() - *pmx.summary.GravityLastUpdated.Absolute + } + mx["dns_queries_today"] = pmx.summary.DNSQueriesToday + mx["queries_forwarded"] = pmx.summary.QueriesForwarded + mx["queries_cached"] = pmx.summary.QueriesCached + mx["unique_clients"] = pmx.summary.UniqueClients + mx["blocking_status_enabled"] = boolToInt(pmx.summary.Status == "enabled") + mx["blocking_status_disabled"] = boolToInt(pmx.summary.Status != "enabled") + + tot := pmx.summary.QueriesCached + pmx.summary.AdsBlockedToday + pmx.summary.QueriesForwarded + mx["queries_cached_perc"] = calcPercentage(pmx.summary.QueriesCached, tot) + mx["ads_blocked_today_perc"] = calcPercentage(pmx.summary.AdsBlockedToday, tot) + mx["queries_forwarded_perc"] = calcPercentage(pmx.summary.QueriesForwarded, tot) + } + + if pmx.hasQueryTypes() { + mx["A"] = int64(pmx.queryTypes.Types.A * 100) + mx["AAAA"] = int64(pmx.queryTypes.Types.AAAA * 100) + mx["ANY"] = int64(pmx.queryTypes.Types.ANY * 100) + mx["PTR"] = int64(pmx.queryTypes.Types.PTR * 100) + mx["SOA"] = int64(pmx.queryTypes.Types.SOA * 100) + mx["SRV"] = int64(pmx.queryTypes.Types.SRV * 100) + mx["TXT"] = int64(pmx.queryTypes.Types.TXT * 100) + } + + if pmx.hasForwarders() { + for k, v := range pmx.forwarders.Destinations { + name := strings.Split(k, "|")[0] + mx["destination_"+name] = int64(v * 100) + } + } +} + +func (p *Pihole) queryMetrics(pmx *piholeMetrics, doConcurrently bool) { + type task func(*piholeMetrics) + + var tasks = []task{p.querySummary} + + if p.Password != "" { + tasks = []task{ + p.querySummary, + p.queryQueryTypes, + p.queryForwardedDestinations, + } + } + + wg := &sync.WaitGroup{} + + wrap := func(call task) task { + return func(metrics *piholeMetrics) { call(metrics); wg.Done() } + } + + for _, task := range tasks { + if doConcurrently { + wg.Add(1) + task = wrap(task) + go task(pmx) + } else { + task(pmx) + } + } + + wg.Wait() +} + +func (p *Pihole) querySummary(pmx *piholeMetrics) { + req, err := web.NewHTTPRequest(p.Request) + if err != nil { + p.Error(err) + return + } + + req.URL.Path = urlPathAPI + req.URL.RawQuery = url.Values{ + urlQueryKeyAuth: []string{p.Password}, + urlQueryKeySummaryRaw: []string{"true"}, + }.Encode() + + var v summaryRawMetrics + if err = p.doWithDecode(&v, req); err != nil { + p.Error(err) + return + } + + pmx.summary = &v +} + +func (p *Pihole) queryQueryTypes(pmx *piholeMetrics) { + req, err := web.NewHTTPRequest(p.Request) + if err != nil { + p.Error(err) + return + } + + req.URL.Path = urlPathAPI + req.URL.RawQuery = url.Values{ + urlQueryKeyAuth: []string{p.Password}, + urlQueryKeyGetQueryTypes: []string{"true"}, + }.Encode() + + var v queryTypesMetrics + err = p.doWithDecode(&v, req) + if err != nil { + p.Error(err) + return + } + + pmx.queryTypes = &v +} + +func (p *Pihole) queryForwardedDestinations(pmx *piholeMetrics) { + req, err := web.NewHTTPRequest(p.Request) + if err != nil { + p.Error(err) + return + } + + req.URL.Path = urlPathAPI + req.URL.RawQuery = url.Values{ + urlQueryKeyAuth: []string{p.Password}, + urlQueryKeyGetForwardDestinations: 
[]string{"true"}, + }.Encode() + + var v forwardDestinations + err = p.doWithDecode(&v, req) + if err != nil { + p.Error(err) + return + } + + pmx.forwarders = &v +} + +func (p *Pihole) queryAPIVersion() (int, error) { + req, err := web.NewHTTPRequest(p.Request) + if err != nil { + return 0, err + } + + req.URL.Path = urlPathAPI + req.URL.RawQuery = url.Values{ + urlQueryKeyAuth: []string{p.Password}, + urlQueryKeyAPIVersion: []string{"true"}, + }.Encode() + + var v piholeAPIVersion + err = p.doWithDecode(&v, req) + if err != nil { + return 0, err + } + + return v.Version, nil +} + +func (p *Pihole) doWithDecode(dst interface{}, req *http.Request) error { + resp, err := p.httpClient.Do(req) + if err != nil { + return err + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned %d status code", req.URL, resp.StatusCode) + } + + content, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error on reading response from %s : %v", req.URL, err) + } + + // empty array if unauthorized query or wrong query + if isEmptyArray(content) { + return fmt.Errorf("unauthorized access to %s", req.URL) + } + + if err := json.Unmarshal(content, dst); err != nil { + return fmt.Errorf("error on parsing response from %s : %v", req.URL, err) + } + + return nil +} + +func isEmptyArray(data []byte) bool { + empty := "[]" + return len(data) == len(empty) && string(data) == empty +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func boolToInt(b bool) int64 { + if !b { + return 0 + } + return 1 +} + +func calcPercentage(value, total int64) (v int64) { + if total == 0 { + return 0 + } + return int64(float64(value) * 100 / float64(total) * precision) +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/config_schema.json b/src/go/collectors/go.d.plugin/modules/pihole/config_schema.json new file mode 100644 index 00000000000000..e4c13fa10d23ab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/config_schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pihole job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "setup_vars_path": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/init.go b/src/go/collectors/go.d.plugin/modules/pihole/init.go new file mode 100644 index 00000000000000..a6dc2f42dbd5b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/init.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +import ( + "bufio" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *Pihole) validateConfig() error { + if p.URL == "" { + return 
errors.New("url not set") + } + return nil +} + +func (p *Pihole) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(p.Client) +} + +func (p *Pihole) getWebPassword() string { + // do no read setupVarsPath is password is set in the configuration file + if p.Password != "" { + return p.Password + } + if !isLocalHost(p.URL) { + p.Info("abort web password auto detection, host is not localhost") + return "" + } + + p.Infof("starting web password auto detection, reading : %s", p.SetupVarsPath) + pass, err := getWebPassword(p.SetupVarsPath) + if err != nil { + p.Warningf("error during reading '%s' : %v", p.SetupVarsPath, err) + } + + return pass +} + +func getWebPassword(path string) (string, error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer func() { _ = f.Close() }() + + s := bufio.NewScanner(f) + var password string + + for s.Scan() && password == "" { + if strings.HasPrefix(s.Text(), "WEBPASSWORD") { + parts := strings.Split(s.Text(), "=") + if len(parts) != 2 { + return "", fmt.Errorf("unparsable line : %s", s.Text()) + } + password = parts[1] + } + } + + return password, nil +} + +func isLocalHost(u string) bool { + if strings.Contains(u, "127.0.0.1") { + return true + } + if strings.Contains(u, "localhost") { + return true + } + + return false +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/integrations/pi-hole.md b/src/go/collectors/go.d.plugin/modules/pihole/integrations/pi-hole.md new file mode 100644 index 00000000000000..2ee2eb1a0bac5c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/integrations/pi-hole.md @@ -0,0 +1,224 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/pihole/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/pihole/metadata.yaml" +sidebar_label: "Pi-hole" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Pi-hole + + +<img src="https://netdata.cloud/img/pihole.png" width="150"/> + + +Plugin: go.d.plugin +Module: pihole + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE). + +The data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the +module's collection interval. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Pi-hole instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| pihole.dns_queries_total | queries | queries | +| pihole.dns_queries | cached, blocked, forwarded | queries | +| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage | +| pihole.unique_clients | unique | clients | +| pihole.domains_on_blocklist | blocklist | domains | +| pihole.blocklist_last_update | ago | seconds | +| pihole.unwanted_domains_blocking_status | enabled, disabled | status | +| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage | +| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time | +| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/pihole.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/pihole.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1 | yes | +| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no | +| timeout | HTTP request timeout. | 5 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1 + +``` +</details> + +##### HTTPS with self-signed certificate + +Remote instance with enabled HTTPS and self-signed certificate. 
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://203.0.113.11 + tls_skip_verify: yes + password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1 + + - name: remote + url: http://203.0.113.10 + password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m pihole + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/pihole/metadata.yaml b/src/go/collectors/go.d.plugin/modules/pihole/metadata.yaml new file mode 100644 index 00000000000000..b6ef9656f102d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/metadata.yaml @@ -0,0 +1,248 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-pihole + plugin_name: go.d.plugin + module_name: pihole + monitored_instance: + name: Pi-hole + link: https://pi-hole.net + icon_filename: pihole.png + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - pihole + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Pi-hole instances using [PHP API](https://github.com/pi-hole/AdminLTE). + + The data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the + module's collection interval. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/pihole.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1 + required: true + - name: setup_vars_path + description: Path to setupVars.conf. This file is used to get the web password. + default_value: /etc/pihole/setupVars.conf + required: false + - name: timeout + description: HTTP request timeout. + default_value: 5 + required: false + - name: username + description: Username for basic HTTP authentication. 
+ default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1 + - name: HTTPS with self-signed certificate + description: Remote instance with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://203.0.113.11 + tls_skip_verify: yes + password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1 + + - name: remote + url: http://203.0.113.10 + password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b + troubleshooting: + problems: + list: [] + alerts: + - name: pihole_blocklist_last_update + metric: pihole.blocklist_last_update + info: "gravity.list (blocklist) file last update time" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf + - name: pihole_status + metric: pihole.unwanted_domains_blocking_status + info: unwanted domains blocking is disabled + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: pihole.dns_queries_total + description: DNS Queries Total (Cached, Blocked and Forwarded) + unit: queries + chart_type: line + dimensions: + - name: queries + - name: pihole.dns_queries + description: DNS Queries + unit: queries + chart_type: stacked + dimensions: + - name: cached + - name: blocked + - name: forwarded + - name: pihole.dns_queries_percentage + description: DNS Queries Percentage + unit: percentage + chart_type: stacked + dimensions: + - name: cached + - name: blocked + - name: forwarded + - name: pihole.unique_clients + description: Unique Clients + unit: clients + chart_type: line + dimensions: + - name: unique + - name: pihole.domains_on_blocklist + description: Domains On Blocklist + unit: domains + chart_type: line + dimensions: + - name: blocklist + - name: pihole.blocklist_last_update + description: Blocklist Last Update + unit: seconds + chart_type: line + dimensions: + - name: ago + - name: pihole.unwanted_domains_blocking_status + description: Unwanted Domains Blocking Status + unit: status + chart_type: line + dimensions: + - name: enabled + - name: disabled + - name: pihole.dns_queries_types + description: DNS Queries Per Type + unit: percentage + chart_type: stacked + dimensions: + - name: a + - name: aaaa + - name: any + - name: ptr + - name: soa + - name: srv + - name: txt + - name: pihole.dns_queries_forwarded_destination + description: DNS Queries Per Destination + unit: percentage + chart_type: stacked + dimensions: + - name: cached + - name: blocked + - name: other diff --git a/src/go/collectors/go.d.plugin/modules/pihole/metrics.go b/src/go/collectors/go.d.plugin/modules/pihole/metrics.go new file mode 100644 index 00000000000000..dd4b3b644cdb28 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/metrics.go @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +type piholeMetrics struct { + summary *summaryRawMetrics // ?summary + queryTypes *queryTypesMetrics // ?getQueryTypes + forwarders *forwardDestinations // ?getForwardedDestinations +} + +func (p piholeMetrics) hasSummary() bool { + return p.summary != nil +} +func (p piholeMetrics) hasQueryTypes() bool { + return p.queryTypes != nil +} +func (p piholeMetrics) hasForwarders() bool { + return p.forwarders != nil && len(p.forwarders.Destinations) > 0 +} + +type piholeAPIVersion struct { + Version int +} + +type summaryRawMetrics struct { + DomainsBeingBlocked int64 `json:"domains_being_blocked"` + DNSQueriesToday int64 `json:"dns_queries_today"` + AdsBlockedToday int64 `json:"ads_blocked_today"` + AdsPercentageToday float64 `json:"ads_percentage_today"` + UniqueDomains int64 `json:"unique_domains"` + QueriesForwarded int64 `json:"queries_forwarded"` + QueriesCached int64 `json:"queries_cached"` + ClientsEverSeen int64 `json:"clients_ever_seen"` + UniqueClients int64 `json:"unique_clients"` + DNSQueriesAllTypes int64 `json:"dns_queries_all_types"` + ReplyNODATA int64 `json:"reply_NODATA"` + ReplyNXDOMAIN int64 `json:"reply_NXDOMAIN"` + ReplyCNAME int64 `json:"reply_CNAME"` + ReplyIP int64 `json:"reply_IP"` + PrivacyLevel int64 `json:"privacy_level"` + Status string `json:"status"` + GravityLastUpdated struct { + // gravity.list has been removed (https://github.com/pi-hole/pi-hole/pull/2871#issuecomment-520251509) + FileExists bool `json:"file_exists"` + Absolute *int64 + } `json:"gravity_last_updated"` +} + +type queryTypesMetrics struct { + Types struct { + A float64 `json:"A (IPv4)"` + AAAA float64 `json:"AAAA (IPv6)"` + ANY 
float64 + SRV float64 + SOA float64 + PTR float64 + TXT float64 + } `json:"querytypes"` +} + +// https://github.com/pi-hole/FTL/blob/6f69dd5b4ca60f925d68bfff3869350e934a7240/src/api/api.c#L474 +type forwardDestinations struct { + Destinations map[string]float64 `json:"forward_destinations"` +} + +//type ( +// item map[string]int64 +// +// topClients struct { +// Sources item `json:"top_sources"` +// } +// topItems struct { +// TopQueries item `json:"top_queries"` +// TopAds item `json:"top_ads"` +// } +//) +// +//func (i *item) UnmarshalJSON(data []byte) error { +// if isEmptyArray(data) { +// return nil +// } +// type plain *item +// return json.Unmarshal(data, (plain)(i)) +//} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/pihole.go b/src/go/collectors/go.d.plugin/modules/pihole/pihole.go new file mode 100644 index 00000000000000..6aba5cad02b42c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/pihole.go @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +import ( + _ "embed" + "net/http" + "sync" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("pihole", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Pihole { + return &Pihole{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}}, + }, + SetupVarsPath: "/etc/pihole/setupVars.conf", + }, + checkVersion: true, + charts: baseCharts.Copy(), + addQueriesTypesOnce: &sync.Once{}, + addFwsDestinationsOnce: &sync.Once{}, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + SetupVarsPath string `yaml:"setup_vars_path"` +} + +type Pihole struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + addQueriesTypesOnce *sync.Once + addFwsDestinationsOnce *sync.Once + + httpClient *http.Client + checkVersion bool +} + +func (p *Pihole) Init() bool { + if err := p.validateConfig(); err != nil { + p.Errorf("config validation: %v", err) + return false + } + + httpClient, err := p.initHTTPClient() + if err != nil { + p.Errorf("init http client: %v", err) + return false + } + p.httpClient = httpClient + + p.Password = p.getWebPassword() + if p.Password == "" { + p.Warning("no web password, not all metrics available") + } else { + p.Debugf("web password: %s", p.Password) + } + + return true +} + +func (p *Pihole) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Pihole) Charts() *module.Charts { + return p.charts +} + +func (p *Pihole) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (p *Pihole) Cleanup() { + if p.httpClient != nil { + p.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/pihole_test.go b/src/go/collectors/go.d.plugin/modules/pihole/pihole_test.go new file mode 100644 index 00000000000000..08ad244a79ffc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/pihole_test.go @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pihole + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + pathSetupVarsOK = "testdata/setupVars.conf" + pathSetupVarsWrong = "testdata/wrong.conf" +) + +var ( + dataEmptyResp = []byte("[]") + dataSummaryRawResp, _ = os.ReadFile("testdata/summaryRaw.json") + dataGetQueryTypesResp, _ = os.ReadFile("testdata/getQueryTypes.json") + dataGetForwardDestinationsResp, _ = os.ReadFile("testdata/getForwardDestinations.json") +) + +func TestPihole_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + p := New() + p.Config = test.config + + if test.wantFail { + assert.False(t, p.Init()) + } else { + assert.True(t, p.Init()) + } + }) + } +} + +func TestPihole_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (p *Pihole, cleanup func()) + }{ + "success with web password": { + wantFail: false, + prepare: caseSuccessWithWebPassword, + }, + "fail without web password": { + wantFail: true, + prepare: caseFailNoWebPassword, + }, + "fail on unsupported version": { + wantFail: true, + prepare: caseFailUnsupportedVersion, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + p, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, p.Check()) + } else { + assert.True(t, p.Check()) + } + }) + } +} + +func TestPihole_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestPihole_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (p *Pihole, cleanup func()) + wantMetrics map[string]int64 + wantNumCharts int + }{ + "success with web password": { + prepare: caseSuccessWithWebPassword, + wantNumCharts: len(baseCharts) + 2, + wantMetrics: map[string]int64{ + "A": 1229, + "AAAA": 1229, + "ANY": 100, + "PTR": 7143, + "SOA": 100, + "SRV": 100, + "TXT": 100, + "ads_blocked_today": 1, + "ads_blocked_today_perc": 33333, + "ads_percentage_today": 100, + "blocking_status_disabled": 0, + "blocking_status_enabled": 1, + "blocklist_last_update": 106273651, + "destination_blocked": 220, + "destination_cached": 8840, + "destination_other": 940, + "dns_queries_today": 1, + "domains_being_blocked": 1, + "queries_cached": 1, + "queries_cached_perc": 33333, + "queries_forwarded": 1, + "queries_forwarded_perc": 33333, + "unique_clients": 1, + }, + }, + "fail without web password": { + prepare: caseFailNoWebPassword, + wantMetrics: nil, + }, + "fail on unsupported version": { + prepare: caseFailUnsupportedVersion, + wantMetrics: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + p, cleanup := test.prepare(t) + defer cleanup() + + mx := p.Collect() + + copyBlockListLastUpdate(mx, test.wantMetrics) + require.Equal(t, test.wantMetrics, mx) + if len(test.wantMetrics) > 0 { + assert.Len(t, *p.Charts(), test.wantNumCharts) + } + }) + } +} + +func caseSuccessWithWebPassword(t *testing.T) (*Pihole, func()) { + p, srv := New(), mockPiholeServer{}.newPiholeHTTPServer() + + p.SetupVarsPath = pathSetupVarsOK + p.URL = srv.URL + + require.True(t, p.Init()) + + return p, srv.Close +} + +func caseFailNoWebPassword(t *testing.T) (*Pihole, func()) { + p, srv := New(), mockPiholeServer{}.newPiholeHTTPServer() + + 
p.SetupVarsPath = pathSetupVarsWrong + p.URL = srv.URL + + require.True(t, p.Init()) + + return p, srv.Close +} + +func caseFailUnsupportedVersion(t *testing.T) (*Pihole, func()) { + p, srv := New(), mockPiholeServer{unsupportedVersion: true}.newPiholeHTTPServer() + + p.SetupVarsPath = pathSetupVarsOK + p.URL = srv.URL + + require.True(t, p.Init()) + + return p, srv.Close +} + +type mockPiholeServer struct { + unsupportedVersion bool + errOnAPIVersion bool + errOnSummary bool + errOnQueryTypes bool + errOnGetForwardDst bool + errOnTopClients bool + errOnTopItems bool +} + +func (m mockPiholeServer) newPiholeHTTPServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != urlPathAPI || len(r.URL.Query()) == 0 { + w.WriteHeader(http.StatusBadRequest) + } + + if r.URL.Query().Get(urlQueryKeyAuth) == "" { + _, _ = w.Write(dataEmptyResp) + return + } + + if r.URL.Query().Has(urlQueryKeyAPIVersion) { + if m.errOnAPIVersion { + w.WriteHeader(http.StatusNotFound) + } else if m.unsupportedVersion { + _, _ = w.Write([]byte(fmt.Sprintf(`{"version": %d}`, wantAPIVersion+1))) + } else { + _, _ = w.Write([]byte(fmt.Sprintf(`{"version": %d}`, wantAPIVersion))) + } + return + } + + if r.URL.Query().Has(urlQueryKeySummaryRaw) { + if m.errOnSummary { + w.WriteHeader(http.StatusNotFound) + } else { + _, _ = w.Write(dataSummaryRawResp) + } + return + } + + data := dataEmptyResp + isErr := false + switch { + case r.URL.Query().Has(urlQueryKeyGetQueryTypes): + data, isErr = dataGetQueryTypesResp, m.errOnQueryTypes + case r.URL.Query().Has(urlQueryKeyGetForwardDestinations): + data, isErr = dataGetForwardDestinationsResp, m.errOnGetForwardDst + } + + if isErr { + w.WriteHeader(http.StatusNotFound) + } else { + _, _ = w.Write(data) + } + })) +} + +func copyBlockListLastUpdate(dst, src map[string]int64) { + k := "blocklist_last_update" + if v, ok := src[k]; ok { + if _, ok := dst[k]; ok { + dst[k] = v + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/pihole/testdata/getForwardDestinations.json b/src/go/collectors/go.d.plugin/modules/pihole/testdata/getForwardDestinations.json new file mode 100644 index 00000000000000..3bfc646d0893cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/testdata/getForwardDestinations.json @@ -0,0 +1,7 @@ +{ + "forward_destinations": { + "blocked|blocked": 2.2, + "cached|cached": 88.4, + "other|other": 9.4 + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pihole/testdata/getQueryTypes.json b/src/go/collectors/go.d.plugin/modules/pihole/testdata/getQueryTypes.json new file mode 100644 index 00000000000000..cf7f19f958695d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/testdata/getQueryTypes.json @@ -0,0 +1,11 @@ +{ + "querytypes": { + "A (IPv4)": 12.29, + "AAAA (IPv6)": 12.29, + "ANY": 1, + "SRV": 1, + "SOA": 1, + "PTR": 71.43, + "TXT": 1 + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pihole/testdata/setupVars.conf b/src/go/collectors/go.d.plugin/modules/pihole/testdata/setupVars.conf new file mode 100644 index 00000000000000..97f2602971852b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/testdata/setupVars.conf @@ -0,0 +1,11 @@ +WEBPASSWORD=1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b +BLOCKING_ENABLED=true +PIHOLE_INTERFACE=enp0s9 +IPV4_ADDRESS=192.168.88.228/24 +IPV6_ADDRESS= +PIHOLE_DNS_1=208.67.222.222 +PIHOLE_DNS_2=208.67.220.220 +QUERY_LOGGING=true 
+INSTALL_WEB_SERVER=true +INSTALL_WEB_INTERFACE=true +LIGHTTPD_ENABLED=true \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pihole/testdata/summaryRaw.json b/src/go/collectors/go.d.plugin/modules/pihole/testdata/summaryRaw.json new file mode 100644 index 00000000000000..8a4e59c160802b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pihole/testdata/summaryRaw.json @@ -0,0 +1,27 @@ +{ + "domains_being_blocked": 1, + "dns_queries_today": 1, + "ads_blocked_today": 1, + "ads_percentage_today": 1, + "unique_domains": 1, + "queries_forwarded": 1, + "queries_cached": 1, + "clients_ever_seen": 1, + "unique_clients": 1, + "dns_queries_all_types": 1, + "reply_NODATA": 1, + "reply_NXDOMAIN": 1, + "reply_CNAME": 1, + "reply_IP": 1, + "privacy_level": 1, + "status": "enabled", + "gravity_last_updated": { + "file_exists": true, + "absolute": 1560443834, + "relative": { + "days": "3", + "hours": "06", + "minutes": "05" + } + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pika/README.md b/src/go/collectors/go.d.plugin/modules/pika/README.md new file mode 120000 index 00000000000000..5e3a8da77d5851 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/README.md @@ -0,0 +1 @@ +integrations/pika.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pika/charts.go b/src/go/collectors/go.d.plugin/modules/pika/charts.go new file mode 100644 index 00000000000000..16041c112cb455 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/charts.go @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import "github.com/netdata/go.d.plugin/agent/module" + +var pikaCharts = module.Charts{ + chartConnections.Copy(), + chartClients.Copy(), + + chartMemory.Copy(), + + chartConnectedReplicas.Copy(), + + chartCommands.Copy(), + chartCommandsCalls.Copy(), + + chartDbStringsKeys.Copy(), + chartDbStringsExpiresKeys.Copy(), + chartDbStringsInvalidKeys.Copy(), + chartDbHashesKeys.Copy(), + chartDbHashesExpiresKeys.Copy(), + chartDbHashesInvalidKeys.Copy(), + chartDbListsKeys.Copy(), + chartDbListsExpiresKeys.Copy(), + chartDbListsInvalidKeys.Copy(), + chartDbZsetsKeys.Copy(), + chartDbZsetsExpiresKeys.Copy(), + chartDbZsetsInvalidKeys.Copy(), + chartDbSetsKeys.Copy(), + chartDbSetsExpiresKeys.Copy(), + chartDbSetsInvalidKeys.Copy(), + + chartUptime.Copy(), +} + +var ( + chartConnections = module.Chart{ + ID: "connections", + Title: "Connections", + Units: "connections/s", + Fam: "connections", + Ctx: "pika.connections", + Dims: module.Dims{ + {ID: "total_connections_received", Name: "accepted", Algo: module.Incremental}, + }, + } + chartClients = module.Chart{ + ID: "clients", + Title: "Clients", + Units: "clients", + Fam: "connections", + Ctx: "pika.clients", + Dims: module.Dims{ + {ID: "connected_clients", Name: "connected"}, + }, + } +) + +var ( + chartMemory = module.Chart{ + ID: "memory", + Title: "Memory usage", + Units: "bytes", + Fam: "memory", + Ctx: "pika.memory", + Type: module.Area, + Dims: module.Dims{ + {ID: "used_memory", Name: "used"}, + }, + } +) + +var ( + chartConnectedReplicas = module.Chart{ + ID: "connected_replicas", + Title: "Connected replicas", + Units: "replicas", + Fam: "replication", + Ctx: "pika.connected_replicas", + Dims: module.Dims{ + {ID: "connected_slaves", Name: "connected"}, + }, + } +) + +var ( + chartCommands = module.Chart{ + ID: "commands", + Title: "Processed commands", + Units: "commands/s", + Fam: "commands", + Ctx: "pika.commands", + 
Dims: module.Dims{ + {ID: "total_commands_processed", Name: "processed", Algo: module.Incremental}, + }, + } + chartCommandsCalls = module.Chart{ + ID: "commands_calls", + Title: "Calls per command", + Units: "calls/s", + Fam: "commands", + Ctx: "pika.commands_calls", + Type: module.Stacked, + } +) + +var ( + chartDbStringsKeys = module.Chart{ + ID: "database_strings_keys", + Title: "Strings type keys per database", + Units: "keys", + Fam: "keyspace strings", + Ctx: "pika.database_strings_keys", + Type: module.Stacked, + } + chartDbStringsExpiresKeys = module.Chart{ + ID: "database_strings_expires_keys", + Title: "Strings type expires keys per database", + Units: "keys", + Fam: "keyspace strings", + Ctx: "pika.database_strings_expires_keys", + Type: module.Stacked, + } + chartDbStringsInvalidKeys = module.Chart{ + ID: "database_strings_invalid_keys", + Title: "Strings type invalid keys per database", + Units: "keys", + Fam: "keyspace strings", + Ctx: "pika.database_strings_invalid_keys", + Type: module.Stacked, + } + + chartDbHashesKeys = module.Chart{ + ID: "database_hashes_keys", + Title: "Hashes type keys per database", + Units: "keys", + Fam: "keyspace hashes", + Ctx: "pika.database_hashes_keys", + Type: module.Stacked, + } + chartDbHashesExpiresKeys = module.Chart{ + ID: "database_hashes_expires_keys", + Title: "Hashes type expires keys per database", + Units: "keys", + Fam: "keyspace hashes", + Ctx: "pika.database_hashes_expires_keys", + Type: module.Stacked, + } + chartDbHashesInvalidKeys = module.Chart{ + ID: "database_hashes_invalid_keys", + Title: "Hashes type invalid keys per database", + Units: "keys", + Fam: "keyspace hashes", + Ctx: "pika.database_hashes_invalid_keys", + Type: module.Stacked, + } + + chartDbListsKeys = module.Chart{ + ID: "database_lists_keys", + Title: "Lists type keys per database", + Units: "keys", + Fam: "keyspace lists", + Ctx: "pika.database_lists_keys", + Type: module.Stacked, + } + chartDbListsExpiresKeys = module.Chart{ + ID: "database_lists_expires_keys", + Title: "Lists type expires keys per database", + Units: "keys", + Fam: "keyspace lists", + Ctx: "pika.database_lists_expires_keys", + Type: module.Stacked, + } + chartDbListsInvalidKeys = module.Chart{ + ID: "database_lists_invalid_keys", + Title: "Lists type invalid keys per database", + Units: "keys", + Fam: "keyspace lists", + Ctx: "pika.database_lists_invalid_keys", + Type: module.Stacked, + } + + chartDbZsetsKeys = module.Chart{ + ID: "database_zsets_keys", + Title: "Zsets type keys per database", + Units: "keys", + Fam: "keyspace zsets", + Ctx: "pika.database_zsets_keys", + Type: module.Stacked, + } + chartDbZsetsExpiresKeys = module.Chart{ + ID: "database_zsets_expires_keys", + Title: "Zsets type expires keys per database", + Units: "keys", + Fam: "keyspace zsets", + Ctx: "pika.database_zsets_expires_keys", + Type: module.Stacked, + } + chartDbZsetsInvalidKeys = module.Chart{ + ID: "database_zsets_invalid_keys", + Title: "Zsets type invalid keys per database", + Units: "keys", + Fam: "keyspace zsets", + Ctx: "pika.database_zsets_invalid_keys", + Type: module.Stacked, + } + + chartDbSetsKeys = module.Chart{ + ID: "database_sets_keys", + Title: "Sets type keys per database", + Units: "keys", + Fam: "keyspace sets", + Ctx: "pika.database_sets_keys", + Type: module.Stacked, + } + chartDbSetsExpiresKeys = module.Chart{ + ID: "database_sets_expires_keys", + Title: "Sets type expires keys per database", + Units: "keys", + Fam: "keyspace sets", + Ctx: "pika.database_sets_expires_keys", + Type: 
module.Stacked, + } + chartDbSetsInvalidKeys = module.Chart{ + ID: "database_sets_invalid_keys", + Title: "Sets invalid keys per database", + Units: "keys", + Fam: "keyspace sets", + Ctx: "pika.database_sets_invalid_keys", + Type: module.Stacked, + } +) + +var ( + chartUptime = module.Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "pika.uptime", + Dims: module.Dims{ + {ID: "uptime_in_seconds", Name: "uptime"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/pika/collect.go b/src/go/collectors/go.d.plugin/modules/pika/collect.go new file mode 100644 index 00000000000000..72a4961dd97bf5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/collect.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import ( + "bufio" + "context" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/blang/semver/v4" +) + +const precision = 1000 // float values multiplier and dimensions divisor + +func (p *Pika) collect() (map[string]int64, error) { + info, err := p.pdb.Info(context.Background(), "all").Result() + if err != nil { + return nil, err + } + + if p.server == "" { + s, v, err := extractServerVersion(info) + if err != nil { + return nil, fmt.Errorf("can not extract server app and version: %v", err) + } + p.server, p.version = s, v + p.Debugf(`server="%s",version="%s"`, s, v) + } + + if p.server != "pika" { + return nil, fmt.Errorf("unsupported server app, want=pika, got=%s", p.server) + } + + ms := make(map[string]int64) + p.collectInfo(ms, info) + + return ms, nil +} + +// pika_version:3.4.0 +var reVersion = regexp.MustCompile(`([a-z]+)_version:(\d+\.\d+\.\d+)`) + +func extractServerVersion(info string) (string, *semver.Version, error) { + var versionLine string + for sc := bufio.NewScanner(strings.NewReader(info)); sc.Scan(); { + line := sc.Text() + if strings.Contains(line, "_version") { + versionLine = strings.TrimSpace(line) + break + } + } + if versionLine == "" { + return "", nil, errors.New("no version property") + } + + match := reVersion.FindStringSubmatch(versionLine) + if match == nil { + return "", nil, fmt.Errorf("can not parse version property '%s'", versionLine) + } + + server, version := match[1], match[2] + ver, err := semver.New(version) + if err != nil { + return "", nil, err + } + + return server, ver, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/collect_info.go b/src/go/collectors/go.d.plugin/modules/pika/collect_info.go new file mode 100644 index 00000000000000..c7043c816b1327 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/collect_info.go @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import ( + "bufio" + "regexp" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +// https://github.com/Qihoo360/pika/blob/master/src/pika_admin.cc +// https://github.com/Qihoo360/pika/blob/a0dbdcf5897dd7800ba8a4d1eafce1595619ddc8/src/pika_admin.cc#L694-L710 + +const ( + infoSectionServer = "# Server" + infoSectionData = "# Data" + infoSectionClients = "# Clients" + infoSectionStats = "# Stats" + infoSectionCommandExecCount = "# Command_Exec_Count" + infoSectionCPU = "# CPU" + infoSectionReplMaster = "# Replication(MASTER)" + infoSectionReplSlave = "# Replication(SLAVE)" + infoSectionReplMasterSlave = "# Replication(Master && SLAVE)" + infoSectionKeyspace = "# Keyspace" +) + +var infoSections = map[string]struct{}{ + infoSectionServer: {}, + infoSectionData: {}, + infoSectionClients: {}, + 
infoSectionStats: {}, + infoSectionCommandExecCount: {}, + infoSectionCPU: {}, + infoSectionReplMaster: {}, + infoSectionReplSlave: {}, + infoSectionReplMasterSlave: {}, + infoSectionKeyspace: {}, +} + +func isInfoSection(line string) bool { _, ok := infoSections[line]; return ok } + +func (p *Pika) collectInfo(ms map[string]int64, info string) { + var curSection string + + sc := bufio.NewScanner(strings.NewReader(info)) + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if len(line) == 0 { + curSection = "" + continue + } + if strings.HasPrefix(line, "#") { + if isInfoSection(line) { + curSection = line + } + continue + } + + field, value, ok := parseProperty(line) + if !ok { + continue + } + + switch curSection { + case infoSectionCommandExecCount: + p.collectInfoCommandExecCountProperty(ms, field, value) + case infoSectionKeyspace: + p.collectInfoKeyspaceProperty(ms, field, value) + default: + collectNumericValue(ms, field, value) + } + } +} + +var reKeyspaceValue = regexp.MustCompile(`^(.+)_keys=(\d+), expires=(\d+), invalid_keys=(\d+)`) + +func (p *Pika) collectInfoKeyspaceProperty(ms map[string]int64, field, value string) { + match := reKeyspaceValue.FindStringSubmatch(value) + if match == nil { + return + } + + dataType, keys, expires, invalid := strings.ToLower(match[1]), match[2], match[3], match[4] + collectNumericValue(ms, field+"_"+dataType+"_keys", keys) + collectNumericValue(ms, field+"_"+dataType+"_expires_keys", expires) + collectNumericValue(ms, field+"_"+dataType+"_invalid_keys", invalid) + + if !p.collectedDbs[field] { + p.collectedDbs[field] = true + p.addDbToKeyspaceCharts(field) + } +} + +func (p *Pika) collectInfoCommandExecCountProperty(ms map[string]int64, field, value string) { + collectNumericValue(ms, "cmd_"+field+"_calls", value) + + if !p.collectedCommands[field] { + p.collectedCommands[field] = true + p.addCmdToCommandsCharts(field) + } +} + +func (p *Pika) addCmdToCommandsCharts(cmd string) { + p.addDimToChart(chartCommandsCalls.ID, &module.Dim{ + ID: "cmd_" + cmd + "_calls", + Name: cmd, + Algo: module.Incremental, + }) +} + +func (p *Pika) addDbToKeyspaceCharts(db string) { + p.addDimToChart(chartDbStringsKeys.ID, &module.Dim{ + ID: db + "_strings_keys", + Name: db, + }) + p.addDimToChart(chartDbStringsExpiresKeys.ID, &module.Dim{ + ID: db + "_strings_expires_keys", + Name: db, + }) + p.addDimToChart(chartDbStringsInvalidKeys.ID, &module.Dim{ + ID: db + "_strings_invalid_keys", + Name: db, + }) + + p.addDimToChart(chartDbHashesKeys.ID, &module.Dim{ + ID: db + "_hashes_keys", + Name: db, + }) + p.addDimToChart(chartDbHashesExpiresKeys.ID, &module.Dim{ + ID: db + "_hashes_expires_keys", + Name: db, + }) + p.addDimToChart(chartDbHashesInvalidKeys.ID, &module.Dim{ + ID: db + "_hashes_invalid_keys", + Name: db, + }) + + p.addDimToChart(chartDbListsKeys.ID, &module.Dim{ + ID: db + "_lists_keys", + Name: db, + }) + p.addDimToChart(chartDbListsExpiresKeys.ID, &module.Dim{ + ID: db + "_lists_expires_keys", + Name: db, + }) + p.addDimToChart(chartDbListsInvalidKeys.ID, &module.Dim{ + ID: db + "_lists_invalid_keys", + Name: db, + }) + + p.addDimToChart(chartDbZsetsKeys.ID, &module.Dim{ + ID: db + "_zsets_keys", + Name: db, + }) + p.addDimToChart(chartDbZsetsExpiresKeys.ID, &module.Dim{ + ID: db + "_zsets_expires_keys", + Name: db, + }) + p.addDimToChart(chartDbZsetsInvalidKeys.ID, &module.Dim{ + ID: db + "_zsets_invalid_keys", + Name: db, + }) + + p.addDimToChart(chartDbSetsKeys.ID, &module.Dim{ + ID: db + "_sets_keys", + Name: db, + }) + 
p.addDimToChart(chartDbSetsExpiresKeys.ID, &module.Dim{ + ID: db + "_sets_expires_keys", + Name: db, + }) + p.addDimToChart(chartDbSetsInvalidKeys.ID, &module.Dim{ + ID: db + "_sets_invalid_keys", + Name: db, + }) +} + +func (p *Pika) addDimToChart(chartID string, dim *module.Dim) { + chart := p.Charts().Get(chartID) + if chart == nil { + p.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID) + return + } + if err := chart.AddDim(dim); err != nil { + p.Warning(err) + return + } + chart.MarkNotCreated() +} + +func parseProperty(prop string) (field, value string, ok bool) { + var sep byte + if strings.HasPrefix(prop, "db") { + sep = ' ' + } else { + sep = ':' + } + i := strings.IndexByte(prop, sep) + if i == -1 { + return "", "", false + } + field, value = prop[:i], prop[i+1:] + return field, value, field != "" && value != "" +} + +func collectNumericValue(ms map[string]int64, field, value string) { + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return + } + if strings.IndexByte(value, '.') == -1 { + ms[field] = int64(v) + } else { + ms[field] = int64(v * precision) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/config_schema.json b/src/go/collectors/go.d.plugin/modules/pika/config_schema.json new file mode 100644 index 00000000000000..d284faaa1162f9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/config_schema.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/pika job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/init.go b/src/go/collectors/go.d.plugin/modules/pika/init.go new file mode 100644 index 00000000000000..2ad3ae8ec57bb3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/init.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/go-redis/redis/v8" +) + +func (p Pika) validateConfig() error { + if p.Address == "" { + return errors.New("'address' not set") + } + return nil +} + +func (p Pika) initRedisClient() (*redis.Client, error) { + opts, err := redis.ParseURL(p.Address) + if err != nil { + return nil, err + } + + tlsConfig, err := tlscfg.NewTLSConfig(p.TLSConfig) + if err != nil { + return nil, err + } + + if opts.TLSConfig != nil && tlsConfig != nil { + tlsConfig.ServerName = opts.TLSConfig.ServerName + } + + opts.PoolSize = 1 + opts.TLSConfig = tlsConfig + opts.DialTimeout = p.Timeout.Duration + opts.ReadTimeout = p.Timeout.Duration + opts.WriteTimeout = p.Timeout.Duration + + return redis.NewClient(opts), nil +} + +func (p Pika) initCharts() (*module.Charts, error) { + return pikaCharts.Copy(), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/integrations/pika.md b/src/go/collectors/go.d.plugin/modules/pika/integrations/pika.md new file mode 100644 index 00000000000000..e19331f9f90cbd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/integrations/pika.md @@ -0,0 +1,221 @@ +<!--startmeta +custom_edit_url: 
"https://github.com/netdata/go.d.plugin/edit/master/modules/pika/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/pika/metadata.yaml" +sidebar_label: "Pika" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Pika + + +<img src="https://netdata.cloud/img/pika.svg" width="150"/> + + +Plugin: go.d.plugin +Module: pika + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Pika servers. + +It collects information and statistics about the server executing the following commands: + +- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E) + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Pika instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| pika.connections | accepted | connections | +| pika.clients | connected | clients | +| pika.memory | used | bytes | +| pika.connected_replicas | connected | replicas | +| pika.commands | processed | commands/s | +| pika.commands_calls | a dimension per command | calls/s | +| pika.database_strings_keys | a dimension per database | keys | +| pika.database_strings_expires_keys | a dimension per database | keys | +| pika.database_strings_invalid_keys | a dimension per database | keys | +| pika.database_hashes_keys | a dimension per database | keys | +| pika.database_hashes_expires_keys | a dimension per database | keys | +| pika.database_hashes_invalid_keys | a dimension per database | keys | +| pika.database_lists_keys | a dimension per database | keys | +| pika.database_lists_expires_keys | a dimension per database | keys | +| pika.database_lists_invalid_keys | a dimension per database | keys | +| pika.database_zsets_keys | a dimension per database | keys | +| pika.database_zsets_expires_keys | a dimension per database | keys | +| pika.database_zsets_invalid_keys | a dimension per database | keys | +| pika.database_sets_keys | a dimension per database | keys | +| pika.database_sets_expires_keys | a dimension per database | keys | +| pika.database_sets_invalid_keys | a dimension per database | keys | +| pika.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/pika.conf`. 
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pika.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Pika server address. | redis://@localhost:9221 | yes |
+| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |
+| username | Username used for authentication. |  | no |
+| password | Password used for authentication. |  | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying server certificates. |  | no |
+| tls_cert | Client TLS certificate. |  | no |
+| tls_key | Client TLS key. |  | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://@localhost:9221'
+
+```
+</details>
+
+##### TCP socket with password
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://:password@127.0.0.1:9221'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://:password@127.0.0.1:9221'
+
+  - name: remote
+    address: 'redis://user:password@203.0.113.0:9221'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m pika
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/pika/metadata.yaml b/src/go/collectors/go.d.plugin/modules/pika/metadata.yaml
new file mode 100644
index 00000000000000..c87cd9b2718f83
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/pika/metadata.yaml
@@ -0,0 +1,277 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-pika
+      plugin_name: go.d.plugin
+      module_name: pika
+      monitored_instance:
+        name: Pika
+        link: https://github.com/OpenAtomFoundation/pika
+        icon_filename: pika.svg
+        categories:
+          - data-collection.database-servers
+      keywords:
+        - pika
+        - databases
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors Pika servers.
+
+          It collects information and statistics about the server by executing the following commands:
+
+          - [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/pika.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: Pika server address.
+              default_value: redis://@localhost:9221
+              required: true
+              details: |
+                There are two connection types: by TCP socket and by Unix socket.
+
+                - TCP connection: `redis://<user>:<password>@<host>:<port>/<db_number>`
+                - Unix connection: `unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>`
+            - name: timeout
+              description: Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username used for authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password used for authentication.
+              default_value: ""
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certificate authority that the client uses when verifying server certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: TCP socket
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 'redis://@localhost:9221'
+            - name: TCP socket with password
+              description: An example configuration.
+ config: | + jobs: + - name: local + address: 'redis://:password@127.0.0.1:9221' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + address: 'redis://:password@127.0.0.1:9221' + + - name: remote + address: 'redis://user:password@203.0.113.0:9221' + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: pika.connections + description: Connections + unit: connections + chart_type: line + dimensions: + - name: accepted + - name: pika.clients + description: Clients + unit: clients + chart_type: line + dimensions: + - name: connected + - name: pika.memory + description: Memory usage + unit: bytes + chart_type: area + dimensions: + - name: used + - name: pika.connected_replicas + description: Connected replicas + unit: replicas + chart_type: line + dimensions: + - name: connected + - name: pika.commands + description: Processed commands + unit: commands/s + chart_type: line + dimensions: + - name: processed + - name: pika.commands_calls + description: Calls per command + unit: calls/s + chart_type: stacked + dimensions: + - name: a dimension per command + - name: pika.database_strings_keys + description: Strings type keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_strings_expires_keys + description: Strings type expires keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_strings_invalid_keys + description: Strings type invalid keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_hashes_keys + description: Hashes type keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_hashes_expires_keys + description: Hashes type expires keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_hashes_invalid_keys + description: Hashes type invalid keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_lists_keys + description: Lists type keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_lists_expires_keys + description: Lists type expires keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_lists_invalid_keys + description: Lists type invalid keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_zsets_keys + description: Zsets type keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_zsets_expires_keys + description: Zsets type expires keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_zsets_invalid_keys + description: Zsets type invalid keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: 
pika.database_sets_keys + description: Sets type keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_sets_expires_keys + description: Sets type expires keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.database_sets_invalid_keys + description: Sets invalid keys per database + unit: keys + chart_type: stacked + dimensions: + - name: a dimension per database + - name: pika.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime diff --git a/src/go/collectors/go.d.plugin/modules/pika/pika.go b/src/go/collectors/go.d.plugin/modules/pika/pika.go new file mode 100644 index 00000000000000..a14a4411362322 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/pika.go @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import ( + "context" + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/blang/semver/v4" + "github.com/go-redis/redis/v8" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("pika", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Pika { + return &Pika{ + Config: Config{ + Address: "redis://@localhost:9221", + Timeout: web.Duration{Duration: time.Second}, + }, + + collectedCommands: make(map[string]bool), + collectedDbs: make(map[string]bool), + } +} + +type Config struct { + Address string `yaml:"address"` + Timeout web.Duration `yaml:"timeout"` + tlscfg.TLSConfig `yaml:",inline"` +} + +type ( + Pika struct { + module.Base + Config `yaml:",inline"` + + pdb redisClient + + server string + version *semver.Version + + collectedCommands map[string]bool + collectedDbs map[string]bool + + charts *module.Charts + } + redisClient interface { + Info(ctx context.Context, section ...string) *redis.StringCmd + Close() error + } +) + +func (p *Pika) Init() bool { + err := p.validateConfig() + if err != nil { + p.Errorf("config validation: %v", err) + return false + } + + pdb, err := p.initRedisClient() + if err != nil { + p.Errorf("init redis client: %v", err) + return false + } + p.pdb = pdb + + charts, err := p.initCharts() + if err != nil { + p.Errorf("init charts: %v", err) + return false + } + p.charts = charts + + return true +} + +func (p *Pika) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Pika) Charts() *module.Charts { + return p.charts +} + +func (p *Pika) Collect() map[string]int64 { + ms, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (p *Pika) Cleanup() { + if p.pdb == nil { + return + } + err := p.pdb.Close() + if err != nil { + p.Warningf("cleanup: error on closing redis client [%s]: %v", p.Address, err) + } + p.pdb = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/pika_test.go b/src/go/collectors/go.d.plugin/modules/pika/pika_test.go new file mode 100644 index 00000000000000..a564a54ce8c42b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/pika_test.go @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pika + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/go-redis/redis/v8" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +var ( + redisInfoAll, _ = os.ReadFile("testdata/redis/info_all.txt") + v340InfoAll, _ = os.ReadFile("testdata/v3.4.0/info_all.txt") +) + +func Test_Testdata(t *testing.T) { + for name, data := range map[string][]byte{ + "redisInfoAll": redisInfoAll, + "v340InfoAll": v340InfoAll, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*Pika)(nil), New()) +} + +func TestPika_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'address'": { + wantFail: true, + config: Config{Address: ""}, + }, + "fails on invalid 'address' format": { + wantFail: true, + config: Config{Address: "127.0.0.1:9221"}, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + Address: "redis://@127.0.0.1:9221", + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pika := New() + pika.Config = test.config + + if test.wantFail { + assert.False(t, pika.Init()) + } else { + assert.True(t, pika.Init()) + } + }) + } +} + +func TestPika_Check(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Pika + wantFail bool + }{ + "success on valid response v3.4.0": { + prepare: preparePikaV340, + }, + "fails on error on Info": { + wantFail: true, + prepare: preparePikaErrorOnInfo, + }, + "fails on response from not Pika instance": { + wantFail: true, + prepare: preparePikaWithRedisMetrics, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pika := test.prepare(t) + + if test.wantFail { + assert.False(t, pika.Check()) + } else { + assert.True(t, pika.Check()) + } + }) + } +} + +func TestPika_Charts(t *testing.T) { + pika := New() + require.True(t, pika.Init()) + + assert.NotNil(t, pika.Charts()) +} + +func TestPika_Cleanup(t *testing.T) { + pika := New() + assert.NotPanics(t, pika.Cleanup) + + require.True(t, pika.Init()) + m := &mockRedisClient{} + pika.pdb = m + + pika.Cleanup() + + assert.True(t, m.calledClose) +} + +func TestPika_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Pika + wantCollected map[string]int64 + }{ + "success on valid response v3.4.0": { + prepare: preparePikaV340, + wantCollected: map[string]int64{ + "cmd_INFO_calls": 1, + "cmd_SET_calls": 2, + "arch_bits": 64, + "connected_clients": 1, + "connected_slaves": 0, + "db0_hashes_expires_keys": 0, + "db0_hashes_invalid_keys": 0, + "db0_hashes_keys": 0, + "db0_lists_expires_keys": 0, + "db0_lists_invalid_keys": 0, + "db0_lists_keys": 0, + "db0_sets_expires_keys": 0, + "db0_sets_invalid_keys": 0, + "db0_sets_keys": 0, + "db0_strings_expires_keys": 0, + "db0_strings_invalid_keys": 0, + "db0_strings_keys": 0, + "db0_zsets_expires_keys": 0, + "db0_zsets_invalid_keys": 0, + "db0_zsets_keys": 0, + "instantaneous_ops_per_sec": 0, + "log_size": 4272814, + "process_id": 1, + "server_id": 1, + "sync_thread_num": 6, + "tcp_port": 9221, + "thread_num": 1, + "total_commands_processed": 3, + "total_connections_received": 3, + "uptime_in_days": 1, + "uptime_in_seconds": 1884, + "used_cpu_sys": 158200, + "used_cpu_sys_children": 30, + "used_cpu_user": 22050, + "used_cpu_user_children": 20, + "used_memory": 8198, + }, + }, + "fails on error on Info": { + prepare: preparePikaErrorOnInfo, + }, + "fails on response from not Pika instance": { + prepare: preparePikaWithRedisMetrics, + }, + } + 
+ for name, test := range tests { + t.Run(name, func(t *testing.T) { + pika := test.prepare(t) + + ms := pika.Collect() + + assert.Equal(t, test.wantCollected, ms) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, pika, ms) + ensureCollectedCommandsAddedToCharts(t, pika) + ensureCollectedDbsAddedToCharts(t, pika) + } + }) + } +} + +func preparePikaV340(t *testing.T) *Pika { + pika := New() + require.True(t, pika.Init()) + pika.pdb = &mockRedisClient{ + result: v340InfoAll, + } + return pika +} + +func preparePikaErrorOnInfo(t *testing.T) *Pika { + pika := New() + require.True(t, pika.Init()) + pika.pdb = &mockRedisClient{ + errOnInfo: true, + } + return pika +} + +func preparePikaWithRedisMetrics(t *testing.T) *Pika { + pika := New() + require.True(t, pika.Init()) + pika.pdb = &mockRedisClient{ + result: redisInfoAll, + } + return pika +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pika *Pika, ms map[string]int64) { + for _, chart := range *pika.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := ms[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := ms[v.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) + } + } +} + +func ensureCollectedCommandsAddedToCharts(t *testing.T, pika *Pika) { + for _, id := range []string{ + chartCommandsCalls.ID, + } { + chart := pika.Charts().Get(id) + require.NotNilf(t, chart, "'%s' chart is not in charts", id) + assert.Lenf(t, chart.Dims, len(pika.collectedCommands), + "'%s' chart unexpected number of dimensions", id) + } +} + +func ensureCollectedDbsAddedToCharts(t *testing.T, pika *Pika) { + for _, id := range []string{ + chartDbStringsKeys.ID, + chartDbStringsExpiresKeys.ID, + chartDbStringsInvalidKeys.ID, + chartDbHashesKeys.ID, + chartDbHashesExpiresKeys.ID, + chartDbHashesInvalidKeys.ID, + chartDbListsKeys.ID, + chartDbListsExpiresKeys.ID, + chartDbListsInvalidKeys.ID, + chartDbZsetsKeys.ID, + chartDbZsetsExpiresKeys.ID, + chartDbZsetsInvalidKeys.ID, + chartDbSetsKeys.ID, + chartDbSetsExpiresKeys.ID, + chartDbSetsInvalidKeys.ID, + } { + chart := pika.Charts().Get(id) + require.NotNilf(t, chart, "'%s' chart is not in charts", id) + assert.Lenf(t, chart.Dims, len(pika.collectedDbs), + "'%s' chart unexpected number of dimensions", id) + } +} + +type mockRedisClient struct { + errOnInfo bool + result []byte + calledClose bool +} + +func (m mockRedisClient) Info(_ context.Context, _ ...string) (cmd *redis.StringCmd) { + if m.errOnInfo { + cmd = redis.NewStringResult("", errors.New("error on Info")) + } else { + cmd = redis.NewStringResult(string(m.result), nil) + } + return cmd +} + +func (m *mockRedisClient) Close() error { + m.calledClose = true + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/pika/testdata/redis/info_all.txt b/src/go/collectors/go.d.plugin/modules/pika/testdata/redis/info_all.txt new file mode 100644 index 00000000000000..8ab3816203ec61 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/testdata/redis/info_all.txt @@ -0,0 +1,165 @@ +$4050 +# Server +redis_version:6.0.9 +redis_git_sha1:00000000 +redis_git_dirty:0 +redis_build_id:12c354e6793cb936 +redis_mode:standalone +os:Linux 5.4.39-linuxkit x86_64 +arch_bits:64 +multiplexing_api:epoll +atomicvar_api:atomic-builtin +gcc_version:8.3.0 +process_id:1 +run_id:5d97fd948bbf6cb68458685fc747f9f9019c3fc4 +tcp_port:6379 +uptime_in_seconds:252812 
+uptime_in_days:2 +hz:10 +configured_hz:10 +lru_clock:13181377 +executable:/data/redis-server +config_file: +io_threads_active:0 + +# Clients +connected_clients:1 +client_recent_max_input_buffer:8 +client_recent_max_output_buffer:0 +blocked_clients:0 +tracking_clients:0 +clients_in_timeout_table:0 + +# Memory +used_memory:867160 +used_memory_human:846.84K +used_memory_rss:3989504 +used_memory_rss_human:3.80M +used_memory_peak:923360 +used_memory_peak_human:901.72K +used_memory_peak_perc:93.91% +used_memory_overhead:803344 +used_memory_startup:803152 +used_memory_dataset:63816 +used_memory_dataset_perc:99.70% +allocator_allocated:903408 +allocator_active:1208320 +allocator_resident:3723264 +total_system_memory:2084032512 +total_system_memory_human:1.94G +used_memory_lua:37888 +used_memory_lua_human:37.00K +used_memory_scripts:0 +used_memory_scripts_human:0B +number_of_cached_scripts:0 +maxmemory:0 +maxmemory_human:0B +maxmemory_policy:noeviction +allocator_frag_ratio:1.34 +allocator_frag_bytes:304912 +allocator_rss_ratio:3.08 +allocator_rss_bytes:2514944 +rss_overhead_ratio:1.07 +rss_overhead_bytes:266240 +mem_fragmentation_ratio:4.96 +mem_fragmentation_bytes:3185848 +mem_not_counted_for_evict:0 +mem_replication_backlog:0 +mem_clients_slaves:0 +mem_clients_normal:0 +mem_aof_buffer:0 +mem_allocator:jemalloc-5.1.0 +active_defrag_running:0 +lazyfree_pending_objects:0 + +# Persistence +loading:0 +rdb_changes_since_last_save:0 +rdb_bgsave_in_progress:0 +rdb_last_save_time:1606951667 +rdb_last_bgsave_status:ok +rdb_last_bgsave_time_sec:0 +rdb_current_bgsave_time_sec:-1 +rdb_last_cow_size:290816 +aof_enabled:0 +aof_rewrite_in_progress:0 +aof_rewrite_scheduled:0 +aof_last_rewrite_time_sec:-1 +aof_current_rewrite_time_sec:-1 +aof_last_bgrewrite_status:ok +aof_last_write_status:ok +aof_last_cow_size:0 +module_fork_in_progress:0 +module_fork_last_cow_size:0 + +# Stats +total_connections_received:87 +total_commands_processed:161 +instantaneous_ops_per_sec:0 +total_net_input_bytes:2301 +total_net_output_bytes:507187 +instantaneous_input_kbps:0.00 +instantaneous_output_kbps:0.00 +rejected_connections:0 +sync_full:0 +sync_partial_ok:0 +sync_partial_err:0 +expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +expire_cycle_cpu_milliseconds:28362 +evicted_keys:0 +keyspace_hits:2 +keyspace_misses:0 +pubsub_channels:0 +pubsub_patterns:0 +latest_fork_usec:810 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 +tracking_total_keys:0 +tracking_total_items:0 +tracking_total_prefixes:0 +unexpected_error_replies:0 +total_reads_processed:250 +total_writes_processed:163 +io_threaded_reads_processed:0 +io_threaded_writes_processed:0 + +# Replication +role:master +connected_slaves:0 +master_replid:3f0ad529c9c59a17834bde8ae85f09f77609ecb1 +master_replid2:0000000000000000000000000000000000000000 +master_repl_offset:0 +second_repl_offset:-1 +repl_backlog_active:0 +repl_backlog_size:1048576 +repl_backlog_first_byte_offset:0 +repl_backlog_histlen:0 + +# CPU +used_cpu_sys:630.829091 +used_cpu_user:188.394908 +used_cpu_sys_children:0.020626 +used_cpu_user_children:0.002731 + +# Modules + +# Commandstats +cmdstat_set:calls=3,usec=140,usec_per_call=46.67 +cmdstat_command:calls=2,usec=2182,usec_per_call=1091.00 +cmdstat_get:calls=2,usec=29,usec_per_call=14.50 +cmdstat_hmset:calls=2,usec=408,usec_per_call=204.00 +cmdstat_hello:calls=1,usec=15,usec_per_call=15.00 
+cmdstat_ping:calls=19,usec=286,usec_per_call=15.05 +cmdstat_info:calls=132,usec=37296,usec_per_call=282.55 + +# Cluster +cluster_enabled:0 + +# Keyspace +db0:keys=4,expires=0,avg_ttl=0 diff --git a/src/go/collectors/go.d.plugin/modules/pika/testdata/v3.4.0/info_all.txt b/src/go/collectors/go.d.plugin/modules/pika/testdata/v3.4.0/info_all.txt new file mode 100644 index 00000000000000..ec58524ce579f3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pika/testdata/v3.4.0/info_all.txt @@ -0,0 +1,64 @@ +$1283 +# Server +pika_version:3.4.0 +pika_git_sha:bd30511bf82038c2c6531b3d84872c9825fe836a +pika_build_compile_date: Dec 1 2020 +os:Linux 5.4.39-linuxkit x86_64 +arch_bits:64 +process_id:1 +tcp_port:9221 +thread_num:1 +sync_thread_num:6 +uptime_in_seconds:1884 +uptime_in_days:1 +config_file:/pika/conf/pika.conf +server_id:1 + +# Data +db_size:645807 +db_size_human:0M +log_size:4272814 +log_size_human:4M +compression:snappy +used_memory:8198 +used_memory_human:0M +db_memtable_usage:8072 +db_tablereader_usage:126 +db_fatal:0 +db_fatal_msg:NULL + +# Clients +connected_clients:1 + +# Stats +total_connections_received:3 +instantaneous_ops_per_sec:0 +total_commands_processed:3 +is_bgsaving:No +is_scaning_keyspace:No +is_compact:No +compact_cron: +compact_interval: + +# Command_Exec_Count +INFO:1 +SET:2 + +# CPU +used_cpu_sys:158.20 +used_cpu_user:22.05 +used_cpu_sys_children:0.03 +used_cpu_user_children:0.02 + +# Replication(MASTER) +role:master +connected_slaves:0 +db0 binlog_offset=0 589,safety_purge=none + +# Keyspace +# Time:1970-01-01 08:00:00 +db0 Strings_keys=0, expires=0, invalid_keys=0 +db0 Hashes_keys=0, expires=0, invalid_keys=0 +db0 Lists_keys=0, expires=0, invalid_keys=0 +db0 Zsets_keys=0, expires=0, invalid_keys=0 +db0 Sets_keys=0, expires=0, invalid_keys=0 diff --git a/src/go/collectors/go.d.plugin/modules/ping/README.md b/src/go/collectors/go.d.plugin/modules/ping/README.md new file mode 120000 index 00000000000000..a1381e57b753ad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/README.md @@ -0,0 +1 @@ +integrations/ping.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/ping/charts.go b/src/go/collectors/go.d.plugin/modules/ping/charts.go new file mode 100644 index 00000000000000..27bcfcf82db619 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/charts.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ping + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioHostRTT = module.Priority + iota + prioHostStdDevRTT + prioHostPingPacketLoss + prioHostPingPackets +) + +var hostChartsTmpl = module.Charts{ + hostRTTChartTmpl.Copy(), + hostStdDevRTTChartTmpl.Copy(), + hostPacketLossChartTmpl.Copy(), + hostPacketsChartTmpl.Copy(), +} + +var ( + hostRTTChartTmpl = module.Chart{ + ID: "host_%s_rtt", + Title: "Ping round-trip time", + Units: "milliseconds", + Fam: "latency", + Ctx: "ping.host_rtt", + Priority: prioHostRTT, + Type: module.Area, + Dims: module.Dims{ + {ID: "host_%s_min_rtt", Name: "min", Div: 1e3}, + {ID: "host_%s_max_rtt", Name: "max", Div: 1e3}, + {ID: "host_%s_avg_rtt", Name: "avg", Div: 1e3}, + }, + } + hostStdDevRTTChartTmpl = module.Chart{ + ID: "host_%s_std_dev_rtt", + Title: "Ping round-trip time standard deviation", + Units: "milliseconds", + Fam: "latency", + Ctx: "ping.host_std_dev_rtt", + Priority: prioHostStdDevRTT, + Dims: module.Dims{ + {ID: "host_%s_std_dev_rtt", Name: "std_dev", Div: 1e3}, + }, + } +) + +var 
hostPacketLossChartTmpl = module.Chart{ + ID: "host_%s_packet_loss", + Title: "Ping packet loss", + Units: "percentage", + Fam: "packet loss", + Ctx: "ping.host_packet_loss", + Priority: prioHostPingPacketLoss, + Dims: module.Dims{ + {ID: "host_%s_packet_loss", Name: "loss", Div: 1000}, + }, +} + +var hostPacketsChartTmpl = module.Chart{ + ID: "host_%s_packets", + Title: "Ping packets transferred", + Units: "packets", + Fam: "packets", + Ctx: "ping.host_packets", + Priority: prioHostPingPackets, + Dims: module.Dims{ + {ID: "host_%s_packets_recv", Name: "received"}, + {ID: "host_%s_packets_sent", Name: "sent"}, + }, +} + +func newHostCharts(host string) *module.Charts { + charts := hostChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(host, ".", "_")) + chart.Labels = []module.Label{ + {Key: "host", Value: host}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, host) + } + } + + return charts +} + +func (p *Ping) addHostCharts(host string) { + charts := newHostCharts(host) + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/ping/collect.go b/src/go/collectors/go.d.plugin/modules/ping/collect.go new file mode 100644 index 00000000000000..c162a2b158f6a6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/collect.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ping + +import ( + "fmt" + "sync" +) + +func (p *Ping) collect() (map[string]int64, error) { + mu := &sync.Mutex{} + mx := make(map[string]int64) + var wg sync.WaitGroup + + for _, v := range p.Hosts { + wg.Add(1) + go func(v string) { defer wg.Done(); p.pingHost(v, mx, mu) }(v) + } + wg.Wait() + + return mx, nil +} + +func (p *Ping) pingHost(host string, mx map[string]int64, mu *sync.Mutex) { + stats, err := p.prober.ping(host) + if err != nil { + p.Error(err) + return + } + + mu.Lock() + defer mu.Unlock() + + if !p.hosts[host] { + p.hosts[host] = true + p.addHostCharts(host) + } + + px := fmt.Sprintf("host_%s_", host) + if stats.PacketsRecv != 0 { + mx[px+"min_rtt"] = stats.MinRtt.Microseconds() + mx[px+"max_rtt"] = stats.MaxRtt.Microseconds() + mx[px+"avg_rtt"] = stats.AvgRtt.Microseconds() + mx[px+"std_dev_rtt"] = stats.StdDevRtt.Microseconds() + } + mx[px+"packets_recv"] = int64(stats.PacketsRecv) + mx[px+"packets_sent"] = int64(stats.PacketsSent) + mx[px+"packet_loss"] = int64(stats.PacketLoss * 1000) +} diff --git a/src/go/collectors/go.d.plugin/modules/ping/config_schema.json b/src/go/collectors/go.d.plugin/modules/ping/config_schema.json new file mode 100644 index 00000000000000..fe3779bf4be061 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/config_schema.json @@ -0,0 +1,47 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/ping job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "update_every": { + "type": "integer", + "minimum": 1 + }, + "hosts": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "network": { + "type": "string", + "enum": [ + "ip", + "ip4", + "ip6" + ] + }, + "privileged": { + "type": "boolean" + }, + "sendPackets": { + "type": "integer", + "minimum": 1 + }, + "interval": { + "type": "integer", + "minimum": 1 + }, + "interface": { + "type": "string" + } + }, + "required": [ + "name", + "hosts" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/ping/init.go 
b/src/go/collectors/go.d.plugin/modules/ping/init.go
new file mode 100644
index 00000000000000..e71aa6c75032c7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/ping/init.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+	"errors"
+	"time"
+)
+
+func (p *Ping) validateConfig() error {
+	if len(p.Hosts) == 0 {
+		return errors.New("'hosts' can't be empty")
+	}
+	if p.SendPackets <= 0 {
+		return errors.New("'send_packets' can't be <= 0")
+	}
+	return nil
+}
+
+func (p *Ping) initProber() (prober, error) {
+	// The ping deadline is a fraction (90%, or 95% when update_every > 1) of
+	// the collection interval, so a run finishes before the next cycle starts.
+	mul := 0.9
+	if p.UpdateEvery > 1 {
+		mul = 0.95
+	}
+	deadline := time.Millisecond * time.Duration(float64(p.UpdateEvery)*mul*1000)
+	if deadline.Milliseconds() == 0 {
+		return nil, errors.New("zero ping deadline")
+	}
+
+	conf := pingProberConfig{
+		network:    p.Network,
+		privileged: p.Privileged,
+		packets:    p.SendPackets,
+		iface:      p.Interface,
+		interval:   p.Interval.Duration,
+		deadline:   deadline,
+	}
+
+	return p.newProber(conf, p.Logger), nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/ping/integrations/ping.md b/src/go/collectors/go.d.plugin/modules/ping/integrations/ping.md
new file mode 100644
index 00000000000000..6a3bf820a48840
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/ping/integrations/ping.md
@@ -0,0 +1,236 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/ping/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/ping/metadata.yaml"
+sidebar_label: "Ping"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Ping
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: ping
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This module measures round-trip time and packet loss by sending ping messages to network hosts.
+
+There are two operational modes:
+
+- privileged (send raw ICMP ping, default). Requires
+  CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:
+  > **Note**: set automatically during Netdata installation.
+
+  ```bash
+  sudo setcap CAP_NET_RAW=eip <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin
+  ```
+
+- unprivileged (send UDP ping, Linux only).
+  Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):
+
+  ```bash
+  sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"
+  ```
+  To persist the change add `net.ipv4.ping_group_range="0 2147483647"` to `/etc/sysctl.conf` and
+  execute `sudo sysctl -p`.
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per host
+
+These metrics refer to the remote host.
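+
+Internally, every metric key embeds the host address. A minimal sketch of the naming scheme (the host value below is an example, mirroring `pingHost()` in the collector's `collect.go`):
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	// Per-host metric keys are built with a "host_<host>_" prefix.
+	px := fmt.Sprintf("host_%s_", "192.0.2.1")
+	for _, name := range []string{"min_rtt", "avg_rtt", "max_rtt", "std_dev_rtt", "packet_loss", "packets_recv", "packets_sent"} {
+		fmt.Println(px + name) // e.g. host_192.0.2.1_avg_rtt
+	}
+}
+```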
+
+Labels:
+
+| Label      | Description     |
+|:-----------|:----------------|
+| host | remote host |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ping.host_rtt | min, max, avg | milliseconds |
+| ping.host_std_dev_rtt | std_dev | milliseconds |
+| ping.host_packet_loss | loss | percentage |
+| ping.host_packets | received, sent | packets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name  | On metric | Description |
+|:------------|:----------|:------------|
+| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |
+| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |
+| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ping.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ping.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| hosts | Network hosts. |  | yes |
+| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |
+| privileged | Ping packets type. "no" means send an "unprivileged" UDP ping, "yes" - raw ICMP ping. | yes | no |
+| packets | Number of ping packets to send. | 5 | no |
+| interval | Time to wait between sending ping packets. | 100ms | no |
+
+</details>
+
+#### Examples
+
+##### IPv4 hosts
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: example
+    hosts:
+      - 192.0.2.0
+      - 192.0.2.1
+
+```
+</details>
+
+##### Unprivileged mode
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: example
+    privileged: no
+    hosts:
+      - 192.0.2.0
+      - 192.0.2.1
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Multiple instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: example1
+    hosts:
+      - 192.0.2.0
+      - 192.0.2.1
+
+  - name: example2
+    packets: 10
+    hosts:
+      - 192.0.2.3
+      - 192.0.2.4
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m ping
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/ping/metadata.yaml b/src/go/collectors/go.d.plugin/modules/ping/metadata.yaml
new file mode 100644
index 00000000000000..e446e428d31e16
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/ping/metadata.yaml
@@ -0,0 +1,193 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-ping
+      plugin_name: go.d.plugin
+      module_name: ping
+      monitored_instance:
+        name: Ping
+        link: ""
+        icon_filename: globe.svg
+        categories:
+          - data-collection.synthetic-checks
+      keywords:
+        - ping
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This module measures round-trip time and packet loss by sending ping messages to network hosts.
+
+          There are two operational modes:
+
+          - privileged (send raw ICMP ping, default). Requires
+            CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:
+            > **Note**: set automatically during Netdata installation.
+
+            ```bash
+            sudo setcap CAP_NET_RAW=eip <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin
+            ```
+
+          - unprivileged (send UDP ping, Linux only).
+            Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):
+
+            ```bash
+            sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"
+            ```
+            To persist the change add `net.ipv4.ping_group_range="0 2147483647"` to `/etc/sysctl.conf` and
+            execute `sudo sysctl -p`.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/ping.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: hosts
+              description: Network hosts.
+              default_value: ""
+              required: true
+            - name: network
+              description: "Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6)."
+              default_value: "ip"
+              required: false
+            - name: privileged
+              description: Ping packets type. "no" means send an "unprivileged" UDP ping, "yes" - raw ICMP ping.
+              default_value: true
+              required: false
+            - name: packets
+              description: Number of ping packets to send.
+              default_value: 5
+              required: false
+            - name: interval
+              description: Time to wait between sending ping packets.
+              default_value: 100ms
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: IPv4 hosts
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: example
+                    hosts:
+                      - 192.0.2.0
+                      - 192.0.2.1
+            - name: Unprivileged mode
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: example
+                    privileged: no
+                    hosts:
+                      - 192.0.2.0
+                      - 192.0.2.1
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Multiple instances.
+              config: |
+                jobs:
+                  - name: example1
+                    hosts:
+                      - 192.0.2.0
+                      - 192.0.2.1
+
+                  - name: example2
+                    packets: 10
+                    hosts:
+                      - 192.0.2.3
+                      - 192.0.2.4
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: ping_host_reachable
+        metric: ping.host_packet_loss
+        info: "network host ${label:host} reachability status"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+      - name: ping_packet_loss
+        metric: ping.host_packet_loss
+        info: "packet loss percentage to the network host ${label:host} over the last 10 minutes"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+      - name: ping_host_latency
+        metric: ping.host_rtt
+        info: "average latency to the network host ${label:host} over the last 10 seconds"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: host
+          description: These metrics refer to the remote host.
+          labels:
+            - name: host
+              description: remote host
+          metrics:
+            - name: ping.host_rtt
+              description: Ping round-trip time
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: min
+                - name: max
+                - name: avg
+            - name: ping.host_std_dev_rtt
+              description: Ping round-trip time standard deviation
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: std_dev
+            - name: ping.host_packet_loss
+              description: Ping packet loss
+              unit: percentage
+              chart_type: line
+              dimensions:
+                - name: loss
+            - name: ping.host_packets
+              description: Ping packets transferred
+              unit: packets
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
diff --git a/src/go/collectors/go.d.plugin/modules/ping/ping.go b/src/go/collectors/go.d.plugin/modules/ping/ping.go
new file mode 100644
index 00000000000000..7aa402985f2bea
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/ping/ping.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+	_ "embed"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/logger"
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	probing "github.com/prometheus-community/pro-bing"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("ping", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			UpdateEvery: 5,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+func New() *Ping {
+	return &Ping{
+		Config: Config{
+			Network:     "ip",
+			Privileged:  true,
+			SendPackets: 5,
+			Interval:    web.Duration{Duration: time.Millisecond * 100},
+		},
+
+		charts:    &module.Charts{},
+		hosts:     make(map[string]bool),
+		newProber: newPingProber,
+	}
+}
+
+type Config struct {
+	UpdateEvery int          `yaml:"update_every"`
+	Hosts       []string     `yaml:"hosts"`
+	Network     string       `yaml:"network"`
+	Privileged  bool         `yaml:"privileged"`
+	SendPackets int          `yaml:"packets"`
+	Interval    web.Duration `yaml:"interval"`
+	Interface   string       `yaml:"interface"`
+}
+
+type (
+	Ping struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		charts *module.Charts
+
+		hosts 
map[string]bool + + newProber func(pingProberConfig, *logger.Logger) prober + prober prober + } + prober interface { + ping(host string) (*probing.Statistics, error) + } +) + +func (p *Ping) Init() bool { + err := p.validateConfig() + if err != nil { + p.Errorf("config validation: %v", err) + return false + } + + pr, err := p.initProber() + if err != nil { + p.Errorf("init prober: %v", err) + return false + } + p.prober = pr + + return true +} + +func (p *Ping) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Ping) Charts() *module.Charts { + return p.charts +} + +func (p *Ping) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (p *Ping) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/ping/ping_test.go b/src/go/collectors/go.d.plugin/modules/ping/ping_test.go new file mode 100644 index 00000000000000..57958d557426c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/ping_test.go @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ping + +import ( + "errors" + "testing" + "time" + + "github.com/netdata/go.d.plugin/logger" + + probing "github.com/prometheus-community/pro-bing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPing_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "fail with default": { + wantFail: true, + config: New().Config, + }, + "success when 'hosts' set": { + wantFail: false, + config: Config{ + SendPackets: 1, + Hosts: []string{"192.0.2.0"}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ping := New() + ping.Config = test.config + ping.UpdateEvery = 1 + + if test.wantFail { + assert.False(t, ping.Init()) + } else { + assert.True(t, ping.Init()) + } + }) + } +} + +func TestPing_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestPing_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestPing_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) *Ping + }{ + "success when ping does not return an error": { + wantFail: false, + prepare: casePingSuccess, + }, + "fail when ping returns an error": { + wantFail: true, + prepare: casePingError, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ping := test.prepare(t) + + if test.wantFail { + assert.False(t, ping.Check()) + } else { + assert.True(t, ping.Check()) + } + }) + } +} + +func TestPing_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Ping + wantMetrics map[string]int64 + wantNumCharts int + }{ + "success when ping does not return an error": { + prepare: casePingSuccess, + wantMetrics: map[string]int64{ + "host_192.0.2.1_avg_rtt": 15000, + "host_192.0.2.1_max_rtt": 20000, + "host_192.0.2.1_min_rtt": 10000, + "host_192.0.2.1_packet_loss": 0, + "host_192.0.2.1_packets_recv": 5, + "host_192.0.2.1_packets_sent": 5, + "host_192.0.2.1_std_dev_rtt": 5000, + "host_192.0.2.2_avg_rtt": 15000, + "host_192.0.2.2_max_rtt": 20000, + "host_192.0.2.2_min_rtt": 10000, + "host_192.0.2.2_packet_loss": 0, + "host_192.0.2.2_packets_recv": 5, + "host_192.0.2.2_packets_sent": 5, + "host_192.0.2.2_std_dev_rtt": 5000, + "host_example.com_avg_rtt": 15000, + "host_example.com_max_rtt": 20000, + "host_example.com_min_rtt": 10000, + "host_example.com_packet_loss": 0, + "host_example.com_packets_recv": 5, + 
"host_example.com_packets_sent": 5, + "host_example.com_std_dev_rtt": 5000, + }, + wantNumCharts: 3 * len(hostChartsTmpl), + }, + "fail when ping returns an error": { + prepare: casePingError, + wantMetrics: nil, + wantNumCharts: 0, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ping := test.prepare(t) + + mx := ping.Collect() + + require.Equal(t, test.wantMetrics, mx) + + if len(test.wantMetrics) > 0 { + assert.Len(t, *ping.Charts(), test.wantNumCharts) + } + }) + } +} + +func casePingSuccess(t *testing.T) *Ping { + ping := New() + ping.UpdateEvery = 1 + ping.Hosts = []string{"192.0.2.1", "192.0.2.2", "example.com"} + ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { + return &mockProber{} + } + require.True(t, ping.Init()) + return ping +} + +func casePingError(t *testing.T) *Ping { + ping := New() + ping.UpdateEvery = 1 + ping.Hosts = []string{"192.0.2.1", "192.0.2.2", "example.com"} + ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { + return &mockProber{errOnPing: true} + } + require.True(t, ping.Init()) + return ping +} + +type mockProber struct { + errOnPing bool +} + +func (m *mockProber) ping(host string) (*probing.Statistics, error) { + if m.errOnPing { + return nil, errors.New("mock.ping() error") + } + + stats := probing.Statistics{ + PacketsRecv: 5, + PacketsSent: 5, + PacketsRecvDuplicates: 0, + PacketLoss: 0, + Addr: host, + Rtts: nil, + MinRtt: time.Millisecond * 10, + MaxRtt: time.Millisecond * 20, + AvgRtt: time.Millisecond * 15, + StdDevRtt: time.Millisecond * 5, + } + + return &stats, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/ping/prober.go b/src/go/collectors/go.d.plugin/modules/ping/prober.go new file mode 100644 index 00000000000000..bd7c8cfbfde11a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/ping/prober.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package ping + +import ( + "errors" + "fmt" + "net" + "time" + + "github.com/netdata/go.d.plugin/logger" + + probing "github.com/prometheus-community/pro-bing" +) + +func newPingProber(conf pingProberConfig, log *logger.Logger) prober { + var source string + if conf.iface != "" { + if addr, err := getInterfaceIPAddress(conf.iface); err != nil { + log.Warningf("error getting interface '%s' IP address: %v", conf.iface, err) + } else { + log.Infof("interface '%s' IP address '%s', will use it as the source", conf.iface, addr) + source = addr + } + } + + return &pingProber{ + network: conf.network, + privileged: conf.privileged, + packets: conf.packets, + source: source, + interval: conf.interval, + deadline: conf.deadline, + Logger: log, + } +} + +type pingProberConfig struct { + network string + privileged bool + packets int + iface string + interval time.Duration + deadline time.Duration +} + +type pingProber struct { + *logger.Logger + + network string + privileged bool + packets int + source string + interval time.Duration + deadline time.Duration +} + +func (p *pingProber) ping(host string) (*probing.Statistics, error) { + pr := probing.New(host) + + pr.SetNetwork(p.network) + + if err := pr.Resolve(); err != nil { + return nil, fmt.Errorf("DNS lookup '%s' : %v", host, err) + } + + pr.Source = p.source + pr.RecordRtts = false + pr.Interval = p.interval + pr.Count = p.packets + pr.Timeout = p.deadline + pr.SetPrivileged(p.privileged) + pr.SetLogger(nil) + + if err := pr.Run(); err != nil { + return nil, fmt.Errorf("pinging host '%s' (ip %s): %v", pr.Addr(), pr.IPAddr(), err) + } + + stats := 
pr.Statistics() + + p.Debugf("ping stats for host '%s' (ip '%s'): %+v", pr.Addr(), pr.IPAddr(), stats) + + return stats, nil +} + +func getInterfaceIPAddress(ifaceName string) (ipaddr string, err error) { + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return "", err + } + + addresses, err := iface.Addrs() + if err != nil { + return "", err + } + + // FIXME: add IPv6 support + var v4Addr string + for _, addr := range addresses { + if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.To4() != nil { + v4Addr = ipnet.IP.To4().String() + break + } + } + + if v4Addr == "" { + return "", errors.New("ipv4 addresses not found") + } + + return v4Addr, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/README.md b/src/go/collectors/go.d.plugin/modules/portcheck/README.md new file mode 120000 index 00000000000000..4bee556ef6e00a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/README.md @@ -0,0 +1 @@ +integrations/tcp_endpoints.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/charts.go b/src/go/collectors/go.d.plugin/modules/portcheck/charts.go new file mode 100644 index 00000000000000..b4e9b0977a645d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/charts.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "fmt" + "strconv" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioCheckStatus = module.Priority + iota + prioCheckInStatusDuration + prioCheckLatency +) + +var chartsTmpl = module.Charts{ + checkStatusChartTmpl.Copy(), + checkInStateDurationChartTmpl.Copy(), + checkConnectionLatencyChartTmpl.Copy(), +} + +var checkStatusChartTmpl = module.Chart{ + ID: "port_%d_status", + Title: "TCP Check Status", + Units: "boolean", + Fam: "status", + Ctx: "portcheck.status", + Priority: prioCheckStatus, + Dims: module.Dims{ + {ID: "port_%d_success", Name: "success"}, + {ID: "port_%d_failed", Name: "failed"}, + {ID: "port_%d_timeout", Name: "timeout"}, + }, +} + +var checkInStateDurationChartTmpl = module.Chart{ + ID: "port_%d_current_state_duration", + Title: "Current State Duration", + Units: "seconds", + Fam: "status duration", + Ctx: "portcheck.state_duration", + Priority: prioCheckInStatusDuration, + Dims: module.Dims{ + {ID: "port_%d_current_state_duration", Name: "time"}, + }, +} + +var checkConnectionLatencyChartTmpl = module.Chart{ + ID: "port_%d_connection_latency", + Title: "TCP Connection Latency", + Units: "ms", + Fam: "latency", + Ctx: "portcheck.latency", + Priority: prioCheckLatency, + Dims: module.Dims{ + {ID: "port_%d_latency", Name: "time"}, + }, +} + +func newPortCharts(host string, port int) *module.Charts { + charts := chartsTmpl.Copy() + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "host", Value: host}, + {Key: "port", Value: strconv.Itoa(port)}, + } + chart.ID = fmt.Sprintf(chart.ID, port) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, port) + } + } + return charts +} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/collect.go b/src/go/collectors/go.d.plugin/modules/portcheck/collect.go new file mode 100644 index 00000000000000..723c105c3a9e1a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/collect.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "fmt" + "sync" + "time" +) + +type checkState string + +const ( + checkStateSuccess checkState = "success" + checkStateTimeout 
checkState = "timeout" + checkStateFailed checkState = "failed" +) + +func (pc *PortCheck) collect() (map[string]int64, error) { + wg := &sync.WaitGroup{} + + for _, p := range pc.ports { + wg.Add(1) + go func(p *port) { pc.checkPort(p); wg.Done() }(p) + } + wg.Wait() + + mx := make(map[string]int64) + + for _, p := range pc.ports { + mx[fmt.Sprintf("port_%d_current_state_duration", p.number)] = int64(p.inState) + mx[fmt.Sprintf("port_%d_latency", p.number)] = int64(p.latency) + mx[fmt.Sprintf("port_%d_%s", p.number, checkStateSuccess)] = 0 + mx[fmt.Sprintf("port_%d_%s", p.number, checkStateTimeout)] = 0 + mx[fmt.Sprintf("port_%d_%s", p.number, checkStateFailed)] = 0 + mx[fmt.Sprintf("port_%d_%s", p.number, p.state)] = 1 + } + + return mx, nil +} + +func (pc *PortCheck) checkPort(p *port) { + start := time.Now() + conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration) + dur := time.Since(start) + + defer func() { + if conn != nil { + _ = conn.Close() + } + }() + + if err != nil { + v, ok := err.(interface{ Timeout() bool }) + if ok && v.Timeout() { + pc.setPortState(p, checkStateTimeout) + } else { + pc.setPortState(p, checkStateFailed) + } + return + } + pc.setPortState(p, checkStateSuccess) + p.latency = durationToMs(dur) +} + +func (pc *PortCheck) setPortState(p *port, s checkState) { + if p.state != s { + p.inState = pc.UpdateEvery + p.state = s + } else { + p.inState += pc.UpdateEvery + } +} + +func durationToMs(duration time.Duration) int { + return int(duration) / (int(time.Millisecond) / int(time.Nanosecond)) +} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/config_schema.json b/src/go/collectors/go.d.plugin/modules/portcheck/config_schema.json new file mode 100644 index 00000000000000..8b9515702b32c8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/config_schema.json @@ -0,0 +1,37 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/portcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "host": { + "type": "string", + "minLength": 1 + }, + "ports": { + "type": "array", + "items": { + "type": "integer", + "minimum": 1 + }, + "minItems": 1 + }, + "timeout": { + "type": [ + "string", + "integer" + ], + "minLength": 1, + "minimum": 1, + "description": "The timeout duration, in seconds. Must be at least 1." 
+ } + }, + "required": [ + "name", + "host", + "ports" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/init.go b/src/go/collectors/go.d.plugin/modules/portcheck/init.go new file mode 100644 index 00000000000000..d5c2ebb557bd8b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/init.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (pc *PortCheck) validateConfig() error { + if pc.Host == "" { + return errors.New("'host' parameter not set") + } + if len(pc.Ports) == 0 { + return errors.New("'ports' parameter not set") + } + return nil +} + +func (pc *PortCheck) initCharts() (*module.Charts, error) { + var charts module.Charts + + for _, port := range pc.Ports { + if err := charts.Add(*newPortCharts(pc.Host, port)...); err != nil { + return nil, err + } + } + + return &charts, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/integrations/tcp_endpoints.md b/src/go/collectors/go.d.plugin/modules/portcheck/integrations/tcp_endpoints.md new file mode 100644 index 00000000000000..f14aac41c34270 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/integrations/tcp_endpoints.md @@ -0,0 +1,217 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/portcheck/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/portcheck/metadata.yaml" +sidebar_label: "TCP Endpoints" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# TCP Endpoints + + +<img src="https://netdata.cloud/img/globe.svg" width="150"/> + + +Plugin: go.d.plugin +Module: portcheck + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors TCP services availability and response time. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per tcp endpoint + +These metrics refer to the TCP endpoint. 
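+
+For example, a job configured with `host: 203.0.113.10` and `ports: [22, 8080]` (a hypothetical endpoint) produces one instance of each chart per port, labeled `host=203.0.113.10, port=22` and `host=203.0.113.10, port=8080` respectively.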
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| host | host |
+| port | port |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| portcheck.status | success, failed, timeout | boolean |
+| portcheck.state_duration | time | seconds |
+| portcheck.latency | time | ms |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |
+| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |
+| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/portcheck.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/portcheck.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| host | Remote host address in IPv4, IPv6 format, or DNS name. | | yes |
+| ports | Remote host ports. Must be specified in numeric format. | | yes |
+| timeout | TCP connection timeout, in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Check SSH and telnet
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: server1
+    host: 127.0.0.1
+    ports:
+      - 22
+      - 23
+
+```
+</details>
+
+##### Check webserver with IPv6 address
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: server2
+    host: "[2001:DB8::1]"
+    ports:
+      - 80
+      - 8080
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Multiple instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: server1
+    host: 127.0.0.1
+    ports:
+      - 22
+      - 23
+
+  - name: server2
+    host: 203.0.113.10
+    ports:
+      - 22
+      - 23
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m portcheck
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/metadata.yaml b/src/go/collectors/go.d.plugin/modules/portcheck/metadata.yaml
new file mode 100644
index 00000000000000..c0ccfde1d87e7d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/portcheck/metadata.yaml
@@ -0,0 +1,162 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-portcheck
+      plugin_name: go.d.plugin
+      module_name: portcheck
+      monitored_instance:
+        name: TCP Endpoints
+        link: ""
+        icon_filename: globe.svg
+        categories:
+          - data-collection.synthetic-checks
+      keywords: []
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors TCP services availability and response time.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/portcheck.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: host
+              description: Remote host address in IPv4, IPv6 format, or DNS name.
+              default_value: ""
+              required: true
+            - name: ports
+              description: Remote host ports. Must be specified in numeric format.
+              default_value: ""
+              required: true
+            - name: timeout
+              description: TCP connection timeout, in seconds.
+              default_value: 2
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Check SSH and telnet
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: server1
+                    host: 127.0.0.1
+                    ports:
+                      - 22
+                      - 23
+            - name: Check webserver with IPv6 address
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: server2
+                    host: "[2001:DB8::1]"
+                    ports:
+                      - 80
+                      - 8080
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Multiple instances.
+ config: | + jobs: + - name: server1 + host: 127.0.0.1 + ports: + - 22 + - 23 + + - name: server2 + host: 203.0.113.10 + ports: + - 22 + - 23 + troubleshooting: + problems: + list: [] + alerts: + - name: portcheck_service_reachable + metric: portcheck.status + info: "TCP host ${label:host} port ${label:port} liveness status" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf + - name: portcheck_connection_timeouts + metric: portcheck.status + info: "percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf + - name: portcheck_connection_fails + metric: portcheck.status + info: "percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: tcp endpoint + description: These metrics refer to the TCP endpoint. + labels: + - name: host + description: host + - name: port + description: port + metrics: + - name: portcheck.status + description: TCP Check Status + unit: boolean + chart_type: line + dimensions: + - name: success + - name: failed + - name: timeout + - name: portcheck.state_duration + description: Current State Duration + unit: seconds + chart_type: line + dimensions: + - name: time + - name: portcheck.latency + description: TCP Connection Latency + unit: ms + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/portcheck.go b/src/go/collectors/go.d.plugin/modules/portcheck/portcheck.go new file mode 100644 index 00000000000000..c7e2c0b9dcb630 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/portcheck.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + _ "embed" + "net" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("portcheck", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *PortCheck { + return &PortCheck{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 2}, + }, + dial: net.DialTimeout, + } +} + +type Config struct { + Host string `yaml:"host"` + Ports []int `yaml:"ports"` + Timeout web.Duration `yaml:"timeout"` +} + +type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) + +type port struct { + number int + state checkState + inState int + latency int +} + +type PortCheck struct { + module.Base + Config `yaml:",inline"` + UpdateEvery int `yaml:"update_every"` + + charts *module.Charts + dial dialFunc + ports []*port +} + +func (pc *PortCheck) Init() bool { + if err := pc.validateConfig(); err != nil { + pc.Errorf("config validation: %v", err) + return false + } + + charts, err := pc.initCharts() + if err != nil { + pc.Errorf("init charts: %v", err) + return false + } + pc.charts = charts + + for _, p := range pc.Ports { + pc.ports = append(pc.ports, &port{number: p}) + } + + pc.Debugf("using host: %s", pc.Host) + pc.Debugf("using ports: %v", pc.Ports) + pc.Debugf("using TCP connection timeout: %s", pc.Timeout) + + return true +} + 
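+// Check always reports success: the job is considered ready once Init has
+// validated the configuration. Per-port reachability is reported through the
+// collected metrics (for example, a failing port shows up as port_<N>_failed=1)
+// rather than through the module's readiness state.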
+func (pc *PortCheck) Check() bool { + return true +} + +func (pc *PortCheck) Charts() *module.Charts { + return pc.charts +} + +func (pc *PortCheck) Collect() map[string]int64 { + mx, err := pc.collect() + if err != nil { + pc.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (pc *PortCheck) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/portcheck/portcheck_test.go b/src/go/collectors/go.d.plugin/modules/portcheck/portcheck_test.go new file mode 100644 index 00000000000000..2e242cbbbbe61a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/portcheck/portcheck_test.go @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package portcheck + +import ( + "errors" + "net" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) +} + +func TestPortCheck_Init(t *testing.T) { + job := New() + + job.Host = "127.0.0.1" + job.Ports = []int{39001, 39002} + assert.True(t, job.Init()) + assert.Len(t, job.ports, 2) +} +func TestPortCheck_InitNG(t *testing.T) { + job := New() + + assert.False(t, job.Init()) + job.Host = "127.0.0.1" + assert.False(t, job.Init()) + job.Ports = []int{39001, 39002} + assert.True(t, job.Init()) +} + +func TestPortCheck_Check(t *testing.T) { + assert.True(t, New().Check()) +} + +func TestPortCheck_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestPortCheck_Charts(t *testing.T) { + job := New() + job.Ports = []int{1, 2} + job.Host = "localhost" + require.True(t, job.Init()) + assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports)) +} + +func TestPortCheck_Collect(t *testing.T) { + job := New() + + job.Host = "127.0.0.1" + job.Ports = []int{39001, 39002} + job.UpdateEvery = 5 + job.dial = testDial(nil) + require.True(t, job.Init()) + require.True(t, job.Check()) + + copyLatency := func(dst, src map[string]int64) { + for k := range dst { + if strings.HasSuffix(k, "latency") { + dst[k] = src[k] + } + } + } + + expected := map[string]int64{ + "port_39001_current_state_duration": int64(job.UpdateEvery), + "port_39001_failed": 0, + "port_39001_latency": 0, + "port_39001_success": 1, + "port_39001_timeout": 0, + "port_39002_current_state_duration": int64(job.UpdateEvery), + "port_39002_failed": 0, + "port_39002_latency": 0, + "port_39002_success": 1, + "port_39002_timeout": 0, + } + collected := job.Collect() + copyLatency(expected, collected) + + assert.Equal(t, expected, collected) + + expected = map[string]int64{ + "port_39001_current_state_duration": int64(job.UpdateEvery) * 2, + "port_39001_failed": 0, + "port_39001_latency": 0, + "port_39001_success": 1, + "port_39001_timeout": 0, + "port_39002_current_state_duration": int64(job.UpdateEvery) * 2, + "port_39002_failed": 0, + "port_39002_latency": 0, + "port_39002_success": 1, + "port_39002_timeout": 0, + } + collected = job.Collect() + copyLatency(expected, collected) + + assert.Equal(t, expected, collected) + + job.dial = testDial(errors.New("checkStateFailed")) + + expected = map[string]int64{ + "port_39001_current_state_duration": int64(job.UpdateEvery), + "port_39001_failed": 1, + "port_39001_latency": 0, + "port_39001_success": 0, + "port_39001_timeout": 0, + "port_39002_current_state_duration": int64(job.UpdateEvery), + "port_39002_failed": 1, + "port_39002_latency": 0, + "port_39002_success": 0, + "port_39002_timeout": 0, + } + collected = 
job.Collect() + copyLatency(expected, collected) + + assert.Equal(t, expected, collected) + + job.dial = testDial(timeoutError{}) + + expected = map[string]int64{ + "port_39001_current_state_duration": int64(job.UpdateEvery), + "port_39001_failed": 0, + "port_39001_latency": 0, + "port_39001_success": 0, + "port_39001_timeout": 1, + "port_39002_current_state_duration": int64(job.UpdateEvery), + "port_39002_failed": 0, + "port_39002_latency": 0, + "port_39002_success": 0, + "port_39002_timeout": 1, + } + collected = job.Collect() + copyLatency(expected, collected) + + assert.Equal(t, expected, collected) +} + +func testDial(err error) dialFunc { + return func(_, _ string, _ time.Duration) (net.Conn, error) { return &net.TCPConn{}, err } +} + +type timeoutError struct{} + +func (timeoutError) Error() string { return "timeout" } +func (timeoutError) Timeout() bool { return true } diff --git a/src/go/collectors/go.d.plugin/modules/postgres/README.md b/src/go/collectors/go.d.plugin/modules/postgres/README.md new file mode 120000 index 00000000000000..73b67b984b4368 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/README.md @@ -0,0 +1 @@ +integrations/postgresql.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/charts.go b/src/go/collectors/go.d.plugin/modules/postgres/charts.go new file mode 100644 index 00000000000000..02904fcf1b080e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/charts.go @@ -0,0 +1,1400 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "fmt" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioConnectionsUtilization = module.Priority + iota + prioConnectionsUsage + prioConnectionsStateCount + prioDBConnectionsUtilization + prioDBConnectionsCount + + prioTransactionsDuration + prioDBTransactionsRatio + prioDBTransactionsRate + + prioQueriesDuration + + prioDBOpsFetchedRowsRatio + prioDBOpsReadRowsRate + prioDBOpsWriteRowsRate + prioDBTempFilesCreatedRate + prioDBTempFilesIORate + prioTableOpsRowsRate + prioTableOpsRowsHOTRatio + prioTableOpsRowsHOTRate + prioTableScansRate + prioTableScansRowsRate + + prioDBCacheIORatio + prioDBIORate + prioTableCacheIORatio + prioTableIORate + prioTableIndexCacheIORatio + prioTableIndexIORate + prioTableToastCacheIORatio + prioTableToastIORate + prioTableToastIndexCacheIORatio + prioTableToastIndexIORate + + prioDBSize + prioTableTotalSize + prioIndexSize + + prioTableBloatSizePerc + prioTableBloatSize + prioIndexBloatSizePerc + prioIndexBloatSize + + prioLocksUtilization + prioDBLocksHeldCount + prioDBLocksAwaitedCount + prioDBDeadlocksRate + + prioAutovacuumWorkersCount + prioTableAutovacuumSinceTime + prioTableVacuumSinceTime + prioTableAutoAnalyzeSinceTime + prioTableLastAnalyzeAgo + + prioCheckpointsRate + prioCheckpointsTime + prioBGWriterHaltsRate + prioBuffersIORate + prioBuffersBackendFsyncRate + prioBuffersAllocRate + prioTXIDExhaustionTowardsAutovacuumPerc + prioTXIDExhaustionPerc + prioTXIDExhaustionOldestTXIDNum + prioTableRowsDeadRatio + prioTableRowsCount + prioTableNullColumns + prioIndexUsageStatus + + prioReplicationAppWALLagSize + prioReplicationAppWALLagTime + prioReplicationSlotFilesCount + prioDBConflictsRate + prioDBConflictsReasonRate + + prioWALIORate + prioWALFilesCount + prioWALArchivingFilesCount + + prioDatabasesCount + prioCatalogRelationsCount + prioCatalogRelationsSize + + prioUptime +) + +var baseCharts = module.Charts{ + 
serverConnectionsUtilizationChart.Copy(), + serverConnectionsUsageChart.Copy(), + serverConnectionsStateCount.Copy(), + locksUtilization.Copy(), + checkpointsChart.Copy(), + checkpointWriteChart.Copy(), + buffersIORateChart.Copy(), + buffersAllocRateChart.Copy(), + bgWriterHaltsRateChart.Copy(), + buffersBackendFsyncRateChart.Copy(), + walIORateChart.Copy(), + autovacuumWorkersCountChart.Copy(), + txidExhaustionTowardsAutovacuumPercChart.Copy(), + txidExhaustionPercChart.Copy(), + txidExhaustionOldestTXIDNumChart.Copy(), + + catalogRelationSCountChart.Copy(), + catalogRelationsSizeChart.Copy(), + serverUptimeChart.Copy(), + databasesCountChart.Copy(), +} + +var walFilesCharts = module.Charts{ + walFilesCountChart.Copy(), + walArchivingFilesCountChart.Copy(), +} + +func (p *Postgres) addWALFilesCharts() { + charts := walFilesCharts.Copy() + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +var ( + serverConnectionsUtilizationChart = module.Chart{ + ID: "connections_utilization", + Title: "Connections utilization", + Units: "percentage", + Fam: "connections", + Ctx: "postgres.connections_utilization", + Priority: prioConnectionsUtilization, + Dims: module.Dims{ + {ID: "server_connections_utilization", Name: "used"}, + }, + } + serverConnectionsUsageChart = module.Chart{ + ID: "connections_usage", + Title: "Connections usage", + Units: "connections", + Fam: "connections", + Ctx: "postgres.connections_usage", + Priority: prioConnectionsUsage, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "server_connections_available", Name: "available"}, + {ID: "server_connections_used", Name: "used"}, + }, + } + serverConnectionsStateCount = module.Chart{ + ID: "connections_state", + Title: "Connections in each state", + Units: "connections", + Fam: "connections", + Ctx: "postgres.connections_state_count", + Priority: prioConnectionsStateCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "server_connections_state_active", Name: "active"}, + {ID: "server_connections_state_idle", Name: "idle"}, + {ID: "server_connections_state_idle_in_transaction", Name: "idle_in_transaction"}, + {ID: "server_connections_state_idle_in_transaction_aborted", Name: "idle_in_transaction_aborted"}, + {ID: "server_connections_state_fastpath_function_call", Name: "fastpath_function_call"}, + {ID: "server_connections_state_disabled", Name: "disabled"}, + }, + } + + locksUtilization = module.Chart{ + ID: "locks_utilization", + Title: "Acquired locks utilization", + Units: "percentage", + Fam: "locks", + Ctx: "postgres.locks_utilization", + Priority: prioLocksUtilization, + Dims: module.Dims{ + {ID: "locks_utilization", Name: "used"}, + }, + } + + checkpointsChart = module.Chart{ + ID: "checkpoints_rate", + Title: "Checkpoints", + Units: "checkpoints/s", + Fam: "maintenance", + Ctx: "postgres.checkpoints_rate", + Priority: prioCheckpointsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "checkpoints_timed", Name: "scheduled", Algo: module.Incremental}, + {ID: "checkpoints_req", Name: "requested", Algo: module.Incremental}, + }, + } + // TODO: should be seconds, also it is units/s when using incremental... 
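+	// For context: checkpoint_write_time and checkpoint_sync_time in
+	// pg_stat_bgwriter are cumulative counters measured in milliseconds, so with
+	// Algo: Incremental the chart below effectively plots milliseconds of
+	// checkpoint time spent per second of wall-clock time.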
+	checkpointWriteChart = module.Chart{
+		ID:       "checkpoints_time",
+		Title:    "Checkpoint time",
+		Units:    "milliseconds",
+		Fam:      "maintenance",
+		Ctx:      "postgres.checkpoints_time",
+		Priority: prioCheckpointsTime,
+		Type:     module.Stacked,
+		Dims: module.Dims{
+			{ID: "checkpoint_write_time", Name: "write", Algo: module.Incremental},
+			{ID: "checkpoint_sync_time", Name: "sync", Algo: module.Incremental},
+		},
+	}
+	bgWriterHaltsRateChart = module.Chart{
+		ID:       "bgwriter_halts_rate",
+		Title:    "Background writer scan halts",
+		Units:    "halts/s",
+		Fam:      "maintenance",
+		Ctx:      "postgres.bgwriter_halts_rate",
+		Priority: prioBGWriterHaltsRate,
+		Dims: module.Dims{
+			{ID: "maxwritten_clean", Name: "maxwritten", Algo: module.Incremental},
+		},
+	}
+
+	buffersIORateChart = module.Chart{
+		ID:       "buffers_io_rate",
+		Title:    "Buffers written rate",
+		Units:    "B/s",
+		Fam:      "maintenance",
+		Ctx:      "postgres.buffers_io_rate",
+		Priority: prioBuffersIORate,
+		Type:     module.Area,
+		Dims: module.Dims{
+			{ID: "buffers_checkpoint", Name: "checkpoint", Algo: module.Incremental},
+			{ID: "buffers_backend", Name: "backend", Algo: module.Incremental},
+			{ID: "buffers_clean", Name: "bgwriter", Algo: module.Incremental},
+		},
+	}
+	buffersBackendFsyncRateChart = module.Chart{
+		ID:       "buffers_backend_fsync_rate",
+		Title:    "Backend fsync calls",
+		Units:    "calls/s",
+		Fam:      "maintenance",
+		Ctx:      "postgres.buffers_backend_fsync_rate",
+		Priority: prioBuffersBackendFsyncRate,
+		Dims: module.Dims{
+			{ID: "buffers_backend_fsync", Name: "fsync", Algo: module.Incremental},
+		},
+	}
+	buffersAllocRateChart = module.Chart{
+		ID:       "buffers_alloc_rate",
+		Title:    "Buffers allocated",
+		Units:    "B/s",
+		Fam:      "maintenance",
+		Ctx:      "postgres.buffers_allocated_rate",
+		Priority: prioBuffersAllocRate,
+		Dims: module.Dims{
+			{ID: "buffers_alloc", Name: "allocated", Algo: module.Incremental},
+		},
+	}
+
+	walIORateChart = module.Chart{
+		ID:       "wal_io_rate",
+		Title:    "Write-Ahead Log writes",
+		Units:    "B/s",
+		Fam:      "wal",
+		Ctx:      "postgres.wal_io_rate",
+		Priority: prioWALIORate,
+		Dims: module.Dims{
+			{ID: "wal_writes", Name: "written", Algo: module.Incremental},
+		},
+	}
+	walFilesCountChart = module.Chart{
+		ID:       "wal_files_count",
+		Title:    "Write-Ahead Log files",
+		Units:    "files",
+		Fam:      "wal",
+		Ctx:      "postgres.wal_files_count",
+		Priority: prioWALFilesCount,
+		Type:     module.Stacked,
+		Dims: module.Dims{
+			{ID: "wal_written_files", Name: "written"},
+			{ID: "wal_recycled_files", Name: "recycled"},
+		},
+	}
+
+	walArchivingFilesCountChart = module.Chart{
+		ID:       "wal_archiving_files_count",
+		Title:    "Write-Ahead Log archived files",
+		Units:    "files",
+		Fam:      "wal",
+		Ctx:      "postgres.wal_archiving_files_count",
+		Priority: prioWALArchivingFilesCount,
+		Type:     module.Stacked,
+		Dims: module.Dims{
+			{ID: "wal_archive_files_ready_count", Name: "ready"},
+			{ID: "wal_archive_files_done_count", Name: "done"},
+		},
+	}
+
+	autovacuumWorkersCountChart = module.Chart{
+		ID:       "autovacuum_workers_count",
+		Title:    "Autovacuum workers",
+		Units:    "workers",
+		Fam:      "vacuum and analyze",
+		Ctx:      "postgres.autovacuum_workers_count",
+		Priority: prioAutovacuumWorkersCount,
+		Dims: module.Dims{
+			{ID: "autovacuum_analyze", Name: "analyze"},
+			{ID: "autovacuum_vacuum_analyze", Name: "vacuum_analyze"},
+			{ID: "autovacuum_vacuum", Name: "vacuum"},
+			{ID: "autovacuum_vacuum_freeze", Name: "vacuum_freeze"},
+			{ID: "autovacuum_brin_summarize", Name: "brin_summarize"},
+		},
+	}
+
+	txidExhaustionTowardsAutovacuumPercChart = module.Chart{
+		ID:       "txid_exhaustion_towards_autovacuum_perc",
+		
Title: "Percent towards emergency autovacuum", + Units: "percentage", + Fam: "maintenance", + Ctx: "postgres.txid_exhaustion_towards_autovacuum_perc", + Priority: prioTXIDExhaustionTowardsAutovacuumPerc, + Dims: module.Dims{ + {ID: "percent_towards_emergency_autovacuum", Name: "emergency_autovacuum"}, + }, + } + txidExhaustionPercChart = module.Chart{ + ID: "txid_exhaustion_perc", + Title: "Percent towards transaction ID wraparound", + Units: "percentage", + Fam: "maintenance", + Ctx: "postgres.txid_exhaustion_perc", + Priority: prioTXIDExhaustionPerc, + Dims: module.Dims{ + {ID: "percent_towards_wraparound", Name: "txid_exhaustion"}, + }, + } + txidExhaustionOldestTXIDNumChart = module.Chart{ + ID: "txid_exhaustion_oldest_txid_num", + Title: "Oldest transaction XID", + Units: "xid", + Fam: "maintenance", + Ctx: "postgres.txid_exhaustion_oldest_txid_num", + Priority: prioTXIDExhaustionOldestTXIDNum, + Dims: module.Dims{ + {ID: "oldest_current_xid", Name: "xid"}, + }, + } + + catalogRelationSCountChart = module.Chart{ + ID: "catalog_relations_count", + Title: "Relation count", + Units: "relations", + Fam: "catalog", + Ctx: "postgres.catalog_relations_count", + Priority: prioCatalogRelationsCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "catalog_relkind_r_count", Name: "ordinary_table"}, + {ID: "catalog_relkind_i_count", Name: "index"}, + {ID: "catalog_relkind_S_count", Name: "sequence"}, + {ID: "catalog_relkind_t_count", Name: "toast_table"}, + {ID: "catalog_relkind_v_count", Name: "view"}, + {ID: "catalog_relkind_m_count", Name: "materialized_view"}, + {ID: "catalog_relkind_c_count", Name: "composite_type"}, + {ID: "catalog_relkind_f_count", Name: "foreign_table"}, + {ID: "catalog_relkind_p_count", Name: "partitioned_table"}, + {ID: "catalog_relkind_I_count", Name: "partitioned_index"}, + }, + } + catalogRelationsSizeChart = module.Chart{ + ID: "catalog_relations_size", + Title: "Relation size", + Units: "B", + Fam: "catalog", + Ctx: "postgres.catalog_relations_size", + Priority: prioCatalogRelationsSize, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "catalog_relkind_r_size", Name: "ordinary_table"}, + {ID: "catalog_relkind_i_size", Name: "index"}, + {ID: "catalog_relkind_S_size", Name: "sequence"}, + {ID: "catalog_relkind_t_size", Name: "toast_table"}, + {ID: "catalog_relkind_v_size", Name: "view"}, + {ID: "catalog_relkind_m_size", Name: "materialized_view"}, + {ID: "catalog_relkind_c_size", Name: "composite_type"}, + {ID: "catalog_relkind_f_size", Name: "foreign_table"}, + {ID: "catalog_relkind_p_size", Name: "partitioned_table"}, + {ID: "catalog_relkind_I_size", Name: "partitioned_index"}, + }, + } + + serverUptimeChart = module.Chart{ + ID: "server_uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "postgres.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "server_uptime", Name: "uptime"}, + }, + } + + databasesCountChart = module.Chart{ + ID: "databases_count", + Title: "Number of databases", + Units: "databases", + Fam: "catalog", + Ctx: "postgres.databases_count", + Priority: prioDatabasesCount, + Dims: module.Dims{ + {ID: "databases_count", Name: "databases"}, + }, + } + + transactionsDurationChartTmpl = module.Chart{ + ID: "transactions_duration", + Title: "Observed transactions time", + Units: "transactions/s", + Fam: "transactions", + Ctx: "postgres.transactions_duration", + Priority: prioTransactionsDuration, + Type: module.Stacked, + } + queriesDurationChartTmpl = module.Chart{ + ID: "queries_duration", + Title: "Observed 
active queries time",
+		Units:    "queries/s",
+		Fam:      "queries",
+		Ctx:      "postgres.queries_duration",
+		Priority: prioQueriesDuration,
+		Type:     module.Stacked,
+	}
+)
+
+func newRunningTimeHistogramChart(tmpl module.Chart, prefix string, buckets []float64) (*module.Chart, error) {
+	chart := tmpl.Copy()
+
+	for i, v := range buckets {
+		dim := &module.Dim{
+			ID:   fmt.Sprintf("%s_hist_bucket_%d", prefix, i+1),
+			Name: time.Duration(v * float64(time.Second)).String(),
+			Algo: module.Incremental,
+		}
+		if err := chart.AddDim(dim); err != nil {
+			return nil, err
+		}
+	}
+
+	dim := &module.Dim{
+		ID:   fmt.Sprintf("%s_hist_bucket_inf", prefix),
+		Name: "+Inf",
+		Algo: module.Incremental,
+	}
+	if err := chart.AddDim(dim); err != nil {
+		return nil, err
+	}
+
+	return chart, nil
+}
+
+func (p *Postgres) addTransactionsRunTimeHistogramChart() {
+	chart, err := newRunningTimeHistogramChart(
+		transactionsDurationChartTmpl,
+		"transaction_running_time",
+		p.XactTimeHistogram,
+	)
+	if err != nil {
+		p.Warning(err)
+		return
+	}
+	if err := p.Charts().Add(chart); err != nil {
+		p.Warning(err)
+	}
+}
+
+func (p *Postgres) addQueriesRunTimeHistogramChart() {
+	chart, err := newRunningTimeHistogramChart(
+		queriesDurationChartTmpl,
+		"query_running_time",
+		p.QueryTimeHistogram,
+	)
+	if err != nil {
+		p.Warning(err)
+		return
+	}
+	if err := p.Charts().Add(chart); err != nil {
+		p.Warning(err)
+	}
+}
+
+var (
+	replicationStandbyAppCharts = module.Charts{
+		replicationAppWALLagSizeChartTmpl.Copy(),
+		replicationAppWALLagTimeChartTmpl.Copy(),
+	}
+	replicationAppWALLagSizeChartTmpl = module.Chart{
+		ID:       "replication_app_%s_wal_lag_size",
+		Title:    "Standby application WAL lag size",
+		Units:    "B",
+		Fam:      "replication",
+		Ctx:      "postgres.replication_app_wal_lag_size",
+		Priority: prioReplicationAppWALLagSize,
+		Dims: module.Dims{
+			{ID: "repl_standby_app_%s_wal_sent_lag_size", Name: "sent_lag"},
+			{ID: "repl_standby_app_%s_wal_write_lag_size", Name: "write_lag"},
+			{ID: "repl_standby_app_%s_wal_flush_lag_size", Name: "flush_lag"},
+			{ID: "repl_standby_app_%s_wal_replay_lag_size", Name: "replay_lag"},
+		},
+	}
+	replicationAppWALLagTimeChartTmpl = module.Chart{
+		ID:       "replication_app_%s_wal_lag_time",
+		Title:    "Standby application WAL lag time",
+		Units:    "seconds",
+		Fam:      "replication",
+		Ctx:      "postgres.replication_app_wal_lag_time",
+		Priority: prioReplicationAppWALLagTime,
+		Dims: module.Dims{
+			{ID: "repl_standby_app_%s_wal_write_lag_time", Name: "write_lag"},
+			{ID: "repl_standby_app_%s_wal_flush_lag_time", Name: "flush_lag"},
+			{ID: "repl_standby_app_%s_wal_replay_lag_time", Name: "replay_lag"},
+		},
+	}
+)
+
+func newReplicationStandbyAppCharts(app string) *module.Charts {
+	charts := replicationStandbyAppCharts.Copy()
+	for _, c := range *charts {
+		c.ID = fmt.Sprintf(c.ID, app)
+		c.Labels = []module.Label{
+			{Key: "application", Value: app},
+		}
+		for _, d := range c.Dims {
+			d.ID = fmt.Sprintf(d.ID, app)
+		}
+	}
+	return charts
+}
+
+func (p *Postgres) addNewReplicationStandbyAppCharts(app string) {
+	charts := newReplicationStandbyAppCharts(app)
+	if err := p.Charts().Add(*charts...); err != nil {
+		p.Warning(err)
+	}
+}
+
+func (p *Postgres) removeReplicationStandbyAppCharts(app string) {
+	prefix := fmt.Sprintf("replication_app_%s_", app)
+	for _, c := range *p.Charts() {
+		if strings.HasPrefix(c.ID, prefix) {
+			c.MarkRemove()
+			c.MarkNotCreated()
+		}
+	}
+}
+
+var (
+	replicationSlotCharts = module.Charts{
+		replicationSlotFilesCountChartTmpl.Copy(),
+	}
+	replicationSlotFilesCountChartTmpl = 
module.Chart{ + ID: "replication_slot_%s_files_count", + Title: "Replication slot files", + Units: "files", + Fam: "replication", + Ctx: "postgres.replication_slot_files_count", + Priority: prioReplicationSlotFilesCount, + Dims: module.Dims{ + {ID: "repl_slot_%s_replslot_wal_keep", Name: "wal_keep"}, + {ID: "repl_slot_%s_replslot_files", Name: "pg_replslot_files"}, + }, + } +) + +func newReplicationSlotCharts(slot string) *module.Charts { + charts := replicationSlotCharts.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, slot) + c.Labels = []module.Label{ + {Key: "slot", Value: slot}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, slot) + } + } + return charts +} + +func (p *Postgres) addNewReplicationSlotCharts(slot string) { + charts := newReplicationSlotCharts(slot) + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) removeReplicationSlotCharts(slot string) { + prefix := fmt.Sprintf("replication_slot_%s_", slot) + for _, c := range *p.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +var ( + dbChartsTmpl = module.Charts{ + dbTransactionsRatioChartTmpl.Copy(), + dbTransactionsRateChartTmpl.Copy(), + dbConnectionsUtilizationChartTmpl.Copy(), + dbConnectionsCountChartTmpl.Copy(), + dbCacheIORatioChartTmpl.Copy(), + dbIORateChartTmpl.Copy(), + dbOpsFetchedRowsRatioChartTmpl.Copy(), + dbOpsReadRowsRateChartTmpl.Copy(), + dbOpsWriteRowsRateChartTmpl.Copy(), + dbDeadlocksRateChartTmpl.Copy(), + dbLocksHeldCountChartTmpl.Copy(), + dbLocksAwaitedCountChartTmpl.Copy(), + dbTempFilesCreatedRateChartTmpl.Copy(), + dbTempFilesIORateChartTmpl.Copy(), + dbSizeChartTmpl.Copy(), + } + dbTransactionsRatioChartTmpl = module.Chart{ + ID: "db_%s_transactions_ratio", + Title: "Database transactions ratio", + Units: "percentage", + Fam: "transactions", + Ctx: "postgres.db_transactions_ratio", + Priority: prioDBTransactionsRatio, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "db_%s_xact_commit", Name: "committed", Algo: module.PercentOfIncremental}, + {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.PercentOfIncremental}, + }, + } + dbTransactionsRateChartTmpl = module.Chart{ + ID: "db_%s_transactions_rate", + Title: "Database transactions", + Units: "transactions/s", + Fam: "transactions", + Ctx: "postgres.db_transactions_rate", + Priority: prioDBTransactionsRate, + Dims: module.Dims{ + {ID: "db_%s_xact_commit", Name: "committed", Algo: module.Incremental}, + {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.Incremental}, + }, + } + dbConnectionsUtilizationChartTmpl = module.Chart{ + ID: "db_%s_connections_utilization", + Title: "Database connections utilization", + Units: "percentage", + Fam: "connections", + Ctx: "postgres.db_connections_utilization", + Priority: prioDBConnectionsUtilization, + Dims: module.Dims{ + {ID: "db_%s_numbackends_utilization", Name: "used"}, + }, + } + dbConnectionsCountChartTmpl = module.Chart{ + ID: "db_%s_connections", + Title: "Database connections", + Units: "connections", + Fam: "connections", + Ctx: "postgres.db_connections_count", + Priority: prioDBConnectionsCount, + Dims: module.Dims{ + {ID: "db_%s_numbackends", Name: "connections"}, + }, + } + dbCacheIORatioChartTmpl = module.Chart{ + ID: "db_%s_cache_io_ratio", + Title: "Database buffer cache miss ratio", + Units: "percentage", + Fam: "cache", + Ctx: "postgres.db_cache_io_ratio", + Priority: prioDBCacheIORatio, + Dims: module.Dims{ + {ID: "db_%s_blks_read_perc", Name: 
"miss"}, + }, + } + dbIORateChartTmpl = module.Chart{ + ID: "db_%s_io_rate", + Title: "Database reads", + Units: "B/s", + Fam: "cache", + Ctx: "postgres.db_io_rate", + Priority: prioDBIORate, + Type: module.Area, + Dims: module.Dims{ + {ID: "db_%s_blks_hit", Name: "memory", Algo: module.Incremental}, + {ID: "db_%s_blks_read", Name: "disk", Algo: module.Incremental}, + }, + } + dbOpsFetchedRowsRatioChartTmpl = module.Chart{ + ID: "db_%s_db_ops_fetched_rows_ratio", + Title: "Database rows fetched ratio", + Units: "percentage", + Fam: "throughput", + Ctx: "postgres.db_ops_fetched_rows_ratio", + Priority: prioDBOpsFetchedRowsRatio, + Dims: module.Dims{ + {ID: "db_%s_tup_fetched_perc", Name: "fetched"}, + }, + } + dbOpsReadRowsRateChartTmpl = module.Chart{ + ID: "db_%s_ops_read_rows_rate", + Title: "Database rows read", + Units: "rows/s", + Fam: "throughput", + Ctx: "postgres.db_ops_read_rows_rate", + Priority: prioDBOpsReadRowsRate, + Dims: module.Dims{ + {ID: "db_%s_tup_returned", Name: "returned", Algo: module.Incremental}, + {ID: "db_%s_tup_fetched", Name: "fetched", Algo: module.Incremental}, + }, + } + dbOpsWriteRowsRateChartTmpl = module.Chart{ + ID: "db_%s_ops_write_rows_rate", + Title: "Database rows written", + Units: "rows/s", + Fam: "throughput", + Ctx: "postgres.db_ops_write_rows_rate", + Priority: prioDBOpsWriteRowsRate, + Dims: module.Dims{ + {ID: "db_%s_tup_inserted", Name: "inserted", Algo: module.Incremental}, + {ID: "db_%s_tup_deleted", Name: "deleted", Algo: module.Incremental}, + {ID: "db_%s_tup_updated", Name: "updated", Algo: module.Incremental}, + }, + } + dbConflictsRateChartTmpl = module.Chart{ + ID: "db_%s_conflicts_rate", + Title: "Database canceled queries", + Units: "queries/s", + Fam: "replication", + Ctx: "postgres.db_conflicts_rate", + Priority: prioDBConflictsRate, + Dims: module.Dims{ + {ID: "db_%s_conflicts", Name: "conflicts", Algo: module.Incremental}, + }, + } + dbConflictsReasonRateChartTmpl = module.Chart{ + ID: "db_%s_conflicts_reason_rate", + Title: "Database canceled queries by reason", + Units: "queries/s", + Fam: "replication", + Ctx: "postgres.db_conflicts_reason_rate", + Priority: prioDBConflictsReasonRate, + Dims: module.Dims{ + {ID: "db_%s_confl_tablespace", Name: "tablespace", Algo: module.Incremental}, + {ID: "db_%s_confl_lock", Name: "lock", Algo: module.Incremental}, + {ID: "db_%s_confl_snapshot", Name: "snapshot", Algo: module.Incremental}, + {ID: "db_%s_confl_bufferpin", Name: "bufferpin", Algo: module.Incremental}, + {ID: "db_%s_confl_deadlock", Name: "deadlock", Algo: module.Incremental}, + }, + } + dbDeadlocksRateChartTmpl = module.Chart{ + ID: "db_%s_deadlocks_rate", + Title: "Database deadlocks", + Units: "deadlocks/s", + Fam: "locks", + Ctx: "postgres.db_deadlocks_rate", + Priority: prioDBDeadlocksRate, + Dims: module.Dims{ + {ID: "db_%s_deadlocks", Name: "deadlocks", Algo: module.Incremental}, + }, + } + dbLocksHeldCountChartTmpl = module.Chart{ + ID: "db_%s_locks_held", + Title: "Database locks held", + Units: "locks", + Fam: "locks", + Ctx: "postgres.db_locks_held_count", + Priority: prioDBLocksHeldCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "db_%s_lock_mode_AccessShareLock_held", Name: "access_share"}, + {ID: "db_%s_lock_mode_RowShareLock_held", Name: "row_share"}, + {ID: "db_%s_lock_mode_RowExclusiveLock_held", Name: "row_exclusive"}, + {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_held", Name: "share_update"}, + {ID: "db_%s_lock_mode_ShareLock_held", Name: "share"}, + {ID: 
"db_%s_lock_mode_ShareRowExclusiveLock_held", Name: "share_row_exclusive"}, + {ID: "db_%s_lock_mode_ExclusiveLock_held", Name: "exclusive"}, + {ID: "db_%s_lock_mode_AccessExclusiveLock_held", Name: "access_exclusive"}, + }, + } + dbLocksAwaitedCountChartTmpl = module.Chart{ + ID: "db_%s_locks_awaited_count", + Title: "Database locks awaited", + Units: "locks", + Fam: "locks", + Ctx: "postgres.db_locks_awaited_count", + Priority: prioDBLocksAwaitedCount, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "db_%s_lock_mode_AccessShareLock_awaited", Name: "access_share"}, + {ID: "db_%s_lock_mode_RowShareLock_awaited", Name: "row_share"}, + {ID: "db_%s_lock_mode_RowExclusiveLock_awaited", Name: "row_exclusive"}, + {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_awaited", Name: "share_update"}, + {ID: "db_%s_lock_mode_ShareLock_awaited", Name: "share"}, + {ID: "db_%s_lock_mode_ShareRowExclusiveLock_awaited", Name: "share_row_exclusive"}, + {ID: "db_%s_lock_mode_ExclusiveLock_awaited", Name: "exclusive"}, + {ID: "db_%s_lock_mode_AccessExclusiveLock_awaited", Name: "access_exclusive"}, + }, + } + dbTempFilesCreatedRateChartTmpl = module.Chart{ + ID: "db_%s_temp_files_files_created_rate", + Title: "Database created temporary files", + Units: "files/s", + Fam: "throughput", + Ctx: "postgres.db_temp_files_created_rate", + Priority: prioDBTempFilesCreatedRate, + Dims: module.Dims{ + {ID: "db_%s_temp_files", Name: "created", Algo: module.Incremental}, + }, + } + dbTempFilesIORateChartTmpl = module.Chart{ + ID: "db_%s_temp_files_io_rate", + Title: "Database temporary files data written to disk", + Units: "B/s", + Fam: "throughput", + Ctx: "postgres.db_temp_files_io_rate", + Priority: prioDBTempFilesIORate, + Dims: module.Dims{ + {ID: "db_%s_temp_bytes", Name: "written", Algo: module.Incremental}, + }, + } + dbSizeChartTmpl = module.Chart{ + ID: "db_%s_size", + Title: "Database size", + Units: "B", + Fam: "size", + Ctx: "postgres.db_size", + Priority: prioDBSize, + Dims: module.Dims{ + {ID: "db_%s_size", Name: "size"}, + }, + } +) + +func (p *Postgres) addDBConflictsCharts(db *dbMetrics) { + tmpl := module.Charts{ + dbConflictsRateChartTmpl.Copy(), + dbConflictsReasonRateChartTmpl.Copy(), + } + charts := newDatabaseCharts(tmpl.Copy(), db) + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func newDatabaseCharts(tmpl *module.Charts, db *dbMetrics) *module.Charts { + charts := tmpl.Copy() + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, db.name) + c.Labels = []module.Label{ + {Key: "database", Value: db.name}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, db.name) + } + } + return charts +} + +func (p *Postgres) addNewDatabaseCharts(db *dbMetrics) { + charts := newDatabaseCharts(dbChartsTmpl.Copy(), db) + + if db.size == nil { + _ = charts.Remove(fmt.Sprintf(dbSizeChartTmpl.ID, db.name)) + } + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) removeDatabaseCharts(db *dbMetrics) { + prefix := fmt.Sprintf("db_%s_", db.name) + for _, c := range *p.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +var ( + tableChartsTmpl = module.Charts{ + tableRowsCountChartTmpl.Copy(), + tableDeadRowsDeadRatioChartTmpl.Copy(), + tableOpsRowsRateChartTmpl.Copy(), + tableOpsRowsHOTRatioChartTmpl.Copy(), + tableOpsRowsHOTRateChartTmpl.Copy(), + tableScansRateChartTmpl.Copy(), + tableScansRowsRateChartTmpl.Copy(), + tableNullColumnsCountChartTmpl.Copy(), + 
tableTotalSizeChartTmpl.Copy(),
+		tableBloatSizePercChartTmpl.Copy(),
+		tableBloatSizeChartTmpl.Copy(),
+	}
+
+	tableDeadRowsDeadRatioChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_rows_dead_ratio",
+		Title:    "Table dead rows",
+		Units:    "percentage",
+		Fam:      "maintenance",
+		Ctx:      "postgres.table_rows_dead_ratio",
+		Priority: prioTableRowsDeadRatio,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_n_dead_tup_perc", Name: "dead"},
+		},
+	}
+	tableRowsCountChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_rows_count",
+		Title:    "Table total rows",
+		Units:    "rows",
+		Fam:      "maintenance",
+		Ctx:      "postgres.table_rows_count",
+		Priority: prioTableRowsCount,
+		Type:     module.Stacked,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_n_live_tup", Name: "live"},
+			{ID: "table_%s_db_%s_schema_%s_n_dead_tup", Name: "dead"},
+		},
+	}
+	tableOpsRowsRateChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_ops_rows_rate",
+		Title:    "Table throughput",
+		Units:    "rows/s",
+		Fam:      "throughput",
+		Ctx:      "postgres.table_ops_rows_rate",
+		Priority: prioTableOpsRowsRate,
+		Type:     module.Stacked,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_n_tup_ins", Name: "inserted", Algo: module.Incremental},
+			{ID: "table_%s_db_%s_schema_%s_n_tup_del", Name: "deleted", Algo: module.Incremental},
+			{ID: "table_%s_db_%s_schema_%s_n_tup_upd", Name: "updated", Algo: module.Incremental},
+		},
+	}
+	tableOpsRowsHOTRatioChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_ops_rows_hot_ratio",
+		Title:    "Table HOT updates ratio",
+		Units:    "percentage",
+		Fam:      "throughput",
+		Ctx:      "postgres.table_ops_rows_hot_ratio",
+		Priority: prioTableOpsRowsHOTRatio,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd_perc", Name: "hot"},
+		},
+	}
+	tableOpsRowsHOTRateChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_ops_rows_hot_rate",
+		Title:    "Table HOT updates",
+		Units:    "rows/s",
+		Fam:      "throughput",
+		Ctx:      "postgres.table_ops_rows_hot_rate",
+		Priority: prioTableOpsRowsHOTRate,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd", Name: "hot", Algo: module.Incremental},
+		},
+	}
+	tableCacheIORatioChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_cache_io_ratio",
+		Title:    "Table I/O cache miss ratio",
+		Units:    "percentage",
+		Fam:      "cache",
+		Ctx:      "postgres.table_cache_io_ratio",
+		Priority: prioTableCacheIORatio,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_heap_blks_read_perc", Name: "miss"},
+		},
+	}
+	tableIORateChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_io_rate",
+		Title:    "Table I/O",
+		Units:    "B/s",
+		Fam:      "cache",
+		Ctx:      "postgres.table_io_rate",
+		Priority: prioTableIORate,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_heap_blks_hit", Name: "memory", Algo: module.Incremental},
+			{ID: "table_%s_db_%s_schema_%s_heap_blks_read", Name: "disk", Algo: module.Incremental},
+		},
+	}
+	tableIndexCacheIORatioChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_index_cache_io_ratio",
+		Title:    "Table index I/O cache miss ratio",
+		Units:    "percentage",
+		Fam:      "cache",
+		Ctx:      "postgres.table_index_cache_io_ratio",
+		Priority: prioTableIndexCacheIORatio,
+		Dims: module.Dims{
+			{ID: "table_%s_db_%s_schema_%s_idx_blks_read_perc", Name: "miss", Algo: module.Incremental},
+		},
+	}
+	tableIndexIORateChartTmpl = module.Chart{
+		ID:       "table_%s_db_%s_schema_%s_index_io_rate",
+		Title:    "Table index I/O",
+		Units:    "B/s",
+		Fam:      "cache",
+		Ctx:      "postgres.table_index_io_rate",
+		Priority: prioTableIndexIORate,
+		Dims: module.Dims{
+			{ID: 
"table_%s_db_%s_schema_%s_idx_blks_hit", Name: "memory", Algo: module.Incremental}, + {ID: "table_%s_db_%s_schema_%s_idx_blks_read", Name: "disk", Algo: module.Incremental}, + }, + } + tableTOASCacheIORatioChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_toast_cache_io_ratio", + Title: "Table TOAST I/O cache miss ratio", + Units: "percentage", + Fam: "cache", + Ctx: "postgres.table_toast_cache_io_ratio", + Priority: prioTableToastCacheIORatio, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_toast_blks_read_perc", Name: "miss", Algo: module.Incremental}, + }, + } + tableTOASTIORateChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_toast_io_rate", + Title: "Table TOAST I/O", + Units: "B/s", + Fam: "cache", + Ctx: "postgres.table_toast_io_rate", + Priority: prioTableToastIORate, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_toast_blks_hit", Name: "memory", Algo: module.Incremental}, + {ID: "table_%s_db_%s_schema_%s_toast_blks_read", Name: "disk", Algo: module.Incremental}, + }, + } + tableTOASTIndexCacheIORatioChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_toast_index_cache_io_ratio", + Title: "Table TOAST index I/O cache miss ratio", + Units: "percentage", + Fam: "cache", + Ctx: "postgres.table_toast_index_cache_io_ratio", + Priority: prioTableToastIndexCacheIORatio, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_tidx_blks_read_perc", Name: "miss", Algo: module.Incremental}, + }, + } + tableTOASTIndexIORateChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_toast_index_io_rate", + Title: "Table TOAST index I/O", + Units: "B/s", + Fam: "cache", + Ctx: "postgres.table_toast_index_io_rate", + Priority: prioTableToastIndexIORate, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_tidx_blks_hit", Name: "memory", Algo: module.Incremental}, + {ID: "table_%s_db_%s_schema_%s_tidx_blks_read", Name: "disk", Algo: module.Incremental}, + }, + } + tableScansRateChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_scans_rate", + Title: "Table scans", + Units: "scans/s", + Fam: "throughput", + Ctx: "postgres.table_scans_rate", + Priority: prioTableScansRate, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_idx_scan", Name: "index", Algo: module.Incremental}, + {ID: "table_%s_db_%s_schema_%s_seq_scan", Name: "sequential", Algo: module.Incremental}, + }, + } + tableScansRowsRateChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_scans_rows_rate", + Title: "Table live rows fetched by scans", + Units: "rows/s", + Fam: "throughput", + Ctx: "postgres.table_scans_rows_rate", + Priority: prioTableScansRowsRate, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_idx_tup_fetch", Name: "index", Algo: module.Incremental}, + {ID: "table_%s_db_%s_schema_%s_seq_tup_read", Name: "sequential", Algo: module.Incremental}, + }, + } + tableAutoVacuumSinceTimeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_autovacuum_since_time", + Title: "Table time since last auto VACUUM", + Units: "seconds", + Fam: "vacuum and analyze", + Ctx: "postgres.table_autovacuum_since_time", + Priority: prioTableAutovacuumSinceTime, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_last_autovacuum_ago", Name: "time"}, + }, + } + tableVacuumSinceTimeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_vacuum_since_time", + Title: "Table time since last manual VACUUM", + Units: "seconds", + Fam: "vacuum and analyze", + Ctx: "postgres.table_vacuum_since_time", + Priority: prioTableVacuumSinceTime, + Dims: module.Dims{ + {ID: 
"table_%s_db_%s_schema_%s_last_vacuum_ago", Name: "time"}, + }, + } + tableAutoAnalyzeSinceTimeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_autoanalyze_since_time", + Title: "Table time since last auto ANALYZE", + Units: "seconds", + Fam: "vacuum and analyze", + Ctx: "postgres.table_autoanalyze_since_time", + Priority: prioTableAutoAnalyzeSinceTime, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_last_autoanalyze_ago", Name: "time"}, + }, + } + tableAnalyzeSinceTimeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_analyze_since_time", + Title: "Table time since last manual ANALYZE", + Units: "seconds", + Fam: "vacuum and analyze", + Ctx: "postgres.table_analyze_since_time", + Priority: prioTableLastAnalyzeAgo, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_last_analyze_ago", Name: "time"}, + }, + } + tableNullColumnsCountChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_null_columns_count", + Title: "Table null columns", + Units: "columns", + Fam: "maintenance", + Ctx: "postgres.table_null_columns_count", + Priority: prioTableNullColumns, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_null_columns", Name: "null"}, + }, + } + tableTotalSizeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_total_size", + Title: "Table total size", + Units: "B", + Fam: "size", + Ctx: "postgres.table_total_size", + Priority: prioTableTotalSize, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_total_size", Name: "size"}, + }, + } + tableBloatSizePercChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_bloat_size_perc", + Title: "Table bloat size percentage", + Units: "percentage", + Fam: "bloat", + Ctx: "postgres.table_bloat_size_perc", + Priority: prioTableBloatSizePerc, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"}, + }, + Vars: module.Vars{ + {ID: "table_%s_db_%s_schema_%s_total_size", Name: "table_size"}, + }, + } + tableBloatSizeChartTmpl = module.Chart{ + ID: "table_%s_db_%s_schema_%s_bloat_size", + Title: "Table bloat size", + Units: "B", + Fam: "bloat", + Ctx: "postgres.table_bloat_size", + Priority: prioTableBloatSize, + Dims: module.Dims{ + {ID: "table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"}, + }, + } +) + +func newTableCharts(tbl *tableMetrics) *module.Charts { + charts := tableChartsTmpl.Copy() + + if tbl.bloatSize == nil { + _ = charts.Remove(tableBloatSizeChartTmpl.ID) + _ = charts.Remove(tableBloatSizePercChartTmpl.ID) + } + + for i, chart := range *charts { + (*charts)[i] = newTableChart(chart, tbl) + } + + return charts +} + +func newTableChart(chart *module.Chart, tbl *tableMetrics) *module.Chart { + chart = chart.Copy() + chart.ID = fmt.Sprintf(chart.ID, tbl.name, tbl.db, tbl.schema) + chart.Labels = []module.Label{ + {Key: "database", Value: tbl.db}, + {Key: "schema", Value: tbl.schema}, + {Key: "table", Value: tbl.name}, + {Key: "parent_table", Value: tbl.parentName}, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, tbl.name, tbl.db, tbl.schema) + } + for _, v := range chart.Vars { + v.ID = fmt.Sprintf(v.ID, tbl.name, tbl.db, tbl.schema) + } + return chart +} + +func (p *Postgres) addNewTableCharts(tbl *tableMetrics) { + charts := newTableCharts(tbl) + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableLastAutoVacuumAgoChart(tbl *tableMetrics) { + chart := newTableChart(tableAutoVacuumSinceTimeChartTmpl.Copy(), tbl) + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) 
addTableLastVacuumAgoChart(tbl *tableMetrics) { + chart := newTableChart(tableVacuumSinceTimeChartTmpl.Copy(), tbl) + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableLastAutoAnalyzeAgoChart(tbl *tableMetrics) { + chart := newTableChart(tableAutoAnalyzeSinceTimeChartTmpl.Copy(), tbl) + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableLastAnalyzeAgoChart(tbl *tableMetrics) { + chart := newTableChart(tableAnalyzeSinceTimeChartTmpl.Copy(), tbl) + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableIOChartsCharts(tbl *tableMetrics) { + charts := module.Charts{ + newTableChart(tableCacheIORatioChartTmpl.Copy(), tbl), + newTableChart(tableIORateChartTmpl.Copy(), tbl), + } + + if err := p.Charts().Add(charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableIndexIOCharts(tbl *tableMetrics) { + charts := module.Charts{ + newTableChart(tableIndexCacheIORatioChartTmpl.Copy(), tbl), + newTableChart(tableIndexIORateChartTmpl.Copy(), tbl), + } + + if err := p.Charts().Add(charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableTOASTIOCharts(tbl *tableMetrics) { + charts := module.Charts{ + newTableChart(tableTOASCacheIORatioChartTmpl.Copy(), tbl), + newTableChart(tableTOASTIORateChartTmpl.Copy(), tbl), + } + + if err := p.Charts().Add(charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) addTableTOASTIndexIOCharts(tbl *tableMetrics) { + charts := module.Charts{ + newTableChart(tableTOASTIndexCacheIORatioChartTmpl.Copy(), tbl), + newTableChart(tableTOASTIndexIORateChartTmpl.Copy(), tbl), + } + + if err := p.Charts().Add(charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) removeTableCharts(tbl *tableMetrics) { + prefix := fmt.Sprintf("table_%s_db_%s_schema_%s", tbl.name, tbl.db, tbl.schema) + for _, c := range *p.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +var ( + indexChartsTmpl = module.Charts{ + indexSizeChartTmpl.Copy(), + indexBloatSizePercChartTmpl.Copy(), + indexBloatSizeChartTmpl.Copy(), + indexUsageStatusChartTmpl.Copy(), + } + indexSizeChartTmpl = module.Chart{ + ID: "index_%s_table_%s_db_%s_schema_%s_size", + Title: "Index size", + Units: "B", + Fam: "size", + Ctx: "postgres.index_size", + Priority: prioIndexSize, + Dims: module.Dims{ + {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "size"}, + }, + } + indexBloatSizePercChartTmpl = module.Chart{ + ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc", + Title: "Index bloat size percentage", + Units: "percentage", + Fam: "bloat", + Ctx: "postgres.index_bloat_size_perc", + Priority: prioIndexBloatSizePerc, + Dims: module.Dims{ + {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"}, + }, + Vars: module.Vars{ + {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "index_size"}, + }, + } + indexBloatSizeChartTmpl = module.Chart{ + ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size", + Title: "Index bloat size", + Units: "B", + Fam: "bloat", + Ctx: "postgres.index_bloat_size", + Priority: prioIndexBloatSize, + Dims: module.Dims{ + {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"}, + }, + } + indexUsageStatusChartTmpl = module.Chart{ + ID: "index_%s_table_%s_db_%s_schema_%s_usage_status", + Title: "Index usage status", + Units: "status", + Fam: "maintenance", + Ctx: "postgres.index_usage_status", + Priority: 
prioIndexUsageStatus, + Dims: module.Dims{ + {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_used", Name: "used"}, + {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_unused", Name: "unused"}, + }, + } +) + +func (p *Postgres) addNewIndexCharts(idx *indexMetrics) { + charts := indexChartsTmpl.Copy() + + if idx.bloatSize == nil { + _ = charts.Remove(indexBloatSizeChartTmpl.ID) + _ = charts.Remove(indexBloatSizePercChartTmpl.ID) + } + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, idx.name, idx.table, idx.db, idx.schema) + chart.Labels = []module.Label{ + {Key: "database", Value: idx.db}, + {Key: "schema", Value: idx.schema}, + {Key: "table", Value: idx.table}, + {Key: "parent_table", Value: idx.parentTable}, + {Key: "index", Value: idx.name}, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, idx.name, idx.table, idx.db, idx.schema) + } + for _, v := range chart.Vars { + v.ID = fmt.Sprintf(v.ID, idx.name, idx.table, idx.db, idx.schema) + } + } + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *Postgres) removeIndexCharts(idx *indexMetrics) { + prefix := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s", idx.name, idx.table, idx.db, idx.schema) + for _, c := range *p.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/collect.go b/src/go/collectors/go.d.plugin/modules/postgres/collect.go new file mode 100644 index 00000000000000..f66e956a3f5298 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/collect.go @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" +) + +const ( + pgVersion94 = 9_04_00 + pgVersion10 = 10_00_00 + pgVersion11 = 11_00_00 +) + +func (p *Postgres) collect() (map[string]int64, error) { + if p.db == nil { + db, err := p.openPrimaryConnection() + if err != nil { + return nil, err + } + p.db = db + } + + if p.pgVersion == 0 { + ver, err := p.doQueryServerVersion() + if err != nil { + return nil, fmt.Errorf("querying server version error: %v", err) + } + p.pgVersion = ver + p.Debugf("connected to PostgreSQL v%d", p.pgVersion) + } + + if p.superUser == nil { + v, err := p.doQueryIsSuperUser() + if err != nil { + return nil, fmt.Errorf("querying is super user error: %v", err) + } + p.superUser = &v + p.Debugf("connected as super user: %v", *p.superUser) + } + + if p.pgIsInRecovery == nil { + v, err := p.doQueryPGIsInRecovery() + if err != nil { + return nil, fmt.Errorf("querying recovery status error: %v", err) + } + p.pgIsInRecovery = &v + p.Debugf("the instance is in recovery mode: %v", *p.pgIsInRecovery) + } + + now := time.Now() + + if now.Sub(p.recheckSettingsTime) > p.recheckSettingsEvery { + p.recheckSettingsTime = now + maxConn, err := p.doQuerySettingsMaxConnections() + if err != nil { + return nil, fmt.Errorf("querying settings max connections error: %v", err) + } + p.mx.maxConnections = maxConn + + maxLocks, err := p.doQuerySettingsMaxLocksHeld() + if err != nil { + return nil, fmt.Errorf("querying settings max locks held error: %v", err) + } + p.mx.maxLocksHeld = maxLocks + } + + p.resetMetrics() + + if p.pgVersion >= pgVersion10 { + // need 'backend_type' in pg_stat_activity + p.addXactQueryRunningTimeChartsOnce.Do(func() { + p.addTransactionsRunTimeHistogramChart() + 
p.addQueriesRunTimeHistogramChart() + }) + } + if p.isSuperUser() { + p.addWALFilesChartsOnce.Do(p.addWALFilesCharts) + } + + if err := p.doQueryGlobalMetrics(); err != nil { + return nil, err + } + if err := p.doQueryReplicationMetrics(); err != nil { + return nil, err + } + if err := p.doQueryDatabasesMetrics(); err != nil { + return nil, err + } + if p.dbSr != nil { + if err := p.doQueryQueryableDatabases(); err != nil { + return nil, err + } + } + if err := p.doQueryTablesMetrics(); err != nil { + return nil, err + } + if err := p.doQueryIndexesMetrics(); err != nil { + return nil, err + } + + if now.Sub(p.doSlowTime) > p.doSlowEvery { + p.doSlowTime = now + if err := p.doQueryBloat(); err != nil { + return nil, err + } + if err := p.doQueryColumns(); err != nil { + return nil, err + } + } + + mx := make(map[string]int64) + p.collectMetrics(mx) + + return mx, nil +} + +func (p *Postgres) openPrimaryConnection() (*sql.DB, error) { + db, err := sql.Open("pgx", p.DSN) + if err != nil { + return nil, fmt.Errorf("error on opening a connection with the Postgres database [%s]: %v", p.DSN, err) + } + + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(10 * time.Minute) + + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + _ = db.Close() + return nil, fmt.Errorf("error on pinging the Postgres database [%s]: %v", p.DSN, err) + } + + return db, nil +} + +func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, error) { + cfg, err := pgx.ParseConfig(p.DSN) + if err != nil { + return nil, "", fmt.Errorf("error on parsing DSN [%s]: %v", p.DSN, err) + } + + cfg.Database = dbname + connStr := stdlib.RegisterConnConfig(cfg) + + db, err := sql.Open("pgx", connStr) + if err != nil { + stdlib.UnregisterConnConfig(connStr) + return nil, "", fmt.Errorf("error on opening a secondary connection with the Postgres database [%s]: %v", dbname, err) + } + + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(10 * time.Minute) + + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + stdlib.UnregisterConnConfig(connStr) + _ = db.Close() + return nil, "", fmt.Errorf("error on pinging the secondary Postgres database [%s]: %v", dbname, err) + } + + return db, connStr, nil +} + +func (p *Postgres) isSuperUser() bool { return p.superUser != nil && *p.superUser } + +func (p *Postgres) isPGInRecovery() bool { return p.pgIsInRecovery != nil && *p.pgIsInRecovery } + +func (p *Postgres) getDBMetrics(name string) *dbMetrics { + db, ok := p.mx.dbs[name] + if !ok { + db = &dbMetrics{name: name} + p.mx.dbs[name] = db + } + return db +} + +func (p *Postgres) getTableMetrics(name, db, schema string) *tableMetrics { + key := name + "_" + db + "_" + schema + m, ok := p.mx.tables[key] + if !ok { + m = &tableMetrics{db: db, schema: schema, name: name} + p.mx.tables[key] = m + } + return m +} + +func (p *Postgres) hasTableMetrics(name, db, schema string) bool { + key := name + "_" + db + "_" + schema + _, ok := p.mx.tables[key] + return ok +} + +func (p *Postgres) getIndexMetrics(name, table, db, schema string) *indexMetrics { + key := name + "_" + table + "_" + db + "_" + schema + m, ok := p.mx.indexes[key] + if !ok { + m = &indexMetrics{name: name, db: db, schema: schema, table: table} + p.mx.indexes[key] = m + } + return m +} + +func (p *Postgres) hasIndexMetrics(name, table, db, schema string) 
bool { + key := name + "_" + table + "_" + db + "_" + schema + _, ok := p.mx.indexes[key] + return ok +} + +func (p *Postgres) getReplAppMetrics(name string) *replStandbyAppMetrics { + app, ok := p.mx.replApps[name] + if !ok { + app = &replStandbyAppMetrics{name: name} + p.mx.replApps[name] = app + } + return app +} + +func (p *Postgres) getReplSlotMetrics(name string) *replSlotMetrics { + slot, ok := p.mx.replSlots[name] + if !ok { + slot = &replSlotMetrics{name: name} + p.mx.replSlots[name] = slot + } + return slot +} + +func parseInt(s string) int64 { + v, _ := strconv.ParseInt(s, 10, 64) + return v +} + +func parseFloat(s string) int64 { + v, _ := strconv.ParseFloat(s, 64) + return int64(v) +} + +func newInt(v int64) *int64 { + return &v +} + +func calcPercentage(value, total int64) (v int64) { + if total == 0 { + return 0 + } + if v = value * 100 / total; v < 0 { + v = -v + } + return v +} + +func calcDeltaPercentage(a, b incDelta) int64 { + return calcPercentage(a.delta(), a.delta()+b.delta()) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go b/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go new file mode 100644 index 00000000000000..84f9abbc7b8524 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import "fmt" + +func (p *Postgres) collectMetrics(mx map[string]int64) { + mx["server_connections_used"] = p.mx.connUsed + if p.mx.maxConnections > 0 { + mx["server_connections_available"] = p.mx.maxConnections - p.mx.connUsed + mx["server_connections_utilization"] = calcPercentage(p.mx.connUsed, p.mx.maxConnections) + } + p.mx.xactTimeHist.WriteTo(mx, "transaction_running_time_hist", 1, 1) + p.mx.queryTimeHist.WriteTo(mx, "query_running_time_hist", 1, 1) + mx["server_uptime"] = p.mx.uptime + mx["server_connections_state_active"] = p.mx.connStateActive + mx["server_connections_state_idle"] = p.mx.connStateIdle + mx["server_connections_state_idle_in_transaction"] = p.mx.connStateIdleInTrans + mx["server_connections_state_idle_in_transaction_aborted"] = p.mx.connStateIdleInTransAborted + mx["server_connections_state_fastpath_function_call"] = p.mx.connStateFastpathFunctionCall + mx["server_connections_state_disabled"] = p.mx.connStateDisabled + mx["checkpoints_timed"] = p.mx.checkpointsTimed + mx["checkpoints_req"] = p.mx.checkpointsReq + mx["checkpoint_write_time"] = p.mx.checkpointWriteTime + mx["checkpoint_sync_time"] = p.mx.checkpointSyncTime + mx["buffers_checkpoint"] = p.mx.buffersCheckpoint + mx["buffers_clean"] = p.mx.buffersClean + mx["maxwritten_clean"] = p.mx.maxwrittenClean + mx["buffers_backend"] = p.mx.buffersBackend + mx["buffers_backend_fsync"] = p.mx.buffersBackendFsync + mx["buffers_alloc"] = p.mx.buffersAlloc + mx["oldest_current_xid"] = p.mx.oldestXID + mx["percent_towards_wraparound"] = p.mx.percentTowardsWraparound + mx["percent_towards_emergency_autovacuum"] = p.mx.percentTowardsEmergencyAutovacuum + mx["wal_writes"] = p.mx.walWrites + mx["wal_recycled_files"] = p.mx.walRecycledFiles + mx["wal_written_files"] = p.mx.walWrittenFiles + mx["wal_archive_files_ready_count"] = p.mx.walArchiveFilesReady + mx["wal_archive_files_done_count"] = p.mx.walArchiveFilesDone + mx["catalog_relkind_r_count"] = p.mx.relkindOrdinaryTable + mx["catalog_relkind_i_count"] = p.mx.relkindIndex + mx["catalog_relkind_S_count"] = p.mx.relkindSequence + mx["catalog_relkind_t_count"] = p.mx.relkindTOASTTable + 
mx["catalog_relkind_v_count"] = p.mx.relkindView + mx["catalog_relkind_m_count"] = p.mx.relkindMatView + mx["catalog_relkind_c_count"] = p.mx.relkindCompositeType + mx["catalog_relkind_f_count"] = p.mx.relkindForeignTable + mx["catalog_relkind_p_count"] = p.mx.relkindPartitionedTable + mx["catalog_relkind_I_count"] = p.mx.relkindPartitionedIndex + mx["catalog_relkind_r_size"] = p.mx.relkindOrdinaryTableSize + mx["catalog_relkind_i_size"] = p.mx.relkindIndexSize + mx["catalog_relkind_S_size"] = p.mx.relkindSequenceSize + mx["catalog_relkind_t_size"] = p.mx.relkindTOASTTableSize + mx["catalog_relkind_v_size"] = p.mx.relkindViewSize + mx["catalog_relkind_m_size"] = p.mx.relkindMatViewSize + mx["catalog_relkind_c_size"] = p.mx.relkindCompositeTypeSize + mx["catalog_relkind_f_size"] = p.mx.relkindForeignTableSize + mx["catalog_relkind_p_size"] = p.mx.relkindPartitionedTableSize + mx["catalog_relkind_I_size"] = p.mx.relkindPartitionedIndexSize + mx["autovacuum_analyze"] = p.mx.autovacuumWorkersAnalyze + mx["autovacuum_vacuum_analyze"] = p.mx.autovacuumWorkersVacuumAnalyze + mx["autovacuum_vacuum"] = p.mx.autovacuumWorkersVacuum + mx["autovacuum_vacuum_freeze"] = p.mx.autovacuumWorkersVacuumFreeze + mx["autovacuum_brin_summarize"] = p.mx.autovacuumWorkersBrinSummarize + + var locksHeld int64 + for name, m := range p.mx.dbs { + if !m.updated { + delete(p.mx.dbs, name) + p.removeDatabaseCharts(m) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addNewDatabaseCharts(m) + if p.isPGInRecovery() { + p.addDBConflictsCharts(m) + } + } + px := "db_" + m.name + "_" + mx[px+"numbackends"] = m.numBackends + if m.datConnLimit <= 0 { + mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, p.mx.maxConnections) + } else { + mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, m.datConnLimit) + } + mx[px+"xact_commit"] = m.xactCommit + mx[px+"xact_rollback"] = m.xactRollback + mx[px+"blks_read"] = m.blksRead.last + mx[px+"blks_hit"] = m.blksHit.last + mx[px+"blks_read_perc"] = calcDeltaPercentage(m.blksRead, m.blksHit) + m.blksRead.prev, m.blksHit.prev = m.blksRead.last, m.blksHit.last + mx[px+"tup_returned"] = m.tupReturned.last + mx[px+"tup_fetched"] = m.tupFetched.last + mx[px+"tup_fetched_perc"] = calcPercentage(m.tupFetched.delta(), m.tupReturned.delta()) + m.tupReturned.prev, m.tupFetched.prev = m.tupReturned.last, m.tupFetched.last + mx[px+"tup_inserted"] = m.tupInserted + mx[px+"tup_updated"] = m.tupUpdated + mx[px+"tup_deleted"] = m.tupDeleted + mx[px+"conflicts"] = m.conflicts + if m.size != nil { + mx[px+"size"] = *m.size + } + mx[px+"temp_files"] = m.tempFiles + mx[px+"temp_bytes"] = m.tempBytes + mx[px+"deadlocks"] = m.deadlocks + mx[px+"confl_tablespace"] = m.conflTablespace + mx[px+"confl_lock"] = m.conflLock + mx[px+"confl_snapshot"] = m.conflSnapshot + mx[px+"confl_bufferpin"] = m.conflBufferpin + mx[px+"confl_deadlock"] = m.conflDeadlock + mx[px+"lock_mode_AccessShareLock_held"] = m.accessShareLockHeld + mx[px+"lock_mode_RowShareLock_held"] = m.rowShareLockHeld + mx[px+"lock_mode_RowExclusiveLock_held"] = m.rowExclusiveLockHeld + mx[px+"lock_mode_ShareUpdateExclusiveLock_held"] = m.shareUpdateExclusiveLockHeld + mx[px+"lock_mode_ShareLock_held"] = m.shareLockHeld + mx[px+"lock_mode_ShareRowExclusiveLock_held"] = m.shareRowExclusiveLockHeld + mx[px+"lock_mode_ExclusiveLock_held"] = m.exclusiveLockHeld + mx[px+"lock_mode_AccessExclusiveLock_held"] = m.accessExclusiveLockHeld + mx[px+"lock_mode_AccessShareLock_awaited"] = m.accessShareLockAwaited + 
mx[px+"lock_mode_RowShareLock_awaited"] = m.rowShareLockAwaited + mx[px+"lock_mode_RowExclusiveLock_awaited"] = m.rowExclusiveLockAwaited + mx[px+"lock_mode_ShareUpdateExclusiveLock_awaited"] = m.shareUpdateExclusiveLockAwaited + mx[px+"lock_mode_ShareLock_awaited"] = m.shareLockAwaited + mx[px+"lock_mode_ShareRowExclusiveLock_awaited"] = m.shareRowExclusiveLockAwaited + mx[px+"lock_mode_ExclusiveLock_awaited"] = m.exclusiveLockAwaited + mx[px+"lock_mode_AccessExclusiveLock_awaited"] = m.accessExclusiveLockAwaited + locksHeld += m.accessShareLockHeld + m.rowShareLockHeld + + m.rowExclusiveLockHeld + m.shareUpdateExclusiveLockHeld + + m.shareLockHeld + m.shareRowExclusiveLockHeld + + m.exclusiveLockHeld + m.accessExclusiveLockHeld + } + mx["databases_count"] = int64(len(p.mx.dbs)) + mx["locks_utilization"] = calcPercentage(locksHeld, p.mx.maxLocksHeld) + + for name, m := range p.mx.tables { + if !m.updated { + delete(p.mx.tables, name) + p.removeTableCharts(m) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addNewTableCharts(m) + } + if !m.hasLastAutoVacuumChart && m.lastAutoVacuumAgo > 0 { + m.hasLastAutoVacuumChart = true + p.addTableLastAutoVacuumAgoChart(m) + } + if !m.hasLastVacuumChart && m.lastVacuumAgo > 0 { + m.hasLastVacuumChart = true + p.addTableLastVacuumAgoChart(m) + } + if !m.hasLastAutoAnalyzeChart && m.lastAutoAnalyzeAgo > 0 { + m.hasLastAutoAnalyzeChart = true + p.addTableLastAutoAnalyzeAgoChart(m) + } + if !m.hasLastAnalyzeChart && m.lastAnalyzeAgo > 0 { + m.hasLastAnalyzeChart = true + p.addTableLastAnalyzeAgoChart(m) + } + if !m.hasTableIOCharts && m.heapBlksRead.last != -1 { + m.hasTableIOCharts = true + p.addTableIOChartsCharts(m) + } + if !m.hasTableIdxIOCharts && m.idxBlksRead.last != -1 { + m.hasTableIdxIOCharts = true + p.addTableIndexIOCharts(m) + } + if !m.hasTableTOASTIOCharts && m.toastBlksRead.last != -1 { + m.hasTableTOASTIOCharts = true + p.addTableTOASTIOCharts(m) + } + if !m.hasTableTOASTIdxIOCharts && m.tidxBlksRead.last != -1 { + m.hasTableTOASTIdxIOCharts = true + p.addTableTOASTIndexIOCharts(m) + } + + px := fmt.Sprintf("table_%s_db_%s_schema_%s_", m.name, m.db, m.schema) + + mx[px+"seq_scan"] = m.seqScan + mx[px+"seq_tup_read"] = m.seqTupRead + mx[px+"idx_scan"] = m.idxScan + mx[px+"idx_tup_fetch"] = m.idxTupFetch + mx[px+"n_live_tup"] = m.nLiveTup + mx[px+"n_dead_tup"] = m.nDeadTup + mx[px+"n_dead_tup_perc"] = calcPercentage(m.nDeadTup, m.nDeadTup+m.nLiveTup) + mx[px+"n_tup_ins"] = m.nTupIns + mx[px+"n_tup_upd"] = m.nTupUpd.last + mx[px+"n_tup_del"] = m.nTupDel + mx[px+"n_tup_hot_upd"] = m.nTupHotUpd.last + if m.lastAutoVacuumAgo != -1 { + mx[px+"last_autovacuum_ago"] = m.lastAutoVacuumAgo + } + if m.lastVacuumAgo != -1 { + mx[px+"last_vacuum_ago"] = m.lastVacuumAgo + } + if m.lastAutoAnalyzeAgo != -1 { + mx[px+"last_autoanalyze_ago"] = m.lastAutoAnalyzeAgo + } + if m.lastAnalyzeAgo != -1 { + mx[px+"last_analyze_ago"] = m.lastAnalyzeAgo + } + mx[px+"total_size"] = m.totalSize + if m.bloatSize != nil && m.bloatSizePerc != nil { + mx[px+"bloat_size"] = *m.bloatSize + mx[px+"bloat_size_perc"] = *m.bloatSizePerc + } + if m.nullColumns != nil { + mx[px+"null_columns"] = *m.nullColumns + } + + mx[px+"n_tup_hot_upd_perc"] = calcPercentage(m.nTupHotUpd.delta(), m.nTupUpd.delta()) + m.nTupHotUpd.prev, m.nTupUpd.prev = m.nTupHotUpd.last, m.nTupUpd.last + + mx[px+"heap_blks_read"] = m.heapBlksRead.last + mx[px+"heap_blks_hit"] = m.heapBlksHit.last + mx[px+"heap_blks_read_perc"] = calcDeltaPercentage(m.heapBlksRead, m.heapBlksHit) + 
m.heapBlksHit.prev, m.heapBlksRead.prev = m.heapBlksHit.last, m.heapBlksRead.last + + mx[px+"idx_blks_read"] = m.idxBlksRead.last + mx[px+"idx_blks_hit"] = m.idxBlksHit.last + mx[px+"idx_blks_read_perc"] = calcDeltaPercentage(m.idxBlksRead, m.idxBlksHit) + m.idxBlksHit.prev, m.idxBlksRead.prev = m.idxBlksHit.last, m.idxBlksRead.last + + mx[px+"toast_blks_read"] = m.toastBlksRead.last + mx[px+"toast_blks_hit"] = m.toastBlksHit.last + mx[px+"toast_blks_read_perc"] = calcDeltaPercentage(m.toastBlksRead, m.toastBlksHit) + m.toastBlksHit.prev, m.toastBlksRead.prev = m.toastBlksHit.last, m.toastBlksRead.last + + mx[px+"tidx_blks_read"] = m.tidxBlksRead.last + mx[px+"tidx_blks_hit"] = m.tidxBlksHit.last + mx[px+"tidx_blks_read_perc"] = calcDeltaPercentage(m.tidxBlksRead, m.tidxBlksHit) + m.tidxBlksHit.prev, m.tidxBlksRead.prev = m.tidxBlksHit.last, m.tidxBlksRead.last + } + + for name, m := range p.mx.indexes { + if !m.updated { + delete(p.mx.indexes, name) + p.removeIndexCharts(m) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addNewIndexCharts(m) + } + + px := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s_", m.name, m.table, m.db, m.schema) + mx[px+"size"] = m.size + if m.bloatSize != nil && m.bloatSizePerc != nil { + mx[px+"bloat_size"] = *m.bloatSize + mx[px+"bloat_size_perc"] = *m.bloatSizePerc + } + if m.idxScan+m.idxTupRead+m.idxTupFetch > 0 { + mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 1, 0 + } else { + mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 0, 1 + } + } + + for name, m := range p.mx.replApps { + if !m.updated { + delete(p.mx.replApps, name) + p.removeReplicationStandbyAppCharts(name) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addNewReplicationStandbyAppCharts(name) + } + px := "repl_standby_app_" + m.name + "_wal_" + mx[px+"sent_lag_size"] = m.walSentDelta + mx[px+"write_lag_size"] = m.walWriteDelta + mx[px+"flush_lag_size"] = m.walFlushDelta + mx[px+"replay_lag_size"] = m.walReplayDelta + mx[px+"write_time"] = m.walWriteLag + mx[px+"flush_lag_time"] = m.walFlushLag + mx[px+"replay_lag_time"] = m.walReplayLag + } + + for name, m := range p.mx.replSlots { + if !m.updated { + delete(p.mx.replSlots, name) + p.removeReplicationSlotCharts(name) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addNewReplicationSlotCharts(name) + } + px := "repl_slot_" + m.name + "_" + mx[px+"replslot_wal_keep"] = m.walKeep + mx[px+"replslot_files"] = m.files + } +} + +func (p *Postgres) resetMetrics() { + p.mx.srvMetrics = srvMetrics{ + xactTimeHist: p.mx.xactTimeHist, + queryTimeHist: p.mx.queryTimeHist, + maxConnections: p.mx.maxConnections, + maxLocksHeld: p.mx.maxLocksHeld, + } + for name, m := range p.mx.dbs { + p.mx.dbs[name] = &dbMetrics{ + name: m.name, + hasCharts: m.hasCharts, + blksRead: incDelta{prev: m.blksRead.prev}, + blksHit: incDelta{prev: m.blksHit.prev}, + tupReturned: incDelta{prev: m.tupReturned.prev}, + tupFetched: incDelta{prev: m.tupFetched.prev}, + } + } + for name, m := range p.mx.tables { + p.mx.tables[name] = &tableMetrics{ + db: m.db, + schema: m.schema, + name: m.name, + hasCharts: m.hasCharts, + hasLastAutoVacuumChart: m.hasLastAutoVacuumChart, + hasLastVacuumChart: m.hasLastVacuumChart, + hasLastAutoAnalyzeChart: m.hasLastAutoAnalyzeChart, + hasLastAnalyzeChart: m.hasLastAnalyzeChart, + hasTableIOCharts: m.hasTableIOCharts, + hasTableIdxIOCharts: m.hasTableIdxIOCharts, + hasTableTOASTIOCharts: m.hasTableTOASTIOCharts, + hasTableTOASTIdxIOCharts: m.hasTableTOASTIdxIOCharts, + nTupUpd: incDelta{prev: 
m.nTupUpd.prev}, + nTupHotUpd: incDelta{prev: m.nTupHotUpd.prev}, + heapBlksRead: incDelta{prev: m.heapBlksRead.prev}, + heapBlksHit: incDelta{prev: m.heapBlksHit.prev}, + idxBlksRead: incDelta{prev: m.idxBlksRead.prev}, + idxBlksHit: incDelta{prev: m.idxBlksHit.prev}, + toastBlksRead: incDelta{prev: m.toastBlksRead.prev}, + toastBlksHit: incDelta{prev: m.toastBlksHit.prev}, + tidxBlksRead: incDelta{prev: m.tidxBlksRead.prev}, + tidxBlksHit: incDelta{prev: m.tidxBlksHit.prev}, + bloatSize: m.bloatSize, + bloatSizePerc: m.bloatSizePerc, + nullColumns: m.nullColumns, + } + } + for name, m := range p.mx.indexes { + p.mx.indexes[name] = &indexMetrics{ + name: m.name, + db: m.db, + schema: m.schema, + table: m.table, + updated: m.updated, + hasCharts: m.hasCharts, + bloatSize: m.bloatSize, + bloatSizePerc: m.bloatSizePerc, + } + } + for name, m := range p.mx.replApps { + p.mx.replApps[name] = &replStandbyAppMetrics{ + name: m.name, + hasCharts: m.hasCharts, + } + } + for name, m := range p.mx.replSlots { + p.mx.replSlots[name] = &replSlotMetrics{ + name: m.name, + hasCharts: m.hasCharts, + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json b/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json new file mode 100644 index 00000000000000..98a8616b725f34 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json @@ -0,0 +1,44 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/postgres job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_databases_matching": { + "type": "string" + }, + "transaction_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "query_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "max_db_tables": { + "type": "integer" + }, + "max_db_indexes": { + "type": "integer" + } + }, + "required": [ + "name", + "dsn" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query.go new file mode 100644 index 00000000000000..ea134ec5f3614b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "context" + "database/sql" +) + +func (p *Postgres) doQueryRow(query string, v any) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + return p.db.QueryRowContext(ctx, query).Scan(v) +} + +func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + return db.QueryRowContext(ctx, query).Scan(v) +} + +func (p *Postgres) doQuery(query string, assign func(column, value string, rowEnd bool)) error { + return p.doDBQuery(p.db, query, assign) +} + +func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + rows, err := db.QueryContext(ctx, query) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() + + return readRows(rows, assign) +} + +func readRows(rows *sql.Rows, assign func(column, value string, rowEnd bool)) error { + if assign == nil { + return nil + } + + 
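// generic row reader: every cell is scanned as a sql.NullString and handed to assign as (column, value, rowEnd), where rowEnd is true on the last column of each row + 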
columns, err := rows.Columns() + if err != nil { + return err + } + + values := makeValues(len(columns)) + + for rows.Next() { + if err := rows.Scan(values...); err != nil { + return err + } + for i, l := 0, len(values); i < l; i++ { + assign(columns[i], valueToString(values[i]), i == l-1) + } + } + return rows.Err() +} + +func valueToString(value any) string { + v, ok := value.(*sql.NullString) + if !ok || !v.Valid { + return "" + } + return v.String +} + +func makeValues(size int) []any { + vs := make([]any, size) + for i := range vs { + vs[i] = &sql.NullString{} + } + return vs +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go new file mode 100644 index 00000000000000..8cfb1c328ff4b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import "database/sql" + +func (p *Postgres) doQueryBloat() error { + if err := p.doDBQueryBloat(p.db); err != nil { + p.Warning(err) + } + for _, conn := range p.dbConns { + if conn.db == nil { + continue + } + if err := p.doDBQueryBloat(conn.db); err != nil { + p.Warning(err) + } + } + return nil +} + +func (p *Postgres) doDBQueryBloat(db *sql.DB) error { + q := queryBloat() + + var dbname, schema, table, iname string + var tableWasted, idxWasted int64 + return p.doDBQuery(db, q, func(column, value string, rowEnd bool) { + switch column { + case "db": + dbname = value + case "schemaname": + schema = value + case "tablename": + table = value + case "wastedbytes": + tableWasted = parseFloat(value) + case "iname": + iname = value + case "wastedibytes": + idxWasted = parseFloat(value) + } + if !rowEnd { + return + } + if p.hasTableMetrics(table, dbname, schema) { + v := p.getTableMetrics(table, dbname, schema) + v.bloatSize = newInt(tableWasted) + v.bloatSizePerc = newInt(calcPercentage(tableWasted, v.totalSize)) + } + if iname != "?" 
&& p.hasIndexMetrics(iname, table, dbname, schema) { + v := p.getIndexMetrics(iname, table, dbname, schema) + v.bloatSize = newInt(idxWasted) + v.bloatSizePerc = newInt(calcPercentage(idxWasted, v.size)) + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go new file mode 100644 index 00000000000000..1da655aafe8908 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import "database/sql" + +func (p *Postgres) doQueryColumns() error { + if err := p.doDBQueryColumns(p.db); err != nil { + p.Warning(err) + } + for _, conn := range p.dbConns { + if conn.db == nil { + continue + } + if err := p.doDBQueryColumns(conn.db); err != nil { + p.Warning(err) + } + } + return nil +} + +func (p *Postgres) doDBQueryColumns(db *sql.DB) error { + q := queryColumnsStats() + + for _, m := range p.mx.tables { + if m.nullColumns != nil { + m.nullColumns = newInt(0) + } + } + + var dbname, schema, table string + var nullPerc int64 + return p.doDBQuery(db, q, func(column, value string, rowEnd bool) { + switch column { + case "datname": + dbname = value + case "schemaname": + schema = value + case "relname": + table = value + case "null_percent": + nullPerc = parseInt(value) + } + if !rowEnd { + return + } + if nullPerc == 100 && p.hasTableMetrics(table, dbname, schema) { + v := p.getTableMetrics(table, dbname, schema) + if v.nullColumns == nil { + v.nullColumns = newInt(0) + } + *v.nullColumns++ + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go new file mode 100644 index 00000000000000..0cee7a0cde10cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "fmt" +) + +func (p *Postgres) doQueryDatabasesMetrics() error { + if err := p.doQueryDatabaseStats(); err != nil { + return fmt.Errorf("querying database stats error: %v", err) + } + if err := p.doQueryDatabaseSize(); err != nil { + return fmt.Errorf("querying database size error: %v", err) + } + if p.isPGInRecovery() { + if err := p.doQueryDatabaseConflicts(); err != nil { + return fmt.Errorf("querying database conflicts error: %v", err) + } + } + if err := p.doQueryDatabaseLocks(); err != nil { + return fmt.Errorf("querying database locks error: %v", err) + } + return nil +} + +func (p *Postgres) doQueryDatabaseStats() error { + q := queryDatabaseStats() + + var db string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "datname": + db = value + p.getDBMetrics(db).updated = true + case "numbackends": + p.getDBMetrics(db).numBackends = parseInt(value) + case "datconnlimit": + p.getDBMetrics(db).datConnLimit = parseInt(value) + case "xact_commit": + p.getDBMetrics(db).xactCommit = parseInt(value) + case "xact_rollback": + p.getDBMetrics(db).xactRollback = parseInt(value) + case "blks_read_bytes": + p.getDBMetrics(db).blksRead.last = parseInt(value) + case "blks_hit_bytes": + p.getDBMetrics(db).blksHit.last = parseInt(value) + case "tup_returned": + p.getDBMetrics(db).tupReturned.last = parseInt(value) + case "tup_fetched": + p.getDBMetrics(db).tupFetched.last = parseInt(value) + case "tup_inserted": + p.getDBMetrics(db).tupInserted = parseInt(value) + case 
"tup_updated": + p.getDBMetrics(db).tupUpdated = parseInt(value) + case "tup_deleted": + p.getDBMetrics(db).tupDeleted = parseInt(value) + case "conflicts": + p.getDBMetrics(db).conflicts = parseInt(value) + case "temp_files": + p.getDBMetrics(db).tempFiles = parseInt(value) + case "temp_bytes": + p.getDBMetrics(db).tempBytes = parseInt(value) + case "deadlocks": + p.getDBMetrics(db).deadlocks = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryDatabaseSize() error { + q := queryDatabaseSize(p.pgVersion) + + var db string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "datname": + db = value + case "size": + p.getDBMetrics(db).size = newInt(parseInt(value)) + } + }) +} + +func (p *Postgres) doQueryDatabaseConflicts() error { + q := queryDatabaseConflicts() + + var db string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "datname": + db = value + p.getDBMetrics(db).updated = true + case "confl_tablespace": + p.getDBMetrics(db).conflTablespace = parseInt(value) + case "confl_lock": + p.getDBMetrics(db).conflLock = parseInt(value) + case "confl_snapshot": + p.getDBMetrics(db).conflSnapshot = parseInt(value) + case "confl_bufferpin": + p.getDBMetrics(db).conflBufferpin = parseInt(value) + case "confl_deadlock": + p.getDBMetrics(db).conflDeadlock = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryDatabaseLocks() error { + q := queryDatabaseLocks() + + var db, mode string + var granted bool + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "datname": + db = value + p.getDBMetrics(db).updated = true + case "mode": + mode = value + case "granted": + granted = value == "true" || value == "t" + case "locks_count": + // https://github.com/postgres/postgres/blob/7c34555f8c39eeefcc45b3c3f027d7a063d738fc/src/include/storage/lockdefs.h#L36-L45 + // https://www.postgresql.org/docs/7.2/locking-tables.html + switch { + case mode == "AccessShareLock" && granted: + p.getDBMetrics(db).accessShareLockHeld = parseInt(value) + case mode == "AccessShareLock": + p.getDBMetrics(db).accessShareLockAwaited = parseInt(value) + case mode == "RowShareLock" && granted: + p.getDBMetrics(db).rowShareLockHeld = parseInt(value) + case mode == "RowShareLock": + p.getDBMetrics(db).rowShareLockAwaited = parseInt(value) + case mode == "RowExclusiveLock" && granted: + p.getDBMetrics(db).rowExclusiveLockHeld = parseInt(value) + case mode == "RowExclusiveLock": + p.getDBMetrics(db).rowExclusiveLockAwaited = parseInt(value) + case mode == "ShareUpdateExclusiveLock" && granted: + p.getDBMetrics(db).shareUpdateExclusiveLockHeld = parseInt(value) + case mode == "ShareUpdateExclusiveLock": + p.getDBMetrics(db).shareUpdateExclusiveLockAwaited = parseInt(value) + case mode == "ShareLock" && granted: + p.getDBMetrics(db).shareLockHeld = parseInt(value) + case mode == "ShareLock": + p.getDBMetrics(db).shareLockAwaited = parseInt(value) + case mode == "ShareRowExclusiveLock" && granted: + p.getDBMetrics(db).shareRowExclusiveLockHeld = parseInt(value) + case mode == "ShareRowExclusiveLock": + p.getDBMetrics(db).shareRowExclusiveLockAwaited = parseInt(value) + case mode == "ExclusiveLock" && granted: + p.getDBMetrics(db).exclusiveLockHeld = parseInt(value) + case mode == "ExclusiveLock": + p.getDBMetrics(db).exclusiveLockAwaited = parseInt(value) + case mode == "AccessExclusiveLock" && granted: + p.getDBMetrics(db).accessExclusiveLockHeld = parseInt(value) + case mode == "AccessExclusiveLock": + 
p.getDBMetrics(db).accessExclusiveLockAwaited = parseInt(value) + } + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go new file mode 100644 index 00000000000000..c70772a23e2974 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "fmt" + "strconv" +) + +func (p *Postgres) doQueryGlobalMetrics() error { + if err := p.doQueryConnectionsUsed(); err != nil { + return fmt.Errorf("querying server connections used error: %v", err) + } + if err := p.doQueryConnectionsState(); err != nil { + return fmt.Errorf("querying server connections state error: %v", err) + } + if err := p.doQueryCheckpoints(); err != nil { + return fmt.Errorf("querying checkpoints error: %v", err) + } + if err := p.doQueryUptime(); err != nil { + return fmt.Errorf("querying server uptime error: %v", err) + } + if err := p.doQueryTXIDWraparound(); err != nil { + return fmt.Errorf("querying txid wraparound error: %v", err) + } + if err := p.doQueryWALWrites(); err != nil { + return fmt.Errorf("querying wal writes error: %v", err) + } + if err := p.doQueryCatalogRelations(); err != nil { + return fmt.Errorf("querying catalog relations error: %v", err) + } + if p.pgVersion >= pgVersion94 { + if err := p.doQueryAutovacuumWorkers(); err != nil { + return fmt.Errorf("querying autovacuum workers error: %v", err) + } + } + if p.pgVersion >= pgVersion10 { + if err := p.doQueryXactQueryRunningTime(); err != nil { + return fmt.Errorf("querying xact/query running time error: %v", err) + } + } + + if !p.isSuperUser() { + return nil + } + + if p.pgVersion >= pgVersion94 { + if err := p.doQueryWALFiles(); err != nil { + return fmt.Errorf("querying wal files error: %v", err) + } + } + if err := p.doQueryWALArchiveFiles(); err != nil { + return fmt.Errorf("querying wal archive files error: %v", err) + } + + return nil +} + +func (p *Postgres) doQueryConnectionsUsed() error { + q := queryServerCurrentConnectionsUsed() + + var v string + if err := p.doQueryRow(q, &v); err != nil { + return err + } + + p.mx.connUsed = parseInt(v) + + return nil +} + +func (p *Postgres) doQueryConnectionsState() error { + q := queryServerConnectionsState() + + var state string + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "state": + state = value + case "count": + switch state { + case "active": + p.mx.connStateActive = parseInt(value) + case "idle": + p.mx.connStateIdle = parseInt(value) + case "idle in transaction": + p.mx.connStateIdleInTrans = parseInt(value) + case "idle in transaction (aborted)": + p.mx.connStateIdleInTransAborted = parseInt(value) + case "fastpath function call": + p.mx.connStateFastpathFunctionCall = parseInt(value) + case "disabled": + p.mx.connStateDisabled = parseInt(value) + } + } + }) +} + +func (p *Postgres) doQueryCheckpoints() error { + q := queryCheckpoints() + + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "checkpoints_timed": + p.mx.checkpointsTimed = parseInt(value) + case "checkpoints_req": + p.mx.checkpointsReq = parseInt(value) + case "checkpoint_write_time": + p.mx.checkpointWriteTime = parseInt(value) + case "checkpoint_sync_time": + p.mx.checkpointSyncTime = parseInt(value) + case "buffers_checkpoint_bytes": + p.mx.buffersCheckpoint = parseInt(value) + case "buffers_clean_bytes": + p.mx.buffersClean = 
parseInt(value) + case "maxwritten_clean": + p.mx.maxwrittenClean = parseInt(value) + case "buffers_backend_bytes": + p.mx.buffersBackend = parseInt(value) + case "buffers_backend_fsync": + p.mx.buffersBackendFsync = parseInt(value) + case "buffers_alloc_bytes": + p.mx.buffersAlloc = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryUptime() error { + q := queryServerUptime() + + var s string + if err := p.doQueryRow(q, &s); err != nil { + return err + } + + p.mx.uptime = parseFloat(s) + + return nil +} + +func (p *Postgres) doQueryTXIDWraparound() error { + q := queryTXIDWraparound() + + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "oldest_current_xid": + p.mx.oldestXID = parseInt(value) + case "percent_towards_wraparound": + p.mx.percentTowardsWraparound = parseInt(value) + case "percent_towards_emergency_autovacuum": + p.mx.percentTowardsEmergencyAutovacuum = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryWALWrites() error { + q := queryWALWrites(p.pgVersion) + + var v int64 + if err := p.doQueryRow(q, &v); err != nil { + return err + } + + p.mx.walWrites = v + + return nil +} + +func (p *Postgres) doQueryWALFiles() error { + q := queryWALFiles(p.pgVersion) + + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "wal_recycled_files": + p.mx.walRecycledFiles = parseInt(value) + case "wal_written_files": + p.mx.walWrittenFiles = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryWALArchiveFiles() error { + q := queryWALArchiveFiles(p.pgVersion) + + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "wal_archive_files_ready_count": + p.mx.walArchiveFilesReady = parseInt(value) + case "wal_archive_files_done_count": + p.mx.walArchiveFilesDone = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryCatalogRelations() error { + q := queryCatalogRelations() + + var kind string + var count, size int64 + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "relkind": + kind = value + case "count": + count = parseInt(value) + case "size": + size = parseInt(value) + } + if !rowEnd { + return + } + // https://www.postgresql.org/docs/current/catalog-pg-class.html + switch kind { + case "r": + p.mx.relkindOrdinaryTable = count + p.mx.relkindOrdinaryTableSize = size + case "i": + p.mx.relkindIndex = count + p.mx.relkindIndexSize = size + case "S": + p.mx.relkindSequence = count + p.mx.relkindSequenceSize = size + case "t": + p.mx.relkindTOASTTable = count + p.mx.relkindTOASTTableSize = size + case "v": + p.mx.relkindView = count + p.mx.relkindViewSize = size + case "m": + p.mx.relkindMatView = count + p.mx.relkindMatViewSize = size + case "c": + p.mx.relkindCompositeType = count + p.mx.relkindCompositeTypeSize = size + case "f": + p.mx.relkindForeignTable = count + p.mx.relkindForeignTableSize = size + case "p": + p.mx.relkindPartitionedTable = count + p.mx.relkindPartitionedTableSize = size + case "I": + p.mx.relkindPartitionedIndex = count + p.mx.relkindPartitionedIndexSize = size + } + }) +} + +func (p *Postgres) doQueryAutovacuumWorkers() error { + q := queryAutovacuumWorkers() + + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "autovacuum_analyze": + p.mx.autovacuumWorkersAnalyze = parseInt(value) + case "autovacuum_vacuum_analyze": + p.mx.autovacuumWorkersVacuumAnalyze = parseInt(value) + case "autovacuum_vacuum": + p.mx.autovacuumWorkersVacuum = parseInt(value) + case "autovacuum_vacuum_freeze": + 
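// see queryAutovacuumWorkers for how the per-operation worker counts are derived + 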
p.mx.autovacuumWorkersVacuumFreeze = parseInt(value) + case "autovacuum_brin_summarize": + p.mx.autovacuumWorkersBrinSummarize = parseInt(value) + } + }) +} + +func (p *Postgres) doQueryXactQueryRunningTime() error { + q := queryXactQueryRunningTime() + + var state string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "state": + state = value + case "xact_running_time": + v, _ := strconv.ParseFloat(value, 64) + p.mx.xactTimeHist.Observe(v) + case "query_running_time": + if state == "active" { + v, _ := strconv.ParseFloat(value, 64) + p.mx.queryTimeHist.Observe(v) + } + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go new file mode 100644 index 00000000000000..f5eb15bb301206 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "database/sql" +) + +func (p *Postgres) doQueryIndexesMetrics() error { + if err := p.doQueryStatUserIndexes(); err != nil { + return err + } + + return nil +} + +func (p *Postgres) doQueryStatUserIndexes() error { + if err := p.doDBQueryStatUserIndexes(p.db); err != nil { + p.Warning(err) + } + for _, conn := range p.dbConns { + if conn.db == nil { + continue + } + if err := p.doDBQueryStatUserIndexes(conn.db); err != nil { + p.Warning(err) + } + } + return nil +} + +func (p *Postgres) doDBQueryStatUserIndexes(db *sql.DB) error { + q := queryStatUserIndexes() + + var dbname, schema, table, name string + return p.doDBQuery(db, q, func(column, value string, _ bool) { + switch column { + case "datname": + dbname = value + case "schemaname": + schema = value + case "relname": + table = value + case "indexrelname": + name = value + p.getIndexMetrics(name, table, dbname, schema).updated = true + case "parent_relname": + p.getIndexMetrics(name, table, dbname, schema).parentTable = value + case "idx_scan": + p.getIndexMetrics(name, table, dbname, schema).idxScan = parseInt(value) + case "idx_tup_read": + p.getIndexMetrics(name, table, dbname, schema).idxTupRead = parseInt(value) + case "idx_tup_fetch": + p.getIndexMetrics(name, table, dbname, schema).idxTupFetch = parseInt(value) + case "size": + p.getIndexMetrics(name, table, dbname, schema).size = parseInt(value) + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go new file mode 100644 index 00000000000000..a2299c8b40ae3b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "database/sql" + "strconv" + + "github.com/jackc/pgx/v4/stdlib" +) + +func (p *Postgres) doQueryServerVersion() (int, error) { + q := queryServerVersion() + + var s string + if err := p.doQueryRow(q, &s); err != nil { + return 0, err + } + + return strconv.Atoi(s) +} + +func (p *Postgres) doQueryIsSuperUser() (bool, error) { + q := queryIsSuperUser() + + var v bool + if err := p.doQueryRow(q, &v); err != nil { + return false, err + } + + return v, nil +} + +func (p *Postgres) doQueryPGIsInRecovery() (bool, error) { + q := queryPGIsInRecovery() + + var v bool + if err := p.doQueryRow(q, &v); err != nil { + return false, err + } + + return v, nil +} + +func (p *Postgres) doQuerySettingsMaxConnections() (int64, error) { + q := querySettingsMaxConnections() 
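+ // collect() caches this value and refreshes it only every recheckSettingsEvery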
+ + var s string + if err := p.doQueryRow(q, &s); err != nil { + return 0, err + } + + return strconv.ParseInt(s, 10, 64) +} + +func (p *Postgres) doQuerySettingsMaxLocksHeld() (int64, error) { + q := querySettingsMaxLocksHeld() + + var s string + if err := p.doQueryRow(q, &s); err != nil { + return 0, err + } + + return strconv.ParseInt(s, 10, 64) +} + +const connErrMax = 3 + +func (p *Postgres) doQueryQueryableDatabases() error { + q := queryQueryableDatabaseList() + + var dbs []string + err := p.doQuery(q, func(_, value string, _ bool) { + if p.dbSr != nil && p.dbSr.MatchString(value) { + dbs = append(dbs, value) + } + }) + if err != nil { + return err + } + + seen := make(map[string]bool, len(dbs)) + + for _, dbname := range dbs { + seen[dbname] = true + + conn, ok := p.dbConns[dbname] + if !ok { + conn = &dbConn{} + p.dbConns[dbname] = conn + } + + if conn.db != nil || conn.connErrors >= connErrMax { + continue + } + + db, connStr, err := p.openSecondaryConnection(dbname) + if err != nil { + p.Warning(err) + conn.connErrors++ + continue + } + + tables, err := p.doDBQueryUserTablesCount(db) + if err != nil { + p.Warning(err) + conn.connErrors++ + _ = db.Close() + stdlib.UnregisterConnConfig(connStr) + continue + } + + indexes, err := p.doDBQueryUserIndexesCount(db) + if err != nil { + p.Warning(err) + conn.connErrors++ + _ = db.Close() + stdlib.UnregisterConnConfig(connStr) + continue + } + + if (p.MaxDBTables != 0 && tables > p.MaxDBTables) || (p.MaxDBIndexes != 0 && indexes > p.MaxDBIndexes) { + p.Warningf("database '%s' has too many user tables(%d/%d)/indexes(%d/%d), skipping it", + dbname, tables, p.MaxDBTables, indexes, p.MaxDBIndexes) + conn.connErrors = connErrMax + _ = db.Close() + stdlib.UnregisterConnConfig(connStr) + continue + } + + conn.db, conn.connStr = db, connStr + } + + for dbname, conn := range p.dbConns { + if seen[dbname] { + continue + } + delete(p.dbConns, dbname) + if conn.connStr != "" { + stdlib.UnregisterConnConfig(conn.connStr) + } + if conn.db != nil { + _ = conn.db.Close() + } + } + + return nil +} + +func (p *Postgres) doDBQueryUserTablesCount(db *sql.DB) (int64, error) { + q := queryUserTablesCount() + + var v string + if err := p.doDBQueryRow(db, q, &v); err != nil { + return 0, err + } + + return parseInt(v), nil +} + +func (p *Postgres) doDBQueryUserIndexesCount(db *sql.DB) (int64, error) { + q := queryUserIndexesCount() + + var v string + if err := p.doDBQueryRow(db, q, &v); err != nil { + return 0, err + } + + return parseInt(v), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go new file mode 100644 index 00000000000000..e92aa10df7ae7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "fmt" +) + +func (p *Postgres) doQueryReplicationMetrics() error { + if err := p.doQueryReplStandbyAppWALDelta(); err != nil { + return fmt.Errorf("querying replication standby app wal delta error: %v", err) + } + + if p.pgVersion >= pgVersion10 { + if err := p.doQueryReplStandbyAppWALLag(); err != nil { + return fmt.Errorf("querying replication standby app wal lag error: %v", err) + } + } + + if p.pgVersion >= pgVersion10 && p.isSuperUser() { + if err := p.doQueryReplSlotFiles(); err != nil { + return fmt.Errorf("querying replication slot files error: %v", err) + } + } + + return nil +} + +func (p *Postgres) 
doQueryReplStandbyAppWALDelta() error { + q := queryReplicationStandbyAppDelta(p.pgVersion) + + var app string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "application_name": + app = value + p.getReplAppMetrics(app).updated = true + default: + // TODO: delta calculation was changed in https://github.com/netdata/go.d.plugin/pull/1039 + // - 'replay_delta' (probably other deltas too?) can be negative + // - Also, WAL delta != WAL lag after that PR + v := parseInt(value) + if v < 0 { + v = 0 + } + switch column { + case "sent_delta": + p.getReplAppMetrics(app).walSentDelta += v + case "write_delta": + p.getReplAppMetrics(app).walWriteDelta += v + case "flush_delta": + p.getReplAppMetrics(app).walFlushDelta += v + case "replay_delta": + p.getReplAppMetrics(app).walReplayDelta += v + } + } + }) +} + +func (p *Postgres) doQueryReplStandbyAppWALLag() error { + q := queryReplicationStandbyAppLag() + + var app string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "application_name": + app = value + p.getReplAppMetrics(app).updated = true + case "write_lag": + p.getReplAppMetrics(app).walWriteLag += parseInt(value) + case "flush_lag": + p.getReplAppMetrics(app).walFlushLag += parseInt(value) + case "replay_lag": + p.getReplAppMetrics(app).walReplayLag += parseInt(value) + } + }) +} + +func (p *Postgres) doQueryReplSlotFiles() error { + q := queryReplicationSlotFiles(p.pgVersion) + + var slot string + return p.doQuery(q, func(column, value string, _ bool) { + switch column { + case "slot_name": + slot = value + p.getReplSlotMetrics(slot).updated = true + case "replslot_wal_keep": + p.getReplSlotMetrics(slot).walKeep += parseInt(value) + case "replslot_files": + p.getReplSlotMetrics(slot).files += parseInt(value) + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go new file mode 100644 index 00000000000000..5b3e2c71d5b13a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "database/sql" + "strings" +) + +func (p *Postgres) doQueryTablesMetrics() error { + if err := p.doQueryStatUserTable(); err != nil { + return err + } + if err := p.doQueryStatIOUserTables(); err != nil { + return err + } + + return nil +} + +func (p *Postgres) doQueryStatUserTable() error { + if err := p.doDBQueryStatUserTables(p.db); err != nil { + p.Warning(err) + } + for _, conn := range p.dbConns { + if conn.db == nil { + continue + } + if err := p.doDBQueryStatUserTables(conn.db); err != nil { + p.Warning(err) + } + } + return nil +} + +func (p *Postgres) doQueryStatIOUserTables() error { + if err := p.doDBQueryStatIOUserTables(p.db); err != nil { + p.Warning(err) + } + for _, conn := range p.dbConns { + if conn.db == nil { + continue + } + if err := p.doDBQueryStatIOUserTables(conn.db); err != nil { + p.Warning(err) + } + } + return nil +} + +func (p *Postgres) doDBQueryStatUserTables(db *sql.DB) error { + q := queryStatUserTables() + + var dbname, schema, name string + return p.doDBQuery(db, q, func(column, value string, _ bool) { + if value == "" && strings.HasPrefix(column, "last_") { + value = "-1" + } + switch column { + case "datname": + dbname = value + case "schemaname": + schema = value + case "relname": + name = value + p.getTableMetrics(name, dbname, schema).updated = true + case "parent_relname": + 
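// parentName feeds the parent_table chart label (see newTableChart) + 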
p.getTableMetrics(name, dbname, schema).parentName = value + case "seq_scan": + p.getTableMetrics(name, dbname, schema).seqScan = parseInt(value) + case "seq_tup_read": + p.getTableMetrics(name, dbname, schema).seqTupRead = parseInt(value) + case "idx_scan": + p.getTableMetrics(name, dbname, schema).idxScan = parseInt(value) + case "idx_tup_fetch": + p.getTableMetrics(name, dbname, schema).idxTupFetch = parseInt(value) + case "n_tup_ins": + p.getTableMetrics(name, dbname, schema).nTupIns = parseInt(value) + case "n_tup_upd": + p.getTableMetrics(name, dbname, schema).nTupUpd.last = parseInt(value) + case "n_tup_del": + p.getTableMetrics(name, dbname, schema).nTupDel = parseInt(value) + case "n_tup_hot_upd": + p.getTableMetrics(name, dbname, schema).nTupHotUpd.last = parseInt(value) + case "n_live_tup": + p.getTableMetrics(name, dbname, schema).nLiveTup = parseInt(value) + case "n_dead_tup": + p.getTableMetrics(name, dbname, schema).nDeadTup = parseInt(value) + case "last_vacuum": + p.getTableMetrics(name, dbname, schema).lastVacuumAgo = parseFloat(value) + case "last_autovacuum": + p.getTableMetrics(name, dbname, schema).lastAutoVacuumAgo = parseFloat(value) + case "last_analyze": + p.getTableMetrics(name, dbname, schema).lastAnalyzeAgo = parseFloat(value) + case "last_autoanalyze": + p.getTableMetrics(name, dbname, schema).lastAutoAnalyzeAgo = parseFloat(value) + case "vacuum_count": + p.getTableMetrics(name, dbname, schema).vacuumCount = parseInt(value) + case "autovacuum_count": + p.getTableMetrics(name, dbname, schema).autovacuumCount = parseInt(value) + case "analyze_count": + p.getTableMetrics(name, dbname, schema).analyzeCount = parseInt(value) + case "autoanalyze_count": + p.getTableMetrics(name, dbname, schema).autoAnalyzeCount = parseInt(value) + case "total_relation_size": + p.getTableMetrics(name, dbname, schema).totalSize = parseInt(value) + } + }) +} + +func (p *Postgres) doDBQueryStatIOUserTables(db *sql.DB) error { + q := queryStatIOUserTables() + + var dbname, schema, name string + return p.doDBQuery(db, q, func(column, value string, rowEnd bool) { + if value == "" && column != "parent_relname" { + value = "-1" + } + switch column { + case "datname": + dbname = value + case "schemaname": + schema = value + case "relname": + name = value + p.getTableMetrics(name, dbname, schema).updated = true + case "parent_relname": + p.getTableMetrics(name, dbname, schema).parentName = value + case "heap_blks_read_bytes": + p.getTableMetrics(name, dbname, schema).heapBlksRead.last = parseInt(value) + case "heap_blks_hit_bytes": + p.getTableMetrics(name, dbname, schema).heapBlksHit.last = parseInt(value) + case "idx_blks_read_bytes": + p.getTableMetrics(name, dbname, schema).idxBlksRead.last = parseInt(value) + case "idx_blks_hit_bytes": + p.getTableMetrics(name, dbname, schema).idxBlksHit.last = parseInt(value) + case "toast_blks_read_bytes": + p.getTableMetrics(name, dbname, schema).toastBlksRead.last = parseInt(value) + case "toast_blks_hit_bytes": + p.getTableMetrics(name, dbname, schema).toastBlksHit.last = parseInt(value) + case "tidx_blks_read_bytes": + p.getTableMetrics(name, dbname, schema).tidxBlksRead.last = parseInt(value) + case "tidx_blks_hit_bytes": + p.getTableMetrics(name, dbname, schema).tidxBlksHit.last = parseInt(value) + } + }) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/init.go b/src/go/collectors/go.d.plugin/modules/postgres/init.go new file mode 100644 index 00000000000000..3b324c07e5f892 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/postgres/init.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (p *Postgres) validateConfig() error { + if p.DSN == "" { + return errors.New("DSN not set") + } + return nil +} + +func (p *Postgres) initDBSelector() (matcher.Matcher, error) { + if p.DBSelector == "" { + return nil, nil + } + + return matcher.NewSimplePatternsMatcher(p.DBSelector) +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md b/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md new file mode 100644 index 00000000000000..f3d869b9893bd1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md @@ -0,0 +1,382 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/postgres/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/postgres/metadata.yaml" +sidebar_label: "PostgreSQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# PostgreSQL + + +<img src="https://netdata.cloud/img/postgres.svg" width="150"/> + + +Plugin: go.d.plugin +Module: postgres + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the activity and performance of Postgres servers: it collects replication statistics, metrics for each database, table, and index, and more. + + +It establishes a connection to the Postgres instance via a TCP or UNIX socket. +To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users over known PostgreSQL TCP and UNIX sockets: + +- 127.0.0.1:5432 +- /var/run/postgresql/ + + +#### Limits + +Table and index metrics are not collected for databases with more than 50 tables or 250 indexes. +These limits can be changed in the configuration file. + + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per PostgreSQL instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.connections_utilization | used | percentage | +| postgres.connections_usage | available, used | connections | +| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections | +| postgres.transactions_duration | a dimension per bucket | transactions/s | +| postgres.queries_duration | a dimension per bucket | queries/s | +| postgres.locks_utilization | used | percentage | +| postgres.checkpoints_rate | scheduled, requested | checkpoints/s | +| postgres.checkpoints_time | write, sync | milliseconds | +| postgres.bgwriter_halts_rate | maxwritten | events/s | +| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s | +| postgres.buffers_backend_fsync_rate | fsync | calls/s | +| postgres.buffers_allocated_rate | allocated | B/s | +| postgres.wal_io_rate | write | B/s | +| postgres.wal_files_count | written, recycled | files | +| postgres.wal_archiving_files_count | ready, done | files/s | +| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers | +| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage | +| postgres.txid_exhaustion_perc | txid_exhaustion | percentage | +| postgres.txid_exhaustion_oldest_txid_num | xid | xid | +| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations | +| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B | +| postgres.uptime | uptime | seconds | +| postgres.databases_count | databases | databases | + +### Per repl application + +These metrics refer to the replication application. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| application | application name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B | +| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds | + +### Per repl slot + +These metrics refer to the replication slot. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| slot | replication slot name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files | + +### Per database + +These metrics refer to the database. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| database | database name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.db_transactions_ratio | committed, rollback | percentage | +| postgres.db_transactions_rate | committed, rollback | transactions/s | +| postgres.db_connections_utilization | used | percentage | +| postgres.db_connections_count | connections | connections | +| postgres.db_cache_io_ratio | miss | percentage | +| postgres.db_io_rate | memory, disk | B/s | +| postgres.db_ops_fetched_rows_ratio | fetched | percentage | +| postgres.db_ops_read_rows_rate | returned, fetched | rows/s | +| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s | +| postgres.db_conflicts_rate | conflicts | queries/s | +| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s | +| postgres.db_deadlocks_rate | deadlocks | deadlocks/s | +| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks | +| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks | +| postgres.db_temp_files_created_rate | created | files/s | +| postgres.db_temp_files_io_rate | written | B/s | +| postgres.db_size | size | B | + +### Per table + +These metrics refer to the database table. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| database | database name | +| schema | schema name | +| table | table name | +| parent_table | parent table name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.table_rows_dead_ratio | dead | percentage | +| postgres.table_rows_count | live, dead | rows | +| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s | +| postgres.table_ops_rows_hot_ratio | hot | percentage | +| postgres.table_ops_rows_hot_rate | hot | rows/s | +| postgres.table_cache_io_ratio | miss | percentage | +| postgres.table_io_rate | memory, disk | B/s | +| postgres.table_index_cache_io_ratio | miss | percentage | +| postgres.table_index_io_rate | memory, disk | B/s | +| postgres.table_toast_cache_io_ratio | miss | percentage | +| postgres.table_toast_io_rate | memory, disk | B/s | +| postgres.table_toast_index_cache_io_ratio | miss | percentage | +| postgres.table_toast_index_io_rate | memory, disk | B/s | +| postgres.table_scans_rate | index, sequential | scans/s | +| postgres.table_scans_rows_rate | index, sequential | rows/s | +| postgres.table_autovacuum_since_time | time | seconds | +| postgres.table_vacuum_since_time | time | seconds | +| postgres.table_autoanalyze_since_time | time | seconds | +| postgres.table_analyze_since_time | time | seconds | +| postgres.table_null_columns | null | columns | +| postgres.table_size | size | B | +| postgres.table_bloat_size_perc | bloat | percentage | +| postgres.table_bloat_size | bloat | B | + +### Per index + +These metrics refer to the table index. 
+ +Labels: + +| Label | Description | +|:-----------|:----------------| +| database | database name | +| schema | schema name | +| table | table name | +| parent_table | parent table name | +| index | index name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| postgres.index_size | size | B | +| postgres.index_bloat_size_perc | bloat | percentage | +| postgres.index_bloat_size | bloat | B | +| postgres.index_usage_status | used, unused | status | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute | +| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute | +| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound | +| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute | +| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes | +| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute | +| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute | +| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute | +| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute | +| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute | +| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} | +| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon | +| [ postgres_table_last_autoanalyze_time 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon | +| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} | + + +## Setup + +### Prerequisites + +#### Create netdata user + +Create a user granted the `pg_monitor` +or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html). + +To create the `netdata` user with these permissions, execute the following in a psql session as a user with CREATEROLE privileges: + +```postgresql +CREATE USER netdata; +GRANT pg_monitor TO netdata; +``` + +After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or +the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your +system. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/postgres.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/postgres.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes | +| timeout | Query timeout in seconds. | 2 | no | +| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). | | no | +| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no | +| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no | + +</details> + +#### Examples + +##### TCP socket + +An example configuration. + +```yaml +jobs: + - name: local + dsn: 'postgresql://netdata@127.0.0.1:5432/postgres' + +``` +##### Unix socket + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: 'host=/var/run/postgresql dbname=postgres user=netdata' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances.
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + dsn: 'postgresql://netdata@127.0.0.1:5432/postgres' + + - name: remote + dsn: 'postgresql://netdata@203.0.113.0:5432/postgres' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m postgres + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml b/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml new file mode 100644 index 00000000000000..94fe1b376acdab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml @@ -0,0 +1,750 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-postgres + plugin_name: go.d.plugin + module_name: postgres + monitored_instance: + name: PostgreSQL + link: https://www.postgresql.org/ + categories: + - data-collection.database-servers + icon_filename: postgres.svg + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + - plugin_name: cgroups.plugin + module_name: cgroups + alternative_monitored_instances: [] + info_provided_to_referring_integrations: + description: "" + keywords: + - db + - database + - postgres + - postgresql + - sql + most_popular: true + overview: + multi_instance: true + data_collection: + metrics_description: | + This collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more. + method_description: | + It establishes a connection to the Postgres instance via a TCP or UNIX socket. + To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database. + default_behavior: + auto_detection: + description: | + By default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets: + + - 127.0.0.1:5432 + - /var/run/postgresql/ + limits: + description: | + Table and index metrics are not collected for databases with more than 50 tables or 250 indexes. + These limits can be changed in the configuration file. + performance_impact: + description: "" + additional_permissions: + description: "" + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: + - title: Create netdata user + description: | + Create a user granted the `pg_monitor` + or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html). + + To create the `netdata` user with these permissions, execute the following in a psql session as a user with CREATEROLE privileges: + + ```postgresql + CREATE USER netdata; + GRANT pg_monitor TO netdata; + ``` + + After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or + the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your + system.
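+ + As an optional sanity check (not something the collector requires), you can confirm the grant from a psql session with the standard pg_has_role() function; it should return `t`: + + ```postgresql + SELECT pg_has_role('netdata', 'pg_monitor', 'member'); + ```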
+ configuration: + file: + name: go.d/postgres.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: dsn + description: Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). + default_value: postgres://postgres:postgres@127.0.0.1:5432/postgres + required: true + - name: timeout + description: Query timeout in seconds. + default_value: 2 + required: false + - name: collect_databases_matching + description: Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#simple-patterns-matcher). + default_value: "" + required: false + - name: max_db_tables + description: Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. + default_value: 50 + required: false + - name: max_db_indexes + description: Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. + default_value: 250 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: TCP socket + description: An example configuration. + folding: + enabled: false + config: | + jobs: + - name: local + dsn: 'postgresql://netdata@127.0.0.1:5432/postgres' + - name: Unix socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: 'host=/var/run/postgresql dbname=postgres user=netdata' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. 
+ config: | + jobs: + - name: local + dsn: 'postgresql://netdata@127.0.0.1:5432/postgres' + + - name: remote + dsn: 'postgresql://netdata@203.0.113.0:5432/postgres' + troubleshooting: + problems: + list: [] + alerts: + - name: postgres_total_connection_utilization + metric: postgres.connections_utilization + info: average total connection utilization over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_acquired_locks_utilization + metric: postgres.locks_utilization + info: average acquired locks utilization over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_txid_exhaustion_perc + metric: postgres.txid_exhaustion_perc + info: percent towards TXID wraparound + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_db_cache_io_ratio + metric: postgres.db_cache_io_ratio + info: average cache hit ratio in db ${label:database} over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_db_transactions_rollback_ratio + metric: postgres.db_cache_io_ratio + info: average aborted transactions percentage in db ${label:database} over the last five minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_db_deadlocks_rate + metric: postgres.db_deadlocks_rate + info: number of deadlocks detected in db ${label:database} in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_cache_io_ratio + metric: postgres.table_cache_io_ratio + info: average cache hit ratio in db ${label:database} table ${label:table} over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_index_cache_io_ratio + metric: postgres.table_index_cache_io_ratio + info: average index cache hit ratio in db ${label:database} table ${label:table} over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_toast_cache_io_ratio + metric: postgres.table_toast_cache_io_ratio + info: average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_toast_index_cache_io_ratio + metric: postgres.table_toast_index_cache_io_ratio + info: average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_bloat_size_perc + metric: postgres.table_bloat_size_perc + info: bloat size percentage in db ${label:database} table ${label:table} + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_last_autovacuum_time + metric: postgres.table_autovacuum_since_time + info: time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_table_last_autoanalyze_time + metric: postgres.table_autoanalyze_since_time + info: time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon + link: 
https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + - name: postgres_index_bloat_size_perc + metric: postgres.index_bloat_size_perc + info: bloat size percentage in db ${label:database} table ${label:table} index ${label:index} + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: postgres.connections_utilization + description: Connections utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: postgres.connections_usage + description: Connections usage + unit: connections + chart_type: stacked + dimensions: + - name: available + - name: used + - name: postgres.connections_state_count + description: Connections in each state + unit: connections + chart_type: stacked + dimensions: + - name: active + - name: idle + - name: idle_in_transaction + - name: idle_in_transaction_aborted + - name: disabled + - name: postgres.transactions_duration + description: Observed transactions time + unit: transactions/s + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: postgres.queries_duration + description: Observed active queries time + unit: queries/s + chart_type: stacked + dimensions: + - name: a dimension per bucket + - name: postgres.locks_utilization + description: Acquired locks utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: postgres.checkpoints_rate + description: Checkpoints + unit: checkpoints/s + chart_type: stacked + dimensions: + - name: scheduled + - name: requested + - name: postgres.checkpoints_time + description: Checkpoint time + unit: milliseconds + chart_type: stacked + dimensions: + - name: write + - name: sync + - name: postgres.bgwriter_halts_rate + description: Background writer scan halts + unit: events/s + chart_type: line + dimensions: + - name: maxwritten + - name: postgres.buffers_io_rate + description: Buffers written rate + unit: B/s + chart_type: area + dimensions: + - name: checkpoint + - name: backend + - name: bgwriter + - name: postgres.buffers_backend_fsync_rate + description: Backend fsync calls + unit: calls/s + chart_type: line + dimensions: + - name: fsync + - name: postgres.buffers_allocated_rate + description: Buffers allocated + unit: B/s + chart_type: line + dimensions: + - name: allocated + - name: postgres.wal_io_rate + description: Write-Ahead Log writes + unit: B/s + chart_type: line + dimensions: + - name: write + - name: postgres.wal_files_count + description: Write-Ahead Log files + unit: files + chart_type: stacked + dimensions: + - name: written + - name: recycled + - name: postgres.wal_archiving_files_count + description: Write-Ahead Log archived files + unit: files/s + chart_type: stacked + dimensions: + - name: ready + - name: done + - name: postgres.autovacuum_workers_count + description: Autovacuum workers + unit: workers + chart_type: line + dimensions: + - name: analyze + - name: vacuum_analyze + - name: vacuum + - name: vacuum_freeze + - name: brin_summarize + - name: postgres.txid_exhaustion_towards_autovacuum_perc + description: Percent towards emergency autovacuum + unit: percentage + chart_type: line + dimensions: + - name: emergency_autovacuum + - name: postgres.txid_exhaustion_perc + description: Percent towards transaction ID wraparound + unit: 
percentage + chart_type: line + dimensions: + - name: txid_exhaustion + - name: postgres.txid_exhaustion_oldest_txid_num + description: Oldest transaction XID + chart_type: line + unit: xid + dimensions: + - name: xid + - name: postgres.catalog_relations_count + description: Relation count + unit: relations + chart_type: stacked + dimensions: + - name: ordinary_table + - name: index + - name: sequence + - name: toast_table + - name: view + - name: materialized_view + - name: composite_type + - name: foreign_table + - name: partitioned_table + - name: partitioned_index + - name: postgres.catalog_relations_size + description: Relation size + unit: B + chart_type: stacked + dimensions: + - name: ordinary_table + - name: index + - name: sequence + - name: toast_table + - name: view + - name: materialized_view + - name: composite_type + - name: foreign_table + - name: partitioned_table + - name: partitioned_index + - name: postgres.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: postgres.databases_count + description: Number of databases + unit: databases + chart_type: line + dimensions: + - name: databases + - name: repl application + description: These metrics refer to the replication application. + labels: + - name: application + description: application name + metrics: + - name: postgres.replication_app_wal_lag_size + description: Standby application WAL lag size + unit: B + chart_type: line + dimensions: + - name: sent_lag + - name: write_lag + - name: flush_lag + - name: replay_lag + - name: postgres.replication_app_wal_lag_time + description: Standby application WAL lag time + unit: seconds + chart_type: line + dimensions: + - name: write_lag + - name: flush_lag + - name: replay_lag + - name: repl slot + description: These metrics refer to the replication slot. + labels: + - name: slot + description: replication slot name + metrics: + - name: postgres.replication_slot_files_count + description: Replication slot files + unit: files + chart_type: line + dimensions: + - name: wal_keep + - name: pg_replslot_files + - name: database + description: These metrics refer to the database. 
+ labels: + - name: database + description: database name + metrics: + - name: postgres.db_transactions_ratio + description: Database transactions ratio + unit: percentage + chart_type: line + dimensions: + - name: committed + - name: rollback + - name: postgres.db_transactions_rate + description: Database transactions + unit: transactions/s + chart_type: line + dimensions: + - name: committed + - name: rollback + - name: postgres.db_connections_utilization + description: Database connections utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: postgres.db_connections_count + description: Database connections + unit: connections + chart_type: line + dimensions: + - name: connections + - name: postgres.db_cache_io_ratio + description: Database buffer cache miss ratio + unit: percentage + chart_type: line + dimensions: + - name: miss + - name: postgres.db_io_rate + description: Database reads + unit: B/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: postgres.db_ops_fetched_rows_ratio + description: Database rows fetched ratio + unit: percentage + chart_type: line + dimensions: + - name: fetched + - name: postgres.db_ops_read_rows_rate + description: Database rows read + unit: rows/s + chart_type: line + dimensions: + - name: returned + - name: fetched + - name: postgres.db_ops_write_rows_rate + description: Database rows written + unit: rows/s + chart_type: line + dimensions: + - name: inserted + - name: deleted + - name: updated + - name: postgres.db_conflicts_rate + description: Database canceled queries + unit: queries/s + chart_type: line + dimensions: + - name: conflicts + - name: postgres.db_conflicts_reason_rate + description: Database canceled queries by reason + unit: queries/s + chart_type: line + dimensions: + - name: tablespace + - name: lock + - name: snapshot + - name: bufferpin + - name: deadlock + - name: postgres.db_deadlocks_rate + description: Database deadlocks + unit: deadlocks/s + chart_type: line + dimensions: + - name: deadlocks + - name: postgres.db_locks_held_count + description: Database locks held + unit: locks + chart_type: stacked + dimensions: + - name: access_share + - name: row_share + - name: row_exclusive + - name: share_update + - name: share + - name: share_row_exclusive + - name: exclusive + - name: access_exclusive + - name: postgres.db_locks_awaited_count + description: Database locks awaited + unit: locks + chart_type: stacked + dimensions: + - name: access_share + - name: row_share + - name: row_exclusive + - name: share_update + - name: share + - name: share_row_exclusive + - name: exclusive + - name: access_exclusive + - name: postgres.db_temp_files_created_rate + description: Database created temporary files + unit: files/s + chart_type: line + dimensions: + - name: created + - name: postgres.db_temp_files_io_rate + description: Database temporary files data written to disk + unit: B/s + chart_type: line + dimensions: + - name: written + - name: postgres.db_size + description: Database size + unit: B + chart_type: line + dimensions: + - name: size + - name: table + description: These metrics refer to the database table. 
+ labels: + - name: database + description: database name + - name: schema + description: schema name + - name: table + description: table name + - name: parent_table + description: parent table name + metrics: + - name: postgres.table_rows_dead_ratio + description: Table dead rows + unit: percentage + chart_type: line + dimensions: + - name: dead + - name: postgres.table_rows_count + description: Table total rows + unit: rows + chart_type: line + dimensions: + - name: live + - name: dead + - name: postgres.table_ops_rows_rate + description: Table throughput + unit: rows/s + chart_type: line + dimensions: + - name: inserted + - name: deleted + - name: updated + - name: postgres.table_ops_rows_hot_ratio + description: Table HOT updates ratio + unit: percentage + chart_type: line + dimensions: + - name: hot + - name: postgres.table_ops_rows_hot_rate + description: Table HOT updates + unit: rows/s + chart_type: line + dimensions: + - name: hot + - name: postgres.table_cache_io_ratio + description: Table I/O cache miss ratio + unit: percentage + chart_type: line + dimensions: + - name: miss + - name: postgres.table_io_rate + description: Table I/O + unit: B/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: postgres.table_index_cache_io_ratio + description: Table index I/O cache miss ratio + unit: percentage + chart_type: line + dimensions: + - name: miss + - name: postgres.table_index_io_rate + description: Table index I/O + unit: B/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: postgres.table_toast_cache_io_ratio + description: Table TOAST I/O cache miss ratio + unit: percentage + chart_type: line + dimensions: + - name: miss + - name: postgres.table_toast_io_rate + description: Table TOAST I/O + unit: B/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: postgres.table_toast_index_cache_io_ratio + description: Table TOAST index I/O cache miss ratio + unit: percentage + chart_type: line + dimensions: + - name: miss + - name: postgres.table_toast_index_io_rate + description: Table TOAST index I/O + unit: B/s + chart_type: line + dimensions: + - name: memory + - name: disk + - name: postgres.table_scans_rate + description: Table scans + unit: scans/s + chart_type: line + dimensions: + - name: index + - name: sequential + - name: postgres.table_scans_rows_rate + description: Table live rows fetched by scans + unit: rows/s + chart_type: line + dimensions: + - name: index + - name: sequential + - name: postgres.table_autovacuum_since_time + description: Table time since last auto VACUUM + unit: seconds + chart_type: line + dimensions: + - name: time + - name: postgres.table_vacuum_since_time + description: Table time since last manual VACUUM + unit: seconds + chart_type: line + dimensions: + - name: time + - name: postgres.table_autoanalyze_since_time + description: Table time since last auto ANALYZE + unit: seconds + chart_type: line + dimensions: + - name: time + - name: postgres.table_analyze_since_time + description: Table time since last manual ANALYZE + unit: seconds + chart_type: line + dimensions: + - name: time + - name: postgres.table_null_columns + description: Table null columns + unit: columns + chart_type: line + dimensions: + - name: "null" + - name: postgres.table_size + description: Table total size + unit: B + chart_type: line + dimensions: + - name: size + - name: postgres.table_bloat_size_perc + description: Table bloat size percentage + unit: percentage + chart_type: line + dimensions: + - name: 
bloat + - name: postgres.table_bloat_size + description: Table bloat size + unit: B + chart_type: line + dimensions: + - name: bloat + - name: index + description: These metrics refer to the table index. + labels: + - name: database + description: database name + - name: schema + description: schema name + - name: table + description: table name + - name: parent_table + description: parent table name + - name: index + description: index name + metrics: + - name: postgres.index_size + description: Index size + unit: B + chart_type: line + dimensions: + - name: size + - name: postgres.index_bloat_size_perc + description: Index bloat size percentage + unit: percentage + chart_type: line + dimensions: + - name: bloat + - name: postgres.index_bloat_size + description: Index bloat size + unit: B + chart_type: line + dimensions: + - name: bloat + - name: postgres.index_usage_status + description: Index usage status + unit: status + chart_type: line + dimensions: + - name: used + - name: unused diff --git a/src/go/collectors/go.d.plugin/modules/postgres/metrics.go b/src/go/collectors/go.d.plugin/modules/postgres/metrics.go new file mode 100644 index 00000000000000..b071e1694e7bc3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/metrics.go @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import "github.com/netdata/go.d.plugin/pkg/metrics" + +type pgMetrics struct { + srvMetrics + dbs map[string]*dbMetrics + tables map[string]*tableMetrics + indexes map[string]*indexMetrics + replApps map[string]*replStandbyAppMetrics + replSlots map[string]*replSlotMetrics +} + +type srvMetrics struct { + xactTimeHist metrics.Histogram + queryTimeHist metrics.Histogram + + maxConnections int64 + maxLocksHeld int64 + + uptime int64 + + relkindOrdinaryTable int64 + relkindIndex int64 + relkindSequence int64 + relkindTOASTTable int64 + relkindView int64 + relkindMatView int64 + relkindCompositeType int64 + relkindForeignTable int64 + relkindPartitionedTable int64 + relkindPartitionedIndex int64 + relkindOrdinaryTableSize int64 + relkindIndexSize int64 + relkindSequenceSize int64 + relkindTOASTTableSize int64 + relkindViewSize int64 + relkindMatViewSize int64 + relkindCompositeTypeSize int64 + relkindForeignTableSize int64 + relkindPartitionedTableSize int64 + relkindPartitionedIndexSize int64 + + connUsed int64 + connStateActive int64 + connStateIdle int64 + connStateIdleInTrans int64 + connStateIdleInTransAborted int64 + connStateFastpathFunctionCall int64 + connStateDisabled int64 + + checkpointsTimed int64 + checkpointsReq int64 + checkpointWriteTime int64 + checkpointSyncTime int64 + buffersCheckpoint int64 + buffersClean int64 + maxwrittenClean int64 + buffersBackend int64 + buffersBackendFsync int64 + buffersAlloc int64 + + oldestXID int64 + percentTowardsWraparound int64 + percentTowardsEmergencyAutovacuum int64 + + walWrites int64 + walRecycledFiles int64 + walWrittenFiles int64 + walArchiveFilesReady int64 + walArchiveFilesDone int64 + + autovacuumWorkersAnalyze int64 + autovacuumWorkersVacuumAnalyze int64 + autovacuumWorkersVacuum int64 + autovacuumWorkersVacuumFreeze int64 + autovacuumWorkersBrinSummarize int64 +} + +type dbMetrics struct { + name string + + updated bool + hasCharts bool + + numBackends int64 + datConnLimit int64 + xactCommit int64 + xactRollback int64 + blksRead incDelta + blksHit incDelta + tupReturned incDelta + tupFetched incDelta + tupInserted int64 + tupUpdated int64 + tupDeleted int64 + conflicts int64 + tempFiles int64 + 
tempBytes int64 + deadlocks int64 + + size *int64 // need 'connect' privilege for pg_database_size() + + conflTablespace int64 + conflLock int64 + conflSnapshot int64 + conflBufferpin int64 + conflDeadlock int64 + + accessShareLockHeld int64 + rowShareLockHeld int64 + rowExclusiveLockHeld int64 + shareUpdateExclusiveLockHeld int64 + shareLockHeld int64 + shareRowExclusiveLockHeld int64 + exclusiveLockHeld int64 + accessExclusiveLockHeld int64 + accessShareLockAwaited int64 + rowShareLockAwaited int64 + rowExclusiveLockAwaited int64 + shareUpdateExclusiveLockAwaited int64 + shareLockAwaited int64 + shareRowExclusiveLockAwaited int64 + exclusiveLockAwaited int64 + accessExclusiveLockAwaited int64 +} + +type replStandbyAppMetrics struct { + name string + + updated bool + hasCharts bool + + walSentDelta int64 + walWriteDelta int64 + walFlushDelta int64 + walReplayDelta int64 + + walWriteLag int64 + walFlushLag int64 + walReplayLag int64 +} + +type replSlotMetrics struct { + name string + + updated bool + hasCharts bool + + walKeep int64 + files int64 +} + +type tableMetrics struct { + name string + parentName string + db string + schema string + + updated bool + hasCharts bool + hasLastAutoVacuumChart bool + hasLastVacuumChart bool + hasLastAutoAnalyzeChart bool + hasLastAnalyzeChart bool + hasTableIOCharts bool + hasTableIdxIOCharts bool + hasTableTOASTIOCharts bool + hasTableTOASTIdxIOCharts bool + + // pg_stat_user_tables + seqScan int64 + seqTupRead int64 + idxScan int64 + idxTupFetch int64 + nTupIns int64 + nTupUpd incDelta + nTupDel int64 + nTupHotUpd incDelta + nLiveTup int64 + nDeadTup int64 + lastVacuumAgo int64 + lastAutoVacuumAgo int64 + lastAnalyzeAgo int64 + lastAutoAnalyzeAgo int64 + vacuumCount int64 + autovacuumCount int64 + analyzeCount int64 + autoAnalyzeCount int64 + + // pg_statio_user_tables + heapBlksRead incDelta + heapBlksHit incDelta + idxBlksRead incDelta + idxBlksHit incDelta + toastBlksRead incDelta + toastBlksHit incDelta + tidxBlksRead incDelta + tidxBlksHit incDelta + + totalSize int64 + + bloatSize *int64 // need 'SELECT' access to the table + bloatSizePerc *int64 // need 'SELECT' access to the table + nullColumns *int64 // need 'SELECT' access to the table +} + +type indexMetrics struct { + name string + db string + schema string + table string + parentTable string + + updated bool + hasCharts bool + + idxScan int64 + idxTupRead int64 + idxTupFetch int64 + + size int64 + + bloatSize *int64 // need 'SELECT' access to the table + bloatSizePerc *int64 // need 'SELECT' access to the table +} +type incDelta struct{ prev, last int64 } + +func (pc *incDelta) delta() int64 { return pc.last - pc.prev } diff --git a/src/go/collectors/go.d.plugin/modules/postgres/postgres.go b/src/go/collectors/go.d.plugin/modules/postgres/postgres.go new file mode 100644 index 00000000000000..a1dabf9d39a8a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/postgres.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "database/sql" + _ "embed" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/metrics" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/jackc/pgx/v4/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("postgres", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + 
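// Registered at import time; go.d.plugin calls Create once per configured job, so every job gets its own collector instance and state. +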
}) +} + +func New() *Postgres { + return &Postgres{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 2}, + DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres", + XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, + QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, + // charts: 20 x table, 4 x index. + // https://discord.com/channels/847502280503590932/1022693928874549368 + MaxDBTables: 50, + MaxDBIndexes: 250, + }, + charts: baseCharts.Copy(), + dbConns: make(map[string]*dbConn), + mx: &pgMetrics{ + dbs: make(map[string]*dbMetrics), + indexes: make(map[string]*indexMetrics), + tables: make(map[string]*tableMetrics), + replApps: make(map[string]*replStandbyAppMetrics), + replSlots: make(map[string]*replSlotMetrics), + }, + recheckSettingsEvery: time.Minute * 30, + doSlowEvery: time.Minute * 5, + addXactQueryRunningTimeChartsOnce: &sync.Once{}, + addWALFilesChartsOnce: &sync.Once{}, + } +} + +type Config struct { + DSN string `yaml:"dsn"` + Timeout web.Duration `yaml:"timeout"` + DBSelector string `yaml:"collect_databases_matching"` + XactTimeHistogram []float64 `yaml:"transaction_time_histogram"` + QueryTimeHistogram []float64 `yaml:"query_time_histogram"` + MaxDBTables int64 `yaml:"max_db_tables"` + MaxDBIndexes int64 `yaml:"max_db_indexes"` +} + +type ( + Postgres struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + db *sql.DB + dbConns map[string]*dbConn + + superUser *bool + pgIsInRecovery *bool + pgVersion int + + addXactQueryRunningTimeChartsOnce *sync.Once + addWALFilesChartsOnce *sync.Once + + dbSr matcher.Matcher + + mx *pgMetrics + + recheckSettingsTime time.Time + recheckSettingsEvery time.Duration + + doSlowTime time.Time + doSlowEvery time.Duration + } + dbConn struct { + db *sql.DB + connStr string + connErrors int + } +) + +func (p *Postgres) Init() bool { + err := p.validateConfig() + if err != nil { + p.Errorf("config validation: %v", err) + return false + } + + sr, err := p.initDBSelector() + if err != nil { + p.Errorf("config validation: %v", err) + return false + } + p.dbSr = sr + + p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram) + p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram) + + return true +} + +func (p *Postgres) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Postgres) Charts() *module.Charts { + return p.charts +} + +func (p *Postgres) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (p *Postgres) Cleanup() { + if p.db == nil { + return + } + if err := p.db.Close(); err != nil { + p.Warningf("cleanup: error on closing the Postgres database [%s]: %v", p.DSN, err) + } + p.db = nil + + for dbname, conn := range p.dbConns { + delete(p.dbConns, dbname) + if conn.connStr != "" { + stdlib.UnregisterConnConfig(conn.connStr) + } + if conn.db != nil { + _ = conn.db.Close() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go b/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go new file mode 100644 index 00000000000000..a41c112355408d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go @@ -0,0 +1,739 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +import ( + "bufio" + "bytes" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/matcher" + + "github.com/DATA-DOG/go-sqlmock" + 
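// go-sqlmock stands in for a live server in these tests: each case enqueues the queries it expects along with canned result sets read from testdata. +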
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataV140004ServerVersionNum, _ = os.ReadFile("testdata/v14.4/server_version_num.txt") + + dataV140004IsSuperUserFalse, _ = os.ReadFile("testdata/v14.4/is_super_user-false.txt") + dataV140004IsSuperUserTrue, _ = os.ReadFile("testdata/v14.4/is_super_user-true.txt") + dataV140004PGIsInRecoveryTrue, _ = os.ReadFile("testdata/v14.4/pg_is_in_recovery-true.txt") + dataV140004SettingsMaxConnections, _ = os.ReadFile("testdata/v14.4/settings_max_connections.txt") + dataV140004SettingsMaxLocksHeld, _ = os.ReadFile("testdata/v14.4/settings_max_locks_held.txt") + + dataV140004ServerCurrentConnections, _ = os.ReadFile("testdata/v14.4/server_current_connections.txt") + dataV140004ServerConnectionsState, _ = os.ReadFile("testdata/v14.4/server_connections_state.txt") + dataV140004Checkpoints, _ = os.ReadFile("testdata/v14.4/checkpoints.txt") + dataV140004ServerUptime, _ = os.ReadFile("testdata/v14.4/uptime.txt") + dataV140004TXIDWraparound, _ = os.ReadFile("testdata/v14.4/txid_wraparound.txt") + dataV140004WALWrites, _ = os.ReadFile("testdata/v14.4/wal_writes.txt") + dataV140004WALFiles, _ = os.ReadFile("testdata/v14.4/wal_files.txt") + dataV140004WALArchiveFiles, _ = os.ReadFile("testdata/v14.4/wal_archive_files.txt") + dataV140004CatalogRelations, _ = os.ReadFile("testdata/v14.4/catalog_relations.txt") + dataV140004AutovacuumWorkers, _ = os.ReadFile("testdata/v14.4/autovacuum_workers.txt") + dataV140004XactQueryRunningTime, _ = os.ReadFile("testdata/v14.4/xact_query_running_time.txt") + + dataV140004ReplStandbyAppDelta, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_delta.txt") + dataV140004ReplStandbyAppLag, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_lag.txt") + + dataV140004ReplSlotFiles, _ = os.ReadFile("testdata/v14.4/replication_slot_files.txt") + + dataV140004DatabaseStats, _ = os.ReadFile("testdata/v14.4/database_stats.txt") + dataV140004DatabaseSize, _ = os.ReadFile("testdata/v14.4/database_size.txt") + dataV140004DatabaseConflicts, _ = os.ReadFile("testdata/v14.4/database_conflicts.txt") + dataV140004DatabaseLocks, _ = os.ReadFile("testdata/v14.4/database_locks.txt") + + dataV140004QueryableDatabaseList, _ = os.ReadFile("testdata/v14.4/queryable_database_list.txt") + + dataV140004StatUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_tables_db_postgres.txt") + dataV140004StatIOUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/statio_user_tables_db_postgres.txt") + + dataV140004StatUserIndexesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_indexes_db_postgres.txt") + + dataV140004Bloat, _ = os.ReadFile("testdata/v14.4/bloat_tables.txt") + dataV140004ColumnsStats, _ = os.ReadFile("testdata/v14.4/table_columns_stats.txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataV140004ServerVersionNum": dataV140004ServerVersionNum, + + "dataV140004IsSuperUserFalse": dataV140004IsSuperUserFalse, + "dataV140004IsSuperUserTrue": dataV140004IsSuperUserTrue, + "dataV140004PGIsInRecoveryTrue": dataV140004PGIsInRecoveryTrue, + "dataV140004SettingsMaxConnections": dataV140004SettingsMaxConnections, + "dataV140004SettingsMaxLocksHeld": dataV140004SettingsMaxLocksHeld, + + "dataV140004ServerCurrentConnections": dataV140004ServerCurrentConnections, + "dataV140004ServerConnectionsState": dataV140004ServerConnectionsState, + "dataV140004Checkpoints": dataV140004Checkpoints, + "dataV140004ServerUptime": dataV140004ServerUptime, 
+ "dataV140004TXIDWraparound": dataV140004TXIDWraparound, + "dataV140004WALWrites": dataV140004WALWrites, + "dataV140004WALFiles": dataV140004WALFiles, + "dataV140004WALArchiveFiles": dataV140004WALArchiveFiles, + "dataV140004CatalogRelations": dataV140004CatalogRelations, + "dataV140004AutovacuumWorkers": dataV140004AutovacuumWorkers, + "dataV140004XactQueryRunningTime": dataV140004XactQueryRunningTime, + + "dataV14004ReplStandbyAppDelta": dataV140004ReplStandbyAppDelta, + "dataV14004ReplStandbyAppLag": dataV140004ReplStandbyAppLag, + + "dataV140004ReplSlotFiles": dataV140004ReplSlotFiles, + + "dataV140004DatabaseStats": dataV140004DatabaseStats, + "dataV140004DatabaseSize": dataV140004DatabaseSize, + "dataV140004DatabaseConflicts": dataV140004DatabaseConflicts, + "dataV140004DatabaseLocks": dataV140004DatabaseLocks, + + "dataV140004QueryableDatabaseList": dataV140004QueryableDatabaseList, + + "dataV140004StatUserTablesDBPostgres": dataV140004StatUserTablesDBPostgres, + "dataV140004StatIOUserTablesDBPostgres": dataV140004StatIOUserTablesDBPostgres, + + "dataV140004StatUserIndexesDBPostgres": dataV140004StatUserIndexesDBPostgres, + + "dataV140004Bloat": dataV140004Bloat, + "dataV140004ColumnsStats": dataV140004ColumnsStats, + } { + require.NotNilf(t, data, name) + } +} + +func TestPostgres_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "Success with default": { + wantFail: false, + config: New().Config, + }, + "Fail when DSN not set": { + wantFail: true, + config: Config{DSN: ""}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pg := New() + pg.Config = test.config + + if test.wantFail { + assert.False(t, pg.Init()) + } else { + assert.True(t, pg.Init()) + } + }) + } +} + +func TestPostgres_Cleanup(t *testing.T) { + +} + +func TestPostgres_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestPostgres_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock) + wantFail bool + }{ + "Success when all queries are successful (v14.4)": { + wantFail: false, + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + pg.dbSr = matcher.TRUE() + + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections) + mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld) + + mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections) + mockExpect(t, m, queryServerConnectionsState(), dataV140004ServerConnectionsState) + mockExpect(t, m, queryCheckpoints(), dataV140004Checkpoints) + mockExpect(t, m, queryServerUptime(), dataV140004ServerUptime) + mockExpect(t, m, queryTXIDWraparound(), dataV140004TXIDWraparound) + mockExpect(t, m, queryWALWrites(140004), dataV140004WALWrites) + mockExpect(t, m, queryCatalogRelations(), dataV140004CatalogRelations) + mockExpect(t, m, queryAutovacuumWorkers(), dataV140004AutovacuumWorkers) + mockExpect(t, m, queryXactQueryRunningTime(), dataV140004XactQueryRunningTime) + + mockExpect(t, m, queryWALFiles(140004), dataV140004WALFiles) + mockExpect(t, m, queryWALArchiveFiles(140004), dataV140004WALArchiveFiles) + + mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataV140004ReplStandbyAppDelta) + mockExpect(t, m, 
queryReplicationStandbyAppLag(), dataV140004ReplStandbyAppLag) + mockExpect(t, m, queryReplicationSlotFiles(140004), dataV140004ReplSlotFiles) + + mockExpect(t, m, queryDatabaseStats(), dataV140004DatabaseStats) + mockExpect(t, m, queryDatabaseSize(140004), dataV140004DatabaseSize) + mockExpect(t, m, queryDatabaseConflicts(), dataV140004DatabaseConflicts) + mockExpect(t, m, queryDatabaseLocks(), dataV140004DatabaseLocks) + + mockExpect(t, m, queryQueryableDatabaseList(), dataV140004QueryableDatabaseList) + mockExpect(t, m, queryStatUserTables(), dataV140004StatUserTablesDBPostgres) + mockExpect(t, m, queryStatIOUserTables(), dataV140004StatIOUserTablesDBPostgres) + mockExpect(t, m, queryStatUserIndexes(), dataV140004StatUserIndexesDBPostgres) + mockExpect(t, m, queryBloat(), dataV140004Bloat) + mockExpect(t, m, queryColumnsStats(), dataV140004ColumnsStats) + }, + }, + "Fail when the second query unsuccessful (v14.4)": { + wantFail: true, + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpect(t, m, querySettingsMaxConnections(), dataV140004ServerVersionNum) + mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld) + + mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections) + mockExpectErr(m, queryServerConnectionsState()) + }, + }, + "Fail when querying the database version returns an error": { + wantFail: true, + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpectErr(m, queryServerVersion()) + }, + }, + "Fail when querying settings max connection returns an error": { + wantFail: true, + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpectErr(m, querySettingsMaxConnections()) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + pg := New() + pg.db = db + defer func() { _ = db.Close() }() + + require.True(t, pg.Init()) + + test.prepareMock(t, pg, mock) + + if test.wantFail { + assert.False(t, pg.Check()) + } else { + assert.True(t, pg.Check()) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func TestPostgres_Collect(t *testing.T) { + type testCaseStep struct { + prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock) + check func(t *testing.T, pg *Postgres) + } + tests := map[string][]testCaseStep{ + "Success on all queries, collect all dbs (v14.4)": { + { + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + pg.dbSr = matcher.TRUE() + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections) + mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld) + + mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections) + mockExpect(t, m, 
queryServerConnectionsState(), dataV140004ServerConnectionsState) + mockExpect(t, m, queryCheckpoints(), dataV140004Checkpoints) + mockExpect(t, m, queryServerUptime(), dataV140004ServerUptime) + mockExpect(t, m, queryTXIDWraparound(), dataV140004TXIDWraparound) + mockExpect(t, m, queryWALWrites(140004), dataV140004WALWrites) + mockExpect(t, m, queryCatalogRelations(), dataV140004CatalogRelations) + mockExpect(t, m, queryAutovacuumWorkers(), dataV140004AutovacuumWorkers) + mockExpect(t, m, queryXactQueryRunningTime(), dataV140004XactQueryRunningTime) + + mockExpect(t, m, queryWALFiles(140004), dataV140004WALFiles) + mockExpect(t, m, queryWALArchiveFiles(140004), dataV140004WALArchiveFiles) + + mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataV140004ReplStandbyAppDelta) + mockExpect(t, m, queryReplicationStandbyAppLag(), dataV140004ReplStandbyAppLag) + mockExpect(t, m, queryReplicationSlotFiles(140004), dataV140004ReplSlotFiles) + + mockExpect(t, m, queryDatabaseStats(), dataV140004DatabaseStats) + mockExpect(t, m, queryDatabaseSize(140004), dataV140004DatabaseSize) + mockExpect(t, m, queryDatabaseConflicts(), dataV140004DatabaseConflicts) + mockExpect(t, m, queryDatabaseLocks(), dataV140004DatabaseLocks) + + mockExpect(t, m, queryQueryableDatabaseList(), dataV140004QueryableDatabaseList) + mockExpect(t, m, queryStatUserTables(), dataV140004StatUserTablesDBPostgres) + mockExpect(t, m, queryStatIOUserTables(), dataV140004StatIOUserTablesDBPostgres) + mockExpect(t, m, queryStatUserIndexes(), dataV140004StatUserIndexesDBPostgres) + mockExpect(t, m, queryBloat(), dataV140004Bloat) + mockExpect(t, m, queryColumnsStats(), dataV140004ColumnsStats) + }, + check: func(t *testing.T, pg *Postgres) { + mx := pg.Collect() + + expected := map[string]int64{ + "autovacuum_analyze": 0, + "autovacuum_brin_summarize": 0, + "autovacuum_vacuum": 0, + "autovacuum_vacuum_analyze": 0, + "autovacuum_vacuum_freeze": 0, + "buffers_alloc": 27295744, + "buffers_backend": 0, + "buffers_backend_fsync": 0, + "buffers_checkpoint": 32768, + "buffers_clean": 0, + "catalog_relkind_I_count": 0, + "catalog_relkind_I_size": 0, + "catalog_relkind_S_count": 0, + "catalog_relkind_S_size": 0, + "catalog_relkind_c_count": 0, + "catalog_relkind_c_size": 0, + "catalog_relkind_f_count": 0, + "catalog_relkind_f_size": 0, + "catalog_relkind_i_count": 155, + "catalog_relkind_i_size": 3678208, + "catalog_relkind_m_count": 0, + "catalog_relkind_m_size": 0, + "catalog_relkind_p_count": 0, + "catalog_relkind_p_size": 0, + "catalog_relkind_r_count": 66, + "catalog_relkind_r_size": 3424256, + "catalog_relkind_t_count": 38, + "catalog_relkind_t_size": 548864, + "catalog_relkind_v_count": 137, + "catalog_relkind_v_size": 0, + "checkpoint_sync_time": 47, + "checkpoint_write_time": 167, + "checkpoints_req": 16, + "checkpoints_timed": 1814, + "databases_count": 2, + "db_postgres_blks_hit": 1221125, + "db_postgres_blks_read": 3252, + "db_postgres_blks_read_perc": 0, + "db_postgres_confl_bufferpin": 0, + "db_postgres_confl_deadlock": 0, + "db_postgres_confl_lock": 0, + "db_postgres_confl_snapshot": 0, + "db_postgres_confl_tablespace": 0, + "db_postgres_conflicts": 0, + "db_postgres_deadlocks": 0, + "db_postgres_lock_mode_AccessExclusiveLock_awaited": 0, + "db_postgres_lock_mode_AccessExclusiveLock_held": 0, + "db_postgres_lock_mode_AccessShareLock_awaited": 0, + "db_postgres_lock_mode_AccessShareLock_held": 99, + "db_postgres_lock_mode_ExclusiveLock_awaited": 0, + "db_postgres_lock_mode_ExclusiveLock_held": 0, + 
"db_postgres_lock_mode_RowExclusiveLock_awaited": 0, + "db_postgres_lock_mode_RowExclusiveLock_held": 99, + "db_postgres_lock_mode_RowShareLock_awaited": 0, + "db_postgres_lock_mode_RowShareLock_held": 99, + "db_postgres_lock_mode_ShareLock_awaited": 0, + "db_postgres_lock_mode_ShareLock_held": 0, + "db_postgres_lock_mode_ShareRowExclusiveLock_awaited": 0, + "db_postgres_lock_mode_ShareRowExclusiveLock_held": 0, + "db_postgres_lock_mode_ShareUpdateExclusiveLock_awaited": 0, + "db_postgres_lock_mode_ShareUpdateExclusiveLock_held": 0, + "db_postgres_numbackends": 3, + "db_postgres_numbackends_utilization": 10, + "db_postgres_size": 8758051, + "db_postgres_temp_bytes": 0, + "db_postgres_temp_files": 0, + "db_postgres_tup_deleted": 0, + "db_postgres_tup_fetched": 359833, + "db_postgres_tup_fetched_perc": 2, + "db_postgres_tup_inserted": 0, + "db_postgres_tup_returned": 13207245, + "db_postgres_tup_updated": 0, + "db_postgres_xact_commit": 1438660, + "db_postgres_xact_rollback": 70, + "db_production_blks_hit": 0, + "db_production_blks_read": 0, + "db_production_blks_read_perc": 0, + "db_production_confl_bufferpin": 0, + "db_production_confl_deadlock": 0, + "db_production_confl_lock": 0, + "db_production_confl_snapshot": 0, + "db_production_confl_tablespace": 0, + "db_production_conflicts": 0, + "db_production_deadlocks": 0, + "db_production_lock_mode_AccessExclusiveLock_awaited": 0, + "db_production_lock_mode_AccessExclusiveLock_held": 0, + "db_production_lock_mode_AccessShareLock_awaited": 0, + "db_production_lock_mode_AccessShareLock_held": 0, + "db_production_lock_mode_ExclusiveLock_awaited": 0, + "db_production_lock_mode_ExclusiveLock_held": 0, + "db_production_lock_mode_RowExclusiveLock_awaited": 0, + "db_production_lock_mode_RowExclusiveLock_held": 0, + "db_production_lock_mode_RowShareLock_awaited": 0, + "db_production_lock_mode_RowShareLock_held": 0, + "db_production_lock_mode_ShareLock_awaited": 99, + "db_production_lock_mode_ShareLock_held": 0, + "db_production_lock_mode_ShareRowExclusiveLock_awaited": 0, + "db_production_lock_mode_ShareRowExclusiveLock_held": 0, + "db_production_lock_mode_ShareUpdateExclusiveLock_awaited": 0, + "db_production_lock_mode_ShareUpdateExclusiveLock_held": 99, + "db_production_numbackends": 1, + "db_production_numbackends_utilization": 1, + "db_production_size": 8602115, + "db_production_temp_bytes": 0, + "db_production_temp_files": 0, + "db_production_tup_deleted": 0, + "db_production_tup_fetched": 0, + "db_production_tup_fetched_perc": 0, + "db_production_tup_inserted": 0, + "db_production_tup_returned": 0, + "db_production_tup_updated": 0, + "db_production_xact_commit": 0, + "db_production_xact_rollback": 0, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_size": 8192, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_size": 8192, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1, + "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0, + "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_size": 8192, + "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1, + "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0, + 
"index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_size": 8192, + "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1, + "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_used": 0, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_size": 8192, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_size": 8192, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1, + "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0, + "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size": 0, + "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 0, + "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_size": 112336896, + "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_unused": 0, + "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_used": 1, + "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_size": 16384, + "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_unused": 1, + "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_used": 0, + "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_size": 32768, + "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_unused": 1, + "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_used": 0, + "locks_utilization": 6, + "maxwritten_clean": 0, + "oldest_current_xid": 9, + "percent_towards_emergency_autovacuum": 0, + "percent_towards_wraparound": 0, + "query_running_time_hist_bucket_1": 1, + "query_running_time_hist_bucket_2": 0, + "query_running_time_hist_bucket_3": 0, + "query_running_time_hist_bucket_4": 0, + "query_running_time_hist_bucket_5": 0, + "query_running_time_hist_bucket_6": 0, + "query_running_time_hist_bucket_inf": 0, + "query_running_time_hist_count": 1, + "query_running_time_hist_sum": 0, + "repl_slot_ocean_replslot_files": 0, + "repl_slot_ocean_replslot_wal_keep": 0, + "repl_standby_app_phys-standby2_wal_flush_lag_size": 0, + "repl_standby_app_phys-standby2_wal_flush_lag_time": 0, + "repl_standby_app_phys-standby2_wal_replay_lag_size": 0, + "repl_standby_app_phys-standby2_wal_replay_lag_time": 0, + "repl_standby_app_phys-standby2_wal_sent_lag_size": 0, + "repl_standby_app_phys-standby2_wal_write_lag_size": 0, + "repl_standby_app_phys-standby2_wal_write_time": 0, + "repl_standby_app_walreceiver_wal_flush_lag_size": 2, + "repl_standby_app_walreceiver_wal_flush_lag_time": 2, + "repl_standby_app_walreceiver_wal_replay_lag_size": 2, + "repl_standby_app_walreceiver_wal_replay_lag_time": 2, + "repl_standby_app_walreceiver_wal_sent_lag_size": 2, + "repl_standby_app_walreceiver_wal_write_lag_size": 2, + "repl_standby_app_walreceiver_wal_write_time": 2, + "server_connections_available": 97, + "server_connections_state_active": 1, + "server_connections_state_disabled": 1, + "server_connections_state_fastpath_function_call": 1, + "server_connections_state_idle": 
14, + "server_connections_state_idle_in_transaction": 7, + "server_connections_state_idle_in_transaction_aborted": 1, + "server_connections_used": 3, + "server_connections_utilization": 3, + "server_uptime": 499906, + "table_pgbench_accounts_db_postgres_schema_public_bloat_size": 9863168, + "table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 1, + "table_pgbench_accounts_db_postgres_schema_public_heap_blks_hit": 224484753408, + "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read": 1803882668032, + "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read_perc": 88, + "table_pgbench_accounts_db_postgres_schema_public_idx_blks_hit": 7138635948032, + "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read": 973310976000, + "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read_perc": 11, + "table_pgbench_accounts_db_postgres_schema_public_idx_scan": 99955, + "table_pgbench_accounts_db_postgres_schema_public_idx_tup_fetch": 99955, + "table_pgbench_accounts_db_postgres_schema_public_last_analyze_ago": 377149, + "table_pgbench_accounts_db_postgres_schema_public_last_vacuum_ago": 377149, + "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup": 1000048, + "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup_perc": 16, + "table_pgbench_accounts_db_postgres_schema_public_n_live_tup": 5000048, + "table_pgbench_accounts_db_postgres_schema_public_n_tup_del": 0, + "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd": 0, + "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd_perc": 0, + "table_pgbench_accounts_db_postgres_schema_public_n_tup_ins": 5000000, + "table_pgbench_accounts_db_postgres_schema_public_n_tup_upd": 0, + "table_pgbench_accounts_db_postgres_schema_public_seq_scan": 2, + "table_pgbench_accounts_db_postgres_schema_public_seq_tup_read": 5000000, + "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_hit": -1, + "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read": -1, + "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read_perc": 50, + "table_pgbench_accounts_db_postgres_schema_public_toast_blks_hit": -1, + "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read": -1, + "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read_perc": 50, + "table_pgbench_accounts_db_postgres_schema_public_total_size": 784031744, + "table_pgbench_branches_db_postgres_schema_public_heap_blks_hit": 304316416, + "table_pgbench_branches_db_postgres_schema_public_heap_blks_read": 507150336, + "table_pgbench_branches_db_postgres_schema_public_heap_blks_read_perc": 62, + "table_pgbench_branches_db_postgres_schema_public_idx_blks_hit": 101441536, + "table_pgbench_branches_db_postgres_schema_public_idx_blks_read": 101425152, + "table_pgbench_branches_db_postgres_schema_public_idx_blks_read_perc": 49, + "table_pgbench_branches_db_postgres_schema_public_idx_scan": 0, + "table_pgbench_branches_db_postgres_schema_public_idx_tup_fetch": 0, + "table_pgbench_branches_db_postgres_schema_public_last_analyze_ago": 377149, + "table_pgbench_branches_db_postgres_schema_public_last_vacuum_ago": 371719, + "table_pgbench_branches_db_postgres_schema_public_n_dead_tup": 0, + "table_pgbench_branches_db_postgres_schema_public_n_dead_tup_perc": 0, + "table_pgbench_branches_db_postgres_schema_public_n_live_tup": 50, + "table_pgbench_branches_db_postgres_schema_public_n_tup_del": 0, + "table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd": 0, + 
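+					// Block counters that are NULL in the fixtures (the empty TOAST/tidx columns in
+					// statio_user_tables_db_postgres.txt) surface here as -1, and the matching
+					// *_read_perc falls back to 50.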
"table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd_perc": 0, + "table_pgbench_branches_db_postgres_schema_public_n_tup_ins": 50, + "table_pgbench_branches_db_postgres_schema_public_n_tup_upd": 0, + "table_pgbench_branches_db_postgres_schema_public_seq_scan": 6, + "table_pgbench_branches_db_postgres_schema_public_seq_tup_read": 300, + "table_pgbench_branches_db_postgres_schema_public_tidx_blks_hit": -1, + "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read": -1, + "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read_perc": 50, + "table_pgbench_branches_db_postgres_schema_public_toast_blks_hit": -1, + "table_pgbench_branches_db_postgres_schema_public_toast_blks_read": -1, + "table_pgbench_branches_db_postgres_schema_public_toast_blks_read_perc": 50, + "table_pgbench_branches_db_postgres_schema_public_total_size": 57344, + "table_pgbench_history_db_postgres_schema_public_heap_blks_hit": 0, + "table_pgbench_history_db_postgres_schema_public_heap_blks_read": 0, + "table_pgbench_history_db_postgres_schema_public_heap_blks_read_perc": 0, + "table_pgbench_history_db_postgres_schema_public_idx_blks_hit": -1, + "table_pgbench_history_db_postgres_schema_public_idx_blks_read": -1, + "table_pgbench_history_db_postgres_schema_public_idx_blks_read_perc": 50, + "table_pgbench_history_db_postgres_schema_public_idx_scan": 0, + "table_pgbench_history_db_postgres_schema_public_idx_tup_fetch": 0, + "table_pgbench_history_db_postgres_schema_public_last_analyze_ago": 377149, + "table_pgbench_history_db_postgres_schema_public_last_vacuum_ago": 377149, + "table_pgbench_history_db_postgres_schema_public_n_dead_tup": 0, + "table_pgbench_history_db_postgres_schema_public_n_dead_tup_perc": 0, + "table_pgbench_history_db_postgres_schema_public_n_live_tup": 0, + "table_pgbench_history_db_postgres_schema_public_n_tup_del": 0, + "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd": 0, + "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd_perc": 0, + "table_pgbench_history_db_postgres_schema_public_n_tup_ins": 0, + "table_pgbench_history_db_postgres_schema_public_n_tup_upd": 0, + "table_pgbench_history_db_postgres_schema_public_seq_scan": 0, + "table_pgbench_history_db_postgres_schema_public_seq_tup_read": 0, + "table_pgbench_history_db_postgres_schema_public_tidx_blks_hit": -1, + "table_pgbench_history_db_postgres_schema_public_tidx_blks_read": -1, + "table_pgbench_history_db_postgres_schema_public_tidx_blks_read_perc": 50, + "table_pgbench_history_db_postgres_schema_public_toast_blks_hit": -1, + "table_pgbench_history_db_postgres_schema_public_toast_blks_read": -1, + "table_pgbench_history_db_postgres_schema_public_toast_blks_read_perc": 50, + "table_pgbench_history_db_postgres_schema_public_total_size": 0, + "table_pgbench_tellers_db_postgres_schema_public_heap_blks_hit": 491937792, + "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read": 623828992, + "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read_perc": 55, + "table_pgbench_tellers_db_postgres_schema_public_idx_blks_hit": 0, + "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read": 101433344, + "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read_perc": 100, + "table_pgbench_tellers_db_postgres_schema_public_idx_scan": 0, + "table_pgbench_tellers_db_postgres_schema_public_idx_tup_fetch": 0, + "table_pgbench_tellers_db_postgres_schema_public_last_analyze_ago": 377149, + "table_pgbench_tellers_db_postgres_schema_public_last_vacuum_ago": 371719, + 
"table_pgbench_tellers_db_postgres_schema_public_n_dead_tup": 0, + "table_pgbench_tellers_db_postgres_schema_public_n_dead_tup_perc": 0, + "table_pgbench_tellers_db_postgres_schema_public_n_live_tup": 500, + "table_pgbench_tellers_db_postgres_schema_public_n_tup_del": 0, + "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd": 0, + "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd_perc": 0, + "table_pgbench_tellers_db_postgres_schema_public_n_tup_ins": 500, + "table_pgbench_tellers_db_postgres_schema_public_n_tup_upd": 0, + "table_pgbench_tellers_db_postgres_schema_public_null_columns": 1, + "table_pgbench_tellers_db_postgres_schema_public_seq_scan": 1, + "table_pgbench_tellers_db_postgres_schema_public_seq_tup_read": 500, + "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_hit": -1, + "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read": -1, + "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read_perc": 50, + "table_pgbench_tellers_db_postgres_schema_public_toast_blks_hit": -1, + "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read": -1, + "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read_perc": 50, + "table_pgbench_tellers_db_postgres_schema_public_total_size": 90112, + "transaction_running_time_hist_bucket_1": 1, + "transaction_running_time_hist_bucket_2": 0, + "transaction_running_time_hist_bucket_3": 0, + "transaction_running_time_hist_bucket_4": 0, + "transaction_running_time_hist_bucket_5": 0, + "transaction_running_time_hist_bucket_6": 0, + "transaction_running_time_hist_bucket_inf": 7, + "transaction_running_time_hist_count": 8, + "transaction_running_time_hist_sum": 4022, + "wal_archive_files_done_count": 1, + "wal_archive_files_ready_count": 1, + "wal_recycled_files": 0, + "wal_writes": 24103144, + "wal_written_files": 1, + } + + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying the database version returns an error": { + { + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpectErr(m, queryServerVersion()) + }, + check: func(t *testing.T, pg *Postgres) { + mx := pg.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying settings max connections returns an error": { + { + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpectErr(m, querySettingsMaxConnections()) + }, + check: func(t *testing.T, pg *Postgres) { + mx := pg.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + "Fail when querying the server connections returns an error": { + { + prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) { + mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum) + mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue) + mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue) + + mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections) + mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld) + + mockExpectErr(m, queryServerCurrentConnectionsUsed()) + }, + check: func(t *testing.T, pg *Postgres) { + mx := pg.Collect() + var expected map[string]int64 + assert.Equal(t, expected, mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, 
func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + pg := New() + pg.db = db + defer func() { _ = db.Close() }() + + require.True(t, pg.Init()) + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepareMock(t, pg, mock) + step.check(t, pg) + }) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) { + mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed() +} + +func mockExpectErr(mock sqlmock.Sqlmock, query string) { + mock.ExpectQuery(query).WillReturnError(errors.New("mock error")) +} + +func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows { + rows, err := prepareMockRows(data) + require.NoError(t, err) + return rows +} + +func prepareMockRows(data []byte) (*sqlmock.Rows, error) { + r := bytes.NewReader(data) + sc := bufio.NewScanner(r) + + var numColumns int + var rows *sqlmock.Rows + + for sc.Scan() { + s := strings.TrimSpace(sc.Text()) + if s == "" || strings.HasPrefix(s, "---") { + continue + } + + parts := strings.Split(s, "|") + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + if rows == nil { + numColumns = len(parts) + rows = sqlmock.NewRows(parts) + continue + } + + if len(parts) != numColumns { + return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts)) + } + + values := make([]driver.Value, len(parts)) + for i, v := range parts { + values[i] = v + } + rows.AddRow(values...) + } + + if rows == nil { + return nil, errors.New("prepareMockRows(): nil rows result") + } + + return rows, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/queries.go b/src/go/collectors/go.d.plugin/modules/postgres/queries.go new file mode 100644 index 00000000000000..f6afc9342022d7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/queries.go @@ -0,0 +1,757 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package postgres + +func queryServerVersion() string { + return "SHOW server_version_num;" +} + +func queryIsSuperUser() string { + return "SELECT current_setting('is_superuser') = 'on' AS is_superuser;" +} + +func queryPGIsInRecovery() string { + return "SELECT pg_is_in_recovery();" +} + +func querySettingsMaxConnections() string { + return "SELECT current_setting('max_connections')::INT - current_setting('superuser_reserved_connections')::INT;" +} + +func querySettingsMaxLocksHeld() string { + return ` +SELECT current_setting('max_locks_per_transaction')::INT * + (current_setting('max_connections')::INT + current_setting('max_prepared_transactions')::INT); +` +} + +// TODO: this is not correct and we should use pg_stat_activity. +// But we need to check what connections (backend_type) count towards 'max_connections'. +// I think python version query doesn't count it correctly. 
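+// For comparison, a client-backend-only count would be something like:
+//   SELECT count(*) FROM pg_stat_activity WHERE backend_type = 'client backend';
+// (untested sketch; whether that matches what 'max_connections' limits is the open question)
+// Python version for reference: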
+// https://github.com/netdata/netdata/blob/1782e2d002bc5203128e5a5d2b801010e2822d2d/collectors/python.d.plugin/postgres/postgres.chart.py#L266 +func queryServerCurrentConnectionsUsed() string { + return "SELECT sum(numbackends) FROM pg_stat_database;" +} + +func queryServerConnectionsState() string { + return ` +SELECT state, + COUNT(*) +FROM pg_stat_activity +WHERE state IN + ( + 'active', + 'idle', + 'idle in transaction', + 'idle in transaction (aborted)', + 'fastpath function call', + 'disabled' + ) +GROUP BY state; +` +} + +func queryCheckpoints() string { + // definition by version: https://pgpedia.info/p/pg_stat_bgwriter.html + // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-BGWRITER-VIEW + // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1104 + + return ` +SELECT checkpoints_timed, + checkpoints_req, + checkpoint_write_time, + checkpoint_sync_time, + buffers_checkpoint * current_setting('block_size')::numeric AS buffers_checkpoint_bytes, + buffers_clean * current_setting('block_size')::numeric AS buffers_clean_bytes, + maxwritten_clean, + buffers_backend * current_setting('block_size')::numeric AS buffers_backend_bytes, + buffers_backend_fsync, + buffers_alloc * current_setting('block_size')::numeric AS buffers_alloc_bytes +FROM pg_stat_bgwriter; +` +} + +func queryServerUptime() string { + return `SELECT EXTRACT(epoch FROM CURRENT_TIMESTAMP - pg_postmaster_start_time());` +} + +func queryTXIDWraparound() string { + // https://www.crunchydata.com/blog/managing-transaction-id-wraparound-in-postgresql + return ` + WITH max_age AS ( SELECT + 2000000000 as max_old_xid, + setting AS autovacuum_freeze_max_age + FROM + pg_catalog.pg_settings + WHERE + name = 'autovacuum_freeze_max_age'), per_database_stats AS ( SELECT + datname , + m.max_old_xid::int , + m.autovacuum_freeze_max_age::int , + age(d.datfrozenxid) AS oldest_current_xid + FROM + pg_catalog.pg_database d + JOIN + max_age m + ON (true) + WHERE + d.datallowconn) SELECT + max(oldest_current_xid) AS oldest_current_xid , + max(ROUND(100*(oldest_current_xid/max_old_xid::float))) AS percent_towards_wraparound , + max(ROUND(100*(oldest_current_xid/autovacuum_freeze_max_age::float))) AS percent_towards_emergency_autovacuum + FROM + per_database_stats; +` +} + +func queryWALWrites(version int) string { + if version < pgVersion10 { + return ` +SELECT + pg_xlog_location_diff( + CASE + pg_is_in_recovery() + WHEN + TRUE + THEN + pg_last_xlog_receive_location() + ELSE + pg_current_xlog_location() + END +, '0/0') AS wal_writes ; +` + } + return ` +SELECT + pg_wal_lsn_diff( + CASE + pg_is_in_recovery() + WHEN + TRUE + THEN + pg_last_wal_receive_lsn() + ELSE + pg_current_wal_lsn() + END +, '0/0') AS wal_writes ; +` +} + +func queryWALFiles(version int) string { + if version < pgVersion10 { + return ` +SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files, + count(*) FILTER (WHERE type = 'written') AS wal_written_files +FROM (SELECT wal.name, + pg_xlogfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_xlog_location() + END), + CASE + WHEN wal.name > pg_xlogfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_xlog_location() + END) THEN 'recycled' + ELSE 'written' + END AS type + FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name) + WHERE name ~ '^[0-9A-F]{24}$' + ORDER BY (pg_stat_file('pg_xlog/' || name, true)).modification, + wal.name DESC) sub; +` + } + 
+	}
return ` +SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files, + count(*) FILTER (WHERE type = 'written') AS wal_written_files +FROM (SELECT wal.name, + pg_walfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_wal_lsn() + END), + CASE + WHEN wal.name > pg_walfile_name( + CASE pg_is_in_recovery() + WHEN true THEN NULL + ELSE pg_current_wal_lsn() + END) THEN 'recycled' + ELSE 'written' + END AS type + FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name) + WHERE name ~ '^[0-9A-F]{24}$' + ORDER BY (pg_stat_file('pg_wal/' || name, true)).modification, + wal.name DESC) sub; +` +} + +func queryWALArchiveFiles(version int) string { + if version < pgVersion10 { + return ` + SELECT + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), + 0) AS INT) AS wal_archive_files_ready_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), + 0) AS INT) AS wal_archive_files_done_count + FROM + pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file); +` + } + return ` + SELECT + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), + 0) AS INT) AS wal_archive_files_ready_count, + CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), + 0) AS INT) AS wal_archive_files_done_count + FROM + pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file); +` +} + +func queryCatalogRelations() string { + // kind of same as + // https://github.com/netdata/netdata/blob/750810e1798e09cc6210e83594eb9ed4905f8f12/collectors/python.d.plugin/postgres/postgres.chart.py#L336-L354 + // TODO: do we need that? It is optional and disabled by default in py version. + return ` +SELECT relkind, + COUNT(1), + SUM(relpages) * current_setting('block_size')::NUMERIC AS size +FROM pg_class +GROUP BY relkind; +` +} + +func queryAutovacuumWorkers() string { + // https://github.com/postgres/postgres/blob/9e4f914b5eba3f49ab99bdecdc4f96fac099571f/src/backend/postmaster/autovacuum.c#L3168-L3183 + return ` +SELECT count(*) FILTER ( + WHERE + query LIKE 'autovacuum: ANALYZE%%' + AND query NOT LIKE '%%to prevent wraparound%%' + ) AS autovacuum_analyze, + count(*) FILTER ( + WHERE + query LIKE 'autovacuum: VACUUM ANALYZE%%' + AND query NOT LIKE '%%to prevent wraparound%%' + ) AS autovacuum_vacuum_analyze, + count(*) FILTER ( + WHERE + query LIKE 'autovacuum: VACUUM %.%%' + AND query NOT LIKE '%%to prevent wraparound%%' + ) AS autovacuum_vacuum, + count(*) FILTER ( + WHERE + query LIKE '%%to prevent wraparound%%' + ) AS autovacuum_vacuum_freeze, + count(*) FILTER ( + WHERE + query LIKE 'autovacuum: BRIN summarize%%' + ) AS autovacuum_brin_summarize +FROM pg_stat_activity +WHERE query NOT LIKE '%%pg_stat_activity%%'; +` +} + +func queryXactQueryRunningTime() string { + return ` +SELECT datname, + state, + EXTRACT(epoch from now() - xact_start) as xact_running_time, + EXTRACT(epoch from now() - query_start) as query_running_time +FROM pg_stat_activity +WHERE datname IS NOT NULL + AND state IN + ( + 'active', + 'idle in transaction', + 'idle in transaction (aborted)' + ) + AND backend_type = 'client backend'; +` +} + +func queryReplicationStandbyAppDelta(version int) string { + if version < pgVersion10 { + return ` +SELECT application_name, + pg_xlog_location_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_xlog_receive_location() + ELSE pg_current_xlog_location() + END, + sent_location) AS sent_delta, + pg_xlog_location_diff( + sent_location, write_location) AS write_delta, + pg_xlog_location_diff( + write_location, 
flush_location) AS flush_delta, + pg_xlog_location_diff( + flush_location, replay_location) AS replay_delta +FROM pg_stat_replication psr +WHERE application_name IS NOT NULL; +` + } + return ` +SELECT application_name, + pg_wal_lsn_diff( + CASE pg_is_in_recovery() + WHEN true THEN pg_last_wal_receive_lsn() + ELSE pg_current_wal_lsn() + END, + sent_lsn) AS sent_delta, + pg_wal_lsn_diff( + sent_lsn, write_lsn) AS write_delta, + pg_wal_lsn_diff( + write_lsn, flush_lsn) AS flush_delta, + pg_wal_lsn_diff( + flush_lsn, replay_lsn) AS replay_delta +FROM pg_stat_replication +WHERE application_name IS NOT NULL; +` +} + +func queryReplicationStandbyAppLag() string { + return ` +SELECT application_name, + COALESCE(EXTRACT(EPOCH FROM write_lag)::bigint, 0) AS write_lag, + COALESCE(EXTRACT(EPOCH FROM flush_lag)::bigint, 0) AS flush_lag, + COALESCE(EXTRACT(EPOCH FROM replay_lag)::bigint, 0) AS replay_lag +FROM pg_stat_replication psr +WHERE application_name IS NOT NULL; +` +} + +func queryReplicationSlotFiles(version int) string { + if version < pgVersion11 { + return ` +WITH wal_size AS ( + SELECT + current_setting('wal_block_size')::INT * setting::INT AS val + FROM pg_settings + WHERE name = 'wal_segment_size' + ) +SELECT + slot_name, + slot_type, + replslot_wal_keep, + count(slot_file) AS replslot_files +FROM + (SELECT + slot.slot_name, + CASE + WHEN slot_file <> 'state' THEN 1 + END AS slot_file , + slot_type, + COALESCE ( + floor( + CASE WHEN pg_is_in_recovery() + THEN ( + pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val) + ) / s.val + ELSE ( + pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_walfile_name_offset(restart_lsn)).file_offset + ) / s.val + END + ),0) AS replslot_wal_keep + FROM pg_replication_slots slot + LEFT JOIN ( + SELECT + slot2.slot_name, + pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file + FROM pg_replication_slots slot2 + ) files (slot_name, slot_file) + ON slot.slot_name = files.slot_name + CROSS JOIN wal_size s + ) AS d +GROUP BY + slot_name, + slot_type, + replslot_wal_keep; +` + } + + return ` +WITH wal_size AS ( + SELECT + setting::int AS val + FROM pg_settings + WHERE name = 'wal_segment_size' + ) +SELECT + slot_name, + slot_type, + replslot_wal_keep, + count(slot_file) AS replslot_files +FROM + (SELECT + slot.slot_name, + CASE + WHEN slot_file <> 'state' THEN 1 + END AS slot_file , + slot_type, + COALESCE ( + floor( + CASE WHEN pg_is_in_recovery() + THEN ( + pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val) + ) / s.val + ELSE ( + pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn) + -- this is needed to account for whole WAL retention and + -- not only size retention + + (pg_walfile_name_offset(restart_lsn)).file_offset + ) / s.val + END + ),0) AS replslot_wal_keep + FROM pg_replication_slots slot + LEFT JOIN ( + SELECT + slot2.slot_name, + pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file + FROM pg_replication_slots slot2 + ) files (slot_name, slot_file) + ON slot.slot_name = files.slot_name + CROSS JOIN wal_size s + ) AS d +GROUP BY + slot_name, + slot_type, + replslot_wal_keep; +` +} + +func queryQueryableDatabaseList() string { + return ` +SELECT datname 
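+-- excludes templates, the current database, and databases the current user cannot connect to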
+FROM pg_database +WHERE datallowconn = true + AND datistemplate = false + AND datname != current_database() + AND has_database_privilege((SELECT CURRENT_USER), datname, 'connect'); +` +} + +func queryDatabaseStats() string { + // definition by version: https://pgpedia.info/p/pg_stat_database.html + // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-VIEW + // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1018 + + return ` +SELECT stat.datname, + numbackends, + pg_database.datconnlimit, + xact_commit, + xact_rollback, + blks_read * current_setting('block_size')::numeric AS blks_read_bytes, + blks_hit * current_setting('block_size')::numeric AS blks_hit_bytes, + tup_returned, + tup_fetched, + tup_inserted, + tup_updated, + tup_deleted, + conflicts, + temp_files, + temp_bytes, + deadlocks +FROM pg_stat_database stat + INNER JOIN + pg_database + ON pg_database.datname = stat.datname +WHERE pg_database.datistemplate = false; +` +} + +func queryDatabaseSize(version int) string { + if version < pgVersion10 { + return ` +SELECT datname, + pg_database_size(datname) AS size +FROM pg_database +WHERE pg_database.datistemplate = false + AND has_database_privilege((SELECT CURRENT_USER), pg_database.datname, 'connect'); +` + } + return ` +SELECT datname, + pg_database_size(datname) AS size +FROM pg_database +WHERE pg_database.datistemplate = false + AND (has_database_privilege((SELECT CURRENT_USER), datname, 'connect') + OR pg_has_role((SELECT CURRENT_USER), 'pg_read_all_stats', 'MEMBER')); +` +} + +func queryDatabaseConflicts() string { + // definition by version: https://pgpedia.info/p/pg_stat_database_conflicts.html + // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-CONFLICTS-VIEW + // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1058 + + return ` +SELECT stat.datname, + confl_tablespace, + confl_lock, + confl_snapshot, + confl_bufferpin, + confl_deadlock +FROM pg_stat_database_conflicts stat + INNER JOIN + pg_database + ON pg_database.datname = stat.datname +WHERE pg_database.datistemplate = false; +` +} + +func queryDatabaseLocks() string { + // definition by version: https://pgpedia.info/p/pg_locks.html + // docs: https://www.postgresql.org/docs/current/view-pg-locks.html + + return ` +SELECT pg_database.datname, + mode, + granted, + count(mode) AS locks_count +FROM pg_locks + INNER JOIN + pg_database + ON pg_database.oid = pg_locks.database +WHERE pg_database.datistemplate = false +GROUP BY datname, + mode, + granted +ORDER BY datname, + mode; +` +} + +func queryUserTablesCount() string { + return "SELECT count(*) from pg_stat_user_tables;" +} + +func queryStatUserTables() string { + return ` +SELECT current_database() as datname, + schemaname, + relname, + inh.parent_relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_live_tup, + n_dead_tup, + EXTRACT(epoch from now() - last_vacuum) as last_vacuum, + EXTRACT(epoch from now() - last_autovacuum) as last_autovacuum, + EXTRACT(epoch from now() - last_analyze) as last_analyze, + EXTRACT(epoch from now() - last_autoanalyze) as last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count, + pg_total_relation_size(quote_ident(schemaname) || '.' 
|| quote_ident(relname)) as total_relation_size +FROM pg_stat_user_tables +LEFT JOIN( + SELECT + c.oid AS child_oid, + p.relname AS parent_relname + FROM + pg_inherits + JOIN pg_class AS c ON (inhrelid = c.oid) + JOIN pg_class AS p ON (inhparent = p.oid) + ) AS inh ON inh.child_oid = relid +WHERE has_schema_privilege(schemaname, 'USAGE'); +` +} + +func queryStatIOUserTables() string { + return ` +SELECT current_database() AS datname, + schemaname, + relname, + inh.parent_relname, + heap_blks_read * current_setting('block_size')::numeric AS heap_blks_read_bytes, + heap_blks_hit * current_setting('block_size')::numeric AS heap_blks_hit_bytes, + idx_blks_read * current_setting('block_size')::numeric AS idx_blks_read_bytes, + idx_blks_hit * current_setting('block_size')::numeric AS idx_blks_hit_bytes, + toast_blks_read * current_setting('block_size')::numeric AS toast_blks_read_bytes, + toast_blks_hit * current_setting('block_size')::numeric AS toast_blks_hit_bytes, + tidx_blks_read * current_setting('block_size')::numeric AS tidx_blks_read_bytes, + tidx_blks_hit * current_setting('block_size')::numeric AS tidx_blks_hit_bytes +FROM pg_statio_user_tables +LEFT JOIN( + SELECT + c.oid AS child_oid, + p.relname AS parent_relname + FROM + pg_inherits + JOIN pg_class AS c ON (inhrelid = c.oid) + JOIN pg_class AS p ON (inhparent = p.oid) + ) AS inh ON inh.child_oid = relid +WHERE has_schema_privilege(schemaname, 'USAGE'); +` +} + +func queryUserIndexesCount() string { + return "SELECT count(*) from pg_stat_user_indexes;" +} + +func queryStatUserIndexes() string { + return ` +SELECT current_database() as datname, + schemaname, + relname, + indexrelname, + inh.parent_relname, + idx_scan, + idx_tup_read, + idx_tup_fetch, + pg_relation_size(quote_ident(schemaname) || '.' 
|| quote_ident(indexrelname)::text) as size +FROM pg_stat_user_indexes +LEFT JOIN( + SELECT + c.oid AS child_oid, + p.relname AS parent_relname + FROM + pg_inherits + JOIN pg_class AS c ON (inhrelid = c.oid) + JOIN pg_class AS p ON (inhparent = p.oid) + ) AS inh ON inh.child_oid = relid +WHERE has_schema_privilege(schemaname, 'USAGE'); +` +} + +// The following query for bloat was taken from the venerable check_postgres +// script (https://bucardo.org/check_postgres/), which is: +// +// Copyright (c) 2007-2017 Greg Sabino Mullane +//------------------------------------------------------------------------------ + +func queryBloat() string { + return ` +SELECT + current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta, + ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat, + CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages, + CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes, + CASE WHEN relpages < otta THEN '0 bytes'::text ELSE (bs*(relpages-otta))::bigint::text || ' bytes' END AS wastedsize, + iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta, + ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat, + CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages, + CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes, + CASE WHEN ipages < iotta THEN '0 bytes' ELSE (bs*(ipages-iotta))::bigint::text || ' bytes' END AS wastedisize, + CASE WHEN relpages < otta THEN + CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END + ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint) + ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END + END AS totalwastedbytes +FROM ( + SELECT + nn.nspname AS schemaname, + cc.relname AS tablename, + COALESCE(cc.reltuples,0) AS reltuples, + COALESCE(cc.relpages,0) AS relpages, + COALESCE(bs,0) AS bs, + COALESCE(CEIL((cc.reltuples*((datahdr+ma- + (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta, + COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, + COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols + FROM + pg_class cc + JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema' + LEFT JOIN + ( + SELECT + ma,bs,foo.nspname,foo.relname, + (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, + (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 + FROM ( + SELECT + ns.nspname, tbl.relname, hdr, ma, bs, + SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth, + MAX(coalesce(null_frac,0)) AS maxfracsum, + hdr+( + SELECT 1+count(*)/8 + FROM pg_stats s2 + WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname + ) AS nullhdr + FROM pg_attribute att + JOIN pg_class tbl ON att.attrelid = tbl.oid + JOIN pg_namespace ns ON ns.oid = tbl.relnamespace + LEFT JOIN pg_stats s ON s.schemaname=ns.nspname + AND s.tablename = tbl.relname + AND s.inherited=false + AND s.attname=att.attname, + ( + SELECT + (SELECT current_setting('block_size')::numeric) AS bs, + CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#"[0-9]+.[0-9]+#"%' for '#') + IN ('8.0','8.1','8.2') THEN 27 
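+            -- 27 vs 23: heap tuple header bytes for pre-8.3 vs 8.3+ servers (per check_postgres)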
ELSE 23 END AS hdr, + CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma + FROM (SELECT version() AS v) AS foo + ) AS constants + WHERE att.attnum > 0 AND tbl.relkind='r' + GROUP BY 1,2,3,4,5 + ) AS foo + ) AS rs + ON cc.relname = rs.relname AND nn.nspname = rs.nspname + LEFT JOIN pg_index i ON indrelid = cc.oid + LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid +) AS sml +WHERE sml.relpages - otta > 10 OR ipages - iotta > 10; +` +} + +func queryColumnsStats() string { + return ` +SELECT current_database() AS datname, + nspname AS schemaname, + relname, + st.attname, + typname, + (st.null_frac * 100)::int AS null_percent, + case + when st.n_distinct >= 0 + then st.n_distinct + else + abs(st.n_distinct) * reltuples + end AS "distinct" +FROM pg_class c + JOIN + pg_namespace ns + ON + (ns.oid = relnamespace) + JOIN + pg_attribute at + ON + (c.oid = attrelid) + JOIN + pg_type t + ON + (t.oid = atttypid) + JOIN + pg_stats st + ON + (st.tablename = relname AND st.attname = at.attname) +WHERE relkind = 'r' + AND nspname NOT LIKE E'pg\\_%' + AND nspname != 'information_schema' + AND NOT attisdropped + AND attstattarget != 0 + AND reltuples >= 100 +ORDER BY nspname, + relname, + st.attname; +` +} diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt new file mode 100644 index 00000000000000..7adc787bcf5d6d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt @@ -0,0 +1,3 @@ + autovacuum_analyze | autovacuum_vacuum_analyze | autovacuum_vacuum | autovacuum_vacuum_freeze | autovacuum_brin_summarize +--------------------+---------------------------+-------------------+--------------------------+--------------------------- + 0 | 0 | 0 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt new file mode 100644 index 00000000000000..307695363f70d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt @@ -0,0 +1,12 @@ + db | schemaname | tablename | tups | pages | otta | tbloat | wastedpages | wastedbytes | wastedsize | iname | itups | ipages | iotta | ibloat | wastedipages | wastedibytes | wastedisize | totalwastedbytes +----------+------------+---------------------------------+---------+-------+-------+--------+-------------+-------------+---------------+---------------------------+---------+--------+-------+--------+--------------+--------------+--------------+------------------ + postgres | pg_catalog | pg_proc_oid_index | 3202 | 11 | 0 | 0.0 | 11 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_proc_proname_args_nsp_index | 3202 | 32 | 0 | 0.0 | 32 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_attribute_relid_attnam_index | 2971 | 15 | 0 | 0.0 | 15 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_description_o_c_o_index | 5078 | 27 | 0 | 0.0 | 27 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_depend_depender_index | 8814 | 43 | 0 | 0.0 | 43 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_depend_reference_index | 8814 | 53 | 0 | 0.0 | 53 | 0 | 0 bytes | ? 
| 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 + postgres | pg_catalog | pg_depend | 8814 | 65 | 65 | 0.0 | 0 | 0 | 0 bytes | pg_depend_reference_index | 8814 | 53 | 40 | 1.3 | 13 | 106496 | 106496 bytes | 106496 + postgres | pg_toast | pg_toast_2618 | 283 | 63 | 0 | 0.0 | 63 | 0 | 0 bytes | pg_toast_2618_index | 0 | 1 | 0 | 0.0 | 1 | 0 | 0 bytes | 0 + postgres | public | pgbench_accounts | 5000000 | 81968 | 80764 | 1.0 | 1204 | 9863168 | 9863168 bytes | pgbench_accounts_pkey | 5000000 | 13713 | 66692 | 0.2 | 0 | 0 | 0 bytes | 9863168 + postgres | public | pgbench_accounts_pkey | 5000000 | 13713 | 0 | 0.0 | 13713 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt new file mode 100644 index 00000000000000..cd05e89afefb7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt @@ -0,0 +1,6 @@ + relkind | count | size +---------+-------+--------- + r | 66 | 3424256 + v | 137 | 0 + i | 155 | 3678208 + t | 38 | 548864 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt new file mode 100644 index 00000000000000..851ff13202df88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt @@ -0,0 +1,3 @@ + checkpoints_timed | checkpoints_req | checkpoint_write_time | checkpoint_sync_time | buffers_checkpoint_bytes | buffers_clean_bytes | maxwritten_clean | buffers_backend_bytes | buffers_backend_fsync | buffers_alloc_bytes +-------------------+-----------------+-----------------------+----------------------+--------------------------+---------------------+------------------+-----------------------+-----------------------+-------------------- + 1814 | 16 | 167 | 47 | 32768 | 0 | 0 | 0 | 0 | 27295744 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt new file mode 100644 index 00000000000000..34229182aa4ce1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt @@ -0,0 +1,4 @@ + datname | confl_tablespace | confl_lock | confl_snapshot | confl_bufferpin | confl_deadlock +------------+------------------+------------+----------------+-----------------+---------------- + postgres | 0 | 0 | 0 | 0 | 0 + production | 0 | 0 | 0 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt new file mode 100644 index 00000000000000..8d92f314d57ba0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt @@ -0,0 +1,7 @@ + datname | mode | granted | locks_count +------------+--------------------------+---------+------------- + postgres | AccessShareLock | t | 99 + postgres | RowShareLock | t | 99 + postgres | RowExclusiveLock | t | 99 + production | ShareUpdateExclusiveLock | t | 99 + production | ShareLock | f | 99 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt 
b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt new file mode 100644 index 00000000000000..367cb6f20a21fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt @@ -0,0 +1,4 @@ + datname | size +------------+-------- + postgres | 8758051 + production | 8602115 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt new file mode 100644 index 00000000000000..d3ce24c6e45341 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt @@ -0,0 +1,4 @@ +datname | numbackends | datconnlimit | xact_commit | xact_rollback | blks_read_bytes | blks_hit_bytes | tup_returned | tup_fetched | tup_inserted | tup_updated | tup_deleted | conflicts | temp_files | temp_bytes | deadlocks +------------+-------------+--------------+-------------+---------------+-----------------+----------------+--------------+-------------+--------------+-------------+-------------+-----------+------------+------------+----------- +postgres | 3 | 30 | 1438660 | 70 | 3252 | 1221125 | 13207245 | 359833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 +production | 1 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt new file mode 100644 index 00000000000000..6cb2222d3c8379 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt @@ -0,0 +1,3 @@ + is_superuser +-------------- + f \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt new file mode 100644 index 00000000000000..84cd8088eba74b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt @@ -0,0 +1,3 @@ + is_superuser +-------------- + t \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt new file mode 100644 index 00000000000000..b684948e3562ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt @@ -0,0 +1,3 @@ + pg_is_in_recovery +------------------- + t \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt new file mode 100644 index 00000000000000..b3f2af4f186885 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt @@ -0,0 +1,2 @@ + datname +--------- \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt new file mode 100644 index 00000000000000..59fcd8fe406fe8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt @@ -0,0 +1,3 @@ + slot_name | slot_type | replslot_wal_keep | replslot_files 
+-----------+-----------+-------------------+---------------- + ocean | physical | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt new file mode 100644 index 00000000000000..98c3cd99e091de --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt @@ -0,0 +1,5 @@ + application_name | sent_delta | write_delta | flush_delta | replay_delta +------------------+------------+-------------+-------------+-------------- + walreceiver | 1 | 1 | 1 | 1 + walreceiver | 1 | 1 | 1 | 1 + phys-standby2 | 0 | 0 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt new file mode 100644 index 00000000000000..c2e253790a0b1a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt @@ -0,0 +1,5 @@ + application_name | write_lag | flush_lag | replay_lag +------------------+-----------+-----------+------------ + walreceiver | 1 | 1 | 1 + walreceiver | 1 | 1 | 1 + phys-standby2 | 0 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt new file mode 100644 index 00000000000000..7387f4dfb0e020 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt @@ -0,0 +1,8 @@ + state | count +-------------------------------+------- + active | 1 + idle | 14 + idle in transaction | 7 + idle in transaction (aborted) | 1 + fastpath function call | 1 + disabled | 1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt new file mode 100644 index 00000000000000..065188d97e93ee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt @@ -0,0 +1,3 @@ + sum +----- + 3 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt new file mode 100644 index 00000000000000..18d769b32b1c91 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt @@ -0,0 +1,3 @@ + server_version_num +-------------------- + 140004 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt new file mode 100644 index 00000000000000..4d59df2145df35 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt @@ -0,0 +1,3 @@ + current_setting +----------------- + 100 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt new file 
mode 100644 index 00000000000000..e72bd71aacb2b6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt @@ -0,0 +1,3 @@ + ?column? +---------- + 6400 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt new file mode 100644 index 00000000000000..db73fa4e6ce26a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt @@ -0,0 +1,11 @@ + datname | schemaname | relname | indexrelname | idx_scan | idx_tup_read | idx_tup_fetch | size +----------+------------+------------------+-------------------------+----------+--------------+---------------+----------- + postgres | public | pgbench_branches | pgbench_branches_pkey | 0 | 0 | 0 | 16384 + postgres | public | pgbench_tellers | pgbench_tellers_pkey | 0 | 0 | 0 | 32768 + postgres | public | pgbench_accounts | pgbench_accounts_pkey | 3 | 5000000 | 0 | 112336896 + postgres | public | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192 + postgres | public | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192 + postgres | public | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192 + postgres | myschema | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192 + postgres | myschema | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192 + postgres | myschema | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt new file mode 100644 index 00000000000000..f6f9edb046d737 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt @@ -0,0 +1,6 @@ + datname | schemaname | relname | seq_scan | seq_tup_read | idx_scan | idx_tup_fetch | n_tup_ins | n_tup_upd | n_tup_del | n_tup_hot_upd | n_live_tup | n_dead_tup | last_vacuum | last_autovacuum | last_analyze | last_autoanalyze | vacuum_count | autovacuum_count | analyze_count | autoanalyze_count | total_relation_size +----------+------------+------------------+----------+--------------+----------+---------------+-----------+-----------+-----------+---------------+------------+------------+---------------+-----------------+---------------+------------------+--------------+------------------+---------------+-------------------+--------------------- + postgres | public | pgbench_history | 0 | 0 | | | 0 | 0 | 0 | 0 | 0 | 0 | 377149.085671 | | 377149.085536 | | 1 | 0 | 1 | 0 | 0 + postgres | public | pgbench_accounts | 2 | 5000000 | 99955 | 99955 | 5000000 | 0 | 0 | 0 | 5000048 | 1000048 | 377149.232856 | | 377149.097205 | | 1 | 0 | 1 | 0 | 784031744 + postgres | public | pgbench_tellers | 1 | 500 | 0 | 0 | 500 | 0 | 0 | 0 | 500 | 0 | 371719.262166 | | 377149.824095 | | 6 | 0 | 1 | 0 | 90112 + postgres | public | pgbench_branches | 6 | 300 | 0 | 0 | 50 | 0 | 0 | 0 | 50 | 0 | 371719.262495 | | 377149.826260 | | 6 | 0 | 1 | 0 | 57344 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt new file mode 100644 index 00000000000000..f52b1806b6f403 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt @@ -0,0 +1,6 @@ + datname | schemaname | relname | heap_blks_read_bytes | heap_blks_hit_bytes | idx_blks_read_bytes | idx_blks_hit_bytes | toast_blks_read_bytes | toast_blks_hit_bytes | tidx_blks_read_bytes | tidx_blks_hit_bytes +----------+------------+------------------+----------------------+---------------------+---------------------+--------------------+-----------------------+----------------------+----------------------+--------------------- + postgres | public | pgbench_tellers | 623828992 | 491937792 | 101433344 | 0 | | | | + postgres | public | pgbench_history | 0 | 0 | | | | | | + postgres | public | pgbench_accounts | 1803882668032 | 224484753408 | 973310976000 | 7138635948032 | | | | + postgres | public | pgbench_branches | 507150336 | 304316416 | 101425152 | 101441536 | | | | \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt new file mode 100644 index 00000000000000..645d847d0c52bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt @@ -0,0 +1,10 @@ + datname | schemaname | relname | attname | typname | null_percent | distinct +----------+------------+------------------+----------+---------+--------------+---------- + postgres | public | pgbench_accounts | abalance | int4 | 0 | 1 + postgres | public | pgbench_accounts | aid | int4 | 0 | 5e+06 + postgres | public | pgbench_accounts | bid | int4 | 0 | 50 + postgres | public | pgbench_accounts | filler | bpchar | 0 | 1 + postgres | public | pgbench_tellers | bid | int4 | 0 | 50 + postgres | public | pgbench_tellers | filler | bpchar | 100 | 0 + postgres | public | pgbench_tellers | tbalance | int4 | 0 | 1 + postgres | public | pgbench_tellers | tid | int4 | 0 | 500 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt new file mode 100644 index 00000000000000..9e05f12ab54dfe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt @@ -0,0 +1,3 @@ + oldest_current_xid | percent_towards_wraparound | percent_towards_emergency_autovacuum +--------------------+----------------------------+----------------------------------- + 9 | 0 | 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt new file mode 100644 index 00000000000000..95464bc3cec394 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt @@ -0,0 +1,3 @@ + extract +--------------- + 499906.075943 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt new file mode 100644 index 00000000000000..8b7a86261b52c5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt @@ -0,0 +1,3 @@ + wal_archive_files_ready_count | wal_archive_files_done_count +-------------------------------+------------------------------ + 1 | 1 \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt new file mode 100644 index 00000000000000..f18aefdcde0719 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt @@ -0,0 +1,3 @@ + wal_recycled_files | wal_written_files +--------------------+------------------- + 0 | 1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt new file mode 100644 index 00000000000000..3bb8f9e956a88c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt @@ -0,0 +1,3 @@ + wal_writes +------------ + 24103144 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt new file mode 100644 index 00000000000000..52617f748f83ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt @@ -0,0 +1,10 @@ + datname | state | xact_running_time | query_running_time +----------+---------------------+-------------------+-------------------- + some_db | idle in transaction | 574.530219 | 574.315061 + some_db | idle in transaction | 574.867167 | 574.330322 + postgres | active | 0.000000 | 0.000000 + some_db | idle in transaction | 574.807256 | 574.377105 + some_db | idle in transaction | 574.680244 | 574.357246 + some_db | idle in transaction | 574.800283 | 574.330328 + some_db | idle in transaction | 574.396730 | 574.290165 + some_db | idle in transaction | 574.665428 | 574.337164 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/README.md b/src/go/collectors/go.d.plugin/modules/powerdns/README.md new file mode 120000 index 00000000000000..3e5989715edafc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/README.md @@ -0,0 +1 @@ +integrations/powerdns_authoritative_server.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens.go b/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens.go new file mode 100644 index 00000000000000..07b7fdbcfea3ac --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("powerdns", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *AuthoritativeNS { + return &AuthoritativeNS{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8081", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type AuthoritativeNS struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func (ns *AuthoritativeNS) Init() bool { + err := ns.validateConfig() + if err != nil { + ns.Errorf("config validation: %v", err) + return false + } + + client, err := ns.initHTTPClient() + if 
err != nil { + ns.Errorf("init HTTP client: %v", err) + return false + } + ns.httpClient = client + + cs, err := ns.initCharts() + if err != nil { + ns.Errorf("init charts: %v", err) + return false + } + ns.charts = cs + + return true +} + +func (ns *AuthoritativeNS) Check() bool { + return len(ns.Collect()) > 0 +} + +func (ns *AuthoritativeNS) Charts() *module.Charts { + return ns.charts +} + +func (ns *AuthoritativeNS) Collect() map[string]int64 { + ms, err := ns.collect() + if err != nil { + ns.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (ns *AuthoritativeNS) Cleanup() { + if ns.httpClient == nil { + return + } + ns.httpClient.CloseIdleConnections() +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens_test.go b/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens_test.go new file mode 100644 index 00000000000000..71e5c6dc4d8a49 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/authoritativens_test.go @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v430statistics, _ = os.ReadFile("testdata/v4.3.0/statistics.json") + recursorStatistics, _ = os.ReadFile("testdata/recursor/statistics.json") +) + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v430statistics": v430statistics, + "recursorStatistics": recursorStatistics, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*AuthoritativeNS)(nil), New()) +} + +func TestRecursor_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset URL": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:38001", + }, + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ns := New() + ns.Config = test.config + + if test.wantFail { + assert.False(t, ns.Init()) + } else { + assert.True(t, ns.Init()) + } + }) + } +} + +func TestRecursor_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (p *AuthoritativeNS, cleanup func()) + wantFail bool + }{ + "success on valid response v4.3.0": { + prepare: preparePowerDNSAuthoritativeNSV430, + }, + "fails on response from PowerDNS Recursor": { + wantFail: true, + prepare: preparePowerDNSAuthoritativeNSRecursorData, + }, + "fails on 404 response": { + wantFail: true, + prepare: preparePowerDNSAuthoritativeNS404, + }, + "fails on connection refused": { + wantFail: true, + prepare: preparePowerDNSAuthoritativeNSConnectionRefused, + }, + "fails on response with invalid data": { + wantFail: true, + prepare: preparePowerDNSAuthoritativeNSInvalidData, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + recursor, cleanup := test.prepare() + defer cleanup() + require.True(t, recursor.Init()) + + if test.wantFail { + assert.False(t, recursor.Check()) + } else { + assert.True(t, recursor.Check()) + } + }) + } 
+} + +func TestRecursor_Charts(t *testing.T) { + recursor := New() + require.True(t, recursor.Init()) + assert.NotNil(t, recursor.Charts()) +} + +func TestRecursor_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestRecursor_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (p *AuthoritativeNS, cleanup func()) + wantCollected map[string]int64 + }{ + "success on valid response v4.3.0": { + prepare: preparePowerDNSAuthoritativeNSV430, + wantCollected: map[string]int64{ + "corrupt-packets": 1, + "cpu-iowait": 513, + "cpu-steal": 1, + "deferred-cache-inserts": 1, + "deferred-cache-lookup": 1, + "deferred-packetcache-inserts": 1, + "deferred-packetcache-lookup": 1, + "dnsupdate-answers": 1, + "dnsupdate-changes": 1, + "dnsupdate-queries": 1, + "dnsupdate-refused": 1, + "fd-usage": 23, + "incoming-notifications": 1, + "key-cache-size": 1, + "latency": 1, + "meta-cache-size": 1, + "open-tcp-connections": 1, + "overload-drops": 1, + "packetcache-hit": 1, + "packetcache-miss": 1, + "packetcache-size": 1, + "qsize-q": 1, + "query-cache-hit": 1, + "query-cache-miss": 1, + "query-cache-size": 1, + "rd-queries": 1, + "real-memory-usage": 164507648, + "recursing-answers": 1, + "recursing-questions": 1, + "recursion-unanswered": 1, + "ring-logmessages-capacity": 10000, + "ring-logmessages-size": 10, + "ring-noerror-queries-capacity": 10000, + "ring-noerror-queries-size": 1, + "ring-nxdomain-queries-capacity": 10000, + "ring-nxdomain-queries-size": 1, + "ring-queries-capacity": 10000, + "ring-queries-size": 1, + "ring-remotes-capacity": 10000, + "ring-remotes-corrupt-capacity": 10000, + "ring-remotes-corrupt-size": 1, + "ring-remotes-size": 1, + "ring-remotes-unauth-capacity": 10000, + "ring-remotes-unauth-size": 1, + "ring-servfail-queries-capacity": 10000, + "ring-servfail-queries-size": 1, + "ring-unauth-queries-capacity": 10000, + "ring-unauth-queries-size": 1, + "security-status": 1, + "servfail-packets": 1, + "signature-cache-size": 1, + "signatures": 1, + "sys-msec": 128, + "tcp-answers": 1, + "tcp-answers-bytes": 1, + "tcp-queries": 1, + "tcp4-answers": 1, + "tcp4-answers-bytes": 1, + "tcp4-queries": 1, + "tcp6-answers": 1, + "tcp6-answers-bytes": 1, + "tcp6-queries": 1, + "timedout-packets": 1, + "udp-answers": 1, + "udp-answers-bytes": 1, + "udp-do-queries": 1, + "udp-in-errors": 1, + "udp-noport-errors": 1, + "udp-queries": 1, + "udp-recvbuf-errors": 1, + "udp-sndbuf-errors": 1, + "udp4-answers": 1, + "udp4-answers-bytes": 1, + "udp4-queries": 1, + "udp6-answers": 1, + "udp6-answers-bytes": 1, + "udp6-queries": 1, + "uptime": 207, + "user-msec": 56, + }, + }, + "fails on response from PowerDNS Recursor": { + prepare: preparePowerDNSAuthoritativeNSRecursorData, + }, + "fails on 404 response": { + prepare: preparePowerDNSAuthoritativeNS404, + }, + "fails on connection refused": { + prepare: preparePowerDNSAuthoritativeNSConnectionRefused, + }, + "fails on response with invalid data": { + prepare: preparePowerDNSAuthoritativeNSInvalidData, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ns, cleanup := test.prepare() + defer cleanup() + require.True(t, ns.Init()) + + collected := ns.Collect() + + assert.Equal(t, test.wantCollected, collected) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, ns, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ns *AuthoritativeNS, collected map[string]int64) { + for _, chart := range *ns.Charts() { + if chart.Obsolete { 
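+			// Charts marked obsolete are skipped on purpose: their dims and vars
+			// are not required to be present in the collected metrics map.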
+			continue
+		}
+		for _, dim := range chart.Dims {
+			_, ok := collected[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+		}
+		for _, v := range chart.Vars {
+			_, ok := collected[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+		}
+	}
+}
+
+func preparePowerDNSAuthoritativeNSV430() (*AuthoritativeNS, func()) {
+	srv := preparePowerDNSAuthoritativeNSEndpoint()
+	ns := New()
+	ns.URL = srv.URL
+
+	return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSRecursorData() (*AuthoritativeNS, func()) {
+	srv := preparePowerDNSRecursorEndpoint()
+	ns := New()
+	ns.URL = srv.URL
+
+	return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSInvalidData() (*AuthoritativeNS, func()) {
+	srv := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			_, _ = w.Write([]byte("hello and\n goodbye"))
+		}))
+	ns := New()
+	ns.URL = srv.URL
+
+	return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNS404() (*AuthoritativeNS, func()) {
+	srv := httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusNotFound)
+		}))
+	ns := New()
+	ns.URL = srv.URL
+
+	return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSConnectionRefused() (*AuthoritativeNS, func()) {
+	ns := New()
+	ns.URL = "http://127.0.0.1:38001"
+
+	return ns, func() {}
+}
+
+func preparePowerDNSAuthoritativeNSEndpoint() *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			switch r.URL.Path {
+			case urlPathLocalStatistics:
+				_, _ = w.Write(v430statistics)
+			default:
+				w.WriteHeader(http.StatusNotFound)
+			}
+		}))
+}
+
+func preparePowerDNSRecursorEndpoint() *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			switch r.URL.Path {
+			case urlPathLocalStatistics:
+				_, _ = w.Write(recursorStatistics)
+			default:
+				w.WriteHeader(http.StatusNotFound)
+			}
+		}))
+}
diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/charts.go b/src/go/collectors/go.d.plugin/modules/powerdns/charts.go
new file mode 100644
index 00000000000000..aa61149e69d2db
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/powerdns/charts.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import "github.com/netdata/go.d.plugin/agent/module"
+
+var charts = module.Charts{
+	{
+		ID:    "questions_in",
+		Title: "Incoming questions",
+		Units: "questions/s",
+		Fam:   "questions",
+		Ctx:   "powerdns.questions_in",
+		Dims: module.Dims{
+			{ID: "udp-queries", Name: "udp", Algo: module.Incremental},
+			{ID: "tcp-queries", Name: "tcp", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "questions_out",
+		Title: "Outgoing questions",
+		Units: "questions/s",
+		Fam:   "questions",
+		Ctx:   "powerdns.questions_out",
+		Dims: module.Dims{
+			{ID: "udp-answers", Name: "udp", Algo: module.Incremental},
+			{ID: "tcp-answers", Name: "tcp", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "cache_usage",
+		Title: "Cache Usage",
+		Units: "events/s",
+		Fam:   "cache",
+		Ctx:   "powerdns.cache_usage",
+		Dims: module.Dims{
+			{ID: "query-cache-hit", Algo: module.Incremental},
+			{ID: "query-cache-miss", Algo: module.Incremental},
+			{ID: "packetcache-hit", Name: "packet-cache-hit", Algo: module.Incremental},
+			{ID: "packetcache-miss", Name: "packet-cache-miss", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "cache_size",
+		Title: "Cache Size",
+		Units: "entries",
+		Fam:   "cache",
+		Ctx: 
"powerdns.cache_size", + Dims: module.Dims{ + {ID: "query-cache-size", Name: "query-cache"}, + {ID: "packetcache-size", Name: "packet-cache"}, + {ID: "key-cache-size", Name: "key-cache"}, + {ID: "meta-cache-size", Name: "meta-cache"}, + }, + }, + { + ID: "latency", + Title: "Answer latency", + Units: "microseconds", + Fam: "latency", + Ctx: "powerdns.latency", + Dims: module.Dims{ + {ID: "latency"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/collect.go b/src/go/collectors/go.d.plugin/modules/powerdns/collect.go new file mode 100644 index 00000000000000..7a184d91601413 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/collect.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathLocalStatistics = "/api/v1/servers/localhost/statistics" +) + +func (ns *AuthoritativeNS) collect() (map[string]int64, error) { + statistics, err := ns.scrapeStatistics() + if err != nil { + return nil, err + } + + collected := make(map[string]int64) + + ns.collectStatistics(collected, statistics) + + if !isPowerDNSAuthoritativeNSMetrics(collected) { + return nil, errors.New("returned metrics aren't PowerDNS Authoritative Server metrics") + } + + return collected, nil +} + +func isPowerDNSAuthoritativeNSMetrics(collected map[string]int64) bool { + // PowerDNS Recursor has same endpoint and returns data in the same format. + _, ok1 := collected["over-capacity-drops"] + _, ok2 := collected["tcp-questions"] + return !ok1 && !ok2 +} + +func (ns *AuthoritativeNS) collectStatistics(collected map[string]int64, statistics statisticMetrics) { + for _, s := range statistics { + // https://doc.powerdns.com/authoritative/http-api/statistics.html#statisticitem + if s.Type != "StatisticItem" { + continue + } + + value, ok := s.Value.(string) + if !ok { + ns.Debugf("%s value (%v) unexpected type: want=string, got=%T.", s.Name, s.Value, s.Value) + continue + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + ns.Debugf("%s value (%v) parse error: %v", s.Name, s.Value, err) + continue + } + + collected[s.Name] = v + } +} + +func (ns *AuthoritativeNS) scrapeStatistics() ([]statisticMetric, error) { + req, _ := web.NewHTTPRequest(ns.Request) + req.URL.Path = urlPathLocalStatistics + + var statistics statisticMetrics + if err := ns.doOKDecode(req, &statistics); err != nil { + return nil, err + } + + return statistics, nil +} + +func (ns *AuthoritativeNS) doOKDecode(req *http.Request, in interface{}) error { + resp, err := ns.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/config_schema.json b/src/go/collectors/go.d.plugin/modules/powerdns/config_schema.json new file mode 100644 index 00000000000000..93f8e72a2adba3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/config_schema.json @@ -0,0 +1,59 @@ 
+{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/powerdns job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/init.go b/src/go/collectors/go.d.plugin/modules/powerdns/init.go new file mode 100644 index 00000000000000..a577db7732ead8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (ns AuthoritativeNS) validateConfig() error { + if ns.URL == "" { + return errors.New("URL not set") + } + if _, err := web.NewHTTPRequest(ns.Request); err != nil { + return err + } + return nil +} + +func (ns AuthoritativeNS) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(ns.Client) +} + +func (ns AuthoritativeNS) initCharts() (*module.Charts, error) { + return charts.Copy(), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/integrations/powerdns_authoritative_server.md b/src/go/collectors/go.d.plugin/modules/powerdns/integrations/powerdns_authoritative_server.md new file mode 100644 index 00000000000000..4c776193a57898 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/integrations/powerdns_authoritative_server.md @@ -0,0 +1,223 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/powerdns/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/powerdns/metadata.yaml" +sidebar_label: "PowerDNS Authoritative Server" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# PowerDNS Authoritative Server + + +<img src="https://netdata.cloud/img/powerdns.svg" width="150"/> + + +Plugin: go.d.plugin +Module: powerdns + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors PowerDNS Authoritative Server instances. +It collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver). + +Used endpoints: + +- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html) + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. 
+ +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per PowerDNS Authoritative Server instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| powerdns.questions_in | udp, tcp | questions/s | +| powerdns.questions_out | udp, tcp | questions/s | +| powerdns.cache_usage | query-cache-hit, query-cache-miss, packetcache-hit, packetcache-miss | events/s | +| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries | +| powerdns.latency | latency | microseconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable webserver + +Follow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation. + + +#### Enable HTTP API + +Follow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/powerdns.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/powerdns.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8081 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8081 + +``` +</details> + +##### HTTP authentication + +Basic HTTP authentication. 
+ +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8081 + username: admin + password: password + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8081 + + - name: remote + url: http://203.0.113.0:8081 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m powerdns + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/metadata.yaml b/src/go/collectors/go.d.plugin/modules/powerdns/metadata.yaml new file mode 100644 index 00000000000000..ea4dec0b53a11c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/metadata.yaml @@ -0,0 +1,215 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-powerdns + plugin_name: go.d.plugin + module_name: powerdns + monitored_instance: + name: PowerDNS Authoritative Server + link: https://doc.powerdns.com/authoritative/ + icon_filename: powerdns.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - powerdns + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors PowerDNS Authoritative Server instances. + It collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver). + + Used endpoints: + + - [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html) + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable webserver + description: | + Follow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation. + - title: Enable HTTP API + description: | + Follow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation. + configuration: + file: + name: go.d/powerdns.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8081 + required: true + - name: timeout + description: HTTP request timeout. 
+ default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + username: admin + password: password + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + + - name: remote + url: http://203.0.113.0:8081 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: powerdns.questions_in + description: Incoming questions + unit: questions/s + chart_type: line + dimensions: + - name: udp + - name: tcp + - name: powerdns.questions_out + description: Outgoing questions + unit: questions/s + chart_type: line + dimensions: + - name: udp + - name: tcp + - name: powerdns.cache_usage + description: Cache Usage + unit: events/s + chart_type: line + dimensions: + - name: query-cache-hit + - name: query-cache-miss + - name: packetcache-hit + - name: packetcache-miss + - name: powerdns.cache_size + description: Cache Size + unit: entries + chart_type: line + dimensions: + - name: query-cache + - name: packet-cache + - name: key-cache + - name: meta-cache + - name: powerdns.latency + description: Answer latency + unit: microseconds + chart_type: line + dimensions: + - name: latency diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/metrics.go b/src/go/collectors/go.d.plugin/modules/powerdns/metrics.go new file mode 100644 index 00000000000000..3efa2c9809f8ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/metrics.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns + +// https://doc.powerdns.com/authoritative/http-api/statistics.html#objects +type ( + statisticMetrics []statisticMetric + statisticMetric struct { + Name string + Type string + Value interface{} + } +) diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/testdata/recursor/statistics.json b/src/go/collectors/go.d.plugin/modules/powerdns/testdata/recursor/statistics.json new file mode 100644 index 00000000000000..a31477959717c9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/testdata/recursor/statistics.json @@ -0,0 +1,587 @@ +[ + { + "name": "all-outqueries", + "type": "StatisticItem", + "value": "41" + }, + { + "name": "answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers1-10", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers10-100", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth-zone-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers1-10", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "auth4-answers10-100", + "type": "StatisticItem", + "value": "35" + }, + { + "name": "auth4-answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers1-10", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers10-100", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cache-entries", + "type": "StatisticItem", + "value": "171" + }, + { + "name": "cache-hits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cache-misses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "case-mismatches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "chain-resends", + "type": "StatisticItem", + "value": "1" + }, + { + "name": 
"client-parse-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "concurrent-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cpu-msec-thread-0", + "type": "StatisticItem", + "value": "439" + }, + { + "name": "cpu-msec-thread-1", + "type": "StatisticItem", + "value": "445" + }, + { + "name": "cpu-msec-thread-2", + "type": "StatisticItem", + "value": "466" + }, + { + "name": "dlg-only-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-authentic-data-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-check-disabled-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-bogus", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-indeterminate", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-insecure", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-nta", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-secure", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "dnssec-validations", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "dont-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ecs-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ecs-responses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "edns-ping-matches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "edns-ping-mismatches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "empty-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "failed-host-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "fd-usage", + "type": "StatisticItem", + "value": "32" + }, + { + "name": "ignored-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ipv6-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ipv6-questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "malloc-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "max-cache-entries", + "type": "StatisticItem", + "value": "1000000" + }, + { + "name": "max-mthread-stack", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "max-packetcache-entries", + "type": "StatisticItem", + "value": "500000" + }, + { + "name": "negcache-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "no-packet-error", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noedns-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noerror-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noping-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "nsset-invalidations", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "nsspeeds-entries", + "type": "StatisticItem", + "value": "78" + }, + { + "name": "nxdomain-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing4-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing6-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "over-capacity-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-entries", + "type": "StatisticItem", + "value": "1" + }, + { + 
"name": "packetcache-hits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-misses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-custom", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-drop", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-noaction", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-nodata", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-nxdomain", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-truncate", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "qa-latency", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "qname-min-fallback-success", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "query-pipe-full-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "real-memory-usage", + "type": "StatisticItem", + "value": "44773376" + }, + { + "name": "rebalanced-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "resource-limits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "security-status", + "type": "StatisticItem", + "value": "3" + }, + { + "name": "server-parse-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "servfail-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "spoof-prevents", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "sys-msec", + "type": "StatisticItem", + "value": "1520" + }, + { + "name": "tcp-client-overflow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-clients", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttle-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttled-out", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttled-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "too-old-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "truncated-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-in-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-noport-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-recvbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-sndbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unauthorized-tcp", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unauthorized-udp", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unexpected-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unreachables", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "uptime", + "type": "StatisticItem", + "value": "1624" + }, + { + "name": "user-msec", + "type": "StatisticItem", + "value": "465" + }, + { + "name": "variable-responses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-our-latency", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime0-1", + "type": "StatisticItem", + "value": "1" 
+ }, + { + "name": "x-ourtime1-2", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime16-32", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime2-4", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime4-8", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime8-16", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "response-by-qtype", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-sizes", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-by-rcode", + "type": "MapStatisticItem", + "value": [] + } +] diff --git a/src/go/collectors/go.d.plugin/modules/powerdns/testdata/v4.3.0/statistics.json b/src/go/collectors/go.d.plugin/modules/powerdns/testdata/v4.3.0/statistics.json new file mode 100644 index 00000000000000..30813d3d8835b9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns/testdata/v4.3.0/statistics.json @@ -0,0 +1,507 @@ +[ + { + "name": "corrupt-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cpu-iowait", + "type": "StatisticItem", + "value": "513" + }, + { + "name": "cpu-steal", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "deferred-cache-inserts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "deferred-cache-lookup", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "deferred-packetcache-inserts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "deferred-packetcache-lookup", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnsupdate-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnsupdate-changes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnsupdate-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnsupdate-refused", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "fd-usage", + "type": "StatisticItem", + "value": "23" + }, + { + "name": "incoming-notifications", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "key-cache-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "latency", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "meta-cache-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "open-tcp-connections", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "overload-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-hit", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-miss", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "qsize-q", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "query-cache-hit", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "query-cache-miss", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "query-cache-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "rd-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "real-memory-usage", + "type": "StatisticItem", + "value": "164507648" + }, + { + "name": "recursing-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "recursing-questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "recursion-unanswered", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-logmessages-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": 
"ring-logmessages-size", + "type": "StatisticItem", + "value": "10" + }, + { + "name": "ring-noerror-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-noerror-queries-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-nxdomain-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-nxdomain-queries-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-queries-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-remotes-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-corrupt-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-corrupt-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-remotes-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-remotes-unauth-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-unauth-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-servfail-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-servfail-queries-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ring-unauth-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-unauth-queries-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "security-status", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "servfail-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "signature-cache-size", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "signatures", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "sys-msec", + "type": "StatisticItem", + "value": "128" + }, + { + "name": "tcp-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp4-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp4-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp4-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp6-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp6-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp6-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "timedout-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-do-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-in-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-noport-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-recvbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-sndbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp4-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp4-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": 
"udp4-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp6-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp6-answers-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp6-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "uptime", + "type": "StatisticItem", + "value": "207" + }, + { + "name": "user-msec", + "type": "StatisticItem", + "value": "56" + }, + { + "name": "response-by-qtype", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-sizes", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-by-rcode", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "logmessages", + "size": "10000", + "type": "RingStatisticItem", + "value": [ + { + "name": "[webserver] 088688d6-9976-4e4d-a6aa-2272f8c6f173 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] 662e4249-4e9a-42e7-b780-b81929875b8f HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] 8c79870a-9a47-4952-9166-02710d146ab3 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] dc029119-209f-4101-9e8f-82ab02d857d9 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] fa61f546-8607-4771-bc9a-48ddc5a85dc0 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "About to create 3 backend threads for UDP", + "value": "1" + }, + { + "name": "Creating backend connection for TCP", + "value": "1" + }, + { + "name": "Done launching threads, ready to distribute questions", + "value": "1" + }, + { + "name": "Master/slave communicator launching", + "value": "1" + }, + { + "name": "No master domains need notifications", + "value": "1" + } + ] + }, + { + "name": "remotes", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "remotes-corrupt", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "remotes-unauth", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "noerror-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "nxdomain-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "servfail-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "unauth-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + } +] diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/README.md b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/README.md new file mode 120000 index 00000000000000..810e6330874aa3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/README.md @@ -0,0 +1 @@ +integrations/powerdns_recursor.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/charts.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/charts.go new file mode 100644 index 00000000000000..904d807a149a54 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/charts.go @@ -0,0 +1,98 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +import "github.com/netdata/go.d.plugin/agent/module" + +var charts = module.Charts{ + { + ID: "questions_in", + Title: "Incoming questions", + Units: "questions/s", + Fam: "questions", + Ctx: "powerdns_recursor.questions_in", + Dims: module.Dims{ + {ID: "questions", Name: "total", Algo: module.Incremental}, + {ID: "tcp-questions", Name: "tcp", Algo: module.Incremental}, + {ID: "ipv6-questions", Name: "ipv6", Algo: module.Incremental}, + }, + }, + { + ID: "questions_out", + Title: "Outgoing questions", + Units: "questions/s", + Fam: "questions", + Ctx: "powerdns_recursor.questions_out", + Dims: module.Dims{ + {ID: "all-outqueries", Name: "udp", Algo: module.Incremental}, + {ID: "tcp-outqueries", Name: "tcp", Algo: module.Incremental}, + {ID: "ipv6-outqueries", Name: "ipv6", Algo: module.Incremental}, + {ID: "throttled-outqueries", Name: "throttled", Algo: module.Incremental}, + }, + }, + { + ID: "answer_time", + Title: "Queries answered within a time range", + Units: "queries/s", + Fam: "performance", + Ctx: "powerdns_recursor.answer_time", + Dims: module.Dims{ + {ID: "answers0-1", Name: "0-1ms", Algo: module.Incremental}, + {ID: "answers1-10", Name: "1-10ms", Algo: module.Incremental}, + {ID: "answers10-100", Name: "10-100ms", Algo: module.Incremental}, + {ID: "answers100-1000", Name: "100-1000ms", Algo: module.Incremental}, + {ID: "answers-slow", Name: "slow", Algo: module.Incremental}, + }, + }, + { + ID: "timeouts", + Title: "Timeouts on outgoing UDP queries", + Units: "timeouts/s", + Fam: "performance", + Ctx: "powerdns_recursor.timeouts", + Dims: module.Dims{ + {ID: "outgoing-timeouts", Name: "total", Algo: module.Incremental}, + {ID: "outgoing4-timeouts", Name: "ipv4", Algo: module.Incremental}, + {ID: "outgoing6-timeouts", Name: "ipv6", Algo: module.Incremental}, + }, + }, + { + ID: "drops", + Title: "Drops", + Units: "drops/s", + Fam: "performance", + Ctx: "powerdns_recursor.drops", + Dims: module.Dims{ + {ID: "over-capacity-drops", Algo: module.Incremental}, + {ID: "query-pipe-full-drops", Algo: module.Incremental}, + {ID: "too-old-drops", Algo: module.Incremental}, + {ID: "truncated-drops", Algo: module.Incremental}, + {ID: "empty-queries", Algo: module.Incremental}, + }, + }, + { + ID: "cache_usage", + Title: "Cache Usage", + Units: "events/s", + Fam: "cache", + Ctx: "powerdns_recursor.cache_usage", + Dims: module.Dims{ + {ID: "cache-hits", Algo: module.Incremental}, + {ID: "cache-misses", Algo: module.Incremental}, + {ID: "packetcache-hits", Name: "packet-cache-hits", Algo: module.Incremental}, + {ID: "packetcache-misses", Name: "packet-cache-misses", Algo: module.Incremental}, + }, + }, + { + ID: "cache_size", + Title: "Cache Size", + Units: "entries", + Fam: "cache", + Ctx: "powerdns_recursor.cache_size", + Dims: module.Dims{ + {ID: "cache-entries", Name: "cache"}, + {ID: "packetcache-entries", Name: "packet-cache"}, + {ID: "negcache-entries", Name: "negative-cache"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/collect.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/collect.go new file mode 100644 index 00000000000000..a25dab1f564acd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/collect.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + 
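+	// The PowerDNS Authoritative Server API serves the same path with the same
+	// response format; isPowerDNSRecursorMetrics below relies on recursor-only
+	// keys to reject Authoritative Server data.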
urlPathLocalStatistics = "/api/v1/servers/localhost/statistics" +) + +func (r *Recursor) collect() (map[string]int64, error) { + statistics, err := r.scrapeStatistics() + if err != nil { + return nil, err + } + + collected := make(map[string]int64) + + r.collectStatistics(collected, statistics) + + if !isPowerDNSRecursorMetrics(collected) { + return nil, errors.New("returned metrics aren't PowerDNS Recursor metrics") + } + + return collected, nil +} + +func isPowerDNSRecursorMetrics(collected map[string]int64) bool { + // PowerDNS Authoritative Server has same endpoint and returns data in the same format. + _, ok1 := collected["over-capacity-drops"] + _, ok2 := collected["tcp-questions"] + return ok1 && ok2 +} + +func (r *Recursor) collectStatistics(collected map[string]int64, statistics statisticMetrics) { + for _, s := range statistics { + // https://doc.powerdns.com/authoritative/http-api/statistics.html#statisticitem + if s.Type != "StatisticItem" { + continue + } + + value, ok := s.Value.(string) + if !ok { + r.Debugf("%s value (%v) unexpected type: want=string, got=%T.", s.Name, s.Value, s.Value) + continue + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + r.Debugf("%s value (%v) parse error: %v", s.Name, s.Value, err) + continue + } + + collected[s.Name] = v + } +} + +func (r *Recursor) scrapeStatistics() ([]statisticMetric, error) { + req, _ := web.NewHTTPRequest(r.Request) + req.URL.Path = urlPathLocalStatistics + + var statistics statisticMetrics + if err := r.doOKDecode(req, &statistics); err != nil { + return nil, err + } + + return statistics, nil +} + +func (r *Recursor) doOKDecode(req *http.Request, in interface{}) error { + resp, err := r.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) + } + + if err := json.NewDecoder(resp.Body).Decode(in); err != nil { + return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/config_schema.json b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/config_schema.json new file mode 100644 index 00000000000000..fcd19e15058df5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/powerdns_recursor job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git 
a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/init.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/init.go new file mode 100644 index 00000000000000..aa74eec2fee58f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (r Recursor) validateConfig() error { + if r.URL == "" { + return errors.New("URL not set") + } + if _, err := web.NewHTTPRequest(r.Request); err != nil { + return err + } + return nil +} + +func (r Recursor) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(r.Client) +} + +func (r Recursor) initCharts() (*module.Charts, error) { + return charts.Copy(), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/integrations/powerdns_recursor.md b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/integrations/powerdns_recursor.md new file mode 100644 index 00000000000000..3e23b649f8e408 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/integrations/powerdns_recursor.md @@ -0,0 +1,226 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/powerdns_recursor/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/powerdns_recursor/metadata.yaml" +sidebar_label: "PowerDNS Recursor" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# PowerDNS Recursor + + +<img src="https://netdata.cloud/img/powerdns.svg" width="150"/> + + +Plugin: go.d.plugin +Module: powerdns_recursor + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors PowerDNS Recursor instances. + +It collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api). + +Used endpoints: + +- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html) + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per PowerDNS Recursor instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
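+
+Each metric in the table below comes from a `StatisticItem` object returned by
+the statistics endpoint. A minimal sketch of the payload shape (values arrive
+as strings; the collector parses them into integers):
+
+```json
+[
+  { "name": "questions", "type": "StatisticItem", "value": "1" },
+  { "name": "cache-entries", "type": "StatisticItem", "value": "171" }
+]
+```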
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |
+| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |
+| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |
+| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |
+| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |
+| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |
+| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable webserver
+
+Follow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.
+
+
+#### Enable HTTP API
+
+Follow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/powerdns_recursor.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/powerdns_recursor.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8081 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8081
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8081
+    username: admin
+    password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
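+
+> Before adding a remote job, you can sanity-check that the API is reachable
+> (a sketch; add `-H 'X-API-Key: ...'` if the endpoint requires an API key):
+>
+> ```bash
+> curl -s http://203.0.113.0:8081/api/v1/servers/localhost/statistics | head -c 200
+> ```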
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8081 + + - name: remote + url: http://203.0.113.0:8081 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m powerdns_recursor + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metadata.yaml b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metadata.yaml new file mode 100644 index 00000000000000..82cb991274272c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metadata.yaml @@ -0,0 +1,240 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-powerdns_recursor + plugin_name: go.d.plugin + module_name: powerdns_recursor + monitored_instance: + name: PowerDNS Recursor + link: https://doc.powerdns.com/recursor/ + icon_filename: powerdns.svg + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - powerdns + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors PowerDNS Recursor instances. + + It collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api). + + Used endpoints: + + - [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html) + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable webserver + description: | + Follow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation. + - title: Enable HTTP API + description: | + Follow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation. + configuration: + file: + name: go.d/powerdns_recursor.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8081 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. 
+ default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + username: admin + password: password + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:8081 + + - name: remote + url: http://203.0.113.0:8081 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+ labels: [] + metrics: + - name: powerdns_recursor.questions_in + description: Incoming questions + unit: questions/s + chart_type: line + dimensions: + - name: total + - name: tcp + - name: ipv6 + - name: powerdns_recursor.questions_out + description: Outgoing questions + unit: questions/s + chart_type: line + dimensions: + - name: udp + - name: tcp + - name: ipv6 + - name: throttled + - name: powerdns_recursor.answer_time + description: Queries answered within a time range + unit: queries/s + chart_type: line + dimensions: + - name: 0-1ms + - name: 1-10ms + - name: 10-100ms + - name: 100-1000ms + - name: slow + - name: powerdns_recursor.timeouts + description: Timeouts on outgoing UDP queries + unit: timeouts/s + chart_type: line + dimensions: + - name: total + - name: ipv4 + - name: ipv6 + - name: powerdns_recursor.drops + description: Drops + unit: drops/s + chart_type: line + dimensions: + - name: over-capacity-drops + - name: query-pipe-full-drops + - name: too-old-drops + - name: truncated-drops + - name: empty-queries + - name: powerdns_recursor.cache_usage + description: Cache Usage + unit: events/s + chart_type: line + dimensions: + - name: cache-hits + - name: cache-misses + - name: packet-cache-hits + - name: packet-cache-misses + - name: powerdns_recursor.cache_size + description: Cache Size + unit: entries + chart_type: line + dimensions: + - name: cache + - name: packet-cache + - name: negative-cache diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metrics.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metrics.go new file mode 100644 index 00000000000000..a7fbd63c19ff98 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/metrics.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +// https://doc.powerdns.com/recursor/metrics.html +// https://docs.powerdns.com/recursor/performance.html#recursor-caches + +// PowerDNS Recursor documentation has no section about statistics objects, +// fortunately authoritative has. 
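+//
+// For illustration, a single item from the bundled testdata looks like:
+//
+//	{"name": "uptime", "type": "StatisticItem", "value": "1624"}
+//
+// The value arrives as a string and is parsed into an int64 during collection.
+// Format reference (authoritative docs, same shape):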
+// https://doc.powerdns.com/authoritative/http-api/statistics.html#objects +type ( + statisticMetrics []statisticMetric + statisticMetric struct { + Name string + Type string + Value interface{} + } +) diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor.go new file mode 100644 index 00000000000000..cd052ba6d40ae4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("powerdns_recursor", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Recursor { + return &Recursor{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8081", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type Recursor struct { + module.Base + Config `yaml:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func (r *Recursor) Init() bool { + err := r.validateConfig() + if err != nil { + r.Errorf("config validation: %v", err) + return false + } + + client, err := r.initHTTPClient() + if err != nil { + r.Errorf("init HTTP client: %v", err) + return false + } + r.httpClient = client + + cs, err := r.initCharts() + if err != nil { + r.Errorf("init charts: %v", err) + return false + } + r.charts = cs + + return true +} + +func (r *Recursor) Check() bool { + return len(r.Collect()) > 0 +} + +func (r *Recursor) Charts() *module.Charts { + return r.charts +} + +func (r *Recursor) Collect() map[string]int64 { + ms, err := r.collect() + if err != nil { + r.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (r *Recursor) Cleanup() { + if r.httpClient == nil { + return + } + r.httpClient.CloseIdleConnections() +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor_test.go b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor_test.go new file mode 100644 index 00000000000000..4ef3c2d086a0c0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/recursor_test.go @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package powerdns_recursor + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v431statistics, _ = os.ReadFile("testdata/v4.3.1/statistics.json") + authoritativeStatistics, _ = os.ReadFile("testdata/authoritative/statistics.json") +) + +func Test_testDataIsCorrectlyReadAndValid(t *testing.T) { + for name, data := range map[string][]byte{ + "v431statistics": v431statistics, + "authoritativeStatistics": authoritativeStatistics, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*Recursor)(nil), New()) +} + +func TestRecursor_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset URL": { + wantFail: 
true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:38001", + }, + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + recursor := New() + recursor.Config = test.config + + if test.wantFail { + assert.False(t, recursor.Init()) + } else { + assert.True(t, recursor.Init()) + } + }) + } +} + +func TestRecursor_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (r *Recursor, cleanup func()) + wantFail bool + }{ + "success on valid response v4.3.1": { + prepare: preparePowerDNSRecursorV431, + }, + "fails on response from PowerDNS Authoritative Server": { + wantFail: true, + prepare: preparePowerDNSRecursorAuthoritativeData, + }, + "fails on 404 response": { + wantFail: true, + prepare: preparePowerDNSRecursor404, + }, + "fails on connection refused": { + wantFail: true, + prepare: preparePowerDNSRecursorConnectionRefused, + }, + "fails on response with invalid data": { + wantFail: true, + prepare: preparePowerDNSRecursorInvalidData, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + recursor, cleanup := test.prepare() + defer cleanup() + require.True(t, recursor.Init()) + + if test.wantFail { + assert.False(t, recursor.Check()) + } else { + assert.True(t, recursor.Check()) + } + }) + } +} + +func TestRecursor_Charts(t *testing.T) { + recursor := New() + require.True(t, recursor.Init()) + assert.NotNil(t, recursor.Charts()) +} + +func TestRecursor_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestRecursor_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (r *Recursor, cleanup func()) + wantCollected map[string]int64 + }{ + "success on valid response v4.3.1": { + prepare: preparePowerDNSRecursorV431, + wantCollected: map[string]int64{ + "all-outqueries": 41, + "answers-slow": 1, + "answers0-1": 1, + "answers1-10": 1, + "answers10-100": 1, + "answers100-1000": 1, + "auth-zone-queries": 1, + "auth4-answers-slow": 1, + "auth4-answers0-1": 1, + "auth4-answers1-10": 5, + "auth4-answers10-100": 35, + "auth4-answers100-1000": 1, + "auth6-answers-slow": 1, + "auth6-answers0-1": 1, + "auth6-answers1-10": 1, + "auth6-answers10-100": 1, + "auth6-answers100-1000": 1, + "cache-entries": 171, + "cache-hits": 1, + "cache-misses": 1, + "case-mismatches": 1, + "chain-resends": 1, + "client-parse-errors": 1, + "concurrent-queries": 1, + "cpu-msec-thread-0": 439, + "cpu-msec-thread-1": 445, + "cpu-msec-thread-2": 466, + "dlg-only-drops": 1, + "dnssec-authentic-data-queries": 1, + "dnssec-check-disabled-queries": 1, + "dnssec-queries": 1, + "dnssec-result-bogus": 1, + "dnssec-result-indeterminate": 1, + "dnssec-result-insecure": 1, + "dnssec-result-nta": 1, + "dnssec-result-secure": 5, + "dnssec-validations": 5, + "dont-outqueries": 1, + "ecs-queries": 1, + "ecs-responses": 1, + "edns-ping-matches": 1, + "edns-ping-mismatches": 1, + "empty-queries": 1, + "failed-host-entries": 1, + "fd-usage": 32, + "ignored-packets": 1, + "ipv6-outqueries": 1, + "ipv6-questions": 1, + "malloc-bytes": 1, + "max-cache-entries": 1000000, + "max-mthread-stack": 1, + "max-packetcache-entries": 500000, + "negcache-entries": 1, + "no-packet-error": 1, + "noedns-outqueries": 1, + "noerror-answers": 1, + "noping-outqueries": 1, + "nsset-invalidations": 1, + 
"nsspeeds-entries": 78, + "nxdomain-answers": 1, + "outgoing-timeouts": 1, + "outgoing4-timeouts": 1, + "outgoing6-timeouts": 1, + "over-capacity-drops": 1, + "packetcache-entries": 1, + "packetcache-hits": 1, + "packetcache-misses": 1, + "policy-drops": 1, + "policy-result-custom": 1, + "policy-result-drop": 1, + "policy-result-noaction": 1, + "policy-result-nodata": 1, + "policy-result-nxdomain": 1, + "policy-result-truncate": 1, + "qa-latency": 1, + "qname-min-fallback-success": 1, + "query-pipe-full-drops": 1, + "questions": 1, + "real-memory-usage": 44773376, + "rebalanced-queries": 1, + "resource-limits": 1, + "security-status": 3, + "server-parse-errors": 1, + "servfail-answers": 1, + "spoof-prevents": 1, + "sys-msec": 1520, + "tcp-client-overflow": 1, + "tcp-clients": 1, + "tcp-outqueries": 1, + "tcp-questions": 1, + "throttle-entries": 1, + "throttled-out": 1, + "throttled-outqueries": 1, + "too-old-drops": 1, + "truncated-drops": 1, + "udp-in-errors": 1, + "udp-noport-errors": 1, + "udp-recvbuf-errors": 1, + "udp-sndbuf-errors": 1, + "unauthorized-tcp": 1, + "unauthorized-udp": 1, + "unexpected-packets": 1, + "unreachables": 1, + "uptime": 1624, + "user-msec": 465, + "variable-responses": 1, + "x-our-latency": 1, + "x-ourtime-slow": 1, + "x-ourtime0-1": 1, + "x-ourtime1-2": 1, + "x-ourtime16-32": 1, + "x-ourtime2-4": 1, + "x-ourtime4-8": 1, + "x-ourtime8-16": 1, + }, + }, + "fails on response from PowerDNS Authoritative Server": { + prepare: preparePowerDNSRecursorAuthoritativeData, + }, + "fails on 404 response": { + prepare: preparePowerDNSRecursor404, + }, + "fails on connection refused": { + prepare: preparePowerDNSRecursorConnectionRefused, + }, + "fails on response with invalid data": { + prepare: preparePowerDNSRecursorInvalidData, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + recursor, cleanup := test.prepare() + defer cleanup() + require.True(t, recursor.Init()) + + collected := recursor.Collect() + + assert.Equal(t, test.wantCollected, collected) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, recursor, collected) + } + }) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rec *Recursor, collected map[string]int64) { + for _, chart := range *rec.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) + } + } +} + +func preparePowerDNSRecursorV431() (*Recursor, func()) { + srv := preparePowerDNSRecursorEndpoint() + recursor := New() + recursor.URL = srv.URL + + return recursor, srv.Close +} + +func preparePowerDNSRecursorAuthoritativeData() (*Recursor, func()) { + srv := preparePowerDNSAuthoritativeEndpoint() + recursor := New() + recursor.URL = srv.URL + + return recursor, srv.Close +} + +func preparePowerDNSRecursorInvalidData() (*Recursor, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + recursor := New() + recursor.URL = srv.URL + + return recursor, srv.Close +} + +func preparePowerDNSRecursor404() (*Recursor, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + recursor := New() + recursor.URL = 
srv.URL + + return recursor, srv.Close +} + +func preparePowerDNSRecursorConnectionRefused() (*Recursor, func()) { + recursor := New() + recursor.URL = "http://127.0.0.1:38001" + + return recursor, func() {} +} + +func preparePowerDNSRecursorEndpoint() *httptest.Server { + return httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathLocalStatistics: + _, _ = w.Write(v431statistics) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} + +func preparePowerDNSAuthoritativeEndpoint() *httptest.Server { + return httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathLocalStatistics: + _, _ = w.Write(authoritativeStatistics) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/authoritative/statistics.json b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/authoritative/statistics.json new file mode 100644 index 00000000000000..72bb2f0a20bb62 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/authoritative/statistics.json @@ -0,0 +1,507 @@ +[ + { + "name": "corrupt-packets", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "cpu-iowait", + "type": "StatisticItem", + "value": "513" + }, + { + "name": "cpu-steal", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "deferred-cache-inserts", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "deferred-cache-lookup", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "deferred-packetcache-inserts", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "deferred-packetcache-lookup", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "dnsupdate-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "dnsupdate-changes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "dnsupdate-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "dnsupdate-refused", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "fd-usage", + "type": "StatisticItem", + "value": "23" + }, + { + "name": "incoming-notifications", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "key-cache-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "latency", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "meta-cache-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "open-tcp-connections", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "overload-drops", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "packetcache-hit", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "packetcache-miss", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "packetcache-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "qsize-q", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "query-cache-hit", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "query-cache-miss", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "query-cache-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "rd-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "real-memory-usage", + "type": "StatisticItem", + "value": "164507648" + }, + { + "name": "recursing-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "recursing-questions", + "type": 
"StatisticItem", + "value": "0" + }, + { + "name": "recursion-unanswered", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-logmessages-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-logmessages-size", + "type": "StatisticItem", + "value": "10" + }, + { + "name": "ring-noerror-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-noerror-queries-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-nxdomain-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-nxdomain-queries-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-queries-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-remotes-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-corrupt-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-corrupt-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-remotes-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-remotes-unauth-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-remotes-unauth-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-servfail-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-servfail-queries-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "ring-unauth-queries-capacity", + "type": "StatisticItem", + "value": "10000" + }, + { + "name": "ring-unauth-queries-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "security-status", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "servfail-packets", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "signature-cache-size", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "signatures", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "sys-msec", + "type": "StatisticItem", + "value": "128" + }, + { + "name": "tcp-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp4-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp4-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp4-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp6-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp6-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "tcp6-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "timedout-packets", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-do-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-in-errors", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-noport-errors", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-recvbuf-errors", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp-sndbuf-errors", + 
"type": "StatisticItem", + "value": "0" + }, + { + "name": "udp4-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp4-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp4-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp6-answers", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp6-answers-bytes", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "udp6-queries", + "type": "StatisticItem", + "value": "0" + }, + { + "name": "uptime", + "type": "StatisticItem", + "value": "207" + }, + { + "name": "user-msec", + "type": "StatisticItem", + "value": "56" + }, + { + "name": "response-by-qtype", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-sizes", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-by-rcode", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "logmessages", + "size": "10000", + "type": "RingStatisticItem", + "value": [ + { + "name": "[webserver] 088688d6-9976-4e4d-a6aa-2272f8c6f173 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] 662e4249-4e9a-42e7-b780-b81929875b8f HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] 8c79870a-9a47-4952-9166-02710d146ab3 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] dc029119-209f-4101-9e8f-82ab02d857d9 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "[webserver] fa61f546-8607-4771-bc9a-48ddc5a85dc0 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed", + "value": "1" + }, + { + "name": "About to create 3 backend threads for UDP", + "value": "1" + }, + { + "name": "Creating backend connection for TCP", + "value": "1" + }, + { + "name": "Done launching threads, ready to distribute questions", + "value": "1" + }, + { + "name": "Master/slave communicator launching", + "value": "1" + }, + { + "name": "No master domains need notifications", + "value": "1" + } + ] + }, + { + "name": "remotes", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "remotes-corrupt", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "remotes-unauth", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "noerror-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "nxdomain-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "servfail-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + }, + { + "name": "unauth-queries", + "size": "10000", + "type": "RingStatisticItem", + "value": [] + } +] diff --git a/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/v4.3.1/statistics.json b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/v4.3.1/statistics.json new file mode 100644 index 00000000000000..a31477959717c9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/powerdns_recursor/testdata/v4.3.1/statistics.json @@ -0,0 +1,587 @@ +[ + { + "name": "all-outqueries", + "type": "StatisticItem", + "value": "41" + }, + { + "name": 
"answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers1-10", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers10-100", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth-zone-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth4-answers1-10", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "auth4-answers10-100", + "type": "StatisticItem", + "value": "35" + }, + { + "name": "auth4-answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers1-10", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers10-100", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "auth6-answers100-1000", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cache-entries", + "type": "StatisticItem", + "value": "171" + }, + { + "name": "cache-hits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cache-misses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "case-mismatches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "chain-resends", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "client-parse-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "concurrent-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "cpu-msec-thread-0", + "type": "StatisticItem", + "value": "439" + }, + { + "name": "cpu-msec-thread-1", + "type": "StatisticItem", + "value": "445" + }, + { + "name": "cpu-msec-thread-2", + "type": "StatisticItem", + "value": "466" + }, + { + "name": "dlg-only-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-authentic-data-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-check-disabled-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-bogus", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-indeterminate", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-insecure", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-nta", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "dnssec-result-secure", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "dnssec-validations", + "type": "StatisticItem", + "value": "5" + }, + { + "name": "dont-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ecs-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ecs-responses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "edns-ping-matches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "edns-ping-mismatches", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "empty-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "failed-host-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "fd-usage", + "type": 
"StatisticItem", + "value": "32" + }, + { + "name": "ignored-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ipv6-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "ipv6-questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "malloc-bytes", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "max-cache-entries", + "type": "StatisticItem", + "value": "1000000" + }, + { + "name": "max-mthread-stack", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "max-packetcache-entries", + "type": "StatisticItem", + "value": "500000" + }, + { + "name": "negcache-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "no-packet-error", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noedns-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noerror-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "noping-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "nsset-invalidations", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "nsspeeds-entries", + "type": "StatisticItem", + "value": "78" + }, + { + "name": "nxdomain-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing4-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "outgoing6-timeouts", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "over-capacity-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-hits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "packetcache-misses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-custom", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-drop", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-noaction", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-nodata", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-nxdomain", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "policy-result-truncate", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "qa-latency", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "qname-min-fallback-success", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "query-pipe-full-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "real-memory-usage", + "type": "StatisticItem", + "value": "44773376" + }, + { + "name": "rebalanced-queries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "resource-limits", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "security-status", + "type": "StatisticItem", + "value": "3" + }, + { + "name": "server-parse-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "servfail-answers", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "spoof-prevents", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "sys-msec", + "type": "StatisticItem", + "value": "1520" + }, + { + "name": "tcp-client-overflow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-clients", + "type": 
"StatisticItem", + "value": "1" + }, + { + "name": "tcp-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "tcp-questions", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttle-entries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttled-out", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "throttled-outqueries", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "too-old-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "truncated-drops", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-in-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-noport-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-recvbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "udp-sndbuf-errors", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unauthorized-tcp", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unauthorized-udp", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unexpected-packets", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "unreachables", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "uptime", + "type": "StatisticItem", + "value": "1624" + }, + { + "name": "user-msec", + "type": "StatisticItem", + "value": "465" + }, + { + "name": "variable-responses", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-our-latency", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime-slow", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime0-1", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime1-2", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime16-32", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime2-4", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime4-8", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "x-ourtime8-16", + "type": "StatisticItem", + "value": "1" + }, + { + "name": "response-by-qtype", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-sizes", + "type": "MapStatisticItem", + "value": [] + }, + { + "name": "response-by-rcode", + "type": "MapStatisticItem", + "value": [] + } +] diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/README.md b/src/go/collectors/go.d.plugin/modules/prometheus/README.md new file mode 120000 index 00000000000000..13e59d14de5e35 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/README.md @@ -0,0 +1 @@ +integrations/prometheus_endpoint.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/cache.go b/src/go/collectors/go.d.plugin/modules/prometheus/cache.go new file mode 100644 index 00000000000000..5fc283e2f573c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/cache.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +func newCache() *cache { + return &cache{entries: make(map[string]*cacheEntry)} +} + +type ( + cache struct { + entries map[string]*cacheEntry + } + + cacheEntry struct { + seen bool + notSeenTimes int + charts []*module.Chart + } +) + +func (c *cache) hasP(key string) bool { + v, ok := c.entries[key] + if !ok { + v = &cacheEntry{} + c.entries[key] = v + } + v.seen = true + v.notSeenTimes = 0 + + return ok +} + +func (c *cache) 
addChart(key string, chart *module.Chart) { + if v, ok := c.entries[key]; ok { + v.charts = append(v.charts, chart) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/charts.go b/src/go/collectors/go.d.plugin/modules/prometheus/charts.go new file mode 100644 index 00000000000000..7092c95e9fc8ef --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/charts.go @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + + "github.com/prometheus/prometheus/model/labels" +) + +const ( + prioDefault = module.Priority + prioGORuntime = prioDefault + 10 +) + +func (p *Prometheus) addGaugeChart(id, name, help string, labels labels.Labels) { + units := getChartUnits(name) + + cType := module.Line + if strings.HasSuffix(units, "bytes") { + cType = module.Area + } + + chart := &module.Chart{ + ID: id, + Title: getChartTitle(name, help), + Units: units, + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name), + Type: cType, + Priority: getChartPriority(name), + Dims: module.Dims{ + {ID: id, Name: name, Div: precision}, + }, + } + + for _, lbl := range labels { + chart.Labels = append(chart.Labels, + module.Label{Key: lbl.Name, Value: lbl.Value}, + ) + } + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + return + } + + p.cache.addChart(id, chart) +} + +func (p *Prometheus) addCounterChart(id, name, help string, labels labels.Labels) { + units := getChartUnits(name) + + switch units { + case "seconds", "time": + default: + units += "/s" + } + + cType := module.Line + if strings.HasSuffix(units, "bytes/s") { + cType = module.Area + } + + chart := &module.Chart{ + ID: id, + Title: getChartTitle(name, help), + Units: units, + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name), + Type: cType, + Priority: getChartPriority(name), + Dims: module.Dims{ + {ID: id, Name: name, Algo: module.Incremental, Div: precision}, + }, + } + for _, lbl := range labels { + chart.Labels = append(chart.Labels, + module.Label{Key: lbl.Name, Value: lbl.Value}, + ) + } + + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + return + } + + p.cache.addChart(id, chart) +} + +func (p *Prometheus) addSummaryCharts(id, name, help string, labels labels.Labels, quantiles []prometheus.Quantile) { + units := getChartUnits(name) + + switch units { + case "seconds", "time": + default: + units += "/s" + } + + charts := module.Charts{ + { + ID: id, + Title: getChartTitle(name, help), + Units: units, + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name), + Priority: getChartPriority(name), + Dims: func() (dims module.Dims) { + for _, v := range quantiles { + s := formatFloat(v.Quantile()) + dims = append(dims, &module.Dim{ + ID: fmt.Sprintf("%s_quantile=%s", id, s), + Name: fmt.Sprintf("quantile_%s", s), + Div: precision * precision, + }) + } + return dims + }(), + }, + { + ID: id + "_sum", + Title: getChartTitle(name, help), + Units: units, + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name) + "_sum", + Priority: getChartPriority(name), + Dims: module.Dims{ + {ID: id + "_sum", Name: name + "_sum", Algo: module.Incremental, Div: precision}, + }, + }, + { + ID: id + "_count", + Title: getChartTitle(name, help), + Units: "events/s", + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name) + "_count", + Priority: 
getChartPriority(name), + Dims: module.Dims{ + {ID: id + "_count", Name: name + "_count", Algo: module.Incremental}, + }, + }, + } + + for _, chart := range charts { + for _, lbl := range labels { + chart.Labels = append(chart.Labels, module.Label{Key: lbl.Name, Value: lbl.Value}) + } + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + continue + } + p.cache.addChart(id, chart) + } +} + +func (p *Prometheus) addHistogramCharts(id, name, help string, labels labels.Labels, buckets []prometheus.Bucket) { + units := getChartUnits(name) + + switch units { + case "seconds", "time": + default: + units += "/s" + } + + charts := module.Charts{ + { + ID: id, + Title: getChartTitle(name, help), + Units: "observations/s", + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name), + Priority: getChartPriority(name), + Dims: func() (dims module.Dims) { + for _, v := range buckets { + s := formatFloat(v.UpperBound()) + dims = append(dims, &module.Dim{ + ID: fmt.Sprintf("%s_bucket=%s", id, s), + Name: fmt.Sprintf("bucket_%s", s), + Algo: module.Incremental, + }) + } + return dims + }(), + }, + { + ID: id + "_sum", + Title: getChartTitle(name, help), + Units: units, + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name) + "_sum", + Priority: getChartPriority(name), + Dims: module.Dims{ + {ID: id + "_sum", Name: name + "_sum", Algo: module.Incremental, Div: precision}, + }, + }, + { + ID: id + "_count", + Title: getChartTitle(name, help), + Units: "events/s", + Fam: getChartFamily(name), + Ctx: getChartContext(p.application(), name) + "_count", + Priority: getChartPriority(name), + Dims: module.Dims{ + {ID: id + "_count", Name: name + "_count", Algo: module.Incremental}, + }, + }, + } + + for _, chart := range charts { + for _, lbl := range labels { + chart.Labels = append(chart.Labels, module.Label{Key: lbl.Name, Value: lbl.Value}) + } + if err := p.Charts().Add(chart); err != nil { + p.Warning(err) + continue + } + p.cache.addChart(id, chart) + } +} + +func (p *Prometheus) application() string { + if p.Application != "" { + return p.Application + } + return p.Name +} + +func getChartTitle(name, help string) string { + if help == "" { + return fmt.Sprintf("Metric \"%s\"", name) + } + + help = strings.Replace(help, "'", "", -1) + help = strings.TrimSuffix(help, ".") + + return help +} + +func getChartContext(app, name string) string { + if app == "" { + return fmt.Sprintf("prometheus.%s", name) + } + return fmt.Sprintf("prometheus.%s.%s", app, name) +} + +func getChartFamily(metric string) (fam string) { + if strings.HasPrefix(metric, "go_") { + return "go" + } + if strings.HasPrefix(metric, "process_") { + return "process" + } + if parts := strings.SplitN(metric, "_", 3); len(parts) < 3 { + fam = metric + } else { + fam = parts[0] + "_" + parts[1] + } + + // remove number suffix if any + // load1, load5, load15 => load + i := len(fam) - 1 + for i >= 0 && fam[i] >= '0' && fam[i] <= '9' { + i-- + } + if i > 0 { + return fam[:i+1] + } + return fam +} + +func getChartUnits(metric string) string { + // https://prometheus.io/docs/practices/naming/#metric-names + // ...must have a single unit (i.e. do not mix seconds with milliseconds, or seconds with bytes). + // ...should have a suffix describing the unit, in plural form. 
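+	// For example (sample inputs, for illustration only):
+	//   go_memstats_alloc_bytes           -> "bytes"
+	//   http_request_duration_seconds_sum -> "seconds" ("_sum" is stripped, lookup recurses)
+	//   requests (no '_' in the name)     -> "events"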
+ // Note that an accumulating count has total as a suffix, in addition to the unit if applicable + + idx := strings.LastIndexByte(metric, '_') + if idx == -1 { + return "events" + } + switch suffix := metric[idx:]; suffix { + case "_total", "_sum", "_count": + return getChartUnits(metric[:idx]) + } + switch units := metric[idx+1:]; units { + case "hertz": + return "Hz" + default: + return units + } +} + +func getChartPriority(name string) int { + if strings.HasPrefix(name, "go_") || strings.HasPrefix(name, "process_") { + return prioGORuntime + } + return prioDefault +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/collect.go b/src/go/collectors/go.d.plugin/modules/prometheus/collect.go new file mode 100644 index 00000000000000..4494b885967aab --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/collect.go @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" +) + +const ( + precision = 1000 +) + +func (p *Prometheus) collect() (map[string]int64, error) { + mfs, err := p.prom.Scrape() + if err != nil { + return nil, err + } + + if mfs.Len() == 0 { + p.Warningf("endpoint '%s' returned 0 metric families", p.URL) + return nil, nil + } + + if p.ExpectedPrefix != "" { + if !hasPrefix(mfs, p.ExpectedPrefix) { + return nil, fmt.Errorf("'%s' metrics have no expected prefix (%s)", p.URL, p.ExpectedPrefix) + } + p.ExpectedPrefix = "" + } + + if p.MaxTS > 0 { + if n := calcMetrics(mfs); n > p.MaxTS { + return nil, fmt.Errorf("'%s' num of time series (%d) > limit (%d)", p.URL, n, p.MaxTS) + } + p.MaxTS = 0 + } + + mx := make(map[string]int64) + + p.resetCache() + defer p.removeStaleCharts() + + for _, mf := range mfs { + if strings.HasSuffix(mf.Name(), "_info") { + continue + } + if p.MaxTSPerMetric > 0 && len(mf.Metrics()) > p.MaxTSPerMetric { + p.Debugf("metric '%s' num of time series (%d) > limit (%d), skipping it", + mf.Name(), len(mf.Metrics()), p.MaxTSPerMetric) + continue + } + + switch mf.Type() { + case textparse.MetricTypeGauge: + p.collectGauge(mx, mf) + case textparse.MetricTypeCounter: + p.collectCounter(mx, mf) + case textparse.MetricTypeSummary: + p.collectSummary(mx, mf) + case textparse.MetricTypeHistogram: + p.collectHistogram(mx, mf) + case textparse.MetricTypeUnknown: + p.collectUntyped(mx, mf) + } + } + + return mx, nil +} + +func (p *Prometheus) collectGauge(mx map[string]int64, mf *prometheus.MetricFamily) { + for _, m := range mf.Metrics() { + if m.Gauge() == nil || math.IsNaN(m.Gauge().Value()) { + continue + } + + id := mf.Name() + p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addGaugeChart(id, mf.Name(), mf.Help(), m.Labels()) + } + + mx[id] = int64(m.Gauge().Value() * precision) + } +} + +func (p *Prometheus) collectCounter(mx map[string]int64, mf *prometheus.MetricFamily) { + for _, m := range mf.Metrics() { + if m.Counter() == nil || math.IsNaN(m.Counter().Value()) { + continue + } + + id := mf.Name() + p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addCounterChart(id, mf.Name(), mf.Help(), m.Labels()) + } + + mx[id] = int64(m.Counter().Value() * precision) + } +} + +func (p *Prometheus) collectSummary(mx map[string]int64, mf *prometheus.MetricFamily) { + for _, m := range mf.Metrics() { + if m.Summary() == nil || len(m.Summary().Quantiles()) == 0 { + continue + } + + id := mf.Name() 
+ p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addSummaryCharts(id, mf.Name(), mf.Help(), m.Labels(), m.Summary().Quantiles()) + } + + for _, v := range m.Summary().Quantiles() { + if !math.IsNaN(v.Value()) { + dimID := fmt.Sprintf("%s_quantile=%s", id, formatFloat(v.Quantile())) + mx[dimID] = int64(v.Value() * precision * precision) + } + } + + mx[id+"_sum"] = int64(m.Summary().Sum() * precision) + mx[id+"_count"] = int64(m.Summary().Count()) + } +} + +func (p *Prometheus) collectHistogram(mx map[string]int64, mf *prometheus.MetricFamily) { + for _, m := range mf.Metrics() { + if m.Histogram() == nil || len(m.Histogram().Buckets()) == 0 { + continue + } + + id := mf.Name() + p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addHistogramCharts(id, mf.Name(), mf.Help(), m.Labels(), m.Histogram().Buckets()) + } + + for _, v := range m.Histogram().Buckets() { + if !math.IsNaN(v.CumulativeCount()) { + dimID := fmt.Sprintf("%s_bucket=%s", id, formatFloat(v.UpperBound())) + mx[dimID] = int64(v.CumulativeCount()) + } + } + + mx[id+"_sum"] = int64(m.Histogram().Sum() * precision) + mx[id+"_count"] = int64(m.Histogram().Count()) + } +} + +func (p *Prometheus) collectUntyped(mx map[string]int64, mf *prometheus.MetricFamily) { + for _, m := range mf.Metrics() { + if m.Untyped() == nil || math.IsNaN(m.Untyped().Value()) { + continue + } + + if p.isFallbackTypeGauge(mf.Name()) { + id := mf.Name() + p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addGaugeChart(id, mf.Name(), mf.Help(), m.Labels()) + } + + mx[id] = int64(m.Untyped().Value() * precision) + } + + if p.isFallbackTypeCounter(mf.Name()) || strings.HasSuffix(mf.Name(), "_total") { + id := mf.Name() + p.joinLabels(m.Labels()) + + if !p.cache.hasP(id) { + p.addCounterChart(id, mf.Name(), mf.Help(), m.Labels()) + } + + mx[id] = int64(m.Untyped().Value() * precision) + } + } +} + +func (p *Prometheus) isFallbackTypeGauge(name string) bool { + return p.fallbackType.gauge != nil && p.fallbackType.gauge.MatchString(name) +} + +func (p *Prometheus) isFallbackTypeCounter(name string) bool { + return p.fallbackType.counter != nil && p.fallbackType.counter.MatchString(name) +} + +func (p *Prometheus) joinLabels(labels labels.Labels) string { + var sb strings.Builder + for _, lbl := range labels { + name, val := lbl.Name, lbl.Value + if name == "" || val == "" { + continue + } + + if strings.IndexByte(val, ' ') != -1 { + val = spaceReplacer.Replace(val) + } + if strings.IndexByte(val, '\\') != -1 { + if val = decodeLabelValue(val); strings.IndexByte(val, '\\') != -1 { + val = backslashReplacer.Replace(val) + } + } + + sb.WriteString("-" + name + "=" + val) + } + return sb.String() +} + +func (p *Prometheus) resetCache() { + for _, v := range p.cache.entries { + v.seen = false + } +} + +const maxNotSeenTimes = 10 + +func (p *Prometheus) removeStaleCharts() { + for k, v := range p.cache.entries { + if v.seen { + continue + } + if v.notSeenTimes++; v.notSeenTimes >= maxNotSeenTimes { + for _, chart := range v.charts { + chart.MarkRemove() + chart.MarkNotCreated() + } + delete(p.cache.entries, k) + } + } +} + +func decodeLabelValue(value string) string { + v, err := strconv.Unquote("\"" + value + "\"") + if err != nil { + return value + } + return v +} + +var ( + spaceReplacer = strings.NewReplacer(" ", "_") + backslashReplacer = strings.NewReplacer(`\`, "_") +) + +func hasPrefix(mf map[string]*prometheus.MetricFamily, prefix string) bool { + for name := range mf { + if strings.HasPrefix(name, prefix) { + return true + } + } + return 
false +} + +func calcMetrics(mfs prometheus.MetricFamilies) int { + var n int + for _, mf := range mfs { + n += len(mf.Metrics()) + } + return n +} + +func formatFloat(v float64) string { + return strconv.FormatFloat(v, 'f', -1, 64) +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/config_schema.json b/src/go/collectors/go.d.plugin/modules/prometheus/config_schema.json new file mode 100644 index 00000000000000..60261d542e8f5a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/config_schema.json @@ -0,0 +1,113 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/prometheus job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "selector": { + "type": "object", + "properties": { + "allow": { + "type": "array", + "items": { + "type": "string" + } + }, + "deny": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "allow", + "deny" + ] + }, + "fallback_type": { + "type": "object", + "properties": { + "counter": { + "type": "array", + "items": { + "type": "string" + } + }, + "gauge": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "counter", + "gauge" + ] + }, + "bearer_token": { + "type": "string" + }, + "expected_prefix": { + "type": "string" + }, + "max_time_series": { + "type": "integer" + }, + "max_time_series_per_metric": { + "type": "integer" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/init.go b/src/go/collectors/go.d.plugin/modules/prometheus/init.go new file mode 100644 index 00000000000000..638205a95f59da --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/init.go @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "errors" + "fmt" + "os" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *Prometheus) validateConfig() error { + if p.URL == "" { + return errors.New("'url' can not be empty") + } + return nil +} + +func (p *Prometheus) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(p.Client) + if err != nil { + return nil, fmt.Errorf("init HTTP client: %v", err) + } + + req := p.Request.Copy() + if p.BearerTokenFile != "" { + token, err := os.ReadFile(p.BearerTokenFile) + if err != nil { + return nil, fmt.Errorf("bearer token file: %v", err) + } + req.Headers["Authorization"] = "Bearer " + string(token) + } + + sr, err := p.Selector.Parse() + if err != nil { + return nil, fmt.Errorf("parsing selector: %v", err) + } + + if sr != nil { + return prometheus.NewWithSelector(httpClient, req, sr), nil + } + return prometheus.New(httpClient, req), nil +} + +func (p *Prometheus) initFallbackTypeMatcher(expr []string) (matcher.Matcher, 
error) { + if len(expr) == 0 { + return nil, nil + } + + m := matcher.FALSE() + + for _, pattern := range expr { + v, err := matcher.NewGlobMatcher(pattern) + if err != nil { + return nil, fmt.Errorf("error on parsing pattern '%s': %v", pattern, err) + } + m = matcher.Or(m, v) + } + + return m, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/4d_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/4d_server.md new file mode 100644 index 00000000000000..94978ed423e545 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/4d_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/4d_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "4D Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# 4D Server + + +<img src="https://netdata.cloud/img/4d_server.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor 4D Server performance metrics for efficient application management and optimization. + + +Metrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
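+
+As a rough standalone sketch (not the collector's actual code), the name-based part of these rules
+could look like the following in Go. Patterns use the same shell-style matching as Go's
+`filepath.Match`, and the label-based Summary/Histogram checks are omitted; `classifyUntyped` is a
+hypothetical helper, not part of the plugin:
+
+```go
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// classifyUntyped is an illustrative helper: fallback_type patterns are
+// checked first, then the '_total' suffix; anything else is ignored.
+func classifyUntyped(name string, gaugeGlobs, counterGlobs []string) string {
+	match := func(globs []string) bool {
+		for _, g := range globs {
+			if ok, _ := filepath.Match(g, name); ok {
+				return true
+			}
+		}
+		return false
+	}
+	switch {
+	case match(gaugeGlobs):
+		return "gauge"
+	case match(counterGlobs), strings.HasSuffix(name, "_total"):
+		return "counter"
+	default:
+		return "ignored"
+	}
+}
+
+func main() {
+	fmt.Println(classifyUntyped("node_power_watts", []string{"*_watts"}, nil)) // gauge
+	fmt.Println(classifyUntyped("http_requests_total", nil, nil))              // counter
+	fmt.Println(classifyUntyped("some_metric", nil, nil))                      // ignored
+}
+```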
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/8430ft_modem.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/8430ft_modem.md new file mode 100644 index 00000000000000..3f8debddb9c7f9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/8430ft_modem.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/8430ft_modem.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "8430FT modem" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# 8430FT modem + + +<img src="https://netdata.cloud/img/mtc.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/a10_acos_network_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/a10_acos_network_devices.md new file mode 100644 index 00000000000000..93db7f0e27152e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/a10_acos_network_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/a10_acos_network_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "A10 ACOS network devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# A10 ACOS network devices + + +<img src="https://netdata.cloud/img/a10-networks.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor A10 Networks device metrics for comprehensive management and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/airthings_waveplus_air_sensor.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/airthings_waveplus_air_sensor.md new file mode 100644 index 00000000000000..be9fae0ad2a724 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/airthings_waveplus_air_sensor.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/airthings_waveplus_air_sensor.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Airthings Waveplus air sensor" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Airthings Waveplus air sensor + + +<img src="https://netdata.cloud/img/airthings.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Waveplus radon sensor metrics for efficient indoor air quality monitoring and management. + + +Metrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
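+
+Each per-label-set chart above is keyed by an id built from the metric name plus its label set; the
+collector's `joinLabels` additionally sanitizes spaces and backslashes in label values. A simplified,
+illustrative sketch of that idea (`seriesID` is a hypothetical helper; the real code preserves the
+scrape's label order rather than sorting):
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// seriesID derives a stable id from a metric name and its labels,
+// replacing characters that are awkward in chart/dimension ids.
+func seriesID(metric string, lbls map[string]string) string {
+	keys := make([]string, 0, len(lbls))
+	for k := range lbls {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys) // Go maps are unordered, so sort for stability
+
+	sanitize := strings.NewReplacer(" ", "_", `\`, "_")
+
+	var sb strings.Builder
+	sb.WriteString(metric)
+	for _, k := range keys {
+		if v := lbls[k]; v != "" {
+			sb.WriteString("-" + k + "=" + sanitize.Replace(v))
+		}
+	}
+	return sb.String()
+}
+
+func main() {
+	fmt.Println(seriesID("node_cpu_seconds_total", map[string]string{"cpu": "0", "mode": "user"}))
+	// Output: node_cpu_seconds_total-cpu=0-mode=user
+}
+```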
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_edge_dns_traffic.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_edge_dns_traffic.md new file mode 100644 index 00000000000000..d934150028a725 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_edge_dns_traffic.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/akamai_edge_dns_traffic.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Akamai Edge DNS Traffic" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Akamai Edge DNS Traffic + + +<img src="https://netdata.cloud/img/akamai.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track and analyze Akamai Edge DNS traffic for enhanced performance and security. + + +Metrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_global_traffic_management.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_global_traffic_management.md new file mode 100644 index 00000000000000..38acbf0abd9f42 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akamai_global_traffic_management.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/akamai_global_traffic_management.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Akamai Global Traffic Management" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Akamai Global Traffic Management + + +<img src="https://netdata.cloud/img/akamai.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover. + + +Metrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akami_cloudmonitor.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akami_cloudmonitor.md
new file mode 100644
index 00000000000000..2953cbdde1a7cb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/akami_cloudmonitor.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/akami_cloudmonitor.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Akamai Cloudmonitor"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Akamai Cloudmonitor
+
+
+<img src="https://netdata.cloud/img/akamai.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alamos_fe2_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alamos_fe2_server.md new file mode 100644 index 00000000000000..3917f61913ef7a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alamos_fe2_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/alamos_fe2_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Alamos FE2 server" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Alamos FE2 server + + +<img src="https://netdata.cloud/img/alamos_fe2.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Alamos FE2 systems for improved performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alibaba_cloud.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alibaba_cloud.md new file mode 100644 index 00000000000000..fc0ded5e9227c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/alibaba_cloud.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/alibaba_cloud.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Alibaba Cloud" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Alibaba Cloud + + +<img src="https://netdata.cloud/img/alibaba-cloud.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Alibaba Cloud services and resources for efficient management and cost optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+ +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/altaro_backup.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/altaro_backup.md new file mode 100644 index 00000000000000..bd12eef54093fb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/altaro_backup.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/altaro_backup.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Altaro Backup" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Altaro Backup + + +<img src="https://netdata.cloud/img/altaro.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Altaro Backup performance metrics to ensure smooth data protection and recovery operations. + + +Metrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
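+
+As a shortcut, the steps below can be combined into a single command (a sketch that assumes the default `plugins.d` path mentioned below; adjust it if your installation differs):
+
+```bash
+# run the plugin in debug mode as the netdata user, targeting the prometheus module
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```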
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/amd_cpu_&_gpu.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/amd_cpu_&_gpu.md new file mode 100644 index 00000000000000..5d6471921a1ee0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/amd_cpu_&_gpu.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/amd_cpu_&_gpu.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AMD CPU & GPU" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AMD CPU & GPU + + +<img src="https://netdata.cloud/img/amd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor AMD System Management Interface performance for optimized hardware management. + + +Metrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/andrews_&_arnold_line_status.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/andrews_&_arnold_line_status.md new file mode 100644 index 00000000000000..97e0bbd1a5da98 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/andrews_&_arnold_line_status.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/andrews_&_arnold_line_status.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Andrews & Arnold line status" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Andrews & Arnold line status + + +<img src="https://netdata.cloud/img/andrewsarnold.jpg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
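+
+For example, a hypothetical exporter exposing a series named `myapp_sessions_active` with no '# TYPE' line (an illustrative name, not from this exporter) would normally be ignored; a `fallback_type` rule, sketched below using the option syntax documented later in this page, tells the collector to treat it as a Gauge instead:
+
+```yaml
+# sketch: classify the untyped 'myapp_sessions_active' series as a Gauge
+fallback_type:
+  gauge:
+    - myapp_sessions_active
+```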
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_airflow.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_airflow.md new file mode 100644 index 00000000000000..ed1a171aff6d89 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_airflow.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/apache_airflow.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Apache Airflow" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Apache Airflow + + +<img src="https://netdata.cloud/img/airflow.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Apache Airflow metrics to optimize task scheduling and workflow management. + + +Metrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_flink.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_flink.md new file mode 100644 index 00000000000000..ac64e9a2490767 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apache_flink.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/apache_flink.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Apache Flink" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Apache Flink + + +<img src="https://netdata.cloud/img/apache_flink.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Apache Flink metrics for efficient stream processing and application management. + + +Metrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apicast.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apicast.md new file mode 100644 index 00000000000000..0be15112f6d2cd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apicast.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/apicast.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "APIcast" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# APIcast + + +<img src="https://netdata.cloud/img/apicast.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor APIcast performance metrics to optimize API gateway operations and management. + + +Metrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
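+
+You can check that an exporter endpoint is reachable and serves metrics in the Prometheus exposition format before configuring a collection job. A minimal sketch (assuming the exporter listens on `127.0.0.1:9090`, the placeholder address used in the configuration examples below):
+
+```bash
+# Fetch the beginning of the metrics payload; expect '# TYPE' lines and metric samples
+curl -s http://127.0.0.1:9090/metrics | head -n 20
+```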
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [APIcast](https://github.com/3scale/apicast) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apple_time_machine.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apple_time_machine.md new file mode 100644 index 00000000000000..cac726b194cb5c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/apple_time_machine.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/apple_time_machine.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Apple Time Machine" +learn_status: "Published" +learn_rel_path: "Data Collection/macOS Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Apple Time Machine + + +<img src="https://netdata.cloud/img/apple.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Apple Time Machine backup metrics for efficient data protection and recovery. + + +Metrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arm_hwcpipe.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arm_hwcpipe.md
new file mode 100644
index 00000000000000..bbfdfaa7221109
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arm_hwcpipe.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/arm_hwcpipe.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "ARM HWCPipe"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ARM HWCPipe
+
+
+<img src="https://netdata.cloud/img/arm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep track of ARM-based Android devices and get metrics for efficient performance optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication.
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aruba_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aruba_devices.md new file mode 100644 index 00000000000000..f1360738f588e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aruba_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aruba_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Aruba devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Aruba devices + + +<img src="https://netdata.cloud/img/aruba.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Aruba Networks devices performance metrics for comprehensive network management and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arvancloud_cdn.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arvancloud_cdn.md new file mode 100644 index 00000000000000..68e1bdc55eb13a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/arvancloud_cdn.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/arvancloud_cdn.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ArvanCloud CDN" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ArvanCloud CDN + + +<img src="https://netdata.cloud/img/arvancloud.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management. + + +Metrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/audisto.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/audisto.md new file mode 100644 index 00000000000000..35dda7b33a1791 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/audisto.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/audisto.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Audisto" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Audisto + + +<img src="https://netdata.cloud/img/audisto.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Audisto SEO and website metrics for improved search performance and optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/authlog.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/authlog.md new file mode 100644 index 00000000000000..3f771c0c160c3e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/authlog.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/authlog.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AuthLog" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AuthLog + + +<img src="https://netdata.cloud/img/linux.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor authentication logs for security insights and efficient access management. + + +Metrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
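+
+To see how these rules would apply to a real endpoint, you can list which series carry a `# TYPE` hint; anything without one falls through to the untyped rules above. This is only a sketch, and the port is an assumption: use the one your AuthLog Exporter listens on.
+
+```bash
+# Print the TYPE hints the exporter emits. Series whose names never
+# appear here are processed by the untyped-metric rules described above.
+curl -s http://127.0.0.1:9090/metrics | grep '^# TYPE'
+```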
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_compute_instances.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_compute_instances.md new file mode 100644 index 00000000000000..80eddeee435d1f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_compute_instances.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_ec2_compute_instances.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS EC2 Compute instances" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS EC2 Compute instances + + +<img src="https://netdata.cloud/img/aws-ec2.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track AWS EC2 instances key metrics for optimized performance and cost management. + + +Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
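+
+Once you know the plugin path, the step-by-step walkthrough below can also be collapsed into a single invocation (a sketch; adjust the path if your installation differs):
+
+```bash
+# Run the plugin once in debug mode as the netdata user.
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```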
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_spot_instance.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_spot_instance.md
new file mode 100644
index 00000000000000..bae99f9aaaed16
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ec2_spot_instance.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_ec2_spot_instance.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS EC2 Spot Instance"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS EC2 Spot Instance
+
+
+<img src="https://netdata.cloud/img/aws-ec2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ecs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ecs.md new file mode 100644 index 00000000000000..00ca3dc0b750bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_ecs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_ecs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS ECS" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS ECS + + +<img src="https://netdata.cloud/img/amazon-ecs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on AWS ECS services and resources for optimized container management and orchestration. + + +Metrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
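+
+As a concrete illustration of the `selector` option documented under Configuration below: allow patterns are ORed together, deny patterns are ORed together, and a series is kept only when it matches the allow side and not the deny side. The metric names in this sketch are hypothetical rather than taken from the AWS ECS exporter, and the glob-style patterns assume the default syntax described in the selector README linked below.
+
+```yaml
+selector:
+  allow:
+    # keep any hypothetical ECS service series
+    - ecs_service_*
+  deny:
+    # drop raw histogram buckets
+    - '*_bucket'
+```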
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_health_events.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_health_events.md new file mode 100644 index 00000000000000..03f8039772a22e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_health_events.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_health_events.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS Health events" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS Health events + + +<img src="https://netdata.cloud/img/aws.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track AWS service health metrics for proactive incident management and resolution. + + +Metrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_instance_health.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_instance_health.md new file mode 100644 index 00000000000000..08ff6239f6ca00 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_instance_health.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_instance_health.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS instance health" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS instance health + + +<img src="https://netdata.cloud/img/aws.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor the health of AWS instances for improved performance and availability. + + +Metrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_quota.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_quota.md new file mode 100644 index 00000000000000..4646ec80b6bfad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_quota.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_quota.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS Quota" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS Quota + + +<img src="https://netdata.cloud/img/aws.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor AWS service quotas for effective resource usage and cost management. 
+
+
+Metrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_rds.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_rds.md new file mode 100644 index 00000000000000..f383f52bd53565 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_rds.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_rds.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS RDS" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS RDS + + +<img src="https://netdata.cloud/img/aws-rds.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_s3_buckets.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_s3_buckets.md new file mode 100644 index 00000000000000..abbdadac4dfec6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_s3_buckets.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_s3_buckets.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS S3 buckets" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS S3 buckets + + +<img src="https://netdata.cloud/img/aws-s3.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency. + + +Metrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
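+
+You can still define your own alert on any chart this integration creates. The sketch below is a hypothetical Netdata health template (placed in a file under `health.d/` via `edit-config`); the chart context on the `on:` line and the `s3_list_success` dimension are illustrative assumptions rather than values shipped with this integration, so copy the real context and dimension names from the chart's details on your dashboard before using it.
+
+```conf
+# Hypothetical example: warn when the S3 exporter reports a failed bucket listing.
+# 'on:' must name the actual chart context created for your job (check the dashboard).
+ template: s3_bucket_list_failed
+       on: prometheus.s3_list_success
+    every: 1m
+     calc: $s3_list_success
+     warn: $this == 0
+     info: the S3 exporter reported a failed bucket listing
+```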
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_sqs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_sqs.md new file mode 100644 index 00000000000000..58f888746c5b7c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/aws_sqs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/aws_sqs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "AWS SQS" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# AWS SQS + + +<img src="https://netdata.cloud/img/aws-sqs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track AWS SQS messaging metrics for efficient message processing and queue management. + + +Metrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_ad_app_passwords.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_ad_app_passwords.md
new file mode 100644
index 00000000000000..7f4cfcd7c630cb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_ad_app_passwords.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_ad_app_passwords.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure AD App passwords"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure AD App passwords
+
+
+<img src="https://netdata.cloud/img/azure.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Safeguard and track Azure App secrets for enhanced security and access management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_application.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_application.md new file mode 100644 index 00000000000000..4ef23a4142c951 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_application.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_application.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Azure application" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Azure application + + +<img src="https://netdata.cloud/img/azure.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Azure Monitor metrics for comprehensive resource management and performance optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
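+
+You can still attach a health template of your own to any chart this integration creates. A minimal, hypothetical sketch follows (added to a file under `health.d/` via `edit-config`); the chart context on the `on:` line, the dimension, and the threshold are illustrative assumptions, so replace them with the real context and dimension names shown in the chart's details on your dashboard.
+
+```conf
+# Hypothetical example: warn when a collected Azure Monitor gauge crosses a threshold.
+# 'on:' must name the actual chart context created for your job (check the dashboard).
+ template: azure_example_metric_high
+       on: prometheus.azure_example_metric
+    every: 1m
+     calc: $azure_example_metric
+     warn: $this > 100
+     info: example Azure Monitor metric is above the illustrative threshold
+```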
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_elastic_pool_sql.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_elastic_pool_sql.md new file mode 100644 index 00000000000000..65986c5cb4c946 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_elastic_pool_sql.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_elastic_pool_sql.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Azure Elastic Pool SQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Azure Elastic Pool SQL + + +<img src="https://netdata.cloud/img/azure-elastic-sql.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Azure Elastic SQL performance metrics for efficient database management and query optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (those without a '# TYPE' hint) are processed as follows: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no | +| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no | +| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no | +| timeout | HTTP request timeout.
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_resources.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_resources.md new file mode 100644 index 00000000000000..49300906abf7b8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_resources.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_resources.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Azure Resources" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Azure Resources + + +<img src="https://netdata.cloud/img/azure.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Azure resources vital metrics for efficient cloud management and cost optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_service_bus.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_service_bus.md new file mode 100644 index 00000000000000..f1b395c336408a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_service_bus.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_service_bus.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Azure Service Bus" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Azure Service Bus + + +<img src="https://netdata.cloud/img/azure-service-bus.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Azure Service Bus messaging metrics for optimized communication and integration. + + +Metrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
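+ +That said, you can attach your own Netdata health entity to any chart this collector creates. Below is a minimal sketch, assuming a hypothetical chart context and dimension; look up the real names on the dashboard before using it. + +```text +# Open a custom health file with: sudo ./edit-config health.d/prometheus.conf +# The chart context ('on:') and dimension ('of ...') are placeholders. + template: servicebus_active_messages_high + on: prometheus_servicebus.active_messages + lookup: average -5m of active_messages + every: 1m + warn: $this > 10000 +``` +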
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no | +| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no | +| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Adjust the port to the one on which the monitored application exposes its metrics. + +A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_sql.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_sql.md new file mode 100644 index 00000000000000..9ea9802891cbed --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/azure_sql.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/azure_sql.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Azure SQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Azure SQL + + +<img src="https://netdata.cloud/img/azure-sql.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Azure SQL performance metrics for efficient database management and query performance. + + +Metrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
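+ +Because the collector simply scrapes a Prometheus-format HTTP endpoint, you can preview exactly what it will ingest before configuring a job. The address and port below are assumptions; substitute the ones your exporter actually listens on: + +```bash +# Hypothetical endpoint; adjust host/port to your azure_sql_exporter deployment. +curl -s http://127.0.0.1:9321/metrics | head -n 20 +``` + +If this prints Prometheus exposition text (# HELP and # TYPE comments followed by samples), the collector can consume it. +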
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bigquery.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bigquery.md new file mode 100644 index 00000000000000..f5a02272b601df --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bigquery.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bigquery.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "BigQuery" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# BigQuery + + +<img src="https://netdata.cloud/img/bigquery.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Google BigQuery metrics for optimized data processing and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
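+ +To make the grouping rules above concrete, here is a hypothetical scrape response (the metric names are invented for illustration and are not taken from the BigQuery exporter): + +```text +# TYPE bytes_processed_total counter +bytes_processed_total{dataset="demo"} 4096 +# TYPE slots_in_use gauge +slots_in_use{dataset="demo"} 7 +# The next series has no TYPE hint, but its '_total' suffix makes it a Counter. +queries_failed_total 3 +# The last series has no TYPE hint and matches no rule, so it is ignored +# unless a 'fallback_type' pattern claims it. +last_scrape_duration_seconds 0.21 +``` + +The typed series become one chart per label set: the counter as an incremental dimension, the gauge as an absolute one. +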
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bird_routing_daemon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bird_routing_daemon.md new file mode 100644 index 00000000000000..2f68a00fa3348b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bird_routing_daemon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bird_routing_daemon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Bird Routing Daemon" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Bird Routing Daemon + + +<img src="https://netdata.cloud/img/bird.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Bird Routing Daemon metrics for optimized network routing and management. + + +Metrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/blackbox.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/blackbox.md new file mode 100644 index 00000000000000..841645ad43e0e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/blackbox.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/blackbox.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Blackbox" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Blackbox + + +<img src="https://netdata.cloud/img/prometheus.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track external service availability and response times with Blackbox monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no | +| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no | +| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bobcat_miner_300.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bobcat_miner_300.md new file mode 100644 index 00000000000000..9935327bd3eace --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bobcat_miner_300.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bobcat_miner_300.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Bobcat Miner 300" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Bobcat Miner 300 + + +<img src="https://netdata.cloud/img/bobcat.jpg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Bobcat equipment metrics for optimized performance and maintenance management. + + +Metrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
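+
+If you prefer a single command, the steps below collapse into one invocation (assuming the default plugin path shown in the first step):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```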
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/borg_backup.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/borg_backup.md new file mode 100644 index 00000000000000..cfaaaee0fb162e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/borg_backup.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/borg_backup.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Borg backup" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Borg backup + + +<img src="https://netdata.cloud/img/borg.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Borg backup performance metrics for efficient data protection and recovery. + + +Metrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bosh.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bosh.md new file mode 100644 index 00000000000000..79af24d5114092 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bosh.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bosh.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "BOSH" +learn_status: "Published" +learn_rel_path: "Data Collection/Provisioning Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# BOSH + + +<img src="https://netdata.cloud/img/bosh.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on BOSH deployment metrics for improved cloud orchestration and resource management. 
+ + +Metrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bpftrace_variables.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bpftrace_variables.md new file mode 100644 index 00000000000000..cf4884e848e380 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bpftrace_variables.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bpftrace_variables.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "bpftrace variables" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# bpftrace variables + + +<img src="https://netdata.cloud/img/bpftrace.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track bpftrace metrics for advanced performance analysis and troubleshooting. + + +Metrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
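+
+For reference, here is a minimal sketch of Prometheus exposition text showing the metric types the grouping rules below refer to (the metric names are illustrative):
+
+```text
+# TYPE memory_usage_bytes gauge
+memory_usage_bytes 524288
+# TYPE http_requests_total counter
+http_requests_total 1027
+# TYPE request_duration_seconds histogram
+request_duration_seconds_bucket{le="0.5"} 240
+request_duration_seconds_bucket{le="+Inf"} 300
+request_duration_seconds_sum 42.7
+request_duration_seconds_count 300
+```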
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bungeecord.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bungeecord.md new file mode 100644 index 00000000000000..f60a2daaa10013 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/bungeecord.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/bungeecord.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "BungeeCord" +learn_status: "Published" +learn_rel_path: "Data Collection/Gaming" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# BungeeCord + + +<img src="https://netdata.cloud/img/bungee.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track BungeeCord proxy server metrics for efficient load balancing and performance management. + + +Metrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
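+
+You can still define alerts of your own on the charts this integration creates. Below is a minimal sketch of a custom Netdata health entry; the chart context, dimension, and thresholds are hypothetical and must be adjusted to the charts you actually see on your dashboard:
+
+```text
+ template: bungeecord_no_players
+       on: prometheus_local.bungeecord_online_players
+   lookup: max -5m of players
+    every: 1m
+     warn: $this == 0
+     info: no players connected to the proxy in the last 5 minutes
+```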
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cadvisor.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cadvisor.md new file mode 100644 index 00000000000000..f2d6aaba9d17cf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cadvisor.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cadvisor.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "cAdvisor" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# cAdvisor + + +<img src="https://netdata.cloud/img/cadvisor.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor container resource usage and performance metrics with cAdvisor for efficient container management. + + +Metrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
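+
+Before configuring the collector, you can confirm that cAdvisor is serving metrics (8080 is cAdvisor's default port; adjust it if your deployment differs):
+
+```bash
+# Expect Prometheus exposition text such as container_cpu_usage_seconds_total
+curl -s http://127.0.0.1:8080/metrics | head -n 20
+```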
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/celery.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/celery.md new file mode 100644 index 00000000000000..de3a10a72f25ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/celery.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/celery.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Celery" +learn_status: "Published" +learn_rel_path: "Data Collection/Task Queues" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Celery + + +<img src="https://netdata.cloud/img/celery.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Celery task queue metrics for optimized task processing and resource management. + + +Metrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
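+
+For example, consider these hypothetical metrics exposed without a '# TYPE' line:
+
+```text
+app_jobs_processed_total 42
+app_queue_depth 7
+```
+
+`app_jobs_processed_total` is processed as a Counter because of its '_total' suffix, while `app_queue_depth` is ignored unless a `fallback_type` pattern (see Configuration below) matches it as a counter or gauge.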
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/certificate_transparency.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/certificate_transparency.md new file mode 100644 index 00000000000000..185dbbc559f2ec --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/certificate_transparency.md @@ -0,0 +1,292 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/certificate_transparency.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Certificate Transparency" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Certificate Transparency + + +<img src="https://netdata.cloud/img/ct.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track certificate transparency log metrics for enhanced +SSL/TLS certificate management and security. + + +Metrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication.
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/checkpoint_device.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/checkpoint_device.md new file mode 100644 index 00000000000000..284fb8775fa267 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/checkpoint_device.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/checkpoint_device.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Checkpoint device" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Checkpoint device + + +<img src="https://netdata.cloud/img/checkpoint.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Check Point firewall and security metrics for enhanced network protection and management. + + +Metrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/chia.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/chia.md new file mode 100644 index 00000000000000..51231b087fa824 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/chia.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/chia.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Chia" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Chia + + +<img src="https://netdata.cloud/img/chia.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Chia blockchain metrics for optimized farming and resource allocation. + + +Metrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
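+
+Before wiring the collector up in the Setup steps below, it can help to confirm that the exporter endpoint responds. A quick sketch (port 9914 is an assumption here; verify the port your deployment actually uses):
+
+```bash
+# Expect Prometheus text-format output; adjust host and port to your setup.
+curl -s http://127.0.0.1:9914/metrics | head
+```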
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md new file mode 100644 index 00000000000000..29f6fcd4ccc3f1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Christ Elektronik CLM5IP power panel" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Christ Elektronik CLM5IP power panel + + +<img src="https://netdata.cloud/img/christelec.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/). 
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout.
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_agent.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_agent.md new file mode 100644 index 00000000000000..72678c161de17e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_agent.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cilium_agent.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cilium Agent" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cilium Agent + + +<img src="https://netdata.cloud/img/cilium.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Cilium Agent metrics for optimized network security and connectivity. + + +Metrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Agent](https://github.com/cilium/cilium) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_operator.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_operator.md new file mode 100644 index 00000000000000..36fd328dea3c82 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_operator.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cilium_operator.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cilium Operator" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cilium Operator + + +<img src="https://netdata.cloud/img/cilium.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Cilium Operator metrics for efficient Kubernetes network security management. + + +Metrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
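+
+For example, one configuration can watch a local and a remote Operator, with shared defaults set globally and overridden per job. A sketch only; the URLs are placeholders, and port 9963 is an assumption to verify against your deployment:
+
+```yaml
+# Global defaults apply to every job unless a job overrides them.
+update_every: 10
+
+jobs:
+  - name: cilium_operator_local
+    url: http://127.0.0.1:9963/metrics
+
+  - name: cilium_operator_remote
+    url: http://192.0.2.1:9963/metrics
+    update_every: 30 # per-job override
+```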
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Operator](https://github.com/cilium/cilium) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_proxy.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_proxy.md new file mode 100644 index 00000000000000..421e17914cfc02 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cilium_proxy.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cilium_proxy.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cilium Proxy" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cilium Proxy + + +<img src="https://netdata.cloud/img/cilium.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Cilium Proxy metrics for enhanced network security and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
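+
+As a concrete illustration of these rules, an untyped series with no '_total' suffix and no 'quantile' or 'le' label would normally be dropped. Below is a minimal sketch of a job that collects such a series as a Gauge via 'fallback_type' (the metric name `myapp_custom_value` and the port are hypothetical placeholders):
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    # collect the untyped series as a Gauge instead of ignoring it
+    fallback_type:
+      gauge:
+        - myapp_custom_value
+```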
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cisco_aci.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cisco_aci.md new file mode 100644 index 00000000000000..c6dfe188b68141 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cisco_aci.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cisco_aci.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cisco ACI" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cisco ACI + + +<img src="https://netdata.cloud/img/cisco.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Cisco ACI infrastructure metrics for optimized network performance and resource management. + + +Metrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/citrix_netscaler.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/citrix_netscaler.md new file mode 100644 index 00000000000000..aabf12446b9a6f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/citrix_netscaler.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/citrix_netscaler.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Citrix NetScaler" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Citrix NetScaler + + +<img src="https://netdata.cloud/img/citrix.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on NetScaler performance metrics for efficient application delivery and load balancing. + + +Metrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamav_daemon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamav_daemon.md
new file mode 100644
index 00000000000000..ae99e3bf2e3499
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamav_daemon.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/clamav_daemon.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "ClamAV daemon"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ClamAV daemon
+
+
+<img src="https://netdata.cloud/img/clamav.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track ClamAV antivirus metrics for enhanced threat detection and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As a Counter if the name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
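+
+If you plan to build your own alerts on top of this integration, it can help to collect only the series you care about. Below is a hedged sketch using the `selector` option described under Configuration, assuming the exporter prefixes its series with `clamav_` (verify against your exporter's actual output; the port is the placeholder used in the examples):
+
+```yaml
+jobs:
+  - name: clamav
+    url: http://127.0.0.1:9090/metrics
+    # the clamav_* prefix is an assumption; check the exporter's metrics page
+    selector:
+      allow:
+        - clamav_*
+```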
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamscan_results.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamscan_results.md new file mode 100644 index 00000000000000..42daba173a94fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clamscan_results.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/clamscan_results.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Clamscan results" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Clamscan results + + +<img src="https://netdata.cloud/img/clamav.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor ClamAV scanning performance metrics for efficient malware detection and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clash.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clash.md new file mode 100644 index 00000000000000..2f93c4fd5a0bd6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clash.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/clash.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Clash" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Clash + + +<img src="https://netdata.cloud/img/clash.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Clash proxy server metrics for optimized network performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clickhouse.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clickhouse.md new file mode 100644 index 00000000000000..c55bd2a32cfb7d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clickhouse.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/clickhouse.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ClickHouse" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ClickHouse + + +<img src="https://netdata.cloud/img/clickhouse.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor ClickHouse database metrics for efficient data storage and query performance. + + +Metrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
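+
+For example, here is a minimal sketch of one job per ClickHouse server, local and remote. This assumes each server has the built-in Prometheus endpoint enabled on port 9363, the port used in the ClickHouse documentation examples (see Setup below):
+
+```yaml
+jobs:
+  - name: clickhouse_local
+    url: http://127.0.0.1:9363/metrics
+
+  - name: clickhouse_remote
+    url: http://192.0.2.1:9363/metrics
+```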
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry.md new file mode 100644 index 00000000000000..c2c5811af05b02 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cloud_foundry.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cloud Foundry" +learn_status: "Published" +learn_rel_path: "Data Collection/Provisioning Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cloud Foundry + + +<img src="https://netdata.cloud/img/cloud-foundry.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Cloud Foundry platform metrics for optimized application deployment and management. + + +Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry_firehose.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry_firehose.md new file mode 100644 index 00000000000000..0ae4f22b9fbe50 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloud_foundry_firehose.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cloud_foundry_firehose.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cloud Foundry Firehose" +learn_status: "Published" +learn_rel_path: "Data Collection/Provisioning Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cloud Foundry Firehose + + +<img src="https://netdata.cloud/img/cloud-foundry.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management. + + +Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
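+
+While no alerts ship by default, you can still keep the collection itself lean: the Firehose exporter can expose a very large number of time series, and the `selector` option (described under Options below) lets you keep only what you need. A minimal sketch, assuming the exporter's conventional port (9186) and a purely illustrative metric-name pattern:
+
+```yaml
+jobs:
+  - name: firehose
+    url: http://127.0.0.1:9186/metrics
+    selector:
+      allow:
+        - firehose_container_metric_*
+```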
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudflare_pcap.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudflare_pcap.md new file mode 100644 index 00000000000000..75801f82df5713 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudflare_pcap.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cloudflare_pcap.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cloudflare PCAP" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cloudflare PCAP + + +<img src="https://netdata.cloud/img/cloudflare.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection. + + +Metrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout.
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudwatch.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudwatch.md new file mode 100644 index 00000000000000..b59d4a482426af --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cloudwatch.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cloudwatch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "CloudWatch" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CloudWatch + + +<img src="https://netdata.cloud/img/aws-cloudwatch.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization. + + +Metrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clustercontrol_cmon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clustercontrol_cmon.md new file mode 100644 index 00000000000000..90c11fe36c7ffd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/clustercontrol_cmon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/clustercontrol_cmon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ClusterControl CMON" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ClusterControl CMON + + +<img src="https://netdata.cloud/img/cluster-control.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations. + + +Metrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/collectd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/collectd.md new file mode 100644 index 00000000000000..1bb2abaa91b1b9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/collectd.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/collectd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Collectd" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Collectd + + +<img src="https://netdata.cloud/img/collectd.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor system and application metrics with Collectd for comprehensive performance analysis. + + +Metrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
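+
+As a quick orientation before the details below: a job for a collectd exporter listening on its conventional port (9103 in the [default port allocations](https://github.com/prometheus/prometheus/wiki/Default-port-allocations) list; verify against your deployment) could be as small as this sketch:
+
+```yaml
+jobs:
+  - name: collectd
+    url: http://127.0.0.1:9103/metrics
+```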
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics (process them as Counter or Gauge). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/concourse.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/concourse.md new file mode 100644 index 00000000000000..f752b3d7ac649d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/concourse.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/concourse.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Concourse" +learn_status: "Published" +learn_rel_path: "Data Collection/CICD Platforms" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Concourse + + +<img src="https://netdata.cloud/img/concourse.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment. + + +Metrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
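+
+For example, given an exposition without '# TYPE' lines (the metric names below are purely illustrative), the rules above would classify the series like this:
+
+```
+concourse_builds_total 42                           <- '_total' suffix: handled as Counter
+concourse_request_duration{quantile="0.99"} 0.12    <- 'quantile' label: handled as Summary
+concourse_db_queue_depth 7                          <- no rule matches: ignored unless 'fallback_type' matches it
+```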
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/craftbeerpi.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/craftbeerpi.md new file mode 100644 index 00000000000000..bb5bd96f886da9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/craftbeerpi.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/craftbeerpi.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "CraftBeerPi" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CraftBeerPi + + +<img src="https://netdata.cloud/img/craftbeer.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management. + + +Metrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crowdsec.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crowdsec.md new file mode 100644 index 00000000000000..382b7937e8d950 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crowdsec.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/crowdsec.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Crowdsec" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Crowdsec + + +<img src="https://netdata.cloud/img/crowdsec.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Crowdsec security metrics for efficient threat detection and response. + + +Metrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (those with no '# TYPE') are processed as follows: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**.
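+
+As an illustrative sketch of the `selector` option (documented under Configuration below), the job below collects only series whose names match the `cs_*` glob pattern. Both the `cs_` name prefix and the `:6060` port are assumptions about a typical Crowdsec setup, so check them against your Crowdsec Prometheus configuration:
+
+```yaml
+# Illustrative sketch only; the 'cs_' prefix and port 6060 are assumptions.
+jobs:
+  - name: crowdsec
+    url: http://127.0.0.1:6060/metrics
+    selector:
+      allow:
+        - cs_*
+```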
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency in seconds. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no | +| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no | +| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no | +| timeout | HTTP request timeout in seconds. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port to the one on which the monitored application exposes its metrics. + +A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crypto_exchanges.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crypto_exchanges.md new file mode 100644 index 00000000000000..6393f41da500ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/crypto_exchanges.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/crypto_exchanges.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Crypto exchanges" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Crypto exchanges + + +<img src="https://netdata.cloud/img/crypto.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track cryptocurrency market metrics for informed investment and trading decisions. + + +Metrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cryptowatch.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cryptowatch.md new file mode 100644 index 00000000000000..b918e6404f1b27 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cryptowatch.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cryptowatch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Cryptowatch" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Cryptowatch + + +<img src="https://netdata.cloud/img/cryptowatch.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis. + + +Metrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+ +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/csgo.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/csgo.md new file mode 100644 index 00000000000000..edd06408d2ce53 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/csgo.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/csgo.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "CS:GO" +learn_status: "Published" +learn_rel_path: "Data Collection/Gaming" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CS:GO + + +<img src="https://netdata.cloud/img/csgo.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Counter-Strike: Global Offensive server metrics for improved game performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/custom_exporter.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/custom_exporter.md new file mode 100644 index 00000000000000..92342c447be410 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/custom_exporter.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/custom_exporter.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Custom Exporter" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Custom Exporter + + +<img src="https://netdata.cloud/img/customdata.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Create and monitor custom metrics tailored to your specific use case and requirements. + + +Metrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. 
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cvmfs_clients.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cvmfs_clients.md new file mode 100644 index 00000000000000..f9c9fb9989aa0a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/cvmfs_clients.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/cvmfs_clients.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "CVMFS clients" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# CVMFS clients + + +<img src="https://netdata.cloud/img/cvmfs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track CernVM File System metrics for optimized distributed file system performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
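+
+The individual steps below can also be collapsed into a single command. A one-shot sketch, assuming the default plugin path:
+
+```bash
+# Run the collector in debug mode as the netdata user in one go.
+# Adjust the path if your plugins directory differs (see the first step below).
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```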
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ddwrt_routers.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ddwrt_routers.md new file mode 100644 index 00000000000000..26f5d63a81060f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ddwrt_routers.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ddwrt_routers.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "DDWRT Routers" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# DDWRT Routers + + +<img src="https://netdata.cloud/img/ddwrt.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on DD-WRT router metrics for efficient network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_ecs_cluster.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_ecs_cluster.md new file mode 100644 index 00000000000000..aaf2e77a1f1ed1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_ecs_cluster.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dell_emc_ecs_cluster.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dell EMC ECS cluster" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dell EMC ECS cluster + + +<img src="https://netdata.cloud/img/dell.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Dell EMC ECS object storage metrics for optimized storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_isilon_cluster.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_isilon_cluster.md new file mode 100644 index 00000000000000..a0b3eb12b4eff1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_isilon_cluster.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dell_emc_isilon_cluster.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dell EMC Isilon cluster" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dell EMC Isilon cluster + + +<img src="https://netdata.cloud/img/dell.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
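+
+Debug output can be verbose. Once you are running the plugin as described in the steps below, piping it through `grep` helps surface the relevant lines. A sketch, assuming you are already in the plugins directory as the `netdata` user:
+
+```bash
+# Keep only warning/error lines from the debug output for a quick scan.
+./go.d.plugin -d -m prometheus 2>&1 | grep -iE 'error|warn'
+```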
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_xtremio_cluster.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_xtremio_cluster.md new file mode 100644 index 00000000000000..1eff7f558101e2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_emc_xtremio_cluster.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dell_emc_xtremio_cluster.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dell EMC XtremIO cluster" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dell EMC XtremIO cluster + + +<img src="https://netdata.cloud/img/dell.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_powermax.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_powermax.md new file mode 100644 index 00000000000000..82ab63409b30f4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dell_powermax.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dell_powermax.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dell PowerMax" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dell PowerMax + + +<img src="https://netdata.cloud/img/powermax.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Dell EMC PowerMax storage array metrics for efficient storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
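+
+You can still attach your own health checks to the charts this collector creates. To discover the chart IDs on a running node, one option is the local Netdata API. A sketch, assuming `jq` is installed and the default API port; the `prometheus` chart prefix is an assumption, actual IDs depend on the job name:
+
+```bash
+# List chart IDs and keep the ones that look like they belong to this collector.
+# The '^prometheus' prefix is a guess; inspect the full list if it matches nothing.
+curl -s 'http://127.0.0.1:19999/api/v1/charts' | jq -r '.charts | keys[]' | grep -i '^prometheus'
+```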
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dependency-track.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dependency-track.md new file mode 100644 index 00000000000000..b766621beb1b01 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dependency-track.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dependency-track.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dependency-Track" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dependency-Track + + +<img src="https://netdata.cloud/img/dependency-track.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis. + + +Metrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
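+
+If the default path in the first step below does not exist on your system, the active plugins directory can be read straight from `netdata.conf`, as the steps suggest. A sketch, assuming the standard config location:
+
+```bash
+# Print the [directories] section; look for the "plugins" setting.
+grep -A 10 '^\[directories\]' /etc/netdata/netdata.conf
+```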
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/digitalocean.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/digitalocean.md new file mode 100644 index 00000000000000..0e433205d1cd8b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/digitalocean.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/digitalocean.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "DigitalOcean" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# DigitalOcean + + +<img src="https://netdata.cloud/img/digitalocean.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track DigitalOcean cloud provider metrics for optimized resource management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge, based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/discourse.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/discourse.md new file mode 100644 index 00000000000000..4ec819ec145bd2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/discourse.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/discourse.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Discourse" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Discourse + + +<img src="https://netdata.cloud/img/discourse.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Discourse forum metrics for efficient community management and engagement. 
+
+
+Metrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge, based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dmarc.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dmarc.md new file mode 100644 index 00000000000000..4f1ba7c27a1463 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dmarc.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dmarc.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "DMARC" +learn_status: "Published" +learn_rel_path: "Data Collection/Mail Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# DMARC + + +<img src="https://netdata.cloud/img/dmarc.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track DMARC email authentication metrics for improved email security and deliverability. + + +Metrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge, based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dnsbl.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dnsbl.md new file mode 100644 index 00000000000000..a316474621f442 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dnsbl.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dnsbl.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "DNSBL" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# DNSBL + + +<img src="https://netdata.cloud/img/dnsbl.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor DNSBL metrics for efficient domain reputation and security management. + + +Metrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
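+
+If you want to be alerted on values collected by this integration, you can add your own health configuration. Below is a minimal, illustrative sketch of a Netdata health template; the chart context `prometheus.dnsbl_check` is a placeholder (look up the real context of your chart on the dashboard first), and you would typically save it via `sudo ./edit-config health.d/prometheus.conf`:
+
+```conf
+ template: dnsbl_ip_listed
+       on: prometheus.dnsbl_check
+   lookup: max -1m unaligned
+    every: 1m
+     warn: $this > 0
+     info: a monitored IP appears on a DNS blocklist
+```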
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge, based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dutch_electricity_smart_meter.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dutch_electricity_smart_meter.md new file mode 100644 index 00000000000000..e37655359e35c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dutch_electricity_smart_meter.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dutch_electricity_smart_meter.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dutch Electricity Smart Meter" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dutch Electricity Smart Meter + + +<img src="https://netdata.cloud/img/dutch-electricity.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter). 
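+
+The exporter serves its data in the plain-text Prometheus exposition format. An illustrative sample (the metric names below are made up, not the exporter's actual ones):
+
+```text
+# TYPE example_power_draw_watts gauge
+example_power_draw_watts 1234.5
+example_energy_used_kwh_total 5678.9
+```
+
+The first metric carries a '# TYPE' hint and is charted as a Gauge; the second has no type hint but ends in '_total', so the collector falls back to treating it as a Counter (see the Metrics section below).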
+ + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dynatrace.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dynatrace.md new file mode 100644 index 00000000000000..102033e17374ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/dynatrace.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/dynatrace.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Dynatrace" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dynatrace + + +<img src="https://netdata.cloud/img/dynatrace.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Dynatrace APM metrics for comprehensive application performance management. + + +Metrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eaton_ups.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eaton_ups.md new file mode 100644 index 00000000000000..c3ebbdb8727827 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eaton_ups.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/eaton_ups.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Eaton UPS" +learn_status: "Published" +learn_rel_path: "Data Collection/UPS" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Eaton UPS + + +<img src="https://netdata.cloud/img/eaton.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring. 
+ + +Metrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/elgato_key_light_devices..md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/elgato_key_light_devices..md new file mode 100644 index 00000000000000..9b0ebd60542f76 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/elgato_key_light_devices..md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/elgato_key_light_devices..md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Elgato Key Light devices." +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Elgato Key Light devices. + + +<img src="https://netdata.cloud/img/elgato.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Elgato Key Light metrics for optimized lighting control and management. + + +Metrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/energomera_smart_power_meters.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/energomera_smart_power_meters.md new file mode 100644 index 00000000000000..2bf3d6b486653e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/energomera_smart_power_meters.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/energomera_smart_power_meters.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Energomera smart power meters" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Energomera smart power meters + + +<img src="https://netdata.cloud/img/energomera.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Energomera electricity meter metrics for efficient energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
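+
+To make these rules concrete: a hypothetical untyped series named `energy_consumed_total` would be collected as a Counter (because of the `_total` suffix), one carrying a `quantile` label as a Summary, one carrying an `le` label as a Histogram, and any other untyped series would be ignored unless a `fallback_type` rule (see the Configuration section below) matches it. A minimal job sketch, assuming a hypothetical untyped series named `meter_voltage` that behaves like a gauge:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # Collect the otherwise-ignored untyped series as a Gauge.
+    # 'meter_voltage' is an illustrative name, not actual exporter output.
+    fallback_type:
+      gauge:
+        - meter_voltage
+```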
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [energomera-exporter](https://github.com/peak-load/energomera_exporter), the Energomera electricity meter exporter, by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eos.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eos.md new file mode 100644 index 00000000000000..1b697ee5fcc0e8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/eos.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/eos.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "EOS" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# EOS + + +<img src="https://netdata.cloud/img/eos.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor CERN EOS metrics for efficient storage management. + + +Metrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/etcd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/etcd.md new file mode 100644 index 00000000000000..799652b01421ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/etcd.md @@ -0,0 +1,287 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/etcd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "etcd" +learn_status: "Published" +learn_rel_path: "Data Collection/Service Discovery / Registry" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# etcd + + +<img src="https://netdata.cloud/img/etcd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track etcd database metrics for optimized distributed key-value store management and performance. + + +Metrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+ +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/excel_spreadsheet.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/excel_spreadsheet.md new file mode 100644 index 00000000000000..808b823f980b6f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/excel_spreadsheet.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/excel_spreadsheet.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Excel spreadsheet" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Excel spreadsheet + + +<img src="https://netdata.cloud/img/excel.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Export Prometheus metrics to Excel for versatile data analysis and reporting. + + +Metrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fastd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fastd.md new file mode 100644 index 00000000000000..549eeccc970b9b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fastd.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/fastd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Fastd" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Fastd + + +<img src="https://netdata.cloud/img/fastd.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Fastd VPN metrics for efficient virtual private network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fortigate_firewall.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fortigate_firewall.md new file mode 100644 index 00000000000000..fef9e51653a995 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fortigate_firewall.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/fortigate_firewall.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Fortigate firewall" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Fortigate firewall + + +<img src="https://netdata.cloud/img/fortinet.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Fortigate firewall metrics for enhanced network protection and management. + + +Metrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
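+
+Before stepping through the debug procedure, it can be worth a quick sanity check that the exporter endpoint is reachable at all (the URL below is a placeholder; use the one from your job configuration):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```
+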
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_nfs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_nfs.md new file mode 100644 index 00000000000000..0975b230bb4b24 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_nfs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/freebsd_nfs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "FreeBSD NFS" +learn_status: "Published" +learn_rel_path: "Data Collection/FreeBSD" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# FreeBSD NFS + + +<img src="https://netdata.cloud/img/freebsd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor FreeBSD Network File System metrics for efficient file sharing management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_rctl-racct.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_rctl-racct.md new file mode 100644 index 00000000000000..923aaafb5b0e2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freebsd_rctl-racct.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/freebsd_rctl-racct.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "FreeBSD RCTL-RACCT" +learn_status: "Published" +learn_rel_path: "Data Collection/FreeBSD" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# FreeBSD RCTL-RACCT + + +<img src="https://netdata.cloud/img/freebsd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on FreeBSD Resource Container metrics for optimized resource management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
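+
+Even so, you can define your own health entities for the charts this collector produces if you want threshold notifications. A minimal sketch using the same `edit-config` workflow shown below under Setup (the `health.d` file name is arbitrary, and you may need to create the file manually if no stock copy exists):
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config health.d/prometheus-custom.conf
+```
+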
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freifunk_network.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freifunk_network.md new file mode 100644 index 00000000000000..1cc0d25e19ec70 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/freifunk_network.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/freifunk_network.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Freifunk network" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Freifunk network + + +<img src="https://netdata.cloud/img/freifunk.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Freifunk community network metrics for optimized network performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
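+
+Collector errors also end up in the Netdata error log, which can be a faster first check than a full debug run. A sketch, assuming the default log location on your install (it may differ, for example on systems that log to the journal):
+
+```bash
+grep prometheus /var/log/netdata/error.log | tail
+```
+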
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fritzbox_network_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fritzbox_network_devices.md new file mode 100644 index 00000000000000..189ae79421f548 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/fritzbox_network_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/fritzbox_network_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Fritzbox network devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Fritzbox network devices + + +<img src="https://netdata.cloud/img/avm.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track AVM Fritzbox router metrics for efficient home network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/frrouting.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/frrouting.md new file mode 100644 index 00000000000000..06e24f31a329e7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/frrouting.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/frrouting.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "FRRouting" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# FRRouting + + +<img src="https://netdata.cloud/img/frrouting.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Free Range Routing (FRR) metrics for optimized network routing and management. + + +Metrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
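+
+As elsewhere, you can still attach your own health entities to the charts this collector creates. A sketch, assuming a custom file name (`frrouting-custom.conf` is arbitrary) and that `netdatacli` is available for reloading health configuration:
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config health.d/frrouting-custom.conf
+sudo netdatacli reload-health
+```
+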
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_gce.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_gce.md new file mode 100644 index 00000000000000..65ddd3d96aad86 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_gce.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gcp_gce.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GCP GCE" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GCP GCE + + +<img src="https://netdata.cloud/img/gcp-gce.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
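+
+In practice, each monitored instance is just another entry under `jobs`, whether local or remote (the names and addresses below are placeholders; the Multi-instance example under Setup shows the same idea):
+
+```yaml
+jobs:
+  - name: gce_local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: gce_remote
+    url: http://192.0.2.1:9090/metrics
+```
+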
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_quota.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_quota.md new file mode 100644 index 00000000000000..fdd865db918f3a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gcp_quota.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gcp_quota.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GCP Quota" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GCP Quota + + +<img src="https://netdata.cloud/img/gcp.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Google Cloud Platform quota metrics for optimized resource usage and cost management. + + +Metrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
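+
+To make these rules concrete, consider a small, hypothetical scrape response (all metric names here are invented for the sketch):
+
+```text
+# TYPE build_info gauge
+build_info{version="1.2.3"} 1
+# TYPE requests_processed_total counter
+requests_processed_total 42
+errors_total 5
+queue_depth 7
+```
+
+`build_info` and `requests_processed_total` are typed, so they are charted as a Gauge (absolute) and a Counter (incremental). `errors_total` carries no '# TYPE' hint but ends in '_total', so it is processed as a Counter. `queue_depth` matches none of the rules and is ignored unless a 'fallback_type' pattern maps it to a Counter or Gauge.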
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_command_line_output.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_command_line_output.md new file mode 100644 index 00000000000000..eb4ac5c654a583 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_command_line_output.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/generic_command_line_output.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Generic Command Line Output" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Generic Command Line Output + + +<img src="https://netdata.cloud/img/cli.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track custom command line output metrics for tailored monitoring and management. + + +Metrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_storage_enclosure_tool.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_storage_enclosure_tool.md new file mode 100644 index 00000000000000..4ca57bdf652e58 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/generic_storage_enclosure_tool.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/generic_storage_enclosure_tool.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Generic storage enclosure tool" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Generic storage enclosure tool + + +<img src="https://netdata.cloud/img/storage-enclosure.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor storage enclosure metrics for efficient storage device management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_api_rate_limit.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_api_rate_limit.md new file mode 100644 index 00000000000000..875d184d52cd8c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_api_rate_limit.md @@ -0,0 +1,292 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/github_api_rate_limit.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GitHub API rate limit" +learn_status: "Published" +learn_rel_path: "Data Collection/Other" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GitHub API rate limit + + +<img src="https://netdata.cloud/img/github.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor GitHub API rate limit metrics for efficient +API usage and management. + + +Metrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
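+
+If you do want to be alerted on one of the collected series, you can add your own health template as a file under `/etc/netdata/health.d/`. The sketch below is an illustration only: the chart context on the `on:` line is a placeholder and the warn/crit thresholds are arbitrary, so replace them with the context your dashboard shows for the metric and values that fit your usage.
+
+```conf
+# NOTE: the chart context below is a placeholder - use the one from your dashboard
+ template: github_api_rate_remaining_low
+       on: prometheus_github_ratelimit.github_rate_remaining
+   lookup: min -5m unaligned
+    every: 1m
+     warn: $this < 500
+     crit: $this < 100
+     info: remaining GitHub API requests are running low
+```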
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_repository.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_repository.md new file mode 100644 index 00000000000000..a72b8c406bb52f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/github_repository.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/github_repository.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GitHub repository" +learn_status: "Published" +learn_rel_path: "Data Collection/Other" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GitHub repository + + +<img src="https://netdata.cloud/img/github.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track GitHub repository metrics for optimized project and user analytics monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gitlab_runner.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gitlab_runner.md new file mode 100644 index 00000000000000..d9ed2fe8a87e47 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gitlab_runner.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gitlab_runner.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GitLab Runner" +learn_status: "Published" +learn_rel_path: "Data Collection/CICD Platforms" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GitLab Runner + + +<img src="https://netdata.cloud/img/gitlab.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on GitLab CI/CD job metrics for efficient development and deployment management. + + +Metrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
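+
+If the runner exposes a series without a '# TYPE' hint that you still want charted, a job-level `fallback_type` rule collects it instead of letting it fall into the ignored bucket. A minimal sketch, assuming the runner's metrics server listens on the conventional port 9252 and using a hypothetical metric name pattern:
+
+```yaml
+jobs:
+  - name: gitlab_runner
+    url: http://127.0.0.1:9252/metrics
+    fallback_type:
+      gauge:
+        - gitlab_runner_*_level   # hypothetical untyped series
+```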
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gobetween.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gobetween.md new file mode 100644 index 00000000000000..0342e77f6ff780 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gobetween.md @@ -0,0 +1,287 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gobetween.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Gobetween" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Gobetween + + +<img src="https://netdata.cloud/img/gobetween.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Gobetween load balancer metrics for optimized network traffic management and performance. + + +Metrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. 
| | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_cloud_platform.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_cloud_platform.md new file mode 100644 index 00000000000000..86590f20aff0b7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_cloud_platform.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/google_cloud_platform.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Google Cloud Platform" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Google Cloud Platform + + +<img src="https://netdata.cloud/img/gcp.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization. + + +Metrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_pagespeed.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_pagespeed.md new file mode 100644 index 00000000000000..348392ff037a92 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_pagespeed.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/google_pagespeed.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Google Pagespeed" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Google Pagespeed + + +<img src="https://netdata.cloud/img/google.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter). + + +This collector is supported on all platforms. 
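+
+For example, assuming a local pagespeed_exporter listening on port 9271 (verify the port against the exporter's documentation), a minimal job could look like this sketch:
+
+```yaml
+# minimal sketch - the port is an assumption, check your exporter's flags
+jobs:
+  - name: pagespeed
+    url: http://127.0.0.1:9271/metrics
+```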
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_stackdriver.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_stackdriver.md new file mode 100644 index 00000000000000..a7a2a2541cf41d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/google_stackdriver.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/google_stackdriver.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Google Stackdriver" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Google Stackdriver + + +<img src="https://netdata.cloud/img/gcp-stackdriver.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric                    | Chart                                      | Dimension(s)         | Algorithm   |
+|---------------------------|--------------------------------------------|----------------------|-------------|
+| Gauge                     | for each label set                         | one, the metric name | absolute    |
+| Counter                   | for each label set                         | one, the metric name | incremental |
+| Summary (quantiles)       | for each label set (excluding 'quantile')  | for each quantile    | absolute    |
+| Summary (sum and count)   | for each label set                         | the metric name      | incremental |
+| Histogram (buckets)       | for each label set (excluding 'le')        | for each bucket      | incremental |
+| Histogram (sum and count) | for each label set                         | the metric name      | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gpsd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gpsd.md new file mode 100644 index 00000000000000..c3d2bf0bb442fb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gpsd.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gpsd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "gpsd" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# gpsd + + +<img src="https://netdata.cloud/img/gpsd.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor GPSD (GPS daemon) metrics for efficient GPS data management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README. 
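+
+Once the exporter is running, point a collection job at its metrics endpoint. This is an illustrative sketch only: the listen address and port are assumptions, so use the ones your gpsd exporter is actually configured with (see its README).
+
+```yaml
+# illustrative sketch - replace the URL with your exporter's listen address
+jobs:
+  - name: gpsd
+    url: http://127.0.0.1:9090/metrics
+```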
+ + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. 
+ +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/grafana.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/grafana.md new file mode 100644 index 00000000000000..430dabc879f13b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/grafana.md @@ -0,0 +1,287 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/grafana.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Grafana" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Grafana + + +<img src="https://netdata.cloud/img/grafana.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis. + + +Metrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
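+
+For example, assuming a local Grafana instance serving its built-in Prometheus metrics on its default HTTP port (3000), a minimal job could look like this sketch; adjust the URL if your Grafana listens elsewhere.
+
+```yaml
+# minimal sketch, assuming Grafana's default HTTP port
+jobs:
+  - name: grafana
+    url: http://127.0.0.1:3000/metrics
+```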
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. 
| | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/graylog_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/graylog_server.md new file mode 100644 index 00000000000000..6abcf567b9ba7d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/graylog_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/graylog_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Graylog Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Graylog Server + + +<img src="https://netdata.cloud/img/graylog.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Graylog server metrics for efficient log management and analysis. + + +Metrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gtp.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gtp.md new file mode 100644 index 00000000000000..cd1f0765e21cc0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/gtp.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/gtp.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "GTP" +learn_status: "Published" +learn_rel_path: "Data Collection/Telephony Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# GTP + + +<img src="https://netdata.cloud/img/gtpu.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance. + + +Metrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
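+
+For example, a minimal job pointing at a local GTP Exporter instance might look like the sketch below. The port is an assumption for illustration: use the address your gtp_exporter is configured to listen on (see its README).
+
+```yaml
+# illustrative sketch - replace the URL with your gtp_exporter listen address
+jobs:
+  - name: gtp
+    url: http://127.0.0.1:9090/metrics
+```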
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/halon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/halon.md new file mode 100644 index 00000000000000..c71f99568ac298 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/halon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/halon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Halon" +learn_status: "Published" +learn_rel_path: "Data Collection/Mail Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Halon + + +<img src="https://netdata.cloud/img/halon.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Halon email security and delivery metrics for optimized email management and protection. + + +Metrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
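+
+As a concrete illustration (the metric names here are hypothetical): an exporter exposing an untyped series `myapp_requests_total` would be collected as a Counter via the '_total' suffix rule, while an untyped `myapp_queue_depth` series matches none of the rules and would be ignored unless you map it explicitly with the `fallback_type` option, e.g.:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_queue_depth
+```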
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application serves its metrics.
+
+A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hana.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hana.md new file mode 100644 index 00000000000000..dd72aa97b933b5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hana.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hana.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "HANA" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HANA + + +<img src="https://netdata.cloud/img/sap.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track SAP HANA database metrics for efficient data storage and query performance. + + +Metrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
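+
+If the exporter exposes more series than you need, you can narrow collection with the `selector` option documented under Configuration below. A minimal sketch (the job name, URL, and `hana_*` pattern are assumptions, not defaults):
+
+```yaml
+jobs:
+  - name: hana
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - hana_*
+```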
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hashicorp_vault_secrets.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hashicorp_vault_secrets.md new file mode 100644 index 00000000000000..9dd68d7d4ccc38 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hashicorp_vault_secrets.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hashicorp_vault_secrets.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "HashiCorp Vault secrets" +learn_status: "Published" +learn_rel_path: "Data Collection/Authentication and Authorization" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HashiCorp Vault secrets + + +<img src="https://netdata.cloud/img/vault.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track HashiCorp Vault security assessment metrics for efficient secrets management and security. + + +Metrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hasura_graphql_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hasura_graphql_server.md new file mode 100644 index 00000000000000..b122d46d69b820 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hasura_graphql_server.md @@ -0,0 +1,292 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hasura_graphql_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Hasura GraphQL Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Hasura GraphQL Server + + +<img src="https://netdata.cloud/img/hasura.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Hasura GraphQL engine metrics for optimized +API performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application serves its metrics.
+
+A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hdsentinel.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hdsentinel.md new file mode 100644 index 00000000000000..5bf41c8afc03d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hdsentinel.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hdsentinel.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "HDSentinel" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HDSentinel + + +<img src="https://netdata.cloud/img/harddisk.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_hotspot.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_hotspot.md new file mode 100644 index 00000000000000..19bd1698093974 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_hotspot.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/helium_hotspot.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Helium hotspot" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Helium hotspot + + +<img src="https://netdata.cloud/img/helium.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Helium hotspot metrics for optimized LoRaWAN network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_miner_validator.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_miner_validator.md new file mode 100644 index 00000000000000..65dc7b75c3701b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/helium_miner_validator.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/helium_miner_validator.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Helium miner (validator)" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Helium miner (validator) + + +<img src="https://netdata.cloud/img/helium.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Helium miner and validator metrics for efficient blockchain performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
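+
+Before you proceed with the setup below, you can confirm that the exporter is up and serving metrics. A minimal check, assuming the exporter listens on port 9090 of the local host (adjust the address and port to your deployment):
+
+```bash
+# print the first exposition-format lines served by the exporter
+curl -s http://127.0.0.1:9090/metrics | head
+```
+
+If this prints Prometheus-style `metric_name value` lines, the collector should be able to scrape the same URL.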
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hhvm.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hhvm.md new file mode 100644 index 00000000000000..86de2bea4d9f7c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hhvm.md @@ -0,0 +1,292 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hhvm.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "HHVM" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HHVM + + +<img src="https://netdata.cloud/img/hhvm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor HipHop Virtual Machine metrics for efficient +PHP execution and performance. + + +Metrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_cgn_series_cpe.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_cgn_series_cpe.md new file mode 100644 index 00000000000000..72e7504bb998d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_cgn_series_cpe.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hitron_cgn_series_cpe.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Hitron CGN series CPE" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Hitron CGN series CPE + + +<img src="https://netdata.cloud/img/hitron.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Hitron CGNV4 gateway metrics for efficient network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_coda_cable_modem.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_coda_cable_modem.md new file mode 100644 index 00000000000000..1a095d4ae22fb0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hitron_coda_cable_modem.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hitron_coda_cable_modem.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Hitron CODA Cable Modem" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Hitron CODA Cable Modem + + +<img src="https://netdata.cloud/img/hitron.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Hitron CODA cable modem metrics for optimized internet connectivity and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
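+
+Before you proceed with the setup below, it can also help to verify that the `prometheus` module of `go.d.plugin` is enabled. A quick check, assuming a standard install layout (the module is enabled by default, so a missing or commented-out entry also means enabled):
+
+```bash
+# look for an explicit "prometheus: no" in the user or stock go.d configuration
+grep -s prometheus /etc/netdata/go.d.conf /usr/lib/netdata/conf.d/go.d.conf
+```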
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homebridge.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homebridge.md new file mode 100644 index 00000000000000..40ee07c948b5be --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homebridge.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/homebridge.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Homebridge" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Homebridge + + +<img src="https://netdata.cloud/img/homebridge.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Homebridge smart home metrics for efficient home automation management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
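+
+If your exporter instance does not listen on one of the well-known ports that auto-detection probes (see Default Behavior below), define a job for it explicitly. A minimal sketch, assuming a hypothetical Homebridge Prometheus Exporter instance on port 36123 (substitute the port your exporter is actually configured to use):
+
+```yaml
+jobs:
+  - name: homebridge
+    url: http://127.0.0.1:36123/metrics
+```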
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homey.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homey.md new file mode 100644 index 00000000000000..382b562ce767d9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/homey.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/homey.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Homey" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Homey + + +<img src="https://netdata.cloud/img/homey.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Homey smart home controller metrics for efficient home automation and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
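+
+For example, a hypothetical untyped metric named `homey_device_energy_usage` (no '# TYPE' line, no '_total' suffix, no 'quantile' or 'le' label) would normally be ignored; the `fallback_type` option described under Setup below lets you collect it anyway, here as a Gauge:
+
+```yaml
+fallback_type:
+  gauge:
+    - homey_device_energy_*
+```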
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/honeypot.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/honeypot.md new file mode 100644 index 00000000000000..d9ba8ffb18c69e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/honeypot.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/honeypot.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Honeypot" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Honeypot + + +<img src="https://netdata.cloud/img/intrinsec.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor honeypot metrics for efficient threat detection and management. + + +Metrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hp_ilo.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hp_ilo.md new file mode 100644 index 00000000000000..b104e7b6a600ec --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hp_ilo.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hp_ilo.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "HP iLO" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HP iLO + + +<img src="https://netdata.cloud/img/hp.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
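+
+As an illustration of the rules above, consider this hypothetical exposition snippet (the metric names are made up) and how each line would be classified:
+
+```text
+myapp_requests_total 1027                 <- no '# TYPE', '_total' suffix: Counter
+myapp_latency{quantile="0.99"} 0.42       <- no '# TYPE', 'quantile' label: Summary
+myapp_batch_size_bucket{le="10"} 318      <- no '# TYPE', 'le' label: Histogram
+myapp_fan_speed_rpm 4200                  <- no rule matches: ignored unless 'fallback_type' matches
+```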
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/huawei_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/huawei_devices.md new file mode 100644 index 00000000000000..b20dd91fecdc9a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/huawei_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/huawei_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Huawei devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Huawei devices + + +<img src="https://netdata.cloud/img/huawei.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Huawei HiLink device metrics for optimized connectivity and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
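+
+The steps below walk through this interactively. Assuming the default plugin directory (adjust it as described in the first step if your system differs), they collapse into a single command:
+
+```bash
+# Run the plugin in debug mode for the prometheus module, as the netdata user.
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```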
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hubble.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hubble.md new file mode 100644 index 00000000000000..e4e1f65e0f2c6e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/hubble.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/hubble.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Hubble" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Hubble + + +<img src="https://netdata.cloud/img/hubble.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Hubble network observability metrics for efficient network visibility and management. + + +Metrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+ +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure built-in Prometheus exporter + +To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. 
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_aix_systems_njmon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_aix_systems_njmon.md new file mode 100644 index 00000000000000..be807c36944eef --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_aix_systems_njmon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_aix_systems_njmon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM AIX systems Njmon" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM AIX systems Njmon + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md new file mode 100644 index 00000000000000..fa9839564d5092 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM CryptoExpress (CEX) cards" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM CryptoExpress (CEX) cards + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track IBM Z Crypto Express device metrics for optimized cryptographic performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if it has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_mq.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_mq.md new file mode 100644 index 00000000000000..019b5b3a8ab878 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_mq.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_mq.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM MQ" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM MQ + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on IBM MQ message queue metrics for efficient message transport and performance. + + +Metrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
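+
+If you need alerting on top of the collected metrics, you can attach your own health template to the charts this collector creates. A minimal sketch, assuming a queue-depth gauge is collected; the chart context, dimension, and thresholds below are placeholders, so look up the real names on the dashboard or via the `/api/v1/charts` endpoint before using it:
+
+```text
+# Hypothetical alert on a gauge collected from the MQ exporter.
+ template: mq_queue_depth
+       on: prometheus_mq.ibmmq_queue_depth
+   lookup: average -1m
+    every: 1m
+     warn: $this > 1000
+     crit: $this > 5000
+     info: IBM MQ queue depth is persistently high
+```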
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum.md new file mode 100644 index 00000000000000..0e525e0c05fae0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_spectrum.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM Spectrum" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM Spectrum + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor IBM Spectrum storage metrics for efficient data management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum_virtualize.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum_virtualize.md new file mode 100644 index 00000000000000..8225ee08ee75a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_spectrum_virtualize.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_spectrum_virtualize.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM Spectrum Virtualize" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM Spectrum Virtualize + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance. + + +Metrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_z_hardware_management_console.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_z_hardware_management_console.md new file mode 100644 index 00000000000000..5086547beb2412 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ibm_z_hardware_management_console.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ibm_z_hardware_management_console.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IBM Z Hardware Management Console" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IBM Z Hardware Management Console + + +<img src="https://netdata.cloud/img/ibm.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
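+
+As a concrete sketch of these rules: a hypothetical untyped series (the metric name below is made up) with no '_total' suffix and no 'quantile' or 'le' label would be ignored by default, but it can be kept by listing it under the `fallback_type` option described in the Setup section below:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # process the hypothetical untyped series as a Gauge instead of ignoring it
+    fallback_type:
+      gauge:
+        - zhmc_channel_usage
+```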
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/influxdb.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/influxdb.md new file mode 100644 index 00000000000000..2ab8d67a1b109e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/influxdb.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/influxdb.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "InfluxDB" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# InfluxDB + + +<img src="https://netdata.cloud/img/influxdb.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor InfluxDB time-series database metrics for efficient data storage and query performance. + + +Metrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
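+
+For orientation, a minimal job pointing at a locally running exporter is sketched below; port 9122 is the default for `influxdb_exporter` per the Prometheus default port allocations, so adjust it if your deployment differs. The full option set is described under Setup.
+
+```yaml
+jobs:
+  - name: influxdb_local
+    # assumes influxdb_exporter is listening on its default port 9122
+    url: http://127.0.0.1:9122/metrics
+```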
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iota_full_node.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iota_full_node.md new file mode 100644 index 00000000000000..59addee3712ed6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iota_full_node.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/iota_full_node.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IOTA full node" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IOTA full node + + +<img src="https://netdata.cloud/img/iota.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+ +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ipmi_by_soundcloud.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ipmi_by_soundcloud.md new file mode 100644 index 00000000000000..73391ca8ac9aa9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ipmi_by_soundcloud.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ipmi_by_soundcloud.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "IPMI (By SoundCloud)" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# IPMI (By SoundCloud) + + +<img src="https://netdata.cloud/img/soundcloud.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor IPMI metrics externally for efficient server hardware management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md new file mode 100644 index 00000000000000..a70489cc6b371f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "iqAir AirVisual air quality monitors" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# iqAir AirVisual air quality monitors + + +<img src="https://netdata.cloud/img/iqair.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor air quality data from IQAir devices for efficient environmental monitoring and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jarvis_standing_desk.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jarvis_standing_desk.md new file mode 100644 index 00000000000000..92d8de46f7d5e2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jarvis_standing_desk.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/jarvis_standing_desk.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Jarvis Standing Desk" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Jarvis Standing Desk + + +<img src="https://netdata.cloud/img/jarvis.jpg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Jarvis standing desk usage metrics for efficient workspace ergonomics and management. + + +Metrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
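+
+While no alerts ship by default, note that any series this collector drops can never be alerted on. As described under Metrics above, untyped series (those without a '# TYPE' line) are ignored unless `fallback_type` matches them; a minimal sketch of rescuing one (the metric name is a placeholder, not one the exporter is known to expose):
+
+```yaml
+jobs:
+  - name: jarvis
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - jarvis_desk_height*  # hypothetical untyped gauge, charted with the absolute algorithm
+```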
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jenkins.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jenkins.md new file mode 100644 index 00000000000000..7ca90f97162ac5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jenkins.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/jenkins.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Jenkins" +learn_status: "Published" +learn_rel_path: "Data Collection/CICD Platforms" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Jenkins + + +<img src="https://netdata.cloud/img/jenkins.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Jenkins continuous integration server metrics for efficient development and build management. + + +Metrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
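+
+Since every collected time series becomes a chart dimension, it is often worth narrowing a busy Jenkins exporter down to the series you actually need with the `selector` option documented below. A hedged sketch (both the port and the metric name patterns are illustrative; check your exporter's output for the real ones):
+
+```yaml
+jobs:
+  - name: jenkins
+    url: http://127.0.0.1:9964/metrics  # placeholder port; use your exporter's listen address
+    selector:
+      allow:
+        - jenkins_builds_*      # illustrative pattern
+        - jenkins_job_queue_*   # illustrative pattern
+```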
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jetbrains_floating_license_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jetbrains_floating_license_server.md new file mode 100644 index 00000000000000..b26434ffdc3e1c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jetbrains_floating_license_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/jetbrains_floating_license_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "JetBrains Floating License Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# JetBrains Floating License Server + + +<img src="https://netdata.cloud/img/jetbrains.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor JetBrains floating license server metrics for efficient software licensing management. + + +Metrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jmx.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jmx.md new file mode 100644 index 00000000000000..856b5b8071232f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jmx.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/jmx.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "JMX" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# JMX + + +<img src="https://netdata.cloud/img/java.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Java Management Extensions (JMX) metrics for efficient Java application management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
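+
+Keep in mind that a JVM instrumented via the JMX Exporter can expose thousands of time series: a scrape above the global limit is not processed at all, while individual metrics above the per-metric limit are skipped. If that happens, raising the limits is a per-job setting; a sketch using only options from the configuration table below (the values are illustrative):
+
+```yaml
+jobs:
+  - name: jmx_app
+    url: http://127.0.0.1:9090/metrics
+    max_time_series: 5000            # global per-scrape limit (default 2000)
+    max_time_series_per_metric: 500  # per-metric-name limit (default 200)
+```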
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jolokia.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jolokia.md new file mode 100644 index 00000000000000..5db34e647cca17 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/jolokia.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/jolokia.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "jolokia" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# jolokia + + +<img src="https://netdata.cloud/img/jolokia.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Jolokia JVM metrics for optimized Java application performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
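+
+The jolokia_exporter endpoint does not have to be local: the collector's standard HTTP client options cover proxied and authenticated access. A minimal sketch for a remote instance behind an HTTP proxy (addresses and credentials are placeholders):
+
+```yaml
+jobs:
+  - name: jolokia_remote
+    url: http://192.0.2.10:9090/metrics       # placeholder exporter address
+    proxy_url: http://proxy.example.com:3128  # placeholder proxy
+    proxy_username: username
+    proxy_password: password
+```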
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/journald.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/journald.md new file mode 100644 index 00000000000000..9bb7634f612a88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/journald.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/journald.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "journald" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# journald + + +<img src="https://netdata.cloud/img/linux.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on systemd-journald metrics for efficient log management and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka.md new file mode 100644 index 00000000000000..fbb10b0f9410ec --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kafka.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kafka" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kafka + + +<img src="https://netdata.cloud/img/kafka.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Kafka message queue metrics for optimized data streaming and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
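+
+For instance, if [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) runs locally on its default port (9308 in the Prometheus default port allocations; verify against your deployment), a single job like this hedged sketch is enough — the full option reference follows under Setup:
+
+```yaml
+jobs:
+  - name: kafka
+    url: http://127.0.0.1:9308/metrics  # default kafka_exporter listen address; adjust if changed
+```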
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_connect.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_connect.md new file mode 100644 index 00000000000000..17acb1d72da761 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_connect.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kafka_connect.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kafka Connect" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kafka Connect + + +<img src="https://netdata.cloud/img/kafka.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Kafka Connect metrics for efficient data streaming and integration. + + +Metrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_consumer_lag.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_consumer_lag.md new file mode 100644 index 00000000000000..7ef30d27223312 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_consumer_lag.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kafka_consumer_lag.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kafka Consumer Lag" +learn_status: "Published" +learn_rel_path: "Data Collection/Service Discovery / Registry" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kafka Consumer Lag + + +<img src="https://netdata.cloud/img/kafka.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Kafka consumer lag metrics for efficient message queue management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
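+
+Before stepping through the debug run below, note that if the exporter was not up when Netdata started, the collector may have given up on auto-detection. A minimal sketch (endpoint and interval are illustrative) re-schedules detection via the `autodetection_retry` option from the Options table:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics  # adjust to the port your exporter actually uses
+    autodetection_retry: 60             # re-check every 60 seconds instead of never
+```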
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_zookeeper.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_zookeeper.md new file mode 100644 index 00000000000000..2d438cede50e38 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kafka_zookeeper.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kafka_zookeeper.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kafka ZooKeeper" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kafka ZooKeeper + + +<img src="https://netdata.cloud/img/kafka.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Kafka ZooKeeper metrics for optimized distributed coordination and management. + + +Metrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kannel.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kannel.md new file mode 100644 index 00000000000000..ad45ba87b38bff --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kannel.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kannel.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kannel" +learn_status: "Published" +learn_rel_path: "Data Collection/Telephony Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kannel + + +<img src="https://netdata.cloud/img/kannel.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance. 
+ + +Metrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/keepalived.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/keepalived.md new file mode 100644 index 00000000000000..9239ba68f4d5bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/keepalived.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/keepalived.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Keepalived" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Keepalived + + +<img src="https://netdata.cloud/img/keepalived.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Keepalived metrics for efficient high-availability and load balancing management. + + +Metrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md new file mode 100644 index 00000000000000..4db075620cdf38 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Kubernetes Cluster Cloud Cost" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Kubernetes Cluster Cloud Cost + + +<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting. + + +Metrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lagerist_disk_latency.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lagerist_disk_latency.md
new file mode 100644
index 00000000000000..55fc13f87e9ef6
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lagerist_disk_latency.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/lagerist_disk_latency.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Lagerist Disk latency"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lagerist Disk latency
+
+
+<img src="https://netdata.cloud/img/linux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track disk latency metrics for efficient storage performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. 
| | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ldap.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ldap.md
new file mode 100644
index 00000000000000..684d4b475b3a4e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ldap.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ldap.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "LDAP"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# LDAP
+
+
+<img src="https://netdata.cloud/img/ldap.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/linode.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/linode.md
new file mode 100644
index 00000000000000..bfeb88ac60594c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/linode.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/linode.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Linode"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Linode
+
+
+<img src="https://netdata.cloud/img/linode.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Linode cloud hosting metrics for efficient virtual server management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
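+
+Auto-detection (described below) probes ports commonly used by exporters. To confirm manually that the Linode Exporter endpoint responds before enabling the collector, a quick check like this works (the address and port are placeholders; adjust them to your deployment):
+
+```bash
+# expect Prometheus text-format output on success
+curl -s http://127.0.0.1:9090/metrics | head
+```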
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. 
| | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/loki.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/loki.md
new file mode 100644
index 00000000000000..15157c5d6b47e8
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/loki.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/loki.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "loki"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# loki
+
+
+<img src="https://netdata.cloud/img/loki.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Loki metrics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
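+
+As a preview of the configuration walked through under Setup below, a minimal job definition for Loki might look like this sketch (it assumes Loki's default HTTP port 3100; the selector is optional and its pattern is illustrative):
+
+```yaml
+jobs:
+  - name: loki
+    url: http://127.0.0.1:3100/metrics
+    # collect only Loki's own series, dropping other runtime internals
+    selector:
+      allow:
+        - loki_*
+```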
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Loki
+
+Install [loki](https://github.com/grafana/loki) according to its documentation.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lustre_metadata.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lustre_metadata.md
new file mode 100644
index 00000000000000..0052a3c4141395
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lustre_metadata.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/lustre_metadata.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Lustre metadata"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lustre metadata
+
+
+<img src="https://netdata.cloud/img/lustre.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Lustre clustered file system for efficient management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
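+
+If the Cluster Exporter publishes any series without a '# TYPE' line, the collector ignores them unless you opt in via the `fallback_type` option documented under Setup below. A hedged sketch (the address and metric-name pattern are hypothetical):
+
+```yaml
+jobs:
+  - name: lustre
+    url: http://127.0.0.1:9090/metrics  # placeholder address
+    fallback_type:
+      gauge:
+        - lustre_*  # treat untyped lustre_* series as Gauges
+```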
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. 
| | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lynis_audit_reports.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lynis_audit_reports.md
new file mode 100644
index 00000000000000..79aa29449b98e9
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/lynis_audit_reports.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/lynis_audit_reports.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Lynis audit reports"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lynis audit reports
+
+
+<img src="https://netdata.cloud/img/lynis.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Lynis security auditing tool metrics for efficient system security and compliance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/machbase.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/machbase.md
new file mode 100644
index 00000000000000..317ecd56b4011d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/machbase.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/machbase.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Machbase"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Machbase
+
+
+<img src="https://netdata.cloud/img/machbase.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Machbase time-series database metrics for efficient data storage and query performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
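+
+If your Machbase Exporter endpoint is served over HTTPS with a certificate from a private CA, supplying that CA via the `tls_ca` option (see the Options table below) is a safer alternative to `tls_skip_verify`. A sketch with placeholder values:
+
+```yaml
+jobs:
+  - name: machbase
+    url: https://127.0.0.1:9090/metrics  # placeholder address
+    tls_ca: /etc/ssl/certs/my-ca.pem     # hypothetical CA bundle path
+```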
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/maildir.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/maildir.md new file mode 100644 index 00000000000000..aaf5d6be6ee7a6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/maildir.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/maildir.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Maildir" +learn_status: "Published" +learn_rel_path: "Data Collection/Mail Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Maildir + + +<img src="https://netdata.cloud/img/mailserver.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track mail server metrics for optimized email management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
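+
+For instance, if mailexporter exposed an untyped series that none of the rules above catch, a `fallback_type` rule (documented under Options below) could keep it as a gauge instead of dropping it. A sketch, with a hypothetical metric name pattern:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_last_success'  # hypothetical untyped metric name pattern
+```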
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mailexporter](https://github.com/cherti/mailexporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meilisearch.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meilisearch.md new file mode 100644 index 00000000000000..39bda87288fbb3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meilisearch.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/meilisearch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Meilisearch" +learn_status: "Published" +learn_rel_path: "Data Collection/Search Engines" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Meilisearch + + +<img src="https://netdata.cloud/img/meilisearch.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Meilisearch search engine metrics for efficient search performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
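+
+If you only need a subset of the exporter's series, you can narrow collection with a `selector` (described under Options below). A sketch, with a hypothetical metric name pattern:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - 'meilisearch_*'  # hypothetical metric name prefix
+```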
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/memcached_community.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/memcached_community.md new file mode 100644 index 00000000000000..b1e818f5efb731 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/memcached_community.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/memcached_community.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Memcached (community)" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Memcached (community) + + +<img src="https://netdata.cloud/img/memcached.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Memcached in-memory key-value store metrics for efficient caching performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meraki_dashboard.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meraki_dashboard.md new file mode 100644 index 00000000000000..b45bf09f65f348 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/meraki_dashboard.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/meraki_dashboard.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Meraki dashboard" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Meraki dashboard + + +<img src="https://netdata.cloud/img/meraki.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter). 
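+
+Since this exporter answers scrapes by querying the Meraki cloud API, responses can be slower than those of purely local exporters. If collection times out, raising the job-level `timeout` (see Options below) may help; a sketch with placeholder values:
+
+```yaml
+jobs:
+  - name: meraki
+    url: http://127.0.0.1:9090/metrics  # placeholder port, adjust to your exporter
+    timeout: 20                         # seconds; an assumption, tune as needed
+```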
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped.
| 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mesos.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mesos.md new file mode 100644 index 00000000000000..5c69b15bd9008a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mesos.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mesos.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Mesos" +learn_status: "Published" +learn_rel_path: "Data Collection/Task Queues" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Mesos + + +<img src="https://netdata.cloud/img/mesos.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Apache Mesos cluster manager metrics for efficient resource management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_devices.md new file mode 100644 index 00000000000000..1b8b9ccfe49c77 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mikrotik_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "MikroTik devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MikroTik devices + + +<img src="https://netdata.cloud/img/mikrotik.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on MikroTik RouterOS metrics for efficient network device management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_routeros_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_routeros_devices.md new file mode 100644 index 00000000000000..b690ba896341a8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mikrotik_routeros_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mikrotik_routeros_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Mikrotik RouterOS devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Mikrotik RouterOS devices + + +<img src="https://netdata.cloud/img/routeros.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track MikroTik RouterOS metrics for efficient network device management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
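+
+If you prefer a single command over the individual steps below, this one-liner is an equivalent sketch (it assumes the default plugin path used in the first step; adjust it if your install differs):
+
+```bash
+# Run the collector in debug mode as the netdata user (default plugin path assumed).
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```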
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/minecraft.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/minecraft.md new file mode 100644 index 00000000000000..bda5244604ab74 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/minecraft.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/minecraft.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Minecraft" +learn_status: "Published" +learn_rel_path: "Data Collection/Gaming" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Minecraft + + +<img src="https://netdata.cloud/img/minecraft.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Minecraft server metrics for efficient game server management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/modbus_protocol.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/modbus_protocol.md new file mode 100644 index 00000000000000..f36eaa20b507db --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/modbus_protocol.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/modbus_protocol.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Modbus protocol" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Modbus protocol + + +<img src="https://netdata.cloud/img/modbus.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Modbus RTU protocol metrics for efficient industrial automation and control performance. + + +Metrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
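+Because untyped series are only collected when they match the rules above or a `fallback_type` pattern, it can be worth checking which '# TYPE' hints the exporter actually emits before configuring anything. A hedged sketch (the port is a placeholder; use the address your modbusrtu_exporter instance listens on):
+
+```bash
+# Placeholder endpoint: list the metric type hints advertised by the exporter.
+curl -s http://127.0.0.1:9700/metrics | grep '^# TYPE' | head
+```
+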
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mogilefs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mogilefs.md new file mode 100644 index 00000000000000..51d3eb4cce75de --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mogilefs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mogilefs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "MogileFS" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MogileFS + + +<img src="https://netdata.cloud/img/filesystem.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor MogileFS distributed file system metrics for efficient storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/monnit_sensors_mqtt.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/monnit_sensors_mqtt.md new file mode 100644 index 00000000000000..8aa3440fc6ee85 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/monnit_sensors_mqtt.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/monnit_sensors_mqtt.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Monnit Sensors MQTT" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Monnit Sensors MQTT + + +<img src="https://netdata.cloud/img/monnit.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Monnit sensor data via MQTT for efficient IoT device monitoring and management. + + +Metrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. 
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mosquitto.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mosquitto.md new file mode 100644 index 00000000000000..aae998ca708c85 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mosquitto.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mosquitto.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "mosquitto" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# mosquitto + + +<img src="https://netdata.cloud/img/mosquitto.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance. + + +Metrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mp707_usb_thermometer.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mp707_usb_thermometer.md
new file mode 100644
index 00000000000000..7d32cb8b0ce6f3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mp707_usb_thermometer.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mp707_usb_thermometer.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "MP707 USB thermometer"
+learn_status: "Published"
+learn_rel_path: "Data Collection/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MP707 USB thermometer
+
+
+<img src="https://netdata.cloud/img/thermometer.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track MP707 USB thermometer metrics for efficient temperature monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>

+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series > limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with a number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mqtt_blackbox.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mqtt_blackbox.md new file mode 100644 index 00000000000000..5af1e3e6be860e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mqtt_blackbox.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mqtt_blackbox.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "MQTT Blackbox" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MQTT Blackbox + + +<img src="https://netdata.cloud/img/mqtt.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track MQTT message transport performance using blackbox testing methods. + + +Metrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mtail.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mtail.md new file mode 100644 index 00000000000000..afff635f2c1f93 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/mtail.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/mtail.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "mtail" +learn_status: "Published" +learn_rel_path: "Data Collection/Logs Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# mtail + + +<img src="https://netdata.cloud/img/mtail.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor log data metrics using mtail log data extractor and parser. + + +Metrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
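+
+For orientation, a minimal job pointing at a local mtail instance might look like the sketch below. It assumes mtail's default HTTP port (3903); adjust the URL if you start mtail with a different `--port`:
+
+```yaml
+jobs:
+  - name: mtail_local
+    # 3903 is mtail's default HTTP port; change it if yours differs
+    url: http://127.0.0.1:3903/metrics
+```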
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/naemon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/naemon.md new file mode 100644 index 00000000000000..8b88353ba70074 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/naemon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/naemon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Naemon" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Naemon + + +<img src="https://netdata.cloud/img/naemon.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
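+
+For example, a hypothetical untyped series named `naemon_hosts_checked` (no `_total` suffix, no `quantile` or `le` label) would be ignored by default, but a `fallback_type` rule like the sketch below would collect it as a gauge:
+
+```yaml
+# 'naemon_hosts_checked' is a hypothetical metric name used for illustration
+fallback_type:
+  gauge:
+    - naemon_hosts_checked
+```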
+ + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nagios.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nagios.md new file mode 100644 index 00000000000000..5d8ccb15fa6a5a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nagios.md @@ -0,0 +1,292 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nagios.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Nagios" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Nagios + + +<img src="https://netdata.cloud/img/nagios.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Nagios network monitoring metrics for efficient +IT infrastructure management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
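+
+If the endpoint also serves series you don't care about, a job-level `selector` (described under Options below) can narrow collection to this exporter's metrics. A sketch, assuming the exporter prefixes its series with `nagios_`:
+
+```yaml
+jobs:
+  - name: nagios_local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        # 'nagios_*' assumes this exporter's metric name prefix
+        - nagios_*
+```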
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nature_remo_e_lite_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nature_remo_e_lite_devices.md new file mode 100644 index 00000000000000..2d4d3d96e31cce --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nature_remo_e_lite_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nature_remo_e_lite_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Nature Remo E lite devices" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Nature Remo E lite devices + + +<img src="https://netdata.cloud/img/nature-remo.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Nature Remo E series smart home device metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_ontap_api.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_ontap_api.md new file mode 100644 index 00000000000000..8c75730d1fee14 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_ontap_api.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/netapp_ontap_api.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Netapp ONTAP API" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Netapp ONTAP API + + +<img src="https://netdata.cloud/img/netapp.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_solidfire.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_solidfire.md new file mode 100644 index 00000000000000..165478a5a0477e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netapp_solidfire.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/netapp_solidfire.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NetApp Solidfire" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NetApp Solidfire + + +<img src="https://netdata.cloud/img/netapp.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track NetApp Solidfire storage system metrics for efficient data storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netatmo_sensors.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netatmo_sensors.md new file mode 100644 index 00000000000000..29c3af344211ad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netatmo_sensors.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/netatmo_sensors.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Netatmo sensors" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Netatmo sensors + + +<img src="https://netdata.cloud/img/netatmo.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Netatmo smart home device metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netflow.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netflow.md new file mode 100644 index 00000000000000..5f9efadd9862b6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netflow.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/netflow.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NetFlow" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NetFlow + + +<img src="https://netdata.cloud/img/netflow.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track NetFlow network traffic metrics for efficient network monitoring and performance. + + +Metrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
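+
+If you want alerting on top of the scraped data, you can point your own health entities at the charts this collector creates. Chart names are derived from the scraped metric names, so the simplest way to discover them is to list the running Agent's charts once collection has started. A sketch against the Agent's v1 charts endpoint on its default port (adjust host and port if yours differ):
+
+```bash
+# Print every chart ID known to the local Agent; the ones produced by this
+# job are derived from the NetFlow metrics the exporter exposes.
+curl -s http://127.0.0.1:19999/api/v1/charts |
+  python3 -c 'import json,sys; [print(k) for k in json.load(sys.stdin)["charts"]]'
+```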
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netmeter.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netmeter.md new file mode 100644 index 00000000000000..7927de5244b778 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/netmeter.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/netmeter.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NetMeter" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NetMeter + + +<img src="https://netdata.cloud/img/netmeter.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor NetMeter network traffic metrics for efficient network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
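+
+Exporters vary a lot in how many time series they expose, and this collector does not process an endpoint that returns more series than its `max_time_series` limit (2000 by default; see the options table in the Setup section below). A quick way to count what your exporter actually emits, using the placeholder port from this page's examples:
+
+```bash
+# Each non-comment line of Prometheus exposition output is one time series.
+curl -s http://127.0.0.1:9090/metrics | grep -vc '^#'
+```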
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/new_relic.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/new_relic.md new file mode 100644 index 00000000000000..5bd56c886cfad5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/new_relic.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/new_relic.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "New Relic" +learn_status: "Published" +learn_rel_path: "Data Collection/Observability" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# New Relic + + +<img src="https://netdata.cloud/img/newrelic.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor New Relic application performance management metrics for efficient application monitoring and performance. + + +Metrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+ +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextcloud_servers.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextcloud_servers.md new file mode 100644 index 00000000000000..bdac74c2ed0a07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextcloud_servers.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nextcloud_servers.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Nextcloud servers" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Nextcloud servers + + +<img src="https://netdata.cloud/img/nextcloud.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Nextcloud cloud storage metrics for efficient file hosting and management. + + +Metrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter). + + +This collector is supported on all platforms. 
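+
+It can also reach an exporter that sits behind an HTTP proxy. None of the configuration examples further down this page cover that case, so here is a sketch combining the `proxy_*` options from the options table (every value below is a placeholder):
+
+```yaml
+jobs:
+  - name: nextcloud
+    # placeholder address and port, point this at your nextcloud-exporter
+    url: http://192.0.2.10:9205/metrics
+    proxy_url: http://proxy.example.com:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+```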
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextdns.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextdns.md new file mode 100644 index 00000000000000..11b272fdb45d12 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nextdns.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nextdns.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NextDNS" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NextDNS + + +<img src="https://netdata.cloud/img/nextdns.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track NextDNS DNS resolver and security platform metrics for efficient DNS management and security. + + +Metrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every` and `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nftables.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nftables.md new file mode 100644 index 00000000000000..3f3bcc52feec3b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nftables.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nftables.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "nftables" +learn_status: "Published" +learn_rel_path: "Data Collection/Linux Systems/Firewall" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# nftables + + +<img src="https://netdata.cloud/img/nftables.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor nftables firewall metrics for efficient network security and management. 
+ + +Metrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nrpe_daemon.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nrpe_daemon.md new file mode 100644 index 00000000000000..790fe94612c65e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nrpe_daemon.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nrpe_daemon.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NRPE daemon" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NRPE daemon + + +<img src="https://netdata.cloud/img/nrpelinux.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nsx-t.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nsx-t.md new file mode 100644 index 00000000000000..83baf897e768d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nsx-t.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nsx-t.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NSX-T" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NSX-T + + +<img src="https://netdata.cloud/img/vmware-nsx.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track VMware NSX-T software-defined networking metrics for efficient network virtualization and security management. + + +Metrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
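+
+Although nothing ships enabled by default, you can confirm which alerts (if any) are loaded on your node once the collector is running. A minimal sketch, assuming the Netdata API is listening on its default port:
+
+```bash
+# List the alarms currently loaded on this node via the local Netdata API.
+# 19999 is the default dashboard/API port; adjust it if your install differs.
+curl -s "http://127.0.0.1:19999/api/v1/alarms?all" | head -n 40
+```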
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nvml.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nvml.md new file mode 100644 index 00000000000000..d669d5c91ede5d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/nvml.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/nvml.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "NVML" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# NVML + + +<img src="https://netdata.cloud/img/nvidia.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management. + + +Metrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
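+
+Before relying on auto-detection, it can help to confirm that the exporter endpoint is reachable and serving Prometheus-format metrics. A minimal sketch, reusing the placeholder port from the configuration examples below (replace it with the port your NVML exporter actually listens on):
+
+```bash
+# Print the first few typed metrics from the exporter endpoint.
+# 9090 is a placeholder; use your exporter's real listen port.
+curl -s http://127.0.0.1:9090/metrics | grep -m 5 '^# TYPE'
+```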
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/obs_studio.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/obs_studio.md new file mode 100644 index 00000000000000..b463c731d1f863 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/obs_studio.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/obs_studio.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OBS Studio" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OBS Studio + + +<img src="https://netdata.cloud/img/obs-studio.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track OBS Studio live streaming and recording software metrics for efficient video production and performance. + + +Metrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/odbc.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/odbc.md new file mode 100644 index 00000000000000..8bd59e66bf8e23 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/odbc.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/odbc.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ODBC" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ODBC + + +<img src="https://netdata.cloud/img/odbc.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance. + + +Metrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
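+
+The collector enforces a global time series limit (see the `max_time_series` option below; 2000 by default), so it can be worth estimating how many series the exporter exposes before enabling collection. A rough sketch, assuming a local exporter on the placeholder port used in the examples below:
+
+```bash
+# Count exposition lines that are actual samples (skip comments and blanks),
+# then compare the result against the collector's max_time_series limit.
+curl -s http://127.0.0.1:9090/metrics | grep -cv -e '^#' -e '^$'
+```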
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/open_vswitch.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/open_vswitch.md new file mode 100644 index 00000000000000..7b97c637921354 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/open_vswitch.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/open_vswitch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Open vSwitch" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Open vSwitch + + +<img src="https://netdata.cloud/img/ovs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
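+
+Before adding the job, you can confirm that the endpoint used in the example below actually serves Prometheus text-format metrics (a quick sanity check; adjust the address if your exporter listens elsewhere):
+
+```bash
+# should print the first few exposed metrics; the URL mirrors the example configuration
+curl -s http://127.0.0.1:9090/metrics | head
+```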
+ +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openhab.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openhab.md new file mode 100644 index 00000000000000..4fb5a5190b619d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openhab.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openhab.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenHAB" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenHAB + + +<img src="https://netdata.cloud/img/openhab.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track openHAB smart home automation system metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
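+
+For instance, you can check that an exporter instance is reachable before defining a job for it. A minimal sketch, assuming a hypothetical listen address (substitute the host and port your OpenHAB exporter actually uses):
+
+```bash
+# hypothetical address; replace with your exporter's host and port
+curl -s http://127.0.0.1:9266/metrics | head
+```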
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL.
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openldap_community.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openldap_community.md new file mode 100644 index 00000000000000..1f0592e5edbb78 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openldap_community.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openldap_community.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenLDAP (community)" +learn_status: "Published" +learn_rel_path: "Data Collection/Authentication and Authorization" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenLDAP (community) + + +<img src="https://netdata.cloud/img/openldap.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor OpenLDAP directory service metrics for efficient directory management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrc.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrc.md new file mode 100644 index 00000000000000..b3a3a1262e5970 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrc.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openrc.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenRC" +learn_status: "Published" +learn_rel_path: "Data Collection/Linux Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenRC + + +<img src="https://netdata.cloud/img/linux.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on OpenRC init system metrics for efficient system startup and service management. 
+
+
+Metrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit.
Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
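+
+Before applying the config below, you can verify that both endpoints respond (the addresses mirror the example; adjust them to your instances):
+
+```bash
+# each command should print Prometheus text-format metrics
+curl -s http://127.0.0.1:9090/metrics | head
+curl -s http://192.0.2.1:9090/metrics | head
+```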
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrct2.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrct2.md new file mode 100644 index 00000000000000..9ed5224f00ab40 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openrct2.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openrct2.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenRCT2" +learn_status: "Published" +learn_rel_path: "Data Collection/Gaming" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenRCT2 + + +<img src="https://netdata.cloud/img/openRCT2.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track OpenRCT2 game metrics for efficient game server management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openroadm_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openroadm_devices.md new file mode 100644 index 00000000000000..9df8a044284f81 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openroadm_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openroadm_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenROADM devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenROADM devices + + +<img src="https://netdata.cloud/img/openroadm.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
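+
+No alerts ship by default, but you can build custom ones on top of the charts this collector creates. A hedged way to list candidate chart IDs via the local Netdata API (assumes `jq` is installed; the `prometheus` chart-ID prefix is an assumption and may differ on your setup):
+
+```bash
+# list chart IDs created by the prometheus collector on this node
+curl -s 'http://127.0.0.1:19999/api/v1/charts' | jq -r '.charts | keys[]' | grep '^prometheus'
+```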
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openstack.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openstack.md new file mode 100644 index 00000000000000..f70896bd365d24 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openstack.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openstack.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenStack" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenStack + + +<img src="https://netdata.cloud/img/openstack.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track OpenStack cloud computing platform metrics for efficient infrastructure management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no |
+| timeout | HTTP request timeout.
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
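+
+The steps below walk through it; equivalently, you can run it as a single command (assuming the default plugins directory from the first step):
+
+```bash
+# run the plugin in debug mode for the prometheus module, as the netdata user
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```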
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openvas.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openvas.md new file mode 100644 index 00000000000000..99ab8594bc8e6b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openvas.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openvas.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenVAS" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenVAS + + +<img src="https://netdata.cloud/img/openVAS.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor OpenVAS vulnerability scanner metrics for efficient security assessment and management. + + +Metrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series: only the time series matching the selector are collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openweathermap.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openweathermap.md new file mode 100644 index 00000000000000..793c57d7633145 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/openweathermap.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/openweathermap.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OpenWeatherMap" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OpenWeatherMap + + +<img src="https://netdata.cloud/img/openweather.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
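+
+Once the collector is configured (see the next section), you can confirm which charts the job created through the local agent's API. A quick sketch, assuming the default dashboard port `19999`; the `prometheus_` chart-ID prefix is an assumption about how this module names its charts, so adjust the pattern to what you see on the dashboard:
+
+```bash
+# List chart IDs reported by the local Netdata agent
+curl -s http://localhost:19999/api/v1/charts | grep -o '"prometheus_[^"]*"' | sort -u
+```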
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/oracle_db_community.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/oracle_db_community.md new file mode 100644 index 00000000000000..7aa4e9e2424b07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/oracle_db_community.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/oracle_db_community.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Oracle DB (community)" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Oracle DB (community) + + +<img src="https://netdata.cloud/img/oracle.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Oracle Database metrics for efficient database management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/otrs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/otrs.md new file mode 100644 index 00000000000000..0b4acd3d03381d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/otrs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/otrs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "OTRS" +learn_status: "Published" +learn_rel_path: "Data Collection/Incident Management" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# OTRS + + +<img src="https://netdata.cloud/img/otrs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/patroni.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/patroni.md new file mode 100644 index 00000000000000..ad3188e86198d9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/patroni.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/patroni.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Patroni" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Patroni + + +<img src="https://netdata.cloud/img/patroni.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
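+
+If the exporter endpoint also exposes series you do not care about, the `selector` option (documented under Configuration below) can restrict collection to the relevant ones. A minimal sketch; the `patroni_*` prefix is an assumption about the exporter's metric naming, so check the exporter's output first:
+
+```yaml
+jobs:
+  - name: patroni
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - patroni_*
+```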
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/personal_weather_station.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/personal_weather_station.md new file mode 100644 index 00000000000000..e241b267c4558a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/personal_weather_station.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/personal_weather_station.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Personal Weather Station" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Personal Weather Station + + +<img src="https://netdata.cloud/img/wunderground.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track personal weather station metrics for efficient weather monitoring and management. + + +Metrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgbackrest.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgbackrest.md new file mode 100644 index 00000000000000..ffa34d70cb0473 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgbackrest.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/pgbackrest.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "pgBackRest" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# pgBackRest + + +<img src="https://netdata.cloud/img/pgbackrest.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor pgBackRest PostgreSQL backup metrics for efficient database backup and management. + + +Metrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
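+
+If you need alerting on the collected series, you can define your own health template (for example via `sudo ./edit-config health.d/prometheus.conf`). A minimal sketch that warns when the last completed backup is older than a day; the chart ID and the `pgbackrest_backup_since_last_completion_seconds` metric are assumptions about your job name and the exporter's output, so verify both on the dashboard first:
+
+```conf
+ template: pgbackrest_stale_backup
+       on: prometheus_pgbackrest.pgbackrest_backup_since_last_completion_seconds
+   lookup: max -1m unaligned
+    units: seconds
+    every: 1m
+     warn: $this > 86400
+     info: time since the last completed pgBackRest backup
+```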
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgpool-ii.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgpool-ii.md new file mode 100644 index 00000000000000..9e0a87f67fe572 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pgpool-ii.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/pgpool-ii.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Pgpool-II" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Pgpool-II + + +<img src="https://netdata.cloud/img/pgpool2.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/philips_hue.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/philips_hue.md new file mode 100644 index 00000000000000..0acfb26baf19e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/philips_hue.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/philips_hue.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Philips Hue" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Philips Hue + + +<img src="https://netdata.cloud/img/hue.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
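+
+Because only the rules above rescue untyped series, it can be worth checking which of an exporter's metrics lack a '# TYPE' declaration before deciding whether you need the `fallback_type` option described later. A rough sketch (the URL is a placeholder; point it at your exporter):
+
+```bash
+# List metric names that have samples but no '# TYPE' line.
+url=http://127.0.0.1:9090/metrics
+comm -23 \
+  <(curl -s "$url" | grep -v '^#' | awk 'NF {sub(/[{ ].*/, ""); print}' | sort -u) \
+  <(curl -s "$url" | awk '$1 == "#" && $2 == "TYPE" {print $3}' | sort -u)
+```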
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pimoroni_enviro+.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pimoroni_enviro+.md new file mode 100644 index 00000000000000..485cbfc13236b9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pimoroni_enviro+.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/pimoroni_enviro+.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Pimoroni Enviro+" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Pimoroni Enviro+ + + +<img src="https://netdata.cloud/img/pimorino.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis. + + +Metrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
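+
+If you just want a single command, the step-by-step procedure below collapses into one line (a sketch assuming the default `plugins.d` location):
+
+```bash
+# One-shot equivalent of the steps below; adjust the path if your
+# plugins directory differs.
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```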
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pingdom.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pingdom.md new file mode 100644 index 00000000000000..c3d2a0426209fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/pingdom.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/pingdom.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Pingdom" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Pingdom + + +<img src="https://netdata.cloud/img/solarwinds.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics. + + +Metrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/podman.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/podman.md new file mode 100644 index 00000000000000..886da11c31ea36 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/podman.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/podman.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Podman" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Podman + + +<img src="https://netdata.cloud/img/podman.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Podman container runtime metrics for efficient container management and performance. 
+ + +Metrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/powerpal_devices.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/powerpal_devices.md new file mode 100644 index 00000000000000..3808a6b00bbc07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/powerpal_devices.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/powerpal_devices.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Powerpal devices" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Powerpal devices + + +<img src="https://netdata.cloud/img/powerpal.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Powerpal smart meter metrics for efficient energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proftpd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proftpd.md new file mode 100644 index 00000000000000..fcd270a27a7daa --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proftpd.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/proftpd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ProFTPD" +learn_status: "Published" +learn_rel_path: "Data Collection/FTP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ProFTPD + + +<img src="https://netdata.cloud/img/proftpd.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor ProFTPD FTP server metrics for efficient file transfer and server performance. + + +Metrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
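+
+When you later narrow collection with the `selector` option (see Options below), it helps to know which metric names the exporter actually exposes. One way to list them, reusing the placeholder endpoint from the examples below (substitute your exporter's real address and port):
+
+```bash
+# Print the metric names the exporter documents via '# HELP' lines.
+curl -s http://127.0.0.1:9090/metrics | awk '$1 == "#" && $2 == "HELP" {print $3}' | sort -u
+```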
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/prometheus_endpoint.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/prometheus_endpoint.md new file mode 100644 index 00000000000000..6a83ae576a9b4c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/prometheus_endpoint.md @@ -0,0 +1,287 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/prometheus_endpoint.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Prometheus endpoint" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Prometheus endpoint + + +<img src="https://netdata.cloud/img/prometheus.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints. + + +It collects metrics by periodically sending HTTP requests to the target instance. + + +This collector is supported on all platforms. 
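+
+For reference, a Prometheus endpoint is simply an HTTP URL that serves metrics in the Prometheus [text exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/). A minimal, illustrative payload (the metric names are made up) looks like this:
+
+```text
+# HELP app_requests_total Total HTTP requests served.
+# TYPE app_requests_total counter
+app_requests_total{method="get"} 1027
+# HELP app_temperature_celsius Current temperature.
+# TYPE app_temperature_celsius gauge
+app_temperature_celsius 23.5
+```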
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proxmox_ve.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proxmox_ve.md new file mode 100644 index 00000000000000..9f8caadcfe83cf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/proxmox_ve.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/proxmox_ve.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Proxmox VE" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Proxmox VE + + +<img src="https://netdata.cloud/img/proxmox.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management. + + +Metrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. 
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radio_thermostat.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radio_thermostat.md new file mode 100644 index 00000000000000..4b137662e1625f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radio_thermostat.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/radio_thermostat.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Radio Thermostat" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Radio Thermostat + + +<img src="https://netdata.cloud/img/radiots.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radius.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radius.md new file mode 100644 index 00000000000000..b21eb3e3935884 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/radius.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/radius.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "RADIUS" +learn_status: "Published" +learn_rel_path: "Data Collection/Authentication and Authorization" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# RADIUS + + +<img src="https://netdata.cloud/img/radius.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management. + + +Metrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/rancher.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/rancher.md new file mode 100644 index 00000000000000..98bc9ed8d36c13 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/rancher.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/rancher.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Rancher" +learn_status: "Published" +learn_rel_path: "Data Collection/Kubernetes" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Rancher + + +<img src="https://netdata.cloud/img/rancher.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Rancher container orchestration platform metrics for efficient container management and performance. 
+ + +Metrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/raritan_pdu.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/raritan_pdu.md new file mode 100644 index 00000000000000..1af38f186358a1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/raritan_pdu.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/raritan_pdu.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Raritan PDU" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Raritan PDU + + +<img src="https://netdata.cloud/img/raritan.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/redis_queue.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/redis_queue.md new file mode 100644 index 00000000000000..2686def5017887 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/redis_queue.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/redis_queue.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Redis Queue" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Redis Queue + + +<img src="https://netdata.cloud/img/rq.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
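+
+Tying the metric-type rules above together: an untyped series whose name does not end in `_total` (say, a hypothetical `rq_workers` gauge) would normally be ignored. A `fallback_type` rule in the job configuration (described under Configuration below) can rescue it, for example:
+
+```yaml
+fallback_type:
+  gauge:
+    - rq_workers
+```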
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ripe_atlas.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ripe_atlas.md new file mode 100644 index 00000000000000..bcf7d7b736e481 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ripe_atlas.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ripe_atlas.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "RIPE Atlas" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# RIPE Atlas + + +<img src="https://netdata.cloud/img/ripe.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance. + + +Metrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sabnzbd.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sabnzbd.md new file mode 100644 index 00000000000000..efce5887d3d98a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sabnzbd.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sabnzbd.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SABnzbd" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SABnzbd + + +<img src="https://netdata.cloud/img/sabnzbd.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SABnzbd Usenet client metrics for efficient file downloads and resource management. + + +Metrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/salicru_eqx_inverter.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/salicru_eqx_inverter.md new file mode 100644 index 00000000000000..03e20f1d64cb07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/salicru_eqx_inverter.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/salicru_eqx_inverter.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Salicru EQX inverter" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Salicru EQX inverter + + +<img src="https://netdata.cloud/img/salicru.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sense_energy.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sense_energy.md new file mode 100644 index 00000000000000..62746fd1ab07d6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sense_energy.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sense_energy.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sense Energy" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sense Energy + + +<img src="https://netdata.cloud/img/sense.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sentry.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sentry.md new file mode 100644 index 00000000000000..d91be878a56c28 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sentry.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sentry.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sentry" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sentry + + +<img src="https://netdata.cloud/img/sentry.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Sentry error tracking and monitoring platform metrics for efficient application performance and error management. 
+ + +Metrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/servertech.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/servertech.md new file mode 100644 index 00000000000000..2f8190294a4fb4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/servertech.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/servertech.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "ServerTech" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ServerTech + + +<img src="https://netdata.cloud/img/servertech.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge when their names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shell_command.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shell_command.md new file mode 100644 index 00000000000000..6125e6dc839e6b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shell_command.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/shell_command.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Shell command" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Shell command + + +<img src="https://netdata.cloud/img/crunner.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track custom command output metrics for tailored monitoring and management. + + +Metrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
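+
+Before working through the Setup steps below, it can help to confirm that the exporter endpoint actually serves metrics. A quick check from the Netdata host (the port here is a placeholder; use the one your exporter listens on):
+
+```bash
+# Quick sanity check: fetch the exposition endpoint and print the first few
+# series. 9090 is a placeholder port; substitute the one your exporter uses.
+curl -s http://127.0.0.1:9090/metrics | head
+```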
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shelly_humidity_sensor.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shelly_humidity_sensor.md new file mode 100644 index 00000000000000..d659d1dd8aeddd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/shelly_humidity_sensor.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/shelly_humidity_sensor.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Shelly humidity sensor" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Shelly humidity sensor + + +<img src="https://netdata.cloud/img/shelly.jpg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Shelly smart home device metrics for efficient home automation and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sia.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sia.md new file mode 100644 index 00000000000000..9993d4afd1ab93 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sia.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sia.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sia" +learn_status: "Published" +learn_rel_path: "Data Collection/Blockchain Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sia + + +<img src="https://netdata.cloud/img/sia.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Sia decentralized storage platform metrics for efficient storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/siemens_s7_plc.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/siemens_s7_plc.md new file mode 100644 index 00000000000000..e80499d3cfadad --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/siemens_s7_plc.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/siemens_s7_plc.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Siemens S7 PLC" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Siemens S7 PLC + + +<img src="https://netdata.cloud/img/siemens.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control. + + +Metrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/site_24x7.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/site_24x7.md new file mode 100644 index 00000000000000..004166fe5c3612 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/site_24x7.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/site_24x7.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Site 24x7" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Site 24x7 + + +<img src="https://netdata.cloud/img/site24x7.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management. + + +Metrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/slurm.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/slurm.md new file mode 100644 index 00000000000000..465fb1a60be8e7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/slurm.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/slurm.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Slurm" +learn_status: "Published" +learn_rel_path: "Data Collection/Task Queues" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Slurm + + +<img src="https://netdata.cloud/img/slurm.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management. 
+ + +Metrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sma_inverters.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sma_inverters.md new file mode 100644 index 00000000000000..6951e679e77191 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sma_inverters.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sma_inverters.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SMA Inverters" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SMA Inverters + + +<img src="https://netdata.cloud/img/sma.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SMA solar inverter metrics for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smart_meters_sml.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smart_meters_sml.md new file mode 100644 index 00000000000000..d724471867ae46 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smart_meters_sml.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/smart_meters_sml.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Smart meters SML" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Smart meters SML + + +<img src="https://netdata.cloud/img/sml.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Smart Message Language (SML) metrics for efficient smart metering and energy management. + + +Metrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
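+
+If you do want to be alerted on these metrics, you can define a custom health entity. The following is a hedged sketch, not a shipped default: the chart context (`prometheus.smart_meter_power_w`) and the threshold are hypothetical placeholders; replace them with the actual context from your dashboard and a value that makes sense for your meter.
+
+```conf
+# Hypothetical sketch: warn when the assumed power gauge averages above
+# 5000 over the last 5 minutes. Replace "on:" with the real chart context
+# shown on your Netdata dashboard.
+ template: smart_meter_power_high
+       on: prometheus.smart_meter_power_w
+   lookup: average -5m unaligned
+    every: 1m
+     warn: $this > 5000
+     info: average power draw over the last 5 minutes
+```
+
+Custom entities like this live in your `health.d` directory and can be edited with the same `edit-config` script used below for the collector configuration.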
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smartrg_808ac_cable_modem.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smartrg_808ac_cable_modem.md new file mode 100644 index 00000000000000..7cb9d950ad7124 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/smartrg_808ac_cable_modem.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/smartrg_808ac_cable_modem.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SmartRG 808AC Cable Modem" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SmartRG 808AC Cable Modem + + +<img src="https://netdata.cloud/img/smartr.jpeg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SmartRG SR808ac router metrics for efficient network device management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds.
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
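+
+If you prefer a single command, the steps below collapse into one line (assuming the default plugin directory; adjust the path if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```
+
+Step by step: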
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/softether_vpn_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/softether_vpn_server.md new file mode 100644 index 00000000000000..3daf98aab6feed --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/softether_vpn_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/softether_vpn_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SoftEther VPN Server" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SoftEther VPN Server + + +<img src="https://netdata.cloud/img/softether.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solar_logging_stick.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solar_logging_stick.md new file mode 100644 index 00000000000000..9ced1c048f4c0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solar_logging_stick.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/solar_logging_stick.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Solar logging stick" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Solar logging stick + + +<img src="https://netdata.cloud/img/solar.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
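+
+If you need alerting, you can add a custom health entity. The sketch below is hypothetical, not a shipped default: the chart context (`prometheus.solar_power_w`) and the threshold are placeholders to replace with the real context from your dashboard.
+
+```conf
+# Hypothetical sketch: warn when the assumed solar power gauge drops below
+# 100 on average over the last 10 minutes. Replace "on:" with the real
+# chart context shown on your Netdata dashboard.
+ template: solar_power_low
+       on: prometheus.solar_power_w
+   lookup: average -10m unaligned
+    every: 1m
+     warn: $this < 100
+     info: average solar power over the last 10 minutes
+```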
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solaredge_inverters.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solaredge_inverters.md new file mode 100644 index 00000000000000..c8d9ad3a4f3ebf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solaredge_inverters.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/solaredge_inverters.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SolarEdge inverters" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SolarEdge inverters + + +<img src="https://netdata.cloud/img/solaredge.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track SolarEdge solar inverter metrics for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication.
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
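+
+The whole sequence can also be run as a single command (assuming the default plugin directory; adjust the path if your installation differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```
+
+Or, step by step: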
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solis_ginlong_5g_inverters.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solis_ginlong_5g_inverters.md new file mode 100644 index 00000000000000..be4f43bb4c0a3d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/solis_ginlong_5g_inverters.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/solis_ginlong_5g_inverters.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Solis Ginlong 5G inverters" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Solis Ginlong 5G inverters + + +<img src="https://netdata.cloud/img/solis.jpg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Solis solar inverter metrics for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions in its README.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (counter or gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed it are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series.
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sonic_nos.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sonic_nos.md new file mode 100644 index 00000000000000..cc0e14efdf03ed --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sonic_nos.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sonic_nos.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SONiC NOS" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SONiC NOS + + +<img src="https://netdata.cloud/img/sonic.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
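+
+For example, under the untyped-metric rules in the Metrics section above, a hypothetical untyped series named `sonic_interface_flaps_total` would be collected as a Counter (because of its '_total' suffix), while a hypothetical gauge-like series such as `sonic_interface_oper_status` would be ignored unless you opt it in with `fallback_type`. A minimal sketch (the metric name and the port are illustrative assumptions, not values taken from the exporter):
+
+```yaml
+jobs:
+  - name: sonic
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        # hypothetical untyped metric to be treated as a Gauge
+        - sonic_interface_oper_status
+```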
+
+
+## Setup

+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/spacelift.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/spacelift.md new file mode 100644 index 00000000000000..2d990fa7a6d25c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/spacelift.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/spacelift.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Spacelift" +learn_status: "Published" +learn_rel_path: "Data Collection/Provisioning Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Spacelift + + +<img src="https://netdata.cloud/img/spacelift.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management. + + +Metrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/speedify_cli.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/speedify_cli.md new file mode 100644 index 00000000000000..b525d83a905131 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/speedify_cli.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/speedify_cli.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Speedify CLI" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Speedify CLI + + +<img src="https://netdata.cloud/img/speedify.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Speedify VPN metrics for efficient virtual private network (VPN) management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sphinx.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sphinx.md new file mode 100644 index 00000000000000..f140589dc120a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sphinx.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sphinx.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sphinx" +learn_status: "Published" +learn_rel_path: "Data Collection/Search Engines" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sphinx + + +<img src="https://netdata.cloud/img/sphinx.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Sphinx search engine metrics for efficient search and indexing performance. 
+ + +Metrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. 
Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sql_database_agnostic.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sql_database_agnostic.md new file mode 100644 index 00000000000000..6c4d51b6f2257a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sql_database_agnostic.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sql_database_agnostic.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SQL Database agnostic" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SQL Database agnostic + + +<img src="https://netdata.cloud/img/sql.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Query SQL databases for efficient database performance monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those with no '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+ +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssh.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssh.md new file mode 100644 index 00000000000000..a59e8eaa47de9c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssh.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ssh.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SSH" +learn_status: "Published" +learn_rel_path: "Data Collection/Authentication and Authorization" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SSH + + +<img src="https://netdata.cloud/img/ssh.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SSH server metrics for efficient secure shell server management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
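+
+As an illustration of the grouping rules above: a hypothetical Summary series such as `ssh_connection_duration_seconds` would produce a chart per label set with one dimension per quantile, plus incremental sum and count dimensions. If the exporter exposes more series than you need, the `selector` option (documented below) narrows collection — a hedged sketch using an assumed `ssh_` metric-name prefix:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        # assumed prefix; match it to the metric names your exporter actually exposes
+        - ssh_*
+```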
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssl_certificate.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssl_certificate.md new file mode 100644 index 00000000000000..ff50de11f4923e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ssl_certificate.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ssl_certificate.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "SSL Certificate" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SSL Certificate + + +<img src="https://netdata.cloud/img/ssl.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track SSL/TLS certificate metrics for efficient web security and certificate management. + + +Metrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
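+
+Note that this exporter is probe-based: it exposes certificate metrics for a given target on its `/probe` endpoint rather than on `/metrics`. A hedged job sketch, assuming the exporter runs locally on its default port (9219) and probes `example.com`:
+
+```yaml
+jobs:
+  - name: ssl_example_com
+    url: http://127.0.0.1:9219/probe?target=example.com:443
+```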
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration that reads metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starlink_spacex.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starlink_spacex.md new file mode 100644 index 00000000000000..c6f3e9a8cf19fe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starlink_spacex.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/starlink_spacex.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Starlink (SpaceX)" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Starlink (SpaceX) + + +<img src="https://netdata.cloud/img/starlink.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md new file mode 100644 index 00000000000000..28942ebc61d02f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Starwind VSAN VSphere Edition" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Starwind VSAN VSphere Edition + + +<img src="https://netdata.cloud/img/starwind.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
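+
+As an illustration of these rules, an exposition without '# TYPE' hints would be classified as sketched below. The metric names are made up for the example, and the arrow annotations are not part of the exposition format:
+
+```text
+app_requests_total 42            <- '_total' suffix: treated as Counter
+app_latency{quantile="0.99"} 1   <- 'quantile' label: treated as Summary
+app_time_bucket{le="0.5"} 7      <- 'le' label: treated as Histogram
+app_temperature 23               <- no match: ignored unless 'fallback_type' catches it
+```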
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/statuspage.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/statuspage.md new file mode 100644 index 00000000000000..ecb388f11caf8e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/statuspage.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/statuspage.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "StatusPage" +learn_status: "Published" +learn_rel_path: "Data Collection/Incident Management" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# StatusPage + + +<img src="https://netdata.cloud/img/statuspage.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor StatusPage.io incident and status metrics for efficient incident management and communication. + + +Metrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
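+
+If the endpoint exposes more series than you care about, the job can be narrowed with a `selector` (documented under Options below). A sketch, assuming this exporter prefixes its metrics with `statuspage_`; verify the prefix against the actual exposition:
+
+```yaml
+jobs:
+  - name: statuspage
+    url: http://127.0.0.1:9090/metrics  # replace with the exporter's actual address and port
+    selector:
+      allow:
+        - statuspage_*
+```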
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration that reads metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/steam.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/steam.md new file mode 100644 index 00000000000000..10d603cab21477 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/steam.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/steam.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Steam" +learn_status: "Published" +learn_rel_path: "Data Collection/Gaming" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Steam + + +<img src="https://netdata.cloud/img/a2s.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Gain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
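+
+If this exporter exposes any metrics without a '# TYPE' hint, they can be kept rather than dropped by declaring a fallback type (see the `fallback_type` option below). A sketch with a made-up pattern, not taken from the exporter's actual metric names:
+
+```yaml
+jobs:
+  - name: a2s
+    url: http://127.0.0.1:9090/metrics  # replace with the exporter's actual address and port
+    fallback_type:
+      gauge:
+        - a2s_*  # hypothetical name pattern; match it to the exporter's real metrics
+```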
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/storidge.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/storidge.md new file mode 100644 index 00000000000000..6c594a226d09f8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/storidge.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/storidge.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Storidge" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Storidge + + +<img src="https://netdata.cloud/img/storidge.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Storidge storage metrics for efficient storage management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/stream.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/stream.md new file mode 100644 index 00000000000000..0c84e12d6c5df0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/stream.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/stream.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Stream" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Stream + + +<img src="https://netdata.cloud/img/stream.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor streaming metrics for efficient media streaming and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/strongswan.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/strongswan.md new file mode 100644 index 00000000000000..0c5a6cc88be7ee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/strongswan.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/strongswan.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "strongSwan" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# strongSwan + + +<img src="https://netdata.cloud/img/strongswan.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sunspec_solar_energy.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sunspec_solar_energy.md new file mode 100644 index 00000000000000..bb75d542ad738f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sunspec_solar_energy.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sunspec_solar_energy.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sunspec Solar Energy" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sunspec Solar Energy + + +<img src="https://netdata.cloud/img/sunspec.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/suricata.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/suricata.md new file mode 100644 index 00000000000000..d79251f6d0d135 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/suricata.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/suricata.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Suricata" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Suricata + + +<img src="https://netdata.cloud/img/suricata.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
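+
+While no alerts ship by default, the collection limits described under Setup below still apply. Suricata publishes per-thread counters, so busy sensors can exceed the default per-metric limit of 200 time series. A hedged sketch raising that limit (port and value are illustrative, not recommendations):
+
+```yaml
+jobs:
+  - name: suricata
+    url: http://127.0.0.1:9090/metrics  # placeholder port; check the exporter README
+    max_time_series_per_metric: 500     # illustrative value; default is 200
+```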
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/synology_activebackup.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/synology_activebackup.md new file mode 100644 index 00000000000000..e29b5e287d51a1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/synology_activebackup.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/synology_activebackup.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Synology ActiveBackup" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Synology ActiveBackup + + +<img src="https://netdata.cloud/img/synology.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Synology Active Backup metrics for efficient backup and data protection management. + + +Metrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter). + + +This collector is supported on all platforms. 
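+
+Since the exporter normally runs on the NAS itself, a job usually points at the device's address rather than localhost. A minimal sketch (address and port are placeholders, following the documentation-address convention used in the examples below):
+
+```yaml
+jobs:
+  - name: synology_activebackup
+    url: http://192.0.2.10:9090/metrics  # placeholder NAS address and port
+```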
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sysload.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sysload.md new file mode 100644 index 00000000000000..46b452eee9e4bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/sysload.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/sysload.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Sysload" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Sysload + + +<img src="https://netdata.cloud/img/sysload.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor system load metrics for efficient system performance and resource management. + + +Metrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. 
+- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md new file mode 100644 index 00000000000000..5115e02432c3c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "T-Rex NVIDIA GPU Miner" +learn_status: "Published" +learn_rel_path: "Data Collection/Hardware Devices and Sensors" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# T-Rex NVIDIA GPU Miner + + +<img src="https://netdata.cloud/img/trex.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance. + + +Metrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter). + + +This collector is supported on all platforms. 
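+
+Miner telemetry such as hashrate can change quickly, so you may want to poll more often than the default 10-second interval. A hedged sketch lowering `update_every` for this job only (the port is a placeholder and the interval is illustrative; see the options under Setup below):
+
+```yaml
+jobs:
+  - name: trex
+    url: http://127.0.0.1:9090/metrics  # placeholder port; check the exporter README
+    update_every: 5                     # illustrative; default is 10 seconds
+```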
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tacacs.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tacacs.md new file mode 100644 index 00000000000000..2e9f9871fc76f4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tacacs.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tacacs.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "TACACS" +learn_status: "Published" +learn_rel_path: "Data Collection/Authentication and Authorization" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# TACACS + + +<img src="https://netdata.cloud/img/tacacs.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management. + + +Metrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tado_smart_heating_solution.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tado_smart_heating_solution.md
new file mode 100644
index 00000000000000..e4667db737b5d5
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tado_smart_heating_solution.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tado_smart_heating_solution.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Tado smart heating solution"
+learn_status: "Published"
+learn_rel_path: "Data Collection/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tado smart heating solution
+
+
+<img src="https://netdata.cloud/img/tado.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Tado smart thermostat metrics for efficient home heating and cooling management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tado° Exporter](https://github.com/eko/tado-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
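+
+While none ship by default, you can define your own health entities for the charts this collector creates. As a sketch, assuming you use `edit-config`: create a new health configuration file (the file name below is only a suggestion, it does not exist yet) and describe your alert condition there, using the chart context shown on the dashboard:
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+# hypothetical new file; pick any descriptive name under health.d/
+sudo ./edit-config health.d/prometheus-tado.conf
+```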
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tankerkoenig_api.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tankerkoenig_api.md
new file mode 100644
index 00000000000000..6fc6f6fbd5450b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tankerkoenig_api.md
@@ -0,0 +1,291 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tankerkoenig_api.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml"
+sidebar_label: "Tankerkoenig API"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Generic Data Collection"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tankerkoenig API
+
+
+<img src="https://netdata.cloud/img/tanker.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Tankerkoenig API fuel price metrics for efficient fuel price monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_powerwall.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_powerwall.md new file mode 100644 index 00000000000000..60f6ff8416427f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_powerwall.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tesla_powerwall.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Tesla Powerwall" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Tesla Powerwall + + +<img src="https://netdata.cloud/img/tesla.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Tesla Powerwall metrics for efficient home energy storage and management. + + +Metrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_vehicle.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_vehicle.md new file mode 100644 index 00000000000000..309a26494c3ccf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_vehicle.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tesla_vehicle.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Tesla vehicle" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Tesla vehicle + + +<img src="https://netdata.cloud/img/tesla.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Tesla vehicle metrics for efficient electric vehicle management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
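+
+Before the setup steps below, it may be worth a quick sanity check that the exporter endpoint actually serves metrics. For example, with `curl` (the port is a placeholder; use the one your exporter listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```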
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_wall_connector.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_wall_connector.md new file mode 100644 index 00000000000000..7ccc9d51d53d7e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tesla_wall_connector.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tesla_wall_connector.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Tesla Wall Connector" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Tesla Wall Connector + + +<img src="https://netdata.cloud/img/tesla.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management. + + +Metrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tp-link_p110.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tp-link_p110.md new file mode 100644 index 00000000000000..bfb42f829c7462 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/tp-link_p110.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/tp-link_p110.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "TP-Link P110" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# TP-Link P110 + + +<img src="https://netdata.cloud/img/tplink.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track TP-Link P110 smart plug metrics for efficient energy management and monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series above this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/traceroute.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/traceroute.md new file mode 100644 index 00000000000000..c113bf4c3c3876 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/traceroute.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/traceroute.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Traceroute" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Traceroute + + +<img src="https://netdata.cloud/img/traceroute.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Export traceroute metrics for efficient network path analysis and performance monitoring. + + +Metrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
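+
+Keep in mind that, per the typing rules in the Metrics section above, untyped series matching none of the heuristics are dropped. If this exporter exposed a hypothetical untyped series such as `traceroute_hops`, you could still collect it by mapping it through `fallback_type` (the metric name here is illustrative only):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - traceroute_hops
+```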
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twincat_ads_web_service.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twincat_ads_web_service.md new file mode 100644 index 00000000000000..4f98bafa98a166 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twincat_ads_web_service.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/twincat_ads_web_service.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "TwinCAT ADS Web Service" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# TwinCAT ADS Web Service + + +<img src="https://netdata.cloud/img/twincat.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control. + + +Metrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter). 
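+
+
+A scraped response is plain text in the Prometheus exposition format; the collector groups what it finds by each metric's `# TYPE` line. A minimal, hypothetical sketch of such a payload (the metric and label names are placeholders, not actual exporter output):
+
+```text
+# TYPE ads_device_connected gauge
+ads_device_connected{device="plc1"} 1
+# TYPE ads_requests_total counter
+ads_requests_total{device="plc1"} 1027
+```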
+ + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. 
| 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. 
The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twitch.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twitch.md new file mode 100644 index 00000000000000..97be3c74e19d37 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/twitch.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/twitch.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Twitch" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Twitch + + +<img src="https://netdata.cloud/img/twitch.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Twitch streaming platform metrics for efficient live streaming management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ubiquiti_ufiber_olt.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ubiquiti_ufiber_olt.md new file mode 100644 index 00000000000000..bf3d54328d2153 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/ubiquiti_ufiber_olt.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/ubiquiti_ufiber_olt.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Ubiquiti UFiber OLT" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Ubiquiti UFiber OLT + + +<img src="https://netdata.cloud/img/ubiquiti.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
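+
+
+If the exporter exposes more time series than you need, the `selector` option documented below can narrow collection at the source. A minimal sketch, assuming hypothetical metric name patterns (substitute the names your exporter really exposes):
+
+```yaml
+jobs:
+  - name: ufiber_olt
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - olt_*
+        - onu_*
+```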
+ + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. 
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/uptimerobot.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/uptimerobot.md new file mode 100644 index 00000000000000..a24795dbebec69 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/uptimerobot.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/uptimerobot.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Uptimerobot" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Uptimerobot + + +<img src="https://netdata.cloud/img/uptimerobot.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management. + + +Metrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. 
| | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
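+
+On a typical install, the steps below boil down to a single command (adjust the `plugins.d` path if your install differs):
+
+```bash
+# Run the collector's debug mode directly as the netdata user.
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```
+
+The step-by-step equivalent: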
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vault_pki.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vault_pki.md new file mode 100644 index 00000000000000..9539476bd207a1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vault_pki.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/vault_pki.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Vault PKI" +learn_status: "Published" +learn_rel_path: "Data Collection/Security Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Vault PKI + + +<img src="https://netdata.cloud/img/vault.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security. + + +Metrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. 
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vertica.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vertica.md new file mode 100644 index 00000000000000..a17dc1a5de13a0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vertica.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/vertica.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Vertica" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Vertica + + +<img src="https://netdata.cloud/img/vertica.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Vertica analytics database platform metrics for efficient database performance and management. 
+ + +Metrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. 
| 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vscode.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vscode.md new file mode 100644 index 00000000000000..01b2a4c5387088 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/vscode.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/vscode.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "VSCode" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# VSCode + + +<img src="https://netdata.cloud/img/vscode.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Visual Studio Code editor metrics for efficient development environment management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). 
+ +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. 
Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/warp10.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/warp10.md new file mode 100644 index 00000000000000..c484be2bff752e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/warp10.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/warp10.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Warp10" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Warp10 + + +<img src="https://netdata.cloud/img/warp10.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Warp 10 time-series database metrics for efficient time-series data management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
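+
+> **Tip**: Before configuring the collector, you can confirm that the exporter is reachable and serving metrics in the Prometheus text format. This is a minimal sketch; adjust the host and port (the examples in this document use 9090) to match your deployment.
+
+```bash
+# Fetch the first few lines of the exporter's metrics endpoint.
+# A healthy exporter replies with lines like "# TYPE ..." and "metric_name value".
+curl -s http://127.0.0.1:9090/metrics | head -n 20
+```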
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Metric name patterns used to assign a type (counter or gauge) to untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xiaomi_mi_flora.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xiaomi_mi_flora.md new file mode 100644 index 00000000000000..fd1a04fe43122e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xiaomi_mi_flora.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/xiaomi_mi_flora.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Xiaomi Mi Flora" +learn_status: "Published" +learn_rel_path: "Data Collection/IoT Devices" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Xiaomi Mi Flora + + +<img src="https://netdata.cloud/img/xiaomi.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Keep tabs on MiFlora plant monitor metrics for efficient plant care and growth management. + + +Metrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter). + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. 
| 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. 
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xmpp_server.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xmpp_server.md new file mode 100644 index 00000000000000..f15def623c6e78 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/xmpp_server.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/xmpp_server.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "XMPP Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# XMPP Server + + +<img src="https://netdata.cloud/img/xmpp.svg" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management. + + +Metrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. 
+- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). 
+- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/yourls_url_shortener.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/yourls_url_shortener.md new file mode 100644 index 00000000000000..8989cef14cf245 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/yourls_url_shortener.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/yourls_url_shortener.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "YOURLS URL Shortener" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# YOURLS URL Shortener + + +<img src="https://netdata.cloud/img/yourls.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. 
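+
+> **Tip**: Exporters written in Go typically expose their own runtime series (`go_*`, `process_*`) alongside the application's metrics. If you only want the application's series, they can be dropped with the `selector` option described under Configuration below. A hedged sketch (the job name and URL are illustrative):
+
+```yaml
+jobs:
+  - name: yourls
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      deny:
+        - go_*
+        - process_*
+```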
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Metric name patterns used to assign a type (counter or gauge) to untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zerto.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zerto.md new file mode 100644 index 00000000000000..65064d77b8d193 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zerto.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/zerto.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Zerto" +learn_status: "Published" +learn_rel_path: "Data Collection/Cloud Provider Managed" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Zerto + + +<img src="https://netdata.cloud/img/zerto.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management. + + +Metrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. 
+ + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Install Exporter + +Install [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/prometheus.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/prometheus.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 10 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| selector | Time series selector (filter). | | no | +| fallback_type | Time series selector (filter). | | no | +| max_time_series | Global time series limit. If an endpoint returns number of time series > limit the data is not processed. | 2000 | no | +| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with number of time series > limit are skipped. | 200 | no | +| timeout | HTTP request timeout. | 10 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. 
| | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### selector + +This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected. + +- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4) +- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md). +- Option syntax: + +```yaml +selector: + allow: + - pattern1 + - pattern2 + deny: + - pattern3 + - pattern4 +``` + + +##### fallback_type + +This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them. + +- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match). +- Option syntax: + +```yaml +fallback_type: + counter: + - metric_name_pattern1 + - metric_name_pattern2 + gauge: + - metric_name_pattern3 + - metric_name_pattern4 +``` + + +</details> + +#### Examples + +##### Basic + +> **Note**: Change the port of the monitored application on which it provides metrics. + +A basic example configuration. + + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. 
If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zulip.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zulip.md new file mode 100644 index 00000000000000..823e59bdb631c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zulip.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/zulip.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Zulip" +learn_status: "Published" +learn_rel_path: "Data Collection/Media Services" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Zulip + + +<img src="https://netdata.cloud/img/zulip.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Monitor Zulip open-source group chat application metrics for efficient team communication management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations). +The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf). + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + +| Metric | Chart | Dimension(s) | Algorithm | +|---------------------------|-------------------------------------------|----------------------|-------------| +| Gauge | for each label set | one, the metric name | absolute | +| Counter | for each label set | one, the metric name | incremental | +| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | +| Summary (sum and count) | for each label set | the metric name | incremental | +| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | +| Histogram (sum and count) | for each label set | the metric name | incremental | + +Untyped metrics (have no '# TYPE') processing: + +- As Counter or Gauge depending on pattern match when 'fallback_type' is used. +- As Counter if it has suffix '_total'. +- As Summary if it has 'quantile' label. +- As Histogram if it has 'le' label. + +**The rest are ignored**. 
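+
+For example (hypothetical metric names, not taken from the Zulip exporter): an untyped series named `myapp_messages_total` would be collected as a Counter because of its `_total` suffix, while `myapp_queue_depth` would be ignored unless it is matched by a `fallback_type` rule such as:
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_queue_depth
+```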
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: `update_every`, `autodetection_retry`.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Metric name patterns used to assign a type (counter or gauge) to untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+ + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + +``` +##### Read metrics from a file + +An example configuration to read metrics from a file. + +<details><summary>Config</summary> + +```yaml +# use "file://" scheme +jobs: + - name: myapp + url: file:///opt/metrics/myapp/metrics.txt + +``` +</details> + +##### HTTP authentication + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Basic HTTP authentication. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +> **Note**: Change the port of the monitored application on which it provides metrics. + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:9090/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. +> **Note**: Change the port of the monitored application on which it provides metrics. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m prometheus + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zyxel_gs1200-8.md b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zyxel_gs1200-8.md new file mode 100644 index 00000000000000..f69541e3f3f2b1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/integrations/zyxel_gs1200-8.md @@ -0,0 +1,291 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/integrations/zyxel_gs1200-8.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/prometheus/metadata.yaml" +sidebar_label: "Zyxel GS1200-8" +learn_status: "Published" +learn_rel_path: "Data Collection/Networking Stack and Network Interfaces" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Zyxel GS1200-8 + + +<img src="https://netdata.cloud/img/zyxel.png" width="150"/> + + +Plugin: go.d.plugin +Module: prometheus + +<img src="https://img.shields.io/badge/maintained%20by-Community-blue" /> + +## Overview + +Track Zyxel GS1200 network switch metrics for efficient network device management and performance. + + +Metrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter). + + +This collector is supported on all platforms. 
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+- Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml b/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml
new file mode 100644
index 00000000000000..7fa0eb34953a1d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/prometheus/metadata.yaml
@@ -0,0 +1,7921 @@
+plugin_name: go.d.plugin
+modules:
+  - &module
+    meta: &meta
+      id: collector-go.d.plugin-prometheus-generic
+      module_name: prometheus
+      plugin_name: go.d.plugin
+      monitored_instance:
+        name: Prometheus endpoint
+        link: https://prometheus.io/
+        icon_filename: prometheus.svg
+      categories:
+        - data-collection.generic-data-collection
+        # - data-collection.apm
+      keywords:
+        - prometheus
+        - openmetrics
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview: &overview
+      data_collection:
+        metrics_description: |
+          This generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoint.
+        method_description: |
+          It collects metrics by periodically sending HTTP requests to the target instance.
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: |
+            By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+            The full list of endpoints is available in the collector's [configuration file](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/prometheus.conf).
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup: &setup
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/prometheus.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 10
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: ""
+              required: true
+            - name: selector
+              description: Time series selector (filter).
+              default_value: ""
+              required: false
+              detailed_description: |
+                This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+                - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+                - Pattern syntax: [selector](https://github.com/netdata/go.d.plugin/blob/master/pkg/prometheus/selector/README.md).
+                - Option syntax:
+
+                ```yaml
+                selector:
+                  allow:
+                    - pattern1
+                    - pattern2
+                  deny:
+                    - pattern3
+                    - pattern4
+                ```
+            - name: fallback_type
+              description: Process Untyped metrics as Counter or Gauge instead of ignoring them.
+              default_value: ""
+              required: false
+              detailed_description: |
+                This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+                - Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+                - Option syntax:
+
+                ```yaml
+                fallback_type:
+                  counter:
+                    - metric_name_pattern1
+                    - metric_name_pattern2
+                  gauge:
+                    - metric_name_pattern3
+                    - metric_name_pattern4
+                ```
+            - name: max_time_series
+              description: Global time series limit. If an endpoint returns more time series than this limit, the data is not processed.
+              default_value: 2000
+              required: false
+            - name: max_time_series_per_metric
+              description: Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped.
+              default_value: 200
+              required: false
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 10
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: "GET"
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: no
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: |
+                > **Note**: Change the port of the monitored application on which it provides metrics.
+
+                A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9090/metrics
+            - name: Read metrics from a file
+              description: An example configuration to read metrics from a file.
+              config: |
+                # use "file://" scheme
+                jobs:
+                  - name: myapp
+                    url: file:///opt/metrics/myapp/metrics.txt
+            - name: HTTP authentication
+              description: |
+                > **Note**: Change the port of the monitored application on which it provides metrics.
+
+                Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:9090/metrics
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: |
+                > **Note**: Change the port of the monitored application on which it provides metrics.
+
+                Do not validate server certificate chain and hostname.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:9090/metrics
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+ > **Note**: Change the port of the monitored application on which it provides metrics. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:9090/metrics + + - name: remote + url: http://192.0.2.1:9090/metrics + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: | + This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/). + + | Metric | Chart | Dimension(s) | Algorithm | + |---------------------------|-------------------------------------------|----------------------|-------------| + | Gauge | for each label set | one, the metric name | absolute | + | Counter | for each label set | one, the metric name | incremental | + | Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute | + | Summary (sum and count) | for each label set | the metric name | incremental | + | Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental | + | Histogram (sum and count) | for each label set | the metric name | incremental | + + Untyped metrics (have no '# TYPE') processing: + + - As Counter or Gauge depending on pattern match when 'fallback_type' is used. + - As Counter if it has suffix '_total'. + - As Summary if it has 'quantile' label. + - As Histogram if it has 'le' label. + + **The rest are ignored**. + availability: [] + scopes: [] + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-a10-acos + most_popular: false + community: true + monitored_instance: + name: A10 ACOS network devices + link: https://github.com/a10networks/PrometheusExporter + icon_filename: a10-networks.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor A10 Networks device metrics for comprehensive management and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-airflow + most_popular: false + community: true + monitored_instance: + name: Apache Airflow + link: https://github.com/shalb/airflow-exporter + icon_filename: airflow.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Apache Airflow metrics to optimize task scheduling and workflow management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README. 
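+  # The entries below rely on standard YAML anchors and merge keys: `- <<: *module`
+  # merges the generic &module template defined at the top of this file, and
+  # `<<: *meta`, `<<: *overview`, `<<: *setup` merge the corresponding templates,
+  # so each integration only overrides the fields that differ (id, monitored_instance,
+  # metrics_description, prerequisites, and so on).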
+ - <<: *module + meta: + <<: *meta + most_popular: false + community: true + id: collector-go.d.plugin-prometheus-alibaba-cloud + monitored_instance: + name: Alibaba Cloud + link: https://github.com/aylei/aliyun-exporter # FIXME: This repository has been archived by the owner on Oct 28, 2019 + icon_filename: alibaba-cloud.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Alibaba Cloud services and resources for efficient management and cost optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-flink + most_popular: false + community: true + monitored_instance: + name: Apache Flink + link: https://github.com/matsumana/flink_exporter + icon_filename: apache_flink.png + categories: + - data-collection.apm + keywords: + - web server + - http + - https + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Apache Flink metrics for efficient stream processing and application management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + most_popular: false + community: true + id: collector-go.d.plugin-prometheus-aruba + monitored_instance: + name: Aruba devices + link: https://github.com/slashdoom/aruba_exporter + icon_filename: aruba.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - aruba devices + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Aruba Networks devices performance metrics for comprehensive network management and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_ec2 + most_popular: false + community: true + monitored_instance: + name: AWS EC2 Compute instances + link: https://github.com/O1ahmad/aws_ec2_exporter + icon_filename: aws-ec2.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Track AWS EC2 instances key metrics for optimized performance and cost management. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_ecs + most_popular: false + community: true + monitored_instance: + name: AWS ECS + link: https://github.com/bevers222/ecs-exporter + icon_filename: amazon-ecs.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on AWS ECS services and resources for optimized container management and orchestration. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_health + most_popular: false + community: true + monitored_instance: + name: AWS Health events + link: https://github.com/vladvasiliu/aws-health-exporter-rs + icon_filename: aws.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Track AWS service health metrics for proactive incident management and resolution. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_instance_health + most_popular: false + community: true + monitored_instance: + name: AWS instance health + link: https://github.com/bobtfish/aws-instance-health-exporter + icon_filename: aws.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor the health of AWS instances for improved performance and availability. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_s3 + most_popular: false + community: true + monitored_instance: + name: AWS S3 buckets + link: https://github.com/ribbybibby/s3_exporter + icon_filename: aws-s3.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_sqs + most_popular: false + community: true + monitored_instance: + name: AWS SQS + link: https://github.com/jmal98/sqs-exporter + icon_filename: aws-sqs.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Track AWS SQS messaging metrics for efficient message processing and queue management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_ad_app_passwords + most_popular: false + community: true + monitored_instance: + name: Azure AD App passwords + link: https://github.com/vladvasiliu/azure-app-secrets-monitor + icon_filename: azure.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - azure services + overview: + <<: *overview + data_collection: + metrics_description: | + Safeguard and track Azure App secrets for enhanced security and access management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_elastic_sql + most_popular: false + community: true + monitored_instance: + name: Azure Elastic Pool SQL + link: https://github.com/benclapp/azure_elastic_sql_exporter + icon_filename: azure-elastic-sql.png + categories: + - data-collection.cloud-provider-managed + keywords: + - database + - relational db + - data querying + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Azure Elastic SQL performance metrics for efficient database management and query optimization. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_app + most_popular: false + community: true + monitored_instance: + name: Azure application + link: https://github.com/RobustPerception/azure_metrics_exporter + icon_filename: azure.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - azure services + overview: + <<: *overview + data_collection: + metrics_description: | + Track Azure Monitor metrics for comprehensive resource management and performance optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_res + most_popular: false + community: true + monitored_instance: + name: Azure Resources + link: https://github.com/FXinnovation/azure-resources-exporter + icon_filename: azure.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - azure services + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Azure resources vital metrics for efficient cloud management and cost optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure Resources Exporter](https://github.com/FXinnovation/azure-resources-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_service_bus + most_popular: false + community: true + monitored_instance: + name: Azure Service Bus + link: https://github.com/marcinbudny/servicebus_exporter + icon_filename: azure-service-bus.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - azure services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Azure Service Bus messaging metrics for optimized communication and integration. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-azure_sql + most_popular: false + community: true + monitored_instance: + name: Azure SQL + link: https://github.com/iamseth/azure_sql_exporter + icon_filename: azure-sql.png + categories: + - data-collection.cloud-provider-managed + keywords: + - database + - relational db + - data querying + overview: + <<: *overview + data_collection: + metrics_description: | + Track Azure SQL performance metrics for efficient database management and query performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bigquery + most_popular: false + community: true + monitored_instance: + name: BigQuery + link: https://github.com/m-lab/prometheus-bigquery-exporter + icon_filename: bigquery.png + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Google BigQuery metrics for optimized data processing and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-blackbox + most_popular: false + community: true + monitored_instance: + name: Blackbox + link: https://github.com/prometheus/blackbox_exporter + icon_filename: prometheus.svg + categories: + - data-collection.synthetic-checks + keywords: + - blackbox + overview: + <<: *overview + data_collection: + metrics_description: | + Track external service availability and response times with Blackbox monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-borg + most_popular: false + community: true + monitored_instance: + name: Borg backup + link: https://github.com/k0ral/borg-exporter + icon_filename: borg.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Borg backup performance metrics for efficient data protection and recovery. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cadvisor + most_popular: false + community: true + monitored_instance: + name: cAdvisor + link: https://github.com/google/cadvisor + icon_filename: cadvisor.png + categories: + - data-collection.containers-and-vms + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor container resource usage and performance metrics with cAdvisor for efficient container management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cilium_agent + most_popular: false + community: true + monitored_instance: + name: Cilium Agent + link: https://github.com/cilium/cilium + icon_filename: cilium.png + categories: + - data-collection.kubernetes + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Cilium Agent metrics for optimized network security and connectivity. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cilium_operator + most_popular: false + community: true + monitored_instance: + name: Cilium Operator + link: https://github.com/cilium/cilium + icon_filename: cilium.png + categories: + - data-collection.kubernetes + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Cilium Operator metrics for efficient Kubernetes network security management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cilium_proxy + most_popular: false + community: true + monitored_instance: + name: Cilium Proxy + link: https://github.com/cilium/proxy + icon_filename: cilium.png + categories: + - data-collection.kubernetes + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Cilium Proxy metrics for enhanced network security and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cisco_aci + most_popular: false + community: true + monitored_instance: + name: Cisco ACI + link: https://github.com/RavuAlHemio/prometheus_aci_exporter + icon_filename: cisco.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - cisco devices + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Cisco ACI infrastructure metrics for optimized network performance and resource management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-citrix_netscaler + most_popular: false + community: true + monitored_instance: + name: Citrix NetScaler + link: https://github.com/rokett/Citrix-NetScaler-Exporter + icon_filename: citrix.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on NetScaler performance metrics for efficient application delivery and load balancing. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-clickhouse + most_popular: false + community: true + monitored_instance: + name: ClickHouse + link: https://github.com/ClickHouse/ClickHouse + icon_filename: clickhouse.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ClickHouse database metrics for efficient data storage and query performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to the ClickHouse built-in Prometheus exporter. + setup: + <<: *setup + prerequisites: + list: + - title: Configure built-in Prometheus exporter + description: | + To configure the built-in Prometheus exporter, follow the [official documentation](https://clickhouse.com/docs/en/operations/server-configuration-parameters/settings#server_configuration_parameters-prometheus). + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cloudflare_pcap + most_popular: false + community: true + monitored_instance: + name: Cloudflare PCAP + link: https://github.com/wehkamp/docker-prometheus-cloudflare-exporter + icon_filename: cloudflare.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection. 
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      id: collector-go.d.plugin-prometheus-aws_cloudwatch
+      <<: *meta
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: CloudWatch
+        link: https://github.com/prometheus/cloudwatch_exporter
+        icon_filename: aws-cloudwatch.png
+      categories:
+        - data-collection.cloud-provider-managed
+      keywords:
+        - cloud services
+        - cloud computing
+        - scalability
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-concourse
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Concourse
+        link: https://concourse-ci.org
+        icon_filename: concourse.png
+      categories:
+        - data-collection.ci-cd-systems
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Configure built-in Prometheus exporter
+            description: |
+              To configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-crowdsec
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Crowdsec
+        link: https://docs.crowdsec.net/docs/observability/prometheus
+        icon_filename: crowdsec.png
+      categories:
+        - data-collection.security-systems
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor Crowdsec security metrics for efficient threat detection and response.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Configure built-in Prometheus exporter
+            description: |
+              To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dell_emc_ecs + most_popular: false + community: true + monitored_instance: + name: Dell EMC ECS cluster + link: https://github.com/paychex/prometheus-emcecs-exporter + icon_filename: dell.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Dell EMC ECS object storage metrics for optimized storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dell_emc_isilon + most_popular: false + community: true + monitored_instance: + name: Dell EMC Isilon cluster + link: https://github.com/paychex/prometheus-isilon-exporter + icon_filename: dell.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-digitalocean + most_popular: false + community: true + monitored_instance: + name: DigitalOcean + link: https://github.com/metalmatze/digitalocean_exporter + icon_filename: digitalocean.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track DigitalOcean cloud provider metrics for optimized resource management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-discourse + most_popular: false + community: true + monitored_instance: + name: Discourse + link: https://github.com/discourse/discourse-prometheus + icon_filename: discourse.svg + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Discourse forum metrics for efficient community management and engagement. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dynatrace + most_popular: false + community: true + monitored_instance: + name: Dynatrace + link: https://github.com/Apside-TOP/dynatrace_exporter + icon_filename: dynatrace.svg + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Dynatrace APM metrics for comprehensive application performance management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-eos_web + most_popular: false + community: true + monitored_instance: + name: EOS + link: https://eos-web.web.cern.ch/eos-web/ + icon_filename: eos.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor CERN EOS metrics for efficient storage management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-etcd + most_popular: false + community: true + monitored_instance: + name: etcd + link: https://etcd.io/ + icon_filename: etcd.svg + categories: + - data-collection.service-discovery-registry + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track etcd database metrics for optimized distributed key-value store management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-fortigate + most_popular: false + community: true + monitored_instance: + name: Fortigate firewall + link: https://github.com/bluecmd/fortigate_exporter + icon_filename: fortinet.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Fortigate firewall metrics for enhanced network protection and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-freebsd_nfs + most_popular: false + community: true + monitored_instance: + name: FreeBSD NFS + link: https://github.com/Axcient/freebsd-nfs-exporter + icon_filename: freebsd.svg + categories: + - data-collection.freebsd + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor FreeBSD Network File System metrics for efficient file sharing management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-freebsd_rctl + most_popular: false + community: true + monitored_instance: + name: FreeBSD RCTL-RACCT + link: https://github.com/yo000/rctl_exporter + icon_filename: freebsd.svg + categories: + - data-collection.freebsd + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on FreeBSD Resource Container metrics for optimized resource management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gcp_gce + most_popular: false + community: true + monitored_instance: + name: GCP GCE + link: https://github.com/O1ahmad/gcp-gce-exporter + icon_filename: gcp-gce.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gcp_quota + most_popular: false + community: true + monitored_instance: + name: GCP Quota + link: https://github.com/mintel/gcp-quota-exporter + icon_filename: gcp.png + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Google Cloud Platform quota metrics for optimized resource usage and cost management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-github_repo + most_popular: false + community: true + monitored_instance: + name: GitHub repository + link: https://github.com/githubexporter/github-exporter + icon_filename: github.svg + categories: + - data-collection.other + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track GitHub repository metrics for optimized project and user analytics monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gitlab_runner + most_popular: false + community: true + monitored_instance: + name: GitLab Runner + link: https://gitlab.com/gitlab-org/gitlab-runner + icon_filename: gitlab.png + categories: + - data-collection.ci-cd-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on GitLab CI/CD job metrics for efficient development and deployment management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to GitLab built-in Prometheus exporter. + setup: + <<: *setup + prerequisites: + list: + - title: Configure built-in Prometheus exporter + description: | + To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server). + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gobetween + most_popular: false + community: true + monitored_instance: + name: Gobetween + link: https://github.com/yyyar/gobetween + icon_filename: gobetween.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Gobetween load balancer metrics for optimized network traffic management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to Gobetween built-in Prometheus exporter. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gcp + most_popular: false + community: true + monitored_instance: + name: Google Cloud Platform + link: https://github.com/DazWilkin/gcp-exporter + icon_filename: gcp.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README. 
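+  # Illustrative sketch only: entries such as GitLab Runner above expose a built-in
+  # Prometheus endpoint, so no separate exporter is installed. Assuming the Runner's
+  # metrics server was enabled (e.g. listen_address = ":9252" in config.toml, as
+  # described in the linked documentation), a hypothetical job would be:
+  #
+  #   jobs:
+  #     - name: gitlab_runner
+  #       url: http://127.0.0.1:9252/metrics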
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gcp_stackdriver + most_popular: false + community: true + monitored_instance: + name: Google Stackdriver + link: https://github.com/prometheus-community/stackdriver_exporter + icon_filename: gcp-stackdriver.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - google cloud services + overview: + <<: *overview + data_collection: + metrics_description: | + Track Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-grafana + most_popular: false + community: true + monitored_instance: + name: Grafana + link: https://grafana.com/ + icon_filename: grafana.png + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to Grafana built-in Prometheus exporter. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-graylog + most_popular: false + community: true + monitored_instance: + name: Graylog Server + link: https://github.com/Graylog2/graylog2-server/ + icon_filename: graylog.svg + categories: + - data-collection.logs-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Graylog server metrics for efficient log management and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to Graylog built-in Prometheus exporter. + setup: + <<: *setup + prerequisites: + list: + - title: Configure built-in Prometheus exporter + description: | + To configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting). + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hana + most_popular: false + community: true + monitored_instance: + name: HANA + link: https://github.com/jenningsloy318/hana_exporter + icon_filename: sap.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track SAP HANA database metrics for efficient data storage and query performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-honeypot + most_popular: false + community: true + monitored_instance: + name: Honeypot + link: https://github.com/Intrinsec/honeypot_exporter + icon_filename: intrinsec.svg + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor honeypot metrics for efficient threat detection and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hp_ilo + most_popular: false + community: true + monitored_instance: + name: HP iLO + link: https://github.com/infinityworks/hpilo-exporter + icon_filename: hp.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hubble + most_popular: false + community: true + monitored_instance: + name: Hubble + link: https://github.com/cilium/hubble + icon_filename: hubble.png + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Hubble network observability metrics for efficient network visibility and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to Hubble built-in Prometheus exporter. + setup: + <<: *setup + prerequisites: + list: + - title: Configure built-in Prometheus exporter + description: | + To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics). + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_spectrum + most_popular: false + community: true + monitored_instance: + name: IBM Spectrum + link: https://github.com/topine/ibm-spectrum-exporter + icon_filename: ibm.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor IBM Spectrum storage metrics for efficient data management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-influxdb + most_popular: false + community: true + monitored_instance: + name: InfluxDB + link: https://github.com/prometheus/influxdb_exporter + icon_filename: influxdb.svg + categories: + - data-collection.database-servers + keywords: + - database + - dbms + - data storage + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor InfluxDB time-series database metrics for efficient data storage and query performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-jenkins + most_popular: false + community: true + monitored_instance: + name: Jenkins + link: https://www.jenkins.io/ + icon_filename: jenkins.svg + categories: + - data-collection.ci-cd-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Jenkins continuous integration server metrics for efficient development and build management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-jmx + most_popular: false + community: true + monitored_instance: + name: JMX + link: https://github.com/prometheus/jmx_exporter + icon_filename: java.svg + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Java Management Extensions (JMX) metrics for efficient Java application management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-jolokia + most_popular: false + community: true + monitored_instance: + name: jolokia + link: https://github.com/aklinkert/jolokia_exporter + icon_filename: jolokia.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Jolokia JVM metrics for optimized Java application performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-kafka_consumer_lag + most_popular: false + community: true + monitored_instance: + name: Kafka Consumer Lag + link: https://github.com/omarsmak/kafka-consumer-lag-monitoring + icon_filename: kafka.svg + categories: + - data-collection.service-discovery-registry + keywords: + - big data + - stream processing + - message broker + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Kafka consumer lag metrics for efficient message queue management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-kafka + most_popular: false + community: true + monitored_instance: + name: Kafka + link: https://github.com/danielqsj/kafka_exporter/ + icon_filename: kafka.svg + categories: + - data-collection.message-brokers + keywords: + - big data + - stream processing + - message broker + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Kafka message queue metrics for optimized data streaming and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-kafka_zookeeper + most_popular: false + community: true + monitored_instance: + name: Kafka ZooKeeper + link: https://github.com/cloudflare/kafka_zookeeper_exporter + icon_filename: kafka.svg + categories: + - data-collection.message-brokers + keywords: + - big data + - stream processing + - message broker + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Kafka ZooKeeper metrics for optimized distributed coordination and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-linode + most_popular: false + community: true + monitored_instance: + name: Linode + link: https://github.com/DazWilkin/linode-exporter + icon_filename: linode.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Linode cloud hosting metrics for efficient virtual server management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter). 
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-loki
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: loki
+        link: https://github.com/grafana/loki
+        icon_filename: loki.png
+        categories:
+          - data-collection.logs-servers
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track Loki metrics.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Loki
+            description: |
+              Install [loki](https://github.com/grafana/loki) according to its documentation.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-minecraft
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Minecraft
+        link: https://github.com/sladkoff/minecraft-prometheus-exporter
+        icon_filename: minecraft.png
+        categories:
+          - data-collection.gaming
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track Minecraft server metrics for efficient game server management and performance.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-mosquitto
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: mosquitto
+        link: https://github.com/sapcc/mosquitto-exporter
+        icon_filename: mosquitto.svg
+        categories:
+          - data-collection.message-brokers
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-mp707
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: MP707 USB thermometer
+        link: https://github.com/nradchenko/mp707_exporter
+        icon_filename: thermometer.png
+        categories:
+          - data-collection.iot-devices
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track MP707 USB thermometer metrics for efficient temperature monitoring and management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.
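+  # Illustrative sketch only: exporters that serve metrics over HTTPS or behind
+  # basic auth can typically be scraped with the collector's standard web client
+  # options. Everything below (name, port, credentials, self-signed certificate)
+  # is a placeholder:
+  #
+  #   jobs:
+  #     - name: my_exporter
+  #       url: https://127.0.0.1:9090/metrics
+  #       username: netdata
+  #       password: some_password
+  #       tls_skip_verify: yes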
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_mq + most_popular: false + community: true + monitored_instance: + name: IBM MQ + link: https://github.com/agebhar1/mq_exporter + icon_filename: ibm.svg + categories: + - data-collection.message-brokers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on IBM MQ message queue metrics for efficient message transport and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-mqtt_blackbox + most_popular: false + community: true + monitored_instance: + name: MQTT Blackbox + link: https://github.com/inovex/mqtt_blackbox_exporter + icon_filename: mqtt.svg + categories: + - data-collection.message-brokers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track MQTT message transport performance using blackbox testing methods. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-netapp_ontap + most_popular: false + community: true + monitored_instance: + name: Netapp ONTAP API + link: https://github.com/sapcc/netapp-api-exporter + icon_filename: netapp.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-netapp_solidfire + most_popular: false + community: true + monitored_instance: + name: NetApp Solidfire + link: https://github.com/mjavier2k/solidfire-exporter + icon_filename: netapp.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Track NetApp Solidfire storage system metrics for efficient data storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-netmeter + most_popular: false + community: true + monitored_instance: + name: NetMeter + link: https://github.com/ssbostan/netmeter-exporter + icon_filename: netmeter.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor NetMeter network traffic metrics for efficient network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-newrelic + most_popular: false + community: true + monitored_instance: + name: New Relic + link: https://github.com/jfindley/newrelic_exporter + icon_filename: newrelic.svg + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor New Relic application performance management metrics for efficient application monitoring and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openvswitch + most_popular: false + community: true + monitored_instance: + name: Open vSwitch + link: https://github.com/digitalocean/openvswitch_exporter + icon_filename: ovs.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openldap + most_popular: false + community: true + monitored_instance: + name: OpenLDAP (community) + link: https://github.com/tomcz/openldap_exporter + icon_filename: openldap.svg + categories: + - data-collection.authentication-and-authorization + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OpenLDAP directory service metrics for efficient directory management and performance. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openstack + most_popular: false + community: true + monitored_instance: + name: OpenStack + link: https://github.com/CanonicalLtd/prometheus-openstack-exporter + icon_filename: openstack.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track OpenStack cloud computing platform metrics for efficient infrastructure management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openvas + most_popular: false + community: true + monitored_instance: + name: OpenVAS + link: https://github.com/ModeClearCode/openvas_exporter + icon_filename: openVAS.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OpenVAS vulnerability scanner metrics for efficient security assessment and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-google_pagespeed + most_popular: false + community: true + monitored_instance: + name: Google Pagespeed + link: https://github.com/foomo/pagespeed_exporter + icon_filename: google.svg + categories: + - data-collection.apm + keywords: + - cloud services + - cloud computing + - google cloud services + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README. 
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-philips_hue
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Philips Hue
+        link: https://github.com/aexel90/hue_exporter
+        icon_filename: hue.svg
+        categories:
+          - data-collection.iot-devices
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-podman
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Podman
+        link: https://github.com/containers/prometheus-podman-exporter
+        icon_filename: podman.png
+        categories:
+          - data-collection.containers-and-vms
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep tabs on Podman container runtime metrics for efficient container management and performance.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-proxmox
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Proxmox VE
+        link: https://github.com/prometheus-pve/prometheus-pve-exporter
+        icon_filename: proxmox.png
+        categories:
+          - data-collection.containers-and-vms
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-radius
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: RADIUS
+        link: https://github.com/devon-mar/radius-exporter
+        icon_filename: radius.png
+        categories:
+          - data-collection.authentication-and-authorization
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_rds + most_popular: false + community: true + monitored_instance: + name: AWS RDS + link: https://github.com/percona/rds_exporter + icon_filename: aws-rds.svg + categories: + - data-collection.database-servers + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ripe_atlas + most_popular: false + community: true + monitored_instance: + name: RIPE Atlas + link: https://github.com/czerwonk/atlas_exporter + icon_filename: ripe.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sentry + most_popular: false + community: true + monitored_instance: + name: Sentry + link: https://github.com/snakecharmer/sentry_exporter + icon_filename: sentry.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Sentry error tracking and monitoring platform metrics for efficient application performance and error management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-slurm + most_popular: false + community: true + monitored_instance: + name: Slurm + link: https://github.com/vpenso/prometheus-slurm-exporter + icon_filename: slurm.png + categories: + - data-collection.task-queues + #- data-collection.provisioning-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ipmi + most_popular: false + community: true + monitored_instance: + name: IPMI (By SoundCloud) + link: https://github.com/prometheus-community/ipmi_exporter + icon_filename: soundcloud.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor IPMI metrics externally for efficient server hardware management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-spacelift + most_popular: false + community: true + monitored_instance: + name: Spacelift + link: https://github.com/spacelift-io/prometheus-exporter + icon_filename: spacelift.png + categories: + - data-collection.provisioning-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ssh + most_popular: false + community: true + monitored_instance: + name: SSH + link: https://github.com/Nordstrom/ssh_exporter + icon_filename: ssh.png + categories: + - data-collection.authentication-and-authorization + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SSH server metrics for efficient secure shell server management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README. 
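+  # Illustrative sketch only: scrape cadence and request timeout can be tuned per
+  # job with the standard update_every and timeout options (host, port, and values
+  # below are arbitrary placeholders):
+  #
+  #   jobs:
+  #     - name: ssh
+  #       url: http://127.0.0.1:9312/metrics
+  #       update_every: 5
+  #       timeout: 2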
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-ssl
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: SSL Certificate
+        link: https://github.com/ribbybibby/ssl_exporter
+        icon_filename: ssl.svg
+        categories:
+          - data-collection.security-systems
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track SSL/TLS certificate metrics for efficient web security and certificate management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-starlink
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Starlink (SpaceX)
+        link: https://github.com/danopstech/starlink_exporter
+        icon_filename: starlink.svg
+        categories:
+          - data-collection.networking-stack-and-network-interfaces
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-statuspage
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: StatusPage
+        link: https://github.com/vladvasiliu/statuspage-exporter
+        icon_filename: statuspage.png
+        categories:
+          - data-collection.notifications
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor StatusPage.io incident and status metrics for efficient incident management and communication.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-tacacs
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: TACACS
+        link: https://github.com/devon-mar/tacacs-exporter
+        icon_filename: tacacs.png
+        categories:
+          - data-collection.authentication-and-authorization
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tesla_vehicle + most_popular: false + community: true + monitored_instance: + name: Tesla vehicle + link: https://github.com/wywywywy/tesla-prometheus-exporter + icon_filename: tesla.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Tesla vehicle metrics for efficient electric vehicle management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tesla_powerwall + most_popular: false + community: true + monitored_instance: + name: Tesla Powerwall + link: https://github.com/foogod/powerwall_exporter + icon_filename: tesla.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Tesla Powerwall metrics for efficient home energy storage and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-twitch + most_popular: false + community: true + monitored_instance: + name: Twitch + link: https://github.com/damoun/twitch_exporter + icon_filename: twitch.svg + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Twitch streaming platform metrics for efficient live streaming management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ubiquity_ufiber + most_popular: false + community: true + monitored_instance: + name: Ubiquiti UFiber OLT + link: https://github.com/swoga/ufiber-exporter + icon_filename: ubiquiti.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-uptimerobot + most_popular: false + community: true + monitored_instance: + name: Uptimerobot + link: https://github.com/wosc/prometheus-uptimerobot + icon_filename: uptimerobot.svg + categories: + - data-collection.synthetic-checks + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hashicorp_vault + most_popular: false + community: true + monitored_instance: + name: HashiCorp Vault secrets + link: https://github.com/tomtom-international/vault-assessment-prometheus-exporter + icon_filename: vault.svg + categories: + - data-collection.authentication-and-authorization + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track HashiCorp Vault security assessment metrics for efficient secrets management and security. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-vault_pki + most_popular: false + community: true + monitored_instance: + name: Vault PKI + link: https://github.com/aarnaud/vault-pki-exporter + icon_filename: vault.svg + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-vertica + most_popular: false + community: true + monitored_instance: + name: Vertica + link: https://github.com/vertica/vertica-prometheus-exporter + icon_filename: vertica.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Vertica analytics database platform metrics for efficient database performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-vscode + most_popular: false + community: true + monitored_instance: + name: VSCode + link: https://github.com/guicaulada/vscode-exporter + icon_filename: vscode.svg + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Visual Studio Code editor metrics for efficient development environment management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-airthings_waveplus + most_popular: false + community: true + monitored_instance: + name: Airthings Waveplus air sensor + link: https://github.com/jeremybz/waveplus_exporter + icon_filename: airthings.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Waveplus radon sensor metrics for efficient indoor air quality monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-xmpp_blackbox + most_popular: false + community: true + monitored_instance: + name: XMPP Server + link: https://github.com/horazont/xmpp-blackbox-exporter + icon_filename: xmpp.svg + categories: + - data-collection.message-brokers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter). 
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.
+
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-4d_server
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: 4D Server
+        link: https://github.com/ThomasMaul/Prometheus_4D_Exporter
+        icon_filename: 4d_server.png
+        categories:
+          - data-collection.database-servers
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Monitor 4D Server performance metrics for efficient application management and optimization.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-8430ft_modem
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: 8430FT modem
+        link: https://github.com/dernasherbrezon/8430ft_exporter
+        icon_filename: mtc.svg
+        categories:
+          - data-collection.networking-stack-and-network-interfaces
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Keep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-steam_a2s
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Steam
+        link: https://github.com/armsnyder/a2s-exporter
+        icon_filename: a2s.png
+        categories:
+          - data-collection.gaming
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Gain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.
+        method_description: |
+          Metrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).
+    setup:
+      <<: *setup
+      prerequisites:
+        list:
+          - title: Install Exporter
+            description: |
+              Install [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.
+  - <<: *module
+    meta:
+      <<: *meta
+      id: collector-go.d.plugin-prometheus-akamai_edgedns
+      most_popular: false
+      community: true
+      monitored_instance:
+        name: Akamai Edge DNS Traffic
+        link: https://github.com/akamai/akamai-edgedns-traffic-exporter
+        icon_filename: akamai.svg
+        categories:
+          - data-collection.dns-and-dhcp-servers
+      keywords: []
+    overview:
+      <<: *overview
+      data_collection:
+        metrics_description: |
+          Track and analyze Akamai Edge DNS traffic for enhanced performance and security.
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-akami_gtm + most_popular: false + community: true + monitored_instance: + name: Akamai Global Traffic Management + link: https://github.com/akamai/akamai-gtm-metrics-exporter + icon_filename: akamai.svg + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-alamos_fe2 + most_popular: false + community: true + monitored_instance: + name: Alamos FE2 server + link: https://github.com/codemonauts/prometheus-fe2-exporter + icon_filename: alamos_fe2.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Alamos FE2 systems for improved performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-altaro_backup + most_popular: false + community: true + monitored_instance: + name: Altaro Backup + link: https://github.com/raph2i/altaro_backup_exporter + icon_filename: altaro.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Altaro Backup performance metrics to ensure smooth data protection and recovery operations. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README. 
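+ # Editor's note: every entry in this list is built from shared YAML templates
+ # via merge keys: `<<: *module` copies the mapping stored under the `&module`
+ # anchor (defined earlier in this file), and the keys written after it
+ # override the copied defaults; the same holds for `*meta`, `*overview` and
+ # `*setup`. A minimal, self-contained sketch of the mechanism (the names here
+ # are illustrative, not part of the generated metadata):
+ #
+ #   defaults: &defaults
+ #     most_popular: false
+ #     community: true
+ #   entry:
+ #     <<: *defaults        # inherit both keys from the anchor
+ #     most_popular: true   # explicit keys always win over merged ones
+ #
+ # Explicit keys take precedence no matter where the `<<:` line appears inside
+ # the mapping, so orderings like `id:` before `<<: *meta` are cosmetic.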
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-amd_smi + most_popular: false + community: true + monitored_instance: + name: AMD CPU & GPU + link: https://github.com/amd/amd_smi_exporter + icon_filename: amd.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor AMD System Management Interface performance for optimized hardware management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aaisp + most_popular: false + community: true + monitored_instance: + name: Andrews & Arnold line status + link: https://github.com/daveio/aaisp-exporter + icon_filename: andrewsarnold.jpg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-apicast + most_popular: false + community: true + monitored_instance: + name: APIcast + link: https://github.com/3scale/apicast + icon_filename: apicast.png + categories: + - data-collection.web-servers-and-web-proxies + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor APIcast performance metrics to optimize API gateway operations and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-apple_timemachine + most_popular: false + community: true + monitored_instance: + name: Apple Time Machine + link: https://github.com/znerol/prometheus-timemachine-exporter + icon_filename: apple.svg + categories: + - data-collection.macos-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Apple Time Machine backup metrics for efficient data protection and recovery. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-arm_hwcpipe + most_popular: false + community: true + monitored_instance: + name: ARM HWCPipe + link: https://github.com/ylz-at/arm-hwcpipe-exporter + icon_filename: arm.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep track of ARM devices running Android and get metrics for efficient performance optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-arvancloud_cdn + most_popular: false + community: true + monitored_instance: + name: ArvanCloud CDN + link: https://github.com/arvancloud/ar-prometheus-exporter + icon_filename: arvancloud.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Track and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-audisto + most_popular: false + community: true + monitored_instance: + name: Audisto + link: https://github.com/ZeitOnline/audisto_exporter + icon_filename: audisto.svg + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Audisto SEO and website metrics for improved search performance and optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-authlog + most_popular: false + community: true + monitored_instance: + name: AuthLog + link: https://github.com/woblerr/authlog_exporter + icon_filename: linux.png + categories: + - data-collection.logs-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor authentication logs for security insights and efficient access management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_ec2_spot + most_popular: false + community: true + monitored_instance: + name: AWS EC2 Spot Instance + link: https://github.com/patcadelina/ec2-spot-exporter + icon_filename: aws-ec2.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-aws_quota + most_popular: false + community: true + monitored_instance: + name: AWS Quota + link: https://github.com/emylincon/aws_quota_exporter + icon_filename: aws.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - aws services + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor AWS service quotas for effective resource usage and cost management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bobcat + most_popular: false + community: true + monitored_instance: + name: Bobcat Miner 300 + link: https://github.com/pperzyna/bobcat_exporter + icon_filename: bobcat.jpg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Bobcat equipment metrics for optimized performance and maintenance management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bosh + most_popular: false + community: true + monitored_instance: + name: BOSH + link: https://github.com/bosh-prometheus/bosh_exporter + icon_filename: bosh.png + categories: + - data-collection.provisioning-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bpftrace + most_popular: false + community: true + monitored_instance: + name: bpftrace variables + link: https://github.com/andreasgerstmayr/bpftrace_exporter + icon_filename: bpftrace.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track bpftrace metrics for advanced performance analysis and troubleshooting. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bungeecord + most_popular: false + community: true + monitored_instance: + name: BungeeCord + link: https://github.com/weihao/bungeecord-prometheus-exporter + icon_filename: bungee.png + categories: + - data-collection.gaming + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track BungeeCord proxy server metrics for efficient load balancing and performance management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-celery + most_popular: false + community: true + monitored_instance: + name: Celery + link: https://github.com/ZeitOnline/celery_redis_prometheus + icon_filename: celery.png + categories: + - data-collection.task-queues + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Celery task queue metrics for optimized task processing and resource management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README. 
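+ # Editor's note: the Netdata side of every entry above is the same generic
+ # go.d `prometheus` job pointed at the exporter's metrics endpoint. A minimal
+ # sketch for the Celery exporter, assuming it listens on localhost:9808 (the
+ # port is a guess -- check the exporter's README), added to
+ # `go.d/prometheus.conf`:
+ #
+ #   jobs:
+ #     - name: celery
+ #       url: http://127.0.0.1:9808/metrics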
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-checkpoint + most_popular: false + community: true + monitored_instance: + name: Checkpoint device + link: https://github.com/RespiroConsulting/CheckPointExporter + icon_filename: checkpoint.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Check Point firewall and security metrics for enhanced network protection and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-chia + most_popular: false + community: true + monitored_instance: + name: Chia + link: https://github.com/chia-network/chia-exporter + icon_filename: chia.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Chia blockchain metrics for optimized farming and resource allocation. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-clm5ip + most_popular: false + community: true + monitored_instance: + name: Christ Elektronik CLM5IP power panel + link: https://github.com/christmann/clm5ip_exporter/ + icon_filename: christelec.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-clamd + most_popular: false + community: true + monitored_instance: + name: ClamAV daemon + link: https://github.com/sergeymakinen/clamav_exporter + icon_filename: clamav.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track ClamAV antivirus metrics for enhanced threat detection and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-clamscan + most_popular: false + community: true + monitored_instance: + name: Clamscan results + link: https://github.com/FortnoxAB/clamscan-exporter + icon_filename: clamav.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ClamAV scanning performance metrics for efficient malware detection and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-clash + most_popular: false + community: true + monitored_instance: + name: Clash + link: https://github.com/elonzh/clash_exporter + icon_filename: clash.png + categories: + - data-collection.web-servers-and-web-proxies + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Clash proxy server metrics for optimized network performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cloud_foundry + most_popular: false + community: true + monitored_instance: + name: Cloud Foundry + link: https://github.com/bosh-prometheus/cf_exporter + icon_filename: cloud-foundry.svg + categories: + - data-collection.provisioning-systems + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Track Cloud Foundry platform metrics for optimized application deployment and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cloud_foundry_firebase + most_popular: false + community: true + monitored_instance: + name: Cloud Foundry Firehose + link: https://github.com/bosh-prometheus/firehose_exporter + icon_filename: cloud-foundry.svg + categories: + - data-collection.provisioning-systems + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-akami_cloudmonitor + most_popular: false + community: true + monitored_instance: + name: Akamai Cloudmonitor + link: https://github.com/ExpressenAB/cloudmonitor_exporter + icon_filename: akamai.svg + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Akamai CloudMonitor metrics for comprehensive cloud performance management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-lustre + most_popular: false + community: true + monitored_instance: + name: Lustre metadata + link: https://github.com/GSI-HPC/prometheus-cluster-exporter + icon_filename: lustre.png + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Lustre clustered file system for efficient management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cmon + most_popular: false + community: true + monitored_instance: + name: ClusterControl CMON + link: https://github.com/severalnines/cmon_exporter + icon_filename: cluster-control.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-collectd + most_popular: false + community: true + monitored_instance: + name: Collectd + link: https://github.com/prometheus/collectd_exporter + icon_filename: collectd.png + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor system and application metrics with Collectd for comprehensive performance analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-shell_cmd + most_popular: false + community: true + monitored_instance: + name: Shell command + link: https://github.com/tomwilkie/prom-run + icon_filename: crunner.svg + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track custom command output metrics for tailored monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ftbeerpi + most_popular: false + community: true + monitored_instance: + name: CraftBeerPi + link: https://github.com/jo-hannes/craftbeerpi_exporter + icon_filename: craftbeer.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-crypto + most_popular: false + community: true + monitored_instance: + name: Crypto exchanges + link: https://github.com/ix-ai/crypto-exporter + icon_filename: crypto.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track cryptocurrency market metrics for informed investment and trading decisions. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README. 
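+ # Editor's note: exporters like the crypto one above can expose a very large
+ # number of time series. A hedged sketch of the `prometheus` module's
+ # selector option, which keeps only matching metric names (the address and
+ # the metric name pattern are assumptions, not taken from the exporter's
+ # docs):
+ #
+ #   jobs:
+ #     - name: crypto
+ #       url: http://127.0.0.1:9188/metrics
+ #       selector:
+ #         allow:
+ #           - crypto_*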
+ + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cryptowatch + most_popular: false + community: true + monitored_instance: + name: Cryptowatch + link: https://github.com/nbarrientos/cryptowat_exporter + icon_filename: cryptowatch.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-csgo + most_popular: false + community: true + monitored_instance: + name: CS:GO + link: https://github.com/kinduff/csgo_exporter + icon_filename: csgo.svg + categories: + - data-collection.gaming + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Counter-Strike: Global Offensive server metrics for improved game performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [CS:GO Exporter](https://github.com/kinduff/csgo_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [CS:GO Exporter](https://github.com/kinduff/csgo_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-certificate_transparency + most_popular: false + community: true + monitored_instance: + name: Certificate Transparency + link: https://github.com/Hsn723/ct-exporter + icon_filename: ct.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track certificate transparency log metrics for enhanced SSL/TLS certificate management and security. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-custom + most_popular: false + community: true + monitored_instance: + name: Custom Exporter + link: https://github.com/orange-cloudfoundry/custom_exporter + icon_filename: customdata.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Create and monitor custom metrics tailored to your specific use case and requirements. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-cvmfs + most_popular: false + community: true + monitored_instance: + name: CVMFS clients + link: https://github.com/guilbaults/cvmfs-exporter + icon_filename: cvmfs.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track CernVM File System metrics for optimized distributed file system performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ddwrt + most_popular: false + community: true + monitored_instance: + name: DDWRT Routers + link: https://github.com/camelusferus/ddwrt_collector + icon_filename: ddwrt.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on DD-WRT router metrics for efficient network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dell_emc_xtremio + most_popular: false + community: true + monitored_instance: + name: Dell EMC XtremIO cluster + link: https://github.com/cthiel42/prometheus-xtremio-exporter + icon_filename: dell.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dependency_track + most_popular: false + community: true + monitored_instance: + name: Dependency-Track + link: https://github.com/jetstack/dependency-track-exporter + icon_filename: dependency-track.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dmarc + most_popular: false + community: true + monitored_instance: + name: DMARC + link: https://github.com/jgosmann/dmarc-metrics-exporter + icon_filename: dmarc.png + categories: + - data-collection.mail-servers + keywords: + - email authentication + - policy + - reporting + overview: + <<: *overview + data_collection: + metrics_description: | + Track DMARC email authentication metrics for improved email security and deliverability. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dnsbl + most_popular: false + community: true + monitored_instance: + name: DNSBL + link: https://github.com/Luzilla/dnsbl_exporter/ + icon_filename: dnsbl.png + categories: + - data-collection.dns-and-dhcp-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor DNSBL metrics for efficient domain reputation and security management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-bird + most_popular: false + community: true + monitored_instance: + name: Bird Routing Daemon + link: https://github.com/czerwonk/bird_exporter + icon_filename: bird.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Bird Routing Daemon metrics for optimized network routing and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-elgato_keylight + most_popular: false + community: true + monitored_instance: + name: Elgato Key Light devices + link: https://github.com/mdlayher/keylight_exporter + icon_filename: elgato.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Elgato Key Light metrics for optimized lighting control and management.
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-energomera + most_popular: false + community: true + monitored_instance: + name: Energomera smart power meters + link: https://github.com/peak-load/energomera_exporter + icon_filename: energomera.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Energomera electricity meter metrics for efficient energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-excel + most_popular: false + community: true + monitored_instance: + name: Excel spreadsheet + link: https://github.com/MarcusCalidus/excel-exporter + icon_filename: excel.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Export Prometheus metrics to Excel for versatile data analysis and reporting. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-fastd + most_popular: false + community: true + monitored_instance: + name: Fastd + link: https://github.com/freifunk-darmstadt/fastd-exporter + icon_filename: fastd.png + categories: + - data-collection.vpns + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Fastd VPN metrics for efficient virtual private network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.
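+ # Editor's note: some of these exporters are slow to answer because they
+ # query the monitored device on every scrape. The generic go.d job options
+ # below can be tuned per endpoint; the address and values are illustrative
+ # only:
+ #
+ #   jobs:
+ #     - name: fastd
+ #       url: http://127.0.0.1:9281/metrics
+ #       update_every: 10  # collect every 10 seconds instead of the default
+ #       timeout: 5        # give the exporter up to 5 seconds to respond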
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-freifunk + most_popular: false + community: true + monitored_instance: + name: Freifunk network + link: https://github.com/xperimental/freifunk-exporter + icon_filename: freifunk.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Freifunk community network metrics for optimized network performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-fritzbox + most_popular: false + community: true + monitored_instance: + name: Fritzbox network devices + link: https://github.com/pdreker/fritz_exporter + icon_filename: avm.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track AVM Fritzbox router metrics for efficient home network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-frrouting + most_popular: false + community: true + monitored_instance: + name: FRRouting + link: https://github.com/tynany/frr_exporter + icon_filename: frrouting.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Free Range Routing (FRR) metrics for optimized network routing and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-generic_cli + most_popular: false + community: true + monitored_instance: + name: Generic Command Line Output + link: https://github.com/MarioMartReq/generic-exporter + icon_filename: cli.svg + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track custom command line output metrics for tailored monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-github_ratelimit + most_popular: false + community: true + monitored_instance: + name: GitHub API rate limit + link: https://github.com/lunarway/github-ratelimit-exporter + icon_filename: github.svg + categories: + - data-collection.other + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor GitHub API rate limit metrics for efficient API usage and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gpsd + most_popular: false + community: true + monitored_instance: + name: gpsd + link: https://github.com/natesales/gpsd-exporter + icon_filename: gpsd.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor GPSD (GPS daemon) metrics for efficient GPS data management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-gtp + most_popular: false + community: true + monitored_instance: + name: GTP + link: https://github.com/wmnsk/gtp_exporter + icon_filename: gtpu.png + categories: + - data-collection.telephony-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-halon + most_popular: false + community: true + monitored_instance: + name: Halon + link: https://github.com/tobiasbp/halon_exporter + icon_filename: halon.svg + categories: + - data-collection.mail-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Halon email security and delivery metrics for optimized email management and protection. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hasura_graphql + most_popular: false + community: true + monitored_instance: + name: Hasura GraphQL Server + link: https://github.com/zolamk/hasura-exporter + icon_filename: hasura.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Hasura GraphQL engine metrics for optimized API performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hdsentinel + most_popular: false + community: true + monitored_instance: + name: HDSentinel + link: https://github.com/qusielle/hdsentinel-exporter + icon_filename: harddisk.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-helium_hotspot + most_popular: false + community: true + monitored_instance: + name: Helium hotspot + link: https://github.com/tedder/helium_hotspot_exporter + icon_filename: helium.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Helium hotspot metrics for optimized LoRaWAN network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-helium_miner + most_popular: false + community: true + monitored_instance: + name: Helium miner (validator) + link: https://github.com/tedder/miner_exporter + icon_filename: helium.svg + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Helium miner and validator metrics for efficient blockchain performance and management.
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hhvm + most_popular: false + community: true + monitored_instance: + name: HHVM + link: https://github.com/wikimedia/operations-software-hhvm_exporter + icon_filename: hhvm.svg + categories: + - data-collection.web-servers-and-web-proxies + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor HipHop Virtual Machine metrics for efficient PHP execution and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hilink + most_popular: false + community: true + monitored_instance: + name: Huawei devices + link: https://github.com/eliecharra/hilink-exporter + icon_filename: huawei.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Huawei HiLink device metrics for optimized connectivity and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hitron_cgm + most_popular: false + community: true + monitored_instance: + name: Hitron CGN series CPE + link: https://github.com/yrro/hitron-exporter + icon_filename: hitron.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Hitron CGNV4 gateway metrics for efficient network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-hitron_coda + most_popular: false + community: true + monitored_instance: + name: Hitron CODA Cable Modem + link: https://github.com/hairyhenderson/hitron_coda_exporter + icon_filename: hitron.svg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Hitron CODA cable modem metrics for optimized internet connectivity and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-homebridge + most_popular: false + community: true + monitored_instance: + name: Homebridge + link: https://github.com/lstrojny/homebridge-prometheus-exporter + icon_filename: homebridge.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Homebridge smart home metrics for efficient home automation management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-homey + most_popular: false + community: true + monitored_instance: + name: Homey + link: https://github.com/rickardp/homey-prometheus-exporter + icon_filename: homey.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Homey smart home controller metrics for efficient home automation and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_cex + most_popular: false + community: true + monitored_instance: + name: IBM CryptoExpress (CEX) cards + link: https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin + icon_filename: ibm.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track IBM Z Crypto Express device metrics for optimized cryptographic performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_zhmc + most_popular: false + community: true + monitored_instance: + name: IBM Z Hardware Management Console + link: https://github.com/zhmcclient/zhmc-prometheus-exporter + icon_filename: ibm.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-iota + most_popular: false + community: true + monitored_instance: + name: IOTA full node + link: https://github.com/crholliday/iota-prom-exporter + icon_filename: iota.svg + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-iqair + most_popular: false + community: true + monitored_instance: + name: iqAir AirVisual air quality monitors + link: https://github.com/Packetslave/iqair_exporter + icon_filename: iqair.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor air quality data from IQAir devices for efficient environmental monitoring and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-jarvis + most_popular: false + community: true + monitored_instance: + name: Jarvis Standing Desk + link: https://github.com/hairyhenderson/jarvis_exporter/ + icon_filename: jarvis.jpg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Jarvis standing desk usage metrics for efficient workspace ergonomics and management. 
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.
+
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-enclosure
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Generic storage enclosure tool
+            link: https://github.com/Gandi/jbod-rs
+            icon_filename: storage-enclosure.svg
+            categories:
+              - data-collection.storage-mount-points-and-filesystems
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor storage enclosure metrics for efficient storage device management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-jetbrains_fls
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: JetBrains Floating License Server
+            link: https://github.com/mkreu/jetbrains-fls-exporter
+            icon_filename: jetbrains.png
+            categories:
+              - data-collection.generic-data-collection
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor JetBrains floating license server metrics for efficient software licensing management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-journald
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: journald
+            link: https://github.com/dead-claudia/journald-exporter
+            icon_filename: linux.png
+            categories:
+              - data-collection.logs-servers
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep an eye on systemd-journald metrics for efficient log management and analysis.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-kafka_connect + most_popular: false + community: true + monitored_instance: + name: Kafka Connect + link: https://github.com/findelabs/kafka-connect-exporter-rs + icon_filename: kafka.svg + categories: + - data-collection.message-brokers + keywords: + - big data + - stream processing + - message broker + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Kafka Connect metrics for efficient data streaming and integration. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-kannel + most_popular: false + community: true + monitored_instance: + name: Kannel + link: https://github.com/apostvav/kannel_exporter + icon_filename: kannel.png + categories: + - data-collection.telephony-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-keepalived + most_popular: false + community: true + monitored_instance: + name: Keepalived + link: https://github.com/gen2brain/keepalived_exporter + icon_filename: keepalived.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Keepalived metrics for efficient high-availability and load balancing management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-korral + most_popular: false + community: true + monitored_instance: + name: Kubernetes Cluster Cloud Cost + link: https://github.com/agilestacks/korral + icon_filename: kubernetes.svg + categories: + - data-collection.kubernetes + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-lagerist + most_popular: false + community: true + monitored_instance: + name: Lagerist Disk latency + link: https://github.com/Svedrin/lagerist + icon_filename: linux.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track disk latency metrics for efficient storage performance and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ldap + most_popular: false + community: true + monitored_instance: + name: LDAP + link: https://github.com/titisan/ldap_exporter + icon_filename: ldap.png + categories: + - data-collection.authentication-and-authorization + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-lynis + most_popular: false + community: true + monitored_instance: + name: Lynis audit reports + link: https://github.com/MauveSoftware/lynis_exporter + icon_filename: lynis.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Lynis security auditing tool metrics for efficient system security and compliance management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-machbase + most_popular: false + community: true + monitored_instance: + name: Machbase + link: https://github.com/MACHBASE/prometheus-machbase-exporter + icon_filename: machbase.png + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Machbase time-series database metrics for efficient data storage and query performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter). 
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-maildir
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Maildir
+            link: https://github.com/cherti/mailexporter
+            icon_filename: mailserver.svg
+            categories:
+              - data-collection.mail-servers
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track mail server metrics for optimized email management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-meilisearch
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Meilisearch
+            link: https://github.com/scottaglia/meilisearch_exporter
+            icon_filename: meilisearch.svg
+            categories:
+              - data-collection.search-engines
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track Meilisearch search engine metrics for efficient search performance and management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-memcached
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Memcached (community)
+            link: https://github.com/prometheus/memcached_exporter
+            icon_filename: memcached.svg
+            categories:
+              - data-collection.database-servers
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor Memcached in-memory key-value store metrics for efficient caching performance and management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-meraki
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Meraki dashboard
+            link: https://github.com/TheHolm/meraki-dashboard-promethus-exporter
+            icon_filename: meraki.png
+            categories:
+              - data-collection.networking-stack-and-network-interfaces
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-mesos
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Mesos
+            link: http://github.com/mesosphere/mesos_exporter
+            icon_filename: mesos.svg
+            categories:
+              #- data-collection.provisioning-systems
+              - data-collection.task-queues
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor Apache Mesos cluster manager metrics for efficient resource management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-xiaomi_mi_flora
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Xiaomi Mi Flora
+            link: https://github.com/xperimental/flowercare-exporter
+            icon_filename: xiaomi.svg
+            categories:
+              - data-collection.iot-devices
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-modbus_rtu
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Modbus protocol
+            link: https://github.com/dernasherbrezon/modbusrtu_exporter
+            icon_filename: modbus.svg
+            categories:
+              - data-collection.iot-devices
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track Modbus RTU protocol metrics for efficient industrial automation and control performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-mogilefs + most_popular: false + community: true + monitored_instance: + name: MogileFS + link: https://github.com/KKBOX/mogilefs-exporter + icon_filename: filesystem.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor MogileFS distributed file system metrics for efficient storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-monnit_mqtt + most_popular: false + community: true + monitored_instance: + name: Monnit Sensors MQTT + link: https://github.com/braxton9460/monnit-mqtt-exporter + icon_filename: monnit.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Monnit sensor data via MQTT for efficient IoT device monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-mtail + most_popular: false + community: true + monitored_instance: + name: mtail + link: https://github.com/google/mtail + icon_filename: mtail.png + categories: + - data-collection.logs-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor log data metrics using mtail log data extractor and parser. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-naemon + most_popular: false + community: true + monitored_instance: + name: Naemon + link: https://github.com/Griesbacher/Iapetos + icon_filename: naemon.svg + categories: + - data-collection.observability + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README. 
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-nagios
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Nagios
+            link: https://github.com/wbollock/nagios_exporter
+            icon_filename: nagios.png
+            categories:
+              - data-collection.observability
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep tabs on Nagios network monitoring metrics for efficient IT infrastructure management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-nature_remo
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Nature Remo E lite devices
+            link: https://github.com/kenfdev/remo-exporter
+            icon_filename: nature-remo.png
+            categories:
+              - data-collection.iot-devices
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor Nature Remo E series smart home device metrics for efficient home automation and energy management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-netatmo
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Netatmo sensors
+            link: https://github.com/xperimental/netatmo-exporter
+            icon_filename: netatmo.svg
+            categories:
+              - data-collection.iot-devices
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep an eye on Netatmo smart home device metrics for efficient home automation and energy management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-netflow
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: NetFlow
+            link: https://github.com/paihu/netflow_exporter
+            icon_filename: netflow.png
+            categories:
+              - data-collection.networking-stack-and-network-interfaces
+          keywords:
+            - network monitoring
+            - network performance
+            - traffic analysis
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track NetFlow network traffic metrics for efficient network monitoring and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-nextcloud + most_popular: false + community: true + monitored_instance: + name: Nextcloud servers + link: https://github.com/xperimental/nextcloud-exporter + icon_filename: nextcloud.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Nextcloud cloud storage metrics for efficient file hosting and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-nextdns + most_popular: false + community: true + monitored_instance: + name: NextDNS + link: https://github.com/raylas/nextdns-exporter + icon_filename: nextdns.png + categories: + - data-collection.dns-and-dhcp-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track NextDNS DNS resolver and security platform metrics for efficient DNS management and security. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-nftables + most_popular: false + community: true + monitored_instance: + name: nftables + link: https://github.com/Sheridan/nftables_exporter + icon_filename: nftables.png + categories: + - data-collection.linux-systems.firewall-metrics + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor nftables firewall metrics for efficient network security and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_aix_njmon + most_popular: false + community: true + monitored_instance: + name: IBM AIX systems Njmon + link: https://github.com/crooks/njmon_exporter + icon_filename: ibm.svg + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter). 
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-nrpe
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: NRPE daemon
+            link: https://github.com/canonical/nrpe_exporter
+            icon_filename: nrpelinux.png
+            categories:
+              - data-collection.apm
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-mikrotik
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: MikroTik devices
+            link: https://github.com/swoga/mikrotik-exporter
+            icon_filename: mikrotik.png
+            categories:
+              - data-collection.networking-stack-and-network-interfaces
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep tabs on MikroTik RouterOS metrics for efficient network device management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-nsxt
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: NSX-T
+            link: https://github.com/jk8s/nsxt_exporter
+            icon_filename: vmware-nsx.svg
+            categories:
+              - data-collection.containers-and-vms
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-nvml
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: NVML
+            link: https://github.com/oko/nvml-exporter-rs
+            icon_filename: nvidia.svg
+            categories:
+              - data-collection.hardware-devices-and-sensors
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Keep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).
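Entries in this catalog share their boilerplate through YAML anchors and merge keys (`<<: *module`, `<<: *meta`, `<<: *overview`, `<<: *setup`). Per the YAML merge-key convention, keys spelled out directly in a mapping always override keys brought in by `<<:`, regardless of where the merge line sits, so placing `<<: *meta` first is a readability convention rather than a semantic requirement. A minimal sketch of the pattern (the anchor body below is invented for illustration; the real anchors are defined earlier in this file):

```yaml
# Illustrative only: shows how a merge key combines shared defaults
# with entry-specific overrides.
_shared: &meta
  plugin_name: go.d.plugin   # assumed field names, for illustration
  module_name: prometheus

example_entry:
  <<: *meta                                   # pull in the shared defaults
  id: collector-go.d.plugin-prometheus-demo   # explicit key: always wins
```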
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-obs_studio + most_popular: false + community: true + monitored_instance: + name: OBS Studio + link: https://github.com/lukegb/obs_studio_exporter + icon_filename: obs-studio.png + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track OBS Studio live streaming and recording software metrics for efficient video production and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-odbc + most_popular: false + community: true + monitored_instance: + name: ODBC + link: https://github.com/MACHBASE/prometheus-odbc-exporter + icon_filename: odbc.svg + categories: + - data-collection.database-servers + keywords: + - database + - dbms + - data storage + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openhab + most_popular: false + community: true + monitored_instance: + name: OpenHAB + link: https://github.com/pdreker/openhab_exporter + icon_filename: openhab.svg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track openHAB smart home automation system metrics for efficient home automation and energy management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README. + + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openrc + most_popular: false + community: true + monitored_instance: + name: OpenRC + link: https://git.sr.ht/~tomleb/openrc-exporter + icon_filename: linux.png + categories: + - data-collection.linux-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on OpenRC init system metrics for efficient system startup and service management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openrct2 + most_popular: false + community: true + monitored_instance: + name: OpenRCT2 + link: https://github.com/terinjokes/openrct2-prometheus-exporter + icon_filename: openRCT2.png + categories: + - data-collection.gaming + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track OpenRCT2 game metrics for efficient game server management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openroadm + most_popular: false + community: true + monitored_instance: + name: OpenROADM devices + link: https://github.com/utdal/openroadm_exporter + icon_filename: openroadm.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-openweathermap + most_popular: false + community: true + monitored_instance: + name: OpenWeatherMap + link: https://github.com/Tenzer/openweathermap-exporter + icon_filename: openweather.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-oracledb + most_popular: false + community: true + monitored_instance: + name: Oracle DB (community) + link: https://github.com/iamseth/oracledb_exporter + icon_filename: oracle.svg + categories: + - data-collection.database-servers + keywords: + - oracle + - database + - dbms + - data storage + overview: + <<: *overview + data_collection: + metrics_description: | + Track Oracle Database metrics for efficient database management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-otrs + most_popular: false + community: true + monitored_instance: + name: OTRS + link: https://github.com/JulianDroste/otrs_exporter + icon_filename: otrs.png + categories: + - data-collection.notifications + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dutch_electricity_smart_meter + most_popular: false + community: true + monitored_instance: + name: Dutch Electricity Smart Meter + link: https://github.com/TobiasDeBruijn/prometheus-p1-exporter + icon_filename: dutch-electricity.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-patroni + most_popular: false + community: true + monitored_instance: + name: Patroni + link: https://github.com/gopaytech/patroni_exporter + icon_filename: patroni.png + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-pws + most_popular: false + community: true + monitored_instance: + name: Personal Weather Station + link: https://github.com/JohnOrthoefer/pws-exporter + icon_filename: wunderground.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track personal weather station metrics for efficient weather monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-pgbackrest + most_popular: false + community: true + monitored_instance: + name: pgBackRest + link: https://github.com/woblerr/pgbackrest_exporter + icon_filename: pgbackrest.png + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor pgBackRest PostgreSQL backup metrics for efficient database backup and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-pgpool2 + most_popular: false + community: true + monitored_instance: + name: Pgpool-II + link: https://github.com/pgpool/pgpool2_exporter + icon_filename: pgpool2.png + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-pimoroni_enviro_plus + most_popular: false + community: true + monitored_instance: + name: Pimoroni Enviro+ + link: https://github.com/terradolor/prometheus-enviro-exporter + icon_filename: pimorino.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-pingdom + most_popular: false + community: true + monitored_instance: + name: Pingdom + link: https://github.com/veepee-oss/pingdom_exporter + icon_filename: solarwinds.svg + categories: + - data-collection.synthetic-checks + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-dell_powermax + most_popular: false + community: true + monitored_instance: + name: Dell PowerMax + link: https://github.com/kckecheng/powermax_exporter + icon_filename: powermax.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Dell EMC PowerMax storage array metrics for efficient storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-powerpal + most_popular: false + community: true + monitored_instance: + name: Powerpal devices + link: https://github.com/aashley/powerpal_exporter + icon_filename: powerpal.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Powerpal smart meter metrics for efficient energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-proftpd + most_popular: false + community: true + monitored_instance: + name: ProFTPD + link: https://github.com/transnano/proftpd_exporter + icon_filename: proftpd.png + categories: + - data-collection.ftp-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ProFTPD FTP server metrics for efficient file transfer and server performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-eaton_ups + most_popular: false + community: true + monitored_instance: + name: Eaton UPS + link: https://github.com/psyinfra/prometheus-eaton-ups-exporter + icon_filename: eaton.svg + categories: + - data-collection.ups + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-redis_queue + most_popular: false + community: true + monitored_instance: + name: Redis Queue + link: https://github.com/mdawar/rq-exporter + icon_filename: rq.png + categories: + - data-collection.message-brokers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-radio_thermostat + most_popular: false + community: true + monitored_instance: + name: Radio Thermostat + link: https://github.com/andrewlow/radio-thermostat-exporter + icon_filename: radiots.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter). 
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-rancher
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Rancher
+            link: https://github.com/infinityworksltd/prometheus-rancher-exporter
+            icon_filename: rancher.svg
+            categories:
+              - data-collection.kubernetes
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track Rancher container orchestration platform metrics for efficient container management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-raritan_pdu
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: Raritan PDU
+            link: https://github.com/psyinfra/prometheus-raritan-pdu-exporter
+            icon_filename: raritan.svg
+            categories:
+              - data-collection.hardware-devices-and-sensors
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-routeros
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: MikroTik RouterOS devices
+            link: https://github.com/welbymcroberts/routeros_exporter
+            icon_filename: routeros.png
+            categories:
+              - data-collection.networking-stack-and-network-interfaces
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Track MikroTik RouterOS metrics for efficient network device management and performance.
+            method_description: |
+              Metrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).
+        setup:
+          <<: *setup
+          prerequisites:
+            list:
+              - title: Install Exporter
+                description: |
+                  Install [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.
+
+      - <<: *module
+        meta:
+          <<: *meta
+          id: collector-go.d.plugin-prometheus-sabnzbd
+          most_popular: false
+          community: true
+          monitored_instance:
+            name: SABnzbd
+            link: https://github.com/msroest/sabnzbd_exporter
+            icon_filename: sabnzbd.png
+            categories:
+              - data-collection.media-streaming-servers
+          keywords: []
+        overview:
+          <<: *overview
+          data_collection:
+            metrics_description: |
+              Monitor SABnzbd Usenet client metrics for efficient file downloads and resource management.
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-salicru_eqx + most_popular: false + community: true + monitored_instance: + name: Salicru EQX inverter + link: https://github.com/alejandroscf/prometheus_salicru_exporter + icon_filename: salicru.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sense_energy + most_popular: false + community: true + monitored_instance: + name: Sense Energy + link: https://github.com/ejsuncy/sense_energy_prometheus_exporter + icon_filename: sense.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-servertech + most_popular: false + community: true + monitored_instance: + name: ServerTech + link: https://github.com/tynany/servertech_exporter + icon_filename: servertech.png + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-shelly + most_popular: false + community: true + monitored_instance: + name: Shelly humidity sensor + link: https://github.com/aexel90/shelly_exporter + icon_filename: shelly.jpg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Shelly smart home device metrics for efficient home automation and energy management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sia + most_popular: false + community: true + monitored_instance: + name: Sia + link: https://github.com/tbenz9/sia_exporter + icon_filename: sia.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Sia decentralized storage platform metrics for efficient storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-s7_plc + most_popular: false + community: true + monitored_instance: + name: Siemens S7 PLC + link: https://github.com/MarcusCalidus/s7-plc-exporter + icon_filename: siemens.svg + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-site24x7 + most_popular: false + community: true + monitored_instance: + name: Site 24x7 + link: https://github.com/svenstaro/site24x7_exporter + icon_filename: site24x7.svg + categories: + - data-collection.synthetic-checks + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sma_inverter + most_popular: false + community: true + monitored_instance: + name: SMA Inverters + link: https://github.com/dr0ps/sma_inverter_exporter + icon_filename: sma.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SMA solar inverter metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-smartrg808ac + most_popular: false + community: true + monitored_instance: + name: SmartRG 808AC Cable Modem + link: https://github.com/AdamIsrael/smartrg808ac_exporter + icon_filename: smartr.jpeg + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SmartRG SR808ac router metrics for efficient network device management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sml + most_popular: false + community: true + monitored_instance: + name: Smart meters SML + link: https://github.com/mweinelt/sml-exporter + icon_filename: sml.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Smart Message Language (SML) metrics for efficient smart metering and energy management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-softether + most_popular: false + community: true + monitored_instance: + name: SoftEther VPN Server + link: https://github.com/dalance/softether_exporter + icon_filename: softether.svg + categories: + - data-collection.vpns + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-lsx + most_popular: false + community: true + monitored_instance: + name: Solar logging stick + link: https://gitlab.com/bhavin192/lsx-exporter + icon_filename: solar.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-solaredge + most_popular: false + community: true + monitored_instance: + name: SolarEdge inverters + link: https://github.com/dave92082/SolarEdge-Exporter + icon_filename: solaredge.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track SolarEdge solar inverter metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-solis + most_popular: false + community: true + monitored_instance: + name: Solis Ginlong 5G inverters + link: https://github.com/candlerb/solis_exporter + icon_filename: solis.jpg + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Solis solar inverter metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sonic + most_popular: false + community: true + monitored_instance: + name: SONiC NOS + link: https://github.com/kamelnetworks/sonic_exporter + icon_filename: sonic.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-ibm_spectrum_virtualize + most_popular: false + community: true + monitored_instance: + name: IBM Spectrum Virtualize + link: https://github.com/bluecmd/spectrum_virtualize_exporter + icon_filename: ibm.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-speedify + most_popular: false + community: true + monitored_instance: + name: Speedify CLI + link: https://github.com/willshen/speedify_exporter + icon_filename: speedify.png + categories: + - data-collection.vpns + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Speedify VPN metrics for efficient virtual private network (VPN) management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sphinx + most_popular: false + community: true + monitored_instance: + name: Sphinx + link: https://github.com/foxdalas/sphinx_exporter + icon_filename: sphinx.png + categories: + - data-collection.search-engines + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Sphinx search engine metrics for efficient search and indexing performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sql + most_popular: false + community: true + monitored_instance: + name: SQL Database agnostic + link: https://github.com/free/sql_exporter + icon_filename: sql.svg + categories: + - data-collection.database-servers + keywords: + - database + - relational db + - data querying + overview: + <<: *overview + data_collection: + metrics_description: | + Query SQL databases for efficient database performance monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-starwind_vsan + most_popular: false + community: true + monitored_instance: + name: Starwind VSAN VSphere Edition + link: https://github.com/evoicefire/starwind-vsan-exporter + icon_filename: starwind.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-storidge + most_popular: false + community: true + monitored_instance: + name: Storidge + link: https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md + icon_filename: storidge.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Storidge storage metrics for efficient storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-stream_generic + most_popular: false + community: true + monitored_instance: + name: Stream + link: https://github.com/carlpett/stream_exporter + icon_filename: stream.png + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor streaming metrics for efficient media streaming and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-strongswan + most_popular: false + community: true + monitored_instance: + name: strongSwan + link: https://github.com/jlti-dev/ipsec_exporter + icon_filename: strongswan.svg + categories: + - data-collection.vpns + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sunspec + most_popular: false + community: true + monitored_instance: + name: Sunspec Solar Energy + link: https://github.com/inosion/prometheus-sunspec-exporter + icon_filename: sunspec.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-suricata + most_popular: false + community: true + monitored_instance: + name: Suricata + link: https://github.com/corelight/suricata_exporter + icon_filename: suricata.png + categories: + - data-collection.security-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-synology_activebackup + most_popular: false + community: true + monitored_instance: + name: Synology ActiveBackup + link: https://github.com/codemonauts/activebackup-prometheus-exporter + icon_filename: synology.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Synology Active Backup metrics for efficient backup and data protection management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-sysload + most_popular: false + community: true + monitored_instance: + name: Sysload + link: https://github.com/egmc/sysload_exporter + icon_filename: sysload.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor system load metrics for efficient system performance and resource management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-trex + most_popular: false + community: true + monitored_instance: + name: T-Rex NVIDIA GPU Miner + link: https://github.com/dennisstritzke/trex_exporter + icon_filename: trex.png + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tado + most_popular: false + community: true + monitored_instance: + name: Tado smart heating solution + link: https://github.com/eko/tado-exporter + icon_filename: tado.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Tado smart thermostat metrics for efficient home heating and cooling management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tado° Exporter](https://github.com/eko/tado-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tankerkoenig + most_popular: false + community: true + monitored_instance: + name: Tankerkoenig API + link: https://github.com/lukasmalkmus/tankerkoenig_exporter + icon_filename: tanker.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Tankerkönig API fuel price metrics for efficient fuel price monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tankerkönig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Tankerkönig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tesla_wall_connector + most_popular: false + community: true + monitored_instance: + name: Tesla Wall Connector + link: https://github.com/benclapp/tesla_wall_connector_exporter + icon_filename: tesla.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-tplink_p110 + most_popular: false + community: true + monitored_instance: + name: TP-Link P110 + link: https://github.com/ijohanne/prometheus-tplink-p110-exporter + icon_filename: tplink.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track TP-Link P110 smart plug metrics for efficient energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-traceroute + most_popular: false + community: true + monitored_instance: + name: Traceroute + link: https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter + icon_filename: traceroute.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Export traceroute metrics for efficient network path analysis and performance monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-twincat_ads_webservice + most_popular: false + community: true + monitored_instance: + name: TwinCAT ADS Web Service + link: https://github.com/MarcusCalidus/twincat-ads-webservice-exporter + icon_filename: twincat.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-warp10 + most_popular: false + community: true + monitored_instance: + name: Warp10 + link: https://github.com/centreon/warp10-sensision-exporter + icon_filename: warp10.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Warp 10 time-series database metrics for efficient time-series data management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README. + + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-yourls + most_popular: false + community: true + monitored_instance: + name: YOURLS URL Shortener + link: https://github.com/just1not2/prometheus-exporter-yourls + icon_filename: yourls.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-zerto + most_popular: false + community: true + monitored_instance: + name: Zerto + link: https://github.com/claranet/zerto-exporter + icon_filename: zerto.png + categories: + - data-collection.cloud-provider-managed + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README. 
+ - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-zulip + most_popular: false + community: true + monitored_instance: + name: Zulip + link: https://github.com/brokenpip3/zulip-exporter + icon_filename: zulip.png + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Zulip open-source group chat application metrics for efficient team communication management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README. + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-prometheus-zyxel_gs1200 + most_popular: false + community: true + monitored_instance: + name: Zyxel GS1200-8 + link: https://github.com/robinelfrink/gs1200-exporter + icon_filename: zyxel.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Zyxel GS1200 network switch metrics for efficient network device management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter). + setup: + <<: *setup + prerequisites: + list: + - title: Install Exporter + description: | + Install [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README. 
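Every catalog entry above is assembled with YAML merge keys: `<<: *module` pulls in the shared module skeleton, while `<<: *meta`, `<<: *overview`, and `<<: *setup` merge defaults defined by anchors earlier in this metadata file, which each entry then overrides with its own `id`, `monitored_instance`, and descriptions. Note that with merge keys, explicit keys always take precedence over merged ones, regardless of where the `<<:` line sits in the mapping. A minimal sketch of the mechanism, with illustrative anchor content (the real anchors carry many more fields):

```yaml
_shared: &meta            # anchor: a mapping of shared defaults
  plugin_name: go.d.plugin
  most_popular: true

entry:
  <<: *meta               # merge the anchored defaults into this mapping
  most_popular: false     # explicit key overrides the merged value
  id: collector-example   # entry-specific field
```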
+ diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/prometheus.go b/src/go/collectors/go.d.plugin/modules/prometheus/prometheus.go new file mode 100644 index 00000000000000..32a91e5c2504e5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/prometheus.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("prometheus", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Prometheus { + return &Prometheus{ + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 10}, + }, + }, + MaxTS: 2000, + MaxTSPerMetric: 200, + }, + charts: &module.Charts{}, + cache: newCache(), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + Name string `yaml:"name"` + Application string `yaml:"app"` + BearerTokenFile string `yaml:"bearer_token_file"` + + Selector selector.Expr `yaml:"selector"` + + ExpectedPrefix string `yaml:"expected_prefix"` + MaxTS int `yaml:"max_time_series"` + MaxTSPerMetric int `yaml:"max_time_series_per_metric"` + FallbackType struct { + Counter []string `yaml:"counter"` + Gauge []string `yaml:"gauge"` + } `yaml:"fallback_type"` +} + +type Prometheus struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + prom prometheus.Prometheus + cache *cache + + fallbackType struct { + counter matcher.Matcher + gauge matcher.Matcher + } +} + +func (p *Prometheus) Init() bool { + if err := p.validateConfig(); err != nil { + p.Errorf("validating config: %v", err) + return false + } + + prom, err := p.initPrometheusClient() + if err != nil { + p.Errorf("init prometheus client: %v", err) + return false + } + p.prom = prom + + m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter) + if err != nil { + p.Errorf("init counter fallback type matcher: %v", err) + return false + } + p.fallbackType.counter = m + + m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge) + if err != nil { + p.Errorf("init gauge fallback type matcher: %v", err) + return false + } + p.fallbackType.gauge = m + + return true +} + +func (p *Prometheus) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Prometheus) Charts() *module.Charts { + return p.charts +} + +func (p *Prometheus) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (p *Prometheus) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/prometheus_test.go b/src/go/collectors/go.d.plugin/modules/prometheus/prometheus_test.go new file mode 100644 index 00000000000000..95bf55bd20bbf6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/prometheus_test.go @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + "github.com/netdata/go.d.plugin/pkg/web" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPrometheus_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "non empty URL": { + wantFail: false, + config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}}}, + }, + "invalid selector syntax": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}}, + Selector: selector.Expr{Allow: []string{`name{label=#"value"}`}}, + }, + }, + "default": { + wantFail: true, + config: New().Config, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + prom := New() + prom.Config = test.config + + if test.wantFail { + assert.False(t, prom.Init()) + } else { + assert.True(t, prom.Init()) + } + }) + } +} + +func TestPrometheus_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) + + prom := New() + prom.URL = "http://127.0.0.1" + require.True(t, prom.Init()) + assert.NotPanics(t, prom.Cleanup) +} + +func TestPrometheus_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (prom *Prometheus, cleanup func()) + wantFail bool + }{ + "success if endpoint returns valid metrics in prometheus format": { + wantFail: false, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(`test_counter_no_meta_metric_1_total{label1="value1"} 11`)) + })) + prom = New() + prom.URL = srv.URL + + return prom, srv.Close + }, + }, + "fail if the total num of metrics exceeds the limit": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` +test_counter_no_meta_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value2"} 11 +`)) + })) + prom = New() + prom.URL = srv.URL + prom.MaxTS = 1 + + return prom, srv.Close + }, + }, + "fail if the num time series in the metric exceeds the limit": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` +test_counter_no_meta_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value2"} 11 +`)) + })) + prom = New() + prom.URL = srv.URL + prom.MaxTSPerMetric = 1 + + return prom, srv.Close + }, + }, + "fail if metrics have no expected prefix": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(`test_counter_no_meta_metric_1_total{label1="value1"} 11`)) + })) + prom = New() + prom.URL = srv.URL + prom.ExpectedPrefix = "prefix_" + + return prom, srv.Close + }, + }, + "fail if endpoint returns data not in prometheus format": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + prom = New() + prom.URL = srv.URL + + return prom, srv.Close + }, + }, + "fail if connection refused": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + prom = New() + prom.URL = "http://127.0.0.1:38001/metrics" + + return prom, func() {} + }, + }, + "fail if endpoint returns 
404": { + wantFail: true, + prepare: func() (prom *Prometheus, cleanup func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + prom = New() + prom.URL = srv.URL + + return prom, srv.Close + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + prom, cleanup := test.prepare() + defer cleanup() + + require.True(t, prom.Init()) + + if test.wantFail { + assert.False(t, prom.Check()) + } else { + assert.True(t, prom.Check()) + } + }) + } +} + +func TestPrometheus_Collect(t *testing.T) { + type testCaseStep struct { + desc string + input string + wantCollected map[string]int64 + wantCharts int + } + tests := map[string]struct { + prepare func() *Prometheus + steps []testCaseStep + }{ + "Gauge": { + prepare: New, + steps: []testCaseStep{ + { + desc: "Two first seen series, no meta series ignored", + input: ` +# HELP test_gauge_metric_1 Test Gauge Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_metric_1{label1="value2"} 12 +test_gauge_no_meta_metric_1{label1="value1"} 11 +test_gauge_no_meta_metric_1{label1="value2"} 12 +`, + wantCollected: map[string]int64{ + "test_gauge_metric_1-label1=value1": 11000, + "test_gauge_metric_1-label1=value2": 12000, + }, + wantCharts: 2, + }, + { + desc: "One series removed", + input: ` +# HELP test_gauge_metric_1 Test Gauge Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +`, + wantCollected: map[string]int64{ + "test_gauge_metric_1-label1=value1": 11000, + }, + wantCharts: 1, + }, + { + desc: "One series (re)added", + input: ` +# HELP test_gauge_metric_1 Test Gauge Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_metric_1{label1="value2"} 12 +`, + wantCollected: map[string]int64{ + "test_gauge_metric_1-label1=value1": 11000, + "test_gauge_metric_1-label1=value2": 12000, + }, + wantCharts: 2, + }, + }, + }, + "Counter": { + prepare: New, + steps: []testCaseStep{ + { + desc: "Four first seen series, no meta series collected", + input: ` +# HELP test_counter_metric_1_total Test Counter Metric 1 +# TYPE test_counter_metric_1_total counter +test_counter_metric_1_total{label1="value1"} 11 +test_counter_metric_1_total{label1="value2"} 12 +test_counter_no_meta_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value2"} 12 +`, + wantCollected: map[string]int64{ + "test_counter_metric_1_total-label1=value1": 11000, + "test_counter_metric_1_total-label1=value2": 12000, + "test_counter_no_meta_metric_1_total-label1=value1": 11000, + "test_counter_no_meta_metric_1_total-label1=value2": 12000, + }, + wantCharts: 4, + }, + { + desc: "Two series removed", + input: ` +# HELP test_counter_metric_1_total Test Counter Metric 1 +# TYPE test_counter_metric_1_total counter +test_counter_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value1"} 11 +`, + wantCollected: map[string]int64{ + "test_counter_metric_1_total-label1=value1": 11000, + "test_counter_no_meta_metric_1_total-label1=value1": 11000, + }, + wantCharts: 2, + }, + { + desc: "Two series (re)added", + input: ` +# HELP test_counter_metric_1_total Test Counter Metric 1 +# TYPE test_counter_metric_1_total counter +test_counter_metric_1_total{label1="value1"} 11 +test_counter_metric_1_total{label1="value2"} 12 +test_counter_no_meta_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value2"} 
12 +`, + wantCollected: map[string]int64{ + "test_counter_metric_1_total-label1=value1": 11000, + "test_counter_metric_1_total-label1=value2": 12000, + "test_counter_no_meta_metric_1_total-label1=value1": 11000, + "test_counter_no_meta_metric_1_total-label1=value2": 12000, + }, + wantCharts: 4, + }, + }, + }, + "Summary": { + prepare: New, + steps: []testCaseStep{ + { + desc: "Two first seen series, no meta series collected", + input: ` +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31 +`, + wantCollected: map[string]int64{ + "test_summary_1_duration_microseconds-label1=value1_count": 31, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000, + "test_summary_1_duration_microseconds-label1=value1_sum": 283201290, + "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290, + }, + wantCharts: 6, + }, + { + desc: "One series removed", + input: ` +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +`, + wantCollected: map[string]int64{ + "test_summary_1_duration_microseconds-label1=value1_count": 31, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000, + "test_summary_1_duration_microseconds-label1=value1_sum": 283201290, + }, + wantCharts: 3, + }, + { + desc: "One series (re)added", + input: ` +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31 +`, + wantCollected: map[string]int64{ + "test_summary_1_duration_microseconds-label1=value1_count": 31, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000, + "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000, + "test_summary_1_duration_microseconds-label1=value1_sum": 283201290, + "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000, + "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290, + }, + wantCharts: 6, + }, + }, + }, + "Summary with NaN": { + prepare: New, + steps: []testCaseStep{ + { + desc: "Two first seen series, no meta series collected", + input: ` +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} NaN +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} NaN +test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} NaN +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} NaN +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} NaN +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} NaN +test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31 +`, + wantCollected: map[string]int64{ + "test_summary_1_duration_microseconds-label1=value1_count": 31, + "test_summary_1_duration_microseconds-label1=value1_sum": 283201290, + "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31, + "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290, + }, + wantCharts: 6, + }, + }, + }, + "Histogram": { + prepare: New, + steps: []testCaseStep{ + { + desc: "Two first seen series, no meta series collected", + input: ` +# HELP test_histogram_1_duration_seconds Test Histogram Metric 1 +# TYPE test_histogram_1_duration_seconds histogram +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value1"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6 +`, + wantCollected: map[string]int64{ + "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5, + "test_histogram_1_duration_seconds-label1=value1_count": 6, + "test_histogram_1_duration_seconds-label1=value1_sum": 1, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=+Inf": 6, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.1": 4, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.5": 5, + "test_histogram_no_meta_1_duration_seconds-label1=value1_count": 6, + "test_histogram_no_meta_1_duration_seconds-label1=value1_sum": 1, + }, + wantCharts: 6, + }, + { + desc: "One series removed", + input: ` +# HELP test_histogram_1_duration_seconds Test Histogram Metric 1 +# TYPE test_histogram_1_duration_seconds histogram +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +`, + wantCollected: map[string]int64{ + "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5, + "test_histogram_1_duration_seconds-label1=value1_count": 0, + "test_histogram_1_duration_seconds-label1=value1_sum": 0, + }, + wantCharts: 3, + }, + { + desc: "One series (re)added", + input: ` +# HELP test_histogram_1_duration_seconds Test Histogram Metric 1 +# TYPE test_histogram_1_duration_seconds histogram +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value1"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6 +`, + wantCollected: map[string]int64{ + "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4, + "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5, + "test_histogram_1_duration_seconds-label1=value1_count": 6, + "test_histogram_1_duration_seconds-label1=value1_sum": 1, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=+Inf": 6, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.1": 4, + "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.5": 5, + "test_histogram_no_meta_1_duration_seconds-label1=value1_count": 6, + "test_histogram_no_meta_1_duration_seconds-label1=value1_sum": 1, + }, + wantCharts: 6, + }, + }, + }, + "match Untyped as Gauge": { + prepare: func() *Prometheus { + prom := New() + 
prom.FallbackType.Gauge = []string{"test_gauge_no_meta*"} + return prom + }, + steps: []testCaseStep{ + { + desc: "Two first seen series, meta series processed as Gauge", + input: ` +# HELP test_gauge_metric_1 Test Untyped Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_metric_1{label1="value2"} 12 +test_gauge_no_meta_metric_1{label1="value1"} 11 +test_gauge_no_meta_metric_1{label1="value2"} 12 +`, + wantCollected: map[string]int64{ + "test_gauge_metric_1-label1=value1": 11000, + "test_gauge_metric_1-label1=value2": 12000, + "test_gauge_no_meta_metric_1-label1=value1": 11000, + "test_gauge_no_meta_metric_1-label1=value2": 12000, + }, + wantCharts: 4, + }, + }, + }, + "match Untyped as Counter": { + prepare: func() *Prometheus { + prom := New() + prom.FallbackType.Counter = []string{"test_gauge_no_meta*"} + return prom + }, + steps: []testCaseStep{ + { + desc: "Two first seen series, meta series processed as Counter", + input: ` +# HELP test_gauge_metric_1 Test Untyped Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_metric_1{label1="value2"} 12 +test_gauge_no_meta_metric_1{label1="value1"} 11 +test_gauge_no_meta_metric_1{label1="value2"} 12 +`, + wantCollected: map[string]int64{ + "test_gauge_metric_1-label1=value1": 11000, + "test_gauge_metric_1-label1=value2": 12000, + "test_gauge_no_meta_metric_1-label1=value1": 11000, + "test_gauge_no_meta_metric_1-label1=value2": 12000, + }, + wantCharts: 4, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + prom := test.prepare() + + var metrics []byte + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metrics) + })) + defer srv.Close() + + prom.URL = srv.URL + require.True(t, prom.Init()) + + for num, step := range test.steps { + t.Run(fmt.Sprintf("step num %d ('%s')", num+1, step.desc), func(t *testing.T) { + + metrics = []byte(step.input) + + var mx map[string]int64 + + for i := 0; i < maxNotSeenTimes+1; i++ { + mx = prom.Collect() + } + + assert.Equal(t, step.wantCollected, mx) + removeObsoleteCharts(prom.Charts()) + assert.Len(t, *prom.Charts(), step.wantCharts) + }) + } + }) + } +} + +func removeObsoleteCharts(charts *module.Charts) { + var i int + for _, chart := range *charts { + if !chart.Obsolete { + (*charts)[i] = chart + i++ + } + } + *charts = (*charts)[:i] +} diff --git a/src/go/collectors/go.d.plugin/modules/prometheus/wip_meta.yaml b/src/go/collectors/go.d.plugin/modules/prometheus/wip_meta.yaml new file mode 100644 index 00000000000000..6583c7c6170411 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/prometheus/wip_meta.yaml @@ -0,0 +1,1453 @@ +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: NetApp Trident + link: https://github.com/NetApp/trident + icon_filename: netapp.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor NetApp Trident container storage metrics for efficient storage provisioning and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [NetApp Trident exporter](https://github.com/NetApp/trident). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [NetApp Trident exporter](https://github.com/NetApp/trident) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Scylla-Cluster-Tests + link: https://github.com/scylladb/scylla-cluster-tests/ + icon_filename: scylla.png + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ScyllaDB cluster test metrics for efficient database testing and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Scylla-Cluster-Tests Exporter](https://github.com/scylladb/scylla-cluster-tests/). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Scylla-Cluster-Tests Exporter](https://github.com/scylladb/scylla-cluster-tests/) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: ScyllaDB + link: https://github.com/scylladb/scylladb + icon_filename: scylla.png + categories: + - data-collection.database-servers + keywords: + - database + - dbms + - data storage + overview: + <<: *overview + data_collection: + metrics_description: | + Track ScyllaDB NoSQL database metrics for efficient database management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ScyllaDB exporter](https://github.com/scylladb/scylladb). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [ScyllaDB exporter](https://github.com/scylladb/scylladb) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: StayRTR + link: https://github.com/bgp/stayrtr + icon_filename: stayrtr.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track RPKI-to-Router (RTR) protocol metrics for efficient routing security and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [StayRTR Exporter](https://github.com/bgp/stayrtr). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [StayRTR Exporter](https://github.com/bgp/stayrtr) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Wildfly + link: https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye + icon_filename: wildfly.png + categories: + - data-collection.application-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor WildFly (formerly JBoss AS) Java application server metrics for efficient Java application management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Wildfly Exporter](https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Wildfly Exporter](https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Zeek + link: https://github.com/zeek/zeek + icon_filename: zeek.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Zeek (formerly Bro) network security monitoring metrics for efficient network security and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Zeek](https://github.com/zeek/zeek). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Zeek](https://github.com/zeek/zeek) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Authelia + link: https://www.authelia.com/reference/guides/metrics/#prometheus + icon_filename: authelia.png + categories: + - data-collection.authentication-and-authorization + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Authelia authentication and authorization metrics for enhanced security and user management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Authelia](https://www.authelia.com/reference/guides/metrics/#prometheus). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Authelia](https://www.authelia.com/reference/guides/metrics/#prometheus) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Butler (Qlik Sense DevOps toolbox) + link: https://github.com/ptarmiganlabs/butler + icon_filename: butler.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Butler Qlik Sense DevOps metrics for efficient development and operations management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Butler (Qlik Sense DevOps toolbox) Exporter. WIP](https://github.com/ptarmiganlabs/butler). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Butler (Qlik Sense DevOps toolbox) Exporter. WIP](https://github.com/ptarmiganlabs/butler) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Butler CW (Qlik Sense cache warming tool) + link: https://github.com/ptarmiganlabs/butler-cw + icon_filename: butler-cw.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on Butler CW Qlik Sense cache warming metrics for optimized data access and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Butler CW (Qlik Sense cache warming tool) Exporter. WIP](https://github.com/ptarmiganlabs/butler-cw). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Butler CW (Qlik Sense cache warming tool) Exporter. WIP](https://github.com/ptarmiganlabs/butler-cw) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Butler SOS (Qlik Sense monitoring tool) + link: https://github.com/ptarmiganlabs/butler-sos + icon_filename: butler-sos.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Butler SOS Qlik Sense metrics for comprehensive performance analysis and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Butler SOS (Qlik Sense monitoring tool) Exporter](https://github.com/ptarmiganlabs/butler-sos). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Butler SOS (Qlik Sense monitoring tool) Exporter](https://github.com/ptarmiganlabs/butler-sos) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: c-lightning + link: https://github.com/lightningd/plugins/tree/master/prometheus + icon_filename: lightning.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track c-lightning metrics for optimized Lightning Network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [c-lightning prometheus exporter](https://github.com/lightningd/plugins/tree/master/prometheus). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [c-lightning prometheus exporter](https://github.com/lightningd/plugins/tree/master/prometheus) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Cloudprober + link: https://github.com/cloudprober/cloudprober + icon_filename: cloudprober.png + categories: + - data-collection.synthetic-checks + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on cloud service availability and latency with Cloudprober monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Cloudprober Exporter](https://github.com/cloudprober/cloudprober). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Cloudprober Exporter](https://github.com/cloudprober/cloudprober) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: consrv + link: https://github.com/mdlayher/consrv + icon_filename: consrv.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track serial console bridge server metrics for optimized service discovery and health management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [consrv](https://github.com/mdlayher/consrv). 
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [consrv](https://github.com/mdlayher/consrv) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: CoreRAD
+      link: https://github.com/mdlayher/corerad
+      icon_filename: corerad.png
+      categories:
+        - data-collection.dns-and-dhcp-servers
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Monitor CoreRAD IPv6 router advertisement daemon metrics for efficient network management and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [CoreRAD](https://github.com/mdlayher/corerad).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [CoreRAD](https://github.com/mdlayher/corerad) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: CrateDB remote read/write adapter
+      link: https://github.com/crate/cratedb-prometheus-adapter
+      icon_filename: cratedb.png
+      categories:
+        - data-collection.database-servers
+    keywords:
+      - database
+      - dbms
+      - data storage
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Track CrateDB metrics for efficient data storage and query performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [CrateDB remote read/write adapter](https://github.com/crate/cratedb-prometheus-adapter).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [CrateDB remote read/write adapter](https://github.com/crate/cratedb-prometheus-adapter) by following the instructions mentioned in the exporter README.
+
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: DRBD via drbd-reactor
+      link: https://github.com/LINBIT/drbd-reactor
+      icon_filename: drbd.png
+      categories:
+        - data-collection.storage-mount-points-and-filesystems
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Track DRBD metrics for efficient distributed replicated block device management and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [DRBD exporter via drbd-reactor](https://github.com/LINBIT/drbd-reactor).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [DRBD exporter via drbd-reactor](https://github.com/LINBIT/drbd-reactor) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: Falco
+      link: https://github.com/falcosecurity/falco
+      icon_filename: falco.png
+      categories:
+        - data-collection.security-systems
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Track Falco security metrics for efficient runtime security management and threat detection.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [Falco](https://github.com/falcosecurity/falco).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Falco](https://github.com/falcosecurity/falco) by following the instructions mentioned in the exporter README. + + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: FastNetMon Advanced + link: https://fastnetmon.com/docs-fnm-advanced/ + icon_filename: fastnetmon.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on FastNetMon Advanced network monitoring metrics for efficient traffic analysis and DDoS detection. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [FastNetMon Advanced exporter](https://fastnetmon.com/docs-fnm-advanced/). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [FastNetMon Advanced exporter](https://fastnetmon.com/docs-fnm-advanced/) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Faucet SDN Faucet + link: https://github.com/faucetsdn/faucet + icon_filename: faucet.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Faucet software-defined networking metrics for efficient network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Faucet SDN Faucet Exporter](https://github.com/faucetsdn/faucet). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Faucet SDN Faucet Exporter](https://github.com/faucetsdn/faucet) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Fawkes + link: https://github.com/fawkesrobotics/fawkes + icon_filename: fawkes.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Fawkes Robotic Real-Time Applications metrics for enhanced monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Fawkes](https://github.com/fawkesrobotics/fawkes). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Fawkes](https://github.com/fawkesrobotics/fawkes) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: gitlab-pages + link: https://gitlab.com/gitlab-org/gitlab-pages/ + icon_filename: gitlab.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track GitLab Pages metrics for optimized static site hosting and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [gitlab-pages exporter](https://gitlab.com/gitlab-org/gitlab-pages/). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [gitlab-pages exporter](https://gitlab.com/gitlab-org/gitlab-pages/) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: gitlab-workhorse + link: https://gitlab.com/gitlab-org/gitlab-workhorse + icon_filename: gitlab.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor GitLab Workhorse metrics for efficient web server and reverse proxy management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [gitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [gitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: GlusterFS + link: https://github.com/gluster/gluster-prometheus + icon_filename: gluster.png + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on GlusterFS distributed file system metrics for optimized storage management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [GlusterFS Exporter](https://github.com/gluster/gluster-prometheus). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [GlusterFS Exporter](https://github.com/gluster/gluster-prometheus) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Google Cloud Status Dashboard + link: https://github.com/DazWilkin/gcp-status + icon_filename: gcp.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Google Cloud status metrics for efficient service availability management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Google Cloud Status Dashboard exporter](https://github.com/DazWilkin/gcp-status). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Google Cloud Status Dashboard exporter](https://github.com/DazWilkin/gcp-status) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: haraka + link: https://github.com/mailprotector/haraka-plugin-prometheus + icon_filename: haraka.png + categories: + - data-collection.mail-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Haraka SMTP server metrics for efficient email delivery and security management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [haraka exporter](https://github.com/mailprotector/haraka-plugin-prometheus). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [haraka exporter](https://github.com/mailprotector/haraka-plugin-prometheus) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Hetzner Cloud CSI Driver (Nodes) + link: https://github.com/hetznercloud/csi-driver + icon_filename: hetznercloud.png + categories: + - data-collection.cloud-provider-managed + keywords: + - cloud services + - cloud computing + - scalability + overview: + <<: *overview + data_collection: + metrics_description: | + Track Hetzner Cloud Container Storage Interface driver metrics for efficient Kubernetes storage management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Hetzner Cloud CSI Driver (Nodes)](https://github.com/hetznercloud/csi-driver). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Hetzner Cloud CSI Driver (Nodes)](https://github.com/hetznercloud/csi-driver) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Lutron Homeworks + link: https://github.com/jbarwick/homeworks-service + icon_filename: lutron-homeworks.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on Homeworks home automation system metrics for optimized smart home management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Lutron Homeworks Exporter](https://github.com/jbarwick/homeworks-service). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Lutron Homeworks Exporter](https://github.com/jbarwick/homeworks-service) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: JIRAlert + link: https://github.com/alin-sinpalean/jiralert + icon_filename: jira.png + categories: + - data-collection.notifications + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on JIRA issue tracking metrics for optimized project management and collaboration. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [JIRAlert](https://github.com/alin-sinpalean/jiralert). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [JIRAlert](https://github.com/alin-sinpalean/jiralert) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Kafka Configs + link: https://github.com/EladLeev/kafka-config-metrics + icon_filename: kafka.svg + categories: + - data-collection.message-brokers + keywords: + - big data + - stream processing + - message broker + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Kafka configuration metrics for optimized message queue performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Kafka Configs Metrics Exporter](https://github.com/EladLeev/kafka-config-metrics). 
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [Kafka Configs Metrics Exporter](https://github.com/EladLeev/kafka-config-metrics) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: Maddy Mail Server
+      link: https://github.com/foxcpp/maddy
+      icon_filename: maddy.png
+      categories:
+        - data-collection.mail-servers
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Keep tabs on Maddy Mail Server metrics for efficient email delivery and security management.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [Maddy Mail Server metrics endpoint](https://github.com/foxcpp/maddy).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [Maddy Mail Server metrics endpoint](https://github.com/foxcpp/maddy) by following the instructions mentioned in the exporter README.
+
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: Purpleidea Mgmt
+      link: https://github.com/purpleidea/mgmt
+      icon_filename: mgmtconfig.png
+      categories:
+        - data-collection.provisioning-systems
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Keep an eye on MGMT configuration management system metrics for efficient infrastructure management and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [mgmt exporter](https://github.com/purpleidea/mgmt).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [mgmt exporter](https://github.com/purpleidea/mgmt) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: Nebula
+      link: https://github.com/immstudios/promexp
+      icon_filename: nebula.png
+      categories:
+        - data-collection.media-streaming-servers
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Keep an eye on Nebula broadcast system metrics for efficient media broadcasting and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [Nebula Exporter (unified exporter for broadcasters)](https://github.com/immstudios/promexp).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [Nebula Exporter (unified exporter for broadcasters)](https://github.com/immstudios/promexp) by following the instructions mentioned in the exporter README.
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: NeonKube Service
+      link: https://github.com/nforgeio/neonKUBE
+      icon_filename: neonkube.png
+      categories:
+        - data-collection.service-discovery-registry
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Track NeonKube service metrics for efficient cluster management and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [Neon Service Standard Exporter](https://github.com/nforgeio/neonKUBE).
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Neon Service Standard Exporter](https://github.com/nforgeio/neonKUBE) by following the instructions mentioned in the exporter README. + + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Onionprobe + link: https://gitlab.torproject.org/tpo/onion-services/onionprobe + icon_filename: onion.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Tor network metrics for efficient anonymity network performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Onionprobe](https://gitlab.torproject.org/tpo/onion-services/onionprobe). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Onionprobe](https://gitlab.torproject.org/tpo/onion-services/onionprobe) by following the instructions mentioned in the exporter README. + + + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Opflex-agent + link: https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md + icon_filename: opflex.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep tabs on OpFlex agent metrics for efficient software-defined networking management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Opflex-agent Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Opflex-agent Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Opflex-server + link: https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md + icon_filename: opflex.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OpFlex server metrics for efficient software-defined networking management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Opflex-server Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Opflex-server Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: ovn-kubernetes Master + link: https://github.com/ovn-org/ovn-kubernetes + icon_filename: kube-ovn.png + categories: + - data-collection.kubernetes + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Track OVN-Kubernetes master metrics for efficient Kubernetes networking management and performance. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [ovn-kubernetes Master Exporter](https://github.com/ovn-org/ovn-kubernetes). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [ovn-kubernetes Master Exporter](https://github.com/ovn-org/ovn-kubernetes) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: ovn-kubernetes Node + link: https://github.com/ovn-org/ovn-kubernetes + icon_filename: kube-ovn.png + categories: + - data-collection.kubernetes + keywords: + - network monitoring + - network performance + - traffic analysis + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor OVN-Kubernetes node metrics for efficient Kubernetes networking management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ovn-kubernetes Node Exporter](https://github.com/ovn-org/ovn-kubernetes). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [ovn-kubernetes Node Exporter](https://github.com/ovn-org/ovn-kubernetes) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Pathvector + link: https://github.com/natesales/pathvector + icon_filename: pathvector.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Pathvector BGP routing metrics for efficient + Border Gateway Protocol management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Pathvector](https://github.com/natesales/pathvector). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Pathvector](https://github.com/natesales/pathvector) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: PCP + link: https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3 + icon_filename: pcp.png + categories: + - data-collection.apm + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Performance Co-Pilot system performance metrics for efficient IT infrastructure management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [PCP exporter](https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [PCP exporter](https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: poudriere + link: https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere + icon_filename: poudriere.png + categories: + - data-collection.freebsd-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Poudriere FreeBSD package building and testing metrics for efficient package management and performance. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [poudriere exporter](https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [poudriere exporter](https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: RCT Inverter + link: https://github.com/svalouch/rctmon + icon_filename: rct.png + categories: + - data-collection.iot-devices + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on RCT Power inverter metrics for efficient solar energy management and monitoring. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [RctMon - RCT Inverter metrics extractor](https://github.com/svalouch/rctmon). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [RctMon - RCT Inverter metrics extractor](https://github.com/svalouch/rctmon) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: RTRTR + link: https://github.com/NLnetLabs/rtrtr + icon_filename: rtrtr.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Keep an eye on RPKI-to-Router (RTR) protocol metrics for efficient routing security and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [rtrtr exporter](https://github.com/NLnetLabs/rtrtr). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [rtrtr exporter](https://github.com/NLnetLabs/rtrtr) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: silverpeak + link: https://github.com/ipHeaders/silverpeak-prometheus + icon_filename: silverpeak.png + categories: + - data-collection.networking-stack-and-network-interfaces + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor Silver Peak SD-WAN metrics for efficient wide area network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [silverpeak-prometheus](https://github.com/ipHeaders/silverpeak-prometheus). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [silverpeak-prometheus](https://github.com/ipHeaders/silverpeak-prometheus) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: SmartPi + link: https://github.com/nDenerserve/SmartPi + icon_filename: smartpi.png + categories: + - data-collection.hardware-devices-and-sensors + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track SmartPi smart meter metrics for efficient energy management and monitoring. 
+ method_description: | + Metrics are gathered by periodically sending HTTP requests to [SmartPi](https://github.com/nDenerserve/SmartPi). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [SmartPi](https://github.com/nDenerserve/SmartPi) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: SRS (v5.0.67+) + link: https://github.com/ossrs/srs + icon_filename: srs.jpg + categories: + - data-collection.media-streaming-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Simple-RTMP-Server (SRS) metrics for efficient live streaming server management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SRS (v5.0.67+)](https://github.com/ossrs/srs). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [SRS (v5.0.67+)](https://github.com/ossrs/srs) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: supercronic + link: https://github.com/aptible/supercronic/ + icon_filename: supercronic.png + categories: + - data-collection.provisioning-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Supercronic job scheduler metrics for efficient task scheduling and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [supercronic](https://github.com/aptible/supercronic/). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [supercronic](https://github.com/aptible/supercronic/) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: SUSE Saptune + link: https://github.com/SUSE/saptune + icon_filename: suse.png + categories: + - data-collection.linux-systems + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor SUSE Linux Enterprise Server (SLES) Saptune metrics for efficient system tuning and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [SUSE Saptune exporter](https://github.com/SUSE/saptune). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [SUSE Saptune exporter](https://github.com/SUSE/saptune) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: Tezos Node + link: https://gitlab.com/tezos/tezos + icon_filename: tezos.png + categories: + - data-collection.blockchain-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Tezos blockchain node metrics for efficient blockchain network management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Tezos Node Exporter](https://gitlab.com/tezos/tezos). 
+ setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Tezos Node Exporter](https://gitlab.com/tezos/tezos) by following the instructions mentioned in the exporter README. + +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: ThirdAI + link: https://github.com/ThirdAILabs/Demos + icon_filename: thirdai.png + categories: + - data-collection.generic-data-collection + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ThirdAI platform metrics for efficient management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [ThirdAI exporter](https://github.com/ThirdAILabs/Demos). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [ThirdAI exporter](https://github.com/ThirdAILabs/Demos) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: trickster + link: https://github.com/trickstercache/trickster + icon_filename: trickster.png + categories: + - data-collection.web-servers-and-web-proxies + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track Trickster caching reverse proxy for time-series databases metrics for efficient time-series data management and performance. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [trickster](https://github.com/trickstercache/trickster). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [trickster](https://github.com/trickstercache/trickster) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: ClickHouse + link: https://github.com/ClickHouse/ClickHouse + icon_filename: clickhouse.svg + categories: + - data-collection.database-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Track ClickHouse analytics database metrics for efficient database performance and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [Yandex ClickHouse Exporter](https://github.com/ClickHouse/ClickHouse). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [Yandex ClickHouse Exporter](https://github.com/ClickHouse/ClickHouse) by following the instructions mentioned in the exporter README. +- <<: *module + meta: + <<: *meta + most_popular: false + community: true + monitored_instance: + name: zedhook + link: https://github.com/mdlayher/zedhook + icon_filename: zedhook.png + categories: + - data-collection.logs-servers + keywords: [] + overview: + <<: *overview + data_collection: + metrics_description: | + Monitor ZFS Event Daemon (ZED) metrics for efficient file system event monitoring and management. + method_description: | + Metrics are gathered by periodically sending HTTP requests to [zedhook](https://github.com/mdlayher/zedhook). + setup: + <<: *setup + prerequisites: + list: + - title: Install OpenMetrics Exporter + description: | + Install [zedhook](https://github.com/mdlayher/zedhook) by following the instructions mentioned in the exporter README. 
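Every entry in this metadata file describes the same collection pattern: the agent periodically issues an HTTP GET against the exporter's Prometheus/OpenMetrics endpoint and parses the returned text. As a minimal, hedged illustration of that loop (not the go.d prometheus module's actual implementation, which uses its own HTTP client and parser), here is a standalone Go sketch using only the standard library; the endpoint URL and poll interval are placeholder assumptions, not values taken from this metadata.

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "time"
    )

    // Sketch of the scrape loop the method_description fields refer to:
    // periodically GET an OpenMetrics text endpoint and hand the payload
    // to a parser. The URL and interval below are illustrative only.
    func main() {
        const endpoint = "http://127.0.0.1:9090/metrics" // placeholder exporter address
        ticker := time.NewTicker(10 * time.Second)       // placeholder update interval
        defer ticker.Stop()

        for range ticker.C {
            resp, err := http.Get(endpoint)
            if err != nil {
                fmt.Println("scrape failed:", err)
                continue
            }
            body, err := io.ReadAll(resp.Body)
            resp.Body.Close()
            if err != nil || resp.StatusCode != http.StatusOK {
                fmt.Println("bad response:", resp.Status, err)
                continue
            }
            // A real collector parses the OpenMetrics text here; this
            // sketch only reports the payload size.
            fmt.Printf("scraped %d bytes\n", len(body))
        }
    }

In the agent itself, each metadata entry above corresponds to one configured job of the generic prometheus collector, so many such loops run concurrently against different endpoints.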
+
+- <<: *module
+  meta:
+    <<: *meta
+    most_popular: false
+    community: true
+    monitored_instance:
+      name: zrepl internal
+      link: https://github.com/zrepl/zrepl
+      icon_filename: zrepl.png
+      categories:
+        - data-collection.storage-mount-points-and-filesystems
+    keywords: []
+  overview:
+    <<: *overview
+    data_collection:
+      metrics_description: |
+        Track ZFS replication metrics using zrepl for efficient file system replication management and performance.
+      method_description: |
+        Metrics are gathered by periodically sending HTTP requests to [zrepl internal exporter](https://github.com/zrepl/zrepl).
+  setup:
+    <<: *setup
+    prerequisites:
+      list:
+        - title: Install OpenMetrics Exporter
+          description: |
+            Install [zrepl internal exporter](https://github.com/zrepl/zrepl) by following the instructions mentioned in the exporter README.
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/README.md b/src/go/collectors/go.d.plugin/modules/proxysql/README.md
new file mode 120000
index 00000000000000..06223157df8df3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/proxysql/README.md
@@ -0,0 +1 @@
+integrations/proxysql.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/cache.go b/src/go/collectors/go.d.plugin/modules/proxysql/cache.go
new file mode 100644
index 00000000000000..c4fccefff19d3b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/proxysql/cache.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+type (
+	cache struct {
+		commands map[string]*commandCache
+		users    map[string]*userCache
+		backends map[string]*backendCache
+	}
+	commandCache struct {
+		command            string
+		hasCharts, updated bool
+	}
+	userCache struct {
+		user               string
+		hasCharts, updated bool
+	}
+	backendCache struct {
+		hg, host, port     string
+		hasCharts, updated bool
+	}
+)
+
+func (c *cache) reset() {
+	for k, m := range c.commands {
+		c.commands[k] = &commandCache{command: m.command, hasCharts: m.hasCharts}
+	}
+	for k, m := range c.users {
+		c.users[k] = &userCache{user: m.user, hasCharts: m.hasCharts}
+	}
+	for k, m := range c.backends {
+		c.backends[k] = &backendCache{hg: m.hg, host: m.host, port: m.port, hasCharts: m.hasCharts}
+	}
+}
+
+func (c *cache) getCommand(command string) *commandCache {
+	v, ok := c.commands[command]
+	if !ok {
+		v = &commandCache{command: command}
+		c.commands[command] = v
+	}
+	return v
+}
+
+func (c *cache) getUser(user string) *userCache {
+	v, ok := c.users[user]
+	if !ok {
+		v = &userCache{user: user}
+		c.users[user] = v
+	}
+	return v
+}
+
+func (c *cache) getBackend(hg, host, port string) *backendCache {
+	id := backendID(hg, host, port)
+	v, ok := c.backends[id]
+	if !ok {
+		v = &backendCache{hg: hg, host: host, port: port}
+		c.backends[id] = v
+	}
+	return v
+}
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/charts.go b/src/go/collectors/go.d.plugin/modules/proxysql/charts.go
new file mode 100644
index 00000000000000..69e96ed4ac542f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/proxysql/charts.go
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+// TODO: check https://github.com/ProxySQL/proxysql-grafana-prometheus/blob/main/grafana/provisioning/dashboards/ProxySQL-Host-Statistics.json
+
+const (
+	prioClientConnectionsCount = module.Priority + iota
+	prioClientConnectionsRate
+	prioServerConnectionsCount
+	prioServerConnectionsRate
+	
prioBackendsTraffic + prioFrontendsTraffic + prioActiveTransactionsCount + prioQuestionsRate + prioSlowQueriesRate + prioQueriesRate + prioBackendStatementsCount + prioBackendStatementsRate + prioFrontendStatementsCount + prioFrontendStatementsRate + prioCachedStatementsCount + prioQueryCacheEntriesCount + prioQueryCacheIO + prioQueryCacheRequestsRate + prioQueryCacheMemoryUsed + prioMySQLMonitorWorkersCount + prioMySQLMonitorWorkersRate + prioMySQLMonitorConnectChecksRate + prioMySQLMonitorPingChecksRate + prioMySQLMonitorReadOnlyChecksRate + prioMySQLMonitorReplicationLagChecksRate + prioJemallocMemoryUsed + prioMemoryUsed + prioMySQLCommandExecutionsRate + prioMySQLCommandExecutionTime + prioMySQLCommandExecutionDurationHistogram + prioMySQLUserConnectionsUtilization + prioMySQLUserConnectionsCount + prioBackendStatus + prioBackendConnectionsUsage + prioBackendConnectionsRate + prioBackendQueriesRateRate + prioBackendTraffic + prioBackendLatency + prioUptime +) + +var ( + baseCharts = module.Charts{ + clientConnectionsCountChart.Copy(), + clientConnectionsRateChart.Copy(), + serverConnectionsCountChart.Copy(), + serverConnectionsRateChart.Copy(), + backendsTrafficChart.Copy(), + frontendsTrafficChart.Copy(), + activeTransactionsCountChart.Copy(), + questionsRateChart.Copy(), + slowQueriesRateChart.Copy(), + queriesRateChart.Copy(), + backendStatementsCountChart.Copy(), + backendStatementsRateChart.Copy(), + clientStatementsCountChart.Copy(), + clientStatementsRateChart.Copy(), + cachedStatementsCountChart.Copy(), + queryCacheEntriesCountChart.Copy(), + queryCacheIOChart.Copy(), + queryCacheRequestsRateChart.Copy(), + queryCacheMemoryUsedChart.Copy(), + mySQLMonitorWorkersCountChart.Copy(), + mySQLMonitorWorkersRateChart.Copy(), + mySQLMonitorConnectChecksRateChart.Copy(), + mySQLMonitorPingChecksRateChart.Copy(), + mySQLMonitorReadOnlyChecksRateChart.Copy(), + mySQLMonitorReplicationLagChecksRateChart.Copy(), + jemallocMemoryUsedChart.Copy(), + memoryUsedCountChart.Copy(), + uptimeChart.Copy(), + } + + clientConnectionsCountChart = module.Chart{ + ID: "client_connections_count", + Title: "Client connections", + Units: "connections", + Fam: "connections", + Ctx: "proxysql.client_connections_count", + Priority: prioClientConnectionsCount, + Dims: module.Dims{ + {ID: "Client_Connections_connected", Name: "connected"}, + {ID: "Client_Connections_non_idle", Name: "non_idle"}, + {ID: "Client_Connections_hostgroup_locked", Name: "hostgroup_locked"}, + }, + } + clientConnectionsRateChart = module.Chart{ + ID: "client_connections_rate", + Title: "Client connections rate", + Units: "connections/s", + Fam: "connections", + Ctx: "proxysql.client_connections_rate", + Priority: prioClientConnectionsRate, + Dims: module.Dims{ + {ID: "Client_Connections_created", Name: "created", Algo: module.Incremental}, + {ID: "Client_Connections_aborted", Name: "aborted", Algo: module.Incremental}, + }, + } + + serverConnectionsCountChart = module.Chart{ + ID: "server_connections_count", + Title: "Server connections", + Units: "connections", + Fam: "connections", + Ctx: "proxysql.server_connections_count", + Priority: prioServerConnectionsCount, + Dims: module.Dims{ + {ID: "Server_Connections_connected", Name: "connected"}, + }, + } + serverConnectionsRateChart = module.Chart{ + ID: "server_connections_rate", + Title: "Server connections rate", + Units: "connections/s", + Fam: "connections", + Ctx: "proxysql.server_connections_rate", + Priority: prioServerConnectionsRate, + Dims: module.Dims{ + {ID: 
"Server_Connections_created", Name: "created", Algo: module.Incremental}, + {ID: "Server_Connections_aborted", Name: "aborted", Algo: module.Incremental}, + {ID: "Server_Connections_delayed", Name: "delayed", Algo: module.Incremental}, + }, + } + + backendsTrafficChart = module.Chart{ + ID: "backends_traffic", + Title: "Backends traffic", + Units: "B/s", + Fam: "traffic", + Ctx: "proxysql.backends_traffic", + Priority: prioBackendsTraffic, + Dims: module.Dims{ + {ID: "Queries_backends_bytes_recv", Name: "recv", Algo: module.Incremental}, + {ID: "Queries_backends_bytes_sent", Name: "sent", Algo: module.Incremental}, + }, + } + frontendsTrafficChart = module.Chart{ + ID: "clients_traffic", + Title: "Clients traffic", + Units: "B/s", + Fam: "traffic", + Ctx: "proxysql.clients_traffic", + Priority: prioFrontendsTraffic, + Dims: module.Dims{ + {ID: "Queries_frontends_bytes_recv", Name: "recv", Algo: module.Incremental}, + {ID: "Queries_frontends_bytes_sent", Name: "sent", Algo: module.Incremental}, + }, + } + + activeTransactionsCountChart = module.Chart{ + ID: "active_transactions_count", + Title: "Client connections that are currently processing a transaction", + Units: "transactions", + Fam: "transactions", + Ctx: "proxysql.active_transactions_count", + Priority: prioActiveTransactionsCount, + Dims: module.Dims{ + {ID: "Active_Transactions", Name: "active"}, + }, + } + questionsRateChart = module.Chart{ + ID: "questions_rate", + Title: "Client requests / statements executed", + Units: "questions/s", + Fam: "queries", + Ctx: "proxysql.questions_rate", + Priority: prioQuestionsRate, + Dims: module.Dims{ + {ID: "Questions", Name: "questions", Algo: module.Incremental}, + }, + } + slowQueriesRateChart = module.Chart{ + ID: "slow_queries_rate", + Title: "Slow queries", + Units: "queries/s", + Fam: "queries", + Ctx: "proxysql.slow_queries_rate", + Priority: prioSlowQueriesRate, + Dims: module.Dims{ + {ID: "Slow_queries", Name: "slow", Algo: module.Incremental}, + }, + } + queriesRateChart = module.Chart{ + ID: "queries_rate", + Title: "Queries rate", + Units: "queries/s", + Fam: "queries", + Ctx: "proxysql.queries_rate", + Priority: prioQueriesRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "Com_autocommit", Name: "autocommit", Algo: module.Incremental}, + {ID: "Com_autocommit_filtered", Name: "autocommit_filtered", Algo: module.Incremental}, + {ID: "Com_commit", Name: "commit", Algo: module.Incremental}, + {ID: "Com_commit_filtered", Name: "commit_filtered", Algo: module.Incremental}, + {ID: "Com_rollback", Name: "rollback", Algo: module.Incremental}, + {ID: "Com_rollback_filtered", Name: "rollback_filtered", Algo: module.Incremental}, + {ID: "Com_backend_change_user", Name: "backend_change_user", Algo: module.Incremental}, + {ID: "Com_backend_init_db", Name: "backend_init_db", Algo: module.Incremental}, + {ID: "Com_backend_set_names", Name: "backend_set_names", Algo: module.Incremental}, + {ID: "Com_frontend_init_db", Name: "frontend_init_db", Algo: module.Incremental}, + {ID: "Com_frontend_set_names", Name: "frontend_set_names", Algo: module.Incremental}, + {ID: "Com_frontend_use_db", Name: "frontend_use_db", Algo: module.Incremental}, + }, + } + + backendStatementsCountChart = module.Chart{ + ID: "backend_statements_count", + Title: "Statements available across all backend connections", + Units: "statements", + Fam: "statements", + Ctx: "proxysql.backend_statements_count", + Priority: prioBackendStatementsCount, + Dims: module.Dims{ + {ID: "Stmt_Server_Active_Total", Name: "total"}, 
+ {ID: "Stmt_Server_Active_Unique", Name: "unique"}, + }, + } + backendStatementsRateChart = module.Chart{ + ID: "backend_statements_rate", + Title: "Statements executed against the backends", + Units: "statements/s", + Fam: "statements", + Ctx: "proxysql.backend_statements_rate", + Priority: prioBackendStatementsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "Com_backend_stmt_prepare", Name: "prepare", Algo: module.Incremental}, + {ID: "Com_backend_stmt_execute", Name: "execute", Algo: module.Incremental}, + {ID: "Com_backend_stmt_close", Name: "close", Algo: module.Incremental}, + }, + } + clientStatementsCountChart = module.Chart{ + ID: "client_statements_count", + Title: "Statements that are in use by clients", + Units: "statements", + Fam: "statements", + Ctx: "proxysql.client_statements_count", + Priority: prioFrontendStatementsCount, + Dims: module.Dims{ + {ID: "Stmt_Client_Active_Total", Name: "total"}, + {ID: "Stmt_Client_Active_Unique", Name: "unique"}, + }, + } + clientStatementsRateChart = module.Chart{ + ID: "client_statements_rate", + Title: "Statements executed by clients", + Units: "statements/s", + Fam: "statements", + Ctx: "proxysql.client_statements_rate", + Priority: prioFrontendStatementsRate, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "Com_frontend_stmt_prepare", Name: "prepare", Algo: module.Incremental}, + {ID: "Com_frontend_stmt_execute", Name: "execute", Algo: module.Incremental}, + {ID: "Com_frontend_stmt_close", Name: "close", Algo: module.Incremental}, + }, + } + cachedStatementsCountChart = module.Chart{ + ID: "cached_statements_count", + Title: "Global prepared statements", + Units: "statements", + Fam: "statements", + Ctx: "proxysql.cached_statements_count", + Priority: prioCachedStatementsCount, + Dims: module.Dims{ + {ID: "Stmt_Cached", Name: "cached"}, + }, + } + + queryCacheEntriesCountChart = module.Chart{ + ID: "query_cache_entries_count", + Title: "Query Cache entries", + Units: "entries", + Fam: "query cache", + Ctx: "proxysql.query_cache_entries_count", + Priority: prioQueryCacheEntriesCount, + Dims: module.Dims{ + {ID: "Query_Cache_Entries", Name: "entries"}, + }, + } + queryCacheMemoryUsedChart = module.Chart{ + ID: "query_cache_memory_used", + Title: "Query Cache memory used", + Units: "B", + Fam: "query cache", + Ctx: "proxysql.query_cache_memory_used", + Priority: prioQueryCacheMemoryUsed, + Dims: module.Dims{ + {ID: "Query_Cache_Memory_bytes", Name: "used"}, + }, + } + queryCacheIOChart = module.Chart{ + ID: "query_cache_io", + Title: "Query Cache I/O", + Units: "B/s", + Fam: "query cache", + Ctx: "proxysql.query_cache_io", + Priority: prioQueryCacheIO, + Dims: module.Dims{ + {ID: "Query_Cache_bytes_IN", Name: "in", Algo: module.Incremental}, + {ID: "Query_Cache_bytes_OUT", Name: "out", Algo: module.Incremental}, + }, + } + queryCacheRequestsRateChart = module.Chart{ + ID: "query_cache_requests_rate", + Title: "Query Cache requests", + Units: "requests/s", + Fam: "query cache", + Ctx: "proxysql.query_cache_requests_rate", + Priority: prioQueryCacheRequestsRate, + Dims: module.Dims{ + {ID: "Query_Cache_count_GET", Name: "read", Algo: module.Incremental}, + {ID: "Query_Cache_count_SET", Name: "write", Algo: module.Incremental}, + {ID: "Query_Cache_count_GET_OK", Name: "read_success", Algo: module.Incremental}, + }, + } + + mySQLMonitorWorkersCountChart = module.Chart{ + ID: "mysql_monitor_workers_count", + Title: "MySQL monitor workers", + Units: "threads", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_workers_count", + 
Priority: prioMySQLMonitorWorkersCount, + Dims: module.Dims{ + {ID: "MySQL_Monitor_Workers", Name: "workers"}, + {ID: "MySQL_Monitor_Workers_Aux", Name: "auxiliary"}, + }, + } + mySQLMonitorWorkersRateChart = module.Chart{ + ID: "mysql_monitor_workers_rate", + Title: "MySQL monitor workers rate", + Units: "workers/s", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_workers_rate", + Priority: prioMySQLMonitorWorkersRate, + Dims: module.Dims{ + {ID: "MySQL_Monitor_Workers_Started", Name: "started", Algo: module.Incremental}, + }, + } + mySQLMonitorConnectChecksRateChart = module.Chart{ + ID: "mysql_monitor_connect_checks_rate", + Title: "MySQL monitor connect checks", + Units: "checks/s", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_connect_checks_rate", + Priority: prioMySQLMonitorConnectChecksRate, + Dims: module.Dims{ + {ID: "MySQL_Monitor_connect_check_OK", Name: "succeed", Algo: module.Incremental}, + {ID: "MySQL_Monitor_connect_check_ERR", Name: "failed", Algo: module.Incremental}, + }, + } + mySQLMonitorPingChecksRateChart = module.Chart{ + ID: "mysql_monitor_ping_checks_rate", + Title: "MySQL monitor ping checks", + Units: "checks/s", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_ping_checks_rate", + Priority: prioMySQLMonitorPingChecksRate, + Dims: module.Dims{ + {ID: "MySQL_Monitor_ping_check_OK", Name: "succeed", Algo: module.Incremental}, + {ID: "MySQL_Monitor_ping_check_ERR", Name: "failed", Algo: module.Incremental}, + }, + } + mySQLMonitorReadOnlyChecksRateChart = module.Chart{ + ID: "mysql_monitor_read_only_checks_rate", + Title: "MySQL monitor read only checks", + Units: "checks/s", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_read_only_checks_rate", + Priority: prioMySQLMonitorReadOnlyChecksRate, + Dims: module.Dims{ + {ID: "MySQL_Monitor_read_only_check_OK", Name: "succeed", Algo: module.Incremental}, + {ID: "MySQL_Monitor_read_only_check_ERR", Name: "failed", Algo: module.Incremental}, + }, + } + mySQLMonitorReplicationLagChecksRateChart = module.Chart{ + ID: "mysql_monitor_replication_lag_checks_rate", + Title: "MySQL monitor replication lag checks", + Units: "checks/s", + Fam: "monitor", + Ctx: "proxysql.mysql_monitor_replication_lag_checks_rate", + Priority: prioMySQLMonitorReplicationLagChecksRate, + Dims: module.Dims{ + {ID: "MySQL_Monitor_replication_lag_check_OK", Name: "succeed", Algo: module.Incremental}, + {ID: "MySQL_Monitor_replication_lag_check_ERR", Name: "failed", Algo: module.Incremental}, + }, + } + + jemallocMemoryUsedChart = module.Chart{ + ID: "jemalloc_memory_used", + Title: "Jemalloc used memory", + Units: "bytes", + Fam: "memory", + Ctx: "proxysql.jemalloc_memory_used", + Type: module.Stacked, + Priority: prioJemallocMemoryUsed, + Dims: module.Dims{ + {ID: "jemalloc_active", Name: "active"}, + {ID: "jemalloc_allocated", Name: "allocated"}, + {ID: "jemalloc_mapped", Name: "mapped"}, + {ID: "jemalloc_metadata", Name: "metadata"}, + {ID: "jemalloc_resident", Name: "resident"}, + {ID: "jemalloc_retained", Name: "retained"}, + }, + } + memoryUsedCountChart = module.Chart{ + ID: "memory_used", + Title: "Memory used", + Units: "bytes", + Fam: "memory", + Ctx: "proxysql.memory_used", + Priority: prioMemoryUsed, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "Auth_memory", Name: "auth"}, + {ID: "SQLite3_memory_bytes", Name: "sqlite3"}, + {ID: "query_digest_memory", Name: "query_digest"}, + {ID: "mysql_query_rules_memory", Name: "query_rules"}, + {ID: "mysql_firewall_users_table", Name: "firewall_users_table"}, + {ID: 
"mysql_firewall_users_config", Name: "firewall_users_config"}, + {ID: "mysql_firewall_rules_table", Name: "firewall_rules_table"}, + {ID: "mysql_firewall_rules_config", Name: "firewall_rules_config"}, + {ID: "stack_memory_mysql_threads", Name: "mysql_threads"}, + {ID: "stack_memory_admin_threads", Name: "admin_threads"}, + {ID: "stack_memory_cluster_threads", Name: "cluster_threads"}, + }, + } + uptimeChart = module.Chart{ + ID: "proxysql_uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "proxysql.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "ProxySQL_Uptime", Name: "uptime"}, + }, + } +) + +var ( + mySQLCommandChartsTmpl = module.Charts{ + mySQLCommandExecutionRateChartTmpl.Copy(), + mySQLCommandExecutionTimeChartTmpl.Copy(), + mySQLCommandExecutionDurationHistogramChartTmpl.Copy(), + } + + mySQLCommandExecutionRateChartTmpl = module.Chart{ + ID: "mysql_command_%s_execution_rate", + Title: "MySQL command execution", + Units: "commands/s", + Fam: "command exec", + Ctx: "proxysql.mysql_command_execution_rate", + Priority: prioMySQLCommandExecutionsRate, + Dims: module.Dims{ + {ID: "mysql_command_%s_Total_cnt", Name: "commands", Algo: module.Incremental}, + }, + } + mySQLCommandExecutionTimeChartTmpl = module.Chart{ + ID: "mysql_command_%s_execution_time", + Title: "MySQL command execution time", + Units: "microseconds", + Fam: "command exec time", + Ctx: "proxysql.mysql_command_execution_time", + Priority: prioMySQLCommandExecutionTime, + Dims: module.Dims{ + {ID: "mysql_command_%s_Total_Time_us", Name: "time", Algo: module.Incremental}, + }, + } + mySQLCommandExecutionDurationHistogramChartTmpl = module.Chart{ + ID: "mysql_command_%s_execution_duration", + Title: "MySQL command execution duration histogram", + Units: "commands/s", + Fam: "command exec duration", + Ctx: "proxysql.mysql_command_execution_duration", + Type: module.Stacked, + Priority: prioMySQLCommandExecutionDurationHistogram, + Dims: module.Dims{ + {ID: "mysql_command_%s_cnt_100us", Name: "100us", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_500us", Name: "500us", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_1ms", Name: "1ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_5ms", Name: "5ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_10ms", Name: "10ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_50ms", Name: "50ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_100ms", Name: "100ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_500ms", Name: "500ms", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_1s", Name: "1s", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_5s", Name: "5s", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_10s", Name: "10s", Algo: module.Incremental}, + {ID: "mysql_command_%s_cnt_INFs", Name: "+Inf", Algo: module.Incremental}, + }, + } +) + +func newMySQLCommandCountersCharts(command string) *module.Charts { + charts := mySQLCommandChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(command)) + chart.Labels = []module.Label{{Key: "command", Value: command}} + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, command) + } + } + + return charts +} + +func (p *ProxySQL) addMySQLCommandCountersCharts(command string) { + charts := newMySQLCommandCountersCharts(command) + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *ProxySQL) 
removeMySQLCommandCountersCharts(command string) { + prefix := "mysql_command_" + strings.ToLower(command) + + for _, chart := range *p.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +var ( + mySQLUserChartsTmpl = module.Charts{ + mySQLUserConnectionsUtilizationChartTmpl.Copy(), + mySQLUserConnectionsCountChartTmpl.Copy(), + } + + mySQLUserConnectionsUtilizationChartTmpl = module.Chart{ + ID: "mysql_user_%s_connections_utilization", + Title: "MySQL user connections utilization", + Units: "percentage", + Fam: "user conns", + Ctx: "proxysql.mysql_user_connections_utilization", + Priority: prioMySQLUserConnectionsUtilization, + Dims: module.Dims{ + {ID: "mysql_user_%s_frontend_connections_utilization", Name: "used"}, + }, + } + mySQLUserConnectionsCountChartTmpl = module.Chart{ + ID: "mysql_user_%s_connections_count", + Title: "MySQL user connections used", + Units: "connections", + Fam: "user conns", + Ctx: "proxysql.mysql_user_connections_count", + Priority: prioMySQLUserConnectionsCount, + Dims: module.Dims{ + {ID: "mysql_user_%s_frontend_connections", Name: "used"}, + }, + } +) + +func newMySQLUserCharts(username string) *module.Charts { + charts := mySQLUserChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, username) + chart.Labels = []module.Label{{Key: "user", Value: username}} + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, username) + } + } + + return charts +} + +func (p *ProxySQL) addMySQLUsersCharts(username string) { + charts := newMySQLUserCharts(username) + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *ProxySQL) removeMySQLUserCharts(user string) { + prefix := "mysql_user_" + user + + for _, chart := range *p.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +var ( + backendChartsTmpl = module.Charts{ + backendStatusChartTmpl.Copy(), + backendConnectionsUsageChartTmpl.Copy(), + backendConnectionsRateChartTmpl.Copy(), + backendQueriesRateRateChartTmpl.Copy(), + backendTrafficChartTmpl.Copy(), + backendLatencyChartTmpl.Copy(), + } + + backendStatusChartTmpl = module.Chart{ + ID: "backend_%s_status", + Title: "Backend status", + Units: "status", + Fam: "backend status", + Ctx: "proxysql.backend_status", + Priority: prioBackendStatus, + Dims: module.Dims{ + {ID: "backend_%s_status_ONLINE", Name: "online"}, + {ID: "backend_%s_status_SHUNNED", Name: "shunned"}, + {ID: "backend_%s_status_OFFLINE_SOFT", Name: "offline_soft"}, + {ID: "backend_%s_status_OFFLINE_HARD", Name: "offline_hard"}, + }, + } + backendConnectionsUsageChartTmpl = module.Chart{ + ID: "backend_%s_connections_usage", + Title: "Backend connections usage", + Units: "connections", + Fam: "backend conns usage", + Ctx: "proxysql.backend_connections_usage", + Type: module.Stacked, + Priority: prioBackendConnectionsUsage, + Dims: module.Dims{ + {ID: "backend_%s_ConnFree", Name: "free"}, + {ID: "backend_%s_ConnUsed", Name: "used"}, + }, + } + backendConnectionsRateChartTmpl = module.Chart{ + ID: "backend_%s_connections_rate", + Title: "Backend connections established", + Units: "connections/s", + Fam: "backend conns established", + Ctx: "proxysql.backend_connections_rate", + Priority: prioBackendConnectionsRate, + Dims: module.Dims{ + {ID: "backend_%s_ConnOK", Name: "succeed", Algo: module.Incremental}, + {ID: "backend_%s_ConnERR", Name: "failed", Algo: module.Incremental}, + }, + } + 
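+	// Note: the "%s" in the chart and dim IDs in this block is a per-backend
+	// placeholder; newBackendCharts (defined after this block) fills it in via
+	// backendID(hg, host, port) and attaches "host"/"port" labels, so every
+	// backend gets its own instances of these charts.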
backendQueriesRateRateChartTmpl = module.Chart{ + ID: "backend_%s_queries_rate", + Title: "Backend queries", + Units: "queries/s", + Fam: "backend queries", + Ctx: "proxysql.backend_queries_rate", + Priority: prioBackendQueriesRateRate, + Dims: module.Dims{ + {ID: "backend_%s_Queries", Name: "queries", Algo: module.Incremental}, + }, + } + backendTrafficChartTmpl = module.Chart{ + ID: "backend_%s_traffic", + Title: "Backend traffic", + Units: "B/s", + Fam: "backend traffic", + Ctx: "proxysql.backend_traffic", + Priority: prioBackendTraffic, + Dims: module.Dims{ + {ID: "backend_%s_Bytes_data_recv", Name: "recv", Algo: module.Incremental}, + {ID: "backend_%s_Bytes_data_sent", Name: "sent", Algo: module.Incremental}, + }, + } + backendLatencyChartTmpl = module.Chart{ + ID: "backend_%s_latency", + Title: "Backend latency", + Units: "microseconds", + Fam: "backend latency", + Ctx: "proxysql.backend_latency", + Priority: prioBackendLatency, + Dims: module.Dims{ + {ID: "backend_%s_Latency_us", Name: "latency"}, + }, + } +) + +func newBackendCharts(hg, host, port string) *module.Charts { + charts := backendChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, backendID(hg, host, port)) + chart.Labels = []module.Label{ + {Key: "host", Value: host}, + {Key: "port", Value: port}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, backendID(hg, host, port)) + } + } + + return charts +} + +func (p *ProxySQL) addBackendCharts(hg, host, port string) { + charts := newBackendCharts(hg, host, port) + + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *ProxySQL) removeBackendCharts(hg, host, port string) { + prefix := "backend_" + backendID(hg, host, port) + + for _, chart := range *p.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/collect.go b/src/go/collectors/go.d.plugin/modules/proxysql/collect.go new file mode 100644 index 00000000000000..cc35fc02d512a7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/collect.go @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package proxysql + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + queryVersion = "select version();" + queryStatsMySQLGlobal = "SELECT * FROM stats_mysql_global;" + queryStatsMySQLMemoryMetrics = "SELECT * FROM stats_memory_metrics;" + queryStatsMySQLCommandsCounters = "SELECT * FROM stats_mysql_commands_counters;" + queryStatsMySQLUsers = "SELECT * FROM stats_mysql_users;" + queryStatsMySQLConnectionPool = "SELECT * FROM stats_mysql_connection_pool;" +) + +func (p *ProxySQL) collect() (map[string]int64, error) { + if p.db == nil { + if err := p.openConnection(); err != nil { + return nil, err + } + } + + p.once.Do(func() { + v, err := p.doQueryVersion() + if err != nil { + p.Warningf("error on querying version: %v", err) + } else { + p.Debugf("connected to ProxySQL version: %s", v) + } + }) + + p.cache.reset() + + mx := make(map[string]int64) + + if err := p.collectStatsMySQLGlobal(mx); err != nil { + return nil, fmt.Errorf("error on collecting mysql global status: %v", err) + } + if err := p.collectStatsMySQLMemoryMetrics(mx); err != nil { + return nil, fmt.Errorf("error on collecting memory metrics: %v", err) + } + if err := p.collectStatsMySQLCommandsCounters(mx); err != nil { + return nil, fmt.Errorf("error on collecting mysql command counters: %v", 
err) + } + if err := p.collectStatsMySQLUsers(mx); err != nil { + return nil, fmt.Errorf("error on collecting mysql users: %v", err) + } + if err := p.collectStatsMySQLConnectionPool(mx); err != nil { + return nil, fmt.Errorf("error on collecting mysql connection pool: %v", err) + } + + p.updateCharts() + + return mx, nil +} + +func (p *ProxySQL) doQueryVersion() (string, error) { + q := queryVersion + p.Debugf("executing query: '%s'", q) + + var v string + if err := p.doQueryRow(q, &v); err != nil { + return "", err + } + + return v, nil +} + +func (p *ProxySQL) collectStatsMySQLGlobal(mx map[string]int64) error { + // https://proxysql.com/documentation/stats-statistics/#stats_mysql_global + q := queryStatsMySQLGlobal + p.Debugf("executing query: '%s'", q) + + var name string + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "Variable_Name": + name = value + case "Variable_Value": + mx[name] = parseInt(value) + } + }) +} + +func (p *ProxySQL) collectStatsMySQLMemoryMetrics(mx map[string]int64) error { + // https://proxysql.com/documentation/stats-statistics/#stats_mysql_memory_metrics + q := queryStatsMySQLMemoryMetrics + p.Debugf("executing query: '%s'", q) + + var name string + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "Variable_Name": + name = value + case "Variable_Value": + mx[name] = parseInt(value) + } + }) +} + +func (p *ProxySQL) collectStatsMySQLCommandsCounters(mx map[string]int64) error { + // https://proxysql.com/documentation/stats-statistics/#stats_mysql_commands_counters + q := queryStatsMySQLCommandsCounters + p.Debugf("executing query: '%s'", q) + + var command string + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "Command": + command = value + p.cache.getCommand(command).updated = true + default: + mx["mysql_command_"+command+"_"+column] = parseInt(value) + } + }) +} + +func (p *ProxySQL) collectStatsMySQLUsers(mx map[string]int64) error { + // https://proxysql.com/documentation/stats-statistics/#stats_mysql_users + q := queryStatsMySQLUsers + p.Debugf("executing query: '%s'", q) + + var user string + var used int64 + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "username": + user = value + p.cache.getUser(user).updated = true + case "frontend_connections": + used = parseInt(value) + mx["mysql_user_"+user+"_"+column] = used + case "frontend_max_connections": + mx["mysql_user_"+user+"_frontend_connections_utilization"] = calcPercentage(used, parseInt(value)) + } + }) +} + +func (p *ProxySQL) collectStatsMySQLConnectionPool(mx map[string]int64) error { + // https://proxysql.com/documentation/stats-statistics/#stats_mysql_connection_pool + q := queryStatsMySQLConnectionPool + p.Debugf("executing query: '%s'", q) + + var hg, host, port string + var px string + return p.doQuery(q, func(column, value string, rowEnd bool) { + switch column { + case "hg", "hostgroup": + hg = value + case "srv_host": + host = value + case "srv_port": + port = value + p.cache.getBackend(hg, host, port).updated = true + px = "backend_" + backendID(hg, host, port) + "_" + case "status": + mx[px+"status_ONLINE"] = boolToInt(value == "1") + mx[px+"status_SHUNNED"] = boolToInt(value == "2") + mx[px+"status_OFFLINE_SOFT"] = boolToInt(value == "3") + mx[px+"status_OFFLINE_HARD"] = boolToInt(value == "4") + default: + mx[px+column] = parseInt(value) + } + }) +} + +func (p *ProxySQL) updateCharts() { + for k, m := range p.cache.commands { + 
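+		// Mark-and-sweep: cache entries not updated during this collection cycle
+		// are evicted and their charts removed; entries seen for the first time
+		// get charts added. The same pattern repeats below for users and backends.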
if !m.updated { + delete(p.cache.commands, k) + p.removeMySQLCommandCountersCharts(m.command) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addMySQLCommandCountersCharts(m.command) + } + } + for k, m := range p.cache.users { + if !m.updated { + delete(p.cache.users, k) + p.removeMySQLUserCharts(m.user) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addMySQLUsersCharts(m.user) + } + } + for k, m := range p.cache.backends { + if !m.updated { + delete(p.cache.backends, k) + p.removeBackendCharts(m.hg, m.host, m.port) + continue + } + if !m.hasCharts { + m.hasCharts = true + p.addBackendCharts(m.hg, m.host, m.port) + } + } +} + +func (p *ProxySQL) openConnection() error { + db, err := sql.Open("mysql", p.DSN) + if err != nil { + return fmt.Errorf("error on opening a connection with the proxysql instance [%s]: %v", p.DSN, err) + } + + db.SetConnMaxLifetime(10 * time.Minute) + + if err := db.Ping(); err != nil { + _ = db.Close() + return fmt.Errorf("error on pinging the proxysql instance [%s]: %v", p.DSN, err) + } + + p.db = db + return nil +} + +func (p *ProxySQL) doQueryRow(query string, v any) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + return p.db.QueryRowContext(ctx, query).Scan(v) +} + +func (p *ProxySQL) doQuery(query string, assign func(column, value string, rowEnd bool)) error { + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + defer cancel() + + rows, err := p.db.QueryContext(ctx, query) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() + + return readRows(rows, assign) +} + +func readRows(rows *sql.Rows, assign func(column, value string, rowEnd bool)) error { + columns, err := rows.Columns() + if err != nil { + return err + } + + values := makeValues(len(columns)) + + for rows.Next() { + if err := rows.Scan(values...); err != nil { + return err + } + for i, l := 0, len(values); i < l; i++ { + assign(columns[i], valueToString(values[i]), i == l-1) + } + } + return rows.Err() +} + +func valueToString(value any) string { + v, ok := value.(*sql.NullString) + if !ok || !v.Valid { + return "" + } + return v.String +} + +func makeValues(size int) []any { + vs := make([]any, size) + for i := range vs { + vs[i] = &sql.NullString{} + } + return vs +} + +func parseInt(value string) int64 { + v, _ := strconv.ParseInt(value, 10, 64) + return v +} + +func calcPercentage(value, total int64) (v int64) { + if total == 0 { + return 0 + } + if v = value * 100 / total; v < 0 { + v = -v + } + return v +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} + +func backendID(hg, host, port string) string { + hg = strings.ReplaceAll(strings.ToLower(hg), " ", "_") + host = strings.ReplaceAll(host, ".", "_") + return hg + "_" + host + "_" + port +} diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/config_schema.json b/src/go/collectors/go.d.plugin/modules/proxysql/config_schema.json new file mode 100644 index 00000000000000..5fab79bc7a2834 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/config_schema.json @@ -0,0 +1,26 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/proxysql job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "dsn" + ] +} diff --git 
a/src/go/collectors/go.d.plugin/modules/proxysql/integrations/proxysql.md b/src/go/collectors/go.d.plugin/modules/proxysql/integrations/proxysql.md new file mode 100644 index 00000000000000..ba91f073374acc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/integrations/proxysql.md @@ -0,0 +1,275 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/proxysql/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/proxysql/metadata.yaml" +sidebar_label: "ProxySQL" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ProxySQL + + +<img src="https://netdata.cloud/img/proxysql.png" width="150"/> + + +Plugin: go.d.plugin +Module: proxysql + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors ProxySQL servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ProxySQL instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
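+These values are read mainly from ProxySQL's `stats_mysql_global` and `stats_memory_metrics` tables over the admin interface.
+As a minimal, hedged sketch (assuming the default `stats:stats` credentials and admin port used throughout this page), the same name/value rows can be fetched with plain `database/sql`:
+
+```go
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// Assumption: default stats credentials on the ProxySQL admin port.
+	db, err := sql.Open("mysql", "stats:stats@tcp(127.0.0.1:6032)/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	rows, err := db.Query("SELECT Variable_Name, Variable_Value FROM stats_mysql_global")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer rows.Close()
+
+	// Each row is a name/value pair, e.g. "Active_Transactions" -> "0".
+	for rows.Next() {
+		var name, value string
+		if err := rows.Scan(&name, &value); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("%s = %s\n", name, value)
+	}
+	if err := rows.Err(); err != nil {
+		log.Fatal(err)
+	}
+}
+```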
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |
+| proxysql.client_connections_rate | created, aborted | connections/s |
+| proxysql.server_connections_count | connected | connections |
+| proxysql.server_connections_rate | created, aborted, delayed | connections/s |
+| proxysql.backends_traffic | recv, sent | B/s |
+| proxysql.clients_traffic | recv, sent | B/s |
+| proxysql.active_transactions_count | active | transactions |
+| proxysql.questions_rate | questions | questions/s |
+| proxysql.slow_queries_rate | slow | queries/s |
+| proxysql.queries_rate | autocommit, autocommit_filtered, commit, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |
+| proxysql.backend_statements_count | total, unique | statements |
+| proxysql.backend_statements_rate | prepare, execute, close | statements/s |
+| proxysql.client_statements_count | total, unique | statements |
+| proxysql.client_statements_rate | prepare, execute, close | statements/s |
+| proxysql.cached_statements_count | cached | statements |
+| proxysql.query_cache_entries_count | entries | entries |
+| proxysql.query_cache_memory_used | used | B |
+| proxysql.query_cache_io | in, out | B/s |
+| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |
+| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |
+| proxysql.mysql_monitor_workers_rate | started | workers/s |
+| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |
+| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |
+| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |
+| proxysql.uptime | uptime | seconds |
+
+### Per command
+
+These metrics refer to the SQL command.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| command | SQL command. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.mysql_command_execution_rate | commands | commands/s |
+| proxysql.mysql_command_execution_time | time | microseconds |
+| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | commands/s |
+
+### Per user
+
+These metrics refer to the user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| user | username from the mysql_users table |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.mysql_user_connections_utilization | used | percentage |
+| proxysql.mysql_user_connections_count | used | connections |
+
+### Per backend
+
+These metrics refer to the backend server.
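+Chart and dimension identifiers for this scope embed a backend ID built from the hostgroup, host, and port.
+The sketch below mirrors the `backendID` helper from `collect.go` (sample values taken from the test fixtures):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// backendID mirrors the helper in collect.go: the hostgroup is lowercased with
+// spaces turned into underscores, and dots in the host become underscores.
+func backendID(hg, host, port string) string {
+	hg = strings.ReplaceAll(strings.ToLower(hg), " ", "_")
+	host = strings.ReplaceAll(host, ".", "_")
+	return hg + "_" + host + "_" + port
+}
+
+func main() {
+	// Prints "10_back001-db-master_6001"; the per-backend queries chart would
+	// then get the ID "backend_10_back001-db-master_6001_queries_rate".
+	fmt.Println(backendID("10", "back001-db-master", "6001"))
+}
+```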
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| host | backend server host |
+| port | backend server port |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |
+| proxysql.backend_connections_usage | free, used | connections |
+| proxysql.backend_connections_rate | succeed, failed | connections/s |
+| proxysql.backend_queries_rate | queries | queries/s |
+| proxysql.backend_traffic | recv, sent | B/s |
+| proxysql.backend_latency | latency | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/proxysql.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/proxysql.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |
+| my.cnf | Specifies my.cnf file to read connection parameters from under the [client] section. | | no |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    dsn: stats:stats@tcp(127.0.0.1:6032)/
+
+```
+</details>
+
+##### my.cnf
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    my.cnf: '/etc/my.cnf'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    dsn: stats:stats@tcp(127.0.0.1:6032)/
+
+  - name: remote
+    dsn: stats:stats@tcp(203.0.113.0:6032)/
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m proxysql + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/metadata.yaml b/src/go/collectors/go.d.plugin/modules/proxysql/metadata.yaml new file mode 100644 index 00000000000000..a8ba0e638f6b04 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/metadata.yaml @@ -0,0 +1,434 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-proxysql + plugin_name: go.d.plugin + module_name: proxysql + monitored_instance: + name: ProxySQL + link: https://www.proxysql.com/ + icon_filename: proxysql.png + categories: + - data-collection.database-servers + keywords: + - proxysql + - databases + - sql + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors ProxySQL servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/proxysql.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: dsn + description: Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). + default_value: stats:stats@tcp(127.0.0.1:6032)/ + required: true + - name: my.cnf + description: Specifies my.cnf file to read connection parameters from under the [client] section. + default_value: "" + required: false + - name: timeout + description: Query timeout in seconds. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: TCP socket + description: An example configuration. + config: | + jobs: + - name: local + dsn: stats:stats@tcp(127.0.0.1:6032)/ + - name: my.cnf + description: An example configuration. + config: | + jobs: + - name: local + my.cnf: '/etc/my.cnf' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + dsn: stats:stats@tcp(127.0.0.1:6032)/ + + - name: remote + dsn: stats:stats@tcp(203.0.113.0:6032)/ + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+          labels: []
+          metrics:
+            - name: proxysql.client_connections_count
+              description: Client connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: connected
+                - name: non_idle
+                - name: hostgroup_locked
+            - name: proxysql.client_connections_rate
+              description: Client connections rate
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: created
+                - name: aborted
+            - name: proxysql.server_connections_count
+              description: Server connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: connected
+            - name: proxysql.server_connections_rate
+              description: Server connections rate
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: created
+                - name: aborted
+                - name: delayed
+            - name: proxysql.backends_traffic
+              description: Backends traffic
+              unit: B/s
+              chart_type: line
+              dimensions:
+                - name: recv
+                - name: sent
+            - name: proxysql.clients_traffic
+              description: Clients traffic
+              unit: B/s
+              chart_type: line
+              dimensions:
+                - name: recv
+                - name: sent
+            - name: proxysql.active_transactions_count
+              description: Client connections that are currently processing a transaction
+              unit: transactions
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: proxysql.questions_rate
+              description: Client requests / statements executed
+              unit: questions/s
+              chart_type: line
+              dimensions:
+                - name: questions
+            - name: proxysql.slow_queries_rate
+              description: Slow queries
+              unit: queries/s
+              chart_type: line
+              dimensions:
+                - name: slow
+            - name: proxysql.queries_rate
+              description: Queries rate
+              unit: queries/s
+              chart_type: stacked
+              dimensions:
+                - name: autocommit
+                - name: autocommit_filtered
+                - name: commit
+                - name: commit_filtered
+                - name: rollback
+                - name: rollback_filtered
+                - name: backend_change_user
+                - name: backend_init_db
+                - name: backend_set_names
+                - name: frontend_init_db
+                - name: frontend_set_names
+                - name: frontend_use_db
+            - name: proxysql.backend_statements_count
+              description: Statements available across all backend connections
+              unit: statements
+              chart_type: line
+              dimensions:
+                - name: total
+                - name: unique
+            - name: proxysql.backend_statements_rate
+              description: Statements executed against the backends
+              unit: statements/s
+              chart_type: stacked
+              dimensions:
+                - name: prepare
+                - name: execute
+                - name: close
+            - name: proxysql.client_statements_count
+              description: Statements that are in use by clients
+              unit: statements
+              chart_type: line
+              dimensions:
+                - name: total
+                - name: unique
+            - name: proxysql.client_statements_rate
+              description: Statements executed by clients
+              unit: statements/s
+              chart_type: stacked
+              dimensions:
+                - name: prepare
+                - name: execute
+                - name: close
+            - name: proxysql.cached_statements_count
+              description: Global prepared statements
+              unit: statements
+              chart_type: line
+              dimensions:
+                - name: cached
+            - name: proxysql.query_cache_entries_count
+              description: Query Cache entries
+              unit: entries
+              chart_type: line
+              dimensions:
+                - name: entries
+            - name: proxysql.query_cache_memory_used
+              description: Query Cache memory used
+              unit: B
+              chart_type: line
+              dimensions:
+                - name: used
+            - name: proxysql.query_cache_io
+              description: Query Cache I/O
+              unit: B/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: proxysql.query_cache_requests_rate
+              description: Query Cache requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: write
+                - name: read_success
+            - name: proxysql.mysql_monitor_workers_count
+              description: MySQL monitor workers
+              unit: threads
+              chart_type: line
+              dimensions:
+                - name: workers
+                - name: auxiliary
+            - name: proxysql.mysql_monitor_workers_rate
+              description: MySQL monitor workers rate
+              unit: workers/s
+              chart_type: line
+              dimensions:
+                - name: started
+            - name: proxysql.mysql_monitor_connect_checks_rate
+              description: MySQL monitor connect checks
+              unit: checks/s
+              chart_type: line
+              dimensions:
+                - name: succeed
+                - name: failed
+            - name: proxysql.mysql_monitor_ping_checks_rate
+              description: MySQL monitor ping checks
+              unit: checks/s
+              chart_type: line
+              dimensions:
+                - name: succeed
+                - name: failed
+            - name: proxysql.mysql_monitor_read_only_checks_rate
+              description: MySQL monitor read only checks
+              unit: checks/s
+              chart_type: line
+              dimensions:
+                - name: succeed
+                - name: failed
+            - name: proxysql.mysql_monitor_replication_lag_checks_rate
+              description: MySQL monitor replication lag checks
+              unit: checks/s
+              chart_type: line
+              dimensions:
+                - name: succeed
+                - name: failed
+            - name: proxysql.jemalloc_memory_used
+              description: Jemalloc used memory
+              unit: B
+              chart_type: stacked
+              dimensions:
+                - name: active
+                - name: allocated
+                - name: mapped
+                - name: metadata
+                - name: resident
+                - name: retained
+            - name: proxysql.memory_used
+              description: Memory used
+              unit: B
+              chart_type: stacked
+              dimensions:
+                - name: auth
+                - name: sqlite3
+                - name: query_digest
+                - name: query_rules
+                - name: firewall_users_table
+                - name: firewall_users_config
+                - name: firewall_rules_table
+                - name: firewall_rules_config
+                - name: mysql_threads
+                - name: admin_threads
+                - name: cluster_threads
+            - name: proxysql.uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: uptime
+        - name: command
+          description: These metrics refer to the SQL command.
+          labels:
+            - name: command
+              description: SQL command.
+          metrics:
+            - name: proxysql.mysql_command_execution_rate
+              description: MySQL command execution
+              unit: commands/s
+              chart_type: line
+              dimensions:
+                - name: commands
+            - name: proxysql.mysql_command_execution_time
+              description: MySQL command execution time
+              unit: microseconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: proxysql.mysql_command_execution_duration
+              description: MySQL command execution duration histogram
+              unit: commands/s
+              chart_type: stacked
+              dimensions:
+                - name: 100us
+                - name: 500us
+                - name: 1ms
+                - name: 5ms
+                - name: 10ms
+                - name: 50ms
+                - name: 100ms
+                - name: 500ms
+                - name: 1s
+                - name: 5s
+                - name: 10s
+                - name: +Inf
+        - name: user
+          description: These metrics refer to the user.
+          labels:
+            - name: user
+              description: username from the mysql_users table
+          metrics:
+            - name: proxysql.mysql_user_connections_utilization
+              description: MySQL user connections utilization
+              unit: percentage
+              chart_type: line
+              dimensions:
+                - name: used
+            - name: proxysql.mysql_user_connections_count
+              description: MySQL user connections used
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: used
+        - name: backend
+          description: These metrics refer to the backend server.
+          labels:
+            - name: host
+              description: backend server host
+            - name: port
+              description: backend server port
+          metrics:
+            - name: proxysql.backend_status
+              description: Backend status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: online
+                - name: shunned
+                - name: offline_soft
+                - name: offline_hard
+            - name: proxysql.backend_connections_usage
+              description: Backend connections usage
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: free
+                - name: used
+            - name: proxysql.backend_connections_rate
+              description: Backend connections established
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: succeed
+                - name: failed
+            - name: proxysql.backend_queries_rate
+              description: Backend queries
+              unit: queries/s
+              chart_type: line
+              dimensions:
+                - name: queries
+            - name: proxysql.backend_traffic
+              description: Backend traffic
+              unit: B/s
+              chart_type: line
+              dimensions:
+                - name: recv
+                - name: sent
+            - name: proxysql.backend_latency
+              description: Backend latency
+              unit: microseconds
+              chart_type: line
+              dimensions:
+                - name: latency
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/proxysql.go b/src/go/collectors/go.d.plugin/modules/proxysql/proxysql.go
new file mode 100644
index 00000000000000..d52c36efd79bd2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/proxysql/proxysql.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+	"database/sql"
+	_ "embed"
+	_ "github.com/go-sql-driver/mysql"
+	"sync"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("proxysql", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+func New() *ProxySQL {
+	return &ProxySQL{
+		Config: Config{
+			DSN:     "stats:stats@tcp(127.0.0.1:6032)/",
+			Timeout: web.Duration{Duration: time.Second},
+		},
+
+		charts: baseCharts.Copy(),
+		once:   &sync.Once{},
+		cache: &cache{
+			commands: make(map[string]*commandCache),
+			users:    make(map[string]*userCache),
+			backends: make(map[string]*backendCache),
+		},
+	}
+}
+
+type Config struct {
+	DSN     string       `yaml:"dsn"`
+	MyCNF   string       `yaml:"my.cnf"`
+	Timeout web.Duration `yaml:"timeout"`
+}
+
+type (
+	ProxySQL struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		db *sql.DB
+
+		charts *module.Charts
+
+		once  *sync.Once
+		cache *cache
+	}
+)
+
+func (p *ProxySQL) Init() bool {
+	if p.DSN == "" {
+		p.Error("'dsn' not set")
+		return false
+	}
+
+	p.Debugf("using DSN [%s]", p.DSN)
+	return true
+}
+
+func (p *ProxySQL) Check() bool {
+	return len(p.Collect()) > 0
+}
+
+func (p *ProxySQL) Charts() *module.Charts {
+	return p.charts
+}
+
+func (p *ProxySQL) Collect() map[string]int64 {
+	mx, err := p.collect()
+	if err != nil {
+		p.Error(err)
+	}
+
+	if len(mx) == 0 {
+		return nil
+	}
+	return mx
+}
+
+func (p *ProxySQL) Cleanup() {
+	if p.db == nil {
+		return
+	}
+	if err := p.db.Close(); err != nil {
+		p.Errorf("cleanup: error on closing the ProxySQL instance [%s]: %v", p.DSN, err)
+	}
+	p.db = nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/proxysql_test.go b/src/go/collectors/go.d.plugin/modules/proxysql/proxysql_test.go
new file mode 100644
index 00000000000000..ec31c4d85d5a84
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/proxysql/proxysql_test.go
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+	"bufio"
"bytes" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataV2010Version, _ = os.ReadFile("testdata/v2.0.10/version.txt") + dataV2010StatsMySQLGlobal, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_global.txt") + dataV2010StatsMemoryMetrics, _ = os.ReadFile("testdata/v2.0.10/stats_memory_metrics.txt") + dataV2010StatsMySQLCommandsCounters, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_commands_counters.txt") + dataV2010StatsMySQLUsers, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_users.txt") + dataV2010StatsMySQLConnectionPool, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_connection_pool .txt") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataV2010Version": dataV2010Version, + "dataV2010StatsMySQLGlobal": dataV2010StatsMySQLGlobal, + "dataV2010StatsMemoryMetrics": dataV2010StatsMemoryMetrics, + "dataV2010StatsMySQLCommandsCounters": dataV2010StatsMySQLCommandsCounters, + "dataV2010StatsMySQLUsers": dataV2010StatsMySQLUsers, + "dataV2010StatsMySQLConnectionPool": dataV2010StatsMySQLConnectionPool, + } { + require.NotNilf(t, data, name) + _, err := prepareMockRows(data) + require.NoErrorf(t, err, name) + } +} + +func TestProxySQL_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default": { + wantFail: false, + config: New().Config, + }, + "empty DSN": { + wantFail: true, + config: Config{DSN: ""}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + proxySQL := New() + proxySQL.Config = test.config + + if test.wantFail { + assert.False(t, proxySQL.Init()) + } else { + assert.True(t, proxySQL.Init()) + } + }) + } +} + +func TestProxySQL_Cleanup(t *testing.T) { + tests := map[string]func(t *testing.T) (proxySQL *ProxySQL, cleanup func()){ + "db connection not initialized": func(t *testing.T) (proxySQL *ProxySQL, cleanup func()) { + return New(), func() {} + }, + "db connection initialized": func(t *testing.T) (proxySQL *ProxySQL, cleanup func()) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + + mock.ExpectClose() + proxySQL = New() + proxySQL.db = db + cleanup = func() { _ = db.Close() } + + return proxySQL, cleanup + }, + } + + for name, prepare := range tests { + t.Run(name, func(t *testing.T) { + proxySQL, cleanup := prepare(t) + defer cleanup() + + assert.NotPanics(t, proxySQL.Cleanup) + assert.Nil(t, proxySQL.db) + }) + } +} + +func TestProxySQL_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestProxySQL_Check(t *testing.T) { + tests := map[string]struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + wantFail bool + }{ + "success on all queries": { + wantFail: false, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal) + mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics) + mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters) + mockExpect(t, m, queryStatsMySQLUsers, dataV2010StatsMySQLUsers) + mockExpect(t, m, queryStatsMySQLConnectionPool, dataV2010StatsMySQLConnectionPool) + }, + }, + "fails when error on querying global stats": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpectErr(m, 
queryStatsMySQLGlobal) + }, + }, + "fails when error on querying memory metrics": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal) + mockExpectErr(m, queryStatsMySQLMemoryMetrics) + }, + }, + "fails when error on querying mysql command counters": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal) + mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics) + mockExpectErr(m, queryStatsMySQLCommandsCounters) + }, + }, + "fails when error on querying mysql users": { + wantFail: true, + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal) + mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics) + mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters) + mockExpectErr(m, queryStatsMySQLUsers) + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + proxySQL := New() + proxySQL.db = db + defer func() { _ = db.Close() }() + + require.True(t, proxySQL.Init()) + + test.prepareMock(t, mock) + + if test.wantFail { + assert.False(t, proxySQL.Check()) + } else { + assert.True(t, proxySQL.Check()) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +func TestProxySQL_Collect(t *testing.T) { + type testCaseStep struct { + prepareMock func(t *testing.T, m sqlmock.Sqlmock) + check func(t *testing.T, my *ProxySQL) + } + tests := map[string][]testCaseStep{ + + "success on all queries (v2.0.10)": { + { + prepareMock: func(t *testing.T, m sqlmock.Sqlmock) { + mockExpect(t, m, queryVersion, dataV2010Version) + mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal) + mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics) + mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters) + mockExpect(t, m, queryStatsMySQLUsers, dataV2010StatsMySQLUsers) + mockExpect(t, m, queryStatsMySQLConnectionPool, dataV2010StatsMySQLConnectionPool) + }, + check: func(t *testing.T, my *ProxySQL) { + mx := my.Collect() + + expected := map[string]int64{ + "Access_Denied_Max_Connections": 0, + "Access_Denied_Max_User_Connections": 0, + "Access_Denied_Wrong_Password": 2, + "Active_Transactions": 0, + "Auth_memory": 1044, + "Backend_query_time_nsec": 0, + "Client_Connections_aborted": 2, + "Client_Connections_connected": 3, + "Client_Connections_created": 5458991, + "Client_Connections_hostgroup_locked": 0, + "Client_Connections_non_idle": 3, + "Com_autocommit": 0, + "Com_autocommit_filtered": 0, + "Com_backend_change_user": 188694, + "Com_backend_init_db": 0, + "Com_backend_set_names": 1517893, + "Com_backend_stmt_close": 0, + "Com_backend_stmt_execute": 36303146, + "Com_backend_stmt_prepare": 16858208, + "Com_commit": 0, + "Com_commit_filtered": 0, + "Com_frontend_init_db": 2, + "Com_frontend_set_names": 0, + "Com_frontend_stmt_close": 32137933, + "Com_frontend_stmt_execute": 36314138, + "Com_frontend_stmt_prepare": 32185987, + "Com_frontend_use_db": 0, + "Com_rollback": 0, + "Com_rollback_filtered": 0, + "ConnPool_get_conn_failure": 212943, + 
"ConnPool_get_conn_immediate": 13361, + "ConnPool_get_conn_latency_awareness": 0, + "ConnPool_get_conn_success": 36319474, + "ConnPool_memory_bytes": 932248, + "GTID_consistent_queries": 0, + "GTID_session_collected": 0, + "Mirror_concurrency": 0, + "Mirror_queue_length": 0, + "MyHGM_myconnpoll_destroy": 15150, + "MyHGM_myconnpoll_get": 36519056, + "MyHGM_myconnpoll_get_ok": 36306113, + "MyHGM_myconnpoll_push": 37358734, + "MyHGM_myconnpoll_reset": 2, + "MySQL_Monitor_Workers": 10, + "MySQL_Monitor_Workers_Aux": 0, + "MySQL_Monitor_Workers_Started": 10, + "MySQL_Monitor_connect_check_ERR": 130, + "MySQL_Monitor_connect_check_OK": 3548306, + "MySQL_Monitor_ping_check_ERR": 108271, + "MySQL_Monitor_ping_check_OK": 21289849, + "MySQL_Monitor_read_only_check_ERR": 19610, + "MySQL_Monitor_read_only_check_OK": 106246409, + "MySQL_Monitor_replication_lag_check_ERR": 482, + "MySQL_Monitor_replication_lag_check_OK": 28702388, + "MySQL_Thread_Workers": 4, + "ProxySQL_Uptime": 26748286, + "Queries_backends_bytes_recv": 5896210168, + "Queries_backends_bytes_sent": 4329581500, + "Queries_frontends_bytes_recv": 7434816962, + "Queries_frontends_bytes_sent": 11643634097, + "Query_Cache_Entries": 0, + "Query_Cache_Memory_bytes": 0, + "Query_Cache_Purged": 0, + "Query_Cache_bytes_IN": 0, + "Query_Cache_bytes_OUT": 0, + "Query_Cache_count_GET": 0, + "Query_Cache_count_GET_OK": 0, + "Query_Cache_count_SET": 0, + "Query_Processor_time_nsec": 0, + "Questions": 100638067, + "SQLite3_memory_bytes": 6017144, + "Selects_for_update__autocommit0": 0, + "Server_Connections_aborted": 9979, + "Server_Connections_connected": 13, + "Server_Connections_created": 2122254, + "Server_Connections_delayed": 0, + "Servers_table_version": 37, + "Slow_queries": 405818, + "Stmt_Cached": 65, + "Stmt_Client_Active_Total": 18, + "Stmt_Client_Active_Unique": 18, + "Stmt_Max_Stmt_id": 66, + "Stmt_Server_Active_Total": 101, + "Stmt_Server_Active_Unique": 39, + "automatic_detected_sql_injection": 0, + "aws_aurora_replicas_skipped_during_query": 0, + "backend_10_back001-db-master_6001_Bytes_data_recv": 145193069937, + "backend_10_back001-db-master_6001_Bytes_data_sent": 9858463664, + "backend_10_back001-db-master_6001_ConnERR": 0, + "backend_10_back001-db-master_6001_ConnFree": 423, + "backend_10_back001-db-master_6001_ConnOK": 524, + "backend_10_back001-db-master_6001_ConnUsed": 69, + "backend_10_back001-db-master_6001_Latency_us": 17684, + "backend_10_back001-db-master_6001_Queries": 8970367, + "backend_10_back001-db-master_6001_status_OFFLINE_HARD": 0, + "backend_10_back001-db-master_6001_status_OFFLINE_SOFT": 0, + "backend_10_back001-db-master_6001_status_ONLINE": 0, + "backend_10_back001-db-master_6001_status_SHUNNED": 0, + "backend_11_back001-db-master_6002_Bytes_data_recv": 2903, + "backend_11_back001-db-master_6002_Bytes_data_sent": 187675, + "backend_11_back001-db-master_6002_ConnERR": 0, + "backend_11_back001-db-master_6002_ConnFree": 1, + "backend_11_back001-db-master_6002_ConnOK": 1, + "backend_11_back001-db-master_6002_ConnUsed": 0, + "backend_11_back001-db-master_6002_Latency_us": 17684, + "backend_11_back001-db-master_6002_Queries": 69, + "backend_11_back001-db-master_6002_status_OFFLINE_HARD": 0, + "backend_11_back001-db-master_6002_status_OFFLINE_SOFT": 0, + "backend_11_back001-db-master_6002_status_ONLINE": 0, + "backend_11_back001-db-master_6002_status_SHUNNED": 0, + "backend_11_back001-db-reader_6003_Bytes_data_recv": 4994101, + "backend_11_back001-db-reader_6003_Bytes_data_sent": 163690013, + 
"backend_11_back001-db-reader_6003_ConnERR": 0, + "backend_11_back001-db-reader_6003_ConnFree": 11, + "backend_11_back001-db-reader_6003_ConnOK": 11, + "backend_11_back001-db-reader_6003_ConnUsed": 0, + "backend_11_back001-db-reader_6003_Latency_us": 113, + "backend_11_back001-db-reader_6003_Queries": 63488, + "backend_11_back001-db-reader_6003_status_OFFLINE_HARD": 0, + "backend_11_back001-db-reader_6003_status_OFFLINE_SOFT": 0, + "backend_11_back001-db-reader_6003_status_ONLINE": 0, + "backend_11_back001-db-reader_6003_status_SHUNNED": 0, + "backend_20_back002-db-master_6004_Bytes_data_recv": 266034339, + "backend_20_back002-db-master_6004_Bytes_data_sent": 1086994186, + "backend_20_back002-db-master_6004_ConnERR": 2, + "backend_20_back002-db-master_6004_ConnFree": 188, + "backend_20_back002-db-master_6004_ConnOK": 197, + "backend_20_back002-db-master_6004_ConnUsed": 9, + "backend_20_back002-db-master_6004_Latency_us": 101981, + "backend_20_back002-db-master_6004_Queries": 849461, + "backend_20_back002-db-master_6004_status_OFFLINE_HARD": 0, + "backend_20_back002-db-master_6004_status_OFFLINE_SOFT": 0, + "backend_20_back002-db-master_6004_status_ONLINE": 0, + "backend_20_back002-db-master_6004_status_SHUNNED": 0, + "backend_21_back002-db-reader_6005_Bytes_data_recv": 984, + "backend_21_back002-db-reader_6005_Bytes_data_sent": 6992, + "backend_21_back002-db-reader_6005_ConnERR": 0, + "backend_21_back002-db-reader_6005_ConnFree": 1, + "backend_21_back002-db-reader_6005_ConnOK": 1, + "backend_21_back002-db-reader_6005_ConnUsed": 0, + "backend_21_back002-db-reader_6005_Latency_us": 230, + "backend_21_back002-db-reader_6005_Queries": 8, + "backend_21_back002-db-reader_6005_status_OFFLINE_HARD": 0, + "backend_21_back002-db-reader_6005_status_OFFLINE_SOFT": 0, + "backend_21_back002-db-reader_6005_status_ONLINE": 0, + "backend_21_back002-db-reader_6005_status_SHUNNED": 0, + "backend_31_back003-db-master_6006_Bytes_data_recv": 81438709, + "backend_31_back003-db-master_6006_Bytes_data_sent": 712803, + "backend_31_back003-db-master_6006_ConnERR": 0, + "backend_31_back003-db-master_6006_ConnFree": 3, + "backend_31_back003-db-master_6006_ConnOK": 3, + "backend_31_back003-db-master_6006_ConnUsed": 0, + "backend_31_back003-db-master_6006_Latency_us": 231, + "backend_31_back003-db-master_6006_Queries": 3276, + "backend_31_back003-db-master_6006_status_OFFLINE_HARD": 0, + "backend_31_back003-db-master_6006_status_OFFLINE_SOFT": 0, + "backend_31_back003-db-master_6006_status_ONLINE": 0, + "backend_31_back003-db-master_6006_status_SHUNNED": 0, + "backend_31_back003-db-reader_6007_Bytes_data_recv": 115810708275, + "backend_31_back003-db-reader_6007_Bytes_data_sent": 411900849, + "backend_31_back003-db-reader_6007_ConnERR": 0, + "backend_31_back003-db-reader_6007_ConnFree": 70, + "backend_31_back003-db-reader_6007_ConnOK": 71, + "backend_31_back003-db-reader_6007_ConnUsed": 1, + "backend_31_back003-db-reader_6007_Latency_us": 230, + "backend_31_back003-db-reader_6007_Queries": 2356904, + "backend_31_back003-db-reader_6007_status_OFFLINE_HARD": 0, + "backend_31_back003-db-reader_6007_status_OFFLINE_SOFT": 0, + "backend_31_back003-db-reader_6007_status_ONLINE": 0, + "backend_31_back003-db-reader_6007_status_SHUNNED": 0, + "backend_lagging_during_query": 8880, + "backend_offline_during_query": 8, + "generated_error_packets": 231, + "hostgroup_locked_queries": 0, + "hostgroup_locked_set_cmds": 0, + "jemalloc_active": 385101824, + "jemalloc_allocated": 379402432, + "jemalloc_mapped": 430993408, + 
"jemalloc_metadata": 17418872, + "jemalloc_resident": 403759104, + "jemalloc_retained": 260542464, + "max_connect_timeouts": 227, + "mysql_backend_buffers_bytes": 0, + "mysql_command_ALTER_TABLE_Total_Time_us": 0, + "mysql_command_ALTER_TABLE_Total_cnt": 0, + "mysql_command_ALTER_TABLE_cnt_100ms": 0, + "mysql_command_ALTER_TABLE_cnt_100us": 0, + "mysql_command_ALTER_TABLE_cnt_10ms": 0, + "mysql_command_ALTER_TABLE_cnt_10s": 0, + "mysql_command_ALTER_TABLE_cnt_1ms": 0, + "mysql_command_ALTER_TABLE_cnt_1s": 0, + "mysql_command_ALTER_TABLE_cnt_500ms": 0, + "mysql_command_ALTER_TABLE_cnt_500us": 0, + "mysql_command_ALTER_TABLE_cnt_50ms": 0, + "mysql_command_ALTER_TABLE_cnt_5ms": 0, + "mysql_command_ALTER_TABLE_cnt_5s": 0, + "mysql_command_ALTER_TABLE_cnt_INFs": 0, + "mysql_command_ALTER_VIEW_Total_Time_us": 0, + "mysql_command_ALTER_VIEW_Total_cnt": 0, + "mysql_command_ALTER_VIEW_cnt_100ms": 0, + "mysql_command_ALTER_VIEW_cnt_100us": 0, + "mysql_command_ALTER_VIEW_cnt_10ms": 0, + "mysql_command_ALTER_VIEW_cnt_10s": 0, + "mysql_command_ALTER_VIEW_cnt_1ms": 0, + "mysql_command_ALTER_VIEW_cnt_1s": 0, + "mysql_command_ALTER_VIEW_cnt_500ms": 0, + "mysql_command_ALTER_VIEW_cnt_500us": 0, + "mysql_command_ALTER_VIEW_cnt_50ms": 0, + "mysql_command_ALTER_VIEW_cnt_5ms": 0, + "mysql_command_ALTER_VIEW_cnt_5s": 0, + "mysql_command_ALTER_VIEW_cnt_INFs": 0, + "mysql_command_ANALYZE_TABLE_Total_Time_us": 0, + "mysql_command_ANALYZE_TABLE_Total_cnt": 0, + "mysql_command_ANALYZE_TABLE_cnt_100ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_100us": 0, + "mysql_command_ANALYZE_TABLE_cnt_10ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_10s": 0, + "mysql_command_ANALYZE_TABLE_cnt_1ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_1s": 0, + "mysql_command_ANALYZE_TABLE_cnt_500ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_500us": 0, + "mysql_command_ANALYZE_TABLE_cnt_50ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_5ms": 0, + "mysql_command_ANALYZE_TABLE_cnt_5s": 0, + "mysql_command_ANALYZE_TABLE_cnt_INFs": 0, + "mysql_command_BEGIN_Total_Time_us": 0, + "mysql_command_BEGIN_Total_cnt": 0, + "mysql_command_BEGIN_cnt_100ms": 0, + "mysql_command_BEGIN_cnt_100us": 0, + "mysql_command_BEGIN_cnt_10ms": 0, + "mysql_command_BEGIN_cnt_10s": 0, + "mysql_command_BEGIN_cnt_1ms": 0, + "mysql_command_BEGIN_cnt_1s": 0, + "mysql_command_BEGIN_cnt_500ms": 0, + "mysql_command_BEGIN_cnt_500us": 0, + "mysql_command_BEGIN_cnt_50ms": 0, + "mysql_command_BEGIN_cnt_5ms": 0, + "mysql_command_BEGIN_cnt_5s": 0, + "mysql_command_BEGIN_cnt_INFs": 0, + "mysql_command_CALL_Total_Time_us": 0, + "mysql_command_CALL_Total_cnt": 0, + "mysql_command_CALL_cnt_100ms": 0, + "mysql_command_CALL_cnt_100us": 0, + "mysql_command_CALL_cnt_10ms": 0, + "mysql_command_CALL_cnt_10s": 0, + "mysql_command_CALL_cnt_1ms": 0, + "mysql_command_CALL_cnt_1s": 0, + "mysql_command_CALL_cnt_500ms": 0, + "mysql_command_CALL_cnt_500us": 0, + "mysql_command_CALL_cnt_50ms": 0, + "mysql_command_CALL_cnt_5ms": 0, + "mysql_command_CALL_cnt_5s": 0, + "mysql_command_CALL_cnt_INFs": 0, + "mysql_command_CHANGE_MASTER_Total_Time_us": 0, + "mysql_command_CHANGE_MASTER_Total_cnt": 0, + "mysql_command_CHANGE_MASTER_cnt_100ms": 0, + "mysql_command_CHANGE_MASTER_cnt_100us": 0, + "mysql_command_CHANGE_MASTER_cnt_10ms": 0, + "mysql_command_CHANGE_MASTER_cnt_10s": 0, + "mysql_command_CHANGE_MASTER_cnt_1ms": 0, + "mysql_command_CHANGE_MASTER_cnt_1s": 0, + "mysql_command_CHANGE_MASTER_cnt_500ms": 0, + "mysql_command_CHANGE_MASTER_cnt_500us": 0, + "mysql_command_CHANGE_MASTER_cnt_50ms": 0, + 
"mysql_command_CHANGE_MASTER_cnt_5ms": 0, + "mysql_command_CHANGE_MASTER_cnt_5s": 0, + "mysql_command_CHANGE_MASTER_cnt_INFs": 0, + "mysql_command_COMMIT_Total_Time_us": 0, + "mysql_command_COMMIT_Total_cnt": 0, + "mysql_command_COMMIT_cnt_100ms": 0, + "mysql_command_COMMIT_cnt_100us": 0, + "mysql_command_COMMIT_cnt_10ms": 0, + "mysql_command_COMMIT_cnt_10s": 0, + "mysql_command_COMMIT_cnt_1ms": 0, + "mysql_command_COMMIT_cnt_1s": 0, + "mysql_command_COMMIT_cnt_500ms": 0, + "mysql_command_COMMIT_cnt_500us": 0, + "mysql_command_COMMIT_cnt_50ms": 0, + "mysql_command_COMMIT_cnt_5ms": 0, + "mysql_command_COMMIT_cnt_5s": 0, + "mysql_command_COMMIT_cnt_INFs": 0, + "mysql_command_CREATE_DATABASE_Total_Time_us": 0, + "mysql_command_CREATE_DATABASE_Total_cnt": 0, + "mysql_command_CREATE_DATABASE_cnt_100ms": 0, + "mysql_command_CREATE_DATABASE_cnt_100us": 0, + "mysql_command_CREATE_DATABASE_cnt_10ms": 0, + "mysql_command_CREATE_DATABASE_cnt_10s": 0, + "mysql_command_CREATE_DATABASE_cnt_1ms": 0, + "mysql_command_CREATE_DATABASE_cnt_1s": 0, + "mysql_command_CREATE_DATABASE_cnt_500ms": 0, + "mysql_command_CREATE_DATABASE_cnt_500us": 0, + "mysql_command_CREATE_DATABASE_cnt_50ms": 0, + "mysql_command_CREATE_DATABASE_cnt_5ms": 0, + "mysql_command_CREATE_DATABASE_cnt_5s": 0, + "mysql_command_CREATE_DATABASE_cnt_INFs": 0, + "mysql_command_CREATE_INDEX_Total_Time_us": 0, + "mysql_command_CREATE_INDEX_Total_cnt": 0, + "mysql_command_CREATE_INDEX_cnt_100ms": 0, + "mysql_command_CREATE_INDEX_cnt_100us": 0, + "mysql_command_CREATE_INDEX_cnt_10ms": 0, + "mysql_command_CREATE_INDEX_cnt_10s": 0, + "mysql_command_CREATE_INDEX_cnt_1ms": 0, + "mysql_command_CREATE_INDEX_cnt_1s": 0, + "mysql_command_CREATE_INDEX_cnt_500ms": 0, + "mysql_command_CREATE_INDEX_cnt_500us": 0, + "mysql_command_CREATE_INDEX_cnt_50ms": 0, + "mysql_command_CREATE_INDEX_cnt_5ms": 0, + "mysql_command_CREATE_INDEX_cnt_5s": 0, + "mysql_command_CREATE_INDEX_cnt_INFs": 0, + "mysql_command_CREATE_TABLE_Total_Time_us": 0, + "mysql_command_CREATE_TABLE_Total_cnt": 0, + "mysql_command_CREATE_TABLE_cnt_100ms": 0, + "mysql_command_CREATE_TABLE_cnt_100us": 0, + "mysql_command_CREATE_TABLE_cnt_10ms": 0, + "mysql_command_CREATE_TABLE_cnt_10s": 0, + "mysql_command_CREATE_TABLE_cnt_1ms": 0, + "mysql_command_CREATE_TABLE_cnt_1s": 0, + "mysql_command_CREATE_TABLE_cnt_500ms": 0, + "mysql_command_CREATE_TABLE_cnt_500us": 0, + "mysql_command_CREATE_TABLE_cnt_50ms": 0, + "mysql_command_CREATE_TABLE_cnt_5ms": 0, + "mysql_command_CREATE_TABLE_cnt_5s": 0, + "mysql_command_CREATE_TABLE_cnt_INFs": 0, + "mysql_command_CREATE_TEMPORARY_Total_Time_us": 0, + "mysql_command_CREATE_TEMPORARY_Total_cnt": 0, + "mysql_command_CREATE_TEMPORARY_cnt_100ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_100us": 0, + "mysql_command_CREATE_TEMPORARY_cnt_10ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_10s": 0, + "mysql_command_CREATE_TEMPORARY_cnt_1ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_1s": 0, + "mysql_command_CREATE_TEMPORARY_cnt_500ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_500us": 0, + "mysql_command_CREATE_TEMPORARY_cnt_50ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_5ms": 0, + "mysql_command_CREATE_TEMPORARY_cnt_5s": 0, + "mysql_command_CREATE_TEMPORARY_cnt_INFs": 0, + "mysql_command_CREATE_TRIGGER_Total_Time_us": 0, + "mysql_command_CREATE_TRIGGER_Total_cnt": 0, + "mysql_command_CREATE_TRIGGER_cnt_100ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_100us": 0, + "mysql_command_CREATE_TRIGGER_cnt_10ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_10s": 0, + 
"mysql_command_CREATE_TRIGGER_cnt_1ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_1s": 0, + "mysql_command_CREATE_TRIGGER_cnt_500ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_500us": 0, + "mysql_command_CREATE_TRIGGER_cnt_50ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_5ms": 0, + "mysql_command_CREATE_TRIGGER_cnt_5s": 0, + "mysql_command_CREATE_TRIGGER_cnt_INFs": 0, + "mysql_command_CREATE_USER_Total_Time_us": 0, + "mysql_command_CREATE_USER_Total_cnt": 0, + "mysql_command_CREATE_USER_cnt_100ms": 0, + "mysql_command_CREATE_USER_cnt_100us": 0, + "mysql_command_CREATE_USER_cnt_10ms": 0, + "mysql_command_CREATE_USER_cnt_10s": 0, + "mysql_command_CREATE_USER_cnt_1ms": 0, + "mysql_command_CREATE_USER_cnt_1s": 0, + "mysql_command_CREATE_USER_cnt_500ms": 0, + "mysql_command_CREATE_USER_cnt_500us": 0, + "mysql_command_CREATE_USER_cnt_50ms": 0, + "mysql_command_CREATE_USER_cnt_5ms": 0, + "mysql_command_CREATE_USER_cnt_5s": 0, + "mysql_command_CREATE_USER_cnt_INFs": 0, + "mysql_command_CREATE_VIEW_Total_Time_us": 0, + "mysql_command_CREATE_VIEW_Total_cnt": 0, + "mysql_command_CREATE_VIEW_cnt_100ms": 0, + "mysql_command_CREATE_VIEW_cnt_100us": 0, + "mysql_command_CREATE_VIEW_cnt_10ms": 0, + "mysql_command_CREATE_VIEW_cnt_10s": 0, + "mysql_command_CREATE_VIEW_cnt_1ms": 0, + "mysql_command_CREATE_VIEW_cnt_1s": 0, + "mysql_command_CREATE_VIEW_cnt_500ms": 0, + "mysql_command_CREATE_VIEW_cnt_500us": 0, + "mysql_command_CREATE_VIEW_cnt_50ms": 0, + "mysql_command_CREATE_VIEW_cnt_5ms": 0, + "mysql_command_CREATE_VIEW_cnt_5s": 0, + "mysql_command_CREATE_VIEW_cnt_INFs": 0, + "mysql_command_DEALLOCATE_Total_Time_us": 0, + "mysql_command_DEALLOCATE_Total_cnt": 0, + "mysql_command_DEALLOCATE_cnt_100ms": 0, + "mysql_command_DEALLOCATE_cnt_100us": 0, + "mysql_command_DEALLOCATE_cnt_10ms": 0, + "mysql_command_DEALLOCATE_cnt_10s": 0, + "mysql_command_DEALLOCATE_cnt_1ms": 0, + "mysql_command_DEALLOCATE_cnt_1s": 0, + "mysql_command_DEALLOCATE_cnt_500ms": 0, + "mysql_command_DEALLOCATE_cnt_500us": 0, + "mysql_command_DEALLOCATE_cnt_50ms": 0, + "mysql_command_DEALLOCATE_cnt_5ms": 0, + "mysql_command_DEALLOCATE_cnt_5s": 0, + "mysql_command_DEALLOCATE_cnt_INFs": 0, + "mysql_command_DELETE_Total_Time_us": 0, + "mysql_command_DELETE_Total_cnt": 0, + "mysql_command_DELETE_cnt_100ms": 0, + "mysql_command_DELETE_cnt_100us": 0, + "mysql_command_DELETE_cnt_10ms": 0, + "mysql_command_DELETE_cnt_10s": 0, + "mysql_command_DELETE_cnt_1ms": 0, + "mysql_command_DELETE_cnt_1s": 0, + "mysql_command_DELETE_cnt_500ms": 0, + "mysql_command_DELETE_cnt_500us": 0, + "mysql_command_DELETE_cnt_50ms": 0, + "mysql_command_DELETE_cnt_5ms": 0, + "mysql_command_DELETE_cnt_5s": 0, + "mysql_command_DELETE_cnt_INFs": 0, + "mysql_command_DESCRIBE_Total_Time_us": 0, + "mysql_command_DESCRIBE_Total_cnt": 0, + "mysql_command_DESCRIBE_cnt_100ms": 0, + "mysql_command_DESCRIBE_cnt_100us": 0, + "mysql_command_DESCRIBE_cnt_10ms": 0, + "mysql_command_DESCRIBE_cnt_10s": 0, + "mysql_command_DESCRIBE_cnt_1ms": 0, + "mysql_command_DESCRIBE_cnt_1s": 0, + "mysql_command_DESCRIBE_cnt_500ms": 0, + "mysql_command_DESCRIBE_cnt_500us": 0, + "mysql_command_DESCRIBE_cnt_50ms": 0, + "mysql_command_DESCRIBE_cnt_5ms": 0, + "mysql_command_DESCRIBE_cnt_5s": 0, + "mysql_command_DESCRIBE_cnt_INFs": 0, + "mysql_command_DROP_DATABASE_Total_Time_us": 0, + "mysql_command_DROP_DATABASE_Total_cnt": 0, + "mysql_command_DROP_DATABASE_cnt_100ms": 0, + "mysql_command_DROP_DATABASE_cnt_100us": 0, + "mysql_command_DROP_DATABASE_cnt_10ms": 0, + "mysql_command_DROP_DATABASE_cnt_10s": 0, + 
"mysql_command_DROP_DATABASE_cnt_1ms": 0, + "mysql_command_DROP_DATABASE_cnt_1s": 0, + "mysql_command_DROP_DATABASE_cnt_500ms": 0, + "mysql_command_DROP_DATABASE_cnt_500us": 0, + "mysql_command_DROP_DATABASE_cnt_50ms": 0, + "mysql_command_DROP_DATABASE_cnt_5ms": 0, + "mysql_command_DROP_DATABASE_cnt_5s": 0, + "mysql_command_DROP_DATABASE_cnt_INFs": 0, + "mysql_command_DROP_INDEX_Total_Time_us": 0, + "mysql_command_DROP_INDEX_Total_cnt": 0, + "mysql_command_DROP_INDEX_cnt_100ms": 0, + "mysql_command_DROP_INDEX_cnt_100us": 0, + "mysql_command_DROP_INDEX_cnt_10ms": 0, + "mysql_command_DROP_INDEX_cnt_10s": 0, + "mysql_command_DROP_INDEX_cnt_1ms": 0, + "mysql_command_DROP_INDEX_cnt_1s": 0, + "mysql_command_DROP_INDEX_cnt_500ms": 0, + "mysql_command_DROP_INDEX_cnt_500us": 0, + "mysql_command_DROP_INDEX_cnt_50ms": 0, + "mysql_command_DROP_INDEX_cnt_5ms": 0, + "mysql_command_DROP_INDEX_cnt_5s": 0, + "mysql_command_DROP_INDEX_cnt_INFs": 0, + "mysql_command_DROP_TABLE_Total_Time_us": 0, + "mysql_command_DROP_TABLE_Total_cnt": 0, + "mysql_command_DROP_TABLE_cnt_100ms": 0, + "mysql_command_DROP_TABLE_cnt_100us": 0, + "mysql_command_DROP_TABLE_cnt_10ms": 0, + "mysql_command_DROP_TABLE_cnt_10s": 0, + "mysql_command_DROP_TABLE_cnt_1ms": 0, + "mysql_command_DROP_TABLE_cnt_1s": 0, + "mysql_command_DROP_TABLE_cnt_500ms": 0, + "mysql_command_DROP_TABLE_cnt_500us": 0, + "mysql_command_DROP_TABLE_cnt_50ms": 0, + "mysql_command_DROP_TABLE_cnt_5ms": 0, + "mysql_command_DROP_TABLE_cnt_5s": 0, + "mysql_command_DROP_TABLE_cnt_INFs": 0, + "mysql_command_DROP_TRIGGER_Total_Time_us": 0, + "mysql_command_DROP_TRIGGER_Total_cnt": 0, + "mysql_command_DROP_TRIGGER_cnt_100ms": 0, + "mysql_command_DROP_TRIGGER_cnt_100us": 0, + "mysql_command_DROP_TRIGGER_cnt_10ms": 0, + "mysql_command_DROP_TRIGGER_cnt_10s": 0, + "mysql_command_DROP_TRIGGER_cnt_1ms": 0, + "mysql_command_DROP_TRIGGER_cnt_1s": 0, + "mysql_command_DROP_TRIGGER_cnt_500ms": 0, + "mysql_command_DROP_TRIGGER_cnt_500us": 0, + "mysql_command_DROP_TRIGGER_cnt_50ms": 0, + "mysql_command_DROP_TRIGGER_cnt_5ms": 0, + "mysql_command_DROP_TRIGGER_cnt_5s": 0, + "mysql_command_DROP_TRIGGER_cnt_INFs": 0, + "mysql_command_DROP_USER_Total_Time_us": 0, + "mysql_command_DROP_USER_Total_cnt": 0, + "mysql_command_DROP_USER_cnt_100ms": 0, + "mysql_command_DROP_USER_cnt_100us": 0, + "mysql_command_DROP_USER_cnt_10ms": 0, + "mysql_command_DROP_USER_cnt_10s": 0, + "mysql_command_DROP_USER_cnt_1ms": 0, + "mysql_command_DROP_USER_cnt_1s": 0, + "mysql_command_DROP_USER_cnt_500ms": 0, + "mysql_command_DROP_USER_cnt_500us": 0, + "mysql_command_DROP_USER_cnt_50ms": 0, + "mysql_command_DROP_USER_cnt_5ms": 0, + "mysql_command_DROP_USER_cnt_5s": 0, + "mysql_command_DROP_USER_cnt_INFs": 0, + "mysql_command_DROP_VIEW_Total_Time_us": 0, + "mysql_command_DROP_VIEW_Total_cnt": 0, + "mysql_command_DROP_VIEW_cnt_100ms": 0, + "mysql_command_DROP_VIEW_cnt_100us": 0, + "mysql_command_DROP_VIEW_cnt_10ms": 0, + "mysql_command_DROP_VIEW_cnt_10s": 0, + "mysql_command_DROP_VIEW_cnt_1ms": 0, + "mysql_command_DROP_VIEW_cnt_1s": 0, + "mysql_command_DROP_VIEW_cnt_500ms": 0, + "mysql_command_DROP_VIEW_cnt_500us": 0, + "mysql_command_DROP_VIEW_cnt_50ms": 0, + "mysql_command_DROP_VIEW_cnt_5ms": 0, + "mysql_command_DROP_VIEW_cnt_5s": 0, + "mysql_command_DROP_VIEW_cnt_INFs": 0, + "mysql_command_EXECUTE_Total_Time_us": 0, + "mysql_command_EXECUTE_Total_cnt": 0, + "mysql_command_EXECUTE_cnt_100ms": 0, + "mysql_command_EXECUTE_cnt_100us": 0, + "mysql_command_EXECUTE_cnt_10ms": 0, + "mysql_command_EXECUTE_cnt_10s": 0, + 
"mysql_command_EXECUTE_cnt_1ms": 0, + "mysql_command_EXECUTE_cnt_1s": 0, + "mysql_command_EXECUTE_cnt_500ms": 0, + "mysql_command_EXECUTE_cnt_500us": 0, + "mysql_command_EXECUTE_cnt_50ms": 0, + "mysql_command_EXECUTE_cnt_5ms": 0, + "mysql_command_EXECUTE_cnt_5s": 0, + "mysql_command_EXECUTE_cnt_INFs": 0, + "mysql_command_EXPLAIN_Total_Time_us": 0, + "mysql_command_EXPLAIN_Total_cnt": 0, + "mysql_command_EXPLAIN_cnt_100ms": 0, + "mysql_command_EXPLAIN_cnt_100us": 0, + "mysql_command_EXPLAIN_cnt_10ms": 0, + "mysql_command_EXPLAIN_cnt_10s": 0, + "mysql_command_EXPLAIN_cnt_1ms": 0, + "mysql_command_EXPLAIN_cnt_1s": 0, + "mysql_command_EXPLAIN_cnt_500ms": 0, + "mysql_command_EXPLAIN_cnt_500us": 0, + "mysql_command_EXPLAIN_cnt_50ms": 0, + "mysql_command_EXPLAIN_cnt_5ms": 0, + "mysql_command_EXPLAIN_cnt_5s": 0, + "mysql_command_EXPLAIN_cnt_INFs": 0, + "mysql_command_FLUSH_Total_Time_us": 0, + "mysql_command_FLUSH_Total_cnt": 0, + "mysql_command_FLUSH_cnt_100ms": 0, + "mysql_command_FLUSH_cnt_100us": 0, + "mysql_command_FLUSH_cnt_10ms": 0, + "mysql_command_FLUSH_cnt_10s": 0, + "mysql_command_FLUSH_cnt_1ms": 0, + "mysql_command_FLUSH_cnt_1s": 0, + "mysql_command_FLUSH_cnt_500ms": 0, + "mysql_command_FLUSH_cnt_500us": 0, + "mysql_command_FLUSH_cnt_50ms": 0, + "mysql_command_FLUSH_cnt_5ms": 0, + "mysql_command_FLUSH_cnt_5s": 0, + "mysql_command_FLUSH_cnt_INFs": 0, + "mysql_command_GRANT_Total_Time_us": 0, + "mysql_command_GRANT_Total_cnt": 0, + "mysql_command_GRANT_cnt_100ms": 0, + "mysql_command_GRANT_cnt_100us": 0, + "mysql_command_GRANT_cnt_10ms": 0, + "mysql_command_GRANT_cnt_10s": 0, + "mysql_command_GRANT_cnt_1ms": 0, + "mysql_command_GRANT_cnt_1s": 0, + "mysql_command_GRANT_cnt_500ms": 0, + "mysql_command_GRANT_cnt_500us": 0, + "mysql_command_GRANT_cnt_50ms": 0, + "mysql_command_GRANT_cnt_5ms": 0, + "mysql_command_GRANT_cnt_5s": 0, + "mysql_command_GRANT_cnt_INFs": 0, + "mysql_command_INSERT_Total_Time_us": 0, + "mysql_command_INSERT_Total_cnt": 0, + "mysql_command_INSERT_cnt_100ms": 0, + "mysql_command_INSERT_cnt_100us": 0, + "mysql_command_INSERT_cnt_10ms": 0, + "mysql_command_INSERT_cnt_10s": 0, + "mysql_command_INSERT_cnt_1ms": 0, + "mysql_command_INSERT_cnt_1s": 0, + "mysql_command_INSERT_cnt_500ms": 0, + "mysql_command_INSERT_cnt_500us": 0, + "mysql_command_INSERT_cnt_50ms": 0, + "mysql_command_INSERT_cnt_5ms": 0, + "mysql_command_INSERT_cnt_5s": 0, + "mysql_command_INSERT_cnt_INFs": 0, + "mysql_command_KILL_Total_Time_us": 0, + "mysql_command_KILL_Total_cnt": 0, + "mysql_command_KILL_cnt_100ms": 0, + "mysql_command_KILL_cnt_100us": 0, + "mysql_command_KILL_cnt_10ms": 0, + "mysql_command_KILL_cnt_10s": 0, + "mysql_command_KILL_cnt_1ms": 0, + "mysql_command_KILL_cnt_1s": 0, + "mysql_command_KILL_cnt_500ms": 0, + "mysql_command_KILL_cnt_500us": 0, + "mysql_command_KILL_cnt_50ms": 0, + "mysql_command_KILL_cnt_5ms": 0, + "mysql_command_KILL_cnt_5s": 0, + "mysql_command_KILL_cnt_INFs": 0, + "mysql_command_LOAD_Total_Time_us": 0, + "mysql_command_LOAD_Total_cnt": 0, + "mysql_command_LOAD_cnt_100ms": 0, + "mysql_command_LOAD_cnt_100us": 0, + "mysql_command_LOAD_cnt_10ms": 0, + "mysql_command_LOAD_cnt_10s": 0, + "mysql_command_LOAD_cnt_1ms": 0, + "mysql_command_LOAD_cnt_1s": 0, + "mysql_command_LOAD_cnt_500ms": 0, + "mysql_command_LOAD_cnt_500us": 0, + "mysql_command_LOAD_cnt_50ms": 0, + "mysql_command_LOAD_cnt_5ms": 0, + "mysql_command_LOAD_cnt_5s": 0, + "mysql_command_LOAD_cnt_INFs": 0, + "mysql_command_LOCK_TABLE_Total_Time_us": 0, + "mysql_command_LOCK_TABLE_Total_cnt": 0, + 
"mysql_command_LOCK_TABLE_cnt_100ms": 0, + "mysql_command_LOCK_TABLE_cnt_100us": 0, + "mysql_command_LOCK_TABLE_cnt_10ms": 0, + "mysql_command_LOCK_TABLE_cnt_10s": 0, + "mysql_command_LOCK_TABLE_cnt_1ms": 0, + "mysql_command_LOCK_TABLE_cnt_1s": 0, + "mysql_command_LOCK_TABLE_cnt_500ms": 0, + "mysql_command_LOCK_TABLE_cnt_500us": 0, + "mysql_command_LOCK_TABLE_cnt_50ms": 0, + "mysql_command_LOCK_TABLE_cnt_5ms": 0, + "mysql_command_LOCK_TABLE_cnt_5s": 0, + "mysql_command_LOCK_TABLE_cnt_INFs": 0, + "mysql_command_OPTIMIZE_Total_Time_us": 0, + "mysql_command_OPTIMIZE_Total_cnt": 0, + "mysql_command_OPTIMIZE_cnt_100ms": 0, + "mysql_command_OPTIMIZE_cnt_100us": 0, + "mysql_command_OPTIMIZE_cnt_10ms": 0, + "mysql_command_OPTIMIZE_cnt_10s": 0, + "mysql_command_OPTIMIZE_cnt_1ms": 0, + "mysql_command_OPTIMIZE_cnt_1s": 0, + "mysql_command_OPTIMIZE_cnt_500ms": 0, + "mysql_command_OPTIMIZE_cnt_500us": 0, + "mysql_command_OPTIMIZE_cnt_50ms": 0, + "mysql_command_OPTIMIZE_cnt_5ms": 0, + "mysql_command_OPTIMIZE_cnt_5s": 0, + "mysql_command_OPTIMIZE_cnt_INFs": 0, + "mysql_command_PREPARE_Total_Time_us": 0, + "mysql_command_PREPARE_Total_cnt": 0, + "mysql_command_PREPARE_cnt_100ms": 0, + "mysql_command_PREPARE_cnt_100us": 0, + "mysql_command_PREPARE_cnt_10ms": 0, + "mysql_command_PREPARE_cnt_10s": 0, + "mysql_command_PREPARE_cnt_1ms": 0, + "mysql_command_PREPARE_cnt_1s": 0, + "mysql_command_PREPARE_cnt_500ms": 0, + "mysql_command_PREPARE_cnt_500us": 0, + "mysql_command_PREPARE_cnt_50ms": 0, + "mysql_command_PREPARE_cnt_5ms": 0, + "mysql_command_PREPARE_cnt_5s": 0, + "mysql_command_PREPARE_cnt_INFs": 0, + "mysql_command_PURGE_Total_Time_us": 0, + "mysql_command_PURGE_Total_cnt": 0, + "mysql_command_PURGE_cnt_100ms": 0, + "mysql_command_PURGE_cnt_100us": 0, + "mysql_command_PURGE_cnt_10ms": 0, + "mysql_command_PURGE_cnt_10s": 0, + "mysql_command_PURGE_cnt_1ms": 0, + "mysql_command_PURGE_cnt_1s": 0, + "mysql_command_PURGE_cnt_500ms": 0, + "mysql_command_PURGE_cnt_500us": 0, + "mysql_command_PURGE_cnt_50ms": 0, + "mysql_command_PURGE_cnt_5ms": 0, + "mysql_command_PURGE_cnt_5s": 0, + "mysql_command_PURGE_cnt_INFs": 0, + "mysql_command_RENAME_TABLE_Total_Time_us": 0, + "mysql_command_RENAME_TABLE_Total_cnt": 0, + "mysql_command_RENAME_TABLE_cnt_100ms": 0, + "mysql_command_RENAME_TABLE_cnt_100us": 0, + "mysql_command_RENAME_TABLE_cnt_10ms": 0, + "mysql_command_RENAME_TABLE_cnt_10s": 0, + "mysql_command_RENAME_TABLE_cnt_1ms": 0, + "mysql_command_RENAME_TABLE_cnt_1s": 0, + "mysql_command_RENAME_TABLE_cnt_500ms": 0, + "mysql_command_RENAME_TABLE_cnt_500us": 0, + "mysql_command_RENAME_TABLE_cnt_50ms": 0, + "mysql_command_RENAME_TABLE_cnt_5ms": 0, + "mysql_command_RENAME_TABLE_cnt_5s": 0, + "mysql_command_RENAME_TABLE_cnt_INFs": 0, + "mysql_command_REPLACE_Total_Time_us": 0, + "mysql_command_REPLACE_Total_cnt": 0, + "mysql_command_REPLACE_cnt_100ms": 0, + "mysql_command_REPLACE_cnt_100us": 0, + "mysql_command_REPLACE_cnt_10ms": 0, + "mysql_command_REPLACE_cnt_10s": 0, + "mysql_command_REPLACE_cnt_1ms": 0, + "mysql_command_REPLACE_cnt_1s": 0, + "mysql_command_REPLACE_cnt_500ms": 0, + "mysql_command_REPLACE_cnt_500us": 0, + "mysql_command_REPLACE_cnt_50ms": 0, + "mysql_command_REPLACE_cnt_5ms": 0, + "mysql_command_REPLACE_cnt_5s": 0, + "mysql_command_REPLACE_cnt_INFs": 0, + "mysql_command_RESET_MASTER_Total_Time_us": 0, + "mysql_command_RESET_MASTER_Total_cnt": 0, + "mysql_command_RESET_MASTER_cnt_100ms": 0, + "mysql_command_RESET_MASTER_cnt_100us": 0, + "mysql_command_RESET_MASTER_cnt_10ms": 0, + 
"mysql_command_RESET_MASTER_cnt_10s": 0, + "mysql_command_RESET_MASTER_cnt_1ms": 0, + "mysql_command_RESET_MASTER_cnt_1s": 0, + "mysql_command_RESET_MASTER_cnt_500ms": 0, + "mysql_command_RESET_MASTER_cnt_500us": 0, + "mysql_command_RESET_MASTER_cnt_50ms": 0, + "mysql_command_RESET_MASTER_cnt_5ms": 0, + "mysql_command_RESET_MASTER_cnt_5s": 0, + "mysql_command_RESET_MASTER_cnt_INFs": 0, + "mysql_command_RESET_SLAVE_Total_Time_us": 0, + "mysql_command_RESET_SLAVE_Total_cnt": 0, + "mysql_command_RESET_SLAVE_cnt_100ms": 0, + "mysql_command_RESET_SLAVE_cnt_100us": 0, + "mysql_command_RESET_SLAVE_cnt_10ms": 0, + "mysql_command_RESET_SLAVE_cnt_10s": 0, + "mysql_command_RESET_SLAVE_cnt_1ms": 0, + "mysql_command_RESET_SLAVE_cnt_1s": 0, + "mysql_command_RESET_SLAVE_cnt_500ms": 0, + "mysql_command_RESET_SLAVE_cnt_500us": 0, + "mysql_command_RESET_SLAVE_cnt_50ms": 0, + "mysql_command_RESET_SLAVE_cnt_5ms": 0, + "mysql_command_RESET_SLAVE_cnt_5s": 0, + "mysql_command_RESET_SLAVE_cnt_INFs": 0, + "mysql_command_REVOKE_Total_Time_us": 0, + "mysql_command_REVOKE_Total_cnt": 0, + "mysql_command_REVOKE_cnt_100ms": 0, + "mysql_command_REVOKE_cnt_100us": 0, + "mysql_command_REVOKE_cnt_10ms": 0, + "mysql_command_REVOKE_cnt_10s": 0, + "mysql_command_REVOKE_cnt_1ms": 0, + "mysql_command_REVOKE_cnt_1s": 0, + "mysql_command_REVOKE_cnt_500ms": 0, + "mysql_command_REVOKE_cnt_500us": 0, + "mysql_command_REVOKE_cnt_50ms": 0, + "mysql_command_REVOKE_cnt_5ms": 0, + "mysql_command_REVOKE_cnt_5s": 0, + "mysql_command_REVOKE_cnt_INFs": 0, + "mysql_command_ROLLBACK_Total_Time_us": 0, + "mysql_command_ROLLBACK_Total_cnt": 0, + "mysql_command_ROLLBACK_cnt_100ms": 0, + "mysql_command_ROLLBACK_cnt_100us": 0, + "mysql_command_ROLLBACK_cnt_10ms": 0, + "mysql_command_ROLLBACK_cnt_10s": 0, + "mysql_command_ROLLBACK_cnt_1ms": 0, + "mysql_command_ROLLBACK_cnt_1s": 0, + "mysql_command_ROLLBACK_cnt_500ms": 0, + "mysql_command_ROLLBACK_cnt_500us": 0, + "mysql_command_ROLLBACK_cnt_50ms": 0, + "mysql_command_ROLLBACK_cnt_5ms": 0, + "mysql_command_ROLLBACK_cnt_5s": 0, + "mysql_command_ROLLBACK_cnt_INFs": 0, + "mysql_command_SAVEPOINT_Total_Time_us": 0, + "mysql_command_SAVEPOINT_Total_cnt": 0, + "mysql_command_SAVEPOINT_cnt_100ms": 0, + "mysql_command_SAVEPOINT_cnt_100us": 0, + "mysql_command_SAVEPOINT_cnt_10ms": 0, + "mysql_command_SAVEPOINT_cnt_10s": 0, + "mysql_command_SAVEPOINT_cnt_1ms": 0, + "mysql_command_SAVEPOINT_cnt_1s": 0, + "mysql_command_SAVEPOINT_cnt_500ms": 0, + "mysql_command_SAVEPOINT_cnt_500us": 0, + "mysql_command_SAVEPOINT_cnt_50ms": 0, + "mysql_command_SAVEPOINT_cnt_5ms": 0, + "mysql_command_SAVEPOINT_cnt_5s": 0, + "mysql_command_SAVEPOINT_cnt_INFs": 0, + "mysql_command_SELECT_FOR_UPDATE_Total_Time_us": 0, + "mysql_command_SELECT_FOR_UPDATE_Total_cnt": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_100ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_100us": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_10ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_10s": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_1ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_1s": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_500ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_500us": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_50ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_5ms": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_5s": 0, + "mysql_command_SELECT_FOR_UPDATE_cnt_INFs": 0, + "mysql_command_SELECT_Total_Time_us": 4673958076637, + "mysql_command_SELECT_Total_cnt": 68490650, + "mysql_command_SELECT_cnt_100ms": 4909816, + "mysql_command_SELECT_cnt_100us": 32185976, + 
"mysql_command_SELECT_cnt_10ms": 2955830, + "mysql_command_SELECT_cnt_10s": 497, + "mysql_command_SELECT_cnt_1ms": 481335, + "mysql_command_SELECT_cnt_1s": 1321917, + "mysql_command_SELECT_cnt_500ms": 11123900, + "mysql_command_SELECT_cnt_500us": 36650, + "mysql_command_SELECT_cnt_50ms": 10468460, + "mysql_command_SELECT_cnt_5ms": 4600948, + "mysql_command_SELECT_cnt_5s": 403451, + "mysql_command_SELECT_cnt_INFs": 1870, + "mysql_command_SET_Total_Time_us": 0, + "mysql_command_SET_Total_cnt": 0, + "mysql_command_SET_cnt_100ms": 0, + "mysql_command_SET_cnt_100us": 0, + "mysql_command_SET_cnt_10ms": 0, + "mysql_command_SET_cnt_10s": 0, + "mysql_command_SET_cnt_1ms": 0, + "mysql_command_SET_cnt_1s": 0, + "mysql_command_SET_cnt_500ms": 0, + "mysql_command_SET_cnt_500us": 0, + "mysql_command_SET_cnt_50ms": 0, + "mysql_command_SET_cnt_5ms": 0, + "mysql_command_SET_cnt_5s": 0, + "mysql_command_SET_cnt_INFs": 0, + "mysql_command_SHOW_TABLE_STATUS_Total_Time_us": 0, + "mysql_command_SHOW_TABLE_STATUS_Total_cnt": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_100ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_100us": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_10ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_10s": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_1ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_1s": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_500ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_500us": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_50ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_5ms": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_5s": 0, + "mysql_command_SHOW_TABLE_STATUS_cnt_INFs": 0, + "mysql_command_SHOW_Total_Time_us": 2158, + "mysql_command_SHOW_Total_cnt": 1, + "mysql_command_SHOW_cnt_100ms": 0, + "mysql_command_SHOW_cnt_100us": 0, + "mysql_command_SHOW_cnt_10ms": 0, + "mysql_command_SHOW_cnt_10s": 0, + "mysql_command_SHOW_cnt_1ms": 0, + "mysql_command_SHOW_cnt_1s": 0, + "mysql_command_SHOW_cnt_500ms": 0, + "mysql_command_SHOW_cnt_500us": 0, + "mysql_command_SHOW_cnt_50ms": 0, + "mysql_command_SHOW_cnt_5ms": 1, + "mysql_command_SHOW_cnt_5s": 0, + "mysql_command_SHOW_cnt_INFs": 0, + "mysql_command_START_TRANSACTION_Total_Time_us": 0, + "mysql_command_START_TRANSACTION_Total_cnt": 0, + "mysql_command_START_TRANSACTION_cnt_100ms": 0, + "mysql_command_START_TRANSACTION_cnt_100us": 0, + "mysql_command_START_TRANSACTION_cnt_10ms": 0, + "mysql_command_START_TRANSACTION_cnt_10s": 0, + "mysql_command_START_TRANSACTION_cnt_1ms": 0, + "mysql_command_START_TRANSACTION_cnt_1s": 0, + "mysql_command_START_TRANSACTION_cnt_500ms": 0, + "mysql_command_START_TRANSACTION_cnt_500us": 0, + "mysql_command_START_TRANSACTION_cnt_50ms": 0, + "mysql_command_START_TRANSACTION_cnt_5ms": 0, + "mysql_command_START_TRANSACTION_cnt_5s": 0, + "mysql_command_START_TRANSACTION_cnt_INFs": 0, + "mysql_command_TRUNCATE_TABLE_Total_Time_us": 0, + "mysql_command_TRUNCATE_TABLE_Total_cnt": 0, + "mysql_command_TRUNCATE_TABLE_cnt_100ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_100us": 0, + "mysql_command_TRUNCATE_TABLE_cnt_10ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_10s": 0, + "mysql_command_TRUNCATE_TABLE_cnt_1ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_1s": 0, + "mysql_command_TRUNCATE_TABLE_cnt_500ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_500us": 0, + "mysql_command_TRUNCATE_TABLE_cnt_50ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_5ms": 0, + "mysql_command_TRUNCATE_TABLE_cnt_5s": 0, + "mysql_command_TRUNCATE_TABLE_cnt_INFs": 0, + "mysql_command_UNKNOWN_Total_Time_us": 0, + "mysql_command_UNKNOWN_Total_cnt": 0, + 
"mysql_command_UNKNOWN_cnt_100ms": 0, + "mysql_command_UNKNOWN_cnt_100us": 0, + "mysql_command_UNKNOWN_cnt_10ms": 0, + "mysql_command_UNKNOWN_cnt_10s": 0, + "mysql_command_UNKNOWN_cnt_1ms": 0, + "mysql_command_UNKNOWN_cnt_1s": 0, + "mysql_command_UNKNOWN_cnt_500ms": 0, + "mysql_command_UNKNOWN_cnt_500us": 0, + "mysql_command_UNKNOWN_cnt_50ms": 0, + "mysql_command_UNKNOWN_cnt_5ms": 0, + "mysql_command_UNKNOWN_cnt_5s": 0, + "mysql_command_UNKNOWN_cnt_INFs": 0, + "mysql_command_UNLOCK_TABLES_Total_Time_us": 0, + "mysql_command_UNLOCK_TABLES_Total_cnt": 0, + "mysql_command_UNLOCK_TABLES_cnt_100ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_100us": 0, + "mysql_command_UNLOCK_TABLES_cnt_10ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_10s": 0, + "mysql_command_UNLOCK_TABLES_cnt_1ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_1s": 0, + "mysql_command_UNLOCK_TABLES_cnt_500ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_500us": 0, + "mysql_command_UNLOCK_TABLES_cnt_50ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_5ms": 0, + "mysql_command_UNLOCK_TABLES_cnt_5s": 0, + "mysql_command_UNLOCK_TABLES_cnt_INFs": 0, + "mysql_command_UPDATE_Total_Time_us": 0, + "mysql_command_UPDATE_Total_cnt": 0, + "mysql_command_UPDATE_cnt_100ms": 0, + "mysql_command_UPDATE_cnt_100us": 0, + "mysql_command_UPDATE_cnt_10ms": 0, + "mysql_command_UPDATE_cnt_10s": 0, + "mysql_command_UPDATE_cnt_1ms": 0, + "mysql_command_UPDATE_cnt_1s": 0, + "mysql_command_UPDATE_cnt_500ms": 0, + "mysql_command_UPDATE_cnt_500us": 0, + "mysql_command_UPDATE_cnt_50ms": 0, + "mysql_command_UPDATE_cnt_5ms": 0, + "mysql_command_UPDATE_cnt_5s": 0, + "mysql_command_UPDATE_cnt_INFs": 0, + "mysql_command_USE_Total_Time_us": 0, + "mysql_command_USE_Total_cnt": 0, + "mysql_command_USE_cnt_100ms": 0, + "mysql_command_USE_cnt_100us": 0, + "mysql_command_USE_cnt_10ms": 0, + "mysql_command_USE_cnt_10s": 0, + "mysql_command_USE_cnt_1ms": 0, + "mysql_command_USE_cnt_1s": 0, + "mysql_command_USE_cnt_500ms": 0, + "mysql_command_USE_cnt_500us": 0, + "mysql_command_USE_cnt_50ms": 0, + "mysql_command_USE_cnt_5ms": 0, + "mysql_command_USE_cnt_5s": 0, + "mysql_command_USE_cnt_INFs": 0, + "mysql_firewall_rules_config": 329, + "mysql_firewall_rules_table": 0, + "mysql_firewall_users_config": 0, + "mysql_firewall_users_table": 0, + "mysql_frontend_buffers_bytes": 196608, + "mysql_killed_backend_connections": 0, + "mysql_killed_backend_queries": 0, + "mysql_query_rules_memory": 22825, + "mysql_session_internal_bytes": 20232, + "mysql_unexpected_frontend_com_quit": 0, + "mysql_unexpected_frontend_packets": 0, + "mysql_user_first_user_frontend_connections": 0, + "mysql_user_first_user_frontend_connections_utilization": 0, + "mysql_user_second_user_frontend_connections": 3, + "mysql_user_second_user_frontend_connections_utilization": 20, + "queries_with_max_lag_ms": 0, + "queries_with_max_lag_ms__delayed": 0, + "queries_with_max_lag_ms__total_wait_time_us": 0, + "query_digest_memory": 13688, + "stack_memory_admin_threads": 16777216, + "stack_memory_cluster_threads": 0, + "stack_memory_mysql_threads": 33554432, + "whitelisted_sqli_fingerprint": 0, + } + + require.Equal(t, expected, mx) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db, mock, err := sqlmock.New( + sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual), + ) + require.NoError(t, err) + my := New() + my.db = db + defer func() { _ = db.Close() }() + + require.True(t, my.Init()) + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepareMock(t, 
mock) + step.check(t, my) + }) + } + assert.NoError(t, mock.ExpectationsWereMet()) + }) + } +} + +// mustMockRows converts an ASCII table fixture into sqlmock rows and fails the test on any parse error. +func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows { + rows, err := prepareMockRows(data) + require.NoError(t, err) + return rows +} + +// mockExpect registers an expected query that returns the given fixture as its result set. +func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) { + mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed() +} + +// mockExpectErr registers an expected query that fails with a mock error. +func mockExpectErr(mock sqlmock.Sqlmock, query string) { + mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query)) +} + +// prepareMockRows parses a mysql-CLI-style ASCII table: the first non-border line supplies the column names, every following line is a data row. +func prepareMockRows(data []byte) (*sqlmock.Rows, error) { + if len(data) == 0 { + return sqlmock.NewRows(nil), nil + } + + r := bytes.NewReader(data) + sc := bufio.NewScanner(r) + + var numColumns int + var rows *sqlmock.Rows + + for sc.Scan() { + s := strings.TrimSpace(strings.Trim(sc.Text(), "|")) + // skip blank lines, table borders, and the ft_boolean_syntax variable (its value embeds '|', which would break the column split below) + switch { + case s == "", + strings.HasPrefix(s, "+"), + strings.HasPrefix(s, "ft_boolean_syntax"): + continue + } + + parts := strings.Split(s, "|") + for i, v := range parts { + parts[i] = strings.TrimSpace(v) + } + + if rows == nil { + numColumns = len(parts) + rows = sqlmock.NewRows(parts) + continue + } + + if len(parts) != numColumns { + return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts)) + } + + values := make([]driver.Value, len(parts)) + for i, v := range parts { + values[i] = v + } + rows.AddRow(values...) + } + + if rows == nil { + return nil, errors.New("prepareMockRows(): nil rows result") + } + + return rows, sc.Err() +}
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt new file mode 100644 index 00000000000000..99ec093e10d07f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt @@ -0,0 +1,21 @@ ++------------------------------+----------------+ +| Variable_Name | Variable_Value | ++------------------------------+----------------+ +| SQLite3_memory_bytes | 6017144 | +| jemalloc_resident | 403759104 | +| jemalloc_active | 385101824 | +| jemalloc_allocated | 379402432 | +| jemalloc_mapped | 430993408 | +| jemalloc_metadata | 17418872 | +| jemalloc_retained | 260542464 | +| Auth_memory | 1044 | +| query_digest_memory | 13688 | +| mysql_query_rules_memory | 22825 | +| mysql_firewall_users_table | 0 | +| mysql_firewall_users_config | 0 | +| mysql_firewall_rules_table | 0 | +| mysql_firewall_rules_config | 329 | +| stack_memory_mysql_threads | 33554432 | +| stack_memory_admin_threads | 16777216 | +| stack_memory_cluster_threads | 0 | ++------------------------------+----------------+ \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt new file mode 100644 index 00000000000000..6ab6bb83006092 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt @@ -0,0 +1,56 @@ ++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+ +| Command | Total_Time_us | Total_cnt | cnt_100us | cnt_500us | cnt_1ms | cnt_5ms | cnt_10ms | cnt_50ms | cnt_100ms | cnt_500ms | cnt_1s | cnt_5s | cnt_10s | cnt_INFs | 
++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+ +| ALTER_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| ALTER_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| ANALYZE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| BEGIN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CALL | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CHANGE_MASTER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| COMMIT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_DATABASE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_INDEX | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_TEMPORARY | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_TRIGGER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_USER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| CREATE_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DEALLOCATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DELETE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DESCRIBE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_DATABASE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_INDEX | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_TRIGGER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_USER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| DROP_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| GRANT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| EXECUTE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| EXPLAIN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| FLUSH | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| INSERT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| KILL | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| LOAD | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| LOCK_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| OPTIMIZE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| PREPARE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| PURGE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| RENAME_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| RESET_MASTER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| RESET_SLAVE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| REPLACE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| REVOKE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| ROLLBACK | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| SAVEPOINT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| SELECT | 4673958076637 | 68490650 | 32185976 | 36650 | 481335 | 4600948 | 2955830 | 10468460 | 4909816 | 11123900 | 1321917 | 403451 | 497 | 1870 | +| SELECT_FOR_UPDATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| SET | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| SHOW_TABLE_STATUS | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| START_TRANSACTION | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| 
TRUNCATE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| UNLOCK_TABLES | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| UPDATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| USE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| SHOW | 2158 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | +| UNKNOWN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+ diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt new file mode 100644 index 00000000000000..80b53e1af156c1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt @@ -0,0 +1,11 @@ ++----+-------------------+--------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+------------+ +| hostgroup | srv_host | srv_port | status | ConnUsed | ConnFree | ConnOK | ConnERR | Queries | Bytes_data_sent | Bytes_data_recv | Latency_us | ++-----------+-------------------+----------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+------------+ +| 10 | back001-db-master | 6001 | ONLINE | 69 | 423 | 524 | 0 | 8970367 | 9858463664 | 145193069937 | 17684 | +| 11 | back001-db-master | 6002 | ONLINE | 0 | 1 | 1 | 0 | 69 | 187675 | 2903 | 17684 | +| 11 | back001-db-reader | 6003 | ONLINE | 0 | 11 | 11 | 0 | 63488 | 163690013 | 4994101 | 113 | +| 20 | back002-db-master | 6004 | ONLINE | 9 | 188 | 197 | 2 | 849461 | 1086994186 | 266034339 | 101981 | +| 21 | back002-db-reader | 6005 | ONLINE | 0 | 1 | 1 | 0 | 8 | 6992 | 984 | 230 | +| 31 | back003-db-master | 6006 | ONLINE | 0 | 3 | 3 | 0 | 3276 | 712803 | 81438709 | 231 | +| 31 | back003-db-reader | 6007 | ONLINE | 1 | 70 | 71 | 0 | 2356904 | 411900849 | 115810708275 | 230 | ++-----------+-------------------+--------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+--------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt new file mode 100644 index 00000000000000..442266c454a22f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt @@ -0,0 +1,106 @@ ++---------------------------------------------+----------------+ +| Variable_Name | Variable_Value | ++---------------------------------------------+----------------+ +| ProxySQL_Uptime | 26748286 | +| Active_Transactions | 0 | +| Client_Connections_aborted | 2 | +| Client_Connections_connected | 3 | +| Client_Connections_created | 5458991 | +| Server_Connections_aborted | 9979 | +| Server_Connections_connected | 13 | +| Server_Connections_created | 2122254 | +| Server_Connections_delayed | 0 | +| Client_Connections_non_idle | 3 | +| Queries_backends_bytes_recv | 5896210168 | +| Queries_backends_bytes_sent | 4329581500 | +| Queries_frontends_bytes_recv | 7434816962 | +| Queries_frontends_bytes_sent | 11643634097 | +| Query_Processor_time_nsec | 0 | +| Backend_query_time_nsec | 0 | +| mysql_backend_buffers_bytes | 0 | +| mysql_frontend_buffers_bytes | 
196608 | +| mysql_session_internal_bytes | 20232 | +| Com_autocommit | 0 | +| Com_autocommit_filtered | 0 | +| Com_commit | 0 | +| Com_commit_filtered | 0 | +| Com_rollback | 0 | +| Com_rollback_filtered | 0 | +| Com_backend_change_user | 188694 | +| Com_backend_init_db | 0 | +| Com_backend_set_names | 1517893 | +| Com_frontend_init_db | 2 | +| Com_frontend_set_names | 0 | +| Com_frontend_use_db | 0 | +| Com_backend_stmt_prepare | 16858208 | +| Com_backend_stmt_execute | 36303146 | +| Com_backend_stmt_close | 0 | +| Com_frontend_stmt_prepare | 32185987 | +| Com_frontend_stmt_execute | 36314138 | +| Com_frontend_stmt_close | 32137933 | +| Mirror_concurrency | 0 | +| Mirror_queue_length | 0 | +| Questions | 100638067 | +| Selects_for_update__autocommit0 | 0 | +| Slow_queries | 405818 | +| GTID_consistent_queries | 0 | +| GTID_session_collected | 0 | +| Servers_table_version | 37 | +| MySQL_Thread_Workers | 4 | +| Access_Denied_Wrong_Password | 2 | +| Access_Denied_Max_Connections | 0 | +| Access_Denied_Max_User_Connections | 0 | +| MySQL_Monitor_Workers | 10 | +| MySQL_Monitor_Workers_Aux | 0 | +| MySQL_Monitor_Workers_Started | 10 | +| MySQL_Monitor_connect_check_OK | 3548306 | +| MySQL_Monitor_connect_check_ERR | 130 | +| MySQL_Monitor_ping_check_OK | 21289849 | +| MySQL_Monitor_ping_check_ERR | 108271 | +| MySQL_Monitor_read_only_check_OK | 106246409 | +| MySQL_Monitor_read_only_check_ERR | 19610 | +| MySQL_Monitor_replication_lag_check_OK | 28702388 | +| MySQL_Monitor_replication_lag_check_ERR | 482 | +| ConnPool_get_conn_latency_awareness | 0 | +| ConnPool_get_conn_immediate | 13361 | +| ConnPool_get_conn_success | 36319474 | +| ConnPool_get_conn_failure | 212943 | +| generated_error_packets | 231 | +| max_connect_timeouts | 227 | +| backend_lagging_during_query | 8880 | +| backend_offline_during_query | 8 | +| queries_with_max_lag_ms | 0 | +| queries_with_max_lag_ms__delayed | 0 | +| queries_with_max_lag_ms__total_wait_time_us | 0 | +| mysql_unexpected_frontend_com_quit | 0 | +| Client_Connections_hostgroup_locked | 0 | +| hostgroup_locked_set_cmds | 0 | +| hostgroup_locked_queries | 0 | +| mysql_unexpected_frontend_packets | 0 | +| aws_aurora_replicas_skipped_during_query | 0 | +| automatic_detected_sql_injection | 0 | +| whitelisted_sqli_fingerprint | 0 | +| mysql_killed_backend_connections | 0 | +| mysql_killed_backend_queries | 0 | +| MyHGM_myconnpoll_get | 36519056 | +| MyHGM_myconnpoll_get_ok | 36306113 | +| MyHGM_myconnpoll_push | 37358734 | +| MyHGM_myconnpoll_destroy | 15150 | +| MyHGM_myconnpoll_reset | 2 | +| SQLite3_memory_bytes | 6021248 | +| ConnPool_memory_bytes | 932248 | +| Stmt_Client_Active_Total | 18 | +| Stmt_Client_Active_Unique | 18 | +| Stmt_Server_Active_Total | 101 | +| Stmt_Server_Active_Unique | 39 | +| Stmt_Max_Stmt_id | 66 | +| Stmt_Cached | 65 | +| Query_Cache_Memory_bytes | 0 | +| Query_Cache_count_GET | 0 | +| Query_Cache_count_GET_OK | 0 | +| Query_Cache_count_SET | 0 | +| Query_Cache_bytes_IN | 0 | +| Query_Cache_bytes_OUT | 0 | +| Query_Cache_Purged | 0 | +| Query_Cache_Entries | 0 | ++---------------------------------------------+----------------+ diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt new file mode 100644 index 00000000000000..900776b765b27e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt @@ -0,0 +1,6 @@ 
++-------------------------+----------------------+--------------------------+ +| username | frontend_connections | frontend_max_connections | ++-------------------------+----------------------+--------------------------+ +| first_user | 0 | 200 | +| second_user | 3 | 15 | ++-------------------------+----------------------+--------------------------+ diff --git a/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/version.txt b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/version.txt new file mode 100644 index 00000000000000..429a880b74ed4d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/proxysql/testdata/v2.0.10/version.txt @@ -0,0 +1,5 @@ ++---------------------+ +| version() | ++---------------------+ +| 2.0.10-27-g5b319972 | ++---------------------+ \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/README.md b/src/go/collectors/go.d.plugin/modules/pulsar/README.md new file mode 120000 index 00000000000000..dfa55301c83cc9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/README.md @@ -0,0 +1 @@ +integrations/apache_pulsar.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/charts.go b/src/go/collectors/go.d.plugin/modules/pulsar/charts.go new file mode 100644 index 00000000000000..6dbb7b49a5adf4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/charts.go @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + Charts = module.Charts + Chart = module.Chart + Dims = module.Dims + Dim = module.Dim + Opts = module.Opts +) + +var summaryCharts = Charts{ + sumBrokerComponentsChart.Copy(), + + sumMessagesRateChart.Copy(), + sumThroughputRateChart.Copy(), + + sumStorageSizeChart.Copy(), + sumStorageOperationsRateChart.Copy(), // optional + sumMsgBacklogSizeChart.Copy(), + sumStorageWriteLatencyChart.Copy(), + sumEntrySizeChart.Copy(), + + sumSubsDelayedChart.Copy(), + sumSubsMsgRateRedeliverChart.Copy(), // optional + sumSubsBlockedOnUnackedMsgChart.Copy(), // optional + + sumReplicationRateChart.Copy(), // optional + sumReplicationThroughputRateChart.Copy(), // optional + sumReplicationBacklogChart.Copy(), // optional +} + +var ( + sumBrokerComponentsChart = Chart{ + ID: "broker_components", + Title: "Broker Components", + Units: "components", + Fam: "ns summary", + Ctx: "pulsar.broker_components", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: "pulsar_namespaces_count", Name: "namespaces"}, + {ID: metricPulsarTopicsCount, Name: "topics"}, + {ID: metricPulsarSubscriptionsCount, Name: "subscriptions"}, + {ID: metricPulsarProducersCount, Name: "producers"}, + {ID: metricPulsarConsumersCount, Name: "consumers"}, + }, + } + sumMessagesRateChart = Chart{ + ID: "messages_rate", + Title: "Messages Rate", + Units: "messages/s", + Fam: "ns summary", + Ctx: "pulsar.messages_rate", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarRateIn, Name: "publish", Div: 1000}, + {ID: metricPulsarRateOut, Name: "dispatch", Mul: -1, Div: 1000}, + }, + } + sumThroughputRateChart = Chart{ + ID: "throughput_rate", + Title: "Throughput Rate", + Units: "KiB/s", + Fam: "ns summary", + Ctx: "pulsar.throughput_rate", + Type: module.Area, + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarThroughputIn, Name: "publish", Div: 1024 * 1000}, + {ID: 
metricPulsarThroughputOut, Name: "dispatch", Mul: -1, Div: 1024 * 1000}, + }, + } + sumStorageSizeChart = Chart{ + ID: "storage_size", + Title: "Storage Size", + Units: "KiB", + Fam: "ns summary", + Ctx: "pulsar.storage_size", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarStorageSize, Name: "used", Div: 1024}, + }, + } + sumStorageOperationsRateChart = Chart{ + ID: "storage_operations_rate", + Title: "Storage Read/Write Operations Rate", + Units: "message batches/s", + Fam: "ns summary", + Ctx: "pulsar.storage_operations_rate", + Type: module.Area, + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarStorageReadRate, Name: "read", Div: 1000}, + {ID: metricPulsarStorageWriteRate, Name: "write", Mul: -1, Div: 1000}, + }, + } + sumMsgBacklogSizeChart = Chart{ + ID: "msg_backlog", + Title: "Messages Backlog Size", + Units: "messages", + Fam: "ns summary", + Ctx: "pulsar.msg_backlog", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarMsgBacklog, Name: "backlog"}, + }, + } + sumStorageWriteLatencyChart = Chart{ + ID: "storage_write_latency", + Title: "Storage Write Latency", + Units: "entries/s", + Fam: "ns summary", + Ctx: "pulsar.storage_write_latency", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: "pulsar_storage_write_latency_le_0_5", Name: "<=0.5ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_1", Name: "<=1ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_5", Name: "<=5ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_10", Name: "<=10ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_20", Name: "<=20ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_50", Name: "<=50ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_100", Name: "<=100ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_200", Name: "<=200ms", Div: 60}, + {ID: "pulsar_storage_write_latency_le_1000", Name: "<=1s", Div: 60}, + {ID: "pulsar_storage_write_latency_overflow", Name: ">1s", Div: 60}, + }, + } + sumEntrySizeChart = Chart{ + ID: "entry_size", + Title: "Entry Size", + Units: "entries/s", + Fam: "ns summary", + Ctx: "pulsar.entry_size", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: "pulsar_entry_size_le_128", Name: "<=128B", Div: 60}, + {ID: "pulsar_entry_size_le_512", Name: "<=512B", Div: 60}, + {ID: "pulsar_entry_size_le_1_kb", Name: "<=1KB", Div: 60}, + {ID: "pulsar_entry_size_le_2_kb", Name: "<=2KB", Div: 60}, + {ID: "pulsar_entry_size_le_4_kb", Name: "<=4KB", Div: 60}, + {ID: "pulsar_entry_size_le_16_kb", Name: "<=16KB", Div: 60}, + {ID: "pulsar_entry_size_le_100_kb", Name: "<=100KB", Div: 60}, + {ID: "pulsar_entry_size_le_1_mb", Name: "<=1MB", Div: 60}, + {ID: "pulsar_entry_size_le_overflow", Name: ">1MB", Div: 60}, + }, + } + sumSubsDelayedChart = Chart{ + ID: "subscription_delayed", + Title: "Subscriptions Delayed for Dispatching", + Units: "message batches", + Fam: "ns summary", + Ctx: "pulsar.subscription_delayed", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarSubscriptionDelayed, Name: "delayed"}, + }, + } + sumSubsMsgRateRedeliverChart = Chart{ + ID: "subscription_msg_rate_redeliver", + Title: "Subscriptions Redelivered Message Rate", + Units: "messages/s", + Fam: "ns summary", + Ctx: "pulsar.subscription_msg_rate_redeliver", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarSubscriptionMsgRateRedeliver, Name: "redelivered", Div: 1000}, + }, + } + sumSubsBlockedOnUnackedMsgChart = Chart{ + ID: "subscription_blocked_on_unacked_messages", + 
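// optional: adjustCharts drops this chart when metricPulsarSubscriptionMsgRateRedeliver is not collected +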
Title: "Subscriptions Blocked On Unacked Messages", + Units: "subscriptions", + Fam: "ns summary", + Ctx: "pulsar.subscription_blocked_on_unacked_messages", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarSubscriptionBlockedOnUnackedMessages, Name: "blocked"}, + }, + } + sumReplicationRateChart = Chart{ + ID: "replication_rate", + Title: "Replication Rate", + Units: "messages/s", + Fam: "ns summary", + Ctx: "pulsar.replication_rate", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarReplicationRateIn, Name: "in", Div: 1000}, + {ID: metricPulsarReplicationRateOut, Name: "out", Mul: -1, Div: 1000}, + }, + } + sumReplicationThroughputRateChart = Chart{ + ID: "replication_throughput_rate", + Title: "Replication Throughput Rate", + Units: "KiB/s", + Fam: "ns summary", + Ctx: "pulsar.replication_throughput_rate", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarReplicationThroughputIn, Name: "in", Div: 1024 * 1000}, + {ID: metricPulsarReplicationThroughputOut, Name: "out", Mul: -1, Div: 1024 * 1000}, + }, + } + sumReplicationBacklogChart = Chart{ + ID: "replication_backlog", + Title: "Replication Backlog", + Units: "messages", + Fam: "ns summary", + Ctx: "pulsar.replication_backlog", + Opts: Opts{StoreFirst: true}, + Dims: Dims{ + {ID: metricPulsarReplicationBacklog, Name: "backlog"}, + }, + } +) + +var namespaceCharts = Charts{ + nsBrokerComponentsChart.Copy(), + topicProducersChart.Copy(), + topicSubscriptionsChart.Copy(), + topicConsumersChart.Copy(), + + nsMessagesRateChart.Copy(), + topicMessagesRateInChart.Copy(), + topicMessagesRateOutChart.Copy(), + nsThroughputRateCharts.Copy(), + topicThroughputRateInChart.Copy(), + topicThroughputRateOutChart.Copy(), + + nsStorageSizeChart.Copy(), + topicStorageSizeChart.Copy(), + nsStorageOperationsChart.Copy(), // optional + topicStorageReadRateChart.Copy(), // optional + topicStorageWriteRateChart.Copy(), // optional + nsMsgBacklogSizeChart.Copy(), + topicMsgBacklogSizeChart.Copy(), + nsStorageWriteLatencyChart.Copy(), + nsEntrySizeChart.Copy(), + + nsSubsDelayedChart.Copy(), + topicSubsDelayedChart.Copy(), + nsSubsMsgRateRedeliverChart.Copy(), // optional + topicSubsMsgRateRedeliverChart.Copy(), // optional + nsSubsBlockedOnUnackedMsgChart.Copy(), // optional + topicSubsBlockedOnUnackedMsgChart.Copy(), // optional + + nsReplicationRateChart.Copy(), // optional + topicReplicationRateInChart.Copy(), // optional + topicReplicationRateOutChart.Copy(), // optional + nsReplicationThroughputChart.Copy(), // optional + topicReplicationThroughputRateInChart.Copy(), // optional + topicReplicationThroughputRateOutChart.Copy(), // optional + nsReplicationBacklogChart.Copy(), // optional + topicReplicationBacklogChart.Copy(), // optional +} + +func toNamespaceChart(chart Chart) Chart { + if chart.ID == sumBrokerComponentsChart.ID { + _ = chart.RemoveDim("pulsar_namespaces_count") + } + chart.ID += "_namespace_%s" + chart.Fam = "ns %s" + if idx := strings.IndexByte(chart.Ctx, '.'); idx > 0 { + // pulsar.messages_rate => pulsar.namespace_messages_rate + chart.Ctx = chart.Ctx[:idx+1] + "namespace_" + chart.Ctx[idx+1:] + } + for _, dim := range chart.Dims { + dim.ID += "_%s" + } + return chart +} + +var ( + nsBrokerComponentsChart = toNamespaceChart(sumBrokerComponentsChart) + nsMessagesRateChart = toNamespaceChart(sumMessagesRateChart) + nsThroughputRateCharts = toNamespaceChart(sumThroughputRateChart) + nsStorageSizeChart = toNamespaceChart(sumStorageSizeChart) + nsStorageOperationsChart = 
toNamespaceChart(sumStorageOperationsRateChart) + nsMsgBacklogSizeChart = toNamespaceChart(sumMsgBacklogSizeChart) + nsStorageWriteLatencyChart = toNamespaceChart(sumStorageWriteLatencyChart) + nsEntrySizeChart = toNamespaceChart(sumEntrySizeChart) + nsSubsDelayedChart = toNamespaceChart(sumSubsDelayedChart) + nsSubsMsgRateRedeliverChart = toNamespaceChart(sumSubsMsgRateRedeliverChart) + nsSubsBlockedOnUnackedMsgChart = toNamespaceChart(sumSubsBlockedOnUnackedMsgChart) + nsReplicationRateChart = toNamespaceChart(sumReplicationRateChart) + nsReplicationThroughputChart = toNamespaceChart(sumReplicationThroughputRateChart) + nsReplicationBacklogChart = toNamespaceChart(sumReplicationBacklogChart) + + topicProducersChart = Chart{ + ID: "topic_producers_namespace_%s", + Title: "Topic Producers", + Units: "producers", + Fam: "ns %s", + Ctx: "pulsar.topic_producers", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicSubscriptionsChart = Chart{ + ID: "topic_subscriptions_namespace_%s", + Title: "Topic Subscriptions", + Units: "subscriptions", + Fam: "ns %s", + Ctx: "pulsar.topic_subscriptions", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicConsumersChart = Chart{ + ID: "topic_consumers_namespace_%s", + Title: "Topic Consumers", + Units: "consumers", + Fam: "ns %s", + Ctx: "pulsar.topic_consumers", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicMessagesRateInChart = Chart{ + ID: "topic_messages_rate_in_namespace_%s", + Title: "Topic Publish Messages Rate", + Units: "publishes/s", + Fam: "ns %s", + Ctx: "pulsar.topic_messages_rate_in", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicMessagesRateOutChart = Chart{ + ID: "topic_messages_rate_out_namespace_%s", + Title: "Topic Dispatch Messages Rate", + Units: "dispatches/s", + Fam: "ns %s", + Ctx: "pulsar.topic_messages_rate_out", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicThroughputRateInChart = Chart{ + ID: "topic_throughput_rate_in_namespace_%s", + Title: "Topic Publish Throughput Rate", + Units: "KiB/s", + Fam: "ns %s", + Ctx: "pulsar.topic_throughput_rate_in", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicThroughputRateOutChart = Chart{ + ID: "topic_throughput_rate_out_namespace_%s", + Title: "Topic Dispatch Throughput Rate", + Units: "KiB/s", + Fam: "ns %s", + Ctx: "pulsar.topic_throughput_rate_out", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicStorageSizeChart = Chart{ + ID: "topic_storage_size_namespace_%s", + Title: "Topic Storage Size", + Units: "KiB", + Fam: "ns %s", + Ctx: "pulsar.topic_storage_size", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicStorageReadRateChart = Chart{ + ID: "topic_storage_read_rate_namespace_%s", + Title: "Topic Storage Read Rate", + Units: "message batches/s", + Fam: "ns %s", + Ctx: "pulsar.topic_storage_read_rate", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicStorageWriteRateChart = Chart{ + ID: "topic_storage_write_rate_namespace_%s", + Title: "Topic Storage Write Rate", + Units: "message batches/s", + Fam: "ns %s", + Ctx: "pulsar.topic_storage_write_rate", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicMsgBacklogSizeChart = Chart{ + ID: "topic_msg_backlog_namespace_%s", + Title: "Topic Messages Backlog Size", + Units: "messages", + Fam: "ns %s", + Ctx: "pulsar.topic_msg_backlog", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicSubsDelayedChart = Chart{ + ID: 
"topic_subscription_delayed_namespace_%s", + Title: "Topic Subscriptions Delayed for Dispatching", + Units: "message batches", + Fam: "ns %s", + Ctx: "pulsar.topic_subscription_delayed", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicSubsMsgRateRedeliverChart = Chart{ + ID: "topic_subscription_msg_rate_redeliver_namespace_%s", + Title: "Topic Subscriptions Redelivered Message Rate", + Units: "messages/s", + Fam: "ns %s", + Ctx: "pulsar.topic_subscription_msg_rate_redeliver", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicSubsBlockedOnUnackedMsgChart = Chart{ + ID: "topic_subscription_blocked_on_unacked_messages_namespace_%s", + Title: "Topic Subscriptions Blocked On Unacked Messages", + Units: "blocked subscriptions", + Fam: "ns %s", + Ctx: "pulsar.topic_subscription_blocked_on_unacked_messages", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicReplicationRateInChart = Chart{ + ID: "topic_replication_rate_in_namespace_%s", + Title: "Topic Replication Rate From Remote Cluster", + Units: "messages/s", + Fam: "ns %s", + Ctx: "pulsar.topic_replication_rate_in", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicReplicationRateOutChart = Chart{ + ID: "replication_rate_out_namespace_%s", + Title: "Topic Replication Rate To Remote Cluster", + Units: "messages/s", + Fam: "ns %s", + Ctx: "pulsar.topic_replication_rate_out", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicReplicationThroughputRateInChart = Chart{ + ID: "topic_replication_throughput_rate_in_namespace_%s", + Title: "Topic Replication Throughput Rate From Remote Cluster", + Units: "KiB/s", + Fam: "ns %s", + Ctx: "pulsar.topic_replication_throughput_rate_in", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicReplicationThroughputRateOutChart = Chart{ + ID: "topic_replication_throughput_rate_out_namespace_%s", + Title: "Topic Replication Throughput Rate To Remote Cluster", + Units: "KiB/s", + Fam: "ns %s", + Ctx: "pulsar.topic_replication_throughput_rate_out", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } + topicReplicationBacklogChart = Chart{ + ID: "topic_replication_backlog_namespace_%s", + Title: "Topic Replication Backlog", + Units: "messages", + Fam: "ns %s", + Ctx: "pulsar.topic_replication_backlog", + Type: module.Stacked, + Opts: Opts{StoreFirst: true}, + } +) + +func (p *Pulsar) adjustCharts(pms prometheus.Series) { + if pms := pms.FindByName(metricPulsarStorageReadRate); pms.Len() == 0 || pms[0].Labels.Get("namespace") == "" { + p.removeSummaryChart(sumStorageOperationsRateChart.ID) + p.removeNamespaceChart(nsStorageOperationsChart.ID) + p.removeNamespaceChart(topicStorageReadRateChart.ID) + p.removeNamespaceChart(topicStorageWriteRateChart.ID) + delete(p.topicChartsMapping, topicStorageReadRateChart.ID) + delete(p.topicChartsMapping, topicStorageWriteRateChart.ID) + } + if pms.FindByName(metricPulsarSubscriptionMsgRateRedeliver).Len() == 0 { + p.removeSummaryChart(sumSubsMsgRateRedeliverChart.ID) + p.removeSummaryChart(sumSubsBlockedOnUnackedMsgChart.ID) + p.removeNamespaceChart(nsSubsMsgRateRedeliverChart.ID) + p.removeNamespaceChart(nsSubsBlockedOnUnackedMsgChart.ID) + p.removeNamespaceChart(topicSubsMsgRateRedeliverChart.ID) + p.removeNamespaceChart(topicSubsBlockedOnUnackedMsgChart.ID) + delete(p.topicChartsMapping, topicSubsMsgRateRedeliverChart.ID) + delete(p.topicChartsMapping, topicSubsBlockedOnUnackedMsgChart.ID) + } + if pms.FindByName(metricPulsarReplicationBacklog).Len() == 0 { + 
p.removeSummaryChart(sumReplicationRateChart.ID) + p.removeSummaryChart(sumReplicationThroughputRateChart.ID) + p.removeSummaryChart(sumReplicationBacklogChart.ID) + p.removeNamespaceChart(nsReplicationRateChart.ID) + p.removeNamespaceChart(nsReplicationThroughputChart.ID) + p.removeNamespaceChart(nsReplicationBacklogChart.ID) + p.removeNamespaceChart(topicReplicationRateInChart.ID) + p.removeNamespaceChart(topicReplicationRateOutChart.ID) + p.removeNamespaceChart(topicReplicationThroughputRateInChart.ID) + p.removeNamespaceChart(topicReplicationThroughputRateOutChart.ID) + p.removeNamespaceChart(topicReplicationBacklogChart.ID) + delete(p.topicChartsMapping, topicReplicationRateInChart.ID) + delete(p.topicChartsMapping, topicReplicationRateOutChart.ID) + delete(p.topicChartsMapping, topicReplicationThroughputRateInChart.ID) + delete(p.topicChartsMapping, topicReplicationThroughputRateOutChart.ID) + delete(p.topicChartsMapping, topicReplicationBacklogChart.ID) + } +} + +func (p *Pulsar) removeSummaryChart(chartID string) { + if err := p.Charts().Remove(chartID); err != nil { + p.Warning(err) + } +} + +func (p *Pulsar) removeNamespaceChart(chartID string) { + if err := p.nsCharts.Remove(chartID); err != nil { + p.Warning(err) + } +} + +func (p *Pulsar) updateCharts() { + // NOTE: order is important + for ns := range p.curCache.namespaces { + if !p.cache.namespaces[ns] { + p.cache.namespaces[ns] = true + p.addNamespaceCharts(ns) + } + } + for top := range p.curCache.topics { + if !p.cache.topics[top] { + p.cache.topics[top] = true + p.addTopicToCharts(top) + } + } + for top := range p.cache.topics { + if p.curCache.topics[top] { + continue + } + delete(p.cache.topics, top) + p.removeTopicFromCharts(top) + } + for ns := range p.cache.namespaces { + if p.curCache.namespaces[ns] { + continue + } + delete(p.cache.namespaces, ns) + p.removeNamespaceFromCharts(ns) + } +} + +func (p *Pulsar) addNamespaceCharts(ns namespace) { + charts := p.nsCharts.Copy() + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, ns.name) + chart.Fam = fmt.Sprintf(chart.Fam, ns.name) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, ns.name) + } + } + if err := p.Charts().Add(*charts...); err != nil { + p.Warning(err) + } +} + +func (p *Pulsar) removeNamespaceFromCharts(ns namespace) { + for _, chart := range *p.nsCharts { + id := fmt.Sprintf(chart.ID, ns.name) + if chart = p.Charts().Get(id); chart != nil { + chart.MarkRemove() + } else { + p.Warningf("could not remove namespace chart '%s'", id) + } + } +} + +func (p *Pulsar) addTopicToCharts(top topic) { + for id, metric := range p.topicChartsMapping { + id = fmt.Sprintf(id, top.namespace) + chart := p.Charts().Get(id) + if chart == nil { + p.Warningf("could not add topic '%s' to chart '%s': chart not found", top.name, id) + continue + } + + dim := Dim{ID: metric + "_" + top.name, Name: extractTopicName(top)} + switch metric { + case metricPulsarThroughputIn, + metricPulsarThroughputOut, + metricPulsarReplicationThroughputIn, + metricPulsarReplicationThroughputOut: + dim.Div = 1024 * 1000 + case metricPulsarRateIn, + metricPulsarRateOut, + metricPulsarStorageWriteRate, + metricPulsarStorageReadRate, + metricPulsarSubscriptionMsgRateRedeliver, + metricPulsarReplicationRateIn, + metricPulsarReplicationRateOut: + dim.Div = 1000 + case metricPulsarStorageSize: + dim.Div = 1024 + } + + if err := chart.AddDim(&dim); err != nil { + p.Warning(err) + } + chart.MarkNotCreated() + } +} + +func (p *Pulsar) removeTopicFromCharts(top topic) { + for 
id, metric := range p.topicChartsMapping { + id = fmt.Sprintf(id, top.namespace) + chart := p.Charts().Get(id) + if chart == nil { + p.Warningf("could not remove topic '%s' from chart '%s': chart not found", top.name, id) + continue + } + + if err := chart.MarkDimRemove(metric+"_"+top.name, true); err != nil { + p.Warning(err) + } + chart.MarkNotCreated() + } +} + +func topicChartsMapping() map[string]string { + return map[string]string{ + topicSubscriptionsChart.ID: metricPulsarSubscriptionsCount, + topicProducersChart.ID: metricPulsarProducersCount, + topicConsumersChart.ID: metricPulsarConsumersCount, + topicMessagesRateInChart.ID: metricPulsarRateIn, + topicMessagesRateOutChart.ID: metricPulsarRateOut, + topicThroughputRateInChart.ID: metricPulsarThroughputIn, + topicThroughputRateOutChart.ID: metricPulsarThroughputOut, + topicStorageSizeChart.ID: metricPulsarStorageSize, + topicStorageReadRateChart.ID: metricPulsarStorageReadRate, + topicStorageWriteRateChart.ID: metricPulsarStorageWriteRate, + topicMsgBacklogSizeChart.ID: metricPulsarMsgBacklog, + topicSubsDelayedChart.ID: metricPulsarSubscriptionDelayed, + topicSubsMsgRateRedeliverChart.ID: metricPulsarSubscriptionMsgRateRedeliver, + topicSubsBlockedOnUnackedMsgChart.ID: metricPulsarSubscriptionBlockedOnUnackedMessages, + topicReplicationRateInChart.ID: metricPulsarReplicationRateIn, + topicReplicationRateOutChart.ID: metricPulsarReplicationRateOut, + topicReplicationThroughputRateInChart.ID: metricPulsarReplicationThroughputIn, + topicReplicationThroughputRateOutChart.ID: metricPulsarReplicationThroughputOut, + topicReplicationBacklogChart.ID: metricPulsarReplicationBacklog, + } +} + +func extractTopicName(top topic) string { + // persistent://sample/ns1/demo-1 => p:demo-1 + if idx := strings.LastIndexByte(top.name, '/'); idx > 0 { + return top.name[:1] + ":" + top.name[idx+1:] + } + return top.name +} diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/collect.go b/src/go/collectors/go.d.plugin/modules/pulsar/collect.go new file mode 100644 index 00000000000000..6c0b93ce00e8f7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/collect.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + "errors" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func isValidPulsarMetrics(pms prometheus.Series) bool { + return pms.FindByName(metricPulsarTopicsCount).Len() > 0 +} + +func (p *Pulsar) resetCurCache() { + for ns := range p.curCache.namespaces { + delete(p.curCache.namespaces, ns) + } + for top := range p.curCache.topics { + delete(p.curCache.topics, top) + } +} + +func (p *Pulsar) collect() (map[string]int64, error) { + pms, err := p.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if !isValidPulsarMetrics(pms) { + return nil, errors.New("returned metrics aren't Apache Pulsar metrics") + } + + p.once.Do(func() { + p.adjustCharts(pms) + }) + + mx := p.collectMetrics(pms) + p.updateCharts() + p.resetCurCache() + + return stm.ToMap(mx), nil +} + +func (p *Pulsar) collectMetrics(pms prometheus.Series) map[string]float64 { + mx := make(map[string]float64) + p.collectBroker(mx, pms) + return mx +} + +func (p *Pulsar) collectBroker(mx map[string]float64, pms prometheus.Series) { + pms = findPulsarMetrics(pms) + for _, pm := range pms { + ns, top := newNamespace(pm), newTopic(pm) + if ns.name == "" { + continue + } + + p.curCache.namespaces[ns] = true + + value := pm.Value * precision(pm.Name()) + 
mx[pm.Name()] += value
+		mx[pm.Name()+"_"+ns.name] += value
+
+		if top.name == "" || !p.topicFilter.MatchString(top.name) {
+			continue
+		}
+
+		p.curCache.topics[top] = true
+		mx[pm.Name()+"_"+top.name] += value
+	}
+	mx["pulsar_namespaces_count"] = float64(len(p.curCache.namespaces))
+}
+
+func newNamespace(pm prometheus.SeriesSample) namespace {
+	return namespace{
+		name: pm.Labels.Get("namespace"),
+	}
+}
+
+func newTopic(pm prometheus.SeriesSample) topic {
+	return topic{
+		namespace: pm.Labels.Get("namespace"),
+		name:      pm.Labels.Get("topic"),
+	}
+}
+
+func findPulsarMetrics(pms prometheus.Series) prometheus.Series {
+	var ms prometheus.Series
+	for _, pm := range pms {
+		if isPulsarHistogram(pm) {
+			ms = append(ms, pm)
+		}
+	}
+	pms = pms.FindByNames(
+		metricPulsarTopicsCount,
+		metricPulsarSubscriptionDelayed,
+		metricPulsarSubscriptionsCount,
+		metricPulsarProducersCount,
+		metricPulsarConsumersCount,
+		metricPulsarRateIn,
+		metricPulsarRateOut,
+		metricPulsarThroughputIn,
+		metricPulsarThroughputOut,
+		metricPulsarStorageSize,
+		metricPulsarStorageWriteRate,
+		metricPulsarStorageReadRate,
+		metricPulsarMsgBacklog,
+		metricPulsarSubscriptionMsgRateRedeliver,
+		metricPulsarSubscriptionBlockedOnUnackedMessages,
+		// the replication series must be listed here as well, otherwise the
+		// replication charts registered in charts.go never receive data
+		metricPulsarReplicationRateIn,
+		metricPulsarReplicationRateOut,
+		metricPulsarReplicationThroughputIn,
+		metricPulsarReplicationThroughputOut,
+		metricPulsarReplicationBacklog,
+	)
+	return append(ms, pms...)
+}
+
+func isPulsarHistogram(pm prometheus.SeriesSample) bool {
+	s := pm.Name()
+	return strings.HasPrefix(s, "pulsar_storage_write_latency") || strings.HasPrefix(s, "pulsar_entry_size")
+}
+
+func precision(metric string) float64 {
+	switch metric {
+	case metricPulsarRateIn,
+		metricPulsarRateOut,
+		metricPulsarThroughputIn,
+		metricPulsarThroughputOut,
+		metricPulsarStorageWriteRate,
+		metricPulsarStorageReadRate,
+		metricPulsarSubscriptionMsgRateRedeliver,
+		metricPulsarReplicationRateIn,
+		metricPulsarReplicationRateOut,
+		metricPulsarReplicationThroughputIn,
+		metricPulsarReplicationThroughputOut:
+		return 1000
+	}
+	return 1
+}
diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/config_schema.json b/src/go/collectors/go.d.plugin/modules/pulsar/config_schema.json
new file mode 100644
index 00000000000000..083eb0b9818f29
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/pulsar/config_schema.json
@@ -0,0 +1,76 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/pulsar job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "url": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    },
+    "topic_filter": {
+      "type": "object",
+      "properties": {
+        "includes": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "excludes": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        }
+      }
+    },
+    "username": {
+      "type": "string"
+    },
+    "password": {
+      "type": "string"
+    },
+    "proxy_url": {
+      "type": "string"
+    },
+    "proxy_username": {
+      "type": "string"
+    },
+    "proxy_password": {
+      "type": "string"
+    },
+    "headers": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      }
+    },
+    "not_follow_redirects": {
+      "type": "boolean"
+    },
+    "tls_ca": {
+      "type": "string"
+    },
+    "tls_cert": {
+      "type": "string"
+    },
+    "tls_key": {
+      "type": "string"
+    },
+    "insecure_skip_verify": {
+      "type": "boolean"
+    }
+  },
+  "required": [
+    "name",
+    "url"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/integrations/apache_pulsar.md b/src/go/collectors/go.d.plugin/modules/pulsar/integrations/apache_pulsar.md
new file mode 100644
index 00000000000000..1a16e8cdb9e706
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/pulsar/integrations/apache_pulsar.md
@@ -0,0 +1,279 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/pulsar/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/pulsar/metadata.yaml"
+sidebar_label: "Apache Pulsar"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Message Brokers"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apache Pulsar
+
+
+<img src="https://netdata.cloud/img/pulsar.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: pulsar
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Pulsar servers.
+
+
+It collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Pulsar instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+- subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.
+
+
+### Per Apache Pulsar instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |
+| pulsar.messages_rate | publish, dispatch | messages/s |
+| pulsar.throughput_rate | publish, dispatch | KiB/s |
+| pulsar.storage_size | used | KiB |
+| pulsar.storage_operations_rate | read, write | message batches/s |
+| pulsar.msg_backlog | backlog | messages |
+| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |
+| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |
+| pulsar.subscription_delayed | delayed | message batches |
+| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |
+| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |
+| pulsar.replication_rate | in, out | messages/s |
+| pulsar.replication_throughput_rate | in, out | KiB/s |
+| pulsar.replication_backlog | backlog | messages |
+
+### Per namespace
+
+These metrics refer to a Pulsar namespace.
+
+This scope has no labels.
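+
+The topic_* charts in this scope have one dimension per topic. Dimension names are shortened: the persistence prefix is reduced to its first letter and the namespace path is dropped, so `persistent://sample/dev/dev-1` is shown as `p:dev-1`. To check which `pulsar_*` series your broker actually exposes (this sketch assumes the default metrics endpoint on localhost):
+
+```bash
+# list the distinct pulsar_* metric names served by the broker
+curl -s http://127.0.0.1:8080/metrics | grep -o '^pulsar_[a-z0-9_]*' | sort -u
+```
+
+If no per-topic series appear in the output, verify `exposeTopicLevelMetricsInPrometheus` in your broker configuration.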
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |
+| pulsar.namespace_messages_rate | publish, dispatch | messages/s |
+| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |
+| pulsar.namespace_storage_size | used | KiB |
+| pulsar.namespace_storage_operations_rate | read, write | message batches/s |
+| pulsar.namespace_msg_backlog | backlog | messages |
+| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |
+| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |
+| pulsar.namespace_subscription_delayed | delayed | message batches |
+| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |
+| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |
+| pulsar.namespace_replication_rate | in, out | messages/s |
+| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |
+| pulsar.namespace_replication_backlog | backlog | messages |
+| pulsar.topic_producers | a dimension per topic | producers |
+| pulsar.topic_subscriptions | a dimension per topic | subscriptions |
+| pulsar.topic_consumers | a dimension per topic | consumers |
+| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |
+| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |
+| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |
+| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |
+| pulsar.topic_storage_size | a dimension per topic | KiB |
+| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |
+| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |
+| pulsar.topic_msg_backlog | a dimension per topic | messages |
+| pulsar.topic_subscription_delayed | a dimension per topic | message batches |
+| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |
+| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |
+| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |
+| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |
+| pulsar.topic_replication_throughput_rate_in | a dimension per topic | KiB/s |
+| pulsar.topic_replication_throughput_rate_out | a dimension per topic | KiB/s |
+| pulsar.topic_replication_backlog | a dimension per topic | messages |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/pulsar.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pulsar.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds.
Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8080/metrics | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8080/metrics + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/metrics + + - name: remote + url: http://192.0.2.1:8080/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m pulsar + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/metadata.yaml b/src/go/collectors/go.d.plugin/modules/pulsar/metadata.yaml new file mode 100644 index 00000000000000..f21389fd2e4d68 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/metadata.yaml @@ -0,0 +1,519 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-pulsar + plugin_name: go.d.plugin + module_name: pulsar + monitored_instance: + name: Apache Pulsar + link: https://pulsar.apache.org/ + icon_filename: pulsar.svg + categories: + - data-collection.message-brokers + keywords: + - pulsar + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Pulsar servers. + method_description: | + It collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats). + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects Pulsar instances running on localhost. + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/pulsar.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8080/metrics + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. 
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              folding:
+                enabled: false
+              description: A basic example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080/metrics
+            - name: HTTP authentication
+              description: Basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080/metrics
+                    username: username
+                    password: password
+            - name: HTTPS with self-signed certificate
+              description: |
+                Do not validate server certificate chain and hostname.
+              config: |
+                jobs:
+                  - name: local
+                    url: https://127.0.0.1:8080/metrics
+                    tls_skip_verify: yes
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collecting metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8080/metrics
+
+                  - name: remote
+                    url: http://192.0.2.1:8080/metrics
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: |
+        - topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+        - subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+        - replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: pulsar.broker_components
+              description: Broker Components
+              unit: components
+              chart_type: line
+              dimensions:
+                - name: namespaces
+                - name: topics
+                - name: subscriptions
+                - name: producers
+                - name: consumers
+            - name: pulsar.messages_rate
+              description: Messages Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: publish
+                - name: dispatch
+            - name: pulsar.throughput_rate
+              description: Throughput Rate
+              unit: KiB/s
+              chart_type: area
+              dimensions:
+                - name: publish
+                - name: dispatch
+            - name: pulsar.storage_size
+              description: Storage Size
+              unit: KiB
+              chart_type: line
+              dimensions:
+                - name: used
+            - name: pulsar.storage_operations_rate
+              description: Storage Read/Write Operations Rate
+              unit: message batches/s
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: write
+            - name: pulsar.msg_backlog
+              description: Messages Backlog Size
+              unit: messages
+              chart_type: line
+              dimensions:
+                - name: backlog
+            - name: pulsar.storage_write_latency
+              description: Storage Write Latency
+              unit: entries/s
+              chart_type: stacked
+              dimensions:
+                - name: <=0.5ms
+                - name: <=1ms
+                - name: <=5ms
+                - name: <=10ms
+                - name: <=20ms
+                - name: <=50ms
+                - name: <=100ms
+                - name: <=200ms
+                - name: <=1s
+                - name: '>1s'
+            - name: pulsar.entry_size
+              description: Entry Size
+              unit: entries/s
+              chart_type: stacked
+              dimensions:
+                - name: <=128B
+                - name: <=512B
+                - name: <=1KB
+                - name: <=2KB
+                - name: <=4KB
+                - name: <=16KB
+                - name: <=100KB
+                - name: <=1MB
+                - name: '>1MB'
+            - name: pulsar.subscription_delayed
+              description: Subscriptions Delayed for Dispatching
+              unit: message batches
+              chart_type: line
+              dimensions:
+                - name: delayed
+            - name: pulsar.subscription_msg_rate_redeliver
+              description: Subscriptions Redelivered Message Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: redelivered
+            - name: pulsar.subscription_blocked_on_unacked_messages
+              description: Subscriptions Blocked On Unacked Messages
+              unit: subscriptions
+              chart_type: line
+              dimensions:
+                - name: blocked
+            - name: pulsar.replication_rate
+              description: Replication Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: pulsar.replication_throughput_rate
+              description: Replication Throughput Rate
+              unit: KiB/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: pulsar.replication_backlog
+              description: Replication Backlog
+              unit: messages
+              chart_type: line
+              dimensions:
+                - name: backlog
+        - name: namespace
+          description: These metrics refer to a Pulsar namespace.
+          labels: []
+          metrics:
+            - name: pulsar.namespace_broker_components
+              description: Broker Components
+              unit: components
+              chart_type: line
+              dimensions:
+                - name: topics
+                - name: subscriptions
+                - name: producers
+                - name: consumers
+            - name: pulsar.namespace_messages_rate
+              description: Messages Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: publish
+                - name: dispatch
+            - name: pulsar.namespace_throughput_rate
+              description: Throughput Rate
+              unit: KiB/s
+              chart_type: area
+              dimensions:
+                - name: publish
+                - name: dispatch
+            - name: pulsar.namespace_storage_size
+              description: Storage Size
+              unit: KiB
+              chart_type: line
+              dimensions:
+                - name: used
+            - name: pulsar.namespace_storage_operations_rate
+              description: Storage Read/Write Operations Rate
+              unit: message batches/s
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: write
+            - name: pulsar.namespace_msg_backlog
+              description: Messages Backlog Size
+              unit: messages
+              chart_type: line
+              dimensions:
+                - name: backlog
+            - name: pulsar.namespace_storage_write_latency
+              description: Storage Write Latency
+              unit: entries/s
+              chart_type: stacked
+              dimensions:
+                - name: <=0.5ms
+                - name: <=1ms
+                - name: <=5ms
+                - name: <=10ms
+                - name: <=20ms
+                - name: <=50ms
+                - name: <=100ms
+                - name: <=200ms
+                - name: <=1s
+                - name: '>1s'
+            - name: pulsar.namespace_entry_size
+              description: Entry Size
+              unit: entries/s
+              chart_type: stacked
+              dimensions:
+                - name: <=128B
+                - name: <=512B
+                - name: <=1KB
+                - name: <=2KB
+                - name: <=4KB
+                - name: <=16KB
+                - name: <=100KB
+                - name: <=1MB
+                - name: '>1MB'
+            - name: pulsar.namespace_subscription_delayed
+              description: Subscriptions Delayed for Dispatching
+              unit: message batches
+              chart_type: line
+              dimensions:
+                - name: delayed
+            - name: pulsar.namespace_subscription_msg_rate_redeliver
+              description: Subscriptions Redelivered Message Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: redelivered
+            - name: pulsar.namespace_subscription_blocked_on_unacked_messages
+              description: Subscriptions Blocked On Unacked Messages
+              unit: subscriptions
+              chart_type: line
+              dimensions:
+                - name: blocked
+            - name: pulsar.namespace_replication_rate
+              description: Replication Rate
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: pulsar.namespace_replication_throughput_rate
+              description: Replication Throughput Rate
+              unit: KiB/s
+              chart_type: line
+              dimensions:
+                - name: in
+                - name: out
+            - name: pulsar.namespace_replication_backlog
+              description: Replication Backlog
+              unit: messages
+              chart_type: line
+              dimensions:
+                - name: backlog
+            - name: pulsar.topic_producers
+              description: Topic Producers
+              unit: producers
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_subscriptions
+              description: Topic Subscriptions
+              unit: subscriptions
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_consumers
+              description: Topic Consumers
+              unit: consumers
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_messages_rate_in
+              description: Topic Publish Messages Rate
+              unit: publishes/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_messages_rate_out
+              description: Topic Dispatch Messages Rate
+              unit: dispatches/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_throughput_rate_in
+              description: Topic Publish Throughput Rate
+              unit: KiB/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_throughput_rate_out
+              description: Topic Dispatch Throughput Rate
+              unit: KiB/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_storage_size
+              description: Topic Storage Size
+              unit: KiB
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_storage_read_rate
+              description: Topic Storage Read Rate
+              unit: message batches/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_storage_write_rate
+              description: Topic Storage Write Rate
+              unit: message batches/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_msg_backlog
+              description: Topic Messages Backlog Size
+              unit: messages
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_subscription_delayed
+              description: Topic Subscriptions Delayed for Dispatching
+              unit: message batches
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_subscription_msg_rate_redeliver
+              description: Topic Subscriptions Redelivered Message Rate
+              unit: messages/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_subscription_blocked_on_unacked_messages
+              description: Topic Subscriptions Blocked On Unacked Messages
+              unit: blocked subscriptions
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_replication_rate_in
+              description: Topic Replication Rate From Remote Cluster
+              unit: messages/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_replication_rate_out
+              description: Topic Replication Rate To Remote Cluster
+              unit: messages/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_replication_throughput_rate_in
+              description: Topic Replication Throughput Rate From Remote Cluster
+              unit: KiB/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_replication_throughput_rate_out
+              description: Topic Replication Throughput Rate To Remote Cluster
+              unit: KiB/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
+            - name: pulsar.topic_replication_backlog
+              description: Topic Replication Backlog
+              unit: messages
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per topic
diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/metrics.go b/src/go/collectors/go.d.plugin/modules/pulsar/metrics.go
new file mode 100644
index 00000000000000..9e38e5b9ab6f2a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/pulsar/metrics.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+/*
+Architecture:
+ - https://pulsar.apache.org/docs/en/concepts-overview/
+
+Terminology:
+ - https://pulsar.apache.org/docs/en/reference-terminology/
+
+Deploy Monitoring:
+ - http://pulsar.apache.org/docs/en/deploy-monitoring/
+
+Metrics Reference:
+ - https://github.com/apache/pulsar/blob/master/site2/docs/reference-metrics.md
+
+REST API:
+ - http://pulsar.apache.org/admin-rest-api/?version=master
+
+Grafana Dashboards:
+ - https://github.com/apache/pulsar/tree/master/docker/grafana/dashboards
+
+Stats in the source code:
+ - https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/
+ - https://github.com/apache/pulsar/tree/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus
+
+If 'exposeTopicLevelMetricsInPrometheus' is false:
+ - https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java
+else:
+ - https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java
+
+Metrics update parameters:
+ - statsUpdateFrequencyInSecs=60
+ - statsUpdateInitialDelayInSecs=60
+
+Metrics exposure:
+ - Namespace   : 'exposeTopicLevelMetricsInPrometheus' is set to false.
+ - Replication : 'replicationMetricsEnabled' is enabled.
+ - Topic       : 'exposeTopicLevelMetricsInPrometheus' is set to true.
+ - Subscription: 'exposeTopicLevelMetricsInPrometheus' is set to true.
+ - Consumer    : 'exposeTopicLevelMetricsInPrometheus' and 'exposeConsumerLevelMetricsInPrometheus' are set to true.
+ - Publisher   : 'exposePublisherStats' is set to true. REST API option. (/admin/v2/broker-stats/topics)
+*/
+
+/*
+TODO:
+Unused broker metrics:
+ - "pulsar_storage_backlog_size"       : ?? the estimated total unconsumed (backlog) size in bytes for the managed ledger, without accounting for replicas.
+ - "pulsar_storage_offloaded_size"     : ?? the size of all ledgers offloaded to 2nd tier storage.
+ - "pulsar_storage_backlog_quota_limit": ?? the total amount of the data in this topic that limits the backlog quota.
+ - "pulsar_in_bytes_total"             : use "pulsar_throughput_in" for the same data.
+ - "pulsar_in_messages_total"          : use "pulsar_rate_in" for the same data.
+ - "pulsar_subscription_unacked_messages"  : negative values (https://github.com/apache/pulsar/issues/6510)
+ - "pulsar_subscription_back_log"          : too detailed, we have a summary per topic. Part of "pulsar_msg_backlog" (msgBacklog).
+ - "pulsar_subscription_msg_rate_out"      : too detailed, we have a summary per topic. Part of "pulsar_rate_out".
+ - "pulsar_subscription_msg_throughput_out": too detailed, we have a summary per topic. Part of "pulsar_throughput_out".
+
+ + All Consumer metrics (for each namespace, topic, subscription).
+ + JVM metrics.
+ + Zookeeper metrics.
+ + Bookkeeper metrics.
+
+Hardcoded update interval? (60)
+ - pulsar_storage_write_latency_le_*
+ - pulsar_entry_size_le_*
+*/
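+
+/*
+Precision note: rate and throughput series are floating point. The collector
+multiplies them by 1000 before the values are converted to int64 (see
+precision() in collect.go), and the chart dimensions divide by 1000
+(or by 1024*1000 for KiB/s) to restore the original scale.
+*/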
+
+/*
+https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java
+Zero metrics which are always present (labels: cluster):
+ - "pulsar_topics_count"
+ - "pulsar_subscriptions_count"
+ - "pulsar_producers_count"
+ - "pulsar_consumers_count"
+ - "pulsar_rate_in"
+ - "pulsar_rate_out"
+ - "pulsar_throughput_in"
+ - "pulsar_throughput_out"
+ - "pulsar_storage_size"
+ - "pulsar_storage_write_rate"
+ - "pulsar_storage_read_rate"
+ - "pulsar_msg_backlog"
+*/
+
+const (
+	// Namespace metrics (labels: namespace)
+	metricPulsarTopicsCount = "pulsar_topics_count"
+	// Namespace, Topic metrics (labels: namespace || namespace, topic)
+	metricPulsarSubscriptionsCount = "pulsar_subscriptions_count"
+	metricPulsarProducersCount     = "pulsar_producers_count"
+	metricPulsarConsumersCount     = "pulsar_consumers_count"
+	metricPulsarRateIn             = "pulsar_rate_in"
+	metricPulsarRateOut            = "pulsar_rate_out"
+	metricPulsarThroughputIn       = "pulsar_throughput_in"
+	metricPulsarThroughputOut      = "pulsar_throughput_out"
+	metricPulsarStorageSize        = "pulsar_storage_size"
+	metricPulsarStorageWriteRate   = "pulsar_storage_write_rate" // exposed with labels only if there is a Bookie
+	metricPulsarStorageReadRate    = "pulsar_storage_read_rate"  // exposed with labels only if there is a Bookie
+	metricPulsarMsgBacklog         = "pulsar_msg_backlog"        // has a 'remote_cluster' label if there are no topic stats
+	// pulsar_storage_write_latency_le_*
+	// pulsar_entry_size_le_*
+
+	// Subscription metrics (labels: namespace, topic, subscription)
+	metricPulsarSubscriptionDelayed                  = "pulsar_subscription_delayed" // Number of delayed messages currently being tracked
+	metricPulsarSubscriptionMsgRateRedeliver         = "pulsar_subscription_msg_rate_redeliver"
+	metricPulsarSubscriptionBlockedOnUnackedMessages = "pulsar_subscription_blocked_on_unacked_messages"
+
+	// Replication metrics (labels: namespace, remote_cluster || namespace, topic, remote_cluster)
+	// Exposed only when replication is enabled.
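+	// Note: the collector sums each replication series across its 'remote_cluster' label, per namespace and per topic.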
+ metricPulsarReplicationRateIn = "pulsar_replication_rate_in" + metricPulsarReplicationRateOut = "pulsar_replication_rate_out" + metricPulsarReplicationThroughputIn = "pulsar_replication_throughput_in" + metricPulsarReplicationThroughputOut = "pulsar_replication_throughput_out" + metricPulsarReplicationBacklog = "pulsar_replication_backlog" +) diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/pulsar.go b/src/go/collectors/go.d.plugin/modules/pulsar/pulsar.go new file mode 100644 index 00000000000000..8b0ce910195a81 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/pulsar.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + _ "embed" + "errors" + "sync" + "time" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("pulsar", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 60, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *Pulsar { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + TopicFiler: matcher.SimpleExpr{ + Includes: nil, + Excludes: []string{"*"}, + }, + } + return &Pulsar{ + Config: config, + once: &sync.Once{}, + charts: summaryCharts.Copy(), + nsCharts: namespaceCharts.Copy(), + topicChartsMapping: topicChartsMapping(), + cache: newCache(), + curCache: newCache(), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + TopicFiler matcher.SimpleExpr `yaml:"topic_filter"` + } + + Pulsar struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + topicFilter matcher.Matcher + cache *cache + curCache *cache + once *sync.Once + charts *Charts + nsCharts *Charts + topicChartsMapping map[string]string + } + + namespace struct{ name string } + topic struct{ namespace, name string } + cache struct { + namespaces map[namespace]bool + topics map[topic]bool + } +) + +func newCache() *cache { + return &cache{ + namespaces: make(map[namespace]bool), + topics: make(map[topic]bool), + } +} + +func (p Pulsar) validateConfig() error { + if p.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (p *Pulsar) initClient() error { + client, err := web.NewHTTPClient(p.Client) + if err != nil { + return err + } + + p.prom = prometheus.New(client, p.Request) + return nil +} + +func (p *Pulsar) initTopicFiler() error { + if p.TopicFiler.Empty() { + p.topicFilter = matcher.TRUE() + return nil + } + + m, err := p.TopicFiler.Parse() + if err != nil { + return err + } + p.topicFilter = m + return nil +} + +func (p *Pulsar) Init() bool { + if err := p.validateConfig(); err != nil { + p.Errorf("config validation: %v", err) + return false + } + if err := p.initClient(); err != nil { + p.Errorf("client initializing: %v", err) + return false + } + if err := p.initTopicFiler(); err != nil { + p.Errorf("topic filer initialization: %v", err) + return false + } + return true +} + +func (p *Pulsar) Check() bool { + return len(p.Collect()) > 0 +} + +func (p *Pulsar) Charts() *Charts { + return p.charts +} + +func (p *Pulsar) Collect() map[string]int64 { + mx, err := p.collect() + if err != nil { + p.Error(err) + } + + if len(mx) == 0 { + return nil + } + 
return mx +} + +func (Pulsar) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/pulsar_test.go b/src/go/collectors/go.d.plugin/modules/pulsar/pulsar_test.go new file mode 100644 index 00000000000000..3bf9468b67b7d3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/pulsar_test.go @@ -0,0 +1,1015 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + metricsNonPulsar, _ = os.ReadFile("testdata/non-pulsar.txt") + metricsStdV250Namespaces, _ = os.ReadFile("testdata/standalone-v2.5.0-namespaces.txt") + metricsStdV250Topics, _ = os.ReadFile("testdata/standalone-v2.5.0-topics.txt") + metricsStdV250Topics2, _ = os.ReadFile("testdata/standalone-v2.5.0-topics-2.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, metricsNonPulsar) + assert.NotNil(t, metricsStdV250Namespaces) + assert.NotNil(t, metricsStdV250Topics) + assert.NotNil(t, metricsStdV250Topics2) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestPulsar_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "default": { + config: New().Config, + }, + "empty topic filter": { + config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metric"}}}, + }, + "bad syntax topic filer": { + config: Config{ + HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metrics"}}, + TopicFiler: matcher.SimpleExpr{Includes: []string{"+"}}}, + wantFail: true, + }, + "empty URL": { + config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}}, + wantFail: true, + }, + "nonexistent TLS CA": { + config: Config{HTTP: web.HTTP{ + Request: web.Request{URL: "http://127.0.0.1:8080/metric"}, + Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}}, + wantFail: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pulsar := New() + pulsar.Config = test.config + + if test.wantFail { + assert.False(t, pulsar.Init()) + } else { + assert.True(t, pulsar.Init()) + } + }) + } +} + +func TestPulsar_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestPulsar_Check(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*Pulsar, *httptest.Server) + wantFail bool + }{ + "standalone v2.5.0 namespaces": {prepare: prepareClientServerStdV250Namespaces}, + "standalone v2.5.0 topics": {prepare: prepareClientServerStdV250Topics}, + "non pulsar": {prepare: prepareClientServerNonPulsar, wantFail: true}, + "invalid data": {prepare: prepareClientServerInvalidData, wantFail: true}, + "404": {prepare: prepareClientServer404, wantFail: true}, + "connection refused": {prepare: prepareClientServerConnectionRefused, wantFail: true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pulsar, srv := test.prepare(t) + defer srv.Close() + + if test.wantFail { + assert.False(t, pulsar.Check()) + } else { + assert.True(t, pulsar.Check()) + } + }) + } +} + +func TestPulsar_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) + +} + +func TestPulsar_Collect_ReturnsNilOnErrors(t *testing.T) { + tests := map[string]struct { + prepare 
func(*testing.T) (*Pulsar, *httptest.Server) + }{ + "non pulsar": {prepare: prepareClientServerNonPulsar}, + "invalid data": {prepare: prepareClientServerInvalidData}, + "404": {prepare: prepareClientServer404}, + "connection refused": {prepare: prepareClientServerConnectionRefused}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pulsar, srv := test.prepare(t) + defer srv.Close() + + assert.Nil(t, pulsar.Collect()) + }) + } +} + +func TestPulsar_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(*testing.T) (*Pulsar, *httptest.Server) + expected map[string]int64 + }{ + "standalone v2.5.0 namespaces": { + prepare: prepareClientServerStdV250Namespaces, + expected: expectedStandaloneV250Namespaces, + }, + "standalone v2.5.0 topics": { + prepare: prepareClientServerStdV250Topics, + expected: expectedStandaloneV250Topics, + }, + "standalone v2.5.0 topics filtered": { + prepare: prepareClientServerStdV250TopicsFiltered, + expected: expectedStandaloneV250TopicsFiltered, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + pulsar, srv := test.prepare(t) + defer srv.Close() + + for i := 0; i < 10; i++ { + _ = pulsar.Collect() + } + collected := pulsar.Collect() + + require.NotNil(t, collected) + require.Equal(t, test.expected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, pulsar, collected) + }) + } +} + +func TestPulsar_Collect_RemoveAddNamespacesTopicsInRuntime(t *testing.T) { + pulsar, srv := prepareClientServersDynamicStdV250Topics(t) + defer srv.Close() + + oldNsCharts := Charts{} + + require.NotNil(t, pulsar.Collect()) + oldLength := len(*pulsar.Charts()) + + for _, chart := range *pulsar.Charts() { + for ns := range pulsar.cache.namespaces { + if ns.name != "public/functions" && chart.Fam == "ns "+ns.name { + _ = oldNsCharts.Add(chart) + } + } + } + + require.NotNil(t, pulsar.Collect()) + + l := oldLength + len(*pulsar.nsCharts)*2 // 2 new namespaces + assert.Truef(t, len(*pulsar.Charts()) == l, "expected %d charts, but got %d", l, len(*pulsar.Charts())) + + for _, chart := range oldNsCharts { + assert.Truef(t, chart.Obsolete, "expected chart '%s' Obsolete flag is set", chart.ID) + for _, dim := range chart.Dims { + if strings.HasPrefix(chart.ID, "topic_") { + assert.Truef(t, dim.Obsolete, "expected chart '%s' dim '%s' Obsolete flag is set", chart.ID, dim.ID) + } + } + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pulsar *Pulsar, collected map[string]int64) { + for _, chart := range *pulsar.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsStdV250Namespaces) + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +func prepareClientServerStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsStdV250Topics) + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return 
pulsar, srv +} + +func prepareClientServerStdV250TopicsFiltered(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + pulsar, srv := prepareClientServerStdV250Topics(t) + pulsar.topicFilter = matcher.FALSE() + + return pulsar, srv +} + +func prepareClientServersDynamicStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + var i int + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if i%2 == 0 { + _, _ = w.Write(metricsStdV250Topics) + } else { + _, _ = w.Write(metricsStdV250Topics2) + } + i++ + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +func prepareClientServerNonPulsar(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsNonPulsar) + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +func prepareClientServerInvalidData(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +func prepareClientServer404(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + pulsar := New() + pulsar.URL = srv.URL + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +func prepareClientServerConnectionRefused(t *testing.T) (*Pulsar, *httptest.Server) { + t.Helper() + srv := httptest.NewServer(nil) + + pulsar := New() + pulsar.URL = "http://127.0.0.1:38001/metrics" + require.True(t, pulsar.Init()) + + return pulsar, srv +} + +var expectedStandaloneV250Namespaces = map[string]int64{ + "pulsar_consumers_count": 21, + "pulsar_consumers_count_public/functions": 3, + "pulsar_consumers_count_sample/dev": 10, + "pulsar_consumers_count_sample/prod": 8, + "pulsar_entry_size_count": 6013, + "pulsar_entry_size_count_public/functions": 0, + "pulsar_entry_size_count_sample/dev": 3012, + "pulsar_entry_size_count_sample/prod": 3001, + "pulsar_entry_size_le_100_kb": 0, + "pulsar_entry_size_le_100_kb_public/functions": 0, + "pulsar_entry_size_le_100_kb_sample/dev": 0, + "pulsar_entry_size_le_100_kb_sample/prod": 0, + "pulsar_entry_size_le_128": 6013, + "pulsar_entry_size_le_128_public/functions": 0, + "pulsar_entry_size_le_128_sample/dev": 3012, + "pulsar_entry_size_le_128_sample/prod": 3001, + "pulsar_entry_size_le_16_kb": 0, + "pulsar_entry_size_le_16_kb_public/functions": 0, + "pulsar_entry_size_le_16_kb_sample/dev": 0, + "pulsar_entry_size_le_16_kb_sample/prod": 0, + "pulsar_entry_size_le_1_kb": 0, + "pulsar_entry_size_le_1_kb_public/functions": 0, + "pulsar_entry_size_le_1_kb_sample/dev": 0, + "pulsar_entry_size_le_1_kb_sample/prod": 0, + "pulsar_entry_size_le_1_mb": 0, + "pulsar_entry_size_le_1_mb_public/functions": 0, + "pulsar_entry_size_le_1_mb_sample/dev": 0, + "pulsar_entry_size_le_1_mb_sample/prod": 0, + "pulsar_entry_size_le_2_kb": 0, + "pulsar_entry_size_le_2_kb_public/functions": 0, + "pulsar_entry_size_le_2_kb_sample/dev": 0, + "pulsar_entry_size_le_2_kb_sample/prod": 0, + "pulsar_entry_size_le_4_kb": 0, + "pulsar_entry_size_le_4_kb_public/functions": 0, + "pulsar_entry_size_le_4_kb_sample/dev": 0, + 
"pulsar_entry_size_le_4_kb_sample/prod": 0, + "pulsar_entry_size_le_512": 0, + "pulsar_entry_size_le_512_public/functions": 0, + "pulsar_entry_size_le_512_sample/dev": 0, + "pulsar_entry_size_le_512_sample/prod": 0, + "pulsar_entry_size_le_overflow": 0, + "pulsar_entry_size_le_overflow_public/functions": 0, + "pulsar_entry_size_le_overflow_sample/dev": 0, + "pulsar_entry_size_le_overflow_sample/prod": 0, + "pulsar_entry_size_sum": 6013, + "pulsar_entry_size_sum_public/functions": 0, + "pulsar_entry_size_sum_sample/dev": 3012, + "pulsar_entry_size_sum_sample/prod": 3001, + "pulsar_msg_backlog": 8, + "pulsar_msg_backlog_public/functions": 0, + "pulsar_msg_backlog_sample/dev": 8, + "pulsar_msg_backlog_sample/prod": 0, + "pulsar_namespaces_count": 3, + "pulsar_producers_count": 10, + "pulsar_producers_count_public/functions": 2, + "pulsar_producers_count_sample/dev": 4, + "pulsar_producers_count_sample/prod": 4, + "pulsar_rate_in": 96023, + "pulsar_rate_in_public/functions": 0, + "pulsar_rate_in_sample/dev": 48004, + "pulsar_rate_in_sample/prod": 48019, + "pulsar_rate_out": 242057, + "pulsar_rate_out_public/functions": 0, + "pulsar_rate_out_sample/dev": 146018, + "pulsar_rate_out_sample/prod": 96039, + "pulsar_storage_read_rate": 0, + "pulsar_storage_read_rate_public/functions": 0, + "pulsar_storage_read_rate_sample/dev": 0, + "pulsar_storage_read_rate_sample/prod": 0, + "pulsar_storage_size": 5468424, + "pulsar_storage_size_public/functions": 0, + "pulsar_storage_size_sample/dev": 2684208, + "pulsar_storage_size_sample/prod": 2784216, + "pulsar_storage_write_latency_count": 6012, + "pulsar_storage_write_latency_count_public/functions": 0, + "pulsar_storage_write_latency_count_sample/dev": 3012, + "pulsar_storage_write_latency_count_sample/prod": 3000, + "pulsar_storage_write_latency_le_0_5": 0, + "pulsar_storage_write_latency_le_0_5_public/functions": 0, + "pulsar_storage_write_latency_le_0_5_sample/dev": 0, + "pulsar_storage_write_latency_le_0_5_sample/prod": 0, + "pulsar_storage_write_latency_le_1": 43, + "pulsar_storage_write_latency_le_10": 163, + "pulsar_storage_write_latency_le_100": 0, + "pulsar_storage_write_latency_le_1000": 0, + "pulsar_storage_write_latency_le_1000_public/functions": 0, + "pulsar_storage_write_latency_le_1000_sample/dev": 0, + "pulsar_storage_write_latency_le_1000_sample/prod": 0, + "pulsar_storage_write_latency_le_100_public/functions": 0, + "pulsar_storage_write_latency_le_100_sample/dev": 0, + "pulsar_storage_write_latency_le_100_sample/prod": 0, + "pulsar_storage_write_latency_le_10_public/functions": 0, + "pulsar_storage_write_latency_le_10_sample/dev": 82, + "pulsar_storage_write_latency_le_10_sample/prod": 81, + "pulsar_storage_write_latency_le_1_public/functions": 0, + "pulsar_storage_write_latency_le_1_sample/dev": 23, + "pulsar_storage_write_latency_le_1_sample/prod": 20, + "pulsar_storage_write_latency_le_20": 7, + "pulsar_storage_write_latency_le_200": 2, + "pulsar_storage_write_latency_le_200_public/functions": 0, + "pulsar_storage_write_latency_le_200_sample/dev": 1, + "pulsar_storage_write_latency_le_200_sample/prod": 1, + "pulsar_storage_write_latency_le_20_public/functions": 0, + "pulsar_storage_write_latency_le_20_sample/dev": 6, + "pulsar_storage_write_latency_le_20_sample/prod": 1, + "pulsar_storage_write_latency_le_5": 5797, + "pulsar_storage_write_latency_le_50": 0, + "pulsar_storage_write_latency_le_50_public/functions": 0, + "pulsar_storage_write_latency_le_50_sample/dev": 0, + "pulsar_storage_write_latency_le_50_sample/prod": 0, + 
"pulsar_storage_write_latency_le_5_public/functions": 0, + "pulsar_storage_write_latency_le_5_sample/dev": 2900, + "pulsar_storage_write_latency_le_5_sample/prod": 2897, + "pulsar_storage_write_latency_overflow": 0, + "pulsar_storage_write_latency_overflow_public/functions": 0, + "pulsar_storage_write_latency_overflow_sample/dev": 0, + "pulsar_storage_write_latency_overflow_sample/prod": 0, + "pulsar_storage_write_latency_sum": 6012, + "pulsar_storage_write_latency_sum_public/functions": 0, + "pulsar_storage_write_latency_sum_sample/dev": 3012, + "pulsar_storage_write_latency_sum_sample/prod": 3000, + "pulsar_storage_write_rate": 100216, + "pulsar_storage_write_rate_public/functions": 0, + "pulsar_storage_write_rate_sample/dev": 50200, + "pulsar_storage_write_rate_sample/prod": 50016, + "pulsar_subscription_delayed": 0, + "pulsar_subscription_delayed_public/functions": 0, + "pulsar_subscription_delayed_sample/dev": 0, + "pulsar_subscription_delayed_sample/prod": 0, + "pulsar_subscriptions_count": 13, + "pulsar_subscriptions_count_public/functions": 3, + "pulsar_subscriptions_count_sample/dev": 6, + "pulsar_subscriptions_count_sample/prod": 4, + "pulsar_throughput_in": 5569401, + "pulsar_throughput_in_public/functions": 0, + "pulsar_throughput_in_sample/dev": 2736243, + "pulsar_throughput_in_sample/prod": 2833158, + "pulsar_throughput_out": 13989373, + "pulsar_throughput_out_public/functions": 0, + "pulsar_throughput_out_sample/dev": 8323043, + "pulsar_throughput_out_sample/prod": 5666330, + "pulsar_topics_count": 7, + "pulsar_topics_count_public/functions": 3, + "pulsar_topics_count_sample/dev": 2, + "pulsar_topics_count_sample/prod": 2, +} + +var expectedStandaloneV250Topics = map[string]int64{ + "pulsar_consumers_count": 21, + "pulsar_consumers_count_persistent://public/functions/assignments": 1, + "pulsar_consumers_count_persistent://public/functions/coordinate": 1, + "pulsar_consumers_count_persistent://public/functions/metadata": 1, + "pulsar_consumers_count_persistent://sample/dev/dev-1": 4, + "pulsar_consumers_count_persistent://sample/dev/dev-2": 6, + "pulsar_consumers_count_persistent://sample/prod/prod-1": 4, + "pulsar_consumers_count_persistent://sample/prod/prod-2": 4, + "pulsar_consumers_count_public/functions": 3, + "pulsar_consumers_count_sample/dev": 10, + "pulsar_consumers_count_sample/prod": 8, + "pulsar_entry_size_count": 5867, + "pulsar_entry_size_count_persistent://public/functions/assignments": 0, + "pulsar_entry_size_count_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_count_persistent://public/functions/metadata": 0, + "pulsar_entry_size_count_persistent://sample/dev/dev-1": 1448, + "pulsar_entry_size_count_persistent://sample/dev/dev-2": 1477, + "pulsar_entry_size_count_persistent://sample/prod/prod-1": 1469, + "pulsar_entry_size_count_persistent://sample/prod/prod-2": 1473, + "pulsar_entry_size_count_public/functions": 0, + "pulsar_entry_size_count_sample/dev": 2925, + "pulsar_entry_size_count_sample/prod": 2942, + "pulsar_entry_size_le_100_kb": 0, + "pulsar_entry_size_le_100_kb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_100_kb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_100_kb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_100_kb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_100_kb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_100_kb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_100_kb_persistent://sample/prod/prod-2": 0, + 
"pulsar_entry_size_le_100_kb_public/functions": 0, + "pulsar_entry_size_le_100_kb_sample/dev": 0, + "pulsar_entry_size_le_100_kb_sample/prod": 0, + "pulsar_entry_size_le_128": 5867, + "pulsar_entry_size_le_128_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_128_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_128_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_128_persistent://sample/dev/dev-1": 1448, + "pulsar_entry_size_le_128_persistent://sample/dev/dev-2": 1477, + "pulsar_entry_size_le_128_persistent://sample/prod/prod-1": 1469, + "pulsar_entry_size_le_128_persistent://sample/prod/prod-2": 1473, + "pulsar_entry_size_le_128_public/functions": 0, + "pulsar_entry_size_le_128_sample/dev": 2925, + "pulsar_entry_size_le_128_sample/prod": 2942, + "pulsar_entry_size_le_16_kb": 0, + "pulsar_entry_size_le_16_kb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_16_kb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_16_kb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_16_kb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_16_kb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_16_kb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_16_kb_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_16_kb_public/functions": 0, + "pulsar_entry_size_le_16_kb_sample/dev": 0, + "pulsar_entry_size_le_16_kb_sample/prod": 0, + "pulsar_entry_size_le_1_kb": 0, + "pulsar_entry_size_le_1_kb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_1_kb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_1_kb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_1_kb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_1_kb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_1_kb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_1_kb_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_1_kb_public/functions": 0, + "pulsar_entry_size_le_1_kb_sample/dev": 0, + "pulsar_entry_size_le_1_kb_sample/prod": 0, + "pulsar_entry_size_le_1_mb": 0, + "pulsar_entry_size_le_1_mb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_1_mb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_1_mb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_1_mb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_1_mb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_1_mb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_1_mb_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_1_mb_public/functions": 0, + "pulsar_entry_size_le_1_mb_sample/dev": 0, + "pulsar_entry_size_le_1_mb_sample/prod": 0, + "pulsar_entry_size_le_2_kb": 0, + "pulsar_entry_size_le_2_kb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_2_kb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_2_kb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_2_kb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_2_kb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_2_kb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_2_kb_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_2_kb_public/functions": 0, + "pulsar_entry_size_le_2_kb_sample/dev": 0, + "pulsar_entry_size_le_2_kb_sample/prod": 0, + "pulsar_entry_size_le_4_kb": 0, + 
"pulsar_entry_size_le_4_kb_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_4_kb_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_4_kb_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_4_kb_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_4_kb_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_4_kb_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_4_kb_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_4_kb_public/functions": 0, + "pulsar_entry_size_le_4_kb_sample/dev": 0, + "pulsar_entry_size_le_4_kb_sample/prod": 0, + "pulsar_entry_size_le_512": 0, + "pulsar_entry_size_le_512_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_512_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_512_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_512_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_512_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_512_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_512_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_512_public/functions": 0, + "pulsar_entry_size_le_512_sample/dev": 0, + "pulsar_entry_size_le_512_sample/prod": 0, + "pulsar_entry_size_le_overflow": 0, + "pulsar_entry_size_le_overflow_persistent://public/functions/assignments": 0, + "pulsar_entry_size_le_overflow_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_le_overflow_persistent://public/functions/metadata": 0, + "pulsar_entry_size_le_overflow_persistent://sample/dev/dev-1": 0, + "pulsar_entry_size_le_overflow_persistent://sample/dev/dev-2": 0, + "pulsar_entry_size_le_overflow_persistent://sample/prod/prod-1": 0, + "pulsar_entry_size_le_overflow_persistent://sample/prod/prod-2": 0, + "pulsar_entry_size_le_overflow_public/functions": 0, + "pulsar_entry_size_le_overflow_sample/dev": 0, + "pulsar_entry_size_le_overflow_sample/prod": 0, + "pulsar_entry_size_sum": 5867, + "pulsar_entry_size_sum_persistent://public/functions/assignments": 0, + "pulsar_entry_size_sum_persistent://public/functions/coordinate": 0, + "pulsar_entry_size_sum_persistent://public/functions/metadata": 0, + "pulsar_entry_size_sum_persistent://sample/dev/dev-1": 1448, + "pulsar_entry_size_sum_persistent://sample/dev/dev-2": 1477, + "pulsar_entry_size_sum_persistent://sample/prod/prod-1": 1469, + "pulsar_entry_size_sum_persistent://sample/prod/prod-2": 1473, + "pulsar_entry_size_sum_public/functions": 0, + "pulsar_entry_size_sum_sample/dev": 2925, + "pulsar_entry_size_sum_sample/prod": 2942, + "pulsar_msg_backlog": 0, + "pulsar_msg_backlog_persistent://public/functions/assignments": 0, + "pulsar_msg_backlog_persistent://public/functions/coordinate": 0, + "pulsar_msg_backlog_persistent://public/functions/metadata": 0, + "pulsar_msg_backlog_persistent://sample/dev/dev-1": 0, + "pulsar_msg_backlog_persistent://sample/dev/dev-2": 0, + "pulsar_msg_backlog_persistent://sample/prod/prod-1": 0, + "pulsar_msg_backlog_persistent://sample/prod/prod-2": 0, + "pulsar_msg_backlog_public/functions": 0, + "pulsar_msg_backlog_sample/dev": 0, + "pulsar_msg_backlog_sample/prod": 0, + "pulsar_namespaces_count": 3, + "pulsar_producers_count": 10, + "pulsar_producers_count_persistent://public/functions/assignments": 1, + "pulsar_producers_count_persistent://public/functions/coordinate": 0, + "pulsar_producers_count_persistent://public/functions/metadata": 1, + "pulsar_producers_count_persistent://sample/dev/dev-1": 2, + 
"pulsar_producers_count_persistent://sample/dev/dev-2": 2, + "pulsar_producers_count_persistent://sample/prod/prod-1": 2, + "pulsar_producers_count_persistent://sample/prod/prod-2": 2, + "pulsar_producers_count_public/functions": 2, + "pulsar_producers_count_sample/dev": 4, + "pulsar_producers_count_sample/prod": 4, + "pulsar_rate_in": 102064, + "pulsar_rate_in_persistent://public/functions/assignments": 0, + "pulsar_rate_in_persistent://public/functions/coordinate": 0, + "pulsar_rate_in_persistent://public/functions/metadata": 0, + "pulsar_rate_in_persistent://sample/dev/dev-1": 25013, + "pulsar_rate_in_persistent://sample/dev/dev-2": 25014, + "pulsar_rate_in_persistent://sample/prod/prod-1": 26019, + "pulsar_rate_in_persistent://sample/prod/prod-2": 26018, + "pulsar_rate_in_public/functions": 0, + "pulsar_rate_in_sample/dev": 50027, + "pulsar_rate_in_sample/prod": 52037, + "pulsar_rate_out": 254162, + "pulsar_rate_out_persistent://public/functions/assignments": 0, + "pulsar_rate_out_persistent://public/functions/coordinate": 0, + "pulsar_rate_out_persistent://public/functions/metadata": 0, + "pulsar_rate_out_persistent://sample/dev/dev-1": 50027, + "pulsar_rate_out_persistent://sample/dev/dev-2": 100060, + "pulsar_rate_out_persistent://sample/prod/prod-1": 52038, + "pulsar_rate_out_persistent://sample/prod/prod-2": 52037, + "pulsar_rate_out_public/functions": 0, + "pulsar_rate_out_sample/dev": 150087, + "pulsar_rate_out_sample/prod": 104075, + "pulsar_storage_size": 8112300, + "pulsar_storage_size_persistent://public/functions/assignments": 0, + "pulsar_storage_size_persistent://public/functions/coordinate": 0, + "pulsar_storage_size_persistent://public/functions/metadata": 0, + "pulsar_storage_size_persistent://sample/dev/dev-1": 1951642, + "pulsar_storage_size_persistent://sample/dev/dev-2": 2029478, + "pulsar_storage_size_persistent://sample/prod/prod-1": 2022420, + "pulsar_storage_size_persistent://sample/prod/prod-2": 2108760, + "pulsar_storage_size_public/functions": 0, + "pulsar_storage_size_sample/dev": 3981120, + "pulsar_storage_size_sample/prod": 4131180, + "pulsar_storage_write_latency_count": 5867, + "pulsar_storage_write_latency_count_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_count_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_count_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_count_persistent://sample/dev/dev-1": 1448, + "pulsar_storage_write_latency_count_persistent://sample/dev/dev-2": 1477, + "pulsar_storage_write_latency_count_persistent://sample/prod/prod-1": 1469, + "pulsar_storage_write_latency_count_persistent://sample/prod/prod-2": 1473, + "pulsar_storage_write_latency_count_public/functions": 0, + "pulsar_storage_write_latency_count_sample/dev": 2925, + "pulsar_storage_write_latency_count_sample/prod": 2942, + "pulsar_storage_write_latency_le_0_5": 0, + "pulsar_storage_write_latency_le_0_5_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_0_5_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_0_5_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_0_5_persistent://sample/dev/dev-1": 0, + "pulsar_storage_write_latency_le_0_5_persistent://sample/dev/dev-2": 0, + "pulsar_storage_write_latency_le_0_5_persistent://sample/prod/prod-1": 0, + "pulsar_storage_write_latency_le_0_5_persistent://sample/prod/prod-2": 0, + "pulsar_storage_write_latency_le_0_5_public/functions": 0, + 
"pulsar_storage_write_latency_le_0_5_sample/dev": 0, + "pulsar_storage_write_latency_le_0_5_sample/prod": 0, + "pulsar_storage_write_latency_le_1": 41, + "pulsar_storage_write_latency_le_10": 341, + "pulsar_storage_write_latency_le_100": 3, + "pulsar_storage_write_latency_le_1000": 0, + "pulsar_storage_write_latency_le_1000_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_1000_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_1000_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_1000_persistent://sample/dev/dev-1": 0, + "pulsar_storage_write_latency_le_1000_persistent://sample/dev/dev-2": 0, + "pulsar_storage_write_latency_le_1000_persistent://sample/prod/prod-1": 0, + "pulsar_storage_write_latency_le_1000_persistent://sample/prod/prod-2": 0, + "pulsar_storage_write_latency_le_1000_public/functions": 0, + "pulsar_storage_write_latency_le_1000_sample/dev": 0, + "pulsar_storage_write_latency_le_1000_sample/prod": 0, + "pulsar_storage_write_latency_le_100_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_100_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_100_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_100_persistent://sample/dev/dev-1": 0, + "pulsar_storage_write_latency_le_100_persistent://sample/dev/dev-2": 1, + "pulsar_storage_write_latency_le_100_persistent://sample/prod/prod-1": 1, + "pulsar_storage_write_latency_le_100_persistent://sample/prod/prod-2": 1, + "pulsar_storage_write_latency_le_100_public/functions": 0, + "pulsar_storage_write_latency_le_100_sample/dev": 1, + "pulsar_storage_write_latency_le_100_sample/prod": 2, + "pulsar_storage_write_latency_le_10_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_10_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_10_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_10_persistent://sample/dev/dev-1": 95, + "pulsar_storage_write_latency_le_10_persistent://sample/dev/dev-2": 82, + "pulsar_storage_write_latency_le_10_persistent://sample/prod/prod-1": 84, + "pulsar_storage_write_latency_le_10_persistent://sample/prod/prod-2": 80, + "pulsar_storage_write_latency_le_10_public/functions": 0, + "pulsar_storage_write_latency_le_10_sample/dev": 177, + "pulsar_storage_write_latency_le_10_sample/prod": 164, + "pulsar_storage_write_latency_le_1_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_1_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_1_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_1_persistent://sample/dev/dev-1": 10, + "pulsar_storage_write_latency_le_1_persistent://sample/dev/dev-2": 15, + "pulsar_storage_write_latency_le_1_persistent://sample/prod/prod-1": 7, + "pulsar_storage_write_latency_le_1_persistent://sample/prod/prod-2": 9, + "pulsar_storage_write_latency_le_1_public/functions": 0, + "pulsar_storage_write_latency_le_1_sample/dev": 25, + "pulsar_storage_write_latency_le_1_sample/prod": 16, + "pulsar_storage_write_latency_le_20": 114, + "pulsar_storage_write_latency_le_200": 0, + "pulsar_storage_write_latency_le_200_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_200_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_200_persistent://public/functions/metadata": 0, + 
"pulsar_storage_write_latency_le_200_persistent://sample/dev/dev-1": 0, + "pulsar_storage_write_latency_le_200_persistent://sample/dev/dev-2": 0, + "pulsar_storage_write_latency_le_200_persistent://sample/prod/prod-1": 0, + "pulsar_storage_write_latency_le_200_persistent://sample/prod/prod-2": 0, + "pulsar_storage_write_latency_le_200_public/functions": 0, + "pulsar_storage_write_latency_le_200_sample/dev": 0, + "pulsar_storage_write_latency_le_200_sample/prod": 0, + "pulsar_storage_write_latency_le_20_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_20_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_20_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_20_persistent://sample/dev/dev-1": 26, + "pulsar_storage_write_latency_le_20_persistent://sample/dev/dev-2": 28, + "pulsar_storage_write_latency_le_20_persistent://sample/prod/prod-1": 26, + "pulsar_storage_write_latency_le_20_persistent://sample/prod/prod-2": 34, + "pulsar_storage_write_latency_le_20_public/functions": 0, + "pulsar_storage_write_latency_le_20_sample/dev": 54, + "pulsar_storage_write_latency_le_20_sample/prod": 60, + "pulsar_storage_write_latency_le_5": 5328, + "pulsar_storage_write_latency_le_50": 40, + "pulsar_storage_write_latency_le_50_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_50_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_50_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_50_persistent://sample/dev/dev-1": 9, + "pulsar_storage_write_latency_le_50_persistent://sample/dev/dev-2": 9, + "pulsar_storage_write_latency_le_50_persistent://sample/prod/prod-1": 12, + "pulsar_storage_write_latency_le_50_persistent://sample/prod/prod-2": 10, + "pulsar_storage_write_latency_le_50_public/functions": 0, + "pulsar_storage_write_latency_le_50_sample/dev": 18, + "pulsar_storage_write_latency_le_50_sample/prod": 22, + "pulsar_storage_write_latency_le_5_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_le_5_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_le_5_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_le_5_persistent://sample/dev/dev-1": 1308, + "pulsar_storage_write_latency_le_5_persistent://sample/dev/dev-2": 1342, + "pulsar_storage_write_latency_le_5_persistent://sample/prod/prod-1": 1339, + "pulsar_storage_write_latency_le_5_persistent://sample/prod/prod-2": 1339, + "pulsar_storage_write_latency_le_5_public/functions": 0, + "pulsar_storage_write_latency_le_5_sample/dev": 2650, + "pulsar_storage_write_latency_le_5_sample/prod": 2678, + "pulsar_storage_write_latency_overflow": 0, + "pulsar_storage_write_latency_overflow_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_overflow_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_overflow_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_overflow_persistent://sample/dev/dev-1": 0, + "pulsar_storage_write_latency_overflow_persistent://sample/dev/dev-2": 0, + "pulsar_storage_write_latency_overflow_persistent://sample/prod/prod-1": 0, + "pulsar_storage_write_latency_overflow_persistent://sample/prod/prod-2": 0, + "pulsar_storage_write_latency_overflow_public/functions": 0, + "pulsar_storage_write_latency_overflow_sample/dev": 0, + "pulsar_storage_write_latency_overflow_sample/prod": 0, + "pulsar_storage_write_latency_sum": 
5867, + "pulsar_storage_write_latency_sum_persistent://public/functions/assignments": 0, + "pulsar_storage_write_latency_sum_persistent://public/functions/coordinate": 0, + "pulsar_storage_write_latency_sum_persistent://public/functions/metadata": 0, + "pulsar_storage_write_latency_sum_persistent://sample/dev/dev-1": 1448, + "pulsar_storage_write_latency_sum_persistent://sample/dev/dev-2": 1477, + "pulsar_storage_write_latency_sum_persistent://sample/prod/prod-1": 1469, + "pulsar_storage_write_latency_sum_persistent://sample/prod/prod-2": 1473, + "pulsar_storage_write_latency_sum_public/functions": 0, + "pulsar_storage_write_latency_sum_sample/dev": 2925, + "pulsar_storage_write_latency_sum_sample/prod": 2942, + "pulsar_subscription_blocked_on_unacked_messages": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/assignments": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/coordinate": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/metadata": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/dev/dev-1": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/dev/dev-2": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/prod/prod-1": 0, + "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/prod/prod-2": 0, + "pulsar_subscription_blocked_on_unacked_messages_public/functions": 0, + "pulsar_subscription_blocked_on_unacked_messages_sample/dev": 0, + "pulsar_subscription_blocked_on_unacked_messages_sample/prod": 0, + "pulsar_subscription_delayed": 0, + "pulsar_subscription_delayed_persistent://public/functions/assignments": 0, + "pulsar_subscription_delayed_persistent://public/functions/coordinate": 0, + "pulsar_subscription_delayed_persistent://public/functions/metadata": 0, + "pulsar_subscription_delayed_persistent://sample/dev/dev-1": 0, + "pulsar_subscription_delayed_persistent://sample/dev/dev-2": 0, + "pulsar_subscription_delayed_persistent://sample/prod/prod-1": 0, + "pulsar_subscription_delayed_persistent://sample/prod/prod-2": 0, + "pulsar_subscription_delayed_public/functions": 0, + "pulsar_subscription_delayed_sample/dev": 0, + "pulsar_subscription_delayed_sample/prod": 0, + "pulsar_subscription_msg_rate_redeliver": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/assignments": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/coordinate": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/metadata": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://sample/dev/dev-1": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://sample/dev/dev-2": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://sample/prod/prod-1": 0, + "pulsar_subscription_msg_rate_redeliver_persistent://sample/prod/prod-2": 0, + "pulsar_subscription_msg_rate_redeliver_public/functions": 0, + "pulsar_subscription_msg_rate_redeliver_sample/dev": 0, + "pulsar_subscription_msg_rate_redeliver_sample/prod": 0, + "pulsar_subscriptions_count": 13, + "pulsar_subscriptions_count_persistent://public/functions/assignments": 1, + "pulsar_subscriptions_count_persistent://public/functions/coordinate": 1, + "pulsar_subscriptions_count_persistent://public/functions/metadata": 1, + "pulsar_subscriptions_count_persistent://sample/dev/dev-1": 2, + "pulsar_subscriptions_count_persistent://sample/dev/dev-2": 4, + 
"pulsar_subscriptions_count_persistent://sample/prod/prod-1": 2, + "pulsar_subscriptions_count_persistent://sample/prod/prod-2": 2, + "pulsar_subscriptions_count_public/functions": 3, + "pulsar_subscriptions_count_sample/dev": 6, + "pulsar_subscriptions_count_sample/prod": 4, + "pulsar_throughput_in": 6023912, + "pulsar_throughput_in_persistent://public/functions/assignments": 0, + "pulsar_throughput_in_persistent://public/functions/coordinate": 0, + "pulsar_throughput_in_persistent://public/functions/metadata": 0, + "pulsar_throughput_in_persistent://sample/dev/dev-1": 1450789, + "pulsar_throughput_in_persistent://sample/dev/dev-2": 1450862, + "pulsar_throughput_in_persistent://sample/prod/prod-1": 1561151, + "pulsar_throughput_in_persistent://sample/prod/prod-2": 1561110, + "pulsar_throughput_in_public/functions": 0, + "pulsar_throughput_in_sample/dev": 2901651, + "pulsar_throughput_in_sample/prod": 3122261, + "pulsar_throughput_out": 14949677, + "pulsar_throughput_out_persistent://public/functions/assignments": 0, + "pulsar_throughput_out_persistent://public/functions/coordinate": 0, + "pulsar_throughput_out_persistent://public/functions/metadata": 0, + "pulsar_throughput_out_persistent://sample/dev/dev-1": 2901607, + "pulsar_throughput_out_persistent://sample/dev/dev-2": 5803500, + "pulsar_throughput_out_persistent://sample/prod/prod-1": 3122322, + "pulsar_throughput_out_persistent://sample/prod/prod-2": 3122248, + "pulsar_throughput_out_public/functions": 0, + "pulsar_throughput_out_sample/dev": 8705107, + "pulsar_throughput_out_sample/prod": 6244570, + "pulsar_topics_count": 14, + "pulsar_topics_count_public/functions": 5, + "pulsar_topics_count_sample/dev": 2, + "pulsar_topics_count_sample/prod": 7, +} + +var expectedStandaloneV250TopicsFiltered = map[string]int64{ + "pulsar_consumers_count": 21, + "pulsar_consumers_count_public/functions": 3, + "pulsar_consumers_count_sample/dev": 10, + "pulsar_consumers_count_sample/prod": 8, + "pulsar_entry_size_count": 5867, + "pulsar_entry_size_count_public/functions": 0, + "pulsar_entry_size_count_sample/dev": 2925, + "pulsar_entry_size_count_sample/prod": 2942, + "pulsar_entry_size_le_100_kb": 0, + "pulsar_entry_size_le_100_kb_public/functions": 0, + "pulsar_entry_size_le_100_kb_sample/dev": 0, + "pulsar_entry_size_le_100_kb_sample/prod": 0, + "pulsar_entry_size_le_128": 5867, + "pulsar_entry_size_le_128_public/functions": 0, + "pulsar_entry_size_le_128_sample/dev": 2925, + "pulsar_entry_size_le_128_sample/prod": 2942, + "pulsar_entry_size_le_16_kb": 0, + "pulsar_entry_size_le_16_kb_public/functions": 0, + "pulsar_entry_size_le_16_kb_sample/dev": 0, + "pulsar_entry_size_le_16_kb_sample/prod": 0, + "pulsar_entry_size_le_1_kb": 0, + "pulsar_entry_size_le_1_kb_public/functions": 0, + "pulsar_entry_size_le_1_kb_sample/dev": 0, + "pulsar_entry_size_le_1_kb_sample/prod": 0, + "pulsar_entry_size_le_1_mb": 0, + "pulsar_entry_size_le_1_mb_public/functions": 0, + "pulsar_entry_size_le_1_mb_sample/dev": 0, + "pulsar_entry_size_le_1_mb_sample/prod": 0, + "pulsar_entry_size_le_2_kb": 0, + "pulsar_entry_size_le_2_kb_public/functions": 0, + "pulsar_entry_size_le_2_kb_sample/dev": 0, + "pulsar_entry_size_le_2_kb_sample/prod": 0, + "pulsar_entry_size_le_4_kb": 0, + "pulsar_entry_size_le_4_kb_public/functions": 0, + "pulsar_entry_size_le_4_kb_sample/dev": 0, + "pulsar_entry_size_le_4_kb_sample/prod": 0, + "pulsar_entry_size_le_512": 0, + "pulsar_entry_size_le_512_public/functions": 0, + "pulsar_entry_size_le_512_sample/dev": 0, + 
"pulsar_entry_size_le_512_sample/prod": 0, + "pulsar_entry_size_le_overflow": 0, + "pulsar_entry_size_le_overflow_public/functions": 0, + "pulsar_entry_size_le_overflow_sample/dev": 0, + "pulsar_entry_size_le_overflow_sample/prod": 0, + "pulsar_entry_size_sum": 5867, + "pulsar_entry_size_sum_public/functions": 0, + "pulsar_entry_size_sum_sample/dev": 2925, + "pulsar_entry_size_sum_sample/prod": 2942, + "pulsar_msg_backlog": 0, + "pulsar_msg_backlog_public/functions": 0, + "pulsar_msg_backlog_sample/dev": 0, + "pulsar_msg_backlog_sample/prod": 0, + "pulsar_namespaces_count": 3, + "pulsar_producers_count": 10, + "pulsar_producers_count_public/functions": 2, + "pulsar_producers_count_sample/dev": 4, + "pulsar_producers_count_sample/prod": 4, + "pulsar_rate_in": 102064, + "pulsar_rate_in_public/functions": 0, + "pulsar_rate_in_sample/dev": 50027, + "pulsar_rate_in_sample/prod": 52037, + "pulsar_rate_out": 254162, + "pulsar_rate_out_public/functions": 0, + "pulsar_rate_out_sample/dev": 150087, + "pulsar_rate_out_sample/prod": 104075, + "pulsar_storage_size": 8112300, + "pulsar_storage_size_public/functions": 0, + "pulsar_storage_size_sample/dev": 3981120, + "pulsar_storage_size_sample/prod": 4131180, + "pulsar_storage_write_latency_count": 5867, + "pulsar_storage_write_latency_count_public/functions": 0, + "pulsar_storage_write_latency_count_sample/dev": 2925, + "pulsar_storage_write_latency_count_sample/prod": 2942, + "pulsar_storage_write_latency_le_0_5": 0, + "pulsar_storage_write_latency_le_0_5_public/functions": 0, + "pulsar_storage_write_latency_le_0_5_sample/dev": 0, + "pulsar_storage_write_latency_le_0_5_sample/prod": 0, + "pulsar_storage_write_latency_le_1": 41, + "pulsar_storage_write_latency_le_10": 341, + "pulsar_storage_write_latency_le_100": 3, + "pulsar_storage_write_latency_le_1000": 0, + "pulsar_storage_write_latency_le_1000_public/functions": 0, + "pulsar_storage_write_latency_le_1000_sample/dev": 0, + "pulsar_storage_write_latency_le_1000_sample/prod": 0, + "pulsar_storage_write_latency_le_100_public/functions": 0, + "pulsar_storage_write_latency_le_100_sample/dev": 1, + "pulsar_storage_write_latency_le_100_sample/prod": 2, + "pulsar_storage_write_latency_le_10_public/functions": 0, + "pulsar_storage_write_latency_le_10_sample/dev": 177, + "pulsar_storage_write_latency_le_10_sample/prod": 164, + "pulsar_storage_write_latency_le_1_public/functions": 0, + "pulsar_storage_write_latency_le_1_sample/dev": 25, + "pulsar_storage_write_latency_le_1_sample/prod": 16, + "pulsar_storage_write_latency_le_20": 114, + "pulsar_storage_write_latency_le_200": 0, + "pulsar_storage_write_latency_le_200_public/functions": 0, + "pulsar_storage_write_latency_le_200_sample/dev": 0, + "pulsar_storage_write_latency_le_200_sample/prod": 0, + "pulsar_storage_write_latency_le_20_public/functions": 0, + "pulsar_storage_write_latency_le_20_sample/dev": 54, + "pulsar_storage_write_latency_le_20_sample/prod": 60, + "pulsar_storage_write_latency_le_5": 5328, + "pulsar_storage_write_latency_le_50": 40, + "pulsar_storage_write_latency_le_50_public/functions": 0, + "pulsar_storage_write_latency_le_50_sample/dev": 18, + "pulsar_storage_write_latency_le_50_sample/prod": 22, + "pulsar_storage_write_latency_le_5_public/functions": 0, + "pulsar_storage_write_latency_le_5_sample/dev": 2650, + "pulsar_storage_write_latency_le_5_sample/prod": 2678, + "pulsar_storage_write_latency_overflow": 0, + "pulsar_storage_write_latency_overflow_public/functions": 0, + "pulsar_storage_write_latency_overflow_sample/dev": 0, + 
"pulsar_storage_write_latency_overflow_sample/prod": 0, + "pulsar_storage_write_latency_sum": 5867, + "pulsar_storage_write_latency_sum_public/functions": 0, + "pulsar_storage_write_latency_sum_sample/dev": 2925, + "pulsar_storage_write_latency_sum_sample/prod": 2942, + "pulsar_subscription_blocked_on_unacked_messages": 0, + "pulsar_subscription_blocked_on_unacked_messages_public/functions": 0, + "pulsar_subscription_blocked_on_unacked_messages_sample/dev": 0, + "pulsar_subscription_blocked_on_unacked_messages_sample/prod": 0, + "pulsar_subscription_delayed": 0, + "pulsar_subscription_delayed_public/functions": 0, + "pulsar_subscription_delayed_sample/dev": 0, + "pulsar_subscription_delayed_sample/prod": 0, + "pulsar_subscription_msg_rate_redeliver": 0, + "pulsar_subscription_msg_rate_redeliver_public/functions": 0, + "pulsar_subscription_msg_rate_redeliver_sample/dev": 0, + "pulsar_subscription_msg_rate_redeliver_sample/prod": 0, + "pulsar_subscriptions_count": 13, + "pulsar_subscriptions_count_public/functions": 3, + "pulsar_subscriptions_count_sample/dev": 6, + "pulsar_subscriptions_count_sample/prod": 4, + "pulsar_throughput_in": 6023912, + "pulsar_throughput_in_public/functions": 0, + "pulsar_throughput_in_sample/dev": 2901651, + "pulsar_throughput_in_sample/prod": 3122261, + "pulsar_throughput_out": 14949677, + "pulsar_throughput_out_public/functions": 0, + "pulsar_throughput_out_sample/dev": 8705107, + "pulsar_throughput_out_sample/prod": 6244570, + "pulsar_topics_count": 14, + "pulsar_topics_count_public/functions": 5, + "pulsar_topics_count_sample/dev": 2, + "pulsar_topics_count_sample/prod": 7, +} diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/testdata/non-pulsar.txt b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/non-pulsar.txt new file mode 100644 index 00000000000000..f5f0ae082c69fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/non-pulsar.txt @@ -0,0 +1,27 @@ +# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize +# TYPE wmi_os_process_memory_limix_bytes gauge +wmi_os_process_memory_limix_bytes 1.40737488224256e+14 +# HELP wmi_os_processes OperatingSystem.NumberOfProcesses +# TYPE wmi_os_processes gauge +wmi_os_processes 124 +# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses +# TYPE wmi_os_processes_limit gauge +wmi_os_processes_limit 4.294967295e+09 +# HELP wmi_os_time OperatingSystem.LocalDateTime +# TYPE wmi_os_time gauge +wmi_os_time 1.57804974e+09 +# HELP wmi_os_timezone OperatingSystem.LocalDateTime +# TYPE wmi_os_timezone gauge +wmi_os_timezone{timezone="MSK"} 1 +# HELP wmi_os_users OperatingSystem.NumberOfUsers +# TYPE wmi_os_users gauge +wmi_os_users 2 +# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize +# TYPE wmi_os_virtual_memory_bytes gauge +wmi_os_virtual_memory_bytes 5.770891264e+09 +# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory +# TYPE wmi_os_virtual_memory_free_bytes gauge +wmi_os_virtual_memory_free_bytes 3.76489984e+09 +# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize +# TYPE wmi_os_visible_memory_bytes gauge +wmi_os_visible_memory_bytes 4.294496256e+09 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt new file mode 100644 index 00000000000000..bbc3de4a096e75 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt @@ -0,0 +1,500 @@ +# TYPE zk_read_latency summary +zk_read_latency{cluster="standalone",quantile="0.5"} NaN +zk_read_latency{cluster="standalone",quantile="0.75"} NaN +zk_read_latency{cluster="standalone",quantile="0.95"} NaN +zk_read_latency{cluster="standalone",quantile="0.99"} NaN +zk_read_latency{cluster="standalone",quantile="0.999"} NaN +zk_read_latency{cluster="standalone",quantile="0.9999"} NaN +zk_read_latency_count{cluster="standalone"} 0.0 +zk_read_latency_sum{cluster="standalone"} 0.0 +# TYPE zk_write_latency summary +zk_write_latency{cluster="standalone",quantile="0.5"} NaN +zk_write_latency{cluster="standalone",quantile="0.75"} NaN +zk_write_latency{cluster="standalone",quantile="0.95"} NaN +zk_write_latency{cluster="standalone",quantile="0.99"} NaN +zk_write_latency{cluster="standalone",quantile="0.999"} NaN +zk_write_latency{cluster="standalone",quantile="0.9999"} NaN +zk_write_latency_count{cluster="standalone"} 0.0 +zk_write_latency_sum{cluster="standalone"} 0.0 +# TYPE jvm_memory_direct_bytes_max gauge +jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9 +# TYPE log4j2_appender_total counter +log4j2_appender_total{cluster="standalone",level="debug"} 0.0 +log4j2_appender_total{cluster="standalone",level="warn"} 68.0 +log4j2_appender_total{cluster="standalone",level="trace"} 0.0 +log4j2_appender_total{cluster="standalone",level="error"} 0.0 +log4j2_appender_total{cluster="standalone",level="fatal"} 0.0 +log4j2_appender_total{cluster="standalone",level="info"} 3773.0 +# TYPE jvm_threads_current gauge +jvm_threads_current{cluster="standalone"} 293.0 +# TYPE jvm_threads_daemon gauge +jvm_threads_daemon{cluster="standalone"} 49.0 +# TYPE jvm_threads_peak gauge +jvm_threads_peak{cluster="standalone"} 295.0 +# TYPE jvm_threads_started_total counter +jvm_threads_started_total{cluster="standalone"} 343.0 +# TYPE jvm_threads_deadlocked gauge +jvm_threads_deadlocked{cluster="standalone"} 0.0 +# TYPE jvm_threads_deadlocked_monitor gauge +jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0 +# TYPE zookeeper_server_requests counter +zookeeper_server_requests{cluster="standalone",type="getData"} 1091.0 +zookeeper_server_requests{cluster="standalone",type="setData"} 56.0 +zookeeper_server_requests{cluster="standalone",type="ping"} 1673.0 +zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0 +zookeeper_server_requests{cluster="standalone",type="sync"} 53.0 +zookeeper_server_requests{cluster="standalone",type="delete"} 189.0 +zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0 +zookeeper_server_requests{cluster="standalone",type="multi"} 54.0 +zookeeper_server_requests{cluster="standalone",type="getChildren"} 172.0 +zookeeper_server_requests{cluster="standalone",type="getChildren2"} 250.0 +zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0 +zookeeper_server_requests{cluster="standalone",type="create"} 119.0 +zookeeper_server_requests{cluster="standalone",type="exists"} 577.0 +# TYPE jetty_requests_total counter +jetty_requests_total{cluster="standalone"} 2182.0 +# TYPE jetty_requests_active gauge +jetty_requests_active{cluster="standalone"} 1.0 +# TYPE jetty_requests_active_max gauge +jetty_requests_active_max{cluster="standalone"} 2.0 +# TYPE jetty_request_time_max_seconds gauge +jetty_request_time_max_seconds{cluster="standalone"} 0.539 +# TYPE jetty_request_time_seconds_total counter 
+jetty_request_time_seconds_total{cluster="standalone"} 10.786 +# TYPE jetty_dispatched_total counter +jetty_dispatched_total{cluster="standalone"} 2182.0 +# TYPE jetty_dispatched_active gauge +jetty_dispatched_active{cluster="standalone"} 0.0 +# TYPE jetty_dispatched_active_max gauge +jetty_dispatched_active_max{cluster="standalone"} 2.0 +# TYPE jetty_dispatched_time_max gauge +jetty_dispatched_time_max{cluster="standalone"} 539.0 +# TYPE jetty_dispatched_time_seconds_total counter +jetty_dispatched_time_seconds_total{cluster="standalone"} 1.745 +# TYPE jetty_async_requests_total counter +jetty_async_requests_total{cluster="standalone"} 1070.0 +# TYPE jetty_async_requests_waiting gauge +jetty_async_requests_waiting{cluster="standalone"} 1.0 +# TYPE jetty_async_requests_waiting_max gauge +jetty_async_requests_waiting_max{cluster="standalone"} 1.0 +# TYPE jetty_async_dispatches_total counter +jetty_async_dispatches_total{cluster="standalone"} 0.0 +# TYPE jetty_expires_total counter +jetty_expires_total{cluster="standalone"} 0.0 +# TYPE jetty_responses_total counter +jetty_responses_total{cluster="standalone",code="1xx"} 0.0 +jetty_responses_total{cluster="standalone",code="2xx"} 1113.0 +jetty_responses_total{cluster="standalone",code="3xx"} 1067.0 +jetty_responses_total{cluster="standalone",code="4xx"} 1.0 +jetty_responses_total{cluster="standalone",code="5xx"} 0.0 +# TYPE jetty_stats_seconds gauge +jetty_stats_seconds{cluster="standalone"} 1001.006 +# TYPE jetty_responses_bytes_total counter +jetty_responses_bytes_total{cluster="standalone"} 3.7698452E7 +# TYPE pulsar_broker_publish_latency summary +pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.821 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.559 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.8 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 10.992 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 10.992 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 10.992 +pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 10.992 +pulsar_broker_publish_latency_count{cluster="standalone"} 95832.0 +pulsar_broker_publish_latency_sum{cluster="standalone"} 234677.0 +# TYPE zookeeper_server_connections gauge +zookeeper_server_connections{cluster="standalone"} 10.0 +# TYPE jvm_info gauge +jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0 +# TYPE topic_load_times summary +topic_load_times{cluster="standalone",quantile="0.5"} NaN +topic_load_times{cluster="standalone",quantile="0.75"} NaN +topic_load_times{cluster="standalone",quantile="0.95"} NaN +topic_load_times{cluster="standalone",quantile="0.99"} NaN +topic_load_times{cluster="standalone",quantile="0.999"} NaN +topic_load_times{cluster="standalone",quantile="0.9999"} NaN +topic_load_times_count{cluster="standalone"} 0.0 +topic_load_times_sum{cluster="standalone"} 0.0 +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{cluster="standalone"} 492.64 +# TYPE process_start_time_seconds gauge +process_start_time_seconds{cluster="standalone"} 1.583774770759E9 +# TYPE process_open_fds gauge +process_open_fds{cluster="standalone"} 676.0 +# TYPE process_max_fds gauge +process_max_fds{cluster="standalone"} 1048576.0 +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{cluster="standalone"} 8.727437312E9 +# TYPE process_resident_memory_bytes gauge 
+process_resident_memory_bytes{cluster="standalone"} 1.642012672E9 +# TYPE jvm_classes_loaded gauge +jvm_classes_loaded{cluster="standalone"} 14402.0 +# TYPE jvm_classes_loaded_total counter +jvm_classes_loaded_total{cluster="standalone"} 14402.0 +# TYPE jvm_classes_unloaded_total counter +jvm_classes_unloaded_total{cluster="standalone"} 0.0 +# TYPE zookeeper_server_requests_latency_ms summary +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 0.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 2.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 2.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 2.0 +zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 3819.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 2033.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN +zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 436.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 832.0 +# TYPE zookeeper_server_watches_count gauge +zookeeper_server_watches_count{cluster="standalone"} 37.0 +# TYPE zookeeper_server_ephemerals_count gauge +zookeeper_server_ephemerals_count{cluster="standalone"} 12.0 +# TYPE caffeine_cache_hit_total counter +caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 143.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 156.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 7.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_miss_total counter +caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0 
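+# NOTE: this fixture also carries broker JVM/Jetty/ZooKeeper/caffeine series;
+# only the pulsar_* families are reflected in the expected maps above.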
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 4.0 +caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 22.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_requests_total counter +caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 154.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 9.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 6.0 +caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 160.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 29.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_total counter +caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_weight gauge +caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_load_failure_total counter +caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0 
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 17.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_loads_total counter +caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 4.0 +caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 22.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_estimated_size gauge +caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 4.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 5.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_load_duration_seconds summary +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.05334063 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0 
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.039758752
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.027705247
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.076995851
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 22.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.156849343
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE jvm_memory_direct_bytes_used gauge
+jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9
+# TYPE jvm_memory_bytes_used gauge
+jvm_memory_bytes_used{cluster="standalone",area="heap"} 3.01123632E8
+jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.27959784E8
+# TYPE jvm_memory_bytes_committed gauge
+jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.33287936E8
+# TYPE jvm_memory_bytes_max gauge
+jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0
+# TYPE jvm_memory_bytes_init gauge
+jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0
+# TYPE jvm_memory_pool_bytes_used gauge
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 3.5528384E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.2704856E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9726544.0
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 1.75112192E8
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 6.3963136E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 6.2048304E7
+# TYPE jvm_memory_pool_bytes_committed gauge
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 3.5782656E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.6863872E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0641408E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.06430464E9
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 6.3963136E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_memory_pool_bytes_max gauge
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9
+# TYPE jvm_memory_pool_bytes_init gauge
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_buffer_pool_used_bytes gauge
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 697534.0
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_capacity_bytes gauge
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 697533.0
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_used_buffers gauge
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0
+# TYPE zookeeper_server_znode_count gauge
+zookeeper_server_znode_count{cluster="standalone"} 4175.0
+# TYPE zookeeper_server_data_size_bytes gauge
+zookeeper_server_data_size_bytes{cluster="standalone"} 459126.0
+# TYPE jvm_gc_collection_seconds summary
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 14.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 3.13
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0
+# TYPE pulsar_topics_count gauge
+pulsar_topics_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_subscriptions_count gauge
+pulsar_subscriptions_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_producers_count gauge
+pulsar_producers_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_consumers_count gauge
+pulsar_consumers_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_rate_in gauge
+pulsar_rate_in{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_rate_out gauge
+pulsar_rate_out{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_throughput_in gauge
+pulsar_throughput_in{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_throughput_out gauge
+pulsar_throughput_out{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_size gauge
+pulsar_storage_size{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_write_rate gauge
+pulsar_storage_write_rate{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_read_rate gauge
+pulsar_storage_read_rate{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_msg_backlog gauge
+pulsar_msg_backlog{cluster="standalone"} 0 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="sample/dev"} 2 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev"} 6 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="sample/dev"} 4 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev"} 10 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="sample/dev"} 48.004 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="sample/dev"} 146.018 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev"} 2736.243 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev"} 8323.043 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="sample/dev"} 2684208 1583775788853
+# TYPE pulsar_storage_backlog_size gauge
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev"} 35452322 1583775788853
+# TYPE pulsar_storage_offloaded_size gauge
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="sample/dev"} 50.200 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="sample/dev"} 0.0 1583775788853
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",remote_cluster="local"} 8.0 1583775788853
+# TYPE pulsar_storage_write_latency_le_0_5 gauge
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_1 gauge
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev"} 23 1583775788853
+# TYPE pulsar_storage_write_latency_le_5 gauge
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev"} 2900 1583775788853
+# TYPE pulsar_storage_write_latency_le_10 gauge
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev"} 82 1583775788853
+# TYPE pulsar_storage_write_latency_le_20 gauge
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev"} 6 1583775788853
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev"} 1 1583775788853
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="public/functions"} 2 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions"} 35452322 1583775788853
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",remote_cluster="local"} 0.0 1583775788853
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="sample/prod"} 2 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod"} 4 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="sample/prod"} 4 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod"} 8 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="sample/prod"} 48.019 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="sample/prod"} 96.039 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod"} 2833.158 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod"} 5666.330 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="sample/prod"} 2784216 1583775788853
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod"} 35455322 1583775788853
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="sample/prod"} 50.016 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="sample/prod"} 0.0 1583775788853
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",remote_cluster="local"} 0.0 1583775788853
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod"} 20 1583775788853
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod"} 2897 1583775788853
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod"} 81 1583775788853
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod"} 1 1583775788853
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod"} 1 1583775788853
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod"} 3000 1583775788853
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod"} 3000 1583775788853
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod"} 3001 1583775788853
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod"} 0 1583775788853 +pulsar_entry_size_count{cluster="standalone",namespace="sample/prod"} 3001 1583775788853 +pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod"} 3001 1583775788853 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt new file mode 100644 index 00000000000000..ba5006094916bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt @@ -0,0 +1,748 @@ +# TYPE zookeeper_server_requests counter +zookeeper_server_requests{cluster="standalone",type="getData"} 777.0 +zookeeper_server_requests{cluster="standalone",type="setData"} 14.0 +zookeeper_server_requests{cluster="standalone",type="ping"} 955.0 +zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0 +zookeeper_server_requests{cluster="standalone",type="sync"} 21.0 +zookeeper_server_requests{cluster="standalone",type="delete"} 29.0 +zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0 +zookeeper_server_requests{cluster="standalone",type="multi"} 3.0 +zookeeper_server_requests{cluster="standalone",type="getChildren"} 47.0 +zookeeper_server_requests{cluster="standalone",type="getChildren2"} 121.0 +zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0 +zookeeper_server_requests{cluster="standalone",type="create"} 99.0 +zookeeper_server_requests{cluster="standalone",type="exists"} 340.0 +# TYPE zk_write_latency summary +zk_write_latency{cluster="standalone",quantile="0.5"} NaN +zk_write_latency{cluster="standalone",quantile="0.75"} NaN +zk_write_latency{cluster="standalone",quantile="0.95"} NaN +zk_write_latency{cluster="standalone",quantile="0.99"} NaN +zk_write_latency{cluster="standalone",quantile="0.999"} NaN +zk_write_latency{cluster="standalone",quantile="0.9999"} NaN +zk_write_latency_count{cluster="standalone"} 0.0 +zk_write_latency_sum{cluster="standalone"} 0.0 +# TYPE jetty_requests_total counter +jetty_requests_total{cluster="standalone"} 106.0 +# TYPE jetty_requests_active gauge +jetty_requests_active{cluster="standalone"} 1.0 +# TYPE jetty_requests_active_max gauge +jetty_requests_active_max{cluster="standalone"} 2.0 +# TYPE jetty_request_time_max_seconds gauge +jetty_request_time_max_seconds{cluster="standalone"} 0.453 +# TYPE jetty_request_time_seconds_total counter +jetty_request_time_seconds_total{cluster="standalone"} 1.595 +# TYPE jetty_dispatched_total counter +jetty_dispatched_total{cluster="standalone"} 106.0 +# TYPE jetty_dispatched_active gauge +jetty_dispatched_active{cluster="standalone"} 0.0 +# TYPE jetty_dispatched_active_max gauge +jetty_dispatched_active_max{cluster="standalone"} 2.0 +# TYPE jetty_dispatched_time_max gauge +jetty_dispatched_time_max{cluster="standalone"} 453.0 +# TYPE jetty_dispatched_time_seconds_total counter 
+jetty_dispatched_time_seconds_total{cluster="standalone"} 0.737 +# TYPE jetty_async_requests_total counter +jetty_async_requests_total{cluster="standalone"} 39.0 +# TYPE jetty_async_requests_waiting gauge +jetty_async_requests_waiting{cluster="standalone"} 1.0 +# TYPE jetty_async_requests_waiting_max gauge +jetty_async_requests_waiting_max{cluster="standalone"} 1.0 +# TYPE jetty_async_dispatches_total counter +jetty_async_dispatches_total{cluster="standalone"} 0.0 +# TYPE jetty_expires_total counter +jetty_expires_total{cluster="standalone"} 0.0 +# TYPE jetty_responses_total counter +jetty_responses_total{cluster="standalone",code="1xx"} 0.0 +jetty_responses_total{cluster="standalone",code="2xx"} 66.0 +jetty_responses_total{cluster="standalone",code="3xx"} 38.0 +jetty_responses_total{cluster="standalone",code="4xx"} 1.0 +jetty_responses_total{cluster="standalone",code="5xx"} 0.0 +# TYPE jetty_stats_seconds gauge +jetty_stats_seconds{cluster="standalone"} 565.434 +# TYPE jetty_responses_bytes_total counter +jetty_responses_bytes_total{cluster="standalone"} 2865485.0 +# TYPE jvm_info gauge +jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0 +# TYPE log4j2_appender_total counter +log4j2_appender_total{cluster="standalone",level="debug"} 0.0 +log4j2_appender_total{cluster="standalone",level="warn"} 44.0 +log4j2_appender_total{cluster="standalone",level="trace"} 0.0 +log4j2_appender_total{cluster="standalone",level="error"} 0.0 +log4j2_appender_total{cluster="standalone",level="fatal"} 0.0 +log4j2_appender_total{cluster="standalone",level="info"} 1437.0 +# TYPE zookeeper_server_connections gauge +zookeeper_server_connections{cluster="standalone"} 10.0 +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{cluster="standalone",area="heap"} 1.30309152E8 +jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.21050512E8 +# TYPE jvm_memory_bytes_committed gauge +jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.26242816E8 +# TYPE jvm_memory_bytes_max gauge +jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0 +# TYPE jvm_memory_bytes_init gauge +jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0 +# TYPE jvm_memory_pool_bytes_used gauge +jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 2.9851008E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.1522184E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9677320.0 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 2.2020096E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 7.0254592E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 3.8034464E7 +# TYPE jvm_memory_pool_bytes_committed gauge +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 3.014656E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.5532672E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0563584E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.058013184E9 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 7.0254592E7 
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9 +# TYPE jvm_memory_pool_bytes_max gauge +jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8 +jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9 +# TYPE jvm_memory_pool_bytes_init gauge +jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9 +# TYPE jvm_threads_current gauge +jvm_threads_current{cluster="standalone"} 291.0 +# TYPE jvm_threads_daemon gauge +jvm_threads_daemon{cluster="standalone"} 49.0 +# TYPE jvm_threads_peak gauge +jvm_threads_peak{cluster="standalone"} 291.0 +# TYPE jvm_threads_started_total counter +jvm_threads_started_total{cluster="standalone"} 331.0 +# TYPE jvm_threads_deadlocked gauge +jvm_threads_deadlocked{cluster="standalone"} 0.0 +# TYPE jvm_threads_deadlocked_monitor gauge +jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0 +# TYPE caffeine_cache_hit_total counter +caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 95.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 126.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 7.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_miss_total counter +caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 9.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 7.0 +caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 21.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_requests_total counter 
+caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 106.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 11.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 9.0 +caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 130.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 28.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_total counter +caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_weight gauge +caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_load_failure_total counter +caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0 
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 16.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_loads_total counter +caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 9.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 7.0 +caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 21.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_estimated_size gauge +caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 4.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 5.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_load_duration_seconds summary +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.136975304 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 9.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.064067898 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 7.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.100136473 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.079620575 
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 21.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.117346453 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE zk_read_latency summary +zk_read_latency{cluster="standalone",quantile="0.5"} NaN +zk_read_latency{cluster="standalone",quantile="0.75"} NaN +zk_read_latency{cluster="standalone",quantile="0.95"} NaN +zk_read_latency{cluster="standalone",quantile="0.99"} NaN +zk_read_latency{cluster="standalone",quantile="0.999"} NaN +zk_read_latency{cluster="standalone",quantile="0.9999"} NaN +zk_read_latency_count{cluster="standalone"} 0.0 +zk_read_latency_sum{cluster="standalone"} 0.0 +# TYPE topic_load_times summary +topic_load_times{cluster="standalone",quantile="0.5"} NaN +topic_load_times{cluster="standalone",quantile="0.75"} NaN +topic_load_times{cluster="standalone",quantile="0.95"} NaN +topic_load_times{cluster="standalone",quantile="0.99"} NaN +topic_load_times{cluster="standalone",quantile="0.999"} NaN +topic_load_times{cluster="standalone",quantile="0.9999"} NaN +topic_load_times_count{cluster="standalone"} 0.0 +topic_load_times_sum{cluster="standalone"} 0.0 +# TYPE jvm_classes_loaded gauge +jvm_classes_loaded{cluster="standalone"} 14323.0 +# TYPE jvm_classes_loaded_total counter +jvm_classes_loaded_total{cluster="standalone"} 14323.0 +# TYPE jvm_classes_unloaded_total counter +jvm_classes_unloaded_total{cluster="standalone"} 0.0 +# TYPE zookeeper_server_requests_latency_ms summary +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 0.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 1.0 +zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 2245.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 1340.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN 
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 182.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 461.0 +# TYPE zookeeper_server_watches_count gauge +zookeeper_server_watches_count{cluster="standalone"} 49.0 +# TYPE zookeeper_server_ephemerals_count gauge +zookeeper_server_ephemerals_count{cluster="standalone"} 12.0 +# TYPE jvm_buffer_pool_used_bytes gauge +jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 688964.0 +jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_buffer_pool_capacity_bytes gauge +jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 688963.0 +jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_buffer_pool_used_buffers gauge +jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0 +jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_gc_collection_seconds summary +jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 9.0 +jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 2.211 +jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0 +jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0 +# TYPE pulsar_broker_publish_latency summary +pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.01 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.333 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.313 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 11.05 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 11.05 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 11.05 +pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 11.05 +pulsar_broker_publish_latency_count{cluster="standalone"} 50123.0 +pulsar_broker_publish_latency_sum{cluster="standalone"} 116757.0 +# TYPE jvm_memory_direct_bytes_used gauge +jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9 +# TYPE zookeeper_server_znode_count gauge +zookeeper_server_znode_count{cluster="standalone"} 4215.0 +# TYPE zookeeper_server_data_size_bytes gauge +zookeeper_server_data_size_bytes{cluster="standalone"} 465029.0 +# TYPE jvm_memory_direct_bytes_max gauge +jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9 +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{cluster="standalone"} 284.97 +# TYPE process_start_time_seconds gauge +process_start_time_seconds{cluster="standalone"} 1.583777691467E9 +# TYPE process_open_fds gauge +process_open_fds{cluster="standalone"} 678.0 +# TYPE process_max_fds gauge +process_max_fds{cluster="standalone"} 1048576.0 +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{cluster="standalone"} 8.720920576E9 +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes{cluster="standalone"} 1.597915136E9 +# TYPE pulsar_topics_count gauge +pulsar_topics_count{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_subscriptions_count gauge +pulsar_subscriptions_count{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_producers_count gauge +pulsar_producers_count{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_consumers_count gauge +pulsar_consumers_count{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_rate_in gauge +pulsar_rate_in{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_rate_out gauge 
+pulsar_rate_out{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_throughput_in gauge +pulsar_throughput_in{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_throughput_out gauge +pulsar_throughput_out{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_storage_size gauge +pulsar_storage_size{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_storage_write_rate gauge +pulsar_storage_write_rate{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_storage_read_rate gauge +pulsar_storage_read_rate{cluster="standalone"} 0 1583778276679 +# TYPE pulsar_msg_backlog gauge +pulsar_msg_backlog{cluster="standalone"} 0 1583778276679 +pulsar_subscriptions_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2.0 1583778276679 +pulsar_producers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2.0 1583778276679 +pulsar_consumers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 4.0 1583778276679 +pulsar_rate_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 19.999 1583778276679 +pulsar_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 39.999 1583778276679 +pulsar_throughput_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1119.988 1583778276679 +pulsar_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2239.979 1583778276679 +pulsar_storage_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 698700.0 1583778276679 +pulsar_msg_backlog{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679 +# TYPE pulsar_storage_backlog_size gauge +pulsar_storage_backlog_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 10045.0 1583778276679 +# TYPE pulsar_storage_offloaded_size gauge +pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679 +# TYPE pulsar_storage_backlog_quota_limit gauge +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 10737418240.0 1583778276679 +# TYPE pulsar_storage_write_latency_le_0_5 gauge +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679 +# TYPE pulsar_storage_write_latency_le_1 gauge +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 13.0 1583778276679 +# TYPE pulsar_storage_write_latency_le_5 gauge +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1457.0 1583778276679 +# TYPE pulsar_storage_write_latency_le_10 gauge +pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 20.0 1583778276679 +# TYPE pulsar_storage_write_latency_le_20 gauge 
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 7.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1498.0 1583778276679
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1498.0 1583778276679
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_subscription_back_log gauge
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_msg_rate_redeliver gauge
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0.0 1583778276679
+# TYPE pulsar_subscription_unacked_messages gauge
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_blocked_on_unacked_messages gauge
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_msg_rate_out gauge
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 19.999 1583778276679
+# TYPE pulsar_subscription_msg_throughput_out gauge
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 1119.990 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 19.999 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 1119.989 1583778276679
+# TYPE pulsar_in_bytes_total gauge
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 698700.0 1583778276679
+# TYPE pulsar_in_messages_total gauge
+pulsar_in_messages_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 12521.0 1583778276679
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 4.0 1583778276679 +pulsar_producers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 2.0 1583778276679 +pulsar_consumers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 6.0 1583778276679 +pulsar_rate_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 21.0 1583778276679 +pulsar_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 84.0 1583778276679 +pulsar_throughput_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1176.007 1583778276679 +pulsar_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 4704.023 1583778276679 +pulsar_storage_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 698532.0 1583778276679 +pulsar_msg_backlog{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_backlog_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 10042.0 1583778276679 +pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 10737418240.0 1583778276679 +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1475.0 1583778276679 +pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 16.0 1583778276679 +pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 12.0 1583778276679 +pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1.0 1583778276679 +pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679 
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679 +pulsar_entry_size_le_128{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679 +pulsar_entry_size_le_512{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679 +pulsar_entry_size_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679 +pulsar_entry_size_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679 +pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0.0 1583778276679 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 21.0 1583778276679 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 1176.005 1583778276679 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679 +pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679 
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0.0 1583778276679 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 21.0 1583778276679 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 1176.007 1583778276679 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679 +pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0.0 1583778276679 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 21.0 1583778276679 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 1176.004 1583778276679 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679 +pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0.0 1583778276679 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 21.0 1583778276679 
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 1176.006 1583778276679
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 698532.0 1583778276679
+pulsar_in_messages_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 12518.0 1583778276679
+pulsar_topics_count{cluster="standalone",namespace="sample/playground"} 2 1583778276679
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 4.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 18.999 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 37.998 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1101.966 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2203.924 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 725250.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 10071.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 5.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1474.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 24.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 7.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 18.999 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 1101.962 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 18.999 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 1101.961 1583778276680 +pulsar_in_bytes_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 725250.0 1583778276680 +pulsar_in_messages_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 12547.0 1583778276680 +pulsar_subscriptions_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680 +pulsar_producers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680 +pulsar_consumers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 4.0 1583778276680 +pulsar_rate_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 19.999 1583778276680 +pulsar_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 39.998 1583778276680 +pulsar_throughput_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1159.956 1583778276680 +pulsar_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2319.911 1583778276680 +pulsar_storage_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 724728.0 1583778276680 +pulsar_msg_backlog{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_backlog_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 10062.0 1583778276680 +pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 10737418240.0 1583778276680 +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 4.0 1583778276680 +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1487.0 1583778276680 +pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 19.0 1583778276680 +pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 5.0 1583778276680 +pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680 +pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680 +pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680 +pulsar_entry_size_le_128{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680 +pulsar_entry_size_le_512{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680 +pulsar_entry_size_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680 +pulsar_entry_size_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680 +pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0.0 1583778276680 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 19.999 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 1159.955 1583778276680 +pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680 
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0.0 1583778276680 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 19.999 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 1159.955 1583778276680 +pulsar_in_bytes_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 724844.0 1583778276680 +pulsar_in_messages_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 12540.0 1583778276680 +pulsar_topics_count{cluster="standalone",namespace="sample/test"} 4 1583778276680 +pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680 +pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680 +pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680 +pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 10737418240.0 1583778276680 +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680 +pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680 
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680 +pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680 +pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583778276680 +pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583778276680 +pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 10737418240.0 1583778276680 +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680 +pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680 
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680 +pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680 +pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680 +pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680 +pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680 +pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680 +pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 10737418240.0 1583778276680 +pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680 +pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680 +pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680 +pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680 +pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680 
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680 +pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680 +pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680 +pulsar_topics_count{cluster="standalone",namespace="public/functions"} 7 1583778276680 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics.txt b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics.txt new file mode 100644 index 00000000000000..7e0f0212a13d81 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/pulsar/testdata/standalone-v2.5.0-topics.txt @@ -0,0 +1,748 @@ +# TYPE jvm_buffer_pool_used_bytes gauge +jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 698586.0 +jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_buffer_pool_capacity_bytes gauge +jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 698585.0 +jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_buffer_pool_used_buffers gauge +jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0 +jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0 +# TYPE jvm_memory_direct_bytes_used gauge +jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9 +# TYPE zk_write_latency summary +zk_write_latency{cluster="standalone",quantile="0.5"} NaN +zk_write_latency{cluster="standalone",quantile="0.75"} NaN +zk_write_latency{cluster="standalone",quantile="0.95"} NaN +zk_write_latency{cluster="standalone",quantile="0.99"} NaN +zk_write_latency{cluster="standalone",quantile="0.999"} NaN +zk_write_latency{cluster="standalone",quantile="0.9999"} NaN +zk_write_latency_count{cluster="standalone"} 0.0 +zk_write_latency_sum{cluster="standalone"} 0.0 +# TYPE jvm_memory_bytes_used gauge +jvm_memory_bytes_used{cluster="standalone",area="heap"} 1.05170488E9 +jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.35478104E8 +# TYPE jvm_memory_bytes_committed gauge +jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.41377536E8 +# TYPE jvm_memory_bytes_max gauge +jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0 +# TYPE jvm_memory_bytes_init gauge +jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9 +jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0 +# TYPE jvm_memory_pool_bytes_used gauge +jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 4.147872E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.4205296E7 +jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9794088.0 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 9.17504E8 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 4194304.0 +jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 
1.30006576E8 +# TYPE jvm_memory_pool_bytes_committed gauge +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 4.1811968E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.8817664E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0747904E7 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.124073472E9 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 4194304.0 +jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9 +# TYPE jvm_memory_pool_bytes_max gauge +jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8 +jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0 +jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9 +# TYPE jvm_memory_pool_bytes_init gauge +jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0 +jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9 +# TYPE log4j2_appender_total counter +log4j2_appender_total{cluster="standalone",level="debug"} 0.0 +log4j2_appender_total{cluster="standalone",level="warn"} 307.0 +log4j2_appender_total{cluster="standalone",level="trace"} 0.0 +log4j2_appender_total{cluster="standalone",level="error"} 0.0 +log4j2_appender_total{cluster="standalone",level="fatal"} 0.0 +log4j2_appender_total{cluster="standalone",level="info"} 17746.0 +# TYPE jetty_requests_total counter +jetty_requests_total{cluster="standalone"} 13063.0 +# TYPE jetty_requests_active gauge +jetty_requests_active{cluster="standalone"} 1.0 +# TYPE jetty_requests_active_max gauge +jetty_requests_active_max{cluster="standalone"} 2.0 +# TYPE jetty_request_time_max_seconds gauge +jetty_request_time_max_seconds{cluster="standalone"} 1.02 +# TYPE jetty_request_time_seconds_total counter +jetty_request_time_seconds_total{cluster="standalone"} 64.787 +# TYPE jetty_dispatched_total counter +jetty_dispatched_total{cluster="standalone"} 13063.0 +# TYPE jetty_dispatched_active gauge +jetty_dispatched_active{cluster="standalone"} 0.0 +# TYPE jetty_dispatched_active_max gauge +jetty_dispatched_active_max{cluster="standalone"} 2.0 +# TYPE jetty_dispatched_time_max gauge +jetty_dispatched_time_max{cluster="standalone"} 345.0 +# TYPE jetty_dispatched_time_seconds_total counter +jetty_dispatched_time_seconds_total{cluster="standalone"} 5.054 +# TYPE jetty_async_requests_total counter +jetty_async_requests_total{cluster="standalone"} 6480.0 +# TYPE jetty_async_requests_waiting gauge +jetty_async_requests_waiting{cluster="standalone"} 1.0 +# TYPE jetty_async_requests_waiting_max gauge +jetty_async_requests_waiting_max{cluster="standalone"} 2.0 +# TYPE jetty_async_dispatches_total counter +jetty_async_dispatches_total{cluster="standalone"} 0.0 +# TYPE jetty_expires_total counter +jetty_expires_total{cluster="standalone"} 0.0 +# TYPE jetty_responses_total counter 
+jetty_responses_total{cluster="standalone",code="1xx"} 0.0 +jetty_responses_total{cluster="standalone",code="2xx"} 6683.0 +jetty_responses_total{cluster="standalone",code="3xx"} 6378.0 +jetty_responses_total{cluster="standalone",code="4xx"} 1.0 +jetty_responses_total{cluster="standalone",code="5xx"} 0.0 +# TYPE jetty_stats_seconds gauge +jetty_stats_seconds{cluster="standalone"} 5822.682 +# TYPE jetty_responses_bytes_total counter +jetty_responses_bytes_total{cluster="standalone"} 4.89996508E8 +# TYPE zookeeper_server_requests_latency_ms summary +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 2.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 5.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 5.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 5.0 +zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 17769.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 9455.0 +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN +zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN +zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 2091.0 +zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 3930.0 +# TYPE jvm_info gauge +jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0 +# TYPE zookeeper_server_connections gauge +zookeeper_server_connections{cluster="standalone"} 10.0 +# TYPE caffeine_cache_hit_total counter +caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 714.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 758.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 10.0 +caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_miss_total counter +caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 8.0 +caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 79.0 +caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_requests_total counter +caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 725.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 9.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 10.0 +caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 762.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 89.0 +caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_total counter +caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 5.0 +caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_eviction_weight gauge +caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 5.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0 +# 
TYPE caffeine_cache_load_failure_total counter +caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 74.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_loads_total counter +caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 8.0 +caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 79.0 +caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_estimated_size gauge +caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 2.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE caffeine_cache_load_duration_seconds summary +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.064524869 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0 
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 7.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.020761008 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 8.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.075053592 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.022866292 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 79.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.424431063 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0 +caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0 +caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0 +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total{cluster="standalone"} 2554.5 +# TYPE process_start_time_seconds gauge +process_start_time_seconds{cluster="standalone"} 1.583768876396E9 +# TYPE process_open_fds gauge +process_open_fds{cluster="standalone"} 678.0 +# TYPE process_max_fds gauge +process_max_fds{cluster="standalone"} 1048576.0 +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes{cluster="standalone"} 8.749596672E9 +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes{cluster="standalone"} 1.679040512E9 +# TYPE jvm_classes_loaded gauge +jvm_classes_loaded{cluster="standalone"} 14522.0 +# TYPE jvm_classes_loaded_total counter +jvm_classes_loaded_total{cluster="standalone"} 14522.0 +# TYPE jvm_classes_unloaded_total counter +jvm_classes_unloaded_total{cluster="standalone"} 0.0 +# TYPE zk_read_latency summary +zk_read_latency{cluster="standalone",quantile="0.5"} NaN +zk_read_latency{cluster="standalone",quantile="0.75"} NaN +zk_read_latency{cluster="standalone",quantile="0.95"} NaN +zk_read_latency{cluster="standalone",quantile="0.99"} NaN +zk_read_latency{cluster="standalone",quantile="0.999"} NaN +zk_read_latency{cluster="standalone",quantile="0.9999"} NaN +zk_read_latency_count{cluster="standalone"} 0.0 +zk_read_latency_sum{cluster="standalone"} 0.0 +# TYPE zookeeper_server_requests counter +zookeeper_server_requests{cluster="standalone",type="getData"} 2948.0 +zookeeper_server_requests{cluster="standalone",type="setData"} 270.0 +zookeeper_server_requests{cluster="standalone",type="ping"} 9679.0 +zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0 +zookeeper_server_requests{cluster="standalone",type="sync"} 225.0 +zookeeper_server_requests{cluster="standalone",type="delete"} 1099.0 +zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0 +zookeeper_server_requests{cluster="standalone",type="multi"} 311.0 +zookeeper_server_requests{cluster="standalone",type="getChildren"} 840.0 
+zookeeper_server_requests{cluster="standalone",type="getChildren2"} 889.0 +zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0 +zookeeper_server_requests{cluster="standalone",type="create"} 478.0 +zookeeper_server_requests{cluster="standalone",type="exists"} 3100.0 +# TYPE pulsar_broker_publish_latency summary +pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.521 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.295 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.139 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 19.977 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 19.977 +pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 19.977 +pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 19.977 +pulsar_broker_publish_latency_count{cluster="standalone"} 540306.0 +pulsar_broker_publish_latency_sum{cluster="standalone"} 1410934.0 +# TYPE zookeeper_server_watches_count gauge +zookeeper_server_watches_count{cluster="standalone"} 37.0 +# TYPE zookeeper_server_ephemerals_count gauge +zookeeper_server_ephemerals_count{cluster="standalone"} 12.0 +# TYPE topic_load_times summary +topic_load_times{cluster="standalone",quantile="0.5"} NaN +topic_load_times{cluster="standalone",quantile="0.75"} NaN +topic_load_times{cluster="standalone",quantile="0.95"} NaN +topic_load_times{cluster="standalone",quantile="0.99"} NaN +topic_load_times{cluster="standalone",quantile="0.999"} NaN +topic_load_times{cluster="standalone",quantile="0.9999"} NaN +topic_load_times_count{cluster="standalone"} 0.0 +topic_load_times_sum{cluster="standalone"} 0.0 +# TYPE jvm_gc_collection_seconds summary +jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 64.0 +jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 13.761 +jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0 +jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0 +# TYPE jvm_memory_direct_bytes_max gauge +jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9 +# TYPE zookeeper_server_znode_count gauge +zookeeper_server_znode_count{cluster="standalone"} 4157.0 +# TYPE zookeeper_server_data_size_bytes gauge +zookeeper_server_data_size_bytes{cluster="standalone"} 457035.0 +# TYPE jvm_threads_current gauge +jvm_threads_current{cluster="standalone"} 303.0 +# TYPE jvm_threads_daemon gauge +jvm_threads_daemon{cluster="standalone"} 49.0 +# TYPE jvm_threads_peak gauge +jvm_threads_peak{cluster="standalone"} 306.0 +# TYPE jvm_threads_started_total counter +jvm_threads_started_total{cluster="standalone"} 474.0 +# TYPE jvm_threads_deadlocked gauge +jvm_threads_deadlocked{cluster="standalone"} 0.0 +# TYPE jvm_threads_deadlocked_monitor gauge +jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0 +# TYPE pulsar_topics_count gauge +pulsar_topics_count{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_subscriptions_count gauge +pulsar_subscriptions_count{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_producers_count gauge +pulsar_producers_count{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_consumers_count gauge +pulsar_consumers_count{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_rate_in gauge +pulsar_rate_in{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_rate_out gauge +pulsar_rate_out{cluster="standalone"} 0 1583774714170 +# TYPE pulsar_throughput_in gauge 
+pulsar_throughput_in{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_throughput_out gauge
+pulsar_throughput_out{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_size gauge
+pulsar_storage_size{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_write_rate gauge
+pulsar_storage_write_rate{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_read_rate gauge
+pulsar_storage_read_rate{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_msg_backlog gauge
+pulsar_msg_backlog{cluster="standalone"} 0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 4.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 25.013 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 50.027 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1450.789 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2901.607 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1951642.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_backlog_size gauge
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_offloaded_size gauge
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_backlog_quota_limit gauge
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 10737418240.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_0_5 gauge
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_1 gauge
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 10.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_5 gauge
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1308.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_10 gauge
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 95.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_20 gauge
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 26.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 9.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_subscription_back_log gauge
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_msg_rate_redeliver gauge
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0.0 1583774714170
+# TYPE pulsar_subscription_unacked_messages gauge
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_blocked_on_unacked_messages gauge
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_msg_rate_out gauge
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 25.013 1583774714170
+# TYPE pulsar_subscription_msg_throughput_out gauge
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 1450.808 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 25.013 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 1450.799 1583774714170
+# TYPE pulsar_in_bytes_total gauge
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 7657655.0 1583774714170
+# TYPE pulsar_in_messages_total gauge
+pulsar_in_messages_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 133649.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 4.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 2.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 6.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 25.014 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 100.060 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1450.862 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 5803.500 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 2029478.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 15.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1342.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 82.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 28.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 9.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 1450.873 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 1450.878 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 1450.881 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 25.014 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 1450.866 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 7730949.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 134992.0 1583774714170
+pulsar_topics_count{cluster="standalone",namespace="sample/dev"} 2 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_topics_count{cluster="standalone",namespace="public/functions"} 5 1583774714171
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2.0 1583774714171
+pulsar_producers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2.0 1583774714171
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 4.0 1583774714171
+pulsar_rate_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 26.018 1583774714171
+pulsar_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 52.037 1583774714171
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1561.110 1583774714171
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 3122.248 1583774714171
+pulsar_storage_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2108760.0 1583774714171
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 10737418240.0 1583774714171
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 9.0 1583774714171
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1339.0 1583774714171
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 80.0 1583774714171
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 34.0 1583774714171
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 10.0 1583774714171
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1.0 1583774714171
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 26.018 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 1561.118 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 26.018 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 1561.130 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 8010057.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 135146.0 1583774714171
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2.0 1583774714171
+pulsar_producers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2.0 1583774714171
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 4.0 1583774714171
+pulsar_rate_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 26.019 1583774714171
+pulsar_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 52.038 1583774714171
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1561.151 1583774714171
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 3122.322 1583774714171
+pulsar_storage_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2022420.0 1583774714171
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 10737418240.0 1583774714171
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 7.0 1583774714171
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1339.0 1583774714171
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 84.0 1583774714171
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 26.0 1583774714171
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 12.0 1583774714171
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1.0 1583774714171
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 26.019 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 1561.165 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 26.019 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 1561.157 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 7928433.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 133707.0 1583774714171
+pulsar_topics_count{cluster="standalone",namespace="sample/prod"} 7 1583774714171
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/README.md b/src/go/collectors/go.d.plugin/modules/rabbitmq/README.md
new file mode 120000
index 00000000000000..0119db91ae1a63
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/README.md
@@ -0,0 +1 @@
+integrations/rabbitmq.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/charts.go b/src/go/collectors/go.d.plugin/modules/rabbitmq/charts.go
new file mode 100644
index 00000000000000..c2b58e04bbee0b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/charts.go
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+const (
+	prioMessagesCount = module.Priority + iota
+	prioMessagesRate
+
+	prioObjectsCount
+
+	prioConnectionChurnRate
+	prioChannelChurnRate
+	prioQueueChurnRate
+
+	prioFileDescriptorsCount
+	prioSocketsCount
+	prioErlangProcessesCount
+	prioErlangRunQueueProcessesCount
+	prioMemoryUsage
+	prioDiskSpaceFreeSize
+
+	prioVhostMessagesCount
+	prioVhostMessagesRate
+
+	prioQueueMessagesCount
+	prioQueueMessagesRate
+)
+
+var baseCharts = module.Charts{
+	chartMessagesCount.Copy(),
+	chartMessagesRate.Copy(),
+
+	chartObjectsCount.Copy(),
+
+	chartConnectionChurnRate.Copy(),
+	chartChannelChurnRate.Copy(),
+	chartQueueChurnRate.Copy(),
+
+	chartFileDescriptorsCount.Copy(),
+	chartSocketsCount.Copy(),
+	chartErlangProcessesCount.Copy(),
+	chartErlangRunQueueProcessesCount.Copy(),
+	chartMemoryUsage.Copy(),
+	chartDiskSpaceFreeSize.Copy(),
+}
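+
+// chartsTmplVhost and chartsTmplQueue are chart templates: their chart IDs
+// and dim IDs contain %s placeholders that addVhostCharts/addQueueCharts
+// fill in for each discovered vhost and queue.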
+
+var chartsTmplVhost = module.Charts{
+	chartTmplVhostMessagesCount.Copy(),
+	chartTmplVhostMessagesRate.Copy(),
+}
+
+var chartsTmplQueue = module.Charts{
+	chartTmplQueueMessagesCount.Copy(),
+	chartTmplQueueMessagesRate.Copy(),
+}
+
+var (
+	chartMessagesCount = module.Chart{
+		ID: "messages_count",
+		Title: "Messages",
+		Units: "messages",
+		Fam: "messages",
+		Ctx: "rabbitmq.messages_count",
+		Type: module.Stacked,
+		Priority: prioMessagesCount,
+		Dims: module.Dims{
+			{ID: "queue_totals_messages_ready", Name: "ready"},
+			{ID: "queue_totals_messages_unacknowledged", Name: "unacknowledged"},
+		},
+	}
+	chartMessagesRate = module.Chart{
+		ID: "messages_rate",
+		Title: "Messages",
+		Units: "messages/s",
+		Fam: "messages",
+		Ctx: "rabbitmq.messages_rate",
+		Priority: prioMessagesRate,
+		Dims: module.Dims{
+			{ID: "message_stats_ack", Name: "ack", Algo: module.Incremental},
+			{ID: "message_stats_publish", Name: "publish", Algo: module.Incremental},
+			{ID: "message_stats_publish_in", Name: "publish_in", Algo: module.Incremental},
+			{ID: "message_stats_publish_out", Name: "publish_out", Algo: module.Incremental},
+			{ID: "message_stats_confirm", Name: "confirm", Algo: module.Incremental},
+			{ID: "message_stats_deliver", Name: "deliver", Algo: module.Incremental},
+			{ID: "message_stats_deliver_no_ack", Name: "deliver_no_ack", Algo: module.Incremental},
+			{ID: "message_stats_get", Name: "get", Algo: module.Incremental},
+			{ID: "message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental},
+			{ID: "message_stats_deliver_get", Name: "deliver_get", Algo: module.Incremental},
+			{ID: "message_stats_redeliver", Name: "redeliver", Algo: module.Incremental},
+			{ID: "message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental},
+		},
+	}
+	chartObjectsCount = module.Chart{
+		ID: "objects_count",
+		Title: "Objects",
+		Units: "objects",
+		Fam: "objects",
+		Ctx: "rabbitmq.objects_count",
+		Priority: prioObjectsCount,
+		Dims: module.Dims{
+			{ID: "object_totals_channels", Name: "channels"},
+			{ID: "object_totals_consumers", Name: "consumers"},
+			{ID: "object_totals_connections", Name: "connections"},
+			{ID: "object_totals_queues", Name: "queues"},
+			{ID: "object_totals_exchanges", Name: "exchanges"},
+		},
+	}
+
+	chartConnectionChurnRate = module.Chart{
+		ID: "connection_churn_rate",
+		Title: "Connection churn",
+		Units: "operations/s",
+		Fam: "churn",
+		Ctx: "rabbitmq.connection_churn_rate",
+		Priority: prioConnectionChurnRate,
+		Dims: module.Dims{
+			{ID: "churn_rates_connection_created", Name: "created", Algo: module.Incremental},
+			{ID: "churn_rates_connection_closed", Name: "closed", Algo: module.Incremental},
+		},
+	}
+	chartChannelChurnRate = module.Chart{
+		ID: "channel_churn_rate",
+		Title: "Channel churn",
+		Units: "operations/s",
+		Fam: "churn",
+		Ctx: "rabbitmq.channel_churn_rate",
+		Priority: prioChannelChurnRate,
+		Dims: module.Dims{
+			{ID: "churn_rates_channel_created", Name: "created", Algo: module.Incremental},
+			{ID: "churn_rates_channel_closed", Name: "closed", Algo: module.Incremental},
+		},
+	}
+	chartQueueChurnRate = module.Chart{
+		ID: "queue_churn_rate",
+		Title: "Queue churn",
+		Units: "operations/s",
+		Fam: "churn",
+		Ctx: "rabbitmq.queue_churn_rate",
+		Priority: prioQueueChurnRate,
+		Dims: module.Dims{
+			{ID: "churn_rates_queue_created", Name: "created", Algo: module.Incremental},
+			{ID: "churn_rates_queue_deleted", Name: "deleted", Algo: module.Incremental},
+			{ID: "churn_rates_queue_declared", Name: "declared", Algo: module.Incremental},
+		},
+	}
+)
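+
+// Dims with Algo: module.Incremental hold monotonically increasing counters;
+// the agent converts them to per-second rates when rendering the chart.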
"node stats", + Ctx: "rabbitmq.file_descriptors_count", + Type: module.Stacked, + Priority: prioFileDescriptorsCount, + Dims: module.Dims{ + {ID: "fd_total", Name: "available"}, + {ID: "fd_used", Name: "used"}, + }, + } + chartSocketsCount = module.Chart{ + ID: "sockets_used_count", + Title: "Used sockets", + Units: "sockets", + Fam: "node stats", + Ctx: "rabbitmq.sockets_count", + Type: module.Stacked, + Priority: prioSocketsCount, + Dims: module.Dims{ + {ID: "sockets_total", Name: "available"}, + {ID: "sockets_used", Name: "used"}, + }, + } + chartErlangProcessesCount = module.Chart{ + ID: "erlang_processes_count", + Title: "Erlang processes", + Units: "processes", + Fam: "node stats", + Ctx: "rabbitmq.erlang_processes_count", + Type: module.Stacked, + Priority: prioErlangProcessesCount, + Dims: module.Dims{ + {ID: "proc_available", Name: "available"}, + {ID: "proc_used", Name: "used"}, + }, + } + chartErlangRunQueueProcessesCount = module.Chart{ + ID: "erlang_run_queue_processes_count", + Title: "Erlang run queue", + Units: "processes", + Fam: "node stats", + Ctx: "rabbitmq.erlang_run_queue_processes_count", + Priority: prioErlangRunQueueProcessesCount, + Dims: module.Dims{ + {ID: "run_queue", Name: "length"}, + }, + } + chartMemoryUsage = module.Chart{ + ID: "memory_usage", + Title: "Memory", + Units: "bytes", + Fam: "node stats", + Ctx: "rabbitmq.memory_usage", + Priority: prioMemoryUsage, + Dims: module.Dims{ + {ID: "mem_used", Name: "used"}, + }, + } + chartDiskSpaceFreeSize = module.Chart{ + ID: "disk_space_free_size", + Title: "Free disk space", + Units: "bytes", + Fam: "node stats", + Ctx: "rabbitmq.disk_space_free_size", + Type: module.Area, + Priority: prioDiskSpaceFreeSize, + Dims: module.Dims{ + {ID: "disk_free", Name: "free"}, + }, + } +) + +var ( + chartTmplVhostMessagesCount = module.Chart{ + ID: "vhost_%s_message_count", + Title: "Vhost messages", + Units: "messages", + Fam: "vhost messages", + Ctx: "rabbitmq.vhost_messages_count", + Type: module.Stacked, + Priority: prioVhostMessagesCount, + Dims: module.Dims{ + {ID: "vhost_%s_messages_ready", Name: "ready"}, + {ID: "vhost_%s_messages_unacknowledged", Name: "unacknowledged"}, + }, + } + chartTmplVhostMessagesRate = module.Chart{ + ID: "vhost_%s_message_stats", + Title: "Vhost messages rate", + Units: "messages/s", + Fam: "vhost messages", + Ctx: "rabbitmq.vhost_messages_rate", + Type: module.Stacked, + Priority: prioVhostMessagesRate, + Dims: module.Dims{ + {ID: "vhost_%s_message_stats_ack", Name: "ack", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_confirm", Name: "confirm", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_deliver", Name: "deliver", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_get", Name: "get", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_publish", Name: "publish", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_redeliver", Name: "redeliver", Algo: module.Incremental}, + {ID: "vhost_%s_message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental}, + }, + } +) + +var ( + chartTmplQueueMessagesCount = module.Chart{ + ID: "queue_%s_vhost_%s_message_count", + Title: "Queue messages", + Units: "messages", + Fam: "queue messages", + Ctx: "rabbitmq.queue_messages_count", + Type: module.Stacked, + Priority: prioQueueMessagesCount, + Dims: module.Dims{ + {ID: "queue_%s_vhost_%s_messages_ready", Name: "ready"}, + {ID: 
"queue_%s_vhost_%s_messages_unacknowledged", Name: "unacknowledged"}, + {ID: "queue_%s_vhost_%s_messages_paged_out", Name: "paged_out"}, + {ID: "queue_%s_vhost_%s_messages_persistent", Name: "persistent"}, + }, + } + chartTmplQueueMessagesRate = module.Chart{ + ID: "queue_%s_vhost_%s_message_stats", + Title: "Queue messages rate", + Units: "messages/s", + Fam: "queue messages", + Ctx: "rabbitmq.queue_messages_rate", + Type: module.Stacked, + Priority: prioQueueMessagesRate, + Dims: module.Dims{ + {ID: "queue_%s_vhost_%s_message_stats_ack", Name: "ack", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_confirm", Name: "confirm", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_deliver", Name: "deliver", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_get", Name: "get", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_publish", Name: "publish", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_redeliver", Name: "redeliver", Algo: module.Incremental}, + {ID: "queue_%s_vhost_%s_message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental}, + }, + } +) + +func (r *RabbitMQ) addVhostCharts(name string) { + charts := chartsTmplVhost.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, forbiddenCharsReplacer.Replace(name)) + chart.Labels = []module.Label{ + {Key: "vhost", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := r.Charts().Add(*charts...); err != nil { + r.Warning(err) + } +} + +func (r *RabbitMQ) removeVhostCharts(vhost string) { + px := fmt.Sprintf("vhost_%s_", forbiddenCharsReplacer.Replace(vhost)) + for _, chart := range *r.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func (r *RabbitMQ) addQueueCharts(queue, vhost string) { + charts := chartsTmplQueue.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, forbiddenCharsReplacer.Replace(queue), forbiddenCharsReplacer.Replace(vhost)) + chart.Labels = []module.Label{ + {Key: "queue", Value: queue}, + {Key: "vhost", Value: vhost}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, queue, vhost) + } + } + + if err := r.Charts().Add(*charts...); err != nil { + r.Warning(err) + } +} + +func (r *RabbitMQ) removeQueueCharts(queue, vhost string) { + px := fmt.Sprintf("queue_%s_vhost_%s_", forbiddenCharsReplacer.Replace(queue), forbiddenCharsReplacer.Replace(vhost)) + for _, chart := range *r.Charts() { + if strings.HasPrefix(chart.ID, px) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_") diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/collect.go b/src/go/collectors/go.d.plugin/modules/rabbitmq/collect.go new file mode 100644 index 00000000000000..785c550a01b57e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/collect.go @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package rabbitmq + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "path/filepath" + + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + urlPathAPIOverview = "/api/overview" + urlPathAPINodes = "/api/nodes/" + urlPathAPIVhosts = "/api/vhosts" + urlPathAPIQueues = "/api/queues" +) + +// TODO: 
there is a built-in Prometheus collector since v3.8.0 (https://www.rabbitmq.com/prometheus.html). +// We should use it (in addition?), as it is the recommended option according to the docs. +func (r *RabbitMQ) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + if err := r.collectOverviewStats(mx); err != nil { + return nil, err + } + if err := r.collectNodeStats(mx); err != nil { + return mx, err + } + if err := r.collectVhostsStats(mx); err != nil { + return mx, err + } + if r.CollectQueues { + if err := r.collectQueuesStats(mx); err != nil { + return mx, err + } + } + + return mx, nil +} + +func (r *RabbitMQ) collectOverviewStats(mx map[string]int64) error { + var stats overviewStats + if err := r.doOKDecode(urlPathAPIOverview, &stats); err != nil { + return err + } + + if r.nodeName == "" { + r.nodeName = stats.Node + } + + for k, v := range stm.ToMap(stats) { + mx[k] = v + } + + return nil +} + +func (r *RabbitMQ) collectNodeStats(mx map[string]int64) error { + if r.nodeName == "" { + return nil + } + + var stats nodeStats + if err := r.doOKDecode(filepath.Join(urlPathAPINodes, r.nodeName), &stats); err != nil { + return err + } + + for k, v := range stm.ToMap(stats) { + mx[k] = v + } + mx["proc_available"] = int64(stats.ProcTotal - stats.ProcUsed) + + return nil +} + +func (r *RabbitMQ) collectVhostsStats(mx map[string]int64) error { + var stats []vhostStats + if err := r.doOKDecode(urlPathAPIVhosts, &stats); err != nil { + return err + } + + seen := make(map[string]bool) + + for _, vhost := range stats { + seen[vhost.Name] = true + for k, v := range stm.ToMap(vhost) { + mx[fmt.Sprintf("vhost_%s_%s", vhost.Name, k)] = v + } + } + + for name := range seen { + if !r.vhosts[name] { + r.vhosts[name] = true + r.Debugf("new vhost name='%s': creating charts", name) + r.addVhostCharts(name) + } + } + for name := range r.vhosts { + if !seen[name] { + delete(r.vhosts, name) + r.Debugf("stale vhost name='%s': removing charts", name) + r.removeVhostCharts(name) + } + } + + return nil +} + +func (r *RabbitMQ) collectQueuesStats(mx map[string]int64) error { + var stats []queueStats + if err := r.doOKDecode(urlPathAPIQueues, &stats); err != nil { + return err + } + + seen := make(map[string]queueCache) + + for _, queue := range stats { + seen[queue.Name+"|"+queue.Vhost] = queueCache{name: queue.Name, vhost: queue.Vhost} + for k, v := range stm.ToMap(queue) { + mx[fmt.Sprintf("queue_%s_vhost_%s_%s", queue.Name, queue.Vhost, k)] = v + } + } + + for key, queue := range seen { + if _, ok := r.queues[key]; !ok { + r.queues[key] = queue + r.Debugf("new queue name='%s', vhost='%s': creating charts", queue.name, queue.vhost) + r.addQueueCharts(queue.name, queue.vhost) + } + } + for key, queue := range r.queues { + if _, ok := seen[key]; !ok { + delete(r.queues, key) + r.Debugf("stale queue name='%s', vhost='%s': removing charts", queue.name, queue.vhost) + r.removeQueueCharts(queue.name, queue.vhost) + } + } + + return nil +} + +func (r *RabbitMQ) doOKDecode(urlPath string, in interface{}) error { + req, err := web.NewHTTPRequest(r.Request.Copy()) + if err != nil { + return fmt.Errorf("error on creating request: %v", err) + } + + req.URL.Path = urlPath + + r.Debugf("doing HTTP %s to '%s'", req.Method, req.URL) + resp, err := r.httpClient.Do(req) + if err != nil { + return fmt.Errorf("error on request to %s: %v", req.URL, err) + } + + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %d (%s)", req.URL, resp.StatusCode, resp.Status) + }
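+ // Decode the JSON body straight into the caller-supplied destination;
+ // each collect*Stats helper reuses this single request/validate/decode path.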
+ + if err = json.NewDecoder(resp.Body).Decode(&in); err != nil { + return fmt.Errorf("error on decoding response from %s: %v", req.URL, err) + } + + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/config_schema.json b/src/go/collectors/go.d.plugin/modules/rabbitmq/config_schema.json new file mode 100644 index 00000000000000..ad9f0e7b042868 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/config_schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/rabbitmq job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_queues_metrics": { + "type": "boolean" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/integrations/rabbitmq.md b/src/go/collectors/go.d.plugin/modules/rabbitmq/integrations/rabbitmq.md new file mode 100644 index 00000000000000..e7acf0cc1e2ec0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/integrations/rabbitmq.md @@ -0,0 +1,265 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/rabbitmq/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/rabbitmq/metadata.yaml" +sidebar_label: "RabbitMQ" +learn_status: "Published" +learn_rel_path: "Data Collection/Message Brokers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# RabbitMQ + + +<img src="https://netdata.cloud/img/rabbitmq.svg" width="150"/> + + +Plugin: go.d.plugin +Module: rabbitmq + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors RabbitMQ instances. + +It collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html). +The following endpoints are used: + +- `/api/overview` +- `/api/nodes/{node_name}` +- `/api/vhosts` +- `/api/queues` (disabled by default) + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
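+For the queue scope, for example, each element of the `/api/queues` response becomes one instance. The minimal sketch below (not part of the collector; it assumes the management API at `localhost:15672` with the default `guest`/`guest` credentials, so adjust both for your deployment) lists each queue with the `vhost` and `queue` label values that identify it:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// queue mirrors the two fields of an /api/queues element that become labels.
+type queue struct {
+	Name  string `json:"name"`  // -> "queue" label
+	Vhost string `json:"vhost"` // -> "vhost" label
+}
+
+func main() {
+	req, err := http.NewRequest(http.MethodGet, "http://localhost:15672/api/queues", nil)
+	if err != nil {
+		panic(err)
+	}
+	req.SetBasicAuth("guest", "guest") // assumed default credentials
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var queues []queue
+	if err := json.NewDecoder(resp.Body).Decode(&queues); err != nil {
+		panic(err)
+	}
+	for _, q := range queues {
+		fmt.Printf("instance: vhost=%q queue=%q\n", q.Vhost, q.Name)
+	}
+}
+```
+
+Each printed pair maps to one row of the per-queue charts described below.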
+ + + +### Per RabbitMQ instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| rabbitmq.messages_count | ready, unacknowledged | messages | +| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s | +| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | objects | +| rabbitmq.connection_churn_rate | created, closed | operations/s | +| rabbitmq.channel_churn_rate | created, closed | operations/s | +| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s | +| rabbitmq.file_descriptors_count | available, used | fd | +| rabbitmq.sockets_count | available, used | sockets | +| rabbitmq.erlang_processes_count | available, used | processes | +| rabbitmq.erlang_run_queue_processes_count | length | processes | +| rabbitmq.memory_usage | used | bytes | +| rabbitmq.disk_space_free_size | free | bytes | + +### Per vhost + +These metrics refer to the virtual host. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vhost | virtual host name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| rabbitmq.vhost_messages_count | ready, unacknowledged | messages | +| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s | + +### Per queue + +These metrics refer to the virtual host queue. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vhost | virtual host name | +| queue | queue name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages | +| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable management plugin. + +The management plugin is included in the RabbitMQ distribution, but disabled by default. +To enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/rabbitmq.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/rabbitmq.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://localhost:15672 | yes | +| collect_queues_metrics | Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used.
| no | no | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:15672 + +``` +</details> + +##### Basic HTTP auth + +Local server with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:15672 + username: admin + password: password + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:15672 + + - name: remote + url: http://192.0.2.0:15672 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m rabbitmq + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/metadata.yaml b/src/go/collectors/go.d.plugin/modules/rabbitmq/metadata.yaml new file mode 100644 index 00000000000000..f0a17b9e7fbcc8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/metadata.yaml @@ -0,0 +1,341 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-rabbitmq + plugin_name: go.d.plugin + module_name: rabbitmq + monitored_instance: + name: RabbitMQ + link: https://www.rabbitmq.com/ + icon_filename: rabbitmq.svg + categories: + - data-collection.message-brokers + keywords: + - rabbitmq + - message brokers + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors RabbitMQ instances. + + It collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html). 
+ The following endpoints are used: + + - `/api/overview` + - `/api/nodes/{node_name}` + - `/api/vhosts` + - `/api/queues` (disabled by default) + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable management plugin. + description: | + The management plugin is included in the RabbitMQ distribution, but disabled by default. + To enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation. + configuration: + file: + name: go.d/rabbitmq.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://localhost:15672 + required: true + - name: collect_queues_metrics + description: Collect stats per vhost per queue. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. + default_value: false + required: false + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:15672 + - name: Basic HTTP auth + description: Local server with basic HTTP authentication.
+ config: | + jobs: + - name: local + url: http://127.0.0.1:15672 + username: admin + password: password + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1:15672 + + - name: remote + url: http://192.0.2.0:15672 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: rabbitmq.messages_count + description: Messages + unit: messages + chart_type: stacked + dimensions: + - name: ready + - name: unacknowledged + - name: rabbitmq.messages_rate + description: Messages + unit: messages/s + chart_type: line + dimensions: + - name: ack + - name: publish + - name: publish_in + - name: publish_out + - name: confirm + - name: deliver + - name: deliver_no_ack + - name: get + - name: get_no_ack + - name: deliver_get + - name: redeliver + - name: return_unroutable + - name: rabbitmq.objects_count + description: Objects + unit: objects + chart_type: line + dimensions: + - name: channels + - name: consumers + - name: connections + - name: queues + - name: exchanges + - name: rabbitmq.connection_churn_rate + description: Connection churn + unit: operations/s + chart_type: line + dimensions: + - name: created + - name: closed + - name: rabbitmq.channel_churn_rate + description: Channel churn + unit: operations/s + chart_type: line + dimensions: + - name: created + - name: closed + - name: rabbitmq.queue_churn_rate + description: Queue churn + unit: operations/s + chart_type: line + dimensions: + - name: created + - name: deleted + - name: declared + - name: rabbitmq.file_descriptors_count + description: File descriptors + unit: fd + chart_type: stacked + dimensions: + - name: available + - name: used + - name: rabbitmq.sockets_count + description: Used sockets + unit: sockets + chart_type: stacked + dimensions: + - name: available + - name: used + - name: rabbitmq.erlang_processes_count + description: Erlang processes + unit: processes + chart_type: stacked + dimensions: + - name: available + - name: used + - name: rabbitmq.erlang_run_queue_processes_count + description: Erlang run queue + unit: processes + chart_type: line + dimensions: + - name: length + - name: rabbitmq.memory_usage + description: Memory + unit: bytes + chart_type: line + dimensions: + - name: used + - name: rabbitmq.disk_space_free_size + description: Free disk space + unit: bytes + chart_type: area + dimensions: + - name: free + - name: vhost + description: These metrics refer to the virtual host. + labels: + - name: vhost + description: virtual host name + metrics: + - name: rabbitmq.vhost_messages_count + description: Vhost messages + unit: messages + chart_type: stacked + dimensions: + - name: ready + - name: unacknowledged + - name: rabbitmq.vhost_messages_rate + description: Vhost messages rate + unit: messages/s + chart_type: stacked + dimensions: + - name: ack + - name: publish + - name: publish_in + - name: publish_out + - name: confirm + - name: deliver + - name: deliver_no_ack + - name: get + - name: get_no_ack + - name: deliver_get + - name: redeliver + - name: return_unroutable + - name: queue + description: These metrics refer to the virtual host queue.
+ labels: + - name: vhost + description: virtual host name + - name: queue + description: queue name + metrics: + - name: rabbitmq.queue_messages_count + description: Queue messages + unit: messages + chart_type: stacked + dimensions: + - name: ready + - name: unacknowledged + - name: paged_out + - name: persistent + - name: rabbitmq.queue_messages_rate + description: Queue messages rate + unit: messages/s + chart_type: stacked + dimensions: + - name: ack + - name: publish + - name: publish_in + - name: publish_out + - name: confirm + - name: deliver + - name: deliver_no_ack + - name: get + - name: get_no_ack + - name: deliver_get + - name: redeliver + - name: return_unroutable diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/metrics.go b/src/go/collectors/go.d.plugin/modules/rabbitmq/metrics.go new file mode 100644 index 00000000000000..871dfd57eef28b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/metrics.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package rabbitmq + +// https://www.rabbitmq.com/monitoring.html#cluster-wide-metrics +type overviewStats struct { + ObjectTotals struct { + Consumers int64 `json:"consumers" stm:"consumers"` + Queues int64 `json:"queues" stm:"queues"` + Exchanges int64 `json:"exchanges" stm:"exchanges"` + Connections int64 `json:"connections" stm:"connections"` + Channels int64 `json:"channels" stm:"channels"` + } `json:"object_totals" stm:"object_totals"` + ChurnRates struct { + ChannelClosed int64 `json:"channel_closed" stm:"channel_closed"` + ChannelCreated int64 `json:"channel_created" stm:"channel_created"` + ConnectionClosed int64 `json:"connection_closed" stm:"connection_closed"` + ConnectionCreated int64 `json:"connection_created" stm:"connection_created"` + QueueCreated int64 `json:"queue_created" stm:"queue_created"` + QueueDeclared int64 `json:"queue_declared" stm:"queue_declared"` + QueueDeleted int64 `json:"queue_deleted" stm:"queue_deleted"` + } `json:"churn_rates" stm:"churn_rates"` + QueueTotals struct { + Messages int64 `json:"messages" stm:"messages"` + MessagesReady int64 `json:"messages_ready" stm:"messages_ready"` + MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"` + } `json:"queue_totals" stm:"queue_totals"` + MessageStats messageStats `json:"message_stats" stm:"message_stats"` + Node string +} + +// https://www.rabbitmq.com/monitoring.html#node-metrics +type nodeStats struct { + FDTotal int64 `json:"fd_total" stm:"fd_total"` + FDUsed int64 `json:"fd_used" stm:"fd_used"` + MemLimit int64 `json:"mem_limit" stm:"mem_limit"` + MemUsed int64 `json:"mem_used" stm:"mem_used"` + SocketsTotal int64 `json:"sockets_total" stm:"sockets_total"` + SocketsUsed int64 `json:"sockets_used" stm:"sockets_used"` + ProcTotal int64 `json:"proc_total" stm:"proc_total"` + ProcUsed int64 `json:"proc_used" stm:"proc_used"` + DiskFree int64 `json:"disk_free" stm:"disk_free"` + RunQueue int64 `json:"run_queue" stm:"run_queue"` +} + +type vhostStats struct { + Name string `json:"name"` + Messages int64 `json:"messages" stm:"messages"` + MessagesReady int64 `json:"messages_ready" stm:"messages_ready"` + MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"` + MessageStats messageStats `json:"message_stats" stm:"message_stats"` +} + +// https://www.rabbitmq.com/monitoring.html#queue-metrics +type queueStats struct { + Name string `json:"name"` + Vhost string `json:"vhost"` + State string `json:"state"` + Type string `json:"type"` + Messages 
int64 `json:"messages" stm:"messages"` + MessagesReady int64 `json:"messages_ready" stm:"messages_ready"` + MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"` + MessagesPagedOut int64 `json:"messages_paged_out" stm:"messages_paged_out"` + MessagesPersistent int64 `json:"messages_persistent" stm:"messages_persistent"` + MessageStats messageStats `json:"message_stats" stm:"message_stats"` +} + +// https://rawcdn.githack.com/rabbitmq/rabbitmq-server/v3.11.5/deps/rabbitmq_management/priv/www/api/index.html +type messageStats struct { + Ack int64 `json:"ack" stm:"ack"` + Publish int64 `json:"publish" stm:"publish"` + PublishIn int64 `json:"publish_in" stm:"publish_in"` + PublishOut int64 `json:"publish_out" stm:"publish_out"` + Confirm int64 `json:"confirm" stm:"confirm"` + Deliver int64 `json:"deliver" stm:"deliver"` + DeliverNoAck int64 `json:"deliver_no_ack" stm:"deliver_no_ack"` + Get int64 `json:"get" stm:"get"` + GetNoAck int64 `json:"get_no_ack" stm:"get_no_ack"` + DeliverGet int64 `json:"deliver_get" stm:"deliver_get"` + Redeliver int64 `json:"redeliver" stm:"redeliver"` + ReturnUnroutable int64 `json:"return_unroutable" stm:"return_unroutable"` +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq.go b/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq.go new file mode 100644 index 00000000000000..59fe4b1538b5b0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq.go @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package rabbitmq + +import ( + _ "embed" + "net/http" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("rabbitmq", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *RabbitMQ { + return &RabbitMQ{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://localhost:15672", + Username: "guest", + Password: "guest", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + CollectQueues: false, + }, + charts: baseCharts.Copy(), + vhosts: make(map[string]bool), + queues: make(map[string]queueCache), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + CollectQueues bool `yaml:"collect_queues_metrics"` +} + +type ( + RabbitMQ struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + + httpClient *http.Client + + nodeName string + + vhosts map[string]bool + queues map[string]queueCache + } + queueCache struct { + name, vhost string + } +) + +func (r *RabbitMQ) Init() bool { + if r.URL == "" { + r.Error("'url' can not be empty") + return false + } + + client, err := web.NewHTTPClient(r.Client) + if err != nil { + r.Errorf("init HTTP client: %v", err) + return false + } + r.httpClient = client + + r.Debugf("using URL %s", r.URL) + r.Debugf("using timeout: %s", r.Timeout.Duration) + + return true +} + +func (r *RabbitMQ) Check() bool { + return len(r.Collect()) > 0 +} + +func (r *RabbitMQ) Charts() *module.Charts { + return r.charts +} + +func (r *RabbitMQ) Collect() map[string]int64 { + mx, err := r.collect() + if err != nil { + r.Error(err) + } + + if len(mx) == 0 { + return nil + } + + return mx +} + +func (r *RabbitMQ) Cleanup() { + if r.httpClient != nil { + r.httpClient.CloseIdleConnections() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq_test.go 
b/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq_test.go new file mode 100644 index 00000000000000..c365726aa35f6b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/rabbitmq_test.go @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package rabbitmq + +import ( + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +var ( + testOverviewStats, _ = os.ReadFile("testdata/v3.11.5/api-overview.json") + testNodeStats, _ = os.ReadFile("testdata/v3.11.5/api-nodes-node.json") + testVhostsStats, _ = os.ReadFile("testdata/v3.11.5/api-vhosts.json") + testQueuesStats, _ = os.ReadFile("testdata/v3.11.5/api-queues.json") +) + +func Test_testDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "testOverviewStats": testOverviewStats, + "testNodeStats": testNodeStats, + "testVhostsStats": testVhostsStats, + "testQueuesStats": testQueuesStats, + } { + require.NotNilf(t, data, name) + } +} + +func TestRabbitMQ_Init(t *testing.T) { + tests := map[string]struct { + wantFail bool + config Config + }{ + "success with default": { + wantFail: false, + config: New().Config, + }, + "fail when URL not set": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Request: web.Request{URL: ""}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rabbit := New() + rabbit.Config = test.config + + if test.wantFail { + assert.False(t, rabbit.Init()) + } else { + assert.True(t, rabbit.Init()) + } + }) + } +} + +func TestRabbitMQ_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestRabbitMQ_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) + + rabbit := New() + require.True(t, rabbit.Init()) + + assert.NotPanics(t, rabbit.Cleanup) +} + +func TestRabbitMQ_Check(t *testing.T) { + tests := map[string]struct { + prepare func() (*RabbitMQ, func()) + wantFail bool + }{ + "success on valid response": {wantFail: false, prepare: caseSuccessAllRequests}, + "fails on invalid response": {wantFail: true, prepare: caseInvalidDataResponse}, + "fails on 404": {wantFail: true, prepare: case404}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rabbit, cleanup := test.prepare() + defer cleanup() + + require.True(t, rabbit.Init()) + + if test.wantFail { + assert.False(t, rabbit.Check()) + } else { + assert.True(t, rabbit.Check()) + } + }) + } +} + +func TestRabbitMQ_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() (*RabbitMQ, func()) + wantCollected map[string]int64 + wantCharts int + }{ + "success on valid response": { + prepare: caseSuccessAllRequests, + wantCharts: len(baseCharts) + len(chartsTmplVhost)*3 + len(chartsTmplQueue)*4, + wantCollected: map[string]int64{ + "churn_rates_channel_closed": 0, + "churn_rates_channel_created": 0, + "churn_rates_connection_closed": 0, + "churn_rates_connection_created": 0, + "churn_rates_queue_created": 6, + "churn_rates_queue_declared": 6, + "churn_rates_queue_deleted": 2, + "disk_free": 189799186432, + "fd_total": 1048576, + "fd_used": 43, + "mem_limit": 6713820774, + "mem_used": 172720128, + "message_stats_ack": 0, + "message_stats_confirm": 0, + "message_stats_deliver": 0, + "message_stats_deliver_get": 0, + "message_stats_deliver_no_ack": 0, + "message_stats_get": 0, + "message_stats_get_no_ack": 0, + "message_stats_publish": 0, + "message_stats_publish_in": 0, + 
"message_stats_publish_out": 0, + "message_stats_redeliver": 0, + "message_stats_return_unroutable": 0, + "object_totals_channels": 0, + "object_totals_connections": 0, + "object_totals_consumers": 0, + "object_totals_exchanges": 21, + "object_totals_queues": 4, + "proc_available": 1048135, + "proc_total": 1048576, + "proc_used": 441, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_ack": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_confirm": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver_get": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver_no_ack": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_get": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_get_no_ack": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish_in": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish_out": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_redeliver": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_return_unroutable": 0, + "queue_MyFirstQueue_vhost_mySecondVhost_messages": 1, + "queue_MyFirstQueue_vhost_mySecondVhost_messages_paged_out": 1, + "queue_MyFirstQueue_vhost_mySecondVhost_messages_persistent": 1, + "queue_MyFirstQueue_vhost_mySecondVhost_messages_ready": 1, + "queue_MyFirstQueue_vhost_mySecondVhost_messages_unacknowledged": 1, + "queue_myFirstQueue_vhost_/_message_stats_ack": 0, + "queue_myFirstQueue_vhost_/_message_stats_confirm": 0, + "queue_myFirstQueue_vhost_/_message_stats_deliver": 0, + "queue_myFirstQueue_vhost_/_message_stats_deliver_get": 0, + "queue_myFirstQueue_vhost_/_message_stats_deliver_no_ack": 0, + "queue_myFirstQueue_vhost_/_message_stats_get": 0, + "queue_myFirstQueue_vhost_/_message_stats_get_no_ack": 0, + "queue_myFirstQueue_vhost_/_message_stats_publish": 0, + "queue_myFirstQueue_vhost_/_message_stats_publish_in": 0, + "queue_myFirstQueue_vhost_/_message_stats_publish_out": 0, + "queue_myFirstQueue_vhost_/_message_stats_redeliver": 0, + "queue_myFirstQueue_vhost_/_message_stats_return_unroutable": 0, + "queue_myFirstQueue_vhost_/_messages": 1, + "queue_myFirstQueue_vhost_/_messages_paged_out": 1, + "queue_myFirstQueue_vhost_/_messages_persistent": 1, + "queue_myFirstQueue_vhost_/_messages_ready": 1, + "queue_myFirstQueue_vhost_/_messages_unacknowledged": 1, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_ack": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_confirm": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver_get": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver_no_ack": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_get": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_get_no_ack": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish_in": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish_out": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_redeliver": 0, + "queue_myFirstQueue_vhost_myFirstVhost_message_stats_return_unroutable": 0, + "queue_myFirstQueue_vhost_myFirstVhost_messages": 1, + "queue_myFirstQueue_vhost_myFirstVhost_messages_paged_out": 1, + "queue_myFirstQueue_vhost_myFirstVhost_messages_persistent": 1, + 
"queue_myFirstQueue_vhost_myFirstVhost_messages_ready": 1, + "queue_myFirstQueue_vhost_myFirstVhost_messages_unacknowledged": 1, + "queue_mySecondQueue_vhost_/_message_stats_ack": 0, + "queue_mySecondQueue_vhost_/_message_stats_confirm": 0, + "queue_mySecondQueue_vhost_/_message_stats_deliver": 0, + "queue_mySecondQueue_vhost_/_message_stats_deliver_get": 0, + "queue_mySecondQueue_vhost_/_message_stats_deliver_no_ack": 0, + "queue_mySecondQueue_vhost_/_message_stats_get": 0, + "queue_mySecondQueue_vhost_/_message_stats_get_no_ack": 0, + "queue_mySecondQueue_vhost_/_message_stats_publish": 0, + "queue_mySecondQueue_vhost_/_message_stats_publish_in": 0, + "queue_mySecondQueue_vhost_/_message_stats_publish_out": 0, + "queue_mySecondQueue_vhost_/_message_stats_redeliver": 0, + "queue_mySecondQueue_vhost_/_message_stats_return_unroutable": 0, + "queue_mySecondQueue_vhost_/_messages": 1, + "queue_mySecondQueue_vhost_/_messages_paged_out": 1, + "queue_mySecondQueue_vhost_/_messages_persistent": 1, + "queue_mySecondQueue_vhost_/_messages_ready": 1, + "queue_mySecondQueue_vhost_/_messages_unacknowledged": 1, + "queue_totals_messages": 0, + "queue_totals_messages_ready": 0, + "queue_totals_messages_unacknowledged": 0, + "run_queue": 1, + "sockets_total": 943629, + "sockets_used": 0, + "vhost_/_message_stats_ack": 0, + "vhost_/_message_stats_confirm": 0, + "vhost_/_message_stats_deliver": 0, + "vhost_/_message_stats_deliver_get": 0, + "vhost_/_message_stats_deliver_no_ack": 0, + "vhost_/_message_stats_get": 0, + "vhost_/_message_stats_get_no_ack": 0, + "vhost_/_message_stats_publish": 0, + "vhost_/_message_stats_publish_in": 0, + "vhost_/_message_stats_publish_out": 0, + "vhost_/_message_stats_redeliver": 0, + "vhost_/_message_stats_return_unroutable": 0, + "vhost_/_messages": 1, + "vhost_/_messages_ready": 1, + "vhost_/_messages_unacknowledged": 1, + "vhost_myFirstVhost_message_stats_ack": 0, + "vhost_myFirstVhost_message_stats_confirm": 0, + "vhost_myFirstVhost_message_stats_deliver": 0, + "vhost_myFirstVhost_message_stats_deliver_get": 0, + "vhost_myFirstVhost_message_stats_deliver_no_ack": 0, + "vhost_myFirstVhost_message_stats_get": 0, + "vhost_myFirstVhost_message_stats_get_no_ack": 0, + "vhost_myFirstVhost_message_stats_publish": 0, + "vhost_myFirstVhost_message_stats_publish_in": 0, + "vhost_myFirstVhost_message_stats_publish_out": 0, + "vhost_myFirstVhost_message_stats_redeliver": 0, + "vhost_myFirstVhost_message_stats_return_unroutable": 0, + "vhost_myFirstVhost_messages": 1, + "vhost_myFirstVhost_messages_ready": 1, + "vhost_myFirstVhost_messages_unacknowledged": 1, + "vhost_mySecondVhost_message_stats_ack": 0, + "vhost_mySecondVhost_message_stats_confirm": 0, + "vhost_mySecondVhost_message_stats_deliver": 0, + "vhost_mySecondVhost_message_stats_deliver_get": 0, + "vhost_mySecondVhost_message_stats_deliver_no_ack": 0, + "vhost_mySecondVhost_message_stats_get": 0, + "vhost_mySecondVhost_message_stats_get_no_ack": 0, + "vhost_mySecondVhost_message_stats_publish": 0, + "vhost_mySecondVhost_message_stats_publish_in": 0, + "vhost_mySecondVhost_message_stats_publish_out": 0, + "vhost_mySecondVhost_message_stats_redeliver": 0, + "vhost_mySecondVhost_message_stats_return_unroutable": 0, + "vhost_mySecondVhost_messages": 1, + "vhost_mySecondVhost_messages_ready": 1, + "vhost_mySecondVhost_messages_unacknowledged": 1, + }, + }, + "fails on invalid response": { + prepare: caseInvalidDataResponse, + wantCollected: nil, + wantCharts: len(baseCharts), + }, + "fails on 404": { + prepare: case404, + 
wantCollected: nil, + wantCharts: len(baseCharts), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rabbit, cleanup := test.prepare() + defer cleanup() + + require.True(t, rabbit.Init()) + + mx := rabbit.Collect() + + assert.Equal(t, test.wantCollected, mx) + assert.Equal(t, test.wantCharts, len(*rabbit.Charts())) + }) + } +} + +func caseSuccessAllRequests() (*RabbitMQ, func()) { + srv := prepareRabbitMQEndpoint() + rabbit := New() + rabbit.URL = srv.URL + rabbit.CollectQueues = true + + return rabbit, srv.Close +} + +func caseInvalidDataResponse() (*RabbitMQ, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + rabbit := New() + rabbit.URL = srv.URL + + return rabbit, srv.Close +} + +func case404() (*RabbitMQ, func()) { + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + rabbit := New() + rabbit.URL = srv.URL + + return rabbit, srv.Close +} + +func prepareRabbitMQEndpoint() *httptest.Server { + srv := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case urlPathAPIOverview: + _, _ = w.Write(testOverviewStats) + case filepath.Join(urlPathAPINodes, "rabbit@localhost"): + _, _ = w.Write(testNodeStats) + case urlPathAPIVhosts: + _, _ = w.Write(testVhostsStats) + case urlPathAPIQueues: + _, _ = w.Write(testQueuesStats) + default: + w.WriteHeader(404) + } + })) + return srv +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json new file mode 100644 index 00000000000000..cc0a0ceb05a487 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json @@ -0,0 +1,453 @@ +{ + "partitions": [], + "os_pid": "49", + "fd_total": 1048576, + "sockets_total": 943629, + "mem_limit": 6713820774, + "mem_alarm": false, + "disk_free_limit": 16784551936, + "disk_free_alarm": false, + "proc_total": 1048576, + "rates_mode": "basic", + "uptime": 10098336, + "run_queue": 1, + "processors": 12, + "exchange_types": [ + { + "name": "topic", + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "headers", + "description": "AMQP headers exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "fanout", + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "direct", + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true + } + ], + "auth_mechanisms": [ + { + "name": "PLAIN", + "description": "SASL PLAIN authentication mechanism", + "enabled": true + }, + { + "name": "AMQPLAIN", + "description": "QPid AMQPLAIN mechanism", + "enabled": true + }, + { + "name": "RABBIT-CR-DEMO", + "description": "RabbitMQ Demo challenge-response authentication mechanism", + "enabled": false + } + ], + "applications": [ + { + "name": "accept", + "description": "Accept header(s) for Erlang/Elixir", + "version": "0.3.5" + }, + { + "name": "amqp10_common", + "description": "Modules shared by rabbitmq-amqp1.0 and rabbitmq-amqp1.0-client", + "version": "3.11.5" + }, + { + "name": "amqp_client", + "description": "RabbitMQ AMQP Client", + "version": "3.11.5" + }, + { + "name": "asn1", + "description": "The Erlang ASN1 compiler version 
5.0.21", + "version": "5.0.21" + }, + { + "name": "aten", + "description": "Erlang node failure detector", + "version": "0.5.8" + }, + { + "name": "compiler", + "description": "ERTS CXC 138 10", + "version": "8.2.2" + }, + { + "name": "cowboy", + "description": "Small, fast, modern HTTP server.", + "version": "2.8.0" + }, + { + "name": "cowlib", + "description": "Support library for manipulating Web protocols.", + "version": "2.9.1" + }, + { + "name": "credentials_obfuscation", + "description": "Helper library that obfuscates sensitive values in process state", + "version": "3.2.0" + }, + { + "name": "crypto", + "description": "CRYPTO", + "version": "5.1.2" + }, + { + "name": "cuttlefish", + "description": "cuttlefish configuration abstraction", + "version": "3.1.0" + }, + { + "name": "enough", + "description": "A gen_server implementation with additional, overload-protected call type", + "version": "0.1.0" + }, + { + "name": "gen_batch_server", + "description": "Generic batching server", + "version": "0.8.8" + }, + { + "name": "inets", + "description": "INETS CXC 138 49", + "version": "8.2" + }, + { + "name": "kernel", + "description": "ERTS CXC 138 10", + "version": "8.5.2" + }, + { + "name": "mnesia", + "description": "MNESIA CXC 138 12", + "version": "4.21.3" + }, + { + "name": "observer_cli", + "description": "Visualize Erlang Nodes On The Command Line", + "version": "1.7.3" + }, + { + "name": "os_mon", + "description": "CPO CXC 138 46", + "version": "2.8" + }, + { + "name": "osiris", + "description": "New project", + "version": "1.3.3" + }, + { + "name": "prometheus", + "description": "Prometheus.io client in Erlang", + "version": "4.9.1" + }, + { + "name": "public_key", + "description": "Public key infrastructure", + "version": "1.13.2" + }, + { + "name": "ra", + "description": "Raft library", + "version": "2.4.5" + }, + { + "name": "rabbit", + "description": "RabbitMQ", + "version": "3.11.5" + }, + { + "name": "rabbit_common", + "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client", + "version": "3.11.5" + }, + { + "name": "rabbitmq_management", + "description": "RabbitMQ Management Console", + "version": "3.11.5" + }, + { + "name": "rabbitmq_management_agent", + "description": "RabbitMQ Management Agent", + "version": "3.11.5" + }, + { + "name": "rabbitmq_prelaunch", + "description": "RabbitMQ prelaunch setup", + "version": "3.11.5" + }, + { + "name": "rabbitmq_prometheus", + "description": "", + "version": "3.11.5" + }, + { + "name": "rabbitmq_web_dispatch", + "description": "RabbitMQ Web Dispatcher", + "version": "3.11.5" + }, + { + "name": "ranch", + "description": "Socket acceptor pool for TCP protocols.", + "version": "2.1.0" + }, + { + "name": "recon", + "description": "Diagnostic tools for production use", + "version": "2.5.2" + }, + { + "name": "redbug", + "description": "Erlang Tracing Debugger", + "version": "2.0.7" + }, + { + "name": "runtime_tools", + "description": "RUNTIME_TOOLS", + "version": "1.19" + }, + { + "name": "sasl", + "description": "SASL CXC 138 11", + "version": "4.2" + }, + { + "name": "seshat", + "description": "Counters registry", + "version": "0.4.0" + }, + { + "name": "ssl", + "description": "Erlang/OTP SSL application", + "version": "10.8.6" + }, + { + "name": "stdlib", + "description": "ERTS CXC 138 10", + "version": "4.2" + }, + { + "name": "stdout_formatter", + "description": "Tools to format paragraphs, lists and tables as plain text", + "version": "0.2.4" + }, + { + "name": "syntax_tools", + "description": "Syntax 
tools", + "version": "3.0" + }, + { + "name": "sysmon_handler", + "description": "Rate-limiting system_monitor event handler", + "version": "1.3.0" + }, + { + "name": "systemd", + "description": "systemd integration for Erlang applications", + "version": "0.6.1" + }, + { + "name": "thoas", + "description": "A blazing fast JSON parser and generator in pure Erlang.", + "version": "0.4.0" + }, + { + "name": "tools", + "description": "DEVTOOLS CXC 138 16", + "version": "3.5.3" + }, + { + "name": "xmerl", + "description": "XML parser", + "version": "1.3.30" + } + ], + "contexts": [ + { + "description": "RabbitMQ Management", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "ip": "0.0.0.0", + "port": "15672" + }, + { + "description": "RabbitMQ Prometheus", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "port": "15692", + "protocol": "'http/prometheus'" + } + ], + "log_files": [ + "/opt/bitnami/rabbitmq/var/log/rabbitmq/rabbit@localhost.log", + "/opt/bitnami/rabbitmq/var/log/rabbitmq/rabbit@localhost_upgrade.log", + "<stdout>" + ], + "db_dir": "/bitnami/rabbitmq/mnesia/rabbit@localhost", + "config_files": [ + "/opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf" + ], + "net_ticktime": 60, + "enabled_plugins": [ + "rabbitmq_management", + "rabbitmq_prometheus" + ], + "mem_calculation_strategy": "rss", + "ra_open_file_metrics": { + "ra_log_wal": 1, + "ra_log_segment_writer": 0 + }, + "name": "rabbit@localhost", + "running": true, + "type": "disc", + "mem_used": 172720128, + "mem_used_details": { + "rate": 0 + }, + "fd_used": 43, + "fd_used_details": { + "rate": -0.2 + }, + "sockets_used": 0, + "sockets_used_details": { + "rate": 0 + }, + "proc_used": 441, + "proc_used_details": { + "rate": -0.2 + }, + "disk_free": 189799186432, + "disk_free_details": { + "rate": 0 + }, + "gc_num": 74226, + "gc_num_details": { + "rate": 4.8 + }, + "gc_bytes_reclaimed": 1847200664, + "gc_bytes_reclaimed_details": { + "rate": 101998.4 + }, + "context_switches": 839195, + "context_switches_details": { + "rate": 59.4 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0 + }, + "io_read_bytes": 1, + "io_read_bytes_details": { + "rate": 0 + }, + "io_read_avg_time": 0.043, + "io_read_avg_time_details": { + "rate": 0 + }, + "io_write_count": 0, + "io_write_count_details": { + "rate": 0 + }, + "io_write_bytes": 0, + "io_write_bytes_details": { + "rate": 0 + }, + "io_write_avg_time": 0, + "io_write_avg_time_details": { + "rate": 0 + }, + "io_sync_count": 0, + "io_sync_count_details": { + "rate": 0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0 + }, + "io_seek_count": 0, + "io_seek_count_details": { + "rate": 0 + }, + "io_seek_avg_time": 0, + "io_seek_avg_time_details": { + "rate": 0 + }, + "io_reopen_count": 0, + "io_reopen_count_details": { + "rate": 0 + }, + "mnesia_ram_tx_count": 272, + "mnesia_ram_tx_count_details": { + "rate": 0 + }, + "mnesia_disk_tx_count": 58, + "mnesia_disk_tx_count_details": { + "rate": 0 + }, + "msg_store_read_count": 0, + "msg_store_read_count_details": { + "rate": 0 + }, + "msg_store_write_count": 0, + "msg_store_write_count_details": { + "rate": 0 + }, + "queue_index_write_count": 0, + "queue_index_write_count_details": { + "rate": 0 + }, + "queue_index_read_count": 0, + "queue_index_read_count_details": { + "rate": 0 + }, + "connection_created": 0, + "connection_created_details": { + "rate": 0 + }, + "connection_closed": 0, + "connection_closed_details": { + "rate": 0 + }, + "channel_created": 0, + "channel_created_details": { + "rate": 0 + 
}, + "channel_closed": 0, + "channel_closed_details": { + "rate": 0 + }, + "queue_declared": 6, + "queue_declared_details": { + "rate": 0 + }, + "queue_created": 6, + "queue_created_details": { + "rate": 0 + }, + "queue_deleted": 2, + "queue_deleted_details": { + "rate": 0 + }, + "cluster_links": [], + "metrics_gc_queue_length": { + "connection_closed": 0, + "channel_closed": 0, + "consumer_deleted": 0, + "exchange_deleted": 0, + "queue_deleted": 0, + "vhost_deleted": 0, + "node_node_deleted": 0, + "channel_consumer_deleted": 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-overview.json b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-overview.json new file mode 100644 index 00000000000000..5c71aaf5df4417 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-overview.json @@ -0,0 +1,183 @@ +{ + "management_version": "3.11.5", + "rates_mode": "basic", + "sample_retention_policies": { + "global": [ + 600, + 3600, + 28800, + 86400 + ], + "basic": [ + 600, + 3600 + ], + "detailed": [ + 600 + ] + }, + "exchange_types": [ + { + "name": "direct", + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "fanout", + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "headers", + "description": "AMQP headers exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "topic", + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true + } + ], + "product_version": "3.11.5", + "product_name": "RabbitMQ", + "rabbitmq_version": "3.11.5", + "cluster_name": "rabbit@f705ea2a1bec", + "erlang_version": "25.2", + "erlang_full_version": "Erlang/OTP 25 [erts-13.1.3] [source] [64-bit] [smp:12:12] [ds:12:12:10] [async-threads:1] [jit:ns]", + "release_series_support_status": "supported", + "disable_stats": false, + "enable_queue_totals": false, + "message_stats": { + "disk_reads": 0, + "disk_reads_details": { + "rate": 0 + }, + "disk_writes": 0, + "disk_writes_details": { + "rate": 0 + } + }, + "churn_rates": { + "channel_closed": 0, + "channel_closed_details": { + "rate": 0 + }, + "channel_created": 0, + "channel_created_details": { + "rate": 0 + }, + "connection_closed": 0, + "connection_closed_details": { + "rate": 0 + }, + "connection_created": 0, + "connection_created_details": { + "rate": 0 + }, + "queue_created": 6, + "queue_created_details": { + "rate": 0 + }, + "queue_declared": 6, + "queue_declared_details": { + "rate": 0 + }, + "queue_deleted": 2, + "queue_deleted_details": { + "rate": 0 + } + }, + "queue_totals": { + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + } + }, + "object_totals": { + "channels": 0, + "connections": 0, + "consumers": 0, + "exchanges": 21, + "queues": 4 + }, + "statistics_db_event_queue": 0, + "node": "rabbit@localhost", + "listeners": [ + { + "node": "rabbit@localhost", + "protocol": "amqp", + "ip_address": "::", + "port": 5672, + "socket_opts": { + "backlog": 128, + "nodelay": true, + "linger": [ + true, + 0 + ], + "exit_on_close": false + } + }, + { + "node": "rabbit@localhost", + "protocol": "clustering", + "ip_address": "::", + "port": 25672, + "socket_opts": [] + }, + { + "node": "rabbit@localhost", + "protocol": "http", + "ip_address": "::", + "port": 15672, + 
"socket_opts": { + "cowboy_opts": { + "sendfile": false + }, + "ip": "0.0.0.0", + "port": 15672 + } + }, + { + "node": "rabbit@localhost", + "protocol": "http/prometheus", + "ip_address": "::", + "port": 15692, + "socket_opts": { + "cowboy_opts": { + "sendfile": false + }, + "port": 15692, + "protocol": "http/prometheus" + } + } + ], + "contexts": [ + { + "ssl_opts": [], + "node": "rabbit@localhost", + "description": "RabbitMQ Management", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "ip": "0.0.0.0", + "port": "15672" + }, + { + "ssl_opts": [], + "node": "rabbit@localhost", + "description": "RabbitMQ Prometheus", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "port": "15692", + "protocol": "'http/prometheus'" + } + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-queues.json b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-queues.json new file mode 100644 index 00000000000000..40c6e6c80745cf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-queues.json @@ -0,0 +1,334 @@ +[ + { + "arguments": {}, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_deliver_seq_id": 0, + "next_seq_id": 0, + "num_pending_acks": 0, + "num_unconfirmed": 0, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity", + "version": 1 + }, + "consumer_capacity": 0, + "consumer_utilisation": 0, + "consumers": 0, + "durable": true, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 74 + }, + "head_message_timestamp": null, + "idle_since": "2023-01-02T15:51:49.985+00:00", + "memory": 55408, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 1, + "messages_persistent": 1, + "messages_ram": 0, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "myFirstQueue", + "node": "rabbit@localhost", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 91946, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_deliver_seq_id": 0, + "next_seq_id": 0, + "num_pending_acks": 0, + "num_unconfirmed": 0, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity", + "version": 1 + }, + "consumer_capacity": 0, + "consumer_utilisation": 0, + "consumers": 0, + "durable": true, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + 
"fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 74 + }, + "head_message_timestamp": null, + "idle_since": "2023-01-02T15:51:49.296+00:00", + "memory": 55408, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 1, + "messages_persistent": 1, + "messages_ram": 0, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "mySecondQueue", + "node": "rabbit@localhost", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 91878, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-queue-type": "classic" + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_deliver_seq_id": 0, + "next_seq_id": 0, + "num_pending_acks": 0, + "num_unconfirmed": 0, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity", + "version": 1 + }, + "consumer_capacity": 0, + "consumer_utilisation": 0, + "consumers": 0, + "durable": true, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 7 + }, + "head_message_timestamp": null, + "idle_since": "2023-01-02T15:52:57.855+00:00", + "memory": 55408, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 1, + "messages_persistent": 1, + "messages_ram": 0, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "myFirstQueue", + "node": "rabbit@localhost", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 7431, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "myFirstVhost" + }, + { + "arguments": { + "x-queue-type": "classic" + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_deliver_seq_id": 0, + "next_seq_id": 0, + "num_pending_acks": 0, + "num_unconfirmed": 0, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity", + "version": 1 + }, + "consumer_capacity": 0, + "consumer_utilisation": 0, + "consumers": 0, + "durable": true, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + 
"garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 7 + }, + "head_message_timestamp": null, + "idle_since": "2023-01-02T15:53:08.260+00:00", + "memory": 55408, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 1, + "messages_persistent": 1, + "messages_ram": 0, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "MyFirstQueue", + "node": "rabbit@localhost", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 7436, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "mySecondVhost" + } +] diff --git a/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json new file mode 100644 index 00000000000000..ed2c3418d59eca --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json @@ -0,0 +1,82 @@ +[ + { + "cluster_state": { + "rabbit@localhost": "running" + }, + "default_queue_type": "undefined", + "description": "Default virtual host", + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "metadata": { + "description": "Default virtual host", + "tags": [] + }, + "name": "/", + "tags": [], + "tracing": false + }, + { + "cluster_state": { + "rabbit@localhost": "running" + }, + "default_queue_type": "classic", + "description": "", + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "metadata": { + "default_queue_type": "classic", + "description": "", + "tags": [] + }, + "name": "myFirstVhost", + "tags": [], + "tracing": false + }, + { + "cluster_state": { + "rabbit@localhost": "running" + }, + "default_queue_type": "classic", + "description": "", + "messages": 1, + "messages_details": { + "rate": 0 + }, + "messages_ready": 1, + "messages_ready_details": { + "rate": 0 + }, + "messages_unacknowledged": 1, + "messages_unacknowledged_details": { + "rate": 0 + }, + "metadata": { + "default_queue_type": "classic", + "description": "", + "tags": [] + }, + "name": "mySecondVhost", + "tags": [], + "tracing": false + } +] diff --git a/src/go/collectors/go.d.plugin/modules/redis/README.md b/src/go/collectors/go.d.plugin/modules/redis/README.md new file mode 120000 index 00000000000000..e4166625791874 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/README.md @@ -0,0 +1 @@ +integrations/redis.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/redis/charts.go b/src/go/collectors/go.d.plugin/modules/redis/charts.go new file mode 100644 index 00000000000000..3305ffbcec9567 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/charts.go @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + 
+package redis + +import "github.com/netdata/go.d.plugin/agent/module" + +const ( + prioConnections = module.Priority + iota + prioClients + + prioPingLatency + prioCommands + prioKeyLookupHitRate + + prioMemory + prioMemoryFragmentationRatio + prioKeyEviction + + prioNet + + prioConnectedReplicas + prioMasterLinkStatus + prioMasterLastIOSinceTime + prioMasterLinkDownSinceTime + + prioPersistenceRDBChanges + prioPersistenceRDBBgSaveNow + prioPersistenceRDBBgSaveHealth + prioPersistenceRDBBgSaveLastSaveSinceTime + prioPersistenceAOFSize + + prioCommandsCalls + prioCommandsUsec + prioCommandsUsecPerSec + + prioKeyExpiration + prioKeys + prioExpiresKeys + + prioUptime +) + +var redisCharts = module.Charts{ + chartConnections.Copy(), + chartClients.Copy(), + + pingLatencyCommands.Copy(), + chartCommands.Copy(), + chartKeyLookupHitRate.Copy(), + + chartMemory.Copy(), + chartMemoryFragmentationRatio.Copy(), + chartKeyEviction.Copy(), + + chartNet.Copy(), + + chartConnectedReplicas.Copy(), + + chartPersistenceRDBChanges.Copy(), + chartPersistenceRDBBgSaveNow.Copy(), + chartPersistenceRDBBgSaveHealth.Copy(), + chartPersistenceRDBLastSaveSinceTime.Copy(), + + chartCommandsCalls.Copy(), + chartCommandsUsec.Copy(), + chartCommandsUsecPerSec.Copy(), + + chartKeyExpiration.Copy(), + chartKeys.Copy(), + chartExpiresKeys.Copy(), + + chartUptime.Copy(), +} + +var ( + chartConnections = module.Chart{ + ID: "connections", + Title: "Accepted and rejected (maxclients limit) connections", + Units: "connections/s", + Fam: "connections", + Ctx: "redis.connections", + Priority: prioConnections, + Dims: module.Dims{ + {ID: "total_connections_received", Name: "accepted", Algo: module.Incremental}, + {ID: "rejected_connections", Name: "rejected", Algo: module.Incremental}, + }, + } + chartClients = module.Chart{ + ID: "clients", + Title: "Clients", + Units: "clients", + Fam: "connections", + Ctx: "redis.clients", + Priority: prioClients, + Dims: module.Dims{ + {ID: "connected_clients", Name: "connected"}, + {ID: "blocked_clients", Name: "blocked"}, + {ID: "tracking_clients", Name: "tracking"}, + {ID: "clients_in_timeout_table", Name: "in_timeout_table"}, + }, + } +) + +var ( + pingLatencyCommands = module.Chart{ + ID: "ping_latency", + Title: "Ping latency", + Units: "seconds", + Fam: "performance", + Ctx: "redis.ping_latency", + Priority: prioPingLatency, + Type: module.Area, + Dims: module.Dims{ + {ID: "ping_latency_min", Name: "min", Div: 1e6}, + {ID: "ping_latency_max", Name: "max", Div: 1e6}, + {ID: "ping_latency_avg", Name: "avg", Div: 1e6}, + }, + } + chartCommands = module.Chart{ + ID: "commands", + Title: "Processed commands", + Units: "commands/s", + Fam: "performance", + Ctx: "redis.commands", + Priority: prioCommands, + Dims: module.Dims{ + {ID: "total_commands_processed", Name: "processed", Algo: module.Incremental}, + }, + } + chartKeyLookupHitRate = module.Chart{ + ID: "key_lookup_hit_rate", + Title: "Keys lookup hit rate", + Units: "percentage", + Fam: "performance", + Ctx: "redis.keyspace_lookup_hit_rate", + Priority: prioKeyLookupHitRate, + Dims: module.Dims{ + {ID: "keyspace_hit_rate", Name: "lookup_hit_rate", Div: precision}, + }, + } +) + +var ( + chartMemory = module.Chart{ + ID: "memory", + Title: "Memory usage", + Units: "bytes", + Fam: "memory", + Ctx: "redis.memory", + Type: module.Area, + Priority: prioMemory, + Dims: module.Dims{ + {ID: "maxmemory", Name: "max"}, + {ID: "used_memory", Name: "used"}, + {ID: "used_memory_rss", Name: "rss"}, + {ID: "used_memory_peak", Name: "peak"}, + {ID: 
"used_memory_dataset", Name: "dataset"}, + {ID: "used_memory_lua", Name: "lua"}, + {ID: "used_memory_scripts", Name: "scripts"}, + }, + } + chartMemoryFragmentationRatio = module.Chart{ + ID: "mem_fragmentation_ratio", + Title: "Ratio between used_memory_rss and used_memory", + Units: "ratio", + Fam: "memory", + Ctx: "redis.mem_fragmentation_ratio", + Priority: prioMemoryFragmentationRatio, + Dims: module.Dims{ + {ID: "mem_fragmentation_ratio", Name: "mem_fragmentation", Div: precision}, + }, + } + chartKeyEviction = module.Chart{ + ID: "key_eviction_events", + Title: "Evicted keys due to maxmemory limit", + Units: "keys/s", + Fam: "memory", + Ctx: "redis.key_eviction_events", + Priority: prioKeyEviction, + Dims: module.Dims{ + {ID: "evicted_keys", Name: "evicted", Algo: module.Incremental}, + }, + } +) + +var ( + chartNet = module.Chart{ + ID: "net", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "network", + Ctx: "redis.net", + Type: module.Area, + Priority: prioNet, + Dims: module.Dims{ + {ID: "total_net_input_bytes", Name: "received", Mul: 8, Div: 1024, Algo: module.Incremental}, + {ID: "total_net_output_bytes", Name: "sent", Mul: -8, Div: 1024, Algo: module.Incremental}, + }, + } +) + +var ( + chartPersistenceRDBChanges = module.Chart{ + ID: "persistence", + Title: "Operations that produced changes since the last SAVE or BGSAVE", + Units: "operations", + Fam: "persistence", + Ctx: "redis.rdb_changes", + Priority: prioPersistenceRDBChanges, + Dims: module.Dims{ + {ID: "rdb_changes_since_last_save", Name: "changes"}, + }, + } + chartPersistenceRDBBgSaveNow = module.Chart{ + ID: "bgsave_now", + Title: "Duration of the on-going RDB save operation if any", + Units: "seconds", + Fam: "persistence", + Ctx: "redis.bgsave_now", + Priority: prioPersistenceRDBBgSaveNow, + Dims: module.Dims{ + {ID: "rdb_current_bgsave_time_sec", Name: "current_bgsave_time"}, + }, + } + chartPersistenceRDBBgSaveHealth = module.Chart{ + ID: "bgsave_health", + Title: "Status of the last RDB save operation (0: ok, 1: err)", + Units: "status", + Fam: "persistence", + Ctx: "redis.bgsave_health", + Priority: prioPersistenceRDBBgSaveHealth, + Dims: module.Dims{ + {ID: "rdb_last_bgsave_status", Name: "last_bgsave"}, + }, + } + chartPersistenceRDBLastSaveSinceTime = module.Chart{ + ID: "bgsave_last_rdb_save_since_time", + Title: "Time elapsed since the last successful RDB save", + Units: "seconds", + Fam: "persistence", + Ctx: "redis.bgsave_last_rdb_save_since_time", + Priority: prioPersistenceRDBBgSaveLastSaveSinceTime, + Dims: module.Dims{ + {ID: "rdb_last_save_time", Name: "last_bgsave_time"}, + }, + } + + chartPersistenceAOFSize = module.Chart{ + ID: "persistence_aof_size", + Title: "AOF file size", + Units: "bytes", + Fam: "persistence", + Ctx: "redis.aof_file_size", + Priority: prioPersistenceAOFSize, + Dims: module.Dims{ + {ID: "aof_current_size", Name: "current"}, + {ID: "aof_base_size", Name: "base"}, + }, + } +) + +var ( + chartCommandsCalls = module.Chart{ + ID: "commands_calls", + Title: "Calls per command", + Units: "calls/s", + Fam: "commands", + Ctx: "redis.commands_calls", + Type: module.Stacked, + Priority: prioCommandsCalls, + } + chartCommandsUsec = module.Chart{ + ID: "commands_usec", + Title: "Total CPU time consumed by the commands", + Units: "microseconds", + Fam: "commands", + Ctx: "redis.commands_usec", + Type: module.Stacked, + Priority: prioCommandsUsec, + } + chartCommandsUsecPerSec = module.Chart{ + ID: "commands_usec_per_sec", + Title: "Average CPU consumed per command execution", + 
Units: "microseconds/s", + Fam: "commands", + Ctx: "redis.commands_usec_per_sec", + Priority: prioCommandsUsecPerSec, + } +) + +var ( + chartKeyExpiration = module.Chart{ + ID: "key_expiration_events", + Title: "Expired keys", + Units: "keys/s", + Fam: "keyspace", + Ctx: "redis.key_expiration_events", + Priority: prioKeyExpiration, + Dims: module.Dims{ + {ID: "expired_keys", Name: "expired", Algo: module.Incremental}, + }, + } + chartKeys = module.Chart{ + ID: "keys", + Title: "Keys per database", + Units: "keys", + Fam: "keyspace", + Ctx: "redis.database_keys", + Type: module.Stacked, + Priority: prioKeys, + } + chartExpiresKeys = module.Chart{ + ID: "expires_keys", + Title: "Keys with an expiration per database", + Units: "keys", + Fam: "keyspace", + Ctx: "redis.database_expires_keys", + Type: module.Stacked, + Priority: prioExpiresKeys, + } +) + +var ( + chartConnectedReplicas = module.Chart{ + ID: "connected_replicas", + Title: "Connected replicas", + Units: "replicas", + Fam: "replication", + Ctx: "redis.connected_replicas", + Priority: prioConnectedReplicas, + Dims: module.Dims{ + {ID: "connected_slaves", Name: "connected"}, + }, + } + masterLinkStatusChart = module.Chart{ + ID: "master_last_status", + Title: "Master link status", + Units: "status", + Fam: "replication", + Ctx: "redis.master_link_status", + Priority: prioMasterLinkStatus, + Dims: module.Dims{ + {ID: "master_link_status_up", Name: "up"}, + {ID: "master_link_status_down", Name: "down"}, + }, + } + masterLastIOSinceTimeChart = module.Chart{ + ID: "master_last_io_since_time", + Title: "Time elapsed since the last interaction with master", + Units: "seconds", + Fam: "replication", + Ctx: "redis.master_last_io_since_time", + Priority: prioMasterLastIOSinceTime, + Dims: module.Dims{ + {ID: "master_last_io_seconds_ago", Name: "time"}, + }, + } + masterLinkDownSinceTimeChart = module.Chart{ + ID: "master_link_down_since_stime", + Title: "Time elapsed since the link between master and slave is down", + Units: "seconds", + Fam: "replication", + Ctx: "redis.master_link_down_since_time", + Priority: prioMasterLinkDownSinceTime, + Dims: module.Dims{ + {ID: "master_link_down_since_seconds", Name: "time"}, + }, + } +) + +var ( + chartUptime = module.Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "redis.uptime", + Priority: prioUptime, + Dims: module.Dims{ + {ID: "uptime_in_seconds", Name: "uptime"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/redis/collect.go b/src/go/collectors/go.d.plugin/modules/redis/collect.go new file mode 100644 index 00000000000000..0261646724dd9c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/collect.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package redis + +import ( + "bufio" + "context" + "errors" + "fmt" + "github.com/blang/semver/v4" + "regexp" + "strings" +) + +const precision = 1000 // float values multiplier and dimensions divisor + +func (r *Redis) collect() (map[string]int64, error) { + info, err := r.rdb.Info(context.Background(), "all").Result() + if err != nil { + return nil, err + } + + if r.server == "" { + s, v, err := extractServerVersion(info) + if err != nil { + return nil, fmt.Errorf("can not extract server app and version: %v", err) + } + r.server, r.version = s, v + r.Debugf(`server="%s",version="%s"`, s, v) + } + + if r.server != "redis" { + return nil, fmt.Errorf("unsupported server app, want=redis, got=%s", r.server) + } + + mx := make(map[string]int64) + r.collectInfo(mx, info) + 
r.collectPingLatency(mx) + + return mx, nil +} + +// redis_version:6.0.9 +var reVersion = regexp.MustCompile(`([a-z]+)_version:(\d+\.\d+\.\d+)`) + +func extractServerVersion(info string) (string, *semver.Version, error) { + var versionLine string + for sc := bufio.NewScanner(strings.NewReader(info)); sc.Scan(); { + line := sc.Text() + if strings.Contains(line, "_version") { + versionLine = strings.TrimSpace(line) + break + } + } + if versionLine == "" { + return "", nil, errors.New("no version property") + } + + match := reVersion.FindStringSubmatch(versionLine) + if match == nil { + return "", nil, fmt.Errorf("can not parse version property '%s'", versionLine) + } + + server, version := match[1], match[2] + ver, err := semver.New(version) + if err != nil { + return "", nil, err + } + + return server, ver, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/collect_info.go b/src/go/collectors/go.d.plugin/modules/redis/collect_info.go new file mode 100644 index 00000000000000..fb5e46f37c164d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/collect_info.go @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package redis + +import ( + "bufio" + "regexp" + "strconv" + "strings" + "time" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + infoSectionServer = "# Server" + infoSectionData = "# Data" + infoSectionClients = "# Clients" + infoSectionStats = "# Stats" + infoSectionCommandstats = "# Commandstats" + infoSectionCPU = "# CPU" + infoSectionRepl = "# Replication" + infoSectionKeyspace = "# Keyspace" +) + +var infoSections = map[string]struct{}{ + infoSectionServer: {}, + infoSectionData: {}, + infoSectionClients: {}, + infoSectionStats: {}, + infoSectionCommandstats: {}, + infoSectionCPU: {}, + infoSectionRepl: {}, + infoSectionKeyspace: {}, +} + +func isInfoSection(line string) bool { _, ok := infoSections[line]; return ok } + +func (r *Redis) collectInfo(mx map[string]int64, info string) { + // https://redis.io/commands/info + // Lines can contain a section name (starting with a # character) or a property. + // All the properties are in the form of field:value terminated by \r\n. + + var curSection string + sc := bufio.NewScanner(strings.NewReader(info)) + for sc.Scan() { + line := strings.TrimSpace(sc.Text()) + if len(line) == 0 { + curSection = "" + continue + } + if strings.HasPrefix(line, "#") { + if isInfoSection(line) { + curSection = line + } + continue + } + + field, value, ok := parseProperty(line) + if !ok { + continue + } + + switch { + case curSection == infoSectionCommandstats: + r.collectInfoCommandstatsProperty(mx, field, value) + case curSection == infoSectionKeyspace: + r.collectInfoKeyspaceProperty(mx, field, value) + case field == "rdb_last_bgsave_status": + collectNumericValue(mx, field, convertBgSaveStatus(value)) + case field == "rdb_current_bgsave_time_sec" && value == "-1": + // TODO: https://github.com/netdata/dashboard/issues/198 + // "-1" means there is no on-going bgsave operation; + // netdata has 'Convert seconds to time' feature (enabled by default), + // looks like it doesn't respect negative values and does abs(). + // "-1" => "00:00:01". 
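+			// Clamping to 0 keeps the chart at 00:00:00 while no bgsave is running.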
+ collectNumericValue(mx, field, "0") + case field == "rdb_last_save_time": + v, _ := strconv.ParseInt(value, 10, 64) + mx[field] = int64(time.Since(time.Unix(v, 0)).Seconds()) + case field == "aof_enabled" && value == "1": + r.addAOFChartsOnce.Do(r.addAOFCharts) + case field == "master_link_status": + mx["master_link_status_up"] = boolToInt(value == "up") + mx["master_link_status_down"] = boolToInt(value == "down") + default: + collectNumericValue(mx, field, value) + } + } + + if has(mx, "keyspace_hits", "keyspace_misses") { + mx["keyspace_hit_rate"] = int64(calcKeyspaceHitRate(mx) * precision) + } + if has(mx, "master_last_io_seconds_ago") { + r.addReplSlaveChartsOnce.Do(r.addReplSlaveCharts) + if !has(mx, "master_link_down_since_seconds") { + mx["master_link_down_since_seconds"] = 0 + } + } +} + +var reKeyspaceValue = regexp.MustCompile(`^keys=(\d+),expires=(\d+)`) + +func (r *Redis) collectInfoKeyspaceProperty(ms map[string]int64, field, value string) { + match := reKeyspaceValue.FindStringSubmatch(value) + if match == nil { + return + } + + keys, expires := match[1], match[2] + collectNumericValue(ms, field+"_keys", keys) + collectNumericValue(ms, field+"_expires_keys", expires) + + if !r.collectedDbs[field] { + r.collectedDbs[field] = true + r.addDbToKeyspaceCharts(field) + } +} + +var reCommandstatsValue = regexp.MustCompile(`^calls=(\d+),usec=(\d+),usec_per_call=([\d.]+)`) + +func (r *Redis) collectInfoCommandstatsProperty(ms map[string]int64, field, value string) { + if !strings.HasPrefix(field, "cmdstat_") { + return + } + cmd := field[len("cmdstat_"):] + + match := reCommandstatsValue.FindStringSubmatch(value) + if match == nil { + return + } + + calls, usec, usecPerCall := match[1], match[2], match[3] + collectNumericValue(ms, "cmd_"+cmd+"_calls", calls) + collectNumericValue(ms, "cmd_"+cmd+"_usec", usec) + collectNumericValue(ms, "cmd_"+cmd+"_usec_per_call", usecPerCall) + + if !r.collectedCommands[cmd] { + r.collectedCommands[cmd] = true + r.addCmdToCommandsCharts(cmd) + } +} + +func collectNumericValue(ms map[string]int64, field, value string) { + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return + } + if strings.IndexByte(value, '.') == -1 { + ms[field] = int64(v) + } else { + ms[field] = int64(v * precision) + } +} + +func convertBgSaveStatus(status string) string { + // https://github.com/redis/redis/blob/unstable/src/server.c + // "ok" or "err" + if status == "ok" { + return "0" + } + return "1" +} + +func parseProperty(prop string) (field, value string, ok bool) { + i := strings.IndexByte(prop, ':') + if i == -1 { + return "", "", false + } + field, value = prop[:i], prop[i+1:] + return field, value, field != "" && value != "" +} + +func calcKeyspaceHitRate(ms map[string]int64) float64 { + hits := ms["keyspace_hits"] + misses := ms["keyspace_misses"] + if hits+misses == 0 { + return 0 + } + return float64(hits) * 100 / float64(hits+misses) +} + +func (r *Redis) addCmdToCommandsCharts(cmd string) { + r.addDimToChart(chartCommandsCalls.ID, &module.Dim{ + ID: "cmd_" + cmd + "_calls", + Name: strings.ToUpper(cmd), + Algo: module.Incremental, + }) + r.addDimToChart(chartCommandsUsec.ID, &module.Dim{ + ID: "cmd_" + cmd + "_usec", + Name: strings.ToUpper(cmd), + Algo: module.Incremental, + }) + r.addDimToChart(chartCommandsUsecPerSec.ID, &module.Dim{ + ID: "cmd_" + cmd + "_usec_per_call", + Name: strings.ToUpper(cmd), + Div: precision, + }) +} + +func (r *Redis) addDbToKeyspaceCharts(db string) { + r.addDimToChart(chartKeys.ID, &module.Dim{ + ID: db + "_keys", 
+ Name: db, + }) + r.addDimToChart(chartExpiresKeys.ID, &module.Dim{ + ID: db + "_expires_keys", + Name: db, + }) +} + +func (r *Redis) addDimToChart(chartID string, dim *module.Dim) { + chart := r.Charts().Get(chartID) + if chart == nil { + r.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID) + return + } + if err := chart.AddDim(dim); err != nil { + r.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (r *Redis) addAOFCharts() { + err := r.Charts().Add(chartPersistenceAOFSize.Copy()) + if err != nil { + r.Warningf("error on adding '%s' chart", chartPersistenceAOFSize.ID) + } +} + +func (r *Redis) addReplSlaveCharts() { + if err := r.Charts().Add(masterLinkStatusChart.Copy()); err != nil { + r.Warningf("error on adding '%s' chart", masterLinkStatusChart.ID) + } + if err := r.Charts().Add(masterLastIOSinceTimeChart.Copy()); err != nil { + r.Warningf("error on adding '%s' chart", masterLastIOSinceTimeChart.ID) + } + if err := r.Charts().Add(masterLinkDownSinceTimeChart.Copy()); err != nil { + r.Warningf("error on adding '%s' chart", masterLinkDownSinceTimeChart.ID) + } +} + +func has(m map[string]int64, key string, keys ...string) bool { + switch _, ok := m[key]; len(keys) { + case 0: + return ok + default: + return ok && has(m, keys[0], keys[1:]...) + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/collect_ping_latency.go b/src/go/collectors/go.d.plugin/modules/redis/collect_ping_latency.go new file mode 100644 index 00000000000000..063673c2c2679e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/collect_ping_latency.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package redis + +import ( + "context" + "time" +) + +func (r *Redis) collectPingLatency(mx map[string]int64) { + r.pingSummary.Reset() + + for i := 0; i < r.PingSamples; i++ { + now := time.Now() + _, err := r.rdb.Ping(context.Background()).Result() + elapsed := time.Since(now) + + if err != nil { + r.Debug(err) + continue + } + + r.pingSummary.Observe(float64(elapsed.Microseconds())) + } + + r.pingSummary.WriteTo(mx, "ping_latency", 1, 1) +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/config_schema.json b/src/go/collectors/go.d.plugin/modules/redis/config_schema.json new file mode 100644 index 00000000000000..ed25da9de72418 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/config_schema.json @@ -0,0 +1,44 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/redis job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "ping_samples": { + "type": "integer" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/init.go b/src/go/collectors/go.d.plugin/modules/redis/init.go new file mode 100644 index 00000000000000..ffed274c3e1c20 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/init.go @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package redis + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" + 
"github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/go-redis/redis/v8" +) + +func (r *Redis) validateConfig() error { + if r.Address == "" { + return errors.New("'address' not set") + } + return nil +} + +func (r *Redis) initRedisClient() (*redis.Client, error) { + opts, err := redis.ParseURL(r.Address) + if err != nil { + return nil, err + } + + tlsConfig, err := tlscfg.NewTLSConfig(r.TLSConfig) + if err != nil { + return nil, err + } + + if opts.TLSConfig != nil && tlsConfig != nil { + tlsConfig.ServerName = opts.TLSConfig.ServerName + } + + if opts.Username == "" && r.Username != "" { + opts.Username = r.Username + } + if opts.Password == "" && r.Password != "" { + opts.Password = r.Password + } + + opts.PoolSize = 1 + opts.TLSConfig = tlsConfig + opts.DialTimeout = r.Timeout.Duration + opts.ReadTimeout = r.Timeout.Duration + opts.WriteTimeout = r.Timeout.Duration + + return redis.NewClient(opts), nil +} + +func (r *Redis) initCharts() (*module.Charts, error) { + return redisCharts.Copy(), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/integrations/redis.md b/src/go/collectors/go.d.plugin/modules/redis/integrations/redis.md new file mode 100644 index 00000000000000..e7cc180d8228ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/integrations/redis.md @@ -0,0 +1,252 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/redis/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/redis/metadata.yaml" +sidebar_label: "Redis" +learn_status: "Published" +learn_rel_path: "Data Collection/Databases" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Redis + + +<img src="https://netdata.cloud/img/redis.svg" width="150"/> + + +Plugin: go.d.plugin +Module: redis + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more. + + +It connects to the Redis instance via a TCP or UNIX socket and executes the following commands: + +- [INFO ALL](https://redis.io/commands/info) +- [PING](https://redis.io/commands/ping/) + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets: + +- 127.0.0.1:6379 +- /tmp/redis.sock +- /var/run/redis/redis.sock +- /var/lib/redis/redis.sock + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Redis instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| redis.connections | accepted, rejected | connections/s |
+| redis.clients | connected, blocked, tracking, in_timeout_table | clients |
+| redis.ping_latency | min, max, avg | seconds |
+| redis.commands | processed | commands/s |
+| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |
+| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |
+| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |
+| redis.key_eviction_events | evicted | keys/s |
+| redis.net | received, sent | kilobits/s |
+| redis.rdb_changes | changes | operations |
+| redis.bgsave_now | current_bgsave_time | seconds |
+| redis.bgsave_health | last_bgsave | status |
+| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |
+| redis.aof_file_size | current, base | bytes |
+| redis.commands_calls | a dimension per command | calls/s |
+| redis.commands_usec | a dimension per command | microseconds |
+| redis.commands_usec_per_sec | a dimension per command | microseconds/s |
+| redis.key_expiration_events | expired | keys/s |
+| redis.database_keys | a dimension per database | keys |
+| redis.database_expires_keys | a dimension per database | keys |
+| redis.connected_replicas | connected | replicas |
+| redis.master_link_status | up, down | status |
+| redis.master_last_io_since_time | time | seconds |
+| redis.master_link_down_since_time | time | seconds |
+| redis.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |
+| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |
+| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |
+| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave went down |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/redis.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/redis.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Redis server address. | redis://@localhost:6379 | yes |
+| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |
+| username | Username used for authentication. | | no |
+| password | Password used for authentication. | | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://@127.0.0.1:6379'
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'unix://@/tmp/redis.sock'
+
+```
+</details>
+
+##### TCP socket with password
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://:password@127.0.0.1:6379'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 'redis://:password@127.0.0.1:6379'
+
+  - name: remote
+    address: 'redis://user:password@203.0.113.0:6379'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m redis
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/redis/metadata.yaml b/src/go/collectors/go.d.plugin/modules/redis/metadata.yaml
new file mode 100644
index 00000000000000..2d94017d6c88fe
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/redis/metadata.yaml
@@ -0,0 +1,343 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-redis
+      plugin_name: go.d.plugin
+      module_name: redis
+      monitored_instance:
+        name: Redis
+        link: https://redis.com/
+        categories:
+          - data-collection.database-servers
+        icon_filename: redis.svg
+      related_resources:
+        integrations:
+          list:
+            - plugin_name: apps.plugin
+              module_name: apps
+            - plugin_name: cgroups.plugin
+              module_name: cgroups
+      alternative_monitored_instances: []
+      info_provided_to_referring_integrations:
+        description: ""
+      keywords:
+        - redis
+        - databases
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.
+        method_description: |
+          It connects to the Redis instance via a TCP or UNIX socket and executes the following commands:
+
+          - [INFO ALL](https://redis.io/commands/info)
+          - [PING](https://redis.io/commands/ping/)
+      default_behavior:
+        auto_detection:
+          description: |
+            By default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:
+
+            - 127.0.0.1:6379
+            - /tmp/redis.sock
+            - /var/run/redis/redis.sock
+            - /var/lib/redis/redis.sock
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/redis.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 5
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: Redis server address.
+              default_value: redis://@localhost:6379
+              required: true
+              details: |
+                There are two connection types: by TCP socket and by UNIX socket.
+
+                - TCP connection: `redis://<user>:<password>@<host>:<port>/<db_number>`
+                - UNIX connection: `unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>`
+            - name: timeout
+              description: Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username used for authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password used for authentication.
+              default_value: ""
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certificate authority that the client uses when verifying server certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: TCP socket
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 'redis://@127.0.0.1:6379'
+            - name: Unix socket
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 'unix://@/tmp/redis.sock'
+            - name: TCP socket with password
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    address: 'redis://:password@127.0.0.1:6379'
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    address: 'redis://:password@127.0.0.1:6379'
+
+                  - name: remote
+                    address: 'redis://user:password@203.0.113.0:6379'
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: redis_connections_rejected
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+        metric: redis.connections
+        info: connections rejected because of maxclients limit in the last minute
+      - name: redis_bgsave_slow
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+        metric: redis.bgsave_now
+        info: duration of the on-going RDB save operation
+      - name: redis_bgsave_broken
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+        metric: redis.bgsave_health
+        info: 'status of the last RDB save operation (0: ok, 1: error)'
+      - name: redis_master_link_down
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+        metric: redis.master_link_down_since_time
+        info: time elapsed since the link between master and slave went down
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: redis.connections
+              description: Accepted and rejected (maxclients limit) connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: accepted
+                - name: rejected
+            - name: redis.clients
+              description: Clients
+              unit: clients
+              chart_type: line
+              dimensions:
+                - name: connected
+                - name: blocked
+                - name: tracking
+                - name: in_timeout_table
+            - name: redis.ping_latency
+              description: Ping latency
+              unit: seconds
+              chart_type: area
+              dimensions:
+                - name: min
+                - name: max
+                - name: avg
+            - name: redis.commands
+              description: Processed commands
+              unit: commands/s
+              chart_type: line
+              dimensions:
+                - name: processed
+            - name: redis.keyspace_lookup_hit_rate
+              description: Keys lookup hit rate
+              unit: percentage
+              chart_type: line
+              dimensions:
+                - name: lookup_hit_rate
+            - name: redis.memory
+              description: Memory usage
+              unit: bytes
+              chart_type: area
+              dimensions:
+                - name: max
+                - name: used
+                - name: rss
+                - name: peak
+                - name: dataset
+                - name: lua
+                - name: scripts
+            - name: redis.mem_fragmentation_ratio
+              description: Ratio between used_memory_rss and used_memory
+              unit: ratio
+              chart_type: line
+              dimensions:
+                - name: mem_fragmentation
+            - name: redis.key_eviction_events
+              description: Evicted keys due to maxmemory limit
+              unit: keys/s
+              chart_type: line
+              dimensions:
+                - name: evicted
+            - name: redis.net
+              description: Bandwidth
+              unit: kilobits/s
+              chart_type: area
+              dimensions:
+                - name: received
+                - name: sent
+            - name: redis.rdb_changes
+              description: Operations that produced changes since the last SAVE or BGSAVE
+              unit: operations
+              chart_type: line
+              dimensions:
+                - name: changes
+            - name: redis.bgsave_now
+              description: Duration of the on-going RDB save operation if any
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: current_bgsave_time
+            - name: redis.bgsave_health
+              description: 'Status of the last RDB save operation (0: ok, 1: err)'
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: last_bgsave
+            - name: redis.bgsave_last_rdb_save_since_time
+              description: Time elapsed since the last successful RDB save
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: last_bgsave_time
+            - name: redis.aof_file_size
+              description: AOF file size
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: current
+                - name: base
+            - name: redis.commands_calls
+              description: Calls per command
+              unit: calls/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per command
+            - name: redis.commands_usec
+              description: Total CPU time consumed by the commands
+              unit: microseconds
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per command
+            - name: redis.commands_usec_per_sec
+              description: Average CPU consumed per command execution
+              unit: microseconds/s
+              chart_type: line
+              dimensions:
+                - name: a dimension per command
+            - name: redis.key_expiration_events
+              description: Expired keys
+              unit: keys/s
+              chart_type: line
+              dimensions:
+                - name: expired
+            - name: redis.database_keys
+              description: Keys per database
+              unit: keys
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per database
+            - name: redis.database_expires_keys
+              description: Keys with an expiration per database
+              unit: keys
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per database
+            - name: redis.connected_replicas
+              description: Connected replicas
+              unit: replicas
+              chart_type: line
+              dimensions:
+                - name: connected
+            - name: redis.master_link_status
+              description: Master link status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: up
+                - name: down
+            - name: redis.master_last_io_since_time
+              description: Time elapsed since the last interaction with master
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: redis.master_link_down_since_time
+              description: Time elapsed since the link between master and slave went down
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: redis.uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: uptime
diff --git a/src/go/collectors/go.d.plugin/modules/redis/redis.go b/src/go/collectors/go.d.plugin/modules/redis/redis.go
new file mode 100644
index 00000000000000..2117cc2ce8ef27
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/redis/redis.go
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+	"context"
+	_ "embed"
+	"sync"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/metrics"
+	"github.com/netdata/go.d.plugin/pkg/tlscfg"
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/blang/semver/v4"
+	"github.com/go-redis/redis/v8"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("redis", module.Creator{
+		Create:          func() module.Module { return New() },
+		JobConfigSchema: configSchema,
+	})
+}
+
+func New() *Redis {
+	return &Redis{
+		Config: Config{
+			Address:     "redis://@localhost:6379",
+			Timeout:     web.Duration{Duration: time.Second},
+			PingSamples: 5,
+		},
+
+		addAOFChartsOnce:       &sync.Once{},
+		addReplSlaveChartsOnce: &sync.Once{},
+		pingSummary:            metrics.NewSummary(),
+		collectedCommands:      make(map[string]bool),
+		collectedDbs:           make(map[string]bool),
+	}
+}
+
+type Config struct {
+	Address          string       `yaml:"address"`
+	Password         string       `yaml:"password"`
+	Username         string       `yaml:"username"`
+	Timeout          web.Duration `yaml:"timeout"`
+	PingSamples      int          `yaml:"ping_samples"`
+	tlscfg.TLSConfig `yaml:",inline"`
+}
+
+type (
+	Redis struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		charts *module.Charts
+
+		rdb redisClient
+
+		server  string
+		version *semver.Version
+
+		addAOFChartsOnce       *sync.Once
+		addReplSlaveChartsOnce *sync.Once
+
+		pingSummary metrics.Summary
+
+		collectedCommands map[string]bool
+		collectedDbs      map[string]bool
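+		// The two maps above guard against adding the same per-command and
+		// per-database dimension to the charts more than once.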
+ } + redisClient interface { + Info(ctx context.Context, section ...string) *redis.StringCmd + Ping(context.Context) *redis.StatusCmd + Close() error + } +) + +func (r *Redis) Init() bool { + err := r.validateConfig() + if err != nil { + r.Errorf("config validation: %v", err) + return false + } + + rdb, err := r.initRedisClient() + if err != nil { + r.Errorf("init redis client: %v", err) + return false + } + r.rdb = rdb + + charts, err := r.initCharts() + if err != nil { + r.Errorf("init charts: %v", err) + return false + } + r.charts = charts + + return true +} + +func (r *Redis) Check() bool { + return len(r.Collect()) > 0 +} + +func (r *Redis) Charts() *module.Charts { + return r.charts +} + +func (r *Redis) Collect() map[string]int64 { + ms, err := r.collect() + if err != nil { + r.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (r *Redis) Cleanup() { + if r.rdb == nil { + return + } + err := r.rdb.Close() + if err != nil { + r.Warningf("cleanup: error on closing redis client [%s]: %v", r.Address, err) + } + r.rdb = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/redis_test.go b/src/go/collectors/go.d.plugin/modules/redis/redis_test.go new file mode 100644 index 00000000000000..9ee2f54f063eef --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/redis_test.go @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package redis + +import ( + "context" + "errors" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/go-redis/redis/v8" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + pikaInfoAll, _ = os.ReadFile("testdata/pika/info_all.txt") + v609InfoAll, _ = os.ReadFile("testdata/v6.0.9/info_all.txt") +) + +func Test_Testdata(t *testing.T) { + for name, data := range map[string][]byte{ + "pikaInfoAll": pikaInfoAll, + "v609InfoAll": v609InfoAll, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*Redis)(nil), New()) +} + +func TestRedis_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'address'": { + wantFail: true, + config: Config{Address: ""}, + }, + "fails on invalid 'address' format": { + wantFail: true, + config: Config{Address: "127.0.0.1:6379"}, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + Address: "redis://127.0.0.1:6379", + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rdb := New() + rdb.Config = test.config + + if test.wantFail { + assert.False(t, rdb.Init()) + } else { + assert.True(t, rdb.Init()) + } + }) + } +} + +func TestRedis_Check(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Redis + wantFail bool + }{ + "success on valid response v6.0.9": { + prepare: prepareRedisV609, + }, + "fails on error on Info": { + wantFail: true, + prepare: prepareRedisErrorOnInfo, + }, + "fails on response from not Redis instance": { + wantFail: true, + prepare: prepareRedisWithPikaMetrics, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rdb := test.prepare(t) + + if test.wantFail { + assert.False(t, rdb.Check()) + } else { + assert.True(t, rdb.Check()) + } + }) + } +} + +func TestRedis_Charts(t *testing.T) { + rdb := New() + require.True(t, rdb.Init()) + + assert.NotNil(t, rdb.Charts()) +} + 
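+// TestRedis_Collect_MinimalPayload is a sketch showing how mockRedisClient can
+// inject an arbitrary INFO payload: the version line alone satisfies server
+// detection, so Collect returns only the PING latency summary here.
+func TestRedis_Collect_MinimalPayload(t *testing.T) {
+	rdb := New()
+	require.True(t, rdb.Init())
+	rdb.rdb = &mockRedisClient{result: []byte("# Server\nredis_version:6.0.9\n")}
+
+	assert.NotEmpty(t, rdb.Collect())
+}
+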
+func TestRedis_Cleanup(t *testing.T) { + rdb := New() + assert.NotPanics(t, rdb.Cleanup) + + require.True(t, rdb.Init()) + m := &mockRedisClient{} + rdb.rdb = m + + rdb.Cleanup() + + assert.True(t, m.calledClose) +} + +func TestRedis_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Redis + wantCollected map[string]int64 + }{ + "success on valid response v6.0.9": { + prepare: prepareRedisV609, + wantCollected: map[string]int64{ + "active_defrag_hits": 0, + "active_defrag_key_hits": 0, + "active_defrag_key_misses": 0, + "active_defrag_misses": 0, + "active_defrag_running": 0, + "allocator_active": 1208320, + "allocator_allocated": 903408, + "allocator_frag_bytes": 304912, + "allocator_frag_ratio": 1340, + "allocator_resident": 3723264, + "allocator_rss_bytes": 2514944, + "allocator_rss_ratio": 3080, + "aof_base_size": 116, + "aof_buffer_length": 0, + "aof_current_rewrite_time_sec": -1, + "aof_current_size": 294, + "aof_delayed_fsync": 0, + "aof_enabled": 0, + "aof_last_cow_size": 0, + "aof_last_rewrite_time_sec": -1, + "aof_pending_bio_fsync": 0, + "aof_pending_rewrite": 0, + "aof_rewrite_buffer_length": 0, + "aof_rewrite_in_progress": 0, + "aof_rewrite_scheduled": 0, + "arch_bits": 64, + "blocked_clients": 0, + "client_recent_max_input_buffer": 8, + "client_recent_max_output_buffer": 0, + "clients_in_timeout_table": 0, + "cluster_enabled": 0, + "cmd_command_calls": 2, + "cmd_command_usec": 2182, + "cmd_command_usec_per_call": 1091000, + "cmd_get_calls": 2, + "cmd_get_usec": 29, + "cmd_get_usec_per_call": 14500, + "cmd_hello_calls": 1, + "cmd_hello_usec": 15, + "cmd_hello_usec_per_call": 15000, + "cmd_hmset_calls": 2, + "cmd_hmset_usec": 408, + "cmd_hmset_usec_per_call": 204000, + "cmd_info_calls": 132, + "cmd_info_usec": 37296, + "cmd_info_usec_per_call": 282550, + "cmd_ping_calls": 19, + "cmd_ping_usec": 286, + "cmd_ping_usec_per_call": 15050, + "cmd_set_calls": 3, + "cmd_set_usec": 140, + "cmd_set_usec_per_call": 46670, + "configured_hz": 10, + "connected_clients": 1, + "connected_slaves": 0, + "db0_expires_keys": 0, + "db0_keys": 4, + "evicted_keys": 0, + "expire_cycle_cpu_milliseconds": 28362, + "expired_keys": 0, + "expired_stale_perc": 0, + "expired_time_cap_reached_count": 0, + "hz": 10, + "instantaneous_input_kbps": 0, + "instantaneous_ops_per_sec": 0, + "instantaneous_output_kbps": 0, + "io_threaded_reads_processed": 0, + "io_threaded_writes_processed": 0, + "io_threads_active": 0, + "keyspace_hit_rate": 100000, + "keyspace_hits": 2, + "keyspace_misses": 0, + "latest_fork_usec": 810, + "lazyfree_pending_objects": 0, + "loading": 0, + "lru_clock": 13181377, + "master_repl_offset": 0, + "master_replid2": 0, + "maxmemory": 0, + "mem_aof_buffer": 0, + "mem_clients_normal": 0, + "mem_clients_slaves": 0, + "mem_fragmentation_bytes": 3185848, + "mem_fragmentation_ratio": 4960, + "mem_not_counted_for_evict": 0, + "mem_replication_backlog": 0, + "migrate_cached_sockets": 0, + "module_fork_in_progress": 0, + "module_fork_last_cow_size": 0, + "number_of_cached_scripts": 0, + "ping_latency_avg": 0, + "ping_latency_count": 5, + "ping_latency_max": 0, + "ping_latency_min": 0, + "ping_latency_sum": 0, + "process_id": 1, + "pubsub_channels": 0, + "pubsub_patterns": 0, + "rdb_bgsave_in_progress": 0, + "rdb_changes_since_last_save": 0, + "rdb_current_bgsave_time_sec": 0, + "rdb_last_bgsave_status": 0, + "rdb_last_bgsave_time_sec": 0, + "rdb_last_cow_size": 290816, + "rdb_last_save_time": 56978305, + "redis_git_dirty": 0, + "redis_git_sha1": 0, + 
"rejected_connections": 0, + "repl_backlog_active": 0, + "repl_backlog_first_byte_offset": 0, + "repl_backlog_histlen": 0, + "repl_backlog_size": 1048576, + "rss_overhead_bytes": 266240, + "rss_overhead_ratio": 1070, + "second_repl_offset": -1, + "slave_expires_tracked_keys": 0, + "sync_full": 0, + "sync_partial_err": 0, + "sync_partial_ok": 0, + "tcp_port": 6379, + "total_commands_processed": 161, + "total_connections_received": 87, + "total_net_input_bytes": 2301, + "total_net_output_bytes": 507187, + "total_reads_processed": 250, + "total_system_memory": 2084032512, + "total_writes_processed": 163, + "tracking_clients": 0, + "tracking_total_items": 0, + "tracking_total_keys": 0, + "tracking_total_prefixes": 0, + "unexpected_error_replies": 0, + "uptime_in_days": 2, + "uptime_in_seconds": 252812, + "used_cpu_sys": 630829, + "used_cpu_sys_children": 20, + "used_cpu_user": 188394, + "used_cpu_user_children": 2, + "used_memory": 867160, + "used_memory_dataset": 63816, + "used_memory_lua": 37888, + "used_memory_overhead": 803344, + "used_memory_peak": 923360, + "used_memory_rss": 3989504, + "used_memory_scripts": 0, + "used_memory_startup": 803152, + }, + }, + "fails on error on Info": { + prepare: prepareRedisErrorOnInfo, + }, + "fails on response from not Redis instance": { + prepare: prepareRedisWithPikaMetrics, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rdb := test.prepare(t) + + ms := rdb.Collect() + + copyTimeRelatedMetrics(ms, test.wantCollected) + + assert.Equal(t, test.wantCollected, ms) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, rdb, ms) + ensureCollectedCommandsAddedToCharts(t, rdb) + ensureCollectedDbsAddedToCharts(t, rdb) + } + }) + } +} + +func prepareRedisV609(t *testing.T) *Redis { + rdb := New() + require.True(t, rdb.Init()) + rdb.rdb = &mockRedisClient{ + result: v609InfoAll, + } + return rdb +} + +func prepareRedisErrorOnInfo(t *testing.T) *Redis { + rdb := New() + require.True(t, rdb.Init()) + rdb.rdb = &mockRedisClient{ + errOnInfo: true, + } + return rdb +} + +func prepareRedisWithPikaMetrics(t *testing.T) *Redis { + rdb := New() + require.True(t, rdb.Init()) + rdb.rdb = &mockRedisClient{ + result: pikaInfoAll, + } + return rdb +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rdb *Redis, ms map[string]int64) { + for _, chart := range *rdb.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := ms[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := ms[v.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) + } + } +} + +func ensureCollectedCommandsAddedToCharts(t *testing.T, rdb *Redis) { + for _, id := range []string{ + chartCommandsCalls.ID, + chartCommandsUsec.ID, + chartCommandsUsecPerSec.ID, + } { + chart := rdb.Charts().Get(id) + require.NotNilf(t, chart, "'%s' chart is not in charts", id) + assert.Lenf(t, chart.Dims, len(rdb.collectedCommands), + "'%s' chart unexpected number of dimensions", id) + } +} + +func ensureCollectedDbsAddedToCharts(t *testing.T, rdb *Redis) { + for _, id := range []string{ + chartKeys.ID, + chartExpiresKeys.ID, + } { + chart := rdb.Charts().Get(id) + require.NotNilf(t, chart, "'%s' chart is not in charts", id) + assert.Lenf(t, chart.Dims, len(rdb.collectedDbs), + "'%s' chart unexpected number of dimensions", id) + } +} + +func copyTimeRelatedMetrics(dst, src map[string]int64) { + for k, v := 
range src { + switch { + case k == "rdb_last_save_time", + strings.HasPrefix(k, "ping_latency"): + + if _, ok := dst[k]; ok { + dst[k] = v + } + } + } +} + +type mockRedisClient struct { + errOnInfo bool + result []byte + calledClose bool +} + +func (m *mockRedisClient) Info(_ context.Context, _ ...string) (cmd *redis.StringCmd) { + if m.errOnInfo { + cmd = redis.NewStringResult("", errors.New("error on Info")) + } else { + cmd = redis.NewStringResult(string(m.result), nil) + } + return cmd +} + +func (m *mockRedisClient) Ping(_ context.Context) (cmd *redis.StatusCmd) { + return redis.NewStatusResult("PONG", nil) +} + +func (m *mockRedisClient) Close() error { + m.calledClose = true + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/redis/testdata/pika/info_all.txt b/src/go/collectors/go.d.plugin/modules/redis/testdata/pika/info_all.txt new file mode 100644 index 00000000000000..a2bebf720a833a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/testdata/pika/info_all.txt @@ -0,0 +1,67 @@ +$1315 +# Server +pika_version:3.4.0 +pika_git_sha:bd30511bf82038c2c6531b3d84872c9825fe836a +pika_build_compile_date: Dec 1 2020 +os:Linux 5.4.39-linuxkit x86_64 +arch_bits:64 +process_id:1 +tcp_port:9221 +thread_num:1 +sync_thread_num:6 +uptime_in_seconds:72089 +uptime_in_days:2 +config_file:/pika/conf/pika.conf +server_id:1 + +# Data +db_size:473558 +db_size_human:0M +log_size:4272095 +log_size_human:4M +compression:snappy +used_memory:8430 +used_memory_human:0M +db_memtable_usage:8304 +db_tablereader_usage:126 +db_fatal:0 +db_fatal_msg:NULL + +# Clients +connected_clients:1 + +# Stats +total_connections_received:14 +instantaneous_ops_per_sec:0 +total_commands_processed:14 +is_bgsaving:No +is_scaning_keyspace:No +is_compact:No +compact_cron: +compact_interval: + +# Command_Exec_Count +INFO:9 +GET:2 +SET:1 +HGETALL:1 +HMSET:1 + +# CPU +used_cpu_sys:3638.63 +used_cpu_user:494.58 +used_cpu_sys_children:0.04 +used_cpu_user_children:0.02 + +# Replication(MASTER) +role:master +connected_slaves:0 +db0 binlog_offset=0 440,safety_purge=none + +# Keyspace +# Time:1970-01-01 08:00:00 +db0 Strings_keys=0, expires=0, invalid_keys=0 +db0 Hashes_keys=0, expires=0, invalid_keys=0 +db0 Lists_keys=0, expires=0, invalid_keys=0 +db0 Zsets_keys=0, expires=0, invalid_keys=0 +db0 Sets_keys=0, expires=0, invalid_keys=0 diff --git a/src/go/collectors/go.d.plugin/modules/redis/testdata/v6.0.9/info_all.txt b/src/go/collectors/go.d.plugin/modules/redis/testdata/v6.0.9/info_all.txt new file mode 100644 index 00000000000000..9f161898280351 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/redis/testdata/v6.0.9/info_all.txt @@ -0,0 +1,172 @@ +$4050 +# Server +redis_version:6.0.9 +redis_git_sha1:00000000 +redis_git_dirty:0 +redis_build_id:12c354e6793cb936 +redis_mode:standalone +os:Linux 5.4.39-linuxkit x86_64 +arch_bits:64 +multiplexing_api:epoll +atomicvar_api:atomic-builtin +gcc_version:8.3.0 +process_id:1 +run_id:5d97fd948bbf6cb68458685fc747f9f9019c3fc4 +tcp_port:6379 +uptime_in_seconds:252812 +uptime_in_days:2 +hz:10 +configured_hz:10 +lru_clock:13181377 +executable:/data/redis-server +config_file: +io_threads_active:0 + +# Clients +connected_clients:1 +client_recent_max_input_buffer:8 +client_recent_max_output_buffer:0 +blocked_clients:0 +tracking_clients:0 +clients_in_timeout_table:0 + +# Memory +used_memory:867160 +used_memory_human:846.84K +used_memory_rss:3989504 +used_memory_rss_human:3.80M +used_memory_peak:923360 +used_memory_peak_human:901.72K +used_memory_peak_perc:93.91% 
+used_memory_overhead:803344 +used_memory_startup:803152 +used_memory_dataset:63816 +used_memory_dataset_perc:99.70% +allocator_allocated:903408 +allocator_active:1208320 +allocator_resident:3723264 +total_system_memory:2084032512 +total_system_memory_human:1.94G +used_memory_lua:37888 +used_memory_lua_human:37.00K +used_memory_scripts:0 +used_memory_scripts_human:0B +number_of_cached_scripts:0 +maxmemory:0 +maxmemory_human:0B +maxmemory_policy:noeviction +allocator_frag_ratio:1.34 +allocator_frag_bytes:304912 +allocator_rss_ratio:3.08 +allocator_rss_bytes:2514944 +rss_overhead_ratio:1.07 +rss_overhead_bytes:266240 +mem_fragmentation_ratio:4.96 +mem_fragmentation_bytes:3185848 +mem_not_counted_for_evict:0 +mem_replication_backlog:0 +mem_clients_slaves:0 +mem_clients_normal:0 +mem_aof_buffer:0 +mem_allocator:jemalloc-5.1.0 +active_defrag_running:0 +lazyfree_pending_objects:0 + +# Persistence +loading:0 +rdb_changes_since_last_save:0 +rdb_bgsave_in_progress:0 +rdb_last_save_time:1606951667 +rdb_last_bgsave_status:ok +rdb_last_bgsave_time_sec:0 +rdb_current_bgsave_time_sec:-1 +rdb_last_cow_size:290816 +aof_enabled:0 +aof_rewrite_in_progress:0 +aof_rewrite_scheduled:0 +aof_last_rewrite_time_sec:-1 +aof_current_rewrite_time_sec:-1 +aof_last_bgrewrite_status:ok +aof_last_write_status:ok +aof_last_cow_size:0 +module_fork_in_progress:0 +module_fork_last_cow_size:0 +aof_current_size:294 +aof_base_size:116 +aof_pending_rewrite:0 +aof_buffer_length:0 +aof_rewrite_buffer_length:0 +aof_pending_bio_fsync:0 +aof_delayed_fsync:0 + +# Stats +total_connections_received:87 +total_commands_processed:161 +instantaneous_ops_per_sec:0 +total_net_input_bytes:2301 +total_net_output_bytes:507187 +instantaneous_input_kbps:0.00 +instantaneous_output_kbps:0.00 +rejected_connections:0 +sync_full:0 +sync_partial_ok:0 +sync_partial_err:0 +expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +expire_cycle_cpu_milliseconds:28362 +evicted_keys:0 +keyspace_hits:2 +keyspace_misses:0 +pubsub_channels:0 +pubsub_patterns:0 +latest_fork_usec:810 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 +tracking_total_keys:0 +tracking_total_items:0 +tracking_total_prefixes:0 +unexpected_error_replies:0 +total_reads_processed:250 +total_writes_processed:163 +io_threaded_reads_processed:0 +io_threaded_writes_processed:0 + +# Replication +role:master +connected_slaves:0 +master_replid:3f0ad529c9c59a17834bde8ae85f09f77609ecb1 +master_replid2:0000000000000000000000000000000000000000 +master_repl_offset:0 +second_repl_offset:-1 +repl_backlog_active:0 +repl_backlog_size:1048576 +repl_backlog_first_byte_offset:0 +repl_backlog_histlen:0 + +# CPU +used_cpu_sys:630.829091 +used_cpu_user:188.394908 +used_cpu_sys_children:0.020626 +used_cpu_user_children:0.002731 + +# Modules + +# Commandstats +cmdstat_set:calls=3,usec=140,usec_per_call=46.67 +cmdstat_command:calls=2,usec=2182,usec_per_call=1091.00 +cmdstat_get:calls=2,usec=29,usec_per_call=14.50 +cmdstat_hmset:calls=2,usec=408,usec_per_call=204.00 +cmdstat_hello:calls=1,usec=15,usec_per_call=15.00 +cmdstat_ping:calls=19,usec=286,usec_per_call=15.05 +cmdstat_info:calls=132,usec=37296,usec_per_call=282.55 + +# Cluster +cluster_enabled:0 + +# Keyspace +db0:keys=4,expires=0,avg_ttl=0 diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/README.md b/src/go/collectors/go.d.plugin/modules/scaleio/README.md new file mode 120000 index 00000000000000..1836d28053a5bf --- /dev/null 
+++ b/src/go/collectors/go.d.plugin/modules/scaleio/README.md @@ -0,0 +1 @@ +integrations/dell_emc_scaleio.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/charts.go b/src/go/collectors/go.d.plugin/modules/scaleio/charts.go new file mode 100644 index 00000000000000..c361c3ef829144 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/charts.go @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/modules/scaleio/client" + + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + // Charts is an alias for module.Charts. + Charts = module.Charts + // Dims is an alias for module.Dims. + Dims = module.Dims + // Vars is an alias for module.Vars. + Vars = module.Vars +) + +var ( + prioStoragePool = module.Priority + len(systemCharts) + 10 + prioSdc = prioStoragePool + len(storagePoolCharts) + 10 +) + +var systemCharts = Charts{ + // Capacity + { + ID: "system_capacity_total", + Title: "Total Capacity", + Units: "KiB", + Fam: "capacity", + Ctx: "scaleio.system_capacity_total", + Dims: Dims{ + {ID: "system_capacity_max_capacity", Name: "total"}, + }, + }, + { + ID: "system_capacity_in_use", + Title: "Capacity In Use", + Units: "KiB", + Fam: "capacity", + Ctx: "scaleio.system_capacity_in_use", + Dims: Dims{ + {ID: "system_capacity_in_use", Name: "in_use"}, + }, + }, + { + ID: "system_capacity_usage", + Title: "Capacity Usage", + Units: "KiB", + Fam: "capacity", + Type: module.Stacked, + Ctx: "scaleio.system_capacity_usage", + Dims: Dims{ + {ID: "system_capacity_thick_in_use", Name: "thick"}, + {ID: "system_capacity_decreased", Name: "decreased"}, + {ID: "system_capacity_thin_in_use", Name: "thin"}, + {ID: "system_capacity_snapshot", Name: "snapshot"}, + {ID: "system_capacity_spare", Name: "spare"}, + {ID: "system_capacity_unused", Name: "unused"}, + }, + }, + { + ID: "system_capacity_available_volume_allocation", + Title: "Available For Volume Allocation", + Units: "KiB", + Fam: "capacity", + Ctx: "scaleio.system_capacity_available_volume_allocation", + Dims: Dims{ + {ID: "system_capacity_available_for_volume_allocation", Name: "available"}, + }, + }, + { + ID: "system_capacity_health_state", + Title: "Capacity Health State", + Units: "KiB", + Fam: "health", + Type: module.Stacked, + Ctx: "scaleio.system_capacity_health_state", + Dims: Dims{ + {ID: "system_capacity_protected", Name: "protected"}, + {ID: "system_capacity_degraded", Name: "degraded"}, + {ID: "system_capacity_in_maintenance", Name: "in_maintenance"}, + {ID: "system_capacity_failed", Name: "failed"}, + {ID: "system_capacity_unreachable_unused", Name: "unavailable"}, + }, + }, + // I/O Workload BW + { + ID: "system_workload_primary_bandwidth_total", + Title: "Primary Backend Bandwidth Total (Read and Write)", + Units: "KiB/s", + Fam: "workload", + Ctx: "scaleio.system_workload_primary_bandwidth_total", + Dims: Dims{ + {ID: "system_backend_primary_bandwidth_read_write", Name: "total", Div: 1000}, + }, + }, + { + ID: "system_workload_primary_bandwidth", + Title: "Primary Backend Bandwidth", + Units: "KiB/s", + Fam: "workload", + Ctx: "scaleio.system_workload_primary_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: "system_backend_primary_bandwidth_read", Name: "read", Div: 1000}, + {ID: "system_backend_primary_bandwidth_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + // I/O Workload IOPS + { + ID: "system_workload_primary_iops_total", + Title: "Primary Backend IOPS Total (Read and 
Write)", + Units: "iops/s", + Fam: "workload", + Ctx: "scaleio.system_workload_primary_iops_total", + Dims: Dims{ + {ID: "system_backend_primary_iops_read_write", Name: "total", Div: 1000}, + }, + }, + { + ID: "system_workload_primary_iops", + Title: "Primary Backend IOPS", + Units: "iops/s", + Fam: "workload", + Ctx: "scaleio.system_workload_primary_iops", + Type: module.Area, + Dims: Dims{ + {ID: "system_backend_primary_iops_read", Name: "read", Div: 1000}, + {ID: "system_backend_primary_iops_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "system_workload_primary_io_size_total", + Title: "Primary Backend I/O Size Total (Read and Write)", + Units: "KiB", + Fam: "workload", + Ctx: "scaleio.system_workload_primary_io_size_total", + Dims: Dims{ + {ID: "system_backend_primary_io_size_read_write", Name: "io_size", Div: 1000}, + }, + }, + // Rebalance + { + ID: "system_rebalance", + Title: "Rebalance", + Units: "KiB/s", + Fam: "rebalance", + Type: module.Area, + Ctx: "scaleio.system_rebalance", + Dims: Dims{ + {ID: "system_rebalance_bandwidth_read", Name: "read", Div: 1000}, + {ID: "system_rebalance_bandwidth_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "system_rebalance_left", + Title: "Rebalance Pending Capacity", + Units: "KiB", + Fam: "rebalance", + Ctx: "scaleio.system_rebalance_left", + Dims: Dims{ + {ID: "system_rebalance_pending_capacity_in_Kb", Name: "left"}, + }, + }, + { + ID: "system_rebalance_time_until_finish", + Title: "Rebalance Approximate Time Until Finish", + Units: "seconds", + Fam: "rebalance", + Ctx: "scaleio.system_rebalance_time_until_finish", + Dims: Dims{ + {ID: "system_rebalance_time_until_finish", Name: "time"}, + }, + }, + // Rebuild + { + ID: "system_rebuild", + Title: "Rebuild Bandwidth Total (Forward, Backward and Normal)", + Units: "KiB/s", + Fam: "rebuild", + Ctx: "scaleio.system_rebuild", + Type: module.Area, + Dims: Dims{ + {ID: "system_rebuild_total_bandwidth_read", Name: "read", Div: 1000}, + {ID: "system_rebuild_total_bandwidth_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "system_rebuild_left", + Title: "Rebuild Pending Capacity Total (Forward, Backward and Normal)", + Units: "KiB", + Fam: "rebuild", + Ctx: "scaleio.system_rebuild_left", + Dims: Dims{ + {ID: "system_rebuild_total_pending_capacity_in_Kb", Name: "left"}, + }, + }, + // Components + { + ID: "system_defined_components", + Title: "Components", + Units: "components", + Fam: "components", + Ctx: "scaleio.system_defined_components", + Dims: Dims{ + {ID: "system_num_of_devices", Name: "devices"}, + {ID: "system_num_of_fault_sets", Name: "fault_sets"}, + {ID: "system_num_of_protection_domains", Name: "protection_domains"}, + {ID: "system_num_of_rfcache_devices", Name: "rfcache_devices"}, + {ID: "system_num_of_sdc", Name: "sdc"}, + {ID: "system_num_of_sds", Name: "sds"}, + {ID: "system_num_of_snapshots", Name: "snapshots"}, + {ID: "system_num_of_storage_pools", Name: "storage_pools"}, + {ID: "system_num_of_volumes", Name: "volumes"}, + {ID: "system_num_of_vtrees", Name: "vtrees"}, + }, + }, + { + ID: "system_components_volumes_by_type", + Title: "Volumes By Type", + Units: "volumes", + Fam: "components", + Ctx: "scaleio.system_components_volumes_by_type", + Type: module.Stacked, + Dims: Dims{ + {ID: "system_num_of_thick_base_volumes", Name: "thick"}, + {ID: "system_num_of_thin_base_volumes", Name: "thin"}, + }, + }, + { + ID: "system_components_volumes_by_mapping", + Title: "Volumes By Mapping", + Units: "volumes", + Fam: "components", + Ctx: 
"scaleio.system_components_volumes_by_mapping", + Type: module.Stacked, + Dims: Dims{ + {ID: "system_num_of_mapped_volumes", Name: "mapped"}, + {ID: "system_num_of_unmapped_volumes", Name: "unmapped"}, + }, + }, +} + +var storagePoolCharts = Charts{ + { + ID: "storage_pool_%s_capacity_total", + Title: "Total Capacity", + Units: "KiB", + Fam: "pool %s", + Ctx: "scaleio.storage_pool_capacity_total", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_max_capacity", Name: "total"}, + }, + }, + { + ID: "storage_pool_%s_capacity_in_use", + Title: "Capacity In Use", + Units: "KiB", + Fam: "pool %s", + Ctx: "scaleio.storage_pool_capacity_in_use", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_in_use", Name: "in_use"}, + }, + }, + { + ID: "storage_pool_%s_capacity_usage", + Title: "Capacity Usage", + Units: "KiB", + Fam: "pool %s", + Type: module.Stacked, + Ctx: "scaleio.storage_pool_capacity_usage", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_thick_in_use", Name: "thick"}, + {ID: "storage_pool_%s_capacity_decreased", Name: "decreased"}, + {ID: "storage_pool_%s_capacity_thin_in_use", Name: "thin"}, + {ID: "storage_pool_%s_capacity_snapshot", Name: "snapshot"}, + {ID: "storage_pool_%s_capacity_spare", Name: "spare"}, + {ID: "storage_pool_%s_capacity_unused", Name: "unused"}, + }, + }, + { + ID: "storage_pool_%s_capacity_utilization", + Title: "Capacity Utilization", + Units: "percentage", + Fam: "pool %s", + Ctx: "scaleio.storage_pool_capacity_utilization", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_utilization", Name: "used", Div: 100}, + }, + Vars: Vars{ + {ID: "storage_pool_%s_capacity_alert_high_threshold"}, + {ID: "storage_pool_%s_capacity_alert_critical_threshold"}, + }, + }, + { + ID: "storage_pool_%s_capacity_available_volume_allocation", + Title: "Available For Volume Allocation", + Units: "KiB", + Fam: "pool %s", + Ctx: "scaleio.storage_pool_capacity_available_volume_allocation", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_available_for_volume_allocation", Name: "available"}, + }, + }, + { + ID: "storage_pool_%s_capacity_health_state", + Title: "Capacity Health State", + Units: "KiB", + Fam: "pool %s", + Type: module.Stacked, + Ctx: "scaleio.storage_pool_capacity_health_state", + Dims: Dims{ + {ID: "storage_pool_%s_capacity_protected", Name: "protected"}, + {ID: "storage_pool_%s_capacity_degraded", Name: "degraded"}, + {ID: "storage_pool_%s_capacity_in_maintenance", Name: "in_maintenance"}, + {ID: "storage_pool_%s_capacity_failed", Name: "failed"}, + {ID: "storage_pool_%s_capacity_unreachable_unused", Name: "unavailable"}, + }, + }, + { + ID: "storage_pool_%s_components", + Title: "Components", + Units: "components", + Fam: "pool %s", + Ctx: "scaleio.storage_pool_components", + Dims: Dims{ + {ID: "storage_pool_%s_num_of_devices", Name: "devices"}, + {ID: "storage_pool_%s_num_of_snapshots", Name: "snapshots"}, + {ID: "storage_pool_%s_num_of_volumes", Name: "volumes"}, + {ID: "storage_pool_%s_num_of_vtrees", Name: "vtrees"}, + }, + }, +} + +func newStoragePoolCharts(pool client.StoragePool) *Charts { + charts := storagePoolCharts.Copy() + for i, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, pool.ID) + chart.Fam = fmt.Sprintf(chart.Fam, pool.Name) + chart.Priority = prioStoragePool + i + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, pool.ID) + } + for _, v := range chart.Vars { + v.ID = fmt.Sprintf(v.ID, pool.ID) + } + } + return charts +} + +var sdcCharts = Charts{ + { + ID: "sdc_%s_mdm_connection_state", + Title: "MDM Connection State", + Units: 
"boolean", + Fam: "sdc %s", + Ctx: "scaleio.sdc_mdm_connection_state", + Dims: Dims{ + {ID: "sdc_%s_mdm_connection_state", Name: "connected"}, + }, + }, + { + ID: "sdc_%s_bandwidth", + Title: "Bandwidth", + Units: "KiB/s", + Fam: "sdc %s", + Ctx: "scaleio.sdc_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: "sdc_%s_bandwidth_read", Name: "read", Div: 1000}, + {ID: "sdc_%s_bandwidth_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "sdc_%s_iops", + Title: "IOPS", + Units: "iops/s", + Fam: "sdc %s", + Ctx: "scaleio.sdc_iops", + Type: module.Area, + Dims: Dims{ + {ID: "sdc_%s_iops_read", Name: "read", Div: 1000}, + {ID: "sdc_%s_iops_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "sdc_%s_io_size", + Title: "I/O Size", + Units: "KiB", + Fam: "sdc %s", + Ctx: "scaleio.sdc_io_size", + Type: module.Area, + Dims: Dims{ + {ID: "sdc_%s_io_size_read", Name: "read", Div: 1000}, + {ID: "sdc_%s_io_size_write", Name: "write", Mul: -1, Div: 1000}, + }, + }, + { + ID: "sdc_%s_num_of_mapped_volumed", + Title: "Mapped Volumes", + Units: "volumes", + Fam: "sdc %s", + Ctx: "scaleio.sdc_num_of_mapped_volumed", + Dims: Dims{ + {ID: "sdc_%s_num_of_mapped_volumes", Name: "mapped"}, + }, + }, +} + +func newSdcCharts(sdc client.Sdc) *Charts { + charts := sdcCharts.Copy() + for i, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, sdc.ID) + chart.Fam = fmt.Sprintf(chart.Fam, sdc.SdcIp) + chart.Priority = prioSdc + i + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, sdc.ID) + } + } + return charts +} + +// TODO: remove stale charts? +func (s *ScaleIO) updateCharts() { + s.updateStoragePoolCharts() + s.updateSdcCharts() +} + +func (s *ScaleIO) updateStoragePoolCharts() { + for _, pool := range s.discovered.pool { + if s.charted[pool.ID] { + continue + } + s.charted[pool.ID] = true + s.addStoragePoolCharts(pool) + } +} + +func (s *ScaleIO) updateSdcCharts() { + for _, sdc := range s.discovered.sdc { + if s.charted[sdc.ID] { + continue + } + s.charted[sdc.ID] = true + s.addSdcCharts(sdc) + } +} + +func (s *ScaleIO) addStoragePoolCharts(pool client.StoragePool) { + charts := newStoragePoolCharts(pool) + if err := s.Charts().Add(*charts...); err != nil { + s.Warningf("couldn't add charts for storage pool '%s(%s)': %v", pool.ID, pool.Name, err) + } +} + +func (s *ScaleIO) addSdcCharts(sdc client.Sdc) { + charts := newSdcCharts(sdc) + if err := s.Charts().Add(*charts...); err != nil { + s.Warningf("couldn't add charts for sdc '%s(%s)': %v", sdc.ID, sdc.SdcIp, err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/client/client.go b/src/go/collectors/go.d.plugin/modules/scaleio/client/client.go new file mode 100644 index 00000000000000..00516d71254a21 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/client/client.go @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +/* +The REST API is served from the VxFlex OS Gateway. +The FxFlex Gateway connects to a single MDM and serves requests by querying the MDM +and reformatting the answers it receives from the MDM in s RESTful manner, back to a REST API. +The Gateway is stateless. It requires the MDM username and password for the login requests. +The login returns a token in the response, that is used for later authentication for other requests. 
+
+The token is valid for 8 hours from the time it was created, unless there has been no activity
+for 10 minutes, or if the client has sent a logout request.
+
+General URI:
+- /api/login
+- /api/logout
+- /api/version
+- /api/instances/                                              // GET all instances
+- /api/types/{type}/instances                                  // POST (create) / GET all objects for a given type
+- /api/instances/{type::id}                                    // GET by ID
+- /api/instances/{type::id}/relationships/{Relationship name}  // GET
+- /api/instances/querySelectedStatistics                       // POST Query selected statistics
+- /api/instances/{type::id}/action/{actionName}                // POST a special action on an object
+- /api/types/{type}/instances/action/{actionName}              // POST a special action on a given type
+
+Types:
+- System
+- Sds
+- StoragePool
+- ProtectionDomain
+- Device
+- Volume
+- VTree
+- Sdc
+- User
+- FaultSet
+- RfcacheDevice
+- Alerts
+
+Actions:
+- querySelectedStatistics       // All types except Alarm and User
+- querySystemLimits             // System
+- queryDisconnectedSdss         // Sds
+- querySdsNetworkLatencyMeters  // Sds
+- queryFailedDevices            // Device. Note: works strangely!
+
+Relationships:
+- Statistics        // All types except Alarm and User
+- ProtectionDomain  // System
+- Sdc               // System
+- User              // System
+- StoragePool       // ProtectionDomain
+- FaultSet          // ProtectionDomain
+- Sds               // ProtectionDomain
+- RfcacheDevice     // Sds
+- Device            // Sds, StoragePool
+- Volume            // Sdc, StoragePool
+- VTree             // StoragePool
+*/
+
+// New creates a new ScaleIO client.
+func New(client web.Client, request web.Request) (*Client, error) {
+	httpClient, err := web.NewHTTPClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		Request:    request,
+		httpClient: httpClient,
+		token:      newToken(),
+	}, nil
+}
+
+// Client represents a ScaleIO client.
+type Client struct {
+	Request    web.Request
+	httpClient *http.Client
+	token      *token
+}
+
+// LoggedIn reports whether the client is logged in.
+func (c Client) LoggedIn() bool {
+	return c.token.isSet()
+}
+
+// Login connects to the VxFlex Gateway to get the token that is used for later authentication for other requests.
+func (c *Client) Login() error {
+	if c.LoggedIn() {
+		_ = c.Logout()
+	}
+	req := c.createLoginRequest()
+	resp, err := c.doOK(req)
+	defer closeBody(resp)
+	if err != nil {
+		return err
+	}
+
+	token, err := decodeToken(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	c.token.set(token)
+	return nil
+}
+
+// Logout sends a logout request and unsets the token.
+func (c *Client) Logout() error {
+	if !c.LoggedIn() {
+		return nil
+	}
+	req := c.createLogoutRequest()
+	c.token.unset()
+
+	resp, err := c.do(req)
+	defer closeBody(resp)
+	return err
+}
+
+// APIVersion returns the VxFlex Gateway API version.
+func (c *Client) APIVersion() (Version, error) {
+	req := c.createAPIVersionRequest()
+	resp, err := c.doOK(req)
+	defer closeBody(resp)
+	if err != nil {
+		return Version{}, err
+	}
+	return decodeVersion(resp.Body)
+}
+
+// SelectedStatistics returns selected statistics.
+func (c *Client) SelectedStatistics(query SelectedStatisticsQuery) (SelectedStatistics, error) {
+	b, _ := json.Marshal(query)
+	req := c.createSelectedStatisticsRequest(b)
+	var stats SelectedStatistics
+	err := c.doJSONWithRetry(&stats, req)
+	return stats, err
+}
+
+// Instances returns all instances.
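+// The decoded response is reduced to the fields this module cares about,
+// StoragePoolList and SdcList (see the Instances type in types.go).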
+func (c *Client) Instances() (Instances, error) { + req := c.createInstancesRequest() + var instances Instances + err := c.doJSONWithRetry(&instances, req) + return instances, err +} + +func (c Client) createLoginRequest() web.Request { + req := c.Request.Copy() + u, _ := url.Parse(req.URL) + u.Path = path.Join(u.Path, "/api/login") + req.URL = u.String() + return req +} + +func (c Client) createLogoutRequest() web.Request { + req := c.Request.Copy() + u, _ := url.Parse(req.URL) + u.Path = path.Join(u.Path, "/api/logout") + req.URL = u.String() + req.Password = c.token.get() + return req +} + +func (c Client) createAPIVersionRequest() web.Request { + req := c.Request.Copy() + u, _ := url.Parse(req.URL) + u.Path = path.Join(u.Path, "/api/version") + req.URL = u.String() + req.Password = c.token.get() + return req +} + +func (c Client) createSelectedStatisticsRequest(query []byte) web.Request { + req := c.Request.Copy() + u, _ := url.Parse(req.URL) + u.Path = path.Join(u.Path, "/api/instances/querySelectedStatistics") + req.URL = u.String() + req.Password = c.token.get() + req.Method = http.MethodPost + req.Headers = map[string]string{ + "Content-Type": "application/json", + } + req.Body = string(query) + return req +} + +func (c Client) createInstancesRequest() web.Request { + req := c.Request.Copy() + u, _ := url.Parse(req.URL) + u.Path = path.Join(u.Path, "/api/instances") + req.URL = u.String() + req.Password = c.token.get() + return req +} + +func (c *Client) do(req web.Request) (*http.Response, error) { + httpReq, err := web.NewHTTPRequest(req) + if err != nil { + return nil, fmt.Errorf("error on creating http request to %s: %v", req.URL, err) + } + return c.httpClient.Do(httpReq) +} + +func (c *Client) doOK(req web.Request) (*http.Response, error) { + resp, err := c.do(req) + if err != nil { + return nil, err + } + if err = checkStatusCode(resp); err != nil { + err = fmt.Errorf("%s returned %v", req.URL, err) + } + return resp, err +} + +func (c *Client) doOKWithRetry(req web.Request) (*http.Response, error) { + resp, err := c.do(req) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusUnauthorized { + if err = c.Login(); err != nil { + return resp, err + } + req.Password = c.token.get() + return c.doOK(req) + } + if err = checkStatusCode(resp); err != nil { + err = fmt.Errorf("%s returned %v", req.URL, err) + } + return resp, err +} + +func (c *Client) doJSONWithRetry(dst interface{}, req web.Request) error { + resp, err := c.doOKWithRetry(req) + defer closeBody(resp) + if err != nil { + return err + } + return json.NewDecoder(resp.Body).Decode(dst) +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func checkStatusCode(resp *http.Response) error { + // For all 4xx and 5xx return codes, the body may contain an apiError + // instance with more specifics about the failure. + if resp.StatusCode >= 400 { + e := error(&apiError{}) + if err := json.NewDecoder(resp.Body).Decode(e); err != nil { + e = err + } + return fmt.Errorf("HTTP status code %d : %v", resp.StatusCode, e) + } + + // 200(OK), 201(Created), 202(Accepted), 204 (No Content). 
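+	// Codes below 400 that are still outside the 2xx range (e.g. 1xx/3xx) are
+	// reported without a decoded apiError, since only 4xx/5xx bodies carry one.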
+ if resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("HTTP status code %d", resp.StatusCode) + } + return nil +} + +func decodeVersion(reader io.Reader) (ver Version, err error) { + bs, err := io.ReadAll(reader) + if err != nil { + return ver, err + } + parts := strings.Split(strings.Trim(string(bs), "\n "), ".") + if len(parts) != 2 { + return ver, fmt.Errorf("can't parse: %s", string(bs)) + } + if ver.Major, err = strconv.ParseInt(parts[0], 10, 64); err != nil { + return ver, err + } + ver.Minor, err = strconv.ParseInt(parts[1], 10, 64) + return ver, err +} + +func decodeToken(reader io.Reader) (string, error) { + bs, err := io.ReadAll(reader) + if err != nil { + return "", err + } + return strings.Trim(string(bs), `"`), nil +} + +type token struct { + mux *sync.RWMutex + value string +} + +func newToken() *token { return &token{mux: &sync.RWMutex{}} } +func (t *token) get() string { t.mux.RLock(); defer t.mux.RUnlock(); return t.value } +func (t *token) set(v string) { t.mux.Lock(); defer t.mux.Unlock(); t.value = v } +func (t *token) unset() { t.set("") } +func (t *token) isSet() bool { return t.get() != "" } diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/client/client_test.go b/src/go/collectors/go.d.plugin/modules/scaleio/client/client_test.go new file mode 100644 index 00000000000000..bc686bf9b521bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/client/client_test.go @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "net/http/httptest" + "testing" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + _, err := New(web.Client{}, web.Request{}) + assert.NoError(t, err) +} + +func TestClient_Login(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + assert.NoError(t, client.Login()) + assert.Equal(t, testToken, client.token.get()) +} + +func TestClient_Logout(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + require.NoError(t, client.Login()) + + assert.NoError(t, client.Logout()) + assert.False(t, client.token.isSet()) + +} + +func TestClient_LoggedIn(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + assert.False(t, client.LoggedIn()) + assert.NoError(t, client.Login()) + assert.True(t, client.LoggedIn()) +} + +func TestClient_APIVersion(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + err := client.Login() + require.NoError(t, err) + + version, err := client.APIVersion() + assert.NoError(t, err) + assert.Equal(t, Version{Major: 2, Minor: 5}, version) +} + +func TestClient_Instances(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + err := client.Login() + require.NoError(t, err) + + instances, err := client.Instances() + assert.NoError(t, err) + assert.Equal(t, testInstances, instances) +} + +func TestClient_Instances_RetryOnExpiredToken(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + instances, err := client.Instances() + assert.NoError(t, err) + assert.Equal(t, testInstances, instances) +} + +func TestClient_SelectedStatistics(t *testing.T) { + srv, client := prepareSrvClient(t) + defer srv.Close() + + err := client.Login() + require.NoError(t, err) + + stats, err := client.SelectedStatistics(SelectedStatisticsQuery{}) + assert.NoError(t, err) + assert.Equal(t, testStatistics, stats) +} + +func 
TestClient_SelectedStatistics_RetryOnExpiredToken(t *testing.T) {
+	srv, client := prepareSrvClient(t)
+	defer srv.Close()
+
+	stats, err := client.SelectedStatistics(SelectedStatisticsQuery{})
+	assert.NoError(t, err)
+	assert.Equal(t, testStatistics, stats)
+}
+
+func prepareSrvClient(t *testing.T) (*httptest.Server, *Client) {
+	t.Helper()
+	srv := httptest.NewServer(MockScaleIOAPIServer{
+		User:       testUser,
+		Password:   testPassword,
+		Version:    testVersion,
+		Token:      testToken,
+		Instances:  testInstances,
+		Statistics: testStatistics,
+	})
+	client, err := New(web.Client{}, web.Request{
+		URL:      srv.URL,
+		Username: testUser,
+		Password: testPassword,
+	})
+	assert.NoError(t, err)
+	return srv, client
+}
+
+var (
+	testUser     = "user"
+	testPassword = "password"
+	testVersion  = "2.5"
+	testToken    = "token"
+	testInstances = Instances{
+		StoragePoolList: []StoragePool{
+			{ID: "id1", Name: "Marketing", SparePercentage: 10},
+			{ID: "id2", Name: "Finance", SparePercentage: 10},
+		},
+		SdcList: []Sdc{
+			{ID: "id1", SdcIp: "10.0.0.1", MdmConnectionState: "Connected"},
+			{ID: "id2", SdcIp: "10.0.0.2", MdmConnectionState: "Connected"},
+		},
+	}
+	testStatistics = SelectedStatistics{
+		System:      SystemStatistics{NumOfDevices: 1},
+		Sdc:         map[string]SdcStatistics{"id1": {}, "id2": {}},
+		StoragePool: map[string]StoragePoolStatistics{"id1": {}, "id2": {}},
+	}
+)
diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/client/server.go b/src/go/collectors/go.d.plugin/modules/scaleio/client/server.go
new file mode 100644
index 00000000000000..b7269d339f3eaf
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/scaleio/client/server.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// MockScaleIOAPIServer represents VxFlex OS Gateway.
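+// It implements http.Handler, so tests can serve it with httptest.NewServer and
+// point a Client at the resulting URL (see prepareSrvClient in client_test.go).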
+type MockScaleIOAPIServer struct { + User string + Password string + Token string + Version string + Instances Instances + Statistics SelectedStatistics +} + +func (s MockScaleIOAPIServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + default: + w.WriteHeader(http.StatusNotFound) + msg := fmt.Sprintf("unknown URL path: %s", r.URL.Path) + writeAPIError(w, msg) + case "/api/login": + s.handleLogin(w, r) + case "/api/logout": + s.handleLogout(w, r) + case "/api/version": + s.handleVersion(w, r) + case "/api/instances": + s.handleInstances(w, r) + case "/api/instances/querySelectedStatistics": + s.handleQuerySelectedStatistics(w, r) + } +} + +func (s MockScaleIOAPIServer) handleLogin(w http.ResponseWriter, r *http.Request) { + if user, pass, ok := r.BasicAuth(); !ok || user != s.User || pass != s.Password { + w.WriteHeader(http.StatusUnauthorized) + msg := fmt.Sprintf("user got/expected: %s/%s, pass got/expected: %s/%s", user, s.User, pass, s.Password) + writeAPIError(w, msg) + return + } + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusBadRequest) + msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet) + writeAPIError(w, msg) + return + } + _, _ = w.Write([]byte(s.Token)) +} + +func (s MockScaleIOAPIServer) handleLogout(w http.ResponseWriter, r *http.Request) { + if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token { + w.WriteHeader(http.StatusUnauthorized) + msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token) + writeAPIError(w, msg) + return + } + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusBadRequest) + msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet) + writeAPIError(w, msg) + return + } +} + +func (s MockScaleIOAPIServer) handleVersion(w http.ResponseWriter, r *http.Request) { + if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token { + w.WriteHeader(http.StatusUnauthorized) + msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token) + writeAPIError(w, msg) + return + } + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusBadRequest) + msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet) + writeAPIError(w, msg) + return + } + _, _ = w.Write([]byte(s.Version)) +} + +func (s MockScaleIOAPIServer) handleInstances(w http.ResponseWriter, r *http.Request) { + if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token { + w.WriteHeader(http.StatusUnauthorized) + msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token) + writeAPIError(w, msg) + return + } + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusBadRequest) + msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet) + writeAPIError(w, msg) + return + } + b, err := json.Marshal(s.Instances) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + msg := fmt.Sprintf("marshal Instances: %v", err) + writeAPIError(w, msg) + return + } + _, _ = w.Write(b) +} + +func (s MockScaleIOAPIServer) handleQuerySelectedStatistics(w http.ResponseWriter, r *http.Request) { + if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token { + w.WriteHeader(http.StatusUnauthorized) + msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token) + writeAPIError(w, msg) + return + } + if r.Method != http.MethodPost { + w.WriteHeader(http.StatusBadRequest) + msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodPost) + writeAPIError(w, msg) + return + } + if r.Header.Get("Content-Type") != "application/json" { + 
w.WriteHeader(http.StatusBadRequest)
+		writeAPIError(w, "no \"Content-Type: application/json\" in the header")
+		return
+	}
+	if err := json.NewDecoder(r.Body).Decode(&SelectedStatisticsQuery{}); err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		msg := fmt.Sprintf("body decode error: %v", err)
+		writeAPIError(w, msg)
+		return
+	}
+	b, err := json.Marshal(s.Statistics)
+	if err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		msg := fmt.Sprintf("marshal SelectedStatistics: %v", err)
+		writeAPIError(w, msg)
+		return
+	}
+	_, _ = w.Write(b)
+}
+
+func writeAPIError(w io.Writer, msg string) {
+	err := apiError{Message: msg}
+	b, _ := json.Marshal(err)
+	_, _ = w.Write(b)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/client/types.go b/src/go/collectors/go.d.plugin/modules/scaleio/client/types.go
new file mode 100644
index 00000000000000..c85bddf8db964d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/scaleio/client/types.go
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+// https://github.com/dell/goscaleio/blob/master/types/v1/types.go
+
+// For all 4xx and 5xx return codes, the body may contain an apiError instance
+// with more specifics about the failure.
+type apiError struct {
+	Message        string
+	HTTPStatusCode int
+	ErrorCode      int
+}
+
+func (e apiError) Error() string {
+	return e.Message
+}
+
+// Version represents API version.
+type Version struct {
+	Major int64
+	Minor int64
+}
+
+// Bwc is a bandwidth counter: a number of I/O occurrences and their total weight
+// in KB, accumulated over a number of seconds.
+type Bwc struct {
+	NumOccured      int64
+	NumSeconds      int64
+	TotalWeightInKb int64
+}
+
+// Sdc represents ScaleIO Data Client.
+type Sdc struct {
+	ID                 string
+	SdcIp              string
+	MdmConnectionState string
+}
+
+// StoragePool represents ScaleIO Storage Pool.
+type StoragePool struct {
+	ID                             string
+	Name                           string
+	SparePercentage                int64
+	CapacityAlertCriticalThreshold int64
+	CapacityAlertHighThreshold     int64
+}
+
+// Instances represents '/api/instances' response.
+type Instances struct {
+	StoragePoolList []StoragePool
+	SdcList         []Sdc
+}
+
+type (
+	// SelectedStatisticsQuery represents '/api/instances/querySelectedStatistics' query.
+	SelectedStatisticsQuery struct {
+		List []SelectedObject `json:"selectedStatisticsList"`
+	}
+	// SelectedObject represents '/api/instances/querySelectedStatistics' query object.
+	SelectedObject struct {
+		Type string `json:"type"` // object type (System, ProtectionDomain, Sds, StoragePool, Device, Volume, VTree, Sdc, FaultSet, RfcacheDevice).
+
+		// the following parameters are not relevant to the System type and can be omitted:
+		IDs    []string `json:"ids,omitempty"`    // list of objects ids
+		AllIDs allIds   `json:"allIds,omitempty"` // all available objects
+
+		Properties []string `json:"properties"` // list of properties to fetch
+	}
+	allIds bool
+)
+
+func (b allIds) MarshalJSON() ([]byte, error) {
+	// marshals to an empty list when AllIDs is true ("omitempty" drops the field entirely when it is false).
+	if b {
+		return []byte("[]"), nil
+	}
+	return nil, nil
+}
+func (b *allIds) UnmarshalJSON([]byte) error {
+	*b = true
+	return nil
+}
+
+// SelectedStatistics represents '/api/instances/querySelectedStatistics' response.
+type SelectedStatistics struct {
+	System      SystemStatistics
+	Sdc         map[string]SdcStatistics
+	StoragePool map[string]StoragePoolStatistics
+}
+
+// The commented-out structure fields below are kept on purpose: they document
+// which other metrics the API exposes and could be collected in the future.
+type (
+	// CapacityStatistics is System/StoragePool capacity statistics.
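+	// Judging by the *InKb naming used throughout, capacity values are reported
+	// in KiB, which is why the capacity charts in charts.go use "KiB" units.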
+	CapacityStatistics struct {
+		CapacityAvailableForVolumeAllocationInKb int64
+		MaxCapacityInKb                          int64
+		CapacityLimitInKb                        int64
+		ProtectedCapacityInKb                    int64
+		DegradedFailedCapacityInKb               int64
+		DegradedHealthyCapacityInKb              int64
+		SpareCapacityInKb                        int64
+		FailedCapacityInKb                       int64
+		UnreachableUnusedCapacityInKb            int64
+		InMaintenanceCapacityInKb                int64
+		ThinCapacityAllocatedInKb                int64
+		ThinCapacityInUseInKb                    int64
+		ThickCapacityInUseInKb                   int64
+		SnapCapacityInUseOccupiedInKb            int64
+		CapacityInUseInKb                        int64
+	}
+	SystemStatistics struct {
+		CapacityStatistics
+
+		NumOfDevices            int64
+		NumOfFaultSets          int64
+		NumOfProtectionDomains  int64
+		NumOfRfcacheDevices     int64
+		NumOfSdc                int64
+		NumOfSds                int64
+		NumOfSnapshots          int64
+		NumOfStoragePools       int64
+		NumOfVolumes            int64
+		NumOfVtrees             int64
+		NumOfThickBaseVolumes   int64
+		NumOfThinBaseVolumes    int64
+		NumOfMappedToAllVolumes int64
+		NumOfUnmappedVolumes    int64
+
+		RebalanceReadBwc             Bwc
+		RebalanceWriteBwc            Bwc
+		PendingRebalanceCapacityInKb int64
+
+		PendingNormRebuildCapacityInKb int64
+		PendingBckRebuildCapacityInKb  int64
+		PendingFwdRebuildCapacityInKb  int64
+		NormRebuildReadBwc             Bwc // TODO: ???
+		NormRebuildWriteBwc            Bwc // TODO: ???
+		BckRebuildReadBwc              Bwc // failed node/disk is back alive
+		BckRebuildWriteBwc             Bwc // failed node/disk is back alive
+		FwdRebuildReadBwc              Bwc // node/disk fails
+		FwdRebuildWriteBwc             Bwc // node/disk fails
+
+		PrimaryReadBwc    Bwc // Backend (SDSs + Devices) Primary - Master MDM
+		PrimaryWriteBwc   Bwc // Backend (SDSs + Devices) Primary - Master MDM
+		SecondaryReadBwc  Bwc // Backend (SDSs + Devices, 2nd) Secondary - Slave MDM
+		SecondaryWriteBwc Bwc // Backend (SDSs + Devices, 2nd) Secondary - Slave MDM
+		UserDataReadBwc   Bwc // Frontend (Volumes + SDCs)
+		UserDataWriteBwc  Bwc // Frontend (Volumes + SDCs)
+		TotalReadBwc      Bwc // *ReadBwc
+		TotalWriteBwc     Bwc // *WriteBwc
+
+		//SnapCapacityInUseInKb int64
+		//BackgroundScanCompareCount int64
+		//BackgroundScannedInMB int64
+		//ActiveBckRebuildCapacityInKb int64
+		//ActiveFwdRebuildCapacityInKb int64
+		//ActiveMovingCapacityInKb int64
+		//ActiveMovingInBckRebuildJobs int64
+		//ActiveMovingInFwdRebuildJobs int64
+		//ActiveMovingInNormRebuildJobs int64
+		//ActiveMovingInRebalanceJobs int64
+		//ActiveMovingOutBckRebuildJobs int64
+		//ActiveMovingOutFwdRebuildJobs int64
+		//ActiveMovingOutNormRebuildJobs int64
+		//ActiveMovingRebalanceJobs int64
+		//ActiveNormRebuildCapacityInKb int64
+		//ActiveRebalanceCapacityInKb int64
+		//AtRestCapacityInKb int64
+		//BckRebuildCapacityInKb int64
+		//DegradedFailedVacInKb int64
+		//DegradedHealthyVacInKb int64
+		//FailedVacInKb int64
+		//FixedReadErrorCount int64
+		//FwdRebuildCapacityInKb int64
+		//InMaintenanceVacInKb int64
+		//InUseVacInKb int64
+		//MovingCapacityInKb int64
+		//NormRebuildCapacityInKb int64
+		//NumOfScsiInitiators int64 // removed from version 3 of ScaleIO/VxFlex API
+		//PendingMovingCapacityInKb int64
+		//PendingMovingInBckRebuildJobs int64
+		//PendingMovingInFwdRebuildJobs int64
+		//PendingMovingInNormRebuildJobs int64
+		//PendingMovingInRebalanceJobs int64
+		//PendingMovingOutBckRebuildJobs int64
+		//PendingMovingOutFwdRebuildJobs int64
+		//PendingMovingOutNormrebuildJobs int64
+		//PendingMovingRebalanceJobs int64
+		//PrimaryReadFromDevBwc int64
+		//PrimaryReadFromRmcacheBwc int64
+		//PrimaryVacInKb int64
+		//ProtectedVacInKb int64
+		//ProtectionDomainIds int64
+		//RebalanceCapacityInKb int64
+		//RebalancePerReceiveJobNetThrottlingInKbps int64
+		//RebalanceWaitSendQLength int64
+		//RebuildPerReceiveJobNetThrottlingInKbps 
int64 + //RebuildWaitSendQLength int64 + //RfacheReadHit int64 + //RfacheWriteHit int64 + //RfcacheAvgReadTime int64 + //RfcacheAvgWriteTime int64 + //RfcacheFdAvgReadTime int64 + //RfcacheFdAvgWriteTime int64 + //RfcacheFdCacheOverloaded int64 + //RfcacheFdInlightReads int64 + //RfcacheFdInlightWrites int64 + //RfcacheFdIoErrors int64 + //RfcacheFdMonitorErrorStuckIo int64 + //RfcacheFdReadTimeGreater1Min int64 + //RfcacheFdReadTimeGreater1Sec int64 + //RfcacheFdReadTimeGreater500Millis int64 + //RfcacheFdReadTimeGreater5Sec int64 + //RfcacheFdReadsReceived int64 + //RfcacheFdWriteTimeGreater1Min int64 + //RfcacheFdWriteTimeGreater1Sec int64 + //RfcacheFdWriteTimeGreater500Millis int64 + //RfcacheFdWriteTimeGreater5Sec int64 + //RfcacheFdWritesReceived int64 + //RfcacheIoErrors int64 + //RfcacheIosOutstanding int64 + //RfcacheIosSkipped int64 + //RfcachePooIosOutstanding int64 + //RfcachePoolCachePages int64 + //RfcachePoolEvictions int64 + //RfcachePoolInLowMemoryCondition int64 + //RfcachePoolIoTimeGreater1Min int64 + //RfcachePoolLockTimeGreater1Sec int64 + //RfcachePoolLowResourcesInitiatedPassthroughMode int64 + //RfcachePoolNumCacheDevs int64 + //RfcachePoolNumSrcDevs int64 + //RfcachePoolPagesInuse int64 + //RfcachePoolReadHit int64 + //RfcachePoolReadMiss int64 + //RfcachePoolReadPendingG10Millis int64 + //RfcachePoolReadPendingG1Millis int64 + //RfcachePoolReadPendingG1Sec int64 + //RfcachePoolReadPendingG500Micro int64 + //RfcachePoolReadsPending int64 + //RfcachePoolSize int64 + //RfcachePoolSourceIdMismatch int64 + //RfcachePoolSuspendedIos int64 + //RfcachePoolSuspendedPequestsRedundantSearchs int64 + //RfcachePoolWriteHit int64 + //RfcachePoolWriteMiss int64 + //RfcachePoolWritePending int64 + //RfcachePoolWritePendingG10Millis int64 + //RfcachePoolWritePendingG1Millis int64 + //RfcachePoolWritePendingG1Sec int64 + //RfcachePoolWritePendingG500Micro int64 + //RfcacheReadMiss int64 + //RfcacheReadsFromCache int64 + //RfcacheReadsPending int64 + //RfcacheReadsReceived int64 + //RfcacheReadsSkipped int64 + //RfcacheReadsSkippedAlignedSizeTooLarge int64 + //RfcacheReadsSkippedHeavyLoad int64 + //RfcacheReadsSkippedInternalError int64 + //RfcacheReadsSkippedLockIos int64 + //RfcacheReadsSkippedLowResources int64 + //RfcacheReadsSkippedMaxIoSize int64 + //RfcacheReadsSkippedStuckIo int64 + //RfcacheSkippedUnlinedWrite int64 + //RfcacheSourceDeviceReads int64 + //RfcacheSourceDeviceWrites int64 + //RfcacheWriteMiss int64 + //RfcacheWritePending int64 + //RfcacheWritesReceived int64 + //RfcacheWritesSkippedCacheMiss int64 + //RfcacheWritesSkippedHeavyLoad int64 + //RfcacheWritesSkippedInternalError int64 + //RfcacheWritesSkippedLowResources int64 + //RfcacheWritesSkippedMaxIoSize int64 + //RfcacheWritesSkippedStuckIo int64 + //RmPendingAllocatedInKb int64 + //Rmcache128kbEntryCount int64 + //Rmcache16kbEntryCount int64 + //Rmcache32kbEntryCount int64 + //Rmcache4kbEntryCount int64 + //Rmcache64kbEntryCount int64 + //Rmcache8kbEntryCount int64 + //RmcacheBigBlockEvictionCount int64 + //RmcacheBigBlockEvictionSizeCountInKb int64 + //RmcacheCurrNumOf128kbEntries int64 + //RmcacheCurrNumOf16kbEntries int64 + //RmcacheCurrNumOf32kbEntries int64 + //RmcacheCurrNumOf4kbEntries int64 + //RmcacheCurrNumOf64kbEntries int64 + //RmcacheCurrNumOf8kbEntries int64 + //RmcacheEntryEvictionCount int64 + //RmcacheEntryEvictionSizeCountInKb int64 + //RmcacheNoEvictionCount int64 + //RmcacheSizeInKb int64 + //RmcacheSizeInUseInKb int64 + //RmcacheSkipCountCacheAllBusy int64 + //RmcacheSkipCountLargeIo 
int64 + //RmcacheSkipCountUnaligned4kbIo int64 + //ScsiInitiatorIds int64 + //SdcIds int64 + //SecondaryReadFromDevBwc int64 + //SecondaryReadFromRmcacheBwc int64 + //SecondaryVacInKb int64 + //SemiProtectedCapacityInKb int64 + //SemiProtectedVacInKb int64 + //SnapCapacityInUseOccupiedInKb int64 + //UnusedCapacityInKb int64 + } + SdcStatistics struct { + NumOfMappedVolumes int64 + UserDataReadBwc Bwc + UserDataWriteBwc Bwc + //VolumeIds int64 + } + StoragePoolStatistics struct { + CapacityStatistics + + NumOfDevices int64 + NumOfVolumes int64 + NumOfVtrees int64 + NumOfSnapshots int64 + + //SnapCapacityInUseInKb int64 + //BackgroundScanCompareCount int64 + //BackgroundScannedInMB int64 + //ActiveBckRebuildCapacityInKb int64 + //ActiveFwdRebuildCapacityInKb int64 + //ActiveMovingCapacityInKb int64 + //ActiveMovingInBckRebuildJobs int64 + //ActiveMovingInFwdRebuildJobs int64 + //ActiveMovingInNormRebuildJobs int64 + //ActiveMovingInRebalanceJobs int64 + //ActiveMovingOutBckRebuildJobs int64 + //ActiveMovingOutFwdRebuildJobs int64 + //ActiveMovingOutNormRebuildJobs int64 + //ActiveMovingRebalanceJobs int64 + //ActiveNormRebuildCapacityInKb int64 + //ActiveRebalanceCapacityInKb int64 + //AtRestCapacityInKb int64 + //BckRebuildCapacityInKb int64 + //BckRebuildReadBwc int64 + //BckRebuildWriteBwc int64 + //DegradedFailedVacInKb int64 + //DegradedHealthyVacInKb int64 + //DeviceIds int64 + //FailedVacInKb int64 + //FixedReadErrorCount int64 + //FwdRebuildCapacityInKb int64 + //FwdRebuildReadBwc int64 + //FwdRebuildWriteBwc int64 + //InMaintenanceVacInKb int64 + //InUseVacInKb int64 + //MovingCapacityInKb int64 + //NormRebuildCapacityInKb int64 + //NormRebuildReadBwc int64 + //NormRebuildWriteBwc int64 + //NumOfMappedToAllVolumes int64 + //NumOfThickBaseVolumes int64 + //NumOfThinBaseVolumes int64 + //NumOfUnmappedVolumes int64 + //NumOfVolumesInDeletion int64 + //PendingBckRebuildCapacityInKb int64 + //PendingFwdRebuildCapacityInKb int64 + //PendingMovingCapacityInKb int64 + //PendingMovingInBckRebuildJobs int64 + //PendingMovingInFwdRebuildJobs int64 + //PendingMovingInNormRebuildJobs int64 + //PendingMovingInRebalanceJobs int64 + //PendingMovingOutBckRebuildJobs int64 + //PendingMovingOutFwdRebuildJobs int64 + //PendingMovingOutNormrebuildJobs int64 + //PendingMovingRebalanceJobs int64 + //PendingNormRebuildCapacityInKb int64 + //PendingRebalanceCapacityInKb int64 + //PrimaryReadBwc int64 + //PrimaryReadFromDevBwc int64 + //PrimaryReadFromRmcacheBwc int64 + //PrimaryVacInKb int64 + //PrimaryWriteBwc int64 + //ProtectedVacInKb int64 + //RebalanceCapacityInKb int64 + //RebalanceReadBwc int64 + //RebalanceWriteBwc int64 + //RfacheReadHit int64 + //RfacheWriteHit int64 + //RfcacheAvgReadTime int64 + //RfcacheAvgWriteTime int64 + //RfcacheIoErrors int64 + //RfcacheIosOutstanding int64 + //RfcacheIosSkipped int64 + //RfcacheReadMiss int64 + //RfcacheReadsFromCache int64 + //RfcacheReadsPending int64 + //RfcacheReadsReceived int64 + //RfcacheReadsSkipped int64 + //RfcacheReadsSkippedAlignedSizeTooLarge int64 + //RfcacheReadsSkippedHeavyLoad int64 + //RfcacheReadsSkippedInternalError int64 + //RfcacheReadsSkippedLockIos int64 + //RfcacheReadsSkippedLowResources int64 + //RfcacheReadsSkippedMaxIoSize int64 + //RfcacheReadsSkippedStuckIo int64 + //RfcacheSkippedUnlinedWrite int64 + //RfcacheSourceDeviceReads int64 + //RfcacheSourceDeviceWrites int64 + //RfcacheWriteMiss int64 + //RfcacheWritePending int64 + //RfcacheWritesReceived int64 + //RfcacheWritesSkippedCacheMiss int64 + 
//RfcacheWritesSkippedHeavyLoad int64 + //RfcacheWritesSkippedInternalError int64 + //RfcacheWritesSkippedLowResources int64 + //RfcacheWritesSkippedMaxIoSize int64 + //RfcacheWritesSkippedStuckIo int64 + //RmPendingAllocatedInKb int64 + //SecondaryReadBwc int64 + //SecondaryReadFromDevBwc int64 + //SecondaryReadFromRmcacheBwc int64 + //SecondaryVacInKb int64 + //SecondaryWriteBwc int64 + //SemiProtectedCapacityInKb int64 + //SemiProtectedVacInKb int64 + //SnapCapacityInUseOccupiedInKb int64 + //TotalReadBwc int64 + //TotalWriteBwc int64 + //UnusedCapacityInKb int64 + //UserDataReadBwc int64 + //UserDataWriteBwc int64 + //VolumeIds int64 + //VtreeIds int64 + } + DeviceStatistic struct { + // BackgroundScanCompareCount int64 + // BackgroundScannedInMB int64 + // ActiveMovingInBckRebuildJobs int64 + // ActiveMovingInFwdRebuildJobs int64 + // ActiveMovingInNormRebuildJobs int64 + // ActiveMovingInRebalanceJobs int64 + // ActiveMovingOutBckRebuildJobs int64 + // ActiveMovingOutFwdRebuildJobs int64 + // ActiveMovingOutNormRebuildJobs int64 + // ActiveMovingRebalanceJobs int64 + // AvgReadLatencyInMicrosec int64 + // AvgReadSizeInBytes int64 + // AvgWriteLatencyInMicrosec int64 + // AvgWriteSizeInBytes int64 + // BckRebuildReadBwc int64 + // BckRebuildWriteBwc int64 + // CapacityInUseInKb int64 + // CapacityLimitInKb int64 + // DegradedFailedVacInKb int64 + // DegradedHealthyVacInKb int64 + // FailedVacInKb int64 + // FixedReadErrorCount int64 + // FwdRebuildReadBwc int64 + // FwdRebuildWriteBwc int64 + // InMaintenanceVacInKb int64 + // InUseVacInKb int64 + // MaxCapacityInKb int64 + // NormRebuildReadBwc int64 + // NormRebuildWriteBwc int64 + // PendingMovingInBckRebuildJobs int64 + // PendingMovingInFwdRebuildJobs int64 + // PendingMovingInNormRebuildJobs int64 + // PendingMovingInRebalanceJobs int64 + // PendingMovingOutBckRebuildJobs int64 + // PendingMovingOutFwdRebuildJobs int64 + // PendingMovingOutNormrebuildJobs int64 + // PendingMovingRebalanceJobs int64 + // PrimaryReadBwc int64 + // PrimaryReadFromDevBwc int64 + // PrimaryReadFromRmcacheBwc int64 + // PrimaryVacInKb int64 + // PrimaryWriteBwc int64 + // ProtectedVacInKb int64 + // RebalanceReadBwc int64 + // RebalanceWriteBwc int64 + // RfacheReadHit int64 + // RfacheWriteHit int64 + // RfcacheAvgReadTime int64 + // RfcacheAvgWriteTime int64 + // RfcacheIoErrors int64 + // RfcacheIosOutstanding int64 + // RfcacheIosSkipped int64 + // RfcacheReadMiss int64 + // RfcacheReadsFromCache int64 + // RfcacheReadsPending int64 + // RfcacheReadsReceived int64 + // RfcacheReadsSkipped int64 + // RfcacheReadsSkippedAlignedSizeTooLarge int64 + // RfcacheReadsSkippedHeavyLoad int64 + // RfcacheReadsSkippedInternalError int64 + // RfcacheReadsSkippedLockIos int64 + // RfcacheReadsSkippedLowResources int64 + // RfcacheReadsSkippedMaxIoSize int64 + // RfcacheReadsSkippedStuckIo int64 + // RfcacheSkippedUnlinedWrite int64 + // RfcacheSourceDeviceReads int64 + // RfcacheSourceDeviceWrites int64 + // RfcacheWriteMiss int64 + // RfcacheWritePending int64 + // RfcacheWritesReceived int64 + // RfcacheWritesSkippedCacheMiss int64 + // RfcacheWritesSkippedHeavyLoad int64 + // RfcacheWritesSkippedInternalError int64 + // RfcacheWritesSkippedLowResources int64 + // RfcacheWritesSkippedMaxIoSize int64 + // RfcacheWritesSkippedStuckIo int64 + // RmPendingAllocatedInKb int64 + // SecondaryReadBwc int64 + // SecondaryReadFromDevBwc int64 + // SecondaryReadFromRmcacheBwc int64 + // SecondaryVacInKb int64 + // SecondaryWriteBwc int64 + // SemiProtectedVacInKb int64 
+ // SnapCapacityInUseInKb int64 + // SnapCapacityInUseOccupiedInKb int64 + // ThickCapacityInUseInKb int64 + // ThinCapacityAllocatedInKb int64 + // ThinCapacityInUseInKb int64 + // TotalReadBwc int64 + // TotalWriteBwc int64 + // UnreachableUnusedCapacityInKb int64 + // UnusedCapacityInKb int64 + } + FaultSetStatistics struct { + // BackgroundScanCompareCount int64 + // BackgroundScannedInMB int64 + // ActiveMovingInBckRebuildJobs int64 + // ActiveMovingInFwdRebuildJobs int64 + // ActiveMovingInNormRebuildJobs int64 + // ActiveMovingInRebalanceJobs int64 + // ActiveMovingOutBckRebuildJobs int64 + // ActiveMovingOutFwdRebuildJobs int64 + // ActiveMovingOutNormRebuildJobs int64 + // ActiveMovingRebalanceJobs int64 + // BckRebuildReadBwc int64 + // BckRebuildWriteBwc int64 + // CapacityInUseInKb int64 + // CapacityLimitInKb int64 + // DegradedFailedVacInKb int64 + // DegradedHealthyVacInKb int64 + // FailedVacInKb int64 + // FixedReadErrorCount int64 + // FwdRebuildReadBwc int64 + // FwdRebuildWriteBwc int64 + // InMaintenanceVacInKb int64 + // InUseVacInKb int64 + // MaxCapacityInKb int64 + // NormRebuildReadBwc int64 + // NormRebuildWriteBwc int64 + // NumOfSds int64 + // PendingMovingInBckRebuildJobs int64 + // PendingMovingInFwdRebuildJobs int64 + // PendingMovingInNormRebuildJobs int64 + // PendingMovingInRebalanceJobs int64 + // PendingMovingOutBckRebuildJobs int64 + // PendingMovingOutFwdRebuildJobs int64 + // PendingMovingOutNormrebuildJobs int64 + // PendingMovingRebalanceJobs int64 + // PrimaryReadBwc int64 + // PrimaryReadFromDevBwc int64 + // PrimaryReadFromRmcacheBwc int64 + // PrimaryVacInKb int64 + // PrimaryWriteBwc int64 + // ProtectedVacInKb int64 + // RebalancePerReceiveJobNetThrottlingInKbps int64 + // RebalanceReadBwc int64 + // RebalanceWaitSendQLength int64 + // RebalanceWriteBwc int64 + // RebuildPerReceiveJobNetThrottlingInKbps int64 + // RebuildWaitSendQLength int64 + // RfacheReadHit int64 + // RfacheWriteHit int64 + // RfcacheAvgReadTime int64 + // RfcacheAvgWriteTime int64 + // RfcacheFdAvgReadTime int64 + // RfcacheFdAvgWriteTime int64 + // RfcacheFdCacheOverloaded int64 + // RfcacheFdInlightReads int64 + // RfcacheFdInlightWrites int64 + // RfcacheFdIoErrors int64 + // RfcacheFdMonitorErrorStuckIo int64 + // RfcacheFdReadTimeGreater1Min int64 + // RfcacheFdReadTimeGreater1Sec int64 + // RfcacheFdReadTimeGreater500Millis int64 + // RfcacheFdReadTimeGreater5Sec int64 + // RfcacheFdReadsReceived int64 + // RfcacheFdWriteTimeGreater1Min int64 + // RfcacheFdWriteTimeGreater1Sec int64 + // RfcacheFdWriteTimeGreater500Millis int64 + // RfcacheFdWriteTimeGreater5Sec int64 + // RfcacheFdWritesReceived int64 + // RfcacheIoErrors int64 + // RfcacheIosOutstanding int64 + // RfcacheIosSkipped int64 + // RfcachePooIosOutstanding int64 + // RfcachePoolCachePages int64 + // RfcachePoolEvictions int64 + // RfcachePoolInLowMemoryCondition int64 + // RfcachePoolIoTimeGreater1Min int64 + // RfcachePoolLockTimeGreater1Sec int64 + // RfcachePoolLowResourcesInitiatedPassthroughMode int64 + // RfcachePoolNumCacheDevs int64 + // RfcachePoolNumSrcDevs int64 + // RfcachePoolPagesInuse int64 + // RfcachePoolReadHit int64 + // RfcachePoolReadMiss int64 + // RfcachePoolReadPendingG10Millis int64 + // RfcachePoolReadPendingG1Millis int64 + // RfcachePoolReadPendingG1Sec int64 + // RfcachePoolReadPendingG500Micro int64 + // RfcachePoolReadsPending int64 + // RfcachePoolSize int64 + // RfcachePoolSourceIdMismatch int64 + // RfcachePoolSuspendedIos int64 + // 
RfcachePoolSuspendedPequestsRedundantSearchs int64 + // RfcachePoolWriteHit int64 + // RfcachePoolWriteMiss int64 + // RfcachePoolWritePending int64 + // RfcachePoolWritePendingG10Millis int64 + // RfcachePoolWritePendingG1Millis int64 + // RfcachePoolWritePendingG1Sec int64 + // RfcachePoolWritePendingG500Micro int64 + // RfcacheReadMiss int64 + // RfcacheReadsFromCache int64 + // RfcacheReadsPending int64 + // RfcacheReadsReceived int64 + // RfcacheReadsSkipped int64 + // RfcacheReadsSkippedAlignedSizeTooLarge int64 + // RfcacheReadsSkippedHeavyLoad int64 + // RfcacheReadsSkippedInternalError int64 + // RfcacheReadsSkippedLockIos int64 + // RfcacheReadsSkippedLowResources int64 + // RfcacheReadsSkippedMaxIoSize int64 + // RfcacheReadsSkippedStuckIo int64 + // RfcacheSkippedUnlinedWrite int64 + // RfcacheSourceDeviceReads int64 + // RfcacheSourceDeviceWrites int64 + // RfcacheWriteMiss int64 + // RfcacheWritePending int64 + // RfcacheWritesReceived int64 + // RfcacheWritesSkippedCacheMiss int64 + // RfcacheWritesSkippedHeavyLoad int64 + // RfcacheWritesSkippedInternalError int64 + // RfcacheWritesSkippedLowResources int64 + // RfcacheWritesSkippedMaxIoSize int64 + // RfcacheWritesSkippedStuckIo int64 + // RmPendingAllocatedInKb int64 + // Rmcache128kbEntryCount int64 + // Rmcache16kbEntryCount int64 + // Rmcache32kbEntryCount int64 + // Rmcache4kbEntryCount int64 + // Rmcache64kbEntryCount int64 + // Rmcache8kbEntryCount int64 + // RmcacheBigBlockEvictionCount int64 + // RmcacheBigBlockEvictionSizeCountInKb int64 + // RmcacheCurrNumOf128kbEntries int64 + // RmcacheCurrNumOf16kbEntries int64 + // RmcacheCurrNumOf32kbEntries int64 + // RmcacheCurrNumOf4kbEntries int64 + // RmcacheCurrNumOf64kbEntries int64 + // RmcacheCurrNumOf8kbEntries int64 + // RmcacheEntryEvictionCount int64 + // RmcacheEntryEvictionSizeCountInKb int64 + // RmcacheNoEvictionCount int64 + // RmcacheSizeInKb int64 + // RmcacheSizeInUseInKb int64 + // RmcacheSkipCountCacheAllBusy int64 + // RmcacheSkipCountLargeIo int64 + // RmcacheSkipCountUnaligned4kbIo int64 + // SdsIds int64 + // SecondaryReadBwc int64 + // SecondaryReadFromDevBwc int64 + // SecondaryReadFromRmcacheBwc int64 + // SecondaryVacInKb int64 + // SecondaryWriteBwc int64 + // SemiProtectedVacInKb int64 + // SnapCapacityInUseInKb int64 + // SnapCapacityInUseOccupiedInKb int64 + // ThickCapacityInUseInKb int64 + // ThinCapacityAllocatedInKb int64 + // ThinCapacityInUseInKb int64 + // TotalReadBwc int64 + // TotalWriteBwc int64 + // UnreachableUnusedCapacityInKb int64 + // UnusedCapacityInKb int64 + } + ProtectionDomainStatistics struct { + // BackgroundScanCompareCount int64 + // BackgroundScannedInMB int64 + // ActiveBckRebuildCapacityInKb int64 + // ActiveFwdRebuildCapacityInKb int64 + // ActiveMovingCapacityInKb int64 + // ActiveMovingInBckRebuildJobs int64 + // ActiveMovingInFwdRebuildJobs int64 + // ActiveMovingInNormRebuildJobs int64 + // ActiveMovingInRebalanceJobs int64 + // ActiveMovingOutBckRebuildJobs int64 + // ActiveMovingOutFwdRebuildJobs int64 + // ActiveMovingOutNormRebuildJobs int64 + // ActiveMovingRebalanceJobs int64 + // ActiveNormRebuildCapacityInKb int64 + // ActiveRebalanceCapacityInKb int64 + // AtRestCapacityInKb int64 + // BckRebuildCapacityInKb int64 + // BckRebuildReadBwc int64 + // BckRebuildWriteBwc int64 + // CapacityAvailableForVolumeAllocationInKb int64 + // CapacityInUseInKb int64 + // CapacityLimitInKb int64 + // DegradedFailedCapacityInKb int64 + // DegradedFailedVacInKb int64 + // DegradedHealthyCapacityInKb int64 + // 
DegradedHealthyVacInKb int64 + // FailedCapacityInKb int64 + // FailedVacInKb int64 + // FaultSetIds int64 + // FixedReadErrorCount int64 + // FwdRebuildCapacityInKb int64 + // FwdRebuildReadBwc int64 + // FwdRebuildWriteBwc int64 + // InMaintenanceCapacityInKb int64 + // InMaintenanceVacInKb int64 + // InUseVacInKb int64 + // MaxCapacityInKb int64 + // MovingCapacityInKb int64 + // NormRebuildCapacityInKb int64 + // NormRebuildReadBwc int64 + // NormRebuildWriteBwc int64 + // NumOfFaultSets int64 + // NumOfMappedToAllVolumes int64 + // NumOfSds int64 + // NumOfSnapshots int64 + // NumOfStoragePools int64 + // NumOfThickBaseVolumes int64 + // NumOfThinBaseVolumes int64 + // NumOfUnmappedVolumes int64 + // NumOfVolumesInDeletion int64 + // PendingBckRebuildCapacityInKb int64 + // PendingFwdRebuildCapacityInKb int64 + // PendingMovingCapacityInKb int64 + // PendingMovingInBckRebuildJobs int64 + // PendingMovingInFwdRebuildJobs int64 + // PendingMovingInNormRebuildJobs int64 + // PendingMovingInRebalanceJobs int64 + // PendingMovingOutBckRebuildJobs int64 + // PendingMovingOutFwdRebuildJobs int64 + // PendingMovingOutNormrebuildJobs int64 + // PendingMovingRebalanceJobs int64 + // PendingNormRebuildCapacityInKb int64 + // PendingRebalanceCapacityInKb int64 + // PrimaryReadBwc int64 + // PrimaryReadFromDevBwc int64 + // PrimaryReadFromRmcacheBwc int64 + // PrimaryVacInKb int64 + // PrimaryWriteBwc int64 + // ProtectedCapacityInKb int64 + // ProtectedVacInKb int64 + // RebalanceCapacityInKb int64 + // RebalancePerReceiveJobNetThrottlingInKbps int64 + // RebalanceReadBwc int64 + // RebalanceWaitSendQLength int64 + // RebalanceWriteBwc int64 + // RebuildPerReceiveJobNetThrottlingInKbps int64 + // RebuildWaitSendQLength int64 + // RfacheReadHit int64 + // RfacheWriteHit int64 + // RfcacheAvgReadTime int64 + // RfcacheAvgWriteTime int64 + // RfcacheFdAvgReadTime int64 + // RfcacheFdAvgWriteTime int64 + // RfcacheFdCacheOverloaded int64 + // RfcacheFdInlightReads int64 + // RfcacheFdInlightWrites int64 + // RfcacheFdIoErrors int64 + // RfcacheFdMonitorErrorStuckIo int64 + // RfcacheFdReadTimeGreater1Min int64 + // RfcacheFdReadTimeGreater1Sec int64 + // RfcacheFdReadTimeGreater500Millis int64 + // RfcacheFdReadTimeGreater5Sec int64 + // RfcacheFdReadsReceived int64 + // RfcacheFdWriteTimeGreater1Min int64 + // RfcacheFdWriteTimeGreater1Sec int64 + // RfcacheFdWriteTimeGreater500Millis int64 + // RfcacheFdWriteTimeGreater5Sec int64 + // RfcacheFdWritesReceived int64 + // RfcacheIoErrors int64 + // RfcacheIosOutstanding int64 + // RfcacheIosSkipped int64 + // RfcachePooIosOutstanding int64 + // RfcachePoolCachePages int64 + // RfcachePoolEvictions int64 + // RfcachePoolInLowMemoryCondition int64 + // RfcachePoolIoTimeGreater1Min int64 + // RfcachePoolLockTimeGreater1Sec int64 + // RfcachePoolLowResourcesInitiatedPassthroughMode int64 + // RfcachePoolNumCacheDevs int64 + // RfcachePoolNumSrcDevs int64 + // RfcachePoolPagesInuse int64 + // RfcachePoolReadHit int64 + // RfcachePoolReadMiss int64 + // RfcachePoolReadPendingG10Millis int64 + // RfcachePoolReadPendingG1Millis int64 + // RfcachePoolReadPendingG1Sec int64 + // RfcachePoolReadPendingG500Micro int64 + // RfcachePoolReadsPending int64 + // RfcachePoolSize int64 + // RfcachePoolSourceIdMismatch int64 + // RfcachePoolSuspendedIos int64 + // RfcachePoolSuspendedPequestsRedundantSearchs int64 + // RfcachePoolWriteHit int64 + // RfcachePoolWriteMiss int64 + // RfcachePoolWritePending int64 + // RfcachePoolWritePendingG10Millis int64 + // 
RfcachePoolWritePendingG1Millis int64 + // RfcachePoolWritePendingG1Sec int64 + // RfcachePoolWritePendingG500Micro int64 + // RfcacheReadMiss int64 + // RfcacheReadsFromCache int64 + // RfcacheReadsPending int64 + // RfcacheReadsReceived int64 + // RfcacheReadsSkipped int64 + // RfcacheReadsSkippedAlignedSizeTooLarge int64 + // RfcacheReadsSkippedHeavyLoad int64 + // RfcacheReadsSkippedInternalError int64 + // RfcacheReadsSkippedLockIos int64 + // RfcacheReadsSkippedLowResources int64 + // RfcacheReadsSkippedMaxIoSize int64 + // RfcacheReadsSkippedStuckIo int64 + // RfcacheSkippedUnlinedWrite int64 + // RfcacheSourceDeviceReads int64 + // RfcacheSourceDeviceWrites int64 + // RfcacheWriteMiss int64 + // RfcacheWritePending int64 + // RfcacheWritesReceived int64 + // RfcacheWritesSkippedCacheMiss int64 + // RfcacheWritesSkippedHeavyLoad int64 + // RfcacheWritesSkippedInternalError int64 + // RfcacheWritesSkippedLowResources int64 + // RfcacheWritesSkippedMaxIoSize int64 + // RfcacheWritesSkippedStuckIo int64 + // RmPendingAllocatedInKb int64 + // Rmcache128kbEntryCount int64 + // Rmcache16kbEntryCount int64 + // Rmcache32kbEntryCount int64 + // Rmcache4kbEntryCount int64 + // Rmcache64kbEntryCount int64 + // Rmcache8kbEntryCount int64 + // RmcacheBigBlockEvictionCount int64 + // RmcacheBigBlockEvictionSizeCountInKb int64 + // RmcacheCurrNumOf128kbEntries int64 + // RmcacheCurrNumOf16kbEntries int64 + // RmcacheCurrNumOf32kbEntries int64 + // RmcacheCurrNumOf4kbEntries int64 + // RmcacheCurrNumOf64kbEntries int64 + // RmcacheCurrNumOf8kbEntries int64 + // RmcacheEntryEvictionCount int64 + // RmcacheEntryEvictionSizeCountInKb int64 + // RmcacheNoEvictionCount int64 + // RmcacheSizeInKb int64 + // RmcacheSizeInUseInKb int64 + // RmcacheSkipCountCacheAllBusy int64 + // RmcacheSkipCountLargeIo int64 + // RmcacheSkipCountUnaligned4kbIo int64 + // SdsIds int64 + // SecondaryReadBwc int64 + // SecondaryReadFromDevBwc int64 + // SecondaryReadFromRmcacheBwc int64 + // SecondaryVacInKb int64 + // SecondaryWriteBwc int64 + // SemiProtectedCapacityInKb int64 + // SemiProtectedVacInKb int64 + // SnapCapacityInUseInKb int64 + // SnapCapacityInUseOccupiedInKb int64 + // SpareCapacityInKb int64 + // StoragePoolIds int64 + // ThickCapacityInUseInKb int64 + // ThinCapacityAllocatedInKb int64 + // ThinCapacityInUseInKb int64 + // TotalReadBwc int64 + // TotalWriteBwc int64 + // UnreachableUnusedCapacityInKb int64 + // UnusedCapacityInKb int64 + // UserDataReadBwc int64 + // UserDataWriteBwc int64 + } + RFCacheDeviceStatistics struct { + // RfcacheFdAvgReadTime int64 + // RfcacheFdAvgWriteTime int64 + // RfcacheFdCacheOverloaded int64 + // RfcacheFdInlightReads int64 + // RfcacheFdInlightWrites int64 + // RfcacheFdIoErrors int64 + // RfcacheFdMonitorErrorStuckIo int64 + // RfcacheFdReadTimeGreater1Min int64 + // RfcacheFdReadTimeGreater1Sec int64 + // RfcacheFdReadTimeGreater500Millis int64 + // RfcacheFdReadTimeGreater5Sec int64 + // RfcacheFdReadsReceived int64 + // RfcacheFdWriteTimeGreater1Min int64 + // RfcacheFdWriteTimeGreater1Sec int64 + // RfcacheFdWriteTimeGreater500Millis int64 + // RfcacheFdWriteTimeGreater5Sec int64 + // RfcacheFdWritesReceived int64 + } + SdsStatistics struct { + // BackgroundScanCompareCount int64 + // BackgroundScannedInMB int64 + // ActiveMovingInBckRebuildJobs int64 + // ActiveMovingInFwdRebuildJobs int64 + // ActiveMovingInNormRebuildJobs int64 + // ActiveMovingInRebalanceJobs int64 + // ActiveMovingOutBckRebuildJobs int64 + // ActiveMovingOutFwdRebuildJobs int64 + // 
ActiveMovingOutNormRebuildJobs int64 + // ActiveMovingRebalanceJobs int64 + // BckRebuildReadBwc int64 + // BckRebuildWriteBwc int64 + // CapacityInUseInKb int64 + // CapacityLimitInKb int64 + // DegradedFailedVacInKb int64 + // DegradedHealthyVacInKb int64 + // DeviceIds int64 + // FailedVacInKb int64 + // FixedReadErrorCount int64 + // FwdRebuildReadBwc int64 + // FwdRebuildWriteBwc int64 + // InMaintenanceVacInKb int64 + // InUseVacInKb int64 + // MaxCapacityInKb int64 + // NormRebuildReadBwc int64 + // NormRebuildWriteBwc int64 + // NumOfDevices int64 + // NumOfRfcacheDevices int64 + // PendingMovingInBckRebuildJobs int64 + // PendingMovingInFwdRebuildJobs int64 + // PendingMovingInNormRebuildJobs int64 + // PendingMovingInRebalanceJobs int64 + // PendingMovingOutBckRebuildJobs int64 + // PendingMovingOutFwdRebuildJobs int64 + // PendingMovingOutNormrebuildJobs int64 + // PendingMovingRebalanceJobs int64 + // PrimaryReadBwc int64 + // PrimaryReadFromDevBwc int64 + // PrimaryReadFromRmcacheBwc int64 + // PrimaryVacInKb int64 + // PrimaryWriteBwc int64 + // ProtectedVacInKb int64 + // RebalancePerReceiveJobNetThrottlingInKbps int64 + // RebalanceReadBwc int64 + // RebalanceWaitSendQLength int64 + // RebalanceWriteBwc int64 + // RebuildPerReceiveJobNetThrottlingInKbps int64 + // RebuildWaitSendQLength int64 + // RfacheReadHit int64 + // RfacheWriteHit int64 + // RfcacheAvgReadTime int64 + // RfcacheAvgWriteTime int64 + // RfcacheDeviceIds int64 + // RfcacheFdAvgReadTime int64 + // RfcacheFdAvgWriteTime int64 + // RfcacheFdCacheOverloaded int64 + // RfcacheFdInlightReads int64 + // RfcacheFdInlightWrites int64 + // RfcacheFdIoErrors int64 + // RfcacheFdMonitorErrorStuckIo int64 + // RfcacheFdReadTimeGreater1Min int64 + // RfcacheFdReadTimeGreater1Sec int64 + // RfcacheFdReadTimeGreater500Millis int64 + // RfcacheFdReadTimeGreater5Sec int64 + // RfcacheFdReadsReceived int64 + // RfcacheFdWriteTimeGreater1Min int64 + // RfcacheFdWriteTimeGreater1Sec int64 + // RfcacheFdWriteTimeGreater500Millis int64 + // RfcacheFdWriteTimeGreater5Sec int64 + // RfcacheFdWritesReceived int64 + // RfcacheIoErrors int64 + // RfcacheIosOutstanding int64 + // RfcacheIosSkipped int64 + // RfcachePooIosOutstanding int64 + // RfcachePoolCachePages int64 + // RfcachePoolContinuosMem int64 + // RfcachePoolEvictions int64 + // RfcachePoolInLowMemoryCondition int64 + // RfcachePoolIoTimeGreater1Min int64 + // RfcachePoolLockTimeGreater1Sec int64 + // RfcachePoolLowResourcesInitiatedPassthroughMode int64 + // RfcachePoolMaxIoSize int64 + // RfcachePoolNumCacheDevs int64 + // RfcachePoolNumOfDriverTheads int64 + // RfcachePoolNumSrcDevs int64 + // RfcachePoolOpmode int64 + // RfcachePoolPageSize int64 + // RfcachePoolPagesInuse int64 + // RfcachePoolReadHit int64 + // RfcachePoolReadMiss int64 + // RfcachePoolReadPendingG10Millis int64 + // RfcachePoolReadPendingG1Millis int64 + // RfcachePoolReadPendingG1Sec int64 + // RfcachePoolReadPendingG500Micro int64 + // RfcachePoolReadsPending int64 + // RfcachePoolSize int64 + // RfcachePoolSourceIdMismatch int64 + // RfcachePoolSuspendedIos int64 + // RfcachePoolSuspendedIosMax int64 + // RfcachePoolSuspendedPequestsRedundantSearchs int64 + // RfcachePoolWriteHit int64 + // RfcachePoolWriteMiss int64 + // RfcachePoolWritePending int64 + // RfcachePoolWritePendingG10Millis int64 + // RfcachePoolWritePendingG1Millis int64 + // RfcachePoolWritePendingG1Sec int64 + // RfcachePoolWritePendingG500Micro int64 + // RfcacheReadMiss int64 + // RfcacheReadsFromCache int64 + // 
RfcacheReadsPending int64 + // RfcacheReadsReceived int64 + // RfcacheReadsSkipped int64 + // RfcacheReadsSkippedAlignedSizeTooLarge int64 + // RfcacheReadsSkippedHeavyLoad int64 + // RfcacheReadsSkippedInternalError int64 + // RfcacheReadsSkippedLockIos int64 + // RfcacheReadsSkippedLowResources int64 + // RfcacheReadsSkippedMaxIoSize int64 + // RfcacheReadsSkippedStuckIo int64 + // RfcacheSkippedUnlinedWrite int64 + // RfcacheSourceDeviceReads int64 + // RfcacheSourceDeviceWrites int64 + // RfcacheWriteMiss int64 + // RfcacheWritePending int64 + // RfcacheWritesReceived int64 + // RfcacheWritesSkippedCacheMiss int64 + // RfcacheWritesSkippedHeavyLoad int64 + // RfcacheWritesSkippedInternalError int64 + // RfcacheWritesSkippedLowResources int64 + // RfcacheWritesSkippedMaxIoSize int64 + // RfcacheWritesSkippedStuckIo int64 + // RmPendingAllocatedInKb int64 + // Rmcache128kbEntryCount int64 + // Rmcache16kbEntryCount int64 + // Rmcache32kbEntryCount int64 + // Rmcache4kbEntryCount int64 + // Rmcache64kbEntryCount int64 + // Rmcache8kbEntryCount int64 + // RmcacheBigBlockEvictionCount int64 + // RmcacheBigBlockEvictionSizeCountInKb int64 + // RmcacheCurrNumOf128kbEntries int64 + // RmcacheCurrNumOf16kbEntries int64 + // RmcacheCurrNumOf32kbEntries int64 + // RmcacheCurrNumOf4kbEntries int64 + // RmcacheCurrNumOf64kbEntries int64 + // RmcacheCurrNumOf8kbEntries int64 + // RmcacheEntryEvictionCount int64 + // RmcacheEntryEvictionSizeCountInKb int64 + // RmcacheNoEvictionCount int64 + // RmcacheSizeInKb int64 + // RmcacheSizeInUseInKb int64 + // RmcacheSkipCountCacheAllBusy int64 + // RmcacheSkipCountLargeIo int64 + // RmcacheSkipCountUnaligned4kbIo int64 + // SecondaryReadBwc int64 + // SecondaryReadFromDevBwc int64 + // SecondaryReadFromRmcacheBwc int64 + // SecondaryVacInKb int64 + // SecondaryWriteBwc int64 + // SemiProtectedVacInKb int64 + // SnapCapacityInUseInKb int64 + // SnapCapacityInUseOccupiedInKb int64 + // ThickCapacityInUseInKb int64 + // ThinCapacityAllocatedInKb int64 + // ThinCapacityInUseInKb int64 + // TotalReadBwc int64 + // TotalWriteBwc int64 + // UnreachableUnusedCapacityInKb int64 + // UnusedCapacityInKb int64 + } + VolumeStatistics struct { + // ChildVolumeIds int64 + // DescendantVolumeIds int64 + // MappedSdcIds int64 + // NumOfChildVolumes int64 + // NumOfDescendantVolumes int64 + // NumOfMappedScsiInitiators int64 + // NumOfMappedSdcs int64 + // UserDataReadBwc int64 + // UserDataWriteBwc int64 + } + VTreeStatistics struct { + // BaseNetCapacityInUseInKb int64 + // NetCapacityInUseInKb int64 + // NumOfVolumes int64 + // SnapNetCapacityInUseInKb int64 + // TrimmedCapacityInKb int64 + // VolumeIds int64 + } +) diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/collect.go b/src/go/collectors/go.d.plugin/modules/scaleio/collect.go new file mode 100644 index 00000000000000..42fd463df3fd51 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/collect.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import ( + "time" + + "github.com/netdata/go.d.plugin/modules/scaleio/client" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +const discoveryEvery = 5 + +func (s *ScaleIO) collect() (map[string]int64, error) { + s.runs += 1 + if !s.lastDiscoveryOK || s.runs%discoveryEvery == 0 { + if err := s.discovery(); err != nil { + return nil, err + } + } + + stats, err := s.client.SelectedStatistics(query) + if err != nil { + return nil, err + } + + mx := metrics{ + System: s.collectSystem(stats.System), + StoragePool: 
s.collectStoragePool(stats.StoragePool), + Sdc: s.collectSdc(stats.Sdc), + } + + s.updateCharts() + return stm.ToMap(mx), nil +} + +func (s *ScaleIO) discovery() error { + start := time.Now() + s.Debugf("starting discovery") + ins, err := s.client.Instances() + if err != nil { + s.lastDiscoveryOK = false + return err + } + s.Debugf("discovery finished: found %d storage pools and %d sdcs in %s", + len(ins.StoragePoolList), len(ins.SdcList), time.Since(start)) + + s.discovered.pool = make(map[string]client.StoragePool, len(ins.StoragePoolList)) + for _, pool := range ins.StoragePoolList { + s.discovered.pool[pool.ID] = pool + } + s.discovered.sdc = make(map[string]client.Sdc, len(ins.SdcList)) + for _, sdc := range ins.SdcList { + s.discovered.sdc[sdc.ID] = sdc + } + s.lastDiscoveryOK = true + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/collect_sdc.go b/src/go/collectors/go.d.plugin/modules/scaleio/collect_sdc.go new file mode 100644 index 00000000000000..495b1a03123416 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/collect_sdc.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import "github.com/netdata/go.d.plugin/modules/scaleio/client" + +func (s ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics { + ms := make(map[string]sdcMetrics, len(ss)) + + for id, stats := range ss { + sdc, ok := s.discovered.sdc[id] + if !ok { + continue + } + var m sdcMetrics + m.BW.set( + calcBW(stats.UserDataReadBwc), + calcBW(stats.UserDataWriteBwc), + ) + m.IOPS.set( + calcIOPS(stats.UserDataReadBwc), + calcIOPS(stats.UserDataWriteBwc), + ) + m.IOSize.set( + calcIOSize(stats.UserDataReadBwc), + calcIOSize(stats.UserDataWriteBwc), + ) + m.MappedVolumes = stats.NumOfMappedVolumes + m.MDMConnectionState = isSdcConnected(sdc.MdmConnectionState) + + ms[id] = m + } + return ms +} + +func isSdcConnected(state string) bool { + return state == "Connected" +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/collect_storage_pool.go b/src/go/collectors/go.d.plugin/modules/scaleio/collect_storage_pool.go new file mode 100644 index 00000000000000..7a41b66bd0cb03 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/collect_storage_pool.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import "github.com/netdata/go.d.plugin/modules/scaleio/client" + +func (s ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics { + ms := make(map[string]storagePoolMetrics, len(ss)) + + for id, stats := range ss { + pool, ok := s.discovered.pool[id] + if !ok { + continue + } + var pm storagePoolMetrics + collectStoragePoolCapacity(&pm, stats, pool) + collectStoragePoolComponents(&pm, stats) + + ms[id] = pm + } + return ms +} + +func collectStoragePoolCapacity(pm *storagePoolMetrics, ps client.StoragePoolStatistics, pool client.StoragePool) { + collectCapacity(&pm.Capacity.capacity, ps.CapacityStatistics) + pm.Capacity.Utilization = calcCapacityUtilization(ps.CapacityInUseInKb, ps.MaxCapacityInKb, pool.SparePercentage) + pm.Capacity.AlertThreshold.Critical = pool.CapacityAlertCriticalThreshold + pm.Capacity.AlertThreshold.High = pool.CapacityAlertHighThreshold +} + +func collectStoragePoolComponents(pm *storagePoolMetrics, ps client.StoragePoolStatistics) { + pm.Components.Devices = ps.NumOfDevices + pm.Components.Snapshots = ps.NumOfSnapshots + pm.Components.Volumes = ps.NumOfVolumes + pm.Components.Vtrees = ps.NumOfVtrees
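+ // A worked example for calcCapacityUtilization below, using hypothetical
+ // numbers: with maxCapacityInKb=1000, sparePercentage=10 and
+ // capacityInUseInKb=450, the spare reservation is 1000/100*10 = 100 KiB,
+ // so utilization = 100*450/(1000-100) = 50 (percent).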
+} + +func calcCapacityUtilization(inUse int64, max int64, sparePercent int64) float64 { + spare := float64(max) / 100 * float64(sparePercent) + return divFloat(float64(100*inUse), float64(max)-spare) +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/collect_system.go b/src/go/collectors/go.d.plugin/modules/scaleio/collect_system.go new file mode 100644 index 00000000000000..6806e19690104e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/collect_system.go @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import "github.com/netdata/go.d.plugin/modules/scaleio/client" + +func (ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics { + var sm systemMetrics + collectSystemCapacity(&sm, ss) + collectSystemWorkload(&sm, ss) + collectSystemRebalance(&sm, ss) + collectSystemRebuild(&sm, ss) + collectSystemComponents(&sm, ss) + return sm +} + +func collectSystemCapacity(sm *systemMetrics, ss client.SystemStatistics) { + collectCapacity(&sm.Capacity, ss.CapacityStatistics) +} + +func collectCapacity(m *capacity, ss client.CapacityStatistics) { + // Health + m.Protected = ss.ProtectedCapacityInKb + m.InMaintenance = ss.InMaintenanceCapacityInKb + m.Degraded = sum(ss.DegradedFailedCapacityInKb, ss.DegradedHealthyCapacityInKb) + m.Failed = ss.FailedCapacityInKb + m.UnreachableUnused = ss.UnreachableUnusedCapacityInKb + + // Capacity + m.MaxCapacity = ss.MaxCapacityInKb + m.ThickInUse = ss.ThickCapacityInUseInKb + m.ThinInUse = ss.ThinCapacityInUseInKb + m.Snapshot = ss.SnapCapacityInUseOccupiedInKb + m.Spare = ss.SpareCapacityInKb + m.Decreased = sum(ss.MaxCapacityInKb, -ss.CapacityLimitInKb) // TODO: probably wrong + // Note: can't use 'UnusedCapacityInKb' directly, dashboard shows calculated value + used := sum( + ss.ProtectedCapacityInKb, + ss.InMaintenanceCapacityInKb, + m.Decreased, + m.Degraded, + ss.FailedCapacityInKb, + ss.SpareCapacityInKb, + ss.UnreachableUnusedCapacityInKb, + ss.SnapCapacityInUseOccupiedInKb, + ) + m.Unused = sum(ss.MaxCapacityInKb, -used) + + // Other + m.InUse = ss.CapacityInUseInKb + m.AvailableForVolumeAllocation = ss.CapacityAvailableForVolumeAllocationInKb +} + +func collectSystemComponents(sm *systemMetrics, ss client.SystemStatistics) { + m := &sm.Components + + m.Devices = ss.NumOfDevices + m.FaultSets = ss.NumOfFaultSets + m.MappedToAllVolumes = ss.NumOfMappedToAllVolumes + m.ProtectionDomains = ss.NumOfProtectionDomains + m.RfcacheDevices = ss.NumOfRfcacheDevices + m.Sdc = ss.NumOfSdc + m.Sds = ss.NumOfSds + m.Snapshots = ss.NumOfSnapshots + m.StoragePools = ss.NumOfStoragePools + m.VTrees = ss.NumOfVtrees + m.Volumes = ss.NumOfVolumes + m.ThickBaseVolumes = ss.NumOfThickBaseVolumes + m.ThinBaseVolumes = ss.NumOfThinBaseVolumes + m.UnmappedVolumes = ss.NumOfUnmappedVolumes + m.MappedVolumes = sum(ss.NumOfVolumes, -ss.NumOfUnmappedVolumes) +} + +func collectSystemWorkload(sm *systemMetrics, ss client.SystemStatistics) { + m := &sm.Workload + + m.Total.BW.set( + calcBW(ss.TotalReadBwc), + calcBW(ss.TotalWriteBwc), + ) + m.Frontend.BW.set( + calcBW(ss.UserDataReadBwc), + calcBW(ss.UserDataWriteBwc), + ) + m.Backend.Primary.BW.set( + calcBW(ss.PrimaryReadBwc), + calcBW(ss.PrimaryWriteBwc), + ) + m.Backend.Secondary.BW.set( + calcBW(ss.SecondaryReadBwc), + calcBW(ss.SecondaryWriteBwc), + ) + m.Backend.Total.BW.set( + sumFloat(m.Backend.Primary.BW.Read, m.Backend.Secondary.BW.Read), + sumFloat(m.Backend.Primary.BW.Write, m.Backend.Secondary.BW.Write), + ) + + m.Total.IOPS.set( + 
calcIOPS(ss.TotalReadBwc), + calcIOPS(ss.TotalWriteBwc), + ) + m.Frontend.IOPS.set( + calcIOPS(ss.UserDataReadBwc), + calcIOPS(ss.UserDataWriteBwc), + ) + m.Backend.Primary.IOPS.set( + calcIOPS(ss.PrimaryReadBwc), + calcIOPS(ss.PrimaryWriteBwc), + ) + m.Backend.Secondary.IOPS.set( + calcIOPS(ss.SecondaryReadBwc), + calcIOPS(ss.SecondaryWriteBwc), + ) + m.Backend.Total.IOPS.set( + sumFloat(m.Backend.Primary.IOPS.Read, m.Backend.Secondary.IOPS.Read), + sumFloat(m.Backend.Primary.IOPS.Write, m.Backend.Secondary.IOPS.Write), + ) + + m.Total.IOSize.set( + calcIOSize(ss.TotalReadBwc), + calcIOSize(ss.TotalWriteBwc), + ) + m.Frontend.IOSize.set( + calcIOSize(ss.UserDataReadBwc), + calcIOSize(ss.UserDataWriteBwc), + ) + m.Backend.Primary.IOSize.set( + calcIOSize(ss.PrimaryReadBwc), + calcIOSize(ss.PrimaryWriteBwc), + ) + m.Backend.Secondary.IOSize.set( + calcIOSize(ss.SecondaryReadBwc), + calcIOSize(ss.SecondaryWriteBwc), + ) + m.Backend.Total.IOSize.set( + sumFloat(m.Backend.Primary.IOSize.Read, m.Backend.Secondary.IOSize.Read), + sumFloat(m.Backend.Primary.IOSize.Write, m.Backend.Secondary.IOSize.Write), + ) +} + +func collectSystemRebuild(sm *systemMetrics, ss client.SystemStatistics) { + m := &sm.Rebuild + + m.Forward.BW.set( + calcBW(ss.FwdRebuildReadBwc), + calcBW(ss.FwdRebuildWriteBwc), + ) + m.Backward.BW.set( + calcBW(ss.BckRebuildReadBwc), + calcBW(ss.BckRebuildWriteBwc), + ) + m.Normal.BW.set( + calcBW(ss.NormRebuildReadBwc), + calcBW(ss.NormRebuildWriteBwc), + ) + m.Total.BW.set( + sumFloat(m.Forward.BW.Read, m.Backward.BW.Read, m.Normal.BW.Read), + sumFloat(m.Forward.BW.Write, m.Backward.BW.Write, m.Normal.BW.Write), + ) + + m.Forward.IOPS.set( + calcIOPS(ss.FwdRebuildReadBwc), + calcIOPS(ss.FwdRebuildWriteBwc), + ) + m.Backward.IOPS.set( + calcIOPS(ss.BckRebuildReadBwc), + calcIOPS(ss.BckRebuildWriteBwc), + ) + m.Normal.IOPS.set( + calcIOPS(ss.NormRebuildReadBwc), + calcIOPS(ss.NormRebuildWriteBwc), + ) + m.Total.IOPS.set( + sumFloat(m.Forward.IOPS.Read, m.Backward.IOPS.Read, m.Normal.IOPS.Read), + sumFloat(m.Forward.IOPS.Write, m.Backward.IOPS.Write, m.Normal.IOPS.Write), + ) + + m.Forward.IOSize.set( + calcIOSize(ss.FwdRebuildReadBwc), + calcIOSize(ss.FwdRebuildWriteBwc), + ) + m.Backward.IOSize.set( + calcIOSize(ss.BckRebuildReadBwc), + calcIOSize(ss.BckRebuildWriteBwc), + ) + m.Normal.IOSize.set( + calcIOSize(ss.NormRebuildReadBwc), + calcIOSize(ss.NormRebuildWriteBwc), + ) + m.Total.IOSize.set( + sumFloat(m.Forward.IOSize.Read, m.Backward.IOSize.Read, m.Normal.IOSize.Read), + sumFloat(m.Forward.IOSize.Write, m.Backward.IOSize.Write, m.Normal.IOSize.Write), + ) + + m.Forward.Pending = ss.PendingFwdRebuildCapacityInKb + m.Backward.Pending = ss.PendingBckRebuildCapacityInKb + m.Normal.Pending = ss.PendingNormRebuildCapacityInKb + m.Total.Pending = sum(m.Forward.Pending, m.Backward.Pending, m.Normal.Pending) +} + +func collectSystemRebalance(sm *systemMetrics, ss client.SystemStatistics) { + m := &sm.Rebalance + + m.BW.set( + calcBW(ss.RebalanceReadBwc), + calcBW(ss.RebalanceWriteBwc), + ) + + m.IOPS.set( + calcIOPS(ss.RebalanceReadBwc), + calcIOPS(ss.RebalanceWriteBwc), + ) + + m.IOSize.set( + calcIOSize(ss.RebalanceReadBwc), + calcIOSize(ss.RebalanceWriteBwc), + ) + + m.Pending = ss.PendingRebalanceCapacityInKb + m.TimeUntilFinish = divFloat(float64(m.Pending), m.BW.ReadWrite) +} + +func calcBW(bwc client.Bwc) float64 { return div(bwc.TotalWeightInKb, bwc.NumSeconds) } +func calcIOPS(bwc client.Bwc) float64 { return div(bwc.NumOccured, bwc.NumSeconds) } +func calcIOSize(bwc 
client.Bwc) float64 { return div(bwc.TotalWeightInKb, bwc.NumOccured) } + +func sum(a, b int64, others ...int64) (res int64) { + for _, v := range others { + res += v + } + return res + a + b +} + +func sumFloat(a, b float64, others ...float64) (res float64) { + for _, v := range others { + res += v + } + return res + a + b +} + +func div(a, b int64) float64 { + return divFloat(float64(a), float64(b)) +} + +func divFloat(a, b float64) float64 { + if b == 0 { + return 0 + } + return a / b +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/config_schema.json b/src/go/collectors/go.d.plugin/modules/scaleio/config_schema.json new file mode 100644 index 00000000000000..66230acc918a96 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/scaleio job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/integrations/dell_emc_scaleio.md b/src/go/collectors/go.d.plugin/modules/scaleio/integrations/dell_emc_scaleio.md new file mode 100644 index 00000000000000..c384730f19803b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/integrations/dell_emc_scaleio.md @@ -0,0 +1,255 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/scaleio/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/scaleio/metadata.yaml" +sidebar_label: "Dell EMC ScaleIO" +learn_status: "Published" +learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Dell EMC ScaleIO + + +<img src="https://netdata.cloud/img/dell.svg" width="150"/> + + +Plugin: go.d.plugin +Module: scaleio + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API. + +It collects metrics for the following ScaleIO components: + +- System +- Storage Pool +- Sdc + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels. + + + +### Per Dell EMC ScaleIO instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| scaleio.system_capacity_total | total | KiB | +| scaleio.system_capacity_in_use | in_use | KiB | +| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB | +| scaleio.system_capacity_available_volume_allocation | available | KiB | +| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB | +| scaleio.system_workload_primary_bandwidth_total | total | KiB/s | +| scaleio.system_workload_primary_bandwidth | read, write | KiB/s | +| scaleio.system_workload_primary_iops_total | total | iops/s | +| scaleio.system_workload_primary_iops | read, write | iops/s | +| scaleio.system_workload_primary_io_size_total | io_size | KiB | +| scaleio.system_rebalance | read, write | KiB/s | +| scaleio.system_rebalance_left | left | KiB | +| scaleio.system_rebalance_time_until_finish | time | seconds | +| scaleio.system_rebuild | read, write | KiB/s | +| scaleio.system_rebuild_left | left | KiB | +| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components | +| scaleio.system_components_volumes_by_type | thick, thin | volumes | +| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes | + +### Per storage pool + +These metrics refer to the storage pool. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| scaleio.storage_pool_capacity_total | total | KiB | +| scaleio.storage_pool_capacity_in_use | in_use | KiB | +| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB | +| scaleio.storage_pool_capacity_utilization | used | percentage | +| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB | +| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB | +| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components | + +### Per sdc + +These metrics refer to the SDC (ScaleIO Data Client). + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| scaleio.sdc_mdm_connection_state | connected | boolean | +| scaleio.sdc_bandwidth | read, write | KiB/s | +| scaleio.sdc_iops | read, write | iops/s | +| scaleio.sdc_io_size | read, write | KiB | +| scaleio.sdc_num_of_mapped_volumed | mapped | volumes | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/scaleio.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/scaleio.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. 
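+For example, a minimal sketch of a configuration that combines global and per-job settings (all values and credentials below are illustrative):
+
+```yaml
+# go.d/scaleio.conf
+update_every: 10        # global: applies to every job unless overridden
+autodetection_retry: 60 # global: recheck failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: https://127.0.0.1
+    username: admin
+    password: password
+```
+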
+ + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | https://127.0.0.1:80 | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | yes | +| password | Password for basic HTTP authentication. | | yes | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1 + username: admin + password: password + tls_skip_verify: yes # self-signed certificate + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instance. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1 + username: admin + password: password + tls_skip_verify: yes # self-signed certificate + + - name: remote + url: https://203.0.113.10 + username: admin + password: password + tls_skip_verify: yes + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m scaleio + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/metadata.yaml b/src/go/collectors/go.d.plugin/modules/scaleio/metadata.yaml new file mode 100644 index 00000000000000..edee6fc8b0e430 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/metadata.yaml @@ -0,0 +1,399 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-scaleio + plugin_name: go.d.plugin + module_name: scaleio + monitored_instance: + name: Dell EMC ScaleIO + link: https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm + icon_filename: dell.svg + categories: + - data-collection.storage-mount-points-and-filesystems + keywords: + - scaleio + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors ScaleIO (VxFlex OS) instances via VxFlex OS Gateway API. + + It collects metrics for the following ScaleIO components: + + - System + - Storage Pool + - Sdc + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/scaleio.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: https://127.0.0.1:80 + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: true + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: true + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. 
+ default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: https://127.0.0.1 + username: admin + password: password + tls_skip_verify: yes # self-signed certificate + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instance. + config: | + jobs: + - name: local + url: https://127.0.0.1 + username: admin + password: password + tls_skip_verify: yes # self-signed certificate + + - name: remote + url: https://203.0.113.10 + username: admin + password: password + tls_skip_verify: yes + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: scaleio.system_capacity_total + description: Total Capacity + unit: KiB + chart_type: line + dimensions: + - name: total + - name: scaleio.system_capacity_in_use + description: Capacity In Use + unit: KiB + chart_type: line + dimensions: + - name: in_use + - name: scaleio.system_capacity_usage + description: Capacity Usage + unit: KiB + chart_type: stacked + dimensions: + - name: thick + - name: decreased + - name: thin + - name: snapshot + - name: spare + - name: unused + - name: scaleio.system_capacity_available_volume_allocation + description: Available For Volume Allocation + unit: KiB + chart_type: line + dimensions: + - name: available + - name: scaleio.system_capacity_health_state + description: Capacity Health State + unit: KiB + chart_type: stacked + dimensions: + - name: protected + - name: degraded + - name: in_maintenance + - name: failed + - name: unavailable + - name: scaleio.system_workload_primary_bandwidth_total + description: Primary Backend Bandwidth Total (Read and Write) + unit: KiB/s + chart_type: line + dimensions: + - name: total + - name: scaleio.system_workload_primary_bandwidth + description: Primary Backend Bandwidth + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.system_workload_primary_iops_total + description: Primary Backend IOPS Total (Read and Write) + unit: iops/s + chart_type: line + dimensions: + - name: total + - name: scaleio.system_workload_primary_iops + description: Primary Backend IOPS + unit: iops/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.system_workload_primary_io_size_total + description: Primary Backend I/O Size Total (Read and Write) + unit: KiB + chart_type: line + dimensions: + - name: io_size + - name: scaleio.system_rebalance + description: Rebalance + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.system_rebalance_left + description: Rebalance Pending Capacity + unit: KiB + chart_type: line + dimensions: + - name: left + - name: scaleio.system_rebalance_time_until_finish + description: Rebalance Approximate Time Until Finish + unit: seconds + chart_type: line + dimensions: + - name: time + - name: scaleio.system_rebuild + description: Rebuild Bandwidth Total (Forward, Backward and Normal) + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.system_rebuild_left + description: Rebuild Pending Capacity 
Total (Forward, Backward and Normal) + unit: KiB + chart_type: line + dimensions: + - name: left + - name: scaleio.system_defined_components + description: Components + unit: components + chart_type: line + dimensions: + - name: devices + - name: fault_sets + - name: protection_domains + - name: rfcache_devices + - name: sdc + - name: sds + - name: snapshots + - name: storage_pools + - name: volumes + - name: vtrees + - name: scaleio.system_components_volumes_by_type + description: Volumes By Type + unit: volumes + chart_type: stacked + dimensions: + - name: thick + - name: thin + - name: scaleio.system_components_volumes_by_mapping + description: Volumes By Mapping + unit: volumes + chart_type: stacked + dimensions: + - name: mapped + - name: unmapped + - name: storage pool + description: These metrics refer to the storage pool. + labels: [] + metrics: + - name: scaleio.storage_pool_capacity_total + description: Total Capacity + unit: KiB + chart_type: line + dimensions: + - name: total + - name: scaleio.storage_pool_capacity_in_use + description: Capacity In Use + unit: KiB + chart_type: line + dimensions: + - name: in_use + - name: scaleio.storage_pool_capacity_usage + description: Capacity Usage + unit: KiB + chart_type: stacked + dimensions: + - name: thick + - name: decreased + - name: thin + - name: snapshot + - name: spare + - name: unused + - name: scaleio.storage_pool_capacity_utilization + description: Capacity Utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: scaleio.storage_pool_capacity_available_volume_allocation + description: Available For Volume Allocation + unit: KiB + chart_type: line + dimensions: + - name: available + - name: scaleio.storage_pool_capacity_health_state + description: Capacity Health State + unit: KiB + chart_type: stacked + dimensions: + - name: protected + - name: degraded + - name: in_maintenance + - name: failed + - name: unavailable + - name: scaleio.storage_pool_components + description: Components + unit: components + chart_type: line + dimensions: + - name: devices + - name: snapshots + - name: volumes + - name: vtrees + - name: sdc + description: These metrics refer to the SDC (ScaleIO Data Client). 
+ labels: [] + metrics: + - name: scaleio.sdc_mdm_connection_state + description: MDM Connection State + unit: boolean + chart_type: line + dimensions: + - name: connected + - name: scaleio.sdc_bandwidth + description: Bandwidth + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.sdc_iops + description: IOPS + unit: iops/s + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.sdc_io_size + description: IOPS Size + unit: KiB + chart_type: area + dimensions: + - name: read + - name: write + - name: scaleio.sdc_num_of_mapped_volumed + description: Mapped Volumes + unit: volumes + chart_type: line + dimensions: + - name: mapped diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/metrics.go b/src/go/collectors/go.d.plugin/modules/scaleio/metrics.go new file mode 100644 index 00000000000000..a5a9b9810bdc8c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/metrics.go @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +type metrics struct { + System systemMetrics `stm:"system"` + Sdc map[string]sdcMetrics `stm:"sdc"` + StoragePool map[string]storagePoolMetrics `stm:"storage_pool"` +} + +type capacity struct { + MaxCapacity int64 `stm:"max_capacity"` + ThickInUse int64 `stm:"thick_in_use"` + ThinInUse int64 `stm:"thin_in_use"` + Snapshot int64 `stm:"snapshot"` + Spare int64 `stm:"spare"` + Decreased int64 `stm:"decreased"` // not in statistics, should be calculated + Unused int64 `stm:"unused"` + + InUse int64 `stm:"in_use"` + AvailableForVolumeAllocation int64 `stm:"available_for_volume_allocation"` + + Protected int64 `stm:"protected"` + InMaintenance int64 `stm:"in_maintenance"` + Degraded int64 `stm:"degraded"` + Failed int64 `stm:"failed"` + UnreachableUnused int64 `stm:"unreachable_unused"` +} + +type ( + systemMetrics struct { + Capacity systemCapacity `stm:"capacity"` + Workload systemWorkload `stm:""` + Rebalance systemRebalance `stm:"rebalance"` + Rebuild systemRebuild `stm:"rebuild"` + Components systemComponents `stm:"num_of"` + } + systemCapacity = capacity + systemComponents struct { + Devices int64 `stm:"devices"` + FaultSets int64 `stm:"fault_sets"` + ProtectionDomains int64 `stm:"protection_domains"` + RfcacheDevices int64 `stm:"rfcache_devices"` + Sdc int64 `stm:"sdc"` + Sds int64 `stm:"sds"` + Snapshots int64 `stm:"snapshots"` + StoragePools int64 `stm:"storage_pools"` + MappedToAllVolumes int64 `stm:"mapped_to_all_volumes"` + ThickBaseVolumes int64 `stm:"thick_base_volumes"` + ThinBaseVolumes int64 `stm:"thin_base_volumes"` + UnmappedVolumes int64 `stm:"unmapped_volumes"` + MappedVolumes int64 `stm:"mapped_volumes"` + Volumes int64 `stm:"volumes"` + VTrees int64 `stm:"vtrees"` + } + systemWorkload struct { + Total bwIOPS `stm:"total"` + Backend struct { + Total bwIOPS `stm:"total"` + Primary bwIOPS `stm:"primary"` + Secondary bwIOPS `stm:"secondary"` + } `stm:"backend"` + Frontend bwIOPS `stm:"frontend_user_data"` + } + systemRebalance struct { + TimeUntilFinish float64 `stm:"time_until_finish"` + bwIOPSPending `stm:""` + } + systemRebuild struct { + Total bwIOPSPending `stm:"total"` + Forward bwIOPSPending `stm:"forward"` + Backward bwIOPSPending `stm:"backward"` + Normal bwIOPSPending `stm:"normal"` + } +) + +type ( + sdcMetrics struct { + bwIOPS `stm:""` + MappedVolumes int64 `stm:"num_of_mapped_volumes"` + MDMConnectionState bool `stm:"mdm_connection_state"` + } +) + +type ( + storagePoolMetrics struct { + Capacity storagePoolCapacity 
`stm:"capacity"` + Components struct { + Devices int64 `stm:"devices"` + Volumes int64 `stm:"volumes"` + Vtrees int64 `stm:"vtrees"` + Snapshots int64 `stm:"snapshots"` + } `stm:"num_of"` + } + storagePoolCapacity struct { + capacity `stm:""` + Utilization float64 `stm:"utilization,100,1"` // TODO: only StoragePool (sparePercentage) + AlertThreshold struct { + Critical int64 `stm:"critical_threshold"` + High int64 `stm:"high_threshold"` + } `stm:"alert"` + } +) + +type ( + readWrite struct { + Read float64 `stm:"read,1000,1"` + Write float64 `stm:"write,1000,1"` + ReadWrite float64 `stm:"read_write,1000,1"` + } + bwIOPS struct { + BW readWrite `stm:"bandwidth"` + IOPS readWrite `stm:"iops"` + IOSize readWrite `stm:"io_size"` + } + bwIOPSPending struct { + bwIOPS `stm:""` + Pending int64 `stm:"pending_capacity_in_Kb"` + } +) + +func (rw *readWrite) set(r, w float64) { + rw.Read = r + rw.Write = w + rw.ReadWrite = r + w +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/queries.go b/src/go/collectors/go.d.plugin/modules/scaleio/queries.go new file mode 100644 index 00000000000000..66e68fc6aecb51 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/queries.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import "github.com/netdata/go.d.plugin/modules/scaleio/client" + +/* +Starting from version 3 of ScaleIO/VxFlex API numOfScsiInitiators property is removed from the system selectedStatisticsQuery. +Reference: VxFlex OS v3.x REST API Reference Guide.pdf +*/ + +var query = client.SelectedStatisticsQuery{ + List: []client.SelectedObject{ + { + Type: "System", + Properties: []string{ + "maxCapacityInKb", + "thickCapacityInUseInKb", + "thinCapacityInUseInKb", + "snapCapacityInUseOccupiedInKb", + "spareCapacityInKb", + "capacityLimitInKb", + + "protectedCapacityInKb", + "degradedHealthyCapacityInKb", + "degradedFailedCapacityInKb", + "failedCapacityInKb", + "unreachableUnusedCapacityInKb", + "inMaintenanceCapacityInKb", + + "capacityInUseInKb", + "capacityAvailableForVolumeAllocationInKb", + + "numOfDevices", + "numOfFaultSets", + "numOfProtectionDomains", + "numOfRfcacheDevices", + "numOfSdc", + "numOfSds", + "numOfSnapshots", + "numOfStoragePools", + "numOfVolumes", + "numOfVtrees", + "numOfThickBaseVolumes", + "numOfThinBaseVolumes", + "numOfMappedToAllVolumes", + "numOfUnmappedVolumes", + + "rebalanceReadBwc", + "rebalanceWriteBwc", + "pendingRebalanceCapacityInKb", + + "pendingNormRebuildCapacityInKb", + "pendingBckRebuildCapacityInKb", + "pendingFwdRebuildCapacityInKb", + "normRebuildReadBwc", + "normRebuildWriteBwc", + "bckRebuildReadBwc", + "bckRebuildWriteBwc", + "fwdRebuildReadBwc", + "fwdRebuildWriteBwc", + + "primaryReadBwc", + "primaryWriteBwc", + "secondaryReadBwc", + "secondaryWriteBwc", + "userDataReadBwc", + "userDataWriteBwc", + "totalReadBwc", + "totalWriteBwc", + }, + }, + { + Type: "StoragePool", + AllIDs: true, + Properties: []string{ + "maxCapacityInKb", + "thickCapacityInUseInKb", + "thinCapacityInUseInKb", + "snapCapacityInUseOccupiedInKb", + "spareCapacityInKb", + "capacityLimitInKb", + + "protectedCapacityInKb", + "degradedHealthyCapacityInKb", + "degradedFailedCapacityInKb", + "failedCapacityInKb", + "unreachableUnusedCapacityInKb", + "inMaintenanceCapacityInKb", + + "capacityInUseInKb", + "capacityAvailableForVolumeAllocationInKb", + + "numOfDevices", + "numOfVolumes", + "numOfVtrees", + "numOfSnapshots", + }, + }, + { + Type: "Sdc", + AllIDs: true, + Properties: []string{ + "userDataReadBwc", + 
"userDataWriteBwc", + + "numOfMappedVolumes", + }, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/scaleio.go b/src/go/collectors/go.d.plugin/modules/scaleio/scaleio.go new file mode 100644 index 00000000000000..05bb03c5be7d05 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/scaleio.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/modules/scaleio/client" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("scaleio", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +// New creates ScaleIO with default values. +func New() *ScaleIO { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "https://127.0.0.1", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + return &ScaleIO{ + Config: config, + charts: systemCharts.Copy(), + charted: make(map[string]bool), + } +} + +type ( + // Config is the ScaleIO module configuration. + Config struct { + web.HTTP `yaml:",inline"` + } + // ScaleIO ScaleIO module. + ScaleIO struct { + module.Base + Config `yaml:",inline"` + client *client.Client + charts *module.Charts + + discovered instances + charted map[string]bool + + lastDiscoveryOK bool + runs int + } + instances struct { + sdc map[string]client.Sdc + pool map[string]client.StoragePool + } +) + +// Init makes initialization. +func (s *ScaleIO) Init() bool { + if s.Username == "" || s.Password == "" { + s.Error("username and password aren't set") + return false + } + + c, err := client.New(s.Client, s.Request) + if err != nil { + s.Errorf("error on creating ScaleIO client: %v", err) + return false + } + s.client = c + + s.Debugf("using URL %s", s.URL) + s.Debugf("using timeout: %s", s.Timeout.Duration) + return true +} + +// Check makes check. +func (s *ScaleIO) Check() bool { + if err := s.client.Login(); err != nil { + s.Error(err) + return false + } + return len(s.Collect()) > 0 +} + +// Charts returns Charts. +func (s *ScaleIO) Charts() *module.Charts { + return s.charts +} + +// Collect collects metrics. +func (s *ScaleIO) Collect() map[string]int64 { + mx, err := s.collect() + if err != nil { + s.Error(err) + return nil + } + + if len(mx) == 0 { + return nil + } + return mx +} + +// Cleanup makes cleanup. 
+func (s *ScaleIO) Cleanup() { + if s.client == nil { + return + } + _ = s.client.Logout() +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/scaleio_test.go b/src/go/collectors/go.d.plugin/modules/scaleio/scaleio_test.go new file mode 100644 index 00000000000000..5547b174bc49be --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/scaleio_test.go @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scaleio + +import ( + "encoding/json" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/modules/scaleio/client" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + selectedStatisticsData, _ = os.ReadFile("testdata/selected_statistics.json") + instancesData, _ = os.ReadFile("testdata/instances.json") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, selectedStatisticsData) + assert.NotNil(t, instancesData) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestScaleIO_Init(t *testing.T) { + scaleIO := New() + scaleIO.Username = "username" + scaleIO.Password = "password" + + assert.True(t, scaleIO.Init()) +} +func TestScaleIO_Init_UsernameAndPasswordNotSet(t *testing.T) { + assert.False(t, New().Init()) +} + +func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { + job := New() + job.Username = "username" + job.Password = "password" + job.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, job.Init()) +} + +func TestScaleIO_Check(t *testing.T) { + srv, _, scaleIO := prepareSrvMockScaleIO(t) + defer srv.Close() + require.True(t, scaleIO.Init()) + + assert.True(t, scaleIO.Check()) +} + +func TestScaleIO_Check_ErrorOnLogin(t *testing.T) { + srv, mock, scaleIO := prepareSrvMockScaleIO(t) + defer srv.Close() + require.True(t, scaleIO.Init()) + mock.Password = "new password" + + assert.False(t, scaleIO.Check()) +} + +func TestScaleIO_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestScaleIO_Cleanup(t *testing.T) { + srv, _, scaleIO := prepareSrvMockScaleIO(t) + defer srv.Close() + require.True(t, scaleIO.Init()) + require.True(t, scaleIO.Check()) + + scaleIO.Cleanup() + assert.False(t, scaleIO.client.LoggedIn()) +} + +func TestScaleIO_Collect(t *testing.T) { + srv, _, scaleIO := prepareSrvMockScaleIO(t) + defer srv.Close() + require.True(t, scaleIO.Init()) + require.True(t, scaleIO.Check()) + + expected := map[string]int64{ + "sdc_6076fd0f00000000_bandwidth_read": 0, + "sdc_6076fd0f00000000_bandwidth_read_write": 0, + "sdc_6076fd0f00000000_bandwidth_write": 0, + "sdc_6076fd0f00000000_io_size_read": 0, + "sdc_6076fd0f00000000_io_size_read_write": 0, + "sdc_6076fd0f00000000_io_size_write": 0, + "sdc_6076fd0f00000000_iops_read": 0, + "sdc_6076fd0f00000000_iops_read_write": 0, + "sdc_6076fd0f00000000_iops_write": 0, + "sdc_6076fd0f00000000_mdm_connection_state": 1, + "sdc_6076fd0f00000000_num_of_mapped_volumes": 1, + "sdc_6076fd1000000001_bandwidth_read": 1000, + "sdc_6076fd1000000001_bandwidth_read_write": 117400000, + "sdc_6076fd1000000001_bandwidth_write": 117399000, + "sdc_6076fd1000000001_io_size_read": 1000, + "sdc_6076fd1000000001_io_size_read_write": 695668, + "sdc_6076fd1000000001_io_size_write": 694668, + "sdc_6076fd1000000001_iops_read": 1000, + "sdc_6076fd1000000001_iops_read_write": 170000, + "sdc_6076fd1000000001_iops_write": 169000, + "sdc_6076fd1000000001_mdm_connection_state": 0, + 
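+ // Per the `stm:"read,1000,1"` tags on the readWrite type in metrics.go,
+ // float rate values are scaled by 1000 when flattened into this map:
+ // e.g. "sdc_6076fd1000000001_bandwidth_read": 1000 above corresponds to
+ // roughly 1.0 KiB/s of read bandwidth.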
"sdc_6076fd1000000001_num_of_mapped_volumes": 1, + "sdc_6076fd1100000002_bandwidth_read": 0, + "sdc_6076fd1100000002_bandwidth_read_write": 118972000, + "sdc_6076fd1100000002_bandwidth_write": 118972000, + "sdc_6076fd1100000002_io_size_read": 0, + "sdc_6076fd1100000002_io_size_read_write": 820496, + "sdc_6076fd1100000002_io_size_write": 820496, + "sdc_6076fd1100000002_iops_read": 0, + "sdc_6076fd1100000002_iops_read_write": 145000, + "sdc_6076fd1100000002_iops_write": 145000, + "sdc_6076fd1100000002_mdm_connection_state": 0, + "sdc_6076fd1100000002_num_of_mapped_volumes": 1, + "storage_pool_40395b7b00000000_capacity_alert_critical_threshold": 90, + "storage_pool_40395b7b00000000_capacity_alert_high_threshold": 80, + "storage_pool_40395b7b00000000_capacity_available_for_volume_allocation": 100663296, + "storage_pool_40395b7b00000000_capacity_decreased": 0, + "storage_pool_40395b7b00000000_capacity_degraded": 0, + "storage_pool_40395b7b00000000_capacity_failed": 0, + "storage_pool_40395b7b00000000_capacity_in_maintenance": 0, + "storage_pool_40395b7b00000000_capacity_in_use": 50110464, + "storage_pool_40395b7b00000000_capacity_max_capacity": 311424000, + "storage_pool_40395b7b00000000_capacity_protected": 50110464, + "storage_pool_40395b7b00000000_capacity_snapshot": 749568, + "storage_pool_40395b7b00000000_capacity_spare": 31141888, + "storage_pool_40395b7b00000000_capacity_thick_in_use": 0, + "storage_pool_40395b7b00000000_capacity_thin_in_use": 49360896, + "storage_pool_40395b7b00000000_capacity_unreachable_unused": 0, + "storage_pool_40395b7b00000000_capacity_unused": 229422080, + "storage_pool_40395b7b00000000_capacity_utilization": 1787, + "storage_pool_40395b7b00000000_num_of_devices": 3, + "storage_pool_40395b7b00000000_num_of_snapshots": 1, + "storage_pool_40395b7b00000000_num_of_volumes": 3, + "storage_pool_40395b7b00000000_num_of_vtrees": 2, + "storage_pool_4039828b00000001_capacity_alert_critical_threshold": 90, + "storage_pool_4039828b00000001_capacity_alert_high_threshold": 80, + "storage_pool_4039828b00000001_capacity_available_for_volume_allocation": 142606336, + "storage_pool_4039828b00000001_capacity_decreased": 0, + "storage_pool_4039828b00000001_capacity_degraded": 0, + "storage_pool_4039828b00000001_capacity_failed": 0, + "storage_pool_4039828b00000001_capacity_in_maintenance": 0, + "storage_pool_4039828b00000001_capacity_in_use": 0, + "storage_pool_4039828b00000001_capacity_max_capacity": 332395520, + "storage_pool_4039828b00000001_capacity_protected": 0, + "storage_pool_4039828b00000001_capacity_snapshot": 0, + "storage_pool_4039828b00000001_capacity_spare": 33239040, + "storage_pool_4039828b00000001_capacity_thick_in_use": 0, + "storage_pool_4039828b00000001_capacity_thin_in_use": 0, + "storage_pool_4039828b00000001_capacity_unreachable_unused": 0, + "storage_pool_4039828b00000001_capacity_unused": 299156480, + "storage_pool_4039828b00000001_capacity_utilization": 0, + "storage_pool_4039828b00000001_num_of_devices": 3, + "storage_pool_4039828b00000001_num_of_snapshots": 0, + "storage_pool_4039828b00000001_num_of_volumes": 0, + "storage_pool_4039828b00000001_num_of_vtrees": 0, + "system_backend_primary_bandwidth_read": 800, + "system_backend_primary_bandwidth_read_write": 238682400, + "system_backend_primary_bandwidth_write": 238681600, + "system_backend_primary_io_size_read": 4000, + "system_backend_primary_io_size_read_write": 770971, + "system_backend_primary_io_size_write": 766971, + "system_backend_primary_iops_read": 200, + 
"system_backend_primary_iops_read_write": 311400, + "system_backend_primary_iops_write": 311200, + "system_backend_secondary_bandwidth_read": 0, + "system_backend_secondary_bandwidth_read_write": 233926400, + "system_backend_secondary_bandwidth_write": 233926400, + "system_backend_secondary_io_size_read": 0, + "system_backend_secondary_io_size_read_write": 764465, + "system_backend_secondary_io_size_write": 764465, + "system_backend_secondary_iops_read": 0, + "system_backend_secondary_iops_read_write": 306000, + "system_backend_secondary_iops_write": 306000, + "system_backend_total_bandwidth_read": 800, + "system_backend_total_bandwidth_read_write": 472608800, + "system_backend_total_bandwidth_write": 472608000, + "system_backend_total_io_size_read": 4000, + "system_backend_total_io_size_read_write": 1535437, + "system_backend_total_io_size_write": 1531437, + "system_backend_total_iops_read": 200, + "system_backend_total_iops_read_write": 617400, + "system_backend_total_iops_write": 617200, + "system_capacity_available_for_volume_allocation": 243269632, + "system_capacity_decreased": 0, + "system_capacity_degraded": 0, + "system_capacity_failed": 0, + "system_capacity_in_maintenance": 0, + "system_capacity_in_use": 50110464, + "system_capacity_max_capacity": 643819520, + "system_capacity_protected": 50110464, + "system_capacity_snapshot": 749568, + "system_capacity_spare": 64380928, + "system_capacity_thick_in_use": 0, + "system_capacity_thin_in_use": 49360896, + "system_capacity_unreachable_unused": 0, + "system_capacity_unused": 528578560, + "system_frontend_user_data_bandwidth_read": 0, + "system_frontend_user_data_bandwidth_read_write": 227170000, + "system_frontend_user_data_bandwidth_write": 227170000, + "system_frontend_user_data_io_size_read": 0, + "system_frontend_user_data_io_size_read_write": 797087, + "system_frontend_user_data_io_size_write": 797087, + "system_frontend_user_data_iops_read": 0, + "system_frontend_user_data_iops_read_write": 285000, + "system_frontend_user_data_iops_write": 285000, + "system_num_of_devices": 6, + "system_num_of_fault_sets": 0, + "system_num_of_mapped_to_all_volumes": 0, + "system_num_of_mapped_volumes": 3, + "system_num_of_protection_domains": 1, + "system_num_of_rfcache_devices": 0, + "system_num_of_sdc": 3, + "system_num_of_sds": 3, + "system_num_of_snapshots": 1, + "system_num_of_storage_pools": 2, + "system_num_of_thick_base_volumes": 0, + "system_num_of_thin_base_volumes": 2, + "system_num_of_unmapped_volumes": 0, + "system_num_of_volumes": 3, + "system_num_of_vtrees": 2, + "system_rebalance_bandwidth_read": 0, + "system_rebalance_bandwidth_read_write": 0, + "system_rebalance_bandwidth_write": 0, + "system_rebalance_io_size_read": 0, + "system_rebalance_io_size_read_write": 0, + "system_rebalance_io_size_write": 0, + "system_rebalance_iops_read": 0, + "system_rebalance_iops_read_write": 0, + "system_rebalance_iops_write": 0, + "system_rebalance_pending_capacity_in_Kb": 0, + "system_rebalance_time_until_finish": 0, + "system_rebuild_backward_bandwidth_read": 0, + "system_rebuild_backward_bandwidth_read_write": 0, + "system_rebuild_backward_bandwidth_write": 0, + "system_rebuild_backward_io_size_read": 0, + "system_rebuild_backward_io_size_read_write": 0, + "system_rebuild_backward_io_size_write": 0, + "system_rebuild_backward_iops_read": 0, + "system_rebuild_backward_iops_read_write": 0, + "system_rebuild_backward_iops_write": 0, + "system_rebuild_backward_pending_capacity_in_Kb": 0, + "system_rebuild_forward_bandwidth_read": 0, + 
"system_rebuild_forward_bandwidth_read_write": 0, + "system_rebuild_forward_bandwidth_write": 0, + "system_rebuild_forward_io_size_read": 0, + "system_rebuild_forward_io_size_read_write": 0, + "system_rebuild_forward_io_size_write": 0, + "system_rebuild_forward_iops_read": 0, + "system_rebuild_forward_iops_read_write": 0, + "system_rebuild_forward_iops_write": 0, + "system_rebuild_forward_pending_capacity_in_Kb": 0, + "system_rebuild_normal_bandwidth_read": 0, + "system_rebuild_normal_bandwidth_read_write": 0, + "system_rebuild_normal_bandwidth_write": 0, + "system_rebuild_normal_io_size_read": 0, + "system_rebuild_normal_io_size_read_write": 0, + "system_rebuild_normal_io_size_write": 0, + "system_rebuild_normal_iops_read": 0, + "system_rebuild_normal_iops_read_write": 0, + "system_rebuild_normal_iops_write": 0, + "system_rebuild_normal_pending_capacity_in_Kb": 0, + "system_rebuild_total_bandwidth_read": 0, + "system_rebuild_total_bandwidth_read_write": 0, + "system_rebuild_total_bandwidth_write": 0, + "system_rebuild_total_io_size_read": 0, + "system_rebuild_total_io_size_read_write": 0, + "system_rebuild_total_io_size_write": 0, + "system_rebuild_total_iops_read": 0, + "system_rebuild_total_iops_read_write": 0, + "system_rebuild_total_iops_write": 0, + "system_rebuild_total_pending_capacity_in_Kb": 0, + "system_total_bandwidth_read": 800, + "system_total_bandwidth_read_write": 472608800, + "system_total_bandwidth_write": 472608000, + "system_total_io_size_read": 4000, + "system_total_io_size_read_write": 769729, + "system_total_io_size_write": 765729, + "system_total_iops_read": 200, + "system_total_iops_read_write": 617400, + "system_total_iops_write": 617200, + } + + collected := scaleIO.Collect() + assert.Equal(t, expected, collected) + testCharts(t, scaleIO, collected) +} + +func TestScaleIO_Collect_ConnectionRefused(t *testing.T) { + srv, _, scaleIO := prepareSrvMockScaleIO(t) + defer srv.Close() + require.True(t, scaleIO.Init()) + require.True(t, scaleIO.Check()) + scaleIO.client.Request.URL = "http://127.0.0.1:38001" + + assert.Nil(t, scaleIO.Collect()) +} + +func testCharts(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) { + t.Helper() + ensureStoragePoolChartsAreCreated(t, scaleIO) + ensureSdcChartsAreCreated(t, scaleIO) + ensureCollectedHasAllChartsDimsVarsIDs(t, scaleIO, collected) +} + +func ensureStoragePoolChartsAreCreated(t *testing.T, scaleIO *ScaleIO) { + for _, pool := range scaleIO.discovered.pool { + for _, chart := range *newStoragePoolCharts(pool) { + assert.Truef(t, scaleIO.Charts().Has(chart.ID), "chart '%s' is not created", chart.ID) + } + } +} + +func ensureSdcChartsAreCreated(t *testing.T, scaleIO *ScaleIO) { + for _, sdc := range scaleIO.discovered.sdc { + for _, chart := range *newSdcCharts(sdc) { + assert.Truef(t, scaleIO.Charts().Has(chart.ID), "chart '%s' is not created", chart.ID) + } + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) { + for _, chart := range *scaleIO.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareSrvMockScaleIO(t *testing.T) (*httptest.Server, *client.MockScaleIOAPIServer, *ScaleIO) { + t.Helper() + const ( + user = "user" + password = "password" + version = 
"2.5" + token = "token" + ) + var stats client.SelectedStatistics + err := json.Unmarshal(selectedStatisticsData, &stats) + require.NoError(t, err) + + var ins client.Instances + err = json.Unmarshal(instancesData, &ins) + require.NoError(t, err) + + mock := client.MockScaleIOAPIServer{ + User: user, + Password: password, + Version: version, + Token: token, + Instances: ins, + Statistics: stats, + } + srv := httptest.NewServer(&mock) + require.NoError(t, err) + + scaleIO := New() + scaleIO.URL = srv.URL + scaleIO.Username = user + scaleIO.Password = password + return srv, &mock, scaleIO +} diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/testdata/instances.json b/src/go/collectors/go.d.plugin/modules/scaleio/testdata/instances.json new file mode 100644 index 00000000000000..bc8c6e8acd7dc1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/scaleio/testdata/instances.json @@ -0,0 +1,1160 @@ +{ + "System": { + "authenticationMethod": "Native", + "capacityAlertCriticalThresholdPercent": 90, + "capacityAlertHighThresholdPercent": 80, + "capacityTimeLeftInDays": "Unlimited", + "cliPasswordAllowed": true, + "daysInstalled": 17, + "defaultIsVolumeObfuscated": false, + "enterpriseFeaturesEnabled": true, + "id": "499634a44778afc0", + "installId": "3e9fc5811a7efb00", + "isInitialLicense": true, + "links": [ + { + "href": "/api/instances/System::499634a44778afc0", + "rel": "self" + }, + { + "href": "/api/instances/System::499634a44778afc0/relationships/Statistics", + "rel": "/api/System/relationship/Statistics" + }, + { + "href": "/api/instances/System::499634a44778afc0/relationships/ProtectionDomain", + "rel": "/api/System/relationship/ProtectionDomain" + }, + { + "href": "/api/instances/System::499634a44778afc0/relationships/Sdc", + "rel": "/api/System/relationship/Sdc" + }, + { + "href": "/api/instances/System::499634a44778afc0/relationships/User", + "rel": "/api/System/relationship/User" + } + ], + "managementClientSecureCommunicationEnabled": true, + "maxCapacityInGb": "Unlimited", + "mdmCluster": { + "clusterMode": "ThreeNodes", + "clusterState": "ClusteredNormal", + "goodNodesNum": 3, + "goodReplicasNum": 2, + "id": "5302483491453710272", + "master": { + "id": "65ed0ee3247d0a00", + "ips": [ + "100.127.0.10" + ], + "managementIPs": [ + "100.127.0.10" + ], + "name": "Manager1", + "opensslVersion": "OpenSSL 1.0.2g 1 Mar 2016", + "port": 9011, + "role": "Manager", + "versionInfo": "R2_6.11000.0", + "virtualInterfaces": [] + }, + "slaves": [ + { + "id": "4cc44104130ce7b1", + "ips": [ + "100.127.0.11" + ], + "managementIPs": [ + "100.127.0.11" + ], + "name": "Manager2", + "opensslVersion": "OpenSSL 1.0.2g 1 Mar 2016", + "port": 9011, + "role": "Manager", + "status": "Normal", + "versionInfo": "R2_6.11000.0", + "virtualInterfaces": [] + } + ], + "tieBreakers": [ + { + "id": "35bf9d62661a6db2", + "ips": [ + "100.127.0.12" + ], + "managementIPs": [ + "100.127.0.12" + ], + "name": "Tie-Breaker1", + "opensslVersion": "N/A", + "port": 9011, + "role": "TieBreaker", + "status": "Normal", + "versionInfo": "R2_6.11000.0" + } + ] + }, + "mdmManagementPort": 6611, + "mdmToSdsPolicy": "Authentication", + "perfProfile": "Default", + "remoteReadOnlyLimitState": false, + "restrictedSdcMode": "None", + "restrictedSdcModeEnabled": false, + "sdcLongOperationsCounterParameters": { + "longWindow": { + "threshold": 1000000, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 100000, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 10000, + "windowSizeInSec": 60 + } + }, + 
"sdcMdmNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdcMemoryAllocationFailuresCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdcSdsConnectivityInfo": { + "disconnectedSdcId": null, + "disconnectedSdcName": null, + "disconnectedSdsId": null, + "disconnectedSdsIp": null, + "disconnectedSdsName": null, + "sdcSdsConnectivityStatus": "AllConnected" + }, + "sdcSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 20000, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 4000, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 800, + "windowSizeInSec": 60 + } + }, + "sdcSocketAllocationFailuresCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "showGuid": true, + "swid": "", + "systemVersionName": "DellEMC ScaleIO Version: R2_6.11000.113", + "tlsVersion": "TLSv1.2", + "upgradeState": "NoUpgrade" + }, + "deviceList": [ + { + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 103808000, + "deviceCurrentPathName": "/dev/sdb", + "deviceOriginalPathName": "/dev/sdb", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbf9d6500010000", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbf9d6500010000", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbf9d6500010000/relationships/Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabe00000001", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 103808000, + "name": "sdb", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabe00000001", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "40395b7b00000000", + "temperatureState": "NeverFailed" + }, + { + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 114293760, + "deviceCurrentPathName": "/dev/sdc", + "deviceOriginalPathName": "/dev/sdc", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbfc47300010001", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbfc47300010001", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbfc47300010001/relationships/Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabe00000001", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 114293760, + "name": "sdc", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabe00000001", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "4039828b00000001", + "temperatureState": "NeverFailed" + }, + 
{ + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 103808000, + "deviceCurrentPathName": "/dev/sdb", + "deviceOriginalPathName": "/dev/sdb", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbd9d6400000000", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbd9d6400000000", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbd9d6400000000/relationships/Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabd00000000", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 103808000, + "name": "sdb", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabd00000000", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "40395b7b00000000", + "temperatureState": "NeverFailed" + }, + { + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 114293760, + "deviceCurrentPathName": "/dev/sdc", + "deviceOriginalPathName": "/dev/sdc", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbfc47700020001", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbfc47700020001", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbfc47700020001/relationships/Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabf00000002", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 114293760, + "name": "sdc", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabf00000002", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "4039828b00000001", + "temperatureState": "NeverFailed" + }, + { + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 103808000, + "deviceCurrentPathName": "/dev/sdc", + "deviceOriginalPathName": "/dev/sdc", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbdc47600000001", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbdc47600000001", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbdc47600000001/relationships/Statistics", + "rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabd00000000", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 103808000, + "name": "sdc", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabd00000000", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "40395b7b00000000", + "temperatureState": "NeverFailed" + }, + { + "aggregatedState": "NeverFailed", + "capacityLimitInKb": 103808000, + "deviceCurrentPathName": "/dev/sdb", + "deviceOriginalPathName": "/dev/sdb", + "deviceState": "Normal", + "errorState": "None", + "id": "ebbfc47800020000", + "ledSetting": "Off", + "links": [ + { + "href": "/api/instances/Device::ebbfc47800020000", + "rel": "self" + }, + { + "href": "/api/instances/Device::ebbfc47800020000/relationships/Statistics", + 
"rel": "/api/Device/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabf00000002", + "rel": "/api/parent/relationship/sdsId" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "longSuccessfulIos": { + "longWindow": null, + "mediumWindow": null, + "shortWindow": null + }, + "maxCapacityInKb": 103808000, + "name": "sdb", + "rfcacheErrorDeviceDoesNotExist": false, + "sdsId": "130dcabf00000002", + "ssdEndOfLifeState": "NeverFailed", + "storagePoolId": "4039828b00000001", + "temperatureState": "NeverFailed" + } + ], + "faultSetList": [ + { + "id": "a6a7b4cf00000000", + "links": [ + { + "href": "/api/instances/FaultSet::a6a7b4cf00000000", + "rel": "self" + }, + { + "href": "/api/instances/FaultSet::a6a7b4cf00000000/relationships/Statistics", + "rel": "/api/FaultSet/relationship/Statistics" + }, + { + "href": "/api/instances/FaultSet::a6a7b4cf00000000/relationships/Sds", + "rel": "/api/FaultSet/relationship/Sds" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "name": "MyFaultSet", + "protectionDomainId": "74d855a900000000" + } + ], + "isDirty": false, + "lastDeviceVersion": 47, + "lastFaultSetVersion": 2, + "lastProtectionDomainVersion": 2, + "lastRfcacheDeviceVersion": 1, + "lastSdcVersion": 7, + "lastSdsVersion": 19, + "lastStoragePoolVersion": 4, + "lastSystemVersion": 2, + "lastVTreeVersion": 3, + "lastVolumeVersion": 3, + "protectionDomainList": [ + { + "id": "74d855a900000000", + "links": [ + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "self" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/Statistics", + "rel": "/api/ProtectionDomain/relationship/Statistics" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/StoragePool", + "rel": "/api/ProtectionDomain/relationship/StoragePool" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/Sds", + "rel": "/api/ProtectionDomain/relationship/Sds" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/FaultSet", + "rel": "/api/ProtectionDomain/relationship/FaultSet" + }, + { + "href": "/api/instances/System::499634a44778afc0", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "name": "default", + "overallIoNetworkThrottlingEnabled": false, + "overallIoNetworkThrottlingInKbps": null, + "protectionDomainState": "Active", + "rebalanceNetworkThrottlingEnabled": false, + "rebalanceNetworkThrottlingInKbps": null, + "rebuildNetworkThrottlingEnabled": false, + "rebuildNetworkThrottlingInKbps": null, + "rfcacheEnabled": true, + "rfcacheMaxIoSizeKb": 128, + "rfcacheOpertionalMode": "WriteMiss", + "rfcachePageSizeKb": 64, + "sdsConfigurationFailureCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsDecoupledCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + 
"shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "sdsReceiveBufferAllocationFailuresCounterParameters": { + "longWindow": { + "threshold": 2000000, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 200000, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 20000, + "windowSizeInSec": 60 + } + }, + "sdsSdsNetworkDisconnectionsCounterParameters": { + "longWindow": { + "threshold": 700, + "windowSizeInSec": 86400 + }, + "mediumWindow": { + "threshold": 500, + "windowSizeInSec": 3600 + }, + "shortWindow": { + "threshold": 300, + "windowSizeInSec": 60 + } + }, + "systemId": "499634a44778afc0" + } + ], + "rfcacheDeviceList": null, + "sdcList": [ + { + "id": "6076fd1100000002", + "installedSoftwareVersionInfo": "R2_6.11000.0", + "kernelBuildNumber": null, + "kernelVersion": "4.15.18", + "links": [ + { + "href": "/api/instances/Sdc::6076fd1100000002", + "rel": "self" + }, + { + "href": "/api/instances/Sdc::6076fd1100000002/relationships/Statistics", + "rel": "/api/Sdc/relationship/Statistics" + }, + { + "href": "/api/instances/Sdc::6076fd1100000002/relationships/Volume", + "rel": "/api/Sdc/relationship/Volume" + }, + { + "href": "/api/instances/System::499634a44778afc0", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmConnectionState": "Disconnected", + "memoryAllocationFailure": null, + "name": null, + "osType": "Linux", + "perfProfile": "Default", + "sdcApproved": true, + "sdcApprovedIps": [ + "100.127.0.12" + ], + "sdcGuid": "B71F01AE-FF7A-47C5-A303-583FFD416818", + "sdcIp": "100.127.0.12", + "socketAllocationFailure": null, + "softwareVersionInfo": null, + "systemId": "499634a44778afc0", + "versionInfo": null + }, + { + "id": "6076fd1000000001", + "installedSoftwareVersionInfo": "R2_6.11000.0", + "kernelBuildNumber": null, + "kernelVersion": "4.15.18", + "links": [ + { + "href": "/api/instances/Sdc::6076fd1000000001", + "rel": "self" + }, + { + "href": "/api/instances/Sdc::6076fd1000000001/relationships/Statistics", + "rel": "/api/Sdc/relationship/Statistics" + }, + { + "href": "/api/instances/Sdc::6076fd1000000001/relationships/Volume", + "rel": "/api/Sdc/relationship/Volume" + }, + { + "href": "/api/instances/System::499634a44778afc0", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmConnectionState": "Disconnected", + "memoryAllocationFailure": null, + "name": null, + "osType": "Linux", + "perfProfile": "Default", + "sdcApproved": true, + "sdcApprovedIps": [ + "100.127.0.11" + ], + "sdcGuid": "5D2B24F9-5D49-4688-A67D-88AF8790BC05", + "sdcIp": "100.127.0.11", + "socketAllocationFailure": null, + "softwareVersionInfo": null, + "systemId": "499634a44778afc0", + "versionInfo": null + }, + { + "id": "6076fd0f00000000", + "installedSoftwareVersionInfo": "R2_6.11000.0", + "kernelBuildNumber": null, + "kernelVersion": "4.15.18", + "links": [ + { + "href": "/api/instances/Sdc::6076fd0f00000000", + "rel": "self" + }, + { + "href": "/api/instances/Sdc::6076fd0f00000000/relationships/Statistics", + "rel": "/api/Sdc/relationship/Statistics" + }, + { + "href": "/api/instances/Sdc::6076fd0f00000000/relationships/Volume", + "rel": "/api/Sdc/relationship/Volume" + }, + { + "href": "/api/instances/System::499634a44778afc0", + "rel": "/api/parent/relationship/systemId" + } + ], + "mdmConnectionState": "Connected", + "memoryAllocationFailure": null, + "name": null, + "osType": "Linux", + "perfProfile": "Default", + "sdcApproved": true, + "sdcApprovedIps": [ + "100.127.0.10" + ], + "sdcGuid": "974F4AC7-FF37-4909-8713-D1BD3F002843", + 
"sdcIp": "100.127.0.10", + "socketAllocationFailure": null, + "softwareVersionInfo": "R2_6.11000.0", + "systemId": "499634a44778afc0", + "versionInfo": "R2_6.11000.0" + } + ], + "sdsList": [ + { + "authenticationError": "None", + "certificateInfo": { + "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "subject": "/GN=SDS-000/CN=scaleIOslave1/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "thumbprint": "FD:00:99:E9:40:90:A5:CE:85:B8:A9:07:86:BB:7E:F0:E0:DE:F9:75", + "validFrom": "Nov 12 19:17:22 2019 GMT", + "validFromAsn1Format": "191112191722Z", + "validTo": "Nov 10 20:17:22 2029 GMT", + "validToAsn1Format": "291110201722Z" + }, + "drlMode": "Volatile", + "faultSetId": null, + "id": "130dcabd00000000", + "ipList": [ + { + "ip": "100.127.0.11", + "role": "all" + } + ], + "links": [ + { + "href": "/api/instances/Sds::130dcabd00000000", + "rel": "self" + }, + { + "href": "/api/instances/Sds::130dcabd00000000/relationships/Statistics", + "rel": "/api/Sds/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabd00000000/relationships/Device", + "rel": "/api/Sds/relationship/Device" + }, + { + "href": "/api/instances/Sds::130dcabd00000000/relationships/RfcacheDevice", + "rel": "/api/Sds/relationship/RfcacheDevice" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "maintenanceState": "NoMaintenance", + "mdmConnectionState": "Connected", + "membershipState": "Joined", + "name": "SDS_[100.127.0.11]", + "numOfIoBuffers": null, + "onVmWare": false, + "perfProfile": "Default", + "port": 7072, + "protectionDomainId": "74d855a900000000", + "rfcacheEnabled": true, + "rfcacheErrorApiVersionMismatch": false, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheErrorInconsistentCacheConfiguration": false, + "rfcacheErrorInconsistentSourceConfiguration": false, + "rfcacheErrorInvalidDriverPath": false, + "rfcacheErrorLowResources": false, + "rmcacheEnabled": true, + "rmcacheFrozen": false, + "rmcacheMemoryAllocationState": "AllocationPending", + "rmcacheSizeInKb": 131072, + "sdsConfigurationFailure": null, + "sdsDecoupled": null, + "sdsReceiveBufferAllocationFailures": null, + "sdsState": "Normal", + "softwareVersionInfo": "R2_6.11000.0" + }, + { + "authenticationError": "None", + "certificateInfo": { + "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "subject": "/GN=SDS-001/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "thumbprint": "B2:F9:88:84:55:94:A1:D8:7F:C1:4F:50:81:17:56:AC:72:B7:A2:AD", + "validFrom": "Nov 12 19:17:22 2019 GMT", + "validFromAsn1Format": "191112191722Z", + "validTo": "Nov 10 20:17:22 2029 GMT", + "validToAsn1Format": "291110201722Z" + }, + "drlMode": "Volatile", + "faultSetId": null, + "id": "130dcabe00000001", + "ipList": [ + { + "ip": "100.127.0.10", + "role": "all" + } + ], + "links": [ + { + "href": "/api/instances/Sds::130dcabe00000001", + "rel": "self" + }, + { + "href": "/api/instances/Sds::130dcabe00000001/relationships/Statistics", + "rel": "/api/Sds/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabe00000001/relationships/Device", + "rel": "/api/Sds/relationship/Device" + }, + { + "href": "/api/instances/Sds::130dcabe00000001/relationships/RfcacheDevice", + "rel": "/api/Sds/relationship/RfcacheDevice" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + 
"maintenanceState": "NoMaintenance", + "mdmConnectionState": "Connected", + "membershipState": "Joined", + "name": "SDS_[100.127.0.10]", + "numOfIoBuffers": null, + "onVmWare": false, + "perfProfile": "Default", + "port": 7072, + "protectionDomainId": "74d855a900000000", + "rfcacheEnabled": true, + "rfcacheErrorApiVersionMismatch": false, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheErrorInconsistentCacheConfiguration": false, + "rfcacheErrorInconsistentSourceConfiguration": false, + "rfcacheErrorInvalidDriverPath": false, + "rfcacheErrorLowResources": false, + "rmcacheEnabled": true, + "rmcacheFrozen": false, + "rmcacheMemoryAllocationState": "AllocationPending", + "rmcacheSizeInKb": 131072, + "sdsConfigurationFailure": null, + "sdsDecoupled": null, + "sdsReceiveBufferAllocationFailures": null, + "sdsState": "Normal", + "softwareVersionInfo": "R2_6.11000.0" + }, + { + "authenticationError": "None", + "certificateInfo": { + "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "subject": "/GN=SDS-002/CN=scaleIOSlave2/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD", + "thumbprint": "CC:A0:E8:B7:84:9B:E5:D1:2E:F6:7C:3A:AC:21:D6:5C:5F:D1:47:D1", + "validFrom": "Nov 12 19:17:21 2019 GMT", + "validFromAsn1Format": "191112191721Z", + "validTo": "Nov 10 20:17:21 2029 GMT", + "validToAsn1Format": "291110201721Z" + }, + "drlMode": "Volatile", + "faultSetId": null, + "id": "130dcabf00000002", + "ipList": [ + { + "ip": "100.127.0.12", + "role": "all" + } + ], + "links": [ + { + "href": "/api/instances/Sds::130dcabf00000002", + "rel": "self" + }, + { + "href": "/api/instances/Sds::130dcabf00000002/relationships/Statistics", + "rel": "/api/Sds/relationship/Statistics" + }, + { + "href": "/api/instances/Sds::130dcabf00000002/relationships/Device", + "rel": "/api/Sds/relationship/Device" + }, + { + "href": "/api/instances/Sds::130dcabf00000002/relationships/RfcacheDevice", + "rel": "/api/Sds/relationship/RfcacheDevice" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "maintenanceState": "NoMaintenance", + "mdmConnectionState": "Connected", + "membershipState": "Joined", + "name": "SDS_[100.127.0.12]", + "numOfIoBuffers": null, + "onVmWare": false, + "perfProfile": "Default", + "port": 7072, + "protectionDomainId": "74d855a900000000", + "rfcacheEnabled": true, + "rfcacheErrorApiVersionMismatch": false, + "rfcacheErrorDeviceDoesNotExist": false, + "rfcacheErrorInconsistentCacheConfiguration": false, + "rfcacheErrorInconsistentSourceConfiguration": false, + "rfcacheErrorInvalidDriverPath": false, + "rfcacheErrorLowResources": false, + "rmcacheEnabled": true, + "rmcacheFrozen": false, + "rmcacheMemoryAllocationState": "AllocationPending", + "rmcacheSizeInKb": 131072, + "sdsConfigurationFailure": null, + "sdsDecoupled": null, + "sdsReceiveBufferAllocationFailures": null, + "sdsState": "Normal", + "softwareVersionInfo": "R2_6.11000.0" + } + ], + "sessionTag": 19, + "storagePoolList": [ + { + "backgroundScannerBWLimitKBps": 0, + "backgroundScannerMode": "Disabled", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "checksumEnabled": false, + "id": "4039828b00000001", + "links": [ + { + "href": "/api/instances/StoragePool::4039828b00000001", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001/relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": 
"/api/instances/StoragePool::4039828b00000001/relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001/relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::4039828b00000001/relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "name": "StoragePool2", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "protectionDomainId": "74d855a900000000", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "zeroPaddingEnabled": false + }, + { + "backgroundScannerBWLimitKBps": 0, + "backgroundScannerMode": "Disabled", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "checksumEnabled": false, + "id": "40395b7b00000000", + "links": [ + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000/relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::74d855a900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "name": "StoragePool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "protectionDomainId": "74d855a900000000", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "zeroPaddingEnabled": false + } + ], + "vTreeList": [ + { + "baseVolumeId": "993a355e00000001", + "id": "252fd6e400000001", + "links": [ + { + "href": 
"/api/instances/VTree::252fd6e400000001", + "rel": "self" + }, + { + "href": "/api/instances/VTree::252fd6e400000001/relationships/Statistics", + "rel": "/api/VTree/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::252fd6e400000001/relationships/Volume", + "rel": "/api/VTree/relationship/Volume" + }, + { + "href": "/api/instances/Volume::993a355e00000001", + "rel": "/api/parent/relationship/baseVolumeId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "name": null, + "storagePoolId": "40395b7b00000000" + }, + { + "baseVolumeId": "993a355d00000000", + "id": "252fd6e300000000", + "links": [ + { + "href": "/api/instances/VTree::252fd6e300000000", + "rel": "self" + }, + { + "href": "/api/instances/VTree::252fd6e300000000/relationships/Statistics", + "rel": "/api/VTree/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::252fd6e300000000/relationships/Volume", + "rel": "/api/VTree/relationship/Volume" + }, + { + "href": "/api/instances/Volume::993a355d00000000", + "rel": "/api/parent/relationship/baseVolumeId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "name": null, + "storagePoolId": "40395b7b00000000" + } + ], + "volumeList": [ + { + "ancestorVolumeId": null, + "consistencyGroupId": null, + "creationTime": 1574882772, + "id": "993a355e00000001", + "isObfuscated": false, + "isVvol": false, + "links": [ + { + "href": "/api/instances/Volume::993a355e00000001", + "rel": "self" + }, + { + "href": "/api/instances/Volume::993a355e00000001/relationships/Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::252fd6e400000001", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "mappedSdcInfo": [ + { + "limitBwInMbps": 0, + "limitIops": 0, + "sdcId": "6076fd1100000002", + "sdcIp": "100.127.0.12" + } + ], + "mappingToAllSdcsEnabled": false, + "name": "volume2-16", + "sizeInKb": 16777216, + "storagePoolId": "40395b7b00000000", + "useRmcache": false, + "volumeType": "ThinProvisioned", + "vtreeId": "252fd6e400000001" + }, + { + "ancestorVolumeId": null, + "consistencyGroupId": null, + "creationTime": 1574882580, + "id": "993a355d00000000", + "isObfuscated": false, + "isVvol": false, + "links": [ + { + "href": "/api/instances/Volume::993a355d00000000", + "rel": "self" + }, + { + "href": "/api/instances/Volume::993a355d00000000/relationships/Statistics", + "rel": "/api/Volume/relationship/Statistics" + }, + { + "href": "/api/instances/VTree::252fd6e300000000", + "rel": "/api/parent/relationship/vtreeId" + }, + { + "href": "/api/instances/StoragePool::40395b7b00000000", + "rel": "/api/parent/relationship/storagePoolId" + } + ], + "mappedSdcInfo": [ + { + "limitBwInMbps": 0, + "limitIops": 0, + "sdcId": "6076fd1000000001", + "sdcIp": "100.127.0.11" + } + ], + "mappingToAllSdcsEnabled": false, + "name": "volume1-16", + "sizeInKb": 16777216, + "storagePoolId": "40395b7b00000000", + "useRmcache": false, + "volumeType": "ThinProvisioned", + "vtreeId": "252fd6e300000000" + } + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/scaleio/testdata/selected_statistics.json b/src/go/collectors/go.d.plugin/modules/scaleio/testdata/selected_statistics.json new file mode 100644 index 00000000000000..0b141bbe6c3750 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/scaleio/testdata/selected_statistics.json @@ -0,0 +1,777 @@ +{ + "Sdc": { + "6076fd0f00000000": { + "numOfMappedVolumes": 1, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 1, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 1, + "totalWeightInKb": 0 + }, + "volumeIds": [ + "993a5c6d00000002" + ] + }, + "6076fd1000000001": { + "numOfMappedVolumes": 1, + "userDataReadBwc": { + "numOccured": 1, + "numSeconds": 1, + "totalWeightInKb": 1 + }, + "userDataWriteBwc": { + "numOccured": 169, + "numSeconds": 1, + "totalWeightInKb": 117399 + }, + "volumeIds": [ + "993a355d00000000" + ] + }, + "6076fd1100000002": { + "numOfMappedVolumes": 1, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 1, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 145, + "numSeconds": 1, + "totalWeightInKb": 118972 + }, + "volumeIds": [ + "993a355e00000001" + ] + } + }, + "StoragePool": { + "40395b7b00000000": { + "BackgroundScanCompareCount": 0, + "BackgroundScannedInMB": 0, + "activeBckRebuildCapacityInKb": 0, + "activeFwdRebuildCapacityInKb": 0, + "activeMovingCapacityInKb": 0, + "activeMovingInBckRebuildJobs": 0, + "activeMovingInFwdRebuildJobs": 0, + "activeMovingInNormRebuildJobs": 0, + "activeMovingInRebalanceJobs": 0, + "activeMovingOutBckRebuildJobs": 0, + "activeMovingOutFwdRebuildJobs": 0, + "activeMovingOutNormRebuildJobs": 0, + "activeMovingRebalanceJobs": 0, + "activeNormRebuildCapacityInKb": 0, + "activeRebalanceCapacityInKb": 0, + "atRestCapacityInKb": 50110464, + "bckRebuildCapacityInKb": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 100663296, + "capacityInUseInKb": 50110464, + "capacityLimitInKb": 311424000, + "degradedFailedCapacityInKb": 0, + "degradedFailedVacInKb": 0, + "degradedHealthyCapacityInKb": 0, + "degradedHealthyVacInKb": 0, + "deviceIds": [ + "ebbdc47a00000000", + "ebbf9d6500010000", + "ebbfc47900020000" + ], + "failedCapacityInKb": 0, + "failedVacInKb": 0, + "fixedReadErrorCount": 0, + "fwdRebuildCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 67108864, + "maxCapacityInKb": 311424000, + "movingCapacityInKb": 0, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDevices": 3, + "numOfMappedToAllVolumes": 0, + "numOfSnapshots": 1, + "numOfThickBaseVolumes": 0, + "numOfThinBaseVolumes": 2, + "numOfUnmappedVolumes": 0, + "numOfVolumes": 3, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 2, + "pendingBckRebuildCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "pendingMovingInFwdRebuildJobs": 0, + "pendingMovingInNormRebuildJobs": 0, + "pendingMovingInRebalanceJobs": 0, + "pendingMovingOutBckRebuildJobs": 0, + "pendingMovingOutFwdRebuildJobs": 0, + "pendingMovingOutNormrebuildJobs": 0, + "pendingMovingRebalanceJobs": 0, + "pendingNormRebuildCapacityInKb": 0, + "pendingRebalanceCapacityInKb": 0, + "primaryReadBwc": { + 
"numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "primaryReadFromDevBwc": { + "numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 33554432, + "primaryWriteBwc": { + "numOccured": 1556, + "numSeconds": 5, + "totalWeightInKb": 1193408 + }, + "protectedCapacityInKb": 50110464, + "protectedVacInKb": 67108864, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rfcacheReadsFromCache": 0, + "rfcacheReadsPending": 0, + "rfcacheReadsReceived": 0, + "rfcacheReadsSkipped": 0, + "rfcacheReadsSkippedAlignedSizeTooLarge": 0, + "rfcacheReadsSkippedHeavyLoad": 0, + "rfcacheReadsSkippedInternalError": 0, + "rfcacheReadsSkippedLockIos": 0, + "rfcacheReadsSkippedLowResources": 0, + "rfcacheReadsSkippedMaxIoSize": 0, + "rfcacheReadsSkippedStuckIo": 0, + "rfcacheSkippedUnlinedWrite": 0, + "rfcacheSourceDeviceReads": 0, + "rfcacheSourceDeviceWrites": 0, + "rfcacheWriteMiss": 0, + "rfcacheWritePending": 0, + "rfcacheWritesReceived": 0, + "rfcacheWritesSkippedCacheMiss": 0, + "rfcacheWritesSkippedHeavyLoad": 0, + "rfcacheWritesSkippedInternalError": 0, + "rfcacheWritesSkippedLowResources": 0, + "rfcacheWritesSkippedMaxIoSize": 0, + "rfcacheWritesSkippedStuckIo": 0, + "rmPendingAllocatedInKb": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 33554432, + "secondaryWriteBwc": { + "numOccured": 1530, + "numSeconds": 5, + "totalWeightInKb": 1169632 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 16699392, + "snapCapacityInUseOccupiedInKb": 749568, + "spareCapacityInKb": 31141888, + "thickCapacityInUseInKb": 0, + "thinCapacityAllocatedInKb": 67108864, + "thinCapacityInUseInKb": 49360896, + "totalReadBwc": { + "numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "totalWriteBwc": { + "numOccured": 3086, + "numSeconds": 5, + "totalWeightInKb": 2363040 + }, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 230171648, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 285, + "numSeconds": 1, + "totalWeightInKb": 227170 + }, + "volumeIds": [ + "993a355d00000000", + "993a5c6d00000002", + "993a355e00000001" + ], + "vtreeIds": [ + "252fd6e300000000", + "252fd6e400000001" + ] + }, + "4039828b00000001": { + "BackgroundScanCompareCount": 0, + "BackgroundScannedInMB": 0, + "activeBckRebuildCapacityInKb": 0, + "activeFwdRebuildCapacityInKb": 0, + "activeMovingCapacityInKb": 0, + "activeMovingInBckRebuildJobs": 0, + "activeMovingInFwdRebuildJobs": 0, + "activeMovingInNormRebuildJobs": 0, + "activeMovingInRebalanceJobs": 0, + "activeMovingOutBckRebuildJobs": 0, + "activeMovingOutFwdRebuildJobs": 0, + "activeMovingOutNormRebuildJobs": 0, + "activeMovingRebalanceJobs": 0, + "activeNormRebuildCapacityInKb": 0, + 
"activeRebalanceCapacityInKb": 0, + "atRestCapacityInKb": 0, + "bckRebuildCapacityInKb": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 142606336, + "capacityInUseInKb": 0, + "capacityLimitInKb": 332395520, + "degradedFailedCapacityInKb": 0, + "degradedFailedVacInKb": 0, + "degradedHealthyCapacityInKb": 0, + "degradedHealthyVacInKb": 0, + "deviceIds": [ + "ebbdc47b00000001", + "ebbfc47300010001", + "ebbfc47700020001" + ], + "failedCapacityInKb": 0, + "failedVacInKb": 0, + "fixedReadErrorCount": 0, + "fwdRebuildCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 0, + "maxCapacityInKb": 332395520, + "movingCapacityInKb": 0, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDevices": 3, + "numOfMappedToAllVolumes": 0, + "numOfSnapshots": 0, + "numOfThickBaseVolumes": 0, + "numOfThinBaseVolumes": 0, + "numOfUnmappedVolumes": 0, + "numOfVolumes": 0, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 0, + "pendingBckRebuildCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "pendingMovingInFwdRebuildJobs": 0, + "pendingMovingInNormRebuildJobs": 0, + "pendingMovingInRebalanceJobs": 0, + "pendingMovingOutBckRebuildJobs": 0, + "pendingMovingOutFwdRebuildJobs": 0, + "pendingMovingOutNormrebuildJobs": 0, + "pendingMovingRebalanceJobs": 0, + "pendingNormRebuildCapacityInKb": 0, + "pendingRebalanceCapacityInKb": 0, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 0, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 0, + "protectedVacInKb": 0, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rfcacheReadsFromCache": 0, + "rfcacheReadsPending": 0, + "rfcacheReadsReceived": 0, + "rfcacheReadsSkipped": 0, + "rfcacheReadsSkippedAlignedSizeTooLarge": 0, + "rfcacheReadsSkippedHeavyLoad": 0, + "rfcacheReadsSkippedInternalError": 0, + "rfcacheReadsSkippedLockIos": 0, + "rfcacheReadsSkippedLowResources": 0, + "rfcacheReadsSkippedMaxIoSize": 0, + "rfcacheReadsSkippedStuckIo": 0, + "rfcacheSkippedUnlinedWrite": 0, + "rfcacheSourceDeviceReads": 0, + "rfcacheSourceDeviceWrites": 0, + "rfcacheWriteMiss": 0, + "rfcacheWritePending": 0, + "rfcacheWritesReceived": 0, + "rfcacheWritesSkippedCacheMiss": 0, + "rfcacheWritesSkippedHeavyLoad": 0, + "rfcacheWritesSkippedInternalError": 0, + 
"rfcacheWritesSkippedLowResources": 0, + "rfcacheWritesSkippedMaxIoSize": 0, + "rfcacheWritesSkippedStuckIo": 0, + "rmPendingAllocatedInKb": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 0, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 3145728, + "snapCapacityInUseOccupiedInKb": 0, + "spareCapacityInKb": 33239040, + "thickCapacityInUseInKb": 0, + "thinCapacityAllocatedInKb": 0, + "thinCapacityInUseInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 299156480, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeIds": [], + "vtreeIds": [] + } + }, + "System": { + "BackgroundScanCompareCount": 0, + "BackgroundScannedInMB": 0, + "activeBckRebuildCapacityInKb": 0, + "activeFwdRebuildCapacityInKb": 0, + "activeMovingCapacityInKb": 0, + "activeMovingInBckRebuildJobs": 0, + "activeMovingInFwdRebuildJobs": 0, + "activeMovingInNormRebuildJobs": 0, + "activeMovingInRebalanceJobs": 0, + "activeMovingOutBckRebuildJobs": 0, + "activeMovingOutFwdRebuildJobs": 0, + "activeMovingOutNormRebuildJobs": 0, + "activeMovingRebalanceJobs": 0, + "activeNormRebuildCapacityInKb": 0, + "activeRebalanceCapacityInKb": 0, + "atRestCapacityInKb": 50110464, + "bckRebuildCapacityInKb": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 243269632, + "capacityInUseInKb": 50110464, + "capacityLimitInKb": 643819520, + "degradedFailedCapacityInKb": 0, + "degradedFailedVacInKb": 0, + "degradedHealthyCapacityInKb": 0, + "degradedHealthyVacInKb": 0, + "failedCapacityInKb": 0, + "failedVacInKb": 0, + "fixedReadErrorCount": 0, + "fwdRebuildCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 67108864, + "maxCapacityInKb": 643819520, + "movingCapacityInKb": 0, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDevices": 6, + "numOfFaultSets": 0, + "numOfMappedToAllVolumes": 0, + "numOfProtectionDomains": 1, + "numOfRfcacheDevices": 0, + "numOfScsiInitiators": 0, + "numOfSdc": 3, + "numOfSds": 3, + "numOfSnapshots": 1, + "numOfStoragePools": 2, + "numOfThickBaseVolumes": 0, + "numOfThinBaseVolumes": 2, + "numOfUnmappedVolumes": 0, + "numOfVolumes": 3, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 2, + "pendingBckRebuildCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 
0, + "pendingMovingInFwdRebuildJobs": 0, + "pendingMovingInNormRebuildJobs": 0, + "pendingMovingInRebalanceJobs": 0, + "pendingMovingOutBckRebuildJobs": 0, + "pendingMovingOutFwdRebuildJobs": 0, + "pendingMovingOutNormrebuildJobs": 0, + "pendingMovingRebalanceJobs": 0, + "pendingNormRebuildCapacityInKb": 0, + "pendingRebalanceCapacityInKb": 0, + "primaryReadBwc": { + "numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "primaryReadFromDevBwc": { + "numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 33554432, + "primaryWriteBwc": { + "numOccured": 1556, + "numSeconds": 5, + "totalWeightInKb": 1193408 + }, + "protectedCapacityInKb": 50110464, + "protectedVacInKb": 67108864, + "protectionDomainIds": [ + "74d855a900000000" + ], + "rebalanceCapacityInKb": 0, + "rebalancePerReceiveJobNetThrottlingInKbps": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWaitSendQLength": 0, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebuildPerReceiveJobNetThrottlingInKbps": 0, + "rebuildWaitSendQLength": 0, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheFdAvgReadTime": 0, + "rfcacheFdAvgWriteTime": 0, + "rfcacheFdCacheOverloaded": 0, + "rfcacheFdInlightReads": 0, + "rfcacheFdInlightWrites": 0, + "rfcacheFdIoErrors": 0, + "rfcacheFdMonitorErrorStuckIo": 0, + "rfcacheFdReadTimeGreater1Min": 0, + "rfcacheFdReadTimeGreater1Sec": 0, + "rfcacheFdReadTimeGreater500Millis": 0, + "rfcacheFdReadTimeGreater5Sec": 0, + "rfcacheFdReadsReceived": 0, + "rfcacheFdWriteTimeGreater1Min": 0, + "rfcacheFdWriteTimeGreater1Sec": 0, + "rfcacheFdWriteTimeGreater500Millis": 0, + "rfcacheFdWriteTimeGreater5Sec": 0, + "rfcacheFdWritesReceived": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcachePooIosOutstanding": 0, + "rfcachePoolCachePages": 0, + "rfcachePoolEvictions": 0, + "rfcachePoolInLowMemoryCondition": 0, + "rfcachePoolIoTimeGreater1Min": 0, + "rfcachePoolLockTimeGreater1Sec": 0, + "rfcachePoolLowResourcesInitiatedPassthroughMode": 0, + "rfcachePoolNumCacheDevs": 0, + "rfcachePoolNumSrcDevs": 0, + "rfcachePoolPagesInuse": 0, + "rfcachePoolReadHit": 0, + "rfcachePoolReadMiss": 0, + "rfcachePoolReadPendingG10Millis": 0, + "rfcachePoolReadPendingG1Millis": 0, + "rfcachePoolReadPendingG1Sec": 0, + "rfcachePoolReadPendingG500Micro": 0, + "rfcachePoolReadsPending": 0, + "rfcachePoolSize": 0, + "rfcachePoolSourceIdMismatch": 0, + "rfcachePoolSuspendedIos": 0, + "rfcachePoolSuspendedPequestsRedundantSearchs": 0, + "rfcachePoolWriteHit": 0, + "rfcachePoolWriteMiss": 0, + "rfcachePoolWritePending": 0, + "rfcachePoolWritePendingG10Millis": 0, + "rfcachePoolWritePendingG1Millis": 0, + "rfcachePoolWritePendingG1Sec": 0, + "rfcachePoolWritePendingG500Micro": 0, + "rfcacheReadMiss": 0, + "rfcacheReadsFromCache": 0, + "rfcacheReadsPending": 0, + "rfcacheReadsReceived": 0, + "rfcacheReadsSkipped": 0, + "rfcacheReadsSkippedAlignedSizeTooLarge": 0, + "rfcacheReadsSkippedHeavyLoad": 0, + "rfcacheReadsSkippedInternalError": 0, + "rfcacheReadsSkippedLockIos": 0, + "rfcacheReadsSkippedLowResources": 0, + "rfcacheReadsSkippedMaxIoSize": 0, + "rfcacheReadsSkippedStuckIo": 0, + "rfcacheSkippedUnlinedWrite": 0, + "rfcacheSourceDeviceReads": 0, + "rfcacheSourceDeviceWrites": 0, + "rfcacheWriteMiss": 0, + 
"rfcacheWritePending": 0, + "rfcacheWritesReceived": 0, + "rfcacheWritesSkippedCacheMiss": 0, + "rfcacheWritesSkippedHeavyLoad": 0, + "rfcacheWritesSkippedInternalError": 0, + "rfcacheWritesSkippedLowResources": 0, + "rfcacheWritesSkippedMaxIoSize": 0, + "rfcacheWritesSkippedStuckIo": 0, + "rmPendingAllocatedInKb": 0, + "rmcache128kbEntryCount": 0, + "rmcache16kbEntryCount": 0, + "rmcache32kbEntryCount": 0, + "rmcache4kbEntryCount": 0, + "rmcache64kbEntryCount": 0, + "rmcache8kbEntryCount": 0, + "rmcacheBigBlockEvictionCount": 0, + "rmcacheBigBlockEvictionSizeCountInKb": 0, + "rmcacheCurrNumOf128kbEntries": 0, + "rmcacheCurrNumOf16kbEntries": 0, + "rmcacheCurrNumOf32kbEntries": 0, + "rmcacheCurrNumOf4kbEntries": 0, + "rmcacheCurrNumOf64kbEntries": 0, + "rmcacheCurrNumOf8kbEntries": 0, + "rmcacheEntryEvictionCount": 0, + "rmcacheEntryEvictionSizeCountInKb": 0, + "rmcacheNoEvictionCount": 0, + "rmcacheSizeInKb": 393216, + "rmcacheSizeInUseInKb": 0, + "rmcacheSkipCountCacheAllBusy": 0, + "rmcacheSkipCountLargeIo": 0, + "rmcacheSkipCountUnaligned4kbIo": 0, + "scsiInitiatorIds": [], + "sdcIds": [ + "6076fd0f00000000", + "6076fd1000000001", + "6076fd1100000002" + ], + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 33554432, + "secondaryWriteBwc": { + "numOccured": 1530, + "numSeconds": 5, + "totalWeightInKb": 1169632 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 19845120, + "snapCapacityInUseOccupiedInKb": 749568, + "spareCapacityInKb": 64380928, + "thickCapacityInUseInKb": 0, + "thinCapacityAllocatedInKb": 67108864, + "thinCapacityInUseInKb": 49360896, + "totalReadBwc": { + "numOccured": 1, + "numSeconds": 5, + "totalWeightInKb": 4 + }, + "totalWriteBwc": { + "numOccured": 3086, + "numSeconds": 5, + "totalWeightInKb": 2363040 + }, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 529328128, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 285, + "numSeconds": 1, + "totalWeightInKb": 227170 + } + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/snmp/README.md b/src/go/collectors/go.d.plugin/modules/snmp/README.md new file mode 120000 index 00000000000000..edf223bf97d577 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/README.md @@ -0,0 +1 @@ +integrations/snmp_devices.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/snmp/charts.go b/src/go/collectors/go.d.plugin/modules/snmp/charts.go new file mode 100644 index 00000000000000..909ddb010ef1cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/charts.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func newCharts(configs []ChartConfig) (*module.Charts, error) { + charts := &module.Charts{} + for _, cfg := range configs { + if len(cfg.IndexRange) == 2 { + cs, err := newChartsFromIndexRange(cfg) + if err != nil { + return nil, err + } + if err := charts.Add(*cs...); err != nil { + return nil, err + } + } else { + chart, err := newChart(cfg) + if err != nil { + return nil, err + } + if err = charts.Add(chart); err != nil { + return nil, err + } 
+ } + } + return charts, nil +} + +func newChartsFromIndexRange(cfg ChartConfig) (*module.Charts, error) { + var addPrio int + charts := &module.Charts{} + for i := cfg.IndexRange[0]; i <= cfg.IndexRange[1]; i++ { + chart, err := newChartWithOIDIndex(i, cfg) + if err != nil { + return nil, err + } + chart.Priority += addPrio + addPrio += 1 + if err = charts.Add(chart); err != nil { + return nil, err + } + } + return charts, nil +} + +func newChartWithOIDIndex(oidIndex int, cfg ChartConfig) (*module.Chart, error) { + chart, err := newChart(cfg) + if err != nil { + return nil, err + } + + chart.ID = fmt.Sprintf("%s_%d", chart.ID, oidIndex) + chart.Title = fmt.Sprintf("%s %d", chart.Title, oidIndex) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf("%s.%d", dim.ID, oidIndex) + } + + return chart, nil +} + +func newChart(cfg ChartConfig) (*module.Chart, error) { + chart := &module.Chart{ + ID: cfg.ID, + Title: cfg.Title, + Units: cfg.Units, + Fam: cfg.Family, + Ctx: fmt.Sprintf("snmp.%s", cfg.ID), + Type: module.ChartType(cfg.Type), + Priority: cfg.Priority, + } + + if chart.Title == "" { + chart.Title = "Untitled chart" + } + if chart.Units == "" { + chart.Units = "num" + } + if chart.Priority < module.Priority { + chart.Priority += module.Priority + } + + seen := make(map[string]struct{}) + var a string + for _, cfg := range cfg.Dimensions { + if cfg.Algorithm != "" { + seen[cfg.Algorithm] = struct{}{} + a = cfg.Algorithm + } + dim := &module.Dim{ + ID: strings.TrimPrefix(cfg.OID, "."), + Name: cfg.Name, + Algo: module.DimAlgo(cfg.Algorithm), + Mul: cfg.Multiplier, + Div: cfg.Divisor, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + if len(seen) == 1 && a != "" && len(chart.Dims) > 1 { + for _, d := range chart.Dims { + if d.Algo == "" { + d.Algo = module.DimAlgo(a) + } + } + } + + return chart, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/snmp/collect.go b/src/go/collectors/go.d.plugin/modules/snmp/collect.go new file mode 100644 index 00000000000000..9f0e78d7ec914b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/collect.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "github.com/gosnmp/gosnmp" +) + +func (s *SNMP) collect() (map[string]int64, error) { + collected := make(map[string]int64) + + if err := s.collectOIDs(collected); err != nil { + return nil, err + } + + return collected, nil +} + +func (s *SNMP) collectOIDs(collected map[string]int64) error { + for i, end := 0, 0; i < len(s.oids); i += s.Options.MaxOIDs { + if end = i + s.Options.MaxOIDs; end > len(s.oids) { + end = len(s.oids) + } + + oids := s.oids[i:end] + resp, err := s.snmpClient.Get(oids) + if err != nil { + s.Errorf("cannot get SNMP data: %v", err) + return err + } + + for i, oid := range oids { + if i >= len(resp.Variables) { + continue + } + + switch v := resp.Variables[i]; v.Type { + case gosnmp.Boolean, + gosnmp.Counter32, + gosnmp.Counter64, + gosnmp.Gauge32, + gosnmp.TimeTicks, + gosnmp.Uinteger32, + gosnmp.OpaqueFloat, + gosnmp.OpaqueDouble, + gosnmp.Integer: + collected[oid] = gosnmp.ToBigInt(v.Value).Int64() + default: + s.Debugf("skipping OID '%s' (unsupported type '%s')", oid, v.Type) + } + } + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/snmp/config_schema.json b/src/go/collectors/go.d.plugin/modules/snmp/config_schema.json new file mode 100644 index 00000000000000..dd4e9c3ca51a2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/config_schema.json @@ -0,0 
+1,188 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "update_every": { + "type": "integer" + }, + "hostname": { + "type": "string" + }, + "community": { + "type": "string" + }, + "user": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "level": { + "type": "string", + "enum": [ + "none", + "authNoPriv", + "authPriv" + ] + }, + "auth_proto": { + "type": "string", + "enum": [ + "none", + "md5", + "sha", + "sha224", + "sha256", + "sha384", + "sha512" + ] + }, + "auth_key": { + "type": "string" + }, + "priv_proto": { + "type": "string", + "enum": [ + "none", + "des", + "aes", + "aes192", + "aes256", + "aes192c" + ] + }, + "priv_key": { + "type": "string" + } + }, + "required": [ + "name", + "level", + "auth_proto", + "auth_key", + "priv_proto", + "priv_key" + ] + }, + "options": { + "type": "object", + "properties": { + "port": { + "type": "integer" + }, + "retries": { + "type": "integer" + }, + "timeout": { + "type": "integer" + }, + "version": { + "type": "string", + "enum": [ + "1", + "2", + "3" + ] + }, + "max_request_size": { + "type": "integer" + } + }, + "required": [ + "port", + "retries", + "timeout", + "version", + "max_request_size" + ] + }, + "charts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "title": { + "type": "string" + }, + "units": { + "type": "string" + }, + "family": { + "type": "string" + }, + "type": { + "type": "string" + }, + "priority": { + "type": "integer" + }, + "multiply_range": { + "type": "array", + "items": { + "type": "integer" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oid": { + "type": "string" + }, + "name": { + "type": "string" + }, + "algorithm": { + "type": "string", + "enum": [ + "absolute", + "incremental" + ] + }, + "multiplier": { + "type": "integer" + }, + "divisor": { + "type": "integer" + } + }, + "required": [ + "oid", + "name", + "algorithm", + "multiplier", + "divisor" + ] + } + } + }, + "required": [ + "id", + "title", + "units", + "family", + "type", + "priority", + "multiply_range", + "dimensions" + ] + } + } + }, + "required": [ + "name", + "update_every", + "hostname", + "community", + "user", + "options", + "charts" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/snmp/init.go b/src/go/collectors/go.d.plugin/modules/snmp/init.go new file mode 100644 index 00000000000000..80243093664e36 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/init.go @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "errors" + "fmt" + "time" + + "github.com/gosnmp/gosnmp" +) + +var newSNMPClient = gosnmp.NewHandler + +func (s SNMP) validateConfig() error { + if len(s.ChartsInput) == 0 { + return errors.New("'charts' are required but not set") + } + + if s.Options.Version == gosnmp.Version3.String() { + if s.User.Name == "" { + return errors.New("'user.name' is required when using SNMPv3 but not set") + } + if _, err := parseSNMPv3SecurityLevel(s.User.SecurityLevel); err != nil { + return err + } + if _, err := parseSNMPv3AuthProtocol(s.User.AuthProto); err != nil { + return err + } + if _, err := parseSNMPv3PrivProtocol(s.User.PrivProto); err != nil { + return err + } + } + + return nil +} + +func (s SNMP) initSNMPClient() (gosnmp.Handler, error) { + client := newSNMPClient() + + if client.SetTarget(s.Hostname); client.Target() == "" { + 
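// An empty target here means 'hostname' was not set in the job config: warn and fall back to the default. The same "set, then validate via the getter" pattern is applied to every client option below. + 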
s.Warningf("'hostname' not set, using the default value: '%s'", defaultHostname) + client.SetTarget(defaultHostname) + } + if client.SetPort(uint16(s.Options.Port)); client.Port() <= 0 || client.Port() > 65535 { + s.Warningf("'options.port' is invalid, changing to the default value: '%d' => '%d'", s.Options.Port, defaultPort) + client.SetPort(defaultPort) + } + if client.SetRetries(s.Options.Retries); client.Retries() < 1 || client.Retries() > 10 { + s.Warningf("'options.retries' is invalid, changing to the default value: '%d' => '%d'", s.Options.Retries, defaultRetries) + client.SetRetries(defaultRetries) + } + if client.SetTimeout(time.Duration(s.Options.Timeout) * time.Second); client.Timeout().Seconds() < 1 { + s.Warningf("'options.timeout' is invalid, changing to the default value: '%d' => '%d'", s.Options.Timeout, defaultTimeout) + client.SetTimeout(defaultTimeout * time.Second) + } + if client.SetMaxOids(s.Options.MaxOIDs); client.MaxOids() < 1 { + s.Warningf("'options.max_request_size' is invalid, changing to the default value: '%d' => '%d'", s.Options.MaxOIDs, defaultMaxOIDs) + client.SetMaxOids(defaultMaxOIDs) + } + + ver, err := parseSNMPVersion(s.Options.Version) + if err != nil { + s.Warningf("'options.version' is invalid, changing to the default value: '%s' => '%s'", + s.Options.Version, defaultVersion) + ver = defaultVersion + } + comm := s.Community + if comm == "" && (ver <= gosnmp.Version2c) { + s.Warningf("'community' not set, using the default value: '%s'", defaultCommunity) + comm = defaultCommunity + } + + switch ver { + case gosnmp.Version1: + client.SetCommunity(comm) + client.SetVersion(gosnmp.Version1) + case gosnmp.Version2c: + client.SetCommunity(comm) + client.SetVersion(gosnmp.Version2c) + case gosnmp.Version3: + client.SetVersion(gosnmp.Version3) + client.SetSecurityModel(gosnmp.UserSecurityModel) + client.SetMsgFlags(safeParseSNMPv3SecurityLevel(s.User.SecurityLevel)) + client.SetSecurityParameters(&gosnmp.UsmSecurityParameters{ + UserName: s.User.Name, + AuthenticationProtocol: safeParseSNMPv3AuthProtocol(s.User.AuthProto), + AuthenticationPassphrase: s.User.AuthKey, + PrivacyProtocol: safeParseSNMPv3PrivProtocol(s.User.PrivProto), + PrivacyPassphrase: s.User.PrivKey, + }) + default: + return nil, fmt.Errorf("invalid SNMP version: %s", s.Options.Version) + } + + return client, nil +} + +func (s SNMP) initOIDs() (oids []string) { + for _, c := range *s.charts { + for _, d := range c.Dims { + oids = append(oids, d.ID) + } + } + return oids +} + +func parseSNMPVersion(version string) (gosnmp.SnmpVersion, error) { + switch version { + case "0", "1": + return gosnmp.Version1, nil + case "2", "2c", "": + return gosnmp.Version2c, nil + case "3": + return gosnmp.Version3, nil + default: + return gosnmp.Version2c, fmt.Errorf("invalid snmp version value (%s)", version) + } +} + +func safeParseSNMPv3SecurityLevel(level string) gosnmp.SnmpV3MsgFlags { + v, _ := parseSNMPv3SecurityLevel(level) + return v +} + +func parseSNMPv3SecurityLevel(level string) (gosnmp.SnmpV3MsgFlags, error) { + switch level { + case "1", "none", "noAuthNoPriv", "": + return gosnmp.NoAuthNoPriv, nil + case "2", "authNoPriv": + return gosnmp.AuthNoPriv, nil + case "3", "authPriv": + return gosnmp.AuthPriv, nil + default: + return gosnmp.NoAuthNoPriv, fmt.Errorf("invalid snmpv3 user security level value (%s)", level) + } +} + +func safeParseSNMPv3AuthProtocol(protocol string) gosnmp.SnmpV3AuthProtocol { + v, _ := parseSNMPv3AuthProtocol(protocol) + return v +} + +func 
parseSNMPv3AuthProtocol(protocol string) (gosnmp.SnmpV3AuthProtocol, error) { + switch protocol { + case "1", "none", "noAuth", "": + return gosnmp.NoAuth, nil + case "2", "md5": + return gosnmp.MD5, nil + case "3", "sha": + return gosnmp.SHA, nil + case "4", "sha224": + return gosnmp.SHA224, nil + case "5", "sha256": + return gosnmp.SHA256, nil + case "6", "sha384": + return gosnmp.SHA384, nil + case "7", "sha512": + return gosnmp.SHA512, nil + default: + return gosnmp.NoAuth, fmt.Errorf("invalid snmpv3 user auth protocol value (%s)", protocol) + } +} + +func safeParseSNMPv3PrivProtocol(protocol string) gosnmp.SnmpV3PrivProtocol { + v, _ := parseSNMPv3PrivProtocol(protocol) + return v +} + +func parseSNMPv3PrivProtocol(protocol string) (gosnmp.SnmpV3PrivProtocol, error) { + switch protocol { + case "1", "none", "noPriv", "": + return gosnmp.NoPriv, nil + case "2", "des": + return gosnmp.DES, nil + case "3", "aes": + return gosnmp.AES, nil + case "4", "aes192": + return gosnmp.AES192, nil + case "5", "aes256": + return gosnmp.AES256, nil + case "6", "aes192c": + return gosnmp.AES192C, nil + case "7", "aes256c": + return gosnmp.AES256C, nil + default: + return gosnmp.NoPriv, fmt.Errorf("invalid snmpv3 user priv protocol value (%s)", protocol) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/snmp/integrations/snmp_devices.md b/src/go/collectors/go.d.plugin/modules/snmp/integrations/snmp_devices.md new file mode 100644 index 00000000000000..c9b3b36c6994a9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/integrations/snmp_devices.md @@ -0,0 +1,404 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/snmp/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/snmp/metadata.yaml" +sidebar_label: "SNMP devices" +learn_status: "Published" +learn_rel_path: "Data Collection/Generic Data Collection" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# SNMP devices + + +<img src="https://netdata.cloud/img/snmp.png" width="150"/> + + +Plugin: go.d.plugin +Module: snmp + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors any SNMP device and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package. + +It supports: + +- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3. +- any number of SNMP devices. +- each SNMP device can be used to collect data for any number of charts. +- each chart may have any number of dimensions. +- each SNMP device may have a different update frequency. +- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server to control the size of batches). + +Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. +`go.d.plugin` reports the time it took for the SNMP device to respond when executed in debug mode. + +Also, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped. +This is a problem of the SNMP device, not this collector. In this case, consider reducing the frequency of data collection (increasing `update_every`). + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection.
+ +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +The metrics that will be collected are defined in the configuration file. + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Find OIDs + +Use `snmpwalk`, like this: + +```sh +snmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1 +``` + +- `-t 20` is the timeout in seconds. +- `-O fn` will display full OIDs in numeric format. +- `-v 2c` is the SNMP version. +- `-c public` is the SNMP community. +- `192.0.2.1` is the SNMP device. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/snmp.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/snmp.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| hostname | Target ipv4 address. | 127.0.0.1 | yes | +| community | SNMPv1/2 community string. | public | no | +| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no | +| options.port | Target port. | 161 | no | +| options.retries | Retries to attempt. | 1 | no | +| options.timeout | SNMP request/response timeout. | 10 | no | +| options.max_request_size | Maximum number of OIDs allowed in one SNMP request. | 60 | no | +| user.name | SNMPv3 user name. | | no | +| user.level | Security level of SNMPv3 messages. | | no | +| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no | +| user.auth_key | Authentication protocol pass phrase. | | no | +| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no | +| user.priv_key | Privacy protocol pass phrase. | | no | +| charts | List of charts. | [] | yes | +| charts.id | Chart ID. Used to uniquely identify the chart. | | yes | +| charts.title | Chart title. | Untitled chart | no | +| charts.units | Chart units. | num | no | +| charts.family | Chart family. | charts.id | no | +| charts.type | Chart type (line, area, stacked). | line | no | +| charts.priority | Chart priority. | 70000 | no | +| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no | +| charts.dimensions | List of chart dimensions. | [] | yes | +| charts.dimensions.oid | Collected metric OID. | | yes | +| charts.dimensions.name | Dimension name. | | yes | +| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no | +| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no | +| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. 
| 1 | no | + +##### user.level + +The security level of SNMPv3 messages, as per RFC 3414 (`user.level`): + +| String value | Int value | Description | +|:------------:|:---------:|------------------------------------------| +| none | 1 | no message authentication or encryption | +| authNoPriv | 2 | message authentication and no encryption | +| authPriv | 3 | message authentication and encryption | + + +##### user.auth_proto + +The digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`): + +| String value | Int value | Description | +|:------------:|:---------:|-------------------------------------------| +| none | 1 | no message authentication | +| md5 | 2 | MD5 message authentication (HMAC-MD5-96) | +| sha | 3 | SHA message authentication (HMAC-SHA-96) | +| sha224 | 4 | SHA message authentication (HMAC-SHA-224) | +| sha256 | 5 | SHA message authentication (HMAC-SHA-256) | +| sha384 | 6 | SHA message authentication (HMAC-SHA-384) | +| sha512 | 7 | SHA message authentication (HMAC-SHA-512) | + + +##### user.priv_proto + +The encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`): + +| String value | Int value | Description | +|:------------:|:---------:|-------------------------------------------------------------------------| +| none | 1 | no message encryption | +| des | 2 | DES encryption (CBC-DES) | +| aes | 3 | 128-bit AES encryption (CFB-AES-128) | +| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization | +| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization | +| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization | +| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization | + + +</details> + +#### Examples + +##### SNMPv1/2 + +In this example: + +- the SNMP device is `192.0.2.1`. +- the SNMP version is `2`. +- the SNMP community is `public`. +- we will update the values every 10 seconds. +- we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`. + +> **SNMPv1**: just set `options.version` to 1. +> **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. temperature), use `absolute` instead. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: switch + update_every: 10 + hostname: 192.0.2.1 + community: public + options: + version: 2 + charts: + - id: "bandwidth_port1" + title: "Switch Bandwidth for port 1" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.1" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.1" + multiplier: -8 + divisor: 1000 + - id: "bandwidth_port2" + title: "Switch Bandwidth for port 2" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.2" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.2" + multiplier: -8 + divisor: 1000 + +``` +</details> + +##### SNMPv3 + +To use SNMPv3: + +- use `user` instead of `community`. +- set `options.version` to 3. + +The rest of the configuration is the same as in the SNMPv1/2 example.
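+ +As a sanity check before configuring the collector, you can first verify the SNMPv3 credentials from the command line with `snmpwalk` (one possible net-snmp invocation with placeholder values; adjust the protocol names and passphrases to match your device and net-snmp version): + +```sh +snmpwalk -v 3 -l authPriv -u username -a SHA -A auth_protocol_passphrase -x AES -X priv_protocol_passphrase 192.0.2.1 +```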
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: switch + update_every: 10 + hostname: 192.0.2.1 + options: + version: 3 + user: + name: username + level: authPriv + auth_proto: sha256 + auth_key: auth_protocol_passphrase + priv_proto: aes256 + priv_key: priv_protocol_passphrase + +``` +</details> + +##### Multiply range + +If you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option. + +This is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`. + +Each of the 24 new charts will have its index (1-24) appended to: + +- its unique chart `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`. +- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`. +- its `oid` (for all dimensions), i.e. the `in` dimension will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`. +- its `priority`, which is incremented for each chart so that the charts appear on the dashboard in this order. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: switch + update_every: 10 + hostname: "192.0.2.1" + community: public + options: + version: 2 + charts: + - id: "bandwidth_port" + title: "Switch Bandwidth for port" + units: "kilobits/s" + type: "area" + family: "ports" + multiply_range: [1, 24] + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16" + multiplier: -8 + divisor: 1000 + +``` +</details> + +##### Multiple devices with a common configuration + +YAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). +The `&` defines and names an anchor, and the `*` references it. `<<: *anchor` means: inject the anchor, then extend it. We can use anchors to share a common configuration across multiple devices. + +The following example: + +- adds an `anchor` to the first job. +- injects (copies) the first job's configuration into the second job and updates the `name` and `hostname` parameters. +- injects (copies) the first job's configuration into the third job and updates the `name` and `hostname` parameters. + + +<details><summary>Config</summary> + +```yaml +jobs: + - &anchor + name: switch + update_every: 10 + hostname: "192.0.2.1" + community: public + options: + version: 2 + charts: + - id: "bandwidth_port1" + title: "Switch Bandwidth for port 1" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.1" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.1" + multiplier: -8 + divisor: 1000 + - <<: *anchor + name: switch2 + hostname: "192.0.2.2" + - <<: *anchor + name: switch3 + hostname: "192.0.2.3" + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m snmp + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/snmp/metadata.yaml b/src/go/collectors/go.d.plugin/modules/snmp/metadata.yaml new file mode 100644 index 00000000000000..a35b3190d8eebe --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/metadata.yaml @@ -0,0 +1,398 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-snmp + plugin_name: go.d.plugin + module_name: snmp + monitored_instance: + name: SNMP devices + link: "" + icon_filename: snmp.png + categories: + - data-collection.generic-data-collection + keywords: + - snmp + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors any SNMP device and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package. + + It supports: + + - all SNMP versions: SNMPv1, SNMPv2c and SNMPv3. + - any number of SNMP devices. + - each SNMP device can be used to collect data for any number of charts. + - each chart may have any number of dimensions. + - each SNMP device may have a different update frequency. + - each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server to control the size of batches). + + Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. + `go.d.plugin` reports the time it took for the SNMP device to respond when executed in debug mode. + + Also, if many SNMP clients are used on the same SNMP device at the same time, values may be skipped. + This is a problem of the SNMP device, not this collector. In this case, consider reducing the frequency of data collection (increasing `update_every`). + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Find OIDs + description: | + Use `snmpwalk`, like this: + + ```sh + snmpwalk -t 20 -O fn -v 2c -c public 192.0.2.1 + ``` + + - `-t 20` is the timeout in seconds. + - `-O fn` will display full OIDs in numeric format. + - `-v 2c` is the SNMP version. + - `-c public` is the SNMP community. + - `192.0.2.1` is the SNMP device. + configuration: + file: + name: go.d/snmp.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: hostname + description: Target ipv4 address. + default_value: 127.0.0.1 + required: true + - name: community + description: SNMPv1/2 community string. + default_value: public + required: false + - name: options.version + description: "SNMP version. Available versions: 1, 2, 3." + default_value: 2 + required: false + - name: options.port + description: Target port. + default_value: 161 + required: false + - name: options.retries + description: Retries to attempt. 
+ default_value: 1 + required: false + - name: options.timeout + description: SNMP request/response timeout. + default_value: 10 + required: false + - name: options.max_request_size + description: Maximum number of OIDs allowed in one SNMP request. + default_value: 60 + required: false + - name: user.name + description: SNMPv3 user name. + default_value: "" + required: false + - name: user.level + description: Security level of SNMPv3 messages. + default_value: "" + required: false + detailed_description: | + The security level of SNMPv3 messages, as per RFC 3414 (`user.level`): + + | String value | Int value | Description | + |:------------:|:---------:|------------------------------------------| + | none | 1 | no message authentication or encryption | + | authNoPriv | 2 | message authentication and no encryption | + | authPriv | 3 | message authentication and encryption | + - name: user.auth_proto + description: Authentication protocol for SNMPv3 messages. + default_value: "" + required: false + detailed_description: | + The digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`): + + | String value | Int value | Description | + |:------------:|:---------:|-------------------------------------------| + | none | 1 | no message authentication | + | md5 | 2 | MD5 message authentication (HMAC-MD5-96) | + | sha | 3 | SHA message authentication (HMAC-SHA-96) | + | sha224 | 4 | SHA message authentication (HMAC-SHA-224) | + | sha256 | 5 | SHA message authentication (HMAC-SHA-256) | + | sha384 | 6 | SHA message authentication (HMAC-SHA-384) | + | sha512 | 7 | SHA message authentication (HMAC-SHA-512) | + - name: user.auth_key + description: Authentication protocol pass phrase. + default_value: "" + required: false + - name: user.priv_proto + description: Privacy protocol for SNMPv3 messages. + default_value: "" + required: false + detailed_description: | + The encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`): + + | String value | Int value | Description | + |:------------:|:---------:|-------------------------------------------------------------------------| + | none | 1 | no message encryption | + | des | 2 | DES encryption (CBC-DES) | + | aes | 3 | 128-bit AES encryption (CFB-AES-128) | + | aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization | + | aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization | + | aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization | + | aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization | + - name: user.priv_key + description: Privacy protocol pass phrase. + default_value: "" + required: false + - name: charts + description: List of charts. + default_value: "[]" + required: true + - name: charts.id + description: Chart ID. Used to uniquely identify the chart. + default_value: "" + required: true + - name: charts.title + description: Chart title. + default_value: "Untitled chart" + required: false + - name: charts.units + description: Chart units. + default_value: num + required: false + - name: charts.family + description: Chart family. + default_value: charts.id + required: false + - name: charts.type + description: Chart type (line, area, stacked). + default_value: line + required: false + - name: charts.priority + description: Chart priority. 
+ default_value: 70000 + required: false + - name: charts.multiply_range + description: Used when you need to define many charts using incremental OIDs. + default_value: "[]" + required: false + - name: charts.dimensions + description: List of chart dimensions. + default_value: "[]" + required: true + - name: charts.dimensions.oid + description: Collected metric OID. + default_value: "" + required: true + - name: charts.dimensions.name + description: Dimension name. + default_value: "" + required: true + - name: charts.dimensions.algorithm + description: Dimension algorithm (absolute, incremental). + default_value: absolute + required: false + - name: charts.dimensions.multiplier + description: Collected value multiplier, applied to convert it properly to units. + default_value: 1 + required: false + - name: charts.dimensions.divisor + description: Collected value divisor, applied to convert it properly to units. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: SNMPv1/2 + description: | + In this example: + + - the SNMP device is `192.0.2.1`. + - the SNMP version is `2`. + - the SNMP community is `public`. + - we will update the values every 10 seconds. + - we define 2 charts `bandwidth_port1` and `bandwidth_port2`, each having 2 dimensions: `in` and `out`. + + > **SNMPv1**: just set `options.version` to 1. + > **Note**: the algorithm chosen is `incremental`, because the collected values show the total number of bytes transferred, which we need to transform into kbps. To chart gauges (e.g. temperature), use `absolute` instead. + config: | + jobs: + - name: switch + update_every: 10 + hostname: 192.0.2.1 + community: public + options: + version: 2 + charts: + - id: "bandwidth_port1" + title: "Switch Bandwidth for port 1" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.1" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.1" + multiplier: -8 + divisor: 1000 + - id: "bandwidth_port2" + title: "Switch Bandwidth for port 2" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.2" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.2" + multiplier: -8 + divisor: 1000 + - name: SNMPv3 + description: | + To use SNMPv3: + + - use `user` instead of `community`. + - set `options.version` to 3. + + The rest of the configuration is the same as in the SNMPv1/2 example. + config: | + jobs: + - name: switch + update_every: 10 + hostname: 192.0.2.1 + options: + version: 3 + user: + name: username + level: authPriv + auth_proto: sha256 + auth_key: auth_protocol_passphrase + priv_proto: aes256 + priv_key: priv_protocol_passphrase + - name: Multiply range + description: | + If you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option. + + This is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`. + + Each of the 24 new charts will have its index (1-24) appended to: + + - its unique chart `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`. + - its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`. + - its `oid` (for all dimensions), i.e. the `in` dimension will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`. 
+ - its `priority`, which is incremented for each chart so that the charts appear on the dashboard in this order. + config: | + jobs: + - name: switch + update_every: 10 + hostname: "192.0.2.1" + community: public + options: + version: 2 + charts: + - id: "bandwidth_port" + title: "Switch Bandwidth for port" + units: "kilobits/s" + type: "area" + family: "ports" + multiply_range: [1, 24] + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16" + multiplier: -8 + divisor: 1000 + - name: Multiple devices with a common configuration + description: | + YAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases). + The `&` defines and names an anchor, and the `*` references it. `<<: *anchor` means: inject the anchor, then extend it. We can use anchors to share a common configuration across multiple devices. + + The following example: + + - adds an `anchor` to the first job. + - injects (copies) the first job's configuration into the second job and updates the `name` and `hostname` parameters. + - injects (copies) the first job's configuration into the third job and updates the `name` and `hostname` parameters. + config: | + jobs: + - &anchor + name: switch + update_every: 10 + hostname: "192.0.2.1" + community: public + options: + version: 2 + charts: + - id: "bandwidth_port1" + title: "Switch Bandwidth for port 1" + units: "kilobits/s" + type: "area" + family: "ports" + dimensions: + - name: "in" + oid: "1.3.6.1.2.1.2.2.1.10.1" + algorithm: "incremental" + multiplier: 8 + divisor: 1000 + - name: "out" + oid: "1.3.6.1.2.1.2.2.1.16.1" + multiplier: -8 + divisor: 1000 + - <<: *anchor + name: switch2 + hostname: "192.0.2.2" + - <<: *anchor + name: switch3 + hostname: "192.0.2.3" + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: The metrics that will be collected are defined in the configuration file. 
+ availability: [] + scopes: [] diff --git a/src/go/collectors/go.d.plugin/modules/snmp/snmp.go b/src/go/collectors/go.d.plugin/modules/snmp/snmp.go new file mode 100644 index 00000000000000..7aa933f64cd60f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/snmp.go @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + _ "embed" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/gosnmp/gosnmp" +) + +const ( + defaultUpdateEvery = 10 + defaultHostname = "127.0.0.1" + defaultCommunity = "public" + defaultVersion = gosnmp.Version2c + defaultPort = 161 + defaultRetries = 1 + defaultTimeout = defaultUpdateEvery + defaultMaxOIDs = 60 +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("snmp", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: defaultUpdateEvery, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *SNMP { + return &SNMP{ + Config: Config{ + Hostname: defaultHostname, + Community: defaultCommunity, + Options: Options{ + Port: defaultPort, + Retries: defaultRetries, + Timeout: defaultUpdateEvery, + Version: defaultVersion.String(), + MaxOIDs: defaultMaxOIDs, + }, + }, + } +} + +type ( + Config struct { + UpdateEvery int `yaml:"update_every"` + Hostname string `yaml:"hostname"` + Community string `yaml:"community"` + User User `yaml:"user"` + Options Options `yaml:"options"` + ChartsInput []ChartConfig `yaml:"charts"` + } + User struct { + Name string `yaml:"name"` + SecurityLevel string `yaml:"level"` + AuthProto string `yaml:"auth_proto"` + AuthKey string `yaml:"auth_key"` + PrivProto string `yaml:"priv_proto"` + PrivKey string `yaml:"priv_key"` + } + Options struct { + Port int `yaml:"port"` + Retries int `yaml:"retries"` + Timeout int `yaml:"timeout"` + Version string `yaml:"version"` + MaxOIDs int `yaml:"max_request_size"` + } + ChartConfig struct { + ID string `yaml:"id"` + Title string `yaml:"title"` + Units string `yaml:"units"` + Family string `yaml:"family"` + Type string `yaml:"type"` + Priority int `yaml:"priority"` + IndexRange []int `yaml:"multiply_range"` + Dimensions []DimensionConfig `yaml:"dimensions"` + } + DimensionConfig struct { + OID string `yaml:"oid"` + Name string `yaml:"name"` + Algorithm string `yaml:"algorithm"` + Multiplier int `yaml:"multiplier"` + Divisor int `yaml:"divisor"` + } +) + +type SNMP struct { + module.Base + Config `yaml:",inline"` + + charts *module.Charts + snmpClient gosnmp.Handler + oids []string +} + +func (s *SNMP) Init() bool { + err := s.validateConfig() + if err != nil { + s.Errorf("config validation: %v", err) + return false + } + + snmpClient, err := s.initSNMPClient() + if err != nil { + s.Errorf("SNMP client initialization: %v", err) + return false + } + + s.Info(snmpClientConnInfo(snmpClient)) + + err = snmpClient.Connect() + if err != nil { + s.Errorf("SNMP client connect: %v", err) + return false + } + s.snmpClient = snmpClient + + charts, err := newCharts(s.ChartsInput) + if err != nil { + s.Errorf("Population of charts failed: %v", err) + return false + } + s.charts = charts + + s.oids = s.initOIDs() + + return true +} + +func (s *SNMP) Check() bool { + return len(s.Collect()) > 0 +} + +func (s *SNMP) Charts() *module.Charts { + return s.charts +} + +func (s *SNMP) Collect() map[string]int64 { + mx, err := s.collect() + if err != nil { + s.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (s *SNMP) 
Cleanup() { + if s.snmpClient != nil { + _ = s.snmpClient.Close() + } +} + +func snmpClientConnInfo(c gosnmp.Handler) string { + var info strings.Builder + info.WriteString(fmt.Sprintf("hostname=%s,port=%d,snmp_version=%s", c.Target(), c.Port(), c.Version())) + switch c.Version() { + case gosnmp.Version1, gosnmp.Version2c: + info.WriteString(fmt.Sprintf(",community=%s", c.Community())) + case gosnmp.Version3: + info.WriteString(fmt.Sprintf(",security_level=%d,%s", c.MsgFlags(), c.SecurityParameters().Description())) + } + return info.String() +} diff --git a/src/go/collectors/go.d.plugin/modules/snmp/snmp_test.go b/src/go/collectors/go.d.plugin/modules/snmp/snmp_test.go new file mode 100644 index 00000000000000..9f1ef0e9057a14 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/snmp/snmp_test.go @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package snmp + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/gosnmp/gosnmp" + snmpmock "github.com/gosnmp/gosnmp/mocks" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.IsType(t, (*SNMP)(nil), New()) +} + +func TestSNMP_Init(t *testing.T) { + tests := map[string]struct { + prepareSNMP func() *SNMP + wantFail bool + }{ + "fail with default config": { + wantFail: true, + prepareSNMP: func() *SNMP { + return New() + }, + }, + "fail when 'charts' not set": { + wantFail: true, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + snmp.ChartsInput = nil + return snmp + }, + }, + "fail when using SNMPv3 but 'user.name' not set": { + wantFail: true, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV3Config() + snmp.User.Name = "" + return snmp + }, + }, + "fail when using SNMPv3 but 'user.level' is invalid": { + wantFail: true, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV3Config() + snmp.User.SecurityLevel = "invalid" + return snmp + }, + }, + "fail when using SNMPv3 but 'user.auth_proto' is invalid": { + wantFail: true, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV3Config() + snmp.User.AuthProto = "invalid" + return snmp + }, + }, + "fail when using SNMPv3 but 'user.priv_proto' is invalid": { + wantFail: true, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV3Config() + snmp.User.PrivProto = "invalid" + return snmp + }, + }, + "success when using SNMPv1 with valid config": { + wantFail: false, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV1Config() + return snmp + }, + }, + "success when using SNMPv2 with valid config": { + wantFail: false, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + return snmp + }, + }, + "success when using SNMPv3 with valid config": { + wantFail: false, + prepareSNMP: func() *SNMP { + snmp := New() + snmp.Config = prepareV3Config() + return snmp + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + snmp := test.prepareSNMP() + + if test.wantFail { + assert.False(t, snmp.Init()) + } else { + assert.True(t, snmp.Init()) + } + }) + } +} + +func TestSNMP_Check(t *testing.T) { + tests := map[string]struct { + prepareSNMP func(m *snmpmock.MockHandler) *SNMP + wantFail bool + }{ + "success when 'max_request_size' > returned OIDs": { + wantFail: false, + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config 
= prepareV2Config() + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: 10, Type: gosnmp.Gauge32}, + {Value: 20, Type: gosnmp.Gauge32}, + }, + }, nil).Times(1) + + return snmp + }, + }, + "success when 'max_request_size' < returned OIDs": { + wantFail: false, + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + snmp.Config.Options.MaxOIDs = 1 + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: 10, Type: gosnmp.Gauge32}, + {Value: 20, Type: gosnmp.Gauge32}, + }, + }, nil).Times(2) + + return snmp + }, + }, + "success when using 'multiply_range'": { + wantFail: false, + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 1) + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: 10, Type: gosnmp.Gauge32}, + {Value: 20, Type: gosnmp.Gauge32}, + {Value: 30, Type: gosnmp.Gauge32}, + {Value: 40, Type: gosnmp.Gauge32}, + }, + }, nil).Times(1) + + return snmp + }, + }, + "fail when snmp client Get fails": { + wantFail: true, + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + + m.EXPECT().Get(gomock.Any()).Return(nil, errors.New("mock Get() error")).Times(1) + + return snmp + }, + }, + "fail when all OIDs type is unsupported": { + wantFail: true, + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: nil, Type: gosnmp.NoSuchInstance}, + {Value: nil, Type: gosnmp.NoSuchInstance}, + }, + }, nil).Times(1) + + return snmp + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mockSNMP, cleanup := mockInit(t) + defer cleanup() + + newSNMPClient = func() gosnmp.Handler { return mockSNMP } + defaultMockExpects(mockSNMP) + + snmp := test.prepareSNMP(mockSNMP) + require.True(t, snmp.Init()) + + if test.wantFail { + assert.False(t, snmp.Check()) + } else { + assert.True(t, snmp.Check()) + } + }) + } +} + +func TestSNMP_Collect(t *testing.T) { + tests := map[string]struct { + prepareSNMP func(m *snmpmock.MockHandler) *SNMP + wantCollected map[string]int64 + }{ + "success when collecting supported type": { + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 3) + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: 10, Type: gosnmp.Counter32}, + {Value: 20, Type: gosnmp.Counter64}, + {Value: 30, Type: gosnmp.Gauge32}, + {Value: 1, Type: gosnmp.Boolean}, + {Value: 40, Type: gosnmp.Gauge32}, + {Value: 50, Type: gosnmp.TimeTicks}, + {Value: 60, Type: gosnmp.Uinteger32}, + {Value: 70, Type: gosnmp.Integer}, + }, + }, nil).Times(1) + + return snmp + }, + wantCollected: map[string]int64{ + "1.3.6.1.2.1.2.2.1.10.0": 10, + "1.3.6.1.2.1.2.2.1.16.0": 20, + "1.3.6.1.2.1.2.2.1.10.1": 30, + "1.3.6.1.2.1.2.2.1.16.1": 1, + "1.3.6.1.2.1.2.2.1.10.2": 40, + "1.3.6.1.2.1.2.2.1.16.2": 50, + "1.3.6.1.2.1.2.2.1.10.3": 60, + "1.3.6.1.2.1.2.2.1.16.3": 70, + }, + }, + "success when collecting supported and unsupported type": { + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 2) + + 
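// The 0-2 index range expands the single configured chart into 3 charts with 2 dimensions each, so one Get() queries 6 OIDs; the mock answers with 3 supported PDUs and 3 NoSuchInstance PDUs, and only the first three values should be collected. + 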
m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: 10, Type: gosnmp.Counter32}, + {Value: 20, Type: gosnmp.Counter64}, + {Value: 30, Type: gosnmp.Gauge32}, + {Value: nil, Type: gosnmp.NoSuchInstance}, + {Value: nil, Type: gosnmp.NoSuchInstance}, + {Value: nil, Type: gosnmp.NoSuchInstance}, + }, + }, nil).Times(1) + + return snmp + }, + wantCollected: map[string]int64{ + "1.3.6.1.2.1.2.2.1.10.0": 10, + "1.3.6.1.2.1.2.2.1.16.0": 20, + "1.3.6.1.2.1.2.2.1.10.1": 30, + }, + }, + "fails when collecting unsupported type": { + prepareSNMP: func(m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 2) + + m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{ + Variables: []gosnmp.SnmpPDU{ + {Value: nil, Type: gosnmp.NoSuchInstance}, + {Value: nil, Type: gosnmp.NoSuchInstance}, + {Value: nil, Type: gosnmp.NoSuchObject}, + {Value: "192.0.2.0", Type: gosnmp.NsapAddress}, + {Value: []uint8{118, 101, 116}, Type: gosnmp.OctetString}, + {Value: ".1.3.6.1.2.1.4.32.1.5.2.1.4.10.19.0.0.16", Type: gosnmp.ObjectIdentifier}, + }, + }, nil).Times(1) + + return snmp + }, + wantCollected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mockSNMP, cleanup := mockInit(t) + defer cleanup() + + newSNMPClient = func() gosnmp.Handler { return mockSNMP } + defaultMockExpects(mockSNMP) + + snmp := test.prepareSNMP(mockSNMP) + require.True(t, snmp.Init()) + + collected := snmp.Collect() + + assert.Equal(t, test.wantCollected, collected) + }) + } +} + +func TestSNMP_Cleanup(t *testing.T) { + tests := map[string]struct { + prepareSNMP func(t *testing.T, m *snmpmock.MockHandler) *SNMP + }{ + "cleanup call if snmpClient initialized": { + prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + require.True(t, snmp.Init()) + + m.EXPECT().Close().Times(1) + + return snmp + }, + }, + "cleanup call does not panic if snmpClient not initialized": { + prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + require.True(t, snmp.Init()) + snmp.snmpClient = nil + + return snmp + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mockSNMP, cleanup := mockInit(t) + defer cleanup() + + newSNMPClient = func() gosnmp.Handler { return mockSNMP } + defaultMockExpects(mockSNMP) + + snmp := test.prepareSNMP(t, mockSNMP) + assert.NotPanics(t, snmp.Cleanup) + }) + } +} + +func TestSNMP_Charts(t *testing.T) { + tests := map[string]struct { + prepareSNMP func(t *testing.T, m *snmpmock.MockHandler) *SNMP + wantNumCharts int + }{ + "without 'multiply_range': got expected number of charts": { + wantNumCharts: 1, + prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareV2Config() + require.True(t, snmp.Init()) + + return snmp + }, + }, + "with 'multiply_range': got expected number of charts": { + wantNumCharts: 10, + prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { + snmp := New() + snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 9) + require.True(t, snmp.Init()) + + return snmp + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mockSNMP, cleanup := mockInit(t) + defer cleanup() + + newSNMPClient = func() gosnmp.Handler { return mockSNMP } + defaultMockExpects(mockSNMP) + + snmp := test.prepareSNMP(t, mockSNMP) + assert.Equal(t, 
test.wantNumCharts, len(*snmp.Charts())) + }) + } +} + +func mockInit(t *testing.T) (*snmpmock.MockHandler, func()) { + mockCtl := gomock.NewController(t) + cleanup := func() { mockCtl.Finish() } + mockSNMP := snmpmock.NewMockHandler(mockCtl) + + return mockSNMP, cleanup +} + +func defaultMockExpects(m *snmpmock.MockHandler) { + m.EXPECT().Target().AnyTimes() + m.EXPECT().Port().AnyTimes() + m.EXPECT().Retries().AnyTimes() + m.EXPECT().Timeout().AnyTimes() + m.EXPECT().MaxOids().AnyTimes() + m.EXPECT().Version().AnyTimes() + m.EXPECT().Community().AnyTimes() + m.EXPECT().SetTarget(gomock.Any()).AnyTimes() + m.EXPECT().SetPort(gomock.Any()).AnyTimes() + m.EXPECT().SetRetries(gomock.Any()).AnyTimes() + m.EXPECT().SetMaxOids(gomock.Any()).AnyTimes() + m.EXPECT().SetLogger(gomock.Any()).AnyTimes() + m.EXPECT().SetTimeout(gomock.Any()).AnyTimes() + m.EXPECT().SetCommunity(gomock.Any()).AnyTimes() + m.EXPECT().SetVersion(gomock.Any()).AnyTimes() + m.EXPECT().SetSecurityModel(gomock.Any()).AnyTimes() + m.EXPECT().SetMsgFlags(gomock.Any()).AnyTimes() + m.EXPECT().SetSecurityParameters(gomock.Any()).AnyTimes() + m.EXPECT().Connect().Return(nil).AnyTimes() +} + +func prepareConfigWithIndexRange(p func() Config, start, end int) Config { + if start > end || start < 0 || end < 1 { + panic(fmt.Sprintf("invalid index range ('%d'-'%d')", start, end)) + } + cfg := p() + for i := range cfg.ChartsInput { + cfg.ChartsInput[i].IndexRange = []int{start, end} + } + return cfg +} + +func prepareV3Config() Config { + cfg := prepareV2Config() + cfg.Options.Version = gosnmp.Version3.String() + cfg.User = User{ + Name: "name", + SecurityLevel: "authPriv", + AuthProto: strings.ToLower(gosnmp.MD5.String()), + AuthKey: "auth_key", + PrivProto: strings.ToLower(gosnmp.AES.String()), + PrivKey: "priv_key", + } + return cfg +} + +func prepareV2Config() Config { + cfg := prepareV1Config() + cfg.Options.Version = gosnmp.Version2c.String() + return cfg +} + +func prepareV1Config() Config { + return Config{ + UpdateEvery: defaultUpdateEvery, + Hostname: defaultHostname, + Community: defaultCommunity, + Options: Options{ + Port: defaultPort, + Retries: defaultRetries, + Timeout: defaultTimeout, + Version: gosnmp.Version1.String(), + MaxOIDs: defaultMaxOIDs, + }, + ChartsInput: []ChartConfig{ + { + ID: "test_chart1", + Title: "This is Test Chart1", + Units: "kilobits/s", + Family: "family", + Type: module.Area.String(), + Priority: module.Priority, + Dimensions: []DimensionConfig{ + { + OID: "1.3.6.1.2.1.2.2.1.10", + Name: "in", + Algorithm: module.Incremental.String(), + Multiplier: 8, + Divisor: 1000, + }, + { + OID: "1.3.6.1.2.1.2.2.1.16", + Name: "out", + Algorithm: module.Incremental.String(), + Multiplier: 8, + Divisor: 1000, + }, + }, + }, + }, + } +} diff --git a/src/go/collectors/go.d.plugin/modules/solr/README.md b/src/go/collectors/go.d.plugin/modules/solr/README.md new file mode 120000 index 00000000000000..0bca1b31a66b90 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/README.md @@ -0,0 +1 @@ +integrations/solr.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/solr/charts.go b/src/go/collectors/go.d.plugin/modules/solr/charts.go new file mode 100644 index 00000000000000..caaa72489b8a28 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/charts.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package solr + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + // Charts is an alias for module.Charts + Charts = 
module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "search_requests", + Title: "Search Requests", + Units: "requests/s", + Ctx: "solr.search_requests", + Dims: Dims{ + {ID: "query_requests_count", Name: "search", Algo: module.Incremental}, + }, + }, + { + ID: "search_errors", + Title: "Search Errors", + Units: "errors/s", + Ctx: "solr.search_errors", + Dims: Dims{ + {ID: "query_errors_count", Name: "errors", Algo: module.Incremental}, + }, + }, + { + ID: "search_errors_by_type", + Title: "Search Errors By Type", + Units: "errors/s", + Ctx: "solr.search_errors_by_type", + Dims: Dims{ + {ID: "query_clientErrors_count", Name: "client", Algo: module.Incremental}, + {ID: "query_serverErrors_count", Name: "server", Algo: module.Incremental}, + {ID: "query_timeouts_count", Name: "timeouts", Algo: module.Incremental}, + }, + }, + { + ID: "search_requests_processing_time", + Title: "Search Requests Processing Time", + Units: "milliseconds", + Ctx: "solr.search_requests_processing_time", + Dims: Dims{ + {ID: "query_totalTime_count", Name: "time", Algo: module.Incremental}, + }, + }, + { + ID: "search_requests_timings", + Title: "Search Requests Timings", + Units: "milliseconds", + Ctx: "solr.search_requests_timings", + Dims: Dims{ + {ID: "query_requestTimes_min_ms", Name: "min", Div: 1000000}, + {ID: "query_requestTimes_median_ms", Name: "median", Div: 1000000}, + {ID: "query_requestTimes_mean_ms", Name: "mean", Div: 1000000}, + {ID: "query_requestTimes_max_ms", Name: "max", Div: 1000000}, + }, + }, + { + ID: "search_requests_processing_time_percentile", + Title: "Search Requests Processing Time Percentile", + Units: "milliseconds", + Ctx: "solr.search_requests_processing_time_percentile", + Dims: Dims{ + {ID: "query_requestTimes_p75_ms", Name: "p75", Div: 1000000}, + {ID: "query_requestTimes_p95_ms", Name: "p95", Div: 1000000}, + {ID: "query_requestTimes_p99_ms", Name: "p99", Div: 1000000}, + {ID: "query_requestTimes_p999_ms", Name: "p999", Div: 1000000}, + }, + }, + { + ID: "update_requests", + Title: "Update Requests", + Units: "requests/s", + Ctx: "solr.update_requests", + Dims: Dims{ + {ID: "update_requests_count", Name: "update", Algo: module.Incremental}, + }, + }, + { + ID: "update_errors", + Title: "Update Errors", + Units: "errors/s", + Ctx: "solr.update_errors", + Dims: Dims{ + {ID: "update_errors_count", Name: "errors", Algo: module.Incremental}, + }, + }, + { + ID: "update_errors_by_type", + Title: "Update Errors By Type", + Units: "errors/s", + Ctx: "solr.update_errors_by_type", + Dims: Dims{ + {ID: "update_clientErrors_count", Name: "client", Algo: module.Incremental}, + {ID: "update_serverErrors_count", Name: "server", Algo: module.Incremental}, + {ID: "update_timeouts_count", Name: "timeouts", Algo: module.Incremental}, + }, + }, + { + ID: "update_requests_processing_time", + Title: "Update Requests Processing Time", + Units: "milliseconds", + Ctx: "solr.update_requests_processing_time", + Dims: Dims{ + {ID: "update_totalTime_count", Name: "time", Algo: module.Incremental}, + }, + }, + { + ID: "update_requests_timings", + Title: "Update Requests Timings", + Units: "milliseconds", + Ctx: "solr.update_requests_timings", + Dims: Dims{ + {ID: "update_requestTimes_min_ms", Name: "min", Div: 1000000}, + {ID: "update_requestTimes_median_ms", Name: "median", Div: 1000000}, + {ID: "update_requestTimes_mean_ms", Name: "mean", Div: 1000000}, + {ID: "update_requestTimes_max_ms", Name: "max", Div: 1000000}, + }, + }, + { + ID: 
"update_requests_processing_time_percentile", + Title: "Update Requests Processing Time Percentile", + Units: "milliseconds", + Ctx: "solr.update_requests_processing_time_percentile", + Dims: Dims{ + {ID: "update_requestTimes_p75_ms", Name: "p75", Div: 1000000}, + {ID: "update_requestTimes_p95_ms", Name: "p95", Div: 1000000}, + {ID: "update_requestTimes_p99_ms", Name: "p99", Div: 1000000}, + {ID: "update_requestTimes_p999_ms", Name: "p999", Div: 1000000}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/solr/config_schema.json b/src/go/collectors/go.d.plugin/modules/solr/config_schema.json new file mode 100644 index 00000000000000..66dde58bf8c4e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/solr job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/solr/integrations/solr.md b/src/go/collectors/go.d.plugin/modules/solr/integrations/solr.md new file mode 100644 index 00000000000000..9afebfd17e3fea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/integrations/solr.md @@ -0,0 +1,223 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/solr/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/solr/metadata.yaml" +sidebar_label: "Solr" +learn_status: "Published" +learn_rel_path: "Data Collection/Search Engines" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Solr + + +<img src="https://netdata.cloud/img/solr.svg" width="150"/> + + +Plugin: go.d.plugin +Module: solr + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Solr instances. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Solr instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| solr.search_requests | search | requests/s |
+| solr.search_errors | errors | errors/s |
+| solr.search_errors_by_type | client, server, timeouts | errors/s |
+| solr.search_requests_processing_time | time | milliseconds |
+| solr.search_requests_timings | min, median, mean, max | milliseconds |
+| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |
+| solr.update_requests | update | requests/s |
+| solr.update_errors | errors | errors/s |
+| solr.update_errors_by_type | client, server, timeouts | errors/s |
+| solr.update_requests_processing_time | time | milliseconds |
+| solr.update_requests_timings | min, median, mean, max | milliseconds |
+| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Solr version 6.4+
+
+This collector does not work with Solr versions lower than 6.4.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/solr.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/solr.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8983 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://localhost:8983
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local Solr instance with basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://localhost:8983
+    username: foo
+    password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://localhost:8983
+
+  - name: remote
+    url: http://203.0.113.10:8983
+
+```
+</details>
+
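+##### Verifying the endpoint
+
+Whichever configuration you pick, you can first sanity-check that the metrics endpoint this collector queries responds. A minimal check, assuming a local Solr on the default port (adjust the host and port to your setup):
+
+```bash
+curl 'http://localhost:8983/solr/admin/metrics?group=core&prefix=UPDATE,QUERY&wt=json'
+```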
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m solr
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/solr/metadata.yaml b/src/go/collectors/go.d.plugin/modules/solr/metadata.yaml
new file mode 100644
index 00000000000000..066744f634b198
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/solr/metadata.yaml
@@ -0,0 +1,268 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-solr
+      plugin_name: go.d.plugin
+      module_name: solr
+      monitored_instance:
+        name: Solr
+        link: https://lucene.apache.org/solr/
+        icon_filename: solr.svg
+        categories:
+          - data-collection.search-engines
+      keywords:
+        - solr
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors Solr instances.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Solr version 6.4+
+            description: |
+              This collector does not work with Solr versions lower than 6.4.
+      configuration:
+        file:
+          name: go.d/solr.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: All options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:8983
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+ default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://localhost:8983 + - name: Basic HTTP auth + description: Local Solr instance with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://localhost:8983 + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + url: http://localhost:8983 + + - name: remote + url: http://203.0.113.10:8983 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+          labels: []
+          metrics:
+            - name: solr.search_requests
+              description: Search Requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: search
+            - name: solr.search_errors
+              description: Search Errors
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: errors
+            - name: solr.search_errors_by_type
+              description: Search Errors By Type
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: client
+                - name: server
+                - name: timeouts
+            - name: solr.search_requests_processing_time
+              description: Search Requests Processing Time
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: solr.search_requests_timings
+              description: Search Requests Timings
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: min
+                - name: median
+                - name: mean
+                - name: max
+            - name: solr.search_requests_processing_time_percentile
+              description: Search Requests Processing Time Percentile
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: p75
+                - name: p95
+                - name: p99
+                - name: p999
+            - name: solr.update_requests
+              description: Update Requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: update
+            - name: solr.update_errors
+              description: Update Errors
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: errors
+            - name: solr.update_errors_by_type
+              description: Update Errors By Type
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: client
+                - name: server
+                - name: timeouts
+            - name: solr.update_requests_processing_time
+              description: Update Requests Processing Time
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: solr.update_requests_timings
+              description: Update Requests Timings
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: min
+                - name: median
+                - name: mean
+                - name: max
+            - name: solr.update_requests_processing_time_percentile
+              description: Update Requests Processing Time Percentile
+              unit: milliseconds
+              chart_type: line
+              dimensions:
+                - name: p75
+                - name: p95
+                - name: p99
+                - name: p999
diff --git a/src/go/collectors/go.d.plugin/modules/solr/parser.go b/src/go/collectors/go.d.plugin/modules/solr/parser.go
new file mode 100644
index 00000000000000..c8a9eaa54a6c2b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/solr/parser.go
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package solr
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+type count struct {
+	Count int64
+}
+
+type common struct {
+	Count        int64
+	MeanRate     float64 `json:"meanRate"`
+	MinRate1min  float64 `json:"1minRate"`
+	MinRate5min  float64 `json:"5minRate"`
+	MinRate15min float64 `json:"15minRate"`
+}
+
+type requestTimes struct {
+	Count        int64
+	MeanRate     float64 `json:"meanRate"`
+	MinRate1min  float64 `json:"1minRate"`
+	MinRate5min  float64 `json:"5minRate"`
+	MinRate15min float64 `json:"15minRate"`
+	MinMS        float64 `json:"min_ms"`
+	MaxMS        float64 `json:"max_ms"`
+	MeanMS       float64 `json:"mean_ms"`
+	MedianMS     float64 `json:"median_ms"`
+	StdDevMS     float64 `json:"stddev_ms"`
+	P75MS        float64 `json:"p75_ms"`
+	P95MS        float64 `json:"p95_ms"`
+	P99MS        float64 `json:"p99_ms"`
+	P999MS       float64 `json:"p999_ms"`
+}
+
+type coresMetrics struct {
+	Metrics map[string]map[string]json.RawMessage
+}
+
+func (s *Solr) parse(resp *http.Response) (map[string]int64, error) {
+	var cm coresMetrics
+	var metrics = make(map[string]int64)
+
+	if err := json.NewDecoder(resp.Body).Decode(&cm); err != nil {
+		return nil, err
+	}
+
+	if len(cm.Metrics) == 0 {
+		return nil, errors.New("unparsable data")
+	}
+
+	for core, data := range cm.Metrics {
+		coreName := core[10:] // strip the "solr.core." prefix
+
+		if !s.cores[coreName] {
+			s.addCoreCharts(coreName)
+			s.cores[coreName] = true
+		}
+
+		if err := s.parseCore(coreName, data, metrics); err != nil {
+			return nil, err
+		}
+	}
+
+	return metrics, nil
+}
+
+func (s *Solr) parseCore(core string, data map[string]json.RawMessage, metrics map[string]int64) error {
+	var (
+		simpleCount  int64
+		count        count
+		common       common
+		requestTimes requestTimes
+	)
+
+	for metric, stats := range data {
+		parts := strings.Split(metric, ".")
+
+		if len(parts) != 3 {
+			continue
+		}
+
+		typ, handler, stat := strings.ToLower(parts[0]), parts[1], parts[2]
+
+		if handler == "updateHandler" {
+			// TODO:
+			continue
+		}
+
+		switch stat {
+		case "clientErrors", "errors", "serverErrors", "timeouts":
+			if err := json.Unmarshal(stats, &common); err != nil {
+				return err
+			}
+			metrics[format("%s_%s_%s_count", core, typ, stat)] += common.Count
+		case "requests", "totalTime":
+			var c int64
+			if s.version < 7.0 {
+				if err := json.Unmarshal(stats, &count); err != nil {
+					return err
+				}
+				c = count.Count
+			} else {
+				if err := json.Unmarshal(stats, &simpleCount); err != nil {
+					return err
+				}
+				c = simpleCount
+			}
+			metrics[format("%s_%s_%s_count", core, typ, stat)] += c
+		case "requestTimes":
+			if err := json.Unmarshal(stats, &requestTimes); err != nil {
+				return err
+			}
+			metrics[format("%s_%s_%s_count", core, typ, stat)] += requestTimes.Count
+			metrics[format("%s_%s_%s_min_ms", core, typ, stat)] += int64(requestTimes.MinMS * 1e6)
+			metrics[format("%s_%s_%s_mean_ms", core, typ, stat)] += int64(requestTimes.MeanMS * 1e6)
+			metrics[format("%s_%s_%s_median_ms", core, typ, stat)] += int64(requestTimes.MedianMS * 1e6)
+			metrics[format("%s_%s_%s_max_ms", core, typ, stat)] += int64(requestTimes.MaxMS * 1e6)
+			metrics[format("%s_%s_%s_p75_ms", core, typ, stat)] += int64(requestTimes.P75MS * 1e6)
+			metrics[format("%s_%s_%s_p95_ms", core, typ, stat)] += int64(requestTimes.P95MS * 1e6)
+			metrics[format("%s_%s_%s_p99_ms", core, typ, stat)] += int64(requestTimes.P99MS * 1e6)
+			metrics[format("%s_%s_%s_p999_ms", core, typ, stat)] += int64(requestTimes.P999MS * 1e6)
+		}
+	}
+
+	return nil
+}
+
+func (s *Solr) addCoreCharts(core string) {
+	charts := charts.Copy()
+
+	for _, chart := range *charts {
+		chart.ID = format("%s_%s", core, chart.ID)
+		chart.Fam = format("core %s", core)
+
+		for _, dim := range chart.Dims {
+			dim.ID = format("%s_%s", core, dim.ID)
+		}
+	}
+
+	_ = s.charts.Add(*charts...)
+}
+
+var format = fmt.Sprintf
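A note for readers of `parseCore` above: Solr 7 changed the wire shape of the `requests` and `totalTime` entries from objects (`{"count": N}`) to bare numbers, which is why the parser decodes each stat into `json.RawMessage` first and only then unmarshals based on the detected version (compare the v6 and v7 testdata files below). A minimal standalone sketch of that dual decode; `decodeCount` and `pre7` are illustrative names, not part of the module:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeCount handles both metric shapes: Solr <7 wraps counters in an
// object ({"count": 1}), while Solr 7+ emits a bare number (1).
func decodeCount(raw json.RawMessage, pre7 bool) (int64, error) {
	if pre7 {
		var v struct{ Count int64 }
		if err := json.Unmarshal(raw, &v); err != nil {
			return 0, err
		}
		return v.Count, nil
	}
	var n int64
	err := json.Unmarshal(raw, &n)
	return n, err
}

func main() {
	v6 := json.RawMessage(`{"count": 1}`)
	v7 := json.RawMessage(`1`)
	a, _ := decodeCount(v6, true)
	b, _ := decodeCount(v7, false)
	fmt.Println(a, b) // 1 1
}
```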
diff --git a/src/go/collectors/go.d.plugin/modules/solr/solr.go b/src/go/collectors/go.d.plugin/modules/solr/solr.go
new file mode 100644
index 00000000000000..57f2d7083b28bb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/solr/solr.go
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package solr
+
+import (
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("solr", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+const (
+	defaultURL         = "http://127.0.0.1:8983"
+	defaultHTTPTimeout = time.Second
+)
+
+const (
+	minSupportedVersion   = 6.4
+	coresHandlersURLPath  = "/solr/admin/metrics"
+	coresHandlersURLQuery = "group=core&prefix=UPDATE,QUERY&wt=json"
+	infoSystemURLPath     = "/solr/admin/info/system"
+	infoSystemURLQuery    = "wt=json"
+)
+
+type infoSystem struct {
+	Lucene struct {
+		Version string `json:"solr-spec-version"`
+	}
+}
+
+// New creates Solr with default values
+func New() *Solr {
+	config := Config{
+		HTTP: web.HTTP{
+			Request: web.Request{
+				URL: defaultURL,
+			},
+			Client: web.Client{
+				Timeout: web.Duration{Duration: defaultHTTPTimeout},
+			},
+		},
+	}
+	return &Solr{
+		Config: config,
+		cores:  make(map[string]bool),
+	}
+}
+
+// Config is the Solr module configuration.
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+// Solr is the Solr module.
+type Solr struct {
+	module.Base
+	Config `yaml:",inline"`
+
+	cores   map[string]bool
+	client  *http.Client
+	version float64
+	charts  *Charts
+}
+
+func (s *Solr) doRequest(req *http.Request) (*http.Response, error) {
+	return s.client.Do(req)
+}
+
+// Cleanup performs cleanup.
+func (Solr) Cleanup() {}
+
+// Init performs initialization.
+func (s *Solr) Init() bool {
+	if s.URL == "" {
+		s.Error("URL not set")
+		return false
+	}
+
+	client, err := web.NewHTTPClient(s.Client)
+	if err != nil {
+		s.Error(err)
+		return false
+	}
+
+	s.client = client
+	return true
+}
+
+// Check performs the check.
+func (s *Solr) Check() bool {
+	if err := s.getVersion(); err != nil {
+		s.Error(err)
+		return false
+	}
+
+	if s.version < minSupportedVersion {
+		s.Errorf("unsupported Solr version : %.1f", s.version)
+		return false
+	}
+
+	return true
+}
+
+// Charts creates and returns the charts.
+func (s *Solr) Charts() *Charts {
+	s.charts = &Charts{}
+
+	return s.charts
+}
+
+// Collect collects metrics
+func (s *Solr) Collect() map[string]int64 {
+	req, err := createRequest(s.Request, coresHandlersURLPath, coresHandlersURLQuery)
+	if err != nil {
+		s.Errorf("error on creating http request : %v", err)
+		return nil
+	}
+
+	resp, err := s.doRequest(req)
+	if err != nil {
+		s.Errorf("error on request to %s : %s", req.URL, err)
+		return nil
+	}
+	defer closeBody(resp)
+
+	if resp.StatusCode != http.StatusOK {
+		s.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+		return nil
+	}
+
+	metrics, err := s.parse(resp)
+	if err != nil {
+		s.Errorf("error on parse response from %s : %s", req.URL, err)
+		return nil
+	}
+
+	return metrics
+}
+
+func (s *Solr) getVersion() error {
+	req, err := createRequest(s.Request, infoSystemURLPath, infoSystemURLQuery)
+	if err != nil {
+		return fmt.Errorf("error on creating http request : %v", err)
+	}
+
+	resp, err := s.doRequest(req)
+	if err != nil {
+		return fmt.Errorf("error on request 
to %s : %s", req.URL, err) + } + defer closeBody(resp) + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) + } + + var info infoSystem + + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return fmt.Errorf("error on decode response from %s : %s", req.URL, err) + } + + var idx int + + if idx = strings.LastIndex(info.Lucene.Version, "."); idx == -1 { + return fmt.Errorf("error on parsing version '%s': bad format", info.Lucene.Version) + } + + if s.version, err = strconv.ParseFloat(info.Lucene.Version[:idx], 64); err != nil { + return fmt.Errorf("error on parsing version '%s' : %s", info.Lucene.Version, err) + } + + return nil +} + +func createRequest(req web.Request, urlPath, urlQuery string) (*http.Request, error) { + r := req.Copy() + u, err := url.Parse(r.URL) + if err != nil { + return nil, err + } + + u.Path = urlPath + u.RawQuery = urlQuery + r.URL = u.String() + return web.NewHTTPRequest(r) +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/solr/solr_test.go b/src/go/collectors/go.d.plugin/modules/solr/solr_test.go new file mode 100644 index 00000000000000..f545adeb05ac80 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/solr_test.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package solr + +import ( + "fmt" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + coreMetricsV6, _ = os.ReadFile("testdata/core-metrics-v6.txt") + coreMetricsV7, _ = os.ReadFile("testdata/core-metrics-v7.txt") +) + +func version(v string) string { + return format(`{ "lucene":{ "solr-spec-version":"%s"}}`, v) +} + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) +} + +func TestSolr_Init(t *testing.T) { + job := New() + + assert.True(t, job.Init()) + assert.NotNil(t, job.client) +} + +func TestSolr_Check(t *testing.T) { + job := New() + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/solr/admin/info/system" { + _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion)))) + return + } + })) + + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestSolr_Check_UnsupportedVersion(t *testing.T) { + job := New() + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/solr/admin/info/system" { + _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion-1)))) + return + } + })) + + job.URL = ts.URL + + require.True(t, job.Init()) + + assert.False(t, job.Check()) +} + +func TestSolr_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestSolr_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestSolr_CollectV6(t *testing.T) { + job := New() + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/solr/admin/info/system" { + _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion)))) + return + } + if r.URL.Path == "/solr/admin/metrics" { + _, _ = 
w.Write(coreMetricsV6) + return + } + })) + + job.URL = ts.URL + + require.True(t, job.Init()) + require.True(t, job.Check()) + require.NotNil(t, job.Charts()) + + expected := map[string]int64{ + "core2_query_requestTimes_min_ms": 0, + "core1_query_serverErrors_count": 3, + "core2_update_requestTimes_mean_ms": 0, + "core2_query_requestTimes_p99_ms": 297000000, + "core2_query_requestTimes_p999_ms": 2997000000, + "core1_update_requestTimes_p99_ms": 297000000, + "core2_update_requestTimes_p75_ms": 225000000, + "core2_update_requests_count": 3, + "core2_query_requestTimes_p75_ms": 225000000, + "core2_update_requestTimes_min_ms": 0, + "core2_query_clientErrors_count": 3, + "core2_query_requestTimes_count": 3, + "core2_query_requestTimes_median_ms": 0, + "core2_query_requestTimes_p95_ms": 285000000, + "core2_update_serverErrors_count": 3, + "core1_query_requestTimes_mean_ms": 0, + "core1_update_totalTime_count": 3, + "core1_update_errors_count": 3, + "core1_query_errors_count": 3, + "core1_query_timeouts_count": 3, + "core1_update_requestTimes_p95_ms": 285000000, + "core1_query_clientErrors_count": 3, + "core2_query_serverErrors_count": 3, + "core1_update_requestTimes_p75_ms": 225000000, + "core2_update_requestTimes_p99_ms": 297000000, + "core2_query_requests_count": 3, + "core2_update_clientErrors_count": 3, + "core1_update_requestTimes_min_ms": 0, + "core1_update_requestTimes_mean_ms": 0, + "core1_query_requestTimes_p95_ms": 285000000, + "core1_query_requestTimes_p999_ms": 2997000000, + "core1_update_serverErrors_count": 3, + "core1_query_requests_count": 3, + "core1_update_requestTimes_p999_ms": 2997000000, + "core1_query_requestTimes_p75_ms": 225000000, + "core1_update_requestTimes_count": 3, + "core2_update_requestTimes_p95_ms": 285000000, + "core1_query_requestTimes_count": 3, + "core1_query_requestTimes_p99_ms": 297000000, + "core1_update_requestTimes_median_ms": 0, + "core1_update_requestTimes_max_ms": 0, + "core2_update_requestTimes_count": 3, + "core1_query_requestTimes_min_ms": 0, + "core1_update_timeouts_count": 3, + "core2_update_timeouts_count": 3, + "core2_update_errors_count": 3, + "core1_update_requests_count": 3, + "core2_query_errors_count": 3, + "core1_query_requestTimes_median_ms": 0, + "core1_query_requestTimes_max_ms": 0, + "core1_update_clientErrors_count": 3, + "core2_update_requestTimes_median_ms": 0, + "core2_query_requestTimes_mean_ms": 0, + "core2_update_totalTime_count": 3, + "core2_update_requestTimes_max_ms": 0, + "core2_update_requestTimes_p999_ms": 2997000000, + "core2_query_timeouts_count": 3, + "core2_query_requestTimes_max_ms": 0, + "core1_query_totalTime_count": 3, + "core2_query_totalTime_count": 3, + } + + assert.Equal(t, expected, job.Collect()) + assert.Equal(t, expected, job.Collect()) +} + +func TestSolr_CollectV7(t *testing.T) { + job := New() + + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/solr/admin/info/system" { + _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion+1)))) + return + } + if r.URL.Path == "/solr/admin/metrics" { + _, _ = w.Write(coreMetricsV7) + return + } + })) + + job.URL = ts.URL + + require.True(t, job.Init()) + require.True(t, job.Check()) + require.NotNil(t, job.Charts()) + + expected := map[string]int64{ + "core1_query_requestTimes_p95_ms": 285000000, + "core1_query_timeouts_count": 3, + "core1_update_requestTimes_p999_ms": 2997000000, + "core2_query_requestTimes_mean_ms": 0, + "core2_query_timeouts_count": 3, + 
"core1_update_timeouts_count": 3, + "core1_update_requestTimes_mean_ms": 0, + "core2_update_serverErrors_count": 3, + "core2_query_requestTimes_min_ms": 0, + "core2_query_requestTimes_p75_ms": 225000000, + "core2_update_clientErrors_count": 3, + "core2_update_requestTimes_count": 3, + "core2_query_requestTimes_max_ms": 0, + "core1_query_requestTimes_mean_ms": 0, + "core1_update_totalTime_count": 3, + "core1_query_serverErrors_count": 3, + "core1_update_requestTimes_p99_ms": 297000000, + "core2_query_totalTime_count": 3, + "core2_update_requestTimes_max_ms": 0, + "core2_query_requestTimes_p99_ms": 297000000, + "core1_query_requestTimes_count": 3, + "core1_query_requestTimes_median_ms": 0, + "core1_query_clientErrors_count": 3, + "core2_update_requestTimes_mean_ms": 0, + "core2_update_requestTimes_median_ms": 0, + "core2_update_requestTimes_p95_ms": 285000000, + "core2_update_requestTimes_p999_ms": 2997000000, + "core2_update_totalTime_count": 3, + "core1_update_clientErrors_count": 3, + "core2_query_serverErrors_count": 3, + "core2_query_requests_count": 3, + "core1_update_serverErrors_count": 3, + "core1_update_requestTimes_p75_ms": 225000000, + "core2_update_requestTimes_min_ms": 0, + "core2_query_errors_count": 3, + "core1_update_errors_count": 3, + "core1_query_totalTime_count": 3, + "core1_update_requestTimes_p95_ms": 285000000, + "core2_query_requestTimes_p95_ms": 285000000, + "core2_query_requestTimes_p999_ms": 2997000000, + "core1_query_requestTimes_min_ms": 0, + "core2_update_errors_count": 3, + "core2_query_clientErrors_count": 3, + "core1_update_requestTimes_min_ms": 0, + "core1_query_requestTimes_max_ms": 0, + "core1_query_requestTimes_p75_ms": 225000000, + "core1_query_requestTimes_p999_ms": 2997000000, + "core2_update_requestTimes_p75_ms": 225000000, + "core2_update_timeouts_count": 3, + "core1_query_requestTimes_p99_ms": 297000000, + "core1_update_requests_count": 3, + "core1_update_requestTimes_median_ms": 0, + "core1_update_requestTimes_max_ms": 0, + "core2_update_requestTimes_p99_ms": 297000000, + "core2_query_requestTimes_count": 3, + "core1_query_errors_count": 3, + "core1_query_requests_count": 3, + "core1_update_requestTimes_count": 3, + "core2_update_requests_count": 3, + "core2_query_requestTimes_median_ms": 0, + } + + assert.Equal(t, expected, job.Collect()) + assert.Equal(t, expected, job.Collect()) +} + +func TestSolr_Collect_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v6.txt b/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v6.txt new file mode 100644 index 00000000000000..30d756b58b0e19 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v6.txt @@ -0,0 +1,794 @@ +{ + "responseHeader":{ + "status":0, + "QTime":5 + }, + "metrics":{ + "solr.core.core1":{ + "QUERY./select.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + 
"QUERY./select.requests":{ + "count":1 + }, + "QUERY./select.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.totalTime":{ + "count":1 + }, + "QUERY./sql.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./sql.requests":{ + "count":1 + }, + "QUERY./sql.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.totalTime":{ + "count":1 + }, + "QUERY./stream.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./stream.requests":{ + "count":1 + }, + "QUERY./stream.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.totalTime":{ + "count":1 + }, + "UPDATE./update.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update.requests":{ + "count":1 + }, + "UPDATE./update.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.totalTime":{ + "count":1 + }, + "UPDATE./update/csv.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/csv.requests":{ + "count":1 + }, + "UPDATE./update/csv.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.totalTime":{ + "count":1 + }, + 
"UPDATE./update/json.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/json.requests":{ + "count":1 + }, + "UPDATE./update/json.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.totalTime":{ + "count":1 + }, + "UPDATE.updateHandler.adds":{ + "value":0 + }, + "UPDATE.updateHandler.autoCommits":{ + "value":0 + }, + "UPDATE.updateHandler.commits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeAdds":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesById":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesByQuery":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.deletesById":{ + "value":0 + }, + "UPDATE.updateHandler.deletesByQuery":{ + "value":0 + }, + "UPDATE.updateHandler.docsPending":{ + "value":0 + }, + "UPDATE.updateHandler.errors":{ + "value":0 + }, + "UPDATE.updateHandler.expungeDeletes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.merges":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.optimizes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.rollbacks":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.softAutoCommits":{ + "value":0 + }, + "UPDATE.updateHandler.splits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + } + }, + "solr.core.core2":{ + "QUERY./select.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./select.requests":{ + "count":1 + }, + "QUERY./select.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.totalTime":{ + "count":1 + }, + "QUERY./sql.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.requestTimes":{ + "count":1, + "meanRate":0, + 
"1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./sql.requests":{ + "count":1 + }, + "QUERY./sql.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.totalTime":{ + "count":1 + }, + "QUERY./stream.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./stream.requests":{ + "count":1 + }, + "QUERY./stream.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.totalTime":{ + "count":1 + }, + "UPDATE./update.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update.requests":{ + "count":1 + }, + "UPDATE./update.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.totalTime":{ + "count":1 + }, + "UPDATE./update/csv.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/csv.requests":{ + "count":1 + }, + "UPDATE./update/csv.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.totalTime":{ + "count":1 + }, + "UPDATE./update/json.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/json.requests":{ + "count":1 + }, + "UPDATE./update/json.serverErrors":{ + "count":1, + "meanRate":0, + 
"1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.totalTime":{ + "count":1 + }, + "UPDATE.updateHandler.adds":{ + "value":0 + }, + "UPDATE.updateHandler.autoCommits":{ + "value":0 + }, + "UPDATE.updateHandler.commits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeAdds":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesById":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesByQuery":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.deletesById":{ + "value":0 + }, + "UPDATE.updateHandler.deletesByQuery":{ + "value":0 + }, + "UPDATE.updateHandler.docsPending":{ + "value":0 + }, + "UPDATE.updateHandler.errors":{ + "value":0 + }, + "UPDATE.updateHandler.expungeDeletes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.merges":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.optimizes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.rollbacks":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.softAutoCommits":{ + "value":0 + }, + "UPDATE.updateHandler.splits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + } + } + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v7.txt b/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v7.txt new file mode 100644 index 00000000000000..0567f0d9ba79de --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/solr/testdata/core-metrics-v7.txt @@ -0,0 +1,732 @@ +{ + "responseHeader":{ + "status":0, + "QTime":5 + }, + "metrics":{ + "solr.core.core1":{ + "QUERY./select.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.handlerStart":1546020968904, + "QUERY./select.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./select.requests":1, + "QUERY./select.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.totalTime":1, + "QUERY./sql.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.handlerStart":1546020968901, + "QUERY./sql.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + 
"p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./sql.requests":1, + "QUERY./sql.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.totalTime":1, + "QUERY./stream.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.handlerStart":1546020968894, + "QUERY./stream.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./stream.requests":1, + "QUERY./stream.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.totalTime":1, + "UPDATE./update.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.handlerStart":1546020968419, + "UPDATE./update.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update.requests":1, + "UPDATE./update.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.totalTime":1, + "UPDATE./update/csv.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.handlerStart":1546020968462, + "UPDATE./update/csv.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/csv.requests":1, + "UPDATE./update/csv.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.totalTime":1, + "UPDATE./update/json.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.handlerStart":1546020968445, + "UPDATE./update/json.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/json.requests":1, + "UPDATE./update/json.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + 
"UPDATE./update/json.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.totalTime":1, + "UPDATE.updateHandler.adds":0, + "UPDATE.updateHandler.autoCommitMaxTime":"15000ms", + "UPDATE.updateHandler.autoCommits":0, + "UPDATE.updateHandler.commits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeAdds":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesById":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesByQuery":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.deletesById":0, + "UPDATE.updateHandler.deletesByQuery":0, + "UPDATE.updateHandler.docsPending":0, + "UPDATE.updateHandler.errors":0, + "UPDATE.updateHandler.expungeDeletes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.merges":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.optimizes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.rollbacks":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.softAutoCommits":0, + "UPDATE.updateHandler.splits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + } + }, + "solr.core.core2":{ + "QUERY./select.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.handlerStart":1546020968904, + "QUERY./select.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./select.requests":1, + "QUERY./select.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./select.totalTime":1, + "QUERY./sql.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.handlerStart":1546020968901, + "QUERY./sql.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./sql.requests":1, + "QUERY./sql.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./sql.totalTime":1, + "QUERY./stream.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, 
+ "QUERY./stream.handlerStart":1546020968894, + "QUERY./stream.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "QUERY./stream.requests":1, + "QUERY./stream.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "QUERY./stream.totalTime":1, + "UPDATE./update.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.handlerStart":1546020968419, + "UPDATE./update.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update.requests":1, + "UPDATE./update.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update.totalTime":1, + "UPDATE./update/csv.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.handlerStart":1546020968462, + "UPDATE./update/csv.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/csv.requests":1, + "UPDATE./update/csv.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/csv.totalTime":1, + "UPDATE./update/json.clientErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.errors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.handlerStart":1546020968445, + "UPDATE./update/json.requestTimes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0, + "min_ms":0, + "max_ms":0, + "mean_ms":0, + "median_ms":0, + "stddev_ms":0, + "p75_ms":75, + "p95_ms":95, + "p99_ms":99, + "p999_ms":999 + }, + "UPDATE./update/json.requests":1, + "UPDATE./update/json.serverErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.timeouts":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE./update/json.totalTime":1, + "UPDATE.updateHandler.adds":0, + "UPDATE.updateHandler.autoCommitMaxTime":"15000ms", + "UPDATE.updateHandler.autoCommits":0, + "UPDATE.updateHandler.commits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeAdds":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesById":{ + "count":1, + 
"meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeDeletesByQuery":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.cumulativeErrors":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.deletesById":0, + "UPDATE.updateHandler.deletesByQuery":0, + "UPDATE.updateHandler.docsPending":0, + "UPDATE.updateHandler.errors":0, + "UPDATE.updateHandler.expungeDeletes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.merges":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.optimizes":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.rollbacks":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + }, + "UPDATE.updateHandler.softAutoCommits":0, + "UPDATE.updateHandler.splits":{ + "count":1, + "meanRate":0, + "1minRate":0, + "5minRate":0, + "15minRate":0 + } + } + } +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/README.md b/src/go/collectors/go.d.plugin/modules/springboot2/README.md new file mode 120000 index 00000000000000..67b32e5172b193 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/README.md @@ -0,0 +1 @@ +integrations/java_spring-boot_2_applications.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/charts.go b/src/go/collectors/go.d.plugin/modules/springboot2/charts.go new file mode 100644 index 00000000000000..9ca9c58062073e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/charts.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package springboot2 + +import ( + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "response_codes", + Title: "Response Codes", Units: "requests/s", Fam: "response_code", Type: module.Stacked, Ctx: "springboot2.response_codes", + Dims: Dims{ + {ID: "resp_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "resp_5xx", Name: "5xx", Algo: module.Incremental}, + {ID: "resp_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "resp_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "resp_1xx", Name: "1xx", Algo: module.Incremental}, + }, + }, + { + ID: "thread", + Title: "Threads", Units: "threads", Fam: "threads", Type: module.Area, Ctx: "springboot2.thread", + Dims: Dims{ + {ID: "threads_daemon", Name: "daemon"}, + {ID: "threads", Name: "total"}, + }, + }, + { + ID: "heap", + Title: "Overview", Units: "B", Fam: "heap", Type: module.Stacked, Ctx: "springboot2.heap", + Dims: Dims{ + {ID: "mem_free", Name: "free"}, + {ID: "heap_used_eden", Name: "eden"}, + {ID: "heap_used_survivor", Name: "survivor"}, + {ID: "heap_used_old", Name: "old"}, + }, + }, + { + ID: "heap_eden", + Title: "Eden Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_eden", + Dims: Dims{ + {ID: "heap_used_eden", Name: "used"}, + {ID: "heap_committed_eden", Name: "committed"}, + }, + }, + { + ID: "heap_survivor", + Title: "Survivor Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_survivor", + Dims: Dims{ + {ID: "heap_used_survivor", Name: "used"}, + {ID: 
"heap_committed_survivor", Name: "committed"}, + }, + }, + { + ID: "heap_old", + Title: "Old Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_old", + Dims: Dims{ + {ID: "heap_used_old", Name: "used"}, + {ID: "heap_committed_old", Name: "committed"}, + }, + }, + { + ID: "uptime", + Title: "The uptime of the Java virtual machine", Units: "seconds", Fam: "uptime", Type: module.Line, Ctx: "springboot2.uptime", + Dims: Dims{ + {ID: "uptime", Name: "uptime", Div: 1000}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/config_schema.json b/src/go/collectors/go.d.plugin/modules/springboot2/config_schema.json new file mode 100644 index 00000000000000..008a8bb2d25be8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/config_schema.json @@ -0,0 +1,76 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/springboot2 job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "uri_filter": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/integrations/java_spring-boot_2_applications.md b/src/go/collectors/go.d.plugin/modules/springboot2/integrations/java_spring-boot_2_applications.md new file mode 100644 index 00000000000000..534f75f921d1c1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/integrations/java_spring-boot_2_applications.md @@ -0,0 +1,233 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/springboot2/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/springboot2/metadata.yaml" +sidebar_label: "Java Spring-boot 2 applications" +learn_status: "Published" +learn_rel_path: "Data Collection/APM" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Java Spring-boot 2 applications + + +<img src="https://netdata.cloud/img/springboot.png" width="150"/> + + +Plugin: go.d.plugin +Module: springboot2 + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects applications running on localhost. 
+ + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Java Spring-boot 2 applications instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s | +| springboot2.thread | daemon, total | threads | +| springboot2.heap | free, eden, survivor, old | B | +| springboot2.heap_eden | used, committed | B | +| springboot2.heap_survivor | used, committed | B | +| springboot2.heap_old | used, committed | B | +| springboot2.uptime | uptime | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Configure Spring Boot Actuator + +The Spring Boot Actuator exposes metrics over HTTP. To use it: + +- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies. +- set `management.endpoints.web.exposure.include=*` in your `application.properties`. + +Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information. + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/springboot2.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/springboot2.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate.
| | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: https://127.0.0.1:8080/actuator/prometheus + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + + - name: remote + url: http://192.0.2.1:8080/actuator/prometheus + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m springboot2 + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/metadata.yaml b/src/go/collectors/go.d.plugin/modules/springboot2/metadata.yaml new file mode 100644 index 00000000000000..462d29dae3cba4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/metadata.yaml @@ -0,0 +1,239 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-springboot2 + plugin_name: go.d.plugin + module_name: springboot2 + monitored_instance: + name: Java Spring-boot 2 applications + link: "" + icon_filename: springboot.png + categories: + - data-collection.apm + keywords: + - springboot + related_resources: + integrations: + list: + - plugin_name: apps.plugin + module_name: apps + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: | + By default, it detects applications running on localhost. + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Configure Spring Boot Actuator + description: | + The Spring Boot Actuator exposes metrics over HTTP. To use it: + + - add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies. + - set `management.endpoints.web.exposure.include=*` in your `application.properties`.
+ + Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information. + configuration: + file: + name: go.d/springboot2.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: "" + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + username: username + password: password + - name: HTTPS with self-signed certificate + description: | + Do not validate server certificate chain and hostname. + config: | + jobs: + - name: local + url: https://127.0.0.1:8080/actuator/prometheus + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. 
+ config: | + jobs: + - name: local + url: http://127.0.0.1:8080/actuator/prometheus + + - name: remote + url: http://192.0.2.1:8080/actuator/prometheus + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: springboot2.response_codes + description: Response Codes + unit: requests/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: springboot2.thread + description: Threads + unit: threads + chart_type: area + dimensions: + - name: daemon + - name: total + - name: springboot2.heap + description: Overview + unit: B + chart_type: stacked + dimensions: + - name: free + - name: eden + - name: survivor + - name: old + - name: springboot2.heap_eden + description: Eden Space + unit: B + chart_type: area + dimensions: + - name: used + - name: committed + - name: springboot2.heap_survivor + description: Survivor Space + unit: B + chart_type: area + dimensions: + - name: used + - name: committed + - name: springboot2.heap_old + description: Old Space + unit: B + chart_type: area + dimensions: + - name: used + - name: committed + - name: springboot2.uptime + description: The uptime of the Java virtual machine + unit: seconds + chart_type: line + dimensions: + - name: uptime diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/springboot2.go b/src/go/collectors/go.d.plugin/modules/springboot2/springboot2.go new file mode 100644 index 00000000000000..cff9d9c0769621 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/springboot2.go @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package springboot2 + +import ( + _ "embed" + "strings" + "time" + + "github.com/netdata/go.d.plugin/pkg/matcher" + + mtx "github.com/netdata/go.d.plugin/pkg/metrics" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("springboot2", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + defaultHTTPTimeout = time.Second +) + +// New returns a SpringBoot2 instance with default values +func New() *SpringBoot2 { + return &SpringBoot2{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } +} + +// SpringBoot2 collects metrics from Spring Boot 2 applications +type SpringBoot2 struct { + module.Base + + web.HTTP `yaml:",inline"` + URIFilter matcher.SimpleExpr `yaml:"uri_filter"` + + uriFilter matcher.Matcher + + prom prometheus.Prometheus +} + +type metrics struct { + Uptime mtx.Gauge `stm:"uptime,1000"` + + ThreadsDaemon mtx.Gauge `stm:"threads_daemon"` + Threads mtx.Gauge `stm:"threads"` + + Resp1xx mtx.Counter `stm:"resp_1xx"` + Resp2xx mtx.Counter `stm:"resp_2xx"` + Resp3xx mtx.Counter `stm:"resp_3xx"` + Resp4xx mtx.Counter `stm:"resp_4xx"` + Resp5xx mtx.Counter `stm:"resp_5xx"` + + HeapUsed heap `stm:"heap_used"` + HeapCommitted heap `stm:"heap_committed"` + + MemFree mtx.Gauge `stm:"mem_free"` +} + +type heap struct { + Eden mtx.Gauge `stm:"eden"` + Survivor mtx.Gauge `stm:"survivor"` + Old mtx.Gauge `stm:"old"` +} + +// Cleanup performs cleanup (nothing to do for this module) +func (SpringBoot2) Cleanup() {} + +// Init
initializes the HTTP client, the optional URI filter matcher, and the Prometheus scraper +func (s *SpringBoot2) Init() bool { + client, err := web.NewHTTPClient(s.Client) + if err != nil { + s.Error(err) + return false + } + s.uriFilter, err = s.URIFilter.Parse() + if err != nil && err != matcher.ErrEmptyExpr { + s.Error(err) + return false + } + s.prom = prometheus.New(client, s.Request) + return true +} + +// Check scrapes the endpoint once and verifies that JVM memory metrics are present +func (s *SpringBoot2) Check() bool { + rawMetrics, err := s.prom.ScrapeSeries() + if err != nil { + s.Warning(err) + return false + } + jvmMemory := rawMetrics.FindByName("jvm_memory_used_bytes") + + return len(jvmMemory) > 0 +} + +// Charts returns a copy of the chart definitions +func (SpringBoot2) Charts() *Charts { + return charts.Copy() +} + +// Collect collects metrics +func (s *SpringBoot2) Collect() map[string]int64 { + rawMetrics, err := s.prom.ScrapeSeries() + if err != nil { + return nil + } + + var m metrics + + // uptime + m.Uptime.Set(rawMetrics.FindByName("process_uptime_seconds").Max()) + + // response + s.gatherResponse(rawMetrics, &m) + + // threads + m.ThreadsDaemon.Set(rawMetrics.FindByNames("jvm_threads_daemon", "jvm_threads_daemon_threads").Max()) + m.Threads.Set(rawMetrics.FindByNames("jvm_threads_live", "jvm_threads_live_threads").Max()) + + // heap memory: free is derived as committed minus used, summed across all pools + gatherHeap(rawMetrics.FindByName("jvm_memory_used_bytes"), &m.HeapUsed) + gatherHeap(rawMetrics.FindByName("jvm_memory_committed_bytes"), &m.HeapCommitted) + m.MemFree.Set(m.HeapCommitted.Sum() - m.HeapUsed.Sum()) + + return stm.ToMap(m) +} + +func gatherHeap(rawMetrics prometheus.Series, m *heap) { + for _, metric := range rawMetrics { + id := metric.Labels.Get("id") + value := metric.Value + switch { + case strings.Contains(id, "Eden"): + m.Eden.Set(value) + case strings.Contains(id, "Survivor"): + m.Survivor.Set(value) + case strings.Contains(id, "Old") || strings.Contains(id, "Tenured"): + m.Old.Set(value) + } + } +} + +func (s *SpringBoot2) gatherResponse(rawMetrics prometheus.Series, m *metrics) { + for _, metric := range rawMetrics.FindByName("http_server_requests_seconds_count") { + if s.uriFilter != nil { + uri := metric.Labels.Get("uri") + if !s.uriFilter.MatchString(uri) { + continue + } + } + + status := metric.Labels.Get("status") + if status == "" { + continue + } + value := metric.Value + switch status[0] { + case '1': + m.Resp1xx.Add(value) + case '2': + m.Resp2xx.Add(value) + case '3': + m.Resp3xx.Add(value) + case '4': + m.Resp4xx.Add(value) + case '5': + m.Resp5xx.Add(value) + } + } +} + +func (h heap) Sum() float64 { + return h.Eden.Value() + h.Survivor.Value() + h.Old.Value() +} diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/springboot2_test.go b/src/go/collectors/go.d.plugin/modules/springboot2/springboot2_test.go new file mode 100644 index 00000000000000..7198498d5d890f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/springboot2_test.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package springboot2 + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + testdata, _ = os.ReadFile("tests/testdata.txt") + testdata2, _ = os.ReadFile("tests/testdata2.txt") +) + +func TestSpringboot2_Collect(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/actuator/prometheus": + _, _ = w.Write(testdata) + case "/actuator/prometheus2": + _, _ = w.Write(testdata2) + } + })) + defer ts.Close() + job1 := New() + job1.HTTP.Request.URL = ts.URL + "/actuator/prometheus" +
assert.True(t, job1.Init()) + assert.True(t, job1.Check()) + assert.EqualValues( + t, + map[string]int64{ + "threads": 23, + "threads_daemon": 21, + "resp_1xx": 1, + "resp_2xx": 19, + "resp_3xx": 1, + "resp_4xx": 4, + "resp_5xx": 1, + "heap_used_eden": 129649936, + "heap_used_survivor": 8900136, + "heap_used_old": 17827920, + "heap_committed_eden": 153616384, + "heap_committed_survivor": 8912896, + "heap_committed_old": 40894464, + "mem_free": 47045752, + "uptime": 191730, + }, + job1.Collect(), + ) + + job2 := New() + job2.HTTP.Request.URL = ts.URL + "/actuator/prometheus2" + assert.True(t, job2.Init()) + assert.True(t, job2.Check()) + assert.EqualValues( + t, + map[string]int64{ + "threads": 36, + "threads_daemon": 22, + "resp_1xx": 0, + "resp_2xx": 57740, + "resp_3xx": 0, + "resp_4xx": 4, + "resp_5xx": 0, + "heap_used_eden": 18052960, + "heap_used_survivor": 302704, + "heap_used_old": 40122672, + "heap_committed_eden": 21430272, + "heap_committed_survivor": 2621440, + "heap_committed_old": 53182464, + "mem_free": 18755840, + "uptime": 45501125, + }, + job2.Collect(), + ) +} + +func TestSpringboot2_404(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer ts.Close() + job := New() + job.HTTP.Request.URL = ts.URL + "/actuator/prometheus" + + job.Init() + + assert.False(t, job.Check()) + + job.Cleanup() +} + +func TestSpringBoot2_Charts(t *testing.T) { + job := New() + charts := job.Charts() + + assert.True(t, charts.Has("response_codes")) + assert.True(t, charts.Has("uptime")) +} diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata.txt b/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata.txt new file mode 100644 index 00000000000000..11c70e40d70527 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata.txt @@ -0,0 +1,194 @@ +# HELP tomcat_cache_access_total +# TYPE tomcat_cache_access_total counter +tomcat_cache_access_total 0.0 +# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC +# TYPE jvm_gc_memory_promoted_bytes_total counter +jvm_gc_memory_promoted_bytes_total 562080.0 +# HELP tomcat_cache_hit_total +# TYPE tomcat_cache_hit_total counter +tomcat_cache_hit_total 0.0 +# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC +# TYPE jvm_gc_live_data_size_bytes gauge +jvm_gc_live_data_size_bytes 0.0 +# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management +# TYPE jvm_memory_max_bytes gauge +jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8 +jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0 +jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 +jvm_memory_max_bytes{area="heap",id="PS Eden Space",} 1.55189248E8 +jvm_memory_max_bytes{area="heap",id="PS Survivor Space",} 8912896.0 +jvm_memory_max_bytes{area="heap",id="PS Old Gen",} 3.49700096E8 +# HELP system_cpu_count The number of processors available to the Java virtual machine +# TYPE system_cpu_count gauge +system_cpu_count 2.0 +# HELP tomcat_global_request_seconds +# TYPE tomcat_global_request_seconds summary +tomcat_global_request_seconds_count{name="http-nio-8080",} 23.0 +tomcat_global_request_seconds_sum{name="http-nio-8080",} 1.205 +# HELP jvm_threads_daemon The current number of live daemon threads +# TYPE jvm_threads_daemon gauge +jvm_threads_daemon 21.0 +# HELP 
jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool +# TYPE jvm_buffer_memory_used_bytes gauge +jvm_buffer_memory_used_bytes{id="direct",} 81920.0 +jvm_buffer_memory_used_bytes{id="mapped",} 0.0 +# HELP jvm_buffer_count An estimate of the number of buffers in the pool +# TYPE jvm_buffer_count gauge +jvm_buffer_count{id="direct",} 10.0 +jvm_buffer_count{id="mapped",} 0.0 +# HELP tomcat_threads_current +# TYPE tomcat_threads_current gauge +tomcat_threads_current{name="http-nio-8080",} 10.0 +# HELP tomcat_sessions_created_total +# TYPE tomcat_sessions_created_total counter +tomcat_sessions_created_total 0.0 +# HELP system_cpu_usage The "recent cpu usage" for the whole system +# TYPE system_cpu_usage gauge +system_cpu_usage 0.03682658419046249 +# HELP tomcat_sessions_alive_max_seconds +# TYPE tomcat_sessions_alive_max_seconds gauge +tomcat_sessions_alive_max_seconds 0.0 +# HELP tomcat_servlet_error_total +# TYPE tomcat_servlet_error_total counter +tomcat_servlet_error_total{name="default",} 0.0 +# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time +# TYPE system_load_average_1m gauge +system_load_average_1m 0.2001953125 +# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool +# TYPE jvm_gc_max_data_size_bytes gauge +jvm_gc_max_data_size_bytes 0.0 +# HELP tomcat_sessions_expired_total +# TYPE tomcat_sessions_expired_total counter +tomcat_sessions_expired_total 0.0 +# HELP tomcat_sessions_rejected_total +# TYPE tomcat_sessions_rejected_total counter +tomcat_sessions_rejected_total 0.0 +# HELP process_start_time_seconds The start time of the Java virtual machine +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.544161580708E9 +# HELP jvm_threads_live The current number of live threads including both daemon and non-daemon threads +# TYPE jvm_threads_live gauge +jvm_threads_live 23.0 +# HELP jvm_classes_loaded The number of classes that are currently loaded in the Java virtual machine +# TYPE jvm_classes_loaded gauge +jvm_classes_loaded 7846.0 +# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next +# TYPE jvm_gc_memory_allocated_bytes_total counter +jvm_gc_memory_allocated_bytes_total 3.13524224E8 +# HELP process_uptime_seconds The uptime of the Java virtual machine +# TYPE process_uptime_seconds gauge +process_uptime_seconds 191.73 +# HELP tomcat_global_error_total +# TYPE tomcat_global_error_total counter +tomcat_global_error_total{name="http-nio-8080",} 4.0 +# HELP tomcat_threads_config_max +# TYPE tomcat_threads_config_max gauge +tomcat_threads_config_max{name="http-nio-8080",} 200.0 +# HELP jvm_threads_peak The peak live thread count since the Java virtual machine started or peak was reset +# TYPE jvm_threads_peak gauge +jvm_threads_peak 25.0 +# HELP jvm_classes_unloaded_total The total number of classes unloaded since the Java virtual machine has started execution +# TYPE jvm_classes_unloaded_total counter +jvm_classes_unloaded_total 0.0 +# HELP process_files_max The maximum file descriptor count +# TYPE process_files_max gauge +process_files_max 1048576.0 +# HELP tomcat_servlet_request_max_seconds +# TYPE tomcat_servlet_request_max_seconds gauge +tomcat_servlet_request_max_seconds{name="default",} 0.0 +# HELP tomcat_sessions_active_max +# TYPE 
tomcat_sessions_active_max gauge +tomcat_sessions_active_max 0.0 +# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use +# TYPE jvm_memory_committed_bytes gauge +jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 1.3369344E7 +jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 4.390912E7 +jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 5636096.0 +jvm_memory_committed_bytes{area="heap",id="PS Eden Space",} 1.53616384E8 +jvm_memory_committed_bytes{area="heap",id="PS Survivor Space",} 8912896.0 +jvm_memory_committed_bytes{area="heap",id="PS Old Gen",} 4.0894464E7 +# HELP tomcat_servlet_request_seconds +# TYPE tomcat_servlet_request_seconds summary +tomcat_servlet_request_seconds_count{name="default",} 0.0 +tomcat_servlet_request_seconds_sum{name="default",} 0.0 +# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool +# TYPE jvm_buffer_total_capacity_bytes gauge +jvm_buffer_total_capacity_bytes{id="direct",} 81920.0 +jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 +# HELP tomcat_global_received_bytes_total +# TYPE tomcat_global_received_bytes_total counter +tomcat_global_received_bytes_total{name="http-nio-8080",} 0.0 +# HELP jvm_gc_pause_seconds Time spent in GC pause +# TYPE jvm_gc_pause_seconds summary +jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 2.0 +jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 0.06 +# HELP jvm_gc_pause_seconds_max Time spent in GC pause +# TYPE jvm_gc_pause_seconds_max gauge +jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.0 +# HELP process_files_open The open file descriptor count +# TYPE process_files_open gauge +process_files_open 29.0 +# HELP tomcat_global_sent_bytes_total +# TYPE tomcat_global_sent_bytes_total counter +tomcat_global_sent_bytes_total{name="http-nio-8080",} 63044.0 +# HELP tomcat_threads_busy +# TYPE tomcat_threads_busy gauge +tomcat_threads_busy{name="http-nio-8080",} 1.0 +# HELP tomcat_global_request_max_seconds +# TYPE tomcat_global_request_max_seconds gauge +tomcat_global_request_max_seconds{name="http-nio-8080",} 0.282 +# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process +# TYPE process_cpu_usage gauge +process_cpu_usage 0.019132561317701215 +# HELP jvm_memory_used_bytes The amount of used memory +# TYPE jvm_memory_used_bytes gauge +jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 1.3269376E7 +jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 4.1364704E7 +jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 5125872.0 +jvm_memory_used_bytes{area="heap",id="PS Eden Space",} 1.29649936E8 +jvm_memory_used_bytes{area="heap",id="PS Survivor Space",} 8900136.0 +jvm_memory_used_bytes{area="heap",id="PS Old Gen",} 1.782792E7 +# HELP logback_events_total Number of error level events that made it to the logs +# TYPE logback_events_total counter +logback_events_total{level="error",} 0.0 +logback_events_total{level="warn",} 0.0 +logback_events_total{level="info",} 41.0 +logback_events_total{level="debug",} 0.0 +logback_events_total{level="trace",} 0.0 +# HELP tomcat_sessions_active_current +# TYPE tomcat_sessions_active_current gauge +tomcat_sessions_active_current 0.0 +# HELP http_server_requests_seconds +# TYPE http_server_requests_seconds summary 
+http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 6.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.2367162 +http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/**",} 3.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/**",} 0.0516521 +http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 5.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0587843 +http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/hello",} 4.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/hello",} 0.0470746 +http_server_requests_seconds_count{exception="None",method="GET",status="102",uri="/hello",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="102",uri="/hello",} 0.0470746 +http_server_requests_seconds_count{exception="None",method="GET",status="302",uri="/hello",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="302",uri="/hello",} 0.0470746 +http_server_requests_seconds_count{exception="None",method="GET",status="503",uri="/hello",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="503",uri="/hello",} 0.0470746 +http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/",} 2.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/",} 0.1888718 +http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/health",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562 +http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837 +http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/metrics",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195 +# HELP http_server_requests_seconds_max +# TYPE http_server_requests_seconds_max gauge +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.1311382 +http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/**",} 0.031655 +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0449076 +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/hello",} 0.0248288 +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/",} 0.1840505 +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562 +http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837 +http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata2.txt b/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata2.txt new file mode 100644 index 
00000000000000..78bbdf5cd9b65c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/springboot2/tests/testdata2.txt @@ -0,0 +1,193 @@ +# HELP jvm_classes_loaded_classes The number of classes that are currently loaded in the Java virtual machine +# TYPE jvm_classes_loaded_classes gauge +jvm_classes_loaded_classes 12360.0 +# HELP process_files_open_files The open file descriptor count +# TYPE process_files_open_files gauge +process_files_open_files 46.0 +# HELP jvm_memory_used_bytes The amount of used memory +# TYPE jvm_memory_used_bytes gauge +jvm_memory_used_bytes{area="heap",id="Tenured Gen",} 4.0122672E7 +jvm_memory_used_bytes{area="heap",id="Eden Space",} 1.805296E7 +jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 6.6824752E7 +jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.6224704E7 +jvm_memory_used_bytes{area="heap",id="Survivor Space",} 302704.0 +jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 8236936.0 +# HELP system_cpu_count The number of processors available to the Java virtual machine +# TYPE system_cpu_count gauge +system_cpu_count 1.0 +# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process +# TYPE process_cpu_usage gauge +process_cpu_usage 0.0 +# HELP tomcat_sessions_alive_max_seconds +# TYPE tomcat_sessions_alive_max_seconds gauge +tomcat_sessions_alive_max_seconds 0.0 +# HELP tomcat_global_sent_bytes_total +# TYPE tomcat_global_sent_bytes_total counter +tomcat_global_sent_bytes_total{name="http-nio-17001",} 7.06007212E8 +# HELP jvm_threads_states_threads The current number of threads having NEW state +# TYPE jvm_threads_states_threads gauge +jvm_threads_states_threads{state="runnable",} 10.0 +jvm_threads_states_threads{state="blocked",} 0.0 +jvm_threads_states_threads{state="waiting",} 22.0 +jvm_threads_states_threads{state="timed-waiting",} 4.0 +jvm_threads_states_threads{state="new",} 0.0 +jvm_threads_states_threads{state="terminated",} 0.0 +# HELP process_start_time_seconds Start time of the process since unix epoch. 
+# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.552476492313E9 +# HELP tomcat_sessions_active_max_sessions +# TYPE tomcat_sessions_active_max_sessions gauge +tomcat_sessions_active_max_sessions 0.0 +# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC +# TYPE jvm_gc_live_data_size_bytes gauge +jvm_gc_live_data_size_bytes 3.1908592E7 +# HELP spring_integration_channels The number of message channels +# TYPE spring_integration_channels gauge +spring_integration_channels 6.0 +# HELP system_cpu_usage The "recent cpu usage" for the whole system +# TYPE system_cpu_usage gauge +system_cpu_usage 0.047619047619047616 +# HELP jvm_classes_unloaded_classes_total The total number of classes unloaded since the Java virtual machine has started execution +# TYPE jvm_classes_unloaded_classes_total counter +jvm_classes_unloaded_classes_total 0.0 +# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management +# TYPE jvm_memory_max_bytes gauge +jvm_memory_max_bytes{area="heap",id="Tenured Gen",} 6.61323776E8 +jvm_memory_max_bytes{area="heap",id="Eden Space",} 2.64568832E8 +jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0 +jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8 +jvm_memory_max_bytes{area="heap",id="Survivor Space",} 3.3030144E7 +jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 +# HELP logback_events_total Number of error level events that made it to the logs +# TYPE logback_events_total counter +logback_events_total{level="warn",} 1.0 +logback_events_total{level="debug",} 0.0 +logback_events_total{level="error",} 0.0 +logback_events_total{level="trace",} 0.0 +logback_events_total{level="info",} 30.0 +# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool +# TYPE jvm_gc_max_data_size_bytes gauge +jvm_gc_max_data_size_bytes 6.61323776E8 +# HELP tomcat_sessions_created_sessions_total +# TYPE tomcat_sessions_created_sessions_total counter +tomcat_sessions_created_sessions_total 0.0 +# HELP process_files_max_files The maximum file descriptor count +# TYPE process_files_max_files gauge +process_files_max_files 1006500.0 +# HELP spring_integration_sources The number of message sources +# TYPE spring_integration_sources gauge +spring_integration_sources 5.0 +# HELP tomcat_global_request_seconds +# TYPE tomcat_global_request_seconds summary +tomcat_global_request_seconds_count{name="http-nio-17001",} 57744.0 +tomcat_global_request_seconds_sum{name="http-nio-17001",} 113.513 +# HELP tomcat_sessions_active_current_sessions +# TYPE tomcat_sessions_active_current_sessions gauge +tomcat_sessions_active_current_sessions 0.0 +# HELP tomcat_global_error_total +# TYPE tomcat_global_error_total counter +tomcat_global_error_total{name="http-nio-17001",} 0.0 +# HELP jvm_threads_daemon_threads The current number of live daemon threads +# TYPE jvm_threads_daemon_threads gauge +jvm_threads_daemon_threads 22.0 +# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next +# TYPE jvm_gc_memory_allocated_bytes_total counter +jvm_gc_memory_allocated_bytes_total 2.7071024304E10 +# HELP http_server_requests_seconds +# TYPE http_server_requests_seconds summary +http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 57717.0 
+http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 108.648599202 +http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 13.0 +http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 2.504856475 +http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 1.0 +http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 5.959808087 +http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 9.0 +http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0506538 +http_server_requests_seconds_count{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 4.0 +http_server_requests_seconds_sum{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.00875155 +# HELP http_server_requests_seconds_max +# TYPE http_server_requests_seconds_max gauge +http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 0.007270684 +http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 0.0 +http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 0.0 +http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0 +http_server_requests_seconds_max{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.0 +# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool +# TYPE jvm_buffer_total_capacity_bytes gauge +jvm_buffer_total_capacity_bytes{id="direct",} 278528.0 +jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 +# HELP spring_integration_handlers The number of message handlers +# TYPE spring_integration_handlers gauge +spring_integration_handlers 5.0 +# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC +# TYPE jvm_gc_memory_promoted_bytes_total counter +jvm_gc_memory_promoted_bytes_total 2.4583704E7 +# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool +# TYPE jvm_buffer_count_buffers gauge +jvm_buffer_count_buffers{id="direct",} 15.0 +jvm_buffer_count_buffers{id="mapped",} 0.0 +# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use +# TYPE jvm_memory_committed_bytes gauge +jvm_memory_committed_bytes{area="heap",id="Tenured Gen",} 5.3182464E7 +jvm_memory_committed_bytes{area="heap",id="Eden Space",} 2.1430272E7 +jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 7.0803456E7 +jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 2.6804224E7 +jvm_memory_committed_bytes{area="heap",id="Survivor Space",} 2621440.0 +jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 8953856.0 +# HELP tomcat_global_request_max_seconds +# TYPE tomcat_global_request_max_seconds gauge +tomcat_global_request_max_seconds{name="http-nio-17001",} 6.049 +# HELP process_uptime_seconds The uptime of the Java virtual machine +# TYPE process_uptime_seconds gauge 
+process_uptime_seconds 45501.125 +# HELP tomcat_threads_config_max_threads +# TYPE tomcat_threads_config_max_threads gauge +tomcat_threads_config_max_threads{name="http-nio-17001",} 200.0 +# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool +# TYPE jvm_buffer_memory_used_bytes gauge +jvm_buffer_memory_used_bytes{id="direct",} 278529.0 +jvm_buffer_memory_used_bytes{id="mapped",} 0.0 +# HELP http_client_requests_seconds Timer of WebClient operation +# TYPE http_client_requests_seconds summary +http_client_requests_seconds_count{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 1.0 +http_client_requests_seconds_sum{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 2.258042154 +http_client_requests_seconds_count{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 2.0 +http_client_requests_seconds_sum{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.305785165 +# HELP http_client_requests_seconds_max Timer of WebClient operation +# TYPE http_client_requests_seconds_max gauge +http_client_requests_seconds_max{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 0.0 +http_client_requests_seconds_max{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.0 +# HELP tomcat_global_received_bytes_total +# TYPE tomcat_global_received_bytes_total counter +tomcat_global_received_bytes_total{name="http-nio-17001",} 0.0 +# HELP jvm_threads_peak_threads The peak live thread count since the Java virtual machine started or peak was reset +# TYPE jvm_threads_peak_threads gauge +jvm_threads_peak_threads 36.0 +# HELP jvm_threads_live_threads The current number of live threads including both daemon and non-daemon threads +# TYPE jvm_threads_live_threads gauge +jvm_threads_live_threads 36.0 +# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time +# TYPE system_load_average_1m gauge +system_load_average_1m 0.02 +# HELP tomcat_threads_current_threads +# TYPE tomcat_threads_current_threads gauge +tomcat_threads_current_threads{name="http-nio-17001",} 10.0 +# HELP tomcat_sessions_expired_sessions_total +# TYPE tomcat_sessions_expired_sessions_total counter +tomcat_sessions_expired_sessions_total 0.0 +# HELP tomcat_sessions_rejected_sessions_total +# TYPE tomcat_sessions_rejected_sessions_total counter +tomcat_sessions_rejected_sessions_total 0.0 +# HELP jvm_gc_pause_seconds Time spent in GC pause +# TYPE jvm_gc_pause_seconds summary +jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 +jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.1 +jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 1269.0 +jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 5.909 +# HELP jvm_gc_pause_seconds_max Time spent in GC pause +# TYPE jvm_gc_pause_seconds_max gauge +jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 +jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.004 +# HELP tomcat_threads_busy_threads +# TYPE tomcat_threads_busy_threads gauge +tomcat_threads_busy_threads{name="http-nio-17001",} 1.0 \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/modules/squidlog/README.md b/src/go/collectors/go.d.plugin/modules/squidlog/README.md new file mode 120000 index 00000000000000..876d4b47aee707 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/README.md @@ -0,0 +1 @@ +integrations/squid_log_files.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/charts.go b/src/go/collectors/go.d.plugin/modules/squidlog/charts.go new file mode 100644 index 00000000000000..610c562cf10a41 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/charts.go @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + Charts = module.Charts + Chart = module.Chart + Dims = module.Dims + Dim = module.Dim +) + +const ( + prioReqTotal = module.Priority + iota + prioReqExcluded + prioReqType + + prioHTTPRespCodesClass + prioHTTPRespCodes + + prioUniqClients + + prioBandwidth + + prioRespTime + + prioCacheCode + prioCacheTransportTag + prioCacheHandlingTag + prioCacheObjectTag + prioCacheLoadSourceTag + prioCacheErrorTag + + prioReqMethod + + prioHierCode + prioServers + + prioMimeType +) + +var ( + // Requests + reqTotalChart = Chart{ + ID: "requests", + Title: "Total Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "squidlog.requests", + Priority: prioReqTotal, + Dims: Dims{ + {ID: "requests", Algo: module.Incremental}, + }, + } + reqExcludedChart = Chart{ + ID: "excluded_requests", + Title: "Excluded Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "squidlog.excluded_requests", + Priority: prioReqExcluded, + Dims: Dims{ + {ID: "unmatched", Algo: module.Incremental}, + }, + } + reqTypesChart = Chart{ + ID: "requests_by_type", + Title: "Requests By Type", + Units: "requests/s", + Fam: "requests", + Ctx: "squidlog.type_requests", + Type: module.Stacked, + Priority: prioReqType, + Dims: Dims{ + {ID: "req_type_success", Name: "success", Algo: module.Incremental}, + {ID: "req_type_bad", Name: "bad", Algo: module.Incremental}, + {ID: "req_type_redirect", Name: "redirect", Algo: module.Incremental}, + {ID: "req_type_error", Name: "error", Algo: module.Incremental}, + }, + } + + // HTTP Code + httpRespCodeClassChart = Chart{ + ID: "responses_by_http_status_code_class", + Title: "Responses By HTTP Status Code Class", + Units: "responses/s", + Fam: "http code", + Ctx: "squidlog.http_status_code_class_responses", + Type: module.Stacked, + Priority: prioHTTPRespCodesClass, + Dims: Dims{ + {ID: "http_resp_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "http_resp_5xx", Name: "5xx", Algo: module.Incremental}, + {ID: "http_resp_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "http_resp_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "http_resp_1xx", Name: "1xx", Algo: module.Incremental}, + {ID: "http_resp_0xx", Name: "0xx", Algo: module.Incremental}, + {ID: "http_resp_6xx", Name: "6xx", Algo: module.Incremental}, + }, + } + httpRespCodesChart = Chart{ + ID: "responses_by_http_status_code", + Title: "Responses By HTTP Status Code", + Units: "responses/s", + Fam: "http code", + Ctx: "squidlog.http_status_code_responses", + Type: module.Stacked, + Priority: prioHTTPRespCodes, + } + + // Bandwidth + bandwidthChart = Chart{ + ID: "bandwidth", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "squidlog.bandwidth", + Priority: prioBandwidth, + Dims: Dims{ + {ID: "bytes_sent", Name: "sent", Algo: module.Incremental, Div: 
1000}, + }, + } + + // Response Time + respTimeChart = Chart{ + ID: "response_time", + Title: "Response Time", + Units: "milliseconds", + Fam: "timings", + Ctx: "squidlog.response_time", + Priority: prioRespTime, + Dims: Dims{ + {ID: "resp_time_min", Name: "min", Div: 1000}, + {ID: "resp_time_max", Name: "max", Div: 1000}, + {ID: "resp_time_avg", Name: "avg", Div: 1000}, + }, + } + + // Clients + uniqClientsChart = Chart{ + ID: "uniq_clients", + Title: "Unique Clients", + Units: "clients/s", + Fam: "clients", + Ctx: "squidlog.uniq_clients", + Priority: prioUniqClients, + Dims: Dims{ + {ID: "uniq_clients", Name: "clients"}, + }, + } + + // Cache Code Result + cacheCodeChart = Chart{ + ID: "requests_by_cache_result_code", + Title: "Requests By Cache Result Code", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_result_code_requests", + Priority: prioCacheCode, + Type: module.Stacked, + } + cacheCodeTransportTagChart = Chart{ + ID: "requests_by_cache_result_code_transport_tag", + Title: "Requests By Cache Result Delivery Transport Tag", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_result_code_transport_tag_requests", + Type: module.Stacked, + Priority: prioCacheTransportTag, + } + cacheCodeHandlingTagChart = Chart{ + ID: "requests_by_cache_result_code_handling_tag", + Title: "Requests By Cache Result Handling Tag", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_result_code_handling_tag_requests", + Type: module.Stacked, + Priority: prioCacheHandlingTag, + } + cacheCodeObjectTagChart = Chart{ + ID: "requests_by_cache_code_object_tag", + Title: "Requests By Cache Result Produced Object Tag", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_code_object_tag_requests", + Type: module.Stacked, + Priority: prioCacheObjectTag, + } + cacheCodeLoadSourceTagChart = Chart{ + ID: "requests_by_cache_code_load_source_tag", + Title: "Requests By Cache Result Load Source Tag", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_code_load_source_tag_requests", + Type: module.Stacked, + Priority: prioCacheLoadSourceTag, + } + cacheCodeErrorTagChart = Chart{ + ID: "requests_by_cache_code_error_tag", + Title: "Requests By Cache Result Errors Tag", + Units: "requests/s", + Fam: "cache result", + Ctx: "squidlog.cache_code_error_tag_requests", + Type: module.Stacked, + Priority: prioCacheErrorTag, + } + + // HTTP Method + reqMethodChart = Chart{ + ID: "requests_by_http_method", + Title: "Requests By HTTP Method", + Units: "requests/s", + Fam: "http method", + Ctx: "squidlog.http_method_requests", + Type: module.Stacked, + Priority: prioReqMethod, + } + + // MIME Type + mimeTypeChart = Chart{ + ID: "requests_by_mime_type", + Title: "Requests By MIME Type", + Units: "requests/s", + Fam: "mime type", + Ctx: "squidlog.mime_type_requests", + Type: module.Stacked, + Priority: prioMimeType, + } + + // Hierarchy + hierCodeChart = Chart{ + ID: "requests_by_hier_code", + Title: "Requests By Hierarchy Code", + Units: "requests/s", + Fam: "hierarchy", + Ctx: "squidlog.hier_code_requests", + Type: module.Stacked, + Priority: prioHierCode, + } + serverAddrChart = Chart{ + ID: "forwarded_requests_by_server_address", + Title: "Forwarded Requests By Server Address", + Units: "requests/s", + Fam: "hierarchy", + Ctx: "squidlog.server_address_forwarded_requests", + Type: module.Stacked, + Priority: prioServers, + } +) + +func (s *SquidLog) createCharts(line *logLine) error { + if line.empty() { + return errors.New("empty line") + } + 
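+ // The chart set is built dynamically: the always-present request charts go first, and the field-specific charts below are appended only for fields actually present in the parsed line.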
charts := &Charts{ + reqTotalChart.Copy(), + reqExcludedChart.Copy(), + } + if line.hasRespTime() { + if err := addRespTimeCharts(charts); err != nil { + return err + } + } + if line.hasClientAddress() { + if err := addClientAddressCharts(charts); err != nil { + return err + } + } + if line.hasCacheCode() { + if err := addCacheCodeCharts(charts); err != nil { + return err + } + } + if line.hasHTTPCode() { + if err := addHTTPRespCodeCharts(charts); err != nil { + return err + } + } + if line.hasRespSize() { + if err := addRespSizeCharts(charts); err != nil { + return err + } + } + if line.hasReqMethod() { + if err := addMethodCharts(charts); err != nil { + return err + } + } + if line.hasHierCode() { + if err := addHierCodeCharts(charts); err != nil { + return err + } + } + if line.hasServerAddress() { + if err := addServerAddressCharts(charts); err != nil { + return err + } + } + if line.hasMimeType() { + if err := addMimeTypeCharts(charts); err != nil { + return err + } + } + s.charts = charts + return nil +} + +func addRespTimeCharts(charts *Charts) error { + return charts.Add(respTimeChart.Copy()) +} + +func addClientAddressCharts(charts *Charts) error { + return charts.Add(uniqClientsChart.Copy()) +} + +func addCacheCodeCharts(charts *Charts) error { + cs := []Chart{ + cacheCodeChart, + cacheCodeTransportTagChart, + cacheCodeHandlingTagChart, + cacheCodeObjectTagChart, + cacheCodeLoadSourceTagChart, + cacheCodeErrorTagChart, + } + for _, chart := range cs { + if err := charts.Add(chart.Copy()); err != nil { + return err + } + } + return nil +} +func addHTTPRespCodeCharts(charts *Charts) error { + cs := []Chart{ + reqTypesChart, + httpRespCodeClassChart, + httpRespCodesChart, + } + for _, chart := range cs { + if err := charts.Add(chart.Copy()); err != nil { + return err + } + } + return nil +} + +func addRespSizeCharts(charts *Charts) error { + return charts.Add(bandwidthChart.Copy()) +} + +func addMethodCharts(charts *Charts) error { + return charts.Add(reqMethodChart.Copy()) +} + +func addHierCodeCharts(charts *Charts) error { + return charts.Add(hierCodeChart.Copy()) +} +func addServerAddressCharts(charts *Charts) error { + return charts.Add(serverAddrChart.Copy()) +} + +func addMimeTypeCharts(charts *Charts) error { + return charts.Add(mimeTypeChart.Copy()) +} diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/collect.go b/src/go/collectors/go.d.plugin/modules/squidlog/collect.go new file mode 100644 index 00000000000000..20d3f86e888fd0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/collect.go @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import ( + "io" + "runtime" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/logs" + "github.com/netdata/go.d.plugin/pkg/stm" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (s SquidLog) logPanicStackIfAny() { + err := recover() + if err == nil { + return + } + s.Errorf("[ERROR] %s\n", err) + for depth := 0; ; depth++ { + _, file, line, ok := runtime.Caller(depth) + if !ok { + break + } + s.Errorf("======> %d: %v:%d", depth, file, line) + } + panic(err) +} + +func (s *SquidLog) collect() (map[string]int64, error) { + defer s.logPanicStackIfAny() + s.mx.reset() + + var mx map[string]int64 + + n, err := s.collectLogLines() + + if n > 0 || err == nil { + mx = stm.ToMap(s.mx) + } + return mx, err +} + +func (s *SquidLog) collectLogLines() (int, error) { + var n int + for { + s.line.reset() + err := s.parser.ReadLine(s.line) + if err != nil { + if err == 
io.EOF { + return n, nil + } + if !logs.IsParseError(err) { + return n, err + } + n++ + s.collectUnmatched() + continue + } + n++ + if s.line.empty() { + s.collectUnmatched() + } else { + s.collectLogLine() + } + } +} + +func (s *SquidLog) collectLogLine() { + s.mx.Requests.Inc() + s.collectRespTime() + s.collectClientAddress() + s.collectCacheCode() + s.collectHTTPCode() + s.collectRespSize() + s.collectReqMethod() + s.collectHierCode() + s.collectServerAddress() + s.collectMimeType() +} + +func (s *SquidLog) collectUnmatched() { + s.mx.Requests.Inc() + s.mx.Unmatched.Inc() +} + +func (s *SquidLog) collectRespTime() { + if !s.line.hasRespTime() { + return + } + s.mx.RespTime.Observe(float64(s.line.respTime)) +} + +func (s *SquidLog) collectClientAddress() { + if !s.line.hasClientAddress() { + return + } + s.mx.UniqueClients.Insert(s.line.clientAddr) +} + +func (s *SquidLog) collectCacheCode() { + if !s.line.hasCacheCode() { + return + } + + c, ok := s.mx.CacheCode.GetP(s.line.cacheCode) + if !ok { + s.addDimToCacheCodeChart(s.line.cacheCode) + } + c.Inc() + + tags := strings.Split(s.line.cacheCode, "_") + for _, tag := range tags { + s.collectCacheCodeTag(tag) + } +} + +func (s *SquidLog) collectHTTPCode() { + if !s.line.hasHTTPCode() { + return + } + + code := s.line.httpCode + switch { + case code >= 100 && code < 300, code == 0, code == 304, code == 401: + s.mx.ReqSuccess.Inc() + case code >= 300 && code < 400: + s.mx.ReqRedirect.Inc() + case code >= 400 && code < 500: + s.mx.ReqBad.Inc() + case code >= 500 && code <= 603: + s.mx.ReqError.Inc() + } + + switch code / 100 { + case 0: + s.mx.HTTPResp0xx.Inc() + case 1: + s.mx.HTTPResp1xx.Inc() + case 2: + s.mx.HTTPResp2xx.Inc() + case 3: + s.mx.HTTPResp3xx.Inc() + case 4: + s.mx.HTTPResp4xx.Inc() + case 5: + s.mx.HTTPResp5xx.Inc() + case 6: + s.mx.HTTPResp6xx.Inc() + } + + codeStr := strconv.Itoa(code) + c, ok := s.mx.HTTPRespCode.GetP(codeStr) + if !ok { + s.addDimToHTTPRespCodesChart(codeStr) + } + c.Inc() +} + +func (s *SquidLog) collectRespSize() { + if !s.line.hasRespSize() { + return + } + s.mx.BytesSent.Add(float64(s.line.respSize)) +} + +func (s *SquidLog) collectReqMethod() { + if !s.line.hasReqMethod() { + return + } + c, ok := s.mx.ReqMethod.GetP(s.line.reqMethod) + if !ok { + s.addDimToReqMethodChart(s.line.reqMethod) + } + c.Inc() +} + +func (s *SquidLog) collectHierCode() { + if !s.line.hasHierCode() { + return + } + c, ok := s.mx.HierCode.GetP(s.line.hierCode) + if !ok { + s.addDimToHierCodeChart(s.line.hierCode) + } + c.Inc() +} + +func (s *SquidLog) collectServerAddress() { + if !s.line.hasServerAddress() { + return + } + c, ok := s.mx.Server.GetP(s.line.serverAddr) + if !ok { + s.addDimToServerAddressChart(s.line.serverAddr) + } + c.Inc() +} + +func (s *SquidLog) collectMimeType() { + if !s.line.hasMimeType() { + return + } + c, ok := s.mx.MimeType.GetP(s.line.mimeType) + if !ok { + s.addDimToMimeTypeChart(s.line.mimeType) + } + c.Inc() +} + +func (s *SquidLog) collectCacheCodeTag(tag string) { + // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes + switch tag { + default: + case "TCP", "UDP", "NONE": + c, ok := s.mx.CacheCodeTransportTag.GetP(tag) + if !ok { + s.addDimToCacheCodeTransportTagChart(tag) + } + c.Inc() + case "CF", "CLIENT", "IMS", "ASYNC", "SWAPFAIL", "REFRESH", "SHARED", "REPLY": + c, ok := s.mx.CacheCodeHandlingTag.GetP(tag) + if !ok { + s.addDimToCacheCodeHandlingTagChart(tag) + } + c.Inc() + case "NEGATIVE", "STALE", "OFFLINE", "INVALID", "FAIL", "MODIFIED", "UNMODIFIED", "REDIRECT": 
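+ // produced-object tags, tracked on a separate chart from the transport/handling/load-source/error tags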
+ c, ok := s.mx.CacheCodeObjectTag.GetP(tag) + if !ok { + s.addDimToCacheCodeObjectTagChart(tag) + } + c.Inc() + case "HIT", "MEM", "MISS", "DENIED", "NOFETCH", "TUNNEL": + c, ok := s.mx.CacheCodeLoadSourceTag.GetP(tag) + if !ok { + s.addDimToCacheCodeLoadSourceTagChart(tag) + } + c.Inc() + case "ABORTED", "TIMEOUT", "IGNORED": + c, ok := s.mx.CacheCodeErrorTag.GetP(tag) + if !ok { + s.addDimToCacheCodeErrorTagChart(tag) + } + c.Inc() + } +} + +func (s *SquidLog) addDimToCacheCodeChart(code string) { + chartID := cacheCodeChart.ID + dimID := pxCacheCode + code + s.addDimToChart(chartID, dimID, code) +} + +func (s *SquidLog) addDimToCacheCodeTransportTagChart(tag string) { + chartID := cacheCodeTransportTagChart.ID + dimID := pxTransportTag + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToCacheCodeHandlingTagChart(tag string) { + chartID := cacheCodeHandlingTagChart.ID + dimID := pxHandlingTag + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToCacheCodeObjectTagChart(tag string) { + chartID := cacheCodeObjectTagChart.ID + dimID := pxObjectTag + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToCacheCodeLoadSourceTagChart(tag string) { + chartID := cacheCodeLoadSourceTagChart.ID + dimID := pxSourceTag + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToCacheCodeErrorTagChart(tag string) { + chartID := cacheCodeErrorTagChart.ID + dimID := pxErrorTag + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToHTTPRespCodesChart(tag string) { + chartID := httpRespCodesChart.ID + dimID := pxHTTPCode + tag + s.addDimToChart(chartID, dimID, tag) +} + +func (s *SquidLog) addDimToReqMethodChart(method string) { + chartID := reqMethodChart.ID + dimID := pxReqMethod + method + s.addDimToChart(chartID, dimID, method) +} + +func (s *SquidLog) addDimToHierCodeChart(code string) { + chartID := hierCodeChart.ID + dimID := pxHierCode + code + dimName := code[5:] // remove "HIER_" + s.addDimToChart(chartID, dimID, dimName) +} + +func (s *SquidLog) addDimToServerAddressChart(address string) { + chartID := serverAddrChart.ID + dimID := pxSrvAddr + address + s.addDimToChartOrCreateIfNotExist(chartID, dimID, address) +} + +func (s *SquidLog) addDimToMimeTypeChart(mimeType string) { + chartID := mimeTypeChart.ID + dimID := pxMimeType + mimeType + s.addDimToChartOrCreateIfNotExist(chartID, dimID, mimeType) +} + +func (s *SquidLog) addDimToChart(chartID, dimID, dimName string) { + chart := s.Charts().Get(chartID) + if chart == nil { + s.Warningf("add '%s' dim: couldn't find '%s' chart in charts", dimID, chartID) + return + } + + dim := &Dim{ID: dimID, Name: dimName, Algo: module.Incremental} + + if err := chart.AddDim(dim); err != nil { + s.Warningf("add '%s' dim: %v", dimID, err) + return + } + chart.MarkNotCreated() +} + +func (s *SquidLog) addDimToChartOrCreateIfNotExist(chartID, dimID, dimName string) { + if s.Charts().Has(chartID) { + s.addDimToChart(chartID, dimID, dimName) + return + } + + chart := newChartByID(chartID) + if chart == nil { + s.Warningf("add '%s' dim: couldn't create '%s' chart", dimID, chartID) + return + } + if err := s.Charts().Add(chart); err != nil { + s.Warning(err) + return + } + s.addDimToChart(chartID, dimID, dimName) +} + +func newChartByID(chartID string) *Chart { + switch chartID { + case serverAddrChart.ID: + return serverAddrChart.Copy() + case mimeTypeChart.ID: + return mimeTypeChart.Copy() + } + return nil +} diff --git 
a/src/go/collectors/go.d.plugin/modules/squidlog/config_schema.json b/src/go/collectors/go.d.plugin/modules/squidlog/config_schema.json new file mode 100644 index 00000000000000..dcf439c70d18cc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/config_schema.json @@ -0,0 +1,101 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/squid_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parser": { + "type": "object", + "properties": { + "log_type": { + "type": "string" + }, + "csv_config": { + "type": "object", + "properties": { + "fields_per_record": { + "type": "integer" + }, + "delimiter": { + "type": "string" + }, + "trim_leading_space": { + "type": "boolean" + }, + "format": { + "type": "string" + } + }, + "required": [ + "fields_per_record", + "delimiter", + "trim_leading_space", + "format" + ] + }, + "ltsv_config": { + "type": "object", + "properties": { + "field_delimiter": { + "type": "string" + }, + "value_delimiter": { + "type": "string" + }, + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "field_delimiter", + "value_delimiter", + "mapping" + ] + }, + "regexp_config": { + "type": "object", + "properties": { + "pattern": { + "type": "string" + } + }, + "required": [ + "pattern" + ] + }, + "json_config": { + "type": "object", + "properties": { + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "mapping" + ] + } + }, + "required": [ + "log_type" + ] + }, + "path": { + "type": "string" + }, + "exclude_path": { + "type": "string" + } + }, + "required": [ + "name", + "path" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/init.go b/src/go/collectors/go.d.plugin/modules/squidlog/init.go new file mode 100644 index 00000000000000..60c2c458626586 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/init.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import ( + "bytes" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/logs" +) + +func (s *SquidLog) createLogReader() error { + s.Cleanup() + s.Debug("starting log reader creating") + + reader, err := logs.Open(s.Path, s.ExcludePath, s.Logger) + if err != nil { + return fmt.Errorf("creating log reader: %v", err) + } + + s.Debugf("created log reader, current file '%s'", reader.CurrentFilename()) + s.file = reader + return nil +} + +func (s *SquidLog) createParser() error { + s.Debug("starting parser creating") + lastLine, err := logs.ReadLastLine(s.file.CurrentFilename(), 0) + if err != nil { + return fmt.Errorf("read last line: %v", err) + } + + lastLine = bytes.TrimRight(lastLine, "\n") + s.Debugf("last line: '%s'", string(lastLine)) + + s.parser, err = logs.NewParser(s.Parser, s.file) + if err != nil { + return fmt.Errorf("create parser: %v", err) + } + s.Debugf("created parser: %s", s.parser.Info()) + + err = s.parser.Parse(lastLine, s.line) + if err != nil { + return fmt.Errorf("parse last line: %v (%s)", err, string(lastLine)) + } + + if err = s.line.verify(); err != nil { + return fmt.Errorf("verify last line: %v (%s)", err, string(lastLine)) + } + return nil +} + +func checkCSVFormatField(name string) (newName string, offset int, valid bool) { + name = cleanField(name) + if !knownField(name) { + return "", 0, false + } + return name, 0, true +} + +func cleanField(name string) string { + return strings.TrimLeft(name, "$%") +} + +func 
knownField(name string) bool { + switch name { + case fieldRespTime, fieldClientAddr, fieldCacheCode, fieldHTTPCode, fieldRespSize, fieldReqMethod: + fallthrough + case fieldHierCode, fieldServerAddr, fieldMimeType, fieldResultCode, fieldHierarchy: + return true + } + return false +} diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/integrations/squid_log_files.md b/src/go/collectors/go.d.plugin/modules/squidlog/integrations/squid_log_files.md new file mode 100644 index 00000000000000..472c912b9a89f9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/integrations/squid_log_files.md @@ -0,0 +1,249 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/squidlog/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/squidlog/metadata.yaml" +sidebar_label: "Squid log files" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Squid log files + + +<img src="https://netdata.cloud/img/squid.png" width="150"/> + + +Plugin: go.d.plugin +Module: squidlog + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Squid servers by parsing their access log files. + + +It automatically detects log files of Squid servers running on localhost. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Squid log files instance + +These metrics refer to the entire monitored application. + +This scope has no labels.
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| squidlog.requests | requests | requests/s | +| squidlog.excluded_requests | unmatched | requests/s | +| squidlog.type_requests | success, bad, redirect, error | requests/s | +| squidlog.http_status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s | +| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s | +| squidlog.bandwidth | sent | kilobits/s | +| squidlog.response_time | min, max, avg | milliseconds | +| squidlog.uniq_clients | clients | clients | +| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s | +| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s | +| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s | +| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s | +| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s | +| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s | +| squidlog.http_method_requests | a dimension per HTTP method | requests/s | +| squidlog.mime_type_requests | a dimension per MIME type | requests/s | +| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s | +| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/squidlog.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/squidlog.conf +``` +#### Options + +Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/). + +Squidlog knows how to parse and interpret the following codes: + +| field | squid format code | description | +|----------------|-------------------|---------------------------------------------------------------| +| resp_time | %tr | Response time (milliseconds). | +| client_address | %>a | Client source IP address. | +| client_address | %>A | Client FQDN. | +| cache_code | %Ss | Squid request status (TCP_MISS etc). | +| http_code | %>Hs | The HTTP response status code from Content Gateway to client. | +| resp_size | %<st | Total size of reply sent to client (after adaptation). | +| req_method | %rm | Request method (GET/POST etc). | +| hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). | +| server_address | %<a | Server IP address of the last server or peer connection. | +| server_address | %<A | Server FQDN or peer name. | +| mime_type | %mt | MIME content type. | + +In addition, to make the `Squid` [native log format](https://wiki.squid-cache.org/Features/LogFormat#Squid_native_access.log_format_in_detail) CSV-parsable, squidlog understands these groups of codes: + +| field | squid format code | description | +|-------------|-------------------|------------------------------------| +| result_code | %Ss/%>Hs | Cache code and http code. | +| hierarchy | %Sh/%<a | Hierarchy code and server address.
| + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| path | Path to the Squid access log file. | /var/log/squid/access.log | yes | +| exclude_path | Path to exclude. | *.gz | no | +| parser | Log parser configuration. | | no | +| parser.log_type | Log parser type. | auto | no | +| parser.csv_config | CSV log parser config. | | no | +| parser.csv_config.delimiter | CSV field delimiter. | space | no | +| parser.csv_config.format | CSV log format. | - $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type | yes | +| parser.ltsv_config | LTSV log parser config. | | no | +| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \t | no | +| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no | +| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes | +| parser.regexp_config | RegExp log parser config. | | no | +| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes | + +##### parser.log_type + +Squidlog supports 3 different log parsers: + +| Parser type | Description | +|-------------|-------------------------------------------| +| csv | Comma-separated values | +| ltsv | [LTSV](http://ltsv.org/) | +| regexp | Regular expression with named groups | + +Syntax: + +```yaml +parser: + log_type: csv +``` + + +##### parser.csv_config.format + +The format is a space-separated list of fields. **Known fields** (see the table above) are prefixed with `$` or `%`; any other token (e.g. `-`) marks a column to skip. + +##### parser.ltsv_config.mapping + +The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**. + +> **Note**: don't use `$` and `%` prefixes for mapped field names. + +```yaml +parser: + log_type: ltsv + ltsv_config: + mapping: + label1: field1 + label2: field2 +``` + + +##### parser.regexp_config.pattern + +Use a pattern with named subexpressions. These names should be **known fields**. + +> **Note**: don't use `$` and `%` prefixes for mapped field names. + +Syntax: + +```yaml +parser: + log_type: regexp + regexp_config: + pattern: PATTERN +``` + + +</details> + +#### Examples +There are no configuration examples. + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user.
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m squidlog + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/logline.go b/src/go/collectors/go.d.plugin/modules/squidlog/logline.go new file mode 100644 index 00000000000000..e3d200eaf398ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/logline.go @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// https://wiki.squid-cache.org/Features/LogFormat +// http://www.squid-cache.org/Doc/config/logformat/ +// https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes +// https://www.websense.com/content/support/library/web/v773/wcg_help/squid.aspx + +/* +4.6.1: +logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt +logformat common %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st %Ss:%Sh +logformat combined %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh +logformat referrer %ts.%03tu %>a %{Referer}>h %ru +logformat useragent %>a [%tl] "%{User-Agent}>h" +logformat icap_squid %ts.%03tu %6icap::tr %>A %icap::to/%03icap::Hs %icap::<st %icap::rm %icap::ru %un -/%icap::<A - +*/ + +/* +Valid Capture Name: [A-Za-z0-9_]+ +// TODO: namings + +| local | squid format code | description | +|-------------------------|-------------------|------------------------------------------------------------------------| +| resp_time | %tr | Response time (milliseconds). +| client_address | %>a | Client source IP address. +| client_address | %>A | Client FQDN. +| cache_code | %Ss | Squid request status (TCP_MISS etc). +| http_code | %>Hs | The HTTP response status code from Content Gateway to client. +| resp_size | %<st | Total size of reply sent to client (after adaptation). +| req_method | %rm | Request method (GET/POST etc). +| hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). +| server_address | %<a | Server IP address of the last server or peer connection. +| server_address | %<A | Server FQDN or peer name. +| mime_type | %mt | MIME content type. + +// Following needed to make default log format csv parsable +| result_code | %Ss/%03>Hs | cache code and http code. +| hierarchy | %Sh/%<a | hierarchy code and server address. + +Notes: +- %<a: older versions of Squid would put the origin server hostname here. 
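+- result_code (%Ss/%>Hs) and hierarchy (%Sh/%<a) are composite fields: Assign splits them on '/' into cache_code/http_code and hier_code/server_address respectively.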
+*/ + +var ( + errEmptyLine = errors.New("empty line") + errBadRespTime = errors.New("bad response time") + errBadClientAddr = errors.New("bad client address") + errBadCacheCode = errors.New("bad cache code") + errBadHTTPCode = errors.New("bad http code") + errBadRespSize = errors.New("bad response size") + errBadReqMethod = errors.New("bad request method") + errBadHierCode = errors.New("bad hier code") + errBadServerAddr = errors.New("bad server address") + errBadMimeType = errors.New("bad mime type") + errBadResultCode = errors.New("bad result code") + errBadHierarchy = errors.New("bad hierarchy") +) + +func newEmptyLogLine() *logLine { + var l logLine + l.reset() + return &l +} + +type ( + logLine struct { + clientAddr string + serverAddr string + + respTime int + respSize int + httpCode int + + reqMethod string + mimeType string + + cacheCode string + hierCode string + } +) + +const ( + fieldRespTime = "resp_time" + fieldClientAddr = "client_address" + fieldCacheCode = "cache_code" + fieldHTTPCode = "http_code" + fieldRespSize = "resp_size" + fieldReqMethod = "req_method" + fieldHierCode = "hier_code" + fieldServerAddr = "server_address" + fieldMimeType = "mime_type" + fieldResultCode = "result_code" + fieldHierarchy = "hierarchy" +) + +func (l *logLine) Assign(field string, value string) (err error) { + if value == "" { + return + } + + switch field { + case fieldRespTime: + err = l.assignRespTime(value) + case fieldClientAddr: + err = l.assignClientAddress(value) + case fieldCacheCode: + err = l.assignCacheCode(value) + case fieldHTTPCode: + err = l.assignHTTPCode(value) + case fieldRespSize: + err = l.assignRespSize(value) + case fieldReqMethod: + err = l.assignReqMethod(value) + case fieldHierCode: + err = l.assignHierCode(value) + case fieldMimeType: + err = l.assignMimeType(value) + case fieldServerAddr: + err = l.assignServerAddress(value) + case fieldResultCode: + err = l.assignResultCode(value) + case fieldHierarchy: + err = l.assignHierarchy(value) + } + return err +} + +const hyphen = "-" + +func (l *logLine) assignRespTime(time string) error { + if time == hyphen { + return fmt.Errorf("assign '%s': %w", time, errBadRespTime) + } + v, err := strconv.Atoi(time) + if err != nil || !isRespTimeValid(v) { + return fmt.Errorf("assign '%s': %w", time, errBadRespTime) + } + l.respTime = v + return nil +} + +func (l *logLine) assignClientAddress(address string) error { + if address == hyphen { + return fmt.Errorf("assign '%s': %w", address, errBadClientAddr) + } + l.clientAddr = address + return nil +} + +func (l *logLine) assignCacheCode(code string) error { + if code == hyphen || !isCacheCodeValid(code) { + return fmt.Errorf("assign '%s': %w", code, errBadCacheCode) + } + l.cacheCode = code + return nil +} + +func (l *logLine) assignHTTPCode(code string) error { + if code == hyphen { + return fmt.Errorf("assign '%s': %w", code, errBadHTTPCode) + } + v, err := strconv.Atoi(code) + if err != nil || !isHTTPCodeValid(v) { + return fmt.Errorf("assign '%s': %w", code, errBadHTTPCode) + } + l.httpCode = v + return nil +} + +func (l *logLine) assignResultCode(code string) error { + i := strings.IndexByte(code, '/') + if i <= 0 { + return fmt.Errorf("assign '%s': %w", code, errBadResultCode) + } + if err := l.assignCacheCode(code[:i]); err != nil { + return err + } + return l.assignHTTPCode(code[i+1:]) +} + +func (l *logLine) assignRespSize(size string) error { + if size == hyphen { + return fmt.Errorf("assign '%s': %w", size, errBadRespSize) + } + v, err := strconv.Atoi(size) + if err != 
nil || !isRespSizeValid(v) { + return fmt.Errorf("assign '%s': %w", size, errBadRespSize) + } + l.respSize = v + return nil +} + +func (l *logLine) assignReqMethod(method string) error { + if method == hyphen || !isReqMethodValid(method) { + return fmt.Errorf("assign '%s': %w", method, errBadReqMethod) + } + l.reqMethod = method + return nil +} + +func (l *logLine) assignHierCode(code string) error { + if code == hyphen || !isHierCodeValid(code) { + return fmt.Errorf("assign '%s': %w", code, errBadHierCode) + } + l.hierCode = code + return nil +} + +func (l *logLine) assignServerAddress(address string) error { + // Logged as "-" if there is no hierarchy information. + // For TCP HIT, TCP failures, cachemgr requests and all UDP requests, there is no hierarchy information. + if address == hyphen { + return nil + } + l.serverAddr = address + return nil +} + +func (l *logLine) assignHierarchy(hierarchy string) error { + i := strings.IndexByte(hierarchy, '/') + if i <= 0 { + return fmt.Errorf("assign '%s': %w", hierarchy, errBadHierarchy) + } + if err := l.assignHierCode(hierarchy[:i]); err != nil { + return err + } + return l.assignServerAddress(hierarchy[i+1:]) +} + +func (l *logLine) assignMimeType(mime string) error { + // ICP exchanges usually don't have any content type, and thus are logged "-". + //Also, some weird replies have content types ":" or even empty ones. + if mime == hyphen || mime == ":" { + return nil + } + // format: type/subtype, type/subtype;parameter=value + i := strings.IndexByte(mime, '/') + if i <= 0 || !isMimeTypeValid(mime[:i]) { + return fmt.Errorf("assign '%s': %w", mime, errBadMimeType) + } + l.mimeType = mime[:i] // drop subtype + return nil +} + +func (l logLine) verify() error { + if l.empty() { + return fmt.Errorf("verify: %w", errEmptyLine) + } + if l.hasRespTime() && !l.isRespTimeValid() { + return fmt.Errorf("verify '%d': %w", l.respTime, errBadRespTime) + } + if l.hasClientAddress() && !l.isClientAddressValid() { + return fmt.Errorf("verify '%s': %w", l.clientAddr, errBadClientAddr) + } + if l.hasCacheCode() && !l.isCacheCodeValid() { + return fmt.Errorf("verify '%s': %w", l.cacheCode, errBadCacheCode) + } + if l.hasHTTPCode() && !l.isHTTPCodeValid() { + return fmt.Errorf("verify '%d': %w", l.httpCode, errBadHTTPCode) + } + if l.hasRespSize() && !l.isRespSizeValid() { + return fmt.Errorf("verify '%d': %w", l.respSize, errBadRespSize) + } + if l.hasReqMethod() && !l.isReqMethodValid() { + return fmt.Errorf("verify '%s': %w", l.reqMethod, errBadReqMethod) + } + if l.hasHierCode() && !l.isHierCodeValid() { + return fmt.Errorf("verify '%s': %w", l.hierCode, errBadHierCode) + } + if l.hasServerAddress() && !l.isServerAddressValid() { + return fmt.Errorf("verify '%s': %w", l.serverAddr, errBadServerAddr) + } + if l.hasMimeType() && !l.isMimeTypeValid() { + return fmt.Errorf("verify '%s': %w", l.mimeType, errBadMimeType) + } + return nil +} + +func (l logLine) empty() bool { return l == emptyLogLine } +func (l logLine) hasRespTime() bool { return !isEmptyNumber(l.respTime) } +func (l logLine) hasClientAddress() bool { return !isEmptyString(l.clientAddr) } +func (l logLine) hasCacheCode() bool { return !isEmptyString(l.cacheCode) } +func (l logLine) hasHTTPCode() bool { return !isEmptyNumber(l.httpCode) } +func (l logLine) hasRespSize() bool { return !isEmptyNumber(l.respSize) } +func (l logLine) hasReqMethod() bool { return !isEmptyString(l.reqMethod) } +func (l logLine) hasHierCode() bool { return !isEmptyString(l.hierCode) } +func (l logLine) 
hasServerAddress() bool { return !isEmptyString(l.serverAddr) } +func (l logLine) hasMimeType() bool { return !isEmptyString(l.mimeType) } +func (l logLine) isRespTimeValid() bool { return isRespTimeValid(l.respTime) } +func (l logLine) isClientAddressValid() bool { return reAddress.MatchString(l.clientAddr) } +func (l logLine) isCacheCodeValid() bool { return isCacheCodeValid(l.cacheCode) } +func (l logLine) isHTTPCodeValid() bool { return isHTTPCodeValid(l.httpCode) } +func (l logLine) isRespSizeValid() bool { return isRespSizeValid(l.respSize) } +func (l logLine) isReqMethodValid() bool { return isReqMethodValid(l.reqMethod) } +func (l logLine) isHierCodeValid() bool { return isHierCodeValid(l.hierCode) } +func (l logLine) isServerAddressValid() bool { return reAddress.MatchString(l.serverAddr) } +func (l logLine) isMimeTypeValid() bool { return isMimeTypeValid(l.mimeType) } + +func (l *logLine) reset() { + l.respTime = emptyNumber + l.clientAddr = emptyString + l.cacheCode = emptyString + l.httpCode = emptyNumber + l.respSize = emptyNumber + l.reqMethod = emptyString + l.hierCode = emptyString + l.serverAddr = emptyString + l.mimeType = emptyString +} + +var emptyLogLine = *newEmptyLogLine() + +const ( + emptyString = "__empty_string__" + emptyNumber = -9999 +) + +var ( + // IPv4, IPv6, FQDN. + reAddress = regexp.MustCompile(`^(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3}|[a-f0-9:]{3,}|[a-zA-Z0-9-.]{3,})$`) +) + +func isEmptyString(s string) bool { + return s == emptyString || s == "" +} + +func isEmptyNumber(n int) bool { + return n == emptyNumber +} + +func isRespTimeValid(time int) bool { + return time >= 0 +} + +// isCacheCodeValid does not guarantee cache result code is valid, but it is very likely. +func isCacheCodeValid(code string) bool { + // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes + if code == "NONE" { + return true + } + return len(code) > 5 && (code[:4] == "TCP_" || code[:4] == "UDP_") +} + +func isHTTPCodeValid(code int) bool { + // https://wiki.squid-cache.org/SquidFaq/SquidLogs#HTTP_status_codes + return code == 0 || code >= 100 && code <= 603 +} + +func isRespSizeValid(size int) bool { + return size >= 0 +} + +func isReqMethodValid(method string) bool { + // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Request_methods + switch method { + case "GET", + "HEAD", + "POST", + "PUT", + "PATCH", + "DELETE", + "CONNECT", + "OPTIONS", + "TRACE", + "ICP_QUERY", + "PURGE", + "PROPFIND", + "PROPATCH", + "MKCOL", + "COPY", + "MOVE", + "LOCK", + "UNLOCK", + "NONE": + return true + } + return false +} + +// isHierCodeValid does not guarantee hierarchy code is valid, but it is very likely. +func isHierCodeValid(code string) bool { + // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Hierarchy_Codes + return len(code) > 6 && code[:5] == "HIER_" +} + +// isMimeTypeValid expects only mime type part. 
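+// For example, "text/html" is checked as "text": assignMimeType strips the subtype before validation.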
+func isMimeTypeValid(mimeType string) bool { + // https://www.iana.org/assignments/media-types/media-types.xhtml + if mimeType == "text" { + return true + } + switch mimeType { + case "application", "audio", "font", "image", "message", "model", "multipart", "video": + return true + } + return false +} diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/logline_test.go b/src/go/collectors/go.d.plugin/modules/squidlog/logline_test.go new file mode 100644 index 00000000000000..4a9069e3f28eb2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/logline_test.go @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const emptyStr = "" + +func TestLogLine_Assign(t *testing.T) { + type subTest struct { + input string + wantLine logLine + wantErr error + } + type test struct { + name string + field string + cases []subTest + } + tests := []test{ + { + name: "Response Time", + field: fieldRespTime, + cases: []subTest{ + {input: "0", wantLine: logLine{respTime: 0}}, + {input: "1000", wantLine: logLine{respTime: 1000}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadRespTime}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespTime}, + {input: "0.000", wantLine: emptyLogLine, wantErr: errBadRespTime}, + }, + }, + { + name: "Client Address", + field: fieldClientAddr, + cases: []subTest{ + {input: "127.0.0.1", wantLine: logLine{clientAddr: "127.0.0.1"}}, + {input: "::1", wantLine: logLine{clientAddr: "::1"}}, + {input: "kadr20.m1.netdata.lan", wantLine: logLine{clientAddr: "kadr20.m1.netdata.lan"}}, + {input: "±!@#$%^&*()", wantLine: logLine{clientAddr: "±!@#$%^&*()"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadClientAddr}, + }, + }, + { + name: "Cache Code", + field: fieldCacheCode, + cases: []subTest{ + {input: "TCP_MISS", wantLine: logLine{cacheCode: "TCP_MISS"}}, + {input: "TCP_DENIED", wantLine: logLine{cacheCode: "TCP_DENIED"}}, + {input: "TCP_CLIENT_REFRESH_MISS", wantLine: logLine{cacheCode: "TCP_CLIENT_REFRESH_MISS"}}, + {input: "UDP_MISS_NOFETCH", wantLine: logLine{cacheCode: "UDP_MISS_NOFETCH"}}, + {input: "UDP_INVALID", wantLine: logLine{cacheCode: "UDP_INVALID"}}, + {input: "NONE", wantLine: logLine{cacheCode: "NONE"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadCacheCode}, + {input: "TCP", wantLine: emptyLogLine, wantErr: errBadCacheCode}, + {input: "UDP_", wantLine: emptyLogLine, wantErr: errBadCacheCode}, + {input: "NONE_MISS", wantLine: emptyLogLine, wantErr: errBadCacheCode}, + }, + }, + { + name: "HTTP Code", + field: fieldHTTPCode, + cases: []subTest{ + {input: "000", wantLine: logLine{httpCode: 0}}, + {input: "100", wantLine: logLine{httpCode: 100}}, + {input: "200", wantLine: logLine{httpCode: 200}}, + {input: "300", wantLine: logLine{httpCode: 300}}, + {input: "400", wantLine: logLine{httpCode: 400}}, + {input: "500", wantLine: logLine{httpCode: 500}}, + {input: "603", wantLine: logLine{httpCode: 603}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHTTPCode}, + {input: "1", wantLine: emptyLogLine, wantErr: errBadHTTPCode}, + {input: "604", wantLine: emptyLogLine, wantErr: errBadHTTPCode}, + {input: "1000", wantLine: emptyLogLine, wantErr: errBadHTTPCode}, 
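+ // non-numeric input is rejected as well: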
+ {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadHTTPCode}, + }, + }, + { + name: "Response Size", + field: fieldRespSize, + cases: []subTest{ + {input: "0", wantLine: logLine{respSize: 0}}, + {input: "1000", wantLine: logLine{respSize: 1000}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadRespSize}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespSize}, + {input: "0.000", wantLine: emptyLogLine, wantErr: errBadRespSize}, + }, + }, + { + name: "Request Method", + field: fieldReqMethod, + cases: []subTest{ + {input: "GET", wantLine: logLine{reqMethod: "GET"}}, + {input: "HEAD", wantLine: logLine{reqMethod: "HEAD"}}, + {input: "POST", wantLine: logLine{reqMethod: "POST"}}, + {input: "PUT", wantLine: logLine{reqMethod: "PUT"}}, + {input: "PATCH", wantLine: logLine{reqMethod: "PATCH"}}, + {input: "DELETE", wantLine: logLine{reqMethod: "DELETE"}}, + {input: "CONNECT", wantLine: logLine{reqMethod: "CONNECT"}}, + {input: "OPTIONS", wantLine: logLine{reqMethod: "OPTIONS"}}, + {input: "TRACE", wantLine: logLine{reqMethod: "TRACE"}}, + {input: "ICP_QUERY", wantLine: logLine{reqMethod: "ICP_QUERY"}}, + {input: "PURGE", wantLine: logLine{reqMethod: "PURGE"}}, + {input: "PROPFIND", wantLine: logLine{reqMethod: "PROPFIND"}}, + {input: "PROPATCH", wantLine: logLine{reqMethod: "PROPATCH"}}, + {input: "MKCOL", wantLine: logLine{reqMethod: "MKCOL"}}, + {input: "COPY", wantLine: logLine{reqMethod: "COPY"}}, + {input: "MOVE", wantLine: logLine{reqMethod: "MOVE"}}, + {input: "LOCK", wantLine: logLine{reqMethod: "LOCK"}}, + {input: "UNLOCK", wantLine: logLine{reqMethod: "UNLOCK"}}, + {input: "NONE", wantLine: logLine{reqMethod: "NONE"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "get", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "0.000", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + }, + }, + { + name: "Hier Code", + field: fieldHierCode, + cases: []subTest{ + {input: "HIER_NONE", wantLine: logLine{hierCode: "HIER_NONE"}}, + {input: "HIER_SIBLING_HIT", wantLine: logLine{hierCode: "HIER_SIBLING_HIT"}}, + {input: "HIER_NO_CACHE_DIGEST_DIRECT", wantLine: logLine{hierCode: "HIER_NO_CACHE_DIGEST_DIRECT"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "0.000", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "HIER", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "HIER_", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "NONE", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "SIBLING_HIT", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "NO_CACHE_DIGEST_DIRECT", wantLine: emptyLogLine, wantErr: errBadHierCode}, + }, + }, + { + name: "Server Address", + field: fieldServerAddr, + cases: []subTest{ + {input: "127.0.0.1", wantLine: logLine{serverAddr: "127.0.0.1"}}, + {input: "::1", wantLine: logLine{serverAddr: "::1"}}, + {input: "kadr20.m1.netdata.lan", wantLine: logLine{serverAddr: "kadr20.m1.netdata.lan"}}, + {input: "±!@#$%^&*()", wantLine: logLine{serverAddr: "±!@#$%^&*()"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + { + name: "Mime Type", + field: fieldMimeType, + cases: []subTest{ + {input: 
"application/zstd", wantLine: logLine{mimeType: "application"}}, + {input: "audio/3gpp2", wantLine: logLine{mimeType: "audio"}}, + {input: "font/otf", wantLine: logLine{mimeType: "font"}}, + {input: "image/tiff", wantLine: logLine{mimeType: "image"}}, + {input: "message/global", wantLine: logLine{mimeType: "message"}}, + {input: "model/example", wantLine: logLine{mimeType: "model"}}, + {input: "multipart/encrypted", wantLine: logLine{mimeType: "multipart"}}, + {input: "text/html", wantLine: logLine{mimeType: "text"}}, + {input: "video/3gpp", wantLine: logLine{mimeType: "video"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "example/example", wantLine: emptyLogLine, wantErr: errBadMimeType}, + {input: "unknown/example", wantLine: emptyLogLine, wantErr: errBadMimeType}, + {input: "audio", wantLine: emptyLogLine, wantErr: errBadMimeType}, + {input: "/", wantLine: emptyLogLine, wantErr: errBadMimeType}, + }, + }, + { + name: "Result Code", + field: fieldResultCode, + cases: []subTest{ + {input: "TCP_MISS/000", wantLine: logLine{cacheCode: "TCP_MISS", httpCode: 0}}, + {input: "TCP_DENIED/603", wantLine: logLine{cacheCode: "TCP_DENIED", httpCode: 603}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadResultCode}, + {input: "TCP_MISS:000", wantLine: emptyLogLine, wantErr: errBadResultCode}, + {input: "TCP_MISS 000", wantLine: emptyLogLine, wantErr: errBadResultCode}, + {input: "/", wantLine: emptyLogLine, wantErr: errBadResultCode}, + {input: "tcp/000", wantLine: emptyLogLine, wantErr: errBadCacheCode}, + {input: "TCP_MISS/", wantLine: logLine{cacheCode: "TCP_MISS", httpCode: emptyNumber}, wantErr: errBadHTTPCode}, + }, + }, + { + name: "Hierarchy", + field: fieldHierarchy, + cases: []subTest{ + {input: "HIER_NONE/-", wantLine: logLine{hierCode: "HIER_NONE", serverAddr: emptyString}}, + {input: "HIER_SIBLING_HIT/127.0.0.1", wantLine: logLine{hierCode: "HIER_SIBLING_HIT", serverAddr: "127.0.0.1"}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHierarchy}, + {input: "HIER_NONE:-", wantLine: emptyLogLine, wantErr: errBadHierarchy}, + {input: "HIER_SIBLING_HIT 127.0.0.1", wantLine: emptyLogLine, wantErr: errBadHierarchy}, + {input: "/", wantLine: emptyLogLine, wantErr: errBadHierarchy}, + {input: "HIER/-", wantLine: emptyLogLine, wantErr: errBadHierCode}, + {input: "HIER_NONE/", wantLine: logLine{hierCode: "HIER_NONE", serverAddr: emptyStr}}, + }, + }, + } + + for _, tt := range tests { + for i, tc := range tt.cases { + name := fmt.Sprintf("[%s:%d]field='%s'|input='%s'", tt.name, i+1, tt.field, tc.input) + t.Run(name, func(t *testing.T) { + + line := newEmptyLogLine() + err := line.Assign(tt.field, tc.input) + + if tc.wantErr != nil { + require.Error(t, err) + assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err) + } else { + require.NoError(t, err) + } + + expected := prepareAssignLogLine(t, tt.field, tc.wantLine) + assert.Equal(t, expected, *line) + }) + } + } +} + +func TestLogLine_verify(t *testing.T) { + type subTest struct { + input string + wantErr error + } + type test = struct { + name string + field string + cases []subTest + } + tests := []test{ + { + name: "Response Time", + field: fieldRespTime, + cases: []subTest{ + {input: "0"}, + {input: "1000"}, + {input: "-1", wantErr: errBadRespTime}, + }, + }, + { + name: "Client Address", + field: fieldClientAddr, + cases: []subTest{ + {input: 
"127.0.0.1"}, + {input: "::1"}, + {input: "kadr20.m1.netdata.lan"}, + {input: emptyStr}, + {input: "±!@#$%^&*()", wantErr: errBadClientAddr}, + }, + }, + { + name: "Cache Code", + field: fieldCacheCode, + cases: []subTest{ + {input: "TCP_MISS"}, + {input: "TCP_DENIED"}, + {input: "TCP_CLIENT_REFRESH_MISS"}, + {input: "UDP_MISS_NOFETCH"}, + {input: "UDP_INVALID"}, + {input: "NONE"}, + {input: emptyStr}, + {input: "TCP", wantErr: errBadCacheCode}, + {input: "UDP", wantErr: errBadCacheCode}, + {input: "NONE_MISS", wantErr: errBadCacheCode}, + }, + }, + { + name: "HTTP Code", + field: fieldHTTPCode, + cases: []subTest{ + {input: "000"}, + {input: "100"}, + {input: "200"}, + {input: "300"}, + {input: "400"}, + {input: "500"}, + {input: "603"}, + {input: "1", wantErr: errBadHTTPCode}, + {input: "604", wantErr: errBadHTTPCode}, + }, + }, + { + name: "Response Size", + field: fieldRespSize, + cases: []subTest{ + {input: "0"}, + {input: "1000"}, + {input: "-1", wantErr: errBadRespSize}, + }, + }, + { + name: "Request Method", + field: fieldReqMethod, + cases: []subTest{ + {input: "GET"}, + {input: "HEAD"}, + {input: "POST"}, + {input: "PUT"}, + {input: "PATCH"}, + {input: "DELETE"}, + {input: "CONNECT"}, + {input: "OPTIONS"}, + {input: "TRACE"}, + {input: "ICP_QUERY"}, + {input: "PURGE"}, + {input: "PROPFIND"}, + {input: "PROPATCH"}, + {input: "MKCOL"}, + {input: "COPY"}, + {input: "MOVE"}, + {input: "LOCK"}, + {input: "UNLOCK"}, + {input: "NONE"}, + {input: emptyStr}, + {input: "get", wantErr: errBadReqMethod}, + {input: "TCP_MISS", wantErr: errBadReqMethod}, + }, + }, + { + name: "Hier Code", + field: fieldHierCode, + cases: []subTest{ + {input: "HIER_NONE"}, + {input: "HIER_SIBLING_HIT"}, + {input: "HIER_NO_CACHE_DIGEST_DIRECT"}, + {input: emptyStr}, + {input: "0.000", wantErr: errBadHierCode}, + {input: "TCP_MISS", wantErr: errBadHierCode}, + {input: "HIER", wantErr: errBadHierCode}, + {input: "HIER_", wantErr: errBadHierCode}, + {input: "NONE", wantErr: errBadHierCode}, + {input: "SIBLING_HIT", wantErr: errBadHierCode}, + {input: "NO_CACHE_DIGEST_DIRECT", wantErr: errBadHierCode}, + }, + }, + { + name: "Server Address", + field: fieldServerAddr, + cases: []subTest{ + {input: "127.0.0.1"}, + {input: "::1"}, + {input: "kadr20.m1.netdata.lan"}, + {input: emptyStr}, + {input: "±!@#$%^&*()", wantErr: errBadServerAddr}, + }, + }, + { + name: "Mime Type", + field: fieldMimeType, + cases: []subTest{ + {input: "application"}, + {input: "audio"}, + {input: "font"}, + {input: "image"}, + {input: "message"}, + {input: "model"}, + {input: "multipart"}, + {input: "text"}, + {input: "video"}, + {input: emptyStr}, + {input: "example/example", wantErr: errBadMimeType}, + {input: "unknown", wantErr: errBadMimeType}, + {input: "/", wantErr: errBadMimeType}, + }, + }, + } + + for _, tt := range tests { + for i, tc := range tt.cases { + name := fmt.Sprintf("[%s:%d]field='%s'|input='%s'", tt.name, i+1, tt.field, tc.input) + t.Run(name, func(t *testing.T) { + line := prepareVerifyLogLine(t, tt.field, tc.input) + + err := line.verify() + + if tc.wantErr != nil { + require.Error(t, err) + assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err) + } else { + require.NoError(t, err) + } + }) + } + } +} + +func prepareAssignLogLine(t *testing.T, field string, template logLine) logLine { + t.Helper() + if template.empty() { + return template + } + + var line logLine + line.reset() + + switch field { + default: + t.Errorf("prepareAssignLogLine unknown field: '%s'", field) + case 
fieldRespTime: + line.respTime = template.respTime + case fieldClientAddr: + line.clientAddr = template.clientAddr + case fieldCacheCode: + line.cacheCode = template.cacheCode + case fieldHTTPCode: + line.httpCode = template.httpCode + case fieldRespSize: + line.respSize = template.respSize + case fieldReqMethod: + line.reqMethod = template.reqMethod + case fieldHierCode: + line.hierCode = template.hierCode + case fieldMimeType: + line.mimeType = template.mimeType + case fieldServerAddr: + line.serverAddr = template.serverAddr + case fieldResultCode: + line.cacheCode = template.cacheCode + line.httpCode = template.httpCode + case fieldHierarchy: + line.hierCode = template.hierCode + line.serverAddr = template.serverAddr + } + return line +} + +func prepareVerifyLogLine(t *testing.T, field string, value string) logLine { + t.Helper() + var line logLine + line.reset() + + switch field { + default: + t.Errorf("prepareVerifyLogLine unknown field: '%s'", field) + case fieldRespTime: + v, err := strconv.Atoi(value) + require.NoError(t, err) + line.respTime = v + case fieldClientAddr: + line.clientAddr = value + case fieldCacheCode: + line.cacheCode = value + case fieldHTTPCode: + v, err := strconv.Atoi(value) + require.NoError(t, err) + line.httpCode = v + case fieldRespSize: + v, err := strconv.Atoi(value) + require.NoError(t, err) + line.respSize = v + case fieldReqMethod: + line.reqMethod = value + case fieldHierCode: + line.hierCode = value + case fieldMimeType: + line.mimeType = value + case fieldServerAddr: + line.serverAddr = value + } + return line +} diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/metadata.yaml b/src/go/collectors/go.d.plugin/modules/squidlog/metadata.yaml new file mode 100644 index 00000000000000..82712f9e56a547 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/metadata.yaml @@ -0,0 +1,315 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-squidlog + plugin_name: go.d.plugin + module_name: squidlog + monitored_instance: + name: Squid log files + link: https://www.squid-cache.org/ + icon_filename: squid.png + categories: + - data-collection.web-servers-and-web-proxies + keywords: + - squid + - logs + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: true + overview: + data_collection: + metrics_description: | + This collector monitors Squid servers by parsing their access log files. + method_description: | + It automatically detects log files of Squid servers running on localhost. + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/squidlog.conf + options: + description: | + Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/). + + Squidlog knows how to parse and interpret the following codes: + + | field | squid format code | description | + |----------------|-------------------|---------------------------------------------------------------| + | resp_time | %tr | Response time (milliseconds). | + | client_address | %>a | Client source IP address. | + | client_address | %>A | Client FQDN. | + | cache_code | %Ss | Squid request status (TCP_MISS etc). | + | http_code | %>Hs | The HTTP response status code from Content Gateway to client.
| + | resp_size | %<st | Total size of reply sent to client (after adaptation). | + | req_method | %rm | Request method (GET/POST etc). | + | hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). | + | server_address | %<a | Server IP address of the last server or peer connection. | + | server_address | %<A | Server FQDN or peer name. | + | mime_type | %mt | MIME content type. | + + In addition, to make the `Squid` [native log format](https://wiki.squid-cache.org/Features/LogFormat#Squid_native_access.log_format_in_detail) CSV-parsable, squidlog understands these groups of codes: + + | field | squid format code | description | + |-------------|-------------------|------------------------------------| + | result_code | %Ss/%>Hs | Cache code and http code. | + | hierarchy | %Sh/%<a | Hierarchy code and server address. | + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: path + description: Path to the Squid access log file. + default_value: /var/log/squid/access.log + required: true + - name: exclude_path + description: Path to exclude. + default_value: "*.gz" + required: false + - name: parser + description: Log parser configuration. + default_value: "" + required: false + - name: parser.log_type + description: Log parser type. + default_value: auto + required: false + detailed_description: | + Squidlog supports 3 different log parsers: + + | Parser type | Description | + |-------------|-------------------------------------------| + | csv | Comma-separated values | + | ltsv | [LTSV](http://ltsv.org/) | + | regexp | Regular expression with named groups | + + Syntax: + + ```yaml + parser: + log_type: csv + ``` + - name: parser.csv_config + description: CSV log parser config. + default_value: "" + required: false + - name: parser.csv_config.delimiter + description: CSV field delimiter. + default_value: space + required: false + - name: parser.csv_config.format + description: CSV log format. + default_value: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type" + required: true + detailed_description: | + The format is a space-separated list of fields. **Known fields** (see the table above) are prefixed with `$` or `%`; any other token (e.g. `-`) marks a column to skip. + - name: parser.ltsv_config + description: LTSV log parser config. + default_value: "" + required: false + - name: parser.ltsv_config.field_delimiter + description: LTSV field delimiter. + default_value: "\\t" + required: false + - name: parser.ltsv_config.value_delimiter + description: LTSV value delimiter. + default_value: ":" + required: false + - name: parser.ltsv_config.mapping + description: LTSV fields mapping to **known fields**. + default_value: "" + required: true + detailed_description: | + The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**. + + > **Note**: don't use `$` and `%` prefixes for mapped field names. + + ```yaml + parser: + log_type: ltsv + ltsv_config: + mapping: + label1: field1 + label2: field2 + ``` + - name: parser.regexp_config + description: RegExp log parser config. + default_value: "" + required: false + - name: parser.regexp_config.pattern + description: RegExp pattern with named groups. + default_value: "" + required: true + detailed_description: | + Use a pattern with named subexpressions. These names should be **known fields**.
+ + > **Note**: don't use `$` and `%` prefixes for mapped field names. + + Syntax: + + ```yaml + parser: + log_type: regexp + regexp_config: + pattern: PATTERN + ``` + examples: + folding: + title: Config + enabled: true + list: [] + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: squidlog.requests + description: Total Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: squidlog.excluded_requests + description: Excluded Requests + unit: requests/s + chart_type: line + dimensions: + - name: unmatched + - name: squidlog.type_requests + description: Requests By Type + unit: requests/s + chart_type: stacked + dimensions: + - name: success + - name: bad + - name: redirect + - name: error + - name: squidlog.http_status_code_class_responses + description: Responses By HTTP Status Code Class + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: squidlog.http_status_code_responses + description: Responses By HTTP Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per HTTP response code + - name: squidlog.bandwidth + description: Bandwidth + unit: kilobits/s + chart_type: line + dimensions: + - name: sent + - name: squidlog.response_time + description: Response Time + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: squidlog.uniq_clients + description: Unique Clients + unit: clients + chart_type: line + dimensions: + - name: clients + - name: squidlog.cache_result_code_requests + description: Requests By Cache Result Code + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result code + - name: squidlog.cache_result_code_transport_tag_requests + description: Requests By Cache Result Delivery Transport Tag + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result delivery transport tag + - name: squidlog.cache_result_code_handling_tag_requests + description: Requests By Cache Result Handling Tag + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result handling tag + - name: squidlog.cache_code_object_tag_requests + description: Requests By Cache Result Produced Object Tag + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result produced object tag + - name: squidlog.cache_code_load_source_tag_requests + description: Requests By Cache Result Load Source Tag + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result load source tag + - name: squidlog.cache_code_error_tag_requests + description: Requests By Cache Result Errors Tag + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per cache result error tag + - name: squidlog.http_method_requests + description: Requests By HTTP Method + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per HTTP method + - name: squidlog.mime_type_requests + description: Requests By MIME Type + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per MIME type + - name: squidlog.hier_code_requests + description: Requests By Hierarchy Code + unit: requests/s + chart_type: stacked + 
dimensions: + - name: a dimension per hierarchy code + - name: squidlog.server_address_forwarded_requests + description: Forwarded Requests By Server Address + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per server address diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/metrics.go b/src/go/collectors/go.d.plugin/modules/squidlog/metrics.go new file mode 100644 index 00000000000000..95bc3d8ed44953 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/squidlog/metrics.go @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package squidlog + +import "github.com/netdata/go.d.plugin/pkg/metrics" + +func newSummary() metrics.Summary { + return &summary{metrics.NewSummary()} +} + +type summary struct { + metrics.Summary +} + +func (s summary) WriteTo(rv map[string]int64, key string, mul, div int) { + s.Summary.WriteTo(rv, key, mul, div) + if _, ok := rv[key+"_min"]; !ok { + rv[key+"_min"] = 0 + rv[key+"_max"] = 0 + rv[key+"_avg"] = 0 + } +} + +const ( + pxHTTPCode = "http_resp_code_" + pxReqMethod = "req_method_" + pxCacheCode = "cache_result_code_" + pxTransportTag = "cache_transport_tag_" + pxHandlingTag = "cache_handling_tag_" + pxObjectTag = "cache_object_tag_" + pxSourceTag = "cache_load_source_tag_" + pxErrorTag = "cache_error_tag_" + pxHierCode = "hier_code_" + pxMimeType = "mime_type_" + pxSrvAddr = "server_address_" +) + +type metricsData struct { + Requests metrics.Counter `stm:"requests"` + Unmatched metrics.Counter `stm:"unmatched"` + + HTTPRespCode metrics.CounterVec `stm:"http_resp_code"` + HTTPResp0xx metrics.Counter `stm:"http_resp_0xx"` + HTTPResp1xx metrics.Counter `stm:"http_resp_1xx"` + HTTPResp2xx metrics.Counter `stm:"http_resp_2xx"` + HTTPResp3xx metrics.Counter `stm:"http_resp_3xx"` + HTTPResp4xx metrics.Counter `stm:"http_resp_4xx"` + HTTPResp5xx metrics.Counter `stm:"http_resp_5xx"` + HTTPResp6xx metrics.Counter `stm:"http_resp_6xx"` + + ReqSuccess metrics.Counter `stm:"req_type_success"` + ReqRedirect metrics.Counter `stm:"req_type_redirect"` + ReqBad metrics.Counter `stm:"req_type_bad"` + ReqError metrics.Counter `stm:"req_type_error"` + + BytesSent metrics.Counter `stm:"bytes_sent"` + RespTime metrics.Summary `stm:"resp_time,1000,1"` + UniqueClients metrics.UniqueCounter `stm:"uniq_clients"` + + ReqMethod metrics.CounterVec `stm:"req_method"` + CacheCode metrics.CounterVec `stm:"cache_result_code"` + CacheCodeTransportTag metrics.CounterVec `stm:"cache_transport_tag"` + CacheCodeHandlingTag metrics.CounterVec `stm:"cache_handling_tag"` + CacheCodeObjectTag metrics.CounterVec `stm:"cache_object_tag"` + CacheCodeLoadSourceTag metrics.CounterVec `stm:"cache_load_source_tag"` + CacheCodeErrorTag metrics.CounterVec `stm:"cache_error_tag"` + HierCode metrics.CounterVec `stm:"hier_code"` + MimeType metrics.CounterVec `stm:"mime_type"` + Server metrics.CounterVec `stm:"server_address"` +} + +func (m *metricsData) reset() { + m.RespTime.Reset() + m.UniqueClients.Reset() +} + +func newMetricsData() *metricsData { + return &metricsData{ + RespTime: newSummary(), + UniqueClients: metrics.NewUniqueCounter(true), + HTTPRespCode: metrics.NewCounterVec(), + ReqMethod: metrics.NewCounterVec(), + CacheCode: metrics.NewCounterVec(), + CacheCodeTransportTag: metrics.NewCounterVec(), + CacheCodeHandlingTag: metrics.NewCounterVec(), + CacheCodeObjectTag: metrics.NewCounterVec(), + CacheCodeLoadSourceTag: metrics.NewCounterVec(), + CacheCodeErrorTag: metrics.NewCounterVec(), + HierCode: metrics.NewCounterVec(), + Server: 
metrics.NewCounterVec(),
+		MimeType:               metrics.NewCounterVec(),
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/squidlog.go b/src/go/collectors/go.d.plugin/modules/squidlog/squidlog.go
new file mode 100644
index 00000000000000..704bc9627ce2ed
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/squidlog/squidlog.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+	_ "embed"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/logs"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("squidlog", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+func New() *SquidLog {
+	cfg := logs.ParserConfig{
+		LogType: logs.TypeCSV,
+		CSV: logs.CSVConfig{
+			FieldsPerRecord:  -1,
+			Delimiter:        " ",
+			TrimLeadingSpace: true,
+			Format:           "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type",
+			CheckField:       checkCSVFormatField,
+		},
+	}
+	return &SquidLog{
+		Config: Config{
+			Path:        "/var/log/squid/access.log",
+			ExcludePath: "*.gz",
+			Parser:      cfg,
+		},
+	}
+}
+
+type (
+	Config struct {
+		Parser      logs.ParserConfig `yaml:",inline"`
+		Path        string            `yaml:"path"`
+		ExcludePath string            `yaml:"exclude_path"`
+	}
+
+	SquidLog struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		file   *logs.Reader
+		parser logs.Parser
+		line   *logLine
+
+		mx     *metricsData
+		charts *module.Charts
+	}
+)
+
+func (s *SquidLog) Init() bool {
+	s.line = newEmptyLogLine()
+	s.mx = newMetricsData()
+	return true
+}
+
+func (s *SquidLog) Check() bool {
+	// Note: these inits are here so that auto-detection retry works.
+	if err := s.createLogReader(); err != nil {
+		s.Warning("check failed: ", err)
+		return false
+	}
+
+	if err := s.createParser(); err != nil {
+		s.Warning("check failed: ", err)
+		return false
+	}
+
+	if err := s.createCharts(s.line); err != nil {
+		s.Warning("check failed: ", err)
+		return false
+	}
+	return true
+}
+
+func (s *SquidLog) Charts() *module.Charts {
+	return s.charts
+}
+
+func (s *SquidLog) Collect() map[string]int64 {
+	mx, err := s.collect()
+	if err != nil {
+		s.Error(err)
+	}
+
+	if len(mx) == 0 {
+		return nil
+	}
+	return mx
+}
+
+func (s *SquidLog) Cleanup() {
+	if s.file != nil {
+		_ = s.file.Close()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/squidlog/squidlog_test.go b/src/go/collectors/go.d.plugin/modules/squidlog/squidlog_test.go
new file mode 100644
index 00000000000000..c6d818bf9bea3e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/squidlog/squidlog_test.go
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+	"bytes"
+	"os"
+	"testing"
+
+	"github.com/netdata/go.d.plugin/pkg/logs"
+	"github.com/netdata/go.d.plugin/pkg/metrics"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	nativeFormatAccessLog, _ = os.ReadFile("testdata/access.log")
+)
+
+func Test_readTestData(t *testing.T) {
+	assert.NotNil(t, nativeFormatAccessLog)
+}
+
+func TestNew(t *testing.T) {
+	assert.Implements(t, (*module.Module)(nil), New())
+}
+
+func TestSquidLog_Init(t *testing.T) {
+	squidlog := New()
+
+	assert.True(t, squidlog.Init())
+}
+
+func TestSquidLog_Check(t *testing.T) {
+}
+
+func TestSquidLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) {
+	squid := New()
+	defer squid.Cleanup()
+	squid.Path = 
"testdata/not_exists.log" + require.True(t, squid.Init()) + + assert.False(t, squid.Check()) +} + +func TestSquid_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { + squid := New() + defer squid.Cleanup() + squid.Path = "testdata/unknown.log" + require.True(t, squid.Init()) + + assert.False(t, squid.Check()) +} + +func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) { + squid := New() + defer squid.Cleanup() + squid.Path = "testdata/access.log" + squid.Parser.CSV.Format = "$one $two" + require.True(t, squid.Init()) + + assert.False(t, squid.Check()) +} + +func TestSquidLog_Charts(t *testing.T) { + assert.Nil(t, New().Charts()) + + squid := prepareSquidCollect(t) + assert.NotNil(t, squid.Charts()) + +} + +func TestSquidLog_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestSquidLog_Collect(t *testing.T) { + squid := prepareSquidCollect(t) + + expected := map[string]int64{ + "bytes_sent": 6827357, + "cache_error_tag_ABORTED": 326, + "cache_handling_tag_CF": 154, + "cache_handling_tag_CLIENT": 172, + "cache_load_source_tag_MEM": 172, + "cache_object_tag_NEGATIVE": 308, + "cache_object_tag_STALE": 172, + "cache_result_code_NONE": 158, + "cache_result_code_TCP_CF_NEGATIVE_NEGATIVE_ABORTED": 154, + "cache_result_code_UDP_CLIENT_STALE_MEM_ABORTED": 172, + "cache_transport_tag_NONE": 158, + "cache_transport_tag_TCP": 154, + "cache_transport_tag_UDP": 172, + "hier_code_HIER_CACHE_DIGEST_HIT": 128, + "hier_code_HIER_NO_CACHE_DIGEST_DIRECT": 130, + "hier_code_HIER_PARENT_HIT": 106, + "hier_code_HIER_SINGLE_PARENT": 120, + "http_resp_0xx": 51, + "http_resp_1xx": 45, + "http_resp_2xx": 62, + "http_resp_3xx": 120, + "http_resp_4xx": 112, + "http_resp_5xx": 46, + "http_resp_6xx": 48, + "http_resp_code_0": 51, + "http_resp_code_100": 45, + "http_resp_code_200": 62, + "http_resp_code_300": 58, + "http_resp_code_304": 62, + "http_resp_code_400": 56, + "http_resp_code_401": 56, + "http_resp_code_500": 46, + "http_resp_code_603": 48, + "mime_type_application": 52, + "mime_type_audio": 56, + "mime_type_font": 44, + "mime_type_image": 50, + "mime_type_message": 44, + "mime_type_model": 62, + "mime_type_multipart": 61, + "mime_type_text": 61, + "mime_type_video": 54, + "req_method_COPY": 84, + "req_method_GET": 70, + "req_method_HEAD": 59, + "req_method_OPTIONS": 99, + "req_method_POST": 74, + "req_method_PURGE": 98, + "req_type_bad": 56, + "req_type_error": 94, + "req_type_redirect": 58, + "req_type_success": 276, + "requests": 500, + "resp_time_avg": 3015931, + "resp_time_count": 484, + "resp_time_max": 4988000, + "resp_time_min": 1002000, + "resp_time_sum": 1459711000, + "server_address_2001:db8:2ce:a": 79, + "server_address_2001:db8:2ce:b": 89, + "server_address_203.0.113.100": 67, + "server_address_203.0.113.200": 70, + "server_address_content-gateway": 87, + "uniq_clients": 5, + "unmatched": 16, + } + + collected := squid.Collect() + + assert.Equal(t, expected, collected) + testCharts(t, squid, collected) +} + +func TestSquidLog_Collect_ReturnOldDataIfNothingRead(t *testing.T) { + squid := prepareSquidCollect(t) + + expected := map[string]int64{ + "bytes_sent": 6827357, + "cache_error_tag_ABORTED": 326, + "cache_handling_tag_CF": 154, + "cache_handling_tag_CLIENT": 172, + "cache_load_source_tag_MEM": 172, + "cache_object_tag_NEGATIVE": 308, + "cache_object_tag_STALE": 172, + "cache_result_code_NONE": 158, + "cache_result_code_TCP_CF_NEGATIVE_NEGATIVE_ABORTED": 154, + "cache_result_code_UDP_CLIENT_STALE_MEM_ABORTED": 172, + "cache_transport_tag_NONE": 158, + 
"cache_transport_tag_TCP": 154, + "cache_transport_tag_UDP": 172, + "hier_code_HIER_CACHE_DIGEST_HIT": 128, + "hier_code_HIER_NO_CACHE_DIGEST_DIRECT": 130, + "hier_code_HIER_PARENT_HIT": 106, + "hier_code_HIER_SINGLE_PARENT": 120, + "http_resp_0xx": 51, + "http_resp_1xx": 45, + "http_resp_2xx": 62, + "http_resp_3xx": 120, + "http_resp_4xx": 112, + "http_resp_5xx": 46, + "http_resp_6xx": 48, + "http_resp_code_0": 51, + "http_resp_code_100": 45, + "http_resp_code_200": 62, + "http_resp_code_300": 58, + "http_resp_code_304": 62, + "http_resp_code_400": 56, + "http_resp_code_401": 56, + "http_resp_code_500": 46, + "http_resp_code_603": 48, + "mime_type_application": 52, + "mime_type_audio": 56, + "mime_type_font": 44, + "mime_type_image": 50, + "mime_type_message": 44, + "mime_type_model": 62, + "mime_type_multipart": 61, + "mime_type_text": 61, + "mime_type_video": 54, + "req_method_COPY": 84, + "req_method_GET": 70, + "req_method_HEAD": 59, + "req_method_OPTIONS": 99, + "req_method_POST": 74, + "req_method_PURGE": 98, + "req_type_bad": 56, + "req_type_error": 94, + "req_type_redirect": 58, + "req_type_success": 276, + "requests": 500, + "resp_time_avg": 0, + "resp_time_count": 0, + "resp_time_max": 0, + "resp_time_min": 0, + "resp_time_sum": 0, + "server_address_2001:db8:2ce:a": 79, + "server_address_2001:db8:2ce:b": 89, + "server_address_203.0.113.100": 67, + "server_address_203.0.113.200": 70, + "server_address_content-gateway": 87, + "uniq_clients": 0, + "unmatched": 16, + } + + _ = squid.Collect() + collected := squid.Collect() + + assert.Equal(t, expected, collected) + testCharts(t, squid, collected) +} + +func testCharts(t *testing.T, squidlog *SquidLog, collected map[string]int64) { + t.Helper() + ensureChartsDynamicDimsCreated(t, squidlog) + ensureCollectedHasAllChartsDimsVarsIDs(t, squidlog, collected) +} + +func ensureChartsDynamicDimsCreated(t *testing.T, squid *SquidLog) { + ensureDynamicDimsCreated(t, squid, cacheCodeChart.ID, pxCacheCode, squid.mx.CacheCode) + ensureDynamicDimsCreated(t, squid, cacheCodeTransportTagChart.ID, pxTransportTag, squid.mx.CacheCodeTransportTag) + ensureDynamicDimsCreated(t, squid, cacheCodeHandlingTagChart.ID, pxHandlingTag, squid.mx.CacheCodeHandlingTag) + ensureDynamicDimsCreated(t, squid, cacheCodeObjectTagChart.ID, pxObjectTag, squid.mx.CacheCodeObjectTag) + ensureDynamicDimsCreated(t, squid, cacheCodeLoadSourceTagChart.ID, pxSourceTag, squid.mx.CacheCodeLoadSourceTag) + ensureDynamicDimsCreated(t, squid, cacheCodeErrorTagChart.ID, pxErrorTag, squid.mx.CacheCodeErrorTag) + ensureDynamicDimsCreated(t, squid, httpRespCodesChart.ID, pxHTTPCode, squid.mx.HTTPRespCode) + ensureDynamicDimsCreated(t, squid, reqMethodChart.ID, pxReqMethod, squid.mx.ReqMethod) + ensureDynamicDimsCreated(t, squid, hierCodeChart.ID, pxHierCode, squid.mx.HierCode) + ensureDynamicDimsCreated(t, squid, serverAddrChart.ID, pxSrvAddr, squid.mx.Server) + ensureDynamicDimsCreated(t, squid, mimeTypeChart.ID, pxMimeType, squid.mx.MimeType) +} + +func ensureDynamicDimsCreated(t *testing.T, squid *SquidLog, chartID, dimPrefix string, data metrics.CounterVec) { + chart := squid.Charts().Get(chartID) + assert.NotNilf(t, chart, "chart '%s' is not created", chartID) + if chart == nil { + return + } + for v := range data { + id := dimPrefix + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s', expected '%s'", chart.ID, v, id) + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, s *SquidLog, collected map[string]int64) { + for _, chart := range *s.Charts() 
{ + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareSquidCollect(t *testing.T) *SquidLog { + t.Helper() + squid := New() + squid.Path = "testdata/access.log" + require.True(t, squid.Init()) + require.True(t, squid.Check()) + defer squid.Cleanup() + + p, err := logs.NewCSVParser(squid.Parser.CSV, bytes.NewReader(nativeFormatAccessLog)) + require.NoError(t, err) + squid.parser = p + return squid +} + +// generateLogs is used to populate 'testdata/access.log' +//func generateLogs(w io.Writer, num int) error { +// var ( +// client = []string{"localhost", "203.0.113.1", "203.0.113.2", "2001:db8:2ce:1", "2001:db8:2ce:2"} +// cacheCode = []string{"TCP_CF_NEGATIVE_NEGATIVE_ABORTED", "UDP_CLIENT_STALE_MEM_ABORTED", "NONE"} +// httpCode = []string{"000", "100", "200", "300", "304", "400", "401", "500", "603"} +// method = []string{"GET", "HEAD", "POST", "COPY", "PURGE", "OPTIONS"} +// hierCode = []string{"HIER_PARENT_HIT", "HIER_SINGLE_PARENT", "HIER_CACHE_DIGEST_HIT", "HIER_NO_CACHE_DIGEST_DIRECT"} +// server = []string{"content-gateway", "203.0.113.100", "203.0.113.200", "2001:db8:2ce:a", "2001:db8:2ce:b", "-"} +// mimeType = []string{"application", "audio", "font", "image", "message", "model", "multipart", "video", "text"} +// ) +// +// r := rand.New(rand.NewSource(time.Now().UnixNano())) +// randFromString := func(s []string) string { return s[r.Intn(len(s))] } +// randInt := func(min, max int) int { return r.Intn(max-min) + min } +// +// var line string +// for i := 0; i < num; i++ { +// unmatched := randInt(1, 100) > 95 +// if i > 0 && unmatched { +// line = "Unmatched! 
The rat the cat the dog chased killed ate the malt!\n" +// } else { +// // 1576177221.686 0 ::1 TCP_MISS/200 1621 GET cache_object://localhost/counters - HIER_NONE/- text/plain +// line = fmt.Sprintf( +// "1576177221.686 %d %s %s/%s %d %s cache_object://localhost/counters - %s/%s %s/plain\n", +// randInt(1000, 5000), +// randFromString(client), +// randFromString(cacheCode), +// randFromString(httpCode), +// randInt(9000, 19000), +// randFromString(method), +// randFromString(hierCode), +// randFromString(server), +// randFromString(mimeType), +// ) +// } +// _, err := fmt.Fprint(w, line) +// if err != nil { +// return err +// } +// } +// return nil +//} diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/README.md b/src/go/collectors/go.d.plugin/modules/supervisord/README.md new file mode 120000 index 00000000000000..a8b7434849992a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/supervisord/README.md @@ -0,0 +1 @@ +integrations/supervisor.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/charts.go b/src/go/collectors/go.d.plugin/modules/supervisord/charts.go new file mode 100644 index 00000000000000..2f09677fe3b7ea --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/supervisord/charts.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package supervisord + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + summaryChartsPriority = module.Priority + groupChartsPriority = summaryChartsPriority + 20 +) + +var summaryCharts = module.Charts{ + { + ID: "processes", + Title: "Processes", + Units: "processes", + Fam: "summary", + Ctx: "supervisord.summary_processes", + Type: module.Stacked, + Priority: summaryChartsPriority, + Dims: module.Dims{ + {ID: "running_processes", Name: "running"}, + {ID: "non_running_processes", Name: "non-running"}, + }, + }, +} + +var ( + groupChartsTmpl = module.Charts{ + groupProcessesChartTmpl.Copy(), + groupProcessesStateCodeChartTmpl.Copy(), + groupProcessesExitStatusChartTmpl.Copy(), + groupProcessesUptimeChartTmpl.Copy(), + groupProcessesDowntimeChartTmpl.Copy(), + } + + groupProcessesChartTmpl = module.Chart{ + ID: "group_%s_processes", + Title: "Processes", + Units: "processes", + Fam: "group %s", + Ctx: "supervisord.processes", + Type: module.Stacked, + Dims: module.Dims{ + {ID: "group_%s_running_processes", Name: "running"}, + {ID: "group_%s_non_running_processes", Name: "non-running"}, + }, + } + groupProcessesStateCodeChartTmpl = module.Chart{ + ID: "group_%s_processes_state_code", + Title: "State code", + Units: "code", + Fam: "group %s", + Ctx: "supervisord.process_state_code", + } + groupProcessesExitStatusChartTmpl = module.Chart{ + ID: "group_%s_processes_exit_status", + Title: "Exit status", + Units: "status", + Fam: "group %s", + Ctx: "supervisord.process_exit_status", + } + groupProcessesUptimeChartTmpl = module.Chart{ + ID: "group_%s_processes_uptime", + Title: "Uptime", + Units: "seconds", + Fam: "group %s", + Ctx: "supervisord.process_uptime", + } + groupProcessesDowntimeChartTmpl = module.Chart{ + ID: "group_%s_processes_downtime", + Title: "Downtime", + Units: "seconds", + Fam: "group %s", + Ctx: "supervisord.process_downtime", + } +) + +func newProcGroupCharts(group string) *module.Charts { + charts := groupChartsTmpl.Copy() + for i, c := range *charts { + c.ID = fmt.Sprintf(c.ID, group) + c.Fam = fmt.Sprintf(c.Fam, group) + c.Priority = groupChartsPriority + i + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, group) 
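+			// For example, with group "proc2" the dim ID template
+			// "group_%s_running_processes" is instantiated as
+			// "group_proc2_running_processes".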
+		}
+	}
+	return charts
+}
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/client.go b/src/go/collectors/go.d.plugin/modules/supervisord/client.go
new file mode 100644
index 00000000000000..da62ca21c8976e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/client.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/mattn/go-xmlrpc"
+)
+
+type supervisorRPCClient struct {
+	client *xmlrpc.Client
+}
+
+func newSupervisorRPCClient(serverURL *url.URL, httpClient *http.Client) (supervisorClient, error) {
+	switch serverURL.Scheme {
+	case "http", "https":
+		c := xmlrpc.NewClient(serverURL.String())
+		c.HttpClient = httpClient
+		return &supervisorRPCClient{client: c}, nil
+	case "unix":
+		c := xmlrpc.NewClient("http://unix/RPC2")
+		t, ok := httpClient.Transport.(*http.Transport)
+		if !ok {
+			return nil, errors.New("unexpected HTTP client transport")
+		}
+		t.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+			d := net.Dialer{Timeout: httpClient.Timeout}
+			return d.DialContext(ctx, "unix", serverURL.Path)
+		}
+		c.HttpClient = httpClient
+		return &supervisorRPCClient{client: c}, nil
+	default:
+		return nil, fmt.Errorf("unexpected URL scheme: %s", serverURL.Scheme)
+	}
+}
+
+// http://supervisord.org/api.html#process-control
+type processStatus struct {
+	name       string // name of the process.
+	group      string // name of the process’ group.
+	start      int    // UNIX timestamp of when the process was started.
+	stop       int    // UNIX timestamp of when the process last ended, or 0 if the process has never been stopped.
+	now        int    // UNIX timestamp of the current time, which can be used to calculate process up-time.
+	state      int    // state code.
+	stateName  string // string description of state.
+	exitStatus int    // exit status (errorlevel) of process, or 0 if the process is still running.
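+	//
+	// Illustrative entry, matching the mock data used in the tests
+	// below (values are examples, not real supervisord output):
+	//   {name: "00", group: "proc2", start: 1613391036, stop: 1613391036,
+	//    now: 1613391038, state: 20, stateName: "RUNNING", exitStatus: 0}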
+} + +func (c *supervisorRPCClient) getAllProcessInfo() ([]processStatus, error) { + const fn = "supervisor.getAllProcessInfo" + resp, err := c.client.Call(fn) + if err != nil { + return nil, fmt.Errorf("error on '%s' function call: %v", fn, err) + } + return parseGetAllProcessInfo(resp) +} + +func (c *supervisorRPCClient) closeIdleConnections() { + c.client.HttpClient.CloseIdleConnections() +} + +func parseGetAllProcessInfo(resp interface{}) ([]processStatus, error) { + arr, ok := resp.(xmlrpc.Array) + if !ok { + return nil, fmt.Errorf("unexpected response type, want=xmlrpc.Array, got=%T", resp) + } + + var info []processStatus + + for _, item := range arr { + s, ok := item.(xmlrpc.Struct) + if !ok { + continue + } + + var p processStatus + for k, v := range s { + switch strings.ToLower(k) { + case "name": + p.name, _ = v.(string) + case "group": + p.group, _ = v.(string) + case "start": + p.start, _ = v.(int) + case "stop": + p.stop, _ = v.(int) + case "now": + p.now, _ = v.(int) + case "state": + p.state, _ = v.(int) + case "statename": + p.stateName, _ = v.(string) + case "exitstatus": + p.exitStatus, _ = v.(int) + } + } + if p.name != "" && p.group != "" && p.stateName != "" { + info = append(info, p) + } + } + return info, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/collect.go b/src/go/collectors/go.d.plugin/modules/supervisord/collect.go new file mode 100644 index 00000000000000..64feea13d896df --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/supervisord/collect.go @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package supervisord + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (s *Supervisord) collect() (map[string]int64, error) { + info, err := s.client.getAllProcessInfo() + if err != nil { + return nil, err + } + + ms := make(map[string]int64) + s.collectAllProcessInfo(ms, info) + + return ms, nil +} + +func (s *Supervisord) collectAllProcessInfo(ms map[string]int64, info []processStatus) { + s.resetCache() + ms["running_processes"] = 0 + ms["non_running_processes"] = 0 + for _, p := range info { + if _, ok := s.cache[p.group]; !ok { + s.cache[p.group] = make(map[string]bool) + s.addProcessGroupCharts(p) + } + if _, ok := s.cache[p.group][p.name]; !ok { + s.addProcessToCharts(p) + } + s.cache[p.group][p.name] = true + + ms["group_"+p.group+"_running_processes"] += 0 + ms["group_"+p.group+"_non_running_processes"] += 0 + if isProcRunning(p) { + ms["running_processes"] += 1 + ms["group_"+p.group+"_running_processes"] += 1 + } else { + ms["non_running_processes"] += 1 + ms["group_"+p.group+"_non_running_processes"] += 1 + } + id := procID(p) + ms[id+"_state_code"] = int64(p.state) + ms[id+"_exit_status"] = int64(p.exitStatus) + ms[id+"_uptime"] = calcProcessUptime(p) + ms[id+"_downtime"] = calcProcessDowntime(p) + } + s.cleanupCache() +} + +func (s *Supervisord) resetCache() { + for _, procs := range s.cache { + for name := range procs { + procs[name] = false + } + } +} + +func (s *Supervisord) cleanupCache() { + for group, procs := range s.cache { + for name, ok := range procs { + if !ok { + s.removeProcessFromCharts(group, name) + delete(s.cache[group], name) + } + } + if len(s.cache[group]) == 0 { + s.removeProcessGroupCharts(group) + delete(s.cache, group) + } + } +} + +func calcProcessUptime(p processStatus) int64 { + if !isProcRunning(p) { + return 0 + } + return int64(p.now - p.start) +} + +func calcProcessDowntime(p processStatus) int64 { + if isProcRunning(p) || p.stop == 0 { 
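+		// a process that is currently running, or that has never been
+		// stopped (stop == 0), has no downtime to report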
+		return 0
+	}
+	return int64(p.now - p.stop)
+}
+
+func (s *Supervisord) addProcessGroupCharts(p processStatus) {
+	charts := newProcGroupCharts(p.group)
+	if err := s.Charts().Add(*charts...); err != nil {
+		s.Warning(err)
+	}
+}
+
+func (s *Supervisord) addProcessToCharts(p processStatus) {
+	id := procID(p)
+	for _, c := range *s.Charts() {
+		var dimID string
+		switch c.ID {
+		case fmt.Sprintf(groupProcessesStateCodeChartTmpl.ID, p.group):
+			dimID = id + "_state_code"
+		case fmt.Sprintf(groupProcessesExitStatusChartTmpl.ID, p.group):
+			dimID = id + "_exit_status"
+		case fmt.Sprintf(groupProcessesUptimeChartTmpl.ID, p.group):
+			dimID = id + "_uptime"
+		case fmt.Sprintf(groupProcessesDowntimeChartTmpl.ID, p.group):
+			dimID = id + "_downtime"
+		default:
+			continue
+		}
+		dim := &module.Dim{ID: dimID, Name: p.name}
+		if err := c.AddDim(dim); err != nil {
+			s.Warning(err)
+			return
+		}
+		c.MarkNotCreated()
+	}
+}
+
+func (s *Supervisord) removeProcessGroupCharts(group string) {
+	prefix := "group_" + group
+	for _, c := range *s.Charts() {
+		if strings.HasPrefix(c.ID, prefix) {
+			c.MarkRemove()
+			c.MarkNotCreated()
+		}
+	}
+}
+
+func (s *Supervisord) removeProcessFromCharts(group, name string) {
+	id := procID(processStatus{name: name, group: group})
+	for _, c := range *s.Charts() {
+		var dimID string
+		switch c.ID {
+		case fmt.Sprintf(groupProcessesStateCodeChartTmpl.ID, group):
+			dimID = id + "_state_code"
+		case fmt.Sprintf(groupProcessesExitStatusChartTmpl.ID, group):
+			dimID = id + "_exit_status"
+		case fmt.Sprintf(groupProcessesUptimeChartTmpl.ID, group):
+			dimID = id + "_uptime"
+		case fmt.Sprintf(groupProcessesDowntimeChartTmpl.ID, group):
+			dimID = id + "_downtime"
+		default:
+			continue
+		}
+		if err := c.MarkDimRemove(dimID, true); err != nil {
+			s.Warning(err)
+			return
+		}
+		c.MarkNotCreated()
+	}
+}
+
+func procID(p processStatus) string {
+	return fmt.Sprintf("group_%s_process_%s", p.group, p.name)
+}
+
+func isProcRunning(p processStatus) bool {
+	// http://supervisord.org/subprocess.html#process-states
+	// STOPPED  (0)
+	// STARTING (10)
+	// RUNNING  (20)
+	// BACKOFF  (30)
+	// STOPPING (40)
+	// EXITED   (100)
+	// FATAL    (200)
+	// UNKNOWN  (1000)
+	return p.state == 20
+}
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/config_schema.json b/src/go/collectors/go.d.plugin/modules/supervisord/config_schema.json
new file mode 100644
index 00000000000000..d3617c94a7a544
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/config_schema.json
@@ -0,0 +1,25 @@
+{
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "title": "Supervisord collector job configuration",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string",
+      "description": "Job name."
+    },
+    "url": {
+      "type": "string",
+      "description": "Supervisor XML-RPC endpoint URL."
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ],
+      "description": "Request timeout in seconds."
+    }
+  },
+  "required": [
+    "url"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/init.go b/src/go/collectors/go.d.plugin/modules/supervisord/init.go
new file mode 100644
index 00000000000000..0c5285c3b406b6
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/init.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (s Supervisord) verifyConfig() error {
+	if s.URL == "" {
+		return errors.New("'url' not set")
+	}
+	return nil
+}
+
+func (s Supervisord) initSupervisorClient() (supervisorClient, error) {
+	u, err := url.Parse(s.URL)
+	if err != nil {
+		return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL)
+	}
+	httpClient, err := web.NewHTTPClient(s.Client)
+	if err != nil {
+		return nil, fmt.Errorf("create HTTP client: %v", err)
+	}
+	return newSupervisorRPCClient(u, httpClient)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/integrations/supervisor.md b/src/go/collectors/go.d.plugin/modules/supervisord/integrations/supervisor.md
new file mode 100644
index 00000000000000..c5fff79b7e4801
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/integrations/supervisor.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/supervisord/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/supervisord/metadata.yaml"
+sidebar_label: "Supervisor"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Processes and System Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Supervisor
+
+
+<img src="https://netdata.cloud/img/supervisord.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: supervisord
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Supervisor instances.
+
+It can collect metrics from:
+
+- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)
+- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)
+
+Used methods:
+
+- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Supervisor instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| supervisord.summary_processes | running, non-running | processes |
+
+### Per process group
+
+These metrics refer to the process group.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| supervisord.processes | running, non-running | processes |
+| supervisord.process_state_code | a dimension per process | code |
+| supervisord.process_exit_status | a dimension per process | exit status |
+| supervisord.process_uptime | a dimension per process | seconds |
+| supervisord.process_downtime | a dimension per process | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/supervisord.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/supervisord.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |
+| timeout | Request timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### HTTP
+
+Collect metrics via HTTP.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: 'http://127.0.0.1:9001/RPC2'
+
+```
+</details>
+
+##### Socket
+
+Collect metrics via Unix socket.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: 'unix:///run/supervisor.sock'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collect metrics from local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: 'http://127.0.0.1:9001/RPC2'
+
+  - name: remote
+    url: 'http://192.0.2.1:9001/RPC2'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m supervisord
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/metadata.yaml b/src/go/collectors/go.d.plugin/modules/supervisord/metadata.yaml
new file mode 100644
index 00000000000000..b5c81dd0442843
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/metadata.yaml
@@ -0,0 +1,162 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-supervisord
+      plugin_name: go.d.plugin
+      module_name: supervisord
+      monitored_instance:
+        name: Supervisor
+        link: http://supervisord.org/
+        icon_filename: supervisord.png
+        categories:
+          - data-collection.processes-and-system-services
+      keywords:
+        - supervisor
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors Supervisor instances.
+
+          It can collect metrics from:
+
+          - [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)
+          - [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)
+
+          Used methods:
+
+          - [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/supervisord.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:9001/RPC2
+              required: true
+            - name: timeout
+              description: Request timeout in seconds.
+              default_value: 1
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: HTTP
+              description: Collect metrics via HTTP.
+              config: |
+                jobs:
+                  - name: local
+                    url: 'http://127.0.0.1:9001/RPC2'
+            - name: Socket
+              description: Collect metrics via Unix socket.
+              config: |
+                jobs:
+                  - name: local
+                    url: 'unix:///run/supervisor.sock'
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Collect metrics from local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: 'http://127.0.0.1:9001/RPC2'
+
+                  - name: remote
+                    url: 'http://192.0.2.1:9001/RPC2'
+    troubleshooting:
+      problems:
+        list: []
+    alerts: []
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: global
+          description: These metrics refer to the entire monitored application.
+          labels: []
+          metrics:
+            - name: supervisord.summary_processes
+              description: Processes
+              unit: processes
+              chart_type: stacked
+              dimensions:
+                - name: running
+                - name: non-running
+        - name: process group
+          description: These metrics refer to the process group.
+          labels: []
+          metrics:
+            - name: supervisord.processes
+              description: Processes
+              unit: processes
+              chart_type: stacked
+              dimensions:
+                - name: running
+                - name: non-running
+            - name: supervisord.process_state_code
+              description: State code
+              unit: code
+              chart_type: line
+              dimensions:
+                - name: a dimension per process
+            - name: supervisord.process_exit_status
+              description: Exit status
+              unit: exit status
+              chart_type: line
+              dimensions:
+                - name: a dimension per process
+            - name: supervisord.process_uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: a dimension per process
+            - name: supervisord.process_downtime
+              description: Downtime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: a dimension per process
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/supervisord.go b/src/go/collectors/go.d.plugin/modules/supervisord/supervisord.go
new file mode 100644
index 00000000000000..1c99947109c530
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/supervisord.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+	_ "embed"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("supervisord", module.Creator{
+		JobConfigSchema: configSchema,
+		Create:          func() module.Module { return New() },
+	})
+}
+
+func New() *Supervisord {
+	return &Supervisord{
+		Config: Config{
+			URL: "http://127.0.0.1:9001/RPC2",
+			Client: web.Client{
+				Timeout: web.Duration{Duration: time.Second},
+			},
+		},
+
+		charts: summaryCharts.Copy(),
+		cache:  make(map[string]map[string]bool),
+	}
+}
+
+type Config struct {
+	URL        string `yaml:"url"`
+	web.Client `yaml:",inline"`
+}
+
+type (
+	Supervisord struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		client supervisorClient
+		charts *module.Charts
+
+		cache map[string]map[string]bool // map[group][procName]collected
+	}
+	supervisorClient interface {
+		getAllProcessInfo() ([]processStatus, error)
+		closeIdleConnections()
+	}
+)
+
+func (s *Supervisord) Init() bool {
+	err := s.verifyConfig()
+	if err != nil {
+		s.Errorf("verify config: %v", err)
+		return false
+	}
+
+	client, err := s.initSupervisorClient()
+	if err != nil {
+		s.Errorf("init supervisord client: %v", err)
+		return false
+	}
+	s.client = client
+
+	return true
+}
+
+func (s *Supervisord) Check() bool {
+	return len(s.Collect()) > 0
+}
+
+func (s *Supervisord) Charts() *module.Charts {
+	return s.charts
+}
+
+func (s *Supervisord) Collect() map[string]int64 {
+	ms, err := s.collect()
+	if err != nil {
+		s.Error(err)
+	}
+
+	if len(ms) == 0 {
+		return nil
+	}
+	return ms
+}
+
+func (s *Supervisord) Cleanup() {
+	if s.client != nil {
+		s.client.closeIdleConnections()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/supervisord/supervisord_test.go b/src/go/collectors/go.d.plugin/modules/supervisord/supervisord_test.go
new file mode 100644
index 00000000000000..23ef1ff0c64bb6
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/supervisord/supervisord_test.go
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+	"errors"
+	
"testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.IsType(t, (*Supervisord)(nil), New()) +} + +func TestSupervisord_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'url'": { + wantFail: true, + config: Config{URL: ""}, + }, + "fails on unexpected 'url' scheme": { + wantFail: true, + config: Config{URL: "tcp://127.0.0.1:9001/RPC2"}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + supvr := New() + supvr.Config = test.config + + if test.wantFail { + assert.False(t, supvr.Init()) + } else { + assert.True(t, supvr.Init()) + } + }) + } +} + +func TestSupervisord_Check(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Supervisord + wantFail bool + }{ + "success on valid response": { + prepare: prepareSupervisordSuccessOnGetAllProcessInfo, + }, + "success on zero processes response": { + prepare: prepareSupervisordZeroProcessesOnGetAllProcessInfo, + }, + "fails on error": { + wantFail: true, + prepare: prepareSupervisordErrorOnGetAllProcessInfo, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + supvr := test.prepare(t) + defer supvr.Cleanup() + + if test.wantFail { + assert.False(t, supvr.Check()) + } else { + assert.True(t, supvr.Check()) + } + }) + } +} + +func TestSupervisord_Charts(t *testing.T) { + supvr := New() + require.True(t, supvr.Init()) + + assert.NotNil(t, supvr.Charts()) +} + +func TestSupervisord_Cleanup(t *testing.T) { + supvr := New() + assert.NotPanics(t, supvr.Cleanup) + + require.True(t, supvr.Init()) + m := &mockSupervisorClient{} + supvr.client = m + + supvr.Cleanup() + + assert.True(t, m.calledCloseIdleConnections) +} + +func TestSupervisord_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) *Supervisord + wantCollected map[string]int64 + }{ + "success on valid response": { + prepare: prepareSupervisordSuccessOnGetAllProcessInfo, + wantCollected: map[string]int64{ + "group_proc1_non_running_processes": 1, + "group_proc1_process_00_downtime": 16276, + "group_proc1_process_00_exit_status": 0, + "group_proc1_process_00_state_code": 200, + "group_proc1_process_00_uptime": 0, + "group_proc1_running_processes": 0, + "group_proc2_non_running_processes": 0, + "group_proc2_process_00_downtime": 0, + "group_proc2_process_00_exit_status": 0, + "group_proc2_process_00_state_code": 20, + "group_proc2_process_00_uptime": 2, + "group_proc2_process_01_downtime": 0, + "group_proc2_process_01_exit_status": 0, + "group_proc2_process_01_state_code": 20, + "group_proc2_process_01_uptime": 2, + "group_proc2_process_02_downtime": 0, + "group_proc2_process_02_exit_status": 0, + "group_proc2_process_02_state_code": 20, + "group_proc2_process_02_uptime": 8, + "group_proc2_running_processes": 3, + "group_proc3_non_running_processes": 0, + "group_proc3_process_00_downtime": 0, + "group_proc3_process_00_exit_status": 0, + "group_proc3_process_00_state_code": 20, + "group_proc3_process_00_uptime": 16291, + "group_proc3_running_processes": 1, + "non_running_processes": 1, + "running_processes": 4, + }, + }, + "success on response with zero processes": { + prepare: prepareSupervisordZeroProcessesOnGetAllProcessInfo, + wantCollected: map[string]int64{ + "non_running_processes": 0, + "running_processes": 0, + }, + }, + "fails on error on getAllProcessesInfo": { + prepare: 
prepareSupervisordErrorOnGetAllProcessInfo,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			supvr := test.prepare(t)
+			defer supvr.Cleanup()
+
+			ms := supvr.Collect()
+			assert.Equal(t, test.wantCollected, ms)
+			if len(test.wantCollected) > 0 {
+				ensureCollectedHasAllChartsDimsVarsIDs(t, supvr, ms)
+				ensureCollectedProcessesAddedToCharts(t, supvr)
+			}
+		})
+	}
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, supvr *Supervisord, ms map[string]int64) {
+	for _, chart := range *supvr.Charts() {
+		if chart.Obsolete {
+			continue
+		}
+		for _, dim := range chart.Dims {
+			_, ok := ms[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+		}
+		for _, v := range chart.Vars {
+			_, ok := ms[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+		}
+	}
+}
+
+func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) {
+	for group := range supvr.cache {
+		for _, c := range *newProcGroupCharts(group) {
+			assert.NotNilf(t, supvr.Charts().Get(c.ID), "'%s' chart is not in charts", c.ID)
+		}
+	}
+}
+
+func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord {
+	supvr := New()
+	require.True(t, supvr.Init())
+	supvr.client = &mockSupervisorClient{}
+	return supvr
+}
+
+func prepareSupervisordZeroProcessesOnGetAllProcessInfo(t *testing.T) *Supervisord {
+	supvr := New()
+	require.True(t, supvr.Init())
+	supvr.client = &mockSupervisorClient{returnZeroProcesses: true}
+	return supvr
+}
+
+func prepareSupervisordErrorOnGetAllProcessInfo(t *testing.T) *Supervisord {
+	supvr := New()
+	require.True(t, supvr.Init())
+	supvr.client = &mockSupervisorClient{errOnGetAllProcessInfo: true}
+	return supvr
+}
+
+type mockSupervisorClient struct {
+	errOnGetAllProcessInfo     bool
+	returnZeroProcesses        bool
+	calledCloseIdleConnections bool
+}
+
+func (m mockSupervisorClient) getAllProcessInfo() ([]processStatus, error) {
+	if m.errOnGetAllProcessInfo {
+		return nil, errors.New("mock errOnGetAllProcessInfo")
+	}
+	if m.returnZeroProcesses {
+		return nil, nil
+	}
+	info := []processStatus{
+		{
+			name: "00", group: "proc1",
+			start: 1613374760, stop: 1613374762, now: 1613391038,
+			state: 200, stateName: "FATAL",
+			exitStatus: 0,
+		},
+		{
+			name: "00", group: "proc2",
+			start: 1613391036, stop: 1613391036, now: 1613391038,
+			state: 20, stateName: "RUNNING",
+			exitStatus: 0,
+		},
+		{
+			name: "01", group: "proc2",
+			start: 1613391036, stop: 1613391036, now: 1613391038,
+			state: 20, stateName: "RUNNING",
+			exitStatus: 0,
+		},
+		{
+			name: "02", group: "proc2",
+			start: 1613391030, stop: 1613391029, now: 1613391038,
+			state: 20, stateName: "RUNNING",
+			exitStatus: 0,
+		},
+		{
+			name: "00", group: "proc3",
+			start: 1613374747, stop: 0, now: 1613391038,
+			state: 20, stateName: "RUNNING",
+			exitStatus: 0,
+		},
+	}
+	return info, nil
+}
+
+func (m *mockSupervisorClient) closeIdleConnections() {
+	m.calledCloseIdleConnections = true
+}
diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/README.md b/src/go/collectors/go.d.plugin/modules/systemdunits/README.md
new file mode 120000
index 00000000000000..68dd433bf23a49
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/systemdunits/README.md
@@ -0,0 +1 @@
+integrations/systemd_units.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/charts.go b/src/go/collectors/go.d.plugin/modules/systemdunits/charts.go
new file mode 100644
index 00000000000000..23a1bf75e74ae8
--- /dev/null
+++ 
b/src/go/collectors/go.d.plugin/modules/systemdunits/charts.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +const ( + prioServiceUnitState = module.Priority + iota + prioSocketUnitState + prioTargetUnitState + prioPathUnitState + prioDeviceUnitState + prioMountUnitState + prioAutomountUnitState + prioSwapUnitState + prioTimerUnitState + prioScopeUnitState + prioSliceUnitState +) + +var prioMap = map[string]int{ + unitTypeService: prioServiceUnitState, + unitTypeSocket: prioSocketUnitState, + unitTypeTarget: prioTargetUnitState, + unitTypePath: prioPathUnitState, + unitTypeDevice: prioDeviceUnitState, + unitTypeMount: prioMountUnitState, + unitTypeAutomount: prioAutomountUnitState, + unitTypeSwap: prioSwapUnitState, + unitTypeTimer: prioTimerUnitState, + unitTypeScope: prioScopeUnitState, + unitTypeSlice: prioSliceUnitState, +} + +func newTypedUnitStateChartTmpl(name, typ string) *module.Chart { + chart := module.Chart{ + ID: fmt.Sprintf("unit_%s_%s_state", name, typ), + Title: fmt.Sprintf("%s Unit State", cases.Title(language.English, cases.Compact).String(typ)), + Units: "state", + Fam: fmt.Sprintf("%s units", typ), + Ctx: fmt.Sprintf("systemd.%s_unit_state", typ), + Priority: prioMap[typ], + Labels: []module.Label{ + {Key: "unit_name", Value: name}, + }, + Dims: module.Dims{ + {Name: unitStateActive}, + {Name: unitStateInactive}, + {Name: unitStateActivating}, + {Name: unitStateDeactivating}, + {Name: unitStateFailed}, + }, + } + for _, d := range chart.Dims { + d.ID = fmt.Sprintf("unit_%s_%s_state_%s", name, typ, d.Name) + } + return &chart +} + +func (s *SystemdUnits) addUnitToCharts(name, typ string) { + chart := newTypedUnitStateChartTmpl(name, typ) + + if err := s.Charts().Add(chart); err != nil { + s.Warning(err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/client.go b/src/go/collectors/go.d.plugin/modules/systemdunits/client.go new file mode 100644 index 00000000000000..a2787c4ec10655 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/client.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + "context" + + "github.com/coreos/go-systemd/v22/dbus" +) + +type systemdClient interface { + connect() (systemdConnection, error) +} +type systemdConnection interface { + Close() + GetManagerProperty(string) (string, error) + ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error) + ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]dbus.UnitStatus, error) +} + +type systemdDBusClient struct{} + +func (systemdDBusClient) connect() (systemdConnection, error) { + return dbus.NewWithContext(context.Background()) +} + +func newSystemdDBusClient() *systemdDBusClient { + return &systemdDBusClient{} +} diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/collect.go b/src/go/collectors/go.d.plugin/modules/systemdunits/collect.go new file mode 100644 index 00000000000000..2843a42302f40a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/collect.go @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/coreos/go-systemd/v22/dbus" +) 
+ +const ( + // https://www.freedesktop.org/software/systemd/man/systemd.html + unitStateActive = "active" + unitStateInactive = "inactive" + unitStateActivating = "activating" + unitStateDeactivating = "deactivating" + unitStateFailed = "failed" + + // https://www.freedesktop.org/software/systemd/man/systemd.html + unitTypeService = "service" + unitTypeSocket = "socket" + unitTypeTarget = "target" + unitTypePath = "path" + unitTypeDevice = "device" + unitTypeMount = "mount" + unitTypeAutomount = "automount" + unitTypeSwap = "swap" + unitTypeTimer = "timer" + unitTypeScope = "scope" + unitTypeSlice = "slice" +) + +var ( + unitStates = []string{ + unitStateActive, + unitStateActivating, + unitStateFailed, + unitStateInactive, + unitStateDeactivating, + } +) + +func (s *SystemdUnits) collect() (map[string]int64, error) { + conn, err := s.getConnection() + if err != nil { + return nil, err + } + + if s.systemdVersion == 0 { + ver, err := s.getSystemdVersion(conn) + if err != nil { + s.closeConnection() + return nil, err + } + s.systemdVersion = ver + } + + var units []dbus.UnitStatus + if s.systemdVersion >= 230 { + // https://github.com/systemd/systemd/pull/3142 + units, err = s.getLoadedUnitsByPatterns(conn) + } else { + units, err = s.getLoadedUnits(conn) + } + if err != nil { + s.closeConnection() + return nil, err + } + + if len(units) == 0 { + return nil, nil + } + + mx := make(map[string]int64) + s.collectUnitsStates(mx, units) + + return mx, nil +} + +func (s *SystemdUnits) collectUnitsStates(mx map[string]int64, units []dbus.UnitStatus) { + for _, unit := range units { + name, typ := extractUnitNameType(cleanUnitName(unit.Name)) + if name == "" || typ == "" { + continue + } + + if !s.units[unit.Name] { + s.units[unit.Name] = true + s.addUnitToCharts(name, typ) + } + + for _, s := range unitStates { + mx[fmt.Sprintf("unit_%s_%s_state_%s", name, typ, s)] = 0 + } + mx[fmt.Sprintf("unit_%s_%s_state_%s", name, typ, unit.ActiveState)] = 1 + } +} + +func (s *SystemdUnits) getConnection() (systemdConnection, error) { + if s.conn == nil { + conn, err := s.client.connect() + if err != nil { + return nil, fmt.Errorf("error on creating a connection: %v", err) + } + s.conn = conn + } + return s.conn, nil +} + +func (s *SystemdUnits) closeConnection() { + if s.conn != nil { + s.conn.Close() + s.conn = nil + } +} + +var reVersion = regexp.MustCompile(`[0-9][0-9][0-9]`) + +const versionProperty = "Version" + +func (s *SystemdUnits) getSystemdVersion(conn systemdConnection) (int, error) { + s.Debugf("calling function 'GetManagerProperty'") + version, err := conn.GetManagerProperty(versionProperty) + if err != nil { + return 0, fmt.Errorf("error on getting '%s' manager property: %v", versionProperty, err) + } + + s.Debugf("systemd version: %s", version) + + major := reVersion.FindString(version) + if major == "" { + return 0, fmt.Errorf("couldn't parse systemd version string '%s'", version) + } + + ver, err := strconv.Atoi(major) + if err != nil { + return 0, fmt.Errorf("couldn't parse systemd version string '%s': %v", version, err) + } + + return ver, nil +} + +func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus, error) { + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + defer cancel() + + s.Debugf("calling function 'ListUnits'") + units, err := conn.ListUnitsContext(ctx) + if err != nil { + return nil, fmt.Errorf("error on ListUnits: %v", err) + } + + loaded := units[:0] + for _, unit := range units { + if unit.LoadState == "loaded" && 
s.sr.MatchString(unit.Name) { + loaded = append(loaded, unit) + } + } + s.Debugf("got total/loaded %d/%d units", len(units), len(loaded)) + + return loaded, nil +} + +func (s *SystemdUnits) getLoadedUnitsByPatterns(conn systemdConnection) ([]dbus.UnitStatus, error) { + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + defer cancel() + + s.Debugf("calling function 'ListUnitsByPatterns'") + + units, err := conn.ListUnitsByPatternsContext(ctx, unitStates, s.Include) + if err != nil { + return nil, fmt.Errorf("error on ListUnitsByPatterns: %v", err) + } + + loaded := units[:0] + for _, unit := range units { + if unit.LoadState == "loaded" { + loaded = append(loaded, unit) + } + } + s.Debugf("got total/loaded %d/%d units", len(units), len(loaded)) + + return loaded, nil +} + +func extractUnitNameType(name string) (string, string) { + idx := strings.LastIndexByte(name, '.') + if idx <= 0 { + return "", "" + } + return name[:idx], name[idx+1:] +} + +func cleanUnitName(name string) string { + // dev-disk-by\x2duuid-DE44\x2dCEE0.device => dev-disk-by-uuid-DE44-CEE0.device + if strings.IndexByte(name, '\\') == -1 { + return name + } + v, err := strconv.Unquote("\"" + name + "\"") + if err != nil { + return name + } + return v +} diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/config_schema.json b/src/go/collectors/go.d.plugin/modules/systemdunits/config_schema.json new file mode 100644 index 00000000000000..5a9df2571be7ee --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/config_schema.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/systemdunits job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "include": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "include" + ] +} \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/doc.go b/src/go/collectors/go.d.plugin/modules/systemdunits/doc.go new file mode 100644 index 00000000000000..8bb45fab91f061 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/doc.go @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +// Package systemdunits is a systemd units states collector +package systemdunits diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/init.go b/src/go/collectors/go.d.plugin/modules/systemdunits/init.go new file mode 100644 index 00000000000000..d079443b15232c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + "errors" + "strings" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (s *SystemdUnits) validateConfig() error { + if len(s.Include) == 0 { + return errors.New("'include' option not set") + } + return nil +} + +func (s *SystemdUnits) initSelector() (matcher.Matcher, error) { + if len(s.Include) == 0 { + return matcher.TRUE(), nil + } + + expr := strings.Join(s.Include, " ") + return matcher.NewSimplePatternsMatcher(expr) +} diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/integrations/systemd_units.md b/src/go/collectors/go.d.plugin/modules/systemdunits/integrations/systemd_units.md new file mode 100644 index 00000000000000..9fae020c8a9259 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/systemdunits/integrations/systemd_units.md @@ -0,0 +1,253 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/systemdunits/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/systemdunits/metadata.yaml" +sidebar_label: "Systemd Units" +learn_status: "Published" +learn_rel_path: "Data Collection/Systemd" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Systemd Units + + +<img src="https://netdata.cloud/img/systemd.svg" width="150"/> + + +Plugin: go.d.plugin +Module: systemdunits + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Systemd units state. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per unit + +These metrics refer to the systemd unit. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| unit_name | systemd unit name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state | +| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state | +| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state | +| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state | +| [ 
systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state | + | [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state | + | [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state | + | [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state | + | [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state | + | [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state | + | [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state | + | [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/systemdunits.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/systemdunits.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| include | Systemd units filter. | *.service | no | +| timeout | System bus requests timeout. | 1 | no | + +##### include + +Systemd units matching the selector will be monitored. + +- Logic: (pattern1 OR pattern2) +- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) +- Syntax: + +```yaml +include: + - pattern1 + - pattern2 +``` + + +</details> + +#### Examples + +##### Service units + +Collect state of all service type units. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: service + include: + - '*.service' + +``` +</details> + +##### One specific unit + +Collect state of one specific unit. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: my-specific-service + include: + - 'my-specific.service' + +``` +</details> + +##### All unit types + +Collect state of all units. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: my-specific-service-unit + include: + - '*' + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique.
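+> In the config below, the two jobs are uniquely named `service` and `socket`.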
+ +Collect state of all service and socket type units. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: service + include: + - '*.service' + + - name: socket + include: + - '*.socket' + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m systemdunits + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/metadata.yaml b/src/go/collectors/go.d.plugin/modules/systemdunits/metadata.yaml new file mode 100644 index 00000000000000..21755bb698dbe9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/metadata.yaml @@ -0,0 +1,290 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-systemdunits + plugin_name: go.d.plugin + module_name: systemdunits + monitored_instance: + name: Systemd Units + link: https://www.freedesktop.org/wiki/Software/systemd/ + icon_filename: systemd.svg + categories: + - data-collection.systemd + keywords: + - systemd + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Systemd units state. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/systemdunits.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: include + description: Systemd units filter. + default_value: "*.service" + required: false + detailed_description: | + Systemd units matching the selector will be monitored. + + - Logic: (pattern1 OR pattern2) + - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match) + - Syntax: + + ```yaml + include: + - pattern1 + - pattern2 + ``` + - name: timeout + description: System bus requests timeout. + default_value: 1 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Service units + description: Collect state of all service type units. + config: | + jobs: + - name: service + include: + - '*.service' + - name: One specific unit + description: Collect state of one specific unit. + config: | + jobs: + - name: my-specific-service + include: + - 'my-specific.service' + - name: All unit types + description: Collect state of all units.
+ config: | + jobs: + - name: my-specific-service-unit + include: + - '*' + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collect state of all service and socket type units. + config: | + jobs: + - name: service + include: + - '*.service' + + - name: socket + include: + - '*.socket' + troubleshooting: + problems: + list: [] + alerts: + - name: systemd_service_unit_failed_state + metric: systemd.service_unit_state + info: systemd service unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_socket_unit_failed_state + metric: systemd.socket_unit_state + info: systemd socket unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_target_unit_failed_state + metric: systemd.target_unit_state + info: systemd target unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_path_unit_failed_state + metric: systemd.path_unit_state + info: systemd path unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_device_unit_failed_state + metric: systemd.device_unit_state + info: systemd device unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_mount_unit_failed_state + metric: systemd.mount_unit_state + info: systemd mount unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_automount_unit_failed_state + metric: systemd.automount_unit_state + info: systemd automount unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_swap_unit_failed_state + metric: systemd.swap_unit_state + info: systemd swap unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_scope_unit_failed_state + metric: systemd.scope_unit_state + info: systemd scope unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_slice_unit_failed_state + metric: systemd.slice_unit_state + info: systemd slice unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + - name: systemd_timer_unit_failed_state + metric: systemd.timer_unit_state + info: systemd timer unit in the failed state + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: unit + description: These metrics refer to the systemd unit. 
+ labels: + - name: unit_name + description: systemd unit name + metrics: + - name: systemd.service_unit_state + description: Service Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.socket_unit_state + description: Socket Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.target_unit_state + description: Target Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.path_unit_state + description: Path Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.device_unit_state + description: Device Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.mount_unit_state + description: Mount Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.automount_unit_state + description: Automount Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.swap_unit_state + description: Swap Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.timer_unit_state + description: Timer Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.scope_unit_state + description: Scope Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed + - name: systemd.slice_unit_state + description: Slice Unit State + unit: state + chart_type: line + dimensions: + - name: active + - name: inactive + - name: activating + - name: deactivating + - name: failed diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits.go b/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits.go new file mode 100644 index 00000000000000..3593b531e093f6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("systemdunits", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 10, // gathering systemd units can be a CPU intensive op + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *SystemdUnits { + return &SystemdUnits{ + Config: Config{ + Include: []string{ + "*.service", + }, + Timeout: web.Duration{Duration: time.Second * 2}, + }, + + charts: &module.Charts{}, + client: 
newSystemdDBusClient(), + units: make(map[string]bool), + } +} + +type Config struct { + Include []string `yaml:"include"` + Timeout web.Duration `yaml:"timeout"` +} + +type SystemdUnits struct { + module.Base + Config `yaml:",inline"` + + client systemdClient + conn systemdConnection + + systemdVersion int + units map[string]bool + sr matcher.Matcher + + charts *module.Charts +} + +func (s *SystemdUnits) Init() bool { + err := s.validateConfig() + if err != nil { + s.Errorf("config validation: %v", err) + return false + } + + sr, err := s.initSelector() + if err != nil { + s.Errorf("init selector: %v", err) + return false + } + s.sr = sr + + s.Debugf("unit names patterns: %v", s.Include) + s.Debugf("timeout: %s", s.Timeout) + return true +} + +func (s *SystemdUnits) Check() bool { + return len(s.Collect()) > 0 +} + +func (s *SystemdUnits) Charts() *module.Charts { + return s.charts +} + +func (s *SystemdUnits) Collect() map[string]int64 { + ms, err := s.collect() + if err != nil { + s.Error(err) + } + + if len(ms) == 0 { + return nil + } + return ms +} + +func (s *SystemdUnits) Cleanup() { + s.closeConnection() +} diff --git a/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits_test.go b/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits_test.go new file mode 100644 index 00000000000000..baa9ed46a1e173 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/systemdunits/systemdunits_test.go @@ -0,0 +1,891 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +//go:build linux +// +build linux + +package systemdunits + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/coreos/go-systemd/v22/dbus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestSystemdUnits_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "success when 'include' option set": { + config: Config{ + Include: []string{"*"}, + }, + }, + "fails when 'include' option not set": { + wantFail: true, + config: Config{Include: []string{}}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + systemd := New() + systemd.Config = test.config + + if test.wantFail { + assert.False(t, systemd.Init()) + } else { + assert.True(t, systemd.Init()) + } + }) + } +} + +func TestSystemdUnits_Check(t *testing.T) { + tests := map[string]struct { + prepare func() *SystemdUnits + wantFail bool + }{ + "success on systemd v230+": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*"} + systemd.client = prepareOKClient(230) + return systemd + }, + }, + "success on systemd v230-": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*"} + systemd.client = prepareOKClient(220) + return systemd + }, + }, + "fails when all units are filtered": { + wantFail: true, + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*.not_exists"} + systemd.client = prepareOKClient(230) + return systemd + }, + }, + "fails on error on connect": { + wantFail: true, + prepare: func() *SystemdUnits { + systemd := New() + systemd.client = prepareClientErrOnConnect() + return systemd + }, + }, + "fails on error on get manager property": { + wantFail: true, + prepare: func() *SystemdUnits { + systemd := New() +
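+ // the mock connection is established, but its 'Version' property lookup
+ // fails, so collect() cannot determine the systemd version and Check()
+ // returns false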
systemd.client = prepareClientErrOnGetManagerProperty() + return systemd + }, + }, + "fails on error on list units": { + wantFail: true, + prepare: func() *SystemdUnits { + systemd := New() + systemd.client = prepareClientErrOnListUnits() + return systemd + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + systemd := test.prepare() + require.True(t, systemd.Init()) + + if test.wantFail { + assert.False(t, systemd.Check()) + } else { + assert.True(t, systemd.Check()) + } + }) + } +} + +func TestSystemdUnits_Charts(t *testing.T) { + systemd := New() + require.True(t, systemd.Init()) + assert.NotNil(t, systemd.Charts()) +} + +func TestSystemdUnits_Cleanup(t *testing.T) { + systemd := New() + systemd.Include = []string{"*"} + client := prepareOKClient(230) + systemd.client = client + + require.True(t, systemd.Init()) + require.NotNil(t, systemd.Collect()) + conn := systemd.conn + systemd.Cleanup() + + assert.Nil(t, systemd.conn) + v, _ := conn.(*mockConn) + assert.True(t, v.closeCalled) +} + +func TestSystemdUnits_Collect(t *testing.T) { + tests := map[string]struct { + prepare func() *SystemdUnits + wantCollected map[string]int64 + }{ + "success on systemd v230+ on collecting all unit type": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*"} + systemd.client = prepareOKClient(230) + return systemd + }, + wantCollected: map[string]int64{ + "unit_dbus_socket_state_activating": 0, + "unit_dbus_socket_state_active": 1, + "unit_dbus_socket_state_deactivating": 0, + "unit_dbus_socket_state_failed": 0, + "unit_dbus_socket_state_inactive": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_activating": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_active": 1, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_deactivating": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_failed": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_inactive": 0, + "unit_dev-nvme0n1_device_state_activating": 0, + "unit_dev-nvme0n1_device_state_active": 1, + "unit_dev-nvme0n1_device_state_deactivating": 0, + "unit_dev-nvme0n1_device_state_failed": 0, + "unit_dev-nvme0n1_device_state_inactive": 0, + "unit_docker_socket_state_activating": 0, + "unit_docker_socket_state_active": 1, + "unit_docker_socket_state_deactivating": 0, + "unit_docker_socket_state_failed": 0, + "unit_docker_socket_state_inactive": 0, + "unit_getty-pre_target_state_activating": 0, + "unit_getty-pre_target_state_active": 0, + "unit_getty-pre_target_state_deactivating": 0, + "unit_getty-pre_target_state_failed": 0, + "unit_getty-pre_target_state_inactive": 1, + "unit_init_scope_state_activating": 0, + "unit_init_scope_state_active": 1, + "unit_init_scope_state_deactivating": 0, + "unit_init_scope_state_failed": 0, + "unit_init_scope_state_inactive": 0, + "unit_logrotate_timer_state_activating": 0, + "unit_logrotate_timer_state_active": 1, + "unit_logrotate_timer_state_deactivating": 0, + "unit_logrotate_timer_state_failed": 0, + "unit_logrotate_timer_state_inactive": 0, + "unit_lvm2-lvmetad_socket_state_activating": 0, + "unit_lvm2-lvmetad_socket_state_active": 1, + "unit_lvm2-lvmetad_socket_state_deactivating": 0, + "unit_lvm2-lvmetad_socket_state_failed": 0, + "unit_lvm2-lvmetad_socket_state_inactive": 0, + "unit_lvm2-lvmpolld_socket_state_activating": 0, + "unit_lvm2-lvmpolld_socket_state_active": 1, + "unit_lvm2-lvmpolld_socket_state_deactivating": 0, + "unit_lvm2-lvmpolld_socket_state_failed": 0, + "unit_lvm2-lvmpolld_socket_state_inactive": 0, + 
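+ // each discovered unit contributes one key per tracked state; exactly
+ // one of the five is 1 (the unit's current ActiveState), the rest are 0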
"unit_man-db_timer_state_activating": 0, + "unit_man-db_timer_state_active": 1, + "unit_man-db_timer_state_deactivating": 0, + "unit_man-db_timer_state_failed": 0, + "unit_man-db_timer_state_inactive": 0, + "unit_org.cups.cupsd_path_state_activating": 0, + "unit_org.cups.cupsd_path_state_active": 1, + "unit_org.cups.cupsd_path_state_deactivating": 0, + "unit_org.cups.cupsd_path_state_failed": 0, + "unit_org.cups.cupsd_path_state_inactive": 0, + "unit_pamac-cleancache_timer_state_activating": 0, + "unit_pamac-cleancache_timer_state_active": 1, + "unit_pamac-cleancache_timer_state_deactivating": 0, + "unit_pamac-cleancache_timer_state_failed": 0, + "unit_pamac-cleancache_timer_state_inactive": 0, + "unit_pamac-mirrorlist_timer_state_activating": 0, + "unit_pamac-mirrorlist_timer_state_active": 1, + "unit_pamac-mirrorlist_timer_state_deactivating": 0, + "unit_pamac-mirrorlist_timer_state_failed": 0, + "unit_pamac-mirrorlist_timer_state_inactive": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_activating": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_active": 1, + "unit_proc-sys-fs-binfmt_misc_automount_state_deactivating": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_failed": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_inactive": 0, + "unit_remote-fs-pre_target_state_activating": 0, + "unit_remote-fs-pre_target_state_active": 0, + "unit_remote-fs-pre_target_state_deactivating": 0, + "unit_remote-fs-pre_target_state_failed": 0, + "unit_remote-fs-pre_target_state_inactive": 1, + "unit_rpc_pipefs_target_state_activating": 0, + "unit_rpc_pipefs_target_state_active": 0, + "unit_rpc_pipefs_target_state_deactivating": 0, + "unit_rpc_pipefs_target_state_failed": 0, + "unit_rpc_pipefs_target_state_inactive": 1, + "unit_run-user-1000-gvfs_mount_state_activating": 0, + "unit_run-user-1000-gvfs_mount_state_active": 1, + "unit_run-user-1000-gvfs_mount_state_deactivating": 0, + "unit_run-user-1000-gvfs_mount_state_failed": 0, + "unit_run-user-1000-gvfs_mount_state_inactive": 0, + "unit_run-user-1000_mount_state_activating": 0, + "unit_run-user-1000_mount_state_active": 1, + "unit_run-user-1000_mount_state_deactivating": 0, + "unit_run-user-1000_mount_state_failed": 0, + "unit_run-user-1000_mount_state_inactive": 0, + "unit_session-1_scope_state_activating": 0, + "unit_session-1_scope_state_active": 1, + "unit_session-1_scope_state_deactivating": 0, + "unit_session-1_scope_state_failed": 0, + "unit_session-1_scope_state_inactive": 0, + "unit_session-2_scope_state_activating": 0, + "unit_session-2_scope_state_active": 1, + "unit_session-2_scope_state_deactivating": 0, + "unit_session-2_scope_state_failed": 0, + "unit_session-2_scope_state_inactive": 0, + "unit_session-3_scope_state_activating": 0, + "unit_session-3_scope_state_active": 1, + "unit_session-3_scope_state_deactivating": 0, + "unit_session-3_scope_state_failed": 0, + "unit_session-3_scope_state_inactive": 0, + "unit_session-6_scope_state_activating": 0, + "unit_session-6_scope_state_active": 1, + "unit_session-6_scope_state_deactivating": 0, + "unit_session-6_scope_state_failed": 0, + "unit_session-6_scope_state_inactive": 0, + "unit_shadow_timer_state_activating": 0, + "unit_shadow_timer_state_active": 1, + "unit_shadow_timer_state_deactivating": 0, + "unit_shadow_timer_state_failed": 0, + "unit_shadow_timer_state_inactive": 0, + "unit_sound_target_state_activating": 0, + "unit_sound_target_state_active": 1, + "unit_sound_target_state_deactivating": 0, + "unit_sound_target_state_failed": 0, + 
"unit_sound_target_state_inactive": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_activating": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_active": 1, + "unit_sys-devices-virtual-net-loopback1_device_state_deactivating": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_failed": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_inactive": 0, + "unit_sys-module-fuse_device_state_activating": 0, + "unit_sys-module-fuse_device_state_active": 1, + "unit_sys-module-fuse_device_state_deactivating": 0, + "unit_sys-module-fuse_device_state_failed": 0, + "unit_sys-module-fuse_device_state_inactive": 0, + "unit_sysinit_target_state_activating": 0, + "unit_sysinit_target_state_active": 1, + "unit_sysinit_target_state_deactivating": 0, + "unit_sysinit_target_state_failed": 0, + "unit_sysinit_target_state_inactive": 0, + "unit_system-getty_slice_state_activating": 0, + "unit_system-getty_slice_state_active": 1, + "unit_system-getty_slice_state_deactivating": 0, + "unit_system-getty_slice_state_failed": 0, + "unit_system-getty_slice_state_inactive": 0, + "unit_system-netctl_slice_state_activating": 0, + "unit_system-netctl_slice_state_active": 1, + "unit_system-netctl_slice_state_deactivating": 0, + "unit_system-netctl_slice_state_failed": 0, + "unit_system-netctl_slice_state_inactive": 0, + "unit_system-systemd-fsck_slice_state_activating": 0, + "unit_system-systemd-fsck_slice_state_active": 1, + "unit_system-systemd-fsck_slice_state_deactivating": 0, + "unit_system-systemd-fsck_slice_state_failed": 0, + "unit_system-systemd-fsck_slice_state_inactive": 0, + "unit_system_slice_state_activating": 0, + "unit_system_slice_state_active": 1, + "unit_system_slice_state_deactivating": 0, + "unit_system_slice_state_failed": 0, + "unit_system_slice_state_inactive": 0, + "unit_systemd-ask-password-console_path_state_activating": 0, + "unit_systemd-ask-password-console_path_state_active": 1, + "unit_systemd-ask-password-console_path_state_deactivating": 0, + "unit_systemd-ask-password-console_path_state_failed": 0, + "unit_systemd-ask-password-console_path_state_inactive": 0, + "unit_systemd-ask-password-wall_path_state_activating": 0, + "unit_systemd-ask-password-wall_path_state_active": 1, + "unit_systemd-ask-password-wall_path_state_deactivating": 0, + "unit_systemd-ask-password-wall_path_state_failed": 0, + "unit_systemd-ask-password-wall_path_state_inactive": 0, + "unit_systemd-ask-password-wall_service_state_activating": 0, + "unit_systemd-ask-password-wall_service_state_active": 0, + "unit_systemd-ask-password-wall_service_state_deactivating": 0, + "unit_systemd-ask-password-wall_service_state_failed": 0, + "unit_systemd-ask-password-wall_service_state_inactive": 1, + "unit_systemd-fsck-root_service_state_activating": 0, + "unit_systemd-fsck-root_service_state_active": 0, + "unit_systemd-fsck-root_service_state_deactivating": 0, + "unit_systemd-fsck-root_service_state_failed": 0, + "unit_systemd-fsck-root_service_state_inactive": 1, + "unit_systemd-udevd-kernel_socket_state_activating": 0, + "unit_systemd-udevd-kernel_socket_state_active": 1, + "unit_systemd-udevd-kernel_socket_state_deactivating": 0, + "unit_systemd-udevd-kernel_socket_state_failed": 0, + "unit_systemd-udevd-kernel_socket_state_inactive": 0, + "unit_tmp_mount_state_activating": 0, + "unit_tmp_mount_state_active": 1, + "unit_tmp_mount_state_deactivating": 0, + "unit_tmp_mount_state_failed": 0, + "unit_tmp_mount_state_inactive": 0, + "unit_user-runtime-dir@1000_service_state_activating": 0, + 
"unit_user-runtime-dir@1000_service_state_active": 1, + "unit_user-runtime-dir@1000_service_state_deactivating": 0, + "unit_user-runtime-dir@1000_service_state_failed": 0, + "unit_user-runtime-dir@1000_service_state_inactive": 0, + "unit_user@1000_service_state_activating": 0, + "unit_user@1000_service_state_active": 1, + "unit_user@1000_service_state_deactivating": 0, + "unit_user@1000_service_state_failed": 0, + "unit_user@1000_service_state_inactive": 0, + "unit_user_slice_state_activating": 0, + "unit_user_slice_state_active": 1, + "unit_user_slice_state_deactivating": 0, + "unit_user_slice_state_failed": 0, + "unit_user_slice_state_inactive": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_activating": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_active": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_deactivating": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_failed": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_inactive": 1, + }, + }, + "success on systemd v230- on collecting all unit types": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*"} + systemd.client = prepareOKClient(220) + return systemd + }, + wantCollected: map[string]int64{ + "unit_dbus_socket_state_activating": 0, + "unit_dbus_socket_state_active": 1, + "unit_dbus_socket_state_deactivating": 0, + "unit_dbus_socket_state_failed": 0, + "unit_dbus_socket_state_inactive": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_activating": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_active": 1, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_deactivating": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_failed": 0, + "unit_dev-disk-by-uuid-DE44-CEE0_device_state_inactive": 0, + "unit_dev-nvme0n1_device_state_activating": 0, + "unit_dev-nvme0n1_device_state_active": 1, + "unit_dev-nvme0n1_device_state_deactivating": 0, + "unit_dev-nvme0n1_device_state_failed": 0, + "unit_dev-nvme0n1_device_state_inactive": 0, + "unit_docker_socket_state_activating": 0, + "unit_docker_socket_state_active": 1, + "unit_docker_socket_state_deactivating": 0, + "unit_docker_socket_state_failed": 0, + "unit_docker_socket_state_inactive": 0, + "unit_getty-pre_target_state_activating": 0, + "unit_getty-pre_target_state_active": 0, + "unit_getty-pre_target_state_deactivating": 0, + "unit_getty-pre_target_state_failed": 0, + "unit_getty-pre_target_state_inactive": 1, + "unit_init_scope_state_activating": 0, + "unit_init_scope_state_active": 1, + "unit_init_scope_state_deactivating": 0, + "unit_init_scope_state_failed": 0, + "unit_init_scope_state_inactive": 0, + "unit_logrotate_timer_state_activating": 0, + "unit_logrotate_timer_state_active": 1, + "unit_logrotate_timer_state_deactivating": 0, + "unit_logrotate_timer_state_failed": 0, + "unit_logrotate_timer_state_inactive": 0, + "unit_lvm2-lvmetad_socket_state_activating": 0, + "unit_lvm2-lvmetad_socket_state_active": 1, + "unit_lvm2-lvmetad_socket_state_deactivating": 0, + "unit_lvm2-lvmetad_socket_state_failed": 0, + "unit_lvm2-lvmetad_socket_state_inactive": 0, + "unit_lvm2-lvmpolld_socket_state_activating": 0, + "unit_lvm2-lvmpolld_socket_state_active": 1, + "unit_lvm2-lvmpolld_socket_state_deactivating": 0, + "unit_lvm2-lvmpolld_socket_state_failed": 0, + "unit_lvm2-lvmpolld_socket_state_inactive": 0, + "unit_man-db_timer_state_activating": 0, + "unit_man-db_timer_state_active": 1, + "unit_man-db_timer_state_deactivating": 0, + "unit_man-db_timer_state_failed": 0, + "unit_man-db_timer_state_inactive": 0, + "unit_org.cups.cupsd_path_state_activating": 
0, + "unit_org.cups.cupsd_path_state_active": 1, + "unit_org.cups.cupsd_path_state_deactivating": 0, + "unit_org.cups.cupsd_path_state_failed": 0, + "unit_org.cups.cupsd_path_state_inactive": 0, + "unit_pamac-cleancache_timer_state_activating": 0, + "unit_pamac-cleancache_timer_state_active": 1, + "unit_pamac-cleancache_timer_state_deactivating": 0, + "unit_pamac-cleancache_timer_state_failed": 0, + "unit_pamac-cleancache_timer_state_inactive": 0, + "unit_pamac-mirrorlist_timer_state_activating": 0, + "unit_pamac-mirrorlist_timer_state_active": 1, + "unit_pamac-mirrorlist_timer_state_deactivating": 0, + "unit_pamac-mirrorlist_timer_state_failed": 0, + "unit_pamac-mirrorlist_timer_state_inactive": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_activating": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_active": 1, + "unit_proc-sys-fs-binfmt_misc_automount_state_deactivating": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_failed": 0, + "unit_proc-sys-fs-binfmt_misc_automount_state_inactive": 0, + "unit_remote-fs-pre_target_state_activating": 0, + "unit_remote-fs-pre_target_state_active": 0, + "unit_remote-fs-pre_target_state_deactivating": 0, + "unit_remote-fs-pre_target_state_failed": 0, + "unit_remote-fs-pre_target_state_inactive": 1, + "unit_rpc_pipefs_target_state_activating": 0, + "unit_rpc_pipefs_target_state_active": 0, + "unit_rpc_pipefs_target_state_deactivating": 0, + "unit_rpc_pipefs_target_state_failed": 0, + "unit_rpc_pipefs_target_state_inactive": 1, + "unit_run-user-1000-gvfs_mount_state_activating": 0, + "unit_run-user-1000-gvfs_mount_state_active": 1, + "unit_run-user-1000-gvfs_mount_state_deactivating": 0, + "unit_run-user-1000-gvfs_mount_state_failed": 0, + "unit_run-user-1000-gvfs_mount_state_inactive": 0, + "unit_run-user-1000_mount_state_activating": 0, + "unit_run-user-1000_mount_state_active": 1, + "unit_run-user-1000_mount_state_deactivating": 0, + "unit_run-user-1000_mount_state_failed": 0, + "unit_run-user-1000_mount_state_inactive": 0, + "unit_session-1_scope_state_activating": 0, + "unit_session-1_scope_state_active": 1, + "unit_session-1_scope_state_deactivating": 0, + "unit_session-1_scope_state_failed": 0, + "unit_session-1_scope_state_inactive": 0, + "unit_session-2_scope_state_activating": 0, + "unit_session-2_scope_state_active": 1, + "unit_session-2_scope_state_deactivating": 0, + "unit_session-2_scope_state_failed": 0, + "unit_session-2_scope_state_inactive": 0, + "unit_session-3_scope_state_activating": 0, + "unit_session-3_scope_state_active": 1, + "unit_session-3_scope_state_deactivating": 0, + "unit_session-3_scope_state_failed": 0, + "unit_session-3_scope_state_inactive": 0, + "unit_session-6_scope_state_activating": 0, + "unit_session-6_scope_state_active": 1, + "unit_session-6_scope_state_deactivating": 0, + "unit_session-6_scope_state_failed": 0, + "unit_session-6_scope_state_inactive": 0, + "unit_shadow_timer_state_activating": 0, + "unit_shadow_timer_state_active": 1, + "unit_shadow_timer_state_deactivating": 0, + "unit_shadow_timer_state_failed": 0, + "unit_shadow_timer_state_inactive": 0, + "unit_sound_target_state_activating": 0, + "unit_sound_target_state_active": 1, + "unit_sound_target_state_deactivating": 0, + "unit_sound_target_state_failed": 0, + "unit_sound_target_state_inactive": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_activating": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_active": 1, + "unit_sys-devices-virtual-net-loopback1_device_state_deactivating": 0, + 
"unit_sys-devices-virtual-net-loopback1_device_state_failed": 0, + "unit_sys-devices-virtual-net-loopback1_device_state_inactive": 0, + "unit_sys-module-fuse_device_state_activating": 0, + "unit_sys-module-fuse_device_state_active": 1, + "unit_sys-module-fuse_device_state_deactivating": 0, + "unit_sys-module-fuse_device_state_failed": 0, + "unit_sys-module-fuse_device_state_inactive": 0, + "unit_sysinit_target_state_activating": 0, + "unit_sysinit_target_state_active": 1, + "unit_sysinit_target_state_deactivating": 0, + "unit_sysinit_target_state_failed": 0, + "unit_sysinit_target_state_inactive": 0, + "unit_system-getty_slice_state_activating": 0, + "unit_system-getty_slice_state_active": 1, + "unit_system-getty_slice_state_deactivating": 0, + "unit_system-getty_slice_state_failed": 0, + "unit_system-getty_slice_state_inactive": 0, + "unit_system-netctl_slice_state_activating": 0, + "unit_system-netctl_slice_state_active": 1, + "unit_system-netctl_slice_state_deactivating": 0, + "unit_system-netctl_slice_state_failed": 0, + "unit_system-netctl_slice_state_inactive": 0, + "unit_system-systemd-fsck_slice_state_activating": 0, + "unit_system-systemd-fsck_slice_state_active": 1, + "unit_system-systemd-fsck_slice_state_deactivating": 0, + "unit_system-systemd-fsck_slice_state_failed": 0, + "unit_system-systemd-fsck_slice_state_inactive": 0, + "unit_system_slice_state_activating": 0, + "unit_system_slice_state_active": 1, + "unit_system_slice_state_deactivating": 0, + "unit_system_slice_state_failed": 0, + "unit_system_slice_state_inactive": 0, + "unit_systemd-ask-password-console_path_state_activating": 0, + "unit_systemd-ask-password-console_path_state_active": 1, + "unit_systemd-ask-password-console_path_state_deactivating": 0, + "unit_systemd-ask-password-console_path_state_failed": 0, + "unit_systemd-ask-password-console_path_state_inactive": 0, + "unit_systemd-ask-password-wall_path_state_activating": 0, + "unit_systemd-ask-password-wall_path_state_active": 1, + "unit_systemd-ask-password-wall_path_state_deactivating": 0, + "unit_systemd-ask-password-wall_path_state_failed": 0, + "unit_systemd-ask-password-wall_path_state_inactive": 0, + "unit_systemd-ask-password-wall_service_state_activating": 0, + "unit_systemd-ask-password-wall_service_state_active": 0, + "unit_systemd-ask-password-wall_service_state_deactivating": 0, + "unit_systemd-ask-password-wall_service_state_failed": 0, + "unit_systemd-ask-password-wall_service_state_inactive": 1, + "unit_systemd-fsck-root_service_state_activating": 0, + "unit_systemd-fsck-root_service_state_active": 0, + "unit_systemd-fsck-root_service_state_deactivating": 0, + "unit_systemd-fsck-root_service_state_failed": 0, + "unit_systemd-fsck-root_service_state_inactive": 1, + "unit_systemd-udevd-kernel_socket_state_activating": 0, + "unit_systemd-udevd-kernel_socket_state_active": 1, + "unit_systemd-udevd-kernel_socket_state_deactivating": 0, + "unit_systemd-udevd-kernel_socket_state_failed": 0, + "unit_systemd-udevd-kernel_socket_state_inactive": 0, + "unit_tmp_mount_state_activating": 0, + "unit_tmp_mount_state_active": 1, + "unit_tmp_mount_state_deactivating": 0, + "unit_tmp_mount_state_failed": 0, + "unit_tmp_mount_state_inactive": 0, + "unit_user-runtime-dir@1000_service_state_activating": 0, + "unit_user-runtime-dir@1000_service_state_active": 1, + "unit_user-runtime-dir@1000_service_state_deactivating": 0, + "unit_user-runtime-dir@1000_service_state_failed": 0, + "unit_user-runtime-dir@1000_service_state_inactive": 0, + 
"unit_user@1000_service_state_activating": 0, + "unit_user@1000_service_state_active": 1, + "unit_user@1000_service_state_deactivating": 0, + "unit_user@1000_service_state_failed": 0, + "unit_user@1000_service_state_inactive": 0, + "unit_user_slice_state_activating": 0, + "unit_user_slice_state_active": 1, + "unit_user_slice_state_deactivating": 0, + "unit_user_slice_state_failed": 0, + "unit_user_slice_state_inactive": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_activating": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_active": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_deactivating": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_failed": 0, + "unit_var-lib-nfs-rpc_pipefs_mount_state_inactive": 1, + }, + }, + "success on systemd v230+ on collecting only 'service' unit type": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*.service"} + systemd.client = prepareOKClient(230) + return systemd + }, + wantCollected: map[string]int64{ + "unit_systemd-ask-password-wall_service_state_activating": 0, + "unit_systemd-ask-password-wall_service_state_active": 0, + "unit_systemd-ask-password-wall_service_state_deactivating": 0, + "unit_systemd-ask-password-wall_service_state_failed": 0, + "unit_systemd-ask-password-wall_service_state_inactive": 1, + "unit_systemd-fsck-root_service_state_activating": 0, + "unit_systemd-fsck-root_service_state_active": 0, + "unit_systemd-fsck-root_service_state_deactivating": 0, + "unit_systemd-fsck-root_service_state_failed": 0, + "unit_systemd-fsck-root_service_state_inactive": 1, + "unit_user-runtime-dir@1000_service_state_activating": 0, + "unit_user-runtime-dir@1000_service_state_active": 1, + "unit_user-runtime-dir@1000_service_state_deactivating": 0, + "unit_user-runtime-dir@1000_service_state_failed": 0, + "unit_user-runtime-dir@1000_service_state_inactive": 0, + "unit_user@1000_service_state_activating": 0, + "unit_user@1000_service_state_active": 1, + "unit_user@1000_service_state_deactivating": 0, + "unit_user@1000_service_state_failed": 0, + "unit_user@1000_service_state_inactive": 0, + }, + }, + "success on systemd v230- on collecting only 'service' unit type": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.Include = []string{"*.service"} + systemd.client = prepareOKClient(220) + return systemd + }, + wantCollected: map[string]int64{ + "unit_systemd-ask-password-wall_service_state_activating": 0, + "unit_systemd-ask-password-wall_service_state_active": 0, + "unit_systemd-ask-password-wall_service_state_deactivating": 0, + "unit_systemd-ask-password-wall_service_state_failed": 0, + "unit_systemd-ask-password-wall_service_state_inactive": 1, + "unit_systemd-fsck-root_service_state_activating": 0, + "unit_systemd-fsck-root_service_state_active": 0, + "unit_systemd-fsck-root_service_state_deactivating": 0, + "unit_systemd-fsck-root_service_state_failed": 0, + "unit_systemd-fsck-root_service_state_inactive": 1, + "unit_user-runtime-dir@1000_service_state_activating": 0, + "unit_user-runtime-dir@1000_service_state_active": 1, + "unit_user-runtime-dir@1000_service_state_deactivating": 0, + "unit_user-runtime-dir@1000_service_state_failed": 0, + "unit_user-runtime-dir@1000_service_state_inactive": 0, + "unit_user@1000_service_state_activating": 0, + "unit_user@1000_service_state_active": 1, + "unit_user@1000_service_state_deactivating": 0, + "unit_user@1000_service_state_failed": 0, + "unit_user@1000_service_state_inactive": 0, + }, + }, + "fails when all unites are filtered": { + prepare: func() 
*SystemdUnits { + systemd := New() + systemd.Include = []string{"*.not_exists"} + systemd.client = prepareOKClient(230) + return systemd + }, + wantCollected: nil, + }, + "fails on error on connect": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.client = prepareClientErrOnConnect() + return systemd + }, + wantCollected: nil, + }, + "fails on error on get manager property": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.client = prepareClientErrOnGetManagerProperty() + return systemd + }, + wantCollected: nil, + }, + "fails on error on list units": { + prepare: func() *SystemdUnits { + systemd := New() + systemd.client = prepareClientErrOnListUnits() + return systemd + }, + wantCollected: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + systemd := test.prepare() + require.True(t, systemd.Init()) + + var collected map[string]int64 + + for i := 0; i < 10; i++ { + collected = systemd.Collect() + } + + assert.Equal(t, test.wantCollected, collected) + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, systemd, collected) + } + }) + } +} + +func TestSystemdUnits_connectionReuse(t *testing.T) { + systemd := New() + systemd.Include = []string{"*"} + client := prepareOKClient(230) + systemd.client = client + require.True(t, systemd.Init()) + + var collected map[string]int64 + for i := 0; i < 10; i++ { + collected = systemd.Collect() + } + + assert.NotEmpty(t, collected) + assert.Equal(t, 1, client.connectCalls) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, sd *SystemdUnits, collected map[string]int64) { + for _, chart := range *sd.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareOKClient(ver int) *mockClient { + return &mockClient{ + conn: &mockConn{ + version: ver, + units: mockSystemdUnits, + }, + } +} + +func prepareClientErrOnConnect() *mockClient { + return &mockClient{ + errOnConnect: true, + } +} + +func prepareClientErrOnGetManagerProperty() *mockClient { + return &mockClient{ + conn: &mockConn{ + version: 230, + errOnGetManagerProperty: true, + units: mockSystemdUnits, + }, + } +} + +func prepareClientErrOnListUnits() *mockClient { + return &mockClient{ + conn: &mockConn{ + version: 230, + errOnListUnits: true, + units: mockSystemdUnits, + }, + } +} + +type mockClient struct { + conn systemdConnection + connectCalls int + errOnConnect bool +} + +func (m *mockClient) connect() (systemdConnection, error) { + m.connectCalls++ + if m.errOnConnect { + return nil, errors.New("mock 'connect' error") + } + return m.conn, nil +} + +type mockConn struct { + version int + units []dbus.UnitStatus + errOnGetManagerProperty bool + errOnListUnits bool + closeCalled bool +} + +func (m *mockConn) Close() { + m.closeCalled = true +} + +func (m *mockConn) GetManagerProperty(prop string) (string, error) { + if m.errOnGetManagerProperty { + return "", errors.New("'GetManagerProperty' call error") + } + if prop != versionProperty { + return "", fmt.Errorf("'GetManagerProperty' unknown property: %s", prop) + } + return fmt.Sprintf("%d.6-1-manjaro", m.version), nil +} + +func (m *mockConn) ListUnitsContext(_ context.Context) ([]dbus.UnitStatus, error) { + if
m.errOnListUnits { + return nil, errors.New("'ListUnits' call error") + } + if m.version >= 230 { + return nil, errors.New("'ListUnits' unsupported function error") + } + return append([]dbus.UnitStatus{}, m.units...), nil +} + +func (m *mockConn) ListUnitsByPatternsContext(_ context.Context, _ []string, ps []string) ([]dbus.UnitStatus, error) { + if m.errOnListUnits { + return nil, errors.New("'ListUnitsByPatterns' call error") + } + if m.version < 230 { + return nil, errors.New("'ListUnitsByPatterns' unsupported function error") + } + + matches := func(name string) bool { + for _, p := range ps { + if ok, _ := filepath.Match(p, name); ok { + return true + } + } + return false + } + + var units []dbus.UnitStatus + for _, unit := range m.units { + if matches(unit.Name) { + units = append(units, unit) + } + } + return units, nil +} + +var mockSystemdUnits = []dbus.UnitStatus{ + {Name: `proc-sys-fs-binfmt_misc.automount`, LoadState: "loaded", ActiveState: "active"}, + {Name: `dev-nvme0n1.device`, LoadState: "loaded", ActiveState: "active"}, + {Name: `sys-devices-virtual-net-loopback1.device`, LoadState: "loaded", ActiveState: "active"}, + {Name: `sys-module-fuse.device`, LoadState: "loaded", ActiveState: "active"}, + {Name: `dev-disk-by\x2duuid-DE44\x2dCEE0.device`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `var-lib-nfs-rpc_pipefs.mount`, LoadState: "loaded", ActiveState: "inactive"}, + {Name: `var.mount`, LoadState: "not-found", ActiveState: "inactive"}, + {Name: `run-user-1000.mount`, LoadState: "loaded", ActiveState: "active"}, + {Name: `tmp.mount`, LoadState: "loaded", ActiveState: "active"}, + {Name: `run-user-1000-gvfs.mount`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `org.cups.cupsd.path`, LoadState: "loaded", ActiveState: "active"}, + {Name: `systemd-ask-password-wall.path`, LoadState: "loaded", ActiveState: "active"}, + {Name: `systemd-ask-password-console.path`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `init.scope`, LoadState: "loaded", ActiveState: "active"}, + {Name: `session-3.scope`, LoadState: "loaded", ActiveState: "active"}, + {Name: `session-6.scope`, LoadState: "loaded", ActiveState: "active"}, + {Name: `session-1.scope`, LoadState: "loaded", ActiveState: "active"}, + {Name: `session-2.scope`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `systemd-fsck-root.service`, LoadState: "loaded", ActiveState: "inactive"}, + {Name: `httpd.service`, LoadState: "not-found", ActiveState: "inactive"}, + {Name: `user-runtime-dir@1000.service`, LoadState: "loaded", ActiveState: "active"}, + {Name: `systemd-ask-password-wall.service`, LoadState: "loaded", ActiveState: "inactive"}, + {Name: `user@1000.service`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `user.slice`, LoadState: "loaded", ActiveState: "active"}, + {Name: `system-getty.slice`, LoadState: "loaded", ActiveState: "active"}, + {Name: `system-netctl.slice`, LoadState: "loaded", ActiveState: "active"}, + {Name: `system.slice`, LoadState: "loaded", ActiveState: "active"}, + {Name: `system-systemd\x2dfsck.slice`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `lvm2-lvmpolld.socket`, LoadState: "loaded", ActiveState: "active"}, + {Name: `docker.socket`, LoadState: "loaded", ActiveState: "active"}, + {Name: `systemd-udevd-kernel.socket`, LoadState: "loaded", ActiveState: "active"}, + {Name: `dbus.socket`, LoadState: "loaded", ActiveState: "active"}, + {Name: `lvm2-lvmetad.socket`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `getty-pre.target`, 
LoadState: "loaded", ActiveState: "inactive"}, + {Name: `rpc_pipefs.target`, LoadState: "loaded", ActiveState: "inactive"}, + {Name: `remote-fs-pre.target`, LoadState: "loaded", ActiveState: "inactive"}, + {Name: `sysinit.target`, LoadState: "loaded", ActiveState: "active"}, + {Name: `sound.target`, LoadState: "loaded", ActiveState: "active"}, + + {Name: `man-db.timer`, LoadState: "loaded", ActiveState: "active"}, + {Name: `pamac-mirrorlist.timer`, LoadState: "loaded", ActiveState: "active"}, + {Name: `pamac-cleancache.timer`, LoadState: "loaded", ActiveState: "active"}, + {Name: `shadow.timer`, LoadState: "loaded", ActiveState: "active"}, + {Name: `logrotate.timer`, LoadState: "loaded", ActiveState: "active"}, +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/README.md b/src/go/collectors/go.d.plugin/modules/tengine/README.md new file mode 120000 index 00000000000000..e016ea0c76fb0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/README.md @@ -0,0 +1 @@ +integrations/tengine.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/tengine/apiclient.go b/src/go/collectors/go.d.plugin/modules/tengine/apiclient.go new file mode 100644 index 00000000000000..5b39c60549ac5b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/apiclient.go @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package tengine + +import ( + "bufio" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +const ( + bytesIn = "bytes_in" + bytesOut = "bytes_out" + connTotal = "conn_total" + reqTotal = "req_total" + http2xx = "http_2xx" + http3xx = "http_3xx" + http4xx = "http_4xx" + http5xx = "http_5xx" + httpOtherStatus = "http_other_status" + rt = "rt" + upsReq = "ups_req" + upsRT = "ups_rt" + upsTries = "ups_tries" + http200 = "http_200" + http206 = "http_206" + http302 = "http_302" + http304 = "http_304" + http403 = "http_403" + http404 = "http_404" + http416 = "http_416" + http499 = "http_499" + http500 = "http_500" + http502 = "http_502" + http503 = "http_503" + http504 = "http_504" + http508 = "http_508" + httpOtherDetailStatus = "http_other_detail_status" + httpUps4xx = "http_ups_4xx" + httpUps5xx = "http_ups_5xx" +) + +var defaultLineFormat = []string{ + bytesIn, + bytesOut, + connTotal, + reqTotal, + http2xx, + http3xx, + http4xx, + http5xx, + httpOtherStatus, + rt, + upsReq, + upsRT, + upsTries, + http200, + http206, + http302, + http304, + http403, + http404, + http416, + http499, + http500, + http502, + http503, + http504, + http508, + httpOtherDetailStatus, + httpUps4xx, + httpUps5xx, +} + +func newAPIClient(client *http.Client, request web.Request) *apiClient { + return &apiClient{httpClient: client, request: request} +} + +type apiClient struct { + httpClient *http.Client + request web.Request +} + +func (a apiClient) getStatus() (*tengineStatus, error) { + req, err := web.NewHTTPRequest(a.request) + if err != nil { + return nil, fmt.Errorf("error on creating request : %v", err) + } + + resp, err := a.doRequestOK(req) + defer closeBody(resp) + if err != nil { + return nil, err + } + + status, err := parseStatus(resp.Body) + if err != nil { + return nil, fmt.Errorf("error on parsing response : %v", err) + } + + return status, nil +} + +func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { + resp, err := a.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("error on request : %v", err) + } + if resp.StatusCode != http.StatusOK { + return resp, 
fmt.Errorf("%s returned HTTP code %d", req.URL, resp.StatusCode) + } + return resp, nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} + +func parseStatus(r io.Reader) (*tengineStatus, error) { + var status tengineStatus + + s := bufio.NewScanner(r) + for s.Scan() { + m, err := parseStatusLine(s.Text(), defaultLineFormat) + if err != nil { + return nil, err + } + status = append(status, *m) + } + + return &status, nil +} + +func parseStatusLine(line string, lineFormat []string) (*metric, error) { + parts := strings.Split(line, ",") + + // NOTE: only default line format is supported + // TODO: custom line format? + // www.example.com,127.0.0.1:80,162,6242,1,1,1,0,0,0,0,10,1,10,1.... + i := findFirstInt(parts) + if i == -1 { + return nil, fmt.Errorf("invalid line : %s", line) + } + if len(parts[i:]) != len(lineFormat) { + return nil, fmt.Errorf("invalid line length, got %d, expected %d, line : %s", + len(parts[i:]), len(lineFormat), line) + } + + // skip "$host,$server_addr:$server_port" + parts = parts[i:] + + var m metric + for i, key := range lineFormat { + value := mustParseInt(parts[i]) + switch key { + default: + return nil, fmt.Errorf("unknown line format key: %s", key) + case bytesIn: + m.BytesIn = value + case bytesOut: + m.BytesOut = value + case connTotal: + m.ConnTotal = value + case reqTotal: + m.ReqTotal = value + case http2xx: + m.HTTP2xx = value + case http3xx: + m.HTTP3xx = value + case http4xx: + m.HTTP4xx = value + case http5xx: + m.HTTP5xx = value + case httpOtherStatus: + m.HTTPOtherStatus = value + case rt: + m.RT = value + case upsReq: + m.UpsReq = value + case upsRT: + m.UpsRT = value + case upsTries: + m.UpsTries = value + case http200: + m.HTTP200 = value + case http206: + m.HTTP206 = value + case http302: + m.HTTP302 = value + case http304: + m.HTTP304 = value + case http403: + m.HTTP403 = value + case http404: + m.HTTP404 = value + case http416: + m.HTTP416 = value + case http499: + m.HTTP499 = value + case http500: + m.HTTP500 = value + case http502: + m.HTTP502 = value + case http503: + m.HTTP503 = value + case http504: + m.HTTP504 = value + case http508: + m.HTTP508 = value + case httpOtherDetailStatus: + m.HTTPOtherDetailStatus = value + case httpUps4xx: + m.HTTPUps4xx = value + case httpUps5xx: + m.HTTPUps5xx = value + } + } + return &m, nil +} + +func findFirstInt(s []string) int { + for i, v := range s { + _, err := strconv.ParseInt(v, 10, 64) + if err != nil { + continue + } + return i + } + return -1 +} + +func mustParseInt(value string) *int64 { + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + panic(err) + } + + return &v +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/charts.go b/src/go/collectors/go.d.plugin/modules/tengine/charts.go new file mode 100644 index 00000000000000..c929a3183ca962 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/charts.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package tengine + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Dims is an alias for module.Dims + Dims = module.Dims +) + +var charts = Charts{ + { + ID: "bandwidth_total", + Title: "Bandwidth", + Units: "B/s", + Fam: "bandwidth", + Ctx: "tengine.bandwidth_total", + Type: module.Area, + Dims: Dims{ + {ID: "bytes_in", Name: "in", Algo: module.Incremental}, + {ID: "bytes_out", Name: "out", Algo: 
module.Incremental, Mul: -1},
+		},
+	},
+	{
+		ID:    "connections_total",
+		Title: "Connections",
+		Units: "connections/s",
+		Fam:   "connections",
+		Ctx:   "tengine.connections_total",
+		Dims: Dims{
+			{ID: "conn_total", Name: "accepted", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "requests_total",
+		Title: "Requests",
+		Units: "requests/s",
+		Fam:   "requests",
+		Ctx:   "tengine.requests_total",
+		Dims: Dims{
+			{ID: "req_total", Name: "processed", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "requests_per_response_code_family_total",
+		Title: "Requests Per Response Code Family",
+		Units: "requests/s",
+		Fam:   "requests",
+		Ctx:   "tengine.requests_per_response_code_family_total",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: "http_2xx", Name: "2xx", Algo: module.Incremental},
+			{ID: "http_5xx", Name: "5xx", Algo: module.Incremental},
+			{ID: "http_3xx", Name: "3xx", Algo: module.Incremental},
+			{ID: "http_4xx", Name: "4xx", Algo: module.Incremental},
+			{ID: "http_other_status", Name: "other", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "requests_per_response_code_detailed_total",
+		Title: "Requests Per Response Code Detailed",
+		Units: "requests/s",
+		Ctx:   "tengine.requests_per_response_code_detailed_total",
+		Fam:   "requests",
+		Type:  module.Stacked,
+		Dims: Dims{
+			{ID: "http_200", Name: "200", Algo: module.Incremental},
+			{ID: "http_206", Name: "206", Algo: module.Incremental},
+			{ID: "http_302", Name: "302", Algo: module.Incremental},
+			{ID: "http_304", Name: "304", Algo: module.Incremental},
+			{ID: "http_403", Name: "403", Algo: module.Incremental},
+			{ID: "http_404", Name: "404", Algo: module.Incremental},
+			{ID: "http_416", Name: "416", Algo: module.Incremental},
+			{ID: "http_499", Name: "499", Algo: module.Incremental},
+			{ID: "http_500", Name: "500", Algo: module.Incremental},
+			{ID: "http_502", Name: "502", Algo: module.Incremental},
+			{ID: "http_503", Name: "503", Algo: module.Incremental},
+			{ID: "http_504", Name: "504", Algo: module.Incremental},
+			{ID: "http_508", Name: "508", Algo: module.Incremental},
+			{ID: "http_other_detail_status", Name: "other", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "requests_upstream_total",
+		Title: "Number Of Requests Calling For Upstream",
+		Units: "requests/s",
+		Fam:   "upstream",
+		Ctx:   "tengine.requests_upstream_total",
+		Dims: Dims{
+			{ID: "ups_req", Name: "requests", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "tries_upstream_total",
+		Title: "Number Of Times Calling For Upstream",
+		Units: "calls/s",
+		Fam:   "upstream",
+		Ctx:   "tengine.tries_upstream_total",
+		Dims: Dims{
+			{ID: "ups_tries", Name: "calls", Algo: module.Incremental},
+		},
+	},
+	{
+		ID:    "requests_upstream_per_response_code_family_total",
+		Title: "Upstream Requests Per Response Code Family",
+		Units: "requests/s",
+		Fam:   "upstream",
+		Type:  module.Stacked,
+		Ctx:   "tengine.requests_upstream_per_response_code_family_total",
+		Dims: Dims{
+			{ID: "http_ups_4xx", Name: "4xx", Algo: module.Incremental},
+			{ID: "http_ups_5xx", Name: "5xx", Algo: module.Incremental},
+		},
+	},
+}
diff --git a/src/go/collectors/go.d.plugin/modules/tengine/collect.go b/src/go/collectors/go.d.plugin/modules/tengine/collect.go
new file mode 100644
index 00000000000000..423f343d448347
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/tengine/collect.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import (
+	"github.com/netdata/go.d.plugin/pkg/stm"
+)
+
+func (t *Tengine) collect() (map[string]int64, error) {
+	status, err := t.apiClient.getStatus()
+	if
err != nil { + return nil, err + } + + mx := make(map[string]int64) + for _, m := range *status { + for k, v := range stm.ToMap(m) { + mx[k] += v + } + } + return mx, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/config_schema.json b/src/go/collectors/go.d.plugin/modules/tengine/config_schema.json new file mode 100644 index 00000000000000..30958bb1b4677e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/tengine job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/integrations/tengine.md b/src/go/collectors/go.d.plugin/modules/tengine/integrations/tengine.md new file mode 100644 index 00000000000000..c444d5cae91b2d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/integrations/tengine.md @@ -0,0 +1,232 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/tengine/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/tengine/metadata.yaml" +sidebar_label: "Tengine" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Tengine + + +<img src="https://netdata.cloud/img/tengine.jpeg" width="150"/> + + +Plugin: go.d.plugin +Module: tengine + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Tengine servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Tengine instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tengine.bandwidth_total | in, out | B/s |
+| tengine.connections_total | accepted | connections/s |
+| tengine.requests_total | processed | requests/s |
+| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |
+| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 416, 499, 500, 502, 503, 504, 508, other | requests/s |
+| tengine.requests_upstream_total | requests | requests/s |
+| tengine.tries_upstream_total | calls | calls/s |
+| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable ngx_http_reqstat_module module.
+
+To enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).
+The default line format is the only supported format.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/tengine.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/tengine.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/us | yes |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/us
+
+```
+</details>
+
+##### HTTP authentication
+
+Local server with basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/us
+    username: foo
+    password: bar
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Tengine with enabled HTTPS and self-signed certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1/us
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1/us
+
+  - name: remote
+    url: http://203.0.113.10/us
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m tengine
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/tengine/metadata.yaml b/src/go/collectors/go.d.plugin/modules/tengine/metadata.yaml
new file mode 100644
index 00000000000000..b0778c9fc6119d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/tengine/metadata.yaml
@@ -0,0 +1,245 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-tengine
+      plugin_name: go.d.plugin
+      module_name: tengine
+      monitored_instance:
+        name: Tengine
+        link: https://tengine.taobao.org/
+        icon_filename: tengine.jpeg
+        categories:
+          - data-collection.web-servers-and-web-proxies
+      keywords:
+        - tengine
+        - web
+        - webserver
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors Tengine servers.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Enable ngx_http_reqstat_module module.
+            description: |
+              To enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).
+              The default line format is the only supported format.
+      configuration:
+        file:
+          name: go.d/tengine.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1/us
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 2
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+ default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1/us + - name: HTTP authentication + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1/us + username: foo + password: bar + - name: HTTPS with self-signed certificate + description: Tengine with enabled HTTPS and self-signed certificate. + config: | + jobs: + - name: local + url: https://127.0.0.1/us + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + url: http://127.0.0.1/us + + - name: remote + url: http://203.0.113.10/us + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. 
+          labels: []
+          metrics:
+            - name: tengine.bandwidth_total
+              description: Bandwidth
+              unit: B/s
+              chart_type: area
+              dimensions:
+                - name: in
+                - name: out
+            - name: tengine.connections_total
+              description: Connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: accepted
+            - name: tengine.requests_total
+              description: Requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: processed
+            - name: tengine.requests_per_response_code_family_total
+              description: Requests Per Response Code Family
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: 2xx
+                - name: 3xx
+                - name: 4xx
+                - name: 5xx
+                - name: other
+            - name: tengine.requests_per_response_code_detailed_total
+              description: Requests Per Response Code Detailed
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: "200"
+                - name: "206"
+                - name: "302"
+                - name: "304"
+                - name: "403"
+                - name: "404"
+                - name: "416"
+                - name: "499"
+                - name: "500"
+                - name: "502"
+                - name: "503"
+                - name: "504"
+                - name: "508"
+                - name: other
+            - name: tengine.requests_upstream_total
+              description: Number Of Requests Calling For Upstream
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: tengine.tries_upstream_total
+              description: Number Of Times Calling For Upstream
+              unit: calls/s
+              chart_type: line
+              dimensions:
+                - name: calls
+            - name: tengine.requests_upstream_per_response_code_family_total
+              description: Upstream Requests Per Response Code Family
+              unit: requests/s
+              chart_type: stacked
+              dimensions:
+                - name: 4xx
+                - name: 5xx
diff --git a/src/go/collectors/go.d.plugin/modules/tengine/metrics.go b/src/go/collectors/go.d.plugin/modules/tengine/metrics.go
new file mode 100644
index 00000000000000..425559479b2a8f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/tengine/metrics.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+/*
+http://tengine.taobao.org/document/http_reqstat.html
+
+bytes_in                 total number of bytes received from client
+bytes_out                total number of bytes sent to client
+conn_total               total number of accepted connections
+req_total                total number of processed requests
+http_2xx                 total number of 2xx requests
+http_3xx                 total number of 3xx requests
+http_4xx                 total number of 4xx requests
+http_5xx                 total number of 5xx requests
+http_other_status        total number of other requests
+rt                       accumulation of rt
+ups_req                  total number of requests calling for upstream
+ups_rt                   accumulation of upstream rt
+ups_tries                total number of times calling for upstream
+http_200                 total number of 200 requests
+http_206                 total number of 206 requests
+http_302                 total number of 302 requests
+http_304                 total number of 304 requests
+http_403                 total number of 403 requests
+http_404                 total number of 404 requests
+http_416                 total number of 416 requests
+http_499                 total number of 499 requests
+http_500                 total number of 500 requests
+http_502                 total number of 502 requests
+http_503                 total number of 503 requests
+http_504                 total number of 504 requests
+http_508                 total number of 508 requests
+http_other_detail_status total number of requests of other status codes
+http_ups_4xx             total number of requests of upstream 4xx
+http_ups_5xx             total number of requests of upstream 5xx
+*/
+
+type (
+	tengineStatus []metric
+
+	metric struct {
+		Host          string
+		ServerAddress string
+		BytesIn       *int64 `stm:"bytes_in"`
+		BytesOut      *int64 `stm:"bytes_out"`
+		ConnTotal     *int64 `stm:"conn_total"`
+		ReqTotal      *int64 `stm:"req_total"`
+		HTTP2xx       *int64 `stm:"http_2xx"`
+		HTTP3xx       *int64 `stm:"http_3xx"`
HTTP4xx *int64 `stm:"http_4xx"` + HTTP5xx *int64 `stm:"http_5xx"` + HTTPOtherStatus *int64 `stm:"http_other_status"` + RT *int64 `stm:"rt"` + UpsReq *int64 `stm:"ups_req"` + UpsRT *int64 `stm:"ups_rt"` + UpsTries *int64 `stm:"ups_tries"` + HTTP200 *int64 `stm:"http_200"` + HTTP206 *int64 `stm:"http_206"` + HTTP302 *int64 `stm:"http_302"` + HTTP304 *int64 `stm:"http_304"` + HTTP403 *int64 `stm:"http_403"` + HTTP404 *int64 `stm:"http_404"` + HTTP416 *int64 `stm:"http_416"` + HTTP499 *int64 `stm:"http_499"` + HTTP500 *int64 `stm:"http_500"` + HTTP502 *int64 `stm:"http_502"` + HTTP503 *int64 `stm:"http_503"` + HTTP504 *int64 `stm:"http_504"` + HTTP508 *int64 `stm:"http_508"` + HTTPOtherDetailStatus *int64 `stm:"http_other_detail_status"` + HTTPUps4xx *int64 `stm:"http_ups_4xx"` + HTTPUps5xx *int64 `stm:"http_ups_5xx"` + } +) diff --git a/src/go/collectors/go.d.plugin/modules/tengine/tengine.go b/src/go/collectors/go.d.plugin/modules/tengine/tengine.go new file mode 100644 index 00000000000000..169b390abab47c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/tengine.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package tengine + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("tengine", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +const ( + defaultURL = "http://127.0.0.1/us" + defaultHTTPTimeout = time.Second * 2 +) + +// New creates Tengine with default values. +func New() *Tengine { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: defaultURL, + }, + Client: web.Client{ + Timeout: web.Duration{Duration: defaultHTTPTimeout}, + }, + }, + } + return &Tengine{Config: config} +} + +// Config is the Tengine module configuration. +type Config struct { + web.HTTP `yaml:",inline"` +} + +// Tengine Tengine module. +type Tengine struct { + module.Base + Config `yaml:",inline"` + + apiClient *apiClient +} + +// Cleanup makes cleanup. +func (Tengine) Cleanup() {} + +// Init makes initialization. +func (t *Tengine) Init() bool { + if t.URL == "" { + t.Error("URL not set") + return false + } + + client, err := web.NewHTTPClient(t.Client) + if err != nil { + t.Errorf("error on creating http client : %v", err) + return false + } + + t.apiClient = newAPIClient(client, t.Request) + + t.Debugf("using URL: %s", t.URL) + t.Debugf("using timeout: %s", t.Timeout.Duration) + return true +} + +// Check makes check +func (t *Tengine) Check() bool { + return len(t.Collect()) > 0 +} + +// Charts returns Charts. +func (t Tengine) Charts() *module.Charts { + return charts.Copy() +} + +// Collect collects metrics. 
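+// A nil return marks the collection attempt as failed; Check (above) relies on
+// Collect returning a non-empty map to report the job as working.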
+func (t *Tengine) Collect() map[string]int64 { + mx, err := t.collect() + + if err != nil { + t.Error(err) + return nil + } + + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/tengine_test.go b/src/go/collectors/go.d.plugin/modules/tengine/tengine_test.go new file mode 100644 index 00000000000000..04fe5f9e7fcc79 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/tengine_test.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package tengine + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testStatusData, _ = os.ReadFile("testdata/status.txt") +) + +func TestTengine_Cleanup(t *testing.T) { New().Cleanup() } + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) + assert.Equal(t, defaultURL, job.URL) + assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +} + +func TestTengine_Init(t *testing.T) { + job := New() + + require.True(t, job.Init()) + assert.NotNil(t, job.apiClient) +} + +func TestTengine_Check(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.True(t, job.Check()) +} + +func TestTengine_CheckNG(t *testing.T) { + job := New() + + job.URL = "http://127.0.0.1:38001/us" + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestTengine_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } + +func TestTengine_Collect(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testStatusData) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + require.True(t, job.Check()) + + expected := map[string]int64{ + "bytes_in": 5944, + "bytes_out": 20483, + "conn_total": 354, + "http_200": 1536, + "http_206": 0, + "http_2xx": 1536, + "http_302": 43, + "http_304": 0, + "http_3xx": 50, + "http_403": 1, + "http_404": 75, + "http_416": 0, + "http_499": 0, + "http_4xx": 80, + "http_500": 0, + "http_502": 1, + "http_503": 0, + "http_504": 0, + "http_508": 0, + "http_5xx": 1, + "http_other_detail_status": 11, + "http_other_status": 0, + "http_ups_4xx": 26, + "http_ups_5xx": 1, + "req_total": 1672, + "rt": 1339, + "ups_req": 268, + "ups_rt": 644, + "ups_tries": 268, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestTengine_InvalidData(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and goodbye")) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} + +func TestTengine_404(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + job := New() + job.URL = ts.URL + require.True(t, job.Init()) + assert.False(t, job.Check()) +} diff --git a/src/go/collectors/go.d.plugin/modules/tengine/testdata/status.txt b/src/go/collectors/go.d.plugin/modules/tengine/testdata/status.txt new file mode 100644 index 00000000000000..dff2ec2d66f9bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/tengine/testdata/status.txt @@ -0,0 +1,3 
@@ +100.127.0.91,100.127.0.91:80,1594,2181,6,7,7,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +127.0.0.1,127.0.0.1:80,4350,18302,58,58,58,0,0,0,0,0,0,0,0,58,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +,0,0,290,1607,1471,50,80,1,0,1339,268,644,268,1471,0,43,0,1,75,0,0,0,1,0,0,0,11,26,1 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/traefik/README.md b/src/go/collectors/go.d.plugin/modules/traefik/README.md new file mode 120000 index 00000000000000..da5abad23c5aa5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/README.md @@ -0,0 +1 @@ +integrations/traefik.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/traefik/charts.go b/src/go/collectors/go.d.plugin/modules/traefik/charts.go new file mode 100644 index 00000000000000..a1edb7d4e8538f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/charts.go @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package traefik + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +var chartTmplEntrypointRequests = module.Chart{ + ID: "entrypoint_requests_%s_%s", + Title: "Processed HTTP requests on <code>%s</code> entrypoint (protocol <code>%s</code>)", + Units: "requests/s", + Fam: "entrypoint %s %s", + Ctx: "traefik.entrypoint_requests", + Type: module.Stacked, + Dims: module.Dims{ + {ID: prefixEntrypointRequests + "%s_%s_1xx", Name: "1xx", Algo: module.Incremental}, + {ID: prefixEntrypointRequests + "%s_%s_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: prefixEntrypointRequests + "%s_%s_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: prefixEntrypointRequests + "%s_%s_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: prefixEntrypointRequests + "%s_%s_5xx", Name: "5xx", Algo: module.Incremental}, + }, +} + +var chartTmplEntrypointRequestDuration = module.Chart{ + ID: "entrypoint_request_duration_%s_%s", + Title: "Average HTTP request processing time on <code>%s</code> entrypoint (protocol <code>%s</code>)", + Units: "milliseconds", + Fam: "entrypoint %s %s", + Ctx: "traefik.entrypoint_request_duration_average", + Type: module.Stacked, + Dims: module.Dims{ + {ID: prefixEntrypointReqDurAvg + "%s_%s_1xx", Name: "1xx"}, + {ID: prefixEntrypointReqDurAvg + "%s_%s_2xx", Name: "2xx"}, + {ID: prefixEntrypointReqDurAvg + "%s_%s_3xx", Name: "3xx"}, + {ID: prefixEntrypointReqDurAvg + "%s_%s_4xx", Name: "4xx"}, + {ID: prefixEntrypointReqDurAvg + "%s_%s_5xx", Name: "5xx"}, + }, +} + +var chartTmplEntrypointOpenConnections = module.Chart{ + ID: "entrypoint_open_connections_%s_%s", + Title: "Open connections on <code>%s</code> entrypoint (protocol <code>%s</code>)", + Units: "connections", + Fam: "entrypoint %s %s", + Ctx: "traefik.entrypoint_open_connections", + Type: module.Stacked, +} + +func newChartEntrypointRequests(entrypoint, proto string) *module.Chart { + return newEntrypointChart(chartTmplEntrypointRequests, entrypoint, proto) +} + +func newChartEntrypointRequestDuration(entrypoint, proto string) *module.Chart { + return newEntrypointChart(chartTmplEntrypointRequestDuration, entrypoint, proto) +} + +func newChartEntrypointOpenConnections(entrypoint, proto string) *module.Chart { + return newEntrypointChart(chartTmplEntrypointOpenConnections, entrypoint, proto) +} + +func newEntrypointChart(tmpl module.Chart, entrypoint, proto string) *module.Chart { + chart := tmpl.Copy() + chart.ID = fmt.Sprintf(chart.ID, entrypoint, proto) + chart.Title = fmt.Sprintf(chart.Title, entrypoint, proto) + chart.Fam = fmt.Sprintf(chart.Fam, 
entrypoint, proto) + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, entrypoint, proto) + } + return chart +} diff --git a/src/go/collectors/go.d.plugin/modules/traefik/collect.go b/src/go/collectors/go.d.plugin/modules/traefik/collect.go new file mode 100644 index 00000000000000..c8df960ca74ce2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/collect.go @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package traefik + +import ( + "errors" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricEntrypointRequestsTotal = "traefik_entrypoint_requests_total" + metricEntrypointRequestDurationSecondsSum = "traefik_entrypoint_request_duration_seconds_sum" + metricEntrypointRequestDurationSecondsCount = "traefik_entrypoint_request_duration_seconds_count" + metricEntrypointOpenConnections = "traefik_entrypoint_open_connections" +) + +const ( + prefixEntrypointRequests = "entrypoint_requests_" + prefixEntrypointReqDurAvg = "entrypoint_request_duration_average_" + prefixEntrypointOpenConn = "entrypoint_open_connections_" +) + +func isTraefikMetrics(pms prometheus.Series) bool { + for _, pm := range pms { + if strings.HasPrefix(pm.Name(), "traefik_") { + return true + } + } + return false +} + +func (t *Traefik) collect() (map[string]int64, error) { + pms, err := t.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if t.checkMetrics && !isTraefikMetrics(pms) { + return nil, errors.New("unexpected metrics (not Traefik)") + } + t.checkMetrics = false + + mx := make(map[string]int64) + + t.collectEntrypointRequestsTotal(mx, pms) + t.collectEntrypointRequestDuration(mx, pms) + t.collectEntrypointOpenConnections(mx, pms) + t.updateCodeClassMetrics(mx) + + return mx, nil +} + +func (t *Traefik) collectEntrypointRequestsTotal(mx map[string]int64, pms prometheus.Series) { + if pms = pms.FindByName(metricEntrypointRequestsTotal); pms.Len() == 0 { + return + } + + for _, pm := range pms { + code := pm.Labels.Get("code") + ep := pm.Labels.Get("entrypoint") + proto := pm.Labels.Get("protocol") + codeClass := getCodeClass(code) + if code == "" || ep == "" || proto == "" || codeClass == "" { + continue + } + + key := prefixEntrypointRequests + ep + "_" + proto + "_" + codeClass + mx[key] += int64(pm.Value) + + id := ep + "_" + proto + ce := t.cacheGetOrPutEntrypoint(id) + if ce.requests == nil { + chart := newChartEntrypointRequests(ep, proto) + ce.requests = chart + if err := t.Charts().Add(chart); err != nil { + t.Warning(err) + } + } + } +} + +func (t *Traefik) collectEntrypointRequestDuration(mx map[string]int64, pms prometheus.Series) { + if pms = pms.FindByNames( + metricEntrypointRequestDurationSecondsCount, + metricEntrypointRequestDurationSecondsSum, + ); pms.Len() == 0 { + return + } + + for _, pm := range pms { + code := pm.Labels.Get("code") + ep := pm.Labels.Get("entrypoint") + proto := pm.Labels.Get("protocol") + codeClass := getCodeClass(code) + if code == "" || ep == "" || proto == "" || codeClass == "" { + continue + } + + id := ep + "_" + proto + ce := t.cacheGetOrPutEntrypoint(id) + v := ce.reqDurData[codeClass] + if pm.Name() == metricEntrypointRequestDurationSecondsSum { + v.cur.secs += pm.Value + } else { + v.cur.reqs += pm.Value + } + ce.reqDurData[codeClass] = v + } + + for id, ce := range t.cache.entrypoints { + if ce.reqDur == nil { + chart := newChartEntrypointRequestDuration(ce.name, ce.proto) + ce.reqDur = chart + if err := t.Charts().Add(chart); err 
!= nil { + t.Warning(err) + } + } + for codeClass, v := range ce.reqDurData { + secs, reqs, seen := v.cur.secs-v.prev.secs, v.cur.reqs-v.prev.reqs, v.seen + v.prev.secs, v.prev.reqs, v.seen = v.cur.secs, v.cur.reqs, true + v.cur.secs, v.cur.reqs = 0, 0 + ce.reqDurData[codeClass] = v + + key := prefixEntrypointReqDurAvg + id + "_" + codeClass + if secs <= 0 || reqs <= 0 || !seen { + mx[key] = 0 + } else { + mx[key] = int64(secs * 1000 / reqs) + } + } + } +} + +func (t *Traefik) collectEntrypointOpenConnections(mx map[string]int64, pms prometheus.Series) { + if pms = pms.FindByName(metricEntrypointOpenConnections); pms.Len() == 0 { + return + } + + for _, pm := range pms { + method := pm.Labels.Get("method") + ep := pm.Labels.Get("entrypoint") + proto := pm.Labels.Get("protocol") + if method == "" || ep == "" || proto == "" { + continue + } + + key := prefixEntrypointOpenConn + ep + "_" + proto + "_" + method + mx[key] += int64(pm.Value) + + id := ep + "_" + proto + ce := t.cacheGetOrPutEntrypoint(id) + if ce.openConn == nil { + chart := newChartEntrypointOpenConnections(ep, proto) + ce.openConn = chart + if err := t.Charts().Add(chart); err != nil { + t.Warning(err) + } + } + + if !ce.openConnMethods[method] { + ce.openConnMethods[method] = true + dim := &module.Dim{ID: key, Name: method} + if err := ce.openConn.AddDim(dim); err != nil { + t.Warning(err) + } + } + } +} + +var httpRespCodeClasses = []string{"1xx", "2xx", "3xx", "4xx", "5xx"} + +func (t Traefik) updateCodeClassMetrics(mx map[string]int64) { + for id, ce := range t.cache.entrypoints { + if ce.requests != nil { + for _, c := range httpRespCodeClasses { + key := prefixEntrypointRequests + id + "_" + c + mx[key] += 0 + } + } + if ce.reqDur != nil { + for _, c := range httpRespCodeClasses { + key := prefixEntrypointReqDurAvg + id + "_" + c + mx[key] += 0 + } + } + } +} + +func getCodeClass(code string) string { + if len(code) != 3 { + return "" + } + return string(code[0]) + "xx" +} + +func (t *Traefik) cacheGetOrPutEntrypoint(id string) *cacheEntrypoint { + if _, ok := t.cache.entrypoints[id]; !ok { + name, proto := id, id + if idx := strings.LastIndexByte(id, '_'); idx != -1 { + name, proto = id[:idx], id[idx+1:] + } + t.cache.entrypoints[id] = &cacheEntrypoint{ + name: name, + proto: proto, + reqDurData: make(map[string]cacheEntrypointReqDur), + openConnMethods: make(map[string]bool), + } + } + return t.cache.entrypoints[id] +} diff --git a/src/go/collectors/go.d.plugin/modules/traefik/config_schema.json b/src/go/collectors/go.d.plugin/modules/traefik/config_schema.json new file mode 100644 index 00000000000000..0596ef83b968a3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/traefik job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + 
"required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/traefik/init.go b/src/go/collectors/go.d.plugin/modules/traefik/init.go new file mode 100644 index 00000000000000..e96ba0b7c8ac8d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/init.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package traefik + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (t Traefik) validateConfig() error { + if t.URL == "" { + return errors.New("'url' is not set") + } + return nil +} + +func (t Traefik) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(t.Client) + if err != nil { + return nil, err + } + + prom := prometheus.NewWithSelector(httpClient, t.Request, sr) + return prom, nil +} + +var sr, _ = selector.Expr{ + Allow: []string{ + metricEntrypointRequestDurationSecondsSum, + metricEntrypointRequestDurationSecondsCount, + metricEntrypointRequestsTotal, + metricEntrypointOpenConnections, + }, +}.Parse() diff --git a/src/go/collectors/go.d.plugin/modules/traefik/integrations/traefik.md b/src/go/collectors/go.d.plugin/modules/traefik/integrations/traefik.md new file mode 100644 index 00000000000000..b5405067e72a54 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/integrations/traefik.md @@ -0,0 +1,211 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/traefik/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/traefik/metadata.yaml" +sidebar_label: "Traefik" +learn_status: "Published" +learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Traefik + + +<img src="https://netdata.cloud/img/traefik.svg" width="150"/> + + +Plugin: go.d.plugin +Module: traefik + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Traefik servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per entrypoint, protocol + +These metrics refer to the endpoint. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s | +| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds | +| traefik.entrypoint_open_connections | a dimension per HTTP method | connections | + + + +## Alerts + +There are no alerts configured by default for this integration. 
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable built-in Prometheus exporter
+
+To enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/traefik.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/traefik.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8082/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8082/metrics
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8082/metrics
+    username: foo
+    password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8082/metrics
+
+  - name: remote
+    url: http://192.0.2.0:8082/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m traefik
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/traefik/metadata.yaml b/src/go/collectors/go.d.plugin/modules/traefik/metadata.yaml
new file mode 100644
index 00000000000000..7fe182ea3f8fee
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/traefik/metadata.yaml
@@ -0,0 +1,196 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-traefik
+      plugin_name: go.d.plugin
+      module_name: traefik
+      monitored_instance:
+        name: Traefik
+        link: https://traefik.io/
+        icon_filename: traefik.svg
+        categories:
+          - data-collection.web-servers-and-web-proxies
+      keywords:
+        - traefik
+        - proxy
+        - webproxy
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors Traefik servers.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list:
+          - title: Enable built-in Prometheus exporter
+            description: |
+              To enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.
+      configuration:
+        file:
+          name: go.d/traefik.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: All options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: http://127.0.0.1:8082/metrics
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: method
+              description: HTTP request method.
+              default_value: GET
+              required: false
+            - name: body
+              description: HTTP request body.
+              default_value: ""
+              required: false
+            - name: headers
+              description: HTTP request headers.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: false
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+ default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + url: http://127.0.0.1:8082/metrics + - name: Basic HTTP auth + description: Local server with basic HTTP authentication. + config: | + jobs: + - name: local + url: http://127.0.0.1:8082/metrics + username: foo + password: bar + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + http://127.0.0.1:8082/metrics + + - name: remote + http://192.0.2.0:8082/metrics + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: entrypoint, protocol + description: These metrics refer to the endpoint. + labels: [] + metrics: + - name: traefik.entrypoint_requests + description: Processed HTTP requests + unit: requests/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: traefik.entrypoint_request_duration_average + description: Average HTTP request processing time + unit: milliseconds + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: traefik.entrypoint_open_connections + description: Open connections + unit: connections + chart_type: stacked + dimensions: + - name: a dimension per HTTP method diff --git a/src/go/collectors/go.d.plugin/modules/traefik/testdata/v2.2.1/metrics.txt b/src/go/collectors/go.d.plugin/modules/traefik/testdata/v2.2.1/metrics.txt new file mode 100644 index 00000000000000..947a365c08ca04 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/testdata/v2.2.1/metrics.txt @@ -0,0 +1,1170 @@ +# HELP traefik_entrypoint_open_connections How many open connections exist on an entrypoint, partitioned by method and protocol. +# TYPE traefik_entrypoint_open_connections gauge +traefik_entrypoint_open_connections{entrypoint="traefik",method="GET",protocol="http"} 1 +traefik_entrypoint_open_connections{entrypoint="web",method="DELETE",protocol="http"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="GET",protocol="http"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="GET",protocol="websocket"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="HEAD",protocol="http"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="OPTIONS",protocol="http"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="PATCH",protocol="http"} 0 +traefik_entrypoint_open_connections{entrypoint="web",method="POST",protocol="http"} 4 +traefik_entrypoint_open_connections{entrypoint="web",method="PUT",protocol="http"} 0 +# HELP traefik_entrypoint_request_duration_seconds How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method. 
+# TYPE traefik_entrypoint_request_duration_seconds histogram +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 2.839193e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 2.840809e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 2.840813e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 2.840813e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="5"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 5284.212647182563 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="GET",protocol="http"} 2.840814e+06 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.1"} 6.77133599e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.2"} 7.53631104e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.3"} 7.72627022e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.4"} 7.79474876e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.5"} 7.81903287e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.8"} 7.8476649e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.9"} 7.85122472e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1"} 7.85466352e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1.1"} 7.85699767e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1.2"} 7.85892303e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="5"} 7.86979178e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="+Inf"} 7.87262719e+08 
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="GET",protocol="http"} 3.573930237570157e+07 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="GET",protocol="http"} 7.87262719e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="5"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 6311 +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="HEAD",protocol="http"} 7.36609426899999 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="HEAD",protocol="http"} 6311 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 5617 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 5828 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 5925 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 5968 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 5996 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 6027 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 6034 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1"} 6035 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 6039 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 6039 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="5"} 6045 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 6047 
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="PATCH",protocol="http"} 376.1973577400002 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="PATCH",protocol="http"} 6047 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.1"} 1.0407824e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.2"} 3.0289279e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.3"} 4.9925366e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.4"} 5.7915399e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.5"} 6.292114e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.8"} 6.826269e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.9"} 6.8979431e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1"} 6.9399071e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1.1"} 6.9717772e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1.2"} 6.9953534e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="5"} 7.0917859e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="+Inf"} 7.1907943e+07 +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="POST",protocol="http"} 2.4994444082210593e+07 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="POST",protocol="http"} 7.1907943e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.1"} 1.75296233e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.2"} 1.75817375e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.3"} 1.76334316e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.4"} 1.76415232e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.5"} 1.76453514e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.8"} 1.76535963e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.9"} 1.76564373e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1"} 1.76584473e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1.1"} 1.76599247e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1.2"} 1.76612342e+08 
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="5"} 1.76778007e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="+Inf"} 1.76862498e+08 +traefik_entrypoint_request_duration_seconds_sum{code="201",entrypoint="web",method="POST",protocol="http"} 3.734233299392699e+06 +traefik_entrypoint_request_duration_seconds_count{code="201",entrypoint="web",method="POST",protocol="http"} 1.76862498e+08 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 7980 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 8309 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 8412 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 8443 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 8451 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 8528 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 8568 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1"} 8621 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 8730 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 8886 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="5"} 10410 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 10446 +traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="DELETE",protocol="http"} 4241.144239078025 +traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="DELETE",protocol="http"} 10446 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 29818 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 30290 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 30456 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 30508 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 30534 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 30563 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 30571 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1"} 30578 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 30581 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 
30581 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="5"} 30602 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 30606 +traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="PATCH",protocol="http"} 797.362519008993 +traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="PATCH",protocol="http"} 30606 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.1"} 54869 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.2"} 61844 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.3"} 63734 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.4"} 65053 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.5"} 66111 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.8"} 66489 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.9"} 66507 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1"} 66512 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1.1"} 66519 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1.2"} 66526 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="5"} 66554 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="+Inf"} 66555 +traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="POST",protocol="http"} 3518.3602801470365 +traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="POST",protocol="http"} 66555 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.1"} 24769 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.2"} 46802 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.3"} 48080 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.4"} 48611 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.5"} 48903 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.8"} 49321 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.9"} 49412 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1"} 49462 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1.1"} 49518 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1.2"} 49558 
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="5"} 49829 +traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 49872 +traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="PUT",protocol="http"} 5950.493801841983 +traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="PUT",protocol="http"} 49872 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.1"} 3037 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.2"} 3039 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.3"} 3040 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.4"} 3040 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.5"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.8"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.9"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1.1"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1.2"} 3041 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="5"} 3043 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="+Inf"} 3046 +traefik_entrypoint_request_duration_seconds_sum{code="206",entrypoint="web",method="GET",protocol="http"} 200.91194297900017 +traefik_entrypoint_request_duration_seconds_count{code="206",entrypoint="web",method="GET",protocol="http"} 3046 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 35 
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="5"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 35 +traefik_entrypoint_request_duration_seconds_sum{code="206",entrypoint="web",method="HEAD",protocol="http"} 0.03518408899999999 +traefik_entrypoint_request_duration_seconds_count{code="206",entrypoint="web",method="HEAD",protocol="http"} 35 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 2767 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 2770 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 2772 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 2772 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 2772 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 2773 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 2773 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1"} 2774 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 2774 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 2774 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="5"} 2775 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 2775 +traefik_entrypoint_request_duration_seconds_sum{code="207",entrypoint="web",method="DELETE",protocol="http"} 33.959802933999995 +traefik_entrypoint_request_duration_seconds_count{code="207",entrypoint="web",method="DELETE",protocol="http"} 2775 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.1"} 93 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.2"} 101 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.3"} 105 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.4"} 112 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.5"} 120 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.8"} 127 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.9"} 127 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1"} 127 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1.1"} 127 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1.2"} 127 
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="5"} 128 +traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="+Inf"} 129 +traefik_entrypoint_request_duration_seconds_sum{code="207",entrypoint="web",method="POST",protocol="http"} 27.57962429700001 +traefik_entrypoint_request_duration_seconds_count{code="207",entrypoint="web",method="POST",protocol="http"} 129 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.1"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.2"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.3"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.4"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.5"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.8"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.9"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1.1"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1.2"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="5"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="+Inf"} 248 +traefik_entrypoint_request_duration_seconds_sum{code="301",entrypoint="web",method="GET",protocol="http"} 0.25649611699999997 +traefik_entrypoint_request_duration_seconds_count{code="301",entrypoint="web",method="GET",protocol="http"} 248 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.1"} 30448 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.2"} 38318 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.3"} 41030 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.4"} 43988 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.5"} 46851 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.8"} 48508 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.9"} 48554 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1"} 48571 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1.1"} 48580 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1.2"} 48587 
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="5"} 48619 +traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="+Inf"} 48623 +traefik_entrypoint_request_duration_seconds_sum{code="302",entrypoint="web",method="GET",protocol="http"} 5561.800275933011 +traefik_entrypoint_request_duration_seconds_count{code="302",entrypoint="web",method="GET",protocol="http"} 48623 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.1"} 367383 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.2"} 367384 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.3"} 367385 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.4"} 367385 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.5"} 367386 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.8"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.9"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1.1"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1.2"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="5"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="+Inf"} 367387 +traefik_entrypoint_request_duration_seconds_sum{code="304",entrypoint="web",method="GET",protocol="http"} 418.3746390310068 +traefik_entrypoint_request_duration_seconds_count{code="304",entrypoint="web",method="GET",protocol="http"} 367387 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 4 
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="5"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 4 +traefik_entrypoint_request_duration_seconds_sum{code="304",entrypoint="web",method="HEAD",protocol="http"} 0.0044282570000000005 +traefik_entrypoint_request_duration_seconds_count{code="304",entrypoint="web",method="HEAD",protocol="http"} 4 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="5"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 5 +traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="traefik",method="GET",protocol="http"} 0.0006326610000000001 +traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="traefik",method="GET",protocol="http"} 5 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.1"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.2"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.3"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.4"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.5"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.8"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.9"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1.1"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1.2"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="5"} 
8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="+Inf"} 8 +traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="GET",protocol="http"} 0.010426270999999999 +traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="GET",protocol="http"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.1"} 42862 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.2"} 43468 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.3"} 43839 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.4"} 43940 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.5"} 43978 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.8"} 44029 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.9"} 44038 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1"} 44049 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1.1"} 44061 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1.2"} 44066 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="5"} 44106 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="+Inf"} 59417 +traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="POST",protocol="http"} 77544.51951296844 +traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="POST",protocol="http"} 59417 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.1"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.2"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.3"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.4"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.5"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.8"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.9"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1.1"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1.2"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="5"} 4757 
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 4757 +traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="PUT",protocol="http"} 7.191891319000009 +traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="PUT",protocol="http"} 4757 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="5"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 2 +traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="DELETE",protocol="http"} 0.0018184479999999999 +traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="DELETE",protocol="http"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.289379e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.2896175e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.2896199e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.2896204e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.2896211e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.2896212e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="5"} 2.2896213e+07 
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="GET",protocol="http"} 25752.359368771624 +traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="GET",protocol="http"} 2.2896213e+07 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="5"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 10 +traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="PATCH",protocol="http"} 0.010515436999999999 +traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="PATCH",protocol="http"} 10 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.1"} 927908 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.2"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.3"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.4"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.5"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.8"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.9"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1.1"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1.2"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="5"} 927912 
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="+Inf"} 927912 +traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="POST",protocol="http"} 995.9855624980047 +traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="POST",protocol="http"} 927912 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.1"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.2"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.3"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.4"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.5"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.8"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.9"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1.1"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1.2"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="5"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 75 +traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="PUT",protocol="http"} 0.16541799500000004 +traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="PUT",protocol="http"} 75 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 830 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 830 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 830 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 830 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 830 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="5"} 831 
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 831 +traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="DELETE",protocol="http"} 9.061551029999986 +traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="DELETE",protocol="http"} 831 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.1"} 216932 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.2"} 217462 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.3"} 217600 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.4"} 217648 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.5"} 217684 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.8"} 217723 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.9"} 217728 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1"} 217739 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1.1"} 217744 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1.2"} 217747 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="5"} 217766 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="+Inf"} 217771 +traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="GET",protocol="http"} 1243.8479915990079 +traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="GET",protocol="http"} 217771 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 89 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 89 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 89 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="5"} 90 
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 90 +traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="PATCH",protocol="http"} 1.039575084 +traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="PATCH",protocol="http"} 90 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.1"} 658814 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.2"} 667999 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.3"} 668305 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.4"} 668348 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.5"} 668368 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.8"} 668417 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.9"} 668427 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1"} 668436 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1.1"} 668441 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1.2"} 668443 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="5"} 668485 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="+Inf"} 668504 +traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="POST",protocol="http"} 5763.404909136024 +traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="POST",protocol="http"} 668504 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.1"} 387 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.2"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.3"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.4"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.5"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.8"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.9"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1.1"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1.2"} 388 +traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="5"} 388 
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 388
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="PUT",protocol="http"} 1.0210683440000006
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="PUT",protocol="http"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="5"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 3
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="traefik",method="GET",protocol="http"} 0.000172581
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="traefik",method="GET",protocol="http"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 5
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="DELETE",protocol="http"} 0.049077042999999994
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="DELETE",protocol="http"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.1"} 1.6708334e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.4431309e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.4897006e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.5060706e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.5158815e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.5319277e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.5348008e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1"} 2.5366706e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.5380618e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.5390269e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="5"} 2.5431782e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.5435602e+07
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="GET",protocol="http"} 1.5730236608823321e+06
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="GET",protocol="http"} 2.5435602e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.1"} 76149
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.2"} 77389
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.3"} 78136
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.4"} 78736
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.5"} 78893
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.8"} 79100
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.9"} 79112
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1"} 79125
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1.1"} 79134
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1.2"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="5"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="+Inf"} 79137
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="GET",protocol="websocket"} 952.6657687000076
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="GET",protocol="websocket"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="5"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 440
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="HEAD",protocol="http"} 0.8076752390000003
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="HEAD",protocol="http"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 10
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="PATCH",protocol="http"} 0.106270053
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="PATCH",protocol="http"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.1"} 11831
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.2"} 11996
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.3"} 12058
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.4"} 12066
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.5"} 12068
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.8"} 12080
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.9"} 12084
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1"} 12086
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1.1"} 12087
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1.2"} 12091
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="5"} 12112
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="+Inf"} 12125
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="POST",protocol="http"} 354.48999692400014
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="POST",protocol="http"} 12125
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.2"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.3"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.4"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.5"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.8"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.9"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1.1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1.2"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="5"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="+Inf"} 89
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 0.111158589
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="PATCH",protocol="http"} 0.000997012
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.2"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.3"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.4"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.5"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.8"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.9"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1.1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1.2"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="5"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="+Inf"} 13
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="POST",protocol="http"} 0.015701319999999998
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="POST",protocol="http"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.2"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.3"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.4"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.5"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.8"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.9"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1.1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1.2"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="5"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 518
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="PUT",protocol="http"} 0.7715693390000001
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="PUT",protocol="http"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.12735267e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.12837945e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.12867308e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.12881286e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.12890892e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.12908516e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.12912307e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1"} 2.12915414e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.12918123e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.12920839e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="5"} 2.12981945e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.13012914e+08
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="GET",protocol="http"} 1.440885906018625e+06
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="GET",protocol="http"} 2.13012914e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 291
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="5"} 293
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 293
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="PATCH",protocol="http"} 8.790643885000003
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="PATCH",protocol="http"} 293
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.1"} 180
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.2"} 185
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.3"} 189
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.4"} 191
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.5"} 191
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.8"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.9"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1.1"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1.2"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="5"} 194
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="+Inf"} 195
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="POST",protocol="http"} 17.934394692999998
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="POST",protocol="http"} 195
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.1"} 38126
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.2"} 40054
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.3"} 40533
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.4"} 40866
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.5"} 41024
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.8"} 41282
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.9"} 41337
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1"} 41373
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1.1"} 41399
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1.2"} 41422
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="5"} 41610
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 41665
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="PUT",protocol="http"} 3606.133672342983
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="PUT",protocol="http"} 41665
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.1"} 1.706487e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.2"} 1.7067e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.3"} 1.706726e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.4"} 1.706742e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.5"} 1.706757e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.8"} 1.706779e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.9"} 1.706783e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1"} 1.706789e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1.1"} 1.706791e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1.2"} 1.706797e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="5"} 1.706888e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="+Inf"} 1.706931e+06
+traefik_entrypoint_request_duration_seconds_sum{code="410",entrypoint="web",method="GET",protocol="http"} 5115.734139137677
+traefik_entrypoint_request_duration_seconds_count{code="410",entrypoint="web",method="GET",protocol="http"} 1.706931e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="410",entrypoint="web",method="PATCH",protocol="http"} 0.005254578
+traefik_entrypoint_request_duration_seconds_count{code="410",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="DELETE",protocol="http"} 0.023973863
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="DELETE",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.2"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.3"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.4"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.5"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.8"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.9"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1.1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1.2"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="5"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="+Inf"} 20
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="GET",protocol="http"} 0.039623226
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="GET",protocol="http"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="5"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 26
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="PATCH",protocol="http"} 0.083693077
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="PATCH",protocol="http"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.1"} 939
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.2"} 948
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.3"} 953
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.4"} 953
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.5"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.8"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.9"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1.1"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1.2"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="5"} 955
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="+Inf"} 955
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="POST",protocol="http"} 11.256437256000007
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="POST",protocol="http"} 955
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.1"} 12620
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.2"} 12624
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.3"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.4"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.5"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.8"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.9"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1.1"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1.2"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="5"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 12628
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="PUT",protocol="http"} 30.15632766300003
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="PUT",protocol="http"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.103905e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.103908e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="5"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_sum{code="429",entrypoint="web",method="GET",protocol="http"} 336.7924126419656
+traefik_entrypoint_request_duration_seconds_count{code="429",entrypoint="web",method="GET",protocol="http"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.2"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.3"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.4"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.5"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.8"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.9"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1.1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1.2"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="5"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="+Inf"} 205
+traefik_entrypoint_request_duration_seconds_sum{code="429",entrypoint="web",method="POST",protocol="http"} 0.027288120999999995
+traefik_entrypoint_request_duration_seconds_count{code="429",entrypoint="web",method="POST",protocol="http"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.1"} 83
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.2"} 144
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.3"} 168
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.4"} 184
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.5"} 194
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.8"} 231
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.9"} 232
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1"} 234
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1.1"} 235
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1.2"} 235
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="5"} 343
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="+Inf"} 1255
+traefik_entrypoint_request_duration_seconds_sum{code="444",entrypoint="web",method="GET",protocol="http"} 29923.69344054194
+traefik_entrypoint_request_duration_seconds_count{code="444",entrypoint="web",method="GET",protocol="http"} 1255
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="+Inf"} 269941
+traefik_entrypoint_request_duration_seconds_sum{code="445",entrypoint="web",method="GET",protocol="http"} 1.6198159394737784e+07
+traefik_entrypoint_request_duration_seconds_count{code="445",entrypoint="web",method="GET",protocol="http"} 269941
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.1"} 499
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.2"} 744
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.3"} 842
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.4"} 918
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.5"} 970
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.8"} 1061
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.9"} 1074
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1"} 1094
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1.1"} 1105
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1.2"} 1132
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="5"} 1884
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="+Inf"} 5075
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="GET",protocol="http"} 138388.62840130684
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="GET",protocol="http"} 5075
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="PATCH",protocol="http"} 45.061508693
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.1"} 85786
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.2"} 125143
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.3"} 144101
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.4"} 151775
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.5"} 156313
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.8"} 163673
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.9"} 165387
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1"} 166772
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1.1"} 168246
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1.2"} 169461
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="5"} 193067
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="+Inf"} 194455
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="POST",protocol="http"} 171588.70865418628
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="POST",protocol="http"} 194455
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.1"} 70
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.2"} 79
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.3"} 88
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.4"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.5"} 92
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.8"} 93
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.9"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1.1"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1.2"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="5"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 16127
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="PUT",protocol="http"} 4.809399570415463e+06
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="PUT",protocol="http"} 16127
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="5"} 7
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 7
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="DELETE",protocol="http"} 2.9226568759999996
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="DELETE",protocol="http"} 7
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.1"} 4304
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.2"} 4314
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.3"} 4315
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.4"} 4317
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.5"} 4322
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.8"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.9"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1.1"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1.2"} 4334
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="5"} 4334
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="+Inf"} 12951
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="GET",protocol="http"} 495411.215290646
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="GET",protocol="http"} 12951
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="5"} 12
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 12
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="PATCH",protocol="http"} 3.4746266410000004
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="PATCH",protocol="http"} 12
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.1"} 321
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.2"} 322
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.3"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.4"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.5"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.8"} 324
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.9"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1.1"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1.2"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="5"} 339
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2196
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="POST",protocol="http"} 112599.76971862414
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="POST",protocol="http"} 2196
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.1"} 17
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.2"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.3"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.4"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.5"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.8"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.9"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1.1"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1.2"} 19
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="5"} 22
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 1551
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="PUT",protocol="http"} 254492.6350865842
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="PUT",protocol="http"} 1551
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 4
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="DELETE",protocol="http"} 0.006532118999999999
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="DELETE",protocol="http"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.1"} 107436
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.2"} 107462
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.3"} 107466
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.4"} 107471
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.5"} 107478
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.8"} 107500
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.9"} 107508
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1"} 107522
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1.1"} 107568
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1.2"} 107586
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="5"} 107931
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="+Inf"} 115170
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="GET",protocol="http"} 241715.94925767966
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="GET",protocol="http"} 115170
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="PATCH",protocol="http"} 27.351390443
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.1"} 902
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.2"} 987
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.3"} 1046
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.4"} 1088
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.5"} 1104
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.8"} 1149
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.9"} 1158
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1"} 1169
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1.1"} 1182
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1.2"} 1197
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="5"} 1400
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2900
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="POST",protocol="http"} 1.0039723839193305e+06
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="POST",protocol="http"} 2900
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.1"} 36
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.2"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.3"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.4"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.5"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.8"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.9"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1.1"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1.2"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="5"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 40
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="PUT",protocol="http"} 32.391189919
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="PUT",protocol="http"} 40
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.1"} 72447
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.2"} 
72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.3"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.4"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.5"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.8"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.9"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1.1"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1.2"} 72448 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="5"} 72454 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="+Inf"} 72538 +traefik_entrypoint_request_duration_seconds_sum{code="503",entrypoint="web",method="GET",protocol="http"} 2883.984412680031 +traefik_entrypoint_request_duration_seconds_count{code="503",entrypoint="web",method="GET",protocol="http"} 72538 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.1"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.2"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.3"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.4"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.5"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.8"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.9"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1.1"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1.2"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="5"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="+Inf"} 15648 +traefik_entrypoint_request_duration_seconds_sum{code="503",entrypoint="web",method="POST",protocol="http"} 18.386133866 +traefik_entrypoint_request_duration_seconds_count{code="503",entrypoint="web",method="POST",protocol="http"} 15648 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.2"} 0 
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.3"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.4"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.8"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.9"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1.2"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="+Inf"} 8 +traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="GET",protocol="http"} 240.012145339 +traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="GET",protocol="http"} 8 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.2"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.3"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.4"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.8"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.9"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1.2"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2 +traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="POST",protocol="http"} 60.003337996 +traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="POST",protocol="http"} 2 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.2"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.3"} 0 
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.4"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.8"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.9"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1.1"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1.2"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="5"} 0 +traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 107 +traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="PUT",protocol="http"} 3683.539644907 +traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="PUT",protocol="http"} 107 +# HELP traefik_entrypoint_requests_total How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method. +# TYPE traefik_entrypoint_requests_total counter +traefik_entrypoint_requests_total{code="200",entrypoint="traefik",method="GET",protocol="http"} 2.840814e+06 +traefik_entrypoint_requests_total{code="200",entrypoint="web",method="GET",protocol="http"} 7.87262719e+08 +traefik_entrypoint_requests_total{code="200",entrypoint="web",method="HEAD",protocol="http"} 6311 +traefik_entrypoint_requests_total{code="200",entrypoint="web",method="PATCH",protocol="http"} 6047 +traefik_entrypoint_requests_total{code="200",entrypoint="web",method="POST",protocol="http"} 7.1907943e+07 +traefik_entrypoint_requests_total{code="201",entrypoint="web",method="POST",protocol="http"} 1.76862498e+08 +traefik_entrypoint_requests_total{code="204",entrypoint="web",method="DELETE",protocol="http"} 10446 +traefik_entrypoint_requests_total{code="204",entrypoint="web",method="PATCH",protocol="http"} 30606 +traefik_entrypoint_requests_total{code="204",entrypoint="web",method="POST",protocol="http"} 66555 +traefik_entrypoint_requests_total{code="204",entrypoint="web",method="PUT",protocol="http"} 49872 +traefik_entrypoint_requests_total{code="206",entrypoint="web",method="GET",protocol="http"} 3046 +traefik_entrypoint_requests_total{code="206",entrypoint="web",method="HEAD",protocol="http"} 35 +traefik_entrypoint_requests_total{code="207",entrypoint="web",method="DELETE",protocol="http"} 2775 +traefik_entrypoint_requests_total{code="207",entrypoint="web",method="POST",protocol="http"} 129 +traefik_entrypoint_requests_total{code="301",entrypoint="web",method="GET",protocol="http"} 248 +traefik_entrypoint_requests_total{code="302",entrypoint="web",method="GET",protocol="http"} 48623 +traefik_entrypoint_requests_total{code="304",entrypoint="web",method="GET",protocol="http"} 367387 +traefik_entrypoint_requests_total{code="304",entrypoint="web",method="HEAD",protocol="http"} 4 +traefik_entrypoint_requests_total{code="400",entrypoint="traefik",method="GET",protocol="http"} 5 +traefik_entrypoint_requests_total{code="400",entrypoint="web",method="GET",protocol="http"} 8 
+traefik_entrypoint_requests_total{code="400",entrypoint="web",method="POST",protocol="http"} 59417 +traefik_entrypoint_requests_total{code="400",entrypoint="web",method="PUT",protocol="http"} 4757 +traefik_entrypoint_requests_total{code="401",entrypoint="web",method="DELETE",protocol="http"} 2 +traefik_entrypoint_requests_total{code="401",entrypoint="web",method="GET",protocol="http"} 2.2896213e+07 +traefik_entrypoint_requests_total{code="401",entrypoint="web",method="PATCH",protocol="http"} 10 +traefik_entrypoint_requests_total{code="401",entrypoint="web",method="POST",protocol="http"} 927912 +traefik_entrypoint_requests_total{code="401",entrypoint="web",method="PUT",protocol="http"} 75 +traefik_entrypoint_requests_total{code="403",entrypoint="web",method="DELETE",protocol="http"} 831 +traefik_entrypoint_requests_total{code="403",entrypoint="web",method="GET",protocol="http"} 217771 +traefik_entrypoint_requests_total{code="403",entrypoint="web",method="PATCH",protocol="http"} 90 +traefik_entrypoint_requests_total{code="403",entrypoint="web",method="POST",protocol="http"} 668504 +traefik_entrypoint_requests_total{code="403",entrypoint="web",method="PUT",protocol="http"} 388 +traefik_entrypoint_requests_total{code="404",entrypoint="traefik",method="GET",protocol="http"} 3 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="DELETE",protocol="http"} 5 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="GET",protocol="http"} 2.5435602e+07 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="GET",protocol="websocket"} 79137 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="HEAD",protocol="http"} 440 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="PATCH",protocol="http"} 10 +traefik_entrypoint_requests_total{code="404",entrypoint="web",method="POST",protocol="http"} 12125 +traefik_entrypoint_requests_total{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 89 +traefik_entrypoint_requests_total{code="405",entrypoint="web",method="PATCH",protocol="http"} 1 +traefik_entrypoint_requests_total{code="405",entrypoint="web",method="POST",protocol="http"} 13 +traefik_entrypoint_requests_total{code="405",entrypoint="web",method="PUT",protocol="http"} 518 +traefik_entrypoint_requests_total{code="409",entrypoint="web",method="GET",protocol="http"} 2.13012914e+08 +traefik_entrypoint_requests_total{code="409",entrypoint="web",method="PATCH",protocol="http"} 293 +traefik_entrypoint_requests_total{code="409",entrypoint="web",method="POST",protocol="http"} 195 +traefik_entrypoint_requests_total{code="409",entrypoint="web",method="PUT",protocol="http"} 41665 +traefik_entrypoint_requests_total{code="410",entrypoint="web",method="GET",protocol="http"} 1.706931e+06 +traefik_entrypoint_requests_total{code="410",entrypoint="web",method="PATCH",protocol="http"} 1 +traefik_entrypoint_requests_total{code="422",entrypoint="web",method="DELETE",protocol="http"} 1 +traefik_entrypoint_requests_total{code="422",entrypoint="web",method="GET",protocol="http"} 20 +traefik_entrypoint_requests_total{code="422",entrypoint="web",method="PATCH",protocol="http"} 26 +traefik_entrypoint_requests_total{code="422",entrypoint="web",method="POST",protocol="http"} 955 +traefik_entrypoint_requests_total{code="422",entrypoint="web",method="PUT",protocol="http"} 12628 +traefik_entrypoint_requests_total{code="429",entrypoint="web",method="GET",protocol="http"} 2.103909e+06 
+traefik_entrypoint_requests_total{code="429",entrypoint="web",method="POST",protocol="http"} 205 +traefik_entrypoint_requests_total{code="444",entrypoint="web",method="GET",protocol="http"} 1255 +traefik_entrypoint_requests_total{code="445",entrypoint="web",method="GET",protocol="http"} 269941 +traefik_entrypoint_requests_total{code="499",entrypoint="web",method="GET",protocol="http"} 5075 +traefik_entrypoint_requests_total{code="499",entrypoint="web",method="PATCH",protocol="http"} 2 +traefik_entrypoint_requests_total{code="499",entrypoint="web",method="POST",protocol="http"} 194455 +traefik_entrypoint_requests_total{code="499",entrypoint="web",method="PUT",protocol="http"} 16127 +traefik_entrypoint_requests_total{code="500",entrypoint="web",method="DELETE",protocol="http"} 7 +traefik_entrypoint_requests_total{code="500",entrypoint="web",method="GET",protocol="http"} 12951 +traefik_entrypoint_requests_total{code="500",entrypoint="web",method="PATCH",protocol="http"} 12 +traefik_entrypoint_requests_total{code="500",entrypoint="web",method="POST",protocol="http"} 2196 +traefik_entrypoint_requests_total{code="500",entrypoint="web",method="PUT",protocol="http"} 1551 +traefik_entrypoint_requests_total{code="502",entrypoint="web",method="DELETE",protocol="http"} 4 +traefik_entrypoint_requests_total{code="502",entrypoint="web",method="GET",protocol="http"} 115170 +traefik_entrypoint_requests_total{code="502",entrypoint="web",method="PATCH",protocol="http"} 2 +traefik_entrypoint_requests_total{code="502",entrypoint="web",method="POST",protocol="http"} 2900 +traefik_entrypoint_requests_total{code="502",entrypoint="web",method="PUT",protocol="http"} 40 +traefik_entrypoint_requests_total{code="503",entrypoint="web",method="GET",protocol="http"} 72538 +traefik_entrypoint_requests_total{code="503",entrypoint="web",method="POST",protocol="http"} 15648 +traefik_entrypoint_requests_total{code="504",entrypoint="web",method="GET",protocol="http"} 8 +traefik_entrypoint_requests_total{code="504",entrypoint="web",method="POST",protocol="http"} 2 +traefik_entrypoint_requests_total{code="504",entrypoint="web",method="PUT",protocol="http"} 107 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/traefik/traefik.go b/src/go/collectors/go.d.plugin/modules/traefik/traefik.go new file mode 100644 index 00000000000000..a121b023631030 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/traefik.go @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package traefik + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("traefik", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Traefik { + return &Traefik{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8082/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + }, + + charts: &module.Charts{}, + checkMetrics: true, + cache: &cache{ + entrypoints: make(map[string]*cacheEntrypoint), + }, + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type ( + Traefik struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + charts *module.Charts + checkMetrics bool + cache *cache + } + cache struct { + entrypoints 
map[string]*cacheEntrypoint + } + cacheEntrypoint struct { + name, proto string + requests *module.Chart + reqDur *module.Chart + reqDurData map[string]cacheEntrypointReqDur + openConn *module.Chart + openConnMethods map[string]bool + } + cacheEntrypointReqDur struct { + prev, cur struct{ reqs, secs float64 } + seen bool + } +) + +func (t *Traefik) Init() bool { + if err := t.validateConfig(); err != nil { + t.Errorf("config validation: %v", err) + return false + } + + prom, err := t.initPrometheusClient() + if err != nil { + t.Errorf("prometheus client initialization: %v", err) + return false + } + t.prom = prom + + return true +} + +func (t *Traefik) Check() bool { + return len(t.Collect()) > 0 +} + +func (t *Traefik) Charts() *module.Charts { + return t.charts +} + +func (t *Traefik) Collect() map[string]int64 { + mx, err := t.collect() + if err != nil { + t.Error(err) + return nil + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (Traefik) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/traefik/traefik_test.go b/src/go/collectors/go.d.plugin/modules/traefik/traefik_test.go new file mode 100644 index 00000000000000..c5804b672c84d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/traefik/traefik_test.go @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package traefik + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + v221Metrics, _ = os.ReadFile("testdata/v2.2.1/metrics.txt") +) + +func Test_Testdata(t *testing.T) { + for name, data := range map[string][]byte{ + "v2.2.1_Metrics": v221Metrics, + } { + require.NotNilf(t, data, name) + } +} + +func TestNew(t *testing.T) { + assert.IsType(t, (*Traefik)(nil), New()) +} + +func TestTraefik_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + config: New().Config, + }, + "fails on unset 'url'": { + wantFail: true, + config: Config{HTTP: web.HTTP{ + Request: web.Request{}, + }}, + }, + "fails on invalid TLSCA": { + wantFail: true, + config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, + }, + }}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tk := New() + tk.Config = test.config + + if test.wantFail { + assert.False(t, tk.Init()) + } else { + assert.True(t, tk.Init()) + } + }) + } +} + +func TestTraefik_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestTraefik_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestTraefik_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(t *testing.T) (tk *Traefik, cleanup func()) + }{ + "success on valid response v2.2.1": { + wantFail: false, + prepare: prepareCaseTraefikV221Metrics, + }, + "fails on response with unexpected metrics (not Traefik)": { + wantFail: true, + prepare: prepareCaseNotTraefikMetrics, + }, + "fails on 404 response": { + wantFail: true, + prepare: prepareCase404Response, + }, + "fails on connection refused": { + wantFail: true, + prepare: prepareCaseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tk, cleanup := test.prepare(t) + defer cleanup() + + if test.wantFail { + assert.False(t, tk.Check()) + } else { + assert.True(t, tk.Check()) + } +
}) + } +} + +func TestTraefik_Collect(t *testing.T) { + tests := map[string]struct { + prepare func(t *testing.T) (tk *Traefik, cleanup func()) + wantCollected []map[string]int64 + }{ + "success on valid response v2.2.1": { + prepare: prepareCaseTraefikV221Metrics, + wantCollected: []map[string]int64{ + { + "entrypoint_open_connections_traefik_http_GET": 1, + "entrypoint_open_connections_web_http_DELETE": 0, + "entrypoint_open_connections_web_http_GET": 0, + "entrypoint_open_connections_web_http_HEAD": 0, + "entrypoint_open_connections_web_http_OPTIONS": 0, + "entrypoint_open_connections_web_http_PATCH": 0, + "entrypoint_open_connections_web_http_POST": 4, + "entrypoint_open_connections_web_http_PUT": 0, + "entrypoint_open_connections_web_websocket_GET": 0, + "entrypoint_request_duration_average_traefik_http_1xx": 0, + "entrypoint_request_duration_average_traefik_http_2xx": 0, + "entrypoint_request_duration_average_traefik_http_3xx": 0, + "entrypoint_request_duration_average_traefik_http_4xx": 0, + "entrypoint_request_duration_average_traefik_http_5xx": 0, + "entrypoint_request_duration_average_web_http_1xx": 0, + "entrypoint_request_duration_average_web_http_2xx": 0, + "entrypoint_request_duration_average_web_http_3xx": 0, + "entrypoint_request_duration_average_web_http_4xx": 0, + "entrypoint_request_duration_average_web_http_5xx": 0, + "entrypoint_request_duration_average_web_websocket_1xx": 0, + "entrypoint_request_duration_average_web_websocket_2xx": 0, + "entrypoint_request_duration_average_web_websocket_3xx": 0, + "entrypoint_request_duration_average_web_websocket_4xx": 0, + "entrypoint_request_duration_average_web_websocket_5xx": 0, + "entrypoint_requests_traefik_http_1xx": 0, + "entrypoint_requests_traefik_http_2xx": 2840814, + "entrypoint_requests_traefik_http_3xx": 0, + "entrypoint_requests_traefik_http_4xx": 8, + "entrypoint_requests_traefik_http_5xx": 0, + "entrypoint_requests_web_http_1xx": 0, + "entrypoint_requests_web_http_2xx": 1036208982, + "entrypoint_requests_web_http_3xx": 416262, + "entrypoint_requests_web_http_4xx": 267591379, + "entrypoint_requests_web_http_5xx": 223136, + "entrypoint_requests_web_websocket_1xx": 0, + "entrypoint_requests_web_websocket_2xx": 0, + "entrypoint_requests_web_websocket_3xx": 0, + "entrypoint_requests_web_websocket_4xx": 79137, + "entrypoint_requests_web_websocket_5xx": 0, + }, + }, + }, + "properly calculating entrypoint request duration delta": { + prepare: prepareCaseTraefikEntrypointRequestDuration, + wantCollected: []map[string]int64{ + { + "entrypoint_request_duration_average_traefik_http_1xx": 0, + "entrypoint_request_duration_average_traefik_http_2xx": 0, + "entrypoint_request_duration_average_traefik_http_3xx": 0, + "entrypoint_request_duration_average_traefik_http_4xx": 0, + "entrypoint_request_duration_average_traefik_http_5xx": 0, + "entrypoint_request_duration_average_web_websocket_1xx": 0, + "entrypoint_request_duration_average_web_websocket_2xx": 0, + "entrypoint_request_duration_average_web_websocket_3xx": 0, + "entrypoint_request_duration_average_web_websocket_4xx": 0, + "entrypoint_request_duration_average_web_websocket_5xx": 0, + }, + { + "entrypoint_request_duration_average_traefik_http_1xx": 0, + "entrypoint_request_duration_average_traefik_http_2xx": 500, + "entrypoint_request_duration_average_traefik_http_3xx": 0, + "entrypoint_request_duration_average_traefik_http_4xx": 0, + "entrypoint_request_duration_average_traefik_http_5xx": 0, + "entrypoint_request_duration_average_web_websocket_1xx": 0, + 
"entrypoint_request_duration_average_web_websocket_2xx": 0, + "entrypoint_request_duration_average_web_websocket_3xx": 250, + "entrypoint_request_duration_average_web_websocket_4xx": 0, + "entrypoint_request_duration_average_web_websocket_5xx": 0, + }, + { + "entrypoint_request_duration_average_traefik_http_1xx": 0, + "entrypoint_request_duration_average_traefik_http_2xx": 1000, + "entrypoint_request_duration_average_traefik_http_3xx": 0, + "entrypoint_request_duration_average_traefik_http_4xx": 0, + "entrypoint_request_duration_average_traefik_http_5xx": 0, + "entrypoint_request_duration_average_web_websocket_1xx": 0, + "entrypoint_request_duration_average_web_websocket_2xx": 0, + "entrypoint_request_duration_average_web_websocket_3xx": 500, + "entrypoint_request_duration_average_web_websocket_4xx": 0, + "entrypoint_request_duration_average_web_websocket_5xx": 0, + }, + { + "entrypoint_request_duration_average_traefik_http_1xx": 0, + "entrypoint_request_duration_average_traefik_http_2xx": 0, + "entrypoint_request_duration_average_traefik_http_3xx": 0, + "entrypoint_request_duration_average_traefik_http_4xx": 0, + "entrypoint_request_duration_average_traefik_http_5xx": 0, + "entrypoint_request_duration_average_web_websocket_1xx": 0, + "entrypoint_request_duration_average_web_websocket_2xx": 0, + "entrypoint_request_duration_average_web_websocket_3xx": 0, + "entrypoint_request_duration_average_web_websocket_4xx": 0, + "entrypoint_request_duration_average_web_websocket_5xx": 0, + }, + }, + }, + "fails on response with unexpected metrics (not Traefik)": { + prepare: prepareCaseNotTraefikMetrics, + }, + "fails on 404 response": { + prepare: prepareCase404Response, + }, + "fails on connection refused": { + prepare: prepareCaseConnectionRefused, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tk, cleanup := test.prepare(t) + defer cleanup() + + var ms map[string]int64 + for _, want := range test.wantCollected { + ms = tk.Collect() + assert.Equal(t, want, ms) + } + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDimsVarsIDs(t, tk, ms) + } + }) + } +} + +func prepareCaseTraefikV221Metrics(t *testing.T) (*Traefik, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(v221Metrics) + })) + h := New() + h.URL = srv.URL + require.True(t, h.Init()) + + return h, srv.Close +} + +func prepareCaseTraefikEntrypointRequestDuration(t *testing.T) (*Traefik, func()) { + t.Helper() + var num int + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + num++ + switch num { + case 1: + _, _ = w.Write([]byte(` +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 10.1 +traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 20.1 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 30 +traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 40 +`)) + case 2: + _, _ = w.Write([]byte(` +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 15.1 +traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 25.1 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 40 
+traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 60 +`)) + default: + _, _ = w.Write([]byte(` +traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 25.1 +traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 35.1 +traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 50 +traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 80 +`)) + } + })) + h := New() + h.URL = srv.URL + require.True(t, h.Init()) + + return h, srv.Close +} + +func prepareCaseNotTraefikMetrics(t *testing.T) (*Traefik, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` +# HELP application_backend_http_responses_total Total number of HTTP responses. +# TYPE application_backend_http_responses_total counter +application_backend_http_responses_total{proxy="infra-traefik-web",code="1xx"} 0 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="1xx"} 4130401 +application_backend_http_responses_total{proxy="infra-traefik-web",code="2xx"} 21338013 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="2xx"} 0 +application_backend_http_responses_total{proxy="infra-traefik-web",code="3xx"} 10004 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="3xx"} 0 +application_backend_http_responses_total{proxy="infra-traefik-web",code="4xx"} 10170758 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="4xx"} 0 +application_backend_http_responses_total{proxy="infra-traefik-web",code="5xx"} 3075 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="5xx"} 0 +application_backend_http_responses_total{proxy="infra-traefik-web",code="other"} 5657 +application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} 0 +`)) + })) + h := New() + h.URL = srv.URL + require.True(t, h.Init()) + + return h, srv.Close +} + +func prepareCase404Response(t *testing.T) (*Traefik, func()) { + t.Helper() + srv := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + h := New() + h.URL = srv.URL + require.True(t, h.Init()) + + return h, srv.Close +} + +func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) { + t.Helper() + h := New() + h.URL = "http://127.0.0.1:38001" + require.True(t, h.Init()) + + return h, func() {} +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, tk *Traefik, ms map[string]int64) { + for _, chart := range *tk.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := ms[dim.ID] + assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID) + } + for _, v := range chart.Vars { + _, ok := ms[v.ID] + assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/README.md b/src/go/collectors/go.d.plugin/modules/unbound/README.md new file mode 120000 index 00000000000000..5b0f42b0475408 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/README.md @@ -0,0 +1 @@ +integrations/unbound.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/charts.go
b/src/go/collectors/go.d.plugin/modules/unbound/charts.go new file mode 100644 index 00000000000000..b5d200e01abab6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/charts.go @@ -0,0 +1,527 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package unbound + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +type ( + // Charts is an alias for module.Charts + Charts = module.Charts + // Chart is an alias for module.Chart + Chart = module.Chart + // Dims is an alias for module.Dims + Dims = module.Dims + // Dim is an alias for module.Dim + Dim = module.Dim +) + +const ( + prioQueries = module.Priority + iota + prioIPRateLimitedQueries + prioQueryType + prioQueryClass + prioQueryOpCode + prioQueryFlag + prioDNSCryptQueries + + prioRecurReplies + prioReplyRCode + + prioRecurTime + + prioCache + prioCachePercentage + prioCachePrefetch + prioCacheExpired + prioZeroTTL + prioCacheCount + + prioReqListUsage + prioReqListCurUsage + prioReqListJostle + + prioTCPUsage + + prioMemCache + prioMemMod + prioMemStreamWait + prioUptime + + prioThread +) + +func charts(cumulative bool) *Charts { + return &Charts{ + makeIncrIf(queriesChart.Copy(), cumulative), + makeIncrIf(ipRateLimitedQueriesChart.Copy(), cumulative), + makeIncrIf(cacheChart.Copy(), cumulative), + makePercOfIncrIf(cachePercentageChart.Copy(), cumulative), + makeIncrIf(prefetchChart.Copy(), cumulative), + makeIncrIf(expiredChart.Copy(), cumulative), + makeIncrIf(zeroTTLChart.Copy(), cumulative), + makeIncrIf(dnsCryptChart.Copy(), cumulative), + makeIncrIf(recurRepliesChart.Copy(), cumulative), + recurTimeChart.Copy(), + reqListUsageChart.Copy(), + reqListCurUsageChart.Copy(), + makeIncrIf(reqListJostleChart.Copy(), cumulative), + tcpUsageChart.Copy(), + uptimeChart.Copy(), + } +} + +func extendedCharts(cumulative bool) *Charts { + return &Charts{ + memCacheChart.Copy(), + memModChart.Copy(), + memStreamWaitChart.Copy(), + cacheCountChart.Copy(), + makeIncrIf(queryTypeChart.Copy(), cumulative), + makeIncrIf(queryClassChart.Copy(), cumulative), + makeIncrIf(queryOpCodeChart.Copy(), cumulative), + makeIncrIf(queryFlagChart.Copy(), cumulative), + makeIncrIf(answerRCodeChart.Copy(), cumulative), + } +} + +func threadCharts(thread string, cumulative bool) *Charts { + charts := charts(cumulative) + _ = charts.Remove(uptimeChart.ID) + + for i, chart := range *charts { + convertTotalChartToThread(chart, thread, prioThread+i) + } + return charts +} + +func convertTotalChartToThread(chart *Chart, thread string, priority int) { + chart.ID = fmt.Sprintf("%s_%s", thread, chart.ID) + chart.Title = fmt.Sprintf("%s %s", + cases.Title(language.English, cases.Compact).String(thread), + chart.Title, + ) + chart.Fam = thread + "_stats" + chart.Ctx = "thread_" + chart.Ctx + chart.Priority = priority + for _, dim := range chart.Dims { + dim.ID = strings.Replace(dim.ID, "total", thread, 1) + } +} + +// Common stats charts +// see https://nlnetlabs.nl/documentation/unbound/unbound-control for the stats provided by unbound-control +var ( + queriesChart = Chart{ + ID: "queries", + Title: "Received Queries", + Units: "queries", + Fam: "queries", + Ctx: "unbound.queries", + Priority: prioQueries, + Dims: Dims{ + {ID: "total.num.queries", Name: "queries"}, + }, + } + ipRateLimitedQueriesChart = Chart{ + ID: "queries_ip_ratelimited", + Title: "Rate Limited Queries", + Units: "queries", + Fam: "queries", + Ctx: "unbound.queries_ip_ratelimited", + Priority:
prioIPRateLimitedQueries, + Dims: Dims{ + {ID: "total.num.queries_ip_ratelimited", Name: "ratelimited"}, + }, + } + // ifdef USE_DNSCRYPT + dnsCryptChart = Chart{ + ID: "dnscrypt_queries", + Title: "DNSCrypt Queries", + Units: "queries", + Fam: "queries", + Ctx: "unbound.dnscrypt_queries", + Priority: prioDNSCryptQueries, + Dims: Dims{ + {ID: "total.num.dnscrypt.crypted", Name: "crypted"}, + {ID: "total.num.dnscrypt.cert", Name: "cert"}, + {ID: "total.num.dnscrypt.cleartext", Name: "cleartext"}, + {ID: "total.num.dnscrypt.malformed", Name: "malformed"}, + }, + } + cacheChart = Chart{ + ID: "cache", + Title: "Cache Statistics", + Units: "events", + Fam: "cache", + Ctx: "unbound.cache", + Type: module.Stacked, + Priority: prioCache, + Dims: Dims{ + {ID: "total.num.cachehits", Name: "hits"}, + {ID: "total.num.cachemiss", Name: "miss"}, + }, + } + cachePercentageChart = Chart{ + ID: "cache_percentage", + Title: "Cache Statistics Percentage", + Units: "percentage", + Fam: "cache", + Ctx: "unbound.cache_percentage", + Type: module.Stacked, + Priority: prioCachePercentage, + Dims: Dims{ + {ID: "total.num.cachehits", Name: "hits", Algo: module.PercentOfAbsolute}, + {ID: "total.num.cachemiss", Name: "miss", Algo: module.PercentOfAbsolute}, + }, + } + prefetchChart = Chart{ + ID: "cache_prefetch", + Title: "Cache Prefetches", + Units: "prefetches", + Fam: "cache", + Ctx: "unbound.prefetch", + Priority: prioCachePrefetch, + Dims: Dims{ + {ID: "total.num.prefetch", Name: "prefetches"}, + }, + } + expiredChart = Chart{ + ID: "cache_expired", + Title: "Replies Served From Expired Cache", + Units: "replies", + Fam: "cache", + Ctx: "unbound.expired", + Priority: prioCacheExpired, + Dims: Dims{ + {ID: "total.num.expired", Name: "expired"}, + }, + } + zeroTTLChart = Chart{ + ID: "zero_ttl_replies", + Title: "Replies Served From Expired Cache", + Units: "replies", + Fam: "cache", + Ctx: "unbound.zero_ttl_replies", + Priority: prioZeroTTL, + Dims: Dims{ + {ID: "total.num.zero_ttl", Name: "zero_ttl"}, + }, + } + recurRepliesChart = Chart{ + ID: "recursive_replies", + Title: "Replies That Needed Recursive Processing", + Units: "replies", + Fam: "replies", + Ctx: "unbound.recursive_replies", + Priority: prioRecurReplies, + Dims: Dims{ + {ID: "total.num.recursivereplies", Name: "recursive"}, + }, + } + recurTimeChart = Chart{ + ID: "recursion_time", + Title: "Time Spent On Recursive Processing", + Units: "milliseconds", + Fam: "recursion timings", + Ctx: "unbound.recursion_time", + Priority: prioRecurTime, + Dims: Dims{ + {ID: "total.recursion.time.avg", Name: "avg"}, + {ID: "total.recursion.time.median", Name: "median"}, + }, + } + reqListUsageChart = Chart{ + ID: "request_list_usage", + Title: "Request List Usage", + Units: "queries", + Fam: "request list", + Ctx: "unbound.request_list_usage", + Priority: prioReqListUsage, + Dims: Dims{ + {ID: "total.requestlist.avg", Name: "avg", Div: 1000}, + {ID: "total.requestlist.max", Name: "max"}, // all time max in cumulative mode, never resets + }, + } + reqListCurUsageChart = Chart{ + ID: "current_request_list_usage", + Title: "Current Request List Usage", + Units: "queries", + Fam: "request list", + Ctx: "unbound.current_request_list_usage", + Type: module.Area, + Priority: prioReqListCurUsage, + Dims: Dims{ + {ID: "total.requestlist.current.all", Name: "all"}, + {ID: "total.requestlist.current.user", Name: "user"}, + }, + } + reqListJostleChart = Chart{ + ID: "request_list_jostle_list", + Title: "Request List Jostle List Events", + Units: "queries", + Fam: "request 
list", + Ctx: "unbound.request_list_jostle_list", + Priority: prioReqListJostle, + Dims: Dims{ + {ID: "total.requestlist.overwritten", Name: "overwritten"}, + {ID: "total.requestlist.exceeded", Name: "dropped"}, + }, + } + tcpUsageChart = Chart{ + ID: "tcpusage", + Title: "TCP Handler Buffers", + Units: "buffers", + Fam: "tcp buffers", + Ctx: "unbound.tcpusage", + Priority: prioTCPUsage, + Dims: Dims{ + {ID: "total.tcpusage", Name: "usage"}, + }, + } + uptimeChart = Chart{ + ID: "uptime", + Title: "Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "unbound.uptime", + Priority: prioUptime, + Dims: Dims{ + {ID: "time.up", Name: "time"}, + }, + } +) + +// Extended stats charts +var ( + // TODO: do not add dnscrypt stuff by default? + memCacheChart = Chart{ + ID: "cache_memory", + Title: "Cache Memory", + Units: "KB", + Fam: "mem", + Ctx: "unbound.cache_memory", + Type: module.Stacked, + Priority: prioMemCache, + Dims: Dims{ + {ID: "mem.cache.message", Name: "message", Div: 1024}, + {ID: "mem.cache.rrset", Name: "rrset", Div: 1024}, + {ID: "mem.cache.dnscrypt_nonce", Name: "dnscrypt_nonce", Div: 1024}, // ifdef USE_DNSCRYPT + {ID: "mem.cache.dnscrypt_shared_secret", Name: "dnscrypt_shared_secret", Div: 1024}, // ifdef USE_DNSCRYPT + }, + } + // TODO: do not add subnet and ipsecmod stuff by default? + memModChart = Chart{ + ID: "mod_memory", + Title: "Module Memory", + Units: "KB", + Fam: "mem", + Ctx: "unbound.mod_memory", + Type: module.Stacked, + Priority: prioMemMod, + Dims: Dims{ + {ID: "mem.mod.iterator", Name: "iterator", Div: 1024}, + {ID: "mem.mod.respip", Name: "respip", Div: 1024}, + {ID: "mem.mod.validator", Name: "validator", Div: 1024}, + {ID: "mem.mod.subnet", Name: "subnet", Div: 1024}, // ifdef CLIENT_SUBNET + {ID: "mem.mod.ipsecmod", Name: "ipsec", Div: 1024}, // ifdef USE_IPSECMOD + }, + } + memStreamWaitChart = Chart{ + ID: "mem_stream_wait", + Title: "TCP and TLS Stream Waif Buffer Memory", + Units: "KB", + Fam: "mem", + Ctx: "unbound.mem_streamwait", + Priority: prioMemStreamWait, + Dims: Dims{ + {ID: "mem.streamwait", Name: "streamwait", Div: 1024}, + }, + } + // NOTE: same family as for cacheChart + cacheCountChart = Chart{ + ID: "cache_count", + Title: "Cache Items Count", + Units: "items", + Fam: "cache", + Ctx: "unbound.cache_count", + Type: module.Stacked, + Priority: prioCacheCount, + Dims: Dims{ + {ID: "infra.cache.count", Name: "infra"}, + {ID: "key.cache.count", Name: "key"}, + {ID: "msg.cache.count", Name: "msg"}, + {ID: "rrset.cache.count", Name: "rrset"}, + {ID: "dnscrypt_nonce.cache.count", Name: "dnscrypt_nonce"}, + {ID: "dnscrypt_shared_secret.cache.count", Name: "shared_secret"}, + }, + } + queryTypeChart = Chart{ + ID: "queries_by_type", + Title: "Queries By Type", + Units: "queries", + Fam: "queries", + Ctx: "unbound.type_queries", + Type: module.Stacked, + Priority: prioQueryType, + } + queryClassChart = Chart{ + ID: "queries_by_class", + Title: "Queries By Class", + Units: "queries", + Fam: "queries", + Ctx: "unbound.class_queries", + Type: module.Stacked, + Priority: prioQueryClass, + } + queryOpCodeChart = Chart{ + ID: "queries_by_opcode", + Title: "Queries By OpCode", + Units: "queries", + Fam: "queries", + Ctx: "unbound.opcode_queries", + Type: module.Stacked, + Priority: prioQueryOpCode, + } + queryFlagChart = Chart{ + ID: "queries_by_flag", + Title: "Queries By Flag", + Units: "queries", + Fam: "queries", + Ctx: "unbound.flag_queries", + Type: module.Stacked, + Priority: prioQueryFlag, + Dims: Dims{ + {ID: "num.query.flags.QR", Name: "QR"}, + 
{ID: "num.query.flags.AA", Name: "AA"}, + {ID: "num.query.flags.TC", Name: "TC"}, + {ID: "num.query.flags.RD", Name: "RD"}, + {ID: "num.query.flags.RA", Name: "RA"}, + {ID: "num.query.flags.Z", Name: "Z"}, + {ID: "num.query.flags.AD", Name: "AD"}, + {ID: "num.query.flags.CD", Name: "CD"}, + }, + } + answerRCodeChart = Chart{ + ID: "replies_by_rcode", + Title: "Replies By RCode", + Units: "replies", + Fam: "replies", + Ctx: "unbound.rcode_answers", + Type: module.Stacked, + Priority: prioReplyRCode, + } +) + +func (u *Unbound) updateCharts() { + if len(u.curCache.threads) > 1 { + for v := range u.curCache.threads { + if !u.cache.threads[v] { + u.cache.threads[v] = true + u.addThreadCharts(v) + } + } + } + // 0-6 rcodes always included + if hasExtendedData := len(u.curCache.answerRCode) > 0; !hasExtendedData { + return + } + + if !u.extChartsCreated { + charts := extendedCharts(u.Cumulative) + if err := u.Charts().Add(*charts...); err != nil { + u.Warningf("add extended charts: %v", err) + } + u.extChartsCreated = true + } + + for v := range u.curCache.queryType { + if !u.cache.queryType[v] { + u.cache.queryType[v] = true + u.addDimToQueryTypeChart(v) + } + } + for v := range u.curCache.queryClass { + if !u.cache.queryClass[v] { + u.cache.queryClass[v] = true + u.addDimToQueryClassChart(v) + } + } + for v := range u.curCache.queryOpCode { + if !u.cache.queryOpCode[v] { + u.cache.queryOpCode[v] = true + u.addDimToQueryOpCodeChart(v) + } + } + for v := range u.curCache.answerRCode { + if !u.cache.answerRCode[v] { + u.cache.answerRCode[v] = true + u.addDimToAnswerRcodeChart(v) + } + } +} + +func (u *Unbound) addThreadCharts(thread string) { + charts := threadCharts(thread, u.Cumulative) + if err := u.Charts().Add(*charts...); err != nil { + u.Warningf("add '%s' charts: %v", thread, err) + } +} + +func (u *Unbound) addDimToQueryTypeChart(typ string) { + u.addDimToChart(queryTypeChart.ID, "num.query.type."+typ, typ) +} +func (u *Unbound) addDimToQueryClassChart(class string) { + u.addDimToChart(queryClassChart.ID, "num.query.class."+class, class) +} +func (u *Unbound) addDimToQueryOpCodeChart(opcode string) { + u.addDimToChart(queryOpCodeChart.ID, "num.query.opcode."+opcode, opcode) +} +func (u *Unbound) addDimToAnswerRcodeChart(rcode string) { + u.addDimToChart(answerRCodeChart.ID, "num.answer.rcode."+rcode, rcode) +} + +func (u *Unbound) addDimToChart(chartID, dimID, dimName string) { + chart := u.Charts().Get(chartID) + if chart == nil { + u.Warningf("add '%s' dim: couldn't find '%s' chart", dimID, chartID) + return + } + dim := &Dim{ID: dimID, Name: dimName} + if u.Cumulative { + dim.Algo = module.Incremental + } + if err := chart.AddDim(dim); err != nil { + u.Warningf("add '%s' dim: %v", dimID, err) + return + } + chart.MarkNotCreated() +} + +func makeIncrIf(chart *Chart, do bool) *Chart { + if !do { + return chart + } + chart.Units += "/s" + for _, d := range chart.Dims { + d.Algo = module.Incremental + } + return chart +} + +func makePercOfIncrIf(chart *Chart, do bool) *Chart { + if !do { + return chart + } + for _, d := range chart.Dims { + d.Algo = module.PercentOfIncremental + } + return chart +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/collect.go b/src/go/collectors/go.d.plugin/modules/unbound/collect.go new file mode 100644 index 00000000000000..125f206aed7f73 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/collect.go @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package unbound + +import ( + "fmt" + "strconv" + "strings" +) + 
+// https://github.com/NLnetLabs/unbound/blob/master/daemon/remote.c (do_stats: print_stats, print_thread_stats, print_mem, print_uptime, print_ext) +// https://github.com/NLnetLabs/unbound/blob/master/libunbound/unbound.h (structs: ub_server_stats, ub_shm_stat_info) +// https://docs.datadoghq.com/integrations/unbound/#metrics (stats description) +// https://docs.menandmice.com/display/MM/Unbound+request-list+demystified (request lists explanation) + +func (u *Unbound) collect() (map[string]int64, error) { + stats, err := u.scrapeUnboundStats() + if err != nil { + return nil, err + } + + mx := u.collectStats(stats) + u.updateCharts() + return mx, nil +} + +func (u *Unbound) scrapeUnboundStats() ([]entry, error) { + var output []string + // "UBCT1" is the remote-control protocol preamble that prefixes every command. + var command = "UBCT1 stats" + if u.Cumulative { + command = "UBCT1 stats_noreset" + } + + if err := u.client.Connect(); err != nil { + return nil, fmt.Errorf("failed to connect: %v", err) + } + defer func() { _ = u.client.Disconnect() }() + + err := u.client.Command(command+"\n", func(bytes []byte) bool { + output = append(output, string(bytes)) + return true + }) + if err != nil { + return nil, fmt.Errorf("send command '%s': %w", command, err) + } + + switch len(output) { + case 0: + return nil, fmt.Errorf("command '%s': empty response", command) + case 1: + // in case of error the first line of the response is: error <descriptive text possible> \n + return nil, fmt.Errorf("command '%s': '%s'", command, output[0]) + } + return parseStatsOutput(output) +} + +func (u *Unbound) collectStats(stats []entry) map[string]int64 { + if u.Cumulative { + return u.collectCumulativeStats(stats) + } + return u.collectNonCumulativeStats(stats) +} + +func (u *Unbound) collectCumulativeStats(stats []entry) map[string]int64 { + mul := float64(1000) + // following stats change only on cachemiss event in cumulative mode + // - *.requestlist.avg, + // - *.recursion.time.avg + // - *.recursion.time.median + v := findEntry("total.num.cachemiss", stats) + if v == u.prevCacheMiss { + // so we need to reset them if there is no such event + mul = 0 + } + u.prevCacheMiss = v + return u.processStats(stats, mul) +} + +func (u *Unbound) collectNonCumulativeStats(stats []entry) map[string]int64 { + mul := float64(1000) + mx := u.processStats(stats, mul) + + // see 'static int print_ext(RES* ssl, struct ub_stats_info* s)' in + // https://github.com/NLnetLabs/unbound/blob/master/daemon/remote.c + // - zero value queries type not included + // - zero value queries class not included + // - zero value queries opcode not included + // - only 0-6 rcodes answers always included, other zero value rcodes not included + for k := range u.cache.queryType { + if _, ok := u.curCache.queryType[k]; !ok { + mx["num.query.type."+k] = 0 + } + } + for k := range u.cache.queryClass { + if _, ok := u.curCache.queryClass[k]; !ok { + mx["num.query.class."+k] = 0 + } + } + for k := range u.cache.queryOpCode { + if _, ok := u.curCache.queryOpCode[k]; !ok { + mx["num.query.opcode."+k] = 0 + } + } + for k := range u.cache.answerRCode { + if _, ok := u.curCache.answerRCode[k]; !ok { + mx["num.answer.rcode."+k] = 0 + } + } + return mx +} + +func (u *Unbound) processStats(stats []entry, mul float64) map[string]int64 { + u.curCache.clear() + mx := make(map[string]int64, len(stats)) + for _, e := range stats { + switch { + // *.requestlist.avg, *.recursion.time.avg, *.recursion.time.median + case e.hasSuffix(".avg"), e.hasSuffix(".median"): + e.value *= mul + case e.hasPrefix("thread") && e.hasSuffix("num.queries"): + v
:= extractThread(e.key) + u.curCache.threads[v] = true + case e.hasPrefix("num.query.type"): + v := extractQueryType(e.key) + u.curCache.queryType[v] = true + case e.hasPrefix("num.query.class"): + v := extractQueryClass(e.key) + u.curCache.queryClass[v] = true + case e.hasPrefix("num.query.opcode"): + v := extractQueryOpCode(e.key) + u.curCache.queryOpCode[v] = true + case e.hasPrefix("num.answer.rcode"): + v := extractAnswerRCode(e.key) + u.curCache.answerRCode[v] = true + } + mx[e.key] = int64(e.value) + } + return mx +} + +func extractThread(key string) string { idx := strings.IndexByte(key, '.'); return key[:idx] } +func extractQueryType(key string) string { i := len("num.query.type."); return key[i:] } +func extractQueryClass(key string) string { i := len("num.query.class."); return key[i:] } +func extractQueryOpCode(key string) string { i := len("num.query.opcode."); return key[i:] } +func extractAnswerRCode(key string) string { i := len("num.answer.rcode."); return key[i:] } + +type entry struct { + key string + value float64 +} + +func (e entry) hasPrefix(prefix string) bool { return strings.HasPrefix(e.key, prefix) } +func (e entry) hasSuffix(suffix string) bool { return strings.HasSuffix(e.key, suffix) } + +func findEntry(key string, entries []entry) float64 { + for _, e := range entries { + if e.key == key { + return e.value + } + } + return -1 +} + +func parseStatsOutput(output []string) ([]entry, error) { + var es []entry + for _, v := range output { + e, err := parseStatsLine(v) + if err != nil { + return nil, err + } + if e.hasPrefix("histogram") { + continue + } + es = append(es, e) + } + return es, nil +} + +func parseStatsLine(line string) (entry, error) { + // 'stats' output is a list of [key]=[value] lines. + parts := strings.Split(line, "=") + if len(parts) != 2 { + return entry{}, fmt.Errorf("bad line syntax: %s", line) + } + f, err := strconv.ParseFloat(parts[1], 64) + return entry{key: parts[0], value: f}, err +} + +func newCollectCache() collectCache { + return collectCache{ + threads: make(map[string]bool), + queryType: make(map[string]bool), + queryClass: make(map[string]bool), + queryOpCode: make(map[string]bool), + answerRCode: make(map[string]bool), + } +} + +type collectCache struct { + threads map[string]bool + queryType map[string]bool + queryClass map[string]bool + queryOpCode map[string]bool + answerRCode map[string]bool +} + +func (c *collectCache) clear() { + *c = newCollectCache() +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/config.go b/src/go/collectors/go.d.plugin/modules/unbound/config/config.go new file mode 100644 index 00000000000000..69dc5c219237cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/config.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package config + +import ( + "fmt" + "strings" +) + +// UnboundConfig represents Unbound configuration file. 
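+// Only the statistics and remote-control options are captured; they map to a +// unbound.conf similar to this hypothetical fragment: +// +// server: +// statistics-cumulative: yes +// remote-control: +// control-enable: yes +// control-interface: 127.0.0.1 +// control-port: 8953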
+type UnboundConfig struct { + cumulative string // statistics-cumulative + enable string // control-enable + iface string // control-interface + port string // control-port + useCert string // control-use-cert + keyFile string // control-key-file + certFile string // control-cert-file +} + +func (c UnboundConfig) String() string { + format := strings.Join([]string{ + "[", + `"statistics-cumulative": '%s', `, + `"control-enable": '%s', `, + `"control-interface": '%s', `, + `"control-port": '%s', `, + `"control-use-cert": '%s', `, + `"control-key-file": '%s', `, + `"control-cert-file": '%s'`, + "]", + }, "") + return fmt.Sprintf(format, c.cumulative, c.enable, c.iface, c.port, c.useCert, c.keyFile, c.certFile) +} + +func (c UnboundConfig) Empty() bool { return c == UnboundConfig{} } +func (c UnboundConfig) Cumulative() (bool, bool) { return c.cumulative == "yes", c.cumulative != "" } +func (c UnboundConfig) ControlEnabled() (bool, bool) { return c.enable == "yes", c.enable != "" } +func (c UnboundConfig) ControlInterface() (string, bool) { return c.iface, c.iface != "" } +func (c UnboundConfig) ControlPort() (string, bool) { return c.port, c.port != "" } +func (c UnboundConfig) ControlUseCert() (bool, bool) { return c.useCert == "yes", c.useCert != "" } +func (c UnboundConfig) ControlKeyFile() (string, bool) { return c.keyFile, c.keyFile != "" } +func (c UnboundConfig) ControlCertFile() (string, bool) { return c.certFile, c.certFile != "" } + +func fromOptions(options []option) *UnboundConfig { + cfg := &UnboundConfig{} + for _, opt := range options { + switch opt.name { + default: + case optInterface: + applyControlInterface(cfg, opt.value) + case optCumulative: + cfg.cumulative = opt.value + case optEnable: + cfg.enable = opt.value + case optPort: + cfg.port = opt.value + case optUseCert: + cfg.useCert = opt.value + case optKeyFile: + cfg.keyFile = opt.value + case optCertFile: + cfg.certFile = opt.value + } + } + return cfg +} + +// Unbound doesn't allow querying stats via a unix socket when the control-interface is also enabled on an IP interface, so an IP interface takes precedence.
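+// For example, given this hypothetical sequence of options: +// +// control-interface: /run/unbound.sock +// control-interface: 127.0.0.1 +// +// the resolved interface is "127.0.0.1", regardless of option order.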
+func applyControlInterface(cfg *UnboundConfig, value string) { + if cfg.iface == "" || !isUnixSocket(value) || isUnixSocket(cfg.iface) { + cfg.iface = value + } +} + +func isUnixSocket(address string) bool { + return strings.HasPrefix(address, "/") +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/config_test.go b/src/go/collectors/go.d.plugin/modules/unbound/config/config_test.go new file mode 100644 index 00000000000000..0375c1368af159 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/config_test.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnboundConfig_Empty(t *testing.T) { + assert.True(t, UnboundConfig{}.Empty()) + assert.False(t, UnboundConfig{enable: "yes"}.Empty()) +} + +func TestUnboundConfig_Cumulative(t *testing.T) { + tests := []struct { + input string + wantValue bool + wantOK bool + }{ + {input: "yes", wantValue: true, wantOK: true}, + {input: "no", wantValue: false, wantOK: true}, + {input: "", wantValue: false, wantOK: false}, + {input: "some value", wantValue: false, wantOK: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + cfg := UnboundConfig{cumulative: test.input} + + v, ok := cfg.Cumulative() + assert.Equal(t, test.wantValue, v) + assert.Equal(t, test.wantOK, ok) + }) + } +} + +func TestUnboundConfig_ControlEnabled(t *testing.T) { + tests := []struct { + input string + wantValue bool + wantOK bool + }{ + {input: "yes", wantValue: true, wantOK: true}, + {input: "no", wantValue: false, wantOK: true}, + {input: "", wantValue: false, wantOK: false}, + {input: "some value", wantValue: false, wantOK: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + cfg := UnboundConfig{enable: test.input} + + v, ok := cfg.ControlEnabled() + assert.Equal(t, test.wantValue, v) + assert.Equal(t, test.wantOK, ok) + }) + } +} + +func TestUnboundConfig_ControlInterface(t *testing.T) { + tests := []struct { + input string + wantValue string + wantOK bool + }{ + {input: "127.0.0.1", wantValue: "127.0.0.1", wantOK: true}, + {input: "/var/run/unbound.sock", wantValue: "/var/run/unbound.sock", wantOK: true}, + {input: "", wantValue: "", wantOK: false}, + {input: "some value", wantValue: "some value", wantOK: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + cfg := UnboundConfig{iface: test.input} + + v, ok := cfg.ControlInterface() + assert.Equal(t, test.wantValue, v) + assert.Equal(t, test.wantOK, ok) + }) + } +} + +func TestUnboundConfig_ControlPort(t *testing.T) { + tests := []struct { + input string + wantValue string + wantOK bool + }{ + {input: "8953", wantValue: "8953", wantOK: true}, + {input: "", wantValue: "", wantOK: false}, + {input: "some value", wantValue: "some value", wantOK: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + cfg := UnboundConfig{port: test.input} + + v, ok := cfg.ControlPort() + assert.Equal(t, test.wantValue, v) + assert.Equal(t, test.wantOK, ok) + }) + } +} + +func TestUnboundConfig_ControlUseCert(t *testing.T) { + tests := []struct { + input string + wantValue bool + wantOK bool + }{ + {input: "yes", wantValue: true, wantOK: true}, + {input: "no", wantValue: false, wantOK: true}, + {input: "", wantValue: false, wantOK: false}, + {input: "some value", wantValue: false, wantOK: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { 
+			cfg := UnboundConfig{useCert: test.input}
+
+			v, ok := cfg.ControlUseCert()
+			assert.Equal(t, test.wantValue, v)
+			assert.Equal(t, test.wantOK, ok)
+		})
+	}
+}
+
+func TestUnboundConfig_ControlKeyFile(t *testing.T) {
+	tests := []struct {
+		input     string
+		wantValue string
+		wantOK    bool
+	}{
+		{input: "/etc/unbound/unbound_control.key", wantValue: "/etc/unbound/unbound_control.key", wantOK: true},
+		{input: "", wantValue: "", wantOK: false},
+		{input: "some value", wantValue: "some value", wantOK: true},
+	}
+
+	for _, test := range tests {
+		t.Run(test.input, func(t *testing.T) {
+			cfg := UnboundConfig{keyFile: test.input}
+
+			v, ok := cfg.ControlKeyFile()
+			assert.Equal(t, test.wantValue, v)
+			assert.Equal(t, test.wantOK, ok)
+		})
+	}
+}
+
+func TestUnboundConfig_ControlCertFile(t *testing.T) {
+	tests := []struct {
+		input     string
+		wantValue string
+		wantOK    bool
+	}{
+		{input: "/etc/unbound/unbound_control.pem", wantValue: "/etc/unbound/unbound_control.pem", wantOK: true},
+		{input: "", wantValue: "", wantOK: false},
+		{input: "some value", wantValue: "some value", wantOK: true},
+	}
+
+	for _, test := range tests {
+		t.Run(test.input, func(t *testing.T) {
+			cfg := UnboundConfig{certFile: test.input}
+
+			v, ok := cfg.ControlCertFile()
+			assert.Equal(t, test.wantValue, v)
+			assert.Equal(t, test.wantOK, ok)
+		})
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/parse.go b/src/go/collectors/go.d.plugin/modules/unbound/config/parse.go
new file mode 100644
index 00000000000000..99a632d50fc341
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/unbound/config/parse.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+type option struct{ name, value string }
+
+const (
+	optInclude         = "include"
+	optIncludeToplevel = "include-toplevel"
+	optCumulative      = "statistics-cumulative"
+	optEnable          = "control-enable"
+	optInterface       = "control-interface"
+	optPort            = "control-port"
+	optUseCert         = "control-use-cert"
+	optKeyFile         = "control-key-file"
+	optCertFile        = "control-cert-file"
+)
+
+func isOptionUsed(opt option) bool {
+	switch opt.name {
+	case optInclude,
+		optIncludeToplevel,
+		optCumulative,
+		optEnable,
+		optInterface,
+		optPort,
+		optUseCert,
+		optKeyFile,
+		optCertFile:
+		return true
+	}
+	return false
+}
+
+// TODO:
+// If also using chroot, using full path names for the included files works, relative pathnames for the included names
+// work if the directory where the daemon is started equals its chroot/working directory or is specified before
+// the include statement with directory: dir.
+
+// Parse parses Unbound configuration files into an UnboundConfig.
+// It follows the logic described in the unbound.conf(5) man page:
+//   - Files can be included using the 'include:' directive. It can appear anywhere and accepts a single file name as its argument.
+//   - Processing continues as if the text from the included file was copied into the config file at that point.
+//   - Wildcards can be used to include multiple files.
+//
+// It stops processing on any error: a syntax error, a recursive include, a glob pattern that matches a directory, etc.
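+//
+// A minimal usage sketch (the path is illustrative):
+//
+//	cfg, err := config.Parse("/etc/unbound/unbound.conf")
+//	if err != nil {
+//		// parsing failed; the caller falls back to its defaults
+//	} else if enabled, ok := cfg.ControlEnabled(); ok && !enabled {
+//		// remote control is explicitly disabled
+//	}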
+func Parse(entryPath string) (*UnboundConfig, error) {
+	options, err := parse(entryPath, nil)
+	if err != nil {
+		return nil, err
+	}
+	return fromOptions(options), nil
+}
+
+func parse(filename string, visited map[string]bool) ([]option, error) {
+	if visited == nil {
+		visited = make(map[string]bool)
+	}
+	if visited[filename] {
+		return nil, fmt.Errorf("'%s' already visited", filename)
+	}
+	visited[filename] = true
+
+	f, err := open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = f.Close() }()
+
+	var options []option
+	sc := bufio.NewScanner(f)
+
+	for sc.Scan() {
+		line := strings.TrimSpace(sc.Text())
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		opt, err := parseLine(line)
+		if err != nil {
+			return nil, fmt.Errorf("file '%s', error on parsing line '%s': %v", filename, line, err)
+		}
+
+		if !isOptionUsed(opt) {
+			continue
+		}
+
+		if opt.name != optInclude && opt.name != optIncludeToplevel {
+			options = append(options, opt)
+			continue
+		}
+
+		filenames, err := globInclude(opt.value)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, name := range filenames {
+			opts, err := parse(name, visited)
+			if err != nil {
+				return nil, err
+			}
+			options = append(options, opts...)
+		}
+	}
+	return options, nil
+}
+
+func globInclude(include string) ([]string, error) {
+	if isGlobPattern(include) {
+		return filepath.Glob(include)
+	}
+	return []string{include}, nil
+}
+
+func parseLine(line string) (option, error) {
+	// Split on the first colon only: values may themselves contain ':' (e.g. IPv6 addresses).
+	parts := strings.SplitN(line, ":", 2)
+	if len(parts) != 2 {
+		return option{}, errors.New("bad syntax")
+	}
+	key, value := cleanKeyValue(parts[0], parts[1])
+	return option{name: key, value: value}, nil
+}
+
+// cleanKeyValue strips an inline comment, surrounding whitespace and quotes.
+func cleanKeyValue(key, value string) (string, string) {
+	if i := strings.IndexByte(value, '#'); i != -1 {
+		value = value[:i]
+	}
+	key = strings.TrimSpace(key)
+	value = strings.Trim(strings.TrimSpace(value), "\"'")
+	return key, value
+}
+
+func isGlobPattern(value string) bool {
+	// Characters special to filepath.Match ('\' is an escape character, except on Windows).
+	magicChars := `*?[`
+	if runtime.GOOS != "windows" {
+		magicChars = `*?[\`
+	}
+	return strings.ContainsAny(value, magicChars)
+}
+
+func open(filename string) (*os.File, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	fi, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	if !fi.Mode().IsRegular() {
+		return nil, fmt.Errorf("'%s' is not a regular file", filename)
+	}
+	return f, nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/parse_test.go b/src/go/collectors/go.d.plugin/modules/unbound/config/parse_test.go
new file mode 100644
index 00000000000000..72542a861f3da3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/unbound/config/parse_test.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+	tests := map[string]struct {
+		path    string
+		wantCfg UnboundConfig
+		wantErr bool
+	}{
+		"valid include": {
+			path: "testdata/valid_include.conf",
+			wantCfg: UnboundConfig{
+				cumulative: "yes",
+				enable:     "yes",
+				iface:      "10.0.0.1",
+				port:       "8955",
+				useCert:    "yes",
+				keyFile:    "/etc/unbound/unbound_control_2.key",
+				certFile:   "/etc/unbound/unbound_control_2.pem",
+			},
+		},
+		"valid include-toplevel": {
+			path: "testdata/valid_include_toplevel.conf",
+			wantCfg: UnboundConfig{
+				cumulative: "yes",
+				enable:     "yes",
+				iface:      "10.0.0.1",
+				port:       "8955",
+				useCert:    "yes",
+				keyFile:    "/etc/unbound/unbound_control_2.key",
+				certFile:
"/etc/unbound/unbound_control_2.pem", + }, + }, + "valid glob include": { + path: "testdata/valid_glob.conf", + wantCfg: UnboundConfig{ + cumulative: "yes", + enable: "yes", + iface: "10.0.0.1", + port: "8955", + useCert: "yes", + keyFile: "/etc/unbound/unbound_control_2.key", + certFile: "/etc/unbound/unbound_control_2.pem", + }, + }, + "non existent glob include": { + path: "testdata/non_existent_glob_include.conf", + wantCfg: UnboundConfig{ + cumulative: "yes", + enable: "yes", + iface: "10.0.0.1", + port: "8953", + useCert: "yes", + keyFile: "/etc/unbound/unbound_control.key", + certFile: "/etc/unbound/unbound_control.pem", + }, + }, + "infinite recursion include": { + path: "testdata/infinite_rec.conf", + wantErr: true, + }, + "non existent include": { + path: "testdata/non_existent_include.conf", + wantErr: true, + }, + "non existent path": { + path: "testdata/non_existent_path.conf", + wantErr: true, + }, + } + + for name, test := range tests { + name = fmt.Sprintf("%s (%s)", name, test.path) + t.Run(name, func(t *testing.T) { + cfg, err := Parse(test.path) + + if test.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, test.wantCfg, *cfg) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/infinite_rec.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/infinite_rec.conf new file mode 100644 index 00000000000000..904f75b3081d16 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/infinite_rec.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/infinite_rec.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. 
+ # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_glob_include.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_glob_include.conf new file mode 100644 index 00000000000000..21620f7d5fd26b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_glob_include.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/__non_existent_glob__*.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. 
+ # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_include.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_include.conf new file mode 100644 index 00000000000000..e493e35bb25262 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/non_existent_include.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/__non_existent_include__.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. 
+ control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob.conf new file mode 100644 index 00000000000000..f020c580acab53 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob.conf @@ -0,0 +1,82 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/valid_glob[2-3].conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. 
+ # control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob2.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob2.conf new file mode 100644 index 00000000000000..85bd80e0ddb253 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob2.conf @@ -0,0 +1,80 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + # control-interface: ::1 + control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. 
+ control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob3.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob3.conf new file mode 100644 index 00000000000000..f20eacf1a038e6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_glob3.conf @@ -0,0 +1,81 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.3 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include.conf new file mode 100644 index 00000000000000..1974f6178f1ed5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include.conf @@ -0,0 +1,82 @@ +# +# Example configuration file. 
+# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/valid_include2.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include2.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include2.conf new file mode 100644 index 00000000000000..c956d44d5e5c70 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include2.conf @@ -0,0 +1,81 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include: "testdata/valid_include3.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. 
+ # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + # control-interface: ::1 + control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include3.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include3.conf new file mode 100644 index 00000000000000..f20eacf1a038e6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include3.conf @@ -0,0 +1,81 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. 
+ # statistics-cumulative: no + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.3 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel.conf new file mode 100644 index 00000000000000..9e5675e10b64f5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel.conf @@ -0,0 +1,82 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include-toplevel: "testdata/valid_include_toplevel2.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. 
+# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: +# Script file to load +# python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel2.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel2.conf new file mode 100644 index 00000000000000..f3f69470d2db2e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel2.conf @@ -0,0 +1,81 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +include-toplevel: "testdata/valid_include_toplevel3.conf" + +# The server clause sets the main parameters. +server: +# whitespace is not necessary, but looks cleaner. + +# verbosity number, 0 is least verbose. 1 is default. +# verbosity: 1 + +# print statistics to the log (for every thread) every N seconds. +# Set to "" or 0 to disable. Default is disabled. +# statistics-interval: 0 + +# enable shm for stats, default no. if you enable also enable +# statistics-interval, every time it also writes stats to the +# shared memory segment keyed with shm-key. +# shm-enable: no + +# shm for stats uses this key, and key+1 for the shared mem segment. +# shm-key: 11777 + +# enable cumulative statistics, without clearing them after printing. +# statistics-cumulative: no + +# enable extended statistics (query types, answer codes, status) +# printed from unbound-control. default off, because of speed. +# extended-statistics: no + +# number of threads to create. 1 disables threading. +# num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: +# Script file to load +# python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. 
+remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + # control-interface: ::1 + control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control_2.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control_2.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel3.conf b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel3.conf new file mode 100644 index 00000000000000..d30778c014612b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config/testdata/valid_include_toplevel3.conf @@ -0,0 +1,81 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. + +# The server clause sets the main parameters. +server: +# whitespace is not necessary, but looks cleaner. + +# verbosity number, 0 is least verbose. 1 is default. +# verbosity: 1 + +# print statistics to the log (for every thread) every N seconds. +# Set to "" or 0 to disable. Default is disabled. +# statistics-interval: 0 + +# enable shm for stats, default no. if you enable also enable +# statistics-interval, every time it also writes stats to the +# shared memory segment keyed with shm-key. +# shm-enable: no + +# shm for stats uses this key, and key+1 for the shared mem segment. +# shm-key: 11777 + +# enable cumulative statistics, without clearing them after printing. +# statistics-cumulative: no + +# enable extended statistics (query types, answer codes, status) +# printed from unbound-control. default off, because of speed. +# extended-statistics: no + +# number of threads to create. 1 disables threading. +# num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: +# Script file to load +# python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. 
+ # control-interface: 127.0.0.1 + control-interface: 10.0.0.3 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8955 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/config_schema.json b/src/go/collectors/go.d.plugin/modules/unbound/config_schema.json new file mode 100644 index 00000000000000..290905ac0dc382 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/config_schema.json @@ -0,0 +1,44 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/unbound job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "conf_path": { + "type": "string" + }, + "cumulative_stats": { + "type": "boolean" + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/init.go b/src/go/collectors/go.d.plugin/modules/unbound/init.go new file mode 100644 index 00000000000000..6ae9543f3d1f96 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/init.go @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package unbound + +import ( + "crypto/tls" + "errors" + "net" + + "github.com/netdata/go.d.plugin/modules/unbound/config" + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" +) + +func (u *Unbound) initConfig() (enabled bool) { + if u.ConfPath == "" { + u.Info("'conf_path' not set, skipping parameters auto detection") + return true + } + + u.Infof("reading '%s'", u.ConfPath) + cfg, err := config.Parse(u.ConfPath) + if err != nil { + u.Warningf("%v, skipping parameters auto detection", err) + return true + } + + if cfg.Empty() { + u.Debug("empty configuration") + return true + } + + if enabled, ok := cfg.ControlEnabled(); ok && !enabled { + u.Info("remote control is disabled in the configuration file") + return false + } + + u.applyConfig(cfg) + return true +} + +func (u *Unbound) applyConfig(cfg *config.UnboundConfig) { + u.Infof("applying configuration: %s", cfg) + if cumulative, ok := cfg.Cumulative(); ok && cumulative != u.Cumulative { + u.Debugf("changing 'cumulative_stats': %v => %v", u.Cumulative, cumulative) + u.Cumulative = cumulative + } + if useCert, ok := cfg.ControlUseCert(); ok && useCert != u.UseTLS { + u.Debugf("changing 'use_tls': %v => %v", u.UseTLS, useCert) + u.UseTLS = useCert + } + if keyFile, ok := cfg.ControlKeyFile(); ok && keyFile != u.TLSKey { + u.Debugf("changing 'tls_key': '%s' => '%s'", u.TLSKey, keyFile) + u.TLSKey = keyFile + } + if certFile, ok := cfg.ControlCertFile(); ok && certFile != u.TLSCert { + u.Debugf("changing 'tls_cert': '%s' => '%s'", u.TLSCert, 
certFile) + u.TLSCert = certFile + } + if iface, ok := cfg.ControlInterface(); ok && adjustControlInterface(iface) != u.Address { + address := adjustControlInterface(iface) + u.Debugf("changing 'address': '%s' => '%s'", u.Address, address) + u.Address = address + } + if port, ok := cfg.ControlPort(); ok && !socket.IsUnixSocket(u.Address) { + if host, curPort, err := net.SplitHostPort(u.Address); err == nil && curPort != port { + address := net.JoinHostPort(host, port) + u.Debugf("changing 'address': '%s' => '%s'", u.Address, address) + u.Address = address + } + } +} + +func (u *Unbound) initClient() (err error) { + var tlsCfg *tls.Config + useTLS := !socket.IsUnixSocket(u.Address) && u.UseTLS + + if useTLS && (u.TLSConfig.TLSCert == "" || u.TLSConfig.TLSKey == "") { + return errors.New("'tls_cert' or 'tls_key' is missing") + } + + if useTLS { + if tlsCfg, err = tlscfg.NewTLSConfig(u.TLSConfig); err != nil { + return err + } + } + + u.client = socket.New(socket.Config{ + Address: u.Address, + ConnectTimeout: u.Timeout.Duration, + ReadTimeout: u.Timeout.Duration, + WriteTimeout: u.Timeout.Duration, + TLSConf: tlsCfg, + }) + return nil +} + +func adjustControlInterface(value string) string { + if socket.IsUnixSocket(value) { + return value + } + if value == "0.0.0.0" { + value = "127.0.0.1" + } + return net.JoinHostPort(value, "8953") +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/integrations/unbound.md b/src/go/collectors/go.d.plugin/modules/unbound/integrations/unbound.md new file mode 100644 index 00000000000000..b3845b61b1c293 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/integrations/unbound.md @@ -0,0 +1,270 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/unbound/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/unbound/metadata.yaml" +sidebar_label: "Unbound" +learn_status: "Published" +learn_rel_path: "Data Collection/DNS and DHCP Servers" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Unbound + + +<img src="https://netdata.cloud/img/unbound.png" width="150"/> + + +Plugin: go.d.plugin +Module: unbound + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Unbound servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Unbound instance + +These metrics refer to the entire monitored application. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| unbound.queries | queries | queries | +| unbound.queries_ip_ratelimited | ratelimited | queries | +| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries | +| unbound.cache | hits, miss | events | +| unbound.cache_percentage | hits, miss | percentage | +| unbound.prefetch | prefetches | prefetches | +| unbound.expired | expired | replies | +| unbound.zero_ttl_replies | zero_ttl | replies | +| unbound.recursive_replies | recursive | replies | +| unbound.recursion_time | avg, median | milliseconds | +| unbound.request_list_usage | avg, max | queries | +| unbound.current_request_list_usage | all, users | queries | +| unbound.request_list_jostle_list | overwritten, dropped | queries | +| unbound.tcpusage | usage | buffers | +| unbound.uptime | time | seconds | +| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB | +| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB | +| unbound.mem_streamwait | streamwait | KB | +| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items | +| unbound.type_queries | a dimension per query type | queries | +| unbound.class_queries | a dimension per query class | queries | +| unbound.opcode_queries | a dimension per query opcode | queries | +| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries | +| unbound.rcode_answers | a dimension per reply rcode | replies | + +### Per thread + +These metrics refer to threads. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| unbound.thread_queries | queries | queries | +| unbound.thread_queries_ip_ratelimited | ratelimited | queries | +| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries | +| unbound.thread_cache | hits, miss | events | +| unbound.thread_cache_percentage | hits, miss | percentage | +| unbound.thread_prefetch | prefetches | prefetches | +| unbound.thread_expired | expired | replies | +| unbound.thread_zero_ttl_replies | zero_ttl | replies | +| unbound.thread_recursive_replies | recursive | replies | +| unbound.thread_recursion_time | avg, median | milliseconds | +| unbound.thread_request_list_usage | avg, max | queries | +| unbound.thread_current_request_list_usage | all, users | queries | +| unbound.thread_request_list_jostle_list | overwritten, dropped | queries | +| unbound.thread_tcpusage | usage | buffers | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Enable remote control interface + +Set `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf). + + +#### Check permissions and adjust if necessary + +If using unix socket: + +- socket should be readable and writeable by `netdata` user + +If using ip socket and TLS is disabled: + +- socket should be accessible via network + +If TLS is enabled, in addition: + +- `control-key-file` should be readable by `netdata` user +- `control-cert-file` should be readable by `netdata` user + +For auto-detection parameters from `unbound.conf`: + +- `unbound.conf` should be readable by `netdata` user +- if you have several configuration files (include feature) all of them should be readable by `netdata` user + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/unbound.conf`. 
+ + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/unbound.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/unbound.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes | +| timeout | Connection/read/write/TLS handshake timeout. | 1 | no | +| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no | +| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no | +| use_tls | Whether to use TLS or not. | yes | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no | +| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no | +| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no | +| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:8953 + +``` +</details> + +##### Unix socket + +Connecting through a Unix socket. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: socket + address: /var/run/unbound.sock + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:8953 + + - name: remote + address: 203.0.113.11:8953 + +``` +</details>
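+ +Before restarting Netdata, you can sanity-check that the remote control interface is reachable. A quick probe, assuming the `unbound-control` utility that ships with Unbound is installed on the same host, is: + +```bash +sudo unbound-control status +``` + +If this prints the server status, the collector should be able to connect using a matching configuration.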
+ + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m unbound + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/unbound/metadata.yaml b/src/go/collectors/go.d.plugin/modules/unbound/metadata.yaml new file mode 100644 index 00000000000000..3e42aecfcc4f43 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/metadata.yaml @@ -0,0 +1,431 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-unbound + plugin_name: go.d.plugin + module_name: unbound + monitored_instance: + name: Unbound + link: https://nlnetlabs.nl/projects/unbound/about/ + icon_filename: unbound.png + categories: + - data-collection.dns-and-dhcp-servers + keywords: + - unbound + - dns + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Unbound servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: + - title: Enable remote control interface + description: | + Set `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf). + - title: Check permissions and adjust if necessary + description: | + If using a Unix socket: + + - the socket should be readable and writable by the `netdata` user + + If using an IP socket with TLS disabled: + + - the socket should be accessible via the network + + If TLS is enabled, in addition: + + - `control-key-file` should be readable by the `netdata` user + - `control-cert-file` should be readable by the `netdata` user + + For auto-detection of parameters from `unbound.conf`: + + - `unbound.conf` should be readable by the `netdata` user + - if you have several configuration files (via the `include` feature), all of them should be readable by the `netdata` user + configuration: + file: + name: go.d/unbound.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 5 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: Server address in IP:PORT format. + default_value: 127.0.0.1:8953 + required: true + - name: timeout + description: Connection/read/write/TLS handshake timeout. + default_value: 1 + required: false + - name: conf_path + description: Absolute path to the unbound configuration file. + default_value: /etc/unbound/unbound.conf + required: false + - name: cumulative_stats + description: Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. + default_value: no + required: false + - name: use_tls + description: Whether to use TLS or not. + default_value: true + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: true + required: false + - name: tls_ca + description: Certificate authority that the client uses when verifying server certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: /etc/unbound/unbound_control.pem + required: false + - name: tls_key + description: Client TLS key. + default_value: /etc/unbound/unbound_control.key + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: local + address: 127.0.0.1:8953 + - name: Unix socket + description: Connecting through a Unix socket. + config: | + jobs: + - name: socket + address: /var/run/unbound.sock + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:8953 + + - name: remote + address: 203.0.113.11:8953 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: unbound.queries + description: Received Queries + unit: queries + chart_type: line + dimensions: + - name: queries + - name: unbound.queries_ip_ratelimited + description: Rate Limited Queries + unit: queries + chart_type: line + dimensions: + - name: ratelimited + - name: unbound.dnscrypt_queries + description: DNSCrypt Queries + unit: queries + chart_type: line + dimensions: + - name: crypted + - name: cert + - name: cleartext + - name: malformed + - name: unbound.cache + description: Cache Statistics + unit: events + chart_type: stacked + dimensions: + - name: hits + - name: miss + - name: unbound.cache_percentage + description: Cache Statistics Percentage + unit: percentage + chart_type: stacked + dimensions: + - name: hits + - name: miss + - name: unbound.prefetch + description: Cache Prefetches + unit: prefetches + chart_type: line + dimensions: + - name: prefetches + - name: unbound.expired + description: Replies Served From Expired Cache + unit: replies + chart_type: line + dimensions: + - name: expired + - name: unbound.zero_ttl_replies + description: Replies Served From Expired Cache + unit: replies + chart_type: line + dimensions: + - name: zero_ttl + - name: unbound.recursive_replies + description: Replies That Needed Recursive Processing + unit: replies + chart_type: line + dimensions: + - name: recursive + - name: unbound.recursion_time + description: Time Spent On Recursive Processing + unit: milliseconds + chart_type: line + dimensions: + - name: avg + - name: median + - name: unbound.request_list_usage + description: Request List Usage + unit: queries + chart_type: line + dimensions: + - name: avg + - name: max + - name: unbound.current_request_list_usage + description: Current Request List Usage + unit: queries + chart_type: area + dimensions: + - name: all + - name: users + - name: unbound.request_list_jostle_list + description: Request List Jostle List Events + unit: queries + chart_type: line + dimensions: + - name: overwritten + - name: dropped + - name: unbound.tcpusage + description: TCP Handler Buffers + unit: buffers + chart_type: line + dimensions: + - name: usage + - name: unbound.uptime + description: Uptime + unit: seconds + chart_type: line + dimensions: + - name: time + - name: unbound.cache_memory
+ description: Cache Memory + unit: KB + chart_type: stacked + dimensions: + - name: message + - name: rrset + - name: dnscrypt_nonce + - name: dnscrypt_shared_secret + - name: unbound.mod_memory + description: Module Memory + unit: KB + chart_type: stacked + dimensions: + - name: iterator + - name: respip + - name: validator + - name: subnet + - name: ipsec + - name: unbound.mem_streamwait + description: TCP and TLS Stream Wait Buffer Memory + unit: KB + chart_type: line + dimensions: + - name: streamwait + - name: unbound.cache_count + description: Cache Items Count + unit: items + chart_type: stacked + dimensions: + - name: infra + - name: key + - name: msg + - name: rrset + - name: dnscrypt_nonce + - name: shared_secret + - name: unbound.type_queries + description: Queries By Type + unit: queries + chart_type: stacked + dimensions: + - name: a dimension per query type + - name: unbound.class_queries + description: Queries By Class + unit: queries + chart_type: stacked + dimensions: + - name: a dimension per query class + - name: unbound.opcode_queries + description: Queries By OpCode + unit: queries + chart_type: stacked + dimensions: + - name: a dimension per query opcode + - name: unbound.flag_queries + description: Queries By Flag + unit: queries + chart_type: stacked + dimensions: + - name: qr + - name: aa + - name: tc + - name: rd + - name: ra + - name: z + - name: ad + - name: cd + - name: unbound.rcode_answers + description: Replies By RCode + unit: replies + chart_type: stacked + dimensions: + - name: a dimension per reply rcode + - name: thread + description: These metrics refer to threads. + labels: [] + metrics: + - name: unbound.thread_queries + description: Thread Received Queries + unit: queries + chart_type: line + dimensions: + - name: queries + - name: unbound.thread_queries_ip_ratelimited + description: Thread Rate Limited Queries + unit: queries + chart_type: line + dimensions: + - name: ratelimited + - name: unbound.thread_dnscrypt_queries + description: Thread DNSCrypt Queries + unit: queries + chart_type: line + dimensions: + - name: crypted + - name: cert + - name: cleartext + - name: malformed + - name: unbound.thread_cache + description: Cache Statistics + unit: events + chart_type: line + dimensions: + - name: hits + - name: miss + - name: unbound.thread_cache_percentage + description: Cache Statistics Percentage + unit: percentage + chart_type: line + dimensions: + - name: hits + - name: miss + - name: unbound.thread_prefetch + description: Cache Prefetches + unit: prefetches + chart_type: line + dimensions: + - name: prefetches + - name: unbound.thread_expired + description: Replies Served From Expired Cache + unit: replies + chart_type: line + dimensions: + - name: expired + - name: unbound.thread_zero_ttl_replies + description: Replies Served From Expired Cache + unit: replies + chart_type: line + dimensions: + - name: zero_ttl + - name: unbound.thread_recursive_replies + description: Replies That Needed Recursive Processing + unit: replies + chart_type: line + dimensions: + - name: recursive + - name: unbound.thread_recursion_time + description: Time Spent On Recursive Processing + unit: milliseconds + chart_type: line + dimensions: + - name: avg + - name: median + - name: unbound.thread_request_list_usage + description: Request List Usage + unit: queries + chart_type: line + dimensions: + - name: avg + - name: max + - name: unbound.thread_current_request_list_usage + description: Current Request List Usage + unit: queries + chart_type:
line + dimensions: + - name: all + - name: users + - name: unbound.thread_request_list_jostle_list + description: Request List Jostle List Events + unit: queries + chart_type: line + dimensions: + - name: overwritten + - name: dropped + - name: unbound.thread_tcpusage + description: TCP Handler Buffers + unit: buffers + chart_type: line + dimensions: + - name: usage diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/common.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/common.txt new file mode 100644 index 00000000000000..7a1f91a318c82f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/common.txt @@ -0,0 +1,66 @@ +thread0.num.queries=28 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=21 +thread0.num.cachemiss=7 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=7 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0.857143 +thread0.requestlist.max=6 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=1.255822 +thread0.recursion.time.median=0.480597 +thread0.tcpusage=0 +thread1.num.queries=16 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=13 +thread1.num.cachemiss=3 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=3 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0 +thread1.requestlist.max=0 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.093941 +thread1.recursion.time.median=0 +thread1.tcpusage=0 +total.num.queries=44 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=34 +total.num.cachemiss=10 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=10 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.6 +total.requestlist.max=6 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.907258 +total.recursion.time.median=0.240299 +total.tcpusage=0 +time.now=1574094836.941149 +time.up=88.434983 +time.elapsed=88.4349831 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/extended.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/extended.txt new file mode 100644 index 00000000000000..578794fad6b72d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/extended.txt @@ -0,0 +1,162 @@ +thread0.num.queries=28 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=21 +thread0.num.cachemiss=7 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=7 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0.857143 +thread0.requestlist.max=6 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 
+thread0.recursion.time.avg=1.255822 +thread0.recursion.time.median=0.480597 +thread0.tcpusage=0 +thread1.num.queries=16 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=13 +thread1.num.cachemiss=3 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=3 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0 +thread1.requestlist.max=0 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.093941 +thread1.recursion.time.median=0 +thread1.tcpusage=0 +total.num.queries=44 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=34 +total.num.cachemiss=10 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=10 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.6 +total.requestlist.max=6 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.907258 +total.recursion.time.median=0.240299 +total.tcpusage=0 +time.now=1574094836.941149 +time.up=88.434983 +time.elapsed=88.434983 +mem.cache.rrset=178642 +mem.cache.message=90357 +mem.mod.iterator=16588 +mem.mod.validator=81059 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=0 +histogram.000000.016384.to.000000.032768=0 +histogram.000000.032768.to.000000.065536=2 +histogram.000000.065536.to.000000.131072=0 +histogram.000000.131072.to.000000.262144=2 +histogram.000000.262144.to.000000.524288=3 +histogram.000000.524288.to.000001.000000=2 +histogram.000001.000000.to.000002.000000=0 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=1 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=13 +num.query.type.PTR=5 +num.query.type.MX=13 +num.query.type.AAAA=13 +num.query.class.IN=44 +num.query.opcode.QUERY=44 +num.query.tcp=0 +num.query.tcpout=1 
+num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=39 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=44 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=40 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=4 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=2 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=81 +rrset.cache.count=314 +infra.cache.count=205 +key.cache.count=9 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt new file mode 100644 index 00000000000000..53bd7f955744e3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt @@ -0,0 +1,162 @@ +thread0.num.queries=90 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=80 +thread0.num.cachemiss=10 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=10 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0.1 +thread0.requestlist.max=1 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.222018 +thread0.recursion.time.median=0.337042 +thread0.tcpusage=0 +thread1.num.queries=110 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=101 +thread1.num.cachemiss=9 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=9 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0.222222 +thread1.requestlist.max=1 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.844506 +thread1.recursion.time.median=0.360448 +thread1.tcpusage=0 +total.num.queries=200 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=181 +total.num.cachemiss=19 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=19 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.157895 +total.requestlist.max=1 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.516881 +total.recursion.time.median=0.348745 +total.tcpusage=0 +time.now=1574103378.552596 +time.up=122.956436 +time.elapsed=122.956436 +mem.cache.rrset=175745 +mem.cache.message=93392 +mem.mod.iterator=16588 +mem.mod.validator=81479 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 
+mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=2 +histogram.000000.016384.to.000000.032768=1 +histogram.000000.032768.to.000000.065536=3 +histogram.000000.065536.to.000000.131072=0 +histogram.000000.131072.to.000000.262144=0 +histogram.000000.262144.to.000000.524288=11 +histogram.000000.524288.to.000001.000000=0 +histogram.000001.000000.to.000002.000000=1 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=1 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=60 +num.query.type.PTR=20 +num.query.type.MX=60 +num.query.type.AAAA=60 +num.query.class.IN=200 +num.query.opcode.QUERY=200 +num.query.tcp=0 +num.query.tcpout=0 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=200 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=184 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=16 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=1 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=94 +rrset.cache.count=304 +infra.cache.count=192 +key.cache.count=11 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt new file mode 100644 index 00000000000000..939ba75de5ed3a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt @@ -0,0 +1,162 @@ +thread0.num.queries=133 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=123 +thread0.num.cachemiss=10 +thread0.num.prefetch=0 +thread0.num.expired=0 
+thread0.num.zero_ttl=0 +thread0.num.recursivereplies=10 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0.1 +thread0.requestlist.max=1 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.222018 +thread0.recursion.time.median=0.337042 +thread0.tcpusage=0 +thread1.num.queries=157 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=148 +thread1.num.cachemiss=9 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=9 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0.222222 +thread1.requestlist.max=1 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.844506 +thread1.recursion.time.median=0.360448 +thread1.tcpusage=0 +total.num.queries=290 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=271 +total.num.cachemiss=19 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=19 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.157895 +total.requestlist.max=1 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.516881 +total.recursion.time.median=0.348745 +total.tcpusage=0 +time.now=1574103461.161540 +time.up=205.565380 +time.elapsed=82.608944 +mem.cache.rrset=175745 +mem.cache.message=93392 +mem.mod.iterator=16588 +mem.mod.validator=81479 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=2 +histogram.000000.016384.to.000000.032768=1 +histogram.000000.032768.to.000000.065536=3 +histogram.000000.065536.to.000000.131072=0 +histogram.000000.131072.to.000000.262144=0 +histogram.000000.262144.to.000000.524288=11 +histogram.000000.524288.to.000001.000000=0 +histogram.000001.000000.to.000002.000000=1 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=1 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 
+histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=90 +num.query.type.PTR=20 +num.query.type.MX=90 +num.query.type.AAAA=90 +num.query.class.IN=290 +num.query.opcode.QUERY=290 +num.query.tcp=0 +num.query.tcpout=0 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=290 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=274 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=16 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=1 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=94 +rrset.cache.count=304 +infra.cache.count=192 +key.cache.count=11 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt new file mode 100644 index 00000000000000..e9448f7d7022e8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt @@ -0,0 +1,163 @@ +thread0.num.queries=165 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=150 +thread0.num.cachemiss=15 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=15 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0.0666667 +thread0.requestlist.max=1 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.261497 +thread0.recursion.time.median=0.318318 +thread0.tcpusage=0 +thread1.num.queries=195 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=184 +thread1.num.cachemiss=11 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=11 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0.363636 +thread1.requestlist.max=2 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.709047 +thread1.recursion.time.median=0.294912 +thread1.tcpusage=0 +total.num.queries=360 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=334 +total.num.cachemiss=26 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=26 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.192308 +total.requestlist.max=2 +total.requestlist.overwritten=0 
+total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.450844 +total.recursion.time.median=0.306615 +total.tcpusage=0 +time.now=1574103543.692653 +time.up=288.096493 +time.elapsed=82.531113 +mem.cache.rrset=208839 +mem.cache.message=101198 +mem.mod.iterator=16588 +mem.mod.validator=85725 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=2 +histogram.000000.016384.to.000000.032768=1 +histogram.000000.032768.to.000000.065536=5 +histogram.000000.065536.to.000000.131072=3 +histogram.000000.131072.to.000000.262144=0 +histogram.000000.262144.to.000000.524288=11 +histogram.000000.524288.to.000001.000000=2 +histogram.000001.000000.to.000002.000000=1 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=1 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=120 +num.query.type.PTR=20 +num.query.type.MX=110 +num.query.type.AAAA=110 +num.query.class.IN=360 +num.query.opcode.QUERY=360 +num.query.tcp=0 +num.query.tcpout=0 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=360 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=334 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=10 +num.answer.rcode.NXDOMAIN=16 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.answer.rcode.nodata=20 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=1 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=119 +rrset.cache.count=401 +infra.cache.count=232 +key.cache.count=14 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git 
a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt new file mode 100644 index 00000000000000..8be40ecb288865 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt @@ -0,0 +1,163 @@ +thread0.num.queries=51 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=44 +thread0.num.cachemiss=7 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=7 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0 +thread0.requestlist.max=0 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.365956 +thread0.recursion.time.median=0.057344 +thread0.tcpusage=0 +thread1.num.queries=49 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=46 +thread1.num.cachemiss=3 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=3 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0 +thread1.requestlist.max=0 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=1.582766 +thread1.recursion.time.median=0 +thread1.tcpusage=0 +total.num.queries=100 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=90 +total.num.cachemiss=10 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=10 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0 +total.requestlist.max=0 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.730999 +total.recursion.time.median=0.028672 +total.tcpusage=0 +time.now=1574103644.993894 +time.up=45.285130 +time.elapsed=45.285130 +mem.cache.rrset=172757 +mem.cache.message=86064 +mem.mod.iterator=16588 +mem.mod.validator=79979 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=0 +histogram.000000.016384.to.000000.032768=2 +histogram.000000.032768.to.000000.065536=3 +histogram.000000.065536.to.000000.131072=1 +histogram.000000.131072.to.000000.262144=1 +histogram.000000.262144.to.000000.524288=1 +histogram.000000.524288.to.000001.000000=0 +histogram.000001.000000.to.000002.000000=1 +histogram.000002.000000.to.000004.000000=0 
+histogram.000004.000000.to.000008.000000=1 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=30 +num.query.type.PTR=10 +num.query.type.MX=30 +num.query.type.AAAA=30 +num.query.class.IN=100 +num.query.opcode.QUERY=100 +num.query.tcp=0 +num.query.tcpout=1 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=100 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=90 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=10 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.answer.rcode.nodata=10 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=2 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=67 +rrset.cache.count=303 +infra.cache.count=181 +key.cache.count=10 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt new file mode 100644 index 00000000000000..08ff128b38920f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt @@ -0,0 +1,156 @@ +thread0.num.queries=0 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=0 +thread0.num.cachemiss=0 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=0 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0 +thread0.requestlist.max=0 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.000000 +thread0.recursion.time.median=0 +thread0.tcpusage=0 +thread1.num.queries=0 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=0 +thread1.num.cachemiss=0 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=0 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=0 +thread1.requestlist.max=0 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.000000 
+thread1.recursion.time.median=0 +thread1.tcpusage=0 +total.num.queries=0 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=0 +total.num.cachemiss=0 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=0 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0 +total.requestlist.max=0 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.000000 +total.recursion.time.median=0 +total.tcpusage=0 +time.now=1574103671.543847 +time.up=71.835083 +time.elapsed=26.549953 +mem.cache.rrset=172757 +mem.cache.message=86064 +mem.mod.iterator=16588 +mem.mod.validator=79979 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=0 +histogram.000000.016384.to.000000.032768=0 +histogram.000000.032768.to.000000.065536=0 +histogram.000000.065536.to.000000.131072=0 +histogram.000000.131072.to.000000.262144=0 +histogram.000000.262144.to.000000.524288=0 +histogram.000000.524288.to.000001.000000=0 +histogram.000001.000000.to.000002.000000=0 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=0 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.tcp=0 +num.query.tcpout=0 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=0 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=0 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=0 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=0 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=67 +rrset.cache.count=303 +infra.cache.count=181 +key.cache.count=10 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 
+num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt new file mode 100644 index 00000000000000..45324bef9ccfd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt @@ -0,0 +1,163 @@ +thread0.num.queries=34 +thread0.num.queries_ip_ratelimited=0 +thread0.num.cachehits=30 +thread0.num.cachemiss=4 +thread0.num.prefetch=0 +thread0.num.expired=0 +thread0.num.zero_ttl=0 +thread0.num.recursivereplies=4 +thread0.num.dnscrypt.crypted=0 +thread0.num.dnscrypt.cert=0 +thread0.num.dnscrypt.cleartext=0 +thread0.num.dnscrypt.malformed=0 +thread0.requestlist.avg=0 +thread0.requestlist.max=0 +thread0.requestlist.overwritten=0 +thread0.requestlist.exceeded=0 +thread0.requestlist.current.all=0 +thread0.requestlist.current.user=0 +thread0.recursion.time.avg=0.541654 +thread0.recursion.time.median=0.098304 +thread0.tcpusage=0 +thread1.num.queries=36 +thread1.num.queries_ip_ratelimited=0 +thread1.num.cachehits=33 +thread1.num.cachemiss=3 +thread1.num.prefetch=0 +thread1.num.expired=0 +thread1.num.zero_ttl=0 +thread1.num.recursivereplies=3 +thread1.num.dnscrypt.crypted=0 +thread1.num.dnscrypt.cert=0 +thread1.num.dnscrypt.cleartext=0 +thread1.num.dnscrypt.malformed=0 +thread1.requestlist.avg=1.66667 +thread1.requestlist.max=5 +thread1.requestlist.overwritten=0 +thread1.requestlist.exceeded=0 +thread1.requestlist.current.all=0 +thread1.requestlist.current.user=0 +thread1.recursion.time.avg=0.062328 +thread1.recursion.time.median=0 +thread1.tcpusage=0 +total.num.queries=70 +total.num.queries_ip_ratelimited=0 +total.num.cachehits=63 +total.num.cachemiss=7 +total.num.prefetch=0 +total.num.expired=0 +total.num.zero_ttl=0 +total.num.recursivereplies=7 +total.num.dnscrypt.crypted=0 +total.num.dnscrypt.cert=0 +total.num.dnscrypt.cleartext=0 +total.num.dnscrypt.malformed=0 +total.requestlist.avg=0.714286 +total.requestlist.max=5 +total.requestlist.overwritten=0 +total.requestlist.exceeded=0 +total.requestlist.current.all=0 +total.requestlist.current.user=0 +total.recursion.time.avg=0.336228 +total.recursion.time.median=0.049152 +total.tcpusage=0 +time.now=1574103731.371896 +time.up=131.663132 +time.elapsed=59.828049 +mem.cache.rrset=235917 +mem.cache.message=105471 +mem.mod.iterator=16588 +mem.mod.validator=87270 +mem.mod.respip=0 +mem.mod.subnet=74504 +mem.cache.dnscrypt_shared_secret=0 +mem.cache.dnscrypt_nonce=0 +mem.streamwait=0 +histogram.000000.000000.to.000000.000001=0 +histogram.000000.000001.to.000000.000002=0 +histogram.000000.000002.to.000000.000004=0 +histogram.000000.000004.to.000000.000008=0 +histogram.000000.000008.to.000000.000016=0 +histogram.000000.000016.to.000000.000032=0 +histogram.000000.000032.to.000000.000064=0 +histogram.000000.000064.to.000000.000128=0 +histogram.000000.000128.to.000000.000256=0 +histogram.000000.000256.to.000000.000512=0 +histogram.000000.000512.to.000000.001024=0 +histogram.000000.001024.to.000000.002048=0 +histogram.000000.002048.to.000000.004096=0 +histogram.000000.004096.to.000000.008192=0 +histogram.000000.008192.to.000000.016384=0 +histogram.000000.016384.to.000000.032768=2 +histogram.000000.032768.to.000000.065536=1 +histogram.000000.065536.to.000000.131072=3 
+histogram.000000.131072.to.000000.262144=0 +histogram.000000.262144.to.000000.524288=0 +histogram.000000.524288.to.000001.000000=0 +histogram.000001.000000.to.000002.000000=1 +histogram.000002.000000.to.000004.000000=0 +histogram.000004.000000.to.000008.000000=0 +histogram.000008.000000.to.000016.000000=0 +histogram.000016.000000.to.000032.000000=0 +histogram.000032.000000.to.000064.000000=0 +histogram.000064.000000.to.000128.000000=0 +histogram.000128.000000.to.000256.000000=0 +histogram.000256.000000.to.000512.000000=0 +histogram.000512.000000.to.001024.000000=0 +histogram.001024.000000.to.002048.000000=0 +histogram.002048.000000.to.004096.000000=0 +histogram.004096.000000.to.008192.000000=0 +histogram.008192.000000.to.016384.000000=0 +histogram.016384.000000.to.032768.000000=0 +histogram.032768.000000.to.065536.000000=0 +histogram.065536.000000.to.131072.000000=0 +histogram.131072.000000.to.262144.000000=0 +histogram.262144.000000.to.524288.000000=0 +num.query.type.A=20 +num.query.type.PTR=10 +num.query.type.MX=20 +num.query.type.AAAA=20 +num.query.class.IN=70 +num.query.opcode.QUERY=70 +num.query.tcp=0 +num.query.tcpout=0 +num.query.tls=0 +num.query.tls.resume=0 +num.query.ipv6=0 +num.query.flags.QR=0 +num.query.flags.AA=0 +num.query.flags.TC=0 +num.query.flags.RD=70 +num.query.flags.RA=0 +num.query.flags.Z=0 +num.query.flags.AD=0 +num.query.flags.CD=0 +num.query.edns.present=0 +num.query.edns.DO=0 +num.answer.rcode.NOERROR=60 +num.answer.rcode.FORMERR=0 +num.answer.rcode.SERVFAIL=0 +num.answer.rcode.NXDOMAIN=10 +num.answer.rcode.NOTIMPL=0 +num.answer.rcode.REFUSED=0 +num.answer.rcode.nodata=10 +num.query.ratelimited=0 +num.answer.secure=0 +num.answer.bogus=0 +num.rrset.bogus=0 +num.query.aggressive.NOERROR=2 +num.query.aggressive.NXDOMAIN=0 +unwanted.queries=0 +unwanted.replies=0 +msg.cache.count=127 +rrset.cache.count=501 +infra.cache.count=303 +key.cache.count=15 +dnscrypt_shared_secret.cache.count=0 +dnscrypt_nonce.cache.count=0 +num.query.dnscrypt.shared_secret.cachemiss=0 +num.query.dnscrypt.replay=0 +num.query.authzone.up=0 +num.query.authzone.down=0 +num.query.subnet=0 +num.query.subnet_cache=0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound.conf b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound.conf new file mode 100644 index 00000000000000..a061a34768ea00 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +#include: "otherfile.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. 
default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: yes + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 10.0.0.1 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8954 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + control-use-cert: "no" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control_other.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control_other.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_disabled.conf b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_disabled.conf new file mode 100644 index 00000000000000..1cef549f8bda57 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_disabled.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +#include: "otherfile.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. 
+# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. + # set up the keys and certificates with unbound-control-setup. + control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + control-interface: 0.0.0.0 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_empty.conf b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_empty.conf new file mode 100644 index 00000000000000..a2d15837647399 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/testdata/unbound_empty.conf @@ -0,0 +1,85 @@ +# +# Example configuration file. +# +# See unbound.conf(5) man page, version 1.9.4. +# +# this is a comment. + +#Use this to include other text into the file. +#include: "otherfile.conf" + +# The server clause sets the main parameters. +server: + # whitespace is not necessary, but looks cleaner. + + # verbosity number, 0 is least verbose. 1 is default. + # verbosity: 1 + + # print statistics to the log (for every thread) every N seconds. + # Set to "" or 0 to disable. Default is disabled. + # statistics-interval: 0 + + # enable shm for stats, default no. if you enable also enable + # statistics-interval, every time it also writes stats to the + # shared memory segment keyed with shm-key. + # shm-enable: no + + # shm for stats uses this key, and key+1 for the shared mem segment. + # shm-key: 11777 + + # enable cumulative statistics, without clearing them after printing. + # statistics-cumulative: no + # statistics-cumulative: yes + + # enable extended statistics (query types, answer codes, status) + # printed from unbound-control. default off, because of speed. + # extended-statistics: no + # extended-statistics: yes + + # number of threads to create. 1 disables threading. + # num-threads: 2 + +# Python config section. To enable: +# o use --with-pythonmodule to configure before compiling. +# o list python in the module-config string (above) to enable. +# It can be at the start, it gets validated results, or just before +# the iterator and process before DNSSEC validation. +# o and give a python-script to run. +python: + # Script file to load + # python-script: "/etc/unbound/ubmodule-tst.py" + +# Remote control config section. +remote-control: + # Enable remote control with unbound-control(8) here. 
+ # set up the keys and certificates with unbound-control-setup. + # control-enable: no + + # what interfaces are listened to for remote control. + # give 0.0.0.0 and ::0 to listen to all interfaces. + # set to an absolute path to use a unix local name pipe, certificates + # are not used for that, so key and cert files need not be present. + # control-interface: 127.0.0.1 + # control-interface: 0.0.0.0 + # control-interface: ::1 + # control-interface: /var/run/test.sock + + # port number for remote control operations. + # control-port: 8953 + + # for localhost, you can disable use of TLS by setting this to "no" + # For local sockets this option is ignored, and TLS is not used. + # control-use-cert: "yes" + # control-use-cert: "yes" + + # unbound server key file. + # server-key-file: "/etc/unbound/unbound_server.key" + + # unbound server certificate file. + # server-cert-file: "/etc/unbound/unbound_server.pem" + + # unbound-control key file. + # control-key-file: "/etc/unbound/unbound_control.key" + + # unbound-control certificate file. + # control-cert-file: "/etc/unbound/unbound_control.pem" diff --git a/src/go/collectors/go.d.plugin/modules/unbound/unbound.go b/src/go/collectors/go.d.plugin/modules/unbound/unbound.go new file mode 100644 index 00000000000000..625ef75cd21e83 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/unbound.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package unbound + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("unbound", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *Unbound { + config := Config{ + Address: "127.0.0.1:8953", + ConfPath: "/etc/unbound/unbound.conf", + Timeout: web.Duration{Duration: time.Second}, + Cumulative: false, + UseTLS: true, + TLSConfig: tlscfg.TLSConfig{ + TLSCert: "/etc/unbound/unbound_control.pem", + TLSKey: "/etc/unbound/unbound_control.key", + InsecureSkipVerify: true, + }, + } + + return &Unbound{ + Config: config, + curCache: newCollectCache(), + cache: newCollectCache(), + } +} + +type ( + Config struct { + Address string `yaml:"address"` + ConfPath string `yaml:"conf_path"` + Timeout web.Duration `yaml:"timeout"` + Cumulative bool `yaml:"cumulative_stats"` + UseTLS bool `yaml:"use_tls"` + tlscfg.TLSConfig `yaml:",inline"` + } + Unbound struct { + module.Base + Config `yaml:",inline"` + + client socket.Client + cache collectCache + curCache collectCache + + prevCacheMiss float64 // needed for cumulative mode + extChartsCreated bool + + charts *module.Charts + } +) + +func (Unbound) Cleanup() {} + +func (u *Unbound) Init() bool { + if enabled := u.initConfig(); !enabled { + return false + } + + if err := u.initClient(); err != nil { + u.Errorf("creating client: %v", err) + return false + } + + u.charts = charts(u.Cumulative) + + u.Debugf("using address: %s, cumulative: %v, use_tls: %v, timeout: %s", u.Address, u.Cumulative, u.UseTLS, u.Timeout) + if u.UseTLS { + u.Debugf("using tls_skip_verify: %v, tls_key: %s, tls_cert: %s", u.InsecureSkipVerify, u.TLSKey, u.TLSCert) + } + return true +} + +func (u *Unbound) Check() bool { + return len(u.Collect()) > 0 +} + +func (u Unbound) Charts() *module.Charts { + return u.charts +} + +func (u *Unbound) Collect() 
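/* editorial note: Collect logs the scrape error and returns nil whenever
   nothing was collected; the plugin framework treats a nil map as a failed
   collection for this update cycle */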
map[string]int64 { + mx, err := u.collect() + if err != nil { + u.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/unbound/unbound_test.go b/src/go/collectors/go.d.plugin/modules/unbound/unbound_test.go new file mode 100644 index 00000000000000..fabea299d5ecbf --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/unbound/unbound_test.go @@ -0,0 +1,1278 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package unbound + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + commonStatsData, _ = os.ReadFile("testdata/stats/common.txt") + extStatsData, _ = os.ReadFile("testdata/stats/extended.txt") + lifeCycleCumulativeData1, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended1.txt") + lifeCycleCumulativeData2, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended2.txt") + lifeCycleCumulativeData3, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended3.txt") + lifeCycleResetData1, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended1.txt") + lifeCycleResetData2, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended2.txt") + lifeCycleResetData3, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended3.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, commonStatsData) + assert.NotNil(t, extStatsData) + assert.NotNil(t, lifeCycleCumulativeData1) + assert.NotNil(t, lifeCycleCumulativeData2) + assert.NotNil(t, lifeCycleCumulativeData3) + assert.NotNil(t, lifeCycleResetData1) + assert.NotNil(t, lifeCycleResetData2) + assert.NotNil(t, lifeCycleResetData3) +} + +func nonTLSUnbound() *Unbound { + unbound := New() + unbound.ConfPath = "" + unbound.UseTLS = false + return unbound +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestUnbound_Init(t *testing.T) { + unbound := nonTLSUnbound() + + assert.True(t, unbound.Init()) +} + +func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) { + unbound := New() + unbound.ConfPath = "testdata/unbound.conf" + expectedConfig := Config{ + Address: "10.0.0.1:8954", + ConfPath: unbound.ConfPath, + Timeout: unbound.Timeout, + Cumulative: true, + UseTLS: false, + TLSConfig: tlscfg.TLSConfig{ + TLSCert: "/etc/unbound/unbound_control_other.pem", + TLSKey: "/etc/unbound/unbound_control_other.key", + InsecureSkipVerify: unbound.TLSConfig.InsecureSkipVerify, + }, + } + + assert.True(t, unbound.Init()) + assert.Equal(t, expectedConfig, unbound.Config) +} + +func TestUnbound_Init_DisabledInUnboundConf(t *testing.T) { + unbound := nonTLSUnbound() + unbound.ConfPath = "testdata/unbound_disabled.conf" + + assert.False(t, unbound.Init()) +} + +func TestUnbound_Init_HandleEmptyConfig(t *testing.T) { + unbound := nonTLSUnbound() + unbound.ConfPath = "testdata/unbound_empty.conf" + + assert.True(t, unbound.Init()) +} + +func TestUnbound_Init_HandleNonExistentConfig(t *testing.T) { + unbound := nonTLSUnbound() + unbound.ConfPath = "testdata/unbound_non_existent.conf" + + assert.True(t, unbound.Init()) +} + +func TestUnbound_Check(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{data: commonStatsData, err: false} + + assert.True(t, unbound.Check()) +} + +func 
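exampleExpectedConfigFromConf() Config {
	// Editorial sketch, a hypothetical helper that is not part of the module:
	// it restates the mapping TestUnbound_Init_SetEverythingFromUnboundConf
	// above expects initConfig() to derive from testdata/unbound.conf:
	//   control-interface 10.0.0.1 + control-port 8954 -> Address "10.0.0.1:8954"
	//   statistics-cumulative: yes                     -> Cumulative: true
	//   control-use-cert: "no"                         -> UseTLS: false
	//   control-key-file / control-cert-file           -> TLSKey / TLSCert
	return Config{
		Address:    "10.0.0.1:8954",
		Cumulative: true,
		UseTLS:     false,
		TLSConfig: tlscfg.TLSConfig{
			TLSCert: "/etc/unbound/unbound_control_other.pem",
			TLSKey:  "/etc/unbound/unbound_control_other.key",
		},
	}
}

func 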
TestUnbound_Check_ErrorDuringScrapingUnbound(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{err: true} + + assert.False(t, unbound.Check()) +} + +func TestUnbound_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestUnbound_Charts(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + + assert.NotNil(t, unbound.Charts()) +} + +func TestUnbound_Collect(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{data: commonStatsData, err: false} + + collected := unbound.Collect() + assert.Equal(t, expectedCommon, collected) + testCharts(t, unbound, collected) +} + +func TestUnbound_Collect_ExtendedStats(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{data: extStatsData, err: false} + + collected := unbound.Collect() + assert.Equal(t, expectedExtended, collected) + testCharts(t, unbound, collected) +} + +func TestUnbound_Collect_LifeCycleCumulativeExtendedStats(t *testing.T) { + tests := []struct { + input []byte + expected map[string]int64 + }{ + {input: lifeCycleCumulativeData1, expected: expectedCumulative1}, + {input: lifeCycleCumulativeData2, expected: expectedCumulative2}, + {input: lifeCycleCumulativeData3, expected: expectedCumulative3}, + } + + unbound := nonTLSUnbound() + unbound.Cumulative = true + require.True(t, unbound.Init()) + ubClient := &mockUnboundClient{err: false} + unbound.client = ubClient + + var collected map[string]int64 + for i, test := range tests { + t.Run(fmt.Sprintf("run %d", i+1), func(t *testing.T) { + ubClient.data = test.input + collected = unbound.Collect() + assert.Equal(t, test.expected, collected) + }) + } + + testCharts(t, unbound, collected) +} + +func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) { + tests := []struct { + input []byte + expected map[string]int64 + }{ + {input: lifeCycleResetData1, expected: expectedReset1}, + {input: lifeCycleResetData2, expected: expectedReset2}, + {input: lifeCycleResetData3, expected: expectedReset3}, + } + + unbound := nonTLSUnbound() + unbound.Cumulative = false + require.True(t, unbound.Init()) + ubClient := &mockUnboundClient{err: false} + unbound.client = ubClient + + var collected map[string]int64 + for i, test := range tests { + t.Run(fmt.Sprintf("run %d", i+1), func(t *testing.T) { + ubClient.data = test.input + collected = unbound.Collect() + assert.Equal(t, test.expected, collected) + }) + } + + testCharts(t, unbound, collected) +} + +func TestUnbound_Collect_EmptyResponse(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{data: []byte{}, err: false} + + assert.Nil(t, unbound.Collect()) +} + +func TestUnbound_Collect_ErrorResponse(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{data: []byte("error unknown command 'unknown'"), err: false} + + assert.Nil(t, unbound.Collect()) +} + +func TestUnbound_Collect_ErrorOnSend(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + unbound.client = mockUnboundClient{err: true} + + assert.Nil(t, unbound.Collect()) +} + +func TestUnbound_Collect_ErrorOnParseBadSyntax(t *testing.T) { + unbound := nonTLSUnbound() + require.True(t, unbound.Init()) + data := strings.Repeat("zk_avg_latency 0\nzk_min_latency 0\nzk_mix_latency 0\n", 10) + unbound.client = mockUnboundClient{data: 
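/* editorial note: zookeeper-style "key value" lines, i.e. plausible text
   that nevertheless lacks the "key=value" format unbound-control emits,
   so parsing is expected to fail */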
[]byte(data), err: false} + + assert.Nil(t, unbound.Collect()) +} + +type mockUnboundClient struct { + data []byte + err bool +} + +func (m mockUnboundClient) Connect() error { + return nil +} + +func (m mockUnboundClient) Disconnect() error { + return nil +} + +func (m mockUnboundClient) Command(_ string, process socket.Processor) error { + if m.err { + return errors.New("mock send error") + } + s := bufio.NewScanner(bytes.NewReader(m.data)) + for s.Scan() { + process(s.Bytes()) + } + return nil +} + +func testCharts(t *testing.T, unbound *Unbound, collected map[string]int64) { + t.Helper() + ensureChartsCreatedForEveryThread(t, unbound) + ensureExtendedChartsCreated(t, unbound) + ensureCollectedHasAllChartsDimsVarsIDs(t, unbound, collected) +} + +func ensureChartsCreatedForEveryThread(t *testing.T, u *Unbound) { + for thread := range u.cache.threads { + for _, chart := range *threadCharts(thread, u.Cumulative) { + assert.Truef(t, u.Charts().Has(chart.ID), "chart '%s' is not created for '%s' thread", chart.ID, thread) + } + } +} + +func ensureExtendedChartsCreated(t *testing.T, u *Unbound) { + if len(u.cache.answerRCode) == 0 { + return + } + for _, chart := range *extendedCharts(u.Cumulative) { + assert.Truef(t, u.Charts().Has(chart.ID), "chart '%s' is not added", chart.ID) + } + + if chart := u.Charts().Get(queryTypeChart.ID); chart != nil { + for typ := range u.cache.queryType { + dimID := "num.query.type." + typ + assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' type, expected '%s'", chart.ID, typ, dimID) + } + } + if chart := u.Charts().Get(queryClassChart.ID); chart != nil { + for class := range u.cache.queryClass { + dimID := "num.query.class." + class + assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' class, expected '%s'", chart.ID, class, dimID) + } + } + if chart := u.Charts().Get(queryOpCodeChart.ID); chart != nil { + for opcode := range u.cache.queryOpCode { + dimID := "num.query.opcode." + opcode + assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' opcode, expected '%s'", chart.ID, opcode, dimID) + } + } + if chart := u.Charts().Get(answerRCodeChart.ID); chart != nil { + for rcode := range u.cache.answerRCode { + dimID := "num.answer.rcode." 
+ rcode + assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' rcode, expected '%s'", chart.ID, rcode, dimID) + } + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, u *Unbound, collected map[string]int64) { + for _, chart := range *u.Charts() { + for _, dim := range chart.Dims { + if dim.ID == "mem.mod.ipsecmod" { + continue + } + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +var ( + expectedCommon = map[string]int64{ + "thread0.num.cachehits": 21, + "thread0.num.cachemiss": 7, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 28, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 7, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 1255, + "thread0.recursion.time.median": 480, + "thread0.requestlist.avg": 857, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 6, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 13, + "thread1.num.cachemiss": 3, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 16, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 3, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 93, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 0, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 0, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 88, + "time.now": 1574094836, + "time.up": 88, + "total.num.cachehits": 34, + "total.num.cachemiss": 10, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 44, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 10, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 907, + "total.recursion.time.median": 240, + "total.requestlist.avg": 600, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 6, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + } + + expectedExtended = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 205, + "key.cache.count": 9, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 90357, + "mem.cache.rrset": 178642, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 81059, + "mem.streamwait": 0, + "msg.cache.count": 81, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 40, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 4, + 
"num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 2, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 44, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 44, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 39, + "num.query.opcode.QUERY": 44, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 1, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 13, + "num.query.type.AAAA": 13, + "num.query.type.MX": 13, + "num.query.type.PTR": 5, + "num.rrset.bogus": 0, + "rrset.cache.count": 314, + "thread0.num.cachehits": 21, + "thread0.num.cachemiss": 7, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 28, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 7, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 1255, + "thread0.recursion.time.median": 480, + "thread0.requestlist.avg": 857, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 6, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 13, + "thread1.num.cachemiss": 3, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 16, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 3, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 93, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 0, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 0, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 88, + "time.now": 1574094836, + "time.up": 88, + "total.num.cachehits": 34, + "total.num.cachemiss": 10, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 44, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 10, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 907, + "total.recursion.time.median": 240, + "total.requestlist.avg": 600, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 6, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } +) + +var ( + expectedCumulative1 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 192, + "key.cache.count": 11, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + 
"mem.cache.message": 93392, + "mem.cache.rrset": 175745, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 81479, + "mem.streamwait": 0, + "msg.cache.count": 94, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 184, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 16, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 1, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 200, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 200, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 200, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 0, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 60, + "num.query.type.AAAA": 60, + "num.query.type.MX": 60, + "num.query.type.PTR": 20, + "num.rrset.bogus": 0, + "rrset.cache.count": 304, + "thread0.num.cachehits": 80, + "thread0.num.cachemiss": 10, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 90, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 10, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 222, + "thread0.recursion.time.median": 337, + "thread0.requestlist.avg": 100, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 1, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 101, + "thread1.num.cachemiss": 9, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.expired": 0, + "thread1.num.prefetch": 0, + "thread1.num.queries": 110, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 9, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 844, + "thread1.recursion.time.median": 360, + "thread1.requestlist.avg": 222, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 1, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 122, + "time.now": 1574103378, + "time.up": 122, + "total.num.cachehits": 181, + "total.num.cachemiss": 19, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.expired": 0, + "total.num.prefetch": 0, + "total.num.queries": 200, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 19, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 516, + "total.recursion.time.median": 348, + "total.requestlist.avg": 157, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + 
"total.requestlist.max": 1, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } + + expectedCumulative2 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 192, + "key.cache.count": 11, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 93392, + "mem.cache.rrset": 175745, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 81479, + "mem.streamwait": 0, + "msg.cache.count": 94, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 274, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 16, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 1, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 290, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 290, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 290, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 0, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 90, + "num.query.type.AAAA": 90, + "num.query.type.MX": 90, + "num.query.type.PTR": 20, + "num.rrset.bogus": 0, + "rrset.cache.count": 304, + "thread0.num.cachehits": 123, + "thread0.num.cachemiss": 10, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 133, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 10, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 0, + "thread0.recursion.time.median": 0, + "thread0.requestlist.avg": 0, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 1, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 148, + "thread1.num.cachemiss": 9, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 157, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 9, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 0, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 0, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 1, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 82, + "time.now": 1574103461, + "time.up": 205, + "total.num.cachehits": 271, + "total.num.cachemiss": 19, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + 
"total.num.expired": 0, + "total.num.queries": 290, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 19, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 0, + "total.recursion.time.median": 0, + "total.requestlist.avg": 0, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 1, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } + + expectedCumulative3 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 232, + "key.cache.count": 14, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 101198, + "mem.cache.rrset": 208839, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 85725, + "mem.streamwait": 0, + "msg.cache.count": 119, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 334, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 16, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 10, + "num.answer.rcode.nodata": 20, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 1, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 360, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 360, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 360, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 0, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 120, + "num.query.type.AAAA": 110, + "num.query.type.MX": 110, + "num.query.type.PTR": 20, + "num.rrset.bogus": 0, + "rrset.cache.count": 401, + "thread0.num.cachehits": 150, + "thread0.num.cachemiss": 15, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 165, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 15, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 261, + "thread0.recursion.time.median": 318, + "thread0.requestlist.avg": 66, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 1, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 184, + "thread1.num.cachemiss": 11, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 195, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 11, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 709, + "thread1.recursion.time.median": 294, + "thread1.requestlist.avg": 363, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + 
"thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 2, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 82, + "time.now": 1574103543, + "time.up": 288, + "total.num.cachehits": 334, + "total.num.cachemiss": 26, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 360, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 26, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 450, + "total.recursion.time.median": 306, + "total.requestlist.avg": 192, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 2, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } +) + +var ( + expectedReset1 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 181, + "key.cache.count": 10, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 86064, + "mem.cache.rrset": 172757, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 79979, + "mem.streamwait": 0, + "msg.cache.count": 67, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 90, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 10, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.rcode.nodata": 10, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 2, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 100, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 100, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 100, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 1, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 30, + "num.query.type.AAAA": 30, + "num.query.type.MX": 30, + "num.query.type.PTR": 10, + "num.rrset.bogus": 0, + "rrset.cache.count": 303, + "thread0.num.cachehits": 44, + "thread0.num.cachemiss": 7, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 51, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 7, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 365, + "thread0.recursion.time.median": 57, + "thread0.requestlist.avg": 0, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 0, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 46, + "thread1.num.cachemiss": 3, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + 
"thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 49, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 3, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 1582, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 0, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 0, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 45, + "time.now": 1574103644, + "time.up": 45, + "total.num.cachehits": 90, + "total.num.cachemiss": 10, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 100, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 10, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 730, + "total.recursion.time.median": 28, + "total.requestlist.avg": 0, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 0, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } + expectedReset2 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 181, + "key.cache.count": 10, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 86064, + "mem.cache.rrset": 172757, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 79979, + "mem.streamwait": 0, + "msg.cache.count": 67, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 0, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 0, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.rcode.nodata": 0, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 0, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 0, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 0, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 0, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 0, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 0, + "num.query.type.AAAA": 0, + "num.query.type.MX": 0, + "num.query.type.PTR": 0, + "num.rrset.bogus": 0, + "rrset.cache.count": 303, + "thread0.num.cachehits": 0, + "thread0.num.cachemiss": 0, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + "thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 0, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 0, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 0, + "thread0.recursion.time.median": 0, + "thread0.requestlist.avg": 0, + 
"thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 0, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 0, + "thread1.num.cachemiss": 0, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 0, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 0, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 0, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 0, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 0, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 26, + "time.now": 1574103671, + "time.up": 71, + "total.num.cachehits": 0, + "total.num.cachemiss": 0, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 0, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 0, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 0, + "total.recursion.time.median": 0, + "total.requestlist.avg": 0, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 0, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } + + expectedReset3 = map[string]int64{ + "dnscrypt_nonce.cache.count": 0, + "dnscrypt_shared_secret.cache.count": 0, + "infra.cache.count": 303, + "key.cache.count": 15, + "mem.cache.dnscrypt_nonce": 0, + "mem.cache.dnscrypt_shared_secret": 0, + "mem.cache.message": 105471, + "mem.cache.rrset": 235917, + "mem.mod.iterator": 16588, + "mem.mod.respip": 0, + "mem.mod.subnet": 74504, + "mem.mod.validator": 87270, + "mem.streamwait": 0, + "msg.cache.count": 127, + "num.answer.bogus": 0, + "num.answer.rcode.FORMERR": 0, + "num.answer.rcode.NOERROR": 60, + "num.answer.rcode.NOTIMPL": 0, + "num.answer.rcode.NXDOMAIN": 10, + "num.answer.rcode.REFUSED": 0, + "num.answer.rcode.SERVFAIL": 0, + "num.answer.rcode.nodata": 10, + "num.answer.secure": 0, + "num.query.aggressive.NOERROR": 2, + "num.query.aggressive.NXDOMAIN": 0, + "num.query.authzone.down": 0, + "num.query.authzone.up": 0, + "num.query.class.IN": 70, + "num.query.dnscrypt.replay": 0, + "num.query.dnscrypt.shared_secret.cachemiss": 0, + "num.query.edns.DO": 0, + "num.query.edns.present": 0, + "num.query.flags.AA": 0, + "num.query.flags.AD": 0, + "num.query.flags.CD": 0, + "num.query.flags.QR": 0, + "num.query.flags.RA": 0, + "num.query.flags.RD": 70, + "num.query.flags.TC": 0, + "num.query.flags.Z": 0, + "num.query.ipv6": 0, + "num.query.opcode.QUERY": 70, + "num.query.ratelimited": 0, + "num.query.subnet": 0, + "num.query.subnet_cache": 0, + "num.query.tcp": 0, + "num.query.tcpout": 0, + "num.query.tls": 0, + "num.query.tls.resume": 0, + "num.query.type.A": 20, + "num.query.type.AAAA": 20, + "num.query.type.MX": 20, + "num.query.type.PTR": 10, + "num.rrset.bogus": 0, + "rrset.cache.count": 501, + "thread0.num.cachehits": 30, + "thread0.num.cachemiss": 4, + "thread0.num.dnscrypt.cert": 0, + "thread0.num.dnscrypt.cleartext": 0, + 
"thread0.num.dnscrypt.crypted": 0, + "thread0.num.dnscrypt.malformed": 0, + "thread0.num.expired": 0, + "thread0.num.prefetch": 0, + "thread0.num.queries": 34, + "thread0.num.queries_ip_ratelimited": 0, + "thread0.num.recursivereplies": 4, + "thread0.num.zero_ttl": 0, + "thread0.recursion.time.avg": 541, + "thread0.recursion.time.median": 98, + "thread0.requestlist.avg": 0, + "thread0.requestlist.current.all": 0, + "thread0.requestlist.current.user": 0, + "thread0.requestlist.exceeded": 0, + "thread0.requestlist.max": 0, + "thread0.requestlist.overwritten": 0, + "thread0.tcpusage": 0, + "thread1.num.cachehits": 33, + "thread1.num.cachemiss": 3, + "thread1.num.dnscrypt.cert": 0, + "thread1.num.dnscrypt.cleartext": 0, + "thread1.num.dnscrypt.crypted": 0, + "thread1.num.dnscrypt.malformed": 0, + "thread1.num.prefetch": 0, + "thread1.num.expired": 0, + "thread1.num.queries": 36, + "thread1.num.queries_ip_ratelimited": 0, + "thread1.num.recursivereplies": 3, + "thread1.num.zero_ttl": 0, + "thread1.recursion.time.avg": 62, + "thread1.recursion.time.median": 0, + "thread1.requestlist.avg": 1666, + "thread1.requestlist.current.all": 0, + "thread1.requestlist.current.user": 0, + "thread1.requestlist.exceeded": 0, + "thread1.requestlist.max": 5, + "thread1.requestlist.overwritten": 0, + "thread1.tcpusage": 0, + "time.elapsed": 59, + "time.now": 1574103731, + "time.up": 131, + "total.num.cachehits": 63, + "total.num.cachemiss": 7, + "total.num.dnscrypt.cert": 0, + "total.num.dnscrypt.cleartext": 0, + "total.num.dnscrypt.crypted": 0, + "total.num.dnscrypt.malformed": 0, + "total.num.prefetch": 0, + "total.num.expired": 0, + "total.num.queries": 70, + "total.num.queries_ip_ratelimited": 0, + "total.num.recursivereplies": 7, + "total.num.zero_ttl": 0, + "total.recursion.time.avg": 336, + "total.recursion.time.median": 49, + "total.requestlist.avg": 714, + "total.requestlist.current.all": 0, + "total.requestlist.current.user": 0, + "total.requestlist.exceeded": 0, + "total.requestlist.max": 5, + "total.requestlist.overwritten": 0, + "total.tcpusage": 0, + "unwanted.queries": 0, + "unwanted.replies": 0, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/upsd/README.md b/src/go/collectors/go.d.plugin/modules/upsd/README.md new file mode 120000 index 00000000000000..8dcef84dd8802f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/README.md @@ -0,0 +1 @@ +integrations/ups_nut.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/upsd/charts.go b/src/go/collectors/go.d.plugin/modules/upsd/charts.go new file mode 100644 index 00000000000000..14c18a34f7650c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/charts.go @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioUpsLoad = module.Priority + iota + prioUpsLoadUsage + prioUpsStatus + prioUpsTemperature + + prioBatteryCharge + prioBatteryEstimatedRuntime + prioBatteryVoltage + prioBatteryVoltageNominal + + prioInputVoltage + prioInputVoltageNominal + prioInputCurrent + prioInputCurrentNominal + prioInputFrequency + prioInputFrequencyNominal + + prioOutputVoltage + prioOutputVoltageNominal + prioOutputCurrent + prioOutputCurrentNominal + prioOutputFrequency + prioOutputFrequencyNominal +) + +var upsChartsTmpl = module.Charts{ + upsLoadChartTmpl.Copy(), + upsLoadUsageChartTmpl.Copy(), + upsStatusChartTmpl.Copy(), + upsTemperatureChartTmpl.Copy(), + + 
upsBatteryChargePercentChartTmpl.Copy(), + upsBatteryEstimatedRuntimeChartTmpl.Copy(), + upsBatteryVoltageChartTmpl.Copy(), + upsBatteryVoltageNominalChartTmpl.Copy(), + + upsInputVoltageChartTmpl.Copy(), + upsInputVoltageNominalChartTmpl.Copy(), + upsInputCurrentChartTmpl.Copy(), + upsInputCurrentNominalChartTmpl.Copy(), + upsInputFrequencyChartTmpl.Copy(), + upsInputFrequencyNominalChartTmpl.Copy(), + + upsOutputVoltageChartTmpl.Copy(), + upsOutputVoltageNominalChartTmpl.Copy(), + upsOutputCurrentChartTmpl.Copy(), + upsOutputCurrentNominalChartTmpl.Copy(), + upsOutputFrequencyChartTmpl.Copy(), + upsOutputFrequencyNominalChartTmpl.Copy(), +} + +var ( + upsLoadChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.load_percentage", + Title: "UPS load", + Units: "percentage", + Fam: "ups", + Ctx: "upsd.ups_load", + Priority: prioUpsLoad, + Type: module.Area, + Dims: module.Dims{ + {ID: "ups_%s_ups.load", Name: "load", Div: varPrecision}, + }, + } + upsLoadUsageChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.load_usage", + Title: "UPS load usage (power output)", + Units: "Watts", + Fam: "ups", + Ctx: "upsd.ups_load_usage", + Priority: prioUpsLoadUsage, + Dims: module.Dims{ + {ID: "ups_%s_ups.load.usage", Name: "load_usage", Div: varPrecision}, + }, + } + upsStatusChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.status", + Title: "UPS status", + Units: "status", + Fam: "ups", + Ctx: "upsd.ups_status", + Priority: prioUpsStatus, + Dims: module.Dims{ + {ID: "ups_%s_ups.status.OL", Name: "on_line"}, + {ID: "ups_%s_ups.status.OB", Name: "on_battery"}, + {ID: "ups_%s_ups.status.LB", Name: "low_battery"}, + {ID: "ups_%s_ups.status.HB", Name: "high_battery"}, + {ID: "ups_%s_ups.status.RB", Name: "replace_battery"}, + {ID: "ups_%s_ups.status.CHRG", Name: "charging"}, + {ID: "ups_%s_ups.status.DISCHRG", Name: "discharging"}, + {ID: "ups_%s_ups.status.BYPASS", Name: "bypass"}, + {ID: "ups_%s_ups.status.CAL", Name: "calibration"}, + {ID: "ups_%s_ups.status.OFF", Name: "offline"}, + {ID: "ups_%s_ups.status.OVER", Name: "overloaded"}, + {ID: "ups_%s_ups.status.TRIM", Name: "trim_input_voltage"}, + {ID: "ups_%s_ups.status.BOOST", Name: "boost_input_voltage"}, + {ID: "ups_%s_ups.status.FSD", Name: "forced_shutdown"}, + {ID: "ups_%s_ups.status.other", Name: "other"}, + }, + } + upsTemperatureChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.temperature", + Title: "UPS temperature", + Units: "Celsius", + Fam: "ups", + Ctx: "upsd.ups_temperature", + Priority: prioUpsTemperature, + Dims: module.Dims{ + {ID: "ups_%s_ups.temperature", Name: "temperature", Div: varPrecision}, + }, + } +) + +var ( + upsBatteryChargePercentChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.battery_charge_percentage", + Title: "UPS Battery charge", + Units: "percentage", + Fam: "battery", + Ctx: "upsd.ups_battery_charge", + Priority: prioBatteryCharge, + Type: module.Area, + Dims: module.Dims{ + {ID: "ups_%s_battery.charge", Name: "charge", Div: varPrecision}, + }, + } + upsBatteryEstimatedRuntimeChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.battery_estimated_runtime", + Title: "UPS Battery estimated runtime", + Units: "seconds", + Fam: "battery", + Ctx: "upsd.ups_battery_estimated_runtime", + Priority: prioBatteryEstimatedRuntime, + Dims: module.Dims{ + {ID: "ups_%s_battery.runtime", Name: "runtime", Div: varPrecision}, + }, + } + upsBatteryVoltageChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.battery_voltage", + Title: "UPS Battery voltage", + Units: "Volts", + Fam: "battery", + Ctx: "upsd.ups_battery_voltage", + 
Priority: prioBatteryVoltage, + Dims: module.Dims{ + {ID: "ups_%s_battery.voltage", Name: "voltage", Div: varPrecision}, + }, + } + upsBatteryVoltageNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.battery_voltage_nominal", + Title: "UPS Battery voltage nominal", + Units: "Volts", + Fam: "battery", + Ctx: "upsd.ups_battery_voltage_nominal", + Priority: prioBatteryVoltageNominal, + Dims: module.Dims{ + {ID: "ups_%s_battery.voltage.nominal", Name: "nominal_voltage", Div: varPrecision}, + }, + } +) + +var ( + upsInputVoltageChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_voltage", + Title: "UPS Input voltage", + Units: "Volts", + Fam: "input", + Ctx: "upsd.ups_input_voltage", + Priority: prioInputVoltage, + Dims: module.Dims{ + {ID: "ups_%s_input.voltage", Name: "voltage", Div: varPrecision}, + }, + } + upsInputVoltageNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_voltage_nominal", + Title: "UPS Input voltage nominal", + Units: "Volts", + Fam: "input", + Ctx: "upsd.ups_input_voltage_nominal", + Priority: prioInputVoltageNominal, + Dims: module.Dims{ + {ID: "ups_%s_input.voltage.nominal", Name: "nominal_voltage", Div: varPrecision}, + }, + } + upsInputCurrentChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_current", + Title: "UPS Input current", + Units: "Ampere", + Fam: "input", + Ctx: "upsd.ups_input_current", + Priority: prioInputCurrent, + Dims: module.Dims{ + {ID: "ups_%s_input.current", Name: "current", Div: varPrecision}, + }, + } + upsInputCurrentNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_current_nominal", + Title: "UPS Input current nominal", + Units: "Ampere", + Fam: "input", + Ctx: "upsd.ups_input_current_nominal", + Priority: prioInputCurrentNominal, + Dims: module.Dims{ + {ID: "ups_%s_input.current.nominal", Name: "nominal_current", Div: varPrecision}, + }, + } + upsInputFrequencyChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_frequency", + Title: "UPS Input frequency", + Units: "Hz", + Fam: "input", + Ctx: "upsd.ups_input_frequency", + Priority: prioInputFrequency, + Dims: module.Dims{ + {ID: "ups_%s_input.frequency", Name: "frequency", Div: varPrecision}, + }, + } + upsInputFrequencyNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.input_frequency_nominal", + Title: "UPS Input frequency nominal", + Units: "Hz", + Fam: "input", + Ctx: "upsd.ups_input_frequency_nominal", + Priority: prioInputFrequencyNominal, + Dims: module.Dims{ + {ID: "ups_%s_input.frequency.nominal", Name: "nominal_frequency", Div: varPrecision}, + }, + } +) + +var ( + upsOutputVoltageChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_voltage", + Title: "UPS Output voltage", + Units: "Volts", + Fam: "output", + Ctx: "upsd.ups_output_voltage", + Priority: prioOutputVoltage, + Dims: module.Dims{ + {ID: "ups_%s_output.voltage", Name: "voltage", Div: varPrecision}, + }, + } + upsOutputVoltageNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_voltage_nominal", + Title: "UPS Output voltage nominal", + Units: "Volts", + Fam: "output", + Ctx: "upsd.ups_output_voltage_nominal", + Priority: prioOutputVoltageNominal, + Dims: module.Dims{ + {ID: "ups_%s_output.voltage.nominal", Name: "nominal_voltage", Div: varPrecision}, + }, + } + upsOutputCurrentChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_current", + Title: "UPS Output current", + Units: "Ampere", + Fam: "output", + Ctx: "upsd.ups_output_current", + Priority: prioOutputCurrent, + Dims: module.Dims{ + {ID: "ups_%s_output.current", Name: "current", Div: 
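/* editorial note: collected values are stored as int64 scaled by
			varPrecision (see writeVar in collect.go), so every float dimension
			divides by the same constant to restore the real value */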
varPrecision}, + }, + } + upsOutputCurrentNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_current_nominal", + Title: "UPS Output current nominal", + Units: "Ampere", + Fam: "output", + Ctx: "upsd.ups_output_current_nominal", + Priority: prioOutputCurrentNominal, + Dims: module.Dims{ + {ID: "ups_%s_output.current.nominal", Name: "nominal_current", Div: varPrecision}, + }, + } + upsOutputFrequencyChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_frequency", + Title: "UPS Output frequency", + Units: "Hz", + Fam: "output", + Ctx: "upsd.ups_output_frequency", + Priority: prioOutputFrequency, + Dims: module.Dims{ + {ID: "ups_%s_output.frequency", Name: "frequency", Div: varPrecision}, + }, + } + upsOutputFrequencyNominalChartTmpl = module.Chart{ + IDSep: true, + ID: "%s.output_frequency_nominal", + Title: "UPS Output frequency nominal", + Units: "Hz", + Fam: "output", + Ctx: "upsd.ups_output_frequency_nominal", + Priority: prioOutputFrequencyNominal, + Dims: module.Dims{ + {ID: "ups_%s_output.frequency.nominal", Name: "nominal_frequency", Div: varPrecision}, + }, + } +) + +func (u *Upsd) addUPSCharts(ups upsUnit) { + charts := upsChartsTmpl.Copy() + + var removed []string + for _, v := range []struct{ v, id string }{ + {varBatteryVoltage, upsBatteryVoltageChartTmpl.ID}, + {varBatteryVoltageNominal, upsBatteryVoltageNominalChartTmpl.ID}, + + {varUpsTemperature, upsTemperatureChartTmpl.ID}, + + {varInputVoltage, upsInputVoltageChartTmpl.ID}, + {varInputVoltageNominal, upsInputVoltageNominalChartTmpl.ID}, + {varInputCurrent, upsInputCurrentChartTmpl.ID}, + {varInputCurrentNominal, upsInputCurrentNominalChartTmpl.ID}, + {varInputFrequency, upsInputFrequencyChartTmpl.ID}, + {varInputFrequencyNominal, upsInputFrequencyNominalChartTmpl.ID}, + + {varOutputVoltage, upsOutputVoltageChartTmpl.ID}, + {varOutputVoltageNominal, upsOutputVoltageNominalChartTmpl.ID}, + {varOutputCurrent, upsOutputCurrentChartTmpl.ID}, + {varOutputCurrentNominal, upsOutputCurrentNominalChartTmpl.ID}, + {varOutputFrequency, upsOutputFrequencyChartTmpl.ID}, + {varOutputFrequencyNominal, upsOutputFrequencyNominalChartTmpl.ID}, + } { + if !hasVar(ups.vars, v.v) { + removed = append(removed, v.v) + _ = charts.Remove(v.id) + } + } + + u.Debugf("UPS '%s' no metrics: %v", ups.name, removed) + + name := cleanUpsName(ups.name) + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "ups_name", Value: ups.name}, + {Key: "battery_type", Value: ups.vars[varBatteryType]}, + {Key: "device_model", Value: ups.vars[varDeviceModel]}, + {Key: "device_serial", Value: ups.vars[varDeviceSerial]}, + {Key: "device_manufacturer", Value: ups.vars[varDeviceMfr]}, + {Key: "device_type", Value: ups.vars[varDeviceType]}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, ups.name) + } + } + + if err := u.Charts().Add(*charts...); err != nil { + u.Warning(err) + } +} + +func (u *Upsd) removeUPSCharts(name string) { + name = cleanUpsName(name) + for _, chart := range *u.Charts() { + if strings.HasPrefix(chart.ID, name) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} + +func cleanUpsName(name string) string { + name = strings.ReplaceAll(name, " ", "_") + name = strings.ReplaceAll(name, ".", "_") + return name +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/client.go b/src/go/collectors/go.d.plugin/modules/upsd/client.go new file mode 100644 index 00000000000000..be0148bc53ba3d --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/upsd/client.go @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +import ( + "encoding/csv" + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/socket" +) + +const ( + commandUsername = "USERNAME %s" + commandPassword = "PASSWORD %s" + commandListUPS = "LIST UPS" + commandListVar = "LIST VAR %s" + commandLogout = "LOGOUT" +) + +// https://github.com/networkupstools/nut/blob/81fca30b2998fa73085ce4654f075605ff0b9e01/docs/net-protocol.txt#L647 +var errUpsdCommand = errors.New("upsd command error") + +type upsUnit struct { + name string + vars map[string]string +} + +func newUpsdConn(conf Config) upsdConn { + return &upsdClient{conn: socket.New(socket.Config{ + ConnectTimeout: conf.Timeout.Duration, + ReadTimeout: conf.Timeout.Duration, + WriteTimeout: conf.Timeout.Duration, + Address: conf.Address, + })} +} + +type upsdClient struct { + conn socket.Client +} + +func (c *upsdClient) connect() error { + return c.conn.Connect() +} + +func (c *upsdClient) disconnect() error { + _, _ = c.sendCommand(commandLogout) + return c.conn.Disconnect() +} + +func (c *upsdClient) authenticate(username, password string) error { + cmd := fmt.Sprintf(commandUsername, username) + resp, err := c.sendCommand(cmd) + if err != nil { + return err + } + if resp[0] != "OK" { + return errors.New("authentication failed: invalid username") + } + + cmd = fmt.Sprintf(commandPassword, password) + resp, err = c.sendCommand(cmd) + if err != nil { + return err + } + if resp[0] != "OK" { + return errors.New("authentication failed: invalid password") + } + + return nil +} + +func (c *upsdClient) upsUnits() ([]upsUnit, error) { + resp, err := c.sendCommand(commandListUPS) + if err != nil { + return nil, err + } + + var upsNames []string + + for _, v := range resp { + if !strings.HasPrefix(v, "UPS ") { + continue + } + parts := splitLine(v) + if len(parts) < 2 { + continue + } + name := parts[1] + upsNames = append(upsNames, name) + } + + var upsUnits []upsUnit + + for _, name := range upsNames { + cmd := fmt.Sprintf(commandListVar, name) + resp, err := c.sendCommand(cmd) + if err != nil { + return nil, err + } + + ups := upsUnit{ + name: name, + vars: make(map[string]string), + } + + upsUnits = append(upsUnits, ups) + + for _, v := range resp { + if !strings.HasPrefix(v, "VAR ") { + continue + } + parts := splitLine(v) + if len(parts) < 4 { + continue + } + n, v := parts[2], parts[3] + ups.vars[n] = v + } + } + + return upsUnits, nil +} + +func (c *upsdClient) sendCommand(cmd string) ([]string, error) { + var resp []string + var errMsg string + endLine := getEndLine(cmd) + + err := c.conn.Command(cmd+"\n", func(bytes []byte) bool { + line := string(bytes) + resp = append(resp, line) + + if strings.HasPrefix(line, "ERR ") { + errMsg = strings.TrimPrefix(line, "ERR ") + } + + return line != endLine && errMsg == "" + }) + if err != nil { + return nil, err + } + if errMsg != "" { + return nil, fmt.Errorf("%w: %s (cmd: '%s')", errUpsdCommand, errMsg, cmd) + } + + return resp, nil +} + +func getEndLine(cmd string) string { + px, _, _ := strings.Cut(cmd, " ") + + switch px { + case "USERNAME", "PASSWORD", "VER": + return "OK" + } + return fmt.Sprintf("END %s", cmd) +} + +func splitLine(s string) []string { + r := csv.NewReader(strings.NewReader(s)) + r.Comma = ' ' + + parts, _ := r.Read() + + return parts +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/collect.go b/src/go/collectors/go.d.plugin/modules/upsd/collect.go new file mode 
100644 index 00000000000000..39e3d1b55bf18f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/collect.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +import ( + "errors" + "strconv" + "strings" +) + +func (u *Upsd) collect() (map[string]int64, error) { + if u.conn == nil { + conn, err := u.establishConnection() + if err != nil { + return nil, err + } + u.conn = conn + } + + upsUnits, err := u.conn.upsUnits() + if err != nil { + if !errors.Is(err, errUpsdCommand) { + _ = u.conn.disconnect() + u.conn = nil + } + return nil, err + } + + u.Debugf("found %d UPS units", len(upsUnits)) + + mx := make(map[string]int64) + + u.collectUPSUnits(mx, upsUnits) + + return mx, nil +} + +func (u *Upsd) establishConnection() (upsdConn, error) { + conn := u.newUpsdConn(u.Config) + + if err := conn.connect(); err != nil { + return nil, err + } + + if u.Username != "" && u.Password != "" { + if err := conn.authenticate(u.Username, u.Password); err != nil { + _ = conn.disconnect() + return nil, err + } + } + + return conn, nil +} + +func (u *Upsd) collectUPSUnits(mx map[string]int64, upsUnits []upsUnit) { + seen := make(map[string]bool) + + for _, ups := range upsUnits { + seen[ups.name] = true + u.Debugf("collecting metrics UPS '%s'", ups.name) + + if !u.upsUnits[ups.name] { + u.upsUnits[ups.name] = true + u.addUPSCharts(ups) + } + + writeVar(mx, ups, varBatteryCharge) + writeVar(mx, ups, varBatteryRuntime) + writeVar(mx, ups, varBatteryVoltage) + writeVar(mx, ups, varBatteryVoltageNominal) + + writeVar(mx, ups, varInputVoltage) + writeVar(mx, ups, varInputVoltageNominal) + writeVar(mx, ups, varInputCurrent) + writeVar(mx, ups, varInputCurrentNominal) + writeVar(mx, ups, varInputFrequency) + writeVar(mx, ups, varInputFrequencyNominal) + + writeVar(mx, ups, varOutputVoltage) + writeVar(mx, ups, varOutputVoltageNominal) + writeVar(mx, ups, varOutputCurrent) + writeVar(mx, ups, varOutputCurrentNominal) + writeVar(mx, ups, varOutputFrequency) + writeVar(mx, ups, varOutputFrequencyNominal) + + writeVar(mx, ups, varUpsLoad) + writeVar(mx, ups, varUpsRealPowerNominal) + writeVar(mx, ups, varUpsTemperature) + writeUpsLoadUsage(mx, ups) + writeUpsStatus(mx, ups) + } + + for name := range u.upsUnits { + if !seen[name] { + delete(u.upsUnits, name) + u.removeUPSCharts(name) + } + } +} + +func writeVar(mx map[string]int64, ups upsUnit, v string) { + s, ok := ups.vars[v] + if !ok { + return + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + return + } + mx[prefix(ups)+v] = int64(n * varPrecision) +} + +func writeUpsLoadUsage(mx map[string]int64, ups upsUnit) { + if hasVar(ups.vars, varUpsRealPower) { + pow, _ := strconv.ParseFloat(ups.vars[varUpsRealPower], 64) + mx[prefix(ups)+"ups.load.usage"] = int64(pow * varPrecision) + return + } + + if !hasVar(ups.vars, varUpsLoad) || !hasVar(ups.vars, varUpsRealPowerNominal) { + return + } + load, err := strconv.ParseFloat(ups.vars[varUpsLoad], 64) + if err != nil { + return + } + nomPower, err := strconv.ParseFloat(ups.vars[varUpsRealPowerNominal], 64) + if err != nil || nomPower == 0 { + return + } + mx[prefix(ups)+"ups.load.usage"] = int64((load / 100 * nomPower) * varPrecision) +} + +// https://networkupstools.org/docs/developer-guide.chunked/ar01s04.html#_status_data +var upsStatuses = map[string]bool{ + "OL": true, + "OB": true, + "LB": true, + "HB": true, + "RB": true, + "CHRG": true, + "DISCHRG": true, + "BYPASS": true, + "CAL": true, + "OFF": true, + "OVER": true, + "TRIM": true, + "BOOST": true, + "FSD": true, 
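+	// Any token not listed here (for example "ALARM", which some drivers set) is
+	// counted under the "other" dimension by writeUpsStatus below.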
+} + +func writeUpsStatus(mx map[string]int64, ups upsUnit) { + if !hasVar(ups.vars, varUpsStatus) { + return + } + + px := prefix(ups) + "ups.status." + + for st := range upsStatuses { + mx[px+st] = 0 + } + mx[px+"other"] = 0 + + for _, st := range strings.Split(ups.vars[varUpsStatus], " ") { + if _, ok := upsStatuses[st]; ok { + mx[px+st] = 1 + } else { + mx[px+"other"] = 1 + } + } +} + +func hasVar(vars map[string]string, v string) bool { + _, ok := vars[v] + return ok +} + +func prefix(ups upsUnit) string { + return "ups_" + ups.name + "_" +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/config_schema.json b/src/go/collectors/go.d.plugin/modules/upsd/config_schema.json new file mode 100644 index 00000000000000..49fc85354d8edc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/config_schema.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/upsd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/integrations/ups_nut.md b/src/go/collectors/go.d.plugin/modules/upsd/integrations/ups_nut.md new file mode 100644 index 00000000000000..3e315af44c0984 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/integrations/ups_nut.md @@ -0,0 +1,211 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/upsd/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/upsd/metadata.yaml" +sidebar_label: "UPS (NUT)" +learn_status: "Published" +learn_rel_path: "Data Collection/UPS" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# UPS (NUT) + + +<img src="https://netdata.cloud/img/plug-circle-bolt.svg" width="150"/> + + +Plugin: go.d.plugin +Module: upsd + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per ups + +These metrics refer to the UPS unit. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| ups_name | UPS name. | +| battery_type | Battery type (chemistry). "battery.type" variable value. | +| device_model | Device model. "device.model" variable value. | +| device_serial | Device serial number. "device.serial" variable value. | +| device_manufacturer | Device manufacturer. "device.mfr" variable value. | +| device_type | Device type (ups, pdu, scd, psu, ats). 
"device.type" variable value. | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| upsd.ups_load | load | percentage | +| upsd.ups_load_usage | load_usage | Watts | +| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status | +| upsd.ups_temperature | temperature | Celsius | +| upsd.ups_battery_charge | charge | percentage | +| upsd.ups_battery_estimated_runtime | runtime | seconds | +| upsd.ups_battery_voltage | voltage | Volts | +| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts | +| upsd.ups_input_voltage | voltage | Volts | +| upsd.ups_input_voltage_nominal | nominal_voltage | Volts | +| upsd.ups_input_current | current | Ampere | +| upsd.ups_input_current_nominal | nominal_current | Ampere | +| upsd.ups_input_frequency | frequency | Hz | +| upsd.ups_input_frequency_nominal | nominal_frequency | Hz | +| upsd.ups_output_voltage | voltage | Volts | +| upsd.ups_output_voltage_nominal | nominal_voltage | Volts | +| upsd.ups_output_current | current | Ampere | +| upsd.ups_output_current_nominal | nominal_current | Ampere | +| upsd.ups_output_frequency | frequency | Hz | +| upsd.ups_output_frequency_nominal | nominal_frequency | Hz | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes | +| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute | +| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/upsd.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/upsd.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes | +| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:3493 + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:3493 + + - name: remote + address: 203.0.113.0:3493 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m upsd + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/upsd/metadata.yaml b/src/go/collectors/go.d.plugin/modules/upsd/metadata.yaml new file mode 100644 index 00000000000000..070b338528a235 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/metadata.yaml @@ -0,0 +1,264 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-upsd + plugin_name: go.d.plugin + module_name: upsd + monitored_instance: + name: UPS (NUT) + link: "" + icon_filename: plug-circle-bolt.svg + categories: + - data-collection.ups + keywords: + - ups + - nut + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/upsd.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: address + description: UPS daemon address in IP:PORT format. + default_value: 127.0.0.1:3493 + required: true + - name: timeout + description: Connection/read/write timeout in seconds. The timeout includes name resolution, if required. + default_value: 2 + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: A basic example configuration. + config: | + jobs: + - name: local + address: 127.0.0.1:3493 + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. 
+ config: | + jobs: + - name: local + address: 127.0.0.1:3493 + + - name: remote + address: 203.0.113.0:3493 + troubleshooting: + problems: + list: [] + alerts: + - name: upsd_10min_ups_load + metric: upsd.ups_load + info: "UPS ${label:ups_name} average load over the last 10 minutes" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf + - name: upsd_ups_battery_charge + metric: upsd.ups_battery_charge + info: "UPS ${label:ups_name} average battery charge over the last minute" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf + - name: upsd_ups_last_collected_secs + metric: upsd.ups_load + info: "UPS ${label:ups_name} number of seconds since the last successful data collection" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: ups + description: These metrics refer to the UPS unit. + labels: + - name: ups_name + description: UPS name. + - name: battery_type + description: Battery type (chemistry). "battery.type" variable value. + - name: device_model + description: Device model. "device.model" variable value. + - name: device_serial + description: Device serial number. "device.serial" variable value. + - name: device_manufacturer + description: Device manufacturer. "device.mfr" variable value. + - name: device_type + description: Device type (ups, pdu, scd, psu, ats). "device.type" variable value. + metrics: + - name: upsd.ups_load + description: UPS load + unit: percentage + chart_type: area + dimensions: + - name: load + - name: upsd.ups_load_usage + description: UPS load usage (power output) + unit: Watts + chart_type: line + dimensions: + - name: load_usage + - name: upsd.ups_status + description: UPS status + unit: status + chart_type: line + dimensions: + - name: on_line + - name: on_battery + - name: low_battery + - name: high_battery + - name: replace_battery + - name: charging + - name: discharging + - name: bypass + - name: calibration + - name: offline + - name: overloaded + - name: trim_input_voltage + - name: boost_input_voltage + - name: forced_shutdown + - name: other + - name: upsd.ups_temperature + description: UPS temperature + unit: Celsius + chart_type: line + dimensions: + - name: temperature + - name: upsd.ups_battery_charge + description: UPS Battery charge + unit: percentage + chart_type: area + dimensions: + - name: charge + - name: upsd.ups_battery_estimated_runtime + description: UPS Battery estimated runtime + unit: seconds + chart_type: line + dimensions: + - name: runtime + - name: upsd.ups_battery_voltage + description: UPS Battery voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage + - name: upsd.ups_battery_voltage_nominal + description: UPS Battery voltage nominal + unit: Volts + chart_type: line + dimensions: + - name: nominal_voltage + - name: upsd.ups_input_voltage + description: UPS Input voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage + - name: upsd.ups_input_voltage_nominal + description: UPS Input voltage nominal + unit: Volts + chart_type: line + dimensions: + - name: nominal_voltage + - name: upsd.ups_input_current + description: UPS Input current + unit: Ampere + chart_type: line + dimensions: + - name: current + - name: upsd.ups_input_current_nominal + description: UPS Input current nominal + unit: Ampere + chart_type: line + dimensions: + - name: nominal_current + - name: 
upsd.ups_input_frequency + description: UPS Input frequency + unit: Hz + chart_type: line + dimensions: + - name: frequency + - name: upsd.ups_input_frequency_nominal + description: UPS Input frequency nominal + unit: Hz + chart_type: line + dimensions: + - name: nominal_frequency + - name: upsd.ups_output_voltage + description: UPS Output voltage + unit: Volts + chart_type: line + dimensions: + - name: voltage + - name: upsd.ups_output_voltage_nominal + description: UPS Output voltage nominal + unit: Volts + chart_type: line + dimensions: + - name: nominal_voltage + - name: upsd.ups_output_current + description: UPS Output current + unit: Ampere + chart_type: line + dimensions: + - name: current + - name: upsd.ups_output_current_nominal + description: UPS Output current nominal + unit: Ampere + chart_type: line + dimensions: + - name: nominal_current + - name: upsd.ups_output_frequency + description: UPS Output frequency + unit: Hz + chart_type: line + dimensions: + - name: frequency + - name: upsd.ups_output_frequency_nominal + description: UPS Output frequency nominal + unit: Hz + chart_type: line + dimensions: + - name: nominal_frequency diff --git a/src/go/collectors/go.d.plugin/modules/upsd/upsd.go b/src/go/collectors/go.d.plugin/modules/upsd/upsd.go new file mode 100644 index 00000000000000..ebe0f36bc0fe16 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/upsd.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +import ( + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func init() { + module.Register("upsd", module.Creator{ + Create: func() module.Module { return New() }, + }) +} + +func New() *Upsd { + return &Upsd{ + Config: Config{ + Address: "127.0.0.1:3493", + Timeout: web.Duration{Duration: time.Second * 2}, + }, + newUpsdConn: newUpsdConn, + charts: &module.Charts{}, + upsUnits: make(map[string]bool), + } +} + +type Config struct { + Address string `yaml:"address"` + Username string `yaml:"username"` + Password string `yaml:"password"` + Timeout web.Duration `yaml:"timeout"` +} + +type ( + Upsd struct { + module.Base + + Config `yaml:",inline"` + + charts *module.Charts + + newUpsdConn func(Config) upsdConn + conn upsdConn + + upsUnits map[string]bool + } + + upsdConn interface { + connect() error + disconnect() error + authenticate(string, string) error + upsUnits() ([]upsUnit, error) + } +) + +func (u *Upsd) Init() bool { + if u.Address == "" { + u.Error("config: 'address' not set") + return false + } + + return true +} + +func (u *Upsd) Check() bool { + return len(u.Collect()) > 0 +} + +func (u *Upsd) Charts() *module.Charts { + return u.charts +} + +func (u *Upsd) Collect() map[string]int64 { + mx, err := u.collect() + if err != nil { + u.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (u *Upsd) Cleanup() { + if u.conn == nil { + return + } + if err := u.conn.disconnect(); err != nil { + u.Warningf("error on disconnect: %v", err) + } + u.conn = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/upsd_test.go b/src/go/collectors/go.d.plugin/modules/upsd/upsd_test.go new file mode 100644 index 00000000000000..74c8626f1052f2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/upsd_test.go @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUpsd_Cleanup(t 
*testing.T) { + upsd := New() + + require.NotPanics(t, upsd.Cleanup) + + mock := prepareMockConnOK() + upsd.newUpsdConn = func(Config) upsdConn { return mock } + + require.True(t, upsd.Init()) + _ = upsd.Collect() + require.NotPanics(t, upsd.Cleanup) + assert.True(t, mock.calledDisconnect) +} + +func TestUpsd_Init(t *testing.T) { + tests := map[string]struct { + config Config + wantFail bool + }{ + "success on default config": { + wantFail: false, + config: New().Config, + }, + "fails when 'address' option not set": { + wantFail: true, + config: Config{Address: ""}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + upsd := New() + upsd.Config = test.config + + if test.wantFail { + assert.False(t, upsd.Init()) + } else { + assert.True(t, upsd.Init()) + } + }) + } +} + +func TestUpsd_Check(t *testing.T) { + tests := map[string]struct { + prepareUpsd func() *Upsd + prepareMock func() *mockUpsdConn + wantFail bool + }{ + "successful data collection": { + wantFail: false, + prepareUpsd: New, + prepareMock: prepareMockConnOK, + }, + "error on connect()": { + wantFail: true, + prepareUpsd: New, + prepareMock: prepareMockConnErrOnConnect, + }, + "error on authenticate()": { + wantFail: true, + prepareUpsd: func() *Upsd { + upsd := New() + upsd.Username = "user" + upsd.Password = "pass" + return upsd + }, + prepareMock: prepareMockConnErrOnAuthenticate, + }, + "error on upsList()": { + wantFail: true, + prepareUpsd: New, + prepareMock: prepareMockConnErrOnUpsUnits, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + upsd := test.prepareUpsd() + upsd.newUpsdConn = func(Config) upsdConn { return test.prepareMock() } + + require.True(t, upsd.Init()) + + if test.wantFail { + assert.False(t, upsd.Check()) + } else { + assert.True(t, upsd.Check()) + } + }) + } +} + +func TestUpsd_Charts(t *testing.T) { + upsd := New() + require.True(t, upsd.Init()) + assert.NotNil(t, upsd.Charts()) +} + +func TestUpsd_Collect(t *testing.T) { + tests := map[string]struct { + prepareUpsd func() *Upsd + prepareMock func() *mockUpsdConn + wantCollected map[string]int64 + wantCharts int + wantConnConnect bool + wantConnDisconnect bool + wantConnAuthenticate bool + }{ + "successful data collection": { + prepareUpsd: New, + prepareMock: prepareMockConnOK, + wantCollected: map[string]int64{ + "ups_cp1500_battery.charge": 10000, + "ups_cp1500_battery.runtime": 489000, + "ups_cp1500_battery.voltage": 2400, + "ups_cp1500_battery.voltage.nominal": 2400, + "ups_cp1500_input.voltage": 22700, + "ups_cp1500_input.voltage.nominal": 23000, + "ups_cp1500_output.voltage": 26000, + "ups_cp1500_ups.load": 800, + "ups_cp1500_ups.load.usage": 4300, + "ups_cp1500_ups.realpower.nominal": 90000, + "ups_cp1500_ups.status.BOOST": 0, + "ups_cp1500_ups.status.BYPASS": 0, + "ups_cp1500_ups.status.CAL": 0, + "ups_cp1500_ups.status.CHRG": 0, + "ups_cp1500_ups.status.DISCHRG": 0, + "ups_cp1500_ups.status.FSD": 0, + "ups_cp1500_ups.status.HB": 0, + "ups_cp1500_ups.status.LB": 0, + "ups_cp1500_ups.status.OB": 0, + "ups_cp1500_ups.status.OFF": 0, + "ups_cp1500_ups.status.OL": 1, + "ups_cp1500_ups.status.OVER": 0, + "ups_cp1500_ups.status.RB": 0, + "ups_cp1500_ups.status.TRIM": 0, + "ups_cp1500_ups.status.other": 0, + "ups_pr3000_battery.charge": 10000, + "ups_pr3000_battery.runtime": 110800, + "ups_pr3000_battery.voltage": 5990, + "ups_pr3000_battery.voltage.nominal": 4800, + "ups_pr3000_input.voltage": 22500, + "ups_pr3000_input.voltage.nominal": 23000, + "ups_pr3000_output.voltage": 22500, + 
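+				// Expected values are the raw NUT variable values scaled by varPrecision (100),
+				// e.g. "battery.runtime" 1108 -> 110800 and "output.voltage" 225.0 -> 22500.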
"ups_pr3000_ups.load": 2800, + "ups_pr3000_ups.load.usage": 84000, + "ups_pr3000_ups.realpower.nominal": 300000, + "ups_pr3000_ups.status.BOOST": 0, + "ups_pr3000_ups.status.BYPASS": 0, + "ups_pr3000_ups.status.CAL": 0, + "ups_pr3000_ups.status.CHRG": 0, + "ups_pr3000_ups.status.DISCHRG": 0, + "ups_pr3000_ups.status.FSD": 0, + "ups_pr3000_ups.status.HB": 0, + "ups_pr3000_ups.status.LB": 0, + "ups_pr3000_ups.status.OB": 0, + "ups_pr3000_ups.status.OFF": 0, + "ups_pr3000_ups.status.OL": 1, + "ups_pr3000_ups.status.OVER": 0, + "ups_pr3000_ups.status.RB": 0, + "ups_pr3000_ups.status.TRIM": 0, + "ups_pr3000_ups.status.other": 0, + }, + wantCharts: 20, + wantConnConnect: true, + wantConnDisconnect: false, + wantConnAuthenticate: false, + }, + "error on connect()": { + prepareUpsd: New, + prepareMock: prepareMockConnErrOnConnect, + wantCollected: nil, + wantCharts: 0, + wantConnConnect: true, + wantConnDisconnect: false, + wantConnAuthenticate: false, + }, + "error on authenticate()": { + prepareUpsd: func() *Upsd { + upsd := New() + upsd.Username = "user" + upsd.Password = "pass" + return upsd + }, + prepareMock: prepareMockConnErrOnAuthenticate, + wantCollected: nil, + wantCharts: 0, + wantConnConnect: true, + wantConnDisconnect: true, + wantConnAuthenticate: true, + }, + "err on upsList()": { + prepareUpsd: New, + prepareMock: prepareMockConnErrOnUpsUnits, + wantCollected: nil, + wantCharts: 0, + wantConnConnect: true, + wantConnDisconnect: true, + wantConnAuthenticate: false, + }, + "command err on upsList() (unknown ups)": { + prepareUpsd: New, + prepareMock: prepareMockConnCommandErrOnUpsUnits, + wantCollected: nil, + wantCharts: 0, + wantConnConnect: true, + wantConnDisconnect: false, + wantConnAuthenticate: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + upsd := test.prepareUpsd() + require.True(t, upsd.Init()) + + mock := test.prepareMock() + upsd.newUpsdConn = func(Config) upsdConn { return mock } + + mx := upsd.Collect() + + assert.Equal(t, test.wantCollected, mx) + assert.Equalf(t, test.wantCharts, len(*upsd.Charts()), "number of charts") + if len(test.wantCollected) > 0 { + ensureCollectedHasAllChartsDims(t, upsd, mx) + } + assert.Equalf(t, test.wantConnConnect, mock.calledConnect, "calledConnect") + assert.Equalf(t, test.wantConnDisconnect, mock.calledDisconnect, "calledDisconnect") + assert.Equal(t, test.wantConnAuthenticate, mock.calledAuthenticate, "calledAuthenticate") + }) + } +} + +func ensureCollectedHasAllChartsDims(t *testing.T, upsd *Upsd, mx map[string]int64) { + for _, chart := range *upsd.Charts() { + if chart.Obsolete { + continue + } + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := mx[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareMockConnOK() *mockUpsdConn { + return &mockUpsdConn{} +} + +func prepareMockConnErrOnConnect() *mockUpsdConn { + return &mockUpsdConn{errOnConnect: true} +} + +func prepareMockConnErrOnAuthenticate() *mockUpsdConn { + return &mockUpsdConn{errOnAuthenticate: true} +} + +func prepareMockConnErrOnUpsUnits() *mockUpsdConn { + return &mockUpsdConn{errOnUpsUnits: true} +} + +func prepareMockConnCommandErrOnUpsUnits() *mockUpsdConn { + return &mockUpsdConn{commandErrOnUpsUnits: true} +} + +type mockUpsdConn struct { + errOnConnect bool + errOnDisconnect bool + errOnAuthenticate 
bool + errOnUpsUnits bool + commandErrOnUpsUnits bool + + calledConnect bool + calledDisconnect bool + calledAuthenticate bool +} + +func (m *mockUpsdConn) connect() error { + m.calledConnect = true + if m.errOnConnect { + return errors.New("mock error on connect()") + } + return nil +} + +func (m *mockUpsdConn) disconnect() error { + m.calledDisconnect = true + if m.errOnDisconnect { + return errors.New("mock error on disconnect()") + } + return nil +} + +func (m *mockUpsdConn) authenticate(_, _ string) error { + m.calledAuthenticate = true + if m.errOnAuthenticate { + return errors.New("mock error on authenticate()") + } + return nil +} + +func (m *mockUpsdConn) upsUnits() ([]upsUnit, error) { + if m.errOnUpsUnits { + return nil, errors.New("mock error on upsUnits()") + } + if m.commandErrOnUpsUnits { + return nil, fmt.Errorf("%w: mock command error on upsUnits()", errUpsdCommand) + } + + upsUnits := []upsUnit{ + { + name: "pr3000", + vars: map[string]string{ + "battery.charge": "100", + "battery.charge.warning": "35", + "battery.mfr.date": "CPS", + "battery.runtime": "1108", + "battery.runtime.low": "300", + "battery.type": "PbAcid", + "battery.voltage": "59.9", + "battery.voltage.nominal": "48", + "device.mfr": "CPS", + "device.model": "PR3000ERT2U", + "device.serial": "P11MQ2000041", + "device.type": "ups", + "driver.name": "usbhid-ups", + "driver.parameter.pollfreq": "30", + "driver.parameter.pollinterval": "2", + "driver.parameter.port": "auto", + "driver.parameter.synchronous": "no", + "driver.version": "2.7.4", + "driver.version.data": "CyberPower HID 0.4", + "driver.version.internal": "0.41", + "input.voltage": "225.0", + "input.voltage.nominal": "230", + "output.voltage": "225.0", + "ups.beeper.status": "enabled", + "ups.delay.shutdown": "20", + "ups.delay.start": "30", + "ups.load": "28", + "ups.mfr": "CPS", + "ups.model": "PR3000ERT2U", + "ups.productid": "0601", + "ups.realpower.nominal": "3000", + "ups.serial": "P11MQ2000041", + "ups.status": "OL", + "ups.test.result": "No test initiated", + "ups.timer.shutdown": "0", + "ups.timer.start": "0", + "ups.vendorid": "0764", + }, + }, + { + name: "cp1500", + vars: map[string]string{ + "battery.charge": "100", + "battery.charge.low": "10", + "battery.charge.warning": "20", + "battery.mfr.date": "CPS", + "battery.runtime": "4890", + "battery.runtime.low": "300", + "battery.type": "PbAcid", + "battery.voltage": "24.0", + "battery.voltage.nominal": "24", + "device.mfr": "CPS", + "device.model": "CP1500EPFCLCD", + "device.serial": "CRMNO2000312", + "device.type": "ups", + "driver.name": "usbhid-ups", + "driver.parameter.bus": "001", + "driver.parameter.pollfreq": "30", + "driver.parameter.pollinterval": "2", + "driver.parameter.port": "auto", + "driver.parameter.product": "CP1500EPFCLCD", + "driver.parameter.productid": "0501", + "driver.parameter.serial": "CRMNO2000312", + "driver.parameter.synchronous": "no", + "driver.parameter.vendor": "CPS", + "driver.parameter.vendorid": "0764", + "driver.version": "2.7.4", + "driver.version.data": "CyberPower HID 0.4", + "driver.version.internal": "0.41", + "input.transfer.high": "260", + "input.transfer.low": "170", + "input.voltage": "227.0", + "input.voltage.nominal": "230", + "output.voltage": "260.0", + "ups.beeper.status": "enabled", + "ups.delay.shutdown": "20", + "ups.delay.start": "30", + "ups.load": "8", + "ups.mfr": "CPS", + "ups.model": "CP1500EPFCLCD", + "ups.productid": "0501", + "ups.realpower": "43", + "ups.realpower.nominal": "900", + "ups.serial": "CRMNO2000312", + 
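+				// cp1500 reports "ups.realpower" directly, so writeUpsLoadUsage takes
+				// "ups.load.usage" from it (43 -> 4300) instead of deriving it from
+				// ups.load and ups.realpower.nominal as it does for pr3000.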
"ups.status": "OL", + "ups.test.result": "No test initiated", + "ups.timer.shutdown": "-60", + "ups.timer.start": "-60", + "ups.vendorid": "0764", + }, + }, + } + + return upsUnits, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/upsd/variables.go b/src/go/collectors/go.d.plugin/modules/upsd/variables.go new file mode 100644 index 00000000000000..9792e62b9a3527 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/upsd/variables.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package upsd + +const varPrecision = 100 + +// https://networkupstools.org/docs/developer-guide.chunked/apas02.html +const ( + varBatteryCharge = "battery.charge" + varBatteryRuntime = "battery.runtime" + varBatteryVoltage = "battery.voltage" + varBatteryVoltageNominal = "battery.voltage.nominal" + varBatteryType = "battery.type" + + varInputVoltage = "input.voltage" + varInputVoltageNominal = "input.voltage.nominal" + varInputCurrent = "input.current" + varInputCurrentNominal = "input.current.nominal" + varInputFrequency = "input.frequency" + varInputFrequencyNominal = "input.frequency.nominal" + + varOutputVoltage = "output.voltage" + varOutputVoltageNominal = "output.voltage.nominal" + varOutputCurrent = "output.current" + varOutputCurrentNominal = "output.current.nominal" + varOutputFrequency = "output.frequency" + varOutputFrequencyNominal = "output.frequency.nominal" + + varUpsLoad = "ups.load" + varUpsRealPower = "ups.realpower" + varUpsRealPowerNominal = "ups.realpower.nominal" + varUpsTemperature = "ups.temperature" + varUpsStatus = "ups.status" + + varDeviceModel = "device.model" + varDeviceSerial = "device.serial" + varDeviceMfr = "device.mfr" + varDeviceType = "device.type" +) diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/README.md b/src/go/collectors/go.d.plugin/modules/vcsa/README.md new file mode 120000 index 00000000000000..0d00f46730576a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/README.md @@ -0,0 +1 @@ +integrations/vcenter_server_appliance.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/charts.go b/src/go/collectors/go.d.plugin/modules/vcsa/charts.go new file mode 100644 index 00000000000000..061211867425b3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/charts.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vcsa + +import "github.com/netdata/go.d.plugin/agent/module" + +var ( + vcsaHealthCharts = module.Charts{ + systemHealthStatus.Copy(), + applMgmtHealthChart.Copy(), + loadHealthChart.Copy(), + memHealthChart.Copy(), + swapHealthChart.Copy(), + dbStorageHealthChart.Copy(), + storageHealthChart.Copy(), + softwarePackagesHealthChart.Copy(), + } + + systemHealthStatus = module.Chart{ + ID: "system_health_status", + Title: "VCSA Overall System health status", + Units: "status", + Fam: "system", + Ctx: "vcsa.system_health_status", + Dims: module.Dims{ + {ID: "system_status_green", Name: "green"}, + {ID: "system_status_red", Name: "red"}, + {ID: "system_status_yellow", Name: "yellow"}, + {ID: "system_status_orange", Name: "orange"}, + {ID: "system_status_gray", Name: "gray"}, + {ID: "system_status_unknown", Name: "unknown"}, + }, + } + applMgmtHealthChart = module.Chart{ + ID: "applmgmt_health_status", + Title: "VCSA Appliance Management Service (applmgmt) health status", + Units: "status", + Fam: "appliance mgmt service", + Ctx: "vcsa.applmgmt_health_status", + Dims: module.Dims{ + {ID: "applmgmt_status_green", Name: "green"}, + {ID: 
"applmgmt_status_red", Name: "red"}, + {ID: "applmgmt_status_yellow", Name: "yellow"}, + {ID: "applmgmt_status_orange", Name: "orange"}, + {ID: "applmgmt_status_gray", Name: "gray"}, + {ID: "applmgmt_status_unknown", Name: "unknown"}, + }, + } + loadHealthChart = module.Chart{ + ID: "load_health_status", + Title: "VCSA Load health status", + Units: "status", + Fam: "load", + Ctx: "vcsa.load_health_status", + Dims: module.Dims{ + {ID: "load_status_green", Name: "green"}, + {ID: "load_status_red", Name: "red"}, + {ID: "load_status_yellow", Name: "yellow"}, + {ID: "load_status_orange", Name: "orange"}, + {ID: "load_status_gray", Name: "gray"}, + {ID: "load_status_unknown", Name: "unknown"}, + }, + } + memHealthChart = module.Chart{ + ID: "mem_health_status", + Title: "VCSA Memory health status", + Units: "status", + Fam: "mem", + Ctx: "vcsa.mem_health_status", + Dims: module.Dims{ + {ID: "mem_status_green", Name: "green"}, + {ID: "mem_status_red", Name: "red"}, + {ID: "mem_status_yellow", Name: "yellow"}, + {ID: "mem_status_orange", Name: "orange"}, + {ID: "mem_status_gray", Name: "gray"}, + {ID: "mem_status_unknown", Name: "unknown"}, + }, + } + swapHealthChart = module.Chart{ + ID: "swap_health_status", + Title: "VCSA Swap health status", + Units: "status", + Fam: "swap", + Ctx: "vcsa.swap_health_status", + Dims: module.Dims{ + {ID: "swap_status_green", Name: "green"}, + {ID: "swap_status_red", Name: "red"}, + {ID: "swap_status_yellow", Name: "yellow"}, + {ID: "swap_status_orange", Name: "orange"}, + {ID: "swap_status_gray", Name: "gray"}, + {ID: "swap_status_unknown", Name: "unknown"}, + }, + } + dbStorageHealthChart = module.Chart{ + ID: "database_storage_health_status", + Title: "VCSA Database Storage health status", + Units: "status", + Fam: "db storage", + Ctx: "vcsa.database_storage_health_status", + Dims: module.Dims{ + {ID: "database_storage_status_green", Name: "green"}, + {ID: "database_storage_status_red", Name: "red"}, + {ID: "database_storage_status_yellow", Name: "yellow"}, + {ID: "database_storage_status_orange", Name: "orange"}, + {ID: "database_storage_status_gray", Name: "gray"}, + {ID: "database_storage_status_unknown", Name: "unknown"}, + }, + } + storageHealthChart = module.Chart{ + ID: "storage_health_status", + Title: "VCSA Storage health status", + Units: "status", + Fam: "storage", + Ctx: "vcsa.storage_health_status", + Dims: module.Dims{ + {ID: "storage_status_green", Name: "green"}, + {ID: "storage_status_red", Name: "red"}, + {ID: "storage_status_yellow", Name: "yellow"}, + {ID: "storage_status_orange", Name: "orange"}, + {ID: "storage_status_gray", Name: "gray"}, + {ID: "storage_status_unknown", Name: "unknown"}, + }, + } + softwarePackagesHealthChart = module.Chart{ + ID: "software_packages_health_status", + Title: "VCSA Software Updates health status", + Units: "status", + Fam: "software packages", + Ctx: "vcsa.software_packages_health_status", + Dims: module.Dims{ + {ID: "software_packages_status_green", Name: "green"}, + {ID: "software_packages_status_red", Name: "red"}, + {ID: "software_packages_status_orange", Name: "orange"}, + {ID: "software_packages_status_gray", Name: "gray"}, + {ID: "software_packages_status_unknown", Name: "unknown"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/client/client.go b/src/go/collectors/go.d.plugin/modules/vcsa/client/client.go new file mode 100644 index 00000000000000..f1ae4349795a82 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/client/client.go @@ -0,0 +1,213 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "sync" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +// Session: https://vmware.github.io/vsphere-automation-sdk-rest/vsphere/index.html#SVC_com.vmware.cis.session +// Health: https://vmware.github.io/vsphere-automation-sdk-rest/vsphere/index.html#SVC_com.vmware.appliance.health + +const ( + pathCISSession = "/rest/com/vmware/cis/session" + pathHealthSystem = "/rest/appliance/health/system" + pathHealthSwap = "/rest/appliance/health/swap" + pathHealthStorage = "/rest/appliance/health/storage" + pathHealthSoftwarePackager = "/rest/appliance/health/software-packages" + pathHealthMem = "/rest/appliance/health/mem" + pathHealthLoad = "/rest/appliance/health/load" + pathHealthDatabaseStorage = "/rest/appliance/health/database-storage" + pathHealthApplMgmt = "/rest/appliance/health/applmgmt" + + apiSessIDKey = "vmware-api-session-id" +) + +type sessionToken struct { + m *sync.RWMutex + id string +} + +func (s *sessionToken) set(id string) { + s.m.Lock() + defer s.m.Unlock() + s.id = id +} + +func (s *sessionToken) get() string { + s.m.RLock() + defer s.m.RUnlock() + return s.id +} + +func New(httpClient *http.Client, url, username, password string) *Client { + if httpClient == nil { + httpClient = &http.Client{} + } + return &Client{ + httpClient: httpClient, + url: url, + username: username, + password: password, + token: &sessionToken{m: new(sync.RWMutex)}, + } +} + +type Client struct { + httpClient *http.Client + + url string + username string + password string + + token *sessionToken +} + +// Login creates a session with the API. This operation exchanges user credentials supplied in the security context +// for a session identifier that is to be used for authenticating subsequent calls. +func (c *Client) Login() error { + req := web.Request{ + URL: fmt.Sprintf("%s%s", c.url, pathCISSession), + Username: c.username, + Password: c.password, + Method: http.MethodPost, + } + s := struct{ Value string }{} + + err := c.doOKWithDecode(req, &s) + if err == nil { + c.token.set(s.Value) + } + return err +} + +// Logout terminates the validity of a session token. +func (c *Client) Logout() error { + req := web.Request{ + URL: fmt.Sprintf("%s%s", c.url, pathCISSession), + Method: http.MethodDelete, + Headers: map[string]string{apiSessIDKey: c.token.get()}, + } + + resp, err := c.doOK(req) + closeBody(resp) + c.token.set("") + return err +} + +// Ping sends a request to the VCSA server to ensure the link is operating. +// In case of a 401 error, Ping tries to re-authenticate. +func (c *Client) Ping() error { + req := web.Request{ + URL: fmt.Sprintf("%s%s?~action=get", c.url, pathCISSession), + Method: http.MethodPost, + Headers: map[string]string{apiSessIDKey: c.token.get()}, + } + resp, err := c.doOK(req) + defer closeBody(resp) + if resp != nil && resp.StatusCode == http.StatusUnauthorized { + return c.Login() + } + return err +} + +func (c *Client) health(urlPath string) (string, error) { + req := web.Request{ + URL: fmt.Sprintf("%s%s", c.url, urlPath), + Headers: map[string]string{apiSessIDKey: c.token.get()}, + } + s := struct{ Value string }{} + err := c.doOKWithDecode(req, &s) + return s.Value, err +} + +// ApplMgmt provides health status of applmgmt services. +func (c *Client) ApplMgmt() (string, error) { + return c.health(pathHealthApplMgmt) +} + +// DatabaseStorage provides health status of database storage. 
+func (c *Client) DatabaseStorage() (string, error) { + return c.health(pathHealthDatabaseStorage) +} + +// Load provides health status of system load. +func (c *Client) Load() (string, error) { + return c.health(pathHealthLoad) +} + +// Mem provides health status of memory. +func (c *Client) Mem() (string, error) { + return c.health(pathHealthMem) +} + +// SoftwarePackages provides information on software updates available in the remote VUM repository. +// Red indicates that security updates are available. +// Orange indicates that non-security updates are available. +// Green indicates that there are no updates available. +// Gray indicates that there was an error retrieving information on software updates. +func (c *Client) SoftwarePackages() (string, error) { + return c.health(pathHealthSoftwarePackager) +} + +// Storage provides health status of storage. +func (c *Client) Storage() (string, error) { + return c.health(pathHealthStorage) +} + +// Swap provides health status of swap. +func (c *Client) Swap() (string, error) { + return c.health(pathHealthSwap) +} + +// System provides the overall health of the system. +func (c *Client) System() (string, error) { + return c.health(pathHealthSystem) +} + +func (c *Client) do(req web.Request) (*http.Response, error) { + httpReq, err := web.NewHTTPRequest(req) + if err != nil { + return nil, fmt.Errorf("error on creating http request to %s : %v", req.URL, err) + } + return c.httpClient.Do(httpReq) +} + +func (c *Client) doOK(req web.Request) (*http.Response, error) { + resp, err := c.do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return resp, fmt.Errorf("%s returned %d", req.URL, resp.StatusCode) + } + return resp, nil +} + +func (c *Client) doOKWithDecode(req web.Request, dst interface{}) error { + resp, err := c.doOK(req) + defer closeBody(resp) + if err != nil { + return err + } + + err = json.NewDecoder(resp.Body).Decode(dst) + if err != nil { + return fmt.Errorf("error on decoding response from %s : %v", req.URL, err) + } + return nil +} + +func closeBody(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/client/client_test.go b/src/go/collectors/go.d.plugin/modules/vcsa/client/client_test.go new file mode 100644 index 00000000000000..379644b8930f57 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/client/client_test.go @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testUser = "user" + testPass = "pass" + testSessToken = "sessToken" + testHealthValue = "green" +) + +func newTestClient(srvURL string) *Client { + return New(nil, srvURL, testUser, testPass) +} + +func TestClient_Login(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + assert.NoError(t, cl.Login()) + assert.Equal(t, testSessToken, cl.token.get()) +} + +func TestClient_LoginWrongCredentials(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + cl.username += "!" 
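+	// appending junk to the username makes the mock server reject Basic Auth
+	// with 401, so Login must fail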
+ + assert.Error(t, cl.Login()) +} + +func TestClient_Logout(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + assert.NoError(t, cl.Login()) + assert.NoError(t, cl.Logout()) + assert.Zero(t, cl.token.get()) +} + +func TestClient_Ping(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + assert.NoError(t, cl.Ping()) +} + +func TestClient_PingWithReAuthentication(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + cl.token.set("") + assert.NoError(t, cl.Ping()) + assert.Equal(t, testSessToken, cl.token.get()) +} + +func TestClient_ApplMgmt(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.ApplMgmt() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_DatabaseStorage(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.DatabaseStorage() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_Load(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.Load() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_Mem(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.Mem() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_SoftwarePackages(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.SoftwarePackages() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_Storage(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.Storage() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_Swap(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.Swap() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_System(t *testing.T) { + ts := newTestHTTPServer() + defer ts.Close() + cl := newTestClient(ts.URL) + + require.NoError(t, cl.Login()) + v, err := cl.System() + assert.NoError(t, err) + assert.Equal(t, testHealthValue, v) +} + +func TestClient_InvalidDataOnLogin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello\n and goodbye!")) + })) + defer ts.Close() + cl := newTestClient(ts.URL) + + assert.Error(t, cl.Login()) +} + +func TestClient_404OnLogin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + })) + defer ts.Close() + cl := newTestClient(ts.URL) + + assert.Error(t, cl.Login()) +} + +func newTestHTTPServer() *httptest.Server { + return httptest.NewServer(&mockVCSAServer{ + username: testUser, + password: testPass, + sessionID: testSessToken, + }) +} + +type mockVCSAServer struct { + username string + password string + sessionID string +} + +func (m mockVCSAServer) ServeHTTP(w http.ResponseWriter, r 
*http.Request) { + switch r.URL.Path { + default: + w.WriteHeader(http.StatusNotFound) + case pathCISSession: + m.handleSession(w, r) + case + pathHealthApplMgmt, + pathHealthDatabaseStorage, + pathHealthLoad, + pathHealthMem, + pathHealthSoftwarePackager, + pathHealthStorage, + pathHealthSwap, + pathHealthSystem: + m.handleHealth(w, r) + } +} + +func (m mockVCSAServer) handleHealth(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusBadRequest) + return + } + + if !m.isSessionAuthenticated(r) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + s := struct{ Value string }{Value: testHealthValue} + b, _ := json.Marshal(s) + _, _ = w.Write(b) +} + +func (m mockVCSAServer) handleSession(w http.ResponseWriter, r *http.Request) { + switch r.Method { + default: + w.WriteHeader(http.StatusBadRequest) + case http.MethodDelete: + m.handleSessionDelete(w, r) + case http.MethodPost: + if r.URL.RawQuery == "" { + m.handleSessionCreate(w, r) + } else { + m.handleSessionGet(w, r) + } + } +} + +func (m mockVCSAServer) handleSessionCreate(w http.ResponseWriter, r *http.Request) { + if !m.isReqAuthenticated(r) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.WriteHeader(http.StatusOK) + s := struct{ Value string }{Value: m.sessionID} + b, _ := json.Marshal(s) + _, _ = w.Write(b) +} + +func (m mockVCSAServer) handleSessionGet(w http.ResponseWriter, r *http.Request) { + if !m.isSessionAuthenticated(r) { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.WriteHeader(http.StatusOK) + s := struct{ Value struct{ User string } }{Value: struct{ User string }{User: m.username}} + b, _ := json.Marshal(s) + _, _ = w.Write(b) +} + +func (m mockVCSAServer) handleSessionDelete(w http.ResponseWriter, r *http.Request) { + if !m.isSessionAuthenticated(r) { + w.WriteHeader(http.StatusUnauthorized) + return + } + w.WriteHeader(http.StatusOK) +} + +func (m mockVCSAServer) isReqAuthenticated(r *http.Request) bool { + u, p, ok := r.BasicAuth() + return ok && m.username == u && p == m.password +} + +func (m mockVCSAServer) isSessionAuthenticated(r *http.Request) bool { + return r.Header.Get(apiSessIDKey) == m.sessionID +} diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/collect.go b/src/go/collectors/go.d.plugin/modules/vcsa/collect.go new file mode 100644 index 00000000000000..8a734d9e89ea6c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/collect.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vcsa + +import ( + "sync" +) + +var componentHealthStatuses = []string{"green", "red", "yellow", "orange", "gray"} +var softwareHealthStatuses = []string{"green", "red", "orange", "gray"} + +type vcsaHealthStatus struct { + System *string + ApplMgmt *string + Load *string + Mem *string + Swap *string + DatabaseStorage *string + Storage *string + SoftwarePackages *string +} + +func (vc *VCSA) collect() (map[string]int64, error) { + err := vc.client.Ping() + if err != nil { + return nil, err + } + + var status vcsaHealthStatus + vc.scrapeHealth(&status) + + mx := make(map[string]int64) + + writeStatus(mx, "system", componentHealthStatuses, status.System) + writeStatus(mx, "applmgmt", componentHealthStatuses, status.ApplMgmt) + writeStatus(mx, "load", componentHealthStatuses, status.Load) + writeStatus(mx, "mem", componentHealthStatuses, status.Mem) + writeStatus(mx, "swap", componentHealthStatuses, status.Swap) + 
writeStatus(mx, "database_storage", componentHealthStatuses, status.DatabaseStorage) + writeStatus(mx, "storage", componentHealthStatuses, status.Storage) + writeStatus(mx, "software_packages", softwareHealthStatuses, status.SoftwarePackages) + + return mx, nil +} + +func (vc *VCSA) scrapeHealth(status *vcsaHealthStatus) { + wg := &sync.WaitGroup{} + + scrape := func(fn func() (string, error), value **string) { + v, err := fn() + if err != nil { + vc.Error(err) + return + } + *value = &v + } + + for _, fn := range []func(){ + func() { scrape(vc.client.System, &status.System) }, + func() { scrape(vc.client.ApplMgmt, &status.ApplMgmt) }, + func() { scrape(vc.client.Load, &status.Load) }, + func() { scrape(vc.client.DatabaseStorage, &status.DatabaseStorage) }, + func() { scrape(vc.client.Storage, &status.Storage) }, + func() { scrape(vc.client.Mem, &status.Mem) }, + func() { scrape(vc.client.Swap, &status.Swap) }, + func() { scrape(vc.client.SoftwarePackages, &status.SoftwarePackages) }, + } { + fn := fn + + wg.Add(1) + go func() { defer wg.Done(); fn() }() + } + + wg.Wait() +} + +func writeStatus(mx map[string]int64, key string, statuses []string, status *string) { + if status == nil { + return + } + + var found bool + for _, s := range statuses { + mx[key+"_status_"+s] = boolToInt(s == *status) + found = found || s == *status + } + mx[key+"_status_unknown"] = boolToInt(!found) +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/config_schema.json b/src/go/collectors/go.d.plugin/modules/vcsa/config_schema.json new file mode 100644 index 00000000000000..aab0647abc1803 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/vcsa job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/init.go b/src/go/collectors/go.d.plugin/modules/vcsa/init.go new file mode 100644 index 00000000000000..44c023a474dd31 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vcsa + +import ( + "errors" + + "github.com/netdata/go.d.plugin/modules/vcsa/client" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (vc *VCSA) validateConfig() error { + if vc.URL == "" { + return errors.New("URL not set") + } + if vc.Username == "" || vc.Password == "" { + return errors.New("username or password not set") + } + return nil +} + +func (vc *VCSA) initHealthClient() (*client.Client, error) { + httpClient, err := web.NewHTTPClient(vc.Client) + if err != nil { + return nil, err + } + + return client.New(httpClient, vc.URL, vc.Username, vc.Password), nil +} diff --git 
a/src/go/collectors/go.d.plugin/modules/vcsa/integrations/vcenter_server_appliance.md b/src/go/collectors/go.d.plugin/modules/vcsa/integrations/vcenter_server_appliance.md new file mode 100644 index 00000000000000..24cc7c2d6400b1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/integrations/vcenter_server_appliance.md @@ -0,0 +1,257 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/vcsa/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/vcsa/metadata.yaml" +sidebar_label: "vCenter Server Appliance" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# vCenter Server Appliance + + +<img src="https://netdata.cloud/img/vmware.svg" width="150"/> + + +Plugin: go.d.plugin +Module: vcsa + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per vCenter Server Appliance instance + +These metrics refer to the entire monitored application. +<details> +<summary>See health statuses</summary> +Overall System Health: + +| Status | Description | +|:-------:|:-------------------------------------------------------------------------------------------------------------------------| +| green | All components in the appliance are healthy. | +| yellow | One or more components in the appliance might become overloaded soon. | +| orange | One or more components in the appliance might be degraded. | +| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. | +| gray | No health data is available. | +| unknown | Collector failed to decode status. | + +Components Health: + +| Status | Description | +|:-------:|:-------------------------------------------------------------| +| green | The component is healthy. | +| yellow | The component is healthy, but may have some problems. | +| orange | The component is degraded, and may have serious problems. | +| red | The component is unavailable, or will stop functioning soon. | +| gray | No health data is available. | +| unknown | Collector failed to decode status. | + +Software Updates Health: + +| Status | Description | +|:-------:|:-----------------------------------------------------| +| green | No updates available. | +| orange | Non-security patches might be available. | +| red | Security patches might be available. | +| gray | An error retrieving information on software updates. | +| unknown | Collector failed to decode status. 
| + +</details> + + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status | +| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. | +| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. | +| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. | +| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. | +| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. | +| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. | +| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. 
| +| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. | +| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. | +| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/vcsa.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/vcsa.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 5 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | yes | +| password | Password for basic HTTP authentication. | | yes | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: vcsa1 + url: https://203.0.113.1 + username: admin@vsphere.local + password: password + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Two instances. 
+ + +<details><summary>Config</summary> + +```yaml +jobs: + - name: vcsa1 + url: https://203.0.113.1 + username: admin@vsphere.local + password: password + + - name: vcsa2 + url: https://203.0.113.10 + username: admin@vsphere.local + password: password + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m vcsa + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/metadata.yaml b/src/go/collectors/go.d.plugin/modules/vcsa/metadata.yaml new file mode 100644 index 00000000000000..d619f3d9680364 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/metadata.yaml @@ -0,0 +1,346 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-vcsa + plugin_name: go.d.plugin + module_name: vcsa + monitored_instance: + name: vCenter Server Appliance + link: https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html + icon_filename: vmware.svg + categories: + - data-collection.containers-and-vms + keywords: + - vmware + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: "go.d/vcsa.conf" + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: "5" + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: "0" + required: false + - name: url + description: Server URL. + default_value: "" + required: true + - name: timeout + description: HTTP request timeout. + default_value: "1" + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: true + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: true + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. 
+ default_value: "GET" + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: "false" + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: "false" + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + description: An example configuration. + config: | + jobs: + - name: vcsa1 + url: https://203.0.113.1 + username: admin@vsphere.local + password: password + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Two instances. + config: | + jobs: + - name: vcsa1 + url: https://203.0.113.1 + username: admin@vsphere.local + password: password + + - name: vcsa2 + url: https://203.0.113.10 + username: admin@vsphere.local + password: password + troubleshooting: + problems: + list: [] + alerts: + - name: vcsa_system_health_warn + metric: vcsa.system_health_status + info: VCSA overall system status is orange. One or more components are degraded. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_system_health_crit + metric: vcsa.system_health_status + info: VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_applmgmt_health_warn + metric: vcsa.applmgmt_health_status + info: VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_applmgmt_health_crit + metric: vcsa.applmgmt_health_status + info: VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_load_health_warn + metric: vcsa.load_health_status + info: VCSA Load component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_load_health_crit + metric: vcsa.load_health_status + info: VCSA Load component status is red. It is unavailable, or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_mem_health_warn + metric: vcsa.mem_health_status + info: VCSA Memory component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_mem_health_crit + metric: vcsa.mem_health_status + info: VCSA Memory component status is red. It is unavailable, or will stop functioning soon. 
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_swap_health_warn + metric: vcsa.swap_health_status + info: VCSA Swap component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_swap_health_crit + metric: vcsa.swap_health_status + info: VCSA Swap component status is red. It is unavailable, or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_database_storage_health_warn + metric: vcsa.database_storage_health_status + info: VCSA Database Storage component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_database_storage_health_crit + metric: vcsa.database_storage_health_status + info: VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_storage_health_warn + metric: vcsa.storage_health_status + info: VCSA Storage component status is orange. It is degraded, and may have serious problems. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_storage_health_crit + metric: vcsa.storage_health_status + info: VCSA Storage component status is red. It is unavailable, or will stop functioning soon. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + - name: vcsa_software_packages_health_warn + metric: vcsa.software_packages_health_status + info: VCSA software packages security updates are available. + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: | + These metrics refer to the entire monitored application. + <details> + <summary>See health statuses</summary> + Overall System Health: + + | Status | Description | + |:-------:|:-------------------------------------------------------------------------------------------------------------------------| + | green | All components in the appliance are healthy. | + | yellow | One or more components in the appliance might become overloaded soon. | + | orange | One or more components in the appliance might be degraded. | + | red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. | + | gray | No health data is available. | + | unknown | Collector failed to decode status. | + + Components Health: + + | Status | Description | + |:-------:|:-------------------------------------------------------------| + | green | The component is healthy. | + | yellow | The component is healthy, but may have some problems. | + | orange | The component is degraded, and may have serious problems. | + | red | The component is unavailable, or will stop functioning soon. | + | gray | No health data is available. | + | unknown | Collector failed to decode status. | + + Software Updates Health: + + | Status | Description | + |:-------:|:-----------------------------------------------------| + | green | No updates available. | + | orange | Non-security patches might be available. | + | red | Security patches might be available. 
| + | gray | An error retrieving information on software updates. | + | unknown | Collector failed to decode status. | + + </details> + labels: [] + metrics: + - name: vcsa.system_health_status + description: VCSA Overall System health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.applmgmt_health_status + description: VCSA ApplMgmt health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.load_health_status + description: VCSA Load health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.mem_health_status + description: VCSA Memory health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.swap_health_status + description: VCSA Swap health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.database_storage_health_status + description: VCSA Database Storage health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.storage_health_status + description: VCSA Storage health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: orange + - name: gray + - name: unknown + - name: vcsa.software_packages_health_status + description: VCSA Software Updates health status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: orange + - name: gray + - name: unknown diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/vcsa.go b/src/go/collectors/go.d.plugin/modules/vcsa/vcsa.go new file mode 100644 index 00000000000000..ccac96f3a265d1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/vcsa.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vcsa + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("vcsa", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 5, // VCSA health checks frequency is 5 seconds.
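+ // Collecting more often than that would only poll the appliance for data that has not changed.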
+ }, + Create: func() module.Module { return New() }, + }) +} + +func New() *VCSA { + return &VCSA{ + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 5}, + }, + }, + }, + charts: vcsaHealthCharts.Copy(), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` +} + +type ( + VCSA struct { + module.Base + Config `yaml:",inline"` + + client healthClient + + charts *module.Charts + } + + healthClient interface { + Login() error + Logout() error + Ping() error + ApplMgmt() (string, error) + DatabaseStorage() (string, error) + Load() (string, error) + Mem() (string, error) + SoftwarePackages() (string, error) + Storage() (string, error) + Swap() (string, error) + System() (string, error) + } +) + +func (vc *VCSA) Init() bool { + if err := vc.validateConfig(); err != nil { + vc.Error(err) + return false + } + + c, err := vc.initHealthClient() + if err != nil { + vc.Errorf("error on creating health client: %v", err) + return false + } + vc.client = c + + vc.Debugf("using URL %s", vc.URL) + vc.Debugf("using timeout: %s", vc.Timeout.Duration) + + return true +} + +func (vc *VCSA) Check() bool { + err := vc.client.Login() + if err != nil { + vc.Error(err) + return false + } + + return len(vc.Collect()) > 0 +} + +func (vc *VCSA) Charts() *module.Charts { + return vc.charts +} + +func (vc *VCSA) Collect() map[string]int64 { + mx, err := vc.collect() + if err != nil { + vc.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (vc *VCSA) Cleanup() { + if vc.client == nil { + return + } + err := vc.client.Logout() + if err != nil { + vc.Errorf("error on logout: %v", err) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/vcsa/vcsa_test.go b/src/go/collectors/go.d.plugin/modules/vcsa/vcsa_test.go new file mode 100644 index 00000000000000..86185bfa2fd333 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vcsa/vcsa_test.go @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vcsa + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func testNewVCSA() *VCSA { + vc := New() + vc.URL = "https://127.0.0.1:38001" + vc.Username = "user" + vc.Password = "pass" + return vc +} + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*VCSA)(nil), job) +} + +func TestVCSA_Init(t *testing.T) { + job := testNewVCSA() + + assert.True(t, job.Init()) + assert.NotNil(t, job.client) +} + +func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) { + job := New() + + assert.False(t, job.Init()) +} + +func TestVCenter_InitErrorOnCreatingClient(t *testing.T) { + job := testNewVCSA() + job.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, job.Init()) +} + +func TestVCenter_Check(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + job.client = &mockVCenterHealthClient{} + + assert.True(t, job.Check()) +} + +func TestVCenter_CheckErrorOnLogin(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + job.client = &mockVCenterHealthClient{ + login: func() error { return errors.New("login mock error") }, + } + + assert.False(t, job.Check()) +} + +func TestVCenter_CheckEnsureLoggedIn(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := &mockVCenterHealthClient{} + job.client = mock + + assert.True(t, job.Check()) + assert.True(t, mock.loginCalls == 1) +} + +func TestVCenter_Cleanup(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := 
&mockVCenterHealthClient{} + job.client = mock + job.Cleanup() + + assert.True(t, mock.logoutCalls == 1) +} + +func TestVCenter_CleanupWithNilClient(t *testing.T) { + job := testNewVCSA() + + assert.NotPanics(t, job.Cleanup) +} + +func TestVCenter_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestVCenter_Collect(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := &mockVCenterHealthClient{} + job.client = mock + + expected := map[string]int64{ + "applmgmt_status_gray": 0, + "applmgmt_status_green": 1, + "applmgmt_status_orange": 0, + "applmgmt_status_red": 0, + "applmgmt_status_unknown": 0, + "applmgmt_status_yellow": 0, + "database_storage_status_gray": 0, + "database_storage_status_green": 1, + "database_storage_status_orange": 0, + "database_storage_status_red": 0, + "database_storage_status_unknown": 0, + "database_storage_status_yellow": 0, + "load_status_gray": 0, + "load_status_green": 1, + "load_status_orange": 0, + "load_status_red": 0, + "load_status_unknown": 0, + "load_status_yellow": 0, + "mem_status_gray": 0, + "mem_status_green": 1, + "mem_status_orange": 0, + "mem_status_red": 0, + "mem_status_unknown": 0, + "mem_status_yellow": 0, + "software_packages_status_gray": 0, + "software_packages_status_green": 1, + "software_packages_status_orange": 0, + "software_packages_status_red": 0, + "software_packages_status_unknown": 0, + "storage_status_gray": 0, + "storage_status_green": 1, + "storage_status_orange": 0, + "storage_status_red": 0, + "storage_status_unknown": 0, + "storage_status_yellow": 0, + "swap_status_gray": 0, + "swap_status_green": 1, + "swap_status_orange": 0, + "swap_status_red": 0, + "swap_status_unknown": 0, + "swap_status_yellow": 0, + "system_status_gray": 0, + "system_status_green": 1, + "system_status_orange": 0, + "system_status_red": 0, + "system_status_unknown": 0, + "system_status_yellow": 0, + } + + assert.Equal(t, expected, job.Collect()) +} + +func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := &mockVCenterHealthClient{} + job.client = mock + job.Collect() + + assert.True(t, mock.pingCalls == 1) +} + +func TestVCenter_CollectErrorOnPing(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := &mockVCenterHealthClient{ + ping: func() error { return errors.New("ping mock error") }, + } + job.client = mock + + assert.Zero(t, job.Collect()) +} + +func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) { + job := testNewVCSA() + require.True(t, job.Init()) + mock := &mockVCenterHealthClient{ + applMgmt: func() (string, error) { return "", errors.New("applMgmt mock error") }, + databaseStorage: func() (string, error) { return "", errors.New("databaseStorage mock error") }, + load: func() (string, error) { return "", errors.New("load mock error") }, + mem: func() (string, error) { return "", errors.New("mem mock error") }, + softwarePackages: func() (string, error) { return "", errors.New("softwarePackages mock error") }, + storage: func() (string, error) { return "", errors.New("storage mock error") }, + swap: func() (string, error) { return "", errors.New("swap mock error") }, + system: func() (string, error) { return "", errors.New("system mock error") }, + } + job.client = mock + + assert.Zero(t, job.Collect()) +} + +type mockVCenterHealthClient struct { + login func() error + logout func() error + ping func() error + applMgmt func() (string, error) + databaseStorage func() (string, error) + load func() (string, 
error) + mem func() (string, error) + softwarePackages func() (string, error) + storage func() (string, error) + swap func() (string, error) + system func() (string, error) + loginCalls int + logoutCalls int + pingCalls int +} + +func (m *mockVCenterHealthClient) Login() error { + m.loginCalls += 1 + if m.login == nil { + return nil + } + return m.login() +} + +func (m *mockVCenterHealthClient) Logout() error { + m.logoutCalls += 1 + if m.logout == nil { + return nil + } + return m.logout() +} + +func (m *mockVCenterHealthClient) Ping() error { + m.pingCalls += 1 + if m.ping == nil { + return nil + } + return m.ping() +} + +func (m mockVCenterHealthClient) ApplMgmt() (string, error) { + if m.applMgmt == nil { + return "green", nil + } + return m.applMgmt() +} + +func (m mockVCenterHealthClient) DatabaseStorage() (string, error) { + if m.databaseStorage == nil { + return "green", nil + } + return m.databaseStorage() +} + +func (m mockVCenterHealthClient) Load() (string, error) { + if m.load == nil { + return "green", nil + } + return m.load() +} + +func (m mockVCenterHealthClient) Mem() (string, error) { + if m.mem == nil { + return "green", nil + } + return m.mem() +} + +func (m mockVCenterHealthClient) SoftwarePackages() (string, error) { + if m.softwarePackages == nil { + return "green", nil + } + return m.softwarePackages() +} + +func (m mockVCenterHealthClient) Storage() (string, error) { + if m.storage == nil { + return "green", nil + } + return m.storage() +} + +func (m mockVCenterHealthClient) Swap() (string, error) { + if m.swap == nil { + return "green", nil + } + return m.swap() +} + +func (m mockVCenterHealthClient) System() (string, error) { + if m.system == nil { + return "green", nil + } + return m.system() +} diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/README.md b/src/go/collectors/go.d.plugin/modules/vernemq/README.md new file mode 120000 index 00000000000000..3d984de714c0c3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/README.md @@ -0,0 +1 @@ +integrations/vernemq.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/charts.go b/src/go/collectors/go.d.plugin/modules/vernemq/charts.go new file mode 100644 index 00000000000000..54b86b9bd6dcba --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/charts.go @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + Charts = module.Charts + Chart = module.Chart + Dims = module.Dims + Dim = module.Dim +) + +var charts = Charts{ + chartOpenSockets.Copy(), + chartSocketEvents.Copy(), + chartClientKeepaliveExpired.Copy(), + chartSocketErrors.Copy(), + chartSocketCloseTimeout.Copy(), + + chartQueueProcesses.Copy(), + chartQueueProcessesEvents.Copy(), + chartQueueProcessesOfflineStorage.Copy(), + chartQueueMessages.Copy(), + chartQueueUndeliveredMessages.Copy(), + + chartRouterSubscriptions.Copy(), + chartRouterMatchedSubscriptions.Copy(), + chartRouterMemory.Copy(), + + chartAverageSchedulerUtilization.Copy(), + chartSchedulerUtilization.Copy(), + chartSystemProcesses.Copy(), + chartSystemReductions.Copy(), + chartSystemContextSwitches.Copy(), + chartSystemIO.Copy(), + chartSystemRunQueue.Copy(), + chartSystemGCCount.Copy(), + chartSystemGCWordsReclaimed.Copy(), + chartSystemMemoryAllocated.Copy(), + + chartBandwidth.Copy(), + + chartRetainMessages.Copy(), + chartRetainMemoryUsage.Copy(), + + chartClusterCommunicationBandwidth.Copy(), + 
chartClusterCommunicationDropped.Copy(), + chartNetSplitUnresolved.Copy(), + chartNetSplits.Copy(), + + chartMQTTv5AUTH.Copy(), + chartMQTTv5AUTHReceivedReason.Copy(), + chartMQTTv5AUTHSentReason.Copy(), + + chartMQTTv3v5CONNECT.Copy(), + chartMQTTv3v5CONNACKSentReason.Copy(), + + chartMQTTv3v5DISCONNECT.Copy(), + chartMQTTv5DISCONNECTReceivedReason.Copy(), + chartMQTTv5DISCONNECTSentReason.Copy(), + + chartMQTTv3v5SUBSCRIBE.Copy(), + chartMQTTv3v5SUBSCRIBEError.Copy(), + chartMQTTv3v5SUBSCRIBEAuthError.Copy(), + + chartMQTTv3v5UNSUBSCRIBE.Copy(), + chartMQTTv3v5UNSUBSCRIBEError.Copy(), + + chartMQTTv3v5PUBLISH.Copy(), + chartMQTTv3v5PUBLISHErrors.Copy(), + chartMQTTv3v5PUBLISHAuthErrors.Copy(), + chartMQTTv3v5PUBACK.Copy(), + chartMQTTv5PUBACKReceivedReason.Copy(), + chartMQTTv5PUBACKSentReason.Copy(), + chartMQTTv3v5PUBACKUnexpected.Copy(), + chartMQTTv3v5PUBREC.Copy(), + chartMQTTv5PUBRECReceivedReason.Copy(), + chartMQTTv5PUBRECSentReason.Copy(), + chartMQTTv3PUBRECUnexpected.Copy(), + chartMQTTv3v5PUBREL.Copy(), + chartMQTTv5PUBRELReceivedReason.Copy(), + chartMQTTv5PUBRELSentReason.Copy(), + chartMQTTv3v5PUBCOMP.Copy(), + chartMQTTv5PUBCOMPReceivedReason.Copy(), + chartMQTTv5PUBCOMPSentReason.Copy(), + chartMQTTv3v5PUBCOMPUnexpected.Copy(), + + chartMQTTv3v5PING.Copy(), + + chartUptime.Copy(), +} + +// Sockets +var ( + chartOpenSockets = Chart{ + ID: "sockets", + Title: "Open Sockets", + Units: "sockets", + Fam: "sockets", + Ctx: "vernemq.sockets", + Dims: Dims{ + {ID: "open_sockets", Name: "open"}, + }, + } + chartSocketEvents = Chart{ + ID: "socket_events", + Title: "Socket Open and Close Events", + Units: "events/s", + Fam: "sockets", + Ctx: "vernemq.socket_operations", + Dims: Dims{ + {ID: metricSocketOpen, Name: "open", Algo: module.Incremental}, + {ID: metricSocketClose, Name: "close", Algo: module.Incremental, Mul: -1}, + }, + } + chartClientKeepaliveExpired = Chart{ + ID: "client_keepalive_expired", + Title: "Closed Sockets due to Keepalive Time Expired", + Units: "sockets/s", + Fam: "sockets", + Ctx: "vernemq.client_keepalive_expired", + Dims: Dims{ + {ID: metricClientKeepaliveExpired, Name: "closed", Algo: module.Incremental}, + }, + } + chartSocketCloseTimeout = Chart{ + ID: "socket_close_timeout", + Title: "Closed Sockets due to no CONNECT Frame On Time", + Units: "sockets/s", + Fam: "sockets", + Ctx: "vernemq.socket_close_timeout", + Dims: Dims{ + {ID: metricSocketCloseTimeout, Name: "closed", Algo: module.Incremental}, + }, + } + chartSocketErrors = Chart{ + ID: "socket_errors", + Title: "Socket Errors", + Units: "errors/s", + Fam: "sockets", + Ctx: "vernemq.socket_errors", + Dims: Dims{ + {ID: metricSocketError, Name: "errors", Algo: module.Incremental}, + }, + } +) + +// Queues +var ( + chartQueueProcesses = Chart{ + ID: "queue_processes", + Title: "Living Queues in an Online or an Offline State", + Units: "queue processes", + Fam: "queues", + Ctx: "vernemq.queue_processes", + Dims: Dims{ + {ID: metricQueueProcesses, Name: "queue_processes"}, + }, + } + chartQueueProcessesEvents = Chart{ + ID: "queue_processes_events", + Title: "Queue Processes Setup and Teardown Events", + Units: "events/s", + Fam: "queues", + Ctx: "vernemq.queue_processes_operations", + Dims: Dims{ + {ID: metricQueueSetup, Name: "setup", Algo: module.Incremental}, + {ID: metricQueueTeardown, Name: "teardown", Algo: module.Incremental, Mul: -1}, + }, + } + chartQueueProcessesOfflineStorage = Chart{ + ID: "queue_process_init_from_storage", + Title: "Queue Processes Initialized from Offline Storage", + 
Units: "queue processes/s", + Fam: "queues", + Ctx: "vernemq.queue_process_init_from_storage", + Dims: Dims{ + {ID: metricQueueInitializedFromStorage, Name: "queue processes", Algo: module.Incremental}, + }, + } + chartQueueMessages = Chart{ + ID: "queue_messages", + Title: "Received and Sent PUBLISH Messages", + Units: "messages/s", + Fam: "queues", + Ctx: "vernemq.queue_messages", + Type: module.Area, + Dims: Dims{ + {ID: metricQueueMessageIn, Name: "received", Algo: module.Incremental}, + {ID: metricQueueMessageOut, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartQueueUndeliveredMessages = Chart{ + ID: "queue_undelivered_messages", + Title: "Undelivered PUBLISH Messages", + Units: "messages/s", + Fam: "queues", + Ctx: "vernemq.queue_undelivered_messages", + Type: module.Stacked, + Dims: Dims{ + {ID: metricQueueMessageDrop, Name: "dropped", Algo: module.Incremental}, + {ID: metricQueueMessageExpired, Name: "expired", Algo: module.Incremental}, + {ID: metricQueueMessageUnhandled, Name: "unhandled", Algo: module.Incremental}, + }, + } +) + +// Subscriptions +var ( + chartRouterSubscriptions = Chart{ + ID: "router_subscriptions", + Title: "Subscriptions in the Routing Table", + Units: "subscriptions", + Fam: "subscriptions", + Ctx: "vernemq.router_subscriptions", + Dims: Dims{ + {ID: metricRouterSubscriptions, Name: "subscriptions"}, + }, + } + chartRouterMatchedSubscriptions = Chart{ + ID: "router_matched_subscriptions", + Title: "Matched Subscriptions", + Units: "subscriptions/s", + Fam: "subscriptions", + Ctx: "vernemq.router_matched_subscriptions", + Dims: Dims{ + {ID: metricRouterMatchesLocal, Name: "local", Algo: module.Incremental}, + {ID: metricRouterMatchesRemote, Name: "remote", Algo: module.Incremental}, + }, + } + chartRouterMemory = Chart{ + ID: "router_memory", + Title: "Routing Table Memory Usage", + Units: "KiB", + Fam: "subscriptions", + Ctx: "vernemq.router_memory", + Type: module.Area, + Dims: Dims{ + {ID: metricRouterMemory, Name: "used", Div: 1024}, + }, + } +) + +// Erlang VM +var ( + chartAverageSchedulerUtilization = Chart{ + ID: "average_scheduler_utilization", + Title: "Average Scheduler Utilization", + Units: "percentage", + Fam: "erlang vm", + Ctx: "vernemq.average_scheduler_utilization", + Type: module.Area, + Dims: Dims{ + {ID: metricSystemUtilization, Name: "utilization"}, + }, + } + chartSchedulerUtilization = Chart{ + ID: "scheduler_utilization", + Title: "Scheduler Utilization", + Units: "percentage", + Fam: "erlang vm", + Type: module.Stacked, + Ctx: "vernemq.system_utilization_scheduler", + } + chartSystemProcesses = Chart{ + ID: "system_processes", + Title: "Erlang Processes", + Units: "processes", + Fam: "erlang vm", + Ctx: "vernemq.system_processes", + Dims: Dims{ + {ID: metricSystemProcessCount, Name: "processes"}, + }, + } + chartSystemReductions = Chart{ + ID: "system_reductions", + Title: "Reductions", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.system_reductions", + Dims: Dims{ + {ID: metricSystemReductions, Name: "reductions", Algo: module.Incremental}, + }, + } + chartSystemContextSwitches = Chart{ + ID: "system_context_switches", + Title: "Context Switches", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.system_context_switches", + Dims: Dims{ + {ID: metricSystemContextSwitches, Name: "context switches", Algo: module.Incremental}, + }, + } + chartSystemIO = Chart{ + ID: "system_io", + Title: "Received and Sent Traffic through Ports", + Units: "kilobits/s", + Fam: "erlang vm", + Ctx: "vernemq.system_io", + 
Type: module.Area, + Dims: Dims{ + {ID: metricSystemIOIn, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, + {ID: metricSystemIOOut, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, + }, + } + chartSystemRunQueue = Chart{ + ID: "system_run_queue", + Title: "Processes that are Ready to Run on All Run-Queues", + Units: "processes", + Fam: "erlang vm", + Ctx: "vernemq.system_run_queue", + Dims: Dims{ + {ID: metricSystemRunQueue, Name: "ready"}, + }, + } + chartSystemGCCount = Chart{ + ID: "system_gc_count", + Title: "GC Count", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.system_gc_count", + Dims: Dims{ + {ID: metricSystemGCCount, Name: "gc", Algo: module.Incremental}, + }, + } + chartSystemGCWordsReclaimed = Chart{ + ID: "system_gc_words_reclaimed", + Title: "GC Words Reclaimed", + Units: "ops/s", + Fam: "erlang vm", + Ctx: "vernemq.system_gc_words_reclaimed", + Dims: Dims{ + {ID: metricSystemWordsReclaimedByGC, Name: "words reclaimed", Algo: module.Incremental}, + }, + } + chartSystemMemoryAllocated = Chart{ + ID: "system_allocated_memory", + Title: "Memory Allocated by the Erlang Processes and by the Emulator", + Units: "KiB", + Fam: "erlang vm", + Ctx: "vernemq.system_allocated_memory", + Type: module.Stacked, + Dims: Dims{ + {ID: metricVMMemoryProcesses, Name: "processes", Div: 1024}, + {ID: metricVMMemorySystem, Name: "system", Div: 1024}, + }, + } +) + +// Bandwidth +var ( + chartBandwidth = Chart{ + ID: "bandwidth", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "vernemq.bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: metricBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, + {ID: metricBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, + }, + } +) + +// Retain +var ( + chartRetainMessages = Chart{ + ID: "retain_messages", + Title: "Stored Retained Messages", + Units: "messages", + Fam: "retain", + Ctx: "vernemq.retain_messages", + Dims: Dims{ + {ID: metricRetainMessages, Name: "messages"}, + }, + } + chartRetainMemoryUsage = Chart{ + ID: "retain_memory", + Title: "Stored Retained Messages Memory Usage", + Units: "KiB", + Fam: "retain", + Ctx: "vernemq.retain_memory", + Type: module.Area, + Dims: Dims{ + {ID: metricRetainMemory, Name: "used", Div: 1024}, + }, + } +) + +// Cluster +var ( + chartClusterCommunicationBandwidth = Chart{ + ID: "cluster_bandwidth", + Title: "Communication with Other Cluster Nodes", + Units: "kilobits/s", + Fam: "cluster", + Ctx: "vernemq.cluster_bandwidth", + Type: module.Area, + Dims: Dims{ + {ID: metricClusterBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024}, + {ID: metricClusterBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024}, + }, + } + chartClusterCommunicationDropped = Chart{ + ID: "cluster_dropped", + Title: "Traffic Dropped During Communication with Other Cluster Nodes", + Units: "kilobits/s", + Fam: "cluster", + Type: module.Area, + Ctx: "vernemq.cluster_dropped", + Dims: Dims{ + {ID: metricClusterBytesDropped, Name: "dropped", Algo: module.Incremental, Mul: 8, Div: 1024}, + }, + } + chartNetSplitUnresolved = Chart{ + ID: "netsplit_unresolved", + Title: "Unresolved Netsplits", + Units: "netsplits", + Fam: "cluster", + Ctx: "vernemq.netsplit_unresolved", + Dims: Dims{ + {ID: "netsplit_unresolved", Name: "unresolved"}, + }, + } + chartNetSplits = Chart{ + ID: "netsplit", + Title: "Netsplits", + Units: "netsplits/s", + Fam: "cluster", + Ctx: "vernemq.netsplits", + Type: module.Stacked, + 
Dims: Dims{ + {ID: metricNetSplitResolved, Name: "resolved", Algo: module.Incremental}, + {ID: metricNetSplitDetected, Name: "detected", Algo: module.Incremental}, + }, + } +) + +// AUTH +var ( + chartMQTTv5AUTH = Chart{ + ID: "mqtt_auth", + Title: "v5 AUTH", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.mqtt_auth", + Dims: Dims{ + {ID: metricAUTHReceived, Name: "received", Algo: module.Incremental}, + {ID: metricAUTHSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5AUTHReceivedReason = Chart{ + ID: "mqtt_auth_received_reason", + Title: "v5 AUTH Received by Reason", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.mqtt_auth_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricAUTHReceived, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv5AUTHSentReason = Chart{ + ID: "mqtt_auth_sent_reason", + Title: "v5 AUTH Sent by Reason", + Units: "packets/s", + Fam: "mqtt auth", + Ctx: "vernemq.mqtt_auth_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricAUTHSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } +) + +// CONNECT +var ( + chartMQTTv3v5CONNECT = Chart{ + ID: "mqtt_connect", + Title: "v3/v5 CONNECT and CONNACK", + Units: "packets/s", + Fam: "mqtt connect", + Ctx: "vernemq.mqtt_connect", + Dims: Dims{ + {ID: metricCONNECTReceived, Name: "CONNECT", Algo: module.Incremental}, + {ID: metricCONNACKSent, Name: "CONNACK", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv3v5CONNACKSentReason = Chart{ + ID: "mqtt_connack_sent_reason", + Title: "v3/v5 CONNACK Sent by Reason", + Units: "packets/s", + Fam: "mqtt connect", + Ctx: "vernemq.mqtt_connack_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricCONNACKSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } +) + +// DISCONNECT +var ( + chartMQTTv3v5DISCONNECT = Chart{ + ID: "mqtt_disconnect", + Title: "v3/v5 DISCONNECT", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.mqtt_disconnect", + Dims: Dims{ + {ID: metricDISCONNECTReceived, Name: "received", Algo: module.Incremental}, + {ID: metricDISCONNECTSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5DISCONNECTReceivedReason = Chart{ + ID: "mqtt_disconnect_received_reason", + Title: "v5 DISCONNECT Received by Reason", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.mqtt_disconnect_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricDISCONNECTReceived, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental}, + }, + } + chartMQTTv5DISCONNECTSentReason = Chart{ + ID: "mqtt_disconnect_sent_reason", + Title: "v5 DISCONNECT Sent by Reason", + Units: "packets/s", + Fam: "mqtt disconnect", + Ctx: "vernemq.mqtt_disconnect_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricDISCONNECTSent, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental}, + }, + } +) + +// SUBSCRIBE +var ( + chartMQTTv3v5SUBSCRIBE = Chart{ + ID: "mqtt_subscribe", + Title: "v3/v5 SUBSCRIBE and SUBACK", + Units: "packets/s", + Fam: "mqtt subscribe", + Ctx: "vernemq.mqtt_subscribe", + Dims: Dims{ + {ID: metricSUBSCRIBEReceived, Name: "SUBSCRIBE", Algo: module.Incremental}, + {ID: metricSUBACKSent, Name: "SUBACK", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv3v5SUBSCRIBEError = Chart{ + ID: "mqtt_subscribe_error", + Title: "v3/v5 Failed SUBSCRIBE Operations due to a Netsplit", + Units: "ops/s", + Fam: "mqtt 
subscribe", + Ctx: "vernemq.mqtt_subscribe_error", + Dims: Dims{ + {ID: metricSUBSCRIBEError, Name: "failed", Algo: module.Incremental}, + }, + } + chartMQTTv3v5SUBSCRIBEAuthError = Chart{ + ID: "mqtt_subscribe_auth_error", + Title: "v3/v5 Unauthorized SUBSCRIBE Attempts", + Units: "attempts/s", + Fam: "mqtt subscribe", + Ctx: "vernemq.mqtt_subscribe_auth_error", + Dims: Dims{ + {ID: metricSUBSCRIBEAuthError, Name: "unauth", Algo: module.Incremental}, + }, + } +) + +// UNSUBSCRIBE +var ( + chartMQTTv3v5UNSUBSCRIBE = Chart{ + ID: "mqtt_unsubscribe", + Title: "v3/v5 UNSUBSCRIBE and UNSUBACK", + Units: "packets/s", + Fam: "mqtt unsubscribe", + Ctx: "vernemq.mqtt_unsubscribe", + Dims: Dims{ + {ID: metricUNSUBSCRIBEReceived, Name: "UNSUBSCRIBE", Algo: module.Incremental}, + {ID: metricUNSUBACKSent, Name: "UNSUBACK", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv3v5UNSUBSCRIBEError = Chart{ + ID: "mqtt_unsubscribe_error", + Title: "v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit", + Units: "ops/s", + Fam: "mqtt unsubscribe", + Ctx: "vernemq.mqtt_unsubscribe_error", + Dims: Dims{ + {ID: metricUNSUBSCRIBEError, Name: "failed", Algo: module.Incremental}, + }, + } +) + +// PUBLISH +var ( + chartMQTTv3v5PUBLISH = Chart{ + ID: "mqtt_publish", + Title: "v3/v5 QoS 0,1,2 PUBLISH", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_publish", + Dims: Dims{ + {ID: metricPUBSLISHReceived, Name: "received", Algo: module.Incremental}, + {ID: metricPUBSLIHSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv3v5PUBLISHErrors = Chart{ + ID: "mqtt_publish_errors", + Title: "v3/v5 Failed PUBLISH Operations due to a Netsplit", + Units: "ops/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_publish_errors", + Dims: Dims{ + {ID: metricPUBLISHError, Name: "failed", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBLISHAuthErrors = Chart{ + ID: "mqtt_publish_auth_errors", + Title: "v3/v5 Unauthorized PUBLISH Attempts", + Units: "attempts/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_publish_auth_errors", + Type: module.Area, + Dims: Dims{ + {ID: metricPUBLISHAuthError, Name: "unauth", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBACK = Chart{ + ID: "mqtt_puback", + Title: "v3/v5 QoS 1 PUBACK", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_puback", + Dims: Dims{ + {ID: metricPUBACKReceived, Name: "received", Algo: module.Incremental}, + {ID: metricPUBACKSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5PUBACKReceivedReason = Chart{ + ID: "mqtt_puback_received_reason", + Title: "v5 PUBACK QoS 1 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_puback_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBACKReceived, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv5PUBACKSentReason = Chart{ + ID: "mqtt_puback_sent_reason", + Title: "v5 PUBACK QoS 1 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_puback_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBACKSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBACKUnexpected = Chart{ + ID: "mqtt_puback_unexpected", + Title: "v3/v5 PUBACK QoS 1 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_puback_invalid_error", + Dims: Dims{ + {ID: metricPUBACKInvalid, Name: "unexpected", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBREC = 
Chart{ + ID: "mqtt_pubrec", + Title: "v3/v5 PUBREC QoS 2", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrec", + Dims: Dims{ + {ID: metricPUBRECReceived, Name: "received", Algo: module.Incremental}, + {ID: metricPUBRECSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5PUBRECReceivedReason = Chart{ + ID: "mqtt_pubrec_received_reason", + Title: "v5 PUBREC QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrec_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBRECReceived, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv5PUBRECSentReason = Chart{ + ID: "mqtt_pubrec_sent_reason", + Title: "v5 PUBREC QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrec_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBRECSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv3PUBRECUnexpected = Chart{ + ID: "mqtt_pubrec_unexpected", + Title: "v3 PUBREC QoS 2 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrec_invalid_error", + Dims: Dims{ + {ID: metricPUBRECInvalid, Name: "unexpected", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBREL = Chart{ + ID: "mqtt_pubrel", + Title: "v3/v5 PUBREL QoS 2", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrel", + Dims: Dims{ + {ID: metricPUBRELReceived, Name: "received", Algo: module.Incremental}, + {ID: metricPUBRELSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5PUBRELReceivedReason = Chart{ + ID: "mqtt_pubrel_received_reason", + Title: "v5 PUBREL QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrel_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBRELReceived, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv5PUBRELSentReason = Chart{ + ID: "mqtt_pubrel_sent_reason", + Title: "v5 PUBREL QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubrel_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBRELSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBCOMP = Chart{ + ID: "mqtt_pubcomp", + Title: "v3/v5 PUBCOMP QoS 2", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubcom", + Dims: Dims{ + {ID: metricPUBCOMPReceived, Name: "received", Algo: module.Incremental}, + {ID: metricPUBCOMPSent, Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + chartMQTTv5PUBCOMPReceivedReason = Chart{ + ID: "mqtt_pubcomp_received_reason", + Title: "v5 PUBCOMP QoS 2 Received by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubcomp_received_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBCOMPReceived, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv5PUBCOMPSentReason = Chart{ + ID: "mqtt_pubcomp_sent_reason", + Title: "v5 PUBCOMP QoS 2 Sent by Reason", + Units: "packets/s", + Fam: "mqtt publish", + Ctx: "vernemq.mqtt_pubcomp_sent_reason", + Type: module.Stacked, + Dims: Dims{ + {ID: join(metricPUBCOMPSent, "success"), Name: "success", Algo: module.Incremental}, + }, + } + chartMQTTv3v5PUBCOMPUnexpected = Chart{ + ID: "mqtt_pubcomp_unexpected", + Title: "v3/v5 PUBCOMP QoS 2 Received Unexpected Messages", + Units: "messages/s", + Fam: "mqtt publish", + 
Ctx: "vernemq.mqtt_pubcomp_invalid_error", + Dims: Dims{ + {ID: metricPUNCOMPInvalid, Name: "unexpected", Algo: module.Incremental}, + }, + } +) + +// PING +var ( + chartMQTTv3v5PING = Chart{ + ID: "mqtt_ping", + Title: "v3/v5 PING", + Units: "packets/s", + Fam: "mqtt ping", + Ctx: "vernemq.mqtt_ping", + Dims: Dims{ + {ID: metricPINGREQReceived, Name: "PINGREQ", Algo: module.Incremental}, + {ID: metricPINGRESPSent, Name: "PINGRESP", Algo: module.Incremental, Mul: -1}, + }, + } +) + +var ( + chartUptime = Chart{ + ID: "node_uptime", + Title: "Node Uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "vernemq.node_uptime", + Dims: Dims{ + {ID: metricSystemWallClock, Name: "time", Div: 1000}, + }, + } +) + +func (v *VerneMQ) notifyNewScheduler(name string) { + if v.cache.hasP(name) { + return + } + + id := chartSchedulerUtilization.ID + num := name[len("system_utilization_scheduler_"):] + + v.addAbsDimToChart(id, name, num) +} + +func (v *VerneMQ) notifyNewReason(name, reason string) { + if reason == "success" || reason == "normal_disconnect" { + return + } + key := join(name, reason) + if v.cache.hasP(key) { + return + } + + var chart Chart + switch name { + case metricAUTHReceived: + chart = chartMQTTv5AUTHReceivedReason + case metricAUTHSent: + chart = chartMQTTv5AUTHSentReason + case metricCONNACKSent: + chart = chartMQTTv3v5CONNACKSentReason + case metricDISCONNECTReceived: + chart = chartMQTTv5DISCONNECTReceivedReason + case metricDISCONNECTSent: + chart = chartMQTTv5DISCONNECTSentReason + case metricPUBACKReceived: + chart = chartMQTTv5PUBACKReceivedReason + case metricPUBACKSent: + chart = chartMQTTv5PUBACKSentReason + case metricPUBRECReceived: + chart = chartMQTTv5PUBRECReceivedReason + case metricPUBRECSent: + chart = chartMQTTv5PUBRECSentReason + case metricPUBRELReceived: + chart = chartMQTTv5PUBRELReceivedReason + case metricPUBRELSent: + chart = chartMQTTv5PUBRELSentReason + case metricPUBCOMPReceived: + chart = chartMQTTv5PUBCOMPReceivedReason + case metricPUBCOMPSent: + chart = chartMQTTv5PUBCOMPSentReason + default: + v.Warningf("unknown metric name, wont be added to the charts: '%s'", name) + return + } + + v.addIncDimToChart(chart.ID, key, reason) +} + +func (v *VerneMQ) addAbsDimToChart(chartID, dimID, dimName string) { + v.addDimToChart(chartID, dimID, dimName, false) +} + +func (v *VerneMQ) addIncDimToChart(chartID, dimID, dimName string) { + v.addDimToChart(chartID, dimID, dimName, true) +} + +func (v *VerneMQ) addDimToChart(chartID, dimID, dimName string, inc bool) { + chart := v.Charts().Get(chartID) + if chart == nil { + v.Warningf("add '%s' dim: couldn't find '%s' chart", dimID, chartID) + return + } + + dim := &Dim{ID: dimID, Name: dimName} + if inc { + dim.Algo = module.Incremental + } + + if err := chart.AddDim(dim); err != nil { + v.Warningf("add '%s' dim: %v", dimID, err) + return + } + chart.MarkNotCreated() +} diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/collect.go b/src/go/collectors/go.d.plugin/modules/vernemq/collect.go new file mode 100644 index 00000000000000..999be3e5fdb8e9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/collect.go @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import ( + "errors" + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +func isValidVerneMQMetrics(pms prometheus.Series) bool { + return pms.FindByName(metricPUBLISHError).Len() > 0 && pms.FindByName(metricRouterSubscriptions).Len() > 0 +} + +func 
(v *VerneMQ) collect() (map[string]int64, error) { + pms, err := v.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + if !isValidVerneMQMetrics(pms) { + return nil, errors.New("returned metrics aren't VerneMQ metrics") + } + + mx := v.collectVerneMQ(pms) + + return stm.ToMap(mx), nil +} + +func (v *VerneMQ) collectVerneMQ(pms prometheus.Series) map[string]float64 { + mx := make(map[string]float64) + collectSockets(mx, pms) + collectQueues(mx, pms) + collectSubscriptions(mx, pms) + v.collectErlangVM(mx, pms) + collectBandwidth(mx, pms) + collectRetain(mx, pms) + collectCluster(mx, pms) + collectUptime(mx, pms) + + v.collectAUTH(mx, pms) + v.collectCONNECT(mx, pms) + v.collectDISCONNECT(mx, pms) + v.collectSUBSCRIBE(mx, pms) + v.collectUNSUBSCRIBE(mx, pms) + v.collectPUBLISH(mx, pms) + v.collectPING(mx, pms) + v.collectMQTTInvalidMsgSize(mx, pms) + return mx +} + +func (v *VerneMQ) collectCONNECT(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricCONNECTReceived, + metricCONNACKSent, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectDISCONNECT(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricDISCONNECTReceived, + metricDISCONNECTSent, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectPUBLISH(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricPUBACKReceived, + metricPUBACKSent, + metricPUBACKInvalid, + + metricPUBCOMPReceived, + metricPUBCOMPSent, + metricPUNCOMPInvalid, + + metricPUBSLISHReceived, + metricPUBSLIHSent, + metricPUBLISHError, + metricPUBLISHAuthError, + + metricPUBRECReceived, + metricPUBRECSent, + metricPUBRECInvalid, + + metricPUBRELReceived, + metricPUBRELSent, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectSUBSCRIBE(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricSUBSCRIBEReceived, + metricSUBACKSent, + metricSUBSCRIBEError, + metricSUBSCRIBEAuthError, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectUNSUBSCRIBE(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricUNSUBSCRIBEReceived, + metricUNSUBACKSent, + metricUNSUBSCRIBEError, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectPING(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricPINGREQReceived, + metricPINGRESPSent, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectAUTH(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricAUTHReceived, + metricAUTHSent, + ) + v.collectMQTT(mx, pms) +} + +func (v *VerneMQ) collectMQTTInvalidMsgSize(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByName(metricMQTTInvalidMsgSizeError) + v.collectMQTT(mx, pms) +} + +func collectSockets(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricSocketClose, + metricSocketCloseTimeout, + metricSocketError, + metricSocketOpen, + metricClientKeepaliveExpired, + ) + collectNonMQTT(mx, pms) + mx["open_sockets"] = mx[metricSocketOpen] - mx[metricSocketClose] +} + +func collectQueues(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricQueueInitializedFromStorage, + metricQueueMessageDrop, + metricQueueMessageExpired, + metricQueueMessageIn, + metricQueueMessageOut, + metricQueueMessageUnhandled, + metricQueueProcesses, + metricQueueSetup, + metricQueueTeardown, + ) + collectNonMQTT(mx, pms) +} + +func collectSubscriptions(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + 
metricRouterMatchesLocal, + metricRouterMatchesRemote, + metricRouterMemory, + metricRouterSubscriptions, + ) + collectNonMQTT(mx, pms) +} + +func (v *VerneMQ) collectErlangVM(mx map[string]float64, pms prometheus.Series) { + v.collectSchedulersUtilization(mx, pms) + pms = pms.FindByNames( + metricSystemContextSwitches, + metricSystemGCCount, + metricSystemIOIn, + metricSystemIOOut, + metricSystemProcessCount, + metricSystemReductions, + metricSystemRunQueue, + metricSystemUtilization, + metricSystemWordsReclaimedByGC, + metricVMMemoryProcesses, + metricVMMemorySystem, + ) + collectNonMQTT(mx, pms) +} + +func (v *VerneMQ) collectSchedulersUtilization(mx map[string]float64, pms prometheus.Series) { + for _, pm := range pms { + if isSchedulerUtilizationMetric(pm) { + mx[pm.Name()] += pm.Value + v.notifyNewScheduler(pm.Name()) + } + } +} + +func collectBandwidth(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricBytesReceived, + metricBytesSent, + ) + collectNonMQTT(mx, pms) +} + +func collectRetain(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricRetainMemory, + metricRetainMessages, + ) + collectNonMQTT(mx, pms) +} + +func collectCluster(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByNames( + metricClusterBytesDropped, + metricClusterBytesReceived, + metricClusterBytesSent, + metricNetSplitDetected, + metricNetSplitResolved, + ) + collectNonMQTT(mx, pms) + mx["netsplit_unresolved"] = mx[metricNetSplitDetected] - mx[metricNetSplitResolved] +} + +func collectUptime(mx map[string]float64, pms prometheus.Series) { + pms = pms.FindByName(metricSystemWallClock) + collectNonMQTT(mx, pms) +} + +func collectNonMQTT(mx map[string]float64, pms prometheus.Series) { + for _, pm := range pms { + mx[pm.Name()] += pm.Value + } +} + +func (v *VerneMQ) collectMQTT(mx map[string]float64, pms prometheus.Series) { + for _, pm := range pms { + if !isMQTTMetric(pm) { + continue + } + version := versionLabelValue(pm) + if version == "" { + continue + } + + mx[pm.Name()] += pm.Value + mx[join(pm.Name(), "v", version)] += pm.Value + + if reason := reasonCodeLabelValue(pm); reason != "" { + mx[join(pm.Name(), reason)] += pm.Value + mx[join(pm.Name(), "v", version, reason)] += pm.Value + + v.notifyNewReason(pm.Name(), reason) + } + } +} + +func isMQTTMetric(pm prometheus.SeriesSample) bool { + return strings.HasPrefix(pm.Name(), "mqtt_") +} + +func isSchedulerUtilizationMetric(pm prometheus.SeriesSample) bool { + return strings.HasPrefix(pm.Name(), "system_utilization_scheduler_") +} + +func reasonCodeLabelValue(pm prometheus.SeriesSample) string { + if v := pm.Labels.Get("reason_code"); v != "" { + return v + } + // "mqtt_connack_sent" v4 has return_code + return pm.Labels.Get("return_code") +} + +func versionLabelValue(pm prometheus.SeriesSample) string { + return pm.Labels.Get("mqtt_version") +} + +func join(a, b string, rest ...string) string { + v := a + "_" + b + switch len(rest) { + case 0: + return v + default: + return join(v, rest[0], rest[1:]...) 
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/config_schema.json b/src/go/collectors/go.d.plugin/modules/vernemq/config_schema.json
new file mode 100644
index 00000000000000..f21bab4515317f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/vernemq/config_schema.json
@@ -0,0 +1,59 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/vernemq job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "url": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    },
+    "username": {
+      "type": "string"
+    },
+    "password": {
+      "type": "string"
+    },
+    "proxy_url": {
+      "type": "string"
+    },
+    "proxy_username": {
+      "type": "string"
+    },
+    "proxy_password": {
+      "type": "string"
+    },
+    "headers": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      }
+    },
+    "not_follow_redirects": {
+      "type": "boolean"
+    },
+    "tls_ca": {
+      "type": "string"
+    },
+    "tls_cert": {
+      "type": "string"
+    },
+    "tls_key": {
+      "type": "string"
+    },
+    "insecure_skip_verify": {
+      "type": "boolean"
+    }
+  },
+  "required": [
+    "name",
+    "url"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/integrations/vernemq.md b/src/go/collectors/go.d.plugin/modules/vernemq/integrations/vernemq.md
new file mode 100644
index 00000000000000..af6b24a3290aeb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/vernemq/integrations/vernemq.md
@@ -0,0 +1,306 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/vernemq/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/vernemq/metadata.yaml"
+sidebar_label: "VerneMQ"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# VerneMQ
+
+
+<img src="https://netdata.cloud/img/vernemq.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: vernemq
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors VerneMQ instances.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per VerneMQ instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
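+
+Where a chart lists "a dimension per reason", the collector adds dimensions dynamically from the `reason_code` label (or `return_code`, used by MQTT v4 CONNACK) of the scraped Prometheus series, skipping the `success` and `normal_disconnect` reasons. A short illustration, using series taken from the sample exposition bundled with this module's tests (node label omitted):
+
+```
+mqtt_connack_sent{mqtt_version="4",return_code="success"} 338948
+mqtt_connack_sent{mqtt_version="4",return_code="bad_username_or_password"} 4
+```
+
+Here `bad_username_or_password` becomes a dimension of the `vernemq.mqtt_connack_sent_reason` chart, while successful CONNACKs are counted by the `connack` dimension of `vernemq.mqtt_connect`.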
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| vernemq.sockets | open | sockets |
+| vernemq.socket_operations | open, close | sockets/s |
+| vernemq.client_keepalive_expired | closed | sockets/s |
+| vernemq.socket_close_timeout | closed | sockets/s |
+| vernemq.socket_errors | errors | errors/s |
+| vernemq.queue_processes | queue_processes | queue processes |
+| vernemq.queue_processes_operations | setup, teardown | events/s |
+| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |
+| vernemq.queue_messages | received, sent | messages/s |
+| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |
+| vernemq.router_subscriptions | subscriptions | subscriptions |
+| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |
+| vernemq.router_memory | used | KiB |
+| vernemq.average_scheduler_utilization | utilization | percentage |
+| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |
+| vernemq.system_processes | processes | processes |
+| vernemq.system_reductions | reductions | ops/s |
+| vernemq.system_context_switches | context_switches | ops/s |
+| vernemq.system_io | received, sent | kilobits/s |
+| vernemq.system_run_queue | ready | processes |
+| vernemq.system_gc_count | gc | ops/s |
+| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |
+| vernemq.system_allocated_memory | processes, system | KiB |
+| vernemq.bandwidth | received, sent | kilobits/s |
+| vernemq.retain_messages | messages | messages |
+| vernemq.retain_memory | used | KiB |
+| vernemq.cluster_bandwidth | received, sent | kilobits/s |
+| vernemq.cluster_dropped | dropped | kilobits/s |
+| vernemq.netsplit_unresolved | unresolved | netsplits |
+| vernemq.netsplits | resolved, detected | netsplits/s |
+| vernemq.mqtt_auth | received, sent | packets/s |
+| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_connect | connect, connack | packets/s |
+| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_disconnect | received, sent | packets/s |
+| vernemq.mqtt_disconnect_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_subscribe | subscribe, suback | packets/s |
+| vernemq.mqtt_subscribe_error | failed | ops/s |
+| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |
+| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |
+| vernemq.mqtt_unsubscribe_error | mqtt_unsubscribe_error | ops/s |
+| vernemq.mqtt_publish | received, sent | packets/s |
+| vernemq.mqtt_publish_errors | failed | ops/s |
+| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |
+| vernemq.mqtt_puback | received, sent | packets/s |
+| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_pubrec | received, sent | packets/s |
+| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_pubrel | received, sent | packets/s |
+| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcomp | received, sent | packets/s |
+| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_ping | pingreq, pingresp | packets/s |
+| vernemq.node_uptime | time | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |
+| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |
+| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |
+| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |
+| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |
+| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |
+| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |
+| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |
+| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received non-normal v5 DISCONNECT packets in the last minute |
+| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent non-normal v5 DISCONNECT packets in the last minute |
+| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |
+| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |
+| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |
+| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |
+| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |
+| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |
+| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |
+| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |
+| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |
+| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/vernemq.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/vernemq.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | http://127.0.0.1:8888/metrics | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +An example configuration. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8888/metrics + +``` +</details> + +##### HTTP authentication + +Local instance with basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8888/metrics + username: username + password: password + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + url: http://127.0.0.1:8888/metrics + + - name: remote + url: http://203.0.113.10:8888/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m vernemq + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/metadata.yaml b/src/go/collectors/go.d.plugin/modules/vernemq/metadata.yaml new file mode 100644 index 00000000000000..e9f62aa9aef4ce --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/metadata.yaml @@ -0,0 +1,670 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-vernemq + plugin_name: go.d.plugin + module_name: vernemq + monitored_instance: + name: VerneMQ + link: https://vernemq.com + icon_filename: vernemq.svg + categories: + - data-collection.message-brokers + keywords: + - vernemq + - message brokers + related_resources: + integrations: + list: [] + info_provided_to_referring_integrations: + description: "" + most_popular: false + overview: + data_collection: + metrics_description: | + This collector monitors VerneMQ instances. + method_description: "" + supported_platforms: + include: [] + exclude: [] + multi_instance: true + additional_permissions: + description: "" + default_behavior: + auto_detection: + description: "" + limits: + description: "" + performance_impact: + description: "" + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/vernemq.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: url + description: Server URL. + default_value: http://127.0.0.1:8888/metrics + required: true + - name: timeout + description: HTTP request timeout. + default_value: 1 + required: false + - name: username + description: Username for basic HTTP authentication. + default_value: "" + required: false + - name: password + description: Password for basic HTTP authentication. + default_value: "" + required: false + - name: proxy_url + description: Proxy URL. + default_value: "" + required: false + - name: proxy_username + description: Username for proxy basic HTTP authentication. + default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: GET + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: false + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: false + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. 
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: An example configuration.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8888/metrics
+            - name: HTTP authentication
+              description: Local instance with basic HTTP authentication.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8888/metrics
+                    username: username
+                    password: password
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define multiple jobs, their names must be unique.
+
+                Local and remote instances.
+              config: |
+                jobs:
+                  - name: local
+                    url: http://127.0.0.1:8888/metrics
+
+                  - name: remote
+                    url: http://203.0.113.10:8888/metrics
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: vernemq_socket_errors
+        metric: vernemq.socket_errors
+        info: number of socket errors in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_queue_message_drop
+        metric: vernemq.queue_undelivered_messages
+        info: number of dropped messages due to full queues in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_queue_message_expired
+        metric: vernemq.queue_undelivered_messages
+        info: number of messages which expired before delivery in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_queue_message_unhandled
+        metric: vernemq.queue_undelivered_messages
+        info: "number of unhandled messages (connections with clean session=true) in the last minute"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_average_scheduler_utilization
+        metric: vernemq.average_scheduler_utilization
+        info: average scheduler utilization over the last 10 minutes
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_cluster_dropped
+        metric: vernemq.cluster_dropped
+        info: amount of traffic dropped during communication with the cluster nodes in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_netsplits
+        metric: vernemq.netsplits
+        info: "number of detected netsplits (split brain situation) in the last minute"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_mqtt_connack_sent_reason_unsuccessful
+        metric: vernemq.mqtt_connack_sent_reason
+        info: number of sent unsuccessful v3/v5 CONNACK packets in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_mqtt_disconnect_received_reason_not_normal
+        metric: vernemq.mqtt_disconnect_received_reason
+        info: number of received non-normal v5 DISCONNECT packets in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_mqtt_disconnect_sent_reason_not_normal
+        metric: vernemq.mqtt_disconnect_sent_reason
+        info: number of sent non-normal v5 DISCONNECT packets in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_mqtt_subscribe_error
+        metric: vernemq.mqtt_subscribe_error
+        info: number of failed v3/v5 SUBSCRIBE operations in the last minute
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+      - name: vernemq_mqtt_subscribe_auth_error
metric: vernemq.mqtt_subscribe_auth_error + info: number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_unsubscribe_error + metric: vernemq.mqtt_unsubscribe_error + info: number of failed v3/v5 UNSUBSCRIBE operations in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_publish_errors + metric: vernemq.mqtt_publish_errors + info: number of failed v3/v5 PUBLISH operations in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_publish_auth_errors + metric: vernemq.mqtt_publish_auth_errors + info: number of unauthorized v3/v5 PUBLISH attempts in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_puback_received_reason_unsuccessful + metric: vernemq.mqtt_puback_received_reason + info: number of received unsuccessful v5 PUBACK packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_puback_sent_reason_unsuccessful + metric: vernemq.mqtt_puback_sent_reason + info: number of sent unsuccessful v5 PUBACK packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_puback_unexpected + metric: vernemq.mqtt_puback_invalid_error + info: number of received unexpected v3/v5 PUBACK packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubrec_received_reason_unsuccessful + metric: vernemq.mqtt_pubrec_received_reason + info: number of received unsuccessful v5 PUBREC packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubrec_sent_reason_unsuccessful + metric: vernemq.mqtt_pubrec_sent_reason + info: number of sent unsuccessful v5 PUBREC packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubrec_invalid_error + metric: vernemq.mqtt_pubrec_invalid_error + info: number of received unexpected v3 PUBREC packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubrel_received_reason_unsuccessful + metric: vernemq.mqtt_pubrel_received_reason + info: number of received unsuccessful v5 PUBREL packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubrel_sent_reason_unsuccessful + metric: vernemq.mqtt_pubrel_sent_reason + info: number of sent unsuccessful v5 PUBREL packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubcomp_received_reason_unsuccessful + metric: vernemq.mqtt_pubcomp_received_reason + info: number of received unsuccessful v5 PUBCOMP packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - name: vernemq_mqtt_pubcomp_sent_reason_unsuccessful + metric: vernemq.mqtt_pubcomp_sent_reason + info: number of sent unsuccessful v5 PUBCOMP packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + - 
name: vernemq_mqtt_pubcomp_unexpected + metric: vernemq.mqtt_pubcomp_invalid_error + info: number of received unexpected v3/v5 PUBCOMP packets in the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: vernemq.sockets + description: Open Sockets + unit: sockets + chart_type: line + dimensions: + - name: open + - name: vernemq.socket_operations + description: Socket Open and Close Events + unit: sockets/s + chart_type: line + dimensions: + - name: open + - name: close + - name: vernemq.client_keepalive_expired + description: Closed Sockets due to Keepalive Time Expired + unit: sockets/s + chart_type: line + dimensions: + - name: closed + - name: vernemq.socket_close_timeout + description: Closed Sockets due to no CONNECT Frame On Time + unit: sockets/s + chart_type: line + dimensions: + - name: closed + - name: vernemq.socket_errors + description: Socket Errors + unit: errors/s + chart_type: line + dimensions: + - name: errors + - name: vernemq.queue_processes + description: Living Queues in an Online or an Offline State + unit: queue processes + chart_type: line + dimensions: + - name: queue_processes + - name: vernemq.queue_processes_operations + description: Queue Processes Setup and Teardown Events + unit: events/s + chart_type: line + dimensions: + - name: setup + - name: teardown + - name: vernemq.queue_process_init_from_storage + description: Queue Processes Initialized from Offline Storage + unit: queue processes/s + chart_type: line + dimensions: + - name: queue_processes + - name: vernemq.queue_messages + description: Received and Sent PUBLISH Messages + unit: messages/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: vernemq.queue_undelivered_messages + description: Undelivered PUBLISH Messages + unit: messages/s + chart_type: stacked + dimensions: + - name: dropped + - name: expired + - name: unhandled + - name: vernemq.router_subscriptions + description: Subscriptions in the Routing Table + unit: subscriptions + chart_type: line + dimensions: + - name: subscriptions + - name: vernemq.router_matched_subscriptions + description: Matched Subscriptions + unit: subscriptions/s + chart_type: line + dimensions: + - name: local + - name: remote + - name: vernemq.router_memory + description: Routing Table Memory Usage + unit: KiB + chart_type: area + dimensions: + - name: used + - name: vernemq.average_scheduler_utilization + description: Average Scheduler Utilization + unit: percentage + chart_type: area + dimensions: + - name: utilization + - name: vernemq.system_utilization_scheduler + description: Scheduler Utilization + unit: percentage + chart_type: stacked + dimensions: + - name: a dimension per scheduler + - name: vernemq.system_processes + description: Erlang Processes + unit: processes + chart_type: line + dimensions: + - name: processes + - name: vernemq.system_reductions + description: Reductions + unit: ops/s + chart_type: line + dimensions: + - name: reductions + - name: vernemq.system_context_switches + description: Context Switches + unit: ops/s + chart_type: line + dimensions: + - name: context_switches + - name: vernemq.system_io + description: Received and Sent Traffic through Ports + unit: kilobits/s + chart_type: area + dimensions: + - name: received + - name: sent + - 
name: vernemq.system_run_queue
+              description: Processes that are Ready to Run on All Run-Queues
+              unit: processes
+              chart_type: line
+              dimensions:
+                - name: ready
+            - name: vernemq.system_gc_count
+              description: GC Count
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: gc
+            - name: vernemq.system_gc_words_reclaimed
+              description: GC Words Reclaimed
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: words_reclaimed
+            - name: vernemq.system_allocated_memory
+              description: Memory Allocated by the Erlang Processes and by the Emulator
+              unit: KiB
+              chart_type: stacked
+              dimensions:
+                - name: processes
+                - name: system
+            - name: vernemq.bandwidth
+              description: Bandwidth
+              unit: kilobits/s
+              chart_type: area
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.retain_messages
+              description: Stored Retained Messages
+              unit: messages
+              chart_type: line
+              dimensions:
+                - name: messages
+            - name: vernemq.retain_memory
+              description: Stored Retained Messages Memory Usage
+              unit: KiB
+              chart_type: area
+              dimensions:
+                - name: used
+            - name: vernemq.cluster_bandwidth
+              description: Communication with Other Cluster Nodes
+              unit: kilobits/s
+              chart_type: area
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.cluster_dropped
+              description: Traffic Dropped During Communication with Other Cluster Nodes
+              unit: kilobits/s
+              chart_type: area
+              dimensions:
+                - name: dropped
+            - name: vernemq.netsplit_unresolved
+              description: Unresolved Netsplits
+              unit: netsplits
+              chart_type: line
+              dimensions:
+                - name: unresolved
+            - name: vernemq.netsplits
+              description: Netsplits
+              unit: netsplits/s
+              chart_type: stacked
+              dimensions:
+                - name: resolved
+                - name: detected
+            - name: vernemq.mqtt_auth
+              description: v5 AUTH
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_auth_received_reason
+              description: v5 AUTH Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_auth_sent_reason
+              description: v5 AUTH Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_connect
+              description: v3/v5 CONNECT and CONNACK
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: connect
+                - name: connack
+            - name: vernemq.mqtt_connack_sent_reason
+              description: v3/v5 CONNACK Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_disconnect
+              description: v3/v5 DISCONNECT
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_disconnect_received_reason
+              description: v5 DISCONNECT Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_disconnect_sent_reason
+              description: v5 DISCONNECT Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_subscribe
+              description: v3/v5 SUBSCRIBE and SUBACK
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: subscribe
+                - name: suback
+            - name: vernemq.mqtt_subscribe_error
+              description: v3/v5 Failed SUBSCRIBE Operations due to a Netsplit
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: failed
+            - name: vernemq.mqtt_subscribe_auth_error
+              description: v3/v5 Unauthorized SUBSCRIBE Attempts
+              unit: attempts/s
+              chart_type: line
+              dimensions:
+                - name: unauth
+            - name: vernemq.mqtt_unsubscribe
+              description: v3/v5 UNSUBSCRIBE and UNSUBACK
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: unsubscribe
+                - name: unsuback
+            - name: vernemq.mqtt_unsubscribe_error
+              description: v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: mqtt_unsubscribe_error
+            - name: vernemq.mqtt_publish
+              description: v3/v5 QoS 0,1,2 PUBLISH
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_publish_errors
+              description: v3/v5 Failed PUBLISH Operations due to a Netsplit
+              unit: ops/s
+              chart_type: line
+              dimensions:
+                - name: failed
+            - name: vernemq.mqtt_publish_auth_errors
+              description: v3/v5 Unauthorized PUBLISH Attempts
+              unit: attempts/s
+              chart_type: area
+              dimensions:
+                - name: unauth
+            - name: vernemq.mqtt_puback
+              description: v3/v5 QoS 1 PUBACK
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_puback_received_reason
+              description: v5 PUBACK QoS 1 Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_puback_sent_reason
+              description: v5 PUBACK QoS 1 Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_puback_invalid_error
+              description: v3/v5 PUBACK QoS 1 Received Unexpected Messages
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: unexpected
+            - name: vernemq.mqtt_pubrec
+              description: v3/v5 PUBREC QoS 2
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_pubrec_received_reason
+              description: v5 PUBREC QoS 2 Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubrec_sent_reason
+              description: v5 PUBREC QoS 2 Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubrec_invalid_error
+              description: v3 PUBREC QoS 2 Received Unexpected Messages
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: unexpected
+            - name: vernemq.mqtt_pubrel
+              description: v3/v5 PUBREL QoS 2
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_pubrel_received_reason
+              description: v5 PUBREL QoS 2 Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubrel_sent_reason
+              description: v5 PUBREL QoS 2 Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubcomp
+              description: v3/v5 PUBCOMP QoS 2
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: vernemq.mqtt_pubcomp_received_reason
+              description: v5 PUBCOMP QoS 2 Received by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubcomp_sent_reason
+              description: v5 PUBCOMP QoS 2 Sent by Reason
+              unit: packets/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per reason
+            - name: vernemq.mqtt_pubcomp_invalid_error
+              description: v3/v5 PUBCOMP QoS 2 Received Unexpected Messages
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: unexpected
+            - name: vernemq.mqtt_ping
+              description: v3/v5 PING
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: pingreq
+                - name: pingresp
+ - name: vernemq.node_uptime + description: Node Uptime + unit: seconds + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/metrics.go b/src/go/collectors/go.d.plugin/modules/vernemq/metrics.go new file mode 100644 index 00000000000000..863cc6355da77d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/metrics.go @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +// Source Code Metrics: +// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.erl +// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.hrl + +// Source Code FSM: +// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt_fsm.erl +// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt5_fsm.erl + +// MQTT Packet Types: +// - v4: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/errata01/os/mqtt-v3.1.1-errata01-os-complete.html#_Toc442180834 +// - v5: https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901019 + +// Erlang VM: +// - http://erlang.org/documentation/doc-5.7.1/erts-5.7.1/doc/html/erlang.html + +// Not used metrics (https://docs.vernemq.com/monitoring/introduction): +// - "mqtt_connack_accepted_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "mqtt_connack_unacceptable_protocol_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "mqtt_connack_identifier_rejected_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "mqtt_connack_server_unavailable_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "mqtt_connack_bad_credentials_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "mqtt_connack_not_authorized_sent" // v4, not populated, "mqtt_connack_sent" used instead +// - "system_exact_reductions" +// - "system_runtime" +// - "vm_memory_atom" +// - "vm_memory_atom_used" +// - "vm_memory_binary" +// - "vm_memory_code" +// - "vm_memory_ets" +// - "vm_memory_processes_used" +// - "vm_memory_total" + +// -----------------------------------------------MQTT------------------------------------------------------------------ +const ( + // AUTH + metricAUTHReceived = "mqtt_auth_received" // v5 has 'reason_code' label + metricAUTHSent = "mqtt_auth_sent" // v5 has 'reason_code' label + + // CONNECT + metricCONNECTReceived = "mqtt_connect_received" // v4, v5 + metricCONNACKSent = "mqtt_connack_sent" // v4 has 'return_code' label, v5 has 'reason_code' + + // SUBSCRIBE + metricSUBSCRIBEReceived = "mqtt_subscribe_received" // v4, v5 + metricSUBACKSent = "mqtt_suback_sent" // v4, v5 + metricSUBSCRIBEError = "mqtt_subscribe_error" // v4, v5 + metricSUBSCRIBEAuthError = "mqtt_subscribe_auth_error" // v4, v5 + + // UNSUBSCRIBE + metricUNSUBSCRIBEReceived = "mqtt_unsubscribe_received" // v4, v5 + metricUNSUBACKSent = "mqtt_unsuback_sent" // v4, v5 + metricUNSUBSCRIBEError = "mqtt_unsubscribe_error" // v4, v5 + + // PUBLISH + metricPUBSLISHReceived = "mqtt_publish_received" // v4, v5 + metricPUBSLIHSent = "mqtt_publish_sent" // v4, v5 + metricPUBLISHError = "mqtt_publish_error" // v4, v5 + metricPUBLISHAuthError = "mqtt_publish_auth_error" // v4, v5 + + // Publish acknowledgment (QoS 1) + metricPUBACKReceived = "mqtt_puback_received" // v4, v5 has 'reason_code' label + metricPUBACKSent = "mqtt_puback_sent" // v4, v5 has 'reason_code' label + metricPUBACKInvalid = "mqtt_puback_invalid_error" // v4, v5 + + // Publish received (QoS 2 delivery part 
1) + metricPUBRECReceived = "mqtt_pubrec_received" // v4, v5 has 'reason_code' label + metricPUBRECSent = "mqtt_pubrec_sent" // v4, v5 has 'reason_code' label + metricPUBRECInvalid = "mqtt_pubrec_invalid_error" // v4 + + // Publish release (QoS 2 delivery part 2) + metricPUBRELReceived = "mqtt_pubrel_received" // v4, v5 has 'reason_code' label + metricPUBRELSent = "mqtt_pubrel_sent" // v4, v5 has 'reason_code' label + + // Publish complete (QoS 2 delivery part 3) + metricPUBCOMPReceived = "mqtt_pubcomp_received" // v4, v5 has 'reason_code' label + metricPUBCOMPSent = "mqtt_pubcomp_sent" // v4, v5 has 'reason_code' label + metricPUNCOMPInvalid = "mqtt_pubcomp_invalid_error" // v4, v5 + + // PING + metricPINGREQReceived = "mqtt_pingreq_received" // v4, v5 + metricPINGRESPSent = "mqtt_pingresp_sent" // v4, v5 + + // DISCONNECT + metricDISCONNECTReceived = "mqtt_disconnect_received" // v4, v5 has 'reason_code' label + metricDISCONNECTSent = "mqtt_disconnect_sent" // v5 has 'reason_code' label + + // Misc + metricMQTTInvalidMsgSizeError = "mqtt_invalid_msg_size_error" // v4, v5 +) + +const ( + // Sockets + metricSocketOpen = "socket_open" + metricSocketClose = "socket_close" + metricSocketError = "socket_error" + metricSocketCloseTimeout = "socket_close_timeout" + metricClientKeepaliveExpired = "client_keepalive_expired" // v4, v5 + + // Queues + metricQueueProcesses = "queue_processes" + metricQueueSetup = "queue_setup" + metricQueueTeardown = "queue_teardown" + metricQueueMessageIn = "queue_message_in" + metricQueueMessageOut = "queue_message_out" + metricQueueMessageDrop = "queue_message_drop" + metricQueueMessageExpired = "queue_message_expired" + metricQueueMessageUnhandled = "queue_message_unhandled" + metricQueueInitializedFromStorage = "queue_initialized_from_storage" + + // Subscriptions + metricRouterMatchesLocal = "router_matches_local" + metricRouterMatchesRemote = "router_matches_remote" + metricRouterMemory = "router_memory" + metricRouterSubscriptions = "router_subscriptions" + + // Erlang VM + metricSystemUtilization = "system_utilization" + metricSystemProcessCount = "system_process_count" + metricSystemReductions = "system_reductions" + metricSystemContextSwitches = "system_context_switches" + metricSystemIOIn = "system_io_in" + metricSystemIOOut = "system_io_out" + metricSystemRunQueue = "system_run_queue" + metricSystemGCCount = "system_gc_count" + metricSystemWordsReclaimedByGC = "system_words_reclaimed_by_gc" + metricVMMemoryProcesses = "vm_memory_processes" + metricVMMemorySystem = "vm_memory_system" + + // Bandwidth + metricBytesReceived = "bytes_received" + metricBytesSent = "bytes_sent" + + // Retain + metricRetainMemory = "retain_memory" + metricRetainMessages = "retain_messages" + + // Cluster + metricClusterBytesDropped = "cluster_bytes_dropped" + metricClusterBytesReceived = "cluster_bytes_received" + metricClusterBytesSent = "cluster_bytes_sent" + metricNetSplitDetected = "netsplit_detected" + metricNetSplitResolved = "netsplit_resolved" + + // Uptime + metricSystemWallClock = "system_wallclock" +) diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt b/src/go/collectors/go.d.plugin/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt new file mode 100644 index 00000000000000..2e98a3e94134f6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt @@ -0,0 +1,416 @@ +# HELP socket_open The number of times an MQTT socket has been opened. 
+# TYPE socket_open counter +socket_open{node="VerneMQ@172.17.0.2"} 338956 +# HELP socket_close The number of times an MQTT socket has been closed. +# TYPE socket_close counter +socket_close{node="VerneMQ@172.17.0.2"} 338956 +# HELP socket_close_timeout The number of times VerneMQ closed an MQTT socket due to no CONNECT frame has been received on time. +# TYPE socket_close_timeout counter +socket_close_timeout{node="VerneMQ@172.17.0.2"} 0 +# HELP socket_error The total number of socket errors that have occurred. +# TYPE socket_error counter +socket_error{node="VerneMQ@172.17.0.2"} 0 +# HELP bytes_received The total number of bytes received. +# TYPE bytes_received counter +bytes_received{node="VerneMQ@172.17.0.2"} 36796908 +# HELP bytes_sent The total number of bytes sent. +# TYPE bytes_sent counter +bytes_sent{node="VerneMQ@172.17.0.2"} 23361693 +# HELP mqtt_connect_received The number of CONNECT packets received. +# TYPE mqtt_connect_received counter +mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 338956 +# HELP mqtt_publish_received The number of PUBLISH packets received. +# TYPE mqtt_publish_received counter +mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537088 +# HELP mqtt_puback_received The number of PUBACK packets received. +# TYPE mqtt_puback_received counter +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525694 +# HELP mqtt_pubrec_received The number of PUBREC packets received. +# TYPE mqtt_pubrec_received counter +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubrel_received The number of PUBREL packets received. +# TYPE mqtt_pubrel_received counter +mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubcomp_received The number of PUBCOMP packets received. +# TYPE mqtt_pubcomp_received counter +mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_subscribe_received The number of SUBSCRIBE packets received. +# TYPE mqtt_subscribe_received counter +mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122 +# HELP mqtt_unsubscribe_received The number of UNSUBSCRIBE packets received. +# TYPE mqtt_unsubscribe_received counter +mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108 +# HELP mqtt_pingreq_received The number of PINGREQ packets received. +# TYPE mqtt_pingreq_received counter +mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205 +# HELP mqtt_disconnect_received The number of DISCONNECT packets received. +# TYPE mqtt_disconnect_received counter +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 107 +# HELP mqtt_connack_accepted_sent The number of times a connection has been accepted. +# TYPE mqtt_connack_accepted_sent counter +mqtt_connack_accepted_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_connack_unacceptable_protocol_sent The number of times the broker is not able to support the requested protocol. +# TYPE mqtt_connack_unacceptable_protocol_sent counter +mqtt_connack_unacceptable_protocol_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_connack_identifier_rejected_sent The number of times a client was rejected due to a unacceptable identifier. +# TYPE mqtt_connack_identifier_rejected_sent counter +mqtt_connack_identifier_rejected_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_connack_server_unavailable_sent The number of times a client was rejected due the the broker being unavailable. 
+# TYPE mqtt_connack_server_unavailable_sent counter +mqtt_connack_server_unavailable_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_connack_bad_credentials_sent The number of times a client sent bad credentials. +# TYPE mqtt_connack_bad_credentials_sent counter +mqtt_connack_bad_credentials_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_connack_not_authorized_sent The number of times a client was rejected due to insufficient authorization. +# TYPE mqtt_connack_not_authorized_sent counter +mqtt_connack_not_authorized_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_publish_sent The number of PUBLISH packets sent. +# TYPE mqtt_publish_sent counter +mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525721 +# HELP mqtt_puback_sent The number of PUBACK packets sent. +# TYPE mqtt_puback_sent counter +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537068 +# HELP mqtt_pubrec_sent The number of PUBREC packets sent. +# TYPE mqtt_pubrec_sent counter +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubrel_sent The number of PUBREL packets sent. +# TYPE mqtt_pubrel_sent counter +mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubcomp_sent The number of PUBCOMP packets sent. +# TYPE mqtt_pubcomp_sent counter +mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_suback_sent The number of SUBACK packets sent. +# TYPE mqtt_suback_sent counter +mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122 +# HELP mqtt_unsuback_sent The number of UNSUBACK packets sent. +# TYPE mqtt_unsuback_sent counter +mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108 +# HELP mqtt_pingresp_sent The number of PINGRESP packets sent. +# TYPE mqtt_pingresp_sent counter +mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205 +# HELP mqtt_publish_auth_error The number of unauthorized publish attempts. +# TYPE mqtt_publish_auth_error counter +mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_subscribe_auth_error The number of unauthorized subscription attempts. +# TYPE mqtt_subscribe_auth_error counter +mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_invalid_msg_size_error The number of packages exceeding the maximum allowed size. +# TYPE mqtt_invalid_msg_size_error counter +mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_puback_invalid_error The number of unexpected PUBACK messages received. +# TYPE mqtt_puback_invalid_error counter +mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubrec_invalid_error The number of unexpected PUBREC messages received. +# TYPE mqtt_pubrec_invalid_error counter +mqtt_pubrec_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_pubcomp_invalid_error The number of unexpected PUBCOMP messages received. +# TYPE mqtt_pubcomp_invalid_error counter +mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_publish_error The number of times a PUBLISH operation failed due to a netsplit. +# TYPE mqtt_publish_error counter +mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_subscribe_error The number of times a SUBSCRIBE operation failed due to a netsplit. 
+# TYPE mqtt_subscribe_error counter +mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP mqtt_unsubscribe_error The number of times an UNSUBSCRIBE operation failed due to a netsplit. +# TYPE mqtt_unsubscribe_error counter +mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0 +# HELP client_keepalive_expired The number of clients which failed to communicate within the keepalive time period. +# TYPE client_keepalive_expired counter +client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="4"} 1 +mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0 +# HELP queue_setup The number of times a MQTT queue process has been started. +# TYPE queue_setup counter +queue_setup{node="VerneMQ@172.17.0.2"} 338948 +# HELP queue_initialized_from_storage The number of times a MQTT queue process has been initialized from offline storage. +# TYPE queue_initialized_from_storage counter +queue_initialized_from_storage{node="VerneMQ@172.17.0.2"} 0 +# HELP queue_teardown The number of times a MQTT queue process has been terminated. +# TYPE queue_teardown counter +queue_teardown{node="VerneMQ@172.17.0.2"} 338948 +# HELP queue_message_drop The number of messages dropped due to full queues. +# TYPE queue_message_drop counter +queue_message_drop{node="VerneMQ@172.17.0.2"} 0 +# HELP queue_message_expired The number of messages which expired before delivery. +# TYPE queue_message_expired counter +queue_message_expired{node="VerneMQ@172.17.0.2"} 0 +# HELP queue_message_unhandled The number of unhandled messages when connecting with clean session=true. +# TYPE queue_message_unhandled counter +queue_message_unhandled{node="VerneMQ@172.17.0.2"} 1 +# HELP queue_message_in The number of PUBLISH packets received by MQTT queue processes. +# TYPE queue_message_in counter +queue_message_in{node="VerneMQ@172.17.0.2"} 525722 +# HELP queue_message_out The number of PUBLISH packets sent from MQTT queue processes. +# TYPE queue_message_out counter +queue_message_out{node="VerneMQ@172.17.0.2"} 525721 +# HELP client_expired Not in use (deprecated) +# TYPE client_expired counter +client_expired{node="VerneMQ@172.17.0.2"} 0 +# HELP cluster_bytes_received The number of bytes received from other cluster nodes. 
+# TYPE cluster_bytes_received counter +cluster_bytes_received{node="VerneMQ@172.17.0.2"} 0 +# HELP cluster_bytes_sent The number of bytes send to other cluster nodes. +# TYPE cluster_bytes_sent counter +cluster_bytes_sent{node="VerneMQ@172.17.0.2"} 0 +# HELP cluster_bytes_dropped The number of bytes dropped while sending data to other cluster nodes. +# TYPE cluster_bytes_dropped counter +cluster_bytes_dropped{node="VerneMQ@172.17.0.2"} 0 +# HELP router_matches_local The number of matched local subscriptions. +# TYPE router_matches_local counter +router_matches_local{node="VerneMQ@172.17.0.2"} 525722 +# HELP router_matches_remote The number of matched remote subscriptions. +# TYPE router_matches_remote counter +router_matches_remote{node="VerneMQ@172.17.0.2"} 0 +# HELP mqtt_connack_sent The number of CONNACK packets sent. +# TYPE mqtt_connack_sent counter +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="success"} 338948 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="unsupported_protocol_version"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="client_identifier_not_valid"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="server_unavailable"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="bad_username_or_password"} 4 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="not_authorized"} 4 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="disconnect_with_will_msg"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0 +mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +# HELP mqtt_disconnect_sent The number of DISCONNECT packets sent. 
+# TYPE mqtt_disconnect_sent counter +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_shutting_down"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="keep_alive_timeout"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="session_taken_over"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_filter_invalid"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="shared_subs_not_supported"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="max_connect_time"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="subscription_ids_not_supported"} 0 +mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="wildcard_subs_not_supported"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unsupported_protocol_version"} 0 
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="client_identifier_not_valid"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_username_or_password"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_unavailable"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="banned"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_authentication_method"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0 +mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0 
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0 +mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0 +mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0 +mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0 +mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0 +mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0 +# HELP mqtt_auth_sent The number of AUTH packets sent. +# TYPE mqtt_auth_sent counter +mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0 +mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0 +# HELP mqtt_auth_received The number of AUTH packets received. +# TYPE mqtt_auth_received counter +mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0 +mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0 +mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0 +# HELP queue_processes The number of MQTT queue processes. +# TYPE queue_processes gauge +queue_processes{node="VerneMQ@172.17.0.2"} 0 +# HELP retain_memory The number of bytes used for storing retained messages. +# TYPE retain_memory gauge +retain_memory{node="VerneMQ@172.17.0.2"} 11344 +# HELP retain_messages The number of currently stored retained messages. 
+# TYPE retain_messages gauge +retain_messages{node="VerneMQ@172.17.0.2"} 0 +# HELP router_memory The number of bytes used by the routing table. +# TYPE router_memory gauge +router_memory{node="VerneMQ@172.17.0.2"} 12752 +# HELP router_subscriptions The number of subscriptions in the routing table. +# TYPE router_subscriptions gauge +router_subscriptions{node="VerneMQ@172.17.0.2"} 0 +# HELP netsplit_resolved The number of resolved netsplits. +# TYPE netsplit_resolved counter +netsplit_resolved{node="VerneMQ@172.17.0.2"} 0 +# HELP netsplit_detected The number of detected netsplits. +# TYPE netsplit_detected counter +netsplit_detected{node="VerneMQ@172.17.0.2"} 0 +# HELP system_utilization_scheduler_8 Scheduler 8 utilization (percentage) +# TYPE system_utilization_scheduler_8 gauge +system_utilization_scheduler_8{node="VerneMQ@172.17.0.2"} 0 +# HELP system_utilization_scheduler_7 Scheduler 7 utilization (percentage) +# TYPE system_utilization_scheduler_7 gauge +system_utilization_scheduler_7{node="VerneMQ@172.17.0.2"} 0 +# HELP system_utilization_scheduler_6 Scheduler 6 utilization (percentage) +# TYPE system_utilization_scheduler_6 gauge +system_utilization_scheduler_6{node="VerneMQ@172.17.0.2"} 0 +# HELP system_utilization_scheduler_5 Scheduler 5 utilization (percentage) +# TYPE system_utilization_scheduler_5 gauge +system_utilization_scheduler_5{node="VerneMQ@172.17.0.2"} 0 +# HELP system_utilization_scheduler_4 Scheduler 4 utilization (percentage) +# TYPE system_utilization_scheduler_4 gauge +system_utilization_scheduler_4{node="VerneMQ@172.17.0.2"} 19 +# HELP system_utilization_scheduler_3 Scheduler 3 utilization (percentage) +# TYPE system_utilization_scheduler_3 gauge +system_utilization_scheduler_3{node="VerneMQ@172.17.0.2"} 14 +# HELP system_utilization_scheduler_2 Scheduler 2 utilization (percentage) +# TYPE system_utilization_scheduler_2 gauge +system_utilization_scheduler_2{node="VerneMQ@172.17.0.2"} 8 +# HELP system_utilization_scheduler_1 Scheduler 1 utilization (percentage) +# TYPE system_utilization_scheduler_1 gauge +system_utilization_scheduler_1{node="VerneMQ@172.17.0.2"} 34 +# HELP system_utilization The average system (scheduler) utilization (percentage). +# TYPE system_utilization gauge +system_utilization{node="VerneMQ@172.17.0.2"} 9 +# HELP vm_memory_ets The amount of memory allocated for ETS tables. +# TYPE vm_memory_ets gauge +vm_memory_ets{node="VerneMQ@172.17.0.2"} 6065944 +# HELP vm_memory_code The amount of memory allocated for code. +# TYPE vm_memory_code gauge +vm_memory_code{node="VerneMQ@172.17.0.2"} 11372082 +# HELP vm_memory_binary The amount of memory allocated for binaries. +# TYPE vm_memory_binary gauge +vm_memory_binary{node="VerneMQ@172.17.0.2"} 1293672 +# HELP vm_memory_atom_used The amount of memory used by atoms. +# TYPE vm_memory_atom_used gauge +vm_memory_atom_used{node="VerneMQ@172.17.0.2"} 755998 +# HELP vm_memory_atom The amount of memory allocated for atoms. +# TYPE vm_memory_atom gauge +vm_memory_atom{node="VerneMQ@172.17.0.2"} 768953 +# HELP vm_memory_system The amount of memory allocated for the emulator. +# TYPE vm_memory_system gauge +vm_memory_system{node="VerneMQ@172.17.0.2"} 27051848 +# HELP vm_memory_processes_used The amount of memory used by processes. +# TYPE vm_memory_processes_used gauge +vm_memory_processes_used{node="VerneMQ@172.17.0.2"} 8671232 +# HELP vm_memory_processes The amount of memory allocated for processes. 
+# TYPE vm_memory_processes gauge +vm_memory_processes{node="VerneMQ@172.17.0.2"} 8673288 +# HELP vm_memory_total The total amount of memory allocated. +# TYPE vm_memory_total gauge +vm_memory_total{node="VerneMQ@172.17.0.2"} 35725136 +# HELP system_process_count The number of Erlang processes. +# TYPE system_process_count gauge +system_process_count{node="VerneMQ@172.17.0.2"} 329 +# HELP system_wallclock The number of milli-seconds passed since the node was started. +# TYPE system_wallclock counter +system_wallclock{node="VerneMQ@172.17.0.2"} 163457858 +# HELP system_runtime The sum of the runtime for all threads in the Erlang runtime system. +# TYPE system_runtime counter +system_runtime{node="VerneMQ@172.17.0.2"} 1775355 +# HELP system_run_queue The total number of processes and ports ready to run on all run-queues. +# TYPE system_run_queue gauge +system_run_queue{node="VerneMQ@172.17.0.2"} 0 +# HELP system_reductions The number of reductions performed in the VM since the node was started. +# TYPE system_reductions counter +system_reductions{node="VerneMQ@172.17.0.2"} 3857458067 +# HELP system_io_out The total number of bytes sent through ports. +# TYPE system_io_out counter +system_io_out{node="VerneMQ@172.17.0.2"} 961001488 +# HELP system_io_in The total number of bytes received through ports. +# TYPE system_io_in counter +system_io_in{node="VerneMQ@172.17.0.2"} 68998296 +# HELP system_words_reclaimed_by_gc The number of words reclaimed by the garbage collector. +# TYPE system_words_reclaimed_by_gc counter +system_words_reclaimed_by_gc{node="VerneMQ@172.17.0.2"} 7158470019 +# HELP system_gc_count The number of garbage collections performed. +# TYPE system_gc_count counter +system_gc_count{node="VerneMQ@172.17.0.2"} 12189976 +# HELP system_exact_reductions The exact number of reductions performed. +# TYPE system_exact_reductions counter +system_exact_reductions{node="VerneMQ@172.17.0.2"} 3854024620 +# HELP system_context_switches The total number of context switches. 
+# TYPE system_context_switches counter +system_context_switches{node="VerneMQ@172.17.0.2"} 39088198 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/testdata/non_vernemq.txt b/src/go/collectors/go.d.plugin/modules/vernemq/testdata/non_vernemq.txt new file mode 100644 index 00000000000000..f5f0ae082c69fd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/testdata/non_vernemq.txt @@ -0,0 +1,27 @@ +# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize +# TYPE wmi_os_process_memory_limix_bytes gauge +wmi_os_process_memory_limix_bytes 1.40737488224256e+14 +# HELP wmi_os_processes OperatingSystem.NumberOfProcesses +# TYPE wmi_os_processes gauge +wmi_os_processes 124 +# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses +# TYPE wmi_os_processes_limit gauge +wmi_os_processes_limit 4.294967295e+09 +# HELP wmi_os_time OperatingSystem.LocalDateTime +# TYPE wmi_os_time gauge +wmi_os_time 1.57804974e+09 +# HELP wmi_os_timezone OperatingSystem.LocalDateTime +# TYPE wmi_os_timezone gauge +wmi_os_timezone{timezone="MSK"} 1 +# HELP wmi_os_users OperatingSystem.NumberOfUsers +# TYPE wmi_os_users gauge +wmi_os_users 2 +# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize +# TYPE wmi_os_virtual_memory_bytes gauge +wmi_os_virtual_memory_bytes 5.770891264e+09 +# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory +# TYPE wmi_os_virtual_memory_free_bytes gauge +wmi_os_virtual_memory_free_bytes 3.76489984e+09 +# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize +# TYPE wmi_os_visible_memory_bytes gauge +wmi_os_visible_memory_bytes 4.294496256e+09 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/vernemq.go b/src/go/collectors/go.d.plugin/modules/vernemq/vernemq.go new file mode 100644 index 00000000000000..d86f3b1185063f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/vernemq.go @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import ( + _ "embed" + "errors" + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("vernemq", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *VerneMQ { + config := Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8888/metrics", + }, + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second}, + }, + }, + } + + return &VerneMQ{ + Config: config, + charts: charts.Copy(), + cache: make(cache), + } +} + +type ( + Config struct { + web.HTTP `yaml:",inline"` + } + + VerneMQ struct { + module.Base + Config `yaml:",inline"` + + prom prometheus.Prometheus + charts *Charts + cache cache + } + + cache map[string]bool +) + +func (c cache) hasP(v string) bool { ok := c[v]; c[v] = true; return ok } + +func (v VerneMQ) validateConfig() error { + if v.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (v *VerneMQ) initClient() error { + client, err := web.NewHTTPClient(v.Client) + if err != nil { + return err + } + + v.prom = prometheus.New(client, v.Request) + return nil +} + +func (v *VerneMQ) Init() bool { + if err := v.validateConfig(); err != nil { + v.Errorf("error on validating config: %v", err) + return false + } + if err := 
v.initClient(); err != nil { + v.Errorf("error on initializing client: %v", err) + return false + } + return true +} + +func (v *VerneMQ) Check() bool { + return len(v.Collect()) > 0 +} + +func (v *VerneMQ) Charts() *Charts { + return v.charts +} + +func (v *VerneMQ) Collect() map[string]int64 { + mx, err := v.collect() + if err != nil { + v.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (VerneMQ) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/vernemq/vernemq_test.go b/src/go/collectors/go.d.plugin/modules/vernemq/vernemq_test.go new file mode 100644 index 00000000000000..5f07553cd7a310 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vernemq/vernemq_test.go @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + metricsV1101MQTTv5, _ = os.ReadFile("testdata/metrics-v1.10.1-mqtt5.txt") + invalidMetrics, _ = os.ReadFile("testdata/non_vernemq.txt") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, metricsV1101MQTTv5) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestVerneMQ_Init(t *testing.T) { + verneMQ := prepareVerneMQ() + + assert.True(t, verneMQ.Init()) +} + +func TestVerneMQ_Init_ReturnsFalseIfURLIsNotSet(t *testing.T) { + verneMQ := prepareVerneMQ() + verneMQ.URL = "" + + assert.False(t, verneMQ.Init()) +} + +func TestVerneMQ_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { + verneMQ := prepareVerneMQ() + verneMQ.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, verneMQ.Init()) +} + +func TestVerneMQ_Check(t *testing.T) { + verneMQ, srv := prepareClientServerV1101(t) + defer srv.Close() + + assert.True(t, verneMQ.Check()) +} + +func TestVerneMQ_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { + verneMQ := prepareVerneMQ() + require.True(t, verneMQ.Init()) + + assert.False(t, verneMQ.Check()) +} + +func TestVerneMQ_Check_ReturnsFalseIfMetricsAreNotVerneMQ(t *testing.T) { + verneMQ, srv := prepareClientServerNotVerneMQ(t) + defer srv.Close() + require.True(t, verneMQ.Init()) + + assert.False(t, verneMQ.Check()) +} + +func TestVerneMQ_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestVerneMQ_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestVerneMQ_Collect(t *testing.T) { + verneMQ, srv := prepareClientServerV1101(t) + defer srv.Close() + + collected := verneMQ.Collect() + assert.Equal(t, v1101ExpectedMetrics, collected) + testCharts(t, verneMQ, collected) +} + +func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { + verneMQ := prepareVerneMQ() + require.True(t, verneMQ.Init()) + + assert.Nil(t, verneMQ.Collect()) +} + +func TestVerneMQ_Collect_ReturnsNilIfMetricsAreNotVerneMQ(t *testing.T) { + verneMQ, srv := prepareClientServerNotVerneMQ(t) + defer srv.Close() + + assert.Nil(t, verneMQ.Collect()) +} + +func TestVerneMQ_Collect_ReturnsNilIfReceiveInvalidResponse(t *testing.T) { + verneMQ, ts := prepareClientServerInvalid(t) + defer ts.Close() + + assert.Nil(t, verneMQ.Collect()) +} + +func TestVerneMQ_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) { + verneMQ, ts := prepareClientServerResponse404(t) + defer ts.Close() + + assert.Nil(t, verneMQ.Collect()) +} + +func testCharts(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) { + 
ensureCollectedHasAllChartsDimsVarsIDs(t, verneMQ, collected) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) { + for _, chart := range *verneMQ.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareVerneMQ() *VerneMQ { + verneMQ := New() + verneMQ.URL = "http://127.0.0.1:38001/metrics" + return verneMQ +} + +func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(metricsV1101MQTTv5) + })) + + verneMQ := New() + verneMQ.URL = ts.URL + require.True(t, verneMQ.Init()) + + return verneMQ, ts +} + +func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(invalidMetrics) + })) + + verneMQ := New() + verneMQ.URL = ts.URL + require.True(t, verneMQ.Init()) + + return verneMQ, ts +} + +func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + verneMQ := New() + verneMQ.URL = ts.URL + require.True(t, verneMQ.Init()) + + return verneMQ, ts +} + +func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) { + t.Helper() + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + verneMQ := New() + verneMQ.URL = ts.URL + require.True(t, verneMQ.Init()) + return verneMQ, ts +} + +var v1101ExpectedMetrics = map[string]int64{ + "bytes_received": 36796908, + "bytes_sent": 23361693, + "client_keepalive_expired": 1, + "cluster_bytes_dropped": 0, + "cluster_bytes_received": 0, + "cluster_bytes_sent": 0, + "mqtt_auth_received": 0, + "mqtt_auth_received_continue_authentication": 0, + "mqtt_auth_received_reauthenticate": 0, + "mqtt_auth_received_success": 0, + "mqtt_auth_received_v_5": 0, + "mqtt_auth_received_v_5_continue_authentication": 0, + "mqtt_auth_received_v_5_reauthenticate": 0, + "mqtt_auth_received_v_5_success": 0, + "mqtt_auth_sent": 0, + "mqtt_auth_sent_continue_authentication": 0, + "mqtt_auth_sent_reauthenticate": 0, + "mqtt_auth_sent_success": 0, + "mqtt_auth_sent_v_5": 0, + "mqtt_auth_sent_v_5_continue_authentication": 0, + "mqtt_auth_sent_v_5_reauthenticate": 0, + "mqtt_auth_sent_v_5_success": 0, + "mqtt_connack_sent": 338956, + "mqtt_connack_sent_bad_authentication_method": 0, + "mqtt_connack_sent_bad_username_or_password": 4, + "mqtt_connack_sent_banned": 0, + "mqtt_connack_sent_client_identifier_not_valid": 0, + "mqtt_connack_sent_connection_rate_exceeded": 0, + "mqtt_connack_sent_impl_specific_error": 0, + "mqtt_connack_sent_malformed_packet": 0, + "mqtt_connack_sent_not_authorized": 4, + "mqtt_connack_sent_packet_too_large": 0, + "mqtt_connack_sent_payload_format_invalid": 0, + "mqtt_connack_sent_protocol_error": 0, + "mqtt_connack_sent_qos_not_supported": 0, + "mqtt_connack_sent_quota_exceeded": 0, + "mqtt_connack_sent_retain_not_supported": 0, + 
"mqtt_connack_sent_server_busy": 0, + "mqtt_connack_sent_server_moved": 0, + "mqtt_connack_sent_server_unavailable": 0, + "mqtt_connack_sent_success": 338948, + "mqtt_connack_sent_topic_name_invalid": 0, + "mqtt_connack_sent_unspecified_error": 0, + "mqtt_connack_sent_unsupported_protocol_version": 0, + "mqtt_connack_sent_use_another_server": 0, + "mqtt_connack_sent_v_4": 338956, + "mqtt_connack_sent_v_4_bad_username_or_password": 4, + "mqtt_connack_sent_v_4_client_identifier_not_valid": 0, + "mqtt_connack_sent_v_4_not_authorized": 4, + "mqtt_connack_sent_v_4_server_unavailable": 0, + "mqtt_connack_sent_v_4_success": 338948, + "mqtt_connack_sent_v_4_unsupported_protocol_version": 0, + "mqtt_connack_sent_v_5": 0, + "mqtt_connack_sent_v_5_bad_authentication_method": 0, + "mqtt_connack_sent_v_5_bad_username_or_password": 0, + "mqtt_connack_sent_v_5_banned": 0, + "mqtt_connack_sent_v_5_client_identifier_not_valid": 0, + "mqtt_connack_sent_v_5_connection_rate_exceeded": 0, + "mqtt_connack_sent_v_5_impl_specific_error": 0, + "mqtt_connack_sent_v_5_malformed_packet": 0, + "mqtt_connack_sent_v_5_not_authorized": 0, + "mqtt_connack_sent_v_5_packet_too_large": 0, + "mqtt_connack_sent_v_5_payload_format_invalid": 0, + "mqtt_connack_sent_v_5_protocol_error": 0, + "mqtt_connack_sent_v_5_qos_not_supported": 0, + "mqtt_connack_sent_v_5_quota_exceeded": 0, + "mqtt_connack_sent_v_5_retain_not_supported": 0, + "mqtt_connack_sent_v_5_server_busy": 0, + "mqtt_connack_sent_v_5_server_moved": 0, + "mqtt_connack_sent_v_5_server_unavailable": 0, + "mqtt_connack_sent_v_5_success": 0, + "mqtt_connack_sent_v_5_topic_name_invalid": 0, + "mqtt_connack_sent_v_5_unspecified_error": 0, + "mqtt_connack_sent_v_5_unsupported_protocol_version": 0, + "mqtt_connack_sent_v_5_use_another_server": 0, + "mqtt_connect_received": 338956, + "mqtt_connect_received_v_4": 338956, + "mqtt_connect_received_v_5": 0, + "mqtt_disconnect_received": 107, + "mqtt_disconnect_received_administrative_action": 0, + "mqtt_disconnect_received_disconnect_with_will_msg": 0, + "mqtt_disconnect_received_impl_specific_error": 0, + "mqtt_disconnect_received_malformed_packet": 0, + "mqtt_disconnect_received_message_rate_too_high": 0, + "mqtt_disconnect_received_normal_disconnect": 0, + "mqtt_disconnect_received_packet_too_large": 0, + "mqtt_disconnect_received_payload_format_invalid": 0, + "mqtt_disconnect_received_protocol_error": 0, + "mqtt_disconnect_received_quota_exceeded": 0, + "mqtt_disconnect_received_receive_max_exceeded": 0, + "mqtt_disconnect_received_topic_alias_invalid": 0, + "mqtt_disconnect_received_topic_name_invalid": 0, + "mqtt_disconnect_received_unspecified_error": 0, + "mqtt_disconnect_received_v_4": 107, + "mqtt_disconnect_received_v_5": 0, + "mqtt_disconnect_received_v_5_administrative_action": 0, + "mqtt_disconnect_received_v_5_disconnect_with_will_msg": 0, + "mqtt_disconnect_received_v_5_impl_specific_error": 0, + "mqtt_disconnect_received_v_5_malformed_packet": 0, + "mqtt_disconnect_received_v_5_message_rate_too_high": 0, + "mqtt_disconnect_received_v_5_normal_disconnect": 0, + "mqtt_disconnect_received_v_5_packet_too_large": 0, + "mqtt_disconnect_received_v_5_payload_format_invalid": 0, + "mqtt_disconnect_received_v_5_protocol_error": 0, + "mqtt_disconnect_received_v_5_quota_exceeded": 0, + "mqtt_disconnect_received_v_5_receive_max_exceeded": 0, + "mqtt_disconnect_received_v_5_topic_alias_invalid": 0, + "mqtt_disconnect_received_v_5_topic_name_invalid": 0, + "mqtt_disconnect_received_v_5_unspecified_error": 0, + 
"mqtt_disconnect_sent": 0, + "mqtt_disconnect_sent_administrative_action": 0, + "mqtt_disconnect_sent_connection_rate_exceeded": 0, + "mqtt_disconnect_sent_impl_specific_error": 0, + "mqtt_disconnect_sent_keep_alive_timeout": 0, + "mqtt_disconnect_sent_malformed_packet": 0, + "mqtt_disconnect_sent_max_connect_time": 0, + "mqtt_disconnect_sent_message_rate_too_high": 0, + "mqtt_disconnect_sent_normal_disconnect": 0, + "mqtt_disconnect_sent_not_authorized": 0, + "mqtt_disconnect_sent_packet_too_large": 0, + "mqtt_disconnect_sent_payload_format_invalid": 0, + "mqtt_disconnect_sent_protocol_error": 0, + "mqtt_disconnect_sent_qos_not_supported": 0, + "mqtt_disconnect_sent_quota_exceeded": 0, + "mqtt_disconnect_sent_receive_max_exceeded": 0, + "mqtt_disconnect_sent_retain_not_supported": 0, + "mqtt_disconnect_sent_server_busy": 0, + "mqtt_disconnect_sent_server_moved": 0, + "mqtt_disconnect_sent_server_shutting_down": 0, + "mqtt_disconnect_sent_session_taken_over": 0, + "mqtt_disconnect_sent_shared_subs_not_supported": 0, + "mqtt_disconnect_sent_subscription_ids_not_supported": 0, + "mqtt_disconnect_sent_topic_alias_invalid": 0, + "mqtt_disconnect_sent_topic_filter_invalid": 0, + "mqtt_disconnect_sent_topic_name_invalid": 0, + "mqtt_disconnect_sent_unspecified_error": 0, + "mqtt_disconnect_sent_use_another_server": 0, + "mqtt_disconnect_sent_v_5": 0, + "mqtt_disconnect_sent_v_5_administrative_action": 0, + "mqtt_disconnect_sent_v_5_connection_rate_exceeded": 0, + "mqtt_disconnect_sent_v_5_impl_specific_error": 0, + "mqtt_disconnect_sent_v_5_keep_alive_timeout": 0, + "mqtt_disconnect_sent_v_5_malformed_packet": 0, + "mqtt_disconnect_sent_v_5_max_connect_time": 0, + "mqtt_disconnect_sent_v_5_message_rate_too_high": 0, + "mqtt_disconnect_sent_v_5_normal_disconnect": 0, + "mqtt_disconnect_sent_v_5_not_authorized": 0, + "mqtt_disconnect_sent_v_5_packet_too_large": 0, + "mqtt_disconnect_sent_v_5_payload_format_invalid": 0, + "mqtt_disconnect_sent_v_5_protocol_error": 0, + "mqtt_disconnect_sent_v_5_qos_not_supported": 0, + "mqtt_disconnect_sent_v_5_quota_exceeded": 0, + "mqtt_disconnect_sent_v_5_receive_max_exceeded": 0, + "mqtt_disconnect_sent_v_5_retain_not_supported": 0, + "mqtt_disconnect_sent_v_5_server_busy": 0, + "mqtt_disconnect_sent_v_5_server_moved": 0, + "mqtt_disconnect_sent_v_5_server_shutting_down": 0, + "mqtt_disconnect_sent_v_5_session_taken_over": 0, + "mqtt_disconnect_sent_v_5_shared_subs_not_supported": 0, + "mqtt_disconnect_sent_v_5_subscription_ids_not_supported": 0, + "mqtt_disconnect_sent_v_5_topic_alias_invalid": 0, + "mqtt_disconnect_sent_v_5_topic_filter_invalid": 0, + "mqtt_disconnect_sent_v_5_topic_name_invalid": 0, + "mqtt_disconnect_sent_v_5_unspecified_error": 0, + "mqtt_disconnect_sent_v_5_use_another_server": 0, + "mqtt_disconnect_sent_v_5_wildcard_subs_not_supported": 0, + "mqtt_disconnect_sent_wildcard_subs_not_supported": 0, + "mqtt_invalid_msg_size_error": 0, + "mqtt_invalid_msg_size_error_v_4": 0, + "mqtt_invalid_msg_size_error_v_5": 0, + "mqtt_pingreq_received": 205, + "mqtt_pingreq_received_v_4": 205, + "mqtt_pingreq_received_v_5": 0, + "mqtt_pingresp_sent": 205, + "mqtt_pingresp_sent_v_4": 205, + "mqtt_pingresp_sent_v_5": 0, + "mqtt_puback_invalid_error": 0, + "mqtt_puback_invalid_error_v_4": 0, + "mqtt_puback_invalid_error_v_5": 0, + "mqtt_puback_received": 525694, + "mqtt_puback_received_impl_specific_error": 0, + "mqtt_puback_received_no_matching_subscribers": 0, + "mqtt_puback_received_not_authorized": 0, + "mqtt_puback_received_packet_id_in_use": 0, + 
"mqtt_puback_received_payload_format_invalid": 0, + "mqtt_puback_received_quota_exceeded": 0, + "mqtt_puback_received_success": 0, + "mqtt_puback_received_topic_name_invalid": 0, + "mqtt_puback_received_unspecified_error": 0, + "mqtt_puback_received_v_4": 525694, + "mqtt_puback_received_v_5": 0, + "mqtt_puback_received_v_5_impl_specific_error": 0, + "mqtt_puback_received_v_5_no_matching_subscribers": 0, + "mqtt_puback_received_v_5_not_authorized": 0, + "mqtt_puback_received_v_5_packet_id_in_use": 0, + "mqtt_puback_received_v_5_payload_format_invalid": 0, + "mqtt_puback_received_v_5_quota_exceeded": 0, + "mqtt_puback_received_v_5_success": 0, + "mqtt_puback_received_v_5_topic_name_invalid": 0, + "mqtt_puback_received_v_5_unspecified_error": 0, + "mqtt_puback_sent": 537068, + "mqtt_puback_sent_impl_specific_error": 0, + "mqtt_puback_sent_no_matching_subscribers": 0, + "mqtt_puback_sent_not_authorized": 0, + "mqtt_puback_sent_packet_id_in_use": 0, + "mqtt_puback_sent_payload_format_invalid": 0, + "mqtt_puback_sent_quota_exceeded": 0, + "mqtt_puback_sent_success": 0, + "mqtt_puback_sent_topic_name_invalid": 0, + "mqtt_puback_sent_unspecified_error": 0, + "mqtt_puback_sent_v_4": 537068, + "mqtt_puback_sent_v_5": 0, + "mqtt_puback_sent_v_5_impl_specific_error": 0, + "mqtt_puback_sent_v_5_no_matching_subscribers": 0, + "mqtt_puback_sent_v_5_not_authorized": 0, + "mqtt_puback_sent_v_5_packet_id_in_use": 0, + "mqtt_puback_sent_v_5_payload_format_invalid": 0, + "mqtt_puback_sent_v_5_quota_exceeded": 0, + "mqtt_puback_sent_v_5_success": 0, + "mqtt_puback_sent_v_5_topic_name_invalid": 0, + "mqtt_puback_sent_v_5_unspecified_error": 0, + "mqtt_pubcomp_invalid_error": 0, + "mqtt_pubcomp_invalid_error_v_4": 0, + "mqtt_pubcomp_invalid_error_v_5": 0, + "mqtt_pubcomp_received": 0, + "mqtt_pubcomp_received_packet_id_not_found": 0, + "mqtt_pubcomp_received_success": 0, + "mqtt_pubcomp_received_v_4": 0, + "mqtt_pubcomp_received_v_5": 0, + "mqtt_pubcomp_received_v_5_packet_id_not_found": 0, + "mqtt_pubcomp_received_v_5_success": 0, + "mqtt_pubcomp_sent": 0, + "mqtt_pubcomp_sent_packet_id_not_found": 0, + "mqtt_pubcomp_sent_success": 0, + "mqtt_pubcomp_sent_v_4": 0, + "mqtt_pubcomp_sent_v_5": 0, + "mqtt_pubcomp_sent_v_5_packet_id_not_found": 0, + "mqtt_pubcomp_sent_v_5_success": 0, + "mqtt_publish_auth_error": 0, + "mqtt_publish_auth_error_v_4": 0, + "mqtt_publish_auth_error_v_5": 0, + "mqtt_publish_error": 0, + "mqtt_publish_error_v_4": 0, + "mqtt_publish_error_v_5": 0, + "mqtt_publish_received": 537088, + "mqtt_publish_received_v_4": 537088, + "mqtt_publish_received_v_5": 0, + "mqtt_publish_sent": 525721, + "mqtt_publish_sent_v_4": 525721, + "mqtt_publish_sent_v_5": 0, + "mqtt_pubrec_invalid_error": 0, + "mqtt_pubrec_invalid_error_v_4": 0, + "mqtt_pubrec_received": 0, + "mqtt_pubrec_received_impl_specific_error": 0, + "mqtt_pubrec_received_no_matching_subscribers": 0, + "mqtt_pubrec_received_not_authorized": 0, + "mqtt_pubrec_received_packet_id_in_use": 0, + "mqtt_pubrec_received_payload_format_invalid": 0, + "mqtt_pubrec_received_quota_exceeded": 0, + "mqtt_pubrec_received_success": 0, + "mqtt_pubrec_received_topic_name_invalid": 0, + "mqtt_pubrec_received_unspecified_error": 0, + "mqtt_pubrec_received_v_4": 0, + "mqtt_pubrec_received_v_5": 0, + "mqtt_pubrec_received_v_5_impl_specific_error": 0, + "mqtt_pubrec_received_v_5_no_matching_subscribers": 0, + "mqtt_pubrec_received_v_5_not_authorized": 0, + "mqtt_pubrec_received_v_5_packet_id_in_use": 0, + "mqtt_pubrec_received_v_5_payload_format_invalid": 0, + 
"mqtt_pubrec_received_v_5_quota_exceeded": 0, + "mqtt_pubrec_received_v_5_success": 0, + "mqtt_pubrec_received_v_5_topic_name_invalid": 0, + "mqtt_pubrec_received_v_5_unspecified_error": 0, + "mqtt_pubrec_sent": 0, + "mqtt_pubrec_sent_impl_specific_error": 0, + "mqtt_pubrec_sent_no_matching_subscribers": 0, + "mqtt_pubrec_sent_not_authorized": 0, + "mqtt_pubrec_sent_packet_id_in_use": 0, + "mqtt_pubrec_sent_payload_format_invalid": 0, + "mqtt_pubrec_sent_quota_exceeded": 0, + "mqtt_pubrec_sent_success": 0, + "mqtt_pubrec_sent_topic_name_invalid": 0, + "mqtt_pubrec_sent_unspecified_error": 0, + "mqtt_pubrec_sent_v_4": 0, + "mqtt_pubrec_sent_v_5": 0, + "mqtt_pubrec_sent_v_5_impl_specific_error": 0, + "mqtt_pubrec_sent_v_5_no_matching_subscribers": 0, + "mqtt_pubrec_sent_v_5_not_authorized": 0, + "mqtt_pubrec_sent_v_5_packet_id_in_use": 0, + "mqtt_pubrec_sent_v_5_payload_format_invalid": 0, + "mqtt_pubrec_sent_v_5_quota_exceeded": 0, + "mqtt_pubrec_sent_v_5_success": 0, + "mqtt_pubrec_sent_v_5_topic_name_invalid": 0, + "mqtt_pubrec_sent_v_5_unspecified_error": 0, + "mqtt_pubrel_received": 0, + "mqtt_pubrel_received_packet_id_not_found": 0, + "mqtt_pubrel_received_success": 0, + "mqtt_pubrel_received_v_4": 0, + "mqtt_pubrel_received_v_5": 0, + "mqtt_pubrel_received_v_5_packet_id_not_found": 0, + "mqtt_pubrel_received_v_5_success": 0, + "mqtt_pubrel_sent": 0, + "mqtt_pubrel_sent_packet_id_not_found": 0, + "mqtt_pubrel_sent_success": 0, + "mqtt_pubrel_sent_v_4": 0, + "mqtt_pubrel_sent_v_5": 0, + "mqtt_pubrel_sent_v_5_packet_id_not_found": 0, + "mqtt_pubrel_sent_v_5_success": 0, + "mqtt_suback_sent": 122, + "mqtt_suback_sent_v_4": 122, + "mqtt_suback_sent_v_5": 0, + "mqtt_subscribe_auth_error": 0, + "mqtt_subscribe_auth_error_v_4": 0, + "mqtt_subscribe_auth_error_v_5": 0, + "mqtt_subscribe_error": 0, + "mqtt_subscribe_error_v_4": 0, + "mqtt_subscribe_error_v_5": 0, + "mqtt_subscribe_received": 122, + "mqtt_subscribe_received_v_4": 122, + "mqtt_subscribe_received_v_5": 0, + "mqtt_unsuback_sent": 108, + "mqtt_unsuback_sent_v_4": 108, + "mqtt_unsuback_sent_v_5": 0, + "mqtt_unsubscribe_error": 0, + "mqtt_unsubscribe_error_v_4": 0, + "mqtt_unsubscribe_error_v_5": 0, + "mqtt_unsubscribe_received": 108, + "mqtt_unsubscribe_received_v_4": 108, + "mqtt_unsubscribe_received_v_5": 0, + "netsplit_detected": 0, + "netsplit_resolved": 0, + "netsplit_unresolved": 0, + "open_sockets": 0, + "queue_initialized_from_storage": 0, + "queue_message_drop": 0, + "queue_message_expired": 0, + "queue_message_in": 525722, + "queue_message_out": 525721, + "queue_message_unhandled": 1, + "queue_processes": 0, + "queue_setup": 338948, + "queue_teardown": 338948, + "retain_memory": 11344, + "retain_messages": 0, + "router_matches_local": 525722, + "router_matches_remote": 0, + "router_memory": 12752, + "router_subscriptions": 0, + "socket_close": 338956, + "socket_close_timeout": 0, + "socket_error": 0, + "socket_open": 338956, + "system_context_switches": 39088198, + "system_gc_count": 12189976, + "system_io_in": 68998296, + "system_io_out": 961001488, + "system_process_count": 329, + "system_reductions": 3857458067, + "system_run_queue": 0, + "system_utilization": 9, + "system_utilization_scheduler_1": 34, + "system_utilization_scheduler_2": 8, + "system_utilization_scheduler_3": 14, + "system_utilization_scheduler_4": 19, + "system_utilization_scheduler_5": 0, + "system_utilization_scheduler_6": 0, + "system_utilization_scheduler_7": 0, + "system_utilization_scheduler_8": 0, + "system_wallclock": 163457858, + 
"system_words_reclaimed_by_gc": 7158470019, + "vm_memory_processes": 8673288, + "vm_memory_system": 27051848, +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/README.md b/src/go/collectors/go.d.plugin/modules/vsphere/README.md new file mode 120000 index 00000000000000..0a6b0146ed24de --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/README.md @@ -0,0 +1 @@ +integrations/vmware_vcenter_server.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/charts.go b/src/go/collectors/go.d.plugin/modules/vsphere/charts.go new file mode 100644 index 00000000000000..16137cfd7c7e1d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/charts.go @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" +) + +const ( + prioVMCPUUtilization = module.Priority + iota + prioVmMemoryUtilization + prioVmMemoryUsage + prioVmMemorySwapUsage + prioVmMemorySwapIO + prioVmDiskIO + prioVmDiskMaxLatency + prioVmNetworkTraffic + prioVmNetworkPackets + prioVmNetworkDrops + prioVmOverallStatus + prioVmSystemUptime + + prioHostCPUUtilization + prioHostMemoryUtilization + prioHostMemoryUsage + prioHostMemorySwapIO + prioHostDiskIO + prioHostDiskMaxLatency + prioHostNetworkTraffic + prioHostNetworkPackets + prioHostNetworkDrops + prioHostNetworkErrors + prioHostOverallStatus + prioHostSystemUptime +) + +var ( + vmChartsTmpl = module.Charts{ + vmCPUUtilizationChartTmpl.Copy(), + + vmMemoryUtilizationChartTmpl.Copy(), + vmMemoryUsageChartTmpl.Copy(), + vmMemorySwapUsageChartTmpl.Copy(), + vmMemorySwapIOChartTmpl.Copy(), + + vmDiskIOChartTmpl.Copy(), + vmDiskMaxLatencyChartTmpl.Copy(), + + vmNetworkTrafficChartTmpl.Copy(), + vmNetworkPacketsChartTmpl.Copy(), + vmNetworkDropsChartTmpl.Copy(), + + vmOverallStatusChartTmpl.Copy(), + + vmSystemUptimeChartTmpl.Copy(), + } + + vmCPUUtilizationChartTmpl = module.Chart{ + ID: "%s_cpu_utilization", + Title: "Virtual Machine CPU utilization", + Units: "percentage", + Fam: "vms cpu", + Ctx: "vsphere.vm_cpu_utilization", + Priority: prioVMCPUUtilization, + Dims: module.Dims{ + {ID: "%s_cpu.usage.average", Name: "used", Div: 100}, + }, + } + + // Ref: https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html + vmMemoryUtilizationChartTmpl = module.Chart{ + ID: "%s_mem_utilization", + Title: "Virtual Machine memory utilization", + Units: "percentage", + Fam: "vms mem", + Ctx: "vsphere.vm_mem_utilization", + Priority: prioVmMemoryUtilization, + Dims: module.Dims{ + {ID: "%s_mem.usage.average", Name: "used", Div: 100}, + }, + } + vmMemoryUsageChartTmpl = module.Chart{ + ID: "%s_mem_usage", + Title: "Virtual Machine memory usage", + Units: "KiB", + Fam: "vms mem", + Ctx: "vsphere.vm_mem_usage", + Priority: prioVmMemoryUsage, + Dims: module.Dims{ + {ID: "%s_mem.granted.average", Name: "granted"}, + {ID: "%s_mem.consumed.average", Name: "consumed"}, + {ID: "%s_mem.active.average", Name: "active"}, + {ID: "%s_mem.shared.average", Name: "shared"}, + }, + } + vmMemorySwapUsageChartTmpl = module.Chart{ + ID: "%s_mem_swap_usage", + Title: "Virtual Machine VMKernel memory swap usage", + Units: "KiB", + Fam: "vms mem", + Ctx: "vsphere.vm_mem_swap_usage", + Priority: prioVmMemorySwapUsage, + Dims: module.Dims{ + {ID: "%s_mem.swapped.average", Name: "swapped"}, + }, + } + vmMemorySwapIOChartTmpl = module.Chart{ + ID: 
"%s_mem_swap_io_rate", + Title: "Virtual Machine VMKernel memory swap IO", + Units: "KiB/s", + Fam: "vms mem", + Ctx: "vsphere.vm_mem_swap_io", + Type: module.Area, + Priority: prioVmMemorySwapIO, + Dims: module.Dims{ + {ID: "%s_mem.swapinRate.average", Name: "in"}, + {ID: "%s_mem.swapoutRate.average", Name: "out"}, + }, + } + + vmDiskIOChartTmpl = module.Chart{ + ID: "%s_disk_io", + Title: "Virtual Machine disk IO", + Units: "KiB/s", + Fam: "vms disk", + Ctx: "vsphere.vm_disk_io", + Type: module.Area, + Priority: prioVmDiskIO, + Dims: module.Dims{ + {ID: "%s_disk.read.average", Name: "read"}, + {ID: "%s_disk.write.average", Name: "write", Mul: -1}, + }, + } + vmDiskMaxLatencyChartTmpl = module.Chart{ + ID: "%s_disk_max_latency", + Title: "Virtual Machine disk max latency", + Units: "milliseconds", + Fam: "vms disk", + Ctx: "vsphere.vm_disk_max_latency", + Priority: prioVmDiskMaxLatency, + Dims: module.Dims{ + {ID: "%s_disk.maxTotalLatency.latest", Name: "latency"}, + }, + } + + vmNetworkTrafficChartTmpl = module.Chart{ + ID: "%s_net_traffic", + Title: "Virtual Machine network traffic", + Units: "KiB/s", + Fam: "vms net", + Ctx: "vsphere.vm_net_traffic", + Type: module.Area, + Priority: prioVmNetworkTraffic, + Dims: module.Dims{ + {ID: "%s_net.bytesRx.average", Name: "received"}, + {ID: "%s_net.bytesTx.average", Name: "sent", Mul: -1}, + }, + } + vmNetworkPacketsChartTmpl = module.Chart{ + ID: "%s_net_packets", + Title: "Virtual Machine network packets", + Units: "packets", + Fam: "vms net", + Ctx: "vsphere.vm_net_packets", + Priority: prioVmNetworkPackets, + Dims: module.Dims{ + {ID: "%s_net.packetsRx.summation", Name: "received"}, + {ID: "%s_net.packetsTx.summation", Name: "sent", Mul: -1}, + }, + } + vmNetworkDropsChartTmpl = module.Chart{ + ID: "%s_net_drops", + Title: "Virtual Machine network dropped packets", + Units: "drops", + Fam: "vms net", + Ctx: "vsphere.vm_net_drops", + Priority: prioVmNetworkDrops, + Dims: module.Dims{ + {ID: "%s_net.droppedRx.summation", Name: "received"}, + {ID: "%s_net.droppedTx.summation", Name: "sent", Mul: -1}, + }, + } + + vmOverallStatusChartTmpl = module.Chart{ + ID: "%s_overall_status", + Title: "Virtual Machine overall alarm status", + Units: "status", + Fam: "vms status", + Ctx: "vsphere.vm_overall_status", + Priority: prioVmOverallStatus, + Dims: module.Dims{ + {ID: "%s_overall.status.green", Name: "green"}, + {ID: "%s_overall.status.red", Name: "red"}, + {ID: "%s_overall.status.yellow", Name: "yellow"}, + {ID: "%s_overall.status.gray", Name: "gray"}, + }, + } + + vmSystemUptimeChartTmpl = module.Chart{ + ID: "%s_system_uptime", + Title: "Virtual Machine system uptime", + Units: "seconds", + Fam: "vms uptime", + Ctx: "vsphere.vm_system_uptime", + Priority: prioVmSystemUptime, + Dims: module.Dims{ + {ID: "%s_sys.uptime.latest", Name: "uptime"}, + }, + } +) + +var ( + hostChartsTmpl = module.Charts{ + hostCPUUtilizationChartTmpl.Copy(), + + hostMemUtilizationChartTmpl.Copy(), + hostMemUsageChartTmpl.Copy(), + hostMemSwapIOChartTmpl.Copy(), + + hostDiskIOChartTmpl.Copy(), + hostDiskMaxLatencyChartTmpl.Copy(), + + hostNetworkTraffic.Copy(), + hostNetworkPacketsChartTmpl.Copy(), + hostNetworkDropsChartTmpl.Copy(), + hostNetworkErrorsChartTmpl.Copy(), + + hostOverallStatusChartTmpl.Copy(), + + hostSystemUptimeChartTmpl.Copy(), + } + hostCPUUtilizationChartTmpl = module.Chart{ + ID: "%s_cpu_usage_total", + Title: "ESXi Host CPU utilization", + Units: "percentage", + Fam: "hosts cpu", + Ctx: "vsphere.host_cpu_utilization", + Priority: 
prioHostCPUUtilization, + Dims: module.Dims{ + {ID: "%s_cpu.usage.average", Name: "used", Div: 100}, + }, + } + hostMemUtilizationChartTmpl = module.Chart{ + ID: "%s_mem_utilization", + Title: "ESXi Host memory utilization", + Units: "percentage", + Fam: "hosts mem", + Ctx: "vsphere.host_mem_utilization", + Priority: prioHostMemoryUtilization, + Dims: module.Dims{ + {ID: "%s_mem.usage.average", Name: "used", Div: 100}, + }, + } + hostMemUsageChartTmpl = module.Chart{ + ID: "%s_mem_usage", + Title: "ESXi Host memory usage", + Units: "KiB", + Fam: "hosts mem", + Ctx: "vsphere.host_mem_usage", + Priority: prioHostMemoryUsage, + Dims: module.Dims{ + {ID: "%s_mem.granted.average", Name: "granted"}, + {ID: "%s_mem.consumed.average", Name: "consumed"}, + {ID: "%s_mem.active.average", Name: "active"}, + {ID: "%s_mem.shared.average", Name: "shared"}, + {ID: "%s_mem.sharedcommon.average", Name: "sharedcommon"}, + }, + } + hostMemSwapIOChartTmpl = module.Chart{ + ID: "%s_mem_swap_rate", + Title: "ESXi Host VMKernel memory swap IO", + Units: "KiB/s", + Fam: "hosts mem", + Ctx: "vsphere.host_mem_swap_io", + Type: module.Area, + Priority: prioHostMemorySwapIO, + Dims: module.Dims{ + {ID: "%s_mem.swapinRate.average", Name: "in"}, + {ID: "%s_mem.swapoutRate.average", Name: "out"}, + }, + } + + hostDiskIOChartTmpl = module.Chart{ + ID: "%s_disk_io", + Title: "ESXi Host disk IO", + Units: "KiB/s", + Fam: "hosts disk", + Ctx: "vsphere.host_disk_io", + Type: module.Area, + Priority: prioHostDiskIO, + Dims: module.Dims{ + {ID: "%s_disk.read.average", Name: "read"}, + {ID: "%s_disk.write.average", Name: "write", Mul: -1}, + }, + } + hostDiskMaxLatencyChartTmpl = module.Chart{ + ID: "%s_disk_max_latency", + Title: "ESXi Host disk max latency", + Units: "milliseconds", + Fam: "hosts disk", + Ctx: "vsphere.host_disk_max_latency", + Priority: prioHostDiskMaxLatency, + Dims: module.Dims{ + {ID: "%s_disk.maxTotalLatency.latest", Name: "latency"}, + }, + } + + hostNetworkTraffic = module.Chart{ + ID: "%s_net_traffic", + Title: "ESXi Host network traffic", + Units: "KiB/s", + Fam: "hosts net", + Ctx: "vsphere.host_net_traffic", + Type: module.Area, + Priority: prioHostNetworkTraffic, + Dims: module.Dims{ + {ID: "%s_net.bytesRx.average", Name: "received"}, + {ID: "%s_net.bytesTx.average", Name: "sent", Mul: -1}, + }, + } + hostNetworkPacketsChartTmpl = module.Chart{ + ID: "%s_net_packets", + Title: "ESXi Host network packets", + Units: "packets", + Fam: "hosts net", + Ctx: "vsphere.host_net_packets", + Priority: prioHostNetworkPackets, + Dims: module.Dims{ + {ID: "%s_net.packetsRx.summation", Name: "received"}, + {ID: "%s_net.packetsTx.summation", Name: "sent", Mul: -1}, + }, + } + hostNetworkDropsChartTmpl = module.Chart{ + ID: "%s_net_drops_total", + Title: "ESXi Host network drops", + Units: "drops", + Fam: "hosts net", + Ctx: "vsphere.host_net_drops", + Priority: prioHostNetworkDrops, + Dims: module.Dims{ + {ID: "%s_net.droppedRx.summation", Name: "received"}, + {ID: "%s_net.droppedTx.summation", Name: "sent", Mul: -1}, + }, + } + hostNetworkErrorsChartTmpl = module.Chart{ + ID: "%s_net_errors", + Title: "ESXi Host network errors", + Units: "errors", + Fam: "hosts net", + Ctx: "vsphere.host_net_errors", + Priority: prioHostNetworkErrors, + Dims: module.Dims{ + {ID: "%s_net.errorsRx.summation", Name: "received"}, + {ID: "%s_net.errorsTx.summation", Name: "sent", Mul: -1}, + }, + } + + hostOverallStatusChartTmpl = module.Chart{ + ID: "%s_overall_status", + Title: "ESXi Host overall alarm status", + Units: "status", + 
Fam: "hosts status", + Ctx: "vsphere.host_overall_status", + Priority: prioHostOverallStatus, + Dims: module.Dims{ + {ID: "%s_overall.status.green", Name: "green"}, + {ID: "%s_overall.status.red", Name: "red"}, + {ID: "%s_overall.status.yellow", Name: "yellow"}, + {ID: "%s_overall.status.gray", Name: "gray"}, + }, + } + hostSystemUptimeChartTmpl = module.Chart{ + ID: "%s_system_uptime", + Title: "ESXi Host system uptime", + Units: "seconds", + Fam: "hosts uptime", + Ctx: "vsphere.host_system_uptime", + Priority: prioHostSystemUptime, + Dims: module.Dims{ + {ID: "%s_sys.uptime.latest", Name: "uptime"}, + }, + } +) + +const failedUpdatesLimit = 10 + +func (vs *VSphere) updateCharts() { + for id, fails := range vs.discoveredHosts { + if fails >= failedUpdatesLimit { + vs.removeFromCharts(id) + delete(vs.charted, id) + delete(vs.discoveredHosts, id) + continue + } + + host := vs.resources.Hosts.Get(id) + if host == nil || vs.charted[id] || fails != 0 { + continue + } + + vs.charted[id] = true + charts := newHostCharts(host) + if err := vs.Charts().Add(*charts...); err != nil { + vs.Error(err) + } + } + + for id, fails := range vs.discoveredVMs { + if fails >= failedUpdatesLimit { + vs.removeFromCharts(id) + delete(vs.charted, id) + delete(vs.discoveredVMs, id) + continue + } + + vm := vs.resources.VMs.Get(id) + if vm == nil || vs.charted[id] || fails != 0 { + continue + } + + vs.charted[id] = true + charts := newVMCHarts(vm) + if err := vs.Charts().Add(*charts...); err != nil { + vs.Error(err) + } + } +} + +func newVMCHarts(vm *rs.VM) *module.Charts { + charts := vmChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, vm.ID) + chart.Labels = []module.Label{ + {Key: "datacenter", Value: vm.Hier.DC.Name}, + {Key: "cluster", Value: getVMClusterName(vm)}, + {Key: "host", Value: vm.Hier.Host.Name}, + {Key: "vm", Value: vm.Name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, vm.ID) + } + } + + return charts +} + +func getVMClusterName(vm *rs.VM) string { + if vm.Hier.Cluster.Name == vm.Hier.Host.Name { + return "" + } + return vm.Hier.Cluster.Name +} + +func newHostCharts(host *rs.Host) *module.Charts { + charts := hostChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, host.ID) + chart.Labels = []module.Label{ + {Key: "datacenter", Value: host.Hier.DC.Name}, + {Key: "cluster", Value: getHostClusterName(host)}, + {Key: "host", Value: host.Name}, + } + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, host.ID) + } + } + + return charts +} + +func getHostClusterName(host *rs.Host) string { + if host.Hier.Cluster.Name == host.Name { + return "" + } + return host.Hier.Cluster.Name +} + +func (vs *VSphere) removeFromCharts(prefix string) { + for _, c := range *vs.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +//func findMetricSeriesByPrefix(ms []performance.MetricSeries, prefix string) []performance.MetricSeries { +// from := sort.Search(len(ms), func(i int) bool { return ms[i].Name >= prefix }) +// +// if from == len(ms) || !strings.HasPrefix(ms[from].Name, prefix) { +// return nil +// } +// +// until := from + 1 +// for until < len(ms) && strings.HasPrefix(ms[until].Name, prefix) { +// until++ +// } +// return ms[from:until] +//} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/client/client.go b/src/go/collectors/go.d.plugin/modules/vsphere/client/client.go new file mode 100644 index 00000000000000..40ed351c0bbdc7 --- /dev/null 
+++ b/src/go/collectors/go.d.plugin/modules/vsphere/client/client.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "context" + "net/http" + "net/url" + "time" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + datacenter = "Datacenter" + folder = "Folder" + computeResource = "ComputeResource" + hostSystem = "HostSystem" + virtualMachine = "VirtualMachine" + + maxIdleConnections = 32 +) + +type Config struct { + URL string + User string + Password string + tlscfg.TLSConfig + Timeout time.Duration +} + +type Client struct { + client *govmomi.Client + root *view.ContainerView + perf *performance.Manager +} + +func newSoapClient(config Config) (*soap.Client, error) { + soapURL, err := soap.ParseURL(config.URL) + if err != nil || soapURL == nil { + return nil, err + } + soapURL.User = url.UserPassword(config.User, config.Password) + soapClient := soap.NewClient(soapURL, config.TLSConfig.InsecureSkipVerify) + + tlsConfig, err := tlscfg.NewTLSConfig(config.TLSConfig) + if err != nil { + return nil, err + } + if tlsConfig != nil && len(tlsConfig.Certificates) > 0 { + soapClient.SetCertificate(tlsConfig.Certificates[0]) + } + if config.TLSConfig.TLSCA != "" { + if err := soapClient.SetRootCAs(config.TLSConfig.TLSCA); err != nil { + return nil, err + } + } + + if t, ok := soapClient.Transport.(*http.Transport); ok { + t.MaxIdleConnsPerHost = maxIdleConnections + t.TLSHandshakeTimeout = config.Timeout + } + soapClient.Timeout = config.Timeout + + return soapClient, nil +} + +func newContainerView(ctx context.Context, client *govmomi.Client) (*view.ContainerView, error) { + viewManager := view.NewManager(client.Client) + return viewManager.CreateContainerView(ctx, client.ServiceContent.RootFolder, []string{}, true) +} + +func newPerformanceManager(client *vim25.Client) *performance.Manager { + perfManager := performance.NewManager(client) + perfManager.Sort = true + return perfManager +} + +func New(config Config) (*Client, error) { + ctx := context.Background() + soapClient, err := newSoapClient(config) + if err != nil { + return nil, err + } + + vimClient, err := vim25.NewClient(ctx, soapClient) + if err != nil { + return nil, err + } + + vmomiClient := &govmomi.Client{ + Client: vimClient, + SessionManager: session.NewManager(vimClient), + } + + userInfo := url.UserPassword(config.User, config.Password) + addKeepAlive(vmomiClient, userInfo) + + err = vmomiClient.Login(ctx, userInfo) + if err != nil { + return nil, err + } + + containerView, err := newContainerView(ctx, vmomiClient) + if err != nil { + return nil, err + } + + perfManager := newPerformanceManager(vimClient) + + client := &Client{ + client: vmomiClient, + perf: perfManager, + root: containerView, + } + + return client, nil +} + +func (c *Client) IsSessionActive() (bool, error) { + return c.client.SessionManager.SessionIsActive(context.Background()) +} + +func (c *Client) Version() string { + return c.client.ServiceContent.About.Version +} + +func (c *Client) Login(userinfo *url.Userinfo) error { + return c.client.Login(context.Background(), userinfo) +} + +func (c *Client) Logout() error { + return c.client.Logout(context.Background()) +} + +func (c *Client) 
PerformanceMetrics(pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) { + metrics, err := c.perf.Query(context.Background(), pqs) + if err != nil { + return nil, err + } + return c.perf.ToMetricSeries(context.Background(), metrics) +} + +func (c *Client) Datacenters(pathSet ...string) (dcs []mo.Datacenter, err error) { + err = c.root.Retrieve(context.Background(), []string{datacenter}, pathSet, &dcs) + return +} + +func (c *Client) Folders(pathSet ...string) (folders []mo.Folder, err error) { + err = c.root.Retrieve(context.Background(), []string{folder}, pathSet, &folders) + return +} + +func (c *Client) ComputeResources(pathSet ...string) (computes []mo.ComputeResource, err error) { + err = c.root.Retrieve(context.Background(), []string{computeResource}, pathSet, &computes) + return +} + +func (c *Client) Hosts(pathSet ...string) (hosts []mo.HostSystem, err error) { + err = c.root.Retrieve(context.Background(), []string{hostSystem}, pathSet, &hosts) + return +} + +func (c *Client) VirtualMachines(pathSet ...string) (vms []mo.VirtualMachine, err error) { + err = c.root.Retrieve(context.Background(), []string{virtualMachine}, pathSet, &vms) + return +} + +func (c *Client) CounterInfoByName() (map[string]*types.PerfCounterInfo, error) { + return c.perf.CounterInfoByName(context.Background()) +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/client/client_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/client/client_test.go new file mode 100644 index 00000000000000..5624f5c528b7eb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/client/client_test.go @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "crypto/tls" + "net/url" + "testing" + "time" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +func TestNew(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + v, err := client.IsSessionActive() + assert.NoError(t, err) + assert.True(t, v) +} + +func TestClient_Version(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + assert.NotEmpty(t, client.Version()) +} + +func TestClient_CounterInfoByName(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + v, err := client.CounterInfoByName() + assert.NoError(t, err) + assert.IsType(t, map[string]*types.PerfCounterInfo{}, v) + assert.NotEmpty(t, v) +} + +func TestClient_IsSessionActive(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + v, err := client.IsSessionActive() + assert.NoError(t, err) + assert.True(t, v) +} + +func TestClient_Login(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + assert.NoError(t, client.Logout()) + + err := client.Login(url.UserPassword("admin", "password")) + assert.NoError(t, err) + + ok, err := client.IsSessionActive() + assert.NoError(t, err) + assert.True(t, ok) +} + +func TestClient_Logout(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + assert.NoError(t, client.Logout()) + + v, err := client.IsSessionActive() + assert.NoError(t, err) + assert.False(t, v) +} + +func TestClient_Datacenters(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + dcs, err := client.Datacenters() + assert.NoError(t, err) + assert.NotEmpty(t, dcs) +} + +func 
TestClient_Folders(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + folders, err := client.Folders() + assert.NoError(t, err) + assert.NotEmpty(t, folders) +} + +func TestClient_ComputeResources(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + computes, err := client.ComputeResources() + assert.NoError(t, err) + assert.NotEmpty(t, computes) +} + +func TestClient_Hosts(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + hosts, err := client.Hosts() + assert.NoError(t, err) + assert.NotEmpty(t, hosts) +} + +func TestClient_VirtualMachines(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + vms, err := client.VirtualMachines() + assert.NoError(t, err) + assert.NotEmpty(t, vms) +} + +func TestClient_PerformanceMetrics(t *testing.T) { + client, teardown := prepareClient(t) + defer teardown() + + hosts, err := client.Hosts() + require.NoError(t, err) + metrics, err := client.PerformanceMetrics(hostsPerfQuerySpecs(hosts)) + require.NoError(t, err) + assert.True(t, len(metrics) > 0) +} + +func prepareClient(t *testing.T) (client *Client, teardown func()) { + model, srv := createSim(t) + teardown = func() { model.Remove(); srv.Close() } + return newClient(t, srv.URL), teardown +} + +func newClient(t *testing.T, vCenterURL *url.URL) *Client { + client, err := New(Config{ + URL: vCenterURL.String(), + User: "admin", + Password: "password", + Timeout: time.Second * 3, + TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true}, + }) + require.NoError(t, err) + return client +} + +func createSim(t *testing.T) (*simulator.Model, *simulator.Server) { + model := simulator.VPX() + err := model.Create() + require.NoError(t, err) + model.Service.TLS = new(tls.Config) + return model, model.Service.NewServer() +} + +func hostsPerfQuerySpecs(hosts []mo.HostSystem) []types.PerfQuerySpec { + var pqs []types.PerfQuerySpec + for _, host := range hosts { + pq := types.PerfQuerySpec{ + Entity: host.Reference(), + MaxSample: 1, + MetricId: []types.PerfMetricId{{CounterId: 32, Instance: ""}}, + IntervalId: 20, + Format: "normal", + } + pqs = append(pqs, pq) + } + return pqs +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/client/keepalive.go b/src/go/collectors/go.d.plugin/modules/vsphere/client/keepalive.go new file mode 100644 index 00000000000000..0ce1ef5c063b6f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/client/keepalive.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package client + +import ( + "context" + "net/url" + "time" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + keepAliveEvery = time.Second * 15 +) + +// TODO: survive vCenter reboot, it looks like we need to re New() +func addKeepAlive(client *govmomi.Client, userinfo *url.Userinfo) { + f := func(rt soap.RoundTripper) error { + _, err := methods.GetCurrentTime(context.Background(), rt) + if err == nil { + return nil + } + + if !isNotAuthenticated(err) { + return nil + } + + _ = client.Login(context.Background(), userinfo) + return nil + } + client.Client.RoundTripper = session.KeepAliveHandler(client.Client.RoundTripper, keepAliveEvery, f) +} + +func isNotAuthenticated(err error) bool { + if !soap.IsSoapFault(err) { + return false + } + _, ok := soap.ToSoapFault(err).VimFault().(*types.NotAuthenticated) + return ok +} diff --git 
a/src/go/collectors/go.d.plugin/modules/vsphere/collect.go b/src/go/collectors/go.d.plugin/modules/vsphere/collect.go new file mode 100644 index 00000000000000..449f513219ae68 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/collect.go @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + "errors" + "fmt" + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/vmware/govmomi/performance" +) + +// ManagedEntityStatus +var overallStatuses = []string{"green", "red", "yellow", "gray"} + +func (vs *VSphere) collect() (map[string]int64, error) { + vs.collectionLock.Lock() + defer vs.collectionLock.Unlock() + + vs.Debug("starting collection process") + t := time.Now() + mx := make(map[string]int64) + + err := vs.collectHosts(mx) + if err != nil { + return nil, err + } + + err = vs.collectVMs(mx) + if err != nil { + return nil, err + } + + vs.updateCharts() + + vs.Debugf("metrics collected, process took %s", time.Since(t)) + + return mx, nil +} + +func (vs *VSphere) collectHosts(mx map[string]int64) error { + if len(vs.resources.Hosts) == 0 { + return nil + } + // NOTE: returns unsorted if at least one types.PerfMetricId Instance is not "" + metrics := vs.ScrapeHosts(vs.resources.Hosts) + if len(metrics) == 0 { + return errors.New("failed to scrape hosts metrics") + } + + vs.collectHostsMetrics(mx, metrics) + + return nil +} + +func (vs *VSphere) collectHostsMetrics(mx map[string]int64, metrics []performance.EntityMetric) { + for k := range vs.discoveredHosts { + vs.discoveredHosts[k]++ + } + + for _, metric := range metrics { + if host := vs.resources.Hosts.Get(metric.Entity.Value); host != nil { + vs.discoveredHosts[host.ID] = 0 + writeHostMetrics(mx, host, metric.Value) + } + } +} + +func writeHostMetrics(mx map[string]int64, host *rs.Host, metrics []performance.MetricSeries) { + for _, metric := range metrics { + if len(metric.Value) == 0 || metric.Value[0] == -1 { + continue + } + key := fmt.Sprintf("%s_%s", host.ID, metric.Name) + mx[key] = metric.Value[0] + } + for _, v := range overallStatuses { + key := fmt.Sprintf("%s_overall.status.%s", host.ID, v) + mx[key] = boolToInt(host.OverallStatus == v) + } +} + +func (vs *VSphere) collectVMs(mx map[string]int64) error { + if len(vs.resources.VMs) == 0 { + return nil + } + // NOTE: returns unsorted if at least one types.PerfMetricId Instance is not "" + ems := vs.ScrapeVMs(vs.resources.VMs) + if len(ems) == 0 { + return errors.New("failed to scrape vms metrics") + } + + vs.collectVMsMetrics(mx, ems) + + return nil +} + +func (vs *VSphere) collectVMsMetrics(mx map[string]int64, metrics []performance.EntityMetric) { + for id := range vs.discoveredVMs { + vs.discoveredVMs[id]++ + } + + for _, metric := range metrics { + if vm := vs.resources.VMs.Get(metric.Entity.Value); vm != nil { + writeVMMetrics(mx, vm, metric.Value) + vs.discoveredVMs[vm.ID] = 0 + } + } +} + +func writeVMMetrics(mx map[string]int64, vm *rs.VM, metrics []performance.MetricSeries) { + for _, metric := range metrics { + if len(metric.Value) == 0 || metric.Value[0] == -1 { + continue + } + key := fmt.Sprintf("%s_%s", vm.ID, metric.Name) + mx[key] = metric.Value[0] + } + for _, v := range overallStatuses { + key := fmt.Sprintf("%s_overall.status.%s", vm.ID, v) + mx[key] = boolToInt(vm.OverallStatus == v) + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/config_schema.json 
b/src/go/collectors/go.d.plugin/modules/vsphere/config_schema.json new file mode 100644 index 00000000000000..68bd55e1eddbe1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/config_schema.json @@ -0,0 +1,77 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/vsphere job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "discovery_interval": { + "type": [ + "string", + "integer" + ] + }, + "host_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "vm_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover.go new file mode 100644 index 00000000000000..65555a73b45f11 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +func (vs *VSphere) goDiscovery() { + if vs.discoveryTask != nil { + vs.discoveryTask.stop() + } + vs.Infof("starting discovery process, will do discovery every %s", vs.DiscoveryInterval) + + job := func() { + err := vs.discoverOnce() + if err != nil { + vs.Errorf("error on discovering : %v", err) + } + } + vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration) +} + +func (vs *VSphere) discoverOnce() error { + res, err := vs.Discover() + if err != nil { + return err + } + + vs.collectionLock.Lock() + vs.resources = res + vs.collectionLock.Unlock() + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/build.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/build.go new file mode 100644 index 00000000000000..745bff60bc33b1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover/build.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/vmware/govmomi/vim25/mo" +) + +func (d Discoverer) build(raw *resources) *rs.Resources { + d.Debug("discovering : building : starting building resources process") + t := time.Now() + + var res rs.Resources + res.DataCenters = d.buildDatacenters(raw.dcs) + res.Folders = d.buildFolders(raw.folders) + res.Clusters = d.buildClusters(raw.clusters) + fixClustersParentID(&res) + res.Hosts = d.buildHosts(raw.hosts) + res.VMs = d.buildVMs(raw.vms) + + d.Infof("discovering : building : built %d/%d dcs, %d/%d folders, %d/%d clusters, %d/%d hosts, %d/%d vms, process took %s", + len(res.DataCenters), + len(raw.dcs), + len(res.Folders), + len(raw.folders), + len(res.Clusters), + len(raw.clusters), + len(res.Hosts), + len(raw.hosts), + len(res.VMs), + len(raw.vms), + time.Since(t), + ) + return &res +} + +// cluster parent is folder by default 
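+// (a discovered cluster's ParentID initially points at its hosts folder, e.g. group-h4; +// findClusterDcID below follows the folder chain up to the datacenter)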
+// should be called after buildDatacenters, buildFolders and buildClusters +func fixClustersParentID(res *rs.Resources) { + for _, c := range res.Clusters { + c.ParentID = findClusterDcID(c.ParentID, res.Folders) + } +} + +func findClusterDcID(parentID string, folders rs.Folders) string { + f := folders.Get(parentID) + if f == nil { + return parentID + } + return findClusterDcID(f.ParentID, folders) +} + +func (Discoverer) buildDatacenters(raw []mo.Datacenter) rs.DataCenters { + dcs := make(rs.DataCenters) + for _, d := range raw { + dcs.Put(newDC(d)) + } + return dcs +} + +func newDC(raw mo.Datacenter) *rs.Datacenter { + // Datacenter1 datacenter-2 group-h4 group-v3 + return &rs.Datacenter{ + Name: raw.Name, + ID: raw.Reference().Value, + } +} + +func (Discoverer) buildFolders(raw []mo.Folder) rs.Folders { + fs := make(rs.Folders) + for _, d := range raw { + fs.Put(newFolder(d)) + } + return fs +} + +func newFolder(raw mo.Folder) *rs.Folder { + // vm group-v55 datacenter-54 + // host group-h56 datacenter-54 + // datastore group-s57 datacenter-54 + // network group-n58 datacenter-54 + return &rs.Folder{ + Name: raw.Name, + ID: raw.Reference().Value, + ParentID: raw.Parent.Value, + } +} + +func (Discoverer) buildClusters(raw []mo.ComputeResource) rs.Clusters { + clusters := make(rs.Clusters) + for _, c := range raw { + clusters.Put(newCluster(c)) + } + return clusters +} + +func newCluster(raw mo.ComputeResource) *rs.Cluster { + // s - dummy cluster, c - created by user cluster + // 192.168.0.201 domain-s61 group-h4 + // New Cluster1 domain-c52 group-h67 + return &rs.Cluster{ + Name: raw.Name, + ID: raw.Reference().Value, + ParentID: raw.Parent.Value, + } +} + +const ( + poweredOn = "poweredOn" +) + +func (d Discoverer) buildHosts(raw []mo.HostSystem) rs.Hosts { + var num int + hosts := make(rs.Hosts) + for _, h := range raw { + // poweredOn | poweredOff | standBy | unknown + if h.Runtime.PowerState != poweredOn { + num++ + continue + } + // connected | notResponding | disconnected + //if v.Runtime.ConnectionState == "" { + // + //} + hosts.Put(newHost(h)) + } + if num > 0 { + d.Infof("discovering : building : removed %d hosts (not powered on)", num) + } + return hosts +} + +func newHost(raw mo.HostSystem) *rs.Host { + // 192.168.0.201 host-22 domain-s61 + // 192.168.0.202 host-28 domain-c52 + // 192.168.0.203 host-33 domain-c52 + return &rs.Host{ + Name: raw.Name, + ID: raw.Reference().Value, + ParentID: raw.Parent.Value, + OverallStatus: string(raw.Summary.OverallStatus), + Ref: raw.Reference(), + } +} + +func (d Discoverer) buildVMs(raw []mo.VirtualMachine) rs.VMs { + var num int + vms := make(rs.VMs) + for _, v := range raw { + // poweredOff | poweredOn | suspended + if v.Runtime.PowerState != poweredOn { + num++ + continue + } + // connected | disconnected | orphaned | inaccessible | invalid + //if v.Runtime.ConnectionState == "" { + // + //} + vms.Put(newVM(v)) + } + if num > 0 { + d.Infof("discovering : building : removed %d vms (not powered on)", num) + } + return vms +} + +func newVM(raw mo.VirtualMachine) *rs.VM { + // deb91 vm-25 group-v3 host-22 + return &rs.VM{ + Name: raw.Name, + ID: raw.Reference().Value, + ParentID: raw.Runtime.Host.Value, + OverallStatus: string(raw.Summary.OverallStatus), + Ref: raw.Reference(), + } +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover.go new file mode 100644 index 00000000000000..cba4101d8f2a39 --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover.go @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "fmt" + "strings" + "time" + + "github.com/netdata/go.d.plugin/modules/vsphere/match" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/netdata/go.d.plugin/logger" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type Client interface { + Datacenters(pathSet ...string) ([]mo.Datacenter, error) + Folders(pathSet ...string) ([]mo.Folder, error) + ComputeResources(pathSet ...string) ([]mo.ComputeResource, error) + Hosts(pathSet ...string) ([]mo.HostSystem, error) + VirtualMachines(pathSet ...string) ([]mo.VirtualMachine, error) + + CounterInfoByName() (map[string]*types.PerfCounterInfo, error) +} + +func New(client Client) *Discoverer { + return &Discoverer{ + Client: client, + } +} + +type Discoverer struct { + *logger.Logger + Client + match.HostMatcher + match.VMMatcher +} + +type resources struct { + dcs []mo.Datacenter + folders []mo.Folder + clusters []mo.ComputeResource + hosts []mo.HostSystem + vms []mo.VirtualMachine +} + +func (d Discoverer) Discover() (*rs.Resources, error) { + startTime := time.Now() + raw, err := d.discover() + if err != nil { + return nil, fmt.Errorf("discovering resources : %v", err) + } + + res := d.build(raw) + + err = d.setHierarchy(res) + if err != nil { + // TODO: handle objects w/o hier? + d.Error(err) + } + + numH := len(res.Hosts) + numV := len(res.VMs) + removed := d.removeUnmatched(res) + if removed == (numH + numV) { + return nil, fmt.Errorf("all resources were filtered (%d hosts, %d vms)", numH, numV) + } + + err = d.collectMetricLists(res) + if err != nil { + return nil, fmt.Errorf("collecting metric lists : %v", err) + } + + d.Infof("discovering : discovered %d/%d hosts, %d/%d vms, the whole process took %s", + len(res.Hosts), + len(raw.hosts), + len(res.VMs), + len(raw.vms), + time.Since(startTime)) + + return res, nil +} + +var ( + // properties to retrieve for each resource type + datacenterPathSet = []string{"name", "parent"} + folderPathSet = []string{"name", "parent"} + clusterPathSet = []string{"name", "parent"} + hostPathSet = []string{"name", "parent", "runtime.powerState", "summary.overallStatus"} + vmPathSet = []string{"name", "runtime.host", "runtime.powerState", "summary.overallStatus"} +) + +func (d Discoverer) discover() (*resources, error) { + d.Debug("discovering : starting resource discovering process") + + start := time.Now() + t := start + datacenters, err := d.Datacenters(datacenterPathSet...) + if err != nil { + return nil, err + } + d.Debugf("discovering : found %d dcs, process took %s", len(datacenters), time.Since(t)) + + t = time.Now() + folders, err := d.Folders(folderPathSet...) + if err != nil { + return nil, err + } + d.Debugf("discovering : found %d folders, process took %s", len(folders), time.Since(t)) + + t = time.Now() + clusters, err := d.ComputeResources(clusterPathSet...) + if err != nil { + return nil, err + } + d.Debugf("discovering : found %d clusters, process took %s", len(clusters), time.Since(t)) + + t = time.Now() + hosts, err := d.Hosts(hostPathSet...) + if err != nil { + return nil, err + } + d.Debugf("discovering : found %d hosts, process took %s", len(hosts), time.Since(t)) + + t = time.Now() + vms, err := d.VirtualMachines(vmPathSet...)
+ if err != nil { + return nil, err + } + d.Debugf("discovering : found %d vms, process took %s", len(vms), time.Since(t)) + + raw := resources{ + dcs: datacenters, + folders: folders, + clusters: clusters, + hosts: hosts, + vms: vms, + } + + d.Infof("discovering : found %d dcs, %d folders, %d clusters (%d dummy), %d hosts, %d vms, process took %s", + len(raw.dcs), + len(raw.folders), + len(clusters), + numOfDummyClusters(clusters), + len(raw.hosts), + len(raw.vms), + time.Since(start), + ) + + return &raw, nil +} + +func numOfDummyClusters(clusters []mo.ComputeResource) (num int) { + for _, c := range clusters { + // domain-s61 | domain-c52 + if strings.HasPrefix(c.Reference().Value, "domain-s") { + num++ + } + } + return num +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover_test.go new file mode 100644 index 00000000000000..b113ab3a622491 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover/discover_test.go @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "crypto/tls" + "net/url" + "testing" + "time" + + "github.com/netdata/go.d.plugin/modules/vsphere/client" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator" +) + +func TestDiscoverer_Discover(t *testing.T) { + d, _, teardown := prepareDiscovererSim(t) + defer teardown() + + res, err := d.Discover() + + require.NoError(t, err) + assert.True(t, len(res.DataCenters) > 0) + assert.True(t, len(res.Folders) > 0) + assert.True(t, len(res.Clusters) > 0) + assert.True(t, len(res.Hosts) > 0) + assert.True(t, len(res.VMs) > 0) + assert.True(t, isHierarchySet(res)) + assert.True(t, isMetricListsCollected(res)) +} + +func TestDiscoverer_discover(t *testing.T) { + d, model, teardown := prepareDiscovererSim(t) + defer teardown() + + raw, err := d.discover() + + require.NoError(t, err) + count := model.Count() + assert.Lenf(t, raw.dcs, count.Datacenter, "datacenters") + assert.Lenf(t, raw.folders, count.Folder-1, "folders") // minus root folder + dummyClusters := model.Host * count.Datacenter + assert.Lenf(t, raw.clusters, count.Cluster+dummyClusters, "clusters") + assert.Lenf(t, raw.hosts, count.Host, "hosts") + assert.Lenf(t, raw.vms, count.Machine, "vms") +} + +func TestDiscoverer_build(t *testing.T) { + d, _, teardown := prepareDiscovererSim(t) + defer teardown() + + raw, err := d.discover() + require.NoError(t, err) + + res := d.build(raw) + + assert.Lenf(t, res.DataCenters, len(raw.dcs), "datacenters") + assert.Lenf(t, res.Folders, len(raw.folders), "folders") + assert.Lenf(t, res.Clusters, len(raw.clusters), "clusters") + assert.Lenf(t, res.Hosts, len(raw.hosts), "hosts") + assert.Lenf(t, res.VMs, len(raw.vms), "vms") +} + +func TestDiscoverer_setHierarchy(t *testing.T) { + d, _, teardown := prepareDiscovererSim(t) + defer teardown() + + raw, err := d.discover() + require.NoError(t, err) + res := d.build(raw) + + err = d.setHierarchy(res) + + require.NoError(t, err) + assert.True(t, isHierarchySet(res)) +} + +func TestDiscoverer_removeUnmatched(t *testing.T) { + d, _, teardown := prepareDiscovererSim(t) + defer teardown() + + d.HostMatcher = falseHostMatcher{} + d.VMMatcher = falseVMMatcher{} + raw, err := d.discover() + require.NoError(t, err) + res := d.build(raw) + + numVMs, numHosts :=
len(res.VMs), len(res.Hosts) + assert.Equal(t, numVMs+numHosts, d.removeUnmatched(res)) + assert.Lenf(t, res.Hosts, 0, "hosts") + assert.Lenf(t, res.VMs, 0, "vms") +} + +func TestDiscoverer_collectMetricLists(t *testing.T) { + d, _, teardown := prepareDiscovererSim(t) + defer teardown() + + raw, err := d.discover() + require.NoError(t, err) + + res := d.build(raw) + err = d.collectMetricLists(res) + + require.NoError(t, err) + assert.True(t, isMetricListsCollected(res)) +} + +func prepareDiscovererSim(t *testing.T) (d *Discoverer, model *simulator.Model, teardown func()) { + model, srv := createSim(t) + teardown = func() { model.Remove(); srv.Close() } + c := newClient(t, srv.URL) + + return New(c), model, teardown +} + +func newClient(t *testing.T, vCenterURL *url.URL) *client.Client { + c, err := client.New(client.Config{ + URL: vCenterURL.String(), + User: "admin", + Password: "password", + Timeout: time.Second * 3, + TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true}, + }) + require.NoError(t, err) + return c +} + +func createSim(t *testing.T) (*simulator.Model, *simulator.Server) { + model := simulator.VPX() + err := model.Create() + require.NoError(t, err) + model.Service.TLS = new(tls.Config) + return model, model.Service.NewServer() +} + +func isHierarchySet(res *rs.Resources) bool { + for _, c := range res.Clusters { + if !c.Hier.IsSet() { + return false + } + } + for _, h := range res.Hosts { + if !h.Hier.IsSet() { + return false + } + } + for _, v := range res.VMs { + if !v.Hier.IsSet() { + return false + } + } + return true +} + +func isMetricListsCollected(res *rs.Resources) bool { + for _, h := range res.Hosts { + if h.MetricList == nil { + return false + } + } + for _, v := range res.VMs { + if v.MetricList == nil { + return false + } + } + return true +} + +type falseHostMatcher struct{} + +func (falseHostMatcher) Match(*rs.Host) bool { return false } + +type falseVMMatcher struct{} + +func (falseVMMatcher) Match(*rs.VM) bool { return false } diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/filter.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/filter.go new file mode 100644 index 00000000000000..9b015b2a526f35 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover/filter.go @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" +) + +func (d Discoverer) matchHost(host *rs.Host) bool { + if d.HostMatcher == nil { + return true + } + return d.HostMatcher.Match(host) +} + +func (d Discoverer) matchVM(vm *rs.VM) bool { + if d.VMMatcher == nil { + return true + } + return d.VMMatcher.Match(vm) +} + +func (d Discoverer) removeUnmatched(res *rs.Resources) (removed int) { + d.Debug("discovering : filtering : starting filtering resources process") + t := time.Now() + numH, numV := len(res.Hosts), len(res.VMs) + removed += d.removeUnmatchedHosts(res.Hosts) + removed += d.removeUnmatchedVMs(res.VMs) + d.Infof("discovering : filtering : filtered %d/%d hosts, %d/%d vms, process took %s", + numH-len(res.Hosts), + numH, + numV-len(res.VMs), + numV, + time.Since(t)) + return +} + +func (d Discoverer) removeUnmatchedHosts(hosts rs.Hosts) (removed int) { + for _, v := range hosts { + if !d.matchHost(v) { + removed++ + hosts.Remove(v.ID) + } + } + d.Debugf("discovering : filtering : removed %d unmatched hosts", removed) + return removed +} + +func (d Discoverer) removeUnmatchedVMs(vms rs.VMs) (removed int) { + for _, v := 
range vms { + if !d.matchVM(v) { + removed++ + vms.Remove(v.ID) + } + } + d.Debugf("discovering : filtering : removed %d unmatched vms", removed) + return removed +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/hierarchy.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/hierarchy.go new file mode 100644 index 00000000000000..e8d71467d009fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover/hierarchy.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" +) + +func (d Discoverer) setHierarchy(res *rs.Resources) error { + d.Debug("discovering : hierarchy : start setting resources hierarchy process") + t := time.Now() + + c := d.setClustersHierarchy(res) + h := d.setHostsHierarchy(res) + v := d.setVMsHierarchy(res) + + // notSet := len(res.Clusters) + len(res.Hosts) + len(res.VMs) - (c + h + v) + d.Infof("discovering : hierarchy : set %d/%d clusters, %d/%d hosts, %d/%d vms, process took %s", + c, len(res.Clusters), + h, len(res.Hosts), + v, len(res.VMs), + time.Since(t), + ) + + return nil +} + +func (d Discoverer) setClustersHierarchy(res *rs.Resources) (set int) { + for _, cluster := range res.Clusters { + if setClusterHierarchy(cluster, res) { + set++ + } + } + return set +} + +func (d Discoverer) setHostsHierarchy(res *rs.Resources) (set int) { + for _, host := range res.Hosts { + if setHostHierarchy(host, res) { + set++ + } + } + return set +} + +func (d Discoverer) setVMsHierarchy(res *rs.Resources) (set int) { + for _, vm := range res.VMs { + if setVMHierarchy(vm, res) { + set++ + } + } + return set +} + +func setClusterHierarchy(cluster *rs.Cluster, res *rs.Resources) bool { + dc := res.DataCenters.Get(cluster.ParentID) + if dc == nil { + return false + } + cluster.Hier.DC.Set(dc.ID, dc.Name) + return cluster.Hier.IsSet() +} + +func setHostHierarchy(host *rs.Host, res *rs.Resources) bool { + cr := res.Clusters.Get(host.ParentID) + if cr == nil { + return false + } + host.Hier.Cluster.Set(cr.ID, cr.Name) + + dc := res.DataCenters.Get(cr.ParentID) + if dc == nil { + return false + } + host.Hier.DC.Set(dc.ID, dc.Name) + return host.Hier.IsSet() +} + +func setVMHierarchy(vm *rs.VM, res *rs.Resources) bool { + h := res.Hosts.Get(vm.ParentID) + if h == nil { + return false + } + vm.Hier.Host.Set(h.ID, h.Name) + + cr := res.Clusters.Get(h.ParentID) + if cr == nil { + return false + } + vm.Hier.Cluster.Set(cr.ID, cr.Name) + + dc := res.DataCenters.Get(cr.ParentID) + if dc == nil { + return false + } + vm.Hier.DC.Set(dc.ID, dc.Name) + return vm.Hier.IsSet() +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/discover/metric_lists.go b/src/go/collectors/go.d.plugin/modules/vsphere/discover/metric_lists.go new file mode 100644 index 00000000000000..f2102cf6a67470 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/discover/metric_lists.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package discover + +import ( + "sort" + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/vim25/types" +) + +func (d Discoverer) collectMetricLists(res *rs.Resources) error { + d.Debug("discovering : metric lists : starting resources metric lists collection process") + t := time.Now() + perfCounters, err := d.CounterInfoByName() + if err != nil { + return err + } + + hostML := 
simpleHostMetricList(perfCounters) + for _, h := range res.Hosts { + h.MetricList = hostML + } + vmML := simpleVMMetricList(perfCounters) + for _, v := range res.VMs { + v.MetricList = vmML + } + + d.Infof("discovering : metric lists : collected metric lists for %d/%d hosts, %d/%d vms, process took %s", + len(res.Hosts), + len(res.Hosts), + len(res.VMs), + len(res.VMs), + time.Since(t), + ) + + return nil +} + +func simpleHostMetricList(pci map[string]*types.PerfCounterInfo) performance.MetricList { + return simpleMetricList(hostMetrics, pci) +} + +func simpleVMMetricList(pci map[string]*types.PerfCounterInfo) performance.MetricList { + return simpleMetricList(vmMetrics, pci) +} + +func simpleMetricList(metrics []string, pci map[string]*types.PerfCounterInfo) performance.MetricList { + sort.Strings(metrics) + + var pml performance.MetricList + for _, v := range metrics { + m, ok := pci[v] + if !ok { + // TODO: should be logged + continue + } + // TODO: only summary metrics for now + // TODO: some metrics only appear if Instance is *, for example + // virtualDisk.totalWriteLatency.average.scsi0:0 + // virtualDisk.numberWriteAveraged.average.scsi0:0 + // virtualDisk.write.average.scsi0:0 + // virtualDisk.totalReadLatency.average.scsi0:0 + // virtualDisk.numberReadAveraged.average.scsi0:0 + // virtualDisk.read.average.scsi0:0 + // disk.numberReadAveraged.average + // disk.numberWriteAveraged.average + // TODO: metrics will be unsorted after if at least one Instance is * + pml = append(pml, types.PerfMetricId{CounterId: m.Key, Instance: ""}) + } + return pml +} + +var ( + vmMetrics = []string{ + "cpu.usage.average", + + "mem.usage.average", + "mem.granted.average", + "mem.consumed.average", + "mem.active.average", + "mem.shared.average", + // Refers to VMkernel swapping! + "mem.swapinRate.average", + "mem.swapoutRate.average", + "mem.swapped.average", + + "net.bytesRx.average", + "net.bytesTx.average", + "net.packetsRx.summation", + "net.packetsTx.summation", + "net.droppedRx.summation", + "net.droppedTx.summation", + + // the only summary disk metrics + "disk.read.average", + "disk.write.average", + "disk.maxTotalLatency.latest", + + "sys.uptime.latest", + } + + hostMetrics = []string{ + "cpu.usage.average", + + "mem.usage.average", + "mem.granted.average", + "mem.consumed.average", + "mem.active.average", + "mem.shared.average", + "mem.sharedcommon.average", + // Refers to VMkernel swapping! 
+ "mem.swapinRate.average", + "mem.swapoutRate.average", + + "net.bytesRx.average", + "net.bytesTx.average", + "net.packetsRx.summation", + "net.packetsTx.summation", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + + // the only summary disk metrics + "disk.read.average", + "disk.write.average", + "disk.maxTotalLatency.latest", + + "sys.uptime.latest", + } +) diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/init.go b/src/go/collectors/go.d.plugin/modules/vsphere/init.go new file mode 100644 index 00000000000000..a0f9662203661d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/init.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + "errors" + + "github.com/netdata/go.d.plugin/modules/vsphere/client" + "github.com/netdata/go.d.plugin/modules/vsphere/discover" + "github.com/netdata/go.d.plugin/modules/vsphere/scrape" +) + +func (vs *VSphere) validateConfig() error { + const minRecommendedUpdateEvery = 20 + + if vs.URL == "" { + return errors.New("URL is not set") + } + if vs.Username == "" || vs.Password == "" { + return errors.New("username or password not set") + } + if vs.UpdateEvery < minRecommendedUpdateEvery { + vs.Warningf("update_every is to low, minimum recommended is %d", minRecommendedUpdateEvery) + } + return nil +} + +func (vs *VSphere) initClient() (*client.Client, error) { + config := client.Config{ + URL: vs.URL, + User: vs.Username, + Password: vs.Password, + Timeout: vs.Timeout.Duration, + TLSConfig: vs.Client.TLSConfig, + } + return client.New(config) +} + +func (vs *VSphere) initDiscoverer(c *client.Client) error { + d := discover.New(c) + d.Logger = vs.Logger + + hm, err := vs.HostsInclude.Parse() + if err != nil { + return err + } + if hm != nil { + d.HostMatcher = hm + } + vmm, err := vs.VMsInclude.Parse() + if err != nil { + return err + } + if vmm != nil { + d.VMMatcher = vmm + } + + vs.discoverer = d + return nil +} + +func (vs *VSphere) initScraper(c *client.Client) { + ms := scrape.New(c) + ms.Logger = vs.Logger + vs.scraper = ms +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/integrations/vmware_vcenter_server.md b/src/go/collectors/go.d.plugin/modules/vsphere/integrations/vmware_vcenter_server.md new file mode 100644 index 00000000000000..91740a61b607bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/integrations/vmware_vcenter_server.md @@ -0,0 +1,322 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/vsphere/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/vsphere/metadata.yaml" +sidebar_label: "VMware vCenter Server" +learn_status: "Published" +learn_rel_path: "Data Collection/Containers and VMs" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# VMware vCenter Server + + +<img src="https://netdata.cloud/img/vmware.svg" width="150"/> + + +Plugin: go.d.plugin +Module: vsphere + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors hosts and vms performance statistics from `vCenter` servers. + +> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot. +> go.d.plugin needs to be restarted. + + + + +This collector is supported on all platforms. 
+ +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +This integration doesn't support auto-detection. + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default `update_every` is 20 seconds, and it doesn't make sense to decrease the value. +**VMware real-time statistics are generated at a 20-second granularity**. + +For large installations, 20 seconds is likely not enough, and the value should be tuned. + +To get a better picture, we recommend running the collector in debug mode and checking how long it takes to collect metrics. + +<details> +<summary>Example (all unrelated debug lines were removed)</summary> + +``` +[ilyam@pc]$ ./go.d.plugin -d -m vsphere +[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process +[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms +[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms +[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms +[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms +[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms +[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms +[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process +[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3µs +[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process +[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522µs +[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process +[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts +[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms +[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973µs +[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process +[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms +[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms +[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s +[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process +[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms +[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms +[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms +``` + +</details> + +Here you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. +Discovery runs in a separate thread, so it does not affect collection. The `update_every` and `timeout` parameters should be adjusted based on these numbers.
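+ +For illustration, here is a sketch of a job tuned for a larger installation (the job name, URL, and values below are hypothetical, not recommendations): + +```yaml +jobs: + - name : vcenter-large + url : https://203.0.113.1 + username : admin@vsphere.local + password : somepassword + update_every : 60 # a larger inventory may need a longer collection interval + timeout : 30 # and more time for vCenter to answer performance queries + discovery_interval : 600 +```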
+ + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per virtual machine + +These metrics refer to the Virtual Machine. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| datacenter | Datacenter name | +| cluster | Cluster name | +| host | Host name | +| vm | Virtual Machine name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| vsphere.vm_cpu_utilization | used | percentage | +| vsphere.vm_mem_utilization | used | percentage | +| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB | +| vsphere.vm_mem_swap_usage | swapped | KiB | +| vsphere.vm_mem_swap_io | in, out | KiB/s | +| vsphere.vm_disk_io | read, write | KiB/s | +| vsphere.vm_disk_max_latency | latency | milliseconds | +| vsphere.vm_net_traffic | received, sent | KiB/s | +| vsphere.vm_net_packets | received, sent | packets | +| vsphere.vm_net_drops | received, sent | packets | +| vsphere.vm_overall_status | green, red, yellow, gray | status | +| vsphere.vm_system_uptime | uptime | seconds | + +### Per host + +These metrics refer to the ESXi host. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| datacenter | Datacenter name | +| cluster | Cluster name | +| host | Host name | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| vsphere.host_cpu_utilization | used | percentage | +| vsphere.host_mem_utilization | used | percentage | +| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB | +| vsphere.host_mem_swap_io | in, out | KiB/s | +| vsphere.host_disk_io | read, write | KiB/s | +| vsphere.host_disk_max_latency | latency | milliseconds | +| vsphere.host_net_traffic | received, sent | KiB/s | +| vsphere.host_net_packets | received, sent | packets | +| vsphere.host_net_drops | received, sent | packets | +| vsphere.host_net_errors | received, sent | errors | +| vsphere.host_overall_status | green, red, yellow, gray | status | +| vsphere.host_system_uptime | uptime | seconds | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization | +| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization | +| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization | +| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization | + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/vsphere.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+ +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/vsphere.conf +``` + +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 20 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | vCenter server URL. | | yes | +| host_include | Hosts selector (filter). | | no | +| vm_include | Virtual machines selector (filter). | | no | +| discovery_interval | Hosts and VMs discovery interval. | 300 | no | +| timeout | HTTP request timeout. | 20 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +##### host_include + +Metrics of hosts matching the selector will be collected. + +- Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern". +- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). +- Syntax: + + ```yaml + host_include: + - '/DC1/*' # select all hosts from datacenter DC1 + - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2 + - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3 + ``` + + +##### vm_include + +Metrics of VMs matching the selector will be collected. + +- Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern/VM pattern". +- Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns). +- Syntax: + + ```yaml + vm_include: + - '/DC1/*' # select all VMs from datacenter DC1 + - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2 + - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3 + ``` + + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name : vcenter1 + url : https://203.0.113.1 + username : admin@vsphere.local + password : somepassword + +``` + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name : vcenter1 + url : https://203.0.113.1 + username : admin@vsphere.local + password : somepassword + + - name : vcenter2 + url : https://203.0.113.10 + username : admin@vsphere.local + password : somepassword + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working.
+ +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m vsphere + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/match/match.go b/src/go/collectors/go.d.plugin/modules/vsphere/match/match.go new file mode 100644 index 00000000000000..24f9b865db273c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/match/match.go @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package match + +import ( + "fmt" + "strings" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +type HostMatcher interface { + Match(*rs.Host) bool +} + +type VMMatcher interface { + Match(*rs.VM) bool +} + +type ( + hostDCMatcher struct{ m matcher.Matcher } + hostClusterMatcher struct{ m matcher.Matcher } + hostHostMatcher struct{ m matcher.Matcher } + vmDCMatcher struct{ m matcher.Matcher } + vmClusterMatcher struct{ m matcher.Matcher } + vmHostMatcher struct{ m matcher.Matcher } + vmVMMatcher struct{ m matcher.Matcher } + orHostMatcher struct{ lhs, rhs HostMatcher } + orVMMatcher struct{ lhs, rhs VMMatcher } + andHostMatcher struct{ lhs, rhs HostMatcher } + andVMMatcher struct{ lhs, rhs VMMatcher } +) + +func (m hostDCMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Hier.DC.Name) } +func (m hostClusterMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Hier.Cluster.Name) } +func (m hostHostMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Name) } +func (m vmDCMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.DC.Name) } +func (m vmClusterMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.Cluster.Name) } +func (m vmHostMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.Host.Name) } +func (m vmVMMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Name) } +func (m orHostMatcher) Match(host *rs.Host) bool { return m.lhs.Match(host) || m.rhs.Match(host) } +func (m orVMMatcher) Match(vm *rs.VM) bool { return m.lhs.Match(vm) || m.rhs.Match(vm) } +func (m andHostMatcher) Match(host *rs.Host) bool { return m.lhs.Match(host) && m.rhs.Match(host) } +func (m andVMMatcher) Match(vm *rs.VM) bool { return m.lhs.Match(vm) && m.rhs.Match(vm) } + +func newAndHostMatcher(lhs, rhs HostMatcher, others ...HostMatcher) andHostMatcher { + m := andHostMatcher{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newAndHostMatcher(m, others[0], others[1:]...) + } +} + +func newAndVMMatcher(lhs, rhs VMMatcher, others ...VMMatcher) andVMMatcher { + m := andVMMatcher{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newAndVMMatcher(m, others[0], others[1:]...) + } +} + +func newOrHostMatcher(lhs, rhs HostMatcher, others ...HostMatcher) orHostMatcher { + m := orHostMatcher{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newOrHostMatcher(m, others[0], others[1:]...) + } +} + +func newOrVMMatcher(lhs, rhs VMMatcher, others ...VMMatcher) orVMMatcher { + m := orVMMatcher{lhs: lhs, rhs: rhs} + switch len(others) { + case 0: + return m + default: + return newOrVMMatcher(m, others[0], others[1:]...) 
+ } +} + +type ( + VMIncludes []string + HostIncludes []string +) + +func (vi VMIncludes) Parse() (VMMatcher, error) { + var ms []VMMatcher + for _, v := range vi { + m, err := parseVMInclude(v) + if err != nil { + return nil, err + } + if m == nil { + continue + } + ms = append(ms, m) + } + + switch len(ms) { + case 0: + return nil, nil + case 1: + return ms[0], nil + default: + return newOrVMMatcher(ms[0], ms[1], ms[2:]...), nil + } +} + +func (hi HostIncludes) Parse() (HostMatcher, error) { + var ms []HostMatcher + for _, v := range hi { + m, err := parseHostInclude(v) + if err != nil { + return nil, err + } + if m == nil { + continue + } + ms = append(ms, m) + } + + switch len(ms) { + case 0: + return nil, nil + case 1: + return ms[0], nil + default: + return newOrHostMatcher(ms[0], ms[1], ms[2:]...), nil + } +} + +const ( + datacenterIdx = iota + clusterIdx + hostIdx + vmIdx +) + +func cleanInclude(include string) string { + return strings.Trim(include, "/") +} + +func parseHostInclude(include string) (HostMatcher, error) { + if !isIncludeFormatValid(include) { + return nil, fmt.Errorf("bad include format: %s", include) + } + + include = cleanInclude(include) + parts := strings.Split(include, "/") // /dc/clusterIdx/hostIdx + var ms []HostMatcher + + for i, v := range parts { + m, err := parseSubInclude(v) + if err != nil { + return nil, err + } + switch i { + case datacenterIdx: + ms = append(ms, hostDCMatcher{m}) + case clusterIdx: + ms = append(ms, hostClusterMatcher{m}) + case hostIdx: + ms = append(ms, hostHostMatcher{m}) + } + } + + switch len(ms) { + case 0: + return nil, nil + case 1: + return ms[0], nil + default: + return newAndHostMatcher(ms[0], ms[1], ms[2:]...), nil + } +} + +func parseVMInclude(include string) (VMMatcher, error) { + if !isIncludeFormatValid(include) { + return nil, fmt.Errorf("bad include format: %s", include) + } + + include = cleanInclude(include) + parts := strings.Split(include, "/") // /dc/clusterIdx/hostIdx/vmIdx + var ms []VMMatcher + + for i, v := range parts { + m, err := parseSubInclude(v) + if err != nil { + return nil, err + } + switch i { + case datacenterIdx: + ms = append(ms, vmDCMatcher{m}) + case clusterIdx: + ms = append(ms, vmClusterMatcher{m}) + case hostIdx: + ms = append(ms, vmHostMatcher{m}) + case vmIdx: + ms = append(ms, vmVMMatcher{m}) + } + } + + switch len(ms) { + case 0: + return nil, nil + case 1: + return ms[0], nil + default: + return newAndVMMatcher(ms[0], ms[1], ms[2:]...), nil + } +} + +func parseSubInclude(sub string) (matcher.Matcher, error) { + sub = strings.TrimSpace(sub) + if sub == "" || sub == "!*" { + return matcher.FALSE(), nil + } + if sub == "*" { + return matcher.TRUE(), nil + } + return matcher.NewSimplePatternsMatcher(sub) +} + +func isIncludeFormatValid(line string) bool { + return strings.HasPrefix(line, "/") +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/match/match_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/match/match_test.go new file mode 100644 index 00000000000000..df353c1715f3f7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/match/match_test.go @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package match + +import ( + "strings" + "testing" + + "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/matcher" + + "github.com/stretchr/testify/assert" +) + +var ( + trueHostDC = hostDCMatcher{matcher.TRUE()} + falseHostDC = hostDCMatcher{matcher.FALSE()} + trueVMDC = vmDCMatcher{matcher.TRUE()} + 
falseVMDC = vmDCMatcher{matcher.FALSE()} +) + +func TestOrHostMatcher_Match(t *testing.T) { + tests := map[string]struct { + expected bool + lhs HostMatcher + rhs HostMatcher + }{ + "true, true": {expected: true, lhs: trueHostDC, rhs: trueHostDC}, + "true, false": {expected: true, lhs: trueHostDC, rhs: falseHostDC}, + "false, true": {expected: true, lhs: falseHostDC, rhs: trueHostDC}, + "false, false": {expected: false, lhs: falseHostDC, rhs: falseHostDC}, + } + + var host resources.Host + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := newOrHostMatcher(test.lhs, test.rhs) + assert.Equal(t, test.expected, m.Match(&host)) + }) + } +} + +func TestAndHostMatcher_Match(t *testing.T) { + tests := map[string]struct { + expected bool + lhs HostMatcher + rhs HostMatcher + }{ + "true, true": {expected: true, lhs: trueHostDC, rhs: trueHostDC}, + "true, false": {expected: false, lhs: trueHostDC, rhs: falseHostDC}, + "false, true": {expected: false, lhs: falseHostDC, rhs: trueHostDC}, + "false, false": {expected: false, lhs: falseHostDC, rhs: falseHostDC}, + } + + var host resources.Host + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := newAndHostMatcher(test.lhs, test.rhs) + assert.Equal(t, test.expected, m.Match(&host)) + }) + } +} + +func TestOrVMMatcher_Match(t *testing.T) { + tests := map[string]struct { + expected bool + lhs VMMatcher + rhs VMMatcher + }{ + "true, true": {expected: true, lhs: trueVMDC, rhs: trueVMDC}, + "true, false": {expected: true, lhs: trueVMDC, rhs: falseVMDC}, + "false, true": {expected: true, lhs: falseVMDC, rhs: trueVMDC}, + "false, false": {expected: false, lhs: falseVMDC, rhs: falseVMDC}, + } + + var vm resources.VM + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := newOrVMMatcher(test.lhs, test.rhs) + assert.Equal(t, test.expected, m.Match(&vm)) + }) + } +} + +func TestAndVMMatcher_Match(t *testing.T) { + tests := map[string]struct { + expected bool + lhs VMMatcher + rhs VMMatcher + }{ + "true, true": {expected: true, lhs: trueVMDC, rhs: trueVMDC}, + "true, false": {expected: false, lhs: trueVMDC, rhs: falseVMDC}, + "false, true": {expected: false, lhs: falseVMDC, rhs: trueVMDC}, + "false, false": {expected: false, lhs: falseVMDC, rhs: falseVMDC}, + } + + var vm resources.VM + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m := newAndVMMatcher(test.lhs, test.rhs) + assert.Equal(t, test.expected, m.Match(&vm)) + }) + } +} + +func TestHostIncludes_Parse(t *testing.T) { + tests := map[string]struct { + valid bool + expected HostMatcher + }{ + "": {valid: false}, + "*/C1/H1": {valid: false}, + "/": {valid: true, expected: falseHostDC}, + "/*": {valid: true, expected: trueHostDC}, + "/!*": {valid: true, expected: falseHostDC}, + "/!*/": {valid: true, expected: falseHostDC}, + "/!*/ ": { + valid: true, + expected: andHostMatcher{ + lhs: falseHostDC, + rhs: hostClusterMatcher{matcher.FALSE()}, + }, + }, + "/DC1* DC2* !*/Cluster*": { + valid: true, + expected: andHostMatcher{ + lhs: hostDCMatcher{mustSP("DC1* DC2* !*")}, + rhs: hostClusterMatcher{mustSP("Cluster*")}, + }, + }, + "/*/*/HOST1*": { + valid: true, + expected: andHostMatcher{ + lhs: andHostMatcher{ + lhs: trueHostDC, + rhs: hostClusterMatcher{matcher.TRUE()}, + }, + rhs: hostHostMatcher{mustSP("HOST1*")}, + }, + }, + "/*/*/HOST1*/*/*": { + valid: true, + expected: andHostMatcher{ + lhs: andHostMatcher{ + lhs: trueHostDC, + rhs: hostClusterMatcher{matcher.TRUE()}, + }, + rhs: hostHostMatcher{mustSP("HOST1*")}, + }, + 
}, + "[/DC1*,/DC2*]": { + valid: true, + expected: orHostMatcher{ + lhs: hostDCMatcher{mustSP("DC1*")}, + rhs: hostDCMatcher{mustSP("DC2*")}, + }, + }, + "[/DC1*,/DC2*,/DC3*/Cluster1*/H*]": { + valid: true, + expected: orHostMatcher{ + lhs: orHostMatcher{ + lhs: hostDCMatcher{mustSP("DC1*")}, + rhs: hostDCMatcher{mustSP("DC2*")}, + }, + rhs: andHostMatcher{ + lhs: andHostMatcher{ + lhs: hostDCMatcher{mustSP("DC3*")}, + rhs: hostClusterMatcher{mustSP("Cluster1*")}, + }, + rhs: hostHostMatcher{mustSP("H*")}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + includes := prepareIncludes(name) + m, err := HostIncludes(includes).Parse() + + if !test.valid { + assert.Error(t, err) + } else { + assert.Equal(t, test.expected, m) + } + }) + } +} + +func TestVMIncludes_Parse(t *testing.T) { + tests := map[string]struct { + valid bool + includes []string + expected VMMatcher + }{ + "": {valid: false}, + "*/C1/H1/V1": {valid: false}, + "/*": {valid: true, expected: trueVMDC}, + "/!*": {valid: true, expected: falseVMDC}, + "/!*/": {valid: true, expected: falseVMDC}, + "/!*/ ": { + valid: true, + expected: andVMMatcher{ + lhs: falseVMDC, + rhs: vmClusterMatcher{matcher.FALSE()}, + }, + }, + "/DC1* DC2* !*/Cluster*": { + valid: true, + expected: andVMMatcher{ + lhs: vmDCMatcher{mustSP("DC1* DC2* !*")}, + rhs: vmClusterMatcher{mustSP("Cluster*")}, + }, + }, + "/*/*/HOST1": { + valid: true, + expected: andVMMatcher{ + lhs: andVMMatcher{ + lhs: trueVMDC, + rhs: vmClusterMatcher{matcher.TRUE()}, + }, + rhs: vmHostMatcher{mustSP("HOST1")}, + }, + }, + "/*/*/HOST1*/*/*": { + valid: true, + expected: andVMMatcher{ + lhs: andVMMatcher{ + lhs: andVMMatcher{ + lhs: trueVMDC, + rhs: vmClusterMatcher{matcher.TRUE()}, + }, + rhs: vmHostMatcher{mustSP("HOST1*")}, + }, + rhs: vmVMMatcher{matcher.TRUE()}, + }, + }, + "[/DC1*,/DC2*]": { + valid: true, + expected: orVMMatcher{ + lhs: vmDCMatcher{mustSP("DC1*")}, + rhs: vmDCMatcher{mustSP("DC2*")}, + }, + }, + "[/DC1*,/DC2*,/DC3*/Cluster1*/H*/VM*]": { + valid: true, + expected: orVMMatcher{ + lhs: orVMMatcher{ + lhs: vmDCMatcher{mustSP("DC1*")}, + rhs: vmDCMatcher{mustSP("DC2*")}, + }, + rhs: andVMMatcher{ + lhs: andVMMatcher{ + lhs: andVMMatcher{ + lhs: vmDCMatcher{mustSP("DC3*")}, + rhs: vmClusterMatcher{mustSP("Cluster1*")}, + }, + rhs: vmHostMatcher{mustSP("H*")}, + }, + rhs: vmVMMatcher{mustSP("VM*")}, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + includes := prepareIncludes(name) + m, err := VMIncludes(includes).Parse() + + if !test.valid { + assert.Error(t, err) + } else { + assert.Equal(t, test.expected, m) + } + }) + } +} + +func prepareIncludes(include string) []string { + trimmed := strings.Trim(include, "[]") + return strings.Split(trimmed, ",") +} + +func mustSP(expr string) matcher.Matcher { + return matcher.Must(matcher.NewSimplePatternsMatcher(expr)) +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/metadata.yaml b/src/go/collectors/go.d.plugin/modules/vsphere/metadata.yaml new file mode 100644 index 00000000000000..db35f4a8c5a071 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/metadata.yaml @@ -0,0 +1,439 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-vsphere + plugin_name: go.d.plugin + module_name: vsphere + monitored_instance: + name: VMware vCenter Server + link: https://www.vmware.com/products/vcenter-server.html + icon_filename: vmware.svg + categories: + - data-collection.containers-and-vms + keywords: + 
- vmware
+        - esxi
+        - vcenter
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: true
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors host and VM performance statistics from `vCenter` servers.
+
+          > **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.
+          > go.d.plugin must be restarted to resume collection.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: |
+            The default `update_every` is 20 seconds; decreasing it makes no sense because
+            **VMware generates real-time statistics at a 20-second granularity**.
+
+            For big installations, 20 seconds is likely not enough, and the value should be tuned upwards.
+
+            To get a better picture, we recommend running the collector in debug mode and measuring how long metric collection takes.
+
+            <details>
+            <summary>Example (unrelated debug lines removed)</summary>
+
+            ```
+            [ilyam@pc]$ ./go.d.plugin -d -m vsphere
+            [ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process
+            [ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms
+            [ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms
+            [ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms
+            [ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms
+            [ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms
+            [ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms
+            [ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process
+            [ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3µs
+            [ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process
+            [ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522µs
+            [ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process
+            [ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts
+            [ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms
+            [ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973µs
+            [ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process
+            [ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms
+            [ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms
+            [ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s
+            [ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process
+            [ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms
+            [ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms
+            [ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms
+            ```
+
+            </details>
+
+            Here you can see that discovery took `525.614041ms` and metric collection took `154.77997ms`.
+            Discovery runs in a separate thread and does not affect collection.
+            Adjust the `update_every` and `timeout` parameters based on these numbers.
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/vsphere.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 20
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: vCenter server URL.
+              default_value: ""
+              required: true
+            - name: host_include
+              description: Hosts selector (filter).
+              default_value: ""
+              required: false
+              detailed_description: |
+                Metrics of hosts matching the selector will be collected.
+
+                - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern".
+                - Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).
+                - Example:
+
+                ```yaml
+                host_include:
+                  - '/DC1/*' # select all hosts from datacenter DC1
+                  - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2
+                  - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3
+                ```
+            - name: vm_include
+              description: Virtual machines selector (filter).
+              default_value: ""
+              required: false
+              detailed_description: |
+                Metrics of VMs matching the selector will be collected.
+
+                - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern/VM pattern".
+                - Match pattern syntax: [simple patterns](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md#simple-patterns).
+                - Example:
+
+                ```yaml
+                vm_include:
+                  - '/DC1/*' # select all VMs from datacenter DC1
+                  - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2
+                  - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3
+                ```
+            - name: discovery_interval
+              description: Hosts and VMs discovery interval.
+              default_value: 300
+              required: false
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 20
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_password
+              description: Password for proxy basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: not_follow_redirects
+              description: Redirect handling policy. Controls whether the client follows redirects.
+              default_value: no
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
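+              # Note: vCenter commonly ships with a self-signed certificate. Prefer
+              # supplying its CA via tls_ca (below); enable tls_skip_verify only in
+              # test environments, as it disables certificate validation entirely.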
+ default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name : vcenter1 + url : https://203.0.113.1 + username : admin@vsphere.local + password : somepassword + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name : vcenter1 + url : https://203.0.113.1 + username : admin@vsphere.local + password : somepassword + + - name : vcenter2 + url : https://203.0.113.10 + username : admin@vsphere.local + password : somepassword + troubleshooting: + problems: + list: [] + alerts: + - name: vsphere_vm_cpu_utilization + metric: vsphere.vm_cpu_utilization + info: Virtual Machine CPU utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf + - name: vsphere_vm_mem_usage + metric: vsphere.vm_mem_utilization + info: Virtual Machine memory utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf + - name: vsphere_host_cpu_utilization + metric: vsphere.host_cpu_utilization + info: ESXi Host CPU utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf + - name: vsphere_host_mem_utilization + metric: vsphere.host_mem_utilization + info: ESXi Host memory utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: virtual machine + description: These metrics refer to the Virtual Machine. 
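+          # All charts in this scope carry the labels below, so VMs can be filtered
+          # by their full datacenter/cluster/host/vm placement.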
+ labels: + - name: datacenter + description: Datacenter name + - name: cluster + description: Cluster name + - name: host + description: Host name + - name: vm + description: Virtual Machine name + metrics: + - name: vsphere.vm_cpu_utilization + description: Virtual Machine CPU utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: vsphere.vm_mem_utilization + description: Virtual Machine memory utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: vsphere.vm_mem_usage + description: Virtual Machine memory usage + unit: KiB + chart_type: line + dimensions: + - name: granted + - name: consumed + - name: active + - name: shared + - name: vsphere.vm_mem_swap_usage + description: Virtual Machine VMKernel memory swap usage + unit: KiB + chart_type: line + dimensions: + - name: swapped + - name: vsphere.vm_mem_swap_io + description: Virtual Machine VMKernel memory swap IO + unit: KiB/s + chart_type: area + dimensions: + - name: in + - name: out + - name: vsphere.vm_disk_io + description: Virtual Machine disk IO + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: vsphere.vm_disk_max_latency + description: Virtual Machine disk max latency + unit: milliseconds + chart_type: line + dimensions: + - name: latency + - name: vsphere.vm_net_traffic + description: Virtual Machine network traffic + unit: KiB/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: vsphere.vm_net_packets + description: Virtual Machine network packets + unit: packets + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.vm_net_drops + description: Virtual Machine network dropped packets + unit: packets + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.vm_overall_status + description: Virtual Machine overall alarm status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: gray + - name: vsphere.vm_system_uptime + description: Virtual Machine system uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime + - name: host + description: These metrics refer to the ESXi host. 
+ labels: + - name: datacenter + description: Datacenter name + - name: cluster + description: Cluster name + - name: host + description: Host name + metrics: + - name: vsphere.host_cpu_utilization + description: ESXi Host CPU utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: vsphere.host_mem_utilization + description: ESXi Host memory utilization + unit: percentage + chart_type: line + dimensions: + - name: used + - name: vsphere.host_mem_usage + description: ESXi Host memory usage + unit: KiB + chart_type: line + dimensions: + - name: granted + - name: consumed + - name: active + - name: shared + - name: sharedcommon + - name: vsphere.host_mem_swap_io + description: ESXi Host VMKernel memory swap IO + unit: KiB/s + chart_type: area + dimensions: + - name: in + - name: out + - name: vsphere.host_disk_io + description: ESXi Host disk IO + unit: KiB/s + chart_type: area + dimensions: + - name: read + - name: write + - name: vsphere.host_disk_max_latency + description: ESXi Host disk max latency + unit: milliseconds + chart_type: line + dimensions: + - name: latency + - name: vsphere.host_net_traffic + description: ESXi Host network traffic + unit: KiB/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.host_net_packets + description: ESXi Host network packets + unit: packets + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.host_net_drops + description: ESXi Host network drops + unit: packets + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.host_net_errors + description: ESXi Host network errors + unit: errors + chart_type: line + dimensions: + - name: received + - name: sent + - name: vsphere.host_overall_status + description: ESXi Host overall alarm status + unit: status + chart_type: line + dimensions: + - name: green + - name: red + - name: yellow + - name: gray + - name: vsphere.host_system_uptime + description: ESXi Host system uptime + unit: seconds + chart_type: line + dimensions: + - name: uptime diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/metrics.txt b/src/go/collectors/go.d.plugin/modules/vsphere/metrics.txt new file mode 100644 index 00000000000000..30c1f55e28e23d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/metrics.txt @@ -0,0 +1,328 @@ +// [units, statsType, hasInstance] + +/* + virtualMachine: + + cpu.run.summation [ms, delta, true] [Time the virtual machine is scheduled to run] + cpu.ready.summation [ms, delta, true] [Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval] + cpu.usagemhz.average [MHz, rate, true] [CPU usage in megahertz during the interval] + cpu.demandEntitlementRatio.latest [%, absolute, false] [CPU resource entitlement to CPU demand ratio (in percents)] + cpu.used.summation [ms, delta, true] [Total CPU usage] + cpu.idle.summation [ms, delta, true] [Total time that the CPU spent in an idle state] + cpu.maxlimited.summation [ms, delta, true] [Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit setting] + cpu.overlap.summation [ms, delta, true] [Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machines] + cpu.system.summation [ms, delta, false] [Amount of time spent on system processes on each virtual CPU in the virtual machine] + cpu.demand.average [MHz, absolute, false] [The amount of CPU resources a virtual 
machine would use if there were no CPU contention or CPU limit] + cpu.wait.summation [ms, delta, true] [Total CPU time spent in wait state] + cpu.latency.average [%, rate, false] [Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s)] + cpu.costop.summation [ms, delta, true] [Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints] + cpu.entitlement.latest [MHz, absolute, false] [CPU resources devoted by the ESX scheduler] + cpu.readiness.average [%, rate, true] [Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU] + cpu.swapwait.summation [ms, delta, true] [CPU time spent waiting for swap-in] + cpu.usage.average [%, rate, false] [CPU usage as a percentage during the interval] + + datastore.totalReadLatency.average [ms, absolute, true] [The average time a read from the datastore takes] + datastore.read.average [KBps, rate, true] [Rate of reading data from the datastore] + datastore.write.average [KBps, rate, true] [Rate of writing data to the datastore] + datastore.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all datastores used by the host] + datastore.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the datastore during the collection interval] + datastore.totalWriteLatency.average [ms, absolute, true] [The average time a write to the datastore takes] + datastore.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the datastore during the collection interval] + + disk.read.average [KBps, rate, true] [Average number of kilobytes read from the disk each second during the collection interval] + disk.commands.summation [num, delta, true] [Number of SCSI commands issued during the collection interval] + disk.commandsAborted.summation [num, delta, true] [Number of SCSI commands aborted during the collection interval] + disk.busResets.summation [num, delta, true] [Number of SCSI-bus reset commands issued during the collection interval] + disk.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all disks used by the host] + disk.write.average [KBps, rate, true] [Average number of kilobytes written to disk each second during the collection interval] + disk.numberReadAveraged.average [num, rate, true] [Average number of disk reads per second during the collection interval] + disk.usage.average [KBps, rate, false] [Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.] 
+ disk.numberWrite.summation [num, delta, true] [Number of disk writes during the collection interval] + disk.commandsAveraged.average [num, rate, true] [Average number of SCSI commands issued per second during the collection interval] + disk.numberWriteAveraged.average [num, rate, true] [Average number of disk writes per second during the collection interval] + disk.numberRead.summation [num, delta, true] [Number of disk reads during the collection interval] + + mem.vmmemctltarget.average [KB, absolute, false] [Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi] + mem.overhead.average [KB, absolute, false] [host physical memory consumed by ESXi data structures for running the virtual machines] + mem.zipSaved.latest [KB, absolute, false] [host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memory] + mem.overheadMax.average [KB, absolute, false] [host physical memory reserved by ESXi, for its data structures, for running the virtual machine] + mem.consumed.average [KB, absolute, false] [Amount of host physical memory consumed for backing up guest physical memory pages] + mem.overheadTouched.average [KB, absolute, false] [Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXi] + mem.compressionRate.average [KBps, rate, false] [Rate of guest physical memory page compression by ESXi] + mem.swapin.average [KB, absolute, false] [Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter] + mem.swaptarget.average [KB, absolute, false] [Amount of memory that ESXi needs to reclaim by swapping] + mem.activewrite.average [KB, absolute, false] [Amount of guest physical memory that is being actively written by guest. Activeness is estimated by ESXi] + mem.decompressionRate.average [KBps, rate, false] [Rate of guest physical memory decompression] + mem.entitlement.average [KB, absolute, false] [Amount of host physical memory the virtual machine deserves, as determined by ESXi] + mem.swapoutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the swap space] + mem.swapout.average [KB, absolute, false] [Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.] 
+ mem.shared.average [KB, absolute, false] [Amount of guest physical memory that is shared within a single virtual machine or across virtual machines] + mem.compressed.average [KB, absolute, false] [Guest physical memory pages that have undergone memory compression] + mem.llSwapOutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the host swap cache] + mem.latency.average [%, absolute, false] [Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory] + mem.llSwapInRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the host swap cache] + mem.zero.average [KB, absolute, false] [Guest physical memory pages whose content is 0x00] + mem.swapinRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the swap space] + mem.llSwapUsed.average [KB, absolute, false] [Storage space consumed on the host swap cache for storing swapped guest physical memory pages] + mem.vmmemctl.average [KB, absolute, false] [Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest] + mem.active.average [KB, absolute, false] [Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi] + mem.granted.average [KB, absolute, false] [Amount of host physical memory or physical memory that is mapped for a virtual machine or a host] + mem.usage.average [%, absolute, false] [Percentage of host physical memory that has been consumed] + mem.zipped.latest [KB, absolute, false] [Amount of guest physical memory pages compressed by ESXi] + mem.swapped.average [KB, absolute, false] [Amount of guest physical memory that is swapped out to the swap space] + + net.droppedTx.summation [num, delta, true] [Number of transmits dropped] + net.bytesTx.average [KBps, rate, true] [Average amount of data transmitted per second] + net.transmitted.average [KBps, rate, true] [Average rate at which data was transmitted during the interval] + net.droppedRx.summation [num, delta, true] [Number of receives dropped] + net.bytesRx.average [KBps, rate, true] [Average amount of data received per second] + net.usage.average [KBps, rate, true] [Network utilization (combined transmit-rates and receive-rates) during the interval] + net.multicastRx.summation [num, delta, true] [Number of multicast packets received during the sampling interval] + net.broadcastTx.summation [num, delta, true] [Number of broadcast packets transmitted during the sampling interval] + net.received.average [KBps, rate, true] [Average rate at which data was received during the interval] + net.broadcastRx.summation [num, delta, true] [Number of broadcast packets received during the sampling interval] + net.pnicBytesRx.average [KBps, rate, true] [pnicBytesRx] + net.pnicBytesTx.average [KBps, rate, true] [pnicBytesTx] + net.multicastTx.summation [num, delta, true] [Number of multicast packets transmitted during the sampling interval] + net.packetsTx.summation [num, delta, true] [Number of packets transmitted during the interval] + net.packetsRx.summation [num, delta, true] [Number of packets received during the interval] + + power.energy.summation [J, delta, false] [Total energy used since last stats reset] + power.power.average [W, rate, false] [Current power usage] + + rescpu.actpk5.latest [%, absolute, false] [CPU active peak over 5 minutes] + rescpu.actpk15.latest [%, absolute, false] [CPU active peak over 15 minutes] + 
rescpu.sampleCount.latest [num, absolute, false] [Group CPU sample count] + rescpu.runav15.latest [%, absolute, false] [CPU running average over 15 minutes] + rescpu.actav1.latest [%, absolute, false] [CPU active average over 1 minute] + rescpu.runpk1.latest [%, absolute, false] [CPU running peak over 1 minute] + rescpu.actav5.latest [%, absolute, false] [CPU active average over 5 minutes] + rescpu.maxLimited5.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 5 minutes] + rescpu.maxLimited1.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 1 minute] + rescpu.runav5.latest [%, absolute, false] [CPU running average over 5 minutes] + rescpu.samplePeriod.latest [ms, absolute, false] [Group CPU sample period] + rescpu.runpk15.latest [%, absolute, false] [CPU running peak over 15 minutes] + rescpu.maxLimited15.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 15 minutes] + rescpu.actav15.latest [%, absolute, false] [CPU active average over 15 minutes] + rescpu.runav1.latest [%, absolute, false] [CPU running average over 1 minute] + rescpu.runpk5.latest [%, absolute, false] [CPU running peak over 5 minutes] + rescpu.actpk1.latest [%, absolute, false] [CPU active peak over 1 minute] + + sys.uptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last system startup] + sys.heartbeat.latest [num, absolute, false] [Number of heartbeats issued per virtual machine during the interval] + sys.osUptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last operating system boot-up] + + virtualDisk.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the virtual disk during the collection interval] + virtualDisk.largeSeeks.latest [num, absolute, true] [Number of seeks during the interval that were greater than 8192 LBNs apart] + virtualDisk.readOIO.latest [num, absolute, true] [Average number of outstanding read requests to the virtual disk during the collection interval] + virtualDisk.mediumSeeks.latest [num, absolute, true] [Number of seeks during the interval that were between 64 and 8192 LBNs apart] + virtualDisk.write.average [KBps, rate, true] [Rate of writing data to the virtual disk] + virtualDisk.smallSeeks.latest [num, absolute, true] [Number of seeks during the interval that were less than 64 LBNs apart] + virtualDisk.read.average [KBps, rate, true] [Rate of reading data from the virtual disk] + virtualDisk.writeLatencyUS.latest [µs, absolute, true] [Write latency in microseconds] + virtualDisk.writeOIO.latest [num, absolute, true] [Average number of outstanding write requests to the virtual disk during the collection interval] + virtualDisk.totalWriteLatency.average [ms, absolute, true] [The average time a write to the virtual disk takes] + virtualDisk.readLoadMetric.latest [num, absolute, true] [Storage DRS virtual disk metric for the read workload model] + virtualDisk.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the virtual disk during the collection interval] + virtualDisk.writeLoadMetric.latest [num, absolute, true] [Storage DRS virtual disk metric for the write workload model] + virtualDisk.totalReadLatency.average [ms, absolute, true] [The average time a read from the virtual disk takes] + virtualDisk.readIOSize.latest [num, absolute, true] [Average read request size in bytes] + 
virtualDisk.writeIOSize.latest [num, absolute, true] [Average write request size in bytes] + virtualDisk.readLatencyUS.latest [µs, absolute, true] [Read latency in microseconds] +*/ + +/* + HOST: + + cpu.usage.average [%, rate, true] [CPU usage as a percentage during the interval] + cpu.wait.summation [ms, delta, false] [Total CPU time spent in wait state] + cpu.ready.summation [ms, delta, false] [Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval] + cpu.used.summation [ms, delta, true] [Total CPU usage] + cpu.demand.average [MHz, absolute, false] [The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limit] + cpu.idle.summation [ms, delta, true] [Total time that the CPU spent in an idle state] + cpu.latency.average [%, rate, false] [Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s)] + cpu.utilization.average [%, rate, true] [CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)] + cpu.coreUtilization.average [%, rate, true] [CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)] + cpu.costop.summation [ms, delta, false] [Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints] + cpu.totalCapacity.average [MHz, absolute, false] [Total CPU capacity reserved by and available for virtual machines] + cpu.usagemhz.average [MHz, rate, false] [CPU usage in megahertz during the interval] + cpu.swapwait.summation [ms, delta, false] [CPU time spent waiting for swap-in] + cpu.reservedCapacity.average [MHz, absolute, false] [Total CPU capacity reserved by virtual machines] + cpu.readiness.average [%, rate, false] [Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU] + + datastore.datastoreReadLoadMetric.latest [num, absolute, true] [Storage DRS datastore metric for read workload model] + datastore.datastoreNormalReadLatency.latest [num, absolute, true] [Storage DRS datastore normalized read latency] + datastore.datastoreWriteLoadMetric.latest [num, absolute, true] [Storage DRS datastore metric for write workload model] + datastore.datastoreMaxQueueDepth.latest [num, absolute, true] [Storage I/O Control datastore maximum queue depth] + datastore.totalReadLatency.average [ms, absolute, true] [The average time a read from the datastore takes] + datastore.datastoreWriteOIO.latest [num, absolute, true] [Storage DRS datastore outstanding write requests] + datastore.datastoreReadIops.latest [num, absolute, true] [Storage DRS datastore read I/O rate] + datastore.sizeNormalizedDatastoreLatency.average [µs, absolute, true] [Storage I/O Control size-normalized I/O latency] + datastore.datastoreIops.average [num, absolute, true] [Storage I/O Control aggregated IOPS] + datastore.datastoreVMObservedLatency.latest [µs, absolute, true] [The average datastore latency as seen by virtual machines] + datastore.unmapIOs.summation [num, delta, true] [unmapIOs] + datastore.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the datastore during the collection interval] + datastore.datastoreNormalWriteLatency.latest [num, absolute, true] [Storage DRS datastore normalized 
write latency] + datastore.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the datastore during the collection interval] + datastore.unmapSize.summation [MB, delta, true] [unmapSize] + datastore.datastoreReadOIO.latest [num, absolute, true] [Storage DRS datastore outstanding read requests] + datastore.write.average [KBps, rate, true] [Rate of writing data to the datastore] + datastore.totalWriteLatency.average [ms, absolute, true] [The average time a write to the datastore takes] + datastore.datastoreWriteIops.latest [num, absolute, true] [Storage DRS datastore write I/O rate] + datastore.datastoreReadBytes.latest [num, absolute, true] [Storage DRS datastore bytes read] + datastore.read.average [KBps, rate, true] [Rate of reading data from the datastore] + datastore.siocActiveTimePercentage.average [%, absolute, true] [Percentage of time Storage I/O Control actively controlled datastore latency] + datastore.datastoreWriteBytes.latest [num, absolute, true] [Storage DRS datastore bytes written] + datastore.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all datastores used by the host] + + disk.queueReadLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI read command, during the collection interval] + disk.numberReadAveraged.average [num, rate, true] [Average number of disk reads per second during the collection interval] + disk.numberRead.summation [num, delta, true] [Number of disk reads during the collection interval] + disk.queueWriteLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI write command, during the collection interval] + disk.totalWriteLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI write command issued by the guest OS to the virtual machine] + disk.kernelWriteLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI write command] + disk.read.average [KBps, rate, true] [Average number of kilobytes read from the disk each second during the collection interval] + disk.usage.average [KBps, rate, false] [Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.] 
+ disk.kernelLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI command] + disk.commandsAveraged.average [num, rate, true] [Average number of SCSI commands issued per second during the collection interval] + disk.numberWrite.summation [num, delta, true] [Number of disk writes during the collection interval] + disk.write.average [KBps, rate, true] [Average number of kilobytes written to disk each second during the collection interval] + disk.queueLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval] + disk.busResets.summation [num, delta, true] [Number of SCSI-bus reset commands issued during the collection interval] + disk.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all disks used by the host] + disk.kernelReadLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI read command] + disk.deviceLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to complete a SCSI command from the physical device] + disk.totalLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine] + disk.commands.summation [num, delta, true] [Number of SCSI commands issued during the collection interval] + disk.numberWriteAveraged.average [num, rate, true] [Average number of disk writes per second during the collection interval] + disk.totalReadLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI read command issued from the guest OS to the virtual machine] + disk.maxQueueDepth.average [num, absolute, true] [Maximum queue depth] + disk.deviceWriteLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to write to the physical device] + disk.commandsAborted.summation [num, delta, true] [Number of SCSI commands aborted during the collection interval] + disk.deviceReadLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to read from the physical device] + + hbr.hbrNetRx.average [KBps, rate, false] [Average amount of data received per second] + hbr.hbrNumVms.average [num, absolute, false] [Current number of replicated virtual machines] + hbr.hbrNetTx.average [KBps, rate, false] [Average amount of data transmitted per second] + + mem.reservedCapacity.average [MB, absolute, false] [Memory reservation consumed by powered-on virtual machines] + mem.swapinRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the swap space] + mem.zero.average [KB, absolute, false] [Guest physical memory pages whose content is 0x00] + mem.heapfree.average [KB, absolute, false] [Free address space in the heap of ESXi. This is less than or equal to Heap] + mem.sharedcommon.average [KB, absolute, false] [Amount of host physical memory that backs shared guest physical memory (Shared)] + mem.swapin.average [KB, absolute, false] [Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. 
This value is less than or equal to the 'Swap out' counter] + mem.unreserved.average [KB, absolute, false] [Amount by which reservation can be raised] + mem.lowfreethreshold.average [KB, absolute, false] [Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooning] + mem.state.latest [num, absolute, false] [Current memory availability state of ESXi. Possible values are high, clear, soft, hard, low. The state value determines the techniques used for memory reclamation from virtual machines] + mem.decompressionRate.average [KBps, rate, false] [Rate of guest physical memory decompression] + mem.swapout.average [KB, absolute, false] [Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.] + mem.vmfs.pbc.capMissRatio.latest [%, absolute, false] [Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cache] + mem.swapused.average [KB, absolute, false] [Swap storage space consumed] + mem.consumed.average [KB, absolute, false] [Amount of host physical memory consumed for backing up guest physical memory pages] + mem.llSwapOutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the host swap cache] + mem.llSwapOut.average [KB, absolute, false] [Amount of guest physical memory swapped out to the host swap cache] + mem.swapoutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the swap space] + mem.llSwapIn.average [KB, absolute, false] [Amount of guest physical memory swapped in from host cache] + mem.active.average [KB, absolute, false] [Amount of guest physical memory that is being actively read or written by guest. 
Activeness is estimated by ESXi] + mem.latency.average [%, absolute, false] [Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory] + mem.llSwapInRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the host swap cache] + mem.vmfs.pbc.sizeMax.latest [MB, absolute, false] [Maximum size the VMFS Pointer Block Cache can grow to] + mem.vmmemctl.average [KB, absolute, false] [Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest] + mem.vmfs.pbc.size.latest [MB, absolute, false] [Space used for holding VMFS Pointer Blocks in memory] + mem.overhead.average [KB, absolute, false] [host physical memory consumed by ESXi data structures for running the virtual machines] + mem.vmfs.pbc.workingSet.latest [TB, absolute, false] [Amount of file blocks whose addresses are cached in the VMFS PB Cache] + mem.shared.average [KB, absolute, false] [Amount of guest physical memory that is shared within a single virtual machine or across virtual machines] + mem.usage.average [%, absolute, false] [Percentage of host physical memory that has been consumed] + mem.vmfs.pbc.workingSetMax.latest [TB, absolute, false] [Maximum amount of file blocks whose addresses are cached in the VMFS PB Cache] + mem.sysUsage.average [KB, absolute, false] [Amount of host physical memory consumed by VMkernel] + mem.compressed.average [KB, absolute, false] [Guest physical memory pages that have undergone memory compression] + mem.vmfs.pbc.overhead.latest [KB, absolute, false] [Amount of VMFS heap used by the VMFS PB Cache] + mem.totalCapacity.average [MB, absolute, false] [Total reservation, available and consumed, for powered-on virtual machines] + mem.activewrite.average [KB, absolute, false] [Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXi] + mem.granted.average [KB, absolute, false] [Amount of host physical memory or physical memory that is mapped for a virtual machine or a host] + mem.compressionRate.average [KBps, rate, false] [Rate of guest physical memory page compression by ESXi] + mem.heap.average [KB, absolute, false] [Virtual address space of ESXi that is dedicated to its heap] + mem.llSwapUsed.average [KB, absolute, false] [Storage space consumed on the host swap cache for storing swapped guest physical memory pages] + + net.bytesTx.average [KBps, rate, true] [Average amount of data transmitted per second] + net.droppedRx.summation [num, delta, true] [Number of receives dropped] + net.transmitted.average [KBps, rate, true] [Average rate at which data was transmitted during the interval] + net.multicastTx.summation [num, delta, true] [Number of multicast packets transmitted during the sampling interval] + net.errorsTx.summation [num, delta, true] [Number of packets with errors transmitted during the sampling interval] + net.unknownProtos.summation [num, delta, true] [Number of frames with unknown protocol received during the sampling interval] + net.multicastRx.summation [num, delta, true] [Number of multicast packets received during the sampling interval] + net.broadcastTx.summation [num, delta, true] [Number of broadcast packets transmitted during the sampling interval] + net.received.average [KBps, rate, true] [Average rate at which data was received during the interval] + net.droppedTx.summation [num, delta, true] [Number of transmits dropped] + net.usage.average [KBps, rate, true] [Network utilization (combined transmit-rates and receive-rates) during the interval] + net.broadcastRx.summation [num, delta, true] [Number of broadcast packets received during the sampling interval] + net.packetsRx.summation [num, delta, true] [Number of packets received during the interval] + net.packetsTx.summation [num, delta, true] [Number of packets transmitted during the interval] + net.errorsRx.summation [num, delta, true] [Number of packets with errors received during the sampling interval] + net.bytesRx.average [KBps, rate, true] [Average amount of data received per second] + + power.energy.summation [J, delta, false] [Total energy used since last stats reset] + power.power.average [W, rate, false] [Current power usage] + power.powerCap.average [W, absolute, false] [Maximum allowed power usage] + + rescpu.sampleCount.latest [num, absolute, false] [Group CPU sample count] + rescpu.maxLimited5.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 5 minutes] + rescpu.runav1.latest [%, absolute, false] [CPU running average over 1 minute] + rescpu.actpk5.latest [%, absolute, false] [CPU active peak over 5 minutes] + rescpu.runav5.latest [%, absolute, false] [CPU running average over 5 minutes] + rescpu.actav1.latest [%, absolute, false] [CPU active average over 1 minute] + rescpu.runav15.latest [%, absolute, false] [CPU running average over 15 minutes] + rescpu.actav15.latest [%, absolute, false] [CPU active average over 15 minutes] + rescpu.actav5.latest [%, absolute, false] [CPU active average over 5 minutes] + rescpu.maxLimited15.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 15 minutes] + rescpu.actpk1.latest [%, absolute, false] [CPU active peak over 1 minute] + rescpu.runpk15.latest [%, absolute, false] [CPU running peak over 15 minutes] + rescpu.samplePeriod.latest [ms, absolute, false] [Group 
CPU sample period] + rescpu.actpk15.latest [%, absolute, false] [CPU active peak over 15 minutes] + rescpu.runpk5.latest [%, absolute, false] [CPU running peak over 5 minutes] + rescpu.runpk1.latest [%, absolute, false] [CPU running peak over 1 minute] + rescpu.maxLimited1.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 1 minute] + + storageAdapter.read.average [KBps, rate, true] [Rate of reading data by the storage adapter] + storageAdapter.commandsAveraged.average [num, rate, true] [Average number of commands issued per second by the storage adapter during the collection interval] + storageAdapter.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second by the storage adapter during the collection interval] + storageAdapter.totalWriteLatency.average [ms, absolute, true] [The average time a write by the storage adapter takes] + storageAdapter.totalReadLatency.average [ms, absolute, true] [The average time a read by the storage adapter takes] + storageAdapter.write.average [KBps, rate, true] [Rate of writing data by the storage adapter] + storageAdapter.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second by the storage adapter during the collection interval] + storageAdapter.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all storage adapters used by the host] + storagePath.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second on the storage path during the collection interval] + storagePath.write.average [KBps, rate, true] [Rate of writing data on the storage path] + storagePath.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all storage paths used by the host] + storagePath.read.average [KBps, rate, true] [Rate of reading data on the storage path] + storagePath.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second on the storage path during the collection interval] + storagePath.totalWriteLatency.average [ms, absolute, true] [The average time a write issued on the storage path takes] + storagePath.totalReadLatency.average [ms, absolute, true] [The average time a read issued on the storage path takes] + storagePath.commandsAveraged.average [num, rate, true] [Average number of commands issued per second on the storage path during the collection interval] + + sys.resourceMemTouched.latest [KB, absolute, true] [Memory touched by the system resource group] + sys.resourceMemSwapped.latest [KB, absolute, true] [Memory swapped out by the system resource group] + sys.resourceMemShared.latest [KB, absolute, true] [Memory saved due to sharing by the system resource group] + sys.resourceMemZero.latest [KB, absolute, true] [Zero filled memory used by the system resource group] + sys.resourceMemMapped.latest [KB, absolute, true] [Memory mapped by the system resource group] + sys.resourceCpuAllocShares.latest [num, absolute, true] [CPU allocation shares of the system resource group] + sys.resourceFdUsage.latest [num, absolute, true] [Number of file descriptors used by the system resource group] + sys.resourceCpuAct5.latest [%, absolute, true] [CPU active average over 5 minutes of the system resource group] + sys.resourceCpuAct1.latest [%, absolute, true] [CPU active average over 1 minute of the system resource group] + sys.resourceCpuUsage.average [MHz, rate, true] [Amount of CPU used by the Service Console and other applications 
during the interval] + sys.resourceMemOverhead.latest [KB, absolute, true] [Overhead memory consumed by the system resource group] + sys.resourceMemCow.latest [KB, absolute, true] [Memory shared by the system resource group] + sys.resourceCpuAllocMax.latest [MHz, absolute, true] [CPU allocation limit (in MHz) of the system resource group] + sys.resourceMemAllocMax.latest [KB, absolute, true] [Memory allocation limit (in KB) of the system resource group] + sys.resourceMemAllocMin.latest [KB, absolute, true] [Memory allocation reservation (in KB) of the system resource group] + sys.resourceCpuAllocMin.latest [MHz, absolute, true] [CPU allocation reservation (in MHz) of the system resource group] + sys.resourceCpuMaxLimited1.latest [%, absolute, true] [CPU maximum limited over 1 minute of the system resource group] + sys.resourceMemAllocShares.latest [num, absolute, true] [Memory allocation shares of the system resource group] + sys.resourceMemConsumed.latest [KB, absolute, true] [Memory consumed by the system resource group] + sys.uptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last system startup] + sys.resourceCpuMaxLimited5.latest [%, absolute, true] [CPU maximum limited over 5 minutes of the system resource group] + sys.resourceCpuRun5.latest [%, absolute, true] [CPU running average over 5 minutes of the system resource group] + sys.resourceCpuRun1.latest [%, absolute, true] [CPU running average over 1 minute of the system resource group] + + vflashModule.numActiveVMDKs.latest [num, absolute, true] [Number of caches controlled by the virtual flash module] +*/ diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/resources/resources.go b/src/go/collectors/go.d.plugin/modules/vsphere/resources/resources.go new file mode 100644 index 00000000000000..7f04a8587319c1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/resources/resources.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package resources + +import ( + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/vim25/types" +) + +/* + +``` +Virtual Datacenter Architecture Representation (partial). 
+ +<root> ++-DC0 # Virtual datacenter + +-datastore # Datastore folder (created by system) + | +-Datastore1 + | + +-host # Host folder (created by system) + | +-Folder1 # Host and Cluster folder + | | +-NestedFolder1 + | | | +-Cluster1 + | | | | +-Host1 + | +-Cluster2 + | | +-Host2 + | | | +-VM1 + | | | +-VM2 + | | | +-hadoop1 + | +-Host3 # Dummy folder for non-clustered host (created by system) + | | +-Host3 + | | | +-VM3 + | | | +-VM4 + | | | + +-vm # VM folder (created by system) + | +-VM1 + | +-VM2 + | +-Folder2 # VM and Template folder + | | +-hadoop1 + | | +-NestedFolder1 + | | | +-VM3 + | | | +-VM4 +``` +*/ + +type Resources struct { + DataCenters DataCenters + Folders Folders + Clusters Clusters + Hosts Hosts + VMs VMs +} + +type ( + Datacenter struct { + Name string + ID string + } + + Folder struct { + Name string + ID string + ParentID string + } + + HierarchyValue struct { + ID, Name string + } + + ClusterHierarchy struct { + DC HierarchyValue + } + Cluster struct { + Name string + ID string + ParentID string + Hier ClusterHierarchy + } + + HostHierarchy struct { + DC HierarchyValue + Cluster HierarchyValue + } + Host struct { + Name string + ID string + ParentID string + Hier HostHierarchy + OverallStatus string + MetricList performance.MetricList + Ref types.ManagedObjectReference + } + + VMHierarchy struct { + DC HierarchyValue + Cluster HierarchyValue + Host HierarchyValue + } + + VM struct { + Name string + ID string + ParentID string + Hier VMHierarchy + OverallStatus string + MetricList performance.MetricList + Ref types.ManagedObjectReference + } +) + +func (v HierarchyValue) IsSet() bool { return v.ID != "" && v.Name != "" } +func (v *HierarchyValue) Set(id, name string) { v.ID = id; v.Name = name } + +func (h ClusterHierarchy) IsSet() bool { return h.DC.IsSet() } +func (h HostHierarchy) IsSet() bool { return h.DC.IsSet() && h.Cluster.IsSet() } +func (h VMHierarchy) IsSet() bool { return h.DC.IsSet() && h.Cluster.IsSet() && h.Host.IsSet() } + +type ( + DataCenters map[string]*Datacenter + Folders map[string]*Folder + Clusters map[string]*Cluster + Hosts map[string]*Host + VMs map[string]*VM +) + +func (dcs DataCenters) Put(dc *Datacenter) { dcs[dc.ID] = dc } +func (dcs DataCenters) Get(id string) *Datacenter { return dcs[id] } +func (fs Folders) Put(folder *Folder) { fs[folder.ID] = folder } +func (fs Folders) Get(id string) *Folder { return fs[id] } +func (cs Clusters) Put(cluster *Cluster) { cs[cluster.ID] = cluster } +func (cs Clusters) Get(id string) *Cluster { return cs[id] } +func (hs Hosts) Put(host *Host) { hs[host.ID] = host } +func (hs Hosts) Remove(id string) { delete(hs, id) } +func (hs Hosts) Get(id string) *Host { return hs[id] } +func (vs VMs) Put(vm *VM) { vs[vm.ID] = vm } +func (vs VMs) Remove(id string) { delete(vs, id) } +func (vs VMs) Get(id string) *VM { return vs[id] } diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape.go b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape.go new file mode 100644 index 00000000000000..cae59b6e83d887 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scrape + +import ( + "fmt" + "strconv" + "strings" + "sync" + "time" + + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/netdata/go.d.plugin/logger" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/vim25/types" +) + +type Client interface { + Version() string + 
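+// NOTE: the resources package above keeps the vSphere inventory as flat maps
+// keyed by managed-object ID; each Host/VM carries a denormalized Hier with
+// DC/cluster/host names instead of pointers into a tree, so consumers never
+// have to walk the folder hierarchy at collect time. A minimal usage sketch
+// (all IDs and names below are made up for illustration):
+//
+//    res := rs.Resources{Hosts: rs.Hosts{}, VMs: rs.VMs{}}
+//    res.Hosts.Put(&rs.Host{ID: "host-1", Name: "esxi-1"})
+//    vm := &rs.VM{ID: "vm-1", Name: "web-1", ParentID: "host-1"}
+//    vm.Hier.Host.Set("host-1", "esxi-1") // Hier.IsSet() needs DC, cluster and host
+//    res.VMs.Put(vm)
+//    host := res.Hosts.Get(vm.ParentID) // returns nil when the ID is unknown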
PerformanceMetrics([]types.PerfQuerySpec) ([]performance.EntityMetric, error) +} + +func New(client Client) *Scraper { + v := &Scraper{Client: client} + v.calcMaxQuery() + return v +} + +type Scraper struct { + *logger.Logger + Client + maxQuery int +} + +// The default setting for vCenter 6.5 and above is 256; prior versions of vCenter have it set to 64. +func (c *Scraper) calcMaxQuery() { + major, minor, err := parseVersion(c.Version()) + if err != nil || major < 6 || (major == 6 && minor == 0) { + c.maxQuery = 64 + return + } + c.maxQuery = 256 +} + +func (c Scraper) ScrapeHosts(hosts rs.Hosts) []performance.EntityMetric { + t := time.Now() + pqs := newHostsPerfQuerySpecs(hosts) + ms := c.scrapeMetrics(pqs) + c.Debugf("scraping : scraped metrics for %d/%d hosts, process took %s", + len(ms), + len(hosts), + time.Since(t), + ) + return ms +} + +func (c Scraper) ScrapeVMs(vms rs.VMs) []performance.EntityMetric { + t := time.Now() + pqs := newVMsPerfQuerySpecs(vms) + ms := c.scrapeMetrics(pqs) + c.Debugf("scraping : scraped metrics for %d/%d vms, process took %s", + len(ms), + len(vms), + time.Since(t), + ) + return ms +} + +func (c Scraper) scrapeMetrics(pqs []types.PerfQuerySpec) []performance.EntityMetric { + tc := newThrottledCaller(5) + var ms []performance.EntityMetric + lock := &sync.Mutex{} + + chunks := chunkify(pqs, c.maxQuery) + for _, chunk := range chunks { + pqs := chunk + job := func() { + c.scrape(&ms, lock, pqs) + } + tc.call(job) + } + tc.wait() + + return ms +} + +func (c Scraper) scrape(metrics *[]performance.EntityMetric, lock *sync.Mutex, pqs []types.PerfQuerySpec) { + m, err := c.PerformanceMetrics(pqs) + if err != nil { + c.Error(err) + return + } + + lock.Lock() + *metrics = append(*metrics, m...) + lock.Unlock() +} + +func chunkify(pqs []types.PerfQuerySpec, chunkSize int) (chunks [][]types.PerfQuerySpec) { + for i := 0; i < len(pqs); i += chunkSize { + end := i + chunkSize + if end > len(pqs) { + end = len(pqs) + } + chunks = append(chunks, pqs[i:end]) + } + return chunks +} + +const ( + pqsMaxSample = 1 + pqsIntervalID = 20 + pqsFormat = "normal" +) + +func newHostsPerfQuerySpecs(hosts rs.Hosts) []types.PerfQuerySpec { + pqs := make([]types.PerfQuerySpec, 0, len(hosts)) + for _, host := range hosts { + pq := types.PerfQuerySpec{ + Entity: host.Ref, + MaxSample: pqsMaxSample, + MetricId: host.MetricList, + IntervalId: pqsIntervalID, + Format: pqsFormat, + } + pqs = append(pqs, pq) + } + return pqs +} + +func newVMsPerfQuerySpecs(vms rs.VMs) []types.PerfQuerySpec { + pqs := make([]types.PerfQuerySpec, 0, len(vms)) + for _, vm := range vms { + pq := types.PerfQuerySpec{ + Entity: vm.Ref, + MaxSample: pqsMaxSample, + MetricId: vm.MetricList, + IntervalId: pqsIntervalID, + Format: pqsFormat, + } + pqs = append(pqs, pq) + } + return pqs +} + +func parseVersion(version string) (major, minor int, err error) { + parts := strings.Split(version, ".") + if len(parts) < 2 { + return 0, 0, fmt.Errorf("unparsable version string : %s", version) + } + if major, err = strconv.Atoi(parts[0]); err != nil { + return 0, 0, err + } + if minor, err = strconv.Atoi(parts[1]); err != nil { + return 0, 0, err + } + return major, minor, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape_test.go new file mode 100644 index 00000000000000..5078c96e9b01b9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/scrape_test.go @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package
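+// NOTE: two limits shape scrapeMetrics above. chunkify caps each batch of
+// PerfQuerySpec at maxQuery entries, which tracks the vCenter option
+// config.vpxd.stats.maxQueryMetrics (64 on vCenter 6.0 and earlier, 256 from
+// 6.5 on); that is why calcMaxQuery gates on the reported version. On top of
+// that, throttledCaller caps in-flight batches at 5. chunkify itself is a
+// plain sub-slice splitter; for example, 600 specs with maxQuery=256 yield
+// chunks of 256, 256 and 88:
+//
+//    for _, chunk := range chunkify(pqs, 256) {
+//        _ = chunk // each chunk shares the backing array; nothing is copied
+//    }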
scrape + +import ( + "crypto/tls" + "net/url" + "testing" + "time" + + "github.com/netdata/go.d.plugin/modules/vsphere/client" + "github.com/netdata/go.d.plugin/modules/vsphere/discover" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator" +) + +func TestNew(t *testing.T) { +} + +func TestScraper_ScrapeVMs(t *testing.T) { + s, res, teardown := prepareScraper(t) + defer teardown() + + metrics := s.ScrapeVMs(res.VMs) + assert.Len(t, metrics, len(res.VMs)) +} + +func TestScraper_ScrapeHosts(t *testing.T) { + s, res, teardown := prepareScraper(t) + defer teardown() + + metrics := s.ScrapeHosts(res.Hosts) + assert.Len(t, metrics, len(res.Hosts)) +} + +func prepareScraper(t *testing.T) (s *Scraper, res *rs.Resources, teardown func()) { + model, srv := createSim(t) + teardown = func() { model.Remove(); srv.Close() } + + c := newClient(t, srv.URL) + d := discover.New(c) + res, err := d.Discover() + require.NoError(t, err) + + return New(c), res, teardown +} + +func newClient(t *testing.T, vCenterURL *url.URL) *client.Client { + c, err := client.New(client.Config{ + URL: vCenterURL.String(), + User: "admin", + Password: "password", + Timeout: time.Second * 3, + TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true}, + }) + require.NoError(t, err) + return c +} + +func createSim(t *testing.T) (*simulator.Model, *simulator.Server) { + model := simulator.VPX() + err := model.Create() + require.NoError(t, err) + model.Service.TLS = new(tls.Config) + return model, model.Service.NewServer() +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller.go b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller.go new file mode 100644 index 00000000000000..5127c28c112ea8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scrape + +import "sync" + +type throttledCaller struct { + limit chan struct{} + wg sync.WaitGroup +} + +func newThrottledCaller(limit int) *throttledCaller { + if limit <= 0 { + panic("limit must be > 0") + } + return &throttledCaller{limit: make(chan struct{}, limit)} +} + +func (t *throttledCaller) call(job func()) { + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.limit <- struct{}{} + defer func() { + <-t.limit + }() + job() + }() +} + +func (t *throttledCaller) wait() { + t.wg.Wait() +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller_test.go new file mode 100644 index 00000000000000..545ed16033a685 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/scrape/throttled_caller_test.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package scrape + +import ( + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_throttledCaller(t *testing.T) { + var current int64 + var max int64 + var total int64 + var mux sync.Mutex + limit := 5 + n := 10000 + tc := newThrottledCaller(limit) + + for i := 0; i < n; i++ { + job := func() { + atomic.AddInt64(&total, 1) + atomic.AddInt64(¤t, 1) + time.Sleep(100 * time.Microsecond) + + mux.Lock() + defer mux.Unlock() + if atomic.LoadInt64(¤t) > max { + max = atomic.LoadInt64(¤t) + } + atomic.AddInt64(¤t, -1) + } + tc.call(job) + } + 
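+// NOTE: throttledCaller (above) is a counting semaphore built from a buffered
+// channel: each job goroutine sends into the limit channel before running and
+// receives on the way out, so at most `limit` jobs execute at once. call()
+// itself never blocks; every queued job gets a goroutine immediately and only
+// the work is throttled, which is why this test can enqueue all 10000 jobs up
+// front. The core pattern, for reference:
+//
+//    sem := make(chan struct{}, limit)
+//    sem <- struct{}{}        // acquire; blocks once `limit` jobs are running
+//    defer func() { <-sem }() // release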
tc.wait() + + assert.Equal(t, int64(n), total) + assert.Equal(t, max, int64(limit)) +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/task.go b/src/go/collectors/go.d.plugin/modules/vsphere/task.go new file mode 100644 index 00000000000000..103ca1ed6fb057 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/task.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + "sync" + "time" +) + +func newTask(doWork func(), doEvery time.Duration) *task { + task := task{ + done: make(chan struct{}), + running: make(chan struct{}), + } + + go func() { + t := time.NewTicker(doEvery) + defer func() { + t.Stop() + close(task.running) + }() + for { + select { + case <-task.done: + return + case <-t.C: + doWork() + } + } + }() + + return &task +} + +type task struct { + once sync.Once + done chan struct{} + running chan struct{} +} + +func (t *task) stop() { + t.once.Do(func() { close(t.done) }) +} + +func (t *task) isStopped() bool { + select { + case <-t.done: + return true + default: + return false + } +} + +func (t *task) isRunning() bool { + select { + case <-t.running: + return false + default: + return true + } +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/task_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/task_test.go new file mode 100644 index 00000000000000..ed55a28a368cde --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/task_test.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_task(t *testing.T) { + var i int64 + job := func() { + atomic.AddInt64(&i, 1) + } + + task := newTask(job, time.Millisecond*200) + defer task.stop() + time.Sleep(time.Second) + assert.True(t, atomic.LoadInt64(&i) > 0) +} + +func Test_task_isStopped(t *testing.T) { + task := newTask(func() {}, time.Second) + assert.False(t, task.isStopped()) + + task.stop() + time.Sleep(time.Millisecond * 500) + assert.True(t, task.isStopped()) +} + +func Test_task_isRunning(t *testing.T) { + task := newTask(func() {}, time.Second) + assert.True(t, task.isRunning()) + + task.stop() + time.Sleep(time.Millisecond * 500) + assert.False(t, task.isRunning()) +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/vsphere.go b/src/go/collectors/go.d.plugin/modules/vsphere/vsphere.go new file mode 100644 index 00000000000000..d7af8a495ac467 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/vsphere.go @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vsphere + +import ( + _ "embed" + "sync" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/modules/vsphere/match" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/vmware/govmomi/performance" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("vsphere", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 20, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *VSphere { + config := Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration{Duration: time.Second * 20}, + }, + }, + DiscoveryInterval: web.Duration{Duration: time.Minute * 5}, + HostsInclude: []string{"/*"}, + VMsInclude: []string{"/*"}, + } + + return &VSphere{ + collectionLock: new(sync.RWMutex), + 
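+// NOTE: the task type above is the collector's rediscovery timer: doWork runs
+// on a ticker until stop() is called; stop() is idempotent via sync.Once,
+// isStopped() reports whether the done channel is closed, and isRunning()
+// only flips after the goroutine's deferred cleanup has closed the running
+// channel, i.e. after the loop has really exited. Typical use (the interval
+// here is arbitrary):
+//
+//    tk := newTask(func() { /* periodic work */ }, 5*time.Minute)
+//    defer tk.stop() // safe to call any number of times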
Config: config, + charts: &module.Charts{}, + discoveredHosts: make(map[string]int), + discoveredVMs: make(map[string]int), + charted: make(map[string]bool), + } +} + +type Config struct { + web.HTTP `yaml:",inline"` + DiscoveryInterval web.Duration `yaml:"discovery_interval"` + HostsInclude match.HostIncludes `yaml:"host_include"` + VMsInclude match.VMIncludes `yaml:"vm_include"` +} + +type ( + VSphere struct { + module.Base + UpdateEvery int `yaml:"update_every"` + Config `yaml:",inline"` + + discoverer + scraper + + collectionLock *sync.RWMutex + resources *rs.Resources + discoveryTask *task + discoveredHosts map[string]int + discoveredVMs map[string]int + charted map[string]bool + charts *module.Charts + } + discoverer interface { + Discover() (*rs.Resources, error) + } + scraper interface { + ScrapeHosts(rs.Hosts) []performance.EntityMetric + ScrapeVMs(rs.VMs) []performance.EntityMetric + } +) + +func (vs *VSphere) Init() bool { + if err := vs.validateConfig(); err != nil { + vs.Errorf("error on validating config: %v", err) + return false + } + + vsClient, err := vs.initClient() + if err != nil { + vs.Errorf("error on creating vsphere client: %v", err) + return false + } + + err = vs.initDiscoverer(vsClient) + if err != nil { + vs.Errorf("error on creating vsphere discoverer: %v", err) + return false + } + + vs.initScraper(vsClient) + + err = vs.discoverOnce() + if err != nil { + vs.Errorf("error on discovering: %v", err) + return false + } + + vs.goDiscovery() + + return true +} + +func (vs *VSphere) Check() bool { + return true +} + +func (vs *VSphere) Charts() *module.Charts { + return vs.charts +} + +func (vs *VSphere) Collect() map[string]int64 { + mx, err := vs.collect() + if err != nil { + vs.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (vs *VSphere) Cleanup() { + if vs.discoveryTask == nil { + return + } + vs.discoveryTask.stop() +} diff --git a/src/go/collectors/go.d.plugin/modules/vsphere/vsphere_test.go b/src/go/collectors/go.d.plugin/modules/vsphere/vsphere_test.go new file mode 100644 index 00000000000000..97c23d5ba0d377 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/vsphere/vsphere_test.go @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +package vsphere + +import ( + "crypto/tls" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/modules/vsphere/discover" + "github.com/netdata/go.d.plugin/modules/vsphere/match" + rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/simulator" +) + +func TestNew(t *testing.T) { + job := New() + + assert.Implements(t, (*module.Module)(nil), job) +} + +func TestVSphere_Init(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + + assert.True(t, vSphere.Init()) + assert.NotNil(t, vSphere.discoverer) + assert.NotNil(t, vSphere.scraper) + assert.NotNil(t, vSphere.resources) + assert.NotNil(t, vSphere.discoveryTask) + assert.True(t, vSphere.discoveryTask.isRunning()) +} + +func TestVSphere_Init_ReturnsFalseIfURLNotSet(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + vSphere.URL = "" + + assert.False(t, vSphere.Init()) +} + +func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + vSphere.Username = "" + + assert.False(t, 
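+// NOTE: Init above wires the module in a fixed order: validate config, build
+// the client, build the discoverer and scraper from it, run one synchronous
+// discovery so the first Collect has resources to work with, then start the
+// periodic discovery task. The helpers it calls (validateConfig, initClient,
+// discoverOnce, goDiscovery) live in sibling files of this package. Keeping
+// discoverer and scraper as small interfaces is what lets the tests below
+// swap in thin wrappers, e.g. the mockScraper defined further down:
+//
+//    type mockScraper struct{ scraper } // embeds the real implementation
+//    func (s mockScraper) ScrapeVMs(vms rs.VMs) []performance.EntityMetric {
+//        return populateMetrics(s.scraper.ScrapeVMs(vms), 200)
+//    }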
vSphere.Init()) +} + +func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + vSphere.Password = "" + + assert.False(t, vSphere.Init()) +} + +func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + vSphere.Client.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, vSphere.Init()) +} + +func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + vSphere.URL = "http://127.0.0.1:32001" + + assert.False(t, vSphere.Init()) +} + +func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + + vSphere.HostsInclude = match.HostIncludes{"invalid"} + assert.False(t, vSphere.Init()) + + vSphere.HostsInclude = vSphere.HostsInclude[:0] + + vSphere.VMsInclude = match.VMIncludes{"invalid"} + assert.False(t, vSphere.Init()) +} + +func TestVSphere_Check(t *testing.T) { + assert.NotNil(t, New().Check()) +} + +func TestVSphere_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestVSphere_Cleanup(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + + require.True(t, vSphere.Init()) + + vSphere.Cleanup() + time.Sleep(time.Second) + assert.True(t, vSphere.discoveryTask.isStopped()) + assert.False(t, vSphere.discoveryTask.isRunning()) +} + +func TestVSphere_Cleanup_NotPanicsIfNotInitialized(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestVSphere_Collect(t *testing.T) { + vSphere, model, teardown := prepareVSphereSim(t) + defer teardown() + + require.True(t, vSphere.Init()) + + vSphere.scraper = mockScraper{vSphere.scraper} + + expected := map[string]int64{ + "host-21_cpu.usage.average": 100, + "host-21_disk.maxTotalLatency.latest": 100, + "host-21_disk.read.average": 100, + "host-21_disk.write.average": 100, + "host-21_mem.active.average": 100, + "host-21_mem.consumed.average": 100, + "host-21_mem.granted.average": 100, + "host-21_mem.shared.average": 100, + "host-21_mem.sharedcommon.average": 100, + "host-21_mem.swapinRate.average": 100, + "host-21_mem.swapoutRate.average": 100, + "host-21_mem.usage.average": 100, + "host-21_net.bytesRx.average": 100, + "host-21_net.bytesTx.average": 100, + "host-21_net.droppedRx.summation": 100, + "host-21_net.droppedTx.summation": 100, + "host-21_net.errorsRx.summation": 100, + "host-21_net.errorsTx.summation": 100, + "host-21_net.packetsRx.summation": 100, + "host-21_net.packetsTx.summation": 100, + "host-21_overall.status.gray": 1, + "host-21_overall.status.green": 0, + "host-21_overall.status.red": 0, + "host-21_overall.status.yellow": 0, + "host-21_sys.uptime.latest": 100, + "host-34_cpu.usage.average": 100, + "host-34_disk.maxTotalLatency.latest": 100, + "host-34_disk.read.average": 100, + "host-34_disk.write.average": 100, + "host-34_mem.active.average": 100, + "host-34_mem.consumed.average": 100, + "host-34_mem.granted.average": 100, + "host-34_mem.shared.average": 100, + "host-34_mem.sharedcommon.average": 100, + "host-34_mem.swapinRate.average": 100, + "host-34_mem.swapoutRate.average": 100, + "host-34_mem.usage.average": 100, + "host-34_net.bytesRx.average": 100, + "host-34_net.bytesTx.average": 100, + "host-34_net.droppedRx.summation": 100, + "host-34_net.droppedTx.summation": 100, + "host-34_net.errorsRx.summation": 100, + "host-34_net.errorsTx.summation": 100, + 
"host-34_net.packetsRx.summation": 100, + "host-34_net.packetsTx.summation": 100, + "host-34_overall.status.gray": 1, + "host-34_overall.status.green": 0, + "host-34_overall.status.red": 0, + "host-34_overall.status.yellow": 0, + "host-34_sys.uptime.latest": 100, + "host-42_cpu.usage.average": 100, + "host-42_disk.maxTotalLatency.latest": 100, + "host-42_disk.read.average": 100, + "host-42_disk.write.average": 100, + "host-42_mem.active.average": 100, + "host-42_mem.consumed.average": 100, + "host-42_mem.granted.average": 100, + "host-42_mem.shared.average": 100, + "host-42_mem.sharedcommon.average": 100, + "host-42_mem.swapinRate.average": 100, + "host-42_mem.swapoutRate.average": 100, + "host-42_mem.usage.average": 100, + "host-42_net.bytesRx.average": 100, + "host-42_net.bytesTx.average": 100, + "host-42_net.droppedRx.summation": 100, + "host-42_net.droppedTx.summation": 100, + "host-42_net.errorsRx.summation": 100, + "host-42_net.errorsTx.summation": 100, + "host-42_net.packetsRx.summation": 100, + "host-42_net.packetsTx.summation": 100, + "host-42_overall.status.gray": 1, + "host-42_overall.status.green": 0, + "host-42_overall.status.red": 0, + "host-42_overall.status.yellow": 0, + "host-42_sys.uptime.latest": 100, + "host-50_cpu.usage.average": 100, + "host-50_disk.maxTotalLatency.latest": 100, + "host-50_disk.read.average": 100, + "host-50_disk.write.average": 100, + "host-50_mem.active.average": 100, + "host-50_mem.consumed.average": 100, + "host-50_mem.granted.average": 100, + "host-50_mem.shared.average": 100, + "host-50_mem.sharedcommon.average": 100, + "host-50_mem.swapinRate.average": 100, + "host-50_mem.swapoutRate.average": 100, + "host-50_mem.usage.average": 100, + "host-50_net.bytesRx.average": 100, + "host-50_net.bytesTx.average": 100, + "host-50_net.droppedRx.summation": 100, + "host-50_net.droppedTx.summation": 100, + "host-50_net.errorsRx.summation": 100, + "host-50_net.errorsTx.summation": 100, + "host-50_net.packetsRx.summation": 100, + "host-50_net.packetsTx.summation": 100, + "host-50_overall.status.gray": 1, + "host-50_overall.status.green": 0, + "host-50_overall.status.red": 0, + "host-50_overall.status.yellow": 0, + "host-50_sys.uptime.latest": 100, + "vm-55_cpu.usage.average": 200, + "vm-55_disk.maxTotalLatency.latest": 200, + "vm-55_disk.read.average": 200, + "vm-55_disk.write.average": 200, + "vm-55_mem.active.average": 200, + "vm-55_mem.consumed.average": 200, + "vm-55_mem.granted.average": 200, + "vm-55_mem.shared.average": 200, + "vm-55_mem.swapinRate.average": 200, + "vm-55_mem.swapoutRate.average": 200, + "vm-55_mem.swapped.average": 200, + "vm-55_mem.usage.average": 200, + "vm-55_net.bytesRx.average": 200, + "vm-55_net.bytesTx.average": 200, + "vm-55_net.droppedRx.summation": 200, + "vm-55_net.droppedTx.summation": 200, + "vm-55_net.packetsRx.summation": 200, + "vm-55_net.packetsTx.summation": 200, + "vm-55_overall.status.gray": 0, + "vm-55_overall.status.green": 1, + "vm-55_overall.status.red": 0, + "vm-55_overall.status.yellow": 0, + "vm-55_sys.uptime.latest": 200, + "vm-58_cpu.usage.average": 200, + "vm-58_disk.maxTotalLatency.latest": 200, + "vm-58_disk.read.average": 200, + "vm-58_disk.write.average": 200, + "vm-58_mem.active.average": 200, + "vm-58_mem.consumed.average": 200, + "vm-58_mem.granted.average": 200, + "vm-58_mem.shared.average": 200, + "vm-58_mem.swapinRate.average": 200, + "vm-58_mem.swapoutRate.average": 200, + "vm-58_mem.swapped.average": 200, + "vm-58_mem.usage.average": 200, + "vm-58_net.bytesRx.average": 200, + 
"vm-58_net.bytesTx.average": 200, + "vm-58_net.droppedRx.summation": 200, + "vm-58_net.droppedTx.summation": 200, + "vm-58_net.packetsRx.summation": 200, + "vm-58_net.packetsTx.summation": 200, + "vm-58_overall.status.gray": 0, + "vm-58_overall.status.green": 1, + "vm-58_overall.status.red": 0, + "vm-58_overall.status.yellow": 0, + "vm-58_sys.uptime.latest": 200, + "vm-61_cpu.usage.average": 200, + "vm-61_disk.maxTotalLatency.latest": 200, + "vm-61_disk.read.average": 200, + "vm-61_disk.write.average": 200, + "vm-61_mem.active.average": 200, + "vm-61_mem.consumed.average": 200, + "vm-61_mem.granted.average": 200, + "vm-61_mem.shared.average": 200, + "vm-61_mem.swapinRate.average": 200, + "vm-61_mem.swapoutRate.average": 200, + "vm-61_mem.swapped.average": 200, + "vm-61_mem.usage.average": 200, + "vm-61_net.bytesRx.average": 200, + "vm-61_net.bytesTx.average": 200, + "vm-61_net.droppedRx.summation": 200, + "vm-61_net.droppedTx.summation": 200, + "vm-61_net.packetsRx.summation": 200, + "vm-61_net.packetsTx.summation": 200, + "vm-61_overall.status.gray": 0, + "vm-61_overall.status.green": 1, + "vm-61_overall.status.red": 0, + "vm-61_overall.status.yellow": 0, + "vm-61_sys.uptime.latest": 200, + "vm-64_cpu.usage.average": 200, + "vm-64_disk.maxTotalLatency.latest": 200, + "vm-64_disk.read.average": 200, + "vm-64_disk.write.average": 200, + "vm-64_mem.active.average": 200, + "vm-64_mem.consumed.average": 200, + "vm-64_mem.granted.average": 200, + "vm-64_mem.shared.average": 200, + "vm-64_mem.swapinRate.average": 200, + "vm-64_mem.swapoutRate.average": 200, + "vm-64_mem.swapped.average": 200, + "vm-64_mem.usage.average": 200, + "vm-64_net.bytesRx.average": 200, + "vm-64_net.bytesTx.average": 200, + "vm-64_net.droppedRx.summation": 200, + "vm-64_net.droppedTx.summation": 200, + "vm-64_net.packetsRx.summation": 200, + "vm-64_net.packetsTx.summation": 200, + "vm-64_overall.status.gray": 0, + "vm-64_overall.status.green": 1, + "vm-64_overall.status.red": 0, + "vm-64_overall.status.yellow": 0, + "vm-64_sys.uptime.latest": 200, + } + + collected := vSphere.Collect() + require.Equal(t, expected, collected) + + count := model.Count() + assert.Len(t, vSphere.discoveredHosts, count.Host) + assert.Len(t, vSphere.discoveredVMs, count.Machine) + assert.Len(t, vSphere.charted, count.Host+count.Machine) + + assert.Len(t, *vSphere.Charts(), count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl)) + ensureCollectedHasAllChartsDimsVarsIDs(t, vSphere, collected) +} + +func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) { + vSphere, _, teardown := prepareVSphereSim(t) + defer teardown() + + require.True(t, vSphere.Init()) + require.True(t, vSphere.Check()) + + okHostID := "host-50" + okVMID := "vm-64" + vSphere.discoverer.(*discover.Discoverer).HostMatcher = mockHostMatcher{okHostID} + vSphere.discoverer.(*discover.Discoverer).VMMatcher = mockVMMatcher{okVMID} + + require.NoError(t, vSphere.discoverOnce()) + + numOfRuns := 5 + for i := 0; i < numOfRuns; i++ { + vSphere.Collect() + } + + host := vSphere.resources.Hosts.Get(okHostID) + for k, v := range vSphere.discoveredHosts { + if k == host.ID { + assert.Equal(t, 0, v) + } else { + assert.Equal(t, numOfRuns, v) + } + } + + vm := vSphere.resources.VMs.Get(okVMID) + for id, fails := range vSphere.discoveredVMs { + if id == vm.ID { + assert.Equal(t, 0, fails) + } else { + assert.Equal(t, numOfRuns, fails) + } + + } + + for i := numOfRuns; i < failedUpdatesLimit; i++ { + vSphere.Collect() + } + + assert.Len(t, vSphere.discoveredHosts, 1) + 
assert.Len(t, vSphere.discoveredVMs, 1) + assert.Len(t, vSphere.charted, 2) + + for _, c := range *vSphere.Charts() { + if strings.HasPrefix(c.ID, okHostID) || strings.HasPrefix(c.ID, okVMID) { + assert.False(t, c.Obsolete) + } else { + assert.True(t, c.Obsolete) + } + } +} + +func TestVSphere_Collect_Run(t *testing.T) { + vSphere, model, teardown := prepareVSphereSim(t) + defer teardown() + + vSphere.DiscoveryInterval.Duration = time.Second * 2 + require.True(t, vSphere.Init()) + require.True(t, vSphere.Check()) + + runs := 20 + for i := 0; i < runs; i++ { + assert.True(t, len(vSphere.Collect()) > 0) + if i < 6 { + time.Sleep(time.Second) + } + } + + count := model.Count() + assert.Len(t, vSphere.discoveredHosts, count.Host) + assert.Len(t, vSphere.discoveredVMs, count.Machine) + assert.Len(t, vSphere.charted, count.Host+count.Machine) + assert.Len(t, *vSphere.charts, count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl)) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vSphere *VSphere, collected map[string]int64) { + for _, chart := range *vSphere.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareVSphereSim(t *testing.T) (vSphere *VSphere, model *simulator.Model, teardown func()) { + model, srv := createSim(t) + vSphere = New() + teardown = func() { model.Remove(); srv.Close(); vSphere.Cleanup() } + + vSphere.Username = "administrator" + vSphere.Password = "password" + vSphere.URL = srv.URL.String() + vSphere.TLSConfig.InsecureSkipVerify = true + + return vSphere, model, teardown +} + +func createSim(t *testing.T) (*simulator.Model, *simulator.Server) { + model := simulator.VPX() + err := model.Create() + require.NoError(t, err) + model.Service.TLS = new(tls.Config) + return model, model.Service.NewServer() +} + +type mockScraper struct { + scraper +} + +func (s mockScraper) ScrapeHosts(hosts rs.Hosts) []performance.EntityMetric { + ms := s.scraper.ScrapeHosts(hosts) + return populateMetrics(ms, 100) +} +func (s mockScraper) ScrapeVMs(vms rs.VMs) []performance.EntityMetric { + ms := s.scraper.ScrapeVMs(vms) + return populateMetrics(ms, 200) +} + +func populateMetrics(ms []performance.EntityMetric, value int64) []performance.EntityMetric { + for i := range ms { + for ii := range ms[i].Value { + v := &ms[i].Value[ii].Value + if *v == nil { + *v = append(*v, value) + } else { + (*v)[0] = value + } + } + } + return ms +} + +type mockHostMatcher struct{ name string } +type mockVMMatcher struct{ name string } + +func (m mockHostMatcher) Match(host *rs.Host) bool { return m.name == host.ID } +func (m mockVMMatcher) Match(vm *rs.VM) bool { return m.name == vm.ID } diff --git a/src/go/collectors/go.d.plugin/modules/weblog/README.md b/src/go/collectors/go.d.plugin/modules/weblog/README.md new file mode 120000 index 00000000000000..9da3f21c2f49d0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/README.md @@ -0,0 +1 @@ +integrations/web_server_log_files.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/weblog/charts.go b/src/go/collectors/go.d.plugin/modules/weblog/charts.go new file mode 100644 index 00000000000000..0d9477b108926f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/charts.go @@ -0,0 +1,890 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "errors" + "fmt" + + "github.com/netdata/go.d.plugin/agent/module" +) + +type ( + Charts = module.Charts + Chart = module.Chart + Dims = module.Dims + Dim = module.Dim +) + +const ( + prioReqTotal = module.Priority + iota + prioReqExcluded + prioReqType + + prioRespCodesClass + prioRespCodes + prioRespCodes1xx + prioRespCodes2xx + prioRespCodes3xx + prioRespCodes4xx + prioRespCodes5xx + + prioBandwidth + + prioReqProcTime + prioRespTimeHist + prioUpsRespTime + prioUpsRespTimeHist + + prioUniqIP + + prioReqVhost + prioReqPort + prioReqScheme + prioReqMethod + prioReqVersion + prioReqIPProto + prioReqSSLProto + prioReqSSLCipherSuite + + prioReqCustomFieldPattern // chart per custom field, alphabetical order + prioReqCustomTimeField // chart per custom time field, alphabetical order + prioReqCustomTimeFieldHist // histogram chart per custom time field + prioReqURLPattern + prioURLPatternStats + + prioReqCustomNumericFieldSummary // 3 charts per url pattern, alphabetical order +) + +// NOTE: inconsistency with python web_log +// TODO: current histogram charts are misleading in netdata + +// Requests +var ( + reqTotal = Chart{ + ID: "requests", + Title: "Total Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "web_log.requests", + Priority: prioReqTotal, + Dims: Dims{ + {ID: "requests", Algo: module.Incremental}, + }, + } + reqExcluded = Chart{ + ID: "excluded_requests", + Title: "Excluded Requests", + Units: "requests/s", + Fam: "requests", + Ctx: "web_log.excluded_requests", + Type: module.Stacked, + Priority: prioReqExcluded, + Dims: Dims{ + {ID: "req_unmatched", Name: "unmatched", Algo: module.Incremental}, + }, + } + // netdata specific grouping + reqTypes = Chart{ + ID: "requests_by_type", + Title: "Requests By Type", + Units: "requests/s", + Fam: "requests", + Ctx: "web_log.type_requests", + Type: module.Stacked, + Priority: prioReqType, + Dims: Dims{ + {ID: "req_type_success", Name: "success", Algo: module.Incremental}, + {ID: "req_type_bad", Name: "bad", Algo: module.Incremental}, + {ID: "req_type_redirect", Name: "redirect", Algo: module.Incremental}, + {ID: "req_type_error", Name: "error", Algo: module.Incremental}, + }, + } +) + +// Responses +var ( + respCodeClass = Chart{ + ID: "responses_by_status_code_class", + Title: "Responses By Status Code Class", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_class_responses", + Type: module.Stacked, + Priority: prioRespCodesClass, + Dims: Dims{ + {ID: "resp_2xx", Name: "2xx", Algo: module.Incremental}, + {ID: "resp_5xx", Name: "5xx", Algo: module.Incremental}, + {ID: "resp_3xx", Name: "3xx", Algo: module.Incremental}, + {ID: "resp_4xx", Name: "4xx", Algo: module.Incremental}, + {ID: "resp_1xx", Name: "1xx", Algo: module.Incremental}, + }, + } + respCodes = Chart{ + ID: "responses_by_status_code", + Title: "Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_responses", + Type: module.Stacked, + Priority: prioRespCodes, + } + respCodes1xx = Chart{ + ID: "status_code_class_1xx_responses", + Title: "Informational Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_class_1xx_responses", + Type: module.Stacked, + Priority: prioRespCodes1xx, + } + respCodes2xx = Chart{ + ID: "status_code_class_2xx_responses", + Title: "Successful Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: 
"web_log.status_code_class_2xx_responses", + Type: module.Stacked, + Priority: prioRespCodes2xx, + } + respCodes3xx = Chart{ + ID: "status_code_class_3xx_responses", + Title: "Redirects Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_class_3xx_responses", + Type: module.Stacked, + Priority: prioRespCodes3xx, + } + respCodes4xx = Chart{ + ID: "status_code_class_4xx_responses", + Title: "Client Errors Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_class_4xx_responses", + Type: module.Stacked, + Priority: prioRespCodes4xx, + } + respCodes5xx = Chart{ + ID: "status_code_class_5xx_responses", + Title: "Server Errors Responses By Status Code", + Units: "responses/s", + Fam: "responses", + Ctx: "web_log.status_code_class_5xx_responses", + Type: module.Stacked, + Priority: prioRespCodes5xx, + } +) + +// Bandwidth +var ( + bandwidth = Chart{ + ID: "bandwidth", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "bandwidth", + Ctx: "web_log.bandwidth", + Type: module.Area, + Priority: prioBandwidth, + Dims: Dims{ + {ID: "bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -8, Div: 1000}, + }, + } +) + +// Timings +var ( + reqProcTime = Chart{ + ID: "request_processing_time", + Title: "Request Processing Time", + Units: "milliseconds", + Fam: "timings", + Ctx: "web_log.request_processing_time", + Priority: prioReqProcTime, + Dims: Dims{ + {ID: "req_proc_time_min", Name: "min", Div: 1000}, + {ID: "req_proc_time_max", Name: "max", Div: 1000}, + {ID: "req_proc_time_avg", Name: "avg", Div: 1000}, + }, + } + reqProcTimeHist = Chart{ + ID: "requests_processing_time_histogram", + Title: "Requests Processing Time Histogram", + Units: "requests/s", + Fam: "timings", + Ctx: "web_log.requests_processing_time_histogram", + Priority: prioRespTimeHist, + } +) + +// Upstream +var ( + upsRespTime = Chart{ + ID: "upstream_response_time", + Title: "Upstream Response Time", + Units: "milliseconds", + Fam: "timings", + Ctx: "web_log.upstream_response_time", + Priority: prioUpsRespTime, + Dims: Dims{ + {ID: "upstream_resp_time_min", Name: "min", Div: 1000}, + {ID: "upstream_resp_time_max", Name: "max", Div: 1000}, + {ID: "upstream_resp_time_avg", Name: "avg", Div: 1000}, + }, + } + upsRespTimeHist = Chart{ + ID: "upstream_responses_time_histogram", + Title: "Upstream Responses Time Histogram", + Units: "responses/s", + Fam: "timings", + Ctx: "web_log.upstream_responses_time_histogram", + Priority: prioUpsRespTimeHist, + } +) + +// Clients +var ( + uniqIPsCurPoll = Chart{ + ID: "current_poll_uniq_clients", + Title: "Current Poll Unique Clients", + Units: "clients", + Fam: "client", + Ctx: "web_log.current_poll_uniq_clients", + Type: module.Stacked, + Priority: prioUniqIP, + Dims: Dims{ + {ID: "uniq_ipv4", Name: "ipv4", Algo: module.Absolute}, + {ID: "uniq_ipv6", Name: "ipv6", Algo: module.Absolute}, + }, + } +) + +// Request By N +var ( + reqByVhost = Chart{ + ID: "requests_by_vhost", + Title: "Requests By Vhost", + Units: "requests/s", + Fam: "vhost", + Ctx: "web_log.vhost_requests", + Type: module.Stacked, + Priority: prioReqVhost, + } + reqByPort = Chart{ + ID: "requests_by_port", + Title: "Requests By Port", + Units: "requests/s", + Fam: "port", + Ctx: "web_log.port_requests", + Type: module.Stacked, + Priority: prioReqPort, + } + reqByScheme = Chart{ + ID: "requests_by_scheme", + Title: "Requests By Scheme", + Units: 
"requests/s", + Fam: "scheme", + Ctx: "web_log.scheme_requests", + Type: module.Stacked, + Priority: prioReqScheme, + Dims: Dims{ + {ID: "req_http_scheme", Name: "http", Algo: module.Incremental}, + {ID: "req_https_scheme", Name: "https", Algo: module.Incremental}, + }, + } + reqByMethod = Chart{ + ID: "requests_by_http_method", + Title: "Requests By HTTP Method", + Units: "requests/s", + Fam: "http method", + Ctx: "web_log.http_method_requests", + Type: module.Stacked, + Priority: prioReqMethod, + } + reqByVersion = Chart{ + ID: "requests_by_http_version", + Title: "Requests By HTTP Version", + Units: "requests/s", + Fam: "http version", + Ctx: "web_log.http_version_requests", + Type: module.Stacked, + Priority: prioReqVersion, + } + reqByIPProto = Chart{ + ID: "requests_by_ip_proto", + Title: "Requests By IP Protocol", + Units: "requests/s", + Fam: "ip proto", + Ctx: "web_log.ip_proto_requests", + Type: module.Stacked, + Priority: prioReqIPProto, + Dims: Dims{ + {ID: "req_ipv4", Name: "ipv4", Algo: module.Incremental}, + {ID: "req_ipv6", Name: "ipv6", Algo: module.Incremental}, + }, + } + reqBySSLProto = Chart{ + ID: "requests_by_ssl_proto", + Title: "Requests By SSL Connection Protocol", + Units: "requests/s", + Fam: "ssl conn", + Ctx: "web_log.ssl_proto_requests", + Type: module.Stacked, + Priority: prioReqSSLProto, + } + reqBySSLCipherSuite = Chart{ + ID: "requests_by_ssl_cipher_suite", + Title: "Requests By SSL Connection Cipher Suite", + Units: "requests/s", + Fam: "ssl conn", + Ctx: "web_log.ssl_cipher_suite_requests", + Type: module.Stacked, + Priority: prioReqSSLCipherSuite, + } +) + +// Request By N Patterns +var ( + reqByURLPattern = Chart{ + ID: "requests_by_url_pattern", + Title: "URL Field Requests By Pattern", + Units: "requests/s", + Fam: "url ptn", + Ctx: "web_log.url_pattern_requests", + Type: module.Stacked, + Priority: prioReqURLPattern, + } + reqByCustomFieldPattern = Chart{ + ID: "custom_field_%s_requests_by_pattern", + Title: "Custom Field %s Requests By Pattern", + Units: "requests/s", + Fam: "custom field ptn", + Ctx: "web_log.custom_field_pattern_requests", + Type: module.Stacked, + Priority: prioReqCustomFieldPattern, + } +) + +// custom time field +var ( + reqByCustomTimeField = Chart{ + ID: "custom_time_field_%s_summary", + Title: `Custom Time Field "%s" Summary`, + Units: "milliseconds", + Fam: "custom time field", + Ctx: "web_log.custom_time_field_summary", + Priority: prioReqCustomTimeField, + Dims: Dims{ + {ID: "custom_time_field_%s_time_min", Name: "min", Div: 1000}, + {ID: "custom_time_field_%s_time_max", Name: "max", Div: 1000}, + {ID: "custom_time_field_%s_time_avg", Name: "avg", Div: 1000}, + }, + } + reqByCustomTimeFieldHist = Chart{ + ID: "custom_time_field_%s_histogram", + Title: `Custom Time Field "%s" Histogram`, + Units: "observations", + Fam: "custom time field", + Ctx: "web_log.custom_time_field_histogram", + Priority: prioReqCustomTimeFieldHist, + } +) + +var ( + customNumericFieldSummaryChartTmpl = Chart{ + ID: "custom_numeric_field_%s_summary", + Title: "Custom Numeric Field Summary", + Units: "", + Fam: "custom numeric fields", + Ctx: "web_log.custom_numeric_field_%s_summary", + Priority: prioReqCustomNumericFieldSummary, + Dims: Dims{ + {ID: "custom_numeric_field_%s_summary_min", Name: "min"}, + {ID: "custom_numeric_field_%s_summary_max", Name: "max"}, + {ID: "custom_numeric_field_%s_summary_avg", Name: "avg"}, + }, + } +) + +// URL pattern stats +var ( + urlPatternRespCodes = Chart{ + ID: "url_pattern_%s_responses_by_status_code", + 
Title: "Responses By Status Code", + Units: "responses/s", + Fam: "url ptn %s", + Ctx: "web_log.url_pattern_status_code_responses", + Type: module.Stacked, + Priority: prioURLPatternStats, + } + urlPatternReqMethods = Chart{ + ID: "url_pattern_%s_requests_by_http_method", + Title: "Requests By HTTP Method", + Units: "requests/s", + Fam: "url ptn %s", + Ctx: "web_log.url_pattern_http_method_requests", + Type: module.Stacked, + Priority: prioURLPatternStats + 1, + } + urlPatternBandwidth = Chart{ + ID: "url_pattern_%s_bandwidth", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "url ptn %s", + Ctx: "web_log.url_pattern_bandwidth", + Type: module.Area, + Priority: prioURLPatternStats + 2, + Dims: Dims{ + {ID: "url_ptn_%s_bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000}, + {ID: "url_ptn_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -8, Div: 1000}, + }, + } + urlPatternReqProcTime = Chart{ + ID: "url_pattern_%s_request_processing_time", + Title: "Request Processing Time", + Units: "milliseconds", + Fam: "url ptn %s", + Ctx: "web_log.url_pattern_request_processing_time", + Priority: prioURLPatternStats + 3, + Dims: Dims{ + {ID: "url_ptn_%s_req_proc_time_min", Name: "min", Div: 1000}, + {ID: "url_ptn_%s_req_proc_time_max", Name: "max", Div: 1000}, + {ID: "url_ptn_%s_req_proc_time_avg", Name: "avg", Div: 1000}, + }, + } +) + +func newReqProcTimeHistChart(histogram []float64) (*Chart, error) { + chart := reqProcTimeHist.Copy() + for i, v := range histogram { + dim := &Dim{ + ID: fmt.Sprintf("req_proc_time_hist_bucket_%d", i+1), + Name: fmt.Sprintf("%.3f", v), + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + if err := chart.AddDim(&Dim{ + ID: "req_proc_time_hist_count", + Name: "+Inf", + Algo: module.Incremental, + }); err != nil { + return nil, err + } + return chart, nil +} + +func newUpsRespTimeHistChart(histogram []float64) (*Chart, error) { + chart := upsRespTimeHist.Copy() + for i, v := range histogram { + dim := &Dim{ + ID: fmt.Sprintf("upstream_resp_time_hist_bucket_%d", i+1), + Name: fmt.Sprintf("%.3f", v), + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + if err := chart.AddDim(&Dim{ + ID: "upstream_resp_time_hist_count", + Name: "+Inf", + Algo: module.Incremental, + }); err != nil { + return nil, err + } + return chart, nil +} + +func newURLPatternChart(patterns []userPattern) (*Chart, error) { + chart := reqByURLPattern.Copy() + for _, p := range patterns { + dim := &Dim{ + ID: "req_url_ptn_" + p.Name, + Name: p.Name, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + return chart, nil +} + +func newURLPatternRespCodesChart(name string) *Chart { + chart := urlPatternRespCodes.Copy() + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Fam = fmt.Sprintf(chart.Fam, name) + return chart +} + +func newURLPatternReqMethodsChart(name string) *Chart { + chart := urlPatternReqMethods.Copy() + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Fam = fmt.Sprintf(chart.Fam, name) + return chart +} + +func newURLPatternBandwidthChart(name string) *Chart { + chart := urlPatternBandwidth.Copy() + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Fam = fmt.Sprintf(chart.Fam, name) + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, name) + } + return chart +} + +func newURLPatternReqProcTimeChart(name string) *Chart { + chart := urlPatternReqProcTime.Copy() + chart.ID = fmt.Sprintf(chart.ID, name) 
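+// NOTE: every newURLPattern*Chart / newCustomField*Chart constructor in this
+// file follows the same template pattern: Copy() a package-level Chart whose
+// ID/Title/Fam (and dim IDs) carry %s placeholders, then fmt.Sprintf the
+// instance name in. The Copy() matters: the templates are shared package
+// state, and filling them in place would corrupt every later instantiation.
+// Minimal form:
+//
+//    chart := tmpl.Copy()
+//    chart.ID = fmt.Sprintf(chart.ID, name)
+//    for _, d := range chart.Dims {
+//        d.ID = fmt.Sprintf(d.ID, name) // Dims hold pointers, so this edits the copy
+//    }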
+ chart.Fam = fmt.Sprintf(chart.Fam, name) + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, name) + } + return chart +} + +func newCustomFieldCharts(fields []customField) (Charts, error) { + charts := Charts{} + for _, f := range fields { + chart, err := newCustomFieldChart(f) + if err != nil { + return nil, err + } + if err := charts.Add(chart); err != nil { + return nil, err + } + } + return charts, nil +} + +func newCustomFieldChart(f customField) (*Chart, error) { + chart := reqByCustomFieldPattern.Copy() + chart.ID = fmt.Sprintf(chart.ID, f.Name) + chart.Title = fmt.Sprintf(chart.Title, f.Name) + for _, p := range f.Patterns { + dim := &Dim{ + ID: fmt.Sprintf("custom_field_%s_%s", f.Name, p.Name), + Name: p.Name, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + return chart, nil +} + +func newCustomTimeFieldCharts(fields []customTimeField) (Charts, error) { + charts := Charts{} + for i, f := range fields { + chartTime, err := newCustomTimeFieldChart(f) + if err != nil { + return nil, err + } + chartTime.Priority += i + if err := charts.Add(chartTime); err != nil { + return nil, err + } + if len(f.Histogram) < 1 { + continue + } + + chartHist, err := newCustomTimeFieldHistChart(f) + if err != nil { + return nil, err + } + chartHist.Priority += i + + if err := charts.Add(chartHist); err != nil { + return nil, err + } + } + return charts, nil +} + +func newCustomTimeFieldChart(f customTimeField) (*Chart, error) { + chart := reqByCustomTimeField.Copy() + chart.ID = fmt.Sprintf(chart.ID, f.Name) + chart.Title = fmt.Sprintf(chart.Title, f.Name) + for _, d := range chart.Dims { + d.ID = fmt.Sprintf(d.ID, f.Name) + } + return chart, nil +} + +func newCustomTimeFieldHistChart(f customTimeField) (*Chart, error) { + chart := reqByCustomTimeFieldHist.Copy() + chart.ID = fmt.Sprintf(chart.ID, f.Name) + chart.Title = fmt.Sprintf(chart.Title, f.Name) + for i, v := range f.Histogram { + dim := &Dim{ + ID: fmt.Sprintf("custom_time_field_%s_time_hist_bucket_%d", f.Name, i+1), + Name: fmt.Sprintf("%.3f", v), + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + return nil, err + } + } + if err := chart.AddDim(&Dim{ + ID: fmt.Sprintf("custom_time_field_%s_time_hist_count", f.Name), + Name: "+Inf", + Algo: module.Incremental, + }); err != nil { + return nil, err + } + return chart, nil +} + +func (w *WebLog) createCharts(line *logLine) error { + if line.empty() { + return errors.New("empty line") + } + w.charts = nil + // Following charts are created during runtime: + // - reqBySSLProto, reqBySSLCipherSuite - it is likely line has no SSL stuff at this moment + charts := &Charts{ + reqTotal.Copy(), + reqExcluded.Copy(), + } + if line.hasVhost() { + if err := addVhostCharts(charts); err != nil { + return err + } + } + if line.hasPort() { + if err := addPortCharts(charts); err != nil { + return err + } + } + if line.hasReqScheme() { + if err := addSchemeCharts(charts); err != nil { + return err + } + } + if line.hasReqClient() { + if err := addClientCharts(charts); err != nil { + return err + } + } + if line.hasReqMethod() { + if err := addMethodCharts(charts, w.URLPatterns); err != nil { + return err + } + } + if line.hasReqURL() { + if err := addURLCharts(charts, w.URLPatterns); err != nil { + return err + } + } + if line.hasReqProto() { + if err := addReqProtoCharts(charts); err != nil { + return err + } + } + if line.hasRespCode() { + if err := addRespCodesCharts(charts, w.GroupRespCodes); err != nil { + return err 
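+// NOTE: createCharts is driven by the first successfully parsed log line, not
+// by configuration alone: each has*() probe decides whether a chart family is
+// created at all, so a log format without, say, upstream response time never
+// registers those charts. The SSL charts are the documented exception; as the
+// comment at the top of this function notes, they are added lazily at collect
+// time, because the first line frequently carries no TLS fields.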
+ } + } + if line.hasReqSize() || line.hasRespSize() { + if err := addBandwidthCharts(charts, w.URLPatterns); err != nil { + return err + } + } + if line.hasReqProcTime() { + if err := addReqProcTimeCharts(charts, w.Histogram, w.URLPatterns); err != nil { + return err + } + } + if line.hasUpsRespTime() { + if err := addUpstreamRespTimeCharts(charts, w.Histogram); err != nil { + return err + } + } + if line.hasCustomFields() { + if len(w.CustomFields) > 0 { + if err := addCustomFieldsCharts(charts, w.CustomFields); err != nil { + return err + } + } + if len(w.CustomTimeFields) > 0 { + if err := addCustomTimeFieldsCharts(charts, w.CustomTimeFields); err != nil { + return err + } + } + if len(w.CustomNumericFields) > 0 { + if err := addCustomNumericFieldsCharts(charts, w.CustomNumericFields); err != nil { + return err + } + } + } + + w.charts = charts + + return nil +} + +func addVhostCharts(charts *Charts) error { + return charts.Add(reqByVhost.Copy()) +} + +func addPortCharts(charts *Charts) error { + return charts.Add(reqByPort.Copy()) +} + +func addSchemeCharts(charts *Charts) error { + return charts.Add(reqByScheme.Copy()) +} + +func addClientCharts(charts *Charts) error { + if err := charts.Add(reqByIPProto.Copy()); err != nil { + return err + } + return charts.Add(uniqIPsCurPoll.Copy()) +} + +func addMethodCharts(charts *Charts, patterns []userPattern) error { + if err := charts.Add(reqByMethod.Copy()); err != nil { + return err + } + + for _, p := range patterns { + chart := newURLPatternReqMethodsChart(p.Name) + if err := charts.Add(chart); err != nil { + return err + } + } + return nil +} + +func addURLCharts(charts *Charts, patterns []userPattern) error { + if len(patterns) == 0 { + return nil + } + chart, err := newURLPatternChart(patterns) + if err != nil { + return err + } + if err := charts.Add(chart); err != nil { + return err + } + + for _, p := range patterns { + chart := newURLPatternRespCodesChart(p.Name) + if err := charts.Add(chart); err != nil { + return err + } + } + return nil +} + +func addReqProtoCharts(charts *Charts) error { + return charts.Add(reqByVersion.Copy()) +} + +func addRespCodesCharts(charts *Charts, group bool) error { + if err := charts.Add(reqTypes.Copy()); err != nil { + return err + } + if err := charts.Add(respCodeClass.Copy()); err != nil { + return err + } + if !group { + return charts.Add(respCodes.Copy()) + } + for _, c := range []Chart{respCodes1xx, respCodes2xx, respCodes3xx, respCodes4xx, respCodes5xx} { + if err := charts.Add(c.Copy()); err != nil { + return err + } + } + return nil +} + +func addBandwidthCharts(charts *Charts, patterns []userPattern) error { + if err := charts.Add(bandwidth.Copy()); err != nil { + return err + } + + for _, p := range patterns { + chart := newURLPatternBandwidthChart(p.Name) + if err := charts.Add(chart); err != nil { + return err + } + } + return nil +} + +func addReqProcTimeCharts(charts *Charts, histogram []float64, patterns []userPattern) error { + if err := charts.Add(reqProcTime.Copy()); err != nil { + return err + } + for _, p := range patterns { + chart := newURLPatternReqProcTimeChart(p.Name) + if err := charts.Add(chart); err != nil { + return err + } + } + if len(histogram) == 0 { + return nil + } + chart, err := newReqProcTimeHistChart(histogram) + if err != nil { + return err + } + return charts.Add(chart) +} + +func addUpstreamRespTimeCharts(charts *Charts, histogram []float64) error { + if err := charts.Add(upsRespTime.Copy()); err != nil { + return err + } + if len(histogram) == 0 { + return 
nil + } + chart, err := newUpsRespTimeHistChart(histogram) + if err != nil { + return err + } + return charts.Add(chart) +} + +func addCustomFieldsCharts(charts *Charts, fields []customField) error { + cs, err := newCustomFieldCharts(fields) + if err != nil { + return err + } + return charts.Add(cs...) +} + +func addCustomTimeFieldsCharts(charts *Charts, fields []customTimeField) error { + cs, err := newCustomTimeFieldCharts(fields) + if err != nil { + return err + } + return charts.Add(cs...) +} + +func addCustomNumericFieldsCharts(charts *module.Charts, fields []customNumericField) error { + for _, f := range fields { + chart := customNumericFieldSummaryChartTmpl.Copy() + chart.ID = fmt.Sprintf(chart.ID, f.Name) + chart.Units = f.Units + chart.Ctx = fmt.Sprintf(chart.Ctx, f.Name) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, f.Name) + dim.Div = f.Divisor + } + + if err := charts.Add(chart); err != nil { + return err + } + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/collect.go b/src/go/collectors/go.d.plugin/modules/weblog/collect.go new file mode 100644 index 00000000000000..b6b2d39b46b1d4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/collect.go @@ -0,0 +1,560 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "fmt" + "io" + "runtime" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/pkg/logs" + "github.com/netdata/go.d.plugin/pkg/stm" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (w *WebLog) logPanicStackIfAny() { + err := recover() + if err == nil { + return + } + w.Errorf("[ERROR] %s\n", err) + for depth := 0; ; depth++ { + _, file, line, ok := runtime.Caller(depth) + if !ok { + break + } + w.Errorf("======> %d: %v:%d", depth, file, line) + } + panic(err) +} + +func (w *WebLog) collect() (map[string]int64, error) { + defer w.logPanicStackIfAny() + w.mx.reset() + + var mx map[string]int64 + + n, err := w.collectLogLines() + + if n > 0 || err == nil { + mx = stm.ToMap(w.mx) + } + return mx, err +} + +func (w *WebLog) collectLogLines() (int, error) { + logOnce := true + var n int + for { + w.line.reset() + err := w.parser.ReadLine(w.line) + if err != nil { + if err == io.EOF { + return n, nil + } + if !logs.IsParseError(err) { + return n, err + } + n++ + if logOnce { + w.Infof("unmatched line: %v (parser: %s)", err, w.parser.Info()) + logOnce = false + } + w.collectUnmatched() + continue + } + n++ + if w.line.empty() { + w.collectUnmatched() + } else { + w.collectLogLine() + } + } +} + +func (w *WebLog) collectLogLine() { + w.mx.Requests.Inc() + w.collectVhost() + w.collectPort() + w.collectReqScheme() + w.collectReqClient() + w.collectReqMethod() + w.collectReqURL() + w.collectReqProto() + w.collectRespCode() + w.collectReqSize() + w.collectRespSize() + w.collectReqProcTime() + w.collectUpsRespTime() + w.collectSSLProto() + w.collectSSLCipherSuite() + w.collectCustomFields() +} + +func (w *WebLog) collectUnmatched() { + w.mx.Requests.Inc() + w.mx.ReqUnmatched.Inc() +} + +func (w *WebLog) collectVhost() { + if !w.line.hasVhost() { + return + } + c, ok := w.mx.ReqVhost.GetP(w.line.vhost) + if !ok { + w.addDimToVhostChart(w.line.vhost) + } + c.Inc() +} + +func (w *WebLog) collectPort() { + if !w.line.hasPort() { + return + } + c, ok := w.mx.ReqPort.GetP(w.line.port) + if !ok { + w.addDimToPortChart(w.line.port) + } + c.Inc() +} + +func (w *WebLog) collectReqClient() { + if !w.line.hasReqClient() { + return + } + if strings.ContainsRune(w.line.reqClient, ':') { + 
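+// NOTE: client classification here is a deliberate heuristic rather than real
+// address parsing: any ':' in the field counts as IPv6, and everything else,
+// including bare hostnames (see the note below), counts as IPv4; the Unique*
+// sets only track per-poll cardinality. A stricter and costlier variant would
+// parse the field (sketch using net/netip, not what this code does):
+//
+//    if addr, err := netip.ParseAddr(w.line.reqClient); err == nil && addr.Is6() {
+//        // IPv6 path
+//    }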
w.mx.ReqIPv6.Inc() + w.mx.UniqueIPv6.Insert(w.line.reqClient) + return + } + // NOTE: count hostname as IPv4 address + w.mx.ReqIPv4.Inc() + w.mx.UniqueIPv4.Insert(w.line.reqClient) +} + +func (w *WebLog) collectReqScheme() { + if !w.line.hasReqScheme() { + return + } + if w.line.reqScheme == "https" { + w.mx.ReqHTTPSScheme.Inc() + } else { + w.mx.ReqHTTPScheme.Inc() + } +} + +func (w *WebLog) collectReqMethod() { + if !w.line.hasReqMethod() { + return + } + c, ok := w.mx.ReqMethod.GetP(w.line.reqMethod) + if !ok { + w.addDimToReqMethodChart(w.line.reqMethod) + } + c.Inc() +} + +func (w *WebLog) collectReqURL() { + if !w.line.hasReqURL() { + return + } + for _, p := range w.urlPatterns { + if !p.MatchString(w.line.reqURL) { + continue + } + c, _ := w.mx.ReqURLPattern.GetP(p.name) + c.Inc() + + w.collectURLPatternStats(p.name) + return + } +} + +func (w *WebLog) collectReqProto() { + if !w.line.hasReqProto() { + return + } + c, ok := w.mx.ReqVersion.GetP(w.line.reqProto) + if !ok { + w.addDimToReqVersionChart(w.line.reqProto) + } + c.Inc() +} + +func (w *WebLog) collectRespCode() { + if !w.line.hasRespCode() { + return + } + + code := w.line.respCode + switch { + case code >= 100 && code < 300, code == 304, code == 401: + w.mx.ReqSuccess.Inc() + case code >= 300 && code < 400: + w.mx.ReqRedirect.Inc() + case code >= 400 && code < 500: + w.mx.ReqBad.Inc() + case code >= 500 && code < 600: + w.mx.ReqError.Inc() + } + + switch code / 100 { + case 1: + w.mx.Resp1xx.Inc() + case 2: + w.mx.Resp2xx.Inc() + case 3: + w.mx.Resp3xx.Inc() + case 4: + w.mx.Resp4xx.Inc() + case 5: + w.mx.Resp5xx.Inc() + } + + codeStr := strconv.Itoa(code) + c, ok := w.mx.RespCode.GetP(codeStr) + if !ok { + w.addDimToRespCodesChart(codeStr) + } + c.Inc() +} + +func (w *WebLog) collectReqSize() { + if !w.line.hasReqSize() { + return + } + w.mx.BytesReceived.Add(float64(w.line.reqSize)) +} + +func (w *WebLog) collectRespSize() { + if !w.line.hasRespSize() { + return + } + w.mx.BytesSent.Add(float64(w.line.respSize)) +} + +func (w *WebLog) collectReqProcTime() { + if !w.line.hasReqProcTime() { + return + } + w.mx.ReqProcTime.Observe(w.line.reqProcTime) + if w.mx.ReqProcTimeHist == nil { + return + } + w.mx.ReqProcTimeHist.Observe(w.line.reqProcTime) +} + +func (w *WebLog) collectUpsRespTime() { + if !w.line.hasUpsRespTime() { + return + } + w.mx.UpsRespTime.Observe(w.line.upsRespTime) + if w.mx.UpsRespTimeHist == nil { + return + } + w.mx.UpsRespTimeHist.Observe(w.line.upsRespTime) +} + +func (w *WebLog) collectSSLProto() { + if !w.line.hasSSLProto() { + return + } + c, ok := w.mx.ReqSSLProto.GetP(w.line.sslProto) + if !ok { + w.addDimToSSLProtoChart(w.line.sslProto) + } + c.Inc() +} + +func (w *WebLog) collectSSLCipherSuite() { + if !w.line.hasSSLCipherSuite() { + return + } + c, ok := w.mx.ReqSSLCipherSuite.GetP(w.line.sslCipherSuite) + if !ok { + w.addDimToSSLCipherSuiteChart(w.line.sslCipherSuite) + } + c.Inc() +} + +func (w *WebLog) collectURLPatternStats(name string) { + v, ok := w.mx.URLPatternStats[name] + if !ok { + return + } + if w.line.hasRespCode() { + status := strconv.Itoa(w.line.respCode) + c, ok := v.RespCode.GetP(status) + if !ok { + w.addDimToURLPatternRespCodesChart(name, status) + } + c.Inc() + } + + if w.line.hasReqMethod() { + c, ok := v.ReqMethod.GetP(w.line.reqMethod) + if !ok { + w.addDimToURLPatternReqMethodsChart(name, w.line.reqMethod) + } + c.Inc() + } + + if w.line.hasReqSize() { + v.BytesReceived.Add(float64(w.line.reqSize)) + } + + if w.line.hasRespSize() { + 
v.BytesSent.Add(float64(w.line.respSize)) + } + + if w.line.hasReqProcTime() { + v.ReqProcTime.Observe(w.line.reqProcTime) + } +} + +func (w *WebLog) collectCustomFields() { + if !w.line.hasCustomFields() { + return + } + + for _, cv := range w.line.custom.values { + _, _ = cv.name, cv.value + + if patterns, ok := w.customFields[cv.name]; ok { + for _, pattern := range patterns { + if !pattern.MatchString(cv.value) { + continue + } + v, ok := w.mx.ReqCustomField[cv.name] + if !ok { + break + } + c, _ := v.GetP(pattern.name) + c.Inc() + break + } + } else if histogram, ok := w.customTimeFields[cv.name]; ok { + v, ok := w.mx.ReqCustomTimeField[cv.name] + if !ok { + continue + } + ctf, err := strconv.ParseFloat(cv.value, 64) + if err != nil || !isTimeValid(ctf) { + continue + } + v.Time.Observe(ctf) + if histogram != nil { + v.TimeHist.Observe(ctf * timeMultiplier(cv.value)) + } + } else if w.customNumericFields[cv.name] { + m, ok := w.mx.ReqCustomNumericField[cv.name] + if !ok { + continue + } + v, err := strconv.ParseFloat(cv.value, 64) + if err != nil { + continue + } + v *= float64(m.multiplier) + m.Summary.Observe(v) + } + } +} + +func (w *WebLog) addDimToVhostChart(vhost string) { + chart := w.Charts().Get(reqByVhost.ID) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", reqByVhost.ID) + return + } + dim := &Dim{ + ID: "req_vhost_" + vhost, + Name: vhost, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToPortChart(port string) { + chart := w.Charts().Get(reqByPort.ID) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", reqByPort.ID) + return + } + dim := &Dim{ + ID: "req_port_" + port, + Name: port, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToReqMethodChart(method string) { + chart := w.Charts().Get(reqByMethod.ID) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", reqByMethod.ID) + return + } + dim := &Dim{ + ID: "req_method_" + method, + Name: method, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToReqVersionChart(version string) { + chart := w.Charts().Get(reqByVersion.ID) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", reqByVersion.ID) + return + } + dim := &Dim{ + ID: "req_version_" + version, + Name: version, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToSSLProtoChart(proto string) { + chart := w.Charts().Get(reqBySSLProto.ID) + if chart == nil { + chart = reqBySSLProto.Copy() + if err := w.Charts().Add(chart); err != nil { + w.Warning(err) + return + } + } + dim := &Dim{ + ID: "req_ssl_proto_" + proto, + Name: proto, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToSSLCipherSuiteChart(cipher string) { + chart := w.Charts().Get(reqBySSLCipherSuite.ID) + if chart == nil { + chart = reqBySSLCipherSuite.Copy() + if err := w.Charts().Add(chart); err != nil { + w.Warning(err) + return + } + } + dim := &Dim{ + ID: "req_ssl_cipher_suite_" + cipher, + Name: cipher, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } 
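+    // The chart gained a new dimension: mark it as not created so the chart
+    // definition (including the new dimension) is resent to Netdata on the next update.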
+ chart.MarkNotCreated() +} + +func (w *WebLog) addDimToRespCodesChart(code string) { + chart := w.findRespCodesChart(code) + if chart == nil { + w.Warning("add dimension: cant find resp codes chart") + return + } + dim := &Dim{ + ID: "resp_code_" + code, + Name: code, + Algo: module.Incremental, + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToURLPatternRespCodesChart(name, code string) { + id := fmt.Sprintf(urlPatternRespCodes.ID, name) + chart := w.Charts().Get(id) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", id) + return + } + dim := &Dim{ + ID: fmt.Sprintf("url_ptn_%s_resp_code_%s", name, code), + Name: code, + Algo: module.Incremental, + } + + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) addDimToURLPatternReqMethodsChart(name, method string) { + id := fmt.Sprintf(urlPatternReqMethods.ID, name) + chart := w.Charts().Get(id) + if chart == nil { + w.Warningf("add dimension: no '%s' chart", id) + return + } + dim := &Dim{ + ID: fmt.Sprintf("url_ptn_%s_req_method_%s", name, method), + Name: method, + Algo: module.Incremental, + } + + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + return + } + chart.MarkNotCreated() +} + +func (w *WebLog) findRespCodesChart(code string) *Chart { + if !w.GroupRespCodes { + return w.Charts().Get(respCodes.ID) + } + + var id string + switch class := code[:1]; class { + case "1": + id = respCodes1xx.ID + case "2": + id = respCodes2xx.ID + case "3": + id = respCodes3xx.ID + case "4": + id = respCodes4xx.ID + case "5": + id = respCodes5xx.ID + default: + return nil + } + return w.Charts().Get(id) +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/config_schema.json b/src/go/collectors/go.d.plugin/modules/weblog/config_schema.json new file mode 100644 index 00000000000000..82b6c358c4ac7e --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/config_schema.json @@ -0,0 +1,208 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/web_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parser": { + "type": "object", + "properties": { + "log_type": { + "type": "string" + }, + "csv_config": { + "type": "object", + "properties": { + "fields_per_record": { + "type": "integer" + }, + "delimiter": { + "type": "string" + }, + "trim_leading_space": { + "type": "boolean" + }, + "format": { + "type": "string" + } + }, + "required": [ + "fields_per_record", + "delimiter", + "trim_leading_space", + "format" + ] + }, + "ltsv_config": { + "type": "object", + "properties": { + "field_delimiter": { + "type": "string" + }, + "value_delimiter": { + "type": "string" + }, + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "field_delimiter", + "value_delimiter", + "mapping" + ] + }, + "regexp_config": { + "type": "object", + "properties": { + "pattern": { + "type": "string" + } + }, + "required": [ + "pattern" + ] + }, + "json_config": { + "type": "object", + "properties": { + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "mapping" + ] + } + }, + "required": [ + "log_type" + ] + }, + "path": { + "type": "string" + }, + "exclude_path": { + "type": "string" + }, + "url_patterns": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": 
"string" + }, + "match": { + "type": "string" + } + }, + "required": [ + "name", + "match" + ] + } + }, + "custom_fields": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "patterns": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "match": { + "type": "string" + } + }, + "required": [ + "name", + "match" + ] + } + } + }, + "required": [ + "name", + "patterns" + ] + } + }, + "custom_time_fields": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "histogram": { + "type": "array", + "items": { + "type": "number" + } + } + }, + "required": [ + "name", + "histogram" + ] + } + }, + "custom_numeric_fields": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "units": { + "type": "string" + }, + "multiplier": { + "type": "integer" + }, + "divisor": { + "type": "integer" + } + }, + "required": [ + "name", + "units", + "multiplier", + "divisor" + ] + } + }, + "histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "group_response_codes": { + "type": "boolean" + } + }, + "required": [ + "name", + "path" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/init.go b/src/go/collectors/go.d.plugin/modules/weblog/init.go new file mode 100644 index 00000000000000..9ee45951c8cd46 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/init.go @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/logs" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +type pattern struct { + name string + matcher.Matcher +} + +func newPattern(up userPattern) (*pattern, error) { + if up.Name == "" || up.Match == "" { + return nil, errors.New("empty 'name' or 'match'") + } + + m, err := matcher.Parse(up.Match) + if err != nil { + return nil, err + } + return &pattern{name: up.Name, Matcher: m}, nil +} + +func (w *WebLog) createURLPatterns() error { + if len(w.URLPatterns) == 0 { + w.Debug("skipping URL patterns creating, no patterns provided") + return nil + } + w.Debug("starting URL patterns creating") + for _, up := range w.URLPatterns { + p, err := newPattern(up) + if err != nil { + return fmt.Errorf("create pattern %+v: %v", up, err) + } + w.Debugf("created pattern '%s', type '%T', match '%s'", p.name, p.Matcher, up.Match) + w.urlPatterns = append(w.urlPatterns, p) + } + w.Debugf("created %d URL pattern(s)", len(w.URLPatterns)) + return nil +} + +func (w *WebLog) createCustomFields() error { + if len(w.CustomFields) == 0 { + w.Debug("skipping custom fields creating, no custom fields provided") + return nil + } + + w.Debug("starting custom fields creating") + w.customFields = make(map[string][]*pattern) + for i, cf := range w.CustomFields { + if cf.Name == "" { + return fmt.Errorf("create custom field: name not set (field %d)", i+1) + } + for _, up := range cf.Patterns { + p, err := newPattern(up) + if err != nil { + return fmt.Errorf("create field '%s' pattern %+v: %v", cf.Name, up, err) + } + w.Debugf("created field '%s', pattern '%s', type '%T', match '%s'", cf.Name, p.name, p.Matcher, up.Match) + w.customFields[cf.Name] = append(w.customFields[cf.Name], p) + } + } + w.Debugf("created %d custom field(s)", len(w.CustomFields)) + return nil +} + +func (w *WebLog) createCustomTimeFields() error { + if len(w.CustomTimeFields) == 0 { 
+ w.Debug("skipping custom time fields creating, no custom time fields provided") + return nil + } + + w.Debug("starting custom time fields creating") + w.customTimeFields = make(map[string][]float64) + for i, ctf := range w.CustomTimeFields { + if ctf.Name == "" { + return fmt.Errorf("create custom field: name not set (field %d)", i+1) + } + w.customTimeFields[ctf.Name] = ctf.Histogram + w.Debugf("created time field '%s', histogram '%v'", ctf.Name, ctf.Histogram) + } + w.Debugf("created %d custom time field(s)", len(w.CustomTimeFields)) + return nil +} + +func (w *WebLog) createCustomNumericFields() error { + if len(w.CustomNumericFields) == 0 { + w.Debug("no custom time fields provided") + return nil + } + + w.Debugf("creating custom numeric fields for '%+v'", w.CustomNumericFields) + + w.customNumericFields = make(map[string]bool) + + for i := range w.CustomNumericFields { + v := w.CustomNumericFields[i] + if v.Name == "" { + return fmt.Errorf("custom numeric field (%d): 'name' not set", i+1) + } + if v.Units == "" { + return fmt.Errorf("custom numeric field (%s): 'units' not set", v.Name) + } + if v.Multiplier <= 0 { + v.Multiplier = 1 + } + if v.Divisor <= 0 { + v.Divisor = 1 + } + w.CustomNumericFields[i] = v + w.customNumericFields[v.Name] = true + } + + return nil +} + +func (w *WebLog) createLogLine() { + w.line = newEmptyLogLine() + + for v := range w.customFields { + w.line.custom.fields[v] = struct{}{} + } + for v := range w.customTimeFields { + w.line.custom.fields[v] = struct{}{} + } + for v := range w.customNumericFields { + w.line.custom.fields[v] = struct{}{} + } +} + +func (w *WebLog) createLogReader() error { + w.Cleanup() + w.Debug("starting log reader creating") + + reader, err := logs.Open(w.Path, w.ExcludePath, w.Logger) + if err != nil { + return fmt.Errorf("creating log reader: %v", err) + } + + w.Debugf("created log reader, current file '%s'", reader.CurrentFilename()) + w.file = reader + + return nil +} + +func (w *WebLog) createParser() error { + w.Debug("starting parser creating") + + const readLinesNum = 100 + + lines, err := logs.ReadLastLines(w.file.CurrentFilename(), readLinesNum) + if err != nil { + return fmt.Errorf("failed to read last lines: %v", err) + } + + var found bool + for _, line := range lines { + if line = strings.TrimSpace(line); line == "" { + continue + } + w.Debugf("last line: '%s'", line) + + w.parser, err = w.newParser([]byte(line)) + if err != nil { + w.Debugf("failed to create parser from line: %v", err) + continue + } + + w.line.reset() + + if err = w.parser.Parse([]byte(line), w.line); err != nil { + w.Debugf("failed to parse line: %v", err) + continue + } + + if err = w.line.verify(); err != nil { + w.Debugf("failed to verify line: %v", err) + continue + } + + found = true + break + } + + if !found { + return fmt.Errorf("failed to create log parser (file '%s')", w.file.CurrentFilename()) + } + + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/integrations/web_server_log_files.md b/src/go/collectors/go.d.plugin/modules/weblog/integrations/web_server_log_files.md new file mode 100644 index 00000000000000..59f9809ea847dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/integrations/web_server_log_files.md @@ -0,0 +1,375 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/weblog/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/weblog/metadata.yaml" +sidebar_label: "Web server log files" +learn_status: "Published" 
+learn_rel_path: "Data Collection/Web Servers and Web Proxies" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Web server log files + + +<img src="https://netdata.cloud/img/webservers.svg" width="150"/> + + +Plugin: go.d.plugin +Module: web_log + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors web servers by parsing their log files. + + + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +It automatically detects log files of web servers running on localhost. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per Web server log files instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| web_log.requests | requests | requests/s | +| web_log.excluded_requests | unmatched | requests/s | +| web_log.type_requests | success, bad, redirect, error | requests/s | +| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s | +| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s | +| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s | +| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s | +| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s | +| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s | +| web_log.bandwidth | received, sent | kilobits/s | +| web_log.request_processing_time | min, max, avg | milliseconds | +| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s | +| web_log.upstream_response_time | min, max, avg | milliseconds | +| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s | +| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients | +| web_log.vhost_requests | a dimension per vhost | requests/s | +| web_log.port_requests | a dimension per port | requests/s | +| web_log.scheme_requests | http, https | requests/s | +| web_log.http_method_requests | a dimension per HTTP method | requests/s | +| web_log.http_version_requests | a dimension per HTTP version | requests/s | +| web_log.ip_proto_requests | ipv4, ipv6 | requests/s | +| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s | +| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s | +| web_log.url_pattern_requests | a dimension per URL pattern | requests/s | +| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s | + +### Per custom time field + +TBD + +This scope has no labels. 
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.custom_time_field_summary | min, max, avg | milliseconds |
+| web_log.custom_time_field_histogram | a dimension per bucket | observations |
+
+### Per custom numeric field
+
+TBD
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |
+
+### Per URL pattern
+
+TBD
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |
+| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |
+| web_log.url_pattern_bandwidth | received, sent | kilobits/s |
+| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |
+| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |
+| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |
+| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |
+| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |
+| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |
+| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/web_log.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/web_log.conf
+```
+
+#### Options
+
+Weblog knows how to parse and interpret the following fields (**known fields**):
+
+> [nginx](https://nginx.org/en/docs/varindex.html)
+>
+> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)
+
+| nginx | apache | description |
+|-------------------------|----------|------------------------------------------------------------------------------------------|
+| $host ($http_host) | %v | Name of the server which accepted a request. |
+| $server_port | %p | Port of the server which accepted a request. |
+| $scheme | - | Request scheme. "http" or "https". |
+| $remote_addr | %a (%h) | Client address. |
+| $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". |
+| $request_method | %m | Request method. Usually "GET" or "POST". |
+| $request_uri | %U | Full original request URI. |
+| $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". |
+| $status | %s (%>s) | Response status code. |
+| $request_length | %I | Bytes received from a client, including request and headers. |
+| $bytes_sent | %O | Bytes sent to a client, including request and headers. |
+| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |
+| $request_time | %D | Request processing time. |
+| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |
+| $ssl_protocol | - | Protocol of an established SSL connection. |
+| $ssl_cipher | - | String of ciphers used for an established SSL connection. |
+
+Notes:
+
+- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.
+- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.
+- To get `%I` and `%O` working, you need to enable `mod_logio` on Apache.
+- NGINX logs the URI with query parameters; Apache doesn't.
+- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need for the others.
+- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| path | Path to the web server log file. | | yes |
+| exclude_path | Path to exclude. | *.gz | no |
+| url_patterns | List of URL patterns. | [] | no |
+| url_patterns.name | Used as a dimension name. | | yes |
+| url_patterns.match | Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). | | yes |
+| parser | Log parser configuration. | | no |
+| parser.log_type | Log parser type. | auto | no |
+| parser.csv_config | CSV log parser config. | | no |
+| parser.csv_config.delimiter | CSV field delimiter. | , | no |
+| parser.csv_config.format | CSV log format. | | no |
+| parser.ltsv_config | LTSV log parser config. | | no |
+| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \t | no |
+| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |
+| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |
+| parser.json_config | JSON log parser config. | | no |
+| parser.json_config.mapping | JSON fields mapping to **known fields**. | | yes |
+| parser.regexp_config | RegExp log parser config. | | no |
+| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |
+
+##### url_patterns
+
+"URL pattern" scope metrics will be collected for each URL pattern.
+
+Option syntax:
+
+```yaml
+url_patterns:
+  - name: name1
+    match: pattern1
+  - name: name2
+    match: pattern2
+```
+
+
+##### parser.log_type
+
+Weblog supports 5 different log parsers:
+
+| Parser type | Description |
+|-------------|-------------------------------------------|
+| auto | Use CSV and auto-detect format |
+| csv | Comma-separated values |
+| json | [JSON](https://www.json.org/json-en.html) |
+| ltsv | [LTSV](http://ltsv.org/) |
+| regexp | Regular expression with named groups |
+
+Syntax:
+
+```yaml
+parser:
+  log_type: auto
+```
+
+If the `log_type` parameter is set to `auto` (the default), weblog tries to auto-detect the appropriate log parser and log format using the last line of the log file:
+
+- checks if the format is `CSV` (using regexp).
+- checks if the format is `JSON` (using regexp).
+- assumes the format is `CSV` and tries to find the appropriate `CSV` log format using a predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):
+
+  ```sh
+  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+  $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+  $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+  ```
+
+  If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't, you need to set the format manually.
+
+
+##### parser.csv_config.format
+
+The CSV log format string, built from **known fields** (see the format examples under `parser.log_type` above).
+
+
+##### parser.ltsv_config.mapping
+
+The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+```yaml
+parser:
+  log_type: ltsv
+  ltsv_config:
+    mapping:
+      label1: field1
+      label2: field2
+```
+
+
+##### parser.json_config.mapping
+
+The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding **known field**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+```yaml
+parser:
+  log_type: json
+  json_config:
+    mapping:
+      label1: field1
+      label2: field2
+```
+
+
+##### parser.regexp_config.pattern
+
+Use a pattern with named subexpressions; the group names should be **known fields** (see the sketch below).
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
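+
+For illustration, a hypothetical pattern for a CLF-style line (every group name below is a **known field**; adjust the pattern to your actual log format):
+
+```yaml
+parser:
+  log_type: regexp
+  regexp_config:
+    pattern: '(?P<remote_addr>[\da-f.:]+) - - \[[^\]]+\] "(?P<request>[^"]*)" (?P<status>\d+) (?P<body_bytes_sent>\d+)'
+```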
+ +Syntax: + +```yaml +parser: + log_type: regexp + regexp_config: + pattern: PATTERN +``` + + +</details> + +#### Examples +There are no configuration examples. + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m web_log + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/weblog/logline.go b/src/go/collectors/go.d.plugin/modules/weblog/logline.go new file mode 100644 index 00000000000000..89a04d3e652093 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/logline.go @@ -0,0 +1,616 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// TODO: it is not clear how to handle "-", current handling is not good +// In general it is: +// - If a field is unused in a particular entry dash "-" marks the omitted field. +// In addition to that "-" is used as zero value in: +// - apache: %b '-' when no bytes are sent. +// +// Log Format: +// - CLF: https://www.w3.org/Daemon/User/Config/Logging.html#common-logfile-format +// - ELF: https://www.w3.org/TR/WD-logfile.html +// - Apache CLF: https://httpd.apache.org/docs/trunk/logs.html#common + +// Variables: +// - nginx: http://nginx.org/en/docs/varindex.html +// - apache: http://httpd.apache.org/docs/current/mod/mod_log_config.html#logformat +// - IIS: https://learn.microsoft.com/en-us/windows/win32/http/w3c-logging + +/* +| nginx | apache | description | +|-------------------------|-----------|-----------------------------------------------| +| $host ($http_host) | %v | Name of the server which accepted a request. +| $server_port | %p | Port of the server which accepted a request. +| $scheme | - | Request scheme. "http" or "https". +| $remote_addr | %a (%h) | Client address. +| $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". +| $request_method | %m | Request method. Usually "GET" or "POST". +| $request_uri | %U | Full original request URI. +| $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". +| $status | %s (%>s) | Response status code. +| $request_length | %I | Bytes received from a client, including request and headers. +| $bytes_sent | %O | Bytes sent to a client, including request and headers. +| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. +| $request_time | %D | Request processing time. +| $upstream_response_time | - | Time spent on receiving the response from the upstream server. +| $ssl_protocol | - | Protocol of an established SSL connection. +| $ssl_cipher | - | String of ciphers used for an established SSL connection. 
+*/ + +var ( + errEmptyLine = errors.New("empty line") + errBadVhost = errors.New("bad vhost") + errBadVhostPort = errors.New("bad vhost with port") + errBadPort = errors.New("bad port") + errBadReqScheme = errors.New("bad req scheme") + errBadReqClient = errors.New("bad req client") + errBadRequest = errors.New("bad request") + errBadReqMethod = errors.New("bad req method") + errBadReqURL = errors.New("bad req url") + errBadReqProto = errors.New("bad req protocol") + errBadReqSize = errors.New("bad req size") + errBadRespCode = errors.New("bad resp status code") + errBadRespSize = errors.New("bad resp size") + errBadReqProcTime = errors.New("bad req processing time") + errBadUpsRespTime = errors.New("bad upstream resp time") + errBadSSLProto = errors.New("bad ssl protocol") + errBadSSLCipherSuite = errors.New("bad ssl cipher suite") +) + +func newEmptyLogLine() *logLine { + var l logLine + l.custom.fields = make(map[string]struct{}) + l.custom.values = make([]customValue, 0, 20) + l.reset() + return &l +} + +type ( + logLine struct { + web + custom custom + } + web struct { + vhost string + port string + reqScheme string + reqClient string + reqMethod string + reqURL string + reqProto string + reqSize int + reqProcTime float64 + respCode int + respSize int + upsRespTime float64 + sslProto string + sslCipherSuite string + } + custom struct { + fields map[string]struct{} + values []customValue + } + customValue struct { + name string + value string + } +) + +func (l *logLine) Assign(field string, value string) (err error) { + if value == "" { + return + } + + switch field { + case "host", "http_host", "v": + err = l.assignVhost(value) + case "server_port", "p": + err = l.assignPort(value) + case "host:$server_port", "v:%p": + err = l.assignVhostWithPort(value) + case "scheme": + err = l.assignReqScheme(value) + case "remote_addr", "a", "h": + err = l.assignReqClient(value) + case "request", "r": + err = l.assignRequest(value) + case "request_method", "m": + err = l.assignReqMethod(value) + case "request_uri", "U": + err = l.assignReqURL(value) + case "server_protocol", "H": + err = l.assignReqProto(value) + case "status", "s", ">s": + err = l.assignRespCode(value) + case "request_length", "I": + err = l.assignReqSize(value) + case "bytes_sent", "body_bytes_sent", "b", "O", "B": + err = l.assignRespSize(value) + case "request_time", "D": + err = l.assignReqProcTime(value) + case "upstream_response_time": + err = l.assignUpsRespTime(value) + case "ssl_protocol": + err = l.assignSSLProto(value) + case "ssl_cipher": + err = l.assignSSLCipherSuite(value) + default: + err = l.assignCustom(field, value) + } + if err != nil { + err = fmt.Errorf("assign '%s': %w", field, err) + } + return err +} + +const hyphen = "-" + +func (l *logLine) assignVhost(vhost string) error { + if vhost == hyphen { + return nil + } + // nginx $host and $http_host returns ipv6 in [], apache not + if idx := strings.IndexByte(vhost, ']'); idx > 0 { + vhost = vhost[1:idx] + } + l.vhost = vhost + return nil +} + +func (l *logLine) assignPort(port string) error { + if port == hyphen { + return nil + } + if !isPortValid(port) { + return fmt.Errorf("assign '%s' : %w", port, errBadPort) + } + l.port = port + return nil +} + +func (l *logLine) assignVhostWithPort(vhostPort string) error { + if vhostPort == hyphen { + return nil + } + idx := strings.LastIndexByte(vhostPort, ':') + if idx == -1 { + return fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort) + } + if err := l.assignPort(vhostPort[idx+1:]); err != nil { + return 
fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort) + } + if err := l.assignVhost(vhostPort[0:idx]); err != nil { + return fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort) + } + return nil +} + +func (l *logLine) assignReqScheme(scheme string) error { + if scheme == hyphen { + return nil + } + if !isSchemeValid(scheme) { + return fmt.Errorf("assign '%s' : %w", scheme, errBadReqScheme) + } + l.reqScheme = scheme + return nil +} + +func (l *logLine) assignReqClient(client string) error { + if client == hyphen { + return nil + } + l.reqClient = client + return nil +} + +func (l *logLine) assignRequest(request string) error { + if request == hyphen { + return nil + } + var first, last int + if first = strings.IndexByte(request, ' '); first < 0 { + return fmt.Errorf("assign '%s': %w", request, errBadRequest) + } + if last = strings.LastIndexByte(request, ' '); first == last { + return fmt.Errorf("assign '%s': %w", request, errBadRequest) + } + proto := request[last+1:] + url := request[first+1 : last] + method := request[0:first] + if err := l.assignReqMethod(method); err != nil { + return err + } + if err := l.assignReqURL(url); err != nil { + return err + } + return l.assignReqProto(proto) +} + +func (l *logLine) assignReqMethod(method string) error { + if method == hyphen { + return nil + } + if !isReqMethodValid(method) { + return fmt.Errorf("assign '%s' : %w", method, errBadReqMethod) + } + l.reqMethod = method + return nil +} + +func (l *logLine) assignReqURL(url string) error { + if url == hyphen { + return nil + } + if isEmptyString(url) { + return fmt.Errorf("assign '%s' : %w", url, errBadReqURL) + } + l.reqURL = url + return nil +} + +func (l *logLine) assignReqProto(proto string) error { + if proto == hyphen { + return nil + } + if !isReqProtoValid(proto) { + return fmt.Errorf("assign '%s': %w", proto, errBadReqProto) + } + l.reqProto = proto[5:] + return nil +} + +func (l *logLine) assignRespCode(status string) error { + if status == hyphen { + return nil + } + v, err := strconv.Atoi(status) + if err != nil || !isRespCodeValid(v) { + return fmt.Errorf("assign '%s': %w", status, errBadRespCode) + } + l.respCode = v + return nil +} + +func (l *logLine) assignReqSize(size string) error { + // apache: can be "-" according web_log py regexp. + if size == hyphen { + l.reqSize = 0 + return nil + } + v, err := strconv.Atoi(size) + if err != nil || !isSizeValid(v) { + return fmt.Errorf("assign '%s': %w", size, errBadReqSize) + } + l.reqSize = v + return nil +} + +func (l *logLine) assignRespSize(size string) error { + // apache: %b. In CLF format, i.e. a '-' rather than a 0 when no bytes are sent. 
+ if size == hyphen { + l.respSize = 0 + return nil + } + v, err := strconv.Atoi(size) + if err != nil || !isSizeValid(v) { + return fmt.Errorf("assign '%s': %w", size, errBadRespSize) + } + l.respSize = v + return nil +} + +func (l *logLine) assignReqProcTime(time string) error { + if time == hyphen { + return nil + } + if time == "0.000" { + l.reqProcTime = 0 + return nil + } + v, err := strconv.ParseFloat(time, 64) + if err != nil || !isTimeValid(v) { + return fmt.Errorf("assign '%s': %w", time, errBadReqProcTime) + } + l.reqProcTime = v * timeMultiplier(time) + return nil +} + +func isUpstreamTimeSeparator(r rune) bool { return r == ',' || r == ':' } + +func (l *logLine) assignUpsRespTime(time string) error { + if time == hyphen { + return nil + } + + // the upstream response time string can contain multiple values, separated + // by commas (in case the request was handled by multiple servers), or colons + // (in case the request passed between multiple server groups via an internal redirect) + // the individual values should be summed up to obtain the correct amount of time + // the request spent in upstream + var sum float64 + for _, val := range strings.FieldsFunc(time, isUpstreamTimeSeparator) { + val = strings.TrimSpace(val) + v, err := strconv.ParseFloat(val, 64) + if err != nil || !isTimeValid(v) { + return fmt.Errorf("assign '%s': %w", time, errBadUpsRespTime) + } + + sum += v + } + + l.upsRespTime = sum * timeMultiplier(time) + return nil +} + +func (l *logLine) assignSSLProto(proto string) error { + if proto == hyphen { + return nil + } + if !isSSLProtoValid(proto) { + return fmt.Errorf("assign '%s': %w", proto, errBadSSLProto) + } + l.sslProto = proto + return nil +} + +func (l *logLine) assignSSLCipherSuite(cipher string) error { + if cipher == hyphen { + return nil + } + if strings.IndexByte(cipher, '-') <= 0 && strings.IndexByte(cipher, '_') <= 0 { + return fmt.Errorf("assign '%s': %w", cipher, errBadSSLCipherSuite) + } + l.sslCipherSuite = cipher + return nil +} + +func (l *logLine) assignCustom(field, value string) error { + if len(l.custom.fields) == 0 || value == hyphen { + return nil + } + if _, ok := l.custom.fields[field]; ok { + l.custom.values = append(l.custom.values, customValue{name: field, value: value}) + } + return nil +} + +func (l *logLine) verify() error { + if l.empty() { + return fmt.Errorf("verify: %w", errEmptyLine) + } + if l.hasRespCode() && !l.isRespCodeValid() { + return fmt.Errorf("verify '%d': %w", l.respCode, errBadRespCode) + } + if l.hasVhost() && !l.isVhostValid() { + return fmt.Errorf("verify '%s': %w", l.vhost, errBadVhost) + } + if l.hasPort() && !l.isPortValid() { + return fmt.Errorf("verify '%s': %w", l.port, errBadPort) + } + if l.hasReqScheme() && !l.isSchemeValid() { + return fmt.Errorf("verify '%s': %w", l.reqScheme, errBadReqScheme) + } + if l.hasReqClient() && !l.isClientValid() { + return fmt.Errorf("verify '%s': %w", l.reqClient, errBadReqClient) + } + if l.hasReqMethod() && !l.isMethodValid() { + return fmt.Errorf("verify '%s': %w", l.reqMethod, errBadReqMethod) + } + if l.hasReqURL() && !l.isURLValid() { + return fmt.Errorf("verify '%s': %w", l.reqURL, errBadReqURL) + } + if l.hasReqProto() && !l.isProtoValid() { + return fmt.Errorf("verify '%s': %w", l.reqProto, errBadReqProto) + } + if l.hasReqSize() && !l.isReqSizeValid() { + return fmt.Errorf("verify '%d': %w", l.reqSize, errBadReqSize) + } + if l.hasRespSize() && !l.isRespSizeValid() { + return fmt.Errorf("verify '%d': %w", l.respSize, errBadRespSize) + } + if 
l.hasReqProcTime() && !l.isReqProcTimeValid() { + return fmt.Errorf("verify '%f': %w", l.reqProcTime, errBadReqProcTime) + } + if l.hasUpsRespTime() && !l.isUpsRespTimeValid() { + return fmt.Errorf("verify '%f': %w", l.upsRespTime, errBadUpsRespTime) + } + if l.hasSSLProto() && !l.isSSLProtoValid() { + return fmt.Errorf("verify '%s': %w", l.sslProto, errBadSSLProto) + } + if l.hasSSLCipherSuite() && !l.isSSLCipherSuiteValid() { + return fmt.Errorf("verify '%s': %w", l.sslCipherSuite, errBadSSLCipherSuite) + } + return nil +} + +func (l *logLine) empty() bool { return !l.hasWebFields() && !l.hasCustomFields() } +func (l *logLine) hasCustomFields() bool { return len(l.custom.values) > 0 } +func (l *logLine) hasWebFields() bool { return l.web != emptyWebFields } +func (l *logLine) hasVhost() bool { return !isEmptyString(l.vhost) } +func (l *logLine) hasPort() bool { return !isEmptyString(l.port) } +func (l *logLine) hasReqScheme() bool { return !isEmptyString(l.reqScheme) } +func (l *logLine) hasReqClient() bool { return !isEmptyString(l.reqClient) } +func (l *logLine) hasReqMethod() bool { return !isEmptyString(l.reqMethod) } +func (l *logLine) hasReqURL() bool { return !isEmptyString(l.reqURL) } +func (l *logLine) hasReqProto() bool { return !isEmptyString(l.reqProto) } +func (l *logLine) hasRespCode() bool { return !isEmptyNumber(l.respCode) } +func (l *logLine) hasReqSize() bool { return !isEmptyNumber(l.reqSize) } +func (l *logLine) hasRespSize() bool { return !isEmptyNumber(l.respSize) } +func (l *logLine) hasReqProcTime() bool { return !isEmptyNumber(int(l.reqProcTime)) } +func (l *logLine) hasUpsRespTime() bool { return !isEmptyNumber(int(l.upsRespTime)) } +func (l *logLine) hasSSLProto() bool { return !isEmptyString(l.sslProto) } +func (l *logLine) hasSSLCipherSuite() bool { return !isEmptyString(l.sslCipherSuite) } +func (l *logLine) isVhostValid() bool { return reVhost.MatchString(l.vhost) } +func (l *logLine) isPortValid() bool { return isPortValid(l.port) } +func (l *logLine) isSchemeValid() bool { return isSchemeValid(l.reqScheme) } +func (l *logLine) isClientValid() bool { return reClient.MatchString(l.reqClient) } +func (l *logLine) isMethodValid() bool { return isReqMethodValid(l.reqMethod) } +func (l *logLine) isURLValid() bool { return !isEmptyString(l.reqURL) } +func (l *logLine) isProtoValid() bool { return isReqProtoVerValid(l.reqProto) } +func (l *logLine) isRespCodeValid() bool { return isRespCodeValid(l.respCode) } +func (l *logLine) isReqSizeValid() bool { return isSizeValid(l.reqSize) } +func (l *logLine) isRespSizeValid() bool { return isSizeValid(l.respSize) } +func (l *logLine) isReqProcTimeValid() bool { return isTimeValid(l.reqProcTime) } +func (l *logLine) isUpsRespTimeValid() bool { return isTimeValid(l.upsRespTime) } +func (l *logLine) isSSLProtoValid() bool { return isSSLProtoValid(l.sslProto) } +func (l *logLine) isSSLCipherSuiteValid() bool { return reCipherSuite.MatchString(l.sslCipherSuite) } + +func (l *logLine) reset() { + l.web = emptyWebFields + l.custom.values = l.custom.values[:0] +} + +var ( + // TODO: reClient doesn't work with %h when HostnameLookups is On. 
+    reVhost       = regexp.MustCompile(`^[a-zA-Z0-9-:.]+$`)
+    reClient      = regexp.MustCompile(`^([\da-f:.]+|localhost)$`)
+    reCipherSuite = regexp.MustCompile(`^[A-Z0-9-_]+$`) // openssl -v
+)
+
+var emptyWebFields = web{
+    vhost:          emptyString,
+    port:           emptyString,
+    reqScheme:      emptyString,
+    reqClient:      emptyString,
+    reqMethod:      emptyString,
+    reqURL:         emptyString,
+    reqProto:       emptyString,
+    reqSize:        emptyNumber,
+    reqProcTime:    emptyNumber,
+    respCode:       emptyNumber,
+    respSize:       emptyNumber,
+    upsRespTime:    emptyNumber,
+    sslProto:       emptyString,
+    sslCipherSuite: emptyString,
+}
+
+const (
+    emptyString = "__empty_string__"
+    emptyNumber = -9999
+)
+
+func isEmptyString(s string) bool {
+    return s == emptyString || s == ""
+}
+
+func isEmptyNumber(n int) bool {
+    return n == emptyNumber
+}
+
+func isReqMethodValid(method string) bool {
+    // https://www.iana.org/assignments/http-methods/http-methods.xhtml
+    switch method {
+    case "GET",
+        "ACL",
+        "BASELINE-CONTROL",
+        "BIND",
+        "CHECKIN",
+        "CHECKOUT",
+        "CONNECT",
+        "COPY",
+        "DELETE",
+        "HEAD",
+        "LABEL",
+        "LINK",
+        "LOCK",
+        "MERGE",
+        "MKACTIVITY",
+        "MKCALENDAR",
+        "MKCOL",
+        "MKREDIRECTREF",
+        "MKWORKSPACE",
+        "MOVE",
+        "OPTIONS",
+        "ORDERPATCH",
+        "PATCH",
+        "POST",
+        "PRI",
+        "PROPFIND",
+        "PROPPATCH",
+        "PUT",
+        "REBIND",
+        "REPORT",
+        "SEARCH",
+        "TRACE",
+        "UNBIND",
+        "UNCHECKOUT",
+        "UNLINK",
+        "UNLOCK",
+        "UPDATE",
+        "UPDATEREDIRECTREF":
+        return true
+    }
+    return false
+}
+
+func isReqProtoValid(proto string) bool {
+    return len(proto) >= 6 && proto[:5] == "HTTP/" && isReqProtoVerValid(proto[5:])
+}
+
+func isReqProtoVerValid(version string) bool {
+    switch version {
+    case "1.1", "1", "1.0", "2", "2.0", "3", "3.0":
+        return true
+    }
+    return false
+}
+
+func isPortValid(port string) bool {
+    v, err := strconv.Atoi(port)
+    return err == nil && v >= 80 && v <= 49151
+}
+
+func isSchemeValid(scheme string) bool {
+    return scheme == "http" || scheme == "https"
+}
+
+func isRespCodeValid(code int) bool {
+    // rfc7231
+    // Informational responses (100–199),
+    // Successful responses (200–299),
+    // Redirects (300–399),
+    // Client errors (400–499),
+    // Server errors (500–599).
+    // Note: the upper bound here is inclusive, so 600 is accepted as well (see the tests).
+    return code >= 100 && code <= 600
+}
+
+func isSizeValid(size int) bool {
+    return size >= 0
+}
+
+func isTimeValid(time float64) bool {
+    return time >= 0
+}
+
+func isSSLProtoValid(proto string) bool {
+    switch proto {
+    case "TLSv1.2", "TLSv1.3", "SSLv2", "SSLv3", "TLSv1", "TLSv1.1":
+        return true
+    }
+    return false
+}
+
+func timeMultiplier(time string) float64 {
+    // TODO: Change the code to properly detect and convert IIS time (in milliseconds)
+    // Convert to microseconds:
+    // - nginx time is in seconds with millisecond resolution.
+    if strings.IndexByte(time, '.') > 0 {
+        return 1e6
+    }
+    // - apache time is in microseconds.
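+    // e.g. "0.005" (nginx, seconds) gets multiplier 1e6 -> 5000 microseconds,
+    // while "5000" (apache, already microseconds) gets multiplier 1.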
+ return 1 +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/logline_test.go b/src/go/collectors/go.d.plugin/modules/weblog/logline_test.go new file mode 100644 index 00000000000000..0a72d1c4731e07 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/logline_test.go @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + emptyStr = "" +) + +var emptyLogLine = *newEmptyLogLine() + +func TestLogLine_Assign(t *testing.T) { + type subTest struct { + input string + wantLine logLine + wantErr error + } + type test struct { + name string + fields []string + cases []subTest + } + tests := []test{ + { + name: "Vhost", + fields: []string{ + "host", + "http_host", + "v", + }, + cases: []subTest{ + {input: "1.1.1.1", wantLine: logLine{web: web{vhost: "1.1.1.1"}}}, + {input: "::1", wantLine: logLine{web: web{vhost: "::1"}}}, + {input: "[::1]", wantLine: logLine{web: web{vhost: "::1"}}}, + {input: "1ce:1ce::babe", wantLine: logLine{web: web{vhost: "1ce:1ce::babe"}}}, + {input: "[1ce:1ce::babe]", wantLine: logLine{web: web{vhost: "1ce:1ce::babe"}}}, + {input: "localhost", wantLine: logLine{web: web{vhost: "localhost"}}}, + {input: "debian10.debian", wantLine: logLine{web: web{vhost: "debian10.debian"}}}, + {input: "my_vhost", wantLine: logLine{web: web{vhost: "my_vhost"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + { + name: "Server Port", + fields: []string{ + "server_port", + "p", + }, + cases: []subTest{ + {input: "80", wantLine: logLine{web: web{port: "80"}}}, + {input: "8081", wantLine: logLine{web: web{port: "8081"}}}, + {input: "30000", wantLine: logLine{web: web{port: "30000"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadPort}, + {input: "0", wantLine: emptyLogLine, wantErr: errBadPort}, + {input: "50000", wantLine: emptyLogLine, wantErr: errBadPort}, + }, + }, + { + name: "Vhost With Port", + fields: []string{ + "host:$server_port", + "v:%p", + }, + cases: []subTest{ + {input: "1.1.1.1:80", wantLine: logLine{web: web{vhost: "1.1.1.1", port: "80"}}}, + {input: "::1:80", wantLine: logLine{web: web{vhost: "::1", port: "80"}}}, + {input: "[::1]:80", wantLine: logLine{web: web{vhost: "::1", port: "80"}}}, + {input: "1ce:1ce::babe:80", wantLine: logLine{web: web{vhost: "1ce:1ce::babe", port: "80"}}}, + {input: "debian10.debian:81", wantLine: logLine{web: web{vhost: "debian10.debian", port: "81"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "1.1.1.1", wantLine: emptyLogLine, wantErr: errBadVhostPort}, + {input: "1.1.1.1:", wantLine: emptyLogLine, wantErr: errBadVhostPort}, + {input: "1.1.1.1 80", wantLine: emptyLogLine, wantErr: errBadVhostPort}, + {input: "1.1.1.1:20", wantLine: emptyLogLine, wantErr: errBadVhostPort}, + {input: "1.1.1.1:50000", wantLine: emptyLogLine, wantErr: errBadVhostPort}, + }, + }, + { + name: "Scheme", + fields: []string{ + "scheme", + }, + cases: []subTest{ + {input: "http", wantLine: logLine{web: web{reqScheme: "http"}}}, + {input: "https", wantLine: logLine{web: web{reqScheme: "https"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "HTTP", wantLine: emptyLogLine, wantErr: errBadReqScheme}, + {input: "HTTPS", wantLine: 
emptyLogLine, wantErr: errBadReqScheme}, + }, + }, + { + name: "Client", + fields: []string{ + "remote_addr", + "a", + "h", + }, + cases: []subTest{ + {input: "1.1.1.1", wantLine: logLine{web: web{reqClient: "1.1.1.1"}}}, + {input: "debian10", wantLine: logLine{web: web{reqClient: "debian10"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + { + name: "Request", + fields: []string{ + "request", + "r", + }, + cases: []subTest{ + {input: "GET / HTTP/1.0", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "1.0"}}}, + {input: "HEAD /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "HEAD", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "POST /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "POST", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "PUT /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "PUT", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "PATCH /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "PATCH", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "DELETE /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "DELETE", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "OPTIONS /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "OPTIONS", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "TRACE /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "TRACE", reqURL: "/ihs.gif", reqProto: "1.0"}}}, + {input: "CONNECT ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "CONNECT", reqURL: "ip.cn:443", reqProto: "1.1"}}}, + {input: "MKCOL ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "MKCOL", reqURL: "ip.cn:443", reqProto: "1.1"}}}, + {input: "PROPFIND ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "PROPFIND", reqURL: "ip.cn:443", reqProto: "1.1"}}}, + {input: "MOVE ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "MOVE", reqURL: "ip.cn:443", reqProto: "1.1"}}}, + {input: "SEARCH ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "SEARCH", reqURL: "ip.cn:443", reqProto: "1.1"}}}, + {input: "GET / HTTP/1.1", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "1.1"}}}, + {input: "GET / HTTP/2", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "2"}}}, + {input: "GET / HTTP/2.0", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "2.0"}}}, + {input: "GET /invalid_version http/1.1", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/invalid_version", reqProto: emptyString}}, wantErr: errBadReqProto}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "GET no_version", wantLine: emptyLogLine, wantErr: errBadRequest}, + {input: "GOT / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "get / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "x04\x01\x00P$3\xFE\xEA\x00", wantLine: emptyLogLine, wantErr: errBadRequest}, + }, + }, + { + name: "Request HTTP Method", + fields: []string{ + "request_method", + "m", + }, + cases: []subTest{ + {input: "GET", wantLine: logLine{web: web{reqMethod: "GET"}}}, + {input: "HEAD", wantLine: logLine{web: web{reqMethod: "HEAD"}}}, + {input: "POST", wantLine: logLine{web: web{reqMethod: "POST"}}}, + {input: "PUT", wantLine: logLine{web: web{reqMethod: "PUT"}}}, + {input: "PATCH", wantLine: logLine{web: web{reqMethod: "PATCH"}}}, + {input: "DELETE", wantLine: logLine{web: web{reqMethod: "DELETE"}}}, + {input: "OPTIONS", wantLine: logLine{web: 
web{reqMethod: "OPTIONS"}}}, + {input: "TRACE", wantLine: logLine{web: web{reqMethod: "TRACE"}}}, + {input: "CONNECT", wantLine: logLine{web: web{reqMethod: "CONNECT"}}}, + {input: "MKCOL", wantLine: logLine{web: web{reqMethod: "MKCOL"}}}, + {input: "PROPFIND", wantLine: logLine{web: web{reqMethod: "PROPFIND"}}}, + {input: "MOVE", wantLine: logLine{web: web{reqMethod: "MOVE"}}}, + {input: "SEARCH", wantLine: logLine{web: web{reqMethod: "SEARCH"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "GET no_version", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "GOT / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + {input: "get / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod}, + }, + }, + { + name: "Request URL", + fields: []string{ + "request_uri", + "U", + }, + cases: []subTest{ + {input: "/server-status?auto", wantLine: logLine{web: web{reqURL: "/server-status?auto"}}}, + {input: "/default.html", wantLine: logLine{web: web{reqURL: "/default.html"}}}, + {input: "10.0.0.1:3128", wantLine: logLine{web: web{reqURL: "10.0.0.1:3128"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + { + name: "Request HTTP Protocol", + fields: []string{ + "server_protocol", + "H", + }, + cases: []subTest{ + {input: "HTTP/1.0", wantLine: logLine{web: web{reqProto: "1.0"}}}, + {input: "HTTP/1.1", wantLine: logLine{web: web{reqProto: "1.1"}}}, + {input: "HTTP/2", wantLine: logLine{web: web{reqProto: "2"}}}, + {input: "HTTP/2.0", wantLine: logLine{web: web{reqProto: "2.0"}}}, + {input: "HTTP/3", wantLine: logLine{web: web{reqProto: "3"}}}, + {input: "HTTP/3.0", wantLine: logLine{web: web{reqProto: "3.0"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "1.1", wantLine: emptyLogLine, wantErr: errBadReqProto}, + {input: "http/1.1", wantLine: emptyLogLine, wantErr: errBadReqProto}, + }, + }, + { + name: "Response Status Code", + fields: []string{ + "status", + "s", + ">s", + }, + cases: []subTest{ + {input: "100", wantLine: logLine{web: web{respCode: 100}}}, + {input: "200", wantLine: logLine{web: web{respCode: 200}}}, + {input: "300", wantLine: logLine{web: web{respCode: 300}}}, + {input: "400", wantLine: logLine{web: web{respCode: 400}}}, + {input: "500", wantLine: logLine{web: web{respCode: 500}}}, + {input: "600", wantLine: logLine{web: web{respCode: 600}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "99", wantLine: emptyLogLine, wantErr: errBadRespCode}, + {input: "601", wantLine: emptyLogLine, wantErr: errBadRespCode}, + {input: "200 ", wantLine: emptyLogLine, wantErr: errBadRespCode}, + {input: "0.222", wantLine: emptyLogLine, wantErr: errBadRespCode}, + {input: "localhost", wantLine: emptyLogLine, wantErr: errBadRespCode}, + }, + }, + { + name: "Request Size", + fields: []string{ + "request_length", + "I", + }, + cases: []subTest{ + {input: "15", wantLine: logLine{web: web{reqSize: 15}}}, + {input: "1000000", wantLine: logLine{web: web{reqSize: 1000000}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: logLine{web: web{reqSize: 0}}}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadReqSize}, + {input: "100.222", wantLine: emptyLogLine, wantErr: errBadReqSize}, + {input: "invalid", wantLine: emptyLogLine, wantErr: errBadReqSize}, + }, + }, + { + name: "Response Size", + fields: []string{ + "bytes_sent", + "body_bytes_sent", + "O", + 
"B", + "b", + }, + cases: []subTest{ + {input: "15", wantLine: logLine{web: web{respSize: 15}}}, + {input: "1000000", wantLine: logLine{web: web{respSize: 1000000}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: logLine{web: web{respSize: 0}}}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespSize}, + {input: "100.222", wantLine: emptyLogLine, wantErr: errBadRespSize}, + {input: "invalid", wantLine: emptyLogLine, wantErr: errBadRespSize}, + }, + }, + { + name: "Request Processing Time", + fields: []string{ + "request_time", + "D", + }, + cases: []subTest{ + {input: "100222", wantLine: logLine{web: web{reqProcTime: 100222}}}, + {input: "100.222", wantLine: logLine{web: web{reqProcTime: 100222000}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadReqProcTime}, + {input: "0.333,0.444,0.555", wantLine: emptyLogLine, wantErr: errBadReqProcTime}, + {input: "number", wantLine: emptyLogLine, wantErr: errBadReqProcTime}, + }, + }, + { + name: "Upstream Response Time", + fields: []string{ + "upstream_response_time", + }, + cases: []subTest{ + {input: "100222", wantLine: logLine{web: web{upsRespTime: 100222}}}, + {input: "100.222", wantLine: logLine{web: web{upsRespTime: 100222000}}}, + {input: "0.100 , 0.400 : 0.200 ", wantLine: logLine{web: web{upsRespTime: 700000}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadUpsRespTime}, + {input: "number", wantLine: emptyLogLine, wantErr: errBadUpsRespTime}, + }, + }, + { + name: "SSL Protocol", + fields: []string{ + "ssl_protocol", + }, + cases: []subTest{ + {input: "SSLv3", wantLine: logLine{web: web{sslProto: "SSLv3"}}}, + {input: "SSLv2", wantLine: logLine{web: web{sslProto: "SSLv2"}}}, + {input: "TLSv1", wantLine: logLine{web: web{sslProto: "TLSv1"}}}, + {input: "TLSv1.1", wantLine: logLine{web: web{sslProto: "TLSv1.1"}}}, + {input: "TLSv1.2", wantLine: logLine{web: web{sslProto: "TLSv1.2"}}}, + {input: "TLSv1.3", wantLine: logLine{web: web{sslProto: "TLSv1.3"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadSSLProto}, + {input: "invalid", wantLine: emptyLogLine, wantErr: errBadSSLProto}, + }, + }, + { + name: "SSL Cipher Suite", + fields: []string{ + "ssl_cipher", + }, + cases: []subTest{ + {input: "ECDHE-RSA-AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "ECDHE-RSA-AES256-SHA"}}}, + {input: "DHE-RSA-AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "DHE-RSA-AES256-SHA"}}}, + {input: "AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "AES256-SHA"}}}, + {input: "PSK-RC4-SHA", wantLine: logLine{web: web{sslCipherSuite: "PSK-RC4-SHA"}}}, + {input: "TLS_AES_256_GCM_SHA384", wantLine: logLine{web: web{sslCipherSuite: "TLS_AES_256_GCM_SHA384"}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine, wantErr: errBadSSLCipherSuite}, + {input: "invalid", wantLine: emptyLogLine, wantErr: errBadSSLCipherSuite}, + }, + }, + { + name: "Custom Fields", + fields: []string{ + "custom", + }, + cases: []subTest{ + {input: "POST", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "POST"}}}}}, + {input: "/example.com", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "/example.com"}}}}}, + {input: 
"HTTP/1.1", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "HTTP/1.1"}}}}}, + {input: "0.333,0.444,0.555", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "0.333,0.444,0.555"}}}}}, + {input: "-1", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "-1"}}}}}, + {input: "invalid", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "invalid"}}}}}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + { + name: "Custom Fields Not Exist", + fields: []string{ + "custom_field_not_exist", + }, + cases: []subTest{ + {input: "POST", wantLine: emptyLogLine}, + {input: "/example.com", wantLine: emptyLogLine}, + {input: "HTTP/1.1", wantLine: emptyLogLine}, + {input: "0.333,0.444,0.555", wantLine: emptyLogLine}, + {input: "-1", wantLine: emptyLogLine}, + {input: "invalid", wantLine: emptyLogLine}, + {input: emptyStr, wantLine: emptyLogLine}, + {input: hyphen, wantLine: emptyLogLine}, + }, + }, + } + + for _, tt := range tests { + for _, field := range tt.fields { + for i, tc := range tt.cases { + name := fmt.Sprintf("[%s:%d]field='%s'|line='%s'", tt.name, i+1, field, tc.input) + t.Run(name, func(t *testing.T) { + + line := newEmptyLogLineWithFields() + err := line.Assign(field, tc.input) + + if tc.wantErr != nil { + require.Error(t, err) + assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err) + } else { + require.NoError(t, err) + } + + expected := prepareLogLine(field, tc.wantLine) + assert.Equal(t, expected, *line) + }) + } + } + } +} + +func TestLogLine_verify(t *testing.T) { + type subTest struct { + line logLine + wantErr error + } + tests := []struct { + name string + field string + cases []subTest + }{ + { + name: "Vhost", + field: "host", + cases: []subTest{ + {line: logLine{web: web{vhost: "192.168.0.1"}}}, + {line: logLine{web: web{vhost: "debian10.debian"}}}, + {line: logLine{web: web{vhost: "1ce:1ce::babe"}}}, + {line: logLine{web: web{vhost: "localhost"}}}, + {line: logLine{web: web{vhost: "invalid_vhost"}}, wantErr: errBadVhost}, + {line: logLine{web: web{vhost: "http://192.168.0.1/"}}, wantErr: errBadVhost}, + }, + }, + { + name: "Server Port", + field: "server_port", + cases: []subTest{ + {line: logLine{web: web{port: "80"}}}, + {line: logLine{web: web{port: "8081"}}}, + {line: logLine{web: web{port: "79"}}, wantErr: errBadPort}, + {line: logLine{web: web{port: "50000"}}, wantErr: errBadPort}, + {line: logLine{web: web{port: "0.0.0.0"}}, wantErr: errBadPort}, + }, + }, + { + name: "Scheme", + field: "scheme", + cases: []subTest{ + {line: logLine{web: web{reqScheme: "http"}}}, + {line: logLine{web: web{reqScheme: "https"}}}, + {line: logLine{web: web{reqScheme: "not_https"}}, wantErr: errBadReqScheme}, + {line: logLine{web: web{reqScheme: "HTTP"}}, wantErr: errBadReqScheme}, + {line: logLine{web: web{reqScheme: "HTTPS"}}, wantErr: errBadReqScheme}, + {line: logLine{web: web{reqScheme: "10"}}, wantErr: errBadReqScheme}, + }, + }, + { + name: "Client", + field: "remote_addr", + cases: []subTest{ + {line: logLine{web: web{reqClient: "1.1.1.1"}}}, + {line: logLine{web: web{reqClient: "::1"}}}, + {line: logLine{web: web{reqClient: "1ce:1ce::babe"}}}, + {line: logLine{web: web{reqClient: "localhost"}}}, + {line: logLine{web: web{reqClient: "debian10.debian"}}, wantErr: errBadReqClient}, + {line: logLine{web: web{reqClient: "invalid"}}, wantErr: errBadReqClient}, + }, + }, + { + name: "Request 
HTTP Method", + field: "request_method", + cases: []subTest{ + {line: logLine{web: web{reqMethod: "GET"}}}, + {line: logLine{web: web{reqMethod: "POST"}}}, + {line: logLine{web: web{reqMethod: "TRACE"}}}, + {line: logLine{web: web{reqMethod: "OPTIONS"}}}, + {line: logLine{web: web{reqMethod: "CONNECT"}}}, + {line: logLine{web: web{reqMethod: "DELETE"}}}, + {line: logLine{web: web{reqMethod: "PUT"}}}, + {line: logLine{web: web{reqMethod: "PATCH"}}}, + {line: logLine{web: web{reqMethod: "HEAD"}}}, + {line: logLine{web: web{reqMethod: "MKCOL"}}}, + {line: logLine{web: web{reqMethod: "PROPFIND"}}}, + {line: logLine{web: web{reqMethod: "MOVE"}}}, + {line: logLine{web: web{reqMethod: "SEARCH"}}}, + {line: logLine{web: web{reqMethod: "Get"}}, wantErr: errBadReqMethod}, + {line: logLine{web: web{reqMethod: "get"}}, wantErr: errBadReqMethod}, + }, + }, + { + name: "Request URL", + field: "request_uri", + cases: []subTest{ + {line: logLine{web: web{reqURL: "/"}}}, + {line: logLine{web: web{reqURL: "/status?full&json"}}}, + {line: logLine{web: web{reqURL: "/icons/openlogo-75.png"}}}, + {line: logLine{web: web{reqURL: "status?full&json"}}}, + {line: logLine{web: web{reqURL: "\"req_url=/ \""}}}, + {line: logLine{web: web{reqURL: "http://192.168.0.1/"}}}, + {line: logLine{web: web{reqURL: ""}}}, + }, + }, + { + name: "Request HTTP Protocol", + field: "server_protocol", + cases: []subTest{ + {line: logLine{web: web{reqProto: "1"}}}, + {line: logLine{web: web{reqProto: "1.0"}}}, + {line: logLine{web: web{reqProto: "1.1"}}}, + {line: logLine{web: web{reqProto: "2.0"}}}, + {line: logLine{web: web{reqProto: "2"}}}, + {line: logLine{web: web{reqProto: "0.9"}}, wantErr: errBadReqProto}, + {line: logLine{web: web{reqProto: "1.1.1"}}, wantErr: errBadReqProto}, + {line: logLine{web: web{reqProto: "2.2"}}, wantErr: errBadReqProto}, + {line: logLine{web: web{reqProto: "localhost"}}, wantErr: errBadReqProto}, + }, + }, + { + name: "Response Status Code", + field: "status", + cases: []subTest{ + {line: logLine{web: web{respCode: 100}}}, + {line: logLine{web: web{respCode: 200}}}, + {line: logLine{web: web{respCode: 300}}}, + {line: logLine{web: web{respCode: 400}}}, + {line: logLine{web: web{respCode: 500}}}, + {line: logLine{web: web{respCode: 600}}}, + {line: logLine{web: web{respCode: -1}}, wantErr: errBadRespCode}, + {line: logLine{web: web{respCode: 99}}, wantErr: errBadRespCode}, + {line: logLine{web: web{respCode: 601}}, wantErr: errBadRespCode}, + }, + }, + { + name: "Request size", + field: "request_length", + cases: []subTest{ + {line: logLine{web: web{reqSize: 0}}}, + {line: logLine{web: web{reqSize: 100}}}, + {line: logLine{web: web{reqSize: 1000000}}}, + {line: logLine{web: web{reqSize: -1}}, wantErr: errBadReqSize}, + }, + }, + { + name: "Response size", + field: "bytes_sent", + cases: []subTest{ + {line: logLine{web: web{respSize: 0}}}, + {line: logLine{web: web{respSize: 100}}}, + {line: logLine{web: web{respSize: 1000000}}}, + {line: logLine{web: web{respSize: -1}}, wantErr: errBadRespSize}, + }, + }, + { + name: "Request Processing Time", + field: "request_time", + cases: []subTest{ + {line: logLine{web: web{reqProcTime: 0}}}, + {line: logLine{web: web{reqProcTime: 100}}}, + {line: logLine{web: web{reqProcTime: 1000.123}}}, + {line: logLine{web: web{reqProcTime: -1}}, wantErr: errBadReqProcTime}, + }, + }, + { + name: "Upstream Response Time", + field: "upstream_response_time", + cases: []subTest{ + {line: logLine{web: web{upsRespTime: 0}}}, + {line: logLine{web: web{upsRespTime: 100}}}, + {line: 
logLine{web: web{upsRespTime: 1000.123}}}, + {line: logLine{web: web{upsRespTime: -1}}, wantErr: errBadUpsRespTime}, + }, + }, + { + name: "SSL Protocol", + field: "ssl_protocol", + cases: []subTest{ + {line: logLine{web: web{sslProto: "SSLv3"}}}, + {line: logLine{web: web{sslProto: "SSLv2"}}}, + {line: logLine{web: web{sslProto: "TLSv1"}}}, + {line: logLine{web: web{sslProto: "TLSv1.1"}}}, + {line: logLine{web: web{sslProto: "TLSv1.2"}}}, + {line: logLine{web: web{sslProto: "TLSv1.3"}}}, + {line: logLine{web: web{sslProto: "invalid"}}, wantErr: errBadSSLProto}, + }, + }, + { + name: "SSL Cipher Suite", + field: "ssl_cipher", + cases: []subTest{ + {line: logLine{web: web{sslCipherSuite: "ECDHE-RSA-AES256-SHA"}}}, + {line: logLine{web: web{sslCipherSuite: "DHE-RSA-AES256-SHA"}}}, + {line: logLine{web: web{sslCipherSuite: "AES256-SHA"}}}, + {line: logLine{web: web{sslCipherSuite: "TLS_AES_256_GCM_SHA384"}}}, + {line: logLine{web: web{sslCipherSuite: "invalid"}}, wantErr: errBadSSLCipherSuite}, + }, + }, + { + name: "Custom Fields", + field: "custom", + cases: []subTest{ + {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "POST"}}}}}, + {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "/example.com"}}}}}, + {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "0.333,0.444,0.555"}}}}}, + }, + }, + { + name: "Empty Line", + cases: []subTest{ + {line: emptyLogLine, wantErr: errEmptyLine}, + }, + }, + } + + for _, tt := range tests { + for i, tc := range tt.cases { + name := fmt.Sprintf("[%s:%d]field='%s'", tt.name, i+1, tt.field) + + t.Run(name, func(t *testing.T) { + line := prepareLogLine(tt.field, tc.line) + + err := line.verify() + + if tc.wantErr != nil { + require.Error(t, err) + assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err) + } else { + assert.NoError(t, err) + } + }) + } + } +} + +func prepareLogLine(field string, template logLine) logLine { + if template.empty() { + return *newEmptyLogLineWithFields() + } + + line := newEmptyLogLineWithFields() + line.reset() + + switch field { + case "host", "http_host", "v": + line.vhost = template.vhost + case "server_port", "p": + line.port = template.port + case "host:$server_port", "v:%p": + line.vhost = template.vhost + line.port = template.port + case "scheme": + line.reqScheme = template.reqScheme + case "remote_addr", "a", "h": + line.reqClient = template.reqClient + case "request", "r": + line.reqMethod = template.reqMethod + line.reqURL = template.reqURL + line.reqProto = template.reqProto + case "request_method", "m": + line.reqMethod = template.reqMethod + case "request_uri", "U": + line.reqURL = template.reqURL + case "server_protocol", "H": + line.reqProto = template.reqProto + case "status", "s", ">s": + line.respCode = template.respCode + case "request_length", "I": + line.reqSize = template.reqSize + case "bytes_sent", "body_bytes_sent", "b", "O", "B": + line.respSize = template.respSize + case "request_time", "D": + line.reqProcTime = template.reqProcTime + case "upstream_response_time": + line.upsRespTime = template.upsRespTime + case "ssl_protocol": + line.sslProto = template.sslProto + case "ssl_cipher": + line.sslCipherSuite = template.sslCipherSuite + default: + line.custom.values = template.custom.values + } + return *line +} + +func newEmptyLogLineWithFields() *logLine { + l := newEmptyLogLine() + l.custom.fields = map[string]struct{}{"custom": {}} + return l +} diff --git 
a/src/go/collectors/go.d.plugin/modules/weblog/metadata.yaml b/src/go/collectors/go.d.plugin/modules/weblog/metadata.yaml new file mode 100644 index 00000000000000..9aa76be8eb027c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/metadata.yaml @@ -0,0 +1,533 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-web_log + plugin_name: go.d.plugin + module_name: web_log + monitored_instance: + name: Web server log files + link: "" + categories: + - data-collection.web-servers-and-web-proxies + icon_filename: webservers.svg + keywords: + - webserver + - apache + - httpd + - nginx + - lighttpd + - logs + most_popular: false + info_provided_to_referring_integrations: + description: "" + related_resources: + integrations: + list: [] + overview: + data_collection: + metrics_description: | + This collector monitors web servers by parsing their log files. + method_description: "" + default_behavior: + auto_detection: + description: | + It automatically detects log files of web servers running on localhost. + limits: + description: "" + performance_impact: + description: "" + additional_permissions: + description: "" + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/web_log.conf + options: + description: | + Weblog is aware of how to parse and interpret the following fields (**known fields**): + + > [nginx](https://nginx.org/en/docs/varindex.html) + > + > [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html) + + | nginx | apache | description | + |-------------------------|----------|------------------------------------------------------------------------------------------| + | $host ($http_host) | %v | Name of the server which accepted a request. | + | $server_port | %p | Port of the server which accepted a request. | + | $scheme | - | Request scheme. "http" or "https". | + | $remote_addr | %a (%h) | Client address. | + | $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". | + | $request_method | %m | Request method. Usually "GET" or "POST". | + | $request_uri | %U | Full original request URI. | + | $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". | + | $status | %s (%>s) | Response status code. | + | $request_length | %I | Bytes received from a client, including request and headers. | + | $bytes_sent | %O | Bytes sent to a client, including request and headers. | + | $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. | + | $request_time | %D | Request processing time. | + | $upstream_response_time | - | Time spent on receiving the response from the upstream server. | + | $ssl_protocol | - | Protocol of an established SSL connection. | + | $ssl_cipher | - | String of ciphers used for an established SSL connection. | + + Notes: + + - Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either to disable HostnameLookups or use `%a` instead of `%h`. + - Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. 
The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network. + - To get `%I` and `%O` working you need to enable `mod_logio` on Apache. + - NGINX logs the URI with query parameters, Apache doesn't. + - `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others. + - Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + - name: path + description: Path to the web server log file. + default_value: "" + required: true + - name: exclude_path + description: Path to exclude. + default_value: "*.gz" + required: false + - name: url_patterns + description: List of URL patterns. + default_value: "[]" + required: false + detailed_description: | + "URL pattern" scope metrics will be collected for each URL pattern. + + Option syntax: + + ```yaml + url_patterns: + - name: name1 + pattern: pattern1 + - name: name2 + pattern: pattern2 + ``` + - name: url_patterns.name + description: Used as a dimension name. + default_value: "" + required: true + - name: url_patterns.pattern + description: Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format). + default_value: "" + required: true + - name: parser + description: Log parser configuration. + default_value: "" + required: false + - name: parser.log_type + description: Log parser type. + default_value: auto + required: false + detailed_description: | + Weblog supports 5 different log parsers: + + | Parser type | Description | + |-------------|-------------------------------------------| + | auto | Use CSV and auto-detect format | + | csv | Comma-separated values | + | json | [JSON](https://www.json.org/json-en.html) | + | ltsv | [LTSV](http://ltsv.org/) | + | regexp | Regular expression with named groups | + + Syntax: + + ```yaml + parser: + log_type: auto + ``` + + If the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file. + + - checks if the format is `LTSV` (using a regexp). + - checks if the format is `JSON` (using a regexp). + - assumes the format is `CSV` and tries to find a matching `CSV` log format in a predefined list. It tries to parse the line using each of them in the following order (the first one that matches is used): + + ```sh + $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time + $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time + $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time + $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time + $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent + $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time + $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time + $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time + $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time + $remote_addr - - [$time_local] "$request" $status $body_bytes_sent + ``` + + If you're using the default Apache/NGINX log format, auto-detection will work for you. If it doesn't, you need to set the format manually.
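To make the guessing loop above concrete, here is a minimal, self-contained sketch of the idea. It is an illustration only, not the module's implementation: `guessFormat`, the two-entry candidate list, and the bare column-count heuristic are simplifications of what `guessCSVParser` in `parser.go` (added later in this patch) does with full parsing and per-field verification.

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// guessFormat tries each candidate format, most specific first, and returns
// the first one whose column count matches the sample log line.
func guessFormat(sample string, candidates []string) (string, error) {
	for _, format := range candidates {
		want := len(strings.Fields(format))
		if strings.Contains(format, "$time_local") {
			// "[$time_local]" occupies two space-separated columns in the
			// data: "[22/Mar/2009:09:30:31" and "+0100]".
			want++
		}
		r := csv.NewReader(strings.NewReader(sample))
		r.Comma = ' '
		fields, err := r.Read()
		if err != nil || len(fields) != want {
			continue // the sample does not fit this format, try the next one
		}
		return format, nil
	}
	return "", fmt.Errorf("cannot auto-detect log format, use custom log format")
}

func main() {
	candidates := []string{
		`$remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time`,
		`$remote_addr - - [$time_local] "$request" $status $body_bytes_sent`,
	}
	sample := `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`
	format, err := guessFormat(sample, candidates)
	fmt.Println(format, err) // the shorter "common" format matches
}
```

Ordering matters: candidates are tried from most fields to fewest, so a richer format is preferred whenever the line supports it, which mirrors the `guessOrder` list used by the module.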
+ - name: parser.csv_config + description: CSV log parser config. + default_value: "" + required: false + - name: parser.csv_config.delimiter + description: CSV field delimiter. + default_value: "," + required: false + - name: parser.csv_config.format + description: CSV log format. + default_value: "" + required: false + detailed_description: "" + - name: parser.ltsv_config + description: LTSV log parser config. + default_value: "" + required: false + - name: parser.ltsv_config.field_delimiter + description: LTSV field delimiter. + default_value: "\\t" + required: false + - name: parser.ltsv_config.value_delimiter + description: LTSV value delimiter. + default_value: ":" + required: false + - name: parser.ltsv_config.mapping + description: LTSV fields mapping to **known fields**. + default_value: "" + required: true + detailed_description: | + The mapping is a dictionary where the key is a field name as it appears in the logs and the value is the corresponding **known field**. + + > **Note**: don't use `$` and `%` prefixes for mapped field names. + + ```yaml + parser: + log_type: ltsv + ltsv_config: + mapping: + label1: field1 + label2: field2 + ``` + - name: parser.json_config + description: JSON log parser config. + default_value: "" + required: false + - name: parser.json_config.mapping + description: JSON fields mapping to **known fields**. + default_value: "" + required: true + detailed_description: | + The mapping is a dictionary where the key is a field name as it appears in the logs and the value is the corresponding **known field**. + + > **Note**: don't use `$` and `%` prefixes for mapped field names. + + ```yaml + parser: + log_type: json + json_config: + mapping: + label1: field1 + label2: field2 + ``` + - name: parser.regexp_config + description: RegExp log parser config. + default_value: "" + required: false + - name: parser.regexp_config.pattern + description: RegExp pattern with named groups. + default_value: "" + required: true + detailed_description: | + Use a pattern with named subexpressions; these names should be **known fields**. + + > **Note**: don't use `$` and `%` prefixes for mapped field names. + + Syntax: + + ```yaml + parser: + log_type: regexp + regexp_config: + pattern: PATTERN + ```
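As a rough illustration of how named subexpressions map captures onto known fields, consider this standalone sketch. The pattern below is illustrative only, not the module's default, and the module additionally runs each captured value through verification (see `logLine.verify` earlier in this patch).

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Named groups play the role of known fields: note there are no "$" or
	// "%" prefixes in the group names.
	pattern := regexp.MustCompile(
		`^(?P<remote_addr>[\da-f.:]+) .+ "(?P<request>[^"]+)" (?P<status>\d{3})`,
	)
	line := `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`

	match := pattern.FindStringSubmatch(line)
	if match == nil {
		fmt.Println("line does not match the pattern")
		return
	}
	// Associate each named group with its captured value.
	for i, name := range pattern.SubexpNames() {
		if name != "" {
			fmt.Printf("%s = %s\n", name, match[i])
		}
	}
}
```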
+ examples: + folding: + title: Config + enabled: true + list: [] + troubleshooting: + problems: + list: [] + alerts: + - name: web_log_1m_unmatched + metric: web_log.excluded_requests + info: percentage of unparsed log lines over the last minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_1m_requests + metric: web_log.type_requests + info: "ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_1m_redirects + metric: web_log.type_requests + info: "ratio of redirection HTTP requests over the last minute (3xx except 304)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_1m_bad_requests + metric: web_log.type_requests + info: "ratio of client error HTTP requests over the last minute (4xx except 401)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_1m_internal_errors + metric: web_log.type_requests + info: "ratio of server error HTTP requests over the last minute (5xx)" + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_web_slow + metric: web_log.request_processing_time + info: average HTTP response time over the last 1 minute + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + - name: web_log_5m_requests_ratio + metric: web_log.type_requests + info: ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application.
+ labels: [] + metrics: + - name: web_log.requests + description: Total Requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: web_log.excluded_requests + description: Excluded Requests + unit: requests/s + chart_type: stacked + dimensions: + - name: unmatched + - name: web_log.type_requests + description: Requests By Type + unit: requests/s + chart_type: stacked + dimensions: + - name: success + - name: bad + - name: redirect + - name: error + - name: web_log.status_code_class_responses + description: Responses By Status Code Class + unit: responses/s + chart_type: stacked + dimensions: + - name: 1xx + - name: 2xx + - name: 3xx + - name: 4xx + - name: 5xx + - name: web_log.status_code_class_1xx_responses + description: Informational Responses By Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per 1xx code + - name: web_log.status_code_class_2xx_responses + description: Successful Responses By Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per 2xx code + - name: web_log.status_code_class_3xx_responses + description: Redirects Responses By Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per 3xx code + - name: web_log.status_code_class_4xx_responses + description: Client Errors Responses By Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per 4xx code + - name: web_log.status_code_class_5xx_responses + description: Server Errors Responses By Status Code + unit: responses/s + chart_type: stacked + dimensions: + - name: a dimension per 5xx code + - name: web_log.bandwidth + description: Bandwidth + unit: kilobits/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: web_log.request_processing_time + description: Request Processing Time + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: web_log.requests_processing_time_histogram + description: Requests Processing Time Histogram + unit: requests/s + chart_type: line + dimensions: + - name: a dimension per bucket + - name: web_log.upstream_response_time + description: Upstream Response Time + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: web_log.upstream_responses_time_histogram + description: Upstream Responses Time Histogram + unit: requests/s + chart_type: line + dimensions: + - name: a dimension per bucket + - name: web_log.current_poll_uniq_clients + description: Current Poll Unique Clients + unit: clients + chart_type: stacked + dimensions: + - name: ipv4 + - name: ipv6 + - name: web_log.vhost_requests + description: Requests By Vhost + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per vhost + - name: web_log.port_requests + description: Requests By Port + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per port + - name: web_log.scheme_requests + description: Requests By Scheme + unit: requests/s + chart_type: stacked + dimensions: + - name: http + - name: https + - name: web_log.http_method_requests + description: Requests By HTTP Method + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per HTTP method + - name: web_log.http_version_requests + description: Requests By HTTP Version + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per HTTP version + - name: web_log.ip_proto_requests + description: 
Requests By IP Protocol + unit: requests/s + chart_type: stacked + dimensions: + - name: ipv4 + - name: ipv6 + - name: web_log.ssl_proto_requests + description: Requests By SSL Connection Protocol + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per SSL protocol + - name: web_log.ssl_cipher_suite_requests + description: Requests By SSL Connection Cipher Suite + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per SSL cipher suite + - name: web_log.url_pattern_requests + description: URL Field Requests By Pattern + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per URL pattern + - name: web_log.custom_field_pattern_requests + description: Custom Field Requests By Pattern + unit: requests/s + chart_type: stacked + dimensions: + - name: a dimension per custom field pattern + - name: custom time field + description: TBD + labels: [] + metrics: + - name: web_log.custom_time_field_summary + description: Custom Time Field Summary + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: web_log.custom_time_field_histogram + description: Custom Time Field Histogram + unit: observations + chart_type: line + dimensions: + - name: a dimension per bucket + - name: custom numeric field + description: TBD + labels: [] + metrics: + - name: web_log.custom_numeric_field_{{field_name}}_summary + description: Custom Numeric Field Summary + unit: '{{units}}' + chart_type: line + dimensions: + - name: min + - name: max + - name: avg + - name: URL pattern + description: TBD + labels: [] + metrics: + - name: web_log.url_pattern_status_code_responses + description: Responses By Status Code + unit: responses/s + chart_type: line + dimensions: + - name: a dimension per pattern + - name: web_log.url_pattern_http_method_requests + description: Requests By HTTP Method + unit: requests/s + chart_type: line + dimensions: + - name: a dimension per HTTP method + - name: web_log.url_pattern_bandwidth + description: Bandwidth + unit: kilobits/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: web_log.url_pattern_request_processing_time + description: Request Processing Time + unit: milliseconds + chart_type: line + dimensions: + - name: min + - name: max + - name: avg diff --git a/src/go/collectors/go.d.plugin/modules/weblog/metrics.go b/src/go/collectors/go.d.plugin/modules/weblog/metrics.go new file mode 100644 index 00000000000000..8d0284d13c5cbd --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/metrics.go @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "github.com/netdata/go.d.plugin/pkg/metrics" +) + +func newWebLogSummary() metrics.Summary { + return &weblogSummary{metrics.NewSummary()} +} + +type weblogSummary struct { + metrics.Summary +} + +// WriteTo redefines metrics.Summary.WriteTo +// TODO: temporary workaround? 
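+// The embedded summary emits the min/max/avg keys only once it has observed at least one value; zero-filling the missing keys below keeps the corresponding chart dimensions present on every update.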
+func (s weblogSummary) WriteTo(rv map[string]int64, key string, mul, div int) { + s.Summary.WriteTo(rv, key, mul, div) + if _, ok := rv[key+"_min"]; !ok { + rv[key+"_min"] = 0 + rv[key+"_max"] = 0 + rv[key+"_avg"] = 0 + } +} + +type ( + metricsData struct { + Requests metrics.Counter `stm:"requests"` + ReqUnmatched metrics.Counter `stm:"req_unmatched"` + + RespCode metrics.CounterVec `stm:"resp_code"` + Resp1xx metrics.Counter `stm:"resp_1xx"` + Resp2xx metrics.Counter `stm:"resp_2xx"` + Resp3xx metrics.Counter `stm:"resp_3xx"` + Resp4xx metrics.Counter `stm:"resp_4xx"` + Resp5xx metrics.Counter `stm:"resp_5xx"` + + ReqSuccess metrics.Counter `stm:"req_type_success"` + ReqRedirect metrics.Counter `stm:"req_type_redirect"` + ReqBad metrics.Counter `stm:"req_type_bad"` + ReqError metrics.Counter `stm:"req_type_error"` + + UniqueIPv4 metrics.UniqueCounter `stm:"uniq_ipv4"` + UniqueIPv6 metrics.UniqueCounter `stm:"uniq_ipv6"` + BytesSent metrics.Counter `stm:"bytes_sent"` + BytesReceived metrics.Counter `stm:"bytes_received"` + ReqProcTime metrics.Summary `stm:"req_proc_time"` + ReqProcTimeHist metrics.Histogram `stm:"req_proc_time_hist"` + UpsRespTime metrics.Summary `stm:"upstream_resp_time"` + UpsRespTimeHist metrics.Histogram `stm:"upstream_resp_time_hist"` + + ReqVhost metrics.CounterVec `stm:"req_vhost"` + ReqPort metrics.CounterVec `stm:"req_port"` + ReqMethod metrics.CounterVec `stm:"req_method"` + ReqURLPattern metrics.CounterVec `stm:"req_url_ptn"` + ReqVersion metrics.CounterVec `stm:"req_version"` + ReqSSLProto metrics.CounterVec `stm:"req_ssl_proto"` + ReqSSLCipherSuite metrics.CounterVec `stm:"req_ssl_cipher_suite"` + ReqHTTPScheme metrics.Counter `stm:"req_http_scheme"` + ReqHTTPSScheme metrics.Counter `stm:"req_https_scheme"` + ReqIPv4 metrics.Counter `stm:"req_ipv4"` + ReqIPv6 metrics.Counter `stm:"req_ipv6"` + + ReqCustomField map[string]metrics.CounterVec `stm:"custom_field"` + URLPatternStats map[string]*patternMetrics `stm:"url_ptn"` + + ReqCustomTimeField map[string]*customTimeFieldMetrics `stm:"custom_time_field"` + ReqCustomNumericField map[string]*customNumericFieldMetrics `stm:"custom_numeric_field"` + } + customTimeFieldMetrics struct { + Time metrics.Summary `stm:"time"` + TimeHist metrics.Histogram `stm:"time_hist"` + } + customNumericFieldMetrics struct { + Summary metrics.Summary `stm:"summary"` + + multiplier int + divisor int + } + patternMetrics struct { + RespCode metrics.CounterVec `stm:"resp_code"` + ReqMethod metrics.CounterVec `stm:"req_method"` + BytesSent metrics.Counter `stm:"bytes_sent"` + BytesReceived metrics.Counter `stm:"bytes_received"` + ReqProcTime metrics.Summary `stm:"req_proc_time"` + } +) + +func newMetricsData(config Config) *metricsData { + return &metricsData{ + ReqVhost: metrics.NewCounterVec(), + ReqPort: metrics.NewCounterVec(), + ReqMethod: metrics.NewCounterVec(), + ReqVersion: metrics.NewCounterVec(), + RespCode: metrics.NewCounterVec(), + ReqSSLProto: metrics.NewCounterVec(), + ReqSSLCipherSuite: metrics.NewCounterVec(), + ReqProcTime: newWebLogSummary(), + ReqProcTimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(config.Histogram)), + UpsRespTime: newWebLogSummary(), + UpsRespTimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(config.Histogram)), + UniqueIPv4: metrics.NewUniqueCounter(true), + UniqueIPv6: metrics.NewUniqueCounter(true), + ReqURLPattern: newCounterVecFromPatterns(config.URLPatterns), + ReqCustomField: newReqCustomField(config.CustomFields), + URLPatternStats: 
newURLPatternStats(config.URLPatterns), + ReqCustomTimeField: newReqCustomTimeField(config.CustomTimeFields), + ReqCustomNumericField: newReqCustomNumericField(config.CustomNumericFields), + } +} + +func (m *metricsData) reset() { + m.UniqueIPv4.Reset() + m.UniqueIPv6.Reset() + m.ReqProcTime.Reset() + m.UpsRespTime.Reset() + for _, v := range m.URLPatternStats { + v.ReqProcTime.Reset() + } + for _, v := range m.ReqCustomTimeField { + v.Time.Reset() + } + for _, v := range m.ReqCustomNumericField { + v.Summary.Reset() + } +} + +func newCounterVecFromPatterns(patterns []userPattern) metrics.CounterVec { + c := metrics.NewCounterVec() + for _, p := range patterns { + _, _ = c.GetP(p.Name) + } + return c +} + +func newURLPatternStats(patterns []userPattern) map[string]*patternMetrics { + stats := make(map[string]*patternMetrics) + for _, p := range patterns { + stats[p.Name] = &patternMetrics{ + RespCode: metrics.NewCounterVec(), + ReqMethod: metrics.NewCounterVec(), + ReqProcTime: newWebLogSummary(), + } + } + return stats +} + +func newReqCustomField(fields []customField) map[string]metrics.CounterVec { + cf := make(map[string]metrics.CounterVec) + for _, f := range fields { + cf[f.Name] = newCounterVecFromPatterns(f.Patterns) + } + return cf +} + +func newReqCustomTimeField(fields []customTimeField) map[string]*customTimeFieldMetrics { + cf := make(map[string]*customTimeFieldMetrics) + for _, f := range fields { + cf[f.Name] = &customTimeFieldMetrics{ + Time: newWebLogSummary(), + TimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(f.Histogram)), + } + } + return cf +} + +func newReqCustomNumericField(fields []customNumericField) map[string]*customNumericFieldMetrics { + rv := make(map[string]*customNumericFieldMetrics) + for _, f := range fields { + rv[f.Name] = &customNumericFieldMetrics{ + Summary: newWebLogSummary(), + multiplier: f.Multiplier, + divisor: f.Divisor, + } + } + return rv +} + +// convert histogram options to microseconds (second => us) +func convHistOptionsToMicroseconds(histogram []float64) []float64 { + var buckets []float64 + for _, value := range histogram { + buckets = append(buckets, value*1e6) + } + return buckets +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/parser.go b/src/go/collectors/go.d.plugin/modules/weblog/parser.go new file mode 100644 index 00000000000000..11a6d2832afcb4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/parser.go @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/pkg/logs" +) + +/* +Default apache log format: + - "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined + - "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined + - "%h %l %u %t \"%r\" %>s %O" common + - "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %I %O" Combined I/O (https://httpd.apache.org/docs/2.4/mod/mod_logio.html) + +Default nginx log format: + - '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '"$http_referer" "$http_user_agent"' combined + +Netdata recommends: + Nginx: + - '$remote_addr - $remote_user [$time_local] ' + '"$request" $status $body_bytes_sent ' + '$request_length $request_time $upstream_response_time ' + '"$http_referer" "$http_user_agent"' + + Apache: + - "%h %l %u %t \"%r\" %>s %B %I %D \"%{Referer}i\" \"%{User-Agent}i\"" +*/ + +var ( + csvCommon = ` $remote_addr - - [$time_local] "$request" 
$status $body_bytes_sent` + csvCustom1 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time` + csvCustom2 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time` + csvCustom3 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time` + csvCustom4 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time` + csvVhostCommon = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent` + csvVhostCustom1 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time` + csvVhostCustom2 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time` + csvVhostCustom3 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time` + csvVhostCustom4 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time` + + guessOrder = []string{ + csvVhostCustom4, + csvVhostCustom3, + csvVhostCustom2, + csvVhostCustom1, + csvVhostCommon, + csvCustom4, + csvCustom3, + csvCustom2, + csvCustom1, + csvCommon, + } +) + +func cleanCSVFormat(format string) string { return strings.Join(strings.Fields(format), " ") } +func cleanApacheLogFormat(format string) string { return strings.ReplaceAll(format, `\`, "") } + +const ( + typeAuto = "auto" +) + +var ( + reLTSV = regexp.MustCompile(`^[a-zA-Z0-9]+:[^\t]*(\t[a-zA-Z0-9]+:[^\t]*)*$`) + reJSON = regexp.MustCompile(`^[[:space:]]*{.*}[[:space:]]*$`) +) + +func (w *WebLog) newParser(record []byte) (logs.Parser, error) { + if w.Parser.LogType == typeAuto { + w.Debugf("log_type is %s, will try format auto-detection", typeAuto) + if len(record) == 0 { + return nil, fmt.Errorf("empty line, can't auto-detect format (%s)", w.file.CurrentFilename()) + } + return w.guessParser(record) + } + + w.Parser.CSV.Format = cleanApacheLogFormat(w.Parser.CSV.Format) + w.Debugf("log_type is %s, skipping auto-detection", w.Parser.LogType) + switch w.Parser.LogType { + case logs.TypeCSV: + w.Debugf("config: %+v", w.Parser.CSV) + case logs.TypeLTSV: + w.Debugf("config: %+v", w.Parser.LogType) + case logs.TypeRegExp: + w.Debugf("config: %+v", w.Parser.RegExp) + case logs.TypeJSON: + w.Debugf("config: %+v", w.Parser.JSON) + } + return logs.NewParser(w.Parser, w.file) +} + +func (w *WebLog) guessParser(record []byte) (logs.Parser, error) { + w.Debug("starting log type auto-detection") + if reLTSV.Match(record) { + w.Debug("log type is LTSV") + return logs.NewLTSVParser(w.Parser.LTSV, w.file) + } + if reJSON.Match(record) { + w.Debug("log type is JSON") + return logs.NewJSONParser(w.Parser.JSON, w.file) + } + w.Debug("log type is CSV") + return w.guessCSVParser(record) +} + +func (w *WebLog) guessCSVParser(record []byte) (logs.Parser, error) { + w.Debug("starting csv log format auto-detection") + w.Debugf("config: %+v", w.Parser.CSV) + for _, format := range guessOrder { + format = cleanCSVFormat(format) + cfg := w.Parser.CSV + cfg.Format = format + + w.Debugf("trying format: '%s'", format) + parser, err := logs.NewCSVParser(cfg, w.file) + if err != nil { + return nil, err + } + + line := newEmptyLogLine() + if err := parser.Parse(record, line); err != nil { + 
w.Debug("parse: ", err) + continue + } + + if err = line.verify(); err != nil { + w.Debug("verify: ", err) + continue + } + return parser, nil + } + return nil, errors.New("cannot auto-detect log format, use custom log format") +} + +func checkCSVFormatField(field string) (newName string, offset int, valid bool) { + if isTimeField(field) { + return "", 1, false + } + if !isFieldValid(field) { + return "", 0, false + } + // remove `$` and `%` to have same field names with regexp parser, + // these symbols aren't allowed in sub exp names + return field[1:], 0, true +} + +func isTimeField(field string) bool { + return field == "[$time_local]" || field == "$time_local" || field == "%t" +} + +func isFieldValid(field string) bool { + return len(field) > 1 && (isNginxField(field) || isApacheField(field)) +} +func isNginxField(field string) bool { + return strings.HasPrefix(field, "$") +} + +func isApacheField(field string) bool { + return strings.HasPrefix(field, "%") +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/parser_test.go b/src/go/collectors/go.d.plugin/modules/weblog/parser_test.go new file mode 100644 index 00000000000000..4e449b60cd0d4c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/parser_test.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "fmt" + "testing" + + "github.com/netdata/go.d.plugin/pkg/logs" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWebLog_guessParser(t *testing.T) { + type test = struct { + name string + inputs []string + wantParserType string + wantErr bool + } + tests := []test{ + { + name: "guessed csv", + wantParserType: logs.TypeCSV, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`, + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`, + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`, + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`, + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`, + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`, + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`, + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`, + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + }, + }, + { + name: "guessed ltsv", + wantParserType: logs.TypeLTSV, + inputs: []string{ + `field1:test.example.com:80 field2:88.191.254.20 field3:"GET / HTTP/1.0" 200 8674 field4:8674 field5:0.123`, + }, + }, + { + name: "guessed json", + wantParserType: logs.TypeJSON, + inputs: []string{ + `{}`, + ` {}`, + ` {} `, + `{"host": "example.com"}`, + `{"host": "example.com","time": "2020-08-04T20:23:27+03:00", "upstream_response_time": "0.776", "remote_addr": "1.2.3.4"}`, + ` {"host": "example.com","time": "2020-08-04T20:23:27+03:00", "upstream_response_time": "0.776", "remote_addr": "1.2.3.4"} `, + }, + }, + { + name: "unknown", + wantErr: true, + inputs: []string{ + `test.example.com 80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 
8674`, + `test.example.com 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + }, + }, + } + + weblog := prepareWebLog() + + for _, tc := range tests { + for i, input := range tc.inputs { + name := fmt.Sprintf("name=%s,input_num=%d", tc.name, i+1) + + t.Run(name, func(t *testing.T) { + p, err := weblog.newParser([]byte(input)) + + if tc.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + switch tc.wantParserType { + default: + t.Errorf("unknown parser type: %s", tc.wantParserType) + case logs.TypeLTSV: + assert.IsType(t, (*logs.LTSVParser)(nil), p) + case logs.TypeCSV: + require.IsType(t, (*logs.CSVParser)(nil), p) + case logs.TypeJSON: + require.IsType(t, (*logs.JSONParser)(nil), p) + } + } + }) + } + } +} + +func TestWebLog_guessCSVParser(t *testing.T) { + type test = struct { + name string + inputs []string + wantCSVFormat string + wantErr bool + } + tests := []test{ + { + name: "guessed vhost custom4", + wantCSVFormat: csvVhostCustom4, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`, + }, + }, + { + name: "guessed vhost custom3", + wantCSVFormat: csvVhostCustom3, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`, + }, + }, + { + name: "guessed vhost custom2", + wantCSVFormat: csvVhostCustom2, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`, + }, + }, + { + name: "guessed vhost custom1", + wantCSVFormat: csvVhostCustom1, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`, + }, + }, + { + name: "guessed vhost common", + wantCSVFormat: csvVhostCommon, + inputs: []string{ + `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + }, + }, + { + name: "guessed custom4", + wantCSVFormat: csvCustom4, + inputs: []string{ + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`, + }, + }, + { + name: "guessed custom3", + wantCSVFormat: csvCustom3, + inputs: []string{ + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`, + }, + }, + { + name: "guessed custom2", + wantCSVFormat: csvCustom2, + inputs: []string{ + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`, + }, + }, + { + name: "guessed custom1", + wantCSVFormat: csvCustom1, + inputs: []string{ + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`, + }, + }, + { + name: "guessed common", + wantCSVFormat: csvCommon, + inputs: []string{ + `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + }, + }, + { + name: "unknown", + wantErr: true, + inputs: []string{ + `test.example.com 80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + `test.example.com 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`, + }, + }, + } + + weblog := prepareWebLog() + + for _, tc := range tests { + for i, input := range tc.inputs { + name := fmt.Sprintf("name=%s,input_num=%d", tc.name, i+1) + + t.Run(name, func(t *testing.T) { + p, err := weblog.guessCSVParser([]byte(input)) + + if tc.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, 
cleanCSVFormat(tc.wantCSVFormat), p.(*logs.CSVParser).Config.Format) + } + }) + } + } +} + +func prepareWebLog() *WebLog { + cfg := logs.ParserConfig{ + LogType: typeAuto, + CSV: logs.CSVConfig{ + Delimiter: " ", + CheckField: checkCSVFormatField, + }, + LTSV: logs.LTSVConfig{ + FieldDelimiter: "\t", + ValueDelimiter: ":", + }, + } + + return &WebLog{ + Config: Config{ + GroupRespCodes: false, + Parser: cfg, + }, + } +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/weblog.go b/src/go/collectors/go.d.plugin/modules/weblog/weblog.go new file mode 100644 index 00000000000000..27bf43f9acee7f --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/weblog.go @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + _ "embed" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/pkg/logs" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("web_log", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *WebLog { + return &WebLog{ + Config: Config{ + ExcludePath: "*.gz", + GroupRespCodes: true, + Parser: logs.ParserConfig{ + LogType: typeAuto, + CSV: logs.CSVConfig{ + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: false, + CheckField: checkCSVFormatField, + }, + LTSV: logs.LTSVConfig{ + FieldDelimiter: "\t", + ValueDelimiter: ":", + }, + RegExp: logs.RegExpConfig{}, + JSON: logs.JSONConfig{}, + }, + }, + } +} + +type ( + Config struct { + Parser logs.ParserConfig `yaml:",inline"` + Path string `yaml:"path"` + ExcludePath string `yaml:"exclude_path"` + URLPatterns []userPattern `yaml:"url_patterns"` + CustomFields []customField `yaml:"custom_fields"` + CustomTimeFields []customTimeField `yaml:"custom_time_fields"` + CustomNumericFields []customNumericField `yaml:"custom_numeric_fields"` + Histogram []float64 `yaml:"histogram"` + GroupRespCodes bool `yaml:"group_response_codes"` + } + userPattern struct { + Name string `yaml:"name"` + Match string `yaml:"match"` + } + customField struct { + Name string `yaml:"name"` + Patterns []userPattern `yaml:"patterns"` + } + customTimeField struct { + Name string `yaml:"name"` + Histogram []float64 `yaml:"histogram"` + } + customNumericField struct { + Name string `yaml:"name"` + Units string `yaml:"units"` + Multiplier int `yaml:"multiplier"` + Divisor int `yaml:"divisor"` + } +) + +type WebLog struct { + module.Base + Config `yaml:",inline"` + + file *logs.Reader + parser logs.Parser + line *logLine + urlPatterns []*pattern + + customFields map[string][]*pattern + customTimeFields map[string][]float64 + customNumericFields map[string]bool + + charts *module.Charts + mx *metricsData +} + +func (w *WebLog) Init() bool { + if err := w.createURLPatterns(); err != nil { + w.Errorf("init failed: %v", err) + return false + } + + if err := w.createCustomFields(); err != nil { + w.Errorf("init failed: %v", err) + return false + } + + if err := w.createCustomTimeFields(); err != nil { + w.Errorf("init failed: %v", err) + return false + } + + if err := w.createCustomNumericFields(); err != nil { + w.Errorf("init failed: %v", err) + return false + } + + w.createLogLine() + w.mx = newMetricsData(w.Config) + + return true +} + +func (w *WebLog) Check() bool { + // Note: these inits are here to make auto-detection retry work + if err := w.createLogReader(); err != nil { + w.Warning("check failed: ", err) + return false + } + + if err := w.createParser(); err != nil { + 
w.Warning("check failed: ", err) + return false + } + + if err := w.createCharts(w.line); err != nil { + w.Warning("check failed: ", err) + return false + } + return true +} + +func (w *WebLog) Charts() *module.Charts { + return w.charts +} + +func (w *WebLog) Collect() map[string]int64 { + mx, err := w.collect() + if err != nil { + w.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (w *WebLog) Cleanup() { + if w.file != nil { + _ = w.file.Close() + } +} diff --git a/src/go/collectors/go.d.plugin/modules/weblog/weblog_test.go b/src/go/collectors/go.d.plugin/modules/weblog/weblog_test.go new file mode 100644 index 00000000000000..6195d2e49b3b0d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/weblog/weblog_test.go @@ -0,0 +1,1493 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package weblog + +import ( + "bytes" + "fmt" + "os" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/logs" + "github.com/netdata/go.d.plugin/pkg/metrics" + + "github.com/netdata/go.d.plugin/agent/module" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testCommonLog, _ = os.ReadFile("testdata/common.log") + testFullLog, _ = os.ReadFile("testdata/full.log") + testCustomLog, _ = os.ReadFile("testdata/custom.log") + testCustomTimeFieldLog, _ = os.ReadFile("testdata/custom_time_fields.log") + testIISLog, _ = os.ReadFile("testdata/u_ex221107.log") +) + +func Test_readTestData(t *testing.T) { + assert.NotNil(t, testFullLog) + assert.NotNil(t, testCommonLog) + assert.NotNil(t, testCustomLog) + assert.NotNil(t, testCustomTimeFieldLog) + assert.NotNil(t, testIISLog) +} + +func TestNew(t *testing.T) { + assert.Implements(t, (*module.Module)(nil), New()) +} + +func TestWebLog_Init(t *testing.T) { + weblog := New() + + assert.True(t, weblog.Init()) +} + +func TestWebLog_Init_ErrorOnCreatingURLPatterns(t *testing.T) { + weblog := New() + weblog.URLPatterns = []userPattern{{Match: "* !*"}} + + assert.False(t, weblog.Init()) +} + +func TestWebLog_Init_ErrorOnCreatingCustomFields(t *testing.T) { + weblog := New() + weblog.CustomFields = []customField{{Patterns: []userPattern{{Name: "p1", Match: "* !*"}}}} + + assert.False(t, weblog.Init()) +} + +func TestWebLog_Check(t *testing.T) { + weblog := New() + defer weblog.Cleanup() + weblog.Path = "testdata/common.log" + require.True(t, weblog.Init()) + + assert.True(t, weblog.Check()) +} + +func TestWebLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) { + weblog := New() + defer weblog.Cleanup() + weblog.Path = "testdata/not_exists.log" + require.True(t, weblog.Init()) + + assert.False(t, weblog.Check()) +} + +func TestWebLog_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { + weblog := New() + defer weblog.Cleanup() + weblog.Path = "testdata/custom.log" + require.True(t, weblog.Init()) + + assert.False(t, weblog.Check()) +} + +func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) { + weblog := New() + defer weblog.Cleanup() + weblog.Path = "testdata/custom.log" + weblog.Parser.LogType = logs.TypeCSV + weblog.Parser.CSV.Format = "$one $two" + require.True(t, weblog.Init()) + + assert.False(t, weblog.Check()) +} + +func TestWebLog_Charts(t *testing.T) { + weblog := New() + defer weblog.Cleanup() + weblog.Path = "testdata/common.log" + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + + assert.NotNil(t, weblog.Charts()) +} + +func TestWebLog_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestWebLog_Collect(t *testing.T) 
{ + weblog := prepareWebLogCollectFull(t) + + //m := weblog.Collect() + //l := make([]string, 0) + //for k := range m { + // l = append(l, k) + //} + //sort.Strings(l) + //for _, value := range l { + // fmt.Println(fmt.Sprintf("\"%s\": %d,", value, m[value])) + //} + + expected := map[string]int64{ + "bytes_received": 1374096, + "bytes_sent": 1373185, + "custom_field_drink_beer": 221, + "custom_field_drink_wine": 231, + "custom_field_side_dark": 231, + "custom_field_side_light": 221, + "req_http_scheme": 218, + "req_https_scheme": 234, + "req_ipv4": 275, + "req_ipv6": 177, + "req_method_GET": 156, + "req_method_HEAD": 150, + "req_method_POST": 146, + "req_port_80": 96, + "req_port_81": 100, + "req_port_82": 84, + "req_port_83": 85, + "req_port_84": 87, + "req_proc_time_avg": 247, + "req_proc_time_count": 452, + "req_proc_time_hist_bucket_1": 452, + "req_proc_time_hist_bucket_10": 452, + "req_proc_time_hist_bucket_11": 452, + "req_proc_time_hist_bucket_2": 452, + "req_proc_time_hist_bucket_3": 452, + "req_proc_time_hist_bucket_4": 452, + "req_proc_time_hist_bucket_5": 452, + "req_proc_time_hist_bucket_6": 452, + "req_proc_time_hist_bucket_7": 452, + "req_proc_time_hist_bucket_8": 452, + "req_proc_time_hist_bucket_9": 452, + "req_proc_time_hist_count": 452, + "req_proc_time_hist_sum": 111927, + "req_proc_time_max": 499, + "req_proc_time_min": 2, + "req_proc_time_sum": 111927, + "req_ssl_cipher_suite_AES256-SHA": 101, + "req_ssl_cipher_suite_DHE-RSA-AES256-SHA": 111, + "req_ssl_cipher_suite_ECDHE-RSA-AES256-SHA": 127, + "req_ssl_cipher_suite_PSK-RC4-SHA": 113, + "req_ssl_proto_SSLv2": 74, + "req_ssl_proto_SSLv3": 57, + "req_ssl_proto_TLSv1": 76, + "req_ssl_proto_TLSv1.1": 87, + "req_ssl_proto_TLSv1.2": 73, + "req_ssl_proto_TLSv1.3": 85, + "req_type_bad": 49, + "req_type_error": 0, + "req_type_redirect": 119, + "req_type_success": 284, + "req_unmatched": 48, + "req_url_ptn_com": 120, + "req_url_ptn_net": 116, + "req_url_ptn_not_match": 0, + "req_url_ptn_org": 113, + "req_version_1.1": 168, + "req_version_2": 143, + "req_version_2.0": 141, + "req_vhost_198.51.100.1": 81, + "req_vhost_2001:db8:1ce::1": 100, + "req_vhost_localhost": 102, + "req_vhost_test.example.com": 87, + "req_vhost_test.example.org": 82, + "requests": 500, + "resp_1xx": 110, + "resp_2xx": 128, + "resp_3xx": 119, + "resp_4xx": 95, + "resp_5xx": 0, + "resp_code_100": 60, + "resp_code_101": 50, + "resp_code_200": 58, + "resp_code_201": 70, + "resp_code_300": 58, + "resp_code_301": 61, + "resp_code_400": 49, + "resp_code_401": 46, + "uniq_ipv4": 3, + "uniq_ipv6": 2, + "upstream_resp_time_avg": 255, + "upstream_resp_time_count": 452, + "upstream_resp_time_hist_bucket_1": 452, + "upstream_resp_time_hist_bucket_10": 452, + "upstream_resp_time_hist_bucket_11": 452, + "upstream_resp_time_hist_bucket_2": 452, + "upstream_resp_time_hist_bucket_3": 452, + "upstream_resp_time_hist_bucket_4": 452, + "upstream_resp_time_hist_bucket_5": 452, + "upstream_resp_time_hist_bucket_6": 452, + "upstream_resp_time_hist_bucket_7": 452, + "upstream_resp_time_hist_bucket_8": 452, + "upstream_resp_time_hist_bucket_9": 452, + "upstream_resp_time_hist_count": 452, + "upstream_resp_time_hist_sum": 115615, + "upstream_resp_time_max": 497, + "upstream_resp_time_min": 7, + "upstream_resp_time_sum": 115615, + "url_ptn_com_bytes_received": 379864, + "url_ptn_com_bytes_sent": 372669, + "url_ptn_com_req_method_GET": 38, + "url_ptn_com_req_method_HEAD": 39, + "url_ptn_com_req_method_POST": 43, + "url_ptn_com_req_proc_time_avg": 212, + 
"url_ptn_com_req_proc_time_count": 120, + "url_ptn_com_req_proc_time_max": 495, + "url_ptn_com_req_proc_time_min": 5, + "url_ptn_com_req_proc_time_sum": 25544, + "url_ptn_com_resp_code_100": 12, + "url_ptn_com_resp_code_101": 15, + "url_ptn_com_resp_code_200": 13, + "url_ptn_com_resp_code_201": 26, + "url_ptn_com_resp_code_300": 16, + "url_ptn_com_resp_code_301": 12, + "url_ptn_com_resp_code_400": 13, + "url_ptn_com_resp_code_401": 13, + "url_ptn_net_bytes_received": 349988, + "url_ptn_net_bytes_sent": 339867, + "url_ptn_net_req_method_GET": 51, + "url_ptn_net_req_method_HEAD": 33, + "url_ptn_net_req_method_POST": 32, + "url_ptn_net_req_proc_time_avg": 260, + "url_ptn_net_req_proc_time_count": 116, + "url_ptn_net_req_proc_time_max": 499, + "url_ptn_net_req_proc_time_min": 10, + "url_ptn_net_req_proc_time_sum": 30221, + "url_ptn_net_resp_code_100": 16, + "url_ptn_net_resp_code_101": 12, + "url_ptn_net_resp_code_200": 16, + "url_ptn_net_resp_code_201": 14, + "url_ptn_net_resp_code_300": 14, + "url_ptn_net_resp_code_301": 17, + "url_ptn_net_resp_code_400": 14, + "url_ptn_net_resp_code_401": 13, + "url_ptn_not_match_bytes_received": 0, + "url_ptn_not_match_bytes_sent": 0, + "url_ptn_not_match_req_proc_time_avg": 0, + "url_ptn_not_match_req_proc_time_count": 0, + "url_ptn_not_match_req_proc_time_max": 0, + "url_ptn_not_match_req_proc_time_min": 0, + "url_ptn_not_match_req_proc_time_sum": 0, + "url_ptn_org_bytes_received": 331836, + "url_ptn_org_bytes_sent": 340095, + "url_ptn_org_req_method_GET": 29, + "url_ptn_org_req_method_HEAD": 46, + "url_ptn_org_req_method_POST": 38, + "url_ptn_org_req_proc_time_avg": 263, + "url_ptn_org_req_proc_time_count": 113, + "url_ptn_org_req_proc_time_max": 497, + "url_ptn_org_req_proc_time_min": 2, + "url_ptn_org_req_proc_time_sum": 29796, + "url_ptn_org_resp_code_100": 15, + "url_ptn_org_resp_code_101": 11, + "url_ptn_org_resp_code_200": 20, + "url_ptn_org_resp_code_201": 16, + "url_ptn_org_resp_code_300": 10, + "url_ptn_org_resp_code_301": 19, + "url_ptn_org_resp_code_400": 13, + "url_ptn_org_resp_code_401": 9, + "custom_time_field_random_time_field_time_avg": 230, + "custom_time_field_random_time_field_time_count": 452, + "custom_time_field_random_time_field_time_hist_bucket_1": 452, + "custom_time_field_random_time_field_time_hist_bucket_10": 452, + "custom_time_field_random_time_field_time_hist_bucket_11": 452, + "custom_time_field_random_time_field_time_hist_bucket_2": 452, + "custom_time_field_random_time_field_time_hist_bucket_3": 452, + "custom_time_field_random_time_field_time_hist_bucket_4": 452, + "custom_time_field_random_time_field_time_hist_bucket_5": 452, + "custom_time_field_random_time_field_time_hist_bucket_6": 452, + "custom_time_field_random_time_field_time_hist_bucket_7": 452, + "custom_time_field_random_time_field_time_hist_bucket_8": 452, + "custom_time_field_random_time_field_time_hist_bucket_9": 452, + "custom_time_field_random_time_field_time_hist_count": 452, + "custom_time_field_random_time_field_time_hist_sum": 103960, + "custom_time_field_random_time_field_time_max": 230, + "custom_time_field_random_time_field_time_min": 230, + "custom_time_field_random_time_field_time_sum": 103960, + } + + mx := weblog.Collect() + assert.Equal(t, expected, mx) + testCharts(t, weblog, mx) +} + +func TestWebLog_Collect_CommonLogFormat(t *testing.T) { + weblog := prepareWebLogCollectCommon(t) + + expected := map[string]int64{ + "bytes_received": 0, + "bytes_sent": 1388056, + "req_http_scheme": 0, + "req_https_scheme": 0, + "req_ipv4": 283, + 
"req_ipv6": 173, + "req_method_GET": 159, + "req_method_HEAD": 143, + "req_method_POST": 154, + "req_proc_time_avg": 0, + "req_proc_time_count": 0, + "req_proc_time_hist_bucket_1": 0, + "req_proc_time_hist_bucket_10": 0, + "req_proc_time_hist_bucket_11": 0, + "req_proc_time_hist_bucket_2": 0, + "req_proc_time_hist_bucket_3": 0, + "req_proc_time_hist_bucket_4": 0, + "req_proc_time_hist_bucket_5": 0, + "req_proc_time_hist_bucket_6": 0, + "req_proc_time_hist_bucket_7": 0, + "req_proc_time_hist_bucket_8": 0, + "req_proc_time_hist_bucket_9": 0, + "req_proc_time_hist_count": 0, + "req_proc_time_hist_sum": 0, + "req_proc_time_max": 0, + "req_proc_time_min": 0, + "req_proc_time_sum": 0, + "req_type_bad": 54, + "req_type_error": 0, + "req_type_redirect": 122, + "req_type_success": 280, + "req_unmatched": 44, + "req_version_1.1": 155, + "req_version_2": 147, + "req_version_2.0": 154, + "requests": 500, + "resp_1xx": 130, + "resp_2xx": 100, + "resp_3xx": 122, + "resp_4xx": 104, + "resp_5xx": 0, + "resp_code_100": 80, + "resp_code_101": 50, + "resp_code_200": 43, + "resp_code_201": 57, + "resp_code_300": 70, + "resp_code_301": 52, + "resp_code_400": 54, + "resp_code_401": 50, + "uniq_ipv4": 3, + "uniq_ipv6": 2, + "upstream_resp_time_avg": 0, + "upstream_resp_time_count": 0, + "upstream_resp_time_hist_bucket_1": 0, + "upstream_resp_time_hist_bucket_10": 0, + "upstream_resp_time_hist_bucket_11": 0, + "upstream_resp_time_hist_bucket_2": 0, + "upstream_resp_time_hist_bucket_3": 0, + "upstream_resp_time_hist_bucket_4": 0, + "upstream_resp_time_hist_bucket_5": 0, + "upstream_resp_time_hist_bucket_6": 0, + "upstream_resp_time_hist_bucket_7": 0, + "upstream_resp_time_hist_bucket_8": 0, + "upstream_resp_time_hist_bucket_9": 0, + "upstream_resp_time_hist_count": 0, + "upstream_resp_time_hist_sum": 0, + "upstream_resp_time_max": 0, + "upstream_resp_time_min": 0, + "upstream_resp_time_sum": 0, + } + + mx := weblog.Collect() + assert.Equal(t, expected, mx) + testCharts(t, weblog, mx) +} + +func TestWebLog_Collect_CustomLogs(t *testing.T) { + weblog := prepareWebLogCollectCustom(t) + + expected := map[string]int64{ + "bytes_received": 0, + "bytes_sent": 0, + "custom_field_drink_beer": 52, + "custom_field_drink_wine": 40, + "custom_field_side_dark": 46, + "custom_field_side_light": 46, + "req_http_scheme": 0, + "req_https_scheme": 0, + "req_ipv4": 0, + "req_ipv6": 0, + "req_proc_time_avg": 0, + "req_proc_time_count": 0, + "req_proc_time_hist_bucket_1": 0, + "req_proc_time_hist_bucket_10": 0, + "req_proc_time_hist_bucket_11": 0, + "req_proc_time_hist_bucket_2": 0, + "req_proc_time_hist_bucket_3": 0, + "req_proc_time_hist_bucket_4": 0, + "req_proc_time_hist_bucket_5": 0, + "req_proc_time_hist_bucket_6": 0, + "req_proc_time_hist_bucket_7": 0, + "req_proc_time_hist_bucket_8": 0, + "req_proc_time_hist_bucket_9": 0, + "req_proc_time_hist_count": 0, + "req_proc_time_hist_sum": 0, + "req_proc_time_max": 0, + "req_proc_time_min": 0, + "req_proc_time_sum": 0, + "req_type_bad": 0, + "req_type_error": 0, + "req_type_redirect": 0, + "req_type_success": 0, + "req_unmatched": 8, + "requests": 100, + "resp_1xx": 0, + "resp_2xx": 0, + "resp_3xx": 0, + "resp_4xx": 0, + "resp_5xx": 0, + "uniq_ipv4": 0, + "uniq_ipv6": 0, + "upstream_resp_time_avg": 0, + "upstream_resp_time_count": 0, + "upstream_resp_time_hist_bucket_1": 0, + "upstream_resp_time_hist_bucket_10": 0, + "upstream_resp_time_hist_bucket_11": 0, + "upstream_resp_time_hist_bucket_2": 0, + "upstream_resp_time_hist_bucket_3": 0, + "upstream_resp_time_hist_bucket_4": 0, + 
"upstream_resp_time_hist_bucket_5": 0, + "upstream_resp_time_hist_bucket_6": 0, + "upstream_resp_time_hist_bucket_7": 0, + "upstream_resp_time_hist_bucket_8": 0, + "upstream_resp_time_hist_bucket_9": 0, + "upstream_resp_time_hist_count": 0, + "upstream_resp_time_hist_sum": 0, + "upstream_resp_time_max": 0, + "upstream_resp_time_min": 0, + "upstream_resp_time_sum": 0, + } + + mx := weblog.Collect() + assert.Equal(t, expected, mx) + testCharts(t, weblog, mx) +} + +func TestWebLog_Collect_CustomTimeFieldsLogs(t *testing.T) { + weblog := prepareWebLogCollectCustomTimeFields(t) + + expected := map[string]int64{ + "bytes_received": 0, + "bytes_sent": 0, + "custom_time_field_time1_time_avg": 224, + "custom_time_field_time1_time_count": 72, + "custom_time_field_time1_time_hist_bucket_1": 72, + "custom_time_field_time1_time_hist_bucket_10": 72, + "custom_time_field_time1_time_hist_bucket_11": 72, + "custom_time_field_time1_time_hist_bucket_2": 72, + "custom_time_field_time1_time_hist_bucket_3": 72, + "custom_time_field_time1_time_hist_bucket_4": 72, + "custom_time_field_time1_time_hist_bucket_5": 72, + "custom_time_field_time1_time_hist_bucket_6": 72, + "custom_time_field_time1_time_hist_bucket_7": 72, + "custom_time_field_time1_time_hist_bucket_8": 72, + "custom_time_field_time1_time_hist_bucket_9": 72, + "custom_time_field_time1_time_hist_count": 72, + "custom_time_field_time1_time_hist_sum": 16152, + "custom_time_field_time1_time_max": 431, + "custom_time_field_time1_time_min": 121, + "custom_time_field_time1_time_sum": 16152, + "custom_time_field_time2_time_avg": 255, + "custom_time_field_time2_time_count": 72, + "custom_time_field_time2_time_hist_bucket_1": 72, + "custom_time_field_time2_time_hist_bucket_10": 72, + "custom_time_field_time2_time_hist_bucket_11": 72, + "custom_time_field_time2_time_hist_bucket_2": 72, + "custom_time_field_time2_time_hist_bucket_3": 72, + "custom_time_field_time2_time_hist_bucket_4": 72, + "custom_time_field_time2_time_hist_bucket_5": 72, + "custom_time_field_time2_time_hist_bucket_6": 72, + "custom_time_field_time2_time_hist_bucket_7": 72, + "custom_time_field_time2_time_hist_bucket_8": 72, + "custom_time_field_time2_time_hist_bucket_9": 72, + "custom_time_field_time2_time_hist_count": 72, + "custom_time_field_time2_time_hist_sum": 18360, + "custom_time_field_time2_time_max": 321, + "custom_time_field_time2_time_min": 123, + "custom_time_field_time2_time_sum": 18360, + "req_http_scheme": 0, + "req_https_scheme": 0, + "req_ipv4": 0, + "req_ipv6": 0, + "req_proc_time_avg": 0, + "req_proc_time_count": 0, + "req_proc_time_hist_bucket_1": 0, + "req_proc_time_hist_bucket_10": 0, + "req_proc_time_hist_bucket_11": 0, + "req_proc_time_hist_bucket_2": 0, + "req_proc_time_hist_bucket_3": 0, + "req_proc_time_hist_bucket_4": 0, + "req_proc_time_hist_bucket_5": 0, + "req_proc_time_hist_bucket_6": 0, + "req_proc_time_hist_bucket_7": 0, + "req_proc_time_hist_bucket_8": 0, + "req_proc_time_hist_bucket_9": 0, + "req_proc_time_hist_count": 0, + "req_proc_time_hist_sum": 0, + "req_proc_time_max": 0, + "req_proc_time_min": 0, + "req_proc_time_sum": 0, + "req_type_bad": 0, + "req_type_error": 0, + "req_type_redirect": 0, + "req_type_success": 0, + "req_unmatched": 0, + "requests": 72, + "resp_1xx": 0, + "resp_2xx": 0, + "resp_3xx": 0, + "resp_4xx": 0, + "resp_5xx": 0, + "uniq_ipv4": 0, + "uniq_ipv6": 0, + "upstream_resp_time_avg": 0, + "upstream_resp_time_count": 0, + "upstream_resp_time_hist_bucket_1": 0, + "upstream_resp_time_hist_bucket_10": 0, + 
"upstream_resp_time_hist_bucket_11": 0, + "upstream_resp_time_hist_bucket_2": 0, + "upstream_resp_time_hist_bucket_3": 0, + "upstream_resp_time_hist_bucket_4": 0, + "upstream_resp_time_hist_bucket_5": 0, + "upstream_resp_time_hist_bucket_6": 0, + "upstream_resp_time_hist_bucket_7": 0, + "upstream_resp_time_hist_bucket_8": 0, + "upstream_resp_time_hist_bucket_9": 0, + "upstream_resp_time_hist_count": 0, + "upstream_resp_time_hist_sum": 0, + "upstream_resp_time_max": 0, + "upstream_resp_time_min": 0, + "upstream_resp_time_sum": 0, + } + + mx := weblog.Collect() + assert.Equal(t, expected, mx) + testCharts(t, weblog, mx) +} + +func TestWebLog_Collect_CustomNumericFieldsLogs(t *testing.T) { + weblog := prepareWebLogCollectCustomNumericFields(t) + + expected := map[string]int64{ + "bytes_received": 0, + "bytes_sent": 0, + "custom_numeric_field_numeric1_summary_avg": 224, + "custom_numeric_field_numeric1_summary_count": 72, + "custom_numeric_field_numeric1_summary_max": 431, + "custom_numeric_field_numeric1_summary_min": 121, + "custom_numeric_field_numeric1_summary_sum": 16152, + "custom_numeric_field_numeric2_summary_avg": 255, + "custom_numeric_field_numeric2_summary_count": 72, + "custom_numeric_field_numeric2_summary_max": 321, + "custom_numeric_field_numeric2_summary_min": 123, + "custom_numeric_field_numeric2_summary_sum": 18360, + "req_http_scheme": 0, + "req_https_scheme": 0, + "req_ipv4": 0, + "req_ipv6": 0, + "req_proc_time_avg": 0, + "req_proc_time_count": 0, + "req_proc_time_hist_bucket_1": 0, + "req_proc_time_hist_bucket_10": 0, + "req_proc_time_hist_bucket_11": 0, + "req_proc_time_hist_bucket_2": 0, + "req_proc_time_hist_bucket_3": 0, + "req_proc_time_hist_bucket_4": 0, + "req_proc_time_hist_bucket_5": 0, + "req_proc_time_hist_bucket_6": 0, + "req_proc_time_hist_bucket_7": 0, + "req_proc_time_hist_bucket_8": 0, + "req_proc_time_hist_bucket_9": 0, + "req_proc_time_hist_count": 0, + "req_proc_time_hist_sum": 0, + "req_proc_time_max": 0, + "req_proc_time_min": 0, + "req_proc_time_sum": 0, + "req_type_bad": 0, + "req_type_error": 0, + "req_type_redirect": 0, + "req_type_success": 0, + "req_unmatched": 0, + "requests": 72, + "resp_1xx": 0, + "resp_2xx": 0, + "resp_3xx": 0, + "resp_4xx": 0, + "resp_5xx": 0, + "uniq_ipv4": 0, + "uniq_ipv6": 0, + "upstream_resp_time_avg": 0, + "upstream_resp_time_count": 0, + "upstream_resp_time_hist_bucket_1": 0, + "upstream_resp_time_hist_bucket_10": 0, + "upstream_resp_time_hist_bucket_11": 0, + "upstream_resp_time_hist_bucket_2": 0, + "upstream_resp_time_hist_bucket_3": 0, + "upstream_resp_time_hist_bucket_4": 0, + "upstream_resp_time_hist_bucket_5": 0, + "upstream_resp_time_hist_bucket_6": 0, + "upstream_resp_time_hist_bucket_7": 0, + "upstream_resp_time_hist_bucket_8": 0, + "upstream_resp_time_hist_bucket_9": 0, + "upstream_resp_time_hist_count": 0, + "upstream_resp_time_hist_sum": 0, + "upstream_resp_time_max": 0, + "upstream_resp_time_min": 0, + "upstream_resp_time_sum": 0, + } + + mx := weblog.Collect() + + assert.Equal(t, expected, mx) + testCharts(t, weblog, mx) +} + +func TestWebLog_IISLogs(t *testing.T) { + weblog := prepareWebLogCollectIISFields(t) + + expected := map[string]int64{ + "bytes_received": 0, + "bytes_sent": 0, + "req_http_scheme": 0, + "req_https_scheme": 0, + "req_ipv4": 38, + "req_ipv6": 114, + "req_method_GET": 152, + "req_port_80": 152, + "req_proc_time_avg": 5, + "req_proc_time_count": 152, + "req_proc_time_hist_bucket_1": 133, + "req_proc_time_hist_bucket_10": 145, + "req_proc_time_hist_bucket_11": 146, + 
"req_proc_time_hist_bucket_2": 133, + "req_proc_time_hist_bucket_3": 133, + "req_proc_time_hist_bucket_4": 133, + "req_proc_time_hist_bucket_5": 133, + "req_proc_time_hist_bucket_6": 133, + "req_proc_time_hist_bucket_7": 133, + "req_proc_time_hist_bucket_8": 138, + "req_proc_time_hist_bucket_9": 143, + "req_proc_time_hist_count": 152, + "req_proc_time_hist_sum": 799, + "req_proc_time_max": 256, + "req_proc_time_min": 0, + "req_proc_time_sum": 799, + "req_type_bad": 42, + "req_type_error": 0, + "req_type_redirect": 0, + "req_type_success": 110, + "req_unmatched": 16, + "req_vhost_127.0.0.1": 38, + "req_vhost_::1": 114, + "requests": 168, + "resp_1xx": 0, + "resp_2xx": 99, + "resp_3xx": 11, + "resp_4xx": 42, + "resp_5xx": 0, + "resp_code_200": 99, + "resp_code_304": 11, + "resp_code_404": 42, + "uniq_ipv4": 1, + "uniq_ipv6": 1, + "upstream_resp_time_avg": 0, + "upstream_resp_time_count": 0, + "upstream_resp_time_hist_bucket_1": 0, + "upstream_resp_time_hist_bucket_10": 0, + "upstream_resp_time_hist_bucket_11": 0, + "upstream_resp_time_hist_bucket_2": 0, + "upstream_resp_time_hist_bucket_3": 0, + "upstream_resp_time_hist_bucket_4": 0, + "upstream_resp_time_hist_bucket_5": 0, + "upstream_resp_time_hist_bucket_6": 0, + "upstream_resp_time_hist_bucket_7": 0, + "upstream_resp_time_hist_bucket_8": 0, + "upstream_resp_time_hist_bucket_9": 0, + "upstream_resp_time_hist_count": 0, + "upstream_resp_time_hist_sum": 0, + "upstream_resp_time_max": 0, + "upstream_resp_time_min": 0, + "upstream_resp_time_sum": 0, + } + + mx := weblog.Collect() + assert.Equal(t, expected, mx) +} + +func testCharts(t *testing.T, w *WebLog, mx map[string]int64) { + testVhostChart(t, w) + testPortChart(t, w) + testSchemeChart(t, w) + testClientCharts(t, w) + testHTTPMethodChart(t, w) + testURLPatternChart(t, w) + testHTTPVersionChart(t, w) + testRespCodeCharts(t, w) + testBandwidthChart(t, w) + testReqProcTimeCharts(t, w) + testUpsRespTimeCharts(t, w) + testSSLProtoChart(t, w) + testSSLCipherSuiteChart(t, w) + testURLPatternStatsCharts(t, w) + testCustomFieldCharts(t, w) + testCustomTimeFieldCharts(t, w) + testCustomNumericFieldCharts(t, w) + + testChartsDimIDs(t, w, mx) +} + +func testChartsDimIDs(t *testing.T, w *WebLog, mx map[string]int64) { + for _, chart := range *w.Charts() { + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + } +} + +func testVhostChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqVhost) == 0 { + assert.Falsef(t, w.Charts().Has(reqByVhost.ID), "chart '%s' is created", reqByVhost.ID) + return + } + + chart := w.Charts().Get(reqByVhost.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqByVhost.ID) + if chart == nil { + return + } + for v := range w.mx.ReqVhost { + id := "req_vhost_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' vhost, expected '%s'", chart.ID, v, id) + } +} + +func testPortChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqPort) == 0 { + assert.Falsef(t, w.Charts().Has(reqByPort.ID), "chart '%s' is created", reqByPort.ID) + return + } + + chart := w.Charts().Get(reqByPort.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqByPort.ID) + if chart == nil { + return + } + for v := range w.mx.ReqPort { + id := "req_port_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' port, expected '%s'", chart.ID, v, id) + } +} + +func testSchemeChart(t *testing.T, w *WebLog) { + if w.mx.ReqHTTPScheme.Value() == 0 && 
w.mx.ReqHTTPSScheme.Value() == 0 { + assert.Falsef(t, w.Charts().Has(reqByScheme.ID), "chart '%s' is created", reqByScheme.ID) + } else { + assert.Truef(t, w.Charts().Has(reqByScheme.ID), "chart '%s' is not created", reqByScheme.ID) + } +} + +func testClientCharts(t *testing.T, w *WebLog) { + if w.mx.ReqIPv4.Value() == 0 && w.mx.ReqIPv6.Value() == 0 { + assert.Falsef(t, w.Charts().Has(reqByIPProto.ID), "chart '%s' is created", reqByIPProto.ID) + } else { + assert.Truef(t, w.Charts().Has(reqByIPProto.ID), "chart '%s' is not created", reqByIPProto.ID) + } + + if w.mx.UniqueIPv4.Value() == 0 && w.mx.UniqueIPv6.Value() == 0 { + assert.Falsef(t, w.Charts().Has(uniqIPsCurPoll.ID), "chart '%s' is created", uniqIPsCurPoll.ID) + } else { + assert.Truef(t, w.Charts().Has(uniqIPsCurPoll.ID), "chart '%s' is not created", uniqIPsCurPoll.ID) + } +} + +func testHTTPMethodChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqMethod) == 0 { + assert.Falsef(t, w.Charts().Has(reqByMethod.ID), "chart '%s' is created", reqByMethod.ID) + return + } + + chart := w.Charts().Get(reqByMethod.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqByMethod.ID) + if chart == nil { + return + } + for v := range w.mx.ReqMethod { + id := "req_method_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' method, expected '%s'", chart.ID, v, id) + } +} + +func testURLPatternChart(t *testing.T, w *WebLog) { + if isEmptyCounterVec(w.mx.ReqURLPattern) { + assert.Falsef(t, w.Charts().Has(reqByURLPattern.ID), "chart '%s' is created", reqByURLPattern.ID) + return + } + + chart := w.Charts().Get(reqByURLPattern.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqByURLPattern.ID) + if chart == nil { + return + } + for v := range w.mx.ReqURLPattern { + id := "req_url_ptn_" + v + assert.True(t, chart.HasDim(id), "chart '%s' has no dim for '%s' pattern, expected '%s'", chart.ID, v, id) + } +} + +func testHTTPVersionChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqVersion) == 0 { + assert.Falsef(t, w.Charts().Has(reqByVersion.ID), "chart '%s' is created", reqByVersion.ID) + return + } + + chart := w.Charts().Get(reqByVersion.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqByVersion.ID) + if chart == nil { + return + } + for v := range w.mx.ReqVersion { + id := "req_version_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' version, expected '%s'", chart.ID, v, id) + } +} + +func testRespCodeCharts(t *testing.T, w *WebLog) { + if isEmptyCounterVec(w.mx.RespCode) { + for _, id := range []string{ + respCodes.ID, + respCodes1xx.ID, + respCodes2xx.ID, + respCodes3xx.ID, + respCodes4xx.ID, + respCodes5xx.ID, + } { + assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id) + } + return + } + + if !w.GroupRespCodes { + chart := w.Charts().Get(respCodes.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", respCodes.ID) + if chart == nil { + return + } + for v := range w.mx.RespCode { + id := "resp_code_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chart.ID, v, id) + } + return + } + + findCodes := func(class string) (codes []string) { + for v := range w.mx.RespCode { + if v[:1] == class { + codes = append(codes, v) + } + } + return codes + } + + var n int + ids := []string{ + respCodes1xx.ID, + respCodes2xx.ID, + respCodes3xx.ID, + respCodes4xx.ID, + respCodes5xx.ID, + } + for i, chartID := range ids { + class := strconv.Itoa(i + 1) + codes := findCodes(class) + n += len(codes) + chart := 
w.Charts().Get(chartID) + assert.NotNilf(t, chart, "chart '%s' is not created", chartID) + if chart == nil { + return + } + for _, v := range codes { + id := "resp_code_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chartID, v, id) + } + } + assert.Equal(t, len(w.mx.RespCode), n) +} + +func testBandwidthChart(t *testing.T, w *WebLog) { + if w.mx.BytesSent.Value() == 0 && w.mx.BytesReceived.Value() == 0 { + assert.Falsef(t, w.Charts().Has(bandwidth.ID), "chart '%s' is created", bandwidth.ID) + } else { + assert.Truef(t, w.Charts().Has(bandwidth.ID), "chart '%s' is not created", bandwidth.ID) + } +} + +func testReqProcTimeCharts(t *testing.T, w *WebLog) { + if isEmptySummary(w.mx.ReqProcTime) { + assert.Falsef(t, w.Charts().Has(reqProcTime.ID), "chart '%s' is created", reqProcTime.ID) + } else { + assert.Truef(t, w.Charts().Has(reqProcTime.ID), "chart '%s' is not created", reqProcTime.ID) + } + + if isEmptyHistogram(w.mx.ReqProcTimeHist) { + assert.Falsef(t, w.Charts().Has(reqProcTimeHist.ID), "chart '%s' is created", reqProcTimeHist.ID) + } else { + assert.Truef(t, w.Charts().Has(reqProcTimeHist.ID), "chart '%s' is not created", reqProcTimeHist.ID) + } +} + +func testUpsRespTimeCharts(t *testing.T, w *WebLog) { + if isEmptySummary(w.mx.UpsRespTime) { + assert.Falsef(t, w.Charts().Has(upsRespTime.ID), "chart '%s' is created", upsRespTime.ID) + } else { + assert.Truef(t, w.Charts().Has(upsRespTime.ID), "chart '%s' is not created", upsRespTime.ID) + } + + if isEmptyHistogram(w.mx.UpsRespTimeHist) { + assert.Falsef(t, w.Charts().Has(upsRespTimeHist.ID), "chart '%s' is created", upsRespTimeHist.ID) + } else { + assert.Truef(t, w.Charts().Has(upsRespTimeHist.ID), "chart '%s' is not created", upsRespTimeHist.ID) + } +} + +func testSSLProtoChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqSSLProto) == 0 { + assert.Falsef(t, w.Charts().Has(reqBySSLProto.ID), "chart '%s' is created", reqBySSLProto.ID) + return + } + + chart := w.Charts().Get(reqBySSLProto.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqBySSLProto.ID) + if chart == nil { + return + } + for v := range w.mx.ReqSSLProto { + id := "req_ssl_proto_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' ssl proto, expected '%s'", chart.ID, v, id) + } +} + +func testSSLCipherSuiteChart(t *testing.T, w *WebLog) { + if len(w.mx.ReqSSLCipherSuite) == 0 { + assert.Falsef(t, w.Charts().Has(reqBySSLCipherSuite.ID), "chart '%s' is created", reqBySSLCipherSuite.ID) + return + } + + chart := w.Charts().Get(reqBySSLCipherSuite.ID) + assert.NotNilf(t, chart, "chart '%s' is not created", reqBySSLCipherSuite.ID) + if chart == nil { + return + } + for v := range w.mx.ReqSSLCipherSuite { + id := "req_ssl_cipher_suite_" + v + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' ssl cipher suite, expected '%s'", chart.ID, v, id) + } +} + +func testURLPatternStatsCharts(t *testing.T, w *WebLog) { + for _, p := range w.URLPatterns { + chartID := fmt.Sprintf(urlPatternRespCodes.ID, p.Name) + + if isEmptyCounterVec(w.mx.RespCode) { + assert.Falsef(t, w.Charts().Has(chartID), "chart '%s' is created", chartID) + continue + } + + chart := w.Charts().Get(chartID) + assert.NotNilf(t, chart, "chart '%s' is not created", chartID) + if chart == nil { + continue + } + + stats, ok := w.mx.URLPatternStats[p.Name] + assert.Truef(t, ok, "url pattern '%s' has no metric in w.mx.URLPatternStats", p.Name) + if !ok { + continue + } + for v := range stats.RespCode { + id := 
fmt.Sprintf("url_ptn_%s_resp_code_%s", p.Name, v) + assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chartID, v, id) + } + } + + for _, p := range w.URLPatterns { + id := fmt.Sprintf(urlPatternReqMethods.ID, p.Name) + if isEmptyCounterVec(w.mx.ReqMethod) { + assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id) + continue + } + + chart := w.Charts().Get(id) + assert.NotNilf(t, chart, "chart '%s' is not created", id) + if chart == nil { + continue + } + + stats, ok := w.mx.URLPatternStats[p.Name] + assert.Truef(t, ok, "url pattern '%s' has no metric in w.mx.URLPatternStats", p.Name) + if !ok { + continue + } + for v := range stats.ReqMethod { + dimID := fmt.Sprintf("url_ptn_%s_req_method_%s", p.Name, v) + assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' method, expected '%s'", id, v, dimID) + } + } + + for _, p := range w.URLPatterns { + id := fmt.Sprintf(urlPatternBandwidth.ID, p.Name) + if w.mx.BytesSent.Value() == 0 && w.mx.BytesReceived.Value() == 0 { + assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id) + } else { + assert.Truef(t, w.Charts().Has(id), "chart '%s' is not created", id) + } + } + + for _, p := range w.URLPatterns { + id := fmt.Sprintf(urlPatternReqProcTime.ID, p.Name) + if isEmptySummary(w.mx.ReqProcTime) { + assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id) + } else { + assert.Truef(t, w.Charts().Has(id), "chart '%s' is not created", id) + } + } +} + +func testCustomFieldCharts(t *testing.T, w *WebLog) { + for _, cf := range w.CustomFields { + id := fmt.Sprintf(reqByCustomFieldPattern.ID, cf.Name) + chart := w.Charts().Get(id) + assert.NotNilf(t, chart, "chart '%s' is not created", id) + if chart == nil { + continue + } + + for _, p := range cf.Patterns { + id := fmt.Sprintf("custom_field_%s_%s", cf.Name, p.Name) + assert.True(t, chart.HasDim(id), "chart '%s' has no dim for '%s' pattern, expected '%s'", chart.ID, p, id) + } + } +} + +func testCustomTimeFieldCharts(t *testing.T, w *WebLog) { + for _, cf := range w.CustomTimeFields { + id := fmt.Sprintf(reqByCustomTimeField.ID, cf.Name) + chart := w.Charts().Get(id) + assert.NotNilf(t, chart, "chart '%s' is not created", id) + if chart == nil { + continue + } + dimMinID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name) + assert.True(t, chart.HasDim(dimMinID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMinID) + + dimMaxID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name) + assert.True(t, chart.HasDim(dimMaxID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMaxID) + + dimAveID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name) + assert.True(t, chart.HasDim(dimAveID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimAveID) + } +} + +func testCustomNumericFieldCharts(t *testing.T, w *WebLog) { + for _, cf := range w.CustomNumericFields { + id := fmt.Sprintf(customNumericFieldSummaryChartTmpl.ID, cf.Name) + chart := w.Charts().Get(id) + assert.NotNilf(t, chart, "chart '%s' is not created", id) + if chart == nil { + continue + } + dimMinID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name) + assert.True(t, chart.HasDim(dimMinID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMinID) + + dimMaxID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name) + assert.True(t, chart.HasDim(dimMaxID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMaxID) + 
+ dimAveID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name) + assert.True(t, chart.HasDim(dimAveID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimAveID) + } +} + +var ( + emptySummary = newWebLogSummary() + emptyHistogram = metrics.NewHistogram(metrics.DefBuckets) +) + +func isEmptySummary(s metrics.Summary) bool { return reflect.DeepEqual(s, emptySummary) } +func isEmptyHistogram(h metrics.Histogram) bool { return reflect.DeepEqual(h, emptyHistogram) } + +func isEmptyCounterVec(cv metrics.CounterVec) bool { + for _, c := range cv { + if c.Value() > 0 { + return false + } + } + return true +} + +func prepareWebLogCollectFull(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "$host:$server_port", + "$remote_addr", + "-", + "-", + "$time_local", + `"$request"`, + "$status", + "$body_bytes_sent", + "$request_length", + "$request_time", + "$upstream_response_time", + "$scheme", + "$ssl_protocol", + "$ssl_cipher", + "$side", + "$drink", + "$random_time_field", + }, " ") + + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + Path: "testdata/full.log", + ExcludePath: "", + URLPatterns: []userPattern{ + {Name: "com", Match: "~ com$"}, + {Name: "org", Match: "~ org$"}, + {Name: "net", Match: "~ net$"}, + {Name: "not_match", Match: "* !*"}, + }, + CustomFields: []customField{ + { + Name: "side", + Patterns: []userPattern{ + {Name: "dark", Match: "= dark"}, + {Name: "light", Match: "= light"}, + }, + }, + { + Name: "drink", + Patterns: []userPattern{ + {Name: "beer", Match: "= beer"}, + {Name: "wine", Match: "= wine"}, + }, + }, + }, + CustomTimeFields: []customTimeField{ + { + Name: "random_time_field", + Histogram: metrics.DefBuckets, + }, + }, + Histogram: metrics.DefBuckets, + GroupRespCodes: true, + } + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testFullLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +func prepareWebLogCollectCommon(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "$remote_addr", + "-", + "-", + "$time_local", + `"$request"`, + "$status", + "$body_bytes_sent", + }, " ") + + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + Path: "testdata/common.log", + ExcludePath: "", + URLPatterns: nil, + CustomFields: nil, + Histogram: nil, + GroupRespCodes: false, + } + + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCommonLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +func prepareWebLogCollectCustom(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "$side", + "$drink", + }, " ") + + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: 2, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + CustomFields: []customField{ + { + Name: "side", + Patterns: []userPattern{ 
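+					// each pattern below becomes one dimension of the
+					// corresponding custom-field chart (see testCustomFieldCharts)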
+ {Name: "dark", Match: "= dark"}, + {Name: "light", Match: "= light"}, + }, + }, + { + Name: "drink", + Patterns: []userPattern{ + {Name: "beer", Match: "= beer"}, + {Name: "wine", Match: "= wine"}, + }, + }, + }, + Path: "testdata/custom.log", + ExcludePath: "", + URLPatterns: nil, + Histogram: nil, + GroupRespCodes: false, + } + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "$time1", + "$time2", + }, " ") + + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: 2, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + CustomTimeFields: []customTimeField{ + { + Name: "time1", + Histogram: metrics.DefBuckets, + }, + { + Name: "time2", + Histogram: metrics.DefBuckets, + }, + }, + Path: "testdata/custom_time_fields.log", + ExcludePath: "", + URLPatterns: nil, + Histogram: nil, + GroupRespCodes: false, + } + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "$numeric1", + "$numeric2", + }, " ") + + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: 2, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + CustomNumericFields: []customNumericField{ + { + Name: "numeric1", + Units: "bytes", + }, + { + Name: "numeric2", + Units: "requests", + }, + }, + Path: "testdata/custom_time_fields.log", + ExcludePath: "", + URLPatterns: nil, + Histogram: nil, + GroupRespCodes: false, + } + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +func prepareWebLogCollectIISFields(t *testing.T) *WebLog { + t.Helper() + format := strings.Join([]string{ + "-", // date + "-", // time + "$host", // s-ip + "$request_method", // cs-method + "$request_uri", // cs-uri-stem + "-", // cs-uri-query + "$server_port", // s-port + "-", // cs-username + "$remote_addr", // c-ip + "-", // cs(User-Agent) + "-", // cs(Referer) + "$status", // sc-status + "-", // sc-substatus + "-", // sc-win32-status + "$request_time", // time-taken + }, " ") + cfg := Config{ + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + // Users can define number of fields + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: false, + Format: format, + CheckField: checkCSVFormatField, + }, + }, + Path: "testdata/u_ex221107.log", + ExcludePath: "", + URLPatterns: nil, + Histogram: nil, + GroupRespCodes: false, + } + + weblog := New() + weblog.Config = cfg + require.True(t, weblog.Init()) + require.True(t, weblog.Check()) + defer weblog.Cleanup() + + p, err := 
logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testIISLog)) + require.NoError(t, err) + weblog.parser = p + return weblog +} + +// generateLogs is used to populate 'testdata/full.log' +//func generateLogs(w io.Writer, num int) error { +// var ( +// vhost = []string{"localhost", "test.example.com", "test.example.org", "198.51.100.1", "2001:db8:1ce::1"} +// scheme = []string{"http", "https"} +// client = []string{"localhost", "203.0.113.1", "203.0.113.2", "2001:db8:2ce:1", "2001:db8:2ce:2"} +// method = []string{"GET", "HEAD", "POST"} +// url = []string{"example.other", "example.com", "example.org", "example.net"} +// version = []string{"1.1", "2", "2.0"} +// status = []int{100, 101, 200, 201, 300, 301, 400, 401} // no 5xx on purpose +// sslProto = []string{"TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3", "SSLv2", "SSLv3"} +// sslCipher = []string{"ECDHE-RSA-AES256-SHA", "DHE-RSA-AES256-SHA", "AES256-SHA", "PSK-RC4-SHA"} +// +// customField1 = []string{"dark", "light"} +// customField2 = []string{"beer", "wine"} +// ) +// +// var line string +// for i := 0; i < num; i++ { +// unmatched := randInt(1, 100) > 90 +// if unmatched { +// line = "Unmatched! The rat the cat the dog chased killed ate the malt!\n" +// } else { +// // test.example.com:80 203.0.113.1 - - "GET / HTTP/1.1" 200 1674 2674 3674 4674 http TLSv1 AES256-SHA dark beer +// line = fmt.Sprintf( +// "%s:%d %s - - [22/Mar/2009:09:30:31 +0100] \"%s /%s HTTP/%s\" %d %d %d %d %d %s %s %s %s %s\n", +// randFromString(vhost), +// randInt(80, 85), +// randFromString(client), +// randFromString(method), +// randFromString(url), +// randFromString(version), +// randFromInt(status), +// randInt(1000, 5000), +// randInt(1000, 5000), +// randInt(1, 500), +// randInt(1, 500), +// randFromString(scheme), +// randFromString(sslProto), +// randFromString(sslCipher), +// randFromString(customField1), +// randFromString(customField2), +// ) +// } +// _, err := fmt.Fprint(w, line) +// if err != nil { +// return err +// } +// } +// return nil +//} +// +//var r = rand.New(rand.NewSource(time.Now().UnixNano())) +// +//func randFromString(s []string) string { return s[r.Intn(len(s))] } +//func randFromInt(s []int) int { return s[r.Intn(len(s))] } +//func randInt(min, max int) int { return r.Intn(max-min) + min } diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/README.md b/src/go/collectors/go.d.plugin/modules/whoisquery/README.md new file mode 120000 index 00000000000000..8661481d1f8bb9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/whoisquery/README.md @@ -0,0 +1 @@ +integrations/domain_expiration_date.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/charts.go b/src/go/collectors/go.d.plugin/modules/whoisquery/charts.go new file mode 100644 index 00000000000000..c8e6fe363a8e74 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/whoisquery/charts.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package whoisquery + +import "github.com/netdata/go.d.plugin/agent/module" + +var baseCharts = module.Charts{ + { + ID: "time_until_expiration", + Title: "Time Until Domain Expiration", + Units: "seconds", + Fam: "expiration time", + Ctx: "whoisquery.time_until_expiration", + Opts: module.Opts{StoreFirst: true}, + Dims: module.Dims{ + {ID: "expiry"}, + }, + Vars: module.Vars{ + {ID: "days_until_expiration_warning"}, + {ID: "days_until_expiration_critical"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/collect.go 
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/collect.go b/src/go/collectors/go.d.plugin/modules/whoisquery/collect.go
new file mode 100644
index 00000000000000..7bd8ed70f6ed09
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/collect.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import "fmt"
+
+func (w *WhoisQuery) collect() (map[string]int64, error) {
+	remainingTime, err := w.prov.remainingTime()
+	if err != nil {
+		return nil, fmt.Errorf("%v (source: %s)", err, w.Source)
+	}
+
+	mx := make(map[string]int64)
+	w.collectExpiration(mx, remainingTime)
+
+	return mx, nil
+}
+
+func (w *WhoisQuery) collectExpiration(mx map[string]int64, remainingTime float64) {
+	mx["expiry"] = int64(remainingTime)
+	mx["days_until_expiration_warning"] = w.DaysUntilWarn
+	mx["days_until_expiration_critical"] = w.DaysUntilCrit
+}
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/config_schema.json b/src/go/collectors/go.d.plugin/modules/whoisquery/config_schema.json
new file mode 100644
index 00000000000000..9f5131789cbe1c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/config_schema.json
@@ -0,0 +1,29 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "go.d/whoisquery job configuration schema.",
+  "type": "object",
+  "properties": {
+    "name": {
+      "type": "string"
+    },
+    "source": {
+      "type": "string"
+    },
+    "timeout": {
+      "type": [
+        "string",
+        "integer"
+      ]
+    },
+    "days_until_expiration_warning": {
+      "type": "integer"
+    },
+    "days_until_expiration_critical": {
+      "type": "integer"
+    }
+  },
+  "required": [
+    "name",
+    "source"
+  ]
+}
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/init.go b/src/go/collectors/go.d.plugin/modules/whoisquery/init.go
new file mode 100644
index 00000000000000..7db18a52ebbf2f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/init.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+	"errors"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+)
+
+func (w *WhoisQuery) validateConfig() error {
+	if w.Source == "" {
+		return errors.New("source is not set")
+	}
+	return nil
+}
+
+func (w *WhoisQuery) initProvider() (provider, error) {
+	return newProvider(w.Config)
+}
+
+func (w *WhoisQuery) initCharts() *module.Charts {
+	charts := baseCharts.Copy()
+
+	for _, chart := range *charts {
+		chart.Labels = []module.Label{
+			{Key: "domain", Value: w.Source},
+		}
+	}
+
+	return charts
+}
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/integrations/domain_expiration_date.md b/src/go/collectors/go.d.plugin/modules/whoisquery/integrations/domain_expiration_date.md
new file mode 100644
index 00000000000000..3d2816d06ef959
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/integrations/domain_expiration_date.md
@@ -0,0 +1,187 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/whoisquery/README.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/whoisquery/metadata.yaml"
+sidebar_label: "Domain expiration date"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Domain expiration date
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: whoisquery
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the remaining time before the domain expires.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per domain
+
+These metrics refer to the configured source.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| domain | Configured source |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| whoisquery.time_until_expiration | expiry | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/whoisquery.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/whoisquery.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| source | Domain address. | | yes |
+| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |
+| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |
+| timeout | The query timeout in seconds. | 5 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+Basic configuration example
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_site
+    source: my_site.com
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define more than one job, their names must be unique.
+
+Check the expiration status of multiple domains.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_site1
+    source: my_site1.com
+
+  - name: my_site2
+    source: my_site2.com
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m whoisquery
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/metadata.yaml b/src/go/collectors/go.d.plugin/modules/whoisquery/metadata.yaml
new file mode 100644
index 00000000000000..c98c86a76f7fbb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/metadata.yaml
@@ -0,0 +1,125 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-whoisquery
+      plugin_name: go.d.plugin
+      module_name: whoisquery
+      monitored_instance:
+        name: Domain expiration date
+        link: ""
+        icon_filename: globe.svg
+        categories:
+          - data-collection.synthetic-checks
+      keywords:
+        - whois
+      related_resources:
+        integrations:
+          list: []
+      info_provided_to_referring_integrations:
+        description: ""
+      most_popular: false
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the remaining time before the domain expires.
+        method_description: ""
+      supported_platforms:
+        include: []
+        exclude: []
+      multi_instance: true
+      additional_permissions:
+        description: ""
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: go.d/whoisquery.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: source
+              description: Domain address.
+              default_value: ""
+              required: true
+            - name: days_until_expiration_warning
+              description: Number of days before the alarm status is warning.
+              default_value: 30
+              required: false
+            - name: days_until_expiration_critical
+              description: Number of days before the alarm status is critical.
+              default_value: 15
+              required: false
+            - name: timeout
+              description: The query timeout in seconds.
+              default_value: 5
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: Basic configuration example
+              config: |
+                jobs:
+                  - name: my_site
+                    source: my_site.com
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define more than one job, their names must be unique.
+
+                Check the expiration status of multiple domains.
+              config: |
+                jobs:
+                  - name: my_site1
+                    source: my_site1.com
+
+                  - name: my_site2
+                    source: my_site2.com
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: whoisquery_days_until_expiration
+        metric: whoisquery.time_until_expiration
+        info: time until the domain name registration expires
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: domain
+          description: These metrics refer to the configured source.
+          labels:
+            - name: domain
+              description: Configured source
+          metrics:
+            - name: whoisquery.time_until_expiration
+              description: Time Until Domain Expiration
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: expiry
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/provider.go b/src/go/collectors/go.d.plugin/modules/whoisquery/provider.go
new file mode 100644
index 00000000000000..71318dd81b4c06
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/provider.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+	"strings"
+	"time"
+
+	"github.com/araddon/dateparse"
+	"github.com/likexian/whois"
+	whoisparser "github.com/likexian/whois-parser"
+)
+
+type provider interface {
+	remainingTime() (float64, error)
+}
+
+type fromNet struct {
+	domainAddress string
+	client        *whois.Client
+}
+
+func newProvider(config Config) (provider, error) {
+	domain := config.Source
+	client := whois.NewClient()
+	client.SetTimeout(config.Timeout.Duration)
+
+	return &fromNet{
+		domainAddress: domain,
+		client:        client,
+	}, nil
+}
+
+func (f *fromNet) remainingTime() (float64, error) {
+	raw, err := f.client.Whois(f.domainAddress)
+	if err != nil {
+		return 0, err
+	}
+
+	result, err := whoisparser.Parse(raw)
+	if err != nil {
+		return 0, err
+	}
+
+	// https://community.netdata.cloud/t/whois-query-monitor-cannot-parse-expiration-time/3485
+	if strings.Contains(result.Domain.ExpirationDate, " ") {
+		if v, err := time.Parse("2006.01.02 15:04:05", result.Domain.ExpirationDate); err == nil {
+			return time.Until(v).Seconds(), nil
+		}
+	}
+
+	expire, err := dateparse.ParseAny(result.Domain.ExpirationDate)
+	if err != nil {
+		return 0, err
+	}
+
+	return time.Until(expire).Seconds(), nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery.go b/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery.go
new file mode 100644
index 00000000000000..6265b4fb600b51
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+	_ "embed"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("whoisquery", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			UpdateEvery: 60,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+func New() *WhoisQuery {
+	return &WhoisQuery{
+		Config: Config{
+			Timeout:       web.Duration{Duration: time.Second * 5},
+			DaysUntilWarn: 90,
+			DaysUntilCrit: 30,
+		},
+	}
+}
+
+type Config struct {
+	Source        string
+	Timeout       web.Duration `yaml:"timeout"`
+	DaysUntilWarn int64        `yaml:"days_until_expiration_warning"`
+	DaysUntilCrit int64        `yaml:"days_until_expiration_critical"`
+}
+
+type WhoisQuery struct {
+	module.Base
+	Config `yaml:",inline"`
+
+	charts *module.Charts
+
+	prov provider
+}
+
+func (w *WhoisQuery) Init() bool {
+	if err := w.validateConfig(); err != nil {
+		w.Errorf("config validation: %v", err)
+		return false
+	}
+
+	prov, err := w.initProvider()
+	if err != nil {
+		w.Errorf("init whois provider: %v", err)
+		return false
+	}
+	w.prov = prov
+
+	w.charts = w.initCharts()
+
+	return true
+}
+
+func (w *WhoisQuery) Check() bool {
+	return len(w.Collect()) > 0
+}
+
+func (w *WhoisQuery) Charts() *module.Charts {
+	return w.charts
+}
+
+func (w *WhoisQuery) Collect() map[string]int64 {
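+	// collect() queries whois for the source domain and converts the remaining
+	// time to seconds; a nil return reports this collection attempt as having
+	// produced no data (assumed framework contract, not stated in this patch)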
mx, err := w.collect() + if err != nil { + w.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (w *WhoisQuery) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery_test.go b/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery_test.go new file mode 100644 index 00000000000000..1f3c827bd3b085 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/whoisquery/whoisquery_test.go @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package whoisquery + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWhoisQuery_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestWhoisQuery_Charts(t *testing.T) { + whoisquery := New() + whoisquery.Source = "example.com" + require.True(t, whoisquery.Init()) + + assert.NotNil(t, whoisquery.Charts()) +} + +func TestWhoisQuery_Init(t *testing.T) { + const net = iota + tests := map[string]struct { + config Config + providerType int + err bool + }{ + "ok from net": { + config: Config{Source: "example.org"}, + providerType: net, + }, + "empty source": { + config: Config{Source: ""}, + err: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + whoisquery := New() + whoisquery.Config = test.config + + if test.err { + assert.False(t, whoisquery.Init()) + } else { + require.True(t, whoisquery.Init()) + + var typeOK bool + if test.providerType == net { + _, typeOK = whoisquery.prov.(*fromNet) + } + + assert.True(t, typeOK) + } + }) + } +} + +func TestWhoisQuery_Check(t *testing.T) { + whoisquery := New() + whoisquery.prov = &mockProvider{remTime: 12345.678} + + assert.True(t, whoisquery.Check()) +} + +func TestWhoisQuery_Check_ReturnsFalseOnProviderError(t *testing.T) { + whoisquery := New() + whoisquery.prov = &mockProvider{err: true} + + assert.False(t, whoisquery.Check()) +} + +func TestWhoisQuery_Collect(t *testing.T) { + whoisquery := New() + whoisquery.Source = "example.com" + require.True(t, whoisquery.Init()) + whoisquery.prov = &mockProvider{remTime: 12345} + + collected := whoisquery.Collect() + + expected := map[string]int64{ + "expiry": 12345, + "days_until_expiration_warning": 90, + "days_until_expiration_critical": 30, + } + + assert.NotZero(t, collected) + assert.Equal(t, expected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, whoisquery, collected) +} + +func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) { + whoisquery := New() + whoisquery.Source = "example.com" + require.True(t, whoisquery.Init()) + whoisquery.prov = &mockProvider{err: true} + + assert.Nil(t, whoisquery.Collect()) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, whoisquery *WhoisQuery, collected map[string]int64) { + for _, chart := range *whoisquery.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +type mockProvider struct { + remTime float64 + err bool +} + +func (m mockProvider) remainingTime() (float64, error) { + if m.err { + return 0, errors.New("mock remaining time error") + } + return m.remTime, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/README.md b/src/go/collectors/go.d.plugin/modules/windows/README.md new file mode 120000 index 
00000000000000..802d61bd11d11a --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/README.md @@ -0,0 +1 @@ +integrations/windows.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/windows/charts.go b/src/go/collectors/go.d.plugin/modules/windows/charts.go new file mode 100644 index 00000000000000..14db70395a7fd4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/charts.go @@ -0,0 +1,4936 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioCPUUtil = module.Priority + iota + prioCPUCoreUtil + prioCPUInterrupts + prioCPUDPCs + prioCPUCoreCState + + prioMemUtil + prioMemPageFaults + prioMemSwapUtil + prioMemSwapOperations + prioMemSwapPages + prioMemCache + prioMemCacheFaults + prioMemSystemPool + + prioDiskSpaceUsage + prioDiskBandwidth + prioDiskOperations + prioDiskAvgLatency + + prioNICBandwidth + prioNICPackets + prioNICErrors + prioNICDiscards + + prioTCPConnsEstablished + prioTCPConnsActive + prioTCPConnsPassive + prioTCPConnsFailure + prioTCPConnsReset + prioTCPSegmentsReceived + prioTCPSegmentsSent + prioTCPSegmentsRetransmitted + + prioOSProcesses + prioOSUsers + prioOSVisibleMemoryUsage + prioOSPagingUsage + + prioSystemThreads + prioSystemUptime + + prioLogonSessions + + prioThermalzoneTemperature + + prioProcessesCPUUtilization + prioProcessesMemoryUsage + prioProcessesIOBytes + prioProcessesIOOperations + prioProcessesPageFaults + prioProcessesPageFileBytes + prioProcessesThreads + prioProcessesHandles + + prioIISWebsiteTraffic + prioIISWebsiteFTPFileTransferRate + prioIISWebsiteActiveConnectionsCount + prioIISWebsiteRequestsRate + prioIISWebsiteConnectionAttemptsRate + prioIISWebsiteUsersCount + prioIISWebsiteISAPIExtRequestsCount + prioIISWebsiteISAPIExtRequestsRate + prioIISWebsiteErrorsRate + prioIISWebsiteLogonAttemptsRate + prioIISWebsiteUptime + + // Connections + prioMSSQLUserConnections + + // Transactions + prioMSSQLDatabaseTransactions + prioMSSQLDatabaseActiveTransactions + prioMSSQLDatabaseWriteTransactions + prioMSSQLDatabaseBackupRestoreOperations + prioMSSQLDatabaseLogFlushes + prioMSSQLDatabaseLogFlushed + + // Size + prioMSSQLDatabaseDataFileSize + + // SQL activity + prioMSSQLStatsBatchRequests + prioMSSQLStatsCompilations + prioMSSQLStatsRecompilations + prioMSSQLStatsAutoParameterization + prioMSSQLStatsSafeAutoParameterization + + // Processes + prioMSSQLBlockedProcess + + // Buffer Cache + prioMSSQLCacheHitRatio + prioMSSQLBufManIOPS + prioMSSQLBufferCheckpointPages + prioMSSQLAccessMethodPageSplits + prioMSSQLBufferPageLifeExpectancy + + // Memory + prioMSSQLMemmgrConnectionMemoryBytes + prioMSSQLMemTotalServer + prioMSSQLMemmgrExternalBenefitOfMemory + prioMSSQLMemmgrPendingMemoryGrants + + // Locks + prioMSSQLLocksLockWait + prioMSSQLLocksDeadLocks + + // Error + prioMSSQLSqlErrorsTotal + + // NET Framework + // Exceptions + prioNETFrameworkCLRExceptionsThrown + prioNETFrameworkCLRExceptionsFilters + prioNETFrameworkCLRExceptionsFinallys + prioNETFrameworkCLRExceptionsThrowToCatchDepth + + // InterOP + prioNETFrameworkCLRInteropCOMCallableWrappers + prioNETFrameworkCLRInteropMarshalling + prioNETFrameworkCLRInteropStubsCreated + prioNETFrameworkCLRJITMethods + + // JIT + prioNETFrameworkCLRJITTime + prioNETFrameworkCLRJITStandardFailures + prioNETFrameworkCLRJITILBytes + + // Loading + prioNETFrameworkCLRLoadingLoaderHeapSize + prioNETFrameworkCLRLoadingAppDomainsLoaded + 
prioNETFrameworkCLRLoadingAppDomainsUnloaded + prioNETFrameworkCLRLoadingAssembliesLoaded + prioNETFrameworkCLRLoadingClassesLoaded + prioNETFrameworkCLRLoadingClassLoadFailure + + // Locks and threads + prioNETFrameworkCLRLocksAndThreadsQueueLength + prioNETFrameworkCLRLocksAndThreadsCurrentLogicalThreads + prioNETFrameworkCLRLocksAndThreadsCurrentPhysicalThreads + prioNETFrameworkCLRLocksAndThreadsRecognizedThreads + prioNETFrameworkCLRLocksAndThreadsContentions + + // Memory + prioNETFrameworkCLRMemoryAllocatedBytes + prioNETFrameworkCLRMemoryFinalizationSurvivors + prioNETFrameworkCLRMemoryHeapSize + prioNETFrameworkCLRMemoryPromoted + prioNETFrameworkCLRMemoryNumberGCHandles + prioNETFrameworkCLRMemoryCollections + prioNETFrameworkCLRMemoryInducedGC + prioNETFrameworkCLRMemoryNumberPinnedObjects + prioNETFrameworkCLRMemoryNumberSinkBlocksInUse + prioNETFrameworkCLRMemoryCommitted + prioNETFrameworkCLRMemoryReserved + prioNETFrameworkCLRMemoryGCTime + + // Remoting + prioNETFrameworkCLRRemotingChannels + prioNETFrameworkCLRRemotingContextBoundClassesLoaded + prioNETFrameworkCLRRemotingContextBoundObjects + prioNETFrameworkCLRRemotingContextProxies + prioNETFrameworkCLRRemotingContexts + prioNETFrameworkCLRRemotingRemoteCalls + + // Security + prioNETFrameworkCLRSecurityLinkTimeChecks + prioNETFrameworkCLRSecurityRTChecksTime + prioNETFrameworkCLRSecurityStackWalkDepth + prioNETFrameworkCLRSecurityRuntimeChecks + + prioServiceState + prioServiceStatus + + // Database + prioADDatabaseOperations + prioADDirectoryOperations + prioADNameCacheLookups + prioADCacheHits + + // Replication + prioADDRAReplicationIntersiteCompressedTraffic + prioADDRAReplicationIntrasiteCompressedTraffic + prioADDRAReplicationSyncObjectsRemaining + prioADDRAReplicationPropertiesUpdated + prioADDRAReplicationPropertiesFiltered + prioADDRAReplicationObjectsFiltered + prioADReplicationPendingSyncs + prioADDRASyncRequests + prioADDirectoryServiceThreadsInUse + + // Bind + prioADLDAPBindTime + prioADBindsTotal + + // LDAP + prioADLDAPSearchesTotal + + // Thread Queue + prioADATQAverageRequestLatency + prioADATQOutstandingRequests + + // Requests + prioADCSCertTemplateRequests + prioADCSCertTemplateRequestProcessingTime + prioADCSCertTemplateRetrievals + prioADCSCertTemplateFailedRequests + prioADCSCertTemplateIssuesRequests + prioADCSCertTemplatePendingRequests + + // Response + prioADCSCertTemplateChallengeResponses + + // Retrieval + prioADCSCertTemplateRetrievalProcessingTime + + // Timing + prioADCSCertTemplateRequestCryptoSigningTime + prioADCSCertTemplateRequestPolicyModuleProcessingTime + prioADCSCertTemplateChallengeResponseProcessingTime + prioADCSCertTemplateSignedCertificateTimestampLists + prioADCSCertTemplateSignedCertificateTimestampListProcessingTime + + // ADFS + // AD + prioADFSADLoginConnectionFailures + + // DB Artifacts + prioADFSDBArtifactFailures + prioADFSDBArtifactQueryTimeSeconds + + // DB Config + prioADFSDBConfigFailures + prioADFSDBConfigQueryTimeSeconds + + // Auth + prioADFSDeviceAuthentications + prioADFSExternalAuthentications + prioADFSOauthAuthorizationRequests + prioADFSCertificateAuthentications + prioADFSOauthClientAuthentications + prioADFSPassportAuthentications + prioADFSSSOAuthentications + prioADFSUserPasswordAuthentications + prioADFSWindowsIntegratedAuthentications + + // OAuth + prioADFSOauthClientCredentials + prioADFSOauthClientPrivkeyJwtAuthentication + prioADFSOauthClientSecretBasicAuthentications + prioADFSOauthClientSecretPostAuthentications + 
prioADFSOauthClientWindowsAuthentications + prioADFSOauthLogonCertificateRequests + prioADFSOauthPasswordGrantRequests + prioADFSOauthTokenRequestsSuccess + prioADFSFederatedAuthentications + + // Requests + prioADFSFederationMetadataRequests + prioADFSPassiveRequests + prioADFSPasswordChangeRequests + prioADFSSAMLPTokenRequests + prioADFSWSTrustTokenRequestsSuccess + prioADFSTokenRequests + prioADFSWSFedTokenRequestsSuccess + + // Exchange + // Transport Queue + prioExchangeTransportQueuesActiveMailboxDelivery + prioExchangeTransportQueuesExternalActiveRemoteDelivery + prioExchangeTransportQueuesExternalLargestDelivery + prioExchangeTransportQueuesInternalActiveRemoteDeliery + prioExchangeTransportQueuesInternalLargestDelivery + prioExchangeTransportQueuesRetryMailboxDelivery + prioExchangeTransportQueuesUnreachable + prioExchangeTransportQueuesPoison + + // LDAP + prioExchangeLDAPLongRunningOPS + prioExchangeLDAPReadTime + prioExchangeLDAPSearchTime + prioExchangeLDAPWriteTime + prioExchangeLDAPTimeoutErrors + + // OWA + prioExchangeOWACurrentUniqueUsers + prioExchangeOWARequestsTotal + + // Sync + prioExchangeActiveSyncPingCMDsPending + prioExchangeActiveSyncRequests + prioExchangeActiveSyncSyncCMDs + + // RPC + prioExchangeRPCActiveUserCount + prioExchangeRPCAvgLatency + prioExchangeRPCConnectionCount + prioExchangeRPCOperationsTotal + prioExchangeRPCRequests + prioExchangeRpcUserCount + + // Workload + prioExchangeWorkloadActiveTasks + prioExchangeWorkloadCompleteTasks + prioExchangeWorkloadQueueTasks + prioExchangeWorkloadYieldedTasks + prioExchangeWorkloadActivityStatus + + // HTTP Proxy + prioExchangeHTTPProxyAVGAuthLatency + prioExchangeHTTPProxyAVGCASProcessingLatency + prioExchangeHTTPProxyMailboxProxyFailureRate + prioExchangeHTTPProxyServerLocatorAvgLatency + prioExchangeHTTPProxyOutstandingProxyRequests + prioExchangeHTTPProxyRequestsTotal + + // Request + prioExchangeAutoDiscoverRequests + prioExchangeAvailServiceRequests + + // Hyperv Health + prioHypervVMHealth + + // Hyperv Partition + prioHypervRootPartitionDeviceSpacePages + prioHypervRootPartitionGPASpacePages + prioHypervRootPartitionGPASpaceModifications + prioHypervRootPartitionAttachedDevices + prioHypervRootPartitionDepositedPages + prioHypervRootPartitionSkippedInterrupts + prioHypervRootPartitionDeviceDMAErrors + prioHypervRootPartitionDeviceInterruptErrors + prioHypervRootPartitionDeviceInterruptThrottleEvents + prioHypervRootPartitionIOTlbFlush + prioHypervRootPartitionAddressSpace + prioHypervRootPartitionVirtualTlbFlushEntires + prioHypervRootPartitionVirtualTlbPages + + // Hyperv VM (Memory) + prioHypervVMCPUUsage + prioHypervVMMemoryPhysical + prioHypervVMMemoryPhysicalGuestVisible + prioHypervVMMemoryPressureCurrent + prioHypervVIDPhysicalPagesAllocated + prioHypervVIDRemotePhysicalPages + + // Hyperv Device + prioHypervVMDeviceBytes + prioHypervVMDeviceOperations + prioHypervVMDeviceErrors + + // Hyperv Interface + prioHypervVMInterfaceBytes + prioHypervVMInterfacePacketsDropped + prioHypervVMInterfacePackets + + // Hyperv Vswitch + prioHypervVswitchTrafficTotal + prioHypervVswitchPackets + prioHypervVswitchDirectedPackets + prioHypervVswitchBroadcastPackets + prioHypervVswitchMulticastPackets + prioHypervVswitchDroppedPackets + prioHypervVswitchExtensionsDroppedPackets + prioHypervVswitchPacketsFlooded + prioHypervVswitchLearnedMACAddresses + prioHypervVswitchPurgeMACAddress + + prioCollectorDuration + prioCollectorStatus +) + +// CPU +var ( + cpuCharts = module.Charts{ + cpuUtilChart.Copy(), + } + 
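+// cpuUtilChart below splits total CPU time by mode. With
+// module.PercentOfIncremental each dimension is rendered as its share of
+// the summed per-interval delta (the stack totals 100%), and the idle
+// dimension starts hidden (DimOpts{Hidden: true}) so non-idle work
+// stands out.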
cpuUtilChart = module.Chart{ + ID: "cpu_utilization_total", + Title: "Total CPU Utilization (all cores)", + Units: "percentage", + Fam: "cpu", + Ctx: "windows.cpu_utilization_total", + Type: module.Stacked, + Priority: prioCPUUtil, + Dims: module.Dims{ + {ID: "cpu_idle_time", Name: "idle", Algo: module.PercentOfIncremental, Div: 1000, DimOpts: module.DimOpts{Hidden: true}}, + {ID: "cpu_dpc_time", Name: "dpc", Algo: module.PercentOfIncremental, Div: 1000}, + {ID: "cpu_user_time", Name: "user", Algo: module.PercentOfIncremental, Div: 1000}, + {ID: "cpu_privileged_time", Name: "privileged", Algo: module.PercentOfIncremental, Div: 1000}, + {ID: "cpu_interrupt_time", Name: "interrupt", Algo: module.PercentOfIncremental, Div: 1000}, + }, + } +) + +// CPU core +var ( + cpuCoreChartsTmpl = module.Charts{ + cpuCoreUtilChartTmpl.Copy(), + cpuCoreInterruptsChartTmpl.Copy(), + cpuDPCsChartTmpl.Copy(), + cpuCoreCStateChartTmpl.Copy(), + } + cpuCoreUtilChartTmpl = module.Chart{ + ID: "core_%s_cpu_utilization", + Title: "Core CPU Utilization", + Units: "percentage", + Fam: "cpu", + Ctx: "windows.cpu_core_utilization", + Type: module.Stacked, + Priority: prioCPUCoreUtil, + Dims: module.Dims{ + {ID: "cpu_core_%s_idle_time", Name: "idle", Algo: module.PercentOfIncremental, Div: precision, DimOpts: module.DimOpts{Hidden: true}}, + {ID: "cpu_core_%s_dpc_time", Name: "dpc", Algo: module.PercentOfIncremental, Div: precision}, + {ID: "cpu_core_%s_user_time", Name: "user", Algo: module.PercentOfIncremental, Div: precision}, + {ID: "cpu_core_%s_privileged_time", Name: "privileged", Algo: module.PercentOfIncremental, Div: precision}, + {ID: "cpu_core_%s_interrupt_time", Name: "interrupt", Algo: module.PercentOfIncremental, Div: precision}, + }, + } + cpuCoreInterruptsChartTmpl = module.Chart{ + ID: "cpu_core_%s_interrupts", + Title: "Received and Serviced Hardware Interrupts", + Units: "interrupts/s", + Fam: "cpu", + Ctx: "windows.cpu_core_interrupts", + Priority: prioCPUInterrupts, + Dims: module.Dims{ + {ID: "cpu_core_%s_interrupts", Name: "interrupts", Algo: module.Incremental}, + }, + } + cpuDPCsChartTmpl = module.Chart{ + ID: "cpu_core_%s_dpcs", + Title: "Received and Serviced Deferred Procedure Calls (DPC)", + Units: "dpc/s", + Fam: "cpu", + Ctx: "windows.cpu_core_dpcs", + Priority: prioCPUDPCs, + Dims: module.Dims{ + {ID: "cpu_core_%s_dpcs", Name: "dpcs", Algo: module.Incremental}, + }, + } + cpuCoreCStateChartTmpl = module.Chart{ + ID: "cpu_core_%s_cpu_cstate", + Title: "Core Time Spent in Low-Power Idle State", + Units: "percentage", + Fam: "cpu", + Ctx: "windows.cpu_core_cstate", + Type: module.Stacked, + Priority: prioCPUCoreCState, + Dims: module.Dims{ + {ID: "cpu_core_%s_cstate_c1", Name: "c1", Algo: module.PercentOfIncremental, Div: precision}, + {ID: "cpu_core_%s_cstate_c2", Name: "c2", Algo: module.PercentOfIncremental, Div: precision}, + {ID: "cpu_core_%s_cstate_c3", Name: "c3", Algo: module.PercentOfIncremental, Div: precision}, + }, + } +) + +// Memory +var ( + memCharts = module.Charts{ + memUtilChart.Copy(), + memPageFaultsChart.Copy(), + memSwapUtilChart.Copy(), + memSwapOperationsChart.Copy(), + memSwapPagesChart.Copy(), + memCacheChart.Copy(), + memCacheFaultsChart.Copy(), + memSystemPoolChart.Copy(), + } + memUtilChart = module.Chart{ + ID: "memory_utilization", + Title: "Memory Utilization", + Units: "bytes", + Fam: "mem", + Ctx: "windows.memory_utilization", + Type: module.Stacked, + Priority: prioMemUtil, + Dims: module.Dims{ + {ID: "memory_available_bytes", Name: "available"}, + {ID: 
"memory_used_bytes", Name: "used"}, + }, + } + memPageFaultsChart = module.Chart{ + ID: "memory_page_faults", + Title: "Memory Page Faults", + Units: "pgfaults/s", + Fam: "mem", + Ctx: "windows.memory_page_faults", + Priority: prioMemPageFaults, + Dims: module.Dims{ + {ID: "memory_page_faults_total", Name: "page_faults", Algo: module.Incremental}, + }, + } + memSwapUtilChart = module.Chart{ + ID: "memory_swap_utilization", + Title: "Swap Utilization", + Units: "bytes", + Fam: "mem", + Ctx: "windows.memory_swap_utilization", + Type: module.Stacked, + Priority: prioMemSwapUtil, + Dims: module.Dims{ + {ID: "memory_not_committed_bytes", Name: "available"}, + {ID: "memory_committed_bytes", Name: "used"}, + }, + Vars: module.Vars{ + {ID: "memory_commit_limit"}, + }, + } + memSwapOperationsChart = module.Chart{ + ID: "memory_swap_operations", + Title: "Swap Operations", + Units: "operations/s", + Fam: "mem", + Ctx: "windows.memory_swap_operations", + Type: module.Area, + Priority: prioMemSwapOperations, + Dims: module.Dims{ + {ID: "memory_swap_page_reads_total", Name: "read", Algo: module.Incremental}, + {ID: "memory_swap_page_writes_total", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } + memSwapPagesChart = module.Chart{ + ID: "memory_swap_pages", + Title: "Swap Pages", + Units: "pages/s", + Fam: "mem", + Ctx: "windows.memory_swap_pages", + Priority: prioMemSwapPages, + Dims: module.Dims{ + {ID: "memory_swap_pages_read_total", Name: "read", Algo: module.Incremental}, + {ID: "memory_swap_pages_written_total", Name: "written", Algo: module.Incremental, Mul: -1}, + }, + } + memCacheChart = module.Chart{ + ID: "memory_cached", + Title: "Cached", + Units: "bytes", + Fam: "mem", + Ctx: "windows.memory_cached", + Type: module.Area, + Priority: prioMemCache, + Dims: module.Dims{ + {ID: "memory_cache_total", Name: "cached"}, + }, + } + memCacheFaultsChart = module.Chart{ + ID: "memory_cache_faults", + Title: "Cache Faults", + Units: "faults/s", + Fam: "mem", + Ctx: "windows.memory_cache_faults", + Priority: prioMemCacheFaults, + Dims: module.Dims{ + {ID: "memory_cache_faults_total", Name: "cache_faults", Algo: module.Incremental}, + }, + } + memSystemPoolChart = module.Chart{ + ID: "memory_system_pool", + Title: "System Memory Pool", + Units: "bytes", + Fam: "mem", + Ctx: "windows.memory_system_pool", + Type: module.Stacked, + Priority: prioMemSystemPool, + Dims: module.Dims{ + {ID: "memory_pool_paged_bytes", Name: "paged"}, + {ID: "memory_pool_nonpaged_bytes_total", Name: "non-paged"}, + }, + } +) + +// Logical Disks +var ( + diskChartsTmpl = module.Charts{ + diskSpaceUsageChartTmpl.Copy(), + diskBandwidthChartTmpl.Copy(), + diskOperationsChartTmpl.Copy(), + diskAvgLatencyChartTmpl.Copy(), + } + diskSpaceUsageChartTmpl = module.Chart{ + ID: "logical_disk_%s_space_usage", + Title: "Space usage", + Units: "bytes", + Fam: "disk", + Ctx: "windows.logical_disk_space_usage", + Type: module.Stacked, + Priority: prioDiskSpaceUsage, + Dims: module.Dims{ + {ID: "logical_disk_%s_free_space", Name: "free"}, + {ID: "logical_disk_%s_used_space", Name: "used"}, + }, + } + diskBandwidthChartTmpl = module.Chart{ + ID: "logical_disk_%s_bandwidth", + Title: "Bandwidth", + Units: "bytes/s", + Fam: "disk", + Ctx: "windows.logical_disk_bandwidth", + Type: module.Area, + Priority: prioDiskBandwidth, + Dims: module.Dims{ + {ID: "logical_disk_%s_read_bytes_total", Name: "read", Algo: module.Incremental}, + {ID: "logical_disk_%s_write_bytes_total", Name: "write", Algo: module.Incremental, Mul: -1}, + }, + } + 
diskOperationsChartTmpl = module.Chart{ + ID: "logical_disk_%s_operations", + Title: "Operations", + Units: "operations/s", + Fam: "disk", + Ctx: "windows.logical_disk_operations", + Priority: prioDiskOperations, + Dims: module.Dims{ + {ID: "logical_disk_%s_reads_total", Name: "reads", Algo: module.Incremental}, + {ID: "logical_disk_%s_writes_total", Name: "writes", Algo: module.Incremental, Mul: -1}, + }, + } + diskAvgLatencyChartTmpl = module.Chart{ + ID: "logical_disk_%s_latency", + Title: "Average Read/Write Latency", + Units: "seconds", + Fam: "disk", + Ctx: "windows.logical_disk_latency", + Priority: prioDiskAvgLatency, + Dims: module.Dims{ + {ID: "logical_disk_%s_read_latency", Name: "read", Algo: module.Incremental, Div: precision}, + {ID: "logical_disk_%s_write_latency", Name: "write", Algo: module.Incremental, Div: precision}, + }, + } +) + +// Network interfaces +var ( + nicChartsTmpl = module.Charts{ + nicBandwidthChartTmpl.Copy(), + nicPacketsChartTmpl.Copy(), + nicErrorsChartTmpl.Copy(), + nicDiscardsChartTmpl.Copy(), + } + nicBandwidthChartTmpl = module.Chart{ + ID: "nic_%s_bandwidth", + Title: "Bandwidth", + Units: "kilobits/s", + Fam: "net", + Ctx: "windows.net_nic_bandwidth", + Type: module.Area, + Priority: prioNICBandwidth, + Dims: module.Dims{ + {ID: "net_nic_%s_bytes_received", Name: "received", Algo: module.Incremental, Div: 1000}, + {ID: "net_nic_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1, Div: 1000}, + }, + } + nicPacketsChartTmpl = module.Chart{ + ID: "nic_%s_packets", + Title: "Packets", + Units: "packets/s", + Fam: "net", + Ctx: "windows.net_nic_packets", + Priority: prioNICPackets, + Dims: module.Dims{ + {ID: "net_nic_%s_packets_received_total", Name: "received", Algo: module.Incremental}, + {ID: "net_nic_%s_packets_sent_total", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + nicErrorsChartTmpl = module.Chart{ + ID: "nic_%s_errors", + Title: "Errors", + Units: "errors/s", + Fam: "net", + Ctx: "windows.net_nic_errors", + Priority: prioNICErrors, + Dims: module.Dims{ + {ID: "net_nic_%s_packets_received_errors", Name: "inbound", Algo: module.Incremental}, + {ID: "net_nic_%s_packets_outbound_errors", Name: "outbound", Algo: module.Incremental, Mul: -1}, + }, + } + nicDiscardsChartTmpl = module.Chart{ + ID: "nic_%s_discarded", + Title: "Discards", + Units: "discards/s", + Fam: "net", + Ctx: "windows.net_nic_discarded", + Priority: prioNICDiscards, + Dims: module.Dims{ + {ID: "net_nic_%s_packets_received_discarded", Name: "inbound", Algo: module.Incremental}, + {ID: "net_nic_%s_packets_outbound_discarded", Name: "outbound", Algo: module.Incremental, Mul: -1}, + }, + } +) + +// TCP +var ( + tcpCharts = module.Charts{ + tcpConnsActiveChart.Copy(), + tcpConnsEstablishedChart.Copy(), + tcpConnsFailuresChart.Copy(), + tcpConnsPassiveChart.Copy(), + tcpConnsResetsChart.Copy(), + tcpSegmentsReceivedChart.Copy(), + tcpSegmentsRetransmittedChart.Copy(), + tcpSegmentsSentChart.Copy(), + } + tcpConnsEstablishedChart = module.Chart{ + ID: "tcp_conns_established", + Title: "TCP established connections", + Units: "connections", + Fam: "tcp", + Ctx: "windows.tcp_conns_established", + Priority: prioTCPConnsEstablished, + Dims: module.Dims{ + {ID: "tcp_ipv4_conns_established", Name: "ipv4"}, + {ID: "tcp_ipv6_conns_established", Name: "ipv6"}, + }, + } + tcpConnsActiveChart = module.Chart{ + ID: "tcp_conns_active", + Title: "TCP active connections", + Units: "connections/s", + Fam: "tcp", + Ctx: "windows.tcp_conns_active", + Priority: 
prioTCPConnsActive, + Dims: module.Dims{ + {ID: "tcp_ipv4_conns_active", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_conns_active", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpConnsPassiveChart = module.Chart{ + ID: "tcp_conns_passive", + Title: "TCP passive connections", + Units: "connections/s", + Fam: "tcp", + Ctx: "windows.tcp_conns_passive", + Priority: prioTCPConnsPassive, + Dims: module.Dims{ + {ID: "tcp_ipv4_conns_passive", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_conns_passive", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpConnsFailuresChart = module.Chart{ + ID: "tcp_conns_failures", + Title: "TCP connection failures", + Units: "failures/s", + Fam: "tcp", + Ctx: "windows.tcp_conns_failures", + Priority: prioTCPConnsFailure, + Dims: module.Dims{ + {ID: "tcp_ipv4_conns_failures", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_conns_failures", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpConnsResetsChart = module.Chart{ + ID: "tcp_conns_resets", + Title: "TCP connection resets", + Units: "resets/s", + Fam: "tcp", + Ctx: "windows.tcp_conns_resets", + Priority: prioTCPConnsReset, + Dims: module.Dims{ + {ID: "tcp_ipv4_conns_resets", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_conns_resets", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpSegmentsReceivedChart = module.Chart{ + ID: "tcp_segments_received", + Title: "Number of TCP segments received", + Units: "segments/s", + Fam: "tcp", + Ctx: "windows.tcp_segments_received", + Priority: prioTCPSegmentsReceived, + Dims: module.Dims{ + {ID: "tcp_ipv4_segments_received", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_segments_received", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpSegmentsSentChart = module.Chart{ + ID: "tcp_segments_sent", + Title: "Number of TCP segments sent", + Units: "segments/s", + Fam: "tcp", + Ctx: "windows.tcp_segments_sent", + Priority: prioTCPSegmentsSent, + Dims: module.Dims{ + {ID: "tcp_ipv4_segments_sent", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_segments_sent", Name: "ipv6", Algo: module.Incremental}, + }, + } + tcpSegmentsRetransmittedChart = module.Chart{ + ID: "tcp_segments_retransmitted", + Title: "Number of TCP segments retransmitted", + Units: "segments/s", + Fam: "tcp", + Ctx: "windows.tcp_segments_retransmitted", + Priority: prioTCPSegmentsRetransmitted, + Dims: module.Dims{ + {ID: "tcp_ipv4_segments_retransmitted", Name: "ipv4", Algo: module.Incremental}, + {ID: "tcp_ipv6_segments_retransmitted", Name: "ipv6", Algo: module.Incremental}, + }, + } +) + +// OS +var ( + osCharts = module.Charts{ + osProcessesChart.Copy(), + osUsersChart.Copy(), + osMemoryUsage.Copy(), + osPagingFilesUsageChart.Copy(), + } + osProcessesChart = module.Chart{ + ID: "os_processes", + Title: "Processes", + Units: "number", + Fam: "os", + Ctx: "windows.os_processes", + Priority: prioOSProcesses, + Dims: module.Dims{ + {ID: "os_processes", Name: "processes"}, + }, + Vars: module.Vars{ + {ID: "os_processes_limit"}, + }, + } + osUsersChart = module.Chart{ + ID: "os_users", + Title: "Number of Users", + Units: "users", + Fam: "os", + Ctx: "windows.os_users", + Priority: prioOSUsers, + Dims: module.Dims{ + {ID: "os_users", Name: "users"}, + }, + } + osMemoryUsage = module.Chart{ + ID: "os_visible_memory_usage", + Title: "Visible Memory Usage", + Units: "bytes", + Fam: "os", + Ctx: "windows.os_visible_memory_usage", + Type: module.Stacked, + Priority: prioOSVisibleMemoryUsage, + Dims: module.Dims{ + 
{ID: "os_physical_memory_free_bytes", Name: "free"}, + {ID: "os_visible_memory_used_bytes", Name: "used"}, + }, + Vars: module.Vars{ + {ID: "os_visible_memory_bytes"}, + }, + } + osPagingFilesUsageChart = module.Chart{ + ID: "os_paging_files_usage", + Title: "Paging Files Usage", + Units: "bytes", + Fam: "os", + Ctx: "windows.os_paging_files_usage", + Type: module.Stacked, + Priority: prioOSPagingUsage, + Dims: module.Dims{ + {ID: "os_paging_free_bytes", Name: "free"}, + {ID: "os_paging_used_bytes", Name: "used"}, + }, + Vars: module.Vars{ + {ID: "os_paging_limit_bytes"}, + }, + } +) + +// System +var ( + systemCharts = module.Charts{ + systemThreadsChart.Copy(), + systemUptimeChart.Copy(), + } + systemThreadsChart = module.Chart{ + ID: "system_threads", + Title: "Threads", + Units: "number", + Fam: "system", + Ctx: "windows.system_threads", + Priority: prioSystemThreads, + Dims: module.Dims{ + {ID: "system_threads", Name: "threads"}, + }, + } + systemUptimeChart = module.Chart{ + ID: "system_uptime", + Title: "Uptime", + Units: "seconds", + Fam: "system", + Ctx: "windows.system_uptime", + Priority: prioSystemUptime, + Dims: module.Dims{ + {ID: "system_up_time", Name: "time"}, + }, + } +) + +// IIS +var ( + iisWebsiteChartsTmpl = module.Charts{ + iisWebsiteTrafficChartTempl.Copy(), + iisWebsiteRequestsRateChartTmpl.Copy(), + iisWebsiteActiveConnectionsCountChartTmpl.Copy(), + iisWebsiteUsersCountChartTmpl.Copy(), + iisWebsiteConnectionAttemptsRate.Copy(), + iisWebsiteISAPIExtRequestsCountChartTmpl.Copy(), + iisWebsiteISAPIExtRequestsRateChartTmpl.Copy(), + iisWebsiteFTPFileTransferRateChartTempl.Copy(), + iisWebsiteLogonAttemptsRateChartTmpl.Copy(), + iisWebsiteErrorsRateChart.Copy(), + iisWebsiteUptimeChartTmpl.Copy(), + } + iisWebsiteTrafficChartTempl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_traffic", + Title: "Website traffic", + Units: "bytes/s", + Fam: "traffic", + Ctx: "iis.website_traffic", + Type: module.Area, + Priority: prioIISWebsiteTraffic, + Dims: module.Dims{ + {ID: "iis_website_%s_received_bytes_total", Name: "received", Algo: module.Incremental}, + {ID: "iis_website_%s_sent_bytes_total", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + } + iisWebsiteFTPFileTransferRateChartTempl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_ftp_file_transfer_rate", + Title: "Website FTP file transfer rate", + Units: "files/s", + Fam: "traffic", + Ctx: "iis.website_ftp_file_transfer_rate", + Priority: prioIISWebsiteFTPFileTransferRate, + Dims: module.Dims{ + {ID: "iis_website_%s_files_received_total", Name: "received", Algo: module.Incremental}, + {ID: "iis_website_%s_files_sent_total", Name: "sent", Algo: module.Incremental}, + }, + } + iisWebsiteActiveConnectionsCountChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_active_connections_count", + Title: "Website active connections", + Units: "connections", + Fam: "connections", + Ctx: "iis.website_active_connections_count", + Priority: prioIISWebsiteActiveConnectionsCount, + Dims: module.Dims{ + {ID: "iis_website_%s_current_connections", Name: "active"}, + }, + } + iisWebsiteConnectionAttemptsRate = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_connection_attempts_rate", + Title: "Website connections attempts", + Units: "attempts/s", + Fam: "connections", + Ctx: "iis.website_connection_attempts_rate", + Priority: prioIISWebsiteConnectionAttemptsRate, + Dims: module.Dims{ + {ID: "iis_website_%s_connection_attempts_all_instances_total", Name: "connection", Algo: 
module.Incremental}, + }, + } + iisWebsiteRequestsRateChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_requests_rate", + Title: "Website requests rate", + Units: "requests/s", + Fam: "requests", + Ctx: "iis.website_requests_rate", + Priority: prioIISWebsiteRequestsRate, + Dims: module.Dims{ + {ID: "iis_website_%s_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + iisWebsiteUsersCountChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_users_count", + Title: "Website users with pending requests", + Units: "users", + Fam: "requests", + Ctx: "iis.website_users_count", + Type: module.Stacked, + Priority: prioIISWebsiteUsersCount, + Dims: module.Dims{ + {ID: "iis_website_%s_current_anonymous_users", Name: "anonymous"}, + {ID: "iis_website_%s_current_non_anonymous_users", Name: "non_anonymous"}, + }, + } + iisWebsiteISAPIExtRequestsCountChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_isapi_extension_requests_count", + Title: "ISAPI extension requests", + Units: "requests", + Fam: "requests", + Ctx: "iis.website_isapi_extension_requests_count", + Priority: prioIISWebsiteISAPIExtRequestsCount, + Dims: module.Dims{ + {ID: "iis_website_%s_current_isapi_extension_requests", Name: "isapi"}, + }, + } + iisWebsiteISAPIExtRequestsRateChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_isapi_extension_requests_rate", + Title: "Website ISAPI extension requests", + Units: "requests/s", + Fam: "requests", + Ctx: "iis.website_isapi_extension_requests_rate", + Priority: prioIISWebsiteISAPIExtRequestsRate, + Dims: module.Dims{ + {ID: "iis_website_%s_isapi_extension_requests_total", Name: "isapi", Algo: module.Incremental}, + }, + } + iisWebsiteErrorsRateChart = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_errors_rate", + Title: "Website errors", + Units: "errors/s", + Fam: "requests", + Ctx: "iis.website_errors_rate", + Type: module.Stacked, + Priority: prioIISWebsiteErrorsRate, + Dims: module.Dims{ + {ID: "iis_website_%s_locked_errors_total", Name: "document_locked", Algo: module.Incremental}, + {ID: "iis_website_%s_not_found_errors_total", Name: "document_not_found", Algo: module.Incremental}, + }, + } + iisWebsiteLogonAttemptsRateChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_logon_attempts_rate", + Title: "Website logon attempts", + Units: "attempts/s", + Fam: "logon", + Ctx: "iis.website_logon_attempts_rate", + Priority: prioIISWebsiteLogonAttemptsRate, + Dims: module.Dims{ + {ID: "iis_website_%s_logon_attempts_total", Name: "logon", Algo: module.Incremental}, + }, + } + iisWebsiteUptimeChartTmpl = module.Chart{ + OverModule: "iis", + ID: "iis_website_%s_uptime", + Title: "Website uptime", + Units: "seconds", + Fam: "uptime", + Ctx: "iis.website_uptime", + Priority: prioIISWebsiteUptime, + Dims: module.Dims{ + {ID: "iis_website_%s_service_uptime", Name: "uptime"}, + }, + } +) + +// MS-SQL +var ( + mssqlInstanceChartsTmpl = module.Charts{ + mssqlAccessMethodPageSplitsChart.Copy(), + mssqlCacheHitRatioChart.Copy(), + mssqlBufferCheckpointPageChart.Copy(), + mssqlBufferPageLifeExpectancyChart.Copy(), + mssqlBufManIOPSChart.Copy(), + mssqlBlockedProcessChart.Copy(), + mssqlLocksWaitChart.Copy(), + mssqlDeadLocksChart.Copy(), + mssqlMemmgrConnectionMemoryBytesChart.Copy(), + mssqlMemmgrExternalBenefitOfMemoryChart.Copy(), + mssqlMemmgrPendingMemoryChart.Copy(), + mssqlMemmgrTotalServerChart.Copy(), + mssqlSQLErrorsTotalChart.Copy(), + mssqlStatsAutoParamChart.Copy(), + 
mssqlStatsBatchRequestsChart.Copy(), + mssqlStatsSafeAutoChart.Copy(), + mssqlStatsCompilationChart.Copy(), + mssqlStatsRecompilationChart.Copy(), + mssqlUserConnectionChart.Copy(), + } + mssqlDatabaseChartsTmpl = module.Charts{ + mssqlDatabaseActiveTransactionsChart.Copy(), + mssqlDatabaseBackupRestoreOperationsChart.Copy(), + mssqlDatabaseSizeChart.Copy(), + mssqlDatabaseLogFlushedChart.Copy(), + mssqlDatabaseLogFlushesChart.Copy(), + mssqlDatabaseTransactionsChart.Copy(), + mssqlDatabaseWriteTransactionsChart.Copy(), + } + // Access Method: + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object?view=sql-server-ver16 + mssqlAccessMethodPageSplitsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_accessmethods_page_splits", + Title: "Page splits", + Units: "splits/s", + Fam: "buffer cache", + Ctx: "mssql.instance_accessmethods_page_splits", + Priority: prioMSSQLAccessMethodPageSplits, + Dims: module.Dims{ + {ID: "mssql_instance_%s_accessmethods_page_splits", Name: "page", Algo: module.Incremental}, + }, + } + // Buffer Management + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object?view=sql-server-ver16 + mssqlCacheHitRatioChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_cache_hit_ratio", + Title: "Buffer Cache hit ratio", + Units: "percentage", + Fam: "buffer cache", + Ctx: "mssql.instance_cache_hit_ratio", + Priority: prioMSSQLCacheHitRatio, + Dims: module.Dims{ + {ID: "mssql_instance_%s_cache_hit_ratio", Name: "hit_ratio"}, + }, + } + mssqlBufferCheckpointPageChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_bufman_checkpoint_pages", + Title: "Flushed pages", + Units: "pages/s", + Fam: "buffer cache", + Ctx: "mssql.instance_bufman_checkpoint_pages", + Priority: prioMSSQLBufferCheckpointPages, + Dims: module.Dims{ + {ID: "mssql_instance_%s_bufman_checkpoint_pages", Name: "flushed", Algo: module.Incremental}, + }, + } + mssqlBufferPageLifeExpectancyChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_bufman_page_life_expectancy", + Title: "Page life expectancy", + Units: "seconds", + Fam: "buffer cache", + Ctx: "mssql.instance_bufman_page_life_expectancy", + Priority: prioMSSQLBufferPageLifeExpectancy, + Dims: module.Dims{ + {ID: "mssql_instance_%s_bufman_page_life_expectancy_seconds", Name: "life_expectancy"}, + }, + } + mssqlBufManIOPSChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_bufman_iops", + Title: "Number of pages input and output", + Units: "pages/s", + Fam: "buffer cache", + Ctx: "mssql.instance_bufman_iops", + Priority: prioMSSQLBufManIOPS, + Dims: module.Dims{ + {ID: "mssql_instance_%s_bufman_page_reads", Name: "read", Algo: module.Incremental}, + {ID: "mssql_instance_%s_bufman_page_writes", Name: "written", Mul: -1, Algo: module.Incremental}, + }, + } + // General Statistic + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object?view=sql-server-ver16 + mssqlBlockedProcessChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_blocked_process", + Title: "Blocked processes", + Units: "process", + Fam: "processes", + Ctx: "mssql.instance_blocked_processes", + Priority: prioMSSQLBlockedProcess, + Dims: module.Dims{ + {ID: "mssql_instance_%s_genstats_blocked_processes", Name: "blocked"}, + }, + } + mssqlUserConnectionChart = module.Chart{ + OverModule: 
"mssql", + ID: "mssql_instance_%s_user_connection", + Title: "User connections", + Units: "connections", + Fam: "connections", + Ctx: "mssql.instance_user_connection", + Priority: prioMSSQLUserConnections, + Dims: module.Dims{ + {ID: "mssql_instance_%s_genstats_user_connections", Name: "user"}, + }, + } + // Lock Wait + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object?view=sql-server-ver16 + mssqlLocksWaitChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_locks_lock_wait", + Title: "Lock requests that required the caller to wait", + Units: "locks/s", + Fam: "locks", + Ctx: "mssql.instance_locks_lock_wait", + Priority: prioMSSQLLocksLockWait, + Dims: module.Dims{ + {ID: "mssql_instance_%s_resource_AllocUnit_locks_lock_wait_seconds", Name: "alloc_unit", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Application_locks_lock_wait_seconds", Name: "application", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Database_locks_lock_wait_seconds", Name: "database", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Extent_locks_lock_wait_seconds", Name: "extent", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_File_locks_lock_wait_seconds", Name: "file", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_HoBT_locks_lock_wait_seconds", Name: "hobt", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Key_locks_lock_wait_seconds", Name: "key", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Metadata_locks_lock_wait_seconds", Name: "metadata", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_OIB_locks_lock_wait_seconds", Name: "oib", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Object_locks_lock_wait_seconds", Name: "object", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Page_locks_lock_wait_seconds", Name: "page", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_RID_locks_lock_wait_seconds", Name: "rid", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_RowGroup_locks_lock_wait_seconds", Name: "row_group", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Xact_locks_lock_wait_seconds", Name: "xact", Algo: module.Incremental}, + }, + } + mssqlDeadLocksChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_locks_deadlocks", + Title: "Lock requests that resulted in deadlock", + Units: "locks/s", + Fam: "locks", + Ctx: "mssql.instance_locks_deadlocks", + Priority: prioMSSQLLocksDeadLocks, + Dims: module.Dims{ + {ID: "mssql_instance_%s_resource_AllocUnit_locks_deadlocks", Name: "alloc_unit", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Application_locks_deadlocks", Name: "application", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Database_locks_deadlocks", Name: "database", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Extent_locks_deadlocks", Name: "extent", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_File_locks_deadlocks", Name: "file", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_HoBT_locks_deadlocks", Name: "hobt", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Key_locks_deadlocks", Name: "key", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Metadata_locks_deadlocks", Name: "metadata", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_OIB_locks_deadlocks", Name: "oib", Algo: module.Incremental}, + {ID: 
"mssql_instance_%s_resource_Object_locks_deadlocks", Name: "object", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Page_locks_deadlocks", Name: "page", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_RID_locks_deadlocks", Name: "rid", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_RowGroup_locks_deadlocks", Name: "row_group", Algo: module.Incremental}, + {ID: "mssql_instance_%s_resource_Xact_locks_deadlocks", Name: "xact", Algo: module.Incremental}, + }, + } + + // Memory Manager + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object?view=sql-server-ver16 + mssqlMemmgrConnectionMemoryBytesChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_memmgr_connection_memory_bytes", + Title: "Amount of dynamic memory to maintain connections", + Units: "bytes", + Fam: "memory", + Ctx: "mssql.instance_memmgr_connection_memory_bytes", + Priority: prioMSSQLMemmgrConnectionMemoryBytes, + Dims: module.Dims{ + {ID: "mssql_instance_%s_memmgr_connection_memory_bytes", Name: "memory", Algo: module.Incremental}, + }, + } + mssqlMemmgrExternalBenefitOfMemoryChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_memmgr_external_benefit_of_memory", + Title: "Performance benefit from adding memory to a specific cache", + Units: "bytes", + Fam: "memory", + Ctx: "mssql.instance_memmgr_external_benefit_of_memory", + Priority: prioMSSQLMemmgrExternalBenefitOfMemory, + Dims: module.Dims{ + {ID: "mssql_instance_%s_memmgr_external_benefit_of_memory", Name: "benefit", Algo: module.Incremental}, + }, + } + mssqlMemmgrPendingMemoryChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_memmgr_pending_memory_grants", + Title: "Process waiting for memory grant", + Units: "process", + Fam: "memory", + Ctx: "mssql.instance_memmgr_pending_memory_grants", + Priority: prioMSSQLMemmgrPendingMemoryGrants, + Dims: module.Dims{ + {ID: "mssql_instance_%s_memmgr_pending_memory_grants", Name: "pending"}, + }, + } + mssqlMemmgrTotalServerChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_memmgr_server_memory", + Title: "Memory committed", + Units: "bytes", + Fam: "memory", + Ctx: "mssql.instance_memmgr_server_memory", + Priority: prioMSSQLMemTotalServer, + Dims: module.Dims{ + {ID: "mssql_instance_%s_memmgr_total_server_memory_bytes", Name: "memory"}, + }, + } + + // SQL errors + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object?view=sql-server-ver16 + mssqlSQLErrorsTotalChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_sql_errors_total", + Title: "Errors", + Units: "errors/s", + Fam: "errors", + Ctx: "mssql.instance_sql_errors", + Priority: prioMSSQLSqlErrorsTotal, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sql_errors_total_db_offline_errors", Name: "db_offline", Algo: module.Incremental}, + {ID: "mssql_instance_%s_sql_errors_total_info_errors", Name: "info", Algo: module.Incremental}, + {ID: "mssql_instance_%s_sql_errors_total_kill_connection_errors", Name: "kill_connection", Algo: module.Incremental}, + {ID: "mssql_instance_%s_sql_errors_total_user_errors", Name: "user", Algo: module.Incremental}, + }, + } + + // SQL Statistic + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object?view=sql-server-ver16 + mssqlStatsAutoParamChart = module.Chart{ + OverModule: "mssql", + ID: 
"mssql_instance_%s_sqlstats_auto_parameterization_attempts", + Title: "Failed auto-parameterization attempts", + Units: "attempts/s", + Fam: "sql activity", + Ctx: "mssql.instance_sqlstats_auto_parameterization_attempts", + Priority: prioMSSQLStatsAutoParameterization, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sqlstats_auto_parameterization_attempts", Name: "failed", Algo: module.Incremental}, + }, + } + mssqlStatsBatchRequestsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_sqlstats_batch_requests", + Title: "Total of batches requests", + Units: "requests/s", + Fam: "sql activity", + Ctx: "mssql.instance_sqlstats_batch_requests", + Priority: prioMSSQLStatsBatchRequests, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sqlstats_batch_requests", Name: "batch", Algo: module.Incremental}, + }, + } + mssqlStatsSafeAutoChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_sqlstats_safe_auto_parameterization_attempts", + Title: "Safe auto-parameterization attempts", + Units: "attempts/s", + Fam: "sql activity", + Ctx: "mssql.instance_sqlstats_safe_auto_parameterization_attempts", + Priority: prioMSSQLStatsSafeAutoParameterization, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sqlstats_safe_auto_parameterization_attempts", Name: "safe", Algo: module.Incremental}, + }, + } + mssqlStatsCompilationChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_sqlstats_sql_compilations", + Title: "SQL compilations", + Units: "compilations/s", + Fam: "sql activity", + Ctx: "mssql.instance_sqlstats_sql_compilations", + Priority: prioMSSQLStatsCompilations, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sqlstats_sql_compilations", Name: "compilations", Algo: module.Incremental}, + }, + } + mssqlStatsRecompilationChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_instance_%s_sqlstats_sql_recompilations", + Title: "SQL re-compilations", + Units: "recompiles/s", + Fam: "sql activity", + Ctx: "mssql.instance_sqlstats_sql_recompilations", + Priority: prioMSSQLStatsRecompilations, + Dims: module.Dims{ + {ID: "mssql_instance_%s_sqlstats_sql_recompilations", Name: "recompiles", Algo: module.Incremental}, + }, + } + + // Database + // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017 + mssqlDatabaseActiveTransactionsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_active_transactions", + Title: "Active transactions per database", + Units: "transactions", + Fam: "transactions", + Ctx: "mssql.database_active_transactions", + Priority: prioMSSQLDatabaseActiveTransactions, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_active_transactions", Name: "active"}, + }, + } + mssqlDatabaseBackupRestoreOperationsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_backup_restore_operations", + Title: "Backup IO per database", + Units: "operations/s", + Fam: "transactions", + Ctx: "mssql.database_backup_restore_operations", + Priority: prioMSSQLDatabaseBackupRestoreOperations, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_backup_restore_operations", Name: "backup", Algo: module.Incremental}, + }, + } + mssqlDatabaseSizeChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_data_files_size", + Title: "Current database size", + Units: "bytes", + Fam: "size", + Ctx: "mssql.database_data_files_size", + Priority: prioMSSQLDatabaseDataFileSize, + Dims: module.Dims{ + {ID: 
"mssql_db_%s_instance_%s_data_files_size_bytes", Name: "size"}, + }, + } + mssqlDatabaseLogFlushedChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_log_flushed", + Title: "Log flushed", + Units: "bytes/s", + Fam: "transactions", + Ctx: "mssql.database_log_flushed", + Priority: prioMSSQLDatabaseLogFlushed, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_log_flushed_bytes", Name: "flushed", Algo: module.Incremental}, + }, + } + mssqlDatabaseLogFlushesChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_log_flushes", + Title: "Log flushes", + Units: "flushes/s", + Fam: "transactions", + Ctx: "mssql.database_log_flushes", + Priority: prioMSSQLDatabaseLogFlushes, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_log_flushes", Name: "log", Algo: module.Incremental}, + }, + } + mssqlDatabaseTransactionsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_transactions", + Title: "Transactions", + Units: "transactions/s", + Fam: "transactions", + Ctx: "mssql.database_transactions", + Priority: prioMSSQLDatabaseTransactions, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_transactions", Name: "transactions", Algo: module.Incremental}, + }, + } + mssqlDatabaseWriteTransactionsChart = module.Chart{ + OverModule: "mssql", + ID: "mssql_db_%s_instance_%s_write_transactions", + Title: "Write transactions", + Units: "transactions/s", + Fam: "transactions", + Ctx: "mssql.database_write_transactions", + Priority: prioMSSQLDatabaseWriteTransactions, + Dims: module.Dims{ + {ID: "mssql_db_%s_instance_%s_write_transactions", Name: "write", Algo: module.Incremental}, + }, + } +) + +// AD +var ( + adCharts = module.Charts{ + adDatabaseOperationsChart.Copy(), + adDirectoryOperationsChart.Copy(), + adNameCacheLookupsChart.Copy(), + adNameCacheHitsChart.Copy(), + adDRAReplicationIntersiteCompressedTrafficChart.Copy(), + adDRAReplicationIntrasiteCompressedTrafficChart.Copy(), + adDRAReplicationSyncObjectRemainingChart.Copy(), + adDRAReplicationObjectsFilteredChart.Copy(), + adDRAReplicationPropertiesUpdatedChart.Copy(), + adDRAReplicationPropertiesFilteredChart.Copy(), + adDRAReplicationPendingSyncsChart.Copy(), + adDRAReplicationSyncRequestsChart.Copy(), + adDirectoryServiceThreadsChart.Copy(), + adLDAPLastBindTimeChart.Copy(), + adBindsTotalChart.Copy(), + adLDAPSearchesChart.Copy(), + adATQAverageRequestLatencyChart.Copy(), + adATQOutstandingRequestsChart.Copy(), + } + adDatabaseOperationsChart = module.Chart{ + OverModule: "ad", + ID: "ad_database_operations", + Title: "AD database operations", + Units: "operations/s", + Fam: "database", + Ctx: "ad.database_operations", + Priority: prioADDatabaseOperations, + Dims: module.Dims{ + {ID: "ad_database_operations_total_add", Name: "add", Algo: module.Incremental}, + {ID: "ad_database_operations_total_delete", Name: "delete", Algo: module.Incremental}, + {ID: "ad_database_operations_total_modify", Name: "modify", Algo: module.Incremental}, + {ID: "ad_database_operations_total_recycle", Name: "recycle", Algo: module.Incremental}, + }, + } + adDirectoryOperationsChart = module.Chart{ + OverModule: "ad", + ID: "ad_directory_operations_read", + Title: "AD directory operations", + Units: "operations/s", + Fam: "database", + Ctx: "ad.directory_operations", + Priority: prioADDirectoryOperations, + Dims: module.Dims{ + {ID: "ad_directory_operations_total_read", Name: "read", Algo: module.Incremental}, + {ID: "ad_directory_operations_total_write", Name: "write", Algo: module.Incremental}, + {ID: 
"ad_directory_operations_total_search", Name: "search", Algo: module.Incremental}, + }, + } + adNameCacheLookupsChart = module.Chart{ + OverModule: "ad", + ID: "ad_name_cache_lookups", + Title: "Name cache lookups", + Units: "lookups/s", + Fam: "database", + Ctx: "ad.name_cache_lookups", + Priority: prioADNameCacheLookups, + Dims: module.Dims{ + {ID: "ad_name_cache_lookups_total", Name: "lookups", Algo: module.Incremental}, + }, + } + adNameCacheHitsChart = module.Chart{ + OverModule: "ad", + ID: "ad_name_cache_hits", + Title: "Name cache hits", + Units: "hits/s", + Fam: "database", + Ctx: "ad.name_cache_hits", + Priority: prioADCacheHits, + Dims: module.Dims{ + {ID: "ad_name_cache_hits_total", Name: "hits", Algo: module.Incremental}, + }, + } + adDRAReplicationIntersiteCompressedTrafficChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_intersite_compressed_traffic", + Title: "DRA replication compressed traffic withing site", + Units: "bytes/s", + Fam: "replication", + Ctx: "ad.dra_replication_intersite_compressed_traffic", + Priority: prioADDRAReplicationIntersiteCompressedTraffic, + Type: module.Area, + Dims: module.Dims{ + {ID: "ad_replication_data_intersite_bytes_total_inbound", Name: "inbound", Algo: module.Incremental}, + {ID: "ad_replication_data_intersite_bytes_total_outbound", Name: "outbound", Algo: module.Incremental, Mul: -1}, + }, + } + adDRAReplicationIntrasiteCompressedTrafficChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_intrasite_compressed_traffic", + Title: "DRA replication compressed traffic between sites", + Units: "bytes/s", + Fam: "replication", + Ctx: "ad.dra_replication_intrasite_compressed_traffic", + Priority: prioADDRAReplicationIntrasiteCompressedTraffic, + Type: module.Area, + Dims: module.Dims{ + {ID: "ad_replication_data_intrasite_bytes_total_inbound", Name: "inbound", Algo: module.Incremental}, + {ID: "ad_replication_data_intrasite_bytes_total_outbound", Name: "outbound", Algo: module.Incremental, Mul: -1}, + }, + } + adDRAReplicationSyncObjectRemainingChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_sync_objects_remaining", + Title: "DRA replication full sync objects remaining", + Units: "objects", + Fam: "replication", + Ctx: "ad.dra_replication_sync_objects_remaining", + Priority: prioADDRAReplicationSyncObjectsRemaining, + Dims: module.Dims{ + {ID: "ad_replication_inbound_sync_objects_remaining", Name: "inbound"}, + }, + } + adDRAReplicationObjectsFilteredChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_objects_filtered", + Title: "DRA replication objects filtered", + Units: "objects/s", + Fam: "replication", + Ctx: "ad.dra_replication_objects_filtered", + Priority: prioADDRAReplicationObjectsFiltered, + Dims: module.Dims{ + {ID: "ad_replication_inbound_objects_filtered_total", Name: "inbound", Algo: module.Incremental}, + }, + } + adDRAReplicationPropertiesUpdatedChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_properties_updated", + Title: "DRA replication properties updated", + Units: "properties/s", + Fam: "replication", + Ctx: "ad.dra_replication_properties_updated", + Priority: prioADDRAReplicationPropertiesUpdated, + Dims: module.Dims{ + {ID: "ad_replication_inbound_properties_updated_total", Name: "inbound", Algo: module.Incremental}, + }, + } + adDRAReplicationPropertiesFilteredChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_properties_filtered", + Title: "DRA replication properties filtered", + Units: "properties/s", + Fam: 
"replication", + Ctx: "ad.dra_replication_properties_filtered", + Priority: prioADDRAReplicationPropertiesFiltered, + Dims: module.Dims{ + {ID: "ad_replication_inbound_properties_filtered_total", Name: "inbound", Algo: module.Incremental}, + }, + } + adDRAReplicationPendingSyncsChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_pending_syncs", + Title: "DRA replication pending syncs", + Units: "syncs", + Fam: "replication", + Ctx: "ad.dra_replication_pending_syncs", + Priority: prioADReplicationPendingSyncs, + Dims: module.Dims{ + {ID: "ad_replication_pending_synchronizations", Name: "pending"}, + }, + } + adDRAReplicationSyncRequestsChart = module.Chart{ + OverModule: "ad", + ID: "ad_dra_replication_sync_requests", + Title: "DRA replication sync requests", + Units: "requests/s", + Fam: "replication", + Ctx: "ad.dra_replication_sync_requests", + Priority: prioADDRASyncRequests, + Dims: module.Dims{ + {ID: "ad_replication_sync_requests_total", Name: "request", Algo: module.Incremental}, + }, + } + adDirectoryServiceThreadsChart = module.Chart{ + OverModule: "ad", + ID: "ad_ds_threads", + Title: "Directory Service threads", + Units: "threads", + Fam: "replication", + Ctx: "ad.ds_threads", + Priority: prioADDirectoryServiceThreadsInUse, + Dims: module.Dims{ + {ID: "ad_directory_service_threads", Name: "in_use"}, + }, + } + adLDAPLastBindTimeChart = module.Chart{ + OverModule: "ad", + ID: "ad_ldap_last_bind_time", + Title: "LDAP last successful bind time", + Units: "seconds", + Fam: "bind", + Ctx: "ad.ldap_last_bind_time", + Priority: prioADLDAPBindTime, + Dims: module.Dims{ + {ID: "ad_ldap_last_bind_time_seconds", Name: "last_bind"}, + }, + } + adBindsTotalChart = module.Chart{ + OverModule: "ad", + ID: "ad_binds", + Title: "Successful binds", + Units: "bind/s", + Fam: "bind", + Ctx: "ad.binds", + Priority: prioADBindsTotal, + Dims: module.Dims{ + {ID: "ad_binds_total", Name: "binds", Algo: module.Incremental}, + }, + } + adLDAPSearchesChart = module.Chart{ + OverModule: "ad", + ID: "ad_ldap_searches", + Title: "LDAP client search operations", + Units: "searches/s", + Fam: "ldap", + Ctx: "ad.ldap_searches", + Priority: prioADLDAPSearchesTotal, + Dims: module.Dims{ + {ID: "ad_ldap_searches_total", Name: "searches", Algo: module.Incremental}, + }, + } + // https://techcommunity.microsoft.com/t5/ask-the-directory-services-team/understanding-atq-performance-counters-yet-another-twist-in-the/ba-p/400293 + adATQAverageRequestLatencyChart = module.Chart{ + OverModule: "ad", + ID: "ad_atq_average_request_latency", + Title: "Average request processing time", + Units: "seconds", + Fam: "queue", + Ctx: "ad.atq_average_request_latency", + Priority: prioADATQAverageRequestLatency, + Dims: module.Dims{ + {ID: "ad_atq_average_request_latency", Name: "time", Div: precision}, + }, + } + adATQOutstandingRequestsChart = module.Chart{ + OverModule: "ad", + ID: "ad_atq_outstanding_requests", + Title: "Outstanding requests", + Units: "requests", + Fam: "queue", + Ctx: "ad.atq_outstanding_requests", + Priority: prioADATQOutstandingRequests, + Dims: module.Dims{ + {ID: "ad_atq_outstanding_requests", Name: "outstanding"}, + }, + } +) + +// AD CS +var ( + adcsCertTemplateChartsTmpl = module.Charts{ + adcsCertTemplateRequestsChartTmpl.Copy(), + adcsCertTemplateFailedRequestsChartTmpl.Copy(), + adcsCertTemplateIssuedRequestsChartTmpl.Copy(), + adcsCertTemplatePendingRequestsChartTmpl.Copy(), + adcsCertTemplateRequestProcessingTimeChartTmpl.Copy(), + + adcsCertTemplateRetrievalsChartTmpl.Copy(), + 
adcsCertificateRetrievalsTimeChartTmpl.Copy(), + adcsCertTemplateRequestCryptoSigningTimeChartTmpl.Copy(), + adcsCertTemplateRequestPolicyModuleProcessingTimeChartTmpl.Copy(), + adcsCertTemplateChallengeResponseChartTmpl.Copy(), + adcsCertTemplateChallengeResponseProcessingTimeChartTmpl.Copy(), + adcsCertTemplateSignedCertificateTimestampListsChartTmpl.Copy(), + adcsCertTemplateSignedCertificateTimestampListProcessingTimeChartTmpl.Copy(), + } + adcsCertTemplateRequestsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_requests", + Title: "Certificate requests processed", + Units: "requests/s", + Fam: "requests", + Ctx: "adcs.cert_template_requests", + Priority: prioADCSCertTemplateRequests, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + adcsCertTemplateFailedRequestsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_failed_requests", + Title: "Certificate failed requests processed", + Units: "requests/s", + Fam: "requests", + Ctx: "adcs.cert_template_failed_requests", + Priority: prioADCSCertTemplateFailedRequests, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_failed_requests_total", Name: "failed", Algo: module.Incremental}, + }, + } + adcsCertTemplateIssuedRequestsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_issued_requests", + Title: "Certificate issued requests processed", + Units: "requests/s", + Fam: "requests", + Ctx: "adcs.cert_template_issued_requests", + Priority: prioADCSCertTemplateIssuesRequests, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_issued_requests_total", Name: "issued", Algo: module.Incremental}, + }, + } + adcsCertTemplatePendingRequestsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_pending_requests", + Title: "Certificate pending requests processed", + Units: "requests/s", + Fam: "requests", + Ctx: "adcs.cert_template_pending_requests", + Priority: prioADCSCertTemplatePendingRequests, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_pending_requests_total", Name: "pending", Algo: module.Incremental}, + }, + } + adcsCertTemplateRequestProcessingTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_request_processing_time", + Title: "Certificate last request processing time", + Units: "seconds", + Fam: "requests", + Ctx: "adcs.cert_template_request_processing_time", + Priority: prioADCSCertTemplateRequestProcessingTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_request_processing_time_seconds", Name: "processing_time", Div: precision}, + }, + } + adcsCertTemplateChallengeResponseChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_challenge_responses", + Title: "Certificate challenge responses", + Units: "responses/s", + Fam: "responses", + Ctx: "adcs.cert_template_challenge_responses", + Priority: prioADCSCertTemplateChallengeResponses, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_challenge_responses_total", Name: "challenge", Algo: module.Incremental}, + }, + } + adcsCertTemplateRetrievalsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_retrievals", + Title: "Total of certificate retrievals", + Units: "retrievals/s", + Fam: "retrievals", + Ctx: "adcs.cert_template_retrievals", + Priority: prioADCSCertTemplateRetrievals, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_retrievals_total", Name: "retrievals", Algo: module.Incremental}, + }, + } + 
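+	// NOTE: the "%s" placeholders in the template chart and dimension IDs are
+	// filled in once per discovered certificate template. A minimal sketch of
+	// that instantiation (the newCertTemplateCharts helper is illustrative,
+	// not necessarily the collector's actual API):
+	//
+	//	func newCertTemplateCharts(template string) *module.Charts {
+	//		charts := adcsCertTemplateChartsTmpl.Copy()
+	//		for _, chart := range *charts {
+	//			chart.ID = fmt.Sprintf(chart.ID, template)
+	//			for _, dim := range chart.Dims {
+	//				dim.ID = fmt.Sprintf(dim.ID, template)
+	//			}
+	//		}
+	//		return charts
+	//	}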
adcsCertificateRetrievalsTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_retrievals_processing_time", + Title: "Certificate last retrieval processing time", + Units: "seconds", + Fam: "retrievals", + Ctx: "adcs.cert_template_retrieval_processing_time", + Priority: prioADCSCertTemplateRetrievalProcessingTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_retrievals_processing_time_seconds", Name: "processing_time", Div: precision}, + }, + } + adcsCertTemplateRequestCryptoSigningTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_request_cryptographic_signing_time", + Title: "Certificate last signing operation request time", + Units: "seconds", + Fam: "timings", + Ctx: "adcs.cert_template_request_cryptographic_signing_time", + Priority: prioADCSCertTemplateRequestCryptoSigningTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_request_cryptographic_signing_time_seconds", Name: "signing_time", Div: precision}, + }, + } + adcsCertTemplateRequestPolicyModuleProcessingTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_request_policy_module_processing_time", + Title: "Certificate last policy module processing request time", + Units: "seconds", + Fam: "timings", + Ctx: "adcs.cert_template_request_policy_module_processing", + Priority: prioADCSCertTemplateRequestPolicyModuleProcessingTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_request_policy_module_processing_time_seconds", Name: "processing_time", Div: precision}, + }, + } + adcsCertTemplateChallengeResponseProcessingTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_challenge_response_processing_time", + Title: "Certificate last challenge response time", + Units: "seconds", + Fam: "timings", + Ctx: "adcs.cert_template_challenge_response_processing_time", + Priority: prioADCSCertTemplateChallengeResponseProcessingTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_challenge_response_processing_time_seconds", Name: "processing_time", Div: precision}, + }, + } + adcsCertTemplateSignedCertificateTimestampListsChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_signed_certificate_timestamp_lists", + Title: "Certificate Signed Certificate Timestamp Lists processed", + Units: "lists/s", + Fam: "timings", + Ctx: "adcs.cert_template_signed_certificate_timestamp_lists", + Priority: prioADCSCertTemplateSignedCertificateTimestampLists, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_signed_certificate_timestamp_lists_total", Name: "processed", Algo: module.Incremental}, + }, + } + adcsCertTemplateSignedCertificateTimestampListProcessingTimeChartTmpl = module.Chart{ + OverModule: "adcs", + ID: "adcs_cert_template_%s_signed_certificate_timestamp_list_processing_time", + Title: "Certificate last Signed Certificate Timestamp List process time", + Units: "seconds", + Fam: "timings", + Ctx: "adcs.cert_template_signed_certificate_timestamp_list_processing_time", + Priority: prioADCSCertTemplateSignedCertificateTimestampListProcessingTime, + Dims: module.Dims{ + {ID: "adcs_cert_template_%s_signed_certificate_timestamp_list_processing_time_seconds", Name: "processing_time", Div: precision}, + }, + } +) + +// AD FS +var ( + adfsCharts = module.Charts{ + adfsADLoginConnectionFailuresChart.Copy(), + adfsCertificateAuthenticationsChart.Copy(), + adfsDBArtifactFailuresChart.Copy(), + adfsDBArtifactQueryTimeSecondsChart.Copy(), + adfsDBConfigFailuresChart.Copy(), + 
adfsDBConfigQueryTimeSecondsChart.Copy(), + adfsDeviceAuthenticationsChart.Copy(), + adfsExternalAuthenticationsChart.Copy(), + adfsFederatedAuthenticationsChart.Copy(), + adfsFederationMetadataRequestsChart.Copy(), + + adfsOAuthAuthorizationRequestsChart.Copy(), + adfsOAuthClientAuthenticationsChart.Copy(), + adfsOAuthClientCredentialRequestsChart.Copy(), + adfsOAuthClientPrivKeyJwtAuthenticationsChart.Copy(), + adfsOAuthClientSecretBasicAuthenticationsChart.Copy(), + adfsOAuthClientSecretPostAuthenticationsChart.Copy(), + adfsOAuthClientWindowsAuthenticationsChart.Copy(), + adfsOAuthLogonCertificateRequestsChart.Copy(), + adfsOAuthPasswordGrantRequestsChart.Copy(), + adfsOAuthTokenRequestsChart.Copy(), + + adfsPassiveRequestsChart.Copy(), + adfsPassportAuthenticationsChart.Copy(), + adfsPasswordChangeChart.Copy(), + adfsSAMLPTokenRequestsChart.Copy(), + adfsSSOAuthenticationsChart.Copy(), + adfsTokenRequestsChart.Copy(), + adfsUserPasswordAuthenticationsChart.Copy(), + adfsWindowsIntegratedAuthenticationsChart.Copy(), + adfsWSFedTokenRequestsSuccessChart.Copy(), + adfsWSTrustTokenRequestsSuccessChart.Copy(), + } + + adfsADLoginConnectionFailuresChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_ad_login_connection_failures", + Title: "Connection failures", + Units: "failures/s", + Fam: "ad", + Ctx: "adfs.ad_login_connection_failures", + Priority: prioADFSADLoginConnectionFailures, + Dims: module.Dims{ + {ID: "adfs_ad_login_connection_failures_total", Name: "connection", Algo: module.Incremental}, + }, + } + adfsCertificateAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_certificate_authentications", + Title: "User Certificate authentications", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.certificate_authentications", + Priority: prioADFSCertificateAuthentications, + Dims: module.Dims{ + {ID: "adfs_certificate_authentications_total", Name: "authentications", Algo: module.Incremental}, + }, + } + + adfsDBArtifactFailuresChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_db_artifact_failures", + Title: "Connection failures to the artifact database", + Units: "failures/s", + Fam: "db artifact", + Ctx: "adfs.db_artifact_failures", + Priority: prioADFSDBArtifactFailures, + Dims: module.Dims{ + {ID: "adfs_db_artifact_failure_total", Name: "connection", Algo: module.Incremental}, + }, + } + adfsDBArtifactQueryTimeSecondsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_db_artifact_query_time_seconds", + Title: "Time taken for an artifact database query", + Units: "seconds/s", + Fam: "db artifact", + Ctx: "adfs.db_artifact_query_time_seconds", + Priority: prioADFSDBArtifactQueryTimeSeconds, + Dims: module.Dims{ + {ID: "adfs_db_artifact_query_time_seconds_total", Name: "query_time", Algo: module.Incremental, Div: precision}, + }, + } + adfsDBConfigFailuresChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_db_config_failures", + Title: "Connection failures to the configuration database", + Units: "failures/s", + Fam: "db config", + Ctx: "adfs.db_config_failures", + Priority: prioADFSDBConfigFailures, + Dims: module.Dims{ + {ID: "adfs_db_config_failure_total", Name: "connection", Algo: module.Incremental}, + }, + } + adfsDBConfigQueryTimeSecondsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_db_config_query_time_seconds", + Title: "Time taken for a configuration database query", + Units: "seconds/s", + Fam: "db config", + Ctx: "adfs.db_config_query_time_seconds", + Priority: prioADFSDBConfigQueryTimeSeconds, + Dims: module.Dims{ + {ID: 
"adfs_db_config_query_time_seconds_total", Name: "query_time", Algo: module.Incremental, Div: precision}, + }, + } + adfsDeviceAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_device_authentications", + Title: "Device authentications", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.device_authentications", + Priority: prioADFSDeviceAuthentications, + Dims: module.Dims{ + {ID: "adfs_device_authentications_total", Name: "authentications", Algo: module.Incremental}, + }, + } + adfsExternalAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_external_authentications", + Title: "Authentications from external MFA providers", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.external_authentications", + Priority: prioADFSExternalAuthentications, + Dims: module.Dims{ + {ID: "adfs_external_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_external_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsFederatedAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_federated_authentications", + Title: "Authentications from Federated Sources", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.federated_authentications", + Priority: prioADFSFederatedAuthentications, + Dims: module.Dims{ + {ID: "adfs_federated_authentications_total", Name: "authentications", Algo: module.Incremental}, + }, + } + adfsFederationMetadataRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_federation_metadata_requests", + Title: "Federation Metadata requests", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.federation_metadata_requests", + Priority: prioADFSFederationMetadataRequests, + Dims: module.Dims{ + {ID: "adfs_federation_metadata_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + + adfsOAuthAuthorizationRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_authorization_requests", + Title: "Incoming requests to the OAuth Authorization endpoint", + Units: "requests/s", + Fam: "oauth", + Ctx: "adfs.oauth_authorization_requests", + Priority: prioADFSOauthAuthorizationRequests, + Dims: module.Dims{ + {ID: "adfs_oauth_authorization_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + adfsOAuthClientAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_authentications", + Title: "OAuth client authentications", + Units: "authentications/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_authentications", + Priority: prioADFSOauthClientAuthentications, + Dims: module.Dims{ + {ID: "adfs_oauth_client_authentication_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_authentication_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthClientCredentialRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_credentials_requests", + Title: "OAuth client credentials requests", + Units: "requests/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_credentials_requests", + Priority: prioADFSOauthClientCredentials, + Dims: module.Dims{ + {ID: "adfs_oauth_client_credentials_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_credentials_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthClientPrivKeyJwtAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_privkey_jwt_authentications", + Title: "OAuth client private 
key JWT authentications", + Units: "authentications/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_privkey_jwt_authentications", + Priority: prioADFSOauthClientPrivkeyJwtAuthentication, + Dims: module.Dims{ + {ID: "adfs_oauth_client_privkey_jwt_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_privkey_jtw_authentication_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthClientSecretBasicAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_secret_basic_authentications", + Title: "OAuth client secret basic authentications", + Units: "authentications/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_secret_basic_authentications", + Priority: prioADFSOauthClientSecretBasicAuthentications, + Dims: module.Dims{ + {ID: "adfs_oauth_client_secret_basic_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_secret_basic_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthClientSecretPostAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_secret_post_authentications", + Title: "OAuth client secret post authentications", + Units: "authentications/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_secret_post_authentications", + Priority: prioADFSOauthClientSecretPostAuthentications, + Dims: module.Dims{ + {ID: "adfs_oauth_client_secret_post_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_secret_post_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthClientWindowsAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_client_windows_authentications", + Title: "OAuth client windows integrated authentications", + Units: "authentications/s", + Fam: "oauth", + Ctx: "adfs.oauth_client_windows_authentications", + Priority: prioADFSOauthClientWindowsAuthentications, + Dims: module.Dims{ + {ID: "adfs_oauth_client_windows_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_client_windows_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthLogonCertificateRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_logon_certificate_requests", + Title: "OAuth logon certificate requests", + Units: "requests/s", + Fam: "oauth", + Ctx: "adfs.oauth_logon_certificate_requests", + Priority: prioADFSOauthLogonCertificateRequests, + Dims: module.Dims{ + {ID: "adfs_oauth_logon_certificate_token_requests_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_logon_certificate_requests_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthPasswordGrantRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_password_grant_requests", + Title: "OAuth password grant requests", + Units: "requests/s", + Fam: "oauth", + Ctx: "adfs.oauth_password_grant_requests", + Priority: prioADFSOauthPasswordGrantRequests, + Dims: module.Dims{ + {ID: "adfs_oauth_password_grant_requests_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_oauth_password_grant_requests_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsOAuthTokenRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_oauth_token_requests_success", + Title: "Successful RP token requests over OAuth protocol", + Units: "requests/s", + 
Fam: "oauth", + Ctx: "adfs.oauth_token_requests_success", + Priority: prioADFSOauthTokenRequestsSuccess, + Dims: module.Dims{ + {ID: "adfs_oauth_token_requests_success_total", Name: "success", Algo: module.Incremental}, + }, + } + + adfsPassiveRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_passive_requests", + Title: "Passive requests", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.passive_requests", + Priority: prioADFSPassiveRequests, + Dims: module.Dims{ + {ID: "adfs_passive_requests_total", Name: "passive", Algo: module.Incremental}, + }, + } + adfsPassportAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_passport_authentications", + Title: "Microsoft Passport SSO authentications", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.passport_authentications", + Priority: prioADFSPassportAuthentications, + Dims: module.Dims{ + {ID: "adfs_passport_authentications_total", Name: "passport", Algo: module.Incremental}, + }, + } + adfsPasswordChangeChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_password_change_requests", + Title: "Password change requests", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.password_change_requests", + Priority: prioADFSPasswordChangeRequests, + Dims: module.Dims{ + {ID: "adfs_password_change_succeeded_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_password_change_failed_total", Name: "failed", Algo: module.Incremental}, + }, + } + adfsSAMLPTokenRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_samlp_token_requests_success", + Title: "Successful RP token requests over SAML-P protocol", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.samlp_token_requests_success", + Priority: prioADFSSAMLPTokenRequests, + Dims: module.Dims{ + {ID: "adfs_samlp_token_requests_success_total", Name: "success", Algo: module.Incremental}, + }, + } + adfsWSTrustTokenRequestsSuccessChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_wstrust_token_requests_success", + Title: "Successful RP token requests over WS-Trust protocol", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.wstrust_token_requests_success", + Priority: prioADFSWSTrustTokenRequestsSuccess, + Dims: module.Dims{ + {ID: "adfs_wstrust_token_requests_success_total", Name: "success", Algo: module.Incremental}, + }, + } + adfsSSOAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_sso_authentications", + Title: "SSO authentications", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.sso_authentications", + Priority: prioADFSSSOAuthentications, + Dims: module.Dims{ + {ID: "adfs_sso_authentications_success_total", Name: "success", Algo: module.Incremental}, + {ID: "adfs_sso_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsTokenRequestsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_token_requests", + Title: "Token access requests", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.token_requests", + Priority: prioADFSTokenRequests, + Dims: module.Dims{ + {ID: "adfs_token_requests_total", Name: "requests", Algo: module.Incremental}, + }, + } + adfsUserPasswordAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_userpassword_authentications", + Title: "AD U/P authentications", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.userpassword_authentications", + Priority: prioADFSUserPasswordAuthentications, + Dims: module.Dims{ + {ID: "adfs_sso_authentications_success_total", Name: "success", Algo: 
module.Incremental}, + {ID: "adfs_sso_authentications_failure_total", Name: "failure", Algo: module.Incremental}, + }, + } + adfsWindowsIntegratedAuthenticationsChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_windows_integrated_authentications", + Title: "Windows integrated authentications using Kerberos or NTLM", + Units: "authentications/s", + Fam: "auth", + Ctx: "adfs.windows_integrated_authentications", + Priority: prioADFSWindowsIntegratedAuthentications, + Dims: module.Dims{ + {ID: "adfs_windows_integrated_authentications_total", Name: "authentications", Algo: module.Incremental}, + }, + } + adfsWSFedTokenRequestsSuccessChart = module.Chart{ + OverModule: "adfs", + ID: "adfs_wsfed_token_requests_success", + Title: "Successful RP token requests over WS-Fed protocol", + Units: "requests/s", + Fam: "requests", + Ctx: "adfs.wsfed_token_requests_success", + Priority: prioADFSWSFedTokenRequestsSuccess, + Dims: module.Dims{ + {ID: "adfs_wsfed_token_requests_success_total", Name: "success", Algo: module.Incremental}, + }, + } +) + +// Exchange +var ( + exchangeCharts = module.Charts{ + exchangeActiveSyncPingCMDsPendingChart.Copy(), + exchangeActiveSyncRequestsChart.Copy(), + exchangeActiveSyncCMDsChart.Copy(), + exchangeAutoDiscoverRequestsChart.Copy(), + exchangeAvailableServiceRequestsChart.Copy(), + exchangeOWACurrentUniqueUsersChart.Copy(), + exchangeOWARequestsChart.Copy(), + exchangeRPCActiveUsersCountChart.Copy(), + exchangeRPCAvgLatencyChart.Copy(), + exchangeRPCConnectionChart.Copy(), + exchangeRPCOperationsChart.Copy(), + exchangeRPCRequestsChart.Copy(), + exchangeRPCUserChart.Copy(), + exchangeTransportQueuesActiveMailBoxDelivery.Copy(), + exchangeTransportQueuesExternalActiveRemoteDelivery.Copy(), + exchangeTransportQueuesExternalLargestDelivery.Copy(), + exchangeTransportQueuesInternalActiveRemoteDelivery.Copy(), + exchangeTransportQueuesInternalLargestDelivery.Copy(), + exchangeTransportQueuesRetryMailboxDelivery.Copy(), + exchangeTransportQueuesUnreachable.Copy(), + exchangeTransportQueuesPoison.Copy(), + } + exchangeWorkloadChartsTmpl = module.Charts{ + exchangeWorkloadActiveTasks.Copy(), + exchangeWorkloadCompletedTasks.Copy(), + exchangeWorkloadQueuedTasks.Copy(), + exchangeWorkloadYieldedTasks.Copy(), + + exchangeWorkloadActivityStatus.Copy(), + } + exchangeLDAPChartsTmpl = module.Charts{ + exchangeLDAPLongRunningOPS.Copy(), + exchangeLDAPReadTime.Copy(), + exchangeLDAPSearchTime.Copy(), + exchangeLDAPTimeoutErrors.Copy(), + exchangeLDAPWriteTime.Copy(), + } + exchangeHTTPProxyChartsTmpl = module.Charts{ + exchangeProxyAvgAuthLatency.Copy(), + exchangeProxyAvgCasProcessingLatencySec.Copy(), + exchangeProxyMailboxProxyFailureRace.Copy(), + exchangeProxyMailboxServerLocatorAvgLatencySec.Copy(), + exchangeProxyOutstandingProxyRequests.Copy(), + exchangeProxyRequestsTotal.Copy(), + } + + exchangeActiveSyncPingCMDsPendingChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_activesync_ping_cmds_pending", + Title: "Ping commands pending in queue", + Units: "commands", + Fam: "sync", + Ctx: "exchange.activesync_ping_cmds_pending", + Priority: prioExchangeActiveSyncPingCMDsPending, + Dims: module.Dims{ + {ID: "exchange_activesync_ping_cmds_pending", Name: "pending"}, + }, + } + exchangeActiveSyncRequestsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_activesync_requests", + Title: "HTTP requests received from ASP.NET", + Units: "requests/s", + Fam: "sync", + Ctx: "exchange.activesync_requests", + Priority: prioExchangeActiveSyncRequests, + Dims: 
module.Dims{ + {ID: "exchange_activesync_requests_total", Name: "received", Algo: module.Incremental}, + }, + } + exchangeActiveSyncCMDsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_activesync_sync_cmds", + Title: "Sync commands processed", + Units: "commands/s", + Fam: "sync", + Ctx: "exchange.activesync_sync_cmds", + Priority: prioExchangeActiveSyncSyncCMDs, + Dims: module.Dims{ + {ID: "exchange_activesync_sync_cmds_total", Name: "processed", Algo: module.Incremental}, + }, + } + exchangeAutoDiscoverRequestsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_autodiscover_requests", + Title: "Autodiscover service requests processed", + Units: "requests/s", + Fam: "requests", + Ctx: "exchange.autodiscover_requests", + Priority: prioExchangeAutoDiscoverRequests, + Dims: module.Dims{ + {ID: "exchange_autodiscover_requests_total", Name: "processed", Algo: module.Incremental}, + }, + } + exchangeAvailableServiceRequestsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_avail_service_requests", + Title: "Requests serviced", + Units: "requests/s", + Fam: "requests", + Ctx: "exchange.avail_service_requests", + Priority: prioExchangeAvailServiceRequests, + Dims: module.Dims{ + {ID: "exchange_avail_service_requests_per_sec", Name: "serviced", Algo: module.Incremental}, + }, + } + exchangeOWACurrentUniqueUsersChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_owa_current_unique_users", + Title: "Unique users currently logged on to Outlook Web App", + Units: "users", + Fam: "owa", + Ctx: "exchange.owa_current_unique_users", + Priority: prioExchangeOWACurrentUniqueUsers, + Dims: module.Dims{ + {ID: "exchange_owa_current_unique_users", Name: "logged-in"}, + }, + } + exchangeOWARequestsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_owa_requests_total", + Title: "Requests handled by Outlook Web App", + Units: "requests/s", + Fam: "owa", + Ctx: "exchange.owa_requests_total", + Priority: prioExchangeOWARequestsTotal, + Dims: module.Dims{ + {ID: "exchange_owa_requests_total", Name: "handled", Algo: module.Incremental}, + }, + } + exchangeRPCActiveUsersCountChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_active_user", + Title: "Active unique users in the last 2 minutes", + Units: "users", + Fam: "rpc", + Ctx: "exchange.rpc_active_user_count", + Priority: prioExchangeRPCActiveUserCount, + Dims: module.Dims{ + {ID: "exchange_rpc_active_user_count", Name: "active"}, + }, + } + exchangeRPCAvgLatencyChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_avg_latency", + Title: "Average latency", + Units: "seconds", + Fam: "rpc", + Ctx: "exchange.rpc_avg_latency", + Priority: prioExchangeRPCAvgLatency, + Dims: module.Dims{ + {ID: "exchange_rpc_avg_latency_sec", Name: "latency", Div: precision}, + }, + } + exchangeRPCConnectionChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_connection", + Title: "Client connections", + Units: "connections", + Fam: "rpc", + Ctx: "exchange.rpc_connection_count", + Priority: prioExchangeRPCConnectionCount, + Dims: module.Dims{ + {ID: "exchange_rpc_connection_count", Name: "connections"}, + }, + } + exchangeRPCOperationsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_operations", + Title: "RPC operations", + Units: "operations/s", + Fam: "rpc", + Ctx: "exchange.rpc_operations", + Priority: prioExchangeRPCOperationsTotal, + Dims: module.Dims{ + {ID: "exchange_rpc_operations_total", Name: "operations", Algo: module.Incremental}, + }, + } + 
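+	// NOTE: dimensions with Algo: module.Incremental are monotonically
+	// increasing counters; the framework derives a per-second rate from
+	// successive samples. Div: precision undoes fixed-point scaling applied
+	// at collection time, e.g. for the latency dimension above (the
+	// latencySeconds variable is illustrative):
+	//
+	//	mx["exchange_rpc_avg_latency_sec"] = int64(latencySeconds * precision)
+	//
+	// so the chart renders the value in plain seconds.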
exchangeRPCRequestsChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_requests_total", + Title: "Client requests currently being processed", + Units: "requests", + Fam: "rpc", + Ctx: "exchange.rpc_requests", + Priority: prioExchangeRPCRequests, + Dims: module.Dims{ + {ID: "exchange_rpc_requests", Name: "processed"}, + }, + } + exchangeRPCUserChart = module.Chart{ + OverModule: "exchange", + ID: "exchange_rpc_user", + Title: "RPC users", + Units: "users", + Fam: "rpc", + Ctx: "exchange.rpc_user_count", + Priority: prioExchangeRpcUserCount, + Dims: module.Dims{ + {ID: "exchange_rpc_user_count", Name: "users"}, + }, + } + + // Source: https://learn.microsoft.com/en-us/exchange/mail-flow/queues/queues?view=exchserver-2019 + exchangeTransportQueuesActiveMailBoxDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_active_mailbox_delivery", + Title: "Active Mailbox Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_active_mail_box_delivery", + Priority: prioExchangeTransportQueuesActiveMailboxDelivery, + Dims: module.Dims{ + {ID: "exchange_transport_queues_active_mailbox_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_active_mailbox_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_active_mailbox_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_active_mailbox_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesExternalActiveRemoteDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_external_active_remote_delivery", + Title: "External Active Remote Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_external_active_remote_delivery", + Priority: prioExchangeTransportQueuesExternalActiveRemoteDelivery, + Dims: module.Dims{ + {ID: "exchange_transport_queues_external_active_remote_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_external_active_remote_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_external_active_remote_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_external_active_remote_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesExternalLargestDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_external_largest_delivery", + Title: "External Largest Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_external_largest_delivery", + Priority: prioExchangeTransportQueuesExternalLargestDelivery, + Dims: module.Dims{ + {ID: "exchange_transport_queues_external_largest_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_external_largest_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_external_largest_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_external_largest_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesInternalActiveRemoteDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_internal_active_remote_delivery", + Title: "Internal Active Remote Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_internal_active_remote_delivery", + Priority: prioExchangeTransportQueuesInternalActiveRemoteDeliery, + Dims: module.Dims{ + {ID: 
"exchange_transport_queues_internal_active_remote_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_internal_active_remote_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_internal_active_remote_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_internal_active_remote_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesInternalLargestDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_internal_largest_delivery", + Title: "Internal Largest Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_internal_largest_delivery", + Priority: prioExchangeTransportQueuesInternalLargestDelivery, + Dims: module.Dims{ + {ID: "exchange_transport_queues_internal_largest_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_internal_largest_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_internal_largest_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_internal_largest_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesRetryMailboxDelivery = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_retry_mailbox_delivery", + Title: "Internal Active Remote Delivery Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_retry_mailbox_delivery", + Priority: prioExchangeTransportQueuesRetryMailboxDelivery, + Dims: module.Dims{ + {ID: "exchange_transport_queues_retry_mailbox_delivery_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_retry_mailbox_delivery_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_retry_mailbox_delivery_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_retry_mailbox_delivery_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesUnreachable = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_unreachable", + Title: "Unreachable Queue length", + Units: "messages", + Fam: "queue", + Ctx: "exchange.transport_queues_unreachable", + Priority: prioExchangeTransportQueuesUnreachable, + Dims: module.Dims{ + {ID: "exchange_transport_queues_unreachable_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_unreachable_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_unreachable_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_unreachable_normal_priority", Name: "normal"}, + }, + } + exchangeTransportQueuesPoison = module.Chart{ + OverModule: "exchange", + ID: "exchange_transport_queues_poison", + Title: "Poison Queue Length", + Units: "messages/s", + Fam: "queue", + Ctx: "exchange.transport_queues_poison", + Priority: prioExchangeTransportQueuesPoison, + Dims: module.Dims{ + {ID: "exchange_transport_queues_poison_high_priority", Name: "high"}, + {ID: "exchange_transport_queues_poison_low_priority", Name: "low"}, + {ID: "exchange_transport_queues_poison_none_priority", Name: "none"}, + {ID: "exchange_transport_queues_poison_normal_priority", Name: "normal"}, + }, + } + + exchangeWorkloadActiveTasks = module.Chart{ + OverModule: "exchange", + ID: "exchange_workload_%s_tasks", + Title: "Workload active tasks", + Units: "tasks", + Fam: "workload", + Ctx: "exchange.workload_active_tasks", + Priority: prioExchangeWorkloadActiveTasks, + Dims: module.Dims{ + {ID: "exchange_workload_%s_active_tasks", Name: "active"}, + }, + } + exchangeWorkloadCompletedTasks = module.Chart{ + 
OverModule: "exchange", + ID: "exchange_workload_%s_completed_tasks", + Title: "Workload completed tasks", + Units: "tasks/s", + Fam: "workload", + Ctx: "exchange.workload_completed_tasks", + Priority: prioExchangeWorkloadCompleteTasks, + Dims: module.Dims{ + {ID: "exchange_workload_%s_completed_tasks", Name: "completed", Algo: module.Incremental}, + }, + } + exchangeWorkloadQueuedTasks = module.Chart{ + OverModule: "exchange", + ID: "exchange_workload_%s_queued_tasks", + Title: "Workload queued tasks", + Units: "tasks/s", + Fam: "workload", + Ctx: "exchange.workload_queued_tasks", + Priority: prioExchangeWorkloadQueueTasks, + Dims: module.Dims{ + {ID: "exchange_workload_%s_queued_tasks", Name: "queued", Algo: module.Incremental}, + }, + } + exchangeWorkloadYieldedTasks = module.Chart{ + OverModule: "exchange", + ID: "exchange_workload_%s_yielded_tasks", + Title: "Workload yielded tasks", + Units: "tasks/s", + Fam: "workload", + Ctx: "exchange.workload_yielded_tasks", + Priority: prioExchangeWorkloadYieldedTasks, + Dims: module.Dims{ + {ID: "exchange_workload_%s_yielded_tasks", Name: "yielded", Algo: module.Incremental}, + }, + } + exchangeWorkloadActivityStatus = module.Chart{ + OverModule: "exchange", + ID: "exchange_workload_%s_activity_status", + Title: "Workload activity status", + Units: "status", + Fam: "workload", + Ctx: "exchange.workload_activity_status", + Priority: prioExchangeWorkloadActivityStatus, + Dims: module.Dims{ + {ID: "exchange_workload_%s_is_active", Name: "active"}, + {ID: "exchange_workload_%s_is_paused", Name: "paused"}, + }, + } + + exchangeLDAPLongRunningOPS = module.Chart{ + OverModule: "exchange", + ID: "exchange_ldap_%s_long_running_ops", + Title: "Long Running LDAP operations", + Units: "operations/s", + Fam: "ldap", + Ctx: "exchange.ldap_long_running_ops_per_sec", + Priority: prioExchangeLDAPLongRunningOPS, + Dims: module.Dims{ + {ID: "exchange_ldap_%s_long_running_ops_per_sec", Name: "long-running", Algo: module.Incremental}, + }, + } + exchangeLDAPReadTime = module.Chart{ + OverModule: "exchange", + ID: "exchange_ldap_%s_read_time", + Title: "Time to send an LDAP read request and receive a response", + Units: "seconds", + Fam: "ldap", + Ctx: "exchange.ldap_read_time", + Priority: prioExchangeLDAPReadTime, + Dims: module.Dims{ + {ID: "exchange_ldap_%s_read_time_sec", Name: "read", Algo: module.Incremental, Div: precision}, + }, + } + exchangeLDAPSearchTime = module.Chart{ + OverModule: "exchange", + ID: "exchange_ldap_%s_search_time", + Title: "Time to send an LDAP search request and receive a response", + Units: "seconds", + Fam: "ldap", + Ctx: "exchange.ldap_search_time", + Priority: prioExchangeLDAPSearchTime, + Dims: module.Dims{ + {ID: "exchange_ldap_%s_search_time_sec", Name: "search", Algo: module.Incremental, Div: precision}, + }, + } + exchangeLDAPWriteTime = module.Chart{ + OverModule: "exchange", + ID: "exchange_ldap_%s_write_time", + Title: "Time to send an LDAP search request and receive a response", + Units: "second", + Fam: "ldap", + Ctx: "exchange.ldap_write_time", + Priority: prioExchangeLDAPWriteTime, + Dims: module.Dims{ + {ID: "exchange_ldap_%s_write_time_sec", Name: "write", Algo: module.Incremental, Div: precision}, + }, + } + exchangeLDAPTimeoutErrors = module.Chart{ + OverModule: "exchange", + ID: "exchange_ldap_%s_timeout_errors", + Title: "LDAP timeout errors", + Units: "errors/s", + Fam: "ldap", + Ctx: "exchange.ldap_timeout_errors", + Priority: prioExchangeLDAPTimeoutErrors, + Dims: module.Dims{ + {ID: 
"exchange_ldap_%s_timeout_errors_total", Name: "timeout", Algo: module.Incremental}, + }, + } + + exchangeProxyAvgAuthLatency = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_avg_auth_latency", + Title: "Average time spent authenticating CAS", + Units: "seconds", + Fam: "proxy", + Ctx: "exchange.http_proxy_avg_auth_latency", + Priority: prioExchangeHTTPProxyAVGAuthLatency, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_avg_auth_latency", Name: "latency"}, + }, + } + exchangeProxyAvgCasProcessingLatencySec = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_avg_cas_processing_latency_sec", + Title: "Average time spent authenticating CAS", + Units: "seconds", + Fam: "proxy", + Ctx: "exchange.http_proxy_avg_cas_processing_latency_sec", + Priority: prioExchangeHTTPProxyAVGCASProcessingLatency, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_avg_cas_proccessing_latency_sec", Name: "latency"}, + }, + } + exchangeProxyMailboxProxyFailureRace = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_mailbox_proxy_failure_rate", + Title: "Percentage of failures between this CAS and MBX servers", + Units: "percentage", + Fam: "proxy", + Ctx: "exchange.http_proxy_mailbox_proxy_failure_rate", + Priority: prioExchangeHTTPProxyMailboxProxyFailureRate, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_mailbox_proxy_failure_rate", Name: "failures", Div: precision}, + }, + } + exchangeProxyMailboxServerLocatorAvgLatencySec = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_mailbox_server_locator_avg_latency_sec", + Title: "Average latency of MailboxServerLocator web service calls", + Units: "seconds", + Fam: "proxy", + Ctx: "exchange.http_proxy_mailbox_server_locator_avg_latency_sec", + Priority: prioExchangeHTTPProxyServerLocatorAvgLatency, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_mailbox_server_locator_avg_latency_sec", Name: "latency", Div: precision}, + }, + } + exchangeProxyOutstandingProxyRequests = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_outstanding_proxy_requests", + Title: "Concurrent outstanding proxy requests", + Units: "requests", + Fam: "proxy", + Ctx: "exchange.http_proxy_outstanding_proxy_requests", + Priority: prioExchangeHTTPProxyOutstandingProxyRequests, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_outstanding_proxy_requests", Name: "outstanding"}, + }, + } + exchangeProxyRequestsTotal = module.Chart{ + OverModule: "exchange", + ID: "exchange_proxy_%s_requests_total", + Title: "Number of proxy requests processed each second", + Units: "requests/s", + Fam: "proxy", + Ctx: "exchange.http_proxy_requests", + Priority: prioExchangeHTTPProxyRequestsTotal, + Dims: module.Dims{ + {ID: "exchange_http_proxy_%s_requests_total", Name: "processed", Algo: module.Incremental}, + }, + } +) + +// Logon +var ( + logonCharts = module.Charts{ + logonSessionsChart.Copy(), + } + logonSessionsChart = module.Chart{ + ID: "logon_active_sessions_by_type", + Title: "Active User Logon Sessions By Type", + Units: "sessions", + Fam: "logon", + Ctx: "windows.logon_type_sessions", + Type: module.Stacked, + Priority: prioLogonSessions, + Dims: module.Dims{ + {ID: "logon_type_system_sessions", Name: "system"}, + {ID: "logon_type_proxy_sessions", Name: "proxy"}, + {ID: "logon_type_network_sessions", Name: "network"}, + {ID: "logon_type_interactive_sessions", Name: "interactive"}, + {ID: "logon_type_batch_sessions", Name: "batch"}, + {ID: "logon_type_service_sessions", Name: "service"}, + {ID: 
"logon_type_unlock_sessions", Name: "unlock"}, + {ID: "logon_type_network_clear_text_sessions", Name: "network_clear_text"}, + {ID: "logon_type_new_credentials_sessions", Name: "new_credentials"}, + {ID: "logon_type_remote_interactive_sessions", Name: "remote_interactive"}, + {ID: "logon_type_cached_interactive_sessions", Name: "cached_interactive"}, + {ID: "logon_type_cached_remote_interactive_sessions", Name: "cached_remote_interactive"}, + {ID: "logon_type_cached_unlock_sessions", Name: "cached_unlock"}, + }, + } +) + +// Thermal zone +var ( + thermalzoneChartsTmpl = module.Charts{ + thermalzoneTemperatureChartTmpl.Copy(), + } + thermalzoneTemperatureChartTmpl = module.Chart{ + ID: "thermalzone_%s_temperature", + Title: "Thermal zone temperature", + Units: "Celsius", + Fam: "thermalzone", + Ctx: "windows.thermalzone_temperature", + Priority: prioThermalzoneTemperature, + Dims: module.Dims{ + {ID: "thermalzone_%s_temperature", Name: "temperature"}, + }, + } +) + +// Processes +var ( + processesCharts = module.Charts{ + processesCPUUtilizationTotalChart.Copy(), + processesMemoryUsageChart.Copy(), + processesHandlesChart.Copy(), + processesIOBytesChart.Copy(), + processesIOOperationsChart.Copy(), + processesPageFaultsChart.Copy(), + processesPageFileBytes.Copy(), + processesThreads.Copy(), + } + processesCPUUtilizationTotalChart = module.Chart{ + ID: "processes_cpu_utilization", + Title: "CPU usage (100% = 1 core)", + Units: "percentage", + Fam: "processes", + Ctx: "windows.processes_cpu_utilization", + Type: module.Stacked, + Priority: prioProcessesCPUUtilization, + } + processesMemoryUsageChart = module.Chart{ + ID: "processes_memory_usage", + Title: "Memory usage", + Units: "bytes", + Fam: "processes", + Ctx: "windows.processes_memory_usage", + Type: module.Stacked, + Priority: prioProcessesMemoryUsage, + } + processesIOBytesChart = module.Chart{ + ID: "processes_io_bytes", + Title: "Total of IO bytes (read, write, other)", + Units: "bytes/s", + Fam: "processes", + Ctx: "windows.processes_io_bytes", + Type: module.Stacked, + Priority: prioProcessesIOBytes, + } + processesIOOperationsChart = module.Chart{ + ID: "processes_io_operations", + Title: "Total of IO events (read, write, other)", + Units: "operations/s", + Fam: "processes", + Ctx: "windows.processes_io_operations", + Type: module.Stacked, + Priority: prioProcessesIOOperations, + } + processesPageFaultsChart = module.Chart{ + ID: "processes_page_faults", + Title: "Number of page faults", + Units: "pgfaults/s", + Fam: "processes", + Ctx: "windows.processes_page_faults", + Type: module.Stacked, + Priority: prioProcessesPageFaults, + } + processesPageFileBytes = module.Chart{ + ID: "processes_page_file_bytes", + Title: "Bytes used in page file(s)", + Units: "bytes", + Fam: "processes", + Ctx: "windows.processes_file_bytes", + Type: module.Stacked, + Priority: prioProcessesPageFileBytes, + } + processesThreads = module.Chart{ + ID: "processes_threads", + Title: "Active threads", + Units: "threads", + Fam: "processes", + Ctx: "windows.processes_threads", + Type: module.Stacked, + Priority: prioProcessesThreads, + } + processesHandlesChart = module.Chart{ + ID: "processes_handles", + Title: "Number of handles open", + Units: "handles", + Fam: "processes", + Ctx: "windows.processes_handles", + Type: module.Stacked, + Priority: prioProcessesHandles, + } +) + +// .NET +var ( + netFrameworkCLRExceptionsChartsTmpl = module.Charts{ + netFrameworkCLRExceptionsThrown.Copy(), + netFrameworkCLRExceptionsFilters.Copy(), + 
netFrameworkCLRExceptionsFinallys.Copy(), + netFrameworkCLRExceptionsThrowToCatchDepth.Copy(), + } + + netFrameworkCLRInteropChartsTmpl = module.Charts{ + netFrameworkCLRInteropCOMCallableWrapper.Copy(), + netFrameworkCLRInteropMarshalling.Copy(), + netFrameworkCLRInteropStubsCreated.Copy(), + } + + netFrameworkCLRJITChartsTmpl = module.Charts{ + netFrameworkCLRJITMethods.Copy(), + netFrameworkCLRJITTime.Copy(), + netFrameworkCLRJITStandardFailures.Copy(), + netFrameworkCLRJITILBytes.Copy(), + } + + netFrameworkCLRLoadingChartsTmpl = module.Charts{ + netFrameworkCLRLoadingLoaderHeapSize.Copy(), + netFrameworkCLRLoadingAppDomainsLoaded.Copy(), + netFrameworkCLRLoadingAppDomainsUnloaded.Copy(), + netFrameworkCLRLoadingAssembliesLoaded.Copy(), + netFrameworkCLRLoadingClassesLoaded.Copy(), + netFrameworkCLRLoadingClassLoadFailure.Copy(), + } + + netFrameworkCLRLocksAndThreadsChartsTmpl = module.Charts{ + netFrameworkCLRLockAndThreadsQueueLength.Copy(), + netFrameworkCLRLockAndThreadsCurrentLogicalThreads.Copy(), + netFrameworkCLRLockAndThreadsCurrentPhysicalThreads.Copy(), + netFrameworkCLRLockAndThreadsRecognizedThreads.Copy(), + netFrameworkCLRLockAndThreadsContentions.Copy(), + } + + netFrameworkCLRMemoryChartsTmpl = module.Charts{ + netFrameworkCLRMemoryAllocatedBytes.Copy(), + netFrameworkCLRMemoryFinalizationSurvivors.Copy(), + netFrameworkCLRMemoryHeapSize.Copy(), + netFrameworkCLRMemoryPromoted.Copy(), + netFrameworkCLRMemoryNumberGCHandles.Copy(), + netFrameworkCLRMemoryCollections.Copy(), + netFrameworkCLRMemoryInducedGC.Copy(), + netFrameworkCLRMemoryNumberPinnedObjects.Copy(), + netFrameworkCLRMemoryNumberSinkBlocksInUse.Copy(), + netFrameworkCLRMemoryCommitted.Copy(), + netFrameworkCLRMemoryReserved.Copy(), + netFrameworkCLRMemoryGCTime.Copy(), + } + + netFrameworkCLRRemotingChartsTmpl = module.Charts{ + netFrameworkCLRRemotingChannels.Copy(), + netFrameworkCLRRemotingContextBoundClassesLoaded.Copy(), + netFrameworkCLRRemotingContextBoundObjects.Copy(), + netFrameworkCLRRemotingContextProxies.Copy(), + netFrameworkCLRRemotingContexts.Copy(), + netFrameworkCLRRemotingCalls.Copy(), + } + + netFrameworkCLRSecurityChartsTmpl = module.Charts{ + netFrameworkCLRSecurityLinkTimeChecks.Copy(), + netFrameworkCLRSecurityChecksTime.Copy(), + netFrameworkCLRSecurityStackWalkDepth.Copy(), + netFrameworkCLRSecurityRuntimeChecks.Copy(), + } + + // Exceptions + netFrameworkCLRExceptionsThrown = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrexception_thrown", + Title: "Thrown exceptions", + Units: "exceptions/s", + Fam: "exceptions", + Ctx: "netframework.clrexception_thrown", + Priority: prioNETFrameworkCLRExceptionsThrown, + Dims: module.Dims{ + {ID: "netframework_%s_clrexception_thrown_total", Name: "exceptions", Algo: module.Incremental}, + }, + } + netFrameworkCLRExceptionsFilters = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrexception_filters", + Title: "Executed exception filters", + Units: "filters/s", + Fam: "exceptions", + Ctx: "netframework.clrexception_filters", + Priority: prioNETFrameworkCLRExceptionsFilters, + Dims: module.Dims{ + {ID: "netframework_%s_clrexception_filters_total", Name: "filters", Algo: module.Incremental}, + }, + } + netFrameworkCLRExceptionsFinallys = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrexception_finallys", + Title: "Executed finally blocks", + Units: "finallys/s", + Fam: "exceptions", + Ctx: "netframework.clrexception_finallys", + Priority: prioNETFrameworkCLRExceptionsFinallys, + 
Dims: module.Dims{ + {ID: "netframework_%s_clrexception_finallys_total", Name: "finallys", Algo: module.Incremental}, + }, + } + netFrameworkCLRExceptionsThrowToCatchDepth = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrexception_throw_to_catch_depth", + Title: "Traversed stack frames", + Units: "stack_frames/s", + Fam: "exceptions", + Ctx: "netframework.clrexception_throw_to_catch_depth", + Priority: prioNETFrameworkCLRExceptionsThrowToCatchDepth, + Dims: module.Dims{ + {ID: "netframework_%s_clrexception_throw_to_catch_depth_total", Name: "traversed", Algo: module.Incremental}, + }, + } + + // Interop + netFrameworkCLRInteropCOMCallableWrapper = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrinterop_com_callable_wrappers", + Title: "COM callable wrappers (CCW)", + Units: "ccw/s", + Fam: "interop", + Ctx: "netframework.clrinterop_com_callable_wrappers", + Priority: prioNETFrameworkCLRInteropCOMCallableWrappers, + Dims: module.Dims{ + {ID: "netframework_%s_clrinterop_com_callable_wrappers_total", Name: "com_callable_wrappers", Algo: module.Incremental}, + }, + } + netFrameworkCLRInteropMarshalling = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrinterop_interop_marshalling", + Title: "Marshalling of arguments and return values", + Units: "marshalling/s", + Fam: "interop", + Ctx: "netframework.clrinterop_interop_marshallings", + Priority: prioNETFrameworkCLRInteropMarshalling, + Dims: module.Dims{ + {ID: "netframework_%s_clrinterop_interop_marshalling_total", Name: "marshallings", Algo: module.Incremental}, + }, + } + netFrameworkCLRInteropStubsCreated = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrinterop_interop_stubs_created", + Title: "Created stubs", + Units: "stubs/s", + Fam: "interop", + Ctx: "netframework.clrinterop_interop_stubs_created", + Priority: prioNETFrameworkCLRInteropStubsCreated, + Dims: module.Dims{ + {ID: "netframework_%s_clrinterop_interop_stubs_created_total", Name: "created", Algo: module.Incremental}, + }, + } + + // JIT + netFrameworkCLRJITMethods = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrjit_methods", + Title: "JIT-compiled methods", + Units: "methods/s", + Fam: "jit", + Ctx: "netframework.clrjit_methods", + Priority: prioNETFrameworkCLRJITMethods, + Dims: module.Dims{ + {ID: "netframework_%s_clrjit_methods_total", Name: "jit-compiled", Algo: module.Incremental}, + }, + } + netFrameworkCLRJITTime = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrjit_time", + Title: "Time spent in JIT compilation", + Units: "percentage", + Fam: "jit", + Ctx: "netframework.clrjit_time", + Priority: prioNETFrameworkCLRJITTime, + Dims: module.Dims{ + {ID: "netframework_%s_clrjit_time_percent", Name: "time"}, + }, + } + netFrameworkCLRJITStandardFailures = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrjit_standard_failures", + Title: "JIT compiler failures", + Units: "failures/s", + Fam: "jit", + Ctx: "netframework.clrjit_standard_failures", + Priority: prioNETFrameworkCLRJITStandardFailures, + Dims: module.Dims{ + {ID: "netframework_%s_clrjit_standard_failures_total", Name: "failures", Algo: module.Incremental}, + }, + } + netFrameworkCLRJITILBytes = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrjit_il_bytes", + Title: "Compiled Microsoft intermediate language (MSIL) bytes", + Units: "bytes/s", + Fam: "jit", + Ctx: "netframework.clrjit_il_bytes", + Priority: prioNETFrameworkCLRJITILBytes, + Dims: 
module.Dims{ + {ID: "netframework_%s_clrjit_il_bytes_total", Name: "compiled_msil", Algo: module.Incremental}, + }, + } + + // Loading + netFrameworkCLRLoadingLoaderHeapSize = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_loader_heap_size", + Title: "Memory committed by class loader", + Units: "bytes", + Fam: "loading", + Ctx: "netframework.clrloading_loader_heap_size", + Priority: prioNETFrameworkCLRLoadingLoaderHeapSize, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_loader_heap_size_bytes", Name: "committed"}, + }, + } + netFrameworkCLRLoadingAppDomainsLoaded = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_appdomains_loaded", + Title: "Loaded application domains", + Units: "domains/s", + Fam: "loading", + Ctx: "netframework.clrloading_appdomains_loaded", + Priority: prioNETFrameworkCLRLoadingAppDomainsLoaded, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_appdomains_loaded_total", Name: "loaded", Algo: module.Incremental}, + }, + } + netFrameworkCLRLoadingAppDomainsUnloaded = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_appdomains_unloaded", + Title: "Unloaded application domains", + Units: "domains/s", + Fam: "loading", + Ctx: "netframework.clrloading_appdomains_unloaded", + Priority: prioNETFrameworkCLRLoadingAppDomainsUnloaded, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_appdomains_unloaded_total", Name: "unloaded", Algo: module.Incremental}, + }, + } + netFrameworkCLRLoadingAssembliesLoaded = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_assemblies_loaded", + Title: "Loaded assemblies", + Units: "assemblies/s", + Fam: "loading", + Ctx: "netframework.clrloading_assemblies_loaded", + Priority: prioNETFrameworkCLRLoadingAssembliesLoaded, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_assemblies_loaded_total", Name: "loaded", Algo: module.Incremental}, + }, + } + netFrameworkCLRLoadingClassesLoaded = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_classes_loaded", + Title: "Loaded classes in all assemblies", + Units: "classes/s", + Fam: "loading", + Ctx: "netframework.clrloading_classes_loaded", + Priority: prioNETFrameworkCLRLoadingClassesLoaded, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_classes_loaded_total", Name: "loaded", Algo: module.Incremental}, + }, + } + netFrameworkCLRLoadingClassLoadFailure = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrloading_class_load_failure", + Title: "Class load failures", + Units: "failures/s", + Fam: "loading", + Ctx: "netframework.clrloading_class_load_failures", + Priority: prioNETFrameworkCLRLoadingClassLoadFailure, + Dims: module.Dims{ + {ID: "netframework_%s_clrloading_class_load_failures_total", Name: "class_load", Algo: module.Incremental}, + }, + } + + // Lock and Threads + netFrameworkCLRLockAndThreadsQueueLength = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrlocksandthreads_queue_length", + Title: "Threads that waited to acquire a managed lock", + Units: "threads/s", + Fam: "locks threads", + Ctx: "netframework.clrlocksandthreads_queue_length", + Priority: prioNETFrameworkCLRLocksAndThreadsQueueLength, + Dims: module.Dims{ + {ID: "netframework_%s_clrlocksandthreads_queue_length_total", Name: "threads", Algo: module.Incremental}, + }, + } + netFrameworkCLRLockAndThreadsCurrentLogicalThreads = module.Chart{ + OverModule: "netframework", + ID: 
"netframework_%s_clrlocksandthreads_current_logical_threads", + Title: "Logical threads", + Units: "threads", + Fam: "locks threads", + Ctx: "netframework.clrlocksandthreads_current_logical_threads", + Priority: prioNETFrameworkCLRLocksAndThreadsCurrentLogicalThreads, + Dims: module.Dims{ + {ID: "netframework_%s_clrlocksandthreads_current_logical_threads", Name: "logical"}, + }, + } + netFrameworkCLRLockAndThreadsCurrentPhysicalThreads = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrlocksandthreads_current_physical_threads", + Title: "Physical threads", + Units: "threads", + Fam: "locks threads", + Ctx: "netframework.clrlocksandthreads_current_physical_threads", + Priority: prioNETFrameworkCLRLocksAndThreadsCurrentPhysicalThreads, + Dims: module.Dims{ + {ID: "netframework_%s_clrlocksandthreads_physical_threads_current", Name: "physical"}, + }, + } + netFrameworkCLRLockAndThreadsRecognizedThreads = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrlocksandthreads_recognized_threads", + Title: "Threads recognized by the runtime", + Units: "threads/s", + Fam: "locks threads", + Ctx: "netframework.clrlocksandthreads_recognized_threads", + Priority: prioNETFrameworkCLRLocksAndThreadsRecognizedThreads, + Dims: module.Dims{ + {ID: "netframework_%s_clrlocksandthreads_recognized_threads_total", Name: "threads", Algo: module.Incremental}, + }, + } + netFrameworkCLRLockAndThreadsContentions = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrlocksandthreads_contentions", + Title: "Fails to acquire a managed lock", + Units: "contentions/s", + Fam: "locks threads", + Ctx: "netframework.clrlocksandthreads_contentions", + Priority: prioNETFrameworkCLRLocksAndThreadsContentions, + Dims: module.Dims{ + {ID: "netframework_%s_clrlocksandthreads_contentions_total", Name: "contentions", Algo: module.Incremental}, + }, + } + + // Memory + netFrameworkCLRMemoryAllocatedBytes = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_allocated_bytes", + Title: "Memory allocated on the garbage collection heap", + Units: "bytes/s", + Fam: "memory", + Ctx: "netframework.clrmemory_allocated_bytes", + Priority: prioNETFrameworkCLRMemoryAllocatedBytes, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_allocated_bytes_total", Name: "allocated", Algo: module.Incremental}, + }, + } + netFrameworkCLRMemoryFinalizationSurvivors = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_finalization_survivors", + Title: "Objects that survived garbage-collection", + Units: "objects", + Fam: "memory", + Ctx: "netframework.clrmemory_finalization_survivors", + Priority: prioNETFrameworkCLRMemoryFinalizationSurvivors, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_finalization_survivors", Name: "survived"}, + }, + } + netFrameworkCLRMemoryHeapSize = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_heap_size", + Title: "Maximum bytes that can be allocated", + Units: "bytes", + Fam: "memory", + Ctx: "netframework.clrmemory_heap_size", + Priority: prioNETFrameworkCLRMemoryHeapSize, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_heap_size_bytes", Name: "heap"}, + }, + } + netFrameworkCLRMemoryPromoted = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_promoted", + Title: "Memory promoted to the next generation", + Units: "bytes", + Fam: "memory", + Ctx: "netframework.clrmemory_promoted", + Priority: prioNETFrameworkCLRMemoryPromoted, + Dims: module.Dims{ + 
{ID: "netframework_%s_clrmemory_promoted_bytes", Name: "promoted"}, + }, + } + netFrameworkCLRMemoryNumberGCHandles = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_number_gc_handles", + Title: "Garbage collection handles", + Units: "handles", + Fam: "memory", + Ctx: "netframework.clrmemory_number_gc_handles", + Priority: prioNETFrameworkCLRMemoryNumberGCHandles, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_number_gc_handles", Name: "used"}, + }, + } + netFrameworkCLRMemoryCollections = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_collections", + Title: "Garbage collections", + Units: "gc/s", + Fam: "memory", + Ctx: "netframework.clrmemory_collections", + Priority: prioNETFrameworkCLRMemoryCollections, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_collections_total", Name: "gc", Algo: module.Incremental}, + }, + } + netFrameworkCLRMemoryInducedGC = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_induced_gc", + Title: "Garbage collections induced", + Units: "gc/s", + Fam: "memory", + Ctx: "netframework.clrmemory_induced_gc", + Priority: prioNETFrameworkCLRMemoryInducedGC, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_induced_gc_total", Name: "gc", Algo: module.Incremental}, + }, + } + netFrameworkCLRMemoryNumberPinnedObjects = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_number_pinned_objects", + Title: "Pinned objects encountered", + Units: "objects", + Fam: "memory", + Ctx: "netframework.clrmemory_number_pinned_objects", + Priority: prioNETFrameworkCLRMemoryNumberPinnedObjects, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_number_pinned_objects", Name: "pinned"}, + }, + } + netFrameworkCLRMemoryNumberSinkBlocksInUse = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_number_sink_blocks_in_use", + Title: "Synchronization blocks in use", + Units: "blocks", + Fam: "memory", + Ctx: "netframework.clrmemory_number_sink_blocks_in_use", + Priority: prioNETFrameworkCLRMemoryNumberSinkBlocksInUse, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_number_sink_blocksinuse", Name: "used"}, + }, + } + netFrameworkCLRMemoryCommitted = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_committed", + Title: "Virtual memory committed by GC", + Units: "bytes", + Fam: "memory", + Ctx: "netframework.clrmemory_committed", + Priority: prioNETFrameworkCLRMemoryCommitted, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_committed_bytes", Name: "committed"}, + }, + } + netFrameworkCLRMemoryReserved = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_reserved", + Title: "Virtual memory reserved by GC", + Units: "bytes", + Fam: "memory", + Ctx: "netframework.clrmemory_reserved", + Priority: prioNETFrameworkCLRMemoryReserved, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_reserved_bytes", Name: "reserved"}, + }, + } + netFrameworkCLRMemoryGCTime = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrmemory_gc_time", + Title: "Time spent on GC", + Units: "percentage", + Fam: "memory", + Ctx: "netframework.clrmemory_gc_time", + Priority: prioNETFrameworkCLRMemoryGCTime, + Dims: module.Dims{ + {ID: "netframework_%s_clrmemory_gc_time_percent", Name: "time"}, + }, + } + + // Remoting + netFrameworkCLRRemotingChannels = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_channels", + Title: "Registered channels", + Units: 
"channels/s", + Fam: "remoting", + Ctx: "netframework.clrremoting_channels", + Priority: prioNETFrameworkCLRRemotingChannels, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_channels_total", Name: "registered", Algo: module.Incremental}, + }, + } + netFrameworkCLRRemotingContextBoundClassesLoaded = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_context_bound_classes_loaded", + Title: "Loaded context-bound classes", + Units: "classes", + Fam: "remoting", + Ctx: "netframework.clrremoting_context_bound_classes_loaded", + Priority: prioNETFrameworkCLRRemotingContextBoundClassesLoaded, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_context_bound_classes_loaded", Name: "loaded"}, + }, + } + netFrameworkCLRRemotingContextBoundObjects = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_context_bound_objects", + Title: "Allocated context-bound objects", + Units: "objects/s", + Fam: "remoting", + Ctx: "netframework.clrremoting_context_bound_objects", + Priority: prioNETFrameworkCLRRemotingContextBoundObjects, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_context_bound_objects_total", Name: "allocated", Algo: module.Incremental}, + }, + } + netFrameworkCLRRemotingContextProxies = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_context_proxies", + Title: "Remoting proxy objects", + Units: "objects/s", + Fam: "remoting", + Ctx: "netframework.clrremoting_context_proxies", + Priority: prioNETFrameworkCLRRemotingContextProxies, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_context_proxies_total", Name: "objects", Algo: module.Incremental}, + }, + } + netFrameworkCLRRemotingContexts = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_contexts", + Title: "Total of remoting contexts", + Units: "contexts", + Fam: "remoting", + Ctx: "netframework.clrremoting_contexts", + Priority: prioNETFrameworkCLRRemotingContexts, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_contexts", Name: "contexts"}, + }, + } + netFrameworkCLRRemotingCalls = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrremoting_calls", + Title: "Remote Procedure Calls (RPC) invoked", + Units: "calls/s", + Fam: "remoting", + Ctx: "netframework.clrremoting_remote_calls", + Priority: prioNETFrameworkCLRRemotingRemoteCalls, + Dims: module.Dims{ + {ID: "netframework_%s_clrremoting_remote_calls_total", Name: "rpc", Algo: module.Incremental}, + }, + } + + // Security + netFrameworkCLRSecurityLinkTimeChecks = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrsecurity_link_time_checks", + Title: "Link-time code access security checks", + Units: "checks/s", + Fam: "security", + Ctx: "netframework.clrsecurity_link_time_checks", + Priority: prioNETFrameworkCLRSecurityLinkTimeChecks, + Dims: module.Dims{ + {ID: "netframework_%s_clrsecurity_link_time_checks_total", Name: "linktime", Algo: module.Incremental}, + }, + } + netFrameworkCLRSecurityChecksTime = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrsecurity_checks_time", + Title: "Time spent performing runtime code access security checks", + Units: "percentage", + Fam: "security", + Ctx: "netframework.clrsecurity_checks_time", + Priority: prioNETFrameworkCLRSecurityRTChecksTime, + Dims: module.Dims{ + {ID: "netframework_%s_clrsecurity_checks_time_percent", Name: "time"}, + }, + } + netFrameworkCLRSecurityStackWalkDepth = module.Chart{ + OverModule: "netframework", + ID: 
"netframework_%s_clrsecurity_stack_walk_depth", + Title: "Depth of the stack", + Units: "depth", + Fam: "security", + Ctx: "netframework.clrsecurity_stack_walk_depth", + Priority: prioNETFrameworkCLRSecurityStackWalkDepth, + Dims: module.Dims{ + {ID: "netframework_%s_clrsecurity_stack_walk_depth", Name: "stack"}, + }, + } + netFrameworkCLRSecurityRuntimeChecks = module.Chart{ + OverModule: "netframework", + ID: "netframework_%s_clrsecurity_runtime_checks", + Title: "Runtime code access security checks performed", + Units: "checks/s", + Fam: "security", + Ctx: "netframework.clrsecurity_runtime_checks", + Priority: prioNETFrameworkCLRSecurityRuntimeChecks, + Dims: module.Dims{ + {ID: "netframework_%s_clrsecurity_runtime_checks_total", Name: "runtime", Algo: module.Incremental}, + }, + } +) + +// Service +var ( + serviceChartsTmpl = module.Charts{ + serviceStateChartTmpl.Copy(), + serviceStatusChartTmpl.Copy(), + } + serviceStateChartTmpl = module.Chart{ + ID: "service_%s_state", + Title: "Service state", + Units: "state", + Fam: "services", + Ctx: "windows.service_state", + Priority: prioServiceState, + Dims: module.Dims{ + {ID: "service_%s_state_running", Name: "running"}, + {ID: "service_%s_state_stopped", Name: "stopped"}, + {ID: "service_%s_state_start_pending", Name: "start_pending"}, + {ID: "service_%s_state_stop_pending", Name: "stop_pending"}, + {ID: "service_%s_state_continue_pending", Name: "continue_pending"}, + {ID: "service_%s_state_pause_pending", Name: "pause_pending"}, + {ID: "service_%s_state_paused", Name: "paused"}, + {ID: "service_%s_state_unknown", Name: "unknown"}, + }, + } + serviceStatusChartTmpl = module.Chart{ + ID: "service_%s_status", + Title: "Service status", + Units: "status", + Fam: "services", + Ctx: "windows.service_status", + Priority: prioServiceStatus, + Dims: module.Dims{ + {ID: "service_%s_status_ok", Name: "ok"}, + {ID: "service_%s_status_error", Name: "error"}, + {ID: "service_%s_status_unknown", Name: "unknown"}, + {ID: "service_%s_status_degraded", Name: "degraded"}, + {ID: "service_%s_status_pred_fail", Name: "pred_fail"}, + {ID: "service_%s_status_starting", Name: "starting"}, + {ID: "service_%s_status_stopping", Name: "stopping"}, + {ID: "service_%s_status_service", Name: "service"}, + {ID: "service_%s_status_stressed", Name: "stressed"}, + {ID: "service_%s_status_nonrecover", Name: "nonrecover"}, + {ID: "service_%s_status_no_contact", Name: "no_contact"}, + {ID: "service_%s_status_lost_comm", Name: "lost_comm"}, + }, + } +) + +// HyperV +var ( + hypervChartsTmpl = module.Charts{ + hypervVirtualMachinesHealthChart.Copy(), + hypervRootPartitionDeviceSpacePagesChart.Copy(), + hypervRootPartitionGPASpacePagesChart.Copy(), + hypervRootPartitionGPASpaceModificationsChart.Copy(), + hypervRootPartitionAttachedDevicesChart.Copy(), + hypervRootPartitionDepositedPagesChart.Copy(), + hypervRootPartitionSkippedInterrupts.Copy(), + hypervRootPartitionDeviceDMAErrorsChart.Copy(), + hypervRootPartitionDeviceInterruptErrorsChart.Copy(), + hypervRootPartitionDeviceInterruptThrottledEventsChart.Copy(), + hypervRootPartitionIOTlbFlushChart.Copy(), + hypervRootPartitionAddressSpaceChart.Copy(), + hypervRootPartitionVirtualTlbFlushEntries.Copy(), + hypervRootPartitionVirtualTlbPages.Copy(), + } + hypervVirtualMachinesHealthChart = module.Chart{ + OverModule: "hyperv", + ID: "health_vm", + Title: "Virtual machines health status", + Units: "vms", + Fam: "vms health", + Ctx: "hyperv.vms_health", + Priority: prioHypervVMHealth, + Type: module.Stacked, + Dims: 
module.Dims{ + {ID: "hyperv_health_ok", Name: "ok"}, + {ID: "hyperv_health_critical", Name: "critical"}, + }, + } + hypervRootPartitionDeviceSpacePagesChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_device_space_pages", + Title: "Root partition pages in the device space", + Units: "pages", + Fam: "root partition", + Ctx: "hyperv.root_partition_device_space_pages", + Priority: prioHypervRootPartitionDeviceSpacePages, + Dims: module.Dims{ + {ID: "hyperv_root_partition_4K_device_pages", Name: "4K"}, + {ID: "hyperv_root_partition_2M_device_pages", Name: "2M"}, + {ID: "hyperv_root_partition_1G_device_pages", Name: "1G"}, + }, + } + hypervRootPartitionGPASpacePagesChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_gpa_space_pages", + Title: "Root partition pages in the GPA space", + Units: "pages", + Fam: "root partition", + Ctx: "hyperv.root_partition_gpa_space_pages", + Priority: prioHypervRootPartitionGPASpacePages, + Dims: module.Dims{ + {ID: "hyperv_root_partition_4K_gpa_pages", Name: "4K"}, + {ID: "hyperv_root_partition_2M_gpa_pages", Name: "2M"}, + {ID: "hyperv_root_partition_1G_gpa_pages", Name: "1G"}, + }, + } + hypervRootPartitionGPASpaceModificationsChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_gpa_space_modifications", + Title: "Root partition GPA space modifications", + Units: "modifications/s", + Fam: "root partition", + Ctx: "hyperv.root_partition_gpa_space_modifications", + Priority: prioHypervRootPartitionGPASpaceModifications, + Dims: module.Dims{ + {ID: "hyperv_root_partition_gpa_space_modifications", Name: "gpa", Algo: module.Incremental}, + }, + } + hypervRootPartitionAttachedDevicesChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_attached_devices", + Title: "Root partition attached devices", + Units: "devices", + Fam: "root partition", + Ctx: "hyperv.root_partition_attached_devices", + Priority: prioHypervRootPartitionAttachedDevices, + Dims: module.Dims{ + {ID: "hyperv_root_partition_attached_devices", Name: "attached"}, + }, + } + hypervRootPartitionDepositedPagesChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_deposited_pages", + Title: "Root partition deposited pages", + Units: "pages", + Fam: "root partition", + Ctx: "hyperv.root_partition_deposited_pages", + Priority: prioHypervRootPartitionDepositedPages, + Dims: module.Dims{ + {ID: "hyperv_root_partition_deposited_pages", Name: "deposited"}, + }, + } + hypervRootPartitionSkippedInterrupts = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_skipped_interrupts", + Title: "Root partition skipped interrupts", + Units: "interrupts", + Fam: "root partition", + Ctx: "hyperv.root_partition_skipped_interrupts", + Priority: prioHypervRootPartitionSkippedInterrupts, + Dims: module.Dims{ + {ID: "hyperv_root_partition_physical_pages_allocated", Name: "skipped"}, + }, + } + hypervRootPartitionDeviceDMAErrorsChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_device_dma_errors", + Title: "Root partition illegal DMA requests", + Units: "requests", + Fam: "root partition", + Ctx: "hyperv.root_partition_device_dma_errors", + Priority: prioHypervRootPartitionDeviceDMAErrors, + Dims: module.Dims{ + {ID: "hyperv_root_partition_deposited_pages", Name: "illegal_dma"}, + }, + } + hypervRootPartitionDeviceInterruptErrorsChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_device_interrupt_errors", + Title: "Root partition illegal interrupt requests", + Units: "requests", + Fam: "root partition", + Ctx: 
"hyperv.root_partition_device_interrupt_errors", + Priority: prioHypervRootPartitionDeviceInterruptErrors, + Dims: module.Dims{ + {ID: "hyperv_root_partition_device_interrupt_errors", Name: "illegal_interrupt"}, + }, + } + hypervRootPartitionDeviceInterruptThrottledEventsChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_device_interrupt_throttle_events", + Title: "Root partition throttled interrupts", + Units: "events", + Fam: "root partition", + Ctx: "hyperv.root_partition_device_interrupt_throttle_events", + Priority: prioHypervRootPartitionDeviceInterruptThrottleEvents, + Dims: module.Dims{ + {ID: "hyperv_root_partition_device_interrupt_throttle_events", Name: "throttling"}, + }, + } + hypervRootPartitionIOTlbFlushChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_io_tbl_flush", + Title: "Root partition flushes of I/O TLBs", + Units: "flushes/s", + Fam: "root partition", + Ctx: "hyperv.root_partition_io_tlb_flush", + Priority: prioHypervRootPartitionIOTlbFlush, + Dims: module.Dims{ + {ID: "hyperv_root_partition_io_tlb_flush", Name: "flushes", Algo: module.Incremental}, + }, + } + hypervRootPartitionAddressSpaceChart = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_address_space", + Title: "Root partition address spaces in the virtual TLB", + Units: "address spaces", + Fam: "root partition", + Ctx: "hyperv.root_partition_address_space", + Priority: prioHypervRootPartitionAddressSpace, + Dims: module.Dims{ + {ID: "hyperv_root_partition_address_spaces", Name: "address_spaces"}, + }, + } + hypervRootPartitionVirtualTlbFlushEntries = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_virtual_tbl_flush_entries", + Title: "Root partition flushes of the entire virtual TLB", + Units: "flushes/s", + Fam: "root partition", + Ctx: "hyperv.root_partition_virtual_tlb_flush_entries", + Priority: prioHypervRootPartitionVirtualTlbFlushEntires, + Dims: module.Dims{ + {ID: "hyperv_root_partition_virtual_tlb_flush_entires", Name: "flushes", Algo: module.Incremental}, + }, + } + hypervRootPartitionVirtualTlbPages = module.Chart{ + OverModule: "hyperv", + ID: "root_partition_virtual_tlb_pages", + Title: "Root partition pages used by the virtual TLB", + Units: "pages", + Fam: "root partition", + Ctx: "hyperv.root_partition_virtual_tlb_pages", + Priority: prioHypervRootPartitionVirtualTlbPages, + Dims: module.Dims{ + {ID: "hyperv_root_partition_virtual_tlb_pages", Name: "used"}, + }, + } +) + +// HyperV VM Memory +var ( + hypervVMChartsTemplate = module.Charts{ + hypervHypervVMCPUUsageChartTmpl.Copy(), + hypervHypervVMMemoryPhysicalChartTmpl.Copy(), + hypervHypervVMMemoryPhysicalGuestVisibleChartTmpl.Copy(), + hypervHypervVMMemoryPressureCurrentChartTmpl.Copy(), + hypervVIDPhysicalPagesAllocatedChartTmpl.Copy(), + hypervVIDRemotePhysicalPagesChartTmpl.Copy(), + } + hypervHypervVMCPUUsageChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_cpu_usage", + Title: "VM CPU usage (100% = 1 core)", + Units: "percentage", + Fam: "vm cpu", + Ctx: "hyperv.vm_cpu_usage", + Priority: prioHypervVMCPUUsage, + Type: module.Stacked, + Dims: module.Dims{ + {ID: "hyperv_vm_%s_cpu_guest_run_time", Name: "guest", Div: 1e5, Algo: module.Incremental}, + {ID: "hyperv_vm_%s_cpu_hypervisor_run_time", Name: "hypervisor", Div: 1e5, Algo: module.Incremental}, + {ID: "hyperv_vm_%s_cpu_remote_run_time", Name: "remote", Div: 1e5, Algo: module.Incremental}, + }, + } + hypervHypervVMMemoryPhysicalChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_memory_physical", + 
Title: "VM assigned memory", + Units: "MiB", + Fam: "vm mem", + Ctx: "hyperv.vm_memory_physical", + Priority: prioHypervVMMemoryPhysical, + Dims: module.Dims{ + {ID: "hyperv_vm_%s_memory_physical", Name: "assigned_memory"}, + }, + } + hypervHypervVMMemoryPhysicalGuestVisibleChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_memory_physical_guest_visible", + Title: "VM guest visible memory", + Units: "MiB", + Fam: "vm mem", + Ctx: "hyperv.vm_memory_physical_guest_visible", + Priority: prioHypervVMMemoryPhysicalGuestVisible, + Dims: module.Dims{ + {ID: "hyperv_vm_%s_memory_physical_guest_visible", Name: "visible_memory"}, + }, + } + hypervHypervVMMemoryPressureCurrentChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_memory_pressure_current", + Title: "VM current pressure", + Units: "percentage", + Fam: "vm mem", + Ctx: "hyperv.vm_memory_pressure_current", + Priority: prioHypervVMMemoryPressureCurrent, + Dims: module.Dims{ + {ID: "hyperv_vm_%s_memory_pressure_current", Name: "pressure"}, + }, + } + hypervVIDPhysicalPagesAllocatedChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_vid_physical_pages_allocated", + Title: "VM physical pages allocated", + Units: "pages", + Fam: "vm mem", + Ctx: "hyperv.vm_vid_physical_pages_allocated", + Priority: prioHypervVIDPhysicalPagesAllocated, + Dims: module.Dims{ + {ID: "hyperv_vid_%s_physical_pages_allocated", Name: "allocated"}, + }, + } + hypervVIDRemotePhysicalPagesChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_%s_remote_physical_pages", + Title: "VM physical pages not allocated from the preferred NUMA node", + Units: "pages", + Fam: "vm mem", + Ctx: "hyperv.vm_vid_remote_physical_pages", + Priority: prioHypervVIDRemotePhysicalPages, + Dims: module.Dims{ + {ID: "hyperv_vid_%s_remote_physical_pages", Name: "remote_physical"}, + }, + } +) + +// HyperV VM storage device +var ( + hypervVMDeviceChartsTemplate = module.Charts{ + hypervVMDeviceIOChartTmpl.Copy(), + hypervVMDeviceIOPSChartTmpl.Copy(), + hypervVMDeviceErrorCountChartTmpl.Copy(), + } + hypervVMDeviceIOChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_device_%s_bytes_read", + Title: "VM storage device IO", + Units: "bytes/s", + Fam: "vm disk", + Ctx: "hyperv.vm_device_bytes", + Priority: prioHypervVMDeviceBytes, + Type: module.Area, + Dims: module.Dims{ + {ID: "hyperv_vm_device_%s_bytes_read", Name: "read", Algo: module.Incremental}, + {ID: "hyperv_vm_device_%s_bytes_written", Name: "written", Algo: module.Incremental}, + }, + } + hypervVMDeviceIOPSChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_device_%s_operation_read", + Title: "VM storage device IOPS", + Units: "operations/s", + Fam: "vm disk", + Ctx: "hyperv.vm_device_operations", + Priority: prioHypervVMDeviceOperations, + Dims: module.Dims{ + {ID: "hyperv_vm_device_%s_operations_read", Name: "read", Algo: module.Incremental}, + {ID: "hyperv_vm_device_%s_operations_written", Name: "write", Algo: module.Incremental}, + }, + } + hypervVMDeviceErrorCountChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_device_%s_error_count", + Title: "VM storage device errors", + Units: "errors/s", + Fam: "vm disk", + Ctx: "hyperv.vm_device_errors", + Priority: prioHypervVMDeviceErrors, + Dims: module.Dims{ + {ID: "hyperv_vm_device_%s_error_count", Name: "errors", Algo: module.Incremental}, + }, + } +) + +// HyperV VM network interface +var ( + hypervVMInterfaceChartsTemplate = module.Charts{ + hypervVMInterfaceTrafficChartTmpl.Copy(), + hypervVMInterfacePacketsChartTmpl.Copy(), 
+ hypervVMInterfacePacketsDroppedChartTmpl.Copy(), + } + + hypervVMInterfaceTrafficChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_interface_%s_bytes", + Title: "VM interface traffic", + Units: "bytes/s", + Fam: "vm net", + Ctx: "hyperv.vm_interface_bytes", + Priority: prioHypervVMInterfaceBytes, + Type: module.Area, + Dims: module.Dims{ + {ID: "hyperv_vm_interface_%s_bytes_received", Name: "received", Algo: module.Incremental}, + {ID: "hyperv_vm_interface_%s_bytes_sent", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVMInterfacePacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_interface_%s_packets", + Title: "VM interface packets", + Units: "packets/s", + Fam: "vm net", + Ctx: "hyperv.vm_interface_packets", + Priority: prioHypervVMInterfacePackets, + Dims: module.Dims{ + {ID: "hyperv_vm_interface_%s_packets_received", Name: "received", Algo: module.Incremental}, + {ID: "hyperv_vm_interface_%s_packets_sent", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVMInterfacePacketsDroppedChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vm_interface_%s_packets_dropped", + Title: "VM interface packets dropped", + Units: "drops/s", + Fam: "vm net", + Ctx: "hyperv.vm_interface_packets_dropped", + Priority: prioHypervVMInterfacePacketsDropped, + Dims: module.Dims{ + {ID: "hyperv_vm_interface_%s_packets_incoming_dropped", Name: "incoming", Algo: module.Incremental}, + {ID: "hyperv_vm_interface_%s_packets_outgoing_dropped", Name: "outgoing", Algo: module.Incremental}, + }, + } +) + +// HyperV Virtual Switch +var ( + hypervVswitchChartsTemplate = module.Charts{ + hypervVswitchTrafficChartTmpl.Copy(), + hypervVswitchPacketsChartTmpl.Copy(), + hypervVswitchDirectedPacketsChartTmpl.Copy(), + hypervVswitchBroadcastPacketsChartTmpl.Copy(), + hypervVswitchMulticastPacketsChartTmpl.Copy(), + hypervVswitchDroppedPacketsChartTmpl.Copy(), + hypervVswitchExtensionDroppedPacketsChartTmpl.Copy(), + hypervVswitchPacketsFloodedTotalChartTmpl.Copy(), + hypervVswitchLearnedMACAddressChartTmpl.Copy(), + hypervVswitchPurgedMACAddressChartTmpl.Copy(), + } + + hypervVswitchTrafficChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_traffic", + Title: "Virtual switch traffic", + Units: "bytes/s", + Fam: "vswitch traffic", + Ctx: "hyperv.vswitch_bytes", + Priority: prioHypervVswitchTrafficTotal, + Type: module.Area, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_bytes_received_total", Name: "received", Algo: module.Incremental}, + {ID: "hyperv_vswitch_%s_bytes_sent_total", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVswitchPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_packets", + Title: "Virtual switch packets", + Units: "packets/s", + Fam: "vswitch packets", + Ctx: "hyperv.vswitch_packets", + Priority: prioHypervVswitchPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_packets_received_total", Name: "received", Algo: module.Incremental}, + // FIXME: https://github.com/prometheus-community/windows_exporter/pull/1201 + //{ID: "hyperv_vswitch_%s_packets_sent_total", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVswitchDirectedPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_directed_packets", + Title: "Virtual switch directed packets", + Units: "packets/s", + Fam: "vswitch packets", + Ctx: "hyperv.vswitch_directed_packets", + Priority: prioHypervVswitchDirectedPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_directed_packets_received_total", Name: "received", Algo: 
module.Incremental}, + {ID: "hyperv_vswitch_%s_directed_packets_send_total", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVswitchBroadcastPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_broadcast_packets", + Title: "Virtual switch broadcast packets", + Units: "packets/s", + Fam: "vswitch packets", + Ctx: "hyperv.vswitch_broadcast_packets", + Priority: prioHypervVswitchBroadcastPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_broadcast_packets_received_total", Name: "received", Algo: module.Incremental}, + {ID: "hyperv_vswitch_%s_broadcast_packets_sent_total", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVswitchMulticastPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_multicast_packets", + Title: "Virtual switch multicast packets", + Units: "packets/s", + Fam: "vswitch packets", + Ctx: "hyperv.vswitch_multicast_packets", + Priority: prioHypervVswitchMulticastPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_multicast_packets_received_total", Name: "received", Algo: module.Incremental}, + {ID: "hyperv_vswitch_%s_multicast_packets_sent_total", Name: "sent", Algo: module.Incremental}, + }, + } + hypervVswitchDroppedPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_dropped_packets", + Title: "Virtual switch dropped packets", + Units: "drops/s", + Fam: "vswitch drops", + Ctx: "hyperv.vswitch_dropped_packets", + Priority: prioHypervVswitchDroppedPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_dropped_packets_incoming_total", Name: "incoming", Algo: module.Incremental}, + {ID: "hyperv_vswitch_%s_dropped_packets_outcoming_total", Name: "outgoing", Algo: module.Incremental}, + }, + } + hypervVswitchExtensionDroppedPacketsChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_extensions_dropped_packets_incoming", + Title: "Virtual switch extensions dropped packets", + Units: "drops/s", + Fam: "vswitch drops", + Ctx: "hyperv.vswitch_extensions_dropped_packets", + Priority: prioHypervVswitchExtensionsDroppedPackets, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_extensions_dropped_packets_incoming_total", Name: "incoming", Algo: module.Incremental}, + {ID: "hyperv_vswitch_%s_extensions_dropped_packets_outcoming_total", Name: "outgoing", Algo: module.Incremental}, + }, + } + hypervVswitchPacketsFloodedTotalChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_packets_flooded", + Title: "Virtual switch flooded packets", + Units: "packets/s", + Fam: "vswitch flood", + Ctx: "hyperv.vswitch_packets_flooded", + Priority: prioHypervVswitchPacketsFlooded, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_packets_flooded_total", Name: "flooded", Algo: module.Incremental}, + }, + } + hypervVswitchLearnedMACAddressChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_learned_mac_addresses", + Title: "Virtual switch learned MAC addresses", + Units: "mac addresses/s", + Fam: "vswitch mac addresses", + Ctx: "hyperv.vswitch_learned_mac_addresses", + Priority: prioHypervVswitchLearnedMACAddresses, + Dims: module.Dims{ + {ID: "hyperv_vswitch_%s_learned_mac_addresses_total", Name: "learned", Algo: module.Incremental}, + }, + } + hypervVswitchPurgedMACAddressChartTmpl = module.Chart{ + OverModule: "hyperv", + ID: "vswitch_%s_purged_mac_addresses", + Title: "Virtual switch purged MAC addresses", + Units: "mac addresses/s", + Fam: "vswitch mac addresses", + Ctx: "hyperv.vswitch_purged_mac_addresses", + Priority: prioHypervVswitchPurgeMACAddress, + Dims: 
module.Dims{ + {ID: "hyperv_vswitch_%s_purged_mac_addresses_total", Name: "purged", Algo: module.Incremental}, + }, + } +) + +// Collectors +var ( + collectorChartsTmpl = module.Charts{ + collectorDurationChartTmpl.Copy(), + collectorStatusChartTmpl.Copy(), + } + collectorDurationChartTmpl = module.Chart{ + ID: "collector_%s_duration", + Title: "Duration of a data collection", + Units: "seconds", + Fam: "collection", + Ctx: "windows.collector_duration", + Priority: prioCollectorDuration, + Dims: module.Dims{ + {ID: "collector_%s_duration", Name: "duration", Div: precision}, + }, + } + collectorStatusChartTmpl = module.Chart{ + ID: "collector_%s_status", + Title: "Status of a data collection", + Units: "status", + Fam: "collection", + Ctx: "windows.collector_status", + Priority: prioCollectorStatus, + Dims: module.Dims{ + {ID: "collector_%s_status_success", Name: "success"}, + {ID: "collector_%s_status_fail", Name: "fail"}, + }, + } +) + +func (w *Windows) addCPUCharts() { + charts := cpuCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addCPUCoreCharts(core string) { + charts := cpuCoreChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, core) + chart.Labels = []module.Label{ + {Key: "core", Value: core}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, core) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeCPUCoreCharts(core string) { + px := fmt.Sprintf("cpu_core_%s", core) + w.removeCharts(px) +} + +func (w *Windows) addMemoryCharts() { + charts := memCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addDiskCharts(disk string) { + charts := diskChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, disk) + chart.Labels = []module.Label{ + {Key: "disk", Value: disk}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, disk) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeDiskCharts(disk string) { + px := fmt.Sprintf("logical_disk_%s", disk) + w.removeCharts(px) +} + +func (w *Windows) addNICCharts(nic string) { + charts := nicChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, nic) + chart.Labels = []module.Label{ + {Key: "nic", Value: nic}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, nic) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeNICCharts(nic string) { + px := fmt.Sprintf("nic_%s", nic) + w.removeCharts(px) +} + +func (w *Windows) addTCPCharts() { + charts := tcpCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addOSCharts() { + charts := osCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addSystemCharts() { + charts := systemCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addLogonCharts() { + charts := logonCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addADFSCharts() { + charts := adfsCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addExchangeCharts() { + charts := 
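+ // Shared shape of the add*Charts helpers in this file: Copy() the chart
+ // template set, stamp the instance name into the chart ID and every dim ID
+ // with fmt.Sprintf, attach an identifying label, then Add() the result;
+ // the paired remove* helpers retire those charts again by ID prefix.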
exchangeCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addExchangeWorkloadCharts(name string) { + charts := exchangeWorkloadChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "workload", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeExchangeWorkloadCharts(name string) { + px := fmt.Sprintf("exchange_workload_%s", name) + w.removeCharts(px) +} + +func (w *Windows) addExchangeLDAPCharts(name string) { + charts := exchangeLDAPChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "ldap_process", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeExchangeLDAPCharts(name string) { + px := fmt.Sprintf("exchange_ldap_%s", name) + w.removeCharts(px) +} + +func (w *Windows) addExchangeHTTPProxyCharts(name string) { + charts := exchangeHTTPProxyChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "http_proxy", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeExchangeHTTPProxyCharts(name string) { + px := fmt.Sprintf("exchange_http_proxy_%s", name) + w.removeCharts(px) +} + +func (w *Windows) addThermalZoneCharts(zone string) { + charts := thermalzoneChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, zone) + chart.Labels = []module.Label{ + {Key: "thermalzone", Value: zone}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, zone) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeThermalZoneCharts(zone string) { + px := fmt.Sprintf("thermalzone_%s", zone) + w.removeCharts(px) +} + +func (w *Windows) addIISWebsiteCharts(website string) { + charts := iisWebsiteChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, website) + chart.Labels = []module.Label{ + {Key: "website", Value: website}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, website) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeIIWebsiteSCharts(website string) { + px := fmt.Sprintf("iis_website_%s", website) + w.removeCharts(px) +} + +func (w *Windows) addMSSQLDBCharts(instance string, dbname string) { + charts := mssqlDatabaseChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, dbname, instance) + chart.Labels = []module.Label{ + {Key: "mssql_instance", Value: instance}, + {Key: "database", Value: dbname}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, dbname, instance) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeMSSQLDBCharts(instance string, dbname string) { + px := fmt.Sprintf("mssql_db_%s_instance_%s", dbname, instance) + w.removeCharts(px) +} + +func (w *Windows) addMSSQLInstanceCharts(instance 
string) { + charts := mssqlInstanceChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, instance) + chart.Labels = []module.Label{ + {Key: "mssql_instance", Value: instance}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, instance) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeMSSQLInstanceCharts(instance string) { + px := fmt.Sprintf("mssql_instance_%s", instance) + w.removeCharts(px) +} + +func (w *Windows) addProcessesCharts() { + charts := processesCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addADCharts() { + charts := adCharts.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addCertificateTemplateCharts(template string) { + charts := adcsCertTemplateChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, template) + chart.Labels = []module.Label{ + {Key: "cert_template", Value: template}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, template) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeCertificateTemplateCharts(template string) { + px := fmt.Sprintf("adcs_cert_template_%s", template) + w.removeCharts(px) +} + +func (w *Windows) addProcessToCharts(procID string) { + for _, chart := range *w.Charts() { + var dim *module.Dim + switch chart.ID { + case processesCPUUtilizationTotalChart.ID: + id := fmt.Sprintf("process_%s_cpu_time", procID) + dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental, Div: 1000, Mul: 100} + if procID == "Idle" { + dim.Hidden = true + } + case processesMemoryUsageChart.ID: + id := fmt.Sprintf("process_%s_working_set_private_bytes", procID) + dim = &module.Dim{ID: id, Name: procID} + case processesIOBytesChart.ID: + id := fmt.Sprintf("process_%s_io_bytes", procID) + dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental} + case processesIOOperationsChart.ID: + id := fmt.Sprintf("process_%s_io_operations", procID) + dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental} + case processesPageFaultsChart.ID: + id := fmt.Sprintf("process_%s_page_faults", procID) + dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental} + case processesPageFileBytes.ID: + id := fmt.Sprintf("process_%s_page_file_bytes", procID) + dim = &module.Dim{ID: id, Name: procID} + case processesThreads.ID: + id := fmt.Sprintf("process_%s_threads", procID) + dim = &module.Dim{ID: id, Name: procID} + case processesHandlesChart.ID: + id := fmt.Sprintf("process_%s_handles", procID) + dim = &module.Dim{ID: id, Name: procID} + default: + continue + } + + if dim == nil { + continue + } + if err := chart.AddDim(dim); err != nil { + w.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func (w *Windows) removeProcessFromCharts(procID string) { + for _, chart := range *w.Charts() { + var id string + switch chart.ID { + case processesCPUUtilizationTotalChart.ID: + id = fmt.Sprintf("process_%s_cpu_time", procID) + case processesMemoryUsageChart.ID: + id = fmt.Sprintf("process_%s_working_set_private_bytes", procID) + case processesIOBytesChart.ID: + id = fmt.Sprintf("process_%s_io_bytes", procID) + case processesIOOperationsChart.ID: + id = fmt.Sprintf("process_%s_io_operations", procID) + case processesPageFaultsChart.ID: + id = fmt.Sprintf("process_%s_page_faults", procID) + case 
processesPageFileBytes.ID: + id = fmt.Sprintf("process_%s_page_file_bytes", procID) + case processesThreads.ID: + id = fmt.Sprintf("process_%s_threads", procID) + case processesHandlesChart.ID: + id = fmt.Sprintf("process_%s_handles", procID) + default: + continue + } + + if err := chart.MarkDimRemove(id, false); err != nil { + w.Warning(err) + continue + } + chart.MarkNotCreated() + } +} + +func (w *Windows) addProcessNetFrameworkExceptionsCharts(procName string) { + charts := netFrameworkCLRExceptionsChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessFromNetFrameworkExceptionsCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrexception", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkInteropCharts(procName string) { + charts := netFrameworkCLRInteropChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkInteropCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrinterop", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkJITCharts(procName string) { + charts := netFrameworkCLRJITChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkJITCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrjit", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkLoadingCharts(procName string) { + charts := netFrameworkCLRLoadingChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkLoadingCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrloading", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkLocksAndThreadsCharts(procName string) { + charts := netFrameworkCLRLocksAndThreadsChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkLocksAndThreadsCharts(procName string) { + px := 
fmt.Sprintf("netframework_%s_clrlocksandthreads", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkMemoryCharts(procName string) { + charts := netFrameworkCLRMemoryChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkMemoryCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrmemory", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkRemotingCharts(procName string) { + charts := netFrameworkCLRRemotingChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkRemotingCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrremoting", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addProcessNetFrameworkSecurityCharts(procName string) { + charts := netFrameworkCLRSecurityChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName)) + chart.Labels = []module.Label{ + {Key: "process", Value: procName}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, procName) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeProcessNetFrameworkSecurityCharts(procName string) { + px := fmt.Sprintf("netframework_%s_clrsecurity", strings.ToLower(procName)) + w.removeCharts(px) +} + +func (w *Windows) addServiceCharts(svc string) { + charts := serviceChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, svc) + chart.Labels = []module.Label{ + {Key: "service", Value: svc}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, svc) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeServiceCharts(svc string) { + px := fmt.Sprintf("service_%s", svc) + w.removeCharts(px) +} + +func (w *Windows) addCollectorCharts(name string) { + charts := collectorChartsTmpl.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, name) + chart.Labels = []module.Label{ + {Key: "collector", Value: name}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addHypervCharts() { + charts := hypervChartsTmpl.Copy() + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) addHypervVMCharts(vm string) { + charts := hypervVMChartsTemplate.Copy() + n := hypervCleanName(vm) + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, n) + chart.Labels = []module.Label{ + {Key: "vm_name", Value: vm}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, n) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) 
removeHypervVMCharts(vm string) { + px := fmt.Sprintf("vm_%s", hypervCleanName(vm)) + w.removeCharts(px) +} + +func (w *Windows) addHypervVMDeviceCharts(device string) { + charts := hypervVMDeviceChartsTemplate.Copy() + n := hypervCleanName(device) + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, n) + chart.Labels = []module.Label{ + {Key: "vm_device", Value: device}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, n) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeHypervVMDeviceCharts(device string) { + px := fmt.Sprintf("vm_device_%s", hypervCleanName(device)) + w.removeCharts(px) +} + +func (w *Windows) addHypervVMInterfaceCharts(iface string) { + charts := hypervVMInterfaceChartsTemplate.Copy() + n := hypervCleanName(iface) + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, n) + chart.Labels = []module.Label{ + {Key: "vm_interface", Value: iface}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, n) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeHypervVMInterfaceCharts(iface string) { + px := fmt.Sprintf("vm_interface_%s", hypervCleanName(iface)) + w.removeCharts(px) +} + +func (w *Windows) addHypervVSwitchCharts(vswitch string) { + charts := hypervVswitchChartsTemplate.Copy() + n := hypervCleanName(vswitch) + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, n) + chart.Labels = []module.Label{ + {Key: "vswitch", Value: vswitch}, + } + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, n) + } + } + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *Windows) removeHypervVSwitchCharts(vswitch string) { + px := fmt.Sprintf("vswitch_%s", hypervCleanName(vswitch)) + w.removeCharts(px) +} + +func (w *Windows) removeCollectorCharts(name string) { + px := fmt.Sprintf("collector_%s", name) + w.removeCharts(px) +} + +func (w *Windows) removeCharts(prefix string) { + for _, chart := range *w.Charts() { + if strings.HasPrefix(chart.ID, prefix) { + chart.MarkRemove() + chart.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect.go b/src/go/collectors/go.d.plugin/modules/windows/collect.go new file mode 100644 index 00000000000000..288774f99ef450 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect.go @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const precision = 1000 + +const ( + collectorAD = "ad" + collectorADCS = "adcs" + collectorADFS = "adfs" + collectorCPU = "cpu" + collectorMemory = "memory" + collectorNet = "net" + collectorLogicalDisk = "logical_disk" + collectorOS = "os" + collectorSystem = "system" + collectorLogon = "logon" + collectorThermalZone = "thermalzone" + collectorTCP = "tcp" + collectorIIS = "iis" + collectorMSSQL = "mssql" + collectorProcess = "process" + collectorService = "service" + collectorNetFrameworkCLRExceptions = "netframework_clrexceptions" + collectorNetFrameworkCLRInterop = "netframework_clrinterop" + collectorNetFrameworkCLRJIT = "netframework_clrjit" + collectorNetFrameworkCLRLoading = "netframework_clrloading" + collectorNetFrameworkCLRLocksAndThreads = "netframework_clrlocksandthreads" + collectorNetFrameworkCLRMemory = "netframework_clrmemory" + collectorNetFrameworkCLRRemoting = "netframework_clrremoting" + 
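The precision constant defined above pairs with dims declared as Div: precision (see collectorDurationChartTmpl in charts.go): fractional values are scaled into the int64 metric map on collection and divided back at display time. A minimal sketch:

```go
package main

import "fmt"

const precision = 1000 // same role as the constant in collect.go

// go.d metric maps are map[string]int64, so second-valued series are stored
// multiplied by precision; the owning dim declares Div: precision so the
// dashboard renders the original fraction.
func scaleSeconds(v float64) int64 { return int64(v * precision) }

func main() {
	fmt.Println(scaleSeconds(0.042)) // stored as 42, shown as 0.042
}
```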
collectorNetFrameworkCLRSecurity = "netframework_clrsecurity" + collectorExchange = "exchange" + collectorHyperv = "hyperv" +) + +func (w *Windows) collect() (map[string]int64, error) { + pms, err := w.prom.ScrapeSeries() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + w.collectMetrics(mx, pms) + + if hasKey(mx, "os_visible_memory_bytes", "memory_available_bytes") { + mx["memory_used_bytes"] = 0 + + mx["os_visible_memory_bytes"] - + mx["memory_available_bytes"] + } + if hasKey(mx, "os_paging_limit_bytes", "os_paging_free_bytes") { + mx["os_paging_used_bytes"] = 0 + + mx["os_paging_limit_bytes"] - + mx["os_paging_free_bytes"] + } + if hasKey(mx, "os_visible_memory_bytes", "os_physical_memory_free_bytes") { + mx["os_visible_memory_used_bytes"] = 0 + + mx["os_visible_memory_bytes"] - + mx["os_physical_memory_free_bytes"] + } + if hasKey(mx, "memory_commit_limit", "memory_committed_bytes") { + mx["memory_not_committed_bytes"] = 0 + + mx["memory_commit_limit"] - + mx["memory_committed_bytes"] + } + if hasKey(mx, "memory_standby_cache_reserve_bytes", "memory_standby_cache_normal_priority_bytes", "memory_standby_cache_core_bytes") { + mx["memory_standby_cache_total"] = 0 + + mx["memory_standby_cache_reserve_bytes"] + + mx["memory_standby_cache_normal_priority_bytes"] + + mx["memory_standby_cache_core_bytes"] + } + if hasKey(mx, "memory_standby_cache_total", "memory_modified_page_list_bytes") { + mx["memory_cache_total"] = 0 + + mx["memory_standby_cache_total"] + + mx["memory_modified_page_list_bytes"] + } + + return mx, nil +} + +func (w *Windows) collectMetrics(mx map[string]int64, pms prometheus.Series) { + w.collectCollector(mx, pms) + for _, pm := range pms.FindByName(metricCollectorSuccess) { + if pm.Value == 0 { + continue + } + + switch pm.Labels.Get("collector") { + case collectorCPU: + w.collectCPU(mx, pms) + case collectorMemory: + w.collectMemory(mx, pms) + case collectorNet: + w.collectNet(mx, pms) + case collectorLogicalDisk: + w.collectLogicalDisk(mx, pms) + case collectorOS: + w.collectOS(mx, pms) + case collectorSystem: + w.collectSystem(mx, pms) + case collectorLogon: + w.collectLogon(mx, pms) + case collectorThermalZone: + w.collectThermalzone(mx, pms) + case collectorTCP: + w.collectTCP(mx, pms) + case collectorProcess: + w.collectProcess(mx, pms) + case collectorService: + w.collectService(mx, pms) + case collectorIIS: + w.collectIIS(mx, pms) + case collectorMSSQL: + w.collectMSSQL(mx, pms) + case collectorAD: + w.collectAD(mx, pms) + case collectorADCS: + w.collectADCS(mx, pms) + case collectorADFS: + w.collectADFS(mx, pms) + case collectorNetFrameworkCLRExceptions: + w.collectNetFrameworkCLRExceptions(mx, pms) + case collectorNetFrameworkCLRInterop: + w.collectNetFrameworkCLRInterop(mx, pms) + case collectorNetFrameworkCLRJIT: + w.collectNetFrameworkCLRJIT(mx, pms) + case collectorNetFrameworkCLRLoading: + w.collectNetFrameworkCLRLoading(mx, pms) + case collectorNetFrameworkCLRLocksAndThreads: + w.collectNetFrameworkCLRLocksAndThreads(mx, pms) + case collectorNetFrameworkCLRMemory: + w.collectNetFrameworkCLRMemory(mx, pms) + case collectorNetFrameworkCLRRemoting: + w.collectNetFrameworkCLRRemoting(mx, pms) + case collectorNetFrameworkCLRSecurity: + w.collectNetFrameworkCLRSecurity(mx, pms) + case collectorExchange: + w.collectExchange(mx, pms) + case collectorHyperv: + w.collectHyperv(mx, pms) + } + } +} + +func hasKey(mx map[string]int64, key string, keys ...string) bool { + _, ok := mx[key] + switch len(keys) { + case 0: + return ok + default: + 
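+ // Recurses over the variadic tail: hasKey(mx, "a", "b", "c") evaluates
+ // ok("a") && hasKey(mx, "b", "c") and bottoms out at the single-key case
+ // above, so the derived-memory guards in collect() fire only when every
+ // source key is present in mx.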
return ok && hasKey(mx, keys[0], keys[1:]...) + } +} + +func boolToInt(v bool) int64 { + if v { + return 1 + } + return 0 +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_ad.go b/src/go/collectors/go.d.plugin/modules/windows/collect_ad.go new file mode 100644 index 00000000000000..c386cf5bb99684 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_ad.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import "github.com/netdata/go.d.plugin/pkg/prometheus" + +// Windows exporter: +// https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md +// Microsoft: +// https://learn.microsoft.com/en-us/previous-versions/ms803980(v=msdn.10) +const ( + metricADATQAverageRequestLatency = "windows_ad_atq_average_request_latency" + metricADATQOutstandingRequests = "windows_ad_atq_outstanding_requests" + metricADDatabaseOperationsTotal = "windows_ad_database_operations_total" + metricADDirectoryOperationsTotal = "windows_ad_directory_operations_total" + metricADReplicationInboundObjectsFilteringTotal = "windows_ad_replication_inbound_objects_filtered_total" + metricADReplicationInboundPropertiesFilteredTotal = "windows_ad_replication_inbound_properties_filtered_total" + metricADReplicationInboundPropertiesUpdatedTotal = "windows_ad_replication_inbound_properties_updated_total" + metricADReplicationInboundSyncObjectsRemaining = "windows_ad_replication_inbound_sync_objects_remaining" + metricADReplicationDataInterSiteBytesTotal = "windows_ad_replication_data_intersite_bytes_total" + metricADReplicationDataIntraSiteBytesTotal = "windows_ad_replication_data_intrasite_bytes_total" + metricADReplicationPendingSyncs = "windows_ad_replication_pending_synchronizations" + metricADReplicationSyncRequestsTotal = "windows_ad_replication_sync_requests_total" + metricADDirectoryServiceThreads = "windows_ad_directory_service_threads" + metricADLDAPLastBindTimeSecondsTotal = "windows_ad_ldap_last_bind_time_seconds" + metricADBindsTotal = "windows_ad_binds_total" + metricADLDAPSearchesTotal = "windows_ad_ldap_searches_total" + metricADNameCacheLookupsTotal = "windows_ad_name_cache_lookups_total" + metricADNameCacheHitsTotal = "windows_ad_name_cache_hits_total" +) + +func (w *Windows) collectAD(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorAD] { + w.cache.collection[collectorAD] = true + w.addADCharts() + } + + if pm := pms.FindByName(metricADATQAverageRequestLatency); pm.Len() > 0 { + mx["ad_atq_average_request_latency"] = int64(pm.Max() * precision) + } + if pm := pms.FindByName(metricADATQOutstandingRequests); pm.Len() > 0 { + mx["ad_atq_outstanding_requests"] = int64(pm.Max()) + } + for _, pm := range pms.FindByName(metricADDatabaseOperationsTotal) { + if op := pm.Labels.Get("operation"); op != "" { + mx["ad_database_operations_total_"+op] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricADDirectoryOperationsTotal) { + if op := pm.Labels.Get("operation"); op != "" { + mx["ad_directory_operations_total_"+op] += int64(pm.Value) // sum "origin" + } + } + if pm := pms.FindByName(metricADReplicationInboundObjectsFilteringTotal); pm.Len() > 0 { + mx["ad_replication_inbound_objects_filtered_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADReplicationInboundPropertiesFilteredTotal); pm.Len() > 0 { + mx["ad_replication_inbound_properties_filtered_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADReplicationInboundPropertiesUpdatedTotal); 
pm.Len() > 0 { + mx["ad_replication_inbound_properties_updated_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADReplicationInboundSyncObjectsRemaining); pm.Len() > 0 { + mx["ad_replication_inbound_sync_objects_remaining"] = int64(pm.Max()) + } + for _, pm := range pms.FindByName(metricADReplicationDataInterSiteBytesTotal) { + if name := pm.Labels.Get("direction"); name != "" { + mx["ad_replication_data_intersite_bytes_total_"+name] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricADReplicationDataIntraSiteBytesTotal) { + if name := pm.Labels.Get("direction"); name != "" { + mx["ad_replication_data_intrasite_bytes_total_"+name] = int64(pm.Value) + } + } + if pm := pms.FindByName(metricADReplicationPendingSyncs); pm.Len() > 0 { + mx["ad_replication_pending_synchronizations"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADReplicationSyncRequestsTotal); pm.Len() > 0 { + mx["ad_replication_sync_requests_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADDirectoryServiceThreads); pm.Len() > 0 { + mx["ad_directory_service_threads"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADLDAPLastBindTimeSecondsTotal); pm.Len() > 0 { + mx["ad_ldap_last_bind_time_seconds"] = int64(pm.Max()) + } + for _, pm := range pms.FindByName(metricADBindsTotal) { + mx["ad_binds_total"] += int64(pm.Value) // sum "bind_method"'s + } + if pm := pms.FindByName(metricADLDAPSearchesTotal); pm.Len() > 0 { + mx["ad_ldap_searches_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADNameCacheLookupsTotal); pm.Len() > 0 { + mx["ad_name_cache_lookups_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricADNameCacheHitsTotal); pm.Len() > 0 { + mx["ad_name_cache_hits_total"] = int64(pm.Max()) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_adcs.go b/src/go/collectors/go.d.plugin/modules/windows/collect_adcs.go new file mode 100644 index 00000000000000..0d42d25aad28ec --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_adcs.go @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricADCSRequestsTotal = "windows_adcs_requests_total" + metricADCSFailedRequestsTotal = "windows_adcs_failed_requests_total" + metricADCSIssuedRequestsTotal = "windows_adcs_issued_requests_total" + metricADCSPendingRequestsTotal = "windows_adcs_pending_requests_total" + metricADCSRequestProcessingTime = "windows_adcs_request_processing_time_seconds" + metricADCSRetrievalsTotal = "windows_adcs_retrievals_total" + metricADCSRetrievalsProcessingTime = "windows_adcs_retrievals_processing_time_seconds" + metricADCSRequestCryptoSigningTime = "windows_adcs_request_cryptographic_signing_time_seconds" + metricADCSRequestPolicyModuleProcessingTime = "windows_adcs_request_policy_module_processing_time_seconds" + metricADCSChallengeResponseResponsesTotal = "windows_adcs_challenge_responses_total" + metricADCSChallengeResponseProcessingTime = "windows_adcs_challenge_response_processing_time_seconds" + metricADCSSignedCertTimestampListsTotal = "windows_adcs_signed_certificate_timestamp_lists_total" + metricADCSSignedCertTimestampListProcessingTime = "windows_adcs_signed_certificate_timestamp_list_processing_time_seconds" +) + +func (w *Windows) collectADCS(mx map[string]int64, pms prometheus.Series) { + pms = pms.FindByNames( + metricADCSRequestsTotal, + metricADCSFailedRequestsTotal, + metricADCSIssuedRequestsTotal, + 
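A recurring move in collectAD above: when one metric name fans out into several series distinguished only by a label (direction, bind_method, operation origin), the collector folds them into a single key with +=. A toy, self-contained illustration:

```go
package main

import "fmt"

type series struct {
	value  float64
	labels map[string]string
}

func main() {
	mx := make(map[string]int64)
	pms := []series{
		{16, map[string]string{"direction": "inbound"}},
		{4, map[string]string{"direction": "outbound"}},
	}
	// Same shape as the metricADReplicationData*BytesTotal loops: one mx key
	// per label value, summed in case several series share that value.
	for _, pm := range pms {
		if name := pm.labels["direction"]; name != "" {
			mx["ad_replication_data_intersite_bytes_total_"+name] += int64(pm.value)
		}
	}
	fmt.Println(mx)
}
```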
metricADCSPendingRequestsTotal, + metricADCSRequestProcessingTime, + metricADCSRetrievalsTotal, + metricADCSRetrievalsProcessingTime, + metricADCSRequestCryptoSigningTime, + metricADCSRequestPolicyModuleProcessingTime, + metricADCSChallengeResponseResponsesTotal, + metricADCSChallengeResponseProcessingTime, + metricADCSSignedCertTimestampListsTotal, + metricADCSSignedCertTimestampListProcessingTime, + ) + + seen := make(map[string]bool) + + for _, pm := range pms { + if tmpl := pm.Labels.Get("cert_template"); tmpl != "" && tmpl != "_Total" { + seen[tmpl] = true + name := strings.TrimPrefix(pm.Name(), "windows_adcs_") + v := pm.Value + if strings.HasSuffix(pm.Name(), "_seconds") { + v *= precision + } + mx["adcs_cert_template_"+tmpl+"_"+name] += int64(v) + } + } + + for template := range seen { + if !w.cache.adcs[template] { + w.cache.adcs[template] = true + w.addCertificateTemplateCharts(template) + } + } + for template := range w.cache.adcs { + if !seen[template] { + delete(w.cache.adcs, template) + w.removeCertificateTemplateCharts(template) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_adfs.go b/src/go/collectors/go.d.plugin/modules/windows/collect_adfs.go new file mode 100644 index 00000000000000..2f37c01b83915b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_adfs.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricADFSADLoginConnectionFailuresTotal = "windows_adfs_ad_login_connection_failures_total" + metricADFSCertificateAuthenticationsTotal = "windows_adfs_certificate_authentications_total" + metricADFSDBArtifactFailureTotal = "windows_adfs_db_artifact_failure_total" + metricADFSDBArtifactQueryTimeSeconds = "windows_adfs_db_artifact_query_time_seconds_total" + metricADFSDBConfigFailureTotal = "windows_adfs_db_config_failure_total" + metricADFSDBQueryTimeSecondsTotal = "windows_adfs_db_config_query_time_seconds_total" + metricADFSDeviceAuthenticationsTotal = "windows_adfs_device_authentications_total" + metricADFSExternalAuthenticationsFailureTotal = "windows_adfs_external_authentications_failure_total" + metricADFSExternalAuthenticationsSuccessTotal = "windows_adfs_external_authentications_success_total" + metricADFSExtranetAccountLockoutsTotal = "windows_adfs_extranet_account_lockouts_total" + metricADFSFederatedAuthenticationsTotal = "windows_adfs_federated_authentications_total" + metricADFSFederationMetadataRequestsTotal = "windows_adfs_federation_metadata_requests_total" + + metricADFSOauthAuthorizationRequestsTotal = "windows_adfs_oauth_authorization_requests_total" + metricADFSOauthClientAuthenticationFailureTotal = "windows_adfs_oauth_client_authentication_failure_total" + metricADFSOauthClientAuthenticationSuccessTotal = "windows_adfs_oauth_client_authentication_success_total" + metricADFSOauthClientCredentialsFailureTotal = "windows_adfs_oauth_client_credentials_failure_total" + metricADFSOauthClientCredentialsSuccessTotal = "windows_adfs_oauth_client_credentials_success_total" + metricADFSOauthClientPrivKeyJTWAuthenticationFailureTotal = "windows_adfs_oauth_client_privkey_jtw_authentication_failure_total" + metricADFSOauthClientPrivKeyJWTAuthenticationSuccessTotal = "windows_adfs_oauth_client_privkey_jwt_authentications_success_total" + metricADFSOauthClientSecretBasicAuthenticationsFailureTotal = "windows_adfs_oauth_client_secret_basic_authentications_failure_total" + 
metricADFSOauthClientSecretBasicAuthenticationsSuccessTotal = "windows_adfs_oauth_client_secret_basic_authentications_success_total"
+	metricADFSOauthClientSecretPostAuthenticationsFailureTotal  = "windows_adfs_oauth_client_secret_post_authentications_failure_total"
+	metricADFSOauthClientSecretPostAuthenticationsSuccessTotal  = "windows_adfs_oauth_client_secret_post_authentications_success_total"
+	metricADFSOauthClientWindowsAuthenticationsFailureTotal     = "windows_adfs_oauth_client_windows_authentications_failure_total"
+	metricADFSOauthClientWindowsAuthenticationsSuccessTotal     = "windows_adfs_oauth_client_windows_authentications_success_total"
+	metricADFSOauthLogonCertificateRequestsFailureTotal         = "windows_adfs_oauth_logon_certificate_requests_failure_total"
+	metricADFSOauthLogonCertificateTokenRequestsSuccessTotal    = "windows_adfs_oauth_logon_certificate_token_requests_success_total"
+	metricADFSOauthPasswordGrantRequestsFailureTotal            = "windows_adfs_oauth_password_grant_requests_failure_total"
+	metricADFSOauthPasswordGrantRequestsSuccessTotal            = "windows_adfs_oauth_password_grant_requests_success_total"
+	metricADFSOauthTokenRequestsSuccessTotal                    = "windows_adfs_oauth_token_requests_success_total"
+
+	metricADFSPassiveRequestsTotal                    = "windows_adfs_passive_requests_total"
+	metricADFSPassportAuthenticationsTotal            = "windows_adfs_passport_authentications_total"
+	metricADFSPasswordChangeFailedTotal               = "windows_adfs_password_change_failed_total"
+	metricADFSPasswordChangeSucceededTotal            = "windows_adfs_password_change_succeeded_total"
+	metricADFSSamlpTokenRequestsSuccessTotal          = "windows_adfs_samlp_token_requests_success_total"
+	metricADFSSSOAuthenticationsFailureTotal          = "windows_adfs_sso_authentications_failure_total"
+	metricADFSSSOAuthenticationsSuccessTotal          = "windows_adfs_sso_authentications_success_total"
+	metricADFSTokenRequestsTotal                      = "windows_adfs_token_requests_total"
+	metricADFSUserPasswordAuthenticationsFailureTotal = "windows_adfs_userpassword_authentications_failure_total"
+	metricADFSUserPasswordAuthenticationsSuccessTotal = "windows_adfs_userpassword_authentications_success_total"
+	metricADFSWindowsIntegratedAuthenticationsTotal   = "windows_adfs_windows_integrated_authentications_total"
+	metricADFSWSFedTokenRequestsSuccessTotal          = "windows_adfs_wsfed_token_requests_success_total"
+	metricADFSWSTrustTokenRequestsSuccessTotal        = "windows_adfs_wstrust_token_requests_success_total"
+)
+
+var adfsMetrics = []string{
+	metricADFSADLoginConnectionFailuresTotal,
+	metricADFSCertificateAuthenticationsTotal,
+	metricADFSDBArtifactFailureTotal,
+	metricADFSDBArtifactQueryTimeSeconds,
+	metricADFSDBConfigFailureTotal,
+	metricADFSDBQueryTimeSecondsTotal,
+	metricADFSDeviceAuthenticationsTotal,
+	metricADFSExternalAuthenticationsFailureTotal,
+	metricADFSExternalAuthenticationsSuccessTotal,
+	metricADFSExtranetAccountLockoutsTotal,
+	metricADFSFederatedAuthenticationsTotal,
+	metricADFSFederationMetadataRequestsTotal,
+	metricADFSOauthAuthorizationRequestsTotal,
+	metricADFSOauthClientAuthenticationFailureTotal,
+	metricADFSOauthClientAuthenticationSuccessTotal,
+	metricADFSOauthClientCredentialsFailureTotal,
+	metricADFSOauthClientCredentialsSuccessTotal,
+	metricADFSOauthClientPrivKeyJTWAuthenticationFailureTotal,
+	metricADFSOauthClientPrivKeyJWTAuthenticationSuccessTotal,
+	metricADFSOauthClientSecretBasicAuthenticationsFailureTotal,
+	metricADFSOauthClientSecretBasicAuthenticationsSuccessTotal,
+	metricADFSOauthClientSecretPostAuthenticationsFailureTotal,
+	metricADFSOauthClientSecretPostAuthenticationsSuccessTotal,
+	metricADFSOauthClientWindowsAuthenticationsFailureTotal,
+	metricADFSOauthClientWindowsAuthenticationsSuccessTotal,
+	metricADFSOauthLogonCertificateRequestsFailureTotal,
+	metricADFSOauthLogonCertificateTokenRequestsSuccessTotal,
+	metricADFSOauthPasswordGrantRequestsFailureTotal,
+	metricADFSOauthPasswordGrantRequestsSuccessTotal,
+	metricADFSOauthTokenRequestsSuccessTotal,
+	metricADFSPassiveRequestsTotal,
+	metricADFSPassportAuthenticationsTotal,
+	metricADFSPasswordChangeFailedTotal,
+	metricADFSPasswordChangeSucceededTotal,
+	metricADFSSamlpTokenRequestsSuccessTotal,
+	metricADFSSSOAuthenticationsFailureTotal,
+	metricADFSSSOAuthenticationsSuccessTotal,
+	metricADFSTokenRequestsTotal,
+	metricADFSUserPasswordAuthenticationsFailureTotal,
+	metricADFSUserPasswordAuthenticationsSuccessTotal,
+	metricADFSWindowsIntegratedAuthenticationsTotal,
+	metricADFSWSFedTokenRequestsSuccessTotal,
+	metricADFSWSTrustTokenRequestsSuccessTotal,
+}
+
+func (w *Windows) collectADFS(mx map[string]int64, pms prometheus.Series) {
+	if !w.cache.collection[collectorADFS] {
+		w.cache.collection[collectorADFS] = true
+		w.addADFSCharts()
+	}
+
+	for _, pm := range pms.FindByNames(adfsMetrics...) {
+		name := strings.TrimPrefix(pm.Name(), "windows_")
+		v := pm.Value
+		if strings.HasSuffix(name, "_seconds_total") {
+			v *= precision
+		}
+		mx[name] = int64(v)
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_collector.go b/src/go/collectors/go.d.plugin/modules/windows/collect_collector.go
new file mode 100644
index 00000000000000..1faf7d703e5f31
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/collect_collector.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+)
+
+const (
+	metricCollectorDuration = "windows_exporter_collector_duration_seconds"
+	metricCollectorSuccess  = "windows_exporter_collector_success"
+)
+
+func (w *Windows) collectCollector(mx map[string]int64, pms prometheus.Series) {
+	seen := make(map[string]bool)
+	px := "collector_"
+	for _, pm := range pms.FindByName(metricCollectorDuration) {
+		if name := pm.Labels.Get("collector"); name != "" {
+			seen[name] = true
+			mx[px+name+"_duration"] = int64(pm.Value * precision)
+		}
+	}
+	for _, pm := range pms.FindByName(metricCollectorSuccess) {
+		if name := pm.Labels.Get("collector"); name != "" {
+			seen[name] = true
+			if pm.Value == 1 {
+				mx[px+name+"_status_success"], mx[px+name+"_status_fail"] = 1, 0
+			} else {
+				mx[px+name+"_status_success"], mx[px+name+"_status_fail"] = 0, 1
+			}
+		}
+	}
+
+	for name := range seen {
+		if !w.cache.collectors[name] {
+			w.cache.collectors[name] = true
+			w.addCollectorCharts(name)
+		}
+	}
+	for name := range w.cache.collectors {
+		if !seen[name] {
+			delete(w.cache.collectors, name)
+			w.removeCollectorCharts(name)
+		}
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_cpu.go b/src/go/collectors/go.d.plugin/modules/windows/collect_cpu.go
new file mode 100644
index 00000000000000..d5369cb40d2881
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/collect_cpu.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+)
+
+const (
+	metricCPUTimeTotal       = "windows_cpu_time_total"
+	metricCPUInterruptsTotal = "windows_cpu_interrupts_total"
+	metricCPUDPCsTotal       = "windows_cpu_dpcs_total"
+	metricCPUCStateTotal
= "windows_cpu_cstate_seconds_total" +) + +func (w *Windows) collectCPU(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorCPU] { + w.cache.collection[collectorCPU] = true + w.addCPUCharts() + } + + seen := make(map[string]bool) + for _, pm := range pms.FindByName(metricCPUTimeTotal) { + core := pm.Labels.Get("core") + mode := pm.Labels.Get("mode") + if core == "" || mode == "" { + continue + } + + seen[core] = true + mx["cpu_"+mode+"_time"] += int64(pm.Value * precision) + mx["cpu_core_"+core+"_"+mode+"_time"] += int64(pm.Value * precision) + } + + for _, pm := range pms.FindByName(metricCPUInterruptsTotal) { + core := pm.Labels.Get("core") + if core == "" { + continue + } + + seen[core] = true + mx["cpu_core_"+core+"_interrupts"] += int64(pm.Value) + } + + for _, pm := range pms.FindByName(metricCPUDPCsTotal) { + core := pm.Labels.Get("core") + if core == "" { + continue + } + + seen[core] = true + mx["cpu_core_"+core+"_dpcs"] += int64(pm.Value) + } + + for _, pm := range pms.FindByName(metricCPUCStateTotal) { + core := pm.Labels.Get("core") + state := pm.Labels.Get("state") + if core == "" || state == "" { + continue + } + + seen[core] = true + mx["cpu_core_"+core+"_cstate_"+state] += int64(pm.Value * precision) + } + + for core := range seen { + if !w.cache.cores[core] { + w.cache.cores[core] = true + w.addCPUCoreCharts(core) + } + } + for core := range w.cache.cores { + if !seen[core] { + delete(w.cache.cores, core) + w.removeCPUCoreCharts(core) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_exchange.go b/src/go/collectors/go.d.plugin/modules/windows/collect_exchange.go new file mode 100644 index 00000000000000..080f122f0a0a4c --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_exchange.go @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricExchangeActiveSyncPingCmdsPending = "windows_exchange_activesync_ping_cmds_pending" + metricExchangeActiveSyncRequestsTotal = "windows_exchange_activesync_requests_total" + metricExchangeActiveSyncCMDsTotal = "windows_exchange_activesync_sync_cmds_total" + metricExchangeAutoDiscoverRequestsTotal = "windows_exchange_autodiscover_requests_total" + metricExchangeAvailServiceRequestsPerSec = "windows_exchange_avail_service_requests_per_sec" + metricExchangeOWACurrentUniqueUsers = "windows_exchange_owa_current_unique_users" + metricExchangeOWARequestsTotal = "windows_exchange_owa_requests_total" + metricExchangeRPCActiveUserCount = "windows_exchange_rpc_active_user_count" + metricExchangeRPCAvgLatencySec = "windows_exchange_rpc_avg_latency_sec" + metricExchangeRPCConnectionCount = "windows_exchange_rpc_connection_count" + metricExchangeRPCOperationsTotal = "windows_exchange_rpc_operations_total" + metricExchangeRPCRequests = "windows_exchange_rpc_requests" + metricExchangeRPCUserCount = "windows_exchange_rpc_user_count" + + metricExchangeTransportQueuesActiveMailboxDelivery = "windows_exchange_transport_queues_active_mailbox_delivery" + metricExchangeTransportQueuesExternalActiveRemoteDelivery = "windows_exchange_transport_queues_external_active_remote_delivery" + metricExchangeTransportQueuesExternalLargestDelivery = "windows_exchange_transport_queues_external_largest_delivery" + metricExchangeTransportQueuesInternalActiveRemoteDelivery = "windows_exchange_transport_queues_internal_active_remote_delivery" + 
metricExchangeTransportQueuesInternalLargestDelivery = "windows_exchange_transport_queues_internal_largest_delivery" + metricExchangeTransportQueuesPoison = "windows_exchange_transport_queues_poison" + metricExchangeTransportQueuesRetryMailboxDelivery = "windows_exchange_transport_queues_retry_mailbox_delivery" + metricExchangeTransportQueuesUnreachable = "windows_exchange_transport_queues_unreachable" + + metricExchangeWorkloadActiveTasks = "windows_exchange_workload_active_tasks" + metricExchangeWorkloadCompletedTasks = "windows_exchange_workload_completed_tasks" + metricExchangeWorkloadQueuedTasks = "windows_exchange_workload_queued_tasks" + metricExchangeWorkloadYieldedTasks = "windows_exchange_workload_yielded_tasks" + metricExchangeWorkloadIsActive = "windows_exchange_workload_is_active" + + metricExchangeLDAPLongRunningOPSPerSec = "windows_exchange_ldap_long_running_ops_per_sec" + metricExchangeLDAPReadTimeSec = "windows_exchange_ldap_read_time_sec" + metricExchangeLDAPSearchTmeSec = "windows_exchange_ldap_search_time_sec" + metricExchangeLDAPWriteTimeSec = "windows_exchange_ldap_write_time_sec" + metricExchangeLDAPTimeoutErrorsTotal = "windows_exchange_ldap_timeout_errors_total" + + metricExchangeHTTPProxyAvgAuthLatency = "windows_exchange_http_proxy_avg_auth_latency" + metricExchangeHTTPProxyAvgCASProcessingLatencySec = "windows_exchange_http_proxy_avg_cas_proccessing_latency_sec" + metricExchangeHTTPProxyMailboxProxyFailureRate = "windows_exchange_http_proxy_mailbox_proxy_failure_rate" + metricExchangeHTTPProxyMailboxServerLocatorAvgLatencySec = "windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec" + metricExchangeHTTPProxyOutstandingProxyRequests = "windows_exchange_http_proxy_outstanding_proxy_requests" + metricExchangeHTTPProxyRequestsTotal = "windows_exchange_http_proxy_requests_total" +) + +func (w *Windows) collectExchange(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorExchange] { + w.cache.collection[collectorExchange] = true + w.addExchangeCharts() + } + + if pm := pms.FindByName(metricExchangeActiveSyncPingCmdsPending); pm.Len() > 0 { + mx["exchange_activesync_ping_cmds_pending"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeActiveSyncRequestsTotal); pm.Len() > 0 { + mx["exchange_activesync_requests_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeActiveSyncCMDsTotal); pm.Len() > 0 { + mx["exchange_activesync_sync_cmds_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeAutoDiscoverRequestsTotal); pm.Len() > 0 { + mx["exchange_autodiscover_requests_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeAvailServiceRequestsPerSec); pm.Len() > 0 { + mx["exchange_avail_service_requests_per_sec"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeOWACurrentUniqueUsers); pm.Len() > 0 { + mx["exchange_owa_current_unique_users"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeOWARequestsTotal); pm.Len() > 0 { + mx["exchange_owa_requests_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeRPCActiveUserCount); pm.Len() > 0 { + mx["exchange_rpc_active_user_count"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeRPCAvgLatencySec); pm.Len() > 0 { + mx["exchange_rpc_avg_latency_sec"] = int64(pm.Max() * precision) + } + if pm := pms.FindByName(metricExchangeRPCConnectionCount); pm.Len() > 0 { + mx["exchange_rpc_connection_count"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeRPCOperationsTotal); 
pm.Len() > 0 { + mx["exchange_rpc_operations_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeRPCRequests); pm.Len() > 0 { + mx["exchange_rpc_requests"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricExchangeRPCUserCount); pm.Len() > 0 { + mx["exchange_rpc_user_count"] = int64(pm.Max()) + } + + w.collectExchangeAddTransportQueueMetric(mx, pms) + w.collectExchangeAddWorkloadMetric(mx, pms) + w.collectExchangeAddLDAPMetric(mx, pms) + w.collectExchangeAddHTTPProxyMetric(mx, pms) +} + +func (w *Windows) collectExchangeAddTransportQueueMetric(mx map[string]int64, pms prometheus.Series) { + pms = pms.FindByNames( + metricExchangeTransportQueuesActiveMailboxDelivery, + metricExchangeTransportQueuesExternalActiveRemoteDelivery, + metricExchangeTransportQueuesExternalLargestDelivery, + metricExchangeTransportQueuesInternalActiveRemoteDelivery, + metricExchangeTransportQueuesInternalLargestDelivery, + metricExchangeTransportQueuesPoison, + metricExchangeTransportQueuesRetryMailboxDelivery, + metricExchangeTransportQueuesUnreachable, + ) + + for _, pm := range pms { + if name := pm.Labels.Get("name"); name != "" && name != "total_excluding_priority_none" { + metric := strings.TrimPrefix(pm.Name(), "windows_") + mx[metric+"_"+name] += int64(pm.Value) + } + } +} + +func (w *Windows) collectExchangeAddWorkloadMetric(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByNames( + metricExchangeWorkloadActiveTasks, + metricExchangeWorkloadCompletedTasks, + metricExchangeWorkloadQueuedTasks, + metricExchangeWorkloadYieldedTasks, + ) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + metric := strings.TrimPrefix(pm.Name(), "windows_exchange_workload_") + mx["exchange_workload_"+name+"_"+metric] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricExchangeWorkloadIsActive) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + mx["exchange_workload_"+name+"_is_active"] += boolToInt(pm.Value == 1) + mx["exchange_workload_"+name+"_is_paused"] += boolToInt(pm.Value == 0) + } + } + + for name := range seen { + if !w.cache.exchangeWorkload[name] { + w.cache.exchangeWorkload[name] = true + w.addExchangeWorkloadCharts(name) + } + } + for name := range w.cache.exchangeWorkload { + if !seen[name] { + delete(w.cache.exchangeWorkload, name) + w.removeExchangeWorkloadCharts(name) + } + } +} + +func (w *Windows) collectExchangeAddLDAPMetric(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByNames( + metricExchangeLDAPLongRunningOPSPerSec, + metricExchangeLDAPTimeoutErrorsTotal, + ) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + metric := strings.TrimPrefix(pm.Name(), "windows_exchange_ldap_") + mx["exchange_ldap_"+name+"_"+metric] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByNames( + metricExchangeLDAPReadTimeSec, + metricExchangeLDAPSearchTmeSec, + metricExchangeLDAPWriteTimeSec, + ) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + metric := strings.TrimPrefix(pm.Name(), "windows_exchange_ldap_") + mx["exchange_ldap_"+name+"_"+metric] += int64(pm.Value * precision) + } + } + + for name := range seen { + if !w.cache.exchangeLDAP[name] { + w.cache.exchangeLDAP[name] = true + w.addExchangeLDAPCharts(name) + } + } + for name := range w.cache.exchangeLDAP { + if !seen[name] { + delete(w.cache.exchangeLDAP, name) + w.removeExchangeLDAPCharts(name) + } + } +} + 
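+// Editorial note: every per-entity collector in this module follows the same
+// discover/garbage-collect lifecycle: record each label value seen in the
+// current scrape, add charts the first time a value appears, and remove the
+// charts of any cached value that stopped appearing. The helper below is a
+// minimal illustrative sketch of that pattern; the name syncEntityCharts is
+// hypothetical and nothing in the original change references it.
+func syncEntityCharts(seen, cache map[string]bool, addCharts, removeCharts func(name string)) {
+	for name := range seen {
+		if !cache[name] {
+			cache[name] = true // first sighting: create the entity's charts
+			addCharts(name)
+		}
+	}
+	for name := range cache {
+		if !seen[name] {
+			delete(cache, name) // entity disappeared: retire its charts
+			removeCharts(name)
+		}
+	}
+}
+
+// In the functions above and below, the cache sets are fields such as
+// w.cache.exchangeWorkload and the hooks are methods such as
+// w.addExchangeWorkloadCharts / w.removeExchangeWorkloadCharts.
+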
+func (w *Windows) collectExchangeAddHTTPProxyMetric(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByNames( + metricExchangeHTTPProxyAvgAuthLatency, + metricExchangeHTTPProxyOutstandingProxyRequests, + metricExchangeHTTPProxyRequestsTotal, + ) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + metric := strings.TrimPrefix(pm.Name(), "windows_exchange_http_proxy_") + mx["exchange_http_proxy_"+name+"_"+metric] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByNames( + metricExchangeHTTPProxyAvgCASProcessingLatencySec, + metricExchangeHTTPProxyMailboxProxyFailureRate, + metricExchangeHTTPProxyMailboxServerLocatorAvgLatencySec, + ) { + if name := pm.Labels.Get("name"); name != "" { + seen[name] = true + metric := strings.TrimPrefix(pm.Name(), "windows_exchange_http_proxy_") + mx["exchange_http_proxy_"+name+"_"+metric] += int64(pm.Value * precision) + } + } + + for name := range seen { + if !w.cache.exchangeHTTPProxy[name] { + w.cache.exchangeHTTPProxy[name] = true + w.addExchangeHTTPProxyCharts(name) + } + } + for name := range w.cache.exchangeHTTPProxy { + if !seen[name] { + delete(w.cache.exchangeHTTPProxy, name) + w.removeExchangeHTTPProxyCharts(name) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_hyperv.go b/src/go/collectors/go.d.plugin/modules/windows/collect_hyperv.go new file mode 100644 index 00000000000000..2caa88ba75cef3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_hyperv.go @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricHypervHealthCritical = "windows_hyperv_health_critical" + metricHypervHealthOK = "windows_hyperv_health_ok" + + metricHypervHypervisorLogicalProcessors = "windows_hyperv_hypervisor_logical_processors" + metricHypervHypervisorVirtualProcessors = "windows_hyperv_hypervisor_virtual_processors" + + metricHypervHostCPUGuestRunTime = "windows_hyperv_host_cpu_guest_run_time" + metricHypervHostCPUHypervisorRunTime = "windows_hyperv_host_cpu_hypervisor_run_time" + metricHypervHostCPURemoteRunTime = "windows_hyperv_host_cpu_remote_run_time" + metricHypervHostCPUTotalRunTime = "windows_hyperv_host_cpu_total_run_time" + metricHypervHostLPGuestRunTimePercent = "windows_hyperv_host_lp_guest_run_time_percent" + metricHypervHostLPHypervisorRunTimePercent = "windows_hyperv_host_lp_hypervisor_run_time_percent" + metricHypervHostLPTotalRunTimePercent = "windows_hyperv_host_lp_total_run_time_percent" + + metricHypervRootPartition4KGPAPages = "windows_hyperv_root_partition_4K_gpa_pages" + metricHypervRootPartition2MGPAPages = "windows_hyperv_root_partition_2M_gpa_pages" + metricHypervRootPartition1GGPAPages = "windows_hyperv_root_partition_1G_gpa_pages" + metricHypervRootPartition4KDevicePages = "windows_hyperv_root_partition_4K_device_pages" + metricHypervRootPartition2MDevicePages = "windows_hyperv_root_partition_2M_device_pages" + metricHypervRootPartition1GDevicePages = "windows_hyperv_root_partition_1G_device_pages" + metricHypervRootPartitionGPASpaceModifications = "windows_hyperv_root_partition_gpa_space_modifications" + metricHypervRootPartitionAttachedDevices = "windows_hyperv_root_partition_attached_devices" + metricHypervRootPartitionDepositedPages = "windows_hyperv_root_partition_deposited_pages" + metricHypervRootPartitionPhysicalPagesAllocated = 
"windows_hyperv_root_partition_physical_pages_allocated" // SkippedTimerTicks + metricHypervRootPartitionDeviceDMAErrors = "windows_hyperv_root_partition_device_dma_errors" + metricHypervRootPartitionDeviceInterruptErrors = "windows_hyperv_root_partition_device_interrupt_errors" + metricHypervRootPartitionDeviceInterruptThrottleEvents = "windows_hyperv_root_partition_device_interrupt_throttle_events" + metricHypervRootPartitionIOTLBFlush = "windows_hyperv_root_partition_io_tlb_flush" + metricHypervRootPartitionAddressSpace = "windows_hyperv_root_partition_address_spaces" + metricHypervRootPartitionVirtualTLBPages = "windows_hyperv_root_partition_virtual_tlb_pages" + metricHypervRootPartitionVirtualTLBFlushEntries = "windows_hyperv_root_partition_virtual_tlb_flush_entires" + + metricsHypervVMCPUGuestRunTime = "windows_hyperv_vm_cpu_guest_run_time" + metricsHypervVMCPUHypervisorRunTime = "windows_hyperv_vm_cpu_hypervisor_run_time" + metricsHypervVMCPURemoteRunTime = "windows_hyperv_vm_cpu_remote_run_time" + metricsHypervVMCPUTotalRunTime = "windows_hyperv_vm_cpu_total_run_time" + + metricHypervVMMemoryPhysical = "windows_hyperv_vm_memory_physical" + metricHypervVMMemoryPhysicalGuestVisible = "windows_hyperv_vm_memory_physical_guest_visible" + metricHypervVMMemoryPressureCurrent = "windows_hyperv_vm_memory_pressure_current" + metricHyperVVIDPhysicalPagesAllocated = "windows_hyperv_vid_physical_pages_allocated" + metricHyperVVIDRemotePhysicalPages = "windows_hyperv_vid_remote_physical_pages" + + metricHypervVMDeviceBytesRead = "windows_hyperv_vm_device_bytes_read" + metricHypervVMDeviceBytesWritten = "windows_hyperv_vm_device_bytes_written" + metricHypervVMDeviceOperationsRead = "windows_hyperv_vm_device_operations_read" + metricHypervVMDeviceOperationsWritten = "windows_hyperv_vm_device_operations_written" + metricHypervVMDeviceErrorCount = "windows_hyperv_vm_device_error_count" + + metricHypervVMInterfaceBytesReceived = "windows_hyperv_vm_interface_bytes_received" + metricHypervVMInterfaceBytesSent = "windows_hyperv_vm_interface_bytes_sent" + metricHypervVMInterfacePacketsIncomingDropped = "windows_hyperv_vm_interface_packets_incoming_dropped" + metricHypervVMInterfacePacketsOutgoingDropped = "windows_hyperv_vm_interface_packets_outgoing_dropped" + metricHypervVMInterfacePacketsReceived = "windows_hyperv_vm_interface_packets_received" + metricHypervVMInterfacePacketsSent = "windows_hyperv_vm_interface_packets_sent" + + metricHypervVSwitchBroadcastPacketsReceivedTotal = "windows_hyperv_vswitch_broadcast_packets_received_total" + metricHypervVSwitchBroadcastPacketsSentTotal = "windows_hyperv_vswitch_broadcast_packets_sent_total" + metricHypervVSwitchBytesReceivedTotal = "windows_hyperv_vswitch_bytes_received_total" + metricHypervVSwitchBytesSentTotal = "windows_hyperv_vswitch_bytes_sent_total" + metricHypervVSwitchPacketsReceivedTotal = "windows_hyperv_vswitch_packets_received_total" + metricHypervVSwitchPacketsSentTotal = "windows_hyperv_vswitch_packets_sent_total" + metricHypervVSwitchDirectedPacketsReceivedTotal = "windows_hyperv_vswitch_directed_packets_received_total" + metricHypervVSwitchDirectedPacketsSendTotal = "windows_hyperv_vswitch_directed_packets_send_total" + metricHypervVSwitchDroppedPacketsIncomingTotal = "windows_hyperv_vswitch_dropped_packets_incoming_total" + metricHypervVSwitchDroppedPacketsOutcomingTotal = "windows_hyperv_vswitch_dropped_packets_outcoming_total" + metricHypervVSwitchExtensionDroppedAttacksIncomingTotal = 
"windows_hyperv_vswitch_extensions_dropped_packets_incoming_total" + metricHypervVSwitchExtensionDroppedPacketsOutcomingTotal = "windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total" + metricHypervVSwitchLearnedMACAddressTotal = "windows_hyperv_vswitch_learned_mac_addresses_total" + metricHypervVSwitchMulticastPacketsReceivedTotal = "windows_hyperv_vswitch_multicast_packets_received_total" + metricHypervVSwitchMulticastPacketsSentTotal = "windows_hyperv_vswitch_multicast_packets_sent_total" + metricHypervVSwitchNumberOfSendChannelMovesTotal = "windows_hyperv_vswitch_number_of_send_channel_moves_total" + metricHypervVSwitchNumberOfVMQMovesTotal = "windows_hyperv_vswitch_number_of_vmq_moves_total" + metricHypervVSwitchPacketsFloodedTotal = "windows_hyperv_vswitch_packets_flooded_total" + metricHypervVSwitchPurgedMACAddresses = "windows_hyperv_vswitch_purged_mac_addresses_total" +) + +func (w *Windows) collectHyperv(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorHyperv] { + w.cache.collection[collectorHyperv] = true + w.addHypervCharts() + } + + for _, v := range []string{ + metricHypervHealthOK, + metricHypervHealthCritical, + metricHypervRootPartition4KGPAPages, + metricHypervRootPartition2MGPAPages, + metricHypervRootPartition1GGPAPages, + metricHypervRootPartition4KDevicePages, + metricHypervRootPartition2MDevicePages, + metricHypervRootPartition1GDevicePages, + metricHypervRootPartitionGPASpaceModifications, + metricHypervRootPartitionAddressSpace, + metricHypervRootPartitionAttachedDevices, + metricHypervRootPartitionDepositedPages, + metricHypervRootPartitionPhysicalPagesAllocated, + metricHypervRootPartitionDeviceDMAErrors, + metricHypervRootPartitionDeviceInterruptErrors, + metricHypervRootPartitionDeviceInterruptThrottleEvents, + metricHypervRootPartitionIOTLBFlush, + metricHypervRootPartitionVirtualTLBPages, + metricHypervRootPartitionVirtualTLBFlushEntries, + } { + for _, pm := range pms.FindByName(v) { + name := strings.TrimPrefix(pm.Name(), "windows_") + mx[name] = int64(pm.Value) + } + } + + w.collectHypervVM(mx, pms) + w.collectHypervVMDevices(mx, pms) + w.collectHypervVMInterface(mx, pms) + w.collectHypervVSwitch(mx, pms) +} + +func (w *Windows) collectHypervVM(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "hyperv_vm_" + + for _, v := range []string{ + metricHypervVMMemoryPhysical, + metricHypervVMMemoryPhysicalGuestVisible, + metricHypervVMMemoryPressureCurrent, + metricsHypervVMCPUGuestRunTime, + metricsHypervVMCPUHypervisorRunTime, + metricsHypervVMCPURemoteRunTime, + } { + for _, pm := range pms.FindByName(v) { + if vm := pm.Labels.Get("vm"); vm != "" { + name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm") + seen[vm] = true + mx[px+hypervCleanName(vm)+name] += int64(pm.Value) + } + } + } + + px = "hyperv_vid_" + for _, v := range []string{ + metricHyperVVIDPhysicalPagesAllocated, + metricHyperVVIDRemotePhysicalPages, + } { + for _, pm := range pms.FindByName(v) { + if vm := pm.Labels.Get("vm"); vm != "" { + name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vid") + seen[vm] = true + mx[px+hypervCleanName(vm)+name] = int64(pm.Value) + } + } + } + + for v := range seen { + if !w.cache.hypervVMMem[v] { + w.cache.hypervVMMem[v] = true + w.addHypervVMCharts(v) + } + } + for v := range w.cache.hypervVMMem { + if !seen[v] { + delete(w.cache.hypervVMMem, v) + w.removeHypervVMCharts(v) + } + } +} + +func (w *Windows) collectHypervVMDevices(mx map[string]int64, pms prometheus.Series) { + 
seen := make(map[string]bool) + px := "hyperv_vm_device_" + + for _, v := range []string{ + metricHypervVMDeviceBytesRead, + metricHypervVMDeviceBytesWritten, + metricHypervVMDeviceOperationsRead, + metricHypervVMDeviceOperationsWritten, + metricHypervVMDeviceErrorCount, + } { + for _, pm := range pms.FindByName(v) { + if device := pm.Labels.Get("vm_device"); device != "" { + name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm_device") + seen[device] = true + mx[px+hypervCleanName(device)+name] = int64(pm.Value) + } + } + } + + for v := range seen { + if !w.cache.hypervVMDevices[v] { + w.cache.hypervVMDevices[v] = true + w.addHypervVMDeviceCharts(v) + } + } + for v := range w.cache.hypervVMDevices { + if !seen[v] { + delete(w.cache.hypervVMDevices, v) + w.removeHypervVMDeviceCharts(v) + } + } +} + +func (w *Windows) collectHypervVMInterface(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "hyperv_vm_interface_" + + for _, v := range []string{ + metricHypervVMInterfaceBytesReceived, + metricHypervVMInterfaceBytesSent, + metricHypervVMInterfacePacketsIncomingDropped, + metricHypervVMInterfacePacketsOutgoingDropped, + metricHypervVMInterfacePacketsReceived, + metricHypervVMInterfacePacketsSent, + } { + for _, pm := range pms.FindByName(v) { + if iface := pm.Labels.Get("vm_interface"); iface != "" { + name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm_interface") + seen[iface] = true + mx[px+hypervCleanName(iface)+name] = int64(pm.Value) + } + } + } + + for v := range seen { + if !w.cache.hypervVMInterfaces[v] { + w.cache.hypervVMInterfaces[v] = true + w.addHypervVMInterfaceCharts(v) + } + } + for v := range w.cache.hypervVMInterfaces { + if !seen[v] { + delete(w.cache.hypervVMInterfaces, v) + w.removeHypervVMInterfaceCharts(v) + } + } +} + +func (w *Windows) collectHypervVSwitch(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "hyperv_vswitch_" + + for _, v := range []string{ + metricHypervVSwitchBytesReceivedTotal, + metricHypervVSwitchBytesSentTotal, + metricHypervVSwitchPacketsReceivedTotal, + metricHypervVSwitchPacketsSentTotal, + metricHypervVSwitchDirectedPacketsReceivedTotal, + metricHypervVSwitchDirectedPacketsSendTotal, + metricHypervVSwitchBroadcastPacketsReceivedTotal, + metricHypervVSwitchBroadcastPacketsSentTotal, + metricHypervVSwitchMulticastPacketsReceivedTotal, + metricHypervVSwitchMulticastPacketsSentTotal, + metricHypervVSwitchDroppedPacketsIncomingTotal, + metricHypervVSwitchDroppedPacketsOutcomingTotal, + metricHypervVSwitchExtensionDroppedAttacksIncomingTotal, + metricHypervVSwitchExtensionDroppedPacketsOutcomingTotal, + metricHypervVSwitchPacketsFloodedTotal, + metricHypervVSwitchLearnedMACAddressTotal, + metricHypervVSwitchPurgedMACAddresses, + metricHypervVSwitchNumberOfSendChannelMovesTotal, + metricHypervVSwitchNumberOfVMQMovesTotal, + } { + for _, pm := range pms.FindByName(v) { + if vswitch := pm.Labels.Get("vswitch"); vswitch != "" { + name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vswitch") + seen[vswitch] = true + mx[px+hypervCleanName(vswitch)+name] = int64(pm.Value) + } + } + } + + for v := range seen { + if !w.cache.hypervVswitch[v] { + w.cache.hypervVswitch[v] = true + w.addHypervVSwitchCharts(v) + } + } + for v := range w.cache.hypervVswitch { + if !seen[v] { + delete(w.cache.hypervVswitch, v) + w.removeHypervVSwitchCharts(v) + } + } +} + +var hypervNameReplacer = strings.NewReplacer(" ", "_", "?", "_", ":", "_", ".", "_") + +func hypervCleanName(name string) string 
{ + name = hypervNameReplacer.Replace(name) + return strings.ToLower(name) +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_iis.go b/src/go/collectors/go.d.plugin/modules/windows/collect_iis.go new file mode 100644 index 00000000000000..5e9cd03f16ead2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_iis.go @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricIISCurrentAnonymousUsers = "windows_iis_current_anonymous_users" + metricIISCurrentNonAnonymousUsers = "windows_iis_current_non_anonymous_users" + metricIISCurrentConnections = "windows_iis_current_connections" + metricIICurrentISAPIExtRequests = "windows_iis_current_isapi_extension_requests" + metricIISUptime = "windows_iis_service_uptime" + + metricIISReceivedBytesTotal = "windows_iis_received_bytes_total" + metricIISSentBytesTotal = "windows_iis_sent_bytes_total" + metricIISRequestsTotal = "windows_iis_requests_total" + metricIISIPAPIExtRequestsTotal = "windows_iis_ipapi_extension_requests_total" + metricIISConnAttemptsAllInstancesTotal = "windows_iis_connection_attempts_all_instances_total" + metricIISFilesReceivedTotal = "windows_iis_files_received_total" + metricIISFilesSentTotal = "windows_iis_files_sent_total" + metricIISLogonAttemptsTotal = "windows_iis_logon_attempts_total" + metricIISLockedErrorsTotal = "windows_iis_locked_errors_total" + metricIISNotFoundErrorsTotal = "windows_iis_not_found_errors_total" +) + +func (w *Windows) collectIIS(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "iis_website_" + for _, pm := range pms.FindByName(metricIISCurrentAnonymousUsers) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_current_anonymous_users"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISCurrentNonAnonymousUsers) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_current_non_anonymous_users"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISCurrentConnections) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_current_connections"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIICurrentISAPIExtRequests) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_current_isapi_extension_requests"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISUptime) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_service_uptime"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISReceivedBytesTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_received_bytes_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISSentBytesTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_sent_bytes_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISRequestsTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_requests_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISConnAttemptsAllInstancesTotal) { + if name := 
cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_connection_attempts_all_instances_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISFilesReceivedTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_files_received_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISFilesSentTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_files_sent_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISIPAPIExtRequestsTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_isapi_extension_requests_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISLogonAttemptsTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_logon_attempts_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISLockedErrorsTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_locked_errors_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricIISNotFoundErrorsTotal) { + if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" { + seen[name] = true + mx[px+name+"_not_found_errors_total"] += int64(pm.Value) + } + } + + for site := range seen { + if !w.cache.iis[site] { + w.cache.iis[site] = true + w.addIISWebsiteCharts(site) + } + } + for site := range w.cache.iis { + if !seen[site] { + delete(w.cache.iis, site) + w.removeIIWebsiteSCharts(site) + } + } +} + +func cleanWebsiteName(name string) string { + return strings.ReplaceAll(name, " ", "_") +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_logical_disk.go b/src/go/collectors/go.d.plugin/modules/windows/collect_logical_disk.go new file mode 100644 index 00000000000000..bf84a2e9dcd893 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_logical_disk.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricLDReadBytesTotal = "windows_logical_disk_read_bytes_total" + metricLDWriteBytesTotal = "windows_logical_disk_write_bytes_total" + metricLDReadsTotal = "windows_logical_disk_reads_total" + metricLDWritesTotal = "windows_logical_disk_writes_total" + metricLDSizeBytes = "windows_logical_disk_size_bytes" + metricLDFreeBytes = "windows_logical_disk_free_bytes" + metricLDReadLatencyTotal = "windows_logical_disk_read_latency_seconds_total" + metricLDWriteLatencyTotal = "windows_logical_disk_write_latency_seconds_total" +) + +func (w *Windows) collectLogicalDisk(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "logical_disk_" + for _, pm := range pms.FindByName(metricLDReadBytesTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_read_bytes_total"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDWriteBytesTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_write_bytes_total"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDReadsTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && 
!strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_reads_total"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDWritesTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_writes_total"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDSizeBytes) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_total_space"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDFreeBytes) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_free_space"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricLDReadLatencyTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_read_latency"] = int64(pm.Value * precision) + } + } + for _, pm := range pms.FindByName(metricLDWriteLatencyTotal) { + vol := pm.Labels.Get("volume") + if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") { + seen[vol] = true + mx[px+vol+"_write_latency"] = int64(pm.Value * precision) + } + } + + for disk := range seen { + if !w.cache.volumes[disk] { + w.cache.volumes[disk] = true + w.addDiskCharts(disk) + } + mx[px+disk+"_used_space"] = mx[px+disk+"_total_space"] - mx[px+disk+"_free_space"] + } + for disk := range w.cache.volumes { + if !seen[disk] { + delete(w.cache.volumes, disk) + w.removeDiskCharts(disk) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_logon.go b/src/go/collectors/go.d.plugin/modules/windows/collect_logon.go new file mode 100644 index 00000000000000..ba8ff3b8b8f8d5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_logon.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricLogonType = "windows_logon_logon_type" +) + +func (w *Windows) collectLogon(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorLogon] { + w.cache.collection[collectorLogon] = true + w.addLogonCharts() + } + + for _, pm := range pms.FindByName(metricLogonType) { + if v := pm.Labels.Get("status"); v != "" { + mx["logon_type_"+v+"_sessions"] = int64(pm.Value) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_memory.go b/src/go/collectors/go.d.plugin/modules/windows/collect_memory.go new file mode 100644 index 00000000000000..5b46f97e2a5718 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_memory.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricMemAvailBytes = "windows_memory_available_bytes" + metricMemCacheFaultsTotal = "windows_memory_cache_faults_total" + metricMemCommitLimit = "windows_memory_commit_limit" + metricMemCommittedBytes = "windows_memory_committed_bytes" + metricMemModifiedPageListBytes = "windows_memory_modified_page_list_bytes" + metricMemPageFaultsTotal = "windows_memory_page_faults_total" + metricMemSwapPageReadsTotal = "windows_memory_swap_page_reads_total" + metricMemSwapPagesReadTotal = "windows_memory_swap_pages_read_total" + metricMemSwapPagesWrittenTotal = "windows_memory_swap_pages_written_total" + metricMemSwapPageWritesTotal = 
"windows_memory_swap_page_writes_total" + metricMemPoolNonPagedBytesTotal = "windows_memory_pool_nonpaged_bytes" + metricMemPoolPagedBytes = "windows_memory_pool_paged_bytes" + metricMemStandbyCacheCoreBytes = "windows_memory_standby_cache_core_bytes" + metricMemStandbyCacheNormalPriorityBytes = "windows_memory_standby_cache_normal_priority_bytes" + metricMemStandbyCacheReserveBytes = "windows_memory_standby_cache_reserve_bytes" +) + +func (w *Windows) collectMemory(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorMemory] { + w.cache.collection[collectorMemory] = true + w.addMemoryCharts() + } + + if pm := pms.FindByName(metricMemAvailBytes); pm.Len() > 0 { + mx["memory_available_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemCacheFaultsTotal); pm.Len() > 0 { + mx["memory_cache_faults_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemCommitLimit); pm.Len() > 0 { + mx["memory_commit_limit"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemCommittedBytes); pm.Len() > 0 { + mx["memory_committed_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemModifiedPageListBytes); pm.Len() > 0 { + mx["memory_modified_page_list_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemPageFaultsTotal); pm.Len() > 0 { + mx["memory_page_faults_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemSwapPageReadsTotal); pm.Len() > 0 { + mx["memory_swap_page_reads_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemSwapPagesReadTotal); pm.Len() > 0 { + mx["memory_swap_pages_read_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemSwapPagesWrittenTotal); pm.Len() > 0 { + mx["memory_swap_pages_written_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemSwapPageWritesTotal); pm.Len() > 0 { + mx["memory_swap_page_writes_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemPoolNonPagedBytesTotal); pm.Len() > 0 { + mx["memory_pool_nonpaged_bytes_total"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemPoolPagedBytes); pm.Len() > 0 { + mx["memory_pool_paged_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemStandbyCacheCoreBytes); pm.Len() > 0 { + mx["memory_standby_cache_core_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemStandbyCacheNormalPriorityBytes); pm.Len() > 0 { + mx["memory_standby_cache_normal_priority_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricMemStandbyCacheReserveBytes); pm.Len() > 0 { + mx["memory_standby_cache_reserve_bytes"] = int64(pm.Max()) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_mssql.go b/src/go/collectors/go.d.plugin/modules/windows/collect_mssql.go new file mode 100644 index 00000000000000..56625004e8afa6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_mssql.go @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricMSSQLAccessMethodPageSplits = "windows_mssql_accessmethods_page_splits" + metricMSSQLBufferCacheHits = "windows_mssql_bufman_buffer_cache_hits" + metricMSSQLBufferCacheLookups = "windows_mssql_bufman_buffer_cache_lookups" + metricMSSQLBufferCheckpointPages = "windows_mssql_bufman_checkpoint_pages" + metricMSSQLBufferPageLifeExpectancy = "windows_mssql_bufman_page_life_expectancy_seconds" + metricMSSQLBufferPageReads = "windows_mssql_bufman_page_reads" + metricMSSQLBufferPageWrites = 
"windows_mssql_bufman_page_writes" + metricMSSQLBlockedProcesses = "windows_mssql_genstats_blocked_processes" + metricMSSQLUserConnections = "windows_mssql_genstats_user_connections" + metricMSSQLLockWait = "windows_mssql_locks_lock_wait_seconds" + metricMSSQLDeadlocks = "windows_mssql_locks_deadlocks" + metricMSSQLConnectionMemoryBytes = "windows_mssql_memmgr_connection_memory_bytes" + metricMSSQLExternalBenefitOfMemory = "windows_mssql_memmgr_external_benefit_of_memory" + metricMSSQLPendingMemoryGrants = "windows_mssql_memmgr_pending_memory_grants" + metricMSSQLSQLErrorsTotal = "windows_mssql_sql_errors_total" + metricMSSQLTotalServerMemory = "windows_mssql_memmgr_total_server_memory_bytes" + metricMSSQLStatsAutoParameterization = "windows_mssql_sqlstats_auto_parameterization_attempts" + metricMSSQLStatsBatchRequests = "windows_mssql_sqlstats_batch_requests" + metricMSSQLStatSafeAutoParameterization = "windows_mssql_sqlstats_safe_auto_parameterization_attempts" + metricMSSQLCompilations = "windows_mssql_sqlstats_sql_compilations" + metricMSSQLRecompilations = "windows_mssql_sqlstats_sql_recompilations" + + metricMSSQLDatabaseActiveTransactions = "windows_mssql_databases_active_transactions" + metricMSSQLDatabaseBackupRestoreOperations = "windows_mssql_databases_backup_restore_operations" + metricMSSQLDatabaseDataFileSize = "windows_mssql_databases_data_files_size_bytes" + metricMSSQLDatabaseLogFlushed = "windows_mssql_databases_log_flushed_bytes" + metricMSSQLDatabaseLogFlushes = "windows_mssql_databases_log_flushes" + metricMSSQLDatabaseTransactions = "windows_mssql_databases_transactions" + metricMSSQLDatabaseWriteTransactions = "windows_mssql_databases_write_transactions" +) + +func (w *Windows) collectMSSQL(mx map[string]int64, pms prometheus.Series) { + instances := make(map[string]bool) + dbs := make(map[string]bool) + px := "mssql_instance_" + for _, pm := range pms.FindByName(metricMSSQLAccessMethodPageSplits) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_accessmethods_page_splits"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferCacheHits) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_bufman_buffer_cache_hits"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferCacheLookups) { + if name := pm.Labels.Get("mssql_instance"); name != "" && pm.Value > 0 { + instances[name] = true + mx[px+name+"_cache_hit_ratio"] = int64(float64(mx[px+name+"_bufman_buffer_cache_hits"]) / pm.Value * 100) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferCheckpointPages) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_bufman_checkpoint_pages"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferPageLifeExpectancy) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_bufman_page_life_expectancy_seconds"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferPageReads) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_bufman_page_reads"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLBufferPageWrites) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_bufman_page_writes"] = int64(pm.Value) + } + } + for _, pm := range 
pms.FindByName(metricMSSQLBlockedProcesses) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_genstats_blocked_processes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLUserConnections) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_genstats_user_connections"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLLockWait) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + if res := pm.Labels.Get("resource"); res != "" { + mx[px+name+"_resource_"+res+"_locks_lock_wait_seconds"] = int64(pm.Value) + } + } + } + for _, pm := range pms.FindByName(metricMSSQLDeadlocks) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + if res := pm.Labels.Get("resource"); res != "" { + mx[px+name+"_resource_"+res+"_locks_deadlocks"] = int64(pm.Value) + } + } + } + for _, pm := range pms.FindByName(metricMSSQLConnectionMemoryBytes) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_memmgr_connection_memory_bytes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLExternalBenefitOfMemory) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_memmgr_external_benefit_of_memory"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLPendingMemoryGrants) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_memmgr_pending_memory_grants"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLSQLErrorsTotal) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + if res := pm.Labels.Get("resource"); res != "" && res != "_Total" { + dim := mssqlParseResource(res) + mx[px+name+"_sql_errors_total_"+dim] = int64(pm.Value) + } + } + } + for _, pm := range pms.FindByName(metricMSSQLTotalServerMemory) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_memmgr_total_server_memory_bytes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLStatsAutoParameterization) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_sqlstats_auto_parameterization_attempts"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLStatsBatchRequests) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_sqlstats_batch_requests"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLStatSafeAutoParameterization) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_sqlstats_safe_auto_parameterization_attempts"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLCompilations) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_sqlstats_sql_compilations"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLRecompilations) { + if name := pm.Labels.Get("mssql_instance"); name != "" { + instances[name] = true + mx[px+name+"_sqlstats_sql_recompilations"] = int64(pm.Value) + } + } + + px = "mssql_db_" + for _, pm := range pms.FindByName(metricMSSQLDatabaseActiveTransactions) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && 
db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_active_transactions"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseBackupRestoreOperations) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_backup_restore_operations"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseDataFileSize) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_data_files_size_bytes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseLogFlushed) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_log_flushed_bytes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseLogFlushes) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_log_flushes"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseTransactions) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_transactions"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricMSSQLDatabaseWriteTransactions) { + if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" { + instances[name], dbs[name+":"+db] = true, true + mx[px+db+"_instance_"+name+"_write_transactions"] = int64(pm.Value) + } + } + + for v := range instances { + if !w.cache.mssqlInstances[v] { + w.cache.mssqlInstances[v] = true + w.addMSSQLInstanceCharts(v) + } + } + for v := range w.cache.mssqlInstances { + if !instances[v] { + delete(w.cache.mssqlInstances, v) + w.removeMSSQLInstanceCharts(v) + } + } + + for v := range dbs { + if !w.cache.mssqlDBs[v] { + w.cache.mssqlDBs[v] = true + if s := strings.Split(v, ":"); len(s) == 2 { + w.addMSSQLDBCharts(s[0], s[1]) + } + } + } + for v := range w.cache.mssqlDBs { + if !dbs[v] { + delete(w.cache.mssqlDBs, v) + if s := strings.Split(v, ":"); len(s) == 2 { + w.removeMSSQLDBCharts(s[0], s[1]) + } + } + } +} + +func mssqlParseResource(name string) string { + name = strings.ReplaceAll(name, " ", "_") + return strings.ToLower(name) +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_net.go b/src/go/collectors/go.d.plugin/modules/windows/collect_net.go new file mode 100644 index 00000000000000..6d49d56834a901 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_net.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricNetBytesReceivedTotal = "windows_net_bytes_received_total" + metricNetBytesSentTotal = "windows_net_bytes_sent_total" + metricNetPacketsReceivedTotal = "windows_net_packets_received_total" + metricNetPacketsSentTotal = "windows_net_packets_sent_total" + metricNetPacketsReceivedDiscardedTotal = "windows_net_packets_received_discarded_total" + metricNetPacketsOutboundDiscardedTotal = 
"windows_net_packets_outbound_discarded_total" + metricNetPacketsReceivedErrorsTotal = "windows_net_packets_received_errors_total" + metricNetPacketsOutboundErrorsTotal = "windows_net_packets_outbound_errors_total" +) + +func (w *Windows) collectNet(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "net_nic_" + for _, pm := range pms.FindByName(metricNetBytesReceivedTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_bytes_received"] += int64(pm.Value * 8) + } + } + for _, pm := range pms.FindByName(metricNetBytesSentTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_bytes_sent"] += int64(pm.Value * 8) + } + } + for _, pm := range pms.FindByName(metricNetPacketsReceivedTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_received_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricNetPacketsSentTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_sent_total"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricNetPacketsReceivedDiscardedTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_received_discarded"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricNetPacketsOutboundDiscardedTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_outbound_discarded"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricNetPacketsReceivedErrorsTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_received_errors"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricNetPacketsOutboundErrorsTotal) { + if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" { + seen[nic] = true + mx[px+nic+"_packets_outbound_errors"] += int64(pm.Value) + } + } + + for nic := range seen { + if !w.cache.nics[nic] { + w.cache.nics[nic] = true + w.addNICCharts(nic) + } + } + for nic := range w.cache.nics { + if !seen[nic] { + delete(w.cache.nics, nic) + w.removeNICCharts(nic) + } + } +} + +func cleanNICID(id string) string { + return strings.Replace(id, "__", "_", -1) +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_netframework.go b/src/go/collectors/go.d.plugin/modules/windows/collect_netframework.go new file mode 100644 index 00000000000000..46be727c3a1e92 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_netframework.go @@ -0,0 +1,531 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + netframeworkPrefix = "netframework_" +) + +const ( + metricNetFrameworkCLRExceptionsThrownTotal = "windows_netframework_clrexceptions_exceptions_thrown_total" + metricNetFrameworkCLRExceptionsFiltersTotal = "windows_netframework_clrexceptions_exceptions_filters_total" + metricNetFrameworkCLRExceptionsFinallysTotal = "windows_netframework_clrexceptions_exceptions_finallys_total" + metricNetFrameworkCLRExceptionsThrowCatchDepthTotal = "windows_netframework_clrexceptions_throw_to_catch_depth_total" +) + +func (w *Windows) collectNetFrameworkCLRExceptions(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsThrownTotal) { + 
if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrexception_thrown_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsFiltersTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrexception_filters_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsFinallysTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrexception_finallys_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsThrowCatchDepthTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrexception_throw_to_catch_depth_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRExceptions[proc] { + w.cache.netFrameworkCLRExceptions[proc] = true + w.addProcessNetFrameworkExceptionsCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRExceptions { + if !seen[proc] { + delete(w.cache.netFrameworkCLRExceptions, proc) + w.removeProcessFromNetFrameworkExceptionsCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRInteropComCallableWrappersTotal = "windows_netframework_clrinterop_com_callable_wrappers_total" + metricNetFrameworkCLRInteropMarshallingTotal = "windows_netframework_clrinterop_interop_marshalling_total" + metricNetFrameworkCLRInteropStubsCreatedTotal = "windows_netframework_clrinterop_interop_stubs_created_total" +) + +func (w *Windows) collectNetFrameworkCLRInterop(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropComCallableWrappersTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrinterop_com_callable_wrappers_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropMarshallingTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrinterop_interop_marshalling_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropStubsCreatedTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrinterop_interop_stubs_created_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRInterops[proc] { + w.cache.netFrameworkCLRInterops[proc] = true + w.addProcessNetFrameworkInteropCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRInterops { + if !seen[proc] { + delete(w.cache.netFrameworkCLRInterops, proc) + w.removeProcessNetFrameworkInteropCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRJITMethodsTotal = "windows_netframework_clrjit_jit_methods_total" + metricNetFrameworkCLRJITTimePercent = "windows_netframework_clrjit_jit_time_percent" + metricNetFrameworkCLRJITStandardFailuresTotal = "windows_netframework_clrjit_jit_standard_failures_total" + metricNetFrameworkCLRJITILBytesTotal = "windows_netframework_clrjit_jit_il_bytes_total" +) + +func (w *Windows) collectNetFrameworkCLRJIT(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for 
_, pm := range pms.FindByName(metricNetFrameworkCLRJITMethodsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrjit_methods_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRJITStandardFailuresTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrjit_standard_failures_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRJITTimePercent) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrjit_time_percent"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRJITILBytesTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrjit_il_bytes_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRJIT[proc] { + w.cache.netFrameworkCLRJIT[proc] = true + w.addProcessNetFrameworkJITCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRJIT { + if !seen[proc] { + delete(w.cache.netFrameworkCLRJIT, proc) + w.removeProcessNetFrameworkJITCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRLoadingLoaderHeapSizeBytes = "windows_netframework_clrloading_loader_heap_size_bytes" + metricNetFrameworkCLRLoadingAppDomainLoadedTotal = "windows_netframework_clrloading_appdomains_loaded_total" + metricNetFrameworkCLRLoadingAppDomainUnloadedTotal = "windows_netframework_clrloading_appdomains_unloaded_total" + metricNetFrameworkCLRLoadingAssembliesLoadedTotal = "windows_netframework_clrloading_assemblies_loaded_total" + metricNetFrameworkCLRLoadingClassesLoadedTotal = "windows_netframework_clrloading_classes_loaded_total" + metricNetFrameworkCLRLoadingClassLoadFailuresTotal = "windows_netframework_clrloading_class_load_failures_total" +) + +func (w *Windows) collectNetFrameworkCLRLoading(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingLoaderHeapSizeBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_loader_heap_size_bytes"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAppDomainLoadedTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_appdomains_loaded_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAppDomainUnloadedTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_appdomains_unloaded_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAssembliesLoadedTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_assemblies_loaded_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingClassesLoadedTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_classes_loaded_total"] += int64(pm.Value) + } + } + + for _, pm := range 
pms.FindByName(metricNetFrameworkCLRLoadingClassLoadFailuresTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrloading_class_load_failures_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRLoading[proc] { + w.cache.netFrameworkCLRLoading[proc] = true + w.addProcessNetFrameworkLoadingCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRLoading { + if !seen[proc] { + delete(w.cache.netFrameworkCLRLoading, proc) + w.removeProcessNetFrameworkLoadingCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRLocksAndThreadsQueueLengthTotal = "windows_netframework_clrlocksandthreads_queue_length_total" + metricNetFrameworkCLRLocksAndThreadsCurrentLogicalThreads = "windows_netframework_clrlocksandthreads_current_logical_threads" + metricNetFrameworkCLRLocksAndThreadsPhysicalThreadsCurrent = "windows_netframework_clrlocksandthreads_physical_threads_current" + metricNetFrameworkCLRLocksAndThreadsRecognizedThreadsTotal = "windows_netframework_clrlocksandthreads_recognized_threads_total" + metricNetFrameworkCLRLocksAndThreadsContentionsTotal = "windows_netframework_clrlocksandthreads_contentions_total" +) + +func (w *Windows) collectNetFrameworkCLRLocksAndThreads(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsQueueLengthTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrlocksandthreads_queue_length_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsCurrentLogicalThreads) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrlocksandthreads_current_logical_threads"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsPhysicalThreadsCurrent) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrlocksandthreads_physical_threads_current"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsRecognizedThreadsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrlocksandthreads_recognized_threads_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsContentionsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrlocksandthreads_contentions_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRLocksThreads[proc] { + w.cache.netFrameworkCLRLocksThreads[proc] = true + w.addProcessNetFrameworkLocksAndThreadsCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRLocksThreads { + if !seen[proc] { + delete(w.cache.netFrameworkCLRLocksThreads, proc) + w.removeProcessNetFrameworkLocksAndThreadsCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRMemoryAllocatedBytesTotal = "windows_netframework_clrmemory_allocated_bytes_total" + metricNetFrameworkCLRMemoryFinalizationSurvivors = "windows_netframework_clrmemory_finalization_survivors" + metricNetFrameworkCLRMemoryHeapSizeBytes = "windows_netframework_clrmemory_heap_size_bytes" + 
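// These map to windows_exporter's netframework_clrmemory collector metrics. +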
metricNetFrameworkCLRMemoryPromotedBytes = "windows_netframework_clrmemory_promoted_bytes" + metricNetFrameworkCLRMemoryNumberGCHandles = "windows_netframework_clrmemory_number_gc_handles" + metricNetFrameworkCLRMemoryCollectionsTotal = "windows_netframework_clrmemory_collections_total" + metricNetFrameworkCLRMemoryInducedGCTotal = "windows_netframework_clrmemory_induced_gc_total" + metricNetFrameworkCLRMemoryNumberPinnedObjects = "windows_netframework_clrmemory_number_pinned_objects" + metricNetFrameworkCLRMemoryNumberSinkBlockInUse = "windows_netframework_clrmemory_number_sink_blocksinuse" + metricNetFrameworkCLRMemoryCommittedBytes = "windows_netframework_clrmemory_committed_bytes" + metricNetFrameworkCLRMemoryReservedBytes = "windows_netframework_clrmemory_reserved_bytes" + metricNetFrameworkCLRMemoryGCTimePercent = "windows_netframework_clrmemory_gc_time_percent" +) + +func (w *Windows) collectNetFrameworkCLRMemory(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryAllocatedBytesTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_allocated_bytes_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryFinalizationSurvivors) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_finalization_survivors"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryHeapSizeBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_heap_size_bytes"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryPromotedBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_promoted_bytes"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberGCHandles) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_number_gc_handles"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryCollectionsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_collections_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryInducedGCTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_induced_gc_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberPinnedObjects) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_number_pinned_objects"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberSinkBlockInUse) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_number_sink_blocksinuse"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryCommittedBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + 
seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_committed_bytes"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryReservedBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_reserved_bytes"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryGCTimePercent) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrmemory_gc_time_percent"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRMemory[proc] { + w.cache.netFrameworkCLRMemory[proc] = true + w.addProcessNetFrameworkMemoryCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRMemory { + if !seen[proc] { + delete(w.cache.netFrameworkCLRMemory, proc) + w.removeProcessNetFrameworkMemoryCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRRemotingChannelsTotal = "windows_netframework_clrremoting_channels_total" + metricNetFrameworkCLRRemotingContextBoundClassesLoaded = "windows_netframework_clrremoting_context_bound_classes_loaded" + metricNetFrameworkCLRRemotingContextBoundObjectsTotal = "windows_netframework_clrremoting_context_bound_objects_total" + metricNetFrameworkCLRRemotingContextProxiesTotal = "windows_netframework_clrremoting_context_proxies_total" + metricNetFrameworkCLRRemotingContexts = "windows_netframework_clrremoting_contexts" + metricNetFrameworkCLRRemotingRemoteCallsTotal = "windows_netframework_clrremoting_remote_calls_total" +) + +func (w *Windows) collectNetFrameworkCLRRemoting(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingChannelsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_channels_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextBoundClassesLoaded) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_context_bound_classes_loaded"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextBoundObjectsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_context_bound_objects_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextProxiesTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_context_proxies_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContexts) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_contexts"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingRemoteCallsTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrremoting_remote_calls_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRRemoting[proc] { + w.cache.netFrameworkCLRRemoting[proc] = true + w.addProcessNetFrameworkRemotingCharts(proc) + } + } 
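+ // The next loop retires charts for processes that disappeared from this scrape.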
+ + for proc := range w.cache.netFrameworkCLRRemoting { + if !seen[proc] { + delete(w.cache.netFrameworkCLRRemoting, proc) + w.removeProcessNetFrameworkRemotingCharts(proc) + } + } +} + +const ( + metricNetFrameworkCLRSecurityLinkTimeChecksTotal = "windows_netframework_clrsecurity_link_time_checks_total" + metricNetFrameworkCLRSecurityRTChecksTimePercent = "windows_netframework_clrsecurity_rt_checks_time_percent" + metricNetFrameworkCLRSecurityStackWalkDepth = "windows_netframework_clrsecurity_stack_walk_depth" + metricNetFrameworkCLRSecurityRuntimeChecksTotal = "windows_netframework_clrsecurity_runtime_checks_total" +) + +func (w *Windows) collectNetFrameworkCLRSecurity(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + + for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityLinkTimeChecksTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrsecurity_link_time_checks_total"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityRTChecksTimePercent) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrsecurity_checks_time_percent"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityStackWalkDepth) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrsecurity_stack_walk_depth"] += int64(pm.Value) + } + } + + for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityRuntimeChecksTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[netframeworkPrefix+name+"_clrsecurity_runtime_checks_total"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.netFrameworkCLRSecurity[proc] { + w.cache.netFrameworkCLRSecurity[proc] = true + w.addProcessNetFrameworkSecurityCharts(proc) + } + } + + for proc := range w.cache.netFrameworkCLRSecurity { + if !seen[proc] { + delete(w.cache.netFrameworkCLRSecurity, proc) + w.removeProcessNetFrameworkSecurityCharts(proc) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_os.go b/src/go/collectors/go.d.plugin/modules/windows/collect_os.go new file mode 100644 index 00000000000000..5fc1447c7b1381 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_os.go @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricOSPhysicalMemoryFreeBytes = "windows_os_physical_memory_free_bytes" + metricOSPagingFreeBytes = "windows_os_paging_free_bytes" + metricOSProcessesLimit = "windows_os_processes_limit" + metricOSProcesses = "windows_os_processes" + metricOSUsers = "windows_os_users" + metricOSPagingLimitBytes = "windows_os_paging_limit_bytes" + metricOSVisibleMemoryBytes = "windows_os_visible_memory_bytes" +) + +func (w *Windows) collectOS(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorOS] { + w.cache.collection[collectorOS] = true + w.addOSCharts() + } + + px := "os_" + if pm := pms.FindByName(metricOSPhysicalMemoryFreeBytes); pm.Len() > 0 { + mx[px+"physical_memory_free_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSPagingFreeBytes); pm.Len() > 0 { + mx[px+"paging_free_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSProcessesLimit); pm.Len() > 0 { + 
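// OS metrics are single-series gauges; Max() extracts the sample value. +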
mx[px+"processes_limit"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSProcesses); pm.Len() > 0 { + mx[px+"processes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSUsers); pm.Len() > 0 { + mx[px+"users"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSPagingLimitBytes); pm.Len() > 0 { + mx[px+"paging_limit_bytes"] = int64(pm.Max()) + } + if pm := pms.FindByName(metricOSVisibleMemoryBytes); pm.Len() > 0 { + mx[px+"visible_memory_bytes"] = int64(pm.Max()) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_process.go b/src/go/collectors/go.d.plugin/modules/windows/collect_process.go new file mode 100644 index 00000000000000..e22fe22007fce1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_process.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricProcessCPUTimeTotal = "windows_process_cpu_time_total" + metricProcessWorkingSetBytes = "windows_process_working_set_private_bytes" + metricProcessIOBytes = "windows_process_io_bytes_total" + metricProcessIOOperations = "windows_process_io_operations_total" + metricProcessPageFaults = "windows_process_page_faults_total" + metricProcessPageFileBytes = "windows_process_page_file_bytes" + metricProcessThreads = "windows_process_threads" + metricProcessCPUHandles = "windows_process_handles" +) + +func (w *Windows) collectProcess(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorProcess] { + w.cache.collection[collectorProcess] = true + w.addProcessesCharts() + } + + seen := make(map[string]bool) + px := "process_" + for _, pm := range pms.FindByName(metricProcessCPUTimeTotal) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_cpu_time"] += int64(pm.Value * 1000) + } + } + for _, pm := range pms.FindByName(metricProcessWorkingSetBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_working_set_private_bytes"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessIOBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_io_bytes"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessIOOperations) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_io_operations"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessPageFaults) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_page_faults"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessPageFileBytes) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_page_file_bytes"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessThreads) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_threads"] += int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricProcessCPUHandles) { + if name := cleanProcessName(pm.Labels.Get("process")); name != "" { + seen[name] = true + mx[px+name+"_handles"] += int64(pm.Value) + } + } + + for proc := range seen { + if !w.cache.processes[proc] { + w.cache.processes[proc] = true + w.addProcessToCharts(proc) + } + } + for proc := range 
w.cache.processes { + if !seen[proc] { + delete(w.cache.processes, proc) + w.removeProcessFromCharts(proc) + } + } +} + +func cleanProcessName(name string) string { + return strings.ReplaceAll(name, " ", "_") +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_service.go b/src/go/collectors/go.d.plugin/modules/windows/collect_service.go new file mode 100644 index 00000000000000..0615674130e9cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_service.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricServiceState = "windows_service_state" + metricServiceStatus = "windows_service_status" +) + +func (w *Windows) collectService(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + px := "service_" + for _, pm := range pms.FindByName(metricServiceState) { + name := cleanService(pm.Labels.Get("name")) + state := cleanService(pm.Labels.Get("state")) + if name == "" || state == "" { + continue + } + + seen[name] = true + mx[px+name+"_state_"+state] = int64(pm.Value) + } + for _, pm := range pms.FindByName(metricServiceStatus) { + name := cleanService(pm.Labels.Get("name")) + status := cleanService(pm.Labels.Get("status")) + if name == "" || status == "" { + continue + } + + seen[name] = true + mx[px+name+"_status_"+status] = int64(pm.Value) + } + + for svc := range seen { + if !w.cache.services[svc] { + w.cache.services[svc] = true + w.addServiceCharts(svc) + } + } + for svc := range w.cache.services { + if !seen[svc] { + delete(w.cache.services, svc) + w.removeServiceCharts(svc) + } + } +} + +func cleanService(name string) string { + return strings.ReplaceAll(name, " ", "_") +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_system.go b/src/go/collectors/go.d.plugin/modules/windows/collect_system.go new file mode 100644 index 00000000000000..8072e07460a423 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_system.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "time" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricSysSystemUpTime = "windows_system_system_up_time" + metricSysThreads = "windows_system_threads" +) + +func (w *Windows) collectSystem(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorSystem] { + w.cache.collection[collectorSystem] = true + w.addSystemCharts() + } + + px := "system_" + if pm := pms.FindByName(metricSysSystemUpTime); pm.Len() > 0 { + mx[px+"up_time"] = time.Now().Unix() - int64(pm.Max()) + } + if pm := pms.FindByName(metricSysThreads); pm.Len() > 0 { + mx[px+"threads"] = int64(pm.Max()) + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_tcp.go b/src/go/collectors/go.d.plugin/modules/windows/collect_tcp.go new file mode 100644 index 00000000000000..cdcb0d8afbd1b0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_tcp.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import "github.com/netdata/go.d.plugin/pkg/prometheus" + +const ( + metricTCPConnectionFailure = "windows_tcp_connection_failures_total" + metricTCPConnectionActive = "windows_tcp_connections_active_total" + metricTCPConnectionEstablished = "windows_tcp_connections_established" + metricTCPConnectionPassive = "windows_tcp_connections_passive_total" + metricTCPConnectionReset = 
"windows_tcp_connections_reset_total" + metricTCPConnectionSegmentsReceived = "windows_tcp_segments_received_total" + metricTCPConnectionSegmentsRetransmitted = "windows_tcp_segments_retransmitted_total" + metricTCPConnectionSegmentsSent = "windows_tcp_segments_sent_total" +) + +func (w *Windows) collectTCP(mx map[string]int64, pms prometheus.Series) { + if !w.cache.collection[collectorTCP] { + w.cache.collection[collectorTCP] = true + w.addTCPCharts() + } + + px := "tcp_" + for _, pm := range pms.FindByName(metricTCPConnectionFailure) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_conns_failures"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionActive) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_conns_active"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionEstablished) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_conns_established"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionPassive) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_conns_passive"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionReset) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_conns_resets"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionSegmentsReceived) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_segments_received"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionSegmentsRetransmitted) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_segments_retransmitted"] = int64(pm.Value) + } + } + for _, pm := range pms.FindByName(metricTCPConnectionSegmentsSent) { + if af := pm.Labels.Get("af"); af != "" { + mx[px+af+"_segments_sent"] = int64(pm.Value) + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/collect_thermalzone.go b/src/go/collectors/go.d.plugin/modules/windows/collect_thermalzone.go new file mode 100644 index 00000000000000..9832335434db18 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/collect_thermalzone.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "strings" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +const ( + metricThermalzoneTemperatureCelsius = "windows_thermalzone_temperature_celsius" +) + +func (w *Windows) collectThermalzone(mx map[string]int64, pms prometheus.Series) { + seen := make(map[string]bool) + for _, pm := range pms.FindByName(metricThermalzoneTemperatureCelsius) { + if name := cleanZoneName(pm.Labels.Get("name")); name != "" { + seen[name] = true + mx["thermalzone_"+name+"_temperature"] = int64(pm.Value) + } + } + + for zone := range seen { + if !w.cache.thermalZones[zone] { + w.cache.thermalZones[zone] = true + w.addThermalZoneCharts(zone) + } + } + for zone := range w.cache.thermalZones { + if !seen[zone] { + delete(w.cache.thermalZones, zone) + w.removeThermalZoneCharts(zone) + } + } +} + +func cleanZoneName(name string) string { + // "\\_TZ.TZ10", "\\_TZ.X570" => TZ10, X570 + i := strings.Index(name, ".") + if i == -1 || len(name) == i+1 { + return "" + } + return name[i+1:] +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/config_schema.json b/src/go/collectors/go.d.plugin/modules/windows/config_schema.json new file mode 100644 index 00000000000000..1668dd90555d44 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/config_schema.json @@ -0,0 +1,59 @@ +{ + "$schema": 
"http://json-schema.org/draft-07/schema#", + "title": "go.d/windows job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "url" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/init.go b/src/go/collectors/go.d.plugin/modules/windows/init.go new file mode 100644 index 00000000000000..34cf8367295fa8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package windows + +import ( + "errors" + "net/http" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (w *Windows) validateConfig() error { + if w.URL == "" { + return errors.New("'url' is not set") + } + return nil +} + +func (w *Windows) initHTTPClient() (*http.Client, error) { + return web.NewHTTPClient(w.Client) +} + +func (w *Windows) initPrometheusClient(client *http.Client) (prometheus.Prometheus, error) { + return prometheus.New(client, w.Request), nil +} diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/active_directory.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/active_directory.md new file mode 100644 index 00000000000000..d6d26524255dd6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/active_directory.md @@ -0,0 +1,808 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/active_directory.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml" +sidebar_label: "Active Directory" +learn_status: "Published" +learn_rel_path: "Data Collection/Windows Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Active Directory + + +<img src="https://netdata.cloud/img/windows.svg" width="150"/> + + +Plugin: go.d.plugin +Module: windows + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL). + + +It collect metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)). 
+ +Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). + + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). + +Supported collectors: + +- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) +- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) +- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) +- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) +- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) +- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) +- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) +- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) +- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) +- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) +- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) +- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) +- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) +- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) +- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) +- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- 
[netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| 
ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| 
exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| 
iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | uptime | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| 
adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| 
netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| 
hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. 
| | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+ + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m windows + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/hyperv.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/hyperv.md new file mode 100644 index 00000000000000..8adf5eafe92d95 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/hyperv.md @@ -0,0 +1,808 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/hyperv.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml" +sidebar_label: "HyperV" +learn_status: "Published" +learn_rel_path: "Data Collection/Windows Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# HyperV + + +<img src="https://netdata.cloud/img/windows.svg" width="150"/> + + +Plugin: go.d.plugin +Module: windows + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL). + + +It collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)). + +Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). + + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). 
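+ +As an illustration, you can limit the exporter to just the collectors listed below with its `--collectors.enabled` flag (a sketch, assuming a default windows_exporter installation; verify the flag syntax against the exporter version you run): + +```bash +# Sketch: start windows_exporter with an explicit collector list so that +# only the metric groups you need are exposed to Netdata. +windows_exporter.exe --collectors.enabled "cpu,memory,net,logical_disk,os,system,logon,tcp" +``` 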
+ +Supported collectors: + +- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) +- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) +- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) +- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) +- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) +- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) +- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) +- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) +- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) +- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) +- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) +- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) +- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) +- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) +- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) +- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| 
adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| 
hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | 
uptime | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | 
seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | 
yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. 
+ +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m windows + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_exchange.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_exchange.md new file mode 100644 index 00000000000000..3a452c4da159a2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_exchange.md @@ -0,0 +1,808 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/ms_exchange.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml" +sidebar_label: "MS Exchange" +learn_status: "Published" +learn_rel_path: "Data Collection/Windows Systems" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# MS Exchange + + +<img src="https://netdata.cloud/img/exchange.svg" width="150"/> + + +Plugin: go.d.plugin +Module: windows + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. 
Active Directory, MSSQL). + + +It collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host. + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)). + +Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely. + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). + + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + +The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). + +Supported collectors: + +- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) +- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) +- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) +- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) +- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) +- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) +- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) +- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) +- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) +- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) +- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) +- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) +- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) +- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) +- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) +- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) +- 
[netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | 
outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | 
processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, 
paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | document_locked, document_not_found | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + 
+Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | 
registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### 
Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. 
| 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m windows
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_sql_server.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_sql_server.md
new file mode 100644
index 00000000000000..2df4d8d91d1307
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/ms_sql_server.md
@@ -0,0 +1,808 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/ms_sql_server.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml"
+sidebar_label: "MS SQL Server"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MS SQL Server
+
+
+<img src="https://netdata.cloud/img/mssql.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (this requires the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics are grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
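+
+If you only need a subset of these metrics, you can narrow the exported set on the exporter side. As a minimal sketch (the collector list below is an illustrative assumption, not taken from this document), windows_exporter can read a YAML config file passed via its `--config.file` flag:
+
+```yaml
+# Illustrative windows_exporter config.yml, not part of Netdata.
+# Only the listed exporter collectors will expose metrics.
+collectors:
+  enabled: cpu,logical_disk,memory,net,os,system,mssql
+```
+
+Enabling fewer exporter collectors also reduces the collection cost described under Performance Impact above.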
+ +Supported collectors: + +- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) +- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) +- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) +- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) +- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) +- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) +- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) +- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) +- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) +- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) +- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) +- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) +- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) +- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) +- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) +- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| 
adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| 
hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | 
document_locked, document_not_found | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | 
seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | 
yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. 
+ +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m windows
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/net_framework.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/net_framework.md
new file mode 100644
index 00000000000000..8be407364fb014
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/net_framework.md
@@ -0,0 +1,808 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/net_framework.md"
+meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml"
+sidebar_label: "NET Framework"
+learn_status: "Published"
+learn_rel_path: "Data Collection/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NET Framework
+
+
+<img src="https://netdata.cloud/img/dotnet.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (this requires the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics are grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
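+
+If you are mainly interested in the .NET runtime metrics, you can narrow the exported set on the exporter side. As a minimal sketch (the collector list below is an illustrative assumption, not taken from this document), windows_exporter can read a YAML config file passed via its `--config.file` flag:
+
+```yaml
+# Illustrative windows_exporter config.yml, not part of Netdata.
+# Only the listed exporter collectors will expose metrics.
+collectors:
+  enabled: cpu,memory,os,system,netframework_clrexceptions,netframework_clrjit,netframework_clrmemory
+```
+
+Enabling fewer exporter collectors also reduces the collection cost described under Performance Impact above.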
+ +Supported collectors: + +- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) +- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) +- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) +- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) +- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) +- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) +- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) +- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) +- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) +- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) +- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) +- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) +- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) +- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) +- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) +- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) +- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) +- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) +- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. 
+ +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| 
adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| 
hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | 
document_locked, document_not_found | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | 
seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | 
yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage 
](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. | 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. 
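+
+Before editing the config, you can optionally sanity-check that the exporter endpoint is reachable (the address below is this page's example host; substitute your own server and port):
+
+```bash
+curl -s http://192.0.2.1:9182/metrics | head
+```
+
+If the endpoint responds with Prometheus-format metrics, a minimal job only needs `url`: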
+ +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. + + ```bash + sudo -u netdata -s + ``` + +- Run the `go.d.plugin` to debug the collector: + + ```bash + ./go.d.plugin -d -m windows + ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/windows/integrations/windows.md b/src/go/collectors/go.d.plugin/modules/windows/integrations/windows.md new file mode 100644 index 00000000000000..b72203fd88093d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/integrations/windows.md @@ -0,0 +1,808 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/integrations/windows.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/windows/metadata.yaml" +sidebar_label: "Windows" +learn_status: "Published" +learn_rel_path: "Data Collection/Windows Systems" +most_popular: True +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# Windows + + +<img src="https://netdata.cloud/img/windows.svg" width="150"/> + + +Plugin: go.d.plugin +Module: windows + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL). 
+
+
+It collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- 
[netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) +- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) +- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) +- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) +- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) +- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) +- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + + +### Per Active Directory instance + +These metrics refer to the entire monitored host. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage | +| windows.memory_utilization | available, used | bytes | +| windows.memory_page_faults | page_faults | events/s | +| windows.memory_swap_utilization | available, used | bytes | +| windows.memory_swap_operations | read, write | operations/s | +| windows.memory_swap_pages | read, written | pages/s | +| windows.memory_cached | cached | KiB | +| windows.memory_cache_faults | cache_faults | events/s | +| windows.memory_system_pool | paged, non-paged | bytes | +| windows.tcp_conns_established | ipv4, ipv6 | connections | +| windows.tcp_conns_active | ipv4, ipv6 | connections/s | +| windows.tcp_conns_passive | ipv4, ipv6 | connections/s | +| windows.tcp_conns_failures | ipv4, ipv6 | failures/s | +| windows.tcp_conns_resets | ipv4, ipv6 | resets/s | +| windows.tcp_segments_received | ipv4, ipv6 | segments/s | +| windows.tcp_segments_sent | ipv4, ipv6 | segments/s | +| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s | +| windows.os_processes | processes | number | +| windows.os_users | users | users | +| windows.os_visible_memory_usage | free, used | bytes | +| windows.os_paging_files_usage | free, used | bytes | +| windows.system_threads | threads | number | +| windows.system_uptime | time | seconds | +| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds | +| windows.processes_cpu_utilization | a dimension per process | percentage | +| windows.processes_handles | a dimension per process | handles | +| windows.processes_io_bytes | a dimension per process | bytes/s | +| windows.processes_io_operations | a dimension per process | operations/s | +| windows.processes_page_faults | a dimension per process | pgfaults/s | +| windows.processes_page_file_bytes | a dimension per process | bytes | +| windows.processes_pool_bytes | a dimension per process | bytes | +| windows.processes_threads | a dimension per process | threads | +| ad.database_operations | add, delete, modify, recycle | operations/s | +| ad.directory_operations | read, write, search | operations/s | +| ad.name_cache_lookups | lookups | lookups/s | +| ad.name_cache_hits | hits | hits/s | +| ad.atq_average_request_latency | time | seconds | +| ad.atq_outstanding_requests | 
outstanding | requests | +| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s | +| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects | +| ad.dra_replication_objects_filtered | inbound, outbound | objects/s | +| ad.dra_replication_properties_updated | inbound, outbound | properties/s | +| ad.dra_replication_properties_filtered | inbound, outbound | properties/s | +| ad.dra_replication_pending_syncs | pending | syncs | +| ad.dra_replication_sync_requests | requests | requests/s | +| ad.ds_threads | in_use | threads | +| ad.ldap_last_bind_time | last_bind | seconds | +| ad.binds | binds | binds/s | +| ad.ldap_searches | searches | searches/s | +| adfs.ad_login_connection_failures | connection | failures/s | +| adfs.certificate_authentications | authentications | authentications/s | +| adfs.db_artifact_failures | connection | failures/s | +| adfs.db_artifact_query_time_seconds | query_time | seconds/s | +| adfs.db_config_failures | connection | failures/s | +| adfs.db_config_query_time_seconds | query_time | seconds/s | +| adfs.device_authentications | authentications | authentications/s | +| adfs.external_authentications | success, failure | authentications/s | +| adfs.federated_authentications | authentications | authentications/s | +| adfs.federation_metadata_requests | requests | requests/s | +| adfs.oauth_authorization_requests | requests | requests/s | +| adfs.oauth_client_authentications | success, failure | authentications/s | +| adfs.oauth_client_credentials_requests | success, failure | requests/s | +| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s | +| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s | +| adfs.oauth_client_windows_authentications | success, failure | authentications/s | +| adfs.oauth_logon_certificate_requests | success, failure | requests/s | +| adfs.oauth_password_grant_requests | success, failure | requests/s | +| adfs.oauth_token_requests_success | success | requests/s | +| adfs.passive_requests | passive | requests/s | +| adfs.passport_authentications | passport | authentications/s | +| adfs.password_change_requests | success, failure | requests/s | +| adfs.samlp_token_requests_success | success | requests/s | +| adfs.sso_authentications | success, failure | authentications/s | +| adfs.token_requests | requests | requests/s | +| adfs.userpassword_authentications | success, failure | authentications/s | +| adfs.windows_integrated_authentications | authentications | authentications/s | +| adfs.wsfed_token_requests_success | success | requests/s | +| adfs.wstrust_token_requests_success | success | requests/s | +| exchange.activesync_ping_cmds_pending | pending | commands | +| exchange.activesync_requests | received | requests/s | +| exchange.activesync_sync_cmds | processed | commands/s | +| exchange.autodiscover_requests | processed | requests/s | +| exchange.avail_service_requests | serviced | requests/s | +| exchange.owa_current_unique_users | logged-in | users | +| exchange.owa_requests_total | handled | requests/s | +| exchange.rpc_active_user_count | active | users | +| exchange.rpc_avg_latency | latency | seconds | +| exchange.rpc_connection_count | connections | connections | +| exchange.rpc_operations | operations | operations/s | +| exchange.rpc_requests | 
processed | requests | +| exchange.rpc_user_count | users | users | +| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s | +| exchange.transport_queues_poison | low, high, none, normal | messages/s | +| hyperv.vms_health | ok, critical | vms | +| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages | +| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s | +| hyperv.root_partition_attached_devices | attached | devices | +| hyperv.root_partition_deposited_pages | deposited | pages | +| hyperv.root_partition_skipped_interrupts | skipped | interrupts | +| hyperv.root_partition_device_dma_errors | illegal_dma | requests | +| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests | +| hyperv.root_partition_device_interrupt_throttle_events | throttling | events | +| hyperv.root_partition_io_tlb_flush | flushes | flushes/s | +| hyperv.root_partition_address_space | address_spaces | address spaces | +| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s | +| hyperv.root_partition_virtual_tlb_pages | used | pages | + +### Per cpu core + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| core | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage | +| windows.cpu_core_interrupts | interrupts | interrupts/s | +| windows.cpu_core_dpcs | dpcs | dpcs/s | +| windows.cpu_core_cstate | c1, c2, c3 | percentage | + +### Per logical disk + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| disk | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.logical_disk_utilization | free, used | bytes | +| windows.logical_disk_bandwidth | read, write | bytes/s | +| windows.logical_disk_operations | reads, writes | operations/s | +| windows.logical_disk_latency | read, write | seconds | + +### Per network device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| nic | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.net_nic_bandwidth | received, sent | kilobits/s | +| windows.net_nic_packets | received, sent | packets/s | +| windows.net_nic_errors | inbound, outbound | errors/s | +| windows.net_nic_discarded | inbound, outbound | discards/s | + +### Per thermalzone + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| thermalzone | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.thermalzone_temperature | temperature | celsius | + +### Per service + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| service | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, 
paused, unknown | state | +| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status | + +### Per website + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| website | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| iis.website_traffic | received, sent | bytes/s | +| iis.website_requests_rate | requests | requests/s | +| iis.website_active_connections_count | active | connections | +| iis.website_users_count | anonymous, non_anonymous | users | +| iis.website_connection_attempts_rate | connection | attempts/s | +| iis.website_isapi_extension_requests_count | isapi | requests | +| iis.website_isapi_extension_requests_rate | isapi | requests/s | +| iis.website_ftp_file_transfer_rate | received, sent | files/s | +| iis.website_logon_attempts_rate | logon | attempts/s | +| iis.website_errors_rate | document_locked, document_not_found | errors/s | +| iis.website_uptime | document_locked, document_not_found | seconds | + +### Per mssql instance + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.instance_accessmethods_page_splits | page | splits/s | +| mssql.instance_cache_hit_ratio | hit_ratio | percentage | +| mssql.instance_bufman_checkpoint_pages | flushed | pages/s | +| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds | +| mssql.instance_bufman_iops | read, written | iops | +| mssql.instance_blocked_processes | blocked | processes | +| mssql.instance_user_connection | user | connections | +| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s | +| mssql.instance_memmgr_connection_memory_bytes | memory | bytes | +| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes | +| mssql.instance_memmgr_pending_memory_grants | pending | processes | +| mssql.instance_memmgr_server_memory | memory | bytes | +| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors | +| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s | +| mssql.instance_sqlstats_batch_requests | batch | requests/s | +| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s | +| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s | +| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s | + +### Per database + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| mssql_instance | TBD | +| database | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| mssql.database_active_transactions | active | transactions | +| mssql.database_backup_restore_operations | backup | operations/s | +| mssql.database_data_files_size | size | bytes | +| mssql.database_log_flushed | flushed | bytes/s | +| mssql.database_log_flushes | log | flushes/s | +| mssql.database_transactions | transactions | transactions/s | +| mssql.database_write_transactions | write | transactions/s | + +### Per certificate template + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| cert_template | TBD | + 
+Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| adcs.cert_template_requests | requests | requests/s | +| adcs.cert_template_failed_requests | failed | requests/s | +| adcs.cert_template_issued_requests | issued | requests/s | +| adcs.cert_template_pending_requests | pending | requests/s | +| adcs.cert_template_request_processing_time | processing_time | seconds | +| adcs.cert_template_retrievals | retrievals | retrievals/s | +| adcs.cert_template_retrieval_processing_time | processing_time | seconds | +| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds | +| adcs.cert_template_request_policy_module_processing | processing_time | seconds | +| adcs.cert_template_challenge_responses | challenge | responses/s | +| adcs.cert_template_challenge_response_processing_time | processing_time | seconds | +| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s | +| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds | + +### Per process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| process | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| netframework.clrexception_thrown | exceptions | exceptions/s | +| netframework.clrexception_filters | filters | filters/s | +| netframework.clrexception_finallys | finallys | finallys/s | +| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s | +| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s | +| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s | +| netframework.clrinterop_interop_stubs_created | created | stubs/s | +| netframework.clrjit_methods | jit-compiled | methods/s | +| netframework.clrjit_time | time | percentage | +| netframework.clrjit_standard_failures | failures | failures/s | +| netframework.clrjit_il_bytes | compiled_msil | bytes/s | +| netframework.clrloading_loader_heap_size | committed | bytes | +| netframework.clrloading_appdomains_loaded | loaded | domain/s | +| netframework.clrloading_appdomains_unloaded | unloaded | domain/s | +| netframework.clrloading_assemblies_loaded | loaded | assemblies/s | +| netframework.clrloading_classes_loaded | loaded | classes/s | +| netframework.clrloading_class_load_failures | class_load | failures/s | +| netframework.clrlocksandthreads_queue_length | threads | threads/s | +| netframework.clrlocksandthreads_current_logical_threads | logical | threads | +| netframework.clrlocksandthreads_current_physical_threads | physical | threads | +| netframework.clrlocksandthreads_recognized_threads | threads | threads/s | +| netframework.clrlocksandthreads_contentions | contentions | contentions/s | +| netframework.clrmemory_allocated_bytes | allocated | bytes/s | +| netframework.clrmemory_finalization_survivors | survived | objects | +| netframework.clrmemory_heap_size | heap | bytes | +| netframework.clrmemory_promoted | promoted | bytes | +| netframework.clrmemory_number_gc_handles | used | handles | +| netframework.clrmemory_collections | gc | gc/s | +| netframework.clrmemory_induced_gc | gc | gc/s | +| netframework.clrmemory_number_pinned_objects | pinned | objects | +| netframework.clrmemory_number_sink_blocks_in_use | used | blocks | +| netframework.clrmemory_committed | committed | bytes | +| netframework.clrmemory_reserved | reserved | bytes | +| netframework.clrmemory_gc_time | time | percentage | +| netframework.clrremoting_channels | 
registered | channels/s | +| netframework.clrremoting_context_bound_classes_loaded | loaded | classes | +| netframework.clrremoting_context_bound_objects | allocated | objects/s | +| netframework.clrremoting_context_proxies | objects | objects/s | +| netframework.clrremoting_contexts | contexts | contexts | +| netframework.clrremoting_remote_calls | rpc | calls/s | +| netframework.clrsecurity_link_time_checks | linktime | checks/s | +| netframework.clrsecurity_checks_time | time | percentage | +| netframework.clrsecurity_stack_walk_depth | stack | depth | +| netframework.clrsecurity_runtime_checks | runtime | checks/s | + +### Per exchange workload + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.workload_active_tasks | active | tasks | +| exchange.workload_completed_tasks | completed | tasks/s | +| exchange.workload_queued_tasks | queued | tasks/s | +| exchange.workload_yielded_tasks | yielded | tasks/s | +| exchange.workload_activity_status | active, paused | status | + +### Per ldap process + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.ldap_long_running_ops_per_sec | long-running | operations/s | +| exchange.ldap_read_time | read | seconds | +| exchange.ldap_search_time | search | seconds | +| exchange.ldap_write_time | write | seconds | +| exchange.ldap_timeout_errors | timeout | errors/s | + +### Per http proxy + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| workload | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| exchange.http_proxy_avg_auth_latency | latency | seconds | +| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds | +| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage | +| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds | +| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests | +| exchange.http_proxy_requests | processed | requests/s | + +### Per vm + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_name | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage | +| hyperv.vm_memory_physical | assigned_memory | MiB | +| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB | +| hyperv.vm_memory_pressure_current | pressure | percentage | +| hyperv.vm_vid_physical_pages_allocated | allocated | pages | +| hyperv.vm_vid_remote_physical_pages | remote_physical | pages | + +### Per vm device + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_device | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_device_bytes | read, written | bytes/s | +| hyperv.vm_device_operations | read, write | operations/s | +| hyperv.vm_device_errors | errors | errors/s | + +### Per vm interface + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vm_interface | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vm_interface_bytes | received, sent | bytes/s | +| hyperv.vm_interface_packets | received, sent | packets/s | +| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s | + +### 
Per vswitch + +TBD + +Labels: + +| Label | Description | +|:-----------|:----------------| +| vswitch | TBD | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| hyperv.vswitch_bytes | received, sent | bytes/s | +| hyperv.vswitch_packets | received, sent | packets/s | +| hyperv.vswitch_directed_packets | received, sent | packets/s | +| hyperv.vswitch_broadcast_packets | received, sent | packets/s | +| hyperv.vswitch_multicast_packets | received, sent | packets/s | +| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s | +| hyperv.vswitch_packets_flooded | flooded | packets/s | +| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s | +| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s | + + + +## Alerts + + +The following alerts are available: + +| Alert name | On metric | Description | +|:------------|:----------|:------------| +| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes | +| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization | +| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes | +| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes | +| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes | +| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes | +| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization | + + +## Setup + +### Prerequisites + +#### Install Windows exporter + +To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/windows.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/windows.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| url | Server URL. | | yes | +| timeout | HTTP request timeout. 
| 1 | no | +| username | Username for basic HTTP authentication. | | no | +| password | Password for basic HTTP authentication. | | no | +| proxy_url | Proxy URL. | | no | +| proxy_username | Username for proxy basic HTTP authentication. | | no | +| proxy_password | Password for proxy basic HTTP authentication. | | no | +| method | HTTP request method. | GET | no | +| body | HTTP request body. | | no | +| headers | HTTP request headers. | | no | +| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +A basic example configuration. + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + +``` +##### HTTP authentication + +Basic HTTP authentication. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + +``` +</details> + +##### HTTPS with self-signed certificate + +Do not validate server certificate chain and hostname. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + +``` +</details> + +##### Virtual Node + +The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. +You can create a virtual node for all your Windows machines and control them as separate entities. + +To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + +> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + +```yaml +# /etc/netdata/vnodes/vnodes.conf +- hostname: win_server + guid: <value> +``` + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from multiple remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + + ```bash + cd /usr/libexec/netdata/plugins.d/ + ``` + +- Switch to the `netdata` user. 
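+
+  The plugin normally runs as the `netdata` user, so debugging under the same account reproduces its permissions and environment.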
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m windows
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml b/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml
new file mode 100644
index 00000000000000..87ac4cf636f404
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/metadata.yaml
@@ -0,0 +1,2172 @@
+plugin_name: go.d.plugin
+modules:
+  - &module
+    meta: &meta
+      id: collector-go.d.plugin-windows
+      plugin_name: go.d.plugin
+      module_name: windows
+      monitored_instance:
+        name: Windows
+        link: https://www.microsoft.com/en-us/windows
+        categories:
+          - data-collection.windows-systems
+        icon_filename: windows.svg
+      keywords:
+        - windows
+        - microsoft
+      most_popular: true
+      info_provided_to_referring_integrations:
+        description: ""
+      related_resources:
+        integrations:
+          list: []
+    overview:
+      data_collection:
+        metrics_description: |
+          This collector monitors the performance of Windows machines, collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+        method_description: |
+          It collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+      default_behavior:
+        auto_detection:
+          description: |
+            It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+            Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+        limits:
+          description: ""
+        performance_impact:
+          description: |
+            Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list:
+          - title: Install Windows exporter
+            description: |
+              To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
+      configuration:
+        file:
+          name: go.d/windows.conf
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: url
+              description: Server URL.
+              default_value: ""
+              required: true
+            - name: timeout
+              description: HTTP request timeout.
+              default_value: 1
+              required: false
+            - name: username
+              description: Username for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: password
+              description: Password for basic HTTP authentication.
+              default_value: ""
+              required: false
+            - name: proxy_url
+              description: Proxy URL.
+              default_value: ""
+              required: false
+            - name: proxy_username
+              description: Username for proxy basic HTTP authentication.
+ default_value: "" + required: false + - name: proxy_password + description: Password for proxy basic HTTP authentication. + default_value: "" + required: false + - name: method + description: HTTP request method. + default_value: "GET" + required: false + - name: body + description: HTTP request body. + default_value: "" + required: false + - name: headers + description: HTTP request headers. + default_value: "" + required: false + - name: not_follow_redirects + description: Redirect handling policy. Controls whether the client follows redirects. + default_value: no + required: false + - name: tls_skip_verify + description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. + default_value: no + required: false + - name: tls_ca + description: Certification authority that the client uses when verifying the server's certificates. + default_value: "" + required: false + - name: tls_cert + description: Client TLS certificate. + default_value: "" + required: false + - name: tls_key + description: Client TLS key. + default_value: "" + required: false + examples: + folding: + title: Config + enabled: true + list: + - name: Basic + folding: + enabled: false + description: A basic example configuration. + config: | + jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + - name: HTTP authentication + description: Basic HTTP authentication. + config: | + jobs: + - name: win_server + url: http://192.0.2.1:9182/metrics + username: username + password: password + - name: HTTPS with self-signed certificate + description: Do not validate server certificate chain and hostname. + config: | + jobs: + - name: win_server + url: https://192.0.2.1:9182/metrics + tls_skip_verify: yes + - name: Virtual Node + description: | + The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc. + You can create a virtual node for all your Windows machines and control them as separate entities. + + To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`: + + > **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows. + + ```yaml + # /etc/netdata/vnodes/vnodes.conf + - hostname: win_server + guid: <value> + ``` + config: | + jobs: + - name: win_server + vnode: win_server + url: http://192.0.2.1:9182/metrics + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from multiple remote instances. 
+ config: | + jobs: + - name: win_server1 + url: http://192.0.2.1:9182/metrics + + - name: win_server2 + url: http://192.0.2.2:9182/metrics + troubleshooting: + problems: + list: [] + alerts: + - name: windows_10min_cpu_usage + metric: windows.cpu_utilization_total + info: average CPU utilization over the last 10 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_ram_in_use + metric: windows.memory_utilization + info: memory utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_inbound_packets_discarded + metric: windows.net_nic_discarded + info: number of inbound discarded packets for the network interface in the last 10 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_outbound_packets_discarded + metric: windows.net_nic_discarded + info: number of outbound discarded packets for the network interface in the last 10 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_inbound_packets_errors + metric: windows.net_nic_errors + info: number of inbound errors for the network interface in the last 10 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_outbound_packets_errors + metric: windows.net_nic_errors + info: number of outbound errors for the network interface in the last 10 minutes + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + - name: windows_disk_in_use + metric: windows.logical_disk_space_usage + info: disk space utilization + link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf + metrics: + folding: + title: Metrics + enabled: false + description: | + The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors). 
+ + Supported collectors: + + - [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md) + - [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md) + - [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md) + - [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md) + - [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md) + - [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md) + - [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md) + - [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md) + - [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md) + - [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md) + - [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md) + - [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md) + - [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md) + - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md) + - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md) + - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md) + - [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md) + - [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md) + - [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md) + - [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md) + - [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md) + - [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md) + - [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md) + - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md) + - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md) + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored host. 
labels: []
+          metrics:
+            - name: windows.cpu_utilization_total
+              description: Total CPU Utilization (all cores)
+              unit: percentage
+              chart_type: stacked
+              dimensions:
+                - name: dpc
+                - name: user
+                - name: privileged
+                - name: interrupt
+            - name: windows.memory_utilization
+              description: Memory Utilization
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: available
+                - name: used
+            - name: windows.memory_page_faults
+              description: Memory Page Faults
+              unit: events/s
+              chart_type: line
+              dimensions:
+                - name: page_faults
+            - name: windows.memory_swap_utilization
+              description: Swap Utilization
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: available
+                - name: used
+            - name: windows.memory_swap_operations
+              description: Swap Operations
+              unit: operations/s
+              chart_type: area
+              dimensions:
+                - name: read
+                - name: write
+            - name: windows.memory_swap_pages
+              description: Swap Pages
+              unit: pages/s
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: written
+            - name: windows.memory_cached
+              description: Cached
+              unit: KiB
+              chart_type: area
+              dimensions:
+                - name: cached
+            - name: windows.memory_cache_faults
+              description: Cache Faults
+              unit: events/s
+              chart_type: line
+              dimensions:
+                - name: cache_faults
+            - name: windows.memory_system_pool
+              description: System Memory Pool
+              unit: bytes
+              chart_type: area
+              dimensions:
+                - name: paged
+                - name: non-paged
+            - name: windows.tcp_conns_established
+              description: TCP established connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_conns_active
+              description: TCP active connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_conns_passive
+              description: TCP passive connections
+              unit: connections/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_conns_failures
+              description: TCP connection failures
+              unit: failures/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_conns_resets
+              description: TCP connection resets
+              unit: resets/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_segments_received
+              description: Number of TCP segments received
+              unit: segments/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_segments_sent
+              description: Number of TCP segments sent
+              unit: segments/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.tcp_segments_retransmitted
+              description: Number of TCP segments retransmitted
+              unit: segments/s
+              chart_type: line
+              dimensions:
+                - name: ipv4
+                - name: ipv6
+            - name: windows.os_processes
+              description: Processes
+              unit: number
+              chart_type: line
+              dimensions:
+                - name: processes
+            - name: windows.os_users
+              description: Number of Users
+              unit: users
+              chart_type: line
+              dimensions:
+                - name: users
+            - name: windows.os_visible_memory_usage
+              description: Visible Memory Usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: free
+                - name: used
+            - name: windows.os_paging_files_usage
+              description: Paging Files Usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: free
+                - name: used
+            - name: windows.system_threads
+              description: Threads
+              unit: number
+              chart_type: line
+              dimensions:
+                - name: threads
+            - name: windows.system_uptime
+              description: Uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: windows.logon_type_sessions
+              description: Active User Logon Sessions By Type
+              unit: sessions
+              chart_type: stacked
+              dimensions:
+                - name: system
+                - name: interactive
+                - name: network
+                - name: batch
+                - name: service
+                - name: proxy
+                - name: unlock
+                - name: network_clear_text
+                - name: new_credentials
+                - name: remote_interactive
+                - name: cached_interactive
+                - name: cached_remote_interactive
+                - name: cached_unlock
+            - name: windows.processes_cpu_utilization
+              description: CPU usage (100% = 1 core)
+              unit: percentage
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_handles
+              description: Number of handles open
+              unit: handles
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_io_bytes
+              description: Total of IO bytes (read, write, other)
+              unit: bytes/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_io_operations
+              description: Total of IO events (read, write, other)
+              unit: operations/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_page_faults
+              description: Number of page faults
+              unit: pgfaults/s
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_page_file_bytes
+              description: Bytes used in page file(s)
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_pool_bytes
+              description: Memory usage
+              unit: bytes
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: windows.processes_threads
+              description: Active threads
+              unit: threads
+              chart_type: stacked
+              dimensions:
+                - name: a dimension per process
+            - name: ad.database_operations
+              description: AD database operations
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: add
+                - name: delete
+                - name: modify
+                - name: recycle
+            - name: ad.directory_operations
+              description: AD directory operations
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: write
+                - name: search
+            - name: ad.name_cache_lookups
+              description: Name cache lookups
+              unit: lookups/s
+              chart_type: line
+              dimensions:
+                - name: lookups
+            - name: ad.name_cache_hits
+              description: Name cache hits
+              unit: hits/s
+              chart_type: line
+              dimensions:
+                - name: hits
+            - name: ad.atq_average_request_latency
+              description: Average request processing time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: time
+            - name: ad.atq_outstanding_requests
+              description: Outstanding requests
+              unit: requests
+              chart_type: line
+              dimensions:
+                - name: outstanding
+            - name: ad.dra_replication_intersite_compressed_traffic
+              description: DRA replication compressed traffic between sites
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: inbound
+                - name: outbound
+            - name: ad.dra_replication_intrasite_compressed_traffic
+              description: DRA replication compressed traffic within site
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: inbound
+                - name: outbound
+            - name: ad.dra_replication_sync_objects_remaining
+              description: DRA replication full sync objects remaining
+              unit: objects
+              chart_type: line
+              dimensions:
+                - name: inbound
+                - name: outbound
+            - name: ad.dra_replication_objects_filtered
+              description: DRA replication objects filtered
+              unit: objects/s
+              chart_type: line
+              dimensions:
+                - name: inbound
+                - name: outbound
+            - name: ad.dra_replication_properties_updated
+              description: DRA replication properties updated
+
unit: properties/s + chart_type: line + dimensions: + - name: inbound + - name: outbound + - name: ad.dra_replication_properties_filtered + description: DRA replication properties filtered + unit: properties/s + chart_type: line + dimensions: + - name: inbound + - name: outbound + - name: ad.dra_replication_pending_syncs + description: DRA replication pending syncs + unit: syncs + chart_type: line + dimensions: + - name: pending + - name: ad.dra_replication_sync_requests + description: DRA replication sync requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: ad.ds_threads + description: Directory Service threads + unit: threads + chart_type: line + dimensions: + - name: in_use + - name: ad.ldap_last_bind_time + description: LDAP last successful bind time + unit: seconds + chart_type: line + dimensions: + - name: last_bind + - name: ad.binds + description: Successful binds + unit: binds/s + chart_type: line + dimensions: + - name: binds + - name: ad.ldap_searches + description: LDAP client search operations + unit: searches/s + chart_type: line + dimensions: + - name: searches + - name: adfs.ad_login_connection_failures + description: Connection failures + unit: failures/s + chart_type: line + dimensions: + - name: connection + - name: adfs.certificate_authentications + description: User Certificate authentications + unit: authentications/s + chart_type: line + dimensions: + - name: authentications + - name: adfs.db_artifact_failures + description: Connection failures to the artifact database + unit: failures/s + chart_type: line + dimensions: + - name: connection + - name: adfs.db_artifact_query_time_seconds + description: Time taken for an artifact database query + unit: seconds/s + chart_type: line + dimensions: + - name: query_time + - name: adfs.db_config_failures + description: Connection failures to the configuration database + unit: failures/s + chart_type: line + dimensions: + - name: connection + - name: adfs.db_config_query_time_seconds + description: Time taken for a configuration database query + unit: seconds/s + chart_type: line + dimensions: + - name: query_time + - name: adfs.device_authentications + description: Device authentications + unit: authentications/s + chart_type: line + dimensions: + - name: authentications + - name: adfs.external_authentications + description: Authentications from external MFA providers + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.federated_authentications + description: Authentications from Federated Sources + unit: authentications/s + chart_type: line + dimensions: + - name: authentications + - name: adfs.federation_metadata_requests + description: Federation Metadata requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: adfs.oauth_authorization_requests + description: Incoming requests to the OAuth Authorization endpoint + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: adfs.oauth_client_authentications + description: OAuth client authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_client_credentials_requests + description: OAuth client credentials requests + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_client_privkey_jwt_authentications + description: OAuth client private key JWT authentications + unit: authentications/s + chart_type: 
line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_client_secret_basic_authentications + description: OAuth client secret basic authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_client_secret_post_authentications + description: OAuth client secret post authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_client_windows_authentications + description: OAuth client windows integrated authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_logon_certificate_requests + description: OAuth logon certificate requests + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_password_grant_requests + description: OAuth password grant requests + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.oauth_token_requests_success + description: Successful RP token requests over OAuth protocol + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: adfs.passive_requests + description: Passive requests + unit: requests/s + chart_type: line + dimensions: + - name: passive + - name: adfs.passport_authentications + description: Microsoft Passport SSO authentications + unit: authentications/s + chart_type: line + dimensions: + - name: passport + - name: adfs.password_change_requests + description: Password change requests + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.samlp_token_requests_success + description: Successful RP token requests over SAML-P protocol + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: adfs.sso_authentications + description: SSO authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.token_requests + description: Token access requests + unit: requests/s + chart_type: line + dimensions: + - name: requests + - name: adfs.userpassword_authentications + description: AD U/P authentications + unit: authentications/s + chart_type: line + dimensions: + - name: success + - name: failure + - name: adfs.windows_integrated_authentications + description: Windows integrated authentications using Kerberos or NTLM + unit: authentications/s + chart_type: line + dimensions: + - name: authentications + - name: adfs.wsfed_token_requests_success + description: Successful RP token requests over WS-Fed protocol + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: adfs.wstrust_token_requests_success + description: Successful RP token requests over WS-Trust protocol + unit: requests/s + chart_type: line + dimensions: + - name: success + - name: exchange.activesync_ping_cmds_pending + description: Ping commands pending in queue + unit: commands + chart_type: line + dimensions: + - name: pending + - name: exchange.activesync_requests + description: HTTP requests received from ASP.NET + unit: requests/s + chart_type: line + dimensions: + - name: received + - name: exchange.activesync_sync_cmds + description: Sync commands processed + unit: commands/s + chart_type: line + dimensions: + - name: processed + - name: exchange.autodiscover_requests + description: Autodiscover service requests processed + unit: requests/s + chart_type: line + dimensions: + - 
name: processed
+            - name: exchange.avail_service_requests
+              description: Requests serviced
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: serviced
+            - name: exchange.owa_current_unique_users
+              description: Unique users currently logged on to Outlook Web App
+              unit: users
+              chart_type: line
+              dimensions:
+                - name: logged-in
+            - name: exchange.owa_requests_total
+              description: Requests handled by Outlook Web App
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: handled
+            - name: exchange.rpc_active_user_count
+              description: Active unique users in the last 2 minutes
+              unit: users
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: exchange.rpc_avg_latency
+              description: Average latency
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: latency
+            - name: exchange.rpc_connection_count
+              description: Client connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: connections
+            - name: exchange.rpc_operations
+              description: RPC operations
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: operations
+            - name: exchange.rpc_requests
+              description: Client requests currently being processed
+              unit: requests
+              chart_type: line
+              dimensions:
+                - name: processed
+            - name: exchange.rpc_user_count
+              description: RPC users
+              unit: users
+              chart_type: line
+              dimensions:
+                - name: users
+            - name: exchange.transport_queues_active_mail_box_delivery
+              description: Active Mailbox Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_external_active_remote_delivery
+              description: External Active Remote Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_external_largest_delivery
+              description: External Largest Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_internal_active_remote_delivery
+              description: Internal Active Remote Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_internal_largest_delivery
+              description: Internal Largest Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_retry_mailbox_delivery
+              description: Retry Mailbox Delivery Queue length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: exchange.transport_queues_poison
+              description: Poison Queue Length
+              unit: messages/s
+              chart_type: line
+              dimensions:
+                - name: low
+                - name: high
+                - name: none
+                - name: normal
+            - name: hyperv.vms_health
+              description: Virtual machines health status
+              unit: vms
+              chart_type: stacked
+              dimensions:
+                - name: ok
+                - name: critical
+            - name: hyperv.root_partition_device_space_pages
+              description: Root partition pages in the device space
+              unit: pages
+              chart_type: line
+              dimensions:
+                - name: 4K
+                - name: 2M
+                - name: 1G
+            - name: hyperv.root_partition_gpa_space_pages
+              description: Root partition pages in the GPA space
+              unit: pages
+              chart_type: line
+              dimensions:
+                - name: 4K
+                - name: 2M
+                - name: 1G
+            - name: hyperv.root_partition_gpa_space_modifications
+
description: Root partition GPA space modifications + unit: modifications/s + chart_type: line + dimensions: + - name: gpa + - name: hyperv.root_partition_attached_devices + description: Root partition attached devices + unit: devices + chart_type: line + dimensions: + - name: attached + - name: hyperv.root_partition_deposited_pages + description: Root partition deposited pages + unit: pages + chart_type: line + dimensions: + - name: deposited + - name: hyperv.root_partition_skipped_interrupts + description: Root partition skipped interrupts + unit: interrupts + chart_type: line + dimensions: + - name: skipped + - name: hyperv.root_partition_device_dma_errors + description: Root partition illegal DMA requests + unit: requests + chart_type: line + dimensions: + - name: illegal_dma + - name: hyperv.root_partition_device_interrupt_errors + description: Root partition illegal interrupt requests + unit: requests + chart_type: line + dimensions: + - name: illegal_interrupt + - name: hyperv.root_partition_device_interrupt_throttle_events + description: Root partition throttled interrupts + unit: events + chart_type: line + dimensions: + - name: throttling + - name: hyperv.root_partition_io_tlb_flush + description: Root partition flushes of I/O TLBs + unit: flushes/s + chart_type: line + dimensions: + - name: flushes + - name: hyperv.root_partition_address_space + description: Root partition address spaces in the virtual TLB + unit: address spaces + chart_type: line + dimensions: + - name: address_spaces + - name: hyperv.root_partition_virtual_tlb_flush_entries + description: Root partition flushes of the entire virtual TLB + unit: flushes/s + chart_type: line + dimensions: + - name: flushes + - name: hyperv.root_partition_virtual_tlb_pages + description: Root partition pages used by the virtual TLB + unit: pages + chart_type: line + dimensions: + - name: used + - name: cpu core + description: TBD + labels: + - name: core + description: TBD + metrics: + - name: windows.cpu_core_utilization + description: Core CPU Utilization + unit: percentage + chart_type: stacked + dimensions: + - name: dpc + - name: user + - name: privileged + - name: interrupt + - name: windows.cpu_core_interrupts + description: Received and Serviced Hardware Interrupts + unit: interrupts/s + chart_type: line + dimensions: + - name: interrupts + - name: windows.cpu_core_dpcs + description: Received and Serviced Deferred Procedure Calls (DPC) + unit: dpcs/s + chart_type: line + dimensions: + - name: dpcs + - name: windows.cpu_core_cstate + description: Core Time Spent in Low-Power Idle State + unit: percentage + chart_type: stacked + dimensions: + - name: c1 + - name: c2 + - name: c3 + - name: logical disk + description: TBD + labels: + - name: disk + description: TBD + metrics: + - name: windows.logical_disk_utilization + description: Space usage + unit: bytes + chart_type: stacked + dimensions: + - name: free + - name: used + - name: windows.logical_disk_bandwidth + description: Bandwidth + unit: bytes/s + chart_type: area + dimensions: + - name: read + - name: write + - name: windows.logical_disk_operations + description: Operations + unit: operations/s + chart_type: line + dimensions: + - name: reads + - name: writes + - name: windows.logical_disk_latency + description: Average Read/Write Latency + unit: seconds + chart_type: line + dimensions: + - name: read + - name: write + - name: network device + description: TBD + labels: + - name: nic + description: TBD + metrics: + - name: windows.net_nic_bandwidth + description: 
Bandwidth
+              unit: kilobits/s
+              chart_type: area
+              dimensions:
+                - name: received
+                - name: sent
+            - name: windows.net_nic_packets
+              description: Packets
+              unit: packets/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: windows.net_nic_errors
+              description: Errors
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: inbound
+                - name: outbound
+            - name: windows.net_nic_discarded
+              description: Discards
+              unit: discards/s
+              chart_type: line
+              dimensions:
+                - name: inbound
+                - name: outbound
+        - name: thermalzone
+          description: TBD
+          labels:
+            - name: thermalzone
+              description: TBD
+          metrics:
+            - name: windows.thermalzone_temperature
+              description: Thermal zone temperature
+              unit: celsius
+              chart_type: line
+              dimensions:
+                - name: temperature
+        - name: service
+          description: TBD
+          labels:
+            - name: service
+              description: TBD
+          metrics:
+            - name: windows.service_state
+              description: Service state
+              unit: state
+              chart_type: line
+              dimensions:
+                - name: running
+                - name: stopped
+                - name: start_pending
+                - name: stop_pending
+                - name: continue_pending
+                - name: pause_pending
+                - name: paused
+                - name: unknown
+            - name: windows.service_status
+              description: Service status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: ok
+                - name: error
+                - name: unknown
+                - name: degraded
+                - name: pred_fail
+                - name: starting
+                - name: stopping
+                - name: service
+                - name: stressed
+                - name: nonrecover
+                - name: no_contact
+                - name: lost_comm
+        - name: website
+          description: TBD
+          labels:
+            - name: website
+              description: TBD
+          metrics:
+            - name: iis.website_traffic
+              description: Website traffic
+              unit: bytes/s
+              chart_type: area
+              dimensions:
+                - name: received
+                - name: sent
+            - name: iis.website_requests_rate
+              description: Website requests rate
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: iis.website_active_connections_count
+              description: Website active connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: iis.website_users_count
+              description: Website users with pending requests
+              unit: users
+              chart_type: stacked
+              dimensions:
+                - name: anonymous
+                - name: non_anonymous
+            - name: iis.website_connection_attempts_rate
+              description: Website connection attempts
+              unit: attempts/s
+              chart_type: line
+              dimensions:
+                - name: connection
+            - name: iis.website_isapi_extension_requests_count
+              description: ISAPI extension requests
+              unit: requests
+              chart_type: line
+              dimensions:
+                - name: isapi
+            - name: iis.website_isapi_extension_requests_rate
+              description: Website extension requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: isapi
+            - name: iis.website_ftp_file_transfer_rate
+              description: Website FTP file transfer rate
+              unit: files/s
+              chart_type: line
+              dimensions:
+                - name: received
+                - name: sent
+            - name: iis.website_logon_attempts_rate
+              description: Website logon attempts
+              unit: attempts/s
+              chart_type: line
+              dimensions:
+                - name: logon
+            - name: iis.website_errors_rate
+              description: Website errors
+              unit: errors/s
+              chart_type: stacked
+              dimensions:
+                - name: document_locked
+                - name: document_not_found
+            - name: iis.website_uptime
+              description: Website uptime
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: uptime
+        - name: mssql instance
+          description: TBD
+          labels:
+            - name: mssql_instance
+              description: TBD
+          metrics:
+            - name: mssql.instance_accessmethods_page_splits
+              description: Page splits
+              unit: splits/s
+              chart_type: line
+              dimensions:
+                - name: page
+            - name: mssql.instance_cache_hit_ratio
+              description: Buffer Cache hit ratio
+              unit: percentage
+              chart_type: line
+              dimensions:
+                - name: hit_ratio
+            - name: mssql.instance_bufman_checkpoint_pages
+              description: Flushed pages
+              unit: pages/s
+              chart_type: line
+              dimensions:
+                - name: flushed
+            - name: mssql.instance_bufman_page_life_expectancy
+              description: Page life expectancy
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: life_expectancy
+            - name: mssql.instance_bufman_iops
+              description: Number of pages input and output
+              unit: iops
+              chart_type: line
+              dimensions:
+                - name: read
+                - name: written
+            - name: mssql.instance_blocked_processes
+              description: Blocked processes
+              unit: processes
+              chart_type: line
+              dimensions:
+                - name: blocked
+            - name: mssql.instance_user_connection
+              description: User connections
+              unit: connections
+              chart_type: line
+              dimensions:
+                - name: user
+            - name: mssql.instance_locks_lock_wait
+              description: Lock requests that required the caller to wait
+              unit: locks/s
+              chart_type: line
+              dimensions:
+                - name: alloc_unit
+                - name: application
+                - name: database
+                - name: extent
+                - name: file
+                - name: hobt
+                - name: key
+                - name: metadata
+                - name: oib
+                - name: object
+                - name: page
+                - name: rid
+                - name: row_group
+                - name: xact
+            - name: mssql.instance_locks_deadlocks
+              description: Lock requests that resulted in deadlock
+              unit: locks/s
+              chart_type: line
+              dimensions:
+                - name: alloc_unit
+                - name: application
+                - name: database
+                - name: extent
+                - name: file
+                - name: hobt
+                - name: key
+                - name: metadata
+                - name: oib
+                - name: object
+                - name: page
+                - name: rid
+                - name: row_group
+                - name: xact
+            - name: mssql.instance_memmgr_connection_memory_bytes
+              description: Amount of dynamic memory to maintain connections
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: memory
+            - name: mssql.instance_memmgr_external_benefit_of_memory
+              description: Performance benefit from adding memory to a specific cache
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: benefit
+            - name: mssql.instance_memmgr_pending_memory_grants
+              description: Processes waiting for memory grant
+              unit: processes
+              chart_type: line
+              dimensions:
+                - name: pending
+            - name: mssql.instance_memmgr_server_memory
+              description: Memory committed
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: memory
+            - name: mssql.instance_sql_errors
+              description: Errors
+              unit: errors
+              chart_type: line
+              dimensions:
+                - name: db_offline
+                - name: info
+                - name: kill_connection
+                - name: user
+            - name: mssql.instance_sqlstats_auto_parameterization_attempts
+              description: Failed auto-parameterization attempts
+              unit: attempts/s
+              chart_type: line
+              dimensions:
+                - name: failed
+            - name: mssql.instance_sqlstats_batch_requests
+              description: Total of batch requests
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: batch
+            - name: mssql.instance_sqlstats_safe_auto_parameterization_attempts
+              description: Safe auto-parameterization attempts
+              unit: attempts/s
+              chart_type: line
+              dimensions:
+                - name: safe
+            - name: mssql.instance_sqlstats_sql_compilations
+              description: SQL compilations
+              unit: compilations/s
+              chart_type: line
+              dimensions:
+                - name: compilations
+            - name: mssql.instance_sqlstats_sql_recompilations
+              description: SQL re-compilations
+              unit: recompiles/s
+              chart_type: line
+              dimensions:
+                - name: recompiles
+        - name: database
+          description: TBD
+          labels:
+            - name: mssql_instance
+              description: TBD
+            - name: database
+              description: TBD
+          metrics:
+            - name: mssql.database_active_transactions
+              description: Active transactions per database
+              unit: transactions
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: mssql.database_backup_restore_operations
+              description: Backup IO per database
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: backup
+            - name: mssql.database_data_files_size
+              description: Current database size
+              unit: bytes
+              chart_type: line
+              dimensions:
+                - name: size
+            - name: mssql.database_log_flushed
+              description: Log flushed
+              unit: bytes/s
+              chart_type: line
+              dimensions:
+                - name: flushed
+            - name: mssql.database_log_flushes
+              description: Log flushes
+              unit: flushes/s
+              chart_type: line
+              dimensions:
+                - name: log
+            - name: mssql.database_transactions
+              description: Transactions
+              unit: transactions/s
+              chart_type: line
+              dimensions:
+                - name: transactions
+            - name: mssql.database_write_transactions
+              description: Write transactions
+              unit: transactions/s
+              chart_type: line
+              dimensions:
+                - name: write
+        - name: certificate template
+          description: TBD
+          labels:
+            - name: cert_template
+              description: TBD
+          metrics:
+            - name: adcs.cert_template_requests
+              description: Certificate requests processed
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: requests
+            - name: adcs.cert_template_failed_requests
+              description: Certificate failed requests processed
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: failed
+            - name: adcs.cert_template_issued_requests
+              description: Certificate issued requests processed
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: issued
+            - name: adcs.cert_template_pending_requests
+              description: Certificate pending requests processed
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: pending
+            - name: adcs.cert_template_request_processing_time
+              description: Certificate last request processing time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: processing_time
+            - name: adcs.cert_template_retrievals
+              description: Total of certificate retrievals
+              unit: retrievals/s
+              chart_type: line
+              dimensions:
+                - name: retrievals
+            - name: adcs.cert_template_retrieval_processing_time
+              description: Certificate last retrieval processing time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: processing_time
+            - name: adcs.cert_template_request_cryptographic_signing_time
+              description: Certificate last signing operation request time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: signing_time
+            - name: adcs.cert_template_request_policy_module_processing
+              description: Certificate last policy module processing request time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: processing_time
+            - name: adcs.cert_template_challenge_responses
+              description: Certificate challenge responses
+              unit: responses/s
+              chart_type: line
+              dimensions:
+                - name: challenge
+            - name: adcs.cert_template_challenge_response_processing_time
+              description: Certificate last challenge response time
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: processing_time
+            - name: adcs.cert_template_signed_certificate_timestamp_lists
+              description: Certificate Signed Certificate Timestamp Lists processed
+              unit: lists/s
+              chart_type: line
+              dimensions:
+                - name: processed
+            - name: adcs.cert_template_signed_certificate_timestamp_list_processing_time
+              description: Certificate last Signed Certificate Timestamp List processing time
+              unit: seconds
+              chart_type:
line + dimensions: + - name: processing_time + - name: process + description: TBD + labels: + - name: process + description: TBD + metrics: + - name: netframework.clrexception_thrown + description: Thrown exceptions + unit: exceptions/s + chart_type: line + dimensions: + - name: exceptions + - name: netframework.clrexception_filters + description: Executed exception filters + unit: filters/s + chart_type: line + dimensions: + - name: filters + - name: netframework.clrexception_finallys + description: Executed finally blocks + unit: finallys/s + chart_type: line + dimensions: + - name: finallys + - name: netframework.clrexception_throw_to_catch_depth + description: Traversed stack frames + unit: stack_frames/s + chart_type: line + dimensions: + - name: traversed + - name: netframework.clrinterop_com_callable_wrappers + description: COM callable wrappers (CCW) + unit: ccw/s + chart_type: line + dimensions: + - name: com_callable_wrappers + - name: netframework.clrinterop_interop_marshallings + description: Arguments and return values marshallings + unit: marshallings/s + chart_type: line + dimensions: + - name: marshallings + - name: netframework.clrinterop_interop_stubs_created + description: Created stubs + unit: stubs/s + chart_type: line + dimensions: + - name: created + - name: netframework.clrjit_methods + description: JIT-compiled methods + unit: methods/s + chart_type: line + dimensions: + - name: jit-compiled + - name: netframework.clrjit_time + description: Time spent in JIT compilation + unit: percentage + chart_type: line + dimensions: + - name: time + - name: netframework.clrjit_standard_failures + description: JIT compiler failures + unit: failures/s + chart_type: line + dimensions: + - name: failures + - name: netframework.clrjit_il_bytes + description: Compiled Microsoft intermediate language (MSIL) bytes + unit: bytes/s + chart_type: line + dimensions: + - name: compiled_msil + - name: netframework.clrloading_loader_heap_size + description: Memory committed by class loader + unit: bytes + chart_type: line + dimensions: + - name: committed + - name: netframework.clrloading_appdomains_loaded + description: Loaded application domains + unit: domain/s + chart_type: line + dimensions: + - name: loaded + - name: netframework.clrloading_appdomains_unloaded + description: Unloaded application domains + unit: domain/s + chart_type: line + dimensions: + - name: unloaded + - name: netframework.clrloading_assemblies_loaded + description: Loaded assemblies + unit: assemblies/s + chart_type: line + dimensions: + - name: loaded + - name: netframework.clrloading_classes_loaded + description: Loaded classes in all assemblies + unit: classes/s + chart_type: line + dimensions: + - name: loaded + - name: netframework.clrloading_class_load_failures + description: Class load failures + unit: failures/s + chart_type: line + dimensions: + - name: class_load + - name: netframework.clrlocksandthreads_queue_length + description: Threads waited to acquire a managed lock + unit: threads/s + chart_type: line + dimensions: + - name: threads + - name: netframework.clrlocksandthreads_current_logical_threads + description: Logical threads + unit: threads + chart_type: line + dimensions: + - name: logical + - name: netframework.clrlocksandthreads_current_physical_threads + description: Physical threads + unit: threads + chart_type: line + dimensions: + - name: physical + - name: netframework.clrlocksandthreads_recognized_threads + description: Threads recognized by the runtime + unit: threads/s + chart_type: 
line + dimensions: + - name: threads + - name: netframework.clrlocksandthreads_contentions + description: Fails to acquire a managed lock + unit: contentions/s + chart_type: line + dimensions: + - name: contentions + - name: netframework.clrmemory_allocated_bytes + description: Memory allocated on the garbage collection heap + unit: bytes/s + chart_type: line + dimensions: + - name: allocated + - name: netframework.clrmemory_finalization_survivors + description: Objects that survived garbage-collection + unit: objects + chart_type: line + dimensions: + - name: survived + - name: netframework.clrmemory_heap_size + description: Maximum bytes that can be allocated + unit: bytes + chart_type: line + dimensions: + - name: heap + - name: netframework.clrmemory_promoted + description: Memory promoted to the next generation + unit: bytes + chart_type: line + dimensions: + - name: promoted + - name: netframework.clrmemory_number_gc_handles + description: Garbage collection handles + unit: handles + chart_type: line + dimensions: + - name: used + - name: netframework.clrmemory_collections + description: Garbage collections + unit: gc/s + chart_type: line + dimensions: + - name: gc + - name: netframework.clrmemory_induced_gc + description: Garbage collections induced + unit: gc/s + chart_type: line + dimensions: + - name: gc + - name: netframework.clrmemory_number_pinned_objects + description: Pinned objects encountered + unit: objects + chart_type: line + dimensions: + - name: pinned + - name: netframework.clrmemory_number_sink_blocks_in_use + description: Synchronization blocks in use + unit: blocks + chart_type: line + dimensions: + - name: used + - name: netframework.clrmemory_committed + description: Virtual memory committed by GC + unit: bytes + chart_type: line + dimensions: + - name: committed + - name: netframework.clrmemory_reserved + description: Virtual memory reserved by GC + unit: bytes + chart_type: line + dimensions: + - name: reserved + - name: netframework.clrmemory_gc_time + description: Time spent on GC + unit: percentage + chart_type: line + dimensions: + - name: time + - name: netframework.clrremoting_channels + description: Registered channels + unit: channels/s + chart_type: line + dimensions: + - name: registered + - name: netframework.clrremoting_context_bound_classes_loaded + description: Loaded context-bound classes + unit: classes + chart_type: line + dimensions: + - name: loaded + - name: netframework.clrremoting_context_bound_objects + description: Allocated context-bound objects + unit: objects/s + chart_type: line + dimensions: + - name: allocated + - name: netframework.clrremoting_context_proxies + description: Remoting proxy objects + unit: objects/s + chart_type: line + dimensions: + - name: objects + - name: netframework.clrremoting_contexts + description: Total of remoting contexts + unit: contexts + chart_type: line + dimensions: + - name: contexts + - name: netframework.clrremoting_remote_calls + description: Remote Procedure Calls (RPC) invoked + unit: calls/s + chart_type: line + dimensions: + - name: rpc + - name: netframework.clrsecurity_link_time_checks + description: Link-time code access security checks + unit: checks/s + chart_type: line + dimensions: + - name: linktime + - name: netframework.clrsecurity_checks_time + description: Time spent performing runtime code access security checks + unit: percentage + chart_type: line + dimensions: + - name: time + - name: netframework.clrsecurity_stack_walk_depth + description: Depth of the stack + unit: depth + 
chart_type: line
+              dimensions:
+                - name: stack
+            - name: netframework.clrsecurity_runtime_checks
+              description: Runtime code access security checks performed
+              unit: checks/s
+              chart_type: line
+              dimensions:
+                - name: runtime
+        - name: exchange workload
+          description: TBD
+          labels:
+            - name: workload
+              description: TBD
+          metrics:
+            - name: exchange.workload_active_tasks
+              description: Workload active tasks
+              unit: tasks
+              chart_type: line
+              dimensions:
+                - name: active
+            - name: exchange.workload_completed_tasks
+              description: Workload completed tasks
+              unit: tasks/s
+              chart_type: line
+              dimensions:
+                - name: completed
+            - name: exchange.workload_queued_tasks
+              description: Workload queued tasks
+              unit: tasks/s
+              chart_type: line
+              dimensions:
+                - name: queued
+            - name: exchange.workload_yielded_tasks
+              description: Workload yielded tasks
+              unit: tasks/s
+              chart_type: line
+              dimensions:
+                - name: yielded
+            - name: exchange.workload_activity_status
+              description: Workload activity status
+              unit: status
+              chart_type: line
+              dimensions:
+                - name: active
+                - name: paused
+        - name: ldap process
+          description: TBD
+          labels:
+            - name: workload
+              description: TBD
+          metrics:
+            - name: exchange.ldap_long_running_ops_per_sec
+              description: Long Running LDAP operations
+              unit: operations/s
+              chart_type: line
+              dimensions:
+                - name: long-running
+            - name: exchange.ldap_read_time
+              description: Time to send an LDAP read request and receive a response
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: read
+            - name: exchange.ldap_search_time
+              description: Time to send an LDAP search request and receive a response
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: search
+            - name: exchange.ldap_write_time
+              description: Time to send an LDAP write request and receive a response
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: write
+            - name: exchange.ldap_timeout_errors
+              description: LDAP timeout errors
+              unit: errors/s
+              chart_type: line
+              dimensions:
+                - name: timeout
+        - name: http proxy
+          description: TBD
+          labels:
+            - name: workload
+              description: TBD
+          metrics:
+            - name: exchange.http_proxy_avg_auth_latency
+              description: Average time spent authenticating CAS
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: latency
+            - name: exchange.http_proxy_avg_cas_processing_latency_sec
+              description: Average CAS processing latency
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: latency
+            - name: exchange.http_proxy_mailbox_proxy_failure_rate
+              description: Percentage of failures between this CAS and MBX servers
+              unit: percentage
+              chart_type: line
+              dimensions:
+                - name: failures
+            - name: exchange.http_proxy_mailbox_server_locator_avg_latency_sec
+              description: Average latency of MailboxServerLocator web service calls
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: latency
+            - name: exchange.http_proxy_outstanding_proxy_requests
+              description: Concurrent outstanding proxy requests
+              unit: requests
+              chart_type: line
+              dimensions:
+                - name: outstanding
+            - name: exchange.http_proxy_requests
+              description: Number of proxy requests processed each second
+              unit: requests/s
+              chart_type: line
+              dimensions:
+                - name: processed
+        - name: vm
+          description: TBD
+          labels:
+            - name: vm_name
+              description: TBD
+          metrics:
+            - name: hyperv.vm_cpu_usage
+              description: VM CPU usage (100% = 1 core)
+              unit: percentage
+              chart_type: stacked
+              dimensions:
+                - name: guest
+                - name: hypervisor
+                - name: remote
+            - name: hyperv.vm_memory_physical
+              description: VM
assigned memory + unit: MiB + chart_type: line + dimensions: + - name: assigned_memory + - name: hyperv.vm_memory_physical_guest_visible + description: VM guest visible memory + unit: MiB + chart_type: line + dimensions: + - name: visible_memory + - name: hyperv.vm_memory_pressure_current + description: VM current pressure + unit: percentage + chart_type: line + dimensions: + - name: pressure + - name: hyperv.vm_vid_physical_pages_allocated + description: VM physical pages allocated + unit: pages + chart_type: line + dimensions: + - name: allocated + - name: hyperv.vm_vid_remote_physical_pages + description: VM physical pages not allocated from the preferred NUMA node + unit: pages + chart_type: line + dimensions: + - name: remote_physical + - name: vm device + description: TBD + labels: + - name: vm_device + description: TBD + metrics: + - name: hyperv.vm_device_bytes + description: VM storage device IO + unit: bytes/s + chart_type: area + dimensions: + - name: read + - name: written + - name: hyperv.vm_device_operations + description: VM storage device IOPS + unit: operations/s + chart_type: line + dimensions: + - name: read + - name: write + - name: hyperv.vm_device_errors + description: VM storage device errors + unit: errors/s + chart_type: line + dimensions: + - name: errors + - name: vm interface + description: TBD + labels: + - name: vm_interface + description: TBD + metrics: + - name: hyperv.vm_interface_bytes + description: VM interface traffic + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: hyperv.vm_interface_packets + description: VM interface packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: hyperv.vm_interface_packets_dropped + description: VM interface packets dropped + unit: drops/s + chart_type: line + dimensions: + - name: incoming + - name: outgoing + - name: vswitch + description: TBD + labels: + - name: vswitch + description: TBD + metrics: + - name: hyperv.vswitch_bytes + description: Virtual switch traffic + unit: bytes/s + chart_type: area + dimensions: + - name: received + - name: sent + - name: hyperv.vswitch_packets + description: Virtual switch packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: hyperv.vswitch_directed_packets + description: Virtual switch directed packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: hyperv.vswitch_broadcast_packets + description: Virtual switch broadcast packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: hyperv.vswitch_multicast_packets + description: Virtual switch multicast packets + unit: packets/s + chart_type: line + dimensions: + - name: received + - name: sent + - name: hyperv.vswitch_dropped_packets + description: Virtual switch dropped packets + unit: drops/s + chart_type: line + dimensions: + - name: incoming + - name: outgoing + - name: hyperv.vswitch_extensions_dropped_packets + description: Virtual switch extensions dropped packets + unit: drops/s + chart_type: line + dimensions: + - name: incoming + - name: outgoing + - name: hyperv.vswitch_packets_flooded + description: Virtual switch flooded packets + unit: packets/s + chart_type: line + dimensions: + - name: flooded + - name: hyperv.vswitch_learned_mac_addresses + description: Virtual switch learned MAC addresses + unit: mac addresses/s + chart_type: line + dimensions: + - name: learned + - name: 
hyperv.vswitch_purged_mac_addresses + description: Virtual switch purged MAC addresses + unit: mac addresses/s + chart_type: line + dimensions: + - name: purged + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-windows-mssql + most_popular: false + keywords: + - windows + - microsoft + - mssql + - database + - db + monitored_instance: + name: MS SQL Server + link: https://www.microsoft.com/en-us/sql-server/ + icon_filename: mssql.svg + categories: + - data-collection.windows-systems + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-windows-hyperv + most_popular: false + keywords: + - windows + - microsoft + - hyperv + - virtualization + - vm + monitored_instance: + name: HyperV + link: https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview + icon_filename: windows.svg + categories: + - data-collection.windows-systems + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-windows-msexchange + most_popular: false + keywords: + - windows + - microsoft + - mail + monitored_instance: + name: MS Exchange + link: https://www.microsoft.com/en-us/microsoft-365/exchange/email + icon_filename: exchange.svg + categories: + - data-collection.windows-systems + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-windows-ad + most_popular: false + keywords: + - windows + - microsoft + - active directory + - ad + - adcs + - adfs + monitored_instance: + name: Active Directory + link: https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview + icon_filename: windows.svg + categories: + - data-collection.windows-systems + - <<: *module + meta: + <<: *meta + id: collector-go.d.plugin-windows-dotnet + most_popular: false + keywords: + - windows + - microsoft + - dotnet + monitored_instance: + name: NET Framework + link: https://dotnet.microsoft.com/en-us/download/dotnet-framework + icon_filename: dotnet.svg + categories: + - data-collection.windows-systems diff --git a/src/go/collectors/go.d.plugin/modules/windows/testdata/v0.20.0/metrics.txt b/src/go/collectors/go.d.plugin/modules/windows/testdata/v0.20.0/metrics.txt new file mode 100644 index 00000000000000..02b68c3f8ab725 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/windows/testdata/v0.20.0/metrics.txt @@ -0,0 +1,3129 @@ +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0 +go_gc_duration_seconds{quantile="0.25"} 0 +go_gc_duration_seconds{quantile="0.5"} 0 +go_gc_duration_seconds{quantile="0.75"} 0 +go_gc_duration_seconds{quantile="1"} 0.0023911 +go_gc_duration_seconds_sum 0.0044814 +go_gc_duration_seconds_count 23 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 10 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.19.1"} 1 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.035808e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 5.9966872e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. 
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.462168e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 111234
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 7.78308e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.035808e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.767168e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.0551296e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 34382
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 5.496832e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 1.6318464e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6675087416268353e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 145616
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 4672
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16352
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 102272
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 114240
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.0613856e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 908248
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 458752
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 458752
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 2.7061304e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 10
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.609375
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.6777216e+07
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 352
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 3.229696e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.667508736e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 3.5569664e+07
+# HELP windows_adcs_challenge_response_processing_time_seconds Last time elapsed for challenge response
+# TYPE windows_adcs_challenge_response_processing_time_seconds gauge
+windows_adcs_challenge_response_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_challenge_response_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_challenge_responses_total Total certificate challenge responses processed
+# TYPE windows_adcs_challenge_responses_total counter
+windows_adcs_challenge_responses_total{cert_template="Administrator"} 0
+windows_adcs_challenge_responses_total{cert_template="DomainController"} 0
+# HELP windows_adcs_failed_requests_total Total failed certificate requests processed
+# TYPE windows_adcs_failed_requests_total counter
+windows_adcs_failed_requests_total{cert_template="Administrator"} 0
+windows_adcs_failed_requests_total{cert_template="DomainController"} 0
+# HELP windows_adcs_issued_requests_total Total issued certificate requests processed
+# TYPE windows_adcs_issued_requests_total counter
+windows_adcs_issued_requests_total{cert_template="Administrator"} 0
+windows_adcs_issued_requests_total{cert_template="DomainController"} 1
+# HELP windows_adcs_pending_requests_total Total pending certificate requests processed
+# TYPE windows_adcs_pending_requests_total counter
+windows_adcs_pending_requests_total{cert_template="Administrator"} 0
+windows_adcs_pending_requests_total{cert_template="DomainController"} 0
+# HELP windows_adcs_request_cryptographic_signing_time_seconds Last time elapsed for signing operation request
+# TYPE windows_adcs_request_cryptographic_signing_time_seconds gauge
+windows_adcs_request_cryptographic_signing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_cryptographic_signing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_request_policy_module_processing_time_seconds Last time elapsed for policy module processing request
+# TYPE windows_adcs_request_policy_module_processing_time_seconds gauge
+windows_adcs_request_policy_module_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_policy_module_processing_time_seconds{cert_template="DomainController"} 0.016
+# HELP windows_adcs_request_processing_time_seconds Last time elapsed for certificate requests
+# TYPE windows_adcs_request_processing_time_seconds gauge
+windows_adcs_request_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_processing_time_seconds{cert_template="DomainController"} 0.063
+# HELP windows_adcs_requests_total Total certificate requests processed
+# TYPE windows_adcs_requests_total counter
+windows_adcs_requests_total{cert_template="Administrator"} 0
+windows_adcs_requests_total{cert_template="DomainController"} 1
+# HELP windows_adcs_retrievals_processing_time_seconds Last time elapsed for certificate retrieval request
+# TYPE windows_adcs_retrievals_processing_time_seconds gauge
+windows_adcs_retrievals_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_retrievals_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_retrievals_total Total certificate retrieval requests processed
+# TYPE windows_adcs_retrievals_total counter
+windows_adcs_retrievals_total{cert_template="Administrator"} 0
+windows_adcs_retrievals_total{cert_template="DomainController"} 0
+# HELP windows_adcs_signed_certificate_timestamp_list_processing_time_seconds Last time elapsed for Signed Certificate Timestamp List
+# TYPE windows_adcs_signed_certificate_timestamp_list_processing_time_seconds gauge
+windows_adcs_signed_certificate_timestamp_list_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_signed_certificate_timestamp_list_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_signed_certificate_timestamp_lists_total Total Signed Certificate Timestamp Lists processed
+# TYPE windows_adcs_signed_certificate_timestamp_lists_total counter
+windows_adcs_signed_certificate_timestamp_lists_total{cert_template="Administrator"} 0
+windows_adcs_signed_certificate_timestamp_lists_total{cert_template="DomainController"} 0
+# HELP windows_adfs_ad_login_connection_failures_total Total number of connection failures to an Active Directory domain controller
+# TYPE windows_adfs_ad_login_connection_failures_total counter
+windows_adfs_ad_login_connection_failures_total 0
+# HELP windows_adfs_certificate_authentications_total Total number of User Certificate authentications
+# TYPE windows_adfs_certificate_authentications_total counter
+windows_adfs_certificate_authentications_total 0
+# HELP windows_adfs_db_artifact_failure_total Total number of failures connecting to the artifact database
+# TYPE windows_adfs_db_artifact_failure_total counter
+windows_adfs_db_artifact_failure_total 0
+# HELP windows_adfs_db_artifact_query_time_seconds_total Accumulator of time taken for an artifact database query
+# TYPE windows_adfs_db_artifact_query_time_seconds_total counter
+windows_adfs_db_artifact_query_time_seconds_total 0
+# HELP windows_adfs_db_config_failure_total Total number of failures connecting to the configuration database
+# TYPE windows_adfs_db_config_failure_total counter
+windows_adfs_db_config_failure_total 0
+# HELP windows_adfs_db_config_query_time_seconds_total Accumulator of time taken for a configuration database query
+# TYPE windows_adfs_db_config_query_time_seconds_total counter
+windows_adfs_db_config_query_time_seconds_total 0.10111504
+# HELP windows_adfs_device_authentications_total Total number of Device authentications
+# TYPE windows_adfs_device_authentications_total counter
+windows_adfs_device_authentications_total 0
+# HELP windows_adfs_external_authentications_failure_total Total number of failed authentications from external MFA providers
+# TYPE windows_adfs_external_authentications_failure_total counter
+windows_adfs_external_authentications_failure_total 0
+# HELP windows_adfs_external_authentications_success_total Total number of successful authentications from external MFA providers
+# TYPE windows_adfs_external_authentications_success_total counter
+windows_adfs_external_authentications_success_total 0
+# HELP windows_adfs_extranet_account_lockouts_total Total number of Extranet Account Lockouts
+# TYPE windows_adfs_extranet_account_lockouts_total counter
+windows_adfs_extranet_account_lockouts_total 0
+# HELP windows_adfs_federated_authentications_total Total number of authentications from a federated source
+# TYPE windows_adfs_federated_authentications_total counter
+windows_adfs_federated_authentications_total 0
+# HELP windows_adfs_federation_metadata_requests_total Total number of Federation Metadata requests
+# TYPE windows_adfs_federation_metadata_requests_total counter
+windows_adfs_federation_metadata_requests_total 1
+# HELP windows_adfs_oauth_authorization_requests_total Total number of incoming requests to the OAuth Authorization endpoint
+# TYPE windows_adfs_oauth_authorization_requests_total counter
+windows_adfs_oauth_authorization_requests_total 0
+# HELP windows_adfs_oauth_client_authentication_failure_total Total number of failed OAuth client Authentications
+# TYPE windows_adfs_oauth_client_authentication_failure_total counter
+windows_adfs_oauth_client_authentication_failure_total 0
+# HELP windows_adfs_oauth_client_authentication_success_total Total number of successful OAuth client Authentications
+# TYPE windows_adfs_oauth_client_authentication_success_total counter
+windows_adfs_oauth_client_authentication_success_total 0
+# HELP windows_adfs_oauth_client_credentials_failure_total Total number of failed OAuth Client Credentials Requests
+# TYPE windows_adfs_oauth_client_credentials_failure_total counter
+windows_adfs_oauth_client_credentials_failure_total 0
+# HELP windows_adfs_oauth_client_credentials_success_total Total number of successful RP tokens issued for OAuth Client Credentials Requests
+# TYPE windows_adfs_oauth_client_credentials_success_total counter
+windows_adfs_oauth_client_credentials_success_total 0
+# HELP windows_adfs_oauth_client_privkey_jtw_authentication_failure_total Total number of failed OAuth Client Private Key Jwt Authentications
+# TYPE windows_adfs_oauth_client_privkey_jtw_authentication_failure_total counter
+windows_adfs_oauth_client_privkey_jtw_authentication_failure_total 0
+# HELP windows_adfs_oauth_client_privkey_jwt_authentications_success_total Total number of successful OAuth Client Private Key Jwt Authentications
+# TYPE windows_adfs_oauth_client_privkey_jwt_authentications_success_total counter
+windows_adfs_oauth_client_privkey_jwt_authentications_success_total 0
+# HELP windows_adfs_oauth_client_secret_basic_authentications_failure_total Total number of failed OAuth Client Secret Basic Authentications
+# TYPE windows_adfs_oauth_client_secret_basic_authentications_failure_total counter
+windows_adfs_oauth_client_secret_basic_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_secret_basic_authentications_success_total Total number of successful OAuth Client Secret Basic Authentications
+# TYPE windows_adfs_oauth_client_secret_basic_authentications_success_total counter
+windows_adfs_oauth_client_secret_basic_authentications_success_total 0
+# HELP windows_adfs_oauth_client_secret_post_authentications_failure_total Total number of failed OAuth Client Secret Post Authentications
+# TYPE windows_adfs_oauth_client_secret_post_authentications_failure_total counter
+windows_adfs_oauth_client_secret_post_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_secret_post_authentications_success_total Total number of successful OAuth Client Secret Post Authentications
+# TYPE windows_adfs_oauth_client_secret_post_authentications_success_total counter
+windows_adfs_oauth_client_secret_post_authentications_success_total 0
+# HELP windows_adfs_oauth_client_windows_authentications_failure_total Total number of failed OAuth Client Windows Integrated Authentications
+# TYPE windows_adfs_oauth_client_windows_authentications_failure_total counter
+windows_adfs_oauth_client_windows_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_windows_authentications_success_total Total number of successful OAuth Client Windows Integrated Authentications
+# TYPE windows_adfs_oauth_client_windows_authentications_success_total counter
+windows_adfs_oauth_client_windows_authentications_success_total 0
+# HELP windows_adfs_oauth_logon_certificate_requests_failure_total Total number of failed OAuth Logon Certificate Requests
+# TYPE windows_adfs_oauth_logon_certificate_requests_failure_total counter
+windows_adfs_oauth_logon_certificate_requests_failure_total 0
+# HELP windows_adfs_oauth_logon_certificate_token_requests_success_total Total number of successful RP tokens issued for OAuth Logon Certificate Requests
+# TYPE windows_adfs_oauth_logon_certificate_token_requests_success_total counter
+windows_adfs_oauth_logon_certificate_token_requests_success_total 0
+# HELP windows_adfs_oauth_password_grant_requests_failure_total Total number of failed OAuth Password Grant Requests
+# TYPE windows_adfs_oauth_password_grant_requests_failure_total counter
+windows_adfs_oauth_password_grant_requests_failure_total 0
+# HELP windows_adfs_oauth_password_grant_requests_success_total Total number of successful OAuth Password Grant Requests
+# TYPE windows_adfs_oauth_password_grant_requests_success_total counter
+windows_adfs_oauth_password_grant_requests_success_total 0
+# HELP windows_adfs_oauth_token_requests_success_total Total number of successful RP tokens issued over OAuth protocol
+# TYPE windows_adfs_oauth_token_requests_success_total counter
+windows_adfs_oauth_token_requests_success_total 0
+# HELP windows_adfs_passive_requests_total Total number of passive (browser-based) requests
+# TYPE windows_adfs_passive_requests_total counter
+windows_adfs_passive_requests_total 0
+# HELP windows_adfs_passport_authentications_total Total number of Microsoft Passport SSO authentications
+# TYPE windows_adfs_passport_authentications_total counter
+windows_adfs_passport_authentications_total 0
+# HELP windows_adfs_password_change_failed_total Total number of failed password changes
+# TYPE windows_adfs_password_change_failed_total counter
+windows_adfs_password_change_failed_total 0
+# HELP windows_adfs_password_change_succeeded_total Total number of successful password changes
+# TYPE windows_adfs_password_change_succeeded_total counter
+windows_adfs_password_change_succeeded_total 0
+# HELP windows_adfs_samlp_token_requests_success_total Total number of successful RP tokens issued over SAML-P protocol
+# TYPE windows_adfs_samlp_token_requests_success_total counter
+windows_adfs_samlp_token_requests_success_total 0
+# HELP windows_adfs_sso_authentications_failure_total Total number of failed SSO authentications
+# TYPE windows_adfs_sso_authentications_failure_total counter
+windows_adfs_sso_authentications_failure_total 0
+# HELP windows_adfs_sso_authentications_success_total Total number of successful SSO authentications
+# TYPE windows_adfs_sso_authentications_success_total counter
+windows_adfs_sso_authentications_success_total 0
+# HELP windows_adfs_token_requests_total Total number of token requests
+# TYPE windows_adfs_token_requests_total counter
+windows_adfs_token_requests_total 0
+# HELP windows_adfs_userpassword_authentications_failure_total Total number of failed AD U/P authentications
+# TYPE windows_adfs_userpassword_authentications_failure_total counter
+windows_adfs_userpassword_authentications_failure_total 0
+# HELP windows_adfs_userpassword_authentications_success_total Total number of successful AD U/P authentications
+# TYPE windows_adfs_userpassword_authentications_success_total counter
+windows_adfs_userpassword_authentications_success_total 0
+# HELP windows_adfs_windows_integrated_authentications_total Total number of Windows integrated authentications (Kerberos/NTLM)
+# TYPE windows_adfs_windows_integrated_authentications_total counter
+windows_adfs_windows_integrated_authentications_total 0
+# HELP windows_adfs_wsfed_token_requests_success_total Total number of successful RP tokens issued over WS-Fed protocol
+# TYPE windows_adfs_wsfed_token_requests_success_total counter
+windows_adfs_wsfed_token_requests_success_total 0
+# HELP windows_adfs_wstrust_token_requests_success_total Total number of successful RP tokens issued over WS-Trust protocol
+# TYPE windows_adfs_wstrust_token_requests_success_total counter
+windows_adfs_wstrust_token_requests_success_total 0
+# HELP windows_ad_atq_average_request_latency
+# TYPE windows_ad_atq_average_request_latency gauge
+windows_ad_atq_average_request_latency 0
+# HELP windows_ad_atq_outstanding_requests
+# TYPE windows_ad_atq_outstanding_requests gauge
+windows_ad_atq_outstanding_requests 0
+# HELP windows_ad_database_operations_total
+# TYPE windows_ad_database_operations_total counter
+windows_ad_database_operations_total{operation="add"} 1
+windows_ad_database_operations_total{operation="delete"} 0
+windows_ad_database_operations_total{operation="modify"} 30
+windows_ad_database_operations_total{operation="recycle"} 0
+# HELP windows_ad_directory_operations_total
+# TYPE windows_ad_directory_operations_total counter
+windows_ad_directory_operations_total{operation="read",origin="directory_service_api"} 0
+windows_ad_directory_operations_total{operation="read",origin="knowledge_consistency_checker"} 60
+windows_ad_directory_operations_total{operation="read",origin="local_security_authority"} 20
+windows_ad_directory_operations_total{operation="read",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="read",origin="other"} 50
+windows_ad_directory_operations_total{operation="read",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="read",origin="security_account_manager"} 596
+windows_ad_directory_operations_total{operation="search",origin="directory_service_api"} 101
+windows_ad_directory_operations_total{operation="search",origin="knowledge_consistency_checker"} 21
+windows_ad_directory_operations_total{operation="search",origin="ldap"} 606
+windows_ad_directory_operations_total{operation="search",origin="local_security_authority"} 9
+windows_ad_directory_operations_total{operation="search",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="search",origin="other"} 56
+windows_ad_directory_operations_total{operation="search",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="search",origin="security_account_manager"} 38
+windows_ad_directory_operations_total{operation="write",origin="directory_service_api"} 3
+windows_ad_directory_operations_total{operation="write",origin="knowledge_consistency_checker"} 0
+windows_ad_directory_operations_total{operation="write",origin="ldap"} 1
+windows_ad_directory_operations_total{operation="write",origin="local_security_authority"} 0
+windows_ad_directory_operations_total{operation="write",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="write",origin="other"} 1
+windows_ad_directory_operations_total{operation="write",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="write",origin="security_account_manager"} 26
+# HELP windows_ad_name_cache_lookups_total
+# TYPE windows_ad_name_cache_lookups_total counter
+windows_ad_name_cache_lookups_total 53046
+# HELP windows_ad_name_cache_hits_total
+# TYPE windows_ad_name_cache_hits_total counter
+windows_ad_name_cache_hits_total 41161
+# HELP windows_ad_replication_inbound_objects_filtered_total
+# TYPE windows_ad_replication_inbound_objects_filtered_total counter
+windows_ad_replication_inbound_objects_filtered_total 0
+# HELP windows_ad_replication_inbound_properties_filtered_total
+# TYPE windows_ad_replication_inbound_properties_filtered_total counter
+windows_ad_replication_inbound_properties_filtered_total 0
+# HELP windows_ad_replication_inbound_properties_updated_total
+# TYPE windows_ad_replication_inbound_properties_updated_total counter
+windows_ad_replication_inbound_properties_updated_total 0
+# HELP windows_ad_replication_inbound_objects_updated_total
+# TYPE windows_ad_replication_inbound_objects_updated_total counter
+windows_ad_replication_inbound_objects_updated_total 0
+# HELP windows_ad_replication_inbound_sync_objects_remaining
+# TYPE windows_ad_replication_inbound_sync_objects_remaining gauge
+windows_ad_replication_inbound_sync_objects_remaining 0
+# HELP windows_ad_replication_data_intersite_bytes_total
+# TYPE windows_ad_replication_data_intersite_bytes_total counter
+windows_ad_replication_data_intersite_bytes_total{direction="inbound"} 0
+windows_ad_replication_data_intersite_bytes_total{direction="outbound"} 0
+# HELP windows_ad_replication_data_intrasite_bytes_total
+# TYPE windows_ad_replication_data_intrasite_bytes_total counter
+windows_ad_replication_data_intrasite_bytes_total{direction="inbound"} 0
+windows_ad_replication_data_intrasite_bytes_total{direction="outbound"} 0
+# HELP windows_ad_replication_pending_synchronizations
+# TYPE windows_ad_replication_pending_synchronizations gauge
+windows_ad_replication_pending_synchronizations 0
+# HELP windows_ad_replication_sync_requests_total
+# TYPE windows_ad_replication_sync_requests_total counter
+windows_ad_replication_sync_requests_total 0
+# HELP windows_ad_directory_service_threads
+# TYPE windows_ad_directory_service_threads gauge
+windows_ad_directory_service_threads 0
+# HELP windows_ad_ldap_last_bind_time_seconds
+# TYPE windows_ad_ldap_last_bind_time_seconds gauge
+windows_ad_ldap_last_bind_time_seconds 0
+# HELP windows_ad_binds_total
+# TYPE windows_ad_binds_total counter
+windows_ad_binds_total{bind_method="ldap"} 184
+# HELP windows_ad_ldap_searches_total
+# TYPE windows_ad_ldap_searches_total counter
+windows_ad_ldap_searches_total 1382
+# HELP windows_cpu_clock_interrupts_total Total number of received and serviced clock tick interrupts
+# TYPE windows_cpu_clock_interrupts_total counter
+windows_cpu_clock_interrupts_total{core="0,0"} 9.1949524e+07
+windows_cpu_clock_interrupts_total{core="0,1"} 1.0416934e+07
+windows_cpu_clock_interrupts_total{core="0,2"} 1.0417092e+07
+windows_cpu_clock_interrupts_total{core="0,3"} 1.0416548e+07
+# HELP windows_cpu_core_frequency_mhz Core frequency in megahertz
+# TYPE windows_cpu_core_frequency_mhz gauge
+windows_cpu_core_frequency_mhz{core="0,0"} 3187
+windows_cpu_core_frequency_mhz{core="0,1"} 3187
+windows_cpu_core_frequency_mhz{core="0,2"} 3187
+windows_cpu_core_frequency_mhz{core="0,3"} 3187
+# HELP windows_cpu_cstate_seconds_total Time spent in low-power idle state
+# TYPE windows_cpu_cstate_seconds_total counter
+windows_cpu_cstate_seconds_total{core="0,0",state="c1"} 160233.4270483
+windows_cpu_cstate_seconds_total{core="0,0",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,0",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,1",state="c1"} 159528.0543212
+windows_cpu_cstate_seconds_total{core="0,1",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,1",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,2",state="c1"} 159891.7232105
+windows_cpu_cstate_seconds_total{core="0,2",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,2",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,3",state="c1"} 159544.11780809998
+windows_cpu_cstate_seconds_total{core="0,3",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,3",state="c3"} 0
+# HELP windows_cpu_dpcs_total Total number of received and serviced deferred procedure calls (DPCs)
+# TYPE windows_cpu_dpcs_total counter
+windows_cpu_dpcs_total{core="0,0"} 4.8719e+06
+windows_cpu_dpcs_total{core="0,1"} 1.650552e+06
+windows_cpu_dpcs_total{core="0,2"} 2.236469e+06
+windows_cpu_dpcs_total{core="0,3"} 1.185046e+06
+# HELP windows_cpu_idle_break_events_total Total number of time processor was woken from idle
+# TYPE windows_cpu_idle_break_events_total counter
+windows_cpu_idle_break_events_total{core="0,0"} 1.40806638e+08
+windows_cpu_idle_break_events_total{core="0,1"} 7.069832e+07
+windows_cpu_idle_break_events_total{core="0,2"} 6.0430118e+07
+windows_cpu_idle_break_events_total{core="0,3"} 5.5224469e+07
+# HELP windows_cpu_interrupts_total Total number of received and serviced hardware interrupts
+# TYPE windows_cpu_interrupts_total counter
+windows_cpu_interrupts_total{core="0,0"} 1.55194331e+08
+windows_cpu_interrupts_total{core="0,1"} 7.9325847e+07
+windows_cpu_interrupts_total{core="0,2"} 6.7305419e+07
+windows_cpu_interrupts_total{core="0,3"} 6.0766938e+07
+# HELP windows_cpu_parking_status Parking Status represents whether a processor is parked or not
+# TYPE windows_cpu_parking_status gauge
+windows_cpu_parking_status{core="0,0"} 0
+windows_cpu_parking_status{core="0,1"} 0
+windows_cpu_parking_status{core="0,2"} 0
+windows_cpu_parking_status{core="0,3"} 0
+# HELP windows_cpu_processor_performance Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%
+# TYPE windows_cpu_processor_performance gauge
+windows_cpu_processor_performance{core="0,0"} 2.79873813368e+11
+windows_cpu_processor_performance{core="0,1"} 3.239596095e+11
+windows_cpu_processor_performance{core="0,2"} 3.01145132737e+11
+windows_cpu_processor_performance{core="0,3"} 3.22955641675e+11
+# HELP windows_cpu_time_total Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)
+# TYPE windows_cpu_time_total counter
+windows_cpu_time_total{core="0,0",mode="dpc"} 67.109375
+windows_cpu_time_total{core="0,0",mode="idle"} 162455.59375
+windows_cpu_time_total{core="0,0",mode="interrupt"} 77.28125
+windows_cpu_time_total{core="0,0",mode="privileged"} 1182.109375
+windows_cpu_time_total{core="0,0",mode="user"} 1073.671875
+windows_cpu_time_total{core="0,1",mode="dpc"} 11.09375
+windows_cpu_time_total{core="0,1",mode="idle"} 159478.125
+windows_cpu_time_total{core="0,1",mode="interrupt"} 58.09375
+windows_cpu_time_total{core="0,1",mode="privileged"} 1801.234375
+windows_cpu_time_total{core="0,1",mode="user"} 3432
+windows_cpu_time_total{core="0,2",mode="dpc"} 16.0625
+windows_cpu_time_total{core="0,2",mode="idle"} 159848.4375
+windows_cpu_time_total{core="0,2",mode="interrupt"} 53.515625
+windows_cpu_time_total{core="0,2",mode="privileged"} 1812.546875
+windows_cpu_time_total{core="0,2",mode="user"} 3050.25
+windows_cpu_time_total{core="0,3",mode="dpc"} 8.140625
+windows_cpu_time_total{core="0,3",mode="idle"} 159527.546875
+windows_cpu_time_total{core="0,3",mode="interrupt"} 44.484375
+windows_cpu_time_total{core="0,3",mode="privileged"} 1760.828125
+windows_cpu_time_total{core="0,3",mode="user"} 3422.875
+# HELP windows_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which windows_exporter was built.
+# TYPE windows_exporter_build_info gauge
+windows_exporter_build_info{branch="heads/tags/v0.20.0",goversion="go1.19.1",revision="677a7c8d67deb99b92f4f24b8c890e0a4c152b0c",version="0.20.0"} 1
+# HELP windows_exporter_collector_duration_seconds windows_exporter: Duration of a collection.
+# TYPE windows_exporter_collector_duration_seconds gauge
+windows_exporter_collector_duration_seconds{collector="ad"} 0.7690505
+windows_exporter_collector_duration_seconds{collector="adcs"} 0.0006833
+windows_exporter_collector_duration_seconds{collector="adfs"} 0.0031012
+windows_exporter_collector_duration_seconds{collector="cpu"} 0.00052
+windows_exporter_collector_duration_seconds{collector="exchange"} 0.0334467
+windows_exporter_collector_duration_seconds{collector="hyperv"} 0.9003895
+windows_exporter_collector_duration_seconds{collector="iis"} 0
+windows_exporter_collector_duration_seconds{collector="logical_disk"} 0
+windows_exporter_collector_duration_seconds{collector="logon"} 0.1139134
+windows_exporter_collector_duration_seconds{collector="memory"} 0.00052
+windows_exporter_collector_duration_seconds{collector="mssql"} 0.003369
+windows_exporter_collector_duration_seconds{collector="netframework_clrexceptions"} 1.437537
+windows_exporter_collector_duration_seconds{collector="netframework_clrinterop"} 1.4911402
+windows_exporter_collector_duration_seconds{collector="netframework_clrjit"} 1.2789005
+windows_exporter_collector_duration_seconds{collector="netframework_clrloading"} 1.3232636
+windows_exporter_collector_duration_seconds{collector="netframework_clrlocksandthreads"} 1.3578413999999999
+windows_exporter_collector_duration_seconds{collector="netframework_clrmemory"} 1.4066725
+windows_exporter_collector_duration_seconds{collector="netframework_clrremoting"} 1.5191553
+windows_exporter_collector_duration_seconds{collector="netframework_clrsecurity"} 1.4670829
+windows_exporter_collector_duration_seconds{collector="net"} 0
+windows_exporter_collector_duration_seconds{collector="os"} 0.0023497
+windows_exporter_collector_duration_seconds{collector="process"} 0.1154812
+windows_exporter_collector_duration_seconds{collector="service"} 0.1016404
+windows_exporter_collector_duration_seconds{collector="system"} 0.0006105
+windows_exporter_collector_duration_seconds{collector="tcp"} 0
+# HELP windows_exporter_collector_success windows_exporter: Whether the collector was successful.
+# TYPE windows_exporter_collector_success gauge
+windows_exporter_collector_success{collector="ad"} 1
+windows_exporter_collector_success{collector="adcs"} 1
+windows_exporter_collector_success{collector="adfs"} 1
+windows_exporter_collector_success{collector="cpu"} 1
+windows_exporter_collector_success{collector="exchange"} 1
+windows_exporter_collector_success{collector="hyperv"} 1
+windows_exporter_collector_success{collector="iis"} 1
+windows_exporter_collector_success{collector="logical_disk"} 1
+windows_exporter_collector_success{collector="logon"} 1
+windows_exporter_collector_success{collector="memory"} 1
+windows_exporter_collector_success{collector="mssql"} 1
+windows_exporter_collector_success{collector="netframework_clrexceptions"} 1
+windows_exporter_collector_success{collector="netframework_clrinterop"} 1
+windows_exporter_collector_success{collector="netframework_clrjit"} 1
+windows_exporter_collector_success{collector="netframework_clrloading"} 1
+windows_exporter_collector_success{collector="netframework_clrlocksandthreads"} 1
+windows_exporter_collector_success{collector="netframework_clrmemory"} 1
+windows_exporter_collector_success{collector="netframework_clrremoting"} 1
+windows_exporter_collector_success{collector="netframework_clrsecurity"} 1
+windows_exporter_collector_success{collector="net"} 1
+windows_exporter_collector_success{collector="os"} 1
+windows_exporter_collector_success{collector="process"} 1
+windows_exporter_collector_success{collector="service"} 1
+windows_exporter_collector_success{collector="system"} 1
+windows_exporter_collector_success{collector="tcp"} 1
+# HELP windows_exporter_collector_timeout windows_exporter: Whether the collector timed out.
+# TYPE windows_exporter_collector_timeout gauge
+windows_exporter_collector_timeout{collector="ad"} 0
+windows_exporter_collector_timeout{collector="adcs"} 0
+windows_exporter_collector_timeout{collector="adfs"} 0
+windows_exporter_collector_timeout{collector="cpu"} 0
+windows_exporter_collector_timeout{collector="exchange"} 0
+windows_exporter_collector_timeout{collector="hyperv"} 0
+windows_exporter_collector_timeout{collector="iis"} 0
+windows_exporter_collector_timeout{collector="logical_disk"} 0
+windows_exporter_collector_timeout{collector="logon"} 0
+windows_exporter_collector_timeout{collector="memory"} 0
+windows_exporter_collector_timeout{collector="mssql"} 0
+windows_exporter_collector_timeout{collector="netframework_clrexceptions"} 0
+windows_exporter_collector_timeout{collector="netframework_clrinterop"} 0
+windows_exporter_collector_timeout{collector="netframework_clrjit"} 0
+windows_exporter_collector_timeout{collector="netframework_clrloading"} 0
+windows_exporter_collector_timeout{collector="netframework_clrlocksandthreads"} 0
+windows_exporter_collector_timeout{collector="netframework_clrmemory"} 0
+windows_exporter_collector_timeout{collector="netframework_clrremoting"} 0
+windows_exporter_collector_timeout{collector="netframework_clrsecurity"} 0
+windows_exporter_collector_timeout{collector="net"} 0
+windows_exporter_collector_timeout{collector="os"} 0
+windows_exporter_collector_timeout{collector="process"} 0
+windows_exporter_collector_timeout{collector="service"} 0
+windows_exporter_collector_timeout{collector="system"} 0
+windows_exporter_collector_timeout{collector="tcp"} 0
+# HELP windows_exchange_http_proxy_avg_auth_latency Average time spent authenticating CAS requests over the last 200 samples
+# TYPE windows_exchange_http_proxy_avg_auth_latency gauge
+windows_exchange_http_proxy_avg_auth_latency{name="autodiscover"} 1
+windows_exchange_http_proxy_avg_auth_latency{name="eas"} 0
+# HELP windows_exchange_http_proxy_avg_cas_proccessing_latency_sec Average latency (sec) of CAS processing time over the last 200 reqs
+# TYPE windows_exchange_http_proxy_avg_cas_proccessing_latency_sec gauge
+windows_exchange_http_proxy_avg_cas_proccessing_latency_sec{name="autodiscover"} 0.003
+windows_exchange_http_proxy_avg_cas_proccessing_latency_sec{name="eas"} 0.003
+# HELP windows_exchange_http_proxy_mailbox_proxy_failure_rate % of failures between this CAS and MBX servers over the last 200 samples
+# TYPE windows_exchange_http_proxy_mailbox_proxy_failure_rate gauge
+windows_exchange_http_proxy_mailbox_proxy_failure_rate{name="autodiscover"} 0
+windows_exchange_http_proxy_mailbox_proxy_failure_rate{name="eas"} 0
+# HELP windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec Average latency (sec) of MailboxServerLocator web service calls
+# TYPE windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec gauge
+windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec{name="autodiscover"} 0.008
+windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec{name="eas"} 0.008
+# HELP windows_exchange_http_proxy_outstanding_proxy_requests Number of concurrent outstanding proxy requests
+# TYPE windows_exchange_http_proxy_outstanding_proxy_requests gauge
+windows_exchange_http_proxy_outstanding_proxy_requests{name="autodiscover"} 0
+windows_exchange_http_proxy_outstanding_proxy_requests{name="eas"} 0
+# HELP windows_exchange_http_proxy_requests_total Number of proxy requests processed each second
+# TYPE windows_exchange_http_proxy_requests_total counter
+windows_exchange_http_proxy_requests_total{name="autodiscover"} 27122
+windows_exchange_http_proxy_requests_total{name="eas"} 32519
+# HELP windows_exchange_ldap_long_running_ops_per_sec Long Running LDAP operations per second
+# TYPE windows_exchange_ldap_long_running_ops_per_sec counter
+windows_exchange_ldap_long_running_ops_per_sec{name="complianceauditservice"} 0
+windows_exchange_ldap_long_running_ops_per_sec{name="complianceauditservice_10"} 0
+# HELP windows_exchange_ldap_read_time_sec Time (sec) to send an LDAP read request and receive a response
+# TYPE windows_exchange_ldap_read_time_sec counter
+windows_exchange_ldap_read_time_sec{name="complianceauditservice"} 0.008
+windows_exchange_ldap_read_time_sec{name="complianceauditservice_10"} 0.018
+# HELP windows_exchange_ldap_search_time_sec Time (sec) to send an LDAP search request and receive a response
+# TYPE windows_exchange_ldap_search_time_sec counter
+windows_exchange_ldap_search_time_sec{name="complianceauditservice"} 0.046
+windows_exchange_ldap_search_time_sec{name="complianceauditservice_10"} 0.058
+# TYPE windows_exchange_ldap_timeout_errors_total counter
+windows_exchange_ldap_timeout_errors_total{name="complianceauditservice"} 0
+windows_exchange_ldap_timeout_errors_total{name="complianceauditservice_10"} 0
+# HELP windows_exchange_ldap_write_time_sec Time (sec) to send an LDAP Add/Modify/Delete request and receive a response
+# TYPE windows_exchange_ldap_write_time_sec counter
+windows_exchange_ldap_write_time_sec{name="complianceauditservice"} 0
+windows_exchange_ldap_write_time_sec{name="complianceauditservice_10"} 0
+# HELP windows_exporter_perflib_snapshot_duration_seconds Duration of perflib snapshot capture
+# TYPE windows_exporter_perflib_snapshot_duration_seconds gauge
+windows_exporter_perflib_snapshot_duration_seconds 0.0054258
+# HELP windows_exchange_activesync_ping_cmds_pending Number of ping commands currently pending in the queue
+# TYPE windows_exchange_activesync_ping_cmds_pending gauge
+windows_exchange_activesync_ping_cmds_pending 0
+# HELP windows_exchange_activesync_requests_total Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load
+# TYPE windows_exchange_activesync_requests_total counter
+windows_exchange_activesync_requests_total 14
+# HELP windows_exchange_activesync_sync_cmds_total Number of sync commands processed per second. Clients use this command to synchronize items within a folder
+# TYPE windows_exchange_activesync_sync_cmds_total counter
+windows_exchange_activesync_sync_cmds_total 0
+# HELP windows_exchange_autodiscover_requests_total Number of autodiscover service requests processed each second
+# TYPE windows_exchange_autodiscover_requests_total counter
+windows_exchange_autodiscover_requests_total 1
+# HELP windows_exchange_avail_service_requests_per_sec Number of requests serviced per second
+# TYPE windows_exchange_avail_service_requests_per_sec counter
+windows_exchange_avail_service_requests_per_sec 0
+# HELP windows_exchange_owa_current_unique_users Number of unique users currently logged on to Outlook Web App
+# TYPE windows_exchange_owa_current_unique_users gauge
+windows_exchange_owa_current_unique_users 0
+# HELP windows_exchange_owa_requests_total Number of requests handled by Outlook Web App per second
+# TYPE windows_exchange_owa_requests_total counter
+windows_exchange_owa_requests_total 0
+# HELP windows_exchange_rpc_active_user_count Number of unique users that have shown some kind of activity in the last 2 minutes
+# TYPE windows_exchange_rpc_active_user_count gauge
+windows_exchange_rpc_active_user_count 0
+# HELP windows_exchange_rpc_avg_latency_sec The latency (sec), averaged for the past 1024 packets
+# TYPE windows_exchange_rpc_avg_latency_sec gauge
+windows_exchange_rpc_avg_latency_sec 0.001
+# HELP windows_exchange_rpc_connection_count Total number of client connections maintained
+# TYPE windows_exchange_rpc_connection_count gauge
+windows_exchange_rpc_connection_count 0
+# HELP windows_exchange_rpc_operations_total The rate at which RPC operations occur
+# TYPE windows_exchange_rpc_operations_total counter
+windows_exchange_rpc_operations_total 9
+# HELP windows_exchange_rpc_requests Number of client requests currently being processed by the RPC Client Access service
+# TYPE windows_exchange_rpc_requests gauge
+windows_exchange_rpc_requests 0
+# HELP windows_exchange_rpc_user_count Number of users
+# TYPE windows_exchange_rpc_user_count gauge
+windows_exchange_rpc_user_count 0
+# HELP windows_exchange_transport_queues_active_mailbox_delivery Active Mailbox Delivery Queue length
+# TYPE windows_exchange_transport_queues_active_mailbox_delivery gauge
+windows_exchange_transport_queues_active_mailbox_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_external_active_remote_delivery External Active Remote Delivery Queue length
+# TYPE windows_exchange_transport_queues_external_active_remote_delivery gauge
+windows_exchange_transport_queues_external_active_remote_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_external_largest_delivery External Largest Delivery Queue length
+# TYPE windows_exchange_transport_queues_external_largest_delivery gauge
+windows_exchange_transport_queues_external_largest_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_internal_active_remote_delivery Internal Active Remote Delivery Queue length
+# TYPE windows_exchange_transport_queues_internal_active_remote_delivery gauge
+windows_exchange_transport_queues_internal_active_remote_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_internal_largest_delivery Internal Largest Delivery Queue length
+# TYPE windows_exchange_transport_queues_internal_largest_delivery gauge
+windows_exchange_transport_queues_internal_largest_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_poison Poison Queue length
+# TYPE windows_exchange_transport_queues_poison gauge
+windows_exchange_transport_queues_poison{name="high_priority"} 0
+windows_exchange_transport_queues_poison{name="low_priority"} 0
+windows_exchange_transport_queues_poison{name="none_priority"} 0
+windows_exchange_transport_queues_poison{name="normal_priority"} 0
+windows_exchange_transport_queues_poison{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_retry_mailbox_delivery Retry Mailbox Delivery Queue length
+# TYPE windows_exchange_transport_queues_retry_mailbox_delivery gauge
+windows_exchange_transport_queues_retry_mailbox_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_unreachable Unreachable Queue length
+# TYPE windows_exchange_transport_queues_unreachable gauge
+windows_exchange_transport_queues_unreachable{name="high_priority"} 0
+windows_exchange_transport_queues_unreachable{name="low_priority"} 0
+windows_exchange_transport_queues_unreachable{name="none_priority"} 0
+windows_exchange_transport_queues_unreachable{name="normal_priority"} 0
+windows_exchange_transport_queues_unreachable{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_workload_active_tasks Number of active tasks currently running in the background for workload management
+# TYPE windows_exchange_workload_active_tasks gauge
+windows_exchange_workload_active_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_active_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_completed_tasks Number of workload management tasks that have been completed
+# TYPE windows_exchange_workload_completed_tasks counter
+windows_exchange_workload_completed_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_completed_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_is_active Active indicates whether the workload is in an active (1) or paused (0) state
+# TYPE windows_exchange_workload_is_active gauge
+windows_exchange_workload_is_active{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 1
+windows_exchange_workload_is_active{name="microsoft_exchange_servicehost_darruntime"} 1
+# HELP windows_exchange_workload_queued_tasks Number of workload management tasks that are currently queued up waiting to be processed
+# TYPE windows_exchange_workload_queued_tasks counter
+windows_exchange_workload_queued_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_queued_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_yielded_tasks The total number of tasks that have been yielded by a workload
+# TYPE windows_exchange_workload_yielded_tasks counter
+windows_exchange_workload_yielded_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_yielded_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_hyperv_health_critical This counter represents the number of virtual machines with critical health
+# TYPE windows_hyperv_health_critical gauge
+windows_hyperv_health_critical 0
+# HELP windows_hyperv_health_ok This counter represents the number of virtual machines with ok health
+# TYPE windows_hyperv_health_ok gauge
+windows_hyperv_health_ok 1
+# HELP windows_hyperv_host_cpu_guest_run_time The time spent by the virtual processor in guest code
+# TYPE windows_hyperv_host_cpu_guest_run_time gauge
+windows_hyperv_host_cpu_guest_run_time{core="0"} 2.44871648e+09
+# HELP windows_hyperv_host_cpu_hypervisor_run_time The time spent by the virtual processor in hypervisor code
+# TYPE windows_hyperv_host_cpu_hypervisor_run_time gauge
+windows_hyperv_host_cpu_hypervisor_run_time{core="0"} 2.79010764e+08
+# HELP windows_hyperv_host_cpu_remote_run_time The time spent by the virtual processor running on a remote node
+# TYPE windows_hyperv_host_cpu_remote_run_time gauge
+windows_hyperv_host_cpu_remote_run_time{core="0"} 0
+# HELP windows_hyperv_host_cpu_total_run_time The time spent by the virtual processor in guest and hypervisor code
+# TYPE windows_hyperv_host_cpu_total_run_time gauge
+windows_hyperv_host_cpu_total_run_time{core="0"} 2.727727244e+09
+# HELP windows_hyperv_host_lp_guest_run_time_percent The percentage of time spent by the processor in guest code
+# TYPE windows_hyperv_host_lp_guest_run_time_percent gauge
+windows_hyperv_host_lp_guest_run_time_percent{core="0"} 2.476081579e+09
+# HELP windows_hyperv_host_lp_hypervisor_run_time_percent The percentage of time spent by the processor in hypervisor code
+# TYPE windows_hyperv_host_lp_hypervisor_run_time_percent gauge
+windows_hyperv_host_lp_hypervisor_run_time_percent{core="0"} 3.52733652e+08
+# HELP windows_hyperv_host_lp_total_run_time_percent The percentage of time spent by the processor in guest and hypervisor code
+# TYPE windows_hyperv_host_lp_total_run_time_percent gauge
+windows_hyperv_host_lp_total_run_time_percent{core="0"} 2.828815231e+09
+# HELP windows_hyperv_hypervisor_logical_processors The number of logical processors present in the system
+# TYPE windows_hyperv_hypervisor_logical_processors gauge
+windows_hyperv_hypervisor_logical_processors 16
+# HELP windows_hyperv_hypervisor_virtual_processors The number of virtual processors present in the system
+# TYPE windows_hyperv_hypervisor_virtual_processors gauge
+windows_hyperv_hypervisor_virtual_processors 24
+# HELP windows_hyperv_root_partition_1G_device_pages The number of 1G pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_1G_device_pages gauge
+windows_hyperv_root_partition_1G_device_pages 0
+# HELP windows_hyperv_root_partition_1G_gpa_pages The number of 1G pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_1G_gpa_pages gauge
+windows_hyperv_root_partition_1G_gpa_pages 6
+# HELP windows_hyperv_root_partition_2M_device_pages The number of 2M pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_2M_device_pages gauge
+windows_hyperv_root_partition_2M_device_pages 0
+# HELP windows_hyperv_root_partition_2M_gpa_pages The number of 2M pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_2M_gpa_pages gauge
+windows_hyperv_root_partition_2M_gpa_pages 5255
+# HELP windows_hyperv_root_partition_4K_device_pages The number of 4K pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_4K_device_pages gauge
+windows_hyperv_root_partition_4K_device_pages 0
+# HELP windows_hyperv_root_partition_4K_gpa_pages The number of 4K pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_4K_gpa_pages gauge
+windows_hyperv_root_partition_4K_gpa_pages 58880
+# HELP windows_hyperv_root_partition_address_spaces The number of address spaces in the virtual TLB of the partition
+# TYPE windows_hyperv_root_partition_address_spaces gauge
+windows_hyperv_root_partition_address_spaces 0
+# HELP windows_hyperv_root_partition_attached_devices The number of devices attached to the partition
+# TYPE windows_hyperv_root_partition_attached_devices gauge
+windows_hyperv_root_partition_attached_devices 1
+# HELP windows_hyperv_root_partition_deposited_pages The number of pages deposited into the partition
+# TYPE windows_hyperv_root_partition_deposited_pages gauge
+windows_hyperv_root_partition_deposited_pages 31732
+# HELP windows_hyperv_root_partition_device_dma_errors An indicator of illegal DMA requests generated by all devices assigned to the partition
+# TYPE windows_hyperv_root_partition_device_dma_errors gauge
+windows_hyperv_root_partition_device_dma_errors 0
+# HELP windows_hyperv_root_partition_device_interrupt_errors An indicator of illegal interrupt requests generated by all devices assigned to the partition
+# TYPE windows_hyperv_root_partition_device_interrupt_errors gauge
+windows_hyperv_root_partition_device_interrupt_errors 0
+# HELP windows_hyperv_root_partition_device_interrupt_throttle_events The number of times an interrupt from a device assigned to the partition was temporarily throttled because the device was generating too many interrupts
+# TYPE windows_hyperv_root_partition_device_interrupt_throttle_events gauge
+windows_hyperv_root_partition_device_interrupt_throttle_events 0
+# HELP windows_hyperv_root_partition_gpa_space_modifications The rate of modifications to the GPA space of the partition
+# TYPE windows_hyperv_root_partition_gpa_space_modifications counter
+windows_hyperv_root_partition_gpa_space_modifications 0
+# HELP windows_hyperv_root_partition_io_tlb_flush The rate of flushes of I/O TLBs of the partition
+# TYPE windows_hyperv_root_partition_io_tlb_flush counter
+windows_hyperv_root_partition_io_tlb_flush 23901
+# HELP windows_hyperv_root_partition_io_tlb_flush_cost The average time (in nanoseconds) spent processing an I/O TLB flush
+# TYPE windows_hyperv_root_partition_io_tlb_flush_cost gauge
+windows_hyperv_root_partition_io_tlb_flush_cost 312574
+# HELP windows_hyperv_root_partition_physical_pages_allocated The number of timer interrupts skipped for the partition
+# TYPE windows_hyperv_root_partition_physical_pages_allocated gauge
+windows_hyperv_root_partition_physical_pages_allocated 0
+# HELP windows_hyperv_root_partition_preferred_numa_node_index The number of pages present in the GPA space of the partition (zero for root partition)
+# TYPE windows_hyperv_root_partition_preferred_numa_node_index gauge
+windows_hyperv_root_partition_preferred_numa_node_index 0
+# HELP windows_hyperv_root_partition_recommended_virtual_tlb_size The recommended number of pages to be deposited for the virtual TLB
+# TYPE windows_hyperv_root_partition_recommended_virtual_tlb_size gauge
+windows_hyperv_root_partition_recommended_virtual_tlb_size 64
+# HELP windows_hyperv_root_partition_virtual_tlb_flush_entires The rate of flushes of the entire virtual TLB
+# TYPE windows_hyperv_root_partition_virtual_tlb_flush_entires counter
+windows_hyperv_root_partition_virtual_tlb_flush_entires 15234
+# HELP windows_hyperv_root_partition_virtual_tlb_pages The number of pages used by the virtual TLB of the partition
+# TYPE windows_hyperv_root_partition_virtual_tlb_pages gauge
+windows_hyperv_root_partition_virtual_tlb_pages 64
+# HELP windows_hyperv_vid_physical_pages_allocated The number of physical pages allocated
+# TYPE windows_hyperv_vid_physical_pages_allocated gauge
+windows_hyperv_vid_physical_pages_allocated{vm="Ubuntu 22.04 LTS"} 745472
+# HELP windows_hyperv_vid_preferred_numa_node_index The preferred NUMA node index associated with this partition
+# TYPE windows_hyperv_vid_preferred_numa_node_index gauge
+windows_hyperv_vid_preferred_numa_node_index{vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vid_remote_physical_pages The number of physical pages not allocated from the preferred NUMA node
+# TYPE windows_hyperv_vid_remote_physical_pages gauge
+windows_hyperv_vid_remote_physical_pages{vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vm_cpu_guest_run_time The time spent by the virtual processor in guest code
+# TYPE windows_hyperv_vm_cpu_guest_run_time gauge
+windows_hyperv_vm_cpu_guest_run_time{core="0",vm="Ubuntu 22.04 LTS"} 6.2534217e+07
+# HELP windows_hyperv_vm_cpu_hypervisor_run_time The time spent by the virtual processor in hypervisor code
+# TYPE windows_hyperv_vm_cpu_hypervisor_run_time gauge
+windows_hyperv_vm_cpu_hypervisor_run_time{core="0",vm="Ubuntu 22.04 LTS"} 4.457712e+06
+# HELP windows_hyperv_vm_cpu_remote_run_time The time spent by the virtual processor running on a remote node
+# TYPE windows_hyperv_vm_cpu_remote_run_time gauge
+windows_hyperv_vm_cpu_remote_run_time{core="0",vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vm_cpu_total_run_time The time spent by the virtual processor in guest and hypervisor code
+# TYPE windows_hyperv_vm_cpu_total_run_time gauge
+windows_hyperv_vm_cpu_total_run_time{core="0",vm="Ubuntu 22.04 LTS"} 6.6991929e+07
+# HELP windows_hyperv_vm_device_bytes_read This counter represents the total number of bytes that have been read per second on this virtual device
+# TYPE windows_hyperv_vm_device_bytes_read counter
+windows_hyperv_vm_device_bytes_read{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 83456
+windows_hyperv_vm_device_bytes_read{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 5.3118464e+08
+# HELP windows_hyperv_vm_device_bytes_written This counter represents the total number of bytes that have been written per second on this virtual device
+# TYPE windows_hyperv_vm_device_bytes_written counter
+windows_hyperv_vm_device_bytes_written{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 1.148928e+06
+windows_hyperv_vm_device_bytes_written{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 4.25905152e+08
+# HELP windows_hyperv_vm_device_error_count This counter represents the total number of errors that have occurred on this virtual device
+# TYPE windows_hyperv_vm_device_error_count counter
+windows_hyperv_vm_device_error_count{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 0
+windows_hyperv_vm_device_error_count{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3
+# HELP windows_hyperv_vm_device_operations_read This counter represents the number of read operations that have occurred per second on this virtual device
+# TYPE windows_hyperv_vm_device_operations_read counter
+windows_hyperv_vm_device_operations_read{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 6
+windows_hyperv_vm_device_operations_read{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 13196
+# HELP windows_hyperv_vm_device_operations_written This counter represents the number of write operations that have occurred per second on this virtual device
+# TYPE windows_hyperv_vm_device_operations_written counter
+windows_hyperv_vm_device_operations_written{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 34
+windows_hyperv_vm_device_operations_written{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3866
+# HELP windows_hyperv_vm_device_queue_length This counter represents the current queue length on this virtual device
+# TYPE windows_hyperv_vm_device_queue_length counter
+windows_hyperv_vm_device_queue_length{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 1.104182e+06
+windows_hyperv_vm_device_queue_length{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3.269422187e+09
+# HELP windows_hyperv_vm_interface_bytes_received This counter represents the total number of bytes received per second by the network adapter
+# TYPE windows_hyperv_vm_interface_bytes_received counter
+windows_hyperv_vm_interface_bytes_received{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 473654
+windows_hyperv_vm_interface_bytes_received{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 4.3509444e+07
+# HELP windows_hyperv_vm_interface_bytes_sent This counter represents the total number of bytes sent per second by the network adapter
+# TYPE windows_hyperv_vm_interface_bytes_sent counter
+windows_hyperv_vm_interface_bytes_sent{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 4.3550457e+07
+windows_hyperv_vm_interface_bytes_sent{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 473654
+# HELP windows_hyperv_vm_interface_packets_incoming_dropped This counter represents the total number of dropped packets per second in the incoming direction of the network adapter
+# TYPE windows_hyperv_vm_interface_packets_incoming_dropped counter
+windows_hyperv_vm_interface_packets_incoming_dropped{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 0
+windows_hyperv_vm_interface_packets_incoming_dropped{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 0
+# HELP windows_hyperv_vm_interface_packets_outgoing_dropped This counter represents the total number of dropped packets per second in the outgoing direction of the network adapter
+# TYPE windows_hyperv_vm_interface_packets_outgoing_dropped counter
+windows_hyperv_vm_interface_packets_outgoing_dropped{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 284
+windows_hyperv_vm_interface_packets_outgoing_dropped{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 0
+# HELP windows_hyperv_vm_interface_packets_received This counter represents the total number of packets received per second by the network adapter
+# TYPE windows_hyperv_vm_interface_packets_received counter
+windows_hyperv_vm_interface_packets_received{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 6137
+windows_hyperv_vm_interface_packets_received{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 8621
+# HELP windows_hyperv_vm_interface_packets_sent This counter represents the total number of packets sent per second by the network adapter
+# TYPE windows_hyperv_vm_interface_packets_sent counter
+windows_hyperv_vm_interface_packets_sent{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 8905
+windows_hyperv_vm_interface_packets_sent{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 6137
+# HELP windows_hyperv_vm_memory_add_operations_total This counter represents the number of operations adding memory to the VM.
+# TYPE windows_hyperv_vm_memory_add_operations_total counter
+windows_hyperv_vm_memory_add_operations_total{vm="Ubuntu 22.04 LTS"} 3
+# HELP windows_hyperv_vm_memory_added_total This counter represents memory in MB added to the VM
+# TYPE windows_hyperv_vm_memory_added_total counter
+windows_hyperv_vm_memory_added_total{vm="Ubuntu 22.04 LTS"} 856
+# HELP windows_hyperv_vm_memory_physical This gauge represents the current amount of memory in MB assigned to the VM.
+# TYPE windows_hyperv_vm_memory_physical gauge
+windows_hyperv_vm_memory_physical{vm="Ubuntu 22.04 LTS"} 2628
+# HELP windows_hyperv_vm_memory_physical_guest_visible 'This gauge represents the amount of memory in MB visible to the VM guest.'
+# TYPE windows_hyperv_vm_memory_physical_guest_visible gauge
+windows_hyperv_vm_memory_physical_guest_visible{vm="Ubuntu 22.04 LTS"} 2904
+# HELP windows_hyperv_vm_memory_pressure_average This gauge represents the average pressure in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_average gauge
+windows_hyperv_vm_memory_pressure_average{vm="Ubuntu 22.04 LTS"} 83
+# HELP windows_hyperv_vm_memory_pressure_current This gauge represents the current pressure in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_current gauge
+windows_hyperv_vm_memory_pressure_current{vm="Ubuntu 22.04 LTS"} 83
+# HELP windows_hyperv_vm_memory_pressure_maximum This gauge represents the maximum pressure band in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_maximum gauge
+windows_hyperv_vm_memory_pressure_maximum{vm="Ubuntu 22.04 LTS"} 85
+# HELP windows_hyperv_vm_memory_pressure_minimum This gauge represents the minimum pressure band in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_minimum gauge
+windows_hyperv_vm_memory_pressure_minimum{vm="Ubuntu 22.04 LTS"} 81
+# HELP windows_hyperv_vm_memory_remove_operations_total This counter represents the number of operations removing memory from the VM.
+# TYPE windows_hyperv_vm_memory_remove_operations_total counter +windows_hyperv_vm_memory_remove_operations_total{vm="Ubuntu 22.04 LTS"} 1 +# HELP windows_hyperv_vm_memory_removed_total This counter represents memory in MB removed from the VM +# TYPE windows_hyperv_vm_memory_removed_total counter +windows_hyperv_vm_memory_removed_total{vm="Ubuntu 22.04 LTS"} 276 +# HELP windows_hyperv_vswitch_broadcast_packets_received_total This represents the total number of broadcast packets received per second by the virtual switch +# TYPE windows_hyperv_vswitch_broadcast_packets_received_total counter +windows_hyperv_vswitch_broadcast_packets_received_total{vswitch="Default Switch"} 51 +# HELP windows_hyperv_vswitch_broadcast_packets_sent_total This represents the total number of broadcast packets sent per second by the virtual switch +# TYPE windows_hyperv_vswitch_broadcast_packets_sent_total counter +windows_hyperv_vswitch_broadcast_packets_sent_total{vswitch="Default Switch"} 18 +# HELP windows_hyperv_vswitch_bytes_received_total This represents the total number of bytes received per second by the virtual switch +# TYPE windows_hyperv_vswitch_bytes_received_total counter +windows_hyperv_vswitch_bytes_received_total{vswitch="Default Switch"} 4.4024111e+07 +# HELP windows_hyperv_vswitch_bytes_sent_total This represents the total number of bytes sent per second by the virtual switch +# TYPE windows_hyperv_vswitch_bytes_sent_total counter +windows_hyperv_vswitch_bytes_sent_total{vswitch="Default Switch"} 4.3983098e+07 +# HELP windows_hyperv_vswitch_bytes_total This represents the total number of bytes per second traversing the virtual switch +# TYPE windows_hyperv_vswitch_bytes_total counter +windows_hyperv_vswitch_bytes_total{vswitch="Default Switch"} 8.8007209e+07 +# HELP windows_hyperv_vswitch_directed_packets_received_total This represents the total number of directed packets received per second by the virtual switch +# TYPE windows_hyperv_vswitch_directed_packets_received_total counter +windows_hyperv_vswitch_directed_packets_received_total{vswitch="Default Switch"} 14603 +# HELP windows_hyperv_vswitch_directed_packets_send_total This represents the total number of directed packets sent per second by the virtual switch +# TYPE windows_hyperv_vswitch_directed_packets_send_total counter +windows_hyperv_vswitch_directed_packets_send_total{vswitch="Default Switch"} 14603 +# HELP windows_hyperv_vswitch_dropped_packets_incoming_total This represents the total number of packet dropped per second by the virtual switch in the incoming direction +# TYPE windows_hyperv_vswitch_dropped_packets_incoming_total counter +windows_hyperv_vswitch_dropped_packets_incoming_total{vswitch="Default Switch"} 284 +# HELP windows_hyperv_vswitch_dropped_packets_outcoming_total This represents the total number of packet dropped per second by the virtual switch in the outgoing direction +# TYPE windows_hyperv_vswitch_dropped_packets_outcoming_total counter +windows_hyperv_vswitch_dropped_packets_outcoming_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_extensions_dropped_packets_incoming_total This represents the total number of packet dropped per second by the virtual switch extensions in the incoming direction +# TYPE windows_hyperv_vswitch_extensions_dropped_packets_incoming_total counter +windows_hyperv_vswitch_extensions_dropped_packets_incoming_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total This represents the total number of packet dropped 
per second by the virtual switch extensions in the outgoing direction +# TYPE windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total counter +windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_learned_mac_addresses_total This counter represents the total number of learned MAC addresses of the virtual switch +# TYPE windows_hyperv_vswitch_learned_mac_addresses_total counter +windows_hyperv_vswitch_learned_mac_addresses_total{vswitch="Default Switch"} 2 +# HELP windows_hyperv_vswitch_multicast_packets_received_total This represents the total number of multicast packets received per second by the virtual switch +# TYPE windows_hyperv_vswitch_multicast_packets_received_total counter +windows_hyperv_vswitch_multicast_packets_received_total{vswitch="Default Switch"} 388 +# HELP windows_hyperv_vswitch_multicast_packets_sent_total This represents the total number of multicast packets sent per second by the virtual switch +# TYPE windows_hyperv_vswitch_multicast_packets_sent_total counter +windows_hyperv_vswitch_multicast_packets_sent_total{vswitch="Default Switch"} 137 +# HELP windows_hyperv_vswitch_number_of_send_channel_moves_total This represents the total number of send channel moves per second on this virtual switch +# TYPE windows_hyperv_vswitch_number_of_send_channel_moves_total counter +windows_hyperv_vswitch_number_of_send_channel_moves_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_number_of_vmq_moves_total This represents the total number of VMQ moves per second on this virtual switch +# TYPE windows_hyperv_vswitch_number_of_vmq_moves_total counter +windows_hyperv_vswitch_number_of_vmq_moves_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_packets_flooded_total This counter represents the total number of packets flooded by the virtual switch +# TYPE windows_hyperv_vswitch_packets_flooded_total counter +windows_hyperv_vswitch_packets_flooded_total{vswitch="Default Switch"} 0 +# HELP windows_hyperv_vswitch_packets_received_total This represents the total number of packets received per second by the virtual switch +# TYPE windows_hyperv_vswitch_packets_received_total counter +windows_hyperv_vswitch_packets_received_total{vswitch="Default Switch"} 15042 +# HELP windows_hyperv_vswitch_packets_total This represents the total number of packets per second traversing the virtual switch +# TYPE windows_hyperv_vswitch_packets_total counter +windows_hyperv_vswitch_packets_total{vswitch="Default Switch"} 29800 +# HELP windows_hyperv_vswitch_purged_mac_addresses_total This counter represents the total number of purged MAC addresses of the virtual switch +# TYPE windows_hyperv_vswitch_purged_mac_addresses_total counter +windows_hyperv_vswitch_purged_mac_addresses_total{vswitch="Default Switch"} 0 +# HELP windows_iis_anonymous_users_total Total number of users who established an anonymous connection with the Web service (WebService.TotalAnonymousUsers) +# TYPE windows_iis_anonymous_users_total counter +windows_iis_anonymous_users_total{site="Default Web Site"} 3 +# HELP windows_iis_blocked_async_io_requests_total Total requests temporarily blocked due to bandwidth throttling settings (WebService.TotalBlockedAsyncIORequests) +# TYPE windows_iis_blocked_async_io_requests_total counter +windows_iis_blocked_async_io_requests_total{site="Default Web Site"} 0 +# HELP windows_iis_cgi_requests_total Total CGI requests is the total number of CGI requests (WebService.TotalCGIRequests) +# TYPE 
windows_iis_cgi_requests_total counter +windows_iis_cgi_requests_total{site="Default Web Site"} 0 +# HELP windows_iis_connection_attempts_all_instances_total Number of connections that have been attempted using the Web service (WebService.TotalConnectionAttemptsAllInstances) +# TYPE windows_iis_connection_attempts_all_instances_total counter +windows_iis_connection_attempts_all_instances_total{site="Default Web Site"} 1 +# HELP windows_iis_current_anonymous_users Number of users who currently have an anonymous connection using the Web service (WebService.CurrentAnonymousUsers) +# TYPE windows_iis_current_anonymous_users gauge +windows_iis_current_anonymous_users{site="Default Web Site"} 0 +# HELP windows_iis_current_application_pool_start_time The unix timestamp for the application pool start time (CurrentApplicationPoolUptime) +# TYPE windows_iis_current_application_pool_start_time gauge +windows_iis_current_application_pool_start_time{app="DefaultAppPool"} 1.6672399883854828e+09 +# HELP windows_iis_current_application_pool_state The current status of the application pool (1 - Uninitialized, 2 - Initialized, 3 - Running, 4 - Disabling, 5 - Disabled, 6 - Shutdown Pending, 7 - Delete Pending) (CurrentApplicationPoolState) +# TYPE windows_iis_current_application_pool_state gauge +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Delete Pending"} 0 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Disabled"} 0 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Disabling"} 0 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Initialized"} 0 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Running"} 1 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Shutdown Pending"} 0 +windows_iis_current_application_pool_state{app="DefaultAppPool",state="Uninitialized"} 0 +# HELP windows_iis_current_blocked_async_io_requests Current requests temporarily blocked due to bandwidth throttling settings (WebService.CurrentBlockedAsyncIORequests) +# TYPE windows_iis_current_blocked_async_io_requests gauge +windows_iis_current_blocked_async_io_requests{site="Default Web Site"} 0 +# HELP windows_iis_current_cgi_requests Current number of CGI requests being simultaneously processed by the Web service (WebService.CurrentCGIRequests) +# TYPE windows_iis_current_cgi_requests gauge +windows_iis_current_cgi_requests{site="Default Web Site"} 0 +# HELP windows_iis_current_connections Current number of connections established with the Web service (WebService.CurrentConnections) +# TYPE windows_iis_current_connections gauge +windows_iis_current_connections{site="Default Web Site"} 0 +# HELP windows_iis_current_isapi_extension_requests Current number of ISAPI requests being simultaneously processed by the Web service (WebService.CurrentISAPIExtensionRequests) +# TYPE windows_iis_current_isapi_extension_requests gauge +windows_iis_current_isapi_extension_requests{site="Default Web Site"} 0 +# HELP windows_iis_current_non_anonymous_users Number of users who currently have a non-anonymous connection using the Web service (WebService.CurrentNonAnonymousUsers) +# TYPE windows_iis_current_non_anonymous_users gauge +windows_iis_current_non_anonymous_users{site="Default Web Site"} 0 +# HELP windows_iis_current_worker_processes The current number of worker processes that are running in the application pool (CurrentWorkerProcesses) +# TYPE windows_iis_current_worker_processes gauge 
+windows_iis_current_worker_processes{app="DefaultAppPool"} 1 +# HELP windows_iis_files_received_total Number of files received by the Web service (WebService.TotalFilesReceived) +# TYPE windows_iis_files_received_total counter +windows_iis_files_received_total{site="Default Web Site"} 0 +# HELP windows_iis_files_sent_total Number of files sent by the Web service (WebService.TotalFilesSent) +# TYPE windows_iis_files_sent_total counter +windows_iis_files_sent_total{site="Default Web Site"} 2 +# HELP windows_iis_ipapi_extension_requests_total ISAPI Extension Requests received (WebService.TotalISAPIExtensionRequests) +# TYPE windows_iis_ipapi_extension_requests_total counter +windows_iis_ipapi_extension_requests_total{site="Default Web Site"} 0 +# HELP windows_iis_locked_errors_total Number of requests that couldn't be satisfied by the server because the requested resource was locked (WebService.TotalLockedErrors) +# TYPE windows_iis_locked_errors_total counter +windows_iis_locked_errors_total{site="Default Web Site"} 0 +# HELP windows_iis_logon_attempts_total Number of logons attempts to the Web Service (WebService.TotalLogonAttempts) +# TYPE windows_iis_logon_attempts_total counter +windows_iis_logon_attempts_total{site="Default Web Site"} 4 +# HELP windows_iis_maximum_worker_processes The maximum number of worker processes that have been created for the application pool since Windows Process Activation Service (WAS) started (MaximumWorkerProcesses) +# TYPE windows_iis_maximum_worker_processes gauge +windows_iis_maximum_worker_processes{app="DefaultAppPool"} 1 +# HELP windows_iis_non_anonymous_users_total Number of users who established a non-anonymous connection with the Web service (WebService.TotalNonAnonymousUsers) +# TYPE windows_iis_non_anonymous_users_total counter +windows_iis_non_anonymous_users_total{site="Default Web Site"} 0 +# HELP windows_iis_not_found_errors_total Number of requests that couldn't be satisfied by the server because the requested document could not be found (WebService.TotalNotFoundErrors) +# TYPE windows_iis_not_found_errors_total counter +windows_iis_not_found_errors_total{site="Default Web Site"} 1 +# HELP windows_iis_received_bytes_total Number of data bytes that have been received by the Web service (WebService.TotalBytesReceived) +# TYPE windows_iis_received_bytes_total counter +windows_iis_received_bytes_total{site="Default Web Site"} 10289 +# HELP windows_iis_recent_worker_process_failures The number of times that worker processes for the application pool failed during the rapid-fail protection interval (RecentWorkerProcessFailures) +# TYPE windows_iis_recent_worker_process_failures gauge +windows_iis_recent_worker_process_failures{app="DefaultAppPool"} 0 +# HELP windows_iis_rejected_async_io_requests_total Requests rejected due to bandwidth throttling settings (WebService.TotalRejectedAsyncIORequests) +# TYPE windows_iis_rejected_async_io_requests_total counter +windows_iis_rejected_async_io_requests_total{site="Default Web Site"} 0 +# HELP windows_iis_requests_total Number of HTTP requests (WebService.TotalRequests) +# TYPE windows_iis_requests_total counter +windows_iis_requests_total{method="COPY",site="Default Web Site"} 0 +windows_iis_requests_total{method="DELETE",site="Default Web Site"} 0 +windows_iis_requests_total{method="GET",site="Default Web Site"} 3 +windows_iis_requests_total{method="HEAD",site="Default Web Site"} 0 +windows_iis_requests_total{method="LOCK",site="Default Web Site"} 0 
+windows_iis_requests_total{method="MKCOL",site="Default Web Site"} 0 +windows_iis_requests_total{method="MOVE",site="Default Web Site"} 0 +windows_iis_requests_total{method="OPTIONS",site="Default Web Site"} 0 +windows_iis_requests_total{method="POST",site="Default Web Site"} 0 +windows_iis_requests_total{method="PROPFIND",site="Default Web Site"} 0 +windows_iis_requests_total{method="PROPPATCH",site="Default Web Site"} 0 +windows_iis_requests_total{method="PUT",site="Default Web Site"} 0 +windows_iis_requests_total{method="SEARCH",site="Default Web Site"} 0 +windows_iis_requests_total{method="TRACE",site="Default Web Site"} 0 +windows_iis_requests_total{method="UNLOCK",site="Default Web Site"} 0 +windows_iis_requests_total{method="other",site="Default Web Site"} 0 +# HELP windows_iis_sent_bytes_total Number of data bytes that have been sent by the Web service (WebService.TotalBytesSent) +# TYPE windows_iis_sent_bytes_total counter +windows_iis_sent_bytes_total{site="Default Web Site"} 105882 +# HELP windows_iis_server_cache_active_flushed_entries Number of file handles cached that will be closed when all current transfers complete. +# TYPE windows_iis_server_cache_active_flushed_entries gauge +windows_iis_server_cache_active_flushed_entries 0 +# HELP windows_iis_server_file_cache_flushes_total Total number of file cache flushes (since service startup) +# TYPE windows_iis_server_file_cache_flushes_total counter +windows_iis_server_file_cache_flushes_total 7 +# HELP windows_iis_server_file_cache_hits_total Total number of successful lookups in the user-mode file cache +# TYPE windows_iis_server_file_cache_hits_total counter +windows_iis_server_file_cache_hits_total 1 +# HELP windows_iis_server_file_cache_items Current number of files whose contents are present in cache +# TYPE windows_iis_server_file_cache_items gauge +windows_iis_server_file_cache_items 1 +# HELP windows_iis_server_file_cache_items_flushed_total Total number of file handles that have been removed from the cache (since service startup) +# TYPE windows_iis_server_file_cache_items_flushed_total counter +windows_iis_server_file_cache_items_flushed_total 0 +# HELP windows_iis_server_file_cache_items_total Total number of files whose contents were ever added to the cache (since service startup) +# TYPE windows_iis_server_file_cache_items_total counter +windows_iis_server_file_cache_items_total 1 +# HELP windows_iis_server_file_cache_max_memory_bytes Maximum number of bytes used by file cache +# TYPE windows_iis_server_file_cache_max_memory_bytes counter +windows_iis_server_file_cache_max_memory_bytes 703 +# HELP windows_iis_server_file_cache_memory_bytes Current number of bytes used by file cache +# TYPE windows_iis_server_file_cache_memory_bytes gauge +windows_iis_server_file_cache_memory_bytes 703 +# HELP windows_iis_server_file_cache_queries_total Total number of file cache queries (hits + misses) +# TYPE windows_iis_server_file_cache_queries_total counter +windows_iis_server_file_cache_queries_total 9 +# HELP windows_iis_server_metadata_cache_flushes_total Total number of metadata cache flushes (since service startup) +# TYPE windows_iis_server_metadata_cache_flushes_total counter +windows_iis_server_metadata_cache_flushes_total 0 +# HELP windows_iis_server_metadata_cache_hits_total Total number of successful lookups in the metadata cache (since service startup) +# TYPE windows_iis_server_metadata_cache_hits_total counter +windows_iis_server_metadata_cache_hits_total 3 +# HELP windows_iis_server_metadata_cache_items Number 
of metadata information blocks currently present in cache +# TYPE windows_iis_server_metadata_cache_items gauge +windows_iis_server_metadata_cache_items 1 +# HELP windows_iis_server_metadata_cache_items_cached_total Total number of metadata information blocks added to the cache (since service startup) +# TYPE windows_iis_server_metadata_cache_items_cached_total counter +windows_iis_server_metadata_cache_items_cached_total 1 +# HELP windows_iis_server_metadata_cache_items_flushed_total Total number of metadata information blocks removed from the cache (since service startup) +# TYPE windows_iis_server_metadata_cache_items_flushed_total counter +windows_iis_server_metadata_cache_items_flushed_total 0 +# HELP windows_iis_server_metadata_cache_queries_total Total metadata cache queries (hits + misses) +# TYPE windows_iis_server_metadata_cache_queries_total counter +windows_iis_server_metadata_cache_queries_total 4 +# HELP windows_iis_server_output_cache_active_flushed_items +# TYPE windows_iis_server_output_cache_active_flushed_items counter +windows_iis_server_output_cache_active_flushed_items 0 +# HELP windows_iis_server_output_cache_flushes_total Total number of flushes of output cache (since service startup) +# TYPE windows_iis_server_output_cache_flushes_total counter +windows_iis_server_output_cache_flushes_total 0 +# HELP windows_iis_server_output_cache_hits_total Total number of successful lookups in output cache (since service startup) +# TYPE windows_iis_server_output_cache_hits_total counter +windows_iis_server_output_cache_hits_total 0 +# HELP windows_iis_server_output_cache_items Number of items current present in output cache +# TYPE windows_iis_server_output_cache_items counter +windows_iis_server_output_cache_items 0 +# HELP windows_iis_server_output_cache_items_flushed_total Total number of items flushed from output cache (since service startup) +# TYPE windows_iis_server_output_cache_items_flushed_total counter +windows_iis_server_output_cache_items_flushed_total 0 +# HELP windows_iis_server_output_cache_memory_bytes Current number of bytes used by output cache +# TYPE windows_iis_server_output_cache_memory_bytes counter +windows_iis_server_output_cache_memory_bytes 0 +# HELP windows_iis_server_output_cache_queries_total Total output cache queries (hits + misses) +# TYPE windows_iis_server_output_cache_queries_total counter +windows_iis_server_output_cache_queries_total 4 +# HELP windows_iis_server_uri_cache_flushes_total Total number of URI cache flushes (since service startup) +# TYPE windows_iis_server_uri_cache_flushes_total counter +windows_iis_server_uri_cache_flushes_total{mode="kernel"} 0 +windows_iis_server_uri_cache_flushes_total{mode="user"} 0 +# HELP windows_iis_server_uri_cache_hits_total Total number of successful lookups in the URI cache (since service startup) +# TYPE windows_iis_server_uri_cache_hits_total counter +windows_iis_server_uri_cache_hits_total{mode="kernel"} 0 +windows_iis_server_uri_cache_hits_total{mode="user"} 0 +# HELP windows_iis_server_uri_cache_items Number of URI information blocks currently in the cache +# TYPE windows_iis_server_uri_cache_items gauge +windows_iis_server_uri_cache_items{mode="kernel"} 0 +windows_iis_server_uri_cache_items{mode="user"} 0 +# HELP windows_iis_server_uri_cache_items_flushed_total The number of URI information blocks that have been removed from the cache (since service startup) +# TYPE windows_iis_server_uri_cache_items_flushed_total counter +windows_iis_server_uri_cache_items_flushed_total{mode="kernel"} 0 
+windows_iis_server_uri_cache_items_flushed_total{mode="user"} 0 +# HELP windows_iis_server_uri_cache_items_total Total number of URI information blocks added to the cache (since service startup) +# TYPE windows_iis_server_uri_cache_items_total counter +windows_iis_server_uri_cache_items_total{mode="kernel"} 0 +windows_iis_server_uri_cache_items_total{mode="user"} 0 +# HELP windows_iis_server_uri_cache_queries_total Total number of uri cache queries (hits + misses) +# TYPE windows_iis_server_uri_cache_queries_total counter +windows_iis_server_uri_cache_queries_total{mode="kernel"} 47 +windows_iis_server_uri_cache_queries_total{mode="user"} 4 +# HELP windows_iis_service_uptime Number of seconds the WebService is up (WebService.ServiceUptime) +# TYPE windows_iis_service_uptime gauge +windows_iis_service_uptime{site="Default Web Site"} 258633 +# HELP windows_iis_time_since_last_worker_process_failure The length of time, in seconds, since the last worker process failure occurred for the application pool (TimeSinceLastWorkerProcessFailure) +# TYPE windows_iis_time_since_last_worker_process_failure gauge +windows_iis_time_since_last_worker_process_failure{app="DefaultAppPool"} 1.6672399883854828e+09 +# HELP windows_iis_total_application_pool_recycles The number of times that the application pool has been recycled since Windows Process Activation Service (WAS) started (TotalApplicationPoolRecycles) +# TYPE windows_iis_total_application_pool_recycles counter +windows_iis_total_application_pool_recycles{app="DefaultAppPool"} 0 +# HELP windows_iis_total_application_pool_start_time The unix timestamp for the application pool of when the Windows Process Activation Service (WAS) started (TotalApplicationPoolUptime) +# TYPE windows_iis_total_application_pool_start_time counter +windows_iis_total_application_pool_start_time{app="DefaultAppPool"} 1.6672399883854828e+09 +# HELP windows_iis_total_worker_process_failures The number of times that worker processes have crashed since the application pool was started (TotalWorkerProcessFailures) +# TYPE windows_iis_total_worker_process_failures counter +windows_iis_total_worker_process_failures{app="DefaultAppPool"} 0 +# HELP windows_iis_total_worker_process_ping_failures The number of times that Windows Process Activation Service (WAS) did not receive a response to ping messages sent to a worker process (TotalWorkerProcessPingFailures) +# TYPE windows_iis_total_worker_process_ping_failures counter +windows_iis_total_worker_process_ping_failures{app="DefaultAppPool"} 0 +# HELP windows_iis_total_worker_process_shutdown_failures The number of times that Windows Process Activation Service (WAS) failed to shut down a worker process (TotalWorkerProcessShutdownFailures) +# TYPE windows_iis_total_worker_process_shutdown_failures counter +windows_iis_total_worker_process_shutdown_failures{app="DefaultAppPool"} 0 +# HELP windows_iis_total_worker_process_startup_failures The number of times that Windows Process Activation Service (WAS) failed to start a worker process (TotalWorkerProcessStartupFailures) +# TYPE windows_iis_total_worker_process_startup_failures counter +windows_iis_total_worker_process_startup_failures{app="DefaultAppPool"} 0 +# HELP windows_iis_total_worker_processes_created The number of worker processes created for the application pool since Windows Process Activation Service (WAS) started (TotalWorkerProcessesCreated) +# TYPE windows_iis_total_worker_processes_created counter +windows_iis_total_worker_processes_created{app="DefaultAppPool"} 1 +# HELP 
windows_iis_worker_cache_active_flushed_entries Number of file handles cached in user-mode that will be closed when all current transfers complete. +# TYPE windows_iis_worker_cache_active_flushed_entries gauge +windows_iis_worker_cache_active_flushed_entries{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_current_requests Current number of requests being processed by the worker process +# TYPE windows_iis_worker_current_requests counter +windows_iis_worker_current_requests{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_current_websocket_requests +# TYPE windows_iis_worker_current_websocket_requests counter +windows_iis_worker_current_websocket_requests{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_file_cache_flushes_total Total number of files removed from the user-mode cache +# TYPE windows_iis_worker_file_cache_flushes_total counter +windows_iis_worker_file_cache_flushes_total{app="DefaultAppPool",pid="880"} 7 +# HELP windows_iis_worker_file_cache_hits_total Total number of successful lookups in the user-mode file cache +# TYPE windows_iis_worker_file_cache_hits_total counter +windows_iis_worker_file_cache_hits_total{app="DefaultAppPool",pid="880"} 1 +# HELP windows_iis_worker_file_cache_items Current number of files whose contents are present in user-mode cache +# TYPE windows_iis_worker_file_cache_items gauge +windows_iis_worker_file_cache_items{app="DefaultAppPool",pid="880"} 1 +# HELP windows_iis_worker_file_cache_items_flushed_total Total number of file handles that have been removed from the user-mode cache (since service startup) +# TYPE windows_iis_worker_file_cache_items_flushed_total counter +windows_iis_worker_file_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_file_cache_items_total Total number of files whose contents were ever added to the user-mode cache (since service startup) +# TYPE windows_iis_worker_file_cache_items_total counter +windows_iis_worker_file_cache_items_total{app="DefaultAppPool",pid="880"} 1 +# HELP windows_iis_worker_file_cache_max_memory_bytes Maximum number of bytes used by user-mode file cache +# TYPE windows_iis_worker_file_cache_max_memory_bytes counter +windows_iis_worker_file_cache_max_memory_bytes{app="DefaultAppPool",pid="880"} 703 +# HELP windows_iis_worker_file_cache_memory_bytes Current number of bytes used by user-mode file cache +# TYPE windows_iis_worker_file_cache_memory_bytes gauge +windows_iis_worker_file_cache_memory_bytes{app="DefaultAppPool",pid="880"} 703 +# HELP windows_iis_worker_file_cache_queries_total Total file cache queries (hits + misses) +# TYPE windows_iis_worker_file_cache_queries_total counter +windows_iis_worker_file_cache_queries_total{app="DefaultAppPool",pid="880"} 9 +# HELP windows_iis_worker_max_threads Maximum number of threads to which the thread pool can grow as needed +# TYPE windows_iis_worker_max_threads counter +windows_iis_worker_max_threads{app="DefaultAppPool",pid="880"} 256 +# HELP windows_iis_worker_metadata_cache_flushes_total Total number of user-mode metadata cache flushes (since service startup) +# TYPE windows_iis_worker_metadata_cache_flushes_total counter +windows_iis_worker_metadata_cache_flushes_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_metadata_cache_hits_total Total number of successful lookups in the user-mode metadata cache (since service startup) +# TYPE windows_iis_worker_metadata_cache_hits_total counter +windows_iis_worker_metadata_cache_hits_total{app="DefaultAppPool",pid="880"} 
3 +# HELP windows_iis_worker_metadata_cache_items Number of metadata information blocks currently present in user-mode cache +# TYPE windows_iis_worker_metadata_cache_items gauge +windows_iis_worker_metadata_cache_items{app="DefaultAppPool",pid="880"} 1 +# HELP windows_iis_worker_metadata_cache_items_cached_total Total number of metadata information blocks added to the user-mode cache (since service startup) +# TYPE windows_iis_worker_metadata_cache_items_cached_total counter +windows_iis_worker_metadata_cache_items_cached_total{app="DefaultAppPool",pid="880"} 1 +# HELP windows_iis_worker_metadata_cache_items_flushed_total Total number of metadata information blocks removed from the user-mode cache (since service startup) +# TYPE windows_iis_worker_metadata_cache_items_flushed_total counter +windows_iis_worker_metadata_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_metadata_cache_queries_total Total metadata cache queries (hits + misses) +# TYPE windows_iis_worker_metadata_cache_queries_total counter +windows_iis_worker_metadata_cache_queries_total{app="DefaultAppPool",pid="880"} 4 +# HELP windows_iis_worker_output_cache_active_flushed_items +# TYPE windows_iis_worker_output_cache_active_flushed_items counter +windows_iis_worker_output_cache_active_flushed_items{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_cache_flushes_total Total number of flushes of output cache (since service startup) +# TYPE windows_iis_worker_output_cache_flushes_total counter +windows_iis_worker_output_cache_flushes_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_cache_hits_total Total number of successful lookups in output cache (since service startup) +# TYPE windows_iis_worker_output_cache_hits_total counter +windows_iis_worker_output_cache_hits_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_cache_items Number of items current present in output cache +# TYPE windows_iis_worker_output_cache_items counter +windows_iis_worker_output_cache_items{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_cache_items_flushed_total Total number of items flushed from output cache (since service startup) +# TYPE windows_iis_worker_output_cache_items_flushed_total counter +windows_iis_worker_output_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_cache_memory_bytes Current number of bytes used by output cache +# TYPE windows_iis_worker_output_cache_memory_bytes counter +windows_iis_worker_output_cache_memory_bytes{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_output_queries_total Total number of output cache queries (hits + misses) +# TYPE windows_iis_worker_output_queries_total counter +windows_iis_worker_output_queries_total{app="DefaultAppPool",pid="880"} 4 +# HELP windows_iis_worker_request_errors_total Total number of requests that returned an error +# TYPE windows_iis_worker_request_errors_total counter +windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="401"} 0 +windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="403"} 0 +windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="404"} 1 +windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="500"} 0 +# HELP windows_iis_worker_requests_total Total number of HTTP requests served by the worker process +# TYPE windows_iis_worker_requests_total counter 
+windows_iis_worker_requests_total{app="DefaultAppPool",pid="880"} 3 +# HELP windows_iis_worker_threads Number of threads actively processing requests in the worker process +# TYPE windows_iis_worker_threads gauge +windows_iis_worker_threads{app="DefaultAppPool",pid="880",state="busy"} 0 +windows_iis_worker_threads{app="DefaultAppPool",pid="880",state="idle"} 0 +# HELP windows_iis_worker_uri_cache_flushes_total Total number of URI cache flushes (since service startup) +# TYPE windows_iis_worker_uri_cache_flushes_total counter +windows_iis_worker_uri_cache_flushes_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_uri_cache_hits_total Total number of successful lookups in the user-mode URI cache (since service startup) +# TYPE windows_iis_worker_uri_cache_hits_total counter +windows_iis_worker_uri_cache_hits_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_uri_cache_items Number of URI information blocks currently in the user-mode cache +# TYPE windows_iis_worker_uri_cache_items gauge +windows_iis_worker_uri_cache_items{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_uri_cache_items_flushed_total The number of URI information blocks that have been removed from the user-mode cache (since service startup) +# TYPE windows_iis_worker_uri_cache_items_flushed_total counter +windows_iis_worker_uri_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_uri_cache_items_total Total number of URI information blocks added to the user-mode cache (since service startup) +# TYPE windows_iis_worker_uri_cache_items_total counter +windows_iis_worker_uri_cache_items_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_uri_cache_queries_total Total number of uri cache queries (hits + misses) +# TYPE windows_iis_worker_uri_cache_queries_total counter +windows_iis_worker_uri_cache_queries_total{app="DefaultAppPool",pid="880"} 4 +# HELP windows_iis_worker_websocket_connection_accepted_total +# TYPE windows_iis_worker_websocket_connection_accepted_total counter +windows_iis_worker_websocket_connection_accepted_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_websocket_connection_attempts_total +# TYPE windows_iis_worker_websocket_connection_attempts_total counter +windows_iis_worker_websocket_connection_attempts_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_iis_worker_websocket_connection_rejected_total +# TYPE windows_iis_worker_websocket_connection_rejected_total counter +windows_iis_worker_websocket_connection_rejected_total{app="DefaultAppPool",pid="880"} 0 +# HELP windows_logical_disk_free_bytes Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace) +# TYPE windows_logical_disk_free_bytes gauge +windows_logical_disk_free_bytes{volume="C:"} 4.363649024e+10 +windows_logical_disk_free_bytes{volume="HarddiskVolume4"} 8.5983232e+07 +# HELP windows_logical_disk_idle_seconds_total Seconds that the disk was idle (LogicalDisk.PercentIdleTime) +# TYPE windows_logical_disk_idle_seconds_total counter +windows_logical_disk_idle_seconds_total{volume="C:"} 164591.55536549998 +windows_logical_disk_idle_seconds_total{volume="HarddiskVolume4"} 164707.1418503 +# HELP windows_logical_disk_read_bytes_total The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec) +# TYPE windows_logical_disk_read_bytes_total counter +windows_logical_disk_read_bytes_total{volume="C:"} 1.7676328448e+10 
+windows_logical_disk_read_bytes_total{volume="HarddiskVolume4"} 24576 +# HELP windows_logical_disk_read_latency_seconds_total Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead) +# TYPE windows_logical_disk_read_latency_seconds_total counter +windows_logical_disk_read_latency_seconds_total{volume="C:"} 97.42094709999999 +windows_logical_disk_read_latency_seconds_total{volume="HarddiskVolume4"} 0.0008895999999999999 +# HELP windows_logical_disk_read_seconds_total Seconds that the disk was busy servicing read requests (LogicalDisk.PercentDiskReadTime) +# TYPE windows_logical_disk_read_seconds_total counter +windows_logical_disk_read_seconds_total{volume="C:"} 97.42094709999999 +windows_logical_disk_read_seconds_total{volume="HarddiskVolume4"} 0.0008895999999999999 +# HELP windows_logical_disk_read_write_latency_seconds_total Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer) +# TYPE windows_logical_disk_read_write_latency_seconds_total counter +windows_logical_disk_read_write_latency_seconds_total{volume="C:"} 221.3335836 +windows_logical_disk_read_write_latency_seconds_total{volume="HarddiskVolume4"} 0.0031135 +# HELP windows_logical_disk_reads_total The number of read operations on the disk (LogicalDisk.DiskReadsPerSec) +# TYPE windows_logical_disk_reads_total counter +windows_logical_disk_reads_total{volume="C:"} 350593 +windows_logical_disk_reads_total{volume="HarddiskVolume4"} 6 +# HELP windows_logical_disk_requests_queued The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength) +# TYPE windows_logical_disk_requests_queued gauge +windows_logical_disk_requests_queued{volume="C:"} 0 +windows_logical_disk_requests_queued{volume="HarddiskVolume4"} 0 +# HELP windows_logical_disk_size_bytes Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base) +# TYPE windows_logical_disk_size_bytes gauge +windows_logical_disk_size_bytes{volume="C:"} 6.7938287616e+10 +windows_logical_disk_size_bytes{volume="HarddiskVolume4"} 6.54311424e+08 +# HELP windows_logical_disk_split_ios_total The number of I/Os to the disk were split into multiple I/Os (LogicalDisk.SplitIOPerSec) +# TYPE windows_logical_disk_split_ios_total counter +windows_logical_disk_split_ios_total{volume="C:"} 37836 +windows_logical_disk_split_ios_total{volume="HarddiskVolume4"} 0 +# HELP windows_logical_disk_write_bytes_total The number of bytes transferred to the disk during write operations (LogicalDisk.DiskWriteBytesPerSec) +# TYPE windows_logical_disk_write_bytes_total counter +windows_logical_disk_write_bytes_total{volume="C:"} 9.135282688e+09 +windows_logical_disk_write_bytes_total{volume="HarddiskVolume4"} 53248 +# HELP windows_logical_disk_write_latency_seconds_total Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite) +# TYPE windows_logical_disk_write_latency_seconds_total counter +windows_logical_disk_write_latency_seconds_total{volume="C:"} 123.91263649999999 +windows_logical_disk_write_latency_seconds_total{volume="HarddiskVolume4"} 0.0022239 +# HELP windows_logical_disk_write_seconds_total Seconds that the disk was busy servicing write requests (LogicalDisk.PercentDiskWriteTime) +# TYPE windows_logical_disk_write_seconds_total counter +windows_logical_disk_write_seconds_total{volume="C:"} 123.91263649999999 +windows_logical_disk_write_seconds_total{volume="HarddiskVolume4"} 0.0022239 +# HELP windows_logical_disk_writes_total The number of 
write operations on the disk (LogicalDisk.DiskWritesPerSec) +# TYPE windows_logical_disk_writes_total counter +windows_logical_disk_writes_total{volume="C:"} 450705 +windows_logical_disk_writes_total{volume="HarddiskVolume4"} 11 +# HELP windows_logon_logon_type Number of active logon sessions (LogonSession.LogonType) +# TYPE windows_logon_logon_type gauge +windows_logon_logon_type{status="batch"} 0 +windows_logon_logon_type{status="cached_interactive"} 0 +windows_logon_logon_type{status="cached_remote_interactive"} 0 +windows_logon_logon_type{status="cached_unlock"} 0 +windows_logon_logon_type{status="interactive"} 2 +windows_logon_logon_type{status="network"} 0 +windows_logon_logon_type{status="network_clear_text"} 0 +windows_logon_logon_type{status="new_credentials"} 0 +windows_logon_logon_type{status="proxy"} 0 +windows_logon_logon_type{status="remote_interactive"} 0 +windows_logon_logon_type{status="service"} 0 +windows_logon_logon_type{status="system"} 0 +windows_logon_logon_type{status="unlock"} 0 +# HELP windows_memory_available_bytes The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists (AvailableBytes) +# TYPE windows_memory_available_bytes gauge +windows_memory_available_bytes 1.3799424e+09 +# HELP windows_memory_cache_bytes (CacheBytes) +# TYPE windows_memory_cache_bytes gauge +windows_memory_cache_bytes 1.70774528e+08 +# HELP windows_memory_cache_bytes_peak (CacheBytesPeak) +# TYPE windows_memory_cache_bytes_peak gauge +windows_memory_cache_bytes_peak 2.08621568e+08 +# HELP windows_memory_cache_faults_total Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) or from disk (hard fault) (Cache Faults/sec) +# TYPE windows_memory_cache_faults_total counter +windows_memory_cache_faults_total 8.009603e+06 +# HELP windows_memory_commit_limit (CommitLimit) +# TYPE windows_memory_commit_limit gauge +windows_memory_commit_limit 5.733113856e+09 +# HELP windows_memory_committed_bytes (CommittedBytes) +# TYPE windows_memory_committed_bytes gauge +windows_memory_committed_bytes 3.44743936e+09 +# HELP windows_memory_demand_zero_faults_total The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec) +# TYPE windows_memory_demand_zero_faults_total counter +windows_memory_demand_zero_faults_total 1.02505136e+08 +# HELP windows_memory_free_and_zero_page_list_bytes The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately available for allocation to a process or for system use (FreeAndZeroPageListBytes) +# TYPE windows_memory_free_and_zero_page_list_bytes gauge +windows_memory_free_and_zero_page_list_bytes 2.0410368e+07 +# HELP windows_memory_free_system_page_table_entries (FreeSystemPageTableEntries) +# TYPE windows_memory_free_system_page_table_entries gauge +windows_memory_free_system_page_table_entries 1.6722559e+07 +# HELP windows_memory_modified_page_list_bytes The amount of physical memory, in bytes, that is assigned to the modified page list. 
This memory contains cached data and code that is not actively in use by processes, the system and the system cache (ModifiedPageListBytes) +# TYPE windows_memory_modified_page_list_bytes gauge +windows_memory_modified_page_list_bytes 3.2653312e+07 +# HELP windows_memory_page_faults_total Overall rate at which faulted pages are handled by the processor (Page Faults/sec) +# TYPE windows_memory_page_faults_total counter +windows_memory_page_faults_total 1.19093924e+08 +# HELP windows_memory_pool_nonpaged_allocs_total The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs) +# TYPE windows_memory_pool_nonpaged_allocs_total gauge +windows_memory_pool_nonpaged_allocs_total 0 +# HELP windows_memory_pool_nonpaged_bytes Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated (PoolNonpagedBytes) +# TYPE windows_memory_pool_nonpaged_bytes gauge +windows_memory_pool_nonpaged_bytes 1.26865408e+08 +# HELP windows_memory_pool_paged_allocs_total Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs) +# TYPE windows_memory_pool_paged_allocs_total counter +windows_memory_pool_paged_allocs_total 0 +# HELP windows_memory_pool_paged_bytes (PoolPagedBytes) +# TYPE windows_memory_pool_paged_bytes gauge +windows_memory_pool_paged_bytes 3.03906816e+08 +# HELP windows_memory_pool_paged_resident_bytes The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes) +# TYPE windows_memory_pool_paged_resident_bytes gauge +windows_memory_pool_paged_resident_bytes 2.94293504e+08 +# HELP windows_memory_standby_cache_core_bytes The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes) +# TYPE windows_memory_standby_cache_core_bytes gauge +windows_memory_standby_cache_core_bytes 1.0737664e+08 +# HELP windows_memory_standby_cache_normal_priority_bytes The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes) +# TYPE windows_memory_standby_cache_normal_priority_bytes gauge +windows_memory_standby_cache_normal_priority_bytes 1.019121664e+09 +# HELP windows_memory_standby_cache_reserve_bytes The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. 
This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes) +# TYPE windows_memory_standby_cache_reserve_bytes gauge +windows_memory_standby_cache_reserve_bytes 2.33033728e+08 +# HELP windows_memory_swap_page_operations_total Total number of swap page read and writes (PagesPersec) +# TYPE windows_memory_swap_page_operations_total counter +windows_memory_swap_page_operations_total 4.956175e+06 +# HELP windows_memory_swap_page_reads_total Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec) +# TYPE windows_memory_swap_page_reads_total counter +windows_memory_swap_page_reads_total 402087 +# HELP windows_memory_swap_page_writes_total Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec) +# TYPE windows_memory_swap_page_writes_total counter +windows_memory_swap_page_writes_total 7012 +# HELP windows_memory_swap_pages_read_total Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec) +# TYPE windows_memory_swap_pages_read_total counter +windows_memory_swap_pages_read_total 4.643279e+06 +# HELP windows_memory_swap_pages_written_total Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec) +# TYPE windows_memory_swap_pages_written_total counter +windows_memory_swap_pages_written_total 312896 +# HELP windows_memory_system_cache_resident_bytes The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes) +# TYPE windows_memory_system_cache_resident_bytes gauge +windows_memory_system_cache_resident_bytes 1.70774528e+08 +# HELP windows_memory_system_code_resident_bytes The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes) +# TYPE windows_memory_system_code_resident_bytes gauge +windows_memory_system_code_resident_bytes 1.71008e+07 +# HELP windows_memory_system_code_total_bytes The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes) +# TYPE windows_memory_system_code_total_bytes gauge +windows_memory_system_code_total_bytes 8192 +# HELP windows_memory_system_driver_resident_bytes The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes) +# TYPE windows_memory_system_driver_resident_bytes gauge +windows_memory_system_driver_resident_bytes 4.6092288e+07 +# HELP windows_memory_system_driver_total_bytes The size, in bytes, of the pageable virtual memory currently being used by device drivers. 
Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes) +# TYPE windows_memory_system_driver_total_bytes gauge +windows_memory_system_driver_total_bytes 1.8731008e+07 +# HELP windows_memory_transition_faults_total Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec) +# TYPE windows_memory_transition_faults_total counter +windows_memory_transition_faults_total 2.7183527e+07 +# HELP windows_memory_transition_pages_repurposed_total Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec) +# TYPE windows_memory_transition_pages_repurposed_total counter +windows_memory_transition_pages_repurposed_total 2.856471e+06 +# HELP windows_memory_write_copies_total The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec) +# TYPE windows_memory_write_copies_total counter +windows_memory_write_copies_total 1.194039e+06 +# HELP windows_mssql_accessmethods_au_batch_cleanup_failures (AccessMethods.FailedAUcleanupbatches) +# TYPE windows_mssql_accessmethods_au_batch_cleanup_failures counter +windows_mssql_accessmethods_au_batch_cleanup_failures{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_au_batch_cleanups (AccessMethods.AUcleanupbatches) +# TYPE windows_mssql_accessmethods_au_batch_cleanups counter +windows_mssql_accessmethods_au_batch_cleanups{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_au_cleanups (AccessMethods.AUcleanups) +# TYPE windows_mssql_accessmethods_au_cleanups counter +windows_mssql_accessmethods_au_cleanups{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_by_reference_lob_creates (AccessMethods.ByreferenceLobCreateCount) +# TYPE windows_mssql_accessmethods_by_reference_lob_creates counter +windows_mssql_accessmethods_by_reference_lob_creates{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_by_reference_lob_uses (AccessMethods.ByreferenceLobUseCount) +# TYPE windows_mssql_accessmethods_by_reference_lob_uses counter +windows_mssql_accessmethods_by_reference_lob_uses{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_column_value_pulls (AccessMethods.CountPullInRow) +# TYPE windows_mssql_accessmethods_column_value_pulls counter +windows_mssql_accessmethods_column_value_pulls{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_column_value_pushes (AccessMethods.CountPushOffRow) +# TYPE windows_mssql_accessmethods_column_value_pushes counter +windows_mssql_accessmethods_column_value_pushes{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_deferred_dropped_aus (AccessMethods.DeferreddroppedAUs) +# TYPE windows_mssql_accessmethods_deferred_dropped_aus gauge +windows_mssql_accessmethods_deferred_dropped_aus{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_deferred_dropped_rowsets (AccessMethods.DeferredDroppedrowsets) +# TYPE windows_mssql_accessmethods_deferred_dropped_rowsets gauge +windows_mssql_accessmethods_deferred_dropped_rowsets{mssql_instance="SQLEXPRESS"} 0 +# HELP windows_mssql_accessmethods_dropped_rowset_cleanups (AccessMethods.Droppedrowsetcleanups) +# TYPE 
+# HELP windows_mssql_accessmethods_dropped_rowset_cleanups (AccessMethods.Droppedrowsetcleanups)
+# TYPE windows_mssql_accessmethods_dropped_rowset_cleanups counter
+windows_mssql_accessmethods_dropped_rowset_cleanups{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_dropped_rowset_skips (AccessMethods.Droppedrowsetsskipped)
+# TYPE windows_mssql_accessmethods_dropped_rowset_skips counter
+windows_mssql_accessmethods_dropped_rowset_skips{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_extent_allocations (AccessMethods.ExtentsAllocated)
+# TYPE windows_mssql_accessmethods_extent_allocations counter
+windows_mssql_accessmethods_extent_allocations{mssql_instance="SQLEXPRESS"} 16
+# HELP windows_mssql_accessmethods_extent_deallocations (AccessMethods.ExtentDeallocations)
+# TYPE windows_mssql_accessmethods_extent_deallocations counter
+windows_mssql_accessmethods_extent_deallocations{mssql_instance="SQLEXPRESS"} 3
+# HELP windows_mssql_accessmethods_forwarded_records (AccessMethods.ForwardedRecords)
+# TYPE windows_mssql_accessmethods_forwarded_records counter
+windows_mssql_accessmethods_forwarded_records{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_free_space_page_fetches (AccessMethods.FreeSpacePageFetches)
+# TYPE windows_mssql_accessmethods_free_space_page_fetches counter
+windows_mssql_accessmethods_free_space_page_fetches{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_free_space_scans (AccessMethods.FreeSpaceScans)
+# TYPE windows_mssql_accessmethods_free_space_scans counter
+windows_mssql_accessmethods_free_space_scans{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_full_scans (AccessMethods.FullScans)
+# TYPE windows_mssql_accessmethods_full_scans counter
+windows_mssql_accessmethods_full_scans{mssql_instance="SQLEXPRESS"} 8743
+# HELP windows_mssql_accessmethods_ghost_record_skips (AccessMethods.SkippedGhostedRecordsPersec)
+# TYPE windows_mssql_accessmethods_ghost_record_skips counter
+windows_mssql_accessmethods_ghost_record_skips{mssql_instance="SQLEXPRESS"} 20
+# HELP windows_mssql_accessmethods_index_searches (AccessMethods.IndexSearches)
+# TYPE windows_mssql_accessmethods_index_searches counter
+windows_mssql_accessmethods_index_searches{mssql_instance="SQLEXPRESS"} 843808
+# HELP windows_mssql_accessmethods_insysxact_waits (AccessMethods.InSysXactwaits)
+# TYPE windows_mssql_accessmethods_insysxact_waits counter
+windows_mssql_accessmethods_insysxact_waits{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_leaf_page_cookie_failures (AccessMethods.Failedleafpagecookie)
+# TYPE windows_mssql_accessmethods_leaf_page_cookie_failures counter
+windows_mssql_accessmethods_leaf_page_cookie_failures{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_leaf_page_cookie_uses (AccessMethods.Usedleafpagecookie)
+# TYPE windows_mssql_accessmethods_leaf_page_cookie_uses counter
+windows_mssql_accessmethods_leaf_page_cookie_uses{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_handle_creates (AccessMethods.LobHandleCreateCount)
+# TYPE windows_mssql_accessmethods_lob_handle_creates counter
+windows_mssql_accessmethods_lob_handle_creates{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_handle_destroys (AccessMethods.LobHandleDestroyCount)
+# TYPE windows_mssql_accessmethods_lob_handle_destroys counter
+windows_mssql_accessmethods_lob_handle_destroys{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_read_aheads (AccessMethods.CountLobReadahead)
+# TYPE windows_mssql_accessmethods_lob_read_aheads counter
+windows_mssql_accessmethods_lob_read_aheads{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_accessmethods_lob_ss_provider_creates (AccessMethods.LobSSProviderCreateCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_creates counter
+windows_mssql_accessmethods_lob_ss_provider_creates{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_ss_provider_destroys (AccessMethods.LobSSProviderDestroyCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_destroys counter
+windows_mssql_accessmethods_lob_ss_provider_destroys{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_ss_provider_truncations (AccessMethods.LobSSProviderTruncationCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_truncations counter
+windows_mssql_accessmethods_lob_ss_provider_truncations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_mixed_page_allocations (AccessMethods.MixedpageallocationsPersec)
+# TYPE windows_mssql_accessmethods_mixed_page_allocations counter
+windows_mssql_accessmethods_mixed_page_allocations{mssql_instance="SQLEXPRESS"} 66
+# HELP windows_mssql_accessmethods_page_allocations (AccessMethods.PagesAllocatedPersec)
+# TYPE windows_mssql_accessmethods_page_allocations counter
+windows_mssql_accessmethods_page_allocations{mssql_instance="SQLEXPRESS"} 83
+# HELP windows_mssql_accessmethods_page_compression_attempts (AccessMethods.PagecompressionattemptsPersec)
+# TYPE windows_mssql_accessmethods_page_compression_attempts counter
+windows_mssql_accessmethods_page_compression_attempts{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_page_compressions (AccessMethods.PagescompressedPersec)
+# TYPE windows_mssql_accessmethods_page_compressions counter
+windows_mssql_accessmethods_page_compressions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_page_deallocations (AccessMethods.PageDeallocationsPersec)
+# TYPE windows_mssql_accessmethods_page_deallocations counter
+windows_mssql_accessmethods_page_deallocations{mssql_instance="SQLEXPRESS"} 60
+# HELP windows_mssql_accessmethods_page_splits (AccessMethods.PageSplitsPersec)
+# TYPE windows_mssql_accessmethods_page_splits counter
+windows_mssql_accessmethods_page_splits{mssql_instance="SQLEXPRESS"} 429
+# HELP windows_mssql_accessmethods_probe_scans (AccessMethods.ProbeScansPersec)
+# TYPE windows_mssql_accessmethods_probe_scans counter
+windows_mssql_accessmethods_probe_scans{mssql_instance="SQLEXPRESS"} 217563
+# HELP windows_mssql_accessmethods_range_scans (AccessMethods.RangeScansPersec)
+# TYPE windows_mssql_accessmethods_range_scans counter
+windows_mssql_accessmethods_range_scans{mssql_instance="SQLEXPRESS"} 590779
+# HELP windows_mssql_accessmethods_scan_point_revalidations (AccessMethods.ScanPointRevalidationsPersec)
+# TYPE windows_mssql_accessmethods_scan_point_revalidations counter
+windows_mssql_accessmethods_scan_point_revalidations{mssql_instance="SQLEXPRESS"} 5
+# HELP windows_mssql_accessmethods_table_lock_escalations (AccessMethods.TableLockEscalationsPersec)
+# TYPE windows_mssql_accessmethods_table_lock_escalations counter
+windows_mssql_accessmethods_table_lock_escalations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_tree_page_cookie_failures (AccessMethods.Failedtreepagecookie)
+# TYPE windows_mssql_accessmethods_tree_page_cookie_failures counter
+windows_mssql_accessmethods_tree_page_cookie_failures{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_tree_page_cookie_uses (AccessMethods.Usedtreepagecookie)
+# TYPE windows_mssql_accessmethods_tree_page_cookie_uses counter
+windows_mssql_accessmethods_tree_page_cookie_uses{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_workfile_creates (AccessMethods.WorkfilesCreatedPersec)
+# TYPE windows_mssql_accessmethods_workfile_creates counter
+windows_mssql_accessmethods_workfile_creates{mssql_instance="SQLEXPRESS"} 96
+# HELP windows_mssql_accessmethods_worktables_creates (AccessMethods.WorktablesCreatedPersec)
+# TYPE windows_mssql_accessmethods_worktables_creates counter
+windows_mssql_accessmethods_worktables_creates{mssql_instance="SQLEXPRESS"} 557
+# HELP windows_mssql_accessmethods_worktables_from_cache_hits (AccessMethods.WorktablesFromCacheRatio)
+# TYPE windows_mssql_accessmethods_worktables_from_cache_hits counter
+windows_mssql_accessmethods_worktables_from_cache_hits{mssql_instance="SQLEXPRESS"} 357
+# HELP windows_mssql_accessmethods_worktables_from_cache_lookups (AccessMethods.WorktablesFromCacheRatio_Base)
+# TYPE windows_mssql_accessmethods_worktables_from_cache_lookups counter
+windows_mssql_accessmethods_worktables_from_cache_lookups{mssql_instance="SQLEXPRESS"} 364
+# HELP windows_mssql_bufman_background_writer_pages (BufferManager.Backgroundwriterpages)
+# TYPE windows_mssql_bufman_background_writer_pages counter
+windows_mssql_bufman_background_writer_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_buffer_cache_hits (BufferManager.Buffercachehitratio)
+# TYPE windows_mssql_bufman_buffer_cache_hits gauge
+windows_mssql_bufman_buffer_cache_hits{mssql_instance="SQLEXPRESS"} 86
+# HELP windows_mssql_bufman_buffer_cache_lookups (BufferManager.Buffercachehitratio_Base)
+# TYPE windows_mssql_bufman_buffer_cache_lookups gauge
+windows_mssql_bufman_buffer_cache_lookups{mssql_instance="SQLEXPRESS"} 86
+# HELP windows_mssql_bufman_checkpoint_pages (BufferManager.Checkpointpages)
+# TYPE windows_mssql_bufman_checkpoint_pages counter
+windows_mssql_bufman_checkpoint_pages{mssql_instance="SQLEXPRESS"} 82
+# HELP windows_mssql_bufman_database_pages (BufferManager.Databasepages)
+# TYPE windows_mssql_bufman_database_pages gauge
+windows_mssql_bufman_database_pages{mssql_instance="SQLEXPRESS"} 829
+# HELP windows_mssql_bufman_extension_allocated_pages (BufferManager.Extensionallocatedpages)
+# TYPE windows_mssql_bufman_extension_allocated_pages gauge
+windows_mssql_bufman_extension_allocated_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_free_pages (BufferManager.Extensionfreepages)
+# TYPE windows_mssql_bufman_extension_free_pages gauge
+windows_mssql_bufman_extension_free_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_in_use_as_percentage (BufferManager.Extensioninuseaspercentage)
+# TYPE windows_mssql_bufman_extension_in_use_as_percentage gauge
+windows_mssql_bufman_extension_in_use_as_percentage{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_outstanding_io (BufferManager.ExtensionoutstandingIOcounter)
+# TYPE windows_mssql_bufman_extension_outstanding_io gauge
+windows_mssql_bufman_extension_outstanding_io{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_evictions (BufferManager.Extensionpageevictions)
+# TYPE windows_mssql_bufman_extension_page_evictions counter
+windows_mssql_bufman_extension_page_evictions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_reads (BufferManager.Extensionpagereads)
+# TYPE windows_mssql_bufman_extension_page_reads counter
+windows_mssql_bufman_extension_page_reads{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_unreferenced_seconds (BufferManager.Extensionpageunreferencedtime)
+# TYPE windows_mssql_bufman_extension_page_unreferenced_seconds gauge
+windows_mssql_bufman_extension_page_unreferenced_seconds{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_writes (BufferManager.Extensionpagewrites)
+# TYPE windows_mssql_bufman_extension_page_writes counter
+windows_mssql_bufman_extension_page_writes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_free_list_stalls (BufferManager.Freeliststalls)
+# TYPE windows_mssql_bufman_free_list_stalls counter
+windows_mssql_bufman_free_list_stalls{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_integral_controller_slope (BufferManager.IntegralControllerSlope)
+# TYPE windows_mssql_bufman_integral_controller_slope gauge
+windows_mssql_bufman_integral_controller_slope{mssql_instance="SQLEXPRESS"} 10
+# HELP windows_mssql_bufman_lazywrites (BufferManager.Lazywrites)
+# TYPE windows_mssql_bufman_lazywrites counter
+windows_mssql_bufman_lazywrites{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_page_life_expectancy_seconds (BufferManager.Pagelifeexpectancy)
+# TYPE windows_mssql_bufman_page_life_expectancy_seconds gauge
+windows_mssql_bufman_page_life_expectancy_seconds{mssql_instance="SQLEXPRESS"} 191350
+# HELP windows_mssql_bufman_page_lookups (BufferManager.Pagelookups)
+# TYPE windows_mssql_bufman_page_lookups counter
+windows_mssql_bufman_page_lookups{mssql_instance="SQLEXPRESS"} 1.699668e+06
+# HELP windows_mssql_bufman_page_reads (BufferManager.Pagereads)
+# TYPE windows_mssql_bufman_page_reads counter
+windows_mssql_bufman_page_reads{mssql_instance="SQLEXPRESS"} 797
+# HELP windows_mssql_bufman_page_writes (BufferManager.Pagewrites)
+# TYPE windows_mssql_bufman_page_writes counter
+windows_mssql_bufman_page_writes{mssql_instance="SQLEXPRESS"} 92
+# HELP windows_mssql_bufman_read_ahead_issuing_seconds (BufferManager.Readaheadtime)
+# TYPE windows_mssql_bufman_read_ahead_issuing_seconds counter
+windows_mssql_bufman_read_ahead_issuing_seconds{mssql_instance="SQLEXPRESS"} 1292
+# HELP windows_mssql_bufman_read_ahead_pages (BufferManager.Readaheadpages)
+# TYPE windows_mssql_bufman_read_ahead_pages counter
+windows_mssql_bufman_read_ahead_pages{mssql_instance="SQLEXPRESS"} 94
+# HELP windows_mssql_bufman_target_pages (BufferManager.Targetpages)
+# TYPE windows_mssql_bufman_target_pages gauge
+windows_mssql_bufman_target_pages{mssql_instance="SQLEXPRESS"} 180480
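A pattern worth noting in the BufferManager block above: SQL Server exposes Buffercachehitratio as a raw numerator with a matching Buffercachehitratio_Base denominator, and windows_exporter surfaces the pair as the _hits/_lookups gauges rather than a precomputed ratio. A minimal sketch of the division a consumer performs (illustrative only, not go.d's actual code):

```go
// Illustrative only (not go.d's actual code): divide the raw numerator by its
// "_Base" denominator to recover the perf counter's ratio.
package main

import "fmt"

func hitRatioPercent(hits, lookups float64) float64 {
	if lookups == 0 {
		return 0 // the _Base can legitimately be zero in a quiet interval
	}
	return hits / lookups * 100
}

func main() {
	// Values from the fixture: 86 hits over 86 lookups -> 100%.
	fmt.Printf("buffer cache hit ratio: %.1f%%\n", hitRatioPercent(86, 86))
}
```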
+# HELP windows_mssql_collector_duration_seconds windows_exporter: Duration of an mssql child collection.
+# TYPE windows_mssql_collector_duration_seconds gauge
+windows_mssql_collector_duration_seconds{collector="accessmethods",mssql_instance="SQLEXPRESS"} 0.0009723
+windows_mssql_collector_duration_seconds{collector="availreplica",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="bufman",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="databases",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="dbreplica",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="genstats",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="locks",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="memmgr",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="sqlerrors",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="sqlstats",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="transactions",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="waitstats",mssql_instance="SQLEXPRESS"} 0.0012212
+# HELP windows_mssql_collector_success windows_exporter: Whether a mssql child collector was successful.
+# TYPE windows_mssql_collector_success gauge
+windows_mssql_collector_success{collector="accessmethods",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="availreplica",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="bufman",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="databases",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="dbreplica",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="genstats",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="locks",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="memmgr",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="sqlerrors",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="sqlstats",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="transactions",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="waitstats",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_active_parallel_redo_threads (Databases.ActiveParallelredothreads)
+# TYPE windows_mssql_databases_active_parallel_redo_threads gauge
+windows_mssql_databases_active_parallel_redo_threads{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_active_transactions (Databases.ActiveTransactions)
+# TYPE windows_mssql_databases_active_transactions gauge
+windows_mssql_databases_active_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_backup_restore_operations (Databases.BackupPerRestoreThroughput)
+# TYPE windows_mssql_databases_backup_restore_operations counter
+windows_mssql_databases_backup_restore_operations{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_bulk_copy_bytes (Databases.BulkCopyThroughput)
+# TYPE windows_mssql_databases_bulk_copy_bytes counter
+windows_mssql_databases_bulk_copy_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_bulk_copy_rows (Databases.BulkCopyRows)
+# TYPE windows_mssql_databases_bulk_copy_rows counter
+windows_mssql_databases_bulk_copy_rows{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_commit_table_entries (Databases.Committableentries)
+# TYPE windows_mssql_databases_commit_table_entries gauge
+windows_mssql_databases_commit_table_entries{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_data_files_size_bytes (Databases.DataFilesSizeKB)
+# TYPE windows_mssql_databases_data_files_size_bytes gauge
+windows_mssql_databases_data_files_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 4.653056e+06
+windows_mssql_databases_data_files_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 8.388608e+06
+windows_mssql_databases_data_files_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 1.5466496e+07
+windows_mssql_databases_data_files_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 4.194304e+07
+windows_mssql_databases_data_files_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 8.388608e+06
+# HELP windows_mssql_databases_dbcc_logical_scan_bytes (Databases.DBCCLogicalScanBytes)
+# TYPE windows_mssql_databases_dbcc_logical_scan_bytes counter
+windows_mssql_databases_dbcc_logical_scan_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_group_commit_stall_seconds (Databases.GroupCommitTime)
+# TYPE windows_mssql_databases_group_commit_stall_seconds counter
+windows_mssql_databases_group_commit_stall_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_hits (Databases.LogCacheHitRatio)
+# TYPE windows_mssql_databases_log_cache_hits gauge
+windows_mssql_databases_log_cache_hits{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_lookups (Databases.LogCacheHitRatio_Base)
+# TYPE windows_mssql_databases_log_cache_lookups gauge
+windows_mssql_databases_log_cache_lookups{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_reads (Databases.LogCacheReads)
+# TYPE windows_mssql_databases_log_cache_reads counter
+windows_mssql_databases_log_cache_reads{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_files_size_bytes (Databases.LogFilesSizeKB)
+# TYPE windows_mssql_databases_log_files_size_bytes gauge
+windows_mssql_databases_log_files_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 2.08896e+06
+windows_mssql_databases_log_files_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 8.380416e+06
+windows_mssql_databases_log_files_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 778240
+windows_mssql_databases_log_files_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 1.302528e+06
+windows_mssql_databases_log_files_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 8.380416e+06
+# HELP windows_mssql_databases_log_files_used_size_bytes (Databases.LogFilesUsedSizeKB)
+# TYPE windows_mssql_databases_log_files_used_size_bytes gauge
+windows_mssql_databases_log_files_used_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 1.210368e+06
+windows_mssql_databases_log_files_used_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 585728
+windows_mssql_databases_log_files_used_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 532480
+windows_mssql_databases_log_files_used_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 637952
+windows_mssql_databases_log_files_used_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 565248
+# HELP windows_mssql_databases_log_flush_wait_seconds (Databases.LogFlushWaitTime)
+# TYPE windows_mssql_databases_log_flush_wait_seconds gauge
+windows_mssql_databases_log_flush_wait_seconds{database="master",mssql_instance="SQLEXPRESS"} 0.226
+windows_mssql_databases_log_flush_wait_seconds{database="model",mssql_instance="SQLEXPRESS"} 0.002
+windows_mssql_databases_log_flush_wait_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_wait_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_wait_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_flush_waits (Databases.LogFlushWaits)
+# TYPE windows_mssql_databases_log_flush_waits counter
+windows_mssql_databases_log_flush_waits{database="master",mssql_instance="SQLEXPRESS"} 245
+windows_mssql_databases_log_flush_waits{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_flush_waits{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_waits{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_waits{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_flush_write_seconds (Databases.LogFlushWriteTimems)
+# TYPE windows_mssql_databases_log_flush_write_seconds gauge
+windows_mssql_databases_log_flush_write_seconds{database="master",mssql_instance="SQLEXPRESS"} 0.164
+windows_mssql_databases_log_flush_write_seconds{database="model",mssql_instance="SQLEXPRESS"} 0.002
+windows_mssql_databases_log_flush_write_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_write_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_write_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0.002
+# HELP windows_mssql_databases_log_flushed_bytes (Databases.LogBytesFlushed)
+# TYPE windows_mssql_databases_log_flushed_bytes counter
+windows_mssql_databases_log_flushed_bytes{database="master",mssql_instance="SQLEXPRESS"} 3.702784e+06
+windows_mssql_databases_log_flushed_bytes{database="model",mssql_instance="SQLEXPRESS"} 12288
+windows_mssql_databases_log_flushed_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushed_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushed_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 118784
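The metric names in this block also record windows_exporter's unit normalization: perf counters reported in kilobytes (Databases.LogFilesSizeKB) surface as _bytes metrics, and millisecond timers (Databases.LogFlushWaitTime) surface as _seconds. A hedged sketch of the conversions, checked against the master database samples above (the conversion factors are the standard ones; the exporter's exact code is not shown here):

```go
// Illustrative conversions behind the renamed metrics (assumed, not the
// exporter's actual source): KB counters become _bytes, ms timers _seconds.
package main

import "fmt"

func kbToBytes(kb float64) float64   { return kb * 1024 }
func msToSeconds(ms float64) float64 { return ms / 1000 }

func main() {
	fmt.Println(kbToBytes(2040))  // master log file: 2040 KB -> 2.08896e+06 bytes
	fmt.Println(msToSeconds(226)) // master log flush wait: 226 ms -> 0.226 s
}
```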
+# HELP windows_mssql_databases_log_flushes (Databases.LogFlushes)
+# TYPE windows_mssql_databases_log_flushes counter
+windows_mssql_databases_log_flushes{database="master",mssql_instance="SQLEXPRESS"} 252
+windows_mssql_databases_log_flushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_flushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushes{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_growths (Databases.LogGrowths)
+# TYPE windows_mssql_databases_log_growths gauge
+windows_mssql_databases_log_growths{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_cache_misses (Databases.LogPoolCacheMisses)
+# TYPE windows_mssql_databases_log_pool_cache_misses counter
+windows_mssql_databases_log_pool_cache_misses{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="tempdb",mssql_instance="SQLEXPRESS"} 3
+# HELP windows_mssql_databases_log_pool_disk_reads (Databases.LogPoolDiskReads)
+# TYPE windows_mssql_databases_log_pool_disk_reads counter
+windows_mssql_databases_log_pool_disk_reads{database="master",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="model",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="msdb",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_disk_reads{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_pool_empty_free_pool_pushes (Databases.LogPoolPushEmptyFreePool)
+# TYPE windows_mssql_databases_log_pool_empty_free_pool_pushes counter
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_hash_deletes (Databases.LogPoolHashDeletes)
+# TYPE windows_mssql_databases_log_pool_hash_deletes counter
+windows_mssql_databases_log_pool_hash_deletes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_deletes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_deletes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_hash_deletes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_hash_deletes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_hash_inserts (Databases.LogPoolHashInserts)
+# TYPE windows_mssql_databases_log_pool_hash_inserts counter
+windows_mssql_databases_log_pool_hash_inserts{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_invalid_hash_entries (Databases.LogPoolInvalidHashEntry)
+# TYPE windows_mssql_databases_log_pool_invalid_hash_entries counter
+windows_mssql_databases_log_pool_invalid_hash_entries{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_log_scan_pushes (Databases.LogPoolLogScanPushes)
+# TYPE windows_mssql_databases_log_pool_log_scan_pushes counter
+windows_mssql_databases_log_pool_log_scan_pushes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_log_writer_pushes (Databases.LogPoolLogWriterPushes)
+# TYPE windows_mssql_databases_log_pool_log_writer_pushes counter
+windows_mssql_databases_log_pool_log_writer_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_low_memory_pushes (Databases.LogPoolPushLowMemory)
+# TYPE windows_mssql_databases_log_pool_low_memory_pushes counter
+windows_mssql_databases_log_pool_low_memory_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_no_free_buffer_pushes (Databases.LogPoolPushNoFreeBuffer)
+# TYPE windows_mssql_databases_log_pool_no_free_buffer_pushes counter
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_req_behind_trunc (Databases.LogPoolReqBehindTrunc)
+# TYPE windows_mssql_databases_log_pool_req_behind_trunc counter
+windows_mssql_databases_log_pool_req_behind_trunc{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_requests (Databases.LogPoolRequests)
+# TYPE windows_mssql_databases_log_pool_requests counter
+windows_mssql_databases_log_pool_requests{database="master",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="model",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="msdb",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="tempdb",mssql_instance="SQLEXPRESS"} 4
+# HELP windows_mssql_databases_log_pool_requests_old_vlf (Databases.LogPoolRequestsOldVLF)
+# TYPE windows_mssql_databases_log_pool_requests_old_vlf counter
+windows_mssql_databases_log_pool_requests_old_vlf{database="master",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="model",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="msdb",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_pool_total_active_log_bytes (Databases.LogPoolTotalActiveLogSize)
+# TYPE windows_mssql_databases_log_pool_total_active_log_bytes gauge
+windows_mssql_databases_log_pool_total_active_log_bytes{database="master",mssql_instance="SQLEXPRESS"} 806912
+windows_mssql_databases_log_pool_total_active_log_bytes{database="model",mssql_instance="SQLEXPRESS"} 1.855488e+06
+windows_mssql_databases_log_pool_total_active_log_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 118784
+windows_mssql_databases_log_pool_total_active_log_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 107008
+windows_mssql_databases_log_pool_total_active_log_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 2.142208e+06
+# HELP windows_mssql_databases_log_pool_total_shared_pool_bytes (Databases.LogPoolTotalSharedPoolSize)
+# TYPE windows_mssql_databases_log_pool_total_shared_pool_bytes gauge
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 16384
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 67584
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 4096
+# HELP windows_mssql_databases_log_shrinks (Databases.LogShrinks)
+# TYPE windows_mssql_databases_log_shrinks gauge
+windows_mssql_databases_log_shrinks{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_truncations (Databases.LogTruncations)
+# TYPE windows_mssql_databases_log_truncations gauge
+windows_mssql_databases_log_truncations{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_truncations{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_used_percent (Databases.PercentLogUsed)
+# TYPE windows_mssql_databases_log_used_percent gauge
+windows_mssql_databases_log_used_percent{database="master",mssql_instance="SQLEXPRESS"} 57
+windows_mssql_databases_log_used_percent{database="model",mssql_instance="SQLEXPRESS"} 6
+windows_mssql_databases_log_used_percent{database="msdb",mssql_instance="SQLEXPRESS"} 68
+windows_mssql_databases_log_used_percent{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 49
+windows_mssql_databases_log_used_percent{database="tempdb",mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_databases_pending_repl_transactions (Databases.ReplPendingTransactions)
+# TYPE windows_mssql_databases_pending_repl_transactions gauge
+windows_mssql_databases_pending_repl_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_repl_transactions (Databases.ReplTranactions)
+# TYPE windows_mssql_databases_repl_transactions counter
+windows_mssql_databases_repl_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_shrink_data_movement_bytes (Databases.ShrinkDataMovementBytes)
+# TYPE windows_mssql_databases_shrink_data_movement_bytes counter
+windows_mssql_databases_shrink_data_movement_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_tracked_transactions (Databases.Trackedtransactions)
+# TYPE windows_mssql_databases_tracked_transactions counter
+windows_mssql_databases_tracked_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_transactions (Databases.Transactions)
+# TYPE windows_mssql_databases_transactions counter
+windows_mssql_databases_transactions{database="master",mssql_instance="SQLEXPRESS"} 2183
+windows_mssql_databases_transactions{database="model",mssql_instance="SQLEXPRESS"} 4467
+windows_mssql_databases_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 4582
+windows_mssql_databases_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 1558
+# HELP windows_mssql_databases_write_transactions (Databases.WriteTransactions)
+# TYPE windows_mssql_databases_write_transactions counter
+windows_mssql_databases_write_transactions{database="master",mssql_instance="SQLEXPRESS"} 236
+windows_mssql_databases_write_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 29
+# HELP windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds (Databases.XTPControllerDLCLatencyPerFetch)
+# TYPE windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds gauge
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds (Databases.XTPControllerDLCPeakLatency)
+# TYPE windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds gauge
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_controller_log_processed_bytes (Databases.XTPControllerLogProcessed)
+# TYPE windows_mssql_databases_xtp_controller_log_processed_bytes counter
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_memory_used_bytes (Databases.XTPMemoryUsedKB)
+# TYPE windows_mssql_databases_xtp_memory_used_bytes gauge
+windows_mssql_databases_xtp_memory_used_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_active_temp_tables (GeneralStatistics.ActiveTempTables)
+# TYPE windows_mssql_genstats_active_temp_tables gauge
+windows_mssql_genstats_active_temp_tables{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_blocked_processes (GeneralStatistics.Processesblocked)
+# TYPE windows_mssql_genstats_blocked_processes gauge
+windows_mssql_genstats_blocked_processes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_connection_resets (GeneralStatistics.ConnectionReset)
+# TYPE windows_mssql_genstats_connection_resets counter
+windows_mssql_genstats_connection_resets{mssql_instance="SQLEXPRESS"} 1108
+# HELP windows_mssql_genstats_event_notifications_delayed_drop (GeneralStatistics.EventNotificationsDelayedDrop)
+# TYPE windows_mssql_genstats_event_notifications_delayed_drop gauge
+windows_mssql_genstats_event_notifications_delayed_drop{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_http_authenticated_requests (GeneralStatistics.HTTPAuthenticatedRequests)
+# TYPE windows_mssql_genstats_http_authenticated_requests gauge
+windows_mssql_genstats_http_authenticated_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_logical_connections (GeneralStatistics.LogicalConnections)
+# TYPE windows_mssql_genstats_logical_connections gauge
+windows_mssql_genstats_logical_connections{mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_genstats_logins (GeneralStatistics.Logins)
+# TYPE windows_mssql_genstats_logins counter
+windows_mssql_genstats_logins{mssql_instance="SQLEXPRESS"} 378
+# HELP windows_mssql_genstats_logouts (GeneralStatistics.Logouts)
+# TYPE windows_mssql_genstats_logouts counter
+windows_mssql_genstats_logouts{mssql_instance="SQLEXPRESS"} 377
+# HELP windows_mssql_genstats_mars_deadlocks (GeneralStatistics.MarsDeadlocks)
+# TYPE windows_mssql_genstats_mars_deadlocks gauge
+windows_mssql_genstats_mars_deadlocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_non_atomic_yields (GeneralStatistics.Nonatomicyields)
+# TYPE windows_mssql_genstats_non_atomic_yields counter
+windows_mssql_genstats_non_atomic_yields{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_empty_requests (GeneralStatistics.SOAPEmptyRequests)
+# TYPE windows_mssql_genstats_soap_empty_requests gauge
+windows_mssql_genstats_soap_empty_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_method_invocations (GeneralStatistics.SOAPMethodInvocations)
+# TYPE windows_mssql_genstats_soap_method_invocations gauge
+windows_mssql_genstats_soap_method_invocations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_session_initiate_requests (GeneralStatistics.SOAPSessionInitiateRequests)
+# TYPE windows_mssql_genstats_soap_session_initiate_requests gauge
+windows_mssql_genstats_soap_session_initiate_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_session_terminate_requests (GeneralStatistics.SOAPSessionTerminateRequests)
+# TYPE windows_mssql_genstats_soap_session_terminate_requests gauge
+windows_mssql_genstats_soap_session_terminate_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soapsql_requests (GeneralStatistics.SOAPSQLRequests)
+# TYPE windows_mssql_genstats_soapsql_requests gauge
+windows_mssql_genstats_soapsql_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soapwsdl_requests (GeneralStatistics.SOAPWSDLRequests)
+# TYPE windows_mssql_genstats_soapwsdl_requests gauge
+windows_mssql_genstats_soapwsdl_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_sql_trace_io_provider_lock_waits (GeneralStatistics.SQLTraceIOProviderLockWaits)
+# TYPE windows_mssql_genstats_sql_trace_io_provider_lock_waits gauge
+windows_mssql_genstats_sql_trace_io_provider_lock_waits{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_temp_tables_awaiting_destruction (GeneralStatistics.TempTablesForDestruction)
+# TYPE windows_mssql_genstats_temp_tables_awaiting_destruction gauge
+windows_mssql_genstats_temp_tables_awaiting_destruction{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_temp_tables_creations (GeneralStatistics.TempTablesCreations)
+# TYPE windows_mssql_genstats_temp_tables_creations counter
+windows_mssql_genstats_temp_tables_creations{mssql_instance="SQLEXPRESS"} 4
+# HELP windows_mssql_genstats_tempdb_recovery_unit_ids_generated (GeneralStatistics.Tempdbrecoveryunitid)
+# TYPE windows_mssql_genstats_tempdb_recovery_unit_ids_generated gauge
+windows_mssql_genstats_tempdb_recovery_unit_ids_generated{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_tempdb_rowset_ids_generated (GeneralStatistics.Tempdbrowsetid)
+# TYPE windows_mssql_genstats_tempdb_rowset_ids_generated gauge
+windows_mssql_genstats_tempdb_rowset_ids_generated{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_trace_event_notification_queue_size (GeneralStatistics.TraceEventNotificationQueue)
+# TYPE windows_mssql_genstats_trace_event_notification_queue_size gauge
+windows_mssql_genstats_trace_event_notification_queue_size{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_transactions (GeneralStatistics.Transactions)
+# TYPE windows_mssql_genstats_transactions gauge
+windows_mssql_genstats_transactions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_user_connections (GeneralStatistics.UserConnections)
+# TYPE windows_mssql_genstats_user_connections gauge
+windows_mssql_genstats_user_connections{mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_locks_count (Locks.AverageWaitTimems_Base count of how often requests have run into locks)
+# TYPE windows_mssql_locks_count gauge
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Database"} 0.002
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.001
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_deadlocks (Locks.NumberofDeadlocks)
+# TYPE windows_mssql_locks_deadlocks counter
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Database"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_requests (Locks.LockRequests)
+# TYPE windows_mssql_locks_lock_requests counter
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Database"} 204467
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Extent"} 402
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="File"} 19
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="HoBT"} 28
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Key"} 1.681875e+06
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Metadata"} 25785
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Object"} 760875
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Page"} 757
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="RID"} 123
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_timeouts (Locks.LockTimeouts)
+# TYPE windows_mssql_locks_lock_timeouts counter
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Database"} 4
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Key"} 216
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_timeouts_excluding_NOWAIT (Locks.LockTimeoutstimeout0)
+# TYPE windows_mssql_locks_lock_timeouts_excluding_NOWAIT counter
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Database"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_wait_seconds (Locks.LockWaitTimems)
+# TYPE windows_mssql_locks_lock_wait_seconds gauge
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Database"} 0.391
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.015
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_waits (Locks.LockWaits)
+# TYPE windows_mssql_locks_lock_waits counter
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Database"} 2
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Metadata"} 1
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_wait_time_seconds (Locks.AverageWaitTimems Total time in seconds which locks have been holding resources)
+# TYPE windows_mssql_locks_wait_time_seconds gauge
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Database"} 0.391
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.015
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_memmgr_allocated_lock_blocks (MemoryManager.LockBlocksAllocated)
+# TYPE windows_mssql_memmgr_allocated_lock_blocks gauge
+windows_mssql_memmgr_allocated_lock_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_allocated_lock_owner_blocks (MemoryManager.LockOwnerBlocksAllocated)
+# TYPE windows_mssql_memmgr_allocated_lock_owner_blocks gauge
+windows_mssql_memmgr_allocated_lock_owner_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_connection_memory_bytes (MemoryManager.ConnectionMemoryKB)
+# TYPE windows_mssql_memmgr_connection_memory_bytes gauge
+windows_mssql_memmgr_connection_memory_bytes{mssql_instance="SQLEXPRESS"} 1.015808e+06
+# HELP windows_mssql_memmgr_database_cache_memory_bytes (MemoryManager.DatabaseCacheMemoryKB)
+# TYPE windows_mssql_memmgr_database_cache_memory_bytes gauge
+windows_mssql_memmgr_database_cache_memory_bytes{mssql_instance="SQLEXPRESS"} 6.791168e+06
+# HELP windows_mssql_memmgr_external_benefit_of_memory (MemoryManager.Externalbenefitofmemory)
+# TYPE windows_mssql_memmgr_external_benefit_of_memory gauge
+windows_mssql_memmgr_external_benefit_of_memory{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_free_memory_bytes (MemoryManager.FreeMemoryKB)
+# TYPE windows_mssql_memmgr_free_memory_bytes gauge
+windows_mssql_memmgr_free_memory_bytes{mssql_instance="SQLEXPRESS"} 1.9234816e+07
+# HELP windows_mssql_memmgr_granted_workspace_memory_bytes (MemoryManager.GrantedWorkspaceMemoryKB)
+# TYPE windows_mssql_memmgr_granted_workspace_memory_bytes gauge
+windows_mssql_memmgr_granted_workspace_memory_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_lock_blocks (MemoryManager.LockBlocks)
+# TYPE windows_mssql_memmgr_lock_blocks gauge
+windows_mssql_memmgr_lock_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_lock_memory_bytes (MemoryManager.LockMemoryKB)
+# TYPE windows_mssql_memmgr_lock_memory_bytes gauge
+windows_mssql_memmgr_lock_memory_bytes{mssql_instance="SQLEXPRESS"} 663552
+# HELP windows_mssql_memmgr_lock_owner_blocks (MemoryManager.LockOwnerBlocks)
+# TYPE windows_mssql_memmgr_lock_owner_blocks gauge
+windows_mssql_memmgr_lock_owner_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_log_pool_memory_bytes (MemoryManager.LogPoolMemoryKB)
+# TYPE windows_mssql_memmgr_log_pool_memory_bytes gauge
+windows_mssql_memmgr_log_pool_memory_bytes{mssql_instance="SQLEXPRESS"} 2.834432e+06
+# HELP windows_mssql_memmgr_maximum_workspace_memory_bytes (MemoryManager.MaximumWorkspaceMemoryKB)
+# TYPE windows_mssql_memmgr_maximum_workspace_memory_bytes gauge
+windows_mssql_memmgr_maximum_workspace_memory_bytes{mssql_instance="SQLEXPRESS"} 1.36482816e+09
+# HELP windows_mssql_memmgr_optimizer_memory_bytes (MemoryManager.OptimizerMemoryKB)
+# TYPE windows_mssql_memmgr_optimizer_memory_bytes gauge
+windows_mssql_memmgr_optimizer_memory_bytes{mssql_instance="SQLEXPRESS"} 1.007616e+06
+# HELP windows_mssql_memmgr_outstanding_memory_grants (MemoryManager.MemoryGrantsOutstanding)
+# TYPE windows_mssql_memmgr_outstanding_memory_grants gauge
+windows_mssql_memmgr_outstanding_memory_grants{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_pending_memory_grants (MemoryManager.MemoryGrantsPending)
+# TYPE windows_mssql_memmgr_pending_memory_grants gauge
+windows_mssql_memmgr_pending_memory_grants{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_reserved_server_memory_bytes (MemoryManager.ReservedServerMemoryKB)
+# TYPE windows_mssql_memmgr_reserved_server_memory_bytes gauge
+windows_mssql_memmgr_reserved_server_memory_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_sql_cache_memory_bytes (MemoryManager.SQLCacheMemoryKB)
+# TYPE windows_mssql_memmgr_sql_cache_memory_bytes gauge
+windows_mssql_memmgr_sql_cache_memory_bytes{mssql_instance="SQLEXPRESS"} 1.728512e+06
+# HELP windows_mssql_memmgr_stolen_server_memory_bytes (MemoryManager.StolenServerMemoryKB)
+# TYPE windows_mssql_memmgr_stolen_server_memory_bytes gauge
+windows_mssql_memmgr_stolen_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.7281024e+08
+# HELP windows_mssql_memmgr_target_server_memory_bytes (MemoryManager.TargetServerMemoryKB)
+# TYPE windows_mssql_memmgr_target_server_memory_bytes gauge
+windows_mssql_memmgr_target_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.816387584e+09
+# HELP windows_mssql_memmgr_total_server_memory_bytes (MemoryManager.TotalServerMemoryKB)
+# TYPE windows_mssql_memmgr_total_server_memory_bytes gauge
+windows_mssql_memmgr_total_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.98836224e+08
+# HELP windows_mssql_sql_errors_total (SQLErrors.Total)
+# TYPE windows_mssql_sql_errors_total counter
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="DB Offline Errors"} 0
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="Info Errors"} 766
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="Kill Connection Errors"} 0
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="User Errors"} 29
+# HELP windows_mssql_sqlstats_auto_parameterization_attempts (SQLStatistics.AutoParamAttempts)
+# TYPE windows_mssql_sqlstats_auto_parameterization_attempts counter
+windows_mssql_sqlstats_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 37
+# HELP windows_mssql_sqlstats_batch_requests (SQLStatistics.BatchRequests)
+# TYPE windows_mssql_sqlstats_batch_requests counter
+windows_mssql_sqlstats_batch_requests{mssql_instance="SQLEXPRESS"} 2972
+# HELP windows_mssql_sqlstats_failed_auto_parameterization_attempts (SQLStatistics.FailedAutoParams)
+# TYPE windows_mssql_sqlstats_failed_auto_parameterization_attempts counter
+windows_mssql_sqlstats_failed_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 29
+# HELP windows_mssql_sqlstats_forced_parameterizations (SQLStatistics.ForcedParameterizations)
+# TYPE windows_mssql_sqlstats_forced_parameterizations counter
+windows_mssql_sqlstats_forced_parameterizations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_guided_plan_executions (SQLStatistics.Guidedplanexecutions)
+# TYPE windows_mssql_sqlstats_guided_plan_executions counter
+windows_mssql_sqlstats_guided_plan_executions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_misguided_plan_executions (SQLStatistics.Misguidedplanexecutions)
+# TYPE windows_mssql_sqlstats_misguided_plan_executions counter
+windows_mssql_sqlstats_misguided_plan_executions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_safe_auto_parameterization_attempts (SQLStatistics.SafeAutoParams)
+# TYPE windows_mssql_sqlstats_safe_auto_parameterization_attempts counter
+windows_mssql_sqlstats_safe_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_sqlstats_sql_attentions (SQLStatistics.SQLAttentions)
+# TYPE windows_mssql_sqlstats_sql_attentions counter
+windows_mssql_sqlstats_sql_attentions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_sql_compilations (SQLStatistics.SQLCompilations)
+# TYPE windows_mssql_sqlstats_sql_compilations counter
+windows_mssql_sqlstats_sql_compilations{mssql_instance="SQLEXPRESS"} 376
+# HELP windows_mssql_sqlstats_sql_recompilations (SQLStatistics.SQLReCompilations)
+# TYPE windows_mssql_sqlstats_sql_recompilations counter
+windows_mssql_sqlstats_sql_recompilations{mssql_instance="SQLEXPRESS"} 8
+# HELP windows_mssql_sqlstats_unsafe_auto_parameterization_attempts (SQLStatistics.UnsafeAutoParams)
+# TYPE windows_mssql_sqlstats_unsafe_auto_parameterization_attempts counter
+windows_mssql_sqlstats_unsafe_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_transactions_active (Transactions.Transactions)
+# TYPE windows_mssql_transactions_active gauge
+windows_mssql_transactions_active{mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_transactions_longest_transaction_running_seconds (Transactions.LongestTransactionRunningTime)
+# TYPE windows_mssql_transactions_longest_transaction_running_seconds gauge
+windows_mssql_transactions_longest_transaction_running_seconds{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_nonsnapshot_version_active_total (Transactions.NonSnapshotVersionTransactions)
+# TYPE windows_mssql_transactions_nonsnapshot_version_active_total counter
+windows_mssql_transactions_nonsnapshot_version_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_snapshot_active_total (Transactions.SnapshotTransactions)
+# TYPE windows_mssql_transactions_snapshot_active_total counter
+windows_mssql_transactions_snapshot_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_tempdb_free_space_bytes (Transactions.FreeSpaceInTempDbKB)
+# TYPE windows_mssql_transactions_tempdb_free_space_bytes gauge
+windows_mssql_transactions_tempdb_free_space_bytes{mssql_instance="SQLEXPRESS"} 5.046272e+06
+# HELP windows_mssql_transactions_update_conflicts_total (Transactions.UpdateConflictRatio)
+# TYPE windows_mssql_transactions_update_conflicts_total counter
+windows_mssql_transactions_update_conflicts_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_update_snapshot_active_total (Transactions.UpdateSnapshotTransactions)
+# TYPE windows_mssql_transactions_update_snapshot_active_total counter
+windows_mssql_transactions_update_snapshot_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_cleanup_rate_bytes (Transactions.VersionCleanupRateKBs)
+# TYPE windows_mssql_transactions_version_cleanup_rate_bytes gauge
+windows_mssql_transactions_version_cleanup_rate_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_generation_rate_bytes (Transactions.VersionGenerationRateKBs)
+# TYPE windows_mssql_transactions_version_generation_rate_bytes gauge
+windows_mssql_transactions_version_generation_rate_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_creation_units (Transactions.VersionStoreUnitCreation)
+# TYPE windows_mssql_transactions_version_store_creation_units counter
+windows_mssql_transactions_version_store_creation_units{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_transactions_version_store_size_bytes (Transactions.VersionStoreSizeKB)
+# TYPE windows_mssql_transactions_version_store_size_bytes gauge
+windows_mssql_transactions_version_store_size_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_truncation_units (Transactions.VersionStoreUnitTruncation)
+# TYPE windows_mssql_transactions_version_store_truncation_units counter
+windows_mssql_transactions_version_store_truncation_units{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_units (Transactions.VersionStoreUnitCount)
+# TYPE windows_mssql_transactions_version_store_units counter
+windows_mssql_transactions_version_store_units{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_waitstats_lock_waits (WaitStats.LockWaits)
+# TYPE windows_mssql_waitstats_lock_waits counter
+windows_mssql_waitstats_lock_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_log_buffer_waits (WaitStats.LogBufferWaits)
+# TYPE windows_mssql_waitstats_log_buffer_waits counter
+windows_mssql_waitstats_log_buffer_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_log_write_waits (WaitStats.LogWriteWaits)
+# TYPE windows_mssql_waitstats_log_write_waits counter
+windows_mssql_waitstats_log_write_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_memory_grant_queue_waits (WaitStats.MemoryGrantQueueWaits)
+# TYPE windows_mssql_waitstats_memory_grant_queue_waits counter
+windows_mssql_waitstats_memory_grant_queue_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_network_io_waits (WaitStats.NetworkIOWaits)
+# TYPE windows_mssql_waitstats_network_io_waits counter
+windows_mssql_waitstats_network_io_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_nonpage_latch_waits (WaitStats.NonpageLatchWaits)
+# TYPE windows_mssql_waitstats_nonpage_latch_waits counter
+windows_mssql_waitstats_nonpage_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_page_io_latch_waits (WaitStats.PageIOLatchWaits)
+# TYPE windows_mssql_waitstats_page_io_latch_waits counter
+windows_mssql_waitstats_page_io_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_page_latch_waits (WaitStats.PageLatchWaits)
+# TYPE windows_mssql_waitstats_page_latch_waits counter
+windows_mssql_waitstats_page_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_thread_safe_memory_objects_waits (WaitStats.ThreadSafeMemoryObjectsWaits)
+# TYPE windows_mssql_waitstats_thread_safe_memory_objects_waits counter
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_transaction_ownership_waits (WaitStats.TransactionOwnershipWaits)
+# TYPE windows_mssql_waitstats_transaction_ownership_waits counter
+windows_mssql_waitstats_transaction_ownership_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_wait_for_the_worker_waits (WaitStats.WaitForTheWorkerWaits)
+# TYPE windows_mssql_waitstats_wait_for_the_worker_waits counter
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_workspace_synchronization_waits (WaitStats.WorkspaceSynchronizationWaits)
+# TYPE windows_mssql_waitstats_workspace_synchronization_waits counter
+windows_mssql_waitstats_workspace_synchronization_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_net_bytes_received_total (Network.BytesReceivedPerSec)
+# TYPE windows_net_bytes_received_total counter
+windows_net_bytes_received_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 4.786344482e+09
+# HELP windows_net_bytes_sent_total (Network.BytesSentPerSec)
+# TYPE windows_net_bytes_sent_total counter
+windows_net_bytes_sent_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.026395688e+09
+# HELP windows_net_bytes_total (Network.BytesTotalPerSec)
+# TYPE windows_net_bytes_total counter
+windows_net_bytes_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 5.81274017e+09
+# HELP windows_net_current_bandwidth_bytes (Network.CurrentBandwidth)
+# TYPE windows_net_current_bandwidth_bytes gauge
+windows_net_current_bandwidth_bytes{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.25e+08
+# HELP windows_net_packets_outbound_discarded_total (Network.PacketsOutboundDiscarded)
+# TYPE windows_net_packets_outbound_discarded_total counter
+windows_net_packets_outbound_discarded_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_outbound_errors_total (Network.PacketsOutboundErrors)
+# TYPE windows_net_packets_outbound_errors_total counter
+windows_net_packets_outbound_errors_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_discarded_total (Network.PacketsReceivedDiscarded)
+# TYPE windows_net_packets_received_discarded_total counter
+windows_net_packets_received_discarded_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_errors_total (Network.PacketsReceivedErrors)
+# TYPE windows_net_packets_received_errors_total counter
+windows_net_packets_received_errors_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_total (Network.PacketsReceivedPerSec)
+# TYPE windows_net_packets_received_total counter
+windows_net_packets_received_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 4.120869e+06
+# HELP windows_net_packets_received_unknown_total (Network.PacketsReceivedUnknown)
+# TYPE windows_net_packets_received_unknown_total counter
+windows_net_packets_received_unknown_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_sent_total (Network.PacketsSentPerSec)
+# TYPE windows_net_packets_sent_total counter
+windows_net_packets_sent_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.332466e+06
+# HELP windows_net_packets_total (Network.PacketsPerSec)
+# TYPE windows_net_packets_total counter
+windows_net_packets_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 5.453335e+06
+# HELP windows_netframework_clrexceptions_exceptions_filters_total Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.
+# TYPE windows_netframework_clrexceptions_exceptions_filters_total counter
+windows_netframework_clrexceptions_exceptions_filters_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_filters_total{process="powershell"} 0
+# HELP windows_netframework_clrexceptions_exceptions_finallys_total Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.
+# TYPE windows_netframework_clrexceptions_exceptions_finallys_total counter
+windows_netframework_clrexceptions_exceptions_finallys_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_finallys_total{process="powershell"} 56
+# HELP windows_netframework_clrexceptions_exceptions_thrown_total Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.
+# TYPE windows_netframework_clrexceptions_exceptions_thrown_total counter
+windows_netframework_clrexceptions_exceptions_thrown_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_thrown_total{process="powershell"} 37
+# HELP windows_netframework_clrexceptions_throw_to_catch_depth_total Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.
+# TYPE windows_netframework_clrexceptions_throw_to_catch_depth_total counter
+windows_netframework_clrexceptions_throw_to_catch_depth_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_throw_to_catch_depth_total{process="powershell"} 140
+# HELP windows_netframework_clrinterop_com_callable_wrappers_total Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.
+# TYPE windows_netframework_clrinterop_com_callable_wrappers_total counter
+windows_netframework_clrinterop_com_callable_wrappers_total{process="WMSvc"} 2
+windows_netframework_clrinterop_com_callable_wrappers_total{process="powershell"} 5
+# HELP windows_netframework_clrinterop_interop_marshalling_total Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.
+# TYPE windows_netframework_clrinterop_interop_marshalling_total counter
+windows_netframework_clrinterop_interop_marshalling_total{process="WMSvc"} 0
+windows_netframework_clrinterop_interop_marshalling_total{process="powershell"} 0
+# HELP windows_netframework_clrinterop_interop_stubs_created_total Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.
+# TYPE windows_netframework_clrinterop_interop_stubs_created_total counter
+windows_netframework_clrinterop_interop_stubs_created_total{process="WMSvc"} 29
+windows_netframework_clrinterop_interop_stubs_created_total{process="powershell"} 345
+# HELP windows_netframework_clrjit_jit_il_bytes_total Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started
+# TYPE windows_netframework_clrjit_jit_il_bytes_total counter
+windows_netframework_clrjit_jit_il_bytes_total{process="WMSvc"} 4007
+windows_netframework_clrjit_jit_il_bytes_total{process="powershell"} 47021
+# HELP windows_netframework_clrjit_jit_methods_total Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.
+# TYPE windows_netframework_clrjit_jit_methods_total counter
+windows_netframework_clrjit_jit_methods_total{process="WMSvc"} 27
+windows_netframework_clrjit_jit_methods_total{process="powershell"} 344
+# HELP windows_netframework_clrjit_jit_standard_failures_total Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.
+# TYPE windows_netframework_clrjit_jit_standard_failures_total gauge
+windows_netframework_clrjit_jit_standard_failures_total{process="WMSvc"} 0
+windows_netframework_clrjit_jit_standard_failures_total{process="powershell"} 0
+# HELP windows_netframework_clrjit_jit_time_percent Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.
+# TYPE windows_netframework_clrjit_jit_time_percent gauge
+windows_netframework_clrjit_jit_time_percent{process="WMSvc"} 0
+windows_netframework_clrjit_jit_time_percent{process="powershell"} 0
+# HELP windows_netframework_clrloading_appdomains_loaded_current Displays the current number of application domains loaded in this application.
+# TYPE windows_netframework_clrloading_appdomains_loaded_current gauge
+windows_netframework_clrloading_appdomains_loaded_current{process="WMSvc"} 1
+windows_netframework_clrloading_appdomains_loaded_current{process="powershell"} 1
+# HELP windows_netframework_clrloading_appdomains_loaded_total Displays the peak number of application domains loaded since the application started.
+# TYPE windows_netframework_clrloading_appdomains_loaded_total counter
+windows_netframework_clrloading_appdomains_loaded_total{process="WMSvc"} 1
+windows_netframework_clrloading_appdomains_loaded_total{process="powershell"} 1
+# HELP windows_netframework_clrloading_appdomains_unloaded_total Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.
+# TYPE windows_netframework_clrloading_appdomains_unloaded_total counter
+windows_netframework_clrloading_appdomains_unloaded_total{process="WMSvc"} 0
+windows_netframework_clrloading_appdomains_unloaded_total{process="powershell"} 0
+# HELP windows_netframework_clrloading_assemblies_loaded_current Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.
+# TYPE windows_netframework_clrloading_assemblies_loaded_current gauge
+windows_netframework_clrloading_assemblies_loaded_current{process="WMSvc"} 5
+windows_netframework_clrloading_assemblies_loaded_current{process="powershell"} 20
+# HELP windows_netframework_clrloading_assemblies_loaded_total Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.
+# TYPE windows_netframework_clrloading_assemblies_loaded_total counter
+windows_netframework_clrloading_assemblies_loaded_total{process="WMSvc"} 5
+windows_netframework_clrloading_assemblies_loaded_total{process="powershell"} 20
+# HELP windows_netframework_clrloading_class_load_failures_total Displays the peak number of classes that have failed to load since the application started.
+# TYPE windows_netframework_clrloading_class_load_failures_total counter
+windows_netframework_clrloading_class_load_failures_total{process="WMSvc"} 0
+windows_netframework_clrloading_class_load_failures_total{process="powershell"} 1
+# HELP windows_netframework_clrloading_classes_loaded_current Displays the current number of classes loaded in all assemblies.
+# TYPE windows_netframework_clrloading_classes_loaded_current gauge
+windows_netframework_clrloading_classes_loaded_current{process="WMSvc"} 18
+windows_netframework_clrloading_classes_loaded_current{process="powershell"} 477
+# HELP windows_netframework_clrloading_classes_loaded_total Displays the cumulative number of classes loaded in all assemblies since the application started.
+# TYPE windows_netframework_clrloading_classes_loaded_total counter
+windows_netframework_clrloading_classes_loaded_total{process="WMSvc"} 18
+windows_netframework_clrloading_classes_loaded_total{process="powershell"} 477
+# HELP windows_netframework_clrloading_loader_heap_size_bytes Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.
+# TYPE windows_netframework_clrloading_loader_heap_size_bytes gauge
+windows_netframework_clrloading_loader_heap_size_bytes{process="WMSvc"} 270336
+windows_netframework_clrloading_loader_heap_size_bytes{process="powershell"} 2.285568e+06
+# HELP windows_netframework_clrlocksandthreads_contentions_total Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.
+# TYPE windows_netframework_clrlocksandthreads_contentions_total counter
+windows_netframework_clrlocksandthreads_contentions_total{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_contentions_total{process="powershell"} 10
+# HELP windows_netframework_clrlocksandthreads_current_logical_threads Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads.
+# TYPE windows_netframework_clrlocksandthreads_current_logical_threads gauge
+windows_netframework_clrlocksandthreads_current_logical_threads{process="WMSvc"} 2
+windows_netframework_clrlocksandthreads_current_logical_threads{process="powershell"} 16
+# HELP windows_netframework_clrlocksandthreads_current_queue_length Displays the total number of threads that are currently waiting to acquire a managed lock in the application.
+# TYPE windows_netframework_clrlocksandthreads_current_queue_length gauge
+windows_netframework_clrlocksandthreads_current_queue_length{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_current_queue_length{process="powershell"} 0
+# HELP windows_netframework_clrlocksandthreads_physical_threads_current Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.
+# TYPE windows_netframework_clrlocksandthreads_physical_threads_current gauge
+windows_netframework_clrlocksandthreads_physical_threads_current{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_physical_threads_current{process="powershell"} 13
+# HELP windows_netframework_clrlocksandthreads_queue_length_total Displays the total number of threads that waited to acquire a managed lock since the application started.
+# TYPE windows_netframework_clrlocksandthreads_queue_length_total counter
+windows_netframework_clrlocksandthreads_queue_length_total{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_queue_length_total{process="powershell"} 3
+# HELP windows_netframework_clrlocksandthreads_recognized_threads_current Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.
+# TYPE windows_netframework_clrlocksandthreads_recognized_threads_current gauge
+windows_netframework_clrlocksandthreads_recognized_threads_current{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_recognized_threads_current{process="powershell"} 3
+# HELP windows_netframework_clrlocksandthreads_recognized_threads_total Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.
+# TYPE windows_netframework_clrlocksandthreads_recognized_threads_total counter
+windows_netframework_clrlocksandthreads_recognized_threads_total{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_recognized_threads_total{process="powershell"} 6
+# HELP windows_netframework_clrmemory_allocated_bytes_total Displays the total number of bytes allocated on the garbage collection heap.
+# TYPE windows_netframework_clrmemory_allocated_bytes_total counter
+windows_netframework_clrmemory_allocated_bytes_total{process="WMSvc"} 227792
+windows_netframework_clrmemory_allocated_bytes_total{process="powershell"} 4.63338e+07
+# HELP windows_netframework_clrmemory_collections_total Displays the number of times the generation objects are garbage collected since the application started.
+# TYPE windows_netframework_clrmemory_collections_total counter
+windows_netframework_clrmemory_collections_total{area="Gen0",process="WMSvc"} 1
+windows_netframework_clrmemory_collections_total{area="Gen0",process="powershell"} 7
+windows_netframework_clrmemory_collections_total{area="Gen1",process="WMSvc"} 1
+windows_netframework_clrmemory_collections_total{area="Gen1",process="powershell"} 3
+windows_netframework_clrmemory_collections_total{area="Gen2",process="WMSvc"} 0
+windows_netframework_clrmemory_collections_total{area="Gen2",process="powershell"} 1
+# HELP windows_netframework_clrmemory_committed_bytes Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.
+# TYPE windows_netframework_clrmemory_committed_bytes gauge
+windows_netframework_clrmemory_committed_bytes{process="WMSvc"} 270336
+windows_netframework_clrmemory_committed_bytes{process="powershell"} 2.0475904e+07
+# HELP windows_netframework_clrmemory_finalization_survivors Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.
+# TYPE windows_netframework_clrmemory_finalization_survivors gauge
+windows_netframework_clrmemory_finalization_survivors{process="WMSvc"} 7
+windows_netframework_clrmemory_finalization_survivors{process="powershell"} 244
+# HELP windows_netframework_clrmemory_gc_time_percent Displays the percentage of time that was spent performing a garbage collection in the last sample.
+# TYPE windows_netframework_clrmemory_gc_time_percent gauge
+windows_netframework_clrmemory_gc_time_percent{process="WMSvc"} 0
+windows_netframework_clrmemory_gc_time_percent{process="powershell"} 0.00027784979937050934
+# HELP windows_netframework_clrmemory_heap_size_bytes Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.
+# TYPE windows_netframework_clrmemory_heap_size_bytes gauge
+windows_netframework_clrmemory_heap_size_bytes{area="Gen0",process="WMSvc"} 4.194304e+06
+windows_netframework_clrmemory_heap_size_bytes{area="Gen0",process="powershell"} 2.6417392e+07
+windows_netframework_clrmemory_heap_size_bytes{area="Gen1",process="WMSvc"} 50200
+windows_netframework_clrmemory_heap_size_bytes{area="Gen1",process="powershell"} 122776
+windows_netframework_clrmemory_heap_size_bytes{area="Gen2",process="WMSvc"} 24
+windows_netframework_clrmemory_heap_size_bytes{area="Gen2",process="powershell"} 6.71388e+06
+windows_netframework_clrmemory_heap_size_bytes{area="LOH",process="WMSvc"} 68168
+windows_netframework_clrmemory_heap_size_bytes{area="LOH",process="powershell"} 1.457824e+06
+# HELP windows_netframework_clrmemory_induced_gc_total Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.
+# TYPE windows_netframework_clrmemory_induced_gc_total counter
+windows_netframework_clrmemory_induced_gc_total{process="WMSvc"} 0
+windows_netframework_clrmemory_induced_gc_total{process="powershell"} 0
+# HELP windows_netframework_clrmemory_number_gc_handles Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.
+# TYPE windows_netframework_clrmemory_number_gc_handles gauge
+windows_netframework_clrmemory_number_gc_handles{process="WMSvc"} 24
+windows_netframework_clrmemory_number_gc_handles{process="powershell"} 834
+# HELP windows_netframework_clrmemory_number_pinned_objects Displays the number of pinned objects encountered in the last garbage collection.
+# TYPE windows_netframework_clrmemory_number_pinned_objects gauge
+windows_netframework_clrmemory_number_pinned_objects{process="WMSvc"} 1
+windows_netframework_clrmemory_number_pinned_objects{process="powershell"} 0
+# HELP windows_netframework_clrmemory_number_sink_blocksinuse Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.
+# TYPE windows_netframework_clrmemory_number_sink_blocksinuse gauge
+windows_netframework_clrmemory_number_sink_blocksinuse{process="WMSvc"} 1
+windows_netframework_clrmemory_number_sink_blocksinuse{process="powershell"} 42
+# HELP windows_netframework_clrmemory_promoted_bytes Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.
+# TYPE windows_netframework_clrmemory_promoted_bytes gauge
+windows_netframework_clrmemory_promoted_bytes{area="Gen0",process="WMSvc"} 49720
+windows_netframework_clrmemory_promoted_bytes{area="Gen0",process="powershell"} 107352
+windows_netframework_clrmemory_promoted_bytes{area="Gen1",process="WMSvc"} 0
+windows_netframework_clrmemory_promoted_bytes{area="Gen1",process="powershell"} 0
+# HELP windows_netframework_clrmemory_reserved_bytes Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.
+# TYPE windows_netframework_clrmemory_reserved_bytes gauge
+windows_netframework_clrmemory_reserved_bytes{process="WMSvc"} 4.02644992e+08
+windows_netframework_clrmemory_reserved_bytes{process="powershell"} 4.02644992e+08
+# HELP windows_netframework_clrremoting_channels_total Displays the total number of remoting channels registered across all application domains since application started.
+# TYPE windows_netframework_clrremoting_channels_total counter
+windows_netframework_clrremoting_channels_total{process="WMSvc"} 0
+windows_netframework_clrremoting_channels_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_bound_classes_loaded Displays the current number of context-bound classes that are loaded.
+# TYPE windows_netframework_clrremoting_context_bound_classes_loaded gauge
+windows_netframework_clrremoting_context_bound_classes_loaded{process="WMSvc"} 0
+windows_netframework_clrremoting_context_bound_classes_loaded{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_bound_objects_total Displays the total number of context-bound objects allocated.
+# TYPE windows_netframework_clrremoting_context_bound_objects_total counter
+windows_netframework_clrremoting_context_bound_objects_total{process="WMSvc"} 0
+windows_netframework_clrremoting_context_bound_objects_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_proxies_total Displays the total number of remoting proxy objects in this process since it started.
+# TYPE windows_netframework_clrremoting_context_proxies_total counter
+windows_netframework_clrremoting_context_proxies_total{process="WMSvc"} 0
+windows_netframework_clrremoting_context_proxies_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_contexts Displays the current number of remoting contexts in the application.
+# TYPE windows_netframework_clrremoting_contexts gauge
+windows_netframework_clrremoting_contexts{process="WMSvc"} 1
+windows_netframework_clrremoting_contexts{process="powershell"} 1
+# HELP windows_netframework_clrremoting_remote_calls_total Displays the total number of remote procedure calls invoked since the application started.
+# TYPE windows_netframework_clrremoting_remote_calls_total counter
+windows_netframework_clrremoting_remote_calls_total{process="WMSvc"} 0
+windows_netframework_clrremoting_remote_calls_total{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_link_time_checks_total Displays the total number of link-time code access security checks since the application started.
+# TYPE windows_netframework_clrsecurity_link_time_checks_total counter
+windows_netframework_clrsecurity_link_time_checks_total{process="WMSvc"} 0
+windows_netframework_clrsecurity_link_time_checks_total{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_rt_checks_time_percent Displays the percentage of time spent performing runtime code access security checks in the last sample.
+# TYPE windows_netframework_clrsecurity_rt_checks_time_percent gauge
+windows_netframework_clrsecurity_rt_checks_time_percent{process="WMSvc"} 0
+windows_netframework_clrsecurity_rt_checks_time_percent{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_runtime_checks_total Displays the total number of runtime code access security checks performed since the application started.
+# TYPE windows_netframework_clrsecurity_runtime_checks_total counter
+windows_netframework_clrsecurity_runtime_checks_total{process="WMSvc"} 3
+windows_netframework_clrsecurity_runtime_checks_total{process="powershell"} 4386
+# HELP windows_netframework_clrsecurity_stack_walk_depth Displays the depth of the stack during that last runtime code access security check.
+# TYPE windows_netframework_clrsecurity_stack_walk_depth gauge
+windows_netframework_clrsecurity_stack_walk_depth{process="WMSvc"} 1
+windows_netframework_clrsecurity_stack_walk_depth{process="powershell"} 1
+# HELP windows_os_info OperatingSystem.Caption, OperatingSystem.Version
+# TYPE windows_os_info gauge
+windows_os_info{build_number="22621",major_version="10",minor_version="0",product="Microsoft Windows 10 Pro",version="10.0.22621"} 1
+# HELP windows_os_paging_free_bytes OperatingSystem.FreeSpaceInPagingFiles
+# TYPE windows_os_paging_free_bytes gauge
+windows_os_paging_free_bytes 1.414107136e+09
+# HELP windows_os_paging_limit_bytes OperatingSystem.SizeStoredInPagingFiles
+# TYPE windows_os_paging_limit_bytes gauge
+windows_os_paging_limit_bytes 1.476395008e+09
+# HELP windows_os_physical_memory_free_bytes OperatingSystem.FreePhysicalMemory
+# TYPE windows_os_physical_memory_free_bytes gauge
+windows_os_physical_memory_free_bytes 1.379946496e+09
+# HELP windows_os_process_memory_limit_bytes OperatingSystem.MaxProcessMemorySize
+# TYPE windows_os_process_memory_limit_bytes gauge
+windows_os_process_memory_limit_bytes 1.40737488224256e+14
+# HELP windows_os_processes OperatingSystem.NumberOfProcesses
+# TYPE windows_os_processes gauge
+windows_os_processes 152
+# HELP windows_os_processes_limit OperatingSystem.MaxNumberOfProcesses
+# TYPE windows_os_processes_limit gauge
+windows_os_processes_limit 4.294967295e+09
+# HELP windows_os_time OperatingSystem.LocalDateTime
+# TYPE windows_os_time gauge
+windows_os_time 1.667508748e+09
+# HELP windows_os_timezone OperatingSystem.LocalDateTime
+# TYPE windows_os_timezone gauge
+windows_os_timezone{timezone="EET"} 1
+# HELP windows_os_users OperatingSystem.NumberOfUsers
+# TYPE windows_os_users gauge
+windows_os_users 2
+# HELP windows_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
+# TYPE windows_os_virtual_memory_bytes gauge
+windows_os_virtual_memory_bytes 5.733113856e+09
+# HELP windows_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
+# TYPE windows_os_virtual_memory_free_bytes gauge
+windows_os_virtual_memory_free_bytes 2.285674496e+09
+# HELP windows_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
+# TYPE windows_os_visible_memory_bytes gauge
+windows_os_visible_memory_bytes 4.256718848e+09
+# HELP windows_process_cpu_time_total Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).
+# TYPE windows_process_cpu_time_total counter
+windows_process_cpu_time_total{creating_process_id="4300",mode="privileged",process="msedge",process_id="6032"} 21.78125
+windows_process_cpu_time_total{creating_process_id="4300",mode="user",process="msedge",process_id="6032"} 31.46875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="1204"} 0.09375
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="2296"} 0.203125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="3044"} 0.15625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="3728"} 0.28125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5060"} 110.171875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5904"} 0.359375
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5936"} 37.40625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="7800"} 0.03125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="844"} 1.765625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="8512"} 0.40625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="8736"} 47.796875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="896"} 69.1875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="900"} 0.265625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="1204"} 0.171875
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="2296"} 0.28125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="3044"} 0.734375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="3728"} 0.734375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5060"} 1281.59375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5904"} 0.84375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5936"} 52.515625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="7800"} 0.015625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="844"} 10.109375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="8512"} 1.203125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="8736"} 85.71875
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="896"} 163.78125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="900"} 0.828125
+# HELP windows_process_handles Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.
+# TYPE windows_process_handles gauge
+windows_process_handles{creating_process_id="4300",process="msedge",process_id="6032"} 1868
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="1204"} 227
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="2296"} 254
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="3044"} 285
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="3728"} 220
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5060"} 443
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5904"} 271
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5936"} 298
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="7800"} 204
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="844"} 379
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="8512"} 274
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="8736"} 245
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="896"} 488
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="900"} 323
+# HELP windows_process_io_bytes_total Bytes issued to I/O operations in different modes (read, write, other).
+# TYPE windows_process_io_bytes_total counter
+windows_process_io_bytes_total{creating_process_id="4300",mode="other",process="msedge",process_id="6032"} 4.348941e+06
+windows_process_io_bytes_total{creating_process_id="4300",mode="read",process="msedge",process_id="6032"} 3.30817247e+08
+windows_process_io_bytes_total{creating_process_id="4300",mode="write",process="msedge",process_id="6032"} 4.71331306e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="1204"} 26082
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="2296"} 26144
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="3044"} 26078
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="3728"} 23912
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5060"} 26596
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5904"} 30800
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5936"} 1.83334e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="7800"} 5128
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="844"} 26598
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="8512"} 26174
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="8736"} 26268
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="896"} 188254
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="900"} 26142
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="1204"} 68868
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="2296"} 261004
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="3044"} 400260
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="3728"} 734626
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5060"} 7.35770137e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5904"} 45529
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5936"} 2.72541538e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="7800"} 8804
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="844"} 2.4573337e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="8512"} 1.0120572e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="8736"} 7.202112e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="896"} 5.49114536e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="900"} 656823
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="1204"} 249336
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="2296"} 576080
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="3044"} 1.7264e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="3728"} 1.257063e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5060"} 7.54045349e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5904"} 217248
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5936"} 4.55388644e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="7800"} 1128
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="844"} 1.5475693e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="8512"} 3.635552e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="8736"} 7.987096e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="896"} 3.26369864e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="900"} 1.010769e+06
+# HELP windows_process_io_operations_total I/O operations issued in different modes (read, write, other).
+# TYPE windows_process_io_operations_total counter
+windows_process_io_operations_total{creating_process_id="4300",mode="other",process="msedge",process_id="6032"} 113456
+windows_process_io_operations_total{creating_process_id="4300",mode="read",process="msedge",process_id="6032"} 294229
+windows_process_io_operations_total{creating_process_id="4300",mode="write",process="msedge",process_id="6032"} 200349
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="1204"} 331
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="2296"} 335
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="3044"} 349
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="3728"} 327
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5060"} 399
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5904"} 395
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5936"} 78519
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="7800"} 673
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="844"} 359
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="8512"} 340
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="8736"} 394
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="896"} 4069
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="900"} 337
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="1204"} 74
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="2296"} 732
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="3044"} 950
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="3728"} 1447
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5060"} 3.995322e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5904"} 124
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5936"} 1.571962e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="7800"} 102
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="844"} 20686
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="8512"} 6686
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="8736"} 1.788249e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="896"} 537551
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="900"} 1519
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="1204"} 114 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="2296"} 437 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="3044"} 1405 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="3728"} 3705 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5060"} 3.848906e+06 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5904"} 118 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5936"} 1.701602e+06 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="7800"} 94 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="844"} 24678 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="8512"} 9689 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="8736"} 1.790946e+06 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="896"} 734759 +windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="900"} 1924 +# HELP windows_process_page_faults_total Page faults by the threads executing in this process. +# TYPE windows_process_page_faults_total counter +windows_process_page_faults_total{creating_process_id="4300",process="msedge",process_id="6032"} 296027 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="1204"} 7965 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="2296"} 11749 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="3044"} 41335 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="3728"} 9529 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5060"} 3.750099e+06 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5904"} 8101 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5936"} 533380 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="7800"} 2636 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="844"} 402098 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="8512"} 35487 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="8736"} 9427 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="896"} 205035 +windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="900"} 43073 +# HELP windows_process_page_file_bytes Current number of bytes this process has used in the paging file(s). 
+# TYPE windows_process_page_file_bytes gauge +windows_process_page_file_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 7.041024e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 1.3561856e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 1.5511552e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.0756864e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 8.298496e+06 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.32230656e+08 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 8.97024e+06 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 1.3877248e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.060288e+06 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="844"} 9.2012544e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 2.0672512e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 8.126464e+06 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="896"} 4.1484288e+07 +windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="900"} 2.3629824e+07 +# HELP windows_process_pool_bytes Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. +# TYPE windows_process_pool_bytes gauge +windows_process_pool_bytes{creating_process_id="4300",pool="nonpaged",process="msedge",process_id="6032"} 72072 +windows_process_pool_bytes{creating_process_id="4300",pool="paged",process="msedge",process_id="6032"} 1.262872e+06 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="1204"} 15544 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="2296"} 16024 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="3044"} 17816 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="3728"} 14544 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5060"} 24600 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5904"} 16992 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5936"} 19088 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="7800"} 9920 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="844"} 18472 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="8512"} 18536 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="8736"} 15944 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="896"} 34464 +windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="900"} 17040 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="1204"} 651472 
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="2296"} 665496 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="3044"} 674248 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="3728"} 656216 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5060"} 849040 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5904"} 722296 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5936"} 705232 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="7800"} 140256 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="844"} 680896 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="8512"} 679648 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="8736"} 677152 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="896"} 839128 +windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="900"} 682408 +# HELP windows_process_priority_base Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process. +# TYPE windows_process_priority_base gauge +windows_process_priority_base{creating_process_id="4300",process="msedge",process_id="6032"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="1204"} 4 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="2296"} 4 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="3044"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="3728"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5060"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5904"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5936"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="7800"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="844"} 4 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="8512"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="8736"} 8 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="896"} 10 +windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="900"} 4 +# HELP windows_process_private_bytes Current number of bytes this process has allocated that cannot be shared with other processes. 
+# TYPE windows_process_private_bytes gauge +windows_process_private_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 7.041024e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 1.3561856e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 1.5511552e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.0756864e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 8.298496e+06 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.32230656e+08 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 8.97024e+06 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 1.3877248e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.060288e+06 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="844"} 9.2012544e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 2.0672512e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 8.126464e+06 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="896"} 4.1484288e+07 +windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="900"} 2.3629824e+07 +# HELP windows_process_start_time Time of process start. +# TYPE windows_process_start_time gauge +windows_process_start_time{creating_process_id="4300",process="msedge",process_id="6032"} 1.6674729863403437e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="1204"} 1.667489261506441e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="2296"} 1.6674729883723967e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="3044"} 1.6674892546961231e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="3728"} 1.667472986486918e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5060"} 1.6674729865421767e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5904"} 1.6674730465087523e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5936"} 1.6674729864704254e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="7800"} 1.667472986365871e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="844"} 1.6674729865463045e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="8512"} 1.6674729970112965e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="8736"} 1.667472989342484e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="896"} 1.667472986462684e+09 +windows_process_start_time{creating_process_id="6032",process="msedge",process_id="900"} 1.667472995850073e+09 +# HELP windows_process_threads Number of threads currently active in this process. 
+# TYPE windows_process_threads gauge +windows_process_threads{creating_process_id="4300",process="msedge",process_id="6032"} 38 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="1204"} 12 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="2296"} 15 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="3044"} 15 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="3728"} 9 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="5060"} 21 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="5904"} 9 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="5936"} 12 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="7800"} 7 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="844"} 17 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="8512"} 15 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="8736"} 9 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="896"} 19 +windows_process_threads{creating_process_id="6032",process="msedge",process_id="900"} 15 +# HELP windows_process_virtual_bytes Current size, in bytes, of the virtual address space that the process is using. +# TYPE windows_process_virtual_bytes gauge +windows_process_virtual_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 2.341704609792e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 3.48529324032e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 3.485321392128e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.48532901888e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 2.306839302144e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.485494009856e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.306863792128e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 2.30688589824e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.272204521472e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="844"} 3.486428184576e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 3.485333880832e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 2.306843000832e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="896"} 2.307077632e+12 +windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="900"} 3.485325856768e+12 +# HELP windows_process_working_set_bytes Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. 
+# TYPE windows_process_working_set_bytes gauge +windows_process_working_set_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 1.59309824e+08 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 2.7205632e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 3.65568e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 7.5198464e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.7866752e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.79973632e+08 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.3228416e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 3.6646912e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 6.950912e+06 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="844"} 1.32747264e+08 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 5.5025664e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 1.9361792e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="896"} 5.873664e+07 +windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="900"} 5.6283136e+07 +# HELP windows_process_working_set_peak_bytes Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. 
+# TYPE windows_process_working_set_peak_bytes gauge +windows_process_working_set_peak_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 1.73211648e+08 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 2.7205632e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 4.1439232e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 9.2250112e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.9263488e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 4.54914048e+08 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.4363008e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 4.2278912e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 7.626752e+06 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="844"} 2.28954112e+08 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 5.9830272e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 2.0250624e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="896"} 7.835648e+07 +windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="900"} 5.943296e+07 +# HELP windows_process_working_set_private_bytes Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes. 
+# TYPE windows_process_working_set_private_bytes gauge +windows_process_working_set_private_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 3.6057088e+07 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 5.373952e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 2.072576e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 1.9554304e+07 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.691648e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 2.96091648e+08 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 1.654784e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 6.49216e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 421888 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="844"} 6.250496e+07 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 7.59808e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 1.449984e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="896"} 8.429568e+06 +windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="900"} 1.1952128e+07 +# HELP windows_service_info A metric with a constant '1' value labeled with service information +# TYPE windows_service_info gauge +windows_service_info{display_name="DHCP Client",name="dhcp",process_id="1908",run_as="NT Authority\\LocalService"} 1 +# HELP windows_service_start_mode The start mode of the service (StartMode) +# TYPE windows_service_start_mode gauge +windows_service_start_mode{name="dhcp",start_mode="auto"} 1 +windows_service_start_mode{name="dhcp",start_mode="boot"} 0 +windows_service_start_mode{name="dhcp",start_mode="disabled"} 0 +windows_service_start_mode{name="dhcp",start_mode="manual"} 0 +windows_service_start_mode{name="dhcp",start_mode="system"} 0 +# HELP windows_service_state The state of the service (State) +# TYPE windows_service_state gauge +windows_service_state{name="dhcp",state="continue pending"} 0 +windows_service_state{name="dhcp",state="pause pending"} 0 +windows_service_state{name="dhcp",state="paused"} 0 +windows_service_state{name="dhcp",state="running"} 1 +windows_service_state{name="dhcp",state="start pending"} 0 +windows_service_state{name="dhcp",state="stop pending"} 0 +windows_service_state{name="dhcp",state="stopped"} 0 +windows_service_state{name="dhcp",state="unknown"} 0 +# HELP windows_service_status The status of the service (Status) +# TYPE windows_service_status gauge +windows_service_status{name="dhcp",status="degraded"} 0 +windows_service_status{name="dhcp",status="error"} 0 +windows_service_status{name="dhcp",status="lost comm"} 0 +windows_service_status{name="dhcp",status="no contact"} 0 +windows_service_status{name="dhcp",status="nonrecover"} 0 +windows_service_status{name="dhcp",status="ok"} 1 +windows_service_status{name="dhcp",status="pred fail"} 0 +windows_service_status{name="dhcp",status="service"} 0 
+windows_service_status{name="dhcp",status="starting"} 0 +windows_service_status{name="dhcp",status="stopping"} 0 +windows_service_status{name="dhcp",status="stressed"} 0 +windows_service_status{name="dhcp",status="unknown"} 0 +# HELP windows_system_context_switches_total Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec) +# TYPE windows_system_context_switches_total counter +windows_system_context_switches_total 4.8655033e+08 +# HELP windows_system_exception_dispatches_total Total number of exceptions dispatched (WMI source: PerfOS_System.ExceptionDispatchesPersec) +# TYPE windows_system_exception_dispatches_total counter +windows_system_exception_dispatches_total 160348 +# HELP windows_system_processor_queue_length Length of processor queue (WMI source: PerfOS_System.ProcessorQueueLength) +# TYPE windows_system_processor_queue_length gauge +windows_system_processor_queue_length 0 +# HELP windows_system_system_calls_total Total number of system calls (WMI source: PerfOS_System.SystemCallsPersec) +# TYPE windows_system_system_calls_total counter +windows_system_system_calls_total 1.886567439e+09 +# HELP windows_system_system_up_time System boot time (WMI source: PerfOS_System.SystemUpTime) +# TYPE windows_system_system_up_time gauge +windows_system_system_up_time 1.6673440377290363e+09 +# HELP windows_system_threads Current number of threads (WMI source: PerfOS_System.Threads) +# TYPE windows_system_threads gauge +windows_system_threads 1559 +# HELP windows_tcp_connection_failures_total (TCP.ConnectionFailures) +# TYPE windows_tcp_connection_failures_total counter +windows_tcp_connection_failures_total{af="ipv4"} 137 +windows_tcp_connection_failures_total{af="ipv6"} 214 +# HELP windows_tcp_connections_active_total (TCP.ConnectionsActive) +# TYPE windows_tcp_connections_active_total counter +windows_tcp_connections_active_total{af="ipv4"} 4301 +windows_tcp_connections_active_total{af="ipv6"} 214 +# HELP windows_tcp_connections_established (TCP.ConnectionsEstablished) +# TYPE windows_tcp_connections_established gauge +windows_tcp_connections_established{af="ipv4"} 7 +windows_tcp_connections_established{af="ipv6"} 0 +# HELP windows_tcp_connections_passive_total (TCP.ConnectionsPassive) +# TYPE windows_tcp_connections_passive_total counter +windows_tcp_connections_passive_total{af="ipv4"} 501 +windows_tcp_connections_passive_total{af="ipv6"} 0 +# HELP windows_tcp_connections_reset_total (TCP.ConnectionsReset) +# TYPE windows_tcp_connections_reset_total counter +windows_tcp_connections_reset_total{af="ipv4"} 1282 +windows_tcp_connections_reset_total{af="ipv6"} 0 +# HELP windows_tcp_segments_received_total (TCP.SegmentsReceivedTotal) +# TYPE windows_tcp_segments_received_total counter +windows_tcp_segments_received_total{af="ipv4"} 676388 +windows_tcp_segments_received_total{af="ipv6"} 1284 +# HELP windows_tcp_segments_retransmitted_total (TCP.SegmentsRetransmittedTotal) +# TYPE windows_tcp_segments_retransmitted_total counter +windows_tcp_segments_retransmitted_total{af="ipv4"} 2120 +windows_tcp_segments_retransmitted_total{af="ipv6"} 428 +# HELP windows_tcp_segments_sent_total (TCP.SegmentsSentTotal) +# TYPE windows_tcp_segments_sent_total counter +windows_tcp_segments_sent_total{af="ipv4"} 871379 +windows_tcp_segments_sent_total{af="ipv6"} 856 +# HELP windows_tcp_segments_total (TCP.SegmentsTotal) +# TYPE windows_tcp_segments_total counter +windows_tcp_segments_total{af="ipv4"} 1.547767e+06 +windows_tcp_segments_total{af="ipv6"} 2140 diff --git 
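The fixture above is a recorded windows_exporter v0.20.0 scrape that the tests in windows_test.go (below) replay against the module. The `prepareWindowsV0200` helper those tests call is outside this hunk; the following is a minimal sketch of what such a helper plausibly looks like, assuming the usual go.d pattern of serving the fixture from a throwaway `httptest` server (the helper name, `v0200Metrics`, and the promoted `URL` field are taken from the test code below; the body itself is an assumption, not the exact code from this PR):

// Hedged sketch, not necessarily the helper as merged.
func prepareWindowsV0200() (win *Windows, cleanup func()) {
	// Serve the recorded metrics.txt fixture over HTTP, the way
	// windows_exporter would serve :9182/metrics.
	srv := httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, _ *http.Request) {
			_, _ = w.Write(v0200Metrics)
		}))

	// Point a fresh module instance at the test server. URL is promoted
	// through Config -> web.HTTP -> web.Request.
	win = New()
	win.URL = srv.URL

	return win, srv.Close
}

Returning `srv.Close` as the cleanup func lets each test case tear down its server with `defer cleanup()`, keeping the cases hermetic.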
diff --git a/src/go/collectors/go.d.plugin/modules/windows/windows.go b/src/go/collectors/go.d.plugin/modules/windows/windows.go
new file mode 100644
index 00000000000000..e405887e06e700
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/windows.go
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+	_ "embed"
+	"net/http"
+	"time"
+
+	"github.com/netdata/go.d.plugin/agent/module"
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+	module.Register("windows", module.Creator{
+		JobConfigSchema: configSchema,
+		Defaults: module.Defaults{
+			UpdateEvery: 5,
+		},
+		Create: func() module.Module { return New() },
+	})
+}
+
+func New() *Windows {
+	return &Windows{
+		Config: Config{
+			HTTP: web.HTTP{
+				Client: web.Client{
+					Timeout: web.Duration{Duration: time.Second * 5},
+				},
+			},
+		},
+		cache: cache{
+			collection:                  make(map[string]bool),
+			collectors:                  make(map[string]bool),
+			cores:                       make(map[string]bool),
+			nics:                        make(map[string]bool),
+			volumes:                     make(map[string]bool),
+			thermalZones:                make(map[string]bool),
+			processes:                   make(map[string]bool),
+			iis:                         make(map[string]bool),
+			adcs:                        make(map[string]bool),
+			services:                    make(map[string]bool),
+			netFrameworkCLRExceptions:   make(map[string]bool),
+			netFrameworkCLRInterops:     make(map[string]bool),
+			netFrameworkCLRJIT:          make(map[string]bool),
+			netFrameworkCLRLoading:      make(map[string]bool),
+			netFrameworkCLRLocksThreads: make(map[string]bool),
+			netFrameworkCLRMemory:       make(map[string]bool),
+			netFrameworkCLRRemoting:     make(map[string]bool),
+			netFrameworkCLRSecurity:     make(map[string]bool),
+			mssqlInstances:              make(map[string]bool),
+			mssqlDBs:                    make(map[string]bool),
+			exchangeWorkload:            make(map[string]bool),
+			exchangeLDAP:                make(map[string]bool),
+			exchangeHTTPProxy:           make(map[string]bool),
+			hypervVMMem:                 make(map[string]bool),
+			hypervVMDevices:             make(map[string]bool),
+			hypervVMInterfaces:          make(map[string]bool),
+			hypervVswitch:               make(map[string]bool),
+		},
+		charts: &module.Charts{},
+	}
+}
+
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+type (
+	Windows struct {
+		module.Base
+		Config `yaml:",inline"`
+
+		charts *module.Charts
+
+		doCheck bool
+
+		httpClient *http.Client
+		prom       prometheus.Prometheus
+
+		cache cache
+	}
+	cache struct {
+		cores                       map[string]bool
+		volumes                     map[string]bool
+		nics                        map[string]bool
+		thermalZones                map[string]bool
+		processes                   map[string]bool
+		iis                         map[string]bool
+		adcs                        map[string]bool
+		mssqlInstances              map[string]bool
+		mssqlDBs                    map[string]bool
+		services                    map[string]bool
+		netFrameworkCLRExceptions   map[string]bool
+		netFrameworkCLRInterops     map[string]bool
+		netFrameworkCLRJIT          map[string]bool
+		netFrameworkCLRLoading      map[string]bool
+		netFrameworkCLRLocksThreads map[string]bool
+		netFrameworkCLRMemory       map[string]bool
+		netFrameworkCLRRemoting     map[string]bool
+		netFrameworkCLRSecurity     map[string]bool
+		collectors                  map[string]bool
+		collection                  map[string]bool
+		exchangeWorkload            map[string]bool
+		exchangeLDAP                map[string]bool
+		exchangeHTTPProxy           map[string]bool
+		hypervVMMem                 map[string]bool
+		hypervVMDevices             map[string]bool
+		hypervVMInterfaces          map[string]bool
+		hypervVswitch               map[string]bool
+	}
+)
+
+func (w *Windows) Init() bool {
+	if err := w.validateConfig(); err != nil {
+		w.Errorf("config validation: %v", err)
+		return false
+	}
+
+	httpClient, err := w.initHTTPClient()
+	if err != nil {
+		w.Errorf("init HTTP client: %v", err)
+		return false
+	}
+	w.httpClient = httpClient
+
+	prom, err := w.initPrometheusClient(w.httpClient)
+	if err != nil {
+		w.Errorf("init prometheus clients: %v", err)
+		return false
+	}
+	w.prom = prom
+
+	return true
+}
+
+func (w *Windows) Check() bool {
+	return len(w.Collect()) > 0
+}
+
+func (w *Windows) Charts() *module.Charts {
+	return w.charts
+}
+
+func (w *Windows) Collect() map[string]int64 {
+	ms, err := w.collect()
+	if err != nil {
+		w.Error(err)
+	}
+
+	if len(ms) == 0 {
+		return nil
+	}
+	return ms
+}
+
+func (w *Windows) Cleanup() {
+	if w.httpClient != nil {
+		w.httpClient.CloseIdleConnections()
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/windows/windows_test.go b/src/go/collectors/go.d.plugin/modules/windows/windows_test.go
new file mode 100644
index 00000000000000..b98e40de6dbe39
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/windows/windows_test.go
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	v0200Metrics, _ = os.ReadFile("testdata/v0.20.0/metrics.txt")
+)
+
+func Test_TestData(t *testing.T) {
+	for name, data := range map[string][]byte{
+		"v0200Metrics": v0200Metrics,
+	} {
+		assert.NotNilf(t, data, name)
+	}
+}
+
+func TestNew(t *testing.T) {
+	assert.IsType(t, (*Windows)(nil), New())
+}
+
+func TestWindows_Init(t *testing.T) {
+	tests := map[string]struct {
+		config   Config
+		wantFail bool
+	}{
+		"success if 'url' is set": {
+			config: Config{
+				HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9182/metrics"}}},
+		},
+		"fails on default config": {
+			wantFail: true,
+			config:   New().Config,
+		},
+		"fails if 'url' is unset": {
+			wantFail: true,
+			config:   Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			win := New()
+			win.Config = test.config
+
+			if test.wantFail {
+				assert.False(t, win.Init())
+			} else {
+				assert.True(t, win.Init())
+			}
+		})
+	}
+}
+
+func TestWindows_Check(t *testing.T) {
+	tests := map[string]struct {
+		prepare  func() (win *Windows, cleanup func())
+		wantFail bool
+	}{
+		"success on valid response v0.20.0": {
+			prepare: prepareWindowsV0200,
+		},
+		"fails if endpoint returns invalid data": {
+			wantFail: true,
+			prepare:  prepareWindowsReturnsInvalidData,
+		},
+		"fails on connection refused": {
+			wantFail: true,
+			prepare:  prepareWindowsConnectionRefused,
+		},
+		"fails on 404 response": {
+			wantFail: true,
+			prepare:  prepareWindowsResponse404,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			win, cleanup := test.prepare()
+			defer cleanup()
+
+			require.True(t, win.Init())
+
+			if test.wantFail {
+				assert.False(t, win.Check())
+			} else {
+				assert.True(t, win.Check())
+			}
+		})
+	}
+}
+
+func TestWindows_Charts(t *testing.T) {
+	assert.NotNil(t, New().Charts())
+}
+
+func TestWindows_Cleanup(t *testing.T) {
+	assert.NotPanics(t, New().Cleanup)
+}
+
+func TestWindows_Collect(t *testing.T) {
+	tests := map[string]struct {
+		prepare       func() (win *Windows, cleanup func())
+		wantCollected map[string]int64
+	}{
+		"success on valid response v0.20.0": {
+			prepare: prepareWindowsV0200,
+			wantCollected: map[string]int64{
+				"ad_atq_average_request_latency":   0,
+				"ad_atq_outstanding_requests":      0,
+				"ad_binds_total":                   184,
+				"ad_database_operations_total_add": 1,
"ad_database_operations_total_delete": 0, + "ad_database_operations_total_modify": 30, + "ad_database_operations_total_recycle": 0, + "ad_directory_operations_total_read": 726, + "ad_directory_operations_total_search": 831, + "ad_directory_operations_total_write": 31, + "ad_directory_service_threads": 0, + "ad_ldap_last_bind_time_seconds": 0, + "ad_ldap_searches_total": 1382, + "ad_name_cache_hits_total": 41161, + "ad_name_cache_lookups_total": 53046, + "ad_replication_data_intersite_bytes_total_inbound": 0, + "ad_replication_data_intersite_bytes_total_outbound": 0, + "ad_replication_data_intrasite_bytes_total_inbound": 0, + "ad_replication_data_intrasite_bytes_total_outbound": 0, + "ad_replication_inbound_objects_filtered_total": 0, + "ad_replication_inbound_properties_filtered_total": 0, + "ad_replication_inbound_properties_updated_total": 0, + "ad_replication_inbound_sync_objects_remaining": 0, + "ad_replication_pending_synchronizations": 0, + "ad_replication_sync_requests_total": 0, + "adcs_cert_template_Administrator_challenge_response_processing_time_seconds": 0, + "adcs_cert_template_Administrator_challenge_responses_total": 0, + "adcs_cert_template_Administrator_failed_requests_total": 0, + "adcs_cert_template_Administrator_issued_requests_total": 0, + "adcs_cert_template_Administrator_pending_requests_total": 0, + "adcs_cert_template_Administrator_request_cryptographic_signing_time_seconds": 0, + "adcs_cert_template_Administrator_request_policy_module_processing_time_seconds": 0, + "adcs_cert_template_Administrator_request_processing_time_seconds": 0, + "adcs_cert_template_Administrator_requests_total": 0, + "adcs_cert_template_Administrator_retrievals_processing_time_seconds": 0, + "adcs_cert_template_Administrator_retrievals_total": 0, + "adcs_cert_template_Administrator_signed_certificate_timestamp_list_processing_time_seconds": 0, + "adcs_cert_template_Administrator_signed_certificate_timestamp_lists_total": 0, + "adcs_cert_template_DomainController_challenge_response_processing_time_seconds": 0, + "adcs_cert_template_DomainController_challenge_responses_total": 0, + "adcs_cert_template_DomainController_failed_requests_total": 0, + "adcs_cert_template_DomainController_issued_requests_total": 1, + "adcs_cert_template_DomainController_pending_requests_total": 0, + "adcs_cert_template_DomainController_request_cryptographic_signing_time_seconds": 0, + "adcs_cert_template_DomainController_request_policy_module_processing_time_seconds": 16, + "adcs_cert_template_DomainController_request_processing_time_seconds": 63, + "adcs_cert_template_DomainController_requests_total": 1, + "adcs_cert_template_DomainController_retrievals_processing_time_seconds": 0, + "adcs_cert_template_DomainController_retrievals_total": 0, + "adcs_cert_template_DomainController_signed_certificate_timestamp_list_processing_time_seconds": 0, + "adcs_cert_template_DomainController_signed_certificate_timestamp_lists_total": 0, + "adfs_ad_login_connection_failures_total": 0, + "adfs_certificate_authentications_total": 0, + "adfs_db_artifact_failure_total": 0, + "adfs_db_artifact_query_time_seconds_total": 0, + "adfs_db_config_failure_total": 0, + "adfs_db_config_query_time_seconds_total": 101, + "adfs_device_authentications_total": 0, + "adfs_external_authentications_failure_total": 0, + "adfs_external_authentications_success_total": 0, + "adfs_extranet_account_lockouts_total": 0, + "adfs_federated_authentications_total": 0, + "adfs_federation_metadata_requests_total": 1, + "adfs_oauth_authorization_requests_total": 
0, + "adfs_oauth_client_authentication_failure_total": 0, + "adfs_oauth_client_authentication_success_total": 0, + "adfs_oauth_client_credentials_failure_total": 0, + "adfs_oauth_client_credentials_success_total": 0, + "adfs_oauth_client_privkey_jtw_authentication_failure_total": 0, + "adfs_oauth_client_privkey_jwt_authentications_success_total": 0, + "adfs_oauth_client_secret_basic_authentications_failure_total": 0, + "adfs_oauth_client_secret_basic_authentications_success_total": 0, + "adfs_oauth_client_secret_post_authentications_failure_total": 0, + "adfs_oauth_client_secret_post_authentications_success_total": 0, + "adfs_oauth_client_windows_authentications_failure_total": 0, + "adfs_oauth_client_windows_authentications_success_total": 0, + "adfs_oauth_logon_certificate_requests_failure_total": 0, + "adfs_oauth_logon_certificate_token_requests_success_total": 0, + "adfs_oauth_password_grant_requests_failure_total": 0, + "adfs_oauth_password_grant_requests_success_total": 0, + "adfs_oauth_token_requests_success_total": 0, + "adfs_passive_requests_total": 0, + "adfs_passport_authentications_total": 0, + "adfs_password_change_failed_total": 0, + "adfs_password_change_succeeded_total": 0, + "adfs_samlp_token_requests_success_total": 0, + "adfs_sso_authentications_failure_total": 0, + "adfs_sso_authentications_success_total": 0, + "adfs_token_requests_total": 0, + "adfs_userpassword_authentications_failure_total": 0, + "adfs_userpassword_authentications_success_total": 0, + "adfs_windows_integrated_authentications_total": 0, + "adfs_wsfed_token_requests_success_total": 0, + "adfs_wstrust_token_requests_success_total": 0, + "collector_ad_duration": 769, + "collector_ad_status_fail": 0, + "collector_ad_status_success": 1, + "collector_adcs_duration": 0, + "collector_adcs_status_fail": 0, + "collector_adcs_status_success": 1, + "collector_adfs_duration": 3, + "collector_adfs_status_fail": 0, + "collector_adfs_status_success": 1, + "collector_cpu_duration": 0, + "collector_cpu_status_fail": 0, + "collector_cpu_status_success": 1, + "collector_exchange_duration": 33, + "collector_exchange_status_fail": 0, + "collector_exchange_status_success": 1, + "collector_hyperv_duration": 900, + "collector_hyperv_status_fail": 0, + "collector_hyperv_status_success": 1, + "collector_iis_duration": 0, + "collector_iis_status_fail": 0, + "collector_iis_status_success": 1, + "collector_logical_disk_duration": 0, + "collector_logical_disk_status_fail": 0, + "collector_logical_disk_status_success": 1, + "collector_logon_duration": 113, + "collector_logon_status_fail": 0, + "collector_logon_status_success": 1, + "collector_memory_duration": 0, + "collector_memory_status_fail": 0, + "collector_memory_status_success": 1, + "collector_mssql_duration": 3, + "collector_mssql_status_fail": 0, + "collector_mssql_status_success": 1, + "collector_net_duration": 0, + "collector_net_status_fail": 0, + "collector_net_status_success": 1, + "collector_netframework_clrexceptions_duration": 1437, + "collector_netframework_clrexceptions_status_fail": 0, + "collector_netframework_clrexceptions_status_success": 1, + "collector_netframework_clrinterop_duration": 1491, + "collector_netframework_clrinterop_status_fail": 0, + "collector_netframework_clrinterop_status_success": 1, + "collector_netframework_clrjit_duration": 1278, + "collector_netframework_clrjit_status_fail": 0, + "collector_netframework_clrjit_status_success": 1, + "collector_netframework_clrloading_duration": 1323, + "collector_netframework_clrloading_status_fail": 0, 
+ "collector_netframework_clrloading_status_success": 1, + "collector_netframework_clrlocksandthreads_duration": 1357, + "collector_netframework_clrlocksandthreads_status_fail": 0, + "collector_netframework_clrlocksandthreads_status_success": 1, + "collector_netframework_clrmemory_duration": 1406, + "collector_netframework_clrmemory_status_fail": 0, + "collector_netframework_clrmemory_status_success": 1, + "collector_netframework_clrremoting_duration": 1519, + "collector_netframework_clrremoting_status_fail": 0, + "collector_netframework_clrremoting_status_success": 1, + "collector_netframework_clrsecurity_duration": 1467, + "collector_netframework_clrsecurity_status_fail": 0, + "collector_netframework_clrsecurity_status_success": 1, + "collector_os_duration": 2, + "collector_os_status_fail": 0, + "collector_os_status_success": 1, + "collector_process_duration": 115, + "collector_process_status_fail": 0, + "collector_process_status_success": 1, + "collector_service_duration": 101, + "collector_service_status_fail": 0, + "collector_service_status_success": 1, + "collector_system_duration": 0, + "collector_system_status_fail": 0, + "collector_system_status_success": 1, + "collector_tcp_duration": 0, + "collector_tcp_status_fail": 0, + "collector_tcp_status_success": 1, + "cpu_core_0,0_cstate_c1": 160233427, + "cpu_core_0,0_cstate_c2": 0, + "cpu_core_0,0_cstate_c3": 0, + "cpu_core_0,0_dpc_time": 67109, + "cpu_core_0,0_dpcs": 4871900, + "cpu_core_0,0_idle_time": 162455593, + "cpu_core_0,0_interrupt_time": 77281, + "cpu_core_0,0_interrupts": 155194331, + "cpu_core_0,0_privileged_time": 1182109, + "cpu_core_0,0_user_time": 1073671, + "cpu_core_0,1_cstate_c1": 159528054, + "cpu_core_0,1_cstate_c2": 0, + "cpu_core_0,1_cstate_c3": 0, + "cpu_core_0,1_dpc_time": 11093, + "cpu_core_0,1_dpcs": 1650552, + "cpu_core_0,1_idle_time": 159478125, + "cpu_core_0,1_interrupt_time": 58093, + "cpu_core_0,1_interrupts": 79325847, + "cpu_core_0,1_privileged_time": 1801234, + "cpu_core_0,1_user_time": 3432000, + "cpu_core_0,2_cstate_c1": 159891723, + "cpu_core_0,2_cstate_c2": 0, + "cpu_core_0,2_cstate_c3": 0, + "cpu_core_0,2_dpc_time": 16062, + "cpu_core_0,2_dpcs": 2236469, + "cpu_core_0,2_idle_time": 159848437, + "cpu_core_0,2_interrupt_time": 53515, + "cpu_core_0,2_interrupts": 67305419, + "cpu_core_0,2_privileged_time": 1812546, + "cpu_core_0,2_user_time": 3050250, + "cpu_core_0,3_cstate_c1": 159544117, + "cpu_core_0,3_cstate_c2": 0, + "cpu_core_0,3_cstate_c3": 0, + "cpu_core_0,3_dpc_time": 8140, + "cpu_core_0,3_dpcs": 1185046, + "cpu_core_0,3_idle_time": 159527546, + "cpu_core_0,3_interrupt_time": 44484, + "cpu_core_0,3_interrupts": 60766938, + "cpu_core_0,3_privileged_time": 1760828, + "cpu_core_0,3_user_time": 3422875, + "cpu_dpc_time": 102404, + "cpu_idle_time": 641309701, + "cpu_interrupt_time": 233373, + "cpu_privileged_time": 6556717, + "cpu_user_time": 10978796, + "exchange_activesync_ping_cmds_pending": 0, + "exchange_activesync_requests_total": 14, + "exchange_activesync_sync_cmds_total": 0, + "exchange_autodiscover_requests_total": 1, + "exchange_avail_service_requests_per_sec": 0, + "exchange_http_proxy_autodiscover_avg_auth_latency": 1, + "exchange_http_proxy_autodiscover_avg_cas_proccessing_latency_sec": 3, + "exchange_http_proxy_autodiscover_mailbox_proxy_failure_rate": 0, + "exchange_http_proxy_autodiscover_mailbox_server_locator_avg_latency_sec": 8, + "exchange_http_proxy_autodiscover_outstanding_proxy_requests": 0, + "exchange_http_proxy_autodiscover_requests_total": 27122, + 
"exchange_http_proxy_eas_avg_auth_latency": 0, + "exchange_http_proxy_eas_avg_cas_proccessing_latency_sec": 3, + "exchange_http_proxy_eas_mailbox_proxy_failure_rate": 0, + "exchange_http_proxy_eas_mailbox_server_locator_avg_latency_sec": 8, + "exchange_http_proxy_eas_outstanding_proxy_requests": 0, + "exchange_http_proxy_eas_requests_total": 32519, + "exchange_ldap_complianceauditservice_10_long_running_ops_per_sec": 0, + "exchange_ldap_complianceauditservice_10_read_time_sec": 18, + "exchange_ldap_complianceauditservice_10_search_time_sec": 58, + "exchange_ldap_complianceauditservice_10_timeout_errors_total": 0, + "exchange_ldap_complianceauditservice_10_write_time_sec": 0, + "exchange_ldap_complianceauditservice_long_running_ops_per_sec": 0, + "exchange_ldap_complianceauditservice_read_time_sec": 8, + "exchange_ldap_complianceauditservice_search_time_sec": 46, + "exchange_ldap_complianceauditservice_timeout_errors_total": 0, + "exchange_ldap_complianceauditservice_write_time_sec": 0, + "exchange_owa_current_unique_users": 0, + "exchange_owa_requests_total": 0, + "exchange_rpc_active_user_count": 0, + "exchange_rpc_avg_latency_sec": 1, + "exchange_rpc_connection_count": 0, + "exchange_rpc_operations_total": 9, + "exchange_rpc_requests": 0, + "exchange_rpc_user_count": 0, + "exchange_transport_queues_active_mailbox_delivery_high_priority": 0, + "exchange_transport_queues_active_mailbox_delivery_low_priority": 0, + "exchange_transport_queues_active_mailbox_delivery_none_priority": 0, + "exchange_transport_queues_active_mailbox_delivery_normal_priority": 0, + "exchange_transport_queues_external_active_remote_delivery_high_priority": 0, + "exchange_transport_queues_external_active_remote_delivery_low_priority": 0, + "exchange_transport_queues_external_active_remote_delivery_none_priority": 0, + "exchange_transport_queues_external_active_remote_delivery_normal_priority": 0, + "exchange_transport_queues_external_largest_delivery_high_priority": 0, + "exchange_transport_queues_external_largest_delivery_low_priority": 0, + "exchange_transport_queues_external_largest_delivery_none_priority": 0, + "exchange_transport_queues_external_largest_delivery_normal_priority": 0, + "exchange_transport_queues_internal_active_remote_delivery_high_priority": 0, + "exchange_transport_queues_internal_active_remote_delivery_low_priority": 0, + "exchange_transport_queues_internal_active_remote_delivery_none_priority": 0, + "exchange_transport_queues_internal_active_remote_delivery_normal_priority": 0, + "exchange_transport_queues_internal_largest_delivery_high_priority": 0, + "exchange_transport_queues_internal_largest_delivery_low_priority": 0, + "exchange_transport_queues_internal_largest_delivery_none_priority": 0, + "exchange_transport_queues_internal_largest_delivery_normal_priority": 0, + "exchange_transport_queues_poison_high_priority": 0, + "exchange_transport_queues_poison_low_priority": 0, + "exchange_transport_queues_poison_none_priority": 0, + "exchange_transport_queues_poison_normal_priority": 0, + "exchange_transport_queues_retry_mailbox_delivery_high_priority": 0, + "exchange_transport_queues_retry_mailbox_delivery_low_priority": 0, + "exchange_transport_queues_retry_mailbox_delivery_none_priority": 0, + "exchange_transport_queues_retry_mailbox_delivery_normal_priority": 0, + "exchange_transport_queues_unreachable_high_priority": 0, + "exchange_transport_queues_unreachable_low_priority": 0, + "exchange_transport_queues_unreachable_none_priority": 0, + 
"exchange_transport_queues_unreachable_normal_priority": 0, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_active_tasks": 0, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_completed_tasks": 0, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_is_active": 1, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_is_paused": 0, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_queued_tasks": 0, + "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_yielded_tasks": 0, + "exchange_workload_microsoft_exchange_servicehost_darruntime_active_tasks": 0, + "exchange_workload_microsoft_exchange_servicehost_darruntime_completed_tasks": 0, + "exchange_workload_microsoft_exchange_servicehost_darruntime_is_active": 1, + "exchange_workload_microsoft_exchange_servicehost_darruntime_is_paused": 0, + "exchange_workload_microsoft_exchange_servicehost_darruntime_queued_tasks": 0, + "exchange_workload_microsoft_exchange_servicehost_darruntime_yielded_tasks": 0, + "hyperv_health_critical": 0, + "hyperv_health_ok": 1, + "hyperv_root_partition_1G_device_pages": 0, + "hyperv_root_partition_1G_gpa_pages": 6, + "hyperv_root_partition_2M_device_pages": 0, + "hyperv_root_partition_2M_gpa_pages": 5255, + "hyperv_root_partition_4K_device_pages": 0, + "hyperv_root_partition_4K_gpa_pages": 58880, + "hyperv_root_partition_address_spaces": 0, + "hyperv_root_partition_attached_devices": 1, + "hyperv_root_partition_deposited_pages": 31732, + "hyperv_root_partition_device_dma_errors": 0, + "hyperv_root_partition_device_interrupt_errors": 0, + "hyperv_root_partition_device_interrupt_throttle_events": 0, + "hyperv_root_partition_gpa_space_modifications": 0, + "hyperv_root_partition_io_tlb_flush": 23901, + "hyperv_root_partition_physical_pages_allocated": 0, + "hyperv_root_partition_virtual_tlb_flush_entires": 15234, + "hyperv_root_partition_virtual_tlb_pages": 64, + "hyperv_vid_ubuntu_22_04_lts_physical_pages_allocated": 745472, + "hyperv_vid_ubuntu_22_04_lts_remote_physical_pages": 0, + "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_bytes_read": 83456, + "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_bytes_written": 1148928, + "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_error_count": 0, + "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_operations_read": 6, + "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_operations_written": 34, + "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_bytes_read": 531184640, + "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_bytes_written": 425905152, + "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_error_count": 3, + "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_operations_read": 13196, + "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_operations_written": 3866, + 
"hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_bytes_received": 473654, + "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_bytes_sent": 43550457, + "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_incoming_dropped": 0, + "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_outgoing_dropped": 284, + "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_received": 6137, + "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_sent": 8905, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_bytes_received": 43509444, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_bytes_sent": 473654, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_incoming_dropped": 0, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_outgoing_dropped": 0, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_received": 8621, + "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_sent": 6137, + "hyperv_vm_ubuntu_22_04_lts_cpu_guest_run_time": 62534217, + "hyperv_vm_ubuntu_22_04_lts_cpu_hypervisor_run_time": 4457712, + "hyperv_vm_ubuntu_22_04_lts_cpu_remote_run_time": 0, + "hyperv_vm_ubuntu_22_04_lts_memory_physical": 2628, + "hyperv_vm_ubuntu_22_04_lts_memory_physical_guest_visible": 2904, + "hyperv_vm_ubuntu_22_04_lts_memory_pressure_current": 83, + "hyperv_vswitch_default_switch_broadcast_packets_received_total": 51, + "hyperv_vswitch_default_switch_broadcast_packets_sent_total": 18, + "hyperv_vswitch_default_switch_bytes_received_total": 44024111, + "hyperv_vswitch_default_switch_bytes_sent_total": 43983098, + "hyperv_vswitch_default_switch_directed_packets_received_total": 14603, + "hyperv_vswitch_default_switch_directed_packets_send_total": 14603, + "hyperv_vswitch_default_switch_dropped_packets_incoming_total": 284, + "hyperv_vswitch_default_switch_dropped_packets_outcoming_total": 0, + "hyperv_vswitch_default_switch_extensions_dropped_packets_incoming_total": 0, + "hyperv_vswitch_default_switch_extensions_dropped_packets_outcoming_total": 0, + "hyperv_vswitch_default_switch_learned_mac_addresses_total": 2, + "hyperv_vswitch_default_switch_multicast_packets_received_total": 388, + "hyperv_vswitch_default_switch_multicast_packets_sent_total": 137, + "hyperv_vswitch_default_switch_number_of_send_channel_moves_total": 0, + "hyperv_vswitch_default_switch_number_of_vmq_moves_total": 0, + "hyperv_vswitch_default_switch_packets_flooded_total": 0, + "hyperv_vswitch_default_switch_packets_received_total": 15042, + "hyperv_vswitch_default_switch_purged_mac_addresses_total": 0, + "iis_website_Default_Web_Site_connection_attempts_all_instances_total": 1, + "iis_website_Default_Web_Site_current_anonymous_users": 0, + "iis_website_Default_Web_Site_current_connections": 0, + "iis_website_Default_Web_Site_current_isapi_extension_requests": 0, + "iis_website_Default_Web_Site_current_non_anonymous_users": 0, + "iis_website_Default_Web_Site_files_received_total": 
0, + "iis_website_Default_Web_Site_files_sent_total": 2, + "iis_website_Default_Web_Site_isapi_extension_requests_total": 0, + "iis_website_Default_Web_Site_locked_errors_total": 0, + "iis_website_Default_Web_Site_logon_attempts_total": 4, + "iis_website_Default_Web_Site_not_found_errors_total": 1, + "iis_website_Default_Web_Site_received_bytes_total": 10289, + "iis_website_Default_Web_Site_requests_total": 3, + "iis_website_Default_Web_Site_sent_bytes_total": 105882, + "iis_website_Default_Web_Site_service_uptime": 258633, + "logical_disk_C:_free_space": 43636490240, + "logical_disk_C:_read_bytes_total": 17676328448, + "logical_disk_C:_read_latency": 97420, + "logical_disk_C:_reads_total": 350593, + "logical_disk_C:_total_space": 67938287616, + "logical_disk_C:_used_space": 24301797376, + "logical_disk_C:_write_bytes_total": 9135282688, + "logical_disk_C:_write_latency": 123912, + "logical_disk_C:_writes_total": 450705, + "logon_type_batch_sessions": 0, + "logon_type_cached_interactive_sessions": 0, + "logon_type_cached_remote_interactive_sessions": 0, + "logon_type_cached_unlock_sessions": 0, + "logon_type_interactive_sessions": 2, + "logon_type_network_clear_text_sessions": 0, + "logon_type_network_sessions": 0, + "logon_type_new_credentials_sessions": 0, + "logon_type_proxy_sessions": 0, + "logon_type_remote_interactive_sessions": 0, + "logon_type_service_sessions": 0, + "logon_type_system_sessions": 0, + "logon_type_unlock_sessions": 0, + "memory_available_bytes": 1379942400, + "memory_cache_faults_total": 8009603, + "memory_cache_total": 1392185344, + "memory_commit_limit": 5733113856, + "memory_committed_bytes": 3447439360, + "memory_modified_page_list_bytes": 32653312, + "memory_not_committed_bytes": 2285674496, + "memory_page_faults_total": 119093924, + "memory_pool_nonpaged_bytes_total": 126865408, + "memory_pool_paged_bytes": 303906816, + "memory_standby_cache_core_bytes": 107376640, + "memory_standby_cache_normal_priority_bytes": 1019121664, + "memory_standby_cache_reserve_bytes": 233033728, + "memory_standby_cache_total": 1359532032, + "memory_swap_page_reads_total": 402087, + "memory_swap_page_writes_total": 7012, + "memory_swap_pages_read_total": 4643279, + "memory_swap_pages_written_total": 312896, + "memory_used_bytes": 2876776448, + "mssql_db_master_instance_SQLEXPRESS_active_transactions": 0, + "mssql_db_master_instance_SQLEXPRESS_backup_restore_operations": 0, + "mssql_db_master_instance_SQLEXPRESS_data_files_size_bytes": 4653056, + "mssql_db_master_instance_SQLEXPRESS_log_flushed_bytes": 3702784, + "mssql_db_master_instance_SQLEXPRESS_log_flushes": 252, + "mssql_db_master_instance_SQLEXPRESS_transactions": 2183, + "mssql_db_master_instance_SQLEXPRESS_write_transactions": 236, + "mssql_db_model_instance_SQLEXPRESS_active_transactions": 0, + "mssql_db_model_instance_SQLEXPRESS_backup_restore_operations": 0, + "mssql_db_model_instance_SQLEXPRESS_data_files_size_bytes": 8388608, + "mssql_db_model_instance_SQLEXPRESS_log_flushed_bytes": 12288, + "mssql_db_model_instance_SQLEXPRESS_log_flushes": 3, + "mssql_db_model_instance_SQLEXPRESS_transactions": 4467, + "mssql_db_model_instance_SQLEXPRESS_write_transactions": 0, + "mssql_db_msdb_instance_SQLEXPRESS_active_transactions": 0, + "mssql_db_msdb_instance_SQLEXPRESS_backup_restore_operations": 0, + "mssql_db_msdb_instance_SQLEXPRESS_data_files_size_bytes": 15466496, + "mssql_db_msdb_instance_SQLEXPRESS_log_flushed_bytes": 0, + "mssql_db_msdb_instance_SQLEXPRESS_log_flushes": 0, + 
"mssql_db_msdb_instance_SQLEXPRESS_transactions": 4582, + "mssql_db_msdb_instance_SQLEXPRESS_write_transactions": 0, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_active_transactions": 0, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_backup_restore_operations": 0, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_data_files_size_bytes": 41943040, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_log_flushed_bytes": 0, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_log_flushes": 0, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_transactions": 2, + "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_write_transactions": 0, + "mssql_db_tempdb_instance_SQLEXPRESS_active_transactions": 0, + "mssql_db_tempdb_instance_SQLEXPRESS_backup_restore_operations": 0, + "mssql_db_tempdb_instance_SQLEXPRESS_data_files_size_bytes": 8388608, + "mssql_db_tempdb_instance_SQLEXPRESS_log_flushed_bytes": 118784, + "mssql_db_tempdb_instance_SQLEXPRESS_log_flushes": 2, + "mssql_db_tempdb_instance_SQLEXPRESS_transactions": 1558, + "mssql_db_tempdb_instance_SQLEXPRESS_write_transactions": 29, + "mssql_instance_SQLEXPRESS_accessmethods_page_splits": 429, + "mssql_instance_SQLEXPRESS_bufman_buffer_cache_hits": 86, + "mssql_instance_SQLEXPRESS_bufman_checkpoint_pages": 82, + "mssql_instance_SQLEXPRESS_bufman_page_life_expectancy_seconds": 191350, + "mssql_instance_SQLEXPRESS_bufman_page_reads": 797, + "mssql_instance_SQLEXPRESS_bufman_page_writes": 92, + "mssql_instance_SQLEXPRESS_cache_hit_ratio": 100, + "mssql_instance_SQLEXPRESS_genstats_blocked_processes": 0, + "mssql_instance_SQLEXPRESS_genstats_user_connections": 1, + "mssql_instance_SQLEXPRESS_memmgr_connection_memory_bytes": 1015808, + "mssql_instance_SQLEXPRESS_memmgr_external_benefit_of_memory": 0, + "mssql_instance_SQLEXPRESS_memmgr_pending_memory_grants": 0, + "mssql_instance_SQLEXPRESS_memmgr_total_server_memory_bytes": 198836224, + "mssql_instance_SQLEXPRESS_resource_AllocUnit_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_AllocUnit_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Application_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Application_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Database_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Database_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Extent_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Extent_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_File_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_File_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_HoBT_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_HoBT_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Key_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Key_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Metadata_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Metadata_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_OIB_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_OIB_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Object_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Object_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Page_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Page_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_RID_locks_deadlocks": 0, + 
"mssql_instance_SQLEXPRESS_resource_RID_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_RowGroup_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_RowGroup_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_resource_Xact_locks_deadlocks": 0, + "mssql_instance_SQLEXPRESS_resource_Xact_locks_lock_wait_seconds": 0, + "mssql_instance_SQLEXPRESS_sql_errors_total_db_offline_errors": 0, + "mssql_instance_SQLEXPRESS_sql_errors_total_info_errors": 766, + "mssql_instance_SQLEXPRESS_sql_errors_total_kill_connection_errors": 0, + "mssql_instance_SQLEXPRESS_sql_errors_total_user_errors": 29, + "mssql_instance_SQLEXPRESS_sqlstats_auto_parameterization_attempts": 37, + "mssql_instance_SQLEXPRESS_sqlstats_batch_requests": 2972, + "mssql_instance_SQLEXPRESS_sqlstats_safe_auto_parameterization_attempts": 2, + "mssql_instance_SQLEXPRESS_sqlstats_sql_compilations": 376, + "mssql_instance_SQLEXPRESS_sqlstats_sql_recompilations": 8, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_bytes_received": 38290755856, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_bytes_sent": 8211165504, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_outbound_discarded": 0, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_outbound_errors": 0, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_discarded": 0, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_errors": 0, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_total": 4120869, + "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_sent_total": 1332466, + "netframework_WMSvc_clrexception_filters_total": 0, + "netframework_WMSvc_clrexception_finallys_total": 0, + "netframework_WMSvc_clrexception_throw_to_catch_depth_total": 0, + "netframework_WMSvc_clrexception_thrown_total": 0, + "netframework_WMSvc_clrinterop_com_callable_wrappers_total": 2, + "netframework_WMSvc_clrinterop_interop_marshalling_total": 0, + "netframework_WMSvc_clrinterop_interop_stubs_created_total": 29, + "netframework_WMSvc_clrjit_il_bytes_total": 4007, + "netframework_WMSvc_clrjit_methods_total": 27, + "netframework_WMSvc_clrjit_standard_failures_total": 0, + "netframework_WMSvc_clrjit_time_percent": 0, + "netframework_WMSvc_clrloading_appdomains_loaded_total": 1, + "netframework_WMSvc_clrloading_appdomains_unloaded_total": 0, + "netframework_WMSvc_clrloading_assemblies_loaded_total": 5, + "netframework_WMSvc_clrloading_class_load_failures_total": 0, + "netframework_WMSvc_clrloading_classes_loaded_total": 18, + "netframework_WMSvc_clrloading_loader_heap_size_bytes": 270336, + "netframework_WMSvc_clrlocksandthreads_contentions_total": 0, + "netframework_WMSvc_clrlocksandthreads_current_logical_threads": 2, + "netframework_WMSvc_clrlocksandthreads_physical_threads_current": 1, + "netframework_WMSvc_clrlocksandthreads_queue_length_total": 0, + "netframework_WMSvc_clrlocksandthreads_recognized_threads_total": 1, + "netframework_WMSvc_clrmemory_allocated_bytes_total": 227792, + "netframework_WMSvc_clrmemory_collections_total": 2, + "netframework_WMSvc_clrmemory_committed_bytes": 270336, + "netframework_WMSvc_clrmemory_finalization_survivors": 7, + "netframework_WMSvc_clrmemory_gc_time_percent": 0, + "netframework_WMSvc_clrmemory_heap_size_bytes": 4312696, + "netframework_WMSvc_clrmemory_induced_gc_total": 0, + "netframework_WMSvc_clrmemory_number_gc_handles": 24, + "netframework_WMSvc_clrmemory_number_pinned_objects": 1, + "netframework_WMSvc_clrmemory_number_sink_blocksinuse": 1, + 
"netframework_WMSvc_clrmemory_promoted_bytes": 49720, + "netframework_WMSvc_clrmemory_reserved_bytes": 402644992, + "netframework_WMSvc_clrremoting_channels_total": 0, + "netframework_WMSvc_clrremoting_context_bound_classes_loaded": 0, + "netframework_WMSvc_clrremoting_context_bound_objects_total": 0, + "netframework_WMSvc_clrremoting_context_proxies_total": 0, + "netframework_WMSvc_clrremoting_contexts": 1, + "netframework_WMSvc_clrremoting_remote_calls_total": 0, + "netframework_WMSvc_clrsecurity_checks_time_percent": 0, + "netframework_WMSvc_clrsecurity_link_time_checks_total": 0, + "netframework_WMSvc_clrsecurity_runtime_checks_total": 3, + "netframework_WMSvc_clrsecurity_stack_walk_depth": 1, + "netframework_powershell_clrexception_filters_total": 0, + "netframework_powershell_clrexception_finallys_total": 56, + "netframework_powershell_clrexception_throw_to_catch_depth_total": 140, + "netframework_powershell_clrexception_thrown_total": 37, + "netframework_powershell_clrinterop_com_callable_wrappers_total": 5, + "netframework_powershell_clrinterop_interop_marshalling_total": 0, + "netframework_powershell_clrinterop_interop_stubs_created_total": 345, + "netframework_powershell_clrjit_il_bytes_total": 47021, + "netframework_powershell_clrjit_methods_total": 344, + "netframework_powershell_clrjit_standard_failures_total": 0, + "netframework_powershell_clrjit_time_percent": 0, + "netframework_powershell_clrloading_appdomains_loaded_total": 1, + "netframework_powershell_clrloading_appdomains_unloaded_total": 0, + "netframework_powershell_clrloading_assemblies_loaded_total": 20, + "netframework_powershell_clrloading_class_load_failures_total": 1, + "netframework_powershell_clrloading_classes_loaded_total": 477, + "netframework_powershell_clrloading_loader_heap_size_bytes": 2285568, + "netframework_powershell_clrlocksandthreads_contentions_total": 10, + "netframework_powershell_clrlocksandthreads_current_logical_threads": 16, + "netframework_powershell_clrlocksandthreads_physical_threads_current": 13, + "netframework_powershell_clrlocksandthreads_queue_length_total": 3, + "netframework_powershell_clrlocksandthreads_recognized_threads_total": 6, + "netframework_powershell_clrmemory_allocated_bytes_total": 46333800, + "netframework_powershell_clrmemory_collections_total": 11, + "netframework_powershell_clrmemory_committed_bytes": 20475904, + "netframework_powershell_clrmemory_finalization_survivors": 244, + "netframework_powershell_clrmemory_gc_time_percent": 0, + "netframework_powershell_clrmemory_heap_size_bytes": 34711872, + "netframework_powershell_clrmemory_induced_gc_total": 0, + "netframework_powershell_clrmemory_number_gc_handles": 834, + "netframework_powershell_clrmemory_number_pinned_objects": 0, + "netframework_powershell_clrmemory_number_sink_blocksinuse": 42, + "netframework_powershell_clrmemory_promoted_bytes": 107352, + "netframework_powershell_clrmemory_reserved_bytes": 402644992, + "netframework_powershell_clrremoting_channels_total": 0, + "netframework_powershell_clrremoting_context_bound_classes_loaded": 0, + "netframework_powershell_clrremoting_context_bound_objects_total": 0, + "netframework_powershell_clrremoting_context_proxies_total": 0, + "netframework_powershell_clrremoting_contexts": 1, + "netframework_powershell_clrremoting_remote_calls_total": 0, + "netframework_powershell_clrsecurity_checks_time_percent": 0, + "netframework_powershell_clrsecurity_link_time_checks_total": 0, + "netframework_powershell_clrsecurity_runtime_checks_total": 4386, + 
"netframework_powershell_clrsecurity_stack_walk_depth": 1, + "os_paging_free_bytes": 1414107136, + "os_paging_limit_bytes": 1476395008, + "os_paging_used_bytes": 62287872, + "os_physical_memory_free_bytes": 1379946496, + "os_processes": 152, + "os_processes_limit": 4294967295, + "os_users": 2, + "os_visible_memory_bytes": 4256718848, + "os_visible_memory_used_bytes": 2876772352, + "process_msedge_cpu_time": 1919893, + "process_msedge_handles": 5779, + "process_msedge_io_bytes": 3978227378, + "process_msedge_io_operations": 16738642, + "process_msedge_page_faults": 5355941, + "process_msedge_page_file_bytes": 681603072, + "process_msedge_threads": 213, + "process_msedge_working_set_private_bytes": 461344768, + "service_dhcp_state_continue_pending": 0, + "service_dhcp_state_pause_pending": 0, + "service_dhcp_state_paused": 0, + "service_dhcp_state_running": 1, + "service_dhcp_state_start_pending": 0, + "service_dhcp_state_stop_pending": 0, + "service_dhcp_state_stopped": 0, + "service_dhcp_state_unknown": 0, + "service_dhcp_status_degraded": 0, + "service_dhcp_status_error": 0, + "service_dhcp_status_lost_comm": 0, + "service_dhcp_status_no_contact": 0, + "service_dhcp_status_nonrecover": 0, + "service_dhcp_status_ok": 1, + "service_dhcp_status_pred_fail": 0, + "service_dhcp_status_service": 0, + "service_dhcp_status_starting": 0, + "service_dhcp_status_stopping": 0, + "service_dhcp_status_stressed": 0, + "service_dhcp_status_unknown": 0, + "system_threads": 1559, + "system_up_time": 16208210, + "tcp_ipv4_conns_active": 4301, + "tcp_ipv4_conns_established": 7, + "tcp_ipv4_conns_failures": 137, + "tcp_ipv4_conns_passive": 501, + "tcp_ipv4_conns_resets": 1282, + "tcp_ipv4_segments_received": 676388, + "tcp_ipv4_segments_retransmitted": 2120, + "tcp_ipv4_segments_sent": 871379, + "tcp_ipv6_conns_active": 214, + "tcp_ipv6_conns_established": 0, + "tcp_ipv6_conns_failures": 214, + "tcp_ipv6_conns_passive": 0, + "tcp_ipv6_conns_resets": 0, + "tcp_ipv6_segments_received": 1284, + "tcp_ipv6_segments_retransmitted": 428, + "tcp_ipv6_segments_sent": 856, + }, + }, + "fails if endpoint returns invalid data": { + prepare: prepareWindowsReturnsInvalidData, + }, + "fails on connection refused": { + prepare: prepareWindowsConnectionRefused, + }, + "fails on 404 response": { + prepare: prepareWindowsResponse404, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + win, cleanup := test.prepare() + defer cleanup() + + require.True(t, win.Init()) + + mx := win.Collect() + + if mx != nil && test.wantCollected != nil { + mx["system_up_time"] = test.wantCollected["system_up_time"] + } + + assert.Equal(t, test.wantCollected, mx) + if len(test.wantCollected) > 0 { + testCharts(t, win, mx) + } + }) + } +} + +func testCharts(t *testing.T, win *Windows, mx map[string]int64) { + ensureChartsDimsCreated(t, win) + ensureCollectedHasAllChartsDimsVarsIDs(t, win, mx) +} + +func ensureChartsDimsCreated(t *testing.T, w *Windows) { + for _, chart := range cpuCharts { + if w.cache.collection[collectorCPU] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range memCharts { + if w.cache.collection[collectorMemory] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range tcpCharts { + if 
w.cache.collection[collectorTCP] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range osCharts { + if w.cache.collection[collectorOS] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range systemCharts { + if w.cache.collection[collectorSystem] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range logonCharts { + if w.cache.collection[collectorLogon] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range processesCharts { + if w.cache.collection[collectorProcess] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRExceptionsChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRExceptions] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRInteropChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRInterop] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRJITChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRJIT] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRLoadingChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRLoading] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRLocksAndThreadsChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRLocksAndThreads] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRMemoryChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRMemory] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRRemotingChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRRemoting] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for _, chart := range netFrameworkCLRSecurityChartsTmpl { + if w.cache.collection[collectorNetFrameworkCLRSecurity] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, 
w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + + for core := range w.cache.cores { + for _, chart := range cpuCoreChartsTmpl { + id := fmt.Sprintf(chart.ID, core) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' core", id, core) + } + } + for disk := range w.cache.volumes { + for _, chart := range diskChartsTmpl { + id := fmt.Sprintf(chart.ID, disk) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' disk", id, disk) + } + } + for nic := range w.cache.nics { + for _, chart := range nicChartsTmpl { + id := fmt.Sprintf(chart.ID, nic) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' nic", id, nic) + } + } + for zone := range w.cache.thermalZones { + for _, chart := range thermalzoneChartsTmpl { + id := fmt.Sprintf(chart.ID, zone) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' thermalzone", id, zone) + } + } + for svc := range w.cache.services { + for _, chart := range serviceChartsTmpl { + id := fmt.Sprintf(chart.ID, svc) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' service", id, svc) + } + } + for website := range w.cache.iis { + for _, chart := range iisWebsiteChartsTmpl { + id := fmt.Sprintf(chart.ID, website) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' website", id, website) + } + } + for instance := range w.cache.mssqlInstances { + for _, chart := range mssqlInstanceChartsTmpl { + id := fmt.Sprintf(chart.ID, instance) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' instance", id, instance) + } + } + for instanceDB := range w.cache.mssqlDBs { + s := strings.Split(instanceDB, ":") + if assert.Lenf(t, s, 2, "can not extract intance/database from cache.mssqlDBs") { + instance, db := s[0], s[1] + for _, chart := range mssqlDatabaseChartsTmpl { + id := fmt.Sprintf(chart.ID, db, instance) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' instance", id, instance) + } + } + } + for _, chart := range adCharts { + if w.cache.collection[collectorAD] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for template := range w.cache.adcs { + for _, chart := range adcsCertTemplateChartsTmpl { + id := fmt.Sprintf(chart.ID, template) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' template certificate", id, template) + } + } + for name := range w.cache.collectors { + for _, chart := range collectorChartsTmpl { + id := fmt.Sprintf(chart.ID, name) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' collector", id, name) + } + } + + for _, chart := range processesCharts { + if chart = w.Charts().Get(chart.ID); chart == nil { + continue + } + for proc := range w.cache.processes { + var found bool + for _, dim := range chart.Dims { + if found = strings.HasPrefix(dim.ID, "process_"+proc); found { + break + } + } + assert.Truef(t, found, "chart '%s' has not dim for '%s' process", chart.ID, proc) + } + } + + for _, chart := range hypervChartsTmpl { + if w.cache.collection[collectorHyperv] { + assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID) + } else { + assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID) + } + } + for vm := range w.cache.hypervVMMem { + for _, chart := range hypervVMChartsTemplate { + id := fmt.Sprintf(chart.ID, hypervCleanName(vm)) + assert.Truef(t, 
w.Charts().Has(id), "charts has no '%s' chart for '%s' virtual machine", id, vm) + } + } + for device := range w.cache.hypervVMDevices { + for _, chart := range hypervVMDeviceChartsTemplate { + id := fmt.Sprintf(chart.ID, hypervCleanName(device)) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' vm storage device", id, device) + } + } + for iface := range w.cache.hypervVMInterfaces { + for _, chart := range hypervVMInterfaceChartsTemplate { + id := fmt.Sprintf(chart.ID, hypervCleanName(iface)) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' vm network interface", id, iface) + } + } + for vswitch := range w.cache.hypervVswitch { + for _, chart := range hypervVswitchChartsTemplate { + id := fmt.Sprintf(chart.ID, hypervCleanName(vswitch)) + assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' virtual switch", id, vswitch) + } + } +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, w *Windows, mx map[string]int64) { + for _, chart := range *w.Charts() { + for _, dim := range chart.Dims { + _, ok := mx[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := mx[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +func prepareWindowsV0200() (win *Windows, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(v0200Metrics) + })) + + win = New() + win.URL = ts.URL + return win, ts.Close +} + +func prepareWindowsReturnsInvalidData() (win *Windows, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("hello and\n goodbye")) + })) + + win = New() + win.URL = ts.URL + return win, ts.Close +} + +func prepareWindowsConnectionRefused() (win *Windows, cleanup func()) { + win = New() + win.URL = "http://127.0.0.1:38001" + return win, func() {} +} + +func prepareWindowsResponse404() (win *Windows, cleanup func()) { + ts := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + win = New() + win.URL = ts.URL + return win, ts.Close +} diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/README.md b/src/go/collectors/go.d.plugin/modules/wireguard/README.md new file mode 120000 index 00000000000000..389e494d7a707d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/README.md @@ -0,0 +1 @@ +integrations/wireguard.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/charts.go b/src/go/collectors/go.d.plugin/modules/wireguard/charts.go new file mode 100644 index 00000000000000..0c38298125e9d6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/charts.go @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package wireguard + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +const ( + prioDeviceNetworkIO = module.Priority + iota + prioDevicePeers + prioPeerNetworkIO + prioPeerLatestHandShake +) + +var ( + deviceChartsTmpl = module.Charts{ + deviceNetworkIOChartTmpl.Copy(), + devicePeersChartTmpl.Copy(), + } + + deviceNetworkIOChartTmpl = module.Chart{ + ID: "device_%s_network_io", + Title: "Device traffic", + Units: "B/s", + Fam: "device traffic", + Ctx: "wireguard.device_network_io", + Type: module.Area, + Priority: 
prioDeviceNetworkIO, + Dims: module.Dims{ + {ID: "device_%s_receive", Name: "receive", Algo: module.Incremental}, + {ID: "device_%s_transmit", Name: "transmit", Algo: module.Incremental, Mul: -1}, + }, + } + devicePeersChartTmpl = module.Chart{ + ID: "device_%s_peers", + Title: "Device peers", + Units: "peers", + Fam: "device peers", + Ctx: "wireguard.device_peers", + Priority: prioDevicePeers, + Dims: module.Dims{ + {ID: "device_%s_peers", Name: "peers"}, + }, + } +) + +var ( + peerChartsTmpl = module.Charts{ + peerNetworkIOChartTmpl.Copy(), + peerLatestHandShakeChartTmpl.Copy(), + } + + peerNetworkIOChartTmpl = module.Chart{ + ID: "peer_%s_network_io", + Title: "Peer traffic", + Units: "B/s", + Fam: "peer traffic", + Ctx: "wireguard.peer_network_io", + Type: module.Area, + Priority: prioPeerNetworkIO, + Dims: module.Dims{ + {ID: "peer_%s_receive", Name: "receive", Algo: module.Incremental}, + {ID: "peer_%s_transmit", Name: "transmit", Algo: module.Incremental, Mul: -1}, + }, + } + peerLatestHandShakeChartTmpl = module.Chart{ + ID: "peer_%s_latest_handshake_ago", + Title: "Peer time elapsed since the latest handshake", + Units: "seconds", + Fam: "peer latest handshake", + Ctx: "wireguard.peer_latest_handshake_ago", + Priority: prioPeerLatestHandShake, + Dims: module.Dims{ + {ID: "peer_%s_latest_handshake_ago", Name: "time"}, + }, + } +) + +func newDeviceCharts(device string) *module.Charts { + charts := deviceChartsTmpl.Copy() + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, device) + c.Labels = []module.Label{ + {Key: "device", Value: device}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, device) + } + } + + return charts +} + +func (w *WireGuard) addNewDeviceCharts(device string) { + charts := newDeviceCharts(device) + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *WireGuard) removeDeviceCharts(device string) { + prefix := fmt.Sprintf("device_%s", device) + + for _, c := range *w.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} + +func newPeerCharts(id, device, pubKey string) *module.Charts { + charts := peerChartsTmpl.Copy() + + for _, c := range *charts { + c.ID = fmt.Sprintf(c.ID, id) + c.Labels = []module.Label{ + {Key: "device", Value: device}, + {Key: "public_key", Value: pubKey}, + } + for _, d := range c.Dims { + d.ID = fmt.Sprintf(d.ID, id) + } + } + + return charts +} + +func (w *WireGuard) addNewPeerCharts(id, device, pubKey string) { + charts := newPeerCharts(id, device, pubKey) + + if err := w.Charts().Add(*charts...); err != nil { + w.Warning(err) + } +} + +func (w *WireGuard) removePeerCharts(id string) { + prefix := fmt.Sprintf("peer_%s", id) + + for _, c := range *w.Charts() { + if strings.HasPrefix(c.ID, prefix) { + c.MarkRemove() + c.MarkNotCreated() + } + } +} diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/collect.go b/src/go/collectors/go.d.plugin/modules/wireguard/collect.go new file mode 100644 index 00000000000000..cbcc180eccf928 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/collect.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package wireguard + +import ( + "fmt" + "time" + + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func (w *WireGuard) collect() (map[string]int64, error) { + if w.client == nil { + client, err := w.newWGClient() + if err != nil { + return nil, fmt.Errorf("creating WireGuard client: %v", err) + } + w.client = client + } + + // TODO: probably we need to get a list 
of interfaces and query interfaces using client.Device() + // https://github.com/WireGuard/wgctrl-go/blob/3d4a969bb56bb6931f6661af606bc9c4195b4249/internal/wglinux/client_linux.go#L79-L80 + devices, err := w.client.Devices() + if err != nil { + return nil, fmt.Errorf("retrieving WireGuard devices: %v", err) + } + + if len(devices) == 0 { + w.Info("no WireGuard devices found on the host system") + } + + now := time.Now() + if w.cleanupLastTime.IsZero() { + w.cleanupLastTime = now + } + + mx := make(map[string]int64) + + w.collectDevicesPeers(mx, devices, now) + + if now.Sub(w.cleanupLastTime) > w.cleanupEvery { + w.cleanupLastTime = now + w.cleanupDevicesPeers(devices) + } + + return mx, nil +} + +func (w *WireGuard) collectDevicesPeers(mx map[string]int64, devices []*wgtypes.Device, now time.Time) { + for _, d := range devices { + if !w.devices[d.Name] { + w.devices[d.Name] = true + w.addNewDeviceCharts(d.Name) + } + + mx["device_"+d.Name+"_peers"] = int64(len(d.Peers)) + if len(d.Peers) == 0 { + mx["device_"+d.Name+"_receive"] = 0 + mx["device_"+d.Name+"_transmit"] = 0 + continue + } + + for _, p := range d.Peers { + if p.LastHandshakeTime.IsZero() { + continue + } + + pubKey := p.PublicKey.String() + id := peerID(d.Name, pubKey) + + if !w.peers[id] { + w.peers[id] = true + w.addNewPeerCharts(id, d.Name, pubKey) + } + + mx["device_"+d.Name+"_receive"] += p.ReceiveBytes + mx["device_"+d.Name+"_transmit"] += p.TransmitBytes + mx["peer_"+id+"_receive"] = p.ReceiveBytes + mx["peer_"+id+"_transmit"] = p.TransmitBytes + mx["peer_"+id+"_latest_handshake_ago"] = int64(now.Sub(p.LastHandshakeTime).Seconds()) + } + } +} + +func (w *WireGuard) cleanupDevicesPeers(devices []*wgtypes.Device) { + seenDevices, seenPeers := make(map[string]bool), make(map[string]bool) + for _, d := range devices { + seenDevices[d.Name] = true + for _, p := range d.Peers { + seenPeers[peerID(d.Name, p.PublicKey.String())] = true + } + } + for d := range w.devices { + if !seenDevices[d] { + delete(w.devices, d) + w.removeDeviceCharts(d) + } + } + for p := range w.peers { + if !seenPeers[p] { + delete(w.peers, p) + w.removePeerCharts(p) + } + } +} + +func peerID(device, peerPublicKey string) string { + return device + "_" + peerPublicKey +} diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/config_schema.json b/src/go/collectors/go.d.plugin/modules/wireguard/config_schema.json new file mode 100644 index 00000000000000..c6d6c261f7f54d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/config_schema.json @@ -0,0 +1,13 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/wireguard job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/integrations/wireguard.md b/src/go/collectors/go.d.plugin/modules/wireguard/integrations/wireguard.md new file mode 100644 index 00000000000000..54cbf38cb0e874 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/integrations/wireguard.md @@ -0,0 +1,169 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/wireguard/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/wireguard/metadata.yaml" +sidebar_label: "WireGuard" +learn_status: "Published" +learn_rel_path: "Data Collection/VPNs" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + 
+# WireGuard + + +<img src="https://netdata.cloud/img/wireguard.svg" width="150"/> + + +Plugin: go.d.plugin +Module: wireguard + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + +This collector monitors WireGuard VPN device and peer traffic. + + +It connects to the local WireGuard instance using the [wireguard-go client](https://github.com/WireGuard/wireguard-go). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + +This collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed. + + +### Default Behavior + +#### Auto-Detection + +It automatically detects instances running on localhost. + + +#### Limits + +Doesn't work if Netdata or WireGuard is installed in a container. + + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. + + + +### Per device + +These metrics refer to the VPN network interface. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | VPN network interface | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| wireguard.device_network_io | receive, transmit | B/s | +| wireguard.device_peers | peers | peers | + +### Per peer + +These metrics refer to the VPN peer. + +Labels: + +| Label | Description | +|:-----------|:----------------| +| device | VPN network interface | +| public_key | Public key of a peer | + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| wireguard.peer_network_io | receive, transmit | B/s | +| wireguard.peer_latest_handshake_ago | time | seconds | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +No action required. + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/wireguard.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/wireguard.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | + +</details> + +#### Examples +The collector's metadata ships no configuration examples; a minimal sketch follows below. 
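+As a rough illustration only (the option values come from the table above, while the job name is an assumption, since the collector ships no example), a job definition in `go.d/wireguard.conf` might look like this: + +```yaml +jobs: +  - name: wireguard +    update_every: 1        # data collection frequency in seconds +    autodetection_retry: 0 # zero means no recheck is scheduled +``` 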
+ + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on +  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. + +  ```bash +  cd /usr/libexec/netdata/plugins.d/ +  ``` + +- Switch to the `netdata` user. + +  ```bash +  sudo -u netdata -s +  ``` + +- Run the `go.d.plugin` to debug the collector: + +  ```bash +  ./go.d.plugin -d -m wireguard +  ``` + + diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/metadata.yaml b/src/go/collectors/go.d.plugin/modules/wireguard/metadata.yaml new file mode 100644 index 00000000000000..0ac680d58bcceb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/metadata.yaml @@ -0,0 +1,121 @@ +plugin_name: go.d.plugin +modules: + - meta: + id: collector-go.d.plugin-wireguard + plugin_name: go.d.plugin + module_name: wireguard + monitored_instance: + name: WireGuard + link: https://www.wireguard.com/ + categories: + - data-collection.vpns + icon_filename: wireguard.svg + keywords: + - wireguard + - vpn + - security + most_popular: false + info_provided_to_referring_integrations: + description: "" + related_resources: + integrations: + list: [] + overview: + data_collection: + metrics_description: | + This collector monitors WireGuard VPN device and peer traffic. + method_description: | + It connects to the local WireGuard instance using the [wireguard-go client](https://github.com/WireGuard/wireguard-go). + default_behavior: + auto_detection: + description: | + It automatically detects instances running on localhost. + limits: + description: | + Doesn't work if Netdata or WireGuard is installed in a container. + performance_impact: + description: "" + additional_permissions: + description: | + This collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed. + multi_instance: true + supported_platforms: + include: [] + exclude: [] + setup: + prerequisites: + list: [] + configuration: + file: + name: go.d/wireguard.conf + options: + description: | + The following options can be defined globally: update_every, autodetection_retry. + folding: + title: Config options + enabled: true + list: + - name: update_every + description: Data collection frequency. + default_value: 1 + required: false + - name: autodetection_retry + description: Recheck interval in seconds. Zero means no recheck will be scheduled. + default_value: 0 + required: false + examples: + folding: + title: Config + enabled: true + list: [] + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: device + description: These metrics refer to the VPN network interface. + labels: + - name: device + description: VPN network interface + metrics: + - name: wireguard.device_network_io + description: Device traffic + unit: B/s + chart_type: area + dimensions: + - name: receive + - name: transmit + - name: wireguard.device_peers + description: Device peers + unit: peers + chart_type: line + dimensions: + - name: peers + - name: peer + description: These metrics refer to the VPN peer. 
+ labels: + - name: device + description: VPN network interface + - name: public_key + description: Public key of a peer + metrics: + - name: wireguard.peer_network_io + description: Peer traffic + unit: B/s + chart_type: area + dimensions: + - name: receive + - name: transmit + - name: wireguard.peer_latest_handshake_ago + description: Peer time elapsed since the latest handshake + unit: seconds + chart_type: line + dimensions: + - name: time diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/wireguard.go b/src/go/collectors/go.d.plugin/modules/wireguard/wireguard.go new file mode 100644 index 00000000000000..6587dce3c466dc --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/wireguard.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package wireguard + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("wireguard", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +func New() *WireGuard { + return &WireGuard{ + newWGClient: func() (wgClient, error) { return wgctrl.New() }, + charts: &module.Charts{}, + devices: make(map[string]bool), + peers: make(map[string]bool), + cleanupEvery: time.Minute, + } +} + +type ( + WireGuard struct { + module.Base + + charts *module.Charts + + client wgClient + newWGClient func() (wgClient, error) + + cleanupLastTime time.Time + cleanupEvery time.Duration + + devices map[string]bool + peers map[string]bool + } + wgClient interface { + Devices() ([]*wgtypes.Device, error) + Close() error + } +) + +func (w *WireGuard) Init() bool { + return true +} + +func (w *WireGuard) Check() bool { + return len(w.Collect()) > 0 +} + +func (w *WireGuard) Charts() *module.Charts { + return w.charts +} + +func (w *WireGuard) Collect() map[string]int64 { + mx, err := w.collect() + if err != nil { + w.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (w *WireGuard) Cleanup() { + if w.client == nil { + return + } + if err := w.client.Close(); err != nil { + w.Warningf("cleanup: error on closing connection: %v", err) + } + w.client = nil +} diff --git a/src/go/collectors/go.d.plugin/modules/wireguard/wireguard_test.go b/src/go/collectors/go.d.plugin/modules/wireguard/wireguard_test.go new file mode 100644 index 00000000000000..5e6434dccb1178 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/wireguard/wireguard_test.go @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package wireguard + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func TestWireGuard_Init(t *testing.T) { + assert.True(t, New().Init()) +} + +func TestWireGuard_Charts(t *testing.T) { + assert.Len(t, *New().Charts(), 0) + +} + +func TestWireGuard_Cleanup(t *testing.T) { + tests := map[string]struct { + prepare func(w *WireGuard) + wantClose bool + }{ + "after New": { + wantClose: false, + prepare: func(w *WireGuard) {}, + }, + "after Init": { + wantClose: false, + prepare: func(w *WireGuard) { w.Init() }, + }, + "after Check": { + wantClose: true, + prepare: func(w *WireGuard) { w.Init(); w.Check() }, + }, + "after Collect": { + wantClose: true, + prepare: 
func(w *WireGuard) { w.Init(); w.Collect() }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w := New() + m := &mockClient{} + w.newWGClient = func() (wgClient, error) { return m, nil } + + test.prepare(w) + + require.NotPanics(t, w.Cleanup) + + if test.wantClose { + assert.True(t, m.closeCalled) + } else { + assert.False(t, m.closeCalled) + } + }) + } +} + +func TestWireGuard_Check(t *testing.T) { + tests := map[string]struct { + wantFail bool + prepare func(w *WireGuard) + }{ + "success when devices and peers found": { + wantFail: false, + prepare: func(w *WireGuard) { + m := &mockClient{} + d1 := prepareDevice(1) + d1.Peers = append(d1.Peers, preparePeer("11")) + d1.Peers = append(d1.Peers, preparePeer("12")) + m.devices = append(m.devices, d1) + w.client = m + }, + }, + "success when devices and no peers found": { + wantFail: false, + prepare: func(w *WireGuard) { + m := &mockClient{} + m.devices = append(m.devices, prepareDevice(1)) + w.client = m + }, + }, + "fail when no devices and no peers found": { + wantFail: true, + prepare: func(w *WireGuard) { + w.client = &mockClient{} + }, + }, + "fail when error on retrieving devices": { + wantFail: true, + prepare: func(w *WireGuard) { + w.client = &mockClient{errOnDevices: true} + }, + }, + "fail when error on creating client": { + wantFail: true, + prepare: func(w *WireGuard) { + w.newWGClient = func() (wgClient, error) { return nil, errors.New("mock.newWGClient() error") } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w := New() + require.True(t, w.Init()) + test.prepare(w) + + if test.wantFail { + assert.False(t, w.Check()) + } else { + assert.True(t, w.Check()) + } + }) + } +} + +func TestWireGuard_Collect(t *testing.T) { + type testCaseStep struct { + prepareMock func(m *mockClient) + check func(t *testing.T, w *WireGuard) + } + tests := map[string][]testCaseStep{ + "several devices no peers": { + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(1)) + m.devices = append(m.devices, prepareDevice(2)) + }, + check: func(t *testing.T, w *WireGuard) { + mx := w.Collect() + + expected := map[string]int64{ + "device_wg1_peers": 0, + "device_wg1_receive": 0, + "device_wg1_transmit": 0, + "device_wg2_peers": 0, + "device_wg2_receive": 0, + "device_wg2_transmit": 0, + } + + copyLatestHandshake(mx, expected) + assert.Equal(t, expected, mx) + assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts())) + }, + }, + }, + "several devices several peers each": { + { + prepareMock: func(m *mockClient) { + d1 := prepareDevice(1) + d1.Peers = append(d1.Peers, preparePeer("11")) + d1.Peers = append(d1.Peers, preparePeer("12")) + m.devices = append(m.devices, d1) + + d2 := prepareDevice(2) + d2.Peers = append(d2.Peers, preparePeer("21")) + d2.Peers = append(d2.Peers, preparePeer("22")) + m.devices = append(m.devices, d2) + }, + check: func(t *testing.T, w *WireGuard) { + mx := w.Collect() + + expected := map[string]int64{ + "device_wg1_peers": 2, + "device_wg1_receive": 0, + "device_wg1_transmit": 0, + "device_wg2_peers": 2, + "device_wg2_receive": 0, + "device_wg2_transmit": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + 
"peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + } + + copyLatestHandshake(mx, expected) + assert.Equal(t, expected, mx) + assert.Equal(t, len(deviceChartsTmpl)*2+len(peerChartsTmpl)*4, len(*w.Charts())) + }, + }, + }, + "peers without last handshake time": { + { + prepareMock: func(m *mockClient) { + d1 := prepareDevice(1) + d1.Peers = append(d1.Peers, preparePeer("11")) + d1.Peers = append(d1.Peers, preparePeer("12")) + d1.Peers = append(d1.Peers, prepareNoLastHandshakePeer("13")) + d1.Peers = append(d1.Peers, prepareNoLastHandshakePeer("14")) + m.devices = append(m.devices, d1) + }, + check: func(t *testing.T, w *WireGuard) { + mx := w.Collect() + + expected := map[string]int64{ + "device_wg1_peers": 4, + "device_wg1_receive": 0, + "device_wg1_transmit": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + } + + copyLatestHandshake(mx, expected) + assert.Equal(t, expected, mx) + assert.Equal(t, len(deviceChartsTmpl)+len(peerChartsTmpl)*2, len(*w.Charts())) + }, + }, + }, + "device added at runtime": { + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(1)) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*1, len(*w.Charts())) + }, + }, + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(2)) + }, + check: func(t *testing.T, w *WireGuard) { + mx := w.Collect() + + expected := map[string]int64{ + "device_wg1_peers": 0, + "device_wg1_receive": 0, + "device_wg1_transmit": 0, + "device_wg2_peers": 0, + "device_wg2_receive": 0, + "device_wg2_transmit": 0, + } + copyLatestHandshake(mx, expected) + assert.Equal(t, expected, mx) + assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts())) + + }, + }, + }, + "device removed at run time, no cleanup occurred": { + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(1)) + m.devices = append(m.devices, prepareDevice(2)) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + }, + }, + { + prepareMock: func(m *mockClient) { + m.devices = m.devices[:len(m.devices)-1] + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts())) + assert.Equal(t, 0, calcObsoleteCharts(w.Charts())) + }, + }, + }, + "device removed at run time, cleanup occurred": { + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(1)) + m.devices = append(m.devices, prepareDevice(2)) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + }, + }, + { + prepareMock: func(m 
*mockClient) { + m.devices = m.devices[:len(m.devices)-1] + }, + check: func(t *testing.T, w *WireGuard) { + w.cleanupEvery = time.Second + time.Sleep(time.Second) + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts())) + assert.Equal(t, len(deviceChartsTmpl)*1, calcObsoleteCharts(w.Charts())) + }, + }, + }, + "peer added at runtime": { + { + prepareMock: func(m *mockClient) { + m.devices = append(m.devices, prepareDevice(1)) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*1, len(*w.Charts())) + }, + }, + { + prepareMock: func(m *mockClient) { + d1 := m.devices[0] + d1.Peers = append(d1.Peers, preparePeer("11")) + }, + check: func(t *testing.T, w *WireGuard) { + mx := w.Collect() + + expected := map[string]int64{ + "device_wg1_peers": 1, + "device_wg1_receive": 0, + "device_wg1_transmit": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0, + "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0, + } + copyLatestHandshake(mx, expected) + assert.Equal(t, expected, mx) + assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*1, len(*w.Charts())) + }, + }, + }, + "peer removed at run time, no cleanup occurred": { + { + prepareMock: func(m *mockClient) { + d1 := prepareDevice(1) + d1.Peers = append(d1.Peers, preparePeer("11")) + d1.Peers = append(d1.Peers, preparePeer("12")) + m.devices = append(m.devices, d1) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + }, + }, + { + prepareMock: func(m *mockClient) { + d1 := m.devices[0] + d1.Peers = d1.Peers[:len(d1.Peers)-1] + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*2, len(*w.Charts())) + assert.Equal(t, 0, calcObsoleteCharts(w.Charts())) + }, + }, + }, + "peer removed at run time, cleanup occurred": { + { + prepareMock: func(m *mockClient) { + d1 := prepareDevice(1) + d1.Peers = append(d1.Peers, preparePeer("11")) + d1.Peers = append(d1.Peers, preparePeer("12")) + m.devices = append(m.devices, d1) + }, + check: func(t *testing.T, w *WireGuard) { + _ = w.Collect() + }, + }, + { + prepareMock: func(m *mockClient) { + d1 := m.devices[0] + d1.Peers = d1.Peers[:len(d1.Peers)-1] + }, + check: func(t *testing.T, w *WireGuard) { + w.cleanupEvery = time.Second + time.Sleep(time.Second) + _ = w.Collect() + assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*2, len(*w.Charts())) + assert.Equal(t, len(peerChartsTmpl)*1, calcObsoleteCharts(w.Charts())) + }, + }, + }, + "fails if no devices found": { + { + prepareMock: func(m *mockClient) {}, + check: func(t *testing.T, w *WireGuard) { + assert.Equal(t, map[string]int64(nil), w.Collect()) + }, + }, + }, + "fails if error on getting devices list": { + { + prepareMock: func(m *mockClient) { + m.errOnDevices = true + }, + check: func(t *testing.T, w *WireGuard) { + assert.Equal(t, map[string]int64(nil), w.Collect()) + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + w := New() + require.True(t, w.Init()) + m := &mockClient{} + w.client = m + + for i, step := range test { + t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { + step.prepareMock(m) + step.check(t, w) + }) + } + }) + } +} + +type mockClient struct { + devices []*wgtypes.Device + errOnDevices bool + closeCalled bool +} + +func (m *mockClient) Devices() ([]*wgtypes.Device, error) { + if 
m.errOnDevices { + return nil, errors.New("mock.Devices() error") + } + return m.devices, nil +} + +func (m *mockClient) Close() error { + m.closeCalled = true + return nil +} + +func prepareDevice(num uint8) *wgtypes.Device { + return &wgtypes.Device{ + Name: fmt.Sprintf("wg%d", num), + } +} + +func preparePeer(s string) wgtypes.Peer { + b := make([]byte, 32) + b = append(b[:0], fmt.Sprintf("peer%s", s)...) + k, _ := wgtypes.NewKey(b[:32]) + + return wgtypes.Peer{ + PublicKey: k, + LastHandshakeTime: time.Now().Add(-time.Minute), + ReceiveBytes: 0, + TransmitBytes: 0, + } +} + +func prepareNoLastHandshakePeer(s string) wgtypes.Peer { + p := preparePeer(s) + var lh time.Time + p.LastHandshakeTime = lh + return p +} + +func copyLatestHandshake(dst, src map[string]int64) { + for k, v := range src { + if strings.HasSuffix(k, "latest_handshake_ago") { + if _, ok := dst[k]; ok { + dst[k] = v + } + } + } +} + +func calcObsoleteCharts(charts *module.Charts) int { + var num int + for _, c := range *charts { + if c.Obsolete { + num++ + } + } + return num +} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/README.md b/src/go/collectors/go.d.plugin/modules/x509check/README.md new file mode 120000 index 00000000000000..28978ccf7c93da --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/README.md @@ -0,0 +1 @@ +integrations/x.509_certificate.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/x509check/charts.go b/src/go/collectors/go.d.plugin/modules/x509check/charts.go new file mode 100644 index 00000000000000..50cef77e5e63e8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/charts.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package x509check + +import "github.com/netdata/go.d.plugin/agent/module" + +var ( + baseCharts = module.Charts{ + timeUntilExpirationChart.Copy(), + } + withRevocationCharts = module.Charts{ + timeUntilExpirationChart.Copy(), + revocationStatusChart.Copy(), + } + + timeUntilExpirationChart = module.Chart{ + ID: "time_until_expiration", + Title: "Time Until Certificate Expiration", + Units: "seconds", + Fam: "expiration time", + Ctx: "x509check.time_until_expiration", + Opts: module.Opts{StoreFirst: true}, + Dims: module.Dims{ + {ID: "expiry"}, + }, + Vars: module.Vars{ + {ID: "days_until_expiration_warning"}, + {ID: "days_until_expiration_critical"}, + }, + } + revocationStatusChart = module.Chart{ + ID: "revocation_status", + Title: "Revocation Status", + Units: "boolean", + Fam: "revocation", + Ctx: "x509check.revocation_status", + Opts: module.Opts{StoreFirst: true}, + Dims: module.Dims{ + {ID: "revoked"}, + }, + } +) diff --git a/src/go/collectors/go.d.plugin/modules/x509check/collect.go b/src/go/collectors/go.d.plugin/modules/x509check/collect.go new file mode 100644 index 00000000000000..cad0ae1696bbce --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/collect.go @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package x509check + +import ( + "crypto/x509" + "fmt" + "time" + + "github.com/cloudflare/cfssl/revoke" +) + +func (x *X509Check) collect() (map[string]int64, error) { + certs, err := x.prov.certificates() + if err != nil { + return nil, err + } + + if len(certs) == 0 { + return nil, fmt.Errorf("no certificate was provided by '%s'", x.Config.Source) + } + + mx := make(map[string]int64) + + x.collectExpiration(mx, certs) + if x.CheckRevocation { + x.collectRevocation(mx, certs) + } + + return mx, nil +} + +func (x *X509Check) 
collectExpiration(mx map[string]int64, certs []*x509.Certificate) { + expiry := time.Until(certs[0].NotAfter).Seconds() + mx["expiry"] = int64(expiry) + mx["days_until_expiration_warning"] = x.DaysUntilWarn + mx["days_until_expiration_critical"] = x.DaysUntilCritical + +} + +func (x *X509Check) collectRevocation(mx map[string]int64, certs []*x509.Certificate) { + rev, ok, err := revoke.VerifyCertificateError(certs[0]) + if err != nil { + x.Debug(err) + } + switch { + case ok && rev: + mx["revoked"] = 1 + case ok && !rev: + mx["revoked"] = 0 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/config_schema.json b/src/go/collectors/go.d.plugin/modules/x509check/config_schema.json new file mode 100644 index 00000000000000..5194715aed19f1 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/config_schema.json @@ -0,0 +1,54 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/x509check job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "source": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tlscfg": { + "type": "object", + "properties": { + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "tls_ca", + "tls_cert", + "tls_key" + ] + }, + "days_until_expiration_warning": { + "type": "integer" + }, + "days_until_expiration_critical": { + "type": "integer" + }, + "check_revocation_status": { + "type": "boolean" + } + }, + "required": [ + "name", + "source" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/init.go b/src/go/collectors/go.d.plugin/modules/x509check/init.go new file mode 100644 index 00000000000000..361e110ba01bd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/init.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package x509check + +import ( + "errors" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (x *X509Check) validateConfig() error { + if x.Source == "" { + return errors.New("source is not set") + } + return nil +} + +func (x *X509Check) initProvider() (provider, error) { + return newProvider(x.Config) +} + +func (x *X509Check) initCharts() *module.Charts { + var charts *module.Charts + if x.CheckRevocation { + charts = withRevocationCharts.Copy() + } else { + charts = baseCharts.Copy() + } + + for _, chart := range *charts { + chart.Labels = []module.Label{ + {Key: "source", Value: x.Source}, + } + } + + return charts + +} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/integrations/x.509_certificate.md b/src/go/collectors/go.d.plugin/modules/x509check/integrations/x.509_certificate.md new file mode 100644 index 00000000000000..2a070b30a41e34 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/integrations/x.509_certificate.md @@ -0,0 +1,225 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/x509check/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/x509check/metadata.yaml" +sidebar_label: "X.509 certificate" +learn_status: "Published" +learn_rel_path: "Data Collection/Synthetic Checks" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# X.509 certificate + + +<img src="https://netdata.cloud/img/lock.svg" width="150"/> + + +Plugin: 
go.d.plugin
+Module: x509check
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+
+This collector monitors x509 certificates' expiration time and revocation status.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+### Per source
+
+These metrics refer to the configured source.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| source | Configured source. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| x509check.time_until_expiration | expiry | seconds |
+| x509check.revocation_status | revoked | boolean |
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | time until x509 certificate expires |
+| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status (0: revoked, 1: valid) |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/x509check.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/x509check.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| source | Certificate source. Allowed schemes: https, smtp, tcp, tcp4, tcp6, udp, udp4, udp6, file. | | no |
+| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |
+| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |
+| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |
+| timeout | SSL connection timeout. | 2 | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Website certificate
+
+Website certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_site_cert
+    source: https://my_site.org:443
+
+```
+</details>
+
+##### Local file certificate
+
+Local file certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_file_cert
+    source: file:///home/me/cert.pem
+
+```
+</details>
+
+##### SMTP certificate
+
+SMTP certificate.
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_smtp_cert
+    source: smtp://smtp.my_mail.org:587
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define more than one job, their names must be unique.
+
+Check the expiration status of multiple websites' certificates.
+
+
+<details><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: my_site_cert1
+    source: https://my_site1.org:443
+
+  - name: my_site_cert2
+    source: https://my_site2.org:443
+
+  - name: my_site_cert3
+    source: https://my_site3.org:443
+
+```
+</details>
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m x509check
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/x509check/metadata.yaml b/src/go/collectors/go.d.plugin/modules/x509check/metadata.yaml
new file mode 100644
index 00000000000000..6f974b94476442
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/x509check/metadata.yaml
@@ -0,0 +1,171 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-x509check
+      plugin_name: go.d.plugin
+      module_name: x509check
+      monitored_instance:
+        name: X.509 certificate
+        link: ""
+        categories:
+          - data-collection.synthetic-checks
+        icon_filename: lock.svg
+      keywords:
+        - x509
+        - certificate
+      most_popular: false
+      info_provided_to_referring_integrations:
+        description: ""
+      related_resources:
+        integrations:
+          list: []
+    overview:
+      data_collection:
+        metrics_description: ""
+        method_description: |
+          This collector monitors x509 certificates' expiration time and revocation status.
+      default_behavior:
+        auto_detection:
+          description: ""
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list: []
+      configuration:
+        file:
+          name: "go.d/x509check.conf"
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: source
+              description: "Certificate source. Allowed schemes: https, smtp, tcp, tcp4, tcp6, udp, udp4, udp6, file."
+              default_value: ""
+              required: false
+            - name: days_until_expiration_warning
+              description: Number of days before the alarm status is warning.
+              default_value: 30
+              required: false
+            - name: days_until_expiration_critical
+              description: Number of days before the alarm status is critical.
+              default_value: 15
+              required: false
+            - name: check_revocation_status
+              description: Whether to check the revocation status of the certificate.
+              default_value: false
+              required: false
+            - name: timeout
+              description: SSL connection timeout.
+              default_value: 2
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Website certificate
+              description: Website certificate.
+              config: |
+                jobs:
+                  - name: my_site_cert
+                    source: https://my_site.org:443
+            - name: Local file certificate
+              description: Local file certificate.
+              config: |
+                jobs:
+                  - name: my_file_cert
+                    source: file:///home/me/cert.pem
+            - name: SMTP certificate
+              description: SMTP certificate.
+              config: |
+                jobs:
+                  - name: my_smtp_cert
+                    source: smtp://smtp.my_mail.org:587
+            - name: Multi-instance
+              description: |
+                > **Note**: When you define more than one job, their names must be unique.
+
+                Check the expiration status of multiple websites' certificates.
+              config: |
+                jobs:
+                  - name: my_site_cert1
+                    source: https://my_site1.org:443
+
+                  - name: my_site_cert2
+                    source: https://my_site2.org:443
+
+                  - name: my_site_cert3
+                    source: https://my_site3.org:443
+    troubleshooting:
+      problems:
+        list: []
+    alerts:
+      - name: x509check_days_until_expiration
+        metric: x509check.time_until_expiration
+        info: time until x509 certificate expires
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf
+      - name: x509check_revocation_status
+        metric: x509check.revocation_status
+        info: "x509 certificate revocation status (0: revoked, 1: valid)"
+        link: https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf
+    metrics:
+      folding:
+        title: Metrics
+        enabled: false
+      description: ""
+      availability: []
+      scopes:
+        - name: source
+          description: These metrics refer to the configured source.
+          labels:
+            - name: source
+              description: Configured source.
+          metrics:
+            - name: x509check.time_until_expiration
+              description: Time Until Certificate Expiration
+              unit: seconds
+              chart_type: line
+              dimensions:
+                - name: expiry
+            - name: x509check.revocation_status
+              description: Revocation Status
+              unit: boolean
+              chart_type: line
+              dimensions:
+                - name: revoked
diff --git a/src/go/collectors/go.d.plugin/modules/x509check/provider.go b/src/go/collectors/go.d.plugin/modules/x509check/provider.go
new file mode 100644
index 00000000000000..c5ac4d71151a3c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/x509check/provider.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+    "crypto/tls"
+    "crypto/x509"
+    "encoding/pem"
+    "fmt"
+    "net"
+    "net/smtp"
+    "net/url"
+    "os"
+    "time"
+
+    "github.com/netdata/go.d.plugin/pkg/tlscfg"
+)
+
+type provider interface {
+    certificates() ([]*x509.Certificate, error)
+}
+
+type fromFile struct {
+    path string
+}
+
+type fromNet struct {
+    url       *url.URL
+    tlsConfig *tls.Config
+    timeout   time.Duration
+}
+
+type fromSMTP struct {
+    url       *url.URL
+    tlsConfig *tls.Config
+    timeout   time.Duration
+}
+
+func newProvider(config Config) (provider, error) {
+    sourceURL, err := url.Parse(config.Source)
+    if err != nil {
+        return nil, fmt.Errorf("source parse: %v", err)
+    }
+
+    tlsCfg, err := tlscfg.NewTLSConfig(config.TLSConfig)
+    if err != nil {
+        return nil, fmt.Errorf("create tls config: %v", err)
+    }
+
+    if tlsCfg == nil {
+        tlsCfg = &tls.Config{}
+    }
+    tlsCfg.ServerName = sourceURL.Hostname()
+
+    switch sourceURL.Scheme {
+    case "file":
+        return &fromFile{path: sourceURL.Path}, nil
+    case "https", "udp", "udp4", "udp6", "tcp", "tcp4", "tcp6":
+        if sourceURL.Scheme == "https" {
+            sourceURL.Scheme = "tcp"
+        }
+        return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil
+    case "smtp":
+        sourceURL.Scheme = "tcp"
+        return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil
+    default:
+        return nil, fmt.Errorf("unsupported scheme '%s'", sourceURL.Scheme)
+    }
+}
+
+func (f fromFile) certificates() ([]*x509.Certificate, error) {
+    content, err := os.ReadFile(f.path)
+    if err != nil {
+        return nil, fmt.Errorf("error on reading '%s': %v", f.path, err)
+    }
+
+    block, _ := pem.Decode(content)
+    if block == nil {
+        return nil, fmt.Errorf("error on decoding '%s': no PEM data found", f.path)
+    }
+
+    cert, err := x509.ParseCertificate(block.Bytes)
+    if err != nil {
+        return nil, fmt.Errorf("error on parsing certificate '%s': %v", f.path, err)
+    }
+
+    return []*x509.Certificate{cert}, nil
+}
+
+func (f fromNet) certificates() ([]*x509.Certificate, error) {
+    ipConn, err := net.DialTimeout(f.url.Scheme, f.url.Host, f.timeout)
+    if err != nil {
+        return nil, fmt.Errorf("error on dial to '%s': %v", f.url, err)
+    }
+    defer func() { _ = ipConn.Close() }()
+
+    conn := tls.Client(ipConn, f.tlsConfig.Clone())
+    defer func() { _ = conn.Close() }()
+    if err := conn.Handshake(); err != nil {
+        return nil, fmt.Errorf("error on SSL handshake with '%s': %v", f.url, err)
+    }
+
+    certs := conn.ConnectionState().PeerCertificates
+    return certs, nil
+}
+
+func (f fromSMTP) certificates() ([]*x509.Certificate, error) {
+    ipConn, err := net.DialTimeout(f.url.Scheme, f.url.Host, f.timeout)
+    if err != nil {
+        return nil, fmt.Errorf("error on dial to '%s': %v", f.url, err)
+    }
+    defer func() { _ = ipConn.Close() }()
+
+    host, _, _ := net.SplitHostPort(f.url.Host)
+    smtpClient, err := smtp.NewClient(ipConn, host)
+    if err != nil {
+        return nil, fmt.Errorf("error on
creating SMTP client: %v", err) + } + defer func() { _ = smtpClient.Quit() }() + + err = smtpClient.StartTLS(f.tlsConfig.Clone()) + if err != nil { + return nil, fmt.Errorf("error on startTLS with '%s': %v", f.url, err) + } + + conn, ok := smtpClient.TLSConnectionState() + if !ok { + return nil, fmt.Errorf("startTLS didn't succeed") + } + return conn.PeerCertificates, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/x509check.go b/src/go/collectors/go.d.plugin/modules/x509check/x509check.go new file mode 100644 index 00000000000000..ed3a10b2fcbb47 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/x509check.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package x509check + +import ( + _ "embed" + "time" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + cfssllog "github.com/cloudflare/cfssl/log" + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + cfssllog.Level = cfssllog.LevelFatal + module.Register("x509check", module.Creator{ + JobConfigSchema: configSchema, + Defaults: module.Defaults{ + UpdateEvery: 60, + }, + Create: func() module.Module { return New() }, + }) +} + +func New() *X509Check { + return &X509Check{ + Config: Config{ + Timeout: web.Duration{Duration: time.Second * 2}, + DaysUntilWarn: 14, + DaysUntilCritical: 7, + }, + } +} + +type Config struct { + Source string + Timeout web.Duration + tlscfg.TLSConfig `yaml:",inline"` + DaysUntilWarn int64 `yaml:"days_until_expiration_warning"` + DaysUntilCritical int64 `yaml:"days_until_expiration_critical"` + CheckRevocation bool `yaml:"check_revocation_status"` +} + +type X509Check struct { + module.Base + Config `yaml:",inline"` + charts *module.Charts + prov provider +} + +func (x *X509Check) Init() bool { + if err := x.validateConfig(); err != nil { + x.Errorf("config validation: %v", err) + return false + } + + prov, err := x.initProvider() + if err != nil { + x.Errorf("certificate provider init: %v", err) + return false + } + x.prov = prov + + x.charts = x.initCharts() + + return true +} + +func (x *X509Check) Check() bool { + return len(x.Collect()) > 0 +} + +func (x *X509Check) Charts() *module.Charts { + return x.charts +} + +func (x *X509Check) Collect() map[string]int64 { + mx, err := x.collect() + if err != nil { + x.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} + +func (x *X509Check) Cleanup() {} diff --git a/src/go/collectors/go.d.plugin/modules/x509check/x509check_test.go b/src/go/collectors/go.d.plugin/modules/x509check/x509check_test.go new file mode 100644 index 00000000000000..2c628af0af7c01 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/x509check/x509check_test.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package x509check + +import ( + "crypto/x509" + "errors" + "testing" + + "github.com/netdata/go.d.plugin/pkg/tlscfg" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestX509Check_Cleanup(t *testing.T) { + assert.NotPanics(t, New().Cleanup) +} + +func TestX509Check_Charts(t *testing.T) { + x509Check := New() + x509Check.Source = "https://example.com" + require.True(t, x509Check.Init()) + assert.NotNil(t, x509Check.Charts()) +} + +func TestX509Check_Init(t *testing.T) { + const ( + file = iota + net + smtp + ) + tests := map[string]struct { + config Config + providerType int + err bool + }{ + "ok from net https": { + config: 
Config{Source: "https://example.org"}, + providerType: net, + }, + "ok from net tcp": { + config: Config{Source: "tcp://example.org"}, + providerType: net, + }, + "ok from file": { + config: Config{Source: "file:///home/me/cert.pem"}, + providerType: file, + }, + "ok from smtp": { + config: Config{Source: "smtp://smtp.my_mail.org:587"}, + providerType: smtp, + }, + "empty source": { + config: Config{Source: ""}, + err: true}, + "unknown provider": { + config: Config{Source: "http://example.org"}, + err: true, + }, + "nonexistent TLSCA": { + config: Config{Source: "https://example.org", TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}, + err: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + x509Check := New() + x509Check.Config = test.config + + if test.err { + assert.False(t, x509Check.Init()) + } else { + require.True(t, x509Check.Init()) + + var typeOK bool + switch test.providerType { + case file: + _, typeOK = x509Check.prov.(*fromFile) + case net: + _, typeOK = x509Check.prov.(*fromNet) + case smtp: + _, typeOK = x509Check.prov.(*fromSMTP) + } + + assert.True(t, typeOK) + } + }) + } +} + +func TestX509Check_Check(t *testing.T) { + x509Check := New() + x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} + + assert.True(t, x509Check.Check()) +} + +func TestX509Check_Check_ReturnsFalseOnProviderError(t *testing.T) { + x509Check := New() + x509Check.prov = &mockProvider{err: true} + + assert.False(t, x509Check.Check()) +} + +func TestX509Check_Collect(t *testing.T) { + x509Check := New() + x509Check.Source = "https://example.com" + require.True(t, x509Check.Init()) + x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} + + collected := x509Check.Collect() + + assert.NotZero(t, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, x509Check, collected) +} + +func TestX509Check_Collect_ReturnsNilOnProviderError(t *testing.T) { + x509Check := New() + x509Check.prov = &mockProvider{err: true} + + assert.Nil(t, x509Check.Collect()) +} + +func TestX509Check_Collect_ReturnsNilOnZeroCertificates(t *testing.T) { + x509Check := New() + x509Check.prov = &mockProvider{certs: []*x509.Certificate{}} + mx := x509Check.Collect() + + assert.Nil(t, mx) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, x509Check *X509Check, collected map[string]int64) { + for _, chart := range *x509Check.Charts() { + for _, dim := range chart.Dims { + _, ok := collected[dim.ID] + assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID) + } + for _, v := range chart.Vars { + _, ok := collected[v.ID] + assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID) + } + } +} + +type mockProvider struct { + certs []*x509.Certificate + err bool +} + +func (m mockProvider) certificates() ([]*x509.Certificate, error) { + if m.err { + return nil, errors.New("mock certificates error") + } + return m.certs, nil +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/README.md b/src/go/collectors/go.d.plugin/modules/zookeeper/README.md new file mode 120000 index 00000000000000..ae81b371475fdb --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/README.md @@ -0,0 +1 @@ +integrations/zookeeper.md \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/charts.go b/src/go/collectors/go.d.plugin/modules/zookeeper/charts.go new file mode 100644 index 00000000000000..5828ec3e1500ad --- /dev/null +++ 
b/src/go/collectors/go.d.plugin/modules/zookeeper/charts.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import "github.com/netdata/go.d.plugin/agent/module" + +type ( + Charts = module.Charts + Dims = module.Dims + Vars = module.Vars +) + +var charts = Charts{ + { + ID: "requests", + Title: "Outstanding Requests", + Units: "requests", + Fam: "requests", + Ctx: "zookeeper.requests", + Dims: Dims{ + {ID: "outstanding_requests", Name: "outstanding"}, + }, + }, + { + ID: "requests_latency", + Title: "Requests Latency", + Units: "ms", + Fam: "requests", + Ctx: "zookeeper.requests_latency", + Dims: Dims{ + {ID: "min_latency", Name: "min", Div: 1000}, + {ID: "avg_latency", Name: "avg", Div: 1000}, + {ID: "max_latency", Name: "max", Div: 1000}, + }, + }, + { + ID: "connections", + Title: "Alive Connections", + Units: "connections", + Fam: "connections", + Ctx: "zookeeper.connections", + Dims: Dims{ + {ID: "num_alive_connections", Name: "alive"}, + }, + }, + { + ID: "packets", + Title: "Packets", + Units: "pps", + Fam: "net", + Ctx: "zookeeper.packets", + Dims: Dims{ + {ID: "packets_received", Name: "received", Algo: module.Incremental}, + {ID: "packets_sent", Name: "sent", Algo: module.Incremental, Mul: -1}, + }, + }, + { + ID: "file_descriptor", + Title: "Open File Descriptors", + Units: "file descriptors", + Fam: "file descriptors", + Ctx: "zookeeper.file_descriptor", + Dims: Dims{ + {ID: "open_file_descriptor_count", Name: "open"}, + }, + Vars: Vars{ + {ID: "max_file_descriptor_count"}, + }, + }, + { + ID: "nodes", + Title: "Number of Nodes", + Units: "nodes", + Fam: "data tree", + Ctx: "zookeeper.nodes", + Dims: Dims{ + {ID: "znode_count", Name: "znode"}, + {ID: "ephemerals_count", Name: "ephemerals"}, + }, + }, + { + ID: "watches", + Title: "Number of Watches", + Units: "watches", + Fam: "data tree", + Ctx: "zookeeper.watches", + Dims: Dims{ + {ID: "watch_count", Name: "watches"}, + }, + }, + { + ID: "approximate_data_size", + Title: "Approximate Data Tree Size", + Units: "KiB", + Fam: "data tree", + Ctx: "zookeeper.approximate_data_size", + Dims: Dims{ + {ID: "approximate_data_size", Name: "size", Div: 1024}, + }, + }, + { + ID: "server_state", + Title: "Server State", + Units: "state", + Fam: "server state", + Ctx: "zookeeper.server_state", + Dims: Dims{ + {ID: "server_state", Name: "state"}, + }, + }, +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/collect.go b/src/go/collectors/go.d.plugin/modules/zookeeper/collect.go new file mode 100644 index 00000000000000..97d6f3e6cdcc7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/collect.go @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "fmt" + "strconv" + "strings" +) + +func (z *Zookeeper) collect() (map[string]int64, error) { + return z.collectMntr() +} + +func (z *Zookeeper) collectMntr() (map[string]int64, error) { + const command = "mntr" + lines, err := z.fetch("mntr") + if err != nil { + return nil, err + } + switch len(lines) { + case 0: + return nil, fmt.Errorf("'%s' command returned empty response", command) + case 1: + // mntr is not executed because it is not in the whitelist. 
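+        // ZooKeeper signals the rejection with a single-line reply; see
+        // testdata/mntr_notinwhitelist.txt for a captured example.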
+ return nil, fmt.Errorf("'%s' command returned bad response: %s", command, lines[0]) + } + + mx := make(map[string]int64) + for _, line := range lines { + parts := strings.Fields(line) + if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") { + continue + } + + key, value := strings.TrimPrefix(parts[0], "zk_"), parts[1] + switch key { + case "version": + case "server_state": + mx[key] = convertServerState(value) + case "min_latency", "avg_latency", "max_latency": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + continue + } + mx[key] = int64(v * 1000) + default: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + continue + } + mx[key] = int64(v) + } + } + + if len(mx) == 0 { + return nil, fmt.Errorf("'%s' command: failed to parse response", command) + } + return mx, nil +} + +func convertServerState(state string) int64 { + switch state { + default: + return 0 + case "leader": + return 1 + case "follower": + return 2 + case "observer": + return 3 + case "standalone": + return 4 + } +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/config_schema.json b/src/go/collectors/go.d.plugin/modules/zookeeper/config_schema.json new file mode 100644 index 00000000000000..259987aba981ae --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/config_schema.json @@ -0,0 +1,38 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/zookeeper job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher.go b/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher.go new file mode 100644 index 00000000000000..7c3aae0eab4794 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher.go @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "bytes" + "fmt" + "unsafe" + + "github.com/netdata/go.d.plugin/pkg/socket" +) + +const limitReadLines = 2000 + +type zookeeperFetcher struct { + socket.Client +} + +func (c *zookeeperFetcher) fetch(command string) (rows []string, err error) { + if err = c.Connect(); err != nil { + return nil, err + } + defer func() { _ = c.Disconnect() }() + + var num int + clientErr := c.Command(command, func(b []byte) bool { + if !isZKLine(b) || isMntrLineOK(b) { + rows = append(rows, string(b)) + } + if num += 1; num >= limitReadLines { + err = fmt.Errorf("read line limit exceeded (%d)", limitReadLines) + return false + } + return true + }) + if clientErr != nil { + return nil, clientErr + } + if err != nil { + return nil, err + } + return rows, nil +} + +func isZKLine(line []byte) bool { + return bytes.HasPrefix(line, []byte("zk_")) +} + +func isMntrLineOK(line []byte) bool { + idx := bytes.LastIndexByte(line, '\t') + return idx > 0 && collectedZKKeys[unsafeString(line)[:idx]] +} + +func unsafeString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +var collectedZKKeys = map[string]bool{ + "zk_num_alive_connections": true, + "zk_outstanding_requests": true, + "zk_min_latency": true, + "zk_avg_latency": true, + "zk_max_latency": true, + "zk_packets_received": true, + 
"zk_packets_sent": true, + "zk_open_file_descriptor_count": true, + "zk_max_file_descriptor_count": true, + "zk_znode_count": true, + "zk_ephemerals_count": true, + "zk_watch_count": true, + "zk_approximate_data_size": true, + "zk_server_state": true, +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher_test.go b/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher_test.go new file mode 100644 index 00000000000000..cdc9c0e2680fd3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/fetcher_test.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "testing" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/stretchr/testify/assert" +) + +func Test_clientFetch(t *testing.T) { + c := &zookeeperFetcher{Client: &mockSocket{rowsNumResp: 10}} + + rows, err := c.fetch("whatever\n") + assert.NoError(t, err) + assert.Len(t, rows, 10) + + rows, err = c.fetch("whatever\n") + assert.NoError(t, err) + assert.Len(t, rows, 10) +} + +func Test_clientFetchReadLineLimitExceeded(t *testing.T) { + c := &zookeeperFetcher{Client: &mockSocket{rowsNumResp: limitReadLines + 1}} + + rows, err := c.fetch("whatever\n") + assert.Error(t, err) + assert.Len(t, rows, 0) +} + +type mockSocket struct { + rowsNumResp int +} + +func (m *mockSocket) Connect() error { + return nil +} + +func (m *mockSocket) Disconnect() error { + return nil +} + +func (m *mockSocket) Command(command string, process socket.Processor) error { + for i := 0; i < m.rowsNumResp; i++ { + process([]byte(command)) + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/integrations/zookeeper.md b/src/go/collectors/go.d.plugin/modules/zookeeper/integrations/zookeeper.md new file mode 100644 index 00000000000000..c3d3337dc19580 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/integrations/zookeeper.md @@ -0,0 +1,215 @@ +<!--startmeta +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/zookeeper/README.md" +meta_yaml: "https://github.com/netdata/go.d.plugin/edit/master/modules/zookeeper/metadata.yaml" +sidebar_label: "ZooKeeper" +learn_status: "Published" +learn_rel_path: "Data Collection/Service Discovery / Registry" +most_popular: False +message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE" +endmeta--> + +# ZooKeeper + + +<img src="https://netdata.cloud/img/zookeeper.svg" width="150"/> + + +Plugin: go.d.plugin +Module: zookeeper + +<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" /> + +## Overview + + + +It connects to the Zookeeper instance via a TCP and executes the following commands: + +- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands). + + +This collector is supported on all platforms. + +This collector supports collecting metrics from multiple instances of this integration, including remote instances. + + +### Default Behavior + +#### Auto-Detection + +By default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets: + +- 127.0.0.1:2181 +- 127.0.0.1:2182 + + +#### Limits + +The default configuration for this integration does not impose any limits on data collection. + +#### Performance Impact + +The default configuration for this integration is not expected to impose a significant performance impact on the system. + + +## Metrics + +Metrics grouped by *scope*. + +The scope defines the instance that the metric belongs to. 
An instance is uniquely identified by a set of labels. + + + +### Per ZooKeeper instance + +These metrics refer to the entire monitored application. + +This scope has no labels. + +Metrics: + +| Metric | Dimensions | Unit | +|:------|:----------|:----| +| zookeeper.requests | outstanding | requests | +| zookeeper.requests_latency | min, avg, max | ms | +| zookeeper.connections | alive | connections | +| zookeeper.packets | received, sent | pps | +| zookeeper.file_descriptor | open | file descriptors | +| zookeeper.nodes | znode, ephemerals | nodes | +| zookeeper.watches | watches | watches | +| zookeeper.approximate_data_size | size | KiB | +| zookeeper.server_state | state | state | + + + +## Alerts + +There are no alerts configured by default for this integration. + + +## Setup + +### Prerequisites + +#### Whitelist `mntr` command + +Add `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw). + + + +### Configuration + +#### File + +The configuration file name for this integration is `go.d/zookeeper.conf`. + + +You can edit the configuration file using the `edit-config` script from the +Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). + +```bash +cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata +sudo ./edit-config go.d/zookeeper.conf +``` +#### Options + +The following options can be defined globally: update_every, autodetection_retry. + + +<details><summary>Config options</summary> + +| Name | Description | Default | Required | +|:----|:-----------|:-------|:--------:| +| update_every | Data collection frequency. | 1 | no | +| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | +| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes | +| timeout | Connection/read/write/ssl handshake timeout. | 1 | no | +| use_tls | Whether to use TLS or not. | no | no | +| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | +| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | +| tls_cert | Client TLS certificate. | | no | +| tls_key | Client TLS key. | | no | + +</details> + +#### Examples + +##### Basic + +Local server. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:2181 + +``` +</details> + +##### TLS with self-signed certificate + +Zookeeper with TLS and self-signed certificate. + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:2181 + use_tls: yes + tls_skip_verify: yes + +``` +</details> + +##### Multi-instance + +> **Note**: When you define multiple jobs, their names must be unique. + +Collecting metrics from local and remote instances. + + +<details><summary>Config</summary> + +```yaml +jobs: + - name: local + address: 127.0.0.1:2181 + + - name: remote + address: 192.0.2.1:2181 + +``` +</details> + + + +## Troubleshooting + +### Debug Mode + +To troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output +should give you clues as to why the collector isn't working. + +- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on + your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. 
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m zookeeper
+  ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/metadata.yaml b/src/go/collectors/go.d.plugin/modules/zookeeper/metadata.yaml
new file mode 100644
index 00000000000000..527a55fb4872a2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/zookeeper/metadata.yaml
@@ -0,0 +1,202 @@
+plugin_name: go.d.plugin
+modules:
+  - meta:
+      id: collector-go.d.plugin-zookeeper
+      plugin_name: go.d.plugin
+      module_name: zookeeper
+      monitored_instance:
+        name: ZooKeeper
+        link: https://zookeeper.apache.org/
+        categories:
+          - data-collection.service-discovery-registry
+        icon_filename: zookeeper.svg
+      keywords:
+        - zookeeper
+      most_popular: false
+      info_provided_to_referring_integrations:
+        description: ""
+      related_resources:
+        integrations:
+          list:
+            - plugin_name: apps.plugin
+              module_name: apps
+    overview:
+      data_collection:
+        metrics_description: ""
+        method_description: |
+          It connects to the Zookeeper instance over TCP and executes the following commands:
+
+          - [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).
+      default_behavior:
+        auto_detection:
+          description: |
+            By default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:
+
+            - 127.0.0.1:2181
+            - 127.0.0.1:2182
+        limits:
+          description: ""
+        performance_impact:
+          description: ""
+      additional_permissions:
+        description: ""
+      multi_instance: true
+      supported_platforms:
+        include: []
+        exclude: []
+    setup:
+      prerequisites:
+        list:
+          - title: Whitelist `mntr` command
+            description: |
+              Add `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).
+      configuration:
+        file:
+          name: "go.d/zookeeper.conf"
+        options:
+          description: |
+            The following options can be defined globally: update_every, autodetection_retry.
+          folding:
+            title: Config options
+            enabled: true
+          list:
+            - name: update_every
+              description: Data collection frequency.
+              default_value: 1
+              required: false
+            - name: autodetection_retry
+              description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+              default_value: 0
+              required: false
+            - name: address
+              description: Server address. The format is IP:PORT.
+              default_value: 127.0.0.1:2181
+              required: true
+            - name: timeout
+              description: Connection/read/write/ssl handshake timeout.
+              default_value: 1
+              required: false
+            - name: use_tls
+              description: Whether to use TLS or not.
+              default_value: false
+              required: false
+            - name: tls_skip_verify
+              description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+              default_value: false
+              required: false
+            - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+              default_value: ""
+              required: false
+            - name: tls_cert
+              description: Client TLS certificate.
+              default_value: ""
+              required: false
+            - name: tls_key
+              description: Client TLS key.
+              default_value: ""
+              required: false
+        examples:
+          folding:
+            title: Config
+            enabled: true
+          list:
+            - name: Basic
+              description: Local server.
+              config: |
+                jobs:
+                  - name: local
+                    address: 127.0.0.1:2181
+            - name: TLS with self-signed certificate
+              description: Zookeeper with TLS and self-signed certificate.
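+              # tls_skip_verify disables server certificate chain and hostname
+              # validation; suitable only for self-signed setups like this one.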
+ config: | + jobs: + - name: local + address: 127.0.0.1:2181 + use_tls: yes + tls_skip_verify: yes + - name: Multi-instance + description: | + > **Note**: When you define multiple jobs, their names must be unique. + + Collecting metrics from local and remote instances. + config: | + jobs: + - name: local + address: 127.0.0.1:2181 + + - name: remote + address: 192.0.2.1:2181 + troubleshooting: + problems: + list: [] + alerts: [] + metrics: + folding: + title: Metrics + enabled: false + description: "" + availability: [] + scopes: + - name: global + description: These metrics refer to the entire monitored application. + labels: [] + metrics: + - name: zookeeper.requests + description: Outstanding Requests + unit: requests + chart_type: line + dimensions: + - name: outstanding + - name: zookeeper.requests_latency + description: Requests Latency + unit: ms + chart_type: line + dimensions: + - name: min + - name: avg + - name: max + - name: zookeeper.connections + description: Alive Connections + unit: connections + chart_type: line + dimensions: + - name: alive + - name: zookeeper.packets + description: Packets + unit: pps + chart_type: line + dimensions: + - name: received + - name: sent + - name: zookeeper.file_descriptor + description: Open File Descriptors + unit: file descriptors + chart_type: line + dimensions: + - name: open + - name: zookeeper.nodes + description: Number of Nodes + unit: nodes + chart_type: line + dimensions: + - name: znode + - name: ephemerals + - name: zookeeper.watches + description: Number of Watches + unit: watches + chart_type: line + dimensions: + - name: watches + - name: zookeeper.approximate_data_size + description: Approximate Data Tree Size + unit: KiB + chart_type: line + dimensions: + - name: size + - name: zookeeper.server_state + description: Server State + unit: state + chart_type: line + dimensions: + - name: state diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr.txt b/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr.txt new file mode 100644 index 00000000000000..8e10c287d5a6a9 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr.txt @@ -0,0 +1,416 @@ +zk_version 3.6.1--104dcb3e3fb464b30c5186d229e00af9f332524b, built on 04/21/2020 15:01 GMT +zk_server_state standalone +zk_ephemerals_count 0 +zk_min_latency 0.1 +zk_avg_latency 0.1 +zk_num_alive_connections 1 +zk_max_file_descriptor_count 1048576 +zk_outstanding_requests 0 +zk_approximate_data_size 44 +zk_znode_count 5 +zk_open_file_descriptor_count 63 +zk_global_sessions 0 +zk_local_sessions 0 +zk_uptime 27595191 +zk_last_client_response_size -1 +zk_max_latency 0.1 +zk_packets_sent 182 +zk_outstanding_tls_handshake 0 +zk_packets_received 92 +zk_max_client_response_size -1 +zk_connection_drop_probability 0.0 +zk_watch_count 0 +zk_min_client_response_size -1 +zk_proposal_count 0 +zk_outstanding_changes_removed 0 +zk_stale_requests_dropped 0 +zk_large_requests_rejected 0 +zk_connection_rejected 0 +zk_sessionless_connections_expired 0 +zk_looking_count 0 +zk_dead_watchers_queued 0 +zk_stale_requests 0 +zk_connection_drop_count 0 +zk_learner_proposal_received_count 0 +zk_digest_mismatches_count 0 +zk_dead_watchers_cleared 0 +zk_response_packet_cache_hits 0 +zk_bytes_received_count 368 +zk_add_dead_watcher_stall_time 0 +zk_request_throttle_wait_count 0 +zk_response_packet_cache_misses 0 +zk_ensemble_auth_success 0 +zk_prep_processor_request_queued 0 +zk_learner_commit_received_count 0 +zk_stale_replies 0 +zk_connection_request_count 0 
+zk_ensemble_auth_fail 0 +zk_diff_count 0 +zk_response_packet_get_children_cache_misses 0 +zk_connection_revalidate_count 0 +zk_quit_leading_due_to_disloyal_voter 0 +zk_snap_count 0 +zk_unrecoverable_error_count 0 +zk_commit_count 0 +zk_stale_sessions_expired 0 +zk_response_packet_get_children_cache_hits 0 +zk_sync_processor_request_queued 0 +zk_outstanding_changes_queued 0 +zk_request_commit_queued 0 +zk_ensemble_auth_skip 0 +zk_tls_handshake_exceeded 0 +zk_revalidate_count 0 +zk_avg_node_created_watch_count 0.0 +zk_min_node_created_watch_count 0 +zk_max_node_created_watch_count 0 +zk_cnt_node_created_watch_count 0 +zk_sum_node_created_watch_count 0 +zk_avg_session_queues_drained 0.0 +zk_min_session_queues_drained 0 +zk_max_session_queues_drained 0 +zk_cnt_session_queues_drained 0 +zk_sum_session_queues_drained 0 +zk_avg_write_commit_proc_req_queued 0.0 +zk_min_write_commit_proc_req_queued 0 +zk_max_write_commit_proc_req_queued 0 +zk_cnt_write_commit_proc_req_queued 0 +zk_sum_write_commit_proc_req_queued 0 +zk_avg_connection_token_deficit 0.0 +zk_min_connection_token_deficit 0 +zk_max_connection_token_deficit 0 +zk_cnt_connection_token_deficit 0 +zk_sum_connection_token_deficit 0 +zk_avg_read_commit_proc_req_queued 0.0 +zk_min_read_commit_proc_req_queued 0 +zk_max_read_commit_proc_req_queued 0 +zk_cnt_read_commit_proc_req_queued 0 +zk_sum_read_commit_proc_req_queued 0 +zk_avg_node_deleted_watch_count 0.0 +zk_min_node_deleted_watch_count 0 +zk_max_node_deleted_watch_count 0 +zk_cnt_node_deleted_watch_count 0 +zk_sum_node_deleted_watch_count 0 +zk_avg_startup_txns_load_time 0.0 +zk_min_startup_txns_load_time 0 +zk_max_startup_txns_load_time 0 +zk_cnt_startup_txns_load_time 0 +zk_sum_startup_txns_load_time 0 +zk_avg_sync_processor_queue_size 0.0 +zk_min_sync_processor_queue_size 0 +zk_max_sync_processor_queue_size 0 +zk_cnt_sync_processor_queue_size 1 +zk_sum_sync_processor_queue_size 0 +zk_avg_follower_sync_time 0.0 +zk_min_follower_sync_time 0 +zk_max_follower_sync_time 0 +zk_cnt_follower_sync_time 0 +zk_sum_follower_sync_time 0 +zk_avg_prep_processor_queue_size 0.0 +zk_min_prep_processor_queue_size 0 +zk_max_prep_processor_queue_size 0 +zk_cnt_prep_processor_queue_size 1 +zk_sum_prep_processor_queue_size 0 +zk_avg_fsynctime 0.0 +zk_min_fsynctime 0 +zk_max_fsynctime 0 +zk_cnt_fsynctime 0 +zk_sum_fsynctime 0 +zk_avg_reads_issued_from_session_queue 0.0 +zk_min_reads_issued_from_session_queue 0 +zk_max_reads_issued_from_session_queue 0 +zk_cnt_reads_issued_from_session_queue 0 +zk_sum_reads_issued_from_session_queue 0 +zk_avg_snapshottime 0.0 +zk_min_snapshottime 0 +zk_max_snapshottime 0 +zk_cnt_snapshottime 1 +zk_sum_snapshottime 0 +zk_avg_startup_txns_loaded 0.0 +zk_min_startup_txns_loaded 0 +zk_max_startup_txns_loaded 0 +zk_cnt_startup_txns_loaded 0 +zk_sum_startup_txns_loaded 0 +zk_avg_reads_after_write_in_session_queue 0.0 +zk_min_reads_after_write_in_session_queue 0 +zk_max_reads_after_write_in_session_queue 0 +zk_cnt_reads_after_write_in_session_queue 0 +zk_sum_reads_after_write_in_session_queue 0 +zk_avg_requests_in_session_queue 0.0 +zk_min_requests_in_session_queue 0 +zk_max_requests_in_session_queue 0 +zk_cnt_requests_in_session_queue 0 +zk_sum_requests_in_session_queue 0 +zk_avg_write_commit_proc_issued 0.0 +zk_min_write_commit_proc_issued 0 +zk_max_write_commit_proc_issued 0 +zk_cnt_write_commit_proc_issued 0 +zk_sum_write_commit_proc_issued 0 +zk_avg_prep_process_time 0.0 +zk_min_prep_process_time 0 +zk_max_prep_process_time 0 +zk_cnt_prep_process_time 0 
+zk_sum_prep_process_time 0 +zk_avg_pending_session_queue_size 0.0 +zk_min_pending_session_queue_size 0 +zk_max_pending_session_queue_size 0 +zk_cnt_pending_session_queue_size 0 +zk_sum_pending_session_queue_size 0 +zk_avg_time_waiting_empty_pool_in_commit_processor_read_ms 0.0 +zk_min_time_waiting_empty_pool_in_commit_processor_read_ms 0 +zk_max_time_waiting_empty_pool_in_commit_processor_read_ms 0 +zk_cnt_time_waiting_empty_pool_in_commit_processor_read_ms 0 +zk_sum_time_waiting_empty_pool_in_commit_processor_read_ms 0 +zk_avg_commit_process_time 0.0 +zk_min_commit_process_time 0 +zk_max_commit_process_time 0 +zk_cnt_commit_process_time 0 +zk_sum_commit_process_time 0 +zk_avg_dbinittime 6.0 +zk_min_dbinittime 6 +zk_max_dbinittime 6 +zk_cnt_dbinittime 1 +zk_sum_dbinittime 6 +zk_avg_netty_queued_buffer_capacity 0.0 +zk_min_netty_queued_buffer_capacity 0 +zk_max_netty_queued_buffer_capacity 0 +zk_cnt_netty_queued_buffer_capacity 0 +zk_sum_netty_queued_buffer_capacity 0 +zk_avg_election_time 0.0 +zk_min_election_time 0 +zk_max_election_time 0 +zk_cnt_election_time 0 +zk_sum_election_time 0 +zk_avg_commit_commit_proc_req_queued 0.0 +zk_min_commit_commit_proc_req_queued 0 +zk_max_commit_commit_proc_req_queued 0 +zk_cnt_commit_commit_proc_req_queued 0 +zk_sum_commit_commit_proc_req_queued 0 +zk_avg_sync_processor_batch_size 0.0 +zk_min_sync_processor_batch_size 0 +zk_max_sync_processor_batch_size 0 +zk_cnt_sync_processor_batch_size 0 +zk_sum_sync_processor_batch_size 0 +zk_avg_node_children_watch_count 0.0 +zk_min_node_children_watch_count 0 +zk_max_node_children_watch_count 0 +zk_cnt_node_children_watch_count 0 +zk_sum_node_children_watch_count 0 +zk_avg_write_batch_time_in_commit_processor 0.0 +zk_min_write_batch_time_in_commit_processor 0 +zk_max_write_batch_time_in_commit_processor 0 +zk_cnt_write_batch_time_in_commit_processor 0 +zk_sum_write_batch_time_in_commit_processor 0 +zk_avg_read_commit_proc_issued 0.0 +zk_min_read_commit_proc_issued 0 +zk_max_read_commit_proc_issued 0 +zk_cnt_read_commit_proc_issued 0 +zk_sum_read_commit_proc_issued 0 +zk_avg_concurrent_request_processing_in_commit_processor 0.0 +zk_min_concurrent_request_processing_in_commit_processor 0 +zk_max_concurrent_request_processing_in_commit_processor 0 +zk_cnt_concurrent_request_processing_in_commit_processor 0 +zk_sum_concurrent_request_processing_in_commit_processor 0 +zk_avg_node_changed_watch_count 0.0 +zk_min_node_changed_watch_count 0 +zk_max_node_changed_watch_count 0 +zk_cnt_node_changed_watch_count 0 +zk_sum_node_changed_watch_count 0 +zk_avg_sync_process_time 0.0 +zk_min_sync_process_time 0 +zk_max_sync_process_time 0 +zk_cnt_sync_process_time 0 +zk_sum_sync_process_time 0 +zk_avg_startup_snap_load_time 5.0 +zk_min_startup_snap_load_time 5 +zk_max_startup_snap_load_time 5 +zk_cnt_startup_snap_load_time 1 +zk_sum_startup_snap_load_time 5 +zk_avg_prep_processor_queue_time_ms 0.0 +zk_min_prep_processor_queue_time_ms 0 +zk_max_prep_processor_queue_time_ms 0 +zk_cnt_prep_processor_queue_time_ms 0 +zk_sum_prep_processor_queue_time_ms 0 +zk_p50_prep_processor_queue_time_ms 0 +zk_p95_prep_processor_queue_time_ms 0 +zk_p99_prep_processor_queue_time_ms 0 +zk_p999_prep_processor_queue_time_ms 0 +zk_avg_close_session_prep_time 0.0 +zk_min_close_session_prep_time 0 +zk_max_close_session_prep_time 0 +zk_cnt_close_session_prep_time 0 +zk_sum_close_session_prep_time 0 +zk_p50_close_session_prep_time 0 +zk_p95_close_session_prep_time 0 +zk_p99_close_session_prep_time 0 +zk_p999_close_session_prep_time 0 
+zk_avg_read_commitproc_time_ms 0.0 +zk_min_read_commitproc_time_ms 0 +zk_max_read_commitproc_time_ms 0 +zk_cnt_read_commitproc_time_ms 0 +zk_sum_read_commitproc_time_ms 0 +zk_p50_read_commitproc_time_ms 0 +zk_p95_read_commitproc_time_ms 0 +zk_p99_read_commitproc_time_ms 0 +zk_p999_read_commitproc_time_ms 0 +zk_avg_updatelatency 0.0 +zk_min_updatelatency 0 +zk_max_updatelatency 0 +zk_cnt_updatelatency 0 +zk_sum_updatelatency 0 +zk_p50_updatelatency 0 +zk_p95_updatelatency 0 +zk_p99_updatelatency 0 +zk_p999_updatelatency 0 +zk_avg_local_write_committed_time_ms 0.0 +zk_min_local_write_committed_time_ms 0 +zk_max_local_write_committed_time_ms 0 +zk_cnt_local_write_committed_time_ms 0 +zk_sum_local_write_committed_time_ms 0 +zk_p50_local_write_committed_time_ms 0 +zk_p95_local_write_committed_time_ms 0 +zk_p99_local_write_committed_time_ms 0 +zk_p999_local_write_committed_time_ms 0 +zk_avg_readlatency 0.0 +zk_min_readlatency 0 +zk_max_readlatency 0 +zk_cnt_readlatency 0 +zk_sum_readlatency 0 +zk_p50_readlatency 0 +zk_p95_readlatency 0 +zk_p99_readlatency 0 +zk_p999_readlatency 0 +zk_avg_quorum_ack_latency 0.0 +zk_min_quorum_ack_latency 0 +zk_max_quorum_ack_latency 0 +zk_cnt_quorum_ack_latency 0 +zk_sum_quorum_ack_latency 0 +zk_p50_quorum_ack_latency 0 +zk_p95_quorum_ack_latency 0 +zk_p99_quorum_ack_latency 0 +zk_p999_quorum_ack_latency 0 +zk_avg_om_commit_process_time_ms 0.0 +zk_min_om_commit_process_time_ms 0 +zk_max_om_commit_process_time_ms 0 +zk_cnt_om_commit_process_time_ms 0 +zk_sum_om_commit_process_time_ms 0 +zk_p50_om_commit_process_time_ms 0 +zk_p95_om_commit_process_time_ms 0 +zk_p99_om_commit_process_time_ms 0 +zk_p999_om_commit_process_time_ms 0 +zk_avg_read_final_proc_time_ms 0.0 +zk_min_read_final_proc_time_ms 0 +zk_max_read_final_proc_time_ms 0 +zk_cnt_read_final_proc_time_ms 0 +zk_sum_read_final_proc_time_ms 0 +zk_p50_read_final_proc_time_ms 0 +zk_p95_read_final_proc_time_ms 0 +zk_p99_read_final_proc_time_ms 0 +zk_p999_read_final_proc_time_ms 0 +zk_avg_commit_propagation_latency 0.0 +zk_min_commit_propagation_latency 0 +zk_max_commit_propagation_latency 0 +zk_cnt_commit_propagation_latency 0 +zk_sum_commit_propagation_latency 0 +zk_p50_commit_propagation_latency 0 +zk_p95_commit_propagation_latency 0 +zk_p99_commit_propagation_latency 0 +zk_p999_commit_propagation_latency 0 +zk_avg_dead_watchers_cleaner_latency 0.0 +zk_min_dead_watchers_cleaner_latency 0 +zk_max_dead_watchers_cleaner_latency 0 +zk_cnt_dead_watchers_cleaner_latency 0 +zk_sum_dead_watchers_cleaner_latency 0 +zk_p50_dead_watchers_cleaner_latency 0 +zk_p95_dead_watchers_cleaner_latency 0 +zk_p99_dead_watchers_cleaner_latency 0 +zk_p999_dead_watchers_cleaner_latency 0 +zk_avg_write_final_proc_time_ms 0.0 +zk_min_write_final_proc_time_ms 0 +zk_max_write_final_proc_time_ms 0 +zk_cnt_write_final_proc_time_ms 0 +zk_sum_write_final_proc_time_ms 0 +zk_p50_write_final_proc_time_ms 0 +zk_p95_write_final_proc_time_ms 0 +zk_p99_write_final_proc_time_ms 0 +zk_p999_write_final_proc_time_ms 0 +zk_avg_proposal_ack_creation_latency 0.0 +zk_min_proposal_ack_creation_latency 0 +zk_max_proposal_ack_creation_latency 0 +zk_cnt_proposal_ack_creation_latency 0 +zk_sum_proposal_ack_creation_latency 0 +zk_p50_proposal_ack_creation_latency 0 +zk_p95_proposal_ack_creation_latency 0 +zk_p99_proposal_ack_creation_latency 0 +zk_p999_proposal_ack_creation_latency 0 +zk_avg_proposal_latency 0.0 +zk_min_proposal_latency 0 +zk_max_proposal_latency 0 +zk_cnt_proposal_latency 0 +zk_sum_proposal_latency 0 +zk_p50_proposal_latency 0 
+zk_p95_proposal_latency 0 +zk_p99_proposal_latency 0 +zk_p999_proposal_latency 0 +zk_avg_om_proposal_process_time_ms 0.0 +zk_min_om_proposal_process_time_ms 0 +zk_max_om_proposal_process_time_ms 0 +zk_cnt_om_proposal_process_time_ms 0 +zk_sum_om_proposal_process_time_ms 0 +zk_p50_om_proposal_process_time_ms 0 +zk_p95_om_proposal_process_time_ms 0 +zk_p99_om_proposal_process_time_ms 0 +zk_p999_om_proposal_process_time_ms 0 +zk_avg_sync_processor_queue_and_flush_time_ms 0.0 +zk_min_sync_processor_queue_and_flush_time_ms 0 +zk_max_sync_processor_queue_and_flush_time_ms 0 +zk_cnt_sync_processor_queue_and_flush_time_ms 0 +zk_sum_sync_processor_queue_and_flush_time_ms 0 +zk_p50_sync_processor_queue_and_flush_time_ms 0 +zk_p95_sync_processor_queue_and_flush_time_ms 0 +zk_p99_sync_processor_queue_and_flush_time_ms 0 +zk_p999_sync_processor_queue_and_flush_time_ms 0 +zk_avg_propagation_latency 0.0 +zk_min_propagation_latency 0 +zk_max_propagation_latency 0 +zk_cnt_propagation_latency 0 +zk_sum_propagation_latency 0 +zk_p50_propagation_latency 0 +zk_p95_propagation_latency 0 +zk_p99_propagation_latency 0 +zk_p999_propagation_latency 0 +zk_avg_server_write_committed_time_ms 0.0 +zk_min_server_write_committed_time_ms 0 +zk_max_server_write_committed_time_ms 0 +zk_cnt_server_write_committed_time_ms 0 +zk_sum_server_write_committed_time_ms 0 +zk_p50_server_write_committed_time_ms 0 +zk_p95_server_write_committed_time_ms 0 +zk_p99_server_write_committed_time_ms 0 +zk_p999_server_write_committed_time_ms 0 +zk_avg_sync_processor_queue_time_ms 0.0 +zk_min_sync_processor_queue_time_ms 0 +zk_max_sync_processor_queue_time_ms 0 +zk_cnt_sync_processor_queue_time_ms 0 +zk_sum_sync_processor_queue_time_ms 0 +zk_p50_sync_processor_queue_time_ms 0 +zk_p95_sync_processor_queue_time_ms 0 +zk_p99_sync_processor_queue_time_ms 0 +zk_p999_sync_processor_queue_time_ms 0 +zk_avg_sync_processor_queue_flush_time_ms 0.0 +zk_min_sync_processor_queue_flush_time_ms 0 +zk_max_sync_processor_queue_flush_time_ms 0 +zk_cnt_sync_processor_queue_flush_time_ms 0 +zk_sum_sync_processor_queue_flush_time_ms 0 +zk_p50_sync_processor_queue_flush_time_ms 0 +zk_p95_sync_processor_queue_flush_time_ms 0 +zk_p99_sync_processor_queue_flush_time_ms 0 +zk_p999_sync_processor_queue_flush_time_ms 0 +zk_avg_write_commitproc_time_ms 0.0 +zk_min_write_commitproc_time_ms 0 +zk_max_write_commitproc_time_ms 0 +zk_cnt_write_commitproc_time_ms 0 +zk_sum_write_commitproc_time_ms 0 +zk_p50_write_commitproc_time_ms 0 +zk_p95_write_commitproc_time_ms 0 +zk_p99_write_commitproc_time_ms 0 +zk_p999_write_commitproc_time_ms 0 \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr_notinwhitelist.txt b/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr_notinwhitelist.txt new file mode 100644 index 00000000000000..1fd1983b7c6fb8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/testdata/mntr_notinwhitelist.txt @@ -0,0 +1 @@ +mntr is not executed because it is not in the whitelist. 
\ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper.go b/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper.go new file mode 100644 index 00000000000000..29ab1f858856d2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "crypto/tls" + _ "embed" + "fmt" + "time" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/agent/module" +) + +//go:embed "config_schema.json" +var configSchema string + +func init() { + module.Register("zookeeper", module.Creator{ + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, + }) +} + +// Config is the Zookeeper module configuration. +type Config struct { + Address string + Timeout web.Duration `yaml:"timeout"` + UseTLS bool `yaml:"use_tls"` + tlscfg.TLSConfig `yaml:",inline"` +} + +// New creates Zookeeper with default values. +func New() *Zookeeper { + config := Config{ + Address: "127.0.0.1:2181", + Timeout: web.Duration{Duration: time.Second}, + UseTLS: false, + } + return &Zookeeper{Config: config} +} + +type fetcher interface { + fetch(command string) ([]string, error) +} + +// Zookeeper Zookeeper module. +type Zookeeper struct { + module.Base + fetcher + Config `yaml:",inline"` +} + +// Cleanup makes cleanup. +func (Zookeeper) Cleanup() {} + +func (z *Zookeeper) createZookeeperFetcher() (err error) { + var tlsConf *tls.Config + if z.UseTLS { + tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig) + if err != nil { + return fmt.Errorf("error on creating tls config : %v", err) + } + } + + sock := socket.New(socket.Config{ + Address: z.Address, + ConnectTimeout: z.Timeout.Duration, + ReadTimeout: z.Timeout.Duration, + WriteTimeout: z.Timeout.Duration, + TLSConf: tlsConf, + }) + z.fetcher = &zookeeperFetcher{Client: sock} + return nil +} + +// Init makes initialization. +func (z *Zookeeper) Init() bool { + err := z.createZookeeperFetcher() + if err != nil { + z.Error(err) + return false + } + + return true +} + +// Check makes check. +func (z *Zookeeper) Check() bool { + return len(z.Collect()) > 0 +} + +// Charts creates Charts. +func (Zookeeper) Charts() *Charts { + return charts.Copy() +} + +// Collect collects metrics. 
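+// It returns nil if the collection failed or produced no metrics.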
+func (z *Zookeeper) Collect() map[string]int64 { + mx, err := z.collect() + if err != nil { + z.Error(err) + } + + if len(mx) == 0 { + return nil + } + return mx +} diff --git a/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper_test.go b/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper_test.go new file mode 100644 index 00000000000000..13f3632c295e3d --- /dev/null +++ b/src/go/collectors/go.d.plugin/modules/zookeeper/zookeeper_test.go @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "bufio" + "bytes" + "errors" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + testMntrData, _ = os.ReadFile("testdata/mntr.txt") + testMntrNotInWhiteListData, _ = os.ReadFile("testdata/mntr_notinwhitelist.txt") +) + +func Test_testDataLoad(t *testing.T) { + assert.NotNil(t, testMntrData) + assert.NotNil(t, testMntrNotInWhiteListData) +} + +func TestNew(t *testing.T) { + job := New() + + assert.IsType(t, (*Zookeeper)(nil), job) +} + +func TestZookeeper_Init(t *testing.T) { + job := New() + + assert.True(t, job.Init()) + assert.NotNil(t, job.fetcher) +} + +func TestZookeeper_InitErrorOnCreatingTLSConfig(t *testing.T) { + job := New() + job.UseTLS = true + job.TLSConfig.TLSCA = "testdata/tls" + + assert.False(t, job.Init()) +} + +func TestZookeeper_Check(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{data: testMntrData} + + assert.True(t, job.Check()) +} + +func TestZookeeper_CheckErrorOnFetch(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{err: true} + + assert.False(t, job.Check()) +} + +func TestZookeeper_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} + +func TestZookeeper_Cleanup(t *testing.T) { + New().Cleanup() +} + +func TestZookeeper_Collect(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{data: testMntrData} + + expected := map[string]int64{ + "approximate_data_size": 44, + "avg_latency": 100, + "ephemerals_count": 0, + "max_file_descriptor_count": 1048576, + "max_latency": 100, + "min_latency": 100, + "num_alive_connections": 1, + "open_file_descriptor_count": 63, + "outstanding_requests": 0, + "packets_received": 92, + "packets_sent": 182, + "server_state": 4, + "watch_count": 0, + "znode_count": 5, + } + + collected := job.Collect() + + assert.Equal(t, expected, collected) + ensureCollectedHasAllChartsDimsVarsIDs(t, job, collected) +} + +func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{data: testMntrNotInWhiteListData} + + assert.Nil(t, job.Collect()) +} + +func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{} + + assert.Nil(t, job.Collect()) +} + +func TestZookeeper_CollectMntrInvalidData(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{data: []byte("hello \nand good buy\n")} + + assert.Nil(t, job.Collect()) +} + +func TestZookeeper_CollectMntrReceiveError(t *testing.T) { + job := New() + require.True(t, job.Init()) + job.fetcher = &mockZookeeperFetcher{err: true} + + assert.Nil(t, job.Collect()) +} + +func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, zk *Zookeeper, collected map[string]int64) { + for _, chart := range *zk.Charts() { + if chart.Obsolete { + continue + 
+		}
+		for _, dim := range chart.Dims {
+			_, ok := collected[dim.ID]
+			assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+		}
+		for _, v := range chart.Vars {
+			_, ok := collected[v.ID]
+			assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+		}
+	}
+}
+
+type mockZookeeperFetcher struct {
+	data []byte
+	err  bool
+}
+
+func (m mockZookeeperFetcher) fetch(_ string) ([]string, error) {
+	if m.err {
+		return nil, errors.New("mock fetch error")
+	}
+
+	var lines []string
+	s := bufio.NewScanner(bytes.NewReader(m.data))
+	for s.Scan() {
+		if !isZKLine(s.Bytes()) || isMntrLineOK(s.Bytes()) {
+			lines = append(lines, s.Text())
+		}
+	}
+	return lines, nil
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/README.md b/src/go/collectors/go.d.plugin/pkg/README.md
new file mode 100644
index 00000000000000..1773f2d0c9323a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/README.md
@@ -0,0 +1,22 @@
+<!--
+title: "Helper Packages"
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/README.md"
+sidebar_label: "Helper Packages"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# Helper Packages
+
+- if you need to work with IP ranges, consider using
+  [`iprange`](https://github.com/netdata/go.d.plugin/blob/master/pkg/iprange/README.md).
+- if you parse application log files, then [`logs`](https://github.com/netdata/go.d.plugin/tree/master/pkg/logs) is
+  handy.
+- if you need filtering,
+  check [`matcher`](https://github.com/netdata/go.d.plugin/blob/master/pkg/matcher/README.md).
+- if you collect metrics from an HTTP endpoint, use [`web`](https://github.com/netdata/go.d.plugin/tree/master/pkg/web).
+- if you collect metrics from a Prometheus endpoint,
+  then [`prometheus`](https://github.com/netdata/go.d.plugin/tree/master/pkg/prometheus)
+  and [`web`](https://github.com/netdata/go.d.plugin/blob/master/pkg/web/README.md) are what you need.
+- [`tlscfg`](https://github.com/netdata/go.d.plugin/blob/master/pkg/tlscfg/README.md) provides TLS support.
+- [`stm`](https://github.com/netdata/go.d.plugin/blob/master/pkg/stm/README.md) helps you convert any struct to a `map[string]int64`.
diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/README.md b/src/go/collectors/go.d.plugin/pkg/iprange/README.md
new file mode 100644
index 00000000000000..25edb8ad444813
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/iprange/README.md
@@ -0,0 +1,37 @@
+<!--
+title: "iprange"
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/iprange/README.md"
+sidebar_label: "iprange"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# iprange
+
+This package helps you work with IP ranges.
+
+An IP range is a set of IP addresses. Both IPv4 and IPv6 are supported.
+
+IP range interface:
+
+```
+type Range interface {
+	Family() Family
+	Contains(ip net.IP) bool
+	Size() *big.Int
+	fmt.Stringer
+}
+```
+
+## Supported formats
+
+- `IPv4 address` (192.0.2.1)
+- `IPv4 range` (192.0.2.0-192.0.2.10)
+- `IPv4 CIDR` (192.0.2.0/24)
+- `IPv4 subnet mask` (192.0.2.0/255.255.255.0)
+- `IPv6 address` (2001:db8::1)
+- `IPv6 range` (2001:db8::-2001:db8::10)
+- `IPv6 CIDR` (2001:db8::/64)
+
+An IP range doesn't include the network and broadcast addresses when the format is `IPv4 CIDR`, `IPv4 subnet mask`,
+or `IPv6 CIDR`.
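As a quick orientation, here is a minimal usage sketch for the `iprange` package added above (an editor's illustration, not part of the diff). It only calls `ParseRange`, `Size`, and `Contains` from this PR; the `main` wrapper is illustrative, and the expected outputs are taken from the package's own tests.

```go
package main

import (
	"fmt"
	"net"

	"github.com/netdata/go.d.plugin/pkg/iprange"
)

func main() {
	// A /24 CIDR excludes the network and broadcast addresses,
	// so the resulting range is 192.0.2.1-192.0.2.254.
	r, err := iprange.ParseRange("192.0.2.0/24")
	if err != nil {
		panic(err)
	}

	fmt.Println(r)                                     // 192.0.2.1-192.0.2.254
	fmt.Println(r.Size())                              // 254
	fmt.Println(r.Contains(net.ParseIP("192.0.2.10"))) // true
}
```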
diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/parse.go b/src/go/collectors/go.d.plugin/pkg/iprange/parse.go new file mode 100644 index 00000000000000..3471702a11bca3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/parse.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "bytes" + "fmt" + "net" + "regexp" + "strings" + + "github.com/apparentlymart/go-cidr/cidr" +) + +// ParseRanges parses s as a space separated list of IP Ranges, returning the result and an error if any. +// IP Range can be in IPv4 address ("192.0.2.1"), IPv4 range ("192.0.2.0-192.0.2.10") +// IPv4 CIDR ("192.0.2.0/24"), IPv4 subnet mask ("192.0.2.0/255.255.255.0"), +// IPv6 address ("2001:db8::1"), IPv6 range ("2001:db8::-2001:db8::10"), +// or IPv6 CIDR ("2001:db8::/64") form. +// IPv4 CIDR, IPv4 subnet mask and IPv6 CIDR ranges don't include network and broadcast addresses. +func ParseRanges(s string) ([]Range, error) { + parts := strings.Fields(s) + if len(parts) == 0 { + return nil, nil + } + + var ranges []Range + for _, v := range parts { + r, err := ParseRange(v) + if err != nil { + return nil, err + } + + if r != nil { + ranges = append(ranges, r) + } + } + return ranges, nil +} + +var ( + reRange = regexp.MustCompile("^[0-9a-f.:-]+$") // addr | addr-addr + reCIDR = regexp.MustCompile("^[0-9a-f.:]+/[0-9]{1,3}$") // addr/prefix_length + reSubnetMask = regexp.MustCompile("^[0-9.]+/[0-9.]{7,}$") // v4_addr/mask +) + +// ParseRange parses s as an IP Range, returning the result and an error if any. +// The string s can be in IPv4 address ("192.0.2.1"), IPv4 range ("192.0.2.0-192.0.2.10") +// IPv4 CIDR ("192.0.2.0/24"), IPv4 subnet mask ("192.0.2.0/255.255.255.0"), +// IPv6 address ("2001:db8::1"), IPv6 range ("2001:db8::-2001:db8::10"), +// or IPv6 CIDR ("2001:db8::/64") form. +// IPv4 CIDR, IPv4 subnet mask and IPv6 CIDR ranges don't include network and broadcast addresses. 
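+// For example, "192.0.2.0/24" produces the range 192.0.2.1-192.0.2.254.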
+func ParseRange(s string) (Range, error) { + s = strings.ToLower(s) + if s == "" { + return nil, nil + } + + var r Range + switch { + case reRange.MatchString(s): + r = parseRange(s) + case reCIDR.MatchString(s): + r = parseCIDR(s) + case reSubnetMask.MatchString(s): + r = parseSubnetMask(s) + } + + if r == nil { + return nil, fmt.Errorf("ip range (%s) invalid syntax", s) + } + return r, nil +} + +func parseRange(s string) Range { + var start, end net.IP + if idx := strings.IndexByte(s, '-'); idx != -1 { + start, end = net.ParseIP(s[:idx]), net.ParseIP(s[idx+1:]) + } else { + start, end = net.ParseIP(s), net.ParseIP(s) + } + + return New(start, end) +} + +func parseCIDR(s string) Range { + ip, network, err := net.ParseCIDR(s) + if err != nil { + return nil + } + + start, end := cidr.AddressRange(network) + prefixLen, _ := network.Mask.Size() + + if isV4IP(ip) && prefixLen < 31 || isV6IP(ip) && prefixLen < 127 { + start = cidr.Inc(start) + end = cidr.Dec(end) + } + + return parseRange(fmt.Sprintf("%s-%s", start, end)) +} + +func parseSubnetMask(s string) Range { + idx := strings.LastIndexByte(s, '/') + if idx == -1 { + return nil + } + + address, mask := s[:idx], s[idx+1:] + + ip := net.ParseIP(mask).To4() + if ip == nil { + return nil + } + + prefixLen, bits := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]).Size() + if prefixLen+bits == 0 { + return nil + } + + return parseCIDR(fmt.Sprintf("%s/%d", address, prefixLen)) +} + +func isV4RangeValid(start, end net.IP) bool { + return isV4IP(start) && isV4IP(end) && bytes.Compare(end, start) >= 0 +} + +func isV6RangeValid(start, end net.IP) bool { + return isV6IP(start) && isV6IP(end) && bytes.Compare(end, start) >= 0 +} + +func isV4IP(ip net.IP) bool { + return ip.To4() != nil +} + +func isV6IP(ip net.IP) bool { + return !isV4IP(ip) && ip.To16() != nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/parse_test.go b/src/go/collectors/go.d.plugin/pkg/iprange/parse_test.go new file mode 100644 index 00000000000000..8b4ab96b381954 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/parse_test.go @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "fmt" + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseRanges(t *testing.T) { + tests := map[string]struct { + input string + wantRanges []Range + wantErr bool + }{ + "single range": { + input: "192.0.2.0-192.0.2.10", + wantRanges: []Range{ + prepareRange("192.0.2.0", "192.0.2.10"), + }, + }, + "multiple ranges": { + input: "2001:db8::0 192.0.2.0-192.0.2.10 2001:db8::0/126 192.0.2.0/255.255.255.0", + wantRanges: []Range{ + prepareRange("2001:db8::0", "2001:db8::0"), + prepareRange("192.0.2.0", "192.0.2.10"), + prepareRange("2001:db8::1", "2001:db8::2"), + prepareRange("192.0.2.1", "192.0.2.254"), + }, + }, + "single invalid syntax": { + input: "192.0.2.0-192.0.2.", + wantErr: true, + }, + "multiple invalid syntax": { + input: "2001:db8::0 192.0.2.0-192.0.2.10 2001:db8::0/999 192.0.2.0/255.255.255.0", + wantErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rs, err := ParseRanges(test.input) + + if test.wantErr { + assert.Error(t, err) + assert.Nilf(t, rs, "want: nil, got: %s", rs) + } else { + assert.NoError(t, err) + assert.Equalf(t, test.wantRanges, rs, "want: %s, got: %s", test.wantRanges, rs) + } + }) + } +} + +func TestParseRange(t *testing.T) { + tests := map[string]struct { + input string + wantRange Range + wantErr bool + }{ + "v4 IP": { + input: "192.0.2.0", + 
wantRange: prepareRange("192.0.2.0", "192.0.2.0"), + }, + "v4 IP: invalid address": { + input: "192.0.2.", + wantErr: true, + }, + "v4 Range": { + input: "192.0.2.0-192.0.2.10", + wantRange: prepareRange("192.0.2.0", "192.0.2.10"), + }, + "v4 Range: start == end": { + input: "192.0.2.0-192.0.2.0", + wantRange: prepareRange("192.0.2.0", "192.0.2.0"), + }, + "v4 Range: start > end": { + input: "192.0.2.10-192.0.2.0", + wantErr: true, + }, + "v4 Range: invalid start": { + input: "192.0.2.-192.0.2.10", + wantErr: true, + }, + "v4 Range: invalid end": { + input: "192.0.2.0-192.0.2.", + wantErr: true, + }, + "v4 Range: v6 start": { + input: "2001:db8::0-192.0.2.10", + wantErr: true, + }, + "v4 Range: v6 end": { + input: "192.0.2.0-2001:db8::0", + wantErr: true, + }, + "v4 CIDR: /0": { + input: "192.0.2.0/0", + wantRange: prepareRange("0.0.0.1", "255.255.255.254"), + }, + "v4 CIDR: /24": { + input: "192.0.2.0/24", + wantRange: prepareRange("192.0.2.1", "192.0.2.254"), + }, + "v4 CIDR: /30": { + input: "192.0.2.0/30", + wantRange: prepareRange("192.0.2.1", "192.0.2.2"), + }, + "v4 CIDR: /31": { + input: "192.0.2.0/31", + wantRange: prepareRange("192.0.2.0", "192.0.2.1"), + }, + "v4 CIDR: /32": { + input: "192.0.2.0/32", + wantRange: prepareRange("192.0.2.0", "192.0.2.0"), + }, + "v4 CIDR: ip instead of host address": { + input: "192.0.2.10/24", + wantRange: prepareRange("192.0.2.1", "192.0.2.254"), + }, + "v4 CIDR: missing prefix length": { + input: "192.0.2.0/", + wantErr: true, + }, + "v4 CIDR: invalid prefix length": { + input: "192.0.2.0/99", + wantErr: true, + }, + "v4 Mask: /0": { + input: "192.0.2.0/0.0.0.0", + wantRange: prepareRange("0.0.0.1", "255.255.255.254"), + }, + "v4 Mask: /24": { + input: "192.0.2.0/255.255.255.0", + wantRange: prepareRange("192.0.2.1", "192.0.2.254"), + }, + "v4 Mask: /30": { + input: "192.0.2.0/255.255.255.252", + wantRange: prepareRange("192.0.2.1", "192.0.2.2"), + }, + "v4 Mask: /31": { + input: "192.0.2.0/255.255.255.254", + wantRange: prepareRange("192.0.2.0", "192.0.2.1"), + }, + "v4 Mask: /32": { + input: "192.0.2.0/255.255.255.255", + wantRange: prepareRange("192.0.2.0", "192.0.2.0"), + }, + "v4 Mask: missing prefix mask": { + input: "192.0.2.0/", + wantErr: true, + }, + "v4 Mask: invalid mask": { + input: "192.0.2.0/mask", + wantErr: true, + }, + "v4 Mask: not canonical form mask": { + input: "192.0.2.0/255.255.0.254", + wantErr: true, + }, + "v4 Mask: v6 address": { + input: "2001:db8::/255.255.255.0", + wantErr: true, + }, + + "v6 IP": { + input: "2001:db8::0", + wantRange: prepareRange("2001:db8::0", "2001:db8::0"), + }, + "v6 IP: invalid address": { + input: "2001:db8", + wantErr: true, + }, + "v6 Range": { + input: "2001:db8::-2001:db8::10", + wantRange: prepareRange("2001:db8::", "2001:db8::10"), + }, + "v6 Range: start == end": { + input: "2001:db8::-2001:db8::", + wantRange: prepareRange("2001:db8::", "2001:db8::"), + }, + "v6 Range: start > end": { + input: "2001:db8::10-2001:db8::", + wantErr: true, + }, + "v6 Range: invalid start": { + input: "2001:db8-2001:db8::10", + wantErr: true, + }, + "v6 Range: invalid end": { + input: "2001:db8::-2001:db8", + wantErr: true, + }, + "v6 Range: v4 start": { + input: "192.0.2.0-2001:db8::10", + wantErr: true, + }, + "v6 Range: v4 end": { + input: "2001:db8::-192.0.2.10", + wantErr: true, + }, + "v6 CIDR: /0": { + input: "2001:db8::/0", + wantRange: prepareRange("::1", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"), + }, + "v6 CIDR: /64": { + input: "2001:db8::/64", + wantRange: prepareRange("2001:db8::1", 
"2001:db8::ffff:ffff:ffff:fffe"), + }, + "v6 CIDR: /126": { + input: "2001:db8::/126", + wantRange: prepareRange("2001:db8::1", "2001:db8::2"), + }, + "v6 CIDR: /127": { + input: "2001:db8::/127", + wantRange: prepareRange("2001:db8::", "2001:db8::1"), + }, + "v6 CIDR: /128": { + input: "2001:db8::/128", + wantRange: prepareRange("2001:db8::", "2001:db8::"), + }, + "v6 CIDR: ip instead of host address": { + input: "2001:db8::10/64", + wantRange: prepareRange("2001:db8::1", "2001:db8::ffff:ffff:ffff:fffe"), + }, + "v6 CIDR: missing prefix length": { + input: "2001:db8::/", + wantErr: true, + }, + "v6 CIDR: invalid prefix length": { + input: "2001:db8::/999", + wantErr: true, + }, + } + + for name, test := range tests { + name = fmt.Sprintf("%s (%s)", name, test.input) + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + + if test.wantErr { + assert.Error(t, err) + assert.Nilf(t, r, "want: nil, got: %s", r) + } else { + assert.NoError(t, err) + assert.Equalf(t, test.wantRange, r, "want: %s, got: %s", test.wantRange, r) + } + }) + } +} + +func prepareRange(start, end string) Range { + return New(net.ParseIP(start), net.ParseIP(end)) +} diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/pool.go b/src/go/collectors/go.d.plugin/pkg/iprange/pool.go new file mode 100644 index 00000000000000..48ba5689bf5c77 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/pool.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "math/big" + "net" + "strings" +) + +// Pool is a collection of IP Ranges. +type Pool []Range + +// String returns the string form of the pool. +func (p Pool) String() string { + var b strings.Builder + for _, r := range p { + b.WriteString(r.String() + " ") + } + return strings.TrimSpace(b.String()) +} + +// Size reports the number of IP addresses in the pool. +func (p Pool) Size() *big.Int { + size := big.NewInt(0) + for _, r := range p { + size.Add(size, r.Size()) + } + return size +} + +// Contains reports whether the pool includes IP. 
+func (p Pool) Contains(ip net.IP) bool { + for _, r := range p { + if r.Contains(ip) { + return true + } + } + return false +} diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/pool_test.go b/src/go/collectors/go.d.plugin/pkg/iprange/pool_test.go new file mode 100644 index 00000000000000..2864b67116c4dd --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/pool_test.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "fmt" + "math/big" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPool_String(t *testing.T) { + tests := map[string]struct { + input string + wantString string + }{ + "singe": { + input: "192.0.2.0-192.0.2.10", + wantString: "192.0.2.0-192.0.2.10", + }, + "multiple": { + input: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10", + wantString: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rs, err := ParseRanges(test.input) + require.NoError(t, err) + p := Pool(rs) + + assert.Equal(t, test.wantString, p.String()) + }) + } +} + +func TestPool_Size(t *testing.T) { + tests := map[string]struct { + input string + wantSize *big.Int + }{ + "singe": { + input: "192.0.2.0-192.0.2.10", + wantSize: big.NewInt(11), + }, + "multiple": { + input: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10", + wantSize: big.NewInt(11 + 17), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rs, err := ParseRanges(test.input) + require.NoError(t, err) + p := Pool(rs) + + assert.Equal(t, test.wantSize, p.Size()) + }) + } +} + +func TestPool_Contains(t *testing.T) { + tests := map[string]struct { + input string + ip string + wantFail bool + }{ + "inside first": { + input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10", + ip: "192.0.2.5", + }, + "inside last": { + input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10", + ip: "2001:db8::5", + }, + "outside": { + input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10", + ip: "192.0.2.100", + wantFail: true, + }, + } + + for name, test := range tests { + name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip) + t.Run(name, func(t *testing.T) { + rs, err := ParseRanges(test.input) + require.NoError(t, err) + ip := net.ParseIP(test.ip) + require.NotNil(t, ip) + p := Pool(rs) + + if test.wantFail { + assert.False(t, p.Contains(ip)) + } else { + assert.True(t, p.Contains(ip)) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/range.go b/src/go/collectors/go.d.plugin/pkg/iprange/range.go new file mode 100644 index 00000000000000..1fe02eace2217f --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/range.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "bytes" + "fmt" + "math/big" + "net" +) + +// Family represents IP Range address-family. +type Family uint8 + +const ( + // V4Family is IPv4 address-family. + V4Family Family = iota + // V6Family is IPv6 address-family. + V6Family +) + +// Range represents an IP range. +type Range interface { + Family() Family + Contains(ip net.IP) bool + Size() *big.Int + fmt.Stringer +} + +// New returns new IP Range. +// If it is not a valid range (start and end IPs have different address-families, or start > end), +// New returns nil. 
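+// For example, New(net.ParseIP("192.0.2.0"), net.ParseIP("192.0.2.10")) is a valid 11-address IPv4 range.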
+func New(start, end net.IP) Range { + if isV4RangeValid(start, end) { + return v4Range{start: start, end: end} + } + if isV6RangeValid(start, end) { + return v6Range{start: start, end: end} + } + return nil +} + +type v4Range struct { + start net.IP + end net.IP +} + +// String returns the string form of the range. +func (r v4Range) String() string { + return fmt.Sprintf("%s-%s", r.start, r.end) +} + +// Family returns the range address family. +func (r v4Range) Family() Family { + return V4Family +} + +// Contains reports whether the range includes IP. +func (r v4Range) Contains(ip net.IP) bool { + return bytes.Compare(ip, r.start) >= 0 && bytes.Compare(ip, r.end) <= 0 +} + +// Size reports the number of IP addresses in the range. +func (r v4Range) Size() *big.Int { + return big.NewInt(v4ToInt(r.end) - v4ToInt(r.start) + 1) +} + +type v6Range struct { + start net.IP + end net.IP +} + +// String returns the string form of the range. +func (r v6Range) String() string { + return fmt.Sprintf("%s-%s", r.start, r.end) +} + +// Family returns the range address family. +func (r v6Range) Family() Family { + return V6Family +} + +// Contains reports whether the range includes IP. +func (r v6Range) Contains(ip net.IP) bool { + return bytes.Compare(ip, r.start) >= 0 && bytes.Compare(ip, r.end) <= 0 +} + +// Size reports the number of IP addresses in the range. +func (r v6Range) Size() *big.Int { + size := big.NewInt(0) + size.Add(size, big.NewInt(0).SetBytes(r.end)) + size.Sub(size, big.NewInt(0).SetBytes(r.start)) + size.Add(size, big.NewInt(1)) + return size +} + +func v4ToInt(ip net.IP) int64 { + ip = ip.To4() + return int64(ip[0])<<24 | int64(ip[1])<<16 | int64(ip[2])<<8 | int64(ip[3]) +} diff --git a/src/go/collectors/go.d.plugin/pkg/iprange/range_test.go b/src/go/collectors/go.d.plugin/pkg/iprange/range_test.go new file mode 100644 index 00000000000000..631d012e0e3645 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/iprange/range_test.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package iprange + +import ( + "fmt" + "math/big" + "net" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestV4Range_String(t *testing.T) { + tests := map[string]struct { + input string + wantString string + }{ + "IP": {input: "192.0.2.0", wantString: "192.0.2.0-192.0.2.0"}, + "Range": {input: "192.0.2.0-192.0.2.10", wantString: "192.0.2.0-192.0.2.10"}, + "CIDR": {input: "192.0.2.0/24", wantString: "192.0.2.1-192.0.2.254"}, + "Mask": {input: "192.0.2.0/255.255.255.0", wantString: "192.0.2.1-192.0.2.254"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, test.wantString, r.String()) + }) + } +} + +func TestV4Range_Family(t *testing.T) { + tests := map[string]struct { + input string + }{ + "IP": {input: "192.0.2.0"}, + "Range": {input: "192.0.2.0-192.0.2.10"}, + "CIDR": {input: "192.0.2.0/24"}, + "Mask": {input: "192.0.2.0/255.255.255.0"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, V4Family, r.Family()) + }) + } +} + +func TestV4Range_Size(t *testing.T) { + tests := map[string]struct { + input string + wantSize *big.Int + }{ + "IP": {input: "192.0.2.0", wantSize: big.NewInt(1)}, + "Range": {input: "192.0.2.0-192.0.2.10", wantSize: big.NewInt(11)}, + "CIDR": {input: "192.0.2.0/24", wantSize: big.NewInt(254)}, + "CIDR 31": 
{input: "192.0.2.0/31", wantSize: big.NewInt(2)}, + "CIDR 32": {input: "192.0.2.0/32", wantSize: big.NewInt(1)}, + "Mask": {input: "192.0.2.0/255.255.255.0", wantSize: big.NewInt(254)}, + "Mask 31": {input: "192.0.2.0/255.255.255.254", wantSize: big.NewInt(2)}, + "Mask 32": {input: "192.0.2.0/255.255.255.255", wantSize: big.NewInt(1)}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, test.wantSize, r.Size()) + }) + } +} + +func TestV4Range_Contains(t *testing.T) { + tests := map[string]struct { + input string + ip string + wantFail bool + }{ + "inside": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.5"}, + "outside": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.55", wantFail: true}, + "eq start": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.0"}, + "eq end": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.10"}, + "v6": {input: "192.0.2.0-192.0.2.10", ip: "2001:db8::", wantFail: true}, + } + + for name, test := range tests { + name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip) + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + ip := net.ParseIP(test.ip) + require.NotNil(t, ip) + + if test.wantFail { + assert.False(t, r.Contains(ip)) + } else { + assert.True(t, r.Contains(ip)) + } + }) + } +} + +func TestV6Range_String(t *testing.T) { + tests := map[string]struct { + input string + wantString string + }{ + "IP": {input: "2001:db8::", wantString: "2001:db8::-2001:db8::"}, + "Range": {input: "2001:db8::-2001:db8::10", wantString: "2001:db8::-2001:db8::10"}, + "CIDR": {input: "2001:db8::/126", wantString: "2001:db8::1-2001:db8::2"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, test.wantString, r.String()) + }) + } +} + +func TestV6Range_Family(t *testing.T) { + tests := map[string]struct { + input string + }{ + "IP": {input: "2001:db8::"}, + "Range": {input: "2001:db8::-2001:db8::10"}, + "CIDR": {input: "2001:db8::/126"}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, V6Family, r.Family()) + }) + } +} + +func TestV6Range_Size(t *testing.T) { + tests := map[string]struct { + input string + wantSize *big.Int + }{ + "IP": {input: "2001:db8::", wantSize: big.NewInt(1)}, + "Range": {input: "2001:db8::-2001:db8::10", wantSize: big.NewInt(17)}, + "CIDR": {input: "2001:db8::/120", wantSize: big.NewInt(254)}, + "CIDR 127": {input: "2001:db8::/127", wantSize: big.NewInt(2)}, + "CIDR 128": {input: "2001:db8::/128", wantSize: big.NewInt(1)}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + + assert.Equal(t, test.wantSize, r.Size()) + }) + } +} + +func TestV6Range_Contains(t *testing.T) { + tests := map[string]struct { + input string + ip string + wantFail bool + }{ + "inside": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::5"}, + "outside": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::ff", wantFail: true}, + "eq start": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::"}, + "eq end": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::10"}, + "v4": {input: "2001:db8::-2001:db8::10", ip: "192.0.2.0", wantFail: true}, + } + + for name, test := range tests { + name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip) + 
t.Run(name, func(t *testing.T) { + r, err := ParseRange(test.input) + require.NoError(t, err) + ip := net.ParseIP(test.ip) + require.NotNil(t, ip) + + if test.wantFail { + assert.False(t, r.Contains(ip)) + } else { + assert.True(t, r.Contains(ip)) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/k8sclient/k8sclient.go b/src/go/collectors/go.d.plugin/pkg/k8sclient/k8sclient.go new file mode 100644 index 00000000000000..079239c1cf96e3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/k8sclient/k8sclient.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8sclient + +import ( + "errors" + "os" + "path/filepath" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +const ( + EnvFakeClient = "KUBERNETES_FAKE_CLIENTSET" + defaultUserAgent = "Netdata/k8s-client" +) + +func New(userAgent string) (kubernetes.Interface, error) { + if userAgent == "" { + userAgent = defaultUserAgent + } + + switch { + case os.Getenv(EnvFakeClient) != "": + return fake.NewSimpleClientset(), nil + case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "": + return newInCluster(userAgent) + default: + return newOutOfCluster(userAgent) + } +} + +func newInCluster(userAgent string) (*kubernetes.Clientset, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + + config.UserAgent = userAgent + + return kubernetes.NewForConfig(config) +} + +func newOutOfCluster(userAgent string) (*kubernetes.Clientset, error) { + home := homeDir() + if home == "" { + return nil, errors.New("couldn't find home directory") + } + + path := filepath.Join(home, ".kube", "config") + config, err := clientcmd.BuildConfigFromFlags("", path) + if err != nil { + return nil, err + } + + config.UserAgent = userAgent + + return kubernetes.NewForConfig(config) +} + +func homeDir() string { + if h := os.Getenv("HOME"); h != "" { + return h + } + return os.Getenv("USERPROFILE") // windows +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/csv.go b/src/go/collectors/go.d.plugin/pkg/logs/csv.go new file mode 100644 index 00000000000000..3a7610a705a0d3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/csv.go @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "bytes" + "encoding/csv" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +type ( + CSVConfig struct { + FieldsPerRecord int `yaml:"fields_per_record"` + Delimiter string `yaml:"delimiter"` + TrimLeadingSpace bool `yaml:"trim_leading_space"` + Format string `yaml:"format"` + CheckField func(string) (string, int, bool) `yaml:"-"` + } + + CSVParser struct { + Config CSVConfig + reader *csv.Reader + format *csvFormat + } + + csvFormat struct { + raw string + maxIndex int + fields []csvField + } + + csvField struct { + name string + idx int + } +) + +func NewCSVParser(config CSVConfig, in io.Reader) (*CSVParser, error) { + if config.Format == "" { + return nil, errors.New("empty csv format") + } + + format, err := newCSVFormat(config) + if err != nil { + return nil, fmt.Errorf("bad csv format '%s': %v", config.Format, err) + } + + p := &CSVParser{ + Config: config, + reader: newCSVReader(in, config), + format: format, + } + return p, nil +} + +func (p *CSVParser) ReadLine(line LogLine) error { + record, err := p.reader.Read() + if err != nil { + return handleCSVReaderError(err) + } + return 
p.format.parse(record, line) +} + +func (p *CSVParser) Parse(row []byte, line LogLine) error { + r := newCSVReader(bytes.NewBuffer(row), p.Config) + record, err := r.Read() + if err != nil { + return handleCSVReaderError(err) + } + return p.format.parse(record, line) +} + +func (p CSVParser) Info() string { + return fmt.Sprintf("csv: %s", p.format.raw) +} + +func (f *csvFormat) parse(record []string, line LogLine) error { + if len(record) <= f.maxIndex { + return &ParseError{msg: "csv parse: unmatched line"} + } + + for _, v := range f.fields { + if err := line.Assign(v.name, record[v.idx]); err != nil { + return &ParseError{msg: fmt.Sprintf("csv parse: %v", err), err: err} + } + } + return nil +} + +func newCSVReader(in io.Reader, config CSVConfig) *csv.Reader { + r := csv.NewReader(in) + if config.Delimiter != "" { + if d, err := parseCSVDelimiter(config.Delimiter); err == nil { + r.Comma = d + } + } + r.TrimLeadingSpace = config.TrimLeadingSpace + r.FieldsPerRecord = config.FieldsPerRecord + r.ReuseRecord = true + return r +} + +func newCSVFormat(config CSVConfig) (*csvFormat, error) { + r := csv.NewReader(strings.NewReader(config.Format)) + if config.Delimiter != "" { + if d, err := parseCSVDelimiter(config.Delimiter); err == nil { + r.Comma = d + } + } + r.TrimLeadingSpace = config.TrimLeadingSpace + + record, err := r.Read() + if err != nil { + return nil, err + } + + fields, err := createCSVFields(record, config.CheckField) + if err != nil { + return nil, err + } + + if len(fields) == 0 { + return nil, errors.New("zero fields") + } + + format := &csvFormat{ + raw: config.Format, + maxIndex: fields[len(fields)-1].idx, + fields: fields, + } + return format, nil +} + +func createCSVFields(format []string, check func(string) (string, int, bool)) ([]csvField, error) { + if check == nil { + check = checkCSVFormatField + } + var fields []csvField + var offset int + seen := make(map[string]bool) + + for i, name := range format { + name = strings.Trim(name, `"`) + + name, addOffset, valid := check(name) + offset += addOffset + if !valid { + continue + } + if seen[name] { + return nil, fmt.Errorf("duplicate field: %s", name) + } + seen[name] = true + + idx := i + offset + fields = append(fields, csvField{name, idx}) + } + return fields, nil +} + +func handleCSVReaderError(err error) error { + if isCSVParseError(err) { + return &ParseError{msg: fmt.Sprintf("csv parse: %v", err), err: err} + } + return err +} + +func isCSVParseError(err error) bool { + return errors.Is(err, csv.ErrBareQuote) || errors.Is(err, csv.ErrFieldCount) || errors.Is(err, csv.ErrQuote) +} + +func checkCSVFormatField(name string) (newName string, offset int, valid bool) { + if len(name) < 2 || !strings.HasPrefix(name, "$") { + return "", 0, false + } + return name, 0, true +} + +func parseCSVDelimiter(s string) (rune, error) { + if isNumber(s) { + d, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid CSV delimiter: %v", err) + } + return rune(d), nil + } + if len(s) != 1 { + return 0, errors.New("invalid CSV delimiter: must be a single character") + } + return rune(s[0]), nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/csv_test.go b/src/go/collectors/go.d.plugin/pkg/logs/csv_test.go new file mode 100644 index 00000000000000..d7baaa1b5b5c74 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/csv_test.go @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +var testCSVConfig = CSVConfig{ + Delimiter: " ", + Format: "$A %B", +} + +func TestNewCSVParser(t *testing.T) { + tests := []struct { + name string + format string + wantErr bool + }{ + {name: "valid format", format: "$A $B"}, + {name: "empty format", wantErr: true}, + {name: "bad format: csv read error", format: "$A $B \"$C", wantErr: true}, + {name: "bad format: duplicate fields", format: "$A $A", wantErr: true}, + {name: "bad format: zero fields", format: "!A !B", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := testCSVConfig + c.Format = tt.format + p, err := NewCSVParser(c, nil) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, p) + } else { + assert.NoError(t, err) + assert.NotNil(t, p) + } + }) + } +} + +func TestNewCSVFormat(t *testing.T) { + tests := []struct { + format string + wantFormat csvFormat + wantErr bool + }{ + {format: "$A $B", wantFormat: csvFormat{maxIndex: 1, fields: []csvField{{"$A", 0}, {"$B", 1}}}}, + {format: "$A $B !C $E", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 1}, {"$E", 3}}}}, + {format: "!A !B !C $E", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$E", 3}}}}, + {format: "$A $OFFSET $B", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 3}}}}, + {format: "$A $OFFSET $B $OFFSET !A", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 3}}}}, + {format: "$A $OFFSET $OFFSET $B", wantFormat: csvFormat{maxIndex: 5, fields: []csvField{{"$A", 0}, {"$B", 5}}}}, + {format: "$OFFSET $A $OFFSET $B", wantFormat: csvFormat{maxIndex: 5, fields: []csvField{{"$A", 2}, {"$B", 5}}}}, + {format: "$A \"$A", wantErr: true}, + {format: "$A $A", wantErr: true}, + {format: "!A !A", wantErr: true}, + {format: "", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.format, func(t *testing.T) { + c := testCSVConfig + c.Format = tt.format + c.CheckField = testCheckCSVFormatField + tt.wantFormat.raw = tt.format + + f, err := newCSVFormat(c) + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, f) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.wantFormat, *f) + } + }) + } +} + +func TestCSVParser_ReadLine(t *testing.T) { + tests := []struct { + name string + row string + format string + wantErr bool + wantParseErr bool + }{ + {name: "match and no error", row: "1 2 3", format: `$A $B $C`}, + {name: "match but error on assigning", row: "1 2 3", format: `$A $B $ERR`, wantErr: true, wantParseErr: true}, + {name: "not match", row: "1 2 3", format: `$A $B $C $d`, wantErr: true, wantParseErr: true}, + {name: "error on reading csv.Err", row: "1 2\"3", format: `$A $B $C`, wantErr: true, wantParseErr: true}, + {name: "error on reading EOF", row: "", format: `$A $B $C`, wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + r := strings.NewReader(tt.row) + c := testCSVConfig + c.Format = tt.format + p, err := NewCSVParser(c, r) + require.NoError(t, err) + + err = p.ReadLine(&line) + + if tt.wantErr { + require.Error(t, err) + if tt.wantParseErr { + assert.True(t, IsParseError(err)) + } else { + assert.False(t, IsParseError(err)) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestCSVParser_Parse(t *testing.T) { + tests := []struct { + name string + row string + format string + wantErr bool + }{ + {name: "match and no error", row: "1 2 3", format: `$A $B $C`}, + {name: "match but error on assigning", row: "1 2 3", format: `$A $B 
$ERR`, wantErr: true}, + {name: "not match", row: "1 2 3", format: `$A $B $C $d`, wantErr: true}, + {name: "error on reading csv.Err", row: "1 2\"3", format: `$A $B $C`, wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + r := strings.NewReader(tt.row) + c := testCSVConfig + c.Format = tt.format + p, err := NewCSVParser(c, r) + require.NoError(t, err) + + err = p.ReadLine(&line) + + if tt.wantErr { + require.Error(t, err) + assert.True(t, IsParseError(err)) + } else { + assert.NoError(t, err) + } + }) + } + +} + +func TestCSVParser_Info(t *testing.T) { + p, err := NewCSVParser(testCSVConfig, nil) + require.NoError(t, err) + assert.NotZero(t, p.Info()) +} + +func testCheckCSVFormatField(name string) (newName string, offset int, valid bool) { + if len(name) < 2 || !strings.HasPrefix(name, "$") { + return "", 0, false + } + if name == "$OFFSET" { + return "", 1, false + } + return name, 0, true +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/json.go b/src/go/collectors/go.d.plugin/pkg/logs/json.go new file mode 100644 index 00000000000000..cfd6c83e710b08 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/json.go @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "bufio" + "fmt" + "io" + "strconv" + + "github.com/valyala/fastjson" +) + +type JSONConfig struct { + Mapping map[string]string `yaml:"mapping"` +} + +type JSONParser struct { + reader *bufio.Reader + parser fastjson.Parser + buf []byte + mapping map[string]string +} + +func NewJSONParser(config JSONConfig, in io.Reader) (*JSONParser, error) { + parser := &JSONParser{ + reader: bufio.NewReader(in), + mapping: config.Mapping, + buf: make([]byte, 0, 100), + } + return parser, nil +} + +func (p *JSONParser) ReadLine(line LogLine) error { + row, err := p.reader.ReadSlice('\n') + if err != nil && len(row) == 0 { + return err + } + if len(row) > 0 && row[len(row)-1] == '\n' { + row = row[:len(row)-1] + } + return p.Parse(row, line) +} + +func (p *JSONParser) Parse(row []byte, line LogLine) error { + val, err := p.parser.ParseBytes(row) + if err != nil { + return err + } + + if err := p.parseObject("", val, line); err != nil { + return &ParseError{msg: fmt.Sprintf("json parse: %v", err), err: err} + } + + return nil +} + +func (p *JSONParser) parseObject(prefix string, val *fastjson.Value, line LogLine) error { + obj, err := val.Object() + if err != nil { + return err + } + + obj.Visit(func(key []byte, v *fastjson.Value) { + if err != nil { + return + } + + k := jsonObjKey(prefix, string(key)) + + switch v.Type() { + case fastjson.TypeString, fastjson.TypeNumber: + err = p.parseStringNumber(k, v, line) + case fastjson.TypeArray: + err = p.parseArray(k, v, line) + case fastjson.TypeObject: + err = p.parseObject(k, v, line) + default: + return + } + }) + + return err +} + +func jsonObjKey(prefix, key string) string { + if prefix == "" { + return key + } + return prefix + "." 
+ key +} + +func (p *JSONParser) parseArray(key string, val *fastjson.Value, line LogLine) error { + arr, err := val.Array() + if err != nil { + return err + } + + for i, v := range arr { + k := jsonObjKey(key, strconv.Itoa(i)) + + switch v.Type() { + case fastjson.TypeString, fastjson.TypeNumber: + err = p.parseStringNumber(k, v, line) + case fastjson.TypeArray: + err = p.parseArray(k, v, line) + case fastjson.TypeObject: + err = p.parseObject(k, v, line) + default: + continue + } + + if err != nil { + return err + } + } + + return err +} + +func (p *JSONParser) parseStringNumber(key string, val *fastjson.Value, line LogLine) error { + if mapped, ok := p.mapping[key]; ok { + key = mapped + } + + p.buf = p.buf[:0] + if p.buf = val.MarshalTo(p.buf); len(p.buf) == 0 { + return nil + } + + if val.Type() == fastjson.TypeString { + // trim " + return line.Assign(key, string(p.buf[1:len(p.buf)-1])) + } + return line.Assign(key, string(p.buf)) +} + +func (p *JSONParser) Info() string { + return fmt.Sprintf("json: %q", p.mapping) +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/json_test.go b/src/go/collectors/go.d.plugin/pkg/logs/json_test.go new file mode 100644 index 00000000000000..b82850031a06b5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/json_test.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewJSONParser(t *testing.T) { + tests := map[string]struct { + config JSONConfig + wantErr bool + }{ + "empty config": { + config: JSONConfig{}, + wantErr: false, + }, + "with mappings": { + config: JSONConfig{Mapping: map[string]string{"from_field_1": "to_field_1"}}, + wantErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + p, err := NewJSONParser(test.config, nil) + + if test.wantErr { + assert.Error(t, err) + assert.Nil(t, p) + } else { + assert.NoError(t, err) + assert.NotNil(t, p) + } + }) + } +} + +func TestJSONParser_ReadLine(t *testing.T) { + tests := map[string]struct { + config JSONConfig + input string + wantAssigned map[string]string + wantErr bool + }{ + "string value": { + input: `{ "string": "example.com" }`, + wantErr: false, + wantAssigned: map[string]string{ + "string": "example.com", + }, + }, + "int value": { + input: `{ "int": 1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "int": "1", + }, + }, + "float value": { + input: `{ "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "float": "1.1", + }, + }, + "string, int, float values": { + input: `{ "string": "example.com", "int": 1, "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "string": "example.com", + "int": "1", + "float": "1.1", + }, + }, + "string, int, float values with mappings": { + config: JSONConfig{Mapping: map[string]string{ + "string": "STRING", + "int": "INT", + "float": "FLOAT", + }}, + input: `{ "string": "example.com", "int": 1, "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "STRING": "example.com", + "INT": "1", + "FLOAT": "1.1", + }, + }, + "nested": { + input: `{"one":{"two":2,"three":{"four":4}},"five":5}`, + config: JSONConfig{Mapping: map[string]string{ + "one.two": "mapped_value", + }}, + wantErr: false, + wantAssigned: map[string]string{ + "mapped_value": "2", + "one.three.four": "4", + "five": "5", + }, + }, + "nested with array": { + input: `{"one":{"two":[2,22]},"five":5}`, + config: 
JSONConfig{Mapping: map[string]string{ + "one.two.1": "mapped_value", + }}, + wantErr: false, + wantAssigned: map[string]string{ + "one.two.0": "2", + "mapped_value": "22", + "five": "5", + }, + }, + "error on malformed JSON": { + input: `{ "host"": unquoted_string}`, + wantErr: true, + }, + "error on empty input": { + wantErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + line := newLogLine() + in := strings.NewReader(test.input) + p, err := NewJSONParser(test.config, in) + require.NoError(t, err) + require.NotNil(t, p) + + err = p.ReadLine(line) + + if test.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, test.wantAssigned, line.assigned) + } + }) + } +} + +func TestJSONParser_Parse(t *testing.T) { + tests := map[string]struct { + config JSONConfig + input string + wantAssigned map[string]string + wantErr bool + }{ + "string value": { + input: `{ "string": "example.com" }`, + wantErr: false, + wantAssigned: map[string]string{ + "string": "example.com", + }, + }, + "int value": { + input: `{ "int": 1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "int": "1", + }, + }, + "float value": { + input: `{ "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "float": "1.1", + }, + }, + "string, int, float values": { + input: `{ "string": "example.com", "int": 1, "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "string": "example.com", + "int": "1", + "float": "1.1", + }, + }, + "string, int, float values with mappings": { + config: JSONConfig{Mapping: map[string]string{ + "string": "STRING", + "int": "INT", + "float": "FLOAT", + }}, + input: `{ "string": "example.com", "int": 1, "float": 1.1 }`, + wantErr: false, + wantAssigned: map[string]string{ + "STRING": "example.com", + "INT": "1", + "FLOAT": "1.1", + }, + }, + "error on malformed JSON": { + input: `{ "host"": unquoted_string}`, + wantErr: true, + }, + "error on empty input": { + wantErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + line := newLogLine() + p, err := NewJSONParser(test.config, nil) + require.NoError(t, err) + require.NotNil(t, p) + + err = p.Parse([]byte(test.input), line) + + if test.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, test.wantAssigned, line.assigned) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/lastline.go b/src/go/collectors/go.d.plugin/pkg/logs/lastline.go new file mode 100644 index 00000000000000..911dbf49734db2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/lastline.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "errors" + "os" + + "github.com/clbanning/rfile/v2" +) + +const DefaultMaxLineWidth = 4 * 1024 // assume disk block size is 4K + +var ErrTooLongLine = errors.New("too long line") + +// ReadLastLine returns the last line of the file and any read error encountered. +// It expects last line width <= maxLineWidth. +// If maxLineWidth <= 0, it defaults to DefaultMaxLineWidth. 
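+// It returns ErrTooLongLine if no line break is found within maxLineWidth bytes from the end of the file.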
+func ReadLastLine(filename string, maxLineWidth int64) ([]byte, error) { + if maxLineWidth <= 0 { + maxLineWidth = DefaultMaxLineWidth + } + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + stat, _ := f.Stat() + endPos := stat.Size() + if endPos == 0 { + return []byte{}, nil + } + startPos := endPos - maxLineWidth + if startPos < 0 { + startPos = 0 + } + buf := make([]byte, endPos-startPos) + n, err := f.ReadAt(buf, startPos) + if err != nil { + return nil, err + } + lnPos := 0 + foundLn := false + for i := n - 2; i >= 0; i-- { + ch := buf[i] + if ch == '\n' { + foundLn = true + lnPos = i + break + } + } + if foundLn { + return buf[lnPos+1 : n], nil + } + if startPos == 0 { + return buf[0:n], nil + } + + return nil, ErrTooLongLine +} + +func ReadLastLines(filename string, n uint) ([]string, error) { + return rfile.Tail(filename, int(n)) +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/lastline_test.go b/src/go/collectors/go.d.plugin/pkg/logs/lastline_test.go new file mode 100644 index 00000000000000..ea0a75e9e55c7c --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/lastline_test.go @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReadLastLine(t *testing.T) { + tests := []struct { + name string + content string + expected string + err error + }{ + {"empty", "", "", nil}, + {"empty-ln", "\n", "\n", nil}, + {"one-line", "hello", "hello", nil}, + {"one-line-ln", "hello\n", "hello\n", nil}, + {"multi-line", "hello\nworld", "world", nil}, + {"multi-line-ln", "hello\nworld\n", "world\n", nil}, + {"long-line", "hello hello hello", "", ErrTooLongLine}, + {"long-line-ln", "hello hello hello\n", "", ErrTooLongLine}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + filename := prepareFile(t, test.content) + defer func() { _ = os.Remove(filename) }() + + line, err := ReadLastLine(filename, 10) + + if test.err != nil { + require.NotNil(t, err) + assert.Contains(t, err.Error(), test.err.Error()) + } else { + assert.Equal(t, test.expected, string(line)) + } + }) + } +} + +func prepareFile(t *testing.T, content string) string { + t.Helper() + file, err := os.CreateTemp("", "go-test") + require.NoError(t, err) + defer func() { _ = file.Close() }() + + _, _ = file.WriteString(content) + return file.Name() +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/ltsv.go b/src/go/collectors/go.d.plugin/pkg/logs/ltsv.go new file mode 100644 index 00000000000000..558f9e0763ca96 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/ltsv.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "bufio" + "errors" + "fmt" + "io" + "strconv" + "unsafe" + + "github.com/Wing924/ltsv" +) + +type ( + LTSVConfig struct { + FieldDelimiter string `yaml:"field_delimiter"` + ValueDelimiter string `yaml:"value_delimiter"` + Mapping map[string]string `yaml:"mapping"` + } + + LTSVParser struct { + r *bufio.Reader + parser ltsv.Parser + mapping map[string]string + } +) + +func NewLTSVParser(config LTSVConfig, in io.Reader) (*LTSVParser, error) { + p := ltsv.Parser{ + FieldDelimiter: ltsv.DefaultParser.FieldDelimiter, + ValueDelimiter: ltsv.DefaultParser.ValueDelimiter, + StrictMode: false, + } + if config.FieldDelimiter != "" { + if d, err := parseLTSVDelimiter(config.FieldDelimiter); err == nil { + p.FieldDelimiter = d + } + } + if 
config.ValueDelimiter != "" { + if d, err := parseLTSVDelimiter(config.ValueDelimiter); err == nil { + p.ValueDelimiter = d + } + } + parser := &LTSVParser{ + r: bufio.NewReader(in), + parser: p, + mapping: config.Mapping, + } + return parser, nil +} + +func (p *LTSVParser) ReadLine(line LogLine) error { + row, err := p.r.ReadSlice('\n') + if err != nil && len(row) == 0 { + return err + } + if len(row) > 0 && row[len(row)-1] == '\n' { + row = row[:len(row)-1] + } + return p.Parse(row, line) +} + +func (p *LTSVParser) Parse(row []byte, line LogLine) error { + err := p.parser.ParseLine(row, func(label []byte, value []byte) error { + s := *(*string)(unsafe.Pointer(&label)) // no alloc, same as in fmt.Builder.String() + if v, ok := p.mapping[s]; ok { + s = v + } + return line.Assign(s, string(value)) + }) + if err != nil { + return &ParseError{msg: fmt.Sprintf("ltsv parse: %v", err), err: err} + } + return nil +} + +func (p LTSVParser) Info() string { + return fmt.Sprintf("ltsv: %q", p.mapping) +} + +func parseLTSVDelimiter(s string) (byte, error) { + if isNumber(s) { + d, err := strconv.ParseUint(s, 10, 8) + if err != nil { + return 0, fmt.Errorf("invalid LTSV delimiter: %v", err) + } + return byte(d), nil + } + if len(s) != 1 { + return 0, errors.New("invalid LTSV delimiter: must be a single character") + } + return s[0], nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/ltsv_test.go b/src/go/collectors/go.d.plugin/pkg/logs/ltsv_test.go new file mode 100644 index 00000000000000..f6d5ec2bd39dca --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/ltsv_test.go @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "strings" + "testing" + + "github.com/Wing924/ltsv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testLTSVConfig = LTSVConfig{ + FieldDelimiter: " ", + ValueDelimiter: "=", + Mapping: map[string]string{"KEY": "key"}, +} + +func TestNewLTSVParser(t *testing.T) { + tests := []struct { + name string + config LTSVConfig + wantErr bool + }{ + {name: "config", config: testLTSVConfig}, + {name: "empty config"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p, err := NewLTSVParser(tt.config, nil) + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, p) + } else { + assert.NoError(t, err) + assert.NotNil(t, p) + if tt.config.FieldDelimiter == "" { + assert.Equal(t, ltsv.DefaultParser.FieldDelimiter, p.parser.FieldDelimiter) + } else { + assert.Equal(t, tt.config.FieldDelimiter, string(p.parser.FieldDelimiter)) + } + if tt.config.ValueDelimiter == "" { + assert.Equal(t, ltsv.DefaultParser.ValueDelimiter, p.parser.ValueDelimiter) + } else { + assert.Equal(t, tt.config.ValueDelimiter, string(p.parser.ValueDelimiter)) + } + assert.Equal(t, tt.config.Mapping, p.mapping) + } + }) + } +} + +func TestLTSVParser_ReadLine(t *testing.T) { + tests := []struct { + name string + row string + wantErr bool + wantParseErr bool + }{ + {name: "no error", row: "A=1 B=2 KEY=3"}, + {name: "error on parsing", row: "NO LABEL", wantErr: true, wantParseErr: true}, + {name: "error on assigning", row: "A=1 ERR=2", wantErr: true, wantParseErr: true}, + {name: "error on reading EOF", row: "", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + r := strings.NewReader(tt.row) + p, err := NewLTSVParser(testLTSVConfig, r) + require.NoError(t, err) + + err = p.ReadLine(&line) + + if tt.wantErr { + require.Error(t, err) + if tt.wantParseErr { + 
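// a parse failure must surface as a ParseError, not as a plain read error +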
assert.True(t, IsParseError(err)) + } else { + assert.False(t, IsParseError(err)) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestLTSVParser_Parse(t *testing.T) { + tests := []struct { + name string + row string + wantErr bool + }{ + {name: "no error", row: "A=1 B=2"}, + {name: "error on parsing", row: "NO LABEL", wantErr: true}, + {name: "error on assigning", row: "A=1 ERR=2", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + p, err := NewLTSVParser(testLTSVConfig, nil) + require.NoError(t, err) + + err = p.Parse([]byte(tt.row), &line) + + if tt.wantErr { + require.Error(t, err) + assert.True(t, IsParseError(err)) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestLTSVParser_Info(t *testing.T) { + p, err := NewLTSVParser(testLTSVConfig, nil) + require.NoError(t, err) + assert.NotZero(t, p.Info()) +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/parser.go b/src/go/collectors/go.d.plugin/pkg/logs/parser.go new file mode 100644 index 00000000000000..f1807283a8d63b --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/parser.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "errors" + "fmt" + "io" + "strconv" +) + +type ParseError struct { + msg string + err error +} + +func (e ParseError) Error() string { return e.msg } + +func (e ParseError) Unwrap() error { return e.err } + +func IsParseError(err error) bool { var v *ParseError; return errors.As(err, &v) } + +type ( + LogLine interface { + Assign(name string, value string) error + } + + Parser interface { + ReadLine(LogLine) error + Parse(row []byte, line LogLine) error + Info() string + } +) + +const ( + TypeCSV = "csv" + TypeLTSV = "ltsv" + TypeRegExp = "regexp" + TypeJSON = "json" +) + +type ParserConfig struct { + LogType string `yaml:"log_type"` + CSV CSVConfig `yaml:"csv_config"` + LTSV LTSVConfig `yaml:"ltsv_config"` + RegExp RegExpConfig `yaml:"regexp_config"` + JSON JSONConfig `yaml:"json_config"` +} + +func NewParser(config ParserConfig, in io.Reader) (Parser, error) { + switch config.LogType { + case TypeCSV: + return NewCSVParser(config.CSV, in) + case TypeLTSV: + return NewLTSVParser(config.LTSV, in) + case TypeRegExp: + return NewRegExpParser(config.RegExp, in) + case TypeJSON: + return NewJSONParser(config.JSON, in) + default: + return nil, fmt.Errorf("invalid type: %q", config.LogType) + } +} + +func isNumber(s string) bool { _, err := strconv.Atoi(s); return err == nil } diff --git a/src/go/collectors/go.d.plugin/pkg/logs/parser_test.go b/src/go/collectors/go.d.plugin/pkg/logs/parser_test.go new file mode 100644 index 00000000000000..88ef46c27115cb --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/parser_test.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs diff --git a/src/go/collectors/go.d.plugin/pkg/logs/reader.go b/src/go/collectors/go.d.plugin/pkg/logs/reader.go new file mode 100644 index 00000000000000..f3321984a8e910 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/reader.go @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/netdata/go.d.plugin/logger" +) + +const ( + maxEOF = 60 +) + +var ( + ErrNoMatchedFile = errors.New("no matched files") +) + +// Reader is a log rotate aware Reader +// TODO: better reopen algorithm +// TODO: handle truncate +type Reader struct { + file *os.File + path string + 
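// excludePath: files matching this shell glob are skipped when (re)discovering the log file +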
excludePath string + eofCounter int + continuousEOF int + log *logger.Logger +} + +// Open opens a file and seeks to the end of it. +// path: the shell file name pattern +// excludePath: the shell file name pattern +func Open(path string, excludePath string, log *logger.Logger) (*Reader, error) { + var err error + if path, err = filepath.Abs(path); err != nil { + return nil, err + } + if _, err = filepath.Match(path, "/"); err != nil { + return nil, fmt.Errorf("bad path syntax: %q", path) + } + if _, err = filepath.Match(excludePath, "/"); err != nil { + return nil, fmt.Errorf("bad exclude_path syntax: %q", excludePath) + } + r := &Reader{ + path: path, + excludePath: excludePath, + log: log, + } + + if err = r.open(); err != nil { + return nil, err + } + return r, nil +} + +// CurrentFilename returns the name of the currently opened file. +func (r *Reader) CurrentFilename() string { + return r.file.Name() +} + +func (r *Reader) open() error { + path := r.findFile() + if path == "" { + r.log.Debugf("couldn't find log file, used path: '%s', exclude_path: '%s'", r.path, r.excludePath) + return ErrNoMatchedFile + } + r.log.Debug("open log file: ", path) + file, err := os.Open(path) + if err != nil { + return err + } + stat, err := file.Stat() + if err != nil { + return err + } + if _, err = file.Seek(stat.Size(), io.SeekStart); err != nil { + return err + } + r.file = file + return nil +} + +func (r *Reader) Read(p []byte) (n int, err error) { + n, err = r.file.Read(p) + if err != nil { + switch err { + case io.EOF: + err = r.handleEOFErr() + case os.ErrInvalid: // r.file is nil after Close + err = r.handleInvalidArgErr() + } + return + } + r.continuousEOF = 0 + return +} + +func (r *Reader) handleEOFErr() (err error) { + err = io.EOF + r.eofCounter++ + r.continuousEOF++ + if r.eofCounter < maxEOF || r.continuousEOF < 2 { + return err + } + if err2 := r.reopen(); err2 != nil { + err = err2 + } + return err +} + +func (r *Reader) handleInvalidArgErr() (err error) { + err = io.EOF + if err2 := r.reopen(); err2 != nil { + err = err2 + } + return err +} + +func (r *Reader) Close() (err error) { + if r == nil || r.file == nil { + return + } + r.log.Debug("close log file: ", r.file.Name()) + err = r.file.Close() + r.file = nil + r.eofCounter = 0 + return +} + +func (r *Reader) reopen() error { + r.log.Debugf("reopen, look for: %s", r.path) + _ = r.Close() + return r.open() +} + +func (r *Reader) findFile() string { + return find(r.path, r.excludePath) +} + +func find(path, exclude string) string { + return finder{}.find(path, exclude) +} + +// TODO: tests +type finder struct{} + +func (f finder) find(path, exclude string) string { + files, _ := filepath.Glob(path) + if len(files) == 0 { + return "" + } + + files = f.filter(files, exclude) + if len(files) == 0 { + return "" + } + + return f.findLastFile(files) +} + +func (f finder) filter(files []string, exclude string) []string { + if exclude == "" { + return files + } + + fs := make([]string, 0, len(files)) + for _, file := range files { + if ok, _ := filepath.Match(exclude, file); ok { + continue + } + fs = append(fs, file) + } + return fs +} + +// TODO: the logic is probably wrong +func (f finder) findLastFile(files []string) string { + sort.Strings(files) + for i := len(files) - 1; i >= 0; i-- { + stat, err := os.Stat(files[i]) + if err != nil || !stat.Mode().IsRegular() { + continue + } + return files[i] + } + return "" +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/reader_test.go b/src/go/collectors/go.d.plugin/pkg/logs/reader_test.go new file mode 100644 index 
00000000000000..e6ef47fe781da8 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/reader_test.go @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "bufio" + "fmt" + "io" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReader_Read(t *testing.T) { + reader, teardown := prepareTestReader(t) + defer teardown() + + r := testReader{bufio.NewReader(reader)} + filename := reader.CurrentFilename() + numLogs := 5 + var sum int + + for i := 0; i < 10; i++ { + appendLogs(t, filename, time.Millisecond*10, numLogs) + n, err := r.readUntilEOF() + sum += n + + assert.Equal(t, io.EOF, err) + assert.Equal(t, numLogs*(i+1), sum) + } +} + +func TestReader_Read_HandleFileRotation(t *testing.T) { + reader, teardown := prepareTestReader(t) + defer teardown() + + r := testReader{bufio.NewReader(reader)} + filename := reader.CurrentFilename() + numLogs := 5 + rotateFile(t, filename) + appendLogs(t, filename, time.Millisecond*10, numLogs) + + n, err := r.readUntilEOFTimes(maxEOF) + assert.Equal(t, io.EOF, err) + assert.Equal(t, 0, n) + + appendLogs(t, filename, time.Millisecond*10, numLogs) + n, err = r.readUntilEOF() + assert.Equal(t, io.EOF, err) + assert.Equal(t, numLogs, n) +} + +func TestReader_Read_HandleFileRotationWithDelay(t *testing.T) { + reader, teardown := prepareTestReader(t) + defer teardown() + + r := testReader{bufio.NewReader(reader)} + filename := reader.CurrentFilename() + _ = os.Remove(filename) + + // trigger reopen first time + n, err := r.readUntilEOFTimes(maxEOF) + assert.Equal(t, ErrNoMatchedFile, err) + assert.Equal(t, 0, n) + + f, err := os.Create(filename) + require.NoError(t, err) + _ = f.Close() + + // trigger reopen 2nd time + n, err = r.readUntilEOF() + assert.Equal(t, io.EOF, err) + assert.Equal(t, 0, n) + + numLogs := 5 + appendLogs(t, filename, time.Millisecond*10, numLogs) + n, err = r.readUntilEOF() + assert.Equal(t, io.EOF, err) + assert.Equal(t, numLogs, n) +} + +func TestReader_Close(t *testing.T) { + reader, teardown := prepareTestReader(t) + defer teardown() + + assert.NoError(t, reader.Close()) + assert.Nil(t, reader.file) +} + +func TestReader_Close_NilFile(t *testing.T) { + var r Reader + assert.NoError(t, r.Close()) +} + +func TestOpen(t *testing.T) { + tempFileName1 := prepareTempFile(t, "*-web_log-open-test-1.log") + tempFileName2 := prepareTempFile(t, "*-web_log-open-test-2.log") + tempFileName3 := prepareTempFile(t, "*-web_log-open-test-3.log") + defer func() { + _ = os.Remove(tempFileName1) + _ = os.Remove(tempFileName2) + _ = os.Remove(tempFileName3) + }() + + makePath := func(s string) string { + return filepath.Join(os.TempDir(), s) + } + + tests := []struct { + name string + path string + exclude string + err bool + }{ + { + name: "match without exclude", + path: makePath("*-web_log-open-test-[1-3].log"), + }, + { + name: "match with exclude", + path: makePath("*-web_log-open-test-[1-3].log"), + exclude: makePath("*-web_log-open-test-[2-3].log"), + }, + { + name: "exclude everything", + path: makePath("*-web_log-open-test-[1-3].log"), + exclude: makePath("*"), + err: true, + }, + { + name: "no match", + path: makePath("*-web_log-no-match-test-[1-3].log"), + err: true, + }, + { + name: "bad path pattern", + path: "[qw", + err: true, + }, + { + name: "bad exclude path pattern", + path: makePath("*-web_log-open-test-[1-3].log"), + exclude: "[qw", + err: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r, err := Open(tt.path, tt.exclude, 
nil) + + if tt.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, r.file) + _ = r.Close() + } + }) + } +} + +func TestReader_CurrentFilename(t *testing.T) { + reader, teardown := prepareTestReader(t) + defer teardown() + + assert.Equal(t, reader.file.Name(), reader.CurrentFilename()) +} + +type testReader struct { + *bufio.Reader +} + +func (r *testReader) readUntilEOF() (n int, err error) { + for { + _, err = r.ReadBytes('\n') + if err != nil { + break + } + n++ + } + return n, err +} + +func (r *testReader) readUntilEOFTimes(times int) (sum int, err error) { + var n int + for i := 0; i < times; i++ { + n, err = r.readUntilEOF() + if err != io.EOF { + break + } + sum += n + } + return sum, err +} + +func prepareTempFile(t *testing.T, pattern string) string { + t.Helper() + f, err := os.CreateTemp("", pattern) + require.NoError(t, err) + return f.Name() +} + +func prepareTestReader(t *testing.T) (reader *Reader, teardown func()) { + t.Helper() + filename := prepareTempFile(t, "*-web_log-test.log") + f, err := os.Open(filename) + require.NoError(t, err) + + teardown = func() { + _ = os.Remove(filename) + _ = reader.file.Close() + } + reader = &Reader{ + file: f, + path: filename, + } + return reader, teardown +} + +func rotateFile(t *testing.T, filename string) { + t.Helper() + require.NoError(t, os.Remove(filename)) + f, err := os.Create(filename) + require.NoError(t, err) + _ = f.Close() +} + +func appendLogs(t *testing.T, filename string, interval time.Duration, numOfLogs int) { + t.Helper() + base := filepath.Base(filename) + file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND, os.ModeAppend) + require.NoError(t, err) + require.NotNil(t, file) + defer func() { _ = file.Close() }() + + for i := 0; i < numOfLogs; i++ { + _, err = fmt.Fprintln(file, "line", i, "filename", base) + require.NoError(t, err) + time.Sleep(interval) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/regexp.go b/src/go/collectors/go.d.plugin/pkg/logs/regexp.go new file mode 100644 index 00000000000000..84b725fd944585 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/regexp.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "bufio" + "errors" + "fmt" + "io" + "regexp" +) + +type ( + RegExpConfig struct { + Pattern string `yaml:"pattern"` + } + + RegExpParser struct { + r *bufio.Reader + pattern *regexp.Regexp + } +) + +func NewRegExpParser(config RegExpConfig, in io.Reader) (*RegExpParser, error) { + if config.Pattern == "" { + return nil, errors.New("empty pattern") + } + + pattern, err := regexp.Compile(config.Pattern) + if err != nil { + return nil, fmt.Errorf("compile: %w", err) + } + + if pattern.NumSubexp() == 0 { + return nil, errors.New("pattern has no named subgroups") + } + + p := &RegExpParser{ + r: bufio.NewReader(in), + pattern: pattern, + } + return p, nil +} + +func (p *RegExpParser) ReadLine(line LogLine) error { + row, err := p.r.ReadSlice('\n') + if err != nil && len(row) == 0 { + return err + } + if len(row) > 0 && row[len(row)-1] == '\n' { + row = row[:len(row)-1] + } + return p.Parse(row, line) +} + +func (p *RegExpParser) Parse(row []byte, line LogLine) error { + match := p.pattern.FindSubmatch(row) + if len(match) == 0 { + return &ParseError{msg: "regexp parse: unmatched line"} + } + + for i, name := range p.pattern.SubexpNames() { + if name == "" || match[i] == nil { + continue + } + err := line.Assign(name, string(match[i])) + if err != nil { + return &ParseError{msg: fmt.Sprintf("regexp 
parse: %v", err), err: err} + } + } + return nil +} + +func (p RegExpParser) Info() string { + return fmt.Sprintf("regexp: %s", p.pattern) +} diff --git a/src/go/collectors/go.d.plugin/pkg/logs/regexp_test.go b/src/go/collectors/go.d.plugin/pkg/logs/regexp_test.go new file mode 100644 index 00000000000000..fc7bacaa5e3921 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/logs/regexp_test.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package logs + +import ( + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewRegExpParser(t *testing.T) { + tests := []struct { + name string + pattern string + wantErr bool + }{ + {name: "valid pattern", pattern: `(?P<A>\d+) (?P<B>\d+)`}, + {name: "no names subgroups in pattern", pattern: `(?:\d+) (?:\d+)`, wantErr: true}, + {name: "invalid pattern", pattern: `(((?P<A>\d+) (?P<B>\d+)`, wantErr: true}, + {name: "empty pattern", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, nil) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, p) + } else { + assert.NoError(t, err) + assert.NotNil(t, p) + } + }) + } +} + +func TestRegExpParser_ReadLine(t *testing.T) { + tests := []struct { + name string + row string + pattern string + wantErr bool + wantParseErr bool + }{ + {name: "match and no error", row: "1 2", pattern: `(?P<A>\d+) (?P<B>\d+)`}, + {name: "match but error on assigning", row: "1 2", pattern: `(?P<A>\d+) (?P<ERR>\d+)`, wantErr: true, wantParseErr: true}, + {name: "not match", row: "A B", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true, wantParseErr: true}, + {name: "not match multiline", row: "a b\n3 4", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true, wantParseErr: true}, + {name: "error on reading EOF", row: "", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + r := strings.NewReader(tt.row) + p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, r) + require.NoError(t, err) + + err = p.ReadLine(&line) + if tt.wantErr { + require.Error(t, err) + if tt.wantParseErr { + assert.True(t, IsParseError(err)) + } else { + assert.False(t, IsParseError(err)) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestRegExpParser_Parse(t *testing.T) { + tests := []struct { + name string + row string + pattern string + wantErr bool + }{ + {name: "match and no error", row: "1 2", pattern: `(?P<A>\d+) (?P<B>\d+)`}, + {name: "match but error on assigning", row: "1 2", pattern: `(?P<A>\d+) (?P<ERR>\d+)`, wantErr: true}, + {name: "not match", row: "A B", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var line logLine + p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, nil) + require.NoError(t, err) + + err = p.Parse([]byte(tt.row), &line) + if tt.wantErr { + require.Error(t, err) + assert.True(t, IsParseError(err)) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestRegExpParser_Info(t *testing.T) { + p, err := NewRegExpParser(RegExpConfig{Pattern: `(?P<A>\d+) (?P<B>\d+)`}, nil) + require.NoError(t, err) + assert.NotZero(t, p.Info()) +} + +type logLine struct { + assigned map[string]string +} + +func newLogLine() *logLine { + return &logLine{ + assigned: make(map[string]string), + } +} + +func (l *logLine) Assign(name, val string) error { + switch 
name { + case "$ERR", "ERR": + return errors.New("assign error") + } + if l.assigned != nil { + l.assigned[name] = val + } + return nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/README.md b/src/go/collectors/go.d.plugin/pkg/matcher/README.md new file mode 100644 index 00000000000000..fec4d4519ed858 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/README.md @@ -0,0 +1,142 @@ +<!-- +title: "matcher" +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/matcher/README.md" +sidebar_label: "matcher" +learn_status: "Published" +learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages" +--> + +# matcher +## Supported Format + +* string +* glob +* regexp +* simple patterns + +Depending on the symbol at the start of the string, the `matcher` will use one of the supported formats. + +| matcher | short format | long format | +|-----------------|--------------|-------------------| +| string | `=` | `string` | +| glob | `*` | `glob` | +| regexp | `~` | `regexp` | +| simple patterns | | `simple_patterns` | + +Example: + +- `* pattern`: It will use the `glob` matcher to find the `pattern` in the string. + +### Syntax + +**Tip**: Read `::=` as `is defined as`. + +``` +Short Syntax + [ <not> ] <format> <space> <expr> + + <not> ::= '!' + negative expression + <format> ::= [ '=', '~', '*' ] + '=' means string match + '~' means regexp match + '*' means glob match + <space> ::= { ' ' | '\t' | '\n' | '\r' } + <expr> ::= any string + + Long Syntax + [ <not> ] <format> <separator> <expr> + + <format> ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ] + <not> ::= '!' + negative expression + <separator> ::= ':' + <expr> ::= any string +``` + +When using the short syntax, you can enable the glob format by starting the string with a `*`, while in the long syntax +you need to define it more explicitly. The following examples are equivalent. `simple_patterns` can be used **only** with +the long syntax. + +Examples: + +- Short Syntax: `'* * '` +- Long Syntax: `'glob:*'` + +### String matcher + +The string matcher reports whether the given value equals the string. + +Examples: + +- `'= foo'` matches only if the string is `foo`. +- `'!= bar'` matches any string that is not `bar`. + +String matcher means **exact match** of the `string`. There are other string match related cases: + +- string has prefix `something` +- string has suffix `something` +- string contains `something` + +This is achievable using the `glob` matcher: + +- `* PREFIX*`, means that it matches with any string that *starts* with `PREFIX`, e.g. `PREFIXnetdata` +- `* *SUFFIX`, means that it matches with any string that *ends* with `SUFFIX`, e.g. `netdataSUFFIX` +- `* *SUBSTRING*`, means that it matches with any string that *contains* `SUBSTRING`, e.g. `netdataSUBSTRINGnetdata`
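+ +A short Go sketch of the above (an illustrative usage example, not part of the package docs; `Parse` and `Must` are defined later in this patch): + +```go +m := matcher.Must(matcher.Parse("* PREFIX*")) // glob short syntax, prefix match +m.MatchString("PREFIXnetdata") // => true +m.MatchString("netdataPREFIX") // => false +```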
+ +### Glob matcher + +The glob matcher reports whether the given value matches the wildcard pattern. It uses the standard `golang` +library `path`. You can read more about the library in the [golang documentation](https://golang.org/pkg/path/#Match), +where you can also practice with the library in order to learn the syntax and use it in your Netdata configuration. + +The pattern syntax is: + +``` + pattern: + { term } + term: + '*' matches any sequence of characters + '?' matches any single character + '[' [ '^' ] { character-range } ']' + character class (must be non-empty) + c matches character c (c != '*', '?', '\\', '[') + '\\' c matches character c + + character-range: + c matches character c (c != '\\', '-', ']') + '\\' c matches character c + lo '-' hi matches character c for lo <= c <= hi +``` + +Examples: + +- `* ?` matches any string that is a single character. +- `'?a'` matches any 2 character string that starts with any character and the second character is `a`, like `ba` but + not `bb` or `bba`. +- `'[^abc]'` matches any character that is not `a`, `b` or `c`. `'[abc]'` matches only `a`, `b`, `c`. +- `'*[a-d]'` matches any string (`*`) that ends with a character that is between `a` and `d` (i.e. `a`, `b`, `c`, `d`). + +### Regexp matcher + +The regexp matcher reports whether the given value matches the RegExp pattern (using regexp.Match). + +The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/. + +Learn more about regular expressions at [RegexOne](https://regexone.com/). + +### Simple patterns matcher + +The simple patterns matcher reports whether the given value matches the simple patterns. + +Simple patterns are a space-separated list of words. Each word may use any number of wildcards `*`. Simple patterns +allow negative matches by prefixing a word with `!`. + +Examples: + +- `!*bad* *` matches anything, except all those that contain the word bad. +- `*foobar* !foo* !*bar *` matches everything containing foobar, except strings that start with foo or end with bar. diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/cache.go b/src/go/collectors/go.d.plugin/pkg/matcher/cache.go new file mode 100644 index 00000000000000..4594fa06f9b2bd --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/cache.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import "sync" + +type ( + cachedMatcher struct { + matcher Matcher + + mux sync.RWMutex + cache map[string]bool + } +) + +// WithCache adds a cache to the matcher. 
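+// +// A usage sketch (the expression is illustrative): +// +//	m := matcher.Must(matcher.Parse("~ [0-9]+")) +//	m = matcher.WithCache(m) +//	m.MatchString("line 1") // first call computes the result, later calls hit the cache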
+func WithCache(m Matcher) Matcher { + switch m { + case TRUE(), FALSE(): + return m + default: + return &cachedMatcher{matcher: m, cache: make(map[string]bool)} + } +} + +func (m *cachedMatcher) Match(b []byte) bool { + s := string(b) + if result, ok := m.fetch(s); ok { + return result + } + result := m.matcher.Match(b) + m.put(s, result) + return result +} + +func (m *cachedMatcher) MatchString(s string) bool { + if result, ok := m.fetch(s); ok { + return result + } + result := m.matcher.MatchString(s) + m.put(s, result) + return result +} + +func (m *cachedMatcher) fetch(key string) (result bool, ok bool) { + m.mux.RLock() + result, ok = m.cache[key] + m.mux.RUnlock() + return +} + +func (m *cachedMatcher) put(key string, result bool) { + m.mux.Lock() + m.cache[key] = result + m.mux.Unlock() +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/cache_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/cache_test.go new file mode 100644 index 00000000000000..a545777b382371 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/cache_test.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWithCache(t *testing.T) { + regMatcher, _ := NewRegExpMatcher("[0-9]+") + cached := WithCache(regMatcher) + + assert.True(t, cached.MatchString("1")) + assert.True(t, cached.MatchString("1")) + assert.True(t, cached.Match([]byte("2"))) + assert.True(t, cached.Match([]byte("2"))) +} + +func TestWithCache_specialCase(t *testing.T) { + assert.Equal(t, TRUE(), WithCache(TRUE())) + assert.Equal(t, FALSE(), WithCache(FALSE())) +} +func BenchmarkCachedMatcher_MatchString_cache_hit(b *testing.B) { + benchmarks := []struct { + name string + expr string + target string + }{ + {"stringFullMatcher", "= abc123", "abc123"}, + {"stringPrefixMatcher", "~ ^abc123", "abc123456"}, + {"stringSuffixMatcher", "~ abc123$", "hello abc123"}, + {"stringPartialMatcher", "~ abc123", "hello abc123 world"}, + {"globMatcher", "* abc*def", "abc12345678def"}, + {"regexp", "~ [0-9]+", "1234567890"}, + } + for _, bm := range benchmarks { + m := Must(Parse(bm.expr)) + b.Run(bm.name+"_raw", func(b *testing.B) { + for i := 0; i < b.N; i++ { + m.MatchString(bm.target) + } + }) + b.Run(bm.name+"_cache", func(b *testing.B) { + cached := WithCache(m) + b.ResetTimer() + for i := 0; i < b.N; i++ { + cached.MatchString(bm.target) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/doc.go b/src/go/collectors/go.d.plugin/pkg/matcher/doc.go new file mode 100644 index 00000000000000..33b06988d63cfd --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/doc.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +/* +Package matcher implements various formats of string matchers. + +Supported Format + + string + glob + regexp + simple patterns + +The string matcher reports whether the given value equals the string (using ==). + +The glob matcher reports whether the given value matches the wildcard pattern. +The pattern syntax is: + + pattern: + { term } + term: + '*' matches any sequence of characters + '?' 
matches any single character + '[' [ '^' ] { character-range } ']' + character class (must be non-empty) + c matches character c (c != '*', '?', '\\', '[') + '\\' c matches character c + + character-range: + c matches character c (c != '\\', '-', ']') + '\\' c matches character c + lo '-' hi matches character c for lo <= c <= hi + +The regexp matcher reports whether the given value matches the RegExp pattern (using regexp.Match). +The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/. + +The simple patterns matcher reports whether the given value matches the simple patterns. +Simple patterns are a custom format used in Netdata; +the syntax is described at https://docs.netdata.cloud/libnetdata/simple_pattern/. +*/ +package matcher diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/doc_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/doc_test.go new file mode 100644 index 00000000000000..bda44c55443b4a --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/doc_test.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher_test + +import "github.com/netdata/go.d.plugin/pkg/matcher" + +func ExampleNew_string_format() { + // create a string matcher, which performs full text match + m, err := matcher.New(matcher.FmtString, "hello") + if err != nil { + panic(err) + } + m.MatchString("hello") // => true + m.MatchString("hello world") // => false +} + +func ExampleNew_glob_format() { + // create a glob matcher, which performs wildcard match + m, err := matcher.New(matcher.FmtGlob, "hello*") + if err != nil { + panic(err) + } + m.MatchString("hello") // => true + m.MatchString("hello world") // => true + m.MatchString("Hello world") // => false +} + +func ExampleNew_simple_patterns_format() { + // create a simple patterns matcher, which performs simple patterns match + m, err := matcher.New(matcher.FmtSimplePattern, "hello* !*world *") + if err != nil { + panic(err) + } + m.MatchString("hello") // => true + m.MatchString("hello world") // => true + m.MatchString("Hello world") // => false + m.MatchString("Hello world!") // => true +} + +func ExampleNew_regexp_format() { + // create a regexp matcher, which performs regexp match + m, err := matcher.New(matcher.FmtRegExp, "[0-9]+") + if err != nil { + panic(err) + } + m.MatchString("1") // => true + m.MatchString("1a") // => true + m.MatchString("a") // => false +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/expr.go b/src/go/collectors/go.d.plugin/pkg/matcher/expr.go new file mode 100644 index 00000000000000..f9155f761bcdf0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/expr.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "errors" + "fmt" +) + +type ( + Expr interface { + Parse() (Matcher, error) + } + + // SimpleExpr is a simple expression to describe the condition: + // (includes[0].Match(v) || includes[1].Match(v) || ...) && !(excludes[0].Match(v) || excludes[1].Match(v) || ...) + SimpleExpr struct { + Includes []string `yaml:"includes" json:"includes"` + Excludes []string `yaml:"excludes" json:"excludes"` + } +) + +var ( + ErrEmptyExpr = errors.New("empty expression") +) + +// Empty returns true if both Includes and Excludes are empty. 
+func (s *SimpleExpr) Empty() bool { + return len(s.Includes) == 0 && len(s.Excludes) == 0 +} + +// Parse parses the given matchers in Includes and Excludes +func (s *SimpleExpr) Parse() (Matcher, error) { + if len(s.Includes) == 0 && len(s.Excludes) == 0 { + return nil, ErrEmptyExpr + } + var ( + includes = FALSE() + excludes = FALSE() + ) + if len(s.Includes) > 0 { + for _, item := range s.Includes { + m, err := Parse(item) + if err != nil { + return nil, fmt.Errorf("parse matcher %q error: %v", item, err) + } + includes = Or(includes, m) + } + } else { + includes = TRUE() + } + + for _, item := range s.Excludes { + m, err := Parse(item) + if err != nil { + return nil, fmt.Errorf("parse matcher %q error: %v", item, err) + } + excludes = Or(excludes, m) + } + + return And(includes, Not(excludes)), nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/expr_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/expr_test.go new file mode 100644 index 00000000000000..93a1832266a721 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/expr_test.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleExpr_none(t *testing.T) { + expr := &SimpleExpr{} + + m, err := expr.Parse() + assert.EqualError(t, err, ErrEmptyExpr.Error()) + assert.Nil(t, m) +} + +func TestSimpleExpr_include(t *testing.T) { + expr := &SimpleExpr{ + Includes: []string{ + "~ /api/", + "~ .php$", + }, + } + + m, err := expr.Parse() + assert.NoError(t, err) + + assert.True(t, m.MatchString("/api/a.php")) + assert.True(t, m.MatchString("/api/a.php2")) + assert.True(t, m.MatchString("/api2/a.php")) + assert.True(t, m.MatchString("/api/img.php")) + assert.False(t, m.MatchString("/api2/img.php2")) +} + +func TestSimpleExpr_exclude(t *testing.T) { + expr := &SimpleExpr{ + Excludes: []string{ + "~ /api/img", + }, + } + + m, err := expr.Parse() + assert.NoError(t, err) + + assert.True(t, m.MatchString("/api/a.php")) + assert.True(t, m.MatchString("/api/a.php2")) + assert.True(t, m.MatchString("/api2/a.php")) + assert.False(t, m.MatchString("/api/img.php")) + assert.True(t, m.MatchString("/api2/img.php2")) +} + +func TestSimpleExpr_both(t *testing.T) { + expr := &SimpleExpr{ + Includes: []string{ + "~ /api/", + "~ .php$", + }, + Excludes: []string{ + "~ /api/img", + }, + } + + m, err := expr.Parse() + assert.NoError(t, err) + + assert.True(t, m.MatchString("/api/a.php")) + assert.True(t, m.MatchString("/api/a.php2")) + assert.True(t, m.MatchString("/api2/a.php")) + assert.False(t, m.MatchString("/api/img.php")) + assert.False(t, m.MatchString("/api2/img.php2")) +} + +func TestSimpleExpr_Parse_NG(t *testing.T) { + { + expr := &SimpleExpr{ + Includes: []string{ + "~ (ab", + "~ .php$", + }, + } + + m, err := expr.Parse() + assert.Error(t, err) + assert.Nil(t, m) + } + { + expr := &SimpleExpr{ + Excludes: []string{ + "~ (ab", + "~ .php$", + }, + } + + m, err := expr.Parse() + assert.Error(t, err) + assert.Nil(t, m) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/glob.go b/src/go/collectors/go.d.plugin/pkg/matcher/glob.go new file mode 100644 index 00000000000000..f8cd5b0728e008 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/glob.go @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "errors" + "path/filepath" + "regexp" + "unicode/utf8" +) + +// globMatcher implements Matcher, it uses a filepath.Match-like algorithm to match. 
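+// Note: NewGlobMatcher returns a globMatcher only when the pattern still contains +// glob metacharacters after unescaping; simpler patterns become string matchers.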
+type globMatcher string + +var ( + errBadGlobPattern = errors.New("bad glob pattern") + erGlobPattern = regexp.MustCompile(`(?s)^(?:[*?]|\[\^?([^\\-\]]|\\.|.-.)+\]|\\.|[^\*\?\\\[])*$`) +) + +// NewGlobMatcher creates a new matcher with glob format +func NewGlobMatcher(expr string) (Matcher, error) { + switch expr { + case "": + return stringFullMatcher(""), nil + case "*": + return TRUE(), nil + } + + // any string that passes this regexp check is a valid pattern + if !erGlobPattern.MatchString(expr) { + return nil, errBadGlobPattern + } + + size := len(expr) + chars := []rune(expr) + startWith := true + endWith := true + startIdx := 0 + endIdx := size - 1 + if chars[startIdx] == '*' { + startWith = false + startIdx = 1 + } + if chars[endIdx] == '*' { + endWith = false + endIdx-- + } + + unescapedExpr := make([]rune, 0, endIdx-startIdx+1) + for i := startIdx; i <= endIdx; i++ { + ch := chars[i] + if ch == '\\' { + nextCh := chars[i+1] + unescapedExpr = append(unescapedExpr, nextCh) + i++ + } else if isGlobMeta(ch) { + return globMatcher(expr), nil + } else { + unescapedExpr = append(unescapedExpr, ch) + } + } + + return NewStringMatcher(string(unescapedExpr), startWith, endWith) +} + +func isGlobMeta(ch rune) bool { + switch ch { + case '*', '?', '[': + return true + default: + return false + } +} + +// Match matches. +func (m globMatcher) Match(b []byte) bool { + return m.MatchString(string(b)) +} + +// MatchString matches. +func (m globMatcher) MatchString(line string) bool { + rs, _ := m.globMatch(line) + return rs +} + +func (m globMatcher) globMatch(name string) (matched bool, err error) { + pattern := string(m) +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches rest of string unless it has a /. + // return !strings.Contains(name, string(Separator)), nil + + return true, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + // Cannot skip /. + for i := 0; i < len(name); i++ { + //for i := 0; i < len(name) && name[i] != Separator; i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. +func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. 
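+// For example, matchChunk("a[bc]?", "abcd") returns ("d", true, nil).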
+func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + //if s[0] == Separator { + // return + //} + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = filepath.ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = filepath.ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = filepath.ErrBadPattern + } + return +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/glob_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/glob_test.go new file mode 100644 index 00000000000000..09d4561054848c --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/glob_test.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewGlobMatcher(t *testing.T) { + cases := []struct { + expr string + matcher Matcher + }{ + {"", stringFullMatcher("")}, + {"a", stringFullMatcher("a")}, + {"a*b", globMatcher("a*b")}, + {`a*\b`, globMatcher(`a*\b`)}, + {`a\[`, stringFullMatcher(`a[`)}, + {`ab\`, nil}, + {`ab[`, nil}, + {`ab]`, stringFullMatcher("ab]")}, + } + for _, c := range cases { + t.Run(c.expr, func(t *testing.T) { + m, err := NewGlobMatcher(c.expr) + if c.matcher != nil { + assert.NoError(t, err) + assert.Equal(t, c.matcher, m) + } else { + assert.Error(t, err) + } + }) + } +} + +func TestGlobMatcher_MatchString(t *testing.T) { + + cases := []struct { + expected bool + expr string + line string + }{ + {true, "/a/*/d", "/a/b/c/d"}, + {true, "foo*", "foo123"}, + {true, "*foo*", "123foo123"}, + {true, "*foo", "123foo"}, + {true, "foo*bar", "foobar"}, + {true, "foo*bar", "foo baz bar"}, + {true, "a[bc]d", "abd"}, + {true, "a[^bc]d", "add"}, + {true, "a??d", "abcd"}, + {true, `a\??d`, "a?cd"}, + {true, "a[b-z]d", "abd"}, + {false, "/a/*/d", "a/b/c/d"}, + {false, "/a/*/d", "This will fail!"}, + } + + for _, c := range cases { + t.Run(c.line, func(t *testing.T) { + m := globMatcher(c.expr) + assert.Equal(t, c.expected, m.Match([]byte(c.line))) + assert.Equal(t, c.expected, 
m.MatchString(c.line)) + }) + } +} + +func BenchmarkGlob_MatchString(b *testing.B) { + benchmarks := []struct { + expr string + test string + }{ + {"", ""}, + {"abc", "abcd"}, + {"*abc", "abcd"}, + {"abc*", "abcd"}, + {"*abc*", "abcd"}, + {"[a-z]", "abcd"}, + } + for _, bm := range benchmarks { + b.Run(bm.expr+"_raw", func(b *testing.B) { + m := globMatcher(bm.expr) + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.MatchString(bm.test) + } + }) + b.Run(bm.expr+"_optimized", func(b *testing.B) { + m, _ := NewGlobMatcher(bm.expr) + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.MatchString(bm.test) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/logical.go b/src/go/collectors/go.d.plugin/pkg/matcher/logical.go new file mode 100644 index 00000000000000..af07be8f4e7913 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/logical.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +type ( + trueMatcher struct{} + falseMatcher struct{} + andMatcher struct{ lhs, rhs Matcher } + orMatcher struct{ lhs, rhs Matcher } + negMatcher struct{ Matcher } +) + +var ( + matcherT trueMatcher + matcherF falseMatcher +) + +// TRUE returns a matcher which always returns true +func TRUE() Matcher { + return matcherT +} + +// FALSE returns a matcher which always returns false +func FALSE() Matcher { + return matcherF +} + +// Not returns a matcher that negates the sub-matcher's result +func Not(m Matcher) Matcher { + switch m { + case TRUE(): + return FALSE() + case FALSE(): + return TRUE() + default: + return negMatcher{m} + } +} + +// And returns a matcher that returns true only if all of its sub-matchers return true +func And(lhs, rhs Matcher, others ...Matcher) Matcher { + var matcher Matcher + switch lhs { + case TRUE(): + matcher = rhs + case FALSE(): + matcher = FALSE() + default: + switch rhs { + case TRUE(): + matcher = lhs + case FALSE(): + matcher = FALSE() + default: + matcher = andMatcher{lhs, rhs} + } + } + if len(others) > 0 { + return And(matcher, others[0], others[1:]...) + } + return matcher +} + +// Or returns a matcher that returns true if any of its sub-matchers returns true +func Or(lhs, rhs Matcher, others ...Matcher) Matcher { + var matcher Matcher + switch lhs { + case TRUE(): + matcher = TRUE() + case FALSE(): + matcher = rhs + default: + switch rhs { + case TRUE(): + matcher = TRUE() + case FALSE(): + matcher = lhs + default: + matcher = orMatcher{lhs, rhs} + } + } + if len(others) > 0 { + return Or(matcher, others[0], others[1:]...) 
+ } + return matcher +} + +func (trueMatcher) Match(_ []byte) bool { return true } +func (trueMatcher) MatchString(_ string) bool { return true } + +func (falseMatcher) Match(_ []byte) bool { return false } +func (falseMatcher) MatchString(_ string) bool { return false } + +func (m andMatcher) Match(b []byte) bool { return m.lhs.Match(b) && m.rhs.Match(b) } +func (m andMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) && m.rhs.MatchString(s) } + +func (m orMatcher) Match(b []byte) bool { return m.lhs.Match(b) || m.rhs.Match(b) } +func (m orMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) || m.rhs.MatchString(s) } + +func (m negMatcher) Match(b []byte) bool { return !m.Matcher.Match(b) } +func (m negMatcher) MatchString(s string) bool { return !m.Matcher.MatchString(s) } diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/logical_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/logical_test.go new file mode 100644 index 00000000000000..64491f1ad8d50f --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/logical_test.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTRUE(t *testing.T) { + assert.True(t, TRUE().Match(nil)) + assert.True(t, TRUE().MatchString("")) +} + +func TestFALSE(t *testing.T) { + assert.False(t, FALSE().Match(nil)) + assert.False(t, FALSE().MatchString("")) +} + +func TestAnd(t *testing.T) { + assert.Equal(t, + matcherF, + And(FALSE(), stringFullMatcher(""))) + assert.Equal(t, + matcherF, + And(stringFullMatcher(""), FALSE())) + + assert.Equal(t, + stringFullMatcher(""), + And(TRUE(), stringFullMatcher(""))) + assert.Equal(t, + stringFullMatcher(""), + And(stringFullMatcher(""), TRUE())) + + assert.Equal(t, + andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + And(stringPartialMatcher("a"), stringPartialMatcher("b"))) + + assert.Equal(t, + andMatcher{ + andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + stringPartialMatcher("c"), + }, + And(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) +} + +func TestOr(t *testing.T) { + assert.Equal(t, + stringFullMatcher(""), + Or(FALSE(), stringFullMatcher(""))) + assert.Equal(t, + stringFullMatcher(""), + Or(stringFullMatcher(""), FALSE())) + + assert.Equal(t, + TRUE(), + Or(TRUE(), stringFullMatcher(""))) + assert.Equal(t, + TRUE(), + Or(stringFullMatcher(""), TRUE())) + + assert.Equal(t, + orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + Or(stringPartialMatcher("a"), stringPartialMatcher("b"))) + + assert.Equal(t, + orMatcher{ + orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")}, + stringPartialMatcher("c"), + }, + Or(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c"))) +} + +func TestAndMatcher_Match(t *testing.T) { + and := andMatcher{ + stringPrefixMatcher("a"), + stringSuffixMatcher("c"), + } + assert.True(t, and.Match([]byte("abc"))) + assert.True(t, and.MatchString("abc")) +} + +func TestOrMatcher_Match(t *testing.T) { + or := orMatcher{ + stringPrefixMatcher("a"), + stringPrefixMatcher("c"), + } + assert.True(t, or.Match([]byte("aaa"))) + assert.True(t, or.MatchString("ccc")) +} + +func TestNegMatcher_Match(t *testing.T) { + neg := negMatcher{stringPrefixMatcher("a")} + assert.False(t, neg.Match([]byte("aaa"))) + assert.True(t, neg.MatchString("ccc")) +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/matcher.go 
b/src/go/collectors/go.d.plugin/pkg/matcher/matcher.go new file mode 100644 index 00000000000000..76d903325105c0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/matcher.go @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "errors" + "fmt" + "regexp" +) + +type ( + // Matcher is an interface that wraps the MatchString method. + Matcher interface { + // Match performs match against given []byte + Match(b []byte) bool + // MatchString performs match against given string + MatchString(string) bool + } + + // Format is the matcher format + Format string +) + +const ( + // FmtString is a string match format. + FmtString Format = "string" + // FmtGlob is a glob match format. + FmtGlob Format = "glob" + // FmtRegExp is a regex match format. + FmtRegExp Format = "regexp" + // FmtSimplePattern is a simple pattern match format + // https://docs.netdata.cloud/libnetdata/simple_pattern/ + FmtSimplePattern Format = "simple_patterns" + + // Separator is a separator between match format and expression. + Separator = ":" +) + +const ( + symString = "=" + symGlob = "*" + symRegExp = "~" +) + +var ( + reShortSyntax = regexp.MustCompile(`(?s)^(!)?(.)\s*(.*)$`) + reLongSyntax = regexp.MustCompile(`(?s)^(!)?([^:]+):(.*)$`) + + errNotShortSyntax = errors.New("not short syntax") +) + +// Must is a helper that wraps a call to a function returning (Matcher, error) and panics if the error is non-nil. +// It is intended for use in variable initializations such as +// +// var m = matcher.Must(matcher.New(matcher.FmtString, "hello world")) +func Must(m Matcher, err error) Matcher { + if err != nil { + panic(err) + } + return m +} + +// New creates a matcher +func New(format Format, expr string) (Matcher, error) { + switch format { + case FmtString: + return NewStringMatcher(expr, true, true) + case FmtGlob: + return NewGlobMatcher(expr) + case FmtRegExp: + return NewRegExpMatcher(expr) + case FmtSimplePattern: + return NewSimplePatternsMatcher(expr) + default: + return nil, fmt.Errorf("unsupported matcher format: '%s'", format) + } +} + +// Parse parses line and returns appropriate matcher based on matched format. +// +// Short Syntax +// +// <line> ::= [ <not> ] <format> <space> <expr> +// <not> ::= '!' +// negative expression +// <format> ::= [ '=', '~', '*' ] +// '=' means string match +// '~' means regexp match +// '*' means glob match +// <space> ::= { ' ' | '\t' | '\n' | '\r' } +// <expr> ::= any string +// +// Long Syntax +// +// <line> ::= [ <not> ] <format> <separator> <expr> +// <format> ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ] +// <not> ::= '!' 
+// negative expression +// <separator> ::= ':' +// <expr> ::= any string +func Parse(line string) (Matcher, error) { + matcher, err := parseShortFormat(line) + if err == nil { + return matcher, nil + } + return parseLongSyntax(line) +} + +func parseShortFormat(line string) (Matcher, error) { + m := reShortSyntax.FindStringSubmatch(line) + if m == nil { + return nil, errNotShortSyntax + } + var format Format + switch m[2] { + case symString: + format = FmtString + case symGlob: + format = FmtGlob + case symRegExp: + format = FmtRegExp + default: + return nil, fmt.Errorf("invalid short syntax: unknown symbol '%s'", m[2]) + } + expr := m[3] + matcher, err := New(format, expr) + if err != nil { + return nil, err + } + if m[1] != "" { + matcher = Not(matcher) + } + return matcher, nil +} + +func parseLongSyntax(line string) (Matcher, error) { + m := reLongSyntax.FindStringSubmatch(line) + if m == nil { + return nil, fmt.Errorf("invalid syntax") + } + matcher, err := New(Format(m[2]), m[3]) + if err != nil { + return nil, err + } + if m[1] != "" { + matcher = Not(matcher) + } + return matcher, nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/matcher_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/matcher_test.go new file mode 100644 index 00000000000000..f304d983ddb32a --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/matcher_test.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "log" + "reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + tests := []struct { + valid bool + line string + matcher Matcher + }{ + {false, "", nil}, + {false, "abc", nil}, + {false, `~ abc\`, nil}, + {false, `invalid_fmt:abc`, nil}, + + {true, "=", stringFullMatcher("")}, + {true, "= ", stringFullMatcher("")}, + {true, "=full", stringFullMatcher("full")}, + {true, "= full", stringFullMatcher("full")}, + {true, "= \t\ffull", stringFullMatcher("full")}, + + {true, "string:", stringFullMatcher("")}, + {true, "string:full", stringFullMatcher("full")}, + + {true, "!=", Not(stringFullMatcher(""))}, + {true, "!=full", Not(stringFullMatcher("full"))}, + {true, "!= full", Not(stringFullMatcher("full"))}, + {true, "!= \t\ffull", Not(stringFullMatcher("full"))}, + + {true, "!string:", Not(stringFullMatcher(""))}, + {true, "!string:full", Not(stringFullMatcher("full"))}, + + {true, "~", TRUE()}, + {true, "~ ", TRUE()}, + {true, `~ ^$`, stringFullMatcher("")}, + {true, "~ partial", stringPartialMatcher("partial")}, + {true, `~ part\.ial`, stringPartialMatcher("part.ial")}, + {true, "~ ^prefix", stringPrefixMatcher("prefix")}, + {true, "~ suffix$", stringSuffixMatcher("suffix")}, + {true, "~ ^full$", stringFullMatcher("full")}, + {true, "~ [0-9]+", regexp.MustCompile(`[0-9]+`)}, + {true, `~ part\s1`, regexp.MustCompile(`part\s1`)}, + + {true, "!~", FALSE()}, + {true, "!~ ", FALSE()}, + {true, "!~ partial", Not(stringPartialMatcher("partial"))}, + {true, `!~ part\.ial`, Not(stringPartialMatcher("part.ial"))}, + {true, "!~ ^prefix", Not(stringPrefixMatcher("prefix"))}, + {true, "!~ suffix$", Not(stringSuffixMatcher("suffix"))}, + {true, "!~ ^full$", Not(stringFullMatcher("full"))}, + {true, "!~ [0-9]+", Not(regexp.MustCompile(`[0-9]+`))}, + + {true, `regexp:partial`, stringPartialMatcher("partial")}, + {true, `!regexp:partial`, Not(stringPartialMatcher("partial"))}, + + {true, `*`, stringFullMatcher("")}, + {true, `* foo`, stringFullMatcher("foo")}, + 
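// glob shorts whose only metacharacters are leading/trailing '*' reduce to plain string matchers +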
{true, `* foo*`, stringPrefixMatcher("foo")}, + {true, `* *foo`, stringSuffixMatcher("foo")}, + {true, `* *foo*`, stringPartialMatcher("foo")}, + {true, `* foo*bar`, globMatcher("foo*bar")}, + {true, `* *foo*bar`, globMatcher("*foo*bar")}, + {true, `* foo?bar`, globMatcher("foo?bar")}, + + {true, `!*`, Not(stringFullMatcher(""))}, + {true, `!* foo`, Not(stringFullMatcher("foo"))}, + {true, `!* foo*`, Not(stringPrefixMatcher("foo"))}, + {true, `!* *foo`, Not(stringSuffixMatcher("foo"))}, + {true, `!* *foo*`, Not(stringPartialMatcher("foo"))}, + {true, `!* foo*bar`, Not(globMatcher("foo*bar"))}, + {true, `!* *foo*bar`, Not(globMatcher("*foo*bar"))}, + {true, `!* foo?bar`, Not(globMatcher("foo?bar"))}, + + {true, "glob:foo*bar", globMatcher("foo*bar")}, + {true, "!glob:foo*bar", Not(globMatcher("foo*bar"))}, + + {true, `simple_patterns:`, FALSE()}, + {true, `simple_patterns: `, FALSE()}, + {true, `simple_patterns: foo`, simplePatternsMatcher{ + {stringFullMatcher("foo"), true}, + }}, + {true, `simple_patterns: !foo`, simplePatternsMatcher{ + {stringFullMatcher("foo"), false}, + }}, + } + for _, test := range tests { + t.Run(test.line, func(t *testing.T) { + m, err := Parse(test.line) + if test.valid { + require.NoError(t, err) + if test.matcher != nil { + log.Printf("%s %#v", reflect.TypeOf(m).Name(), m) + assert.Equal(t, test.matcher, m) + } + } else { + assert.Error(t, err) + } + }) + } +} + +func TestMust(t *testing.T) { + assert.NotPanics(t, func() { + m := Must(New(FmtRegExp, `[0-9]+`)) + assert.NotNil(t, m) + }) + + assert.Panics(t, func() { + Must(New(FmtRegExp, `[0-9]+\`)) + }) +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/regexp.go b/src/go/collectors/go.d.plugin/pkg/matcher/regexp.go new file mode 100644 index 00000000000000..3a297f3b33ef7b --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/regexp.go @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import "regexp" + +// NewRegExpMatcher creates a new matcher with RegExp format +func NewRegExpMatcher(expr string) (Matcher, error) { + switch expr { + case "", "^", "$": + return TRUE(), nil + case "^$", "$^": + return NewStringMatcher("", true, true) + } + size := len(expr) + chars := []rune(expr) + var startWith, endWith bool + startIdx := 0 + endIdx := size - 1 + if chars[startIdx] == '^' { + startWith = true + startIdx = 1 + } + if chars[endIdx] == '$' { + endWith = true + endIdx-- + } + + unescapedExpr := make([]rune, 0, endIdx-startIdx+1) + for i := startIdx; i <= endIdx; i++ { + ch := chars[i] + if ch == '\\' { + if i == endIdx { // end with '\' => invalid format + return regexp.Compile(expr) + } + nextCh := chars[i+1] + if !isRegExpMeta(nextCh) { // '\' + non-meta char => special meaning + return regexp.Compile(expr) + } + unescapedExpr = append(unescapedExpr, nextCh) + i++ + } else if isRegExpMeta(ch) { + return regexp.Compile(expr) + } else { + unescapedExpr = append(unescapedExpr, ch) + } + } + + return NewStringMatcher(string(unescapedExpr), startWith, endWith) +} + +// isRegExpMeta reports whether rune b needs to be escaped by QuoteMeta. 
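+// For example, '.', '$' and '(' are meta characters, while 'a' and '-' are not.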
+func isRegExpMeta(b rune) bool {
+	switch b {
+	case '\\', '.', '+', '*', '?', '(', ')', '|', '[', ']', '{', '}', '^', '$':
+		return true
+	default:
+		return false
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/regexp_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/regexp_test.go
new file mode 100644
index 00000000000000..fe644747bf87c5
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/matcher/regexp_test.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"regexp"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestRegExpMatch_Match(t *testing.T) {
+	m := regexp.MustCompile("[0-9]+")
+
+	cases := []struct {
+		expected bool
+		line     string
+	}{
+		{
+			expected: true,
+			line:     "2019",
+		},
+		{
+			expected: true,
+			line:     "It's over 9000!",
+		},
+		{
+			expected: false,
+			line:     "This will never fail!",
+		},
+	}
+
+	for _, c := range cases {
+		assert.Equal(t, c.expected, m.MatchString(c.line))
+	}
+}
+
+func BenchmarkRegExp_MatchString(b *testing.B) {
+	benchmarks := []struct {
+		expr string
+		test string
+	}{
+		{"", ""},
+		{"abc", "abcd"},
+		{"^abc", "abcd"},
+		{"abc$", "abcd"},
+		{"^abc$", "abcd"},
+		{"[a-z]+", "abcd"},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.expr+"_raw", func(b *testing.B) {
+			m := regexp.MustCompile(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+		b.Run(bm.expr+"_optimized", func(b *testing.B) {
+			m, _ := NewRegExpMatcher(bm.expr)
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				m.MatchString(bm.test)
+			}
+		})
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns.go b/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns.go
new file mode 100644
index 00000000000000..0c1d69fc68f494
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"strings"
+)
+
+type (
+	simplePatternTerm struct {
+		matcher  Matcher
+		positive bool
+	}
+
+	// simplePatternsMatcher implements Matcher. It is an ordered list of
+	// positive and negative pattern terms.
+	simplePatternsMatcher []simplePatternTerm
+)
+
+// NewSimplePatternsMatcher creates a new simple patterns matcher. It returns
+// an error if any of the patterns has bad syntax.
+func NewSimplePatternsMatcher(expr string) (Matcher, error) {
+	var ps simplePatternsMatcher
+
+	for _, pattern := range strings.Fields(expr) {
+		if err := ps.add(pattern); err != nil {
+			return nil, err
+		}
+	}
+	if len(ps) == 0 {
+		return FALSE(), nil
+	}
+	return ps, nil
+}
+
+func (m *simplePatternsMatcher) add(term string) error {
+	p := simplePatternTerm{}
+	if term[0] == '!' {
+		p.positive = false
+		term = term[1:]
+	} else {
+		p.positive = true
+	}
+	matcher, err := NewGlobMatcher(term)
+	if err != nil {
+		return err
+	}
+
+	p.matcher = matcher
+	*m = append(*m, p)
+
+	return nil
+}
+
+func (m simplePatternsMatcher) Match(b []byte) bool {
+	return m.MatchString(string(b))
+}
+
+// MatchString matches the line against the patterns in order;
+// the first pattern that matches decides the result.
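+// For example (an illustrative expression, in the spirit of the tests):
+//
+//	m, _ := NewSimplePatternsMatcher("*foobar* !foo* *")
+//	m.MatchString("foobar") // true:  "*foobar*" matches first
+//	m.MatchString("foo")    // false: "!foo*" matches first
+//	m.MatchString("bar")    // true:  only the catch-all "*" matches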
+func (m simplePatternsMatcher) MatchString(line string) bool { + for _, p := range m { + if p.matcher.MatchString(line) { + return p.positive + } + } + return false +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns_test.go new file mode 100644 index 00000000000000..016096d5775c51 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/simple_patterns_test.go @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewSimplePatternsMatcher(t *testing.T) { + tests := []struct { + expr string + expected Matcher + }{ + {"", FALSE()}, + {" ", FALSE()}, + {"foo", simplePatternsMatcher{ + {stringFullMatcher("foo"), true}, + }}, + {"!foo", simplePatternsMatcher{ + {stringFullMatcher("foo"), false}, + }}, + {"foo bar", simplePatternsMatcher{ + {stringFullMatcher("foo"), true}, + {stringFullMatcher("bar"), true}, + }}, + {"*foobar* !foo* !*bar *", simplePatternsMatcher{ + {stringPartialMatcher("foobar"), true}, + {stringPrefixMatcher("foo"), false}, + {stringSuffixMatcher("bar"), false}, + {TRUE(), true}, + }}, + {`ab\`, nil}, + } + for _, test := range tests { + t.Run(test.expr, func(t *testing.T) { + matcher, err := NewSimplePatternsMatcher(test.expr) + if test.expected == nil { + assert.Error(t, err) + } else { + assert.Equal(t, test.expected, matcher) + } + }) + } +} + +func TestSimplePatterns_Match(t *testing.T) { + m, err := NewSimplePatternsMatcher("*foobar* !foo* !*bar *") + + require.NoError(t, err) + + cases := []struct { + expected bool + line string + }{ + { + expected: true, + line: "hello world", + }, + { + expected: false, + line: "hello world bar", + }, + { + expected: true, + line: "hello world foobar", + }, + } + + for _, c := range cases { + t.Run(c.line, func(t *testing.T) { + assert.Equal(t, c.expected, m.MatchString(c.line)) + assert.Equal(t, c.expected, m.Match([]byte(c.line))) + }) + } +} + +func TestSimplePatterns_Match2(t *testing.T) { + m, err := NewSimplePatternsMatcher("*foobar") + + require.NoError(t, err) + + assert.True(t, m.MatchString("foobar")) + assert.True(t, m.MatchString("foo foobar")) + assert.False(t, m.MatchString("foobar baz")) +} diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/string.go b/src/go/collectors/go.d.plugin/pkg/matcher/string.go new file mode 100644 index 00000000000000..25827d0d80b167 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/matcher/string.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package matcher + +import ( + "bytes" + "strings" +) + +type ( + // stringFullMatcher implements Matcher, it uses "==" to match. + stringFullMatcher string + + // stringPartialMatcher implements Matcher, it uses strings.Contains to match. + stringPartialMatcher string + + // stringPrefixMatcher implements Matcher, it uses strings.HasPrefix to match. + stringPrefixMatcher string + + // stringSuffixMatcher implements Matcher, it uses strings.HasSuffix to match. 
+	stringSuffixMatcher string
+)
+
+// NewStringMatcher creates a new matcher with string format.
+func NewStringMatcher(s string, startWith, endWith bool) (Matcher, error) {
+	switch {
+	case startWith && endWith:
+		return stringFullMatcher(s), nil
+	case startWith && !endWith:
+		return stringPrefixMatcher(s), nil
+	case !startWith && endWith:
+		return stringSuffixMatcher(s), nil
+	default:
+		return stringPartialMatcher(s), nil
+	}
+}
+
+func (m stringFullMatcher) Match(b []byte) bool          { return string(m) == string(b) }
+func (m stringFullMatcher) MatchString(line string) bool { return string(m) == line }
+
+func (m stringPartialMatcher) Match(b []byte) bool          { return bytes.Contains(b, []byte(m)) }
+func (m stringPartialMatcher) MatchString(line string) bool { return strings.Contains(line, string(m)) }
+
+func (m stringPrefixMatcher) Match(b []byte) bool          { return bytes.HasPrefix(b, []byte(m)) }
+func (m stringPrefixMatcher) MatchString(line string) bool { return strings.HasPrefix(line, string(m)) }
+
+func (m stringSuffixMatcher) Match(b []byte) bool          { return bytes.HasSuffix(b, []byte(m)) }
+func (m stringSuffixMatcher) MatchString(line string) bool { return strings.HasSuffix(line, string(m)) }
diff --git a/src/go/collectors/go.d.plugin/pkg/matcher/string_test.go b/src/go/collectors/go.d.plugin/pkg/matcher/string_test.go
new file mode 100644
index 00000000000000..1694efbd0af995
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/matcher/string_test.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+var stringMatcherTestCases = []struct {
+	line                          string
+	expr                          string
+	full, prefix, suffix, partial bool
+}{
+	{"", "", true, true, true, true},
+	{"abc", "", false, true, true, true},
+	{"power", "pow", false, true, false, true},
+	{"netdata", "data", false, false, true, true},
+	{"abc", "def", false, false, false, false},
+	{"soon", "o", false, false, false, true},
+}
+
+func TestStringFullMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringFullMatcher(c.expr)
+			assert.Equal(t, c.full, m.Match([]byte(c.line)))
+			assert.Equal(t, c.full, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringPrefixMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringPrefixMatcher(c.expr)
+			assert.Equal(t, c.prefix, m.Match([]byte(c.line)))
+			assert.Equal(t, c.prefix, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringSuffixMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringSuffixMatcher(c.expr)
+			assert.Equal(t, c.suffix, m.Match([]byte(c.line)))
+			assert.Equal(t, c.suffix, m.MatchString(c.line))
+		})
+	}
+}
+
+func TestStringPartialMatcher_MatchString(t *testing.T) {
+	for _, c := range stringMatcherTestCases {
+		t.Run(c.line, func(t *testing.T) {
+			m := stringPartialMatcher(c.expr)
+			assert.Equal(t, c.partial, m.Match([]byte(c.line)))
+			assert.Equal(t, c.partial, m.MatchString(c.line))
+		})
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/counter.go b/src/go/collectors/go.d.plugin/pkg/metrics/counter.go
new file mode 100644
index 00000000000000..a48cfd68b38a10
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/metrics/counter.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+	"errors"
+
"github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + // Counter is a Metric that represents a single numerical bits that only ever + // goes up. That implies that it cannot be used to count items whose number can + // also go down, e.g. the number of currently running goroutines. Those + // "counters" are represented by Gauges. + // + // A Counter is typically used to count requests served, tasks completed, errors + // occurred, etc. + Counter struct { + valInt int64 + valFloat float64 + } + + // CounterVec is a Collector that bundles a set of Counters which have different values for their names. + // This is used if you want to count the same thing partitioned by various dimensions + // (e.g. number of HTTP requests, partitioned by response code and method). + // + // Create instances with NewCounterVec. + CounterVec map[string]*Counter +) + +var ( + _ stm.Value = Counter{} + _ stm.Value = CounterVec{} +) + +// WriteTo writes its value into given map. +func (c Counter) WriteTo(rv map[string]int64, key string, mul, div int) { + rv[key] = int64(c.Value() * float64(mul) / float64(div)) +} + +// Value gets current counter. +func (c Counter) Value() float64 { + return float64(c.valInt) + c.valFloat +} + +// Inc increments the counter by 1. Use Add to increment it by arbitrary +// non-negative values. +func (c *Counter) Inc() { + c.valInt++ +} + +// Add adds the given bits to the counter. It panics if the value is < 0. +func (c *Counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + val := int64(v) + if float64(val) == v { + c.valInt += val + return + } + c.valFloat += v +} + +// NewCounterVec creates a new CounterVec +func NewCounterVec() CounterVec { + return CounterVec{} +} + +// WriteTo writes its value into given map. 
+func (c CounterVec) WriteTo(rv map[string]int64, key string, mul, div int) { + for name, value := range c { + rv[key+"_"+name] = int64(value.Value() * float64(mul) / float64(div)) + } +} + +// Get gets counter instance by name +func (c CounterVec) Get(name string) *Counter { + item, _ := c.GetP(name) + return item +} + +// GetP gets counter instance by name +func (c CounterVec) GetP(name string) (counter *Counter, ok bool) { + counter, ok = c[name] + if ok { + return + } + counter = &Counter{} + c[name] = counter + return +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/counter_test.go b/src/go/collectors/go.d.plugin/pkg/metrics/counter_test.go new file mode 100644 index 00000000000000..61f50501ac6155 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/counter_test.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCounter_WriteTo(t *testing.T) { + c := Counter{} + c.Inc() + c.Inc() + c.Inc() + c.Add(0.14) + m := map[string]int64{} + c.WriteTo(m, "pi", 100, 1) + assert.Len(t, m, 1) + assert.EqualValues(t, 314, m["pi"]) +} + +func TestCounterVec_WriteTo(t *testing.T) { + c := NewCounterVec() + c.Get("foo").Inc() + c.Get("foo").Inc() + c.Get("bar").Inc() + c.Get("bar").Add(0.14) + + m := map[string]int64{} + c.WriteTo(m, "pi", 100, 1) + assert.Len(t, m, 2) + assert.EqualValues(t, 200, m["pi_foo"]) + assert.EqualValues(t, 114, m["pi_bar"]) +} + +func TestCounter_Inc(t *testing.T) { + c := Counter{} + c.Inc() + assert.Equal(t, 1.0, c.Value()) + c.Inc() + assert.Equal(t, 2.0, c.Value()) +} + +func TestCounter_Add(t *testing.T) { + c := Counter{} + c.Add(3.14) + assert.InDelta(t, 3.14, c.Value(), 0.0001) + c.Add(2) + assert.InDelta(t, 5.14, c.Value(), 0.0001) + assert.Panics(t, func() { + c.Add(-1) + }) +} + +func BenchmarkCounter_Add(b *testing.B) { + benchmarks := []struct { + name string + value float64 + }{ + {"int", 1}, + {"float", 3.14}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + var c Counter + for i := 0; i < b.N; i++ { + c.Add(bm.value) + } + }) + } +} + +func BenchmarkCounter_Inc(b *testing.B) { + var c Counter + for i := 0; i < b.N; i++ { + c.Inc() + } +} + +func BenchmarkCounterVec_Inc(b *testing.B) { + c := NewCounterVec() + for i := 0; i < b.N; i++ { + c.Get("foo").Inc() + } +} + +func BenchmarkCounter_Value(b *testing.B) { + var c Counter + c.Inc() + c.Add(3.14) + for i := 0; i < b.N; i++ { + c.Value() + } +} + +func BenchmarkCounter_WriteTo(b *testing.B) { + var c Counter + c.Inc() + c.Add(3.14) + m := map[string]int64{} + for i := 0; i < b.N; i++ { + c.WriteTo(m, "pi", 100, 1) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/gauge.go b/src/go/collectors/go.d.plugin/pkg/metrics/gauge.go new file mode 100644 index 00000000000000..291cee5f711c52 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/gauge.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "time" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + // Gauge is a Metric that represents a single numerical value that can + // arbitrarily go up and down. + // + // A Gauge is typically used for measured values like temperatures or current + // memory usage, but also "counts" that can go up and down, like the number of + // running goroutines. + Gauge float64 + + // GaugeVec is a Collector that bundles a set of Gauges which have different values for their names. 
+	// This is used if you want to count the same thing partitioned by various dimensions.
+	//
+	// Create instances with NewGaugeVec.
+	GaugeVec map[string]*Gauge
+)
+
+var (
+	_ stm.Value = Gauge(0)
+	_ stm.Value = GaugeVec{}
+)
+
+// WriteTo writes its value into given map.
+func (g Gauge) WriteTo(rv map[string]int64, key string, mul, div int) {
+	rv[key] = int64(float64(g) * float64(mul) / float64(div))
+}
+
+// Value returns the current value of the gauge.
+func (g Gauge) Value() float64 {
+	return float64(g)
+}
+
+// Set sets the Gauge to an arbitrary value.
+func (g *Gauge) Set(v float64) {
+	*g = Gauge(v)
+}
+
+// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+// values.
+func (g *Gauge) Inc() {
+	*g++
+}
+
+// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+// values.
+func (g *Gauge) Dec() {
+	*g--
+}
+
+// Add adds the given value to the Gauge. (The value can be negative,
+// resulting in a decrease of the Gauge.)
+func (g *Gauge) Add(delta float64) {
+	*g += Gauge(delta)
+}
+
+// Sub subtracts the given value from the Gauge. (The value can be
+// negative, resulting in an increase of the Gauge.)
+func (g *Gauge) Sub(delta float64) {
+	*g -= Gauge(delta)
+}
+
+// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+func (g *Gauge) SetToCurrentTime() {
+	*g = Gauge(time.Now().UnixNano()) / 1e9
+}
+
+// NewGaugeVec creates a new GaugeVec.
+func NewGaugeVec() GaugeVec {
+	return GaugeVec{}
+}
+
+// WriteTo writes its value into given map.
+func (g GaugeVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+	for name, value := range g {
+		rv[key+"_"+name] = int64(value.Value() * float64(mul) / float64(div))
+	}
+}
+
+// Get gets the gauge instance by name, creating it if needed.
+func (g GaugeVec) Get(name string) *Gauge {
+	item, _ := g.GetP(name)
+	return item
+}
+
+// GetP gets the gauge instance by name, creating it if needed,
+// and reports whether it already existed.
+func (g GaugeVec) GetP(name string) (gauge *Gauge, ok bool) {
+	gauge, ok = g[name]
+	if ok {
+		return
+	}
+	gauge = new(Gauge)
+	g[name] = gauge
+	return
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/gauge_test.go b/src/go/collectors/go.d.plugin/pkg/metrics/gauge_test.go
new file mode 100644
index 00000000000000..8940e330e77d36
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/metrics/gauge_test.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGauge_Set(t *testing.T) {
+	var g Gauge
+	assert.Equal(t, 0.0, g.Value())
+	g.Set(100)
+	assert.Equal(t, 100.0, g.Value())
+	g.Set(200)
+	assert.Equal(t, 200.0, g.Value())
+}
+
+func TestGauge_Add(t *testing.T) {
+	var g Gauge
+	assert.Equal(t, 0.0, g.Value())
+	g.Add(100)
+	assert.Equal(t, 100.0, g.Value())
+	g.Add(200)
+	assert.Equal(t, 300.0, g.Value())
+}
+
+func TestGauge_Sub(t *testing.T) {
+	var g Gauge
+	assert.Equal(t, 0.0, g.Value())
+	g.Sub(100)
+	assert.Equal(t, -100.0, g.Value())
+	g.Sub(200)
+	assert.Equal(t, -300.0, g.Value())
+}
+
+func TestGauge_Inc(t *testing.T) {
+	var g Gauge
+	assert.Equal(t, 0.0, g.Value())
+	g.Inc()
+	assert.Equal(t, 1.0, g.Value())
+}
+
+func TestGauge_Dec(t *testing.T) {
+	var g Gauge
+	assert.Equal(t, 0.0, g.Value())
+	g.Dec()
+	assert.Equal(t, -1.0, g.Value())
+}
+
+func TestGauge_SetToCurrentTime(t *testing.T) {
+	var g Gauge
+	g.SetToCurrentTime()
+	assert.InDelta(t, time.Now().Unix(), g.Value(), 1)
+}
+
+func TestGauge_WriteTo(t *testing.T) {
+	g := Gauge(3.14)
+	m := map[string]int64{}
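+	// Gauge(3.14) scaled by mul=100, div=1 is written as the integer 314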
g.WriteTo(m, "pi", 100, 1) + assert.Len(t, m, 1) + assert.EqualValues(t, 314, m["pi"]) +} + +func TestGaugeVec_WriteTo(t *testing.T) { + g := NewGaugeVec() + g.Get("foo").Inc() + g.Get("foo").Inc() + g.Get("bar").Inc() + g.Get("bar").Add(0.14) + + m := map[string]int64{} + g.WriteTo(m, "pi", 100, 1) + assert.Len(t, m, 2) + assert.EqualValues(t, 200, m["pi_foo"]) + assert.EqualValues(t, 114, m["pi_bar"]) +} + +func BenchmarkGauge_Add(b *testing.B) { + benchmarks := []struct { + name string + value float64 + }{ + {"int", 1}, + {"float", 3.14}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + var c Gauge + for i := 0; i < b.N; i++ { + c.Add(bm.value) + } + }) + } +} + +func BenchmarkGauge_Inc(b *testing.B) { + var c Gauge + for i := 0; i < b.N; i++ { + c.Inc() + } +} + +func BenchmarkGauge_Set(b *testing.B) { + var c Gauge + for i := 0; i < b.N; i++ { + c.Set(3.14) + } +} + +func BenchmarkGauge_Value(b *testing.B) { + var c Gauge + c.Inc() + c.Add(3.14) + for i := 0; i < b.N; i++ { + c.Value() + } +} + +func BenchmarkGauge_WriteTo(b *testing.B) { + var c Gauge + c.Inc() + c.Add(3.14) + m := map[string]int64{} + for i := 0; i < b.N; i++ { + c.WriteTo(m, "pi", 100, 1) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/histogram.go b/src/go/collectors/go.d.plugin/pkg/metrics/histogram.go new file mode 100644 index 00000000000000..2ad5033f0a1b71 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/histogram.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "fmt" + "sort" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + // A Histogram counts individual observations from an event or sample stream in + // configurable buckets. Similar to a summary, it also provides a sum of + // observations and an observation count. + // + // Note that Histograms, in contrast to Summaries, can be aggregated. + // However, Histograms require the user to pre-define suitable + // buckets, and they are in general less accurate. The Observe method of a + // histogram has a very low performance overhead in comparison with the Observe + // method of a summary. + // + // To create histogram instances, use NewHistogram. + Histogram interface { + Observer + } + + histogram struct { + buckets []int64 + upperBounds []float64 + sum float64 + count int64 + rangeBuckets bool + } +) + +var ( + _ stm.Value = histogram{} +) + +// DefBuckets are the default histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. 
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// passed to NewHistogram.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
+
+// NewHistogram creates a new Histogram.
+func NewHistogram(buckets []float64) Histogram {
+	if len(buckets) == 0 {
+		buckets = DefBuckets
+	} else {
+		sort.Slice(buckets, func(i, j int) bool { return buckets[i] < buckets[j] })
+	}
+
+	return &histogram{
+		buckets:     make([]int64, len(buckets)),
+		upperBounds: buckets,
+		count:       0,
+		sum:         0,
+	}
+}
+
+// NewHistogramWithRangeBuckets creates a new Histogram that reports
+// per-range (non-cumulative) bucket counts, plus an explicit +Inf bucket.
+func NewHistogramWithRangeBuckets(buckets []float64) Histogram {
+	if len(buckets) == 0 {
+		buckets = DefBuckets
+	} else {
+		sort.Slice(buckets, func(i, j int) bool { return buckets[i] < buckets[j] })
+	}
+
+	return &histogram{
+		buckets:      make([]int64, len(buckets)),
+		upperBounds:  buckets,
+		count:        0,
+		sum:          0,
+		rangeBuckets: true,
+	}
+}
+
+// WriteTo writes its values into given map.
+// It adds those key-value pairs:
+//
+//	${key}_sum      gauge, for sum of its observed values
+//	${key}_count    counter, for count of its observed values (equals the +Inf bucket)
+//	${key}_bucket_1 counter, for 1st bucket count
+//	${key}_bucket_2 counter, for 2nd bucket count
+//	...
+//	${key}_bucket_N counter, for Nth bucket count
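+//
+// For illustration, with buckets [1, 2, 3] and observations 0, 1.5 and 3.5
+// (as in the unit test), WriteTo(rv, "pi", 100, 1) produces cumulative
+// bucket counts:
+//
+//	pi_count = 3, pi_sum = 500
+//	pi_bucket_1 = 1, pi_bucket_2 = 2, pi_bucket_3 = 2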
+func (h histogram) WriteTo(rv map[string]int64, key string, mul, div int) {
+	rv[key+"_sum"] = int64(h.sum * float64(mul) / float64(div))
+	rv[key+"_count"] = h.count
+	var conn int64 // cumulative count of the buckets written so far
+	for i, bucket := range h.buckets {
+		name := fmt.Sprintf("%s_bucket_%d", key, i+1)
+		conn += bucket
+		if h.rangeBuckets {
+			rv[name] = bucket
+		} else {
+			rv[name] = conn
+		}
+	}
+	if h.rangeBuckets {
+		name := fmt.Sprintf("%s_bucket_inf", key)
+		rv[name] = h.count - conn
+	}
+}
+
+// Observe observes a value.
+func (h *histogram) Observe(v float64) {
+	hotIdx := h.searchBucketIndex(v)
+	if hotIdx < len(h.buckets) {
+		h.buckets[hotIdx]++
+	}
+	h.sum += v
+	h.count++
+}
+
+func (h *histogram) searchBucketIndex(v float64) int {
+	if len(h.upperBounds) < 30 {
+		for i, upper := range h.upperBounds {
+			if upper >= v {
+				return i
+			}
+		}
+		return len(h.upperBounds)
+	}
+	return sort.SearchFloat64s(h.upperBounds, v)
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/histogram_test.go b/src/go/collectors/go.d.plugin/pkg/metrics/histogram_test.go
new file mode 100644
index 00000000000000..91266915c0fbf9
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/metrics/histogram_test.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestLinearBuckets(t *testing.T) {
+	buckets := LinearBuckets(0, 1, 10)
+	assert.Len(t, buckets, 10)
+	assert.EqualValues(t, 0, buckets[0])
+	assert.EqualValues(t, 5.0, buckets[5])
+	assert.EqualValues(t, 9.0, buckets[9])
+
+	assert.Panics(t, func() {
+		LinearBuckets(0, 1, 0)
+	})
+}
+
+func TestExponentialBuckets(t *testing.T) {
+	buckets := ExponentialBuckets(1, 2, 10)
+	assert.Len(t, buckets, 10)
+	assert.EqualValues(t, 1, buckets[0])
+	assert.EqualValues(t, 32.0, buckets[5])
+	assert.EqualValues(t, 512.0, buckets[9])
+
+	assert.Panics(t, func() {
+		ExponentialBuckets(1, 2, 0)
+	})
+	assert.Panics(t, func() {
+		ExponentialBuckets(0, 2, 2)
+	})
+
+	assert.Panics(t, func() {
+		ExponentialBuckets(1, 1, 2)
+	})
+}
+
+func TestNewHistogram(t *testing.T) {
+	h := NewHistogram(nil).(*histogram)
+	assert.EqualValues(t, 0, h.count)
+	assert.EqualValues(t, 0.0, h.sum)
+	assert.Equal(t, DefBuckets, h.upperBounds)
+
+	h = NewHistogram([]float64{1, 10, 5}).(*histogram)
+	assert.Equal(t, []float64{1, 5, 10}, h.upperBounds)
+	assert.Len(t, h.buckets, 3)
+}
+
+func TestHistogram_WriteTo(t *testing.T) {
+	h := NewHistogram([]float64{1, 2, 3})
+	m := map[string]int64{}
+	h.WriteTo(m, "pi", 100, 1)
+	assert.Len(t, m, 5)
+	assert.EqualValues(t, 0, m["pi_count"])
+	assert.EqualValues(t, 0, m["pi_sum"])
+	assert.EqualValues(t, 0, m["pi_bucket_1"])
+	assert.EqualValues(t, 0, m["pi_bucket_2"])
+	assert.EqualValues(t, 0, m["pi_bucket_3"])
+
+	h.Observe(0)
+	h.Observe(1.5)
+	h.Observe(3.5)
+	h.WriteTo(m, "pi", 100, 1)
+	assert.Len(t, m, 5)
+	assert.EqualValues(t, 3, m["pi_count"])
+	assert.EqualValues(t, 500, m["pi_sum"])
+	assert.EqualValues(t, 1, m["pi_bucket_1"])
+	assert.EqualValues(t, 2, m["pi_bucket_2"])
+	assert.EqualValues(t, 2, m["pi_bucket_3"])
+}
+
+func TestHistogram_searchBucketIndex(t *testing.T) {
+	h := NewHistogram(LinearBuckets(1, 1, 5)).(*histogram) // [1, 2, ..., 5]
+	assert.Equal(t, 0, h.searchBucketIndex(0.1))
+	assert.Equal(t, 1, h.searchBucketIndex(1.1))
+	assert.Equal(t, 5, h.searchBucketIndex(8.1))
+
+	h = NewHistogram(LinearBuckets(1, 1, 40)).(*histogram) // [1, 2, ..., 40]
+	assert.Equal(t, 0, h.searchBucketIndex(0.1))
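+	// with 40 upper bounds (>= 30), searchBucketIndex takes the sort.SearchFloat64s path
+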
assert.Equal(t, 1, h.searchBucketIndex(1.1)) + assert.Equal(t, 5, h.searchBucketIndex(5.1)) + assert.Equal(t, 7, h.searchBucketIndex(8)) + assert.Equal(t, 39, h.searchBucketIndex(39.5)) + assert.Equal(t, 40, h.searchBucketIndex(40.5)) +} + +func BenchmarkHistogram_Observe(b *testing.B) { + benchmarks := []struct { + name string + buckets []float64 + }{ + {"default", nil}, + {"len_10", LinearBuckets(0, 0.1, 10)}, + {"len_20", LinearBuckets(0, 0.1, 20)}, + {"len_30", LinearBuckets(0, 0.1, 30)}, + {"len_40", LinearBuckets(0, 0.1, 40)}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + h := NewHistogram(bm.buckets) + for i := 0; i < b.N; i++ { + h.Observe(2.5) + } + }) + } +} + +func BenchmarkHistogram_WriteTo(b *testing.B) { + benchmarks := []struct { + name string + buckets []float64 + }{ + {"default", nil}, + {"len_10", LinearBuckets(0, 0.1, 10)}, + {"len_20", LinearBuckets(0, 0.1, 20)}, + {"len_30", LinearBuckets(0, 0.1, 30)}, + {"len_40", LinearBuckets(0, 0.1, 40)}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + h := NewHistogram(bm.buckets) + h.Observe(0.1) + h.Observe(0.01) + h.Observe(0.5) + h.Observe(10) + m := map[string]int64{} + for i := 0; i < b.N; i++ { + h.WriteTo(m, "pi", 100, 1) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/metrics.go b/src/go/collectors/go.d.plugin/pkg/metrics/metrics.go new file mode 100644 index 00000000000000..30396c9ceddbac --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/metrics.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import "github.com/netdata/go.d.plugin/pkg/stm" + +// Observer is an interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + stm.Value + Observe(v float64) +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/summary.go b/src/go/collectors/go.d.plugin/pkg/metrics/summary.go new file mode 100644 index 00000000000000..c6261cfe2c5c04 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/summary.go @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "math" + + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + // A Summary captures individual observations from an event or sample stream and + // summarizes them in a manner similar to traditional summary statistics: + // sum of observations + // observation count + // observation average. + // + // To create summary instances, use NewSummary. + Summary interface { + Observer + Reset() + } + + // SummaryVec is a Collector that bundles a set of Summary which have different values for their names. + // This is used if you want to count the same thing partitioned by various dimensions + // (e.g. number of HTTP response time, partitioned by response code and method). + // + // Create instances with NewSummaryVec. + SummaryVec map[string]Summary + + summary struct { + min float64 + max float64 + sum float64 + count int64 + } +) + +var ( + _ stm.Value = summary{} + _ stm.Value = SummaryVec{} +) + +// NewSummary creates a new Summary. +func NewSummary() Summary { + return &summary{ + min: math.MaxFloat64, + max: -math.MaxFloat64, + } +} + +// WriteTo writes its values into given map. 
+// It adds those key-value pairs:
+//
+//	${key}_sum   gauge, for sum of its observed values since the last Reset call
+//	${key}_count counter, for count of its observed values since the last Reset call
+//	${key}_min   gauge, for min of its observed values since the last Reset call (only exists if count > 0)
+//	${key}_max   gauge, for max of its observed values since the last Reset call (only exists if count > 0)
+//	${key}_avg   gauge, for avg of its observed values since the last Reset call (only exists if count > 0)
+func (s summary) WriteTo(rv map[string]int64, key string, mul, div int) {
+	if s.count > 0 {
+		rv[key+"_min"] = int64(s.min * float64(mul) / float64(div))
+		rv[key+"_max"] = int64(s.max * float64(mul) / float64(div))
+		rv[key+"_sum"] = int64(s.sum * float64(mul) / float64(div))
+		rv[key+"_count"] = s.count
+		rv[key+"_avg"] = int64(s.sum / float64(s.count) * float64(mul) / float64(div))
+	} else {
+		rv[key+"_count"] = 0
+		rv[key+"_sum"] = 0
+		delete(rv, key+"_min")
+		delete(rv, key+"_max")
+		delete(rv, key+"_avg")
+	}
+}
+
+// Reset resets all of its counters.
+// Call it before every scrape loop.
+func (s *summary) Reset() {
+	s.min = math.MaxFloat64
+	s.max = -math.MaxFloat64
+	s.sum = 0
+	s.count = 0
+}
+
+// Observe observes a value.
+func (s *summary) Observe(v float64) {
+	if v > s.max {
+		s.max = v
+	}
+	if v < s.min {
+		s.min = v
+	}
+	s.sum += v
+	s.count++
+}
+
+// NewSummaryVec creates a new SummaryVec instance.
+func NewSummaryVec() SummaryVec {
+	return SummaryVec{}
+}
+
+// WriteTo writes its value into given map.
+func (c SummaryVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+	for name, value := range c {
+		value.WriteTo(rv, key+"_"+name, mul, div)
+	}
+}
+
+// Get gets the summary instance by name, creating it if needed.
+func (c SummaryVec) Get(name string) Summary {
+	item, ok := c[name]
+	if ok {
+		return item
+	}
+	item = NewSummary()
+	c[name] = item
+	return item
+}
+
+// Reset resets all its summaries.
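+//
+// A typical scrape-loop sketch (the metric and key names are hypothetical):
+//
+//	sv := NewSummaryVec()
+//	sv.Get("response_time").Observe(0.2)
+//	rv := map[string]int64{}
+//	sv.WriteTo(rv, "app", 1000, 1) // app_response_time_{sum,count,min,max,avg}
+//	sv.Reset()                     // reset before the next scrape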
+func (c SummaryVec) Reset() { + for _, value := range c { + value.Reset() + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/summary_test.go b/src/go/collectors/go.d.plugin/pkg/metrics/summary_test.go new file mode 100644 index 00000000000000..b98218369ffa41 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/summary_test.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewSummary(t *testing.T) { + s := NewSummary().(*summary) + assert.EqualValues(t, 0, s.count) + assert.Equal(t, 0.0, s.sum) + s.Observe(3.14) + assert.Equal(t, 3.14, s.min) + assert.Equal(t, 3.14, s.max) +} + +func TestSummary_WriteTo(t *testing.T) { + s := NewSummary() + + m1 := map[string]int64{} + s.WriteTo(m1, "pi", 100, 1) + assert.Len(t, m1, 2) + assert.Contains(t, m1, "pi_count") + assert.Contains(t, m1, "pi_sum") + assert.EqualValues(t, 0, m1["pi_count"]) + assert.EqualValues(t, 0, m1["pi_sum"]) + + s.Observe(3.14) + s.Observe(2.71) + s.Observe(-10) + + m2 := map[string]int64{} + s.WriteTo(m1, "pi", 100, 1) + s.WriteTo(m2, "pi", 100, 1) + assert.Equal(t, m1, m2) + assert.Len(t, m1, 5) + assert.EqualValues(t, 3, m1["pi_count"]) + assert.EqualValues(t, -415, m1["pi_sum"]) + assert.EqualValues(t, -1000, m1["pi_min"]) + assert.EqualValues(t, 314, m1["pi_max"]) + assert.EqualValues(t, -138, m1["pi_avg"]) + + s.Reset() + s.WriteTo(m1, "pi", 100, 1) + assert.Len(t, m1, 2) + assert.Contains(t, m1, "pi_count") + assert.Contains(t, m1, "pi_sum") + assert.EqualValues(t, 0, m1["pi_count"]) + assert.EqualValues(t, 0, m1["pi_sum"]) +} + +func TestSummary_Reset(t *testing.T) { + s := NewSummary().(*summary) + s.Observe(1) + s.Reset() + assert.EqualValues(t, 0, s.count) +} + +func BenchmarkSummary_Observe(b *testing.B) { + s := NewSummary() + for i := 0; i < b.N; i++ { + s.Observe(2.5) + } +} + +func BenchmarkSummary_WriteTo(b *testing.B) { + s := NewSummary() + s.Observe(2.5) + s.Observe(3.5) + s.Observe(4.5) + m := map[string]int64{} + for i := 0; i < b.N; i++ { + s.WriteTo(m, "pi", 100, 1) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter.go b/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter.go new file mode 100644 index 00000000000000..0b9da5cdbf3c8d --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "github.com/axiomhq/hyperloglog" + "github.com/netdata/go.d.plugin/pkg/stm" +) + +type ( + UniqueCounter interface { + stm.Value + Insert(s string) + Value() int + Reset() + } + + mapUniqueCounter struct { + m map[string]bool + } + + hyperLogLogUniqueCounter struct { + sketch *hyperloglog.Sketch + } + + UniqueCounterVec struct { + useHyperLogLog bool + Items map[string]UniqueCounter + } +) + +var ( + _ stm.Value = mapUniqueCounter{} + _ stm.Value = hyperLogLogUniqueCounter{} + _ stm.Value = UniqueCounterVec{} +) + +func NewUniqueCounter(useHyperLogLog bool) UniqueCounter { + if useHyperLogLog { + return &hyperLogLogUniqueCounter{hyperloglog.New()} + } + return mapUniqueCounter{map[string]bool{}} +} + +func (c mapUniqueCounter) WriteTo(rv map[string]int64, key string, mul, div int) { + rv[key] = int64(float64(c.Value()*mul) / float64(div)) +} + +func (c mapUniqueCounter) Insert(s string) { + c.m[s] = true +} + +func (c mapUniqueCounter) Value() int { + return len(c.m) +} + +func (c mapUniqueCounter) Reset() { + for key := range c.m { + 
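+		// delete in place: with a value receiver, reassigning c.m would not persist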
delete(c.m, key) + } +} + +// WriteTo writes its value into given map. +func (c hyperLogLogUniqueCounter) WriteTo(rv map[string]int64, key string, mul, div int) { + rv[key] = int64(float64(c.Value()*mul) / float64(div)) +} + +func (c *hyperLogLogUniqueCounter) Insert(s string) { + c.sketch.Insert([]byte(s)) +} + +func (c *hyperLogLogUniqueCounter) Value() int { + return int(c.sketch.Estimate()) +} + +func (c *hyperLogLogUniqueCounter) Reset() { + c.sketch = hyperloglog.New() +} + +func NewUniqueCounterVec(useHyperLogLog bool) UniqueCounterVec { + return UniqueCounterVec{ + Items: map[string]UniqueCounter{}, + useHyperLogLog: useHyperLogLog, + } +} + +// WriteTo writes its value into given map. +func (c UniqueCounterVec) WriteTo(rv map[string]int64, key string, mul, div int) { + for name, value := range c.Items { + value.WriteTo(rv, key+"_"+name, mul, div) + } +} + +// Get gets UniqueCounter instance by name +func (c UniqueCounterVec) Get(name string) UniqueCounter { + item, ok := c.Items[name] + if ok { + return item + } + item = NewUniqueCounter(c.useHyperLogLog) + c.Items[name] = item + return item +} + +func (c UniqueCounterVec) Reset() { + for _, value := range c.Items { + value.Reset() + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter_test.go b/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter_test.go new file mode 100644 index 00000000000000..b9439c9a3964c3 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/metrics/unique_counter_test.go @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package metrics + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHyperLogLogUniqueCounter_Value(t *testing.T) { + for _, useHLL := range []bool{true, false} { + t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) { + c := NewUniqueCounter(useHLL) + assert.Equal(t, 0, c.Value()) + + c.Insert("foo") + assert.Equal(t, 1, c.Value()) + + c.Insert("foo") + assert.Equal(t, 1, c.Value()) + + c.Insert("bar") + assert.Equal(t, 2, c.Value()) + + c.Insert("baz") + assert.Equal(t, 3, c.Value()) + + c.Reset() + assert.Equal(t, 0, c.Value()) + + c.Insert("foo") + assert.Equal(t, 1, c.Value()) + }) + } +} + +func TestHyperLogLogUniqueCounter_WriteTo(t *testing.T) { + for _, useHLL := range []bool{true, false} { + t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) { + c := NewUniqueCounterVec(useHLL) + c.Get("a").Insert("foo") + c.Get("a").Insert("bar") + c.Get("b").Insert("foo") + + m := map[string]int64{} + c.WriteTo(m, "pi", 100, 1) + assert.Len(t, m, 2) + assert.EqualValues(t, 200, m["pi_a"]) + assert.EqualValues(t, 100, m["pi_b"]) + }) + } +} + +func TestUniqueCounterVec_Reset(t *testing.T) { + for _, useHLL := range []bool{true, false} { + t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) { + c := NewUniqueCounterVec(useHLL) + c.Get("a").Insert("foo") + c.Get("a").Insert("bar") + c.Get("b").Insert("foo") + + assert.Equal(t, 2, len(c.Items)) + assert.Equal(t, 2, c.Get("a").Value()) + assert.Equal(t, 1, c.Get("b").Value()) + + c.Reset() + assert.Equal(t, 2, len(c.Items)) + assert.Equal(t, 0, c.Get("a").Value()) + assert.Equal(t, 0, c.Get("b").Value()) + }) + } +} + +func BenchmarkUniqueCounter_Insert(b *testing.B) { + benchmarks := []struct { + name string + same bool + hyperloglog bool + nop bool + }{ + + {"map-same", true, false, false}, + {"hll-same", true, true, false}, + + {"nop", false, false, true}, + {"map-diff", false, false, false}, + {"hll-diff", false, true, false}, + } + for _, bm := 
range benchmarks {
+		b.Run(bm.name, func(b *testing.B) {
+			c := NewUniqueCounterVec(bm.hyperloglog)
+			if bm.same {
+				for i := 0; i < b.N; i++ {
+					c.Get("a").Insert("foo")
+				}
+			} else if bm.nop {
+				for i := 0; i < b.N; i++ {
+					strconv.Itoa(i)
+				}
+			} else {
+				for i := 0; i < b.N; i++ {
+					c.Get("a").Insert(strconv.Itoa(i))
+				}
+			}
+		})
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/multipath.go b/src/go/collectors/go.d.plugin/pkg/multipath/multipath.go
new file mode 100644
index 00000000000000..041de081b2bf5f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/multipath/multipath.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+type ErrNotFound struct{ msg string }
+
+func (e ErrNotFound) Error() string { return e.msg }
+
+// IsNotFound returns a boolean indicating whether the error is ErrNotFound or not.
+func IsNotFound(err error) bool {
+	switch err.(type) {
+	case ErrNotFound:
+		return true
+	}
+	return false
+}
+
+// MultiPath is an ordered list of directories to search.
+type MultiPath []string
+
+// New creates a MultiPath from the given directories, dropping
+// empty entries and duplicates.
+func New(paths ...string) MultiPath {
+	set := map[string]bool{}
+	mPath := make(MultiPath, 0)
+
+	for _, dir := range paths {
+		if dir == "" {
+			continue
+		}
+		if d, err := homedir.Expand(dir); err == nil { // use the expanded path only on success
+			dir = d
+		}
+		if !set[dir] {
+			mPath = append(mPath, dir)
+			set[dir] = true
+		}
+	}
+
+	return mPath
+}
+
+// Find finds a file in given paths, returning the first match.
+func (p MultiPath) Find(filename string) (string, error) {
+	for _, dir := range p {
+		file := filepath.Join(dir, filename)
+		if _, err := os.Stat(file); !os.IsNotExist(err) {
+			return file, nil
+		}
+	}
+	return "", ErrNotFound{msg: fmt.Sprintf("can't find '%s' in %v", filename, p)}
+}
+
+// FindFiles returns all regular files across the paths whose names end
+// with suffix; for duplicate names, the earliest path wins.
+func (p MultiPath) FindFiles(suffix string) ([]string, error) {
+	set := make(map[string]bool)
+	var files []string
+
+	for _, dir := range p {
+		entries, err := os.ReadDir(dir)
+		if err != nil {
+			continue
+		}
+
+		for _, e := range entries {
+			if !e.Type().IsRegular() || !strings.HasSuffix(e.Name(), suffix) || set[e.Name()] {
+				continue
+			}
+			set[e.Name()] = true
+
+			name := filepath.Join(dir, e.Name())
+			files = append(files, name)
+		}
+	}
+
+	return files, nil
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/multipath_test.go b/src/go/collectors/go.d.plugin/pkg/multipath/multipath_test.go
new file mode 100644
index 00000000000000..d38d6d5bfcbcd8
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/multipath/multipath_test.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNew(t *testing.T) {
+	assert.Len(
+		t,
+		New("path1", "path2", "path2", "", "path3"),
+		3,
+	)
+}
+
+func TestMultiPath_Find(t *testing.T) {
New("path1", "testdata/data1") + + v, err := m.Find("not exist") + assert.Zero(t, v) + assert.Error(t, err) + + v, err = m.Find("test-empty.conf") + assert.Equal(t, "testdata/data1/test-empty.conf", v) + assert.Nil(t, err) + + v, err = m.Find("test.conf") + assert.Equal(t, "testdata/data1/test.conf", v) + assert.Nil(t, err) +} + +func TestIsNotFound(t *testing.T) { + assert.True(t, IsNotFound(ErrNotFound{})) + assert.False(t, IsNotFound(errors.New(""))) +} + +func TestMultiPath_FindFiles(t *testing.T) { + m := New("path1", "testdata/data2", "testdata/data1") + + files, err := m.FindFiles(".conf") + assert.NoError(t, err) + assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files) + + files, err = m.FindFiles("") + assert.NoError(t, err) + assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files) + + files, err = m.FindFiles(".not_exist") + assert.NoError(t, err) + assert.Equal(t, []string(nil), files) + + m = New("path1", "testdata/data1", "testdata/data2") + files, err = m.FindFiles(".conf") + assert.NoError(t, err) + assert.Equal(t, []string{"testdata/data1/test-empty.conf", "testdata/data1/test.conf"}, files) +} diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data1/test-empty.conf b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data1/test-empty.conf new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data1/test.conf b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data1/test.conf new file mode 100644 index 00000000000000..aebe64730f6e88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data1/test.conf @@ -0,0 +1 @@ +not empty! \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data2/test-empty.conf b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data2/test-empty.conf new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data2/test.conf b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data2/test.conf new file mode 100644 index 00000000000000..aebe64730f6e88 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/multipath/testdata/data2/test.conf @@ -0,0 +1 @@ +not empty! \ No newline at end of file diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/client.go b/src/go/collectors/go.d.plugin/pkg/prometheus/client.go new file mode 100644 index 00000000000000..7e359a99ecac10 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/client.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + "github.com/netdata/go.d.plugin/pkg/web" +) + +type ( + // Prometheus is a helper for scrape and parse prometheus format metrics. 
+ Prometheus interface { + // ScrapeSeries and parse prometheus format metrics + ScrapeSeries() (Series, error) + Scrape() (MetricFamilies, error) + HTTPClient() *http.Client + } + + prometheus struct { + client *http.Client + request web.Request + filepath string + + sr selector.Selector + + parser promTextParser + + buf *bytes.Buffer + gzipr *gzip.Reader + bodyBuf *bufio.Reader + } +) + +const ( + acceptHeader = `text/plain;version=0.0.4;q=1,*/*;q=0.1` + userAgentHeader = `netdata/go.d.plugin` +) + +// New creates a Prometheus instance. +func New(client *http.Client, request web.Request) Prometheus { + return &prometheus{ + client: client, + request: request, + buf: bytes.NewBuffer(make([]byte, 0, 16000)), + } +} + +// NewWithSelector creates a Prometheus instance with the selector. +func NewWithSelector(client *http.Client, request web.Request, sr selector.Selector) Prometheus { + p := &prometheus{ + client: client, + request: request, + sr: sr, + buf: bytes.NewBuffer(make([]byte, 0, 16000)), + parser: promTextParser{sr: sr}, + } + + if v, err := url.Parse(request.URL); err == nil && v.Scheme == "file" { + p.filepath = filepath.Join(v.Host, v.Path) + } + + return p +} + +func (p *prometheus) HTTPClient() *http.Client { + return p.client +} + +// ScrapeSeries scrapes metrics, parses and sorts +func (p *prometheus) ScrapeSeries() (Series, error) { + p.buf.Reset() + + if err := p.fetch(p.buf); err != nil { + return nil, err + } + + return p.parser.parseToSeries(p.buf.Bytes()) +} + +func (p *prometheus) Scrape() (MetricFamilies, error) { + p.buf.Reset() + + if err := p.fetch(p.buf); err != nil { + return nil, err + } + + return p.parser.parseToMetricFamilies(p.buf.Bytes()) +} + +func (p *prometheus) fetch(w io.Writer) error { + // TODO: should be a separate text file prom client + if p.filepath != "" { + f, err := os.Open(p.filepath) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(w, f) + + return err + } + + req, err := web.NewHTTPRequest(p.request) + if err != nil { + return err + } + + req.Header.Add("Accept", acceptHeader) + req.Header.Add("Accept-Encoding", "gzip") + req.Header.Set("User-Agent", userAgentHeader) + + resp, err := p.client.Do(req) + if err != nil { + return err + } + + defer func() { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("server '%s' returned HTTP status code %d (%s)", req.URL, resp.StatusCode, resp.Status) + } + + if resp.Header.Get("Content-Encoding") != "gzip" { + _, err = io.Copy(w, resp.Body) + return err + } + + if p.gzipr == nil { + p.bodyBuf = bufio.NewReader(resp.Body) + p.gzipr, err = gzip.NewReader(p.bodyBuf) + if err != nil { + return err + } + } else { + p.bodyBuf.Reset(resp.Body) + _ = p.gzipr.Reset(p.bodyBuf) + } + + _, err = io.Copy(w, p.gzipr) + _ = p.gzipr.Close() + + return err +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/client_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/client_test.go new file mode 100644 index 00000000000000..45a68dd645827d --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/client_test.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + "bytes" + "compress/gzip" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + 
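+	// fixtures read at package init; Test_testClientDataIsValid below guards against missing files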
testData, _ = os.ReadFile("testdata/testdata.txt") + testDataNoMeta, _ = os.ReadFile("testdata/testdata.nometa.txt") +) + +func Test_testClientDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "testData": testData, + } { + require.NotNilf(t, data, name) + } +} + +func TestPrometheus404(t *testing.T) { + tsMux := http.NewServeMux() + tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + }) + ts := httptest.NewServer(tsMux) + defer ts.Close() + + req := web.Request{URL: ts.URL + "/metrics"} + prom := New(http.DefaultClient, req) + res, err := prom.ScrapeSeries() + + assert.Error(t, err) + assert.Nil(t, res) +} + +func TestPrometheusPlain(t *testing.T) { + tsMux := http.NewServeMux() + tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testData) + }) + ts := httptest.NewServer(tsMux) + defer ts.Close() + + req := web.Request{URL: ts.URL + "/metrics"} + prom := New(http.DefaultClient, req) + res, err := prom.ScrapeSeries() + + assert.NoError(t, err) + verifyTestData(t, res) +} + +func TestPrometheusPlainWithSelector(t *testing.T) { + tsMux := http.NewServeMux() + tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write(testData) + }) + ts := httptest.NewServer(tsMux) + defer ts.Close() + + req := web.Request{URL: ts.URL + "/metrics"} + sr, err := selector.Parse("go_gc*") + require.NoError(t, err) + prom := NewWithSelector(http.DefaultClient, req, sr) + + res, err := prom.ScrapeSeries() + require.NoError(t, err) + + for _, v := range res { + assert.Truef(t, strings.HasPrefix(v.Name(), "go_gc"), v.Name()) + } +} + +func TestPrometheusGzip(t *testing.T) { + counter := 0 + rawTestData := [][]byte{testData, testDataNoMeta} + tsMux := http.NewServeMux() + tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Encoding", "gzip") + w.WriteHeader(200) + gz := new(bytes.Buffer) + ww := gzip.NewWriter(gz) + _, _ = ww.Write(rawTestData[counter]) + _ = ww.Close() + _, _ = gz.WriteTo(w) + counter++ + }) + ts := httptest.NewServer(tsMux) + defer ts.Close() + + req := web.Request{URL: ts.URL + "/metrics"} + prom := New(http.DefaultClient, req) + + for i := 0; i < 2; i++ { + res, err := prom.ScrapeSeries() + assert.NoError(t, err) + verifyTestData(t, res) + } +} + +func TestPrometheusReadFromFile(t *testing.T) { + req := web.Request{URL: "file://testdata/testdata.txt"} + prom := NewWithSelector(http.DefaultClient, req, nil) + + for i := 0; i < 2; i++ { + res, err := prom.ScrapeSeries() + assert.NoError(t, err) + verifyTestData(t, res) + } +} + +func verifyTestData(t *testing.T, ms Series) { + assert.Equal(t, 410, len(ms)) + assert.Equal(t, "go_gc_duration_seconds", ms[0].Labels.Get("__name__")) + assert.Equal(t, "0.25", ms[0].Labels.Get("quantile")) + assert.InDelta(t, 4.9351e-05, ms[0].Value, 0.0001) + + notExistYet := ms.FindByName("not_exist_yet") + assert.NotNil(t, notExistYet) + assert.Len(t, notExistYet, 0) + + targetInterval := ms.FindByName("prometheus_target_interval_length_seconds") + assert.Len(t, targetInterval, 5) +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family.go b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family.go new file mode 100644 index 00000000000000..19459cfd0df0bb --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family.go @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package prometheus + +import ( + 
"github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" +) + +type ( + MetricFamilies map[string]*MetricFamily + + MetricFamily struct { + name string + help string + typ textparse.MetricType + metrics []Metric + } + Metric struct { + labels []labels.Label + gauge *Gauge + counter *Counter + summary *Summary + histogram *Histogram + untyped *Untyped + } + Gauge struct { + value float64 + } + Counter struct { + value float64 + } + Summary struct { + sum float64 + count float64 + quantiles []Quantile + } + Quantile struct { + quantile float64 + value float64 + } + Histogram struct { + sum float64 + count float64 + buckets []Bucket + } + Bucket struct { + upperBound float64 + cumulativeCount float64 + } + Untyped struct { + value float64 + } +) + +func (mfs MetricFamilies) Len() int { + return len(mfs) +} + +func (mfs MetricFamilies) Get(name string) *MetricFamily { + return (mfs)[name] +} + +func (mfs MetricFamilies) GetGauge(name string) *MetricFamily { + return mfs.get(name, textparse.MetricTypeGauge) +} + +func (mfs MetricFamilies) GetCounter(name string) *MetricFamily { + return mfs.get(name, textparse.MetricTypeCounter) +} + +func (mfs MetricFamilies) GetSummary(name string) *MetricFamily { + return mfs.get(name, textparse.MetricTypeSummary) +} + +func (mfs MetricFamilies) GetHistogram(name string) *MetricFamily { + return mfs.get(name, textparse.MetricTypeHistogram) +} + +func (mfs MetricFamilies) get(name string, typ textparse.MetricType) *MetricFamily { + mf := mfs.Get(name) + if mf == nil || mf.typ != typ { + return nil + } + return mf +} + +func (mf *MetricFamily) Name() string { return mf.name } +func (mf *MetricFamily) Help() string { return mf.help } +func (mf *MetricFamily) Type() textparse.MetricType { return mf.typ } +func (mf *MetricFamily) Metrics() []Metric { return mf.metrics } + +func (m *Metric) Labels() labels.Labels { return m.labels } +func (m *Metric) Gauge() *Gauge { return m.gauge } +func (m *Metric) Counter() *Counter { return m.counter } +func (m *Metric) Summary() *Summary { return m.summary } +func (m *Metric) Histogram() *Histogram { return m.histogram } +func (m *Metric) Untyped() *Untyped { return m.untyped } + +func (g Gauge) Value() float64 { return g.value } +func (c Counter) Value() float64 { return c.value } +func (u Untyped) Value() float64 { return u.value } + +func (s Summary) Count() float64 { return s.count } +func (s Summary) Sum() float64 { return s.sum } +func (s Summary) Quantiles() []Quantile { return s.quantiles } + +func (q Quantile) Quantile() float64 { return q.quantile } +func (q Quantile) Value() float64 { return q.value } + +func (h Histogram) Count() float64 { return h.count } +func (h Histogram) Sum() float64 { return h.sum } +func (h Histogram) Buckets() []Bucket { return h.buckets } + +func (b Bucket) UpperBound() float64 { return b.upperBound } +func (b Bucket) CumulativeCount() float64 { return b.cumulativeCount } diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go new file mode 100644 index 00000000000000..a9f19c8b67dc08 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go @@ -0,0 +1,356 @@ +package prometheus + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/stretchr/testify/assert" +) + +func TestMetricFamilies_Len(t *testing.T) { + tests := map[string]struct { + 
diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go
new file mode 100644
index 00000000000000..a9f19c8b67dc08
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_family_test.go
@@ -0,0 +1,356 @@
+package prometheus
+
+import (
+	"testing"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMetricFamilies_Len(t *testing.T) {
+	tests := map[string]struct {
+		mfs     MetricFamilies
+		wantLen int
+	}{
+		"initialized with two elements": {
+			mfs:     MetricFamilies{"1": nil, "2": nil},
+			wantLen: 2,
+		},
+		"not initialized": {
+			mfs:     nil,
+			wantLen: 0,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(t, test.mfs.Len(), test.wantLen)
+		})
+	}
+}
+
+func TestMetricFamilies_Get(t *testing.T) {
+	const n = "metric"
+
+	tests := map[string]struct {
+		mfs    MetricFamilies
+		wantMF *MetricFamily
+	}{
+		"metric is found": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n}},
+			wantMF: &MetricFamily{name: n},
+		},
+		"metric is not found": {
+			mfs:    MetricFamilies{"!" + n: &MetricFamily{name: n}},
+			wantMF: nil,
+		},
+		"not initialized": {
+			mfs:    nil,
+			wantMF: nil,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(t, test.mfs.Get(n), test.wantMF)
+		})
+	}
+}
+
+func TestMetricFamilies_GetGauge(t *testing.T) {
+	const n = "metric"
+
+	tests := map[string]struct {
+		mfs    MetricFamilies
+		wantMF *MetricFamily
+	}{
+		"metric is found and is Gauge": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}},
+			wantMF: &MetricFamily{name: n, typ: textparse.MetricTypeGauge},
+		},
+		"metric is found but it is not Gauge": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeUnknown}},
+			wantMF: nil,
+		},
+		"metric is not found": {
+			mfs:    MetricFamilies{"!" + n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}},
+			wantMF: nil,
+		},
+		"not initialized": {
+			mfs:    nil,
+			wantMF: nil,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(t, test.mfs.GetGauge(n), test.wantMF)
+		})
+	}
+}
+
+func TestMetricFamilies_GetCounter(t *testing.T) {
+	const n = "metric"
+
+	tests := map[string]struct {
+		mfs    MetricFamilies
+		wantMF *MetricFamily
+	}{
+		"metric is found and is Counter": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeCounter}},
+			wantMF: &MetricFamily{name: n, typ: textparse.MetricTypeCounter},
+		},
+		"metric is found but it is not Counter": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}},
+			wantMF: nil,
+		},
+		"metric is not found": {
+			mfs:    MetricFamilies{"!" + n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}},
+			wantMF: nil,
+		},
+		"not initialized": {
+			mfs:    nil,
+			wantMF: nil,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			assert.Equal(t, test.mfs.GetCounter(n), test.wantMF)
+		})
+	}
+}
+
+func TestMetricFamilies_GetSummary(t *testing.T) {
+	const n = "metric"
+
+	tests := map[string]struct {
+		mfs    MetricFamilies
+		wantMF *MetricFamily
+	}{
+		"metric is found and is Summary": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeSummary}},
+			wantMF: &MetricFamily{name: n, typ: textparse.MetricTypeSummary},
+		},
+		"metric is found but it is not Summary": {
+			mfs:    MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}},
+			wantMF: nil,
+		},
+		"metric is not found": {
+			mfs: MetricFamilies{"!"
+ n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}}, + wantMF: nil, + }, + "not initialized": { + mfs: nil, + wantMF: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.mfs.GetSummary(n), test.wantMF) + }) + } +} + +func TestMetricFamilies_GetHistogram(t *testing.T) { + const n = "metric" + + tests := map[string]struct { + mfs MetricFamilies + wantMF *MetricFamily + }{ + "metric is found and is Histogram": { + mfs: MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeHistogram}}, + wantMF: &MetricFamily{name: n, typ: textparse.MetricTypeHistogram}, + }, + "metric is found but it is not Histogram": { + mfs: MetricFamilies{n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}}, + wantMF: nil, + }, + "metric is not found": { + mfs: MetricFamilies{"!" + n: &MetricFamily{name: n, typ: textparse.MetricTypeGauge}}, + wantMF: nil, + }, + "not initialized": { + mfs: nil, + wantMF: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.mfs.GetHistogram(n), test.wantMF) + }) + } +} + +func TestMetricFamily_Name(t *testing.T) { + mf := &MetricFamily{name: "name"} + assert.Equal(t, mf.Name(), "name") +} + +func TestMetricFamily_Type(t *testing.T) { + mf := &MetricFamily{typ: textparse.MetricTypeGauge} + assert.Equal(t, mf.Type(), textparse.MetricTypeGauge) +} + +func TestMetricFamily_Help(t *testing.T) { + mf := &MetricFamily{help: "help"} + assert.Equal(t, mf.Help(), "help") +} + +func TestMetricFamily_Metrics(t *testing.T) { + metrics := []Metric{{gauge: &Gauge{value: 1}, counter: &Counter{value: 1}}} + mf := &MetricFamily{metrics: metrics} + assert.Equal(t, mf.Metrics(), metrics) +} + +func TestMetric_Labels(t *testing.T) { + lbs := labels.Labels{{Name: "1", Value: "1"}, {Name: "2", Value: "2"}} + m := &Metric{labels: lbs} + assert.Equal(t, m.Labels(), lbs) +} + +func TestMetric_Gauge(t *testing.T) { + tests := map[string]struct { + m *Metric + want *Gauge + }{ + "gauge set": { + m: &Metric{gauge: &Gauge{value: 1}}, + want: &Gauge{value: 1}, + }, + "gauge not set": { + m: &Metric{}, + want: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.m.Gauge(), test.want) + }) + } +} + +func TestMetric_Counter(t *testing.T) { + tests := map[string]struct { + m *Metric + want *Counter + }{ + "counter set": { + m: &Metric{counter: &Counter{value: 1}}, + want: &Counter{value: 1}, + }, + "counter not set": { + m: &Metric{}, + want: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.m.Counter(), test.want) + }) + } +} + +func TestMetric_Summary(t *testing.T) { + tests := map[string]struct { + m *Metric + want *Summary + }{ + "summary set": { + m: &Metric{summary: &Summary{sum: 0.1, count: 3}}, + want: &Summary{sum: 0.1, count: 3}, + }, + "summary not set": { + m: &Metric{}, + want: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.m.Summary(), test.want) + }) + } +} + +func TestMetric_Histogram(t *testing.T) { + tests := map[string]struct { + m *Metric + want *Histogram + }{ + "histogram set": { + m: &Metric{histogram: &Histogram{sum: 0.1, count: 3}}, + want: &Histogram{sum: 0.1, count: 3}, + }, + "histogram not set": { + m: &Metric{}, + want: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.m.Histogram(), test.want) + }) + } +} + +func TestGauge_Value(t 
*testing.T) {
+	assert.Equal(t, Gauge{value: 1}.Value(), 1.0)
+}
+
+func TestCounter_Value(t *testing.T) {
+	assert.Equal(t, Counter{value: 1}.Value(), 1.0)
+}
+
+func TestSummary_Sum(t *testing.T) {
+	assert.Equal(t, Summary{sum: 1}.Sum(), 1.0)
+}
+
+func TestSummary_Count(t *testing.T) {
+	assert.Equal(t, Summary{count: 1}.Count(), 1.0)
+}
+
+func TestSummary_Quantiles(t *testing.T) {
+	assert.Equal(t,
+		Summary{quantiles: []Quantile{{quantile: 0.1, value: 1}}}.Quantiles(),
+		[]Quantile{{quantile: 0.1, value: 1}},
+	)
+}
+
+func TestQuantile_Value(t *testing.T) {
+	assert.Equal(t, Quantile{value: 1}.Value(), 1.0)
+}
+
+func TestQuantile_Quantile(t *testing.T) {
+	assert.Equal(t, Quantile{quantile: 0.1}.Quantile(), 0.1)
+}
+
+func TestHistogram_Sum(t *testing.T) {
+	assert.Equal(t, Histogram{sum: 1}.Sum(), 1.0)
+}
+
+func TestHistogram_Count(t *testing.T) {
+	assert.Equal(t, Histogram{count: 1}.Count(), 1.0)
+}
+
+func TestHistogram_Buckets(t *testing.T) {
+	assert.Equal(t,
+		Histogram{buckets: []Bucket{{upperBound: 0.1, cumulativeCount: 1}}}.Buckets(),
+		[]Bucket{{upperBound: 0.1, cumulativeCount: 1}},
+	)
+}
+
+func TestBucket_UpperBound(t *testing.T) {
+	assert.Equal(t, Bucket{upperBound: 0.1}.UpperBound(), 0.1)
+}
+
+func TestBucket_CumulativeCount(t *testing.T) {
+	assert.Equal(t, Bucket{cumulativeCount: 1}.CumulativeCount(), 1.0)
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series.go b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series.go
new file mode 100644
index 00000000000000..31914f4b279667
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+	"sort"
+
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+type (
+	// SeriesSample is a pair of label set and value.
+	SeriesSample struct {
+		Labels labels.Labels
+		Value  float64
+	}
+
+	// Series is a list of SeriesSample.
+	Series []SeriesSample
+)
+
+// Name returns the metric name (the value of the first label, which is
+// assumed to be __name__).
+func (s SeriesSample) Name() string {
+	return s.Labels[0].Value
+}
+
+// Add appends a sample to the series.
+func (s *Series) Add(kv SeriesSample) {
+	*s = append(*s, kv)
+}
+
+// Reset resets the series to be empty,
+// but it retains the underlying storage for use by future writes.
+func (s *Series) Reset() {
+	*s = (*s)[:0]
+}
+
+// Sort sorts the series by metric name.
+func (s Series) Sort() {
+	sort.Sort(s)
+}
+
+// Len returns the number of samples.
+func (s Series) Len() int {
+	return len(s)
+}
+
+// Less reports whether the element with
+// index i should sort before the element with index j.
+func (s Series) Less(i, j int) bool {
+	return s[i].Name() < s[j].Name()
+}
+
+// Swap swaps the elements with indexes i and j.
+func (s Series) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// FindByName returns all samples whose __name__ label matches the given name.
+// It expects the series to be sorted.
+// Complexity: O(log(N) + K), where K is the number of matches.
+func (s Series) FindByName(name string) Series {
+	from := sort.Search(len(s), func(i int) bool {
+		return s[i].Name() >= name
+	})
+	if from == len(s) || s[from].Name() != name { // not found
+		return Series{}
+	}
+	until := from + 1
+	for until < len(s) && s[until].Name() == name {
+		until++
+	}
+	return s[from:until]
+}
+
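A short sketch of the sorted-lookup contract above, reusing fixture names from the tests further down (an illustration, not part of the patch):

```go
package prometheus_test

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/prometheus"

	"github.com/prometheus/prometheus/model/labels"
)

// FindByName does a binary search, so Sort must run first; forgetting the
// Sort call is the classic misuse of this API.
func ExampleSeries_FindByName() {
	var s prometheus.Series
	s.Add(prometheus.SeriesSample{
		Labels: labels.Labels{{Name: "__name__", Value: "jvm_threads_peak"}},
		Value:  26,
	})
	s.Add(prometheus.SeriesSample{
		Labels: labels.Labels{{Name: "__name__", Value: "logback_events_total"}, {Name: "level", Value: "error"}},
		Value:  10,
	})

	s.Sort()

	for _, sample := range s.FindByName("logback_events_total") {
		fmt.Println(sample.Name(), sample.Value)
	}
	// Output: logback_events_total 10
}
```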
+// FindByNames returns all samples whose __name__ label matches any of the given names.
+// It expects the series to be sorted.
+// Complexity: O(M*log(N)), where M is the number of names.
+func (s Series) FindByNames(names ...string) Series {
+	switch len(names) {
+	case 0:
+		return Series{}
+	case 1:
+		return s.FindByName(names[0])
+	}
+	var result Series
+	for _, name := range names {
+		result = append(result, s.FindByName(name)...)
+	}
+	return result
+}
+
+// Max returns the max value.
+// It does NOT require the series to be sorted.
+// Complexity: O(N)
+func (s Series) Max() float64 {
+	switch len(s) {
+	case 0:
+		return 0
+	case 1:
+		return s[0].Value
+	}
+	max := s[0].Value
+	for _, kv := range s[1:] {
+		if max < kv.Value {
+			max = kv.Value
+		}
+	}
+	return max
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series_test.go
new file mode 100644
index 00000000000000..80c805474fe379
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/prometheus/metric_series_test.go
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+	"testing"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TODO: write better tests
+
+const (
+	testName1 = "logback_events_total"
+	testName2 = "jvm_threads_peak"
+)
+
+func newTestSeries() Series {
+	return Series{
+		{
+			Value: 10,
+			Labels: labels.Labels{
+				{Name: "__name__", Value: testName1},
+				{Name: "level", Value: "error"},
+			},
+		},
+		{
+			Value: 20,
+			Labels: labels.Labels{
+				{Name: "__name__", Value: testName1},
+				{Name: "level", Value: "warn"},
+			},
+		},
+		{
+			Value: 5,
+			Labels: labels.Labels{
+				{Name: "__name__", Value: testName1},
+				{Name: "level", Value: "info"},
+			},
+		},
+		{
+			Value: 15,
+			Labels: labels.Labels{
+				{Name: "__name__", Value: testName1},
+				{Name: "level", Value: "debug"},
+			},
+		},
+		{
+			Value: 26,
+			Labels: labels.Labels{
+				{Name: "__name__", Value: testName2},
+			},
+		},
+	}
+}
+
+func TestSeries_Name(t *testing.T) {
+	m := newTestSeries()
+
+	assert.Equal(t, testName1, m[0].Name())
+	assert.Equal(t, testName1, m[1].Name())
+}
+
+func TestSeries_Add(t *testing.T) {
+	m := newTestSeries()
+
+	require.Len(t, m, 5)
+	m.Add(SeriesSample{})
+	assert.Len(t, m, 6)
+}
+
+func TestSeries_FindByName(t *testing.T) {
+	m := newTestSeries()
+	m.Sort()
+	assert.Len(t, Series{}.FindByName(testName1), 0)
+	assert.Len(t, m.FindByName(testName1), len(m)-1)
+}
+
+func TestSeries_FindByNames(t *testing.T) {
+	m := newTestSeries()
+	m.Sort()
+	assert.Len(t, m.FindByNames(), 0)
+	assert.Len(t, m.FindByNames(testName1), len(m)-1)
+	assert.Len(t, m.FindByNames(testName1, testName2), len(m))
+}
+
+func TestSeries_Len(t *testing.T) {
+	m := newTestSeries()
+
+	assert.Equal(t, len(m), m.Len())
+}
+
+func TestSeries_Less(t *testing.T) {
+	m := newTestSeries()
+
+	assert.False(t, m.Less(0, 1))
+	assert.True(t, m.Less(4, 0))
+}
+
+func TestSeries_Max(t *testing.T) {
+	m := newTestSeries()
+
+	assert.Equal(t, float64(26), m.Max())
+	assert.Equal(t, 0.0, Series{}.Max())
+}
+
+func TestSeries_Reset(t *testing.T) {
+	m := newTestSeries()
+	m.Reset()
+
+	assert.Len(t, m, 0)
+}
+
+func TestSeries_Sort(t *testing.T) {
+	m := newTestSeries()
+	m.Sort()
+
+	assert.Equal(t, testName2, m[0].Name())
+}
+
+func TestSeries_Swap(t *testing.T) {
+	m := newTestSeries()
+
+	m0 := m[0]
+	m1 := m[1]
+
+	m.Swap(0, 1)
+
+	assert.Equal(t, m0, m[1])
+	assert.Equal(t, m1, m[0])
+}
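The parser added next exposes two internal entry points: parseToSeries flattens exposition text into sorted (labels, value) pairs, while parseToMetricFamilies builds the typed structures above. A within-package sketch of the latter (illustrative metric name and text, minimal error handling; not part of the patch):

```go
package prometheus

import "fmt"

// exampleParse is an illustrative, within-package sketch: parse exposition
// text once and read a typed family back out.
func exampleParse() error {
	txt := []byte(`
# HELP example_requests_total Total requests.
# TYPE example_requests_total counter
example_requests_total{code="200"} 10
`)

	var p promTextParser

	mfs, err := p.parseToMetricFamilies(txt)
	if err != nil {
		return err
	}

	if mf := mfs.GetCounter("example_requests_total"); mf != nil {
		for _, m := range mf.Metrics() {
			fmt.Println(mf.Name(), m.Labels(), m.Counter().Value())
		}
	}

	return nil
}
```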
diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/parse.go b/src/go/collectors/go.d.plugin/pkg/prometheus/parse.go
new file mode 100644
index 00000000000000..2586243335c431
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/prometheus/parse.go
@@ -0,0 +1,412 @@
+package prometheus
+
+import (
+	"errors"
+	"io"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/netdata/go.d.plugin/pkg/prometheus/selector"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
+)
+
+const (
+	quantileLabel = "quantile"
+	bucketLabel   = "le"
+)
+
+const (
+	countSuffix  = "_count"
+	sumSuffix    = "_sum"
+	bucketSuffix = "_bucket"
+)
+
+type promTextParser struct {
+	metrics MetricFamilies
+	series  Series
+
+	sr selector.Selector
+
+	currMF     *MetricFamily
+	currSeries labels.Labels
+
+	summaries  map[uint64]*Summary
+	histograms map[uint64]*Histogram
+
+	isCount    bool
+	isSum      bool
+	isQuantile bool
+	isBucket   bool
+
+	currQuantile float64
+	currBucket   float64
+}
+
+func (p *promTextParser) parseToSeries(text []byte) (Series, error) {
+	p.series.Reset()
+
+	parser := textparse.NewPromParser(text)
+	for {
+		entry, err := parser.Next()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			if entry == textparse.EntryInvalid && strings.HasPrefix(err.Error(), "invalid metric type") {
+				continue
+			}
+			return nil, err
+		}
+
+		switch entry {
+		case textparse.EntrySeries:
+			p.currSeries = p.currSeries[:0]
+
+			parser.Metric(&p.currSeries)
+
+			if p.sr != nil && !p.sr.Matches(p.currSeries) {
+				continue
+			}
+
+			_, _, val := parser.Series()
+			p.series.Add(SeriesSample{Labels: copyLabels(p.currSeries), Value: val})
+		}
+	}
+
+	p.series.Sort()
+
+	return p.series, nil
+}
+
+var reSpace = regexp.MustCompile(`\s+`)
+
+func (p *promTextParser) parseToMetricFamilies(text []byte) (MetricFamilies, error) {
+	p.reset()
+
+	parser := textparse.NewPromParser(text)
+	for {
+		entry, err := parser.Next()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			if entry == textparse.EntryInvalid && strings.HasPrefix(err.Error(), "invalid metric type") {
+				continue
+			}
+			return nil, err
+		}
+
+		switch entry {
+		case textparse.EntryHelp:
+			name, help := parser.Help()
+			p.setMetricFamilyByName(string(name))
+			p.currMF.help = string(help)
+			if strings.IndexByte(p.currMF.help, '\n') != -1 {
+				// convert multiline to one line because HELP is used as the chart title.
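+				// (illustrative: a HELP of "First line.\n  Second line."
+				// is collapsed to "First line. Second line.")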
+				p.currMF.help = reSpace.ReplaceAllString(strings.TrimSpace(p.currMF.help), " ")
+			}
+		case textparse.EntryType:
+			name, typ := parser.Type()
+			p.setMetricFamilyByName(string(name))
+			p.currMF.typ = typ
+		case textparse.EntrySeries:
+			p.currSeries = p.currSeries[:0]
+
+			parser.Metric(&p.currSeries)
+
+			if p.sr != nil && !p.sr.Matches(p.currSeries) {
+				continue
+			}
+
+			p.setMetricFamilyBySeries()
+
+			_, _, value := parser.Series()
+
+			switch p.currMF.typ {
+			case textparse.MetricTypeGauge:
+				p.addGauge(value)
+			case textparse.MetricTypeCounter:
+				p.addCounter(value)
+			case textparse.MetricTypeSummary:
+				p.addSummary(value)
+			case textparse.MetricTypeHistogram:
+				p.addHistogram(value)
+			case textparse.MetricTypeUnknown:
+				p.addUnknown(value)
+			}
+		}
+	}
+
+	for k, v := range p.metrics {
+		if len(v.Metrics()) == 0 {
+			delete(p.metrics, k)
+		}
+	}
+
+	return p.metrics, nil
+}
+
+// setMetricFamilyByName makes the family with the given name current,
+// creating it with type "unknown" on first sight.
+func (p *promTextParser) setMetricFamilyByName(name string) {
+	mf, ok := p.metrics[name]
+	if !ok {
+		mf = &MetricFamily{name: name, typ: textparse.MetricTypeUnknown}
+		p.metrics[name] = mf
+	}
+	p.currMF = mf
+}
+
+// setMetricFamilyBySeries resolves the family for the current series:
+// "_sum", "_count" and "_bucket" suffixed series are folded into their
+// summary/histogram family, and series carrying "quantile"/"le" labels are
+// type-inferred when no metadata was seen.
+func (p *promTextParser) setMetricFamilyBySeries() {
+	p.isSum, p.isCount, p.isQuantile, p.isBucket = false, false, false, false
+	p.currQuantile, p.currBucket = 0, 0
+
+	name := p.currSeries[0].Value
+
+	if p.currMF != nil && p.currMF.name == name {
+		if p.currMF.typ == textparse.MetricTypeSummary {
+			p.setQuantile()
+		}
+		return
+	}
+
+	typ := textparse.MetricTypeUnknown
+
+	switch {
+	case strings.HasSuffix(name, sumSuffix):
+		n := strings.TrimSuffix(name, sumSuffix)
+		if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+			p.isSum = true
+			p.currSeries[0].Value = n
+			p.currMF = mf
+			return
+		}
+	case strings.HasSuffix(name, countSuffix):
+		n := strings.TrimSuffix(name, countSuffix)
+		if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+			p.isCount = true
+			p.currSeries[0].Value = n
+			p.currMF = mf
+			return
+		}
+	case strings.HasSuffix(name, bucketSuffix):
+		n := strings.TrimSuffix(name, bucketSuffix)
+		if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+			p.currSeries[0].Value = n
+			p.setBucket()
+			p.currMF = mf
+			return
+		}
+		if p.currSeries.Has(bucketLabel) {
+			p.currSeries[0].Value = n
+			p.setBucket()
+			name = n
+			typ = textparse.MetricTypeHistogram
+		}
+	case p.currSeries.Has(quantileLabel):
+		typ = textparse.MetricTypeSummary
+		p.setQuantile()
+	}
+
+	p.setMetricFamilyByName(name)
+	if p.currMF.typ == "" || p.currMF.typ == textparse.MetricTypeUnknown {
+		p.currMF.typ = typ
+	}
+}
+
+// setQuantile extracts and removes the "quantile" label from the current series.
+func (p *promTextParser) setQuantile() {
+	if lbs, v, ok := removeLabel(p.currSeries, quantileLabel); ok {
+		p.isQuantile = true
+		p.currSeries = lbs
+		p.currQuantile, _ = strconv.ParseFloat(v, 64)
+	}
+}
+
+// setBucket extracts and removes the "le" label from the current series.
+func (p *promTextParser) setBucket() {
+	if lbs, v, ok := removeLabel(p.currSeries, bucketLabel); ok {
+		p.isBucket = true
+		p.currSeries = lbs
+		p.currBucket, _ = strconv.ParseFloat(v, 64)
+	}
+}
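A sketch of what the suffix folding above buys, with names borrowed from the test fixtures below (within-package, illustrative only): three differently named series collapse into one summary family, even without # TYPE metadata.

```go
package prometheus

import "github.com/prometheus/prometheus/model/textparse"

// summaryFoldingSketch is illustrative only: the _sum and _count series are
// folded into the quantile series' family instead of becoming families of
// their own.
func summaryFoldingSketch() bool {
	txt := []byte(`
test_summary_1_duration_microseconds{quantile="0.5"} 4931.921
test_summary_1_duration_microseconds_sum 283201.29
test_summary_1_duration_microseconds_count 31
`)

	var p promTextParser
	mfs, err := p.parseToMetricFamilies(txt)
	if err != nil {
		return false
	}

	// one family, typed summary, despite three differently named series
	mf := mfs.Get("test_summary_1_duration_microseconds")
	return mfs.Len() == 1 && mf != nil && mf.Type() == textparse.MetricTypeSummary
}
```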
+// addGauge appends or reuses a metric slot. The len==cap check lets the
+// parser reuse Metric slots (and their label slices) across scrapes instead
+// of reallocating them; reset() truncates metrics to length 0 but keeps the
+// capacity.
+func (p *promTextParser) addGauge(value float64) {
+	p.currSeries = p.currSeries[1:] // remove "__name__"
+
+	if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+		p.currMF.metrics = append(p.currMF.metrics, Metric{
+			labels: copyLabels(p.currSeries),
+			gauge:  &Gauge{value: value},
+		})
+	} else {
+		p.currMF.metrics = p.currMF.metrics[:v+1]
+		if p.currMF.metrics[v].gauge == nil {
+			p.currMF.metrics[v].gauge = &Gauge{}
+		}
+		p.currMF.metrics[v].gauge.value = value
+		p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+		p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+	}
+}
+
+func (p *promTextParser) addCounter(value float64) {
+	p.currSeries = p.currSeries[1:] // remove "__name__"
+
+	if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+		p.currMF.metrics = append(p.currMF.metrics, Metric{
+			labels:  copyLabels(p.currSeries),
+			counter: &Counter{value: value},
+		})
+	} else {
+		p.currMF.metrics = p.currMF.metrics[:v+1]
+		if p.currMF.metrics[v].counter == nil {
+			p.currMF.metrics[v].counter = &Counter{}
+		}
+		p.currMF.metrics[v].counter.value = value
+		p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+		p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+	}
+}
+
+func (p *promTextParser) addUnknown(value float64) {
+	p.currSeries = p.currSeries[1:] // remove "__name__"
+
+	if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+		p.currMF.metrics = append(p.currMF.metrics, Metric{
+			labels:  copyLabels(p.currSeries),
+			untyped: &Untyped{value: value},
+		})
+	} else {
+		p.currMF.metrics = p.currMF.metrics[:v+1]
+		if p.currMF.metrics[v].untyped == nil {
+			p.currMF.metrics[v].untyped = &Untyped{}
+		}
+		p.currMF.metrics[v].untyped.value = value
+		p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+		p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+	}
+}
+
+func (p *promTextParser) addSummary(value float64) {
+	hash := p.currSeries.Hash()
+
+	p.currSeries = p.currSeries[1:] // remove "__name__"
+
+	s, ok := p.summaries[hash]
+	if !ok {
+		if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+			s = &Summary{}
+			p.currMF.metrics = append(p.currMF.metrics, Metric{
+				labels:  copyLabels(p.currSeries),
+				summary: s,
+			})
+		} else {
+			p.currMF.metrics = p.currMF.metrics[:v+1]
+			if p.currMF.metrics[v].summary == nil {
+				p.currMF.metrics[v].summary = &Summary{}
+			}
+			p.currMF.metrics[v].summary.sum = 0
+			p.currMF.metrics[v].summary.count = 0
+			p.currMF.metrics[v].summary.quantiles = p.currMF.metrics[v].summary.quantiles[:0]
+			p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+			p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+			s = p.currMF.metrics[v].summary
+		}
+
+		p.summaries[hash] = s
+	}
+
+	switch {
+	case p.isQuantile:
+		s.quantiles = append(s.quantiles, Quantile{quantile: p.currQuantile, value: value})
+	case p.isSum:
+		s.sum = value
+	case p.isCount:
+		s.count = value
+	}
+}
+
+func (p *promTextParser) addHistogram(value float64) {
+	hash := p.currSeries.Hash()
+
+	p.currSeries = p.currSeries[1:] // remove "__name__"
+
+	h, ok := p.histograms[hash]
+	if !ok {
+		if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+			h = &Histogram{}
+			p.currMF.metrics = append(p.currMF.metrics, Metric{
+				labels:    copyLabels(p.currSeries),
+				histogram: h,
+			})
+		} else {
+			p.currMF.metrics = p.currMF.metrics[:v+1]
+			if p.currMF.metrics[v].histogram == nil {
+				p.currMF.metrics[v].histogram = &Histogram{}
+			}
+			p.currMF.metrics[v].histogram.sum = 0
+			p.currMF.metrics[v].histogram.count = 0
+			p.currMF.metrics[v].histogram.buckets = p.currMF.metrics[v].histogram.buckets[:0]
+			p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+			p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ h = p.currMF.metrics[v].histogram + } + + p.histograms[hash] = h + } + + switch { + case p.isBucket: + h.buckets = append(h.buckets, Bucket{upperBound: p.currBucket, cumulativeCount: value}) + case p.isSum: + h.sum = value + case p.isCount: + h.count = value + } +} + +func (p *promTextParser) reset() { + p.currMF = nil + p.currSeries = p.currSeries[:0] + + if p.metrics == nil { + p.metrics = make(MetricFamilies) + } + for _, mf := range p.metrics { + mf.help = "" + mf.typ = "" + mf.metrics = mf.metrics[:0] + } + + if p.summaries == nil { + p.summaries = make(map[uint64]*Summary) + } + for k := range p.summaries { + delete(p.summaries, k) + } + + if p.histograms == nil { + p.histograms = make(map[uint64]*Histogram) + } + for k := range p.histograms { + delete(p.histograms, k) + } +} + +func copyLabels(lbs []labels.Label) []labels.Label { + return append([]labels.Label(nil), lbs...) +} + +func removeLabel(lbs labels.Labels, name string) (labels.Labels, string, bool) { + for i, v := range lbs { + if v.Name == name { + return append(lbs[:i], lbs[i+1:]...), v.Value, true + } + } + return lbs, "", false +} + +func isSummaryOrHistogram(typ textparse.MetricType) bool { + return typ == textparse.MetricTypeSummary || typ == textparse.MetricTypeHistogram +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/parse_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/parse_test.go new file mode 100644 index 00000000000000..ade471f0dfb744 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/parse_test.go @@ -0,0 +1,1675 @@ +package prometheus + +import ( + "bytes" + "fmt" + "math" + "os" + "testing" + + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + dataMultilineHelp, _ = os.ReadFile("testdata/multiline-help.txt") + + dataGaugeMeta, _ = os.ReadFile("testdata/gauge-meta.txt") + dataGaugeNoMeta, _ = os.ReadFile("testdata/gauge-no-meta.txt") + dataCounterMeta, _ = os.ReadFile("testdata/counter-meta.txt") + dataCounterNoMeta, _ = os.ReadFile("testdata/counter-no-meta.txt") + dataSummaryMeta, _ = os.ReadFile("testdata/summary-meta.txt") + dataSummaryNoMeta, _ = os.ReadFile("testdata/summary-no-meta.txt") + dataHistogramMeta, _ = os.ReadFile("testdata/histogram-meta.txt") + dataHistogramNoMeta, _ = os.ReadFile("testdata/histogram-no-meta.txt") + dataAllTypes = joinData( + dataGaugeMeta, dataGaugeNoMeta, dataCounterMeta, dataCounterNoMeta, + dataSummaryMeta, dataSummaryNoMeta, dataHistogramMeta, dataHistogramNoMeta, + ) +) + +func Test_testParseDataIsValid(t *testing.T) { + for name, data := range map[string][]byte{ + "dataMultilineHelp": dataMultilineHelp, + "dataGaugeMeta": dataGaugeMeta, + "dataGaugeNoMeta": dataGaugeNoMeta, + "dataCounterMeta": dataCounterMeta, + "dataCounterNoMeta": dataCounterNoMeta, + "dataSummaryMeta": dataSummaryMeta, + "dataSummaryNoMeta": dataSummaryNoMeta, + "dataHistogramMeta": dataHistogramMeta, + "dataHistogramNoMeta": dataHistogramNoMeta, + "dataAllTypes": dataAllTypes, + } { + require.NotNilf(t, data, name) + } +} + +func TestPromTextParser_parseToMetricFamilies(t *testing.T) { + tests := map[string]struct { + input []byte + want MetricFamilies + }{ + "Gauge with multiline HELP": { + input: dataMultilineHelp, + want: MetricFamilies{ + "test_gauge_metric_1": { + name: "test_gauge_metric_1", + help: "First line. 
Second line.", + typ: textparse.MetricTypeGauge, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + gauge: &Gauge{value: 11}, + }, + }, + }, + }, + }, + "Gauge with meta parsed as Gauge": { + input: dataGaugeMeta, + want: MetricFamilies{ + "test_gauge_metric_1": { + name: "test_gauge_metric_1", + help: "Test Gauge Metric 1", + typ: textparse.MetricTypeGauge, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + gauge: &Gauge{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + gauge: &Gauge{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + gauge: &Gauge{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + gauge: &Gauge{value: 14}, + }, + }, + }, + "test_gauge_metric_2": { + name: "test_gauge_metric_2", + typ: textparse.MetricTypeGauge, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + gauge: &Gauge{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + gauge: &Gauge{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + gauge: &Gauge{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + gauge: &Gauge{value: 14}, + }, + }, + }, + }, + }, + "Counter with meta parsed as Counter": { + input: dataCounterMeta, + want: MetricFamilies{ + "test_counter_metric_1_total": { + name: "test_counter_metric_1_total", + help: "Test Counter Metric 1", + typ: textparse.MetricTypeCounter, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + counter: &Counter{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + counter: &Counter{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + counter: &Counter{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + counter: &Counter{value: 14}, + }, + }, + }, + "test_counter_metric_2_total": { + name: "test_counter_metric_2_total", + typ: textparse.MetricTypeCounter, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + counter: &Counter{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + counter: &Counter{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + counter: &Counter{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + counter: &Counter{value: 14}, + }, + }, + }, + }, + }, + "Summary with meta parsed as Summary": { + input: dataSummaryMeta, + want: MetricFamilies{ + "test_summary_1_duration_microseconds": { + name: "test_summary_1_duration_microseconds", + help: "Test Summary Metric 1", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + 
{quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + }, + }, + "test_summary_2_duration_microseconds": { + name: "test_summary_2_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + }, + }, + }, + }, + "Histogram with meta parsed as Histogram": { + input: dataHistogramMeta, + want: MetricFamilies{ + "test_histogram_1_duration_seconds": { + name: "test_histogram_1_duration_seconds", + help: "Test Histogram Metric 1", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + }, + }, + "test_histogram_2_duration_seconds": { + name: "test_histogram_2_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 
0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + }, + }, + }, + }, + "Gauge no meta parsed as Untyped": { + input: dataGaugeNoMeta, + want: MetricFamilies{ + "test_gauge_no_meta_metric_1": { + name: "test_gauge_no_meta_metric_1", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_gauge_no_meta_metric_2": { + name: "test_gauge_no_meta_metric_2", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + }, + }, + "Counter no meta parsed as Untyped": { + input: dataCounterNoMeta, + want: MetricFamilies{ + "test_counter_no_meta_metric_1_total": { + name: "test_counter_no_meta_metric_1_total", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_counter_no_meta_metric_2_total": { + name: "test_counter_no_meta_metric_2_total", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + }, + }, + "Summary no meta parsed as Summary": { + input: dataSummaryNoMeta, + want: MetricFamilies{ + "test_summary_no_meta_1_duration_microseconds": { + name: "test_summary_no_meta_1_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 
283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + }, + }, + "test_summary_no_meta_2_duration_microseconds": { + name: "test_summary_no_meta_2_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + }, + }, + }, + }, + "Histogram no meta parsed as Histogram": { + input: dataHistogramNoMeta, + want: MetricFamilies{ + "test_histogram_no_meta_1_duration_seconds": { + name: "test_histogram_no_meta_1_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, 
cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + }, + }, + "test_histogram_no_meta_2_duration_seconds": { + name: "test_histogram_no_meta_2_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + }, + }, + }, + }, + "All types": { + input: dataAllTypes, + want: MetricFamilies{ + "test_gauge_metric_1": { + name: "test_gauge_metric_1", + help: "Test Gauge Metric 1", + typ: textparse.MetricTypeGauge, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + gauge: &Gauge{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + gauge: &Gauge{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + gauge: &Gauge{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + gauge: &Gauge{value: 14}, + }, + }, + }, + "test_gauge_metric_2": { + name: "test_gauge_metric_2", + typ: textparse.MetricTypeGauge, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + gauge: &Gauge{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + gauge: &Gauge{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + gauge: &Gauge{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + gauge: &Gauge{value: 14}, + }, + }, + }, + "test_counter_metric_1_total": { + name: "test_counter_metric_1_total", + help: "Test Counter Metric 1", + typ: textparse.MetricTypeCounter, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + counter: &Counter{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + counter: &Counter{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + counter: &Counter{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + counter: &Counter{value: 14}, + }, + }, + }, + "test_counter_metric_2_total": { + name: "test_counter_metric_2_total", + typ: textparse.MetricTypeCounter, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + counter: &Counter{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + counter: &Counter{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + counter: &Counter{value: 13}, + }, + 
{ + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + counter: &Counter{value: 14}, + }, + }, + }, + "test_summary_1_duration_microseconds": { + name: "test_summary_1_duration_microseconds", + help: "Test Summary Metric 1", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + }, + }, + "test_summary_2_duration_microseconds": { + name: "test_summary_2_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + }, + }, + "test_histogram_1_duration_seconds": { + name: "test_histogram_1_duration_seconds", + help: "Test Histogram Metric 1", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 
5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + }, + }, + "test_histogram_2_duration_seconds": { + name: "test_histogram_2_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + }, + }, + "test_gauge_no_meta_metric_1": { + name: "test_gauge_no_meta_metric_1", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_gauge_no_meta_metric_2": { + name: "test_gauge_no_meta_metric_2", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_counter_no_meta_metric_1_total": { + name: "test_counter_no_meta_metric_1_total", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: &Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_counter_no_meta_metric_2_total": { + name: "test_counter_no_meta_metric_2_total", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + untyped: 
&Untyped{value: 11}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + untyped: &Untyped{value: 12}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + untyped: &Untyped{value: 13}, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + untyped: &Untyped{value: 14}, + }, + }, + }, + "test_summary_no_meta_1_duration_microseconds": { + name: "test_summary_no_meta_1_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 283201.29, + count: 31, + quantiles: []Quantile{ + {quantile: 0.5, value: 4931.921}, + {quantile: 0.9, value: 4932.921}, + {quantile: 0.99, value: 4933.921}, + }, + }, + }, + }, + }, + "test_summary_no_meta_2_duration_microseconds": { + name: "test_summary_no_meta_2_duration_microseconds", + typ: textparse.MetricTypeSummary, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + summary: &Summary{ + sum: 383201.29, + count: 41, + quantiles: []Quantile{ + {quantile: 0.5, value: 5931.921}, + {quantile: 0.9, value: 5932.921}, + {quantile: 0.99, value: 5933.921}, + }, + }, + }, + }, + }, + "test_histogram_no_meta_1_duration_seconds": { + name: "test_histogram_no_meta_1_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + 
labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00147889, + count: 6, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 4}, + {upperBound: 0.5, cumulativeCount: 5}, + {upperBound: math.Inf(1), cumulativeCount: 6}, + }, + }, + }, + }, + }, + "test_histogram_no_meta_2_duration_seconds": { + name: "test_histogram_no_meta_2_duration_seconds", + typ: textparse.MetricTypeHistogram, + metrics: []Metric{ + { + labels: labels.Labels{{Name: "label1", Value: "value1"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value2"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value3"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + { + labels: labels.Labels{{Name: "label1", Value: "value4"}}, + histogram: &Histogram{ + sum: 0.00247889, + count: 9, + buckets: []Bucket{ + {upperBound: 0.1, cumulativeCount: 7}, + {upperBound: 0.5, cumulativeCount: 8}, + {upperBound: math.Inf(1), cumulativeCount: 9}, + }, + }, + }, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var p promTextParser + + for i := 0; i < 10; i++ { + t.Run(fmt.Sprintf("parse num %d", i+1), func(t *testing.T) { + mfs, err := p.parseToMetricFamilies(test.input) + if len(test.want) > 0 { + assert.Equal(t, test.want, mfs) + } else { + assert.Error(t, err) + } + }) + } + }) + } +} + +func TestPromTextParser_parseToMetricFamiliesWithSelector(t *testing.T) { + sr, err := selector.Parse(`test_gauge_metric_1{label1="value2"}`) + require.NoError(t, err) + + p := promTextParser{sr: sr} + + txt := []byte(` +test_gauge_metric_1{label1="value1"} 1 +test_gauge_metric_1{label1="value2"} 1 +test_gauge_metric_2{label1="value1"} 1 +test_gauge_metric_2{label1="value2"} 1 +`) + + want := MetricFamilies{ + "test_gauge_metric_1": &MetricFamily{ + name: "test_gauge_metric_1", + typ: textparse.MetricTypeUnknown, + metrics: []Metric{ + {labels: labels.Labels{{Name: "label1", Value: "value2"}}, untyped: &Untyped{value: 1}}, + }, + }, + } + + mfs, err := p.parseToMetricFamilies(txt) + + require.NoError(t, err) + assert.Equal(t, want, mfs) +} + +func TestPromTextParser_parseToSeries(t *testing.T) { + tests := map[string]struct { + input []byte + want Series + }{ + "All types": { + input: []byte(` +# HELP test_gauge_metric_1 Test Gauge Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_no_meta_metric_1{label1="value1"} 11 +# HELP test_counter_metric_1_total Test Counter Metric 1 +# TYPE test_counter_metric_1_total counter +test_counter_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value1"} 
11 +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31 +# HELP test_histogram_1_duration_seconds Test Histogram Metric 1 +# TYPE test_histogram_1_duration_seconds histogram +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value1"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6 +`), + want: Series{ + // Gauge + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_gauge_metric_1"}, + {Name: "label1", Value: "value1"}, + }, + Value: 11, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_gauge_no_meta_metric_1"}, + {Name: "label1", Value: "value1"}, + }, + Value: 11, + }, + // Counter + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_counter_metric_1_total"}, + {Name: "label1", Value: "value1"}, + }, + Value: 11, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_counter_no_meta_metric_1_total"}, + {Name: "label1", Value: "value1"}, + }, + Value: 11, + }, + //// Summary + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.5"}, + }, + Value: 4931.921, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.9"}, + }, + Value: 4932.921, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.99"}, + }, + Value: 4933.921, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_1_duration_microseconds_sum"}, + {Name: "label1", Value: "value1"}, + }, + Value: 283201.29, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_1_duration_microseconds_count"}, + {Name: "label1", Value: "value1"}, + }, + Value: 31, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.5"}, + }, + Value: 4931.921, + }, + { + Labels: 
labels.Labels{ + {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.9"}, + }, + Value: 4932.921, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"}, + {Name: "label1", Value: "value1"}, + {Name: "quantile", Value: "0.99"}, + }, + Value: 4933.921, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds_sum"}, + {Name: "label1", Value: "value1"}, + }, + Value: 283201.29, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds_count"}, + {Name: "label1", Value: "value1"}, + }, + Value: 31, + }, + // Histogram + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "0.1"}, + }, + Value: 4, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "0.5"}, + }, + Value: 5, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "+Inf"}, + }, + Value: 6, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_1_duration_seconds_sum"}, + {Name: "label1", Value: "value1"}, + }, + Value: 0.00147889, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_1_duration_seconds_count"}, + {Name: "label1", Value: "value1"}, + }, + Value: 6, + }, + + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "0.1"}, + }, + Value: 4, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "0.5"}, + }, + Value: 5, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"}, + {Name: "label1", Value: "value1"}, + {Name: "le", Value: "+Inf"}, + }, + Value: 6, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_sum"}, + {Name: "label1", Value: "value1"}, + }, + Value: 0.00147889, + }, + { + Labels: labels.Labels{ + {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_count"}, + {Name: "label1", Value: "value1"}, + }, + Value: 6, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var p promTextParser + + for i := 0; i < 10; i++ { + t.Run(fmt.Sprintf("parse num %d", i+1), func(t *testing.T) { + series, err := p.parseToSeries(test.input) + + if len(test.want) > 0 { + test.want.Sort() + assert.Equal(t, test.want, series) + } else { + assert.Error(t, err) + } + }) + } + }) + } +} + +func TestPromTextParser_parseToSeriesWithSelector(t *testing.T) { + sr, err := selector.Parse(`test_gauge_metric_1{label1="value2"}`) + require.NoError(t, err) + + p := promTextParser{sr: sr} + + txt := []byte(` +test_gauge_metric_1{label1="value1"} 1 +test_gauge_metric_1{label1="value2"} 1 +test_gauge_metric_2{label1="value1"} 1 +test_gauge_metric_2{label1="value2"} 1 +`) + + want := Series{SeriesSample{ + Labels: labels.Labels{ + {Name: "__name__", Value: "test_gauge_metric_1"}, + {Name: "label1", Value: "value2"}, + }, + Value: 1, + }} + + 
series, err := p.parseToSeries(txt) + + require.NoError(t, err) + assert.Equal(t, want, series) +} + +func joinData(data ...[]byte) []byte { + var buf bytes.Buffer + for _, v := range data { + _, _ = buf.Write(v) + _ = buf.WriteByte('\n') + } + return buf.Bytes() +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md new file mode 100644 index 00000000000000..357e786cd4699e --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/README.md @@ -0,0 +1,102 @@ +<!-- +title: "Time series selector" +custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/prometheus/selector/README.md" +sidebar_label: "Time series selector" +learn_status: "Published" +learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages" +--> + +# Time series selector + +Selectors allow selecting and filtering a set of time series. + +## Simple Selector + +In the simplest form, you only need to specify a metric name. + +### Syntax + +```cmd + <line> ::= <metric_name_pattern> + <metric_name_pattern> ::= simple pattern +``` + +The metric name pattern syntax is [simple pattern](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). + +### Examples + +This example selects all time series that have the `go_memstats_alloc_bytes` metric name: + +```cmd +go_memstats_alloc_bytes +``` + +This example selects all time series whose metric names start with `go_memstats_`: + +```cmd +go_memstats_* +``` + +This example selects all time series whose metric names start with `go_` except `go_memstats_`: + +```cmd +!go_memstats_* go_* +``` + +## Advanced Selector + +It is possible to filter these time series further by appending a comma-separated list of label matchers in curly braces (`{}`). + +### Syntax + +```cmd + <line> ::= [ <metric_name_pattern> ]{ <list_of_selectors> } + <metric_name_pattern> ::= simple pattern + <list_of_selectors> ::= a comma-separated list of <label_name><op><label_value_pattern> + <label_name> ::= an exact label name + <op> ::= [ '=', '!=', '=~', '!~', '=*', '!*' ] + <label_value_pattern> ::= a label value pattern, depends on <op> +``` + +The metric name pattern syntax is [simple pattern](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md). + +Label matching operators: + +- `=`: Match labels that are exactly equal to the provided string. +- `!=`: Match labels that are not equal to the provided string. +- `=~`: Match labels that [regex-match](https://golang.org/pkg/regexp/syntax/) the provided string. +- `!~`: Match labels that do not [regex-match](https://golang.org/pkg/regexp/syntax/) the provided string. +- `=*`: Match labels that [simple-pattern-match](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md) the provided string. +- `!*`: Match labels that do not [simple-pattern-match](https://github.com/netdata/netdata/blob/master/src/libnetdata/simple_pattern/README.md) the provided string.
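+ +These operators are evaluated against a series' label set. The following is a minimal sketch of how a selector might be used from Go code (it assumes the `Parse` and `Selector.Matches` APIs defined in this package, and a label set with `__name__` first, as produced by the Prometheus text parser): + +```go +package main + +import ( + "fmt" + + "github.com/netdata/go.d.plugin/pkg/prometheus/selector" + + "github.com/prometheus/prometheus/model/labels" +) + +func main() { + // Metric name must simple-pattern-match "node_cooling_*", + // and the "type" label must not be equal to "Fan". + sr, err := selector.Parse(`node_cooling_*{type!="Fan"}`) + if err != nil { + panic(err) + } + + lbs := labels.Labels{ + {Name: labels.MetricName, Value: "node_cooling_device_cur_state"}, + {Name: "type", Value: "Processor"}, + } + + fmt.Println(sr.Matches(lbs)) // true +} +```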
+ +### Examples + +This example selects all time series that: + +- have the `node_cooling_device_cur_state` metric name, and +- have a `type` label whose value is not equal to `Fan`: + +```cmd +node_cooling_device_cur_state{type!="Fan"} +``` + +This example selects all time series that: + +- have the `node_filesystem_size_bytes` metric name, +- have a `device` label whose value is either `/dev/nvme0n1p1` or `/dev/nvme0n1p2`, and +- have an `fstype` label equal to `ext4`: + +```cmd +node_filesystem_size_bytes{device=~"/dev/nvme0n1p1$|/dev/nvme0n1p2$",fstype="ext4"} +``` + +Label matchers can also be applied to metric names by matching against the internal `__name__` label. + +For example, the expression `node_filesystem_size_bytes` is equivalent to `{__name__=*"node_filesystem_size_bytes"}`. +This allows using all operators (not only `=*`) for metric name matching. + +The following expression selects all metrics that have a name starting with `node_`: + +```cmd +{__name__=*"node_*"} +``` diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr.go new file mode 100644 index 00000000000000..8d09db20603063 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import "fmt" + +// Expr is an allow/deny selector expression, typically read from YAML configuration. +type Expr struct { + Allow []string `yaml:"allow"` + Deny []string `yaml:"deny"` +} + +func (e Expr) Empty() bool { + return len(e.Allow) == 0 && len(e.Deny) == 0 +} + +// Parse compiles the allow and deny lists into a single Selector: a time series is selected when it matches at least one allow selector and none of the deny selectors. +func (e Expr) Parse() (Selector, error) { + if e.Empty() { + return nil, nil + } + + var srs []Selector + var allow Selector + var deny Selector + + for _, item := range e.Allow { + sr, err := Parse(item) + if err != nil { + return nil, fmt.Errorf("parse selector '%s': %v", item, err) + } + srs = append(srs, sr) + } + + switch len(srs) { + case 0: + allow = trueSelector{} + case 1: + allow = srs[0] + default: + allow = Or(srs[0], srs[1], srs[2:]...) + } + + srs = srs[:0] + for _, item := range e.Deny { + sr, err := Parse(item) + if err != nil { + return nil, fmt.Errorf("parse selector '%s': %v", item, err) + } + srs = append(srs, sr) + } + + switch len(srs) { + case 0: + deny = falseSelector{} + case 1: + deny = srs[0] + default: + deny = Or(srs[0], srs[1], srs[2:]...)
+ } + + return And(allow, Not(deny)), nil +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr_test.go new file mode 100644 index 00000000000000..598cef9b87d12c --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/expr_test.go @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExpr_Empty(t *testing.T) { + tests := map[string]struct { + expr Expr + expected bool + }{ + "empty: both allow and deny": { + expr: Expr{ + Allow: []string{}, + Deny: []string{}, + }, + expected: true, + }, + "nil: both allow and deny": { + expected: true, + }, + "nil, empty: allow, deny": { + expr: Expr{ + Deny: []string{""}, + }, + expected: false, + }, + "empty, nil: allow, deny": { + expr: Expr{ + Allow: []string{""}, + }, + expected: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.expected { + assert.True(t, test.expr.Empty()) + } else { + assert.False(t, test.expr.Empty()) + } + }) + } +} + +func TestExpr_Parse(t *testing.T) { + tests := map[string]struct { + expr Expr + expectedSr Selector + expectedErr bool + }{ + "not set: both allow and deny": { + expr: Expr{}, + }, + "set: both allow and deny": { + expr: Expr{ + Allow: []string{ + "go_memstats_*", + "node_*", + }, + Deny: []string{ + "go_memstats_frees_total", + "node_cooling_*", + }, + }, + expectedSr: andSelector{ + lhs: orSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustSPName("node_*"), + }, + rhs: Not(orSelector{ + lhs: mustSPName("go_memstats_frees_total"), + rhs: mustSPName("node_cooling_*"), + }), + }, + }, + "set: only includes": { + expr: Expr{ + Allow: []string{ + "go_memstats_*", + "node_*", + }, + }, + expectedSr: andSelector{ + lhs: orSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustSPName("node_*"), + }, + rhs: Not(falseSelector{}), + }, + }, + "set: only excludes": { + expr: Expr{ + Deny: []string{ + "go_memstats_frees_total", + "node_cooling_*", + }, + }, + expectedSr: andSelector{ + lhs: trueSelector{}, + rhs: Not(orSelector{ + lhs: mustSPName("go_memstats_frees_total"), + rhs: mustSPName("node_cooling_*"), + }), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + m, err := test.expr.Parse() + + if test.expectedErr { + assert.Error(t, err) + } else { + assert.Equal(t, test.expectedSr, m) + } + }) + } +} + +func TestExprSelector_Matches(t *testing.T) { + tests := map[string]struct { + expr Expr + lbs labels.Labels + expectedMatches bool + }{ + "allow matches: single pattern": { + expr: Expr{ + Allow: []string{"go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: true, + }, + "allow matches: several patterns": { + expr: Expr{ + Allow: []string{"node_*", "go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: true, + }, + "allow not matches": { + expr: Expr{ + Allow: []string{"node_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "deny matches: single pattern": { + expr: Expr{ + Deny: []string{"go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "deny 
matches: several patterns": { + expr: Expr{ + Deny: []string{"node_*", "go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "deny not matches": { + expr: Expr{ + Deny: []string{"node_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: true, + }, + "allow and deny matches: single pattern": { + expr: Expr{ + Allow: []string{"go_*"}, + Deny: []string{"go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "allow and deny matches: several patterns": { + expr: Expr{ + Allow: []string{"node_*", "go_*"}, + Deny: []string{"node_*", "go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "allow matches and deny not matches": { + expr: Expr{ + Allow: []string{"go_*"}, + Deny: []string{"node_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: true, + }, + "allow not matches and deny matches": { + expr: Expr{ + Allow: []string{"node_*"}, + Deny: []string{"go_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + "allow not matches and deny not matches": { + expr: Expr{ + Allow: []string{"node_*"}, + Deny: []string{"node_*"}, + }, + lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}, + expectedMatches: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sr, err := test.expr.Parse() + require.NoError(t, err) + + if test.expectedMatches { + assert.True(t, sr.Matches(test.lbs)) + } else { + assert.False(t, sr.Matches(test.lbs)) + } + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical.go new file mode 100644 index 00000000000000..1556d17156c169 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical.go @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "github.com/prometheus/prometheus/model/labels" +) + +type ( + trueSelector struct{} + falseSelector struct{} + negSelector struct{ s Selector } + andSelector struct{ lhs, rhs Selector } + orSelector struct{ lhs, rhs Selector } +) + +func (trueSelector) Matches(_ labels.Labels) bool { return true } +func (falseSelector) Matches(_ labels.Labels) bool { return false } +func (s negSelector) Matches(lbs labels.Labels) bool { return !s.s.Matches(lbs) } +func (s andSelector) Matches(lbs labels.Labels) bool { return s.lhs.Matches(lbs) && s.rhs.Matches(lbs) } +func (s orSelector) Matches(lbs labels.Labels) bool { return s.lhs.Matches(lbs) || s.rhs.Matches(lbs) } + +// True returns a selector which always returns true +func True() Selector { + return trueSelector{} +} + +// And returns a selector which returns true only if all of its sub-selectors return true +func And(lhs, rhs Selector, others ...Selector) Selector { + s := andSelector{lhs: lhs, rhs: rhs} + if len(others) == 0 { + return s + } + return And(s, others[0], others[1:]...) +} + +// Or returns a selector which returns true if any of its sub-selectors return true +func Or(lhs, rhs Selector, others ...Selector) Selector { + s := orSelector{lhs: lhs, rhs: rhs} + if len(others) == 0 { + return s + } + return Or(s, others[0], others[1:]...)
+} + +// Not returns a selector which returns the negation of the sub-selector's result +func Not(s Selector) Selector { + return negSelector{s} +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical_test.go new file mode 100644 index 00000000000000..239c7f715e46c2 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/logical_test.go @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTrueSelector_Matches(t *testing.T) { + tests := map[string]struct { + sr trueSelector + lbs labels.Labels + expected bool + }{ + "not empty labels": { + lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}}, + expected: true, + }, + "empty labels": { + expected: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.expected { + assert.True(t, test.sr.Matches(test.lbs)) + } else { + assert.False(t, test.sr.Matches(test.lbs)) + } + }) + } +} + +func TestFalseSelector_Matches(t *testing.T) { + tests := map[string]struct { + sr falseSelector + lbs labels.Labels + expected bool + }{ + "not empty labels": { + lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}}, + expected: false, + }, + "empty labels": { + expected: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.expected { + assert.True(t, test.sr.Matches(test.lbs)) + } else { + assert.False(t, test.sr.Matches(test.lbs)) + } + }) + } +} + +func TestNegSelector_Matches(t *testing.T) { + tests := map[string]struct { + sr negSelector + lbs labels.Labels + expected bool + }{ + "true matcher": { + sr: negSelector{trueSelector{}}, + lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}}, + expected: false, + }, + "false matcher": { + sr: negSelector{falseSelector{}}, + lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}}, + expected: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if test.expected { + assert.True(t, test.sr.Matches(test.lbs)) + } else { + assert.False(t, test.sr.Matches(test.lbs)) + } + }) + } +} + +func TestAndSelector_Matches(t *testing.T) { + tests := map[string]struct { + sr andSelector + lbs labels.Labels + expected bool + }{ + "true, true": { + sr: andSelector{lhs: trueSelector{}, rhs: trueSelector{}}, + expected: true, + }, + "true, false": { + sr: andSelector{lhs: trueSelector{}, rhs: falseSelector{}}, + expected: false, + }, + "false, true": { + sr: andSelector{lhs: falseSelector{}, rhs: trueSelector{}}, + expected: false, + }, + "false, false": { + sr: andSelector{lhs: falseSelector{}, rhs: falseSelector{}}, + expected: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.sr.Matches(test.lbs)) + }) + } +} + +func TestOrSelector_Matches(t *testing.T) { + tests := map[string]struct { + sr orSelector + lbs labels.Labels + expected bool + }{ + "true, true": { + sr: orSelector{lhs: trueSelector{}, rhs: trueSelector{}}, + expected: true, + }, + "true, false": { + sr: orSelector{lhs: trueSelector{}, rhs: falseSelector{}}, + expected: true, + }, + "false, true": { + sr: orSelector{lhs: falseSelector{}, rhs: trueSelector{}}, + expected: true, + }, + "false, false": { + sr: orSelector{lhs: falseSelector{}, rhs:
falseSelector{}}, + expected: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, test.expected, test.sr.Matches(test.lbs)) + }) + } +} + +func Test_And(t *testing.T) { + tests := map[string]struct { + srs []Selector + expected Selector + }{ + "2 selectors": { + srs: []Selector{trueSelector{}, trueSelector{}}, + expected: andSelector{ + lhs: trueSelector{}, + rhs: trueSelector{}, + }, + }, + "4 selectors": { + srs: []Selector{trueSelector{}, trueSelector{}, trueSelector{}, trueSelector{}}, + expected: andSelector{ + lhs: andSelector{ + lhs: andSelector{ + lhs: trueSelector{}, + rhs: trueSelector{}, + }, + rhs: trueSelector{}, + }, + rhs: trueSelector{}}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require.GreaterOrEqual(t, len(test.srs), 2) + + s := And(test.srs[0], test.srs[1], test.srs[2:]...) + assert.Equal(t, test.expected, s) + }) + } +} + +func Test_Or(t *testing.T) { + tests := map[string]struct { + srs []Selector + expected Selector + }{ + "2 selectors": { + srs: []Selector{trueSelector{}, trueSelector{}}, + expected: orSelector{ + lhs: trueSelector{}, + rhs: trueSelector{}, + }, + }, + "4 selectors": { + srs: []Selector{trueSelector{}, trueSelector{}, trueSelector{}, trueSelector{}}, + expected: orSelector{ + lhs: orSelector{ + lhs: orSelector{ + lhs: trueSelector{}, + rhs: trueSelector{}, + }, + rhs: trueSelector{}, + }, + rhs: trueSelector{}}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require.GreaterOrEqual(t, len(test.srs), 2) + + s := Or(test.srs[0], test.srs[1], test.srs[2:]...) + assert.Equal(t, test.expected, s) + }) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse.go new file mode 100644 index 00000000000000..4a5216d4298cf5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "fmt" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +var ( + reLV = regexp.MustCompile(`^(?P<label_name>[a-zA-Z0-9_]+)(?P<op>=~|!~|=\*|!\*|=|!=)"(?P<pattern>.+)"$`) +) + +func Parse(expr string) (Selector, error) { + var srs []Selector + lvs := strings.Split(unsugarExpr(expr), ",") + + for _, lv := range lvs { + sr, err := parseSelector(lv) + if err != nil { + return nil, err + } + srs = append(srs, sr) + } + + switch len(srs) { + case 0: + return nil, nil + case 1: + return srs[0], nil + default: + return And(srs[0], srs[1], srs[2:]...), nil + } +} + +func parseSelector(line string) (Selector, error) { + sub := reLV.FindStringSubmatch(strings.TrimSpace(line)) + if sub == nil { + return nil, fmt.Errorf("invalid selector syntax: '%s'", line) + } + + name, op, pattern := sub[1], sub[2], strings.Trim(sub[3], "\"") + + var m matcher.Matcher + var err error + + switch op { + case OpEqual, OpNegEqual: + m, err = matcher.NewStringMatcher(pattern, true, true) + case OpRegexp, OpNegRegexp: + m, err = matcher.NewRegExpMatcher(pattern) + case OpSimplePatterns, OpNegSimplePatterns: + m, err = matcher.NewSimplePatternsMatcher(pattern) + default: + err = fmt.Errorf("unknown matching operator: %s", op) + } + if err != nil { + return nil, err + } + + sr := labelSelector{ + name: name, + m: m, + } + + if neg := strings.HasPrefix(op, "!"); neg { + return Not(sr), nil + } + return sr, nil +} + +func unsugarExpr(expr string) string { + // name 
=> __name__=*"name" + // name{label="value"} => __name__=*"name",label="value" + // {label="value"} => label="value" + expr = strings.TrimSpace(expr) + + switch idx := strings.IndexByte(expr, '{'); true { + case idx == -1: + expr = fmt.Sprintf(`__name__%s"%s"`, + OpSimplePatterns, + strings.TrimSpace(expr), + ) + case idx == 0: + expr = strings.Trim(expr, "{}") + default: + expr = fmt.Sprintf(`__name__%s"%s",%s`, + OpSimplePatterns, + strings.TrimSpace(expr[:idx]), + strings.Trim(expr[idx:], "{}"), + ) + } + return expr +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse_test.go new file mode 100644 index 00000000000000..ab70035d7b172a --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/parse_test.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "fmt" + "testing" + + "github.com/netdata/go.d.plugin/pkg/matcher" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + tests := map[string]struct { + input string + expectedSr Selector + expectedErr bool + }{ + "sp op: only metric name": { + input: "go_memstats_alloc_bytes !go_memstats_* *", + expectedSr: mustSPName("go_memstats_alloc_bytes !go_memstats_* *"), + }, + "string op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"value"}`, OpEqual), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustString("label", "value"), + }, + }, + "neg string op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"value"}`, OpNegEqual), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: Not(mustString("label", "value")), + }, + }, + "regexp op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"valu.+"}`, OpRegexp), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustRegexp("label", "valu.+"), + }, + }, + "neg regexp op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"valu.+"}`, OpNegRegexp), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: Not(mustRegexp("label", "valu.+")), + }, + }, + "sp op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"valu*"}`, OpSimplePatterns), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustSP("label", "valu*"), + }, + }, + "neg sp op: metric name with labels": { + input: fmt.Sprintf(`go_memstats_*{label%s"valu*"}`, OpNegSimplePatterns), + expectedSr: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: Not(mustSP("label", "valu*")), + }, + }, + "metric name with several labels": { + input: fmt.Sprintf(`go_memstats_*{label1%s"value1",label2%s"value2"}`, OpEqual, OpEqual), + expectedSr: andSelector{ + lhs: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustString("label1", "value1"), + }, + rhs: mustString("label2", "value2"), + }, + }, + "only labels (unsugar)": { + input: fmt.Sprintf(`{__name__%s"go_memstats_*",label1%s"value1",label2%s"value2"}`, + OpSimplePatterns, OpEqual, OpEqual), + expectedSr: andSelector{ + lhs: andSelector{ + lhs: mustSPName("go_memstats_*"), + rhs: mustString("label1", "value1"), + }, + rhs: mustString("label2", "value2"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sr, err := Parse(test.input) + + if test.expectedErr { + assert.Error(t, err) + } else { + assert.Equal(t, test.expectedSr, sr) + 
} + }) + } +} + +func mustSPName(pattern string) Selector { + return mustSP(labels.MetricName, pattern) +} + +func mustString(name string, pattern string) Selector { + return labelSelector{name: name, m: matcher.Must(matcher.NewStringMatcher(pattern, true, true))} +} + +func mustRegexp(name string, pattern string) Selector { + return labelSelector{name: name, m: matcher.Must(matcher.NewRegExpMatcher(pattern))} +} + +func mustSP(name string, pattern string) Selector { + return labelSelector{name: name, m: matcher.Must(matcher.NewSimplePatternsMatcher(pattern))} +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector.go new file mode 100644 index 00000000000000..1537d33de3155a --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector.go @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "github.com/netdata/go.d.plugin/pkg/matcher" + + "github.com/prometheus/prometheus/model/labels" +) + +// Selector matches a time series based on its label set. +type Selector interface { + Matches(lbs labels.Labels) bool +} + +const ( + OpEqual = "=" + OpNegEqual = "!=" + OpRegexp = "=~" + OpNegRegexp = "!~" + OpSimplePatterns = "=*" + OpNegSimplePatterns = "!*" +) + +type labelSelector struct { + name string + m matcher.Matcher +} + +// Matches reports whether the label set matches the selector. +// It assumes lbs is a parser-produced label set: sorted, with the __name__ label first. +func (s labelSelector) Matches(lbs labels.Labels) bool { + if s.name == labels.MetricName { + return s.m.MatchString(lbs[0].Value) + } + if label, ok := lookupLabel(s.name, lbs[1:]); ok { + return s.m.MatchString(label.Value) + } + return false +} + +type Func func(lbs labels.Labels) bool + +func (fn Func) Matches(lbs labels.Labels) bool { + return fn(lbs) +} + +func lookupLabel(name string, lbs labels.Labels) (labels.Label, bool) { + for _, label := range lbs { + if label.Name == name { + return label, true + } + } + return labels.Label{}, false +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector_test.go b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector_test.go new file mode 100644 index 00000000000000..aa3110b0351254 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/selector/selector_test.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package selector + +import ( + "testing" +) + +func TestLabelMatcher_Matches(t *testing.T) { + +} diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-meta.txt new file mode 100644 index 00000000000000..53eccda636c2c6 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-meta.txt @@ -0,0 +1,11 @@ +# HELP test_counter_metric_1_total Test Counter Metric 1 +# TYPE test_counter_metric_1_total counter +test_counter_metric_1_total{label1="value1"} 11 +test_counter_metric_1_total{label1="value2"} 12 +test_counter_metric_1_total{label1="value3"} 13 +test_counter_metric_1_total{label1="value4"} 14 +# TYPE test_counter_metric_2_total counter +test_counter_metric_2_total{label1="value1"} 11 +test_counter_metric_2_total{label1="value2"} 12 +test_counter_metric_2_total{label1="value3"} 13 +test_counter_metric_2_total{label1="value4"} 14 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-no-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-no-meta.txt new file mode 100644 index 00000000000000..afb11b9b837557 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/counter-no-meta.txt @@ -0,0 +1,8
@@ +test_counter_no_meta_metric_1_total{label1="value1"} 11 +test_counter_no_meta_metric_1_total{label1="value2"} 12 +test_counter_no_meta_metric_1_total{label1="value3"} 13 +test_counter_no_meta_metric_1_total{label1="value4"} 14 +test_counter_no_meta_metric_2_total{label1="value1"} 11 +test_counter_no_meta_metric_2_total{label1="value2"} 12 +test_counter_no_meta_metric_2_total{label1="value3"} 13 +test_counter_no_meta_metric_2_total{label1="value4"} 14 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-meta.txt new file mode 100644 index 00000000000000..c0773a426ea6bf --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-meta.txt @@ -0,0 +1,11 @@ +# HELP test_gauge_metric_1 Test Gauge Metric 1 +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 +test_gauge_metric_1{label1="value2"} 12 +test_gauge_metric_1{label1="value3"} 13 +test_gauge_metric_1{label1="value4"} 14 +# TYPE test_gauge_metric_2 gauge +test_gauge_metric_2{label1="value1"} 11 +test_gauge_metric_2{label1="value2"} 12 +test_gauge_metric_2{label1="value3"} 13 +test_gauge_metric_2{label1="value4"} 14 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-no-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-no-meta.txt new file mode 100644 index 00000000000000..e89e0e4d9b70e4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/gauge-no-meta.txt @@ -0,0 +1,8 @@ +test_gauge_no_meta_metric_1{label1="value1"} 11 +test_gauge_no_meta_metric_1{label1="value2"} 12 +test_gauge_no_meta_metric_1{label1="value3"} 13 +test_gauge_no_meta_metric_1{label1="value4"} 14 +test_gauge_no_meta_metric_2{label1="value1"} 11 +test_gauge_no_meta_metric_2{label1="value2"} 12 +test_gauge_no_meta_metric_2{label1="value3"} 13 +test_gauge_no_meta_metric_2{label1="value4"} 14 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-meta.txt new file mode 100644 index 00000000000000..9b4b8a965ab1c4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-meta.txt @@ -0,0 +1,43 @@ +# HELP test_histogram_1_duration_seconds Test Histogram Metric 1 +# TYPE test_histogram_1_duration_seconds histogram +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value1"} 6 +test_histogram_1_duration_seconds_bucket{label1="value2",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value2",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value2",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value2"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value2"} 6 +test_histogram_1_duration_seconds_bucket{label1="value3",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value3",le="0.5"} 5 +test_histogram_1_duration_seconds_bucket{label1="value3",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value3"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value3"} 6 +test_histogram_1_duration_seconds_bucket{label1="value4",le="0.1"} 4 +test_histogram_1_duration_seconds_bucket{label1="value4",le="0.5"} 5 
+test_histogram_1_duration_seconds_bucket{label1="value4",le="+Inf"} 6 +test_histogram_1_duration_seconds_sum{label1="value4"} 0.00147889 +test_histogram_1_duration_seconds_count{label1="value4"} 6 +# TYPE test_histogram_2_duration_seconds histogram +test_histogram_2_duration_seconds_bucket{label1="value1",le="0.1"} 7 +test_histogram_2_duration_seconds_bucket{label1="value1",le="0.5"} 8 +test_histogram_2_duration_seconds_bucket{label1="value1",le="+Inf"} 9 +test_histogram_2_duration_seconds_sum{label1="value1"} 0.00247889 +test_histogram_2_duration_seconds_count{label1="value1"} 9 +test_histogram_2_duration_seconds_bucket{label1="value2",le="0.1"} 7 +test_histogram_2_duration_seconds_bucket{label1="value2",le="0.5"} 8 +test_histogram_2_duration_seconds_bucket{label1="value2",le="+Inf"} 9 +test_histogram_2_duration_seconds_sum{label1="value2"} 0.00247889 +test_histogram_2_duration_seconds_count{label1="value2"} 9 +test_histogram_2_duration_seconds_bucket{label1="value3",le="0.1"} 7 +test_histogram_2_duration_seconds_bucket{label1="value3",le="0.5"} 8 +test_histogram_2_duration_seconds_bucket{label1="value3",le="+Inf"} 9 +test_histogram_2_duration_seconds_sum{label1="value3"} 0.00247889 +test_histogram_2_duration_seconds_count{label1="value3"} 9 +test_histogram_2_duration_seconds_bucket{label1="value4",le="0.1"} 7 +test_histogram_2_duration_seconds_bucket{label1="value4",le="0.5"} 8 +test_histogram_2_duration_seconds_bucket{label1="value4",le="+Inf"} 9 +test_histogram_2_duration_seconds_sum{label1="value4"} 0.00247889 +test_histogram_2_duration_seconds_count{label1="value4"} 9 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-no-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-no-meta.txt new file mode 100644 index 00000000000000..49def677cc7dc5 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/histogram-no-meta.txt @@ -0,0 +1,40 @@ +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value2"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value2"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value3"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value3"} 6 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="0.1"} 4 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="0.5"} 5 +test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="+Inf"} 6 +test_histogram_no_meta_1_duration_seconds_sum{label1="value4"} 0.00147889 +test_histogram_no_meta_1_duration_seconds_count{label1="value4"} 6 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="0.1"} 7 
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="0.5"} 8 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="+Inf"} 9 +test_histogram_no_meta_2_duration_seconds_sum{label1="value1"} 0.00247889 +test_histogram_no_meta_2_duration_seconds_count{label1="value1"} 9 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="0.1"} 7 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="0.5"} 8 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="+Inf"} 9 +test_histogram_no_meta_2_duration_seconds_sum{label1="value2"} 0.00247889 +test_histogram_no_meta_2_duration_seconds_count{label1="value2"} 9 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="0.1"} 7 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="0.5"} 8 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="+Inf"} 9 +test_histogram_no_meta_2_duration_seconds_sum{label1="value3"} 0.00247889 +test_histogram_no_meta_2_duration_seconds_count{label1="value3"} 9 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="0.1"} 7 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="0.5"} 8 +test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="+Inf"} 9 +test_histogram_no_meta_2_duration_seconds_sum{label1="value4"} 0.00247889 +test_histogram_no_meta_2_duration_seconds_count{label1="value4"} 9 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/multiline-help.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/multiline-help.txt new file mode 100644 index 00000000000000..f1598fcce9b921 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/multiline-help.txt @@ -0,0 +1,3 @@ +# HELP test_gauge_metric_1 \n First line.\n Second line.\n +# TYPE test_gauge_metric_1 gauge +test_gauge_metric_1{label1="value1"} 11 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-meta.txt new file mode 100644 index 00000000000000..3056e80768bdbe --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-meta.txt @@ -0,0 +1,43 @@ +# HELP test_summary_1_duration_microseconds Test Summary Metric 1 +# TYPE test_summary_1_duration_microseconds summary +test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value1"} 31 +test_summary_1_duration_microseconds{label1="value2",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value2",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value2",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value2"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value2"} 31 +test_summary_1_duration_microseconds{label1="value3",quantile="0.5"} 4931.921 +test_summary_1_duration_microseconds{label1="value3",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value3",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value3"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value3"} 31 +test_summary_1_duration_microseconds{label1="value4",quantile="0.5"} 4931.921 
+test_summary_1_duration_microseconds{label1="value4",quantile="0.9"} 4932.921 +test_summary_1_duration_microseconds{label1="value4",quantile="0.99"} 4933.921 +test_summary_1_duration_microseconds_sum{label1="value4"} 283201.29 +test_summary_1_duration_microseconds_count{label1="value4"} 31 +# TYPE test_summary_2_duration_microseconds summary +test_summary_2_duration_microseconds{label1="value1",quantile="0.5"} 5931.921 +test_summary_2_duration_microseconds{label1="value1",quantile="0.9"} 5932.921 +test_summary_2_duration_microseconds{label1="value1",quantile="0.99"} 5933.921 +test_summary_2_duration_microseconds_sum{label1="value1"} 383201.29 +test_summary_2_duration_microseconds_count{label1="value1"} 41 +test_summary_2_duration_microseconds{label1="value2",quantile="0.5"} 5931.921 +test_summary_2_duration_microseconds{label1="value2",quantile="0.9"} 5932.921 +test_summary_2_duration_microseconds{label1="value2",quantile="0.99"} 5933.921 +test_summary_2_duration_microseconds_sum{label1="value2"} 383201.29 +test_summary_2_duration_microseconds_count{label1="value2"} 41 +test_summary_2_duration_microseconds{label1="value3",quantile="0.5"} 5931.921 +test_summary_2_duration_microseconds{label1="value3",quantile="0.9"} 5932.921 +test_summary_2_duration_microseconds{label1="value3",quantile="0.99"} 5933.921 +test_summary_2_duration_microseconds_sum{label1="value3"} 383201.29 +test_summary_2_duration_microseconds_count{label1="value3"} 41 +test_summary_2_duration_microseconds{label1="value4",quantile="0.5"} 5931.921 +test_summary_2_duration_microseconds{label1="value4",quantile="0.9"} 5932.921 +test_summary_2_duration_microseconds{label1="value4",quantile="0.99"} 5933.921 +test_summary_2_duration_microseconds_sum{label1="value4"} 383201.29 +test_summary_2_duration_microseconds_count{label1="value4"} 41 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-no-meta.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-no-meta.txt new file mode 100644 index 00000000000000..e66564bb795dc7 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/summary-no-meta.txt @@ -0,0 +1,40 @@ +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value2"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value2"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.5"} 4931.921 +test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value3"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value3"} 31 +test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.5"} 4931.921 
+test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.9"} 4932.921 +test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.99"} 4933.921 +test_summary_no_meta_1_duration_microseconds_sum{label1="value4"} 283201.29 +test_summary_no_meta_1_duration_microseconds_count{label1="value4"} 31 +test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.5"} 5931.921 +test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.9"} 5932.921 +test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.99"} 5933.921 +test_summary_no_meta_2_duration_microseconds_sum{label1="value1"} 383201.29 +test_summary_no_meta_2_duration_microseconds_count{label1="value1"} 41 +test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.5"} 5931.921 +test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.9"} 5932.921 +test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.99"} 5933.921 +test_summary_no_meta_2_duration_microseconds_sum{label1="value2"} 383201.29 +test_summary_no_meta_2_duration_microseconds_count{label1="value2"} 41 +test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.5"} 5931.921 +test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.9"} 5932.921 +test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.99"} 5933.921 +test_summary_no_meta_2_duration_microseconds_sum{label1="value3"} 383201.29 +test_summary_no_meta_2_duration_microseconds_count{label1="value3"} 41 +test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.5"} 5931.921 +test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.9"} 5932.921 +test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.99"} 5933.921 +test_summary_no_meta_2_duration_microseconds_sum{label1="value4"} 383201.29 +test_summary_no_meta_2_duration_microseconds_count{label1="value4"} 41 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.nometa.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.nometa.txt new file mode 100644 index 00000000000000..e760ad2682fcb4 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.nometa.txt @@ -0,0 +1,410 @@ +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +go_goroutines 33 +go_memstats_alloc_bytes 1.7518624e+07 +go_memstats_alloc_bytes_total 8.3062296e+08 +go_memstats_buck_hash_sys_bytes 1.494637e+06 +go_memstats_frees_total 4.65658e+06 +go_memstats_gc_sys_bytes 1.107968e+06 +go_memstats_heap_alloc_bytes 1.7518624e+07 +go_memstats_heap_idle_bytes 6.668288e+06 +go_memstats_heap_inuse_bytes 1.8956288e+07 +go_memstats_heap_objects 72755 +go_memstats_heap_released_bytes_total 0 +go_memstats_heap_sys_bytes 2.5624576e+07 +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +go_memstats_lookups_total 2089 +go_memstats_mallocs_total 4.729335e+06 +go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_sys_bytes 16384 +go_memstats_mspan_inuse_bytes 211520 +go_memstats_mspan_sys_bytes 245760 +go_memstats_next_gc_bytes 2.033527e+07 +go_memstats_other_sys_bytes 2.077323e+06 +go_memstats_stack_inuse_bytes 1.6384e+06 +go_memstats_stack_sys_bytes 1.6384e+06 
+go_memstats_sys_bytes 3.2205048e+07 +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 +http_request_duration_microseconds_count{handler="options"} 
0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN 
+http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 +http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} 
NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 
+http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 +http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 
+http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +prometheus_config_last_reload_successful 1 +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +prometheus_evaluator_iterations_skipped_total 0 +prometheus_notifications_dropped_total 0 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +prometheus_sd_azure_refresh_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +prometheus_sd_ec2_refresh_failures_total 0 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN 
+prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +prometheus_sd_gce_refresh_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +prometheus_sd_marathon_refresh_failures_total 0 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +prometheus_target_skipped_scrapes_total 0 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 diff --git a/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.txt b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.txt new file mode 100644 index 00000000000000..c7f2a7af0ae475 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/prometheus/testdata/testdata.txt @@ -0,0 +1,528 @@ +# HELP 
go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 33 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 1.7518624e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 8.3062296e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.494637e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 4.65658e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 1.107968e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 1.7518624e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 6.668288e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.8956288e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 72755 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.5624576e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 2089 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 4.729335e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 211520 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. 
+# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 245760 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 2.033527e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.077323e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.6384e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.6384e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 3.2205048e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. +# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 
+http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 +http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 
+http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 
+http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 +http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 
+http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. +# TYPE prometheus_config_last_reload_successful gauge +prometheus_config_last_reload_successful 1 +# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
+# TYPE prometheus_evaluator_duration_seconds summary +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. +# TYPE prometheus_evaluator_iterations_skipped_total counter +prometheus_evaluator_iterations_skipped_total 0 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. +# TYPE prometheus_sd_azure_refresh_duration_seconds summary +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. +# TYPE prometheus_sd_azure_refresh_failures_total counter +prometheus_sd_azure_refresh_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. +# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
+# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. +# TYPE prometheus_sd_ec2_refresh_duration_seconds summary +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. +# TYPE prometheus_sd_ec2_refresh_failures_total counter +prometheus_sd_ec2_refresh_failures_total 0 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. +# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. +# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. +# TYPE prometheus_sd_gce_refresh_duration summary +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. +# TYPE prometheus_sd_gce_refresh_failures_total counter +prometheus_sd_gce_refresh_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. +# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. +# TYPE prometheus_sd_marathon_refresh_failures_total counter +prometheus_sd_marathon_refresh_failures_total 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. +# TYPE prometheus_target_skipped_scrapes_total counter +prometheus_target_skipped_scrapes_total 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. +# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. 
+# TYPE prometheus_treecache_zookeeper_failures_total counter
+prometheus_treecache_zookeeper_failures_total 0
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/pkg/socket/client.go b/src/go/collectors/go.d.plugin/pkg/socket/client.go
new file mode 100644
index 00000000000000..ea1b57318e9c3d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/socket/client.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+	"bufio"
+	"crypto/tls"
+	"errors"
+	"net"
+	"time"
+)
+
+// New returns a new pointer to a socket client given the socket
+// type (IP, TCP, UDP, UNIX), a network address (IP/domain:port),
+// a timeout and a TLS config. It supports both IPv4 and IPv6
+// addresses and reuses connections where possible.
+func New(config Config) *Socket {
+	return &Socket{
+		Config: config,
+		conn:   nil,
+	}
+}
+
+// Socket is the implementation of a socket client.
+type Socket struct {
+	Config
+	conn net.Conn
+}
+
+// Connect connects to the Socket address on the named network.
+// If the address is a domain name, it will also perform the DNS resolution.
+// An address like :80 will attempt to connect to localhost.
+// The config timeout and TLS config will be used.
+func (s *Socket) Connect() (err error) {
+	network, address := networkType(s.Address)
+	if s.TLSConf == nil {
+		s.conn, err = net.DialTimeout(network, address, s.ConnectTimeout)
+	} else {
+		var d net.Dialer
+		d.Timeout = s.ConnectTimeout
+		s.conn, err = tls.DialWithDialer(&d, network, address, s.TLSConf)
+	}
+	return err
+}
+
+// Disconnect closes the connection.
+// Any in-flight commands will be cancelled and return errors.
+func (s *Socket) Disconnect() (err error) {
+	if s.conn != nil {
+		err = s.conn.Close()
+		s.conn = nil
+	}
+	return err
+}
+
+// Command writes the command string to the connection and passes the
+// response bytes line by line to the process function. It uses the
+// timeout values from the Socket config and returns read, write and
+// timeout errors, if any. If a timeout occurs while processing the
+// responses, this function stops processing and returns a timeout
+// error.
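+//
+// A minimal usage sketch, assuming a connected client and a line-based
+// service that answers "ping" with "pong" (both are illustrative, not
+// part of this package):
+//
+//	err := sock.Command("ping\n", func(line []byte) bool {
+//		fmt.Println(string(line)) // called once per response line
+//		return true               // return false to stop reading early
+//	})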
+func (s *Socket) Command(command string, process Processor) error {
+	if s.conn == nil {
+		return errors.New("cannot send command on nil connection")
+	}
+	if err := write(command, s.conn, s.WriteTimeout); err != nil {
+		return err
+	}
+	return read(s.conn, process, s.ReadTimeout)
+}
+
+func write(command string, writer net.Conn, timeout time.Duration) error {
+	if writer == nil {
+		return errors.New("attempt to write on nil connection")
+	}
+	if err := writer.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
+		return err
+	}
+	_, err := writer.Write([]byte(command))
+	return err
+}
+
+func read(reader net.Conn, process Processor, timeout time.Duration) error {
+	if process == nil {
+		return errors.New("process func is nil")
+	}
+	if reader == nil {
+		return errors.New("attempt to read on nil connection")
+	}
+	if err := reader.SetReadDeadline(time.Now().Add(timeout)); err != nil {
+		return err
+	}
+	// Feed the response to the processor line by line; stop early if it
+	// returns false.
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() && process(scanner.Bytes()) {
+	}
+	return scanner.Err()
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/socket/client_test.go b/src/go/collectors/go.d.plugin/pkg/socket/client_test.go
new file mode 100644
index 00000000000000..fa64f4558cc27c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/socket/client_test.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+	"crypto/tls"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+const (
+	testServerAddress     = "127.0.0.1:9999"
+	testUdpServerAddress  = "udp://127.0.0.1:9999"
+	testUnixServerAddress = "/tmp/testSocketFD"
+	defaultTimeout        = 100 * time.Millisecond
+)
+
+var tcpConfig = Config{
+	Address:        testServerAddress,
+	ConnectTimeout: defaultTimeout,
+	ReadTimeout:    defaultTimeout,
+	WriteTimeout:   defaultTimeout,
+	TLSConf:        nil,
+}
+
+var udpConfig = Config{
+	Address:        testUdpServerAddress,
+	ConnectTimeout: defaultTimeout,
+	ReadTimeout:    defaultTimeout,
+	WriteTimeout:   defaultTimeout,
+	TLSConf:        nil,
+}
+
+var unixConfig = Config{
+	Address:        testUnixServerAddress,
+	ConnectTimeout: defaultTimeout,
+	ReadTimeout:    defaultTimeout,
+	WriteTimeout:   defaultTimeout,
+	TLSConf:        nil,
+}
+
+var tcpTlsConfig = Config{
+	Address:        testServerAddress,
+	ConnectTimeout: defaultTimeout,
+	ReadTimeout:    defaultTimeout,
+	WriteTimeout:   defaultTimeout,
+	TLSConf:        &tls.Config{},
+}
+
+func Test_clientCommand(t *testing.T) {
+	srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+	go func() { _ = srv.Run(); defer func() { _ = srv.Close() }() }()
+
+	time.Sleep(time.Millisecond * 100)
+	sock := New(tcpConfig)
+	require.NoError(t, sock.Connect())
+	err := sock.Command("ping\n", func(bytes []byte) bool {
+		assert.Equal(t, "pong", string(bytes))
+		return true
+	})
+	require.NoError(t, sock.Disconnect())
+	require.NoError(t, err)
+}
+
+func Test_clientTimeout(t *testing.T) {
+	srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+	go func() { _ = srv.Run() }()
+
+	time.Sleep(time.Millisecond * 100)
+	sock := New(tcpConfig)
+	require.NoError(t, sock.Connect())
+	// A zero read timeout sets an already-expired read deadline,
+	// so the command must fail.
+	sock.ReadTimeout = 0
+	err := sock.Command("ping\n", func(bytes []byte) bool {
+		assert.Equal(t, "pong", string(bytes))
+		return true
+	})
+	require.Error(t, err)
+}
+
+func Test_clientIncompleteSSL(t *testing.T) {
+	srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+	go func() { _ = srv.Run() }()
+
+	time.Sleep(time.Millisecond * 100)
+	sock := New(tcpTlsConfig)
+	err := sock.Connect()
+	require.Error(t, err)
err) +} + +func Test_clientCommandStopProcessing(t *testing.T) { + srv := &tcpServer{addr: testServerAddress, rowsNumResp: 2} + go func() { _ = srv.Run() }() + + time.Sleep(time.Millisecond * 100) + sock := New(tcpConfig) + require.NoError(t, sock.Connect()) + err := sock.Command("ping\n", func(bytes []byte) bool { + assert.Equal(t, "pong", string(bytes)) + return false + }) + require.NoError(t, sock.Disconnect()) + require.NoError(t, err) +} + +func Test_clientUDPCommand(t *testing.T) { + srv := &udpServer{addr: testServerAddress, rowsNumResp: 1} + go func() { _ = srv.Run(); defer func() { _ = srv.Close() }() }() + + time.Sleep(time.Millisecond * 100) + sock := New(udpConfig) + require.NoError(t, sock.Connect()) + err := sock.Command("ping\n", func(bytes []byte) bool { + assert.Equal(t, "pong", string(bytes)) + return false + }) + require.NoError(t, sock.Disconnect()) + require.NoError(t, err) +} + +func Test_clientTCPAddress(t *testing.T) { + srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1} + go func() { _ = srv.Run() }() + time.Sleep(time.Millisecond * 100) + + sock := New(tcpConfig) + require.NoError(t, sock.Connect()) + + tcpConfig.Address = "tcp://" + tcpConfig.Address + sock = New(tcpConfig) + require.NoError(t, sock.Connect()) +} + +func Test_clientUnixCommand(t *testing.T) { + srv := &unixServer{addr: testUnixServerAddress, rowsNumResp: 1} + // cleanup previous file descriptors + _ = srv.Close() + go func() { _ = srv.Run() }() + + time.Sleep(time.Millisecond * 200) + sock := New(unixConfig) + require.NoError(t, sock.Connect()) + err := sock.Command("ping\n", func(bytes []byte) bool { + assert.Equal(t, "pong", string(bytes)) + return false + }) + require.NoError(t, err) + require.NoError(t, sock.Disconnect()) +} + +func Test_clientEmptyProcessFunc(t *testing.T) { + srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1} + go func() { _ = srv.Run() }() + + time.Sleep(time.Millisecond * 100) + sock := New(tcpConfig) + require.NoError(t, sock.Connect()) + err := sock.Command("ping\n", nil) + require.Error(t, err, "nil process func should return an error") +} diff --git a/src/go/collectors/go.d.plugin/pkg/socket/servers_test.go b/src/go/collectors/go.d.plugin/pkg/socket/servers_test.go new file mode 100644 index 00000000000000..d6617816242d5d --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/socket/servers_test.go @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package socket + +import ( + "bufio" + "errors" + "fmt" + "net" + "os" + "strings" + "time" +) + +type tcpServer struct { + addr string + server net.Listener + rowsNumResp int +} + +func (t *tcpServer) Run() (err error) { + t.server, err = net.Listen("tcp", t.addr) + if err != nil { + return + } + return t.handleConnections() +} + +func (t *tcpServer) Close() (err error) { + return t.server.Close() +} + +func (t *tcpServer) handleConnections() (err error) { + for { + conn, err := t.server.Accept() + if err != nil || conn == nil { + return errors.New("could not accept connection") + } + t.handleConnection(conn) + } +} + +func (t *tcpServer) handleConnection(conn net.Conn) { + defer func() { _ = conn.Close() }() + _ = conn.SetDeadline(time.Now().Add(time.Millisecond * 100)) + + rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) + _, err := rw.ReadString('\n') + if err != nil { + _, _ = rw.WriteString("failed to read input") + _ = rw.Flush() + } else { + resp := strings.Repeat("pong\n", t.rowsNumResp) + _, _ = rw.WriteString(resp) + _ = rw.Flush() + } +} + +type udpServer 
struct { + addr string + conn *net.UDPConn + rowsNumResp int +} + +func (u *udpServer) Run() (err error) { + addr, err := net.ResolveUDPAddr("udp", u.addr) + if err != nil { + return err + } + u.conn, err = net.ListenUDP("udp", addr) + if err != nil { + return + } + u.handleConnections() + return nil +} + +func (u *udpServer) Close() (err error) { + return u.conn.Close() +} + +func (u *udpServer) handleConnections() { + for { + var buf [2048]byte + _, addr, _ := u.conn.ReadFromUDP(buf[0:]) + resp := strings.Repeat("pong\n", u.rowsNumResp) + _, _ = u.conn.WriteToUDP([]byte(resp), addr) + } +} + +type unixServer struct { + addr string + conn *net.UnixListener + rowsNumResp int +} + +func (u *unixServer) Run() (err error) { + _, _ = os.CreateTemp("/tmp", "testSocketFD") + addr, err := net.ResolveUnixAddr("unix", u.addr) + if err != nil { + return err + } + u.conn, err = net.ListenUnix("unix", addr) + if err != nil { + return + } + go u.handleConnections() + return nil +} + +func (u *unixServer) Close() (err error) { + _ = os.Remove(testUnixServerAddress) + return u.conn.Close() +} + +func (u *unixServer) handleConnections() { + var conn net.Conn + var err error + conn, err = u.conn.AcceptUnix() + if err != nil { + panic(fmt.Errorf("could not accept connection: %v", err)) + } + u.handleConnection(conn) +} + +func (u *unixServer) handleConnection(conn net.Conn) { + _ = conn.SetDeadline(time.Now().Add(time.Second)) + + rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) + _, err := rw.ReadString('\n') + if err != nil { + _, _ = rw.WriteString("failed to read input") + _ = rw.Flush() + } else { + resp := strings.Repeat("pong\n", u.rowsNumResp) + _, _ = rw.WriteString(resp) + _ = rw.Flush() + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/socket/types.go b/src/go/collectors/go.d.plugin/pkg/socket/types.go new file mode 100644 index 00000000000000..693faf5be1f612 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/socket/types.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package socket + +import ( + "crypto/tls" + "time" +) + +// Processor function passed to the Socket.Command function. +// It is passed by the caller to process a command's response +// line by line. +type Processor func([]byte) bool + +// Client is the interface that wraps the basic socket client operations +// and hides the implementation details from the users. +// +// Connect should prepare the connection. +// +// Disconnect should stop any in-flight connections. +// +// Command should send the actual data to the wire and pass +// any results to the processor function. +// +// Implementations should return TCP, UDP or Unix ready sockets. 
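+//
+// A minimal usage sketch (illustrative only; the address and the
+// "ping" command are assumptions, not part of this package):
+//
+//	sock := New(Config{
+//		Address:        "udp://127.0.0.1:9999",
+//		ConnectTimeout: time.Second,
+//		ReadTimeout:    time.Second,
+//		WriteTimeout:   time.Second,
+//	})
+//	if err := sock.Connect(); err == nil {
+//		defer func() { _ = sock.Disconnect() }()
+//		_ = sock.Command("ping\n", func(line []byte) bool {
+//			return true // keep processing response lines
+//		})
+//	}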
+type Client interface {
+	Connect() error
+	Disconnect() error
+	Command(command string, process Processor) error
+}
+
+// Config holds the network ip v4 or v6 address, port,
+// Socket type (ip, tcp, udp, unix), timeout and TLS configuration
+// for a Socket
+type Config struct {
+	Address        string
+	ConnectTimeout time.Duration
+	ReadTimeout    time.Duration
+	WriteTimeout   time.Duration
+	TLSConf        *tls.Config
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/socket/utils.go b/src/go/collectors/go.d.plugin/pkg/socket/utils.go
new file mode 100644
index 00000000000000..dcc48b383f5713
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/socket/utils.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import "strings"
+
+func IsUnixSocket(address string) bool {
+	return strings.HasPrefix(address, "/") || strings.HasPrefix(address, "unix://")
+}
+
+func IsUdpSocket(address string) bool {
+	return strings.HasPrefix(address, "udp://")
+}
+
+func networkType(address string) (string, string) {
+	switch {
+	case IsUnixSocket(address):
+		address = strings.TrimPrefix(address, "unix://")
+		return "unix", address
+	case IsUdpSocket(address):
+		return "udp", strings.TrimPrefix(address, "udp://")
+	default:
+		return "tcp", strings.TrimPrefix(address, "tcp://")
+	}
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/stm/README.md b/src/go/collectors/go.d.plugin/pkg/stm/README.md
new file mode 100644
index 00000000000000..a6cdceca0723be
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/stm/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "stm"
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/stm/README.md"
+sidebar_label: "stm"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# stm
+
+This package helps you convert a struct to a `map[string]int64`.
+
+## Tags
+
+The encoding of each struct field can be customized by the format string stored under the `stm` key in the struct
+field's tag. The format string gives the name of the field, possibly followed by a comma-separated list of options.
+
+**A field without a tag is not converted.**
+If you don't want a field to be added to the `map[string]int64`, just don't add a tag to it.
+
+Tag syntax:
+
+```
+`stm:"name,multiplier,divisor"`
+```
+
+Both `multiplier` and `divisor` are optional, `name` is mandatory.
+
+Examples of struct field tags and their meanings:
+
+```
+// Field appears in map as key "name".
+Field int `stm:"name"`
+
+// Field appears in map as key "name" and its value is multiplied by 10.
+Field int `stm:"name,10"`
+
+// Field appears in map as key "name" and its value is multiplied by 10 and divided by 5.
+Field int `stm:"name,10,5"`
+```
+
+## Supported field value kinds
+
+The list is:
+
+- `int`
+- `float`
+- `bool`
+- `map`
+- `array`
+- `slice`
+- `pointer`
+- `struct`
+- `interface { WriteTo(rv map[string]int64, key string, mul, div int) }`
+
+It is ok to have nested structures.
+
+## Usage
+
+Use the `ToMap` function. Keep in mind:
+
+- the function is variadic (it can be called with any number of trailing arguments).
+- duplicate keys are not allowed in the result map.
+- if a duplicate key is produced, it panics.
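+
+A type can also take full control of how it is written to the result map by implementing the
+`WriteTo` interface from the list above, bypassing the reflection-based conversion. A minimal
+sketch (the `ratio` type is hypothetical, not part of the package); a basic `ToMap` example
+follows below:
+
+```go
+// ratio is a hypothetical type that writes a pre-scaled value itself.
+type ratio float64
+
+func (r ratio) WriteTo(rv map[string]int64, key string, mul, div int) {
+	rv[key] = int64(float64(r) * 100 * float64(mul) / float64(div))
+}
+
+type stats struct {
+	Hit ratio `stm:"hit_ratio"`
+}
+
+// stm.ToMap(stats{Hit: 0.5}) // => map[hit_ratio:50]
+```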
+ +``` + ms := struct { + MetricA int64 `stm:"metric_a"` + MetricB float64 `stm:"metric_b,1000"` + MetricSet map[string]int64 `stm:"metric_set"` + }{ + MetricA: 10, + MetricB: 5.5, + MetricSet: map[string]int64{ + "a": 10, + "b": 10, + }, + } + fmt.Println(stm.ToMap(ms)) // => map[metric_a:10 metric_b:5500 metric_set_a:10 metric_set_b:10] +``` diff --git a/src/go/collectors/go.d.plugin/pkg/stm/stm.go b/src/go/collectors/go.d.plugin/pkg/stm/stm.go new file mode 100644 index 00000000000000..7d07ba9a47bfd0 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/stm/stm.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package stm + +import ( + "fmt" + "log" + "reflect" + "strconv" + "strings" +) + +const ( + fieldTagName = "stm" + structKey = "STMKey" +) + +type ( + Value interface { + WriteTo(rv map[string]int64, key string, mul, div int) + } +) + +// ToMap converts struct to a map[string]int64 based on 'stm' tags +func ToMap(s ...interface{}) map[string]int64 { + rv := map[string]int64{} + for _, v := range s { + value := reflect.Indirect(reflect.ValueOf(v)) + toMap(value, rv, "", 1, 1) + } + return rv +} + +func toMap(value reflect.Value, rv map[string]int64, key string, mul, div int) { + if !value.IsValid() { + log.Panicf("value is not valid key=%s", key) + } + if value.CanInterface() { + val, ok := value.Interface().(Value) + if ok { + val.WriteTo(rv, key, mul, div) + return + } + } + switch value.Kind() { + case reflect.Ptr: + convertPtr(value, rv, key, mul, div) + case reflect.Struct: + convertStruct(value, rv, key) + case reflect.Array, reflect.Slice: + convertArraySlice(value, rv, key, mul, div) + case reflect.Map: + convertMap(value, rv, key, mul, div) + case reflect.Bool: + convertBool(value, rv, key) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + convertInteger(value, rv, key, mul, div) + case reflect.Float32, reflect.Float64: + convertFloat(value, rv, key, mul, div) + case reflect.Interface: + convertInterface(value, rv, key, mul, div) + default: + log.Panicf("unsupported data type: %v", value.Kind()) + } +} + +func convertPtr(value reflect.Value, rv map[string]int64, key string, mul, div int) { + if !value.IsNil() { + toMap(value.Elem(), rv, key, mul, div) + } +} + +func convertStruct(value reflect.Value, rv map[string]int64, key string) { + t := value.Type() + k := value.FieldByName(structKey) + if k.Kind() == reflect.String { + key = joinPrefix(key, k.String()) + } + for i := 0; i < t.NumField(); i++ { + ft := t.Field(i) + tag, ok := ft.Tag.Lookup(fieldTagName) + if !ok || ft.Name == structKey { + continue + } + value := value.Field(i) + prefix, mul, div := parseTag(tag) + toMap(value, rv, joinPrefix(key, prefix), mul, div) + } +} + +func convertMap(value reflect.Value, rv map[string]int64, key string, mul, div int) { + if value.IsNil() { + log.Panicf("value is nil key=%s", key) + } + for _, k := range value.MapKeys() { + toMap(value.MapIndex(k), rv, joinPrefix(key, k.String()), mul, div) + } +} + +func convertArraySlice(value reflect.Value, rv map[string]int64, key string, mul, div int) { + for i := 0; i < value.Len(); i++ { + toMap(value.Index(i), rv, key, mul, div) + } +} + +func convertBool(value reflect.Value, rv map[string]int64, key string) { + if _, ok := rv[key]; ok { + log.Panic("duplicate key: ", key) + } + if value.Bool() { + rv[key] = 1 + } else { + rv[key] = 0 + } +} + +func convertInteger(value reflect.Value, rv map[string]int64, key string, mul, div int) { + if _, ok := rv[key]; ok { + log.Panic("duplicate 
key: ", key) + } + intVal := value.Int() + rv[key] = intVal * int64(mul) / int64(div) +} + +func convertFloat(value reflect.Value, rv map[string]int64, key string, mul, div int) { + if _, ok := rv[key]; ok { + log.Panic("duplicate key: ", key) + } + floatVal := value.Float() + rv[key] = int64(floatVal * float64(mul) / float64(div)) +} + +func convertInterface(value reflect.Value, rv map[string]int64, key string, mul, div int) { + fv := reflect.ValueOf(value.Interface()) + toMap(fv, rv, key, mul, div) +} + +func joinPrefix(prefix, key string) string { + if prefix == "" { + return key + } + if key == "" { + return prefix + } + return prefix + "_" + key +} + +func parseTag(tag string) (prefix string, mul int, div int) { + tokens := strings.Split(tag, ",") + mul = 1 + div = 1 + var err error + switch len(tokens) { + case 3: + div, err = strconv.Atoi(tokens[2]) + if err != nil { + log.Panic(err) + } + fallthrough + case 2: + mul, err = strconv.Atoi(tokens[1]) + if err != nil { + log.Panic(err) + } + fallthrough + case 1: + prefix = tokens[0] + default: + log.Panic(fmt.Errorf("invalid tag format: %s", tag)) + } + return +} diff --git a/src/go/collectors/go.d.plugin/pkg/stm/stm_test.go b/src/go/collectors/go.d.plugin/pkg/stm/stm_test.go new file mode 100644 index 00000000000000..2fe2d793aeb527 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/stm/stm_test.go @@ -0,0 +1,415 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package stm_test + +import ( + "testing" + + "github.com/netdata/go.d.plugin/pkg/stm" + + "github.com/netdata/go.d.plugin/pkg/metrics" + + "github.com/stretchr/testify/assert" +) + +func TestToMap_empty(t *testing.T) { + s := struct{}{} + + expected := map[string]int64{} + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_metrics(t *testing.T) { + s := struct { + C metrics.Counter `stm:"c"` + G metrics.Gauge `stm:"g,100"` + H metrics.Histogram `stm:"h,100"` + S metrics.Summary `stm:"s,200,2"` + }{} + s.C.Inc() + s.G.Set(3.14) + s.H = metrics.NewHistogram([]float64{1, 5, 10}) + + s.H.Observe(3.14) + s.H.Observe(6.28) + s.H.Observe(20) + + s.S = metrics.NewSummary() + s.S.Observe(3.14) + s.S.Observe(6.28) + + expected := map[string]int64{ + "c": 1, + "g": 314, + + "h_count": 3, + "h_sum": 2942, + "h_bucket_1": 0, + "h_bucket_2": 1, + "h_bucket_3": 2, + + "s_count": 2, + "s_sum": 942, + "s_min": 314, + "s_max": 628, + "s_avg": 471, + } + + assert.Equal(t, expected, stm.ToMap(s), "value test") + assert.Equal(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_int(t *testing.T) { + s := struct { + I int `stm:"int"` + I8 int8 `stm:"int8"` + I16 int16 `stm:"int16"` + I32 int32 `stm:"int32"` + I64 int64 `stm:"int64"` + }{ + I: 1, I8: 2, I16: 3, I32: 4, I64: 5, + } + + expected := map[string]int64{ + "int": 1, "int8": 2, "int16": 3, "int32": 4, "int64": 5, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_float(t *testing.T) { + s := struct { + F32 float32 `stm:"f32,100"` + F64 float64 `stm:"f64"` + }{ + 3.14, 628, + } + + expected := map[string]int64{ + "f32": 314, "f64": 628, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_struct(t *testing.T) { + type pair struct { + Left int `stm:"left"` + Right int `stm:"right"` + } + s := struct { + I int `stm:"int"` + Pempty pair 
`stm:""` + Ps pair `stm:"s"` + Notag int + }{ + I: 1, + Pempty: pair{2, 3}, + Ps: pair{4, 5}, + Notag: 6, + } + + expected := map[string]int64{ + "int": 1, + "left": 2, "right": 3, + "s_left": 4, "s_right": 5, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_tree(t *testing.T) { + type node struct { + Value int `stm:"v"` + Left *node `stm:"left"` + Right *node `stm:"right"` + } + s := node{1, + &node{2, nil, nil}, + &node{3, + &node{4, nil, nil}, + nil, + }, + } + expected := map[string]int64{ + "v": 1, + "left_v": 2, + "right_v": 3, + "right_left_v": 4, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_map(t *testing.T) { + s := struct { + I int `stm:"int"` + M map[string]int64 `stm:""` + }{ + I: 1, + M: map[string]int64{ + "a": 2, + "b": 3, + }, + } + + expected := map[string]int64{ + "int": 1, + "a": 2, + "b": 3, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_nestMap(t *testing.T) { + s := struct { + I int `stm:"int"` + M map[string]interface{} `stm:""` + }{ + I: 1, + M: map[string]interface{}{ + "a": 2, + "b": 3, + "m": map[string]interface{}{ + "c": 4, + }, + }, + } + + expected := map[string]int64{ + "int": 1, + "a": 2, + "b": 3, + "m_c": 4, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_ptr(t *testing.T) { + two := 2 + s := struct { + I int `stm:"int"` + Ptr *int `stm:"ptr"` + Nil *int `stm:"nil"` + }{ + I: 1, + Ptr: &two, + Nil: nil, + } + + expected := map[string]int64{ + "int": 1, + "ptr": 2, + } + + assert.EqualValuesf(t, expected, stm.ToMap(s), "value test") + assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test") +} + +func TestToMap_invalidType(t *testing.T) { + s := struct { + Str string `stm:"int"` + }{ + Str: "abc", + } + + assert.Panics(t, func() { + stm.ToMap(s) + }, "value test") + assert.Panics(t, func() { + stm.ToMap(&s) + }, "ptr test") +} + +func TestToMap_duplicateKey(t *testing.T) { + { + s := struct { + Key int `stm:"key"` + M map[string]int `stm:""` + }{ + Key: 1, + M: map[string]int{ + "key": 2, + }, + } + + assert.Panics(t, func() { + stm.ToMap(s) + }, "value test") + assert.Panics(t, func() { + stm.ToMap(&s) + }, "ptr test") + } + { + s := struct { + Key float64 `stm:"key"` + M map[string]float64 `stm:""` + }{ + Key: 1, + M: map[string]float64{ + "key": 2, + }, + } + + assert.Panics(t, func() { + stm.ToMap(s) + }, "value test") + assert.Panics(t, func() { + stm.ToMap(&s) + }, "ptr test") + } +} + +func TestToMap_Variadic(t *testing.T) { + s1 := struct { + Key1 int `stm:"key1"` + }{ + Key1: 1, + } + s2 := struct { + Key2 int `stm:"key2"` + }{ + Key2: 2, + } + s3 := struct { + Key3 int `stm:"key3"` + }{ + Key3: 3, + } + + assert.Equal( + t, + map[string]int64{ + "key1": 1, + "key2": 2, + "key3": 3, + }, + stm.ToMap(s1, s2, s3), + ) +} + +func TestToMap_badTag(t *testing.T) { + assert.Panics(t, func() { + s := struct { + A int `stm:"a,not_int"` + }{1} + stm.ToMap(s) + }) + assert.Panics(t, func() { + s := struct { + A int `stm:"a,1,not_int"` + }{1} + stm.ToMap(s) + }) + assert.Panics(t, func() { + s := struct { + A int `stm:"a,not_int,1"` + }{1} + stm.ToMap(s) + }) + assert.Panics(t, func() { + s := struct { + A int `stm:"a,1,2,3"` + }{1} + 
stm.ToMap(s)
+	})
+}
+
+func TestToMap_nilValue(t *testing.T) {
+	assert.Panics(t, func() {
+		s := struct {
+			a metrics.CounterVec `stm:"a"`
+		}{nil}
+		stm.ToMap(s)
+	})
+}
+func TestToMap_bool(t *testing.T) {
+	s := struct {
+		A bool `stm:"a"`
+		B bool `stm:"b"`
+	}{
+		A: true,
+		B: false,
+	}
+	assert.Equal(
+		t,
+		map[string]int64{
+			"a": 1,
+			"b": 0,
+		},
+		stm.ToMap(s),
+	)
+}
+
+func TestToMap_ArraySlice(t *testing.T) {
+	s := [4]interface{}{
+		map[string]int{
+			"B": 1,
+			"C": 2,
+		},
+		struct {
+			D int `stm:"D"`
+			E int `stm:"E"`
+		}{
+			D: 3,
+			E: 4,
+		},
+		struct {
+			STMKey string
+			F      int `stm:"F"`
+			G      int `stm:"G"`
+		}{
+			F: 5,
+			G: 6,
+		},
+		struct {
+			STMKey string
+			H      int `stm:"H"`
+			I      int `stm:"I"`
+		}{
+			STMKey: "KEY",
+			H:      7,
+			I:      8,
+		},
+	}
+
+	assert.Equal(
+		t,
+		map[string]int64{
+			"B": 1,
+			"C": 2,
+			"D": 3,
+			"E": 4,
+			"F": 5,
+			"G": 6,
+			"KEY_H": 7,
+			"KEY_I": 8,
+		},
+		stm.ToMap(s),
+	)
+
+	assert.Equal(
+		t,
+		map[string]int64{
+			"B": 1,
+			"C": 2,
+			"D": 3,
+			"E": 4,
+			"F": 5,
+			"G": 6,
+			"KEY_H": 7,
+			"KEY_I": 8,
+		},
+		stm.ToMap(s[:]),
+	)
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/tlscfg/README.md b/src/go/collectors/go.d.plugin/pkg/tlscfg/README.md
new file mode 100644
index 00000000000000..bc7b5209806626
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/tlscfg/README.md
@@ -0,0 +1,61 @@
+<!--
+title: "tlscfg"
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/tlscfg/README.md"
+sidebar_label: "tlscfg"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# tlscfg
+
+This package contains the client TLS configuration and a function to create a `tls.Config` from it.
+
+Every module that needs a `tls.Config` for data collection should use it. It gives all modules the same set of
+user-configurable options.
+
+## Configuration options
+
+- `tls_skip_verify`: controls whether a client verifies the server's certificate chain and host name.
+- `tls_ca`: certificate authority to use when verifying server certificates.
+- `tls_cert`: tls certificate to use.
+- `tls_key`: tls key to use.
+
+## Usage
+
+Just make `TLSConfig` part of your module configuration.
+
+```go
+package example
+
+import "github.com/netdata/go.d.plugin/pkg/tlscfg"
+
+type Config struct {
+	tlscfg.TLSConfig `yaml:",inline"`
+}
+
+type Example struct {
+	Config `yaml:",inline"`
+}
+
+func (e *Example) Init() bool {
+	tlsCfg, err := tlscfg.NewTLSConfig(e.TLSConfig)
+	if err != nil {
+		// ...
+		return false
+	}
+
+	// ...
+	return true
+}
+```
+
+With `TLSConfig` embedded, your configuration inherits all [configuration options](#configuration-options):
+
+```yaml
+jobs:
+  - name: name
+    tls_skip_verify: no
+    tls_ca: path/to/ca.pem
+    tls_cert: path/to/cert.pem
+    tls_key: path/to/key.pem
+```
diff --git a/src/go/collectors/go.d.plugin/pkg/tlscfg/config.go b/src/go/collectors/go.d.plugin/pkg/tlscfg/config.go
new file mode 100644
index 00000000000000..26051e486ad377
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/tlscfg/config.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tlscfg
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"os"
+)
+
+// TLSConfig represents the standard client TLS configuration.
+type TLSConfig struct {
+	// TLSCA specifies the certificate authority to use when verifying server certificates.
+	TLSCA string `yaml:"tls_ca"`
+
+	// TLSCert specifies tls certificate file.
+	TLSCert string `yaml:"tls_cert"`
+
+	// TLSKey specifies tls key file.
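+	// A key is only used together with TLSCert: NewTLSConfig loads the pair
+	// via tls.LoadX509KeyPair, so setting one without the other has no effect.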
+	TLSKey string `yaml:"tls_key"`
+
+	// InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name.
+	InsecureSkipVerify bool `yaml:"tls_skip_verify"`
+}
+
+// NewTLSConfig creates a tls.Config; the result may be nil without an error if TLS is not configured.
+func NewTLSConfig(cfg TLSConfig) (*tls.Config, error) {
+	if cfg.TLSCA == "" && cfg.TLSKey == "" && cfg.TLSCert == "" && !cfg.InsecureSkipVerify {
+		return nil, nil
+	}
+
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: cfg.InsecureSkipVerify,
+		Renegotiation:      tls.RenegotiateNever,
+	}
+
+	if cfg.TLSCA != "" {
+		pool, err := loadCertPool([]string{cfg.TLSCA})
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.RootCAs = pool
+	}
+
+	if cfg.TLSCert != "" && cfg.TLSKey != "" {
+		cert, err := loadCertificate(cfg.TLSCert, cfg.TLSKey)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.Certificates = []tls.Certificate{cert}
+	}
+
+	return tlsConfig, nil
+}
+
+func loadCertPool(certFiles []string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	for _, certFile := range certFiles {
+		pem, err := os.ReadFile(certFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read certificate %q: %v", certFile, err)
+		}
+		if !pool.AppendCertsFromPEM(pem) {
+			return nil, fmt.Errorf("could not parse any PEM certificates %q: %v", certFile, err)
+		}
+	}
+	return pool, nil
+}
+
+func loadCertificate(certFile, keyFile string) (tls.Certificate, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return tls.Certificate{}, fmt.Errorf("could not load keypair %s:%s: %v", certFile, keyFile, err)
+	}
+	return cert, nil
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/tlscfg/config_test.go b/src/go/collectors/go.d.plugin/pkg/tlscfg/config_test.go
new file mode 100644
index 00000000000000..d95fe24bc80bef
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/tlscfg/config_test.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tlscfg
+
+import "testing"
+
+// TODO:
+func TestNewClientTLSConfig(t *testing.T) {
+
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/web/README.md b/src/go/collectors/go.d.plugin/pkg/web/README.md
new file mode 100644
index 00000000000000..d38740df5d1d5c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/README.md
@@ -0,0 +1,98 @@
+<!--
+title: "web"
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/pkg/web/README.md"
+sidebar_label: "web"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# web
+
+This package contains HTTP-related configurations (the `Request`, `Client` and `HTTP` structs) and functions to
+create `http.Request` and `http.Client` instances from them.
+
+`HTTP` embeds both `Request` and `Client`.
+
+Every module that collects metrics over HTTP should use `HTTP`. It gives all modules the same set of
+user-configurable options.
+
+## Configuration options
+
+HTTP request options:
+
+- `url`: the URL to access.
+- `username`: the username for basic HTTP authentication.
+- `password`: the password for basic HTTP authentication.
+- `proxy_username`: the username for basic HTTP authentication of a user agent to a proxy server.
+- `proxy_password`: the password for basic HTTP authentication of a user agent to a proxy server.
+- `body`: the HTTP request body to be sent by the client.
+- `method`: the HTTP method (GET, POST, PUT, etc.).
+- `headers`: the HTTP request header fields to be sent by the client.
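+
+These options map one-to-one onto the `Request` struct fields. A minimal sketch (the URL and
+header values are placeholders):
+
+```go
+req, err := web.NewHTTPRequest(web.Request{
+	URL:     "http://127.0.0.1:19999/api/v1/info", // placeholder
+	Method:  "GET",
+	Headers: map[string]string{"X-Api-Key": "secret"}, // placeholder
+})
+if err != nil {
+	// handle the error
+}
+_ = req
+```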
+
+HTTP client options:
+
+- `timeout`: the HTTP request time limit.
+- `not_follow_redirects`: the policy for handling redirects.
+- `proxy_url`: the URL of the proxy to use.
+- `tls_skip_verify`: controls whether a client verifies the server's certificate chain and host name.
+- `tls_ca`: certificate authority to use when verifying server certificates.
+- `tls_cert`: tls certificate to use.
+- `tls_key`: tls key to use.
+
+## Usage
+
+Just make `HTTP` part of your module configuration.
+
+```go
+package example
+
+import "github.com/netdata/go.d.plugin/pkg/web"
+
+type Config struct {
+	web.HTTP `yaml:",inline"`
+}
+
+type Example struct {
+	Config `yaml:",inline"`
+}
+
+func (e *Example) Init() bool {
+	httpReq, err := web.NewHTTPRequest(e.Request)
+	if err != nil {
+		// ...
+		return false
+	}
+
+	httpClient, err := web.NewHTTPClient(e.Client)
+	if err != nil {
+		// ...
+		return false
+	}
+
+	// ...
+	return true
+}
+```
+
+With `HTTP` embedded, your configuration inherits all [configuration options](#configuration-options):
+
+```yaml
+jobs:
+  - name: name
+    url: url
+    username: username
+    password: password
+    proxy_url: proxy_url
+    proxy_username: proxy_username
+    proxy_password: proxy_password
+    timeout: 1
+    method: GET
+    body: '{"key": "value"}'
+    headers:
+      X-API-Key: key
+    not_follow_redirects: no
+    tls_skip_verify: no
+    tls_ca: path/to/ca.pem
+    tls_cert: path/to/cert.pem
+    tls_key: path/to/key.pem
+```
diff --git a/src/go/collectors/go.d.plugin/pkg/web/client.go b/src/go/collectors/go.d.plugin/pkg/web/client.go
new file mode 100644
index 00000000000000..ae3ecd462cfe2c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/client.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+
+	"github.com/netdata/go.d.plugin/pkg/tlscfg"
+)
+
+// ErrRedirectAttempted indicates that a redirect occurred.
+var ErrRedirectAttempted = errors.New("redirect")
+
+// Client is the configuration of the HTTP client.
+// This structure is not intended to be used directly as part of a module's configuration.
+// Supported configuration file formats: YAML.
+type Client struct {
+	// Timeout specifies a time limit for requests made by this Client.
+	// Default (zero value) is no timeout. Must be set before http.Client creation.
+	Timeout Duration `yaml:"timeout"`
+
+	// NotFollowRedirect specifies the policy for handling redirects.
+	// Default (zero value) is the std http package default policy (stop after 10 consecutive requests).
+	NotFollowRedirect bool `yaml:"not_follow_redirects"`
+
+	// ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables
+	// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL.
+	ProxyURL string `yaml:"proxy_url"`
+
+	// TLSConfig specifies the TLS configuration.
+	tlscfg.TLSConfig `yaml:",inline"`
+}
+
+// NewHTTPClient returns a new *http.Client given a Client configuration and an error if any.
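+// A usage sketch (the proxy URL is illustrative):
+//
+//	client, err := NewHTTPClient(Client{
+//		Timeout:           Duration{Duration: time.Second * 5},
+//		NotFollowRedirect: true,
+//		ProxyURL:          "http://127.0.0.1:3128",
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer client.CloseIdleConnections()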
+func NewHTTPClient(cfg Client) (*http.Client, error) {
+	tlsConfig, err := tlscfg.NewTLSConfig(cfg.TLSConfig)
+	if err != nil {
+		return nil, fmt.Errorf("error on creating TLS config: %v", err)
+	}
+
+	if cfg.ProxyURL != "" {
+		if _, err := url.Parse(cfg.ProxyURL); err != nil {
+			return nil, fmt.Errorf("error on parsing proxy URL '%s': %v", cfg.ProxyURL, err)
+		}
+	}
+
+	d := &net.Dialer{Timeout: cfg.Timeout.Duration}
+
+	transport := &http.Transport{
+		Proxy:               proxyFunc(cfg.ProxyURL),
+		TLSClientConfig:     tlsConfig,
+		DialContext:         d.DialContext,
+		TLSHandshakeTimeout: cfg.Timeout.Duration,
+	}
+
+	return &http.Client{
+		Timeout:       cfg.Timeout.Duration,
+		Transport:     transport,
+		CheckRedirect: redirectFunc(cfg.NotFollowRedirect),
+	}, nil
+}
+
+func redirectFunc(notFollowRedirect bool) func(req *http.Request, via []*http.Request) error {
+	if follow := !notFollowRedirect; follow {
+		return nil
+	}
+	return func(_ *http.Request, _ []*http.Request) error { return ErrRedirectAttempted }
+}
+
+func proxyFunc(rawProxyURL string) func(r *http.Request) (*url.URL, error) {
+	if rawProxyURL == "" {
+		return http.ProxyFromEnvironment
+	}
+	proxyURL, _ := url.Parse(rawProxyURL)
+	return http.ProxyURL(proxyURL)
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/web/client_test.go b/src/go/collectors/go.d.plugin/pkg/web/client_test.go
new file mode 100644
index 00000000000000..e11d6ce472e825
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/client_test.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewHTTPClient(t *testing.T) {
+	client, _ := NewHTTPClient(Client{
+		Timeout:           Duration{Duration: time.Second * 5},
+		NotFollowRedirect: true,
+		ProxyURL:          "http://127.0.0.1:3128",
+	})
+
+	assert.IsType(t, (*http.Client)(nil), client)
+	assert.Equal(t, time.Second*5, client.Timeout)
+	assert.NotNil(t, client.CheckRedirect)
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/web/doc.go b/src/go/collectors/go.d.plugin/pkg/web/doc.go
new file mode 100644
index 00000000000000..4c6d31461b07f7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/doc.go
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+Package web contains HTTP request and client configurations.
+The HTTP structure embeds both of them, and it is the only structure intended to be used as part of a module's configuration.
+Every module that uses HTTP requests to collect metrics should use it.
+It gives all modules the same set of user-configurable options.
+*/
+package web
diff --git a/src/go/collectors/go.d.plugin/pkg/web/doc_test.go b/src/go/collectors/go.d.plugin/pkg/web/doc_test.go
new file mode 100644
index 00000000000000..137eed20796815
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/doc_test.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+func ExampleHTTP_usage() {
+	// Just embed HTTP into your module structure.
+	// It allows you to have both Request and Client fields in the module configuration file.
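+	// Because Request and Client are inlined, a job's YAML sets url, timeout,
+	// tls_* and the other options at the top level, not under nested keys.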
+ type myModule struct { + HTTP `yaml:",inline"` + } + + var m myModule + _, _ = NewHTTPRequest(m.Request) + _, _ = NewHTTPClient(m.Client) +} diff --git a/src/go/collectors/go.d.plugin/pkg/web/duration.go b/src/go/collectors/go.d.plugin/pkg/web/duration.go new file mode 100644 index 00000000000000..ced991f91df4fc --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/web/duration.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "fmt" + "strconv" + "time" +) + +// Duration is a time.Duration wrapper. +type Duration struct { + Duration time.Duration +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + + if err := unmarshal(&s); err != nil { + return err + } + + if v, err := time.ParseDuration(s); err == nil { + d.Duration = v + return nil + } + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + d.Duration = time.Duration(v) * time.Second + return nil + } + if v, err := strconv.ParseFloat(s, 64); err == nil { + d.Duration = time.Duration(v) * time.Second + return nil + } + return fmt.Errorf("unparsable duration format '%s'", s) +} + +func (d Duration) String() string { return d.Duration.String() } diff --git a/src/go/collectors/go.d.plugin/pkg/web/duration_test.go b/src/go/collectors/go.d.plugin/pkg/web/duration_test.go new file mode 100644 index 00000000000000..01ee19dd2f9294 --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/web/duration_test.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" +) + +func TestDuration_UnmarshalYAML(t *testing.T) { + var d Duration + values := [][]byte{ + []byte("100ms"), // duration + []byte("3s300ms"), // duration + []byte("3"), // int + []byte("3.3"), // float + } + + for _, v := range values { + assert.NoError(t, yaml.Unmarshal(v, &d)) + } +} diff --git a/src/go/collectors/go.d.plugin/pkg/web/request.go b/src/go/collectors/go.d.plugin/pkg/web/request.go new file mode 100644 index 00000000000000..5740da6d1c30fe --- /dev/null +++ b/src/go/collectors/go.d.plugin/pkg/web/request.go @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package web + +import ( + "encoding/base64" + "io" + "net/http" + "strings" +) + +// Request is the configuration of the HTTP request. +// This structure is not intended to be used directly as part of a module's configuration. +// Supported configuration file formats: YAML. +type Request struct { + // URL specifies the URL to access. + URL string `yaml:"url"` + + // Body specifies the HTTP request body to be sent by the client. + Body string `yaml:"body"` + + // Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET. + Method string `yaml:"method"` + + // Headers specifies the HTTP request header fields to be sent by the client. + Headers map[string]string `yaml:"headers"` + + // Username specifies the username for basic HTTP authentication. + Username string `yaml:"username"` + + // Password specifies the password for basic HTTP authentication. + Password string `yaml:"password"` + + // ProxyUsername specifies the username for basic HTTP authentication. + // It is used to authenticate a user agent to a proxy server. + ProxyUsername string `yaml:"proxy_username"` + + // ProxyPassword specifies the password for basic HTTP authentication. + // It is used to authenticate a user agent to a proxy server. 
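+	// Note: NewHTTPRequest adds the Proxy-Authorization header only when
+	// both ProxyUsername and ProxyPassword are set.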
+	ProxyPassword string `yaml:"proxy_password"`
+}
+
+// Copy makes a full copy of the Request.
+func (r Request) Copy() Request {
+	headers := make(map[string]string, len(r.Headers))
+	for k, v := range r.Headers {
+		headers[k] = v
+	}
+	r.Headers = headers
+	return r
+}
+
+// NewHTTPRequest returns a new *http.Request given a Request configuration and an error if any.
+func NewHTTPRequest(cfg Request) (*http.Request, error) {
+	var body io.Reader
+	if cfg.Body != "" {
+		body = strings.NewReader(cfg.Body)
+	}
+
+	req, err := http.NewRequest(cfg.Method, cfg.URL, body)
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.Username != "" || cfg.Password != "" {
+		req.SetBasicAuth(cfg.Username, cfg.Password)
+	}
+
+	if cfg.ProxyUsername != "" && cfg.ProxyPassword != "" {
+		basicAuth := base64.StdEncoding.EncodeToString([]byte(cfg.ProxyUsername + ":" + cfg.ProxyPassword))
+		req.Header.Set("Proxy-Authorization", "Basic "+basicAuth)
+	}
+
+	for k, v := range cfg.Headers {
+		switch k {
+		case "host", "Host":
+			req.Host = v
+		default:
+			req.Header.Set(k, v)
+		}
+	}
+
+	return req, nil
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/web/request_test.go b/src/go/collectors/go.d.plugin/pkg/web/request_test.go
new file mode 100644
index 00000000000000..284cccb93ea76f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/request_test.go
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+	"encoding/base64"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRequest_Copy(t *testing.T) {
+	tests := map[string]struct {
+		orig   Request
+		change func(req *Request)
+	}{
+		"change headers": {
+			orig: Request{
+				URL:    "http://127.0.0.1:19999/api/v1/info",
+				Method: "POST",
+				Headers: map[string]string{
+					"X-Api-Key": "secret",
+				},
+				Username:      "username",
+				Password:      "password",
+				ProxyUsername: "proxy_username",
+				ProxyPassword: "proxy_password",
+			},
+			change: func(req *Request) {
+				req.Headers["header_key"] = "header_value"
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			reqCopy := test.orig.Copy()
+
+			assert.Equal(t, test.orig, reqCopy)
+			test.change(&reqCopy)
+			assert.NotEqual(t, test.orig, reqCopy)
+		})
+	}
+}
+
+func TestNewHTTPRequest(t *testing.T) {
+	tests := map[string]struct {
+		req     Request
+		wantErr bool
+	}{
+		"test url": {
+			req: Request{
+				URL: "http://127.0.0.1:19999/api/v1/info",
+			},
+			wantErr: false,
+		},
+		"test body": {
+			req: Request{
+				Body: "content",
+			},
+			wantErr: false,
+		},
+		"test method": {
+			req: Request{
+				Method: "POST",
+			},
+			wantErr: false,
+		},
+		"test headers": {
+			req: Request{
+				Headers: map[string]string{
+					"X-Api-Key": "secret",
+				},
+			},
+			wantErr: false,
+		},
+		"test special headers (host)": {
+			req: Request{
+				Headers: map[string]string{
+					"host": "Host",
+				},
+			},
+			wantErr: false,
+		},
+		"test special headers (Host)": {
+			req: Request{
+				Headers: map[string]string{
+					"Host": "Host",
+				},
+			},
+			wantErr: false,
+		},
+		"test username and password": {
+			req: Request{
+				Username: "username",
+				Password: "password",
+			},
+			wantErr: false,
+		},
+		"test proxy username and proxy password": {
+			req: Request{
+				ProxyUsername: "proxy_username",
+				ProxyPassword: "proxy_password",
+			},
+			wantErr: false,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			httpReq, err := NewHTTPRequest(test.req)
+
+			if test.wantErr {
+				assert.Error(t, err)
+				assert.Nil(t, httpReq)
+				return
+			}
+
require.NoError(t, err)
+			require.NotNil(t, httpReq)
+			require.IsType(t, (*http.Request)(nil), httpReq)
+
+			assert.Equal(t, test.req.URL, httpReq.URL.String())
+
+			if test.req.Body != "" {
+				assert.NotNil(t, httpReq.Body)
+			}
+
+			if test.req.Username != "" || test.req.Password != "" {
+				user, pass, ok := httpReq.BasicAuth()
+				assert.True(t, ok)
+				assert.Equal(t, test.req.Username, user)
+				assert.Equal(t, test.req.Password, pass)
+			}
+
+			if test.req.Method != "" {
+				assert.Equal(t, test.req.Method, httpReq.Method)
+			}
+
+			if test.req.ProxyUsername != "" || test.req.ProxyPassword != "" {
+				user, pass, ok := parseBasicAuth(httpReq.Header.Get("Proxy-Authorization"))
+				assert.True(t, ok)
+				assert.Equal(t, test.req.ProxyUsername, user)
+				assert.Equal(t, test.req.ProxyPassword, pass)
+			}
+
+			for k, v := range test.req.Headers {
+				switch k {
+				case "host", "Host":
+					assert.Equal(t, httpReq.Host, v)
+				default:
+					assert.Equal(t, v, httpReq.Header.Get(k))
+				}
+			}
+		})
+	}
+}
+
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+	const prefix = "Basic "
+	if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) {
+		return "", "", false
+	}
+
+	decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
+	if err != nil {
+		return "", "", false
+	}
+
+	decodedStr := string(decoded)
+	idx := strings.IndexByte(decodedStr, ':')
+	if idx < 0 {
+		return "", "", false
+	}
+
+	return decodedStr[:idx], decodedStr[idx+1:], true
+}
diff --git a/src/go/collectors/go.d.plugin/pkg/web/web.go b/src/go/collectors/go.d.plugin/pkg/web/web.go
new file mode 100644
index 00000000000000..e2a7098ba8505f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/pkg/web/web.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+// HTTP is a struct with embedded Request and Client.
+// This structure is intended to be part of the module configuration.
+// Supported configuration file formats: YAML.
+type HTTP struct {
+	Request `yaml:",inline"`
+	Client  `yaml:",inline"`
+}
diff --git a/src/health/REFERENCE.md b/src/health/REFERENCE.md
index 40ba6feab882a0..5abcea15aedac8 100644
--- a/src/health/REFERENCE.md
+++ b/src/health/REFERENCE.md
@@ -72,7 +72,7 @@ Save the file and [reload Netdata's health configuration](#reload-health-configu
## Disable or silence alerts

Alerts and notifications can be disabled permanently via configuration changes, or temporarily, via the
-[health management API](https://github.com/netdata/netdata/blob/master/web/api/health/README.md). The
+[health management API](https://github.com/netdata/netdata/blob/master/src/web/api/health/README.md). The
available options are described below.

### Disable all alerts

@@ -102,7 +102,7 @@ This action requires that you [reload Netdata's health configuration](#reload-he
When you need to frequently disable all or some alerts from triggering during certain times (for instance when running
backups) you can use the
-[health management API](https://github.com/netdata/netdata/blob/master/web/api/health/README.md).
+[health management API](https://github.com/netdata/netdata/blob/master/src/web/api/health/README.md).
The API allows you to issue commands to control the health engine's behavior without changing configuration,
or restarting the agent.

@@ -110,7 +110,7 @@
If you want health checks to keep running and alerts to keep getting triggered, but notifications to be suppressed temporarily, you can use the -[health management API](https://github.com/netdata/netdata/blob/master/web/api/health/README.md). +[health management API](https://github.com/netdata/netdata/blob/master/src/web/api/health/README.md). The API allows you to issue commands to control the health engine's behavior without changing configuration, or restarting the agent. @@ -447,9 +447,9 @@ The format is: lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS] [foreach DIMENSIONS] ``` -The full [database query API](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) is supported. In short: +The full [database query API](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md) is supported. In short: -- `METHOD` is one of the available [grouping methods](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md#grouping-methods) such as `average`, `min`, `max` etc. +- `METHOD` is one of the available [grouping methods](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md#grouping-methods) such as `average`, `min`, `max` etc. This is required. - `AFTER` is a relative number of seconds, but it also accepts a single letter for changing @@ -464,7 +464,7 @@ The full [database query API](https://github.com/netdata/netdata/blob/master/web above too). - `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`, - `match-ids`, `match-names`. Check the [badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md) documentation for more info. + `match-ids`, `match-names`. Check the [badges](https://github.com/netdata/netdata/blob/master/src/web/api/badges/README.md) documentation for more info. - `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions @@ -860,14 +860,14 @@ You can find all the variables that can be used for a given chart, using Agent dashboard. For example, [variables for the `system.cpu` chart of the registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu). -> If you don't know how to find the CHART_NAME, you can read about it [here](https://github.com/netdata/netdata/blob/master/web/README.md#charts). +> If you don't know how to find the CHART_NAME, you can read about it [here](https://github.com/netdata/netdata/blob/master/src/web/README.md#charts). Netdata supports 3 internal indexes for variables that will be used in health monitoring. <details><summary>The variables below can be used in both chart alerts and context templates.</summary> Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in -templates for charts belonging to a given [context](https://github.com/netdata/netdata/blob/master/web/README.md#contexts). The reason is that all charts of a given +templates for charts belonging to a given [context](https://github.com/netdata/netdata/blob/master/src/web/README.md#contexts). The reason is that all charts of a given context are essentially identical, with the only difference being the family that identifies a particular hardware or software instance. 
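
As an illustration (the name, dimensions, and threshold below are arbitrary, not a recommendation), a context
template that combines a `lookup` line with the `$this` variable described above could look like:

```
 template: 10min_cpu_usage
       on: system.cpu
   lookup: average -10m unaligned of user,system
    units: %
    every: 1m
     warn: $this > 85
     info: average CPU utilization over the last 10 minutes
```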
</details> diff --git a/src/health/notifications/alarm-notify.sh.in b/src/health/notifications/alarm-notify.sh.in index 9d95c21dc3a0cf..b1272f73ec820d 100755 --- a/src/health/notifications/alarm-notify.sh.in +++ b/src/health/notifications/alarm-notify.sh.in @@ -2399,7 +2399,7 @@ send_ntfy() { msg="${host} ${status_message}: ${alarm} - ${info}" httpcode=$(docurl -X POST \ "${ntfy_auth_header[@]}" \ - -H "Icon: https://raw.githubusercontent.com/netdata/netdata/master/web/gui/dashboard/images/favicon-196x196.png" \ + -H "Icon: https://raw.githubusercontent.com/netdata/netdata/master/src/web/gui/dashboard/images/favicon-196x196.png" \ -H "Title: ${host}: ${name//_/ }" \ -H "Tags: ${emoji}" \ -H "Priority: ${priority}" \ diff --git a/src/libnetdata/libnetdata.h b/src/libnetdata/libnetdata.h index 8d79215224d940..4bade9b5bb5228 100644 --- a/src/libnetdata/libnetdata.h +++ b/src/libnetdata/libnetdata.h @@ -7,9 +7,7 @@ extern "C" { # endif -#ifdef HAVE_CONFIG_H -#include <config.h> -#endif +#include "config.h" #ifdef ENABLE_OPENSSL #define ENABLE_HTTPS 1 @@ -176,10 +174,8 @@ extern "C" { #include <sys/sysmacros.h> #endif -#ifdef STORAGE_WITH_MATH #include <math.h> #include <float.h> -#endif #if defined(HAVE_INTTYPES_H) #include <inttypes.h> diff --git a/src/libnetdata/storage_number/storage_number.c b/src/libnetdata/storage_number/storage_number.c index 6468951bd07331..89a67a532cfae3 100644 --- a/src/libnetdata/storage_number/storage_number.c +++ b/src/libnetdata/storage_number/storage_number.c @@ -147,13 +147,7 @@ storage_number pack_storage_number(NETDATA_DOUBLE value, SN_FLAGS flags) { r += (m << 27); // the divider m } -#ifdef STORAGE_WITH_MATH - // without this there are rounding problems - // example: 0.9 becomes 0.89 r += lrint((double) n); -#else - r += (storage_number)n; -#endif return r; } @@ -174,60 +168,3 @@ __attribute__((constructor)) void initialize_lut(void) { unpack_storage_number_lut10x[3 * 8 + i] = pow(100, i); // exp = 1 } } - -/* -int print_netdata_double(char *str, NETDATA_DOUBLE value) -{ - char *wstr = str; - - int sign = (value < 0) ? 
1 : 0; - if(sign) value = -value; - -#ifdef STORAGE_WITH_MATH - // without llrintl() there are rounding problems - // for example 0.9 becomes 0.89 - unsigned long long uvalue = (unsigned long long int) llrintl(value * (NETDATA_DOUBLE)100000); -#else - unsigned long long uvalue = value * (NETDATA_DOUBLE)100000; -#endif - - wstr = print_number_llu_r_smart(str, uvalue); - - // make sure we have 6 bytes at least - while((wstr - str) < 6) *wstr++ = '0'; - - // put the sign back - if(sign) *wstr++ = '-'; - - // reverse it - char *begin = str, *end = --wstr, aux; - while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux; - // wstr--; - // strreverse(str, wstr); - - // remove trailing zeros - int decimal = 5; - while(decimal > 0 && *wstr == '0') { - *wstr-- = '\0'; - decimal--; - } - - // terminate it, one position to the right - // to let space for a dot - wstr[2] = '\0'; - - // make space for the dot - int i; - for(i = 0; i < decimal ;i++) { - wstr[1] = wstr[0]; - wstr--; - } - - // put the dot - if(wstr[2] == '\0') { wstr[1] = '\0'; decimal--; } - else wstr[1] = '.'; - - // return the buffer length - return (int) ((wstr - str) + 2 + decimal ); -} -*/ diff --git a/src/streaming/README.md b/src/streaming/README.md index fe2d7c77f91a65..1c019ac569719b 100644 --- a/src/streaming/README.md +++ b/src/streaming/README.md @@ -153,7 +153,7 @@ cache size` and `dbengine multihost disk space` settings in the `[global]` secti | `[global]` section | | | | `memory mode` | `dbengine` | Determines the [database type](https://github.com/netdata/netdata/blob/master/src/database/README.md) to be used on that node. Other options settings include `none`, and `ram`. `none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. | | `[web]` section | | | -| `mode` | `static-threaded` | Determines the [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | +| `mode` | `static-threaded` | Determines the [web server](https://github.com/netdata/netdata/blob/master/src/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. | | `accept a streaming request every seconds` | `0` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. | ### Basic use cases @@ -287,7 +287,7 @@ Same thing applies with the `[MACHINE_GUID]` configuration. ### Securing streaming with TLS/SSL Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS -support](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you +support](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you need to instruct the child to use TLS/SSL as well. On the child's `stream.conf`, configure the destination as follows: ``` diff --git a/web/README.md b/src/web/README.md similarity index 73% rename from web/README.md rename to src/web/README.md index 0e6c90fc847349..a01adfb1290a5a 100644 --- a/web/README.md +++ b/src/web/README.md @@ -8,17 +8,17 @@ team and the community, but you can also customize them yourself. 
There are two primary ways to view Netdata's dashboards on the agent: -1. The [local Agent dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md) that comes pre-configured with every Netdata installation. You can +1. The [local Agent dashboard](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md) that comes pre-configured with every Netdata installation. You can see it at `http://NODE:19999`, replacing `NODE` with `localhost`, the hostname of your node, or its IP address. You can customize the contents and colors of the standard dashboard [using - JavaScript](https://github.com/netdata/netdata/blob/master/web/gui/README.md#customizing-the-local-dashboard). + JavaScript](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md#customizing-the-local-dashboard). 2. The [`dashboard.js` JavaScript library](#dashboardjs), which helps you - [customize the standard dashboards](https://github.com/netdata/netdata/blob/master/web/gui/README.md#customizing-the-local-dashboard) - using JavaScript, or create entirely new [custom dashboards](https://github.com/netdata/netdata/blob/master/web/gui/custom/README.md) or - [Atlassian Confluence dashboards](https://github.com/netdata/netdata/blob/master/web/gui/confluence/README.md). + [customize the standard dashboards](https://github.com/netdata/netdata/blob/master/src/web/gui/README.md#customizing-the-local-dashboard) + using JavaScript, or create entirely new [custom dashboards](https://github.com/netdata/netdata/blob/master/src/web/gui/custom/README.md) or + [Atlassian Confluence dashboards](https://github.com/netdata/netdata/blob/master/src/web/gui/confluence/README.md). -You can also view all the data Netdata collects through the [REST API v1](https://github.com/netdata/netdata/blob/master/web/api/README.md#netdata-rest-api). +You can also view all the data Netdata collects through the [REST API v1](https://github.com/netdata/netdata/blob/master/src/web/api/README.md#netdata-rest-api). ## dashboard.js @@ -27,7 +27,7 @@ all the charts and other visualizations that appear on any Netdata dashboard. You need to put `dashboard.js` on any HTML page that's going to render Netdata charts. -The [custom dashboards documentation](https://github.com/netdata/netdata/blob/master/web/gui/custom/README.md) contains examples of such +The [custom dashboards documentation](https://github.com/netdata/netdata/blob/master/src/web/gui/custom/README.md) contains examples of such custom HTML pages. ### Generating dashboard.js diff --git a/web/api/README.md b/src/web/api/README.md similarity index 83% rename from web/api/README.md rename to src/web/api/README.md index 237394a88cfb4f..7ad1a7ad42bb33 100644 --- a/web/api/README.md +++ b/src/web/api/README.md @@ -2,9 +2,9 @@ ## Netdata agent REST API -The complete documentation of the Netdata agent's REST API is documented in the OpenAPI format [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml). +The complete documentation of the Netdata agent's REST API is documented in the OpenAPI format [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/src/web/api/netdata-swagger.yaml). -You can explore it using the **[Swagger UI](https://learn.netdata.cloud/api)**, or the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**. 
+You can explore it using the **[Swagger UI](https://learn.netdata.cloud/api)**, or the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/src/web/api/netdata-swagger.yaml)**. ## Netdata cloud API diff --git a/web/api/badges/README.md b/src/web/api/badges/README.md similarity index 99% rename from web/api/badges/README.md rename to src/web/api/badges/README.md index d204ddf146edfe..09e14556d340dd 100644 --- a/web/api/badges/README.md +++ b/src/web/api/badges/README.md @@ -1,6 +1,6 @@ <!-- title: "Netdata badges" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/badges/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/badges/README.md sidebar_label: "Netdata badges" learn_status: "Published" learn_topic_type: "References" diff --git a/web/api/badges/web_buffer_svg.c b/src/web/api/badges/web_buffer_svg.c similarity index 100% rename from web/api/badges/web_buffer_svg.c rename to src/web/api/badges/web_buffer_svg.c diff --git a/web/api/badges/web_buffer_svg.h b/src/web/api/badges/web_buffer_svg.h similarity index 100% rename from web/api/badges/web_buffer_svg.h rename to src/web/api/badges/web_buffer_svg.h diff --git a/web/api/exporters/README.md b/src/web/api/exporters/README.md similarity index 87% rename from web/api/exporters/README.md rename to src/web/api/exporters/README.md index 4be56769100a90..206937967a339a 100644 --- a/web/api/exporters/README.md +++ b/src/web/api/exporters/README.md @@ -1,6 +1,6 @@ <!-- title: "Exporters" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/exporters/README.md sidebar_label: "Exporters" learn_status: "Published" learn_topic_type: "References" diff --git a/web/api/exporters/allmetrics.c b/src/web/api/exporters/allmetrics.c similarity index 100% rename from web/api/exporters/allmetrics.c rename to src/web/api/exporters/allmetrics.c diff --git a/web/api/exporters/allmetrics.h b/src/web/api/exporters/allmetrics.h similarity index 100% rename from web/api/exporters/allmetrics.h rename to src/web/api/exporters/allmetrics.h diff --git a/web/api/exporters/prometheus/README.md b/src/web/api/exporters/prometheus/README.md similarity index 90% rename from web/api/exporters/prometheus/README.md rename to src/web/api/exporters/prometheus/README.md index 50cadb3cdeb503..f4bb90a5695c1b 100644 --- a/web/api/exporters/prometheus/README.md +++ b/src/web/api/exporters/prometheus/README.md @@ -1,6 +1,6 @@ <!-- title: "Prometheus exporter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/prometheus/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/exporters/prometheus/README.md sidebar_label: "Prometheus exporter" learn_status: "Published" learn_topic_type: "References" diff --git a/web/api/exporters/shell/README.md b/src/web/api/exporters/shell/README.md similarity index 97% rename from web/api/exporters/shell/README.md rename to src/web/api/exporters/shell/README.md index 7e28829a7c62c9..86b774f1b29c5e 100644 --- a/web/api/exporters/shell/README.md +++ b/src/web/api/exporters/shell/README.md @@ -1,6 +1,6 @@ <!-- title: "Shell exporter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/shell/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/exporters/shell/README.md sidebar_label: "Shell exporter" 
learn_status: "Published" learn_topic_type: "References" diff --git a/web/api/exporters/shell/allmetrics_shell.c b/src/web/api/exporters/shell/allmetrics_shell.c similarity index 100% rename from web/api/exporters/shell/allmetrics_shell.c rename to src/web/api/exporters/shell/allmetrics_shell.c diff --git a/web/api/exporters/shell/allmetrics_shell.h b/src/web/api/exporters/shell/allmetrics_shell.h similarity index 100% rename from web/api/exporters/shell/allmetrics_shell.h rename to src/web/api/exporters/shell/allmetrics_shell.h diff --git a/web/api/formatters/README.md b/src/web/api/formatters/README.md similarity index 67% rename from web/api/formatters/README.md rename to src/web/api/formatters/README.md index ddc70d90fbed0e..df1ae7867d17ca 100644 --- a/web/api/formatters/README.md +++ b/src/web/api/formatters/README.md @@ -1,6 +1,6 @@ <!-- title: "Query formatting" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/formatters/README.md sidebar_label: "Query formatting" learn_status: "Published" learn_topic_type: "References" @@ -16,18 +16,18 @@ The following formats are supported: | format|module|content type|description| |:----:|:----:|:----------:|:----------| -| `array`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|application/json|a JSON array| -| `csv`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines| -| `csvjsonarray`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)| -| `datasource`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback| -| `datatable`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a Google `datatable`| -| `html`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/html|an html table| -| `json`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a JSON object| -| `jsonp`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback| -| `markdown`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a markdown table| -| `ssv`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values| -| `ssvcomma`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values| -| `tsv`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)| +| `array`|[ssv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/ssv/README.md)|application/json|a JSON array| +| `csv`|[csv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines| +| 
`csvjsonarray`|[csv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)| +| `datasource`|[json](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback| +| `datatable`|[json](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/json/README.md)|application/json|a Google `datatable`| +| `html`|[csv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/csv/README.md)|text/html|an html table| +| `json`|[json](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/json/README.md)|application/json|a JSON object| +| `jsonp`|[json](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback| +| `markdown`|[csv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/csv/README.md)|text/plain|a markdown table| +| `ssv`|[ssv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values| +| `ssvcomma`|[ssv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values| +| `tsv`|[csv](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)| For examples of each format, check the relative module documentation. diff --git a/web/api/formatters/charts2json.c b/src/web/api/formatters/charts2json.c similarity index 100% rename from web/api/formatters/charts2json.c rename to src/web/api/formatters/charts2json.c diff --git a/web/api/formatters/charts2json.h b/src/web/api/formatters/charts2json.h similarity index 100% rename from web/api/formatters/charts2json.h rename to src/web/api/formatters/charts2json.h diff --git a/web/api/formatters/csv/README.md b/src/web/api/formatters/csv/README.md similarity index 97% rename from web/api/formatters/csv/README.md rename to src/web/api/formatters/csv/README.md index 4585710b4b1494..ee5e3666f84084 100644 --- a/web/api/formatters/csv/README.md +++ b/src/web/api/formatters/csv/README.md @@ -1,6 +1,6 @@ <!-- title: "CSV formatter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/formatters/csv/README.md sidebar_label: "CSV formatter" learn_status: "Published" learn_topic_type: "References" @@ -9,7 +9,7 @@ learn_rel_path: "Developers/Web/Api/Formatters" # CSV formatter -The CSV formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) in the following formats: +The CSV formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md) in the following formats: | format|content type|description| | :----:|:----------:|:----------| diff --git a/web/api/formatters/csv/csv.c b/src/web/api/formatters/csv/csv.c similarity index 100% rename from web/api/formatters/csv/csv.c rename to src/web/api/formatters/csv/csv.c diff --git a/web/api/formatters/csv/csv.h b/src/web/api/formatters/csv/csv.h similarity index 100% rename from web/api/formatters/csv/csv.h rename to src/web/api/formatters/csv/csv.h diff --git a/web/api/formatters/json/README.md 
b/src/web/api/formatters/json/README.md similarity index 97% rename from web/api/formatters/json/README.md rename to src/web/api/formatters/json/README.md index bc70aec0295563..b0037cb2e6467b 100644 --- a/web/api/formatters/json/README.md +++ b/src/web/api/formatters/json/README.md @@ -1,6 +1,6 @@ <!-- title: "JSON formatter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/formatters/json/README.md sidebar_label: "JSON formatter" learn_status: "Published" learn_topic_type: "References" @@ -9,7 +9,7 @@ learn_rel_path: "Developers/Web/Api/Formatters" # JSON formatter -The CSV formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) in the following formats: +The JSON formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md) in the following formats: | format | content type | description| |:----:|:----------:|:----------| diff --git a/web/api/formatters/json/json.c b/src/web/api/formatters/json/json.c similarity index 100% rename from web/api/formatters/json/json.c rename to src/web/api/formatters/json/json.c diff --git a/web/api/formatters/json/json.h b/src/web/api/formatters/json/json.h similarity index 100% rename from web/api/formatters/json/json.h rename to src/web/api/formatters/json/json.h diff --git a/web/api/formatters/json_wrapper.c b/src/web/api/formatters/json_wrapper.c similarity index 100% rename from web/api/formatters/json_wrapper.c rename to src/web/api/formatters/json_wrapper.c diff --git a/web/api/formatters/json_wrapper.h b/src/web/api/formatters/json_wrapper.h similarity index 100% rename from web/api/formatters/json_wrapper.h rename to src/web/api/formatters/json_wrapper.h diff --git a/web/api/formatters/rrd2json.c b/src/web/api/formatters/rrd2json.c similarity index 100% rename from web/api/formatters/rrd2json.c rename to src/web/api/formatters/rrd2json.c diff --git a/web/api/formatters/rrd2json.h b/src/web/api/formatters/rrd2json.h similarity index 100% rename from web/api/formatters/rrd2json.h rename to src/web/api/formatters/rrd2json.h diff --git a/web/api/formatters/rrdset2json.c b/src/web/api/formatters/rrdset2json.c similarity index 100% rename from web/api/formatters/rrdset2json.c rename to src/web/api/formatters/rrdset2json.c diff --git a/web/api/formatters/rrdset2json.h b/src/web/api/formatters/rrdset2json.h similarity index 100% rename from web/api/formatters/rrdset2json.h rename to src/web/api/formatters/rrdset2json.h diff --git a/web/api/formatters/ssv/README.md b/src/web/api/formatters/ssv/README.md similarity index 95% rename from web/api/formatters/ssv/README.md rename to src/web/api/formatters/ssv/README.md index 434d567212c951..2e9dd3886abfc3 100644 --- a/web/api/formatters/ssv/README.md +++ b/src/web/api/formatters/ssv/README.md @@ -1,6 +1,6 @@ <!-- title: "SSV formatter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/formatters/ssv/README.md sidebar_label: "SSV formatter" learn_status: "Published" learn_topic_type: "References" @@ -9,7 +9,7 @@ learn_rel_path: "Developers/Web/Api/Formatters" # SSV formatter -The SSV formatter sums all dimensions in [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) +The SSV formatter 
sums all dimensions in [results of database queries](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md) to a single value and returns a list of such values showing how it changes through time. It supports the following formats: diff --git a/web/api/formatters/ssv/ssv.c b/src/web/api/formatters/ssv/ssv.c similarity index 100% rename from web/api/formatters/ssv/ssv.c rename to src/web/api/formatters/ssv/ssv.c diff --git a/web/api/formatters/ssv/ssv.h b/src/web/api/formatters/ssv/ssv.h similarity index 100% rename from web/api/formatters/ssv/ssv.h rename to src/web/api/formatters/ssv/ssv.h diff --git a/web/api/formatters/value/README.md b/src/web/api/formatters/value/README.md similarity index 84% rename from web/api/formatters/value/README.md rename to src/web/api/formatters/value/README.md index ed9345552641b8..3599a836e6e6dd 100644 --- a/web/api/formatters/value/README.md +++ b/src/web/api/formatters/value/README.md @@ -1,6 +1,6 @@ <!-- title: "Value formatter" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/formatters/value/README.md sidebar_label: "Value formatter" learn_status: "Published" learn_topic_type: "References" @@ -9,7 +9,7 @@ learn_rel_path: "Developers/Web/Api/Formatters" # Value formatter -The Value formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) as a single value. +The Value formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/src/web/api/queries/README.md) as a single value. To calculate the single value to be returned, it sums the values of all dimensions. @@ -22,7 +22,7 @@ The Value formatter respects the following API `&options=`: | `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions)| The Value formatter is not exposed by the API by itself. -Instead it is used by the [`ssv`](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md) formatter +Instead it is used by the [`ssv`](https://github.com/netdata/netdata/blob/master/src/web/api/formatters/ssv/README.md) formatter and [health monitoring queries](https://github.com/netdata/netdata/blob/master/src/health/README.md). 
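As an aside to the formatter READMEs touched above, here is a minimal sketch of the summing they describe. It is illustrative only, not part of this patch, and assumes a local agent at `http://localhost:19999` serving the default `system.cpu` chart: each data row is reduced to one number by adding its dimensions, which is the per-point value `ssv` emits and the quantity the value formatter collapses to a single result.

```python
# Minimal sketch (assumptions: agent on localhost:19999, chart system.cpu).
# Fetch a few points as JSON and sum each row's dimensions, mirroring the
# per-point summing the ssv formatter performs.
import json
import urllib.request

URL = ("http://localhost:19999/api/v1/data"
       "?chart=system.cpu&after=-10&format=json")

with urllib.request.urlopen(URL) as resp:
    result = json.load(resp)["result"]  # format=json nests rows under "result"

# Each row is [timestamp, dim1, dim2, ...].
row_sums = []
for row in result["data"]:
    vals = [v for v in row[1:] if v is not None]  # skip empty points
    row_sums.append(sum(vals))

print("per-point sums (what ssv returns):", row_sums)
```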
diff --git a/web/api/formatters/value/value.c b/src/web/api/formatters/value/value.c similarity index 100% rename from web/api/formatters/value/value.c rename to src/web/api/formatters/value/value.c diff --git a/web/api/formatters/value/value.h b/src/web/api/formatters/value/value.h similarity index 100% rename from web/api/formatters/value/value.h rename to src/web/api/formatters/value/value.h diff --git a/web/api/health/README.md b/src/web/api/health/README.md similarity index 98% rename from web/api/health/README.md rename to src/web/api/health/README.md index abe3f871ebcfda..7e818c68fc5b50 100644 --- a/web/api/health/README.md +++ b/src/web/api/health/README.md @@ -1,7 +1,7 @@ <!-- title: "Health API Calls" date: 2020-04-27 -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/health/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/health/README.md sidebar_label: "Health API Calls" learn_status: "Published" learn_topic_type: "References" @@ -76,7 +76,7 @@ You can access the API via GET requests, by adding the bearer token to an `Autho curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken" ``` -By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in `netdata.conf` within the [web] section. See [web server access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) for more information. +By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in `netdata.conf` within the [web] section. See [web server access lists](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#access-lists) for more information. The command `RESET` just returns Netdata to the default operation, with all health checks and notifications enabled. If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`. 
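For reference, the same `RESET` call as the `curl` example in the hunk above, sketched with Python's standard library; `Mytoken` is the placeholder token from that example, not a real credential.

```python
# Sketch: issue the health management RESET command, mirroring the curl
# example above. Replace "Mytoken" with your agent's configured token.
import urllib.request

req = urllib.request.Request(
    "http://localhost:19999/api/v1/manage/health?cmd=RESET",
    headers={"X-Auth-Token": "Mytoken"},
)
with urllib.request.urlopen(req) as resp:
    # On success the agent answers in plain text:
    # "All health checks and notifications are enabled"
    print(resp.read().decode())
```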
diff --git a/web/api/http_auth.c b/src/web/api/http_auth.c similarity index 100% rename from web/api/http_auth.c rename to src/web/api/http_auth.c diff --git a/web/api/http_auth.h b/src/web/api/http_auth.h similarity index 100% rename from web/api/http_auth.h rename to src/web/api/http_auth.h diff --git a/web/api/http_header.c b/src/web/api/http_header.c similarity index 100% rename from web/api/http_header.c rename to src/web/api/http_header.c diff --git a/web/api/http_header.h b/src/web/api/http_header.h similarity index 100% rename from web/api/http_header.h rename to src/web/api/http_header.h diff --git a/src/web/api/ilove/README.md b/src/web/api/ilove/README.md new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/web/api/ilove/ilove.c b/src/web/api/ilove/ilove.c similarity index 100% rename from web/api/ilove/ilove.c rename to src/web/api/ilove/ilove.c diff --git a/web/api/ilove/ilove.h b/src/web/api/ilove/ilove.h similarity index 100% rename from web/api/ilove/ilove.h rename to src/web/api/ilove/ilove.h diff --git a/web/api/ilove/measure-text.js b/src/web/api/ilove/measure-text.js similarity index 100% rename from web/api/ilove/measure-text.js rename to src/web/api/ilove/measure-text.js diff --git a/web/api/netdata-swagger.json b/src/web/api/netdata-swagger.json similarity index 100% rename from web/api/netdata-swagger.json rename to src/web/api/netdata-swagger.json diff --git a/web/api/netdata-swagger.yaml b/src/web/api/netdata-swagger.yaml similarity index 100% rename from web/api/netdata-swagger.yaml rename to src/web/api/netdata-swagger.yaml diff --git a/web/api/queries/README.md b/src/web/api/queries/README.md similarity index 100% rename from web/api/queries/README.md rename to src/web/api/queries/README.md diff --git a/web/api/queries/average/README.md b/src/web/api/queries/average/README.md similarity index 97% rename from web/api/queries/average/README.md rename to src/web/api/queries/average/README.md index 3a9c539349e81c..1ad78bee5512e4 100644 --- a/web/api/queries/average/README.md +++ b/src/web/api/queries/average/README.md @@ -1,7 +1,7 @@ <!-- title: "Average or Mean" sidebar_label: "Average or Mean" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/average/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/average/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/average/average.c b/src/web/api/queries/average/average.c similarity index 100% rename from web/api/queries/average/average.c rename to src/web/api/queries/average/average.c diff --git a/web/api/queries/average/average.h b/src/web/api/queries/average/average.h similarity index 100% rename from web/api/queries/average/average.h rename to src/web/api/queries/average/average.h diff --git a/web/api/queries/countif/README.md b/src/web/api/queries/countif/README.md similarity index 96% rename from web/api/queries/countif/README.md rename to src/web/api/queries/countif/README.md index 4004e7a27986a8..a40535395ec4da 100644 --- a/web/api/queries/countif/README.md +++ b/src/web/api/queries/countif/README.md @@ -1,7 +1,7 @@ <!-- title: "CountIf" sidebar_label: "CountIf" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/countif/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: 
"Developers/Web/Api/Queries" diff --git a/web/api/queries/countif/countif.c b/src/web/api/queries/countif/countif.c similarity index 100% rename from web/api/queries/countif/countif.c rename to src/web/api/queries/countif/countif.c diff --git a/web/api/queries/countif/countif.h b/src/web/api/queries/countif/countif.h similarity index 100% rename from web/api/queries/countif/countif.h rename to src/web/api/queries/countif/countif.h diff --git a/web/api/queries/des/README.md b/src/web/api/queries/des/README.md similarity index 98% rename from web/api/queries/des/README.md rename to src/web/api/queries/des/README.md index 0cc1a918e974b6..6dc19e732a55bc 100644 --- a/web/api/queries/des/README.md +++ b/src/web/api/queries/des/README.md @@ -1,7 +1,7 @@ <!-- title: "double exponential smoothing" sidebar_label: "double exponential smoothing" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/des/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/des/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/des/des.c b/src/web/api/queries/des/des.c similarity index 100% rename from web/api/queries/des/des.c rename to src/web/api/queries/des/des.c diff --git a/web/api/queries/des/des.h b/src/web/api/queries/des/des.h similarity index 100% rename from web/api/queries/des/des.h rename to src/web/api/queries/des/des.h diff --git a/web/api/queries/incremental_sum/README.md b/src/web/api/queries/incremental_sum/README.md similarity index 97% rename from web/api/queries/incremental_sum/README.md rename to src/web/api/queries/incremental_sum/README.md index c882acba9734df..6f02abe7d04df3 100644 --- a/web/api/queries/incremental_sum/README.md +++ b/src/web/api/queries/incremental_sum/README.md @@ -1,7 +1,7 @@ <!-- title: "Incremental Sum (`incremental_sum`)" sidebar_label: "Incremental Sum (`incremental_sum`)" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/incremental_sum/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/incremental_sum/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/src/web/api/queries/incremental_sum/incremental_sum.c similarity index 100% rename from web/api/queries/incremental_sum/incremental_sum.c rename to src/web/api/queries/incremental_sum/incremental_sum.c diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/src/web/api/queries/incremental_sum/incremental_sum.h similarity index 100% rename from web/api/queries/incremental_sum/incremental_sum.h rename to src/web/api/queries/incremental_sum/incremental_sum.h diff --git a/web/api/queries/max/README.md b/src/web/api/queries/max/README.md similarity index 97% rename from web/api/queries/max/README.md rename to src/web/api/queries/max/README.md index e7ad5446ded9de..ae634e05e0ffe0 100644 --- a/web/api/queries/max/README.md +++ b/src/web/api/queries/max/README.md @@ -1,7 +1,7 @@ <!-- title: "Max" sidebar_label: "Max" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/max/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/max/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/max/max.c 
b/src/web/api/queries/max/max.c similarity index 100% rename from web/api/queries/max/max.c rename to src/web/api/queries/max/max.c diff --git a/web/api/queries/max/max.h b/src/web/api/queries/max/max.h similarity index 100% rename from web/api/queries/max/max.h rename to src/web/api/queries/max/max.h diff --git a/web/api/queries/median/README.md b/src/web/api/queries/median/README.md similarity index 98% rename from web/api/queries/median/README.md rename to src/web/api/queries/median/README.md index f1fb3a61ca1bff..e6f6c04e7caaa3 100644 --- a/web/api/queries/median/README.md +++ b/src/web/api/queries/median/README.md @@ -2,7 +2,7 @@ title: "Median" sidebar_label: "Median" description: "Use median in API queries and health entities to find the 'middle' value from a sample, eliminating any unwanted spikes in the returned metrics." -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/median/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/median/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/median/median.c b/src/web/api/queries/median/median.c similarity index 100% rename from web/api/queries/median/median.c rename to src/web/api/queries/median/median.c diff --git a/web/api/queries/median/median.h b/src/web/api/queries/median/median.h similarity index 100% rename from web/api/queries/median/median.h rename to src/web/api/queries/median/median.h diff --git a/web/api/queries/min/README.md b/src/web/api/queries/min/README.md similarity index 97% rename from web/api/queries/min/README.md rename to src/web/api/queries/min/README.md index 67f3326edf98f3..35acb8c9e70025 100644 --- a/web/api/queries/min/README.md +++ b/src/web/api/queries/min/README.md @@ -1,7 +1,7 @@ <!-- title: "Min" sidebar_label: "Min" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/min/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/min/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/min/min.c b/src/web/api/queries/min/min.c similarity index 100% rename from web/api/queries/min/min.c rename to src/web/api/queries/min/min.c diff --git a/web/api/queries/min/min.h b/src/web/api/queries/min/min.h similarity index 100% rename from web/api/queries/min/min.h rename to src/web/api/queries/min/min.h diff --git a/web/api/queries/percentile/README.md b/src/web/api/queries/percentile/README.md similarity index 98% rename from web/api/queries/percentile/README.md rename to src/web/api/queries/percentile/README.md index e0d21ee767cf0a..88abf8d5c89647 100644 --- a/web/api/queries/percentile/README.md +++ b/src/web/api/queries/percentile/README.md @@ -2,7 +2,7 @@ title: "Percentile" sidebar_label: "Percentile" description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics." 
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/percentile/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/percentile/percentile.c b/src/web/api/queries/percentile/percentile.c similarity index 100% rename from web/api/queries/percentile/percentile.c rename to src/web/api/queries/percentile/percentile.c diff --git a/web/api/queries/percentile/percentile.h b/src/web/api/queries/percentile/percentile.h similarity index 100% rename from web/api/queries/percentile/percentile.h rename to src/web/api/queries/percentile/percentile.h diff --git a/web/api/queries/query.c b/src/web/api/queries/query.c similarity index 100% rename from web/api/queries/query.c rename to src/web/api/queries/query.c diff --git a/web/api/queries/query.h b/src/web/api/queries/query.h similarity index 100% rename from web/api/queries/query.h rename to src/web/api/queries/query.h diff --git a/web/api/queries/rrdr.c b/src/web/api/queries/rrdr.c similarity index 100% rename from web/api/queries/rrdr.c rename to src/web/api/queries/rrdr.c diff --git a/web/api/queries/rrdr.h b/src/web/api/queries/rrdr.h similarity index 100% rename from web/api/queries/rrdr.h rename to src/web/api/queries/rrdr.h diff --git a/web/api/queries/ses/README.md b/src/web/api/queries/ses/README.md similarity index 98% rename from web/api/queries/ses/README.md rename to src/web/api/queries/ses/README.md index a06f646efe351c..e2fd65d7aa3d5f 100644 --- a/web/api/queries/ses/README.md +++ b/src/web/api/queries/ses/README.md @@ -1,7 +1,7 @@ <!-- title: "Single (or Simple) Exponential Smoothing (`ses`)" sidebar_label: "Single (or Simple) Exponential Smoothing (`ses`)" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/ses/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/ses/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/ses/ses.c b/src/web/api/queries/ses/ses.c similarity index 100% rename from web/api/queries/ses/ses.c rename to src/web/api/queries/ses/ses.c diff --git a/web/api/queries/ses/ses.h b/src/web/api/queries/ses/ses.h similarity index 100% rename from web/api/queries/ses/ses.h rename to src/web/api/queries/ses/ses.h diff --git a/web/api/queries/stddev/README.md b/src/web/api/queries/stddev/README.md similarity index 99% rename from web/api/queries/stddev/README.md rename to src/web/api/queries/stddev/README.md index 3f751a6e1aec7f..76cfee1f1d8238 100644 --- a/web/api/queries/stddev/README.md +++ b/src/web/api/queries/stddev/README.md @@ -1,7 +1,7 @@ <!-- title: "standard deviation (`stddev`)" sidebar_label: "standard deviation (`stddev`)" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/stddev/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/stddev/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/stddev/stddev.c b/src/web/api/queries/stddev/stddev.c similarity index 100% rename from web/api/queries/stddev/stddev.c rename to src/web/api/queries/stddev/stddev.c diff --git a/web/api/queries/stddev/stddev.h b/src/web/api/queries/stddev/stddev.h similarity index 100% rename from 
web/api/queries/stddev/stddev.h rename to src/web/api/queries/stddev/stddev.h diff --git a/web/api/queries/sum/README.md b/src/web/api/queries/sum/README.md similarity index 97% rename from web/api/queries/sum/README.md rename to src/web/api/queries/sum/README.md index 62e18acabc6619..dd29b9c5bc042a 100644 --- a/web/api/queries/sum/README.md +++ b/src/web/api/queries/sum/README.md @@ -1,7 +1,7 @@ <!-- title: "Sum" sidebar_label: "Sum" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/sum/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/sum/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/sum/sum.c b/src/web/api/queries/sum/sum.c similarity index 100% rename from web/api/queries/sum/sum.c rename to src/web/api/queries/sum/sum.c diff --git a/web/api/queries/sum/sum.h b/src/web/api/queries/sum/sum.h similarity index 100% rename from web/api/queries/sum/sum.h rename to src/web/api/queries/sum/sum.h diff --git a/web/api/queries/trimmed_mean/README.md b/src/web/api/queries/trimmed_mean/README.md similarity index 98% rename from web/api/queries/trimmed_mean/README.md rename to src/web/api/queries/trimmed_mean/README.md index 328c4494250363..969023292328b5 100644 --- a/web/api/queries/trimmed_mean/README.md +++ b/src/web/api/queries/trimmed_mean/README.md @@ -2,7 +2,7 @@ title: "Trimmed Mean" sidebar_label: "Trimmed Mean" description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics." -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md +custom_edit_url: https://github.com/netdata/netdata/edit/master/src/web/api/queries/trimmed_mean/README.md learn_status: "Published" learn_topic_type: "References" learn_rel_path: "Developers/Web/Api/Queries" diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/src/web/api/queries/trimmed_mean/trimmed_mean.c similarity index 100% rename from web/api/queries/trimmed_mean/trimmed_mean.c rename to src/web/api/queries/trimmed_mean/trimmed_mean.c diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/src/web/api/queries/trimmed_mean/trimmed_mean.h similarity index 100% rename from web/api/queries/trimmed_mean/trimmed_mean.h rename to src/web/api/queries/trimmed_mean/trimmed_mean.h diff --git a/web/api/queries/weights.c b/src/web/api/queries/weights.c similarity index 100% rename from web/api/queries/weights.c rename to src/web/api/queries/weights.c diff --git a/web/api/queries/weights.h b/src/web/api/queries/weights.h similarity index 100% rename from web/api/queries/weights.h rename to src/web/api/queries/weights.h diff --git a/web/api/tests/valid_urls.c b/src/web/api/tests/valid_urls.c similarity index 100% rename from web/api/tests/valid_urls.c rename to src/web/api/tests/valid_urls.c diff --git a/web/api/tests/web_api.c b/src/web/api/tests/web_api.c similarity index 100% rename from web/api/tests/web_api.c rename to src/web/api/tests/web_api.c diff --git a/web/api/web_api.c b/src/web/api/web_api.c similarity index 100% rename from web/api/web_api.c rename to src/web/api/web_api.c diff --git a/web/api/web_api.h b/src/web/api/web_api.h similarity index 100% rename from web/api/web_api.h rename to src/web/api/web_api.h diff --git a/web/api/web_api_v1.c b/src/web/api/web_api_v1.c similarity index 100% rename from web/api/web_api_v1.c rename to 
src/web/api/web_api_v1.c diff --git a/web/api/web_api_v1.h b/src/web/api/web_api_v1.h similarity index 100% rename from web/api/web_api_v1.h rename to src/web/api/web_api_v1.h diff --git a/web/api/web_api_v2.c b/src/web/api/web_api_v2.c similarity index 100% rename from web/api/web_api_v2.c rename to src/web/api/web_api_v2.c diff --git a/web/api/web_api_v2.h b/src/web/api/web_api_v2.h similarity index 100% rename from web/api/web_api_v2.h rename to src/web/api/web_api_v2.h diff --git a/web/gui/.dashboard-notice.md b/src/web/gui/.dashboard-notice.md similarity index 100% rename from web/gui/.dashboard-notice.md rename to src/web/gui/.dashboard-notice.md diff --git a/web/gui/.dashboard-v2-notice.md b/src/web/gui/.dashboard-v2-notice.md similarity index 100% rename from web/gui/.dashboard-v2-notice.md rename to src/web/gui/.dashboard-v2-notice.md diff --git a/web/gui/.well-known/dnt/cookies b/src/web/gui/.well-known/dnt/cookies similarity index 100% rename from web/gui/.well-known/dnt/cookies rename to src/web/gui/.well-known/dnt/cookies diff --git a/web/gui/README.md b/src/web/gui/README.md similarity index 91% rename from web/gui/README.md rename to src/web/gui/README.md index 2b73ad2687c419..5c20031406bd1d 100644 --- a/web/gui/README.md +++ b/src/web/gui/README.md @@ -10,15 +10,15 @@ before: action](https://user-images.githubusercontent.com/1153921/101513938-fae28380-3939-11eb-9434-8ad86a39be62.gif) Learn more about how dashboards work and how they're populated using the `dashboards.js` file in our [web dashboards -overview](https://github.com/netdata/netdata/blob/master/web/README.md). +overview](https://github.com/netdata/netdata/blob/master/src/web/README.md). By default, Netdata starts a web server for its dashboard at port `19999`. Open up your web browser of choice and navigate to `http://NODE:19999`, replacing `NODE` with the IP address or hostname of your Agent. If installed on localhost, you can access it through `http://localhost:19999`. -Netdata uses an [internal, static-threaded web server](https://github.com/netdata/netdata/blob/master/web/server/README.md) to host the HTML, CSS, and JavaScript +Netdata uses an [internal, static-threaded web server](https://github.com/netdata/netdata/blob/master/src/web/server/README.md) to host the HTML, CSS, and JavaScript files that make up the local Agent dashboard. You don't have to configure anything to access it, although you can adjust -[your settings](https://github.com/netdata/netdata/blob/master/web/server/README.md#other-netdataconf-web-section-options) in the `netdata.conf` file, or run Netdata +[your settings](https://github.com/netdata/netdata/blob/master/src/web/server/README.md#other-netdataconf-web-section-options) in the `netdata.conf` file, or run Netdata behind an [Nginx proxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md), and so on. ## Navigating the local dashboard @@ -41,8 +41,8 @@ dashboard](https://user-images.githubusercontent.com/1153921/101509403-f7e59400- Netdata is broken up into multiple **sections**, such as **System Overview**, **CPU**, **Disk**, and more. Inside each section you'll find a number of charts, -broken down into [contexts](https://github.com/netdata/netdata/blob/master/web/README.md#contexts) and -[families](https://github.com/netdata/netdata/blob/master/web/README.md#families). 
+broken down into [contexts](https://github.com/netdata/netdata/blob/master/src/web/README.md#contexts) and +[families](https://github.com/netdata/netdata/blob/master/src/web/README.md#families). An example of the **Memory** section on a Linux desktop system. @@ -81,7 +81,7 @@ section, and menus link to the section they're associated with. ![A screenshot of metrics menus](https://user-images.githubusercontent.com/1153921/80834638-f08f2880-8ba5-11ea-99ae-f610b2885fd6.png) Most metrics menu items will contain several **submenu** entries, which represent any -[families](https://github.com/netdata/netdata/blob/master/web/README.md#families) from that section. Netdata automatically +[families](https://github.com/netdata/netdata/blob/master/src/web/README.md#families) from that section. Netdata automatically generates these submenu entries. Here's a **Disks** menu with several submenu entries for each disk drive and @@ -161,5 +161,5 @@ file](https://user-images.githubusercontent.com/1153921/62798924-570e6c80-ba94-1 ## Custom dashboards -For information on creating custom dashboards from scratch, see the [custom dashboards](https://github.com/netdata/netdata/blob/master/web/gui/custom/README.md) or -[Atlassian Confluence dashboards](https://github.com/netdata/netdata/blob/master/web/gui/confluence/README.md) guides. +For information on creating custom dashboards from scratch, see the [custom dashboards](https://github.com/netdata/netdata/blob/master/src/web/gui/custom/README.md) or +[Atlassian Confluence dashboards](https://github.com/netdata/netdata/blob/master/src/web/gui/confluence/README.md) guides. diff --git a/web/gui/browserconfig.xml b/src/web/gui/browserconfig.xml similarity index 100% rename from web/gui/browserconfig.xml rename to src/web/gui/browserconfig.xml diff --git a/web/gui/bundle_dashboard_v1.py b/src/web/gui/bundle_dashboard_v1.py similarity index 97% rename from web/gui/bundle_dashboard_v1.py rename to src/web/gui/bundle_dashboard_v1.py index e697a4e4a59148..54d10d4462f8e1 100755 --- a/web/gui/bundle_dashboard_v1.py +++ b/src/web/gui/bundle_dashboard_v1.py @@ -50,7 +50,7 @@ def genfilelist(path): files = [f for f in path.iterdir() if f.is_file() and f.name != 'README.md'] files = [Path(*f.parts[1:]) for f in files] files.sort() - return '\n'.join([("web/gui/v1/" + str(f)) for f in files]) + return '\n'.join([("src/web/gui/v1/" + str(f)) for f in files]) def write_cmakefile(): diff --git a/web/gui/bundle_dashboard_v2.py b/src/web/gui/bundle_dashboard_v2.py similarity index 98% rename from web/gui/bundle_dashboard_v2.py rename to src/web/gui/bundle_dashboard_v2.py index c1422d35eab4d9..336b22122e0838 100755 --- a/web/gui/bundle_dashboard_v2.py +++ b/src/web/gui/bundle_dashboard_v2.py @@ -63,7 +63,7 @@ def genfilelist(path): files = [f for f in path.iterdir() if f.is_file() and f.name != 'README.md'] files = [Path(*f.parts[1:]) for f in files] files.sort() - return '\n'.join([("web/gui/v2/" + str(f)) for f in files]) + return '\n'.join([("src/web/gui/v2/" + str(f)) for f in files]) def write_cmakefile(): diff --git a/web/gui/confluence/README.md b/src/web/gui/confluence/README.md similarity index 99% rename from web/gui/confluence/README.md rename to src/web/gui/confluence/README.md index d76aabf78d3d56..674942ec9c72be 100644 --- a/web/gui/confluence/README.md +++ b/src/web/gui/confluence/README.md @@ -1,6 +1,6 @@ <!-- title: "Atlassian Confluence dashboards" -custom_edit_url: https://github.com/netdata/netdata/edit/master/web/gui/confluence/README.md +custom_edit_url: 
https://github.com/netdata/netdata/edit/master/src/web/gui/confluence/README.md sidebar_label: "Atlassian Confluence dashboards" learn_status: "Published" learn_topic_type: "Tasks" @@ -89,7 +89,7 @@ This badge is now auto-refreshing. It will update itself based on the update fre > Keep in mind you can add badges with custom Netdata queries too. Netdata automatically creates badges for all the > alerts, but every chart, every dimension on every chart, can be used for a badge. And Netdata badges are quite -> powerful! Check [Creating Badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md) for more information on badges. +> powerful! Check [Creating Badges](https://github.com/netdata/netdata/blob/master/src/web/api/badges/README.md) for more information on badges. So, let's create a table and add this badge for both our web servers: diff --git a/web/gui/css/c3-0.4.18.min.css b/src/web/gui/css/c3-0.4.18.min.css similarity index 100% rename from web/gui/css/c3-0.4.18.min.css rename to src/web/gui/css/c3-0.4.18.min.css diff --git a/web/gui/css/morris-0.5.1.css b/src/web/gui/css/morris-0.5.1.css similarity index 100% rename from web/gui/css/morris-0.5.1.css rename to src/web/gui/css/morris-0.5.1.css diff --git a/web/gui/custom/README.md b/src/web/gui/custom/README.md similarity index 99% rename from web/gui/custom/README.md rename to src/web/gui/custom/README.md index 4a1fc6983669e9..ecdc4e24011d9b 100644 --- a/web/gui/custom/README.md +++ b/src/web/gui/custom/README.md @@ -424,7 +424,7 @@ it, using this: ### API options -You can append Netdata **[REST API v1](https://github.com/netdata/netdata/blob/master/web/api/README.md)** data options, using this: +You can append Netdata **[REST API v1](https://github.com/netdata/netdata/blob/master/src/web/api/README.md)** data options, using this: ```html <div data-netdata="unique.id" diff --git a/web/gui/dashboard_info.js b/src/web/gui/dashboard_info.js similarity index 100% rename from web/gui/dashboard_info.js rename to src/web/gui/dashboard_info.js diff --git a/web/gui/dashboard_info_custom_example.js b/src/web/gui/dashboard_info_custom_example.js similarity index 100% rename from web/gui/dashboard_info_custom_example.js rename to src/web/gui/dashboard_info_custom_example.js diff --git a/src/web/gui/gui.cmake b/src/web/gui/gui.cmake new file mode 100644 index 00000000000000..808ac889c2b55c --- /dev/null +++ b/src/web/gui/gui.cmake @@ -0,0 +1,30 @@ +set(DASHBOARD_JS_FILES src/web/gui/src/dashboard.js/prologue.js.inc + src/web/gui/src/dashboard.js/utils.js + src/web/gui/src/dashboard.js/server-detection.js + src/web/gui/src/dashboard.js/dependencies.js + src/web/gui/src/dashboard.js/error-handling.js + src/web/gui/src/dashboard.js/compatibility.js + src/web/gui/src/dashboard.js/xss.js + src/web/gui/src/dashboard.js/colors.js + src/web/gui/src/dashboard.js/units-conversion.js + src/web/gui/src/dashboard.js/options.js + src/web/gui/src/dashboard.js/localstorage.js + src/web/gui/src/dashboard.js/timeout.js + src/web/gui/src/dashboard.js/themes.js + src/web/gui/src/dashboard.js/charting/dygraph.js + src/web/gui/src/dashboard.js/charting/sparkline.js + src/web/gui/src/dashboard.js/charting/google-charts.js + src/web/gui/src/dashboard.js/charting/gauge.js + src/web/gui/src/dashboard.js/charting/easy-pie-chart.js + src/web/gui/src/dashboard.js/charting/d3pie.js + src/web/gui/src/dashboard.js/charting/d3.js + src/web/gui/src/dashboard.js/charting/peity.js + src/web/gui/src/dashboard.js/charting/textonly.js + 
src/web/gui/src/dashboard.js/charting.js + src/web/gui/src/dashboard.js/chart-registry.js + src/web/gui/src/dashboard.js/common.js + src/web/gui/src/dashboard.js/main.js + src/web/gui/src/dashboard.js/alarms.js + src/web/gui/src/dashboard.js/registry.js + src/web/gui/src/dashboard.js/boot.js + src/web/gui/src/dashboard.js/epilogue.js.inc) diff --git a/web/gui/ilove.html b/src/web/gui/ilove.html similarity index 100% rename from web/gui/ilove.html rename to src/web/gui/ilove.html diff --git a/web/gui/index.html b/src/web/gui/index.html similarity index 100% rename from web/gui/index.html rename to src/web/gui/index.html diff --git a/web/gui/main.css b/src/web/gui/main.css similarity index 100% rename from web/gui/main.css rename to src/web/gui/main.css diff --git a/web/gui/main.js b/src/web/gui/main.js similarity index 99% rename from web/gui/main.js rename to src/web/gui/main.js index e0c5fd18cc0cb9..f083d7516b28ba 100644 --- a/web/gui/main.js +++ b/src/web/gui/main.js @@ -2042,7 +2042,7 @@ function clipboardCopyBadgeEmbed(url) { function alarmsUpdateModal() { var active = '<h3>Raised Alarms</h3><table class="table">'; var all = '<h3>All Running Alarms</h3><div class="panel-group" id="alarms_all_accordion" role="tablist" aria-multiselectable="true">'; - var footer = '<hr/><a href="https://github.com/netdata/netdata/tree/master/web/api/badges#netdata-badges" target="_blank">netdata badges</a> refresh automatically. Their color indicates the state of the alarm: <span style="color: #e05d44"><b> red </b></span> is critical, <span style="color:#fe7d37"><b> orange </b></span> is warning, <span style="color: #4c1"><b> bright green </b></span> is ok, <span style="color: #9f9f9f"><b> light grey </b></span> is undefined (i.e. no data or no status), <span style="color: #000"><b> black </b></span> is not initialized. You can copy and paste their URLs to embed them in any web page.<br/>netdata can send notifications for these alarms. Check <a href="https://github.com/netdata/netdata/blob/master/src/health/notifications/health_alarm_notify.conf" target="_blank">this configuration file</a> for more information.'; + var footer = '<hr/><a href="https://github.com/netdata/netdata/tree/master/src/web/api/badges#netdata-badges" target="_blank">netdata badges</a> refresh automatically. Their color indicates the state of the alarm: <span style="color: #e05d44"><b> red </b></span> is critical, <span style="color:#fe7d37"><b> orange </b></span> is warning, <span style="color: #4c1"><b> bright green </b></span> is ok, <span style="color: #9f9f9f"><b> light grey </b></span> is undefined (i.e. no data or no status), <span style="color: #000"><b> black </b></span> is not initialized. You can copy and paste their URLs to embed them in any web page.<br/>netdata can send notifications for these alarms. 
Check <a href="https://github.com/netdata/netdata/blob/master/src/health/notifications/health_alarm_notify.conf" target="_blank">this configuration file</a> for more information.'; loadClipboard(function () { }); diff --git a/web/gui/old/index.html b/src/web/gui/old/index.html similarity index 100% rename from web/gui/old/index.html rename to src/web/gui/old/index.html diff --git a/web/gui/registry-access.html b/src/web/gui/registry-access.html similarity index 100% rename from web/gui/registry-access.html rename to src/web/gui/registry-access.html diff --git a/web/gui/registry-alert-redirect.html b/src/web/gui/registry-alert-redirect.html similarity index 100% rename from web/gui/registry-alert-redirect.html rename to src/web/gui/registry-alert-redirect.html diff --git a/web/gui/registry-hello.html b/src/web/gui/registry-hello.html similarity index 100% rename from web/gui/registry-hello.html rename to src/web/gui/registry-hello.html diff --git a/web/gui/src/dashboard.js/alarms.js b/src/web/gui/src/dashboard.js/alarms.js similarity index 100% rename from web/gui/src/dashboard.js/alarms.js rename to src/web/gui/src/dashboard.js/alarms.js diff --git a/web/gui/src/dashboard.js/boot.js b/src/web/gui/src/dashboard.js/boot.js similarity index 100% rename from web/gui/src/dashboard.js/boot.js rename to src/web/gui/src/dashboard.js/boot.js diff --git a/web/gui/src/dashboard.js/chart-registry.js b/src/web/gui/src/dashboard.js/chart-registry.js similarity index 100% rename from web/gui/src/dashboard.js/chart-registry.js rename to src/web/gui/src/dashboard.js/chart-registry.js diff --git a/web/gui/src/dashboard.js/charting.js b/src/web/gui/src/dashboard.js/charting.js similarity index 100% rename from web/gui/src/dashboard.js/charting.js rename to src/web/gui/src/dashboard.js/charting.js diff --git a/web/gui/src/dashboard.js/charting/_c3.js b/src/web/gui/src/dashboard.js/charting/_c3.js similarity index 100% rename from web/gui/src/dashboard.js/charting/_c3.js rename to src/web/gui/src/dashboard.js/charting/_c3.js diff --git a/web/gui/src/dashboard.js/charting/_morris.js b/src/web/gui/src/dashboard.js/charting/_morris.js similarity index 100% rename from web/gui/src/dashboard.js/charting/_morris.js rename to src/web/gui/src/dashboard.js/charting/_morris.js diff --git a/web/gui/src/dashboard.js/charting/_raphael.js b/src/web/gui/src/dashboard.js/charting/_raphael.js similarity index 100% rename from web/gui/src/dashboard.js/charting/_raphael.js rename to src/web/gui/src/dashboard.js/charting/_raphael.js diff --git a/web/gui/src/dashboard.js/charting/d3.js b/src/web/gui/src/dashboard.js/charting/d3.js similarity index 100% rename from web/gui/src/dashboard.js/charting/d3.js rename to src/web/gui/src/dashboard.js/charting/d3.js diff --git a/web/gui/src/dashboard.js/charting/d3pie.js b/src/web/gui/src/dashboard.js/charting/d3pie.js similarity index 100% rename from web/gui/src/dashboard.js/charting/d3pie.js rename to src/web/gui/src/dashboard.js/charting/d3pie.js diff --git a/web/gui/src/dashboard.js/charting/dygraph.js b/src/web/gui/src/dashboard.js/charting/dygraph.js similarity index 100% rename from web/gui/src/dashboard.js/charting/dygraph.js rename to src/web/gui/src/dashboard.js/charting/dygraph.js diff --git a/web/gui/src/dashboard.js/charting/easy-pie-chart.js b/src/web/gui/src/dashboard.js/charting/easy-pie-chart.js similarity index 100% rename from web/gui/src/dashboard.js/charting/easy-pie-chart.js rename to src/web/gui/src/dashboard.js/charting/easy-pie-chart.js diff --git 
a/web/gui/src/dashboard.js/charting/gauge.js b/src/web/gui/src/dashboard.js/charting/gauge.js similarity index 100% rename from web/gui/src/dashboard.js/charting/gauge.js rename to src/web/gui/src/dashboard.js/charting/gauge.js diff --git a/web/gui/src/dashboard.js/charting/google-charts.js b/src/web/gui/src/dashboard.js/charting/google-charts.js similarity index 100% rename from web/gui/src/dashboard.js/charting/google-charts.js rename to src/web/gui/src/dashboard.js/charting/google-charts.js diff --git a/web/gui/src/dashboard.js/charting/peity.js b/src/web/gui/src/dashboard.js/charting/peity.js similarity index 100% rename from web/gui/src/dashboard.js/charting/peity.js rename to src/web/gui/src/dashboard.js/charting/peity.js diff --git a/web/gui/src/dashboard.js/charting/sparkline.js b/src/web/gui/src/dashboard.js/charting/sparkline.js similarity index 100% rename from web/gui/src/dashboard.js/charting/sparkline.js rename to src/web/gui/src/dashboard.js/charting/sparkline.js diff --git a/web/gui/src/dashboard.js/charting/textonly.js b/src/web/gui/src/dashboard.js/charting/textonly.js similarity index 100% rename from web/gui/src/dashboard.js/charting/textonly.js rename to src/web/gui/src/dashboard.js/charting/textonly.js diff --git a/web/gui/src/dashboard.js/colors.js b/src/web/gui/src/dashboard.js/colors.js similarity index 100% rename from web/gui/src/dashboard.js/colors.js rename to src/web/gui/src/dashboard.js/colors.js diff --git a/web/gui/src/dashboard.js/common.js b/src/web/gui/src/dashboard.js/common.js similarity index 100% rename from web/gui/src/dashboard.js/common.js rename to src/web/gui/src/dashboard.js/common.js diff --git a/web/gui/src/dashboard.js/compatibility.js b/src/web/gui/src/dashboard.js/compatibility.js similarity index 100% rename from web/gui/src/dashboard.js/compatibility.js rename to src/web/gui/src/dashboard.js/compatibility.js diff --git a/web/gui/src/dashboard.js/dependencies.js b/src/web/gui/src/dashboard.js/dependencies.js similarity index 100% rename from web/gui/src/dashboard.js/dependencies.js rename to src/web/gui/src/dashboard.js/dependencies.js diff --git a/web/gui/src/dashboard.js/epilogue.js.inc b/src/web/gui/src/dashboard.js/epilogue.js.inc similarity index 100% rename from web/gui/src/dashboard.js/epilogue.js.inc rename to src/web/gui/src/dashboard.js/epilogue.js.inc diff --git a/web/gui/src/dashboard.js/error-handling.js b/src/web/gui/src/dashboard.js/error-handling.js similarity index 100% rename from web/gui/src/dashboard.js/error-handling.js rename to src/web/gui/src/dashboard.js/error-handling.js diff --git a/web/gui/src/dashboard.js/localstorage.js b/src/web/gui/src/dashboard.js/localstorage.js similarity index 100% rename from web/gui/src/dashboard.js/localstorage.js rename to src/web/gui/src/dashboard.js/localstorage.js diff --git a/web/gui/src/dashboard.js/main.js b/src/web/gui/src/dashboard.js/main.js similarity index 100% rename from web/gui/src/dashboard.js/main.js rename to src/web/gui/src/dashboard.js/main.js diff --git a/web/gui/src/dashboard.js/options.js b/src/web/gui/src/dashboard.js/options.js similarity index 100% rename from web/gui/src/dashboard.js/options.js rename to src/web/gui/src/dashboard.js/options.js diff --git a/web/gui/src/dashboard.js/prologue.js.inc b/src/web/gui/src/dashboard.js/prologue.js.inc similarity index 100% rename from web/gui/src/dashboard.js/prologue.js.inc rename to src/web/gui/src/dashboard.js/prologue.js.inc diff --git a/web/gui/src/dashboard.js/registry.js 
b/src/web/gui/src/dashboard.js/registry.js similarity index 100% rename from web/gui/src/dashboard.js/registry.js rename to src/web/gui/src/dashboard.js/registry.js diff --git a/web/gui/src/dashboard.js/server-detection.js b/src/web/gui/src/dashboard.js/server-detection.js similarity index 100% rename from web/gui/src/dashboard.js/server-detection.js rename to src/web/gui/src/dashboard.js/server-detection.js diff --git a/web/gui/src/dashboard.js/themes.js b/src/web/gui/src/dashboard.js/themes.js similarity index 100% rename from web/gui/src/dashboard.js/themes.js rename to src/web/gui/src/dashboard.js/themes.js diff --git a/web/gui/src/dashboard.js/timeout.js b/src/web/gui/src/dashboard.js/timeout.js similarity index 100% rename from web/gui/src/dashboard.js/timeout.js rename to src/web/gui/src/dashboard.js/timeout.js diff --git a/web/gui/src/dashboard.js/units-conversion.js b/src/web/gui/src/dashboard.js/units-conversion.js similarity index 100% rename from web/gui/src/dashboard.js/units-conversion.js rename to src/web/gui/src/dashboard.js/units-conversion.js diff --git a/web/gui/src/dashboard.js/utils.js b/src/web/gui/src/dashboard.js/utils.js similarity index 100% rename from web/gui/src/dashboard.js/utils.js rename to src/web/gui/src/dashboard.js/utils.js diff --git a/web/gui/src/dashboard.js/xss.js b/src/web/gui/src/dashboard.js/xss.js similarity index 100% rename from web/gui/src/dashboard.js/xss.js rename to src/web/gui/src/dashboard.js/xss.js diff --git a/web/gui/static/img/netdata-logomark.svg b/src/web/gui/static/img/netdata-logomark.svg similarity index 100% rename from web/gui/static/img/netdata-logomark.svg rename to src/web/gui/static/img/netdata-logomark.svg diff --git a/web/gui/static/splash.css b/src/web/gui/static/splash.css similarity index 100% rename from web/gui/static/splash.css rename to src/web/gui/static/splash.css diff --git a/web/gui/switch.html b/src/web/gui/switch.html similarity index 100% rename from web/gui/switch.html rename to src/web/gui/switch.html diff --git a/web/gui/v0 b/src/web/gui/v0 similarity index 100% rename from web/gui/v0 rename to src/web/gui/v0 diff --git a/web/gui/v1/README.md b/src/web/gui/v1/README.md similarity index 100% rename from web/gui/v1/README.md rename to src/web/gui/v1/README.md diff --git a/web/gui/v1/asset-manifest.json b/src/web/gui/v1/asset-manifest.json similarity index 100% rename from web/gui/v1/asset-manifest.json rename to src/web/gui/v1/asset-manifest.json diff --git a/web/gui/v1/console.html b/src/web/gui/v1/console.html similarity index 100% rename from web/gui/v1/console.html rename to src/web/gui/v1/console.html diff --git a/web/gui/v1/css/bootstrap-3.3.7.css b/src/web/gui/v1/css/bootstrap-3.3.7.css similarity index 100% rename from web/gui/v1/css/bootstrap-3.3.7.css rename to src/web/gui/v1/css/bootstrap-3.3.7.css diff --git a/web/gui/v1/css/bootstrap-slate-flat-3.3.7.css b/src/web/gui/v1/css/bootstrap-slate-flat-3.3.7.css similarity index 100% rename from web/gui/v1/css/bootstrap-slate-flat-3.3.7.css rename to src/web/gui/v1/css/bootstrap-slate-flat-3.3.7.css diff --git a/web/gui/v1/css/bootstrap-slider-10.0.0.min.css b/src/web/gui/v1/css/bootstrap-slider-10.0.0.min.css similarity index 100% rename from web/gui/v1/css/bootstrap-slider-10.0.0.min.css rename to src/web/gui/v1/css/bootstrap-slider-10.0.0.min.css diff --git a/web/gui/v1/css/bootstrap-theme-3.3.7.min.css b/src/web/gui/v1/css/bootstrap-theme-3.3.7.min.css similarity index 100% rename from web/gui/v1/css/bootstrap-theme-3.3.7.min.css rename to 
src/web/gui/v1/css/bootstrap-theme-3.3.7.min.css diff --git a/web/gui/v1/css/bootstrap-toggle-2.2.2.min.css b/src/web/gui/v1/css/bootstrap-toggle-2.2.2.min.css similarity index 100% rename from web/gui/v1/css/bootstrap-toggle-2.2.2.min.css rename to src/web/gui/v1/css/bootstrap-toggle-2.2.2.min.css diff --git a/web/gui/v1/css/dashboard.css b/src/web/gui/v1/css/dashboard.css similarity index 100% rename from web/gui/v1/css/dashboard.css rename to src/web/gui/v1/css/dashboard.css diff --git a/web/gui/v1/css/dashboard.slate.css b/src/web/gui/v1/css/dashboard.slate.css similarity index 100% rename from web/gui/v1/css/dashboard.slate.css rename to src/web/gui/v1/css/dashboard.slate.css diff --git a/web/gui/v1/dash-example.html b/src/web/gui/v1/dash-example.html similarity index 100% rename from web/gui/v1/dash-example.html rename to src/web/gui/v1/dash-example.html diff --git a/web/gui/v1/dashboard-react.js b/src/web/gui/v1/dashboard-react.js similarity index 100% rename from web/gui/v1/dashboard-react.js rename to src/web/gui/v1/dashboard-react.js diff --git a/web/gui/v1/dashboard.css b/src/web/gui/v1/dashboard.css similarity index 100% rename from web/gui/v1/dashboard.css rename to src/web/gui/v1/dashboard.css diff --git a/web/gui/v1/dashboard.html b/src/web/gui/v1/dashboard.html similarity index 100% rename from web/gui/v1/dashboard.html rename to src/web/gui/v1/dashboard.html diff --git a/web/gui/v1/dashboard.js b/src/web/gui/v1/dashboard.js similarity index 100% rename from web/gui/v1/dashboard.js rename to src/web/gui/v1/dashboard.js diff --git a/web/gui/v1/dashboard.slate.css b/src/web/gui/v1/dashboard.slate.css similarity index 100% rename from web/gui/v1/dashboard.slate.css rename to src/web/gui/v1/dashboard.slate.css diff --git a/src/web/gui/v1/dashboard_v1.cmake b/src/web/gui/v1/dashboard_v1.cmake new file mode 100644 index 00000000000000..e60e8170d927eb --- /dev/null +++ b/src/web/gui/v1/dashboard_v1.cmake @@ -0,0 +1,168 @@ + + install(FILES src/web/gui/v1/asset-manifest.json +src/web/gui/v1/console.html +src/web/gui/v1/dash-example.html +src/web/gui/v1/dashboard-react.js +src/web/gui/v1/dashboard.css +src/web/gui/v1/dashboard.html +src/web/gui/v1/dashboard.js +src/web/gui/v1/dashboard.slate.css +src/web/gui/v1/demo.html +src/web/gui/v1/demo2.html +src/web/gui/v1/demosites.html +src/web/gui/v1/demosites2.html +src/web/gui/v1/favicon.ico +src/web/gui/v1/goto-host-from-alarm.html +src/web/gui/v1/index-node-view.html +src/web/gui/v1/infographic.html +src/web/gui/v1/manifest.json +src/web/gui/v1/precache-manifest.e2d3811ef5e4b7e75e1f56d6ee92ef2c.js +src/web/gui/v1/refresh-badges.js +src/web/gui/v1/robots.txt +src/web/gui/v1/service-worker.js +src/web/gui/v1/sitemap.xml +src/web/gui/v1/tv-react.html +src/web/gui/v1/tv.html DESTINATION ${WEB_DEST}) + install(FILES src/web/gui/v1/css/bootstrap-3.3.7.css +src/web/gui/v1/css/bootstrap-slate-flat-3.3.7.css +src/web/gui/v1/css/bootstrap-slider-10.0.0.min.css +src/web/gui/v1/css/bootstrap-theme-3.3.7.min.css +src/web/gui/v1/css/bootstrap-toggle-2.2.2.min.css +src/web/gui/v1/css/dashboard.css +src/web/gui/v1/css/dashboard.slate.css DESTINATION ${WEB_DEST}/css) + install(FILES src/web/gui/v1/fonts/glyphicons-halflings-regular.eot +src/web/gui/v1/fonts/glyphicons-halflings-regular.svg +src/web/gui/v1/fonts/glyphicons-halflings-regular.ttf +src/web/gui/v1/fonts/glyphicons-halflings-regular.woff +src/web/gui/v1/fonts/glyphicons-halflings-regular.woff2 DESTINATION ${WEB_DEST}/fonts) + install(FILES src/web/gui/v1/images/alert-128-orange.png 
+src/web/gui/v1/images/alert-128-red.png
+src/web/gui/v1/images/alert-multi-size-orange.ico
+src/web/gui/v1/images/alert-multi-size-red.ico
+src/web/gui/v1/images/alerts.jpg
+src/web/gui/v1/images/alerts.png
+src/web/gui/v1/images/android-icon-144x144.png
+src/web/gui/v1/images/android-icon-192x192.png
+src/web/gui/v1/images/android-icon-36x36.png
+src/web/gui/v1/images/android-icon-48x48.png
+src/web/gui/v1/images/android-icon-72x72.png
+src/web/gui/v1/images/android-icon-96x96.png
+src/web/gui/v1/images/animated.gif
+src/web/gui/v1/images/apple-icon-114x114.png
+src/web/gui/v1/images/apple-icon-120x120.png
+src/web/gui/v1/images/apple-icon-144x144.png
+src/web/gui/v1/images/apple-icon-152x152.png
+src/web/gui/v1/images/apple-icon-180x180.png
+src/web/gui/v1/images/apple-icon-57x57.png
+src/web/gui/v1/images/apple-icon-60x60.png
+src/web/gui/v1/images/apple-icon-72x72.png
+src/web/gui/v1/images/apple-icon-76x76.png
+src/web/gui/v1/images/apple-icon-precomposed.png
+src/web/gui/v1/images/apple-icon.png
+src/web/gui/v1/images/banner-icon-144x144.png
+src/web/gui/v1/images/check-mark-2-128-green.png
+src/web/gui/v1/images/check-mark-2-multi-size-green.ico
+src/web/gui/v1/images/dashboards.png
+src/web/gui/v1/images/favicon-128.png
+src/web/gui/v1/images/favicon-16x16.png
+src/web/gui/v1/images/favicon-196x196.png
+src/web/gui/v1/images/favicon-32x32.png
+src/web/gui/v1/images/favicon-96x96.png
+src/web/gui/v1/images/favicon.ico
+src/web/gui/v1/images/home.png
+src/web/gui/v1/images/ms-icon-144x144.png
+src/web/gui/v1/images/ms-icon-150x150.png
+src/web/gui/v1/images/ms-icon-310x150.png
+src/web/gui/v1/images/ms-icon-310x310.png
+src/web/gui/v1/images/ms-icon-36x36.png
+src/web/gui/v1/images/ms-icon-70x70.png
+src/web/gui/v1/images/netdata-logomark.svg
+src/web/gui/v1/images/netdata.svg
+src/web/gui/v1/images/nodeView.png
+src/web/gui/v1/images/nodes.jpg
+src/web/gui/v1/images/overview.png
+src/web/gui/v1/images/packaging-beta-tag.svg
+src/web/gui/v1/images/post.png
+src/web/gui/v1/images/pricing.png
+src/web/gui/v1/images/seo-performance-128.png DESTINATION ${WEB_DEST}/images)
+ install(FILES src/web/gui/v1/lib/bootstrap-3.3.7.min.js
+src/web/gui/v1/lib/bootstrap-slider-10.0.0.min.js
+src/web/gui/v1/lib/bootstrap-table-1.11.0.min.js
+src/web/gui/v1/lib/bootstrap-table-export-1.11.0.min.js
+src/web/gui/v1/lib/bootstrap-toggle-2.2.2.min.js
+src/web/gui/v1/lib/clipboard-polyfill-be05dad.js
+src/web/gui/v1/lib/d3-4.12.2.min.js
+src/web/gui/v1/lib/d3pie-0.2.1-netdata-3.js
+src/web/gui/v1/lib/dygraph-c91c859.min.js
+src/web/gui/v1/lib/dygraph-smooth-plotter-c91c859.js
+src/web/gui/v1/lib/fontawesome-all-5.0.1.min.js
+src/web/gui/v1/lib/gauge-1.3.2.min.js
+src/web/gui/v1/lib/jquery-3.6.0.min.js
+src/web/gui/v1/lib/jquery.easypiechart-97b5824.min.js
+src/web/gui/v1/lib/jquery.peity-3.2.0.min.js
+src/web/gui/v1/lib/jquery.sparkline-2.1.2.min.js
+src/web/gui/v1/lib/lz-string-1.4.4.min.js
+src/web/gui/v1/lib/pako-1.0.6.min.js
+src/web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js
+src/web/gui/v1/lib/tableExport-1.6.0.min.js DESTINATION ${WEB_DEST}/lib)
+ install(FILES src/web/gui/v1/static/css/2.c454aab8.chunk.css
+src/web/gui/v1/static/css/2.c454aab8.chunk.css.map
+src/web/gui/v1/static/css/4.a36e3b73.chunk.css
+src/web/gui/v1/static/css/4.a36e3b73.chunk.css.map
+src/web/gui/v1/static/css/main.53ba10f1.chunk.css
+src/web/gui/v1/static/css/main.53ba10f1.chunk.css.map DESTINATION ${WEB_DEST}/static/css)
+ install(FILES src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js
+src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map
+src/web/gui/v1/static/js/2.62d105c5.chunk.js
+src/web/gui/v1/static/js/2.62d105c5.chunk.js.LICENSE
+src/web/gui/v1/static/js/2.62d105c5.chunk.js.map
+src/web/gui/v1/static/js/3.f137faca.chunk.js
+src/web/gui/v1/static/js/3.f137faca.chunk.js.map
+src/web/gui/v1/static/js/4.2dbcd906.chunk.js
+src/web/gui/v1/static/js/4.2dbcd906.chunk.js.map
+src/web/gui/v1/static/js/5.2f783a54.chunk.js
+src/web/gui/v1/static/js/5.2f783a54.chunk.js.LICENSE
+src/web/gui/v1/static/js/5.2f783a54.chunk.js.map
+src/web/gui/v1/static/js/6.e1951239.chunk.js
+src/web/gui/v1/static/js/6.e1951239.chunk.js.map
+src/web/gui/v1/static/js/7.c2417fb0.chunk.js
+src/web/gui/v1/static/js/7.c2417fb0.chunk.js.map
+src/web/gui/v1/static/js/8.b4161ea2.chunk.js
+src/web/gui/v1/static/js/8.b4161ea2.chunk.js.map
+src/web/gui/v1/static/js/9.a4363968.chunk.js
+src/web/gui/v1/static/js/9.a4363968.chunk.js.map
+src/web/gui/v1/static/js/main.e248095a.chunk.js
+src/web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE
+src/web/gui/v1/static/js/main.e248095a.chunk.js.map
+src/web/gui/v1/static/js/runtime-main.08abed8f.js
+src/web/gui/v1/static/js/runtime-main.08abed8f.js.map DESTINATION ${WEB_DEST}/static/js)
+ install(FILES src/web/gui/v1/static/media/ibm-plex-sans-latin-100.245539db.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-100.9a582f3a.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-100italic.1ea7c5d2.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-100italic.3c34cf08.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-200.67524c36.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-200.bf72c841.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-200italic.52df2560.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-200italic.bbc2d552.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-300.10bb6a0a.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-300.9e1c48af.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-300italic.c76f2ab5.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-300italic.d3566d5b.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-400.263d6267.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-400.a2c56f94.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-400italic.272f8611.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-400italic.89a93a1b.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-500.0866c244.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-500.f6d5c5d5.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-500italic.ccd41bd1.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-500italic.ffd12d59.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-600.337b1651.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-600.7852d4dc.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-600italic.17e5379f.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-600italic.6f4ba6aa.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-700.b8809d61.woff
+src/web/gui/v1/static/media/ibm-plex-sans-latin-700.c9983d3d.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-700italic.02954bee.woff2
+src/web/gui/v1/static/media/ibm-plex-sans-latin-700italic.72e9af40.woff
+src/web/gui/v1/static/media/material-icons.0509ab09.woff2 DESTINATION ${WEB_DEST}/static/media)
+ install(FILES src/web/gui/v1/index.html DESTINATION ${WEB_DEST}/v1)
diff --git a/web/gui/v1/demo.html b/src/web/gui/v1/demo.html
similarity index 100%
rename from web/gui/v1/demo.html
rename to src/web/gui/v1/demo.html
diff --git a/web/gui/v1/demo2.html b/src/web/gui/v1/demo2.html
similarity index 100%
rename from web/gui/v1/demo2.html
rename to src/web/gui/v1/demo2.html
diff --git a/web/gui/v1/demosites.html b/src/web/gui/v1/demosites.html
similarity index 99%
rename from web/gui/v1/demosites.html
rename to src/web/gui/v1/demosites.html
index 792a9ed2800a98..2ce20df05f13df 100644
--- a/web/gui/v1/demosites.html
+++ b/src/web/gui/v1/demosites.html
@@ -1281,7 +1281,7 @@ <h3><span class=star>★</span> Immediate results</h3>
 </p>
 <p>
 <small>
- netdata can generate auto-refreshing <strong><a href="https://github.com/netdata/netdata/tree/master/web/api/badges#netdata-badges" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Badges>badges</a></strong>, like these:
+ netdata can generate auto-refreshing <strong><a href="https://github.com/netdata/netdata/tree/master/src/web/api/badges#netdata-badges" target="_blank" data-ga-category="Outbound links" data-ga-action="Nav click" data-ga-label=Badges>badges</a></strong>, like these:
 </small>
 <br/>
 <embed style="padding-top: 10px; padding-bottom: 25px;" src="//registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&refresh=60&v42" type="image/svg+xml" height="20" />
diff --git a/web/gui/v1/demosites2.html b/src/web/gui/v1/demosites2.html
similarity index 100%
rename from web/gui/v1/demosites2.html
rename to src/web/gui/v1/demosites2.html
diff --git a/web/gui/v1/favicon.ico b/src/web/gui/v1/favicon.ico
similarity index 100%
rename from web/gui/v1/favicon.ico
rename to src/web/gui/v1/favicon.ico
diff --git a/web/gui/v1/fonts/glyphicons-halflings-regular.eot b/src/web/gui/v1/fonts/glyphicons-halflings-regular.eot
similarity index 100%
rename from web/gui/v1/fonts/glyphicons-halflings-regular.eot
rename to src/web/gui/v1/fonts/glyphicons-halflings-regular.eot
diff --git a/web/gui/v1/fonts/glyphicons-halflings-regular.svg b/src/web/gui/v1/fonts/glyphicons-halflings-regular.svg
similarity index 100%
rename from web/gui/v1/fonts/glyphicons-halflings-regular.svg
rename to src/web/gui/v1/fonts/glyphicons-halflings-regular.svg
diff --git a/web/gui/v1/fonts/glyphicons-halflings-regular.ttf b/src/web/gui/v1/fonts/glyphicons-halflings-regular.ttf
similarity index 100%
rename from web/gui/v1/fonts/glyphicons-halflings-regular.ttf
rename to src/web/gui/v1/fonts/glyphicons-halflings-regular.ttf
diff --git a/web/gui/v1/fonts/glyphicons-halflings-regular.woff b/src/web/gui/v1/fonts/glyphicons-halflings-regular.woff
similarity index 100%
rename from web/gui/v1/fonts/glyphicons-halflings-regular.woff
rename to src/web/gui/v1/fonts/glyphicons-halflings-regular.woff
diff --git a/web/gui/v1/fonts/glyphicons-halflings-regular.woff2 b/src/web/gui/v1/fonts/glyphicons-halflings-regular.woff2
similarity index 100%
rename from web/gui/v1/fonts/glyphicons-halflings-regular.woff2
rename to src/web/gui/v1/fonts/glyphicons-halflings-regular.woff2
diff --git a/web/gui/v1/goto-host-from-alarm.html b/src/web/gui/v1/goto-host-from-alarm.html
similarity index 100%
rename from web/gui/v1/goto-host-from-alarm.html
rename to src/web/gui/v1/goto-host-from-alarm.html
diff --git a/web/gui/v1/images/alert-128-orange.png b/src/web/gui/v1/images/alert-128-orange.png
similarity index 100%
rename from web/gui/v1/images/alert-128-orange.png
rename to src/web/gui/v1/images/alert-128-orange.png
diff --git a/web/gui/v1/images/alert-128-red.png b/src/web/gui/v1/images/alert-128-red.png
similarity index 100%
rename from web/gui/v1/images/alert-128-red.png
rename to src/web/gui/v1/images/alert-128-red.png
diff --git a/web/gui/v1/images/alert-multi-size-orange.ico b/src/web/gui/v1/images/alert-multi-size-orange.ico
similarity index 100%
rename from web/gui/v1/images/alert-multi-size-orange.ico
rename to src/web/gui/v1/images/alert-multi-size-orange.ico
diff --git a/web/gui/v1/images/alert-multi-size-red.ico b/src/web/gui/v1/images/alert-multi-size-red.ico
similarity index 100%
rename from web/gui/v1/images/alert-multi-size-red.ico
rename to src/web/gui/v1/images/alert-multi-size-red.ico
diff --git a/web/gui/v1/images/alerts.jpg b/src/web/gui/v1/images/alerts.jpg
similarity index 100%
rename from web/gui/v1/images/alerts.jpg
rename to src/web/gui/v1/images/alerts.jpg
diff --git a/web/gui/v1/images/alerts.png b/src/web/gui/v1/images/alerts.png
similarity index 100%
rename from web/gui/v1/images/alerts.png
rename to src/web/gui/v1/images/alerts.png
diff --git a/web/gui/v1/images/android-icon-144x144.png b/src/web/gui/v1/images/android-icon-144x144.png
similarity index 100%
rename from web/gui/v1/images/android-icon-144x144.png
rename to src/web/gui/v1/images/android-icon-144x144.png
diff --git a/web/gui/v1/images/android-icon-192x192.png b/src/web/gui/v1/images/android-icon-192x192.png
similarity index 100%
rename from web/gui/v1/images/android-icon-192x192.png
rename to src/web/gui/v1/images/android-icon-192x192.png
diff --git a/web/gui/v1/images/android-icon-36x36.png b/src/web/gui/v1/images/android-icon-36x36.png
similarity index 100%
rename from web/gui/v1/images/android-icon-36x36.png
rename to src/web/gui/v1/images/android-icon-36x36.png
diff --git a/web/gui/v1/images/android-icon-48x48.png b/src/web/gui/v1/images/android-icon-48x48.png
similarity index 100%
rename from web/gui/v1/images/android-icon-48x48.png
rename to src/web/gui/v1/images/android-icon-48x48.png
diff --git a/web/gui/v1/images/android-icon-72x72.png b/src/web/gui/v1/images/android-icon-72x72.png
similarity index 100%
rename from web/gui/v1/images/android-icon-72x72.png
rename to src/web/gui/v1/images/android-icon-72x72.png
diff --git a/web/gui/v1/images/android-icon-96x96.png b/src/web/gui/v1/images/android-icon-96x96.png
similarity index 100%
rename from web/gui/v1/images/android-icon-96x96.png
rename to src/web/gui/v1/images/android-icon-96x96.png
diff --git a/web/gui/v1/images/animated.gif b/src/web/gui/v1/images/animated.gif
similarity index 100%
rename from web/gui/v1/images/animated.gif
rename to src/web/gui/v1/images/animated.gif
diff --git a/web/gui/v1/images/apple-icon-114x114.png b/src/web/gui/v1/images/apple-icon-114x114.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-114x114.png
rename to src/web/gui/v1/images/apple-icon-114x114.png
diff --git a/web/gui/v1/images/apple-icon-120x120.png b/src/web/gui/v1/images/apple-icon-120x120.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-120x120.png
rename to src/web/gui/v1/images/apple-icon-120x120.png
diff --git a/web/gui/v1/images/apple-icon-144x144.png b/src/web/gui/v1/images/apple-icon-144x144.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-144x144.png
rename to src/web/gui/v1/images/apple-icon-144x144.png
diff --git a/web/gui/v1/images/apple-icon-152x152.png b/src/web/gui/v1/images/apple-icon-152x152.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-152x152.png
rename to src/web/gui/v1/images/apple-icon-152x152.png
diff --git a/web/gui/v1/images/apple-icon-180x180.png b/src/web/gui/v1/images/apple-icon-180x180.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-180x180.png
rename to src/web/gui/v1/images/apple-icon-180x180.png
diff --git a/web/gui/v1/images/apple-icon-57x57.png b/src/web/gui/v1/images/apple-icon-57x57.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-57x57.png
rename to src/web/gui/v1/images/apple-icon-57x57.png
diff --git a/web/gui/v1/images/apple-icon-60x60.png b/src/web/gui/v1/images/apple-icon-60x60.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-60x60.png
rename to src/web/gui/v1/images/apple-icon-60x60.png
diff --git a/web/gui/v1/images/apple-icon-72x72.png b/src/web/gui/v1/images/apple-icon-72x72.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-72x72.png
rename to src/web/gui/v1/images/apple-icon-72x72.png
diff --git a/web/gui/v1/images/apple-icon-76x76.png b/src/web/gui/v1/images/apple-icon-76x76.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-76x76.png
rename to src/web/gui/v1/images/apple-icon-76x76.png
diff --git a/web/gui/v1/images/apple-icon-precomposed.png b/src/web/gui/v1/images/apple-icon-precomposed.png
similarity index 100%
rename from web/gui/v1/images/apple-icon-precomposed.png
rename to src/web/gui/v1/images/apple-icon-precomposed.png
diff --git a/web/gui/v1/images/apple-icon.png b/src/web/gui/v1/images/apple-icon.png
similarity index 100%
rename from web/gui/v1/images/apple-icon.png
rename to src/web/gui/v1/images/apple-icon.png
diff --git a/web/gui/v1/images/banner-icon-144x144.png b/src/web/gui/v1/images/banner-icon-144x144.png
similarity index 100%
rename from web/gui/v1/images/banner-icon-144x144.png
rename to src/web/gui/v1/images/banner-icon-144x144.png
diff --git a/web/gui/v1/images/check-mark-2-128-green.png b/src/web/gui/v1/images/check-mark-2-128-green.png
similarity index 100%
rename from web/gui/v1/images/check-mark-2-128-green.png
rename to src/web/gui/v1/images/check-mark-2-128-green.png
diff --git a/web/gui/v1/images/check-mark-2-multi-size-green.ico b/src/web/gui/v1/images/check-mark-2-multi-size-green.ico
similarity index 100%
rename from web/gui/v1/images/check-mark-2-multi-size-green.ico
rename to src/web/gui/v1/images/check-mark-2-multi-size-green.ico
diff --git a/web/gui/v1/images/dashboards.png b/src/web/gui/v1/images/dashboards.png
similarity index 100%
rename from web/gui/v1/images/dashboards.png
rename to src/web/gui/v1/images/dashboards.png
diff --git a/web/gui/v1/images/favicon-128.png b/src/web/gui/v1/images/favicon-128.png
similarity index 100%
rename from web/gui/v1/images/favicon-128.png
rename to src/web/gui/v1/images/favicon-128.png
diff --git a/web/gui/v1/images/favicon-16x16.png b/src/web/gui/v1/images/favicon-16x16.png
similarity index 100%
rename from web/gui/v1/images/favicon-16x16.png
rename to src/web/gui/v1/images/favicon-16x16.png
diff --git a/web/gui/v1/images/favicon-196x196.png b/src/web/gui/v1/images/favicon-196x196.png
similarity index 100%
rename from web/gui/v1/images/favicon-196x196.png
rename to src/web/gui/v1/images/favicon-196x196.png
diff --git a/web/gui/v1/images/favicon-32x32.png b/src/web/gui/v1/images/favicon-32x32.png
similarity index 100%
rename from web/gui/v1/images/favicon-32x32.png
rename to src/web/gui/v1/images/favicon-32x32.png
diff --git a/web/gui/v1/images/favicon-96x96.png b/src/web/gui/v1/images/favicon-96x96.png
similarity index 100%
rename from web/gui/v1/images/favicon-96x96.png
rename to src/web/gui/v1/images/favicon-96x96.png
diff --git a/web/gui/v1/images/favicon.ico b/src/web/gui/v1/images/favicon.ico
similarity index 100%
rename from web/gui/v1/images/favicon.ico
rename to src/web/gui/v1/images/favicon.ico
diff --git a/web/gui/v1/images/home.png b/src/web/gui/v1/images/home.png
similarity index 100%
rename from web/gui/v1/images/home.png
rename to src/web/gui/v1/images/home.png
diff --git a/web/gui/v1/images/ms-icon-144x144.png b/src/web/gui/v1/images/ms-icon-144x144.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-144x144.png
rename to src/web/gui/v1/images/ms-icon-144x144.png
diff --git a/web/gui/v1/images/ms-icon-150x150.png b/src/web/gui/v1/images/ms-icon-150x150.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-150x150.png
rename to src/web/gui/v1/images/ms-icon-150x150.png
diff --git a/web/gui/v1/images/ms-icon-310x150.png b/src/web/gui/v1/images/ms-icon-310x150.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-310x150.png
rename to src/web/gui/v1/images/ms-icon-310x150.png
diff --git a/web/gui/v1/images/ms-icon-310x310.png b/src/web/gui/v1/images/ms-icon-310x310.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-310x310.png
rename to src/web/gui/v1/images/ms-icon-310x310.png
diff --git a/web/gui/v1/images/ms-icon-36x36.png b/src/web/gui/v1/images/ms-icon-36x36.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-36x36.png
rename to src/web/gui/v1/images/ms-icon-36x36.png
diff --git a/web/gui/v1/images/ms-icon-70x70.png b/src/web/gui/v1/images/ms-icon-70x70.png
similarity index 100%
rename from web/gui/v1/images/ms-icon-70x70.png
rename to src/web/gui/v1/images/ms-icon-70x70.png
diff --git a/web/gui/v1/images/netdata-logomark.svg b/src/web/gui/v1/images/netdata-logomark.svg
similarity index 100%
rename from web/gui/v1/images/netdata-logomark.svg
rename to src/web/gui/v1/images/netdata-logomark.svg
diff --git a/web/gui/v1/images/netdata.svg b/src/web/gui/v1/images/netdata.svg
similarity index 100%
rename from web/gui/v1/images/netdata.svg
rename to src/web/gui/v1/images/netdata.svg
diff --git a/web/gui/v1/images/nodeView.png b/src/web/gui/v1/images/nodeView.png
similarity index 100%
rename from web/gui/v1/images/nodeView.png
rename to src/web/gui/v1/images/nodeView.png
diff --git a/web/gui/v1/images/nodes.jpg b/src/web/gui/v1/images/nodes.jpg
similarity index 100%
rename from web/gui/v1/images/nodes.jpg
rename to src/web/gui/v1/images/nodes.jpg
diff --git a/web/gui/v1/images/overview.png b/src/web/gui/v1/images/overview.png
similarity index 100%
rename from web/gui/v1/images/overview.png
rename to src/web/gui/v1/images/overview.png
diff --git a/web/gui/v1/images/packaging-beta-tag.svg b/src/web/gui/v1/images/packaging-beta-tag.svg
similarity index 100%
rename from web/gui/v1/images/packaging-beta-tag.svg
rename to src/web/gui/v1/images/packaging-beta-tag.svg
diff --git a/web/gui/v1/images/post.png b/src/web/gui/v1/images/post.png
similarity index 100%
rename from web/gui/v1/images/post.png
rename to src/web/gui/v1/images/post.png
diff --git a/web/gui/v1/images/pricing.png b/src/web/gui/v1/images/pricing.png
similarity index 100%
rename from web/gui/v1/images/pricing.png
rename to src/web/gui/v1/images/pricing.png
diff --git a/web/gui/v1/images/seo-performance-128.png b/src/web/gui/v1/images/seo-performance-128.png
similarity index 100%
rename from web/gui/v1/images/seo-performance-128.png
rename to src/web/gui/v1/images/seo-performance-128.png
diff --git a/web/gui/v1/index-node-view.html b/src/web/gui/v1/index-node-view.html
similarity index 100%
rename from web/gui/v1/index-node-view.html
rename to src/web/gui/v1/index-node-view.html
diff --git a/web/gui/v1/index.html b/src/web/gui/v1/index.html
similarity index 100%
rename from web/gui/v1/index.html
rename to src/web/gui/v1/index.html
diff --git a/web/gui/v1/infographic.html b/src/web/gui/v1/infographic.html
similarity index 99%
rename from web/gui/v1/infographic.html
rename to src/web/gui/v1/infographic.html
index 0d3f5637549e3b..18d608c6cb8c3e 100644
--- a/web/gui/v1/infographic.html
+++ b/src/web/gui/v1/infographic.html
@@ -91,8 +91,8 @@
 "toolbar":"",
 "auto-fit":true,
 "check-visible-state":false,
- "edit":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml",
- "url":"https://raw.githubusercontent.com/netdata/netdata/master/diagrams/netdata-overview.xml"
+ "edit":"https://raw.githubusercontent.com/netdata/netdata/master/docs/diagrams/netdata-overview.xml",
+ "url":"https://raw.githubusercontent.com/netdata/netdata/master/docs/diagrams/netdata-overview.xml"
 };
 document.getElementById("drawing").dataset.mxgraph = JSON.stringify(opts);
 </script>
diff --git a/web/gui/v1/lib/bootstrap-3.3.7.min.js b/src/web/gui/v1/lib/bootstrap-3.3.7.min.js
similarity index 100%
rename from web/gui/v1/lib/bootstrap-3.3.7.min.js
rename to src/web/gui/v1/lib/bootstrap-3.3.7.min.js
diff --git a/web/gui/v1/lib/bootstrap-slider-10.0.0.min.js b/src/web/gui/v1/lib/bootstrap-slider-10.0.0.min.js
similarity index 100%
rename from web/gui/v1/lib/bootstrap-slider-10.0.0.min.js
rename to src/web/gui/v1/lib/bootstrap-slider-10.0.0.min.js
diff --git a/web/gui/v1/lib/bootstrap-table-1.11.0.min.js b/src/web/gui/v1/lib/bootstrap-table-1.11.0.min.js
similarity index 100%
rename from web/gui/v1/lib/bootstrap-table-1.11.0.min.js
rename to src/web/gui/v1/lib/bootstrap-table-1.11.0.min.js
diff --git a/web/gui/v1/lib/bootstrap-table-export-1.11.0.min.js b/src/web/gui/v1/lib/bootstrap-table-export-1.11.0.min.js
similarity index 100%
rename from web/gui/v1/lib/bootstrap-table-export-1.11.0.min.js
rename to src/web/gui/v1/lib/bootstrap-table-export-1.11.0.min.js
diff --git a/web/gui/v1/lib/bootstrap-toggle-2.2.2.min.js b/src/web/gui/v1/lib/bootstrap-toggle-2.2.2.min.js
similarity index 100%
rename from web/gui/v1/lib/bootstrap-toggle-2.2.2.min.js
rename to src/web/gui/v1/lib/bootstrap-toggle-2.2.2.min.js
diff --git a/web/gui/v1/lib/clipboard-polyfill-be05dad.js b/src/web/gui/v1/lib/clipboard-polyfill-be05dad.js
similarity index 100%
rename from web/gui/v1/lib/clipboard-polyfill-be05dad.js
rename to src/web/gui/v1/lib/clipboard-polyfill-be05dad.js
diff --git a/web/gui/v1/lib/d3-4.12.2.min.js b/src/web/gui/v1/lib/d3-4.12.2.min.js
similarity index 100%
rename from web/gui/v1/lib/d3-4.12.2.min.js
rename to src/web/gui/v1/lib/d3-4.12.2.min.js
diff --git a/web/gui/v1/lib/d3pie-0.2.1-netdata-3.js b/src/web/gui/v1/lib/d3pie-0.2.1-netdata-3.js
similarity index 100%
rename from web/gui/v1/lib/d3pie-0.2.1-netdata-3.js
rename to src/web/gui/v1/lib/d3pie-0.2.1-netdata-3.js
diff --git a/web/gui/v1/lib/dygraph-c91c859.min.js b/src/web/gui/v1/lib/dygraph-c91c859.min.js
similarity index 100%
rename from web/gui/v1/lib/dygraph-c91c859.min.js
rename to src/web/gui/v1/lib/dygraph-c91c859.min.js
diff --git a/web/gui/v1/lib/dygraph-smooth-plotter-c91c859.js b/src/web/gui/v1/lib/dygraph-smooth-plotter-c91c859.js
similarity index 100%
rename from web/gui/v1/lib/dygraph-smooth-plotter-c91c859.js
rename to src/web/gui/v1/lib/dygraph-smooth-plotter-c91c859.js
diff --git a/web/gui/v1/lib/fontawesome-all-5.0.1.min.js b/src/web/gui/v1/lib/fontawesome-all-5.0.1.min.js
similarity index 100%
rename from web/gui/v1/lib/fontawesome-all-5.0.1.min.js
rename to src/web/gui/v1/lib/fontawesome-all-5.0.1.min.js
diff --git a/web/gui/v1/lib/gauge-1.3.2.min.js b/src/web/gui/v1/lib/gauge-1.3.2.min.js
similarity index 100%
rename from web/gui/v1/lib/gauge-1.3.2.min.js
rename to src/web/gui/v1/lib/gauge-1.3.2.min.js
diff --git a/web/gui/v1/lib/jquery-3.6.0.min.js b/src/web/gui/v1/lib/jquery-3.6.0.min.js
similarity index 100%
rename from web/gui/v1/lib/jquery-3.6.0.min.js
rename to src/web/gui/v1/lib/jquery-3.6.0.min.js
diff --git a/web/gui/v1/lib/jquery.easypiechart-97b5824.min.js b/src/web/gui/v1/lib/jquery.easypiechart-97b5824.min.js
similarity index 100%
rename from web/gui/v1/lib/jquery.easypiechart-97b5824.min.js
rename to src/web/gui/v1/lib/jquery.easypiechart-97b5824.min.js
diff --git a/web/gui/v1/lib/jquery.peity-3.2.0.min.js b/src/web/gui/v1/lib/jquery.peity-3.2.0.min.js
similarity index 100%
rename from web/gui/v1/lib/jquery.peity-3.2.0.min.js
rename to src/web/gui/v1/lib/jquery.peity-3.2.0.min.js
diff --git a/web/gui/v1/lib/jquery.sparkline-2.1.2.min.js b/src/web/gui/v1/lib/jquery.sparkline-2.1.2.min.js
similarity index 100%
rename from web/gui/v1/lib/jquery.sparkline-2.1.2.min.js
rename to src/web/gui/v1/lib/jquery.sparkline-2.1.2.min.js
diff --git a/web/gui/v1/lib/lz-string-1.4.4.min.js b/src/web/gui/v1/lib/lz-string-1.4.4.min.js
similarity index 100%
rename from web/gui/v1/lib/lz-string-1.4.4.min.js
rename to src/web/gui/v1/lib/lz-string-1.4.4.min.js
diff --git a/web/gui/v1/lib/pako-1.0.6.min.js b/src/web/gui/v1/lib/pako-1.0.6.min.js
similarity index 100%
rename from web/gui/v1/lib/pako-1.0.6.min.js
rename to src/web/gui/v1/lib/pako-1.0.6.min.js
diff --git a/web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js b/src/web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js
similarity index 100%
rename from web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js
rename to src/web/gui/v1/lib/perfect-scrollbar-0.6.15.min.js
diff --git a/web/gui/v1/lib/tableExport-1.6.0.min.js b/src/web/gui/v1/lib/tableExport-1.6.0.min.js
similarity index 100%
rename from web/gui/v1/lib/tableExport-1.6.0.min.js
rename to src/web/gui/v1/lib/tableExport-1.6.0.min.js
diff --git a/web/gui/v1/manifest.json b/src/web/gui/v1/manifest.json
similarity index 100%
rename from web/gui/v1/manifest.json
rename to src/web/gui/v1/manifest.json
diff --git a/web/gui/v1/precache-manifest.e2d3811ef5e4b7e75e1f56d6ee92ef2c.js b/src/web/gui/v1/precache-manifest.e2d3811ef5e4b7e75e1f56d6ee92ef2c.js
similarity index 100%
rename from web/gui/v1/precache-manifest.e2d3811ef5e4b7e75e1f56d6ee92ef2c.js
rename to src/web/gui/v1/precache-manifest.e2d3811ef5e4b7e75e1f56d6ee92ef2c.js
diff --git a/web/gui/v1/refresh-badges.js b/src/web/gui/v1/refresh-badges.js
similarity index 100%
rename from web/gui/v1/refresh-badges.js
rename to src/web/gui/v1/refresh-badges.js
diff --git a/web/gui/v1/robots.txt b/src/web/gui/v1/robots.txt
similarity index 100%
rename from web/gui/v1/robots.txt
rename to src/web/gui/v1/robots.txt
diff --git a/web/gui/v1/service-worker.js b/src/web/gui/v1/service-worker.js
similarity index 100%
rename from web/gui/v1/service-worker.js
rename to src/web/gui/v1/service-worker.js
diff --git a/web/gui/v1/sitemap.xml b/src/web/gui/v1/sitemap.xml
similarity index 100%
rename from web/gui/v1/sitemap.xml
rename to src/web/gui/v1/sitemap.xml
diff --git a/web/gui/v1/static/css/2.c454aab8.chunk.css b/src/web/gui/v1/static/css/2.c454aab8.chunk.css
similarity index 100%
rename from web/gui/v1/static/css/2.c454aab8.chunk.css
rename to src/web/gui/v1/static/css/2.c454aab8.chunk.css
diff --git a/web/gui/v1/static/css/2.c454aab8.chunk.css.map b/src/web/gui/v1/static/css/2.c454aab8.chunk.css.map
similarity index 100%
rename from web/gui/v1/static/css/2.c454aab8.chunk.css.map
rename to src/web/gui/v1/static/css/2.c454aab8.chunk.css.map
diff --git a/web/gui/v1/static/css/4.a36e3b73.chunk.css b/src/web/gui/v1/static/css/4.a36e3b73.chunk.css
similarity index 100%
rename from web/gui/v1/static/css/4.a36e3b73.chunk.css
rename to src/web/gui/v1/static/css/4.a36e3b73.chunk.css
diff --git a/web/gui/v1/static/css/4.a36e3b73.chunk.css.map b/src/web/gui/v1/static/css/4.a36e3b73.chunk.css.map
similarity index 100%
rename from web/gui/v1/static/css/4.a36e3b73.chunk.css.map
rename to src/web/gui/v1/static/css/4.a36e3b73.chunk.css.map
diff --git a/web/gui/v1/static/css/main.53ba10f1.chunk.css b/src/web/gui/v1/static/css/main.53ba10f1.chunk.css
similarity index 100%
rename from web/gui/v1/static/css/main.53ba10f1.chunk.css
rename to src/web/gui/v1/static/css/main.53ba10f1.chunk.css
diff --git a/web/gui/v1/static/css/main.53ba10f1.chunk.css.map b/src/web/gui/v1/static/css/main.53ba10f1.chunk.css.map
similarity index 100%
rename from web/gui/v1/static/css/main.53ba10f1.chunk.css.map
rename to src/web/gui/v1/static/css/main.53ba10f1.chunk.css.map
diff --git a/web/gui/v1/static/js/10.a5cd7d0e.chunk.js b/src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/10.a5cd7d0e.chunk.js
rename to src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js
diff --git a/web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map b/src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map
rename to src/web/gui/v1/static/js/10.a5cd7d0e.chunk.js.map
diff --git a/web/gui/v1/static/js/2.62d105c5.chunk.js b/src/web/gui/v1/static/js/2.62d105c5.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/2.62d105c5.chunk.js
rename to src/web/gui/v1/static/js/2.62d105c5.chunk.js
diff --git a/web/gui/v1/static/js/2.62d105c5.chunk.js.LICENSE b/src/web/gui/v1/static/js/2.62d105c5.chunk.js.LICENSE
similarity index 100%
rename from web/gui/v1/static/js/2.62d105c5.chunk.js.LICENSE
rename to src/web/gui/v1/static/js/2.62d105c5.chunk.js.LICENSE
diff --git a/web/gui/v1/static/js/2.62d105c5.chunk.js.map b/src/web/gui/v1/static/js/2.62d105c5.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/2.62d105c5.chunk.js.map
rename to src/web/gui/v1/static/js/2.62d105c5.chunk.js.map
diff --git a/web/gui/v1/static/js/3.f137faca.chunk.js b/src/web/gui/v1/static/js/3.f137faca.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/3.f137faca.chunk.js
rename to src/web/gui/v1/static/js/3.f137faca.chunk.js
diff --git a/web/gui/v1/static/js/3.f137faca.chunk.js.map b/src/web/gui/v1/static/js/3.f137faca.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/3.f137faca.chunk.js.map
rename to src/web/gui/v1/static/js/3.f137faca.chunk.js.map
diff --git a/web/gui/v1/static/js/4.2dbcd906.chunk.js b/src/web/gui/v1/static/js/4.2dbcd906.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/4.2dbcd906.chunk.js
rename to src/web/gui/v1/static/js/4.2dbcd906.chunk.js
diff --git a/web/gui/v1/static/js/4.2dbcd906.chunk.js.map b/src/web/gui/v1/static/js/4.2dbcd906.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/4.2dbcd906.chunk.js.map
rename to src/web/gui/v1/static/js/4.2dbcd906.chunk.js.map
diff --git a/web/gui/v1/static/js/5.2f783a54.chunk.js b/src/web/gui/v1/static/js/5.2f783a54.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/5.2f783a54.chunk.js
rename to src/web/gui/v1/static/js/5.2f783a54.chunk.js
diff --git a/web/gui/v1/static/js/5.2f783a54.chunk.js.LICENSE b/src/web/gui/v1/static/js/5.2f783a54.chunk.js.LICENSE
similarity index 100%
rename from web/gui/v1/static/js/5.2f783a54.chunk.js.LICENSE
rename to src/web/gui/v1/static/js/5.2f783a54.chunk.js.LICENSE
diff --git a/web/gui/v1/static/js/5.2f783a54.chunk.js.map b/src/web/gui/v1/static/js/5.2f783a54.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/5.2f783a54.chunk.js.map
rename to src/web/gui/v1/static/js/5.2f783a54.chunk.js.map
diff --git a/web/gui/v1/static/js/6.e1951239.chunk.js b/src/web/gui/v1/static/js/6.e1951239.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/6.e1951239.chunk.js
rename to src/web/gui/v1/static/js/6.e1951239.chunk.js
diff --git a/web/gui/v1/static/js/6.e1951239.chunk.js.map b/src/web/gui/v1/static/js/6.e1951239.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/6.e1951239.chunk.js.map
rename to src/web/gui/v1/static/js/6.e1951239.chunk.js.map
diff --git a/web/gui/v1/static/js/7.c2417fb0.chunk.js b/src/web/gui/v1/static/js/7.c2417fb0.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/7.c2417fb0.chunk.js
rename to src/web/gui/v1/static/js/7.c2417fb0.chunk.js
diff --git a/web/gui/v1/static/js/7.c2417fb0.chunk.js.map b/src/web/gui/v1/static/js/7.c2417fb0.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/7.c2417fb0.chunk.js.map
rename to src/web/gui/v1/static/js/7.c2417fb0.chunk.js.map
diff --git a/web/gui/v1/static/js/8.b4161ea2.chunk.js b/src/web/gui/v1/static/js/8.b4161ea2.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/8.b4161ea2.chunk.js
rename to src/web/gui/v1/static/js/8.b4161ea2.chunk.js
diff --git a/web/gui/v1/static/js/8.b4161ea2.chunk.js.map b/src/web/gui/v1/static/js/8.b4161ea2.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/8.b4161ea2.chunk.js.map
rename to src/web/gui/v1/static/js/8.b4161ea2.chunk.js.map
diff --git a/web/gui/v1/static/js/9.a4363968.chunk.js b/src/web/gui/v1/static/js/9.a4363968.chunk.js
similarity index 100%
rename from web/gui/v1/static/js/9.a4363968.chunk.js
rename to src/web/gui/v1/static/js/9.a4363968.chunk.js
diff --git a/web/gui/v1/static/js/9.a4363968.chunk.js.map b/src/web/gui/v1/static/js/9.a4363968.chunk.js.map
similarity index 100%
rename from web/gui/v1/static/js/9.a4363968.chunk.js.map
rename to src/web/gui/v1/static/js/9.a4363968.chunk.js.map
diff --git a/src/web/gui/v1/static/js/main.e248095a.chunk.js b/src/web/gui/v1/static/js/main.e248095a.chunk.js
new file mode 100644
index 00000000000000..78e82c50ea16f4
--- /dev/null
+++ b/src/web/gui/v1/static/js/main.e248095a.chunk.js
@@ -0,0 +1,3 @@
+/*!
For license information please see main.e248095a.chunk.js.LICENSE */ +(this["webpackJsonp@netdata/dashboard"]=this["webpackJsonp@netdata/dashboard"]||[]).push([[0],{102:function(e,t,a){"use strict";a.d(t,"b",(function(){return o})),a.d(t,"a",(function(){return d}));var n,r,o=function(e){return e>-10&&e<10?"0".concat(e):"".concat(e)},i=function(e,t){var a=Math.abs(e),n="DAYS"===t?Math.floor(a/86400):0;a-=86400*n;var r="DAYS"===t||"HOURS"===t?Math.floor(a/3600):0;a-=3600*r;var i=Math.floor(a/60);a-=60*i;var s="DAYS"===t?"".concat(n,"d:"):"",l="DAYS"===t||"HOURS"===t?"".concat(o(r),":"):"",c="".concat(o(i),":"),u=o(a.toFixed(2));return"".concat(s).concat(l).concat(c).concat(u)},s={"packets/s":{pps:1,Kpps:1e3,Mpps:1e6},pps:{pps:1,Kpps:1e3,Mpps:1e6},"kilobits/s":{"bits/s":.001,"kilobits/s":1,"megabits/s":1e3,"gigabits/s":1e6,"terabits/s":1e9},"bytes/s":{"bytes/s":1,"kilobytes/s":1024,"megabytes/s":1048576,"gigabytes/s":1073741824,"terabytes/s":1099511627776},"kilobytes/s":{"bytes/s":1/1024,"kilobytes/s":1,"megabytes/s":1024,"gigabytes/s":1048576,"terabytes/s":1073741824},"B/s":{"B/s":1,"KiB/s":1024,"MiB/s":1048576,"GiB/s":1073741824,"TiB/s":1099511627776},"KB/s":{"B/s":1/1024,"KB/s":1,"MB/s":1024,"GB/s":1048576,"TB/s":1073741824},"KiB/s":{"B/s":1/1024,"KiB/s":1,"MiB/s":1024,"GiB/s":1048576,"TiB/s":1073741824},bytes:{bytes:1,kilobytes:1024,megabytes:1048576,gigabytes:1073741824,terabytes:1099511627776},Hz:{Hz:1,kHz:Math.pow(10,3),MHz:Math.pow(10,6),GHz:Math.pow(10,9),THz:Math.pow(10,12),PHz:Math.pow(10,15),EHz:Math.pow(10,18),ZHz:Math.pow(10,21)},B:{B:1,KiB:1024,MiB:1048576,GiB:1073741824,TiB:1099511627776,PiB:0x4000000000000},KB:{B:1/1024,KB:1,MB:1024,GB:1048576,TB:1073741824},KiB:{B:1/1024,KiB:1,MiB:1024,GiB:1048576,TiB:1073741824},MB:{B:1/1048576,KB:1/1024,MB:1,GB:1024,TB:1048576,PB:1073741824},MiB:{B:1/1048576,KiB:1/1024,MiB:1,GiB:1024,TiB:1048576,PiB:1073741824},GB:{B:1/1073741824,KB:1/1048576,MB:1/1024,GB:1,TB:1024,PB:1048576,EB:1073741824},GiB:{B:1/1073741824,KiB:1/1048576,MiB:1/1024,GiB:1,TiB:1024,PiB:1048576,EiB:1073741824}},l=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1;return function(t){return(t*e).toFixed(2)}},c={Celsius:{Fahrenheit:{check:function(){return"fahrenheit"===n},convert:function(e){return 9*e/5+32}}},celsius:{fahrenheit:{check:function(){return"fahrenheit"===n},convert:function(e){return 9*e/5+32}}},milliseconds:{microseconds:{check:function(e){return e<1},convert:l(1e3)},milliseconds:{check:function(e){return e>=1&&e<1e3},convert:l()},seconds:{check:function(e){return e>=1e3&&e<6e4},convert:l(.001)},"MM:SS.ms":{check:function(e){return r&&e>=6e4&&e<36e5},convert:function(e){return i(e/1e3,"MINUTES")}},"HH:MM:SS.ms":{check:function(e){return r&&e>=36e5&&e<864e5},convert:function(e){return i(e/1e3,"HOURS")}},"dHH:MM:SS.ms":{check:function(e){return r&&e>=864e5},convert:function(e){return i(e/1e3,"DAYS")}}},seconds:{microseconds:{check:function(e){return e<.001},convert:l(1e6)},milliseconds:{check:function(e){return e>=.001&&e<1},convert:l(1e3)},seconds:{check:function(e){return e>=1&&e<60},convert:l(1)},"MM:SS.ms":{check:function(e){return r&&e>=60&&e<3600},convert:function(e){return i(e,"MINUTES")}},"HH:MM:SS.ms":{check:function(e){return r&&e>=3600&&e<86400},convert:function(e){return i(e,"HOURS")}},"dHH:MM:SS.ms":{check:function(e){return r&&e>=86400},convert:function(e){return i(e,"DAYS")}}}},u=function(e){return 
e},d={keys:{},latest:{},globalReset:function(){this.keys={},this.latest={}},get:function(e,t,a,o,i,l,d,h,p){if("undefined"===typeof o&&(o="undefined"),n=h,r=p,"undefined"===typeof s[o]&&"undefined"===typeof c[o])return function(e){return e};if(void 0===i||null===i||"original"===i||i===o)return d(o),u;var f,g=null,m=0;if("undefined"!==typeof s[o]){if("auto"===i){(t=Math.abs(t))>(a=Math.abs(a))&&(a=t);var b=s[o];if(Object.keys(b).forEach((function(e){var t=b[e];t<=a&&t>m&&(g=e,m=t)})),null===g||m<=0)return d(o),u;if("string"===typeof l){var v="".concat(l,"-").concat(o),_=this.keys[v];"undefined"===typeof _&&(this.keys[v]={},_=this.keys[v]),_[e]={units:g,divider:m};var y=_[e];Object.keys(_).forEach((function(e){_[e].divider>y.divider&&(y=_[e])}));var O={units:y.units,divider:y.divider};return this.latest[v]=O,m=O.divider,d(g=O.units),function(e){return m!==O.divider&&(m=O.divider,d(g=O.units)),e/m}}return d(g),function(e){return e/m}}return"undefined"!==typeof s[o][i]?(m=s[o][i],d(i),function(e){return e/m}):(console.log("Units conversion from ".concat(o.toString()," to ").concat(i.toString(),"\n is not supported.")),d(o),u)}return"undefined"!==typeof c[o]?"auto"===i?(Object.keys(c[o]).forEach((function(e){f||c[o][e].check(a)&&(d(e),f=c[o][e].convert)})),f||(d(o),u)):"undefined"!==typeof c[o][i]?(d(i),c[o][i].convert):(console.log("Units conversion from ".concat(o.toString()," to ").concat(i.toString(),"\n is not supported.")),d(o),u):(console.log("Unmatched unit conversion method for units ".concat(o.toString())),d(o),u)}}},110:function(e,t,a){"use strict";var n=a(0),r=a.n(n),o=a(34),i=a.n(o),s=a(8),l=a(9),c=a(165),u=a(37),d=(a(340),n.forwardRef((function(e,t){var a=e.children,r=e.className,o=Object(u.a)(e,["children","className"]);return n.createElement("button",Object.assign({},o,{type:"button",className:i()("netdata-reset-button",r),ref:t}),a)})));a.d(t,"a",(function(){return h}));var h=function(e){var t=e.className,a=e.iconType,o=e.onClick,u=e.onDoubleClick,h=e.onMouseDown,p=e.onTouchStart,f=e.popoverContent,g=e.popoverTitle,m=Object(n.useRef)(null),b=Object(s.b)(l.A);return Object(n.useEffect)((function(){m.current&&b&&window.$(m.current).popover({container:"body",animation:!1,html:!0,trigger:"hover",placement:"bottom",delay:{show:window.NETDATA.options.current.show_help_delay_show_ms,hide:window.NETDATA.options.current.show_help_delay_hide_ms},title:g,content:f})}),[]),r.a.createElement(d,{className:i()(t),onClick:o,onDoubleClick:u,onMouseDown:h,onTouchStart:p,ref:m},r.a.createElement(c.a,{iconType:a}))}},114:function(e,t,a){"use strict";a.d(t,"a",(function(){return u})),a.d(t,"b",(function(){return d}));var n=a(5),r=a(0),o=a.n(r),i=a(110),s=a(18),l=a(56),c=a(8),u="chart_height.",d=function(e){var t=e.chartContainerElement,a=e.chartUuid,d=e.heightId,h=e.isLegendOnBottom,p=Object(r.useState)((function(){return t.clientHeight})),f=Object(n.a)(p,2),g=f[0],m=f[1],b=Object(c.a)();Object(r.useEffect)((function(){g>=70&&b(Object(s.i)({id:a,resizeHeight:g}))}),[g,a,d,b]);var v=Object(r.useCallback)((function(e){e.preventDefault();var a=t.clientHeight,n="touchstart"===e.type?e.touches[0].clientY:e.clientY,r=function(e){var r=a+e-n;if(t.style.height="".concat(r.toString(),"px"),m(r),d){var o=h?r-l.a:r;localStorage.setItem("".concat(u).concat(d),"".concat(o))}},o=function(e){return r(e.clientY)},i=function(e){return r(e.touches[0].clientY)};"touchstart"===e.type?(document.addEventListener("touchmove",i),document.addEventListener("touchend",(function 
e(){document.removeEventListener("touchmove",i),document.removeEventListener("touchend",e)}))):(document.addEventListener("mousemove",o),document.addEventListener("mouseup",(function e(){document.removeEventListener("mousemove",o),document.removeEventListener("mouseup",e)})))}),[t.clientHeight,t.style.height,d,h]);return o.a.createElement(i.a,{className:"netdata-legend-resize-handler",onDoubleClick:function(e){e.preventDefault(),e.stopPropagation()},onMouseDown:v,onTouchStart:v,iconType:"resize",popoverTitle:"Chart Resize",popoverContent:"Drag this point with your mouse or your finger (on touch devices), to resize the chart vertically. You can also <b>double click it</b> or <b>double tap it</b> to reset between 2 states: the default and the one that fits all the values.<br/><small>Help can be disabled from the settings.</small>"})}},12:function(e,t,a){"use strict";a.d(t,"h",(function(){return i})),a.d(t,"p",(function(){return s})),a.d(t,"o",(function(){return l})),a.d(t,"u",(function(){return c})),a.d(t,"s",(function(){return u})),a.d(t,"j",(function(){return d})),a.d(t,"q",(function(){return h})),a.d(t,"i",(function(){return p})),a.d(t,"r",(function(){return f})),a.d(t,"b",(function(){return g})),a.d(t,"d",(function(){return m})),a.d(t,"D",(function(){return b})),a.d(t,"f",(function(){return v})),a.d(t,"C",(function(){return _})),a.d(t,"a",(function(){return y})),a.d(t,"A",(function(){return O})),a.d(t,"e",(function(){return x})),a.d(t,"B",(function(){return w})),a.d(t,"v",(function(){return E})),a.d(t,"l",(function(){return S})),a.d(t,"g",(function(){return C})),a.d(t,"c",(function(){return A})),a.d(t,"w",(function(){return k})),a.d(t,"x",(function(){return j})),a.d(t,"n",(function(){return T})),a.d(t,"m",(function(){return D})),a.d(t,"t",(function(){return P})),a.d(t,"k",(function(){return M})),a.d(t,"y",(function(){return L})),a.d(t,"z",(function(){return I}));var 
n=a(22),r=a(92),o=a(26),i=Object(n.createAction)("".concat(o.e,"/globalRequestCommonColors")),s=Object(n.createAction)("".concat(o.e,"/setCommonMin")),l=Object(n.createAction)("".concat(o.e,"/setCommonMax")),c=Object(n.createAction)("".concat(o.e,"/setGlobalSelection")),u=Object(n.createAction)("".concat(o.e,"/setGlobalPanAndZoom")),d=Object(n.createAction)("".concat(o.e,"/resetGlobalPanAndZoomAction")),h=Object(n.createAction)("".concat(o.e,"/setDefaultAfterAction")),p=Object(n.createAction)("".concat(o.e,"/resetDefaultAfterAction")),f=Object(n.createAction)("".concat(o.e,"/setGlobalChartUnderlay")),g=Object(n.createAction)("".concat(o.e,"/centerAroundHighlightAction")),m=Object(n.createAction)("".concat(o.e,"/clearHighlightAction")),b=Object(n.createAction)("".concat(o.e,"/windowFocusChangeAction")),v=Object(r.a)("".concat(o.e,"/fetchHelloAction")),_=Object(n.createAction)("".concat(o.e,"/updatePersonUrlsAction")),y=Object(n.createAction)("".concat(o.e,"/accessRegistrySuccessAction")),O=Object(n.createAction)("".concat(o.e,"/startAlarmsAction")),x=Object(r.a)("".concat(o.e,"/fetchAllAlarmsAction")),w=Object(n.createAction)("".concat(o.e,"/updateActiveAlarmsAction")),E=Object(n.createAction)("".concat(o.e,"/setOptionAction")),S=Object(n.createAction)("".concat(o.e,"/resetOptions")),C=Object(n.createAction)("".concat(o.e,"/loadSnapshotAction")),A=Object(n.createAction)("".concat(o.e,"/chartsMetadataRequestSuccess")),k=Object(n.createAction)("".concat(o.e,"/setSpacePanelStatusAction")),j=Object(n.createAction)("".concat(o.e,"/setSpacePanelStatusAction")),T=Object(n.createAction)("".concat(o.e,"/setAlarmAction")),D=Object(n.createAction)("".concat(o.e,"/resetRegistry")),P=Object(n.createAction)("".concat(o.e,"/setGlobalPauseAction")),M=Object(n.createAction)("".concat(o.e,"/resetGlobalPauseAction")),L=Object(n.createAction)("".concat(o.e,"/setUTCOffset")),I=Object(n.createAction)("".concat(o.e,"/setUserNodeAccess"))},127:function(e,t,a){"use strict";a.d(t,"b",(function(){return l}));var n=a(0),r=a(27),o=a(74),i=a(12),s=a(188),l=function(){return Object(n.useCallback)((function(){Object(o.a)(s.a,{type:"request-refresh-access",payload:!0})}),[])};t.a=function(){var e=Object(r.d)();Object(o.b)("user-node-access",(function(t){e(Object(i.z)({message:t}))}))}},143:function(e,t,a){"use strict";var n=a(28);a.d(t,"a",(function(){return n.d}))},145:function(e,t,a){"use strict";a.d(t,"a",(function(){return n}));var n=function(e){return e.replace(/ /g,"_").replace(/:/g,"_").replace(/\(/g,"_").replace(/\)/g,"_").replace(/\./g,"_").replace(/\//g,"_")}},159:function(e,t,a){"use strict";a.d(t,"b",(function(){return b})),a.d(t,"a",(function(){return v}));var 
n=a(7),r=a(14),o=a(35),i=a(228),s=a(573),l=a(227),c=a(22),u=a(28),d=a(75),h=a(18),p=a(12),f=a(99),g=a(26),m={commonColorsKeys:{},commonMin:{},commonMax:{},currentSelectionMasterId:null,globalPanAndZoom:null,defaultAfter:d.b?Object(u.e)():-900,globalChartUnderlay:null,hoveredX:null,hasWindowFocus:document.hasFocus(),globalPause:!1,spacePanelIsActive:!1,spacePanelTransitionEndIsActive:!1,registry:{cloudBaseURL:null,hasFetchedInfo:!1,hasFetchedHello:!1,isHelloCallError:null,hostname:"unknown",isCloudEnabled:null,isCloudAvailable:null,isAgentClaimed:null,isACLKAvailable:null,hasStartedInfo:!1,isFetchingHello:!1,fullInfoPayload:null,machineGuid:null,personGuid:null,registryMachines:null,registryMachinesArray:null,registryServer:null},snapshot:null,alarms:{activeAlarms:null,hasStartedAlarms:!1},alarm:null,chartsMetadata:{isFetching:!1,isFetchingError:!1,data:null},options:f.e,userNodeAccess:null},b=Object(c.createReducer)({},m),v=function(e){var t=e.colorsAttribute,a=e.commonColorsAttribute,n=e.chartUuid,r=e.chartContext,o="string"===typeof t&&t.length>0;return a||(o?n:r)},_=function(e){return"ONLY"===Object(i.a)(e)},y=function(e,t){var a,n=t?(a=e.split(" "),_(a)?Object(s.a)(a):a):[],r=!t||!_(e.split(" "));return{assigned:{},available:[].concat(Object(o.a)(n),Object(o.a)(r||0===n.length?window.NETDATA.themes.current.colors:[])),custom:n}};b.on(p.h,(function(e,t){var a=t.chartContext,o=t.chartUuid,i=t.colorsAttribute,s=t.commonColorsAttribute,c=t.dimensionNames,u=v({colorsAttribute:i,commonColorsAttribute:s,chartUuid:o,chartContext:a}),d="string"===typeof i&&i.length>0,h=e.commonColorsKeys[u]||y(i,d),p=Object.keys(h.assigned).length,f=Object(l.a)(c.filter((function(e){return!h.assigned[e]})).map((function(e,t){return Object(r.a)({},e,h.available[(t+p)%h.available.length])}))),g=Object(n.a)({},h.assigned,{},f);return Object(n.a)({},e,{commonColorsKeys:Object(n.a)({},e.commonColorsKeys,Object(r.a)({},u,Object(n.a)({},h,{assigned:g})))})})),b.on(p.p,(function(e,t){var a,i=t.chartUuid,s=t.commonMinKey,l=t.value,c=Object(n.a)({},null===(a=e.commonMin[s])||void 0===a?void 0:a.charts,Object(r.a)({},i,l)),u=Math.min.apply(Math,Object(o.a)(Object.values(c)));return Object(n.a)({},e,{commonMin:Object(n.a)({},e.commonMin,Object(r.a)({},s,{charts:c,currentExtreme:u}))})})),b.on(p.o,(function(e,t){var a,i=t.chartUuid,s=t.commonMaxKey,l=t.value,c=Object(n.a)({},null===(a=e.commonMax[s])||void 0===a?void 0:a.charts,Object(r.a)({},i,l)),u=Math.max.apply(Math,Object(o.a)(Object.values(c)));return Object(n.a)({},e,{commonMax:Object(n.a)({},e.commonMax,Object(r.a)({},s,{charts:c,currentExtreme:u}))})})),b.on(p.w,(function(e,t){var a=t.isActive;return Object(n.a)({},e,{spacePanelIsActive:a})})),b.on(p.x,(function(e,t){var a=t.isActive;return Object(n.a)({},e,{spacePanelTransitionEndIsActive:a})})),b.on(p.u,(function(e,t){var a=t.chartUuid,r=t.hoveredX;return Object(n.a)({},e,{hoveredX:r,currentSelectionMasterId:a})})),b.on(p.s,(function(e,t){return Object(n.a)({},e,{globalPanAndZoom:t})})),b.on(p.j,(function(e){return Object(n.a)({},e,{globalPanAndZoom:m.globalPanAndZoom,hoveredX:m.hoveredX})})),b.on(p.q,(function(e,t){var a=t.after;return Object(n.a)({},e,{defaultAfter:a})})),b.on(p.i,(function(e){return Object(n.a)({},e,{defaultAfter:m.defaultAfter})})),b.on(p.r,(function(e,t){var a=t.after,r=t.before,o=t.masterID;return Object(n.a)({},e,{globalChartUnderlay:{after:a,before:r,masterID:o}})})),b.on(p.b,(function(e){if(!e.globalChartUnderlay)return console.warn("Cannot center around empty selection"),e;var 
t=e.globalChartUnderlay,a=t.after,r=t.before,o=(r-a)/2;return Object(n.a)({},e,{globalPanAndZoom:{after:a-o,before:r+o}})})),b.on(p.d,(function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},a=t.resetPanAndZoom,r=void 0===a||a;return Object(n.a)({},e,{globalChartUnderlay:m.globalChartUnderlay},r?{globalPanAndZoom:m.globalPanAndZoom}:{})})),b.on(p.D,(function(e,t){var a=t.hasWindowFocus,r=document.hasFocus();return Object(n.a)({},e,{hasWindowFocus:r||a})})),b.on(p.t,(function(e){return Object(n.a)({},e,{globalPause:!0})})),b.on(p.k,(function(e,t){var a=t.forcePlay;return Object(n.a)({},e,{globalPause:m.globalPause,globalPanAndZoom:m.globalPanAndZoom,hoveredX:m.hoveredX,options:Object(n.a)({},e.options,{stop_updates_when_focus_is_lost:!a})})})),b.on(p.y,(function(e,t){var a=t.utcOffset;return Object(n.a)({},e,{options:Object(n.a)({},e.options,{utcOffset:a})})})),b.on(p.f.request,(function(e){return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{isFetchingHello:!0})})})),b.on(p.f.success,(function(e,t){var a=t.cloudBaseURL,r=t.hostname,o=t.machineGuid;return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{cloudBaseURL:a,isFetchingHello:!1,hasFetchedHello:!0,hostname:r,machineGuid:o})})})),b.on(p.f.failure,(function(e){return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{cloudBaseURL:g.a,isFetchingHello:!1,isHelloCallError:!0})})})),b.on(p.a,(function(e,t){var a=t.registryServer;return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{registryServer:a})})})),b.on(p.m,(function(e){return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{hasFetchedHello:m.registry.hasFetchedHello})})})),b.on(h.f,(function(e){return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{hasStartedInfo:!0})})})),b.on(h.f.success,(function(e,t){var a=t.isCloudAvailable,r=t.isCloudEnabled,o=t.isAgentClaimed,i=t.isACLKAvailable,s=t.fullInfoPayload;return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{hasFetchedInfo:!0,isCloudAvailable:a,isCloudEnabled:r,isAgentClaimed:o,isACLKAvailable:i,fullInfoPayload:s})})})),b.on(h.f.failure,(function(e){return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{isCloudAvailable:!1,isCloudEnabled:!1,isAgentClaimed:!1,isACLKAvailable:!1})})})),b.on(p.C,(function(e,t){var a=t.personGuid,r=t.registryMachines,o=t.registryMachinesArray;return Object(n.a)({},e,{registry:Object(n.a)({},e.registry,{personGuid:a,registryMachines:r,registryMachinesArray:o})})})),b.on(p.A,(function(e){return Object(n.a)({},e,{alarms:Object(n.a)({},e.alarms,{hasStartedAlarms:!0})})})),b.on(p.B,(function(e,t){var a=t.activeAlarms;return Object(n.a)({},e,{alarms:Object(n.a)({},e.alarms,{activeAlarms:a})})})),b.on(p.v,(function(e,t){var a=t.key,o=t.value;return Object(n.a)({},e,{options:Object(n.a)({},e.options,Object(r.a)({},a,o))})})),b.on(p.l,(function(e){return Object(f.b)(),Object(n.a)({},e,{options:Object(f.c)()})})),b.on(p.g,(function(e,t){var a=t.snapshot,o=Object.keys(a.data).map((function(e){var t,n;try{if(null===(t=a.uncompress(a.data[e])))return console.warn("uncompressed snapshot data for key ".concat(e," is null")),null;if("undefined"===typeof t)return console.warn("uncompressed snapshot data for key ".concat(e," is undefined")),null}catch(o){console.warn("decompression of snapshot data for key ".concat(e," failed"),o),t=null}if("string"!==typeof t)return console.warn("uncompressed snapshot data for key ".concat(e," is not string")),{};try{n=JSON.parse(t)}catch(o){return console.warn("parsing snapshot data for key ".concat(e," 
failed")),{}}return Object(r.a)({},e,n)})).reduce((function(e,t){return Object(n.a)({},e,{},t)}),{});return Object(n.a)({},e,{snapshot:Object(n.a)({},a,{data:o})})})),b.on(p.n,(function(e,t){var a=t.alarm;return Object(n.a)({},e,{alarm:a})})),b.on(p.c,(function(e,t){var a=t.data;return Object(n.a)({},e,{chartsMetadata:Object(n.a)({},e.chartsMetadata,{data:a})})})),b.on(p.z,(function(e,t){var a=t.message;return Object(n.a)({},e,{userNodeAccess:a})}))},165:function(e,t,a){"use strict";a.d(t,"a",(function(){return l}));var n=a(0),r=a.n(n),o=a(34),i=a.n(o),s=function(e){return{left:"fa-backward",reset:"fa-play",right:"fa-forward",zoomIn:"fa-plus",zoomOut:"fa-minus",resize:"fa-sort",lineChart:"fa-chart-line",areaChart:"fa-chart-area",noChart:"fa-chart-area",loading:"fa-sync-alt",noData:"fa-exclamation-triangle"}[e]},l=function(e){var t=e.iconType;return r.a.createElement("i",{className:i()("fas",s(t))})}},167:function(e,t,a){"use strict";a.d(t,"b",(function(){return f})),a.d(t,"a",(function(){return g}));var n=a(14),r=a(7),o=a(199),i=a(574),s=a(575),l=a(310),c=a(22),u=a(12),d=a(99),h=a(28),p=a(18),f={chartData:null,chartId:null,chartMetadata:null,chartPanAndZoom:null,fetchDataParams:{isRemotelyControlled:!1,viewRange:null},isFetchingData:!1,isFetchDataFailure:!1,isFetchDetailsFailure:!1,isFetchingDetails:!1,resizeHeight:null,snapshotDataIsFetching:!1,snapshotDataIsError:!1,snapshotData:null,viewRange:null},g=Object(c.createReducer)({},{}),m=function(e,t){return e[t]||f};g.on(p.c.request,(function(e,t){var a=t.chart,o=t.fetchDataParams,i=t.id;return Object(r.a)({},e,Object(n.a)({},i,Object(r.a)({},m(e,i),{chartId:a,isFetchingData:!0,viewRange:o.viewRange})))})),g.on(p.d,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{isFetchingData:!1})))})),g.on(p.c.failure,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{isFetchingData:!1,isFetchDataFailure:!0})))})),g.on(p.c.success,(function(e,t){var a=t.id,o=t.chartData,i=t.fetchDataParams,s=m(e,a);return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},s,{chartData:Object(h.g)(["dimension_names"],s.chartData,o),fetchDataParams:i,isFetchingData:!1,isFetchDataFailure:!1,viewRange:i.viewRange})))})),g.on(p.e.request,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{snapshotDataIsFetching:!0})))})),g.on(p.e.failure,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{snapshotDataIsFetching:!1,snapshotDataIsError:!0})))})),g.on(p.e.success,(function(e,t){var a=t.id,o=t.snapshotData;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{snapshotDataIsFetching:!1,snapshotDataIsError:!1,snapshotData:o})))})),g.on(p.j,(function(e){return Object(o.a)((function(e){return Object(r.a)({},e,{},Object(i.a)(["snapshotDataIsFetching","snapshotDataIsError","snapshotData"],f))}),e)})),g.on(p.b.request,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{isFetchingDetails:!0})))})),g.on(p.b.failure,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{isFetchDetailsFailure:!0})))})),g.on(p.b.success,(function(e,t){var a=t.id,o=t.chartMetadata;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{chartMetadata:o,isFetchingDetails:!1,isFetchDetailsFailure:!1})))})),g.on(p.i,(function(e,t){var a=t.id,o=t.resizeHeight;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{resizeHeight:o})))})),g.on(p.h,(function(e,t){var 
a=t.after,o=t.before,i=t.id,s=t.shouldForceTimeRange;return Object(r.a)({},e,Object(n.a)({},i,Object(r.a)({},m(e,i),{chartPanAndZoom:{after:a,before:o,shouldForceTimeRange:s}})))})),g.on(p.g,(function(e,t){var a=t.id;return Object(r.a)({},e,Object(n.a)({},a,Object(r.a)({},m(e,a),{chartPanAndZoom:f.chartPanAndZoom})))})),g.on(u.v,(function(e,t){var a=t.key,n=t.value;return a===d.a&&!0===n?Object(o.a)(Object(s.a)("chartPanAndZoom",f.chartPanAndZoom),e):e})),g.on(p.a,(function(e,t){var a=t.id;return Object(l.a)([a],e)}))},177:function(e,t,a){"use strict";a.d(t,"b",(function(){return d})),a.d(t,"a",(function(){return h}));var n=a(7),r=a(14),o=a(576),i=a(227),s=a(579),l=a(99),c=function(e,t,a){var n="data-".concat(t);if(e.hasAttribute(n)){var r=e.getAttribute(n);return"true"===r||"false"!==r&&("null"===r?null:r==="".concat(+r)?+r:/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/.test(r)?JSON.parse(r):r)}return a},u=function(e){return Object(o.a)((function(t){return"boolean"===t.type?function(e,t,a){var n=c(e,t,a);return!0===n||!1===n?n:"string"===typeof n?"yes"===n||"on"===n||""!==n&&"no"!==n&&"off"!==n&&"null"!==n&&a:"number"===typeof n?0!==n:a}(e,t.key,t.defaultValue):c(e,t.key,t.defaultValue)}),{id:{key:"netdata"},host:{key:"host"},httpMethod:{key:"http-method"},title:{key:"title"},chartLibrary:{key:"chart-library",defaultValue:window.NETDATA.chartDefaults.library},width:{key:"width",defaultValue:window.NETDATA.chartDefaults.width},height:{key:"height",defaultValue:window.NETDATA.chartDefaults.height},after:{key:"after"},before:{key:"before",defaultValue:window.NETDATA.chartDefaults.before},legend:{key:"legend",type:"boolean",defaultValue:!0},legendPosition:{key:"legend-position"},units:{key:"units"},unitsCommon:{key:"common-units"},unitsDesired:{key:"desired-units"},aggrMethod:{key:"aggr-method"},labels:{key:"labels"},postGroupBy:{key:"post-group-by"},postAggregationMethod:{key:"post-aggregation-method"},dimensionsAggrMethod:{key:"dimensions-aggr-method"},aggrGroups:{key:"aggrGroups"},selectedChart:{key:"selected-chart"},filteredRows:{key:"filtered-rows"},groupBy:{key:"group-by"},nodeIDs:{key:"node-ids"},colors:{key:"colors"},commonColors:{key:"common-colors"},decimalDigits:{key:"decimal-digits"},dimensions:{key:"dimensions"},selectedDimensions:{key:"selected-dimensions"},forceTimeWindow:{key:"force-time-window"},appendOptions:{key:"append-options"},gtime:{key:"gtime"},method:{key:"method"},overrideOptions:{key:"override-options"},pixelsPerPoint:{key:"pixels-per-point"},points:{key:"points"},heightId:{key:"id"},hideResizeHandler:{key:"hide-resize-handler"},detectResize:{key:"detect-resize"},commonMin:{key:"common-min"},commonMax:{key:"common-max"},dygraphType:{key:"dygraph-type"},dygraphValueRange:{key:"dygraph-valuerange"},dygraphTheme:{key:"dygraph-theme"},dygraphSmooth:{key:"dygraph-smooth",type:"boolean"},dygraphColors:{key:"dygraph-colors"},dygraphRightGap:{key:"dygraph-rightgap"},dygraphShowRangeSelector:{key:"dygraph-showrangeselector",type:"boolean"},dygraphShowRoller:{key:"dygraph-showroller",type:"boolean"},dygraphTitle:{key:"dygraph-title"},dygraphTitleHeight:{key:"dygraph-titleheight"},dygraphLegend:{key:"dygraph-legend"},dygraphLabelsDiv:{key:"dygraph-labelsdiv"},dygraphLabelsSeparateLine:{key:"dygraph-labelsseparatelines",type:"boolean"},dygraphIncludeZero:{key:"dygraph-includezero",type:"boolean"},dygraphShowZeroValues:{key:"dygraph-labelsshowzerovalues",type:"boolean"},dygraphShowLabelsOnHighLight:{key:"dygraph-showlabelsonhighlight",type:"boolean"},dygraphHideOverlayOnMouseOut:{key:"dygraph-h
ideoverlayonmouseout",type:"boolean"},dygraphXRangePad:{key:"dygraph-xrangepad"},dygraphYRangePad:{key:"dygraph-yrangepad"},dygraphYLabelWidth:{key:"dygraph-ylabelwidth"},dygraphStrokeWidth:{key:"dygraph-strokewidth"},dygraphStrokePattern:{key:"dygraph-strokepattern"},dygraphDrawPoints:{key:"dygraph-drawpoints",type:"boolean"},dygraphDrawGapEdgePoints:{key:"dygraph-drawgapedgepoints",type:"boolean"},dygraphConnectSeparatedPoints:{key:"dygraph-connectseparatedpoints",type:"boolean"},dygraphPointSize:{key:"dygraph-pointsize"},dygraphStepPlot:{key:"dygraph-stepplot",type:"boolean"},dygraphStrokeBorderColor:{key:"dygraph-strokebordercolor"},dygraphStrokeBorderWidth:{key:"dygraph-strokeborderwidth"},dygraphFillGraph:{key:"dygraph-fillgraph",type:"boolean"},dygraphFillAlpha:{key:"dygraph-fillalpha"},dygraphStackedGraph:{key:"dygraph-stackedgraph",type:"boolean"},dygraphStackedGraphNanFill:{key:"dygraph-stackedgraphnanfill"},dygraphAxisLabelFontSize:{key:"dygraph-axislabelfontsize"},dygraphAxisLineColor:{key:"dygraph-axislinecolor"},dygraphAxisLineWidth:{key:"dygraph-axislinewidth"},dygraphDrawGrid:{key:"dygraph-drawgrid",type:"boolean"},dygraphGridLinePattern:{key:"dygraph-gridlinepattern"},dygraphGridLineWidth:{key:"dygraph-gridlinewidth"},dygraphGridLineColor:{key:"dygraph-gridlinecolor"},dygraphMaxNumberWidth:{key:"dygraph-maxnumberwidth"},dygraphSigFigs:{key:"dygraph-sigfigs"},dygraphDigitsAfterDecimal:{key:"dygraph-digitsafterdecimal"},dygraphHighlighCircleSize:{key:"dygraph-highlightcirclesize"},dygraphHighlightSeriesOpts:{key:"dygraph-highlightseriesopts"},dygraphHighlightSeriesBackgroundAlpha:{key:"dygraph-highlightseriesbackgroundalpha"},dygraphXPixelsPerLabel:{key:"dygraph-xpixelsperlabel"},dygraphXAxisLabelWidth:{key:"dygraph-xaxislabelwidth"},dygraphDrawXAxis:{key:"dygraph-drawxaxis",type:"boolean"},dygraphYPixelsPerLabel:{key:"dygraph-ypixelsperlabel"},dygraphYAxisLabelWidth:{key:"dygraph-yaxislabelwidth"},dygraphDrawYAxis:{key:"dygraph-drawyaxis",type:"boolean"},dygraphDrawAxis:{key:"dygraph-drawaxis",type:"boolean"},easyPieChartMinValue:{key:"easypiechart-min-value"},easyPieChartMaxValue:{key:"easypiechart-max-value"},easyPieChartBarColor:{key:"easypiechart-barcolor"},easyPieChartTrackColor:{key:"easypiechart-trackcolor"},easyPieChartScaleColor:{key:"easypiechart-scalecolor"},easyPieChartScaleLength:{key:"easypiechart-scalelength"},easyPieChartLineCap:{key:"easypiechart-linecap"},easyPieChartLineWidth:{key:"easypiechart-linewidth"},easyPieChartTrackWidth:{key:"easypiechart-trackwidth"},easyPieChartSize:{key:"easypiechart-size"},easyPieChartRotate:{key:"easypiechart-rotate"},easyPieChartAnimate:{key:"easypiechart-animate"},easyPieChartEasing:{key:"easypiechart-easing"},gaugeMinValue:{key:"gauge-min-value"},gaugeMaxValue:{key:"gauge-max-value"},gaugePointerColor:{key:"gauge-pointer-color"},gaugeStrokeColor:{key:"gauge-stroke-color"},gaugeStartColor:{key:"gauge-start-color"},gaugeStopColor:{key:"gauge-stop-color"},gaugeGenerateGradient:{key:"gauge-generate-gradient"},sparklineType:{key:"sparkline-type"},sparklineLineColor:{key:"sparkline-linecolor"},sparklineFillColor:{key:"sparkline-fillcolor"},sparklineChartRangeMin:{key:"sparkline-chartrangemin"},sparklineChartRangeMax:{key:"sparkline-chartrangemax"},sparklineComposite:{key:"sparkline-composite"},sparklineEnableTagOptions:{key:"sparkline-enabletagoptions"},sparklineTagOptionPrefix:{key:"sparkline-tagoptionprefix"},sparklineTagValuesAttribute:{key:"sparkline-tagvaluesattribute"},sparklineDisableHiddenCheck:{key:"sparkline-disablehid
dencheck"},sparklineDefaultPixelsPerValue:{key:"sparkline-defaultpixelspervalue"},sparklineSpotColor:{key:"sparkline-spotcolor"},sparklineMinSpotColor:{key:"sparkline-minspotcolor"},sparklineMaxSpotColor:{key:"sparkline-maxspotcolor"},sparklineSpotRadius:{key:"sparkline-spotradius"},sparklineValueSpots:{key:"sparkline-valuespots"},sparklineHighlightSpotColor:{key:"sparkline-highlightspotcolor"},sparklineHighlightLineColor:{key:"sparkline-highlightlinecolor"},sparklineLineWidth:{key:"sparkline-linewidth"},sparklineNormalRangeMin:{key:"sparkline-normalrangemin"},sparklineNormalRangeMax:{key:"sparkline-normalrangemax"},sparklineDrawNormalOnTop:{key:"sparkline-drawnormalontop"},sparklineXvalues:{key:"sparkline-xvalues"},sparklineChartRangeClip:{key:"sparkline-chartrangeclip"},sparklineChartRangeMinX:{key:"sparkline-chartrangeminx"},sparklineChartRangeMaxX:{key:"sparkline-chartrangemaxx"},sparklineDisableInteraction:{key:"sparkline-disableinteraction",type:"boolean"},sparklineDisableTooltips:{key:"sparkline-disabletooltips",type:"boolean"},sparklineOnHover:{key:"sparkline-on-hover"},sparklineDisableHighlight:{key:"sparkline-disablehighlight",type:"boolean"},sparklineHighlightLighten:{key:"sparkline-highlightlighten"},sparklineHighlightColor:{key:"sparkline-highlightcolor"},sparklineTooltipContainer:{key:"sparkline-tooltipcontainer"},sparklineTooltipClassname:{key:"sparkline-tooltipclassname"},sparklineTooltipFormat:{key:"sparkline-tooltipformat"},sparklineTooltipPrefix:{key:"sparkline-tooltipprefix"},sparklineTooltipSuffix:{key:"sparkline-tooltipsuffix"},sparklineTooltipSkipNull:{key:"sparkline-tooltipskipnull",type:"boolean"},sparklineTooltipValueLookups:{key:"sparkline-tooltipvaluelookups"},sparklineTooltipFormatFieldlist:{key:"sparkline-tooltipformatfieldlist"},sparklineTooltipFormatFieldlistKey:{key:"sparkline-tooltipformatfieldlistkey"},sparklineNumberFormatter:{key:"sparkline-numberformatter"},sparklineNumberDigitGroupSep:{key:"sparkline-numberdigitgroupsep"},sparklineNumberDecimalMark:{key:"sparkline-numberdecimalmark"},sparklineNumberDigitGroupCount:{key:"sparkline-numberdigitgroupcount"},sparklineAnimatedZooms:{key:"sparkline-animatedzooms",type:"boolean"},d3pieTitle:{key:"d3pie-title"},d3pieSubtitle:{key:"d3pie-subtitle"},d3pieFooter:{key:"d3pie-footer"},d3pieTitleColor:{key:"d3pie-title-color"},d3pieTitleFontsize:{key:"d3pie-title-fontsize"},d3pieTitleFontweight:{key:"d3pie-title-fontweight"},d3pieTitleFont:{key:"d3pie-title-font"},d3PieSubtitleColor:{key:"d3pie-subtitle-color"},d3PieSubtitleFontsize:{key:"d3pie-subtitle-fontsize"},d3PieSubtitleFontweight:{key:"d3pie-subtitle-fontweight"},d3PieSubtitleFont:{key:"d3pie-subtitle-font"},d3PieFooterColor:{key:"d3pie-footer-color"},d3PieFooterFontsize:{key:"d3pie-footer-fontsize"},d3PieFooterFontweight:{key:"d3pie-footer-fontweight"},d3PieFooterFont:{key:"d3pie-footer-font"},d3PieFooterLocation:{key:"d3pie-footer-location"},d3PiePieinnerradius:{key:"d3pie-pieinnerradius"},d3PiePieouterradius:{key:"d3pie-pieouterradius"},d3PieSortorder:{key:"d3pie-sortorder"},d3PieSmallsegmentgroupingEnabled:{key:"d3pie-smallsegmentgrouping-enabled",type:"boolean"},d3PieSmallsegmentgroupingValue:{key:"d3pie-smallsegmentgrouping-value"},d3PieSmallsegmentgroupingValuetype:{key:"d3pie-smallsegmentgrouping-valuetype"},d3PieSmallsegmentgroupingLabel:{key:"d3pie-smallsegmentgrouping-label"},d3PieSmallsegmentgroupingColor:{key:"d3pie-smallsegmentgrouping-color"},d3PieLabelsOuterFormat:{key:"d3pie-labels-outer-format"},d3PieLabelsOuterHidewhenlessthanpercentage:{ke
y:"d3pie-labels-outer-hidewhenlessthanpercentage"},d3PieLabelsOuterPiedistance:{key:"d3pie-labels-outer-piedistance"},d3PieLabelsInnerFormat:{key:"d3pie-labels-inner-format"},d3PieLabelsInnerHidewhenlessthanpercentage:{key:"d3pie-labels-inner-hidewhenlessthanpercentage"},d3PieLabelsMainLabelColor:{key:"d3pie-labels-mainLabel-color"},d3PieLabelsMainLabelFont:{key:"d3pie-labels-mainLabel-font"},d3PieLabelsMainLabelFontsize:{key:"d3pie-labels-mainLabel-fontsize"},d3PieLabelsMainLabelFontweight:{key:"d3pie-labels-mainLabel-fontweight"},d3PieLabelsPercentageColor:{key:"d3pie-labels-percentage-color"},d3PieLabelsPercentageFont:{key:"d3pie-labels-percentage-font"},d3PieLabelsPercentageFontsize:{key:"d3pie-labels-percentage-fontsize"},d3PieLabelsPercentageFontweight:{key:"d3pie-labels-percentage-fontweight"},d3PieLabelsValueColor:{key:"d3pie-labels-value-color"},d3PieLabelsValueFont:{key:"d3pie-labels-value-font"},d3PieLabelsValueFontsize:{key:"d3pie-labels-value-fontsize"},d3PieLabelsValueFontweight:{key:"d3pie-labels-value-fontweight"},d3PieLabelsLinesEnabled:{key:"d3pie-labels-lines-enabled",type:"boolean"},d3PieLabelsLinesStyle:{key:"d3pie-labels-lines-style"},d3PieLabelsLinesColor:{key:"d3pie-labels-lines-color"},d3PieLabelsTruncationEnabled:{key:"d3pie-labels-truncation-enabled",type:"boolean"},d3PieLabelsTruncationTruncatelength:{key:"d3pie-labels-truncation-truncatelength"},d3PieMiscColorsSegmentstroke:{key:"d3pie-misc-colors-segmentstroke"},d3PieMiscGradientEnabled:{key:"d3pie-misc-gradient-enabled",type:"boolean"},d3PieMiscColorsPercentage:{key:"d3pie-misc-colors-percentage"},d3PieMiscGradientColor:{key:"d3pie-misc-gradient-color"},d3PieCssprefix:{key:"d3pie-cssprefix"},peityStrokeWidth:{key:"peity-strokewidth"},textOnlyDecimalPlaces:{key:"textonly-decimal-places"},textOnlyPrefix:{key:"textonly-prefix"},textOnlySuffix:{key:"textonly-suffix"}})},d=function(e){var t=u(e),a=function(e){var t=Array.from(e.attributes).filter((function(e){return e.name.startsWith("data-show-value-of")})).map((function(e){return Object(r.a)({},e.name.replace("data-",""),e.value)})),a=Object(i.a)(t);return Object(s.a)(a)?void 0:a}(e);return Object(n.a)({},t,{showValueOf:a})},h={legendPosition:l.d?"right":"bottom"}},18:function(e,t,a){"use strict";a.d(t,"c",(function(){return i})),a.d(t,"d",(function(){return s})),a.d(t,"e",(function(){return l})),a.d(t,"j",(function(){return c})),a.d(t,"b",(function(){return u})),a.d(t,"i",(function(){return d})),a.d(t,"h",(function(){return h})),a.d(t,"g",(function(){return p})),a.d(t,"a",(function(){return f})),a.d(t,"f",(function(){return g}));var n=a(22),r=a(92),o=a(45),i=(Object(n.createAction)("".concat(o.c,"/updateChartData")),Object(n.createAction)("".concat(o.c,"/updateChartMetadata")),Object(r.a)("".concat(o.c,"/fetchDataAction"))),s=Object(n.createAction)("".concat(o.c,"/fetchDataCancelAction")),l=Object(r.a)("".concat(o.c,"/fetchDataForSnapshotAction")),c=Object(r.a)("".concat(o.c,"/snapshotExportResetAction")),u=Object(r.a)("".concat(o.c,"/fetchChartAction")),d=Object(n.createAction)("".concat(o.c,"/setResizeHeight")),h=Object(n.createAction)("".concat(o.c,"/setChartPanAndZoom")),p=Object(n.createAction)("".concat(o.c,"/resetChartPanAndZoomAction")),f=Object(n.createAction)("".concat(o.c,"/clearChartStateAction")),g=Object(r.a)("".concat(o.c,"/fetchInfoAction"))},188:function(e,t,a){"use strict";a.d(t,"a",(function(){return n}));var n="sign_in_iframe"},225:function(module,__webpack_exports__,__webpack_require__){"use 
strict";__webpack_require__.d(__webpack_exports__,"b",(function(){return updateLocaleFunctions})),__webpack_require__.d(__webpack_exports__,"a",(function(){return netdataCallback}));var ramda__WEBPACK_IMPORTED_MODULE_0__=__webpack_require__(577),ramda__WEBPACK_IMPORTED_MODULE_1__=__webpack_require__(149),_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__=__webpack_require__(12),_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__=__webpack_require__(9),_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__=__webpack_require__(46),_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__=__webpack_require__(102),_domains_dashboard_actions__WEBPACK_IMPORTED_MODULE_6__=__webpack_require__(44),_domains_chart_actions__WEBPACK_IMPORTED_MODULE_7__=__webpack_require__(18),_domains_chart_selectors__WEBPACK_IMPORTED_MODULE_8__=__webpack_require__(39),_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__=__webpack_require__(36),_utils_name_2_id__WEBPACK_IMPORTED_MODULE_10__=__webpack_require__(145),_utils_date_time__WEBPACK_IMPORTED_MODULE_11__=__webpack_require__(42),_utils__WEBPACK_IMPORTED_MODULE_12__=__webpack_require__(143),utils_hash_utils__WEBPACK_IMPORTED_MODULE_13__=__webpack_require__(64),_utils_is_demo__WEBPACK_IMPORTED_MODULE_14__=__webpack_require__(83),_domains_chart_utils_legend_utils__WEBPACK_IMPORTED_MODULE_15__=__webpack_require__(56),_domains_chart_utils_transformDataAttributes__WEBPACK_IMPORTED_MODULE_16__=__webpack_require__(177),_getHashParam,_getHashParam2,localeDateString,localeTimeString,updateLocaleFunctions=function(e){var t=e.localeDateString,a=e.localeTimeString;localeDateString=t,localeTimeString=a},netdataShowAlarms=!0,netdataRegistry=!0,netdataServer=void 0,netdataServerStatic=void 0,netdataCheckXSS=void 0,reduxStore;function escapeUserInputHTML(e){return e.toString().replace(/&/g,"&").replace(/</g,"<").replace(/>/g,">").replace(/"/g,""").replace(/#/g,"#").replace(/'/g,"'").replace(/\(/g,"(").replace(/\)/g,")").replace(/\//g,"/")}var setOption=function(e,t){reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.v)({key:e,value:t}))},getFromRegistry=function(e){var t=Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.x)(reduxStore.getState());return null===t||void 0===t?void 0:t[e]};function verifyURL(e){return"string"===typeof e&&(e.startsWith("http://")||e.startsWith("https://"))?e.replace(/'/g,"%22").replace(/"/g,"%27").replace(/\)/g,"%28").replace(/\(/g,"%29"):(console.log("invalid URL detected:"),console.log(e),'javascript:alert("invalid url");')}window.urlOptions={hash:"#",theme:null,help:null,mode:"live",update_always:!1,pan_and_zoom:!1,server:null,after:null!==(_getHashParam=Object(utils_hash_utils__WEBPACK_IMPORTED_MODULE_13__.a)("after"))&&void 0!==_getHashParam?_getHashParam:0,before:null!==(_getHashParam2=Object(utils_hash_utils__WEBPACK_IMPORTED_MODULE_13__.a)("before"))&&void 0!==_getHashParam2?_getHashParam2:0,highlight:!1,highlight_after:0,highlight_before:0,nowelcome:!1,show_alarms:!1,chart:null,family:null,alarm:null,utc:null,hasProperty:function(e){return"undefined"!==typeof this[e]},genHash:function(e){var t=urlOptions.hash;return 
t+=";after="+urlOptions.after.toString()+";before="+urlOptions.before.toString(),!0===urlOptions.highlight&&(t+=";highlight_after="+urlOptions.highlight_after.toString()+";highlight_before="+urlOptions.highlight_before.toString()),null!==urlOptions.theme&&(t+=";theme="+urlOptions.theme.toString()),null!==urlOptions.help&&(t+=";help="+urlOptions.help.toString()),!0===urlOptions.update_always&&(t+=";update_always=true"),!0===e&&null!==urlOptions.server&&(t+=";server="+urlOptions.server.toString()),"live"!==urlOptions.mode&&(t+=";mode="+urlOptions.mode),null!==urlOptions.utc&&(t+=";utc="+urlOptions.utc),t},parseHash:function(){for(var e=document.location.hash.split(";"),t=e.length;t--;)if(0!==t){var a=e[t].split("=");urlOptions.hasProperty(a[0])&&"undefined"!==typeof a[1]&&(urlOptions[a[0]]=decodeURIComponent(a[1]))}else e[t].length>0&&(urlOptions.hash=e[t]);var n=["nowelcome","show_alarms","update_always"];for(t=n.length;t--;)"true"===urlOptions[n[t]]||!0===urlOptions[n[t]]||"1"===urlOptions[n[t]]||1===urlOptions[n[t]]?urlOptions[n[t]]=!0:urlOptions[n[t]]=!1;var r=["after","before","highlight_after","highlight_before"];for(t=r.length;t--;)if("string"===typeof urlOptions[r[t]])try{urlOptions[r[t]]=parseInt(urlOptions[r[t]])}catch(i){console.log("failed to parse URL hash parameter "+r[t]),urlOptions[r[t]]=0}switch(null!==urlOptions.server&&""!==urlOptions.server?(netdataServerStatic=document.location.origin.toString()+document.location.pathname.toString(),netdataServer=urlOptions.server,netdataCheckXSS=!0):urlOptions.server=null,urlOptions.before>0&&urlOptions.after>0?(urlOptions.pan_and_zoom=!0,urlOptions.nowelcome=!0):urlOptions.pan_and_zoom=!1,urlOptions.highlight_before>0&&urlOptions.highlight_after>0?urlOptions.highlight=!0:urlOptions.highlight=!1,urlOptions.mode){case"print":if(urlOptions.theme="white",urlOptions.welcome=!1,urlOptions.help=!1,urlOptions.show_alarms=!1,!1===urlOptions.pan_and_zoom){urlOptions.pan_and_zoom=!0,urlOptions.before=Date.now();var o=urlOptions.after?1e3*urlOptions.after:-6e5;urlOptions.after=urlOptions.before+o}netdataShowAlarms=!1,netdataRegistry=!1;break;case"live":default:urlOptions.mode="live"}},hashUpdate:function(){history.replaceState(null,"",urlOptions.genHash(!0))},netdataPanAndZoomCallback:function(e,t,a){null===netdataSnapshotData&&(urlOptions.pan_and_zoom=e,urlOptions.after=t,urlOptions.before=a)},updateUtcParam:function(e){e&&(urlOptions.utc=e,urlOptions.hashUpdate())},netdataHighlightCallback:function(e,t,a){if(!0===e&&(null===t||null===a||t<=0||a<=0||t>=a)&&(e=!1,t=0,a=0),null===window.netdataSnapshotData?urlOptions.highlight=e:urlOptions.highlight=!1,urlOptions.highlight_after=Math.round(t),urlOptions.highlight_before=Math.round(a),urlOptions.hashUpdate(),!0===e&&t>0&&a>0&&t<a){var n=localeDateString(t),r=localeDateString(a);n===r&&(r=""),document.getElementById("navbar-highlight-content").innerHTML='<span class="navbar-highlight-bar highlight-tooltip" onclick="urlOptions.showHighlight();" title="restore the highlighted view" data-toggle="tooltip" data-placement="bottom">highlighted time-frame <b>'+n+" <code>"+localeTimeString(t)+"</code></b> to <b>"+r+" <code>"+localeTimeString(a)+"</code></b>, duration <b>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(Math.round((a-t)/1e3))+'</b></span><span class="navbar-highlight-button-right highlight-tooltip" onclick="urlOptions.clearHighlight();" title="clear the highlighted time-frame" data-toggle="tooltip" data-placement="bottom"><i class="fas 
fa-times"></i></span>',$(".navbar-highlight").show(),$(".navbar-highlight").width("80%"),$(".highlight-tooltip").tooltip({html:!0,delay:{show:500,hide:0},container:"body"})}else $(".navbar-highlight").hide(),$(".navbar-highlight").width("100%")},clearHighlight:function(){reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.d)())},showHighlight:function(){reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.b)())}},urlOptions.parseHash();var localStorageTested=-1;function localStorageTest(){if(-1!==localStorageTested)return localStorageTested;if("undefined"!==typeof Storage&&"object"===typeof localStorage){try{localStorage.setItem("test","test"),localStorage.removeItem("test"),localStorageTested=!0}catch(e){console.log(e),localStorageTested=!1}}else localStorageTested=!1;return localStorageTested}function loadLocalStorage(e){var t=null;try{!0===localStorageTest()?t=localStorage.getItem(e):console.log("localStorage is not available")}catch(a){return console.log(a),null}return"undefined"===typeof t||null===t?null:t}function saveLocalStorage(e,t){try{if(!0===localStorageTest())return localStorage.setItem(e,t.toString()),!0}catch(a){console.log(a)}return!1}function getTheme(e){if("print"===urlOptions.mode)return"white";var t=loadLocalStorage("netdataTheme");return"undefined"===typeof t||null===t||"undefined"===t?e:t}function setTheme(e){return"print"!==urlOptions.mode&&(e!==netdataTheme&&saveLocalStorage("netdataTheme",e))}window.netdataTheme=getTheme("slate"),NETDATA.updateTheme();var netdataShowHelp=!0;function naturalSortChunkify(e){for(var t,a,n=[],r=0,o=-1,i=0;t=(a=e.charAt(r++)).charCodeAt(0);){var s=t>=48&&t<=57;s!==i&&(n[++o]="",i=s),n[o]+=a}return n}function naturalSortCompare(e,t){for(var a=naturalSortChunkify(e.toLowerCase()),n=naturalSortChunkify(t.toLowerCase()),r=0;a[r]&&n[r];r++)if(a[r]!==n[r]){var o=Number(a[r]),i=Number(n[r]);return o.toString()===a[r]&&i.toString()===n[r]?o-i:a[r]>n[r]?1:-1}return a.length-n.length}function saveTextToClient(e,t){var a=new Blob([e],{type:"application/octet-stream"}),n=URL.createObjectURL(a),r=document.createElement("a");r.setAttribute("href",n),r.setAttribute("download",t);var o=document.getElementById("hiddenDownloadLinks");o.innerHTML="",o.appendChild(r),setTimeout((function(){o.removeChild(r),URL.revokeObjectURL(n)}),60),r.click()}function saveObjectToClient(e,t){saveTextToClient(JSON.stringify(e),t)}function netdataURL(e,t){return"undefined"===typeof e&&(e=""),-1!==e.indexOf("#")&&(e=e.substring(0,e.indexOf("#"))),e+urlOptions.genHash(t)}function netdataReload(e){document.location=verifyURL(netdataURL(e,!0)),location.reload()}null!==urlOptions.theme?(setTheme(urlOptions.theme),netdataTheme=urlOptions.theme,window.NETDATA.updateTheme()):urlOptions.theme=netdataTheme,null!==urlOptions.help?(saveLocalStorage("options.show_help",urlOptions.help),netdataShowHelp=urlOptions.help):urlOptions.help=loadLocalStorage("options.show_help"),window.gotoHostedModalHandler=function(e){return document.location=verifyURL(e+urlOptions.genHash()),!1};var gotoServerValidateRemaining=0,gotoServerMiddleClick=!1,gotoServerStop=!1;function gotoServerValidateUrl(e,t,a){var n=0,r="failed";document.location.toString().startsWith("http://")&&a.toString().startsWith("https://")?n=500:document.location.toString().startsWith("https://")&&a.toString().startsWith("http://")&&(r="can't check");var o=netdataURL(a);setTimeout((function(){document.getElementById("gotoServerList").innerHTML+='<tr><td style="padding-left: 
20px;"><a href="'+verifyURL(o)+'" target="_blank">'+escapeUserInputHTML(a)+'</a></td><td style="padding-left: 30px;"><code id="'+t+"-"+e+'-status">checking...</code></td></tr>',NETDATA.registryHello(a,(function(n){if("undefined"!==typeof n&&null!==n&&"string"===typeof n.machine_guid&&n.machine_guid===t){if(document.getElementById(t+"-"+e+"-status").innerHTML="OK",!gotoServerStop)if(gotoServerStop=!0,gotoServerMiddleClick){window.open(verifyURL(o),"_blank"),gotoServerMiddleClick=!1;var i=getFromRegistry("registryMachines");document.getElementById("gotoServerResponse").innerHTML="<b>Opening new window to "+i[t].name+'<br/><a href="'+verifyURL(o)+'">'+escapeUserInputHTML(a)+"</a></b><br/>(check your pop-up blocker if it fails)"}else document.getElementById("gotoServerResponse").innerHTML+="found it! It is at:<br/><small>"+escapeUserInputHTML(a)+"</small>",document.location=verifyURL(o),$("#gotoServerModal").modal("hide")}else"undefined"!==typeof n&&null!==n&&"string"===typeof n.machine_guid&&n.machine_guid!==t&&(r="wrong machine"),document.getElementById(t+"-"+e+"-status").innerHTML=r,--gotoServerValidateRemaining<=0&&(gotoServerMiddleClick=!1,document.getElementById("gotoServerResponse").innerHTML="<b>Sorry! I cannot find any operational URL for this server</b>")}))}),50*e+n)}window.gotoServerModalHandler=function(e){gotoServerStop=!1;var t={},a=getFromRegistry("registryMachines"),n=a[e].alternateUrls.length,r=0;for(document.getElementById("gotoServerResponse").innerHTML="",document.getElementById("gotoServerList").innerHTML="",document.getElementById("gotoServerName").innerHTML=a[e].name,$("#gotoServerModal").modal("show"),gotoServerValidateRemaining=n;n--;){var o=a[e].alternateUrls[n];t[o]=!0,gotoServerValidateUrl(r++,e,o)}return setTimeout((function(){!1===gotoServerStop&&(document.getElementById("gotoServerResponse").innerHTML="<b>Added all the known URLs for this machine.</b>",NETDATA.registrySearch(e,getFromRegistry,(function(a){for(n=a.urls.length;n--;){var o=a.urls[n][1];"undefined"===typeof t[o]&&(gotoServerValidateRemaining++,t[o]=!0,gotoServerValidateUrl(r++,e,o))}})))}),2e3),!1},window.switchRegistryModalHandler=function(){document.getElementById("switchRegistryPersonGUID").value=getFromRegistry("personGuid"),document.getElementById("switchRegistryURL").innerHTML=getFromRegistry("registryServer"),document.getElementById("switchRegistryResponse").innerHTML="",$("#switchRegistryModal").modal("show")},window.notifyForSwitchRegistry=function(){var e=document.getElementById("switchRegistryPersonGUID").value;""!==e&&36===e.length?$.ajax({url:getFromRegistry("registryServer")+"/api/v1/registry?action=switch&machine="+getFromRegistry("machineGuid")+"&name="+encodeURIComponent(getFromRegistry("hostname"))+"&url="+encodeURIComponent(_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b)+"&to="+e,async:!0,cache:!1,headers:{"Cache-Control":"no-cache, no-store",Pragma:"no-cache"},xhrFields:{withCredentials:!0}}).done((function(e){"string"===typeof(e=NETDATA.xss.checkAlways("/api/v1/registry?action=switch",e)).status&&"ok"===e.status||(console.warn("Netdata registry server send invalid response to SWITCH",e),e=null),$("#switchRegistryModal").modal("hide")})).fail((function(){console.warn("Netdata registry SWITCH failed"),document.getElementById("switchRegistryResponse").innerHTML="<b>Sorry! 
The registry rejected your request.</b>"})):document.getElementById("switchRegistryResponse").innerHTML="<b>The ID you have entered is not a GUID.</b>"};var deleteRegistryGuid=null,deleteRegistryUrl=null;window.deleteRegistryModalHandler=function(e,t,a){deleteRegistryGuid=e,deleteRegistryUrl=a,document.getElementById("deleteRegistryServerName").innerHTML=t,document.getElementById("deleteRegistryServerName2").innerHTML=t,document.getElementById("deleteRegistryServerURL").innerHTML=a,document.getElementById("deleteRegistryResponse").innerHTML="",$("#deleteRegistryModal").modal("show")},window.notifyForDeleteRegistry=function(){var e=document.getElementById("deleteRegistryResponse");deleteRegistryUrl&&NETDATA.registryDelete(getFromRegistry,_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b,deleteRegistryUrl,(function(t){null!==t?(deleteRegistryUrl=null,$("#deleteRegistryModal").modal("hide"),reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.m)())):e.innerHTML="<b>Sorry, this command was rejected by the registry server!</b>"}))};var options={menus:{},submenu_names:{},data:null,hostname:"netdata_server",version:"unknown",release_channel:"unknown",hosts:[],duration:0,update_every:1,chartsPerRow:0,chartsHeight:180};function chartsPerRow(e){return 0===options.chartsPerRow?1:options.chartsPerRow}function prioritySort(e,t){return e.priority<t.priority?-1:e.priority>t.priority?1:naturalSortCompare(e.name,t.name)}function sortObjectByPriority(e){var t={},a=[];for(var n in e)e.hasOwnProperty(n)&&"undefined"===typeof t[n]&&(t[n]=e[n],a.push(n));return a.sort((function(e,a){return t[e].priority<t[a].priority?-1:t[e].priority>t[a].priority?1:naturalSortCompare(e,a)})),a}window.scrollToId=function(e){if(e&&""!==e&&null!==document.getElementById(e)){var t=$("#"+e).offset();"undefined"!==typeof t&&$("html, body").animate({scrollTop:t.top-30},0)}return!1},window.customDashboard={menu:{},submenu:{},context:{}};var netdataDashboard={sparklines_registry:{},os:"unknown",menu:{},submenu:{},context:{},sparkline:function(e,t,a,n,r){if(null===options.data||"undefined"===typeof options.data.charts)return"";if("undefined"===typeof options.data.charts[t])return"";if("undefined"===typeof options.data.charts[t].dimensions)return"";if("undefined"===typeof options.data.charts[t].dimensions[a])return"";var o=t+"."+a;return"undefined"===typeof n&&(n=""),"undefined"===typeof this.sparklines_registry[o]?this.sparklines_registry[o]={count:1}:this.sparklines_registry[o].count++,e+'<div class="netdata-container" data-netdata="'+t+'" data-width="25%" data-height="15px" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dimensions="'+a+'" data-show-value-of-'+a+'-at="'+(o=o+"."+this.sparklines_registry[o].count)+'"></div> (<span id="'+o+'" style="display: inline-block; min-width: 50px; text-align: right;">X</span>'+n+")"+r},gaugeChart:function(e,t,a,n){return"undefined"===typeof n&&(n=""),"undefined"===typeof a&&(a=""),'<div class="netdata-container" data-netdata="CHART_UNIQUE_ID" data-dimensions="'+a+'" data-chart-library="gauge" data-gauge-adjust="width" data-title="'+e+'" data-width="'+t+'" data-points="CHART_DURATION" data-colors="'+n+'" role="application"></div>'},anyAttribute:function(e,t,a,n){if("undefined"!==typeof e[a]){var r=e[a][t];return"undefined"===typeof r?n:"function"===typeof r?r(netdataDashboard.os):r}return n},menuTitle:function(e){return"undefined"!==typeof e.menu_pattern?(this.anyAttribute(this.menu,"title",e.menu_pattern,e.menu_pattern).toString()+" 
"+e.type.slice(-(e.type.length-e.menu_pattern.length-1)).toString()).replace(/_/g," "):this.anyAttribute(this.menu,"title",e.menu,e.menu).toString().replace(/_/g," ")},menuIcon:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"icon",e.menu_pattern,'<i class="fas fa-puzzle-piece"></i>').toString():this.anyAttribute(this.menu,"icon",e.menu,'<i class="fas fa-puzzle-piece"></i>')},menuInfo:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"info",e.menu_pattern,null):this.anyAttribute(this.menu,"info",e.menu,null)},menuHeight:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"height",e.menu_pattern,1):this.anyAttribute(this.menu,"height",e.menu,1)},submenuTitle:function(e,t){var a=e+"."+t,n=this.anyAttribute(this.submenu,"title",a,t).toString().replace(/_/g," ");return n.length>28?n.substring(0,13)+"..."+n.substring(n.length-12,n.length):n},submenuInfo:function(e,t){var a=e+"."+t;return this.anyAttribute(this.submenu,"info",a,null)},submenuHeight:function(e,t,a){var n=e+"."+t;return this.anyAttribute(this.submenu,"height",n,1)*a},contextInfo:function(e){var t=this.anyAttribute(this.context,"info",e,null);return null!==t?'<div class="shorten dashboard-context-info netdata-chart-alignment" role="document">'+t+"</div>":""},contextValueRange:function(e){return"undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].valueRange?this.context[e].valueRange:"[null, null]"},contextHeight:function(e,t){return"undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].height?t*this.context[e].height:t},contextDecimalDigits:function(e,t){return"undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].decimalDigits?this.context[e].decimalDigits:t}};function enrichChartData(e){var t=e.type.split("_"),a=t[0];switch(a){case"ap":case"net":case"disk":case"powersupply":e.menu=a;break;case"apache":e.menu=e.type,t.length>2&&"cache"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"bind":e.menu=e.type,t.length>2&&"rndc"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"cgroup":e.menu=e.type,e.id.match(/.*[\._\/-:]qemu[\._\/-:]*/)||e.id.match(/.*[\._\/-:]kvm[\._\/-:]*/)?e.menu_pattern="cgqemu":e.menu_pattern="cgroup";break;case"go":e.menu=e.type,t.length>2&&"expvar"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"mount":t.length>2?e.menu=a+"_"+t[1]:e.menu=a;break;case"isc":e.menu=e.type,t.length>2&&"dhcpd"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"ovpn":e.menu=e.type,t.length>3&&"status"===t[1]&&"log"===t[2]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"smartd":case"web":e.menu=e.type,t.length>2&&"log"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"dnsmasq":e.menu=e.type,2==t.length&&"dhcp"===t[1]?e.menu=a+"_"+t[1]:t.length>2&&"dhcp"===t[1]?e.menu_pattern=a+"_"+t[1]:t.length>1&&(e.menu_pattern=a);break;case"anomaly":t.length>=2&&t[1].startsWith("detection")&&(e.menu=a+"_detection");break;case"tc":if(e.menu=a,"tc.qos"===e.context&&("undefined"===typeof options.submenu_names[e.family]||options.submenu_names[e.family]===e.family)){var 
n=e.name.split(".")[1];n.endsWith("_in")?options.submenu_names[e.family]=n.slice(0,n.lastIndexOf("_in")):n.endsWith("_out")?options.submenu_names[e.family]=n.slice(0,n.lastIndexOf("_out")):n.startsWith("in_")?options.submenu_names[e.family]=n.slice(3,n.length):n.startsWith("out_")?options.submenu_names[e.family]=n.slice(4,n.length):options.submenu_names[e.family]=n}e.id.match(/.*-ifb$/)&&e.priority--;break;default:e.menu=e.type,t.length>1&&(e.menu_pattern=a)}e.submenu=e.family}function headMain(e,t,a){if("print"===urlOptions.mode)return"";var n="";return"undefined"!==typeof t["system.swap"]&&(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.swap" data-dimensions="used" data-append-options="percentage" data-chart-library="easypiechart" data-title="Used Swap" data-units="%" data-easypiechart-max-value="100" data-width="9%" data-points="'+a.toString()+'" data-colors="#DD4400" role="application"></div>'),"undefined"!==typeof t["system.io"]?(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io" data-dimensions="in" data-chart-library="easypiechart" data-title="Disk Read" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.io.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io" data-dimensions="out" data-chart-library="easypiechart" data-title="Disk Write" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.io.mainhead" role="application"></div>'):"undefined"!==typeof t["system.pgpgio"]&&(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio" data-dimensions="in" data-chart-library="easypiechart" data-title="Disk Read" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.pgpgio.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio" data-dimensions="out" data-chart-library="easypiechart" data-title="Disk Write" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.pgpgio.mainhead" role="application"></div>'),"undefined"!==typeof t["system.cpu"]&&(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.cpu" data-chart-library="gauge" data-title="CPU" data-units="%" data-gauge-max-value="100" data-width="20%" data-points="'+a.toString()+'" data-colors="'+NETDATA.colors[12]+'" role="application"></div>'),"undefined"!==typeof t["system.net"]?(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net" data-dimensions="received" data-chart-library="easypiechart" data-title="Net Inbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.net.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net" data-dimensions="sent" data-chart-library="easypiechart" data-title="Net Outbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.net.mainhead" role="application"></div>'):"undefined"!==typeof t["system.ip"]?(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip" data-dimensions="received" data-chart-library="easypiechart" data-title="IP Inbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ip.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip" data-dimensions="sent" 
data-chart-library="easypiechart" data-title="IP Outbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ip.mainhead" role="application"></div>'):"undefined"!==typeof t["system.ipv4"]?(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4" data-dimensions="received" data-chart-library="easypiechart" data-title="IPv4 Inbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ipv4.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4" data-dimensions="sent" data-chart-library="easypiechart" data-title="IPv4 Outbound" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ipv4.mainhead" role="application"></div>'):"undefined"!==typeof t["system.ipv6"]&&(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6" data-dimensions="received" data-chart-library="easypiechart" data-title="IPv6 Inbound" data-units="kbps" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ipv6.mainhead" role="application"></div>',n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6" data-dimensions="sent" data-chart-library="easypiechart" data-title="IPv6 Outbound" data-units="kbps" data-width="11%" data-points="'+a.toString()+'" data-common-units="system.ipv6.mainhead" role="application"></div>'),"undefined"!==typeof t["system.ram"]&&(n+='<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ram" data-dimensions="used|buffers|active|wired" data-append-options="percentage" data-chart-library="easypiechart" data-title="Used RAM" data-units="%" data-easypiechart-max-value="100" data-width="9%" data-points="'+a.toString()+'" data-colors="'+NETDATA.colors[7]+'" role="application"></div>'),n}function generateHeadCharts(e,t,a){if("print"===urlOptions.mode)return"";var n="",r=netdataDashboard.anyAttribute(netdataDashboard.context,e,t.context,[]);if(r.length>0)for(var o=0,i=r.length;o<i;)"function"===typeof r[o]?n+=r[o](netdataDashboard.os,t.id).replace(/CHART_DURATION/g,a.toString()).replace(/CHART_UNIQUE_ID/g,t.id):n+=r[o].replace(/CHART_DURATION/g,a.toString()).replace(/CHART_UNIQUE_ID/g,t.id),o++;return n}function renderPage(e,t){var a=document.getElementById("charts_div"),n=Math.floor(100/chartsPerRow($(a).width())),r=60*Math.round($(a).width()*n/100*t.update_every/3/60);options.duration=r,options.update_every=t.update_every;for(var o,i,s,l="",c='<ul class="nav dashboard-sidenav" data-spy="affix" id="sidebar_ul">',u=headMain(netdataDashboard.os,t.charts,r),d=sortObjectByPriority(e),h=0,p=d.length,f="bottom"===_domains_chart_utils_transformDataAttributes__WEBPACK_IMPORTED_MODULE_16__.a.legendPosition,g=f?_domains_chart_utils_legend_utils__WEBPACK_IMPORTED_MODULE_15__.a:0;h<p;){var m=d[h++],b=NETDATA.name2id("menu_"+m);c+='<li class=""><a href="#'+b+'" onClick="return scrollToId(\''+b+"');\">"+e[m].icon+" "+e[m].title+'</a><ul class="nav">',l+='<div role="section" class="dashboard-section"><div role="sectionhead"><h1 id="'+b+'" role="heading">'+e[m].icon+" "+e[m].title+'</h1></div><div role="section" class="dashboard-subsection">',null!==e[m].info&&(l+=e[m].info);var v="",_='<div class="netdata-chart-row">'+u;u="";for(var y=sortObjectByPriority(e[m].submenus),O=0,x=y.length;O<x;){var w=y[O++],E=NETDATA.name2id("menu_"+m+"_submenu_"+w);c+='<li class><a href="#'+E+'" onClick="return 
scrollToId(\''+E+"');\">"+e[m].submenus[w].title+"</a></li>",v+='<div role="section" class="dashboard-section-container" id="'+E+'"><h2 id="'+E+'" class="netdata-chart-alignment" role="heading">'+e[m].submenus[w].title+"</h2>",null!==e[m].submenus[w].info&&(v+='<div class="dashboard-submenu-info netdata-chart-alignment" role="document">'+e[m].submenus[w].info+"</div>");var S='<div class="netdata-chart-row">',C="";e[m].submenus[w].charts.sort(prioritySort);for(var A=0,k=e[m].submenus[w].charts.length;A<k;){var j=e[m].submenus[w].charts[A++];_+=generateHeadCharts("mainheads",j,r),S+=generateHeadCharts("heads",j,r),"print"===urlOptions.mode&&(C+='<div role="row" class="dashboard-print-row">');var T=netdataDashboard.contextHeight(j.context,options.chartsHeight)+g;C+='<div class="netdata-chartblock-container" style="width: '+n.toString()+'%;">'+netdataDashboard.contextInfo(j.context)+'<div class="netdata-container" id="chart_'+NETDATA.name2id(j.id)+'" data-netdata="'+j.id+'" data-width="100%" data-height="'+T.toString()+'px" data-dygraph-valuerange="'+netdataDashboard.contextValueRange(j.context)+'" data-id="'+NETDATA.name2id(options.hostname+"/"+j.id)+'" data-colors="'+netdataDashboard.anyAttribute(netdataDashboard.context,"colors",j.context,"")+'" data-decimal-digits="'+netdataDashboard.contextDecimalDigits(j.context,-1)+'"'+(f?' data-legend-position="bottom"':"")+(o=j.family,i=j.context,s=j.units,"undefined"!==typeof netdataDashboard.anyAttribute(netdataDashboard.context,"commonMin",i,void 0)?' data-common-min="'+o+"/"+i+"/"+s+'"':"")+function(e,t,a){return"undefined"!==typeof netdataDashboard.anyAttribute(netdataDashboard.context,"commonMax",t,void 0)?' data-common-max="'+e+"/"+t+"/"+a+'"':""}(j.family,j.context,j.units)+' role="application"></div></div>',"print"===urlOptions.mode&&(C+="</div>")}v+=(S+="</div>")+C+"</div>"}c+="</ul></li>",l+=(_+="</div>")+v+'</div></div><hr role="separator"/>'}var D="dbengine"===t.memory_mode;c+='<li class="" style="padding-top:15px;"><a href="https://learn.netdata.cloud/docs/agent/collectors/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more charts</a></li>',c+='<li class=""><a href="https://learn.netdata.cloud/docs/agent/health/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more alarms</a></li>',c+='<li class="" style="margin:20px;color:#666;"><small>Every '+(1===t.update_every?"second":t.update_every.toString()+" seconds")+", Netdata collects <strong>"+t.dimensions_count.toLocaleString()+"</strong> metrics on "+t.hostname.toString()+", presents them in <strong>"+t.charts_count.toLocaleString()+"</strong> charts"+(D?"":",")+" and monitors them with <strong>"+t.alarms_count.toLocaleString()+"</strong> alarms.",D||(c+='<br /> <br />Get more history by <a href="https://learn.netdata.cloud/guides/longer-metrics-storage#using-the-round-robin-database" target=_blank>configuring Netdata\'s <strong>history</strong></a> or switching to the <a href="https://learn.netdata.cloud/docs/agent/database/engine" target=_blank>database engine.</a>'),c+="<br/> <br/><strong>netdata</strong><br/>"+t.version.toString()+"</small>",c+="</li>",c+='<li id="sidebar-end-portal-container"></li>',c+="</ul>",a.innerHTML=l,document.getElementById("sidebar").innerHTML=c,!0===urlOptions.highlight&&reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.r)({after:urlOptions.highlight_after,before:urlOptions.highlight_before})),"print"===urlOptions.mode?printPage():finalizePage()}function 
renderChartsAndMenu(e){options.menus={},options.submenu_names={};var t,a=options.menus,n=e.charts;for(var r in n)if(n.hasOwnProperty(r)){var o=n[r];enrichChartData(o),"undefined"===typeof a[i=o.menu]?a[i]={menu_pattern:o.menu_pattern,priority:o.priority,submenus:{},title:netdataDashboard.menuTitle(o),icon:netdataDashboard.menuIcon(o),info:netdataDashboard.menuInfo(o),height:netdataDashboard.menuHeight(o)*options.chartsHeight}:("undefined"===typeof a[i].menu_pattern&&(a[i].menu_pattern=o.menu_pattern),o.priority<a[i].priority&&(a[i].priority=o.priority)),t="undefined"!==typeof a[i].menu_pattern?a[i].menu_pattern:i,"undefined"===typeof a[i].submenus[o.submenu]?a[i].submenus[o.submenu]={priority:o.priority,charts:[],title:null,info:netdataDashboard.submenuInfo(t,o.submenu),height:netdataDashboard.submenuHeight(t,o.submenu,a[i].height)}:o.priority<a[i].submenus[o.submenu].priority&&(a[i].submenus[o.submenu].priority=o.priority),a[i].submenus[o.submenu].charts.push(o)}for(var i in a)if(a.hasOwnProperty(i))for(var s in a[i].submenus)a[i].submenus.hasOwnProperty(s)&&("undefined"!==typeof options.submenu_names[s]?a[i].submenus[s].title=s+" ("+options.submenu_names[s]+")":(t="undefined"!==typeof a[i].menu_pattern?a[i].menu_pattern:i,a[i].submenus[s].title=netdataDashboard.submenuTitle(t,s)));renderPage(a,e)}window.netdataDashboard=netdataDashboard;var handleLoadJs=function(e,t,a){return e.catch((function(e){console.warn("error",e),alert("Cannot load required JS library: ".concat(t))})).then((function(){a()}))};function loadClipboard(e){handleLoadJs(__webpack_require__.e(8).then(__webpack_require__.bind(null,581)).then((function(e){window.clipboard=e})),"clipboard-polyfill",e)}function loadBootstrapTable(e){handleLoadJs(Promise.all([__webpack_require__.e(6).then(__webpack_require__.t.bind(null,582,7)).then((function(){return __webpack_require__.e(7).then(__webpack_require__.t.bind(null,583,7))})),__webpack_require__.e(10).then(__webpack_require__.t.bind(null,584,7))]),"bootstrap-table",e)}function loadBootstrapSlider(e){handleLoadJs(Promise.all([__webpack_require__.e(5).then(__webpack_require__.t.bind(null,585,7)).then((function(e){var t=e.default;window.Slider=t})),__webpack_require__.e(4).then(__webpack_require__.t.bind(null,586,7))]),"bootstrap-slider",e)}function loadLzString(e){handleLoadJs(__webpack_require__.e(9).then(__webpack_require__.t.bind(null,587,7)),"lz-string",e)}function loadPako(e){handleLoadJs(__webpack_require__.e(3).then(__webpack_require__.t.bind(null,588,7)).then((function(e){var t=e.default;window.pako=t})),"pako",e)}function alarmsUpdateModal(){var e='<h3>Raised Alarms</h3><table class="table">',t='<h3>All Running Alarms</h3><div class="panel-group" id="alarms_all_accordion" role="tablist" aria-multiselectable="true">',a='<hr/><a href="https://github.com/netdata/netdata/tree/master/src/web/api/badges#netdata-badges" target="_blank">netdata badges</a> refresh automatically. Their color indicates the state of the alarm: <span style="color: #e05d44"><b> red </b></span> is critical, <span style="color:#fe7d37"><b> orange </b></span> is warning, <span style="color: #4c1"><b> bright green </b></span> is ok, <span style="color: #9f9f9f"><b> light grey </b></span> is undefined (i.e. no data or no status), <span style="color: #000"><b> black </b></span> is not initialized. You can copy and paste their URLs to embed them in any web page.<br/>netdata can send notifications for these alarms. 
Check <a href="https://github.com/netdata/netdata/blob/master/src/health/notifications/health_alarm_notify.conf" target="_blank">this configuration file</a> for more information.';loadClipboard((function(){}));reduxStore.dispatch(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.e.request({callback:function(n){if(options.alarm_families=[],null!==n){var r,o,i,s=0,l=0,c={},u=[];for(r in n.alarms)if(n.alarms.hasOwnProperty(r)){o=(i=n.alarms[r]).family;var d=options.data.charts[i.chart];"undefined"===typeof d&&(d=options.data.charts_by_name[i.chart]),"undefined"===typeof d?(console.log("WARNING: alarm "+r+" is linked to chart "+i.chart+", which is not found in the list of charts received from the server."),d={priority:9999999}):"undefined"!==typeof d.menu&&"undefined"!==typeof d.submenu&&(o=d.menu+" - "+d.submenu),"undefined"===typeof c[o]&&(c[o]={name:o,arr:[],priority:d.priority},u.push(c[o])),d.priority<c[o].priority&&(c[o].priority=d.priority),c[o].arr.unshift(i)}for(var h=u.sort((function(e,t){return e.priority<t.priority?-1:e.priority>t.priority?1:naturalSortCompare(e.name,t.name)})),p=0,f=0,g=h.length;g--;){o=h[p++].name;var m=!1;0!==f&&(t+="</table></div></div></div>"),t+='<div class="panel panel-default"><div class="panel-heading" role="tab" id="alarm_all_heading_'+f.toString()+'"><h4 class="panel-title"><a class="collapsed" role="button" data-toggle="collapse" data-parent="#alarms_all_accordion" href="#alarm_all_'+f.toString()+'" aria-expanded="false" aria-controls="alarm_all_'+f.toString()+'">'+o.toString()+'</a></h4></div><div id="alarm_all_'+f.toString()+'" class="panel-collapse collapse " role="tabpanel" aria-labelledby="alarm_all_heading_'+f.toString()+'" data-alarm-id="'+f.toString()+'"><div class="panel-body" id="alarm_all_body_'+f.toString()+'">',options.alarm_families[f]=c[o],f++;for(var b=c[o].arr,v=b.length;v--;)"WARNING"!==(i=b[v]).status&&"CRITICAL"!==i.status||(m||(m=!0,e+='<tr><th class="text-center" colspan="2"><h4>'+o+"</h4></th></tr>"),s++,e+=x(i,!0)),l++}e+="</table>",h.length>0&&(t+="</div></div></div>"),t+="</div>",e+=s?a:'<div style="width:100%; height: 100px; text-align: center;"><span style="font-size: 50px;"><i class="fas fa-thumbs-up"></i></span><br/>Everything is normal. 
No raised alarms.</div>',t+=l?a:"<h4>No alarms are running in this system.</h4>",document.getElementById("alarms_active").innerHTML=e,document.getElementById("alarms_all").innerHTML=t,enableTooltipsAndPopovers(),h.length>0&&w(0);var _=$("#alarms_all_accordion");_.on("show.bs.collapse",(function(e){var t=$(e.target);w($(t).data("alarm-id"))})),_.on("hidden.bs.collapse",(function(e){var t=$(e.target),a=$(t).data("alarm-id");$("#alarm_all_"+a.toString()).html("")})),document.getElementById("alarms_log").innerHTML='<h3>Alarm Log</h3><table id="alarms_log_table"></table>',loadBootstrapTable((function(){$("#alarms_log_table").bootstrapTable({url:"".concat(_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b,"/api/v1/alarm_log?all"),cache:!1,pagination:!0,pageSize:10,showPaginationSwitch:!1,search:!0,searchTimeOut:300,searchAlign:"left",showColumns:!0,showExport:!0,exportDataType:"all",exportOptions:{fileName:"netdata_alarm_log"},onClickRow:function(e){return scrollToChartAfterHidingModal(e.chart,1e3*e.when,e.status),$("#alarmsModal").modal("hide"),!1},rowStyle:function(e){switch(e.status){case"CRITICAL":return{classes:"danger"};case"WARNING":return{classes:"warning"};case"UNDEFINED":return{classes:"info"};case"CLEAR":return{classes:"success"}}return{}},showFooter:!1,showHeader:!0,showRefresh:!0,showToggle:!1,sortable:!0,silentSort:!1,columns:[{field:"when",title:"Event Date",valign:"middle",titleTooltip:"The date and time the event took place",formatter:function(e,t,a){return O(e," ")},align:"center",switchable:!1,sortable:!0},{field:"hostname",title:"Host",valign:"middle",titleTooltip:"The host that generated this event",align:"center",visible:!1,sortable:!0},{field:"unique_id",title:"Unique ID",titleTooltip:"The host unique ID for this event",formatter:function(e,t,a){return y(e)},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"alarm_id",title:"Alarm ID",titleTooltip:"The ID of the alarm that generated this event",formatter:function(e,t,a){return y(e)},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"alarm_event_id",title:"Alarm Event ID",titleTooltip:"The incremental ID of this event for the given alarm",formatter:function(e,t,a){return y(e)},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"chart",title:"Chart",titleTooltip:"The chart the alarm is attached to",align:"center",valign:"middle",switchable:!1,sortable:!0},{field:"family",title:"Family",titleTooltip:"The family of the chart the alarm is attached to",align:"center",valign:"middle",visible:!1,sortable:!0},{field:"name",title:"Alarm",titleTooltip:"The alarm name that generated this event",formatter:function(e,t,a){return e.toString().replace(/_/g," ")},align:"center",valign:"middle",switchable:!1,sortable:!0},{field:"value_string",title:"Friendly Value",titleTooltip:"The value of the alarm that triggered this event",align:"right",valign:"middle",sortable:!0},{field:"old_value_string",title:"Friendly Old Value",titleTooltip:"The value of the alarm, just before this event",align:"right",valign:"middle",visible:!1,sortable:!0},{field:"old_value",title:"Old Value",titleTooltip:"The value of the alarm, just before this event",formatter:function(e,t,a){return(null!==e?Math.round(100*e)/100:"NaN").toString()},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"value",title:"Value",titleTooltip:"The value of the alarm that triggered this 
event",formatter:function(e,t,a){return(null!==e?Math.round(100*e)/100:"NaN").toString()},align:"right",valign:"middle",visible:!1,sortable:!0},{field:"units",title:"Units",titleTooltip:"The units of the value of the alarm",align:"left",valign:"middle",visible:!1,sortable:!0},{field:"old_status",title:"Old Status",titleTooltip:"The status of the alarm, just before this event",align:"center",valign:"middle",visible:!1,sortable:!0},{field:"status",title:"Status",titleTooltip:"The status of the alarm, that was set due to this event",align:"center",valign:"middle",switchable:!1,sortable:!0},{field:"duration",title:"Last Duration",titleTooltip:"The duration the alarm was at its previous state, just before this event",formatter:function(e,t,a){return Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e,{negative_suffix:"",space:" ",now:"no time"})},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"non_clear_duration",title:"Raised Duration",titleTooltip:"The duration the alarm was raised, just before this event",formatter:function(e,t,a){return Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e,{negative_suffix:"",space:" ",now:"no time"})},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"recipient",title:"Recipient",titleTooltip:"The recipient of this event",align:"center",valign:"middle",visible:!1,sortable:!0},{field:"processed",title:"Processed Status",titleTooltip:"True when this event is processed",formatter:function(e,t,a){return!0===e?"DONE":"PENDING"},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"updated",title:"Updated Status",titleTooltip:"True when this event has been updated by another event",formatter:function(e,t,a){return!0===e?"UPDATED":"CURRENT"},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"updated_by_id",title:"Updated By ID",titleTooltip:"The unique ID of the event that obsoleted this one",formatter:function(e,t,a){return y(e)},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"updates_id",title:"Updates ID",titleTooltip:"The unique ID of the event obsoleted because of this event",formatter:function(e,t,a){return y(e)},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"exec",title:"Script",titleTooltip:"The script to handle the event notification",align:"center",valign:"middle",visible:!1,sortable:!0},{field:"exec_run",title:"Script Run At",titleTooltip:"The date and time the script has been ran",formatter:function(e,t,a){return O(e," ")},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"exec_code",title:"Script Return Value",titleTooltip:"The return code of the script",formatter:function(e,t,a){return 0===e?"OK (returned 0)":"FAILED (with code "+e.toString()+")"},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"delay",title:"Script Delay",titleTooltip:"The hysteresis of the notification",formatter:function(e,t,a){return Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e,{negative_suffix:"",space:" ",now:"no time"})},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"delay_up_to_timestamp",title:"Script Delay Run At",titleTooltip:"The date and time the script should be run, after hysteresis",formatter:function(e,t,a){return O(e," ")},align:"center",valign:"middle",visible:!1,sortable:!0},{field:"info",title:"Description",titleTooltip:"A short description of the alarm",align:"center",valign:"middle",visible:!1,sortable:!0},{field:"source",title:"Alarm Source",titleTooltip:"The 
source of configuration of the alarm",align:"center",valign:"middle",visible:!1,sortable:!0}]})}))}else document.getElementById("alarms_active").innerHTML=document.getElementById("alarms_all").innerHTML=document.getElementById("alarms_log").innerHTML="failed to load alarm data!";function y(e){return 0===e?"-":e.toString()}function O(e,t){if(0===e)return"-";"undefined"===typeof t&&(t=" ");var a=new Date(1e3*e);return a.toLocaleDateString()+t+a.toLocaleTimeString()}function x(e,t){var a=options.data.charts[e.chart];if("undefined"===typeof a&&"undefined"===typeof(a=options.data.charts_by_name[e.chart]))return console.log("Cannot find chart "+e.chart+", you probably need to refresh the page."),"";var n="undefined"!==typeof e.warn||"undefined"!==typeof e.crit,r="".concat(_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b,"/api/v1/badge.svg?chart=").concat(e.chart,"&alarm=").concat(e.name,"&refresh=auto"),o="<br/> <br/>role: <b>"+e.recipient+'</b><br/> <br/><div class="action-button ripple" title="click to scroll the dashboard to the chart of this alarm" data-toggle="tooltip" data-placement="bottom" onClick="scrollToChartAfterHidingModal(\''+e.chart+"', "+1e3*e.last_status_change+", '"+e.status+'\'); $(\'#alarmsModal\').modal(\'hide\'); return false;"><i class="fab fa-periscope"></i></div><div class="action-button ripple" title="click to copy to the clipboard the URL of this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopy(\''+r+'\'); return false;"><i class="far fa-copy"></i></div><div class="action-button ripple" title="click to copy to the clipboard an auto-refreshing <code>embed</code> html element for this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopyBadgeEmbed(\''+r+'\'); return false;"><i class="fas fa-copy"></i></div>',i='<tr><td class="text-center" style="vertical-align: middle; word-break: break-word;" width="40%"><b>'+e.chart+'</b><br/> <br/><embed src="'+r+'" type="image/svg+xml" height="20"/><br/> <br/><span style="font-size: 18px;">'+e.info+"</span>"+o+'</td><td><table class="table">'+("undefined"!==typeof e.warn?'<tr><td width="10%" style="text-align:right">warning when</td><td><span style="font-family: monospace; color:#fe7d37; font-weight: bold;">'+e.warn+"</span></td></tr>":"")+("undefined"!==typeof e.crit?'<tr><td width="10%" style="text-align:right">critical when</td><td><span style="font-family: monospace; color: #e05d44; font-weight: bold;">'+e.crit+"</span></td></tr>":"");if(!0===t){var s=a.units;"%"===s&&(s="%"),i+=("undefined"!==typeof e.lookup_after?'<tr><td width="10%" style="text-align:right">db lookup</td><td>'+function(e,t){var a=" of all values ";return t.dimensions.length>1&&(a=" of the sum of all dimensions "),"undefined"!==typeof e.lookup_dimensions&&(a=e.lookup_dimensions.replace(/|/g,",").split(",").length>1?"of the sum of dimensions <code>"+e.lookup_dimensions+"</code> ":"of all values of dimension <code>"+e.lookup_dimensions+"</code> "),"<code>"+e.lookup_method+"</code> "+a+", of chart <code>"+e.chart+"</code>, starting <code>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.lookup_after+e.lookup_before,{space:" "})+"</code> and up to <code>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.lookup_before,{space:" "})+"</code>"+(e.lookup_options?", with options <code>"+e.lookup_options.replace(/ /g,", ")+"</code>":"")+"."}(e,a)+"</td></tr>":"")+("undefined"!==typeof e.calc?'<tr><td width="10%" style="text-align:right">calculation</td><td><span 
style="font-family: monospace;">'+e.calc+"</span></td></tr>":"")+(null!==a.green?'<tr><td width="10%" style="text-align:right">green threshold</td><td><code>'+a.green+" "+s+"</code></td></tr>":"")+(null!==a.red?'<tr><td width="10%" style="text-align:right">red threshold</td><td><code>'+a.red+" "+s+"</code></td></tr>":"")}e.warn_repeat_every>0&&(i+='<tr><td width="10%" style="text-align:right">repeat warning</td><td>'+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.warn_repeat_every)+"</td></tr>"),e.crit_repeat_every>0&&(i+='<tr><td width="10%" style="text-align:right">repeat critical</td><td>'+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.crit_repeat_every)+"</td></tr>");var l="";return(e.delay_up_duration>0||e.delay_down_duration>0)&&0!==e.delay_multiplier&&e.delay_max_duration>0&&(e.delay_up_duration===e.delay_down_duration?l+="<small><br/>hysteresis "+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.delay_up_duration,{space:" ",negative_suffix:""}):(l="<small><br/>hysteresis ",e.delay_up_duration>0&&(l+="on escalation <code>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.delay_up_duration,{space:" ",negative_suffix:""})+"</code>, "),e.delay_down_duration>0&&(l+="on recovery <code>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.delay_down_duration,{space:" ",negative_suffix:""})+"</code>, ")),1!==e.delay_multiplier&&(l+="multiplied by <code>"+e.delay_multiplier.toString()+"</code>",l+=", up to <code>"+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.delay_max_duration,{space:" ",negative_suffix:""})+"</code>"),l+="</small>"),i+='<tr><td width="10%" style="text-align:right">check every</td><td>'+Object(_domains_chart_utils_seconds4human__WEBPACK_IMPORTED_MODULE_4__.a)(e.update_every,{space:" ",negative_suffix:""})+"</td></tr>"+(!0===n?'<tr><td width="10%" style="text-align:right">execute</td><td><span style="font-family: monospace;">'+e.exec+"</span>"+l+"</td></tr>":"")+'<tr><td width="10%" style="text-align:right">source</td><td><span style="font-family: monospace; word-break: break-word;">'+e.source+"</span></td></tr></table></td></tr>"}function w(e){for(var t='<table class="table">',a=options.alarm_families[e],n=a.arr.length;n--;){t+=x(a.arr[n],!0)}t+="</table>",$("#alarm_all_"+e.toString()).html(t),enableTooltipsAndPopovers()}},serverDefault:_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b}))}function initializeDynamicDashboardWithData(e){if(null!==e){options.hostname=e.hostname,options.data=e,options.version=e.version,options.release_channel=e.release_channel,options.timezone=e.timezone,netdataDashboard.os=e.os,"undefined"!==typeof e.hosts&&(options.hosts=e.hosts),document.getElementById("netdataVersion").innerHTML=options.version,document.title=options.hostname+" netdata dashboard",e.charts_by_name={};var t,a=e.charts;for(t in a)if(a.hasOwnProperty(t)){var n=a[t];e.charts_by_name[n.name]=n}renderChartsAndMenu(e)}}window.clipboardCopy=function(e){clipboard.writeText(e)},window.clipboardCopyBadgeEmbed=function(e){clipboard.writeText('<embed src="'+e+'" type="image/svg+xml" height="20"/>')};var initializeConfig={url:null,custom_info:!0},loadDashboardInfo=Object(ramda__WEBPACK_IMPORTED_MODULE_0__.a)(ramda__WEBPACK_IMPORTED_MODULE_1__.a,(function(){return 
$.ajax({url:"".concat(_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b,"dashboard_info.js"),cache:!0,dataType:"script",xhrFields:{withCredentials:!0}}).fail((function(){alert("Cannot load required JS library: dashboard_info.js")}))}));function loadCustomDashboardInfo(e,t){$.ajax({url:e,cache:!0,dataType:"script",xhrFields:{withCredentials:!0}}).fail((function(){alert("Cannot load required JS library: ".concat(e))})).always((function(){$.extend(!0,netdataDashboard,customDashboard),t()}))}function initializeChartsAndCustomInfo(){loadDashboardInfo().then((function(){NETDATA.chartRegistry.downloadAll(initializeConfig.url,(function(e){null!==e&&(reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.c)({data:e})),!0===initializeConfig.custom_info&&"undefined"!==typeof e.custom_info&&""!==e.custom_info&&null===window.netdataSnapshotData?loadCustomDashboardInfo(_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b+e.custom_info,(function(){initializeDynamicDashboardWithData(e)})):initializeDynamicDashboardWithData(e))}))}))}function initializeDynamicDashboard(e){e&&(reduxStore=e,netdataPrepCallback(),initializeConfig.url=_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b),"undefined"!==typeof netdataCheckXSS&&!0===netdataCheckXSS?(document.getElementById("netdataXssModalServer").innerText=initializeConfig.url,$("#xssModal").modal("show")):initializeChartsAndCustomInfo()}function versionLog(e){document.getElementById("versionCheckLog").innerHTML=e}function versionsMatch(e,t){if(e==t)return!0;var a=e.split("."),n=t.split("."),r=parseInt(a[0].substring(1,2),10),o=parseInt(n[0].substring(1,2),10);return!(r<o)&&(r>o||!((r=parseInt(a[1],10))<(o=parseInt(n[1],10)))&&(r>o||(a=a[2].split("-"),n=n[2].split("-"),!((r=parseInt(a[0],10))<(o=parseInt(n[0],10)))&&(r>o||!((r=a.length>1?parseInt(a[1],10):0)<(o=n.length>1?parseInt(n[1],10):0))))))}function getGithubLatestVersion(e,t){var a;versionLog("Downloading latest version id from github..."),a="stable"===t?"https://api.github.com/repos/netdata/netdata/releases/latest":"https://api.github.com/repos/netdata/netdata-nightlies/releases/latest",$.ajax({url:a,async:!0,cache:!1}).done((function(t){versionLog("Latest stable version from github is "+(t=t.tag_name.replace(/(\r\n|\n|\r| |\t)/gm,""))),e(t)})).fail((function(){versionLog("Failed to download the latest stable version id from github!"),e(null)}))}function checkForUpdateByVersion(e,t){return getGithubLatestVersion((function(e){t(options.version,e)}),options.release_channel),null}function showPageFooter(){document.getElementById("footer").style.display="block"}function printPage(){window.NETDATA.parseDom(),urlOptions.after<0?reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.q)({after:urlOptions.after})):!0===urlOptions.pan_and_zoom&&reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.s)({after:urlOptions.after,before:urlOptions.before})),showPageFooter()}function jsonStringifyFn(e){return JSON.stringify(e,(function(e,t){return"function"===typeof t?t.toString():t}))}function jsonParseFn(str){return JSON.parse(str,(function(key,value){return"string"!=typeof value?value:"function"==value.substring(0,8)?eval("("+value+")"):value}))}window.xssModalDisableXss=function(){return NETDATA.xss.enabled=!1,NETDATA.xss.enabled_for_data=!1,initializeConfig.custom_info=!0,initializeChartsAndCustomInfo(),!1},window.xssModalKeepXss=function(){return 
NETDATA.xss.enabled=!0,NETDATA.xss.enabled_for_data=!0,initializeConfig.custom_info=!1,initializeChartsAndCustomInfo(),!1},window.notifyForUpdate=function(e){versionLog("<p>checking for updates...</p>");var t=Date.now();if("undefined"===typeof e||!0!==e){var a=loadLocalStorage("last_update_check");if(a="string"===typeof a?parseInt(a):0,t-a<288e5)return}checkForUpdateByVersion(e,(function(e,a){var n=!1;if(null===e)n=!1,versionLog('<p><big>Failed to get your netdata version!</big></p><p>You can always get the latest netdata from <a href="https://github.com/netdata/netdata" target="_blank">its github page</a>.</p>');else if(null===a)n=!1,versionLog('<p><big>Failed to get the latest netdata version.</big></p><p>You can always get the latest netdata from <a href="https://github.com/netdata/netdata" target="_blank">its github page</a>.</p>');else if(versionsMatch(e,a))n=!0,versionLog('<p><big>You already have the latest netdata!</big></p><p>No update yet?<br/>We probably need some motivation to keep going on!</p><p>If you haven\'t already, <a href="https://github.com/netdata/netdata" target="_blank">give netdata a <b><i class="fas fa-star"></i></b> at its github page</a>.</p>');else{n=!0;versionLog("<p><big><strong>New version of netdata available!</strong></big></p><p>Latest version: <b><code>"+a+'</code></b></p><p><a href="https://learn.netdata.cloud/docs/agent/changelog/" target="_blank">Click here for the changelog</a> and<br/><a href="https://github.com/netdata/netdata/tree/master/packaging/installer/UPDATE.md" target="_blank">click here for directions on updating</a> your netdata installation.</p><p>We suggest reviewing the changelog for new features you may be interested in, or important bug fixes you may need.<br/>Keeping your netdata updated is generally a good idea.</p>')}n&&saveLocalStorage("last_update_check",t.toString())}))},window.printPreflight=function(){var e=document.location.origin.toString()+document.location.pathname.toString()+document.location.search.toString()+urlOptions.genHash()+";mode=print",t=90*screen.height/100;window.open(e,"","width="+990..toString()+",height="+t.toString()+",menubar=no,toolbar=no,personalbar=no,location=no,resizable=no,scrollbars=yes,status=no,chrome=yes,centerscreen=yes,attention=yes,dialog=yes"),$("#printPreflightModal").modal("hide")};var snapshotOptions={bytes_per_chart:2048,compressionDefault:"pako.deflate.base64",compressions:{none:{bytes_per_point_memory:5.2,bytes_per_point_disk:5.6,compress:function(e){return e},compressed_length:function(e){return e.length},uncompress:function(e){return e}},"pako.deflate.base64":{bytes_per_point_memory:1.8,bytes_per_point_disk:1.9,compress:function(e){return btoa(pako.deflate(e,{to:"string"}))},compressed_length:function(e){return e.length},uncompress:function(e){return pako.inflate(atob(e),{to:"string"})}},"pako.deflate":{bytes_per_point_memory:1.4,bytes_per_point_disk:3.2,compress:function(e){return pako.deflate(e,{to:"string"})},compressed_length:function(e){return e.length},uncompress:function(e){return pako.inflate(e,{to:"string"})}},"lzstring.utf16":{bytes_per_point_memory:1.7,bytes_per_point_disk:2.6,compress:function(e){return LZString.compressToUTF16(e)},compressed_length:function(e){return 2*e.length},uncompress:function(e){return LZString.decompressFromUTF16(e)}},"lzstring.base64":{bytes_per_point_memory:2.1,bytes_per_point_disk:2.3,compress:function(e){return LZString.compressToBase64(e)},compressed_length:function(e){return e.length},uncompress:function(e){return 
LZString.decompressFromBase64(e)}},"lzstring.uri":{bytes_per_point_memory:2.1,bytes_per_point_disk:2.3,compress:function(e){return LZString.compressToEncodedURIComponent(e)},compressed_length:function(e){return e.length},uncompress:function(e){return LZString.decompressFromEncodedURIComponent(e)}}}};function loadSnapshotModalLog(e,t){document.getElementById("loadSnapshotStatus").className="alert alert-"+e,document.getElementById("loadSnapshotStatus").innerHTML=t}var tmpSnapshotData=null;function loadSnapshotPreflightFile(e){var t=NETDATA.xss.string(e.name),a=new FileReader;a.onload=function(e){document.getElementById("loadSnapshotFilename").innerHTML=t;var a=null;try{a=NETDATA.xss.checkAlways("snapshot",JSON.parse(e.target.result),/^(snapshot\.info|snapshot\.data)$/);var n=new Date(a.after_ms),r=new Date(a.before_ms);"undefined"===typeof a.charts_ok&&(a.charts_ok="unknown"),"undefined"===typeof a.charts_failed&&(a.charts_failed=0),"undefined"===typeof a.compression&&(a.compression="none"),"undefined"===typeof a.data_size&&(a.data_size=0),document.getElementById("loadSnapshotFilename").innerHTML="<code>"+t+"</code>",document.getElementById("loadSnapshotHostname").innerHTML="<b>"+a.hostname+"</b>, netdata version: <b>"+a.netdata_version.toString()+"</b>",document.getElementById("loadSnapshotURL").innerHTML=a.url,document.getElementById("loadSnapshotCharts").innerHTML=a.charts.charts_count.toString()+" charts, "+a.charts.dimensions_count.toString()+" dimensions, "+a.data_points.toString()+" points per dimension, "+Math.round(a.duration_ms/a.data_points).toString()+" ms per point",document.getElementById("loadSnapshotInfo").innerHTML="version: <b>"+a.snapshot_version.toString()+"</b>, includes <b>"+a.charts_ok.toString()+"</b> unique chart data queries "+(a.charts_failed>0?"<b>"+a.charts_failed.toString()+"</b> failed":"").toString()+", compressed with <code>"+a.compression.toString()+"</code>, data size "+(Math.round(100*a.data_size/1024/1024)/100).toString()+" MB",document.getElementById("loadSnapshotTimeRange").innerHTML="<b>"+localeDateString(n)+" "+localeTimeString(n)+"</b> to <b>"+localeDateString(r)+" "+localeTimeString(r)+"</b>",document.getElementById("loadSnapshotComments").innerHTML=(a.comments?a.comments:"").toString(),loadSnapshotModalLog("success","File loaded, click <b>Import</b> to render it!"),$("#loadSnapshotImport").removeClass("disabled"),tmpSnapshotData=a}catch(e){console.log(e),document.getElementById("loadSnapshotStatus").className="alert alert-danger",document.getElementById("loadSnapshotStatus").innerHTML="Failed to parse this file!",$("#loadSnapshotImport").addClass("disabled")}},a.readAsText(e)}function loadSnapshotPreflightEmpty(){document.getElementById("loadSnapshotFilename").innerHTML="",document.getElementById("loadSnapshotHostname").innerHTML="",document.getElementById("loadSnapshotURL").innerHTML="",document.getElementById("loadSnapshotCharts").innerHTML="",document.getElementById("loadSnapshotInfo").innerHTML="",document.getElementById("loadSnapshotTimeRange").innerHTML="",document.getElementById("loadSnapshotComments").innerHTML="",loadSnapshotModalLog("success","Browse for a snapshot file (or drag it and drop it here), then click <b>Import</b> to render it."),$("#loadSnapshotImport").addClass("disabled")}window.loadSnapshot=function(){if($("#loadSnapshotImport").addClass("disabled"),null===tmpSnapshotData)return loadSnapshotPreflightEmpty(),void loadSnapshotModalLog("danger","no data have been 
loaded");loadPako((function(){loadLzString((function(){if(loadSnapshotModalLog("info","Please wait, activating snapshot..."),$("#loadSnapshotModal").modal("hide"),netdataShowAlarms=!1,netdataRegistry=!1,netdataServer=tmpSnapshotData.server,document.getElementById("charts_div").innerHTML="",document.getElementById("sidebar").innerHTML="","undefined"!==typeof tmpSnapshotData.hash?urlOptions.hash=tmpSnapshotData.hash:urlOptions.hash="#","undefined"!==typeof tmpSnapshotData.info){var e=jsonParseFn(tmpSnapshotData.info);"undefined"!==typeof e.menu&&(netdataDashboard.menu=e.menu),"undefined"!==typeof e.submenu&&(netdataDashboard.submenu=e.submenu),"undefined"!==typeof e.context&&(netdataDashboard.context=e.context)}"string"!==typeof tmpSnapshotData.compression&&(tmpSnapshotData.compression="none"),"undefined"===typeof snapshotOptions.compressions[tmpSnapshotData.compression]&&(alert("unknown compression method: "+tmpSnapshotData.compression),tmpSnapshotData.compression="none"),tmpSnapshotData.uncompress=snapshotOptions.compressions[tmpSnapshotData.compression].uncompress,window.NETDATA.parseDom(),reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.g)({snapshot:tmpSnapshotData})),window.netdataSnapshotData=tmpSnapshotData,urlOptions.after=tmpSnapshotData.after_ms,urlOptions.before=tmpSnapshotData.before_ms,"undefined"!==typeof tmpSnapshotData.highlight_after_ms&&null!==tmpSnapshotData.highlight_after_ms&&tmpSnapshotData.highlight_after_ms>0&&"undefined"!==typeof tmpSnapshotData.highlight_before_ms&&null!==tmpSnapshotData.highlight_before_ms&&tmpSnapshotData.highlight_before_ms>0?(urlOptions.highlight_after=tmpSnapshotData.highlight_after_ms,urlOptions.highlight_before=tmpSnapshotData.highlight_before_ms,urlOptions.highlight=!0):(urlOptions.highlight_after=0,urlOptions.highlight_before=0,urlOptions.highlight=!1),netdataCheckXSS=!1,NETDATA.xss.enabled=!0,NETDATA.xss.enabled_for_data=!0,loadSnapshotPreflightEmpty(),initializeDynamicDashboard()}))}))};var loadSnapshotDragAndDropInitialized=!1;function loadSnapshotDragAndDropSetup(){!1===loadSnapshotDragAndDropInitialized&&(loadSnapshotDragAndDropInitialized=!0,$("#loadSnapshotDragAndDrop").on("drag dragstart dragend dragover dragenter dragleave drop",(function(e){e.preventDefault(),e.stopPropagation()})).on("drop",(function(e){e.originalEvent.dataTransfer.files.length?loadSnapshotPreflightFile(e.originalEvent.dataTransfer.files.item(0)):(loadSnapshotPreflightEmpty(),loadSnapshotModalLog("danger","No file selected"))})))}window.loadSnapshotPreflight=function(){var e=document.getElementById("loadSnapshotSelectFiles").files;if(!e.length)return loadSnapshotPreflightEmpty(),void loadSnapshotModalLog("danger","No file selected");loadSnapshotModalLog("info","Loading file..."),loadSnapshotPreflightFile(e.item(0))};var saveSnapshotStop=!1;function saveSnapshotCancel(){reduxStore.dispatch(Object(_domains_dashboard_actions__WEBPACK_IMPORTED_MODULE_6__.f)()),saveSnapshotStop=!0}var saveSnapshotModalInitialized=!1;function saveSnapshotModalSetup(){!1===saveSnapshotModalInitialized&&(saveSnapshotModalInitialized=!0,$("#saveSnapshotModal").on("hide.bs.modal",saveSnapshotCancel).on("show.bs.modal",saveSnapshotModalInit).on("shown.bs.modal",(function(){$("#saveSnapshotResolutionSlider").find(".slider-handle:first").attr("tabindex",1),document.getElementById("saveSnapshotComments").focus()})))}function saveSnapshotModalLog(e,t){document.getElementById("saveSnapshotStatus").className="alert 
alert-"+e,document.getElementById("saveSnapshotStatus").innerHTML=t}function saveSnapshotModalShowExpectedSize(){var e=Math.round(saveSnapshotViewDuration/saveSnapshotSelectedSecondsPerPoint),t="info",a="A moderate snapshot.",n=Math.round(10*(options.data.charts_count*snapshotOptions.bytes_per_chart+options.data.dimensions_count*e*snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_disk)/1024/1024)/10,r=Math.round(10*(options.data.charts_count*snapshotOptions.bytes_per_chart+options.data.dimensions_count*e*snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_memory)/1024/1024)/10;n<10&&(t="success",a="A nice small snapshot!"),n>50&&(t="warning",a="Will stress your browser..."),n>100&&(t="danger",a="Hm... good luck..."),saveSnapshotModalLog(t,"The snapshot will have "+e.toString()+" points per dimension. Expected size on disk "+n+" MB, at browser memory "+r+" MB.<br/>"+a)}var saveSnapshotCompression=snapshotOptions.compressionDefault;function saveSnapshotSetCompression(e){saveSnapshotCompression=e,document.getElementById("saveSnapshotCompressionName").innerHTML=saveSnapshotCompression,saveSnapshotModalShowExpectedSize()}var saveSnapshotSlider=null,saveSnapshotSelectedSecondsPerPoint=1,saveSnapshotViewDuration=1,browser_timezone;function saveSnapshotModalInit(){$("#saveSnapshotModalProgressSection").hide(),$("#saveSnapshotResolutionRadio").show(),saveSnapshotModalLog("info","Select resolution and click <b>Save</b>"),$("#saveSnapshotExport").removeClass("disabled"),loadBootstrapSlider((function(){var e=reduxStore.getState();saveSnapshotViewDuration=-Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.i)(e);var t=Math.round(Date.now()-1e3*saveSnapshotViewDuration),a=Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.m)(e);Boolean(a)&&(saveSnapshotViewDuration=Math.round((a.before-a.after)/1e3),t=a.after);var n=new Date(t),r=n.getFullYear()+Object(_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__.b)(n.getMonth()+1)+Object(_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__.b)(n.getDate())+"-"+Object(_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__.b)(n.getHours())+Object(_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__.b)(n.getMinutes())+Object(_utils_units_conversion__WEBPACK_IMPORTED_MODULE_5__.b)(n.getSeconds());document.getElementById("saveSnapshotFilename").value="netdata-"+options.hostname.toString()+"-"+r.toString()+"-"+saveSnapshotViewDuration.toString()+".snapshot",saveSnapshotSetCompression(saveSnapshotCompression);var o=options.update_every,i=Math.round(saveSnapshotViewDuration/100);Boolean(a)&&(i=Math.round(saveSnapshotViewDuration/50));var s=Math.round(saveSnapshotViewDuration/Math.round($(document.getElementById("charts_div")).width()/2));i<10&&(i=10),i<o&&(i=o),s<o&&(s=o),s>i&&(s=i),null!==saveSnapshotSlider&&saveSnapshotSlider.destroy(),saveSnapshotSlider=new Slider("#saveSnapshotResolutionSlider",{ticks:[o,s,i],min:o,max:i,step:options.update_every,value:s,scale:i>100?"logarithmic":"linear",tooltip:"always",formatter:function(e){e<1&&(e=1),e<options.data.update_every&&(e=options.data.update_every),saveSnapshotSelectedSecondsPerPoint=e,saveSnapshotModalShowExpectedSize();var t=" seconds ";return 1===e&&(t=" second "),e+t+"per point"+(e===options.data.update_every?", server 
default":"").toString()}})}))}window.saveSnapshot=function(){loadPako((function(){loadLzString((function(){saveSnapshotStop=!1,$("#saveSnapshotModalProgressSection").show(),$("#saveSnapshotResolutionRadio").hide(),$("#saveSnapshotExport").addClass("disabled");var e=document.getElementById("saveSnapshotFilename").value;saveSnapshotModalLog("info","Generating snapshot as <code>"+e.toString()+"</code>");var t=document.getElementById("saveSnapshotModalProgressBar"),a=document.getElementById("saveSnapshotModalProgressBarText");options.data.charts_by_name=null;var n=reduxStore.getState(),r=Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.i)(n),o={hostname:options.hostname,server:_utils_server_detection__WEBPACK_IMPORTED_MODULE_9__.b,netdata_version:options.data.version,snapshot_version:1,after_ms:Date.now()+1e3*r,before_ms:Date.now(),highlight_after_ms:urlOptions.highlight_after,highlight_before_ms:urlOptions.highlight_before,duration_ms:1e3*options.duration,update_every_ms:1e3*options.update_every,data_points:0,url:(null!==urlOptions.server?urlOptions.server:document.location.origin.toString()+document.location.pathname.toString()+document.location.search.toString()).toString(),comments:document.getElementById("saveSnapshotComments").value.toString(),hash:urlOptions.hash,charts:options.data,info:jsonStringifyFn({menu:netdataDashboard.menu,submenu:netdataDashboard.submenu,context:netdataDashboard.context}),charts_ok:0,charts_failed:0,compression:saveSnapshotCompression,data_size:0,data:{}};"undefined"===typeof snapshotOptions.compressions[o.compression]&&(alert("unknown compression method: "+o.compression),o.compression="none");var i=snapshotOptions.compressions[o.compression].compress,s=snapshotOptions.compressions[o.compression].compressed_length;function l(e){var t=e.data,a=e.chartDataUniqueID;if(null===t)return 0;var n=JSON.stringify(t),r=i(n);return o.data[a]=r,s(r)}var c=Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.m)(n),u=!1;c?(o.after_ms=c.after,o.before_ms=c.before):(reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.s)({after:o.after_ms,before:o.before_ms})),u=!0),o.duration_ms=o.before_ms-o.after_ms,o.data_points=Math.round((o.before_ms-o.after_ms)/(1e3*saveSnapshotSelectedSecondsPerPoint)),saveSnapshotModalLog("info","Generating snapshot with "+o.data_points.toString()+" data points per dimension..."),reduxStore.dispatch(Object(_domains_dashboard_actions__WEBPACK_IMPORTED_MODULE_6__.e)({charts:o.charts,dataPoints:o.data_points})),window.saveSnapshotRestore=function(){$("#saveSnapshotModal").modal("hide"),$(t).css("width","0%").attr("aria-valuenow",0),a.innerText="0%",reduxStore.dispatch(Object(_domains_dashboard_actions__WEBPACK_IMPORTED_MODULE_6__.f)()),reduxStore.dispatch(Object(_domains_chart_actions__WEBPACK_IMPORTED_MODULE_7__.j)()),u&&reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.j)()),$("#saveSnapshotExport").removeClass("disabled")};var d=0,h=" Resolution: <b>"+saveSnapshotSelectedSecondsPerPoint.toString()+(1===saveSnapshotSelectedSecondsPerPoint?" 
second ":" seconds ").toString()+"per point</b>.";window.chartUpdated=function(n){var r=n.chart,i=n.chartDataUniqueID,s=n.data;!0===saveSnapshotStop&&(saveSnapshotModalLog("info","Cancelled!"),saveSnapshotRestore());var c=reduxStore.getState(),u=Object(_domains_chart_selectors__WEBPACK_IMPORTED_MODULE_8__.b)(c),p=Object(_domains_chart_selectors__WEBPACK_IMPORTED_MODULE_8__.e)(c),f=Object(_domains_chart_selectors__WEBPACK_IMPORTED_MODULE_8__.d)(c),g=(p+f)/u*100;$(t).css("width",g+"%").attr("aria-valuenow",g),a.innerText=Math.round(g).toString()+"%, "+(r||s.id),d+=l({data:s,chartDataUniqueID:i}),saveSnapshotModalLog(f?"danger":"info","Generated snapshot data size <b>"+(Math.round(100*d/1024/1024)/100).toString()+" MB</b>. "+(f?f.toString()+" charts have failed to be downloaded":"").toString()+h),window.saveData=o,p+f===u&&(o.charts_ok=p,o.charts_failed=f,o.data_size=d,saveObjectToClient(o,e),f>0&&alert("".concat(f," failed to be downloaded")),saveSnapshotRestore(),o=null)}}))}))};try{browser_timezone=Intl.DateTimeFormat().resolvedOptions().timeZone}catch(e){console.log("failed to detect browser timezone: "+e.toString()),browser_timezone="cannot-detect-it"}var getOption=function(e){var t=reduxStore.getState();return Object(_domains_global_selectors__WEBPACK_IMPORTED_MODULE_3__.b)(e)(t)};function dashboardSettingsSetup(){var e=function(){var e=function(e){var t=$("#"+e);t.prop("checked")!==getOption(e)&&t.bootstrapToggle(getOption(e)?"on":"off")};e("stop_updates_when_focus_is_lost"),e("eliminate_zero_dimensions"),e("destroy_on_hide"),e("async_on_scroll"),e("parallel_refresher"),e("concurrent_refreshes"),e("sync_selection"),e("legend_right"),$("#"+"netdata_theme_control").bootstrapToggle("slate"===netdataTheme?"on":"off"),e("show_help"),e("pan_and_zoom_data_padding"),e("smooth_plot"),function(e){var t=$("#"+e);t.prop("checked")!==("auto"===getOption("units"))&&t.bootstrapToggle("auto"===getOption("units")?"on":"off"),!0===t.prop("checked")?($("#settingsLocaleTempRow").show(),$("#settingsLocaleTimeRow").show()):($("#settingsLocaleTempRow").hide(),$("#settingsLocaleTimeRow").hide())}("units_conversion"),function(e){var 
t=$("#"+e);t.prop("checked")!==("celsius"===getOption("temperature"))&&t.bootstrapToggle("celsius"===getOption("temperature")?"on":"off")}("units_temp"),e("seconds_as_time"),!1===getOption("parallel_refresher")?$("#concurrent_refreshes_row").hide():$("#concurrent_refreshes_row").show()};e(),$("#eliminate_zero_dimensions").change((function(){setOption("eliminate_zero_dimensions",$(this).prop("checked"))})),$("#destroy_on_hide").change((function(){setOption("destroy_on_hide",$(this).prop("checked"))})),$("#async_on_scroll").change((function(){setOption("async_on_scroll",$(this).prop("checked"))})),$("#parallel_refresher").change((function(){setOption("parallel_refresher",$(this).prop("checked"))})),$("#concurrent_refreshes").change((function(){setOption("concurrent_refreshes",$(this).prop("checked"))})),$("#sync_selection").change((function(){setOption("sync_selection",$(this).prop("checked")),netdataReload()})),$("#stop_updates_when_focus_is_lost").change((function(){urlOptions.update_always=!$(this).prop("checked"),urlOptions.hashUpdate(),setOption("stop_updates_when_focus_is_lost",!urlOptions.update_always)})),$("#smooth_plot").change((function(){setOption("smooth_plot",$(this).prop("checked"))})),$("#pan_and_zoom_data_padding").change((function(){setOption("pan_and_zoom_data_padding",$(this).prop("checked"))})),$("#seconds_as_time").change((function(){setOption("seconds_as_time",$(this).prop("checked"))})),$("#units_conversion").change((function(){setOption("units",$(this).prop("checked")?"auto":"original"),e()})),$("#units_temp").change((function(){setOption("temperature",$(this).prop("checked")?"celsius":"fahrenheit")})),$("#legend_right").change((function(){setOption("legend_right",$(this).prop("checked")),netdataReload()})),$("#show_help").change((function(){urlOptions.help=$(this).prop("checked"),urlOptions.hashUpdate(),setOption("show_help",urlOptions.help),netdataReload()})),$("#netdata_theme_control").change((function(){urlOptions.theme=$(this).prop("checked")?"slate":"white",urlOptions.hashUpdate(),setTheme(urlOptions.theme)&&netdataReload()}))}var CHART_DIV_ID_PREFIX="chart_",CHART_DIV_OFFSET=-50;function scrollDashboardTo(){if(null!==window.netdataSnapshotData&&"undefined"!==typeof window.netdataSnapshotData.hash)scrollToId(window.netdataSnapshotData.hash.replace("#",""));else if(scrollToId(urlOptions.hash.replace("#","")),null!==urlOptions.chart){var e=document.getElementById("".concat(CHART_DIV_ID_PREFIX).concat(Object(_utils_name_2_id__WEBPACK_IMPORTED_MODULE_10__.a)(urlOptions.chart)));if(e){var t=e.offsetTop+CHART_DIV_OFFSET;document.querySelector("html").scrollTop=t}}}var modalHiddenCallback=null;function enableTooltipsAndPopovers(){$('[data-toggle="tooltip"]').tooltip({animated:"fade",trigger:"hover",html:!0,delay:{show:500,hide:0},container:"body"}),$('[data-toggle="popover"]').popover()}window.scrollToChartAfterHidingModal=function(e,t,a){modalHiddenCallback=function(){if("string"===typeof e){var n=document.getElementById("".concat(CHART_DIV_ID_PREFIX).concat(Object(_utils_name_2_id__WEBPACK_IMPORTED_MODULE_10__.a)(e)));if(n){var r=n.offsetTop+CHART_DIV_OFFSET;document.querySelector("html").scrollTop=r}}if(["WARNING","CRITICAL"].includes(a)){reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.s)({after:t-12e4,before:t+12e4}))}}};var runOnceOnDashboardLastRun=0;function runOnceOnDashboardWithjQuery(){if(0!==runOnceOnDashboardLastRun)return scrollDashboardTo(),$(document.body).scrollspy("refresh"),void 
$(document.body).scrollspy("process");runOnceOnDashboardLastRun=Date.now();var e=0,t=0,a=!1,n=!1,r=$(window).height()/3;$(".modal").on("show.bs.modal",(function(){0===t&&(e=window.scrollY,$("body").css({overflow:"hidden",position:"fixed",top:-e}),a=!0,null===NETDATA.options.pauseCallback?(NETDATA.pause((function(){})),n=!0):n=!1),t++})).on("hide.bs.modal",(function(){--t<=0&&(t=0,$("body").css({overflow:"",position:"",top:""}),$("html, body").animate({scrollTop:e},0),!0===n&&(NETDATA.unpause(),n=!1),$(document.body).scrollspy("process"))})).on("hidden.bs.modal",(function(){0===t&&(a=!1),"function"===typeof modalHiddenCallback&&modalHiddenCallback(),modalHiddenCallback=null})),$("#sidebar").affix({offset:{top:0,bottom:0}}).on("affixed.bs.affix",(function(){$(this).removeAttr("style")})).on("affix-top.bs.affix",(function(){if(a)return!1})).on("activate.bs.scrollspy",(function(e){if(!1===a){var t=$(e.target).find("a").attr("href");"string"===typeof t&&"#"===t.substring(0,1)&&!1===urlOptions.hash.startsWith(t+"_submenu_")&&(urlOptions.hash=t,urlOptions.hashUpdate())}})),Ps.initialize(document.getElementById("sidebar"),{wheelSpeed:.5,wheelPropagation:!0,swipePropagation:!0,minScrollbarLength:null,maxScrollbarLength:null,useBothWheelAxes:!1,suppressScrollX:!0,suppressScrollY:!1,scrollXMarginOffset:0,scrollYMarginOffset:0,theme:"default"}),r>250&&(r=250),r<75&&(r=75),document.body.setAttribute("data-offset",r),scrollDashboardTo(),$(document.body).scrollspy({target:"#sidebar",offset:r}),$("#deleteRegistryModal").on("hidden.bs.modal",(function(){deleteRegistryGuid=null})),$("#updateModal").on("show.bs.modal",(function(){versionLog("checking, please wait...")})).on("shown.bs.modal",(function(){notifyForUpdate(!0)})),$("#alarmsModal").on("shown.bs.modal",(function(){alarmsUpdateModal()})).on("hidden.bs.modal",(function(){document.getElementById("alarms_active").innerHTML=document.getElementById("alarms_all").innerHTML=document.getElementById("alarms_log").innerHTML="loading..."})),dashboardSettingsSetup(),loadSnapshotDragAndDropSetup(),saveSnapshotModalSetup(),showPageFooter(),$.fn.shorten=function(e){var t={showChars:750,minHideChars:10,ellipsesText:"...",moreText:'<i class="fas fa-expand"></i> show more information',lessText:'<i class="fas fa-compress"></i> show less information',onLess:function(){NETDATA.onscroll()},onMore:function(){NETDATA.onscroll()},errMsg:null,force:!1};return e&&$.extend(t,e),!($(this).data("jquery.shorten")&&!t.force)&&($(this).data("jquery.shorten",!0),$(document).off("click",".morelink"),$(document).on({click:function(){var e=$(this);return e.hasClass("less")?(e.removeClass("less"),e.html(t.moreText),e.parent().prev().animate({height:"0%"},0,(function(){e.parent().prev().prev().show()})).hide(0,(function(){t.onLess()}))):(e.addClass("less"),e.html(t.lessText),e.parent().prev().animate({height:"100%"},0,(function(){e.parent().prev().prev().hide()})).show(0,(function(){t.onMore()}))),!1}},".morelink"),this.each((function(){var e=$(this),a=e.html();if(e.text().length>t.showChars+t.minHideChars){var n=a.substr(0,t.showChars);if(n.indexOf("<")>=0){for(var r=!1,o="",i=0,s=[],l=null,c=0,u=0;u<=t.showChars;c++)if("<"!==a[c]||r||(r=!0,"/"===(l=a.substring(c+1,a.indexOf(">",c)))[0]?l!=="/"+s[0]?t.errMsg="ERROR en HTML: the top of the stack should be the tag that closes":s.shift():"br"!==l.toLowerCase()&&s.unshift(l)),r&&">"===a[c]&&(r=!1),r)o+=a.charAt(c);else if(u++,i<=t.showChars)o+=a.charAt(c),i++;else if(s.length>0){for(var 
d=0;d<s.length;d++)o+="</"+s[d]+">";break}n=$("<div/>").html(o+'<span class="ellip">'+t.ellipsesText+"</span>").html()}else n+=t.ellipsesText;var h='<div class="shortcontent">'+n+'</div><div class="allcontent">'+a+'</div><span><a href="javascript://nop/" class="morelink">'+t.moreText+"</a></span>";e.html(h),e.find(".allcontent").hide(),$(".shortcontent p:last",e).css("margin-bottom",0)}})))}}function finalizePage(){urlOptions.after<0?reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.q)({after:urlOptions.after})):!0===urlOptions.pan_and_zoom&&reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.s)({after:urlOptions.after,before:urlOptions.before})),NETDATA.parseDom(),NETDATA.unpause(),runOnceOnDashboardWithjQuery(),$(".shorten").shorten(),enableTooltipsAndPopovers(),_utils_is_demo__WEBPACK_IMPORTED_MODULE_14__.a||notifyForUpdate(),!0===urlOptions.show_alarms&&setTimeout((function(){$("#alarmsModal").modal("show")}),1e3),NETDATA.onresizeCallback=function(){Ps.update(document.getElementById("sidebar"))},NETDATA.onresizeCallback(),null!==window.netdataSnapshotData&&reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.s)({after:window.netdataSnapshotData.after_ms,before:window.netdataSnapshotData.before_ms}))}window.resetDashboardOptions=function(){reduxStore.dispatch(Object(_domains_global_actions__WEBPACK_IMPORTED_MODULE_2__.l)()),urlOptions.update_always=!1,urlOptions.help=!1,urlOptions.theme="slate",urlOptions.hashUpdate(),netdataReload()};var netdataPrepCallback=function(){_utils_is_demo__WEBPACK_IMPORTED_MODULE_14__.a?document.getElementById("masthead").style.display="block":!0===urlOptions.update_always&&setOption("stop_updates_when_focus_is_lost",!urlOptions.update_always)};window.selected_server_timezone=function(e,t){if(document.getElementById("timezone_error_message").innerHTML="","undefined"===typeof t)setOption("user_set_server_timezone",e),Object(_utils_date_time__WEBPACK_IMPORTED_MODULE_11__.a)(e)?($("#local_timezone").prop("checked")&&$("#local_timezone").bootstrapToggle("off"),setOption("timezone",e)):(setOption("timezone","default"),$("#local_timezone").prop("checked")||$("#local_timezone").bootstrapToggle("on"),document.getElementById("timezone_error_message").innerHTML="Ooops! That timezone was not accepted by your browser. Please open a github issue to help us fix it.",setOption("user_set_server_timezone",options.timezone));else if(!0===t)setOption("timezone","default");else{var a=getOption("user_set_server_timezone");"default"===a&&(setOption("user_set_server_timezone",options.timezone),a=options.timezone),Object(_utils_date_time__WEBPACK_IMPORTED_MODULE_11__.a)(a)?setOption("timezone",a):(setOption("timezone","default"),$("#local_timezone").prop("checked")||$("#local_timezone").bootstrapToggle("on"),document.getElementById("timezone_error_message").innerHTML='Sorry. The timezone "'+e.toString()+'" is not accepted by your browser. 
Please select one from the list.',setOption("user_set_server_timezone",options.timezone))}var n=getOption("timezone");return document.getElementById("current_timezone").innerText="default"===n?"unset, using browser default":n,!1};var netdataCallback=initializeDynamicDashboard;window.showSignInModal=function(){document.getElementById("sim-registry").innerHTML=getFromRegistry("registryServer"),$("#signInModal").modal("show")},window.explicitlySignIn=function(){$("#signInModal").modal("hide"),reduxStore.dispatch(Object(_domains_dashboard_actions__WEBPACK_IMPORTED_MODULE_6__.a)())}},26:function(e,t,a){"use strict";a.d(t,"e",(function(){return n})),a.d(t,"c",(function(){return r})),a.d(t,"b",(function(){return o})),a.d(t,"a",(function(){return i})),a.d(t,"d",(function(){return s}));var n="global",r="***",o=5e3,i="CLOUD_BASE_URL_DISABLED",s=["WARNING","ERROR","REMOVED","UNDEFINED","UNINITIALIZED","CLEAR","CRITICAL"]},276:function(e,t){},277:function(e,t,a){"use strict";var n=a(5),r=a(0),o=a.n(r),i=a(91),s=a(3),l=function(e){var t=e.userStatus,a=e.nodeStatus,n=e.date;return Object(r.useMemo)((function(){return function(e){var t=e.nodeStatus,a=e.userStatus;e.date;return{title:"Netdata Cloud connection status",text:{header:function(){return o.a.createElement(s.B,null,"This node is currently"," ",o.a.createElement(s.B,{strong:!0},"LIVE"===t?"Connected":"Not Connected")," to Netdata Cloud")},bullets:"NOT_LIVE"===t?[function(){return o.a.createElement(s.B,null,"To troubleshoot Netdata Cloud connection issues, please follow"," ",o.a.createElement(i.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/agent/claim#troubleshooting"},"this guide"),".")}]:[],footer:function(){return o.a.createElement(s.B,null,"You are"," ",o.a.createElement(s.B,{strong:!0},"LOGGED_IN"===a?"Logged In":"EXPIRED_LOGIN"===a?"Logged out":"Not signed-up")," ","to Netdata Cloud")}},CTA1:{text:"Take me to Netdata Cloud"}}}({userStatus:t,nodeStatus:a,date:n})}),[t,a,n])},c=a(58),u=function(e){var t=e.title,a=e.text,n=e.CTA1,i=e.closeModal,l=e.onRefresh,u=e.isCTA1Disabled,d=Object(r.useCallback)((function(e){var t=e.link;i(),window.location.href=t}),[i]);return o.a.createElement(s.q,null,o.a.createElement(s.t,{width:180,background:"modalBackground"},o.a.createElement(s.v,null,o.a.createElement(s.k,{margin:[0]},t),o.a.createElement(s.s,{onClose:i})),o.a.createElement(s.r,null,o.a.createElement(s.j,{padding:[0,0,4,0],column:!0,gap:3},a.header({}),a.bullets.length>0&&o.a.createElement(s.j,{column:!0,gap:3},o.a.createElement(s.j,{column:!0,gap:1,as:"ul"},a.bullets.map((function(e,t){return"function"===typeof e?o.a.createElement("li",{key:t}," ",e()):o.a.createElement("li",{key:e},o.a.createElement(s.B,null,e))})))),a.footer())),o.a.createElement(s.u,null,o.a.createElement(s.a,{"data-testid":"cta1",margin:[0,2,0,0],width:{min:40}},o.a.createElement(c.a,{utmParameters:{content:"connection_to_cloud",campaign:"agent_nudge_to_cloud"}},(function(e){var t=e.link;return o.a.createElement(s.b,{"data-ga":"connection-to-cloud::click-ct1::ad",disabled:u,textTransform:"none","data-testid":"cta1-button",onClick:function(){return d({link:t})},width:"100%",label:n.text})}))),o.a.createElement(s.a,{"data-ga":"connection-to-cloud::click-check-now::ad",onClick:l,height:10,className:"btn btn-default","data-testid":"cta2-button",width:{min:40}},o.a.createElement(s.a,{as:s.B,sx:{fontWeight:"500",lineHeight:"25px"}},"Check Now")))))},d=a(27),h=a(127),p=a(9);t.a=function(){var 
e=Object(d.e)(p.O),t=Object(d.e)(p.u),a=Object(r.useState)(!1),i=Object(n.a)(a,2),c=i[0],f=i[1],g=l({userStatus:(null===e||void 0===e?void 0:e.userStatus)||"UNKNOWN",nodeStatus:(null===e||void 0===e?void 0:e.nodeLiveness)||"NOT_LIVE",date:""});Object(r.useEffect)((function(){document.documentElement.style.overflow=c?"hidden":"auto"}),[c]);var m=Object(r.useCallback)((function(){f(!0)}),[]),b=Object(r.useCallback)((function(){f(!1)}),[]),v=Object(h.b)();return t?o.a.createElement(s.j,{column:!0},o.a.createElement(s.z,{"data-ga":"connection-to-cloud::click-pill::ad","data-testid":"header-connection-to-cloud-button",onClick:m,flavour:"neutral"},"Connection to Cloud"),c&&o.a.createElement(u,Object.assign({},g,{isCTA1Disabled:"LIVE"!==(null===e||void 0===e?void 0:e.nodeLiveness),closeModal:b,onRefresh:v}))):null}},28:function(e,t,a){"use strict";a.d(t,"f",(function(){return i})),a.d(t,"b",(function(){return s})),a.d(t,"a",(function(){return l})),a.d(t,"d",(function(){return c})),a.d(t,"h",(function(){return u})),a.d(t,"e",(function(){return d})),a.d(t,"g",(function(){return h})),a.d(t,"c",(function(){return p}));var n=a(14),r=a(7),o=a(135),i=function(e){return e>0},s="https://registry.my-netdata.io",l=1e3,c=function(e,t){return"".concat(e,"/sso/v2/").concat(t)},u="&utm_source=agent&utm_medium=web",d=function(){var e=document.getElementById("charts_div");return e?60*-Math.round(e.getBoundingClientRect().width/3/60):(console.error("Couldn't find '.charts_div' element to calculate width"),-900)},h=function(e,t,a){return t?e.reduce((function(e,i){return Object(r.a)({},e,Object(n.a)({},i,Object(o.a)(t[i],a[i])?t[i]:a[i]))}),a):a};function p(){for(var e=arguments.length,t=new Array(e),a=0;a<e;a++)t[a]=arguments[a];return function(e){t.forEach((function(t){null===t||void 0===t||t(e)}))}}},300:function(e,t,a){var n,r,o;r=[],void 0===(o="function"===typeof(n=function(){var 
e=0,t={header:{title:{text:"",color:"#333333",fontSize:18,fontWeight:"bold",font:"arial"},subtitle:{text:"",color:"#666666",fontSize:14,fontWeight:"bold",font:"arial"},location:"top-center",titleSubtitlePadding:8},footer:{text:"",color:"#666666",fontSize:14,fontWeight:"bold",font:"arial",location:"left"},size:{canvasHeight:500,canvasWidth:500,pieInnerRadius:"0%",pieOuterRadius:null},data:{sortOrder:"none",ignoreSmallSegments:{enabled:!1,valueType:"percentage",value:null},smallSegmentGrouping:{enabled:!1,value:1,valueType:"percentage",label:"Other",color:"#cccccc"},content:[]},labels:{outer:{format:"label",hideWhenLessThanPercentage:null,pieDistance:30},inner:{format:"percentage",hideWhenLessThanPercentage:null},mainLabel:{color:"#333333",font:"arial",fontWeight:"normal",fontSize:10},percentage:{color:"#dddddd",font:"arial",fontWeight:"bold",fontSize:10,decimalPlaces:0},value:{color:"#cccc44",fontWeight:"bold",font:"arial",fontSize:10},lines:{enabled:!0,style:"curved",color:"segment"},truncation:{enabled:!1,truncateLength:30},formatter:null},effects:{load:{effect:"none",speed:1e3},pullOutSegmentOnClick:{effect:"none",speed:300,size:10},highlightSegmentOnMouseover:!1,highlightLuminosity:-.2},tooltips:{enabled:!1,type:"placeholder",string:"",placeholderParser:null,styles:{fadeInSpeed:250,backgroundColor:"#000000",backgroundOpacity:.5,color:"#efefef",borderRadius:2,font:"arial",fontWeight:"bold",fontSize:10,padding:4}},misc:{colors:{background:null,segments:["#2484c1","#65a620","#7b6888","#a05d56","#961a1a","#d8d23a","#e98125","#d0743c","#635222","#6ada6a","#0c6197","#7d9058","#207f33","#44b9b0","#bca44a","#e4a14b","#a3acb2","#8cc3e9","#69a6f9","#5b388f","#546e91","#8bde95","#d2ab58","#273c71","#98bf6e","#4daa4b","#98abc5","#cc1010","#31383b","#006391","#c2643f","#b0a474","#a5a39c","#a9c2bc","#22af8c","#7fcecf","#987ac6","#3d3b87","#b77b1c","#c9c2b6","#807ece","#8db27c","#be66a2","#9ed3c6","#00644b","#005064","#77979f","#77e079","#9c73ab","#1f79a7"],segmentStroke:"#ffffff"},gradient:{enabled:!1,percentage:95,color:"#000000"},canvasPadding:{top:5,right:5,bottom:5,left:5},pieCenterOffset:{x:0,y:0},cssPrefix:null},callbacks:{onload:null,onMouseoverSegment:null,onMouseoutSegment:null,onClickSegment:null}},a=function(e){var t=e.cssPrefix,a=e.element,r=e.options;if(!window.d3||!window.d3.hasOwnProperty("version"))return console.error("d3pie error: d3 is not available"),!1;if(!(a instanceof HTMLElement||a instanceof SVGElement))return console.error("d3pie error: the first d3pie() param must be a valid DOM element (not jQuery) or a ID string."),!1;if(!/[a-zA-Z][a-zA-Z0-9_-]*$/.test(t))return console.error("d3pie error: invalid options.misc.cssPrefix"),!1;if(!n.isArray(r.data.content))return console.error("d3pie error: invalid config structure: missing data.content property."),!1;if(0===r.data.content.length)return console.error("d3pie error: no data supplied."),!1;for(var o=[],i=0;i<r.data.content.length;i++)"number"!==typeof r.data.content[i].value||isNaN(r.data.content[i].value)?console.log("not valid: ",r.data.content[i]):r.data.content[i].value<=0?console.log("not valid - should have positive value: ",r.data.content[i]):o.push(r.data.content[i]);return e.options.data.content=o,!0},n={addSVGSpace:function(e){var t=e.element,a=e.options.size.canvasWidth,n=e.options.size.canvasHeight,r=e.options.misc.colors.background,o=d3.select(t).append("svg:svg").attr("width",a).attr("height",n);return"transparent"!==r&&o.style("background-color",(function(){return r})),o},shuffleArray:function(e){for(var 
t,a,n=e.length;0!==n;)a=Math.floor(Math.random()*n),t=e[n-=1],e[n]=e[a],e[a]=t;return e},processObj:function(e,t,a){return"string"===typeof t?n.processObj(e,t.split("."),a):1===t.length&&void 0!==a?(e[t[0]]=a,e[t[0]]):0===t.length?e:n.processObj(e[t[0]],t.slice(1),a)},getDimensions:function(e){"string"===typeof e&&(e=document.getElementById(e));var t=0,a=0;if(e){var n=e.getBBox();t=n.width,a=n.height}else console.log("error: getDimensions() "+e+" not found.");return{w:t,h:a}},rectIntersect:function(e,t){return!(t.x>e.x+e.w||t.x+t.w<e.x||t.y+t.h<e.y||t.y>e.y+e.h)},getColorShade:function(e,t){(e=String(e).replace(/[^0-9a-f]/gi,"")).length<6&&(e=e[0]+e[0]+e[1]+e[1]+e[2]+e[2]),t=t||0;for(var a="#",n=0;n<3;n++){var r=parseInt(e.substr(2*n,2),16);a+=("00"+(r=Math.round(Math.min(Math.max(0,r+r*t),255)).toString(16))).substr(r.length)}return a},initSegmentColors:function(e){for(var t=e.options.data.content,a=e.options.misc.colors.segments,n=[],r=0;r<t.length;r++)t[r].hasOwnProperty("color")?n.push(t[r].color):n.push(a[r]);return n},applySmallSegmentGrouping:function(e,t){var a;"percentage"===t.valueType&&(a=o.getTotalPieSize(e));for(var n=[],r=[],i=0,s=0;s<e.length;s++)if("percentage"===t.valueType){if(e[s].value/a*100<=t.value){r.push(e[s]),i+=e[s].value;continue}e[s].isGrouped=!1,n.push(e[s])}else{if(e[s].value<=t.value){r.push(e[s]),i+=e[s].value;continue}e[s].isGrouped=!1,n.push(e[s])}return r.length&&n.push({color:t.color,label:t.label,value:i,isGrouped:!0,groupedData:r}),n},showPoint:function(e,t,a){e.append("circle").attr("cx",t).attr("cy",a).attr("r",2).style("fill","black")},isFunction:function(e){return e&&"[object Function]"==={}.toString.call(e)},isArray:function(e){return"[object Array]"===Object.prototype.toString.call(e)}},r=function e(){var t,a,n,r,o,i,s=arguments[0]||{},l=1,c=arguments.length,u=!1,d=Object.prototype.toString,h=Object.prototype.hasOwnProperty,p={"[object Boolean]":"boolean","[object Number]":"number","[object String]":"string","[object Function]":"function","[object Array]":"array","[object Date]":"date","[object RegExp]":"regexp","[object Object]":"object"},f={isFunction:function(e){return"function"===f.type(e)},isArray:Array.isArray||function(e){return"array"===f.type(e)},isWindow:function(e){return null!==e&&e===e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null===e?String(e):p[d.call(e)]||"object"},isPlainObject:function(e){if(!e||"object"!==f.type(e)||e.nodeType)return!1;try{if(e.constructor&&!h.call(e,"constructor")&&!h.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(a){return!1}var t;for(t in e);return void 0===t||h.call(e,t)}};for("boolean"===typeof s&&(u=s,s=arguments[1]||{},l=2),"object"===typeof s||f.isFunction(s)||(s={}),c===l&&(s=this,--l);l<c;l++)if(null!==(t=arguments[l]))for(a in t)n=s[a],s!==(r=t[a])&&(u&&r&&(f.isPlainObject(r)||(o=f.isArray(r)))?(o?(o=!1,i=n&&f.isArray(n)?n:[]):i=n&&f.isPlainObject(n)?n:{},s[a]=e(u,i,r)):void 0!==r&&(s[a]=r));return s},o={toRadians:function(e){return e*(Math.PI/180)},toDegrees:function(e){return e*(180/Math.PI)},computePieRadius:function(e){var t=e.options.size,a=e.options.misc.canvasPadding,n=t.canvasWidth-a.left-a.right,r=t.canvasHeight-a.top-a.bottom;"pie-center"!==e.options.header.location&&(r-=e.textComponents.headerHeight),e.textComponents.footer.exists&&(r-=e.textComponents.footer.h);var 
o,i,s=(n<(r=r<0?0:r)?n:r)/3;if(null!==t.pieOuterRadius)if(/%/.test(t.pieOuterRadius)){i=(i=(i=parseInt(t.pieOuterRadius.replace(/[\D]/,""),10))>99?99:i)<0?0:i;var l=n<r?n:r;if("none"!==e.options.labels.outer.format){var c=2*parseInt(e.options.labels.outer.pieDistance,10);l-c>0&&(l-=c)}s=Math.floor(l/100*i)/2}else s=parseInt(t.pieOuterRadius,10);/%/.test(t.pieInnerRadius)?(i=(i=(i=parseInt(t.pieInnerRadius.replace(/[\D]/,""),10))>99?99:i)<0?0:i,o=Math.floor(s/100*i)):o=parseInt(t.pieInnerRadius,10),e.innerRadius=o,e.outerRadius=s},getTotalPieSize:function(e){for(var t=0,a=0;a<e.length;a++)t+=e[a].value;return t},sortPieData:function(e){var t=e.options.data.content;switch(e.options.data.sortOrder){case"none":break;case"random":t=n.shuffleArray(t);break;case"value-asc":t.sort((function(e,t){return e.value<t.value?-1:1}));break;case"value-desc":t.sort((function(e,t){return e.value<t.value?1:-1}));break;case"label-asc":t.sort((function(e,t){return e.label.toLowerCase()>t.label.toLowerCase()?1:-1}));break;case"label-desc":t.sort((function(e,t){return e.label.toLowerCase()<t.label.toLowerCase()?1:-1}))}return t},getPieTranslateCenter:function(e){return"translate("+e.x+","+e.y+")"},calculatePieCenter:function(e){var t=e.options.misc.pieCenterOffset,a=e.textComponents.title.exists&&"pie-center"!==e.options.header.location,n=e.textComponents.subtitle.exists&&"pie-center"!==e.options.header.location,r=e.options.misc.canvasPadding.top;a&&n?r+=e.textComponents.title.h+e.options.header.titleSubtitlePadding+e.textComponents.subtitle.h:a?r+=e.textComponents.title.h:n&&(r+=e.textComponents.subtitle.h);var o=0;e.textComponents.footer.exists&&(o=e.textComponents.footer.h+e.options.misc.canvasPadding.bottom);var i=(e.options.size.canvasWidth-e.options.misc.canvasPadding.left-e.options.misc.canvasPadding.right)/2+e.options.misc.canvasPadding.left,s=(e.options.size.canvasHeight-o-r)/2+r;i+=t.x,s+=t.y,e.pieCenter={x:i,y:s}},rotate:function(e,t,a,n,r){r=r*Math.PI/180;var o=Math.cos,i=Math.sin;return{x:(e-a)*o(r)-(t-n)*i(r)+a,y:(e-a)*i(r)+(t-n)*o(r)+n}},translate:function(e,t,a,n){var r=o.toRadians(n);return{x:e+a*Math.sin(r),y:t-a*Math.cos(r)}},pointIsInArc:function(e,t,a){var n=a.innerRadius()(t),r=a.outerRadius()(t),o=a.startAngle()(t),i=a.endAngle()(t),s=e.x*e.x+e.y*e.y,l=Math.atan2(e.x,-e.y);return l=l<0?l+2*Math.PI:l,n*n<=s&&s<=r*r&&o<=l&&l<=i}},i={add:function(e,t,a){var n=i.getIncludes(a),r=e.options.labels,o=e.svg.insert("g","."+e.cssPrefix+"labels-"+t).attr("class",e.cssPrefix+"labels-"+t),s=e.__labels[t]=o.selectAll("."+e.cssPrefix+"labelGroup-"+t).data(e.options.data.content).enter().append("g").attr("id",(function(a,n){return e.cssPrefix+"labelGroup"+n+"-"+t})).attr("data-index",(function(e,t){return t})).attr("class",e.cssPrefix+"labelGroup-"+t).style("opacity",0),l={section:t,sectionDisplayType:a};n.mainLabel&&s.append("text").attr("id",(function(a,n){return e.cssPrefix+"segmentMainLabel"+n+"-"+t})).attr("class",e.cssPrefix+"segmentMainLabel-"+t).text((function(e,t){var a=e.label;return r.formatter?(l.index=t,l.part="mainLabel",l.value=e.value,l.label=a,a=r.formatter(l)):r.truncation.enabled&&e.label.length>r.truncation.truncateLength&&(a=e.label.substring(0,r.truncation.truncateLength)+"..."),a})).style("font-size",r.mainLabel.fontSize+"px").style("font-family",r.mainLabel.font).style("font-weight",r.mainLabel.fontWeight).style("fill",(function(t,a){return"segment"===r.mainLabel.color?e.options.colors[a]:r.mainLabel.color})),n.percentage&&s.append("text").attr("id",(function(a,n){return 
e.cssPrefix+"segmentPercentage"+n+"-"+t})).attr("class",e.cssPrefix+"segmentPercentage-"+t).text((function(e,t){var a=e.percentage;return r.formatter?(l.index=t,l.part="percentage",l.value=e.value,l.label=e.percentage,a=r.formatter(l)):a+="%",a})).style("font-size",r.percentage.fontSize+"px").style("font-family",r.percentage.font).style("font-weight",r.percentage.fontWeight).style("fill",r.percentage.color),n.value&&s.append("text").attr("id",(function(a,n){return e.cssPrefix+"segmentValue"+n+"-"+t})).attr("class",e.cssPrefix+"segmentValue-"+t).text((function(e,t){return l.index=t,l.part="value",l.value=e.value,l.label=e.value,r.formatter?r.formatter(l,e.value):e.value})).style("font-size",r.value.fontSize+"px").style("font-family",r.value.font).style("font-weight",r.value.fontWeight).style("fill",r.value.color)},positionLabelElements:function(e,t,a){i["dimensions-"+t]=[],e.__labels[t].each((function(a,n){var r=d3.select(this).selectAll("."+e.cssPrefix+"segmentMainLabel-"+t),o=d3.select(this).selectAll("."+e.cssPrefix+"segmentPercentage-"+t),s=d3.select(this).selectAll("."+e.cssPrefix+"segmentValue-"+t);i["dimensions-"+t].push({mainLabel:null!==r.node()?r.node().getBBox():null,percentage:null!==o.node()?o.node().getBBox():null,value:null!==s.node()?s.node().getBBox():null})}));var n=i["dimensions-"+t];switch(a){case"label-value1":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+5}));break;case"label-value2":e.svg.selectAll("."+e.cssPrefix+"segmentValue-"+t).attr("dy",(function(e,t){return n[t].mainLabel.height}));break;case"label-percentage1":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width+5}));break;case"label-percentage2":e.svg.selectAll("."+e.cssPrefix+"segmentPercentage-"+t).attr("dx",(function(e,t){return n[t].mainLabel.width/2-n[t].percentage.width/2})).attr("dy",(function(e,t){return n[t].mainLabel.height}))}},computeLabelLinePositions:function(e){e.lineCoordGroups=[],e.__labels.outer.each((function(t,a){return i.computeLinePosition(e,a)}))},computeLinePosition:function(e,t){var a,n,r,i,l=s.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),c=o.rotate(e.pieCenter.x,e.pieCenter.y-e.outerRadius,e.pieCenter.x,e.pieCenter.y,l),u=e.outerLabelGroupData[t].h/5,d=Math.floor(l/90);switch(2===d&&180===l&&(d=1),d){case 0:a=e.outerLabelGroupData[t].x-6-(e.outerLabelGroupData[t].x-6-c.x)/2,n=e.outerLabelGroupData[t].y+(c.y-e.outerLabelGroupData[t].y)/4,r=e.outerLabelGroupData[t].x-6,i=e.outerLabelGroupData[t].y-u;break;case 1:a=c.x+(e.outerLabelGroupData[t].x-c.x)/4,n=c.y+(e.outerLabelGroupData[t].y-c.y)/4,r=e.outerLabelGroupData[t].x-6,i=e.outerLabelGroupData[t].y-u;break;case 2:var h=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+6;a=c.x-(c.x-h)/4,n=c.y+(e.outerLabelGroupData[t].y-c.y)/4,r=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+6,i=e.outerLabelGroupData[t].y-u;break;case 3:var p=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+6;a=p+(c.x-p)/4,n=e.outerLabelGroupData[t].y+(c.y-e.outerLabelGroupData[t].y)/4,r=e.outerLabelGroupData[t].x+e.outerLabelGroupData[t].w+6,i=e.outerLabelGroupData[t].y-u}"straight"===e.options.labels.lines.style?e.lineCoordGroups[t]=[{x:c.x,y:c.y},{x:r,y:i}]:e.lineCoordGroups[t]=[{x:c.x,y:c.y},{x:a,y:n},{x:r,y:i}]},addLabelLines:function(e){var 
t=e.svg.insert("g","."+e.cssPrefix+"pieChart").attr("class",e.cssPrefix+"lineGroups").style("opacity",1).selectAll("."+e.cssPrefix+"lineGroup").data(e.lineCoordGroups).enter().append("g").attr("class",e.cssPrefix+"lineGroup"),a=d3.line().curve(d3.curveBasis).x((function(e){return e.x})).y((function(e){return e.y}));t.append("path").attr("d",a).attr("stroke",(function(t,a){return"segment"===e.options.labels.lines.color?e.options.colors[a]:e.options.labels.lines.color})).attr("stroke-width",1).attr("fill","none").style("opacity",(function(t,a){var n=e.options.labels.outer.hideWhenLessThanPercentage;return null!==n&&t.percentage<n||""===e.options.data.content[a].label?0:1}))},positionLabelGroups:function(e,t){"none"!==e.options.labels[t].format&&e.__labels[t].style("opacity",(function(a,n){var r=e.options.labels[t].hideWhenLessThanPercentage;return null!==r&&a.percentage<r?0:1})).attr("transform",(function(a,i){var l,c;if("outer"===t)l=e.outerLabelGroupData[i].x,c=e.outerLabelGroupData[i].y;else{var u=r(!0,{},e.pieCenter);if(e.innerRadius>0){var d=s.getSegmentAngle(i,e.options.data.content,e.totalSize,{midpoint:!0}),h=o.translate(e.pieCenter.x,e.pieCenter.y,e.innerRadius,d);u.x=h.x,u.y=h.y}var p=n.getDimensions(e.cssPrefix+"labelGroup"+i+"-inner"),f=p.w/2,g=p.h/4;l=u.x+(e.lineCoordGroups[i][0].x-u.x)/1.8,c=u.y+(e.lineCoordGroups[i][0].y-u.y)/1.8,l-=f,c+=g}return"translate("+l+","+c+")"}))},getIncludes:function(e){var t=!1,a=!1,n=!1;switch(e){case"label":t=!0;break;case"value":a=!0;break;case"percentage":n=!0;break;case"label-value1":case"label-value2":t=!0,a=!0;break;case"label-percentage1":case"label-percentage2":t=!0,n=!0}return{mainLabel:t,value:a,percentage:n}},computeOuterLabelCoords:function(e){e.__labels.outer.each((function(t,a){return i.getIdealOuterLabelPositions(e,a)})),i.resolveOuterLabelCollisions(e)},resolveOuterLabelCollisions:function(e){if("none"!==e.options.labels.outer.format){var t=e.options.data.content.length;i.checkConflict(e,0,"clockwise",t),i.checkConflict(e,t-1,"anticlockwise",t)}},checkConflict:function(e,t,a,r){var o,s;if(!(r<=1)){var l=e.outerLabelGroupData[t].hs;if(("clockwise"!==a||"right"===l)&&("anticlockwise"!==a||"left"===l)){var c="clockwise"===a?t+1:t-1,u=e.outerLabelGroupData[t],d=e.outerLabelGroupData[c],h={labelHeights:e.outerLabelGroupData[0].h,center:e.pieCenter,lineLength:e.outerRadius+e.options.labels.outer.pieDistance,heightChange:e.outerLabelGroupData[0].h+1};if("clockwise"===a){for(o=0;o<=t;o++)if(s=e.outerLabelGroupData[o],!i.isLabelHidden(e,o)&&n.rectIntersect(s,d)){i.adjustLabelPos(e,c,u,h);break}}else for(o=r-1;o>=t;o--)if(s=e.outerLabelGroupData[o],!i.isLabelHidden(e,o)&&n.rectIntersect(s,d)){i.adjustLabelPos(e,c,u,h);break}i.checkConflict(e,c,a,r)}}},isLabelHidden:function(e,t){var a=e.options.labels.outer.hideWhenLessThanPercentage;return null!==a&&d.percentage<a||""===e.options.data.content[t].label},adjustLabelPos:function(e,t,a,n){var r,o,i,s;s=a.y+n.heightChange,o=n.center.y-s,r=Math.abs(n.lineLength)>Math.abs(o)?Math.sqrt(n.lineLength*n.lineLength-o*o):Math.sqrt(o*o-n.lineLength*n.lineLength),i="right"===a.hs?n.center.x+r:n.center.x-r-e.outerLabelGroupData[t].w,e.outerLabelGroupData[t].x=i,e.outerLabelGroupData[t].y=s},getIdealOuterLabelPositions:function(e,t){var a=e.svg.select("#"+e.cssPrefix+"labelGroup"+t+"-outer").node();if(a){var 
n=a.getBBox(),r=s.getSegmentAngle(t,e.options.data.content,e.totalSize,{midpoint:!0}),i=e.pieCenter.x,l=e.pieCenter.y-(e.outerRadius+e.options.labels.outer.pieDistance),c=o.rotate(i,l,e.pieCenter.x,e.pieCenter.y,r),u="right";r>180?(c.x-=n.width+8,u="left"):c.x+=8,e.outerLabelGroupData[t]={x:c.x,y:c.y,w:n.width,h:n.height,hs:u}}}},s={effectMap:{none:d3.easeLinear,bounce:d3.easeBounce,linear:d3.easeLinear,sin:d3.easeSin,elastic:d3.easeElastic,back:d3.easeBack,quad:d3.easeQuad,circle:d3.easeCircle,exp:d3.easeExp},create:function(e){var t=e.pieCenter,a=e.options.colors,n=(e.options.effects.load,e.options.misc.colors.segmentStroke),r=e.svg.insert("g","#"+e.cssPrefix+"title").attr("transform",(function(){return o.getPieTranslateCenter(t)})).attr("class",e.cssPrefix+"pieChart"),i=d3.arc().innerRadius(e.innerRadius).outerRadius(e.outerRadius).startAngle(0).endAngle((function(t){return t.value/e.totalSize*2*Math.PI}));r.selectAll("."+e.cssPrefix+"arc").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"arc").append("path").attr("id",(function(t,a){return e.cssPrefix+"segment"+a})).attr("fill",(function(t,n){var r=a[n];return e.options.misc.gradient.enabled&&(r="url(#"+e.cssPrefix+"grad"+n+")"),r})).style("stroke",n).style("stroke-width",1).attr("data-index",(function(e,t){return t})).attr("d",i),e.svg.selectAll("g."+e.cssPrefix+"arc").attr("transform",(function(t,a){var n=0;return a>0&&(n=s.getSegmentAngle(a-1,e.options.data.content,e.totalSize)),"rotate("+n+")"})),e.arc=i},addGradients:function(e){var t=e.svg.append("defs").selectAll("radialGradient").data(e.options.data.content).enter().append("radialGradient").attr("gradientUnits","userSpaceOnUse").attr("cx",0).attr("cy",0).attr("r","120%").attr("id",(function(t,a){return e.cssPrefix+"grad"+a}));t.append("stop").attr("offset","0%").style("stop-color",(function(t,a){return e.options.colors[a]})),t.append("stop").attr("offset",e.options.misc.gradient.percentage+"%").style("stop-color",e.options.misc.gradient.color)},addSegmentEventHandlers:function(e){var t=e.svg.selectAll("."+e.cssPrefix+"arc");(t=t.merge(e.__labels.inner.merge(e.__labels.outer))).on("click",(function(){var t,a=d3.select(this);if(a.attr("class")===e.cssPrefix+"arc")t=a.select("path");else{var n=a.attr("data-index");t=d3.select("#"+e.cssPrefix+"segment"+n)}var r=t.attr("class")===e.cssPrefix+"expanded";s.onSegmentEvent(e,e.options.callbacks.onClickSegment,t,r),"none"!==e.options.effects.pullOutSegmentOnClick.effect&&(r?s.closeSegment(e,t.node()):s.openSegment(e,t.node()))})),t.on("mouseover",(function(){var t,a,r=d3.select(this);if(r.attr("class")===e.cssPrefix+"arc"?t=r.select("path"):(a=r.attr("data-index"),t=d3.select("#"+e.cssPrefix+"segment"+a)),e.options.effects.highlightSegmentOnMouseover){a=t.attr("data-index");var o=e.options.colors[a];t.style("fill",n.getColorShade(o,e.options.effects.highlightLuminosity))}e.options.tooltips.enabled&&(a=t.attr("data-index"),c.showTooltip(e,a));var i=t.attr("class")===e.cssPrefix+"expanded";s.onSegmentEvent(e,e.options.callbacks.onMouseoverSegment,t,i)})),t.on("mousemove",(function(){c.moveTooltip(e)})),t.on("mouseout",(function(){var t,a,n=d3.select(this);if(n.attr("class")===e.cssPrefix+"arc"?t=n.select("path"):(a=n.attr("data-index"),t=d3.select("#"+e.cssPrefix+"segment"+a)),e.options.effects.highlightSegmentOnMouseover){a=t.attr("data-index");var 
r=e.options.colors[a];e.options.misc.gradient.enabled&&(r="url(#"+e.cssPrefix+"grad"+a+")"),t.style("fill",r)}e.options.tooltips.enabled&&(a=t.attr("data-index"),c.hideTooltip(e,a));var o=t.attr("class")===e.cssPrefix+"expanded";s.onSegmentEvent(e,e.options.callbacks.onMouseoutSegment,t,o)}))},onSegmentEvent:function(e,t,a,r){if(n.isFunction(t)){var o=parseInt(a.attr("data-index"),10);t({segment:a.node(),index:o,expanded:r,data:e.options.data.content[o]})}},openSegment:function(e,t){e.isOpeningSegment||(e.isOpeningSegment=!0,s.maybeCloseOpenSegment(e),d3.select(t).transition().ease(s.effectMap[e.options.effects.pullOutSegmentOnClick.effect]).duration(e.options.effects.pullOutSegmentOnClick.speed).attr("transform",(function(t,a){var n=e.arc.centroid(t),r=n[0],o=n[1],i=Math.sqrt(r*r+o*o),s=parseInt(e.options.effects.pullOutSegmentOnClick.size,10);return"translate("+r/i*s+","+o/i*s+")"})).on("end",(function(a,n){e.currentlyOpenSegment=t,e.isOpeningSegment=!1,d3.select(t).attr("class",e.cssPrefix+"expanded")})))},maybeCloseOpenSegment:function(e){"undefined"!==typeof e&&e.svg.selectAll("."+e.cssPrefix+"expanded").size()>0&&s.closeSegment(e,e.svg.select("."+e.cssPrefix+"expanded").node())},closeSegment:function(e,t){d3.select(t).transition().duration(400).attr("transform","translate(0,0)").on("end",(function(a,n){d3.select(t).attr("class",""),e.currentlyOpenSegment=null}))},getCentroid:function(e){var t=e.getBBox();return{x:t.x+t.width/2,y:t.y+t.height/2}},getSegmentAngle:function(e,t,a,n){var o,i=r({compounded:!0,midpoint:!1},n),s=t[e].value;if(i.compounded){o=0;for(var l=0;l<=e;l++)o+=t[l].value}"undefined"===typeof o&&(o=s);var c=o/a*360;return i.midpoint&&(c-=s/a*360/2),c}},l={offscreenCoord:-1e4,addTitle:function(e){e.__title=e.svg.selectAll("."+e.cssPrefix+"title").data([e.options.header.title]).enter().append("text").text((function(e){return e.text})).attr("id",e.cssPrefix+"title").attr("class",e.cssPrefix+"title").attr("x",l.offscreenCoord).attr("y",l.offscreenCoord).attr("text-anchor",(function(){return"top-center"===e.options.header.location||"pie-center"===e.options.header.location?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize+"px"})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionTitle:function(e){var t,a=e.textComponents,n=e.options.header.location,r=e.options.misc.canvasPadding,o=e.options.size.canvasWidth,i=e.options.header.titleSubtitlePadding;t="top-left"===n?r.left:(o-r.right)/2+r.left,t+=e.options.misc.pieCenterOffset.x;var s=r.top+a.title.h;"pie-center"===n&&(s=e.pieCenter.y,a.subtitle.exists?s=s-(a.title.h+i+a.subtitle.h)/2+a.title.h:s+=a.title.h/4),e.__title.attr("x",t).attr("y",s)},addSubtitle:function(e){var t=e.options.header.location;e.__subtitle=e.svg.selectAll("."+e.cssPrefix+"subtitle").data([e.options.header.subtitle]).enter().append("text").text((function(e){return e.text})).attr("x",l.offscreenCoord).attr("y",l.offscreenCoord).attr("id",e.cssPrefix+"subtitle").attr("class",e.cssPrefix+"subtitle").attr("text-anchor",(function(){return"top-center"===t||"pie-center"===t?"middle":"left"})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize+"px"})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionSubtitle:function(e){var 
t,a=e.options.misc.canvasPadding,n=e.options.size.canvasWidth;t="top-left"===e.options.header.location?a.left:(n-a.right)/2+a.left,t+=e.options.misc.pieCenterOffset.x;var r=l.getHeaderHeight(e);e.__subtitle.attr("x",t).attr("y",r)},addFooter:function(e){e.__footer=e.svg.selectAll("."+e.cssPrefix+"footer").data([e.options.footer]).enter().append("text").text((function(e){return e.text})).attr("x",l.offscreenCoord).attr("y",l.offscreenCoord).attr("id",e.cssPrefix+"footer").attr("class",e.cssPrefix+"footer").attr("text-anchor",(function(){var t="left";return"bottom-center"===e.options.footer.location?t="middle":"bottom-right"===e.options.footer.location&&(t="left"),t})).attr("fill",(function(e){return e.color})).style("font-size",(function(e){return e.fontSize+"px"})).style("font-weight",(function(e){return e.fontWeight})).style("font-family",(function(e){return e.font}))},positionFooter:function(e){var t,a=e.options.footer.location,n=e.textComponents.footer.w,r=e.options.size.canvasWidth,o=e.options.size.canvasHeight,i=e.options.misc.canvasPadding;t="bottom-left"===a?i.left:"bottom-right"===a?r-n-i.right:r/2,e.__footer.attr("x",t).attr("y",o-i.bottom)},getHeaderHeight:function(e){var t;if(e.textComponents.title.exists){var a=e.textComponents.title.h+e.options.header.titleSubtitlePadding+e.textComponents.subtitle.h;t="pie-center"===e.options.header.location?e.pieCenter.y-a/2+a:a+e.options.misc.canvasPadding.top}else if("pie-center"===e.options.header.location){var n=e.options.misc.canvasPadding.bottom+e.textComponents.footer.h;t=(e.options.size.canvasHeight-n)/2+e.options.misc.canvasPadding.top+e.textComponents.subtitle.h/2}else t=e.options.misc.canvasPadding.top+e.textComponents.subtitle.h;return t}},c={addTooltips:function(e){var t=e.svg.insert("g").attr("class",e.cssPrefix+"tooltips");t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).enter().append("g").attr("class",e.cssPrefix+"tooltip").attr("id",(function(t,a){return e.cssPrefix+"tooltip"+a})).style("opacity",0).append("rect").attr("rx",e.options.tooltips.styles.borderRadius).attr("ry",e.options.tooltips.styles.borderRadius).attr("x",-e.options.tooltips.styles.padding).attr("opacity",e.options.tooltips.styles.backgroundOpacity).style("fill",e.options.tooltips.styles.backgroundColor),t.selectAll("."+e.cssPrefix+"tooltip").data(e.options.data.content).append("text").attr("fill",(function(t){return e.options.tooltips.styles.color})).style("font-size",(function(t){return e.options.tooltips.styles.fontSize})).style("font-weight",(function(t){return e.options.tooltips.styles.fontWeight})).style("font-family",(function(t){return e.options.tooltips.styles.font})).text((function(t,a){var n=e.options.tooltips.string;return"caption"===e.options.tooltips.type&&(n=t.caption),c.replacePlaceholders(e,n,a,{label:t.label,value:t.value,percentage:t.percentage})})),t.selectAll("."+e.cssPrefix+"tooltip rect").attr("width",(function(t,a){return n.getDimensions(e.cssPrefix+"tooltip"+a).w+2*e.options.tooltips.styles.padding})).attr("height",(function(t,a){return n.getDimensions(e.cssPrefix+"tooltip"+a).h+2*e.options.tooltips.styles.padding})).attr("y",(function(t,a){return-n.getDimensions(e.cssPrefix+"tooltip"+a).h/2+1}))},showTooltip:function(e,t){var a=e.options.tooltips.styles.fadeInSpeed;c.currentTooltip===t&&(a=1),c.currentTooltip=t,d3.select("#"+e.cssPrefix+"tooltip"+t).transition().duration(a).style("opacity",(function(){return 
1})),c.moveTooltip(e)},moveTooltip:function(e){d3.selectAll("#"+e.cssPrefix+"tooltip"+c.currentTooltip).attr("transform",(function(t){var a=d3.mouse(this.parentNode);return"translate("+(a[0]+e.options.tooltips.styles.padding+2)+","+(a[1]-2*e.options.tooltips.styles.padding-2)+")"}))},hideTooltip:function(e,t){d3.select("#"+e.cssPrefix+"tooltip"+t).style("opacity",(function(){return 0})),d3.select("#"+e.cssPrefix+"tooltip"+c.currentTooltip).attr("transform",(function(t,a){return"translate("+(e.options.size.canvasWidth+1e3)+","+(e.options.size.canvasHeight+1e3)+")"}))},replacePlaceholders:function(e,t,a,r){return n.isFunction(e.options.tooltips.placeholderParser)&&e.options.tooltips.placeholderParser(a,r),t.replace(/\{(\w+)\}/g,(function(e){var t=arguments[1];return r.hasOwnProperty(t)?r[arguments[1]]:arguments[0]}))}},u=function(n,o){if(this.element=n,"string"===typeof n){var i=n.replace(/^#/,"");this.element=document.getElementById(i)}var s={};r(!0,s,t,o),this.options=s,null!==this.options.misc.cssPrefix?this.cssPrefix=this.options.misc.cssPrefix:(this.cssPrefix="p"+e+"_",e++),a(this)&&(d3.select(this.element).attr("d3pie","0.2.1"),h.call(this),p.call(this))};u.prototype.recreate=function(){a(this)&&(h.call(this),p.call(this))},u.prototype.redraw=function(){this.element.innerHTML="",p.call(this)},u.prototype.destroy=function(){this.element.innerHTML="",d3.select(this.element).attr("d3pie",null)},u.prototype.getOpenSegment=function(){var e=this.currentlyOpenSegment;if(null!==e&&"undefined"!==typeof e){var t=parseInt(d3.select(e).attr("data-index"),10);return{element:e,index:t,data:this.options.data.content[t]}}return null},u.prototype.openSegment=function(e){(e=parseInt(e,10))<0||e>this.options.data.content.length-1||s.openSegment(this,d3.select("#"+this.cssPrefix+"segment"+e).node())},u.prototype.closeSegment=function(){s.maybeCloseOpenSegment(this)},u.prototype.updateProp=function(e,t){switch(e){case"header.title.text":var a=n.processObj(this.options,e);n.processObj(this.options,e,t),d3.select("#"+this.cssPrefix+"title").html(t),(""===a&&""!==t||""!==a&&""===t)&&this.redraw();break;case"header.subtitle.text":var r=n.processObj(this.options,e);n.processObj(this.options,e,t),d3.select("#"+this.cssPrefix+"subtitle").html(t),(""===r&&""!==t||""!==r&&""===t)&&this.redraw();break;case"callbacks.onload":case"callbacks.onMouseoverSegment":case"callbacks.onMouseoutSegment":case"callbacks.onClickSegment":case"effects.pullOutSegmentOnClick.effect":case"effects.pullOutSegmentOnClick.speed":case"effects.pullOutSegmentOnClick.size":case"effects.highlightSegmentOnMouseover":case"effects.highlightLuminosity":n.processObj(this.options,e,t);break;default:n.processObj(this.options,e,t),this.destroy(),this.recreate()}};var h=function(){this.options.data.content=o.sortPieData(this),this.options.data.smallSegmentGrouping.enabled&&(this.options.data.content=n.applySmallSegmentGrouping(this.options.data.content,this.options.data.smallSegmentGrouping)),this.options.colors=n.initSegmentColors(this),this.totalSize=o.getTotalPieSize(this.options.data.content);for(var e=this.options.labels.percentage.decimalPlaces,t=0;t<this.options.data.content.length;t++)this.options.data.content[t].percentage=f(this.options.data.content[t].value,this.totalSize,e);for(var 
a=0,r=0;r<this.options.data.content.length;r++)r===this.options.data.content.length-1&&(this.options.data.content[r].percentage=(100-a).toFixed(e)),a+=parseFloat(this.options.data.content[r].percentage)},p=function(){this.svg=n.addSVGSpace(this),this.textComponents={headerHeight:0,title:{exists:""!==this.options.header.title.text,h:0,w:0},subtitle:{exists:""!==this.options.header.subtitle.text,h:0,w:0},footer:{exists:""!==this.options.footer.text,h:0,w:0}},this.outerLabelGroupData=[],this.textComponents.title.exists&&l.addTitle(this),this.textComponents.subtitle.exists&&l.addSubtitle(this),l.addFooter(this),l.positionFooter(this);var e=n.getDimensions(this.__footer.node());if(this.textComponents.footer.h=e.h,this.textComponents.footer.w=e.w,this.textComponents.title.exists){var t=n.getDimensions(this.__title.node());this.textComponents.title.h=t.h,this.textComponents.title.w=t.w}if(this.textComponents.subtitle.exists){var a=n.getDimensions(this.__subtitle.node());this.textComponents.subtitle.h=a.h,this.textComponents.subtitle.w=a.w}if(this.textComponents.title.exists||this.textComponents.subtitle.exists){var r=0;this.textComponents.title.exists&&(r+=this.textComponents.title.h,this.textComponents.subtitle.exists&&(r+=this.options.header.titleSubtitlePadding)),this.textComponents.subtitle.exists&&(r+=this.textComponents.subtitle.h),this.textComponents.headerHeight=r}if(o.computePieRadius(this),o.calculatePieCenter(this),l.positionTitle(this),l.positionSubtitle(this),this.options.misc.gradient.enabled&&s.addGradients(this),s.create(this),this.__labels={},i.add(this,"inner",this.options.labels.inner.format),i.add(this,"outer",this.options.labels.outer.format),i.positionLabelElements(this,"inner",this.options.labels.inner.format),i.positionLabelElements(this,"outer",this.options.labels.outer.format),i.computeOuterLabelCoords(this),i.positionLabelGroups(this,"outer"),i.computeLabelLinePositions(this),this.options.labels.lines.enabled&&"none"!==this.options.labels.outer.format&&i.addLabelLines(this),i.positionLabelGroups(this,"inner"),n.isFunction(this.options.callbacks.onload))try{this.options.callbacks.onload()}catch(u){}this.options.tooltips.enabled&&c.addTooltips(this),s.addSegmentEventHandlers(this)},f=function(e,t,a){var n=e/t;return a<=0?Math.round(100*n):(100*n).toFixed(a)};return u})?n.apply(t,r):n)||(e.exports=o)},306:function(e,t,a){"use strict";a(276);var n=a(277);a.d(t,"CloudConnectionStatus",(function(){return n.a}))},324:function(e,t,a){e.exports=a(566)},340:function(e,t,a){},36:function(e,t,a){"use strict";a.d(t,"a",(function(){return p})),a.d(t,"b",(function(){return f})),a.d(t,"c",(function(){return g}));var n=a(311),r=a(149),o=a(309),i=a(228),s=a(312),l=a(135),c=a(571),u=a(75),d=u.c?{src:"http://localhost:3000/some-script.js"}:document.currentScript,h=function(){if(u.a)return"http://localhost:19999";if(u.b){var e=window.location.pathname.replace("index.html","").replace("default.html","");return window.location.origin+e.replace(/\/v1\/?$/,"")}return function(e){return e.replace(new RegExp("[^\\/]*\\.js(\\/?.*)?$"),"").replace("/static/js","")}(Object(n.a)([[Boolean,r.a],[o.a,function(){return Object(i.a)(document.getElementsByTagName("script"))}]])(d).src).replace(/\/v1\/?$/,"")},p=Object(n.a)([[Object(s.a)(i.a,Object(l.a)("/")),r.a],[o.a,function(e){return Object(c.a)(e,"/")}]]),f=p(window.netdataServer||h()),g=u.a?"/":p(h())},39:function(e,t,a){"use strict";a.d(t,"f",(function(){return u})),a.d(t,"a",(function(){return f})),a.d(t,"j",(function(){return 
g})),a.d(t,"h",(function(){return m})),a.d(t,"g",(function(){return b})),a.d(t,"l",(function(){return v})),a.d(t,"i",(function(){return _})),a.d(t,"c",(function(){return y})),a.d(t,"b",(function(){return O})),a.d(t,"k",(function(){return x})),a.d(t,"e",(function(){return w})),a.d(t,"d",(function(){return E}));var n=a(49),r=a(17),o=a(9),i=a(167),s=a(45),l=function(e){return e[s.c]},c=Object(r.a)(l,(function(e,t){return t.id}),(function(e,t){return e[t]||i.b})),u=Object(r.a)(c,(function(e){return e.chartData})),d=Object(r.a)(c,Object(n.a)("chartMetadata")),h=Object(r.a)(o.e,d,(function(e,t){return e||t})),p=Object(r.a)(c,Object(n.a)("isFetchingDetails")),f=function(){return Object(r.a)(h,p,(function(e,t){return{chartMetadata:e,isFetchingDetails:t}}))},g=Object(r.a)(c,(function(e){return e.viewRange})),m=Object(r.a)(c,(function(e){return e.isFetchingData})),b=Object(r.a)(c,(function(e){return e.fetchDataParams})),v=Object(r.a)(c,(function(e){return e.resizeHeight})),_=Object(r.a)(c,Object(n.a)("chartPanAndZoom")),y=(Object(r.a)(l,(function(e){return Object.values(e).some((function(e){return e.isFetchingData}))})),Object(r.a)(l,(function(e){return Object.values(e).reduce((function(e,t){return e+(function(e){return e.isFetchDataFailure||Boolean(e.chartData)||e.isFetchDetailsFailure}(t)?1:0)}),0)}))),O=Object(r.a)(l,(function(e){return Object.keys(e).length})),x=Object(r.a)(l,(function(e){var t;return null===(t=Object.values(e).find((function(e){return e.isFetchingData})))||void 0===t?void 0:t.chartId})),w=Object(r.a)(l,(function(e){return Object.values(e).reduce((function(e,t){return e+(t.snapshotData?1:0)}),0)})),E=Object(r.a)(l,(function(e){return Object.values(e).reduce((function(e,t){return e+(t.snapshotDataIsError?1:0)}),0)}))},403:function(e,t){},42:function(e,t,a){"use strict";a.d(t,"a",(function(){return f})),a.d(t,"b",(function(){return b}));var n=a(37),r=a(7),o=a(0),i=(a(101),a(8)),s=a(9),l=function(e){return e>-10&&e<10?"0".concat(e.toString()):e.toString()},c=!!(Intl&&Intl.DateTimeFormat&&navigator.language),u=function(e){return"number"===typeof e?new Date(e):e},d=function(e){return u(e).toLocaleDateString()},h=function(e){return u(e).toLocaleTimeString()},p=function(e){var t=u(e);return"".concat(l(t.getHours()),":").concat(l(t.getMinutes()),":").concat(l(t.getSeconds()))},f=function(e){try{Intl.DateTimeFormat(navigator.language,{localeMatcher:"best fit",formatMatcher:"best fit",weekday:"short",year:"numeric",month:"short",day:"2-digit",timeZone:e})}catch(t){return!1}return!0},g=function(e,t){var a=t.locale,o=Object(n.a)(t,["locale"]);return new Intl.DateTimeFormat(null!==a&&void 0!==a?a:navigator.language,function(e){var t=e.long,a=e.isTime,n=e.secs,o=e.timezone;return Object(r.a)({hourCycle:"h23"},a?{}:t?{weekday:"short",year:"numeric",month:"short",day:"2-digit"}:{dateStyle:"short"},{},a&&{timeStyle:n?"medium":"short"},{timeZone:o})}(o)).format(e)},m=function(e){return""!==e&&"default"!==e?e:void 0},b=function(){var e=Object(i.b)(s.L),t=Object(i.b)(s.M),a=Object(o.useMemo)((function(){return c?function(t,a){return g(t,Object(r.a)({long:!0,timezone:m(e)},a))}:d}),[e]),n=Object(o.useMemo)((function(){return c?function(t,a){return g(t,Object(r.a)({secs:!0,isTime:!0,timezone:m(e)},a))}:h}),[e]),l=Object(o.useMemo)((function(){return c?function(t){return g(t,{secs:!0,isTime:!0,timezone:m(e)})}:p}),[e]);return{localeDateString:a,localeTimeString:n,xAxisDateString:Object(o.useMemo)((function(){return c?function(t){return 
g(t,{long:!0,timezone:m(e)})}:p}),[e]),xAxisTimeString:l,utcOffset:t}}},44:function(e,t,a){"use strict";a.d(t,"e",(function(){return o})),a.d(t,"f",(function(){return i})),a.d(t,"d",(function(){return s})),a.d(t,"a",(function(){return l})),a.d(t,"b",(function(){return c})),a.d(t,"c",(function(){return u}));var n=a(22),r=a(76),o=Object(n.createAction)("".concat(r.a,"/isSnapshotModeAction")),i=Object(n.createAction)("".concat(r.a,"/stopSnapshotModeAction")),s=Object(n.createAction)("".concat(r.a,"/showSignInModal")),l=Object(n.createAction)("".concat(r.a,"/explicitlySignIn")),c=Object(n.createAction)("".concat(r.a,"/isSignedInAction")),u=Object(n.createAction)("".concat(r.a,"/setOfflineAction"))},45:function(e,t,a){"use strict";a.d(t,"c",(function(){return n})),a.d(t,"a",(function(){return r})),a.d(t,"b",(function(){return o}));var n="chart",r=2e3,o=300},46:function(e,t,a){"use strict";a.d(t,"a",(function(){return r}));var n=a(7),r=function(e,t){var a={now:"now",space:" ",negative_suffix:"ago",day:"day",days:"days",hour:"hour",hours:"hours",minute:"min",minutes:"mins",second:"sec",seconds:"secs",and:"and"},r="object"===typeof t?Object(n.a)({},a,{},t):a,o="string"===typeof e?parseInt(e,10):e;if(0===o)return r.now;var i="";o<0&&(o=-o,""!==r.negative_suffix&&(i=r.space+r.negative_suffix));var s=Math.floor(o/86400);o-=86400*s;var l=Math.floor(o/3600);o-=3600*l;var c=Math.floor(o/60);o-=60*c;var u=[];if(s>1?u.push(s.toString()+r.space+r.days):1===s&&u.push(s.toString()+r.space+r.day),l>1?u.push(l.toString()+r.space+r.hours):1===l&&u.push(l.toString()+r.space+r.hour),c>1?u.push(c.toString()+r.space+r.minutes):1===c&&u.push(c.toString()+r.space+r.minute),o>1?u.push(Math.floor(o).toString()+r.space+r.seconds):1===o&&u.push(Math.floor(o).toString()+r.space+r.second),1===u.length)return u.pop()+i;var d=u.pop();return"".concat(u.join(", ")," ").concat(r.and," ").concat(d).concat(i)}},470:function(e,t,a){},490:function(e,t,a){},51:function(e,t,a){var n;e.exports=function e(t,a,r){function o(s,l){if(!a[s]){if(!t[s]){if(!l&&"function"==typeof n&&n)return n(s,!0);if(i)return i(s,!0);var c=new Error("Cannot find module '"+s+"'");throw c.code="MODULE_NOT_FOUND",c}var u=a[s]={exports:{}};t[s][0].call(u.exports,(function(e){return o(t[s][1][e]||e)}),u,u.exports,e,t,a,r)}return a[s].exports}for(var i="function"==typeof n&&n,s=0;s<r.length;s++)o(r[s]);return o}({1:[function(e,t,a){function n(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(u===setTimeout)return setTimeout(e,0);if((u===n||!u)&&setTimeout)return u=setTimeout,setTimeout(e,0);try{return u(e,0)}catch(t){try{return u.call(null,e,0)}catch(t){return u.call(this,e,0)}}}function i(){g&&p&&(g=!1,p.length?f=p.concat(f):m=-1,f.length&&s())}function s(){if(!g){var e=o(i);g=!0;for(var t=f.length;t;){for(p=f,f=[];++m<t;)p&&p[m].run();m=-1,t=f.length}p=null,g=!1,function(e){if(d===clearTimeout)return clearTimeout(e);if((d===r||!d)&&clearTimeout)return d=clearTimeout,clearTimeout(e);try{d(e)}catch(t){try{return d.call(null,e)}catch(t){return d.call(this,e)}}}(e)}}function l(e,t){this.fun=e,this.array=t}function c(){}var u,d,h=t.exports={};!function(){try{u="function"==typeof setTimeout?setTimeout:n}catch(e){u=n}try{d="function"==typeof clearTimeout?clearTimeout:r}catch(e){d=r}}();var p,f=[],g=!1,m=-1;h.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var a=1;a<arguments.length;a++)t[a-1]=arguments[a];f.push(new 
l(e,t)),1!==f.length||g||o(s)},l.prototype.run=function(){this.fun.apply(null,this.array)},h.title="browser",h.browser=!0,h.env={},h.argv=[],h.version="",h.versions={},h.on=c,h.addListener=c,h.once=c,h.off=c,h.removeListener=c,h.removeAllListeners=c,h.emit=c,h.prependListener=c,h.prependOnceListener=c,h.listeners=function(e){return[]},h.binding=function(e){throw new Error("process.binding is not supported")},h.cwd=function(){return"/"},h.chdir=function(e){throw new Error("process.chdir is not supported")},h.umask=function(){return 0}},{}],2:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){return e&&e.__esModule?e:{default:e}}(e("./bars")),r=function(){};(r.prototype=new n.default).extractSeries=function(e,t,a){for(var n,r,o,i=[],s=a.get("logscale"),l=0;l<e.length;l++)n=e[l][0],o=e[l][t],s&&null!==o&&(o[0]<=0||o[1]<=0||o[2]<=0)&&(o=null),null!==o?null===(r=o[1])||isNaN(r)?i.push([n,r,[r,r]]):i.push([n,r,[o[0],o[2]]]):i.push([n,null,[null,null]]);return i},r.prototype.rollingAverage=function(e,t,a){t=Math.min(t,e.length);var n,r,o,i,s,l,c,u=[];for(r=0,i=0,o=0,s=0,l=0;l<e.length;l++){if(n=e[l][1],c=e[l][2],u[l]=e[l],null===n||isNaN(n)||(r+=c[0],i+=n,o+=c[1],s+=1),l-t>=0){var d=e[l-t];null===d[1]||isNaN(d[1])||(r-=d[2][0],i-=d[1],o-=d[2][1],s-=1)}u[l]=s?[e[l][0],1*i/s,[1*r/s,1*o/s]]:[e[l][0],null,[null,null]]}return u},a.default=r,t.exports=a.default},{"./bars":5}],3:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){return e&&e.__esModule?e:{default:e}}(e("./bars")),r=function(){};(r.prototype=new n.default).extractSeries=function(e,t,a){for(var n,r,o,i,s=[],l=a.get("sigma"),c=a.get("logscale"),u=0;u<e.length;u++)n=e[u][0],i=e[u][t],c&&null!==i&&(i[0]<=0||i[0]-l*i[1]<=0)&&(i=null),null!==i?null===(r=i[0])||isNaN(r)?s.push([n,r,[r,r,r]]):(o=l*i[1],s.push([n,r,[r-o,r+o,i[1]]])):s.push([n,null,[null,null,null]]);return s},r.prototype.rollingAverage=function(e,t,a){t=Math.min(t,e.length);var n,r,o,i,s,l,c,u,d,h=[],p=a.get("sigma");for(n=0;n<e.length;n++){for(s=0,u=0,l=0,r=Math.max(0,n-t+1);r<n+1;r++)null===(o=e[r][1])||isNaN(o)||(l++,s+=o,u+=Math.pow(e[r][2][2],2));l?(c=Math.sqrt(u)/l,d=s/l,h[n]=[e[n][0],d,[d-p*c,d+p*c]]):(i=1==t?e[n][1]:null,h[n]=[e[n][0],i,[i,i]])}return h},a.default=r,t.exports=a.default},{"./bars":5}],4:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){return e&&e.__esModule?e:{default:e}}(e("./bars")),r=function(){};(r.prototype=new n.default).extractSeries=function(e,t,a){for(var n,r,o,i,s,l,c,u=[],d=a.get("sigma"),h=a.get("logscale"),p=0;p<e.length;p++)n=e[p][0],o=e[p][t],h&&null!==o&&(o[0]<=0||o[1]<=0)&&(o=null),null!==o?(i=o[0],s=o[1],null===i||isNaN(i)?u.push([n,i,[i,i,i,s]]):(l=s?i/s:0,c=100*(s?d*Math.sqrt(l*(1-l)/s):1),r=100*l,u.push([n,r,[r-c,r+c,i,s]]))):u.push([n,null,[null,null,null,null]]);return u},r.prototype.rollingAverage=function(e,t,a){t=Math.min(t,e.length);var n,r,o,i,s=[],l=a.get("sigma"),c=a.get("wilsonInterval"),u=0,d=0;for(o=0;o<e.length;o++){u+=e[o][2][2],d+=e[o][2][3],o-t>=0&&(u-=e[o-t][2][2],d-=e[o-t][2][3]);var h=e[o][0],p=d?u/d:0;if(c)if(d){var f=p<0?0:p,g=d,m=l*Math.sqrt(f*(1-f)/g+l*l/(4*g*g)),b=1+l*l/d;n=(f+l*l/(2*d)-m)/b,r=(f+l*l/(2*d)+m)/b,s[o]=[h,100*f,[100*n,100*r]]}else s[o]=[h,0,[0,0]];else i=d?l*Math.sqrt(p*(1-p)/d):1,s[o]=[h,100*p,[100*(p-i),100*(p+i)]]}return s},a.default=r,t.exports=a.default},{"./bars":5}],5:[function(e,t,a){"use strict";function n(e){return 
e&&e.__esModule?e:{default:e}}Object.defineProperty(a,"__esModule",{value:!0});var r=n(e("./datahandler")),o=n(e("../dygraph-layout")),i=function(){r.default.call(this)};(i.prototype=new r.default).extractSeries=function(e,t,a){},i.prototype.rollingAverage=function(e,t,a){},i.prototype.onPointsCreated_=function(e,t){for(var a=0;a<e.length;++a){var n=e[a],o=t[a];o.y_top=NaN,o.y_bottom=NaN,o.yval_minus=r.default.parseFloat(n[2][0]),o.yval_plus=r.default.parseFloat(n[2][1])}},i.prototype.getExtremeYValues=function(e,t,a){for(var n,r=null,o=null,i=e.length-1,s=0;s<=i;s++)if(null!==(n=e[s][1])&&!isNaN(n)){var l=e[s][2][0],c=e[s][2][1];l>n&&(l=n),c<n&&(c=n),(null===o||c>o)&&(o=c),(null===r||l<r)&&(r=l)}return[r,o]},i.prototype.onLineEvaluated=function(e,t,a){for(var n,r=0;r<e.length;r++)(n=e[r]).y_top=o.default.calcYNormal_(t,n.yval_minus,a),n.y_bottom=o.default.calcYNormal_(t,n.yval_plus,a)},a.default=i,t.exports=a.default},{"../dygraph-layout":13,"./datahandler":6}],6:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(){},r=n;r.X=0,r.Y=1,r.EXTRAS=2,r.prototype.extractSeries=function(e,t,a){},r.prototype.seriesToPoints=function(e,t,a){for(var n=[],o=0;o<e.length;++o){var i=e[o],s=i[1],l=null===s?null:r.parseFloat(s),c={x:NaN,y:NaN,xval:r.parseFloat(i[0]),yval:l,name:t,idx:o+a,canvasx:NaN,canvasy:NaN};n.push(c)}return this.onPointsCreated_(e,n),n},r.prototype.onPointsCreated_=function(e,t){},r.prototype.rollingAverage=function(e,t,a){},r.prototype.getExtremeYValues=function(e,t,a){},r.prototype.onLineEvaluated=function(e,t,a){},r.parseFloat=function(e){return null===e?NaN:e},a.default=n,t.exports=a.default},{}],7:[function(e,t,a){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(a,"__esModule",{value:!0});var r=(n(e("./datahandler")),e("./default")),o=n(r),i=function(){};(i.prototype=new o.default).extractSeries=function(e,t,a){for(var n,r,o,i,s,l=[],c=a.get("logscale"),u=0;u<e.length;u++)n=e[u][0],o=e[u][t],c&&null!==o&&(o[0]<=0||o[1]<=0)&&(o=null),null!==o?(i=o[0],s=o[1],null===i||isNaN(i)?l.push([n,i,[i,s]]):(r=100*(s?i/s:0),l.push([n,r,[i,s]]))):l.push([n,null,[null,null]]);return l},i.prototype.rollingAverage=function(e,t,a){t=Math.min(t,e.length);var n,r=[],o=0,i=0;for(n=0;n<e.length;n++){o+=e[n][2][0],i+=e[n][2][1],n-t>=0&&(o-=e[n-t][2][0],i-=e[n-t][2][1]);var s=e[n][0],l=i?o/i:0;r[n]=[s,100*l]}return r},a.default=i,t.exports=a.default},{"./datahandler":6,"./default":8}],8:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){return e&&e.__esModule?e:{default:e}}(e("./datahandler")),r=function(){};(r.prototype=new n.default).extractSeries=function(e,t,a){for(var n=[],r=a.get("logscale"),o=0;o<e.length;o++){var i=e[o][0],s=e[o][t];r&&s<=0&&(s=null),n.push([i,s])}return n},r.prototype.rollingAverage=function(e,t,a){var n,r,o,i,s,l=[];if(1==(t=Math.min(t,e.length)))return e;for(n=0;n<e.length;n++){for(i=0,s=0,r=Math.max(0,n-t+1);r<n+1;r++)null===(o=e[r][1])||isNaN(o)||(s++,i+=e[r][1]);l[n]=s?[e[n][0],i/s]:[e[n][0],null]}return l},r.prototype.getExtremeYValues=function(e,t,a){for(var n,r=null,o=null,i=e.length-1,s=0;s<=i;s++)null===(n=e[s][1])||isNaN(n)||((null===o||n>o)&&(o=n),(null===r||n<r)&&(r=n));return[r,o]},a.default=r,t.exports=a.default},{"./datahandler":6}],9:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in 
e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils")),r=function(e){return e&&e.__esModule?e:{default:e}}(e("./dygraph")),o=function(e,t,a,r){if(this.dygraph_=e,this.layout=r,this.element=t,this.elementContext=a,this.height=e.height_,this.width=e.width_,!n.isCanvasSupported(this.element))throw"Canvas is not supported.";this.area=r.getPlotArea();var o=this.dygraph_.canvas_ctx_;o.beginPath(),o.rect(this.area.x,this.area.y,this.area.w,this.area.h),o.clip(),(o=this.dygraph_.hidden_ctx_).beginPath(),o.rect(this.area.x,this.area.y,this.area.w,this.area.h),o.clip()};o.prototype.clear=function(){this.elementContext.clearRect(0,0,this.width,this.height)},o.prototype.render=function(){this._updatePoints(),this._renderLineChart()},o._getIteratorPredicate=function(e){return e?o._predicateThatSkipsEmptyPoints:null},o._predicateThatSkipsEmptyPoints=function(e,t){return null!==e[t].yval},o._drawStyledLine=function(e,t,a,r,i,s,l){var c=e.dygraph,u=c.getBooleanOption("stepPlot",e.setName);n.isArrayLike(r)||(r=null);var d=c.getBooleanOption("drawGapEdgePoints",e.setName),h=e.points,p=e.setName,f=n.createIterator(h,0,h.length,o._getIteratorPredicate(c.getBooleanOption("connectSeparatedPoints",p))),g=r&&r.length>=2,m=e.drawingContext;m.save(),g&&m.setLineDash&&m.setLineDash(r);var b=o._drawSeries(e,f,a,l,i,d,u,t);o._drawPointsOnLine(e,b,s,t,l),g&&m.setLineDash&&m.setLineDash([]),m.restore()},o._drawSeries=function(e,t,a,n,r,o,i,s){var l,c,u=null,d=null,h=null,p=[],f=!0,g=e.drawingContext;g.beginPath(),g.strokeStyle=s,g.lineWidth=a;for(var m=t.array_,b=t.end_,v=t.predicate_,_=t.start_;_<b;_++){if(c=m[_],v){for(;_<b&&!v(m,_);)_++;if(_==b)break;c=m[_]}if(null===c.canvasy||c.canvasy!=c.canvasy)i&&null!==u&&(g.moveTo(u,d),g.lineTo(c.canvasx,d)),u=d=null;else{if(l=!1,o||null===u){t.nextIdx_=_,t.next();var y=null===(h=t.hasNext?t.peek.canvasy:null)||h!=h;l=null===u&&y,o&&(!f&&null===u||t.hasNext&&y)&&(l=!0)}null!==u?a&&(i&&(g.moveTo(u,d),g.lineTo(c.canvasx,d)),g.lineTo(c.canvasx,c.canvasy)):g.moveTo(c.canvasx,c.canvasy),(r||l)&&p.push([c.canvasx,c.canvasy,c.idx]),u=c.canvasx,d=c.canvasy}f=!1}return g.stroke(),p},o._drawPointsOnLine=function(e,t,a,n,r){for(var o=e.drawingContext,i=0;i<t.length;i++){var s=t[i];o.save(),a.call(e.dygraph,e.dygraph,e.setName,o,s[0],s[1],n,r,s[2]),o.restore()}},o.prototype._updatePoints=function(){for(var e=this.layout.points,t=e.length;t--;)for(var a=e[t],n=a.length;n--;){var r=a[n];r.canvasx=this.area.w*r.x+this.area.x,r.canvasy=this.area.h*r.y+this.area.y}},o.prototype._renderLineChart=function(e,t){var a,r,o=t||this.elementContext,i=this.layout.points,s=this.layout.setNames;this.colors=this.dygraph_.colorsMap_;var l=this.dygraph_.getOption("plotter"),c=l;n.isArrayLike(c)||(c=[c]);var u={};for(a=0;a<s.length;a++){r=s[a];var d=this.dygraph_.getOption("plotter",r);d!=l&&(u[r]=d)}for(a=0;a<c.length;a++)for(var h=c[a],p=a==c.length-1,f=0;f<i.length;f++)if(r=s[f],!e||r==e){var g=i[f],m=h;if(r in u){if(!p)continue;m=u[r]}var b=this.colors[r],v=this.dygraph_.getOption("strokeWidth",r);o.save(),o.strokeStyle=b,o.lineWidth=v,m({points:g,setName:r,drawingContext:o,color:b,strokeWidth:v,dygraph:this.dygraph_,axis:this.dygraph_.axisPropertiesForSeries(r),plotArea:this.area,seriesIndex:f,seriesCount:i.length,singleSeriesName:e,allSeriesPoints:i}),o.restore()}},o._Plotters={linePlotter:function(e){o._linePlotter(e)},fillPlotter:function(e){o._fillPlotter(e)},errorPlotter:function(e){o._errorPlotter(e)}},o._linePlotter=function(e){var 
t=e.dygraph,a=e.setName,r=e.strokeWidth,i=t.getNumericOption("strokeBorderWidth",a),s=t.getOption("drawPointCallback",a)||n.Circles.DEFAULT,l=t.getOption("strokePattern",a),c=t.getBooleanOption("drawPoints",a),u=t.getNumericOption("pointSize",a);i&&r&&o._drawStyledLine(e,t.getOption("strokeBorderColor",a),r+2*i,l,c,s,u),o._drawStyledLine(e,e.color,r,l,c,s,u)},o._errorPlotter=function(e){var t=e.dygraph,a=e.setName;if(t.getBooleanOption("errorBars")||t.getBooleanOption("customBars")){t.getBooleanOption("fillGraph",a)&&console.warn("Can't use fillGraph option with error bars");var r,i=e.drawingContext,s=e.color,l=t.getNumericOption("fillAlpha",a),c=t.getBooleanOption("stepPlot",a),u=e.points,d=n.createIterator(u,0,u.length,o._getIteratorPredicate(t.getBooleanOption("connectSeparatedPoints",a))),h=NaN,p=NaN,f=[-1,-1],g=n.toRGB_(s),m="rgba("+g.r+","+g.g+","+g.b+","+l+")";i.fillStyle=m,i.beginPath();for(var b=function(e){return null===e||void 0===e||isNaN(e)};d.hasNext;){var v=d.next();!c&&b(v.y)||c&&!isNaN(p)&&b(p)?h=NaN:(r=[v.y_bottom,v.y_top],c&&(p=v.y),isNaN(r[0])&&(r[0]=v.y),isNaN(r[1])&&(r[1]=v.y),r[0]=e.plotArea.h*r[0]+e.plotArea.y,r[1]=e.plotArea.h*r[1]+e.plotArea.y,isNaN(h)||(c?(i.moveTo(h,f[0]),i.lineTo(v.canvasx,f[0]),i.lineTo(v.canvasx,f[1])):(i.moveTo(h,f[0]),i.lineTo(v.canvasx,r[0]),i.lineTo(v.canvasx,r[1])),i.lineTo(h,f[1]),i.closePath()),f=r,h=v.canvasx)}i.fill()}},o._fastCanvasProxy=function(e){var t=[],a=null,n=null,r=0,o=function(a){!function(e){if(!(t.length<=1)){for(var a=t.length-1;a>0;a--)if(2==(s=t[a])[0]){var n=t[a-1];n[1]==s[1]&&n[2]==s[2]&&t.splice(a,1)}for(a=0;a<t.length-1;)2==(s=t[a])[0]&&2==t[a+1][0]?t.splice(a,1):a++;if(t.length>2&&!e){var r=0;2==t[0][0]&&r++;var o=null,i=null;for(a=r;a<t.length;a++){var s;if(1==(s=t[a])[0])if(null===o&&null===i)o=a,i=a;else{var l=s[2];l<t[o][2]?o=a:l>t[i][2]&&(i=a)}}var c=t[o],u=t[i];t.splice(r,t.length-r),o<i?(t.push(c),t.push(u)):o>i?(t.push(u),t.push(c)):t.push(c)}}}(a);for(var o=0,i=t.length;o<i;o++){var s=t[o];1==s[0]?e.lineTo(s[1],s[2]):2==s[0]&&e.moveTo(s[1],s[2])}t.length&&(n=t[t.length-1][1]),r+=t.length,t=[]},i=function(e,r,i){var s=Math.round(r);null!==a&&s==a||(o(a-n>1||s-a>1),a=s),t.push([e,r,i])};return{moveTo:function(e,t){i(2,e,t)},lineTo:function(e,t){i(1,e,t)},stroke:function(){o(!0),e.stroke()},fill:function(){o(!0),e.fill()},beginPath:function(){o(!0),e.beginPath()},closePath:function(){o(!0),e.closePath()},_count:function(){return r}}},o._fillPlotter=function(e){if(!e.singleSeriesName&&0===e.seriesIndex){for(var t=e.dygraph,a=t.getLabels().slice(1),i=a.length;i>=0;i--)t.visibility()[i]||a.splice(i,1);if(function(){for(var e=0;e<a.length;e++)if(t.getBooleanOption("fillGraph",a[e]))return!0;return!1}())for(var s,l,c=e.plotArea,u=e.allSeriesPoints,d=u.length,h=t.getBooleanOption("stackedGraph"),p=t.getColors(),f={},g=function(e,t,a,n){if(e.lineTo(t,a),h)for(var r=n.length-1;r>=0;r--){var o=n[r];e.lineTo(o[0],o[1])}},m=d-1;m>=0;m--){var b=e.drawingContext,v=a[m];if(t.getBooleanOption("fillGraph",v)){var _=t.getNumericOption("fillAlpha",v),y=t.getBooleanOption("stepPlot",v),O=p[m],x=t.axisPropertiesForSeries(v),w=1+x.minyval*x.yscale;w<0?w=0:w>1&&(w=1),w=c.h*w+c.y;var E,S=u[m],C=n.createIterator(S,0,S.length,o._getIteratorPredicate(t.getBooleanOption("connectSeparatedPoints",v))),A=NaN,k=[-1,-1],j=n.toRGB_(O),T="rgba("+j.r+","+j.g+","+j.b+","+_+")";b.fillStyle=T,b.beginPath();var D,P=!0;(S.length>2*t.width_||r.default.FORCE_FAST_PROXY)&&(b=o._fastCanvasProxy(b));for(var 
M,L=[];C.hasNext;)if(M=C.next(),n.isOK(M.y)||y){if(h){if(!P&&D==M.xval)continue;var I;P=!1,D=M.xval,I=void 0===(s=f[M.canvasx])?w:l?s[0]:s,E=[M.canvasy,I],y?-1===k[0]?f[M.canvasx]=[M.canvasy,w]:f[M.canvasx]=[M.canvasy,k[0]]:f[M.canvasx]=M.canvasy}else E=isNaN(M.canvasy)&&y?[c.y+c.h,w]:[M.canvasy,w];isNaN(A)?(b.moveTo(M.canvasx,E[1]),b.lineTo(M.canvasx,E[0])):(y?(b.lineTo(M.canvasx,k[0]),b.lineTo(M.canvasx,E[0])):b.lineTo(M.canvasx,E[0]),h&&(L.push([A,k[1]]),l&&s?L.push([M.canvasx,s[1]]):L.push([M.canvasx,E[1]]))),k=E,A=M.canvasx}else g(b,A,k[1],L),L=[],A=NaN,null===M.y_stacked||isNaN(M.y_stacked)||(f[M.canvasx]=c.h*M.y_stacked+c.y);l=y,E&&M&&(g(b,M.canvasx,E[1],L),L=[]),b.fill()}}}},a.default=o,t.exports=a.default},{"./dygraph":18,"./dygraph-utils":17}],10:[function(e,t,a){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}function r(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}Object.defineProperty(a,"__esModule",{value:!0});var o=r(e("./dygraph-tickers")),i=n(e("./dygraph-interaction-model")),s=n(e("./dygraph-canvas")),l=r(e("./dygraph-utils")),c={highlightCircleSize:3,highlightSeriesOpts:null,highlightSeriesBackgroundAlpha:.5,highlightSeriesBackgroundColor:"rgb(255, 255, 255)",labelsSeparateLines:!1,labelsShowZeroValues:!0,labelsKMB:!1,labelsKMG2:!1,showLabelsOnHighlight:!0,digitsAfterDecimal:2,maxNumberWidth:6,sigFigs:null,strokeWidth:1,strokeBorderWidth:0,strokeBorderColor:"white",axisTickSize:3,axisLabelFontSize:14,rightGap:5,showRoller:!1,xValueParser:void 0,delimiter:",",sigma:2,errorBars:!1,fractions:!1,wilsonInterval:!0,customBars:!1,fillGraph:!1,fillAlpha:.15,connectSeparatedPoints:!1,stackedGraph:!1,stackedGraphNaNFill:"all",hideOverlayOnMouseOut:!0,legend:"onmouseover",stepPlot:!1,xRangePad:0,yRangePad:null,drawAxesAtZero:!1,titleHeight:28,xLabelHeight:18,yLabelWidth:18,axisLineColor:"black",axisLineWidth:.3,gridLineWidth:.3,axisLabelWidth:50,gridLineColor:"rgb(128,128,128)",interactionModel:i.default.defaultModel,animatedZooms:!1,showRangeSelector:!1,rangeSelectorHeight:40,rangeSelectorPlotStrokeColor:"#808FAB",rangeSelectorPlotFillGradientColor:"white",rangeSelectorPlotFillColor:"#A7B1C4",rangeSelectorBackgroundStrokeColor:"gray",rangeSelectorBackgroundLineWidth:1,rangeSelectorPlotLineWidth:1.5,rangeSelectorForegroundStrokeColor:"black",rangeSelectorForegroundLineWidth:1,rangeSelectorAlpha:.6,showInRangeSelector:null,plotter:[s.default._fillPlotter,s.default._errorPlotter,s.default._linePlotter],plugins:[],axes:{x:{pixelsPerLabel:70,axisLabelWidth:60,axisLabelFormatter:l.dateAxisLabelFormatter,valueFormatter:l.dateValueFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.dateTicker},y:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:l.numberValueFormatter,axisLabelFormatter:l.numberAxisLabelFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.numericTicks},y2:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:l.numberValueFormatter,axisLabelFormatter:l.numberAxisLabelFormatter,drawAxis:!0,drawGrid:!1,independentTicks:!1,ticker:o.numericTicks}}};a.default=c,t.exports=a.default},{"./dygraph-canvas":9,"./dygraph-interaction-model":12,"./dygraph-tickers":16,"./dygraph-utils":17}],11:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){return e&&e.__esModule?e:{default:e}}(e("./dygraph")),r=function(e){this.container=e};r.prototype.draw=function(e,t){this.container.innerHTML="",void 
0!==this.date_graph&&this.date_graph.destroy(),this.date_graph=new n.default(this.container,e,t)},r.prototype.setSelection=function(e){var t=!1;e.length&&(t=e[0].row),this.date_graph.setSelection(t)},r.prototype.getSelection=function(){var e=[],t=this.date_graph.getSelection();if(t<0)return e;for(var a=this.date_graph.layout_.points,n=0;n<a.length;++n)e.push({row:t,column:n+1});return e},a.default=r,t.exports=a.default},{"./dygraph":18}],12:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils")),r={maybeTreatMouseOpAsClick:function(e,t,a){a.dragEndX=n.dragGetX_(e,a),a.dragEndY=n.dragGetY_(e,a);var o=Math.abs(a.dragEndX-a.dragStartX),i=Math.abs(a.dragEndY-a.dragStartY);o<2&&i<2&&void 0!==t.lastx_&&-1!=t.lastx_&&r.treatMouseOpAsClick(t,e,a),a.regionWidth=o,a.regionHeight=i},startPan:function(e,t,a){var r,o;a.isPanning=!0;var i=t.xAxisRange();if(t.getOptionForAxis("logscale","x")?(a.initialLeftmostDate=n.log10(i[0]),a.dateRange=n.log10(i[1])-n.log10(i[0])):(a.initialLeftmostDate=i[0],a.dateRange=i[1]-i[0]),a.xUnitsPerPixel=a.dateRange/(t.plotter_.area.w-1),t.getNumericOption("panEdgeFraction")){var s=t.width_*t.getNumericOption("panEdgeFraction"),l=t.xAxisExtremes(),c=t.toDomXCoord(l[0])-s,u=t.toDomXCoord(l[1])+s,d=t.toDataXCoord(c),h=t.toDataXCoord(u);a.boundedDates=[d,h];var p=[],f=t.height_*t.getNumericOption("panEdgeFraction");for(r=0;r<t.axes_.length;r++){var g=(o=t.axes_[r]).extremeRange,m=t.toDomYCoord(g[0],r)+f,b=t.toDomYCoord(g[1],r)-f,v=t.toDataYCoord(m,r),_=t.toDataYCoord(b,r);p[r]=[v,_]}a.boundedValues=p}for(a.is2DPan=!1,a.axes=[],r=0;r<t.axes_.length;r++){o=t.axes_[r];var y={},O=t.yAxisRange(r);t.attributes_.getForAxis("logscale",r)?(y.initialTopValue=n.log10(O[1]),y.dragValueRange=n.log10(O[1])-n.log10(O[0])):(y.initialTopValue=O[1],y.dragValueRange=O[1]-O[0]),y.unitsPerPixel=y.dragValueRange/(t.plotter_.area.h-1),a.axes.push(y),o.valueRange&&(a.is2DPan=!0)}},movePan:function(e,t,a){a.dragEndX=n.dragGetX_(e,a),a.dragEndY=n.dragGetY_(e,a);var r=a.initialLeftmostDate-(a.dragEndX-a.dragStartX)*a.xUnitsPerPixel;a.boundedDates&&(r=Math.max(r,a.boundedDates[0]));var o=r+a.dateRange;if(a.boundedDates&&o>a.boundedDates[1]&&(o=(r-=o-a.boundedDates[1])+a.dateRange),t.getOptionForAxis("logscale","x")?t.dateWindow_=[Math.pow(n.LOG_SCALE,r),Math.pow(n.LOG_SCALE,o)]:t.dateWindow_=[r,o],a.is2DPan)for(var i=a.dragEndY-a.dragStartY,s=0;s<t.axes_.length;s++){var l=t.axes_[s],c=a.axes[s],u=i*c.unitsPerPixel,d=a.boundedValues?a.boundedValues[s]:null,h=c.initialTopValue+u;d&&(h=Math.min(h,d[1]));var p=h-c.dragValueRange;d&&p<d[0]&&(p=(h-=p-d[0])-c.dragValueRange),t.attributes_.getForAxis("logscale",s)?l.valueRange=[Math.pow(n.LOG_SCALE,p),Math.pow(n.LOG_SCALE,h)]:l.valueRange=[p,h]}t.drawGraph_(!1)}};r.endPan=r.maybeTreatMouseOpAsClick,r.startZoom=function(e,t,a){a.isZooming=!0,a.zoomMoved=!1},r.moveZoom=function(e,t,a){a.zoomMoved=!0,a.dragEndX=n.dragGetX_(e,a),a.dragEndY=n.dragGetY_(e,a);var r=Math.abs(a.dragStartX-a.dragEndX),o=Math.abs(a.dragStartY-a.dragEndY);a.dragDirection=r<o/2?n.VERTICAL:n.HORIZONTAL,t.drawZoomRect_(a.dragDirection,a.dragStartX,a.dragEndX,a.dragStartY,a.dragEndY,a.prevDragDirection,a.prevEndX,a.prevEndY),a.prevEndX=a.dragEndX,a.prevEndY=a.dragEndY,a.prevDragDirection=a.dragDirection},r.treatMouseOpAsClick=function(e,t,a){for(var 
n=e.getFunctionOption("clickCallback"),r=e.getFunctionOption("pointClickCallback"),o=null,i=-1,s=Number.MAX_VALUE,l=0;l<e.selPoints_.length;l++){var c=e.selPoints_[l],u=Math.pow(c.canvasx-a.dragEndX,2)+Math.pow(c.canvasy-a.dragEndY,2);!isNaN(u)&&(-1==i||u<s)&&(s=u,i=l)}var d=e.getNumericOption("highlightCircleSize")+2;if(s<=d*d&&(o=e.selPoints_[i]),o){var h={cancelable:!0,point:o,canvasx:a.dragEndX,canvasy:a.dragEndY};if(e.cascadeEvents_("pointClick",h))return;r&&r.call(e,t,o)}h={cancelable:!0,xval:e.lastx_,pts:e.selPoints_,canvasx:a.dragEndX,canvasy:a.dragEndY},e.cascadeEvents_("click",h)||n&&n.call(e,t,e.lastx_,e.selPoints_)},r.endZoom=function(e,t,a){t.clearZoomRect_(),a.isZooming=!1,r.maybeTreatMouseOpAsClick(e,t,a);var o=t.getArea();if(a.regionWidth>=10&&a.dragDirection==n.HORIZONTAL){var i=Math.min(a.dragStartX,a.dragEndX),s=Math.max(a.dragStartX,a.dragEndX);(i=Math.max(i,o.x))<(s=Math.min(s,o.x+o.w))&&t.doZoomX_(i,s),a.cancelNextDblclick=!0}else if(a.regionHeight>=10&&a.dragDirection==n.VERTICAL){var l=Math.min(a.dragStartY,a.dragEndY),c=Math.max(a.dragStartY,a.dragEndY);(l=Math.max(l,o.y))<(c=Math.min(c,o.y+o.h))&&t.doZoomY_(l,c),a.cancelNextDblclick=!0}a.dragStartX=null,a.dragStartY=null},r.startTouch=function(e,t,a){e.preventDefault(),e.touches.length>1&&(a.startTimeForDoubleTapMs=null);for(var n=[],r=0;r<e.touches.length;r++){var o=e.touches[r];n.push({pageX:o.pageX,pageY:o.pageY,dataX:t.toDataXCoord(o.pageX),dataY:t.toDataYCoord(o.pageY)})}if(a.initialTouches=n,1==n.length)a.initialPinchCenter=n[0],a.touchDirections={x:!0,y:!0};else if(n.length>=2){a.initialPinchCenter={pageX:.5*(n[0].pageX+n[1].pageX),pageY:.5*(n[0].pageY+n[1].pageY),dataX:.5*(n[0].dataX+n[1].dataX),dataY:.5*(n[0].dataY+n[1].dataY)};var i=180/Math.PI*Math.atan2(a.initialPinchCenter.pageY-n[0].pageY,n[0].pageX-a.initialPinchCenter.pageX);(i=Math.abs(i))>90&&(i=90-i),a.touchDirections={x:i<67.5,y:i>22.5}}a.initialRange={x:t.xAxisRange(),y:t.yAxisRange()}},r.moveTouch=function(e,t,a){a.startTimeForDoubleTapMs=null;var n,r=[];for(n=0;n<e.touches.length;n++){var o=e.touches[n];r.push({pageX:o.pageX,pageY:o.pageY})}var i,s,l,c=a.initialTouches,u=a.initialPinchCenter,d={pageX:(i=1==r.length?r[0]:{pageX:.5*(r[0].pageX+r[1].pageX),pageY:.5*(r[0].pageY+r[1].pageY)}).pageX-u.pageX,pageY:i.pageY-u.pageY},h=a.initialRange.x[1]-a.initialRange.x[0],p=a.initialRange.y[0]-a.initialRange.y[1];if(d.dataX=d.pageX/t.plotter_.area.w*h,d.dataY=d.pageY/t.plotter_.area.h*p,1==r.length)s=1,l=1;else if(r.length>=2){var f=c[1].pageX-u.pageX;s=(r[1].pageX-i.pageX)/f;var g=c[1].pageY-u.pageY;l=(r[1].pageY-i.pageY)/g}s=Math.min(8,Math.max(.125,s)),l=Math.min(8,Math.max(.125,l));var m=!1;if(a.touchDirections.x&&(t.dateWindow_=[u.dataX-d.dataX+(a.initialRange.x[0]-u.dataX)/s,u.dataX-d.dataX+(a.initialRange.x[1]-u.dataX)/s],m=!0),a.touchDirections.y)for(n=0;n<1;n++){var b=t.axes_[n];t.attributes_.getForAxis("logscale",n)||(b.valueRange=[u.dataY-d.dataY+(a.initialRange.y[0]-u.dataY)/l,u.dataY-d.dataY+(a.initialRange.y[1]-u.dataY)/l],m=!0)}if(t.drawGraph_(!1),m&&r.length>1&&t.getFunctionOption("zoomCallback")){var v=t.xAxisRange();t.getFunctionOption("zoomCallback").call(t,v[0],v[1],t.yAxisRanges())}},r.endTouch=function(e,t,a){if(0!==e.touches.length)r.startTouch(e,t,a);else if(1==e.changedTouches.length){var n=(new 
Date).getTime(),o=e.changedTouches[0];a.startTimeForDoubleTapMs&&n-a.startTimeForDoubleTapMs<500&&a.doubleTapX&&Math.abs(a.doubleTapX-o.screenX)<50&&a.doubleTapY&&Math.abs(a.doubleTapY-o.screenY)<50?t.resetZoom():(a.startTimeForDoubleTapMs=n,a.doubleTapX=o.screenX,a.doubleTapY=o.screenY)}};var o=function(e,t,a){return e<t?t-e:e>a?e-a:0},i=function(e,t){var a=n.findPos(t.canvas_),r={left:a.x,right:a.x+t.canvas_.offsetWidth,top:a.y,bottom:a.y+t.canvas_.offsetHeight},i={x:n.pageX(e),y:n.pageY(e)},s=o(i.x,r.left,r.right),l=o(i.y,r.top,r.bottom);return Math.max(s,l)};r.defaultModel={mousedown:function(e,t,a){if(!e.button||2!=e.button){a.initializeMouseDown(e,t,a),e.altKey||e.shiftKey?r.startPan(e,t,a):r.startZoom(e,t,a);var o=function(e){a.isZooming?i(e,t)<100?r.moveZoom(e,t,a):null!==a.dragEndX&&(a.dragEndX=null,a.dragEndY=null,t.clearZoomRect_()):a.isPanning&&r.movePan(e,t,a)};t.addAndTrackEvent(document,"mousemove",o),t.addAndTrackEvent(document,"mouseup",(function e(i){a.isZooming?null!==a.dragEndX?r.endZoom(i,t,a):r.maybeTreatMouseOpAsClick(i,t,a):a.isPanning&&r.endPan(i,t,a),n.removeEvent(document,"mousemove",o),n.removeEvent(document,"mouseup",e),a.destroy()}))}},willDestroyContextMyself:!0,touchstart:function(e,t,a){r.startTouch(e,t,a)},touchmove:function(e,t,a){r.moveTouch(e,t,a)},touchend:function(e,t,a){r.endTouch(e,t,a)},dblclick:function(e,t,a){if(a.cancelNextDblclick)a.cancelNextDblclick=!1;else{var n={canvasx:a.dragEndX,canvasy:a.dragEndY,cancelable:!0};t.cascadeEvents_("dblclick",n)||e.altKey||e.shiftKey||t.resetZoom()}}},r.nonInteractiveModel_={mousedown:function(e,t,a){a.initializeMouseDown(e,t,a)},mouseup:r.maybeTreatMouseOpAsClick},r.dragIsPanInteractionModel={mousedown:function(e,t,a){a.initializeMouseDown(e,t,a),r.startPan(e,t,a)},mousemove:function(e,t,a){a.isPanning&&r.movePan(e,t,a)},mouseup:function(e,t,a){a.isPanning&&r.endPan(e,t,a)}},a.default=r,t.exports=a.default},{"./dygraph-utils":17}],13:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils")),r=function(e){this.dygraph_=e,this.points=[],this.setNames=[],this.annotations=[],this.yAxes_=null,this.xTicks_=null,this.yTicks_=null};r.prototype.addDataset=function(e,t){this.points.push(t),this.setNames.push(e)},r.prototype.getPlotArea=function(){return this.area_},r.prototype.computePlotArea=function(){var e={x:0,y:0};e.w=this.dygraph_.width_-e.x-this.dygraph_.getOption("rightGap"),e.h=this.dygraph_.height_;var t={chart_div:this.dygraph_.graphDiv,reserveSpaceLeft:function(t){var a={x:e.x,y:e.y,w:t,h:e.h};return e.x+=t,e.w-=t,a},reserveSpaceRight:function(t){var a={x:e.x+e.w-t,y:e.y,w:t,h:e.h};return e.w-=t,a},reserveSpaceTop:function(t){var a={x:e.x,y:e.y,w:e.w,h:t};return e.y+=t,e.h-=t,a},reserveSpaceBottom:function(t){var a={x:e.x,y:e.y+e.h-t,w:e.w,h:t};return e.h-=t,a},chartRect:function(){return{x:e.x,y:e.y,w:e.w,h:e.h}}};this.dygraph_.cascadeEvents_("layout",t),this.area_=e},r.prototype.setAnnotations=function(e){this.annotations=[];for(var t=this.dygraph_.getOption("xValueParser")||function(e){return e},a=0;a<e.length;a++){var r={};if(!e[a].xval&&void 0===e[a].x)return void console.error("Annotations must have an 'x' property");if(e[a].icon&&(!e[a].hasOwnProperty("width")||!e[a].hasOwnProperty("height")))return void console.error("Must set width and height when setting annotation.icon 
property");n.update(r,e[a]),r.xval||(r.xval=t(r.x)),this.annotations.push(r)}},r.prototype.setXTicks=function(e){this.xTicks_=e},r.prototype.setYAxes=function(e){this.yAxes_=e},r.prototype.evaluate=function(){this._xAxis={},this._evaluateLimits(),this._evaluateLineCharts(),this._evaluateLineTicks(),this._evaluateAnnotations()},r.prototype._evaluateLimits=function(){var e=this.dygraph_.xAxisRange();this._xAxis.minval=e[0],this._xAxis.maxval=e[1];var t=e[1]-e[0];this._xAxis.scale=0!==t?1/t:1,this.dygraph_.getOptionForAxis("logscale","x")&&(this._xAxis.xlogrange=n.log10(this._xAxis.maxval)-n.log10(this._xAxis.minval),this._xAxis.xlogscale=0!==this._xAxis.xlogrange?1/this._xAxis.xlogrange:1);for(var a=0;a<this.yAxes_.length;a++){var r=this.yAxes_[a];r.minyval=r.computedValueRange[0],r.maxyval=r.computedValueRange[1],r.yrange=r.maxyval-r.minyval,r.yscale=0!==r.yrange?1/r.yrange:1,this.dygraph_.getOption("logscale")&&(r.ylogrange=n.log10(r.maxyval)-n.log10(r.minyval),r.ylogscale=0!==r.ylogrange?1/r.ylogrange:1,isFinite(r.ylogrange)&&!isNaN(r.ylogrange)||console.error("axis "+a+" of graph at "+r.g+" can't be displayed in log scale for range ["+r.minyval+" - "+r.maxyval+"]"))}},r.calcXNormal_=function(e,t,a){return a?(n.log10(e)-n.log10(t.minval))*t.xlogscale:(e-t.minval)*t.scale},r.calcYNormal_=function(e,t,a){if(a){var r=1-(n.log10(t)-n.log10(e.minyval))*e.ylogscale;return isFinite(r)?r:NaN}return 1-(t-e.minyval)*e.yscale},r.prototype._evaluateLineCharts=function(){for(var e=this.dygraph_.getOption("stackedGraph"),t=this.dygraph_.getOptionForAxis("logscale","x"),a=0;a<this.points.length;a++){for(var n=this.points[a],o=this.setNames[a],i=this.dygraph_.getOption("connectSeparatedPoints",o),s=this.dygraph_.axisPropertiesForSeries(o),l=this.dygraph_.attributes_.getForSeries("logscale",o),c=0;c<n.length;c++){var u=n[c];u.x=r.calcXNormal_(u.xval,this._xAxis,t);var d=u.yval;e&&(u.y_stacked=r.calcYNormal_(s,u.yval_stacked,l),null===d||isNaN(d)||(d=u.yval_stacked)),null===d&&(d=NaN,i||(u.yval=NaN)),u.y=r.calcYNormal_(s,d,l)}this.dygraph_.dataHandler_.onLineEvaluated(n,s,l)}},r.prototype._evaluateLineTicks=function(){var e,t,a,n,r,o;for(this.xticks=[],e=0;e<this.xTicks_.length;e++)a=(t=this.xTicks_[e]).label,r=(o=!("label_v"in t))?t.v:t.label_v,(n=this.dygraph_.toPercentXCoord(r))>=0&&n<1&&this.xticks.push({pos:n,label:a,has_tick:o});for(this.yticks=[],e=0;e<this.yAxes_.length;e++)for(var i=this.yAxes_[e],s=0;s<i.ticks.length;s++)a=(t=i.ticks[s]).label,r=(o=!("label_v"in t))?t.v:t.label_v,(n=this.dygraph_.toPercentYCoord(r,e))>0&&n<=1&&this.yticks.push({axis:e,pos:n,label:a,has_tick:o})},r.prototype._evaluateAnnotations=function(){var e,t={};for(e=0;e<this.annotations.length;e++){var a=this.annotations[e];t[a.xval+","+a.series]=a}if(this.annotated_points=[],this.annotations&&this.annotations.length)for(var n=0;n<this.points.length;n++){var r=this.points[n];for(e=0;e<r.length;e++){var o=r[e],i=o.xval+","+o.name;i in t&&(o.annotation=t[i],this.annotated_points.push(o))}}},r.prototype.removeAllDatasets=function(){delete this.points,delete this.setNames,delete this.setPointsLengths,delete this.setPointsOffsets,this.points=[],this.setNames=[],this.setPointsLengths=[],this.setPointsOffsets=[]},a.default=r,t.exports=a.default},{"./dygraph-utils":17}],14:[function(e,t,a){(function(e){"use strict";Object.defineProperty(a,"__esModule",{value:!0}),a.default=null,t.exports=a.default}).call(this,e("_process"))},{_process:1}],15:[function(e,t,a){(function(n){"use strict";function r(e){return 
e&&e.__esModule?e:{default:e}}Object.defineProperty(a,"__esModule",{value:!0});var o=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils")),i=r(e("./dygraph-default-attrs")),s=(r(e("./dygraph-options-reference")),function(e){this.dygraph_=e,this.yAxes_=[],this.xAxis_={},this.series_={},this.global_=this.dygraph_.attrs_,this.user_=this.dygraph_.user_attrs_||{},this.labels_=[],this.highlightSeries_=this.get("highlightSeriesOpts")||{},this.reparseSeries()});s.AXIS_STRING_MAPPINGS_={y:0,Y:0,y1:0,Y1:0,y2:1,Y2:1},s.axisToIndex_=function(e){if("string"==typeof e){if(s.AXIS_STRING_MAPPINGS_.hasOwnProperty(e))return s.AXIS_STRING_MAPPINGS_[e];throw"Unknown axis : "+e}if("number"==typeof e){if(0===e||1===e)return e;throw"Dygraphs only supports two y-axes, indexed from 0-1."}if(e)throw"Unknown axis : "+e;return 0},s.prototype.reparseSeries=function(){var e=this.get("labels");if(e){this.labels_=e.slice(1),this.yAxes_=[{series:[],options:{}}],this.xAxis_={options:{}},this.series_={};for(var t=this.user_.series||{},a=0;a<this.labels_.length;a++){var n=this.labels_[a],r=t[n]||{},i=s.axisToIndex_(r.axis);this.series_[n]={idx:a,yAxis:i,options:r},this.yAxes_[i]?this.yAxes_[i].series.push(n):this.yAxes_[i]={series:[n],options:{}}}var l=this.user_.axes||{};o.update(this.yAxes_[0].options,l.y||{}),this.yAxes_.length>1&&o.update(this.yAxes_[1].options,l.y2||{}),o.update(this.xAxis_.options,l.x||{})}},s.prototype.get=function(e){var t=this.getGlobalUser_(e);return null!==t?t:this.getGlobalDefault_(e)},s.prototype.getGlobalUser_=function(e){return this.user_.hasOwnProperty(e)?this.user_[e]:null},s.prototype.getGlobalDefault_=function(e){return this.global_.hasOwnProperty(e)?this.global_[e]:i.default.hasOwnProperty(e)?i.default[e]:null},s.prototype.getForAxis=function(e,t){var a,n;if("number"==typeof t)n=0===(a=t)?"y":"y2";else{if("y1"==t&&(t="y"),"y"==t)a=0;else if("y2"==t)a=1;else{if("x"!=t)throw"Unknown axis "+t;a=-1}n=t}var r=-1==a?this.xAxis_:this.yAxes_[a];if(r){var o=r.options;if(o.hasOwnProperty(e))return o[e]}if("x"!==t||"logscale"!==e){var s=this.getGlobalUser_(e);if(null!==s)return s}var l=i.default.axes[n];return l.hasOwnProperty(e)?l[e]:this.getGlobalDefault_(e)},s.prototype.getForSeries=function(e,t){if(t===this.dygraph_.getHighlightSeries()&&this.highlightSeries_.hasOwnProperty(e))return this.highlightSeries_[e];if(!this.series_.hasOwnProperty(t))throw"Unknown series: "+t;var a=this.series_[t],n=a.options;return n.hasOwnProperty(e)?n[e]:this.getForAxis(e,a.yAxis)},s.prototype.numAxes=function(){return this.yAxes_.length},s.prototype.axisForSeries=function(e){return this.series_[e].yAxis},s.prototype.axisOptions=function(e){return this.yAxes_[e].options},s.prototype.seriesForAxis=function(e){return this.yAxes_[e].series},s.prototype.seriesNames=function(){return this.labels_},a.default=s,t.exports=a.default}).call(this,e("_process"))},{"./dygraph-default-attrs":10,"./dygraph-options-reference":14,"./dygraph-utils":17,_process:1}],16:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils"));a.numericLinearTicks=function(e,t,a,n,o,i){return r(e,t,a,(function(e){return"logscale"!==e&&n(e)}),o,i)};var r=function(e,t,a,r,o,i){var 
s,l,c,u,d=r("pixelsPerLabel"),h=[];if(i)for(s=0;s<i.length;s++)h.push({v:i[s]});else{if(r("logscale")){u=Math.floor(a/d);var p=n.binarySearch(e,g,1),f=n.binarySearch(t,g,-1);-1==p&&(p=0),-1==f&&(f=g.length-1);var m=null;if(f-p>=u/4){for(var b=f;b>=p;b--){var v=g[b],_=Math.log(v/e)/Math.log(t/e)*a,y={v:v};null===m?m={tickValue:v,pixel_coord:_}:Math.abs(_-m.pixel_coord)>=d?m={tickValue:v,pixel_coord:_}:y.label="",h.push(y)}h.reverse()}}if(0===h.length){var O,x;r("labelsKMG2")?(O=[1,2,4,8,16,32,64,128,256],x=16):(O=[1,2,5,10,20,50,100],x=10);var w,E,S,C=Math.ceil(a/d),A=Math.abs(t-e)/C,k=Math.floor(Math.log(A)/Math.log(x)),j=Math.pow(x,k);for(l=0;l<O.length&&(w=j*O[l],E=Math.floor(e/w)*w,S=Math.ceil(t/w)*w,!(a/(u=Math.abs(S-E)/w)>d));l++);for(E>S&&(w*=-1),s=0;s<=u;s++)c=E+s*w,h.push({v:c})}}var T=r("axisLabelFormatter");for(s=0;s<h.length;s++)void 0===h[s].label&&(h[s].label=T.call(o,h[s].v,0,r,o));return h};a.numericTicks=r,a.dateTicker=function(e,t,a,n,r,o){var i=m(e,t,a,n);return i>=0?v(e,t,i,n,r):[]};var o={MILLISECONDLY:0,TWO_MILLISECONDLY:1,FIVE_MILLISECONDLY:2,TEN_MILLISECONDLY:3,FIFTY_MILLISECONDLY:4,HUNDRED_MILLISECONDLY:5,FIVE_HUNDRED_MILLISECONDLY:6,SECONDLY:7,TWO_SECONDLY:8,FIVE_SECONDLY:9,TEN_SECONDLY:10,THIRTY_SECONDLY:11,MINUTELY:12,TWO_MINUTELY:13,FIVE_MINUTELY:14,TEN_MINUTELY:15,THIRTY_MINUTELY:16,HOURLY:17,TWO_HOURLY:18,SIX_HOURLY:19,DAILY:20,TWO_DAILY:21,WEEKLY:22,MONTHLY:23,QUARTERLY:24,BIANNUAL:25,ANNUAL:26,DECADAL:27,CENTENNIAL:28,NUM_GRANULARITIES:29};a.Granularity=o;var i=0,s=1,l=2,c=3,u=4,d=5,h=6,p=7,f=[];f[o.MILLISECONDLY]={datefield:h,step:1,spacing:1},f[o.TWO_MILLISECONDLY]={datefield:h,step:2,spacing:2},f[o.FIVE_MILLISECONDLY]={datefield:h,step:5,spacing:5},f[o.TEN_MILLISECONDLY]={datefield:h,step:10,spacing:10},f[o.FIFTY_MILLISECONDLY]={datefield:h,step:50,spacing:50},f[o.HUNDRED_MILLISECONDLY]={datefield:h,step:100,spacing:100},f[o.FIVE_HUNDRED_MILLISECONDLY]={datefield:h,step:500,spacing:500},f[o.SECONDLY]={datefield:d,step:1,spacing:1e3},f[o.TWO_SECONDLY]={datefield:d,step:2,spacing:2e3},f[o.FIVE_SECONDLY]={datefield:d,step:5,spacing:5e3},f[o.TEN_SECONDLY]={datefield:d,step:10,spacing:1e4},f[o.THIRTY_SECONDLY]={datefield:d,step:30,spacing:3e4},f[o.MINUTELY]={datefield:u,step:1,spacing:6e4},f[o.TWO_MINUTELY]={datefield:u,step:2,spacing:12e4},f[o.FIVE_MINUTELY]={datefield:u,step:5,spacing:3e5},f[o.TEN_MINUTELY]={datefield:u,step:10,spacing:6e5},f[o.THIRTY_MINUTELY]={datefield:u,step:30,spacing:18e5},f[o.HOURLY]={datefield:c,step:1,spacing:36e5},f[o.TWO_HOURLY]={datefield:c,step:2,spacing:72e5},f[o.SIX_HOURLY]={datefield:c,step:6,spacing:216e5},f[o.DAILY]={datefield:l,step:1,spacing:864e5},f[o.TWO_DAILY]={datefield:l,step:2,spacing:1728e5},f[o.WEEKLY]={datefield:l,step:7,spacing:6048e5},f[o.MONTHLY]={datefield:s,step:1,spacing:2629817280},f[o.QUARTERLY]={datefield:s,step:3,spacing:216e5*365.2524},f[o.BIANNUAL]={datefield:s,step:6,spacing:432e5*365.2524},f[o.ANNUAL]={datefield:i,step:1,spacing:864e5*365.2524},f[o.DECADAL]={datefield:i,step:10,spacing:315578073600},f[o.CENTENNIAL]={datefield:i,step:100,spacing:3155780736e3};var g=function(){for(var e=[],t=-39;t<=39;t++)for(var a=Math.pow(10,t),n=1;n<=9;n++){var r=a*n;e.push(r)}return e}(),m=function(e,t,a,n){for(var r=n("pixelsPerLabel"),i=0;i<o.NUM_GRANULARITIES;i++)if(a/b(e,t,i)>=r)return i;return-1},b=function(e,t,a){var n=f[a].spacing;return Math.round(1*(t-e)/n)},v=function(e,t,a,r,g){var 
m=r("axisLabelFormatter"),b=r("labelsUTC")?n.DateAccessorsUTC:n.DateAccessorsLocal,v=f[a].datefield,_=f[a].step,y=f[a].spacing,O=new Date(e),x=[];x[i]=b.getFullYear(O),x[s]=b.getMonth(O),x[l]=b.getDate(O),x[c]=b.getHours(O),x[u]=b.getMinutes(O),x[d]=b.getSeconds(O),x[h]=b.getMilliseconds(O);var w=x[v]%_;a==o.WEEKLY&&(w=b.getDay(O)),x[v]-=w;for(var E=v+1;E<p;E++)x[E]=E===l?1:0;var S=[],C=b.makeDate.apply(null,x),A=C.getTime();if(a<=o.HOURLY)for(A<e&&(A+=y,C=new Date(A));A<=t;)S.push({v:A,label:m.call(g,C,a,r,g)}),A+=y,C=new Date(A);else for(A<e&&(x[v]+=_,A=(C=b.makeDate.apply(null,x)).getTime());A<=t;)(a>=o.DAILY||b.getHours(C)%_==0)&&S.push({v:A,label:m.call(g,C,a,r,g)}),x[v]+=_,A=(C=b.makeDate.apply(null,x)).getTime();return S};a.getDateAxis=v},{"./dygraph-utils":17}],17:[function(e,t,a){"use strict";function n(e){return!e.pageX||e.pageX<0?0:e.pageX}function r(e){return!e.pageY||e.pageY<0?0:e.pageY}function o(e,t){var a=Math.min(Math.max(1,t||2),21);return Math.abs(e)<.001&&0!==e?e.toExponential(a-1):e.toPrecision(a)}function i(e){return e<10?"0"+e:""+e}function s(e,t,a,n){var r=i(e)+":"+i(t);if(a&&(r+=":"+i(a),n)){var o=""+n;r+="."+("000"+o).substring(o.length)}return r}function l(e,t){var a=t?y:_,n=new Date(e),r=a.getFullYear(n),o=a.getMonth(n),l=a.getDate(n),c=a.getHours(n),u=a.getMinutes(n),d=a.getSeconds(n),h=a.getMilliseconds(n),p=r+"/"+i(o+1)+"/"+i(l);return 3600*c+60*u+d+.001*h&&(p+=" "+s(c,u,d,h)),p}function c(e,t){var a=Math.pow(10,t);return Math.round(e*a)/a}function u(e){return new Date(e).getTime()}function d(e){var t=typeof e;return("object"==t||"function"==t&&"function"==typeof e.item)&&null!==e&&"number"==typeof e.length&&3!==e.nodeType}function h(e,t,a,n){t=t||0,a=a||e.length,this.hasNext=!0,this.peek=null,this.start_=t,this.array_=e,this.predicate_=n,this.end_=Math.min(e.length,t+a),this.nextIdx_=t-1,this.next()}function p(e,t){return t<0?1/Math.pow(e,-t):Math.pow(e,t)}function f(e){var t=E.exec(e);if(!t)return null;var a=parseInt(t[1],10),n=parseInt(t[2],10),r=parseInt(t[3],10);return t[4]?{r:a,g:n,b:r,a:parseFloat(t[4])}:{r:a,g:n,b:r}}function g(e,t){var a=t("sigFigs");if(null!==a)return o(e,a);var n,r=t("digitsAfterDecimal"),i=t("maxNumberWidth"),s=t("labelsKMB"),l=t("labelsKMG2");if(n=0!==e&&(Math.abs(e)>=Math.pow(10,i)||Math.abs(e)<Math.pow(10,-r))?e.toExponential(r):""+c(e,r),s||l){var u,d=[],h=[];s&&(u=1e3,d=S),l&&(s&&console.warn("Setting both labelsKMB and labelsKMG2. 
Pick one!"),u=1024,d=C,h=A);for(var f=Math.abs(e),g=p(u,d.length),m=d.length-1;m>=0;m--,g/=u)if(f>=g){n=c(e/g,r)+d[m];break}if(l){var b=String(e.toExponential()).split("e-");2===b.length&&b[1]>=3&&b[1]<=24&&(n=b[1]%3>0?c(b[0]/p(10,b[1]%3),r):Number(b[0]).toFixed(2),n+=h[Math.floor(b[1]/3)-1])}}return n}Object.defineProperty(a,"__esModule",{value:!0}),a.removeEvent=function(e,t,a){e.removeEventListener(t,a,!1)},a.cancelEvent=function(e){return(e=e||window.event).stopPropagation&&e.stopPropagation(),e.preventDefault&&e.preventDefault(),e.cancelBubble=!0,e.cancel=!0,e.returnValue=!1,!1},a.hsvToRGB=function(e,t,a){var n,r,o;if(0===t)n=a,r=a,o=a;else{var i=Math.floor(6*e),s=6*e-i,l=a*(1-t),c=a*(1-t*s),u=a*(1-t*(1-s));switch(i){case 1:n=c,r=a,o=l;break;case 2:n=l,r=a,o=u;break;case 3:n=l,r=c,o=a;break;case 4:n=u,r=l,o=a;break;case 5:n=a,r=l,o=c;break;case 6:case 0:n=a,r=u,o=l}}return"rgb("+(n=Math.floor(255*n+.5))+","+(r=Math.floor(255*r+.5))+","+(o=Math.floor(255*o+.5))+")"},a.findPos=function(e){var t=e.getBoundingClientRect(),a=window,n=document.documentElement;return{x:t.left+(a.pageXOffset||n.scrollLeft),y:t.top+(a.pageYOffset||n.scrollTop)}},a.pageX=n,a.pageY=r,a.dragGetX_=function(e,t){return n(e)-t.px},a.dragGetY_=function(e,t){return r(e)-t.py},a.isOK=function(e){return!!e&&!isNaN(e)},a.isValidPoint=function(e,t){return!!e&&null!==e.yval&&null!==e.x&&void 0!==e.x&&null!==e.y&&void 0!==e.y&&!(isNaN(e.x)||!t&&isNaN(e.y))},a.floatFormat=o,a.zeropad=i,a.hmsString_=s,a.dateString_=l,a.round_=c,a.binarySearch=function(e,t,a,n,r){for(var o=!0;o;){var i=e,s=t,l=a,c=n,u=r;if(o=!1,null!==c&&void 0!==c&&null!==u&&void 0!==u||(c=0,u=s.length-1),c>u)return-1;null!==l&&void 0!==l||(l=0);var d,h=function(e){return e>=0&&e<s.length},p=parseInt((c+u)/2,10),f=s[p];if(f==i)return p;if(f>i){if(l>0&&h(d=p-1)&&s[d]<i)return p;e=i,t=s,a=l,n=c,r=p-1,o=!0,h=p=f=d=void 0}else{if(!(f<i))return-1;if(l<0&&h(d=p+1)&&s[d]>i)return p;e=i,t=s,a=l,n=p+1,r=u,o=!0,h=p=f=d=void 0}}},a.dateParser=function(e){var t,a;if((-1==e.search("-")||-1!=e.search("T")||-1!=e.search("Z"))&&(a=u(e))&&!isNaN(a))return a;if(-1!=e.search("-")){for(t=e.replace("-","/","g");-1!=t.search("-");)t=t.replace("-","/");a=u(t)}else a=8==e.length?u(t=e.substr(0,4)+"/"+e.substr(4,2)+"/"+e.substr(6,2)):u(e);return a&&!isNaN(a)||console.error("Couldn't parse "+e+" as a date"),a},a.dateStrToMillis=u,a.update=function(e,t){if(void 0!==t&&null!==t)for(var a in t)t.hasOwnProperty(a)&&(e[a]=t[a]);return e},a.updateDeep=function e(t,a){if(void 0!==a&&null!==a)for(var n in a)a.hasOwnProperty(n)&&(null===a[n]?t[n]=null:d(a[n])?t[n]=a[n].slice():function(e){return"object"==typeof Node?e instanceof Node:"object"==typeof e&&"number"==typeof e.nodeType&&"string"==typeof e.nodeName}(a[n])||"object"!=typeof a[n]?t[n]=a[n]:("object"==typeof t[n]&&null!==t[n]||(t[n]={}),e(t[n],a[n])));return t},a.isArrayLike=d,a.isDateLike=function(e){return"object"==typeof e&&null!==e&&"function"==typeof e.getTime},a.clone=function e(t){for(var a=[],n=0;n<t.length;n++)d(t[n])?a.push(e(t[n])):a.push(t[n]);return a},a.createCanvas=function(){return document.createElement("canvas")},a.getContextPixelRatio=function(e){try{var t=window.devicePixelRatio,a=e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1;return void 0!==t?t/a:1}catch(e){return 1}},a.Iterator=h,a.createIterator=function(e,t,a,n){return new h(e,t,a,n)},a.repeatAndCleanup=function(e,t,a,n){var r,o=0,i=(new 
Date).getTime();if(e(o),1!=t){var s=t-1;!function l(){o>=t||O.call(window,(function(){var t=(new Date).getTime()-i;r=o;var c=(o=Math.floor(t/a))-r;o+c>s||o>=s?(e(s),n()):(0!==c&&e(o),l())}))}()}else n()},a.isPixelChangingOptionList=function(e,t){var a={};if(e)for(var n=1;n<e.length;n++)a[e[n]]=!0;var r=function(e){for(var t in e)if(e.hasOwnProperty(t)&&!x[t])return!0;return!1};for(var o in t)if(t.hasOwnProperty(o))if("highlightSeriesOpts"==o||a[o]&&!t.series){if(r(t[o]))return!0}else if("series"==o||"axes"==o){var i=t[o];for(var s in i)if(i.hasOwnProperty(s)&&r(i[s]))return!0}else if(!x[o])return!0;return!1},a.detectLineDelimiter=function(e){for(var t=0;t<e.length;t++){var a=e.charAt(t);if("\r"===a)return t+1<e.length&&"\n"===e.charAt(t+1)?"\r\n":a;if("\n"===a)return t+1<e.length&&"\r"===e.charAt(t+1)?"\n\r":a}return null},a.isNodeContainedBy=function(e,t){if(null===t||null===e)return!1;for(var a=e;a&&a!==t;)a=a.parentNode;return a===t},a.pow=p,a.toRGB_=function(e){var t=f(e);if(t)return t;var a=document.createElement("div");a.style.backgroundColor=e,a.style.visibility="hidden",document.body.appendChild(a);var n=window.getComputedStyle(a,null).backgroundColor;return document.body.removeChild(a),f(n)},a.isCanvasSupported=function(e){try{(e||document.createElement("canvas")).getContext("2d")}catch(e){return!1}return!0},a.parseFloat_=function(e,t,a){var n=parseFloat(e);if(!isNaN(n))return n;if(/^ *$/.test(e))return null;if(/^ *nan *$/i.test(e))return NaN;var r="Unable to parse '"+e+"' as a number";return void 0!==a&&void 0!==t&&(r+=" on line "+(1+(t||0))+" ('"+a+"') of CSV."),console.error(r),null},a.numberValueFormatter=g,a.numberAxisLabelFormatter=function(e,t,a){return g.call(this,e,a)},a.dateAxisLabelFormatter=function(e,t,a){var n=a("labelsUTC")?y:_,r=n.getFullYear(e),o=n.getMonth(e),l=n.getDate(e),c=n.getHours(e),u=n.getMinutes(e),d=n.getSeconds(e),h=n.getMilliseconds(e);if(t>=m.Granularity.DECADAL)return""+r;if(t>=m.Granularity.MONTHLY)return k[o]+" "+r;if(0===3600*c+60*u+d+.001*h||t>=m.Granularity.DAILY)return i(l)+" "+k[o];if(t<m.Granularity.SECONDLY){var p=""+h;return i(d)+"."+("000"+p).substring(p.length)}return t>m.Granularity.MINUTELY?s(c,u,d,0):s(c,u,d,h)},a.dateValueFormatter=function(e,t){return l(e,t("labelsUTC"))};var m=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-tickers"));a.LOG_SCALE=10;var b=Math.log(10);a.LN_TEN=b;var v=function(e){return Math.log(e)/b};a.log10=v,a.logRangeFraction=function(e,t,a){var n=v(e),r=n+a*(v(t)-n);return Math.pow(10,r)},a.DOTTED_LINE=[2,2],a.DASHED_LINE=[7,3],a.DOT_DASH_LINE=[7,2,2,2],a.HORIZONTAL=1,a.VERTICAL=2,a.getContext=function(e){return e.getContext("2d")},a.addEvent=function(e,t,a){e.addEventListener(t,a,!1)};var _={getFullYear:function(e){return e.getFullYear()},getMonth:function(e){return e.getMonth()},getDate:function(e){return e.getDate()},getHours:function(e){return e.getHours()},getMinutes:function(e){return e.getMinutes()},getSeconds:function(e){return e.getSeconds()},getMilliseconds:function(e){return e.getMilliseconds()},getDay:function(e){return e.getDay()},makeDate:function(e,t,a,n,r,o,i){return new Date(e,t,a,n,r,o,i)}};a.DateAccessorsLocal=_;var y={getFullYear:function(e){return e.getUTCFullYear()},getMonth:function(e){return e.getUTCMonth()},getDate:function(e){return e.getUTCDate()},getHours:function(e){return e.getUTCHours()},getMinutes:function(e){return e.getUTCMinutes()},getSeconds:function(e){return 
e.getUTCSeconds()},getMilliseconds:function(e){return e.getUTCMilliseconds()},getDay:function(e){return e.getUTCDay()},makeDate:function(e,t,a,n,r,o,i){return new Date(Date.UTC(e,t,a,n,r,o,i))}};a.DateAccessorsUTC=y,h.prototype.next=function(){if(!this.hasNext)return null;for(var e=this.peek,t=this.nextIdx_+1,a=!1;t<this.end_;){if(!this.predicate_||this.predicate_(this.array_,t)){this.peek=this.array_[t],a=!0;break}t++}return this.nextIdx_=t,a||(this.hasNext=!1,this.peek=null),e};var O=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(e){window.setTimeout(e,1e3/60)};a.requestAnimFrame=O;var x={annotationClickHandler:!0,annotationDblClickHandler:!0,annotationMouseOutHandler:!0,annotationMouseOverHandler:!0,axisLineColor:!0,axisLineWidth:!0,clickCallback:!0,drawCallback:!0,drawHighlightPointCallback:!0,drawPoints:!0,drawPointCallback:!0,drawGrid:!0,fillAlpha:!0,gridLineColor:!0,gridLineWidth:!0,hideOverlayOnMouseOut:!0,highlightCallback:!0,highlightCircleSize:!0,interactionModel:!0,labelsDiv:!0,labelsKMB:!0,labelsKMG2:!0,labelsSeparateLines:!0,labelsShowZeroValues:!0,legend:!0,panEdgeFraction:!0,pixelsPerYLabel:!0,pointClickCallback:!0,pointSize:!0,rangeSelectorPlotFillColor:!0,rangeSelectorPlotFillGradientColor:!0,rangeSelectorPlotStrokeColor:!0,rangeSelectorBackgroundStrokeColor:!0,rangeSelectorBackgroundLineWidth:!0,rangeSelectorPlotLineWidth:!0,rangeSelectorForegroundStrokeColor:!0,rangeSelectorForegroundLineWidth:!0,rangeSelectorAlpha:!0,showLabelsOnHighlight:!0,showRoller:!0,strokeWidth:!0,underlayCallback:!0,unhighlightCallback:!0,zoomCallback:!0},w={DEFAULT:function(e,t,a,n,r,o,i){a.beginPath(),a.fillStyle=o,a.arc(n,r,i,0,2*Math.PI,!1),a.fill()}};a.Circles=w;var E=/^rgba?\((\d{1,3}),\s*(\d{1,3}),\s*(\d{1,3})(?:,\s*([01](?:\.\d+)?))?\)$/,S=["K","M","B","T","Q"],C=["k","M","G","T","P","E","Z","Y"],A=["m","u","n","p","f","a","z","y"],k=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]},{"./dygraph-tickers":16}],18:[function(e,t,a){(function(n){"use strict";function r(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}function o(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(a,"__esModule",{value:!0});var i=function(e,t){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return function(e,t){var a=[],n=!0,r=!1,o=void 0;try{for(var i,s=e[Symbol.iterator]();!(n=(i=s.next()).done)&&(a.push(i.value),!t||a.length!==t);n=!0);}catch(e){r=!0,o=e}finally{try{!n&&s.return&&s.return()}finally{if(r)throw o}}return a}(e,t);throw new TypeError("Invalid attempt to destructure non-iterable 
instance")},s=o(e("./dygraph-layout")),l=o(e("./dygraph-canvas")),c=o(e("./dygraph-options")),u=o(e("./dygraph-interaction-model")),d=r(e("./dygraph-tickers")),h=r(e("./dygraph-utils")),p=o(e("./dygraph-default-attrs")),f=(o(e("./dygraph-options-reference")),e("./iframe-tarp")),g=o(f),m=o(e("./datahandler/default")),b=o(e("./datahandler/bars-error")),v=o(e("./datahandler/bars-custom")),_=o(e("./datahandler/default-fractions")),y=o(e("./datahandler/bars-fractions")),O=o(e("./datahandler/bars")),x=o(e("./plugins/annotations")),w=o(e("./plugins/axes")),E=o(e("./plugins/chart-labels")),S=o(e("./plugins/grid")),C=o(e("./plugins/legend")),A=o(e("./plugins/range-selector")),k=o(e("./dygraph-gviz")),j=function(e,t,a){this.__init__(e,t,a)};j.NAME="Dygraph",j.VERSION="2.1.0",j.DEFAULT_ROLL_PERIOD=1,j.DEFAULT_WIDTH=480,j.DEFAULT_HEIGHT=320,j.ANIMATION_STEPS=12,j.ANIMATION_DURATION=200,j.Plotters=l.default._Plotters,j.addedAnnotationCSS=!1,j.prototype.__init__=function(e,t,a){if(this.is_initial_draw_=!0,this.readyFns_=[],null!==a&&void 0!==a||(a={}),a=j.copyUserAttrs_(a),"string"==typeof e&&(e=document.getElementById(e)),!e)throw new Error("Constructing dygraph with a non-existent div!");this.maindiv_=e,this.file_=t,this.rollPeriod_=a.rollPeriod||j.DEFAULT_ROLL_PERIOD,this.previousVerticalX_=-1,this.fractions_=a.fractions||!1,this.dateWindow_=a.dateWindow||null,this.annotations_=[],e.innerHTML="",""===e.style.width&&a.width&&(e.style.width=a.width+"px"),""===e.style.height&&a.height&&(e.style.height=a.height+"px"),""===e.style.height&&0===e.clientHeight&&(e.style.height=j.DEFAULT_HEIGHT+"px",""===e.style.width&&(e.style.width=j.DEFAULT_WIDTH+"px")),this.width_=e.clientWidth||a.width||0,this.height_=e.clientHeight||a.height||0,a.stackedGraph&&(a.fillGraph=!0),this.user_attrs_={},h.update(this.user_attrs_,a),this.attrs_={},h.updateDeep(this.attrs_,p.default),this.boundaryIds_=[],this.setIndexByName_={},this.datasetIndex_=[],this.registeredEvents_=[],this.eventListeners_={},this.attributes_=new c.default(this),this.createInterface_(),this.plugins_=[];for(var n=j.PLUGINS.concat(this.getOption("plugins")),r=0;r<n.length;r++){var o,i=n[r],s={plugin:o=void 0!==i.activate?i:new i,events:{},options:{},pluginOptions:{}},l=o.activate(this);for(var u in l)l.hasOwnProperty(u)&&(s.events[u]=l[u]);this.plugins_.push(s)}for(r=0;r<this.plugins_.length;r++){var d=this.plugins_[r];for(var u in d.events)if(d.events.hasOwnProperty(u)){var f=d.events[u],g=[d.plugin,f];u in this.eventListeners_?this.eventListeners_[u].push(g):this.eventListeners_[u]=[g]}}this.createDragInterface_(),this.start_()},j.prototype.cascadeEvents_=function(e,t){if(!(e in this.eventListeners_))return!1;var a={dygraph:this,cancelable:!1,defaultPrevented:!1,preventDefault:function(){if(!a.cancelable)throw"Cannot call preventDefault on non-cancelable event.";a.defaultPrevented=!0},propagationStopped:!1,stopPropagation:function(){a.propagationStopped=!0}};h.update(a,t);var n=this.eventListeners_[e];if(n)for(var r=n.length-1;r>=0;r--){var o=n[r][0];if(n[r][1].call(o,a),a.propagationStopped)break}return a.defaultPrevented},j.prototype.getPluginInstance_=function(e){for(var t=0;t<this.plugins_.length;t++){var a=this.plugins_[t];if(a.plugin instanceof e)return a.plugin}return null},j.prototype.isZoomed=function(e){var t=!!this.dateWindow_;if("x"===e)return t;var a=this.axes_.map((function(e){return!!e.valueRange})).indexOf(!0)>=0;if(null===e||void 0===e)return t||a;if("y"===e)return a;throw new Error("axis parameter is ["+e+"] must be null, 'x' or 
'y'.")},j.prototype.toString=function(){var e=this.maindiv_;return"[Dygraph "+(e&&e.id?e.id:e)+"]"},j.prototype.attr_=function(e,t){return t?this.attributes_.getForSeries(e,t):this.attributes_.get(e)},j.prototype.getOption=function(e,t){return this.attr_(e,t)},j.prototype.getNumericOption=function(e,t){return this.getOption(e,t)},j.prototype.getStringOption=function(e,t){return this.getOption(e,t)},j.prototype.getBooleanOption=function(e,t){return this.getOption(e,t)},j.prototype.getFunctionOption=function(e,t){return this.getOption(e,t)},j.prototype.getOptionForAxis=function(e,t){return this.attributes_.getForAxis(e,t)},j.prototype.optionsViewForAxis_=function(e){var t=this;return function(a){var n=t.user_attrs_.axes;return n&&n[e]&&n[e].hasOwnProperty(a)?n[e][a]:("x"!==e||"logscale"!==a)&&(void 0!==t.user_attrs_[a]?t.user_attrs_[a]:(n=t.attrs_.axes)&&n[e]&&n[e].hasOwnProperty(a)?n[e][a]:"y"==e&&t.axes_[0].hasOwnProperty(a)?t.axes_[0][a]:"y2"==e&&t.axes_[1].hasOwnProperty(a)?t.axes_[1][a]:t.attr_(a))}},j.prototype.rollPeriod=function(){return this.rollPeriod_},j.prototype.xAxisRange=function(){return this.dateWindow_?this.dateWindow_:this.xAxisExtremes()},j.prototype.xAxisExtremes=function(){var e=this.getNumericOption("xRangePad")/this.plotter_.area.w;if(0===this.numRows())return[0-e,1+e];var t=this.rawData_[0][0],a=this.rawData_[this.rawData_.length-1][0];if(e){var n=a-t;t-=n*e,a+=n*e}return[t,a]},j.prototype.yAxisExtremes=function(){var e=this.gatherDatasets_(this.rolledSeries_,null).extremes,t=this.axes_;this.computeYAxisRanges_(e);var a=this.axes_;return this.axes_=t,a.map((function(e){return e.extremeRange}))},j.prototype.yAxisRange=function(e){if(void 0===e&&(e=0),e<0||e>=this.axes_.length)return null;var t=this.axes_[e];return[t.computedValueRange[0],t.computedValueRange[1]]},j.prototype.yAxisRanges=function(){for(var e=[],t=0;t<this.axes_.length;t++)e.push(this.yAxisRange(t));return e},j.prototype.toDomCoords=function(e,t,a){return[this.toDomXCoord(e),this.toDomYCoord(t,a)]},j.prototype.toDomXCoord=function(e){if(null===e)return null;var t=this.plotter_.area,a=this.xAxisRange();return t.x+(e-a[0])/(a[1]-a[0])*t.w},j.prototype.toDomYCoord=function(e,t){var a=this.toPercentYCoord(e,t);if(null===a)return null;var n=this.plotter_.area;return n.y+a*n.h},j.prototype.toDataCoords=function(e,t,a){return[this.toDataXCoord(e),this.toDataYCoord(t,a)]},j.prototype.toDataXCoord=function(e){if(null===e)return null;var t=this.plotter_.area,a=this.xAxisRange();if(this.attributes_.getForAxis("logscale","x")){var n=(e-t.x)/t.w;return h.logRangeFraction(a[0],a[1],n)}return a[0]+(e-t.x)/t.w*(a[1]-a[0])},j.prototype.toDataYCoord=function(e,t){if(null===e)return null;var a=this.plotter_.area,n=this.yAxisRange(t);if(void 0===t&&(t=0),this.attributes_.getForAxis("logscale",t)){var r=(e-a.y)/a.h;return h.logRangeFraction(n[1],n[0],r)}return n[0]+(a.y+a.h-e)/a.h*(n[1]-n[0])},j.prototype.toPercentYCoord=function(e,t){if(null===e)return null;void 0===t&&(t=0);var a,n=this.yAxisRange(t);if(this.attributes_.getForAxis("logscale",t)){var r=h.log10(n[0]),o=h.log10(n[1]);a=(o-h.log10(e))/(o-r)}else a=(n[1]-e)/(n[1]-n[0]);return a},j.prototype.toPercentXCoord=function(e){if(null===e)return null;var t,a=this.xAxisRange();if(!0===this.attributes_.getForAxis("logscale","x")){var n=h.log10(a[0]),r=h.log10(a[1]);t=(h.log10(e)-n)/(r-n)}else t=(e-a[0])/(a[1]-a[0]);return t},j.prototype.numColumns=function(){return 
this.rawData_?this.rawData_[0]?this.rawData_[0].length:this.attr_("labels").length:0},j.prototype.numRows=function(){return this.rawData_?this.rawData_.length:0},j.prototype.getValue=function(e,t){return e<0||e>this.rawData_.length?null:t<0||t>this.rawData_[e].length?null:this.rawData_[e][t]},j.prototype.createInterface_=function(){var e=this.maindiv_;this.graphDiv=document.createElement("div"),this.graphDiv.style.textAlign="left",this.graphDiv.style.position="relative",e.appendChild(this.graphDiv),this.canvas_=h.createCanvas(),this.canvas_.style.position="absolute",this.hidden_=this.createPlotKitCanvas_(this.canvas_),this.canvas_ctx_=h.getContext(this.canvas_),this.hidden_ctx_=h.getContext(this.hidden_),this.resizeElements_(),this.graphDiv.appendChild(this.hidden_),this.graphDiv.appendChild(this.canvas_),this.mouseEventElement_=this.createMouseEventElement_(),this.layout_=new s.default(this);var t=this;this.mouseMoveHandler_=function(e){t.mouseMove_(e)},this.mouseOutHandler_=function(e){var a=e.target||e.fromElement,n=e.relatedTarget||e.toElement;h.isNodeContainedBy(a,t.graphDiv)&&!h.isNodeContainedBy(n,t.graphDiv)&&t.mouseOut_(e)},this.addAndTrackEvent(window,"mouseout",this.mouseOutHandler_),this.addAndTrackEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),this.resizeHandler_||(this.resizeHandler_=function(e){t.resize()},this.addAndTrackEvent(window,"resize",this.resizeHandler_))},j.prototype.resizeElements_=function(){this.graphDiv.style.width=this.width_+"px",this.graphDiv.style.height=this.height_+"px";var e=this.getNumericOption("pixelRatio"),t=e||h.getContextPixelRatio(this.canvas_ctx_);this.canvas_.width=this.width_*t,this.canvas_.height=this.height_*t,this.canvas_.style.width=this.width_+"px",this.canvas_.style.height=this.height_+"px",1!==t&&this.canvas_ctx_.scale(t,t);var a=e||h.getContextPixelRatio(this.hidden_ctx_);this.hidden_.width=this.width_*a,this.hidden_.height=this.height_*a,this.hidden_.style.width=this.width_+"px",this.hidden_.style.height=this.height_+"px",1!==a&&this.hidden_ctx_.scale(a,a)},j.prototype.destroy=function(){this.canvas_ctx_.restore(),this.hidden_ctx_.restore();for(var e=this.plugins_.length-1;e>=0;e--){var t=this.plugins_.pop();t.plugin.destroy&&t.plugin.destroy()}this.removeTrackedEvents_(),h.removeEvent(window,"mouseout",this.mouseOutHandler_),h.removeEvent(this.mouseEventElement_,"mousemove",this.mouseMoveHandler_),h.removeEvent(window,"resize",this.resizeHandler_),this.resizeHandler_=null,function e(t){for(;t.hasChildNodes();)e(t.firstChild),t.removeChild(t.firstChild)}(this.maindiv_);var a=function(e){for(var t in e)"object"==typeof e[t]&&(e[t]=null)};a(this.layout_),a(this.plotter_),a(this)},j.prototype.createPlotKitCanvas_=function(e){var t=h.createCanvas();return t.style.position="absolute",t.style.top=e.style.top,t.style.left=e.style.left,t.width=this.width_,t.height=this.height_,t.style.width=this.width_+"px",t.style.height=this.height_+"px",t},j.prototype.createMouseEventElement_=function(){return this.canvas_},j.prototype.setColors_=function(){var e=this.getLabels(),t=e.length-1;this.colors_=[],this.colorsMap_={};for(var a=this.getNumericOption("colorSaturation")||1,n=this.getNumericOption("colorValue")||.5,r=Math.ceil(t/2),o=this.getOption("colors"),i=this.visibility(),s=0;s<t;s++)if(i[s]){var l=e[s+1],c=this.attributes_.getForSeries("color",l);if(!c)if(o)c=o[s%o.length];else{var 
u=1*(s%2?r+(s+1)/2:Math.ceil((s+1)/2))/(1+t);c=h.hsvToRGB(u,a,n)}this.colors_.push(c),this.colorsMap_[l]=c}},j.prototype.getColors=function(){return this.colors_},j.prototype.getPropertiesForSeries=function(e){for(var t=-1,a=this.getLabels(),n=1;n<a.length;n++)if(a[n]==e){t=n;break}return-1==t?null:{name:e,column:t,visible:this.visibility()[t-1],color:this.colorsMap_[e],axis:1+this.attributes_.axisForSeries(e)}},j.prototype.createRollInterface_=function(){var e=this,t=this.roller_;t||(this.roller_=t=document.createElement("input"),t.type="text",t.style.display="none",t.className="dygraph-roller",this.graphDiv.appendChild(t));var a=this.getBooleanOption("showRoller")?"block":"none",n=this.getArea(),r={top:n.y+n.h-25+"px",left:n.x+1+"px",display:a};t.size="2",t.value=this.rollPeriod_,h.update(t.style,r),t.onchange=function(){return e.adjustRoll(t.value)}},j.prototype.createDragInterface_=function(){var e={isZooming:!1,isPanning:!1,is2DPan:!1,dragStartX:null,dragStartY:null,dragEndX:null,dragEndY:null,dragDirection:null,prevEndX:null,prevEndY:null,prevDragDirection:null,cancelNextDblclick:!1,initialLeftmostDate:null,xUnitsPerPixel:null,dateRange:null,px:0,py:0,boundedDates:null,boundedValues:null,tarp:new g.default,initializeMouseDown:function(e,t,a){e.preventDefault?e.preventDefault():(e.returnValue=!1,e.cancelBubble=!0);var n=h.findPos(t.canvas_);a.px=n.x,a.py=n.y,a.dragStartX=h.dragGetX_(e,a),a.dragStartY=h.dragGetY_(e,a),a.cancelNextDblclick=!1,a.tarp.cover()},destroy:function(){var e=this;if((e.isZooming||e.isPanning)&&(e.isZooming=!1,e.dragStartX=null,e.dragStartY=null),e.isPanning){e.isPanning=!1,e.draggingDate=null,e.dateRange=null;for(var t=0;t<a.axes_.length;t++)delete a.axes_[t].draggingValue,delete a.axes_[t].dragValueRange}e.tarp.uncover()}},t=this.getOption("interactionModel"),a=this;for(var n in t)t.hasOwnProperty(n)&&this.addAndTrackEvent(this.mouseEventElement_,n,function(t){return function(n){t(n,a,e)}}(t[n]));t.willDestroyContextMyself||this.addAndTrackEvent(document,"mouseup",(function(t){e.destroy()}))},j.prototype.drawZoomRect_=function(e,t,a,n,r,o,i,s){var l=this.canvas_ctx_;o==h.HORIZONTAL?l.clearRect(Math.min(t,i),this.layout_.getPlotArea().y,Math.abs(t-i),this.layout_.getPlotArea().h):o==h.VERTICAL&&l.clearRect(this.layout_.getPlotArea().x,Math.min(n,s),this.layout_.getPlotArea().w,Math.abs(n-s)),e==h.HORIZONTAL?a&&t&&(l.fillStyle="rgba(128,128,128,0.33)",l.fillRect(Math.min(t,a),this.layout_.getPlotArea().y,Math.abs(a-t),this.layout_.getPlotArea().h)):e==h.VERTICAL&&r&&n&&(l.fillStyle="rgba(128,128,128,0.33)",l.fillRect(this.layout_.getPlotArea().x,Math.min(n,r),this.layout_.getPlotArea().w,Math.abs(r-n)))},j.prototype.clearZoomRect_=function(){this.currentZoomRectArgs_=null,this.canvas_ctx_.clearRect(0,0,this.width_,this.height_)},j.prototype.doZoomX_=function(e,t){this.currentZoomRectArgs_=null;var a=this.toDataXCoord(e),n=this.toDataXCoord(t);this.doZoomXDates_(a,n)},j.prototype.doZoomXDates_=function(e,t){var a=this,n=this.xAxisRange(),r=[e,t],o=this.getFunctionOption("zoomCallback");this.doAnimatedZoom(n,r,null,null,(function(){o&&o.call(a,e,t,a.yAxisRanges())}))},j.prototype.doZoomY_=function(e,t){var a=this;this.currentZoomRectArgs_=null;for(var n=this.yAxisRanges(),r=[],o=0;o<this.axes_.length;o++){var s=this.toDataYCoord(e,o),l=this.toDataYCoord(t,o);r.push([l,s])}var c=this.getFunctionOption("zoomCallback");this.doAnimatedZoom(null,null,n,r,(function(){if(c){var 
e=a.xAxisRange(),t=i(e,2),n=t[0],r=t[1];c.call(a,n,r,a.yAxisRanges())}}))},j.zoomAnimationFunction=function(e,t){return(1-Math.pow(1.5,-e))/(1-Math.pow(1.5,-t))},j.prototype.resetZoom=function(){var e=this,t=this.isZoomed("x"),a=this.isZoomed("y"),n=t||a;if(this.clearSelection(),n){var r=this.xAxisExtremes(),o=i(r,2),s=o[0],l=o[1],c=this.getBooleanOption("animatedZooms"),u=this.getFunctionOption("zoomCallback");if(!c)return this.dateWindow_=null,this.axes_.forEach((function(e){e.valueRange&&delete e.valueRange})),this.drawGraph_(),void(u&&u.call(this,s,l,this.yAxisRanges()));var d=null,h=null,p=null,f=null;t&&(d=this.xAxisRange(),h=[s,l]),a&&(p=this.yAxisRanges(),f=this.yAxisExtremes()),this.doAnimatedZoom(d,h,p,f,(function(){e.dateWindow_=null,e.axes_.forEach((function(e){e.valueRange&&delete e.valueRange})),u&&u.call(e,s,l,e.yAxisRanges())}))}},j.prototype.doAnimatedZoom=function(e,t,a,n,r){var o,i,s=this,l=this.getBooleanOption("animatedZooms")?j.ANIMATION_STEPS:1,c=[],u=[];if(null!==e&&null!==t)for(o=1;o<=l;o++)i=j.zoomAnimationFunction(o,l),c[o-1]=[e[0]*(1-i)+i*t[0],e[1]*(1-i)+i*t[1]];if(null!==a&&null!==n)for(o=1;o<=l;o++){i=j.zoomAnimationFunction(o,l);for(var d=[],p=0;p<this.axes_.length;p++)d.push([a[p][0]*(1-i)+i*n[p][0],a[p][1]*(1-i)+i*n[p][1]]);u[o-1]=d}h.repeatAndCleanup((function(e){if(u.length)for(var t=0;t<s.axes_.length;t++){var a=u[e][t];s.axes_[t].valueRange=[a[0],a[1]]}c.length&&(s.dateWindow_=c[e]),s.drawGraph_()}),l,j.ANIMATION_DURATION/l,r)},j.prototype.getArea=function(){return this.plotter_.area},j.prototype.eventToDomCoords=function(e){if(e.offsetX&&e.offsetY)return[e.offsetX,e.offsetY];var t=h.findPos(this.mouseEventElement_);return[h.pageX(e)-t.x,h.pageY(e)-t.y]},j.prototype.findClosestRow=function(e){for(var t=1/0,a=-1,n=this.layout_.points,r=0;r<n.length;r++)for(var o=n[r],i=o.length,s=0;s<i;s++){var l=o[s];if(h.isValidPoint(l,!0)){var c=Math.abs(l.canvasx-e);c<t&&(t=c,a=l.idx)}}return a},j.prototype.findClosestPoint=function(e,t){for(var a,n,r,o,i,s,l,c=1/0,u=this.layout_.points.length-1;u>=0;--u)for(var d=this.layout_.points[u],p=0;p<d.length;++p)o=d[p],h.isValidPoint(o)&&(a=(n=o.canvasx-e)*n+(r=o.canvasy-t)*r)<c&&(c=a,i=o,s=u,l=o.idx);return{row:l,seriesName:this.layout_.setNames[s],point:i}},j.prototype.findStackedPoint=function(e,t){for(var a,n,r=this.findClosestRow(e),o=0;o<this.layout_.points.length;++o){var i=r-this.getLeftBoundary_(o),s=this.layout_.points[o];if(!(i>=s.length)){var l=s[i];if(h.isValidPoint(l)){var c=l.canvasy;if(e>l.canvasx&&i+1<s.length){var u=s[i+1];h.isValidPoint(u)&&(d=u.canvasx-l.canvasx)>0&&(c+=(e-l.canvasx)/d*(u.canvasy-l.canvasy))}else if(e<l.canvasx&&i>0){var d,p=s[i-1];h.isValidPoint(p)&&(d=l.canvasx-p.canvasx)>0&&(c+=(l.canvasx-e)/d*(p.canvasy-l.canvasy))}(0===o||c<t)&&(a=l,n=o)}}}return{row:r,seriesName:this.layout_.setNames[n],point:a}},j.prototype.mouseMove_=function(e){var t=this.layout_.points;if(void 0!==t&&null!==t){var a=this.eventToDomCoords(e),n=a[0],r=a[1],o=!1;if(this.getOption("highlightSeriesOpts")&&!this.isSeriesLocked()){var i;i=this.getBooleanOption("stackedGraph")?this.findStackedPoint(n,r):this.findClosestPoint(n,r),o=this.setSelection(i.row,i.seriesName)}else{var s=this.findClosestRow(n);o=this.setSelection(s)}var l=this.getFunctionOption("highlightCallback");l&&o&&l.call(this,e,this.lastx_,this.selPoints_,this.lastRow_,this.highlightSet_)}},j.prototype.getLeftBoundary_=function(e){if(this.boundaryIds_[e])return this.boundaryIds_[e][0];for(var t=0;t<this.boundaryIds_.length;t++)if(void 
0!==this.boundaryIds_[t])return this.boundaryIds_[t][0];return 0},j.prototype.animateSelection_=function(e){void 0===this.fadeLevel&&(this.fadeLevel=0),void 0===this.animateId&&(this.animateId=0);var t=this.fadeLevel,a=e<0?t:10-t;if(a<=0)this.fadeLevel&&this.updateSelection_(1);else{var n=++this.animateId,r=this;h.repeatAndCleanup((function(t){r.animateId==n&&(r.fadeLevel+=e,0===r.fadeLevel?r.clearSelection():r.updateSelection_(r.fadeLevel/10))}),a,30,(function(){0!==r.fadeLevel&&e<0&&(r.fadeLevel=0,r.clearSelection())}))}},j.prototype.updateSelection_=function(e){this.cascadeEvents_("select",{selectedRow:-1===this.lastRow_?void 0:this.lastRow_,selectedX:-1===this.lastx_?void 0:this.lastx_,selectedPoints:this.selPoints_});var t,a=this.canvas_ctx_;if(this.getOption("highlightSeriesOpts")){a.clearRect(0,0,this.width_,this.height_);var n=1-this.getNumericOption("highlightSeriesBackgroundAlpha"),r=h.toRGB_(this.getOption("highlightSeriesBackgroundColor"));if(n){if(void 0===e)return void this.animateSelection_(1);n*=e,a.fillStyle="rgba("+r.r+","+r.g+","+r.b+","+n+")",a.fillRect(0,0,this.width_,this.height_)}this.plotter_._renderLineChart(this.highlightSet_,a)}else if(this.previousVerticalX_>=0){var o=0,i=this.attr_("labels");for(t=1;t<i.length;t++){var s=this.getNumericOption("highlightCircleSize",i[t]);s>o&&(o=s)}var l=this.previousVerticalX_;a.clearRect(l-o-1,0,2*o+2,this.height_)}if(this.selPoints_.length>0){var c=this.selPoints_[0].canvasx;for(a.save(),t=0;t<this.selPoints_.length;t++){var u=this.selPoints_[t];if(!isNaN(u.canvasy)){var d=this.getNumericOption("highlightCircleSize",u.name),p=this.getFunctionOption("drawHighlightPointCallback",u.name),f=this.plotter_.colors[u.name];p||(p=h.Circles.DEFAULT),a.lineWidth=this.getNumericOption("strokeWidth",u.name),a.strokeStyle=f,a.fillStyle=f,p.call(this,this,u.name,a,c,u.canvasy,f,d,u.idx)}}a.restore(),this.previousVerticalX_=c}},j.prototype.setSelection=function(e,t,a){this.selPoints_=[];var n=!1;if(!1!==e&&e>=0){e!=this.lastRow_&&(n=!0),this.lastRow_=e;for(var r=0;r<this.layout_.points.length;++r){var o=this.layout_.points[r],i=e-this.getLeftBoundary_(r);if(i>=0&&i<o.length&&o[i].idx==e)null!==(l=o[i]).yval&&this.selPoints_.push(l);else for(var s=0;s<o.length;++s){var l;if((l=o[s]).idx==e){null!==l.yval&&this.selPoints_.push(l);break}}}}else this.lastRow_>=0&&(n=!0),this.lastRow_=-1;return this.selPoints_.length?this.lastx_=this.selPoints_[0].xval:this.lastx_=-1,void 0!==t&&(this.highlightSet_!==t&&(n=!0),this.highlightSet_=t),void 0!==a&&(this.lockedSet_=a),n&&this.updateSelection_(void 0),n},j.prototype.mouseOut_=function(e){this.getFunctionOption("unhighlightCallback")&&this.getFunctionOption("unhighlightCallback").call(this,e),this.getBooleanOption("hideOverlayOnMouseOut")&&!this.lockedSet_&&this.clearSelection()},j.prototype.clearSelection=function(){this.cascadeEvents_("deselect",{}),this.lockedSet_=!1,this.fadeLevel?this.animateSelection_(-1):(this.canvas_ctx_.clearRect(0,0,this.width_,this.height_),this.fadeLevel=0,this.selPoints_=[],this.lastx_=-1,this.lastRow_=-1,this.highlightSet_=null)},j.prototype.getSelection=function(){if(!this.selPoints_||this.selPoints_.length<1)return-1;for(var e=0;e<this.layout_.points.length;e++)for(var t=this.layout_.points[e],a=0;a<t.length;a++)if(t[a].x==this.selPoints_[0].x)return t[a].idx;return-1},j.prototype.getHighlightSeries=function(){return this.highlightSet_},j.prototype.isSeriesLocked=function(){return 
this.lockedSet_},j.prototype.loadedEvent_=function(e){this.rawData_=this.parseCSV_(e),this.cascadeDataDidUpdateEvent_(),this.predraw_()},j.prototype.addXTicks_=function(){var e;e=this.dateWindow_?[this.dateWindow_[0],this.dateWindow_[1]]:this.xAxisExtremes();var t=this.optionsViewForAxis_("x"),a=t("ticker")(e[0],e[1],this.plotter_.area.w,t,this);this.layout_.setXTicks(a)},j.prototype.getHandlerClass_=function(){return this.attr_("dataHandler")?this.attr_("dataHandler"):this.fractions_?this.getBooleanOption("errorBars")?y.default:_.default:this.getBooleanOption("customBars")?v.default:this.getBooleanOption("errorBars")?b.default:m.default},j.prototype.predraw_=function(){var e=new Date;this.dataHandler_=new(this.getHandlerClass_()),this.layout_.computePlotArea(),this.computeYAxes_(),this.is_initial_draw_||(this.canvas_ctx_.restore(),this.hidden_ctx_.restore()),this.canvas_ctx_.save(),this.hidden_ctx_.save(),this.plotter_=new l.default(this,this.hidden_,this.hidden_ctx_,this.layout_),this.createRollInterface_(),this.cascadeEvents_("predraw"),this.rolledSeries_=[null];for(var t=1;t<this.numColumns();t++){var a=this.dataHandler_.extractSeries(this.rawData_,t,this.attributes_);this.rollPeriod_>1&&(a=this.dataHandler_.rollingAverage(a,this.rollPeriod_,this.attributes_)),this.rolledSeries_.push(a)}this.drawGraph_();var n=new Date;this.drawingTimeMs_=n-e},j.PointType=void 0,j.stackPoints_=function(e,t,a,n){for(var r=null,o=null,i=null,s=-1,l=0;l<e.length;++l){var c=e[l],u=c.xval;void 0===t[u]&&(t[u]=0);var d=c.yval;isNaN(d)||null===d?"none"==n?d=0:(function(t){if(!(s>=t))for(var a=t;a<e.length;++a)if(i=null,!isNaN(e[a].yval)&&null!==e[a].yval){s=a,i=e[a];break}}(l),d=o&&i&&"none"!=n?o.yval+(i.yval-o.yval)*((u-o.xval)/(i.xval-o.xval)):o&&"all"==n?o.yval:i&&"all"==n?i.yval:0):o=c;var h=t[u];r!=u&&(h+=d,t[u]=h),r=u,c.yval_stacked=h,h>a[1]&&(a[1]=h),h<a[0]&&(a[0]=h)}},j.prototype.gatherDatasets_=function(e,t){var a,n,r,o,i,s,l=[],c=[],u=[],d={};for(a=e.length-1;a>=1;a--)if(this.visibility()[a-1]){if(t){s=e[a];var h=t[0],p=t[1];for(r=null,o=null,n=0;n<s.length;n++)s[n][0]>=h&&null===r&&(r=n),s[n][0]<=p&&(o=n);null===r&&(r=0);for(var f=r,g=!0;g&&f>0;)g=null===s[--f][1];null===o&&(o=s.length-1);var m=o;for(g=!0;g&&m<s.length-1;)g=null===s[++m][1];f!==r&&(r=f),m!==o&&(o=m),l[a-1]=[r,o],s=s.slice(r,o+1)}else s=e[a],l[a-1]=[0,s.length-1];var b=this.attr_("labels")[a],v=this.dataHandler_.getExtremeYValues(s,t,this.getBooleanOption("stepPlot",b)),_=this.dataHandler_.seriesToPoints(s,b,l[a-1][0]);this.getBooleanOption("stackedGraph")&&(void 0===u[i=this.attributes_.axisForSeries(b)]&&(u[i]=[]),j.stackPoints_(_,u[i],v,this.getBooleanOption("stackedGraphNaNFill"))),d[b]=v,c[a]=_}return{points:c,extremes:d,boundaryIds:l}},j.prototype.drawGraph_=function(){var e=new Date,t=this.is_initial_draw_;this.is_initial_draw_=!1,this.layout_.removeAllDatasets(),this.setColors_(),this.attrs_.pointSize=.5*this.getNumericOption("highlightCircleSize");var a=this.gatherDatasets_(this.rolledSeries_,this.dateWindow_),n=a.points,r=a.extremes;this.boundaryIds_=a.boundaryIds,this.setIndexByName_={};for(var o=this.attr_("labels"),i=0,s=1;s<n.length;s++)this.visibility()[s-1]&&(this.layout_.addDataset(o[s],n[s]),this.datasetIndex_[s]=i++);for(s=0;s<o.length;s++)this.setIndexByName_[o[s]]=s;if(this.computeYAxisRanges_(r),this.layout_.setYAxes(this.axes_),this.addXTicks_(),this.layout_.evaluate(),this.renderGraph_(t),this.getStringOption("timingName")){var l=new Date;console.log(this.getStringOption("timingName")+" - drawGraph: 
"+(l-e)+"ms")}},j.prototype.renderGraph_=function(e){this.cascadeEvents_("clearChart"),this.plotter_.clear();var t=this.getFunctionOption("underlayCallback");t&&t.call(this,this.hidden_ctx_,this.layout_.getPlotArea(),this,this);var a={canvas:this.hidden_,drawingContext:this.hidden_ctx_};this.cascadeEvents_("willDrawChart",a),this.plotter_.render(),this.cascadeEvents_("didDrawChart",a),this.lastRow_=-1,this.canvas_.getContext("2d").clearRect(0,0,this.width_,this.height_);var n=this.getFunctionOption("drawCallback");if(null!==n&&n.call(this,this,e),e)for(this.readyFired_=!0;this.readyFns_.length>0;)this.readyFns_.pop()(this)},j.prototype.computeYAxes_=function(){var e,t,a;for(this.axes_=[],e=0;e<this.attributes_.numAxes();e++)t={g:this},h.update(t,this.attributes_.axisOptions(e)),this.axes_[e]=t;for(e=0;e<this.axes_.length;e++)if(0===e)(a=(t=this.optionsViewForAxis_("y"+(e?"2":"")))("valueRange"))&&(this.axes_[e].valueRange=a);else{var n=this.user_attrs_.axes;n&&n.y2&&(a=n.y2.valueRange)&&(this.axes_[e].valueRange=a)}},j.prototype.numAxes=function(){return this.attributes_.numAxes()},j.prototype.axisPropertiesForSeries=function(e){return this.axes_[this.attributes_.axisForSeries(e)]},j.prototype.computeYAxisRanges_=function(e){for(var t,a,n,r,o,i=function(e){return isNaN(parseFloat(e))},s=this.attributes_.numAxes(),l=0;l<s;l++){var c=this.axes_[l],u=this.attributes_.getForAxis("logscale",l),d=this.attributes_.getForAxis("includeZero",l),p=this.attributes_.getForAxis("independentTicks",l);n=this.attributes_.seriesForAxis(l),t=!0,r=.1;var f=this.getNumericOption("yRangePad");if(null!==f&&(t=!1,r=f/this.plotter_.area.h),0===n.length)c.extremeRange=[0,1];else{for(var g,m,b=1/0,v=-1/0,_=0;_<n.length;_++)e.hasOwnProperty(n[_])&&(null!==(g=e[n[_]][0])&&(b=Math.min(g,b)),null!==(m=e[n[_]][1])&&(v=Math.max(m,v)));d&&!u&&(b>0&&(b=0),v<0&&(v=0)),b==1/0&&(b=0),v==-1/0&&(v=1),0===(a=v-b)&&(0!==v?a=Math.abs(v):(v=1,a=1));var y=v,O=b;t&&(u?(y=v+r*a,O=b):((O=b-r*a)<0&&b>=0&&(O=0),(y=v+r*a)>0&&v<=0&&(y=0))),c.extremeRange=[O,y]}if(c.valueRange){var x=i(c.valueRange[0])?c.extremeRange[0]:c.valueRange[0],w=i(c.valueRange[1])?c.extremeRange[1]:c.valueRange[1];c.computedValueRange=[x,w]}else c.computedValueRange=c.extremeRange;if(!t)if((x=c.computedValueRange[0])===(w=c.computedValueRange[1])&&(x-=.5,w+=.5),u){var E=r/(2*r-1),S=(r-1)/(2*r-1);c.computedValueRange[0]=h.logRangeFraction(x,w,E),c.computedValueRange[1]=h.logRangeFraction(x,w,S)}else a=w-x,c.computedValueRange[0]=x-a*r,c.computedValueRange[1]=w+a*r;if(p){c.independentTicks=p;var C=(A=this.optionsViewForAxis_("y"+(l?"2":"")))("ticker");c.ticks=C(c.computedValueRange[0],c.computedValueRange[1],this.plotter_.area.h,A,this),o||(o=c)}}if(void 0===o)throw'Configuration Error: At least one axis has to have the "independentTicks" option activated.';for(l=0;l<s;l++)if(!(c=this.axes_[l]).independentTicks){C=(A=this.optionsViewForAxis_("y"+(l?"2":"")))("ticker");for(var A,k=o.ticks,j=o.computedValueRange[1]-o.computedValueRange[0],T=c.computedValueRange[1]-c.computedValueRange[0],D=[],P=0;P<k.length;P++){var M=(k[P].v-o.computedValueRange[0])/j,L=c.computedValueRange[0]+M*T;D.push(L)}c.ticks=C(c.computedValueRange[0],c.computedValueRange[1],this.plotter_.area.h,A,this,D)}},j.prototype.detectTypeFromString_=function(e){var 
t=!1,a=e.indexOf("-");a>0&&"e"!=e[a-1]&&"E"!=e[a-1]||e.indexOf("/")>=0||isNaN(parseFloat(e))?t=!0:8==e.length&&e>"19700101"&&e<"20371231"&&(t=!0),this.setXAxisOptions_(t)},j.prototype.setXAxisOptions_=function(e){e?(this.attrs_.xValueParser=h.dateParser,this.attrs_.axes.x.valueFormatter=h.dateValueFormatter,this.attrs_.axes.x.ticker=d.dateTicker,this.attrs_.axes.x.axisLabelFormatter=h.dateAxisLabelFormatter):(this.attrs_.xValueParser=function(e){return parseFloat(e)},this.attrs_.axes.x.valueFormatter=function(e){return e},this.attrs_.axes.x.ticker=d.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter)},j.prototype.parseCSV_=function(e){var t,a,n=[],r=h.detectLineDelimiter(e),o=e.split(r||"\n"),i=this.getStringOption("delimiter");-1==o[0].indexOf(i)&&o[0].indexOf("\t")>=0&&(i="\t");var s=0;"labels"in this.user_attrs_||(s=1,this.attrs_.labels=o[0].split(i),this.attributes_.reparseSeries());for(var l,c=!1,u=this.attr_("labels").length,d=!1,p=s;p<o.length;p++){var f=o[p];if(0!==f.length&&"#"!=f[0]){var g=f.split(i);if(!(g.length<2)){var m=[];if(c||(this.detectTypeFromString_(g[0]),l=this.getFunctionOption("xValueParser"),c=!0),m[0]=l(g[0],this),this.fractions_)for(a=1;a<g.length;a++)2!=(t=g[a].split("/")).length?(console.error('Expected fractional "num/den" values in CSV data but found a value \''+g[a]+"' on line "+(1+p)+" ('"+f+"') which is not of this form."),m[a]=[0,0]):m[a]=[h.parseFloat_(t[0],p,f),h.parseFloat_(t[1],p,f)];else if(this.getBooleanOption("errorBars"))for(g.length%2!=1&&console.error("Expected alternating (value, stdev.) pairs in CSV data but line "+(1+p)+" has an odd number of values ("+(g.length-1)+"): '"+f+"'"),a=1;a<g.length;a+=2)m[(a+1)/2]=[h.parseFloat_(g[a],p,f),h.parseFloat_(g[a+1],p,f)];else if(this.getBooleanOption("customBars"))for(a=1;a<g.length;a++){var b=g[a];/^ *$/.test(b)?m[a]=[null,null,null]:3==(t=b.split(";")).length?m[a]=[h.parseFloat_(t[0],p,f),h.parseFloat_(t[1],p,f),h.parseFloat_(t[2],p,f)]:console.warn('When using customBars, values must be either blank or "low;center;high" tuples (got "'+b+'" on line '+(1+p))}else for(a=1;a<g.length;a++)m[a]=h.parseFloat_(g[a],p,f);if(n.length>0&&m[0]<n[n.length-1][0]&&(d=!0),m.length!=u&&console.error("Number of columns in line "+p+" ("+m.length+") does not agree with number of labels ("+u+") "+f),0===p&&this.attr_("labels")){var v=!0;for(a=0;v&&a<m.length;a++)m[a]&&(v=!1);if(v){console.warn("The dygraphs 'labels' option is set, but the first row of CSV data ('"+f+"') appears to also contain labels. Will drop the CSV labels and use the option labels.");continue}}n.push(m)}}}return d&&(console.warn("CSV is out of order; order it correctly to speed loading."),n.sort((function(e,t){return e[0]-t[0]}))),n},j.prototype.parseArray_=function(e){if(0===e.length)return console.error("Can't plot empty data set"),null;if(0===e[0].length)return console.error("Data set cannot contain an empty row"),null;var t;if(function(e){var t=e[0],a=t[0];if("number"!=typeof a&&!h.isDateLike(a))throw new Error("Expected number or date but got "+typeof a+": "+a+".");for(var n=1;n<t.length;n++){var r=t[n];if(null!==r&&void 0!==r&&"number"!=typeof r&&!h.isArrayLike(r))throw new Error("Expected number or array but got "+typeof r+": "+r+".")}}(e),null===this.attr_("labels")){for(console.warn("Using default labels. 
Set labels explicitly via 'labels' in the options parameter"),this.attrs_.labels=["X"],t=1;t<e[0].length;t++)this.attrs_.labels.push("Y"+t);this.attributes_.reparseSeries()}else{var a=this.attr_("labels");if(a.length!=e[0].length)return console.error("Mismatch between number of labels ("+a+") and number of columns in array ("+e[0].length+")"),null}if(h.isDateLike(e[0][0])){this.attrs_.axes.x.valueFormatter=h.dateValueFormatter,this.attrs_.axes.x.ticker=d.dateTicker,this.attrs_.axes.x.axisLabelFormatter=h.dateAxisLabelFormatter;var n=h.clone(e);for(t=0;t<e.length;t++){if(0===n[t].length)return console.error("Row "+(1+t)+" of data is empty"),null;if(null===n[t][0]||"function"!=typeof n[t][0].getTime||isNaN(n[t][0].getTime()))return console.error("x value in row "+(1+t)+" is not a Date"),null;n[t][0]=n[t][0].getTime()}return n}return this.attrs_.axes.x.valueFormatter=function(e){return e},this.attrs_.axes.x.ticker=d.numericTicks,this.attrs_.axes.x.axisLabelFormatter=h.numberAxisLabelFormatter,e},j.prototype.parseDataTable_=function(e){var t=e.getNumberOfColumns(),a=e.getNumberOfRows(),n=e.getColumnType(0);if("date"==n||"datetime"==n)this.attrs_.xValueParser=h.dateParser,this.attrs_.axes.x.valueFormatter=h.dateValueFormatter,this.attrs_.axes.x.ticker=d.dateTicker,this.attrs_.axes.x.axisLabelFormatter=h.dateAxisLabelFormatter;else{if("number"!=n)throw new Error("only 'date', 'datetime' and 'number' types are supported for column 1 of DataTable input (Got '"+n+"')");this.attrs_.xValueParser=function(e){return parseFloat(e)},this.attrs_.axes.x.valueFormatter=function(e){return e},this.attrs_.axes.x.ticker=d.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter}var r,o,i=[],s={},l=!1;for(r=1;r<t;r++){var c=e.getColumnType(r);if("number"==c)i.push(r);else{if("string"!=c||!this.getBooleanOption("displayAnnotations"))throw new Error("Only 'number' is supported as a dependent type with Gviz. 
'string' is only supported if displayAnnotations is true");var u=i[i.length-1];s.hasOwnProperty(u)?s[u].push(r):s[u]=[r],l=!0}}var p=[e.getColumnLabel(0)];for(r=0;r<i.length;r++)p.push(e.getColumnLabel(i[r])),this.getBooleanOption("errorBars")&&(r+=1);this.attrs_.labels=p,t=p.length;var f=[],g=!1,m=[];for(r=0;r<a;r++){var b=[];if(void 0!==e.getValue(r,0)&&null!==e.getValue(r,0)){if("date"==n||"datetime"==n?b.push(e.getValue(r,0).getTime()):b.push(e.getValue(r,0)),this.getBooleanOption("errorBars"))for(o=0;o<t-1;o++)b.push([e.getValue(r,1+2*o),e.getValue(r,2+2*o)]);else{for(o=0;o<i.length;o++){var v=i[o];if(b.push(e.getValue(r,v)),l&&s.hasOwnProperty(v)&&null!==e.getValue(r,s[v][0])){var _={};_.series=e.getColumnLabel(v),_.xval=b[0],_.shortText=function(e){var t=String.fromCharCode(65+e%26);for(e=Math.floor(e/26);e>0;)t=String.fromCharCode(65+(e-1)%26)+t.toLowerCase(),e=Math.floor((e-1)/26);return t}(m.length),_.text="";for(var y=0;y<s[v].length;y++)y&&(_.text+="\n"),_.text+=e.getValue(r,s[v][y]);m.push(_)}}for(o=0;o<b.length;o++)isFinite(b[o])||(b[o]=null)}f.length>0&&b[0]<f[f.length-1][0]&&(g=!0),f.push(b)}else console.warn("Ignoring row "+r+" of DataTable because of undefined or null first column.")}g&&(console.warn("DataTable is out of order; order it correctly to speed loading."),f.sort((function(e,t){return e[0]-t[0]}))),this.rawData_=f,m.length>0&&this.setAnnotations(m,!0),this.attributes_.reparseSeries()},j.prototype.cascadeDataDidUpdateEvent_=function(){this.cascadeEvents_("dataDidUpdate",{})},j.prototype.start_=function(){var e=this.file_;if("function"==typeof e&&(e=e()),h.isArrayLike(e))this.rawData_=this.parseArray_(e),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("object"==typeof e&&"function"==typeof e.getColumnRange)this.parseDataTable_(e),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if("string"==typeof e)if(h.detectLineDelimiter(e))this.loadedEvent_(e);else{var t;t=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject("Microsoft.XMLHTTP");var a=this;t.onreadystatechange=function(){4==t.readyState&&(200!==t.status&&0!==t.status||a.loadedEvent_(t.responseText))},t.open("GET",e,!0),t.send(null)}else console.error("Unknown data format: "+typeof e)},j.prototype.updateOptions=function(e,t){void 0===t&&(t=!1);var a=e.file,n=j.copyUserAttrs_(e);"rollPeriod"in n&&(this.rollPeriod_=n.rollPeriod),"dateWindow"in n&&(this.dateWindow_=n.dateWindow);var r=h.isPixelChangingOptionList(this.attr_("labels"),n);h.updateDeep(this.user_attrs_,n),this.attributes_.reparseSeries(),a?(this.cascadeEvents_("dataWillUpdate",{}),this.file_=a,t||this.start_()):t||(r?this.predraw_():this.renderGraph_(!1))},j.copyUserAttrs_=function(e){var t={};for(var a in e)e.hasOwnProperty(a)&&"file"!=a&&e.hasOwnProperty(a)&&(t[a]=e[a]);return t},j.prototype.resize=function(e,t){if(!this.resize_lock){this.resize_lock=!0,null===e!=(null===t)&&(console.warn("Dygraph.resize() should be called with zero parameters or two non-NULL parameters. 
Pretending it was zero."),e=t=null);var a=this.width_,n=this.height_;e?(this.maindiv_.style.width=e+"px",this.maindiv_.style.height=t+"px",this.width_=e,this.height_=t):(this.width_=this.maindiv_.clientWidth,this.height_=this.maindiv_.clientHeight),a==this.width_&&n==this.height_||(this.resizeElements_(),this.predraw_()),this.resize_lock=!1}},j.prototype.adjustRoll=function(e){this.rollPeriod_=e,this.predraw_()},j.prototype.visibility=function(){for(this.getOption("visibility")||(this.attrs_.visibility=[]);this.getOption("visibility").length<this.numColumns()-1;)this.attrs_.visibility.push(!0);return this.getOption("visibility")},j.prototype.setVisibility=function(e,t){var a=this.visibility(),n=!1;if(Array.isArray(e)||(null!==e&&"object"==typeof e?n=!0:e=[e]),n)for(var r in e)e.hasOwnProperty(r)&&(r<0||r>=a.length?console.warn("Invalid series number in setVisibility: "+r):a[r]=e[r]);else for(r=0;r<e.length;r++)"boolean"==typeof e[r]?r>=a.length?console.warn("Invalid series number in setVisibility: "+r):a[r]=e[r]:e[r]<0||e[r]>=a.length?console.warn("Invalid series number in setVisibility: "+e[r]):a[e[r]]=t;this.predraw_()},j.prototype.size=function(){return{width:this.width_,height:this.height_}},j.prototype.setAnnotations=function(e,t){this.annotations_=e,this.layout_?(this.layout_.setAnnotations(this.annotations_),t||this.predraw_()):console.warn("Tried to setAnnotations before dygraph was ready. Try setting them in a ready() block. See dygraphs.com/tests/annotation.html")},j.prototype.annotations=function(){return this.annotations_},j.prototype.getLabels=function(){var e=this.attr_("labels");return e?e.slice():null},j.prototype.indexFromSetName=function(e){return this.setIndexByName_[e]},j.prototype.getRowForX=function(e){for(var t=0,a=this.numRows()-1;t<=a;){var n=a+t>>1,r=this.getValue(n,0);if(r<e)t=n+1;else if(r>e)a=n-1;else{if(t==n)return n;a=n}}return null},j.prototype.ready=function(e){this.is_initial_draw_?this.readyFns_.push(e):e.call(this,this)},j.prototype.addAndTrackEvent=function(e,t,a){h.addEvent(e,t,a),this.registeredEvents_.push({elem:e,type:t,fn:a})},j.prototype.removeTrackedEvents_=function(){if(this.registeredEvents_)for(var e=0;e<this.registeredEvents_.length;e++){var 
t=this.registeredEvents_[e];h.removeEvent(t.elem,t.type,t.fn)}this.registeredEvents_=[]},j.PLUGINS=[C.default,w.default,A.default,E.default,x.default,S.default],j.GVizChart=k.default,j.DASHED_LINE=h.DASHED_LINE,j.DOT_DASH_LINE=h.DOT_DASH_LINE,j.dateAxisLabelFormatter=h.dateAxisLabelFormatter,j.toRGB_=h.toRGB_,j.findPos=h.findPos,j.pageX=h.pageX,j.pageY=h.pageY,j.dateString_=h.dateString_,j.defaultInteractionModel=u.default.defaultModel,j.nonInteractiveModel=j.nonInteractiveModel_=u.default.nonInteractiveModel_,j.Circles=h.Circles,j.Plugins={Legend:C.default,Axes:w.default,Annotations:x.default,ChartLabels:E.default,Grid:S.default,RangeSelector:A.default},j.DataHandlers={DefaultHandler:m.default,BarsHandler:O.default,CustomBarsHandler:v.default,DefaultFractionHandler:_.default,ErrorBarsHandler:b.default,FractionsBarsHandler:y.default},j.startPan=u.default.startPan,j.startZoom=u.default.startZoom,j.movePan=u.default.movePan,j.moveZoom=u.default.moveZoom,j.endPan=u.default.endPan,j.endZoom=u.default.endZoom,j.numericLinearTicks=d.numericLinearTicks,j.numericTicks=d.numericTicks,j.dateTicker=d.dateTicker,j.Granularity=d.Granularity,j.getDateAxis=d.getDateAxis,j.floatFormat=h.floatFormat,a.default=j,t.exports=a.default}).call(this,e("_process"))},{"./datahandler/bars":5,"./datahandler/bars-custom":2,"./datahandler/bars-error":3,"./datahandler/bars-fractions":4,"./datahandler/default":8,"./datahandler/default-fractions":7,"./dygraph-canvas":9,"./dygraph-default-attrs":10,"./dygraph-gviz":11,"./dygraph-interaction-model":12,"./dygraph-layout":13,"./dygraph-options":15,"./dygraph-options-reference":14,"./dygraph-tickers":16,"./dygraph-utils":17,"./iframe-tarp":19,"./plugins/annotations":20,"./plugins/axes":21,"./plugins/chart-labels":22,"./plugins/grid":23,"./plugins/legend":24,"./plugins/range-selector":25,_process:1}],19:[function(e,t,a){"use strict";function n(){this.tarps=[]}Object.defineProperty(a,"__esModule",{value:!0});var r=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("./dygraph-utils"));n.prototype.cover=function(){for(var e=document.getElementsByTagName("iframe"),t=0;t<e.length;t++){var a=e[t],n=r.findPos(a),o=n.x,i=n.y,s=a.offsetWidth,l=a.offsetHeight,c=document.createElement("div");c.style.position="absolute",c.style.left=o+"px",c.style.top=i+"px",c.style.width=s+"px",c.style.height=l+"px",c.style.zIndex=999,document.body.appendChild(c),this.tarps.push(c)}},n.prototype.uncover=function(){for(var e=0;e<this.tarps.length;e++)this.tarps[e].parentNode.removeChild(this.tarps[e]);this.tarps=[]},a.default=n,t.exports=a.default},{"./dygraph-utils":17}],20:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(){this.annotations_=[]};n.prototype.toString=function(){return"Annotations Plugin"},n.prototype.activate=function(e){return{clearChart:this.clearChart,didDrawChart:this.didDrawChart}},n.prototype.detachLabels=function(){for(var e=0;e<this.annotations_.length;e++){var t=this.annotations_[e];t.parentNode&&t.parentNode.removeChild(t),this.annotations_[e]=null}this.annotations_=[]},n.prototype.clearChart=function(e){this.detachLabels()},n.prototype.didDrawChart=function(e){var t=e.dygraph,a=t.layout_.annotated_points;if(a&&0!==a.length)for(var n=e.canvas.parentNode,r=function(e,a,n){return function(r){var o=n.annotation;o.hasOwnProperty(e)?o[e](o,n,t,r):t.getOption(a)&&t.getOption(a)(o,n,t,r)}},o=e.dygraph.getArea(),i={},s=0;s<a.length;s++){var 
l=a[s];if(!(l.canvasx<o.x||l.canvasx>o.x+o.w||l.canvasy<o.y||l.canvasy>o.y+o.h)){var c=l.annotation,u=6;c.hasOwnProperty("tickHeight")&&(u=c.tickHeight);var d=document.createElement("div");d.style.fontSize=t.getOption("axisLabelFontSize")+"px";var h="dygraph-annotation";c.hasOwnProperty("icon")||(h+=" dygraphDefaultAnnotation dygraph-default-annotation"),c.hasOwnProperty("cssClass")&&(h+=" "+c.cssClass),d.className=h;var p=c.hasOwnProperty("width")?c.width:16,f=c.hasOwnProperty("height")?c.height:16;if(c.hasOwnProperty("icon")){var g=document.createElement("img");g.src=c.icon,g.width=p,g.height=f,d.appendChild(g)}else l.annotation.hasOwnProperty("shortText")&&d.appendChild(document.createTextNode(l.annotation.shortText));var m=l.canvasx-p/2;d.style.left=m+"px";var b=0;if(c.attachAtBottom){var v=o.y+o.h-f-u;i[m]?v-=i[m]:i[m]=0,i[m]+=u+f,b=v}else b=l.canvasy-f-u;d.style.top=b+"px",d.style.width=p+"px",d.style.height=f+"px",d.title=l.annotation.text,d.style.color=t.colorsMap_[l.name],d.style.borderColor=t.colorsMap_[l.name],c.div=d,t.addAndTrackEvent(d,"click",r("clickHandler","annotationClickHandler",l)),t.addAndTrackEvent(d,"mouseover",r("mouseOverHandler","annotationMouseOverHandler",l)),t.addAndTrackEvent(d,"mouseout",r("mouseOutHandler","annotationMouseOutHandler",l)),t.addAndTrackEvent(d,"dblclick",r("dblClickHandler","annotationDblClickHandler",l)),n.appendChild(d),this.annotations_.push(d);var _=e.drawingContext;_.save(),_.strokeStyle=c.hasOwnProperty("tickColor")?c.tickColor:t.colorsMap_[l.name],_.lineWidth=c.hasOwnProperty("tickWidth")?c.tickWidth:t.getOption("strokeWidth"),_.beginPath(),c.attachAtBottom?(v=b+f,_.moveTo(l.canvasx,v),_.lineTo(l.canvasx,v+u)):(_.moveTo(l.canvasx,l.canvasy),_.lineTo(l.canvasx,l.canvasy-2-u)),_.closePath(),_.stroke(),_.restore()}}},n.prototype.destroy=function(){this.detachLabels()},a.default=n,t.exports=a.default},{}],21:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("../dygraph-utils")),r=function(){this.xlabels_=[],this.ylabels_=[]};r.prototype.toString=function(){return"Axes Plugin"},r.prototype.activate=function(e){return{layout:this.layout,clearChart:this.clearChart,willDrawChart:this.willDrawChart}},r.prototype.layout=function(e){var t,a=e.dygraph;if(a.getOptionForAxis("drawAxis","y")){var n=a.getOptionForAxis("axisLabelWidth","y")+2*a.getOptionForAxis("axisTickSize","y");e.reserveSpaceLeft(n)}a.getOptionForAxis("drawAxis","x")&&(t=a.getOption("xAxisHeight")?a.getOption("xAxisHeight"):a.getOptionForAxis("axisLabelFontSize","x")+2*a.getOptionForAxis("axisTickSize","x"),e.reserveSpaceBottom(t)),2==a.numAxes()?a.getOptionForAxis("drawAxis","y2")&&(n=a.getOptionForAxis("axisLabelWidth","y2")+2*a.getOptionForAxis("axisTickSize","y2"),e.reserveSpaceRight(n)):a.numAxes()>2&&a.error("Only two y-axes are supported at this time. 
(Trying to use "+a.numAxes()+")")},r.prototype.detachLabels=function(){function e(e){for(var t=0;t<e.length;t++){var a=e[t];a.parentNode&&a.parentNode.removeChild(a)}}e(this.xlabels_),e(this.ylabels_),this.xlabels_=[],this.ylabels_=[]},r.prototype.clearChart=function(e){this.detachLabels()},r.prototype.willDrawChart=function(e){function t(e){return Math.round(e)+.5}function a(e){return Math.round(e)-.5}var r=this,o=e.dygraph;if(o.getOptionForAxis("drawAxis","x")||o.getOptionForAxis("drawAxis","y")||o.getOptionForAxis("drawAxis","y2")){var i,s,l,c=e.drawingContext,u=e.canvas.parentNode,d=o.width_,h=o.height_,p=function(e){return{position:"absolute",fontSize:o.getOptionForAxis("axisLabelFontSize",e)+"px",width:o.getOptionForAxis("axisLabelWidth",e)+"px"}},f={x:p("x"),y:p("y"),y2:p("y2")},g=function(e,t,a){var r=document.createElement("div"),o=f["y2"==a?"y2":t];n.update(r.style,o);var i=document.createElement("div");return i.className="dygraph-axis-label dygraph-axis-label-"+t+(a?" dygraph-axis-label-"+a:""),i.innerHTML=e,r.appendChild(i),r};c.save();var m=o.layout_,b=e.dygraph.plotter_.area,v=function(e){return function(t){return o.getOptionForAxis(t,e)}};if(o.getOptionForAxis("drawAxis","y")){if(m.yticks&&m.yticks.length>0){var _=o.numAxes(),y=[v("y"),v("y2")];m.yticks.forEach((function(e){if(void 0!==e.label){s=b.x;var t="y1",a=y[0];1==e.axis&&(s=b.x+b.w,t="y2",a=y[1]);var n=a("axisLabelFontSize");l=b.y+e.pos*b.h,i=g(e.label,"y",2==_?t:null);var o=l-n/2;o<0&&(o=0),o+n+3>h?i.style.bottom="0":i.style.top=o+"px",0===e.axis?(i.style.left=b.x-a("axisLabelWidth")-a("axisTickSize")+"px",i.style.textAlign="right"):1==e.axis&&(i.style.left=b.x+b.w+a("axisTickSize")+"px",i.style.textAlign="left"),i.style.width=a("axisLabelWidth")+"px",u.appendChild(i),r.ylabels_.push(i)}}));var O=this.ylabels_[0],x=o.getOptionForAxis("axisLabelFontSize","y");parseInt(O.style.top,10)+x>h-x&&(O.style.top=parseInt(O.style.top,10)-x/2+"px")}var w;o.getOption("drawAxesAtZero")?(((C=o.toPercentXCoord(0))>1||C<0||isNaN(C))&&(C=0),w=t(b.x+C*b.w)):w=t(b.x),c.strokeStyle=o.getOptionForAxis("axisLineColor","y"),c.lineWidth=o.getOptionForAxis("axisLineWidth","y"),c.beginPath(),c.moveTo(w,a(b.y)),c.lineTo(w,a(b.y+b.h)),c.closePath(),c.stroke(),2==o.numAxes()&&(c.strokeStyle=o.getOptionForAxis("axisLineColor","y2"),c.lineWidth=o.getOptionForAxis("axisLineWidth","y2"),c.beginPath(),c.moveTo(a(b.x+b.w),a(b.y)),c.lineTo(a(b.x+b.w),a(b.y+b.h)),c.closePath(),c.stroke())}if(o.getOptionForAxis("drawAxis","x")){if(m.xticks){var E=v("x");m.xticks.forEach((function(e){if(void 0!==e.label){s=b.x+e.pos*b.w,l=b.y+b.h,(i=g(e.label,"x")).style.textAlign="center",i.style.top=l+E("axisTickSize")+"px";var t=s-E("axisLabelWidth")/2;t+E("axisLabelWidth")>d&&(t=d-E("axisLabelWidth"),i.style.textAlign="right"),t<0&&(t=0,i.style.textAlign="left"),i.style.left=t+"px",i.style.width=E("axisLabelWidth")+"px",u.appendChild(i),r.xlabels_.push(i)}}))}var S,C;c.strokeStyle=o.getOptionForAxis("axisLineColor","x"),c.lineWidth=o.getOptionForAxis("axisLineWidth","x"),c.beginPath(),o.getOption("drawAxesAtZero")?(((C=o.toPercentYCoord(0,0))>1||C<0)&&(C=1),S=a(b.y+C*b.h)):S=a(b.y+b.h),c.moveTo(t(b.x),S),c.lineTo(t(b.x+b.w),S),c.closePath(),c.stroke()}c.restore()}},a.default=r,t.exports=a.default},{"../dygraph-utils":17}],22:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(){this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};n.prototype.toString=function(){return"ChartLabels 
Plugin"},n.prototype.activate=function(e){return{layout:this.layout,didDrawChart:this.didDrawChart}};var r=function(e){var t=document.createElement("div");return t.style.position="absolute",t.style.left=e.x+"px",t.style.top=e.y+"px",t.style.width=e.w+"px",t.style.height=e.h+"px",t};n.prototype.detachLabels_=function(){for(var e=[this.title_div_,this.xlabel_div_,this.ylabel_div_,this.y2label_div_],t=0;t<e.length;t++){var a=e[t];a&&a.parentNode&&a.parentNode.removeChild(a)}this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};var o=function(e,t,a,n,r){var o=document.createElement("div");o.style.position="absolute",o.style.left=1==a?"0px":t.x+"px",o.style.top=t.y+"px",o.style.width=t.w+"px",o.style.height=t.h+"px",o.style.fontSize=e.getOption("yLabelWidth")-2+"px";var i=document.createElement("div");i.style.position="absolute",i.style.width=t.h+"px",i.style.height=t.w+"px",i.style.top=t.h/2-t.w/2+"px",i.style.left=t.w/2-t.h/2+"px",i.className="dygraph-label-rotate-"+(1==a?"right":"left");var s=document.createElement("div");return s.className=n,s.innerHTML=r,i.appendChild(s),o.appendChild(i),o};n.prototype.layout=function(e){this.detachLabels_();var t=e.dygraph,a=e.chart_div;if(t.getOption("title")){var n=e.reserveSpaceTop(t.getOption("titleHeight"));this.title_div_=r(n),this.title_div_.style.fontSize=t.getOption("titleHeight")-8+"px",(i=document.createElement("div")).className="dygraph-label dygraph-title",i.innerHTML=t.getOption("title"),this.title_div_.appendChild(i),a.appendChild(this.title_div_)}if(t.getOption("xlabel")){var i,s=e.reserveSpaceBottom(t.getOption("xLabelHeight"));this.xlabel_div_=r(s),this.xlabel_div_.style.fontSize=t.getOption("xLabelHeight")-2+"px",(i=document.createElement("div")).className="dygraph-label dygraph-xlabel",i.innerHTML=t.getOption("xlabel"),this.xlabel_div_.appendChild(i),a.appendChild(this.xlabel_div_)}if(t.getOption("ylabel")){var l=e.reserveSpaceLeft(0);this.ylabel_div_=o(t,l,1,"dygraph-label dygraph-ylabel",t.getOption("ylabel")),a.appendChild(this.ylabel_div_)}if(t.getOption("y2label")&&2==t.numAxes()){var c=e.reserveSpaceRight(0);this.y2label_div_=o(t,c,2,"dygraph-label dygraph-y2label",t.getOption("y2label")),a.appendChild(this.y2label_div_)}},n.prototype.didDrawChart=function(e){var t=e.dygraph;this.title_div_&&(this.title_div_.children[0].innerHTML=t.getOption("title")),this.xlabel_div_&&(this.xlabel_div_.children[0].innerHTML=t.getOption("xlabel")),this.ylabel_div_&&(this.ylabel_div_.children[0].children[0].innerHTML=t.getOption("ylabel")),this.y2label_div_&&(this.y2label_div_.children[0].children[0].innerHTML=t.getOption("y2label"))},n.prototype.clearChart=function(){},n.prototype.destroy=function(){this.detachLabels_()},a.default=n,t.exports=a.default},{}],23:[function(e,t,a){"use strict";Object.defineProperty(a,"__esModule",{value:!0});var n=function(){};n.prototype.toString=function(){return"Gridline Plugin"},n.prototype.activate=function(e){return{willDrawChart:this.willDrawChart}},n.prototype.willDrawChart=function(e){function t(e){return Math.round(e)+.5}function a(e){return Math.round(e)-.5}var n,r,o,i=e.dygraph,s=e.drawingContext,l=i.layout_,c=e.dygraph.plotter_.area;if(i.getOptionForAxis("drawGrid","y")){for(var 
u=["y","y2"],d=[],h=[],p=[],f=[],g=[],m=0;m<u.length;m++)p[m]=i.getOptionForAxis("drawGrid",u[m]),p[m]&&(d[m]=i.getOptionForAxis("gridLineColor",u[m]),h[m]=i.getOptionForAxis("gridLineWidth",u[m]),g[m]=i.getOptionForAxis("gridLinePattern",u[m]),f[m]=g[m]&&g[m].length>=2);o=l.yticks,s.save(),o.forEach((function(e){if(e.has_tick){var o=e.axis;p[o]&&(s.save(),f[o]&&s.setLineDash&&s.setLineDash(g[o]),s.strokeStyle=d[o],s.lineWidth=h[o],n=t(c.x),r=a(c.y+e.pos*c.h),s.beginPath(),s.moveTo(n,r),s.lineTo(n+c.w,r),s.stroke(),s.restore())}})),s.restore()}i.getOptionForAxis("drawGrid","x")&&(o=l.xticks,s.save(),g=i.getOptionForAxis("gridLinePattern","x"),(f=g&&g.length>=2)&&s.setLineDash&&s.setLineDash(g),s.strokeStyle=i.getOptionForAxis("gridLineColor","x"),s.lineWidth=i.getOptionForAxis("gridLineWidth","x"),o.forEach((function(e){e.has_tick&&(n=t(c.x+e.pos*c.w),r=a(c.y+c.h),s.beginPath(),s.moveTo(n,r),s.lineTo(n,c.y),s.closePath(),s.stroke())})),f&&s.setLineDash&&s.setLineDash([]),s.restore())},n.prototype.destroy=function(){},a.default=n,t.exports=a.default},{}],24:[function(e,t,a){"use strict";function n(e,t,a){if(!e||e.length<=1)return'<div class="dygraph-legend-line" style="border-bottom-color: '+t+';"></div>';var n,r,o,i,s=0,l=0,c=[];for(n=0;n<=e.length;n++)s+=e[n%e.length];if((i=Math.floor(a/(s-e[0])))>1){for(n=0;n<e.length;n++)c[n]=e[n]/a;l=c.length}else{for(i=1,n=0;n<e.length;n++)c[n]=e[n]/s;l=c.length+1}var u="";for(r=0;r<i;r++)for(n=0;n<l;n+=2)o=c[n%c.length],u+='<div class="dygraph-legend-dash" style="margin-right: '+(n<e.length?c[(n+1)%c.length]:0)+"em; padding-left: "+o+'em;"></div>';return u}Object.defineProperty(a,"__esModule",{value:!0});var r=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("../dygraph-utils")),o=function(){this.legend_div_=null,this.is_generated_div_=!1};o.prototype.toString=function(){return"Legend Plugin"},o.prototype.activate=function(e){var t,a=e.getOption("labelsDiv");return a&&null!==a?t="string"==typeof a||a instanceof String?document.getElementById(a):a:((t=document.createElement("div")).className="dygraph-legend",e.graphDiv.appendChild(t),this.is_generated_div_=!0),this.legend_div_=t,this.one_em_width_=10,{select:this.select,deselect:this.deselect,predraw:this.predraw,didDrawChart:this.didDrawChart}};var i=function(e){return e.replace(/&/g,"&").replace(/"/g,""").replace(/</g,"<").replace(/>/g,">")};o.prototype.select=function(e){var t=e.selectedX,a=e.selectedPoints,n=e.selectedRow,r=e.dygraph.getOption("legend");if("never"!==r){if("follow"===r){var i=e.dygraph.plotter_.area,s=this.legend_div_.offsetWidth,l=e.dygraph.getOptionForAxis("axisLabelWidth","y"),c=a[0].x*i.w+50,u=a[0].y*i.h-50;c+s+1>i.w&&(c=c-100-s-(l-i.x)),e.dygraph.graphDiv.appendChild(this.legend_div_),this.legend_div_.style.left=l+c+"px",this.legend_div_.style.top=u+"px"}var d=o.generateLegendHTML(e.dygraph,t,a,this.one_em_width_,n);this.legend_div_.innerHTML=d,this.legend_div_.style.display=""}else this.legend_div_.style.display="none"},o.prototype.deselect=function(e){"always"!==e.dygraph.getOption("legend")&&(this.legend_div_.style.display="none");var t=function(e){var t=document.createElement("span");t.setAttribute("style","margin: 0; padding: 0 0 0 1em; border: 0;"),e.appendChild(t);var a=t.offsetWidth;return e.removeChild(t),a}(this.legend_div_);this.one_em_width_=t;var a=o.generateLegendHTML(e.dygraph,void 0,void 
0,t,null);this.legend_div_.innerHTML=a},o.prototype.didDrawChart=function(e){this.deselect(e)},o.prototype.predraw=function(e){if(this.is_generated_div_){e.dygraph.graphDiv.appendChild(this.legend_div_);var t=e.dygraph.getArea(),a=this.legend_div_.offsetWidth;this.legend_div_.style.left=t.x+t.w-a-1+"px",this.legend_div_.style.top=t.y+"px"}},o.prototype.destroy=function(){this.legend_div_=null},o.generateLegendHTML=function(e,t,a,s,l){var c={dygraph:e,x:t,series:[]},u={},d=e.getLabels();if(d)for(var h=1;h<d.length;h++){var p=e.getPropertiesForSeries(d[h]),f={dashHTML:n(e.getOption("strokePattern",d[h]),p.color,s),label:d[h],labelHTML:i(d[h]),isVisible:p.visible,color:p.color};c.series.push(f),u[d[h]]=f}if(void 0!==t){var g=e.optionsViewForAxis_("x"),m=g("valueFormatter");c.xHTML=m.call(e,t,g,d[0],e,l,0);var b=[],v=e.numAxes();for(h=0;h<v;h++)b[h]=e.optionsViewForAxis_("y"+(h?1+h:""));var _=e.getOption("labelsShowZeroValues"),y=e.getHighlightSeries();for(h=0;h<a.length;h++){var O=a[h];if((f=u[O.name]).y=O.yval,0===O.yval&&!_||isNaN(O.canvasy))f.isVisible=!1;else{var x=b[(p=e.getPropertiesForSeries(O.name)).axis-1],w=x("valueFormatter").call(e,O.yval,x,O.name,e,l,d.indexOf(O.name));r.update(f,{yHTML:w}),O.name==y&&(f.isHighlighted=!0)}}}return(e.getOption("legendFormatter")||o.defaultFormatter).call(e,c)},o.defaultFormatter=function(e){var t=e.dygraph;if(!0!==t.getOption("showLabelsOnHighlight"))return"";var a,n=t.getOption("labelsSeparateLines");if(void 0===e.x){if("always"!=t.getOption("legend"))return"";a="";for(var r=0;r<e.series.length;r++)(o=e.series[r]).isVisible&&(""!==a&&(a+=n?"<br/>":" "),a+="<span style='font-weight: bold; color: "+o.color+";'>"+o.dashHTML+" "+o.labelHTML+"</span>");return a}for(a=e.xHTML+":",r=0;r<e.series.length;r++){var o;(o=e.series[r]).isVisible&&(n&&(a+="<br>"),a+="<span"+(o.isHighlighted?' 
class="highlight"':"")+"> <b><span style='color: "+o.color+";'>"+o.labelHTML+"</span></b>: "+o.yHTML+"</span>")}return a},a.default=o,t.exports=a.default},{"../dygraph-utils":17}],25:[function(e,t,a){"use strict";function n(e){return e&&e.__esModule?e:{default:e}}Object.defineProperty(a,"__esModule",{value:!0});var r=function(e){if(e&&e.__esModule)return e;var t={};if(null!=e)for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(t[a]=e[a]);return t.default=e,t}(e("../dygraph-utils")),o=n(e("../dygraph-interaction-model")),i=n(e("../iframe-tarp")),s=function(){this.hasTouchInterface_="undefined"!=typeof TouchEvent,this.isMobileDevice_=/mobile|android/gi.test(navigator.appVersion),this.interfaceCreated_=!1};s.prototype.toString=function(){return"RangeSelector Plugin"},s.prototype.activate=function(e){return this.dygraph_=e,this.getOption_("showRangeSelector")&&this.createInterface_(),{layout:this.reserveSpace_,predraw:this.renderStaticLayer_,didDrawChart:this.renderInteractiveLayer_}},s.prototype.destroy=function(){this.bgcanvas_=null,this.fgcanvas_=null,this.leftZoomHandle_=null,this.rightZoomHandle_=null},s.prototype.getOption_=function(e,t){return this.dygraph_.getOption(e,t)},s.prototype.setDefaultOption_=function(e,t){this.dygraph_.attrs_[e]=t},s.prototype.createInterface_=function(){this.createCanvases_(),this.createZoomHandles_(),this.initInteraction_(),this.getOption_("animatedZooms")&&(console.warn("Animated zooms and range selector are not compatible; disabling animatedZooms."),this.dygraph_.updateOptions({animatedZooms:!1},!0)),this.interfaceCreated_=!0,this.addToGraph_()},s.prototype.addToGraph_=function(){var e=this.graphDiv_=this.dygraph_.graphDiv;e.appendChild(this.bgcanvas_),e.appendChild(this.fgcanvas_),e.appendChild(this.leftZoomHandle_),e.appendChild(this.rightZoomHandle_)},s.prototype.removeFromGraph_=function(){var e=this.graphDiv_;e.removeChild(this.bgcanvas_),e.removeChild(this.fgcanvas_),e.removeChild(this.leftZoomHandle_),e.removeChild(this.rightZoomHandle_),this.graphDiv_=null},s.prototype.reserveSpace_=function(e){this.getOption_("showRangeSelector")&&e.reserveSpaceBottom(this.getOption_("rangeSelectorHeight")+4)},s.prototype.renderStaticLayer_=function(){this.updateVisibility_()&&(this.resize_(),this.drawStaticLayer_())},s.prototype.renderInteractiveLayer_=function(){this.updateVisibility_()&&!this.isChangingRange_&&(this.placeZoomHandles_(),this.drawInteractiveLayer_())},s.prototype.updateVisibility_=function(){var e=this.getOption_("showRangeSelector");if(e)this.interfaceCreated_?this.graphDiv_&&this.graphDiv_.parentNode||this.addToGraph_():this.createInterface_();else if(this.graphDiv_){this.removeFromGraph_();var t=this.dygraph_;setTimeout((function(){t.width_=0,t.resize()}),1)}return e},s.prototype.resize_=function(){function e(e,t,a,n){var o=n||r.getContextPixelRatio(t);e.style.top=a.y+"px",e.style.left=a.x+"px",e.width=a.w*o,e.height=a.h*o,e.style.width=a.w+"px",e.style.height=a.h+"px",1!=o&&t.scale(o,o)}var t=this.dygraph_.layout_.getPlotArea(),a=0;this.dygraph_.getOptionForAxis("drawAxis","x")&&(a=this.getOption_("xAxisHeight")||this.getOption_("axisLabelFontSize")+2*this.getOption_("axisTickSize")),this.canvasRect_={x:t.x,y:t.y+t.h+a+4,w:t.w,h:this.getOption_("rangeSelectorHeight")};var 
n=this.dygraph_.getNumericOption("pixelRatio");e(this.bgcanvas_,this.bgcanvas_ctx_,this.canvasRect_,n),e(this.fgcanvas_,this.fgcanvas_ctx_,this.canvasRect_,n)},s.prototype.createCanvases_=function(){this.bgcanvas_=r.createCanvas(),this.bgcanvas_.className="dygraph-rangesel-bgcanvas",this.bgcanvas_.style.position="absolute",this.bgcanvas_.style.zIndex=9,this.bgcanvas_ctx_=r.getContext(this.bgcanvas_),this.fgcanvas_=r.createCanvas(),this.fgcanvas_.className="dygraph-rangesel-fgcanvas",this.fgcanvas_.style.position="absolute",this.fgcanvas_.style.zIndex=9,this.fgcanvas_.style.cursor="default",this.fgcanvas_ctx_=r.getContext(this.fgcanvas_)},s.prototype.createZoomHandles_=function(){var e=new Image;e.className="dygraph-rangesel-zoomhandle",e.style.position="absolute",e.style.zIndex=10,e.style.visibility="hidden",e.style.cursor="col-resize",e.width=9,e.height=16,e.src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAAXNSR0IArs4c6QAAAAZiS0dEANAAzwDP4Z7KegAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sHGw0cMqdt1UwAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAaElEQVQoz+3SsRFAQBCF4Z9WJM8KCDVwownl6YXsTmCUsyKGkZzcl7zkz3YLkypgAnreFmDEpHkIwVOMfpdi9CEEN2nGpFdwD03yEqDtOgCaun7sqSTDH32I1pQA2Pb9sZecAxc5r3IAb21d6878xsAAAAAASUVORK5CYII=",this.isMobileDevice_&&(e.width*=2,e.height*=2),this.leftZoomHandle_=e,this.rightZoomHandle_=e.cloneNode(!1)},s.prototype.initInteraction_=function(){var e,t,a,n,s,l,c,u,d,h,p,f,g,m,b=this,v=document,_=0,y=null,O=!1,x=!1,w=!this.isMobileDevice_,E=new i.default;e=function(e){var t=b.dygraph_.xAxisExtremes(),a=(t[1]-t[0])/b.canvasRect_.w;return[t[0]+(e.leftHandlePos-b.canvasRect_.x)*a,t[0]+(e.rightHandlePos-b.canvasRect_.x)*a]},t=function(e){return r.cancelEvent(e),O=!0,_=e.clientX,y=e.target?e.target:e.srcElement,"mousedown"!==e.type&&"dragstart"!==e.type||(r.addEvent(v,"mousemove",a),r.addEvent(v,"mouseup",n)),b.fgcanvas_.style.cursor="col-resize",E.cover(),!0},a=function(e){if(!O)return!1;r.cancelEvent(e);var t=e.clientX-_;if(Math.abs(t)<4)return!0;_=e.clientX;var a,n=b.getZoomHandleStatus_();y==b.leftZoomHandle_?(a=n.leftHandlePos+t,a=Math.min(a,n.rightHandlePos-y.width-3),a=Math.max(a,b.canvasRect_.x)):(a=n.rightHandlePos+t,a=Math.min(a,b.canvasRect_.x+b.canvasRect_.w),a=Math.max(a,n.leftHandlePos+y.width+3));var o=y.width/2;return y.style.left=a-o+"px",b.drawInteractiveLayer_(),w&&s(),!0},n=function(e){return!!O&&(O=!1,E.uncover(),r.removeEvent(v,"mousemove",a),r.removeEvent(v,"mouseup",n),b.fgcanvas_.style.cursor="default",w||s(),!0)},s=function(){try{var t=b.getZoomHandleStatus_();if(b.isChangingRange_=!0,t.isZoomed){var a=e(t);b.dygraph_.doZoomXDates_(a[0],a[1])}else b.dygraph_.resetZoom()}finally{b.isChangingRange_=!1}},l=function(e){var t=b.leftZoomHandle_.getBoundingClientRect(),a=t.left+t.width/2,n=(t=b.rightZoomHandle_.getBoundingClientRect()).left+t.width/2;return e.clientX>a&&e.clientX<n},c=function(e){return!(x||!l(e)||!b.getZoomHandleStatus_().isZoomed)&&(r.cancelEvent(e),x=!0,_=e.clientX,"mousedown"===e.type&&(r.addEvent(v,"mousemove",u),r.addEvent(v,"mouseup",d)),!0)},u=function(e){if(!x)return!1;r.cancelEvent(e);var t=e.clientX-_;if(Math.abs(t)<4)return!0;_=e.clientX;var a=b.getZoomHandleStatus_(),n=a.leftHandlePos,o=a.rightHandlePos,i=o-n;n+t<=b.canvasRect_.x?o=(n=b.canvasRect_.x)+i:o+t>=b.canvasRect_.x+b.canvasRect_.w?n=(o=b.canvasRect_.x+b.canvasRect_.w)-i:(n+=t,o+=t);var s=b.leftZoomHandle_.width/2;return 
b.leftZoomHandle_.style.left=n-s+"px",b.rightZoomHandle_.style.left=o-s+"px",b.drawInteractiveLayer_(),w&&h(),!0},d=function(e){return!!x&&(x=!1,r.removeEvent(v,"mousemove",u),r.removeEvent(v,"mouseup",d),w||h(),!0)},h=function(){try{b.isChangingRange_=!0,b.dygraph_.dateWindow_=e(b.getZoomHandleStatus_()),b.dygraph_.drawGraph_(!1)}finally{b.isChangingRange_=!1}},p=function(e){if(!O&&!x){var t=l(e)?"move":"default";t!=b.fgcanvas_.style.cursor&&(b.fgcanvas_.style.cursor=t)}},f=function(e){"touchstart"==e.type&&1==e.targetTouches.length?t(e.targetTouches[0])&&r.cancelEvent(e):"touchmove"==e.type&&1==e.targetTouches.length?a(e.targetTouches[0])&&r.cancelEvent(e):n(e)},g=function(e){"touchstart"==e.type&&1==e.targetTouches.length?c(e.targetTouches[0])&&r.cancelEvent(e):"touchmove"==e.type&&1==e.targetTouches.length?u(e.targetTouches[0])&&r.cancelEvent(e):d(e)},m=function(e,t){for(var a=["touchstart","touchend","touchmove","touchcancel"],n=0;n<a.length;n++)b.dygraph_.addAndTrackEvent(e,a[n],t)},this.setDefaultOption_("interactionModel",o.default.dragIsPanInteractionModel),this.setDefaultOption_("panEdgeFraction",1e-4);var S=window.opera?"mousedown":"dragstart";this.dygraph_.addAndTrackEvent(this.leftZoomHandle_,S,t),this.dygraph_.addAndTrackEvent(this.rightZoomHandle_,S,t),this.dygraph_.addAndTrackEvent(this.fgcanvas_,"mousedown",c),this.dygraph_.addAndTrackEvent(this.fgcanvas_,"mousemove",p),this.hasTouchInterface_&&(m(this.leftZoomHandle_,f),m(this.rightZoomHandle_,f),m(this.fgcanvas_,g))},s.prototype.drawStaticLayer_=function(){var e=this.bgcanvas_ctx_;e.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);try{this.drawMiniPlot_()}catch(e){console.warn(e)}this.bgcanvas_ctx_.lineWidth=this.getOption_("rangeSelectorBackgroundLineWidth"),e.strokeStyle=this.getOption_("rangeSelectorBackgroundStrokeColor"),e.beginPath(),e.moveTo(.5,.5),e.lineTo(.5,this.canvasRect_.h-.5),e.lineTo(this.canvasRect_.w-.5,this.canvasRect_.h-.5),e.lineTo(this.canvasRect_.w-.5,.5),e.stroke()},s.prototype.drawMiniPlot_=function(){var e=this.getOption_("rangeSelectorPlotFillColor"),t=this.getOption_("rangeSelectorPlotFillGradientColor"),a=this.getOption_("rangeSelectorPlotStrokeColor");if(e||a){var n=this.getOption_("stepPlot"),r=this.computeCombinedSeriesAndLimits_(),o=r.yMax-r.yMin,i=this.bgcanvas_ctx_,s=this.dygraph_.xAxisExtremes(),l=Math.max(s[1]-s[0],1e-30),c=(this.canvasRect_.w-.5)/l,u=(this.canvasRect_.h-.5)/o,d=this.canvasRect_.w-.5,h=this.canvasRect_.h-.5,p=null,f=null;i.beginPath(),i.moveTo(.5,h);for(var g=0;g<r.data.length;g++){var m=r.data[g],b=null!==m[0]?(m[0]-s[0])*c:NaN,v=null!==m[1]?h-(m[1]-r.yMin)*u:NaN;(n||null===p||Math.round(b)!=Math.round(p))&&(isFinite(b)&&isFinite(v)?(null===p?i.lineTo(b,h):n&&i.lineTo(b,f),i.lineTo(b,v),p=b,f=v):(null!==p&&(n?(i.lineTo(b,f),i.lineTo(b,h)):i.lineTo(p,h)),p=f=null))}if(i.lineTo(d,h),i.closePath(),e){var _=this.bgcanvas_ctx_.createLinearGradient(0,0,0,h);t&&_.addColorStop(0,t),_.addColorStop(1,e),this.bgcanvas_ctx_.fillStyle=_,i.fill()}a&&(this.bgcanvas_ctx_.strokeStyle=a,this.bgcanvas_ctx_.lineWidth=this.getOption_("rangeSelectorPlotLineWidth"),i.stroke())}},s.prototype.computeCombinedSeriesAndLimits_=function(){var e,t=this.dygraph_,a=this.getOption_("logscale"),n=t.numColumns(),o=t.getLabels(),i=new Array(n),s=!1,l=t.visibility(),c=[];for(e=1;e<n;e++){var u=this.getOption_("showInRangeSelector",o[e]);c.push(u),null!==u&&(s=!0)}if(s)for(e=1;e<n;e++)i[e]=c[e-1];else for(e=1;e<n;e++)i[e]=l[e-1];var 
d=[],h=t.dataHandler_,p=t.attributes_;for(e=1;e<t.numColumns();e++)if(i[e]){var f=h.extractSeries(t.rawData_,e,p);t.rollPeriod()>1&&(f=h.rollingAverage(f,t.rollPeriod(),p)),d.push(f)}var g=[];for(e=0;e<d[0].length;e++){for(var m=0,b=0,v=0;v<d.length;v++){var _=d[v][e][1];null===_||isNaN(_)||(b++,m+=_)}g.push([d[0][e][0],m/b])}var y=Number.MAX_VALUE,O=-Number.MAX_VALUE;for(e=0;e<g.length;e++){var x=g[e][1];null!==x&&isFinite(x)&&(!a||x>0)&&(y=Math.min(y,x),O=Math.max(O,x))}if(a)for(O=r.log10(O),O+=.25*O,y=r.log10(y),e=0;e<g.length;e++)g[e][1]=r.log10(g[e][1]);else{var w,E=O-y;O+=w=E<=Number.MIN_VALUE?.25*O:.25*E,y-=w}return{data:g,yMin:y,yMax:O}},s.prototype.placeZoomHandles_=function(){var e=this.dygraph_.xAxisExtremes(),t=this.dygraph_.xAxisRange(),a=e[1]-e[0],n=Math.max(0,(t[0]-e[0])/a),r=Math.max(0,(e[1]-t[1])/a),o=this.canvasRect_.x+this.canvasRect_.w*n,i=this.canvasRect_.x+this.canvasRect_.w*(1-r),s=Math.max(this.canvasRect_.y,this.canvasRect_.y+(this.canvasRect_.h-this.leftZoomHandle_.height)/2),l=this.leftZoomHandle_.width/2;this.leftZoomHandle_.style.left=o-l+"px",this.leftZoomHandle_.style.top=s+"px",this.rightZoomHandle_.style.left=i-l+"px",this.rightZoomHandle_.style.top=this.leftZoomHandle_.style.top,this.leftZoomHandle_.style.visibility="visible",this.rightZoomHandle_.style.visibility="visible"},s.prototype.drawInteractiveLayer_=function(){var e=this.fgcanvas_ctx_;e.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);var t=this.canvasRect_.w-1,a=this.canvasRect_.h-1,n=this.getZoomHandleStatus_();if(e.strokeStyle=this.getOption_("rangeSelectorForegroundStrokeColor"),e.lineWidth=this.getOption_("rangeSelectorForegroundLineWidth"),n.isZoomed){var r=Math.max(1,n.leftHandlePos-this.canvasRect_.x),o=Math.min(t,n.rightHandlePos-this.canvasRect_.x);e.fillStyle="rgba(240, 240, 240, "+this.getOption_("rangeSelectorAlpha").toString()+")",e.fillRect(0,0,r,this.canvasRect_.h),e.fillRect(o,0,this.canvasRect_.w-o,this.canvasRect_.h),e.beginPath(),e.moveTo(1,1),e.lineTo(r,1),e.lineTo(r,a),e.lineTo(o,a),e.lineTo(o,1),e.lineTo(t,1),e.stroke()}else e.beginPath(),e.moveTo(1,1),e.lineTo(1,a),e.lineTo(t,a),e.lineTo(t,1),e.stroke()},s.prototype.getZoomHandleStatus_=function(){var e=this.leftZoomHandle_.width/2,t=parseFloat(this.leftZoomHandle_.style.left)+e,a=parseFloat(this.rightZoomHandle_.style.left)+e;return{leftHandlePos:t,rightHandlePos:a,isZoomed:t-1>this.canvasRect_.x||a+1<this.canvasRect_.x+this.canvasRect_.w}},a.default=s,t.exports=a.default},{"../dygraph-interaction-model":12,"../dygraph-utils":17,"../iframe-tarp":19}]},{},[18])(18)},510:function(e,t,a){},514:function(e,t,a){},515:function(e,t,a){},56:function(e,t,a){"use strict";a.d(t,"a",(function(){return r})),a.d(t,"c",(function(){return o})),a.d(t,"d",(function(){return i})),a.d(t,"b",(function(){return s}));var n=a(46),r=80,o=function(e,t){var a=" ",n="";return e&&"string"===typeof t.context&&(n=t.context),"string"===typeof t.plugin&&""!==t.plugin?((a=t.plugin).endsWith(".plugin")&&(a=a.substring(0,a.length-7)),"string"===typeof t.module&&""!==t.module&&(a+=":".concat(t.module)),e&&""!==n&&(a+=", ".concat(n))):e&&""!==n&&(a=n),a},i=function(e,t){var a=t.update_every,r=e.view_update_every;return a===r?"resolution ".concat(Object(n.a)(a)):"resolution ".concat(Object(n.a)(r),", collected every ").concat(Object(n.a)(a))},s=function(e){var t,a=e.allDimensions,n=e.selectedDimensions,r=e.clickedDimensionName,o=e.isModifierKeyPressed,i=0===n.length?a:n,s=i.includes(r);return(t=o||!(s&&i.length>1)&&s?s?i.filter((function(e){return 
e!==r})):i.concat(r):[r]).length===a.length?[]:t}},564:function(e,t){},565:function(e,t,a){},566:function(e,t,a){"use strict";a.r(t);var n=a(0),r=a.n(n),o=a(30),i=a.n(o),s=a(27),l=a(48),c=a(129),u=a(14),d=a(159),h=a(26),p=a(167),f=a(45),g=a(7),m=a(22),b=a(44),v={isSnapshotMode:!1,snapshotCharts:null,snapshotDataPoints:null,isSignedIn:!1,offline:!1},_=Object(m.createReducer)({},v);_.on(b.e,(function(e,t){var a=t.charts,n=t.dataPoints;return Object(g.a)({},e,{snapshotCharts:a,snapshotDataPoints:n,isSnapshotMode:!0})})),_.on(b.f,(function(e){return Object(g.a)({},e,{isSnapshotMode:v.isSnapshotMode,snapshotCharts:v.snapshotCharts,snapshotDataPoints:v.snapshotDataPoints})})),_.on(b.b,(function(e,t){var a=t.isSignedIn;return Object(g.a)({},e,{isSignedIn:a})})),_.on(b.c,(function(e,t){var a=t.offline;return Object(g.a)({},e,{offline:a})}));var y,O=a(76),x=Object(l.c)((y={},Object(u.a)(y,h.e,d.b),Object(u.a)(y,f.c,p.a),Object(u.a)(y,O.a,_),y)),w=a(21),E=a.n(w),S=a(16),C=a(35),A=a(5),k=a(117),j=a.n(k),T=j.a.create({headers:{"Cache-Control":"no-cache, no-store",Pragma:"no-cache"},withCredentials:!0}),D=a(36),P=a(632),M=a(190),L=a(606),I=a(281),N=a(604),R=a(605),B=a(626),F=a(607),H=j.a.create({headers:{"Cache-Control":"no-cache, no-store",Pragma:"no-cache"},withCredentials:!0}),z=function(e){var t=new P.a,a=new P.a,n=Object(I.a)((function(e){var t=e.url,a=e.method,n=void 0===a?"GET":a,r=e.params,o=void 0===r?{}:r,i=e.data,s=e.onErrorCallback,l=e.onSuccessCallback,c=e.cancelTokenSource;return Object(M.a)(H.request({url:t,method:n,params:o,data:i,timeout:15e3,cancelToken:null===c||void 0===c?void 0:c.token})).pipe(Object(N.a)((function(e){var t=e.data;l(t)})),Object(R.a)((function(e){return"Chart scrolled out of view"!==(null===e||void 0===e?void 0:e.message)&&console.warn("fetch error",t),s(e),Object(L.a)()})))}),e);return a.pipe(Object(B.a)(null),Object(F.a)((function(){return t.pipe(n)}))).subscribe(),[t,a]},U=a(75),G=a(280),W=a(608),V=a(635),Y=function(e,t){return"json"===e.format?function(e,t){var a=e.view_update_every;if(!e.result.data.length)return e;var n=e.result.data[0][0]-t*a,r=Object(G.a)(e.result.labels).map((function(){return null})),o=new Array(t).fill(null).map((function(e,t){return[n+t*a].concat(Object(C.a)(r))}));return Object(g.a)({},e,{after:e.after-a*t,result:Object(g.a)({},e.result,{data:o.concat(e.result.data)})})}(e,t):e},X=[],K=function(e,t,a){if("array"===t&&"json"===e.format){if(Array.isArray(e.result))return e;var n=a?Object(W.a)(e.result.data):e.result.data;return Object(g.a)({},e,{format:"array",result:n.reduce((function(e,t){return t.shift(),[].concat(Object(C.a)(e),[Object(V.a)(t)])}),X)})}return e},Z=a(3),q=a(137),$=a(4),J=$.d.div.withConfig({displayName:"styled__Container",componentId:"qqn9iy-0"})(["width:100%;height:100%;min-height:",";display:flex;flex-flow:row nowrap;padding:"," "," "," ",";"],Object(Z.J)(10),Object(Z.J)(2),Object(Z.J)(2),Object(Z.J)(2),Object(Z.J)(2)),Q=$.d.div.withConfig({displayName:"styled__SideContent",componentId:"qqn9iy-1"})(["flex-grow:0;flex-shrink:0;height:100%;align-self:stretch;"]),ee=$.d.div.withConfig({displayName:"styled__ContentContainer",componentId:"qqn9iy-2"})([""]),te=Object($.d)(Z.B).withConfig({displayName:"styled__HeaderText",componentId:"qqn9iy-3"})(["color:",";font-weight:bold;display:block;margin-bottom:",";"],(function(e){var t=e.error;return 
e.success&&Object(Z.H)("success")||t&&Object(Z.H)("error")}),Object(Z.J)()),ae=Object($.d)(Z.F).withConfig({displayName:"styled__ContentText",componentId:"qqn9iy-4"})(["display:block;color:",";"],(function(e){return e.error&&Object(Z.H)("error")||Object(Z.H)("border")})),ne=function(e){var t=e.header,a=e.text,n=e.leftContent,o=e.rightContent,i=e.renderContent,s=e.success,l=e.error;return r.a.createElement(J,null,n&&r.a.createElement(Q,null,n),r.a.createElement(ee,null,t&&r.a.createElement(te,{success:s,error:l},t),a&&r.a.createElement(ae,{success:s,error:l},a),i&&i(e)),o&&r.a.createElement(Q,{right:!0},o))},re=function(e){return r.a.createElement(ne,e)},oe=$.d.div.withConfig({displayName:"styled__NodeIconContainer",componentId:"fe9uuu-0"})(["width:",";height:",";margin-right:",";display:flex;justify-content:center;align-items:center;"],Object(Z.J)(5),Object(Z.J)(5),Object(Z.J)(2)),ie=$.d.a.withConfig({displayName:"styled__NotificationLink",componentId:"fe9uuu-1"})(["&,&:hover{text-decoration:underline;color:inherit;}"]),se={position:q.b.POSITION.BOTTOM_RIGHT,autoClose:1e4,pauseOnFocusLoss:!1},le=function(){var e=re(Object(g.a)({},{header:"Installation error",text:"The installer could not prepare the required dependencies to enable Netdata Cloud functionality"},{error:!0,leftContent:r.a.createElement(oe,null,r.a.createElement(Z.o,{name:"gear",size:"large",color:"error"}))}));q.b.error(e,se)},ce=function(){var e={header:"Connection Problem",text:r.a.createElement(ie,{href:"https://learn.netdata.cloud/docs/agent/packaging/installer#automatic-one-line-installation-script",target:"_blank"},"To access Cloud install again your agent via the kickstart script")},t=re(Object(g.a)({},e,{error:!0,leftContent:r.a.createElement(oe,null,r.a.createElement(Z.o,{name:"gear",size:"large",color:"error"}))}));q.b.error(t,se)},ue=a(9),de=a(312),he=a(636),pe=a(227),fe=a(572),ge=a(629),me={hash:"#",theme:null,help:null,mode:"live",update_always:!1,pan_and_zoom:!1,server:null,after:0,before:0,highlight:!1,highlight_after:0,highlight_before:0,nowelcome:!1,show_alarms:!1,chart:null,family:null,alarm:null,alarm_unique_id:0,alarm_id:0,alarm_event_id:0,alarm_when:0},be=function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return void 0===me[a]||void 0===n}([a,n])?{}:Object(u.a)({},a,decodeURIComponent(n))},ve="print"===Object(de.a)(Object(he.a)(";"),Object(ge.a)((function(e,t){return 0===t?{hash:e}:be(e.split("="))})),pe.a,Object(fe.a)(me))(document.location.hash).mode,_e=a(18),ye=E.a.mark(Ae),Oe=E.a.mark(Me),xe=E.a.mark(He),we=E.a.mark(ze),Ee=E.a.mark(Ue),Se=U.b?30:60,Ce=Object(c.a)();function Ae(){var e,t,a,n,r,o;return E.a.wrap((function(i){for(;;)switch(i.prev=i.next){case 0:return i.next=3,Object(S.f)(Ce);case 3:if((e=i.sent).type!==_e.c.success.toString()){i.next=15;break}return t=e.payload,a=t.fetchDataParams.viewRange,n=Object(A.a)(a,2),r=n[0],o=n[1],i.next=10,Object(S.d)(ue.m);case 10:if(!i.sent||!(r<=0||o<=0)){i.next=15;break}return i.next=14,Object(S.c)(Object(_e.d)({id:t.id}));case 14:return i.abrupt("continue",0);case 15:return i.next=17,Object(S.c)(e);case 17:i.next=0;break;case 19:case"end":return i.stop()}}),ye)}var ke=function(e,t){return"".concat(void 0===e?"null":encodeURIComponent(e),",").concat(encodeURIComponent(t))},je=!1,Te=function(e){return"chart"===e?"node":"node"===e||"dimension"===e?e:"label=".concat(e)},De=z(ve?2:Se),Pe=Object(A.a)(De,1)[0];function Me(e){var 
t,a,n,r,o,i,s,l,c,u,d,h,p,f,m,b,v,_,y,O,x,w,A,k,j,T,P,M,L,I,N,R,B,F,H,z,G,W,V;return E.a.wrap((function(E){for(;;)switch(E.prev=E.next){case 0:return t=e.payload,a=t.host,n=t.context,r=t.chart,o=t.format,i=t.points,s=t.group,l=t.gtime,c=t.options,u=t.after,d=t.before,h=t.dimensions,p=t.labels,f=t.postGroupBy,m=t.postAggregationMethod,b=t.aggrMethod,v=t.dimensionsAggrMethod,_=t.nodeIDs,y=t.httpMethod,O=t.groupBy,x=void 0===O?"dimension":O,w=t.aggrGroups,A=void 0===w?[]:w,k=t.fetchDataParams,j=t.id,T=t.cancelTokenSource,E.next=4,Object(S.d)(ue.D);case 4:if(!(P=E.sent)){E.next=15;break}if(M=ke(h,c),L=Object.keys(P.data).find((function(e){return e.startsWith(r)&&e.includes(M)}))){E.next=11;break}return console.warn("Could not find snapshot key for chart: ".concat(r," and id ").concat(j)),E.abrupt("return");case 11:return I=P.data[L],E.next=14,Object(S.c)(_e.c.success({chartData:I,fetchDataParams:k,id:j}));case 14:return E.abrupt("return");case 15:N=U.b?"".concat(Object(D.a)(a),"api/v1/data"):a,R=c.split("|"),B=R.includes("flip"),H=(F=!je&&!B)?R.concat("flip"):R,z=[Te(x),f&&"label=".concat(f)].filter(Boolean),G="POST"===y?{data:Object(g.a)({filter:{nodeIDs:_,context:n,dimensions:h?h.split(/['|]/):void 0,labels:p},after:u,before:d,points:i,group:s,gtime:l,agent_options:H},m&&{post_aggregation_methods:[m]},{aggregations:["dimension"!==x&&{method:v||"sum",groupBy:["chart"].concat(Object(C.a)(z))},"chart"!==x&&Object(g.a)({method:b,groupBy:z},A.length&&{labels:A})].filter(Boolean)})}:{params:{chart:r,_:(new Date).valueOf(),format:o,points:i,group:s,gtime:l,options:c,after:u,before:d,dimensions:h}},W=function(e){if(null===e||void 0===e?void 0:e.result){var t=k.fillMissingPoints,a=K(e,o,F),n=Object(g.a)({},a,{},"post_aggregated_data"in e.result&&{postAggregationMethod:m,groupBy:x,postGroupBy:f,aggrGroups:A,postAggregated:e.result.post_aggregated_data[m]});Ce.put(_e.c.success({chartData:t?Y(n,t):n,fetchDataParams:k,id:j}))}else Ce.put(_e.c.failure({id:j}))},V=function(e){console.warn("fetch chart data failure",e),Ce.put(_e.c.failure({id:j}))},Pe.next(Object(g.a)({},G,{method:y||"GET",url:N,onErrorCallback:V,onSuccessCallback:W,cancelTokenSource:T}));case 25:case"end":return E.stop()}}),Oe)}var Le=z(1),Ie=Object(A.a)(Le,2),Ne=Ie[0],Re=Ie[1];function Be(e){var t=e.payload,a=t.host,n=t.chart,r=t.format,o=t.points,i=t.group,s=t.gtime,l=t.options,c=t.after,u=t.before,d=t.dimensions,h=t.aggrMethod,p=t.groupBy,f=t.nodeIDs,m=t.chartLibrary,b=t.id,v="".concat(n,",").concat(m,",").concat(ke(d,l)),_="".concat(Object(D.a)(a),"api/v1/data"),y=Object(g.a)({chart:n,_:(new Date).valueOf(),format:r,points:o,group:i,gtime:s,options:l,after:c,before:u,dimensions:d},h&&{aggr_method:h},{},f&&{node_ids:f.join(",")},{},p&&{groupBy:p});Ne.next({url:_,params:y,onErrorCallback:function(){Ce.put(_e.e.failure({id:b})),window.chartUpdated({chartDataUniqueID:v,chart:n,data:null})},onSuccessCallback:function(e){Ce.put(_e.e.success({snapshotData:e,id:b})),window.chartUpdated({chartDataUniqueID:v,data:e})}})}function Fe(){Re.next()}function He(e){var t,a,n,r,o,i,s;return E.a.wrap((function(l){for(;;)switch(l.prev=l.next){case 0:return t=e.payload,a=t.chart,n=t.id,r=t.host,l.next=4,Object(S.d)(ue.D);case 4:if(!(o=l.sent)){l.next=9;break}return l.next=8,Object(S.c)(_e.b.success({chartMetadata:o.charts.charts[a],id:n}));case 8:return l.abrupt("return");case 9:return s=U.b?"".concat(Object(D.a)(r),"api/v1/chart"):r.replace("/data","/chart"),l.prev=10,l.next=13,Object(S.a)(T.get,s,{params:{chart:a}});case 13:i=l.sent,l.next=22;break;case 
16:return l.prev=16,l.t0=l.catch(10),console.warn("fetch chart details failure"),l.next=21,Object(S.c)(_e.b.failure({id:n}));case 21:return l.abrupt("return");case 22:return l.next=24,Object(S.c)(_e.b.success({chartMetadata:i.data,id:n}));case 24:case"end":return l.stop()}}),xe,null,[[10,16]])}function ze(e){var t,a,n,r,o,i,s,l,c,u,d;return E.a.wrap((function(p){for(;;)switch(p.prev=p.next){case 0:return t=e.payload,a=t.poll,n=!1,r=!1,o=!1,i=!1,p.prev=6,p.next=9,Object(S.d)(ue.x);case 9:return s=p.sent,l=null===s||void 0===s?void 0:s.isCloudAvailable,c=null===s||void 0===s?void 0:s.isACLKAvailable,p.next=14,Object(S.a)(T.get,"".concat(D.b,"/api/v1/info"));case 14:return u=p.sent,d=u.data,o=(null===d||void 0===d?void 0:d["cloud-available"])||!1,n=(null===d||void 0===d?void 0:d["cloud-enabled"])||!1,r=(null===d||void 0===d?void 0:d["agent-claimed"])||!1,i=(null===d||void 0===d?void 0:d["aclk-available"])||!1,p.next=22,Object(S.c)(_e.f.success({isCloudAvailable:o,isCloudEnabled:n,isAgentClaimed:r,isACLKAvailable:i,fullInfoPayload:d}));case 22:n&&null===l&&!o&&le(),o&&r&&!1!==c&&!i&&ce(),p.next=31;break;case 26:return p.prev=26,p.t0=p.catch(6),console.warn("fetch agent info failure"),p.next=31,Object(S.c)(_e.f.failure());case 31:if(!(a&&n&&r)){p.next=36;break}return p.next=34,Object(S.b)(h.b);case 34:return p.next=36,Object(S.c)(Object(_e.f)({poll:!0}));case 36:case"end":return p.stop()}}),we,null,[[6,26]])}function Ue(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.g)(_e.c.request,Me);case 2:return e.next=4,Object(S.g)(_e.b.request,He);case 4:return e.next=6,Object(S.g)(_e.e.request,Be);case 6:return e.next=8,Object(S.g)(b.f,Fe);case 8:return e.next=10,Object(S.g)(_e.f.request,ze);case 10:return e.next=12,Object(S.e)(Ae);case 12:case"end":return e.stop()}}),Ee)}var Ge=a(308),We=a(630),Ve=a(28),Ye=a(83),Xe=.2,Ke=a(12),Ze=a(609),qe=a(49),$e=a(228),Je=a(145),Qe=E.a.mark(mt),et=E.a.mark(vt),tt=E.a.mark(_t),at=E.a.mark(yt),nt=E.a.mark(Ot),rt=E.a.mark(xt),ot=E.a.mark(wt),it=1e3,st=1e4,lt=500,ct="Notification"in window,ut=window.netdataAlarmsNotifCallback,dt=0,ht=0,pt=function(e){"object"===typeof e&&(function(e){if("string"===typeof e){var t=document.querySelector("#chart_".concat(Object(Je.a)(e)));if(t){var a=t.offsetTop+-50;return document.querySelector("html").scrollTop=a,!0}}return!1}(e.chart)&&window.focus())},ft=function(){ct&&"default"===Notification.permission&&Notification.requestPermission()},gt=function(){return ct&&"granted"===Notification.permission};function mt(e,t){var a,n;return E.a.wrap((function(r){for(;;)switch(r.prev=r.next){case 0:return r.prev=0,r.next=3,Object(S.a)(T.get,"".concat(t,"/api/v1/alarm_log?after=").concat(e));case 3:return a=r.sent,n=a.data,r.abrupt("return",n);case 8:return r.prev=8,r.t0=r.catch(0),console.warn("Error fetching alarms log",r.t0),r.abrupt("return",null);case 12:case"end":return r.stop()}}),Qe,null,[[0,8]])}var bt=function(e,t,a){if(!e.updated){var n=e.value_string,r=t.alarms["".concat(e.chart,".").concat(e.name)];"undefined"!==typeof r&&e.status===r.status&&"undefined"!==typeof r.value_string&&(n=r.value_string);var o=e.name.replace(/_/g," "),i=e.status.toLowerCase(),s="".concat(o," = ").concat(n),l=e.alarm_id,c="images/banner-icon-144x144.png",u=!1,d=!0;switch(e.status){case"REMOVED":d=!1;break;case"UNDEFINED":case"UNINITIALIZED":return;case"CLEAR":if(e.unique_id<a)return;if("UNINITIALIZED"===e.old_status||"UNDEFINED"===e.old_status)return;if(e.no_clear_notification)return;s="".concat(o," back to normal 
(").concat(n,")"),c="images/check-mark-2-128-green.png",u=!1;break;case"WARNING":"CRITICAL"===e.old_status&&(i="demoted to ".concat(e.status.toLowerCase())),c="images/alert-128-orange.png",u=!1;break;case"CRITICAL":"WARNING"===e.old_status&&(i="escalated to ".concat(e.status.toLowerCase())),c="images/alert-128-red.png",u=!0;break;default:return void console.warn("invalid alarm status ".concat(e.status))}return d&&("function"===typeof ut&&(d=ut(e)),d)?{notificationTitle:s,notificationOptions:{body:"".concat(e.hostname," - ").concat(e.chart," (").concat(e.family,") - ").concat(i,": ").concat(e.info),tag:"".concat(l),requireInteraction:u,icon:D.c+c,data:e},notificationHandler:function(e){if(e.preventDefault(),e.target){var t=e.target.data;pt(t)}}}:void 0}};function vt(e,t){var a,n,r,o,i,s,l,c,u;return E.a.wrap((function(d){for(;;)switch(d.prev=d.next){case 0:return d.next=2,Object(S.a)(mt,ht,e);case 2:if(null!==(a=d.sent)&&"object"===typeof a){d.next=6;break}return console.warn("invalid alarms log response"),d.abrupt("return");case 6:if(0!==a.length){d.next=9;break}return console.log("received empty alarm log"),d.abrupt("return");case 9:n=Object(Ze.a)(Object(qe.a)("unique_id"),a),r=n.filter((function(e){return e.unique_id>ht})),o=r.map((function(e){return bt(e,t,dt)})).filter((function(e){return void 0!==e})),i=0;case 13:if(!(i<o.length)){d.next=22;break}return s=o[i],l=s.notificationTitle,c=s.notificationOptions,u=s.notificationHandler,new Notification(l,c).onclick=u,d.next=19,Object(S.b)(lt);case 19:i+=1,d.next=13;break;case 22:ht=Object($e.a)(n).unique_id,("undefined"===typeof window.netdataAlarmsRemember||window.netdataAlarmsRemember)&&localStorage.setItem("last_notification_id","".concat(ht));case 24:case"end":return d.stop()}}),et)}function _t(e,t){var a,n;return E.a.wrap((function(r){for(;;)switch(r.prev=r.next){case 0:return r.next=2,Object(S.a)(T.get,"".concat(t,"/api/v1/alarms?").concat(e));case 2:return a=r.sent,n=a.data,0===dt&&"number"===typeof n.latest_alarm_log_unique_id&&(dt=n.latest_alarm_log_unique_id),r.abrupt("return",n);case 6:case"end":return r.stop()}}),tt)}function yt(e){var t;return E.a.wrap((function(a){for(;;)switch(a.prev=a.next){case 0:return a.next=3,Object(S.a)(_t,"active",e);case 3:if(!(t=a.sent)){a.next=12;break}return a.next=7,Object(S.c)(Object(Ke.B)({activeAlarms:t}));case 7:if(!(gt()&&t.latest_alarm_log_unique_id>ht)){a.next=12;break}return a.next=10,Object(S.a)(vt,e,t);case 10:if(!1!==t.status){a.next=12;break}return a.abrupt("break",16);case 12:return a.next=14,Object(S.b)(st);case 14:a.next=0;break;case 16:case"end":return a.stop()}}),at)}function Ot(){var e,t,a;return E.a.wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return n.next=2,Object(S.f)(Ke.A);case 2:return e=n.sent,t=e.payload,a=t.serverDefault,n.next=7,Object(S.b)(it);case 7:return ht=+(localStorage.getItem("last_notification_id")||ht),ft(),n.next=11,Object(S.a)(yt,a);case 11:case"end":return n.stop()}}),nt)}function xt(e){var t,a,n,r;return E.a.wrap((function(o){for(;;)switch(o.prev=o.next){case 0:return t=e.payload,a=t.callback,n=t.serverDefault,o.next=4,Object(S.a)(_t,"all",n);case 4:r=o.sent,a(r);case 6:case"end":return o.stop()}}),rt)}function wt(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.e)(Ot);case 2:return e.next=4,Object(S.g)(Ke.e.request,xt);case 4:case"end":return e.stop()}}),ot)}var Et=["","https://www.google.com/","https://duckduckgo.com/","https://www.reddit.com/"],St=function(e){return 
Et.includes(e)||e.endsWith(".my-netdata.io/")||e.startsWith("https://github.com/")||e.endsWith("netdata.cloud/")||e.startsWith("https://app.netdata.cloud/")},Ct=E.a.mark(Lt),At=E.a.mark(It),kt=E.a.mark(Nt),jt=E.a.mark(Ft),Tt=E.a.mark(Ut),Dt=E.a.mark(Gt),Pt=Object(c.a)();function Mt(){window.addEventListener("focus",(function(){Pt.put(Object(Ke.D)({hasWindowFocus:!0}))})),window.addEventListener("blur",(function(){Pt.put(Object(Ke.D)({hasWindowFocus:!1}))}))}function Lt(){var e;return E.a.wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return t.next=3,Object(S.f)(Pt);case 3:return e=t.sent,t.next=6,Object(S.c)(e);case 6:t.next=0;break;case 8:case"end":return t.stop()}}),Ct)}function It(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.f)(_e.f.success);case 2:return e.abrupt("return",e.sent.payload.fullInfoPayload);case 3:case"end":return e.stop()}}),At)}function Nt(e,t){var a,n,r,o,i,s;return E.a.wrap((function(l){for(;;)switch(l.prev=l.next){case 0:if(!window.posthog){l.next=2;break}return l.abrupt("return");case 2:return l.next=4,Object(S.d)(ue.k);case 4:if(l.t1=l.sent,l.t1){l.next=9;break}return l.next=8,Object(S.a)(It);case 8:l.t1=l.sent;case 9:if(l.t0=l.t1,l.t0){l.next=12;break}l.t0={};case 12:i=l.t0,function(e,t){var a,n,r,o;t.__SV||(window.posthog=t,t._i=[],t.init=function(i,s,l){function c(e,t){var a=t.split(".");2==a.length&&(e=e[a[0]],t=a[1]),e[t]=function(){e.push([t].concat(Array.prototype.slice.call(arguments,0)))}}(r=e.createElement("script")).type="text/javascript",r.async=!0,r.src=s.api_host+"/static/array.js",(o=e.getElementsByTagName("script")[0]).parentNode.insertBefore(r,o);var u=t;for(void 0!==l?u=t[l]=[]:l="posthog",u.people=u.people||[],u.toString=function(e){var t="posthog";return"posthog"!==l&&(t+="."+l),e||(t+=" (stub)"),t},u.people.toString=function(){return u.toString(1)+".people (stub)"},a="capture identify alias people.set people.set_once set_config register register_once unregister opt_out_capturing has_opted_out_capturing opt_in_capturing reset isFeatureEnabled onFeatureFlags".split(" "),n=0;n<a.length;n++)c(u,a[n]);t._i.push([i,s,l])},t.__SV=1)}(document,window.posthog||[]),window.posthog.init("mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y",{api_host:"https://app.posthog.com",loaded:function(e){t&&e.identify(t)}}),s=!Ye.a&&!St(document.referrer),"masked",window.posthog.register(Object(Ge.a)((function(e){return void 0!==e&&null!==e}),{$ip:"127.0.0.1",$current_url:Ye.a?null:"agent dashboard",$pathname:Ye.a?null:"netdata-dashboard",$host:Ye.a?null:"dashboard.netdata.io",$initial_referring_domain:s?"masked":null,$initial_referrer:s?"masked":null,$referring_domain:s?"masked":null,$referrer:s?"masked":null,event_source:"agent dashboard",netdata_version:i.version,netdata_machine_guid:e,netdata_person_id:t||"Unavailable",netdata_buildinfo:i.buildinfo,netdata_release_channel:i["release-channel"],mirrored_host_count:null===(a=i.mirrored_hosts)||void 0===a?void 0:a.length,alarms_normal:null===(n=i.alarms)||void 0===n?void 0:n.normal,alarms_warning:null===(r=i.alarms)||void 0===r?void 
0:r.warning,alarms_critical:i.alarms.critical,host_os_name:i.os_name,host_os_id:i.os_id,host_os_id_like:i.os_id_like,host_os_version:i.os_version,host_os_version_id:i.os_version_id,host_os_detection:i.os_detection,system_cores_total:i.cores_total,system_total_disk_space:i.total_disk_space,system_cpu_freq:i.cpu_freq,system_ram_total:i.ram_total,system_kernel_name:i.kernel_name,system_kernel_version:i.kernel_version,system_architecture:i.architecture,system_virtualization:i.virtualization,system_virt_detection:i.virt_detection,system_container:i.container,system_container_detection:i.container_detection,container_os_name:i.container_os_name,container_os_id:i.container_os_id,container_os_id_like:i.container_os_id_like,container_os_version:i.container_os_version,container_os_version_id:i.container_os_version_id,host_collectors_count:i.collectors.length,host_cloud_enabled:i["cloud-enabled"],host_cloud_available:i["cloud-available"],host_agent_claimed:i["agent-claimed"],host_aclk_available:i["aclk-available"],host_aclk_implementation:i["aclk-implementation"],host_allmetrics_json_used:i["allmetrics-json-used"],host_allmetrics_prometheus_used:i["allmetrics-prometheus-used"],host_allmetrics_shell_used:i["allmetrics-shell-used"],host_charts_count:i["charts-count"],host_dashboard_used:i["dashboard-used"],host_metrics_count:i["metrics-count"],host_notification_methods:i["notification-methods"],config_memory_mode:i["memory-mode"],config_exporting_enabled:i["exporting-enabled"],config_exporting_connectors:i["exporting-connectors"],config_hosts_available:i["hosts-available"],config_https_enabled:i["https-enabled"],config_multidb_disk_quota:i["multidb-disk-quota"],config_page_cache_size:i["page-cache-size"],config_stream_enabled:i["stream-enabled"],config_web_enabled:i["web-enabled"],host_is_parent:null===(o=i.host_labels)||void 0===o?void 0:o._is_parent,mirrored_hosts_reachable:i.mirrored_hosts_status.filter((function(e){return e.reachable})).length,mirrored_hosts_unreachable:i.mirrored_hosts_status.filter((function(e){return!e.reachable})).length,host_collectors:i.collectors,host_is_k8s_node:i.is_k8s_node}));case 18:case"end":return l.stop()}}),kt)}var Rt=function e(t){var a=t.machineGuid,n=t.maxRedirects,r=t.name,o=t.registryServer,i=t.url;return T.get("".concat(o,"/api/v1/registry"),{headers:{"Cache-Control":"no-cache, no-store",Pragma:"no-cache"},params:{action:"access",machine:a,name:r,url:i},withCredentials:!0}).then((function(t){var s=t.data,l="string"===typeof s.registry,c=s;if("string"===typeof s.status&&"ok"===s.status||(c=null),null===c)return l&&n>0?e({maxRedirects:n-1,machineGuid:a,name:r,registryServer:s.registry,url:i}):{registryServer:o};var u=s.urls.filter((function(e){return e[1]!==h.c}));return{personGuid:s.person_guid||null,registryServer:o,urls:u}})).catch((function(){return console.warn("error calling registry:",o),null}))},Bt=function(e){var t={};e.slice().reverse().forEach((function(e){var a=Object(A.a)(e,5),n=a[0],r=a[1],o=a[2],i=a[3],s=a[4],l=t[n]||{lastTimestamp:0,accesses:0,alternateUrls:[],guid:"",url:"",name:""},c=l.lastTimestamp<o,u={guid:l.guid||n,url:c?r:l.url,lastTimestamp:c?o:l.lastTimestamp,accesses:l.accesses+i,name:c?s:l.name,alternateUrls:l.alternateUrls.concat(r)};t[n]=u}));var a=Object(We.a)(e.slice().reverse().map((function(e){return Object(A.a)(e,1)[0]}))).map((function(e){return t[e]}));return{registryMachines:t,registryMachinesArray:a}};function Ft(e){var t,a,n,r,o,i,s,l,c,u,d,h,p,f,g;return E.a.wrap((function(m){for(;;)switch(m.prev=m.next){case 0:return 
t=e.payload,a=t.serverDefault,n="".concat(a,"api/v1/registry?action=hello"),m.prev=3,m.next=6,Object(S.a)(T.get,n,{headers:{"Cache-Control":"no-cache, no-store",Pragma:"no-cache"},withCredentials:!0});case 6:r=m.sent,m.next=15;break;case 9:return m.prev=9,m.t0=m.catch(3),console.warn("error accessing registry or Do-Not-Track is enabled"),m.next=14,Object(S.c)(Ke.f.failure());case 14:return m.abrupt("return");case 15:return o=r.data.cloud_base_url,i=r.data.hostname,s=r.data.machine_guid,l=r.data.registry,c=l===Ve.b,m.next=22,Object(S.c)(Ke.f.success({cloudBaseURL:o,hostname:i,isUsingGlobalRegistry:c,machineGuid:s}));case 22:return u=i,d=a,m.next=26,Object(S.a)(Rt,{machineGuid:s,maxRedirects:2,name:u,registryServer:l,url:d});case 26:if(h=m.sent,!r.data.anonymous_statistics){m.next=30;break}return m.next=30,Object(S.e)(Nt,r.data.machine_guid,null===h||void 0===h?void 0:h.personGuid);case 30:if(!((null===h||void 0===h?void 0:h.urls)&&(null===h||void 0===h?void 0:h.personGuid))){m.next=35;break}return p=Bt(h.urls),f=p.registryMachines,g=p.registryMachinesArray,m.next=35,Object(S.c)(Object(Ke.C)({personGuid:h.personGuid,registryMachines:f,registryMachinesArray:g}));case 35:return m.next=37,Object(S.c)(Object(Ke.a)({registryServer:(null===h||void 0===h?void 0:h.registryServer)||l}));case 37:case"end":return m.stop()}}),jt,null,[[3,9]])}var Ht=function(e){return"options.".concat(e)};function zt(e){var t=e.payload,a=t.key,n=t.value;"stop_updates_when_focus_is_lost"!==a&&localStorage.setItem(Ht(a),JSON.stringify(n))}function Ut(e){var t;return E.a.wrap((function(a){for(;;)switch(a.prev=a.next){case 0:return(t=e.payload).isActive?document.body.className="with-panel":document.body.className="",a.next=4,Object(S.b)(1e3*Xe);case 4:return a.next=6,Object(S.c)(Object(Ke.x)({isActive:t.isActive}));case 6:case"end":return a.stop()}}),Tt)}function Gt(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.e)(Mt);case 2:return e.next=4,Object(S.e)(Lt);case 4:return e.next=6,Object(S.g)(Ke.f.request,Ft);case 6:return e.next=8,Object(S.e)(wt);case 8:return e.next=10,Object(S.g)(Ke.v,zt);case 10:return e.next=12,Object(S.g)(Ke.w,Ut);case 12:case"end":return e.stop()}}),Dt)}var Wt=a(64),Vt=E.a.mark(qt),Yt=E.a.mark($t),Xt="LOCAL-STORAGE-NEEDS-SYNC";function Kt(e){var t=e.payload,a=t.after,n=t.before;if(window.urlOptions)window.urlOptions.after===a&&window.urlOptions.before===n||window.urlOptions.netdataHighlightCallback(!0,a,n);else{var r=Object(Wt.b)(),o=Math.round(a).toString(),i=Math.round(n).toString();r.highlight_after===o&&r.highlight_before===i||Object(Wt.d)({highlight_after:o,highlight_before:i})}}function Zt(){window.urlOptions?window.urlOptions.netdataHighlightCallback(!1,0,0):Object(Wt.c)(["highlight_after","highlight_before"])}function qt(e){var t,a;return E.a.wrap((function(n){for(;;)switch(n.prev=n.next){case 0:if(t=e.payload,!window.showSignInModal){n.next=8;break}return window.showSignInModal(),n.next=5,Object(S.f)(b.a);case 5:a=t.signInLinkHref,window.localStorage.setItem(Xt,"true"),window.location.href=a;case 8:case"end":return n.stop()}}),Vt)}function $t(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.g)(Ke.r,Kt);case 2:return e.next=4,Object(S.g)(Ke.d,Zt);case 4:return e.next=6,Object(S.g)(b.d,qt);case 6:case"end":return e.stop()}}),Yt)}var Jt=E.a.mark(Qt);function Qt(){return E.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,Object(S.e)(Gt);case 2:return e.next=4,Object(S.e)(Ue);case 
4:return e.next=6,Object(S.e)($t);case 6:case"end":return e.stop()}}),Jt)}var ea=Object(c.b)(),ta=function(){var e=Object(l.e)(x,Object(l.d)(Object(l.a)(ea)));return ea.run(Qt),e}(),aa=a(294),na=a.n(aa),ra=(a(448),a(467),a(468),a(469),a(470),a(156)),oa=a.n(ra);window.$=oa.a,window.jQuery=oa.a;a(471),a(484),a(485),a(486),a(487),a(490);var ia=function(e){return new Promise((function(t,a){var n=document.createElement("link");n.setAttribute("rel","stylesheet"),n.setAttribute("type","text/css"),n.setAttribute("href",e),n.onload=function(){t()},n.onerror=function(){a(Error("Error loading css: ".concat(e)))},document.getElementsByTagName("head")[0].appendChild(n)}))},sa=a(42),la=a(8),ca=a(177),ua=a(311),da=a(612),ha=a(309),pa=a(620),fa=a(637),ga=a(297),ma=a(638),ba=a(34),va=a.n(ba),_a=function(e){return"sparkline"===e.dygraphTheme},ya={dygraph:{hasToolboxPanAndZoom:!0,xssRegexIgnore:new RegExp("^/api/v1/data.result.data$"),format:"json",options:function(e){return"function"===typeof this.isLogScale?"ms|flip".concat(this.isLogScale(e)?"|abs":""):""},hasLegend:function(e){var t=e.legend,a=void 0===t||t;return!_a(e)&&Boolean(a)},trackColors:!0,pixelsPerPoint:function(e){return _a(e)?2:3},isLogScale:function(e){return"logscale"===e.dygraphTheme},containerClass:function(e){return this.hasLegend(e)?va()("netdata-container-with-legend","bottom"===e.legendPosition&&"netdata-container-with-legend--bottom"):"netdata-container"}},sparkline:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result$"),format:"array",options:function(){return"flip|abs"},hasLegend:function(){return!1},trackColors:!1,pixelsPerPoint:function(){return 3},containerClass:function(){return"netdata-container"}},peity:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result$"),format:"ssvcomma",options:function(){return"null2zero|flip|abs"},hasLegend:function(){return!1},trackColors:!1,pixelsPerPoint:function(){return 3},containerClass:function(){return"netdata-container"}},google:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result.rows$"),format:"datatable",options:function(){return""},hasLegend:function(){return!1},trackColors:!1,pixelsPerPoint:function(){return 4},containerClass:function(){return"netdata-container"}},d3pie:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result.data$"),format:"json",hasLegend:function(){return!1},options:function(){return"objectrows|ms"},trackColors:!1,pixelsPerPoint:function(){return 15},containerClass:function(){return"netdata-container"}},easypiechart:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result$"),format:"array",options:function(){return"absolute"},hasLegend:function(){return!1},trackColors:!0,pixelsPerPoint:function(){return 3},aspectRatio:100,containerClass:function(){return"netdata-container-easypiechart"}},gauge:{hasToolboxPanAndZoom:!1,xssRegexIgnore:new RegExp("^/api/v1/data.result$"),format:"array",options:function(){return"absolute"},hasLegend:function(){return!1},trackColors:!0,pixelsPerPoint:function(){return 3},aspectRatio:60,containerClass:function(){return"netdata-container-gauge"}},textonly:{containerClass:function(){return"netdata-container"},format:"array",hasLegend:function(){return!1},options:function(){return"absolute"},pixelsPerPoint:function(){return 3},trackColors:!1,xssRegexIgnore:new 
RegExp("^/api/v1/data.result$")},groupbox:{containerClass:function(){return"netdata-container"},hasLegend:function(){return!1},options:function(){return"absolute"},format:"json",trackColors:!1,pixelsPerPoint:function(){return 3},xssRegexIgnore:new RegExp("^/api/v1/data.result$")}},Oa=function(e,t){var a=e.appendOptions,n=e.overrideOptions,r="";return r+=n?n.toString():ya[e.chartLibrary].options(e),"string"===typeof a&&(r+="|".concat(encodeURIComponent(a))),r+="|jsonwrap",t&&(r+="|nonzero"),("sum-of-abs"===e.dimensionsAggrMethod||!e.dimensionsAggrMethod&&e.groupBy&&"dimension"!==e.groupBy)&&(r+="|absolute"),r},xa=a(610),wa=Math.pow(2,31)-1,Ea=a(39),Sa=a(165),Ca=function(e){var t=e.containerNode,a=e.hasEmptyData,n=window.screen.height,o=t.clientHeight,i=Math.max(.2*o,5);o-=i;var s=(i-5)/2,l=t.clientWidth/10;o>l&&(s+=(o-l)/2,o=l),o>n/20&&(s+=(o-n/20)/2,o=n/20);var c=a?" empty":" netdata",u=a?"noData":"loading";return r.a.createElement("div",{className:"netdata-message icon",style:{fontSize:o,paddingTop:s}},r.a.createElement(Sa.a,{iconType:u}),c)},Aa=a(618),ka=a(617),ja=function(e){return e.ctrlKey?window.NETDATA.options.current.pan_and_zoom_factor*window.NETDATA.options.current.pan_and_zoom_factor_multiplier_control:e.shiftKey?window.NETDATA.options.current.pan_and_zoom_factor*window.NETDATA.options.current.pan_and_zoom_factor_multiplier_shift:e.altKey?window.NETDATA.options.current.pan_and_zoom_factor*window.NETDATA.options.current.pan_and_zoom_factor_multiplier_alt:window.NETDATA.options.current.pan_and_zoom_factor},Ta=a(149),Da=a(102),Pa=function(e,t){return e===t||Number.isNaN(e)&&Number.isNaN(t)},Ma=[],La=[],Ia=function(e,t){var a=t;return e===t?("undefined"===typeof Ma[a]&&(Ma[a]=new Intl.NumberFormat(void 0,{useGrouping:!0,minimumFractionDigits:e,maximumFractionDigits:t})),Ma[a]):0===e?("undefined"===typeof La[a]&&(La[a]=new Intl.NumberFormat(void 0,{useGrouping:!0,minimumFractionDigits:e,maximumFractionDigits:t})),La[a]):new Intl.NumberFormat(void 0,{useGrouping:!0,minimumFractionDigits:e,maximumFractionDigits:t})},Na=function(e,t,a){return function(n){if("number"!==typeof n)return"-";var r,o,i=e(n);if("number"!==typeof i)return i;if(null!==t)return t.format(i);if(-1!==a)r=a,o=a;else{r=0;var s=i<0?-i:i;o=s>1e3?0:s>10?1:s>1?2:s>.1?2:s>.01?4:s>.001?5:s>1e-4?6:7}return Ia(r,o).format(i)}},Ra=function(e){var t=e.attributes,a=e.data,r=e.units,o=e.unitsCommon,i=e.unitsDesired,s=e.uuid,l=Object(la.b)(ue.J),c=Object(la.b)(ue.y),u=Object(n.useState)((function(){return Ta.a})),d=Object(A.a)(u,2),h=d[0],p=d[1],f=Object(n.useState)(),g=Object(A.a)(f,2),m=g[0],b=g[1],v=Object(n.useState)(),_=Object(A.a)(v,2),y=_[0],O=_[1],x=Object(n.useState)(r),w=Object(A.a)(x,2),E=w[0],S=w[1],C=Object(n.useState)(-1),k=Object(A.a)(C,2),j=k[0],T=k[1],D=Object(n.useState)(null),P=Object(A.a)(D,2),M=P[0],L=P[1],I=t.decimalDigits,N=void 0===I?-1:I,R=Object(n.useMemo)((function(){return Na(h,M,N)}),[h,N,M]),B=Object(n.useRef)(R),F=function(e,t,a){B.current=Na(e,t,a)};return{legendFormatValue:R,legendFormatValueDecimalsFromMinMax:Object(n.useCallback)((function(e,t){if(Pa(m,e)&&Pa(y,t))return B.current;b(e),O(t);var n=Da.a.get(s,e,t,r,i,o,(function(e){S(e)}),l,c);p((function(){return n}));var u,d=n(e),h=n(t);if("number"!==typeof d||"number"!==typeof h)return F(n,M,N),B.current;if(a.min===a.max)u=-1;else if(-1!==N)u=N;else{var f;u=(f=d===h?Math.abs(d):Math.abs(h-d))>1e3?0:f>10?1:f>1?2:f>.1?2:f>.01?4:f>.001?5:f>1e-4?6:7}var g=M;return u!==j&&(g=u<0?null:Ia(u,u),L((function(){return 
g})),T(u)),F(n,g,u),B.current}),[j,N,m,y,s,l,r,i,o,c,a.min,a.max,M]),unitsCurrent:E}},Ba=a(56),Fa={r:255,g:0,b:0},Ha=a(17),za=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__LegendContainer",componentId:"ltgk2z-0"})(["margin-bottom:",";padding-left:35px;"],Object(Z.J)(3)),Ua=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__LegendFirstRow",componentId:"ltgk2z-1"})(["margin-top:4px;display:flex;justify-content:space-between;"]),Ga=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__LegendSecondRow",componentId:"ltgk2z-2"})(["margin-top:4px;display:flex;justify-content:space-between;"]),Wa=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__LegendUnit",componentId:"ltgk2z-3"})([""]),Va=$.d.span.withConfig({displayName:"chart-legend-bottomstyled__DateTimeSeparator",componentId:"ltgk2z-4"})(["margin:0 3px;"]),Ya=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__LegendItems",componentId:"ltgk2z-5"})(["display:flex;flex-wrap:wrap;overflow:auto;max-height:80px;"]),Xa=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__DimensionItem",componentId:"ltgk2z-6"})(["display:flex;align-items:center;color:",";margin-right:12px;cursor:pointer;opacity:",";user-select:none;font-size:11px;&:focus{outline:none;}"],(function(e){return e.color}),(function(e){return e.isDisabled?.3:null})),Ka=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__DimensionItemToolboxPlaceholder",componentId:"ltgk2z-7"})(["width:140px;height:20px;"]),Za=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__DimensionIcon",componentId:"ltgk2z-8"})(["width:14px;height:7px;border-radius:4px;overflow:hidden;background-color:",";"],(function(e){return e.color})),qa=$.d.span.withConfig({displayName:"chart-legend-bottomstyled__DimensionLabel",componentId:"ltgk2z-9"})(["margin-left:3px;"]),$a=$.d.span.withConfig({displayName:"chart-legend-bottomstyled__DimensionValue",componentId:"ltgk2z-10"})(["margin-left:5px;min-width:30px;"]),Ja=$.d.div.withConfig({displayName:"chart-legend-bottomstyled__ToolboxContainer",componentId:"ltgk2z-11"})(["position:relative;touch-action:none;"]),Qa={},en=Object(Ha.a)(Ea.f,(function(e){var t=e.dimension_names,a=e.keys;return{dimensionNames:t,keys:void 0===a?Qa:a}})),tn=function(e){var t=e.id,a=e.index,o=Object(la.b)(Object(n.useCallback)((function(e){return en(e,{id:t})}),[t])),i=o.dimensionNames,s=o.keys,l=s.chart,c=s.node;if(l&&c&&2===Object.keys(s).length)return r.a.createElement(qa,null,l[a],"@",c[a]);var u=i[a];return r.a.createElement(qa,null,u)},an=function(e){var t=e.chartUuid,a=e.chartMetadata,o=e.chartLibrary,i=e.colors,s=e.hoveredRow,l=e.hoveredX,c=e.legendFormatValue,u=e.onDimensionClick,d=e.selectedDimensions,h=e.showLatestOnBlur,p=e.unitsCurrent,f=e.viewBefore,g=Object(la.b)(Object(n.useCallback)((function(e){return Object(Ea.f)(e,{id:t})}),[t])),m=g.dimension_names,b=g.dimension_ids,v=-1===s&&!h,_=new Date(l||f),y=window.NETDATA.options.current["color_fill_opacity_".concat(a.chart_type)],O=Object(sa.b)(),x=O.localeDateString,w=O.localeTimeString,E=Object(n.useRef)(null);return 
Object(n.useEffect)((function(){E.current&&window.Ps.initialize(E.current,{wheelSpeed:.2,wheelPropagation:!0,swipePropagation:!0,minScrollbarLength:null,maxScrollbarLength:null,useBothWheelAxes:!1,suppressScrollX:!0,suppressScrollY:!1,scrollXMarginOffset:0,scrollYMarginOffset:0,theme:"default"})}),[E]),r.a.createElement("div",{className:va()("netdata-chart-legend","netdata-".concat(o,"-legend"))},r.a.createElement("span",{className:"netdata-legend-title-date",title:Object(Ba.c)(!0,a)},v?Object(Ba.c)(!1,a):x(_)),r.a.createElement("br",null),r.a.createElement("span",{className:"netdata-legend-title-time",title:Object(Ba.d)(g,a)},v?a.context.toString():w(_)),r.a.createElement("br",null),r.a.createElement("span",{className:"netdata-legend-title-units"},p),r.a.createElement("br",null),r.a.createElement("div",{className:"netdata-legend-series",ref:E},r.a.createElement("div",{className:"netdata-legend-series-content"},b.map((function(e,o){var l,h=m[o],p=i[h],f=function(e){if(!e)return Fa;var t=e.replace(/^#?([a-f\d])([a-f\d])([a-f\d])$/i,(function(e,t,a,n){return t+t+a+a+n+n})),a=/^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(t);return a||console.warn("wrong color format:",e),a?{r:parseInt(a[1],16),g:parseInt(a[2],16),b:parseInt(a[3],16)}:Fa}(p),b=0===d.length||d.includes(h);if(v)l=null;else if(-1!==s){var _=g.result.data[s];l=_?_[o+1]:null}else l=g.view_latest_values[o];return r.a.createElement(n.Fragment,{key:e},0!==o&&r.a.createElement("br",null),r.a.createElement("span",{title:h,className:va()("netdata-legend-name",b?"selected":"not-selected"),onClick:function(e){u(h,e)},role:"button",style:{color:p},tabIndex:0},r.a.createElement("table",{className:"netdata-legend-name-table-".concat(a.chart_type),style:{backgroundColor:"rgba(".concat(f.r,",").concat(f.g,",").concat(f.b,",").concat(y,")")}},r.a.createElement("tbody",null,r.a.createElement("tr",{className:"netdata-legend-name-tr"},r.a.createElement("td",{className:"netdata-legend-name-td"}))))," ",r.a.createElement(tn,{id:t,index:o})),r.a.createElement("span",{title:h,className:va()("netdata-legend-value",!b&&"hidden"),onClick:function(e){u(h,e)},role:"button",style:{color:p},tabIndex:0},c(l)))})))))},nn=function(e){var t=e.chartMetadata,a=e.showUndefined,n=e.hoveredX,o=e.viewBefore,i=e.chartData,s=Object(sa.b)(),l=s.localeDateString,c=s.localeTimeString,u=new Date(n||o);return r.a.createElement("div",null,r.a.createElement("span",{title:Object(Ba.c)(!0,t)},a?Object(Ba.c)(!1,t):l(u)),r.a.createElement(Va,null,"|"),r.a.createElement("span",{title:Object(Ba.d)(i,t)},a?t.context.toString():c(u)))},rn=function(e){var t=e.chartUuid,a=e.chartMetadata,o=e.colors,i=e.hoveredRow,s=e.hoveredX,l=e.legendFormatValue,c=e.onDimensionClick,u=e.selectedDimensions,d=e.showLatestOnBlur,h=e.unitsCurrent,p=e.viewBefore,f=e.legendToolbox,g=e.resizeHandler,m=-1===i&&!d,b=Object(la.b)(Object(n.useCallback)((function(e){return Object(Ea.f)(e,{id:t})}),[t])),v=b.dimension_names,_=b.dimension_ids;return r.a.createElement(za,null,r.a.createElement(Ua,null,r.a.createElement(Wa,null,h),r.a.createElement(nn,{chartMetadata:a,showUndefined:m,hoveredX:s,viewBefore:p,chartData:b})),r.a.createElement(Ga,null,r.a.createElement(Ya,null,_.map((function(e,a){var n,s=v[a],d=o[s],h=0===u.length||u.includes(s);if(m)n=null;else if(-1!==i){var p=b.result.data[i];n=p?p[a+1]:null}else n=b.view_latest_values[a];return 
r.a.createElement(Xa,{color:d,onClick:function(e){c(s,e)},role:"button",tabIndex:0,isDisabled:!h,key:e},r.a.createElement(Za,{title:s,color:d}),r.a.createElement(tn,{id:t,index:a}),r.a.createElement($a,null,h&&l(n)))})),r.a.createElement(Ka,null)),r.a.createElement(Ja,null,f,g)))},on=function(e){var t=e.attributes,a=e.chartUuid,o=e.chartMetadata,i=e.chartLibrary,s=e.colors,l=e.hoveredRow,c=e.hoveredX,u=e.legendFormatValue,d=e.selectedDimensions,h=e.setSelectedDimensions,p=e.showLatestOnBlur,f=e.unitsCurrent,g=e.viewBefore,m=e.legendToolbox,b=e.resizeHandler,v=Object(la.b)(Object(n.useCallback)((function(e){return Object(Ea.f)(e,{id:a}).dimension_names}),[a])),_=function(e,t){t.preventDefault();var a=t.shiftKey||t.ctrlKey,n=Object(Ba.b)({allDimensions:v,selectedDimensions:d,clickedDimensionName:e,isModifierKeyPressed:a});h(n)};return"bottom"===t.legendPosition?r.a.createElement(rn,{chartUuid:a,chartLibrary:i,chartMetadata:o,colors:s,hoveredRow:l,hoveredX:c,legendFormatValue:u,onDimensionClick:_,selectedDimensions:d,showLatestOnBlur:p,unitsCurrent:f,viewBefore:g,legendToolbox:m,resizeHandler:b}):r.a.createElement(an,{chartUuid:a,chartLibrary:i,chartMetadata:o,colors:s,hoveredRow:l,hoveredX:c,legendFormatValue:u,onDimensionClick:_,selectedDimensions:d,showLatestOnBlur:p,unitsCurrent:f,viewBefore:g})},sn=a(110),ln=function(e){var t=e.onToolboxLeftClick,a=e.onToolboxRightClick,n=e.onToolboxZoomInClick,o=e.onToolboxZoomOutClick;return r.a.createElement("div",{className:"netdata-legend-toolbox"},r.a.createElement(sn.a,{className:"netdata-legend-toolbox-button",onClick:t,iconType:"left",popoverTitle:"Pan Left",popoverContent:"Pan the chart to the left. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>"}),r.a.createElement(sn.a,{className:"netdata-legend-toolbox-button",onClick:a,iconType:"right",popoverTitle:"Pan Right",popoverContent:"Pan the chart to the right. You can also <b>drag it</b> with your mouse or your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>"}),r.a.createElement(sn.a,{className:"netdata-legend-toolbox-button",onClick:n,iconType:"zoomIn",popoverTitle:"Chart Zoom In",popoverContent:"Zoom in the chart. You can also press SHIFT and select an area of the chart, or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out. <br/><small>Help can be disabled from the settings.</small>"}),r.a.createElement(sn.a,{className:"netdata-legend-toolbox-button",onClick:o,iconType:"zoomOut",popoverTitle:"Chart Zoom Out",popoverContent:"Zoom out the chart. 
You can also press SHIFT or ALT and use the mouse wheel, or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the settings.</small>"}))},cn=a(114),un=a(579),dn=a(611),hn=a(51),pn=a.n(hn),fn=(a(492),a(295)),gn=a(296),mn=a(577),bn=a(224),vn=a.n(bn),_n=function(e,t){return e.map((function(e){var a=Object(gn.a)(e),n=a[0],r=a.slice(1),o=[],i=0,s=0;return r.map((function(e,a){return{isVisible:t[a],value:e}})).slice().reverse().forEach((function(e){var t=e.isVisible,a=e.value;t?a>=0?(s+=a,o.push(s)):(i+=a,o.push(i)):o.push(0)})),[n].concat(o)}))},yn=function(e,t,a,n){var r,o=n.isLogScale(e),i=e.dygraphType,s=void 0===i?a.chart_type:i,l=e.groupBy;if(l&&"dimension"!==l&&("percentage"===(r=a.units)||"percent"===r||-1!==r.indexOf("%")))return"line";var c=s;return"stacked"===c&&1===t.dimensions&&(c="area"),"stacked"===c&&o&&(c="area"),c},On=Object(mn.a)(Object(da.a)("true"),(function(){return vn()(window.NETDATA.themes.current.background)})),xn=function(e){return e.map((function(e){return vn()(e).mix(On(),.2).hex()}))},wn=function(e,t){return e?window.NETDATA.options.current.color_fill_opacity_fake_stacked:"stacked"===t?window.NETDATA.options.current.color_fill_opacity_stacked:window.NETDATA.options.current.color_fill_opacity_area},En=(a(510),a(613)),Sn=function(e,t){var a=Object(En.a)(!1),r=Object(A.a)(a,2),o=r[0],i=r[1],s=Object(n.useRef)(null),l=Object(n.useCallback)((function(a){var n=a.getArea().x,r=a.toDomXCoord(1e3*t.current.chartData.first_entry),o=r>n;if(i(o),o&&s.current){var l=e.current.getBoundingClientRect().height;s.current.style.left="".concat(n,"px"),s.current.style.right="calc(100% - ".concat(r,"px)"),s.current.style.top="".concat(l/2,"px")}}),[]);return[o,s,l]},Cn=function(e,t,a,n){e.current.style.left="".concat(t,"px"),e.current.style.right="calc(100% - ".concat(a,"px)"),e.current.style.top=n},An=$.d.div.withConfig({displayName:"proceeded-chart-disclaimer__Container",componentId:"sc-5o8wdv-0"})(["display:block;"]),kn=Object(n.forwardRef)((function(e,t){return r.a.createElement(An,{ref:t,className:"dygraph__history-tip","data-testid":"proceededChartDisclaimer"},r.a.createElement("span",{className:"dygraph__history-tip-content"},"Want to extend your history of real-time metrics?",r.a.createElement("br",null),r.a.createElement("a",{href:"https://learn.netdata.cloud/guides/longer-metrics-storage/",target:"_blank",rel:"noopener noreferrer","data-testid":"proceededChartDisclaimer-configure"},"Configure Netdata's\xa0",r.a.createElement("b",null,"history")),"\xa0or use the\xa0",r.a.createElement("a",{href:"https://learn.netdata.cloud/docs/agent/database/engine/",target:"_blank",rel:"noopener noreferrer","data-testid":"proceededChartDisclaimer-engine"},"DB engine"),"."))})),jn={WARNING:"#FFF8E1",CRITICAL:"#FFEBEF",CLEAR:"#E5F5E8"},Tn=function(e){return jn[e]||null},Dn={WARNING:"#FFC300",CRITICAL:"#F59B9B",CLEAR:"#68C47D"},Pn=function(e){return Dn[e]||null},Mn={WARNING:"#536775",CRITICAL:"#FF4136",CLEAR:"#00AB44"},Ln=function(e){return Mn[e]||null},In=$.d.div.withConfig({displayName:"alarmBadge__Container",componentId:"tz967u-0"})(["position:absolute;margin-right:10px;overflow:hidden;pointer-events:none;direction:rtl;z-index:10;"]),Nn=$.d.div.withConfig({displayName:"alarmBadge__Badge",componentId:"tz967u-1"})(["display:inline-block;border-radius:36px;padding:2px 12px;background:",";border:1px solid ",";color:",";font-size:12px;font-weight:700;direction:ltr;white-space:nowrap;"],(function(e){return e.background}),(function(e){return 
e.border}),(function(e){return e.color})),Rn=Object(n.forwardRef)((function(e,t){var a=e.isVisible,n=e.status,o=e.label;return r.a.createElement(In,{ref:t},a&&r.a.createElement(Nn,{background:Tn(n),border:Pn(n),color:Ln(n)},o))})),Bn=function(e){var t=e.attributes,a=e.chartData,n=e.chartMetadata,r=e.chartSettings,o=e.dimensionsVisibility,i=e.hiddenLabelsElementId,s=e.isFakeStacked,l=e.orderedColors,c=e.setMinMax,u=e.shouldSmoothPlot,d=e.unitsCurrent,h=e.xAxisDateString,p=e.xAxisTimeString,f="sparkline"===t.dygraphTheme,g=f?3:4,m=r.isLogScale(t),b=yn(t,a,n,r),v=t.dygraphSmooth,_=void 0===v?"line"===b&&!f:v,y=t.dygraphDrawAxis,O=void 0===y||y,x="bottom"===t.legendPosition,w=t.dygraphColors,E=void 0===w?l:w,S=t.dygraphRightGap,C=void 0===S?5:S,A=t.dygraphShowRangeSelector,k=void 0!==A&&A,j=t.dygraphShowRoller,T=void 0!==j&&j,D=t.dygraphTitle,P=void 0===D?t.title||n.title:D,M=t.dygraphTitleHeight,L=void 0===M?19:M,I=t.dygraphLegend,N=void 0===I?"always":I,R=t.dygraphLabelsDiv,B=void 0===R?i:R,F=t.dygraphLabelsSeparateLine,H=void 0===F||F,z=t.dygraphIncludeZero,U=void 0===z?"stacked"===b:z,G=t.dygraphShowZeroValues,V=void 0===G||G,Y=t.dygraphShowLabelsOnHighLight,X=void 0===Y||Y,K=t.dygraphHideOverlayOnMouseOut,Z=void 0===K||K,q=t.dygraphXRangePad,$=void 0===q?0:q,J=t.dygraphYRangePad,Q=void 0===J?1:J,ee=t.dygraphValueRange,te=void 0===ee?[null,null]:ee,ae=t.dygraphYLabelWidth,ne=void 0===ae?12:ae,re=t.dygraphStrokeWidth,oe=void 0===re?"stacked"===b?.1:!0===_?1.5:.7:re,ie=t.dygraphStrokePattern,se=t.dygraphDrawPoints,le=void 0!==se&&se,ce=t.dygraphDrawGapEdgePoints,ue=void 0===ce||ce,de=t.dygraphConnectSeparatedPoints,he=void 0!==de&&de,pe=t.dygraphPointSize,fe=void 0===pe?1:pe,ge=t.dygraphStepPlot,me=void 0!==ge&&ge,be=t.dygraphStrokeBorderColor,ve=void 0===be?window.NETDATA.themes.current.background:be,_e=t.dygraphStrokeBorderWidth,ye=void 0===_e?0:_e,Oe=t.dygraphFillGraph,xe=void 0===Oe?"area"===b||"stacked"===b:Oe,we=t.dygraphFillAlpha,Ee=void 0===we?wn(s,b):we,Se=t.dygraphStackedGraph,Ce=void 0===Se?"stacked"===b&&!s:Se,Ae=t.dygraphStackedGraphNanFill,ke=void 0===Ae?"none":Ae,je=t.dygraphAxisLabelFontSize,Te=void 0===je?10:je,De=t.dygraphAxisLineColor,Pe=void 0===De?window.NETDATA.themes.current.axis:De,Me=t.dygraphAxisLineWidth,Le=void 0===Me?1:Me,Ie=t.dygraphDrawGrid,Ne=void 0===Ie||Ie,Re=t.dygraphGridLinePattern,Be=t.dygraphGridLineWidth,Fe=void 0===Be?1:Be,He=t.dygraphGridLineColor,ze=void 0===He?window.NETDATA.themes.current.grid:He,Ue=t.dygraphMaxNumberWidth,Ge=void 0===Ue?8:Ue,We=t.dygraphSigFigs,Ve=t.dygraphDigitsAfterDecimal,Ye=void 0===Ve?2:Ve,Xe=t.dygraphHighlighCircleSize,Ke=void 0===Xe?g:Xe,Ze=t.dygraphHighlightSeriesOpts,qe=t.dygraphHighlightSeriesBackgroundAlpha,$e=t.dygraphXPixelsPerLabel,Je=void 0===$e?50:$e,Qe=t.dygraphXAxisLabelWidth,et=void 0===Qe?60:Qe,tt=t.dygraphDrawXAxis,at=void 0===tt?O:tt,nt=t.dygraphYPixelsPerLabel,rt=void 0===nt?15:nt,ot=t.dygraphYAxisLabelWidth,it=void 0===ot?x?30:50:ot,st=t.dygraphDrawYAxis,lt=void 0===st?O:st;return{colors:s?xn(Object(W.a)(E)):E,rightGap:f?0:C,showRangeSelector:k,showRoller:T,title:f?void 0:P,titleHeight:L,legend:N,labels:a.result.labels,labelsDiv:B,labelsSeparateLines:!!f||H,labelsShowZeroValues:!m&&V,labelsKMB:!1,labelsKMG2:!1,showLabelsOnHighlight:X,hideOverlayOnMouseOut:Z,includeZero:U,xRangePad:$,yRangePad:f?1:Q,valueRange:te,ylabel:f||x?void 
0:d,yLabelWidth:f||x?0:ne,plotter:_&&u?window.smoothPlotter:null,strokeWidth:oe,strokePattern:ie,drawPoints:le,drawGapEdgePoints:ue,connectSeparatedPoints:!m&&he,pointSize:fe,stepPlot:me,strokeBorderColor:ve,strokeBorderWidth:ye,fillGraph:xe,fillAlpha:Ee,stackedGraph:Ce,stackedGraphNaNFill:ke,drawAxis:!f&&O,axisLabelFontSize:Te,axisLineColor:Pe,axisLineWidth:Le,drawGrid:!f&&Ne,gridLinePattern:Re,gridLineWidth:Fe,gridLineColor:ze,maxNumberWidth:Ge,sigFigs:We,digitsAfterDecimal:Ye,highlightCircleSize:Ke,highlightSeriesOpts:Ze,highlightSeriesBackgroundAlpha:qe,visibility:o,logscale:m,axes:{x:{pixelsPerLabel:Je,ticker:pn.a.dateTicker,axisLabelWidth:et,drawAxis:!f&&at,axisLabelFormatter:function(e){return e.toTimeString().startsWith("00:00:00")?h(e):p(e)}},y:{logscale:m,pixelsPerLabel:rt,axisLabelWidth:it,drawAxis:!f&&lt,axisLabelFormatter:function(e){return c([this.axes_[0].extremeRange[0],this.axes_[0].extremeRange[1]])(e)}}}}},Fn=function(e){var t=e.attributes,a=e.chartData,o=e.chartMetadata,i=e.chartElementClassName,s=e.chartElementId,l=e.chartLibrary,c=e.chartUuid,u=e.dimensionsVisibility,d=e.hasEmptyData,h=e.hasLegend,p=e.isRemotelyControlled,f=e.onUpdateChartPanAndZoom,m=e.orderedColors,b=e.immediatelyDispatchPanAndZoom,v=e.hoveredRow,_=e.hoveredX,y=e.setGlobalChartUnderlay,O=e.setHoveredX,x=e.setMinMax,w=e.unitsCurrent,E=e.viewAfter,S=e.viewBefore,k=Object(la.b)(ue.l),j=Object(la.b)(ue.d),T=(null===j||void 0===j?void 0:j.chartId)===a.id?j:null,D=Object(la.b)(ue.L),P=Object(sa.b)(),M=P.xAxisDateString,L=P.xAxisTimeString,I=ya[l],N="".concat(c,"-hidden-labels-id"),R=yn(t,a,o,I),B=a.min<0&&"stacked"===R,F=wn(B,R),H=Object(n.useRef)(null),z=Object(n.useCallback)((function(e){var t=e.after,a=e.before,n=e.callback,r=e.shouldNotExceedAvailableRange;f({after:t,before:a,callback:n,masterID:c,shouldNotExceedAvailableRange:r})}),[c,f]),U=Object(n.useRef)(),G=Object(n.useRef)(!1),V=Object(n.useRef)(!1),Y=Object(n.useRef)(null),X=Object(n.useRef)(0),K=Object(n.useRef)(0),Z=Object(n.useRef)(),q=Object(la.a)(),$=Object(la.b)(ue.H),J=Object(n.useCallback)((function(){G.current=!1,U.current&&U.current.updateOptions({dateWindow:null}),q($?Object(Ke.j)():Object(_e.g)({id:c}))}),[c,q,$]),Q=function(){var e=Object(En.a)(!1),t=Object(A.a)(e,2),a=t[0],r=t[1],o=Object(n.useRef)(null);return[a,o,function(e,t,a){var n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:Cn;if(e){if(o.current){r(!0);var i=t.getArea(),s=i.x;n(o,s,a,"40px")}}else r(!1)}]}(),ee=Object(A.a)(Q,3),te=ee[0],ae=ee[1],ne=ee[2],re=Object(n.useRef)({alarm:T,chartData:a,globalChartUnderlay:k,hoveredX:_,immediatelyDispatchPanAndZoom:b,resetGlobalPanAndZoom:J,setGlobalChartUnderlay:y,updateAlarmBadge:ne,updateChartPanOrZoom:z,viewAfter:E,viewBefore:S}),oe=Sn(H,re),ie=Object(A.a)(oe,3),se=ie[0],le=ie[1],ce=ie[2];Object(n.useLayoutEffect)((function(){re.current.alarm=T,re.current.chartData=a,re.current.hoveredX=_,re.current.immediatelyDispatchPanAndZoom=b,re.current.globalChartUnderlay=k,re.current.resetGlobalPanAndZoom=J,re.current.setGlobalChartUnderlay=y,re.current.updateAlarmBadge=ne,re.current.updateChartPanOrZoom=z,re.current.viewAfter=E,re.current.viewBefore=S}),[T,a,k,_,b,J,y,ne,z,E,S]);var de=Object(la.b)(ue.C);Object(n.useLayoutEffect)((function(){if(H&&H.current&&!U.current&&!d){var e=Bn({attributes:t,chartData:a,chartMetadata:o,chartSettings:I,dimensionsVisibility:u,hiddenLabelsElementId:N,isFakeStacked:B,orderedColors:m,setMinMax:x,shouldSmoothPlot:de,unitsCurrent:w,xAxisDateString:M,xAxisTimeString:L});G.current=!1;var
n=Object(g.a)({},e,{dateWindow:[re.current.viewAfter,re.current.viewBefore],highlightCallback:function(e,t){var a=V.current?null:t;a!==re.current.hoveredX&&O(a)},unhighlightCallback:function(){null!==re.current.hoveredX&&O(null)},drawCallback:function(e){if(G.current){G.current=!1;var t=e.xAxisRange(),a=Math.round(t[0]),n=Math.round(t[1]);(function(e){var t=e.after,a=e.before,n=e.chartData;return t>=1e3*n.first_entry&&a<=1e3*n.last_entry})({after:a,before:n,chartData:re.current.chartData})&&re.current.updateChartPanOrZoom({after:a,before:n})}},zoomCallback:function(e,t){G.current=!0,re.current.updateChartPanOrZoom({after:e,before:t})},underlayCallback:function(e,t,a){if(ce(a),re.current.alarm){var n=re.current.alarm,r=a.toDomXCoord(1e3*n.when),o=Pn(n.status);requestAnimationFrame((function(){e.fillStyle=o;var a=e.globalAlpha;e.globalAlpha=.7,e.fillRect(r-3,t.y,6,t.h),e.globalAlpha=a})),re.current.updateAlarmBadge(re.current.alarm,a,r-3)}if(re.current.globalChartUnderlay){var i=re.current.globalChartUnderlay,s=i.after,l=i.before;if(s<l){var c=a.toDomCoords(s,-20),u=a.toDomCoords(l,20),d=c[0],h=u[0];e.fillStyle=window.NETDATA.themes.current.highlight,e.fillRect(d,t.y,h-d,t.h)}}},interactionModel:{mousedown:function(e,t,a){e.button&&2===e.button||(G.current=!0,V.current=!0,a.initializeMouseDown(e,t,a),a.tarp.tarps=a.tarp.tarps.filter((function(e){var t=Number(e.style.left.replace("px",""))>1e4;return t&&e.parentNode.removeChild(e),!t})),q(Object(Ke.t)()),e.button&&1===e.button?e.shiftKey?(Y.current=null,pn.a.startPan(e,t,a)):e.altKey||e.ctrlKey||e.metaKey?(Y.current=t.toDataXCoord(e.offsetX),pn.a.startZoom(e,t,a)):(Y.current=null,pn.a.startZoom(e,t,a)):e.shiftKey?(Y.current=null,pn.a.startZoom(e,t,a)):e.altKey||e.ctrlKey||e.metaKey?(Y.current=t.toDataXCoord(e.offsetX),pn.a.startZoom(e,t,a)):(Y.current=null,pn.a.startPan(e,t,a)))},mousemove:function(e,t,a){null!==Y.current?(G.current=!0,pn.a.moveZoom(e,t,a),e.preventDefault()):a.isPanning?(G.current=!0,a.is2DPan=!1,pn.a.movePan(e,t,a)):a.isZooming&&pn.a.moveZoom(e,t,a)},mouseup:function(e,t,n){if(V.current=!1,null!==Y.current){var r=Object(Ze.a)((function(e){return+e}),[Y.current,t.toDataXCoord(e.offsetX)]);re.current.setGlobalChartUnderlay({after:r[0],before:r[1],masterID:a.id}),Y.current=null,n.isZooming=!1,t.clearZoomRect_(),t.drawGraph_(!1)}else n.isPanning?(G.current=!0,pn.a.endPan(e,t,n),re.current.immediatelyDispatchPanAndZoom()):n.isZooming&&(G.current=!0,pn.a.endZoom(e,t,n),re.current.immediatelyDispatchPanAndZoom())},wheel:function(e,t){if(e.shiftKey||e.altKey){G.current=!0,e.preventDefault(),e.stopPropagation();var a="number"!==typeof e.wheelDelta||Number.isNaN(e.wheelDelta)?-1.2*e.deltaY:e.wheelDelta/40,n=(e.detail?-1*e.detail:a)/50;e.offsetX||(e.offsetX=e.layerX-e.target.offsetLeft);var r=function(e,t){var a=e.toDomCoords(e.xAxisRange()[0],null),n=Object(A.a)(a,1)[0],r=t-n,o=e.toDomCoords(e.xAxisRange()[1],null)[0]-n;return 0===o?0:r/o}(t,e.offsetX);!function(e,a,n){n=n||.5;var r=e.xAxisRange(),o=Object(A.a)(r,2),i=o[0],s=o[1],l=(s-i)*a,c=i+l*n,u=s-l*(1-n);re.current.updateChartPanOrZoom({after:c,before:u,shouldNotExceedAvailableRange:!0,callback:function(e,a){t.updateOptions({dateWindow:[e,a]})}})}(t,n,r)}},click:function(e){e.preventDefault()},dblclick:function(){q(Object(Ke.k)({forcePlay:!1})),re.current.resetGlobalPanAndZoom()},touchstart:function(e,t,a){V.current=!0,G.current=!0,pn.a.defaultInteractionModel.touchstart(e,t,a),a.touchDirections={x:!0,y:!1},X.current=0,"number"===typeof 
e.touches[0].pageX?K.current=e.touches[0].pageX:K.current=0},touchmove:function(e,t,a){G.current=!0,pn.a.defaultInteractionModel.touchmove(e,t,a),X.current=Date.now()},touchend:function(e,t,a){if(V.current=!1,G.current=!0,pn.a.defaultInteractionModel.touchend(e,t,a),0===X.current&&0!==K.current&&H.current){G.current=!1;var n=t.plotter_,r=(K.current-(n.area.x+H.current.getBoundingClientRect().left))/n.area.w,o=re.current,i=Math.round(o.viewAfter+(o.viewBefore-o.viewAfter)*r);O(i,!0)}var s=Date.now();"undefined"!==typeof Z.current&&(0===X.current&&s-Z.current<=window.NETDATA.options.current.double_click_speed&&re.current.resetGlobalPanAndZoom());Z.current=s,re.current.immediatelyDispatchPanAndZoom()}}}),r=B?_n(a.result.data,u):a.result.data,i=new pn.a(H.current,r,n);U.current=i}}),[t,a,o,I,c,u,d,N,B,m,O,x,de,w,M,L,ce,q]),Object(fa.a)((function(){if(U.current){var e="sparkline"===t.dygraphTheme,a="bottom"===t.legendPosition;U.current.updateOptions({ylabel:e||a?void 0:w})}}),[t,w]),Object(fa.a)((function(){U.current&&U.current.updateOptions({})}),[T,k]);var he=Object(la.b)(ue.F);Object(fa.a)((function(){U.current&&window.requestAnimationFrame((function(){U.current&&U.current.resize()}))}),[he]),Object(fa.a)((function(){if(U.current&&!d){var e=[E,S],n=U.current.xAxisRange(),r=Math.abs(S-E-(n[1]-n[0]))>5e3,o=S<=0&&n[1]>S&&n[0]>E&&!r,i=p&&!o?{dateWindow:e}:{},s=t.dygraphColors,l=void 0===s?m:s,c=B?_n(a.result.data,u):a.result.data,h=1===u.length||u.filter((function(e){return!0===e})).length>1;U.current.updateOptions(Object(g.a)({},i,{colors:B?xn(Object(W.a)(l)):l,file:c,labels:a.result.labels,fillAlpha:F},"stacked"===R?{includeZero:h}:{},{stackedGraph:"stacked"===R&&!B,visibility:B?Object(W.a)(u):u}))}}),[t,a.result,c,u,R,F,d,B,p,m,E,S]),Object(fa.a)((function(){if(U.current){var e=Bn({attributes:t,chartData:a,chartMetadata:o,chartSettings:I,dimensionsVisibility:u,hiddenLabelsElementId:N,isFakeStacked:B,orderedColors:m,setMinMax:x,shouldSmoothPlot:de,unitsCurrent:w,xAxisDateString:M,xAxisTimeString:L});d||U.current.updateOptions(e)}}),[R,D]);var pe=Object(la.b)(ue.p);Object(n.useLayoutEffect)((function(){if(U.current&&pe!==c){if(-1===v)return void(-1!==U.current.getSelection()&&U.current.clearSelection());U.current.setSelection(v)}}),[a,c,pe,v,E,S]);var fe=Object(la.b)((function(e){return Object(Ea.l)(e,{id:c})}));Object(n.useLayoutEffect)((function(){U.current&&U.current.resize()}),[fe,a.dimension_names.length]);var ge=Object(la.b)((function(e){return t.commonMin?Object(ue.h)(e,t.commonMin):void 0})),me=Object(la.b)((function(e){return t.commonMax?Object(ue.g)(e,t.commonMax):void 0}));Object(n.useLayoutEffect)((function(){var e=t.commonMin,a=t.commonMax;if(U.current&&(e||a)){var n=U.current.yAxisExtremes()[0],r=Object(A.a)(n,2),o=r[0],i=r[1],s=t.dygraphValueRange,l=void 0===s?[null,null]:s,u=null===l[0],d=null===l[1],h=!1,p=Object(C.a)(n);if(e&&u&&ge&&ge.currentExtreme<o&&(p[0]=ge.currentExtreme,h=!0),a&&d&&me&&me.currentExtreme>i&&(p[1]=me.currentExtreme,h=!0),h){U.current.updateOptions({valueRange:p});var f=U.current.yAxisExtremes()[0];p=Object(C.a)(f)}e&&u&&p[0]!==(null===ge||void 0===ge?void 0:ge.charts[c])&&q(Object(Ke.p)({chartUuid:c,commonMinKey:e,value:p[0]})),a&&d&&p[1]!==(null===me||void 0===me?void 0:me.charts[c])&&q(Object(Ke.o)({chartUuid:c,commonMaxKey:a,value:p[1]}))}}),[t,a.result,c,ge,me,q]),Object(n.useLayoutEffect)((function(){se&&U.current&&ce(U.current)}),[se]),Object(ga.a)((function(){U.current&&U.current.destroy()}));var 
be=Object(n.useRef)();Object(dn.a)((function(){if(t.detectResize){var e=!1,a=function(e,t){var a=null;return function(){for(var n=arguments.length,r=new Array(n),o=0;o<n;o++)r[o]=arguments[o];null!==a&&(clearTimeout(a),a=null),a=setTimeout((function(){return e.apply(void 0,r)}),t)}}((function(){e?U.current&&U.current.resize():e=!0}),500);be.current=new fn.a((function(){a()})),be.current.observe(H.current)}})),Object(ga.a)((function(){U.current=null,be.current&&be.current.disconnect()}));var ve="bottom"===t.legendPosition;return r.a.createElement(r.a.Fragment,null,r.a.createElement("div",{ref:H,id:s,className:va()(i,{"dygraph-chart--legend-bottom":ve})}),se&&h&&r.a.createElement(kn,{ref:le}),(null===T||void 0===T?void 0:T.value)&&h&&r.a.createElement(Rn,{isVisible:te,ref:ae,status:T.status,label:T.value}),r.a.createElement("div",{className:"dygraph-chart__labels-hidden",id:N}))},Hn=a(298),zn=a.n(Hn),Un=a(199),Gn=function(e){var t=e.attributes,a=e.chartData,o=e.chartMetadata,i=e.chartElementClassName,s=e.chartElementId,l=e.chartWidth,c=e.hoveredRow,u=e.legendFormatValue,d=e.orderedColors,h=e.setMinMax,p=e.showUndefined,f=e.unitsCurrent,g=Object(n.useRef)(null),m=Object(n.useState)(),b=Object(A.a)(m,2),v=b[0],_=b[1],y=-1===c?0:a.result.length-1-c,O=p?null:a.result[y],x=t.easyPieChartMinValue,w=void 0===x?a.min:x,E=t.easyPieChartMaxValue,S=void 0===E?a.max:E,C=Object(de.a)(Object(Un.a)((function(e){return+e})),Object(Ze.a)(Ta.a),(function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return[Math.min(a,O||0),Math.max(n,O||0)]}))([w,S]);Object(n.useEffect)((function(){h(C)}),[C]);var k=function(e){var t,a=e.value,n=void 0===a?0:a,r=e.min,o=void 0===r?0:r,i=e.max,s=void 0===i?0:i;return!e.isMinOverride&&o>0&&(o=0),!e.isMaxOverride&&s<0&&(s=0),o<0&&s>0?(s=-o>s?-o:s,t=Math.round(100*n/s)):n>=0&&o>=0&&s>=0?0===(t=Math.round(100*(n-o)/(s-o)))&&(t=.1):0===(t=Math.round(100*(n-s)/(s-o)))&&(t=-.1),t}({value:p?0:O,min:C[0],max:C[1],isMinOverride:void 0!==t.easyPieChartMinValue,isMaxOverride:void 0!==t.easyPieChartMaxValue});Object(n.useEffect)((function(){if(g.current&&!v){var e=Object(ua.a)([[function(e){return e<3},Object(da.a)(2)],[ha.a,Ta.a]])(Math.floor(l/22)),a=t.easyPieChartTrackColor,n=void 0===a?window.NETDATA.themes.current.easypiechart_track:a,r=t.easyPieChartScaleColor,o=void 0===r?window.NETDATA.themes.current.easypiechart_scale:r,i=t.easyPieChartScaleLength,s=void 0===i?5:i,c=t.easyPieChartLineCap,u=void 0===c?"round":c,h=t.easyPieChartLineWidth,p=void 0===h?e:h,f=t.easyPieChartTrackWidth,m=t.easyPieChartSize,b=void 0===m?l:m,y=t.easyPieChartRotate,O=void 0===y?0:y,x=t.easyPieChartAnimate,w=void 0===x?{duration:500,enabled:!0}:x,E=t.easyPieChartEasing,S=new zn.a(g.current,{barColor:d[0],trackColor:n,scaleColor:o,scaleLength:s,lineCap:u,lineWidth:p,trackWidth:f,size:b,rotate:O,animate:w,easing:E});_(S)}}),[t,a,v,l,d]),Object(n.useEffect)((function(){if(v){var e=-1===c&&!p;e&&!v.options.animate.enabled?v.enableAnimation():!e&&v.options.animate.enabled&&v.disableAnimation(),setTimeout((function(){v.update(k)}),0)}}),[v,c,k,p]);var j=2*l/3/5,T=Math.round((l-j-l/40)/2),D=Math.round(1.6*j/3),P=Math.round(T-2*D-l/40),M=Math.round(.9*D),L=Math.round(T+(j+M)+l/40);return 
r.a.createElement("div",{ref:g,id:s,className:i},r.a.createElement("span",{className:"easyPieChartLabel",style:{fontSize:j,top:T}},u(O)),r.a.createElement("span",{className:"easyPieChartTitle",style:{fontSize:D,top:P}},t.title||o.title),r.a.createElement("span",{className:"easyPieChartUnits",style:{fontSize:M,top:L}},f))},Wn=a(299),Vn=function(e){return"number"===typeof e},Yn=function(e){var t=e.attributes,a=e.chartData,o=e.chartMetadata,i=e.chartElementClassName,s=e.chartElementId,l=e.chartUuid,c=e.chartHeight,u=e.chartWidth,d=e.hoveredRow,h=e.legendFormatValue,p=e.orderedColors,f=e.setMinMax,g=e.showUndefined,m=e.unitsCurrent,b=Object(n.useRef)(null),v=Object(n.useState)(),_=Object(A.a)(v,2),y=_[0],O=_[1],x=-1===d?0:a.result.length-1-d,w=a.result[x],E=t.gaugeMinValue,S=t.gaugeMaxValue,C=Vn(E)?E:a.min,k=Vn(S)?S:a.max,j=Object(de.a)(Object(Un.a)((function(e){return+e})),(function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return[!Vn(E)&&a>0?0:a,!Vn(S)&&n<0?0:n]}),Object(Ze.a)(Ta.a),(function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return[Math.min(a,w),Math.max(n,w)]}))([C,k]),T=Object(A.a)(j,2),D=T[0],P=T[1];Object(n.useEffect)((function(){f([D,P])}),[D,P]);var M=Object(de.a)(Object(da.a)(100*(w-D)/(P-D)),(function(e){return Math.max(.001,e)}),(function(e){return Math.min(99.999,e)}))();Object(n.useEffect)((function(){if(b.current&&!y){var e=t.gaugePointerColor,a=void 0===e?window.NETDATA.themes.current.gauge_pointer:e,n=t.gaugeStrokeColor,r=void 0===n?window.NETDATA.themes.current.gauge_stroke:n,o=t.gaugeStartColor,i=void 0===o?p[0]:o,s=t.gaugeStopColor,l=t.gaugeGenerateGradient,c={lines:12,angle:.14,lineWidth:.57,radiusScale:1,pointer:{length:.85,strokeWidth:.045,color:a},limitMax:!0,limitMin:!0,colorStart:i,colorStop:s,strokeColor:r,generateGradient:!0===(void 0!==l&&l),gradientType:0,highDpiSupport:!0},u=new Wn.Gauge(b.current).setOptions(c);u.minValue=0,u.maxValue=100,O(u)}}),[t,a,y,u,p]),Object(n.useEffect)((function(){if(y){var e=-1===d&&!g?32:1e9;y.animationSpeed=e,setTimeout((function(){y.set(g?0:M)}),0)}}),[y,c,u,d,M,g]);var L=Math.floor(c/5),I=Math.round((c-L)/3.2),N=Math.round(L/2.1),R=Math.round(.9*N),B=Math.round(.75*L);return r.a.createElement("div",{id:s,className:i},r.a.createElement("canvas",{ref:b,className:"gaugeChart",id:"gauge-".concat(l,"-canvas"),style:{width:u,height:c}}),r.a.createElement("span",{className:"gaugeChartLabel",style:{fontSize:L,top:I}},h(g?null:w)),r.a.createElement("span",{className:"gaugeChartTitle",style:{fontSize:N,top:0}},t.title||o.title),r.a.createElement("span",{className:"gaugeChartUnits",style:{fontSize:R}},m),r.a.createElement("span",{className:"gaugeChartMin",style:{fontSize:B}},h(g?null:D)),r.a.createElement("span",{className:"gaugeChartMax",style:{fontSize:B}},h(g?null:P)))},Xn=(a(275),a(136)),Kn=a(614),Zn=a(615),qn=Object(de.a)(Xn.a,Object(Kn.a)(/[^0-9a-f]/gi,""),Object(ua.a)([[function(e){return e.length<6},function(e){return e[0]+e[0]+e[1]+e[1]+e[2]+e[2]}],[ha.a,Ta.a]])),$n=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,a=qn(e),n=Object(de.a)((function(e){return Object(Zn.a)(2,e)}),Object(Un.a)(Object(de.a)((function(e){return parseInt(e,16)}),(function(e){return Math.round(Math.min(Math.max(0,e+e*t),255)).toString(16)}),(function(e){return"00".concat(e).substr(e.length)}))),(function(e){return e.join("")}))(a);return"#".concat(n)},Jn=function(e){return e>0?e:(new Date).valueOf()+e},Qn=function(e){var 
t=e.attributes,a=e.chartContainerElement,o=e.chartData,i=e.chartMetadata,s=e.chartElementClassName,l=e.chartElementId,c=e.orderedColors,u=e.unitsCurrent,d=e.viewAfterForCurrentData,h=e.viewBeforeForCurrentData,p=Object(n.useRef)(null),f=Object(n.useState)(),m=Object(A.a)(f,2),b=m[0],v=m[1],_=Object(n.useRef)(),y=t.forceTimeWindow?function(e,t){var a=Jn(t[0]),n=Jn(t[1]),r=e.after*Ve.a,o=e.before*Ve.a-r,i=n-a;if(o>i/1.03)return{};var s=o/i;return{paddingLeftPercentage:"".concat((r-a)/(n-a)*100,"%"),widthRatio:s}}(o,[d,h]):{},O=y.paddingLeftPercentage,x=void 0===O?void 0:O,w=y.widthRatio,E=void 0===w?1:w;Object(n.useEffect)((function(){var e=t.sparklineLineColor,a=void 0===e?c[0]:e,n="line"===i.chart_type?window.NETDATA.themes.current.background:$n(a,window.NETDATA.chartDefaults.fill_luminance),r=t.title||i.title,o=function(e){return"disable"===e?"":e},s=t.sparklineType,l=void 0===s?"line":s,d=t.sparklineFillColor,h=void 0===d?n:d,f=t.sparklineDisableInteraction,g=void 0!==f&&f,m=t.sparklineDisableTooltips,y=void 0!==m&&m,O=t.sparklineDisableHighlight,x=void 0!==O&&O,w=t.sparklineHighlightLighten,E=void 0===w?1.4:w,S=t.sparklineTooltipSuffix,C=void 0===S?" ".concat(u):S,A=t.sparklineNumberFormatter,k=void 0===A?function(e){return e.toFixed(2)}:A,j={type:l,lineColor:a,fillColor:h,chartRangeMin:t.sparklineChartRangeMin,chartRangeMax:t.sparklineChartRangeMax,composite:t.sparklineComposite,enableTagOptions:t.sparklineEnableTagOptions,tagOptionPrefix:t.sparklineTagOptionPrefix,tagValuesAttribute:t.sparklineTagValuesAttribute,disableHiddenCheck:t.sparklineDisableHiddenCheck,defaultPixelsPerValue:t.sparklineDefaultPixelsPerValue,spotColor:o(t.sparklineSpotColor),minSpotColor:o(t.sparklineMinSpotColor),maxSpotColor:o(t.sparklineMaxSpotColor),spotRadius:t.sparklineSpotRadius,valueSpots:t.sparklineValueSpots,highlightSpotColor:t.sparklineHighlightSpotColor,highlightLineColor:t.sparklineHighlightLineColor,lineWidth:t.sparklineLineWidth,normalRangeMin:t.sparklineNormalRangeMin,normalRangeMax:t.sparklineNormalRangeMax,drawNormalOnTop:t.sparklineDrawNormalOnTop,xvalues:t.sparklineXvalues,chartRangeClip:t.sparklineChartRangeClip,chartRangeMinX:t.sparklineChartRangeMinX,chartRangeMaxX:t.sparklineChartRangeMaxX,disableInteraction:g,disableTooltips:y,disableHighlight:x,highlightLighten:E,highlightColor:t.sparklineHighlightColor,tooltipContainer:t.sparklineTooltipContainer,tooltipClassname:t.sparklineTooltipClassname,tooltipChartTitle:r,tooltipFormat:t.sparklineTooltipFormat,tooltipPrefix:t.sparklineTooltipPrefix,tooltipSuffix:C,tooltipSkipNull:t.sparklineTooltipSkipNull,tooltipValueLookups:t.sparklineTooltipValueLookups,tooltipFormatFieldlist:t.sparklineTooltipFormatFieldlist,tooltipFormatFieldlistKey:t.sparklineTooltipFormatFieldlistKey,numberFormatter:k,numberDigitGroupSep:t.sparklineNumberDigitGroupSep,numberDecimalMark:t.sparklineNumberDecimalMark,numberDigitGroupCount:t.sparklineNumberDigitGroupCount,animatedZooms:t.sparklineAnimatedZooms};_.current=j,p.current&&!b&&v((function(){return window.$(p.current)}))}),[b,t,a,o.result,i,c,u,E]);var S=t.sparklineOnHover;Object(n.useEffect)((function(){if(b&&S){var e=function(){return S(null)},t=function(e){var t=Object(A.a)(e.sparklines,1)[0].getCurrentRegionFields(),a=t.x,n=t.y;S({x:a,y:n})};return b.bind("sparklineRegionChange",t).bind("mouseleave",e),function(){b.unbind("sparklineRegionChange",t).unbind("mouseleave",e)}}}),[b,S]),Object(n.useEffect)((function(){if(b){var 
e=a.getBoundingClientRect(),t=e.width,n=e.height;b.sparkline(o.result,Object(g.a)({},_.current,{width:Math.floor(t*E),height:Math.floor(n)}))}}),[b,o.result]);var C=x?{textAlign:"initial",paddingLeft:x}:void 0;return r.a.createElement("div",{ref:p,id:l,className:s,style:C})},er=a(147);window.d3=er;var tr,ar=a(300),nr=a.n(ar),rr=a(46),or={label:"no data",value:100,color:"#666666"},ir=function(e){var t=e.chartData,a=e.index,n=e.localeDateString,r=e.localeTimeString,o=Math.round((t.before-t.after+1)/t.points),i=Object(rr.a)(o),s=t.result.data[a].time,l=s-1e3*o,c=n(l),u=r(l),d=n(s),h=r(s);return c===d?"".concat(c," ").concat(u," to ").concat(h,", ").concat(i):"".concat(c," ").concat(u," to ").concat(d," ").concat(h,", ").concat(i)},sr=function(e){var t=e.attributes,a=e.chartContainerElement,o=e.chartData,i=e.chartMetadata,s=e.chartElementClassName,l=e.chartElementId,c=e.hoveredRow,u=e.hoveredX,d=e.legendFormatValue,h=e.orderedColors,p=e.setMinMax,f=e.unitsCurrent,g=Object(n.useRef)(null),m=Object(n.useRef)(d);m.current=d;var b=Object(n.useState)(),v=Object(A.a)(b,2),_=v[0],y=v[1],O=Object(n.useRef)(),x=Object(sa.b)(),w=x.localeDateString,E=x.localeTimeString;return Object(n.useEffect)((function(){if(g.current&&!_){p([o.min,o.max]);var e=Object(G.a)(o.result.labels).map((function(e,t){return{label:e,value:o.result.data[0][e],color:h[t]}})).filter((function(e){return null!==e.value&&e.value>0})),n=e.length>0?e:or,r=t.title||i.title,s=ir({chartData:o,index:0,localeDateString:w,localeTimeString:E}),l=t.d3pieTitle,c=void 0===l?r:l,u=t.d3pieSubtitle,d=void 0===u?f:u,b=t.d3pieFooter,v=void 0===b?s:b,x=t.d3pieTitleColor,S=void 0===x?window.NETDATA.themes.current.d3pie.title:x,C=t.d3pieTitleFontsize,A=void 0===C?12:C,k=t.d3pieTitleFontweight,j=void 0===k?"bold":k,T=t.d3pieTitleFont,D=void 0===T?"arial":T,P=t.d3PieSubtitleColor,M=void 0===P?window.NETDATA.themes.current.d3pie.subtitle:P,L=t.d3PieSubtitleFontsize,I=void 0===L?10:L,N=t.d3PieSubtitleFontweight,R=void 0===N?"normal":N,B=t.d3PieSubtitleFont,F=void 0===B?"arial":B,H=t.d3PieFooterColor,z=void 0===H?window.NETDATA.themes.current.d3pie.footer:H,U=t.d3PieFooterFontsize,W=void 0===U?9:U,V=t.d3PieFooterFontweight,Y=void 0===V?"bold":V,X=t.d3PieFooterFont,K=void 0===X?"arial":X,Z=t.d3PieFooterLocation,q=void 0===Z?"bottom-center":Z,$=t.d3PiePieinnerradius,J=void 0===$?"45%":$,Q=t.d3PiePieouterradius,ee=void 0===Q?"80%":Q,te=t.d3PieSortorder,ae=void 0===te?"value-desc":te,ne=t.d3PieSmallsegmentgroupingEnabled,re=void 0!==ne&&ne,oe=t.d3PieSmallsegmentgroupingValue,ie=void 0===oe?1:oe,se=t.d3PieSmallsegmentgroupingValuetype,le=void 0===se?"percentage":se,ce=t.d3PieSmallsegmentgroupingLabel,ue=void 0===ce?"other":ce,de=t.d3PieSmallsegmentgroupingColor,he=void 0===de?window.NETDATA.themes.current.d3pie.other:de,pe=t.d3PieLabelsOuterFormat,fe=void 0===pe?"label-value1":pe,ge=t.d3PieLabelsOuterHidewhenlessthanpercentage,me=void 0===ge?null:ge,be=t.d3PieLabelsOuterPiedistance,ve=void 0===be?15:be,_e=t.d3PieLabelsInnerFormat,ye=void 0===_e?"percentage":_e,Oe=t.d3PieLabelsInnerHidewhenlessthanpercentage,xe=void 0===Oe?2:Oe,we=t.d3PieLabelsMainLabelColor,Ee=void 0===we?window.NETDATA.themes.current.d3pie.mainlabel:we,Se=t.d3PieLabelsMainLabelFont,Ce=void 0===Se?"arial":Se,Ae=t.d3PieLabelsMainLabelFontsize,ke=void 0===Ae?10:Ae,je=t.d3PieLabelsMainLabelFontweight,Te=void 0===je?"normal":je,De=t.d3PieLabelsPercentageColor,Pe=void 0===De?window.NETDATA.themes.current.d3pie.percentage:De,Me=t.d3PieLabelsPercentageFont,Le=void 
0===Me?"arial":Me,Ie=t.d3PieLabelsPercentageFontsize,Ne=void 0===Ie?10:Ie,Re=t.d3PieLabelsPercentageFontweight,Be=void 0===Re?"bold":Re,Fe=t.d3PieLabelsValueColor,He=void 0===Fe?window.NETDATA.themes.current.d3pie.value:Fe,ze=t.d3PieLabelsValueFont,Ue=void 0===ze?"arial":ze,Ge=t.d3PieLabelsValueFontsize,We=void 0===Ge?10:Ge,Ve=t.d3PieLabelsValueFontweight,Ye=void 0===Ve?"bold":Ve,Xe=t.d3PieLabelsLinesEnabled,Ke=void 0===Xe||Xe,Ze=t.d3PieLabelsLinesStyle,qe=void 0===Ze?"curved":Ze,$e=t.d3PieLabelsLinesColor,Je=void 0===$e?"segment":$e,Qe=t.d3PieLabelsTruncationEnabled,et=void 0!==Qe&&Qe,tt=t.d3PieLabelsTruncationTruncatelength,at=void 0===tt?30:tt,nt=t.d3PieMiscColorsSegmentstroke,rt=void 0===nt?window.NETDATA.themes.current.d3pie.segment_stroke:nt,ot=t.d3PieMiscGradientEnabled,it=void 0!==ot&&ot,st=t.d3PieMiscColorsPercentage,lt=void 0===st?95:st,ct=t.d3PieMiscGradientColor,ut=void 0===ct?window.NETDATA.themes.current.d3pie.gradient_color:ct,dt=t.d3PieCssprefix,ht=void 0===dt?null:dt,pt=a.getBoundingClientRect(),ft=pt.width,gt=pt.height,mt={header:{title:{text:c,color:S,fontSize:A,fontWeight:j,font:D},subtitle:{text:d,color:M,fontSize:I,fontWeight:R,font:F},titleSubtitlePadding:1},footer:{text:v,color:z,fontSize:W,fontWeight:Y,font:K,location:q},size:{canvasHeight:Math.floor(gt),canvasWidth:Math.floor(ft),pieInnerRadius:J,pieOuterRadius:ee},data:{sortOrder:ae,smallSegmentGrouping:{enabled:re,value:ie,valueType:le,label:ue,color:he},content:n},labels:{outer:{format:fe,hideWhenLessThanPercentage:me,pieDistance:ve},inner:{format:ye,hideWhenLessThanPercentage:xe},mainLabel:{color:Ee,font:Ce,fontSize:ke,fontWeight:Te},percentage:{color:Pe,font:Le,fontSize:Ne,fontWeight:Be,decimalPlaces:0},value:{color:He,font:Ue,fontSize:We,fontWeight:Ye},lines:{enabled:Ke,style:qe,color:Je},truncation:{enabled:et,truncateLength:at},formatter:function(e){return"value"===e.part?m.current(e.value):"percentage"===e.part?"".concat(e.label,"%"):e.label}},effects:{load:{effect:"none",speed:0},pullOutSegmentOnClick:{effect:"bounce",speed:400,size:5},highlightSegmentOnMouseover:!0,highlightLuminosity:-.2},tooltips:{enabled:!1,type:"placeholder",string:"",placeholderParser:null,styles:{fadeInSpeed:250,backgroundColor:window.NETDATA.themes.current.d3pie.tooltip_bg,backgroundOpacity:.5,color:window.NETDATA.themes.current.d3pie.tooltip_fg,borderRadius:2,font:"arial",fontSize:12,padding:4}},misc:{colors:{background:"transparent",segmentStroke:rt},gradient:{enabled:it,percentage:lt,color:ut},canvasPadding:{top:5,right:5,bottom:5,left:5},pieCenterOffset:{x:0,y:0},cssPrefix:ht},callbacks:{onload:null,onMouseoverSegment:null,onMouseoutSegment:null,onClickSegment:null}},bt=new nr.a(g.current,mt);O.current=mt,y((function(){return bt}))}}),[t,a,o,i,_,d,w,E,h,p,f]),Object(n.useEffect)((function(){if(_&&O.current){var e=ir({chartData:o,index:0,localeDateString:w,localeTimeString:E}),a=t.d3pieSubtitle,n=void 0===a?f:a,r=t.d3pieFooter,i=void 0===r?e:r,s=!!u&&-1===c,l=o.result.data.length-c-1,d=l<0||l>=o.result.data.length?0:l,p=Object(G.a)(o.result.labels).map((function(e,t){return{label:e,value:o.result.data[d][e],color:h[t]}})).filter((function(e){return null!==e.value&&e.value>0})),g=p.length>0&&!s?p:[or];_.options.header.subtitle.text=n,_.options.footer.text=i,_.options.data.content=g,_.destroy(),_.recreate()}}),[t,o,_,c,u,w,E,h,f]),r.a.createElement("div",{ref:g,id:l,className:s})},lr=(a(511),function(e){var 
t=e.attributes,a=e.chartContainerElement,o=e.chartData,i=e.chartMetadata,s=e.chartElementClassName,l=e.chartElementId,c=e.orderedColors,u=Object(n.useRef)(null),d=Object(n.useState)(),h=Object(A.a)(d,2),p=h[0],f=h[1],m=Object(n.useRef)();return Object(n.useLayoutEffect)((function(){if(u.current&&!p){var e=window.$(u.current),n=a.getBoundingClientRect(),r=n.width,o=n.height,i=t.peityStrokeWidth,s=void 0===i?1:i,l={stroke:window.NETDATA.themes.current.foreground,strokeWidth:s,width:Math.floor(r),height:Math.floor(o),fill:window.NETDATA.themes.current.foreground};f((function(){return e})),m.current=l}}),[t,p,a]),Object(n.useLayoutEffect)((function(){if(p&&m.current){var e=Object(g.a)({},m.current,{stroke:c[0],fill:c[0]===m.current.stroke?m.current.fill:"line"===i.chart_type?window.NETDATA.themes.current.background:$n(c[0],window.NETDATA.chartDefaults.fill_luminance)});p.peity("line",e),m.current=e}}),[p,o,i,c]),r.a.createElement("div",{ref:u,id:l,className:s},o.result)}),cr=function(e){var t=e.attributes,a=e.chartData,o=e.chartMetadata,i=e.chartElementClassName,s=e.chartElementId,l=e.orderedColors,c=e.unitsCurrent,u=Object(n.useRef)(null),d=Object(n.useRef)(),h=Object(n.useState)(!1),p=Object(A.a)(h,2),f=p[0],g=p[1];(tr||(tr=new Promise((function(e,t){setTimeout((function(){var a=document.createElement("script");a.type="text/javascript",a.async=!0,a.src="https://www.google.com/jsapi",a.onerror=function(){t(Error("error loading google.js api"))},a.onload=function(){e("ok")};var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(a,n)}),1e3)})).then((function(){return new Promise((function(e){window.google.load("visualization","1.1",{packages:["corechart","controls"],callback:e})}))})))).then((function(){g(!0)}));var m=Object(n.useRef)();return Object(n.useLayoutEffect)((function(){if(d.current&&m.current){var e=new window.google.visualization.DataTable(a.result);d.current.draw(e,m.current)}}),[a]),Object(n.useLayoutEffect)((function(){if(u.current&&!m.current&&f){var e=new window.google.visualization.DataTable(a.result),n=t.title,r=void 0===n?o.title:n,i=o.chart_type,s=new Map([["area",window.NETDATA.options.current.color_fill_opacity_area],["stacked",window.NETDATA.options.current.color_fill_opacity_stacked]]).get(i)||.3,h={colors:l,lineWidth:"line"===i?2:1,title:r,fontSize:11,hAxis:{viewWindowMode:"maximized",slantedText:!1,format:"HH:mm:ss",textStyle:{fontSize:9},gridlines:{color:"#EEE"}},vAxis:{title:c,viewWindowMode:"area"===i||"stacked"===i?"maximized":"pretty",minValue:"stacked"===i?void 0:-.1,maxValue:"stacked"===i?void 0:.1,direction:1,textStyle:{fontSize:9},gridlines:{color:"#EEE"}},chartArea:{width:"65%",height:"80%"},focusTarget:"category",annotation:{1:{style:"line"}},pointsVisible:!1,titlePosition:"out",titleTextStyle:{fontSize:11},tooltip:{isHtml:!1,ignoreBounds:!0,textStyle:{fontSize:9}},curveType:"function",areaOpacity:s,isStacked:"stacked"===i},p=["area","stacked"].includes(o.chart_type)?new window.google.visualization.AreaChart(u.current):new window.google.visualization.LineChart(u.current);p.draw(e,h),m.current=h,d.current=p}}),[t,a.result,o,u,f,l,c]),r.a.createElement("div",{ref:u,id:s,className:i})},ur=function(e){var t=e.attributes,a=e.chartData,n=e.chartElementClassName,o=e.chartElementId,i=t.textOnlyDecimalPlaces,s=void 0===i?1:i,l=t.textOnlyPrefix,c=void 0===l?"":l,u=t.textOnlySuffix,d=void 0===u?"":u,h=Math.pow(10,s),p=Math.round(a.result[0]*h)/h,f=0===a.result.length?"":c+p+d;return 
r.a.createElement("div",{id:o,className:n},f)},dr=a(37),hr=Math.round(16/9),pr=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:11,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:1;return e-t},fr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:hr;return Math.sqrt(e.length/t)},gr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:hr;return e*t},mr=function(e,t){var a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:11;return Math.floor(t%e)*a},br=function(e,t){var a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:11;return Math.floor(t/e)*a},vr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:11;return Math.ceil(e)*t},_r=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:11,a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:11;return Math.ceil(e)*t+a},yr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:11;return Math.floor(e/t)},Or=function(e,t,a,n){var r=n.onMouseenter,o=n.onMouseout,i=arguments.length>4&&void 0!==arguments[4]?arguments[4]:{},s=i.cellSize,l=i.cellPadding,c=-1,u=function(a){var n=e.getBoundingClientRect(),r=mr(t,a,s),o=br(t,a,s),i=n.left+r,c=n.top+o,u=pr(s,l);return{index:a,left:i,top:c,right:i+u,bottom:c+u,width:u,height:u,offsetX:r,offsetY:o}},d=function(){o(u(c)),c=-1},h=function(e){var n=e.offsetX,o=e.offsetY,i=yr(n,s),l=yr(o,s)*t+i;l!==c&&(-1!==c&&d(),l>=a||(r(u(l)),c=l))};return e.addEventListener("mousemove",h),e.addEventListener("mouseout",d),function(){e.removeEventListener("mousemove",h),e.removeEventListener("mouseout",d)}},xr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},a=t.aspectRatio,n=t.cellSize,r=fr(e,a),o=gr(r,a);return vr(o,n)},wr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},a=t.aspectRatio,n=t.cellSize,r=t.padding,o=fr(e,a),i=gr(o,a),s=vr(i,n),l=_r(o,n,r);return{width:s,height:l,columns:Math.ceil(i)}},Er=["rgba(198, 227, 246, 0.9)","rgba(14, 154, 255, 0.9)"],Sr=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:Er;return Object(er.scaleLinear)().domain(Object(er.extent)(e,(function(e){return e}))).range(t)},Cr=function(e){return e.getBoundingClientRect().top/window.innerHeight>.5?"top":"bottom"},Ar={top:{bottom:"top"},bottom:{top:"bottom"}},kr=function(e){var t=e.data,a=e.renderTooltip,o=Object(dr.a)(e,["data","renderTooltip"]),i=Object(n.useRef)(),s=Object(n.useRef)(),l=Object(n.useRef)(),c=Object(n.useState)(null),u=Object(A.a)(c,2),d=u[0],h=u[1],p=Object(n.useRef)(!1),f=Object(n.useRef)(-1),g=Object(n.useRef)(),m=function(){l.current.deactivateBox(),h(null),p.current=!1,f.current=-1},b=function(){return requestAnimationFrame((function(){h((function(e){return p.current||-1!==f.current&&f.current===(null===e||void 0===e?void 0:e.index)||m(),e}))}))};Object(n.useLayoutEffect)((function(){return l.current=function(e,t){var a=t.onMouseenter,n=t.onMouseout,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=r.cellSize,i=r.cellPadding,s=r.cellStroke,l=void 0===s?2:s,c=r.lineWidth,u=void 0===c?1:c,d=r.colorRange,h=e.getContext("2d"),p=-1,f=function(){},g={},m=function(){},b=function(){f(),m(),h.clearRect(0,0,e.width,e.height),h.beginPath()},v=function(t){var s=t.data,c=wr(s,r),v=c.width,_=c.height,y=c.columns;e.width=parseInt(v),e.height=parseInt(_),b(),m();var O=Sr(s,d),x=function(e,t){h.fillStyle=O(e);var 
a=mr(y,t,o),n=br(y,t,o);u&&l&&h.clearRect(a-u,n-u,pr(o,i)+l,pr(o,i)+l),h.fillRect(a,n,pr(o,i),pr(o,i))};s.forEach(x),m=Or(e,y,s.length,{onMouseenter:a,onMouseout:n},r),f=function(){-1!==p&&x(s[p],p)},g=function(e){f(),p=e;var t=mr(y,e,o),a=br(y,e,o);u&&l&&(h.lineWidth=u,h.strokeStyle="#fff",h.strokeRect(t+u,a+u,pr(o,i)-l,pr(o,i)-l))}};return{clear:b,update:v,activateBox:function(e){return g(e)},deactivateBox:function(){return f()}}}(s.current,{onMouseenter:function(e){var t=e.index,a=Object(dr.a)(e,["index"]);f.current=t,l.current.activateBox(t),g.current=setTimeout((function(){h({target:{getBoundingClientRect:function(){return a}},index:t,rect:a})}),600)},onMouseout:function(){f.current=-1,clearTimeout(g.current),b()}},o),function(){return l.current.clear()}}),[]),Object(n.useLayoutEffect)((function(){d&&i.current&&i.current.labels[d.index]!==t.labels[d.index]&&m(),i.current=t,l.current.update(t)}),[t]);var v=Object(n.useCallback)((function(){p.current=!0}),[]),_=Object(n.useCallback)((function(){p.current=!1,b()}),[]),y=d&&Cr(d.target);return r.a.createElement(n.Fragment,null,r.a.createElement("canvas",{"data-testid":"groupBox",ref:s}),d&&a&&r.a.createElement(Z.h,{align:Ar[y],target:d.target,onMouseEnter:v,onMouseLeave:_},a(d.index,y)))},jr=$.d.span.withConfig({displayName:"groupBoxes__Title",componentId:"sc-11qnspu-0"})(["white-space:nowrap;text-overflow:ellipsis;overflow-x:hidden;"]),Tr=Object($.d)(Z.j).attrs({as:Z.D,gap:1}).withConfig({displayName:"groupBoxes__Label",componentId:"sc-11qnspu-1"})(["cursor:default;&:hover{font-weight:bold;}"]),Dr=function(e){var t=e.data,a=e.label,o=e.groupIndex,i=e.renderGroupPopover,s=e.renderBoxPopover,l=Object(n.useRef)(),c=l.current&&Cr(l.current),u=Object(n.useMemo)((function(){return{maxWidth:"".concat(xr(t.data),"px")}}),[t]),d=s&&function(e,t){return s({group:a,groupIndex:o,align:t,index:e})},h=i&&function(){return i({group:a,groupIndex:o,align:c})};return r.a.createElement(Z.j,{"data-testid":"groupBoxWrapper",column:!0,alignItems:"start",gap:1,margin:[0,4,0,0]},r.a.createElement(Z.A,{content:h,align:c,plain:!0},(function(e){var n=e.isOpen,o=e.ref,i=Object(dr.a)(e,["isOpen","ref"]);return r.a.createElement(Tr,Object.assign({"data-testid":"groupBoxWrapper-title",ref:function(e){l.current=e,o(e)},strong:n,style:u},i),r.a.createElement(jr,null,a),t.data.length>3&&r.a.createElement("span",null,"(",t.data.length,")"))})),r.a.createElement(kr,{data:t,renderTooltip:d}))},Pr=function(e){var t=e.data,a=e.labels,n=e.renderBoxPopover,o=e.renderGroupPopover;return r.a.createElement(Z.j,{"data-testid":"groupBoxes",flexWrap:!0,overflow:{vertical:"auto"},flex:!0},a.map((function(e,a){return t[a].data.length?r.a.createElement(Dr,{key:e,label:e,groupIndex:a,data:t[a],renderGroupPopover:o,renderBoxPopover:n}):null})))},Mr=Object($.d)(Z.j).attrs({width:"120px",height:"12px",round:!0}).withConfig({displayName:"legend__LinearColorScaleBar",componentId:"sc-7zc5gz-0"})(["background:linear-gradient(to right,#c6e3f6,#0e9aff);"]),Lr=function(e){var t=e.children;return r.a.createElement(Z.j,{"data-testid":"groupBox-legend",gap:4,alignItems:"center"},r.a.createElement(Z.E,{strong:!0},t),r.a.createElement(Z.j,{gap:2,alignItems:"center"},r.a.createElement(Z.E,null,"0%"),r.a.createElement(Mr,null),r.a.createElement(Z.E,null,"100%")))},Ir={k8s_cluster_id:{icon:"cluster",title:"Cluster Id"},k8s_node_name:{icon:"nodes_hollow",title:"Node"},k8s_namespace:{icon:"cluster_spaces",title:"Namespace"},k8s_controller_kind:{icon:"controller_kind",title:"Controller 
Kind"},k8s_controller_name:{icon:"controller_name",title:"Controller Name"},k8s_pod_name:{icon:"pod",title:"Pod Name"},k8s_container_name:{icon:"container",title:"Container"}},Nr=Object.keys(Ir),Rr=function(e){return e in Ir?Ir[e]:{title:e.replace(/_./g,(function(e){return" ".concat(e[1].toUpperCase())})).replace(/^k8s /,""),icon:"node"}},Br=function(){return r.a.createElement(Z.j,{height:"1px",width:"100%",background:"separator"})},Fr=function(e){return r.a.createElement(Z.m,Object.assign({color:"bright",wordBreak:"break-all","data-testid":"k8sPopover-header"},e))},Hr=Object($.d)(Z.b).attrs((function(e){return{flavour:"borderless",neutral:!0,themeType:"dark",className:"btn",disabled:e.active,"data-testid":"k8sPopoverChart-tab"}})).withConfig({displayName:"tabs__TabButton",componentId:"sc-18vk2nn-0"})(["&&&{height:initial;width:initial;padding:2px 20px;"," color:","}"],(function(e){var t=e.active,a=e.theme;return t&&"border-bottom: 3px solid ".concat(Object(Z.H)("bright")({theme:a}),";")}),(function(e){var t=e.active,a=e.theme;return Object(Z.H)(t?"bright":"separator")({theme:a})})),zr=function(e){var t=e.value,a=e.onChange,n=Object(dr.a)(e,["value","onChange"]);return r.a.createElement(Z.j,Object.assign({"data-testid":"k8sPopoverChart-tabs"},n),r.a.createElement(Hr,{label:"Context",active:"context"===t,onClick:function(){return a("context")}}),r.a.createElement(Hr,{label:"Metrics",active:"metrics"===t,onClick:function(){return a("metrics")}}))},Ur=Object($.d)(Object(Z.K)(Z.b)).attrs({icon:"chevron_right_s",label:"More",flavour:"borderless",neutral:!0,themeType:"dark",className:"btn",alignItems:"baseline",gap:1,direction:"rowReverse"}).withConfig({displayName:"section__ExpandButton",componentId:"z6mskd-0"})(["&&&{padding:0;margin:0;font-weight:normal;height:initial;width:initial;svg{height:6px;width:6px;position:initial;}}"]),Gr=function(e){var t=e.title,a=e.onExpand,n=e.children,o=e.noBorder;return r.a.createElement(Z.j,{gap:3,padding:[0,0,3],border:!o&&{side:"bottom",color:"separator"},column:!0,"data-testid":"k8sPopoverSection"},r.a.createElement(Z.j,{justifyContent:"between","data-testid":"k8sPopoverSection-header"},r.a.createElement(Z.n,{color:"border",wordBreak:"break-all"},t),a&&r.a.createElement(Ur,{onClick:a})),r.a.createElement(Z.j,{gap:4,column:!0,"data-testid":"k8sPopoverSection-content"},n))},Wr=a(176),Vr=a(616),Yr=function(e,t,a){var n,r;"string"===typeof e.width?n=e.width:"number"===typeof e.width&&(n="".concat(e.width.toString(),"px")),void 0===t.aspectRatio&&("string"===typeof e.height?r=e.height:"number"===typeof e.height&&(r="".concat(e.height.toString(),"px")));var o="bottom"===e.legendPosition,i=e.heightId?function(e,t){var a=localStorage.getItem("".concat(cn.a).concat(e));return a?Number.isNaN(Number(a))?null:"".concat(t?Number(a)+Ba.a:a,"px"):null}(e.heightId,o):null;if(i&&(r=i.replace(/"/g,"")),a){var s=o?.5*window.innerHeight:.4*window.innerHeight;r="".concat(s,"px")}var l=window.NETDATA.chartDefaults.min_width;return{height:r,width:n,minWidth:null===l?void 0:l}},Xr={root:null,rootMargin:"0px",threshold:void 0},Kr=function(){var e=[],t=new IntersectionObserver((function(t){t.forEach((function(t){var a,n=t.isIntersecting,r=t.target,o=null===(a=e.find((function(e){return e.element===r})))||void 0===a?void 0:a.callback;o&&o(n)}))}),Xr);return{subscribe:function(a,n){t.observe(a),e=e.concat({element:a,callback:n})},unsubscribe:function(t){e=e.filter((function(e){return e.element!==t}))}}}(),Zr=function(e){var t=e.attributes;return 
r.a.createElement("span",{style:{position:"absolute",opacity:0,width:0}},t.id)},qr=localStorage.getItem("wipe-chart-state"),$r=function(e){var t,a=e.attributes,o=e.chartUuid,i=e.children,s=e.portalNode,l=Object(la.a)();Object(n.useEffect)((function(){return function(){l(Object(_e.a)({id:o}))}}),[]);var c=ya[a.chartLibrary],u=Object(n.useState)(!1),d=Object(A.a)(u,2),h=d[0],p=d[1],f=(null===(t=Object(la.b)(ue.d))||void 0===t?void 0:t.chartId)===a.id;Object(n.useLayoutEffect)((function(){if(!h){var e=f&&"dygraph"===a.chartLibrary&&c.hasLegend(a),t=Yr(a,c,e);Object(Vr.a)((function(e,t){e&&s.style.setProperty(t,e)}),t),s.className=c.containerClass(a),p(!0)}}),[a,c,h,f,s,p]);var g=Object(la.b)(ue.j),m=Object(n.useRef)(),b=function(e,t){var a=Object(n.useState)(!1),r=Object(A.a)(a,2),o=r[0],i=r[1],s=Object(n.useRef)(o);return Object(n.useEffect)((function(){return"function"===typeof IntersectionObserver&&Kr.subscribe(e,(function(e){s.current!==e&&(t.current&&(t.current.style.visibility=e?"visible":"hidden"),s.current=e,i(e))})),function(){Kr.unsubscribe(e)}}),[t,e]),o}(s,m),v=Object(la.b)(ue.t)?750:100,_=Object(n.useState)(!b),y=Object(A.a)(_,2),O=y[0],x=y[1];Object(ma.a)((function(){x(!b)}),v,[b]);var w=!b||O,E=Object(n.useRef)(b);if(m.current&&E.current!==b&&(E.current=b),Object(n.useEffect)((function(){!ve&&w&&qr&&l(Object(_e.a)({id:o}))}),[o,l,w]),ve)return i;if(w){if(g)return r.a.createElement(Zr,{attributes:a});if(!m.current){var S=Array.from(s.children).map((function(e){return function(e){var t=e.cloneNode(!0),a=t.querySelectorAll("canvas");return e.querySelectorAll("canvas").forEach((function(e,t){var n=a[t],r=n.getContext("2d");n.width=e.width,n.height=e.height,r&&r.drawImage(e,0,0)})),t}(e)})),C=document.createElement("div");C.style.visibility="hidden",S.forEach((function(e){C.appendChild(e)})),m.current=C}return r.a.createElement(r.a.Fragment,null,r.a.createElement(Zr,{attributes:a}),r.a.createElement("div",{ref:function(e){e&&m.current&&e.appendChild(m.current)}}))}return!g&&m.current&&(m.current=void 0),i},Jr=function(e){var t=e.attributes,a=e.chartMetadata,n=e.chartUuid,o=e.dropdownMenu,i=e.portalNode,s=e.renderCustomElementForDygraph,l=e.onAttributesChange,c=e.uuid;return r.a.createElement($r,{attributes:t,portalNode:i,chartUuid:n},r.a.createElement(Uo,{attributes:t,chartUuid:n,renderCustomElementForDygraph:s,onAttributesChange:l,dropdownMenu:o,externalChartMetadata:a,portalNode:i,uuid:c}))},Qr=null,eo={sparklines_registry:{},os:"unknown",menu:{},submenu:{},context:{},sparkline:function(e,t,a){var n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"",r=arguments.length>4?arguments[4]:void 0;if(null===Qr||"undefined"===typeof Qr.charts)return"";if("undefined"===typeof Qr.charts[t])return"";if("undefined"===typeof Qr.charts[t].dimensions)return"";if("undefined"===typeof Qr.charts[t].dimensions[a])return"";var o="".concat(t,".").concat(a);return"undefined"===typeof this.sparklines_registry[o]?this.sparklines_registry[o]={count:1}:this.sparklines_registry[o].count+=1,o="".concat(o,".").concat(this.sparklines_registry[o].count),"".concat(e,'<div class="netdata-container" data-netdata="').concat(t,'" data-after="-120"\n data-width="25%" data-height="15px" data-chart-library="dygraph"\n data-dygraph-theme="sparkline" data-dimensions="').concat(a,'"\n data-show-value-of-').concat(a,'-at="').concat(o,'"></div>\n (<span id="').concat(o,'" style="display: inline-block; min-width: 50px; text-align: right;">\n X</span>').concat(n,")").concat(r)},gaugeChart:function(e,t){var 
a=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"",n=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"";return"".concat('<div class="netdata-container" data-netdata="CHART_UNIQUE_ID" data-dimensions="').concat(a,'"')+' data-chart-library="gauge" data-gauge-adjust="width"'+' data-title="'.concat(e,'"')+' data-width="'.concat(t,'"')+' data-before="0" data-after="-CHART_DURATION" data-points="CHART_DURATION"'+' data-colors="'.concat(n,'"')+' role="application"></div>'},anyAttribute:function(e,t,a,n,r){if("undefined"!==typeof e[a]){var o=e[a],i=(r?Object(g.a)({},o,{},o[r]):o)[t];return void 0===i?n:"function"===typeof i?i(eo.os):i}return n},menuTitle:function(e){if(e.sectionTitle)return e.sectionTitle;if("undefined"!==typeof e.menu_pattern){var t=e.type||e.id.split(".")[0];return"".concat(this.anyAttribute(this.menu,"title",e.menu_pattern,e.menu_pattern).toString()," ").concat(t.slice(-(t.length-e.menu_pattern.length-1)).toString()).replace(/_/g," ")}return this.anyAttribute(this.menu,"title",e.menu,e.menu).toString().replace(/_/g," ")},menuIcon:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"icon",e.menu_pattern,'<i class="fas fa-puzzle-piece"></i>').toString():this.anyAttribute(this.menu,"icon",e.menu,'<i class="fas fa-puzzle-piece"></i>')},menuInfo:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"info",e.menu_pattern,null):this.anyAttribute(this.menu,"info",e.menu,null)},menuHeight:function(e){return"undefined"!==typeof e.menu_pattern?this.anyAttribute(this.menu,"height",e.menu_pattern,1):this.anyAttribute(this.menu,"height",e.menu,1)},submenuTitle:function(e,t){var a="".concat(e,".").concat(t),n=this.anyAttribute(this.submenu,"title",a,t).toString().replace(/_/g," ");if(n.length>28){var r=n.substring(0,13),o=n.substring(n.length-12,n.length);return"".concat(r,"...").concat(o)}return n},submenuInfo:function(e,t){var a="".concat(e,".").concat(t);return this.anyAttribute(this.submenu,"info",a,null)},submenuHeight:function(e,t,a){var n="".concat(e,".").concat(t);return this.anyAttribute(this.submenu,"height",n,1)*a},contextInfo:function(e,t){var a=this.anyAttribute(this.context,"info",e,null,t);return null!==a?'<div class="shorten dashboard-context-info"\n role="document">'.concat(a,"</div>"):""},contextValueRange:function(e){if("undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].valueRange)try{return JSON.parse(this.context[e].valueRange)}catch(t){return[null,null]}return[null,null]},contextHeight:function(e,t){return"undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].height?t*this.context[e].height:t},contextDecimalDigits:function(e,t){return"undefined"!==typeof this.context[e]&&"undefined"!==typeof this.context[e].decimalDigits?this.context[e].decimalDigits:t}};window.netdataDashboard=eo;var to,ao,no,ro=Object($.d)(Z.B).withConfig({displayName:"chartOverview__Title",componentId:"sc-1ga681p-0"})(["text-overflow:ellipsis;max-width:120px;overflow-x:hidden;"]),oo={avg:"Average",sum:"Sum",min:"Min",max:"Max"},io=Object(n.memo)((function(e){var t,a=e.id,n=e.units,o=e.aggrMethod,i=e.displayedIndex,s=Object(la.b)((function(e){return Object(Ea.f)(e,{id:a})})),l="number"===typeof i?s.result[i]:s.view_latest_values[0],c=Ra({attributes:{},data:s,units:n,unitsCommon:null,unitsDesired:null,uuid:a}),u=c.legendFormatValue,d=c.unitsCurrent,h=function(e){return"".concat(oo[e])||""}(o);return 
r.a.createElement(Z.B,{wordBreak:"keep-all",color:"bright",margin:[0,0,0,"auto"],"data-testid":"k8sPopoverChart-chartValue"},h&&r.a.createElement(Z.B,{margin:[0,1,0,0],color:"separator","data-testid":"k8sPopoverChart-chartValue-aggr"},h),u(l),"percentage"===(t=d)?"%":" ".concat(t.replace(/milliseconds/,"ms")))})),so=function(e){var t=e.id,a=Object(dr.a)(e,["id"]),n=Object(la.b)((function(e){return Object(Ea.f)(e,{id:t})}));return n&&0!==n.result.length?r.a.createElement(io,Object.assign({id:t},a)):null},lo=Object(n.memo)((function(e){var t=e.id,a=e.chartMetadata,n=e.aggrMethod,o=e.displayedIndex,i=a.units,s=a.context.replace(/cgroup\./,""),l=eo.menuIcon(a);return r.a.createElement(Z.j,{gap:2,"data-testid":"k8sPopoverChart-overview"},r.a.createElement(Z.B,{color:"bright",dangerouslySetInnerHTML:{__html:l}}),r.a.createElement(ro,{color:"bright","data-testid":"k8sPopoverChart-title"},s),r.a.createElement(so,{id:t,units:i,aggrMethod:n,displayedIndex:o}))})),co=Object(n.memo)((function(e){var t=e.groupLabel,a=e.postGroupLabel,o=e.id,i=e.attributes,s=e.relatedIndex,l=Object(n.useContext)($.a),c=Object(n.useRef)(),d=Object(n.useState)(),h=Object(A.a)(d,2),p=h[0],f=h[1],m=Object(n.useMemo)((function(){return Object(Wr.b)(400,f)}),[]),b=Object(n.useState)(),v=Object(A.a)(b,2)[1];Object(n.useLayoutEffect)((function(){v(!0)}),[]);var _=i.relatedCharts[s],y=_.chartMetadata,O=_.attributes,x=Object(n.useMemo)((function(){return{id:y.id,width:"100%",height:"60px",chartLibrary:"sparkline",sparklineLineWidth:"2px",sparklineLineColor:Object(Z.H)("border")({theme:l}),sparklineFillColor:Object(Z.H)("disabled")({theme:l}),sparklineSpotRadius:0,sparklineDisableTooltips:!0,sparklineOnHover:function(e){return m(null===e||void 0===e?void 0:e.x)},httpMethod:"POST",host:i.host,nodeIDs:i.nodeIDs,dimensions:O.dimensions,aggrMethod:O.aggrMethod,labels:Object(g.a)(Object(u.a)({k8s_cluster_id:[y.chartLabels.k8s_cluster_id[0]]},i.groupBy,[t]),a&&Object(u.a)({},i.postGroupBy,[a]))}}),[y,i]);return r.a.createElement(Z.j,{gap:2,column:!0,"data-testid":"k8sPopoverChart"},r.a.createElement("div",{ref:c,style:{height:"60px",width:"100%"},"data-testid":"k8sPopoverChart-container"},c.current&&r.a.createElement(Jr,{chartUuid:o,attributes:x,chartMetadata:y,portalNode:c.current})),r.a.createElement(lo,{id:o,aggrMethod:x.aggrMethod,chartMetadata:y,displayedIndex:p}))})),uo=Object($.d)(Z.o).attrs({margin:[0,0,0,"auto"],color:"bright",width:"10px",height:"10px",alignSelf:"center",name:"nav_arrow_goto",role:"button",title:"Go to node","data-testid":"k8sPopoverItem-externalButton"}).withConfig({displayName:"item__ExternalButton",componentId:"sc-351vj-0"})(["cursor:pointer;"]),ho=function(e){var t=e.icon,a=e.title,n=e.secondary,o=e.onClick;return r.a.createElement(Z.j,{gap:1,alignItems:"start","data-testid":"k8sPopoverItem"},r.a.createElement(Z.j,{width:"22px",height:"22px","data-testid":"k8sPopoverItem-icon"},r.a.createElement(Z.o,{name:t,color:"bright",margin:[0,1,0,0],width:"22px",height:"22px"})),r.a.createElement(Z.B,{color:"bright","data-testid":"k8sPopoverItem-title"},a),n&&r.a.createElement(Z.B,{color:"border",wordBreak:"break-all","data-testid":"k8sPopoverItem-detail"},n),o&&r.a.createElement(uo,{onClick:o}))},po=function(e){var t=e.date,a=e.title,n=Object(sa.b)(),o=n.localeDateString,i=n.localeTimeString;return r.a.createElement(ho,{icon:"around_clock",title:a,secondary:"".concat(o(t)," | ").concat(i(t))})},fo=function(e){var t=e.before,a=e.after;return 
r.a.createElement(Gr,{title:"Time"},r.a.createElement(po,{title:"From",date:a}),r.a.createElement(po,{title:"To",date:t}))},go=function(e){var t=e.groupLabel,a=e.postGroupLabel,n=e.attributes,o=e.viewAfter,i=e.viewBefore;return r.a.createElement(Z.j,{gap:3,column:!0,width:"100%","data-testid":"k8sPopoverMetrics"},r.a.createElement(fo,{after:o,before:i}),r.a.createElement(Gr,{title:"Metrics",noBorder:!0},r.a.createElement(Z.j,{gap:3,column:!0,"data-testid":"k8sPopoverMetrics-container"},n.relatedCharts.map((function(e,o){var i=e.chartMetadata;return r.a.createElement(co,{key:i.id,id:[t,a,n.id,i.id].join("|"),attributes:n,relatedIndex:o,groupLabel:t,postGroupLabel:a})})))))},mo=function(e){var t=e.labelId,a=e.items,n=e.onExpand,o=e.onItemClick,i=Object(dr.a)(e,["labelId","items","onExpand","onItemClick"]),s=Rr(t),l=s.title,c=s.icon,u=a.slice(0,3),d=a.length>3,h=d?"".concat(l," (").concat(a.length,")"):l;return r.a.createElement(Gr,Object.assign({title:h,onExpand:d&&n},i),u.map((function(e){return r.a.createElement(ho,{key:e,icon:c,title:e,onClick:o&&function(){return o(e)}})})))},bo=Object(n.memo)((function(e){var t=e.chartLabels,a=e.onExpand,n=e.onNodeClick,o=function(e){e=Object(g.a)({},e);var t=Nr.reduce((function(t,a){return a in e?(delete e[a],[].concat(Object(C.a)(t),[a])):t}),[]);return[].concat(Object(C.a)(t),Object(C.a)(Object.keys(e)))}(t);return r.a.createElement(Z.j,{gap:3,column:!0,width:"100%","data-testid":"k8sPopoverContext"},o.map((function(e,i){return r.a.createElement(mo,{key:e,labelId:e,items:t[e],onExpand:function(){return a(e)},noBorder:i===o.length-1,onItemClick:"k8s_node_name"===e&&n})})))})),vo=Object($.d)(Object(Z.K)(Z.b)).attrs({flavour:"borderless",neutral:!0,themeType:"dark",className:"btn",alignItems:"start",gap:1}).withConfig({displayName:"list__StyledButton",componentId:"sc-11aix5x-0"})(["&&&{padding:0;margin:0;height:initial;width:initial;svg{height:18px;width:18px;position:initial;}}"]),_o=function(e){var t=e.labelId,a=e.items,n=e.onBack,o=e.onItemClick,i=Rr(t),s=i.title,l=i.icon;return r.a.createElement(Z.j,{height:"100%",gap:3,"data-testid":"k8sPopoverList",column:!0},r.a.createElement(Fr,null,r.a.createElement(vo,{label:"".concat(s," (").concat(a.length,")"),icon:"chevron_left",onClick:n,"data-testid":"k8sPopoverList-back"})),r.a.createElement(Br,null),r.a.createElement(Z.j,{gap:3,overflow:{vertical:"auto",horizontal:"hidden"},column:!0,"data-testid":"k8sPopoverList-container"},a.map((function(e){return r.a.createElement(ho,{key:e,icon:l,title:e,onClick:o&&function(){return o(e)}})}))))},yo=function(e){return r.a.createElement(Z.i,Object.assign({background:["transparent","popover"],padding:[2,4],width:"322px",height:"422px"},e))},Oo=function(e){var t=e.label,a=e.value,n=e.onChange,o=e.children;return r.a.createElement(Z.j,{height:"100%",column:!0},r.a.createElement(Fr,null,t),r.a.createElement(zr,{value:a,onChange:n,margin:[4,0,0,0]}),r.a.createElement(Br,null),r.a.createElement(Z.j,{gap:3,overflow:{vertical:"auto",horizontal:"hidden"},margin:[4,0,0,0]},o))},xo=function(e){var t=e.title,a=e.groupLabel,o=e.postGroupLabel,i=e.chartLabels,s=e.attributes,l=e.viewBefore,c=e.viewAfter,u=Object(dr.a)(e,["title","groupLabel","postGroupLabel","chartLabels","attributes","viewBefore","viewAfter"]),d=Object(n.useState)("context"),h=Object(A.a)(d,2),p=h[0],f=h[1],g="context"!==p&&"metrics"!==p,m=s.onNodeClick;return r.a.createElement(yo,Object.assign({"data-testid":"k8sPopover"},u),g&&r.a.createElement(_o,{labelId:p,items:i[p],attributes:s,onBack:function(){return 
f("context")},onItemClick:"k8s_node_name"===p&&m}),!g&&r.a.createElement(Oo,{label:t,value:p,onChange:f},"context"===p&&r.a.createElement(bo,{chartLabels:i,onExpand:f,onNodeClick:m}),"metrics"===p&&r.a.createElement(go,{groupLabel:a,postGroupLabel:o,attributes:s,viewAfter:c,viewBefore:l})))},wo=function(e){var t=e.chartData,a=e.chartMetadata,o=e.attributes,i=e.viewAfter,s=e.viewBefore,l=e.hoveredRow,c=e.hoveredX,d=e.showUndefined,h=o.filteredRows,p=Object(n.useMemo)((function(){return function(e,t){var a=e.keys,n=e.labels,r=e.groupBy,o=e.postGroupBy,i=e.aggrGroups,s=e.postAggregated,l=a[r],c=a[o],d=(t||Object(C.a)(Array(l.length)).map((function(e,t){return t}))).reduce((function(e,t){var a=l[t];a in e||(e[a]={labels:[],indexes:[],chartLabels:[],postAggregated:[]});var r=e[a];r.indexes.push(t),r.labels.push(c[t]),r.postAggregated.push(s[t]);var o=i.reduce((function(e,a){return n[a][t]?Object(g.a)({},e,Object(u.a)({},a,n[a][t])):e}),{});return r.chartLabels.push(o),e}),{}),h=Object.keys(d).sort((function(e,t){return d[t].indexes.length-d[e].indexes.length})),p=h.map((function(e){return d[e]})),f=p.map((function(e){return i.reduce((function(t,a){var n=new Set(e.chartLabels.reduce((function(e,t){return t[a]?[].concat(Object(C.a)(e),Object(C.a)(t[a])):e}),[]));return 0===n.size?t:Object(g.a)({},t,Object(u.a)({},a,Array.from(n)))}),{})}));return{labels:h,data:p,chartLabels:f}}(t,h)}),[h,t]),f=p.data,m=p.labels,b=p.chartLabels,v=t.id,_=t.result.data,y=t.groupBy,O=t.postGroupBy,x=Object(n.useMemo)((function(){return f.map((function(e){return{labels:e.labels,data:-1===l||l>_.length||!(l in _)?e.postAggregated:e.indexes.map((function(e){return _[l][e+1]}))||[]}}))}),[_,f,l]);return r.a.createElement(Z.j,{column:!0,width:"100%",height:"100%",gap:4,padding:[4,2]},r.a.createElement(Pr,{data:x,labels:m,renderBoxPopover:function(e){var t=e.groupIndex,a=e.index,n=e.align,l=f[t].labels[a],c=Rr(O).title;return r.a.createElement(xo,{align:n,title:"".concat(c,": ").concat(l),groupLabel:m[t],postGroupLabel:l,chartLabels:f[t].chartLabels[a],attributes:o,viewBefore:s,viewAfter:i})},renderGroupPopover:function(e){var t=e.groupIndex,a=e.align,n=m[t],l=Rr(y).title;return r.a.createElement(xo,{align:a,title:"".concat(l,": ").concat(n),groupLabel:n,chartLabels:b[t],attributes:o,viewBefore:s,viewAfter:i})}}),r.a.createElement(Z.j,{"data-testid":"legend-container",justifyContent:"between"},r.a.createElement(Lr,null,v),r.a.createElement(nn,{chartMetadata:a,showUndefined:d,hoveredX:c,viewBefore:s,chartData:t})))},Eo=function(e){var t=e.attributes,a=e.chartContainerElement,o=e.chartData,i=e.chartMetadata,s=e.chartLibrary,l=e.colors,c=e.chartUuid,u=e.chartHeight,d=e.chartWidth,h=e.dimensionsVisibility,p=e.hasEmptyData,f=e.isRemotelyControlled,g=e.legendFormatValue,m=e.orderedColors,b=e.hoveredRow,v=e.hoveredX,_=e.onUpdateChartPanAndZoom,y=e.immediatelyDispatchPanAndZoom,O=e.setHoveredX,x=e.setMinMax,w=e.showLatestOnBlur,E=e.unitsCurrent,S=e.viewAfterForCurrentData,C=e.viewBeforeForCurrentData,A=Object(la.a)(),k=Object(la.b)(ue.H),j=Object(n.useCallback)((function(e){var t=e.after,a=e.before,n=e.masterID;A(Object(Ke.r)({after:t,before:a,masterID:n})),A(k?Object(Ke.s)({after:S,before:C}):Object(_e.h)({after:S,before:C,id:c}))}),[c,A,k,S,C]),T=ya[s],D=T.hasLegend,P=D(t)?va()("netdata-chart-with-legend-".concat(t.legendPosition||"right"),"netdata-".concat(s,"-chart-with-legend-right")):va()("netdata-chart","netdata-".concat(s,"-chart")),M="".concat(s,"-").concat(c,"-chart"),L=-1===b&&!w;return function(e){var 
t=e.attributes,a=e.chartData,r=e.chartSettings,o=e.hoveredRow,i=e.legendFormatValue,s=e.showUndefined,l=Object(n.useRef)([]);Object(dn.a)((function(){var e=t.showValueOf;if(e&&!Object(un.a)(e)){var n=a.dimension_names,r=a.dimension_ids;n.forEach((function(t,a){var n=e["show-value-of-".concat(t.toLowerCase())]||e["show-value-of-".concat(r[a].toLowerCase(),"-at")];l.current=l.current.concat(document.getElementById(n))}))}})),Object(n.useEffect)((function(){if(l.current.length){var e=r.options(t),n=e.includes("flip");if("json"===a.format&&!e.includes("objectrows")){var c=a.result.data,u=-1===o?c.length-1:o,d=c[n?u:c.length-u-1];a.dimension_names.forEach((function(e,t){var a=s||!d?"":i(d[t+1]),n=l.current[t];n&&(n.innerText="".concat(a))}))}}}),[t,a,r,o,i,s])}({attributes:t,chartData:o,chartSettings:T,hoveredRow:b,legendFormatValue:g,showUndefined:L}),"easypiechart"===s?r.a.createElement(Gn,{attributes:t,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,chartLibrary:s,chartWidth:d,colors:l,chartUuid:c,dimensionsVisibility:h,isRemotelyControlled:f,key:d,legendFormatValue:g,orderedColors:m,hoveredRow:b,onUpdateChartPanAndZoom:_,setGlobalChartUnderlay:j,setMinMax:x,showUndefined:L,unitsCurrent:E,viewAfter:S,viewBefore:C}):"gauge"===s?r.a.createElement(Yn,{attributes:t,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,chartLibrary:s,chartHeight:u,chartWidth:d,colors:l,chartUuid:c,dimensionsVisibility:h,isRemotelyControlled:f,legendFormatValue:g,orderedColors:m,hoveredRow:b,hoveredX:v,onUpdateChartPanAndZoom:_,setGlobalChartUnderlay:j,setHoveredX:O,setMinMax:x,showUndefined:L,unitsCurrent:E,viewAfter:S,viewBefore:C}):"sparkline"===s?r.a.createElement(Qn,{attributes:t,chartContainerElement:a,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,dimensionsVisibility:h,isRemotelyControlled:f,orderedColors:m,unitsCurrent:E,viewAfterForCurrentData:S,viewBeforeForCurrentData:C}):"d3pie"===s?r.a.createElement(sr,{attributes:t,chartContainerElement:a,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,dimensionsVisibility:h,hoveredRow:b,hoveredX:v,isRemotelyControlled:f,legendFormatValue:g,orderedColors:m,setMinMax:x,showUndefined:L,unitsCurrent:E}):"peity"===s?r.a.createElement(lr,{attributes:t,chartContainerElement:a,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,orderedColors:m}):"google"===s?r.a.createElement(cr,{attributes:t,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,orderedColors:m,unitsCurrent:E}):"textonly"===s?r.a.createElement(ur,{attributes:t,chartData:o,chartElementClassName:P,chartElementId:M}):"groupbox"===s?r.a.createElement(wo,{chartData:o,chartMetadata:i,attributes:t,viewAfter:S,viewBefore:C,hoveredRow:b,hoveredX:v,showUndefined:L}):r.a.createElement(Fn,{attributes:t,chartData:o,chartMetadata:i,chartElementClassName:P,chartElementId:M,chartLibrary:s,colors:l,chartUuid:c,dimensionsVisibility:h,hasEmptyData:p,hasLegend:D(t),isRemotelyControlled:f,orderedColors:m,immediatelyDispatchPanAndZoom:y,hoveredRow:b,hoveredX:v,onUpdateChartPanAndZoom:_,setGlobalChartUnderlay:j,setHoveredX:O,setMinMax:x,unitsCurrent:E,viewAfter:S,viewBefore:C})},So=Object(n.memo)((function(e){var 
t=e.attributes,a=e.attributes.chartLibrary,o=e.chartContainerElement,i=e.chartData,s=e.chartMetadata,l=e.chartHeight,c=e.chartUuid,u=e.chartWidth,d=e.defaultAfter,h=e.globalPanAndZoom,p=e.hasEmptyData,f=e.isRemotelyControlled,g=e.viewRangeForCurrentData,m=e.viewRange,b=e.selectedDimensions,v=e.setSelectedDimensions,_=e.showLatestOnBlur,y=Object(n.useContext)($.a),O=Object(la.b)(ue.N),x=ya[a].hasLegend,w=t.units,E=void 0===w?s.units:w,S=t.unitsCommon,C=t.unitsDesired,k=void 0===C?O:C,j=Object(n.useMemo)((function(){return i.dimension_names.map((function(e){return 0===b.length||b.includes(e)}))}),[i.dimension_names,b]),T=x(t)&&window.NETDATA.options.current.legend_toolbox,D=T&&window.NETDATA.options.current.resize_charts&&!t.hideResizeHandler,P=Object(la.a)(),M=Object(n.useMemo)((function(){var e=Object.values(s.dimensions).map((function(e){return e.name})),t=i.dimension_names.filter((function(t){return!e.includes(t)}));return e.concat(t)}),[i.dimension_names,s.dimensions]);Object(n.useEffect)((function(){P(Object(Ke.h)({chartContext:s.context,chartUuid:c,colorsAttribute:t.colors,commonColorsAttribute:t.commonColors,dimensionNames:M}))}),[M,t.colors,t.commonColors,s.context,c,P]);var L=Ra({attributes:t,data:i,units:E,unitsCommon:S,unitsDesired:k,uuid:c}),I=L.legendFormatValue,N=L.legendFormatValueDecimalsFromMinMax,R=L.unitsCurrent,B=Object(n.useState)(null),F=Object(A.a)(B,2),H=F[0],z=F[1],U=Object(la.b)(ue.I),G=Object(n.useCallback)((function(e,t){if(U){var a=t?{chartUuid:null,hoveredX:e}:{chartUuid:c,hoveredX:e};P(Object(Ke.u)(a))}else z(e)}),[c,P,U]),W=Object(la.b)(ue.o),V=U?W:H,Y=Object(Ve.f)(m[0])?m[0]:i.after*Ve.a,X=Object(Ve.f)(m[1])?m[1]:i.before*Ve.a,K=Object(Ve.f)(g[0])?g[0]:i.after*Ve.a,Z=Object(Ve.f)(g[1])?g[1]:i.before*Ve.a,q=i.first_entry*Ve.a,J=i.last_entry*Ve.a,Q=Object(n.useMemo)((function(){return Math.round(u/30*s.update_every*Ve.a)}),[s.update_every,u]),ee=Object(la.b)(ue.H),te=Object(ka.a)((function(e){P(Object(Ke.s)(e))}),400),ae=Object(n.useCallback)((function(){te.flush()}),[te]),ne=Object(n.useCallback)((function(e){var t=e.after,a=e.before,n=e.callback,r=e.shouldFlushImmediately,o=void 0!==r&&r,s=e.shouldForceTimeRange,l=e.shouldNotExceedAvailableRange;if(!(a<t)){var u=Q,d=Math.round(X-Y),h=Math.round(t),p=Math.round(a),f=i.view_update_every*Ve.a;if(l){var g=q+f,m=J+f;p>m&&(h-=a-m,p=m),h<g&&(h=g)}var b=(p+=f-p%f)-(h-=h%f);d-f<u&&(u=d-f);var v=!0;if(b<d&&b<u){var _=((u=Q)-b)/2;b=(p+=_)-(h-=_),v=!1}var y=2*f,O=Math.abs(p-X);Math.abs(d-b)<=y&&O<=y&&v||(ee?(te.callback({after:h,before:p,masterID:c,shouldForceTimeRange:s}),o&&te.flush()):P(Object(_e.h)({after:h,before:p,id:c,shouldForceTimeRange:s})),v&&"function"===typeof n&&n(h,p))}}),[i.view_update_every,c,P,Q,ee,q,J,te,Y,X]),re=Object(n.useCallback)((function(e,t){var a=Math.max(e,q),n=Math.min(t,J);ne({after:a,before:n,shouldForceTimeRange:!0,shouldFlushImmediately:!0})}),[ne,q,J]),oe=Object(n.useCallback)((function(e){var t=(X-Y)*ja(e),a=Y-t;a>=q&&re(a,X-t)}),[re,q,Y,X]),ie=Object(n.useCallback)((function(e){var t=X-Y,a=t*ja(e),n=Math.min(X+a,J);re(n-t,n)}),[re,J,Y,X]),se=Object(n.useCallback)((function(e){var t=.8*ja(e);if(h)if(X-Y>1.2*(J-q))re(q,J);else{var a=(X-Y)*t/2;re(Y+a,X-a)}else P(Object(Ke.q)({after:Math.round(d/(t+1))}))}),[d,P,h,re,q,J,Y,X]),le=Object(n.useCallback)((function(e){var t=.8*ja(e);if(h){var a=((X-Y)/(1-.8*t)-(X-Y))/2;re(Y-a,X+a)}else P(Object(Ke.q)({after:Math.round(d*(t+1))}))}),[d,P,h,re,Y,X]),ce=Object(n.useMemo)((function(){return 
Object(ue.a)({chartContext:s.context,chartUuid:c,colorsAttribute:t.colors,commonColorsAttribute:t.commonColors})}),[t.colors,t.commonColors,s,c]),de=Object(la.b)(ce),he=Object(n.useMemo)((function(){return i.dimension_names.map(Object(qe.a)(Aa.a,de))}),[i,de]);if(!de)return r.a.createElement("span",null);var pe=V&&V>=Y&&V<=X,fe=i.view_update_every*Ve.a,ge=pe?Math.floor((V-i.after*Ve.a)/fe):-1,me="bottom"===t.legendPosition,be=r.a.createElement(ln,{onToolboxLeftClick:oe,onToolboxRightClick:ie,onToolboxZoomInClick:se,onToolboxZoomOutClick:le}),ve=D&&r.a.createElement(cn.b,{chartContainerElement:o,chartUuid:c,heightId:t.heightId,isLegendOnBottom:me});return r.a.createElement(r.a.Fragment,null,r.a.createElement(Eo,{key:y.name,attributes:t,chartContainerElement:o,chartData:i,chartMetadata:s,chartLibrary:a,colors:de,chartUuid:c,chartHeight:l,chartWidth:u,dimensionsVisibility:j,hasEmptyData:p,onUpdateChartPanAndZoom:ne,immediatelyDispatchPanAndZoom:ae,isRemotelyControlled:f,legendFormatValue:I,orderedColors:he,hoveredX:V,hoveredRow:ge,setHoveredX:G,setMinMax:function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1];return N(a,n)},showLatestOnBlur:_,unitsCurrent:R,viewAfterForCurrentData:K,viewBeforeForCurrentData:Z}),x(t)&&r.a.createElement(on,{attributes:t,chartUuid:c,chartMetadata:s,chartLibrary:a,colors:de,hoveredX:V,hoveredRow:ge,legendFormatValue:I,selectedDimensions:b,setSelectedDimensions:v,showLatestOnBlur:_,unitsCurrent:R,viewBefore:X,legendToolbox:be,resizeHandler:ve}),T&&!me&&be,!me&&ve)})),Co=a(631),Ao=a(625),ko=a(198),jo=$.d.div.withConfig({displayName:"styled__DropdownItem",componentId:"sc-1pvwpze-0"})(["display:flex;flex-direction:start;align-items:center;color:",";white-space:nowrap;& > svg use{fill:",";}"],Object(Z.H)(["neutral","limedSpruce"]),Object(Z.H)(["neutral","limedSpruce"])),To=$.d.span.withConfig({displayName:"styled__DropdownItemLabel",componentId:"sc-1pvwpze-1"})(["margin-left:12px;"]),Do=Object($.d)(Z.o).withConfig({displayName:"styled__DotsBtn",componentId:"sc-1pvwpze-2"})(["width:6px;height:10px;cursor:pointer;& use{fill:",";&:hover{fill:",";}}"],Object(Z.H)(["neutral","limedSpruce"]),Object(Z.H)(["neutral","regentgrey"])),Po=function(e){var t=e.attributes,a=e.chartID,o=e.chartMetadata,i=e.dropdownMenu,s=Object(n.useState)(!1),l=Object(A.a)(s,2),c=l[0],u=l[1],d=function(){u(!1)};return r.a.createElement(r.a.Fragment,null,r.a.createElement(Do,{name:"dots_2x3",onClick:function(){u(!0)}}),r.a.createElement(ko.b,null,r.a.createElement(ko.a,{open:c,onClose:d},r.a.createElement(Co.a,null,i.map((function(e){var n=e.icon,i=e.label,s=e.onClick;return r.a.createElement(Ao.b,{key:i,text:r.a.createElement(jo,null,n,r.a.createElement(To,null,i)),onClick:function(){s({attributes:t,chartMetadata:o,chartID:a}),d()}})}))))))},Mo=Object($.e)(["0%{opacity:.1;}50%{opacity:.5;}100%{opacity:.1;}"]),Lo=$.d.div.withConfig({displayName:"styled__SpinnerContainer",componentId:"sc-1foc7xt-0"})(["position:absolute;top:","px;right:","px;display:flex;"],Object(qe.a)("top"),Object(qe.a)("right")),Io=$.d.div.withConfig({displayName:"styled__Circle",componentId:"sc-1foc7xt-1"})(["width:","px;height:","px;background:",";border-radius:50%;animation:1s linear infinite both 
",";"],Object(qe.a)("size"),Object(qe.a)("size"),Object(Z.H)("border"),Mo),No=Object($.d)(Io).withConfig({displayName:"styled__Circle2",componentId:"sc-1foc7xt-2"})(["animation-delay:.3s;margin-left:","px;"],Object(qe.a)("spaceBetween")),Ro=Object($.d)(Io).withConfig({displayName:"styled__Circle3",componentId:"sc-1foc7xt-3"})(["animation-delay:.6s;margin-left:","px;"],Object(qe.a)("spaceBetween")),Bo=function(e){var t=e.chartLibrary,a="dygraph"===t?33:0,n="dygraph"===t?8:0,o="dygraph"===t?10:7,i="dygraph"===t?4:2;return r.a.createElement(Lo,{top:a,right:n},r.a.createElement(Io,{size:o}),r.a.createElement(No,{size:o,spaceBetween:i}),r.a.createElement(Ro,{size:o,spaceBetween:i}))},Fo=$.d.div.withConfig({displayName:"styled__ChartDropdownContainer",componentId:"sc-177ahzb-0"})(["position:absolute;top:0;left:40px;width:20px;height:20px;z-index:",";"],10),Ho=(a(514),{"sum-of-abs":"sum"}),zo=[],Uo=function(e){var t,a,o,i,s,l=e.attributes,c=e.chartUuid,u=e.uuid,d=e.dropdownMenu,h=e.externalChartMetadata,p=e.portalNode,g=e.renderCustomElementForDygraph,m=e.onAttributesChange,b=l.host,v=void 0===b?D.b:b,_=l.id,y=l.nodeIDs,O=Object(la.a)(),x=Object(n.useMemo)(Ea.a,[]),w=Object(la.b)((function(e){return x(e,{chartId:_,id:c})})),E=w.chartMetadata,S=w.isFetchingDetails,k=h||E;Object(n.useEffect)((function(){E||S||h||O(_e.b.request({chart:_,id:c,host:v,nodeIDs:y}))}),[_,c,O,v,S,E,h,y,u]);var T=Object(la.b)(ue.m),P=Object(la.b)((function(e){return Object(Ea.i)(e,{id:c})})),M=P||T,L=!!T&&T.masterID===c||Boolean(P),I=(null===M||void 0===M?void 0:M.shouldForceTimeRange)||!1,N=!M||!L||I,R=Object(la.b)((function(e){return Object(Ea.g)(e,{id:c})})),B=Object(la.b)((function(e){return Object(Ea.j)(e,{id:c})})),F=Object(la.b)((function(e){return Object(Ea.f)(e,{id:c})})),H=Object(la.b)((function(e){return Object(Ea.h)(e,{id:c})})),z=Object(la.b)(ue.o),U=Object(ua.a)([[Object(da.a)(!!F),function(){return 1e3*F.view_update_every}],[Object(da.a)(!!k),function(){return 1e3*k.update_every}],[ha.a,Object(da.a)(f.a)]])(),G=function(e){var t=e.areCriteriaMet,a=e.preferedIntervalTime,r=Object(la.b)(ue.s),o=Object(la.b)(ue.G),i=Object(la.b)(ue.n),s=!(!r&&o)&&!i,l=Object(n.useState)(!0),c=Object(A.a)(l,2),u=c[0],d=c[1],h=Object(n.useState)(!1),p=Object(A.a)(h,2),f=p[0],g=p[1];Object(n.useEffect)((function(){f&&s&&(g(!1),d(!0))}),[f,g,s]);var m=!s&&f||ve?wa:a;return Object(xa.a)((function(){if(t){if(!s)return void g(!0);d(!0)}}),m),[u,d]}({areCriteriaMet:!M&&!z,preferedIntervalTime:U}),W=Object(A.a)(G,2),V=W[0],Y=W[1],X=Object(pa.a)(M,f.b);Object(n.useEffect)((function(){Y(!0)}),[X,Y]);var K=Object(la.b)(ue.i);Object(fa.a)((function(){Y(!0)}),[l.after,l.before,K,l.dimensions,l.aggrMethod,l.groupBy]);var Z=l.before,q=void 0===Z?window.NETDATA.chartDefaults.before:Z,$=l.after||K,J=ya[l.chartLibrary],Q=J.hasLegend,ee=p.getBoundingClientRect(),te=ee.width-(Q(l)?140:0),ae=ee.height,ne=Boolean(Object(la.b)(ue.D)),re=Object(la.b)(ue.z)||ne,oe=Object(la.b)(ue.w),ie=j.a.CancelToken,se=Object(n.useMemo)((function(){return ie.source()}),[]);Object(ga.a)((function(){se.cancel("Chart scrolled out of view")}));var le=Object(n.useState)(!1),ce=Object(A.a)(le,2),de=ce[0],he=ce[1],pe=de&&H;Object(ma.a)((function(){H&&he(!0)}),2e3,[H]),Object(n.useEffect)((function(){!H&&de&&he(!1)}),[H,de]),Object(n.useEffect)((function(){if(V&&k&&!H){var e,t,a,n=window.NETDATA.options.force_data_points,r=1;if(M)if(L){if(a=[e=Math.round(M.after/1e3),t=Math.round(M.before/1e3)],oe){var o=Math.round((t-e)/2);e-=o,t+=o,r=2}}else 
e=Math.round(M.after/1e3),t=Math.round(M.before/1e3),r=1;else t=q,e=$,r=1;a=(a||[e,t]).map((function(e){return 1e3*e}));var i=l.points||Math.round(te/function(e){var t=e.attributes,a=e.chartSettings,n=t.pixelsPerPoint;if("number"===typeof n)return n;var r=a.pixelsPerPoint(t);return Math.max.apply(Math,Object(C.a)([r,window.NETDATA.options.current.pixels_per_point].filter((function(e){return"number"===typeof e}))))}({attributes:l,chartSettings:J})),s=n||i*r,u=l.forceTimeWindow||Boolean(K)?function(e){var t=e.after,a=e.before,n=e.firstEntry,r=e.points,o=Math.round((new Date).valueOf()/1e3),i=t>0?t:o+t,s=a>0?a:o+a;if(i<n){var l=s-i,c=s-Math.max(i,n);return Math.round(r*c/l)}return null}({after:e,before:t,firstEntry:k.first_entry,points:s}):null,d=l.method||window.NETDATA.chartDefaults.method;Y(!1),O(_e.c.request({host:v,context:k.context,chart:k.id,format:J.format,points:u||s,group:d,gtime:l.gtime||0,options:Oa(l,re),after:e||null,before:t||null,dimensions:l.dimensions,labels:l.labels,postGroupBy:l.postGroupBy,postAggregationMethod:l.postAggregationMethod,aggrMethod:l.aggrMethod,aggrGroups:l.aggrGroups,dimensionsAggrMethod:Ho[l.dimensionsAggrMethod]||l.dimensionsAggrMethod,nodeIDs:y,httpMethod:l.httpMethod,groupBy:l.groupBy,fetchDataParams:{fillMissingPoints:u?s-u:void 0,isRemotelyControlled:N,viewRange:a},id:c,cancelTokenSource:se}))}}),[l,k,J,c,te,K,O,Q,v,q,H,L,N,$,M,p,Y,re,oe,V,se,y,u]),Object(la.b)(ue.F);var fe=null===l||void 0===l?void 0:l.selectedDimensions,ge=Object(n.useState)(fe||zo),me=Object(A.a)(ge,2),be=me[0],ye=me[1];Object(n.useLayoutEffect)((function(){fe&&ye(fe)}),[fe]),Object(n.useLayoutEffect)((function(){ye(fe||zo)}),[null===l||void 0===l?void 0:l.groupBy]);var Oe=Object(n.useMemo)((function(){return g&&g({onAttributesChange:m,attributes:l,chartMetadata:k,chartData:F,chartID:_})}),[m,g,l,_,k,F]),xe=0===(null===(t=F)||void 0===t?void 0:null===(a=t.result)||void 0===a?void 0:null===(o=a.data)||void 0===o?void 0:o.length)||0===(null===(i=F)||void 0===i?void 0:null===(s=i.result)||void 0===s?void 0:s.length);return F&&k?r.a.createElement(r.a.Fragment,null,xe&&r.a.createElement(Ca,{key:"".concat(xe),hasEmptyData:xe,containerNode:p}),r.a.createElement(So,{attributes:l,chartContainerElement:p,chartData:F,chartMetadata:k,chartUuid:c,chartHeight:ae,chartWidth:te,defaultAfter:K,globalPanAndZoom:T,hasEmptyData:xe,isRemotelyControlled:R.isRemotelyControlled,viewRangeForCurrentData:R.viewRange,viewRange:B,selectedDimensions:be,setSelectedDimensions:ye,showLatestOnBlur:!M}),pe&&r.a.createElement(Bo,{chartLibrary:l.chartLibrary}),d&&d.length>0&&r.a.createElement(Fo,null,r.a.createElement(Po,{dropdownMenu:d,chartID:_,attributes:l,chartMetadata:k})),Oe):r.a.createElement(r.a.Fragment,null,r.a.createElement(Ca,{key:"".concat(xe),hasEmptyData:xe,containerNode:p}),pe&&r.a.createElement(Bo,{chartLibrary:l.chartLibrary}))},Go=a(574),Wo=function(e){return e[O.a]},Vo=Object(Ha.a)(Wo,Object(qe.a)("isSnapshotMode")),Yo=Object(Ha.a)(Wo,Object(Go.a)(["snapshotCharts","snapshotDataPoints"])),Xo=function(e){var t=e.attributes,a=e.chartUuid,r=t.host||D.b,o=Object(la.b)(Yo).snapshotDataPoints,i=t.method||window.NETDATA.chartDefaults.method,s=t.chartLibrary,l=ya[s],c=Object(la.b)(ue.m),u=c.after/Ve.a,d=c.before/Ve.a,h=Object(la.a)();return 
Object(n.useEffect)((function(){h(_e.e.request({host:r,context:t.id,chart:t.id,format:l.format,points:o,group:i,gtime:t.gtime||0,options:Oa(t,!0),after:u||null,before:d||null,dimensions:t.dimensions,aggrMethod:t.aggrMethod,nodeIDs:t.nodeIDs,chartLibrary:s,id:a,groupBy:t.groupBy}))})),null},Ko=function(e){var t=e.attributes,a=e.chartUuid;return Object(la.b)(Vo)?r.a.createElement(Xo,{attributes:t,chartUuid:a}):null},Zo=Object(n.memo)((function(){var e=Array.from(document.querySelectorAll("[data-netdata]"));return r.a.createElement(r.a.Fragment,null,e.map((function(e,t){var a=Object(ca.b)(e),n="".concat(a.id,"-").concat(t);return Object(o.createPortal)(r.a.createElement(r.a.Fragment,null,r.a.createElement($r,{attributes:a,chartUuid:n,portalNode:e},r.a.createElement(Uo,{attributes:a,chartUuid:n,portalNode:e})),r.a.createElement(Ko,{attributes:a,chartUuid:n})),e)})))})),qo=function(e){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],a=arguments.length>2?arguments[2]:void 0,r=Object(n.useState)(!1),o=Object(A.a)(r,2),i=o[0],s=o[1],l=Object(n.useState)(!1),c=Object(A.a)(l,2),u=c[0],d=c[1],h=Object(n.useState)(null),p=Object(A.a)(h,2),f=p[0],g=p[1];return Object(n.useEffect)((function(){if(t&&e){var n=a?{headers:null,withCredentials:!1}:{};s(!0),T.get(e,n).then((function(e){e.data&&(g(e.data),d(!1),s(!1))})).catch((function(t){console.warn("error fetching ".concat(e),t),d(!0),s(!1)}))}}),[a,t,e]),[f,i,u]},$o=document.getElementById("modal-root"),Jo=function(e){var t=e.children,a=Object(n.useRef)(document.createElement("div"));return Object(n.useEffect)((function(){return $o.appendChild(a.current),function(){$o.removeChild(a.current)}}),[]),Object(o.createPortal)(t,a.current)},Qo=(a(515),function(){var e=Object(n.useRef)(null);Object(n.useEffect)((function(){e.current&&window.$(e.current).modal("show")}));var t=Object(la.b)(Ea.b),a=Object(la.b)(Ea.c),o=Object(la.b)(Ea.k),i=0===t?0:a/t*100;Object(n.useEffect)((function(){100===i&&setTimeout((function(){window.$(e.current).modal("hide"),window.print(),window.close()}),1e3)}),[i]);var s=o&&"".concat(Math.round(i),"%, ").concat(o);return r.a.createElement(Jo,null,r.a.createElement("div",{ref:e,className:"modal fade",id:"printModal",tabIndex:-1,role:"dialog","aria-labelledby":"printModalLabel","data-keyboard":"false","data-backdrop":"static"},r.a.createElement("div",{className:"modal-dialog modal-lg",role:"document"},r.a.createElement("div",{className:"modal-content"},r.a.createElement("div",{className:"modal-header"},r.a.createElement("button",{type:"button",className:va()("close",{"print-modal__close-button--disabled":!0}),"data-dismiss":"modal","aria-label":"Close"},r.a.createElement("span",{"aria-hidden":"true"},"\xd7")),r.a.createElement("h4",{className:"modal-title",id:"printModalLabel"},"Preparing dashboard for printing...")),r.a.createElement("div",{className:"modal-body"},"Please wait while we initialize and render all the charts on the dashboard.",r.a.createElement("div",{className:"progress progress-striped active",style:{height:"2em"}},r.a.createElement("div",{id:"printModalProgressBar",className:"progress-bar progress-bar-info",role:"progressbar","aria-valuenow":i,"aria-valuemin":0,"aria-valuemax":100,style:{minWidth:"2em",width:"".concat(i,"%")}},r.a.createElement("span",{id:"printModalProgressBarText",style:{paddingLeft:10,paddingTop:4,fontSize:"1.2em",textAlign:"left",width:"100%",position:"absolute",display:"block",color:"black"}},s))),"The print dialog will appear as soon as we finish rendering the 
page."),r.a.createElement("div",{className:"modal-footer"})))))}),ei=$.d.div.withConfig({displayName:"styled__SocialMediaContainer",componentId:"sc-3gu94j-0"})(["width:185px;padding:",";background:",";font-size:12px;margin-bottom:",";"],Object(Z.J)(2),Object(Z.H)("borderSecondary"),Object(Z.J)(3)),ti=$.d.div.withConfig({displayName:"styled__FirstRow",componentId:"sc-3gu94j-1"})(["display:flex;justify-content:space-between;"]),ai=$.d.div.withConfig({displayName:"styled__GithubCopy",componentId:"sc-3gu94j-2"})([""]),ni=$.d.div.withConfig({displayName:"styled__GithubCopyLine",componentId:"sc-3gu94j-3"})([""]),ri=$.d.a.withConfig({displayName:"styled__SocialMediaLink",componentId:"sc-3gu94j-4"})(["&,&:hover{color:",";}"],Object(Z.H)("main")),oi=Object($.d)(ri).withConfig({displayName:"styled__GithubStarQuestion",componentId:"sc-3gu94j-5"})([""]),ii=Object($.d)(ri).withConfig({displayName:"styled__GithubIcon",componentId:"sc-3gu94j-6"})(["font-size:24px;"]),si=Object($.d)(ri).withConfig({displayName:"styled__TwitterIcon",componentId:"sc-3gu94j-7"})(["font-size:17px;"]),li=Object($.d)(ri).withConfig({displayName:"styled__FacebookIcon",componentId:"sc-3gu94j-8"})(["font-size:23px;"]),ci=$.d.div.withConfig({displayName:"styled__Separator",componentId:"sc-3gu94j-9"})(["margin-top:",";border-top:1px solid ",";"],Object(Z.J)(2),Object(Z.H)("separator")),ui=$.d.div.withConfig({displayName:"styled__SecondRow",componentId:"sc-3gu94j-10"})(["margin-top:",";display:flex;align-items:center;justify-content:space-between;"],Object(Z.J)(2)),di=$.d.span.withConfig({displayName:"styled__SecondRowText",componentId:"sc-3gu94j-11"})(["font-size:10px;"]),hi=function(){return r.a.createElement(ei,null,r.a.createElement(ti,null,r.a.createElement(ai,null,r.a.createElement(ni,null,"Do you like Netdata?"),r.a.createElement(oi,{href:"https://github.com/netdata/netdata/",target:"_blank"},"Give us a star!")),r.a.createElement(ii,{href:"https://github.com/netdata/netdata/",target:"_blank"},r.a.createElement("i",{className:"fab fa-github"}))),r.a.createElement(ci,null),r.a.createElement(ui,null,r.a.createElement(di,null,"And share the word!"),r.a.createElement(si,{href:"https://twitter.com/linuxnetdata/",target:"_blank"},r.a.createElement("i",{className:"fab fa-twitter"})),r.a.createElement(li,{href:"https://www.facebook.com/linuxnetdata/",target:"_blank"},r.a.createElement("i",{className:"fab fa-facebook"}))))},pi=function(e){var t=e.children,a=Object(n.useRef)(document.querySelector("#sidebar-end-portal-container"));return Object(o.createPortal)(t,a.current)},fi=(a(516),Object($.d)((function(e){var t=e.className,a=Object(dr.a)(e,["className"]);return r.a.createElement("div",{className:t},r.a.createElement(q.a,Object.assign({},a,{closeButton:!1})))})).withConfig({displayName:"notifications-container__NotificationsContainer",componentId:"n0kffi-0"})([".Toastify__toast-container{position:fixed;width:unset;min-width:400px;max-width:500px;",";color:",";}.Toastify__toast{padding:0;padding-top:5px;}.Toastify__toast--error{background:",";border:1px solid ",";}.Toastify__toast--warning{}.Toastify__toast--success{background:",";border:1px solid ",";}.Toastify__toast-body{}.Toastify__progress-bar{bottom:unset;top:0;}.Toastify__progress-bar--success{background-color:",";}.Toastify__progress-bar--error{background-color:",";}"],"z-index: 
50;",Object(Z.H)(["neutral","limedSpruce"]),Object(Z.H)(["red","lavender"]),Object(Z.H)("error"),Object(Z.H)(["green","frostee"]),Object(Z.H)("success"),Object(Z.H)("success"),Object(Z.H)("error"))),gi=function(e){var t=e.icon,a=e.children,n=e.hasBorder;return r.a.createElement(Z.j,{gap:2,border:n&&{side:"right",color:"separator"},alignItems:"center",padding:[0,3,0,0],height:"100%"},!!t&&r.a.createElement(Z.o,{name:t,color:"bright",height:"15px"}),a)},mi=function(e){var t=e.global.snapshot,a=e.global.chartsMetadata.data;return t||a?t?t.hostname:a.hostname:""},bi=function(){var e=Object(la.b)(mi);return r.a.createElement(gi,{icon:"node_hollow"},r.a.createElement(Z.B,{"data-testid":"header-nodename-".concat(e),color:"bright",strong:!0,truncate:!0},e))},vi=["neutral","black"],_i=function(e){var t=e.children,a=e.isBasic;return r.a.createElement(Z.j,Object.assign({padding:[1.5,2],margin:[2],background:vi,round:1},!a&&{width:{max:"300px"}}),r.a.createElement(Z.F,{color:"bright"},t))},yi=function(e,t){var a=t.isBasic,n="function"===typeof e?e():e;return"string"===typeof e||a?r.a.createElement(_i,{isBasic:a},n):n},Oi=function(e){var t=e.children,a=e.content,o=e.isBasic,i=Object(dr.a)(e,["children","content","isBasic"]),s=Object(n.useCallback)((function(){return yi(a,{isBasic:o})}),[a,o]);return r.a.createElement(Z.G,Object.assign({plain:!0,animation:!0,content:s},i),t)},xi=function(){var e=Object(la.a)(),t=Object(n.useCallback)((function(){return e(Object(Ke.t)())}),[e]);return r.a.createElement(Z.j,{gap:2,"data-testid":"header-options-button"},r.a.createElement(Oi,{content:"Import a Netdata snapshot",align:"bottom",plain:!0},r.a.createElement(Z.b,{flavour:"borderless",neutral:!0,themeType:"dark","data-toggle":"modal","data-target":"#loadSnapshotModal",icon:"download"})),r.a.createElement(Oi,{content:"Export a Netdata snapshot",align:"bottom",plain:!0},r.a.createElement(Z.b,{onClick:t,flavour:"borderless",neutral:!0,themeType:"dark","data-toggle":"modal","data-target":"#saveSnapshotModal",icon:"upload"})),r.a.createElement(Oi,{content:"Print the dashboard",align:"bottom",plain:!0},r.a.createElement(Z.b,{flavour:"borderless",neutral:!0,themeType:"dark","data-toggle":"modal","data-target":"#printPreflightModal",icon:"print"})))},wi=function(e){var t,a=e.currentVersion,n="stable"===e.releaseChannel,o=qo("https://api.github.com/repos/netdata/netdata/releases/latest",n,!0),i=Object(A.a)(o,1)[0],s=qo("https://www.googleapis.com/storage/v1/b/netdata-nightlies/o/latest-version.txt",!n),l=Object(A.a)(s,1)[0],c=qo(null===l||void 0===l?void 0:l.mediaLink,Boolean(l)),u=Object(A.a)(c,1)[0],d=n?null===(t=i)||void 0===t?void 0:t.tag_name.replace(/(\r\n|\n|\r| |\t)/gm,""):u?function(e){return e.replace(/(\r\n|\n|\r| |\t)/gm,"")}(u):null;if(!d)return null;var h=!function(e,t){if(e===t)return!0;var a=e.split("."),n=t.split("."),r=parseInt(a[0].substring(1,2),10),o=parseInt(n[0].substring(1,2),10);return!(r<o)&&(r>o||!((r=parseInt(a[1],10))<(o=parseInt(n[1],10)))&&(r>o||(a=a[2].split("-"),n=n[2].split("-"),!((r=parseInt(a[0],10))<(o=parseInt(n[0],10)))&&(r>o||!((r=a.length>1?parseInt(a[1],10):0)<(o=n.length>1?parseInt(n[1],10):0))))))}(a,d);return r.a.createElement(Oi,{content:h?"Need help?":"Check Version",align:"bottom",plain:!0},r.a.createElement(Z.b,{"data-testid":"header-version-control-button",flavour:"borderless",themeType:"dark",small:!0,neutral:!h,warning:h,name:h?"update_pending":"update",icon:h?"update_pending":"update","data-toggle":"modal","data-target":"#updateModal"}))},Ei=function(e){var 
t=e.global.chartsMetadata.data;return t?{version:t.version,releaseChannel:t.release_channel}:null},Si=function(){var e=Object(la.b)(Ei);return e&&r.a.createElement(wi,{currentVersion:e.version,releaseChannel:e.releaseChannel})},Ci=function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=Object(n.useState)(!!e),a=Object(A.a)(t,2),r=a[0],o=a[1],i=Object(n.useCallback)((function(){return o((function(e){return!e}))}),[]),s=Object(n.useCallback)((function(){return o(!0)}),[]),l=Object(n.useCallback)((function(){return o(!1)}),[]);return[r,i,s,l]},Ai=function(e){var t,a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return null!==(t=JSON.parse(localStorage.getItem(e)))&&void 0!==t?t:a},ki=function(e,t){var a=Object(n.useState)((function(){return Ai(e,t)})),r=Object(A.a)(a,2),o=r[0],i=r[1];return Object(n.useEffect)((function(){return localStorage.setItem(e,JSON.stringify(o))}),[e,o]),[o,i]},ji=a(628),Ti=$.d.div.withConfig({displayName:"styled__RootContainer",componentId:"sc-1al6duq-0"})(["width:100%;height:100%;display:flex;flex-flow:row nowrap;align-items:center;"]),Di=(Object($.d)(ji.a).withConfig({displayName:"styled__StyledMenu",componentId:"sc-1al6duq-1"})([""]),$.d.div.withConfig({displayName:"styled__DropdownContainer",componentId:"sc-1al6duq-2"})(["cursor:pointer;color:",";.mdc-menu-surface{border-radius:0;.mdc-list{padding:0;}.mdc-list-item{padding:0 "," 0 ",";font-size:14px;height:",";}}"],Object(Z.H)("bright"),Object(Z.J)(5),Object(Z.J)(5),Object(Z.J)(6))),Pi=$.d.div.withConfig({displayName:"styled__ListContainer",componentId:"sc-1al6duq-3"})(["padding:"," 0;"],Object(Z.J)(3)),Mi=Object($.d)(Z.o).withConfig({displayName:"styled__OpenerIcon",componentId:"sc-1al6duq-4"})(["flex-shrink:0;flex-grow:0;margin-left:",";fill:",";width:10px;height:5px;"],(function(e){return e.noMargin?"unset":"16px"}),Object(Z.H)("bright")),Li=$.d.div.withConfig({displayName:"styled__PickerBox",componentId:"y9nggg-0"})(["display:flex;position:relative;min-width:",";min-height:",";flex-direction:column;align-items:flex-end;background-color:",";color:",";z-index:",";border-radius:8px;"],Object(Z.J)(102),Object(Z.J)(43),Object(Z.H)("mainBackground"),Object(Z.H)("text"),60),Ii=$.d.span.withConfig({displayName:"styled__StyledTimePeriod",componentId:"y9nggg-1"})(["margin-top:",";cursor:pointer;width:187px;height:",";&:first-of-type{margin-top:",";}&:last-of-type{margin-bottom:",";}& > span:hover{color:",";}"],Object(Z.J)(3),Object(Z.J)(2),Object(Z.J)(1),Object(Z.J)(1),Object(Z.H)("textLite")),Ni=$.d.span.withConfig({displayName:"styled__StyledCustomTimePeriod",componentId:"y9nggg-2"})(["margin:"," "," 0;color:",";cursor:pointer;&:first-of-type{margin-top:0;}&:hover{color:",";}"],Object(Z.J)(1),Object(Z.J)(3),(function(e){var t=e.isSelected,a=e.theme;return Object(Z.H)(t?"primary":"text")({theme:a})}),Object(Z.H)("textLite")),Ri=Object($.d)((function(e){var t,a=e.title,o=e.children,i=e.className,s=e.renderTitle,l=e.isOpen,c=void 0!==l&&l,u=e.onMenuToggle,d=e.anchorCorner,h=void 0===d?"bottomStart":d,p=e.renderOpener,f=Object(n.useRef)();return r.a.createElement(Di,{className:i},r.a.createElement(ko.b,null,r.a.createElement(ko.a,{ref:f,open:c,onClose:function(){u(!1)},anchorCorner:h},"function"===typeof o?c&&r.a.createElement(Pi,null,r.a.createElement(Co.a,null,o({maxHeight:null===(t=f.current)||void 0===t?void 
0:t.root.ref.style.maxHeight}))):r.a.createElement(Pi,null,r.a.createElement(Co.a,null,o))),r.a.createElement(Ti,{onClick:function(){u(!c)}},a||s&&s(),p?p():r.a.createElement(Mi,{name:"triangle_down",noMargin:Boolean(s)}))))})).withConfig({displayName:"styled__StyledDropdown",componentId:"y9nggg-3"})(["width:88px;height:32px;padding-top:8px;padding-bottom:8px;padding-left:8px;padding-right:7px;border:1px solid ",";box-sizing:border-box;border-radius:4px;display:flex;justify-content:center;align-items:center;color:",";.mdc-menu-surface--anchor{.mdc-menu-surface--open{"," margin-top:",";background:",";border-radius:4px;}}.mdc-list{display:flex;flex-direction:column;justify-content:center;align-items:center;}"],Object(Z.H)("border"),Object(Z.H)("text"),"z-index: 45;",Object(Z.J)(2),Object(Z.H)("mainBackground")),Bi=Object($.d)(Z.o).withConfig({displayName:"styled__DropdownIcon",componentId:"y9nggg-4"})(["fill:",";width:12px;height:12px;"],Object(Z.H)("text")),Fi=$.d.input.withConfig({displayName:"styled__CustomInput",componentId:"y9nggg-5"})(["border:1px solid ",";color:inherit;background:",";box-sizing:border-box;border-radius:4px;padding:4px;width:32px;height:32px;margin-left:10px;margin-right:10px;outline:none;&:focus{border:1px solid ",";}"],Object(Z.H)("border"),Object(Z.H)("mainBackground"),Object(Z.H)("primary")),Hi=Object($.d)(Z.h).attrs({background:"mainBackground",round:2,margin:[4,0,0],border:{side:"all",color:"elementBackground"},animation:!0}).withConfig({displayName:"styled__StyledDrop",componentId:"y9nggg-6"})(["box-shadow:0px 4px 4px rgba(0,0,0,0.25);"]),zi=$.d.hr.withConfig({displayName:"styled__StyledHR",componentId:"y9nggg-7"})(["border:none;margin:0;border-left:1px solid ",";height:284px;"],Object(Z.H)("borderSecondary")),Ui=Object(n.memo)((function(e){var t=e.value,a=e.period,o=e.resolution,i=e.isSelected,s=e.setTimeRange,l=e.tagging,c=Object(n.useCallback)((function(){return s(t,o)}),[t,o,s]);return r.a.createElement(Ii,{key:t,onClick:c,"data-ga":"date-picker::click-quick-selector::".concat(l,"::").concat(-t),"data-testid":"timePeriod-value"},r.a.createElement(Z.B,{color:i?"primary":"text"},a))})),Gi=a(633),Wi=a(621),Vi=a(627),Yi=a(622),Xi=a(307),Ki=a(634),Zi=a(302),qi=["minutes","hours","days","months"],$i={minutes:60,hours:3600,days:86400,months:2592e3},Ji=function(e,t){return Math.round(e/$i[t])},Qi=function(e,t){var a=Object(Gi.a)(new Date(0),Object(u.a)({},t,e));return-Object(Wi.a)(a)},es={startDate:"start",endDate:"finish"},ts=[{period:"Last 5 minutes",value:-300,resolution:"minutes"},{period:"Last 15 minutes",value:-900,resolution:"minutes"},{period:"Last 30 minutes",value:-1800,resolution:"minutes"},{period:"Last 2 hours",value:-7200,resolution:"hours"},{period:"Last 6 hours",value:-21600,resolution:"hours"},{period:"Last 12 hours",value:-43200,resolution:"hours"},{period:"Last Day",value:-86400,resolution:"days"},{period:"Last 2 Days",value:-172800,resolution:"days"},{period:"Last 7 Days",value:-604800,resolution:"days"}],as=function(e,t){var a=Object(Yi.a)(e,"MMMM d yyyy, H:mm")?e:Object(Xi.a)(e,"MMMM d yyyy, H:mm",Date.now());return Object(Xi.a)("".concat(a," ").concat(function(e){if(!e)return"+00:00";var t=e.toString().split("."),a=t[0]>0?"+":"-",n=Math.abs(t[0]).toString(),r="".concat(a).concat(n.padStart(2,0));return t.length>1?"".concat(r,":").concat(String(.6*t[1]).padEnd(2,0)):"".concat(r,":00")}(t)),"MMMM d yyyy, H:mm xxx",Date.now())},ns=function(e){var t=e.handleTimePeriodChange,a=e.selectedDate,n=e.tagging;return 
r.a.createElement(Z.j,{column:!0,justifyContent:"start",alignItems:"start",height:{max:"240px"},overflow:{vertical:"scroll"},"data-testid":"timePeriods"},ts.map((function(e){var o=e.period,i=e.value,s=e.resolution;return r.a.createElement(Ui,{key:i,value:i,period:o,resolution:s,setTimeRange:t,isSelected:a===i,tagging:n})})))},rs=a(180),os=function(e){var t=e.handleTimePeriodChange,a=e.value,o=e.resolution,i=e.tagging,s=function(){return a<=0?Ji(-a,o):0},l=Object(n.useState)(s),c=Object(A.a)(l,2),d=c[0],h=c[1],p=Object(n.useState)(!1),f=Object(A.a)(p,2),g=f[0],m=f[1];Object(n.useEffect)((function(){return h(s())}),[a]);var b=Object(n.useCallback)((function(e){return h(e.target.value)}),[]),v=Object(n.useCallback)((function(e){var n=Number(e.currentTarget.value),r=!Number.isNaN(n)&&Number.isInteger(n)&&n>0,i=Object(Gi.a)(new Date(0),Object(u.a)({},o,n));return r&&Object(rs.a)(i)&&Object(Wi.a)(i)<=94694400?t(Qi(n,o),o):h(a<=0?Ji(-a,o):0)}),[o,a,t]),_=Object(n.useCallback)((function(e){return function(){t(Qi(d,e),e),m(!1)}}),[d,t]);return r.a.createElement(Z.j,{justifyContent:"start",alignItems:"center",height:8,"data-ga":"date-picker::click-last-integer::".concat(i),"data-testid":"customTimePeriod"},r.a.createElement(Z.B,null,"Last"),r.a.createElement(Fi,{value:d,onChange:b,onBlur:v,"data-ga":"date-picker::click-last-integer::".concat(i,"::").concat(d),"data-testid":"timePeriod-timeInput"}),r.a.createElement(Ri,{isOpen:g,onMenuToggle:m,renderTitle:function(){return r.a.createElement(Z.j,{alignItems:"center",flexWrap:!1,width:"100%"},r.a.createElement(Z.B,{padding:[0,4,0,0]},o),r.a.createElement(Bi,{name:"triangle_down"}))},renderOpener:function(){return null}},(function(){return qi.map((function(e){return r.a.createElement(Ni,{key:e,onClick:_(e),"data-ga":"date-picker::click-last-time-".concat(e,"::").concat(i),"data-testid":"timePeriod-option"},e)}))})))},is=a(623),ss=a(303),ls=a.n(ss),cs=(a(517),function(e){var t=e.selected,a=e.selectsStart,n=void 0!==a&&a,o=e.selectsEnd,i=void 0!==o&&o,s=e.startDate,l=e.endDate,c=e.onChange,u=e.minDate,d=e.maxDate,h=e.dateFormat,p=void 0===h?"MM/dd/yyyy":h,f=e.open,g=void 0!==f&&f,m=e.startOpen,b=void 0!==m&&m,v=e.inline,_=void 0!==v&&v,y=e.selectsRange,O=void 0!==y&&y,x=e.monthsShown,w=void 0===x?1:x,E=e.showPopperArrow,S=void 0===E||E,C=e.calendarContainer,A=void 0===C?null:C;return r.a.createElement(ls.a,{selected:t,onChange:c,selectsStart:n,selectsEnd:i,startDate:s,endDate:l,minDate:u,maxDate:d,dateFormat:p,open:g,startOpen:b,inline:_,selectsRange:O,monthsShown:w,showPopperArrow:S,calendarContainer:A})}),us=$.d.input.withConfig({displayName:"styled__StyledDateInput",componentId:"sc-1tun1nl-0"})(["width:100%;text-align:center;border:1px solid ",";color:inherit;background:",";box-sizing:border-box;border-radius:4px;padding:4px;height:32px;margin-left:20px;margin-right:20px;outline:none;&:focus{border:1px solid 
",";}"],Object(Z.H)("border"),Object(Z.H)("mainBackground"),Object(Z.H)("primary")),ds=$.d.div.withConfig({displayName:"styled__StyledCalendar",componentId:"sc-1tun1nl-1"})(["background:",";border:0;.react-datepicker{&__navigation{top:8px;&-icon::before{border-color:",";}}&__header{background:",";border:0;.react-datepicker__current-month{color:",";font-weight:normal;}.react-datepicker__day-name{color:",";}}&__day{color:",";&:hover{background:",";}&--disabled{color:",";&:hover{background:inherit;}}&--keyboard-selected,&--keyboard-selected:hover{color:",";background:inherit;border-radius:inherit;}&--selected,&--selected:hover{color:",";background:",";border-radius:8px;}&--in-selecting-range,&--in-range{color:",";background:",";border-radius:0;}&--selecting-range-start,&--range-start{color:",";background:",";border-top-left-radius:8px;border-bottom-left-radius:8px;&:hover{color:",";background:",";border-radius:0;border-top-left-radius:8px;border-bottom-left-radius:8px;}}&--selecting-range-end,&--range-end{color:",";background:",";border-top-right-radius:8px;border-bottom-right-radius:8px;&:hover{color:",";background:",";border-top-right-radius:8px;border-bottom-right-radius:8px;}}}}"],Object(Z.H)("mainBackground"),Object(Z.H)("text"),Object(Z.H)("mainBackground"),Object(Z.H)("main"),Object(Z.H)("textLite"),Object(Z.H)("main"),Object(Z.H)("elementBackground"),Object(Z.H)("textLite"),Object(Z.H)("main"),Object(Z.H)("bright"),Object(Z.H)("primary"),Object(Z.H)("primary"),Object(Z.H)("elementBackground"),Object(Z.H)("bright"),Object(Z.H)("primary"),Object(Z.H)("bright"),Object(Z.I)(["green","netdata"],.8),Object(Z.H)("bright"),Object(Z.H)("primary"),Object(Z.H)("bright"),Object(Z.I)(["green","netdata"],.8)),hs=function(e){var t=e.name,a=void 0===t?"":t,o=e.value,i=void 0===o?"":o,s=e.onDatesChange,l=e.onFocus,c=e.placeholderText,u=void 0===c?"":c,d=Object(sa.b)().utcOffset,h=Object(n.useState)(""),p=Object(A.a)(h,2),f=p[0],g=p[1],m=Object(n.useCallback)((function(e){var t=e.target.value;g(t)}),[]),b=Object(n.useCallback)((function(e){if(Object(rs.a)(e)){var t=Object(Vi.a)(e,"MMMM d yyyy, H:mm");g(t)}}),[]),v=Object(n.useCallback)((function(e){var t=as(e.target.value,d);if(Object(rs.a)(t)&&Object(Zi.a)(t)>0){var a=Object(Zi.a)(t);s(a,(function(){return b(i)}))}else b(i)}),[i,d,s,b]);return Object(n.useEffect)((function(){return b(i)}),[i,b]),r.a.createElement(us,{type:"text",name:a,value:i?f:u,onChange:m,onBlur:v,onFocus:l,placeholder:u,"data-testid":"datePicker-input"})},ps=a(24),fs=function(){var e=Object(sa.b)(),t=e.localeTimeString,a=e.localeDateString;return Object(n.useCallback)((function(e){return"".concat(a(e,{locale:"en-us",long:!1})," ").concat(t(e,{secs:!1}))}),[t,a])},gs=function(e,t){return e>0?Object(ps.a)(new Date(t(e))):e||0===e?Object(ps.a)(new Date(t((new Date).valueOf()+1e3*e))):null},ms=function(e,t){var a=fs();return Object(n.useMemo)((function(){return[gs(e,a),gs(t,a)]}),[e,t,a])},bs=function(e){var t=e.startDate,a=e.setStartDate,o=e.endDate,i=e.setEndDate,s=e.onDatesChange,l=e.onInputFocus,c=fs(),u=ms(t,o),d=Object(A.a)(u,2),h=d[0],p=d[1],f=Object(sa.b)().utcOffset,g=Object(n.useCallback)((function(e,t){return Object(is.a)(gs(e,c),p)?a(e):t()}),[p,c,a]),m=Object(n.useCallback)((function(e,t){return Object(is.a)(h,gs(e,c))?i(e):t()}),[h,c,i]),b=Object(n.useCallback)((function(e){var t=Object(A.a)(e,2),a=t[0],n=t[1],r=a?as(Object(Vi.a)(a,"MMMM d yyyy, H:mm"),f):a,o=n?as(Object(Vi.a)(n,"MMMM d yyyy, 
H:mm"),f):n,i=Object(Zi.a)(r)||null,l=Object(Zi.a)(o)||null;s(i,l)}),[f,s]);return r.a.createElement(Z.j,{column:!0,justifyContent:"center",alignItems:"center",flex:{grow:1},gap:3,margin:[0,0,0,7],"data-testid":"datePicker-wrapper"},r.a.createElement(cs,{selected:h,onChange:b,startDate:h,endDate:p,maxDate:new Date,minDate:new Date("1/1/2018"),inline:!0,selectsRange:!0,monthsShown:2,dateFormat:"MMMM d yyyy, H:mm",showPopperArrow:!1,calendarContainer:ds}),r.a.createElement(Z.j,{justifyContent:"around",alignItems:"center",width:"100%"},r.a.createElement(hs,{name:"startDate",value:h,onDatesChange:g,onFocus:l,placeholderText:"Select a start date"}),r.a.createElement(hs,{name:"endDate",value:p,onDatesChange:m,onFocus:l,placeholderText:"Select an end date"})))},vs=function(e){var t=e.startDate,a=e.endDate,o=ms(t,a),i=Object(A.a)(o,2),s=i[0],l=i[1],c=Object(n.useMemo)((function(){return function(e,t){return{formattedStartDate:Object(Vi.a)(e,"MMMM d yyyy, H:mm:ss"),formattedEndDate:Object(Vi.a)(t,"MMMM d yyyy, H:mm:ss")}}(s,l)}),[s,l]),u=c.formattedStartDate,d=c.formattedEndDate,h=Object(n.useMemo)((function(){return function(e,t){return Object(Ki.a)(Object(Zi.a)(e),Object(Zi.a)(t))}(s,l)}),[s,l]);return r.a.createElement(Z.j,{alignItems:"center",justifyContent:"between",gap:2},r.a.createElement(Z.j,{alignItems:"center",justifyContent:"center",gap:1.5},r.a.createElement(Z.F,{strong:!0,whiteSpace:"nowrap"},"From"),r.a.createElement(Z.F,{whiteSpace:"nowrap","data-testid":"periodIndication-from"},u)),r.a.createElement(Z.o,{name:"arrow_left",size:"small",color:"textLite",rotate:2}),r.a.createElement(Z.j,{alignItems:"center",justifyContent:"center",gap:1.5},r.a.createElement(Z.F,{strong:!0,whiteSpace:"nowrap"},"To"),r.a.createElement(Z.F,{whiteSpace:"nowrap","data-testid":"periodIndication-to"},d)),r.a.createElement(Z.j,{alignItems:"center",justifyContent:"center",gap:2},r.a.createElement(Z.F,{whiteSpace:"nowrap"},"/"),r.a.createElement(Z.F,{color:"textLite",whiteSpace:"nowrap","data-testid":"periodIndication-period"},h)))},_s=a(101),ys=a.n(_s),Os=[{value:864e5,unit:"d"},{value:36e5,unit:"h"},{value:6e4,unit:"min"},{value:6e4,unit:"min"},{value:1e3,unit:"s"}],xs=function(e,t){return ys.a.duration(e.diff(t))},ws=function(e){var t=Math.abs(e),a=t<6e4;return Os.reduce((function(e,n){var r=n.value,o=n.unit;return 1e3!==r||a?(e+=function(e,t){return e>1?"".concat(Math.floor(e)).concat(t):""}(t/r,o),t%=r,e):e}),"")},Es=Object($.d)(Z.j).withConfig({displayName:"container__Container",componentId:"sc-1oifnqd-0"})(["cursor:pointer;&:hover *{color:",";fill:",";}"],Object(Z.H)("textLite"),Object(Z.H)("textLite")),Ss=function(e){var t=e.isPlaying,a=e.startDate,n=e.endDate,o=e.isSameDate,i=Object(sa.b)(),s=i.localeTimeString,l=i.localeDateString;return r.a.createElement(Z.j,{gap:2},r.a.createElement(Z.F,{color:"text",whiteSpace:"nowrap"},l(a,{long:!1})," \u2022"," ",r.a.createElement(Z.F,{color:t?"accent":"textFocus",whiteSpace:"nowrap"},s(a,{secs:!1}))),r.a.createElement(Z.o,{name:"arrow_left",color:t?"accent":"textFocus",size:"small",rotate:2}),r.a.createElement(Z.F,{color:"text",whiteSpace:"nowrap"},!o&&"".concat(l(n,{long:!1})," \u2022 "),r.a.createElement(Z.F,{color:t?"accent":"textFocus",whiteSpace:"nowrap"},s(n,{secs:!1}))))},Cs=function(e){var t=e.isPlaying,a=e.duration;return r.a.createElement(Z.j,{gap:1},r.a.createElement(Z.j,{width:"24px",justifyContent:"center"},t&&r.a.createElement(Z.F,{color:"text",whiteSpace:"nowrap"},"\u2022 
last")),r.a.createElement(Z.F,{color:"text",whiteSpace:"nowrap"},a))},As=Object(n.forwardRef)((function(e,t){var a=e.onClick,o=e.start,i=void 0===o?9e5:o,s=e.end,l=e.isPlaying,c=e.isPickerOpen,u=e.setRangeValues,d=e.tagging,h=Object(n.useState)(),p=Object(A.a)(h,2),f=p[0],g=p[1],m=function(e){return e<0?ys()(new Date).add(e,"seconds"):ys()(e)}(i),b=function(e){return e?ys()(e):ys()(new Date)}(s),v=Object(la.b)(ue.m);Object(n.useEffect)((function(){var e=xs(m,b).as("seconds");l||f===e||g(Math.round(e)),l&&f&&v&&(u({start:Math.round(f)}),g(null))}),[m,b,f,l]);var _=Object(n.useMemo)((function(){return function(e,t){return e.isSame(t,"day")}(m,b)}),[m,b]),y=Object(n.useMemo)((function(){return ws(xs(m,b).as("milliseconds"))}),[l,m,b]);return r.a.createElement(Oi,{content:c?function(){}:"Select a predefined or a custom timeframe",align:"bottom",plain:!0},r.a.createElement(Es,{alignItems:"center",justifyContent:"center",gap:1,height:"100%",width:{min:"380px"},onMouseDown:a,padding:[0,1],ref:t,"data-ga":"date-picker::click-time::".concat(d),"data-testid":"datePicker-accessorElement"},r.a.createElement(Ss,{isPlaying:l,endDate:b,startDate:m,isSameDate:_}),r.a.createElement(Cs,{isPlaying:l,duration:y})))})),ks=function(e){var t=e.onChange,a=e.values,o=(a=void 0===a?{}:a).start,i=a.end,s=e.defaultValue,l=void 0===s?-900:s,c=e.tagging,u=void 0===c?"":c,d=e.isPlaying,h=Object(n.useState)(o),p=Object(A.a)(h,2),f=p[0],g=p[1],m=Object(n.useState)(o),b=Object(A.a)(m,2),v=b[0],_=b[1],y=ki("resolution","minutes"),O=Object(A.a)(y,2),x=O[0],w=O[1],E=Object(n.useState)("startDate"),S=Object(A.a)(E,2),C=S[0],k=S[1],j=Ci(),T=Object(A.a)(j,4),D=T[0],P=T[1],M=T[3],L=Object(n.useRef)(),I=Object(n.useCallback)((function(e){var t=e.startDate,a=e.endDate;g(t),_(a)}),[]);Object(n.useEffect)((function(){I({startDate:o,endDate:i})}),[o,i,I]);var N=Object(n.useCallback)((function(){return I({startDate:l,endDate:0})}),[]),R=Object(n.useCallback)((function(e){e.target.name&&k(e.target.name)}),[]),B=Object(n.useCallback)((function(e){e.stopPropagation(),P()}),[P]),F=Object(n.useMemo)((function(){return function(e){return es[e]}(C)}),[C]),H=null!==f&&null!==v&&f!==v,z=f===o&&v===i,U=Object(n.useMemo)((function(){return l}),[]),G=f===U,W=Object(n.useCallback)((function(e,t){w(t),I({startDate:e,endDate:0})}),[I,w]),V=L.current&&D?r.a.createElement(Hi,{target:L.current,canHideTarget:!1,align:{top:"bottom",left:"left"},onEsc:M,onClickOutside:M},r.a.createElement(Li,{"data-testid":"datePicker"},r.a.createElement(Z.j,{justifyContent:"between",alignItems:"center",width:"100%",padding:[6,6,0,6]},r.a.createElement(Z.j,{column:!0,gap:3,margin:[0,7,0,0]},r.a.createElement(ns,{handleTimePeriodChange:W,selectedDate:f,tagging:u}),r.a.createElement(os,{handleTimePeriodChange:W,value:f,resolution:x,tagging:u})),r.a.createElement(zi,null),r.a.createElement(bs,{startDate:f,endDate:v,setStartDate:g,setEndDate:_,onDatesChange:function(e,t){I({startDate:e,endDate:t}),function(e,t,a,n){var r=arguments.length>4&&void 0!==arguments[4]?arguments[4]:"gaCustomEvent";if(window.dataLayer){var 
o={event:r,eventCategory:e,eventAction:t,eventLabel:a,eventValue:n};window.dataLayer.push(o)}}("date-picker","click-date-picker",u,String("finish"===F?t||e:e||t))},onInputFocus:R})),r.a.createElement(Z.j,{alignItems:"center",justifyContent:H?"between":"end",width:"100%",padding:[5,6],gap:2},H&&r.a.createElement(vs,{startDate:f,endDate:v}),r.a.createElement(Z.j,{alignItems:"center",justifyContent:"center",gap:4},r.a.createElement(Z.b,{label:"Clear",flavour:"hollow",onClick:N,disabled:G,"data-ga":"date-picker::click-clear::".concat(u,"-").concat(F),"data-testid":"datePicker-clear"}),r.a.createElement(Z.b,{label:"Apply",onClick:function(){t({start:f,end:v}),M()},disabled:!H||z,"data-ga":"date-picker::click-apply::".concat(u,"-").concat(F),"data-testid":"datePicker-apply"}))))):null;return r.a.createElement(r.a.Fragment,null,r.a.createElement(As,{onClick:B,tagging:u,isPickerOpen:D,isPlaying:d,setRangeValues:t,start:o,end:i,ref:L}),V)},js=Object(n.memo)((function(e){var t=e.tagging,a=e.isPlaying,o=Object(la.a)(),i=Object(la.b)(ue.m),s=Boolean(i),l=Object(la.b)(ue.i),c=Object(n.useMemo)((function(){return s?{start:i.after,end:i.before}:{start:l,end:0}}),[s,i,l]);return Object(n.useEffect)((function(){var e=c.start,t=c.end,a=e.toString(),n=t.toString();window.urlOptions.after===a&&window.urlOptions.before===n||window.urlOptions.netdataPanAndZoomCallback(!0,a,n),Object(Wt.d)({after:a,before:n})}),[c]),r.a.createElement(ks,{values:c,defaultValue:l,onChange:function(e){var t=e.start,a=e.end;t<0?(o(Object(Ke.q)({after:t})),s&&o(Object(Ke.j)())):o(Object(Ke.s)({after:t,before:a}))},tagging:t,isPlaying:a})})),Ts=Object($.d)(Z.j).withConfig({displayName:"container__Container",componentId:"sc-1y4i7z8-0"})(["background:",";"],(function(e){var t=e.theme,a=e.isPlaying;return("Dark"===t.name?Object(Z.I)(a?["green","netdata"]:["neutral","tuna"],a?.3:1):Object(Z.I)(a?["green","frostee"]:["neutral","blackhaze"]))({theme:t})})),Ds=function(e){var t=e.isPlaying;return Object(Z.H)(t?["green","chateau"]:["neutral","iron"])},Ps=Object($.d)(Z.z).attrs((function(e){return{flavour:e.isPlaying?"success":"neutral"}})).withConfig({displayName:"styledPill__StyledPill",componentId:"sc-151yrns-0"})(["&:hover{background:",";border-color:",";}"],Ds,Ds),Ms=function(e){var t=e.isPlaying,a=e.isForcePlaying,o=Object(la.a)(),i=Object(n.useMemo)((function(){return function(e,t){return e?t?"forcePlay":"playSolid":"pauseSolid"}(t,a)}),[t,a]);return r.a.createElement(Oi,{content:t?"Click to pause":"Click to play",align:"bottom",plain:!0},r.a.createElement(Ps,{icon:i,onClick:t?function(){return o(Object(Ke.t)())}:function(){return o(Object(Ke.k)({forcePlay:!1}))},isPlaying:t},t?"Playing":"Paused"))},Ls=Object($.d)(Z.j).withConfig({displayName:"item__PanelRowContainer",componentId:"sc-1qh9192-0"})(["cursor:pointer;&:hover{background:",";}",""],Object(Z.H)("selected"),(function(e){return e.selected&&"background: ".concat(Object(Z.H)("selected")(e),";")})),Is=Object(n.forwardRef)((function(e,t){var a=e.disabled,o=e.children,i=e.Wrapper,s=void 0===i?Z.B:i,l=e.onClick,c=e.testid,u=e.icon,d=e.padding,h=void 0===d?[2,3]:d,p=e.margin,f=void 0===p?[0]:p,g=e.round,m=void 0===g?0:g,b=e.actions,v=e.selected,_=e.width,y=void 0===_?"100%":_,O=Object(n.useCallback)((function(){a||l&&l()}),[l,a]);return r.a.createElement(Ls,{ref:t,flexWrap:!1,justifyContent:"between",alignItems:"center",padding:h,margin:f,round:m,onClick:O,"data-testid":c,width:y,selected:v,disabled:a},r.a.createElement(Z.j,{alignItems:"center",gap:3,flex:!0,basis:""},"string"===typeof 
u?r.a.createElement(Z.o,{name:u,disabled:a,color:"text",height:"16px",width:"16px"}):u,r.a.createElement(s,{opacity:a?"medium":void 0,width:"150px"},o)),b)})),Ns=Object($.d)(Z.l).attrs({padding:[0],margin:[0]}).withConfig({displayName:"list__DefaultListHeader",componentId:"cb73mo-0"})(["cursor:pointer;"]),Rs=function(e){var t=e.toggleOpen,a=e.label,n=e.testid,o=e.Header,i=void 0===o?Ns:o;return r.a.createElement(i,{"data-testid":n,onClick:t},a)},Bs=function(e){var t=e.isOpen,a=void 0!==t&&t,n=e.toggleOpen,o=e.label,i=e.children,s=e.testid,l=e.Header;return r.a.createElement(Z.j,{column:!0},r.a.createElement(Rs,{Header:l,toggleOpen:n,label:o,testid:s}),r.a.createElement(Z.d,{open:a},i))},Fs=(Object($.d)(Z.j).attrs({bacgkround:"disabled",height:"1px",margin:[2,6]}).withConfig({displayName:"styled__Divider",componentId:"sc-1gb5vnf-0"})([""]),function(){return r.a.createElement(Z.j,{padding:[1,2],margin:[1],background:["neutral","black"],round:1,justifyContent:"center",width:{max:"320px"}},r.a.createElement(Z.F,{color:"bright"},"Play to refresh and have live content, pause to see historical, or force play to keep refreshing even when the tab loses focus at the expense of some system performance."))}),Hs=Object($.d)(Z.j).attrs({padding:[1],role:"button"}).withConfig({displayName:"playOptions__MenuButton",componentId:"cm1181-0"})(["cursor:pointer;"]),zs=Object($.d)(Z.j).attrs({column:!0,padding:[2],background:"dropdown",round:1,overflow:{vertical:"auto"},margin:[2,0,0],width:40}).withConfig({displayName:"playOptions__Dropdown",componentId:"cm1181-1"})(["box-shadow:0px 4px 4px rgba(0,0,0,0.25);"]),Us=Object(n.memo)((function(e){var t=e.target,a=Object(la.a)(),o=Object(En.a)(),i=Object(A.a)(o,2),s=i[0],l=i[1],c=function(){return l(!1)};return r.a.createElement(n.Fragment,null,s?r.a.createElement(Hs,{onClick:l,width:"auto"},r.a.createElement(Z.o,{name:"chevron_down",color:"text",width:"12px",height:"12px"})):r.a.createElement(Oi,{content:r.a.createElement(Fs,null),align:"bottom",plain:!0},r.a.createElement(Hs,{onClick:l,width:"auto"},r.a.createElement(Z.o,{name:"chevron_down",color:"text",width:"12px",height:"12px"}))),t.current&&s&&r.a.createElement(Z.h,{target:t.current,align:{top:"bottom",left:"left"},onEsc:c,onClickOutside:c,animation:!0},r.a.createElement(zs,null,r.a.createElement(Is,{round:1,icon:"playOutline",onClick:function(){a(Object(Ke.k)({forcePlay:!1})),c()}},"Play"),r.a.createElement(Is,{round:1,icon:"pauseOutline",onClick:function(){a(Object(Ke.t)()),c()}},"Pause"),r.a.createElement(Is,{round:1,icon:"forcePlayOutline",onClick:function(){a(Object(Ke.k)({forcePlay:!0})),c()}},"Force Play"))))})),Gs=function(){var e=Object(n.useRef)(),t=Object(la.b)(ue.s),a=Object(la.b)(ue.G),o=Object(la.b)(ue.m),i=Object(la.b)(ue.o),s=Object(la.b)(ue.n),l=Object(n.useMemo)((function(){return Boolean((t||!a)&&!o&&!i&&!s)}),[t,a,o,i,s]);return r.a.createElement(gi,{hasBorder:!0},r.a.createElement(Ts,{isPlaying:l,padding:[2,2],round:!0,height:"100%",alignItems:"center",gap:1,ref:e},r.a.createElement(Ms,{isPlaying:l,isForcePlaying:!a}),r.a.createElement(Us,{target:e}),r.a.createElement(js,{isPlaying:l,tagging:"global-view"})))},Ws={warning:"#FFF8E1",error:"#FFEBEF"},Vs=Object($.d)(Z.j).attrs((function(e){var t=e.round,a=void 0===t?999:t,n=e.hollow,r=e.background;return{padding:[.5,2],round:a,border:!!n&&{side:"all",color:r,size:"1px"}}})).withConfig({displayName:"styled__StyledPill",componentId:"sc-1rgk900-0"})(["background:",";cursor:pointer;"],(function(e){var t=e.background;return 
e.hollow?Ws[t]:t})),Ys=Object(n.forwardRef)((function(e,t){var a=e.children,n=e.background,o=e.color,i=e.hollow,s=Object(dr.a)(e,["children","background","color","hollow"]);return r.a.createElement(Vs,Object.assign({background:n,hollow:i,ref:t},s),r.a.createElement(Z.D,{color:i?n:o,strong:!0},a))})),Xs={"data-toggle":"modal","data-target":"#alarmsModal"},Ks=function(){var e=Object(la.b)(ue.c),t=Object(n.useMemo)((function(){return e?Object.values(e.alarms):[]}),[e]),a=Object(n.useMemo)((function(){return t.reduce((function(e,t){var a=t.status;return"CRITICAL"===a&&(e.critical=e.critical+1),"WARNING"===a&&(e.warning=e.warning+1),e}),{critical:0,warning:0})}),[t]),o=a.critical,i=a.warning;return r.a.createElement(gi,{icon:"alarm"},r.a.createElement(Oi,{content:o?"".concat(o," critical alert").concat(o.length>1?"s":""):"No critical alerts",align:"bottom",plain:!0},r.a.createElement(Ys,Object.assign({background:"error",hollow:!0},Xs),o)),r.a.createElement(Oi,{content:i?"".concat(i," warning alert").concat(i.length>1?"s":""):"No warning alerts",align:"bottom",plain:!0},r.a.createElement(Ys,Object.assign({background:"warning",hollow:!0},Xs),i)))},Zs=function(){return r.a.createElement(Z.y,{app:"agent"},(function(e){var t=e.toggle,a=e.upToDate;return r.a.createElement(Oi,{content:"News",align:"bottom",plain:!0},r.a.createElement(Z.b,{"data-testid":"header-news-button",themeType:"dark",name:"news",icon:"insights",flavour:"borderless",neutral:a,warning:!a,onClick:t}))}))},qs=Object($.d)(Z.j).attrs({column:!0,padding:[2],background:"dropdown",round:1,overflow:{vertical:"auto"},margin:[2,0,0],width:80}).withConfig({displayName:"dropdown__Dropdown",componentId:"i5fe1c-0"})(["box-shadow:0px 4px 4px rgba(0,0,0,0.25);"]),$s=Object($.d)(Z.C).withConfig({displayName:"searchInput__SearchInput",componentId:"sc-1j08rwh-0"})(["& input{background:transparent;}& > label{margin-bottom:0;}"]),Js=Object(n.forwardRef)((function(e,t){var a=e.value,n=e.onChange;return r.a.createElement($s,{inputRef:t,value:a,onChange:n,placeholder:"Search",metaShrinked:!0})})),Qs=Object($.d)(Z.j).attrs({column:!0,padding:[2,0,0],overflow:{vertical:"auto"},height:{max:"320px"}}).withConfig({displayName:"container__Container",componentId:"qngq57-0"})([""]),el=Object($.d)(Z.j).attrs({justifyContent:"between",alignItems:"center",width:"100%",gap:2}).withConfig({displayName:"wrapper__Wrapper",componentId:"sc-1m1od8q-0"})([""]),tl=function(e){var t=e.name,a=e.offset,o=e.utc,i=e.onSelect,s=Object(n.useCallback)((function(){return i(o)}),[o,i]);return r.a.createElement(Is,{round:1,onClick:s,Wrapper:el},r.a.createElement(Z.B,{color:"text"},t),r.a.createElement(Z.B,{color:"textLite",whiteSpace:"nowrap"},"UTC ",a))},al=[{value:"Dateline Standard Time",abbr:"DST",text:"International Date Line West",utc:["Etc/GMT+12"]},{value:"UTC-11",abbr:"U",text:"Coordinated Universal Time-11",utc:["Etc/GMT+11","Pacific/Midway","Pacific/Niue","Pacific/Pago_Pago"]},{value:"Hawaiian Standard Time",abbr:"HST",text:"Hawaii",utc:["Etc/GMT+10","Pacific/Honolulu","Pacific/Johnston","Pacific/Rarotonga","Pacific/Tahiti"]},{value:"Alaskan Standard Time",abbr:"AKDT",text:"Alaska",utc:["America/Anchorage","America/Juneau","America/Nome","America/Sitka","America/Yakutat"]},{value:"Pacific Standard Time (Mexico)",abbr:"PDT",text:"Baja California",utc:["America/Santa_Isabel"]},{value:"Pacific Standard Time",abbr:"PST",text:"Pacific Time (US & Canada)",utc:["America/Dawson","America/Los_Angeles","America/Tijuana","America/Vancouver","America/Whitehorse","PST8PDT"]},{value:"US 
Mountain Standard Time",abbr:"UMST",text:"Arizona",utc:["America/Creston","America/Dawson_Creek","America/Hermosillo","America/Phoenix","Etc/GMT+7"]},{value:"Mountain Standard Time (Mexico)",abbr:"MDT",text:"Chihuahua, La Paz, Mazatlan",utc:["America/Chihuahua","America/Mazatlan"]},{value:"Mountain Standard Time",abbr:"MDT",text:"Mountain Time (US & Canada)",utc:["America/Boise","America/Cambridge_Bay","America/Denver","America/Edmonton","America/Inuvik","America/Ojinaga","America/Yellowknife","MST7MDT"]},{value:"Central America Standard Time",abbr:"CAST",text:"Central America",utc:["America/Belize","America/Costa_Rica","America/El_Salvador","America/Guatemala","America/Managua","America/Tegucigalpa","Etc/GMT+6","Pacific/Galapagos"]},{value:"Central Standard Time",abbr:"CDT",text:"Central Time (US & Canada)",utc:["America/Chicago","America/Indiana/Knox","America/Indiana/Tell_City","America/Matamoros","America/Menominee","America/North_Dakota/Beulah","America/North_Dakota/Center","America/North_Dakota/New_Salem","America/Rainy_River","America/Rankin_Inlet","America/Resolute","America/Winnipeg","CST6CDT"]},{value:"Central Standard Time (Mexico)",abbr:"CDT",text:"Guadalajara, Mexico City, Monterrey",utc:["America/Bahia_Banderas","America/Cancun","America/Merida","America/Mexico_City","America/Monterrey"]},{value:"Canada Central Standard Time",abbr:"CCST",text:"Saskatchewan",utc:["America/Regina","America/Swift_Current"]},{value:"SA Pacific Standard Time",abbr:"SPST",text:"Bogota, Lima, Quito",utc:["America/Bogota","America/Cayman","America/Coral_Harbour","America/Eirunepe","America/Guayaquil","America/Jamaica","America/Lima","America/Panama","America/Rio_Branco","Etc/GMT+5"]},{value:"Eastern Standard Time",abbr:"EDT",text:"Eastern Time (US & Canada)",utc:["America/Detroit","America/Havana","America/Indiana/Petersburg","America/Indiana/Vincennes","America/Indiana/Winamac","America/Iqaluit","America/Kentucky/Monticello","America/Louisville","America/Montreal","America/Nassau","America/New_York","America/Nipigon","America/Pangnirtung","America/Port-au-Prince","America/Thunder_Bay","America/Toronto","EST5EDT"]},{value:"US Eastern Standard Time",abbr:"UEDT",text:"Indiana (East)",utc:["America/Indiana/Marengo","America/Indiana/Vevay","America/Indianapolis"]},{value:"Venezuela Standard Time",abbr:"VST",text:"Caracas",utc:["America/Caracas"]},{value:"Paraguay Standard Time",abbr:"PYT",text:"Asuncion",utc:["America/Asuncion"]},{value:"Atlantic Standard Time",abbr:"ADT",text:"Atlantic Time (Canada)",utc:["America/Glace_Bay","America/Goose_Bay","America/Halifax","America/Moncton","America/Thule","Atlantic/Bermuda"]},{value:"Central Brazilian Standard Time",abbr:"CBST",text:"Cuiaba",utc:["America/Campo_Grande","America/Cuiaba"]},{value:"SA Western Standard Time",abbr:"SWST",text:"Georgetown, La Paz, Manaus, San Juan",utc:["America/Anguilla","America/Antigua","America/Aruba","America/Barbados","America/Blanc-Sablon","America/Boa_Vista","America/Curacao","America/Dominica","America/Grand_Turk","America/Grenada","America/Guadeloupe","America/Guyana","America/Kralendijk","America/La_Paz","America/Lower_Princes","America/Manaus","America/Marigot","America/Martinique","America/Montserrat","America/Port_of_Spain","America/Porto_Velho","America/Puerto_Rico","America/Santo_Domingo","America/St_Barthelemy","America/St_Kitts","America/St_Lucia","America/St_Thomas","America/St_Vincent","America/Tortola","Etc/GMT+4"]},{value:"Pacific SA Standard 
Time",abbr:"PSST",text:"Santiago",utc:["America/Santiago","Antarctica/Palmer"]},{value:"Newfoundland Standard Time",abbr:"NDT",text:"Newfoundland",utc:["America/St_Johns"]},{value:"E. South America Standard Time",abbr:"ESAST",text:"Brasilia",utc:["America/Sao_Paulo"]},{value:"Argentina Standard Time",abbr:"AST",text:"Buenos Aires",utc:["America/Argentina/La_Rioja","America/Argentina/Rio_Gallegos","America/Argentina/Salta","America/Argentina/San_Juan","America/Argentina/San_Luis","America/Argentina/Tucuman","America/Argentina/Ushuaia","America/Buenos_Aires","America/Catamarca","America/Cordoba","America/Jujuy","America/Mendoza"]},{value:"SA Eastern Standard Time",abbr:"SEST",text:"Cayenne, Fortaleza",utc:["America/Araguaina","America/Belem","America/Cayenne","America/Fortaleza","America/Maceio","America/Paramaribo","America/Recife","America/Santarem","Antarctica/Rothera","Atlantic/Stanley","Etc/GMT+3"]},{value:"Greenland Standard Time",abbr:"GDT",text:"Greenland",utc:["America/Godthab"]},{value:"Montevideo Standard Time",abbr:"MST",text:"Montevideo",utc:["America/Montevideo"]},{value:"Bahia Standard Time",abbr:"BST",text:"Salvador",utc:["America/Bahia"]},{value:"UTC-02",abbr:"U",text:"Coordinated Universal Time-02",utc:["America/Noronha","Atlantic/South_Georgia","Etc/GMT+2"]},{value:"Mid-Atlantic Standard Time",abbr:"MDT",text:"Mid-Atlantic - Old",utc:[]},{value:"Azores Standard Time",abbr:"ADT",text:"Azores",utc:["America/Scoresbysund","Atlantic/Azores"]},{value:"Cape Verde Standard Time",abbr:"CVST",text:"Cape Verde Is.",utc:["Atlantic/Cape_Verde","Etc/GMT+1"]},{value:"Morocco Standard Time",abbr:"MDT",text:"Casablanca",utc:["Africa/Casablanca","Africa/El_Aaiun"]},{value:"UTC",abbr:"UTC",text:"Coordinated Universal Time",utc:["America/Danmarkshavn","Etc/GMT"]},{value:"GMT Standard Time",abbr:"GMT",text:"Edinburgh, London",utc:["Europe/Isle_of_Man","Europe/Guernsey","Europe/Jersey","Europe/London"]},{value:"GMT Standard Time",abbr:"GDT",text:"Dublin, Lisbon",utc:["Atlantic/Canary","Atlantic/Faeroe","Atlantic/Madeira","Europe/Dublin","Europe/Lisbon"]},{value:"Greenwich Standard Time",abbr:"GST",text:"Monrovia, Reykjavik",utc:["Africa/Abidjan","Africa/Accra","Africa/Bamako","Africa/Banjul","Africa/Bissau","Africa/Conakry","Africa/Dakar","Africa/Freetown","Africa/Lome","Africa/Monrovia","Africa/Nouakchott","Africa/Ouagadougou","Africa/Sao_Tome","Atlantic/Reykjavik","Atlantic/St_Helena"]},{value:"W. Europe Standard Time",abbr:"WEDT",text:"Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna",utc:["Arctic/Longyearbyen","Europe/Amsterdam","Europe/Andorra","Europe/Berlin","Europe/Busingen","Europe/Gibraltar","Europe/Luxembourg","Europe/Malta","Europe/Monaco","Europe/Oslo","Europe/Rome","Europe/San_Marino","Europe/Stockholm","Europe/Vaduz","Europe/Vatican","Europe/Vienna","Europe/Zurich"]},{value:"Central Europe Standard Time",abbr:"CEDT",text:"Belgrade, Bratislava, Budapest, Ljubljana, Prague",utc:["Europe/Belgrade","Europe/Bratislava","Europe/Budapest","Europe/Ljubljana","Europe/Podgorica","Europe/Prague","Europe/Tirane"]},{value:"Romance Standard Time",abbr:"RDT",text:"Brussels, Copenhagen, Madrid, Paris",utc:["Africa/Ceuta","Europe/Brussels","Europe/Copenhagen","Europe/Madrid","Europe/Paris"]},{value:"Central European Standard Time",abbr:"CEDT",text:"Sarajevo, Skopje, Warsaw, Zagreb",utc:["Europe/Sarajevo","Europe/Skopje","Europe/Warsaw","Europe/Zagreb"]},{value:"W. 
Central Africa Standard Time",abbr:"WCAST",text:"West Central Africa",utc:["Africa/Algiers","Africa/Bangui","Africa/Brazzaville","Africa/Douala","Africa/Kinshasa","Africa/Lagos","Africa/Libreville","Africa/Luanda","Africa/Malabo","Africa/Ndjamena","Africa/Niamey","Africa/Porto-Novo","Africa/Tunis","Etc/GMT-1"]},{value:"Namibia Standard Time",abbr:"NST",text:"Windhoek",utc:["Africa/Windhoek"]},{value:"GTB Standard Time",abbr:"GDT",text:"Athens, Bucharest",utc:["Asia/Nicosia","Europe/Athens","Europe/Bucharest","Europe/Chisinau"]},{value:"Middle East Standard Time",abbr:"MEDT",text:"Beirut",utc:["Asia/Beirut"]},{value:"Egypt Standard Time",abbr:"EST",text:"Cairo",utc:["Africa/Cairo"]},{value:"Syria Standard Time",abbr:"SDT",text:"Damascus",utc:["Asia/Damascus"]},{value:"E. Europe Standard Time",abbr:"EEDT",text:"E. Europe",utc:["Asia/Nicosia","Europe/Athens","Europe/Bucharest","Europe/Chisinau","Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Nicosia","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"South Africa Standard Time",abbr:"SAST",text:"Harare, Pretoria",utc:["Africa/Blantyre","Africa/Bujumbura","Africa/Gaborone","Africa/Harare","Africa/Johannesburg","Africa/Kigali","Africa/Lubumbashi","Africa/Lusaka","Africa/Maputo","Africa/Maseru","Africa/Mbabane","Etc/GMT-2"]},{value:"FLE Standard Time",abbr:"FDT",text:"Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius",utc:["Europe/Helsinki","Europe/Kiev","Europe/Mariehamn","Europe/Riga","Europe/Sofia","Europe/Tallinn","Europe/Uzhgorod","Europe/Vilnius","Europe/Zaporozhye"]},{value:"Turkey Standard Time",abbr:"TDT",text:"Istanbul",utc:["Europe/Istanbul"]},{value:"Israel Standard Time",abbr:"JDT",text:"Jerusalem",utc:["Asia/Jerusalem"]},{value:"Libya Standard Time",abbr:"LST",text:"Tripoli",utc:["Africa/Tripoli"]},{value:"Jordan Standard Time",abbr:"JST",text:"Amman",utc:["Asia/Amman"]},{value:"Arabic Standard Time",abbr:"AST",text:"Baghdad",utc:["Asia/Baghdad"]},{value:"Kaliningrad Standard Time",abbr:"KST",text:"Kaliningrad",utc:["Europe/Kaliningrad"]},{value:"Arab Standard Time",abbr:"AST",text:"Kuwait, Riyadh",utc:["Asia/Aden","Asia/Bahrain","Asia/Kuwait","Asia/Qatar","Asia/Riyadh"]},{value:"E. Africa Standard Time",abbr:"EAST",text:"Nairobi",utc:["Africa/Addis_Ababa","Africa/Asmera","Africa/Dar_es_Salaam","Africa/Djibouti","Africa/Juba","Africa/Kampala","Africa/Khartoum","Africa/Mogadishu","Africa/Nairobi","Antarctica/Syowa","Etc/GMT-3","Indian/Antananarivo","Indian/Comoro","Indian/Mayotte"]},{value:"Moscow Standard Time",abbr:"MSK",text:"Moscow, St. 
Petersburg, Volgograd, Minsk",utc:["Europe/Kirov","Europe/Moscow","Europe/Simferopol","Europe/Volgograd","Europe/Minsk"]},{value:"Samara Time",abbr:"SAMT",text:"Samara, Ulyanovsk, Saratov",utc:["Europe/Astrakhan","Europe/Samara","Europe/Ulyanovsk"]},{value:"Iran Standard Time",abbr:"IDT",text:"Tehran",utc:["Asia/Tehran"]},{value:"Arabian Standard Time",abbr:"AST",text:"Abu Dhabi, Muscat",utc:["Asia/Dubai","Asia/Muscat","Etc/GMT-4"]},{value:"Azerbaijan Standard Time",abbr:"ADT",text:"Baku",utc:["Asia/Baku"]},{value:"Mauritius Standard Time",abbr:"MST",text:"Port Louis",utc:["Indian/Mahe","Indian/Mauritius","Indian/Reunion"]},{value:"Georgian Standard Time",abbr:"GET",text:"Tbilisi",utc:["Asia/Tbilisi"]},{value:"Caucasus Standard Time",abbr:"CST",text:"Yerevan",utc:["Asia/Yerevan"]},{value:"Afghanistan Standard Time",abbr:"AST",text:"Kabul",utc:["Asia/Kabul"]},{value:"West Asia Standard Time",abbr:"WAST",text:"Ashgabat, Tashkent",utc:["Antarctica/Mawson","Asia/Aqtau","Asia/Aqtobe","Asia/Ashgabat","Asia/Dushanbe","Asia/Oral","Asia/Samarkand","Asia/Tashkent","Etc/GMT-5","Indian/Kerguelen","Indian/Maldives"]},{value:"Yekaterinburg Time",abbr:"YEKT",text:"Yekaterinburg",utc:["Asia/Yekaterinburg"]},{value:"Pakistan Standard Time",abbr:"PKT",text:"Islamabad, Karachi",utc:["Asia/Karachi"]},{value:"India Standard Time",abbr:"IST",text:"Chennai, Kolkata, Mumbai, New Delhi",utc:["Asia/Kolkata"]},{value:"Sri Lanka Standard Time",abbr:"SLST",text:"Sri Jayawardenepura",utc:["Asia/Colombo"]},{value:"Nepal Standard Time",abbr:"NST",text:"Kathmandu",utc:["Asia/Kathmandu"]},{value:"Central Asia Standard Time",abbr:"CAST",text:"Nur-Sultan (Astana)",utc:["Antarctica/Vostok","Asia/Almaty","Asia/Bishkek","Asia/Qyzylorda","Asia/Urumqi","Etc/GMT-6","Indian/Chagos"]},{value:"Bangladesh Standard Time",abbr:"BST",text:"Dhaka",utc:["Asia/Dhaka","Asia/Thimphu"]},{value:"Myanmar Standard Time",abbr:"MST",text:"Yangon (Rangoon)",utc:["Asia/Rangoon","Indian/Cocos"]},{value:"SE Asia Standard Time",abbr:"SAST",text:"Bangkok, Hanoi, Jakarta",utc:["Antarctica/Davis","Asia/Bangkok","Asia/Hovd","Asia/Jakarta","Asia/Phnom_Penh","Asia/Pontianak","Asia/Saigon","Asia/Vientiane","Etc/GMT-7","Indian/Christmas"]},{value:"N. Central Asia Standard Time",abbr:"NCAST",text:"Novosibirsk",utc:["Asia/Novokuznetsk","Asia/Novosibirsk","Asia/Omsk"]},{value:"China Standard Time",abbr:"CST",text:"Beijing, Chongqing, Hong Kong, Urumqi",utc:["Asia/Hong_Kong","Asia/Macau","Asia/Shanghai"]},{value:"North Asia Standard Time",abbr:"NAST",text:"Krasnoyarsk",utc:["Asia/Krasnoyarsk"]},{value:"Singapore Standard Time",abbr:"MPST",text:"Kuala Lumpur, Singapore",utc:["Asia/Brunei","Asia/Kuala_Lumpur","Asia/Kuching","Asia/Makassar","Asia/Manila","Asia/Singapore","Etc/GMT-8"]},{value:"W. Australia Standard Time",abbr:"WAST",text:"Perth",utc:["Antarctica/Casey","Australia/Perth"]},{value:"Taipei Standard Time",abbr:"TST",text:"Taipei",utc:["Asia/Taipei"]},{value:"Ulaanbaatar Standard Time",abbr:"UST",text:"Ulaanbaatar",utc:["Asia/Choibalsan","Asia/Ulaanbaatar"]},{value:"North Asia East Standard Time",abbr:"NAEST",text:"Irkutsk",utc:["Asia/Irkutsk"]},{value:"Japan Standard Time",abbr:"JST",text:"Osaka, Sapporo, Tokyo",utc:["Asia/Dili","Asia/Jayapura","Asia/Tokyo","Etc/GMT-9","Pacific/Palau"]},{value:"Korea Standard Time",abbr:"KST",text:"Seoul",utc:["Asia/Pyongyang","Asia/Seoul"]},{value:"Cen. 
Australia Standard Time",abbr:"CAST",text:"Adelaide",utc:["Australia/Adelaide","Australia/Broken_Hill"]},{value:"AUS Central Standard Time",abbr:"ACST",text:"Darwin",utc:["Australia/Darwin"]},{value:"E. Australia Standard Time",abbr:"EAST",text:"Brisbane",utc:["Australia/Brisbane","Australia/Lindeman"]},{value:"AUS Eastern Standard Time",abbr:"AEST",text:"Canberra, Melbourne, Sydney",utc:["Australia/Melbourne","Australia/Sydney"]},{value:"West Pacific Standard Time",abbr:"WPST",text:"Guam, Port Moresby",utc:["Antarctica/DumontDUrville","Etc/GMT-10","Pacific/Guam","Pacific/Port_Moresby","Pacific/Saipan","Pacific/Truk"]},{value:"Tasmania Standard Time",abbr:"TST",text:"Hobart",utc:["Australia/Currie","Australia/Hobart"]},{value:"Yakutsk Standard Time",abbr:"YST",text:"Yakutsk",utc:["Asia/Chita","Asia/Khandyga","Asia/Yakutsk"]},{value:"Central Pacific Standard Time",abbr:"CPST",text:"Solomon Is., New Caledonia",utc:["Antarctica/Macquarie","Etc/GMT-11","Pacific/Efate","Pacific/Guadalcanal","Pacific/Kosrae","Pacific/Noumea","Pacific/Ponape"]},{value:"Vladivostok Standard Time",abbr:"VST",text:"Vladivostok",utc:["Asia/Sakhalin","Asia/Ust-Nera","Asia/Vladivostok"]},{value:"New Zealand Standard Time",abbr:"NZST",text:"Auckland, Wellington",utc:["Antarctica/McMurdo","Pacific/Auckland"]},{value:"UTC+12",abbr:"U",text:"Coordinated Universal Time+12",utc:["Etc/GMT-12","Pacific/Funafuti","Pacific/Kwajalein","Pacific/Majuro","Pacific/Nauru","Pacific/Tarawa","Pacific/Wake","Pacific/Wallis"]},{value:"Fiji Standard Time",abbr:"FST",text:"Fiji",utc:["Pacific/Fiji"]},{value:"Magadan Standard Time",abbr:"MST",text:"Magadan",utc:["Asia/Anadyr","Asia/Kamchatka","Asia/Magadan","Asia/Srednekolymsk"]},{value:"Kamchatka Standard Time",abbr:"KDT",text:"Petropavlovsk-Kamchatsky - Old",utc:["Asia/Kamchatka"]},{value:"Tonga Standard Time",abbr:"TST",text:"Nuku'alofa",utc:["Etc/GMT-13","Pacific/Enderbury","Pacific/Fakaofo","Pacific/Tongatapu"]},{value:"Samoa Standard Time",abbr:"SST",text:"Samoa",utc:["Pacific/Apia"]}],nl=new Date,rl=function(){return new Intl.DateTimeFormat("default",{}).resolvedOptions()},ol=function(){var e={};return al.reduce((function(t,a){var n=a.utc;try{var r=new Intl.DateTimeFormat("fr",{timeZone:n[0],timeZoneName:"short"}).format(nl).match(/[\u2212+].+/)||[],o=function(e){return e?e.replace("\u2212","-"):""}(Object(A.a)(r,1)[0]);if(e[o])return t.concat(Object(g.a)({},a,{offset:e[o]}));var i=function(e){if(!e)return"+0";var t=e.split(":");return t.length>1?"".concat(t[0]).concat((t[1]/60).toString().substr(1)):t[0]}(o);return e[o]=i,t.concat(Object(g.a)({},a,{offset:i}))}catch(s){return t}}),[])}().sort((function(e,t){return e.offset-t.offset})),il=ol.reduce((function(e,t){var a=t.utc,n=Object(dr.a)(t,["utc"]);return a.forEach((function(t){return e[t]=Object(g.a)({},n,{utc:t})})),e}),{}),sl=function(){var e=Object(n.useState)(""),t=Object(A.a)(e,2),a=t[0],o=t[1],i=Object(En.a)(),s=Object(A.a)(i,2),l=s[0],c=s[1],u=Object(n.useRef)(),d=Object(n.useRef)(),h=window.urlOptions.updateUtcParam;Object(n.useEffect)((function(){d.current&&l&&d.current.focus()}),[l]);var p=Object(la.a)(),f=Object(la.b)(ue.L),g=Object(n.useMemo)((function(){var e=Object(Wt.b)().utc,t=void 0===e?"":e,a=function(e,t){var a=t||("default"===e?rl().timeZone:e);return il[a in il?a:rl().timeZone]||{}}(f,t),n=a.offset,r=void 0===n?"":n,o=a.utc,i=void 0===o?"":o;return t!==i&&h(i),f!==i&&p(Object(Ke.v)({key:"timezone",value:i})),p(Object(Ke.v)({key:"utcOffset",value:parseFloat(r)})),r}),[f]),m=Object(n.useMemo)((function(){return 
a?ol.filter((function(e){var t=e.text,n=e.offset;return t.toUpperCase().includes(a.toUpperCase())||n.includes(a)})):ol}),[a]),b=function(){c(!1),o("")},v=Object(n.useCallback)((function(e){h(e),p(Object(Ke.v)({key:"timezone",value:e})),b()}),[]),_=Object(n.useCallback)((function(e){return o(e.target.value)}),[]);return r.a.createElement(gi,{hasBorder:!0},r.a.createElement(Is,{round:1,onClick:c,ref:u,Wrapper:el},r.a.createElement(Z.j,{gap:1},r.a.createElement(Z.B,{color:"textLite",whiteSpace:"nowrap"},"UTC ",g)),r.a.createElement(Z.o,{name:"chevron_down",color:"text",width:"12px",height:"12px"})),u.current&&l&&r.a.createElement(Z.h,{target:u.current,align:{top:"bottom",left:"left"},onEsc:b,onClickOutside:b,animation:!0},r.a.createElement(qs,null,r.a.createElement(Js,{value:a,onChange:_,ref:d}),r.a.createElement(Qs,null,m.map((function(e){var t=e.text,a=e.offset,n=e.utc;return r.a.createElement(tl,{key:t,name:t,offset:a,utc:n[0],onSelect:v})}))))))},ll=a(58),cl=a(580),ul=a(74),dl=a(127),hl=a(188),pl=Object($.d)(Z.j).attrs({position:"absolute"}).withConfig({displayName:"iframe__IframeContainer",componentId:"sc-1tybwh7-0"})(["display:none;"]),fl=function(e){var t=e.signedIn,a=Object(n.useState)(!1),o=Object(A.a)(a,2),i=o[0],s=o[1],l=Object(n.useRef)(),c=Object(n.useRef)(),u=Object(cl.a)(Xt),d=Object(A.a)(u,3),h=d[0],p=d[2],f=Object(la.b)(ue.f),g=Object(la.b)(ue.x),m=Object(la.a)(),v=window.location,_=v.origin,y=v.pathname,O=encodeURIComponent(g.hostname),x=encodeURIComponent(_+y),w=Object(Ve.d)(f,"sign-in?id=".concat(g.machineGuid,"&name=").concat(O,"&origin=").concat(x));Object(ul.b)("hello-from-sign-in",(function(e){l.current=e})),Object(dl.a)();var E=Object(n.useCallback)((function(){s(!0),setTimeout((function(){return m(Object(b.c)({offline:void 0===l.current}))}),500)}),[]);return Object(n.useEffect)((function(){var e=function(e){(null===e||void 0===e?void 0:e.target)&&(e.target.src!==w||i||E())};return window.addEventListener("DOMFrameContentLoaded",e),function(){return window.removeEventListener("DOMFrameContentLoaded",e)}}),[w,i,E]),Object(n.useEffect)((function(){if(t&&c.current&&g.registryServer&&g.registryServer!==Ve.b&&h){p();var e=g.registryMachinesArray;e&&e.length>0&&Object(ul.a)(c.current,{type:"synced-private-registry",payload:e})}}),[t,g,h]),r.a.createElement(pl,{as:"iframe",id:hl.a,src:w,onLoad:E})},gl=function(){var e=Object(s.d)(),t=Object(cl.a)("has-sign-in-history"),a=Object(A.a)(t,2),r=a[0],o=a[1],i=Object(n.useState)(r),l=Object(A.a)(i,2),c=l[0],u=l[1],d=Object(n.useCallback)((function(t){t&&(u(t),o(t)),e(Object(b.b)({isSignedIn:t}))}),[]),h=Object(ul.b)("is-signed-in",d);return[Object(A.a)(h,1)[0],c]},ml=function(){var e=gl(),t=Object(A.a)(e,1)[0];return Object(s.e)(ue.u)&&r.a.createElement(Oi,{content:"Sign in to Netdata to monitor all your nodes at once, have composite charts, custom dashboards, use intelligent features and more",align:"bottom",plain:!0},r.a.createElement("div",null,r.a.createElement(fl,{signedIn:t}),!t&&r.a.createElement(ll.a,{utmParameters:{content:"topbar"}},(function(e){var t=e.isRegistry,a=e.link,n=e.offline,o=e.onSignIn;return r.a.createElement(Z.b,Object.assign({"data-testid":"header-signin",label:"Sign in",disabled:n},t?{as:"a",href:a}:{onClick:o}))}))))},bl=a(306),vl=function(e){var t=e.label,a=e.active,n=e.showBorderLeft,o=e.icon,i=e.onActivate;return 
r.a.createElement(Z.w,{onActivate:function(){a||i&&i()},icon:r.a.createElement(Z.o,{name:o,size:"small"}),fixed:!0,closable:!1,showBorderLeft:n,active:a},r.a.createElement(Z.B,null,t))},_l="Discover the free benefits of Netdata Cloud",yl=a(91),Ol=function(e){var t=e.children;return r.a.createElement(Z.B,{fontSize:"16px"},t)},xl={Home:{id:"Home",label:"Home",header:"Home",text:function(){return r.a.createElement(Ol,null,"The Home view in Netdata cloud provides summarized relevant information in an easily digestible display. You can see information about your nodes, data collection and retention stats, alerts, users and dashboards.")},icon:"room_home",image:"images/home.png"},nodeView:{id:"nodeView",label:"Node View",header:"Node View",text:function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Ol,null,"The single node view you are currently using will of course be available on Netdata Cloud as well. In addition, the charts and visualization on Netdata Cloud will be more flexible and powerful for troubleshooting than what is available on the agent."),r.a.createElement(Ol,null,"Netdata Cloud also comes with the Metric Correlations feature that lets you quickly find metrics and charts related to a particular window of interest that you want to explore further. By displaying the standard Netdata dashboard, filtered to show only charts that are relevant to the window of interest, you can get to the root cause sooner."))},icon:"nodes_hollow",image:"images/nodeView.png"},Overview:{id:"Overview",label:"Overview",header:"Overview",text:function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Ol,null,"The Overview tab is a great way to monitor your infrastructure using Netdata Cloud. While the interface might look similar to local dashboards served by an Agent, or even the single-node dashboards in Netdata Cloud, Overview uses composite charts. These charts display real-time aggregated metrics from all the nodes (or a filtered selection) in a given War Room."),r.a.createElement(Ol,null,"With Overview's composite charts, you can see your infrastructure from a single pane of glass, discover trends or anomalies, then drill down by grouping metrics by node and jumping to single-node dashboards for root cause analysis."),r.a.createElement(Ol,null,"Here's an example of a composite chart visualizing Disk I/O bandwidth from 5 different nodes in one chart."))},icon:"room_overview",image:"images/overview.png"},Nodes:{id:"Nodes",label:"Nodes",header:"Nodes",text:function(){return r.a.createElement(Ol,null,"The Nodes view in Netdata Cloud lets you see and customize key metrics from any number of Agent-monitored nodes and seamlessly navigate to any node's dashboard for troubleshooting performance issues or anomalies using Netdata's highly-granular metrics.")},icon:"nodes_hollow",image:"images/nodes.jpg"},Dashboards:{id:"Dashboards",label:"Dashboards",header:"Dashboards",text:function(){return r.a.createElement(Ol,null,"With Netdata Cloud, you can build new dashboards that target your infrastructure's unique needs. Put key metrics from any number of distributed systems in one place for a bird's eye view of your infrastructure.")},icon:"dashboard",image:"images/dashboards.png"},Alerts:{id:"Alerts",label:"Alerts",header:"Alerts",text:function(){return r.a.createElement(Ol,null,"The Alerts view gives you a high level of availability and performance information for every node you're monitoring with Netdata Cloud. 
It also offers an easy way to drill down into any particular alert by taking the user to the dedicated alert view from where the user can run metrics correlation or take further troubleshooting steps.")},icon:"alarm",image:"images/alerts.jpg"},Anomalies:{id:"Anomalies",label:"Anomalies",header:"Anomaies",text:function(){return r.a.createElement(Ol,null,"The Anomalies view on Netdata Cloud lets you quickly surface potentially anomalous metrics and charts related to a particular highlight window of interest using Anomaly Advisor. Anomalies are detected using per metric unsupervised machine learning running at the edge!")},icon:"anomaliesLens",video:"https://user-images.githubusercontent.com/24860547/165943403-1acb9759-7446-4704-8955-c566d04ad7ab.mp4"},Pricing:{id:"Pricing",label:"Pricing",header:"Pricing",text:function(){return r.a.createElement(Ol,null,"Netdata Cloud\u2019s distributed architecture\u2014with processing occurring at the individual nodes\u2014enables us to add any number of users at marginal cost. Couple this with our upcoming paid plan with added functionality for enterprise customers, and it means we can commit to providing our current functionality for free, always.")},image:"images/pricing.png",icon:"pricing"},Privacy:{id:"Privacy",label:"Privacy",header:"Privacy",text:function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Ol,null,"Data privacy is very important to us. We firmly believe that your data belongs to you. This is why we don't store any metric data in Netdata Cloud."),r.a.createElement(Ol,null,"Your local installations of the Netdata Agent form the basis for the Netdata Cloud. All the data that you see in the web browser when using Netdata Cloud, is actually streamed directly from the Netdata Agent to the Netdata Cloud dashboard. The data passes through our systems, but it isn't stored. You can learn more about"," ",r.a.createElement(yl.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/agent/netdata-security"},"the Agent's security design")," ","design in the Agent documentation."),r.a.createElement(Ol,null,"However, to be able to offer the stunning visualizations and advanced functionality of Netdata Cloud, it does store a limited number of metadata. This metadata is ONLY available to Netdata and NEVER to any 3rd parties. 
You can learn more about what metadata is stored in Netdata cloud in our",r.a.createElement(yl.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/cloud/data-privacy"}," ","documentation")))},icon:"privacy"}},wl=function(e){var t=e.parentRef,a=e.isDropdownOpen,n=e.closeDropdown,o=e.text,i=e.header,s=e.handleGoToCloud,l=e.image,c=e.video;return t.current&&a?r.a.createElement(Z.h,{backdrop:!0,"data-testid":"selectedNodesDropdown",onEsc:n,align:{top:"bottom",left:"left"},target:t.current,onClickOutside:n},r.a.createElement(Z.t,{background:"modalBackground"},r.a.createElement(Z.v,null,r.a.createElement(Z.j,{gap:2},r.a.createElement(Z.o,{color:"white",name:"netdata"}),r.a.createElement(Z.k,{margin:[0]},"Discover the free benefits of Netdata Cloud",":")),r.a.createElement(Z.s,{onClose:n})),r.a.createElement(Z.r,null,r.a.createElement(Z.j,{column:!0,width:189,height:130},r.a.createElement(Z.j,{padding:[0,0,4,0],column:!0,gap:4},r.a.createElement(Z.j,{alignItems:"center"},r.a.createElement(Z.l,{margin:[0]},i),r.a.createElement(Z.a,{sx:{marginLeft:"auto"},"data-testid":"go-to-cloud-cta",margin:[0,2,0,0],width:{min:40}},r.a.createElement(ll.a,{utmParameters:{campaign:"discover_cloud"}},(function(e){var t=e.link;return r.a.createElement(Z.a,{label:r.a.createElement(Z.B,{textTransform:"none",strong:!0,color:"panel"},"Sign in to Netdata Cloud!"),width:"100%",onClick:function(){return s({link:t})},"data-testid":"cta1-button",as:Z.b,small:!0,"data-ga":"go-to-cloud-button"})})))),o()),l&&r.a.createElement(Z.j,{height:"auto",width:"100%",overflow:"hidden"},r.a.createElement(Z.a,{sx:{width:"100%",height:"auto"},as:"img",src:l})),c&&r.a.createElement(Z.j,{height:"100%",width:"100%"},r.a.createElement(Z.a,{sx:{width:"100%",height:"100%"}},r.a.createElement("iframe",{title:"discover-cloud-iframe}",width:"100%",height:"100%",src:c,frameBorder:"0",allow:"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture",allowFullScreen:!0}))))),r.a.createElement(Z.u,null))):null},El=Z.j,Sl=Z.j,Cl=function(){var e=Object(n.useState)(!1),t=Object(A.a)(e,2),a=t[0],o=t[1],i=Object(n.useState)(null),s=Object(A.a)(i,2),l=s[0],c=s[1],u=Object(n.useRef)(),d=function(){o(!0)};return r.a.createElement(El,{padding:[0,0,0,4],position:"relative",height:15},r.a.createElement(Sl,{padding:[4,2,4,2],gap:4,height:"100%",justifyContent:"center",alignItems:"center"},r.a.createElement(Z.B,{color:"primary"},_l,":"),r.a.createElement(Z.j,{ref:u},r.a.createElement(Z.x,null,Object.keys(xl).map((function(e,t){var a=xl[e],n=a.label,o=a.icon,i=a.id,s=l?l.id:null;return r.a.createElement(vl,{key:e,icon:o,active:i===s,label:n,showBorderLeft:0===t,onActivate:Object(Ve.c)(d,(function(){c(xl[e])}))})}))))),r.a.createElement(wl,Object.assign({parentRef:u,isDropdownOpen:a},l,{closeDropdown:Object(Ve.c)((function(){o(!1)}),(function(){c(null)})),handleGoToCloud:function(e){var t=e.link;window.location.href=t}})))},Al=Object($.d)(Z.j).attrs({as:"header",position:"relative",justifyContent:"between",background:"panel",zIndex:20,width:"100%",padding:[2,4,2,4]}).withConfig({displayName:"header__Wrapper",componentId:"c0wsrq-0"})(["pointer-events:all;"]),kl=function(){var e=Object(s.e)(ue.u);return 
r.a.createElement(Al,null,r.a.createElement(Z.j,{alignItems:"center",gap:3},r.a.createElement(bi,null)),r.a.createElement(Z.j,{justifyContent:"end",alignItems:"center",gap:3},r.a.createElement(bl.CloudConnectionStatus,null),r.a.createElement(Si,null),r.a.createElement(Zs,null),r.a.createElement(xi,null),r.a.createElement(sl,null),r.a.createElement(Gs,null),r.a.createElement(Ks,null),r.a.createElement(ml,null)),e&&r.a.createElement(Z.a,{sx:{background:"#272B30"},position:"absolute",top:"52px",left:"0px",right:"0px"},r.a.createElement(Cl,null)))},jl=Object($.d)(Z.b).withConfig({displayName:"expandButton__ExpandButton",componentId:"ebkdno-0"})(["&&{> .button-icon{width:6px;height:9px;}"]),Tl=a(143),Dl=function(e){var t=e.flavour,a=void 0===t?"default":t,n=Object(dr.a)(e,["flavour"]),o=Object(s.e)(ue.f);return r.a.createElement(Z.j,Object.assign({alignItems:"center",as:"iframe",src:"".concat(Object(Tl.a)(o,"sign-out"),"?type=").concat(a),border:{side:"all",size:"0px"},width:{max:"128px"},height:{max:"40px"}},n))},Pl=function(){return r.a.createElement(ll.a,{utmParameters:{content:"userSettings"}},(function(e){var t=e.isRegistry,a=e.link,n=e.onSignIn;return r.a.createElement(Z.B,{onClick:t?function(e){return function(e,t){e.stopPropagation(),window.location.href=t}(e,a)}:n},"Sign in")}))},Ml=Object(Ha.a)((function(e){return e.dashboard}),(function(e){return e.isSignedIn})),Ll=function(){var e=Object(En.a)(),t=Object(A.a)(e,2),a=t[0],o=t[1],i=Object(la.b)(Ml),s=Object(n.useMemo)((function(){return[].concat(Object(C.a)(i?[{children:"Operational Status",onClick:function(){return window.open("https://status.netdata.cloud","_blank","noopener,noreferrer")}}]:[]),Object(C.a)(i?[{separator:!0}]:[]),Object(C.a)(i?[{children:r.a.createElement(Dl,{"data-testid":"signout-button-sidebar",flavour:"borderless",height:{max:"18px"}})}]:[{children:r.a.createElement(Pl,null)}]))}),[i]);return r.a.createElement($.b,{theme:Z.e},r.a.createElement(Z.b,{"data-testid":"avatar-button-sidebar",flavour:"borderless",neutral:!0,icon:"user",title:"User settings",name:"userSettings",onClick:o}),a&&r.a.createElement(Z.p,{position:"bottom-left",onClickOutside:o,onEsc:o,backdrop:!1,margin:[5,18]},r.a.createElement(Z.j,{column:!0,width:52,background:"mainBackground",padding:[3],round:!0},s.map((function(e,t){return e.separator?r.a.createElement(Z.j,{height:"1px",background:"disabled",key:t}):r.a.createElement(Is,Object.assign({key:t,padding:[2,4],round:1},e.onClick&&{onClick:e.onClick}),e.children)})))))},Il=function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Z.j,{width:"40px",height:"40px",round:2,border:{side:"all",color:"border",size:"2px",type:"dotted"}}),r.a.createElement(Z.j,{height:"1px",background:"separator",width:"20px"}),r.a.createElement(Z.b,{icon:"plus",disabled:!0}))},Nl=function(){var e=Object(la.b)(ue.f);return r.a.createElement(Z.j,{as:"iframe",src:Object(Ve.d)(e,"space-bar"),title:"Space Bar",height:"100%",width:"100%",border:{side:"all",size:"0px"},overflow:"hidden"})},Rl=function(e){var t=e.isOpen,a=e.toggle,n=e.isSignedIn,o=Object(s.e)(ue.u);return 
r.a.createElement(Z.j,{column:!0,justifyContent:"between",background:"panel",padding:[3,0],width:"64px",alignItems:"center",gap:6,position:"relative",overflow:"hidden"},r.a.createElement(Z.j,{column:!0,gap:4,alignItems:"center",height:"100%",overflow:"hidden"},r.a.createElement(Z.o,{color:"success",name:"netdataPress",height:"32px",width:"32px"}),!t&&r.a.createElement(jl,{icon:"chevron_right_s",onClick:a,small:!0,neutral:!0,flavour:"borderless",themeType:"dark"}),o&&n&&r.a.createElement(Nl,null),o&&!n&&r.a.createElement(Il,null)),r.a.createElement(Z.j,{column:!0,gap:4,alignItems:"center"},r.a.createElement(Z.g,{app:"agent"},(function(e){return r.a.createElement(Z.b,{flavour:"borderless",neutral:!0,themeType:"dark",className:"btn",icon:"question",onClick:e,title:"Need help?"})})),r.a.createElement(Z.b,{flavour:"borderless",neutral:!0,themeType:"dark",className:"btn","data-toggle":"modal","data-target":"#optionsModal",icon:"gear",title:"Settings"}),o&&r.a.createElement(Ll,null)))},Bl=function(e){for(var t,a,n=[],r=0,o=-1,i=0;t=(a=e.charAt(r++)).charCodeAt(0);){var s=t>=48&&t<=57;s!==i&&(n[++o]="",i=s),n[o]+=a}return n},Fl=function(e,t){for(var a=Bl(e.toLowerCase()),n=Bl(t.toLowerCase()),r=0;a[r]&&n[r];r++)if(a[r]!==n[r]){var o=Number(a[r]),i=Number(n[r]);return o.toString()===a[r]&&i.toString()===n[r]?o-i:a[r]>n[r]?1:-1}return a.length-n.length},Hl=function(e,t){return"".concat(e,"/host/").concat(t,"/")},zl=function(e,t,a){if(!e||!t)return{};var n=function(e){var t=document.location.origin.toString()+decodeURI(document.location.pathname.toString());return t.endsWith("/host/".concat(e,"/"))&&(t=t.substring(0,t.length-"/host/".concat(e,"/").toString().length)),t.endsWith("/")&&(t=t.substring(0,t.length-1)),t}(t);return{parentNode:{hostname:Object(A.a)(e,1)[0].hostname,url:"".concat(n,"/")},replicatedNodes:e.slice(1).map((function(e,t){var r,o=e.hostname;return{hostname:o,url:Hl(n,o),status:(null===(r=a.find((function(e){return e.hostname===o})))||void 0===r?void 0:r.reachable)||!1}})).sort((function(e,t){return Fl(e.hostname,t.hostname)}))}},Ul=Object($.d)(Z.j).attrs({as:"a",gap:2,alignItems:"center"}).withConfig({displayName:"anchor__Anchor",componentId:"sc-8wivg4-0"})(["&:hover{text-decoration:none;}"]),Gl=function(e){var t=e.hostname,a=e.url,n=e.status;return r.a.createElement(Ul,{href:a,justifyContent:"between",padding:[0,0,0,2]},r.a.createElement(Z.j,{alignItems:"center",gap:2},r.a.createElement(Z.o,{name:"node",color:"bright"}),r.a.createElement(Z.B,{color:"bright",truncate:!0},t)),r.a.createElement(Ys,{background:n?"success":"border",color:"bright",round:10},n?"Live":"Off"))},Wl=Object($.d)(Z.C).withConfig({displayName:"replicatedNodes__Search",componentId:"sc-1bn9jso-0"})(["& > label{margin-bottom:0;}"]),Vl=Object($.d)(Z.o).withConfig({displayName:"replicatedNodes__StyledIcon",componentId:"sc-1bn9jso-1"})(["transform:",";"],(function(e){return e.right?"rotate(270deg)":"none"})),Yl=function(e){var t=e.parentNode,a=e.replicatedNodes,o=Object(n.useState)(!0),i=Object(A.a)(o,2),s=i[0],l=i[1],c=Object(n.useState)(""),u=Object(A.a)(c,2),d=u[0],h=u[1],p=Object(n.useCallback)((function(){return l((function(e){return!e}))}),[]),f=Object(n.useCallback)((function(e){return h(e.target.value)}),[]),g=Object(n.useMemo)((function(){return d?a.filter((function(e){return e.hostname.toLowerCase().includes(d.toLowerCase())})):a}),[a,d]);return 
r.a.createElement(Bs,{isOpen:s,toggleOpen:p,label:r.a.createElement(Z.j,{alignItems:"center",justifyContent:"between"},r.a.createElement(Z.B,{strong:!0,color:"border"},"Replicated nodes"),r.a.createElement(Vl,{right:!s,name:"chevron_down",size:"small",color:"text"}))},r.a.createElement(Z.j,{column:!0,gap:4,padding:[4,0,0]},r.a.createElement(Ul,{as:"a",href:t.url,justifyContent:"start"},r.a.createElement(Z.o,{name:"nodes",size:"small",color:"bright"}),r.a.createElement(Z.B,{color:"bright"},t.hostname)),g.length>=5&&r.a.createElement(Z.j,{padding:[0,0,0,2]},r.a.createElement(Wl,{value:d,onChange:f,iconLeft:r.a.createElement(Z.o,{name:"search_s",size:"small",color:"text"}),metaShrinked:!0})),r.a.createElement(Z.j,{column:!0,gap:2},g.map((function(e){var t=e.hostname,a=e.url,n=e.status;return r.a.createElement(Gl,{key:t,hostname:t,url:a,status:n})})))))},Xl=function(e){var t=e.parentNode,a=e.replicatedNodes,o=Object(la.b)(ue.f),i=Object(n.useRef)(),s=Object(ul.b)("hello-from-space-panel"),l=Object(A.a)(s,1)[0];return Object(n.useEffect)((function(){l&&i.current&&Object(ul.a)(i.current,{type:"streamed-hosts-data",payload:{parentNode:t,replicatedNodes:a}})}),[a,t,l]),r.a.createElement(Z.j,{ref:i,as:"iframe",src:Object(Tl.a)(o,"space-panel"),title:"space panel",width:"100%",height:"100%",border:{side:"all",size:"0px"}})},Kl={signIn:{title:"Welcome back!",content:[r.a.createElement(Z.F,{key:"1",color:"bright"},"Sign in again to enjoy the benefits of Netdata Cloud"," ")]},signUp:{title:"Welcome to Netdata Cloud!",content:[r.a.createElement(Z.F,{key:"1",color:"bright"},r.a.createElement(Z.F,{strong:!0,color:"bright"},"A single place")," ","for all your nodes."),r.a.createElement(Z.F,{key:"2",color:"bright"},r.a.createElement(Z.F,{strong:!0,color:"bright"},"Multi-node dashboards")," ","out of the box."),r.a.createElement(Z.F,{key:"3",color:"bright"},r.a.createElement(Z.F,{strong:!0,color:"bright"},"Custom dashboards")," ","for you to create, edit and share online."),r.a.createElement(Z.F,{key:"4",color:"bright"},r.a.createElement(Z.F,{strong:!0,color:"bright"},"Metric Correlations")," ","to find the root cause of anything."),r.a.createElement(Z.F,{key:"5",color:"bright"},r.a.createElement(Z.F,{strong:!0,color:"bright"},"Centrally dispatched notifications")," ","for all alarms of all your nodes."),r.a.createElement(Z.F,{key:"6",color:"bright"},"And... 
It is"," ",r.a.createElement(Z.F,{as:"a",href:"https://www.netdata.cloud/get-netdata/",target:"_blank",rel:"noopener noreferrer",strong:!0,color:"bright"},"free, forever!"))]}},Zl=function(){return r.a.createElement(ll.a,{utmParameters:{content:"sidebar"}},(function(e){var t=e.isRegistry,a=e.link,n=e.onSignIn,o=e.offline,i=Kl.signIn,s=i.title,l=i.content;return r.a.createElement(Z.j,{background:["neutral","regentgrey"],column:!0,gap:4,padding:[10],border:{side:"right",color:"panel"}},r.a.createElement(Z.B,{color:"bright",strong:!0},s),l.map((function(e){return e})),r.a.createElement(Z.b,Object.assign({width:"100%",label:"Sign in",disabled:o},t?{as:"a",href:a}:{onClick:n})))}))},ql=function(){return r.a.createElement("svg",{width:"68",height:"68",viewBox:"0 0 68 68",fill:"none",xmlns:"http://www.w3.org/2000/svg"},r.a.createElement("path",{d:"M48.875 6.375H19.125C16.7778 6.375 14.875 8.27779 14.875 10.625V40.375C14.875 42.7222 16.7778 44.625 19.125 44.625H48.875C51.2222 44.625 53.125 42.7222 53.125 40.375V10.625C53.125 8.27779 51.2222 6.375 48.875 6.375Z",fill:"white",stroke:"#AEB3B7"}),r.a.createElement("path",{fillRule:"evenodd",clipRule:"evenodd",d:"M41.0834 38.25C41.8658 38.25 42.5 38.8843 42.5 39.6667V41.0833C44.0648 41.0833 45.3334 42.3519 45.3334 43.9167V58.0833C45.3334 59.6481 44.0648 60.9167 42.5 60.9167H38.25V65.1667H31.8278V60.9167H26.9167C25.3519 60.9167 24.0834 59.6481 24.0834 58.0833V43.9167C24.0834 42.3519 25.3519 41.0833 26.9167 41.0833V39.6667C26.9167 38.8843 27.551 38.25 28.3334 38.25H41.0834Z",fill:"#35414A"}),r.a.createElement("path",{fillRule:"evenodd",clipRule:"evenodd",d:"M39.7954 12.75C40.5778 12.75 41.2121 13.3843 41.2121 14.1667L41.2108 16.7294L43.9166 16.7296C44.699 16.7296 45.3333 17.3639 45.3333 18.1463V34C45.3333 34.7824 44.699 35.4167 43.9166 35.4167L43.272 35.4152L43.2727 33.3403H41.2121L41.2108 35.4152H39.151L39.1515 33.3403H37.0909L37.0897 35.4152H35.0299L35.0303 33.3403H32.9697L32.9686 35.4152H30.9088L30.909 33.3403H28.8484L28.8475 35.4152H26.7877L26.7878 33.3403H24.7272L24.7265 35.4152L24.0833 35.4167C23.3009 35.4167 22.6666 34.7824 22.6666 34V18.1463C22.6666 17.3639 23.3009 16.7296 24.0833 16.7296L26.7877 16.7294L26.7878 14.1667C26.7878 13.3843 27.4221 12.75 28.2045 12.75H39.7954Z",fill:"#35414A"}))},$l=function(){return r.a.createElement(Z.j,{alignItems:"center",background:["neutral","regentgrey"],column:!0,gap:1,padding:[10]},r.a.createElement(Z.F,{color:"bright",strong:!0,textAlign:"center"},"Can't connect to Netdata Cloud"),r.a.createElement(ql,null),r.a.createElement(Z.E,{color:"bright",textAlign:"center",margin:[2,0,0]},"Maybe you are behind a firewall or you don\u2019t have connection to the internet"))},Jl=a(624),Ql=(a(561),a(562),a(563),$.d.div.withConfig({displayName:"styled__NodesContainer",componentId:"sc-9qwvkl-0"})([".mdc-list-item{padding:0 0;padding-left:0;}.rmwc-collapsible-list__children{.mdc-list-item{padding:0 0;padding-left:0;height:",";}}.rmwc-collapsible-list__handle{.mdc-list-item{padding:0 ",";}}.mdc-list-item__meta{color:",";}.mdc-list-item:before{background:none;}"],Object(Z.J)(4),Object(Z.J)(2),Object(Z.H)("bright"))),ec=$.d.div.withConfig({displayName:"styled__ListItem",componentId:"sc-9qwvkl-1"})(["width:100%;min-height:",";display:flex;flex-flow:row nowrap;align-items:center;cursor:pointer;justify-content:space-between;"],Object(Z.J)(3)),tc=Object($.d)(Z.o).withConfig({displayName:"styled__TrashIcon",componentId:"sc-9qwvkl-2"})(["fill:#35414a;margin-right:",";transition:opacity 0.4s 
ease-in;&:hover{opacity:0.6;}"],Object(Z.J)(2)),ac=Object($.d)(Z.o).withConfig({displayName:"styled__StyledIcon",componentId:"sc-9qwvkl-3"})(["flex-shrink:0;flex-grow:0;margin-right:",";fill:",";"],Object(Z.J)(2),Object(Z.H)(["gray","arsenic"])),nc=Object($.d)(Z.E.withComponent("a")).withConfig({displayName:"styled__NodeUrl",componentId:"sc-9qwvkl-4"})(["text-decoration:none;margin-left:",";color:#aeb3b7;max-width:145px;word-wrap:break-word;&:hover{color:inherit;text-decoration:none;}"],Object(Z.J)(5)),rc=Object($.d)(Z.B.withComponent("a")).withConfig({displayName:"styled__NodeName",componentId:"sc-9qwvkl-5"})(["flex:1;overflow:hidden;text-overflow:ellipsis;min-width:0;white-space:nowrap;"]),oc=function(e){var t=e.name,a=e.alternateUrls,n=e.machineGuid;return r.a.createElement(Jl.a,{handle:r.a.createElement(Ao.b,{text:r.a.createElement(r.a.Fragment,null,r.a.createElement(ac,{name:"node"}),r.a.createElement(rc,{color:"bright",href:"",onClick:function(e){e.preventDefault(),e.stopPropagation(),window.gotoServerModalHandler(n)}},t)),metaIcon:a.length&&"chevron_right"})},r.a.createElement(Z.a,{margin:[2,0,0]},a.map((function(e){return r.a.createElement(ec,{key:e},r.a.createElement(nc,{href:e},function(e,t){if(e.length<=t)return e;var a=Math.floor((t-3)/2);return"".concat(e.substring(0,a),"...").concat(e.substring(e.length-a))}(e,50)),r.a.createElement(tc,{name:"trashcan",size:"small",onClick:function(){window.deleteRegistryModalHandler(n,t,e)}}))}))))},ic=function(e){var t=e.machinesArray.sort((function(e,t){return Fl(e.name,t.name)})).filter((function(e){return e.url!==h.c})),a=Object(n.useState)(!0),o=Object(A.a)(a,2),i=o[0],s=o[1],l=Object(n.useCallback)((function(){return s((function(e){return!e}))}),[]);return r.a.createElement(Bs,{isOpen:i,toggleOpen:l,label:r.a.createElement(Z.j,{alignItems:"center",justifyContent:"between"},r.a.createElement(Z.B,{strong:!0,color:"border"},"Visited Nodes"),r.a.createElement(ac,{right:!i,name:"chevron_down",size:"small",color:"text"}))},r.a.createElement(Ql,{column:!0,gap:2},t.map((function(e){var t=e.name,a=e.alternateUrls,n=e.guid,o=e.url;return r.a.createElement(oc,{alternateUrls:a,key:"".concat(t,"-").concat(n),machineGuid:n,name:t,url:o})}))))},sc=Object(Ha.a)((function(e){return e.global.chartsMetadata.data||{}}),(function(e){return e.global.registry.fullInfoPayload.mirrored_hosts_status||{}}),(function(e,t){var a=e.hosts,n=e.hostname;return zl(a,n,t)})),lc=Object(Ha.a)((function(e){return e.global.registry}),(function(e){return e.registryMachinesArray||[]})),cc=Object(Ha.a)((function(e){return e.dashboard}),(function(e){return{isSignedIn:e.isSignedIn,offline:e.offline}})),uc=r.a.memo((function(e){var t=e.isOpen,a=e.toggle,o=Object(s.e)(sc),i=o.parentNode,l=void 0===i?{}:i,c=o.replicatedNodes,u=void 0===c?[]:c,d=Object(s.e)(lc),h=Object(s.e)(ue.v),p=Object(s.e)(cc),f=p.isSignedIn,g=p.offline,m=Object(s.e)(ue.u),b=Object(n.useCallback)((function(){return window.switchRegistryModalHandler()}),[]);return 
r.a.createElement(Z.d,{width:74,background:"panel",open:t,direction:"horizontal",persist:!0},r.a.createElement(Z.j,{flex:!0,column:!0,overflow:{vertical:"hidden"},margin:[3,0,0],border:{side:"left",color:"separator"},style:{pointerEvents:"all"}},r.a.createElement(Z.j,{overflow:{vertical:"auto"},flex:!0,column:!0,gap:4,padding:[4]},r.a.createElement(Z.j,{alignSelf:"end"},r.a.createElement(Z.b,{neutral:!0,flavour:"borderless",themeType:"dark",small:!0,icon:"chevron_left",onClick:a})),!f&&r.a.createElement(r.a.Fragment,null,!!u.length&&r.a.createElement(Yl,{parentNode:l,replicatedNodes:u}),!!d.length&&r.a.createElement(Z.B,{strong:!0,color:"border"},r.a.createElement(ic,{machinesArray:d}))),f&&r.a.createElement(Xl,{parentNode:l,replicatedNodes:u})),h&&r.a.createElement(Z.j,{border:{side:"top"},justifyContent:"center",alignItems:"center",padding:[6]},r.a.createElement(Z.F,{onClick:b},"Switch Identity")),!f&&m&&r.a.createElement(Zl,null),g&&m&&r.a.createElement($l,null)))})),dc=Object($.d)(Z.j).attrs({height:"100vh",zIndex:10}).withConfig({displayName:"sidebar__Wrapper",componentId:"v4d3v5-0"})(["pointer-events:all;"]),hc=Object(Ha.a)((function(e){return e.dashboard}),(function(e){return e.isSignedIn})),pc=r.a.memo((function(){var e=Object(cl.a)("space-panel-state"),t=Object(A.a)(e,2),a=t[0],o=t[1],i=Object(s.e)(ue.E),l=Object(s.e)(hc),c=Object(s.d)(),u=Object(n.useCallback)((function(){c(Object(Ke.w)({isActive:!i})),o(!i)}),[i]);return Object(n.useEffect)((function(){c(Object(Ke.w)({isActive:!!a&&l}))}),[l]),r.a.createElement(dc,null,r.a.createElement(Rl,{isOpen:i,toggle:u,isSignedIn:l}),r.a.createElement(uc,{isOpen:i,toggle:u,offline:!0}))})),fc=Object($.d)(Z.j).attrs({position:"fixed",justifyContent:"start",alignItems:"start",width:"100%",zIndex:10}).withConfig({displayName:"layout__Wrapper",componentId:"sc-1d05imu-0"})(["top:0;left:0;pointer-events:none;"]),gc=function(e){var t=e.children;return e.printMode?t:r.a.createElement(fc,null,r.a.createElement(pc,null),r.a.createElement(kl,null),t)},mc=(a(564),function(e){return{slate:Z.e,white:Z.f}[e]||Z.e}),bc=a(225),vc=function(e){var t=e.migrationModalPromoInfo,a=e.setUserPrefrence,o=e.closeModal,i=e.savePromoRemindMeSelection,s=e.migrationModalPromo,l=e.requestRefreshOfAccess,c=Object(n.useState)(!1),u=Object(A.a)(c,2),d=u[0],h=u[1],p=Object(n.useCallback)((function(e){var n=e.link,r=e.toPath,s=t.CTA1;"NAVIGATE"===s.action?(d&&(a(s.userPreference),i(d)),"agent"!==r&&(window.location.href=n),o()):"REFRESH"===s.action&&l()}),[t,a,d,l,i,o]),f=Object(n.useCallback)((function(){var e=t.CTA2;d&&(a(e.userPreference),i(d)),"NAVIGATE"===e.action||"REFRESH"===e.action&&l(),o()}),[t,a,d,l,i,o]);return t?r.a.createElement(Z.q,null,r.a.createElement(Z.t,{width:180,background:"modalBackground"},r.a.createElement(Z.v,null,r.a.createElement(Z.k,{margin:[0]},t.title)),r.a.createElement(Z.r,null,r.a.createElement(Z.j,{padding:[0,0,4,0],column:!0,gap:3},"function"===typeof t.text.header?t.text.header({}):r.a.createElement(Z.B,null,t.text.header),t.text.bullets.length>0&&r.a.createElement(Z.j,{column:!0,gap:3},r.a.createElement(Z.j,{column:!0,gap:1,as:"ul"},t.text.bullets.map((function(e){return"function"===typeof 
e?r.a.createElement("li",{key:e},e()):r.a.createElement("li",{key:e},r.a.createElement(Z.B,null,e))})))),t.text.footer&&r.a.createElement(Z.B,{"data-testid":"body-footer"},t.text.footer))),r.a.createElement(Z.u,null,r.a.createElement(Z.a,{margin:[0,"auto",0,0]},r.a.createElement(Z.c,{"data-ga":"".concat(s,"::click-remind-me::ad"),"data-testid":"remind-me-checkbox",checked:d,onChange:function(e){h(e.currentTarget.checked)},label:t.tickBoxOption.text})),r.a.createElement(Z.a,{"data-testid":"cta1",margin:[0,2,0,0],width:{min:40}},r.a.createElement(ll.a,{utmParameters:{content:s,campaign:"agent_nudge_to_cloud"}},(function(e){var a=e.link;return r.a.createElement(Z.b,{"data-ga":"".concat(s,"::click-ct1::ad"),textTransform:"none","data-testid":"cta1-button",onClick:function(){return p({link:a,toPath:t.CTA1.toPath})},width:"100%",label:t.CTA1.text})}))),t.CTA2&&r.a.createElement(Z.a,{"data-ga":"".concat(s,"::click-ct2::ad"),onClick:f,height:10,className:"btn btn-default","data-testid":"cta2",width:{min:40}},r.a.createElement(Z.a,{as:Z.B,sx:{fontWeight:"500",lineHeight:"25px"}},t.CTA2.text))))):null};!function(e){e.PROMO_SIGN_IN_CLOUD="PROMO_SIGN_IN_CLOUD",e.PROMO_SIGN_UP_CLOUD="PROMO_SIGN_UP_CLOUD",e.PROMO_IVNITED_TO_SPACE="PROMO_IVNITED_TO_SPACE",e.PROMO_CLAIM_NODE="PROMO_CLAIM_NODE",e.PROMO_TO_USE_NEW_DASHBAORD="PROMO_TO_USE_NEW_DASHBAORD",e.FALLBACK_TO_AGENT="FALLBACK_TO_AGENT",e.NO_INFO_FALLBACK_TO_AGENT="NO_INFO_FALLBACK_TO_AGENT"}(no||(no={}));var _c=function(e){return"".concat(Ve.h).concat(Object(ue.P)({content:e,campaign:"agent_nudge_to_cloud"}))},yc=(to={},Object(u.a)(to,no.PROMO_SIGN_UP_CLOUD,{title:"Learn about Netdata Cloud!",text:{header:function(){return r.a.createElement(Z.B,{strong:!0},"Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:")},bullets:["Infrastructure level dashboards (each chart aggregates data from multiple nodes)","Central dispatch of alert notifications","Custom dashboards editor","Intelligence assisted troubleshooting, to help surface the root cause of issues"],footer:"Have a look, you will be surprised!"},tickBoxOption:{text:"Remember my choice",preferenceID:no.PROMO_SIGN_UP_CLOUD},CTA1:{text:"Wow! Let\u2019s go to Netdata Cloud",toPath:"path/signup/cloud",action:"NAVIGATE",userPreference:"CLOUD"},CTA2:{text:"Later, stay at the agent dashboard",action:"NAVIGATE",toPath:"path/agent-dashboard",userPreference:"AGENT"}}),Object(u.a)(to,no.PROMO_SIGN_IN_CLOUD,{title:"Sign-in to Netdata Cloud or get an invitation!",text:{header:function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Z.B,{strong:!0},"This node is connected to Netdata Cloud but you are not. 
If you have a Netdata Cloud account, sign in; if not, ask for an invitation to it."),r.a.createElement(Z.B,null,"Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:"))},bullets:["Infrastructure level dashboards (each chart aggregates data from multiple nodes)","Central dispatch of alert notifications","Custom dashboards editor","Intelligence assisted troubleshooting, to help surface the root cause of issues"],footer:"Have a look, you will be surprised!"},tickBoxOption:{text:"Remember my choice",preferenceID:no.PROMO_SIGN_IN_CLOUD},CTA1:{text:"Sign in or get a Netdata Cloud account",action:"NAVIGATE",toPath:"path/signin/cloud",userPreference:"CLOUD"},CTA2:{text:"Later, stay at the Agent dashboard",toPath:"path/agent-dashboard",action:"NAVIGATE",userPreference:"AGENT"}}),Object(u.a)(to,no.PROMO_IVNITED_TO_SPACE,{title:"Get an invitation to this Node\u2019s Space!",text:{header:function(){return r.a.createElement(Z.B,{strong:!0},"This node is connected to Netdata Cloud but it isn\u2019t available on one of your Spaces.")},bullets:[],footer:"Ask for an invitation to this Space!"},tickBoxOption:{text:"Don't remind me of this again",preferenceID:no.PROMO_IVNITED_TO_SPACE},CTA1:{text:"Thanks, stay at Agent dashboard for now",toPath:"agent",action:"NAVIGATE",userPreference:"AGENT"}}),Object(u.a)(to,no.PROMO_CLAIM_NODE,{title:"This node isn\u2019t connected to Netdata Cloud",text:{header:function(){return r.a.createElement(Z.B,{strong:!0},"For you to be able to see this node on Netdata Cloud you will either need to:")},footer:"Have a look, you will be surprised!",bullets:[function(){return r.a.createElement(Z.B,null," ","Connect this node directly (documentation on"," ",r.a.createElement(yl.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/agent/claim?".concat(_c(no.PROMO_CLAIM_NODE).substring(1),"#how-to-connect-a-node")},"how to connect a node"),"), or")},function(){return r.a.createElement(Z.B,null,"Activate streaming to a parent node that is already connected (documentation on"," ",r.a.createElement(yl.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/metrics-storage-management/enable-streaming?".concat(_c(no.PROMO_CLAIM_NODE).substring(1))},"how to configure streaming"),")")}]},tickBoxOption:{text:"Remember my choice.",preferenceID:no.PROMO_CLAIM_NODE},CTA1:{text:"Wow! Let\u2019s go to Netdata Cloud",action:"NAVIGATE",toPath:"path/node/cloud",userPreference:"CLOUD"},CTA2:{text:"Later, stay at the Agent dashboard",action:"NAVIGATE",toPath:"path/agent-dashboard",userPreference:"AGENT"}}),Object(u.a)(to,no.PROMO_TO_USE_NEW_DASHBAORD,{title:"Use the Old or the New dashboard?",text:{header:function(){return r.a.createElement(Z.B,{strong:!0},"This node is available in your Netdata Cloud account. So, you have full access to the NEW dashboards, charts, intelligence-assisted troubleshooting and many more!")},bullets:[]},tickBoxOption:{text:"Remember my choice",preferenceID:no.PROMO_TO_USE_NEW_DASHBAORD},CTA1:{text:"Wow! Let\u2019s go to Netdata Cloud",action:"NAVIGATE",toPath:"path/dashboard/cloud",userPreference:"CLOUD"},CTA2:{text:"Later, stay at the Agent dashboard",action:"NAVIGATE",toPath:"path/agent-dashboard",userPreference:"AGENT"}}),Object(u.a)(to,no.FALLBACK_TO_AGENT,{title:"Oops!
This node has lost connection to Netdata Cloud!",text:{header:function(e){e.date;return r.a.createElement(r.a.Fragment,null,r.a.createElement(Z.B,{strong:!0},"Unfortunately, it seems that this node is not currently connected to Netdata Cloud. So, the old agent dashboard is the only option available."),r.a.createElement(Z.B,null,"To troubleshoot Netdata Cloud connection issues, please follow"," ",r.a.createElement(yl.a,{target:"_blank",rel:"noopener noreferrer",href:"https://learn.netdata.cloud/docs/agent/claim?".concat(_c(no.FALLBACK_TO_AGENT).substring(1),"#troubleshooting")},"this guide.")))},bullets:[]},tickBoxOption:{text:"Don't show this again",preferenceID:no.FALLBACK_TO_AGENT},CTA1:{text:"Check again please",action:"REFRESH",userPreference:void 0},CTA2:{text:"Thanks, stay at Agent dashboard",toPath:"path/agent",action:"NAVIGATE",userPreference:"AGENT"}}),Object(u.a)(to,no.NO_INFO_FALLBACK_TO_AGENT,{title:"Oops! We aren't able to get information on this node regarding Netdata Cloud!",text:{header:function(){return r.a.createElement(r.a.Fragment,null,r.a.createElement(Z.B,{strong:!0},"Unfortunately, it seems we aren't able to get information on this node regarding Netdata Cloud."),r.a.createElement(Z.B,null,"This could be due to internet connectivity issues on your end or a temporary issue with our services. So, the old agent dashboard is the only option available."))},bullets:[]},tickBoxOption:{text:"Don't show this again",preferenceID:no.NO_INFO_FALLBACK_TO_AGENT},CTA1:{text:"Check again please",action:"REFRESH",userPreference:void 0},CTA2:{text:"Thanks, stay at Agent dashboard",toPath:"path/agent-dashboard",action:"NAVIGATE",userPreference:"AGENT"}}),to),Oc=(ao={},Object(u.a)(ao,no.FALLBACK_TO_AGENT,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus,r=e.nodeLiveness,o=e.userNodeAccess;return"AGENT"!==t&&("LOGGED_IN"===a||"EXPIRED_LOGIN"===a)&&"CLAIMED"===n&&"NOT_LIVE"===r&&"ACCESS_OK"===o})),Object(u.a)(ao,no.NO_INFO_FALLBACK_TO_AGENT,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus,r=e.nodeLiveness,o=e.userNodeAccess;return"CLOUD"===t&&!a&&!n&&!r&&!o})),Object(u.a)(ao,no.PROMO_TO_USE_NEW_DASHBAORD,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeLiveness,r=e.userNodeAccess;return!t&&("LOGGED_IN"===a||"EXPIRED_LOGIN"===a)&&"LIVE"===n&&"ACCESS_OK"===r})),Object(u.a)(ao,no.PROMO_CLAIM_NODE,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus;return"AGENT"!==t&&("LOGGED_IN"===a||"EXPIRED_LOGIN"===a)&&"NOT_CLAIMED"===n})),Object(u.a)(ao,no.PROMO_IVNITED_TO_SPACE,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus,r=e.userNodeAccess;return"AGENT"!==t&&("LOGGED_IN"===a||"EXPIRED_LOGIN"===a)&&"CLAIMED"===n&&"NO_ACCESS"===r})),Object(u.a)(ao,no.PROMO_SIGN_IN_CLOUD,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus;return"AGENT"!==t&&"UNKNOWN"===a&&"CLAIMED"===n})),Object(u.a)(ao,no.PROMO_SIGN_UP_CLOUD,(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeClaimedStatus;return"AGENT"!==t&&"UNKNOWN"===a&&"NOT_CLAIMED"===n})),ao),xc=function(e){var t=e.userStatus,a=e.nodeClaimedStatus,r=e.userNodeAccess,o=e.nodeLiveness,i=Object(cl.a)("USER_SAVED_PREFERENCE"),s=Object(A.a)(i,2),l=s[0],c=s[1],u=Object(n.useMemo)((function(){return Object.keys(Oc).find((function(e){return
Oc[e]({userStatus:t,nodeClaimedStatus:a,userNodeAccess:r,userSavedPreference:l,nodeLiveness:o})}))}),[t,a,r,o,l]);return{migrationModalPromoInfo:yc[u],migrationModalPromo:u,setUserPrefrence:c,userSavedPreference:l}},wc=function(){var e=Object(s.e)(ue.u),t=Object(s.e)(ue.x),a=Object(s.e)((function(e){return Object(ue.B)({content:"agent-auto-redirect",term:t.machineGuid})(e)})),o=Object(n.useMemo)((function(){var e=window.location.href,t=encodeURIComponent(e);return"".concat(a,"&redirect_uri=").concat(t)}),[a]),i=Object(s.e)(ue.O),l=Object(n.useState)(!1),c=Object(A.a)(l,2),u=c[0],d=c[1],h=xc(Object(g.a)({},i)),p=h.migrationModalPromoInfo,f=h.setUserPrefrence,m=h.userSavedPreference,b=h.migrationModalPromo,v=(null===p||void 0===p?void 0:p.tickBoxOption.preferenceID)||"",_=Object(cl.a)(v),y=Object(A.a)(_,2)[1],O=localStorage.getItem(v),x=e&&p&&u&&(!O||"undefined"===O),w=Object(dl.b)();return Object(n.useEffect)((function(){var e=setTimeout((function(){return d(!0)}),4e3);return function(){clearTimeout(e)}}),[]),Object(n.useEffect)((function(){(function(e){var t=e.userSavedPreference,a=e.userStatus,n=e.nodeLiveness,r=e.userNodeAccess;return"CLOUD"===t&&("LOGGED_IN"===a||"EXPIRED_LOGIN"===a)&&"LIVE"===n&&"ACCESS_OK"===r})(Object(g.a)({userSavedPreference:m},i))&&(window.location.href=o)}),[o,i,m]),Object(n.useEffect)((function(){"AGENT"==={userSavedPreference:m}.userSavedPreference&&console.log("Lets go to Agent")}),[m]),Object(n.useEffect)((function(){document.documentElement.style.overflow=x?"hidden":"auto"}),[u,x]),x?r.a.createElement(vc,{savePromoRemindMeSelection:y,migrationModalPromoInfo:p,setUserPrefrence:f,closeModal:function(){d(!1)},migrationModalPromo:b,requestRefreshOfAccess:w}):null},Ec=Z.a;window.Ps=na.a;var Sc=function(){var e=Object(la.b)(ue.u),t=Object(s.f)();Object(n.useEffect)((function(){window.NETDATA.alarms={},window.NETDATA.pause=function(e){e()},Object(bc.a)(t)}),[]);var a=Object(n.useState)(),o=Object(A.a)(a,2),i=o[0],l=o[1],c=void 0!==i,u=Object(n.useRef)((function(){l(Math.random())}));Object(n.useEffect)((function(){if(c){var e=document.getElementById("loadOverlay");e&&(e.style.display="none")}}),[c]);var d=Object(sa.b)(),p=d.localeDateString,f=d.localeTimeString;Object(n.useEffect)((function(){Object(bc.b)({localeDateString:p,localeTimeString:f})}),[p,f]),function(e){var t=Object(la.b)(ue.x),a=Object(la.a)();Object(n.useEffect)((function(){!e||t.isFetchingHello||t.hasFetchedHello||t.isHelloCallError||a(Ke.f.request({serverDefault:D.b}))}),[a,t,e])}(!0),function(e){var t=Object(la.b)(ue.r),a=Object(la.a)();Object(n.useEffect)((function(){e&&!t&&a(Object(Ke.A)({serverDefault:D.b}))}),[a,t,e])}(!0),function(e){var t=Object(la.b)(ue.x),a=(null===t||void 0===t?void 0:t.hasStartedInfo)||!1,r=Object(la.a)();Object(n.useEffect)((function(){e&&!a&&r(_e.f.request({poll:!1}))}),[r,a,e])}(!0);var g=Object(n.useState)(!1),m=Object(A.a)(g,2),b=m[0],v=m[1];Object(n.useLayoutEffect)((function(){Promise.all([ia(D.c+window.NETDATA.themes.current.bootstrap_css),ia(D.c+window.NETDATA.themes.current.dashboard_css)]).then((function(){v(!0)}))}),[]);var _=function(){var e=qo("".concat(D.b,"api/v1/charts"));return Object(A.a)(e,1)[0]}(),y=Object(la.b)(ue.f);window.NETDATA.parseDom=u.current;var O=Object(la.b)(ue.q),x=Object(la.b)(ue.K);return function(){var e=Object(la.a)();Object(dn.a)((function(){var t=Object(Wt.b)(),a=t.alarm_when;if(a){var 
n=Number(a),r=t.alarm_status,o=t.alarm_chart,i=t.alarm_value;if(!h.d.includes(r)||!o||!i)return;e(Object(Ke.n)({alarm:{chartId:o,status:r,value:i,when:n}}));e(Object(Ke.s)({after:1e3*n-3e5,before:1e3*n+3e5}))}}))}(),r.a.createElement($.b,{theme:mc(x)},b&&r.a.createElement(fi,null),r.a.createElement(r.a.Fragment,null),_&&y&&O&&c&&r.a.createElement(r.a.Fragment,null,r.a.createElement(gc,{printMode:ve},Ye.a?null:r.a.createElement(wc,null),b&&r.a.createElement(r.a.Fragment,null,r.a.createElement(Zo,{key:i}),r.a.createElement(pi,null,r.a.createElement(hi,null)),ve&&r.a.createElement(Qo,null))),e&&r.a.createElement(Ec,{height:15})))};a(565);i.a.render(r.a.createElement(s.a,{store:ta},r.a.createElement(Sc,null)),document.getElementById("root"))},58:function(e,t,a){"use strict";var n=a(5),r=a(0),o=a(27),i=a(17),s=a(580),l=a(44),c=a(9),u=a(28),d=Object(i.a)(c.x,(function(e){return e.registryServer===u.b})),h=Object(i.a)((function(e){return e.dashboard}),(function(e){return e.offline}));t.a=function(e){var t=e.children,a=e.utmParameters,i=Object(s.a)("has-sign-in-history"),u=Object(n.a)(i,1)[0],p=Object(o.e)((function(e){return Object(c.B)(a)(e)})),f=Object(o.e)(d),g=Object(o.e)(h),m=Object(o.d)(),b=Object(r.useMemo)((function(){var e=window.location.href,t=encodeURIComponent(e);return"".concat(p,"&redirect_uri=").concat(t)}),[p]),v=Object(r.useCallback)((function(){return m(Object(l.d)({signInLinkHref:b}))}),[b]);return Object(r.useMemo)((function(){return"function"===typeof t?t({isRegistry:f,link:b,onSignIn:v,offline:g,hasSignedInBefore:u}):t}),[t,f,b,v,g,u])}},64:function(e,t,a){"use strict";a.d(t,"b",(function(){return l})),a.d(t,"d",(function(){return u})),a.d(t,"a",(function(){return d})),a.d(t,"c",(function(){return h}));var n=a(5),r=a(310),o=a(312),i=a(578),s=/[&;]/,l=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:decodeURIComponent(window.location.hash.substr(1));if(0===e.length)return{};var t=e.split(s),a=t.reduce((function(e,t){var a=t.split("="),r=Object(n.a)(a,2),o=r[0],i=r[1];return e[o]=i,e}),{});return a},c=function(e){var t=Object.entries(e);return 0===t.length?"":t.map((function(e){var t=Object(n.a)(e,2),a=t[0],r=t[1];return void 0===r?a:"".concat(a,"=").concat(encodeURIComponent(r))})).join(";")},u=(Object(o.a)(l,c),function(e){var t=l(),a=Object(i.a)(e,t);window.history.replaceState(window.history.state,"","#".concat(c(a)))}),d=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1));return l(t)[e]},h=function(e){window.history.replaceState(window.history.state,"","#".concat(function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:decodeURIComponent(window.location.hash.substr(1)),a=Object(r.a)(e,l(t));return c(a)}(e)))}},74:function(e,t,a){"use strict";a.d(t,"a",(function(){return o})),a.d(t,"b",(function(){return i}));var n=a(5),r=a(0),o=function(e,t){var a="string"===typeof e?document.getElementById(e):e;a.contentWindow&&a.contentWindow.postMessage(t,"*")},i=function(e,t,a){var o=Object(r.useState)(a),i=Object(n.a)(o,2),s=i[0],l=i[1],c=Object(r.useCallback)((function(a){var n=a.data;n.type===e&&(l(n.payload),t&&t(n.payload))}),[t,e]),u=Object(r.useCallback)((function(){l(a)}),[a]);return Object(r.useEffect)((function(){return window.addEventListener("message",c),function(){window.removeEventListener("message",c)}}),[c,e]),[s,u]}},75:function(e,t,a){"use strict";a.d(t,"b",(function(){return n})),a.d(t,"c",(function(){return r})),a.d(t,"a",(function(){return o}));var 
n="true",r=!1,o=!1},76:function(e,t,a){"use strict";a.d(t,"a",(function(){return n}));var n="dashboard"},8:function(e,t,a){"use strict";a.d(t,"b",(function(){return r})),a.d(t,"a",(function(){return o}));a(0);var n=a(27),r=n.e,o=n.d},83:function(e,t,a){"use strict";a.d(t,"a",(function(){return r}));var n=window.location.hash.split(";").includes("help=true"),r=function(){if(n)return!1;var e=document.location.hostname;return e.endsWith(".my-netdata.io")||e.endsWith(".mynetdata.io")||e.endsWith(".netdata.rocks")||e.endsWith(".netdata.ai")||e.endsWith(".netdata.live")||e.endsWith(".firehol.org")||e.endsWith(".netdata.online")||e.endsWith(".netdata.cloud")}()},9:function(e,t,a){"use strict";a.d(t,"a",(function(){return u})),a.d(t,"h",(function(){return h})),a.d(t,"g",(function(){return p})),a.d(t,"o",(function(){return f})),a.d(t,"p",(function(){return g})),a.d(t,"m",(function(){return m})),a.d(t,"i",(function(){return b})),a.d(t,"l",(function(){return v})),a.d(t,"s",(function(){return _})),a.d(t,"n",(function(){return y})),a.d(t,"D",(function(){return O})),a.d(t,"x",(function(){return x})),a.d(t,"f",(function(){return w})),a.d(t,"P",(function(){return E})),a.d(t,"B",(function(){return S})),a.d(t,"v",(function(){return C})),a.d(t,"u",(function(){return A})),a.d(t,"q",(function(){return k})),a.d(t,"k",(function(){return j})),a.d(t,"r",(function(){return T})),a.d(t,"c",(function(){return D})),a.d(t,"d",(function(){return P})),a.d(t,"E",(function(){return M})),a.d(t,"F",(function(){return L})),a.d(t,"b",(function(){return N})),a.d(t,"j",(function(){return R})),a.d(t,"G",(function(){return B})),a.d(t,"z",(function(){return F})),a.d(t,"t",(function(){return H})),a.d(t,"I",(function(){return z})),a.d(t,"H",(function(){return U})),a.d(t,"K",(function(){return G})),a.d(t,"A",(function(){return W})),a.d(t,"w",(function(){return V})),a.d(t,"C",(function(){return Y})),a.d(t,"N",(function(){return X})),a.d(t,"J",(function(){return K})),a.d(t,"y",(function(){return Z})),a.d(t,"L",(function(){return q})),a.d(t,"M",(function(){return $})),a.d(t,"e",(function(){return Q})),a.d(t,"O",(function(){return ee}));var n=a(49),r=a(221),o=a(17),i=a(28),s=a(36),l=a(159),c=a(26),u=function(e){return function(t){var a=Object(l.a)(e),n=t[c.e].commonColorsKeys[a];return n&&n.assigned}},d=function(e){return e.global},h=Object(o.a)(d,(function(e,t){return t}),(function(e,t){return e.commonMin[t]})),p=Object(o.a)(d,(function(e,t){return t}),(function(e,t){return e.commonMax[t]})),f=Object(o.a)(d,Object(n.a)("hoveredX")),g=Object(o.a)(d,Object(n.a)("currentSelectionMasterId")),m=Object(o.a)(d,Object(n.a)("globalPanAndZoom")),b=Object(o.a)(d,Object(n.a)("defaultAfter")),v=Object(o.a)(d,Object(n.a)("globalChartUnderlay")),_=Object(o.a)(d,Object(n.a)("hasWindowFocus")),y=Object(o.a)(d,Object(n.a)("globalPause")),O=Object(o.a)(d,Object(n.a)("snapshot")),x=Object(o.a)(d,Object(n.a)("registry")),w=Object(o.a)(x,Object(n.a)("cloudBaseURL")),E=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.keys(e).reduce((function(t,a){return t+"&utm_".concat(a,"=").concat(e[a])}),"")},S=function(e){return Object(o.a)(x,w,(function(t,a){var n=encodeURIComponent(t.hostname),r=encodeURIComponent(Object(s.a)(window.location.origin+window.location.pathname));return"".concat(a,"/sign-in?id=").concat(t.machineGuid,"&name=").concat(n,"&origin=").concat(r).concat(i.h).concat(E(e))}))},C=(Object(o.a)(x,Object(n.a)("isFetchingHello")),Object(o.a)(x,(function(e){var t=e.registryServer;return 
t&&"https://registry.my-netdata.io"!==t}))),A=Object(o.a)(x,(function(e){return e.isCloudEnabled&&!e.isHelloCallError})),k=Object(o.a)(x,Object(n.a)("hasFetchedInfo")),j=Object(o.a)(x,Object(n.a)("fullInfoPayload")),T=Object(o.a)(d,Object(r.a)(["alarms","hasStartedAlarms"])),D=Object(o.a)(d,(function(e){return e.alarms.activeAlarms})),P=Object(o.a)(d,(function(e){return e.alarm})),M=Object(o.a)(d,Object(n.a)("spacePanelIsActive")),L=Object(o.a)(d,Object(n.a)("spacePanelTransitionEndIsActive")),I=Object(o.a)(d,(function(e){return e.options})),N=function(e){return Object(o.a)(I,(function(t){return t[e]}))},R=N("destroy_on_hide"),B=N("stop_updates_when_focus_is_lost"),F=N("eliminate_zero_dimensions"),H=N("async_on_scroll"),z=(N("parallel_refresher"),N("concurrent_refreshes"),N("sync_selection")),U=N("sync_pan_and_zoom"),G=N("theme"),W=N("show_help"),V=N("pan_and_zoom_data_padding"),Y=N("smooth_plot"),X=N("units"),K=N("temperature"),Z=N("seconds_as_time"),q=N("timezone"),$=N("utcOffset"),J=(N("user_set_server_timezone"),Object(o.a)(d,(function(e){return e.chartsMetadata.data}))),Q=Object(o.a)(J,(function(e,t){return t.chartId}),(function(e,t){return null===e||void 0===e?void 0:e.charts[t]})),ee=Object(o.a)(d,(function(e){return e.userNodeAccess}))},91:function(e,t,a){"use strict";var n=a(37),r=a(0),o=a.n(r),i=a(4),s=a(3),l=Object(i.d)("a").withConfig({displayName:"anchor__BaseAnchor",componentId:"sc-1eq5dgj-0"})(["&&{color:",";:hover{color:",";}:visited{color:",";}}"],Object(s.H)("primary"),Object(s.H)("primary"),Object(s.H)("accent"));t.a=function(e){var t=e.Component,a=void 0===t?s.B:t,r=Object(n.a)(e,["Component"]);return o.a.createElement(a,Object.assign({as:l},r))}},92:function(e,t,a){"use strict";a.d(t,"a",(function(){return o}));var n=a(7),r=a(22),o=function(e){var t=Object(r.createAction)(e.toUpperCase());return Object.assign(t,{request:t,success:Object(r.createAction)("".concat(e.toUpperCase(),"_SUCCESS"),(function(e){return e}),(function(e){return e})),failure:Object(r.createAction)("".concat(e.toUpperCase(),"_FAILURE"),(function(e){return e}),(function(e){return Object(n.a)({},e,{error:!0})}))})}},99:function(e,t,a){"use strict";a.d(t,"a",(function(){return l})),a.d(t,"c",(function(){return u})),a.d(t,"e",(function(){return d})),a.d(t,"d",(function(){return h})),a.d(t,"b",(function(){return p}));var n,r=a(14),o=a(227),i=a(572),s=a(114),l="sync_pan_and_zoom",c=(n={},Object(r.a)(n,"stop_updates_when_focus_is_lost",!0),Object(r.a)(n,"eliminate_zero_dimensions",!0),Object(r.a)(n,"destroy_on_hide",!1),Object(r.a)(n,"async_on_scroll",!1),Object(r.a)(n,"parallel_refresher",!0),Object(r.a)(n,"concurrent_refreshes",!0),Object(r.a)(n,"sync_selection",!0),Object(r.a)(n,l,!0),Object(r.a)(n,"legend_right",!1),Object(r.a)(n,"theme","slate"),Object(r.a)(n,"show_help",Boolean(window.netdataShowHelp)&&!window.netdataNoBootstrap),Object(r.a)(n,"pan_and_zoom_data_padding",!0),Object(r.a)(n,"smooth_plot",!0),Object(r.a)(n,"units","auto"),Object(r.a)(n,"temperature","celsius"),Object(r.a)(n,"seconds_as_time",!0),Object(r.a)(n,"timezone","default"),Object(r.a)(n,"user_set_server_timezone","default"),Object(r.a)(n,"utcOffset",0),n),u=function(){var e=Object.keys(localStorage).filter((function(e){return e.startsWith("options.")||"netdataTheme"===e})).map((function(e){return Object(r.a)({},function(e){return e.replace(/^options\./,"").replace("netdataTheme","theme")}(e),function(e){var t,a=localStorage.getItem(e);if(null===a||"undefined"===a)return 
localStorage.removeItem(e),null;try{t=JSON.parse(a)}catch(n){return"netdataTheme"===e&&a?a:(console.log('localStorage: failed to read "'.concat(e,'", using default')),localStorage.removeItem(e),null)}return t}(e))})).filter((function(e){return null!==Object.values(e)[0]})),t=Object(o.a)(e);return Object(i.a)(c,t)},d=u(),h=d.legend_right,p=function(){Object.keys(localStorage).forEach((function(e){(e.startsWith(s.a)||e.startsWith("options."))&&localStorage.removeItem(e)}))}}},[[324,1,2]]]); +//# sourceMappingURL=main.e248095a.chunk.js.map \ No newline at end of file diff --git a/web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE b/src/web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE similarity index 100% rename from web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE rename to src/web/gui/v1/static/js/main.e248095a.chunk.js.LICENSE diff --git a/web/gui/v1/static/js/main.e248095a.chunk.js.map b/src/web/gui/v1/static/js/main.e248095a.chunk.js.map similarity index 51% rename from web/gui/v1/static/js/main.e248095a.chunk.js.map rename to src/web/gui/v1/static/js/main.e248095a.chunk.js.map index 12b3dc1023aac5..80a0089978b78b 100644 --- a/web/gui/v1/static/js/main.e248095a.chunk.js.map +++ b/src/web/gui/v1/static/js/main.e248095a.chunk.js.map @@ -1 +1 @@ -{"version":3,"sources":["utils/units-conversion.ts","components/button/button.tsx","domains/chart/components/toolbox-button.tsx","domains/chart/components/resize-handler.tsx","domains/global/actions.ts","hooks/use-user-node-access.ts","utils/index.ts","utils/name-2-id.ts","domains/global/reducer.ts","components/icon.tsx","domains/chart/reducer.ts","domains/chart/utils/transformDataAttributes.ts","domains/chart/actions.ts","components/header/constants.js","main.js","domains/global/constants.ts","components/header/ACLK/use-cloud-connection-status.tsx","components/header/ACLK/cloud-connection-status-modal.tsx","components/header/ACLK/cloud-connection-status.tsx","utils/utils.ts","vendor/d3pie-0.2.1-netdata-3.js","components/header/ACLK/index.ts","utils/server-detection.ts","domains/chart/selectors.ts","utils/date-time.js","domains/dashboard/actions.ts","domains/chart/constants.ts","domains/chart/utils/seconds4human.ts","../public/lib/dygraph-c91c859.min.js","domains/chart/utils/legend-utils.ts","domains/dashboard/reducer.ts","store/root-reducer.ts","utils/api.ts","utils/netdata-sdk/axios-instance.ts","utils/netdata-sdk/metrics-stream.ts","utils/fill-missing-data.ts","components/ui-notification/styled.ts","components/ui-notification/ui-notification.tsx","components/notifications/styled.tsx","components/notifications/notifications.tsx","domains/dashboard/utils/parse-url.ts","domains/chart/sagas.ts","components/space-panel/settings.ts","domains/global/alarms-sagas.ts","domains/global/utils.ts","domains/global/sagas.ts","domains/dashboard/sagas.ts","store/root-saga.ts","store/store.ts","domains/chart/utils/jquery-loader.ts","utils/css-loader.ts","domains/chart/utils/chartLibrariesSettings.ts","domains/chart/utils/get-chart-url-options.ts","utils/biggest-interval-number.ts","domains/chart/components/loader.tsx","domains/chart/utils/get-pan-and-zoom-step.ts","utils/safe-equal-check.ts","domains/chart/utils/formatters.ts","utils/color-hex-2-rgb.ts","domains/chart/components/chart-legend-bottom.styled.ts","domains/chart/components/legendText.js","domains/chart/components/chart-legend-right.tsx","domains/chart/components/chart-legend-bottom.tsx","domains/chart/components/chart-legend.tsx","domains/chart/components/legend-toolbox.tsx","domains/chart/compo
nents/lib-charts/dygraph/utils.ts","domains/chart/hooks/use-proceeded-chart.ts","domains/chart/hooks/useDygraphBadge.js","domains/chart/components/lib-charts/proceeded-chart-disclaimer.tsx","domains/chart/components/lib-charts/alarmBadge.js","domains/chart/components/lib-charts/dygraph-chart.tsx","utils/debounce.ts","domains/chart/components/lib-charts/easy-pie-chart.tsx","domains/chart/components/lib-charts/gauge-chart.tsx","domains/chart/utils/color-luminance.ts","domains/chart/components/lib-charts/sparkline-chart.tsx","domains/chart/utils/d3-loader.ts","domains/chart/utils/google-visualization-loader.ts","domains/chart/components/lib-charts/d3pie-chart.tsx","domains/chart/components/lib-charts/peity-chart.tsx","domains/chart/components/lib-charts/google-chart.tsx","domains/chart/components/lib-charts/text-only.tsx","domains/chart/components/lib-charts/group-box-chart/utilities.ts","domains/chart/components/lib-charts/group-box-chart/events.ts","domains/chart/components/lib-charts/group-box-chart/drawBoxes.ts","domains/chart/components/lib-charts/group-box-chart/getAlign.ts","domains/chart/components/lib-charts/group-box-chart/groupBox.tsx","domains/chart/components/lib-charts/group-box-chart/groupBoxes.tsx","domains/chart/components/lib-charts/group-box-chart/legend.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/getLabel.ts","domains/chart/components/lib-charts/group-box-chart/kubernetes/separator.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/header.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/tabs.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/section.tsx","domains/chart/utils/get-portal-node-styles.ts","hooks/use-common-intersection.ts","domains/chart/components/invisible-searchable-text.tsx","domains/chart/components/disable-out-of-view.tsx","domains/chart/components/chart-container/chart-container.tsx","domains/dashboard/utils/netdata-dashboard.ts","domains/chart/components/lib-charts/group-box-chart/kubernetes/chartOverview.tsx","domains/dashboard/components/migration-modal/use-migration-modal.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/chart.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/item.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/dateSection.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/metrics.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/context.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/list.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/popover.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/index.tsx","domains/chart/components/lib-charts/group-box-chart/kubernetes/transform.ts","domains/chart/components/abstract-chart.tsx","hooks/use-show-value-outside.ts","domains/chart/components/chart.tsx","domains/chart/components/chart-dropdown/styled.ts","domains/chart/components/chart-dropdown/chart-dropdown.tsx","domains/chart/components/chart-spinner/styled.ts","domains/chart/components/chart-spinner/chart-spinner.tsx","domains/chart/components/chart-with-loader/styled.ts","styles/z-index.js","domains/chart/components/chart-with-loader/chart-with-loader.tsx","domains/chart/hooks/use-fetch-new-data-clock.ts","domains/chart/utils/get-chart-pixels-per-point.ts","domains/dashboard/selectors.ts","domains/chart/components/snapshot-loader.tsx","domains/chart/components/portals.tsx","hooks/use-http.ts","domains/d
ashboard/components/modal-portal.tsx","domains/dashboard/components/print-modal/print-modal.tsx","domains/dashboard/components/sidebar-social-media/styled.ts","domains/dashboard/components/sidebar-social-media/sidebar-social-media.tsx","domains/dashboard/components/sidebar-social-media-portal/sidebar-social-media-portal.tsx","components/notifications-container/notifications-container.tsx","components/header/item.js","components/header/node/index.js","components/tooltips/customTooltip.js","components/tooltips/getContent.js","components/tooltips/index.js","components/header/options/index.js","components/app-header/components/versionControl/index.js","components/header/version.js","hooks/useToggle/index.js","hooks/useLocalStorage/index.js","components/mdx-components/dropdown/styled.js","components/date-picker/styled.js","components/mdx-components/dropdown/dropdown.js","components/date-picker/timePeriod.js","components/date-picker/utils.js","components/date-picker/timePeriods.js","components/date-picker/customTimePeriod.js","components/datePicker/datePickerLib.js","components/datePicker/styled.js","components/date-picker/datePickerInput.js","components/date-picker/useLocaleDate.js","components/date-picker/useConvertedDate.js","components/date-picker/datePickerWrapper.js","components/date-picker/periodIndication.js","components/date-picker/accessorElement/utils.js","components/date-picker/accessorElement/container.js","components/date-picker/accessorElement/dateBox.js","components/date-picker/accessorElement/durationBox.js","components/date-picker/accessorElement/index.js","components/date-picker/datePickerDrop.js","components/date-picker/reduxDatePickerContainer.js","components/header/globalControls/container.js","components/header/globalControls/playPausePill/styledPill.js","components/header/globalControls/playPausePill/index.js","components/menus/item.js","components/menus/list.js","components/header/globalControls/playOptions/playOptionsTooltip.js","components/menus/styled.js","components/header/globalControls/playOptions/index.js","components/header/globalControls/index.js","components/header/pill/styled.js","components/header/pill/index.js","components/header/alarms.js","components/header/news/index.js","components/header/timezone/dropdown.js","components/header/timezone/search/searchInput.js","components/header/timezone/search/index.js","components/header/timezone/container.js","components/header/timezone/wrapper.js","components/header/timezone/offsetItem.js","components/header/timezone/timezones.js","components/header/timezone/utils.js","components/header/timezone/index.js","components/auth/signIn/iframe.js","components/auth/signIn/useCheckSignInStatus.js","components/header/signIn/index.js","components/discover-cloud/cloudTab.js","components/discover-cloud/discoverCloudModal.js","components/discover-cloud/contents.js","components/discover-cloud/discoverCloudDrop.js","components/discover-cloud/discoverCloud.js","components/header/index.js","components/sidebar/spaces/expandButton.js","components/auth/signOut/index.js","components/sidebar/spaces/userSettings.js","components/sidebar/spaces/spacesSkeleton.js","components/sidebar/spaces/spacesIframe.js","components/sidebar/spaces/index.js","domains/dashboard/utils/sorting.ts","components/sidebar/space/nodes.js","components/sidebar/space/replicatedNodes/anchor.js","components/sidebar/space/replicatedNodes/node.js","components/sidebar/space/replicatedNodes/index.js","components/sidebar/space/spacePanelIframe.js","components/sidebar/space/promp
ts/signIn/promptContent.js","components/sidebar/space/prompts/signIn/index.js","components/sidebar/space/prompts/offline/noNetwork.js","components/sidebar/space/prompts/offline/index.js","components/sidebar/space/visitedNodes/styled.js","components/sidebar/space/visitedNodes/index.js","utils/truncateMiddle/index.js","components/sidebar/space/index.js","components/sidebar/index.js","components/layout/index.js","utils/map-theme.ts","domains/dashboard/components/migration-modal/migration-modal.js","domains/dashboard/components/migration-manager/migration-manager.tsx","App.tsx","hooks/use-registry.ts","hooks/use-alarms.ts","hooks/use-info.ts","domains/dashboard/hooks/use-charts-metadata.ts","domains/dashboard/hooks/useAlarmFromUrl.js","index.tsx","components/auth/signIn/index.js","utils/hash-utils.ts","utils/post-message.ts","utils/env.ts","domains/dashboard/constants.ts","store/redux-separate-context.ts","utils/is-demo.ts","domains/global/selectors.ts","components/anchor/index.tsx","utils/createRequestAction.ts","domains/global/options.ts"],"names":["currentTemperatureSetting","currentSecondsAsTimeSetting","zeropad","x","seconds2time","seconds","maxTimeUnit","secondsReturn","Math","abs","days","floor","hours","minutes","daysString","hoursString","minutesString","secondsString","toFixed","scalableUnits","pps","Kpps","Mpps","bytes","kilobytes","megabytes","gigabytes","terabytes","Hz","kHz","MHz","GHz","THz","PHz","EHz","ZHz","B","KiB","MiB","GiB","TiB","PiB","KB","MB","GB","TB","PB","EB","EiB","twoFixed","multiplier","value","convertibleUnits","Celsius","Fahrenheit","check","convert","celsius","fahrenheit","milliseconds","microseconds","max","identity","unitsConversionCreator","keys","latest","globalReset","this","get","uuid","min","units","desiredUnits","commonUnitsName","switchUnitsCallback","temperatureSetting","secondsAsTimeSetting","undefined","newConvertFunction","tunits","tdivider","scalableUnitsGroup","Object","forEach","unit","unitDivider","commonUnitsKey","t","divider","commonUnits","console","log","toString","Button","React","ref","children","className","rest","type","classNames","ToolboxButton","iconType","onClick","onDoubleClick","onMouseDown","onTouchStart","popoverContent","popoverTitle","buttonRef","useRef","showHelp","useSelector","selectShowHelp","useEffect","current","window","$","popover","container","animation","html","trigger","placement","delay","show","NETDATA","options","show_help_delay_show_ms","hide","show_help_delay_hide_ms","title","content","LOCALSTORAGE_HEIGHT_KEY_PREFIX","ResizeHandler","chartContainerElement","chartUuid","heightId","isLegendOnBottom","useState","clientHeight","resizeHeight","setResizeHeight","dispatch","useDispatch","setResizeHeightAction","id","handleResize","useCallback","event","preventDefault","intialHeight","eventStartHeight","touches","clientY","setHeight","currentHeight","nextHeight","style","height","heightForPersistance","LEGEND_BOTTOM_SINGLE_LINE_HEIGHT","localStorage","setItem","onMouseMove","e","onTouchMove","document","addEventListener","onTouchEnd","removeEventListener","onMouseEnd","stopPropagation","requestCommonColorsAction","createAction","storeKey","setCommonMinAction","setCommonMaxAction","setGlobalSelectionAction","setGlobalPanAndZoomAction","resetGlobalPanAndZoomAction","setDefaultAfterAction","resetDefaultAfterAction","setGlobalChartUnderlayAction","centerAroundHighlightAction","clearHighlightAction","windowFocusChangeAction","fetchHelloAction","createRequestAction","updatePersonUrlsAction","accessRegistrySuccessAction","st
artAlarmsAction","fetchAllAlarmsAction","updateActiveAlarmsAction","setOptionAction","resetOptionsAction","loadSnapshotAction","chartsMetadataRequestSuccess","setSpacePanelStatusAction","setSpacePanelTransitionEndAction","setAlarmAction","resetRegistry","setGlobalPauseAction","resetGlobalPauseAction","setUTCOffset","setUserNodeAccess","useRequestRefreshOfAccessMessage","sendToChildIframe","SIGN_IN_IFRAME_ID","payload","useUserNodeAccessMessage","useListenToPostMessage","message","name2id","s","replace","initialState","commonColorsKeys","commonMin","commonMax","currentSelectionMasterId","globalPanAndZoom","defaultAfter","isMainJs","getInitialAfterFromWindow","globalChartUnderlay","hoveredX","hasWindowFocus","hasFocus","globalPause","spacePanelIsActive","spacePanelTransitionEndIsActive","registry","cloudBaseURL","hasFetchedInfo","hasFetchedHello","isHelloCallError","hostname","isCloudEnabled","isCloudAvailable","isAgentClaimed","isACLKAvailable","hasStartedInfo","isFetchingHello","fullInfoPayload","machineGuid","personGuid","registryMachines","registryMachinesArray","registryServer","snapshot","alarms","activeAlarms","hasStartedAlarms","alarm","chartsMetadata","isFetching","isFetchingError","data","optionsMergedWithLocalStorage","userNodeAccess","globalReducer","createReducer","getKeyForCommonColorsState","colorsAttribute","commonColorsAttribute","chartContext","hasCustomColors","length","hasLastOnly","array","last","createCommonColorsKeysSubstate","custom","split","init","shouldCopyTheme","assigned","available","themes","colors","on","state","dimensionNames","keyName","subState","currentlyAssignedNr","requestedDimensionsAssigned","mergeAll","filter","dimensionName","map","i","commonMinKey","charts","currentExtreme","values","commonMaxKey","isActive","after","before","masterID","warn","highlightMargin","resetPanAndZoom","hasFocusNow","forcePlay","stop_updates_when_focus_is_lost","utcOffset","request","success","failure","CLOUD_BASE_URL_DISABLED","fetchInfoAction","key","clearLocalStorage","getOptionsMergedWithLocalStorage","parsedData","dataKey","uncompressed","uncompress","JSON","parse","reduce","acc","obj","typeToClassName","left","reset","right","zoomIn","zoomOut","resize","lineChart","areaChart","noChart","loading","noData","Icon","initialSingleState","chartData","chartId","chartMetadata","chartPanAndZoom","fetchDataParams","isRemotelyControlled","viewRange","isFetchingData","isFetchDataFailure","isFetchDetailsFailure","isFetchingDetails","snapshotDataIsFetching","snapshotDataIsError","snapshotData","chartReducer","getSubstate","fetchDataAction","chart","fetchDataCancelAction","substate","useNewKeysOnlyIfDifferent","fetchDataForSnapshotAction","snapshotExportResetAction","pick","fetchChartAction","setChartPanAndZoomAction","shouldForceTimeRange","resetChartPanAndZoomAction","SYNC_PAN_AND_ZOOM","assoc","clearChartStateAction","omit","getDataAttribute","element","defaultValue","hasAttribute","getAttribute","test","getAttributesStatic","node","mapObjIndexed","attribute","getDataAttributeBoolean","host","httpMethod","chartLibrary","chartDefaults","library","width","legend","legendPosition","unitsCommon","unitsDesired","aggrMethod","labels","postGroupBy","postAggregationMethod","dimensionsAggrMethod","aggrGroups","selectedChart","filteredRows","groupBy","nodeIDs","commonColors","decimalDigits","dimensions","selectedDimensions","forceTimeWindow","appendOptions","gtime","method","overrideOptions","pixelsPerPoint","points","hideResizeHandler","detectResize","dygraphType","dygraphValueRange","dygr
aphTheme","dygraphSmooth","dygraphColors","dygraphRightGap","dygraphShowRangeSelector","dygraphShowRoller","dygraphTitle","dygraphTitleHeight","dygraphLegend","dygraphLabelsDiv","dygraphLabelsSeparateLine","dygraphIncludeZero","dygraphShowZeroValues","dygraphShowLabelsOnHighLight","dygraphHideOverlayOnMouseOut","dygraphXRangePad","dygraphYRangePad","dygraphYLabelWidth","dygraphStrokeWidth","dygraphStrokePattern","dygraphDrawPoints","dygraphDrawGapEdgePoints","dygraphConnectSeparatedPoints","dygraphPointSize","dygraphStepPlot","dygraphStrokeBorderColor","dygraphStrokeBorderWidth","dygraphFillGraph","dygraphFillAlpha","dygraphStackedGraph","dygraphStackedGraphNanFill","dygraphAxisLabelFontSize","dygraphAxisLineColor","dygraphAxisLineWidth","dygraphDrawGrid","dygraphGridLinePattern","dygraphGridLineWidth","dygraphGridLineColor","dygraphMaxNumberWidth","dygraphSigFigs","dygraphDigitsAfterDecimal","dygraphHighlighCircleSize","dygraphHighlightSeriesOpts","dygraphHighlightSeriesBackgroundAlpha","dygraphXPixelsPerLabel","dygraphXAxisLabelWidth","dygraphDrawXAxis","dygraphYPixelsPerLabel","dygraphYAxisLabelWidth","dygraphDrawYAxis","dygraphDrawAxis","easyPieChartMinValue","easyPieChartMaxValue","easyPieChartBarColor","easyPieChartTrackColor","easyPieChartScaleColor","easyPieChartScaleLength","easyPieChartLineCap","easyPieChartLineWidth","easyPieChartTrackWidth","easyPieChartSize","easyPieChartRotate","easyPieChartAnimate","easyPieChartEasing","gaugeMinValue","gaugeMaxValue","gaugePointerColor","gaugeStrokeColor","gaugeStartColor","gaugeStopColor","gaugeGenerateGradient","sparklineType","sparklineLineColor","sparklineFillColor","sparklineChartRangeMin","sparklineChartRangeMax","sparklineComposite","sparklineEnableTagOptions","sparklineTagOptionPrefix","sparklineTagValuesAttribute","sparklineDisableHiddenCheck","sparklineDefaultPixelsPerValue","sparklineSpotColor","sparklineMinSpotColor","sparklineMaxSpotColor","sparklineSpotRadius","sparklineValueSpots","sparklineHighlightSpotColor","sparklineHighlightLineColor","sparklineLineWidth","sparklineNormalRangeMin","sparklineNormalRangeMax","sparklineDrawNormalOnTop","sparklineXvalues","sparklineChartRangeClip","sparklineChartRangeMinX","sparklineChartRangeMaxX","sparklineDisableInteraction","sparklineDisableTooltips","sparklineOnHover","sparklineDisableHighlight","sparklineHighlightLighten","sparklineHighlightColor","sparklineTooltipContainer","sparklineTooltipClassname","sparklineTooltipFormat","sparklineTooltipPrefix","sparklineTooltipSuffix","sparklineTooltipSkipNull","sparklineTooltipValueLookups","sparklineTooltipFormatFieldlist","sparklineTooltipFormatFieldlistKey","sparklineNumberFormatter","sparklineNumberDigitGroupSep","sparklineNumberDecimalMark","sparklineNumberDigitGroupCount","sparklineAnimatedZooms","d3pieTitle","d3pieSubtitle","d3pieFooter","d3pieTitleColor","d3pieTitleFontsize","d3pieTitleFontweight","d3pieTitleFont","d3PieSubtitleColor","d3PieSubtitleFontsize","d3PieSubtitleFontweight","d3PieSubtitleFont","d3PieFooterColor","d3PieFooterFontsize","d3PieFooterFontweight","d3PieFooterFont","d3PieFooterLocation","d3PiePieinnerradius","d3PiePieouterradius","d3PieSortorder","d3PieSmallsegmentgroupingEnabled","d3PieSmallsegmentgroupingValue","d3PieSmallsegmentgroupingValuetype","d3PieSmallsegmentgroupingLabel","d3PieSmallsegmentgroupingColor","d3PieLabelsOuterFormat","d3PieLabelsOuterHidewhenlessthanpercentage","d3PieLabelsOuterPiedistance","d3PieLabelsInnerFormat","d3PieLabelsInnerHidewhenlessthanpercentage","d3PieLabelsMainLabelColor","d3PieLab
elsMainLabelFont","d3PieLabelsMainLabelFontsize","d3PieLabelsMainLabelFontweight","d3PieLabelsPercentageColor","d3PieLabelsPercentageFont","d3PieLabelsPercentageFontsize","d3PieLabelsPercentageFontweight","d3PieLabelsValueColor","d3PieLabelsValueFont","d3PieLabelsValueFontsize","d3PieLabelsValueFontweight","d3PieLabelsLinesEnabled","d3PieLabelsLinesStyle","d3PieLabelsLinesColor","d3PieLabelsTruncationEnabled","d3PieLabelsTruncationTruncatelength","d3PieMiscColorsSegmentstroke","d3PieMiscGradientEnabled","d3PieMiscColorsPercentage","d3PieMiscGradientColor","d3PieCssprefix","peityStrokeWidth","textOnlyDecimalPlaces","textOnlyPrefix","textOnlySuffix","getAttributes","attributesStatic","showValueOf","showValueOfAttribues","Array","from","attributes","name","startsWith","merged","isEmpty","getAttributesDynamic","defaultAttributes","initialLegendRight","localeDateString","localeTimeString","updateLocaleFunctions","newLocaleDateString","newLocaleTimeString","netdataShowAlarms","netdataRegistry","netdataServer","netdataServerStatic","netdataCheckXSS","reduxStore","escapeUserInputHTML","setOption","getFromRegistry","prop","selectRegistry","getState","verifyURL","urlOptions","hash","theme","help","mode","update_always","pan_and_zoom","server","getHashParam","highlight","highlight_after","highlight_before","nowelcome","show_alarms","family","utc","hasProperty","property","genHash","forReload","parseHash","variables","location","len","p","decodeURIComponent","booleans","numeric","parseInt","origin","pathname","welcome","Date","now","hashUpdate","history","replaceState","netdataPanAndZoomCallback","status","netdataSnapshotData","updateUtcParam","netdataHighlightCallback","round","d1","d2","getElementById","innerHTML","seconds4human","tooltip","clearHighlight","showHighlight","localStorageTested","localStorageTest","Storage","removeItem","loadLocalStorage","ret","getItem","error","saveLocalStorage","getTheme","def","setTheme","netdataTheme","updateTheme","netdataShowHelp","naturalSortChunkify","j","tz","y","n","charAt","charCodeAt","m","naturalSortCompare","a","b","aa","toLowerCase","bb","c","Number","d","saveTextToClient","filename","blob","Blob","url","URL","createObjectURL","link","createElement","setAttribute","el","appendChild","setTimeout","removeChild","revokeObjectURL","click","saveObjectToClient","stringify","netdataURL","indexOf","substring","netdataReload","reload","gotoHostedModalHandler","gotoServerValidateRemaining","gotoServerMiddleClick","gotoServerStop","gotoServerValidateUrl","guid","penalty","finalURL","registryHello","machine_guid","open","modal","gotoServerModalHandler","checked","alternateUrls","count","registrySearch","urls","switchRegistryModalHandler","notifyForSwitchRegistry","newPersonGuid","ajax","encodeURIComponent","serverDefault","async","cache","headers","xhrFields","withCredentials","done","xss","checkAlways","fail","deleteRegistryGuid","deleteRegistryUrl","deleteRegistryModalHandler","notifyForDeleteRegistry","responseEl","registryDelete","result","menus","submenu_names","version","release_channel","hosts","duration","update_every","chartsPerRow","chartsHeight","total","prioritySort","priority","sortObjectByPriority","object","idx","sorted","hasOwnProperty","push","sort","scrollToId","offset","animate","scrollTop","top","customDashboard","menu","submenu","context","netdataDashboard","sparklines_registry","os","sparkline","prefix","dimension","suffix","gaugeChart","anyAttribute","attr","menuTitle","menu_pattern","slice","menuIcon","menuInfo","menuHeight","submenuTitle","s
ubmenuInfo","submenuHeight","relative","contextInfo","contextValueRange","valueRange","contextHeight","contextDecimalDigits","enrichChartData","parts","tmp","match","endsWith","lastIndexOf","headMain","head","generateHeadCharts","hcharts","hi","hlen","renderPage","div","pcent_width","sidebar","mainhead","main","hasChartsOnBottom","chartAdditionalHeight","menuid","icon","info","shtml","mhead","sub","submenus","si","slen","submenuid","chtml","ci","clen","chartHeight","chartCommonMax","isMemoryModeDbEngine","memory_mode","dimensions_count","toLocaleString","charts_count","alarms_count","printPage","finalizePage","renderChartsAndMenu","menu_key","handleLoadJs","promise","callback","catch","alert","then","loadClipboard","clipboard","loadBootstrapTable","Promise","all","loadBootstrapSlider","slider","default","Slider","loadLzString","loadPako","pako","alarmsUpdateModal","active","footer","alarm_families","count_active","count_all","families","families_sort","charts_by_name","arr","unshift","families_sorted","fc","active_family_added","alarm_to_html","enableTooltipsAndPopovers","alarm_family_show","$accordion","target","bootstrapTable","pagination","pageSize","showPaginationSwitch","search","searchTimeOut","searchAlign","showColumns","showExport","exportDataType","exportOptions","fileName","onClickRow","row","scrollToChartAfterHidingModal","when","rowStyle","classes","showFooter","showHeader","showRefresh","showToggle","sortable","silentSort","columns","field","valign","titleTooltip","formatter","index","timestamp4human","align","switchable","visible","alarmid4human","negative_suffix","space","timestamp","toLocaleDateString","toLocaleTimeString","full","has_alarm","crit","badge_url","action_buttons","recipient","last_status_change","lookup_after","lookup_dimensions","lookup_method","lookup_before","lookup_options","alarm_lookup_explain","calc","green","red","warn_repeat_every","crit_repeat_every","delay_up_duration","delay_down_duration","delay_multiplier","delay_max_duration","exec","source","initializeDynamicDashboardWithData","timezone","clipboardCopy","text","writeText","clipboardCopyBadgeEmbed","initializeConfig","custom_info","loadDashboardInfo","memoizeWith","dataType","loadCustomDashboardInfo","always","extend","initializeChartsAndCustomInfo","chartRegistry","downloadAll","initializeDynamicDashboard","newReduxStore","netdataPrepCallback","innerText","versionLog","msg","versionsMatch","v1","v2","s1","s2","n1","n2","getGithubLatestVersion","channel","tag_name","checkForUpdateByVersion","force","sha2","showPageFooter","display","parseDom","jsonStringifyFn","jsonParseFn","str","eval","xssModalDisableXss","enabled","enabled_for_data","xssModalKeepXss","notifyForUpdate","sha1","save","printPreflight","screen","snapshotOptions","bytes_per_chart","compressionDefault","compressions","bytes_per_point_memory","bytes_per_point_disk","compress","compressed_length","btoa","deflate","to","inflate","atob","LZString","compressToUTF16","decompressFromUTF16","compressToBase64","decompressFromBase64","compressToEncodedURIComponent","decompressFromEncodedURIComponent","loadSnapshotModalLog","tmpSnapshotData","loadSnapshotPreflightFile","file","string","fr","FileReader","onload","date_after","after_ms","date_before","before_ms","charts_ok","charts_failed","compression","data_size","netdata_version","data_points","duration_ms","snapshot_version","comments","removeClass","addClass","readAsText","loadSnapshotPreflightEmpty","loadSnapshot","highlight_after_ms","highlight_before_ms","loadSnapshotDragAndDropInitializ
ed","loadSnapshotDragAndDropSetup","originalEvent","dataTransfer","files","item","loadSnapshotPreflight","saveSnapshotStop","saveSnapshotCancel","stopSnapshotModeAction","saveSnapshotModalInitialized","saveSnapshotModalSetup","saveSnapshotModalInit","find","focus","saveSnapshotModalLog","saveSnapshotModalShowExpectedSize","saveSnapshotViewDuration","saveSnapshotSelectedSecondsPerPoint","sizemb","saveSnapshotCompression","memmb","saveSnapshotSetCompression","saveSnapshotSlider","browser_timezone","reduxState","selectDefaultAfter","start_ms","selectGlobalPanAndZoom","Boolean","start_date","yyyymmddhhssmm","getFullYear","getMonth","getDate","getHours","getMinutes","getSeconds","view","destroy","ticks","step","scale","saveSnapshot","eltxt","saveData","update_every_ms","pack_api1_v1_chart_data","chartDataUniqueID","cstr","clearPanAndZoom","startSnapshotModeAction","dataPoints","saveSnapshotRestore","css","size","chartUpdated","chartsCount","selectAmountOfCharts","chartsOk","selectAmountOfSnapshotsFetched","chartsFailed","selectAmountOfSnapshotsFailed","pcent","Intl","DateTimeFormat","resolvedOptions","timeZone","getOption","option","createSelectOption","dashboardSettingsSetup","update_options_modal","sync_option","self","bootstrapToggle","units_sync_option","temp_sync_option","change","CHART_DIV_ID_PREFIX","CHART_DIV_OFFSET","scrollDashboardTo","chartElement","offsetTop","querySelector","modalHiddenCallback","animated","alarmDate","alarmStatus","includes","runOnceOnDashboardLastRun","runOnceOnDashboardWithjQuery","body","scrollspy","scrollPos","modal_depth","modal_shown","netdata_paused_on_modal","scrollspyOffset","scrollY","overflow","position","pauseCallback","pause","unpause","affix","bottom","removeAttr","Ps","initialize","wheelSpeed","wheelPropagation","swipePropagation","minScrollbarLength","maxScrollbarLength","useBothWheelAxes","suppressScrollX","suppressScrollY","scrollXMarginOffset","scrollYMarginOffset","fn","shorten","settings","config","showChars","minHideChars","ellipsesText","moreText","lessText","onLess","onscroll","onMore","errMsg","off","$this","hasClass","parent","prev","each","substr","inTag","bag","countChars","openTags","tagName","r","shift","isDemo","onresizeCallback","update","resetDashboardOptions","selected_server_timezone","isProperTimezone","userSetServerTimezone","timezoneOption","netdataCallback","showSignInModal","explicitlySignIn","explicitlySignInAction","MASKED_DATA","INFO_POLLING_FREQUENCY","alarmStatuses","useCloudConnectionStatus","userStatus","nodeStatus","date","useMemo","header","strong","bullets","rel","href","CTA1","makeCloudConnectionStatusInfo","CloudConnectionStatusModal","closeModal","onRefresh","isCTA1Disabled","handleClickedCTA1","background","margin","onClose","padding","column","gap","as","bullet","data-testid","utmParameters","campaign","data-ga","disabled","textTransform","label","Text","sx","fontWeight","lineHeight","CloudConnectionStatus","selectUserNodeAccess","cloudEnabled","selectIsCloudEnabled","isModalOpen","setModalOpen","cloudConnectionStatusInfo","nodeLiveness","documentElement","openModal","flavour","isTimestamp","NETDATA_REGISTRY_SERVER","MS_IN_SECOND","getIframeSrc","path","utmUrlSuffix","getBoundingClientRect","obj1","obj2","equals","callAll","fns","arg","define","_uniqueIDCounter","defaultSettings","color","fontSize","font","subtitle","titleSubtitlePadding","canvasHeight","canvasWidth","pieInnerRadius","pieOuterRadius","sortOrder","ignoreSmallSegments","valueType","smallSegmentGrouping","outer","format","hideWhenLessThanPercentag
e","pieDistance","inner","mainLabel","percentage","decimalPlaces","lines","truncation","truncateLength","effects","load","effect","speed","pullOutSegmentOnClick","highlightSegmentOnMouseover","highlightLuminosity","tooltips","placeholderParser","styles","fadeInSpeed","backgroundColor","backgroundOpacity","borderRadius","misc","segments","segmentStroke","gradient","canvasPadding","pieCenterOffset","cssPrefix","callbacks","onMouseoverSegment","onMouseoutSegment","onClickSegment","validate","pie","d3","HTMLElement","SVGElement","helpers","isArray","isNaN","addSVGSpace","svg","select","append","shuffleArray","tmpVal","randomIndex","currentIndex","random","processObj","is","getDimensions","w","h","getBBox","rectIntersect","r1","r2","getColorShade","hex","lum","String","newHex","initSegmentColors","finalColors","applySmallSegmentGrouping","totalSize","math","getTotalPieSize","newData","groupedData","totalGroupedData","isGrouped","showPoint","isFunction","functionToCheck","call","o","prototype","src","copy","copyIsArray","clone","arguments","deep","hasOwn","class2type","jQuery","isWindow","isNumeric","parseFloat","isFinite","isPlainObject","nodeType","constructor","toRadians","degrees","PI","toDegrees","radians","computePieRadius","textComponents","headerHeight","exists","innerRadius","percent","outerRadius","smallestDimension","pieDistanceSpace","sortPieData","getPieTranslateCenter","pieCenter","calculatePieCenter","hasTopTitle","hasTopSubtitle","headerOffset","footerOffset","rotate","xm","ym","cos","sin","translate","rads","pointIsInArc","pt","ptData","d3Arc","theta1","startAngle","theta2","endAngle","dist","angle","atan2","add","section","sectionDisplayType","include","getIncludes","outerLabel","insert","labelGroup","__labels","selectAll","enter","formatterContext","part","positionLabelElements","dims","computeLabelLinePositions","lineCoordGroups","computeLinePosition","x2","y2","x3","y3","getSegmentAngle","midpoint","originCoords","heightOffset","outerLabelGroupData","quarter","startOfLabelX","startOfLabel","addLabelLines","lineGroup","lineFunction","line","curve","curveBasis","positionLabelGroups","pieCenterCopy","newCoords","xOffset","yOffset","val","addMainLabel","addValue","addPercentage","computeOuterLabelCoords","getIdealOuterLabelPositions","resolveOuterLabelCollisions","checkConflict","currIndex","direction","curr","currIndexHemisphere","hs","nextIndex","currLabelGroup","examinedLabelGroup","labelHeights","center","lineLength","heightChange","isLabelHidden","adjustLabelPos","lastCorrectlyPositionedLabel","xDiff","yDiff","newXPos","newYPos","sqrt","labelGroupNode","labelGroupDims","originalX","originalY","hemisphere","effectMap","easeLinear","easeBounce","easeSin","easeElastic","easeBack","easeQuad","easeCircle","easeExp","create","pieChartElement","arc","addGradients","grads","addSegmentEventHandlers","merge","segment","currentEl","isExpanded","onSegmentEvent","closeSegment","openSegment","segColor","tt","showTooltip","moveTooltip","hideTooltip","func","expanded","isOpeningSegment","maybeCloseOpenSegment","transition","ease","centroid","pullOutSize","currentlyOpenSegment","getCentroid","bbox","opts","fullValue","compounded","currValue","offscreenCoord","addTitle","__title","positionTitle","headerLocation","addSubtitle","__subtitle","positionSubtitle","getHeaderHeight","addFooter","__footer","positionFooter","footerLocation","footerWidth","totalTitleHeight","footerPlusPadding","addTooltips","caption","replacePlaceholders","currentTooltip","mouseCoords","mouse","parentNode","replacements
","placeholder","d3pie","_setupData","_init","recreate","redraw","getOpenSegment","updateProp","propKey","oldVal","oldValue","dp","_getPercentage","totalPercentage","relativeAmount","currentScript","isTestingEnv","getDefaultServer","isDevelopmentEnv","RegExp","getPathFromScriptSource","cond","T","getElementsByTagName","alwaysEndWithSlash","pipe","concat","serverStatic","selectChartsState","selectSingleChartState","createSelector","_","chartsState","selectChartData","chartState","selectChartMetadataFromExplicitCall","selectChartMetadata","selectChartMetadataFromChartsCall","metadataFromAll","metadataFromSingleCall","selectIsFetchingDetails","makeSelectChartMetadataRequest","selectChartViewRange","selectChartIsFetchingData","selectChartFetchDataParams","selectResizeHeight","selectChartPanAndZoom","selectAmountOfFetchedCharts","some","hasCompletedFetching","selectNameOfAnyFetchingChart","isSupportingDateTimeFormat","navigator","language","narrowToDate","localeDateStringNative","localeTimeStringNative","xAxisTimeStringNative","localeMatcher","formatMatcher","weekday","year","month","day","dateFormat","locale","long","isTime","secs","hourCycle","dateStyle","timeStyle","getOptions","getTimezone","useDateTime","selectTimezoneSetting","selectUTCOffsetSetting","xAxisTimeString","xAxisDateString","showSignInModalAction","isSignedInAction","setOfflineAction","fallbackUpdateTimeInterval","panAndZoomDelay","totalSeconds","defaultOptions","hour","minute","second","and","strings","pop","join","module","exports","require","l","Error","code","u","1","v","g","f","run","clearTimeout","fun","nextTick","apply","browser","env","argv","versions","addListener","once","removeListener","removeAllListeners","emit","prependListener","prependOnceListener","listeners","binding","cwd","chdir","umask","2","defineProperty","__esModule","extractSeries","rollingAverage","3","pow","4","5","onPointsCreated_","y_top","NaN","y_bottom","yval_minus","yval_plus","getExtremeYValues","onLineEvaluated","calcYNormal_","6","X","Y","EXTRAS","seriesToPoints","xval","yval","canvasx","canvasy","7","8","9","dygraph_","layout","elementContext","height_","width_","isCanvasSupported","area","getPlotArea","canvas_ctx_","beginPath","rect","clip","hidden_ctx_","clear","clearRect","render","_updatePoints","_renderLineChart","_getIteratorPredicate","_predicateThatSkipsEmptyPoints","_drawStyledLine","dygraph","getBooleanOption","setName","isArrayLike","createIterator","drawingContext","setLineDash","_drawSeries","_drawPointsOnLine","restore","strokeStyle","lineWidth","array_","end_","predicate_","start_","moveTo","lineTo","nextIdx_","next","hasNext","peek","stroke","setNames","colorsMap_","strokeWidth","axis","axisPropertiesForSeries","plotArea","seriesIndex","seriesCount","singleSeriesName","allSeriesPoints","_Plotters","linePlotter","_linePlotter","fillPlotter","_fillPlotter","errorPlotter","_errorPlotter","getNumericOption","Circles","DEFAULT","toRGB_","fillStyle","closePath","fill","_fastCanvasProxy","splice","_count","getLabels","visibility","getColors","A","minyval","yscale","O","D","E","L","S","P","C","M","FORCE_FAST_PROXY","N","F","isOK","k","y_stacked","10","highlightCircleSize","highlightSeriesOpts","highlightSeriesBackgroundAlpha","highlightSeriesBackgroundColor","labelsSeparateLines","labelsShowZeroValues","labelsKMB","labelsKMG2","showLabelsOnHighlight","digitsAfterDecimal","maxNumberWidth","sigFigs","strokeBorderWidth","strokeBorderColor","axisTickSize","axisLabelFontSize","rightGap","showRoller","xValueParser","delimiter","sigma","erro
rBars","fractions","wilsonInterval","customBars","fillGraph","fillAlpha","connectSeparatedPoints","stackedGraph","stackedGraphNaNFill","hideOverlayOnMouseOut","stepPlot","xRangePad","yRangePad","drawAxesAtZero","titleHeight","xLabelHeight","yLabelWidth","axisLineColor","axisLineWidth","gridLineWidth","axisLabelWidth","gridLineColor","interactionModel","defaultModel","animatedZooms","showRangeSelector","rangeSelectorHeight","rangeSelectorPlotStrokeColor","rangeSelectorPlotFillGradientColor","rangeSelectorPlotFillColor","rangeSelectorBackgroundStrokeColor","rangeSelectorBackgroundLineWidth","rangeSelectorPlotLineWidth","rangeSelectorForegroundStrokeColor","rangeSelectorForegroundLineWidth","rangeSelectorAlpha","showInRangeSelector","plotter","plugins","axes","pixelsPerLabel","axisLabelFormatter","dateAxisLabelFormatter","valueFormatter","dateValueFormatter","drawGrid","drawAxis","independentTicks","ticker","dateTicker","numberValueFormatter","numberAxisLabelFormatter","numericTicks","11","draw","date_graph","setSelection","getSelection","layout_","12","dragEndX","dragGetX_","dragEndY","dragGetY_","dragStartX","dragStartY","lastx_","treatMouseOpAsClick","regionWidth","regionHeight","isPanning","xAxisRange","getOptionForAxis","initialLeftmostDate","log10","dateRange","xUnitsPerPixel","plotter_","xAxisExtremes","toDomXCoord","toDataXCoord","boundedDates","axes_","extremeRange","toDomYCoord","toDataYCoord","boundedValues","is2DPan","yAxisRange","attributes_","getForAxis","initialTopValue","dragValueRange","unitsPerPixel","dateWindow_","LOG_SCALE","drawGraph_","endPan","maybeTreatMouseOpAsClick","startZoom","isZooming","zoomMoved","moveZoom","dragDirection","VERTICAL","HORIZONTAL","drawZoomRect_","prevDragDirection","prevEndX","prevEndY","getFunctionOption","MAX_VALUE","selPoints_","cancelable","point","cascadeEvents_","pts","endZoom","clearZoomRect_","getArea","doZoomX_","cancelNextDblclick","doZoomY_","startTouch","startTimeForDoubleTapMs","pageX","pageY","dataX","dataY","initialTouches","initialPinchCenter","touchDirections","initialRange","moveTouch","yAxisRanges","endTouch","changedTouches","getTime","doubleTapX","screenX","doubleTapY","screenY","resetZoom","findPos","canvas_","offsetWidth","offsetHeight","mousedown","button","initializeMouseDown","altKey","shiftKey","startPan","movePan","addAndTrackEvent","removeEvent","willDestroyContextMyself","touchstart","touchmove","touchend","dblclick","nonInteractiveModel_","mouseup","dragIsPanInteractionModel","mousemove","13","annotations","yAxes_","xTicks_","yTicks_","addDataset","area_","computePlotArea","chart_div","graphDiv","reserveSpaceLeft","reserveSpaceRight","reserveSpaceTop","reserveSpaceBottom","chartRect","setAnnotations","setXTicks","setYAxes","evaluate","_xAxis","_evaluateLimits","_evaluateLineCharts","_evaluateLineTicks","_evaluateAnnotations","minval","maxval","xlogrange","xlogscale","computedValueRange","maxyval","yrange","ylogrange","ylogscale","calcXNormal_","getForSeries","yval_stacked","dataHandler_","xticks","label_v","toPercentXCoord","pos","has_tick","yticks","toPercentYCoord","series","annotated_points","annotation","removeAllDatasets","setPointsLengths","setPointsOffsets","14","_process","15","xAxis_","series_","global_","attrs_","user_","user_attrs_","labels_","highlightSeries_","reparseSeries","AXIS_STRING_MAPPINGS_","y1","Y1","Y2","axisToIndex_","yAxis","getGlobalUser_","getGlobalDefault_","getHighlightSeries","numAxes","axisForSeries","axisOptions","seriesForAxis","seriesNames","16","numericLinearTicks","binarySearch","
tickValue","pixel_coord","reverse","ceil","MILLISECONDLY","TWO_MILLISECONDLY","FIVE_MILLISECONDLY","TEN_MILLISECONDLY","FIFTY_MILLISECONDLY","HUNDRED_MILLISECONDLY","FIVE_HUNDRED_MILLISECONDLY","SECONDLY","TWO_SECONDLY","FIVE_SECONDLY","TEN_SECONDLY","THIRTY_SECONDLY","MINUTELY","TWO_MINUTELY","FIVE_MINUTELY","TEN_MINUTELY","THIRTY_MINUTELY","HOURLY","TWO_HOURLY","SIX_HOURLY","DAILY","TWO_DAILY","WEEKLY","MONTHLY","QUARTERLY","BIANNUAL","ANNUAL","DECADAL","CENTENNIAL","NUM_GRANULARITIES","Granularity","datefield","spacing","DateAccessorsUTC","DateAccessorsLocal","getMilliseconds","getDay","makeDate","getDateAxis","17","toExponential","toPrecision","nt","rt","ot","st","cancelEvent","cancelBubble","cancel","returnValue","hsvToRGB","pageXOffset","scrollLeft","pageYOffset","px","py","isValidPoint","floatFormat","hmsString_","dateString_","round_","dateParser","dateStrToMillis","updateDeep","Node","nodeName","isDateLike","createCanvas","getContextPixelRatio","devicePixelRatio","webkitBackingStorePixelRatio","mozBackingStorePixelRatio","msBackingStorePixelRatio","oBackingStorePixelRatio","backingStorePixelRatio","Iterator","repeatAndCleanup","et","isPixelChangingOptionList","at","detectLineDelimiter","isNodeContainedBy","getComputedStyle","getContext","parseFloat_","G","lt","W","LN_TEN","U","logRangeFraction","DOTTED_LINE","DASHED_LINE","DOT_DASH_LINE","addEvent","getUTCFullYear","getUTCMonth","getUTCDate","getUTCHours","getUTCMinutes","getUTCSeconds","getUTCMilliseconds","getUTCDay","UTC","requestAnimationFrame","webkitRequestAnimationFrame","mozRequestAnimationFrame","oRequestAnimationFrame","msRequestAnimationFrame","requestAnimFrame","annotationClickHandler","annotationDblClickHandler","annotationMouseOutHandler","annotationMouseOverHandler","clickCallback","drawCallback","drawHighlightPointCallback","drawPoints","drawPointCallback","highlightCallback","labelsDiv","panEdgeFraction","pixelsPerYLabel","pointClickCallback","pointSize","underlayCallback","unhighlightCallback","zoomCallback","it","18","Symbol","iterator","return","TypeError","R","H","Z","q","Q","__init__","NAME","VERSION","DEFAULT_ROLL_PERIOD","DEFAULT_WIDTH","DEFAULT_HEIGHT","ANIMATION_STEPS","ANIMATION_DURATION","Plotters","addedAnnotationCSS","is_initial_draw_","readyFns_","copyUserAttrs_","maindiv_","file_","rollPeriod_","rollPeriod","previousVerticalX_","fractions_","dateWindow","annotations_","clientWidth","boundaryIds_","setIndexByName_","datasetIndex_","registeredEvents_","eventListeners_","createInterface_","plugins_","PLUGINS","plugin","activate","events","pluginOptions","createDragInterface_","defaultPrevented","propagationStopped","getPluginInstance_","isZoomed","attr_","getStringOption","optionsViewForAxis_","numRows","rawData_","yAxisExtremes","gatherDatasets_","rolledSeries_","extremes","computeYAxisRanges_","toDomCoords","toDataCoords","numColumns","getValue","textAlign","hidden_","createPlotKitCanvas_","resizeElements_","mouseEventElement_","createMouseEventElement_","mouseMoveHandler_","mouseMove_","mouseOutHandler_","fromElement","relatedTarget","toElement","mouseOut_","resizeHandler_","removeTrackedEvents_","hasChildNodes","firstChild","setColors_","colors_","getPropertiesForSeries","createRollInterface_","roller_","onchange","adjustRoll","tarp","cover","draggingDate","draggingValue","uncover","fillRect","currentZoomRectArgs_","doZoomXDates_","doAnimatedZoom","zoomAnimationFunction","clearSelection","eventToDomCoords","offsetX","offsetY","findClosestRow","findClosestPoint","seriesName","findStackedPoint","getLe
ftBoundary_","isSeriesLocked","lastRow_","highlightSet_","animateSelection_","fadeLevel","animateId","updateSelection_","selectedRow","selectedX","selectedPoints","lockedSet_","loadedEvent_","parseCSV_","cascadeDataDidUpdateEvent_","predraw_","addXTicks_","getHandlerClass_","computeYAxes_","drawingTimeMs_","PointType","stackPoints_","boundaryIds","renderGraph_","canvas","readyFired_","detectTypeFromString_","setXAxisOptions_","parseArray_","parseDataTable_","getNumberOfColumns","getNumberOfRows","getColumnType","getColumnLabel","shortText","fromCharCode","getColumnRange","XMLHttpRequest","ActiveXObject","onreadystatechange","readyState","responseText","send","updateOptions","resize_lock","setVisibility","indexFromSetName","getRowForX","ready","elem","GVizChart","defaultInteractionModel","nonInteractiveModel","Plugins","Legend","Axes","Annotations","ChartLabels","Grid","RangeSelector","DataHandlers","DefaultHandler","BarsHandler","CustomBarsHandler","DefaultFractionHandler","ErrorBarsHandler","FractionsBarsHandler","19","tarps","zIndex","20","clearChart","didDrawChart","detachLabels","tickHeight","cssClass","createTextNode","attachAtBottom","borderColor","tickColor","tickWidth","21","xlabels_","ylabels_","willDrawChart","22","title_div_","xlabel_div_","ylabel_div_","y2label_div_","detachLabels_","23","24","legend_div_","is_generated_div_","one_em_width_","deselect","predraw","generateLegendHTML","dashHTML","labelHTML","isVisible","xHTML","yHTML","isHighlighted","defaultFormatter","25","hasTouchInterface_","TouchEvent","isMobileDevice_","appVersion","interfaceCreated_","getOption_","reserveSpace_","renderStaticLayer_","renderInteractiveLayer_","bgcanvas_","fgcanvas_","leftZoomHandle_","rightZoomHandle_","setDefaultOption_","createCanvases_","createZoomHandles_","initInteraction_","addToGraph_","graphDiv_","removeFromGraph_","updateVisibility_","resize_","drawStaticLayer_","isChangingRange_","placeZoomHandles_","drawInteractiveLayer_","canvasRect_","bgcanvas_ctx_","fgcanvas_ctx_","cursor","Image","cloneNode","leftHandlePos","rightHandlePos","clientX","srcElement","getZoomHandleStatus_","targetTouches","opera","drawMiniPlot_","computeCombinedSeriesAndLimits_","yMax","yMin","createLinearGradient","addColorStop","MIN_VALUE","legendPluginModuleString","withContext","legendResolutionTooltip","collected","viewed","view_update_every","getNewSelectedDimensions","newSelectedDimensions","allDimensions","clickedDimensionName","isModifierKeyPressed","enabledDimensions","isCurrentlySelected","isSnapshotMode","snapshotCharts","snapshotDataPoints","isSignedIn","offline","dashboardReducer","combineReducers","globalKey","chartKey","dashboardKey","axiosInstance","axios","Pragma","getFetchStream","concurrentCallsLimit","fetch$","Subject","resetFetch$","handler","mergeMap","params","onErrorCallback","onSuccessCallback","cancelTokenSource","timeout","cancelToken","token","tap","responseData","catchError","empty","startWith","switchMap","subscribe","fillMissingData","nrOfPointsToFill","viewUpdateEvery","firstAddedTimestamp","emptyPoint","tail","nulls","addPointsDygraph","emptyArray","transformResults","shouldRevertFlip","dataResult","pointData","sum","Container","styled","getSizeBy","SideContent","ContentContainer","HeaderText","getColor","ContentText","TextSmall","UINotification","props","leftContent","rightContent","renderContent","createUINotification","NodeIconContainer","NotificationLink","toastOptions","toast","POSITION","BOTTOM_RIGHT","autoClose","pauseOnFocusLoss","showCloudInstallationProblemNotification",
"notificationComponent","showCloudConnectionProblemNotification","uiNotification","defaultUrlOptions","alarm_unique_id","alarm_id","alarm_event_id","alarm_when","parseQueryPair","isInvalidPair","isPrintMode","mapIndexed","mergeRight","parseUrl","watchFetchDataResponseChannel","fetchDataSaga","fetchChartSaga","fetchInfoSaga","chartSagas","CONCURRENT_CALLS_LIMIT_METRICS","fetchDataResponseChannel","take","action","start","end","put","constructCompatibleKey","IS_FLIP_RESPECTED_IN_COMPOSITE_CHARTS","getGroupByValues","fetchMetrics$","group","selectSnapshot","dimensionsWithUrlOptions","matchingKey","snapshotKey","agentOptionsOriginal","hasFlip","agentOptions","shouldAddFakeFlip","groupValues","axiosOptions","agent_options","post_aggregation_methods","aggregations","valueOf","fillMissingPoints","transformedResults","postAggregated","post_aggregated_data","fetchForSnapshot$","resetFetchForSnapshot$","fetchDataForSnapshotSaga","aggr_method","node_ids","stopSnapshotModeSaga","response","poll","wasCloudAvailable","wasACLKAvailable","takeEvery","spawn","sidePanelTransitionTimeInSeconds","getLog","notifyAll","alarmsLoop","startAlarms","fetchAllAlarmsSaga","alarmsSagas","ALARMS_INITIALIZATION_DELAY","ALARMS_UPDATE_EVERY","ALARMS_MS_BETWEEN_NOTIFICATIONS","areNotificationsAvailable","notificationCallback","netdataAlarmsNotifCallback","firstNotificationId","lastNotificationId","scrollToAlarm","chartID","scrollToChart","requestPermissions","Notification","permission","requestPermission","hasGivenNotificationPermissions","lastNotificationIdArg","getNotification","entry","firstNotificationIdArg","updated","valueString","value_string","tag","interaction","unique_id","old_status","no_clear_notification","notificationTitle","notificationOptions","requireInteraction","notificationHandler","alarmLogs","logsSorted","sortBy","newLogs","notifications","onclick","netdataAlarmsRemember","what","latest_alarm_log_unique_id","allAlarms","allowedReferrerDomains","isAllowedReferrer","referrer","watchWindowFocusChannel","waitForFullInfoPayload","injectPosthog","fetchHelloSaga","spacePanelSaga","globalSagas","windowFocusChannel","listenToWindowFocus","posthog","selectFullInfoPayload","__SV","_i","api_host","insertBefore","people","loaded","identify","shouldMaskReferrer","register","$ip","$current_url","$pathname","$host","$initial_referring_domain","$initial_referrer","$referring_domain","$referrer","event_source","netdata_machine_guid","netdata_person_id","netdata_buildinfo","netdata_release_channel","mirrored_host_count","mirrored_hosts","alarms_normal","normal","alarms_warning","warning","alarms_critical","critical","host_os_name","os_name","host_os_id","os_id","host_os_id_like","os_id_like","host_os_version","os_version","host_os_version_id","os_version_id","host_os_detection","os_detection","system_cores_total","cores_total","system_total_disk_space","total_disk_space","system_cpu_freq","cpu_freq","system_ram_total","ram_total","system_kernel_name","kernel_name","system_kernel_version","kernel_version","system_architecture","architecture","system_virtualization","virtualization","system_virt_detection","virt_detection","system_container","system_container_detection","container_detection","container_os_name","container_os_id","container_os_id_like","container_os_version","container_os_version_id","host_collectors_count","collectors","host_cloud_enabled","host_cloud_available","host_agent_claimed","host_aclk_available","host_aclk_implementation","host_allmetrics_json_used","host_allmetrics_prometheus_used","host_allmetric
s_shell_used","host_charts_count","host_dashboard_used","host_metrics_count","host_notification_methods","config_memory_mode","config_exporting_enabled","config_exporting_connectors","config_hosts_available","config_https_enabled","config_multidb_disk_quota","config_page_cache_size","config_stream_enabled","config_web_enabled","host_is_parent","host_labels","_is_parent","mirrored_hosts_reachable","mirrored_hosts_status","reachable","mirrored_hosts_unreachable","host_collectors","host_is_k8s_node","is_k8s_node","accessRegistry","maxRedirects","machine","isRedirect","returnData","person_guid","parsePersonUrls","personUrls","lastTimestamp","accesses","existingObj","isNewer","extended","uniq","helloCallUrl","cloud_base_url","isUsingGlobalRegistry","accessRegistryResponse","anonymous_statistics","constructOptionStorageKey","setOptionSaga","showSignInSaga","mainJsSagas","LOCAL_STORAGE_NEEDS_SYNC","setGlobalChartUnderlaySaga","hashParams","getHashParams","setHashParams","clearHighlightSaga","removeHashParams","signInLinkHref","rootSaga","sagaMiddleware","createSagaMiddleware","store","createStore","rootReducer","compose","applyMiddleware","configureStore","loadCss","resolve","reject","fileRef","onerror","isDygraphSparkline","chartLibrariesSettings","hasToolboxPanAndZoom","xssRegexIgnore","isLogScale","hasLegend","trackColors","containerClass","peity","google","easypiechart","aspectRatio","gauge","textonly","groupbox","getChartURLOptions","shouldEliminateZeroDimensions","BIGGEST_INTERVAL_NUMBER","Loader","containerNode","hasEmptyData","screenHeight","lost","paddingTop","getPanAndZoomStep","ctrlKey","pan_and_zoom_factor","pan_and_zoom_factor_multiplier_control","pan_and_zoom_factor_multiplier_shift","pan_and_zoom_factor_multiplier_alt","safeEqualCheck","formattersFixed","formattersZeroBased","fastNumberFormat","NumberFormat","useGrouping","minimumFractionDigits","maximumFractionDigits","getLegendFormatValue","convertUnits","intlNumberFormat","valueDecimalDetail","dmin","dmax","convertedValue","useFormatters","selectTemperatureSetting","selectSecondsAsTimeSetting","setConvertUnits","setMin","setMax","unitsCurrent","setUnitsCurrent","decimals","setDecimals","setIntlNumberFormat","legendFormatValue","legendFormatValueRef","updateLegendFormatValueRef","newConvertUnits","newIntlNumberFormat","newDecimalDigits","legendFormatValueDecimalsFromMinMax","newMin","newMax","switchedUnits","newDecimals","convertedMin","convertedMax","delta","defaultColor","LegendContainer","LegendFirstRow","LegendSecondRow","LegendUnit","DateTimeSeparator","span","LegendItems","DimensionItem","isDisabled","DimensionItemToolboxPlaceholder","DimensionIcon","DimensionLabel","DimensionValue","ToolboxContainer","emptyObject","selector","dimension_names","LegendText","ChartLegendRight","hoveredRow","onDimensionClick","showLatestOnBlur","viewBefore","dimensionIds","dimension_ids","showUndefined","legendDate","colorFillOpacity","chart_type","scrollbarRef","dimensionId","rgb","hexFull","colorHex2Rgb","isSelected","hoveredValueArray","view_latest_values","role","tabIndex","ChartTimeframe","ChartLegendBottom","legendToolbox","resizeHandler","ChartLegend","setSelectedDimensions","LegendToolbox","onToolboxLeftClick","onToolboxRightClick","onToolboxZoomInClick","onToolboxZoomOutClick","getDataForFakeStacked","dimensionsVisibility","currentMin","currentMax","getDygraphChartType","chartSettings","dygraphRequestedType","dygraphChartType","getBackgroundColor","Color","transformColors","mix","getDygraphFillAlpha","isFakeStacked","color_fill_opacity
_fake_stacked","color_fill_opacity_stacked","color_fill_opacity_area","useProceededChart","chartRef","propsRef","useToggle","proceeded","toggleProceeded","updatePosition","distance","first_entry","hasProceeded","defaultPositionTo","topMargin","ProceededChartDisclaimer","forwardRef","backgroundColorMap","WARNING","CRITICAL","CLEAR","borderColorMap","getBorderColor","textColorMap","Badge","border","getInitialDygraphOptions","hiddenLabelsElementId","orderedColors","setMinMax","shouldSmoothPlot","isSparkline","grid","includeZero","ylabel","smoothPlotter","strokePattern","drawGapEdgePoints","gridLinePattern","logscale","Dygraph","toTimeString","DygraphChart","chartElementClassName","chartElementId","onUpdateChartPanAndZoom","immediatelyDispatchPanAndZoom","setGlobalChartUnderlay","setHoveredX","viewAfter","selectGlobalChartUnderlay","selectedAlarm","selectAlarm","updateChartPanOrZoom","shouldNotExceedAvailableRange","dygraphInstance","latestIsUserAction","isMouseDown","dygraphHighlightAfter","dygraphLastTouchMove","dygraphLastTouchPageX","dygraphLastTouchEnd","isSyncPanAndZoom","selectSyncPanAndZoom","resetGlobalPanAndZoom","isRendered","toggleIsRendered","positionTo","useDygraphBadge","isAlarmBadgeVisible","alarmBadgeRef","updateAlarmBadge","isProceeded","precededChartRef","updatePrecededPosition","useLayoutEffect","selectSmoothPlot","dygraphOptionsStatic","dygraphOptions","newHoveredX","xRange","last_entry","isInRangeOfAvailableData","minDate","maxDate","currentAlarm","alarmPosition","fillColor","globalAlphaCache","globalAlpha","bottomLeft","topRight","isOutsideReasonableViewport","metaKey","sortedRange","wheel","normalDef","wheelDelta","deltaY","detail","layerX","offsetLeft","xPct","axisAfterOffset","offsetToPercentage","zoomInPercentage","bias","afterAxis","beforeAxis","increment","updatedAfter","updatedBefore","zoom","dygraphPlotter","pct","double_click_speed","instance","useUpdateEffect","selectSpacePanelTransitionEndIsActive","forceDateWindow","hasChangedDuration","hasScrolledToTheFutureDuringPlayMode","optionsDateWindow","selectGlobalSelectionMaster","commonMinState","selectCommonMin","commonMaxState","selectCommonMax","shouldUseCommonMin","shouldUseCommonMax","shouldUpdate","newExtremes","useUnmount","resizeObserver","useMount","hasOmitedFirstCallback","callbackDebounced","waitFor","args","debounce","ResizeObserver","observe","disconnect","EasyPieChart","chartWidth","chartInstance","setChartInstance","valueIndex","safeMinMax","_min","_max","isMinOverride","isMaxOverride","getPercentFromValueMinMax","easypiechart_track","easypiechart_scale","newChartInstance","EasyPie","barColor","trackColor","scaleColor","scaleLength","lineCap","trackWidth","easing","shouldUseAnimation","enableAnimation","disableAnimation","valueFontSize","valuetop","titleFontSize","titletop","unitFontSize","unitTop","isSetByUser","GaugeChart","chartCanvasElement","minAttribute","maxAttribute","safeMin","safeMax","_pcent","gauge_pointer","gauge_stroke","radiusScale","pointer","limitMax","limitMin","colorStart","colorStop","strokeColor","generateGradient","gradientType","highDpiSupport","Gauge","setOptions","minValue","maxValue","animationSpeed","set","valueTop","minMaxFontSize","normalizeHex","colorLuminance","hexNormalized","splitEvery","nr","convertToTimestamp","number","SparklineChart","viewAfterForCurrentData","viewBeforeForCurrentData","$chartElement","set$chartElement","sparklineOptions","requestedAfter","requestedBefore","currentDuration","requestedDuration","widthRatio","paddingLeftPercentage","getForceTimeWindo
wCorrection","defaultFillColor","fill_luminance","chartTitle","emptyStringIfDisable","sparklineInitOptions","lineColor","chartRangeMin","chartRangeMax","composite","enableTagOptions","tagOptionPrefix","tagValuesAttribute","disableHiddenCheck","defaultPixelsPerValue","spotColor","minSpotColor","maxSpotColor","spotRadius","valueSpots","highlightSpotColor","highlightLineColor","normalRangeMin","normalRangeMax","drawNormalOnTop","xvalues","chartRangeClip","chartRangeMinX","chartRangeMaxX","disableInteraction","disableTooltips","disableHighlight","highlightLighten","highlightColor","tooltipContainer","tooltipClassname","tooltipChartTitle","tooltipFormat","tooltipPrefix","tooltipSuffix","tooltipSkipNull","tooltipValueLookups","tooltipFormatFieldlist","tooltipFormatFieldlistKey","numberFormatter","numberDigitGroupSep","numberDecimalMark","numberDigitGroupCount","onLeave","onChange","sparklines","getCurrentRegionFields","bind","unbind","paddingLeft","fetchPromise","emptyContent","getDateRange","dt","dtString","time","t1","t2","D3pieChart","d3pieInstance","setD3pieInstance","d3pieOptions","safeContent","defaultTitle","other","mainlabel","segment_stroke","gradient_color","initialD3pieOptions","tooltip_bg","tooltip_fg","newD3pieInstance","isHoveredButNoData","slot","PeityChart","peityOptions","$element","peityInitOptions","foreground","updatedOptions","GoogleChart","googleChartInstance","hasApiBeenLoaded","setHasApiBeenLoaded","script","firstScript","packages","googleOptions","dataTable","visualization","DataTable","chartType","areaOpacity","Map","initialGoogleOptions","hAxis","viewWindowMode","slantedText","textStyle","gridlines","vAxis","chartArea","focusTarget","pointsVisible","titlePosition","titleTextStyle","isHtml","ignoreBounds","curveType","isStacked","googleInstance","AreaChart","LineChart","TextOnly","precision","textContent","defaultAspectRatio","getCellBoxSize","cellSize","getRows","getColumns","rows","getXPosition","getYPosition","getFullWidth","getFullHeight","getOffsetPosition","onMouseenter","onMouseout","cellPadding","hoveredIndex","getEvent","cellBoxSize","mouseout","nextHoveredIndex","getWidth","getCanvasAttributes","defaultColorRange","makeGetColor","colorRange","scaleLinear","domain","extent","range","innerHeight","aligns","GroupBox","renderTooltip","dataRef","canvasRef","boxesRef","hover","setHover","dropHoverRef","boxHoverRef","timeoutId","close","deactivateBox","closeDrop","currentHover","cellStroke","activeBox","activateBox","clearEvents","drawBox","registerEvents","strokeRect","drawBoxes","onMouseEnter","onMouseLeave","getAlign","Title","Label","Flex","attrs","TextMicro","GroupBoxWrapper","groupIndex","renderGroupPopover","renderBoxPopover","maxWidth","boxPopover","boxAlign","groupPopover","alignItems","plain","isOpen","popoverRef","GroupBoxes","flexWrap","vertical","flex","LinearColorScaleBar","k8s_cluster_id","k8s_node_name","k8s_namespace","k8s_controller_kind","k8s_controller_name","k8s_pod_name","k8s_container_name","labelIds","word","toUpperCase","Separator","Header","wordBreak","TabButton","neutral","themeType","Tabs","ExpandButton","makeFlex","Section","onExpand","noBorder","side","justifyContent","getPortalNodeStyles","shouldAddSpecialHeight","heightFromLocalStorage","heightID","persitedHeight","getHeightFromLocalStorage","heightOverriden","chartDefaultsMinWidth","min_width","minWidth","globalIntersectionOptions","root","rootMargin","threshold","globalIntersectionObserver","globalObserver","IntersectionObserver","entries","isIntersecting","unsubscribe","elementToUnsu
bscribe","createGlobalIntersectionObserver","InvisibleSearchableText","opacity","shouldCleanChartStateAlways","DisableOutOfView","portalNode","hasPortalNodeBeenStyled","setHasPortalNodeBeenStyled","isShowingAlarmOnChart","forEachObjIndexed","styleName","setProperty","destroyOnHide","selectDestroyOnHide","clonedChildrenRef","isVisibleIntersection","setIsVisible","isVisibleRef","newIsVisible","useCommonIntersection","debounceTime","selectIsAsyncOnScroll","shouldHideDebounced","setShouldHideDebounced","useDebounce","shouldHide","previousIsVisibleIntersection","newClonedChildren","child","cloned","clonedCanvases","querySelectorAll","oldCanvas","newCanvas","drawImage","cloneWithCanvas","clonedChildrenContainer","nodeElement","ChartContainer","dropdownMenu","renderCustomElementForDygraph","onAttributesChange","externalChartMetadata","sectionTitle","MigrationModalPromos","aggrMethods","avg","ChartValueContainer","memo","displayedIndex","aggregation","getAggregation","ChartValue","dangerouslySetInnerHTML","__html","groupLabel","postGroupLabel","relatedIndex","useContext","ThemeContext","chartContainerRef","setDisplayedIndex","setDisplayedIndexThrottled","throttle","repaint","relatedCharts","relatedChartAttributes","chartAttributes","chartLabels","ExternalButton","alignSelf","Item","secondary","DateItem","DateSection","Metrics","LabelsSection","labelId","items","onItemClick","getLabel","sliced","expandable","onNodeClick","ids","predefinedLabelIds","getLabelIds","StyledButton","List","onBack","horizontal","TabsContainer","Popover","setView","isLabelView","Kubernetes","labelValues","postGroupValues","postGroupData","groupValue","indexes","boxes","labelsAcc","groupData","groupChartLabels","groupLabels","Set","accChartLabels","transform","groupBoxData","groupedBoxesData","groupedBox","AbstractChart","showValueAttributesNodes","userElementId","chartSettingCallOptions","isFlipped","dimensionIndex","useShowValueOutside","Chart","viewRangeForCurrentData","themeContext","unitsScalingMethod","selectUnitsScalingMethod","shouldDisplayToolbox","legend_toolbox","shouldDisplayResizeHandler","resize_charts","allDimensionNames","dimensionNamesFromMetadata","additionalDimensionNamesFromData","localHoveredX","setLocalHoveredX","isSyncSelection","selectSyncSelection","handleSetHoveredX","noMaster","globalHoveredX","selectGlobalSelection","netdataFirst","netdataLast","fixedMinDuration","setGlobalPanAndZoomDebounced","useDebouncedCallback","newGlobalPanAndZoom","flush","handleUpdateChartPanAndZoom","shouldFlushImmediately","minDuration","currentDuraton","afterForced","beforeForced","first","wantedDuration","doCallback","tolerance","movement","handleToolBoxPanAndZoom","newAfter","newBefore","handleToolboxLeftClick","handleToolboxRightClick","timeWindow","handleToolboxZoomInClick","panAndZoomStep","handleToolboxZoomOutClick","selectAssignedColors","createSelectAssignedColors","__","isTimeVisible","DropdownItem","DropdownItemLabel","DotsBtn","ChartDropdown","setIsOpen","handleClose","circleAnimation","keyframes","SpinnerContainer","Circle","Circle2","Circle3","ChartSpinner","spaceBetween","ChartDropdownContainer","dimensionsAggrMethodMap","ChartWithLoader","selectChartMetadataRequest","actualChartMetadata","panAndZoom","isPanAndZoomMaster","areCriteriaMet","preferedIntervalTime","selectHasWindowFocus","stopUpdatesWhenFocusIsLost","selectStopUpdatesWhenFocusIsLost","selectGlobalPause","shouldBeUpdating","shouldFetch","setShouldFetch","shouldFetchImmediatelyAfterFocus","setShouldFetchImmediatelyAfterFocus","intervalTime","use
Interval","useFetchNewDataClock","panAndZoomThrottled","useThrottle","initialBefore","liveModeAfter","boundingClientRect","isShowingSnapshot","selectShouldEliminateZeroDimensions","shouldUsePanAndZoomPadding","selectPanAndZoomDataPadding","CancelToken","shouldShowSpinnerDebounced","setShouldShowSpinnerDebounced","shouldShowSpinner","newViewRange","forceDataPoints","force_data_points","pointsMultiplier","requestedPadding","pixelsPerPointAttribute","pixelsPerPointSetting","pixels_per_point","getChartPixelsPerPoint","correctedPoints","firstEntry","nowInSeconds","afterAbsolute","beforeAbsolute","requestedRange","availableRange","getCorrectedPoints","externalSelectedDimensions","customElementForDygraph","selectDashboardDomain","selectIsSnapshotMode","selectSnapshotOptions","SnapshotLoader","SnapshotLoaderContainer","Portals","nodes","attributesMapped","createPortal","useHttp","shouldMakeCall","isExternal","setIsFetching","isError","setIsError","setData","modalRoot","ModalPortal","PrintModal","printModalElement","amountOfCharts","amountOfFetchedCharts","nameOfAnyFetchingChart","print","progressBarText","aria-labelledby","data-keyboard","data-backdrop","data-dismiss","aria-label","aria-hidden","aria-valuenow","aria-valuemin","aria-valuemax","SocialMediaContainer","FirstRow","GithubCopy","GithubCopyLine","SocialMediaLink","GithubStarQuestion","GithubIcon","TwitterIcon","FacebookIcon","SecondRow","SecondRowText","SidebarSocialMedia","SidebarSocialMediaPortal","NotificationsContainer","closeButton","hasBorder","hostNameSelector","global","truncate","tooltipBackground","CustomTooltip","isBasic","getContent","contentNode","Tooltip","getTooltipContent","Options","data-toggle","data-target","VersionControl","currentVersion","isStableReleaseChannel","releaseChannel","githubVersion","gcsVersionResponse","mediaLink","mediaLinkResponse","latestVersion","transformGcsVersionResponse","isNewVersionAvailable","small","versionSelector","Version","initialValue","setToggle","toggle","toggleOn","toggleOff","getValueFromStorage","useLocalStorage","setValue","RootContainer","DropdownContainer","Menu","ListContainer","OpenerIcon","noMargin","PickerBox","StyledTimePeriod","StyledCustomTimePeriod","StyledDropdown","renderTitle","onMenuToggle","anchorCorner","renderOpener","maxHeight","DropdownIcon","CustomInput","input","StyledDrop","Drop","StyledHR","hr","period","resolution","setTimeRange","tagging","dateResolutions","resolutionsMapping","MINUTE","HOUR","months","getCustomTimePeriod","parseInputPeriod","timeCorrection","customRange","getUnixTime","focusTaggingMap","startDate","endDate","timePeriods","getDateWithOffset","formattedDate","isMatch","splitOffset","mathSign","absoluteNumber","firstPart","padStart","padEnd","formatOffset","TimePeriods","handleTimePeriodChange","selectedDate","CustomTimePeriod","getInputValue","inputValue","setInputValue","isDropdownOpen","toggleDropdown","onBlur","currentValue","currentTarget","isValidInput","isInteger","timePeriod","isValid","onChangeResolution","newResolution","dateResolution","DatePicker","selected","selectsStart","selectsEnd","startOpen","inline","selectsRange","monthsShown","showPopperArrow","calendarContainer","StyledDateInput","StyledCalendar","getRgbColor","DatePickerInput","onDatesChange","onFocus","placeholderText","setFormattedValue","parsedDate","useLocaleDate","convertTimestampToDate","getLocaleDate","toDate","useConvertedDates","DatePickerWrapper","setStartDate","setEndDate","onInputFocus","convertedStartDate","convertedEndDate","setValidStartDate","setPreviousV
alue","isBefore","setValidEndDate","dates","startDateWithOffset","endDateWithOffset","startDateTimestamp","endDateTimestamp","grow","PeriodIndication","convertedStart","convertedEnd","formattedStartDate","formattedEndDate","formatDates","formatDistanceStrict","getTimePeriod","whiteSpace","resolutionMap","SECONDS","getDuration","moment","diff","getGranularDuration","showSeconds","getResolution","DateBox","isPlaying","isSameDate","DurationBox","PickerAccessorElement","isPickerOpen","setRangeValues","timeframe","setTimeframe","getStartDate","getEndDate","useDashboardSelector","isSame","getIsSameDate","DatePickerDrop","initialStartDate","initialEndDate","setResolution","focusedInput","setFocusedInput","setDates","clearChanges","togglePicker","focusTagging","getFocusTagging","isValidTimePeriod","isApplyDisabled","consistentDefaultValue","isClearDisabled","pickerDrop","canHideTarget","onEsc","onClickOutside","eventCategory","eventAction","eventLabel","eventValue","dataLayer","eventData","reportEvent","ReduxDatePickerContainer","dashboardDispatch","useDashboardDispatch","isGlobalPanAndZoom","pickedValues","getHoverColor","StyledPill","Pill","PlayPausePill","isForcePlaying","getIcon","PanelRowContainer","MenuItem","Wrapper","testid","actions","basis","DefaultListHeader","H4","SectionHandle","toggleOpen","ItemsList","PlayOptionsTooltip","bacgkround","MenuButton","Dropdown","MemoizedPlayOptions","GlobalControls","hollowColors","hollow","pillProps","Alarms","selectActiveAlarms","News","app","upToDate","SearchInput","TextInput","Search","inputRef","metaShrinked","OffsetItem","onSelect","timezones","abbr","getDefaultTimezone","memoized","timeZoneName","normalizedOffset","parsedOffset","normalizeOffset","digitizedOffset","digitizeOffset","timezoneList","byId","Timezone","selectedTimezone","selectedOffset","timezoneHash","zones","IframeContainer","Iframe","signedIn","rendered","setRendered","signInMsg","lsValue","removeLsValue","selectCloudBaseUrl","nameParam","originParam","signInIframeUrl","onLoad","useCheckSignInStatus","hasSignedInBefore","setHasSignedInBefore","onMessage","isNew","SignIn","isRegistry","onSignIn","CloudTab","showBorderLeft","onActivate","fixed","closable","TITLE","TabsContentText","TabsContent","Home","image","nodeView","Overview","Nodes","Dashboards","Alerts","Anomalies","video","Pricing","Privacy","DiscoverCloudDrop","parentRef","closeDropdown","handleGoToCloud","backdrop","marginLeft","frameBorder","allow","allowFullScreen","InnerPositioner","DiscoverCloud","setIsModalOpen","selectedModalContent","setSelectedModalContent","dropDownParentRef","handleOpenModal","selectedContentId","SignOut","SignInItem","isSignedInSelector","dashboard","UserSettings","menuItems","separator","DarkTheme","SpacesSkeleton","Fragment","SpacesIframe","Spaces","getNodeUrl","baseUrl","getNodes","hostsStatus","base","decodeURI","getBaseUrl","replicatedNodes","Anchor","StyledIcon","ReplicatedNodes","listOpen","setListOpen","toggleListOpen","iconLeft","SpacePanelIframe","spacePanelMessage","promptContent","signIn","signUp","SignInPrompt","NoNetwork","viewBox","xmlns","fillRule","clipRule","OfflinePrompt","NodesContainer","ListItem","TrashIcon","NodeUrl","TextNano","withComponent","NodeName","handle","metaIcon","maxLength","spanLength","truncateMiddle","VisitedNodes","sortedMachines","machinesArray","replicatedNodesSelector","visitedNodesSelector","visitedNodes","globalRegistry","selectIsUsingGlobalRegistry","switchIdentity","persist","pointerEvents","setLsValue","selectSpacePanelIsActive","Layout","printMode","
mapTheme","slate","white","DefaultTheme","MigrationModal","migrationModalPromoInfo","setUserPrefrence","savePromoRemindMeSelection","migrationModalPromo","requestRefreshOfAccess","isRememberChoiceChecked","setIsRememberChoiceChecked","toPath","userPreference","handleClickedCTA2","CTA2","tickBoxOption","makeUTMParameters","modalPromo","utmParametersToString","migrationmodalInfo","PROMO_SIGN_UP_CLOUD","preferenceID","PROMO_SIGN_IN_CLOUD","PROMO_IVNITED_TO_SPACE","PROMO_CLAIM_NODE","PROMO_TO_USE_NEW_DASHBAORD","FALLBACK_TO_AGENT","NO_INFO_FALLBACK_TO_AGENT","modalStatusWithPromoFunctions","userSavedPreference","nodeClaimedStatus","useMigrationModal","modalStatus","MigrationManager","cloudUrl","selectSignInUrl","term","linkToCoud","redirectURI","hasPromoSelectionSaved","isPromoEligibleForShow","showModalTimer","goToCloud","FakeMargin","Box","App","useStore","refreshHelper","setRefreshHelper","haveDOMReadyForParsing","loadOverlay","shouldUseRegistry","useRegistry","shouldUseAlarms","selectHasStartedAlarms","useAlarms","shouldUseInfo","useInfo","hasFetchDependencies","setHasFetchDependencies","bootstrap_css","dashboard_css","useChartsMetadata","selectHasFetchedInfo","selectTheme","alarmWhen","alarmTime","alarmChart","alarmValue","useAlarmFromUrl","ReactDOM","isRegistrySelector","offlineSelector","signInUrl","fragmentParamsSeparatorRegEx","param","makeHashFromObject","allParams","allParamsResult","mergeDeepLeft","excludedParams","filteredParams","getFilteredHash","htmlIframeElement","iframeElement","contentWindow","postMessage","messageType","defaultState","lastMessage","setLastMessage","handleMessage","resetMesssage","process","useSelectorOriginal","useDispatchOriginal","getIsDemo","selectGlobal","globalState","selectOptions","optionName","selectChartsMetadata","allMetadata","BaseAnchor","Component","assign","meta","INITIAL_OPTIONS","netdataNoBootstrap","optionsFromLocalStorage","localStorageKeyToOption","parsed","getItemFromLocalStorage","overridenOptions"],"mappings":";gIAAA,oEAAO,IA4LHA,EACAC,EA7LSC,EAAU,SAACC,GACtB,OAAIA,GAAK,IAAMA,EAAI,GACX,IAAN,OAAWA,GAEP,GAAN,OAAUA,IAuBNC,EAAe,SAACC,EAAiBC,GACrC,IAAIC,EAAgBC,KAAKC,IAAIJ,GAEvBK,EAAuB,SAAhBJ,EAAyBE,KAAKG,MAAMJ,EAAgB,OAAS,EAC1EA,GAAwB,MAAPG,EAEjB,IAAME,EACY,SAAhBN,GAA0C,UAAhBA,EAA0BE,KAAKG,MAAMJ,EAAgB,MAAQ,EACzFA,GAAyB,KAARK,EAEjB,IAAMC,EAAUL,KAAKG,MAAMJ,EAAgB,IAC3CA,GAA2B,GAAVM,EAEjB,IAAMC,EAA6B,SAAhBR,EAAA,UAA4BI,EAA5B,MAAuC,GACpDK,EAA8B,SAAhBT,GAA0C,UAAhBA,EAA1B,UAAuDJ,EAAQU,GAA/D,KAA2E,GACzFI,EAAa,UAAMd,EAAQW,GAAd,KACfI,EAAgBf,EAAQK,EAAcW,QAAQ,IAElD,MAAM,GAAN,OAAUJ,GAAV,OAAuBC,GAAvB,OAAqCC,GAArC,OAAqDC,IAGjDE,EAA+B,CACnC,YAAa,CACXC,IAAK,EACLC,KAAM,IACNC,KAAM,KAERF,IAAK,CACHA,IAAK,EACLC,KAAM,IACNC,KAAM,KAER,aAAc,CACZ,SAAU,KACV,aAAc,EACd,aAAc,IACd,aAAc,IACd,aAAc,KAEhB,UAAW,CACT,UAAW,EACX,cAAe,KACf,cAAe,QACf,cAAe,WACf,cAAe,eAEjB,cAAe,CACb,UAAW,EAAI,KACf,cAAe,EACf,cAAe,KACf,cAAe,QACf,cAAe,YAEjB,MAAO,CACL,MAAO,EACP,QAAS,KACT,QAAS,QACT,QAAS,WACT,QAAS,eAEX,OAAQ,CACN,MAAO,EAAI,KACX,OAAQ,EACR,OAAQ,KACR,OAAQ,QACR,OAAQ,YAEV,QAAS,CACP,MAAO,EAAI,KACX,QAAS,EACT,QAAS,KACT,QAAS,QACT,QAAS,YAEXC,MAAO,CACLA,MAAO,EACPC,UAAW,KACXC,UAAW,QACXC,UAAW,WACXC,UAAW,eAEbC,GAAI,CACFA,GAAI,EACJC,IAAI,KAAD,IAAE,GAAM,GACXC,IAAI,KAAD,IAAE,GAAM,GACXC,IAAI,KAAD,IAAE,GAAM,GACXC,IAAI,KAAD,IAAE,GAAM,IACXC,IAAI,KAAD,IAAE,GAAM,IACXC,IAAI,KAAD,IAAE,GAAM,IACXC,IAAI,KAAD,IAAE,GAAM,KAEbC,EAAG,CACDA,EAAG,EACHC,IAAK,KACLC,IAAK,QACLC,IAAK,WACLC,IAAK,cACLC,IAAK,iBAEPC,GAAI,CACFN,EAAG,EAAI,KACPM,GAAI,EACJC,GAAI,KACJC,GAAI,QACJC,GAAI,YAENR,IAAK,CACHD,EAAG,EAAI,KACPC,IAAK,EACLC,IAAK,KACLC,I
AAK,QACLC,IAAK,YAEPG,GAAI,CACFP,EAAG,UACHM,GAAI,EAAI,KACRC,GAAI,EACJC,GAAI,KACJC,GAAI,QACJC,GAAI,YAENR,IAAK,CACHF,EAAG,UACHC,IAAK,EAAI,KACTC,IAAK,EACLC,IAAK,KACLC,IAAK,QACLC,IAAK,YAEPG,GAAI,CACFR,EAAG,aACHM,GAAI,UACJC,GAAI,EAAI,KACRC,GAAI,EACJC,GAAI,KACJC,GAAI,QACJC,GAAI,YAENR,IAAK,CACHH,EAAG,aACHC,IAAK,UACLC,IAAK,EAAI,KACTC,IAAK,EACLC,IAAK,KACLC,IAAK,QACLO,IAAK,aA2BHC,EACJ,eAACC,EAAD,uDAAsB,EAAtB,OACA,SAACC,GAAD,OACGA,EAAQD,GAAYhC,QAAQ,KAE3BkC,EAAqC,CACzCC,QAAS,CACPC,WAAY,CACVC,MADU,WAER,MAAqC,eAA9BvD,GAETwD,QAJU,SAIFL,GACN,OAAgB,EAARA,EAAa,EAAI,MAI/BM,QAAS,CACPC,WAAY,CACVH,MADU,WAER,MAAqC,eAA9BvD,GAETwD,QAJU,SAIFL,GACN,OAAgB,EAARA,EAAa,EAAI,MAI/BQ,aAAc,CACZC,aAAc,CACZL,MAAO,SAACM,GAAD,OAAiBA,EAAM,GAC9BL,QAASP,EAAS,MAEpBU,aAAc,CACZJ,MAAO,SAACM,GAAD,OAAiBA,GAAO,GAAKA,EAAM,KAC1CL,QAASP,KAEX5C,QAAS,CACPkD,MAAO,SAACM,GAAD,OAAiBA,GAAO,KAAQA,EAAM,KAC7CL,QAASP,EAAS,OAEpB,WAAY,CACVM,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,KAASA,EAAM,MAC7EL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAQ,IAAM,aAEzD,cAAe,CACbI,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,MAAYA,EAAM,OAChFL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAQ,IAAM,WAEzD,eAAgB,CACdI,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,OAC9DL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAQ,IAAM,WAI3D9C,QAAS,CACPuD,aAAc,CACZL,MAAO,SAACM,GAAD,OAAiBA,EAAM,MAC9BL,QAASP,EAAS,MAEpBU,aAAc,CACZJ,MAAO,SAACM,GAAD,OAAiBA,GAAO,MAASA,EAAM,GAC9CL,QAASP,EAAS,MAEpB5C,QAAS,CACPkD,MAAO,SAACM,GAAD,OAAiBA,GAAO,GAAKA,EAAM,IAC1CL,QAASP,EAAS,IAEpB,WAAY,CACVM,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,IAAMA,EAAM,MAC1EL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAO,aAElD,cAAe,CACbI,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,MAAQA,EAAM,OAC5EL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAO,WAElD,eAAgB,CACdI,MAAO,SAACM,GAAD,OAAiB5D,GAA+B4D,GAAO,OAC9DL,QAAS,SAACL,GAAD,OAAmB/C,EAAa+C,EAAO,YAKhDW,EAAW,SAACX,GAAD,OAAmBA,GAgBvBY,EAAyB,CAEpCC,KAAM,GACNC,OAAQ,GAERC,YALoC,WAMlCC,KAAKH,KAAO,GACZG,KAAKF,OAAS,IAKhBG,IAZoC,SAalCC,EACAC,EACAT,EACAU,EACAC,EACAC,EACAC,EACAC,EACAC,GAaA,GAVqB,qBAAVL,IAETA,EAAQ,aAIVvE,EAA4B2E,EAC5B1E,EAA8B2E,EAGM,qBAAzBzD,EAAcoD,IACe,qBAA5BnB,EAAiBmB,GAI3B,OAAO,SAACpB,GAAD,OAAmBA,GAI5B,QAAqB0B,IAAjBL,GAA+C,OAAjBA,GAA0C,aAAjBA,GACtDA,IAAiBD,EAIpB,OADAG,EAAoBH,GACbT,EAMT,IA6GQgB,EA7GJC,EAAS,KACTC,EAAW,EAEf,GAAoC,qBAAzB7D,EAAcoD,GAAwB,CAI/C,GAAqB,SAAjBC,EAAyB,EAM3BF,EAAM9D,KAAKC,IAAI6D,KACfT,EAAMrD,KAAKC,IAAIoD,MAEbA,EAAMS,GAIR,IAAMW,EAAqB9D,EAAcoD,GASzC,GARAW,OAAOlB,KAAKiB,GAAoBE,SAAQ,SAACC,GACvC,IAAMC,EAAcJ,EAAmBG,GACnCC,GAAexB,GAAOwB,EAAcL,IACtCD,EAASK,EACTJ,EAAWK,MAIA,OAAXN,GAAmBC,GAAY,EAGjC,OADAN,EAAoBH,GACbT,EAGT,GAA+B,kBAApBW,EAA8B,CAIvC,IAAMa,EAAc,UAAMb,EAAN,YAAyBF,GAGzCgB,EAAIpB,KAAKH,KAAKsB,GACD,qBAANC,IACTpB,KAAKH,KAAKsB,GAAkB,GAC5BC,EAAIpB,KAAKH,KAAKsB,IAEhBC,EAAElB,GAAQ,CACRE,MAAOQ,EACPS,QAASR,GAIX,IAAIS,EAAcF,EAAElB,GAEpBa,OAAOlB,KAAKuB,GAAGJ,SAAQ,SAAChF,GAClBoF,EAAEpF,GAAGqF,QAAUC,EAAYD,UAC7BC,EAAcF,EAAEpF,OAKpB,IAAM8D,EAAS,CACbM,MAAOkB,EAAYlB,MACnBiB,QAASC,EAAYD,SASvB,OAPArB,KAAKF,OAAOqB,GAAkBrB,EAG9Be,EAAWf,EAAOuB,QAGlBd,EAJAK,EAASd,EAAOM,OAKT,SAACpB,GASN,OARI6B,IAAaf,EAAOuB,UAItBR,EAAWf,EAAOuB,QAClBd,EAFAK,EAASd,EAAOM,QAKXpB,EAAQ6B,GAOnB,OADAN,EAAoBK,GACb,SAAC5B,GAAD,OAAmBA,EAAQ6B,GAIpC,MAAkD,qBAAvC7D,EAAcoD,GAAOC,IAE9BQ,EAAW7D,EAAcoD,GAAOC,GAChCE,EAAoBF,GACb,SAACrB,GAAD,OAAmBA,EAAQ6B,KAIpCU,QAAQC,IAAR,gCAAqCpB,EAAMqB,WAA3C,eAA4DpB,EAAaoB,WAAzE,+BAGAlB,EAAoBH,GACbT,GACP,MAAuC,qBAA5BV,EAAiBmB,GAEP,SAAjBC,GAEFU,OAAOlB,KAAKZ,EAAiBmB,IAAQY,SAAQ,SAAChF,GACxC2E,GACA1B,EAAkBmB,GAAkBpE,GAAGoD,MAAMM,KAE/Ca,EAAoBvE,GACpB2E,EAAqB1B,EAAkBmB,GAAkBpE,GAAGqD,YAG5DsB,IAKJJ,EAAoBH,GACbT,IAC8C,qBAA1CV,EAAiBmB,GAAOC,IACnCE,EAAoBF,GACbpB,EAAiBmB,GAAOC,
GAAchB,UAG/CkC,QAAQC,IAAR,gCAAqCpB,EAAMqB,WAA3C,eAA4DpB,EAAaoB,WAAzE,+BAEAlB,EAAoBH,GACbT,IAIT4B,QAAQC,IAAR,qDAA0DpB,EAAMqB,aAChElB,EAAoBH,GACbT,M,qGCneE+B,G,OAASC,cAAiB,WAI7BC,GAJ6B,IACrCC,EADqC,EACrCA,SACAC,EAFqC,EAErCA,UACGC,EAHkC,+CAKrC,0CACMA,EADN,CAEEC,KAAK,SACLF,UAAWG,IAAW,uBAAwBH,GAC9CF,IAAKA,IAEJC,OCjBL,kCAmBO,IAAMK,EAAgB,SAAC,GASH,IARzBJ,EAQwB,EARxBA,UACAK,EAOwB,EAPxBA,SACAC,EAMwB,EANxBA,QACAC,EAKwB,EALxBA,cACAC,EAIwB,EAJxBA,YACAC,EAGwB,EAHxBA,aACAC,EAEwB,EAFxBA,eACAC,EACwB,EADxBA,aAEMC,EAAYC,iBAAO,MACnBC,EAAWC,YAAYC,KAkB7B,OAjBAC,qBAAU,WACJL,EAAUM,SAAWJ,GACvBK,OAAOC,EAAER,EAAUM,SAASG,QAAQ,CAClCC,UAAW,OACXC,WAAW,EACXC,MAAM,EACNC,QAAS,QACTC,UAAW,SACXC,MAAO,CACLC,KAAMT,OAAOU,QAAQC,QAAQZ,QAAQa,wBACrCC,KAAMb,OAAOU,QAAQC,QAAQZ,QAAQe,yBAEvCC,MAAOvB,EACPwB,QAASzB,MAGZ,IAED,kBAACd,EAAD,CACEI,UAAWG,IAAWH,GACtBM,QAASA,EACTC,cAAeA,EACfC,YAAaA,EACbC,aAAcA,EACdX,IAAKc,GAEL,kBAAC,IAAD,CAAMP,SAAUA,O,gKCnDT+B,EAAiC,gBASjCC,EAAgB,SAAC,GAEhB,IADZC,EACW,EADXA,sBAAuBC,EACZ,EADYA,UAAWC,EACvB,EADuBA,SAAUC,EACjC,EADiCA,iBACjC,EAC6BC,oBAAS,kBAAMJ,EAAsBK,gBADlE,mBACJC,EADI,KACUC,EADV,KAELC,EAAWC,cAEjB9B,qBAAU,WAEJ2B,GAAgB,IAClBE,EACEE,YAAsB,CACpBC,GAAIV,EACJK,oBAIL,CAACA,EAAcL,EAAWC,EAAUM,IAEvC,IAAMI,EAAeC,uBACnB,SAACC,GACCA,EAAMC,iBACN,IAAMC,EAAehB,EAAsBK,aACrCY,EAAkC,eAAfH,EAAMlD,KAC3BkD,EAAMI,QAAQ,GAAGC,QACjBL,EAAMK,QAEJC,EAAY,SAACC,GACjB,IAAMC,EAAaN,EAAeK,EAAgBJ,EAIlD,GAFAjB,EAAsBuB,MAAMC,OAA5B,UAAwCF,EAAWjE,WAAnD,MACAkD,EAAgBe,GACZpB,EAAU,CACZ,IAAMuB,EAAuBtB,EACxBmB,EAAaI,IACdJ,EACJK,aAAaC,QAAb,UACK9B,GADL,OACsCI,GADtC,UAEKuB,MAKHI,EAAc,SAACC,GAAD,OAAmBV,EAAUU,EAAEX,UAC7CY,EAAc,SAACD,GAAD,OAAmBV,EAAUU,EAAEZ,QAAQ,GAAGC,UAY3C,eAAfL,EAAMlD,MACRoE,SAASC,iBAAiB,YAAaF,GACvCC,SAASC,iBAAiB,YAPT,SAAbC,IACJF,SAASG,oBAAoB,YAAaJ,GAC1CC,SAASG,oBAAoB,WAAYD,QAOzCF,SAASC,iBAAiB,YAAaJ,GACvCG,SAASC,iBAAiB,WAfT,SAAbG,IACJJ,SAASG,oBAAoB,YAAaN,GAC1CG,SAASG,oBAAoB,UAAWC,SAgB5C,CAACpC,EAAsBK,aAAcL,EAAsBuB,MAAMC,OAAQtB,EACvEC,IAGJ,OACE,kBAAC,IAAD,CACEzC,UAAU,gCACVO,cAAe,SAAC6C,GACdA,EAAMC,iBACND,EAAMuB,mBAERnE,YAAa0C,EACbzC,aAAcyC,EACd7C,SAAS,SACTM,aAAa,eACbD,eAAe,6S,gCC7FrB,whCAcakE,EAA4BC,uBAAY,UAChDC,IADgD,+BASxCC,EAAqBF,uBAAY,UAAwBC,IAAxB,kBAOjCE,EAAqBH,uBAAY,UAAwBC,IAAxB,kBAMjCG,EAA2BJ,uBAAY,UAC/CC,IAD+C,wBAUvCI,EAA4BL,uBAAY,UAChDC,IADgD,yBAIxCK,EAA8BN,uBAAa,GAAD,OAAIC,IAAJ,iCAK1CM,EAAwBP,uBAAY,UAC5CC,IAD4C,2BAIpCO,EAA0BR,uBAAa,GAAD,OAAIC,IAAJ,6BAOtCQ,EAA+BT,uBAAY,UACnDC,IADmD,4BAI3CS,EAA8BV,uBAAa,GAAD,OAAIC,IAAJ,iCAC1CU,EAAuBX,uBAAa,GAAD,OAAIC,IAAJ,0BAKnCW,EAA0BZ,uBAAY,UAC9CC,IAD8C,6BAmBtCY,EAAmBC,YAAmB,UAG9Cb,IAH8C,sBAUtCc,EAAyBf,uBAAY,UAC7CC,IAD6C,4BAOrCe,EAA8BhB,uBAAY,UAClDC,IADkD,iCAO1CgB,EAAoBjB,uBAAY,UAAwBC,IAAxB,uBAEhCiB,EAAuBJ,YAAoB,GAAD,OAAIb,IAAJ,0BAK1CkB,EAA2BnB,uBAAY,UAC/CC,IAD+C,8BAQvCmB,EAAkBpB,uBAAY,UAAqBC,IAArB,qBAE9BoB,EAAqBrB,uBAAa,GAAD,OAAIC,IAAJ,kBAEjCqB,EAAqBtB,uBAAY,UACzCC,IADyC,wBAIjCsB,EAA+BvB,uBAAY,UACnDC,IADmD,kCAO3CuB,EAA4BxB,uBAAY,UAChDC,IADgD,+BAOxCwB,EAAmCzB,uBAAY,UACvDC,IADuD,+BAI/CyB,EAAiB1B,uBAAY,UAAsBC,IAAtB,oBAE7B0B,EAAgB3B,uBAAa,GAAD,OAAIC,IAAJ,mBAE5B2B,EAAuB5B,uBAAa,GAAD,OAAIC,IAAJ,0BACnC4B,EAAyB7B,uBAAY,UAC7CC,IAD6C,4BAGrC6B,EAAe9B,uBAAY,UACnCC,IADmC,kBAI3B8B,EAAoB/B,uBAAY,UACxCC,IADwC,wB,iCC5K7C,8EAca+B,EAAmC,WAC9C,OAAO1D,uBAAY,WACjB2D,YAAkBC,IAAmB,CAAE7G,KAAM,yBAA0B8G,SAAS,MAC/E,KAGUC,IAbkB,WAC/B,IAAMnE,EAAWC,cACjBmE,YAA8C,oBAAoB,SAAAC,GAChErE,EAAS8D,YAAkB,CAAEO,kB,iCCVjC,iD,iCCAA,kCAAO,IAAMC,EAAU,SAACC,GAAD,OAAeA,EACnCC,QAAQ,KAAM,KACdA,QAAQ,KAAM,KACdA,QAAQ,MAAO,KACfA,QAAQ,MAAO,KACfA,QAAQ,MAAO,KACfA,QAAQ,MAAO,O,mNC+HLC,EAAuB,CAClCC,iBAAkB,GAClBC,UAAW,GACXC,UAAW,GACXC,yBAA0B,KAC1
BC,iBAAkB,KAElBC,aATiCC,IAAWC,eAA+B,IAU3EC,oBAAqB,KACrBC,SAAU,KACVC,eAAgB5D,SAAS6D,WACzBC,aAAa,EACbC,oBAAoB,EAEpBC,iCAAiC,EAEjCC,SAAU,CACRC,aAAc,KACdC,gBAAgB,EAChBC,iBAAiB,EACjBC,iBAAkB,KAClBC,SAAU,UACVC,eAAgB,KAChBC,iBAAkB,KAClBC,eAAgB,KAChBC,gBAAiB,KACjBC,gBAAgB,EAChBC,iBAAiB,EACjBC,gBAAiB,KACjBC,YAAa,KACbC,WAAY,KACZC,iBAAkB,KAClBC,sBAAuB,KACvBC,eAAgB,MAGlBC,SAAU,KACVC,OAAQ,CACNC,aAAc,KACdC,kBAAkB,GAEpBC,MAAO,KAEPC,eAAgB,CACdC,YAAY,EACZC,iBAAiB,EACjBC,KAAM,MAGRnI,QAASoI,IACTC,eAAgB,MAGLC,EAAgBC,wBAAsB,GAAI9C,GAQ1C+C,EAA6B,SAAC,GAKnB,IAJtBC,EAIqB,EAJrBA,gBACAC,EAGqB,EAHrBA,sBACAjI,EAEqB,EAFrBA,UACAkI,EACqB,EADrBA,aAEMC,EAA6C,kBAApBH,GAAgCA,EAAgBI,OAAS,EAKxF,OAAOH,IAA0BE,EAAkBnI,EAAYkI,IAG3DG,EAAc,SAACC,GAAD,MAAqC,SAAhBC,YAAKD,IAExCE,EAAiC,SACrCR,EACAG,GAEA,IALsBG,EAKhBG,EAASN,GALOG,EAK2BN,EAA2BU,MAAM,KALvCL,EAAYC,GAASK,YAAKL,GAASA,GAKY,GACpFM,GAAkBT,IAEnBE,EAAaL,EAA2BU,MAAM,MAMnD,MAAO,CACLG,SAAU,GACVC,UANa,sBACVL,GADU,YAETG,GAAqC,IAAlBH,EAAOL,OAAexJ,OAAOU,QAAQyJ,OAAOpK,QAAQqK,OAAS,KAKpFP,WAIJZ,EAAcoB,GACZ5G,KAEA,SAAC6G,EAAD,GAAiG,IAAvFhB,EAAsF,EAAtFA,aAAclI,EAAwE,EAAxEA,UAAWgI,EAA6D,EAA7DA,gBAAiBC,EAA4C,EAA5CA,sBAAuBkB,EAAqB,EAArBA,eACnEC,EAAUrB,EAA2B,CACzCC,kBACAC,wBACAjI,YACAkI,iBAGIC,EAA6C,kBAApBH,GAAgCA,EAAgBI,OAAS,EAClFiB,EACJH,EAAMjE,iBAAiBmE,IACvBZ,EAA+BR,EAAiBG,GAE5CmB,EAAsB5M,OAAOlB,KAAK6N,EAASR,UAAUT,OACrDmB,EAA8BC,YAClCL,EAEGM,QAAO,SAAAC,GAAa,OAAKL,EAASR,SAASa,MAC3CC,KAAI,SAACD,EAAeE,GAAhB,sBACFF,EACCL,EAASP,WAAWc,EAAIN,GAAuBD,EAASP,UAAUV,aAGpES,EAAQ,eACTQ,EAASR,SADA,GAETU,GAGL,OAAO,eACFL,EADL,CAEEjE,iBAAiB,eACZiE,EAAMjE,iBADK,eAEbmE,EAFa,eAGTC,EAHS,CAIZR,oBAOVhB,EAAcoB,GAAGzG,KAAoB,SAAC0G,EAAD,GAAgD,IAAD,EAArClJ,EAAqC,EAArCA,UAAW6J,EAA0B,EAA1BA,aAAclP,EAAY,EAAZA,MAChEmP,EAAM,yBACPZ,EAAMhE,UAAU2E,UADT,aACP,EAA+BC,OADxB,eAET9J,EAAYrF,IAEToP,EAAiB/R,KAAK8D,IAAL,MAAA9D,KAAI,YAAQ0E,OAAOsN,OAAOF,KAEjD,OAAO,eACFZ,EADL,CAEEhE,UAAU,eACLgE,EAAMhE,UADF,eAEN2E,EAAe,CACdC,SACAC,yBAMRlC,EAAcoB,GAAGxG,KAAoB,SAACyG,EAAD,GAAgD,IAAD,EAArClJ,EAAqC,EAArCA,UAAWiK,EAA0B,EAA1BA,aAActP,EAAY,EAAZA,MAChEmP,EAAM,yBACPZ,EAAM/D,UAAU8E,UADT,aACP,EAA+BH,OADxB,eAET9J,EAAYrF,IAEToP,EAAiB/R,KAAKqD,IAAL,MAAArD,KAAI,YAAQ0E,OAAOsN,OAAOF,KAEjD,OAAO,eACFZ,EADL,CAEE/D,UAAU,eACL+D,EAAM/D,UADF,eAEN8E,EAAe,CACdH,SACAC,yBAMRlC,EAAcoB,GAAGnF,KAA2B,SAACoF,EAAD,OAAUgB,EAAV,EAAUA,SAAV,sBACvChB,EADuC,CAE1CpD,mBAAoBoE,OAGtBrC,EAAcoB,GAAGlF,KAAkC,SAACmF,EAAD,OAAUgB,EAAV,EAAUA,SAAV,sBAC9ChB,EAD8C,CAEjDnD,gCAAiCmE,OAGnCrC,EAAcoB,GAAGvG,KAA0B,SAACwG,EAAD,OAAUlJ,EAAV,EAAUA,UAAW0F,EAArB,EAAqBA,SAArB,sBACtCwD,EADsC,CAEzCxD,WACAN,yBAA0BpF,OAG5B6H,EAAcoB,GAAGtG,KAA2B,SAACuG,EAAOzE,GAAR,sBACvCyE,EADuC,CAE1C7D,iBAAkBZ,OAGpBoD,EAAcoB,GAAGrG,KAA6B,SAAAsG,GAAK,sBAC9CA,EAD8C,CAEjD7D,iBAAkBL,EAAaK,iBAC/BK,SAAUV,EAAaU,cAGzBmC,EAAcoB,GAAGpG,KAAuB,SAACqG,EAAD,OAAUiB,EAAV,EAAUA,MAAV,sBACnCjB,EADmC,CAEtC5D,aAAc6E,OAGhBtC,EAAcoB,GAAGnG,KAAyB,SAAAoG,GAAK,sBAC1CA,EAD0C,CAE7C5D,aAAcN,EAAaM,kBAG7BuC,EAAcoB,GAAGlG,KAA8B,SAACmG,EAAD,OAAUiB,EAAV,EAAUA,MAAOC,EAAjB,EAAiBA,OAAQC,EAAzB,EAAyBA,SAAzB,sBAC1CnB,EAD0C,CAE7CzD,oBAAqB,CACnB0E,QACAC,SACAC,iBAIJxC,EAAcoB,GAAGjG,KAA6B,SAAAkG,GAC5C,IAAKA,EAAMzD,oBAGT,OADAvI,QAAQoN,KAAK,wCACNpB,EAJ4C,MAM3BA,EAAMzD,oBAAxB0E,EAN6C,EAM7CA,MAAOC,EANsC,EAMtCA,OACTG,GAAmBH,EAASD,GAAS,EAC3C,OAAO,eACFjB,EADL,CAEE7D,iBAAkB,CAChB8E,MAAOA,EAAQI,EACfH,OAAQA,EAASG,QAKvB1C,EAAcoB,GACZhG,KACA,SAACiG,GAAD,6DAAoE,GAApE,IAAUsB,uBAAV,+BACKtB,EADL,CAEEzD,oBAAqBT,EAAaS,qBAC9B+E,EAAkB,CAAEnF,iBAAkBL,EAAaK,kBAAqB,OAIhFwC,EAAcoB,GAAG/F,KAAyB,SAACgG,EAAD,GAAgC,IAAtBvD,EAAqB,EAArBA,eAG5C8E,EAAc1I,SAAS6D,WAC7B,OAAO,eACFsD,EADL,CAEEvD,eAAgB8E,GAAe9
E,OAInCkC,EAAcoB,GAAG/E,KAAsB,SAAAgF,GAAK,sBAAUA,EAAV,CAAiBrD,aAAa,OAC1EgC,EAAcoB,GAAG9E,KAAwB,SAAC+E,EAAD,OAAUwB,EAAV,EAAUA,UAAV,sBACpCxB,EADoC,CAEvCrD,YAAab,EAAaa,YAC1BR,iBAAkBL,EAAaK,iBAC/BK,SAAUV,EAAaU,SACvBnG,QAAQ,eAAM2J,EAAM3J,QAAb,CAAsBoL,iCAAkCD,SAGjE7C,EAAcoB,GAAG7E,KAAc,SAAC8E,EAAD,OAAU0B,EAAV,EAAUA,UAAV,sBAC1B1B,EAD0B,CAE7B3J,QAAQ,eAAM2J,EAAM3J,QAAb,CAAsBqL,mBAG/B/C,EAAcoB,GAAG9F,IAAiB0H,SAAS,SAAA3B,GAAK,sBAC3CA,EAD2C,CAE9ClD,SAAS,eACJkD,EAAMlD,SADH,CAENW,iBAAiB,SAIrBkB,EAAcoB,GAAG9F,IAAiB2H,SAAS,SAAC5B,EAAD,OAAUjD,EAAV,EAAUA,aAAcI,EAAxB,EAAwBA,SAAUQ,EAAlC,EAAkCA,YAAlC,sBACtCqC,EADsC,CAEzClD,SAAS,eACJkD,EAAMlD,SADH,CAENC,eACAU,iBAAiB,EACjBR,iBAAiB,EACjBE,WACAQ,qBAGJgB,EAAcoB,GAAG9F,IAAiB4H,SAAS,SAAA7B,GAAK,sBAC3CA,EAD2C,CAE9ClD,SAAS,eACJkD,EAAMlD,SADH,CAENC,aAAc+E,IACdrE,iBAAiB,EACjBP,kBAAkB,SAGtByB,EAAcoB,GAAG3F,KAA6B,SAAC4F,EAAD,OAAUjC,EAAV,EAAUA,eAAV,sBACzCiC,EADyC,CAE5ClD,SAAS,eACJkD,EAAMlD,SADH,CAENiB,wBAIJY,EAAcoB,GAAGhF,KAAe,SAAAiF,GAAK,sBAChCA,EADgC,CAEnClD,SAAS,eACJkD,EAAMlD,SADH,CAENG,gBAAiBnB,EAAagB,SAASG,uBAI3C0B,EAAcoB,GAAGgC,KAAiB,SAAA/B,GAAK,sBAClCA,EADkC,CAErClD,SAAS,eACJkD,EAAMlD,SADH,CAENU,gBAAgB,SAGpBmB,EAAcoB,GACZgC,IAAgBH,SAChB,SACE5B,EADF,OAEI3C,EAFJ,EAEIA,iBAAkBD,EAFtB,EAEsBA,eAAgBE,EAFtC,EAEsCA,eAAgBC,EAFtD,EAEsDA,gBAAiBG,EAFvE,EAEuEA,gBAFvE,sBAIKsC,EAJL,CAKElD,SAAS,eACJkD,EAAMlD,SADH,CAENE,gBAAgB,EAChBK,mBACAD,iBACAE,iBACAC,kBACAG,yBAKNiB,EAAcoB,GAAGgC,IAAgBF,SAAS,SAAA7B,GAAK,sBAC1CA,EAD0C,CAE7ClD,SAAS,eACJkD,EAAMlD,SADH,CAENO,kBAAkB,EAClBD,gBAAgB,EAChBE,gBAAgB,EAChBC,iBAAiB,SAIrBoB,EAAcoB,GACZ5F,KACA,SAAC6F,EAAD,OAAUpC,EAAV,EAAUA,WAAYC,EAAtB,EAAsBA,iBAAkBC,EAAxC,EAAwCA,sBAAxC,sBACKkC,EADL,CAEElD,SAAS,eACJkD,EAAMlD,SADH,CAENc,aACAC,mBACAC,+BAKNa,EAAcoB,GAAG1F,KAAmB,SAAA2F,GAAK,sBACpCA,EADoC,CAEvC/B,OAAO,eACF+B,EAAM/B,OADL,CAEJE,kBAAkB,SAItBQ,EAAcoB,GAAGxF,KAA0B,SAACyF,EAAD,OAAU9B,EAAV,EAAUA,aAAV,sBACtC8B,EADsC,CAEzC/B,OAAO,eACF+B,EAAM/B,OADL,CAEJC,sBAIJS,EAAcoB,GAAGvF,KAAiB,SAACwF,EAAD,OAAUgC,EAAV,EAAUA,IAAKvQ,EAAf,EAAeA,MAAf,sBAC7BuO,EAD6B,CAEhC3J,QAAQ,eACH2J,EAAM3J,QADJ,eAEJ2L,EAAMvQ,SAIXkN,EAAcoB,GAAGtF,KAAoB,SAAAuF,GAEnC,OADAiC,cACO,eACFjC,EADL,CAEE3J,QAAS6L,mBAIbvD,EAAcoB,GAAGrF,KAAoB,SAACsF,EAAD,GAA0B,IAAhBhC,EAAe,EAAfA,SACvCmE,EAAa3O,OAAOlB,KAAK0L,EAASQ,MACrCiC,KAAI,SAAA2B,GACH,IAAIC,EA6BA7D,EA5BJ,IAKE,GAAqB,QAHrB6D,EAAerE,EAASsE,WAAWtE,EAASQ,KAAK4D,KAM/C,OADApO,QAAQoN,KAAR,6CAAmDgB,EAAnD,aACO,KAGT,GAA4B,qBAAjBC,EAGT,OADArO,QAAQoN,KAAR,6CAAmDgB,EAAnD,kBACO,KAET,MAAOzJ,GAEP3E,QAAQoN,KAAR,iDAAuDgB,EAAvD,WAAyEzJ,GACzE0J,EAAe,KAGjB,GAA4B,kBAAjBA,EAGT,OADArO,QAAQoN,KAAR,6CAAmDgB,EAAnD,mBACO,GAIT,IACE5D,EAAO+D,KAAKC,MAAMH,GAClB,MAAO1J,GAGP,OADA3E,QAAQoN,KAAR,wCAA8CgB,EAA9C,YACO,GAGT,OAAO,eAAGA,EAAU5D,MAErBiE,QAAO,SAACC,EAAKC,GAAN,sBAAoBD,EAApB,GAA4BC,KAAQ,IAE9C,OAAO,eACF3C,EADL,CAEEhC,SAAS,eACJA,EADG,CAENQ,KAAM2D,SAKZxD,EAAcoB,GAAGjF,KAAgB,SAACkF,EAAD,OAAU5B,EAAV,EAAUA,MAAV,sBAC5B4B,EAD4B,CAE/B5B,aAGFO,EAAcoB,GAAGpF,KAA8B,SAACqF,EAAD,OAAUxB,EAAV,EAAUA,KAAV,sBAC1CwB,EAD0C,CAE7C3B,eAAe,eACV2B,EAAM3B,eADG,CAEZG,cAIJG,EAAcoB,GAAG5E,KAAmB,SAAC6E,EAAD,OAAUtE,EAAV,EAAUA,QAAV,sBAA8BsE,EAA9B,CAAqCtB,eAAgBhD,Q,iCCzlBzF,uEAMMkH,EAAkB,SAAChO,GAAD,MAAyB,CAC/CiO,KAAM,cACNC,MAAO,UACPC,MAAO,aACPC,OAAQ,UACRC,QAAS,WACTC,OAAQ,UACRC,UAAW,gBACXC,UAAW,gBACXC,QAAS,gBACTC,QAAS,cACTC,OAAQ,2BACwB3O,IAKrB4O,EAAO,SAAC,GAAD,IAAG5O,EAAH,EAAGA,SAAH,OAClB,uBAAGL,UAAWG,IAAW,MAAOkO,EAAgBhO,Q,oMCIrC6O,EAAqB,CAChCC,UAAW,KACXC,QAAS,KACTC,cAAe,KACfC,gBAAiB,KACjBC,gBAAiB,CACfC,sBAAsB,EACtBC,UAAW,MAEbC,gBAAgB,EAChBC,oBAAoB,EACpBC,uBAAuB,EACvBC,mBAAmB,EACnBjN,aAAc,KAEdkN,wBAAwB,EACxBC,qBAAqB,EA
CrBC,aAAc,KACdP,UAAW,MAGAQ,EAAe5F,wBAC1B,GAxB0B,IA4Bf6F,EAAc,SAACzE,EAAexI,GAAhB,OAA+BwI,EAAMxI,IAAOiM,GAEvEe,EAAazE,GAAG2E,IAAgB/C,SAAS,SAAC3B,EAAD,OAAU2E,EAAV,EAAUA,MAAOb,EAAjB,EAAiBA,gBAAiBtM,EAAlC,EAAkCA,GAAlC,sBACpCwI,EADoC,eAEtCxI,EAFsC,eAGlCiN,EAAYzE,EAAOxI,GAHe,CAIrCmM,QAASgB,EACTV,gBAAgB,EAChBD,UAAWF,EAAgBE,iBAI/BQ,EAAazE,GAAG6E,KAAuB,SAAC5E,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBAClCwI,EADkC,eAEpCxI,EAFoC,eAGhCiN,EAAYzE,EAAOxI,GAHa,CAInCyM,gBAAgB,SAIpBO,EAAazE,GAAG2E,IAAgB7C,SAAS,SAAC7B,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBACpCwI,EADoC,eAEtCxI,EAFsC,eAGlCiN,EAAYzE,EAAOxI,GAHe,CAIrCyM,gBAAgB,EAChBC,oBAAoB,SAIxBM,EAAazE,GAAG2E,IAAgB9C,SAAS,SAAC5B,EAAD,GAAgD,IAAtCxI,EAAqC,EAArCA,GAAIkM,EAAiC,EAAjCA,UAAWI,EAAsB,EAAtBA,gBAC1De,EAAWJ,EAAYzE,EAAOxI,GACpC,OAAO,eACFwI,EADL,eAEGxI,EAFH,eAGOqN,EAHP,CAIInB,UAAWoB,YAA0B,CAAC,mBAAoBD,EAASnB,UAAWA,GAC9EI,kBACAG,gBAAgB,EAChBC,oBAAoB,EACpBF,UAAWF,EAAgBE,iBAMjCQ,EAAazE,GAAGgF,IAA2BpD,SAAS,SAAC3B,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBAC/CwI,EAD+C,eAEjDxI,EAFiD,eAG7CiN,EAAYzE,EAAOxI,GAH0B,CAIhD6M,wBAAwB,SAI5BG,EAAazE,GAAGgF,IAA2BlD,SAAS,SAAC7B,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBAC/CwI,EAD+C,eAEjDxI,EAFiD,eAG7CiN,EAAYzE,EAAOxI,GAH0B,CAIhD6M,wBAAwB,EACxBC,qBAAqB,SAIzBE,EAAazE,GAAGgF,IAA2BnD,SAAS,SAAC5B,EAAD,OAAUxI,EAAV,EAAUA,GAAI+M,EAAd,EAAcA,aAAd,sBAC/CvE,EAD+C,eAEjDxI,EAFiD,eAG7CiN,EAAYzE,EAAOxI,GAH0B,CAIhD6M,wBAAwB,EACxBC,qBAAqB,EACrBC,sBAIJC,EAAazE,GAAGiF,KAA2B,SAAChF,GAAD,OAAWS,aAAI,SAACoE,GAAD,sBACrDA,EADqD,GAErDI,YAAK,CAAC,yBAA0B,sBAAuB,gBAAiBxB,MACzEzD,MAGJwE,EAAazE,GAAGmF,IAAiBvD,SAAS,SAAC3B,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBACrCwI,EADqC,eAEvCxI,EAFuC,eAGnCiN,EAAYzE,EAAOxI,GAHgB,CAItC4M,mBAAmB,SAIvBI,EAAazE,GAAGmF,IAAiBrD,SAAS,SAAC7B,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBACrCwI,EADqC,eAEvCxI,EAFuC,eAGnCiN,EAAYzE,EAAOxI,GAHgB,CAItC2M,uBAAuB,SAI3BK,EAAazE,GAAGmF,IAAiBtD,SAAS,SAAC5B,EAAD,OAAUxI,EAAV,EAAUA,GAAIoM,EAAd,EAAcA,cAAd,sBACrC5D,EADqC,eAEvCxI,EAFuC,eAGnCiN,EAAYzE,EAAOxI,GAHgB,CAItCoM,gBACAQ,mBAAmB,EACnBD,uBAAuB,SAa3BK,EAAazE,GAAGxI,KAAuB,SAACyI,EAAD,OAAUxI,EAAV,EAAUA,GAAIL,EAAd,EAAcA,aAAd,sBAClC6I,EADkC,eAEpCxI,EAFoC,eAGhCiN,EAAYzE,EAAOxI,GAHa,CAInCL,sBAIJqN,EAAazE,GAAGoF,KAA0B,SAACnF,EAAD,OACxCiB,EADwC,EACxCA,MAAOC,EADiC,EACjCA,OAAQ1J,EADyB,EACzBA,GAAI4N,EADqB,EACrBA,qBADqB,sBAGrCpF,EAHqC,eAIvCxI,EAJuC,eAKnCiN,EAAYzE,EAAOxI,GALgB,CAMtCqM,gBAAiB,CAAE5C,QAAOC,SAAQkE,+BAItCZ,EAAazE,GAAGsF,KAA4B,SAACrF,EAAD,OAAUxI,EAAV,EAAUA,GAAV,sBACvCwI,EADuC,eAEzCxI,EAFyC,eAGrCiN,EAAYzE,EAAOxI,GAHkB,CAIxCqM,gBAAiBJ,EAAmBI,uBAIxCW,EAAazE,GAAGvF,KAAiB,SAACwF,EAAD,GAA4B,IAAlBgC,EAAiB,EAAjBA,IAAKvQ,EAAY,EAAZA,MAE9C,OAAIuQ,IAAQsD,MAA+B,IAAV7T,EACxBgP,YACL8E,YAAM,kBAAmB9B,EAAmBI,iBAC5C7D,GAGGA,KAGTwE,EAAazE,GAAGyF,KAAuB,SAACxF,EAAD,OAAUxI,EAAV,EAAUA,GAAV,OAAmBiO,YAAK,CAACjO,GAAKwI,O,2JCrM/D0F,EAAmB,SAACC,EAAkB3D,EAAa4D,GACvD,IAAMxD,EAAO,eAAWJ,GACxB,GAAI2D,EAAQE,aAAazD,GAAU,CAEjC,IAAM5D,EAAOmH,EAAQG,aAAa1D,GAElC,MAAa,SAAT5D,GAGS,UAATA,IAGS,SAATA,EACK,KAILA,IAAI,WAASA,IACPA,EAGN,gCAAgCuH,KAAKvH,GAChC+D,KAAKC,MAAMhE,GAGbA,GAKT,OAAOoH,GA2gBII,EAAsB,SAACC,GAAD,OAA+BC,aAChE,SAACC,GAAD,MACiD,YAA9CA,EAAqC1R,KA1gBV,SAACkR,EAAkB3D,EAAa4D,GAC9D,IAAMnU,EAAQiU,EAAiBC,EAAS3D,EAAK4D,GAE7C,OAAc,IAAVnU,IAA4B,IAAVA,EACbA,EAGc,kBAAXA,EACI,QAAVA,GAA6B,OAAVA,GAIT,KAAVA,GAA0B,OAAVA,GAA4B,QAAVA,GAA6B,SAAVA,GAIlDmU,EAGc,kBAAXnU,EACO,IAAVA,EAGFmU,EAofDQ,CACAH,EACAE,EAAUnE,IACRmE,EAAUP,cACVF,EAAiBO,EAAME,EAAUnE,IAAKmE,EAAUP,gBAvPX,CAG7CpO,GAAI,CAAEwK,IAAK,WACXqE,KAAM,CAAErE,IAAK,QACbsE,WAAY,CAAEtE,IAAK,eACnBvL,MAAO,CAAEuL,IAAK,SACduE,aAAc,CAAEvE,IAAK,gBAAiB4D,aAAclQ,OAAOU,QAAQoQ,cAAcC,SACjFC,MAAO,CAAE1E,IAAK,QAAS4D,aAAclQ,OAAO
U,QAAQoQ,cAAcE,OAClErO,OAAQ,CAAE2J,IAAK,SAAU4D,aAAclQ,OAAOU,QAAQoQ,cAAcnO,QAGpE4I,MAAO,CAAEe,IAAK,SACdd,OAAQ,CAAEc,IAAK,SAAU4D,aAAclQ,OAAOU,QAAQoQ,cAActF,QACpEyF,OAAQ,CAAE3E,IAAK,SAAUvN,KAAM,UAAWmR,cAAc,GACxDgB,eAAgB,CAAE5E,IAAK,mBACvBnP,MAAO,CAAEmP,IAAK,SACd6E,YAAa,CAAE7E,IAAK,gBACpB8E,aAAc,CAAE9E,IAAK,iBACrB+E,WAAY,CAAE/E,IAAK,eACnBgF,OAAQ,CAAEhF,IAAK,UACfiF,YAAa,CAAEjF,IAAK,iBACpBkF,sBAAuB,CAAElF,IAAK,2BAC9BmF,qBAAsB,CAAEnF,IAAK,0BAC7BoF,WAAY,CAAEpF,IAAK,cACnBqF,cAAe,CAAErF,IAAK,kBACtBsF,aAAc,CAAEtF,IAAK,iBACrBuF,QAAS,CAAEvF,IAAK,YAChBwF,QAAS,CAAExF,IAAK,YAChBlC,OAAQ,CAAEkC,IAAK,UACfyF,aAAc,CAAEzF,IAAK,iBACrB0F,cAAe,CAAE1F,IAAK,kBACtB2F,WAAY,CAAE3F,IAAK,cACnB4F,mBAAoB,CAAE5F,IAAK,uBAC3B6F,gBAAiB,CAAE7F,IAAK,qBAExB8F,cAAe,CAAE9F,IAAK,kBACtB+F,MAAO,CAAE/F,IAAK,SACdgG,OAAQ,CAAEhG,IAAK,UACfiG,gBAAiB,CAAEjG,IAAK,oBACxBkG,eAAgB,CAAElG,IAAK,oBACvBmG,OAAQ,CAAEnG,IAAK,UACfjL,SAAU,CAAEiL,IAAK,MACjBoG,kBAAmB,CAAEpG,IAAK,uBAC1BqG,aAAc,CAAErG,IAAK,iBACrBhG,UAAW,CAAEgG,IAAK,cAClB/F,UAAW,CAAE+F,IAAK,cAIlBsG,YAAa,CAAEtG,IAAK,gBACpBuG,kBAAmB,CAAEvG,IAAK,sBAC1BwG,aAAc,CAAExG,IAAK,iBACrByG,cAAe,CAAEzG,IAAK,iBAAkBvN,KAAM,WAC9CiU,cAAe,CAAE1G,IAAK,kBACtB2G,gBAAiB,CAAE3G,IAAK,oBACxB4G,yBAA0B,CAAE5G,IAAK,4BAA6BvN,KAAM,WACpEoU,kBAAmB,CAAE7G,IAAK,qBAAsBvN,KAAM,WACtDqU,aAAc,CAAE9G,IAAK,iBACrB+G,mBAAoB,CAAE/G,IAAK,uBAC3BgH,cAAe,CAAEhH,IAAK,kBACtBiH,iBAAkB,CAAEjH,IAAK,qBACzBkH,0BAA2B,CAAElH,IAAK,8BAA+BvN,KAAM,WACvE0U,mBAAoB,CAAEnH,IAAK,sBAAuBvN,KAAM,WACxD2U,sBAAuB,CAAEpH,IAAK,+BAAgCvN,KAAM,WACpE4U,6BAA8B,CAAErH,IAAK,gCAAiCvN,KAAM,WAC5E6U,6BAA8B,CAAEtH,IAAK,gCAAiCvN,KAAM,WAC5E8U,iBAAkB,CAAEvH,IAAK,qBACzBwH,iBAAkB,CAAExH,IAAK,qBACzByH,mBAAoB,CAAEzH,IAAK,uBAC3B0H,mBAAoB,CAAE1H,IAAK,uBAC3B2H,qBAAsB,CAAE3H,IAAK,yBAC7B4H,kBAAmB,CAAE5H,IAAK,qBAAsBvN,KAAM,WACtDoV,yBAA0B,CAAE7H,IAAK,4BAA6BvN,KAAM,WACpEqV,8BAA+B,CAAE9H,IAAK,iCAAkCvN,KAAM,WAC9EsV,iBAAkB,CAAE/H,IAAK,qBACzBgI,gBAAiB,CAAEhI,IAAK,mBAAoBvN,KAAM,WAClDwV,yBAA0B,CAAEjI,IAAK,6BACjCkI,yBAA0B,CAAElI,IAAK,6BAEjCmI,iBAAkB,CAAEnI,IAAK,oBAAqBvN,KAAM,WACpD2V,iBAAkB,CAAEpI,IAAK,qBAEzBqI,oBAAqB,CAAErI,IAAK,uBAAwBvN,KAAM,WAC1D6V,2BAA4B,CAAEtI,IAAK,+BACnCuI,yBAA0B,CAAEvI,IAAK,6BACjCwI,qBAAsB,CAAExI,IAAK,yBAC7ByI,qBAAsB,CAAEzI,IAAK,yBAC7B0I,gBAAiB,CAAE1I,IAAK,mBAAoBvN,KAAM,WAClDkW,uBAAwB,CAAE3I,IAAK,2BAC/B4I,qBAAsB,CAAE5I,IAAK,yBAC7B6I,qBAAsB,CAAE7I,IAAK,yBAC7B8I,sBAAuB,CAAE9I,IAAK,0BAC9B+I,eAAgB,CAAE/I,IAAK,mBACvBgJ,0BAA2B,CAAEhJ,IAAK,8BAElCiJ,0BAA2B,CAAEjJ,IAAK,+BAClCkJ,2BAA4B,CAAElJ,IAAK,+BACnCmJ,sCAAuC,CAAEnJ,IAAK,0CAE9CoJ,uBAAwB,CAAEpJ,IAAK,2BAC/BqJ,uBAAwB,CAAErJ,IAAK,2BAC/BsJ,iBAAkB,CAAEtJ,IAAK,oBAAqBvN,KAAM,WACpD8W,uBAAwB,CAAEvJ,IAAK,2BAC/BwJ,uBAAwB,CAAExJ,IAAK,2BAC/ByJ,iBAAkB,CAAEzJ,IAAK,oBAAqBvN,KAAM,WACpDiX,gBAAiB,CAAE1J,IAAK,mBAAoBvN,KAAM,WAElDkX,qBAAsB,CAAE3J,IAAK,0BAC7B4J,qBAAsB,CAAE5J,IAAK,0BAC7B6J,qBAAsB,CAAE7J,IAAK,yBAC7B8J,uBAAwB,CAAE9J,IAAK,2BAC/B+J,uBAAwB,CAAE/J,IAAK,2BAC/BgK,wBAAyB,CAAEhK,IAAK,4BAChCiK,oBAAqB,CAAEjK,IAAK,wBAC5BkK,sBAAuB,CAAElK,IAAK,0BAC9BmK,uBAAwB,CAAEnK,IAAK,2BAC/BoK,iBAAkB,CAAEpK,IAAK,qBACzBqK,mBAAoB,CAAErK,IAAK,uBAC3BsK,oBAAqB,CAAEtK,IAAK,wBAC5BuK,mBAAoB,CAAEvK,IAAK,uBAE3BwK,cAAe,CAAExK,IAAK,mBACtByK,cAAe,CAAEzK,IAAK,mBACtB0K,kBAAmB,CAAE1K,IAAK,uBAC1B2K,iBAAkB,CAAE3K,IAAK,sBACzB4K,gBAAiB,CAAE5K,IAAK,qBACxB6K,eAAgB,CAAE7K,IAAK,oBACvB8K,sBAAuB,CAAE9K,IAAK,2BAE9B+K,cAAe,CAAE/K,IAAK,kBACtBgL,mBAAoB,CAAEhL,IAAK,uBAC3BiL,mBAAoB,CAAEjL,IAAK,uBAC3BkL,uBAAwB,CAAElL,IAAK,2BAC/BmL,uBAAwB,CAAEnL,IAAK,2BAC/BoL,mBAAoB,CAAEpL,IAAK,uBAC3BqL,0BAA2B,CAAErL,IAAK,8BAClCsL,yBAA0B,CAAEtL,IAAK,6BACjCuL,4BAA6B,CAAEvL,IAAK,gCACpCwL,4BAA6B,CAAExL,IAAK,gCACpCyL,+BAAgC
,CAAEzL,IAAK,mCACvC0L,mBAAoB,CAAE1L,IAAK,uBAC3B2L,sBAAuB,CAAE3L,IAAK,0BAC9B4L,sBAAuB,CAAE5L,IAAK,0BAC9B6L,oBAAqB,CAAE7L,IAAK,wBAC5B8L,oBAAqB,CAAE9L,IAAK,wBAC5B+L,4BAA6B,CAAE/L,IAAK,gCACpCgM,4BAA6B,CAAEhM,IAAK,gCACpCiM,mBAAoB,CAAEjM,IAAK,uBAC3BkM,wBAAyB,CAAElM,IAAK,4BAChCmM,wBAAyB,CAAEnM,IAAK,4BAChCoM,yBAA0B,CAAEpM,IAAK,6BACjCqM,iBAAkB,CAAErM,IAAK,qBACzBsM,wBAAyB,CAAEtM,IAAK,4BAChCuM,wBAAyB,CAAEvM,IAAK,4BAChCwM,wBAAyB,CAAExM,IAAK,4BAChCyM,4BAA6B,CAAEzM,IAAK,+BAAgCvN,KAAM,WAC1Eia,yBAA0B,CAAE1M,IAAK,4BAA6BvN,KAAM,WACpEka,iBAAkB,CAAE3M,IAAK,sBACzB4M,0BAA2B,CAAE5M,IAAK,6BAA8BvN,KAAM,WACtEoa,0BAA2B,CAAE7M,IAAK,8BAClC8M,wBAAyB,CAAE9M,IAAK,4BAChC+M,0BAA2B,CAAE/M,IAAK,8BAClCgN,0BAA2B,CAAEhN,IAAK,8BAClCiN,uBAAwB,CAAEjN,IAAK,2BAC/BkN,uBAAwB,CAAElN,IAAK,2BAC/BmN,uBAAwB,CAAEnN,IAAK,2BAC/BoN,yBAA0B,CAAEpN,IAAK,4BAA6BvN,KAAM,WACpE4a,6BAA8B,CAAErN,IAAK,iCACrCsN,gCAAiC,CAAEtN,IAAK,oCACxCuN,mCAAoC,CAAEvN,IAAK,uCAC3CwN,yBAA0B,CAAExN,IAAK,6BACjCyN,6BAA8B,CAAEzN,IAAK,iCACrC0N,2BAA4B,CAAE1N,IAAK,+BACnC2N,+BAAgC,CAAE3N,IAAK,mCACvC4N,uBAAwB,CAAE5N,IAAK,0BAA2BvN,KAAM,WAEhEob,WAAY,CAAE7N,IAAK,eACnB8N,cAAe,CAAE9N,IAAK,kBACtB+N,YAAa,CAAE/N,IAAK,gBACpBgO,gBAAiB,CAAEhO,IAAK,qBACxBiO,mBAAoB,CAAEjO,IAAK,wBAC3BkO,qBAAsB,CAAElO,IAAK,0BAC7BmO,eAAgB,CAAEnO,IAAK,oBACvBoO,mBAAoB,CAAEpO,IAAK,wBAC3BqO,sBAAuB,CAAErO,IAAK,2BAC9BsO,wBAAyB,CAAEtO,IAAK,6BAChCuO,kBAAmB,CAAEvO,IAAK,uBAC1BwO,iBAAkB,CAAExO,IAAK,sBACzByO,oBAAqB,CAAEzO,IAAK,yBAC5B0O,sBAAuB,CAAE1O,IAAK,2BAC9B2O,gBAAiB,CAAE3O,IAAK,qBACxB4O,oBAAqB,CAAE5O,IAAK,yBAC5B6O,oBAAqB,CAAE7O,IAAK,wBAC5B8O,oBAAqB,CAAE9O,IAAK,wBAC5B+O,eAAgB,CAAE/O,IAAK,mBACvBgP,iCAAkC,CAAEhP,IAAK,qCAAsCvN,KAAM,WACrFwc,+BAAgC,CAAEjP,IAAK,oCACvCkP,mCAAoC,CAAElP,IAAK,wCAC3CmP,+BAAgC,CAAEnP,IAAK,oCACvCoP,+BAAgC,CAAEpP,IAAK,oCACvCqP,uBAAwB,CAAErP,IAAK,6BAC/BsP,2CAA4C,CAC1CtP,IAAK,iDAEPuP,4BAA6B,CAAEvP,IAAK,kCACpCwP,uBAAwB,CAAExP,IAAK,6BAC/ByP,2CAA4C,CAC1CzP,IAAK,iDAEP0P,0BAA2B,CAAE1P,IAAK,gCAClC2P,yBAA0B,CAAE3P,IAAK,+BACjC4P,6BAA8B,CAAE5P,IAAK,mCACrC6P,+BAAgC,CAAE7P,IAAK,qCACvC8P,2BAA4B,CAAE9P,IAAK,iCACnC+P,0BAA2B,CAAE/P,IAAK,gCAClCgQ,8BAA+B,CAAEhQ,IAAK,oCACtCiQ,gCAAiC,CAAEjQ,IAAK,sCACxCkQ,sBAAuB,CAAElQ,IAAK,4BAC9BmQ,qBAAsB,CAAEnQ,IAAK,2BAC7BoQ,yBAA0B,CAAEpQ,IAAK,+BACjCqQ,2BAA4B,CAAErQ,IAAK,iCACnCsQ,wBAAyB,CAAEtQ,IAAK,6BAA8BvN,KAAM,WACpE8d,sBAAuB,CAAEvQ,IAAK,4BAC9BwQ,sBAAuB,CAAExQ,IAAK,4BAC9ByQ,6BAA8B,CAAEzQ,IAAK,kCAAmCvN,KAAM,WAC9Eie,oCAAqC,CAAE1Q,IAAK,0CAC5C2Q,6BAA8B,CAAE3Q,IAAK,mCACrC4Q,yBAA0B,CAAE5Q,IAAK,8BAA+BvN,KAAM,WACtEoe,0BAA2B,CAAE7Q,IAAK,gCAClC8Q,uBAAwB,CAAE9Q,IAAK,6BAC/B+Q,eAAgB,CAAE/Q,IAAK,mBAEvBgR,iBAAkB,CAAEhR,IAAK,qBAEzBiR,sBAAuB,CAAEjR,IAAK,2BAC9BkR,eAAgB,CAAElR,IAAK,mBACvBmR,eAAgB,CAAEnR,IAAK,sBAyBZoR,EAAgB,SAACnN,GAC5B,IAAMoN,EAAmBrN,EAAoBC,GACvCqN,EAZ4B,SAACrN,GACnC,IAAMsN,EAAuBC,MAAMC,KAAKxN,EAAKyN,YAC1CnT,QAAO,SAAC4F,GAAD,OAAeA,EAAUwN,KAAKC,WAAW,yBAChDnT,KAAI,SAAC0F,GAAD,sBACFA,EAAUwN,KAAK9X,QAAQ,QAAS,IAAMsK,EAAU1U,UAE/CoiB,EAASvT,YAASiT,GACxB,OAAOO,YAAQD,QAAU1gB,EAAY0gB,EAKjBE,CAAqB9N,GACzC,OAAO,eAAKoN,EAAZ,CAA8BC,iBAGnBU,EAAyC,CACpDpN,eAAgBqN,IAAqB,QAAU,W,gCC7kBjD,gXA0DavP,GA7CwBtL,uBAAY,UAC5CC,IAD4C,qBAQRD,uBAAY,UAChDC,IADgD,yBAqCtBa,YAAmB,UAG7Cb,IAH6C,sBAOrCuL,EAAwBxL,uBAAY,UAC5CC,IAD4C,2BAQpC0L,EAA6B7K,YAAmB,UAGxDb,IAHwD,gCAKhD2L,EAA4B9K,YAAoB,GAAD,OACvDb,IADuD,+BAW/C6L,EAAmBhL,YAAmB,UAG9Cb,IAH8C,sBAUtC9B,EAAwB6B,uBAAY,UAC5CC,IAD4C,qBAUpC8L,EAA2B/L,uBAAY,UAC/CC,IAD+C,wBAIvCgM,EAA6BjM,uBAAY,UACjDC,IADiD,gCAIzCmM,EAAwBpM,uBAAY,UAC5CC,IAD4C,2BAcpC0I,EAAkB7H,YAAmB,UAG7Cb,IAH6C,sB,iCCnIlD,kCAAO,IAAMiC,EAAoB,kB,8hDCkD7B4Y,iBAAkBC,iBACTC,sBAAwB,SAAC,GAG/B,IAFeC,EAEhB,EAFFH,iBACkBI,EAChB,EAD
FH,iBAEAD,iBAAmBG,EACnBF,iBAAmBG,GAInBC,mBAAoB,EAGpBC,iBAAkB,EAGlBC,mBAAgBthB,EAChBuhB,yBAAsBvhB,EACtBwhB,qBAAkBxhB,EAElByhB,WAEJ,SAASC,oBAAoBjZ,GACzB,OAAOA,EAAE1H,WACJ2H,QAAQ,KAAM,SACdA,QAAQ,KAAM,QACdA,QAAQ,KAAM,QACdA,QAAQ,KAAM,UACdA,QAAQ,KAAM,SACdA,QAAQ,KAAM,SACdA,QAAQ,MAAO,SACfA,QAAQ,MAAO,SACfA,QAAQ,MAAO,SAGxB,IAAMiZ,UAAY,SAAC9S,EAAKvQ,GACpBmjB,WAAWvd,SAASmD,+DAAgB,CAChCwH,MACAvQ,YAKFsjB,gBAAkB,SAACC,GACrB,IAAMlY,EAAWmY,iEAAeL,WAAWM,YAC3C,cAAOpY,QAAP,IAAOA,OAAP,EAAOA,EAAWkY,IAGtB,SAASG,UAAUvZ,GACf,MAAmB,kBAAPA,IAAoBA,EAAEgY,WAAW,YAAchY,EAAEgY,WAAW,aAC7DhY,EACFC,QAAQ,KAAM,OACdA,QAAQ,KAAM,OACdA,QAAQ,MAAO,OACfA,QAAQ,MAAO,QAGxB7H,QAAQC,IAAI,yBACZD,QAAQC,IAAI2H,GACL,oCAMXlG,OAAO0f,WAAa,CAChBC,KAAM,IACNC,MAAO,KACPC,KAAM,KACNC,KAAM,OACNC,eAAe,EACfC,cAAc,EACdC,OAAQ,KACR1U,MAAK,sBAAE2U,yDAAa,gBAAf,gCAA2B,EAChC1U,OAAM,uBAAE0U,yDAAa,iBAAf,kCAA4B,EAClCC,WAAW,EACXC,gBAAiB,EACjBC,iBAAkB,EAClBC,WAAW,EACXC,aAAa,EACbtR,MAAO,KACPuR,OAAQ,KACR9X,MAAO,KACP+X,IAAK,KAELC,YAAa,SAAUC,GAEnB,MAAiC,qBAAnB5jB,KAAK4jB,IAGvBC,QAAS,SAAUC,GACf,IAAIlB,EAAOD,WAAWC,KAkCtB,OAhCAA,GAAQ,UAAYD,WAAWnU,MAAM/M,WACnC,WAAakhB,WAAWlU,OAAOhN,YAEJ,IAAzBkhB,WAAWS,YACXR,GAAQ,oBAAsBD,WAAWU,gBAAgB5hB,WACvD,qBAAuBkhB,WAAWW,iBAAiB7hB,YAGhC,OAArBkhB,WAAWE,QACXD,GAAQ,UAAYD,WAAWE,MAAMphB,YAGjB,OAApBkhB,WAAWG,OACXF,GAAQ,SAAWD,WAAWG,KAAKrhB,aAGN,IAA7BkhB,WAAWK,gBACXJ,GAAQ,wBAGM,IAAdkB,GAA4C,OAAtBnB,WAAWO,SACjCN,GAAQ,WAAaD,WAAWO,OAAOzhB,YAGnB,SAApBkhB,WAAWI,OACXH,GAAQ,SAAWD,WAAWI,MAGX,OAAnBJ,WAAWe,MACXd,GAAQ,QAAUD,WAAWe,KAG1Bd,GAGXmB,UAAW,WAGP,IAFA,IAAIC,EAAY5d,SAAS6d,SAASrB,KAAK7V,MAAM,KACzCmX,EAAMF,EAAUvX,OACbyX,KACH,GAAY,IAARA,EAAW,CACX,IAAIC,EAAIH,EAAUE,GAAKnX,MAAM,KACzB4V,WAAWgB,YAAYQ,EAAE,KAAuB,qBAATA,EAAE,KACzCxB,WAAWwB,EAAE,IAAMC,mBAAmBD,EAAE,UAGxCH,EAAUE,GAAKzX,OAAS,IACxBkW,WAAWC,KAAOoB,EAAUE,IAKxC,IAAIG,EAAW,CAAC,YAAa,cAAe,iBAE5C,IADAH,EAAMG,EAAS5X,OACRyX,KAC+B,SAA9BvB,WAAW0B,EAASH,MAAkD,IAA9BvB,WAAW0B,EAASH,KAAgD,MAA9BvB,WAAW0B,EAASH,KAA+C,IAA9BvB,WAAW0B,EAASH,IACvIvB,WAAW0B,EAASH,KAAQ,EAE5BvB,WAAW0B,EAASH,KAAQ,EAIpC,IAAII,EAAU,CAAC,QAAS,SAAU,kBAAmB,oBAErD,IADAJ,EAAMI,EAAQ7X,OACPyX,KACH,GAAwC,kBAA7BvB,WAAW2B,EAAQJ,IAC1B,IACIvB,WAAW2B,EAAQJ,IAAQK,SAAS5B,WAAW2B,EAAQJ,KAE3D,MAAOhe,GACH3E,QAAQC,IAAI,sCAAwC8iB,EAAQJ,IAC5DvB,WAAW2B,EAAQJ,IAAQ,EA0BvC,OArB0B,OAAtBvB,WAAWO,QAAyC,KAAtBP,WAAWO,QACzCjB,oBAAsB7b,SAAS6d,SAASO,OAAO/iB,WAAa2E,SAAS6d,SAASQ,SAAShjB,WACvFugB,cAAgBW,WAAWO,OAC3BhB,iBAAkB,GAElBS,WAAWO,OAAS,KAGpBP,WAAWlU,OAAS,GAAKkU,WAAWnU,MAAQ,GAC5CmU,WAAWM,cAAe,EAC1BN,WAAWY,WAAY,GAEvBZ,WAAWM,cAAe,EAG1BN,WAAWW,iBAAmB,GAAKX,WAAWU,gBAAkB,EAChEV,WAAWS,WAAY,EAEvBT,WAAWS,WAAY,EAGnBT,WAAWI,MACf,IAAK,QAMD,GALAJ,WAAWE,MAAQ,QACnBF,WAAW+B,SAAU,EACrB/B,WAAWG,MAAO,EAClBH,WAAWa,aAAc,GAEO,IAA5Bb,WAAWM,aAAwB,CACnCN,WAAWM,cAAe,EAC1BN,WAAWlU,OAASkW,KAAKC,MACzB,IACMjb,EAAegZ,WAAWnU,MAA2B,IAAnBmU,WAAWnU,OAD5B,IAEvBmU,WAAWnU,MAAQmU,WAAWlU,OAAS9E,EAG3CmY,mBAAoB,EACpBC,iBAAkB,EAClB,MAEJ,IAAK,OACL,QACIY,WAAWI,KAAO,SAO9B8B,WAAY,WACRC,QAAQC,aAAa,KAAM,GAAIpC,WAAWkB,SAAQ,KAGtDmB,0BAA2B,SAAUC,EAAQzW,EAAOC,GACpB,OAAxByW,sBACAvC,WAAWM,aAAegC,EAC1BtC,WAAWnU,MAAQA,EACnBmU,WAAWlU,OAASA,IAI5B0W,eAAgB,SAAUzB,GACjBA,IACLf,WAAWe,IAAMA,EACjBf,WAAWkC,eAGfO,yBAA0B,SAAUH,EAAQzW,EAAOC,GAiB/C,IAhBe,IAAXwW,IAA8B,OAAVzW,GAA6B,OAAXC,GAAmBD,GAAS,GAAKC,GAAU,GAAKD,GAASC,KAC/FwW,GAAS,EACTzW,EAAQ,EACRC,EAAS,GAGsB,OAA/BxL,OAAOiiB,oBACPvC,WAAWS,UAAY6B,EAEvBtC,WAAWS,WAAY,EAG3BT,WAAWU,gBAAkBhnB,KAAKgpB,MAAM7W,GACxCmU,WAAWW,iBAAmBjnB,KAAKgpB,MAAM5W,GACzCkU,WAAWkC,cAEI,IAAXI,GAAmBzW,EAAQ,GAAKC,EAAS,GAAKD,EAAQC,EAAQ,CAC9D,IAAI6W,EAAK7D,iBAAiBjT,GACtB+W,EAAK9D,iBAAiBhT,GACtB6W,IAAOC,IACPA,EAAK,IAET
nf,SAASof,eAAe,4BAA4BC,UAClD,2MAEWH,EAAK,UAAY5D,iBAAiBlT,GAAS,sBAC3C+W,EAAK,UAAY7D,iBAAiBjT,GAAU,4BACpCiX,0EAAcrpB,KAAKgpB,OAAO5W,EAASD,GAAS,MAAS,6OAI1EtL,EAAE,qBAAqBQ,OACvBR,EAAE,qBAAqB+Q,MAAM,OAC7B/Q,EAAE,sBAAsByiB,QAAQ,CAC5BriB,MAAM,EACNG,MAAO,CAAEC,KAAM,IAAKI,KAAM,GAC1BV,UAAW,cAGfF,EAAE,qBAAqBY,OACvBZ,EAAE,qBAAqB+Q,MAAM,SAIrC2R,eAAgB,WACZzD,WAAWvd,SAAS0C,mEAGxBue,cAAe,WACX1D,WAAWvd,SAASyC,oEAI5Bsb,WAAWoB,YAKX,IAAI+B,oBAAsB,EAE1B,SAASC,mBACL,IAA4B,IAAxBD,mBACA,OAAOA,mBAGX,GAAuB,qBAAZE,SAAmD,kBAAjBjgB,aAA2B,CAEpE,IACIA,aAAaC,QAFN,eAGPD,aAAakgB,WAHN,QAIPH,oBAAqB,EAEzB,MAAO5f,GACH3E,QAAQC,IAAI0E,GACZ4f,oBAAqB,QAGzBA,oBAAqB,EAGzB,OAAOA,mBAGX,SAASI,iBAAiBhF,GACtB,IAAIiF,EAAM,KAEV,KAC+B,IAAvBJ,mBACAI,EAAMpgB,aAAaqgB,QAAQlF,GAE3B3f,QAAQC,IAAI,iCAGpB,MAAO6kB,GAEH,OADA9kB,QAAQC,IAAI6kB,GACL,KAGX,MAAmB,qBAARF,GAA+B,OAARA,EACvB,KAKJA,EAGX,SAASG,iBAAiBpF,EAAMliB,GAE5B,IACI,IAA2B,IAAvB+mB,mBAEA,OADAhgB,aAAaC,QAAQkb,EAAMliB,EAAMyC,aAC1B,EAGf,MAAO4kB,GACH9kB,QAAQC,IAAI6kB,GAGhB,OAAO,EAGX,SAASE,SAASC,GACd,GAAwB,UAApB7D,WAAWI,KACX,MAAO,QAGX,IAAIoD,EAAMD,iBAAiB,gBAC3B,MAAmB,qBAARC,GAA+B,OAARA,GAAwB,cAARA,EACvCK,EAEAL,EAIf,SAASM,SAAS5D,GACd,MAAwB,UAApBF,WAAWI,OAIXF,IAAU6D,cAGPJ,iBAAiB,eAAgBzD,IAG5C5f,OAAOyjB,aAAeH,SAAS,SAE/B5iB,QAAQgjB,cACR,IAAIC,iBAAkB,EAqBtB,SAASC,oBAAoBzlB,GAIzB,IAHA,IAC0B6M,EAAG6Y,EADzBC,EAAK,GACL/qB,EAAI,EAAGgrB,GAAK,EAAGC,EAAI,EAEhBhZ,GAAK6Y,EAAI1lB,EAAE8lB,OAAOlrB,MAAMmrB,WAAW,IAAI,CAC1C,IAAIC,EAAKnZ,GAAK,IAAMA,GAAK,GACrBmZ,IAAMH,IACNF,IAAKC,GAAK,GACVC,EAAIG,GAERL,EAAGC,IAAMF,EAGb,OAAOC,EAGX,SAASM,mBAAmBC,EAAGC,GAI3B,IAHA,IAAIC,EAAKX,oBAAoBS,EAAEG,eAC3BC,EAAKb,oBAAoBU,EAAEE,eAEtBzrB,EAAI,EAAGwrB,EAAGxrB,IAAM0rB,EAAG1rB,GAAIA,IAC5B,GAAIwrB,EAAGxrB,KAAO0rB,EAAG1rB,GAAI,CACjB,IAAI2rB,EAAIC,OAAOJ,EAAGxrB,IAAK6rB,EAAID,OAAOF,EAAG1rB,IACrC,OAAI2rB,EAAElmB,aAAe+lB,EAAGxrB,IAAM6rB,EAAEpmB,aAAeimB,EAAG1rB,GACvC2rB,EAAIE,EAEHL,EAAGxrB,GAAK0rB,EAAG1rB,GAAM,GAAK,EAK1C,OAAOwrB,EAAG/a,OAASib,EAAGjb,OAM1B,SAASqb,iBAAiB/b,EAAMgc,GAC5B,IAAIC,EAAO,IAAIC,KAAK,CAAClc,GAAO,CACxB/J,KAAM,6BAGNkmB,EAAMC,IAAIC,gBAAgBJ,GAC1BK,EAAOjiB,SAASkiB,cAAc,KAClCD,EAAKE,aAAa,OAAQL,GAC1BG,EAAKE,aAAa,WAAYR,GAE9B,IAAIS,EAAKpiB,SAASof,eAAe,uBACjCgD,EAAG/C,UAAY,GACf+C,EAAGC,YAAYJ,GAEfK,YAAW,WACPF,EAAGG,YAAYN,GACfF,IAAIS,gBAAgBV,KACrB,IAEHG,EAAKQ,QAGT,SAASC,mBAAmB/c,EAAMgc,GAC9BD,iBAAiBhY,KAAKiZ,UAAUhd,GAAOgc,GAG3C,SAASiB,WAAWd,EAAKpE,GAerB,MAdmB,qBAARoE,IAGPA,EAAM,KAGgB,IAAtBA,EAAIe,QAAQ,OACZf,EAAMA,EAAIgB,UAAU,EAAGhB,EAAIe,QAAQ,OAOhCf,EAJIvF,WAAWkB,QAAQC,GAOlC,SAASqF,cAAcjB,GACnB9hB,SAAS6d,SAAWvB,UAAUsG,WAAWd,GAAK,IAI9CjE,SAASmF,SAzGY,OAArBzG,WAAWE,OACX4D,SAAS9D,WAAWE,OACpB6D,aAAe/D,WAAWE,MAC1B5f,OAAOU,QAAQgjB,eAEfhE,WAAWE,MAAQ6D,aAGC,OAApB/D,WAAWG,MACXwD,iBAAiB,oBAAqB3D,WAAWG,MACjD8D,gBAAkBjE,WAAWG,MAE7BH,WAAWG,KAAOoD,iBAAiB,qBAgGvCjjB,OAAOomB,uBAAyB,SAACnB,GAE7B,OADA9hB,SAAS6d,SAAWvB,UAAUwF,EAAMvF,WAAWkB,YACxC,GAGX,IAAIyF,4BAA8B,EAC9BC,uBAAwB,EACxBC,gBAAiB,EAErB,SAASC,sBAAsB1kB,EAAI2kB,EAAMxB,GACrC,IAAIyB,EAAU,EACVtD,EAAQ,SAERjgB,SAAS6d,SAASxiB,WAAW0f,WAAW,YAAc+G,EAAIzmB,WAAW0f,WAAW,YAIhFwI,EAAU,IACHvjB,SAAS6d,SAASxiB,WAAW0f,WAAW,aAAe+G,EAAIzmB,WAAW0f,WAAW,aACxFkF,EAAQ,eAGZ,IAAIuD,EAAWZ,WAAWd,GAE1BQ,YAAW,WACPtiB,SAASof,eAAe,kBAAkBC,WAAa,gDAAkD/C,UAAUkH,GAAY,qBAAuBxH,oBAAoB8F,GAAO,sDAAwDwB,EAAO,IAAM3kB,EAAK,wCAE3PpB,QAAQkmB,cAAc3B,GAAK,SAAUnc,GACjC,GAAoB,qBAATA,GAAiC,OAATA,GAA8C,kBAAtBA,EAAK+d,cAA6B/d,EAAK+d,eAAiBJ,GAI/G,GAFAtjB,SAASof,eAAekE,EAAO,IAAM3kB,EAAK,WAAW0gB,UAAY,MAE5D+D,eAGD,GAFAA,gBAAiB,EAEbD,sBAAuB,CACvBtmB,OAAO8mB,KAAKrH,UAAUkH,GAAW,UACjCL,uBAAwB,EACxB,IAAMne,EAAmBkX,gBAAgB,oBACzClc,SAASof,eAAe,sBAAsBC,UAAY,
4BAA8Bra,EAAiBse,GAAMxI,KAAO,iBAAmBwB,UAAUkH,GAAY,KAAOxH,oBAAoB8F,GAAO,4DAEjM9hB,SAASof,eAAe,sBAAsBC,WAAa,kCAAoCrD,oBAAoB8F,GAAO,WAC1H9hB,SAAS6d,SAAWvB,UAAUkH,GAC9B1mB,EAAE,oBAAoB8mB,MAAM,YAIhB,qBAATje,GAAiC,OAATA,GAA8C,kBAAtBA,EAAK+d,cAA6B/d,EAAK+d,eAAiBJ,IAC/GrD,EAAQ,iBAGZjgB,SAASof,eAAekE,EAAO,IAAM3kB,EAAK,WAAW0gB,UAAYY,IACjEiD,6BACmC,IAC/BC,uBAAwB,EACxBnjB,SAASof,eAAe,sBAAsBC,UAAY,wEAIjE,GAAL1gB,EAAW4kB,GAGnB1mB,OAAOgnB,uBAAyB,SAAgCP,GAG5DF,gBAAiB,EACjB,IAAIU,EAAU,GACR9e,EAAmBkX,gBAAgB,oBACrC4B,EAAM9Y,EAAiBse,GAAMS,cAAc1d,OAC3C2d,EAAQ,EAQZ,IANAhkB,SAASof,eAAe,sBAAsBC,UAAY,GAC1Drf,SAASof,eAAe,kBAAkBC,UAAY,GACtDrf,SAASof,eAAe,kBAAkBC,UAAYra,EAAiBse,GAAMxI,KAC7Ehe,EAAE,oBAAoB8mB,MAAM,QAE5BV,4BAA8BpF,EACvBA,KAAO,CACV,IAAIgE,EAAM9c,EAAiBse,GAAMS,cAAcjG,GAC/CgG,EAAQhC,IAAO,EACfuB,sBAAsBW,IAASV,EAAMxB,GAwBzC,OAnBAQ,YAAW,YACgB,IAAnBc,iBACApjB,SAASof,eAAe,sBAAsBC,UAAY,oDAC1D9hB,QAAQ0mB,eAAeX,EAAMpH,iBAAiB,SAAUvW,GAGpD,IADAmY,EAAMnY,EAAKue,KAAK7d,OACTyX,KAAO,CACV,IAAIgE,EAAMnc,EAAKue,KAAKpG,GAAK,GAEG,qBAAjBgG,EAAQhC,KACfoB,8BACAY,EAAQhC,IAAO,EACfuB,sBAAsBW,IAASV,EAAMxB,WAKtD,MAEI,GAGXjlB,OAAOsnB,2BAA6B,WAChCnkB,SAASof,eAAe,4BAA4BxmB,MAAQsjB,gBAAgB,cAC5Elc,SAASof,eAAe,qBAAqBC,UAAYnD,gBAAgB,kBACzElc,SAASof,eAAe,0BAA0BC,UAAY,GAC9DviB,EAAE,wBAAwB8mB,MAAM,SAGpC/mB,OAAOunB,wBAA0B,WAE7B,IAAMC,EAAgBrkB,SAASof,eAAe,4BAA4BxmB,MAEpD,KAAlByrB,GAAiD,KAAzBA,EAAche,OAEtCvJ,EAAEwnB,KAAK,CACHxC,IAAK5F,gBAAgB,kBAAoB,0CACrCA,gBAAgB,eAAiB,SACjCqI,mBAAmBrI,gBAAgB,aAAe,QAClDqI,mBAAmBC,wDAAiB,OAASH,EACjDI,OAAO,EACPC,OAAO,EACPC,QAAS,CACL,gBAAiB,qBACjB,OAAU,YAEdC,UAAW,CAACC,iBAAiB,KAEhCC,MAAK,SAAUnf,GAGe,kBAF3BA,EAAOpI,QAAQwnB,IAAIC,YAAY,iCAAkCrf,IAEjDkZ,QAAuC,OAAhBlZ,EAAKkZ,SAExC1jB,QAAQoN,KAAK,0DAA2D5C,GACxEA,EAAO,MAGX7I,EAAE,wBAAwB8mB,MAAM,WAEnCqB,MAAK,WAEF9pB,QAAQoN,KAAK,kCACbvI,SAASof,eAAe,0BAA0BC,UAAY,uDAGlErf,SAASof,eAAe,0BAA0BC,UAAY,iDAItE,IAAI6F,mBAAqB,KACrBC,kBAAoB,KAExBtoB,OAAOuoB,2BAA6B,SAAC9B,EAAMxI,EAAMgH,GAC7CoD,mBAAqB5B,EACrB6B,kBAAoBrD,EAEpB9hB,SAASof,eAAe,4BAA4BC,UAAYvE,EAChE9a,SAASof,eAAe,6BAA6BC,UAAYvE,EACjE9a,SAASof,eAAe,2BAA2BC,UAAYyC,EAC/D9hB,SAASof,eAAe,0BAA0BC,UAAY,GAE9DviB,EAAE,wBAAwB8mB,MAAM,SAGpC/mB,OAAOwoB,wBAA0B,WAC7B,IAAMC,EAAatlB,SAASof,eAAe,0BAEvC+F,mBACA5nB,QAAQgoB,eAAerJ,gBAAiBsI,uDAAeW,mBAAmB,SAAUK,GACjE,OAAXA,GACAL,kBAAoB,KACpBroB,EAAE,wBAAwB8mB,MAAM,QAChC7H,WAAWvd,SAAS0D,mEAEpBojB,EAAWjG,UAAY,sEAMvC,IAAI7hB,QAAU,CACVioB,MAAO,GACPC,cAAe,GACf/f,KAAM,KACNrB,SAAU,iBACVqhB,QAAS,UACTC,gBAAiB,UACjBC,MAAO,GAEPC,SAAU,EACVC,aAAc,EAEdC,aAAc,EAEdC,aAAc,KAGlB,SAASD,aAAaE,GAGlB,OAA6B,IAAzB1oB,QAAQwoB,aACD,EAKAxoB,QAAQwoB,aAIvB,SAASG,aAAajF,EAAGC,GACrB,OAAID,EAAEkF,SAAWjF,EAAEiF,UACP,EAERlF,EAAEkF,SAAWjF,EAAEiF,SACR,EAEJnF,mBAAmBC,EAAEpG,KAAMqG,EAAErG,MAGxC,SAASuL,qBAAqBC,GAC1B,IAAIC,EAAM,GACNC,EAAS,GAEb,IAAK,IAAI3e,KAAKye,EACLA,EAAOG,eAAe5e,IAIL,qBAAX0e,EAAI1e,KACX0e,EAAI1e,GAAKye,EAAOze,GAChB2e,EAAOE,KAAK7e,IAcpB,OAVA2e,EAAOG,MAAK,SAAUzF,EAAGC,GACrB,OAAIoF,EAAIrF,GAAGkF,SAAWG,EAAIpF,GAAGiF,UACjB,EAERG,EAAIrF,GAAGkF,SAAWG,EAAIpF,GAAGiF,SAClB,EAEJnF,mBAAmBC,EAAGC,MAG1BqF,EAMX3pB,OAAO+pB,WAAa,SAACpK,GACjB,GAAIA,GAAiB,KAATA,GAAiD,OAAlCxc,SAASof,eAAe5C,GAAgB,CAC/D,IAAIqK,EAAS/pB,EAAE,IAAM0f,GAAMqK,SACL,qBAAXA,GAEP/pB,EAAE,cAAcgqB,QAAQ,CAAEC,UAAWF,EAAOG,IAAM,IAAM,GAKhE,OAAO,GAMXnqB,OAAOoqB,gBAAkB,CACrBC,KAAM,GACNC,QAAS,GACTC,QAAS,IAIb,IAAMC,iBAAmB,CACrBC,oBAAqB,GACrBC,GAAI,UAEJL,KAAM,GACNC,QAAS,GACTC,QAAS,GAITI,UAAW,SAAUC,EAAQ3b,EAAO4b,EAAW1tB,EAAO2tB,GAClD,GAAqB,OAAjBnqB,QAAQmI,MAAgD,qBAAxBnI,QAAQmI,KAAKoC,OAC7C,MAAO,GAGX,GAA0C,qBAA/BvK,QAAQmI,KAAKoC,OAAO+D,GAC3B,MAAO,GAGX,GAAqD,qBAA1CtO,QAAQmI,KAAKoC
,OAAO+D,GAAOgD,WAClC,MAAO,GAGX,GAAgE,qBAArDtR,QAAQmI,KAAKoC,OAAO+D,GAAOgD,WAAW4Y,GAC7C,MAAO,GAGX,IAAIve,EAAM2C,EAAQ,IAAM4b,EAcxB,MAZqB,qBAAV1tB,IACPA,EAAQ,IAGiC,qBAAlCJ,KAAK0tB,oBAAoBne,GAChCvP,KAAK0tB,oBAAoBne,GAAO,CAAE6a,MAAO,GAEzCpqB,KAAK0tB,oBAAoBne,GAAK6a,QAK3ByD,EAAS,gDAAkD3b,EAAQ,sHAAwH4b,EAAY,wBAA0BA,EAAY,SAFpPve,EAAMA,EAAM,IAAMvP,KAAK0tB,oBAAoBne,GAAK6a,OAEoN,uBAAyB7a,EAAM,gFAAkFnP,EAAQ,IAAM2tB,GAGvYC,WAAY,SAAUhqB,EAAOiQ,EAAOiB,EAAY7H,GAS5C,MARsB,qBAAXA,IACPA,EAAS,IAGa,qBAAf6H,IACPA,EAAa,IAGV,kFACoBA,EAAa,sEAGlBlR,EAAQ,iBACRiQ,EAAQ,+CAEP5G,EAAS,+BAIpC4gB,aAAc,SAAU/d,EAAKge,EAAM3e,EAAKiX,GACpC,GAA0B,qBAAdtW,EAAIX,GAAuB,CACnC,IAAIvT,EAAIkU,EAAIX,GAAK2e,GAEjB,MAAmB,qBAAPlyB,EACDwqB,EAGQ,oBAAPxqB,EACDA,EAAEyxB,iBAAiBE,IAGvB3xB,EAGX,OAAOwqB,GAGX2H,UAAW,SAAUjc,GACjB,MAAkC,qBAAvBA,EAAMkc,cACLpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,QAASpb,EAAMkc,aAAclc,EAAMkc,cAAc3sB,WAClF,SAAWyQ,EAAMlQ,KAAKqsB,QAAQnc,EAAMlQ,KAAKyK,OAASyF,EAAMkc,aAAa3hB,OAAS,IAAIhL,YAAY2H,QAAQ,KAAM,KAG5GpJ,KAAKiuB,aAAajuB,KAAKstB,KAAM,QAASpb,EAAMob,KAAMpb,EAAMob,MAAO7rB,WAAW2H,QAAQ,KAAM,MAGpGklB,SAAU,SAAUpc,GAChB,MAAkC,qBAAvBA,EAAMkc,aACNpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMkc,aAAc,uCAAuC3sB,WAGpGzB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMob,KAAM,wCAG5DiB,SAAU,SAAUrc,GAChB,MAAkC,qBAAvBA,EAAMkc,aACNpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMkc,aAAc,MAG7DpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMob,KAAM,OAG5DkB,WAAY,SAAUtc,GAClB,MAAkC,qBAAvBA,EAAMkc,aACNpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,SAAUpb,EAAMkc,aAAc,GAG/DpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,SAAUpb,EAAMob,KAAM,IAG9DmB,aAAc,SAAUnB,EAAMC,GAC1B,IAAIhe,EAAM+d,EAAO,IAAMC,EAEnBvpB,EAAQhE,KAAKiuB,aAAajuB,KAAKutB,QAAS,QAAShe,EAAKge,GAAS9rB,WAAW2H,QAAQ,KAAM,KAC5F,OAAIpF,EAAMyI,OAAS,GACPzI,EAAMklB,UAAU,EAAG,IAEhB,MADHllB,EAAMklB,UAAUllB,EAAMyI,OAAS,GAAIzI,EAAMyI,QAG9CzI,GAGX0qB,YAAa,SAAUpB,EAAMC,GACzB,IAAIhe,EAAM+d,EAAO,IAAMC,EACvB,OAAOvtB,KAAKiuB,aAAajuB,KAAKutB,QAAS,OAAQhe,EAAK,OAGxDof,cAAe,SAAUrB,EAAMC,EAASqB,GACpC,IAAIrf,EAAM+d,EAAO,IAAMC,EACvB,OAAOvtB,KAAKiuB,aAAajuB,KAAKutB,QAAS,SAAUhe,EAAK,GAAOqf,GAGjEC,YAAa,SAAU9pB,GACnB,IAAI/I,EAAIgE,KAAKiuB,aAAajuB,KAAKwtB,QAAS,OAAQzoB,EAAI,MAEpD,OAAU,OAAN/I,EACO,uFAAyFA,EAAI,SAE7F,IAIf8yB,kBAAmB,SAAU/pB,GACzB,MAAgC,qBAArB/E,KAAKwtB,QAAQzoB,IAA8D,qBAAhC/E,KAAKwtB,QAAQzoB,GAAIgqB,WAC5D/uB,KAAKwtB,QAAQzoB,GAAIgqB,WAEjB,gBAIfC,cAAe,SAAUjqB,EAAIyhB,GACzB,MAAgC,qBAArBxmB,KAAKwtB,QAAQzoB,IAA0D,qBAA5B/E,KAAKwtB,QAAQzoB,GAAIa,OAC5D4gB,EAAMxmB,KAAKwtB,QAAQzoB,GAAIa,OAEvB4gB,GAIfyI,qBAAsB,SAAUlqB,EAAIyhB,GAChC,MAAgC,qBAArBxmB,KAAKwtB,QAAQzoB,IAAiE,qBAAnC/E,KAAKwtB,QAAQzoB,GAAIkQ,cAC5DjV,KAAKwtB,QAAQzoB,GAAIkQ,cAEjBuR,IAWnB,SAAS0I,gBAAgBhd,GACrB,IAAIid,EAAQjd,EAAMlQ,KAAK+K,MAAM,KACzBqiB,EAAMD,EAAM,GAEhB,OAAQC,GACJ,IAAK,KACL,IAAK,MACL,IAAK,OACL,IAAK,cACDld,EAAMob,KAAO8B,EACb,MAEJ,IAAK,SACDld,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAkB,UAAb0iB,EAAM,GAC1Bjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,OACDld,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAkB,SAAb0iB,EAAM,GAC1Bjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,SACDld,EAAMob,KAAOpb,EAAMlQ,KACfkQ,EAAMnN,GAAGsqB,MAAM,8BAAgCnd,EAAMnN,GAAGsqB,MAAM,4BAC9Dnd,EAAMkc,aAAe,SAErBlc,EAAMkc,aAAe,SAEzB,MAEJ,IAAK,KACDlc,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAkB,WAAb0iB,EAAM,GAC1Bjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,QACGD,EAAM1iB,OAAS,EACfyF,EAAMob,KAAO8B,EAAM,IAAMD,EAAM,GAG/Bjd,EAAMob,KAAO8B,EAEjB,MAEJ,IAAK,MACDld,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAk
B,UAAb0iB,EAAM,GAC1Bjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,OACDld,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAkB,WAAb0iB,EAAM,IAAgC,QAAbA,EAAM,GACnDjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,SACL,IAAK,MACDld,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,GAAkB,QAAb0iB,EAAM,GAC1Bjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,UACDld,EAAMob,KAAOpb,EAAMlQ,KACC,GAAhBmtB,EAAM1iB,QAA4B,SAAb0iB,EAAM,GAC3Bjd,EAAMob,KAAO8B,EAAM,IAAMD,EAAM,GACxBA,EAAM1iB,OAAS,GAAkB,SAAb0iB,EAAM,GACjCjd,EAAMkc,aAAegB,EAAM,IAAMD,EAAM,GAChCA,EAAM1iB,OAAS,IACtByF,EAAMkc,aAAegB,GAEzB,MAEJ,IAAK,UACGD,EAAM1iB,QAAU,GAAK0iB,EAAM,GAAGhO,WAAW,eACzCjP,EAAMob,KAAO8B,EAAM,cAEvB,MAEJ,IAAK,KAKD,GAJAld,EAAMob,KAAO8B,EAIS,WAAlBld,EAAMsb,UAAwE,qBAAxC5pB,QAAQkoB,cAAc5Z,EAAMuR,SAA2B7f,QAAQkoB,cAAc5Z,EAAMuR,UAAYvR,EAAMuR,QAAS,CACpJ,IAAIwD,EAAI/U,EAAMgP,KAAKnU,MAAM,KAAK,GAC1Bka,EAAEqI,SAAS,OACX1rB,QAAQkoB,cAAc5Z,EAAMuR,QAAUwD,EAAEoH,MAAM,EAAGpH,EAAEsI,YAAY,QACxDtI,EAAEqI,SAAS,QAClB1rB,QAAQkoB,cAAc5Z,EAAMuR,QAAUwD,EAAEoH,MAAM,EAAGpH,EAAEsI,YAAY,SACxDtI,EAAE9F,WAAW,OACpBvd,QAAQkoB,cAAc5Z,EAAMuR,QAAUwD,EAAEoH,MAAM,EAAGpH,EAAExa,QAC5Cwa,EAAE9F,WAAW,QACpBvd,QAAQkoB,cAAc5Z,EAAMuR,QAAUwD,EAAEoH,MAAM,EAAGpH,EAAExa,QAEnD7I,QAAQkoB,cAAc5Z,EAAMuR,QAAUwD,EAM1C/U,EAAMnN,GAAGsqB,MAAM,YACfnd,EAAMsa,WAGV,MAEJ,QACIta,EAAMob,KAAOpb,EAAMlQ,KACfmtB,EAAM1iB,OAAS,IACfyF,EAAMkc,aAAegB,GAKjCld,EAAMqb,QAAUrb,EAAMuR,OAK1B,SAAS+L,SAAS7B,EAAIxf,EAAQ+d,GAG1B,GAAwB,UAApBvJ,WAAWI,KACX,MAAO,GAGX,IAAI0M,EAAO,GAgKX,MA9JqC,qBAA1BthB,EAAO,iBACdshB,GAAQ,wRAQevD,EAASzqB,WAAa,qDAKd,qBAAxB0M,EAAO,cACdshB,GAAQ,mMAKevD,EAASzqB,WAAa,qEAI7CguB,GAAQ,qMAKevD,EAASzqB,WAAa,sEAIL,qBAA5B0M,EAAO,mBACnBshB,GAAQ,uMAKevD,EAASzqB,WAAa,yEAI7CguB,GAAQ,yMAKevD,EAASzqB,WAAa,0EAKb,qBAAzB0M,EAAO,gBACdshB,GAAQ,4MAMevD,EAASzqB,WAAa,kBACtBkC,QAAQ0J,OAAO,IAAM,+BAIZ,qBAAzBc,EAAO,eACdshB,GAAQ,4MAKevD,EAASzqB,WAAa,sEAI7CguB,GAAQ,yMAKevD,EAASzqB,WAAa,uEAIT,qBAAxB0M,EAAO,cACnBshB,GAAQ,0MAKevD,EAASzqB,WAAa,qEAI7CguB,GAAQ,uMAKevD,EAASzqB,WAAa,sEAIP,qBAA1B0M,EAAO,gBACnBshB,GAAQ,8MAKevD,EAASzqB,WAAa,uEAI7CguB,GAAQ,2MAKevD,EAASzqB,WAAa,wEAIP,qBAA1B0M,EAAO,iBACnBshB,GAAQ,gOAMevD,EAASzqB,WAAa,uEAI7CguB,GAAQ,6NAMevD,EAASzqB,WAAa,wEAKb,qBAAzB0M,EAAO,gBACdshB,GAAQ,2SAQevD,EAASzqB,WAAa,kBACtBkC,QAAQ0J,OAAO,GAAK,+BAIxCoiB,EAGX,SAASC,mBAAmB1tB,EAAMkQ,EAAOga,GACrC,GAAwB,UAApBvJ,WAAWI,KACX,MAAO,GAGX,IAAI0M,EAAO,GACPE,EAAUlC,iBAAiBQ,aAAaR,iBAAiBD,QAASxrB,EAAMkQ,EAAMsb,QAAS,IAC3F,GAAImC,EAAQljB,OAAS,EAEjB,IADA,IAAImjB,EAAK,EAAGC,EAAOF,EAAQljB,OACpBmjB,EAAKC,GACmB,oBAAhBF,EAAQC,GACfH,GAAQE,EAAQC,GAAInC,iBAAiBE,GAAIzb,EAAMnN,IAAIqE,QAAQ,kBAAmB8iB,EAASzqB,YAAY2H,QAAQ,mBAAoB8I,EAAMnN,IAErI0qB,GAAQE,EAAQC,GAAIxmB,QAAQ,kBAAmB8iB,EAASzqB,YAAY2H,QAAQ,mBAAoB8I,EAAMnN,IAE1G6qB,IAGR,OAAOH,EAGX,SAASK,WAAWjE,EAAO9f,GACvB,IAAIgkB,EAAM3pB,SAASof,eAAe,cAC9BwK,EAAc3zB,KAAKG,MAAM,IAAM4vB,aAAalpB,EAAE6sB,GAAK9b,UAGnDiY,EAA2F,GAAhF7vB,KAAKgpB,MAAOniB,EAAE6sB,GAAK9b,QAAU+b,EAAc,IAAMjkB,EAAKogB,aAAe,EAAK,IACzFvoB,QAAQsoB,SAAWA,EACnBtoB,QAAQuoB,aAAepgB,EAAKogB,aAa5B,IAXA,IA4DoC1I,EAAQ+J,EAASptB,EA5DjDkD,EAAO,GACP2sB,EAAU,sEACVC,EAAWV,SAAS/B,iBAAiBE,GAAI5hB,EAAKoC,OAAQ+d,GAGtDiE,EAAO1D,qBAAqBZ,GAC5B5d,EAAI,EAAGiW,EAAMiM,EAAK1jB,OAGhB2jB,EAAyD,WAArC7O,6EAAkBpN,eACtCkc,EAAwBD,EAAoBtqB,kEAAmC,EAC9EmI,EAAIiW,GAAK,CACZ,IAAIoJ,EAAO6C,EAAKliB,KAIZqiB,EAAS3sB,QAAQuF,QAAQ,QAAUokB,GACvC2C,GAAW,0BAA4BK,EAAS,kCAAoCA,EAAS,SAAWzE,EAAMyB,GAAMiD,KAAO,IAAM1E,EAAMyB,GAAMtpB,MAAQ,uBACrJV,GAAQ,iFAAmFgtB,EAAS,oBAAsBzE,EAAMyB,GAAMiD,
KAAO,IAAM1E,EAAMyB,GAAMtpB,MAAQ,gEAE9I,OAArB6nB,EAAMyB,GAAMkD,OACZltB,GAAQuoB,EAAMyB,GAAMkD,MAKxB,IAAIC,EAAQ,GACRC,EAAQ,kCAAoCR,EAChDA,EAAW,GAKX,IAFA,IAAIS,EAAMlE,qBAAqBZ,EAAMyB,GAAMsD,UACvCC,EAAK,EAAGC,EAAOH,EAAIlkB,OAChBokB,EAAKC,GAAM,CACd,IAAIvD,EAAUoD,EAAIE,KAGdE,EAAYptB,QAAQuF,QAAQ,QAAUokB,EAAO,YAAcC,GAC/D0C,GAAW,uBAAyBc,EAAY,kCAAoCA,EAAY,SAAWlF,EAAMyB,GAAMsD,SAASrD,GAASvpB,MAAQ,YACjJysB,GAAS,+DAAiEM,EAAY,aAAeA,EAAY,oDAAsDlF,EAAMyB,GAAMsD,SAASrD,GAASvpB,MAAQ,QAElK,OAAvC6nB,EAAMyB,GAAMsD,SAASrD,GAASiD,OAC9BC,GAAS,+EAAiF5E,EAAMyB,GAAMsD,SAASrD,GAASiD,KAAO,UAGnI,IAAIf,EAAO,kCACPuB,EAAQ,GAKZnF,EAAMyB,GAAMsD,SAASrD,GAASpf,OAAO4e,KAAKR,cAE1C,IADA,IAAI0E,EAAK,EAAGC,EAAOrF,EAAMyB,GAAMsD,SAASrD,GAASpf,OAAO1B,OACjDwkB,EAAKC,GAAM,CAAC,IACXhf,EAAQ2Z,EAAMyB,GAAMsD,SAASrD,GAASpf,OAAO8iB,KAGjDP,GAAShB,mBAAmB,YAAaxd,EAAOga,GAChDuD,GAAQC,mBAAmB,QAASxd,EAAOga,GAqBnB,UAApBvJ,WAAWI,OACXiO,GAAS,gDAGb,IAAMG,EAAc1D,iBAAiBuB,cAAc9c,EAAMsb,QAAS5pB,QAAQyoB,cACpEgE,EAENW,GAAS,2DAA6DhB,EAAYvuB,WAAa,OAASgsB,iBAAiBoB,YAAY3c,EAAMsb,SAAW,4CAA8C7pB,QAAQuF,QAAQgJ,EAAMnN,IAAM,mBAAqBmN,EAAMnN,GAAK,oCAEzOosB,EAAY1vB,WAAa,gCACbgsB,iBAAiBqB,kBAAkB5c,EAAMsb,SAAW,cACpE7pB,QAAQuF,QAAQtF,QAAQ8G,SAAW,IAAMwH,EAAMnN,IAAM,kBACjD0oB,iBAAiBQ,aAAaR,iBAAiBD,QAAS,SAAUtb,EAAMsb,QAAS,IAAM,0BAC/EC,iBAAiBwB,qBAAqB/c,EAAMsb,SAAU,GAAK,KACrF4C,EAAoB,iCAAmC,KAjCpC3M,EAkCLvR,EAAMuR,OAlCO+J,EAkCCtb,EAAMsb,QAlCEptB,EAkCO8R,EAAM9R,MAhCjC,qBADTqtB,iBAAiBQ,aAAaR,iBAAiBD,QAAS,YAAaA,OAAS9sB,GAE3E,qBAAuB+iB,EAAS,IAAM+J,EAAU,IAAMptB,EAAQ,IAE9D,IAIf,SAAwBqjB,EAAQ+J,EAASptB,GAErC,MAAiB,qBADTqtB,iBAAiBQ,aAAaR,iBAAiBD,QAAS,YAAaA,OAAS9sB,GAE3E,qBAAuB+iB,EAAS,IAAM+J,EAAU,IAAMptB,EAAQ,IAE9D,GAqBXgxB,CAAelf,EAAMuR,OAAQvR,EAAMsb,QAAStb,EAAM9R,OAClD,mCAEoB,UAApBuiB,WAAWI,OACXiO,GAAS,UAKjBP,IADAhB,GAAQ,UACQuB,EAAQ,SAI5Bf,GAAW,aACX3sB,IAFAotB,GAAS,UAEOD,EAAQ,qCAG5B,IAAMY,EAA4C,aAArBtlB,EAAKulB,YAElCrB,GAAW,sLACXA,GAAW,wJACXA,GAAW,8DACe,IAAtBlkB,EAAKogB,aAAsB,SAAWpgB,EAAKogB,aAAa1qB,WAAa,YAAc,8BACvDsK,EAAKwlB,iBAAiBC,iBAAmB,wBACvEzlB,EAAKrB,SAASjJ,WAAa,8BAC3BsK,EAAK0lB,aAAaD,iBAAmB,oBACpCH,EAAuB,GAAK,KAC7B,mCACAtlB,EAAK2lB,aAAaF,iBAAmB,oBAElCH,IACDpB,GAAW,sUAIfA,GAAW,gDAAkDlkB,EAAKggB,QAAQtqB,WAAa,WAEvFwuB,GAAW,QAEXA,GAAW,8CAGXA,GAAW,QACXF,EAAItK,UAAYniB,EAChB8C,SAASof,eAAe,WAAWC,UAAYwK,GAElB,IAAzBtN,WAAWS,WACXjB,WAAWvd,SAASwC,+DAA6B,CAC7CoH,MAAOmU,WAAWU,gBAClB5U,OAAQkU,WAAWW,oBAIH,UAApBX,WAAWI,KACX4O,YAEAC,eAIR,SAASC,oBAAoB9lB,GACzBnI,QAAQioB,MAAQ,GAChBjoB,QAAQkoB,cAAgB,GAExB,IAEOgG,EAFHjG,EAAQjoB,QAAQioB,MAChB1d,EAASpC,EAAKoC,OAGlB,IAAK,IAAIwZ,KAAKxZ,EACV,GAAKA,EAAO0e,eAAelF,GAA3B,CAIA,IAAIzV,EAAQ/D,EAAOwZ,GACnBuH,gBAAgBhd,GAIQ,qBAAb2Z,EAHXzE,EAAIlV,EAAMob,MAINzB,EAAMzE,GAAK,CACPgH,aAAclc,EAAMkc,aACpB5B,SAAUta,EAAMsa,SAChBoE,SAAU,GACV5sB,MAAOypB,iBAAiBU,UAAUjc,GAClCqe,KAAM9C,iBAAiBa,SAASpc,GAChCse,KAAM/C,iBAAiBc,SAASrc,GAChCtM,OAAQ6nB,iBAAiBe,WAAWtc,GAAStO,QAAQyoB,eAGlB,qBAA3BR,EAAMzE,GAAGgH,eACjBvC,EAAMzE,GAAGgH,aAAelc,EAAMkc,cAG9Blc,EAAMsa,SAAWX,EAAMzE,GAAGoF,WAC1BX,EAAMzE,GAAGoF,SAAWta,EAAMsa,WAIlCsF,EAA+C,qBAA3BjG,EAAMzE,GAAGgH,aAAiCvC,EAAMzE,GAAGgH,aAAehH,EAGtC,qBAArCyE,EAAMzE,GAAGwJ,SAAS1e,EAAMqb,SAC/B1B,EAAMzE,GAAGwJ,SAAS1e,EAAMqb,SAAW,CAC/Bf,SAAUta,EAAMsa,SAChBre,OAAQ,GACRnK,MAAO,KACPwsB,KAAM/C,iBAAiBiB,YAAYoD,EAAU5f,EAAMqb,SACnD3nB,OAAQ6nB,iBAAiBkB,cAAcmD,EAAU5f,EAAMqb,QAAS1B,EAAMzE,GAAGxhB,SAGzEsM,EAAMsa,SAAWX,EAAMzE,GAAGwJ,SAAS1e,EAAMqb,SAASf,WAClDX,EAAMzE,GAAGwJ,SAAS1e,EAAMqb,SAASf,SAAWta,EAAMsa,UAK1DX,EAAMzE,GAAGwJ,SAAS1e,EAAMqb,SAASpf,OAAO2e,KAAK5a,GAKjD,IAAK,IAAIkV,KAAKyE,EACV,GAAKA,EAAMgB,eAAezF,GAI1B,IAAK,IAAIje,KAAK0iB,EAAMzE,GAAGwJ,SACd/E,EAAMzE,
GAAGwJ,SAAS/D,eAAe1jB,KAKE,qBAA7BvF,QAAQkoB,cAAc3iB,GAC7B0iB,EAAMzE,GAAGwJ,SAASznB,GAAGnF,MAAQmF,EAAI,KAAOvF,QAAQkoB,cAAc3iB,GAAK,KAEnE2oB,EAA+C,qBAA3BjG,EAAMzE,GAAGgH,aAAiCvC,EAAMzE,GAAGgH,aAAehH,EACtFyE,EAAMzE,GAAGwJ,SAASznB,GAAGnF,MAAQypB,iBAAiBgB,aAAaqD,EAAU3oB,KAKjF2mB,WAAWjE,EAAO9f,GA1kBtB9I,OAAOwqB,iBAAmBA,iBA+kBnB,IAAMsE,aAAe,SAACC,EAAShe,EAASie,GAAnB,OAAgCD,EACvDE,OAAM,SAAChsB,GACJ3E,QAAQoN,KAAK,QAASzI,GACtBisB,MAAM,oCAAD,OAAqCne,OAE7Coe,MAAK,WACFH,QAIR,SAASI,cAAcJ,GACnBF,aACE,kEAA6BK,MAAK,SAACE,GAC/BrvB,OAAOqvB,UAAYA,KAEvB,qBACAL,GAIN,SAASM,mBAAmBN,GACxBF,aACES,QAAQC,IAAI,CACV,sEAA0BL,MAAK,kBAC7B,yEAEF,yEAEF,kBACAH,GAIN,SAASS,oBAAoBT,GACzBF,aACES,QAAQC,IAAI,CACV,sEAA2BL,MAAK,YAA0B,IAAdO,EAAa,EAAtBC,QAC/B3vB,OAAO4vB,OAASF,KAEpB,wEAEF,mBACAV,GAIN,SAASa,aAAab,GAClBF,aAAa,sEAAqB,YAAaE,GAGnD,SAASc,SAASd,GACdF,aACE,sEAAeK,MAAK,YAAwB,IAAZY,EAAW,EAApBJ,QACnB3vB,OAAO+vB,KAAOA,KAElB,OACAf,GAgBN,SAASgB,oBACL,IAAIC,EAAS,8CACTT,EAAM,4HACNU,EAAS,83BAEbd,eAAc,eA0tBdlQ,WAAWvd,SAASiD,uDAAqBqH,QAAQ,CAC7C+iB,SAvtBa,SAAClmB,GAGd,GAFAnI,QAAQwvB,eAAiB,GAEZ,OAATrnB,EAAJ,CAqKA,IAAI/P,EAAGynB,EAAQ9X,EACX0nB,EAAe,EACfC,EAAY,EACZC,EAAW,GACXC,EAAgB,GACpB,IAAKx3B,KAAK+P,EAAKP,OACX,GAAKO,EAAKP,OAAOqhB,eAAe7wB,GAAhC,CAKAynB,GADA9X,EAAQI,EAAKP,OAAOxP,IACLynB,OAGf,IAAIvR,EAAQtO,QAAQmI,KAAKoC,OAAOxC,EAAMuG,OACjB,qBAAVA,IACPA,EAAQtO,QAAQmI,KAAK0nB,eAAe9nB,EAAMuG,QAIzB,qBAAVA,GACP3Q,QAAQC,IAAI,kBAAoBxF,EAAI,uBAAyB2P,EAAMuG,MAAQ,kEAC3EA,EAAQ,CAAEsa,SAAU,UAEO,qBAAfta,EAAMob,MAAiD,qBAAlBpb,EAAMqb,UAGvD9J,EAASvR,EAAMob,KAAO,MAAQpb,EAAMqb,SAGR,qBAArBgG,EAAS9P,KAChB8P,EAAS9P,GAAU,CACfvC,KAAMuC,EACNiQ,IAAK,GACLlH,SAAUta,EAAMsa,UAGpBgH,EAAc1G,KAAKyG,EAAS9P,KAG5BvR,EAAMsa,SAAW+G,EAAS9P,GAAQ+I,WAClC+G,EAAS9P,GAAQ+I,SAAWta,EAAMsa,UAGtC+G,EAAS9P,GAAQiQ,IAAIC,QAAQhoB,GAiBjC,IAbA,IAAIioB,EAAkBJ,EAAczG,MAAK,SAAUzF,EAAGC,GAClD,OAAID,EAAEkF,SAAWjF,EAAEiF,UACP,EAERlF,EAAEkF,SAAWjF,EAAEiF,SACR,EAEJnF,mBAAmBC,EAAEpG,KAAMqG,EAAErG,SAGpCjT,EAAI,EACJ4lB,EAAK,EACL3P,EAAM0P,EAAgBnnB,OACnByX,KAAO,CACVT,EAASmQ,EAAgB3lB,KAAKiT,KAC9B,IAAI4S,GAAsB,EAWf,IAAPD,IACApB,GAAO,8BAQXA,GAAO,gGAAkGoB,EAAGpyB,WAAa,4IAA8IoyB,EAAGpyB,WAAa,oDAAiEoyB,EAAGpyB,WAAa,KAAOgiB,EAAOhiB,WAAa,qCAAuCoyB,EAAGpyB,WAAa,yFAAsGoyB,EAAGpyB,WAAa,oBAAsBoyB,EAAGpyB,WAAa,gDAAkDoyB,EAAGpyB,WAAa,KAExpBmC,QAAQwvB,eAAeS,GAAMN,EAAS9P,GAEtCoQ,IAIA,IAFA,IAAIH,EAAMH,EAAS9P,GAAQiQ,IACvB/L,EAAI+L,EAAIjnB,OACLkb,KAEkB,aADrBhc,EAAQ+nB,EAAI/L,IACF1C,QAAyC,aAAjBtZ,EAAMsZ,SAC/B6O,IACDA,GAAsB,EACtBZ,GAAU,+CAAiDzP,EAAS,mBAExE4P,IACAH,GAAUa,EAAcpoB,GAAO,IAGnC2nB,IAGRJ,GAAU,WACNU,EAAgBnnB,OAAS,IACzBgmB,GAAO,sBAEXA,GAAO,SAKHS,GAHCG,EAGSF,EAFA,uLAQVV,GAHCa,EAGMH,EAFA,iDAKX/sB,SAASof,eAAe,iBAAiBC,UAAYyN,EACrD9sB,SAASof,eAAe,cAAcC,UAAYgN,EAClDuB,4BAEIJ,EAAgBnnB,OAAS,GACzBwnB,EAAkB,GAItB,IAAIC,EAAahxB,EAAE,yBACnBgxB,EAAW5mB,GAAG,oBAAoB,SAAUua,GACxC,IAAIsM,EAASjxB,EAAE2kB,EAAEsM,QAEjBF,EADS/wB,EAAEixB,GAAQpoB,KAAK,gBAG5BmoB,EAAW5mB,GAAG,sBAAsB,SAAUua,GAC1C,IAAIsM,EAASjxB,EAAE2kB,EAAEsM,QACbpvB,EAAK7B,EAAEixB,GAAQpoB,KAAK,YACxB7I,EAAE,cAAgB6B,EAAGtD,YAAY6B,KAAK,OAG1C8C,SAASof,eAAe,cAAcC,UAAY,0DAElD8M,oBAAmB,WACfrvB,EAAE,qBAAqBkxB,eAAe,CAClClM,IAAI,GAAD,OAAK0C,uDAAL,yBACHE,OAAO,EACPuJ,YAAY,EACZC,SAAU,GACVC,sBAAsB,EACtBC,QAAQ,EACRC,cAAe,IACfC,YAAa,OACbC,aAAa,EACbC,YAAY,EACZC,eAAgB,MAChBC,cAAe,CACXC,SAAU,qBAEdC,WAAY,SAAUC,GAGlB,OAFAC,8BAA8BD,EAAI/iB,MAAkB,IAAX+iB,EAAIE,KAAaF,EAAIhQ,QAC9D/hB,EAAE,gBAAgB8mB,MAAM,SACjB,GAEXoL,SAAU,SAAUH,GAChB,OAAQA,EAAIhQ,QACR,IAAK,WACD,MAAO,CAAEoQ,QAAS,UAEtB,IAAK,UACD,MAAO,CAAEA,QAAS,WAEtB,IAAK,YACD,MAAO,CAAEA,QAAS,QAEtB,IAAK,QACD,MAAO,CAAEA,QAAS,WAG1B,MAAO,IAEXC,YAAY,EACZC,YA
AY,EACZC,aAAa,EACbC,YAAY,EACZC,UAAU,EACVC,YAAY,EACZC,QAAS,CACL,CACIC,MAAO,OACP7xB,MAAO,aACP8xB,OAAQ,SACRC,aAAc,wCACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOC,EAAgBl3B,EAAO,MAElCm3B,MAAO,SACPC,YAAY,EACZV,UAAU,GAEd,CACIG,MAAO,WACP7xB,MAAO,OACP8xB,OAAQ,SACRC,aAAc,qCACdI,MAAO,SACPE,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,YACP7xB,MAAO,YACP+xB,aAAc,oCACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOK,EAAct3B,IAEzBm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,WACP7xB,MAAO,WACP+xB,aAAc,gDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOK,EAAct3B,IAEzBm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,iBACP7xB,MAAO,iBACP+xB,aAAc,uDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOK,EAAct3B,IAEzBm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,QACP7xB,MAAO,QACP+xB,aAAc,qCACdI,MAAO,SACPL,OAAQ,SACRM,YAAY,EACZV,UAAU,GAEd,CACIG,MAAO,SACP7xB,MAAO,SACP+xB,aAAc,mDACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,OACP7xB,MAAO,QACP+xB,aAAc,2CACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOj3B,EAAMyC,WAAW2H,QAAQ,KAAM,MAE1C+sB,MAAO,SACPL,OAAQ,SACRM,YAAY,EACZV,UAAU,GAEd,CACIG,MAAO,eACP7xB,MAAO,iBACP+xB,aAAc,oDACdI,MAAO,QACPL,OAAQ,SACRJ,UAAU,GAEd,CACIG,MAAO,mBACP7xB,MAAO,qBACP+xB,aAAc,iDACdI,MAAO,QACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,YACP7xB,MAAO,YACP+xB,aAAc,iDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAmB,OAAVj3B,EAAkB3C,KAAKgpB,MAAc,IAARrmB,GAAe,IAAM,OAAOyC,YAEtE00B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,QACP7xB,MAAO,QACP+xB,aAAc,oDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAmB,OAAVj3B,EAAkB3C,KAAKgpB,MAAc,IAARrmB,GAAe,IAAM,OAAOyC,YAEtE00B,MAAO,QACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,QACP7xB,MAAO,QACP+xB,aAAc,sCACdI,MAAO,OACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,aACP7xB,MAAO,aACP+xB,aAAc,kDACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,SACP7xB,MAAO,SACP+xB,aAAc,0DACdI,MAAO,SACPL,OAAQ,SACRM,YAAY,EACZV,UAAU,GAEd,CACIG,MAAO,WACP7xB,MAAO,gBACP+xB,aAAc,2EACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOvQ,0EAAc1mB,EAAO,CAAEu3B,gBAAiB,GAAIC,MAAO,IAAK5R,IAAK,aAExEuR,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,qBACP7xB,MAAO,kBACP+xB,aAAc,4DACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOvQ,0EAAc1mB,EAAO,CAAEu3B,gBAAiB,GAAIC,MAAO,IAAK5R,IAAK,aAExEuR,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,YACP7xB,MAAO,YACP+xB,aAAc,8BACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,YACP7xB,MAAO,mBACP+xB,aAAc,oCACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAI7B,OAAc,IAAVj3B,EACO,OAEA,WAGfm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,UACP7xB,MAAO,iBACP+xB,aAAc,yDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAI7B,OAAc,IAAVj3B,EACO,UAEA,WAGfm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,gBACP7xB,MAAO,gBACP+xB,aAAc,qDACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOK,EAAct3B,IAEzBm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,aACP7xB,MAAO,aACP+xB,aAAc,6DACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOK,EAAct3B,IAEzBm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,OACP7xB,MAAO,SACP+xB,aAAc,8CACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,WACP7xB,MAAO,gBACP+xB,aAAc,4CACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOC,EAAgBl3B,EAAO,MAElCm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,YACP7xB,MAAO,sBACP+xB,aAAc,gCACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAI7B,OAAc,IAAVj3B,EACO,kBAEA,qBAAuBA,EAAMyC,WAAa,KAGzD00B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,QACP7xB,MAAO,eACP+xB,aAAc,qCACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAI7B,OAAOvQ,0EAAc1mB,EAAO,CAAEu3B,gBAAiB,GAAIC,MAAO,IAAK5R,IAAK,aAExEuR,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,wBAC
P7xB,MAAO,sBACP+xB,aAAc,+DACdC,UAAW,SAAUh3B,EAAOi2B,EAAKgB,GAG7B,OAAOC,EAAgBl3B,EAAO,MAElCm3B,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,OACP7xB,MAAO,cACP+xB,aAAc,mCACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,GAEd,CACIG,MAAO,SACP7xB,MAAO,eACP+xB,aAAc,2CACdI,MAAO,SACPL,OAAQ,SACRO,SAAS,EACTX,UAAU,cA1sBtBtvB,SAASof,eAAe,iBAAiBC,UACvCrf,SAASof,eAAe,cAAcC,UACpCrf,SAASof,eAAe,cAAcC,UACpC,6BAIV,SAAS6Q,EAAcvxB,GACnB,OAAW,IAAPA,EACO,IAGJA,EAAGtD,WAGd,SAASy0B,EAAgBO,EAAWD,GAChC,GAAkB,IAAdC,EACA,MAAO,IAGU,qBAAVD,IACPA,EAAQ,UAGZ,IAAIp1B,EAAI,IAAIujB,KAAiB,IAAZ8R,GASjB,OAAOr1B,EAAEs1B,qBAAuBF,EAAQp1B,EAAEu1B,qBA2B9C,SAAS5C,EAAcpoB,EAAOirB,GAC1B,IAAI1kB,EAAQtO,QAAQmI,KAAKoC,OAAOxC,EAAMuG,OACtC,GAAuB,qBAAXA,GAEe,qBADvBA,EAAQtO,QAAQmI,KAAK0nB,eAAe9nB,EAAMuG,QAMtC,OADA3Q,QAAQC,IAAI,qBAAuBmK,EAAMuG,MAAQ,4CAC1C,GAIf,IAAI2kB,EAAmC,qBAAflrB,EAAMgD,MAA8C,qBAAfhD,EAAMmrB,KAC/DC,EAAS,UAAMnM,uDAAN,mCAA8Cjf,EAAMuG,MAApD,kBAAmEvG,EAAMuV,KAAzE,iBAET8V,EAAiB,4BAA8BrrB,EAAMsrB,UAAY,iNAC8HtrB,EAAMuG,MAAQ,MAAoC,IAA3BvG,EAAMurB,mBAA4B,MAASvrB,EAAMsZ,OAAS,wQACrG8R,EAAY,oRAC0CA,EAAY,wDAE7OzzB,EAAO,sGAAwGqI,EAAMuG,MAAQ,mCAAqC6kB,EAAY,sFAAwFprB,EAAM6kB,KAAO,UAAYwG,EAAiB,kCAExR,qBAAfrrB,EAAMgD,KAAyB,kJAAoJhD,EAAMgD,KAAO,oBAAuB,KACxM,qBAAfhD,EAAMmrB,KAAyB,oJAAsJnrB,EAAMmrB,KAAO,oBAAuB,IAEtO,IAAa,IAATF,EAAe,CACf,IAAIx2B,EAAQ8R,EAAM9R,MACJ,MAAVA,IACAA,EAAQ,SAGZkD,IAAwC,qBAAvBqI,EAAMwrB,aAAiC,uEAxDhE,SAA8BxrB,EAAOuG,GACjC,IAAIgD,EAAa,kBAgBjB,OAdIhD,EAAMgD,WAAWzI,OAAS,IAC1ByI,EAAa,kCAGsB,qBAA5BvJ,EAAMyrB,oBAITliB,EAHIvJ,EAAMyrB,kBAAkBhuB,QAAQ,KAAM,KACpC2D,MAAM,KACVN,OAAS,EACE,kCAAoCd,EAAMyrB,kBAAoB,WAE9D,oCAAsCzrB,EAAMyrB,kBAAoB,YAI9E,SAAWzrB,EAAM0rB,cAAgB,WACpCniB,EAAa,oBAAsBvJ,EAAMuG,MAAQ,2BAC3BwT,0EAAc/Z,EAAMwrB,aAAexrB,EAAM2rB,cAAe,CAAEd,MAAO,WAAc,2BAA6B9Q,0EAAc/Z,EAAM2rB,cAAe,CAAEd,MAAO,WAAc,WAC1L7qB,EAAM4rB,eAAmB,wBAA0B5rB,EAAM4rB,eAAenuB,QAAQ,KAAM,WAAa,UAAa,IAClH,IAmCiIouB,CAAqB7rB,EAAOuG,GAAS,aAAgB,KAC1J,qBAAfvG,EAAM8rB,KAAyB,0GAA4G9rB,EAAM8rB,KAAO,oBAAuB,KACtK,OAAhBvlB,EAAMwlB,MAAmB,mFAAqFxlB,EAAMwlB,MAAQ,IAAMt3B,EAAQ,oBAAuB,KACnJ,OAAd8R,EAAMylB,IAAiB,iFAAmFzlB,EAAMylB,IAAM,IAAMv3B,EAAQ,oBAAuB,IAGjKuL,EAAMisB,kBAAoB,IAC1Bt0B,GAAQ,4EAA8EoiB,0EAAc/Z,EAAMisB,mBAAqB,cAG/HjsB,EAAMksB,kBAAoB,IAC1Bv0B,GAAQ,6EAA+EoiB,0EAAc/Z,EAAMksB,mBAAqB,cAGpI,IAAIp0B,EAAQ,GAwCZ,OAvCKkI,EAAMmsB,kBAAoB,GAAKnsB,EAAMosB,oBAAsB,IAAiC,IAA3BpsB,EAAMqsB,kBAA0BrsB,EAAMssB,mBAAqB,IACzHtsB,EAAMmsB,oBAAsBnsB,EAAMosB,oBAClCt0B,GAAS,0BAA4BiiB,0EAAc/Z,EAAMmsB,kBAAmB,CACxEtB,MAAO,SACPD,gBAAiB,MAGrB9yB,EAAQ,0BACJkI,EAAMmsB,kBAAoB,IAC1Br0B,GAAS,iCAAmCiiB,0EAAc/Z,EAAMmsB,kBAAmB,CAC/EtB,MAAO,SACPD,gBAAiB,KAChB,aAEL5qB,EAAMosB,oBAAsB,IAC5Bt0B,GAAS,+BAAiCiiB,0EAAc/Z,EAAMosB,oBAAqB,CAC/EvB,MAAO,SACPD,gBAAiB,KAChB,cAGkB,IAA3B5qB,EAAMqsB,mBACNv0B,GAAS,iCAAmCkI,EAAMqsB,iBAAiBv2B,WAAa,UAChFgC,GAAS,gCAAkCiiB,0EAAc/Z,EAAMssB,mBAAoB,CAC/EzB,MAAO,SACPD,gBAAiB,KAChB,WAET9yB,GAAS,YAGbH,GAAQ,yEAA2EoiB,0EAAc/Z,EAAMwgB,aAAc,CAC/GqK,MAAO,SACPD,gBAAiB,KAChB,eACa,IAAdM,EAAuB,sGAAwGlrB,EAAMusB,KAAO,UAAYz0B,EAAQ,aAAgB,IAClL,6HAA+HkI,EAAMwsB,OAAS,sCAMtJ,SAASlE,EAAkBlvB,GAIvB,IAHA,IAAIzB,EAAO,wBACPmgB,EAAS7f,QAAQwvB,eAAeruB,GAChCmf,EAAMT,EAAOiQ,IAAIjnB,OACdyX,KAAO,CAEV5gB,GAAQywB,EADItQ,EAAOiQ,IAAIxP,IACM,GAEjC5gB,GAAQ,WAERJ,EAAE,cAAgB6B,EAAGtD,YAAY6B,KAAKA,GACtC0wB,8BAojBJpJ,wEAIR,SAASwN,mCAAmCrsB,GACxC,GAAa,OAATA,EAAe,CACfnI,QAAQ8G,SAAWqB,EAAKrB,SACxB9G,QAAQmI,KAAOA,EACfnI,QAAQmoB,QAAUhgB,EAAKggB,QACvBnoB,QAAQooB,gBAAkBjgB,EAAKigB,gBAC/BpoB,QAAQy0B,SAAWtsB,EAAKssB,SACxB5K,iBAAiBE,GAAK5hB,EAAK4hB,GAED,qBAAf5hB,EAAKkgB,QACZroB,QAAQqoB,MAAQlgB,EAAKkgB,OAIzB7lB,SAASof,eAAe,kBAAkB
[generated JavaScript source-map "mappings" payload omitted: Base64-VLQ machine-generated data, not human-readable or editable]
AhDEp7C,WAkDNy2C,SAAW,GAChBliC,EAAO0hC,IAnDIj2C,KAmDM,QAnDNA,KAmDoB4D,QAAQ2Q,OAAOm5B,MAAMH,QACpDh5B,EAAO0hC,IApDIj2C,KAoDM,QApDNA,KAoDoB4D,QAAQ2Q,OAAO+4B,MAAMC,QAGpDh5B,EAAOuiC,sBAvDI92C,KAuDwB,QAvDxBA,KAuDsC4D,QAAQ2Q,OAAOm5B,MAAMH,QACtEh5B,EAAOuiC,sBAxDI92C,KAwDwB,QAxDxBA,KAwDsC4D,QAAQ2Q,OAAO+4B,MAAMC,QACtEh5B,EAAOukC,wBAzDI94C,MA4DXuU,EAAO8jC,oBA5DIr4C,KA4DsB,SAGjCuU,EAAOyiC,0BA/DIh3C,WAkEF4D,QAAQ2Q,OAAOu5B,MAAM9S,SAAgD,SAlEnEh7B,KAkEmC4D,QAAQ2Q,OAAO+4B,MAAMC,QAC/Dh5B,EAAOwjC,cAnEA/3C,MAsEXuU,EAAO8jC,oBAtEIr4C,KAsEsB,SAE7B+vC,EAAQqC,WAxEDpyC,KAwEiB4D,QAAQ0rC,UAAUnS,QAC1C,IAzEOn9B,KA0EE4D,QAAQ0rC,UAAUnS,SACzB,MAAOj3B,IA3EFlG,KA+EF4D,QAAQ4qC,SAASxT,SACtBkhB,EAAGmC,YAhFIr+C,MAmFXgvC,EAASyM,wBAnFEz7C,OAsFXy/C,EAAiB,SAASzgD,EAAOstB,EAAOuhB,GACxC,IAAI8R,EAAiB3gD,EAAQstB,EAC7B,OAAIuhB,GAAiB,EACVxxC,KAAKgpB,MAAuB,IAAjBs6B,IAEO,IAAjBA,GAAsB5iD,QAAQ8wC,IAI9C,OAAOiR,IA7jED,gC,iCCdV,6E,4FCAA,iLASMc,EAAgBC,IAClB,CAAEpN,IAAK,wCACPrsC,SAASw5C,cAyBPE,EAAmB,WACvB,GAAIC,IACF,MAAO,yBAKT,GAAIn2C,IAAU,CACZ,IAAM6a,EAAWxhB,OAAOghB,SAASQ,SAC9Brb,QAAQ,aAAc,IAEtBA,QAAQ,eAAgB,IAC3B,OAAOnG,OAAOghB,SAASO,OAASC,EAASrb,QAAQ,WAAY,IAI/D,OAxBqC,SAAC+uB,GAItC,OAAOA,EAAO/uB,QAAQ,IAAI42C,OAFF,0BAE2B,IAChD52C,QAFkB,aAEI,IAmBlB62C,CAlCQC,YAAK,CAClB,CAAC7f,QAAS1gC,KAGV,CAACwgD,IAAG,kBAAMvzC,YAAKxG,SAASg6C,qBAAqB,cAJhCF,CAKZN,GAEWnN,KA2ByBrpC,QAAQ,WAAY,KAIhDi3C,EAAqBH,YAAK,CACrC,CAACI,YAAK1zC,IAAMs/B,YAAO,MAAOvsC,KAC1B,CAACwgD,IAAG,SAACnkD,GAAD,OAAeukD,YAAOvkD,EAAG,SAGlB4uB,EAAwBy1B,EACnCp9C,OAAO+e,eAAiB89B,KAGbU,EAAuBT,IAChC,IACAM,EAAmBP,M,gCCnEvB,ocAUaW,EAAoB,SAAClzC,GAAD,OAAsBA,EAAM3G,MAChD85C,EAAyBC,YACpCF,GACA,SAACG,EAAD,YAAe77C,MACf,SAAC87C,EAAa97C,GAAd,OAAqB87C,EAAY97C,IAAOiM,OAG7B8vC,EAAkBH,YAC7BD,GACA,SAACK,GAAD,OAAgBA,EAAW9vC,aAGvB+vC,EAAsCL,YAC1CD,EAAwBn+B,YAAK,kBAIzB0+B,EAAsBN,YAC1BO,IACAF,GACA,SAACG,EAAiBC,GAAlB,OAA6CD,GAAmBC,KAE5DC,EAA0BV,YAAeD,EAAwBn+B,YAAK,sBAE/D++B,EAAiC,kBAAMX,YAClDM,EACAI,GACA,SAAClwC,EAAeQ,GAAhB,MAAuC,CAAER,gBAAeQ,yBAG7C4vC,EAAuBZ,YAClCD,GACA,SAACK,GAAD,OAAgBA,EAAWxvC,aAGhBiwC,EAA4Bb,YACvCD,GACA,SAACK,GAAD,OAAgBA,EAAWvvC,kBAGhBiwC,EAA6Bd,YACxCD,GACA,SAACK,GAAD,OAAgBA,EAAW1vC,mBAGhBqwC,EAAqBf,YAChCD,GACA,SAACK,GAAD,OAAgBA,EAAWr8C,gBAGhBi9C,EAAwBhB,YAAeD,EAAwBn+B,YAAK,oBAUpEq/B,GAJ0BjB,YAAeF,GAAmB,SAAAI,GAAW,OAClF9/C,OAAOsN,OAAOwyC,GAAagB,MAAK,qBAAGrwC,qBAGMmvC,YACzCF,GACA,SAACI,GAAD,OAAiB9/C,OAAOsN,OAAOwyC,GAC5B7wC,QAAO,SAACC,EAAK8wC,GAAN,OAAqB9wC,GAVJ,SAAC8wC,GAAD,OAA4BA,EAAWtvC,oBAC/D4uB,QAAQ0gB,EAAW9vC,YAAc8vC,EAAWrvC,sBASTowC,CAAqBf,GAAc,EAAI,KAAI,OAGtE7e,EAAuBye,YAClCF,GACA,SAACI,GAAD,OAAiB9/C,OAAOlB,KAAKghD,GAAap0C,UAG/Bs1C,EAA+BpB,YAC1CF,GACA,SAACI,GAAD,uBAAiB9/C,OAAOsN,OAAOwyC,GAC5BxhB,MAAK,SAAC0hB,GAAD,OAAgBA,EAAWvvC,yBADnC,aAAiB,EACmCN,WAGzCkxB,EAAiCue,YAC5CF,GACA,SAACI,GAAD,OAAiB9/C,OAAOsN,OAAOwyC,GAC5B7wC,QAAO,SAACC,EAAK8wC,GAAN,OAAqB9wC,GAAO8wC,EAAWjvC,aAAe,EAAI,KAAI,MAG7DwwB,EAAgCqe,YAC3CF,GACA,SAACI,GAAD,OAAiB9/C,OAAOsN,OAAOwyC,GAC5B7wC,QAAO,SAACC,EAAK8wC,GAAN,OAAqB9wC,GAAO8wC,EAAWlvC,oBAAsB,EAAI,KAAI,O,yKC3F3E9V,EAAU,SAAAC,GACd,OAAIA,GAAK,IAAMA,EAAI,GACX,IAAN,OAAWA,EAAEyF,YAERzF,EAAEyF,YAGEugD,KAAgCxf,MAAQA,KAAKC,gBAAkBwf,UAAUC,UAEhFC,EAAe,SAAAt6B,GAAC,MAAkB,kBAANA,EAAiB,IAAIlD,KAAKkD,GAAKA,GAGpDu6B,EAAyB,SAAAv6B,GAAC,OAAIs6B,EAAat6B,GAAG6O,sBAC9C2rB,EAAyB,SAAAx6B,GAAC,OAAIs6B,EAAat6B,GAAG8O,sBAC9C2rB,EAAwB,SAAAz6B,GACnC,IAAMghB,EAAOsZ,EAAat6B,GAC1B,MAAM,GAAN,OAAU9rB,EAAQ8sC,EAAKlI,YAAvB,YAAsC5kC,EAAQ8sC,EAAKjI,cAAnD,YAAoE7kC,EAAQ8sC,EAAKhI,gBAGtEmH,EAAmB,SAAArF,GAC9B,IACEH,KAAKC,eAAewf,UAAUC,SAAU,CACtCK,cAAe,WACfC,cAAe,WACfC,QAAS,QACTC,KAAM,UACNC,MAAO,QACPC,IAAK,UACLjgB,aAEF,M
AAOz8B,GACP,OAAO,EAET,OAAO,GAkBH28C,EAAa,SAACha,EAAD,OAASia,EAAT,EAASA,OAAWl/C,EAApB,iCACjB,IAAI4+B,KAAKC,eAAT,OAAwBqgB,QAAxB,IAAwBA,IAAUb,UAAUC,SAd3B,SAAC,GAAD,IAAGa,EAAH,EAAGA,KAAMC,EAAT,EAASA,OAAQC,EAAjB,EAAiBA,KAAM5qB,EAAvB,EAAuBA,SAAvB,oBACjB6qB,UAAW,OACPF,EACA,GACAD,EACA,CAAEN,QAAS,QAASC,KAAM,UAAWC,MAAO,QAASC,IAAK,WAC1D,CAAEO,UAAW,SANA,GAObH,GAAU,CACZI,UAAWH,EAAO,SAAW,SARd,CAUjBtgB,SAAUtK,IAI4CgrB,CAAWz/C,IAAU2pC,OAAO1E,IAE9Eya,EAAc,SAAAjrB,GAAQ,MAAkB,KAAbA,GAAgC,YAAbA,EAAyBA,OAAW33B,GAE3E6iD,EAAc,WACzB,IAAMlrB,EAAWx1B,YAAY2gD,KACvBv0C,EAAYpM,YAAY4gD,KAExBhiC,EAAmBqnB,mBAAQ,WAC/B,OAAOkZ,EACH,SAACnZ,EAAMjlC,GAAP,OACEi/C,EAAWha,EAAD,aAASka,MAAM,EAAM1qB,SAAUirB,EAAYjrB,IAAcz0B,KACrEw+C,IACH,CAAC/pB,IAEE3W,EAAmBonB,mBAAQ,WAC/B,OAAOkZ,EACH,SAACnZ,EAAMjlC,GAAP,OACEi/C,EAAWha,EAAD,aACRoa,MAAM,EACND,QAAQ,EACR3qB,SAAUirB,EAAYjrB,IACnBz0B,KAEPy+C,IACH,CAAChqB,IAEEqrB,EAAkB5a,mBAAQ,WAC9B,OAAOkZ,EACH,SAAAnZ,GAAI,OAAIga,EAAWha,EAAM,CAAEoa,MAAM,EAAMD,QAAQ,EAAM3qB,SAAUirB,EAAYjrB,MAC3EiqB,IACH,CAACjqB,IAQJ,MAAO,CACL5W,mBACAC,mBACAiiC,gBATsB7a,mBAAQ,WAC9B,OAAOkZ,EACH,SAAAnZ,GAAI,OAAIga,EAAWha,EAAM,CAAEka,MAAM,EAAM1qB,SAAUirB,EAAYjrB,MAC7DiqB,IACH,CAACjqB,IAMFqrB,kBACAz0C,e,gCCrGJ,gOASa0yB,EAA0Bh7B,uBAAY,UAC9CC,IAD8C,0BAItCq4B,EAAyBt4B,uBAAa,GAAD,OAAIC,IAAJ,4BAGrCg9C,EAAwBj9C,uBAAY,UAC5CC,IAD4C,qBAIpC0hC,EAAyB3hC,uBAAa,GAAD,OAAIC,IAAJ,sBAGrCi9C,EAAmBl9C,uBAAY,UAAsBC,IAAtB,sBAG/Bk9C,EAAmBn9C,uBAAY,UAAsBC,IAAtB,uB,gCC1B5C,sGAAO,IAAMA,EAAW,QAEXm9C,EAA6B,IAI7BC,EAAkB,K,6ECNlBt+B,EAAgB,SAC3Bu+B,EAA+BzuC,GAE/B,IAAM0uC,EAA0C,CAC9Ct/B,IAAK,MACL4R,MAAO,IACPD,gBAAiB,MACjBqsB,IAAK,MACLrmD,KAAM,OACN4nD,KAAM,OACN1nD,MAAO,QACP2nD,OAAQ,MACR1nD,QAAS,OACT2nD,OAAQ,MACRnoD,QAAS,OACTooD,IAAK,OAGD1gD,EAAqC,kBAApB4R,EAAP,eACP0uC,EADO,GACY1uC,GACxB0uC,EAEAhoD,EAAkC,kBAAjB+nD,EACjB1/B,SAAS0/B,EAAc,IACvBA,EAEJ,GAAgB,IAAZ/nD,EACF,OAAO0H,EAAQghB,IAGjB,IAAImJ,EAAS,GACT7xB,EAAU,IACZA,GAAWA,EACqB,KAA5B0H,EAAQ2yB,kBACVxI,EAASnqB,EAAQ4yB,MAAQ5yB,EAAQ2yB,kBAIrC,IAAMh6B,EAAOF,KAAKG,MAAMN,EAAU,OAClCA,GAAmB,MAAPK,EAEZ,IAAME,EAAQJ,KAAKG,MAAMN,EAAU,MACnCA,GAAoB,KAARO,EAEZ,IAAMC,EAAUL,KAAKG,MAAMN,EAAU,IACrCA,GAAsB,GAAVQ,EAEZ,IAAM6nD,EAAU,GA0BhB,GAxBIhoD,EAAO,EACTgoD,EAAQz3B,KAAKvwB,EAAKkF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQrH,MACrC,IAATA,GACTgoD,EAAQz3B,KAAKvwB,EAAKkF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQg/C,KAGrDnmD,EAAQ,EACV8nD,EAAQz3B,KAAKrwB,EAAMgF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQnH,OACrC,IAAVA,GACT8nD,EAAQz3B,KAAKrwB,EAAMgF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQugD,MAGtDznD,EAAU,EACZ6nD,EAAQz3B,KAAKpwB,EAAQ+E,WAAamC,EAAQ4yB,MAAQ5yB,EAAQlH,SACrC,IAAZA,GACT6nD,EAAQz3B,KAAKpwB,EAAQ+E,WAAamC,EAAQ4yB,MAAQ5yB,EAAQwgD,QAGxDloD,EAAU,EACZqoD,EAAQz3B,KAAKzwB,KAAKG,MAAMN,GAASuF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQ1H,SACjD,IAAZA,GACTqoD,EAAQz3B,KAAKzwB,KAAKG,MAAMN,GAASuF,WAAamC,EAAQ4yB,MAAQ5yB,EAAQygD,QAGjD,IAAnBE,EAAQ93C,OACV,OAAO83C,EAAQC,MAAQz2B,EAGzB,IAAMnhB,EAAO23C,EAAQC,MACrB,MAAM,GAAN,OAAUD,EAAQE,KAAK,MAAvB,YAAgC7gD,EAAQ0gD,IAAxC,YAA+C13C,GAA/C,OAAsDmhB,K,+DC9ExD,MAEqE22B,EAAOC,QAA6N,SAASvjD,EAAE8E,EAAEohB,EAAErZ,GAAG,SAASgZ,EAAEsrB,EAAEppC,GAAG,IAAIme,EAAEirB,GAAG,CAAC,IAAIrsC,EAAEqsC,GAAG,CAA2C,IAAIppC,GAAxC,mBAAmBy7C,GAASA,EAAiB,OAAOC,EAAEtS,GAAE,GAAI,GAAG9K,EAAE,OAAOA,EAAE8K,GAAE,GAAI,IAAIxB,EAAE,IAAI+T,MAAM,uBAAuBvS,EAAE,KAAK,MAAMxB,EAAEgU,KAAK,mBAAmBhU,EAAE,IAAIiU,EAAE19B,EAAEirB,GAAG,CAACoS,QAAQ,IAAIz+C,EAAEqsC,GAAG,GAAGD,KAAK0S,EAAEL,SAAQ,SAASvjD,GAAoB,OAAO6lB,EAAlB/gB,EAAEqsC,GAAG,GAAGnxC,IAAeA,KAAI4jD,EAAEA,EAAEL,QAAQvjD,EAAE8E,EAAEohB,EAAErZ,GAAG,OAAOqZ,EAAEirB,GAAGoS,QAAQ,IAAI,IAAIld,EAAE,mBAAmBmd,GAASA,EAAQrS,EAAE,EAAEA,EAAEtkC,EAAExB,OAAO8lC,IAAItrB,EAAEhZ,EAAEskC,IAA
I,OAAOtrB,EAAjb,CAAob,CAACg+B,EAAE,CAAC,SAAS7jD,EAAE8E,EAAEohB,GAAG,SAASrZ,IAAI,MAAM,IAAI62C,MAAM,mCAAmC,SAAS79B,IAAI,MAAM,IAAI69B,MAAM,qCAAqC,SAASrd,EAAErmC,GAAG,GAAGymB,IAAIa,WAAW,OAAOA,WAAWtnB,EAAE,GAAG,IAAIymB,IAAI5Z,IAAI4Z,IAAIa,WAAW,OAAOb,EAAEa,WAAWA,WAAWtnB,EAAE,GAAG,IAAI,OAAOymB,EAAEzmB,EAAE,GAAG,MAAM8E,GAAG,IAAI,OAAO2hB,EAAEyqB,KAAK,KAAKlxC,EAAE,GAAG,MAAM8E,GAAG,OAAO2hB,EAAEyqB,KAAKtyC,KAAKoB,EAAE,KAAqN,SAAS+H,IAAI+7C,GAAGC,IAAID,GAAE,EAAGC,EAAE14C,OAAO24C,EAAED,EAAE5E,OAAO6E,GAAGxE,GAAG,EAAEwE,EAAE34C,QAAQo4C,KAAK,SAASA,IAAI,IAAIK,EAAE,CAAC,IAAI9jD,EAAEqmC,EAAEt+B,GAAG+7C,GAAE,EAAG,IAAI,IAAIh/C,EAAEk/C,EAAE34C,OAAOvG,GAAG,CAAC,IAAIi/C,EAAEC,EAAEA,EAAE,KAAKxE,EAAE16C,GAAGi/C,GAAGA,EAAEvE,GAAGyE,MAAMzE,GAAG,EAAE16C,EAAEk/C,EAAE34C,OAAO04C,EAAE,KAAKD,GAAE,EAA1Y,SAAW9jD,GAAG,GAAGumB,IAAI29B,aAAa,OAAOA,aAAalkD,GAAG,IAAIumB,IAAIV,IAAIU,IAAI29B,aAAa,OAAO39B,EAAE29B,aAAaA,aAAalkD,GAAG,IAAWumB,EAAEvmB,GAAG,MAAM8E,GAAG,IAAI,OAAOyhB,EAAE2qB,KAAK,KAAKlxC,GAAG,MAAM8E,GAAG,OAAOyhB,EAAE2qB,KAAKtyC,KAAKoB,KAAkMmxC,CAAEnxC,IAAI,SAAS2vC,EAAE3vC,EAAE8E,GAAGlG,KAAKulD,IAAInkD,EAAEpB,KAAK2M,MAAMzG,EAAE,SAAS8+C,KAAK,IAAIn9B,EAAEF,EAAExD,EAAEje,EAAEy+C,QAAQ,IAAI,WAAW,IAAI98B,EAAE,mBAAmBa,WAAWA,WAAWza,EAAE,MAAM7M,GAAGymB,EAAE5Z,EAAE,IAAI0Z,EAAE,mBAAmB29B,aAAaA,aAAar+B,EAAE,MAAM7lB,GAAGumB,EAAEV,GAAzI,GAA+I,IAAIk+B,EAAEC,EAAE,GAAGF,GAAE,EAAGtE,GAAG,EAAEz8B,EAAEqhC,SAAS,SAASpkD,GAAG,IAAI8E,EAAE,IAAI6a,MAAM8xB,UAAUpmC,OAAO,GAAG,GAAGomC,UAAUpmC,OAAO,EAAE,IAAI,IAAI6a,EAAE,EAAEA,EAAEurB,UAAUpmC,OAAO6a,IAAIphB,EAAEohB,EAAE,GAAGurB,UAAUvrB,GAAG89B,EAAEt4B,KAAK,IAAIikB,EAAE3vC,EAAE8E,IAAI,IAAIk/C,EAAE34C,QAAQy4C,GAAGzd,EAAEod,IAAI9T,EAAEyB,UAAU6S,IAAI,WAAWrlD,KAAKulD,IAAIE,MAAM,KAAKzlD,KAAK2M,QAAQwX,EAAEngB,MAAM,UAAUmgB,EAAEuhC,SAAQ,EAAGvhC,EAAEwhC,IAAI,GAAGxhC,EAAEyhC,KAAK,GAAGzhC,EAAE4H,QAAQ,GAAG5H,EAAE0hC,SAAS,GAAG1hC,EAAE7W,GAAG03C,EAAE7gC,EAAE2hC,YAAYd,EAAE7gC,EAAE4hC,KAAKf,EAAE7gC,EAAE0iB,IAAIme,EAAE7gC,EAAE6hC,eAAehB,EAAE7gC,EAAE8hC,mBAAmBjB,EAAE7gC,EAAE+hC,KAAKlB,EAAE7gC,EAAEgiC,gBAAgBnB,EAAE7gC,EAAEiiC,oBAAoBpB,EAAE7gC,EAAEkiC,UAAU,SAASjlD,GAAG,MAAM,IAAI+iB,EAAEmiC,QAAQ,SAASllD,GAAG,MAAM,IAAI0jD,MAAM,qCAAqC3gC,EAAEoiC,IAAI,WAAW,MAAM,KAAKpiC,EAAEqiC,MAAM,SAASplD,GAAG,MAAM,IAAI0jD,MAAM,mCAAmC3gC,EAAEsiC,MAAM,WAAW,OAAO,IAAI,IAAIC,EAAE,CAAC,SAAStlD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAkBioB,EAAE,SAAS7lB,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAAdA,EAAE,WAAiEqmC,EAAE,cAAaA,EAAE+K,UAAU,IAAIvrB,EAAE2L,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAEwgB,EAAE8K,EAAE,GAAGppC,EAAEme,EAAErnB,IAAI,YAAY4kD,EAAE,EAAEA,EAAEzjD,EAAEqL,OAAOo4C,IAAI52C,EAAE7M,EAAEyjD,GAAG,GAAGpd,EAAErmC,EAAEyjD,GAAG3+C,GAAGiD,GAAG,OAAOs+B,IAAIA,EAAE,IAAI,GAAGA,EAAE,IAAI,GAAGA,EAAE,IAAI,KAAKA,EAAE,MAAM,OAAOA,EAAU,QAAPxgB,EAAEwgB,EAAE,KAAawI,MAAMhpB,GAAGsrB,EAAEzlB,KAAK,CAAC7e,EAAEgZ,EAAE,CAACA,EAAEA,KAAKsrB,EAAEzlB,KAAK,CAAC7e,EAAEgZ,EAAE,CAACwgB,EAAE,GAAGA,EAAE,MAAO8K,EAAEzlB,KAAK,CAAC7e,EAAE,KAAK,CAAC,KAAK,QAAQ,OAAOskC,GAAG9K,EAAE+K,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,GAAGphB,EAAE7J,KAAK8D,IAAI+F,EAAE9E,EAAEqL,QAAQ,IAAIwB,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE07C,EAAE9T,EAAEiU,EAAE,GAAG,IAAI/9B,EAAE,EAAEsrB,EAAE,EAAE9K,EAAE,EAAEt+B,EAAE,EAAE07C,EAAE,EAAEA,EAAEzjD,EAAEqL,OAAOo4C,IAAI,CAAC,GAAG52C,EAAE7M,EAAEyjD,GAAG,GAAG9T,EAAE3vC,EAAEyjD,GAAG,GAAGG,EAAEH,GAAGzjD,EAAEyjD,GAAG,OAAO52C,GAAGgiC,MAAMhiC,KAAKgZ,GAAG8pB,EAAE,GAAGwB,GAAGtkC,EAAEw5B,GAAGsJ,EAAE,GAAG5nC,GAAG,GAAG07C,EAAE3+C,GAAG,EAAE,CAAC,IAAI2hB,EAAEzmB,EAAEyjD,EAAE3+C,GAAG,OAAO2hB,EAAE,IAAIooB,MAAMpoB,EAAE,MAA
MZ,GAAGY,EAAE,GAAG,GAAG0qB,GAAG1qB,EAAE,GAAG4f,GAAG5f,EAAE,GAAG,GAAG1e,GAAG,GAAG67C,EAAEH,GAAG17C,EAAE,CAAC/H,EAAEyjD,GAAG,GAAG,EAAEtS,EAAEppC,EAAE,CAAC,EAAE8d,EAAE9d,EAAE,EAAEs+B,EAAEt+B,IAAI,CAAC/H,EAAEyjD,GAAG,GAAG,KAAK,CAAC,KAAK,OAAO,OAAOG,GAAG19B,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,SAAS,IAAIm0B,EAAE,CAAC,SAAS3lD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAkBioB,EAAE,SAAS7lB,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAAdA,EAAE,WAAiEqmC,EAAE,cAAaA,EAAE+K,UAAU,IAAIvrB,EAAE2L,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE,GAAG07C,EAAEv9B,EAAErnB,IAAI,SAAS8wC,EAAEzpB,EAAErnB,IAAI,YAAY+kD,EAAE,EAAEA,EAAE5jD,EAAEqL,OAAOu4C,IAAI/2C,EAAE7M,EAAE4jD,GAAG,GAAGzS,EAAEnxC,EAAE4jD,GAAG9+C,GAAG6qC,GAAG,OAAOwB,IAAIA,EAAE,IAAI,GAAGA,EAAE,GAAGsS,EAAEtS,EAAE,IAAI,KAAKA,EAAE,MAAM,OAAOA,EAAU,QAAPtrB,EAAEsrB,EAAE,KAAatC,MAAMhpB,GAAG9d,EAAE2jB,KAAK,CAAC7e,EAAEgZ,EAAE,CAACA,EAAEA,EAAEA,MAAMwgB,EAAEod,EAAEtS,EAAE,GAAGppC,EAAE2jB,KAAK,CAAC7e,EAAEgZ,EAAE,CAACA,EAAEwgB,EAAExgB,EAAEwgB,EAAE8K,EAAE,OAAQppC,EAAE2jB,KAAK,CAAC7e,EAAE,KAAK,CAAC,KAAK,KAAK,QAAQ,OAAO9E,GAAGs+B,EAAE+K,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,GAAGphB,EAAE7J,KAAK8D,IAAI+F,EAAE9E,EAAEqL,QAAQ,IAAIwB,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE07C,EAAE9T,EAAEiU,EAAEn9B,EAAEF,EAAE,GAAGxD,EAAEmD,EAAErnB,IAAI,SAAS,IAAIgO,EAAE,EAAEA,EAAE7M,EAAEqL,OAAOwB,IAAI,CAAC,IAAI9E,EAAE,EAAE67C,EAAE,EAAEH,EAAE,EAAE59B,EAAE5qB,KAAKqD,IAAI,EAAEuO,EAAE/H,EAAE,GAAG+gB,EAAEhZ,EAAE,EAAEgZ,IAAI,QAAQwgB,EAAErmC,EAAE6lB,GAAG,KAAKgpB,MAAMxI,KAAKod,IAAI17C,GAAGs+B,EAAEud,GAAG3oD,KAAK2qD,IAAI5lD,EAAE6lB,GAAG,GAAG,GAAG,IAAI49B,GAAG9T,EAAE10C,KAAKg+C,KAAK2K,GAAGH,EAAEh9B,EAAE1e,EAAE07C,EAAEl9B,EAAE1Z,GAAG,CAAC7M,EAAE6M,GAAG,GAAG4Z,EAAE,CAACA,EAAE1D,EAAE4sB,EAAElpB,EAAE1D,EAAE4sB,MAAMwB,EAAE,GAAGrsC,EAAE9E,EAAE6M,GAAG,GAAG,KAAK0Z,EAAE1Z,GAAG,CAAC7M,EAAE6M,GAAG,GAAGskC,EAAE,CAACA,EAAEA,KAAK,OAAO5qB,GAAGL,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,SAAS,IAAIq0B,EAAE,CAAC,SAAS7lD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAkBioB,EAAE,SAAS7lB,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAAdA,EAAE,WAAiEqmC,EAAE,cAAaA,EAAE+K,UAAU,IAAIvrB,EAAE2L,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE07C,EAAIG,EAAEn9B,EAAE,GAAGF,EAAEL,EAAErnB,IAAI,SAASkkB,EAAEmD,EAAErnB,IAAI,YAAYklD,EAAE,EAAEA,EAAE/jD,EAAEqL,OAAO04C,IAAIl3C,EAAE7M,EAAE+jD,GAAG,GAAG1d,EAAErmC,EAAE+jD,GAAGj/C,GAAGie,GAAG,OAAOsjB,IAAIA,EAAE,IAAI,GAAGA,EAAE,IAAI,KAAKA,EAAE,MAAM,OAAOA,GAAG8K,EAAE9K,EAAE,GAAGt+B,EAAEs+B,EAAE,GAAG,OAAO8K,GAAGtC,MAAMsC,GAAG1qB,EAAEiF,KAAK,CAAC7e,EAAEskC,EAAE,CAACA,EAAEA,EAAEA,EAAEppC,MAAM07C,EAAE17C,EAAEopC,EAAEppC,EAAE,EAA+B67C,EAAE,KAA7B77C,EAAEwe,EAAEtrB,KAAKg+C,KAAKwK,GAAG,EAAEA,GAAG17C,GAAG,GAAU8d,EAAE,IAAI49B,EAAEh9B,EAAEiF,KAAK,CAAC7e,EAAEgZ,EAAE,CAACA,EAAE+9B,EAAE/9B,EAAE+9B,EAAEzS,EAAEppC,OAAO0e,EAAEiF,KAAK,CAAC7e,EAAE,KAAK,CAAC,KAAK,KAAK,KAAK,QAAQ,OAAO4Z,GAAG4f,EAAE+K,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,GAAGphB,EAAE7J,KAAK8D,IAAI+F,EAAE9E,EAAEqL,QAAQ,IAAIwB,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE,GAAG07C,EAAEv9B,EAAErnB,IAAI,SAAS8wC,EAAEzpB,EAAErnB,IAAI,kBAAkB+kD,EAAE,EAAEn9B,EAAE,EAAE,IAAI4f,EAAE,EAAEA,EAAErmC,EAAEqL,OAAOg7B,IAAI,CAACud,GAAG5jD,EAAEqmC,GAAG,GAAG,GAAG5f,GAAGzmB,EAAEqmC,GAAG,GAAG,GAAGA,EAAEvhC,GAAG,IAAI8+C,GAAG5jD,EAAEqmC,EAAEvhC,GAAG,GAAG,GAAG2hB,GAAGzmB,EAAEqmC,EAAEvhC,GAAG,GAAG,IAAI,IAAIyhB,EAAEvmB,EAAEqmC,GAAG,GAAGtjB,EAAE0D,EAAEm9B,EAAEn9B,EAAE,EAAE,GAAGkpB,EAAE,GAAGlpB,EAAE,CAAC,IAAIs
9B,EAAEhhC,EAAE,EAAE,EAAEA,EAAEihC,EAAEv9B,EAAEq9B,EAAEL,EAAExoD,KAAKg+C,KAAK8K,GAAG,EAAEA,GAAGC,EAAEP,EAAEA,GAAG,EAAEO,EAAEA,IAAIxE,EAAE,EAAEiE,EAAEA,EAAEh9B,EAAE5Z,GAAGk3C,EAAEN,EAAEA,GAAG,EAAEh9B,GAAGq9B,GAAGtE,EAAE35B,GAAGk+B,EAAEN,EAAEA,GAAG,EAAEh9B,GAAGq9B,GAAGtE,EAAEz3C,EAAEs+B,GAAG,CAAC9f,EAAE,IAAIw9B,EAAE,CAAC,IAAIl3C,EAAE,IAAIgZ,SAAS9d,EAAEs+B,GAAG,CAAC9f,EAAE,EAAE,CAAC,EAAE,SAAS4qB,EAAE1qB,EAAEg9B,EAAExoD,KAAKg+C,KAAKl2B,GAAG,EAAEA,GAAG0D,GAAG,EAAE1e,EAAEs+B,GAAG,CAAC9f,EAAE,IAAIxD,EAAE,CAAC,KAAKA,EAAEouB,GAAG,KAAKpuB,EAAEouB,KAAK,OAAOppC,GAAGme,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,SAAS,IAAIs0B,EAAE,CAAC,SAAS9lD,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,EAAE7M,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAAGL,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAyByoC,EAAEx5B,EAArB7M,EAAE,kBAAiD+H,EAAE8E,EAAzB7M,EAAE,sBAA4ByjD,EAAE,WAAWpd,EAAE7U,QAAQ0f,KAAKtyC,QAAO6kD,EAAErS,UAAU,IAAI/K,EAAE7U,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,KAAKu9B,EAAErS,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,KAAKu9B,EAAErS,UAAU2U,iBAAiB,SAAS/lD,EAAE8E,GAAG,IAAI,IAAIohB,EAAE,EAAEA,EAAElmB,EAAEqL,SAAS6a,EAAE,CAAC,IAAIrZ,EAAE7M,EAAEkmB,GAAGL,EAAE/gB,EAAEohB,GAAGL,EAAEmgC,MAAMC,IAAIpgC,EAAEqgC,SAASD,IAAIpgC,EAAEsgC,WAAW9f,EAAE7U,QAAQwgB,WAAWnlC,EAAE,GAAG,IAAIgZ,EAAEugC,UAAU/f,EAAE7U,QAAQwgB,WAAWnlC,EAAE,GAAG,MAAM42C,EAAErS,UAAUiV,kBAAkB,SAASrmD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAE,KAAKwgB,EAAE,KAAK8K,EAAEnxC,EAAEqL,OAAO,EAAEtD,EAAE,EAAEA,GAAGopC,EAAEppC,IAAI,GAAG,QAAQ8E,EAAE7M,EAAE+H,GAAG,MAAM8mC,MAAMhiC,GAAG,CAAC,IAAI42C,EAAEzjD,EAAE+H,GAAG,GAAG,GAAG4nC,EAAE3vC,EAAE+H,GAAG,GAAG,GAAG07C,EAAE52C,IAAI42C,EAAE52C,GAAG8iC,EAAE9iC,IAAI8iC,EAAE9iC,IAAI,OAAOw5B,GAAGsJ,EAAEtJ,KAAKA,EAAEsJ,IAAI,OAAO9pB,GAAG49B,EAAE59B,KAAKA,EAAE49B,GAAG,MAAM,CAAC59B,EAAEwgB,IAAIod,EAAErS,UAAUkV,gBAAgB,SAAStmD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAE,EAAEA,EAAE7lB,EAAEqL,OAAOwa,KAAIhZ,EAAE7M,EAAE6lB,IAAKmgC,MAAMj+C,EAAEypB,QAAQ+0B,aAAazhD,EAAE+H,EAAEs5C,WAAWjgC,GAAGrZ,EAAEq5C,SAASn+C,EAAEypB,QAAQ+0B,aAAazhD,EAAE+H,EAAEu5C,UAAUlgC,IAAIA,EAAEsL,QAAQiyB,EAAE3+C,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,oBAAoB,GAAG,gBAAgB,IAAIg1B,EAAE,CAAC,SAASxmD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAIiP,EAAE,aAAagZ,EAAEhZ,EAAEgZ,EAAE4gC,EAAE,EAAE5gC,EAAE6gC,EAAE,EAAE7gC,EAAE8gC,OAAO,EAAE9gC,EAAEurB,UAAUqU,cAAc,SAASzlD,EAAE8E,EAAEohB,KAAKL,EAAEurB,UAAUwV,eAAe,SAAS5mD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAE,GAAGw5B,EAAE,EAAEA,EAAErmC,EAAEqL,SAASg7B,EAAE,CAAC,IAAI8K,EAAEnxC,EAAEqmC,GAAGt+B,EAAEopC,EAAE,GAAGsS,EAAE,OAAO17C,EAAE,KAAK8d,EAAEmsB,WAAWjqC,GAAG4nC,EAAE,CAAC/0C,EAAEqrD,IAAIrgC,EAAEqgC,IAAIY,KAAKhhC,EAAEmsB,WAAWb,EAAE,IAAI2V,KAAKrD,EAAE3jC,KAAKhb,EAAEymB,IAAI8a,EAAEngB,EAAE6gC,QAAQd,IAAIe,QAAQf,KAAKp5C,EAAE6e,KAAKikB,GAAG,OAAO/wC,KAAKmnD,iBAAiB/lD,EAAE6M,GAAGA,GAAGgZ,EAAEurB,UAAU2U,iBAAiB,SAAS/lD,EAAE8E,KAAK+gB,EAAEurB,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,KAAKL,EAAEurB,UAAUiV,kBAAkB,SAASrmD,EAAE8E,EAAEohB,KAAKL,EAAEurB,UAAUkV,gBAAgB,SAAStmD,EAAE8E,EAAEohB,KAAKL,EAAEmsB,WAAW,SAAShyC,GAAG,OAAO,OAAOA,EAAEimD,IAAIjmD,GAAGkmB,EAAEsL,QAAQ3kB,EAAE/H,EAAEy+C,QAAQr9B,EAAEsL,SAAS,IAAIy1B,EAAE,CAAC,SAASjnD,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,EAAE7M,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAAGL,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAyByoC,GAAGx5B,EAAtB7M,EAAE,kBAAyBA,EAAE,cAAcmxC,EAAEtkC,EAAEw5B,GAAGt+B,EAAE,cAAaA,EAAEqpC,UAAU,IAAID,EAAE3f,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAI4nC,EAAE,GAAGiU,EAAE19B,EAAErnB,IAAI,YAAY4nB,EAAE,EAAEA,EAAEzmB,EAAEqL,OAAOob,I
AAI5Z,EAAE7M,EAAEymB,GAAG,GAAG4f,EAAErmC,EAAEymB,GAAG3hB,GAAG8+C,GAAG,OAAOvd,IAAIA,EAAE,IAAI,GAAGA,EAAE,IAAI,KAAKA,EAAE,MAAM,OAAOA,GAAG8K,EAAE9K,EAAE,GAAGt+B,EAAEs+B,EAAE,GAAG,OAAO8K,GAAGtC,MAAMsC,GAAGxB,EAAEjkB,KAAK,CAAC7e,EAAEskC,EAAE,CAACA,EAAEppC,MAAgB8d,EAAE,KAAV9d,EAAEopC,EAAEppC,EAAE,GAAU4nC,EAAEjkB,KAAK,CAAC7e,EAAEgZ,EAAE,CAACsrB,EAAEppC,OAAO4nC,EAAEjkB,KAAK,CAAC7e,EAAE,KAAK,CAAC,KAAK,QAAQ,OAAO8iC,GAAG5nC,EAAEqpC,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,GAAGphB,EAAE7J,KAAK8D,IAAI+F,EAAE9E,EAAEqL,QAAQ,IAAIwB,EAAEgZ,EAAE,GAAGwgB,EAAE,EAAE8K,EAAE,EAAE,IAAItkC,EAAE,EAAEA,EAAE7M,EAAEqL,OAAOwB,IAAI,CAACw5B,GAAGrmC,EAAE6M,GAAG,GAAG,GAAGskC,GAAGnxC,EAAE6M,GAAG,GAAG,GAAGA,EAAE/H,GAAG,IAAIuhC,GAAGrmC,EAAE6M,EAAE/H,GAAG,GAAG,GAAGqsC,GAAGnxC,EAAE6M,EAAE/H,GAAG,GAAG,IAAI,IAAIiD,EAAE/H,EAAE6M,GAAG,GAAG42C,EAAEtS,EAAE9K,EAAE8K,EAAE,EAAEtrB,EAAEhZ,GAAG,CAAC9E,EAAE,IAAI07C,GAAG,OAAO59B,GAAGK,EAAEsL,QAAQzpB,EAAEjD,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,gBAAgB,EAAE,YAAY,IAAI01B,EAAE,CAAC,SAASlnD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAyBioB,EAAE,SAAS7lB,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAArBA,EAAE,kBAAwEqmC,EAAE,cAAaA,EAAE+K,UAAU,IAAIvrB,EAAE2L,SAAoBi0B,cAAc,SAASzlD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAE,GAAGgZ,EAAEK,EAAErnB,IAAI,YAAYwnC,EAAE,EAAEA,EAAErmC,EAAEqL,OAAOg7B,IAAI,CAAC,IAAI8K,EAAEnxC,EAAEqmC,GAAG,GAAGt+B,EAAE/H,EAAEqmC,GAAGvhC,GAAG+gB,GAAG9d,GAAG,IAAIA,EAAE,MAAM8E,EAAE6e,KAAK,CAACylB,EAAEppC,IAAI,OAAO8E,GAAGw5B,EAAE+K,UAAUsU,eAAe,SAAS1lD,EAAE8E,EAAEohB,GAA0B,IAAIrZ,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,EAAE07C,EAAE,GAAG,GAAG,IAA7C3+C,EAAE7J,KAAK8D,IAAI+F,EAAE9E,EAAEqL,SAAmC,OAAOrL,EAAE,IAAI6M,EAAE,EAAEA,EAAE7M,EAAEqL,OAAOwB,IAAI,CAAC,IAAIskC,EAAE,EAAEppC,EAAE,EAAE8d,EAAE5qB,KAAKqD,IAAI,EAAEuO,EAAE/H,EAAE,GAAG+gB,EAAEhZ,EAAE,EAAEgZ,IAAI,QAAQwgB,EAAErmC,EAAE6lB,GAAG,KAAKgpB,MAAMxI,KAAKt+B,IAAIopC,GAAGnxC,EAAE6lB,GAAG,IAAI49B,EAAE52C,GAAG9E,EAAE,CAAC/H,EAAE6M,GAAG,GAAGskC,EAAEppC,GAAG,CAAC/H,EAAE6M,GAAG,GAAG,MAAM,OAAO42C,GAAGpd,EAAE+K,UAAUiV,kBAAkB,SAASrmD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAEgZ,EAAE,KAAKwgB,EAAE,KAAK8K,EAAEnxC,EAAEqL,OAAO,EAAEtD,EAAE,EAAEA,GAAGopC,EAAEppC,IAAI,QAAQ8E,EAAE7M,EAAE+H,GAAG,KAAK8mC,MAAMhiC,MAAM,OAAOw5B,GAAGx5B,EAAEw5B,KAAKA,EAAEx5B,IAAI,OAAOgZ,GAAGhZ,EAAEgZ,KAAKA,EAAEhZ,IAAI,MAAM,CAACgZ,EAAEwgB,IAAIngB,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,gBAAgB,IAAI21B,EAAE,CAAC,SAASnnD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA2BioB,EAAE,SAAS7lB,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAvB9E,EAAE,oBAAiMmxC,EAAE,SAASnxC,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAAjBA,EAAE,cAAoE+H,EAAE,SAAS/H,EAAE8E,EAAEohB,EAAErZ,GAAG,GAAGjO,KAAKwoD,SAASpnD,EAAEpB,KAAKyoD,OAAOx6C,EAAEjO,KAAKkT,QAAQhN,EAAElG,KAAK0oD,eAAephC,EAAEtnB,KAAK4F,OAAOxE,EAAEunD,QAAQ3oD,KAAKiU,MAAM7S,EAAEwnD,QAAQ3hC,EAAE4hC,kBAAkB7oD,KAAKkT,SAAS,KAAK,2BAA2BlT,KAAK8oD,KAAK76C,EAAE86C,cAAc,IAAIthB,EAAEznC,KAAKwoD,SAASQ,YAAYvhB,EAAEwhB,YAAYxhB,EAAEyhB,KAAKlpD,KAAK8oD,KAAK9sD,EAAEgE,KAAK8oD,KAAK9hC,EAAEhnB,KAAK8oD,KAAKhY,EAAE9wC,KAAK8oD,KAAK/X,GAAGtJ,EAAE0hB,QAAO1hB,EAAEznC,KAAKwoD,SAASY,aAAcH,YAAYxhB,EAAEyhB,KAAKlpD,KAAK8oD,KAAK9sD,EAAEgE,KAAK8oD,KAAK9hC,EAAEhnB,KAAK8oD,KAAKhY,EAAE9wC,KAAK8oD,KAAK/X,GAAGtJ,EAAE0hB,QAAQhgD,EAAEqpC,UAAU6W,MAAM,WAAWrpD,KAAK0oD,eAAeY,UAAU,EAAE,EAAEtpD,KAAKiU,MAAMjU,KAAK4F,SAASuD,EAAEqpC,UAAU+W,OAAO,WAAWvpD,KAAKwp
D,gBAAgBxpD,KAAKypD,oBAAoBtgD,EAAEugD,sBAAsB,SAAStoD,GAAG,OAAOA,EAAE+H,EAAEwgD,+BAA+B,MAAMxgD,EAAEwgD,+BAA+B,SAASvoD,EAAE8E,GAAG,OAAO,OAAO9E,EAAE8E,GAAGgiD,MAAM/+C,EAAEygD,gBAAgB,SAASxoD,EAAE8E,EAAEohB,EAAErZ,EAAEw5B,EAAE8K,EAAEsS,GAAG,IAAI9T,EAAE3vC,EAAEyoD,QAAQ7E,EAAEjU,EAAE+Y,iBAAiB,WAAW1oD,EAAE2oD,SAAS9iC,EAAE+iC,YAAY/7C,KAAKA,EAAE,MAAM,IAAI4Z,EAAEkpB,EAAE+Y,iBAAiB,oBAAoB1oD,EAAE2oD,SAASpiC,EAAEvmB,EAAEsU,OAAOyO,EAAE/iB,EAAE2oD,QAAQ5E,EAAEl+B,EAAEgjC,eAAetiC,EAAE,EAAEA,EAAElb,OAAOtD,EAAEugD,sBAAsB3Y,EAAE+Y,iBAAiB,yBAAyB3lC,KAAKihC,EAAEn3C,GAAGA,EAAExB,QAAQ,EAAEy4C,EAAE9jD,EAAE8oD,eAAehF,EAAE7pB,OAAO+pB,GAAGF,EAAEiF,aAAajF,EAAEiF,YAAYl8C,GAAG,IAAI2yC,EAAEz3C,EAAEihD,YAAYhpD,EAAE+jD,EAAE79B,EAAEu9B,EAAEpd,EAAE5f,EAAEm9B,EAAE9+C,GAAGiD,EAAEkhD,kBAAkBjpD,EAAEw/C,EAAErO,EAAErsC,EAAE2+C,GAAGO,GAAGF,EAAEiF,aAAajF,EAAEiF,YAAY,IAAIjF,EAAEoF,WAAWnhD,EAAEihD,YAAY,SAAShpD,EAAE8E,EAAEohB,EAAErZ,EAAEgZ,EAAEwgB,EAAE8K,EAAEppC,GAAG,IAAI07C,EAAE9T,EAAEiU,EAAE,KAAKn9B,EAAE,KAAKF,EAAE,KAAKxD,EAAE,GAAGghC,GAAE,EAAGC,EAAEhkD,EAAE8oD,eAAe9E,EAAE6D,YAAY7D,EAAEmF,YAAYphD,EAAEi8C,EAAEoF,UAAUljC,EAAE,IAAI,IAAI49B,EAAEh/C,EAAEukD,OAAO7J,EAAE16C,EAAEwkD,KAAK1jC,EAAE9gB,EAAEykD,WAAW3uD,EAAEkK,EAAE0kD,OAAO5uD,EAAE4kD,EAAE5kD,IAAI,CAAC,GAAG+0C,EAAEmU,EAAElpD,GAAGgrB,EAAE,CAAC,KAAKhrB,EAAE4kD,IAAI55B,EAAEk+B,EAAElpD,IAAIA,IAAI,GAAGA,GAAG4kD,EAAE,MAAM7P,EAAEmU,EAAElpD,GAAG,GAAG,OAAO+0C,EAAEqX,SAASrX,EAAEqX,SAASrX,EAAEqX,QAAQ7V,GAAG,OAAOyS,IAAII,EAAEyF,OAAO7F,EAAEn9B,GAAGu9B,EAAE0F,OAAO/Z,EAAEoX,QAAQtgC,IAAIm9B,EAAEn9B,EAAE,SAAS,CAAC,GAAGg9B,GAAE,EAAGpd,GAAG,OAAOud,EAAE,CAAC9+C,EAAE6kD,SAAS/uD,EAAEkK,EAAE8kD,OAAuC,IAAI5jC,EAAE,QAAtCO,EAAEzhB,EAAE+kD,QAAQ/kD,EAAEglD,KAAK9C,QAAQ,OAAqBzgC,GAAGA,EAAEk9B,EAAE,OAAOG,GAAG59B,EAAEqgB,KAAK0d,GAAG,OAAOH,GAAG9+C,EAAE+kD,SAAS7jC,KAAKy9B,GAAE,GAAI,OAAOG,EAAE19B,IAAIirB,IAAI6S,EAAEyF,OAAO7F,EAAEn9B,GAAGu9B,EAAE0F,OAAO/Z,EAAEoX,QAAQtgC,IAAIu9B,EAAE0F,OAAO/Z,EAAEoX,QAAQpX,EAAEqX,UAAUhD,EAAEyF,OAAO9Z,EAAEoX,QAAQpX,EAAEqX,UAAUnhC,GAAG49B,IAAI1gC,EAAE2I,KAAK,CAACikB,EAAEoX,QAAQpX,EAAEqX,QAAQrX,EAAEpkB,MAAMq4B,EAAEjU,EAAEoX,QAAQtgC,EAAEkpB,EAAEqX,QAAQjD,GAAE,EAAG,OAAOC,EAAE+F,SAAShnC,GAAGhb,EAAEkhD,kBAAkB,SAASjpD,EAAE8E,EAAEohB,EAAErZ,EAAEgZ,GAAG,IAAI,IAAIwgB,EAAErmC,EAAE8oD,eAAe3X,EAAE,EAAEA,EAAErsC,EAAEuG,OAAO8lC,IAAI,CAAC,IAAIppC,EAAEjD,EAAEqsC,GAAG9K,EAAEpM,OAAO/T,EAAEgrB,KAAKlxC,EAAEyoD,QAAQzoD,EAAEyoD,QAAQzoD,EAAE2oD,QAAQtiB,EAAEt+B,EAAE,GAAGA,EAAE,GAAG8E,EAAEgZ,EAAE9d,EAAE,IAAIs+B,EAAE6iB,YAAYnhD,EAAEqpC,UAAUgX,cAAc,WAAW,IAAI,IAAIpoD,EAAEpB,KAAKyoD,OAAO/yC,OAAOxP,EAAE9E,EAAEqL,OAAOvG,KAAK,IAAI,IAAIohB,EAAElmB,EAAE8E,GAAG+H,EAAEqZ,EAAE7a,OAAOwB,KAAK,CAAC,IAAIgZ,EAAEK,EAAErZ,GAAGgZ,EAAEkhC,QAAQnoD,KAAK8oD,KAAKhY,EAAE7pB,EAAEjrB,EAAEgE,KAAK8oD,KAAK9sD,EAAEirB,EAAEmhC,QAAQpoD,KAAK8oD,KAAK/X,EAAE9pB,EAAED,EAAEhnB,KAAK8oD,KAAK9hC,IAAI7d,EAAEqpC,UAAUiX,iBAAiB,SAASroD,EAAE8E,GAAG,IAAIohB,EAAErZ,EAAEw5B,EAAEvhC,GAAGlG,KAAK0oD,eAAenW,EAAEvyC,KAAKyoD,OAAO/yC,OAAOvM,EAAEnJ,KAAKyoD,OAAO2C,SAASprD,KAAKqN,OAAOrN,KAAKwoD,SAAS6C,WAAW,IAAIxG,EAAE7kD,KAAKwoD,SAAS5lB,UAAU,WAAWmO,EAAE8T,EAAE59B,EAAE+iC,YAAYjZ,KAAKA,EAAE,CAACA,IAAI,IAAIiU,EAAE,GAAG,IAAI19B,EAAE,EAAEA,EAAEne,EAAEsD,OAAO6a,IAAI,CAACrZ,EAAE9E,EAAEme,GAAG,IAAIO,EAAE7nB,KAAKwoD,SAAS5lB,UAAU,UAAU30B,GAAG4Z,GAAGg9B,IAAIG,EAAE/2C,GAAG4Z,GAAG,IAAIP,EAAE,EAAEA,EAAEypB,EAAEtkC,OAAO6a,IAAI,IAAI,IAAIK,EAAEopB,EAAEzpB,GAAGnD,EAAEmD,GAAGypB,EAAEtkC,OAAO,EAAE04C,EAAE,EAAEA,EAAE5S,EAAE9lC,OAAO04C,IAAI,GAAGl3C,EAAE9E,EAAEg8C,IAAI/jD,GAAG6M,GAAG7M,EAAE,CAAC,IAAIgkD,EAAE7S,EAAE4S,GAAGD,EAAEv9B,EAAE,GAAG1Z,KAAK+2C,EAAE,CAAC,IAAI7gC,EAAE,SAAS+gC,EAAEF,EAAE/2C
,GAAG,IAAI2yC,EAAE5gD,KAAKqN,OAAOY,GAAG+Y,EAAEhnB,KAAKwoD,SAAS5lB,UAAU,cAAc30B,GAAGw5B,EAAEpM,OAAOoM,EAAE8iB,YAAY3J,EAAEnZ,EAAE+iB,UAAUxjC,EAAEk+B,EAAE,CAACxvC,OAAO0vC,EAAE2E,QAAQ97C,EAAEi8C,eAAeziB,EAAEgF,MAAMmU,EAAE0K,YAAYtkC,EAAE6iC,QAAQ7pD,KAAKwoD,SAAS+C,KAAKvrD,KAAKwoD,SAASgD,wBAAwBv9C,GAAGw9C,SAASzrD,KAAK8oD,KAAK4C,YAAYvG,EAAEwG,YAAYpZ,EAAE9lC,OAAOm/C,iBAAiBxqD,EAAEyqD,gBAAgBtZ,IAAI9K,EAAE6iB,YAAYnhD,EAAE2iD,UAAU,CAACC,YAAY,SAAS3qD,GAAG+H,EAAE6iD,aAAa5qD,IAAI6qD,YAAY,SAAS7qD,GAAG+H,EAAE+iD,aAAa9qD,IAAI+qD,aAAa,SAAS/qD,GAAG+H,EAAEijD,cAAchrD,KAAK+H,EAAE6iD,aAAa,SAAS5qD,GAAG,IAAI8E,EAAE9E,EAAEyoD,QAAQviC,EAAElmB,EAAE2oD,QAAQ97C,EAAE7M,EAAEkqD,YAAY7jB,EAAEvhC,EAAEmmD,iBAAiB,oBAAoB/kC,GAAGirB,EAAErsC,EAAE08B,UAAU,oBAAoBtb,IAAIL,EAAEqlC,QAAQC,QAAQ1H,EAAE3+C,EAAE08B,UAAU,gBAAgBtb,GAAGypB,EAAE7qC,EAAE4jD,iBAAiB,aAAaxiC,GAAG09B,EAAE9+C,EAAEmmD,iBAAiB,YAAY/kC,GAAGmgB,GAAGx5B,GAAG9E,EAAEygD,gBAAgBxoD,EAAE8E,EAAE08B,UAAU,oBAAoBtb,GAAGrZ,EAAE,EAAEw5B,EAAEod,EAAE9T,EAAEwB,EAAEyS,GAAG77C,EAAEygD,gBAAgBxoD,EAAEA,EAAEqrC,MAAMx+B,EAAE42C,EAAE9T,EAAEwB,EAAEyS,IAAI77C,EAAEijD,cAAc,SAAShrD,GAAG,IAAI8E,EAAE9E,EAAEyoD,QAAQviC,EAAElmB,EAAE2oD,QAAQ,GAAG7jD,EAAE4jD,iBAAiB,cAAc5jD,EAAE4jD,iBAAiB,cAAc,CAAC5jD,EAAE4jD,iBAAiB,YAAYxiC,IAAI/lB,QAAQoN,KAAK,8CAA8C,IAAIV,EAAEw5B,EAAErmC,EAAE8oD,eAAe3X,EAAEnxC,EAAEqrC,MAAMoY,EAAE3+C,EAAEmmD,iBAAiB,YAAY/kC,GAAGypB,EAAE7qC,EAAE4jD,iBAAiB,WAAWxiC,GAAG09B,EAAE5jD,EAAEsU,OAAOmS,EAAEZ,EAAEgjC,eAAejF,EAAE,EAAEA,EAAEv4C,OAAOtD,EAAEugD,sBAAsBxjD,EAAE4jD,iBAAiB,yBAAyBxiC,KAAKK,EAAE0/B,IAAIljC,EAAEkjC,IAAIlC,EAAE,EAAE,GAAG,GAAGC,EAAEn+B,EAAEulC,OAAOja,GAAG2S,EAAE,QAAQE,EAAE3d,EAAE,IAAI2d,EAAED,EAAE,IAAIC,EAAE79B,EAAE,IAAIs9B,EAAE,IAAIpd,EAAEglB,UAAUvH,EAAEzd,EAAEwhB,YAAY,IAAI,IAAIrI,EAAE,SAASx/C,GAAG,OAAO,OAAOA,QAAG,IAASA,GAAG6uC,MAAM7uC,IAAIymB,EAAEojC,SAAS,CAAC,IAAIjkC,EAAEa,EAAEmjC,QAAQja,GAAG6P,EAAE55B,EAAEA,IAAI+pB,IAAId,MAAM9rB,IAAIy8B,EAAEz8B,GAAGwD,EAAE0/B,KAAKp5C,EAAE,CAAC+Y,EAAEsgC,SAAStgC,EAAEogC,OAAOrW,IAAI5sB,EAAE6C,EAAEA,GAAGipB,MAAMhiC,EAAE,MAAMA,EAAE,GAAG+Y,EAAEA,GAAGipB,MAAMhiC,EAAE,MAAMA,EAAE,GAAG+Y,EAAEA,GAAG/Y,EAAE,GAAG7M,EAAEqqD,SAAS1a,EAAE9iC,EAAE,GAAG7M,EAAEqqD,SAASzkC,EAAE/Y,EAAE,GAAG7M,EAAEqqD,SAAS1a,EAAE9iC,EAAE,GAAG7M,EAAEqqD,SAASzkC,EAAEipB,MAAMtoB,KAAKopB,GAAGtJ,EAAEojB,OAAOljC,EAAEw9B,EAAE,IAAI1d,EAAEqjB,OAAO9jC,EAAEmhC,QAAQhD,EAAE,IAAI1d,EAAEqjB,OAAO9jC,EAAEmhC,QAAQhD,EAAE,MAAM1d,EAAEojB,OAAOljC,EAAEw9B,EAAE,IAAI1d,EAAEqjB,OAAO9jC,EAAEmhC,QAAQl6C,EAAE,IAAIw5B,EAAEqjB,OAAO9jC,EAAEmhC,QAAQl6C,EAAE,KAAKw5B,EAAEqjB,OAAOnjC,EAAEw9B,EAAE,IAAI1d,EAAEilB,aAAavH,EAAEl3C,EAAE0Z,EAAEX,EAAEmhC,SAAS1gB,EAAEklB,SAASxjD,EAAEyjD,iBAAiB,SAASxrD,GAAG,IAAI8E,EAAE,GAAGohB,EAAE,KAAKrZ,EAAE,KAAKgZ,EAAE,EAA6fsrB,EAAE,SAASjrB,IAApgB,SAASlmB,GAAG,KAAK8E,EAAEuG,QAAQ,GAAG,CAAC,IAAI,IAAI6a,EAAEphB,EAAEuG,OAAO,EAAE6a,EAAE,EAAEA,IAAgB,GAAG,IAAVrZ,EAAE/H,EAAEohB,IAAW,GAAG,CAAC,IAAIL,EAAE/gB,EAAEohB,EAAE,GAAGL,EAAE,IAAIhZ,EAAE,IAAIgZ,EAAE,IAAIhZ,EAAE,IAAI/H,EAAE2mD,OAAOvlC,EAAE,GAAI,IAAQA,EAAE,EAAEA,EAAEphB,EAAEuG,OAAO,GAAe,IAAPwB,EAAE/H,EAAEohB,IAAQ,IAAI,GAAGphB,EAAEohB,EAAE,GAAG,GAAGphB,EAAE2mD,OAAOvlC,EAAE,GAAGA,IAAI,GAAGphB,EAAEuG,OAAO,IAAIrL,EAAE,CAAC,IAAIqmC,EAAE,EAAE,GAAGvhC,EAAE,GAAG,IAAIuhC,IAAQ,IAAI8K,EAAE,KAAKppC,EAAE,KAAjB,IAAsBme,EAAEmgB,EAAEngB,EAAEphB,EAAEuG,OAAO6a,IAAI,CAAC,IAAIrZ,EAAO,GAAG,IAAVA,EAAE/H,EAAEohB,IAAW,GAAG,GAAG,OAAOirB,GAAG,OAAOppC,EAAEopC,EAAEjrB,EAAEne,EAAEme,MAAM,CAAC,IAAIu9B,EAAE52C,EAAE,GAAG42C,EAAE3+C,EAAEqsC,GAAG,GAAGA,EAAEjrB,EAAEu9B,EAAE3+C,EAAEiD,GAAG,KAAKA,EAAEme,IAAI,IAAIypB,EAAE7qC,EAAEqsC,GAAGyS,EAAE9+C,EAAEiD,GAAGjD,EAAE2mD,OAAOplB,EAA
EvhC,EAAEuG,OAAOg7B,GAAG8K,EAAEppC,GAAGjD,EAAE4mB,KAAKikB,GAAG7qC,EAAE4mB,KAAKk4B,IAAIzS,EAAEppC,GAAGjD,EAAE4mB,KAAKk4B,GAAG9+C,EAAE4mB,KAAKikB,IAAI7qC,EAAE4mB,KAAKikB,KAAoBtJ,CAAEngB,GAAG,IAAI,IAAIirB,EAAE,EAAEppC,EAAEjD,EAAEuG,OAAO8lC,EAAEppC,EAAEopC,IAAI,CAAC,IAAIsS,EAAE3+C,EAAEqsC,GAAG,GAAGsS,EAAE,GAAGzjD,EAAE0pD,OAAOjG,EAAE,GAAGA,EAAE,IAAI,GAAGA,EAAE,IAAIzjD,EAAEypD,OAAOhG,EAAE,GAAGA,EAAE,IAAI3+C,EAAEuG,SAASwB,EAAE/H,EAAEA,EAAEuG,OAAO,GAAG,IAAIwa,GAAG/gB,EAAEuG,OAAOvG,EAAE,IAAIiD,EAAE,SAAS/H,EAAE6lB,EAAEwgB,GAAG,IAAIt+B,EAAE9M,KAAKgpB,MAAM4B,GAAM,OAAOK,GAAGne,GAAGme,IAAuBirB,EAAdjrB,EAAErZ,EAAE,GAAI9E,EAAEme,EAAE,GAAUA,EAAEne,GAAEjD,EAAE4mB,KAAK,CAAC1rB,EAAE6lB,EAAEwgB,KAAK,MAAM,CAACojB,OAAO,SAASzpD,EAAE8E,GAAGiD,EAAE,EAAE/H,EAAE8E,IAAI4kD,OAAO,SAAS1pD,EAAE8E,GAAGiD,EAAE,EAAE/H,EAAE8E,IAAIilD,OAAO,WAAW5Y,GAAE,GAAInxC,EAAE+pD,UAAUwB,KAAK,WAAWpa,GAAE,GAAInxC,EAAEurD,QAAQ1D,UAAU,WAAW1W,GAAE,GAAInxC,EAAE6nD,aAAayD,UAAU,WAAWna,GAAE,GAAInxC,EAAEsrD,aAAaI,OAAO,WAAW,OAAO7lC,KAAK9d,EAAE+iD,aAAa,SAAS9qD,GAAG,IAAIA,EAAEwqD,kBAAkB,IAAIxqD,EAAEsqD,YAAY,CAAC,IAAI,IAAIxlD,EAAE9E,EAAEyoD,QAAQviC,EAAEphB,EAAE6mD,YAAY1+B,MAAM,GAAGpgB,EAAEqZ,EAAE7a,OAAOwB,GAAG,EAAEA,IAAI/H,EAAE8mD,aAAa/+C,IAAIqZ,EAAEulC,OAAO5+C,EAAE,GAAG,GAAG,WAAW,IAAI,IAAI7M,EAAE,EAAEA,EAAEkmB,EAAE7a,OAAOrL,IAAI,GAAG8E,EAAE4jD,iBAAiB,YAAYxiC,EAAElmB,IAAI,OAAM,EAAG,OAAM,EAA7F,GAAmG,IAAI,IAAIqmC,EAAEod,EAAE9T,EAAE3vC,EAAEqqD,SAASzG,EAAE5jD,EAAEyqD,gBAAgBhkC,EAAEm9B,EAAEv4C,OAAOkb,EAAEzhB,EAAE4jD,iBAAiB,gBAAgB3lC,EAAEje,EAAE+mD,YAAY9H,EAAE,GAAGC,EAAE,SAAShkD,EAAE8E,EAAEohB,EAAErZ,GAAG,GAAG7M,EAAE0pD,OAAO5kD,EAAEohB,GAAGK,EAAE,IAAI,IAAIV,EAAEhZ,EAAExB,OAAO,EAAEwa,GAAG,EAAEA,IAAI,CAAC,IAAIwgB,EAAEx5B,EAAEgZ,GAAG7lB,EAAE0pD,OAAOrjB,EAAE,GAAGA,EAAE,MAAMyd,EAAEr9B,EAAE,EAAEq9B,GAAG,EAAEA,IAAI,CAAC,IAAItE,EAAEx/C,EAAE8oD,eAAeljC,EAAEM,EAAE49B,GAAG,GAAGh/C,EAAE4jD,iBAAiB,YAAY9iC,GAAG,CAAC,IAAIhrB,EAAEkK,EAAEmmD,iBAAiB,YAAYrlC,GAAGI,EAAElhB,EAAE4jD,iBAAiB,WAAW9iC,GAAGO,EAAEpD,EAAE+gC,GAAGpU,EAAE5qC,EAAEslD,wBAAwBxkC,GAAGkmC,EAAE,EAAEpc,EAAEqc,QAAQrc,EAAEsc,OAAOF,EAAE,EAAEA,EAAE,EAAEA,EAAE,IAAIA,EAAE,GAAGA,EAAEnc,EAAEA,EAAEmc,EAAEnc,EAAE/pB,EAAE,IAAIqmC,EAAEC,EAAEtI,EAAEE,GAAGqI,EAAEtmC,EAAEgjC,eAAeqD,EAAE,EAAEA,EAAE7gD,OAAOtD,EAAEugD,sBAAsBxjD,EAAE4jD,iBAAiB,yBAAyB9iC,KAAKwmC,EAAEnG,IAAIlH,EAAE,EAAE,GAAG,GAAGsN,EAAExmC,EAAEulC,OAAOjlC,GAAGmmC,EAAE,QAAQD,EAAEhmB,EAAE,IAAIgmB,EAAEtI,EAAE,IAAIsI,EAAElmC,EAAE,IAAIvrB,EAAE,IAAI4kD,EAAE6L,UAAUiB,EAAE9M,EAAEqI,YAAY,IAAI0E,EAAEC,GAAE,GAAIN,EAAE7gD,OAAO,EAAEvG,EAAE0iD,QAAQrW,EAAE3f,QAAQi7B,oBAAoBjN,EAAEz3C,EAAEyjD,iBAAiBhM,IAAI,IAAI,IAAIkN,EAAEC,EAAE,GAAGR,EAAEtC,SAAS,GAAG6C,EAAEP,EAAEvC,OAAO/jC,EAAE+mC,KAAKF,EAAE9mC,IAAII,EAAE,CAAC,GAAGO,EAAE,CAAC,IAAIimC,GAAGD,GAAGG,EAAE7F,KAAK,SAAsC,IAAIgG,EAAjCL,GAAE,EAAGD,EAAEG,EAAE7F,KAA0BgG,OAAE,KAAvBxmB,EAAE0d,EAAE2I,EAAE3F,UAA4B+E,EAAErI,EAAEpd,EAAE,GAAGA,EAAE4lB,EAAE,CAACS,EAAE1F,QAAQ6F,GAAG7mC,GAAG,IAAI+4B,EAAE,GAAGgF,EAAE2I,EAAE3F,SAAS,CAAC2F,EAAE1F,QAAQ8E,GAAG/H,EAAE2I,EAAE3F,SAAS,CAAC2F,EAAE1F,QAAQjI,EAAE,IAAIgF,EAAE2I,EAAE3F,SAAS2F,EAAE1F,aAAaiF,EAAEpd,MAAM6d,EAAE1F,UAAUhhC,EAAE,CAAC2pB,EAAE/pB,EAAE+pB,EAAEA,EAAEmc,GAAG,CAACY,EAAE1F,QAAQ8E,GAAGjd,MAAMud,IAAI5M,EAAEiK,OAAOiD,EAAE3F,QAAQkF,EAAE,IAAIzM,EAAEkK,OAAOgD,EAAE3F,QAAQkF,EAAE,MAAMjmC,GAAGw5B,EAAEkK,OAAOgD,EAAE3F,QAAQhI,EAAE,IAAIS,EAAEkK,OAAOgD,EAAE3F,QAAQkF,EAAE,KAAKzM,EAAEkK,OAAOgD,EAAE3F,QAAQkF,EAAE,IAAI1lC,IAAIomC,EAAEjhC,KAAK,CAAC0gC,EAAErN,EAAE,KAAK0E,GAAGpd,EAAEsmB,EAAEjhC,KAAK,CAACghC,EAAE3F,QAAQ1gB,EAAE,KAAKsmB,EAAEjhC,KAAK,CAACghC,EAAE3F,QAAQkF,EAAE,OAAOlN,EAAEkN,EAAEG,EAAEM,EAAE3F,aAAa/C,EAAE
xE,EAAE4M,EAAErN,EAAE,GAAG4N,GAAGA,EAAE,GAAGP,EAAEnG,IAAI,OAAOyG,EAAEI,WAAWje,MAAM6d,EAAEI,aAAa/I,EAAE2I,EAAE3F,SAASpX,EAAEA,EAAE+c,EAAEI,UAAUnd,EAAE/pB,GAAG69B,EAAEz9B,EAAEimC,GAAGS,IAAI1I,EAAExE,EAAEkN,EAAE3F,QAAQkF,EAAE,GAAGU,GAAGA,EAAE,IAAInN,EAAE+L,WAAWrlC,EAAEsL,QAAQzpB,EAAEjD,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,YAAY,GAAG,kBAAkB,KAAKu7B,GAAG,CAAC,SAAS/sD,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,EAAE7M,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAAG,SAAS6lB,EAAE7lB,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAAEnF,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA6BuzC,EAAEtrB,EAAzB7lB,EAAE,sBAA+DyjD,EAAE52C,EAAnC7M,EAAE,gCAA8D4jD,EAAE/2C,EAAxB7M,EAAE,qBAAkDumB,EAAEV,EAAvB7lB,EAAE,oBAA0B+iB,EAAE,CAACiqC,oBAAoB,EAAEC,oBAAoB,KAAKC,+BAA+B,GAAGC,+BAA+B,qBAAqBC,qBAAoB,EAAGC,sBAAqB,EAAGC,WAAU,EAAGC,YAAW,EAAGC,uBAAsB,EAAGC,mBAAmB,EAAEC,eAAe,EAAEC,QAAQ,KAAKzD,YAAY,EAAE0D,kBAAkB,EAAEC,kBAAkB,QAAQC,aAAa,EAAEC,kBAAkB,GAAGC,SAAS,EAAEC,YAAW,EAAGC,kBAAa,EAAOC,UAAU,IAAIC,MAAM,EAAEC,WAAU,EAAGC,WAAU,EAAGC,gBAAe,EAAGC,YAAW,EAAGC,WAAU,EAAGC,UAAU,IAAIC,wBAAuB,EAAGC,cAAa,EAAGC,oBAAoB,MAAMC,uBAAsB,EAAGh8C,OAAO,cAAci8C,UAAS,EAAGC,UAAU,EAAEC,UAAU,KAAKC,gBAAe,EAAGC,YAAY,GAAGC,aAAa,GAAGC,YAAY,GAAGC,cAAc,QAAQC,cAAc,GAAGC,cAAc,GAAGC,eAAe,GAAGC,cAAc,mBAAmBC,iBAAiBlM,EAAEjyB,QAAQo+B,aAAaC,eAAc,EAAGC,mBAAkB,EAAGC,oBAAoB,GAAGC,6BAA6B,UAAUC,mCAAmC,QAAQC,2BAA2B,UAAUC,mCAAmC,OAAOC,iCAAiC,EAAEC,2BAA2B,IAAIC,mCAAmC,QAAQC,iCAAiC,EAAEC,mBAAmB,GAAGC,oBAAoB,KAAKC,QAAQ,CAAC9M,EAAEpyB,QAAQs5B,aAAalH,EAAEpyB,QAAQw5B,cAAcpH,EAAEpyB,QAAQo5B,cAAc+F,QAAQ,GAAGC,KAAK,CAACh2D,EAAE,CAACi2D,eAAe,GAAGpB,eAAe,GAAGqB,mBAAmBvqC,EAAEwqC,uBAAuBC,eAAezqC,EAAE0qC,mBAAmBC,UAAS,EAAGC,UAAS,EAAGC,kBAAiB,EAAGC,OAAOlgB,EAAEmgB,YAAY1rC,EAAE,CAAC6pC,eAAe,GAAGoB,eAAe,GAAGG,eAAezqC,EAAEgrC,qBAAqBT,mBAAmBvqC,EAAEirC,yBAAyBN,UAAS,EAAGC,UAAS,EAAGC,kBAAiB,EAAGC,OAAOlgB,EAAEsgB,cAAczb,GAAG,CAACyZ,eAAe,GAAGoB,eAAe,GAAGG,eAAezqC,EAAEgrC,qBAAqBT,mBAAmBvqC,EAAEirC,yBAAyBL,UAAS,EAAGD,UAAS,EAAGE,kBAAiB,EAAGC,OAAOlgB,EAAEsgB,gBAAgBvrC,EAAEsL,QAAQzO,EAAEje,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,mBAAmB,EAAE,8BAA8B,GAAG,oBAAoB,GAAG,kBAAkB,KAAKkgC,GAAG,CAAC,SAAS1xD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAqBioB,EAAE,SAAS7lB,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAA9C,CAAjBA,EAAE,cAAoEqmC,EAAE,SAASrmC,GAAGpB,KAAKoD,UAAUhC,GAAGqmC,EAAE+K,UAAUugB,KAAK,SAAS3xD,EAAE8E,GAAGlG,KAAKoD,UAAUqiB,UAAU,QAAG,IAASzlB,KAAKgzD,YAAYhzD,KAAKgzD,WAAWjyB,UAAU/gC,KAAKgzD,WAAW,IAAI/rC,EAAE2L,QAAQ5yB,KAAKoD,UAAUhC,EAAE8E,IAAIuhC,EAAE+K,UAAUygB,aAAa,SAAS7xD,GAAG,IAAI8E,GAAE,EAAG9E,EAAEqL,SAASvG,EAAE9E,EAAE,GAAG6zB,KAAKj1B,KAAKgzD,WAAWC,aAAa/sD,IAAIuhC,EAAE+K,UAAU0gB,aAAa,WAAW,IAAI9xD,EAAE,GAAG8E,EAAElG,KAAKgzD,WAAWE,eAAe,GAAGhtD,EAAE,EAAE,OAAO9E,EAAE,IAAI,IAAIkmB,EAAEtnB,KAAKgzD,WAAWG,QAAQz9C,OAAOzH,EAAE,EAAEA,EAAEqZ,EAAE7a,SAASwB,EAAE7M,EAAE0rB,KAAK,CAACmI,IAAI/uB,EAAE6jC,OAAO97B,EAAE,IAAI,OAAO7M,GAAGkmB,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,YAAY,KAAKwgC,GAAG,CAAC,SAAShyD,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA2BioB,EAAE,SAAS7lB,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAvB9E,EAAE,oBAAgLqmC,EAAE,CAAGA,yBAA2B,SAASrmC,EAAE8E,EAAEohB,GAAGA,EAAE+rC,SAASpsC,EAAEqsC,UA
AUlyD,EAAEkmB,GAAGA,EAAEisC,SAAStsC,EAAEusC,UAAUpyD,EAAEkmB,GAAG,IAAIrZ,EAAE5R,KAAKC,IAAIgrB,EAAE+rC,SAAS/rC,EAAEmsC,YAAYlhB,EAAEl2C,KAAKC,IAAIgrB,EAAEisC,SAASjsC,EAAEosC,YAAYzlD,EAAE,GAAGskC,EAAE,QAAG,IAASrsC,EAAEytD,SAAS,GAAGztD,EAAEytD,QAAQlsB,EAAEmsB,oBAAoB1tD,EAAE9E,EAAEkmB,GAAGA,EAAEusC,YAAY5lD,EAAEqZ,EAAEwsC,aAAavhB,GAAG9K,SAAW,SAASrmC,EAAE8E,EAAEohB,GAAG,IAAIrZ,EAAEw5B,EAAEngB,EAAEysC,WAAU,EAAG,IAAIxhB,EAAErsC,EAAE8tD,aAAa,GAAG9tD,EAAE+tD,iBAAiB,WAAW,MAAM3sC,EAAE4sC,oBAAoBjtC,EAAEktC,MAAM5hB,EAAE,IAAIjrB,EAAE8sC,UAAUntC,EAAEktC,MAAM5hB,EAAE,IAAItrB,EAAEktC,MAAM5hB,EAAE,MAAMjrB,EAAE4sC,oBAAoB3hB,EAAE,GAAGjrB,EAAE8sC,UAAU7hB,EAAE,GAAGA,EAAE,IAAIjrB,EAAE+sC,eAAe/sC,EAAE8sC,WAAWluD,EAAEouD,SAASxL,KAAKhY,EAAE,GAAG5qC,EAAEmmD,iBAAiB,mBAAmB,CAAC,IAAIljD,EAAEjD,EAAE0iD,OAAO1iD,EAAEmmD,iBAAiB,mBAAmBxH,EAAE3+C,EAAEquD,gBAAgBxjB,EAAE7qC,EAAEsuD,YAAY3P,EAAE,IAAI17C,EAAE67C,EAAE9+C,EAAEsuD,YAAY3P,EAAE,IAAI17C,EAAE0e,EAAE3hB,EAAEuuD,aAAa1jB,GAAGppB,EAAEzhB,EAAEuuD,aAAazP,GAAG19B,EAAEotC,aAAa,CAAC7sC,EAAEF,GAAG,IAAIxD,EAAE,GAAGghC,EAAEj/C,EAAEyiD,QAAQziD,EAAEmmD,iBAAiB,mBAAmB,IAAIp+C,EAAE,EAAEA,EAAE/H,EAAEyuD,MAAMloD,OAAOwB,IAAI,CAAc,IAAIm3C,GAAjB3d,EAAEvhC,EAAEyuD,MAAM1mD,IAAW2mD,aAAa1P,EAAEh/C,EAAE2uD,YAAYzP,EAAE,GAAGn3C,GAAGk3C,EAAEvE,EAAE16C,EAAE2uD,YAAYzP,EAAE,GAAGn3C,GAAGk3C,EAAEn+B,EAAE9gB,EAAE4uD,aAAa5P,EAAEj3C,GAAGjS,EAAEkK,EAAE4uD,aAAalU,EAAE3yC,GAAGkW,EAAElW,GAAG,CAAC+Y,EAAEhrB,GAAGsrB,EAAEytC,cAAc5wC,EAAE,IAAImD,EAAE0tC,SAAQ,EAAG1tC,EAAE0qC,KAAK,GAAG/jD,EAAE,EAAEA,EAAE/H,EAAEyuD,MAAMloD,OAAOwB,IAAI,CAACw5B,EAAEvhC,EAAEyuD,MAAM1mD,GAAG,IAAImZ,EAAE,GAAGG,EAAErhB,EAAE+uD,WAAWhnD,GAAG/H,EAAEgvD,YAAYC,WAAW,WAAWlnD,IAAImZ,EAAEguC,gBAAgBnuC,EAAEktC,MAAM5sC,EAAE,IAAIH,EAAEiuC,eAAepuC,EAAEktC,MAAM5sC,EAAE,IAAIN,EAAEktC,MAAM5sC,EAAE,MAAMH,EAAEguC,gBAAgB7tC,EAAE,GAAGH,EAAEiuC,eAAe9tC,EAAE,GAAGA,EAAE,IAAIH,EAAEkuC,cAAcluC,EAAEiuC,gBAAgBnvD,EAAEouD,SAASxL,KAAK/X,EAAE,GAAGzpB,EAAE0qC,KAAKllC,KAAK1F,GAAGqgB,EAAE1Y,aAAazH,EAAE0tC,SAAQ,KAAMvtB,QAAU,SAASrmC,EAAE8E,EAAEohB,GAAGA,EAAE+rC,SAASpsC,EAAEqsC,UAAUlyD,EAAEkmB,GAAGA,EAAEisC,SAAStsC,EAAEusC,UAAUpyD,EAAEkmB,GAAG,IAAIrZ,EAAEqZ,EAAE4sC,qBAAqB5sC,EAAE+rC,SAAS/rC,EAAEmsC,YAAYnsC,EAAE+sC,eAAe/sC,EAAEotC,eAAezmD,EAAE5R,KAAKqD,IAAIuO,EAAEqZ,EAAEotC,aAAa,KAAK,IAAIjtB,EAAEx5B,EAAEqZ,EAAE8sC,UAAU,GAAG9sC,EAAEotC,cAAcjtB,EAAEngB,EAAEotC,aAAa,KAA4BjtB,GAAvBx5B,GAAGw5B,EAAEngB,EAAEotC,aAAa,IAAOptC,EAAE8sC,WAAWluD,EAAE+tD,iBAAiB,WAAW,KAAK/tD,EAAEqvD,YAAY,CAACl5D,KAAK2qD,IAAI//B,EAAEuuC,UAAUvnD,GAAG5R,KAAK2qD,IAAI//B,EAAEuuC,UAAU/tB,IAAIvhC,EAAEqvD,YAAY,CAACtnD,EAAEw5B,GAAGngB,EAAE0tC,QAAQ,IAAI,IAAIziB,EAAEjrB,EAAEisC,SAASjsC,EAAEosC,WAAWvqD,EAAE,EAAEA,EAAEjD,EAAEyuD,MAAMloD,OAAOtD,IAAI,CAAC,IAAI07C,EAAE3+C,EAAEyuD,MAAMxrD,GAAG4nC,EAAEzpB,EAAE0qC,KAAK7oD,GAAG67C,EAAEzS,EAAExB,EAAEukB,cAAcztC,EAAEP,EAAEytC,cAAcztC,EAAEytC,cAAc5rD,GAAG,KAAKwe,EAAEopB,EAAEqkB,gBAAgBpQ,EAAEn9B,IAAIF,EAAEtrB,KAAK8D,IAAIwnB,EAAEE,EAAE,KAAK,IAAI1D,EAAEwD,EAAEopB,EAAEskB,eAAextC,GAAG1D,EAAE0D,EAAE,KAAe1D,GAAVwD,GAAGxD,EAAE0D,EAAE,IAAOkpB,EAAEskB,gBAAgBnvD,EAAEgvD,YAAYC,WAAW,WAAWhsD,GAAG07C,EAAE91B,WAAW,CAAC1yB,KAAK2qD,IAAI//B,EAAEuuC,UAAUrxC,GAAG9nB,KAAK2qD,IAAI//B,EAAEuuC,UAAU7tC,IAAIk9B,EAAE91B,WAAW,CAAC5K,EAAEwD,GAAGzhB,EAAEuvD,YAAW,KAAKhuB,EAAEiuB,OAAOjuB,EAAEkuB,yBAAyBluB,EAAEmuB,UAAU,SAASx0D,EAAE8E,EAAEohB,GAAGA,EAAEuuC,WAAU,EAAGvuC,EAAEwuC,WAAU,GAAIruB,EAAEsuB,SAAS,SAAS30D,EAAE8E,EAAEohB,GAAGA,EAAEwuC,WAAU,EAAGxuC,EAAE+rC,SAASpsC,EAAEqsC,UAAUlyD,EAAEkmB,GAAGA,EAAEisC,SAAStsC,EAAEusC,UAAUpyD,EAAEkmB,GAAG,IAAIrZ,EAAE5R,KAAKC,IAAIgrB,EAAEmsC,WAAWnsC,EAAE+rC,UAAU5rB,EAAEprC,KAAKC,IAAI
grB,EAAEosC,WAAWpsC,EAAEisC,UAAUjsC,EAAE0uC,cAAc/nD,EAAEw5B,EAAE,EAAExgB,EAAEgvC,SAAShvC,EAAEivC,WAAWhwD,EAAEiwD,cAAc7uC,EAAE0uC,cAAc1uC,EAAEmsC,WAAWnsC,EAAE+rC,SAAS/rC,EAAEosC,WAAWpsC,EAAEisC,SAASjsC,EAAE8uC,kBAAkB9uC,EAAE+uC,SAAS/uC,EAAEgvC,UAAUhvC,EAAE+uC,SAAS/uC,EAAE+rC,SAAS/rC,EAAEgvC,SAAShvC,EAAEisC,SAASjsC,EAAE8uC,kBAAkB9uC,EAAE0uC,eAAevuB,EAAEmsB,oBAAoB,SAASxyD,EAAE8E,EAAEohB,GAAG,IAAI,IAAIrZ,EAAE7M,EAAEm1D,kBAAkB,iBAAiBtvC,EAAE7lB,EAAEm1D,kBAAkB,sBAAsB9uB,EAAE,KAAK8K,GAAG,EAAEppC,EAAEye,OAAO4uC,UAAU3R,EAAE,EAAEA,EAAEzjD,EAAEq1D,WAAWhqD,OAAOo4C,IAAI,CAAC,IAAI9T,EAAE3vC,EAAEq1D,WAAW5R,GAAGG,EAAE3oD,KAAK2qD,IAAIjW,EAAEoX,QAAQ7gC,EAAE+rC,SAAS,GAAGh3D,KAAK2qD,IAAIjW,EAAEqX,QAAQ9gC,EAAEisC,SAAS,IAAItjB,MAAM+U,MAAM,GAAGzS,GAAGyS,EAAE77C,KAAKA,EAAE67C,EAAEzS,EAAEsS,GAAG,IAAIh9B,EAAEzmB,EAAEirD,iBAAiB,uBAAuB,EAAE,GAAGljD,GAAG0e,EAAEA,IAAI4f,EAAErmC,EAAEq1D,WAAWlkB,IAAI9K,EAAE,CAAC,IAAI9f,EAAE,CAAC+uC,YAAW,EAAGC,MAAMlvB,EAAE0gB,QAAQ7gC,EAAE+rC,SAASjL,QAAQ9gC,EAAEisC,UAAU,GAAGnyD,EAAEw1D,eAAe,aAAajvC,GAAG,OAAOV,GAAGA,EAAEqrB,KAAKlxC,EAAE8E,EAAEuhC,GAAO9f,EAAE,CAAC+uC,YAAW,EAAGzO,KAAK7mD,EAAEuyD,OAAOkD,IAAIz1D,EAAEq1D,WAAWtO,QAAQ7gC,EAAE+rC,SAASjL,QAAQ9gC,EAAEisC,UAAUnyD,EAAEw1D,eAAe,QAAQjvC,IAAI1Z,GAAGA,EAAEqkC,KAAKlxC,EAAE8E,EAAE9E,EAAEuyD,OAAOvyD,EAAEq1D,aAAahvB,EAAEqvB,QAAQ,SAAS11D,EAAE8E,EAAEohB,GAAGphB,EAAE6wD,iBAAiBzvC,EAAEuuC,WAAU,EAAGpuB,EAAEkuB,yBAAyBv0D,EAAE8E,EAAEohB,GAAG,IAAIrZ,EAAE/H,EAAE8wD,UAAU,GAAG1vC,EAAEusC,aAAa,IAAIvsC,EAAE0uC,eAAe/uC,EAAEivC,WAAW,CAAC,IAAI3jB,EAAEl2C,KAAK8D,IAAImnB,EAAEmsC,WAAWnsC,EAAE+rC,UAAUlqD,EAAE9M,KAAKqD,IAAI4nB,EAAEmsC,WAAWnsC,EAAE+rC,WAAU9gB,EAAEl2C,KAAKqD,IAAI6yC,EAAEtkC,EAAEjS,KAAGmN,EAAE9M,KAAK8D,IAAIgJ,EAAE8E,EAAEjS,EAAEiS,EAAE6iC,KAAQ5qC,EAAE+wD,SAAS1kB,EAAEppC,GAAGme,EAAE4vC,oBAAmB,OAAQ,GAAG5vC,EAAEwsC,cAAc,IAAIxsC,EAAE0uC,eAAe/uC,EAAEgvC,SAAS,CAAC,IAAIpR,EAAExoD,KAAK8D,IAAImnB,EAAEosC,WAAWpsC,EAAEisC,UAAUxiB,EAAE10C,KAAKqD,IAAI4nB,EAAEosC,WAAWpsC,EAAEisC,WAAU1O,EAAExoD,KAAKqD,IAAImlD,EAAE52C,EAAE+Y,KAAG+pB,EAAE10C,KAAK8D,IAAI4wC,EAAE9iC,EAAE+Y,EAAE/Y,EAAE8iC,KAAQ7qC,EAAEixD,SAAStS,EAAE9T,GAAGzpB,EAAE4vC,oBAAmB,EAAG5vC,EAAEmsC,WAAW,KAAKnsC,EAAEosC,WAAW,MAAMjsB,EAAE2vB,WAAW,SAASh2D,EAAE8E,EAAEohB,GAAGlmB,EAAE+D,iBAAiB/D,EAAEkE,QAAQmH,OAAO,IAAI6a,EAAE+vC,wBAAwB,MAAM,IAAI,IAAIppD,EAAE,GAAGgZ,EAAE,EAAEA,EAAE7lB,EAAEkE,QAAQmH,OAAOwa,IAAI,CAAC,IAAIwgB,EAAErmC,EAAEkE,QAAQ2hB,GAAGhZ,EAAE6e,KAAK,CAACwqC,MAAM7vB,EAAE6vB,MAAMC,MAAM9vB,EAAE8vB,MAAMC,MAAMtxD,EAAEuuD,aAAahtB,EAAE6vB,OAAOG,MAAMvxD,EAAE4uD,aAAartB,EAAE8vB,SAAS,GAAGjwC,EAAEowC,eAAezpD,EAAE,GAAGA,EAAExB,OAAO6a,EAAEqwC,mBAAmB1pD,EAAE,GAAGqZ,EAAEswC,gBAAgB,CAAC57D,GAAE,EAAGgrB,GAAE,QAAS,GAAG/Y,EAAExB,QAAQ,EAAE,CAAC6a,EAAEqwC,mBAAmB,CAACL,MAAM,IAAIrpD,EAAE,GAAGqpD,MAAMrpD,EAAE,GAAGqpD,OAAOC,MAAM,IAAItpD,EAAE,GAAGspD,MAAMtpD,EAAE,GAAGspD,OAAOC,MAAM,IAAIvpD,EAAE,GAAGupD,MAAMvpD,EAAE,GAAGupD,OAAOC,MAAM,IAAIxpD,EAAE,GAAGwpD,MAAMxpD,EAAE,GAAGwpD,QAAQ,IAAIllB,EAAE,IAAIl2C,KAAKs3C,GAAGt3C,KAAK25C,MAAM1uB,EAAEqwC,mBAAmBJ,MAAMtpD,EAAE,GAAGspD,MAAMtpD,EAAE,GAAGqpD,MAAMhwC,EAAEqwC,mBAAmBL,QAAO/kB,EAAEl2C,KAAKC,IAAIi2C,IAAK,KAAKA,EAAE,GAAGA,GAAGjrB,EAAEswC,gBAAgB,CAAC57D,EAAEu2C,EAAE,KAAKvrB,EAAEurB,EAAE,MAAMjrB,EAAEuwC,aAAa,CAAC77D,EAAEkK,EAAE8tD,aAAahtC,EAAE9gB,EAAE+uD,eAAextB,EAAEqwB,UAAU,SAAS12D,EAAE8E,EAAEohB,GAAGA,EAAE+vC,wBAAwB,KAAK,IAAIppD,EAAEgZ,EAAE,GAAG,IAAIhZ,EAAE,EAAEA,EAAE7M,EAAEkE,QAAQmH,OAAOwB,IAAI,CAAC,IAAIw5B,EAAErmC,EAAEkE,QAAQ2I,GAAGgZ,EAAE6F,KAAK,CAACwqC,MAAM7vB,EAAE6vB,MAAMC,MAAM9vB,EAAE8vB,QAAQ,IAAIhlB,EAAuV5qB,EAAExD,EAAvVhb,EAAEme,EAAEowC,eAAe7S,EAAEv9B,EAAEqwC,mBAA8G5mB,EAAE,CAACumB
,OAA9F/kB,EAAE,GAAGtrB,EAAExa,OAAOwa,EAAE,GAAG,CAACqwC,MAAM,IAAIrwC,EAAE,GAAGqwC,MAAMrwC,EAAE,GAAGqwC,OAAOC,MAAM,IAAItwC,EAAE,GAAGswC,MAAMtwC,EAAE,GAAGswC,SAAuBD,MAAMzS,EAAEyS,MAAMC,MAAMhlB,EAAEglB,MAAM1S,EAAE0S,OAAOvS,EAAE19B,EAAEuwC,aAAa77D,EAAE,GAAGsrB,EAAEuwC,aAAa77D,EAAE,GAAG6rB,EAAEP,EAAEuwC,aAAa7wC,EAAE,GAAGM,EAAEuwC,aAAa7wC,EAAE,GAAmF,GAAhF+pB,EAAEymB,MAAMzmB,EAAEumB,MAAMpxD,EAAEouD,SAASxL,KAAKhY,EAAEkU,EAAEjU,EAAE0mB,MAAM1mB,EAAEwmB,MAAMrxD,EAAEouD,SAASxL,KAAK/X,EAAElpB,EAAa,GAAGZ,EAAExa,OAAOkb,EAAE,EAAExD,EAAE,OAAO,GAAG8C,EAAExa,QAAQ,EAAE,CAAC,IAAI04C,EAAEh8C,EAAE,GAAGmuD,MAAMzS,EAAEyS,MAAM3vC,GAAGV,EAAE,GAAGqwC,MAAM/kB,EAAE+kB,OAAOnS,EAAE,IAAIC,EAAEj8C,EAAE,GAAGouD,MAAM1S,EAAE0S,MAAMpzC,GAAG8C,EAAE,GAAGswC,MAAMhlB,EAAEglB,OAAOnS,EAAEz9B,EAAEtrB,KAAK8D,IAAI,EAAE9D,KAAKqD,IAAI,KAAKioB,IAAIxD,EAAE9nB,KAAK8D,IAAI,EAAE9D,KAAKqD,IAAI,KAAKykB,IAAI,IAAI+gC,GAAE,EAAG,GAAG59B,EAAEswC,gBAAgB57D,IAAIkK,EAAEqvD,YAAY,CAAC1Q,EAAE2S,MAAMzmB,EAAEymB,OAAOlwC,EAAEuwC,aAAa77D,EAAE,GAAG6oD,EAAE2S,OAAO7vC,EAAEk9B,EAAE2S,MAAMzmB,EAAEymB,OAAOlwC,EAAEuwC,aAAa77D,EAAE,GAAG6oD,EAAE2S,OAAO7vC,GAAGu9B,GAAE,GAAI59B,EAAEswC,gBAAgB5wC,EAAE,IAAI/Y,EAAE,EAAEA,EAAE,EAAEA,IAAI,CAAC,IAAI2yC,EAAE16C,EAAEyuD,MAAM1mD,GAAK/H,EAAEgvD,YAAYC,WAAW,WAAWlnD,KAAO2yC,EAAE7xB,WAAW,CAAC81B,EAAE4S,MAAM1mB,EAAE0mB,OAAOnwC,EAAEuwC,aAAa7wC,EAAE,GAAG69B,EAAE4S,OAAOtzC,EAAE0gC,EAAE4S,MAAM1mB,EAAE0mB,OAAOnwC,EAAEuwC,aAAa7wC,EAAE,GAAG69B,EAAE4S,OAAOtzC,GAAG+gC,GAAE,GAAI,GAAGh/C,EAAEuvD,YAAW,GAAIvQ,GAAGj+B,EAAExa,OAAO,GAAGvG,EAAEqwD,kBAAkB,gBAAgB,CAAC,IAAIv6D,EAAEkK,EAAE8tD,aAAa9tD,EAAEqwD,kBAAkB,gBAAgBjkB,KAAKpsC,EAAElK,EAAE,GAAGA,EAAE,GAAGkK,EAAE6xD,iBAAiBtwB,EAAEuwB,SAAS,SAAS52D,EAAE8E,EAAEohB,GAAG,GAAG,IAAIlmB,EAAEkE,QAAQmH,OAAOg7B,EAAE2vB,WAAWh2D,EAAE8E,EAAEohB,QAAQ,GAAG,GAAGlmB,EAAE62D,eAAexrD,OAAO,CAAC,IAAIwB,GAAG,IAAI0W,MAAMuzC,UAAUjxC,EAAE7lB,EAAE62D,eAAe,GAAG3wC,EAAE+vC,yBAAyBppD,EAAEqZ,EAAE+vC,wBAAwB,KAAK/vC,EAAE6wC,YAAY97D,KAAKC,IAAIgrB,EAAE6wC,WAAWlxC,EAAEmxC,SAAS,IAAI9wC,EAAE+wC,YAAYh8D,KAAKC,IAAIgrB,EAAE+wC,WAAWpxC,EAAEqxC,SAAS,GAAGpyD,EAAEqyD,aAAajxC,EAAE+vC,wBAAwBppD,EAAEqZ,EAAE6wC,WAAWlxC,EAAEmxC,QAAQ9wC,EAAE+wC,WAAWpxC,EAAEqxC,WAAW,IAAI/lB,EAAE,SAASnxC,EAAE8E,EAAEohB,GAAG,OAAOlmB,EAAE8E,EAAEA,EAAE9E,EAAEA,EAAEkmB,EAAElmB,EAAEkmB,EAAE,GAAGne,EAAE,SAAS/H,EAAE8E,GAAG,IAAIohB,EAAEL,EAAEuxC,QAAQtyD,EAAEuyD,SAASxqD,EAAE,CAACmC,KAAKkX,EAAEtrB,EAAEsU,MAAMgX,EAAEtrB,EAAEkK,EAAEuyD,QAAQC,YAAYtrC,IAAI9F,EAAEN,EAAEke,OAAO5d,EAAEN,EAAE9gB,EAAEuyD,QAAQE,cAAclxB,EAAE,CAACzrC,EAAEirB,EAAEqwC,MAAMl2D,GAAG4lB,EAAEC,EAAEswC,MAAMn2D,IAAI+H,EAAEopC,EAAE9K,EAAEzrC,EAAEiS,EAAEmC,KAAKnC,EAAEqC,OAAOu0C,EAAEtS,EAAE9K,EAAEzgB,EAAE/Y,EAAEmf,IAAInf,EAAEi3B,QAAQ,OAAO7oC,KAAKqD,IAAIyJ,EAAE07C,IAAIpd,EAAEupB,aAAa,CAAC4H,UAAU,SAASx3D,EAAE8E,EAAEohB,GAAG,IAAIlmB,EAAEy3D,QAAQ,GAAGz3D,EAAEy3D,OAAO,CAACvxC,EAAEwxC,oBAAoB13D,EAAE8E,EAAEohB,GAAGlmB,EAAE23D,QAAQ33D,EAAE43D,SAASvxB,EAAEwxB,SAAS73D,EAAE8E,EAAEohB,GAAGmgB,EAAEmuB,UAAUx0D,EAAE8E,EAAEohB,GAAG,IAAIrZ,EAAE,SAAS7M,GAAMkmB,EAAEuuC,UAAW1sD,EAAE/H,EAAE8E,GAAG,IAAIuhC,EAAEsuB,SAAS30D,EAAE8E,EAAEohB,GAAG,OAAOA,EAAE+rC,WAAW/rC,EAAE+rC,SAAS,KAAK/rC,EAAEisC,SAAS,KAAKrtD,EAAE6wD,kBAAuBzvC,EAAEysC,WAAWtsB,EAAEyxB,QAAQ93D,EAAE8E,EAAEohB,IAAyNphB,EAAEizD,iBAAiB/yD,SAAS,YAAY6H,GAAG/H,EAAEizD,iBAAiB/yD,SAAS,WAA1R,SAAShF,EAAEmxC,GAAGjrB,EAAEuuC,UAAU,OAAOvuC,EAAE+rC,SAAS5rB,EAAEqvB,QAAQvkB,EAAErsC,EAAEohB,GAAGmgB,EAAEkuB,yBAAyBpjB,EAAErsC,EAAEohB,GAAGA,EAAEysC,WAAWtsB,EAAEiuB,OAAOnjB,EAAErsC,EAAEohB,GAAGL,EAAEmyC,YAAYhzD,SAAS,YAAY6H,GAAGgZ,EAAEmyC,YAAYhzD,SAAS,UAAUhF,GAAGkmB,EAAEyZ,eAAiGs4B,0BAAyB,EAAGC,WAAW,SAASl4D
,EAAE8E,EAAEohB,GAAGmgB,EAAE2vB,WAAWh2D,EAAE8E,EAAEohB,IAAIiyC,UAAU,SAASn4D,EAAE8E,EAAEohB,GAAGmgB,EAAEqwB,UAAU12D,EAAE8E,EAAEohB,IAAIkyC,SAAS,SAASp4D,EAAE8E,EAAEohB,GAAGmgB,EAAEuwB,SAAS52D,EAAE8E,EAAEohB,IAAImyC,SAAS,SAASr4D,EAAE8E,EAAEohB,GAAG,GAAGA,EAAE4vC,mBAA+B5vC,EAAE4vC,oBAAmB,MAAzD,CAA6D,IAAIjpD,EAAE,CAACk6C,QAAQ7gC,EAAE+rC,SAASjL,QAAQ9gC,EAAEisC,SAASmD,YAAW,GAAIxwD,EAAE0wD,eAAe,WAAW3oD,IAAI7M,EAAE23D,QAAQ33D,EAAE43D,UAAU9yD,EAAEqyD,eAAc9wB,EAAEiyB,qBAAqB,CAACd,UAAU,SAASx3D,EAAE8E,EAAEohB,GAAGA,EAAEwxC,oBAAoB13D,EAAE8E,EAAEohB,IAAIqyC,QAAQlyB,EAAEkuB,0BAA0BluB,EAAEmyB,0BAA0B,CAAChB,UAAU,SAASx3D,EAAE8E,EAAEohB,GAAGA,EAAEwxC,oBAAoB13D,EAAE8E,EAAEohB,GAAGmgB,EAAEwxB,SAAS73D,EAAE8E,EAAEohB,IAAIuyC,UAAU,SAASz4D,EAAE8E,EAAEohB,GAAGA,EAAEysC,WAAWtsB,EAAEyxB,QAAQ93D,EAAE8E,EAAEohB,IAAIqyC,QAAQ,SAASv4D,EAAE8E,EAAEohB,GAAGA,EAAEysC,WAAWtsB,EAAEiuB,OAAOt0D,EAAE8E,EAAEohB,KAAKA,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,kBAAkB,KAAKknC,GAAG,CAAC,SAAS14D,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA2BioB,EAAE,SAAS7lB,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAvB9E,EAAE,oBAAgLqmC,EAAE,SAASrmC,GAAGpB,KAAKwoD,SAASpnD,EAAEpB,KAAK0V,OAAO,GAAG1V,KAAKorD,SAAS,GAAGprD,KAAK+5D,YAAY,GAAG/5D,KAAKg6D,OAAO,KAAKh6D,KAAKi6D,QAAQ,KAAKj6D,KAAKk6D,QAAQ,MAAMzyB,EAAE+K,UAAU2nB,WAAW,SAAS/4D,EAAE8E,GAAGlG,KAAK0V,OAAOoX,KAAK5mB,GAAGlG,KAAKorD,SAASt+B,KAAK1rB,IAAIqmC,EAAE+K,UAAUuW,YAAY,WAAW,OAAO/oD,KAAKo6D,OAAO3yB,EAAE+K,UAAU6nB,gBAAgB,WAAW,IAAIj5D,EAAE,CAACpF,EAAE,EAAEgrB,EAAE,GAAG5lB,EAAE0vC,EAAE9wC,KAAKwoD,SAASI,OAAOxnD,EAAEpF,EAAEgE,KAAKwoD,SAAS5lB,UAAU,YAAYxhC,EAAE2vC,EAAE/wC,KAAKwoD,SAASG,QAAQ,IAAIziD,EAAE,CAACo0D,UAAUt6D,KAAKwoD,SAAS+R,SAASC,iBAAiB,SAASt0D,GAAG,IAAIohB,EAAE,CAACtrB,EAAEoF,EAAEpF,EAAEgrB,EAAE5lB,EAAE4lB,EAAE8pB,EAAE5qC,EAAE6qC,EAAE3vC,EAAE2vC,GAAG,OAAO3vC,EAAEpF,GAAGkK,EAAE9E,EAAE0vC,GAAG5qC,EAAEohB,GAAGmzC,kBAAkB,SAASv0D,GAAG,IAAIohB,EAAE,CAACtrB,EAAEoF,EAAEpF,EAAEoF,EAAE0vC,EAAE5qC,EAAE8gB,EAAE5lB,EAAE4lB,EAAE8pB,EAAE5qC,EAAE6qC,EAAE3vC,EAAE2vC,GAAG,OAAO3vC,EAAE0vC,GAAG5qC,EAAEohB,GAAGozC,gBAAgB,SAASx0D,GAAG,IAAIohB,EAAE,CAACtrB,EAAEoF,EAAEpF,EAAEgrB,EAAE5lB,EAAE4lB,EAAE8pB,EAAE1vC,EAAE0vC,EAAEC,EAAE7qC,GAAG,OAAO9E,EAAE4lB,GAAG9gB,EAAE9E,EAAE2vC,GAAG7qC,EAAEohB,GAAGqzC,mBAAmB,SAASz0D,GAAG,IAAIohB,EAAE,CAACtrB,EAAEoF,EAAEpF,EAAEgrB,EAAE5lB,EAAE4lB,EAAE5lB,EAAE2vC,EAAE7qC,EAAE4qC,EAAE1vC,EAAE0vC,EAAEC,EAAE7qC,GAAG,OAAO9E,EAAE2vC,GAAG7qC,EAAEohB,GAAGszC,UAAU,WAAW,MAAM,CAAC5+D,EAAEoF,EAAEpF,EAAEgrB,EAAE5lB,EAAE4lB,EAAE8pB,EAAE1vC,EAAE0vC,EAAEC,EAAE3vC,EAAE2vC,KAAK/wC,KAAKwoD,SAASoO,eAAe,SAAS1wD,GAAGlG,KAAKo6D,MAAMh5D,GAAGqmC,EAAE+K,UAAUqoB,eAAe,SAASz5D,GAAGpB,KAAK+5D,YAAY,GAAG,IAAI,IAAI7zD,EAAElG,KAAKwoD,SAAS5lB,UAAU,iBAAiB,SAASxhC,GAAG,OAAOA,GAAGkmB,EAAE,EAAEA,EAAElmB,EAAEqL,OAAO6a,IAAI,CAAC,IAAIrZ,EAAE,GAAG,IAAI7M,EAAEkmB,GAAG2gC,WAAM,IAAS7mD,EAAEkmB,GAAGtrB,EAAE,YAAYuF,QAAQ8kB,MAAM,yCAAyC,GAAGjlB,EAAEkmB,GAAGiJ,QAAQnvB,EAAEkmB,GAAGuF,eAAe,WAAWzrB,EAAEkmB,GAAGuF,eAAe,WAAW,YAAYtrB,QAAQ8kB,MAAM,mEAAmEY,EAAE4gB,OAAO55B,EAAE7M,EAAEkmB,IAAIrZ,EAAEg6C,OAAOh6C,EAAEg6C,KAAK/hD,EAAE+H,EAAEjS,IAAIgE,KAAK+5D,YAAYjtC,KAAK7e,KAAKw5B,EAAE+K,UAAUsoB,UAAU,SAAS15D,GAAGpB,KAAKi6D,QAAQ74D,GAAGqmC,EAAE+K,UAAUuoB,SAAS,SAAS35D,GAAGpB,KAAKg6D,OAAO54D,GAAGqmC,EAAE+K,UAAUwoB,SAAS,WAAWh7D,KAAKi7D,OAAO,GAAGj7D,KAAKk7D,kBAAkBl7D,KAAKm7D,sBAAsBn7D,KAAKo7D,qBAAqBp7D,KAAKq7D,wBAAwB5zB,EAAE+K,UAAU0oB,gBAAgB,WAAW,
[generated file content: minified/bundled web GUI JavaScript asset (base64 VLQ-style data, not human-readable) — omitted]
C,EAAE2qB,mBAAmB,OAAGxO,EAAEnc,EAAE2qB,mBAAmB,MAAW5qB,GAAG,GAAGoc,GAAG,IAAIlI,EAAE,CAAC,IAAIqI,EAAEpmC,GAAG,EAAEA,EAAE,GAAGqmC,GAAGrmC,EAAE,IAAI,EAAEA,EAAE,GAAG8pB,EAAE2qB,mBAAmB,GAAG1/D,EAAE2oE,iBAAiB7zB,EAAEoc,EAAEG,GAAGtc,EAAE2qB,mBAAmB,GAAG1/D,EAAE2oE,iBAAiB7zB,EAAEoc,EAAEI,QAAQhmC,EAAE4lC,EAAEpc,EAAEC,EAAE2qB,mBAAmB,GAAG5qB,EAAExpB,EAAEL,EAAE8pB,EAAE2qB,mBAAmB,GAAGxO,EAAE5lC,EAAEL,EAAE,GAAGU,EAAE,CAACopB,EAAEyhB,iBAAiB7qC,EAAE,IAA+C6lC,GAA3CD,EAAEvtD,KAAKqqE,oBAAoB,KAAKxlB,EAAE,IAAI,MAAS,UAAU9T,EAAE/P,MAAMwsB,EAAEzc,EAAE2qB,mBAAmB,GAAG3qB,EAAE2qB,mBAAmB,GAAG17D,KAAKs0D,SAASxL,KAAK/X,EAAEwc,EAAEvtD,MAAMynC,IAAIA,EAAEsJ,IAAI,QAAG,IAAStJ,EAAE,KAAK,8FAA8F,IAAQod,EAAE,EAAEA,EAAE17C,EAAE07C,IAAyB,KAAhB9T,EAAE/wC,KAAK20D,MAAM9P,IAAS2N,iBAAiB,CAAoDhF,GAA3CD,EAAEvtD,KAAKqqE,oBAAoB,KAAKxlB,EAAE,IAAI,MAAS,UAAvD,IAAI,IAAI0I,EAAyDpN,EAAE1Y,EAAEzG,MAAMysB,EAAEhmB,EAAEi0B,mBAAmB,GAAGj0B,EAAEi0B,mBAAmB,GAAGhO,EAAE3c,EAAE2qB,mBAAmB,GAAG3qB,EAAE2qB,mBAAmB,GAAG/N,EAAE,GAAGC,EAAE,EAAEA,EAAEzN,EAAE1zC,OAAOmhD,IAAI,CAAC,IAAIE,GAAG3N,EAAEyN,GAAG1I,EAAEzd,EAAEi0B,mBAAmB,IAAIjO,EAAEM,EAAEhd,EAAE2qB,mBAAmB,GAAG5N,EAAEJ,EAAEC,EAAE7gC,KAAKihC,GAAGhd,EAAE/P,MAAMwsB,EAAEzc,EAAE2qB,mBAAmB,GAAG3qB,EAAE2qB,mBAAmB,GAAG17D,KAAKs0D,SAASxL,KAAK/X,EAAEwc,EAAEvtD,KAAK2tD,KAAMga,EAAEn1B,UAAU68B,sBAAsB,SAASjuE,GAAG,IAAI8E,GAAE,EAAGohB,EAAElmB,EAAE6nB,QAAQ,KAAK3B,EAAE,GAAG,KAAKlmB,EAAEkmB,EAAE,IAAI,KAAKlmB,EAAEkmB,EAAE,IAAIlmB,EAAE6nB,QAAQ,MAAM,GAAGgnB,MAAMmD,WAAWhyC,IAAI8E,GAAE,EAAG,GAAG9E,EAAEqL,QAAQrL,EAAE,YAAYA,EAAE,aAAa8E,GAAE,GAAIlG,KAAKsvE,iBAAiBppE,IAAIyhE,EAAEn1B,UAAU88B,iBAAiB,SAASluE,GAAGA,GAAGpB,KAAKs9D,OAAOhO,aAAatzD,EAAE8mE,WAAW9iE,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAep2D,EAAEq2D,mBAAmBryD,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAE8R,WAAW1yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBl2D,EAAEm2D,yBAAyBnyD,KAAKs9D,OAAOhO,aAAa,SAASluD,GAAG,OAAOgyC,WAAWhyC,IAAIpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAe,SAAShxD,GAAG,OAAOA,GAAGpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAEiS,aAAa7yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBlyD,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,iBAAiBuV,EAAEn1B,UAAUg8B,UAAU,SAASptE,GAAG,IAAI8E,EAAEohB,EAAErZ,EAAE,GAAGgZ,EAAEjrB,EAAEioE,oBAAoB7iE,GAAGqmC,EAAErmC,EAAE2L,MAAMka,GAAG,MAAMsrB,EAAEvyC,KAAKoqE,gBAAgB,cAAc,GAAG3iC,EAAE,GAAGxe,QAAQspB,IAAI9K,EAAE,GAAGxe,QAAQ,OAAO,IAAIspB,EAAE,MAAM,IAAIppC,EAAE,EAAE,WAAWnJ,KAAKw9D,cAAcr0D,EAAE,EAAEnJ,KAAKs9D,OAAO/oD,OAAOkzB,EAAE,GAAG16B,MAAMwlC,GAAGvyC,KAAKk1D,YAAYyI,iBAAiB,IAAI,IAAI9Y,EAAE9T,GAAE,EAAGiU,EAAEhlD,KAAKmqE,MAAM,UAAU19D,OAAOob,GAAE,EAAGF,EAAExe,EAAEwe,EAAE8f,EAAEh7B,OAAOkb,IAAI,CAAC,IAAIxD,EAAEsjB,EAAE9f,GAAG,GAAK,IAAIxD,EAAE1X,QAAQ,KAAK0X,EAAE,GAAG,CAAC,IAAIghC,EAAEhhC,EAAEpX,MAAMwlC,GAAG,KAAK4S,EAAE14C,OAAO,GAAG,CAAC,IAAI24C,EAAE,GAAG,GAAGrU,IAAI/wC,KAAKqvE,sBAAsBlqB,EAAE,IAAIN,EAAE7kD,KAAKu2D,kBAAkB,gBAAgBxlB,GAAE,GAAIqU,EAAE,GAAGP,EAAEM,EAAE,GAAGnlD,MAAMA,KAAK8oE,WAAW,IAAIxhD,EAAE,EAAEA,EAAE69B,EAAE14C,OAAO6a,IAAsB,IAAlBphB,EAAEi/C,EAAE79B,GAAGva,MAAM,MAAUN,QAAQlL,QAAQ8kB,MAAM,wEAAwE8+B,EAAE79B,GAAG,cAAc,EAAEK,GAAG,MAAMxD,EAAE,iCAAiCihC,EAAE99B,GAAG,CAAC,EAAE,IAAI89B,EAAE99B,GAAG,CAACtrB,EAAEqoE,YAAYn+D,EAAE,GAAGyhB,EAAExD,GAAGnoB,EAAEqoE,YAAYn+D,EAAE,GAAGyhB,EAAExD,SAAS,GAAGnkB,KAAK8pD,iBAAiB,aAAa,IAAI3E,EAAE14C,OAAO,GAAG,GAAGlL,QAAQ8kB,MAAM,oEAAoE,EAAEsB,GAAG,kCAAkCw9B,EAAE14C,OAAO,GAAG,OAAO0X,EAAE,KAAKmD,EAAE,EAAEA,EAAE69B,EAAE14C,OAAO6a,GAAG,EAAE89B,GAAG99B,EAAE,GAAG,GAAG,CAACtrB,EAAEqoE,YAAYlf,EAAE79B,GAAGK,EAAExD,GAAGnoB,EAAEqoE,YAAYlf,EAAE79B,EAAE,GAAGK,EAAExD,SAAS,GAAGnkB,KAAK8pD,iBAAiB,cAAc,IAAIxiC,EAAE,EAAEA,EAAE69B,EAAE14C,OAAO6a,IAAI,CAAC,IAAI49B,EAAEC,
EAAE79B,GAAG,OAAOhU,KAAK4xC,GAAGE,EAAE99B,GAAG,CAAC,KAAK,KAAK,MAAsB,IAAfphB,EAAEg/C,EAAEn4C,MAAM,MAAUN,OAAO24C,EAAE99B,GAAG,CAACtrB,EAAEqoE,YAAYn+D,EAAE,GAAGyhB,EAAExD,GAAGnoB,EAAEqoE,YAAYn+D,EAAE,GAAGyhB,EAAExD,GAAGnoB,EAAEqoE,YAAYn+D,EAAE,GAAGyhB,EAAExD,IAAI5iB,QAAQoN,KAAK,wFAAwFu2C,EAAE,cAAc,EAAEv9B,SAAU,IAAIL,EAAE,EAAEA,EAAE69B,EAAE14C,OAAO6a,IAAI89B,EAAE99B,GAAGtrB,EAAEqoE,YAAYlf,EAAE79B,GAAGK,EAAExD,GAAG,GAAGlW,EAAExB,OAAO,GAAG24C,EAAE,GAAGn3C,EAAEA,EAAExB,OAAO,GAAG,KAAKob,GAAE,GAAIu9B,EAAE34C,QAAQu4C,GAAGzjD,QAAQ8kB,MAAM,6BAA6BsB,EAAE,KAAKy9B,EAAE34C,OAAO,2CAA2Cu4C,EAAE,KAAK7gC,GAAG,IAAIwD,GAAG3nB,KAAKmqE,MAAM,UAAU,CAAC,IAAIvpB,GAAE,EAAG,IAAIt5B,EAAE,EAAEs5B,GAAGt5B,EAAE89B,EAAE34C,OAAO6a,IAAI89B,EAAE99B,KAAKs5B,GAAE,GAAI,GAAGA,EAAE,CAACr/C,QAAQoN,KAAK,wEAAwEwV,EAAE,0FAA0F,UAAUlW,EAAE6e,KAAKs4B,KAAK,OAAOv9B,IAAItmB,QAAQoN,KAAK,6DAA6DV,EAAE8e,MAAK,SAAS3rB,EAAE8E,GAAG,OAAO9E,EAAE,GAAG8E,EAAE,OAAM+H,GAAG05D,EAAEn1B,UAAU+8B,YAAY,SAASnuE,GAAG,GAAG,IAAIA,EAAEqL,OAAO,OAAOlL,QAAQ8kB,MAAM,6BAA6B,KAAK,GAAG,IAAIjlB,EAAE,GAAGqL,OAAO,OAAOlL,QAAQ8kB,MAAM,wCAAwC,KAAU,IAAIngB,EAAE,GADqU,SAAW9E,GAAG,IAAI8E,EAAE9E,EAAE,GAAGkmB,EAAEphB,EAAE,GAAG,GAAG,iBAAiBohB,IAAItrB,EAAEmnE,WAAW77C,GAAG,MAAM,IAAIw9B,MAAM,0CAA0Cx9B,EAAE,KAAKA,EAAE,KAAK,IAAI,IAAIrZ,EAAE,EAAEA,EAAE/H,EAAEuG,OAAOwB,IAAI,CAAC,IAAIgZ,EAAE/gB,EAAE+H,GAAG,GAAG,OAAOgZ,QAAG,IAASA,GAAI,iBAAiBA,IAAIjrB,EAAEguD,YAAY/iC,GAAI,MAAM,IAAI69B,MAAM,2CAA2C79B,EAAE,KAAKA,EAAE,MAC5oBsrB,CAAEnxC,GAAY,OAAOpB,KAAKmqE,MAAM,UAAU,CAAC,IAAI5oE,QAAQoN,KAAK,qFAAqF3O,KAAKs9D,OAAO/oD,OAAO,CAAC,KAAKrO,EAAE,EAAEA,EAAE9E,EAAE,GAAGqL,OAAOvG,IAAIlG,KAAKs9D,OAAO/oD,OAAOuY,KAAK,IAAI5mB,GAAGlG,KAAKk1D,YAAYyI,oBAAoB,CAAC,IAAIr2C,EAAEtnB,KAAKmqE,MAAM,UAAU,GAAG7iD,EAAE7a,QAAQrL,EAAE,GAAGqL,OAAO,OAAOlL,QAAQ8kB,MAAM,sCAAsCiB,EAAE,qCAAqClmB,EAAE,GAAGqL,OAAO,KAAK,KAAK,GAAGzQ,EAAEmnE,WAAW/hE,EAAE,GAAG,IAAI,CAACpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAep2D,EAAEq2D,mBAAmBryD,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAE8R,WAAW1yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBl2D,EAAEm2D,uBAAuB,IAAIlkD,EAAEjS,EAAE42C,MAAMxxC,GAAG,IAAI8E,EAAE,EAAEA,EAAE9E,EAAEqL,OAAOvG,IAAI,CAAC,GAAG,IAAI+H,EAAE/H,GAAGuG,OAAO,OAAOlL,QAAQ8kB,MAAM,QAAQ,EAAEngB,GAAG,qBAAqB,KAAK,GAAG,OAAO+H,EAAE/H,GAAG,IAAI,mBAAmB+H,EAAE/H,GAAG,GAAGgyD,SAASjoB,MAAMhiC,EAAE/H,GAAG,GAAGgyD,WAAW,OAAO32D,QAAQ8kB,MAAM,mBAAmB,EAAEngB,GAAG,kBAAkB,KAAK+H,EAAE/H,GAAG,GAAG+H,EAAE/H,GAAG,GAAGgyD,UAAU,OAAOjqD,EAAE,OAAOjO,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAe,SAAShxD,GAAG,OAAOA,GAAGpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAEiS,aAAa7yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBl2D,EAAE42D,yBAAyBxxD,GAAGumE,EAAEn1B,UAAUg9B,gBAAgB,SAASpuE,GAAG,IAAI8E,EAAE9E,EAAEquE,qBAAqBnoD,EAAElmB,EAAEsuE,kBAAkBzhE,EAAE7M,EAAEuuE,cAAc,GAAG,GAAG,QAAQ1hE,GAAG,YAAYA,EAAEjO,KAAKs9D,OAAOhO,aAAatzD,EAAE8mE,WAAW9iE,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAep2D,EAAEq2D,mBAAmBryD,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAE8R,WAAW1yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBl2D,EAAEm2D,2BAA2B,CAAC,GAAG,UAAUlkD,EAAE,MAAM,IAAI62C,MAAM,kGAAkG72C,EAAE,MAAMjO,KAAKs9D,OAAOhO,aAAa,SAASluD,GAAG,OAAOgyC,WAAWhyC,IAAIpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAe,SAAShxD,GAAG,OAAOA,GAAGpB,KAAKs9D,OAAOtL,KAAKh2D,EAAEy2D,OAAO7R,EAAEiS,aAAa7yD,KAAKs9D,OAAOtL,KAAKh2D,EAAEk2D,mBAAmBlyD,KAAKs9D,OAAOtL,KAAKh2D,EAAEo2D,eAAe,IAAInrC,EAAEwgB,EAAE8K,EAAE,GAAGppC,EAAE,GAAG07C,GAAE,EAAG,IAAI59B,EAAE,EAAEA,EAAE/gB,EAAE+gB,IAAI,CAAC,IAAI8pB,EAAE3vC,EAAEuuE,cAAc1oD,GAAG,GAAG,UAAU8pB,EAAEwB,EAAEzlB,KAAK7F,OAAO,CAAC,GAAG,UAAU8pB,IAAI/wC,KAAK8pD,iBAAiB,sBAAsB,MAAM,IAAIhF,MAAM,sHAAsH,IAAIE,EAAEzS,EAAEA,EAAE9lC,OAAO,GAAGtD,EAAE0jB,eAAe
m4B,GAAG77C,EAAE67C,GAAGl4B,KAAK7F,GAAG9d,EAAE67C,GAAG,CAAC/9B,GAAG49B,GAAE,GAAI,IAAIh9B,EAAE,CAACzmB,EAAEwuE,eAAe,IAAI,IAAI3oD,EAAE,EAAEA,EAAEsrB,EAAE9lC,OAAOwa,IAAIY,EAAEiF,KAAK1rB,EAAEwuE,eAAer9B,EAAEtrB,KAAKjnB,KAAK8pD,iBAAiB,eAAe7iC,GAAG,GAAGjnB,KAAKs9D,OAAO/oD,OAAOsT,EAAE3hB,EAAE2hB,EAAEpb,OAAO,IAAIkb,EAAE,GAAGxD,GAAE,EAAGghC,EAAE,GAAG,IAAIl+B,EAAE,EAAEA,EAAEK,EAAEL,IAAI,CAAC,IAAIm+B,EAAE,GAAG,QAAG,IAAShkD,EAAE4pE,SAAS/jD,EAAE,IAAI,OAAO7lB,EAAE4pE,SAAS/jD,EAAE,GAAG,CAAC,GAAG,QAAQhZ,GAAG,YAAYA,EAAEm3C,EAAEt4B,KAAK1rB,EAAE4pE,SAAS/jD,EAAE,GAAGixC,WAAW9S,EAAEt4B,KAAK1rB,EAAE4pE,SAAS/jD,EAAE,IAAIjnB,KAAK8pD,iBAAiB,aAAa,IAAIriB,EAAE,EAAEA,EAAEvhC,EAAE,EAAEuhC,IAAI2d,EAAEt4B,KAAK,CAAC1rB,EAAE4pE,SAAS/jD,EAAE,EAAE,EAAEwgB,GAAGrmC,EAAE4pE,SAAS/jD,EAAE,EAAE,EAAEwgB,SAAS,CAAC,IAAIA,EAAE,EAAEA,EAAE8K,EAAE9lC,OAAOg7B,IAAI,CAAC,IAAIyd,EAAE3S,EAAE9K,GAAG,GAAG2d,EAAEt4B,KAAK1rB,EAAE4pE,SAAS/jD,EAAEi+B,IAAIL,GAAG17C,EAAE0jB,eAAeq4B,IAAI,OAAO9jD,EAAE4pE,SAAS/jD,EAAE9d,EAAE+7C,GAAG,IAAI,CAAC,IAAIl+B,EAAE,GAAGA,EAAE01C,OAAOt7D,EAAEwuE,eAAe1qB,GAAGl+B,EAAEihC,KAAK7C,EAAE,GAAGp+B,EAAE6oD,UAAU,SAASzuE,GAAG,IAAI8E,EAAEqrC,OAAOu+B,aAAa,GAAG1uE,EAAE,IAAI,IAAIA,EAAE/E,KAAKG,MAAM4E,EAAE,IAAIA,EAAE,GAAG8E,EAAEqrC,OAAOu+B,aAAa,IAAI1uE,EAAE,GAAG,IAAI8E,EAAEuhB,cAAcrmB,EAAE/E,KAAKG,OAAO4E,EAAE,GAAG,IAAI,OAAO8E,EAA5J,CAA+Ji/C,EAAE14C,QAAQua,EAAEuR,KAAK,GAAG,IAAI,IAAInR,EAAE,EAAEA,EAAEje,EAAE+7C,GAAGz4C,OAAO2a,IAAIA,IAAIJ,EAAEuR,MAAM,MAAMvR,EAAEuR,MAAMn3B,EAAE4pE,SAAS/jD,EAAE9d,EAAE+7C,GAAG99B,IAAI+9B,EAAEr4B,KAAK9F,IAAI,IAAIygB,EAAE,EAAEA,EAAE2d,EAAE34C,OAAOg7B,IAAI4L,SAAS+R,EAAE3d,MAAM2d,EAAE3d,GAAG,MAAM9f,EAAElb,OAAO,GAAG24C,EAAE,GAAGz9B,EAAEA,EAAElb,OAAO,GAAG,KAAK0X,GAAE,GAAIwD,EAAEmF,KAAKs4B,QAAQ7jD,QAAQoN,KAAK,gBAAgBsY,EAAE,4DAA4D9C,IAAI5iB,QAAQoN,KAAK,mEAAmEgZ,EAAEoF,MAAK,SAAS3rB,EAAE8E,GAAG,OAAO9E,EAAE,GAAG8E,EAAE,OAAMlG,KAAKuqE,SAAS5iD,EAAEw9B,EAAE14C,OAAO,GAAGzM,KAAK66D,eAAe1V,GAAE,GAAInlD,KAAKk1D,YAAYyI,iBAAiBgK,EAAEn1B,UAAUi8B,2BAA2B,WAAWzuE,KAAK42D,eAAe,gBAAgB,KAAK+Q,EAAEn1B,UAAUoY,OAAO,WAAW,IAAIxpD,EAAEpB,KAAK0oE,MAAM,GAAG,mBAAmBtnE,IAAIA,EAAEA,KAAKpF,EAAEguD,YAAY5oD,GAAGpB,KAAKuqE,SAASvqE,KAAKuvE,YAAYnuE,GAAGpB,KAAKyuE,6BAA6BzuE,KAAK0uE,gBAAgB,GAAG,iBAAiBttE,GAAG,mBAAmBA,EAAE2uE,eAAe/vE,KAAKwvE,gBAAgBpuE,GAAGpB,KAAKyuE,6BAA6BzuE,KAAK0uE,gBAAgB,GAAG,iBAAiBttE,EAAkC,GAAzBpF,EAAEioE,oBAAoB7iE,GAAQpB,KAAKuuE,aAAantE,OAAO,CAAC,IAAIkmB,EAAEA,EAAErkB,OAAO+sE,eAAe,IAAIA,eAAe,IAAIC,cAAc,qBAAqB,IAAIhiE,EAAEjO,KAAKsnB,EAAE4oD,mBAAmB,WAAW,GAAG5oD,EAAE6oD,aAAa,MAAM7oD,EAAErC,QAAQ,IAAIqC,EAAErC,QAAQhX,EAAEsgE,aAAajnD,EAAE8oD,gBAAgB9oD,EAAEyC,KAAK,MAAM3oB,GAAE,GAAIkmB,EAAE+oD,KAAK,WAAY9uE,QAAQ8kB,MAAM,+BAA+BjlB,IAAIumE,EAAEn1B,UAAU89B,cAAc,SAASlvE,EAAE8E,QAAG,IAASA,IAAIA,GAAE,GAAI,IAAIohB,EAAElmB,EAAE27B,KAAK9uB,EAAE05D,EAAEa,eAAepnE,GAAG,eAAe6M,IAAIjO,KAAK2oE,YAAY16D,EAAE26D,YAAY,eAAe36D,IAAIjO,KAAKu1D,YAAYtnD,EAAE86D,YAAY,IAAI9hD,EAAEjrB,EAAE+nE,0BAA0B/jE,KAAKmqE,MAAM,UAAUl8D,GAAGjS,EAAEgnE,WAAWhjE,KAAKw9D,YAAYvvD,GAAGjO,KAAKk1D,YAAYyI,gBAAgBr2C,GAAGtnB,KAAK42D,eAAe,iBAAiB,IAAI52D,KAAK0oE,MAAMphD,EAAEphB,GAAGlG,KAAK4qD,UAAU1kD,IAAI+gB,EAAEjnB,KAAK0uE,WAAW1uE,KAAKkvE,cAAa,KAAMvH,EAAEa,eAAe,SAASpnE,GAAG,IAAI8E,EAAE,GAAG,IAAI,IAAIohB,KAAKlmB,EAAEA,EAAEyrB,eAAevF,IAAI,QAAQA,GAAGlmB,EAAEyrB,eAAevF,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,GAAGyhE,EAAEn1B,UAAU/hC,OAAO,SAASrP,EAAE8E,GAAG,IAAIlG,KAAKuwE,YAAY,CAACvwE,KAAKuwE,aAAY,EAAG,OAAOnvE,IAAI,OAAO8E,KAAK3E,QAAQoN,KAAK,8GAA8GvN,EAAE8E,EAAE,MAAM,IAAIohB,EAAEtnB,KAAK4oD,OAAO36C,EAAEjO,KAAK2oD,QAAQvnD,GAAGpB,KAAKyoE,SAAS9iE,MAAMsO,MAAM7S,EAAE,KAAKpB,KAAKyoE,SAAS9iE,MAAM
C,OAAOM,EAAE,KAAKlG,KAAK4oD,OAAOxnD,EAAEpB,KAAK2oD,QAAQziD,IAAIlG,KAAK4oD,OAAO5oD,KAAKyoE,SAASQ,YAAYjpE,KAAK2oD,QAAQ3oD,KAAKyoE,SAAShkE,cAAc6iB,GAAGtnB,KAAK4oD,QAAQ36C,GAAGjO,KAAK2oD,UAAU3oD,KAAKorE,kBAAkBprE,KAAK0uE,YAAY1uE,KAAKuwE,aAAY,IAAK5I,EAAEn1B,UAAUg6B,WAAW,SAASprE,GAAGpB,KAAK2oE,YAAYvnE,EAAEpB,KAAK0uE,YAAY/G,EAAEn1B,UAAUwa,WAAW,WAAW,IAAIhtD,KAAK4iC,UAAU,gBAAgB5iC,KAAKs9D,OAAOtQ,WAAW,IAAIhtD,KAAK4iC,UAAU,cAAcn2B,OAAOzM,KAAK+qE,aAAa,GAAG/qE,KAAKs9D,OAAOtQ,WAAWlgC,MAAK,GAAI,OAAO9sB,KAAK4iC,UAAU,eAAe+kC,EAAEn1B,UAAUg+B,cAAc,SAASpvE,EAAE8E,GAAG,IAAIohB,EAAEtnB,KAAKgtD,aAAa/+C,GAAE,EAAG,GAAG8S,MAAMivB,QAAQ5uC,KAAK,OAAOA,GAAG,iBAAiBA,EAAE6M,GAAE,EAAG7M,EAAE,CAACA,IAAI6M,EAAE,IAAI,IAAIgZ,KAAK7lB,EAAEA,EAAEyrB,eAAe5F,KAAKA,EAAE,GAAGA,GAAGK,EAAE7a,OAAOlL,QAAQoN,KAAK,2CAA2CsY,GAAGK,EAAEL,GAAG7lB,EAAE6lB,SAAS,IAAQA,EAAE,EAAEA,EAAE7lB,EAAEqL,OAAOwa,IAAI,kBAAkB7lB,EAAE6lB,GAAGA,GAAGK,EAAE7a,OAAOlL,QAAQoN,KAAK,2CAA2CsY,GAAGK,EAAEL,GAAG7lB,EAAE6lB,GAAG7lB,EAAE6lB,GAAG,GAAG7lB,EAAE6lB,IAAIK,EAAE7a,OAAOlL,QAAQoN,KAAK,2CAA2CvN,EAAE6lB,IAAIK,EAAElmB,EAAE6lB,IAAI/gB,EAAElG,KAAK0uE,YAAY/G,EAAEn1B,UAAUzQ,KAAK,WAAW,MAAM,CAAC9tB,MAAMjU,KAAK4oD,OAAOhjD,OAAO5F,KAAK2oD,UAAUgf,EAAEn1B,UAAUqoB,eAAe,SAASz5D,EAAE8E,GAAMlG,KAAKgpE,aAAa5nE,EAAGpB,KAAKmzD,SAAkKnzD,KAAKmzD,QAAQ0H,eAAe76D,KAAKgpE,cAAc9iE,GAAGlG,KAAK0uE,YAArMntE,QAAQoN,KAAK,kIAAoMg5D,EAAEn1B,UAAUunB,YAAY,WAAW,OAAO/5D,KAAKgpE,cAAcrB,EAAEn1B,UAAUua,UAAU,WAAW,IAAI3rD,EAAEpB,KAAKmqE,MAAM,UAAU,OAAO/oE,EAAEA,EAAEitB,QAAQ,MAAMs5C,EAAEn1B,UAAUi+B,iBAAiB,SAASrvE,GAAG,OAAOpB,KAAKmpE,gBAAgB/nE,IAAIumE,EAAEn1B,UAAUk+B,WAAW,SAAStvE,GAAG,IAAI,IAAI8E,EAAE,EAAEohB,EAAEtnB,KAAKsqE,UAAU,EAAEpkE,GAAGohB,GAAG,CAAC,IAAIrZ,EAAEqZ,EAAEphB,GAAG,EAAE+gB,EAAEjnB,KAAKgrE,SAAS/8D,EAAE,GAAG,GAAGgZ,EAAE7lB,EAAE8E,EAAE+H,EAAE,OAAO,GAAGgZ,EAAE7lB,EAAEkmB,EAAErZ,EAAE,MAAM,CAAC,GAAG/H,GAAG+H,EAAE,OAAOA,EAAEqZ,EAAErZ,GAAG,OAAO,MAAM05D,EAAEn1B,UAAUm+B,MAAM,SAASvvE,GAAGpB,KAAKsoE,iBAAiBtoE,KAAKuoE,UAAUz7C,KAAK1rB,GAAGA,EAAEkxC,KAAKtyC,KAAKA,OAAO2nE,EAAEn1B,UAAU2mB,iBAAiB,SAAS/3D,EAAE8E,EAAEohB,GAAGtrB,EAAE+oE,SAAS3jE,EAAE8E,EAAEohB,GAAGtnB,KAAKqpE,kBAAkBv8C,KAAK,CAAC8jD,KAAKxvE,EAAEY,KAAKkE,EAAE8/B,GAAG1e,KAAKqgD,EAAEn1B,UAAUu5B,qBAAqB,WAAW,GAAG/rE,KAAKqpE,kBAAkB,IAAI,IAAIjoE,EAAE,EAAEA,EAAEpB,KAAKqpE,kBAAkB58D,OAAOrL,IAAI,CAAC,IAAI8E,EAAElG,KAAKqpE,kBAAkBjoE,GAAGpF,EAAEo9D,YAAYlzD,EAAE0qE,KAAK1qE,EAAElE,KAAKkE,EAAE8/B,IAAIhmC,KAAKqpE,kBAAkB,IAAI1B,EAAE8B,QAAQ,CAAC/E,EAAE9xC,QAAQi1B,EAAEj1B,QAAQ9L,EAAE8L,QAAQ60C,EAAE70C,QAAQ40C,EAAE50C,QAAQ0xC,EAAE1xC,SAAS+0C,EAAEkJ,UAAUnJ,EAAE90C,QAAQ+0C,EAAE9C,YAAY7oE,EAAE6oE,YAAY8C,EAAE7C,cAAc9oE,EAAE8oE,cAAc6C,EAAExV,uBAAuBn2D,EAAEm2D,uBAAuBwV,EAAEnb,OAAOxwD,EAAEwwD,OAAOmb,EAAEnP,QAAQx8D,EAAEw8D,QAAQmP,EAAErQ,MAAMt7D,EAAEs7D,MAAMqQ,EAAEpQ,MAAMv7D,EAAEu7D,MAAMoQ,EAAE/E,YAAY5mE,EAAE4mE,YAAY+E,EAAEmJ,wBAAwB1rB,EAAExyB,QAAQo+B,aAAa2W,EAAEoJ,oBAAoBpJ,EAAEjO,qBAAqBtU,EAAExyB,QAAQ8mC,qBAAqBiO,EAAErb,QAAQtwD,EAAEswD,QAAQqb,EAAEqJ,QAAQ,CAACC,OAAOvM,EAAE9xC,QAAQs+C,KAAKrpB,EAAEj1B,QAAQu+C,YAAY3J,EAAE50C,QAAQw+C,YAAY3J,EAAE70C,QAAQy+C,KAAK/M,EAAE1xC,QAAQ0+C,cAAcxqD,EAAE8L,SAAS+0C,EAAE4J,aAAa,CAACC,eAAejkB,EAAE36B,QAAQ6+C,YAAYlK,EAAE30C,QAAQ8+C,kBAAkBhkB,EAAE96B,QAAQ++C,uBAAuB/jB,EAAEh7B,QAAQg/C,iBAAiBzxB,EAAEvtB,QAAQi/C,qBAAqB9jB,EAAEn7B,SAAS+0C,EAAE1O,SAAS7T,EAAExyB,QAAQqmC,SAAS0O,EAAE/R,UAAUxQ,EAAExyB,QAAQgjC,UAAU+R,EAAEzO,QAAQ9T,EAAExyB,QAAQsmC,QAAQyO,EAAE5R,SAAS3Q,EAAExyB,QAAQmjC,SAAS4R,EAAEjS,OAAOtQ,EAAExyB,QAAQ8iC,OAAOiS,EAAE7Q,QAAQ1R,EAAExyB,QAAQkkC,QAAQ6Q,EAAEhJ,mBAAmB/d,EAAE+d,mBAAmBgJ,EAAE9U,aAAajS,EAAEiS,aAAa8U,EAAEjV,WAAW9R,EAAE8R,WAAWiV,EAAE5G,YAAYn
gB,EAAEmgB,YAAY4G,EAAEpG,YAAY3gB,EAAE2gB,YAAYoG,EAAEjF,YAAY1mE,EAAE0mE,YAAYp7C,EAAEsL,QAAQ+0C,EAAEzhE,EAAEy+C,QAAQr9B,EAAEsL,UAAU0f,KAAKtyC,KAAKoB,EAAE,cAAc,CAAC,qBAAqB,EAAE,4BAA4B,EAAE,2BAA2B,EAAE,+BAA+B,EAAE,wBAAwB,EAAE,kCAAkC,EAAE,mBAAmB,EAAE,0BAA0B,GAAG,iBAAiB,GAAG,8BAA8B,GAAG,mBAAmB,GAAG,oBAAoB,GAAG,8BAA8B,GAAG,oBAAoB,GAAG,kBAAkB,GAAG,gBAAgB,GAAG,wBAAwB,GAAG,iBAAiB,GAAG,yBAAyB,GAAG,iBAAiB,GAAG,mBAAmB,GAAG,2BAA2B,GAAG67D,SAAS,IAAI6U,GAAG,CAAC,SAAS1wE,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,IAAIjO,KAAK+xE,MAAM,GAAGhxE,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA2ByoC,EAAE,SAASrmC,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAvB9E,EAAE,oBAAgL6M,EAAEukC,UAAUk6B,MAAM,WAAW,IAAI,IAAItrE,EAAEgF,SAASg6C,qBAAqB,UAAUl6C,EAAE,EAAEA,EAAE9E,EAAEqL,OAAOvG,IAAI,CAAC,IAAIohB,EAAElmB,EAAE8E,GAAG+H,EAAEw5B,EAAE+wB,QAAQlxC,GAAGL,EAAEhZ,EAAEjS,EAAEu2C,EAAEtkC,EAAE+Y,EAAE7d,EAAEme,EAAEoxC,YAAY7T,EAAEv9B,EAAEqxC,aAAa5nB,EAAE3qC,SAASkiB,cAAc,OAAOyoB,EAAEprC,MAAMk/B,SAAS,WAAWkM,EAAEprC,MAAMyK,KAAK6W,EAAE,KAAK8pB,EAAEprC,MAAMynB,IAAImlB,EAAE,KAAKxB,EAAEprC,MAAMsO,MAAM9K,EAAE,KAAK4nC,EAAEprC,MAAMC,OAAOi/C,EAAE,KAAK9T,EAAEprC,MAAMqsE,OAAO,IAAI5rE,SAASg+B,KAAK3b,YAAYsoB,GAAG/wC,KAAK+xE,MAAMjlD,KAAKikB,KAAK9iC,EAAEukC,UAAUq6B,QAAQ,WAAW,IAAI,IAAIzrE,EAAE,EAAEA,EAAEpB,KAAK+xE,MAAMtlE,OAAOrL,IAAIpB,KAAK+xE,MAAM3wE,GAAGu9C,WAAWh2B,YAAY3oB,KAAK+xE,MAAM3wE,IAAIpB,KAAK+xE,MAAM,IAAIzqD,EAAEsL,QAAQ3kB,EAAE/H,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,kBAAkB,KAAKq/C,GAAG,CAAC,SAAS7wE,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAIiP,EAAE,WAAWjO,KAAKgpE,aAAa,IAAI/6D,EAAEukC,UAAU/wC,SAAS,WAAW,MAAM,sBAAsBwM,EAAEukC,UAAUm3B,SAAS,SAASvoE,GAAG,MAAM,CAAC8wE,WAAWlyE,KAAKkyE,WAAWC,aAAanyE,KAAKmyE,eAAelkE,EAAEukC,UAAU4/B,aAAa,WAAW,IAAI,IAAIhxE,EAAE,EAAEA,EAAEpB,KAAKgpE,aAAav8D,OAAOrL,IAAI,CAAC,IAAI8E,EAAElG,KAAKgpE,aAAa5nE,GAAG8E,EAAEy4C,YAAYz4C,EAAEy4C,WAAWh2B,YAAYziB,GAAGlG,KAAKgpE,aAAa5nE,GAAG,KAAKpB,KAAKgpE,aAAa,IAAI/6D,EAAEukC,UAAU0/B,WAAW,SAAS9wE,GAAGpB,KAAKoyE,gBAAgBnkE,EAAEukC,UAAU2/B,aAAa,SAAS/wE,GAAG,IAAI8E,EAAE9E,EAAEyoD,QAAQviC,EAAEphB,EAAEitD,QAAQwJ,iBAAiB,GAAGr1C,GAAG,IAAIA,EAAE7a,OAAO,IAAI,IAAIwB,EAAE7M,EAAE+tE,OAAOxwB,WAAW13B,EAAE,SAAS7lB,EAAEkmB,EAAErZ,GAAG,OAAO,SAASgZ,GAAG,IAAIwgB,EAAEx5B,EAAE2uD,WAAWn1B,EAAE5a,eAAezrB,GAAGqmC,EAAErmC,GAAGqmC,EAAEx5B,EAAE/H,EAAE+gB,GAAG/gB,EAAE08B,UAAUtb,IAAIphB,EAAE08B,UAAUtb,EAAZphB,CAAeuhC,EAAEx5B,EAAE/H,EAAE+gB,KAAKwgB,EAAErmC,EAAEyoD,QAAQmN,UAAUzkB,EAAE,GAAGppC,EAAE,EAAEA,EAAEme,EAAE7a,OAAOtD,IAAI,CAAC,IAAI07C,EAAEv9B,EAAEne,GAAG,KAAK07C,EAAEsD,QAAQ1gB,EAAEzrC,GAAG6oD,EAAEsD,QAAQ1gB,EAAEzrC,EAAEyrC,EAAEqJ,GAAG+T,EAAEuD,QAAQ3gB,EAAEzgB,GAAG69B,EAAEuD,QAAQ3gB,EAAEzgB,EAAEygB,EAAEsJ,GAAG,CAAC,IAAIA,EAAE8T,EAAE+X,WAAW5X,EAAE,EAAEjU,EAAElkB,eAAe,gBAAgBm4B,EAAEjU,EAAEshC,YAAY,IAAIxqD,EAAEzhB,SAASkiB,cAAc,OAAOT,EAAEliB,MAAM+mC,SAASxmC,EAAE08B,UAAU,qBAAqB,KAChv+B,IAAIjb,EAAE,qBAAqBopB,EAAElkB,eAAe,UAAUlF,GAAG,wDAAwDopB,EAAElkB,eAAe,cAAclF,GAAG,IAAIopB,EAAEuhC,UAAUzqD,EAAE/lB,UAAU6lB,EAAE,IAAIxD,EAAE4sB,EAAElkB,eAAe,SAASkkB,EAAE98B,MAAM,GAAGkxC,EAAEpU,EAAElkB,eAAe,UAAUkkB,EAAEnrC,OAAO,GAAG,GAAGmrC,EAAElkB,eAAe,QAAQ,CAAC,IAAIu4B,EAAEh/C,SAASkiB,cAAc,OAAO88B,EAAE3S,IAAI1B,EAAExgB,KAAK60B,EAAEnxC,MAAMkQ,EAAEihC,EAAEx/C,OAAOu/C,EAAEt9B,EAAEY,YAAY28B,QAAQP,EAAE+X,WAAW/vC,eAAe,cAAchF,EAAEY,YAAYriB,SAASmsE,eAAe1tB,EAAE+X,WAAWiT,YAAY,IAAI3qB,EAAEL,EAAEsD,QAAQhkC,EAAE,EAAE0D,EAAEliB,MAAMyK,KAAK80C,EAAE,KAAK,IAA
ItE,EAAE,EAAE,GAAG7P,EAAEyhC,eAAe,CAAC,IAAIxrD,EAAEygB,EAAEzgB,EAAEygB,EAAEsJ,EAAEoU,EAAEH,EAAEzS,EAAE2S,GAAGl+B,GAAGurB,EAAE2S,GAAG3S,EAAE2S,GAAG,EAAE3S,EAAE2S,IAAIF,EAAEG,EAAEvE,EAAE55B,OAAO45B,EAAEiE,EAAEuD,QAAQjD,EAAEH,EAAEn9B,EAAEliB,MAAMynB,IAAIwzB,EAAE,KAAK/4B,EAAEliB,MAAMsO,MAAMkQ,EAAE,KAAK0D,EAAEliB,MAAMC,OAAOu/C,EAAE,KAAKt9B,EAAE7jB,MAAM6gD,EAAE+X,WAAWrkC,KAAK1Q,EAAEliB,MAAM8mC,MAAMvmC,EAAEmlD,WAAWxG,EAAE3jC,MAAM2G,EAAEliB,MAAM8sE,YAAYvsE,EAAEmlD,WAAWxG,EAAE3jC,MAAM6vB,EAAEhhB,IAAIlI,EAAE3hB,EAAEizD,iBAAiBtxC,EAAE,QAAQZ,EAAE,eAAe,yBAAyB49B,IAAI3+C,EAAEizD,iBAAiBtxC,EAAE,YAAYZ,EAAE,mBAAmB,6BAA6B49B,IAAI3+C,EAAEizD,iBAAiBtxC,EAAE,WAAWZ,EAAE,kBAAkB,4BAA4B49B,IAAI3+C,EAAEizD,iBAAiBtxC,EAAE,WAAWZ,EAAE,kBAAkB,4BAA4B49B,IAAI52C,EAAEwa,YAAYZ,GAAG7nB,KAAKgpE,aAAal8C,KAAKjF,GAAG,IAAI7rB,EAAEoF,EAAE8oD,eAAkBluD,EAAEq/B,OAAOr/B,EAAEuuD,YAAYxZ,EAAElkB,eAAe,aAAakkB,EAAE2hC,UAAUxsE,EAAEmlD,WAAWxG,EAAE3jC,MAAMllB,EAAEwuD,UAAUzZ,EAAElkB,eAAe,aAAakkB,EAAE4hC,UAAUzsE,EAAE08B,UAAU,eAAe5mC,EAAEitD,YAAYlY,EAAEyhC,gBAAoBxrD,EAAE45B,EAAEuE,EAAEnpD,EAAE6uD,OAAOhG,EAAEsD,QAAQnhC,GAAGhrB,EAAE8uD,OAAOjG,EAAEsD,QAAQnhC,EAAEg+B,KAAQhpD,EAAE6uD,OAAOhG,EAAEsD,QAAQtD,EAAEuD,SAASpsD,EAAE8uD,OAAOjG,EAAEsD,QAAQtD,EAAEuD,QAAQ,EAAEpD,IAAGhpD,EAAE0wD,YAAY1wD,EAAEmvD,SAASnvD,EAAEsuD,aAAar8C,EAAEukC,UAAUzR,QAAQ,WAAW/gC,KAAKoyE,gBAAgB9qD,EAAEsL,QAAQ3kB,EAAE/H,EAAEy+C,QAAQr9B,EAAEsL,SAAS,IAAIggD,GAAG,CAAC,SAASxxE,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA4BioB,EAAE,SAAS7lB,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAxB9E,EAAE,qBAAiLqmC,EAAE,WAAWznC,KAAK6yE,SAAS,GAAG7yE,KAAK8yE,SAAS,IAAIrrC,EAAE+K,UAAU/wC,SAAS,WAAW,MAAM,eAAegmC,EAAE+K,UAAUm3B,SAAS,SAASvoE,GAAG,MAAM,CAACqnD,OAAOzoD,KAAKyoD,OAAOypB,WAAWlyE,KAAKkyE,WAAWa,cAAc/yE,KAAK+yE,gBAAgBtrC,EAAE+K,UAAUiW,OAAO,SAASrnD,GAAG,IAAgN6M,EAA5M/H,EAAE9E,EAAEyoD,QAAQ,GAAG3jD,EAAE+tD,iBAAiB,WAAW,KAAK,CAAC,IAAI3sC,EAAEphB,EAAE+tD,iBAAiB,iBAAiB,KAAK,EAAE/tD,EAAE+tD,iBAAiB,eAAe,KAAK7yD,EAAEo5D,iBAAiBlzC,GAAMphB,EAAE+tD,iBAAiB,WAAW,OAAYhmD,EAAE/H,EAAE08B,UAAU,eAAe18B,EAAE08B,UAAU,eAAe18B,EAAE+tD,iBAAiB,oBAAoB,KAAK,EAAE/tD,EAAE+tD,iBAAiB,eAAe,KAAK7yD,EAAEu5D,mBAAmB1sD,IAAM,GAAG/H,EAAEm4D,UAAcn4D,EAAE+tD,iBAAiB,WAAW,QAAW3sC,EAAEphB,EAAE+tD,iBAAiB,iBAAiB,MAAM,EAAE/tD,EAAE+tD,iBAAiB,eAAe,MAAM7yD,EAAEq5D,kBAAkBnzC,IAASphB,EAAEm4D,UAAU,GAAGn4D,EAAEmgB,MAAM,8DAA8DngB,EAAEm4D,UAAU,MAAM52B,EAAE+K,UAAU4/B,aAAa,WAAW,SAAShxE,EAAEA,GAAG,IAAI,IAAI8E,EAAE,EAAEA,EAAE9E,EAAEqL,OAAOvG,IAAI,CAAC,IAAIohB,EAAElmB,EAAE8E,GAAGohB,EAAEq3B,YAAYr3B,EAAEq3B,WAAWh2B,YAAYrB,IAAIlmB,EAAEpB,KAAK6yE,UAAUzxE,EAAEpB,KAAK8yE,UAAU9yE,KAAK6yE,SAAS,GAAG7yE,KAAK8yE,SAAS,IAAIrrC,EAAE+K,UAAU0/B,WAAW,SAAS9wE,GAAGpB,KAAKoyE,gBAAgB3qC,EAAE+K,UAAUugC,cAAc,SAAS3xE,GAAG,SAAS8E,EAAE9E,GAAG,OAAO/E,KAAKgpB,MAAMjkB,GAAG,GAAG,SAASkmB,EAAElmB,GAAG,OAAO/E,KAAKgpB,MAAMjkB,GAAG,GAAG,IAAI6M,EAAEjO,KAAKynC,EAAErmC,EAAEyoD,QAAQ,GAAGpiB,EAAEwsB,iBAAiB,WAAW,MAAMxsB,EAAEwsB,iBAAiB,WAAW,MAAMxsB,EAAEwsB,iBAAiB,WAAW,MAAM,CAAC,IAAI1hB,EAAEppC,EAAE07C,EAAE9T,EAAE3vC,EAAE8oD,eAAelF,EAAE5jD,EAAE+tE,OAAOxwB,WAAW92B,EAAE4f,EAAEmhB,OAAOjhC,EAAE8f,EAAEkhB,QAAQxkC,EAAE,SAAS/iB,GAAG,MAAM,CAACyjC,SAAS,WAAW6H,SAASjF,EAAEwsB,iBAAiB,oBAAoB7yD,GAAG,KAAK6S,MAAMwzB,EAAEwsB,iBAAiB,iBAAiB7yD,GAAG,OAAO+jD,EAAE,CAACnpD,EAAEmoB,EAAE,KAAK6C,EAAE7C,EAAE,KAAKizB,GAAGjzB,EAAE,OAAOihC,EAAE,SAAShkD,EAAE8E,EAAEohB,GAAG,IAAIrZ,EAAE7H,SAASkiB,cAAc,OAAOmf,EAAE0d,EAAE,MAAM79B
,EAAE,KAAKphB,GAAG+gB,EAAE4gB,OAAO55B,EAAEtI,MAAM8hC,GAAG,IAAI8K,EAAEnsC,SAASkiB,cAAc,OAAO,OAAOiqB,EAAEzwC,UAAU,yCAAyCoE,GAAGohB,EAAE,uBAAuBA,EAAE,IAAIirB,EAAE9sB,UAAUrkB,EAAE6M,EAAEwa,YAAY8pB,GAAGtkC,GAAG8iC,EAAE1V,OAAO,IAAI6pB,EAAEzd,EAAE0rB,QAAQvS,EAAEx/C,EAAEyoD,QAAQyK,SAASxL,KAAK9hC,EAAE,SAAS5lB,GAAG,OAAO,SAAS8E,GAAG,OAAOuhC,EAAEwsB,iBAAiB/tD,EAAE9E,KAAK,GAAGqmC,EAAEwsB,iBAAiB,WAAW,KAAK,CAAC,GAAG/O,EAAEsX,QAAQtX,EAAEsX,OAAO/vD,OAAO,EAAE,CAAC,IAAIzQ,EAAEyrC,EAAE42B,UAAUj3C,EAAE,CAACJ,EAAE,KAAKA,EAAE,OAAOk+B,EAAEsX,OAAOx7D,SAAQ,SAASI,GAAG,QAAG,IAASA,EAAEqpC,MAAM,CAACthC,EAAEy3C,EAAE5kD,EAAE,IAAIkK,EAAE,KAAKohB,EAAEF,EAAE,GAAG,GAAGhmB,EAAEmqD,OAAOpiD,EAAEy3C,EAAE5kD,EAAE4kD,EAAE9P,EAAK5qC,EAAE,KAAKohB,EAAEF,EAAE,IAAI,IAAIH,EAAEK,EAAE,qBAAqBu9B,EAAEjE,EAAE55B,EAAE5lB,EAAEk7D,IAAI1b,EAAE7P,EAAEwB,EAAE6S,EAAEhkD,EAAEqpC,MAAM,IAAI,GAAGzuC,EAAEkK,EAAE,MAAM,IAAIuhC,EAAEod,EAAE59B,EAAE,EAAEwgB,EAAE,IAAIA,EAAE,GAAGA,EAAExgB,EAAE,EAAEU,EAAE4qB,EAAE5sC,MAAMu/B,OAAO,IAAIqN,EAAE5sC,MAAMynB,IAAIqa,EAAE,KAAK,IAAIrmC,EAAEmqD,MAAMhZ,EAAE5sC,MAAMyK,KAAKwwC,EAAE5kD,EAAEsrB,EAAE,kBAAkBA,EAAE,gBAAgB,KAAKirB,EAAE5sC,MAAMslE,UAAU,SAAS,GAAG7pE,EAAEmqD,OAAOhZ,EAAE5sC,MAAMyK,KAAKwwC,EAAE5kD,EAAE4kD,EAAE9P,EAAExpB,EAAE,gBAAgB,KAAKirB,EAAE5sC,MAAMslE,UAAU,QAAQ14B,EAAE5sC,MAAMsO,MAAMqT,EAAE,kBAAkB,KAAK09B,EAAEv8B,YAAY8pB,GAAGtkC,EAAE6kE,SAAShmD,KAAKylB,OAAM,IAAIhrB,EAAEvnB,KAAK8yE,SAAS,GAAGhiC,EAAErJ,EAAEwsB,iBAAiB,oBAAoB,KAAK1vC,SAASgD,EAAE5hB,MAAMynB,IAAI,IAAI0jB,EAAEnpB,EAAEmpB,IAAIvpB,EAAE5hB,MAAMynB,IAAI7I,SAASgD,EAAE5hB,MAAMynB,IAAI,IAAI0jB,EAAE,EAAE,MAAM,IAAIoc,EAAKzlB,EAAE7E,UAAU,qBAAuByqB,EAAE5lB,EAAE40B,gBAAgB,IAAM,GAAGhP,EAAE,GAAGpd,MAAMod,MAAMA,EAAE,GAAGH,EAAEhnD,EAAE06C,EAAE5kD,EAAEqxD,EAAEzM,EAAE9P,IAAQoc,EAAEhnD,EAAE06C,EAAE5kD,GAAG+0C,EAAEwZ,YAAY9iB,EAAEwsB,iBAAiB,gBAAgB,KAAKljB,EAAEyZ,UAAU/iB,EAAEwsB,iBAAiB,gBAAgB,KAAKljB,EAAEkY,YAAYlY,EAAE8Z,OAAOqC,EAAE5lC,EAAEs5B,EAAE55B,IAAI+pB,EAAE+Z,OAAOoC,EAAE5lC,EAAEs5B,EAAE55B,EAAE45B,EAAE7P,IAAIA,EAAE2b,YAAY3b,EAAEoa,SAAS,GAAG1jB,EAAE42B,YAAYttB,EAAEwZ,YAAY9iB,EAAEwsB,iBAAiB,gBAAgB,MAAMljB,EAAEyZ,UAAU/iB,EAAEwsB,iBAAiB,gBAAgB,MAAMljB,EAAEkY,YAAYlY,EAAE8Z,OAAOvjC,EAAEs5B,EAAE5kD,EAAE4kD,EAAE9P,GAAGxpB,EAAEs5B,EAAE55B,IAAI+pB,EAAE+Z,OAAOxjC,EAAEs5B,EAAE5kD,EAAE4kD,EAAE9P,GAAGxpB,EAAEs5B,EAAE55B,EAAE45B,EAAE7P,IAAIA,EAAE2b,YAAY3b,EAAEoa,UAAU,GAAG1jB,EAAEwsB,iBAAiB,WAAW,KAAK,CAAC,GAAG/O,EAAEiX,OAAO,CAAC,IAAI7O,EAAEtmC,EAAE,KAAKk+B,EAAEiX,OAAOn7D,SAAQ,SAASI,GAAG,QAAG,IAASA,EAAEqpC,MAAM,CAACthC,EAAEy3C,EAAE5kD,EAAEoF,EAAEk7D,IAAI1b,EAAE9P,EAAE+T,EAAEjE,EAAE55B,EAAE45B,EAAE7P,GAAEwB,EAAE6S,EAAEhkD,EAAEqpC,MAAM,MAAO9kC,MAAMslE,UAAU,SAAS14B,EAAE5sC,MAAMynB,IAAIy3B,EAAEyI,EAAE,gBAAgB,KAAK,IAAIpnD,EAAEiD,EAAEmkD,EAAE,kBAAkB,EAAEpnD,EAAEonD,EAAE,kBAAkBzlC,IAAI3hB,EAAE2hB,EAAEylC,EAAE,kBAAkB/a,EAAE5sC,MAAMslE,UAAU,SAAS/kE,EAAE,IAAIA,EAAE,EAAEqsC,EAAE5sC,MAAMslE,UAAU,QAAQ14B,EAAE5sC,MAAMyK,KAAKlK,EAAE,KAAKqsC,EAAE5sC,MAAMsO,MAAMq5C,EAAE,kBAAkB,KAAKtI,EAAEv8B,YAAY8pB,GAAGtkC,EAAE4kE,SAAS/lD,KAAKylB,OAA8H,IAAIgb,EAAwCF,EAApKtc,EAAEwZ,YAAY9iB,EAAEwsB,iBAAiB,gBAAgB,KAAKljB,EAAEyZ,UAAU/iB,EAAEwsB,iBAAiB,gBAAgB,KAAKljB,EAAEkY,YAAqBxhB,EAAE7E,UAAU,qBAAuByqB,EAAE5lB,EAAEg1B,gBAAgB,EAAE,IAAM,GAAGpP,EAAE,KAAKA,EAAE,GAAGE,EAAEjmC,EAAEs5B,EAAE55B,EAAEqmC,EAAEzM,EAAE7P,IAAQwc,EAAEjmC,EAAEs5B,EAAE55B,EAAE45B,EAAE7P,GAAGA,EAAE8Z,OAAO3kD,EAAE06C,EAAE5kD,GAAGuxD,GAAGxc,EAAE+Z,OAAO5kD,EAAE06C,EAAE5kD,EAAE4kD,EAAE9P,GAAGyc,GAAGxc,EAAE2b,YAAY3b,EAAEoa,SAASpa,EAAEuZ,YAAYhjC,EAAEsL,QAAQ6U,EAAEvhC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,mBAAmB,KAAKogD,GAAG,CAAC,SAAS5xE,EAAE8E,EAAEohB,GAAG,aAAavmB,OAA
O4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAIiP,EAAE,WAAWjO,KAAKizE,WAAW,KAAKjzE,KAAKkzE,YAAY,KAAKlzE,KAAKmzE,YAAY,KAAKnzE,KAAKozE,aAAa,MAAMnlE,EAAEukC,UAAU/wC,SAAS,WAAW,MAAM,sBAAsBwM,EAAEukC,UAAUm3B,SAAS,SAASvoE,GAAG,MAAM,CAACqnD,OAAOzoD,KAAKyoD,OAAO0pB,aAAanyE,KAAKmyE,eAAe,IAAIlrD,EAAE,SAAS7lB,GAAG,IAAI8E,EAAEE,SAASkiB,cAAc,OAAO,OAAOpiB,EAAEP,MAAMk/B,SAAS,WAAW3+B,EAAEP,MAAMyK,KAAKhP,EAAEpF,EAAE,KAAKkK,EAAEP,MAAMynB,IAAIhsB,EAAE4lB,EAAE,KAAK9gB,EAAEP,MAAMsO,MAAM7S,EAAE0vC,EAAE,KAAK5qC,EAAEP,MAAMC,OAAOxE,EAAE2vC,EAAE,KAAK7qC,GAAG+H,EAAEukC,UAAU6gC,cAAc,WAAW,IAAI,IAAIjyE,EAAE,CAACpB,KAAKizE,WAAWjzE,KAAKkzE,YAAYlzE,KAAKmzE,YAAYnzE,KAAKozE,cAAcltE,EAAE,EAAEA,EAAE9E,EAAEqL,OAAOvG,IAAI,CAAC,IAAIohB,EAAElmB,EAAE8E,GAAGohB,GAAIA,EAAEq3B,YAAYr3B,EAAEq3B,WAAWh2B,YAAYrB,GAAItnB,KAAKizE,WAAW,KAAKjzE,KAAKkzE,YAAY,KAAKlzE,KAAKmzE,YAAY,KAAKnzE,KAAKozE,aAAa,MAAM,IAAI3rC,EAAE,SAASrmC,EAAE8E,EAAEohB,EAAErZ,EAAEgZ,GAAG,IAAIwgB,EAAErhC,SAASkiB,cAAc,OAAOmf,EAAE9hC,MAAMk/B,SAAS,WAAW4C,EAAE9hC,MAAMyK,KAAK,GAAGkX,EAAE,MAAMphB,EAAElK,EAAE,KAAKyrC,EAAE9hC,MAAMynB,IAAIlnB,EAAE8gB,EAAE,KAAKygB,EAAE9hC,MAAMsO,MAAM/N,EAAE4qC,EAAE,KAAKrJ,EAAE9hC,MAAMC,OAAOM,EAAE6qC,EAAE,KAAKtJ,EAAE9hC,MAAM+mC,SAAStrC,EAAEwhC,UAAU,eAAe,EAAE,KAAK,IAAI2P,EAAEnsC,SAASkiB,cAAc,OAAOiqB,EAAE5sC,MAAMk/B,SAAS,WAAW0N,EAAE5sC,MAAMsO,MAAM/N,EAAE6qC,EAAE,KAAKwB,EAAE5sC,MAAMC,OAAOM,EAAE4qC,EAAE,KAAKyB,EAAE5sC,MAAMynB,IAAIlnB,EAAE6qC,EAAE,EAAE7qC,EAAE4qC,EAAE,EAAE,KAAKyB,EAAE5sC,MAAMyK,KAAKlK,EAAE4qC,EAAE,EAAE5qC,EAAE6qC,EAAE,EAAE,KAAKwB,EAAEzwC,UAAU,yBAAyB,GAAGwlB,EAAE,QAAQ,QAAQ,IAAIne,EAAE/C,SAASkiB,cAAc,OAAO,OAAOnf,EAAErH,UAAUmM,EAAE9E,EAAEsc,UAAUwB,EAAEsrB,EAAE9pB,YAAYtf,GAAGs+B,EAAEhf,YAAY8pB,GAAG9K,GAAGx5B,EAAEukC,UAAUiW,OAAO,SAASrnD,GAAGpB,KAAKqzE,gBAAgB,IAAIntE,EAAE9E,EAAEyoD,QAAQviC,EAAElmB,EAAEk5D,UAAU,GAAGp0D,EAAE08B,UAAU,SAAS,CAAC,IAAI30B,EAAE7M,EAAEs5D,gBAAgBx0D,EAAE08B,UAAU,gBAAgB5iC,KAAKizE,WAAWhsD,EAAEhZ,GAAGjO,KAAKizE,WAAWttE,MAAM+mC,SAASxmC,EAAE08B,UAAU,eAAe,EAAE,MAAS2P,EAAEnsC,SAASkiB,cAAc,QAASxmB,UAAU,8BAA8BywC,EAAE9sB,UAAUvf,EAAE08B,UAAU,SAAS5iC,KAAKizE,WAAWxqD,YAAY8pB,GAAGjrB,EAAEmB,YAAYzoB,KAAKizE,YAAY,GAAG/sE,EAAE08B,UAAU,UAAU,CAAC,IAAqJ2P,EAAjJppC,EAAE/H,EAAEu5D,mBAAmBz0D,EAAE08B,UAAU,iBAAiB5iC,KAAKkzE,YAAYjsD,EAAE9d,GAAGnJ,KAAKkzE,YAAYvtE,MAAM+mC,SAASxmC,EAAE08B,UAAU,gBAAgB,EAAE,MAAS2P,EAAEnsC,SAASkiB,cAAc,QAASxmB,UAAU,+BAA+BywC,EAAE9sB,UAAUvf,EAAE08B,UAAU,UAAU5iC,KAAKkzE,YAAYzqD,YAAY8pB,GAAGjrB,EAAEmB,YAAYzoB,KAAKkzE,aAAa,GAAGhtE,EAAE08B,UAAU,UAAU,CAAC,IAAIiiB,EAAEzjD,EAAEo5D,iBAAiB,GAAGx6D,KAAKmzE,YAAY1rC,EAAEvhC,EAAE2+C,EAAE,EAAE,+BAA+B3+C,EAAE08B,UAAU,WAAWtb,EAAEmB,YAAYzoB,KAAKmzE,aAAa,GAAGjtE,EAAE08B,UAAU,YAAY,GAAG18B,EAAEm4D,UAAU,CAAC,IAAIttB,EAAE3vC,EAAEq5D,kBAAkB,GAAGz6D,KAAKozE,aAAa3rC,EAAEvhC,EAAE6qC,EAAE,EAAE,gCAAgC7qC,EAAE08B,UAAU,YAAYtb,EAAEmB,YAAYzoB,KAAKozE,gBAAgBnlE,EAAEukC,UAAU2/B,aAAa,SAAS/wE,GAAG,IAAI8E,EAAE9E,EAAEyoD,QAAQ7pD,KAAKizE,aAAajzE,KAAKizE,WAAWpxE,SAAS,GAAG4jB,UAAUvf,EAAE08B,UAAU,UAAU5iC,KAAKkzE,cAAclzE,KAAKkzE,YAAYrxE,SAAS,GAAG4jB,UAAUvf,EAAE08B,UAAU,WAAW5iC,KAAKmzE,cAAcnzE,KAAKmzE,YAAYtxE,SAAS,GAAGA,SAAS,GAAG4jB,UAAUvf,EAAE08B,UAAU,WAAW5iC,KAAKozE,eAAepzE,KAAKozE,aAAavxE,SAAS,GAAGA,SAAS,GAAG4jB,UAAUvf,EAAE08B,UAAU,aAAa30B,EAAEukC,UAAU0/B,WAAW,aAAajkE,EAAEukC,UAAUzR,QAAQ,WAAW/gC,KAAKqzE,iBAAiB/rD,EAAEsL,QAAQ3kB,EAAE/H,EAAEy+C,QAAQr9B,EAAEsL,SAAS,IAAI0gD,GAAG,CAAC,SAASlyE,EAAE8E,EAAEohB,GAAG,aAAavmB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAAIiP,EAAE,aAAaA,EAAEukC,UAAU/wC,SAAS,WAAW,MAAM,mBAAmBwM,EAAEukC,UAAUm3B,SAAS,SAASvoE,GAAG,MAAM,CAAC2xE,cAAc/yE,KAAK+yE,gBAAgB9kE,EAAEukC,UAAUugC,cAAc,SAAS3
xE,GAAG,SAAS8E,EAAE9E,GAAG,OAAO/E,KAAKgpB,MAAMjkB,GAAG,GAAG,SAASkmB,EAAElmB,GAAG,OAAO/E,KAAKgpB,MAAMjkB,GAAG,GAAG,IAAI6M,EAAEgZ,EAAIsrB,EAAEppC,EAAE/H,EAAEyoD,QAAQhF,EAAEzjD,EAAE8oD,eAAenZ,EAAE5nC,EAAEgqD,QAAQnO,EAAE5jD,EAAEyoD,QAAQyK,SAASxL,KAAK,GAAG3/C,EAAE8qD,iBAAiB,WAAW,KAAK,CAAC,IAAI,IAAIpsC,EAAE,CAAC,IAAI,MAAMF,EAAE,GAAGxD,EAAE,GAAGghC,EAAE,GAAGC,EAAE,GAAGF,EAAE,GAAGzd,EAAE,EAAEA,EAAE5f,EAAEpb,OAAOg7B,IAAI0d,EAAE1d,GAAGt+B,EAAE8qD,iBAAiB,WAAWpsC,EAAE4f,IAAI0d,EAAE1d,KAAK9f,EAAE8f,GAAGt+B,EAAE8qD,iBAAiB,gBAAgBpsC,EAAE4f,IAAItjB,EAAEsjB,GAAGt+B,EAAE8qD,iBAAiB,gBAAgBpsC,EAAE4f,IAAIyd,EAAEzd,GAAGt+B,EAAE8qD,iBAAiB,kBAAkBpsC,EAAE4f,IAAI2d,EAAE3d,GAAGyd,EAAEzd,IAAIyd,EAAEzd,GAAGh7B,QAAQ,GAAG8lC,EAAExB,EAAEyrB,OAAO3X,EAAExpB,OAAOkX,EAAEvxC,SAAQ,SAASI,GAAG,GAAGA,EAAEm7D,SAAS,CAAC,IAAI90B,EAAErmC,EAAEmqD,KAAKpG,EAAE1d,KAAKod,EAAExpB,OAAO+pB,EAAE3d,IAAIod,EAAEsF,aAAatF,EAAEsF,YAAYjF,EAAEzd,IAAIod,EAAE0F,YAAY5iC,EAAE8f,GAAGod,EAAE2F,UAAUrmC,EAAEsjB,GAAGx5B,EAAE/H,EAAE8+C,EAAEhpD,GAAGirB,EAAEK,EAAE09B,EAAEh+B,EAAE5lB,EAAEk7D,IAAItX,EAAEjU,GAAG8T,EAAEoE,YAAYpE,EAAEgG,OAAO58C,EAAEgZ,GAAG49B,EAAEiG,OAAO78C,EAAE+2C,EAAElU,EAAE7pB,GAAG49B,EAAEsG,SAAStG,EAAEyF,eAAczF,EAAEyF,UAAanhD,EAAE8qD,iBAAiB,WAAW,OAAM1hB,EAAExB,EAAEorB,OAAOtX,EAAExpB,OAAW6pB,EAAE/7C,EAAE8qD,iBAAiB,kBAAkB,MAAK7O,EAAEF,GAAGA,EAAEz4C,QAAQ,IAAKo4C,EAAEsF,aAAatF,EAAEsF,YAAYjF,GAAGL,EAAE0F,YAAYphD,EAAE8qD,iBAAiB,gBAAgB,KAAKpP,EAAE2F,UAAUrhD,EAAE8qD,iBAAiB,gBAAgB,KAAK1hB,EAAEvxC,SAAQ,SAASI,GAAGA,EAAEm7D,WAAWtuD,EAAE/H,EAAE8+C,EAAEhpD,EAAEoF,EAAEk7D,IAAItX,EAAElU,GAAG7pB,EAAEK,EAAE09B,EAAEh+B,EAAEg+B,EAAEjU,GAAG8T,EAAEoE,YAAYpE,EAAEgG,OAAO58C,EAAEgZ,GAAG49B,EAAEiG,OAAO78C,EAAE+2C,EAAEh+B,GAAG69B,EAAE6H,YAAY7H,EAAEsG,aAAY/F,GAAGP,EAAEsF,aAAatF,EAAEsF,YAAY,IAAItF,EAAEyF,YAAYr8C,EAAEukC,UAAUzR,QAAQ,aAAazZ,EAAEsL,QAAQ3kB,EAAE/H,EAAEy+C,QAAQr9B,EAAEsL,SAAS,IAAI2gD,GAAG,CAAC,SAASnyE,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,EAAE7M,EAAE8E,EAAEohB,GAAG,IAAIlmB,GAAGA,EAAEqL,QAAQ,EAAE,MAAM,gEAAgEvG,EAAE,YAAY,IAAI+H,EAAEgZ,EAAEwgB,EAAIt+B,EAAE07C,EAAE,EAAE9T,EAAE,EAAEiU,EAAE,GAAG,IAAI/2C,EAAE,EAAEA,GAAG7M,EAAEqL,OAAOwB,IAAI42C,GAAGzjD,EAAE6M,EAAE7M,EAAEqL,QAAQ,IAAItD,EAAE9M,KAAKG,MAAM8qB,GAAGu9B,EAAEzjD,EAAE,MAAM,EAAE,CAAC,IAAI6M,EAAE,EAAEA,EAAE7M,EAAEqL,OAAOwB,IAAI+2C,EAAE/2C,GAAG7M,EAAE6M,GAAGqZ,EAAEypB,EAAEiU,EAAEv4C,WAAW,CAAC,IAAItD,EAAE,EAAE8E,EAAE,EAAEA,EAAE7M,EAAEqL,OAAOwB,IAAI+2C,EAAE/2C,GAAG7M,EAAE6M,GAAG42C,EAAE9T,EAAEiU,EAAEv4C,OAAO,EAAE,IAAIob,EAAE,GAAG,IAAIZ,EAAE,EAAEA,EAAE9d,EAAE8d,IAAI,IAAIhZ,EAAE,EAAEA,EAAE8iC,EAAE9iC,GAAG,EAAEw5B,EAAEud,EAAE/2C,EAAE+2C,EAAEv4C,QAAyCob,GAAG,0DAAlC5Z,EAAE7M,EAAEqL,OAAOu4C,GAAG/2C,EAAE,GAAG+2C,EAAEv4C,QAAQ,GAAgE,qBAAqBg7B,EAAE,cAAc,OAAO5f,EAAE9mB,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA4ByoC,EAAE,SAASrmC,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAxB9E,EAAE,qBAAiLmxC,EAAE,WAAWvyC,KAAKwzE,YAAY,KAAKxzE,KAAKyzE,mBAAkB,GAAIlhC,EAAEC,UAAU/wC,SAAS,WAAW,MAAM,iBAAiB8wC,EAAEC,UAAUm3B,SAAS,SAASvoE,GAAG,IAAI8E,EAAEohB,EAAElmB,EAAEwhC,UAAU,aAAa,OAAOtb,GAAG,OAAOA,EAAEphB,EAAE,iBAAiBohB,GAAGA,aAAaiqB,OAAOnrC,SAASof,eAAe8B,GAAGA,IAAGphB,EAAEE,SAASkiB,cAAc,QAASxmB,UAAU,iBAAiBV,EAAEm5D,SAAS9xC,YAAYviB,GAAGlG,KAAKyzE,mBAAkB,GAAIzzE,KAAKwzE,YAAYttE,EAAElG,KAAK0zE,cAAc,GAAG,CAACtjC,OAAOpwC,KAAKowC,OAAOujC,SAAS3zE,KAAK2zE,SAASC,QAAQ5zE,KAAK4zE,QAAQzB,aAAanyE,KAAKmyE,eAAe,IAA2LttB,EAAE,SAASzjD,GAAG,OAAOA,EAAEgI,QAAQ,KAAK,SAASA,QAAQ,KAAK,UAAUA,QAAQ,KAAK,
QAAQA,QAAQ,KAAK,SAASmpC,EAAEC,UAAUpC,OAAO,SAAShvC,GAAG,IAAI8E,EAAE9E,EAAEgtE,UAAU9mD,EAAElmB,EAAEitE,eAAepgE,EAAE7M,EAAE+sE,YAAYlnD,EAAE7lB,EAAEyoD,QAAQjnB,UAAU,UAAU,GAAG,UAAU3b,EAAb,CAAkE,GAAG,WAAWA,EAAE,CAAC,IAAIwgB,EAAErmC,EAAEyoD,QAAQyK,SAASxL,KAAK3/C,EAAEnJ,KAAKwzE,YAAY9a,YAAY7T,EAAEzjD,EAAEyoD,QAAQoK,iBAAiB,iBAAiB,KAAKljB,EAAEzpB,EAAE,GAAGtrB,EAAEyrC,EAAEqJ,EAAE,GAAGkU,EAAE19B,EAAE,GAAGN,EAAEygB,EAAEsJ,EAAE,GAAGA,EAAE5nC,EAAE,EAAEs+B,EAAEqJ,IAAIC,EAAEA,EAAE,IAAI5nC,GAAG07C,EAAEpd,EAAEzrC,IAAIoF,EAAEyoD,QAAQ0Q,SAAS9xC,YAAYzoB,KAAKwzE,aAAaxzE,KAAKwzE,YAAY7tE,MAAMyK,KAAKy0C,EAAE9T,EAAE,KAAK/wC,KAAKwzE,YAAY7tE,MAAMynB,IAAI43B,EAAE,KAAK,IAAIn9B,EAAE0qB,EAAEshC,mBAAmBzyE,EAAEyoD,QAAQ3jD,EAAEohB,EAAEtnB,KAAK0zE,cAAczlE,GAAGjO,KAAKwzE,YAAY/tD,UAAUoC,EAAE7nB,KAAKwzE,YAAY7tE,MAAM80B,QAAQ,QAA1dz6B,KAAKwzE,YAAY7tE,MAAM80B,QAAQ,QAA+b8X,EAAEC,UAAUmhC,SAAS,SAASvyE,GAAG,WAAWA,EAAEyoD,QAAQjnB,UAAU,YAAY5iC,KAAKwzE,YAAY7tE,MAAM80B,QAAQ,QAAQ,IAAIv0B,EAAxgC,SAAS9E,GAAG,IAAI8E,EAAEE,SAASkiB,cAAc,QAAQpiB,EAAEqiB,aAAa,QAAQ,6CAA6CnnB,EAAEqnB,YAAYviB,GAAG,IAAIohB,EAAEphB,EAAEwyD,YAAY,OAAOt3D,EAAEunB,YAAYziB,GAAGohB,EAAw1Bne,CAAEnJ,KAAKwzE,aAAaxzE,KAAK0zE,cAAcxtE,EAAE,IAAIohB,EAAEirB,EAAEshC,mBAAmBzyE,EAAEyoD,aAAQ,OAAO,EAAO3jD,EAAE,MAAMlG,KAAKwzE,YAAY/tD,UAAU6B,GAAGirB,EAAEC,UAAU2/B,aAAa,SAAS/wE,GAAGpB,KAAK2zE,SAASvyE,IAAImxC,EAAEC,UAAUohC,QAAQ,SAASxyE,GAAG,GAAGpB,KAAKyzE,kBAAkB,CAACryE,EAAEyoD,QAAQ0Q,SAAS9xC,YAAYzoB,KAAKwzE,aAAa,IAAIttE,EAAE9E,EAAEyoD,QAAQmN,UAAU1vC,EAAEtnB,KAAKwzE,YAAY9a,YAAY14D,KAAKwzE,YAAY7tE,MAAMyK,KAAKlK,EAAElK,EAAEkK,EAAE4qC,EAAExpB,EAAE,EAAE,KAAKtnB,KAAKwzE,YAAY7tE,MAAMynB,IAAIlnB,EAAE8gB,EAAE,OAAOurB,EAAEC,UAAUzR,QAAQ,WAAW/gC,KAAKwzE,YAAY,MAAMjhC,EAAEshC,mBAAmB,SAASzyE,EAAE8E,EAAEohB,EAAEL,EAAE9d,GAAG,IAAI4nC,EAAE,CAAC8Y,QAAQzoD,EAAEpF,EAAEkK,EAAEw2D,OAAO,IAAI1X,EAAE,GAAGn9B,EAAEzmB,EAAE2rD,YAAY,GAAGllC,EAAE,IAAI,IAAIF,EAAE,EAAEA,EAAEE,EAAEpb,OAAOkb,IAAI,CAAC,IAAIxD,EAAE/iB,EAAEgrE,uBAAuBvkD,EAAEF,IAAwCy9B,EAAE,CAAC0uB,SAAS7lE,EAA9C7M,EAAEwhC,UAAU,gBAAgB/a,EAAEF,IAAoBxD,EAAEsoB,MAAMxlB,GAAGwjB,MAAM5iB,EAAEF,GAAGosD,UAAUlvB,EAAEh9B,EAAEF,IAAIqsD,UAAU7vD,EAAEkS,QAAQoW,MAAMtoB,EAAEsoB,OAAOsE,EAAE2rB,OAAO5vC,KAAKs4B,GAAGJ,EAAEn9B,EAAEF,IAAIy9B,EAAE,QAAG,IAASl/C,EAAE,CAAC,IAAIg/C,EAAE9jD,EAAEipE,oBAAoB,KAAKzpB,EAAEsE,EAAE,kBAAkBnU,EAAEkjC,MAAMrzB,EAAEtO,KAAKlxC,EAAE8E,EAAEg/C,EAAEr9B,EAAE,GAAGzmB,EAAE+H,EAAE,GAAO,IAAI6d,EAAE,GAAGhrB,EAAEoF,EAAEi9D,UAAjB,IAA2B12C,EAAE,EAAEA,EAAE3rB,EAAE2rB,IAAIX,EAAEW,GAAGvmB,EAAEipE,oBAAoB,KAAK1iD,EAAE,EAAEA,EAAE,KAAK,IAAIP,EAAEhmB,EAAEwhC,UAAU,wBAAwBrb,EAAEnmB,EAAEg9D,qBAAqB,IAAIz2C,EAAE,EAAEA,EAAEL,EAAE7a,OAAOkb,IAAI,CAAC,IAAImpB,EAAExpB,EAAEK,GAAe,IAAZy9B,EAAEJ,EAAElU,EAAE5vB,OAAW8F,EAAE8pB,EAAEoX,KAAK,IAAIpX,EAAEoX,OAAO9gC,GAAG6oB,MAAMa,EAAEsX,SAAShD,EAAE4uB,WAAU,MAAO,CAAC,IAAuC9mB,EAAElmC,GAArC7C,EAAE/iB,EAAEgrE,uBAAuBt7B,EAAE5vB,OAAYqqC,KAAK,GAAyB+B,EAApBJ,EAAE,kBAAsB5a,KAAKlxC,EAAE0vC,EAAEoX,KAAKgF,EAAEpc,EAAE5vB,KAAK9f,EAAE+H,EAAE0e,EAAEoB,QAAQ6nB,EAAE5vB,OAAOumB,EAAEI,OAAOud,EAAE,CAAC8uB,MAAM5mB,IAAIxc,EAAE5vB,MAAMqG,IAAI69B,EAAE+uB,eAAc,KAAM,OAAO/yE,EAAEwhC,UAAU,oBAAoB2P,EAAE6hC,kBAAkB9hC,KAAKlxC,EAAE2vC,IAAIwB,EAAE6hC,iBAAiB,SAAShzE,GAAG,IAAI8E,EAAE9E,EAAEyoD,QAAQ,IAAG,IAAK3jD,EAAE08B,UAAU,yBAAyB,MAAM,GAAG,IAAItb,EAAErZ,EAAE/H,EAAE08B,UAAU,uBAAuB,QAAG,IAASxhC,EAAEpF,EAAE,CAAC,GAAG,UAAUkK,EAAE08B,UAAU,UAAU,MAAM,GAAGtb,EAAE,GAAG,IAAI,IAAIL,EAAE,EAAEA,EAAE7lB,EAAEs7D,OAAOjwD,OAAOwa,KAASwgB,EAAErmC,EAAEs7D,OAAOz1C,IAAK+sD,YAAY,KAAK1sD,IAAIA,GAAGrZ,EAAE,QAAQ,KAAKqZ,GAAG,0CAA0CmgB,EAAEgF,MAAM,MAAMhF,EAAEqsC,SAAS,IAAIrsC,EAAEssC,UAAU,WAAW,OAAOzsD,EAAgB,
IAAdA,EAAElmB,EAAE6yE,MAAM,IAAYhtD,EAAE,EAAEA,EAAE7lB,EAAEs7D,OAAOjwD,OAAOwa,IAAI,CAAC,IAAIwgB,KAAErmC,EAAEs7D,OAAOz1C,IAAQ+sD,YAAW/lE,IAAIqZ,GAAG,QAAQA,GAAG,SAASmgB,EAAE0sC,cAAc,qBAAqB,IAAI,4BAA4B1sC,EAAEgF,MAAM,MAAMhF,EAAEssC,UAAU,qBAAqBtsC,EAAEysC,MAAM,WAAW,OAAO5sD,GAAGA,EAAEsL,QAAQ2f,EAAErsC,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,mBAAmB,KAAKyhD,GAAG,CAAC,SAASjzE,EAAE8E,EAAEohB,GAAG,aAAa,SAASrZ,EAAE7M,GAAG,OAAOA,GAAGA,EAAEwlD,WAAWxlD,EAAE,CAACwxB,QAAQxxB,GAAGL,OAAO4lD,eAAer/B,EAAE,aAAa,CAACtoB,OAAM,IAAK,IAA4ByoC,EAAE,SAASrmC,GAAG,GAAGA,GAAGA,EAAEwlD,WAAW,OAAOxlD,EAAE,IAAI8E,EAAE,GAAG,GAAG,MAAM9E,EAAE,IAAI,IAAIkmB,KAAKlmB,EAAEL,OAAOyxC,UAAU3lB,eAAeylB,KAAKlxC,EAAEkmB,KAAKphB,EAAEohB,GAAGlmB,EAAEkmB,IAAI,OAAOphB,EAAE0sB,QAAQxxB,EAAE8E,EAArJ,CAAxB9E,EAAE,qBAAqN+H,EAAE8E,EAApC7M,EAAE,iCAA6D2vC,EAAE9iC,EAAtB7M,EAAE,mBAAyB4jD,EAAE,WAAWhlD,KAAKs0E,mBAAmB,oBAAoBC,WAAWv0E,KAAKw0E,gBAAgB,mBAAmBlhE,KAAK2uC,UAAUwyB,YAAYz0E,KAAK00E,mBAAkB,GAAI1vB,EAAExS,UAAU/wC,SAAS,WAAW,MAAM,wBAAwBujD,EAAExS,UAAUm3B,SAAS,SAASvoE,GAAG,OAAOpB,KAAKwoD,SAASpnD,EAAEpB,KAAK20E,WAAW,sBAAsB30E,KAAKupE,mBAAmB,CAAC9gB,OAAOzoD,KAAK40E,cAAchB,QAAQ5zE,KAAK60E,mBAAmB1C,aAAanyE,KAAK80E,0BAA0B9vB,EAAExS,UAAUzR,QAAQ,WAAW/gC,KAAK+0E,UAAU,KAAK/0E,KAAKg1E,UAAU,KAAKh1E,KAAKi1E,gBAAgB,KAAKj1E,KAAKk1E,iBAAiB,MAAMlwB,EAAExS,UAAUmiC,WAAW,SAASvzE,EAAE8E,GAAG,OAAOlG,KAAKwoD,SAAS5lB,UAAUxhC,EAAE8E,IAAI8+C,EAAExS,UAAU2iC,kBAAkB,SAAS/zE,EAAE8E,GAAGlG,KAAKwoD,SAAS8U,OAAOl8D,GAAG8E,GAAG8+C,EAAExS,UAAU+2B,iBAAiB,WAAWvpE,KAAKo1E,kBAAkBp1E,KAAKq1E,qBAAqBr1E,KAAKs1E,mBAAmBt1E,KAAK20E,WAAW,mBAAmBpzE,QAAQoN,KAAK,kFAAkF3O,KAAKwoD,SAAS8nB,cAAc,CAACrf,eAAc,IAAI,IAAKjxD,KAAK00E,mBAAkB,EAAG10E,KAAKu1E,eAAevwB,EAAExS,UAAU+iC,YAAY,WAAW,IAAIn0E,EAAEpB,KAAKw1E,UAAUx1E,KAAKwoD,SAAS+R,SAASn5D,EAAEqnB,YAAYzoB,KAAK+0E,WAAW3zE,EAAEqnB,YAAYzoB,KAAKg1E,WAAW5zE,EAAEqnB,YAAYzoB,KAAKi1E,iBAAiB7zE,EAAEqnB,YAAYzoB,KAAKk1E,mBAAmBlwB,EAAExS,UAAUijC,iBAAiB,WAAW,IAAIr0E,EAAEpB,KAAKw1E,UAAUp0E,EAAEunB,YAAY3oB,KAAK+0E,WAAW3zE,EAAEunB,YAAY3oB,KAAKg1E,WAAW5zE,EAAEunB,YAAY3oB,KAAKi1E,iBAAiB7zE,EAAEunB,YAAY3oB,KAAKk1E,kBAAkBl1E,KAAKw1E,UAAU,MAAMxwB,EAAExS,UAAUoiC,cAAc,SAASxzE,GAAGpB,KAAK20E,WAAW,sBAAsBvzE,EAAEu5D,mBAAmB36D,KAAK20E,WAAW,uBAAuB,IAAI3vB,EAAExS,UAAUqiC,mBAAmB,WAAW70E,KAAK01E,sBAAsB11E,KAAK21E,UAAU31E,KAAK41E,qBAAqB5wB,EAAExS,UAAUsiC,wBAAwB,WAAW90E,KAAK01E,sBAAsB11E,KAAK61E,mBAAmB71E,KAAK81E,oBAAoB91E,KAAK+1E,0BAA0B/wB,EAAExS,UAAUkjC,kBAAkB,WAAW,IAAIt0E,EAAEpB,KAAK20E,WAAW,qBAAqB,GAAGvzE,EAAEpB,KAAK00E,kBAAkB10E,KAAKw1E,WAAWx1E,KAAKw1E,UAAU72B,YAAY3+C,KAAKu1E,cAAcv1E,KAAKupE,wBAAwB,GAAGvpE,KAAKw1E,UAAU,CAACx1E,KAAKy1E,mBAAmB,IAAIvvE,EAAElG,KAAKwoD,SAAS9/B,YAAW,WAAWxiB,EAAE0iD,OAAO,EAAE1iD,EAAEuK,WAAU,GAAG,OAAOrP,GAAG4jD,EAAExS,UAAUmjC,QAAQ,WAAW,SAASv0E,EAAEA,EAAE8E,EAAEohB,EAAErZ,GAAG,IAAIgZ,EAAEhZ,GAAGw5B,EAAE47B,qBAAqBn9D,GAAG9E,EAAEuE,MAAMynB,IAAI9F,EAAEN,EAAE,KAAK5lB,EAAEuE,MAAMyK,KAAKkX,EAAEtrB,EAAE,KAAKoF,EAAE6S,MAAMqT,EAAEwpB,EAAE7pB,EAAE7lB,EAAEwE,OAAO0hB,EAAEypB,EAAE9pB,EAAE7lB,EAAEuE,MAAMsO,MAAMqT,EAAEwpB,EAAE,KAAK1vC,EAAEuE,MAAMC,OAAO0hB,EAAEypB,EAAE,KAAK,GAAG9pB,GAAG/gB,EAAEg7B,MAAMja,EAAEA,GAAG,IAAI/gB,EAAElG,KAAKwoD,SAAS2K,QAAQpK,cAAczhC,EAAE,EAAEtnB,KAAKwoD,SAASyL,iBAAiB,WAAW,OAAO3sC,EAAEtnB,KAAK20E,WAAW,gBAAgB30E,KAAK20E,WAAW,qBAAqB,EAAE30E,KAAK20E,WAAW,iBAAiB30E,KAAKg2E,YAAY,CAACh6E,EAAEkK,EAAElK,EAAEgrB,EAAE9gB,EAAE8gB,EAAE9gB,EAAE6qC,EAAEzpB,EAAE,EAAEwpB,EAAE5qC,EAAE4qC,EAAEC,EAAE/wC,KAAK20E,WAAW,wBAAwB,IAAI1mE,EAAEjO,KAAKwoD,SAAS6D,iBAAiB,cAAcjrD,EAAEpB,KAAK+0E,UAAU/0E,KAAKi2E,cAAcj2E,KAAKg2E,YAAY/nE,GAAG7M,EAAEpB,KAAKg1E,UAAUh1E,KAAKk2E,cAAcl2E,KAAKg2E,YAAY
/nE,IAAI+2C,EAAExS,UAAU4iC,gBAAgB,WAAWp1E,KAAK+0E,UAAUttC,EAAE27B,eAAepjE,KAAK+0E,UAAUjzE,UAAU,4BAA4B9B,KAAK+0E,UAAUpvE,MAAMk/B,SAAS,WAAW7kC,KAAK+0E,UAAUpvE,MAAMqsE,OAAO,EAAEhyE,KAAKi2E,cAAcxuC,EAAE28B,WAAWpkE,KAAK+0E,WAAW/0E,KAAKg1E,UAAUvtC,EAAE27B,eAAepjE,KAAKg1E,UAAUlzE,UAAU,4BAA4B9B,KAAKg1E,UAAUrvE,MAAMk/B,SAAS,WAAW7kC,KAAKg1E,UAAUrvE,MAAMqsE,OAAO,EAAEhyE,KAAKg1E,UAAUrvE,MAAMwwE,OAAO,UAAUn2E,KAAKk2E,cAAczuC,EAAE28B,WAAWpkE,KAAKg1E,YAAYhwB,EAAExS,UAAU6iC,mBAAmB,WAAW,IAAIj0E,EAAE,IAAIg1E,MAAMh1E,EAAEU,UAAU,8BAA8BV,EAAEuE,MAAMk/B,SAAS,WAAWzjC,EAAEuE,MAAMqsE,OAAO,GAAG5wE,EAAEuE,MAAMqnD,WAAW,SAAS5rD,EAAEuE,MAAMwwE,OAAO,aAAa/0E,EAAE6S,MAAM,EAAE7S,EAAEwE,OAAO,GAAGxE,EAAEqxC,IAAI,iYAAiYzyC,KAAKw0E,kBAAkBpzE,EAAE6S,OAAO,EAAE7S,EAAEwE,QAAQ,GAAG5F,KAAKi1E,gBAAgB7zE,EAAEpB,KAAKk1E,iBAAiB9zE,EAAEi1E,WAAU,IAAKrxB,EAAExS,UAAU8iC,iBAAiB,WAAW,IAAIl0E,EAAE8E,EAAEohB,EAAErZ,EAAEgZ,EAAEsrB,EAAEsS,EAAEG,EAAEn9B,EAAEF,EAAExD,EAAEghC,EAAEC,EAAEF,EAAEtE,EAAE5gD,KAAKgnB,EAAE5gB,SAASpK,EAAE,EAAEorB,EAAE,KAAKG,GAAE,EAAGupB,GAAE,EAAGoc,GAAGltD,KAAKw0E,gBAAgBnnB,EAAE,IAAItc,EAAEne,QAAQxxB,EAAE,SAASA,GAAG,IAAI8E,EAAE06C,EAAE4H,SAAS+L,gBAAgBjtC,GAAGphB,EAAE,GAAGA,EAAE,IAAI06C,EAAEo1B,YAAYllC,EAAE,MAAM,CAAC5qC,EAAE,IAAI9E,EAAEk1E,cAAc11B,EAAEo1B,YAAYh6E,GAAGsrB,EAAEphB,EAAE,IAAI9E,EAAEm1E,eAAe31B,EAAEo1B,YAAYh6E,GAAGsrB,IAAIphB,EAAE,SAAS9E,GAAG,OAAOqmC,EAAEs6B,YAAY3gE,GAAGmmB,GAAE,EAAGvrB,EAAEoF,EAAEo1E,QAAQpvD,EAAEhmB,EAAE+yB,OAAO/yB,EAAE+yB,OAAO/yB,EAAEq1E,WAAW,cAAcr1E,EAAEY,MAAM,cAAcZ,EAAEY,OAAOylC,EAAEs9B,SAAS/9C,EAAE,YAAYM,GAAGmgB,EAAEs9B,SAAS/9C,EAAE,UAAU/Y,IAAI2yC,EAAEo0B,UAAUrvE,MAAMwwE,OAAO,aAAa9oB,EAAEqf,SAAQ,GAAIplD,EAAE,SAASlmB,GAAG,IAAImmB,EAAE,OAAM,EAAGkgB,EAAEs6B,YAAY3gE,GAAG,IAAI8E,EAAE9E,EAAEo1E,QAAQx6E,EAAE,GAAGK,KAAKC,IAAI4J,GAAG,EAAE,OAAM,EAAGlK,EAAEoF,EAAEo1E,QAAQ,IAAIlvD,EAAErZ,EAAE2yC,EAAE81B,uBAAuBtvD,GAAGw5B,EAAEq0B,iBAAiB3tD,EAAErZ,EAAEqoE,cAAcpwE,EAAEohB,EAAEjrB,KAAK8D,IAAImnB,EAAErZ,EAAEsoE,eAAenvD,EAAEnT,MAAM,GAAGqT,EAAEjrB,KAAKqD,IAAI4nB,EAAEs5B,EAAEo1B,YAAYh6E,KAAKsrB,EAAErZ,EAAEsoE,eAAerwE,EAAEohB,EAAEjrB,KAAK8D,IAAImnB,EAAEs5B,EAAEo1B,YAAYh6E,EAAE4kD,EAAEo1B,YAAYllC,GAAGxpB,EAAEjrB,KAAKqD,IAAI4nB,EAAErZ,EAAEqoE,cAAclvD,EAAEnT,MAAM,IAAI,IAAIs+B,EAAEnrB,EAAEnT,MAAM,EAAE,OAAOmT,EAAEzhB,MAAMyK,KAAKkX,EAAEirB,EAAE,KAAKqO,EAAEm1B,wBAAwB7oB,GAAGjmC,KAAI,GAAIhZ,EAAE,SAAS7M,GAAG,QAAQmmB,IAAIA,GAAE,EAAG8lC,EAAEwf,UAAUplC,EAAE2xB,YAAYpyC,EAAE,YAAYM,GAAGmgB,EAAE2xB,YAAYpyC,EAAE,UAAU/Y,GAAG2yC,EAAEo0B,UAAUrvE,MAAMwwE,OAAO,UAAUjpB,GAAGjmC,KAAI,IAAKA,EAAE,WAAW,IAAI,IAAI/gB,EAAE06C,EAAE81B,uBAAuB,GAAG91B,EAAEi1B,kBAAiB,EAAG3vE,EAAEgkE,SAAS,CAAC,IAAI5iD,EAAElmB,EAAE8E,GAAG06C,EAAE4H,SAASwkB,cAAc1lD,EAAE,GAAGA,EAAE,SAASs5B,EAAE4H,SAAS+P,YAAvI,QAA2J3X,EAAEi1B,kBAAiB,IAAKtjC,EAAE,SAASnxC,GAAG,IAAI8E,EAAE06C,EAAEq0B,gBAAgBlpC,wBAAwBzkB,EAAEphB,EAAEkK,KAAKlK,EAAE+N,MAAM,EAAmDhG,GAAjD/H,EAAE06C,EAAEs0B,iBAAiBnpC,yBAAgC37B,KAAKlK,EAAE+N,MAAM,EAAE,OAAO7S,EAAEo1E,QAAQlvD,GAAGlmB,EAAEo1E,QAAQvoE,GAAG42C,EAAE,SAASzjD,GAAG,QAAQ0vC,IAAIyB,EAAEnxC,KAAKw/C,EAAE81B,uBAAuBxM,YAAYziC,EAAEs6B,YAAY3gE,GAAG0vC,GAAE,EAAG90C,EAAEoF,EAAEo1E,QAAQ,cAAcp1E,EAAEY,OAAOylC,EAAEs9B,SAAS/9C,EAAE,YAAYg+B,GAAGvd,EAAEs9B,SAAS/9C,EAAE,UAAUa,KAAI,IAAKm9B,EAAE,SAAS5jD,GAAG,IAAI0vC,EAAE,OAAM,EAAGrJ,EAAEs6B,YAAY3gE,GAAG,IAAI8E,EAAE9E,EAAEo1E,QAAQx6E,EAAE,GAAGK,KAAKC,IAAI4J,GAAG,EAAE,OAAM,EAAGlK,EAAEoF,EAAEo1E,QAAQ,IAAIlvD,EAAEs5B,EAAE81B,uBAAuBzoE,EAAEqZ,EAAEgvD,cAAcrvD,EAAEK,EAAEivD,eAAehkC,EAAEtrB,EAAEhZ,EAAEA,EAAE/H,GAAG06C,EAAEo1B,YAAYh6E,EAAqBirB,GAAlBhZ,EAAE2yC,EAAEo1B,YAAYh6E,GAAMu2C,EAAGtrB,EAAE/gB,GAAG06C,EAAEo1B,YAAYh6E,EAAE4kD,EAA
Eo1B,YAAYllC,EAAqC7iC,GAAlCgZ,EAAE25B,EAAEo1B,YAAYh6E,EAAE4kD,EAAEo1B,YAAYllC,GAAMyB,GAAItkC,GAAG/H,EAAE+gB,GAAG/gB,GAAG,IAAIiD,EAAEy3C,EAAEq0B,gBAAgBhhE,MAAM,EAAE,OAAO2sC,EAAEq0B,gBAAgBtvE,MAAMyK,KAAKnC,EAAE9E,EAAE,KAAKy3C,EAAEs0B,iBAAiBvvE,MAAMyK,KAAK6W,EAAE9d,EAAE,KAAKy3C,EAAEm1B,wBAAwB7oB,GAAGvlC,KAAI,GAAIE,EAAE,SAASzmB,GAAG,QAAQ0vC,IAAIA,GAAE,EAAGrJ,EAAE2xB,YAAYpyC,EAAE,YAAYg+B,GAAGvd,EAAE2xB,YAAYpyC,EAAE,UAAUa,GAAGqlC,GAAGvlC,KAAI,IAAKA,EAAE,WAAW,IAAIi5B,EAAEi1B,kBAAiB,EAAGj1B,EAAE4H,SAAS+M,YAAYn0D,EAAEw/C,EAAE81B,wBAAwB91B,EAAE4H,SAASiN,YAAW,GAAnG,QAA+G7U,EAAEi1B,kBAAiB,IAAK1xD,EAAE,SAAS/iB,GAAG,IAAImmB,IAAIupB,EAAE,CAAC,IAAI5qC,EAAEqsC,EAAEnxC,GAAG,OAAO,UAAU8E,GAAG06C,EAAEo0B,UAAUrvE,MAAMwwE,SAASv1B,EAAEo0B,UAAUrvE,MAAMwwE,OAAOjwE,KAAKi/C,EAAE,SAAS/jD,GAAG,cAAcA,EAAEY,MAAM,GAAGZ,EAAEu1E,cAAclqE,OAAOvG,EAAE9E,EAAEu1E,cAAc,KAAKlvC,EAAEs6B,YAAY3gE,GAAG,aAAaA,EAAEY,MAAM,GAAGZ,EAAEu1E,cAAclqE,OAAO6a,EAAElmB,EAAEu1E,cAAc,KAAKlvC,EAAEs6B,YAAY3gE,GAAG6M,EAAE7M,IAAIgkD,EAAE,SAAShkD,GAAG,cAAcA,EAAEY,MAAM,GAAGZ,EAAEu1E,cAAclqE,OAAOo4C,EAAEzjD,EAAEu1E,cAAc,KAAKlvC,EAAEs6B,YAAY3gE,GAAG,aAAaA,EAAEY,MAAM,GAAGZ,EAAEu1E,cAAclqE,OAAOu4C,EAAE5jD,EAAEu1E,cAAc,KAAKlvC,EAAEs6B,YAAY3gE,GAAGymB,EAAEzmB,IAAI8jD,EAAE,SAAS9jD,EAAE8E,GAAG,IAAI,IAAIohB,EAAE,CAAC,aAAa,WAAW,YAAY,eAAerZ,EAAE,EAAEA,EAAEqZ,EAAE7a,OAAOwB,IAAI2yC,EAAE4H,SAAS2Q,iBAAiB/3D,EAAEkmB,EAAErZ,GAAG/H,IAAIlG,KAAKm1E,kBAAkB,mBAAmBhsE,EAAEypB,QAAQgnC,2BAA2B55D,KAAKm1E,kBAAkB,kBAAkB,MAAM,IAAI7nB,EAAErqD,OAAO2zE,MAAM,YAAY,YAAY52E,KAAKwoD,SAAS2Q,iBAAiBn5D,KAAKi1E,gBAAgB3nB,EAAEpnD,GAAGlG,KAAKwoD,SAAS2Q,iBAAiBn5D,KAAKk1E,iBAAiB5nB,EAAEpnD,GAAGlG,KAAKwoD,SAAS2Q,iBAAiBn5D,KAAKg1E,UAAU,YAAYnwB,GAAG7kD,KAAKwoD,SAAS2Q,iBAAiBn5D,KAAKg1E,UAAU,YAAY7wD,GAAGnkB,KAAKs0E,qBAAqBpvB,EAAEllD,KAAKi1E,gBAAgB9vB,GAAGD,EAAEllD,KAAKk1E,iBAAiB/vB,GAAGD,EAAEllD,KAAKg1E,UAAU5vB,KAAKJ,EAAExS,UAAUojC,iBAAiB,WAAW,IAAIx0E,EAAEpB,KAAKi2E,cAAc70E,EAAEkoD,UAAU,EAAE,EAAEtpD,KAAKg2E,YAAYllC,EAAE9wC,KAAKg2E,YAAYjlC,GAAG,IAAI/wC,KAAK62E,gBAAgB,MAAMz1E,GAAGG,QAAQoN,KAAKvN,GAAGpB,KAAKi2E,cAAczrB,UAAUxqD,KAAK20E,WAAW,oCAAoCvzE,EAAEmpD,YAAYvqD,KAAK20E,WAAW,sCAAsCvzE,EAAE6nD,YAAY7nD,EAAEypD,OAAO,GAAG,IAAIzpD,EAAE0pD,OAAO,GAAG9qD,KAAKg2E,YAAYjlC,EAAE,IAAI3vC,EAAE0pD,OAAO9qD,KAAKg2E,YAAYllC,EAAE,GAAG9wC,KAAKg2E,YAAYjlC,EAAE,IAAI3vC,EAAE0pD,OAAO9qD,KAAKg2E,YAAYllC,EAAE,GAAG,IAAI1vC,EAAE+pD,UAAUnG,EAAExS,UAAUqkC,cAAc,WAAW,IAAIz1E,EAAEpB,KAAK20E,WAAW,8BAA8BzuE,EAAElG,KAAK20E,WAAW,sCAAsCrtD,EAAEtnB,KAAK20E,WAAW,gCAAgC,GAAGvzE,GAAGkmB,EAAE,CAAC,IAAIrZ,EAAEjO,KAAK20E,WAAW,YAAY1tD,EAAEjnB,KAAK82E,kCAAkCrvC,EAAExgB,EAAE8vD,KAAK9vD,EAAE+vD,KAAKzkC,EAAEvyC,KAAKi2E,cAAc9sE,EAAEnJ,KAAKwoD,SAAS+L,gBAAgB1P,EAAExoD,KAAKqD,IAAIyJ,EAAE,GAAGA,EAAE,GAAG,OAAO4nC,GAAG/wC,KAAKg2E,YAAYllC,EAAE,IAAI+T,EAAEG,GAAGhlD,KAAKg2E,YAAYjlC,EAAE,IAAItJ,EAAE5f,EAAE7nB,KAAKg2E,YAAYllC,EAAE,GAAGnpB,EAAE3nB,KAAKg2E,YAAYjlC,EAAE,GAAG5sB,EAAE,KAAKghC,EAAE,KAAK5S,EAAE0W,YAAY1W,EAAEsY,OAAO,GAAGljC,GAAG,IAAI,IAAIy9B,EAAE,EAAEA,EAAEn+B,EAAElb,KAAKU,OAAO24C,IAAI,CAAC,IAAIF,EAAEj+B,EAAElb,KAAKq5C,GAAGxE,EAAE,OAAOsE,EAAE,IAAIA,EAAE,GAAG/7C,EAAE,IAAI4nC,EAAEsW,IAAIrgC,EAAE,OAAOk+B,EAAE,GAAGv9B,GAAGu9B,EAAE,GAAGj+B,EAAE+vD,MAAMhyB,EAAEqC,KAAKp5C,GAAG,OAAOkW,GAAG9nB,KAAKgpB,MAAMu7B,IAAIvkD,KAAKgpB,MAAMlB,MAAMkvB,SAASuN,IAAIvN,SAASrsB,IAAI,OAAO7C,EAAEouB,EAAEuY,OAAOlK,EAAEj5B,GAAG1Z,GAAGskC,EAAEuY,OAAOlK,EAAEuE,GAAG5S,EAAEuY,OAAOlK,EAAE55B,GAAG7C,EAAEy8B,EAAEuE,EAAEn+B,IAAI,OAAO7C,IAAIlW,GAAGskC,EAAEuY,OAAOlK,EAAEuE,GAAG5S,EAAEuY,OAAOlK,EAAEj5B,IAAI4qB,EAAEuY,OAAO3mC,EAAEwD,IAAIxD,EAAEghC,EAAE,OAAO,GAAG5S,EAAEuY,OAAOjjC,EAAEF,GAAG4qB,EAAEma
,YAAYtrD,EAAE,CAAC,IAAIpF,EAAEgE,KAAKi2E,cAAcgB,qBAAqB,EAAE,EAAE,EAAEtvD,GAAGzhB,GAAGlK,EAAEk7E,aAAa,EAAEhxE,GAAGlK,EAAEk7E,aAAa,EAAE91E,GAAGpB,KAAKi2E,cAAcxpB,UAAUzwD,EAAEu2C,EAAEoa,OAAOrlC,IAAItnB,KAAKi2E,cAAc1rB,YAAYjjC,EAAEtnB,KAAKi2E,cAAczrB,UAAUxqD,KAAK20E,WAAW,8BAA8BpiC,EAAE4Y,YAAYnG,EAAExS,UAAUskC,gCAAgC,WAAW,IAAI11E,EAAE8E,EAAElG,KAAKwoD,SAASlhC,EAAEtnB,KAAK20E,WAAW,YAAY1mE,EAAE/H,EAAE6kE,aAAa9jD,EAAE/gB,EAAE6mD,YAAYxa,EAAE,IAAIxxB,MAAM9S,GAAG9E,GAAE,EAAG07C,EAAE3+C,EAAE8mD,aAAajc,EAAE,GAAG,IAAI3vC,EAAE,EAAEA,EAAE6M,EAAE7M,IAAI,CAAC,IAAI4jD,EAAEhlD,KAAK20E,WAAW,sBAAsB1tD,EAAE7lB,IAAI2vC,EAAEjkB,KAAKk4B,GAAG,OAAOA,IAAI77C,GAAE,GAAI,GAAGA,EAAE,IAAI/H,EAAE,EAAEA,EAAE6M,EAAE7M,IAAImxC,EAAEnxC,GAAG2vC,EAAE3vC,EAAE,QAAQ,IAAIA,EAAE,EAAEA,EAAE6M,EAAE7M,IAAImxC,EAAEnxC,GAAGyjD,EAAEzjD,EAAE,GAAG,IAAIymB,EAAE,GAAGF,EAAEzhB,EAAEg2D,aAAa/3C,EAAEje,EAAEgvD,YAAY,IAAI9zD,EAAE,EAAEA,EAAE8E,EAAE6kE,aAAa3pE,IAAI,GAAGmxC,EAAEnxC,GAAG,CAAC,IAAI+jD,EAAEx9B,EAAEk/B,cAAc3gD,EAAEqkE,SAASnpE,EAAE+iB,GAAGje,EAAE0iE,aAAa,IAAIzjB,EAAEx9B,EAAEm/B,eAAe3B,EAAEj/C,EAAE0iE,aAAazkD,IAAI0D,EAAEiF,KAAKq4B,GAAG,IAAIC,EAAE,GAAG,IAAIhkD,EAAE,EAAEA,EAAEymB,EAAE,GAAGpb,OAAOrL,IAAI,CAAC,IAAI,IAAI8jD,EAAE,EAAEtE,EAAE,EAAE55B,EAAE,EAAEA,EAAEa,EAAEpb,OAAOua,IAAI,CAAC,IAAIhrB,EAAE6rB,EAAEb,GAAG5lB,GAAG,GAAG,OAAOpF,GAAGi0C,MAAMj0C,KAAK4kD,IAAIsE,GAAGlpD,GAAGopD,EAAEt4B,KAAK,CAACjF,EAAE,GAAGzmB,GAAG,GAAG8jD,EAAEtE,IAAI,IAAIx5B,EAAEQ,OAAO4uC,UAAUjvC,GAAGK,OAAO4uC,UAAU,IAAIp1D,EAAE,EAAEA,EAAEgkD,EAAE34C,OAAOrL,IAAI,CAAC,IAAI0vC,EAAEsU,EAAEhkD,GAAG,GAAG,OAAO0vC,GAAGuC,SAASvC,MAAMxpB,GAAGwpB,EAAE,KAAK1pB,EAAE/qB,KAAK8D,IAAIinB,EAAE0pB,GAAGvpB,EAAElrB,KAAKqD,IAAI6nB,EAAEupB,IAAI,GAAGxpB,EAAE,IAAIC,EAAEkgB,EAAE0sB,MAAM5sC,GAAGA,GAAG,IAAIA,EAAEH,EAAEqgB,EAAE0sB,MAAM/sC,GAAGhmB,EAAE,EAAEA,EAAEgkD,EAAE34C,OAAOrL,IAAIgkD,EAAEhkD,GAAG,GAAGqmC,EAAE0sB,MAAM/O,EAAEhkD,GAAG,QAAQ,CAAC,IAAI8rD,EAAEG,EAAE9lC,EAAEH,EAAoCG,GAAlC2lC,EAAEG,GAAGzlC,OAAOuvD,UAAU,IAAI5vD,EAAE,IAAI8lC,EAAOjmC,GAAG8lC,EAAE,MAAM,CAACnhD,KAAKq5C,EAAE4xB,KAAK5vD,EAAE2vD,KAAKxvD,IAAIy9B,EAAExS,UAAUsjC,kBAAkB,WAAW,IAAI10E,EAAEpB,KAAKwoD,SAAS+L,gBAAgBruD,EAAElG,KAAKwoD,SAASwL,aAAa1sC,EAAElmB,EAAE,GAAGA,EAAE,GAAG6M,EAAE5R,KAAKqD,IAAI,GAAGwG,EAAE,GAAG9E,EAAE,IAAIkmB,GAAGL,EAAE5qB,KAAKqD,IAAI,GAAG0B,EAAE,GAAG8E,EAAE,IAAIohB,GAAGmgB,EAAEznC,KAAKg2E,YAAYh6E,EAAEgE,KAAKg2E,YAAYllC,EAAE7iC,EAAEskC,EAAEvyC,KAAKg2E,YAAYh6E,EAAEgE,KAAKg2E,YAAYllC,GAAG,EAAE7pB,GAAG9d,EAAE9M,KAAKqD,IAAIM,KAAKg2E,YAAYhvD,EAAEhnB,KAAKg2E,YAAYhvD,GAAGhnB,KAAKg2E,YAAYjlC,EAAE/wC,KAAKi1E,gBAAgBrvE,QAAQ,GAAGi/C,EAAE7kD,KAAKi1E,gBAAgBhhE,MAAM,EAAEjU,KAAKi1E,gBAAgBtvE,MAAMyK,KAAKq3B,EAAEod,EAAE,KAAK7kD,KAAKi1E,gBAAgBtvE,MAAMynB,IAAIjkB,EAAE,KAAKnJ,KAAKk1E,iBAAiBvvE,MAAMyK,KAAKmiC,EAAEsS,EAAE,KAAK7kD,KAAKk1E,iBAAiBvvE,MAAMynB,IAAIptB,KAAKi1E,gBAAgBtvE,MAAMynB,IAAIptB,KAAKi1E,gBAAgBtvE,MAAMqnD,WAAW,UAAUhtD,KAAKk1E,iBAAiBvvE,MAAMqnD,WAAW,WAAWhI,EAAExS,UAAUujC,sBAAsB,WAAW,IAAI30E,EAAEpB,KAAKk2E,cAAc90E,EAAEkoD,UAAU,EAAE,EAAEtpD,KAAKg2E,YAAYllC,EAAE9wC,KAAKg2E,YAAYjlC,GAAG,IAAI7qC,EAAElG,KAAKg2E,YAAYllC,EAAE,EAAExpB,EAAEtnB,KAAKg2E,YAAYjlC,EAAE,EAAE9iC,EAAEjO,KAAK02E,uBAAuB,GAAGt1E,EAAEmpD,YAAYvqD,KAAK20E,WAAW,sCAAsCvzE,EAAEopD,UAAUxqD,KAAK20E,WAAW,oCAAoC1mE,EAAEi8D,SAAS,CAAC,IAAIjjD,EAAE5qB,KAAKqD,IAAI,EAAEuO,EAAEqoE,cAAct2E,KAAKg2E,YAAYh6E,GAAGyrC,EAAEprC,KAAK8D,IAAI+F,EAAE+H,EAAEsoE,eAAev2E,KAAKg2E,YAAYh6E,GAAGoF,EAAEqrD,UAAU,uBAAuBzsD,KAAK20E,WAAW,sBAAsBlzE,WAAW,IAAIL,EAAE0rE,SAAS,EAAE,EAAE7lD,EAAEjnB,KAAKg2E,YAAYjlC,GAAG3vC,EAAE0rE,SAASrlC,EAAE,EAAEznC,KAAKg2E,YAAYllC,EAAErJ,EAAEznC,KAAKg2E,YAAYjlC,GAAG3vC,EAAE6nD,
YAAY7nD,EAAEypD,OAAO,EAAE,GAAGzpD,EAAE0pD,OAAO7jC,EAAE,GAAG7lB,EAAE0pD,OAAO7jC,EAAEK,GAAGlmB,EAAE0pD,OAAOrjB,EAAEngB,GAAGlmB,EAAE0pD,OAAOrjB,EAAE,GAAGrmC,EAAE0pD,OAAO5kD,EAAE,GAAG9E,EAAE+pD,cAAc/pD,EAAE6nD,YAAY7nD,EAAEypD,OAAO,EAAE,GAAGzpD,EAAE0pD,OAAO,EAAExjC,GAAGlmB,EAAE0pD,OAAO5kD,EAAEohB,GAAGlmB,EAAE0pD,OAAO5kD,EAAE,GAAG9E,EAAE+pD,UAAUnG,EAAExS,UAAUkkC,qBAAqB,WAAW,IAAIt1E,EAAEpB,KAAKi1E,gBAAgBhhE,MAAM,EAAE/N,EAAEktC,WAAWpzC,KAAKi1E,gBAAgBtvE,MAAMyK,MAAMhP,EAAEkmB,EAAE8rB,WAAWpzC,KAAKk1E,iBAAiBvvE,MAAMyK,MAAMhP,EAAE,MAAM,CAACk1E,cAAcpwE,EAAEqwE,eAAejvD,EAAE4iD,SAAShkE,EAAE,EAAElG,KAAKg2E,YAAYh6E,GAAGsrB,EAAE,EAAEtnB,KAAKg2E,YAAYh6E,EAAEgE,KAAKg2E,YAAYllC,IAAIxpB,EAAEsL,QAAQoyB,EAAE9+C,EAAEy+C,QAAQr9B,EAAEsL,SAAS,CAAC,+BAA+B,GAAG,mBAAmB,GAAG,iBAAiB,MAAM,GAAG,CAAC,IAHx31B,CAG631B,K,kGCJtq2B,oJAEa9sB,EAAmC,GAEnCsxE,EAA2B,SAACC,EAAsBlmE,GAC7D,IAAI0pB,EAAM,IACNrN,EAAU,GAwBd,OAtBI6pD,GAAgD,kBAA1BlmE,EAAcqc,UAEtCA,EAAUrc,EAAcqc,SAGU,kBAAzBrc,EAAcu4D,QAAgD,KAAzBv4D,EAAcu4D,SAC5D7uC,EAAM1pB,EAAcu4D,QAEZp6C,SAAS,aACfuL,EAAMA,EAAI3R,UAAU,EAAG2R,EAAIpuB,OAAS,IAGF,kBAAzB0E,EAAcuzC,QAAgD,KAAzBvzC,EAAcuzC,SAC5D7pB,GAAG,WAAQ1pB,EAAcuzC,SAGvB2yB,GAA2B,KAAZ7pD,IACjBqN,GAAG,YAASrN,KAEL6pD,GAA2B,KAAZ7pD,IACxBqN,EAAMrN,GAEDqN,GAGIy8C,EAA0B,SAACrmE,EAAsBE,GAC5D,IAAMomE,EAAYpmE,EAAcgb,aAE1BqrD,EAASvmE,EAAUwmE,kBACzB,OAAIF,IAAcC,EACV,cAAN,OAAqB9xD,YAAc6xD,IAG/B,cAAN,OAAqB7xD,YAAc8xD,GAAnC,6BAA+D9xD,YAAc6xD,KAUlEG,EAAqD,SAAC,GAK5D,IAMDC,EAVJC,EAII,EAJJA,cACAziE,EAGI,EAHJA,mBACA0iE,EAEI,EAFJA,qBACAC,EACI,EADJA,qBAIMC,EAAkD,IAA9B5iE,EAAmB1I,OAAemrE,EAAgBziE,EACtE6iE,EAAsBD,EAAkB9zC,SAAS4zC,GAevD,OAPEF,EALGG,KACEE,GAAuBD,EAAkBtrE,OAAS,IAAOurE,EAGrDA,EACeD,EAAkBjqE,QACxC,SAACggB,GAAD,OAAeA,IAAc+pD,KAGPE,EAAkBx3B,OAAOs3B,GANzB,CAACA,IASDprE,SAAWmrE,EAAcnrE,OAC1C,GAEFkrE,I,iNCjEItuE,EAAuB,CAClC4uE,gBAAgB,EAChBC,eAAgB,KAChBC,mBAAoB,KACpBC,YAAY,EACZC,SAAS,GAGEC,EAAmBnsE,wBAAsB,GAAI9C,GAE1DivE,EAAiBhrE,GAAGq0B,KAAyB,SAACp0B,EAAD,OAAUY,EAAV,EAAUA,OAAQyzB,EAAlB,EAAkBA,WAAlB,sBACxCr0B,EADwC,CAE3C2qE,eAAgB/pE,EAChBgqE,mBAAoBv2C,EACpBq2C,gBAAgB,OAGlBK,EAAiBhrE,GAAG2xB,KAAwB,SAAC1xB,GAAD,sBACvCA,EADuC,CAE1C0qE,eAAgB5uE,EAAa4uE,eAC7BC,eAAgB7uE,EAAa6uE,eAC7BC,mBAAoB9uE,EAAa8uE,wBAGnCG,EAAiBhrE,GAAGu2C,KAAkB,SAACt2C,EAAD,OAAU6qE,EAAV,EAAUA,WAAV,sBACjC7qE,EADiC,CAEpC6qE,kBAGFE,EAAiBhrE,GAAGw2C,KAAkB,SAACv2C,EAAD,OAAU8qE,EAAV,EAAUA,QAAV,sBACjC9qE,EADiC,CAEpC8qE,e,cClCaE,eAAe,mBAC3BC,IAAYtsE,KADe,cAE3BusE,IAAW1mE,KAFgB,cAK3B2mE,IAAeJ,GALY,I,0DCTjBK,EAAgBC,IAAMx9B,OAAO,CACxCrwB,QAAS,CACP,gBAAiB,qBACjB8tD,OAAQ,YAEV5tD,iBAAiB,I,gFCLN0tD,EAAgBC,IAAMx9B,OAAO,CAExCrwB,QAAS,CACP,gBAAiB,qBACjB8tD,OAAQ,YAEV5tD,iBAAiB,ICkBN6tD,EAAiB,SAACC,GAC7B,IAAMC,EAAS,IAAIC,IACbC,EAAc,IAAID,IAElBE,EAAUC,aAAS,gBACvBlxD,EADuB,EACvBA,IADuB,IAClB3S,cADkB,MACT,MADS,MACF8jE,cADE,MACO,GADP,EACWttE,EADX,EACWA,KAAMutE,EADjB,EACiBA,gBAAiBC,EADlC,EACkCA,kBAAmBC,EADrD,EACqDA,kBADrD,OAGvBx4D,YAAK23D,EAAczpE,QAAQ,CACzBgZ,MACA3S,SACA8jE,SACAttE,OACA0tE,QAdkB,KAelBC,YAAW,OAAEF,QAAF,IAAEA,OAAF,EAAEA,EAAmBG,SAC9Br5B,KACFs5B,aAAI,YAA6B,IAApBC,EAAmB,EAAzB9tE,KAA2BwtE,EAAkBM,MACpDC,aAAW,SAACzzD,GAMV,MApCuB,gCAgCd,OAALA,QAAK,IAALA,OAAA,EAAAA,EAAOpd,UACT1H,QAAQoN,KAAK,cAAeuZ,GAE9BoxD,EAAgBjzD,GACT0zD,oBAGVhB,GAQH,OANeG,EAAY54B,KACzB05B,YAAU,MACVC,aAAU,kBAAMjB,EAAO14B,KAAK64B,OAGvBe,YACA,CAAClB,EAAQE,I,mCCFLiB,EAAkB,SAACpuE,EAAiBquE,GAC/C,MAAoB,SAAhBruE,EAAKwhC,OArBqB,SAACxhC,EAAmBquE,GAClD,IAAMC,EAAkBtuE,EAAK0rE,kBAC7B,IAAK1rE,EAAK6f,OAAO7f,KAAKU,OACpB,OAAOV,EAET,IAAMuuE,EAAsBvuE,EAAK6f,OAAO7f,KAAK,GAAG,GAAKquE,EAAmBC,EAClEE,EAAaC,YAAKzuE,EAAK6f,OAAOrX,QAAQvG,KAAI,kBAAM,QAChDys
E,EAAQ,IAAI15D,MAAMq5D,GACrBztB,KAAK,MACL3+C,KAAI,SAAC4yC,EAAG3yC,GAAJ,OAAWqsE,EAAsBrsE,EAAIosE,GAArC,mBAAyDE,OAChE,OAAO,eACFxuE,EADL,CAEEyC,MAAOzC,EAAKyC,MAAQ6rE,EAAkBD,EACtCxuD,OAAO,eACF7f,EAAK6f,OADJ,CAEJ7f,KAAM0uE,EAAMl6B,OAAOx0C,EAAK6f,OAAO7f,UAO1B2uE,CAAiB3uE,EAAqBquE,GAExCruE,GAGH4uE,EAAuB,GAChBC,EAAmB,SAAC7uE,EAAiBwhC,EAAgBstC,GAChE,GAAe,UAAXttC,GAAsC,SAAhBxhC,EAAKwhC,OAAmB,CAChD,GAAIxsB,MAAMivB,QAAQjkC,EAAK6f,QAAS,OAAO7f,EAEvC,IAAM+uE,EAAaD,EACf9b,YAAShzD,EAAqB6f,OAAO7f,MACpCA,EAAqB6f,OAAO7f,KACjC,OAAO,eACFA,EADL,CAGEwhC,OAAQ,QACR3hB,OAAQkvD,EAAW9qE,QAAO,SAACC,EAAe8qE,GAExC,OADAA,EAAUrzC,QACJ,GAAN,mBAAWz3B,GAAX,CAAgB+qE,YAAID,OACnBJ,KAGP,OAAO5uE,G,uBC7EIkvE,EAAYC,IAAOnrD,IAAV,oEAAGmrD,CAAH,qGAGNC,YAAU,IAGbA,YAAU,GAAMA,YAAU,GAAMA,YAAU,GAAMA,YAAU,IAG1DC,EAAcF,IAAOnrD,IAAV,sEAAGmrD,CAAH,+DAOXG,GAAmBH,IAAOnrD,IAAV,2EAAGmrD,CAAH,MAEhBI,GAAaJ,YAAOxwC,KAAV,qEAAGwwC,CAAH,iEACZ,gBAAG70D,EAAH,EAAGA,MAAH,SAAUlX,SAA0BosE,YAAS,YAChDl1D,GAASk1D,YAAS,WAGPJ,eAGNK,GAAcN,YAAOO,KAAV,sEAAGP,CAAH,8BAEb,qBAAG70D,OAAsBk1D,YAAS,UAAaA,YAAS,aCjBtDG,GAAiB,SAACC,GAA8B,IAEzD5yC,EACE4yC,EADF5yC,OAAQxQ,EACNojD,EADMpjD,KAAMqjD,EACZD,EADYC,YAAaC,EACzBF,EADyBE,aAAcC,EACvCH,EADuCG,cAAe3sE,EACtDwsE,EADsDxsE,QAASkX,EAC/Ds1D,EAD+Dt1D,MAEnE,OACE,kBAAC,EAAD,KACGu1D,GAAe,kBAACR,EAAD,KAAcQ,GAC9B,kBAACP,GAAD,KACGtyC,GACC,kBAACuyC,GAAD,CAAYnsE,QAASA,EAASkX,MAAOA,GAClC0iB,GAGJxQ,GACC,kBAACijD,GAAD,CAAarsE,QAASA,EAASkX,MAAOA,GACnCkS,GAGJujD,GAAiBA,EAAcH,IAEjCE,GAAgB,kBAACT,EAAD,CAAa9qE,OAAK,GAAEurE,KAO9BE,GAAuB,SAACJ,GAAD,OAA8B,kBAAC,GAAmBA,ICxCzEK,GAAoBd,IAAOnrD,IAAV,4EAAGmrD,CAAH,kGACnBC,YAAU,GACTA,YAAU,GACJA,YAAU,IAMfc,GAAmBf,IAAO5zD,EAAV,2EAAG4zD,CAAH,yDCJhBgB,GAAe,CAC1Br3C,SAAUs3C,IAAMC,SAASC,aACzBC,UAAW,IACXC,kBAAkB,GAGPC,GAA2C,WACtD,IAKMC,EAAwBV,GAAqB,eAL5B,CACrBhzC,OAAQ,qBACRxQ,KAAM,mGAG0C,CAEhDlS,OAAO,EACPu1D,YACE,kBAAC,GAAD,KACE,kBAAC,IAAD,CAAM16D,KAAK,OAAO6gB,KAAK,QAAQ0K,MAAM,cAI3C0vC,IAAM91D,MAAMo2D,EAAuBP,KAGxBQ,GAAyC,WACpD,IAAMC,EAAiB,CACrB5zC,OAAQ,qBACRxQ,KACE,kBAAC,GAAD,CACE4Q,KAAK,oGACLhV,OAAO,UAFT,sEAQEsoD,EAAwBV,GAAqB,eAC9CY,EAD6C,CAEhDt2D,OAAO,EACPu1D,YACE,kBAAC,GAAD,KACE,kBAAC,IAAD,CAAM16D,KAAK,OAAO6gB,KAAK,QAAQ0K,MAAM,cAI3C0vC,IAAM91D,MAAMo2D,EAAuBP,K,0DChD/BU,GAAoB,CACxBh6D,KAAM,IACNC,MAAO,KACPC,KAAM,KACNC,KAAM,OACNC,eAAe,EACfC,cAAc,EACdC,OAAQ,KACR1U,MAAO,EACPC,OAAQ,EACR2U,WAAW,EACXC,gBAAiB,EACjBC,iBAAkB,EAClBC,WAAW,EACXC,aAAa,EACbtR,MAAO,KACPuR,OAAQ,KACR9X,MAAO,KACPkxE,gBAAiB,EACjBC,SAAU,EACVC,eAAgB,EAChBC,WAAY,GAORC,GAAiB,SAAC,GAA+D,IAAD,mBAA7D1tE,EAA6D,KAAxDvQ,EAAwD,KACpF,OALoB,SAAC,GAAD,uBAAEuQ,EAAF,KAAOvQ,EAAP,iBACO0B,IAA3Bk8E,GAAkBrtE,SAAgC7O,IAAV1B,EAIpCk+E,CAAc,CAAC3tE,EAAKvQ,IACf,GAEF,eACJuQ,EAAM6U,mBAAmBplB,KAejBm+E,GAAiC,UAXtB78B,aACtBvzC,aAAM,KACNqwE,cAAW,SAACp+E,EAAOi3B,GAAR,OACE,IAAVA,EAAe,CAAErT,KAAM5jB,GAAUi+E,GAAgBj+E,EAAM+N,MAAM,SAEhEc,KACAwvE,aAAWT,IAGKU,CAASl3E,SAAS6d,SAASrB,MAERG,K,qBCRpBw6D,I,YAsDPC,I,YAkNAC,I,YAiCAC,I,YAkDOC,IAjWXC,GAAiCh0E,IAAW,GAAK,GAIjDi0E,GAA2B1jD,cAE1B,SAAUojD,KAAV,iFAEa,OAFb,SAEmBO,YAAKD,IAFxB,WAEGE,EAFH,QAOQ/7E,OAASiQ,KAAgB9C,QAAQ1N,WAPzC,iBAWyB,OAHpBqH,EAAWi1E,EAAOj1E,QAChByI,EAAczI,EAAQuI,gBAAtBE,UATP,cAUoBA,EAVpB,GAUMysE,EAVN,KAUaC,EAVb,eAW+B7tC,YAC9BhQ,MAZD,sBAgBK49C,GAAS,GAAKC,GAAO,GAhB1B,iBAkBC,OAlBD,UAkBOC,YAAI/rE,aAAsB,CAC9BpN,GAAI+D,EAAQ/D,MAnBf,8CA0BH,OA1BG,UA0BGm5E,YAAIH,GA1BP,gEAiCP,IAAMI,GAAyB,SAACjpE,EAAgCtR,GAAjC,qBAGXlD,IAAfwU,EACC,OACAyV,mBAAmBzV,GALM,YAMzByV,mBAAmB/mB,KAInBw6E,IAAwC,EAExCC,GAAmB,SAACvpE,GACxB,MAAgB,UAAZA,EAA4B,OAChB,SAAZA,GAAkC,cAAZA,EAAgCA,EACpD,SAAN,OAAgBA,I,GAGMgkE,EACtBqE,GAzDmC,EAyDUS,IADxCU,G,qBAGP,SAAUd,GAAV,sJAYmB,O
AZO10E,EAA1B,EAA0BA,QAGtB8K,EAOE9K,EAPF8K,KAAM4Z,EAOJ1kB,EAPI0kB,QAAStb,EAObpJ,EAPaoJ,MAAOq7B,EAOpBzkC,EAPoBykC,OAAQ73B,EAO5B5M,EAP4B4M,OAAQ6oE,EAOpCz1E,EAPoCy1E,MAAOjpE,EAO3CxM,EAP2CwM,MAAO1R,EAOlDkF,EAPkDlF,QACpD4K,EAME1F,EANF0F,MAAOC,EAML3F,EANK2F,OAAQyG,EAMbpM,EANaoM,WAAYX,EAMzBzL,EANyByL,OAAQC,EAMjC1L,EANiC0L,YAAaC,EAM9C3L,EAN8C2L,sBAChDH,EAKExL,EALFwL,WAAYI,EAKV5L,EALU4L,qBAAsBK,EAKhCjM,EALgCiM,QAASlB,EAKzC/K,EALyC+K,WAL/C,EAUM/K,EAJFgM,eANJ,MAMc,YANd,IAUMhM,EAHF6L,kBAPJ,MAOiB,GAPjB,EASItD,EACEvI,EADFuI,gBAAiBtM,EACf+D,EADe/D,GAAIy0E,EACnB1wE,EADmB0wE,kBATzB,SAYyBppC,YAAOouC,MAZhC,YAYQjzE,EAZR,4BAeUkzE,EAA2BN,GAAuBjpE,EAAYtR,GAC9D86E,EAAc39E,OAAOlB,KAAK0L,EAASQ,MAAMszB,MAAK,SAACs/C,GAAD,OAClDA,EAAYx9D,WAAWjP,IAAUysE,EAAY16C,SAASw6C,MAjB5D,wBAqBMl9E,QAAQoN,KAAR,iDAAuDuD,EAAvD,mBAAuEnN,IArB7E,2BAyBI,OADMgH,EAAOR,EAASQ,KAAK2yE,GAxB/B,UAyBUR,YAAIjsE,KAAgB9C,QAAQ,CAChC8B,UAAWlF,EACXsF,kBACAtM,QA5BN,0CAiCQmjB,EAAMte,IAAQ,UACby2C,YAAmBzsC,GADN,eAEhBA,EAEEgrE,EAAuBh7E,EAAQmJ,MAAM,KACrC8xE,EAAUD,EAAqB36C,SAAS,QAGxC66C,GAFAC,GAAqBX,KAA0CS,GAGjED,EAAqBr+B,OAAO,QAAUq+B,EAEpCI,EAAc,CAClBX,GAAiBvpE,GACjBN,GAAW,gBAAaA,IACxB1G,OAAOuyB,SAEH4+C,EAA8B,SAAfprE,EAAwB,CAE3C9H,KAAK,aACH+B,OAAQ,CACNiH,UACAyY,UACAtY,WAAYA,EAAaA,EAAWnI,MAAM,aAAUrM,EACpD6T,UAEF/F,QACAC,SACAiH,SACA6oE,QACAjpE,QACA4pE,cAAeJ,GACXrqE,GAAyB,CAAE0qE,yBAA0B,CAAC1qE,IAbxD,CAcF2qE,aAAc,CAAa,cAAZtqE,GAA2B,CACxCS,OAAQb,GAAwB,MAChCI,QAAQ,CAAE,SAAH,mBAAekqE,KAEZ,UAAZlqE,GAAA,aACES,OAAQjB,EACRQ,QAASkqE,GACLrqE,EAAWlI,QAAU,CAAE8H,OAAQI,KAClC7G,OAAOuyB,YAEV,CACFg5C,OAAQ,CACNnnE,QACA0uC,GAAG,IAAIj8B,MAAO06D,UACd9xC,SACA73B,SACA6oE,QACAjpE,QACA1R,UACA4K,QACAC,SACAyG,eAIEqkE,EAAoB,SAACxtE,GACzB,GAAI,OAACA,QAAD,IAACA,OAAD,EAACA,EAAM6f,OAEJ,CAAC,IACE0zD,EAAsBjuE,EAAtBiuE,kBAEFC,EAAqB3E,EACxB7uE,EACDwhC,EACAwxC,GAGI9tE,EAAS,eACVsuE,EADU,GAGR,yBAA0BxzE,EAAK6f,QAAW,CAC7CnX,wBACAK,UACAN,cACAG,aAEA6qE,eAAgBzzE,EAAK6f,OAAO6zD,qBAAqBhrE,KAIrDopE,GAAyBK,IAAIjsE,KAAgB9C,QAAQ,CACnD8B,UAAWquE,EACPnF,EAAgBlpE,EAAwBquE,GACxCruE,EACJI,kBACAtM,aA5BF84E,GAAyBK,IAAIjsE,KAAgB7C,QAAQ,CAAErK,SAiCrDu0E,EAAkB,SAACjzD,GACvB9kB,QAAQoN,KAAK,2BAA4B0X,GACzCw3D,GAAyBK,IAAIjsE,KAAgB7C,QAAQ,CAAErK,SAGzDu5E,GAActzB,KAAd,eACKi0B,EADL,CAEE1pE,OAAQ1B,GAAc,MACtBqU,MACAoxD,kBACAC,oBACAC,uBAxIJ,yC,OA4IoDV,EAtMX,G,qBAsMlC4G,G,MAAmBC,G,MAC1B,SAASC,GAAT,GAAqF,IAAjD92E,EAAgD,EAAhDA,QAEhC8K,EAKE9K,EALF8K,KAAM1B,EAKJpJ,EALIoJ,MAAOq7B,EAKXzkC,EALWykC,OAAQ73B,EAKnB5M,EALmB4M,OAAQ6oE,EAK3Bz1E,EAL2By1E,MAAOjpE,EAKlCxM,EALkCwM,MAAO1R,EAKzCkF,EALyClF,QAC3C4K,EAIE1F,EAJF0F,MAAOC,EAIL3F,EAJK2F,OAAQyG,EAIbpM,EAJaoM,WAAYZ,EAIzBxL,EAJyBwL,WAC3BQ,EAGEhM,EAHFgM,QACAC,EAEEjM,EAFFiM,QACAjB,EACEhL,EADFgL,aAAc/O,EACZ+D,EADY/D,GAKVy8B,EAAiB,UAAMtvB,EAAN,YAAe4B,EAAf,YAA+BqqE,GACpDjpE,EACAtR,IAGIskB,EAAG,UAAMm4B,YAAmBzsC,GAAzB,eACHylE,EAAM,aACVnnE,QACA0uC,GAAG,IAAIj8B,MAAO06D,UACd9xC,SACA73B,SACA6oE,QACAjpE,QACA1R,UACA4K,QACAC,SACAyG,cACIZ,GAAc,CAAEurE,YAAavrE,GAXvB,GAYNS,GAAW,CAAE+qE,SAAU/qE,EAAQ0vC,KAAK,MAZ9B,GAaN3vC,GAAW,CAAEA,YA0BnB4qE,GAAkB10B,KAAK,CACrB9iC,MACAmxD,SACAC,gBAbsB,WACtBuE,GAAyBK,IAAI5rE,KAA2BlD,QAAQ,CAAErK,QAElE9B,OAAO++B,aAAa,CAClBR,oBACAtvB,QACAnG,KAAM,QAQRwtE,kBA3BwB,SAACxtE,GACzB8xE,GAAyBK,IAAI5rE,KAA2BnD,QAAQ,CAC9D2C,aAAc/F,EACdhH,QAIF9B,OAAO++B,aAAa,CAClBR,oBACAz1B,YAsBN,SAASg0E,KAEPJ,GAAuB30B,OAGzB,SAAUyyB,GAAV,sFAGmB,OAHQ30E,EAA3B,EAA2BA,QACjBoJ,EAAoBpJ,EAApBoJ,MAAOnN,EAAa+D,EAAb/D,GAAI6O,EAAS9K,EAAT8K,KADrB,SAGyBw8B,YAAOouC,MAHhC,YAGQjzE,EAHR,wBAKI,OALJ,SAKU2yE,YAAIzrE,KAAiBtD,QAAQ,CACjCgC,cAAe5F,EAAS4C,OAAOA,OAAO+D,GACtCnN,QAPN,wCAiBe,OAJPmjB,EAAMte,IAAQ,UACby2C,YAAmBzsC,GADN,gBAEh
BA,EAAKxK,QAAQ,QAAS,UAf5B,oBAiBqBkpC,YAAKqmC,EAAc14E,IAAKioB,EAAK,CAC5CmxD,OAAQ,CACNnnE,WAnBR,QAiBI8tE,EAjBJ,+BAwBI,OAxBJ,2BAuBIz+E,QAAQoN,KAAK,+BAvBjB,UAwBUuvE,YAAIzrE,KAAiBrD,QAAQ,CAAErK,QAxBzC,0CA2BE,OA3BF,UA2BQm5E,YAAIzrE,KAAiBtD,QAAQ,CACjCgC,cAAe6uE,EAASj0E,KACxBhH,QA7BJ,wDAiCA,SAAU24E,GAAV,8FAQ+C,OARrB50E,EAA1B,EAA0BA,QAChBm3E,EAASn3E,EAATm3E,KACJt1E,GAAiB,EACjBE,GAAiB,EACjBD,GAAmB,EACnBE,GAAkB,EALxB,kBAQqDslC,YAAO5tB,MAR5D,OAYqB,OAJXnY,EARV,OASU61E,EATV,OAS8B71E,QAT9B,IAS8BA,OAT9B,EAS8BA,EAAUO,iBAC9Bu1E,EAVV,OAU6B91E,QAV7B,IAU6BA,OAV7B,EAU6BA,EAAUS,gBAVvC,UAY2BwnC,YAAKqmC,EAAc14E,IAAf,UAAuB2qB,IAAvB,iBAZ/B,QAkBI,OAlBJ,SAYY7e,EAZZ,EAYYA,KACRnB,GAAuB,OAAJmB,QAAI,IAAJA,OAAA,EAAAA,EAAO,sBAAsB,EAChDpB,GAAqB,OAAJoB,QAAI,IAAJA,OAAA,EAAAA,EAAO,oBAAoB,EAC5ClB,GAAqB,OAAJkB,QAAI,IAAJA,OAAA,EAAAA,EAAO,oBAAoB,EAC5CjB,GAAsB,OAAJiB,QAAI,IAAJA,OAAA,EAAAA,EAAO,qBAAqB,EAhBlD,UAkBUmyE,YAAI5uE,KAAgBH,QAAQ,CAChCvE,mBAAkBD,iBAAgBE,iBAAgBC,kBAAiBG,gBAAiBc,KAnB1F,QAsBQpB,GAAyC,OAAtBu1E,IAAgCt1E,GAErD4xE,KAEE5xE,GAAoBC,IAAwC,IAArBs1E,IAAgCr1E,GAEzE4xE,KA5BN,wBAwCI,OAxCJ,0BAuCIn7E,QAAQoN,KAAK,4BAvCjB,UAwCUuvE,YAAI5uE,KAAgBF,WAxC9B,aA2CM6wE,GAAQt1E,GAAkBE,GA3ChC,iBA4CI,OA5CJ,UA4CUpH,YAAM+kC,KA5ChB,QA6CI,OA7CJ,UA6CU01C,YAAI5uE,aAAgB,CAAE2wE,MAAM,KA7CtC,uDAkDO,SAAUtC,KAAV,iEACL,OADK,SACCyC,YAAUnuE,KAAgB/C,QAASsuE,IADpC,OAEL,OAFK,SAEC4C,YAAU3tE,KAAiBvD,QAASuuE,IAFrC,OAGL,OAHK,SAGC2C,YAAU9tE,KAA2BpD,QAAS0wE,IAH/C,OAIL,OAJK,SAICQ,YAAUnhD,IAAwB8gD,IAJnC,OAKL,OALK,UAKCK,YAAU9wE,KAAgBJ,QAASwuE,IALpC,QAML,OANK,UAMC2C,YAAM9C,IANP,yC,0CCxYM+C,GAAmC,G,4DCoEtCC,I,YAqIAC,I,YA0CAvgF,I,YAQAwgF,I,YAsBAC,I,YAgBAC,I,YAMOC,IAxRXC,GAA8B,IAC9BC,GAAsB,IAKtBC,GAAkC,IAGlCC,GAA4B,iBAAkB/9E,OAE9Cg+E,GAAuBh+E,OAAOi+E,2BAIhCC,GAAsB,EACtBC,GAAqB,EAgBnBC,GAAgB,SAAC11E,GACA,kBAAVA,IAdS,SAAC21E,GACrB,GAAuB,kBAAZA,EAAsB,CAC/B,IAAM59C,EAAet9B,SAASw9B,cAAT,iBAAiC16B,aAAQo4E,KAC9D,GAAI59C,EAAc,CAChB,IAAMzW,EAAUyW,EAAgCC,WArB7B,GAuBnB,OADCv9B,SAASw9B,cAAc,QAAwBzW,UAAYF,GACrD,GAGX,OAAO,EAMiBs0D,CAAc51E,EAAMuG,QAExCjP,OAAOq8B,UAKPkiD,GAAqB,WACrBR,IAC8B,YAA5BS,aAAaC,YACfD,aAAaE,qBAKbC,GAAkC,kBAAOZ,IACd,YAA5BS,aAAaC,YAGlB,SAAUnB,GAAOsB,EAA+Bj3D,GAAhD,yEAEqB,OAFrB,kBAE2B0nB,YACrBqmC,EAAc14E,IADW,UAEtB2qB,EAFsB,mCAEkBi3D,IAJjD,uBAEY91E,EAFZ,EAEYA,KAFZ,kBAOWA,GAPX,uCASIxK,QAAQoN,KAAK,4BAAb,MATJ,kBAUW,MAVX,sDAoBA,IAAMmzE,GAAkB,SACtBC,EAAiBt2E,EAA4Bu2E,GAE7C,IAAID,EAAME,QAAV,CAKA,IAAIC,EAAcH,EAAMI,aAClB/gF,EAAIqK,EAAaD,OAAb,UAAuBu2E,EAAM7vE,MAA7B,YAAsC6vE,EAAM7gE,OACrC,qBAAN9f,GACN2gF,EAAM98D,SAAW7jB,EAAE6jB,QACO,qBAAnB7jB,EAAE+gF,eAEZD,EAAc9gF,EAAE+gF,cAGlB,IAAMjhE,EAAO6gE,EAAM7gE,KAAK9X,QAAQ,KAAM,KAClC6b,EAAS88D,EAAM98D,OAAOwC,cACtBzjB,EAAK,UAAMkd,EAAN,cAAgBghE,GACnBE,EAAML,EAAMjF,SACdvsD,EAAO,iCACP8xD,GAAc,EACd3+E,GAAO,EAGX,OAAQq+E,EAAM98D,QACZ,IAAK,UACHvhB,GAAO,EACP,MAEF,IAAK,YAGL,IAAK,gBACH,OAEF,IAAK,QACH,GAAIq+E,EAAMO,UAAYN,EAEpB,OAEF,GAAyB,kBAArBD,EAAMQ,YAAuD,cAArBR,EAAMQ,WAEhD,OAEF,GAAIR,EAAMS,sBAER,OAEFx+E,EAAK,UAAMkd,EAAN,4BAA8BghE,EAA9B,KACL3xD,EAAO,oCACP8xD,GAAc,EACd,MAEF,IAAK,UACsB,aAArBN,EAAMQ,aACRt9D,EAAM,qBAAiB88D,EAAM98D,OAAOwC,gBAGtC8I,EAAO,8BACP8xD,GAAc,EACd,MAEF,IAAK,WACsB,YAArBN,EAAMQ,aACRt9D,EAAM,uBAAmB88D,EAAM98D,OAAOwC,gBAGxC8I,EAAO,2BACP8xD,GAAc,EACd,MAEF,QAEE,YADA9gF,QAAQoN,KAAR,+BAAqCozE,EAAM98D,SAU/C,OAAIvhB,IACkC,oBAAzBu9E,KACTv9E,EAAOu9E,GAAqBc,IAG1Br+E,GAGK,CACL++E,kBAAmBz+E,EACnB0+E,oBAAqB,CACnBt+C,KAAK,GAAD,OAAK29C,EAAMr3E,SAAX,cAAyBq3E,EAAM7vE,MAA/B,aAAyC6vE,EAAMt+D,OAA/C,eAA4DwB,EAA5D,aAAuE88D,EAAMvxD,MACjF4xD,IAAI,GAAD,OAAKA,GACRO,mBAAoBN,EACpB9xD,KAAMiwB,IAAejwB,EACrBxkB,KAAMg2E,GAERa,oBAAqB,SAAC19E,GAEpB,GADAA,EAAMC,iB
ACFD,EAAMivB,OAAQ,CAAC,IACTpoB,EAAS7G,EAAMivB,OAAfpoB,KACRs1E,GAAct1E,WArBxB,IA6BF,SAAUy0E,GAAU51D,EAAuBnf,GAA3C,uFAC+B,OAD/B,SACqC6mC,YAAKiuC,GAAQa,GAAoBx2D,GADtE,UAEoB,QADZi4D,EADR,SAEiD,kBAAdA,EAFnC,uBAGIthF,QAAQoN,KAAK,+BAHjB,6BAO2B,IAArBk0E,EAAUp2E,OAPhB,uBAQIlL,QAAQC,IAAI,4BARhB,0BAYQshF,EAAaC,aAAOxgE,aAAK,aAAcsgE,GAGvCG,EAAUF,EAAWh1E,QAAO,qBAAGw0E,UAA4BlB,MAC3D6B,EAAgBD,EACnBh1E,KAAI,SAAC+zE,GAAD,OAAYD,GAAgBC,EAAOt2E,EAAc01E,OACrDrzE,QAAO,SAAC9R,GAAD,YAAa0E,IAAN1E,KAERiS,EAAI,EApBf,aAoBkBA,EAAIg1E,EAAcx2E,QApBpC,iBA8BI,OA9BJ,EAuBQw2E,EAAch1E,GADhBw0E,EAtBN,EAsBMA,kBAAmBC,EAtBzB,EAsByBA,oBAAqBE,EAtB9C,EAsB8CA,oBAErB,IAAInB,aACvBgB,EACAC,GAEWQ,QAAUN,EA5B3B,UA8BUn/E,YAAMs9E,IA9BhB,QAoB4C9yE,GAAK,EApBjD,wBAkCEmzE,GAAsBx0E,aAAKk2E,GAAyBR,WAER,qBAAjCr/E,OAAOkgF,uBAAyClgF,OAAOkgF,wBAChEp9E,aAAaC,QAAQ,uBAArB,UAAgDo7E,KArCpD,yCA0CA,SAAUnhF,GAAImjF,EAAcx4D,GAA5B,yEACmB,OADnB,SACyB0nB,YAAKqmC,EAAc14E,IAAf,UAAuB2qB,EAAvB,0BAAsDw4D,IADnF,uBACUr3E,EADV,EACUA,KACoB,IAAxBo1E,IAAwE,kBAApCp1E,EAAKs3E,6BAC3ClC,GAAsBp1E,EAAKs3E,4BAH/B,kBAKSt3E,GALT,wCAQA,SAAU00E,GAAW71D,GAArB,uEAE0B,OAF1B,SAEgC0nB,YAAKryC,GAAK,SAAU2qB,GAFpD,YAEUnf,EAFV,yBAIM,OAJN,SAIYyyE,YAAIp2E,aAAyB,CAAE2D,kBAJ3C,YAMQm2E,MAEIn2E,EAAa43E,2BAA6BjC,IARtD,iBAUQ,OAVR,UAUc9uC,YAAKkuC,GAAW51D,EAAenf,GAV7C,YAYoC,IAAxBA,EAAawZ,OAZzB,qDAkBI,OAlBJ,UAkBUxhB,YAAMq9E,IAlBhB,gEAsBA,SAAUJ,KAAV,2EAEuD,OAFvD,SAE6D5C,YAAKl2E,MAFlE,OAKE,OALF,SAEUkB,EAFV,EAEUA,QACA8hB,EAAkB9hB,EAAlB8hB,cAHV,SAKQnnB,YAAMo9E,IALd,OASE,OAFAO,KAAuBr7E,aAAaqgB,QAAQ,yBAA2Bg7D,IACvEI,KARF,UASQlvC,YAAKmuC,GAAY71D,GATzB,yCAgBA,SAAU+1D,GAAV,gFAEoB,OAFW73E,EAA/B,EAA+BA,QACrBmpB,EAA4BnpB,EAA5BmpB,SAAUrH,EAAkB9hB,EAAlB8hB,cADpB,SAE0B0nB,YAAKryC,GAAK,MAAO2qB,GAF3C,OAEQ04D,EAFR,OAGErxD,EAASqxD,GAHX,wCAMO,SAAU1C,KAAV,iEACL,OADK,SACCP,YAAMK,IADP,OAEL,OAFK,SAECN,YAAUv4E,KAAqBqH,QAASyxE,IAFzC,wCCvSP,IAAM4C,GAAyB,CAC7B,GACA,0BACA,0BACA,2BAGWC,GAAoB,SAACC,GAAD,OAAsBF,GAAuBt/C,SAASw/C,IAClFA,EAASn0D,SAAS,oBAClBm0D,EAAStiE,WAAW,wBACpBsiE,EAASn0D,SAAS,mBAClBm0D,EAAStiE,WAAW,+B,YCiCRuiE,I,YAOPC,I,YAIAC,I,YAoOAC,I,YAwEAC,I,YAUOC,IA5UXC,GAAqB7pD,cAEpB,SAAS8pD,KACdhhF,OAAOoD,iBAAiB,SAAS,WAC/B29E,GAAmB9F,IAAI32E,aAAwB,CAAEyC,gBAAgB,QAEnE/G,OAAOoD,iBAAiB,QAAQ,WAC9B29E,GAAmB9F,IAAI32E,aAAwB,CAAEyC,gBAAgB,QAI9D,SAAU05E,KAAV,uEAEY,OAFZ,SAEkB5F,YAAKkG,IAFvB,OAGH,OADMjG,EAFH,gBAGGG,YAAIH,GAHP,8DAOP,SAAU4F,KAAV,iEACU,OADV,SACgB7F,YAAKxuE,KAAgBH,SADrC,uCAC+CrG,QAAQmC,iBADvD,wCAIA,SAAU24E,GAAc14E,EAAqBC,GAA7C,qFACMlI,OAAOihF,QADb,iDAI6B,OAJ7B,SAImC9zC,YAAO+zC,MAJ1C,2CAKQ,OALR,SAKc7xC,YAAKqxC,IALnB,kEAMO,GANP,QAIQnzD,EAJR,KAUG,SAASpvB,EAAE8E,GAAG,IAAIqsC,EAAEtrB,EAAE9C,EAAEsjB,EAAEvhC,EAAEk+E,OAAOnhF,OAAOihF,QAAQh+E,EAAEA,EAAEm+E,GAAG,GAAGn+E,EAAE8G,KAAK,SAASiB,EAAE9E,EAAEme,GAAG,SAAS69B,EAAE/jD,EAAE8E,GAAG,IAAIqsC,EAAErsC,EAAE6G,MAAM,KAAK,GAAGwlC,EAAE9lC,SAASrL,EAAEA,EAAEmxC,EAAE,IAAIrsC,EAAEqsC,EAAE,IAAInxC,EAAE8E,GAAG,WAAW9E,EAAE0rB,KAAK,CAAC5mB,GAAGq6C,OAAOx/B,MAAMyxB,UAAUnkB,MAAMikB,KAAKO,UAAU,OAAO1uB,EAAE/iB,EAAEknB,cAAc,WAAWtmB,KAAK,kBAAkBmiB,EAAE0G,OAAM,EAAG1G,EAAEsuB,IAAItpC,EAAEm7E,SAAS,oBAAoB78C,EAAErmC,EAAEg/C,qBAAqB,UAAU,IAAIzB,WAAW4lC,aAAapgE,EAAEsjB,GAAG,IAAIud,EAAE9+C,EAAE,SAAI,IAASohB,EAAE09B,EAAE9+C,EAAEohB,GAAG,GAAGA,EAAE,UAAU09B,EAAEw/B,OAAOx/B,EAAEw/B,QAAQ,GAAGx/B,EAAEvjD,SAAS,SAASL,GAAG,IAAI8E,EAAE,UAAU,MAAM,YAAYohB,IAAIphB,GAAG,IAAIohB,GAAGlmB,IAAI8E,GAAG,WAAWA,GAAG8+C,EAAEw/B,OAAO/iF,SAAS,WAAW,OAAOujD,EAAEvjD,SAAS,GAAG,kBAAkB8wC,EAAE,kMAAkMxlC,MAAM,KAAKka,EAAE,EAAEA,EAAEsrB,EAAE9lC,OAAOwa,IAAIk+B,EAAEH,EAAEzS,EAAEtrB,IAAI/gB,EAAEm+E,GAAGv3D,KAAK,CAAC7e,EAAE9E,EAAEme,KAAKphB,EAAEk+E,KAAK
,GAAt3B,CAA03Bh+E,SAASnD,OAAOihF,SAAS,IAEp5BjhF,OAAOihF,QAAQl3E,KAAK,8CAA+C,CACjEs3E,SAAU,0BACVG,OAAQ,SAACP,GACH/4E,GACF+4E,EAAQQ,SAASv5E,MAIjBw5E,GAAsBh9C,OAAW67C,GAAkBp9E,SAASq9E,UACnD,SACfxgF,OAAOihF,QAAQU,SAEb92E,cAAO,SAAC9O,GAAD,YAAqB0B,IAAV1B,GAAiC,OAAVA,IACvC,CACE6lF,IAAK,YACLC,aAAcn9C,KAAS,KAAO,kBAC9Bo9C,UAAWp9C,KAAS,KAAO,oBAC3Bq9C,MAAOr9C,KAAS,KAAO,uBAEvBs9C,0BAA2BN,EAVlB,SAUgD,KACzDO,kBAAmBP,EAXV,SAWwC,KACjDQ,kBAAmBR,EAZV,SAYwC,KACjDS,UAAWT,EAbF,SAagC,KAEzCU,aAAc,kBAEdznD,gBAAiBpN,EAAKzE,QACtBu5D,qBAAsBp6E,EACtBq6E,kBAAmBp6E,GAAc,cACjCq6E,kBAAmBh1D,EAAI,UACvBi1D,wBAAyBj1D,EAAK,mBAC9Bk1D,oBAAmB,UAAEl1D,EAAKm1D,sBAAP,aAAE,EAAqBl5E,OAC1Cm5E,cAAa,UAAEp1D,EAAKhlB,cAAP,aAAE,EAAaq6E,OAC5BC,eAAc,UAAEt1D,EAAKhlB,cAAP,aAAE,EAAau6E,QAC7BC,gBAAiBx1D,EAAKhlB,OAAOy6E,SAC7BC,aAAc11D,EAAK21D,QACnBC,WAAY51D,EAAK61D,MACjBC,gBAAiB91D,EAAK+1D,WACtBC,gBAAiBh2D,EAAKi2D,WACtBC,mBAAoBl2D,EAAKm2D,cACzBC,kBAAmBp2D,EAAKq2D,aACxBC,mBAAoBt2D,EAAKu2D,YACzBC,wBAAyBx2D,EAAKy2D,iBAC9BC,gBAAiB12D,EAAK22D,SACtBC,iBAAkB52D,EAAK62D,UACvBC,mBAAoB92D,EAAK+2D,YACzBC,sBAAuBh3D,EAAKi3D,eAC5BC,oBAAqBl3D,EAAKm3D,aAC1BC,sBAAuBp3D,EAAKq3D,eAC5BC,sBAAuBt3D,EAAKu3D,eAC5BC,iBAAkBx3D,EAAKptB,UACvB6kF,2BAA4Bz3D,EAAK03D,oBACjCC,kBAAmB33D,EAAK23D,kBACxBC,gBAAiB53D,EAAK43D,gBACtBC,qBAAsB73D,EAAK63D,qBAC3BC,qBAAsB93D,EAAK83D,qBAC3BC,wBAAyB/3D,EAAK+3D,wBAC9BC,sBAAuBh4D,EAAKi4D,WAAWh8E,OACvCi8E,mBAAoBl4D,EAAK,iBACzBm4D,qBAAsBn4D,EAAK,mBAC3Bo4D,mBAAoBp4D,EAAK,iBACzBq4D,oBAAqBr4D,EAAK,kBAC1Bs4D,yBAA0Bt4D,EAAK,uBAC/Bu4D,0BAA2Bv4D,EAAK,wBAChCw4D,gCAAiCx4D,EAAK,8BACtCy4D,2BAA4Bz4D,EAAK,yBACjC04D,kBAAmB14D,EAAK,gBACxB24D,oBAAqB34D,EAAK,kBAC1B44D,mBAAoB54D,EAAK,iBACzB64D,0BAA2B74D,EAAK,wBAChC84D,mBAAoB94D,EAAK,eACzB+4D,yBAA0B/4D,EAAK,qBAC/Bg5D,4BAA6Bh5D,EAAK,wBAClCi5D,uBAAwBj5D,EAAK,mBAC7Bk5D,qBAAsBl5D,EAAK,iBAC3Bm5D,0BAA2Bn5D,EAAK,sBAChCo5D,uBAAwBp5D,EAAK,mBAC7Bq5D,sBAAuBr5D,EAAK,kBAC5Bs5D,mBAAoBt5D,EAAK,eAEzBu5D,eAAc,UAAEv5D,EAAKw5D,mBAAP,aAAE,EAAkBC,WAClCC,yBAA0B15D,EAAK25D,sBAC5Br8E,QAAO,qBAAGs8E,aAA2B39E,OACxC49E,2BAA4B75D,EAAK25D,sBAC9Br8E,QAAO,qBAAGs8E,aAA4B39E,OACzC69E,gBAAiB95D,EAAKi4D,WACtB8B,iBAAkB/5D,EAAKg6D,eAlG/B,yCA4HA,IAAMC,GAAiC,SAAjCA,EAAkC,GAAD,IACrCv/E,EADqC,EACrCA,YAAaw/E,EADwB,EACxBA,aAAcxpE,EADU,EACVA,KAAM5V,EADI,EACJA,eAAgB4c,EADZ,EACYA,IADZ,OAEjCywD,EAAc14E,IAAd,UAAqBqL,EAArB,oBAAuD,CAC3Dyf,QAAS,CACP,gBAAiB,qBACjB8tD,OAAQ,YAEVQ,OAAQ,CACN0E,OAAQ,SACR4M,QAASz/E,EACTgW,OACAgH,OAEF+C,iBAAiB,IAChBmH,MAAK,YAAe,IAAZrmB,EAAW,EAAXA,KAEH6+E,EAAsC,kBAAlB7+E,EAAK1B,SAE3BwgF,EAAa9+E,EAMjB,GAL2B,kBAAhBA,EAAKkZ,QAAuC,OAAhBlZ,EAAKkZ,SAE1C4lE,EAAa,MAGI,OAAfA,EACF,OAAID,GAAcF,EAAe,EACxBD,EAAe,CACpBC,aAAcA,EAAe,EAC7Bx/E,cACAgW,OACA5V,eAAgBS,EAAK1B,SACrB6d,QAGG,CAAE5c,kBAEX,IAAMgf,EAAOve,EAAKue,KAAKxc,QAAO,SAACk3C,GAAD,OAAyBA,EAAE,KAAOzc,OAChE,MAAO,CACLp9B,WAAYY,EAAK++E,aAAe,KAChCx/E,iBACAgf,WAED4H,OAAM,WAGP,OADA3wB,QAAQoN,KAAK,0BAA2BrD,GACjC,SAkBIy/E,GAAmC,SAACC,GAE/C,IAAM5/E,EAAuD,GAE7D4/E,EACG38D,QACA0wC,UACA/9D,SAAQ,YAA4D,IAAD,mBAAzD0oB,EAAyD,KAAnDxB,EAAmD,KAA9C+iE,EAA8C,KAA/BC,EAA+B,KAArBhqE,EAAqB,KAC5DiqE,EAAc//E,EAAiBse,IAAS,CAC5CuhE,cAAe,EACfC,SAAU,EACV/gE,cAAe,GACfT,KAAM,GACNxB,IAAK,GACLhH,KAAM,IAEFkqE,EAAUD,EAAYF,cAAgBA,EACtCI,EAA4B,CAChC3hE,KAAMyhE,EAAYzhE,MAAQA,EAC1BxB,IAAKkjE,EAAUljE,EAAMijE,EAAYjjE,IACjC+iE,cAAeG,EAAUH,EAAgBE,EAAYF,cACrDC,SAAUC,EAAYD,SAAWA,EACjChqE,KAAMkqE,EAAUlqE,EAAOiqE,EAAYjqE,KACnCiJ,cAAeghE,EAAYhhE,cAAco2B,OAAOr4B,IAElD9c,EAAiBse,GAAQ2hE,KAG7B,IAAMhgF,EAAwBigF,aAE5BN,EACG38D,QACA0wC,UACA/wD,KAAI,2CACPA,KAAI,SAAC0b,GAAD,OAAUte,EAAiBse,MACjC,MAAO,CACLte,mBACAC,0BAIJ,SAAUw4E,GAAV,sGAKe,OALY/6E,EAA3B,EAA2BA
,QACjB8hB,EAAkB9hB,EAAlB8hB,cACF2gE,EAFR,UAE0B3gE,EAF1B,kDAKqB0nB,YAAKqmC,EAAc14E,IAAKsrF,EAAc,CACrDxgE,QAAS,CACP,gBAAiB,qBACjB8tD,OAAQ,YAEV5tD,iBAAiB,IAVvB,OAKI+0D,EALJ,8BAcI,OAdJ,yBAaIz+E,QAAQoN,KAAK,uDAbjB,UAcUuvE,YAAI12E,KAAiB4H,WAd/B,0CAuBE,OANM9E,EAAe01E,EAASj0E,KAAKy/E,eAC3B9gF,EAAas1E,EAASj0E,KAAtBrB,SACFQ,EAAc80E,EAASj0E,KAAK+d,aAC5Bxe,EAAiB00E,EAASj0E,KAAK1B,SAC/BohF,EAAwBngF,IAAmBogC,KArBnD,UAuBQwyC,YAAI12E,KAAiB2H,QAAQ,CACjC7E,eACAI,WACA+gF,wBACAvgF,iBA3BJ,QAkCyD,OAJjDgW,EAAOxW,EACPwd,EAAM0C,EA/Bd,UAkC+D0nB,YAAKm4C,GAAgB,CAChFv/E,cACAw/E,aAAc,EACdxpE,OACA5V,iBACA4c,QAvCJ,WAkCQwjE,EAlCR,QA0CM1L,EAASj0E,KAAK4/E,qBA1CpB,iBA2CI,OA3CJ,UA2CUtL,YAAMuD,GAAe5D,EAASj0E,KAAK+d,aAA9B,OAA4C4hE,QAA5C,IAA4CA,OAA5C,EAA4CA,EAAwBvgF,YA3CnF,cA8C4B,OAAtBugF,QAAsB,IAAtBA,OAAA,EAAAA,EAAwBphE,QAAxB,OAAgCohE,QAAhC,IAAgCA,OAAhC,EAAgCA,EAAwBvgF,aA9C9D,iBAiDI,OAFM6/E,EAAaD,GAAgBW,EAAuBphE,MAClDlf,EAA4C4/E,EAA5C5/E,iBAAkBC,EAA0B2/E,EAA1B3/E,sBAhD9B,UAiDU6yE,YAAIx2E,aAAuB,CAC/ByD,WAAYugF,EAAuBvgF,WACnCC,mBACAC,2BApDN,QAwDE,OAxDF,UAwDQ6yE,YAAIv2E,aAA4B,CACpC2D,gBAAsC,OAAtBogF,QAAsB,IAAtBA,OAAA,EAAAA,EAAwBpgF,iBAAkBA,KAzD9D,sDA6DA,IAAMsgF,GAA4B,SAACr8E,GAAD,wBAA4BA,IAC9D,SAASs8E,GAAT,GAA8D,IAArC/iF,EAAoC,EAApCA,QACfyG,EAAezG,EAAfyG,IAAKvQ,EAAU8J,EAAV9J,MACD,oCAARuQ,GAKJxJ,aAAaC,QAAQ4lF,GAA0Br8E,GAAMO,KAAKiZ,UAAU/pB,IAGtE,SAAU8kF,GAAV,0EAME,OANyBh7E,EAA3B,EAA2BA,SACbyF,SACVnI,SAASg+B,KAAKtiC,UAAY,aAE1BsE,SAASg+B,KAAKtiC,UAAY,GAJ9B,SAMQ2B,YAAyC,IAAnC68E,IANd,OAOE,OAPF,SAOQpC,YAAI91E,aAAiC,CAAEmG,SAAUzF,EAAQyF,YAPjE,wCAUO,SAAUw1E,KAAV,iEACL,OADK,SACC1D,YAAM4D,IADP,OAEL,OAFK,SAEC5D,YAAMqD,IAFP,OAGL,OAHK,SAGCtD,YAAU54E,KAAiB0H,QAAS20E,IAHrC,OAIL,OAJK,SAICxD,YAAMO,IAJP,OAKL,OALK,UAKCR,YAAUr4E,KAAiB8jF,IAL5B,QAML,OANK,UAMCzL,YAAUj4E,KAA2B27E,IANtC,yC,yBC7TGgI,I,YAWOC,IA1CJC,GAA2B,2BAExC,SAASC,GAAT,GAAwF,IAAlDnjF,EAAiD,EAAjDA,QAC5B0F,EAAkB1F,EAAlB0F,MAAOC,EAAW3F,EAAX2F,OACf,GAAIxL,OAAO0f,WAEL1f,OAAO0f,WAAWnU,QAAUA,GAASvL,OAAO0f,WAAWlU,SAAWA,GACpExL,OAAO0f,WAAWyC,0BAAyB,EAAM5W,EAAOC,OAErD,CAEL,IAAMy9E,EAAaC,eACb9oE,EAAkBhnB,KAAKgpB,MAAM7W,GAAO/M,WACpC6hB,EAAmBjnB,KAAKgpB,MAAM5W,GAAQhN,WAE1CyqF,EAAW7oE,kBAAoBA,GAC/B6oE,EAAW5oE,mBAAqBA,GAEhC8oE,aAAc,CAAE/oE,kBAAiBC,sBAKvC,SAAS+oE,KACHppF,OAAO0f,WACT1f,OAAO0f,WAAWyC,0BAAyB,EAAO,EAAG,GAErDknE,aAAiB,CAAC,kBAAmB,qBAIzC,SAAUR,GAAV,+EAA2BhjF,EAA3B,EAA2BA,SACrB7F,OAAOmlC,gBADb,gBAII,OAFAnlC,OAAOmlC,kBAFX,SAIU01C,YAAKx1C,KAJf,OAKYikD,EAAmBzjF,EAAnByjF,eACRtpF,OAAO8C,aAAaC,QAAQgmF,GAA0B,QACtD/oF,OAAOghB,SAASklB,KAAOojD,EAP3B,wCAWO,SAAUR,KAAV,iEACL,OADK,SACC3L,YAAUh5E,KAA8B6kF,IADzC,OAEL,OAFK,SAEC7L,YAAU94E,KAAsB+kF,IAFjC,OAGL,OAHK,SAGCjM,YAAUx8B,IAAuBkoC,IAHlC,wC,gBCrDUU,IAAV,SAAUA,KAAV,iEACL,OADK,SACCnM,YAAM0D,IADP,OAEL,OAFK,SAEC1D,YAAM1C,IAFP,OAGL,OAHK,SAGC0C,YAAM0L,IAHP,wCCDP,IAAMU,GAAiBC,cAoBVC,GATiB,WAC5B,IAAMA,EAAQC,YACZC,EAJAC,YAAQC,YAAgBN,MAQ1B,OADAA,GAAepnC,IAAImnC,IACZG,EAGYK,G,+ECvBrB/pF,OAAOC,EAAIA,KACXD,OAAOgwC,OAAS/vC,K,8CCAH+pF,GAAmB,SAAC9jD,GAI/B,OAAO,IAAI3W,SAAQ,SAAC06D,EAASC,GAC3B,IAAMC,EAAUhnF,SAASkiB,cAAc,QACvC8kE,EAAQ7kE,aAAa,MAAO,cAC5B6kE,EAAQ7kE,aAAa,OAAQ,YAC7B6kE,EAAQ7kE,aAAa,OAAQ4gB,GAE7BikD,EAAQjwD,OAAS,WACf+vD,KAGFE,EAAQC,QAAU,WAChBF,EAAOroC,MAAM,sBAAD,OAAuB3b,MAGrC/iC,SAASg6C,qBAAqB,QAAQ,GAAG33B,YAAY2kE,O,qHCUnDE,GAAyC,SAACrsE,GAAD,MACjB,cAA5BA,EAAWlL,cAGAw3E,GAAiD,CAC5D1jC,QAAS,CAYP2jC,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,8BAC3BzS,OAAQ,OACR3pC,QAjBO,SAiBCqd,GACN,MAA+B,oBAApBjhB,KAAK0tF,WAER,UAAN,OAAiB1tF,KAAK0tF,WAAWzsE,GAAc,OAAS,IAEnD,IAET0sE,UAxBO,SAwBG1sE,GAAwB,MAGNA,EAAlB/M,cAHwB,SAIhC,OAAQo5E,GAAmBrsE,IAAeof,QAAQnsB,IAUpD05E,aAAa,EACbn4E,eAAiB,SAACw
L,GAAD,OAA6BqsE,GAAmBrsE,GAAc,EAAI,GAInFysE,WA3CO,SA2CIzsE,GACT,MAAmC,aAA5BA,EAAWlL,cAEpB83E,eA9CO,SA8CQ5sE,GACb,OAAOjhB,KAAK2tF,UAAU1sE,GAClBhf,KACA,gCAC8B,WAA9Bgf,EAAW9M,gBAA+B,yCAE1C,sBASRyZ,UAAW,CAOT4/D,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,yBAC3BzS,OAAQ,QACR3pC,QAAS,iBAAM,YACf+pF,UAAW,kBAAM,GASjBC,aAAa,EACbn4E,eAAgB,kBAAM,GACtBo4E,eAAgB,iBAAM,sBAExBC,MAAO,CAOLN,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,yBAC3BzS,OAAQ,WACR3pC,QAAS,iBAAM,sBACf+pF,UAAW,kBAAM,GASjBC,aAAa,EACbn4E,eAAgB,kBAAM,GACtBo4E,eAAgB,iBAAM,sBAExBE,OAAQ,CAONP,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,8BAC3BzS,OAAQ,YACR3pC,QAAS,iBAAM,IACf+pF,UAAW,kBAAM,GASjBC,aAAa,EACbn4E,eAAgB,kBAAM,GACtBo4E,eAAgB,iBAAM,sBAExB/uC,MAAO,CAOL0uC,sBAAsB,EACtBC,eAAgB,IAAIztC,OAAO,8BAC3BzS,OAAQ,OACRogD,UAAW,kBAAM,GACjB/pF,QAAS,iBAAM,iBASfgqF,aAAa,EACbn4E,eAAgB,kBAAM,IACtBo4E,eAAgB,iBAAM,sBA8CxBG,aAAc,CAOZR,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,yBAC3BzS,OAAQ,QACR3pC,QAZY,WAaV,MAAO,YAET+pF,UAfY,WAgBV,OAAO,GAUTC,aAAa,EACbn4E,eAAgB,kBAAM,GACtBw4E,YAAa,IACbJ,eAAgB,iBAAM,mCAExBK,MAAO,CAOLV,sBAAsB,EAGtBC,eAAgB,IAAIztC,OAAO,yBAC3BzS,OAAQ,QACR3pC,QAAS,iBAAM,YACf+pF,UAAW,kBAAM,GASjBC,aAAa,EACbn4E,eAAgB,kBAAM,GACtBw4E,YAAa,GACbJ,eAAgB,iBAAM,4BAExBM,SAAU,CAKRN,eAAgB,iBAAM,qBAGtBtgD,OAAQ,QAKRogD,UAAW,kBAAM,GAKjB/pF,QAAS,iBAAM,YACf6R,eAAgB,kBAAM,GACtBm4E,aAAa,EAEbH,eAAgB,IAAIztC,OAAO,0BAE7BouC,SAAU,CACRP,eAAgB,iBAAM,qBACtBF,UAAW,kBAAM,GACjB/pF,QAAS,iBAAM,YACf2pC,OAAQ,OACRqgD,aAAa,EACbn4E,eAAgB,kBAAM,GACtBg4E,eAAgB,IAAIztC,OAAO,2BCzUlBquC,GAAqB,SAChCptE,EAAwBqtE,GACpB,IAEFj5E,EAEE4L,EAFF5L,cACAG,EACEyL,EADFzL,gBAEE2Q,EAAM,GAsBV,OApBAA,GAAO3Q,EACHA,EAAgB/T,WAChB8rF,GAAuBtsE,EAAWnN,cAAclQ,QAAQqd,GAE/B,kBAAlB5L,IACT8Q,GAAG,WAAQwE,mBAAmBtV,KAGhC8Q,GAAO,YAEHmoE,IACFnoE,GAAO,aAG+B,eAApClF,EAAWvM,uBACVuM,EAAWvM,sBAAwBuM,EAAWnM,SAAkC,cAAvBmM,EAAWnM,WAEvEqR,GAAO,aAGFA,G,UChCIooE,GAA0B,WAAK,IAAK,E,mBCSpCC,GAAS,SAAC,GAGT,IAFZC,EAEW,EAFXA,cACAC,EACW,EADXA,aAKMC,EAAe1rF,OAAOs4B,OAAO31B,OAG/BmrC,EAAI09C,EAAchqF,aAGhBmqF,EAAOvyF,KAAKqD,IAAQ,GAAJqxC,EAAS,GAC/BA,GAAK69C,EAGL,IAAIC,GAAcD,EAAO,GAAK,EAIxB99C,EAAI29C,EAAcxlB,YAAc,GAClCl4B,EAAID,IACN+9C,IAAe99C,EAAID,GAAK,EACxBC,EAAID,GAKFC,EAAI49C,EAAe,KACrBE,IAAe99C,EAAK49C,EAAe,IAAO,EAC1C59C,EAAI49C,EAAe,IAGrB,IAAMlkD,EAAQikD,EAAe,SAAW,WAClCvsF,EAAWusF,EAAe,SAAW,UAE3C,OACE,yBACE5sF,UAAU,uBACV6D,MAAO,CACL+mC,SAAUqE,EACV89C,eAGF,kBAAC,KAAD,CAAM1sF,SAAUA,IACfsoC,I,oBCtDMqkD,GAAuC,SAAC5pF,GACnD,OAAIA,EAAM6pF,QACD9rF,OAAOU,QAAQC,QAAQZ,QAAQgsF,oBAClC/rF,OAAOU,QAAQC,QAAQZ,QAAQisF,uCAC/B/pF,EAAM8zD,SACH/1D,OAAOU,QAAQC,QAAQZ,QAAQgsF,oBAClC/rF,OAAOU,QAAQC,QAAQZ,QAAQksF,qCAC/BhqF,EAAM6zD,OACH91D,OAAOU,QAAQC,QAAQZ,QAAQgsF,oBAClC/rF,OAAOU,QAAQC,QAAQZ,QAAQmsF,mCAE9BlsF,OAAOU,QAAQC,QAAQZ,QAAQgsF,qB,oBCZ3BI,GAAiB,SAAC9nE,EAAYC,GACzC,OAAID,IAAMC,GAGHK,OAAOqoB,MAAM3oB,IAAgBM,OAAOqoB,MAAM1oB,ICW7C8nE,GAAyB,GACzBC,GAA6B,GAC7BC,GAAmB,SAACpvF,EAAaT,GACrC,IAAM6P,EAAM7P,EACZ,OAAIS,IAAQT,GAC0B,qBAAzB2vF,GAAgB9/E,KACzB8/E,GAAgB9/E,GAAO,IAAIizB,KAAKgtD,kBAAa9uF,EAAW,CACtD+uF,aAAa,EACbC,sBAAuBvvF,EACvBwvF,sBAAuBjwF,KAIpB2vF,GAAgB9/E,IAEb,IAARpP,GACsC,qBAA7BmvF,GAAoB//E,KAC7B+/E,GAAoB//E,GAAO,IAAIizB,KAAKgtD,kBAAa9uF,EAAW,CAC1D+uF,aAAa,EACbC,sBAAuBvvF,EACvBwvF,sBAAuBjwF,KAIpB4vF,GAAoB//E,IAKtB,IAAIizB,KAAKgtD,kBAAa9uF,EAAW,CACtC+uF,aAAa,EACbC,sBAAuBvvF,EACvBwvF,sBAAuBjwF,KAIrBkwF,GACJ,SACEC,EACAC,EACAC,GAHF,OAKA,SAAC/wF,GACC,GAAqB,kBAAVA,EACT,MAAO,IAGT,IASIgxF,EACAC,EAVEC,EAAiBL,EAAa7wF,GACpC,GAA8B,kBAAnBkxF,EACT,OAAOA,EAGT,GAAyB,OAArBJ,EACF,OAAOA,EAAiBviD,OAAO2iD,GAKjC,IAA4B,IAAxBH,EACFC,EAAOD,EACPE,EAAOF,MACF,CACLC,EAAO,EACP,IAAM1zF,EAAM4zF,EAAiB,GAAKA,EAAiBA,EAEjDD,EADE3zF,EAAM,
IACD,EACEA,EAAM,GACR,EACEA,EAAM,EACR,EACEA,EAAM,GACR,EACEA,EAAM,IACR,EACEA,EAAM,KACR,EACEA,EAAM,KACR,EAEA,EAIX,OAAOizF,GAAiBS,EAAMC,GAAM1iD,OAAO2iD,KAalCC,GAAgB,SAAC,GAOZ,IANhBlvE,EAMe,EANfA,WACAlV,EAKe,EALfA,KACA3L,EAIe,EAJfA,MACAgU,EAGe,EAHfA,YACAC,EAEe,EAFfA,aACAnU,EACe,EADfA,KAEMM,EAAqBqC,aAAYutF,MACjC3vF,EAAuBoC,aAAYwtF,MAF1B,EAKyB7rF,oBAAoB,kBAAM7E,QALnD,mBAKRkwF,EALQ,KAKMS,EALN,OAQO9rF,qBARP,mBAQRrE,EARQ,KAQHowF,EARG,OASO/rF,qBATP,mBASR9E,EATQ,KASH8wF,EATG,OAYyBhsF,mBAAiBpE,GAZ1C,mBAYRqwF,EAZQ,KAYMC,EAZN,OAciBlsF,oBAAkB,GAdnC,mBAcRmsF,EAdQ,KAcEC,EAdF,OAeiCpsF,mBAAmC,MAfpE,mBAeRsrF,EAfQ,KAeUe,EAfV,OAoBX5vE,EADFhM,qBAnBa,OAmBI,EAnBJ,EAsBT67E,EAAuChoD,mBAC3C,kBAAM8mD,GAAqBC,EAAcC,EAAkB76E,KAC3D,CAAC46E,EAAc56E,EAAe66E,IAG1BiB,EAAuBpuF,iBAAOmuF,GAC9BE,EAA6B,SACjCC,EACAC,EACAC,GAEAJ,EAAqB/tF,QAAU4sF,GAC7BqB,EACAC,EACAC,IA+GJ,MAAO,CACLL,oBACAM,oCA7G0CnsF,uBAC1C,SAACosF,EAAgBC,GACf,GAAIlC,GAAejvF,EAAKkxF,IAAWjC,GAAe1vF,EAAK4xF,GACrD,OAAOP,EAAqB/tF,QAI9ButF,EAAOc,GACPb,EAAOc,GAEP,IAAML,EAAkBrxF,KAAuBK,IAC7CC,EACAmxF,EACAC,EACAlxF,EACAiU,EACAD,GACA,SAAAm9E,GACEb,EAAgBa,KAKlB/wF,EACAC,GAIF6vF,GAAgB,kBAAMW,KAEtB,IASIO,EATEC,EAAeR,EAAgBI,GAC/BK,EAAeT,EAAgBK,GAGrC,GAA4B,kBAAjBG,GAAqD,kBAAjBC,EAE7C,OADAV,EAA2BC,EAAiBnB,EAAkB76E,GACvD87E,EAAqB/tF,QAK9B,GAAI+I,EAAK5L,MAAQ4L,EAAKrM,IAEpB8xF,GAAe,OACV,IAAuB,IAAnBv8E,EAETu8E,EAAcv8E,MACT,CAEL,IAAI08E,EASFH,GANAG,EADEF,IAAiBC,EACXr1F,KAAKC,IAAIm1F,GAETp1F,KAAKC,IAAIo1F,EAAeD,IAGtB,IACI,EACLE,EAAQ,GACH,EACLA,EAAQ,EACH,EACLA,EAAQ,GACH,EACLA,EAAQ,IACH,EACLA,EAAQ,KACH,EACLA,EAAQ,KACH,EAEA,EAIlB,IAAIT,EAAsBpB,EAY1B,OAVI0B,IAAgBb,IAEhBO,EADEM,EAAc,EACM,KAEAjC,GAAiBiC,EAAaA,GAEtDX,GAAoB,kBAAMK,KAC1BN,EAAYY,IAEdR,EAA2BC,EAAiBC,EAAqBM,GAC1DT,EAAqB/tF,UAE9B,CACE2tF,EACA17E,EACA9U,EACAT,EACAQ,EACAM,EACAJ,EACAiU,EACAD,EACA3T,EACAsL,EAAK5L,IACL4L,EAAKrM,IACLowF,IAOFW,iB,SC5QEmB,GAAe,CACnBnqD,EAAG,IACH0d,EAAG,EACH59B,EAAG,G,SCAQsqE,GAAkB3W,IAAOnrD,IAAV,6FAAGmrD,CAAH,yCACTC,YAAU,IAIhB2W,GAAiB5W,IAAOnrD,IAAV,4FAAGmrD,CAAH,gEAMd6W,GAAkB7W,IAAOnrD,IAAV,6FAAGmrD,CAAH,gEAMf8W,GAAa9W,IAAOnrD,IAAV,wFAAGmrD,CAAH,MAGV+W,GAAoB/W,IAAOgX,KAAV,+FAAGhX,CAAH,mBAIjBiX,GAAcjX,IAAOnrD,IAAV,yFAAGmrD,CAAH,gEAOXkX,GAAgBlX,IAAOnrD,IAAV,2FAAGmrD,CAAH,mJAGf,qBAAGzuC,SAGD,qBAAG4lD,WAA+B,GAAM,QAWxCC,GAAkCpX,IAAOnrD,IAAV,6GAAGmrD,CAAH,8BAK/BqX,GAAgBrX,IAAOnrD,IAAV,2FAAGmrD,CAAH,mFAKJ,qBAAGzuC,SAGZ+lD,GAAiBtX,IAAOgX,KAAV,4FAAGhX,CAAH,sBAIduX,GAAiBvX,IAAOgX,KAAV,6FAAGhX,CAAH,qCAKdwX,GAAmBxX,IAAOnrD,IAAV,+FAAGmrD,CAAH,0CCnEvByX,GAAc,GAEdC,GAAWjyC,aACfG,MACA,gBAAoBtzC,EAApB,EAAGqlF,gBAAH,IAAoChzF,KAApC,MAA8D,CAC5D2N,iBACA3N,UAFF,MAA2C8yF,GAA3C,MAuBaG,GAjBI,SAAC,GAAmB,IAAjB/tF,EAAgB,EAAhBA,GAAIkxB,EAAY,EAAZA,MAAY,EACHpzB,aAAYoC,uBAAY,SAAAsI,GAAK,OAAIqlF,GAASrlF,EAAO,CAAExI,SAAO,CAACA,KAApFyI,EAD4B,EAC5BA,eAAgB3N,EADY,EACZA,KAChBqS,EAAgBrS,EAAhBqS,MAAOsB,EAAS3T,EAAT2T,KAEf,GAAItB,GAASsB,GAAqC,IAA7BzS,OAAOlB,KAAKA,GAAM4M,OACrC,OACE,kBAAC+lF,GAAD,KACGtgF,EAAM+jB,GADT,IACkBziB,EAAKyiB,IAK3B,IAAM/U,EAAO1T,EAAeyoB,GAE5B,OAAO,kBAACu8D,GAAD,KAAiBtxE,ICAb6xE,GAAmB,SAAC,GAanB,IAZZ1uF,EAYW,EAZXA,UACA8M,EAWW,EAXXA,cACA2C,EAUW,EAVXA,aACAzG,EASW,EATXA,OACA2lF,EAQW,EARXA,WACAjpF,EAOW,EAPXA,SACA+mF,EAMW,EANXA,kBACAmC,EAKW,EALXA,iBACA99E,EAIW,EAJXA,mBACA+9E,EAGW,EAHXA,iBACAzC,EAEW,EAFXA,aACA0C,EACW,EADXA,WAEMliF,EAAYpO,aAChBoC,uBAAY,SAACsI,GAAD,OAAgBuzC,aAAgBvzC,EAAO,CAAExI,GAAIV,MAAc,CAACA,KAEjDmJ,EAAgDyD,EAAjE4hF,gBAAgDO,EAAiBniF,EAAhCoiF,cAOnCC,GAAgC,IAAhBN,IAAsBE,EAGtCK,EAAa,IAAI5uE,KAAK5a,GAAYopF,GAMlCK,EAAmBvwF,OAAOU,QAAQC,QAAQZ,QAAvB,6BACDmO,EAAcsiF,aArB3B,EAwBoClwC,eAAvC9hC,EAxBG,EAwBHA,iBAAkBC,EAxBf,EAwBeA,
iBAEpBgyE,EAAe/wF,iBAAO,MAmB5B,OAlBAI,qBAAU,WACJ2wF,EAAa1wF,SACfC,OAAOmiC,GAAGC,WAAWquD,EAAa1wF,QAAS,CACzCsiC,WAAY,GACZC,kBAAkB,EAClBC,kBAAkB,EAClBC,mBAAoB,KACpBC,mBAAoB,KACpBC,kBAAkB,EAClBC,iBAAiB,EACjBC,iBAAiB,EACjBC,oBAAqB,EACrBC,oBAAqB,EACrBljB,MAAO,cAGV,CAAC6wE,IAGF,yBAAK5xF,UAAWG,KACd,uBADwB,kBAEb6R,EAFa,aAKxB,0BACEhS,UAAU,4BACVkC,MAAOozE,cAAyB,EAAMjmE,IAErCmiF,EACGlc,cAAyB,EAAOjmE,GAChCsQ,EAAiB8xE,IAEvB,6BACA,0BACEzxF,UAAU,4BACVkC,MAAOszE,aAAwBrmE,EAAWE,IAEzCmiF,EACGniF,EAAcqc,QAAQ/rB,WACtBigB,EAAiB6xE,IAEvB,6BACA,0BAAMzxF,UAAU,8BAA8B2uF,GAC9C,6BACA,yBAAK3uF,UAAU,wBAAwBF,IAAK8xF,GAC1C,yBAAK5xF,UAAU,iCACZsxF,EAAaplF,KAAI,SAAC2lF,EAAa1lF,GAC9B,IAQIjP,EARE+O,EAAgBP,EAAeS,GAE/Bw+B,EAAQp/B,EAAOU,GACf6lF,EH7GwB,SAACviD,GAGzC,IAAKA,EACH,OAAOugD,GAET,IAAMiC,EAAUxiD,EAAIjoC,QAJG,oCAIqB,SAACge,EAAGqgB,EAAG0d,EAAG59B,GAAV,OAAgBkgB,EAAIA,EAAI0d,EAAIA,EAAI59B,EAAIA,KAE1EqE,EAAS,4CAA4CsM,KAAK27D,GAIhE,OAHKjoE,GACHrqB,QAAQoN,KAAK,sBAAuB0iC,GAE/BzlB,EACH,CACA6b,EAAGljB,SAASqH,EAAO,GAAI,IACvBu5B,EAAG5gC,SAASqH,EAAO,GAAI,IACvBrE,EAAGhD,SAASqH,EAAO,GAAI,KACrBgmE,GG4FgBkC,CAAarnD,GAEnBsnD,EAA2C,IAA9B5+E,EAAmB1I,QACjC0I,EAAmB8uB,SAASl2B,GAGjC,GAAIulF,EACFt0F,EAAQ,UACH,IAAoB,IAAhBg0F,EAAmB,CAC5B,IAAMgB,EAAoB/iF,EAAU2a,OAAO7f,KAAKinF,GAEhDh0F,EAAQg1F,EAAoBA,EAAkB/lF,EAAI,GAAK,UAEvDjP,EAAQiS,EAAUgjF,mBAAmBhmF,GAGvC,OACE,kBAAC,WAAD,CAAUsB,IAAKokF,GACN,IAAN1lF,GAAW,6BAEZ,0BACEjK,MAAO+J,EACPjM,UAAWG,KACT,sBACA8xF,EAAa,WAAa,gBAE5B3xF,QAAS,SAAC8C,GACR+tF,EAAiBllF,EAAe7I,IAElCgvF,KAAK,SACLvuF,MAAO,CAAE8mC,SACT0nD,SAAU,GAEV,2BACEryF,UAAS,oCAA+BqP,EAAcsiF,YACtD9tF,MAAO,CACLipC,gBAAgB,QAAD,OAAUglD,EAAInsD,EAAd,YAAmBmsD,EAAIzuC,EAAvB,YAA4ByuC,EAAIrsE,EAAhC,YAAqCisE,EAArC,OAGjB,+BACE,wBAAI1xF,UAAU,0BACZ,wBAAIA,UAAU,8BAInB,IACD,kBAAC,GAAD,CAAYiD,GAAIV,EAAW4xB,MAAOhoB,KAGpC,0BACEjK,MAAO+J,EACPjM,UAAWG,KACT,wBACC8xF,GAAc,UAEjB3xF,QAAS,SAAC8C,GACR+tF,EAAiBllF,EAAe7I,IAElCgvF,KAAK,SACLvuF,MAAO,CAAE8mC,SACT0nD,SAAU,GAETrD,EACC9xF,YC5JPo1F,GAAiB,SAAC,GAMnB,IALVjjF,EAKS,EALTA,cACAmiF,EAIS,EAJTA,cACAvpF,EAGS,EAHTA,SACAopF,EAES,EAFTA,WACAliF,EACS,EADTA,UACS,EACsCsyC,eAAvC9hC,EADC,EACDA,iBAAkBC,EADjB,EACiBA,iBAEpB6xE,EAAa,IAAI5uE,KAAK5a,GAAYopF,GAExC,OACE,6BACE,0BAAMnvF,MAAOozE,cAAyB,EAAMjmE,IACzCmiF,EACGlc,cAAyB,EAAOjmE,GAChCsQ,EAAiB8xE,IAEvB,kBAAC,GAAD,UACA,0BAAMvvF,MAAOszE,aAAwBrmE,EAAWE,IAC7CmiF,EAAgBniF,EAAcqc,QAAQ/rB,WAAaigB,EAAiB6xE,MAMhEc,GAAoB,SAAC,GAcpB,IAbZhwF,EAaW,EAbXA,UACA8M,EAYW,EAZXA,cACA9D,EAWW,EAXXA,OACA2lF,EAUW,EAVXA,WACAjpF,EASW,EATXA,SACA+mF,EAQW,EARXA,kBACAmC,EAOW,EAPXA,iBACA99E,EAMW,EANXA,mBACA+9E,EAKW,EALXA,iBACAzC,EAIW,EAJXA,aACA0C,EAGW,EAHXA,WACAmB,EAEW,EAFXA,cACAC,EACW,EADXA,cAEMjB,GAAgC,IAAhBN,IAAsBE,EACtCjiF,EAAYpO,aAChBoC,uBAAY,SAACsI,GAAD,OAAgBuzC,aAAgBvzC,EAAO,CAAExI,GAAIV,MAAc,CAACA,KAEjDmJ,EAAgDyD,EAAjE4hF,gBAAgDO,EAAiBniF,EAAhCoiF,cAEzC,OACE,kBAAC,GAAD,KACE,kBAAC,GAAD,KACE,kBAAC,GAAD,KAAe5C,GACf,kBAAC,GAAD,CACEt/E,cAAeA,EACfmiF,cAAeA,EACfvpF,SAAUA,EACVopF,WAAYA,EACZliF,UAAWA,KAGf,kBAAC,GAAD,KACE,kBAAC,GAAD,KACGmiF,EAAaplF,KAAI,SAAC2lF,EAAa1lF,GAC9B,IAMIjP,EANE+O,EAAgBP,EAAeS,GAC/Bw+B,EAAQp/B,EAAOU,GAEfgmF,EAC0B,IAA9B5+E,EAAmB1I,QAAgB0I,EAAmB8uB,SAASl2B,GAGjE,GAAIulF,EACFt0F,EAAQ,UACH,IAAoB,IAAhBg0F,EAAmB,CAC5B,IAAMgB,EAAoB/iF,EAAU2a,OAAO7f,KAAKinF,GAEhDh0F,EAAQg1F,EAAoBA,EAAkB/lF,EAAI,GAAK,UAEvDjP,EAAQiS,EAAUgjF,mBAAmBhmF,GAEvC,OACE,kBAAC,GAAD,CACEw+B,MAAOA,EACPrqC,QAAS,SAAA8C,GACP+tF,EAAiBllF,EAAe7I,IAElCgvF,KAAK,SACLC,SAAU,EACV9B,YAAa0B,EACbxkF,IAAKokF,GAEL,kBAAC,GAAD,CAAiB3vF,MAAO+J,EAAe0+B,MAAOA,IAC9C,kBAAC,GAAD,CAAY1nC,GAAIV,EAAW4xB,MAAOhoB,IAClC,kBAAC,GAAD,KAAmB8lF,GAAcjD,EAAkB9xF,QAIzD,k
BAAC,GAAD,OAEF,kBAAC,GAAD,KACGs1F,EACAC,MC/FEC,GAAc,SAAC,GAgBd,IAfZvzE,EAeW,EAfXA,WACA5c,EAcW,EAdXA,UACA8M,EAaW,EAbXA,cACA2C,EAYW,EAZXA,aACAzG,EAWW,EAXXA,OACA2lF,EAUW,EAVXA,WACAjpF,EASW,EATXA,SACA+mF,EAQW,EARXA,kBACA37E,EAOW,EAPXA,mBACAs/E,EAMW,EANXA,sBACAvB,EAKW,EALXA,iBACAzC,EAIW,EAJXA,aACA0C,EAGW,EAHXA,WACAmB,EAEW,EAFXA,cACAC,EACW,EADXA,cAEM1B,EAAkBhwF,aACtBoC,uBAAY,SAACsI,GAAD,OAAgBuzC,aAAgBvzC,EAAO,CAAExI,GAAIV,IAAawuF,kBAAiB,CACrFxuF,KAIE4uF,EAAmB,SAACpb,EAA8B3yE,GACtDA,EAAMC,iBACN,IAAM2yE,EAAuB5yE,EAAM8zD,UAAY9zD,EAAM6pF,QAC/CpX,EAAwBD,aAAyB,CACrDE,cAAeib,EACf19E,qBACA0iE,uBACAC,yBAEF2c,EAAsB9c,IAGxB,MAAkC,WAA9B12D,EAAW9M,eAEX,kBAAC,GAAD,CACE9P,UAAWA,EACXyP,aAAcA,EACd3C,cAAeA,EACf9D,OAAQA,EACR2lF,WAAYA,EACZjpF,SAAUA,EACV+mF,kBAAmBA,EACnBmC,iBAAkBA,EAClB99E,mBAAoBA,EACpB+9E,iBAAkBA,EAClBzC,aAAcA,EACd0C,WAAYA,EACZmB,cAAeA,EACfC,cAAeA,IAMnB,kBAAC,GAAD,CACElwF,UAAWA,EACXyP,aAAcA,EACd3C,cAAeA,EACf9D,OAAQA,EACR2lF,WAAYA,EACZjpF,SAAUA,EACV+mF,kBAAmBA,EACnBmC,iBAAkBA,EAClB99E,mBAAoBA,EACpB+9E,iBAAkBA,EAClBzC,aAAcA,EACd0C,WAAYA,K,UCzFLuB,GAAgB,SAAC,GAAD,IAC3BC,EAD2B,EAC3BA,mBACAC,EAF2B,EAE3BA,oBACAC,EAH2B,EAG3BA,qBACAC,EAJ2B,EAI3BA,sBAJ2B,OAM3B,yBAAKhzF,UAAU,0BACb,kBAAC,KAAD,CACEA,UAAU,gCACVM,QAASuyF,EACTxyF,SAAS,OACTM,aAAa,WACbD,eAAe,yKAGjB,kBAAC,KAAD,CACEV,UAAU,gCACVM,QAASwyF,EACTzyF,SAAS,QACTM,aAAa,YACbD,eAAe,0KAGjB,kBAAC,KAAD,CACEV,UAAU,gCACVM,QAASyyF,EACT1yF,SAAS,SACTM,aAAa,gBACbD,eAAe,0OAIjB,kBAAC,KAAD,CACEV,UAAU,gCACVM,QAAS0yF,EACT3yF,SAAS,UACTM,aAAa,iBACbD,eAAe,6L,8GCxCRuyF,GAAwB,SACnChpF,EACAipF,GAFmC,OAGpBjpF,EAAKiC,KAAI,SAAC2oD,GAAW,IAAD,eACJA,GAAxBlgC,EAD4B,KACdpoB,EADc,WAE7BtM,EAAiB,GACnBkzF,EAAa,EACbC,EAAa,EAkBjB,OAjBA7mF,EACGL,KAAI,SAAChP,EAAOiP,GAAR,MAAe,CAAE+lE,UAAWghB,EAAqB/mF,GAAIjP,YAEzDqvB,QAAQ0wC,UACR/9D,SAAQ,YAA2B,IAAxBgzE,EAAuB,EAAvBA,UAAWh1E,EAAY,EAAZA,MAChBg1E,EAIDh1E,GAAS,GACXk2F,GAAcl2F,EACd+C,EAAK+qB,KAAKooE,KAEVD,GAAcj2F,EACd+C,EAAK+qB,KAAKmoE,IARVlzF,EAAK+qB,KAAK,MAWV,CACJ2J,GADF,OAEK10B,OAQMozF,GAAsB,SACjCl0E,EAAwBhQ,EAAwBE,EAChDikF,GAEA,IARoBn0F,EAQdysF,EAAc0H,EAAc1H,WAA4CzsE,GAD3E,EAKCA,EAFFpL,YAAaw/E,OAHZ,MAGmClkF,EAAcsiF,WAHjD,EAID3+E,EACEmM,EADFnM,QAGF,GAAIA,GAAuB,cAAZA,IAd+B,gBAA1B7T,EAcmCkQ,EAAc/Q,QAbzD,YAATa,IACuB,IAAvBA,EAAKgoB,QAAQ,MAad,MAAO,OAIT,IAAIqsE,EAAmBD,EAOvB,MANyB,YAArBC,GAA2D,IAAzBrkF,EAAUiE,aAC9CogF,EAAmB,QAEI,YAArBA,GAAkC5H,IACpC4H,EAAmB,QAEdA,GAGHC,GAAqB18D,aACzBG,aAAO,SACP,kBAAMw8D,KAAMvyF,OAAOU,QAAQyJ,OAAOpK,QAAQ2mC,eAK/B8rD,GAAkB,SAACpoF,GAAD,OAC7BA,EAAOW,KAAI,SAACy+B,GAAD,OAAW+oD,KAAM/oD,GAAOipD,IAAIH,KAAsB,IAAKlkD,UAGvDskD,GAAsB,SACjCC,EAAwBN,GADS,OAE7BM,EACF3yF,OAAOU,QAAQC,QAAQZ,QAAQ6yF,gCACV,YAArBP,EACEryF,OAAOU,QAAQC,QAAQZ,QAAQ8yF,2BAC/B7yF,OAAOU,QAAQC,QAAQZ,QAAQ+yF,yB,mBCtDtBC,GAzBW,SACxBC,EACAC,GAC6D,IAAD,EACvBC,cAAU,GADa,mBACrDC,EADqD,KAC1CC,EAD0C,KAGtDz0F,EAAMe,iBAAoB,MAE1B2zF,EAAiBrxF,uBAAY,SAACkgD,GAAgB,IAC1CnpD,EAAMmpD,EAAE6R,UAARh7D,EACFu6F,EAAWpxC,EAAEqP,YAAqD,IAAzC0hC,EAASlzF,QAAQiO,UAAUulF,aACpDC,EAAeF,EAAWv6F,EAGhC,GAFAq6F,EAAgBI,GAEZA,GAAgB70F,EAAIoB,QAAS,CAAC,IACxB4C,EAAWqwF,EAASjzF,QAAQ+oC,wBAA5BnmC,OACRhE,EAAIoB,QAAQ2C,MAAMyK,KAAlB,UAA4BpU,EAA5B,MACA4F,EAAIoB,QAAQ2C,MAAM2K,MAAlB,sBAAyCimF,EAAzC,OACA30F,EAAIoB,QAAQ2C,MAAMynB,IAAlB,UAA2BxnB,EAAS,EAApC,SAED,IAEH,MAAO,CAACwwF,EAAWx0F,EAAK00F,ICvBpBI,GAAoB,SAAC90F,EAAK5F,EAAG6oC,EAAU8xD,GAC3C/0F,EAAIoB,QAAQ2C,MAAMyK,KAAlB,UAA4BpU,EAA5B,MACA4F,EAAIoB,QAAQ2C,MAAM2K,MAAlB,sBAAyCu0B,EAAzC,OACAjjC,EAAIoB,QAAQ2C,MAAMynB,IAAMupE,GCLpB1b,GAAYC,IAAOnrD,IAAV,2FAAGmrD,CAAH,oBAuBA0b,GAnBkBC,sBAAW,SAC1Clb,EACA/5E,GAF0C,OAI1C,kBAAC,GAAD,CAAWA,IAAKA,EAAKE,UAAU,uBAAuBqoC,cAAY,4BAChE,0B
AAMroC,UAAU,gCAAhB,oDAEE,6BACA,uBAAGqnC,KAAK,6DAA6DhV,OAAO,SAAS+U,IAAI,sBAAsBiB,cAAY,sCAA3H,0BAEE,uCALJ,qBAQE,uBAAGhB,KAAK,0DAA0DhV,OAAO,SAAS+U,IAAI,sBAAsBiB,cAAY,mCAAxH,aARF,SCTE2sD,GAAqB,CACzBC,QAAS,UACTC,SAAU,UACVC,MAAO,WAEI1B,GAAqB,SAACtwE,GAAD,OAAY6xE,GAAmB7xE,IAAW,MAEtEiyE,GAAiB,CACrBH,QAAS,UACTC,SAAU,UACVC,MAAO,WAEIE,GAAiB,SAAClyE,GAAD,OAAYiyE,GAAejyE,IAAW,MAE9DmyE,GAAe,CACnBL,QAAS,UACTC,SAAU,UACVC,MAAO,WAEI1b,GAAW,SAACt2D,GAAD,OAAYmyE,GAAanyE,IAAW,MAEtDg2D,GAAYC,IAAOnrD,IAAV,wEAAGmrD,CAAH,uGASTmc,GAAQnc,IAAOnrD,IAAV,oEAAGmrD,CAAH,6KAIK,qBAAGvxC,cACG,qBAAG2tD,UACd,qBAAG7qD,SAOCoqD,yBAAW,WAExBj1F,GAFwB,IACtBoyE,EADsB,EACtBA,UAAW/uD,EADW,EACXA,OAAQwlB,EADG,EACHA,MADG,OAIxB,kBAAC,GAAD,CAAW7oC,IAAKA,GACboyE,GACC,kBAACqjB,GAAD,CACE1tD,WAAY4rD,GAAmBtwE,GAC/BqyE,OAAQH,GAAelyE,GACvBwnB,MAAO8uC,GAASt2D,IAEfwlB,OC+BH8sD,GAA2B,SAAC,GAcD,IAb/Bt2E,EAa8B,EAb9BA,WACAhQ,EAY8B,EAZ9BA,UACAE,EAW8B,EAX9BA,cACAikF,EAU8B,EAV9BA,cACAJ,EAS8B,EAT9BA,qBACAwC,EAQ8B,EAR9BA,sBACA5B,EAO8B,EAP9BA,cACA6B,EAM8B,EAN9BA,cACAC,EAK8B,EAL9BA,UACAC,EAI8B,EAJ9BA,iBACAlH,EAG8B,EAH9BA,aACA9sC,EAE8B,EAF9BA,gBACAD,EAC8B,EAD9BA,gBAEMk0C,EAA0C,cAA5B32E,EAAWlL,aACzBq4C,EAAsBwpC,EAAc,EAAI,EAExClK,EAAc0H,EAAc1H,WAA4CzsE,GACxEq0E,EAAmBH,GAAoBl0E,EAAYhQ,EAAWE,EAAeikF,GALrD,EAW1Bn0E,EAJFjL,qBAP4B,MAOS,SAArBs/E,IACVsC,EARsB,IAW1B32E,EAFFhI,uBAT4B,SAYxB1U,EAAsC,WADxC0c,EADF9M,eAV4B,EAuE1B8M,EAxDFhL,qBAf4B,MAeZwhF,EAfY,IAuE1Bx2E,EAvDF/K,uBAhB4B,MAgBV,EAhBU,IAuE1B+K,EAtDF9K,gCAjB4B,WAuE1B8K,EArDF7K,yBAlB4B,WAuE1B6K,EApDF5K,oBAnB4B,MAmBb4K,EAAWjd,OAASmN,EAAcnN,MAnBrB,IAuE1Bid,EAnDF3K,0BApB4B,MAoBP,GApBO,IAuE1B2K,EAlDF1K,qBArB4B,MAqBZ,SArBY,IAuE1B0K,EAjDFzK,wBAtB4B,MAsBTghF,EAtBS,IAuE1Bv2E,EAhDFxK,iCAvB4B,WAuE1BwK,EA/CFvK,0BAxB4B,MAwBc,YAArB4+E,EAxBO,IAuE1Br0E,EA9CFtK,6BAzB4B,WAuE1BsK,EA7CFrK,oCA1B4B,WAuE1BqK,EA5CFpK,oCA3B4B,WAuE1BoK,EA3CFnK,wBA5B4B,MA4BT,EA5BS,IAuE1BmK,EA1CFlK,wBA7B4B,MA6BT,EA7BS,KAuE1BkK,EAzCFnL,0BA9B4B,OA8BR,CAAC,KAAM,MA9BC,MAuE1BmL,EAxCFjK,2BA/B4B,OA+BP,GA/BO,MAuE1BiK,EAtCFhK,2BAjC4B,OAiCc,YAArBq+E,EACjB,IACmB,IAAlBt/E,EACC,IACA,GArCsB,GAuC5BkB,GAgCE+J,EAhCF/J,qBAvC4B,GAuE1B+J,EA/BF9J,0BAxC4B,cAuE1B8J,EA9BF7J,iCAzC4B,cAuE1B6J,EA7BF5J,sCA1C4B,cAuE1B4J,EA5BF3J,yBA3C4B,OA2CT,EA3CS,MAuE1B2J,EA3BF1J,wBA5C4B,cAuE1B0J,EA1BFzJ,iCA7C4B,OA6CDvU,OAAOU,QAAQyJ,OAAOpK,QAAQ2mC,WA7C7B,MAuE1B1oB,EAzBFxJ,iCA9C4B,OA8CD,EA9CC,MAuE1BwJ,EAxBFvJ,yBA/C4B,OA+Ca,SAArB49E,GAAoD,YAArBA,EA/CvB,MAuE1Br0E,EAvBFtJ,yBAhD4B,OAgDTg+E,GAAoBC,EAAeN,GAhD1B,MAuE1Br0E,EAtBFrJ,4BAjD4B,OAiDe,YAArB09E,IAAmCM,EAjD7B,MAuE1B30E,EArBFpJ,mCAlD4B,OAkDC,OAlDD,MAuE1BoJ,EApBFnJ,iCAnD4B,OAmDD,GAnDC,MAuE1BmJ,EAnBFlJ,6BApD4B,OAoDL9U,OAAOU,QAAQyJ,OAAOpK,QAAQuoD,KApDzB,MAuE1BtqC,EAlBFjJ,6BArD4B,OAqDL,EArDK,MAuE1BiJ,EAjBFhJ,wBAtD4B,WAuD5BC,GAgBE+I,EAhBF/I,uBAvD4B,GAuE1B+I,EAfF9I,6BAxD4B,OAwDL,EAxDK,MAuE1B8I,EAdF7I,6BAzD4B,OAyDLnV,OAAOU,QAAQyJ,OAAOpK,QAAQ60F,KAzDzB,MAuE1B52E,EAbF5I,8BA1D4B,OA0DJ,EA1DI,GA2D5BC,GAYE2I,EAZF3I,eA3D4B,GAuE1B2I,EAXF1I,kCA5D4B,OA4DA,EA5DA,MAuE1B0I,EAVFzI,kCA7D4B,OA6DA41C,EA7DA,GA8D5B31C,GASEwI,EATFxI,2BACAC,GAQEuI,EARFvI,sCA/D4B,GAuE1BuI,EANFtI,+BAjE4B,OAiEH,GAjEG,MAuE1BsI,EALFrI,+BAlE4B,OAkEH,GAlEG,MAuE1BqI,EAJFpI,yBAnE4B,OAmETI,EAnES,MAuE1BgI,EAHFnI,+BApE4B,OAoEH,GApEG,MAuE1BmI,EAFFlI,+BArE4B,OAqEHxU,EAAmB,GAAK,GArErB,MAuE1B0c,EADFjI,yBAtE4B,OAsETC,EAtES,GAwE9B,MAAO,CACL5L,OAAQuoF,EAAgBH,GAAgB12B,YAAQ9oD,IAAkBA,EAGlEm5C,SAAUwoC,EAAc,EAAI1hF,EAC5Bg7C,kBAAmB/6C,EACnBk5C,WAAYj5C,EACZpS,MAAO4zF,OAAcl3F,EAAY2V,EACjCk6C,YAAaj6C,EACbpC,OAAQqC,EACRhC,OAAQtD,EAAU2a,OAAOrX,OACzBkyD,UAAWjwD,EAEXg4C,sBAAqBopC,GAAqBnhF,EAC1Cg4C,sBAAsBi/B,GAAqB/2E,EAC3C+3C,WAAW,E
ACXC,YAAY,EACZC,sBAAuBh4C,EACvBs5C,sBAAuBr5C,EACvBihF,YAAaphF,EACb05C,UAAWt5C,EACXu5C,UAAWunC,EAAc,EAAI7gF,EAC7BgY,WAAYjZ,GACZiiF,OAASH,GAAerzF,OAAoB7D,EAAY+vF,EACxDhgC,YAAcmnC,GAAerzF,EAAoB,EAAIyS,GAGrD86C,QAAU97C,GAAiB2hF,EAAoB10F,OAAO+0F,cAAgB,KAItE1sC,YAAar0C,GACbghF,cAAe/gF,GAMfovD,WAAYnvD,GAIZ+gF,kBAAmB9gF,GACnB24C,wBAAwB29B,GAAqBr2E,GAC7CwvD,UAAWvvD,GAGX64C,SAAU54C,GAIV03C,kBAAmBz3C,GACnBw3C,kBAAmBv3C,GACnBo4C,UAAWn4C,GACXo4C,UAAWn4C,GACXq4C,aAAcp4C,GACdq4C,oBAAqBp4C,GACrB06C,UAAUqlC,GAAsB3+E,EAChCk2C,kBAAmBr3C,GACnB44C,cAAe34C,GACf44C,cAAe34C,GACfs6C,UAAUslC,GAAsB3/E,GAChCkgF,gBAAiBjgF,GACjB04C,cAAez4C,GACf24C,cAAe14C,GACf02C,eAAgBz2C,GAChB02C,QAASz2C,GACTu2C,mBAAoBt2C,GACpB61C,oBAAqB51C,GACrB61C,oBAAqB51C,GAErB61C,+BAAgC51C,GAChCs0C,WAAYgoC,EACZoD,SAAU1K,EAEV17B,KAAM,CACJh2D,EAAG,CACDi2D,eAAgBt5C,GAGhB85C,OAAQ4lC,KAAQ3lC,WAChB7B,eAAgBj4C,GAChB25C,UAAUqlC,GAAsB/+E,GAChCq5C,mBAAoB,SAACrqC,GAAD,OAAwBA,EAAWywE,eAAen3E,WAAW,YAC7EwiC,EAAgB97B,GAChB67B,EAAgB77B,KAGtBb,EAAG,CACDoxE,SAAU1K,EACVz7B,eAAgBn5C,GAChB+3C,eAAgB93C,GAChBw5C,UAAUqlC,GAAsB5+E,GAEhCk5C,mBANC,SAMkBlrC,GASjB,OARkB0wE,EAAU,CAG1B13F,KAAK20D,MAAM,GAAGC,aAAa,GAG3B50D,KAAK20D,MAAM,GAAGC,aAAa,IAEtB5+B,CAAUhP,QAwCduxE,GAAe,SAAC,GAyBf,IAxBZt3E,EAwBW,EAxBXA,WACAhQ,EAuBW,EAvBXA,UACAE,EAsBW,EAtBXA,cACAqnF,EAqBW,EArBXA,sBACAC,EAoBW,EApBXA,eACA3kF,EAmBW,EAnBXA,aAEAzP,EAiBW,EAjBXA,UACA2wF,EAgBW,EAhBXA,qBACAtG,EAeW,EAfXA,aACAf,EAcW,EAdXA,UACAr8E,EAaW,EAbXA,qBACAonF,EAYW,EAZXA,wBACAjB,EAWW,EAXXA,cACAkB,EAUW,EAVXA,8BAEA3F,EAQW,EARXA,WACAjpF,EAOW,EAPXA,SACA6uF,EAMW,EANXA,uBACAC,EAKW,EALXA,YACAnB,EAIW,EAJXA,UACAjH,EAGW,EAHXA,aACAqI,EAEW,EAFXA,UACA3F,EACW,EADXA,WAEMrpF,EAAsBjH,aAAYk2F,MAClCC,EAAgBn2F,aAAYo2F,MAC5BttF,GAAqB,OAAbqtF,QAAa,IAAbA,OAAA,EAAAA,EAAe9nF,WAAYD,EAAUlM,GAAKi0F,EAAgB,KAElE3gE,EAAWx1B,aAAY2gD,MALlB,EAOkCD,eAArCI,EAPG,EAOHA,gBAAiBD,EAPd,EAOcA,gBACnB0xC,EAAgB7H,GAAuBz5E,GACvC0jF,EAAqB,UAAMnzF,EAAN,qBAErBixF,EAAmBH,GAAoBl0E,EAAYhQ,EAAWE,EAAeikF,GAO7EQ,EAAgB3kF,EAAU9Q,IAAM,GAA0B,YAArBm1F,EACrC39E,EAAmBg+E,GAAoBC,EAAeN,GAEtD5xD,EAAe/gC,iBAAuB,MAEtCu2F,EAAuBj0F,uBAAY,YAIlC,IAHLuJ,EAGI,EAHJA,MAAOC,EAGH,EAHGA,OACPwjB,EAEI,EAFJA,SACAknE,EACI,EADJA,8BAEAT,EAAwB,CACtBlqF,QACAC,SACAwjB,WACAvjB,SAAUrK,EACV80F,oCAED,CAAC90F,EAAWq0F,IAGTU,EAAkBz2F,mBAElB02F,EAAqB12F,kBAAO,GAE5B22F,EAAc32F,kBAAO,GAErB42F,EAAwB52F,iBAAsB,MAE9C62F,EAAuB72F,iBAAO,GAE9B82F,EAAwB92F,iBAAO,GAE/B+2F,EAAsB/2F,mBAEtBiC,EAAWC,eACX80F,EAAmB92F,aAAY+2F,MAE/BC,EAAwB50F,uBAAY,WACxCo0F,EAAmBr2F,SAAU,EACzBo2F,EAAgBp2F,SAGlBo2F,EAAgBp2F,QAAQstE,cAAc,CAGpCvH,WAAY,OAKdnkE,EADE+0F,EACO1yF,eAEA2L,aAA2B,CAAE7N,GAAIV,OAE3C,CAACA,EAAWO,EAAU+0F,IAxEd,EH3UE,WAAO,IAAD,EACoBxD,cAAU,GAD9B,mBACZ2D,EADY,KACAC,EADA,KAGbn4F,EAAMe,iBAAO,MAgBnB,MAAO,CAACm3F,EAAYl4F,EAdG,SAACoyE,EAAW7uB,EAAGtgB,GAA8C,IAApCm1D,EAAmC,uDAAtBtD,GAC3D,GAAK1iB,GAKL,GAAIpyE,EAAIoB,QAAS,CACf+2F,GAAiB,GADF,MAED50C,EAAE6R,UAARh7D,EAFO,EAEPA,EAERg+F,EAAWp4F,EAAK5F,EAAG6oC,EAvBF,cAejBk1D,GAAiB,KG8Y0CE,GA1EpD,oBA0EJC,GA1EI,MA0EiBC,GA1EjB,MA0EgCC,GA1EhC,MA+ELlE,GAAWvzF,iBAAO,CACtBgJ,QACAsF,YACAnH,sBACAC,WACA4uF,gCAEAkB,wBACAjB,yBACAwB,oBACAlB,uBACAJ,YACA3F,eA3FS,GAgGP6C,GAAkBtyD,EAAcwyD,IAhGzB,qBA+FTmE,GA/FS,MA+FIC,GA/FJ,MA+FsBC,GA/FtB,MAkGXC,2BAAgB,WACdtE,GAASlzF,QAAQ2I,MAAQA,EACzBuqF,GAASlzF,QAAQiO,UAAYA,EAC7BilF,GAASlzF,QAAQ+G,SAAWA,EAC5BmsF,GAASlzF,QAAQ21F,8BAAgCA,EACjDzC,GAASlzF,QAAQ8G,oBAAsBA,EACvCosF,GAASlzF,QAAQ62F,sBAAwBA,EACzC3D,GAASlzF,QAAQ41F,uBAAyBA,EAC1C1C,GAASlzF,QAAQo3F,iBAAmBA,GACpClE,GAASlzF,QAAQk2F,qBAAuBA,EACxChD,GAASlzF,QAAQ81F,UAAYA,EAC7B5C,GAASlzF,QAAQmwF,WAAaA,IAC7B,CACDxnF,EACAsF,EACAnH,EACAC,E
ACA4uF,EACAkB,EACAjB,EACAwB,GACAlB,EACAJ,EACA3F,IAGF,IAAMwE,GAAmB90F,aAAY43F,MACrCD,2BAAgB,WACd,GAAI92D,GAAgBA,EAAa1gC,UAAYo2F,EAAgBp2F,UAAY0rF,EAAc,CACrF,IAAMgM,EAAuBnD,GAAyB,CACpDt2E,aACAhQ,YACAE,gBACAikF,gBACAJ,uBACAwC,wBACA5B,gBACA6B,gBACAC,YACAC,oBACAlH,eACA9sC,kBACAD,oBAGF21C,EAAmBr2F,SAAU,EAE7B,IAAM23F,EAAc,eACfD,EADe,CAMlB3xB,WAAY,CAACmtB,GAASlzF,QAAQ81F,UAAW5C,GAASlzF,QAAQmwF,YAE1D3sB,kBARkB,SAShBthE,EAAmB+iD,GAKnB,IAAM2yC,EAActB,EAAYt2F,QAC5B,KACAilD,EAGA2yC,IADoB1E,GAASlzF,QAAQ+G,UAEvC8uF,EAAY+B,IAIhB7zB,oBAxBkB,WA2BkB,OAA9BmvB,GAASlzF,QAAQ+G,UACnB8uF,EAAY,OAGhBzyB,aA/BkB,SA+BLvc,GAUX,GAAIwvC,EAAmBr2F,QAAS,CAC9Bq2F,EAAmBr2F,SAAU,EAC7B,IAAM63F,EAAShxC,EAAQmK,aACjBxlD,EAAQnS,KAAKgpB,MAAMw1E,EAAO,IAC1BpsF,EAASpS,KAAKgpB,MAAMw1E,EAAO,KA/cc,SAAC,GAAD,IAAGrsF,EAAH,EAAGA,MAAOC,EAAV,EAAUA,OAAQwC,EAAlB,EAAkBA,UAAlB,OACzDzC,GAAkC,IAAxByC,EAAUulF,aAAuB/nF,GAAkC,IAAvBwC,EAAU6pF,YAgdlDC,CAAyB,CAC3BvsF,QAAOC,SAAQwC,UAAWilF,GAASlzF,QAAQiO,aAE3CilF,GAASlzF,QAAQk2F,qBAAqB,CAAE1qF,QAAOC,aAIrDu4D,aAAc,SAACg0B,EAAiBC,GAC9B5B,EAAmBr2F,SAAU,EAC7BkzF,GAASlzF,QAAQk2F,qBAAqB,CAAE1qF,MAAOwsF,EAASvsF,OAAQwsF,KAGlEn0B,iBA3DkB,SA2DDqI,EAAkCrmB,EAAmB3D,GAGpE,GAFAo1C,GAAuBp1C,GAEnB+wC,GAASlzF,QAAQ2I,MAAO,CAAC,IACZuvF,EAAiBhF,GAASlzF,QAAjC2I,MAEFwvF,EAAgBh2C,EAAEqP,YAAgC,IAApB0mC,EAAa/lE,MAC3CimE,EAAYjE,GAAe+D,EAAaj2E,QAG9CwgD,uBAAsB,WACpB0J,EAAO1iB,UAAY2uC,EACnB,IAAMC,EAAmBlsB,EAAOmsB,YAChCnsB,EAAOmsB,YAAc,GACrBnsB,EAAOrC,SAASquB,EANQ,EAM2BryC,EAAK9hC,EAAG,EAAuB8hC,EAAK/X,GACvFo+B,EAAOmsB,YAAcD,KAGvBnF,GAASlzF,QAAQo3F,iBACflE,GAASlzF,QAAQ2I,MACjBw5C,EACAg2C,EAbwB,GAoB5B,GAAIjF,GAASlzF,QAAQ8G,oBAAqB,CAAC,IAAD,EACdosF,GAASlzF,QAAQ8G,oBAAnC0E,EADgC,EAChCA,MAAOC,EADyB,EACzBA,OAEf,GAAID,EAAQC,EAAQ,CAClB,IACM8sF,EAAap2C,EAAE0lB,YAAYr8D,GADI,IAE/BgtF,EAAWr2C,EAAE0lB,YAAYp8D,EAFM,IAI/B2B,EAAOmrF,EAAW,GAClBjrF,EAAQkrF,EAAS,GAGvBrsB,EAAO1iB,UAAYxpD,OAAOU,QAAQyJ,OAAOpK,QAAQogB,UACjD+rD,EAAOrC,SAAS18D,EAAM04C,EAAK9hC,EAAG1W,EAAQF,EAAM04C,EAAK/X,MAOvDggB,iBAAkB,CAChB6H,UADgB,SACN1zD,EAAmB2kD,EAAkBr8B,GAEzCtoB,EAAM2zD,QAA2B,IAAjB3zD,EAAM2zD,SAI1BwgC,EAAmBr2F,SAAU,EAC7Bs2F,EAAYt2F,SAAU,EACtBwqB,EAAQsrC,oBAAoB5zD,EAAO2kD,EAASr8B,GAI5CA,EAAQi/C,KAAKsF,MAA+BvkD,EAAQi/C,KAAKsF,MLhgB7DjkE,QAAO,SAACoF,GACZ,IAAMuoF,EAA8B7zE,OAAO1U,EAAQvN,MAAMyK,KAAKhH,QAAQ,KAAM,KAAO,IAInF,OAHIqyF,GACFvoF,EAAQyrC,WAAYh2B,YAAYzV,IAE1BuoF,KK6fA72F,EAAS2D,gBAELrD,EAAM2zD,QAA2B,IAAjB3zD,EAAM2zD,OAGpB3zD,EAAM8zD,UAERugC,EAAsBv2F,QAAU,KAEhCq1F,KAAQp/B,SAAS/zD,EAAO2kD,EAASr8B,IACxBtoB,EAAM6zD,QAAU7zD,EAAM6pF,SAAW7pF,EAAMw2F,SAEhDnC,EAAsBv2F,QAAU6mD,EAAQ4K,aAAavvD,EAAMmoE,SAE3DgrB,KAAQziC,UAAU1wD,EAAO2kD,EAASr8B,KAGlC+rE,EAAsBv2F,QAAU,KAEhCq1F,KAAQziC,UAAU1wD,EAAO2kD,EAASr8B,IAE3BtoB,EAAM8zD,UAEfugC,EAAsBv2F,QAAU,KAEhCq1F,KAAQziC,UAAU1wD,EAAO2kD,EAASr8B,IACzBtoB,EAAM6zD,QAAU7zD,EAAM6pF,SAAW7pF,EAAMw2F,SAEhDnC,EAAsBv2F,QAAU6mD,EAAQ4K,aAAavvD,EAAMmoE,SAE3DgrB,KAAQziC,UAAU1wD,EAAO2kD,EAASr8B,KAGlC+rE,EAAsBv2F,QAAU,KAEhCq1F,KAAQp/B,SAAS/zD,EAAO2kD,EAASr8B,MAIrCqsC,UAtDgB,SAsDN30D,EAAmB2kD,EAAkBr8B,GAGP,OAAlC+rE,EAAsBv2F,SAExBq2F,EAAmBr2F,SAAU,EAE7Bq1F,KAAQtiC,SAAS7wD,EAAO2kD,EAASr8B,GACjCtoB,EAAMC,kBACGqoB,EAAQumC,WACjBslC,EAAmBr2F,SAAU,EAE7BwqB,EAAQwnC,SAAU,EAElBqjC,KAAQn/B,QAAQh0D,EAAO2kD,EAASr8B,IACvBA,EAAQqoC,WAEjBwiC,KAAQtiC,SAAS7wD,EAAO2kD,EAASr8B,IAIrCmsC,QA3EgB,SA2ERz0D,EAAmB2kD,EAAkBr8B,GAE3C,GADA8rE,EAAYt2F,SAAU,EACgB,OAAlCu2F,EAAsBv2F,QAAkB,CAC1C,IAAM24F,EAAc5Y,cAAO,SAAC/mF,GAAD,OAAQA,IAAG,CACpCu9F,EAAsBv2F,QACtB6mD,EAAQ4K,aAAavvD,EAAMmoE,WAG7B6oB,GAASlzF,QAAQ41F,uBAAuB,CACtCpqF,MAAOmtF,EAAY,GACnBltF,OAAQktF,EAAY,GACpBjtF,SAAUuC,EAAUlM,KAEtBw0F,EAAsB
v2F,QAAU,KAEhCwqB,EAAQqoC,WAAY,EAKpBhM,EAAQkN,iBAKRlN,EAAQ4L,YAAW,QACVjoC,EAAQumC,WACjBslC,EAAmBr2F,SAAU,EAE7Bq1F,KAAQ3iC,OAAOxwD,EAAO2kD,EAASr8B,GAC/B0oE,GAASlzF,QAAQ21F,iCACRnrE,EAAQqoC,YACjBwjC,EAAmBr2F,SAAU,EAE7Bq1F,KAAQvhC,QAAQ5xD,EAAO2kD,EAASr8B,GAChC0oE,GAASlzF,QAAQ21F,kCAIrBiD,MAlHgB,SAkHV12F,EAAmB2kD,GACvB,GAAK3kD,EAAM8zD,UAAa9zD,EAAM6zD,OAA9B,CAEAsgC,EAAmBr2F,SAAU,EAC7BkC,EAAMC,iBACND,EAAMuB,kBAGN,IAoCMo1F,EACwB,kBAArB32F,EAAM42F,YAA4Bl0E,OAAOqoB,MAAM/qC,EAAM42F,aAExC,IAAhB52F,EAAM62F,OADN72F,EAAM42F,WAAa,GAInBluD,GADS1oC,EAAM82F,QAAyB,EAAhB92F,EAAM82F,OAAcH,GACtB,GAEvB32F,EAAMmoE,UAASnoE,EAAMmoE,QAAUnoE,EAAM+2F,OAAS/2F,EAAMivB,OAAO+nE,YAChE,IAAMC,EAvBqB,SAACh3C,EAAGkoB,GAAY,MAEfloB,EAAE0lB,YAAY1lB,EAAE6O,aAAa,GAAI,MAApDooC,EAFkC,oBAKnCpgG,EAAIqxE,EAAU+uB,EAGdtrD,EAAIqU,EAAE0lB,YAAY1lB,EAAE6O,aAAa,GAAI,MAAM,GAAKooC,EAGtD,OAAa,IAANtrD,EAAU,EAAI90C,EAAI80C,EAYdurD,CAAmBxyC,EAAS3kD,EAAMmoE,UA7ClC,SAACloB,EAAGm3C,EAAkBC,GACjCA,EAAOA,GAAQ,GAD2B,MAEVp3C,EAAE6O,aAFQ,mBAEnCwoC,EAFmC,KAExBC,EAFwB,KAIpCC,GADQD,EAAaD,GACDF,EAGpB9tF,EAAQguF,EAF6BE,EAAYH,EAGjD9tF,EAASguF,EAH8CC,GAAa,EAAIH,GAK9ErG,GAASlzF,QAAQk2F,qBAAqB,CACpC1qF,QACAC,SACA0qF,+BAA+B,EAC/BlnE,SAAU,SAAC0qE,EAAsBC,GAC/B/yC,EAAQymB,cAAc,CACpBvH,WAAY,CAAC4zB,EAAcC,QA+BnCC,CAAKhzC,EAASjc,EAAYuuD,KAG5BtzE,MA5KgB,SA4KV3jB,GACJA,EAAMC,kBAGRs0D,SAhLgB,WAiLd70D,EAAS4D,aAAuB,CAAEuG,WAAW,KAC7CmnF,GAASlzF,QAAQ62F,yBAGnBvgC,WArLgB,SAqLLp0D,EAAmB2kD,EAAkBr8B,GAC9C8rE,EAAYt2F,SAAU,EACtBq2F,EAAmBr2F,SAAU,EAK7Bq1F,KAAQvnB,wBAAwBxX,WAAWp0D,EAAO2kD,EAASr8B,GAK3DA,EAAQoqC,gBAAkB,CAAE57D,GAAG,EAAMgrB,GAAG,GAExCwyE,EAAqBx2F,QAAU,EAEO,kBAA3BkC,EAAMI,QAAQ,GAAGgyD,MAC1BmiC,EAAsBz2F,QAAUkC,EAAMI,QAAQ,GAAGgyD,MAEjDmiC,EAAsBz2F,QAAU,GAGpCu2D,UA3MgB,SA2MNr0D,EAAmB2kD,EAAkBr8B,GAC7C6rE,EAAmBr2F,SAAU,EAC7Bq1F,KAAQvnB,wBAAwBvX,UAAUr0D,EAAO2kD,EAASr8B,GAE1DgsE,EAAqBx2F,QAAU2hB,KAAKC,OAGtC40C,SAlNgB,SAkNPt0D,EAAmB2kD,EAAkBr8B,GAM5C,GALA8rE,EAAYt2F,SAAU,EACtBq2F,EAAmBr2F,SAAU,EAC7Bq1F,KAAQvnB,wBAAwBtX,SAASt0D,EAAO2kD,EAASr8B,GAGpB,IAAjCgsE,EAAqBx2F,SAAmD,IAAlCy2F,EAAsBz2F,SAC3D0gC,EAAa1gC,QAChB,CACAq2F,EAAmBr2F,SAAU,EAI7B,IAAM85F,EAAiBjzC,EAAQyK,SACzByoC,GAAOtD,EAAsBz2F,SACjC85F,EAAeh0C,KAAK9sD,EAAI0nC,EAAa1gC,QAAQ+oC,wBAAwB37B,OAClE0sF,EAAeh0C,KAAKhY,EAEjB9tC,EAAYkzF,GAAZlzF,QACF5B,EAAI/E,KAAKgpB,MAAMriB,EAAQ81F,WACxB91F,EAAQmwF,WAAanwF,EAAQ81F,WAAaiE,GAE/ClE,EAAYz3F,GAAG,GAIjB,IAAMwjB,EAAMD,KAAKC,MAC0B,qBAAhC80E,EAAoB12F,UACQ,IAAjCw2F,EAAqBx2F,SACZ4hB,EAAM80E,EAAoB12F,SAC3BC,OAAOU,QAAQC,QAAQZ,QAAQg6F,oBACvC9G,GAASlzF,QAAQ62F,yBAMvBH,EAAoB12F,QAAU4hB,EAC9BsxE,GAASlzF,QAAQ21F,oCAKjB5sF,EAAO6pF,EACTb,GAAsB9jF,EAAU2a,OAAO7f,KAAMipF,GAC7C/jF,EAAU2a,OAAO7f,KACfkxF,EAAW,IAAI5E,KAAS30D,EAAa1gC,QAAU+I,EAAM4uF,GAC3DvB,EAAgBp2F,QAAUi6F,KAE3B,CAACh8E,EAAYhQ,EAAWE,EAAeikF,EAAe/wF,EAAW2wF,EAClEtG,EAAc8I,EAAuB5B,EACrC6B,EAAeoB,EAAanB,EAAWC,GAAkBlH,EACzD9sC,EAAiBD,EAAiB62C,GAAwB31F,IAE5Ds4F,cAAgB,WACd,GAAI9D,EAAgBp2F,QAAS,CAC3B,IAAM40F,EAA0C,cAA5B32E,EAAWlL,aACzBxR,EAAiD,WAA9B0c,EAAW9M,eACpCilF,EAAgBp2F,QAAQstE,cAAc,CACpCynB,OAASH,GAAerzF,OAAoB7D,EAAY+vF,OAG3D,CAACxvE,EAAYwvE,IAIhByM,cAAgB,WACV9D,EAAgBp2F,SAClBo2F,EAAgBp2F,QAAQstE,cAAc,MAGvC,CAAC3kE,EAAO7B,IAEX,IAAMM,GAAkCvH,aAAYs6F,MACpDD,cAAgB,WACV9D,EAAgBp2F,SAGlBC,OAAOwiE,uBAAsB,WACvB2zB,EAAgBp2F,SACjBo2F,EAAgBp2F,QAA2ByN,cAIjD,CAACrG,KAIJ8yF,cAAgB,WAEd,GAAI9D,EAAgBp2F,UAAY0rF,EAAc,CAE5C,IAAM0O,EAAkB,CAACtE,EAAW3F,GAS9Bn/B,EAAaolC,EAAgBp2F,QAAQgxD,aAErCqpC,EAAqBhhG,KAAKC,IAAK62F,EAAa2F,GAAc9kC,EAAW,GAAKA,EAAW,KA50BtE,IA+0BfspC,EAAuCnK,GAAc,GACvDn/B,EAAW,GAAKm/B,GAEhBn/B,EAAW,GAAK8kC,IAChBuE,EAEEE,EAAqBjsF,IAAyBgsF,EAChD,CAA
Ev0B,WAAYq0B,GACd,GAxBwC,EA0BFn8E,EAAlChL,qBA1BoC,MA0BpBwhF,EA1BoB,EA2BtC16D,EAAO64D,EACTb,GAAsB9jF,EAAU2a,OAAO7f,KAAMipF,GAC7C/jF,EAAU2a,OAAO7f,KAEf+rF,EAA8C,IAAhC9C,EAAqBvoF,QACvCuoF,EAAqBlnF,QAAO,SAAA9R,GAAC,OAAU,IAANA,KAAYyQ,OAAS,EAExD2sF,EAAgBp2F,QAAQstE,cAAxB,eACKitB,EADL,CAEElwF,OAAQuoF,EAAgBH,GAAgB12B,YAAQ9oD,IAAkBA,EAClE8mB,OACAxoB,OAAQtD,EAAU2a,OAAOrX,OACzBu7C,UAAWn4C,GACc,YAArB29E,EAAiC,CAAEwC,eAAgB,GANzD,CAOE9nC,aAAmC,YAArBslC,IAAmCM,EAEjD5oC,WAAY4oC,EAAgB72B,YAAQi2B,GAAwBA,QAG/D,CAAC/zE,EAAYhQ,EAAU2a,OAAQvnB,EAAW2wF,EAAsBM,EACjE39E,EAAkB+2E,EAAckH,EAAetkF,EAAsBmmF,EACrEqB,EAAW3F,IAEb+J,cAAgB,WACd,GAAK9D,EAAgBp2F,QAArB,CAIA,IAAM03F,EAAuBnD,GAAyB,CACpDt2E,aACAhQ,YACAE,gBACAikF,gBACAJ,uBACAwC,wBACA5B,gBACA6B,gBACAC,YACAC,oBACAlH,eACA9sC,kBACAD,oBAEGgrC,GAAc0K,EAAgBp2F,QAAQstE,cAAcoqB,MACxD,CAACpF,EAAkBj9D,IAGtB,IAAM5uB,GAA2B5G,aAAY26F,MAC7ChD,2BAAgB,WACd,GAAIpB,EAAgBp2F,SAAWyG,KAA6BpF,EAAW,CACrE,IAAoB,IAAhB2uF,EAKF,aAHgD,IAA5CoG,EAAgBp2F,QAAQkwD,gBAC1BkmC,EAAgBp2F,QAAQmqE,kBAI5BisB,EAAgBp2F,QAAQiwD,aAAa+/B,MAEtC,CAAC/hF,EAAW5M,EAAWoF,GAA0BupF,EAClD8F,EAAW3F,IAIb,IAAMzuF,GAAe7B,cACnB,SAAC0K,GAAD,OAAsBm0C,aAAmBn0C,EAAO,CAAExI,GAAIV,OAExDm2F,2BAAgB,WACVpB,EAAgBp2F,SACjBo2F,EAAgBp2F,QAA2ByN,WAE7C,CAAC/L,GAAcuM,EAAU4hF,gBAAgBpmF,SAG5C,IAAMgxF,GAAiB56F,cAAY,SAAC0K,GAAD,OACjC0T,EAAW1X,UACPm0F,aAAgBnwF,EAAO0T,EAAW1X,gBAClC7I,KAEAi9F,GAAiB96F,cAAY,SAAC0K,GAAD,OACjC0T,EAAWzX,UACPo0F,aAAgBrwF,EAAO0T,EAAWzX,gBAClC9I,KAGN85F,2BAAgB,WAAO,IACFtsF,EAA0C+S,EAArD1X,UAAoC+E,EAAiB2S,EAA5BzX,UAEjC,GACE4vF,EAAgBp2F,UACZkL,GAAgBI,GACpB,CACA,IAAMq8D,EAAYyuB,EAAgBp2F,QAA2BwnE,gBAAgB,GAD7E,cAEiCG,EAFjC,GAEOsqB,EAFP,KAEmBC,EAFnB,OAMIj0E,EADFnL,yBALF,MAKsB,CAAC,KAAM,MAL7B,EAQM+nF,EAA8C,OAAzB/nF,EAAkB,GACvCgoF,EAA8C,OAAzBhoF,EAAkB,GAGzCioF,GAAe,EACfhvE,EAAoB,YAAO47C,GAiB/B,GAbIz8D,GAAgB2vF,GACdJ,IAAkBA,GAAervF,eAAiB6mF,IACpDlmE,EAAW,GAAK0uE,GAAervF,eAC/B2vF,GAAe,GAGfzvF,GAAgBwvF,GACdH,IAAkBA,GAAevvF,eAAiB8mF,IACpDnmE,EAAW,GAAK4uE,GAAevvF,eAC/B2vF,GAAe,GAIfA,EAAc,CAChB3E,EAAgBp2F,QAAQstE,cAAc,CAAEvhD,eACxC,IAAMivE,EAAe5E,EAAgBp2F,QAA2BwnE,gBAAgB,GAEhFz7C,EAAU,YAAOivE,GAIf9vF,GAAgB2vF,GACd9uE,EAAW,MAAX,OAAkB0uE,SAAlB,IAAkBA,QAAlB,EAAkBA,GAAgBtvF,OAAO9J,KAE7CO,EAASiC,aAAmB,CAAExC,YAAW6J,eAAclP,MAAO+vB,EAAW,MAEvEzgB,GAAgBwvF,GACd/uE,EAAW,MAAX,OAAkB4uE,SAAlB,IAAkBA,QAAlB,EAAkBA,GAAgBxvF,OAAO9J,KAE7CO,EAASkC,aAAmB,CAAEzC,YAAWiK,eAActP,MAAO+vB,EAAW,SAG5E,CAAC9N,EAAYhQ,EAAU2a,OAAQvnB,EAAWo5F,GAAgBE,GAAgB/4F,IAE7E41F,2BAAgB,WACVH,IAAejB,EAAgBp2F,SACjCu3F,GAAuBnB,EAAgBp2F,WAGxC,CAACq3F,KAEJ4D,cAAW,WACL7E,EAAgBp2F,SAClBo2F,EAAgBp2F,QAAQ+9B,aAO5B,IAAMm9D,GAAiBv7F,mBACvBw7F,cAAS,WACP,GAAKl9E,EAAWrL,aAAhB,CAIA,IAAIwoF,GAAyB,EACvBC,EC5jCc,SAAoC/hD,EAASgiD,GACnE,IAAI7kB,EAAgD,KAUpD,OARkB,WAA6B,IAAD,uBAAxB8kB,EAAwB,yBAAxBA,EAAwB,gBAC5B,OAAZ9kB,IACFn0B,aAAam0B,GACbA,EAAU,MAEZA,EAAU/wD,YAAW,kBAAM4zB,EAAI,WAAJ,EAAQiiD,KAAOD,IDojChBE,EAAS,WAC5BJ,EAKDhF,EAAgBp2F,SACjBo2F,EAAgBp2F,QAA2ByN,SAL5C2tF,GAAyB,IAhgCC,KAygC9BF,GAAel7F,QAAU,IAAIy7F,MAAe,WAC1CJ,OAEFH,GAAel7F,QAAQ07F,QAAQh7D,EAAa1gC,aAG9Ci7F,cAAW,WACT7E,EAAgBp2F,QAAU,KACtBk7F,GAAel7F,SACjBk7F,GAAel7F,QAAQ27F,gBAI3B,IAAMp6F,GAAiD,WAA9B0c,EAAW9M,eAEpC,OACE,oCACE,yBACEvS,IAAK8hC,EACL3+B,GAAI0zF,EACJ32F,UAAWG,KACTu2F,EACA,CAAE,+BAAgCj0F,OAGrC81F,IAAe1M,GACd,kBAAC,GAAD,CAA0B/rF,IAAK04F,MAE3B,OAAL3uF,QAAK,IAALA,OAAA,EAAAA,EAAO3M,QAAS2uF,GAEf,kBAAC,GAAD,CACE3Z,UAAWkmB,GACXt4F,IAAKu4F,GACLl1E,OAAQtZ,EAAMsZ,OACdwlB,MAAO9+B,EAAM3M,QAGjB,yBAAK8C,UAAU,+BAA+BiD,GAAIyyF,M,+BEthC3CoH,GAAe,SAAC,GAaf,IAZZ39E,EAYW,EAZXA,WACAhQ,EAWW,EAXXA,UACAE,EAUW,EAVXA,cACAqnF,EASW,EATXA,sBACAC,EAQW,EARXA,eA
CAoG,EAOW,EAPXA,WACA7L,EAMW,EANXA,WACAlC,EAKW,EALXA,kBACA2G,EAIW,EAJXA,cACAC,EAGW,EAHXA,UACApE,EAEW,EAFXA,cACA7C,EACW,EADXA,aAEM/sD,EAAe/gC,iBAAuB,MADjC,EAE+B6B,qBAF/B,mBAEJs6F,EAFI,KAEWC,EAFX,KAILC,GAA6B,IAAhBhM,EACf,EACC/hF,EAAU2a,OAAOnf,OAAS,EAAIumF,EAC7Bh0F,EAAQs0F,EAAgB,KAAOriF,EAAU2a,OAAOozE,GAP3C,EAaP/9E,EAFF/H,qBAAsB/Y,OAXb,MAWmB8Q,EAAU9Q,IAX7B,IAaP8gB,EADF9H,qBAAsBzZ,OAZb,MAYmBuR,EAAUvR,IAZ7B,EAiBLu/F,EAAa3+C,aACjBtyC,cAAI,SAAChS,GAAD,OAAgBA,KACpB+mF,aAAOpjF,OACP,mCAAEu/F,EAAF,KAAQC,EAAR,WAA4B,CAAC9iG,KAAK8D,IAAI++F,EAAMlgG,GAAS,GAAI3C,KAAKqD,IAAIy/F,EAAMngG,GAAS,MAHhEshD,CAIjB,CAACngD,EAAKT,IAERqD,qBAAU,WACR20F,EAAUuH,KAET,CAACA,IAEJ,IAAM18D,EA/GqD,SAAC,GAIvD,IAWDA,EAXA,IAHJvjC,aAGI,MAHI,EAGJ,MAHOmB,WAGP,MAHa,EAGb,MAHgBT,WAGhB,MAHsB,EAGtB,EAgCJ,OAhCI,EAFJ0/F,eAMsBj/F,EAAM,IAC1BA,EAAM,IALJ,EADJk/F,eAQsB3/F,EAAM,IAC1BA,EAAM,GAKJS,EAAM,GAAKT,EAAM,GAGnBA,GAAQS,EAAMT,GAAQS,EAAMT,EAC5B6iC,EAAQlmC,KAAKgpB,MAAe,IAARrmB,EAAeU,IAC1BV,GAAS,GAAKmB,GAAO,GAAKT,GAAO,EAG5B,KADd6iC,EAAQlmC,KAAKgpB,MAAuB,KAAfrmB,EAAQmB,IAAeT,EAAMS,OAEhDoiC,EAAQ,IAKI,KADdA,EAAQlmC,KAAKgpB,MAAuB,KAAfrmB,EAAQU,IAAeA,EAAMS,OAEhDoiC,GAAS,IAINA,EA2EO+8D,CAA0B,CACtCtgG,MAAOs0F,EAAgB,EAAKt0F,EAC5BmB,IAAK8+F,EAAW,GAChBv/F,IAAKu/F,EAAW,GAChBG,mBAAmD1+F,IAApCugB,EAAW/H,qBAC1BmmF,mBAAmD3+F,IAApCugB,EAAW9H,uBAG5BpW,qBAAU,WACR,GAAI2gC,EAAa1gC,UAAY87F,EAAe,CAC1C,IAAM3zC,EAASjL,aAAK,CAClB,CAAC,SAACgF,GAAD,OAAOA,EAAI,GAAGlsB,aAAO,IACtB,CAACmnB,KAAGxgD,OAFSugD,CAGZ7jD,KAAKG,MAAMqiG,EAAa,KAJe,EAiBtC59E,EAVF5H,8BAPwC,MAOfpW,OAAOU,QAAQyJ,OAAOpK,QAAQu8F,mBAPf,IAiBtCt+E,EATF3H,8BARwC,MAQfrW,OAAOU,QAAQyJ,OAAOpK,QAAQw8F,mBARf,IAiBtCv+E,EARF1H,+BATwC,MASd,EATc,IAiBtC0H,EAPFzH,2BAVwC,MAUlB,QAVkB,IAiBtCyH,EANFxH,6BAXwC,MAWhB0xC,EAXgB,EAYxCzxC,EAKEuH,EALFvH,uBAZwC,EAiBtCuH,EAJFtH,wBAbwC,MAarBklF,EAbqB,IAiBtC59E,EAHFrH,0BAdwC,MAcnB,EAdmB,IAiBtCqH,EAFFpH,2BAfwC,MAelB,CAAEqS,SAAU,IAAK8O,SAAS,GAfR,EAgBxClhB,EACEmH,EADFnH,mBAGI2lF,EAAmB,IAAIC,KAAQh8D,EAAa1gC,QAAS,CACzD28F,SAAUlI,EAAc,GACxBmI,WAAYvmF,EACZwmF,WAAYvmF,EACZwmF,YAAavmF,EACbwmF,QAASvmF,EACTgxC,UAAW/wC,EACXumF,WAAYtmF,EACZqoB,KAAMpoB,EACNo7B,OAAQn7B,EACRsT,QAASrT,EACTomF,OAAQnmF,IAEVilF,EAAiBU,MAElB,CAACx+E,EAAYhQ,EAAW6tF,EAAeD,EAAYpH,IAGtD10F,qBAAU,WACR,GAAI+7F,EAAe,CACjB,IAAMoB,GAAqC,IAAhBlN,IAAsBM,EAE7C4M,IAAuBpB,EAAcl7F,QAAQspB,QAAQ8N,QACvD8jE,EAAcqB,mBACJD,GAAsBpB,EAAcl7F,QAAQspB,QAAQ8N,SAC9D8jE,EAAcsB,mBAGhB13E,YAAW,WAETo2E,EAAcj3D,OAAOtF,KACpB,MAEJ,CAACu8D,EAAe9L,EAAYzwD,EAAO+wD,IAEtC,IAAM+M,EAA8B,EAAbxB,EAAkB,EAAI,EACvCyB,EAAWjkG,KAAKgpB,OAAOw5E,EAAawB,EAAiBxB,EAAa,IAAO,GAEzE0B,EAAgBlkG,KAAKgpB,MAAuB,IAAhBg7E,EAAuB,GACnDG,EAAWnkG,KAAKgpB,MAAMi7E,EAA4B,EAAhBC,EAAsB1B,EAAa,IAErE4B,EAAepkG,KAAKgpB,MAAsB,GAAhBk7E,GAC1BG,EAAUrkG,KAAKgpB,MAAMi7E,GAAYD,EAAgBI,GAAiB5B,EAAa,IAGrF,OACE,yBAAKj9F,IAAK8hC,EAAc3+B,GAAI0zF,EAAgB32F,UAAW02F,GACrD,0BACE12F,UAAU,oBACV6D,MAAO,CACL+mC,SAAU2zD,EACVjzE,IAAKkzE,IAGNxP,EAAkB9xF,IAErB,0BACE8C,UAAU,oBACV6D,MAAO,CACL+mC,SAAU6zD,EACVnzE,IAAKozE,IAGNv/E,EAAWjd,OAASmN,EAAcnN,OAErC,0BACElC,UAAU,oBACV6D,MAAO,CACL+mC,SAAU+zD,EACVrzE,IAAKszE,IAGNjQ,K,UCxNHkQ,GAAc,SAAC3kG,GAAD,MACL,kBAANA,GAqCI4kG,GAAa,SAAC,GAeb,IAdZ3/E,EAcW,EAdXA,WACAhQ,EAaW,EAbXA,UACAE,EAYW,EAZXA,cACAqnF,EAWW,EAXXA,sBACAC,EAUW,EAVXA,eACAp0F,EASW,EATXA,UACA8sB,EAQW,EARXA,YACA0tE,EAOW,EAPXA,WACA7L,EAMW,EANXA,WACAlC,EAKW,EALXA,kBACA2G,EAIW,EAJXA,cACAC,EAGW,EAHXA,UACApE,EAEW,EAFXA,cACA7C,EACW,EADXA,aAEMoQ,EAAqBl+F,iBAA0B,MAD1C,EAE+B6B,qBAF/B,mBAEJs6F,EAFI,KAEWC,EAFX,KAILC,GAA6B,IAAhBhM,EACf,EACC/hF,EAAU2a,OAAOnf,OAAS,EAAIumF,EAC7Bh0F,EAAQiS,EAAU2a,OAAOozE,GAId8B,EAEb7/E,EAFFlH,cACegnF,EACb9/E,EADFjH,
cAGI7Z,EAAMwgG,GAAYG,GAAgBA,EAAe7vF,EAAU9Q,IAC3DT,EAAMihG,GAAYI,GAAgBA,EAAe9vF,EAAUvR,IAhBtD,EAsBgB4gD,aAEzBtyC,cAAI,SAAChS,GAAD,OAAgBA,MAGpB,mCAAEkjG,EAAF,KAAQC,EAAR,WAA4B,EACxBwB,GAAYG,IAAiB5B,EAAO,EAAK,EAAIA,GAC7CyB,GAAYI,IAAiB5B,EAAO,EAAK,EAAIA,KAGjDpc,aAAOpjF,OACP,mCAAEu/F,EAAF,KAAQC,EAAR,WAA4B,CAAC9iG,KAAK8D,IAAI++F,EAAMlgG,GAAQ3C,KAAKqD,IAAIy/F,EAAMngG,MAX1CshD,CAYzB,CAACngD,EAAKT,IAlCG,mBAsBJshG,EAtBI,KAsBKC,EAtBL,KAqCXl+F,qBAAU,WACR20F,EAAU,CAACsJ,EAASC,MAEnB,CAACD,EAASC,IAEb,IAAM1+D,EAAQ+d,aACZtnB,aAA4B,KAAnBh6B,EAAQgiG,IAAmBC,EAAUD,KAG9C,SAACE,GAAD,OAAoB7kG,KAAKqD,IAAI,KAAOwhG,MACpC,SAACA,GAAD,OAAoB7kG,KAAK8D,IAAI,OAAQ+gG,KALzB5gD,GAQdv9C,qBAAU,WACR,GAAI89F,EAAmB79F,UAAY87F,EAAe,CAAC,IAAD,EAO5C79E,EALFhH,yBAF8C,MAE1BhX,OAAOU,QAAQyJ,OAAOpK,QAAQm+F,cAFJ,IAO5ClgF,EAJF/G,wBAH8C,MAG3BjX,OAAOU,QAAQyJ,OAAOpK,QAAQo+F,aAHH,IAO5CngF,EAHF9G,uBAJ8C,MAI5Bs9E,EAAc,GAJc,EAK9Cr9E,EAEE6G,EAFF7G,eAL8C,EAO5C6G,EADF5G,sBAGIzW,EAAU,CACdkqC,MAAO,GACPiI,MAAO,IACPyU,UAAW,IACX62C,YAAa,EACbC,QAAS,CACP70F,OAAQ,IACR6+C,YAAa,KACb7e,MAAOxyB,GAKTsnF,UAAU,EACVC,UAAU,EACVC,WAAYtnF,EACZunF,UAAWtnF,EACXunF,YAAaznF,EACb0nF,kBAA6C,UA3BC,UA4B9CC,aAAc,EACdC,gBAAgB,GAGZrC,EAAmB,IAAIsC,SAAMlB,EAAmB79F,SAASg/F,WAAWp+F,GAG1E67F,EAAiBwC,SAAW,EAC5BxC,EAAiByC,SAAW,IAE5BnD,EAAiBU,MAElB,CAACx+E,EAAYhQ,EAAW6tF,EAAeD,EAAYpH,IAGtD10F,qBAAU,WACR,GAAI+7F,EAAe,CAEjB,IAEM1wD,GAFqC,IAAhB4kD,IAAsBM,EAEd,GAAK,IACxCwL,EAAcqD,eAAiB/zD,EAC/B1lB,YAAW,WACTo2E,EAAcsD,IAAI9O,EAAgB,EAAI/wD,KACrC,MAEJ,CAACu8D,EAAe3tE,EAAa0tE,EAAY7L,EAAYzwD,EAAO+wD,IAE/D,IAAM+M,EAAgBhkG,KAAKG,MAAM20B,EAAc,GACzCkxE,EAAWhmG,KAAKgpB,OAAO8L,EAAckvE,GAAiB,KAEtDE,EAAgBlkG,KAAKgpB,MAAMg7E,EAAgB,KAG3CI,EAAepkG,KAAKgpB,MAAsB,GAAhBk7E,GAE1B+B,EAAiBjmG,KAAKgpB,MAAsB,IAAhBg7E,GAClC,OACE,yBACEt7F,GAAI0zF,EACJ32F,UAAW02F,GAEX,4BACE52F,IAAKi/F,EACL/+F,UAAU,aACViD,GAAE,gBAAWV,EAAX,WACFsB,MAAO,CACLsO,MAAO4qF,EACPj5F,OAAQurB,KAGZ,0BACErvB,UAAU,kBACV6D,MAAO,CACL+mC,SAAU2zD,EACVjzE,IAAKi1E,IAGNvR,EAAkBwC,EAAgB,KAAOt0F,IAE5C,0BACE8C,UAAU,kBACV6D,MAAO,CACL+mC,SAAU6zD,EACVnzE,IAhCS,IAmCVnM,EAAWjd,OAASmN,EAAcnN,OAErC,0BACElC,UAAU,kBACV6D,MAAO,CACL+mC,SAAU+zD,IAGXhQ,GAEH,0BAAM3uF,UAAU,gBAAgB6D,MAAO,CAAE+mC,SAAU41D,IAChDxR,EAAkBwC,EAAgB,KAAO0N,IAE5C,0BAAMl/F,UAAU,gBAAgB6D,MAAO,CAAE+mC,SAAU41D,IAChDxR,EAAkBwC,EAAgB,KAAO2N,M,uCC7NrCsB,GAA6BjiD,aACxC7+C,KACA2H,aAAQ,cAAe,IACvB82C,aAAK,CACH,CAAC,SAACrlB,GAAD,OAASA,EAAIpuB,OAAS,GAAG,SAACouB,GAAD,OAASA,EAAI,GAAKA,EAAI,GAAKA,EAAI,GAAKA,EAAI,GAAKA,EAAI,GAAKA,EAAI,KACpF,CAACslB,KAAGxgD,SAIK6iG,GAAiB,SAACnxD,GAAkC,IAArBC,EAAoB,uDAAN,EAClDmxD,EAAgBF,GAAalxD,GAG7BuiD,EAAMtzC,cAEV,SAACzlB,GAAD,OAAiB6nE,aAAW,EAAG7nE,KAC/B7sB,aACEsyC,cACE,SAACzlB,GAAD,OAAiBtW,SAASsW,EAAK,OAC/B,SAAC8nE,GAAD,OAAQtmG,KAAKgpB,MACXhpB,KAAK8D,IACH9D,KAAKqD,IAAI,EAAGijG,EAAMA,EAAKrxD,GACvB,MAEF7vC,SAAS,OACX,SAACo5B,GAAD,MAAS,YAAKA,GAAMsM,OAAOtM,EAAIpuB,aAGnC,SAACzQ,GAAD,OAAOA,EAAEyoD,KAAK,MAfJnE,CAgBVmiD,GACF,MAAM,IAAN,OAAW7O,ICrBPgP,GAAqB,SAACC,GAC1B,OAAIA,EAAS,EACJA,GAEF,IAAIl+E,MAAO06D,UAAYwjB,GAkDnBC,GAAiB,SAAC,GAWjB,IAVZ7hF,EAUW,EAVXA,WACA7c,EASW,EATXA,sBACA6M,EAQW,EARXA,UACAE,EAOW,EAPXA,cACAqnF,EAMW,EANXA,sBACAC,EAKW,EALXA,eACAhB,EAIW,EAJXA,cACAhH,EAGW,EAHXA,aACAsS,EAEW,EAFXA,wBACAC,EACW,EADXA,yBAEMt/D,EAAe/gC,iBAAuB,MADjC,EAI+B6B,qBAJ/B,mBAIJy+F,EAJI,KAIWC,EAJX,KAKLC,EAAmBxgG,mBALd,EAOmDse,EAAW7L,gBA7DtC,SACnCnE,EACAM,GAEA,IAAM6xF,EAAiBR,GAAmBrxF,EAAU,IAC9C8xF,EAAkBT,GAAmBrxF,EAAU,IAC/C/C,EAAQyC,EAAUzC,MAAQm9B,KAG1B23D,EAFSryF,EAAUxC,OAASk9B,KAEDn9B,EAC3B+0F,EAAoBF,EAAkBD,EAI5C,GAAIE,EAAkBC,EADY,KAEhC,MAAO,GAGT,IAAMC,EAAaF,EAAkBC,EAKrC,MAAO,CACLE,sBAHyB,WAAcj1F,EAAQ40F,I
ADzBC,EAAkBD,GACT,IAAN,KAIzBI,cAqCEE,CAA6BzyF,EAAW,CAAC8xF,EAAyBC,IAClE,GATO,IAOHS,6BAPG,WAOqB/iG,EAPrB,MAOgC8iG,kBAPhC,MAO6C,EAP7C,EAwBXzgG,qBAAU,WAAO,IAAD,EACoCke,EAA1C1G,0BADM,MACek9E,EAAc,GAD7B,EAERkM,EACyB,SAA7BxyF,EAAcsiF,WACVxwF,OAAOU,QAAQyJ,OAAOpK,QAAQ2mC,WAC9B64D,GAAejoF,EAAoBtX,OAAOU,QAAQoQ,cAAc6vF,gBAChEC,EAAa5iF,EAAWjd,OAASmN,EAAcnN,MAE/C8/F,EAAuB,SAAC9nG,GAAD,MAAkC,YAANA,EAAkB,GAAKA,GARlE,EAmBVilB,EARF3G,qBAXY,MAWI,OAXJ,IAmBV2G,EAPFzG,0BAZY,MAYSmpF,EAZT,IAmBV1iF,EANFjF,mCAbY,WAmBViF,EALFhF,gCAdY,WAmBVgF,EAJF9E,iCAfY,WAmBV8E,EAHF7E,iCAhBY,MAgBgB,IAhBhB,IAmBV6E,EAFFvE,8BAjBY,iBAiBiB+zE,GAjBjB,IAmBVxvE,EADFlE,gCAlBY,MAkBe,SAACkK,GAAD,OAAeA,EAAElqB,QAAQ,IAlBxC,EAqBRgnG,EAAuB,CAC3B/hG,KAAMsY,EACN0pF,UAAWzpF,EACX6gF,UAAW5gF,EACXypF,cAAehjF,EAAWxG,uBAC1BypF,cAAejjF,EAAWvG,uBAC1BypF,UAAWljF,EAAWtG,mBACtBypF,iBAAkBnjF,EAAWrG,0BAC7BypF,gBAAiBpjF,EAAWpG,yBAC5BypF,mBAAoBrjF,EAAWnG,4BAE/BypF,mBAAoBtjF,EAAWlG,4BAC/BypF,sBAAuBvjF,EAAWjG,+BAClCypF,UAAWX,EAAqB7iF,EAAWhG,oBAC3CypF,aAAcZ,EAAqB7iF,EAAW/F,uBAC9CypF,aAAcb,EAAqB7iF,EAAW9F,uBAC9CypF,WAAY3jF,EAAW7F,oBACvBypF,WAAY5jF,EAAW5F,oBACvBypF,mBAAoB7jF,EAAW3F,4BAC/BypF,mBAAoB9jF,EAAW1F,4BAC/BivC,UAAWvpC,EAAWzF,mBACtBwpF,eAAgB/jF,EAAWxF,wBAC3BwpF,eAAgBhkF,EAAWvF,wBAC3BwpF,gBAAiBjkF,EAAWtF,yBAC5BwpF,QAASlkF,EAAWrF,iBACpBwpF,eAAgBnkF,EAAWpF,wBAC3BwpF,eAAgBpkF,EAAWnF,wBAC3BwpF,eAAgBrkF,EAAWlF,wBAC3BwpF,mBAAoBvpF,EACpBwpF,gBAAiBvpF,EACjBwpF,iBAAkBtpF,EAClBupF,iBAAkBtpF,EAClBupF,eAAgB1kF,EAAW5E,wBAC3BupF,iBAAkB3kF,EAAW3E,0BAC7BupF,iBAAkB5kF,EAAW1E,0BAC7BupF,kBAAmBjC,EACnBkC,cAAe9kF,EAAWzE,uBAC1BwpF,cAAe/kF,EAAWxE,uBAC1BwpF,cAAevpF,EACfwpF,gBAAiBjlF,EAAWtE,yBAC5BwpF,oBAAqBllF,EAAWrE,6BAChCwpF,uBAAwBnlF,EAAWpE,gCACnCwpF,0BAA2BplF,EAAWnE,mCACtCwpF,gBAAiBvpF,EACjBwpF,oBAAqBtlF,EAAWjE,6BAChCwpF,kBAAmBvlF,EAAWhE,2BAC9BwpF,sBAAuBxlF,EAAW/D,+BAClC+zC,cAAehwC,EAAW9D,wBAE5BgmF,EAAiBngG,QAAU+gG,EAEtBrgE,EAAa1gC,UAAWigG,GAE7BC,GAAiB,kBAAMjgG,OAAOC,EAAEwgC,EAAa1gC,cAC5C,CACDigG,EACAhiF,EACA7c,EACA6M,EAAU2a,OACVza,EACAsmF,EACAhH,EACA+S,IA3GS,IA8GHtnF,EAAqB+E,EAArB/E,iBACRnZ,qBAAU,WACR,GAAKkgG,GAAkB/mF,EAAvB,CAEA,IAAMwqF,EAAU,kBAAMxqF,EAAiB,OACjCyqF,EAAW,SAAC,GAAsC,IAAD,gBAAnCC,WAAmC,MAC1BC,yBAAnB7qG,EAD6C,EAC7CA,EAAGgrB,EAD0C,EAC1CA,EACX9K,EAAiB,CAAElgB,IAAGgrB,OAKxB,OADAi8E,EAAc6D,KAAK,wBAAyBH,GAAUG,KAAK,aAAcJ,GAClE,WAELzD,EAAc8D,OAAO,wBAAyBJ,GAAUI,OAAO,aAAcL,OAE9E,CAACzD,EAAe/mF,IAGnBnZ,qBAtH8B,WAC5B,GAAKkgG,EAAL,CADkC,MAGR7+F,EAAsB2nC,wBAAxC93B,EAH0B,EAG1BA,MAAOrO,EAHmB,EAGnBA,OAEfq9F,EAAcr1E,UAAU3c,EAAU2a,OAAlC,eACKu3E,EAAiBngG,QADtB,CAEEiR,MAAO5X,KAAKG,MAAMyX,EAAQuvF,GAC1B59F,OAAQvJ,KAAKG,MAAMoJ,SA8GU,CAACq9F,EAAehyF,EAAU2a,SAE3D,IAAMjmB,EAAQ89F,EACV,CACEx4B,UAAW,UACX+7B,YAAavD,QAEf/iG,EAEJ,OACE,yBAAKkB,IAAK8hC,EAAc3+B,GAAI0zF,EAAgB32F,UAAW02F,EAAuB7yF,MAAOA,K,UCxNzF1C,OAAO2sC,GAAKA,G,ICFRq3D,G,8BCmBEC,GAAe,CACnBz8D,MAAO,UACPzrC,MAAO,IACPytC,MAAO,WASH06D,GAA6B,SAAC,GAG7B,IAFLl2F,EAEI,EAFJA,UAAWglB,EAEP,EAFOA,MACXxU,EACI,EADJA,iBAAkBC,EACd,EADcA,iBAEZ0lF,EAAK/qG,KAAKgpB,OAAOpU,EAAUxC,OAASwC,EAAUzC,MAAQ,GAAKyC,EAAUyE,QACrE2xF,EAAW3hF,aAAc0hF,GAEzB34F,EAASwC,EAAU2a,OAAO7f,KAAKkqB,GAAOqxE,KACtC94F,EAAQC,EAAe,IAAL24F,EAElB9hF,EAAK7D,EAAiBjT,GACtB+4F,EAAK7lF,EAAiBlT,GACtB+W,EAAK9D,EAAiBhT,GACtB+4F,EAAK9lF,EAAiBjT,GAE5B,OAAI6W,IAAOC,EACH,GAAN,OAAUD,EAAV,YAAgBiiF,EAAhB,eAAyBC,EAAzB,aAAgCH,GAG5B,GAAN,OAAU/hF,EAAV,YAAgBiiF,EAAhB,eAAyBhiF,EAAzB,YAA+BiiF,EAA/B,aAAsCH,IAoB3BI,GAAa,SAAC,GAab,IAZZxmF,EAYW,EAZXA,WACA7c,EAWW,EAXXA,sBACA6M,EAUW,EAVXA,UACAE,EASW,EATXA,cACAqnF,EAQW,EARXA,sBACAC,EAOW,EAPXA,eACAzF,EAMW,EANXA,WACAjpF,EAKW,EALXA,SACA+mF,EAIW,EAJXA,kBACA2G,EAGW,EAHXA,
cACAC,EAEW,EAFXA,UACAjH,EACW,EADXA,aAEM/sD,EAAe/gC,iBAAuB,MAEtCouF,EAAuBpuF,iBAAOmuF,GACpCC,EAAqB/tF,QAAU8tF,EAJpB,MAM+BtsF,qBAN/B,mBAMJkjG,EANI,KAMWC,EANX,KAOLC,EAAejlG,mBAPV,EASoC4gD,eAAvC9hC,EATG,EASHA,iBAAkBC,EATf,EASeA,iBAoT1B,OAjTA3e,qBAAU,WACR,GAAI2gC,EAAa1gC,UAAY0kG,EAAe,CAG1ChQ,EAAU,CAACzmF,EAAU9Q,IAAK8Q,EAAUvR,MAEpC,IACMuE,EAAUu2E,YAAKvpE,EAAU2a,OAAOrX,QAAQvG,KAAI,SAACy8B,EAAOx8B,GAGxD,MAAO,CACLw8B,QACAzrC,MAJYiS,EAAU2a,OAAO7f,KAFnB,GAE+B0+B,GAKzCgC,MAJYgrD,EAAcxpF,OAM3BH,QAAO,SAAC9R,GAAD,OAAmB,OAAZA,EAAEgD,OAAkBhD,EAAEgD,MAAQ,KACzC6oG,EAAc5jG,EAAQwI,OAAS,EAAIxI,EAAUijG,GAE7CY,EAAe7mF,EAAWjd,OAASmN,EAAcnN,MACjDowD,EAAY+yC,GAAa,CAC7Bl2F,YACAglB,MAAO,EACPxU,mBACAC,qBAtBwC,EAqFtCT,EA5DF7D,kBAzBwC,MAyB3B0qF,EAzB2B,IAqFtC7mF,EA3DF5D,qBA1BwC,MA0BxBozE,EA1BwB,IAqFtCxvE,EA1DF3D,mBA3BwC,MA2B1B82C,EA3B0B,IAqFtCnzC,EAzDF1D,uBA5BwC,MA4BtBta,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAM96C,MA5Bd,IAqFtCid,EAxDFzD,0BA7BwC,MA6BnB,GA7BmB,IAqFtCyD,EAvDFxD,4BA9BwC,MA8BjB,OA9BiB,IAqFtCwD,EAtDFvD,sBA/BwC,MA+BvB,QA/BuB,IAqFtCuD,EArDFtD,0BAhCwC,MAgCnB1a,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMlS,SAhCjB,IAqFtC3rB,EApDFrD,6BAjCwC,MAiChB,GAjCgB,IAqFtCqD,EAnDFpD,+BAlCwC,MAkCd,SAlCc,IAqFtCoD,EAlDFnD,yBAnCwC,MAmCpB,QAnCoB,IAqFtCmD,EAjDFlD,wBApCwC,MAoCrB9a,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAM3rB,OApCf,IAqFtClS,EAhDFjD,2BArCwC,MAqClB,EArCkB,IAqFtCiD,EA/CFhD,6BAtCwC,MAsChB,OAtCgB,IAqFtCgD,EA9CF/C,uBAvCwC,MAuCtB,QAvCsB,IAqFtC+C,EA7CF9C,2BAxCwC,MAwClB,gBAxCkB,IAqFtC8C,EA3CF7C,2BA1CwC,MA0ClB,MA1CkB,IAqFtC6C,EA1CF5C,4BA3CwC,MA2ClB,MA3CkB,KAqFtC4C,EAzCF3C,uBA5CwC,OA4CvB,aA5CuB,MAqFtC2C,EAxCF1C,yCA7CwC,cAqFtC0C,EAvCFzC,uCA9CwC,OA8CP,EA9CO,MAqFtCyC,EAtCFxC,2CA/CwC,OA+CH,aA/CG,MAqFtCwC,EArCFvC,uCAhDwC,OAgDP,QAhDO,MAqFtCuC,EApCFtC,uCAjDwC,OAiDP1b,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMipD,MAjD7B,MAqFtC9mF,EAlCFrC,+BAnDwC,OAmDf,eAnDe,MAqFtCqC,EAjCFpC,mDApDwC,OAoDK,KApDL,MAqFtCoC,EAhCFnC,oCArDwC,OAqDV,GArDU,MAqFtCmC,EA/BFlC,+BAtDwC,OAsDf,aAtDe,MAqFtCkC,EA9BFjC,mDAvDwC,OAuDK,EAvDL,MAqFtCiC,EA5BFhC,kCAzDwC,OAyDZhc,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMkpD,UAzDxB,MAqFtC/mF,EA3BF/B,iCA1DwC,OA0Db,QA1Da,MAqFtC+B,EA1BF9B,qCA3DwC,OA2DT,GA3DS,MAqFtC8B,EAzBF7B,uCA5DwC,OA4DP,SA5DO,MAqFtC6B,EAvBF5B,mCA9DwC,OA8DXpc,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMlR,WA9DzB,MAqFtC3sB,EAtBF3B,kCA/DwC,OA+DZ,QA/DY,MAqFtC2B,EArBF1B,sCAhEwC,OAgER,GAhEQ,MAqFtC0B,EApBFzB,wCAjEwC,OAiEN,OAjEM,MAqFtCyB,EAlBFxB,8BAnEwC,OAmEhBxc,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAM9/C,MAnEpB,MAqFtCiiB,EAjBFvB,6BApEwC,OAoEjB,QApEiB,MAqFtCuB,EAhBFtB,iCArEwC,OAqEb,GArEa,MAqFtCsB,EAfFrB,mCAtEwC,OAsEX,OAtEW,MAqFtCqB,EAbFpB,gCAxEwC,cAqFtCoB,EAZFnB,8BAzEwC,OAyEhB,SAzEgB,MAqFtCmB,EAXFlB,8BA1EwC,OA0EhB,UA1EgB,MAqFtCkB,EATFjB,qCA5EwC,cAqFtCiB,EARFhB,4CA7EwC,OA6EF,GA7EE,MAqFtCgB,EANFf,qCA/EwC,OA+ETjd,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMmpD,eA/E3B,MAqFtChnF,EALFd,iCAhFwC,cAqFtCc,EAJFb,kCAjFwC,OAiFZ,GAjFY,MAqFtCa,EAHFZ,+BAlFwC,OAkFfpd,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMopD,eAlFrB,MAqFtCjnF,EADFX,uBApFwC,OAoFvB,KApFuB,MAuFhBlc,EAAsB2nC,wBAAxC93B,GAvFkC,GAuFlCA,MAAOrO,GAvF2B,GAuF3BA,OAETuiG,GAAsB,CAC1Bp/D,OAAQ,CACN/kC,MAAO,CACLu0B,KAAMnb,EACNqvB,MAAOlvB,EACPmvB,SAAUlvB,EACVotB,WAAYntB,EACZkvB,KAAMjvB,GAERkvB,SAAU,CACRrU,KAAMlb,EACNovB,MAAO9uB,EACP+uB,SAAU9uB,EACVgtB,WAAY/sB,EACZ8uB,KAAM7uB,GAER+uB,qBAAsB,GAExB1Z,OAAQ,CACNoF,KAAMjb,EACNmvB,MAAO1uB,EACP2uB,SAAU1uB,EACV4sB,WAAY3sB,EACZ0uB,KAAMzuB,EACN+F,SAAU9F,GAEZ4jB,KAAM,CACJ+K,aAAczwC,KAAKG,MAAMoJ,IACzBmnC,YAAa1wC,KAAKG,MAAMyX,IACxB+4B,eAAgB5uB,EAChB6uB,eAAgB5uB,IAElBtS,KAAM,CAEJmhC,UAAW5uB,GACX+uB,qBAAsB,CACpBrS,QAASzc,GACTvf,MAAOwf,GAEP4uB,UAAW3uB,GACXgsB,MAAO/rB,GACP+tB,MAAO9tB,IAKT1a,QAAS4j
G,GAIXtzF,OAAQ,CACN+4B,MAAO,CAGLC,OAAQ3uB,GACR4uB,2BAA4B3uB,GAC5B4uB,YAAa3uB,IAEf4uB,MAAO,CAGLH,OAAQxuB,GACRyuB,2BAA4BxuB,IAE9B2uB,UAAW,CACTlB,MAAOxtB,GACP0tB,KAAMztB,GACNwtB,SAAUvtB,GACVyrB,WAAYxrB,IAEdwuB,WAAY,CACVnB,MAAOptB,GACPstB,KAAMrtB,GACNotB,SAAUntB,GACVqrB,WAAYprB,GACZquB,cAAe,GAEjB7uC,MAAO,CACLytC,MAAOhtB,GACPktB,KAAMjtB,GACNgtB,SAAU/sB,GACVirB,WAAYhrB,IAEdkuB,MAAO,CACL9S,QAASnb,GACTla,MAAOma,GACP2sB,MAAO1sB,IAETguB,WAAY,CACV/S,QAAShb,GACTguB,eAAgB/tB,IAElB+V,UA1CM,SA0CIxI,GACR,MAAqB,UAAjBA,EAAQqpB,KACHk6C,EAAqB/tF,QAAQwqB,EAAQxuB,OAEzB,eAAjBwuB,EAAQqpB,KACJ,GAAN,OAAUrpB,EAAQid,MAAlB,KAGKjd,EAAQid,QAGnBwD,QAAS,CACPC,KAAM,CACJC,OAAQ,OACRC,MAAO,GAETC,sBAAuB,CACrBF,OAAQ,SACRC,MAAO,IACPrM,KAAM,GAERuM,6BAA6B,EAC7BC,qBAAsB,IAExBC,SAAU,CACRxT,SAAS,EACTh5B,KAAM,cACNg7B,OAAQ,GACRyR,kBAAmB,KACnBC,OAAQ,CACNC,YAAa,IACbC,gBAAiB3rC,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMspD,WACrDv5D,kBAAmB,GACnBpC,MAAOxpC,OAAOU,QAAQyJ,OAAOpK,QAAQ87C,MAAMupD,WAC3Cv5D,aAAc,EACdnC,KAAM,QACND,SAAU,GACV5C,QAAS,IAGbiF,KAAM,CACJ1hC,OAAQ,CACNs8B,WAAY,cAEZsF,cAAe/uB,IAEjBgvB,SAAU,CACRlU,QAAS7a,GACTytB,WAAYxtB,GACZqsB,MAAOpsB,IAET8uB,cAAe,CACb/hB,IAAK,EACL9c,MAAO,EACP40B,OAAQ,EACR90B,KAAM,GAERg/B,gBAAiB,CACfpzC,EAAG,EACHgrB,EAAG,GAELqoB,UAAW/uB,IAEbgvB,UAAW,CACTnS,OAAQ,KACRoS,mBAAoB,KACpBC,kBAAmB,KACnBC,eAAgB,OAId64D,GAAmB,IAAIxpD,KAAMpb,EAAa1gC,QAASmlG,IACzDP,EAAa5kG,QAAUmlG,GACvBR,GAAiB,kBAAMW,SAExB,CAACrnF,EAAY7c,EAAuB6M,EAAWE,EAAeu2F,EAAe5W,EAC9ErvE,EAAkBC,EAAkB+1E,EAAeC,EAAWjH,IAGhE1tF,qBAAU,WACR,GAAI2kG,GAAiBE,EAAa5kG,QAAS,CACzC,IAAMoxD,EAAY+yC,GAAa,CAC7Bl2F,YACAglB,MAAO,EACPxU,mBACAC,qBALuC,EAUrCT,EAFF5D,qBARuC,MAQvBozE,EARuB,IAUrCxvE,EADF3D,mBATuC,MASzB82C,EATyB,EAanCm0C,IAAuBx+F,IAA6B,IAAhBipF,EACpCwV,EAAOv3F,EAAU2a,OAAO7f,KAAKU,OAASumF,EAAa,EAEnD/8D,EAASuyE,EAAO,GAAKA,GAAQv3F,EAAU2a,OAAO7f,KAAKU,OACrD,EACA+7F,EAEEvkG,EAAUu2E,YAAKvpE,EAAU2a,OAAOrX,QAAQvG,KAAI,SAACy8B,EAAOx8B,GAGxD,MAAO,CACLw8B,QACAzrC,MAJYiS,EAAU2a,OAAO7f,KAAKkqB,GAAOwU,GAKzCgC,MAJYgrD,EAAcxpF,OAM3BH,QAAO,SAAC9R,GAAD,OAAmB,OAAZA,EAAEgD,OAAkBhD,EAAEgD,MAAQ,KACzC6oG,EAAe5jG,EAAQwI,OAAS,IAAM87F,EACxCtkG,EACA,CAACijG,IAELQ,EAAc9jG,QAAQmlC,OAAO6D,SAASrU,KAAOlb,EAC7CqqF,EAAc9jG,QAAQuvB,OAAOoF,KAAOjb,EAEpCoqF,EAAc9jG,QAAQmI,KAAK9H,QAAU4jG,EACrCH,EAAc3mE,UACd2mE,EAAczoD,cAEf,CAACh+B,EAAYhQ,EAAWy2F,EAAe1U,EAAYjpF,EAAU0X,EAC9DC,EAAkB+1E,EAAehH,IAGjC,yBAAK7uF,IAAK8hC,EAAc3+B,GAAI0zF,EAAgB32F,UAAW02F,KC9X9CiQ,I,OAAa,SAAC,GAQb,IAPZxnF,EAOW,EAPXA,WACA7c,EAMW,EANXA,sBACA6M,EAKW,EALXA,UACAE,EAIW,EAJXA,cACAqnF,EAGW,EAHXA,sBACAC,EAEW,EAFXA,eACAhB,EACW,EADXA,cAEM/zD,EAAe/gC,iBAAuB,MADjC,EAI+B6B,qBAJ/B,mBAIJy+F,EAJI,KAIWC,EAJX,KAKLwF,EAAe/lG,mBAqDrB,OA3CA63F,2BAAgB,WACd,GAAI92D,EAAa1gC,UAAYigG,EAAe,CAC1C,IAAM0F,EAAW1lG,OAAOC,EAAEwgC,EAAa1gC,SADG,EAGhBoB,EAAsB2nC,wBAAxC93B,EAHkC,EAGlCA,MAAOrO,EAH2B,EAG3BA,OAH2B,EAOtCqb,EADFV,wBANwC,MAMrB,EANqB,EAQpCqoF,EAAmB,CACvBz9C,OAAQloD,OAAOU,QAAQyJ,OAAOpK,QAAQ6lG,WACtCv9C,YAAa/qC,EACbtM,MAAO5X,KAAKG,MAAMyX,GAClBrO,OAAQvJ,KAAKG,MAAMoJ,GACnB+mD,KAAM1pD,OAAOU,QAAQyJ,OAAOpK,QAAQ6lG,YAGtC3F,GAAiB,kBAAMyF,KACvBD,EAAa1lG,QAAU4lG,KAExB,CAAC3nF,EAAYgiF,EAAe7+F,IAG/Bo2F,2BAAgB,WACd,GAAIyI,GAAiByF,EAAa1lG,QAAS,CACzC,IAKM8lG,EAAc,eACfJ,EAAa1lG,QADE,CAElBmoD,OAAQssC,EAAc,GAEtB9qC,KAAO8qC,EAAc,KAAOiR,EAAa1lG,QAAQmoD,OAC7Cu9C,EAAa1lG,QAAQ2pD,KATI,SAA7Bx7C,EAAcsiF,WACVxwF,OAAOU,QAAQyJ,OAAOpK,QAAQ2mC,WAC9B64D,GAAe/K,EAAc,GAAIx0F,OAAOU,QAAQoQ,cAAc6vF,kBAUpEX,EAAcnV,MAAM,OAAQgb,GAC5BJ,EAAa1lG,QAAU8lG,KAExB,CAAC7F,EAAehyF,EAAWE,EAAesmF,IAG3C,yBACE71F,IAAK8hC,EACL3+B,GAAI0zF,EACJ32F,UAAW02F,GAEVvnF,EAAU2a,UC1EJm9E,GAAc,SAAC,GAQd,IAPZ9nF,EAOW,EAPXA,WACAh
Q,EAMW,EANXA,UACAE,EAKW,EALXA,cACAqnF,EAIW,EAJXA,sBACAC,EAGW,EAHXA,eACAhB,EAEW,EAFXA,cACAhH,EACW,EADXA,aAEM/sD,EAAe/gC,iBAAuB,MACtCqmG,EAAsBrmG,mBAFjB,EAMqC6B,oBAAS,GAN9C,mBAMJykG,EANI,KAMcC,EANd,MHpBPjC,KAGJA,GAAe,IAAIz0E,SAAQ,SAAC06D,EAASC,GACnCzkE,YAAW,WACT,IAAMygF,EAAS/iG,SAASkiB,cAAc,UACtC6gF,EAAOnnG,KAAO,kBACdmnG,EAAOt+E,OAAQ,EACfs+E,EAAO12D,IAXa,+BAapB02D,EAAO9b,QAAU,WACfF,EAAOroC,MAAM,iCAEfqkD,EAAOhsE,OAAS,WACd+vD,EAAQ,OAGV,IAAMkc,EAAchjG,SAASg6C,qBAAqB,UAAU,GAC3DgpD,EAAYzqD,WAAoB4lC,aAAa4kB,EAAQC,KACrD,QACFh3E,MAAK,kBAAM,IAAII,SAAQ,SAAC06D,GACzBjqF,OAAO8qF,OAAO7/C,KAAK,gBAAiB,MAAO,CACzCm7D,SAAU,CAAC,YAAa,YACxBp3E,SAAUi7D,YGKX96D,MAAK,WACJ82E,GAAoB,MAGxB,IAAMI,EAAgB3mG,mBAoGtB,OAjGA63F,2BAAgB,WACd,GAAIwO,EAAoBhmG,SAAWsmG,EAActmG,QAAS,CACxD,IAAMumG,EAAY,IAAItmG,OAAO8qF,OAAOyb,cAAcC,UAAUx4F,EAAU2a,QACtEo9E,EAAoBhmG,QAAQ+vD,KAAKw2C,EAAWD,EAActmG,YAE3D,CAACiO,IAGJupF,2BAAgB,WACd,GAAI92D,EAAa1gC,UAAYsmG,EAActmG,SAAWimG,EAAkB,CACtE,IAAMM,EAAY,IAAItmG,OAAO8qF,OAAOyb,cAAcC,UAAUx4F,EAAU2a,QADA,EAKlE3K,EADFjd,aAJoE,MAI5DmN,EAAcnN,MAJ8C,EAMhE0lG,EAAYv4F,EAAcsiF,WAC1BkW,EAAc,IAAIC,IAAI,CAC1B,CAAC,OAAQ3mG,OAAOU,QAAQC,QAAQZ,QAAQ+yF,yBACxC,CAAC,UAAW9yF,OAAOU,QAAQC,QAAQZ,QAAQ8yF,8BAC1C71F,IAAIypG,IAAc,GACfG,EAAuB,CAC3Bx8F,OAAQoqF,EAKRjtC,UAAyB,SAAdk/C,EAAuB,EAAI,EACtC1lG,QACA0oC,SAAU,GACVo9D,MAAO,CAGLC,eAAgB,YAChBC,aAAa,EACbz8D,OAAQ,WACR08D,UAAW,CACTv9D,SAAU,GAEZw9D,UAAW,CACTz9D,MAAO,SAGX09D,MAAO,CACLnmG,MAAOysF,EACPsZ,eAA+B,SAAdL,GAAsC,YAAdA,EACrC,YACA,SACJzH,SAAwB,YAAdyH,OAA0BhpG,GAAa,GACjDwhG,SAAwB,YAAdwH,OAA0BhpG,EAAY,GAChDy4C,UAAW,EACX8wD,UAAW,CACTv9D,SAAU,GAEZw9D,UAAW,CACTz9D,MAAO,SAGX29D,UAAW,CACTn2F,MAAO,MACPrO,OAAQ,OAEVykG,YAAa,WACbztC,WAAY,CACV3X,EAAG,CACDt/C,MAAO,SAGX2kG,eAAe,EACfC,cAAe,MACfC,eAAgB,CACd99D,SAAU,IAEZ/mB,QAAS,CACP8kF,QAAQ,EACRC,cAAc,EACdT,UAAW,CACTv9D,SAAU,IAGdi+D,UAAW,WACXhB,cACAiB,UAAyB,YAAdlB,GAGPmB,EAAiB,CAAC,OAAQ,WAAW5mE,SAAS9yB,EAAcsiF,YAC9D,IAAIxwF,OAAO8qF,OAAOyb,cAAcsB,UAAUpnE,EAAa1gC,SACvD,IAAIC,OAAO8qF,OAAOyb,cAAcuB,UAAUrnE,EAAa1gC,SAE3D6nG,EAAe93C,KAAKw2C,EAAWM,GAE/BP,EAActmG,QAAU6mG,EACxBb,EAAoBhmG,QAAU6nG,KAE/B,CAAC5pF,EAAYhQ,EAAU2a,OAAQza,EAAeuyB,EAAculE,EAAkBxR,EAC/EhH,IAIA,yBACE7uF,IAAK8hC,EACL3+B,GAAI0zF,EACJ32F,UAAW02F,KClIJwS,GAAW,SAAC,GAKX,IAJZ/pF,EAIW,EAJXA,WACAhQ,EAGW,EAHXA,UACAunF,EAEW,EAFXA,sBACAC,EACW,EADXA,eACW,EACqEx3E,EAAxET,6BADG,MACqB,EADrB,IACqES,EAA7CR,sBADxB,MACyC,GADzC,IACqEQ,EAAxBP,sBAD7C,MAC8D,GAD9D,EAILuqF,EAAS,SAAG,GAAMzqF,GAClBxhB,EAAQ3C,KAAKgpB,MAAMpU,EAAU2a,OAAO,GAAKq/E,GAAaA,EAEtDC,EAA0C,IAA5Bj6F,EAAU2a,OAAOnf,OAAe,GAAKgU,EAAiBzhB,EAAQ0hB,EAElF,OACE,yBAAK3b,GAAI0zF,EAAgB32F,UAAW02F,GACjC0S,I,SCtBMC,GAAqB9uG,KAAKgpB,MAAM,GAAK,GAErC+lF,GAAiB,eAACC,EAAD,uDAJC,GAI4BvhE,EAA7B,uDAHA,EAGA,OAC5BuhE,EAAWvhE,GACAwhE,GAAU,SAACv/F,GAAD,IAAOkiF,EAAP,uDAAqBkd,GAArB,OACrB9uG,KAAKg+C,KAAKtuC,EAAKU,OAASwhF,IACbsd,GAAa,SAACC,GAAD,IAAOvd,EAAP,uDAAqBkd,GAArB,OAA4CK,EAAOvd,GAEhEwd,GAAe,SAAC71E,EAASK,GAAV,IAAiBo1E,EAAjB,uDAVG,GAUH,OAC1BhvG,KAAKG,MAAMy5B,EAAQL,GAAWy1E,GACnBK,GAAe,SAAC91E,EAASK,GAAV,IAAiBo1E,EAAjB,uDAZG,GAYH,OAC1BhvG,KAAKG,MAAMy5B,EAAQL,GAAWy1E,GAEnBM,GAAe,SAAC/1E,GAAD,IAAUy1E,EAAV,uDAfG,GAeH,OAAyChvG,KAAK2iE,KAAKppC,GAAWy1E,GAC7EO,GAAgB,SAACJ,GAAD,IAAOH,EAAP,uDAhBE,GAgBiCvhE,EAAnC,uDAhBE,GAgBF,OAC3BztC,KAAK2iE,KAAKwsC,GAAQH,EAAWvhE,GAElB+hE,GAAoB,SAAC5+E,GAAD,IAASo+E,EAAT,uDAnBF,GAmBE,OAC/BhvG,KAAKG,MAAMywB,EAASo+E,IClBP,YACb7iF,EACAoN,EACAtJ,EAHa,GAMT,IAFFw/E,EAEC,EAFDA,aAAcC,EAEb,EAFaA,WAEb,yDADyB,GAA1BV,EACC,EADDA,SAAUW,EACT,EADSA,YAERC,GAAgB,EAEdC,EAAW,SAAAj2E,GACf,IAAMizB,EAAO1gC,EAAGujB,wBACVshC,EAAUo+B,GAAa71E,EAASK,EAAOo1E,GACvC/
9B,EAAUo+B,GAAa91E,EAASK,EAAOo1E,GACvCj7F,EAAO84C,EAAK94C,KAAOi9D,EACnBjgD,EAAM87B,EAAK97B,IAAMkgD,EACjB6+B,EAAcf,GAAeC,EAAUW,GAE7C,MAAO,CACL/1E,QACA7lB,OACAgd,MACA9c,MAAOF,EAAO+7F,EACdjnE,OAAQ9X,EAAM++E,EACdl4F,MAAOk4F,EACPvmG,OAAQumG,EACR9+B,UACAC,YAIE8+B,EAAW,WACfL,EAAWG,EAASD,IACpBA,GAAgB,GAGZpyC,EAAY,SAAA3zD,GAAM,IACdmnE,EAAqBnnE,EAArBmnE,QAASC,EAAYpnE,EAAZonE,QACXtxE,EAAI6vG,GAAkBx+B,EAASg+B,GAE/BgB,EADIR,GAAkBv+B,EAAS+9B,GACRz1E,EAAU55B,EAEnCqwG,IAAqBJ,KAEH,IAAlBA,GAAqBG,IAErBC,GAAoB//E,IAExBw/E,EAAaI,EAASG,IACtBJ,EAAeI,KAKjB,OAFA7jF,EAAGniB,iBAAiB,YAAawzD,GACjCrxC,EAAGniB,iBAAiB,WAAY+lG,GACzB,WACL5jF,EAAGjiB,oBAAoB,YAAaszD,GACpCrxC,EAAGjiB,oBAAoB,WAAY6lG,KC3C1BE,GAAW,SAACvgG,GAA0C,IAAD,yDAAP,GAA1BkiF,EAAiC,EAAjCA,YAAaod,EAAoB,EAApBA,SACtCG,EAAOF,GAAQv/F,EAAMkiF,GACrBr4D,EAAU21E,GAAWC,EAAMvd,GACjC,OAAO0d,GAAa/1E,EAASy1E,IAGzBkB,GAAsB,SAACxgG,GAAmD,IAAD,yDAAP,GAAnCkiF,EAA0C,EAA1CA,YAAaod,EAA6B,EAA7BA,SAAUvhE,EAAmB,EAAnBA,QACpD0hE,EAAOF,GAAQv/F,EAAMkiF,GACrBr4D,EAAU21E,GAAWC,EAAMvd,GAC3Bh6E,EAAQ03F,GAAa/1E,EAASy1E,GAC9BzlG,EAASgmG,GAAcJ,EAAMH,EAAUvhE,GAE7C,MAAO,CAAE71B,QAAOrO,SAAQgwB,QAASv5B,KAAK2iE,KAAKppC,KAGvC42E,GAAoB,CAAC,2BAA4B,2BAEjDC,GAAe,SAACp+F,GAAD,IAASq+F,EAAT,uDAAsBF,GAAtB,OACnBG,yBACGC,OAAOC,kBAAOx+F,GAAQ,SAAArP,GAAK,OAAIA,MAC/B8tG,MAAMJ,IClCI,YAAClkF,GACd,OAAOA,EAAGujB,wBAAwB3e,IAAMnqB,OAAO8pG,YAAc,GAAM,MAAQ,UCgBvEC,GAAS,CACb5/E,IAAK,CAAE8X,OAAQ,OACfA,OAAQ,CAAE9X,IAAK,WAmGF6/E,GAhGE,SAAC,GAAwD,IAAtDlhG,EAAqD,EAArDA,KAAMmhG,EAA+C,EAA/CA,cAAkBtpG,EAA6B,yCACjEupG,EAAUxqG,mBACVyqG,EAAYzqG,mBACZ0qG,EAAW1qG,mBAHsD,EAK7C6B,mBAAS,MALoC,mBAKhE8oG,EALgE,KAKzDC,EALyD,KAMjEC,EAAe7qG,kBAAO,GACtB8qG,EAAc9qG,kBAAQ,GACtB+qG,EAAY/qG,mBAEZgrG,EAAQ,WACZN,EAASrqG,QAAQ4qG,gBACjBL,EAAS,MACTC,EAAaxqG,SAAU,EACvByqG,EAAYzqG,SAAW,GAGnB6qG,EAAY,kBAChBpoC,uBAAsB,WACpB8nC,GAAS,SAAAO,GAOP,OALGN,EAAaxqG,UACY,IAAzByqG,EAAYzqG,SAAkByqG,EAAYzqG,WAAZ,OAAwB8qG,QAAxB,IAAwBA,OAAxB,EAAwBA,EAAc73E,QAErE03E,IAEKG,SAIbtT,2BAAgB,WAuBd,OAtBA6S,EAASrqG,QFjBE,SAACwlB,EAAD,GAAqD,IAA9CsjF,EAA6C,EAA7CA,aAAcC,EAA+B,EAA/BA,WAAcnoG,EAAiB,uDAAP,GAClDynG,EAAqEznG,EAArEynG,SAAUW,EAA2DpoG,EAA3DooG,YAD+C,EACYpoG,EAA9CmqG,kBADkC,MACrB,EADqB,IACYnqG,EAA9B4mD,iBADkB,MACN,EADM,EACHkiD,EAAe9oG,EAAf8oG,WACxDv9B,EAAS3mD,EAAG47C,WAAW,MAEzB4pC,GAAa,EACbJ,EAAgB,aAChBK,EAAc,GACdC,EAAc,aAEZ7kD,EAAQ,WACZukD,IACAM,IACA/+B,EAAO7lB,UAAU,EAAG,EAAG9gC,EAAGvU,MAAOuU,EAAG5iB,QACpCupE,EAAOlmB,aAGHphB,EAAS,SAAC,GAAc,IAAZ97B,EAAW,EAAXA,KAAW,EACQwgG,GAAoBxgG,EAAMnI,GAArDqQ,EADmB,EACnBA,MAAOrO,EADY,EACZA,OAAQgwB,EADI,EACJA,QACvBpN,EAAGvU,MAAQsQ,SAAStQ,GACpBuU,EAAG5iB,OAAS2e,SAAS3e,GACrByjD,IACA6kD,IACA,IAAM3yB,EAAWkxB,GAAa1gG,EAAM2gG,GAE9ByB,EAAU,SAACnvG,EAAOi3B,GACtBk5C,EAAO1iB,UAAY8uB,EAASv8E,GAE5B,IAAMquE,EAAUo+B,GAAa71E,EAASK,EAAOo1E,GACvC/9B,EAAUo+B,GAAa91E,EAASK,EAAOo1E,GAEzC7gD,GAAaujD,GACf5+B,EAAO7lB,UACL+jB,EAAU7iB,EACV8iB,EAAU9iB,EACV4gD,GAAeC,EAAUW,GAAe+B,EACxC3C,GAAeC,EAAUW,GAAe+B,GAI5C5+B,EAAOrC,SACLO,EACAC,EACA89B,GAAeC,EAAUW,GACzBZ,GAAeC,EAAUW,KAI7BjgG,EAAK/K,QAAQmtG,GAEbD,EAAcE,GACZ5lF,EACAoN,EACA7pB,EAAKU,OACL,CACEq/F,eACAC,cAEFnoG,GAGFgqG,EAAgB,YACK,IAAfI,GAAkBG,EAAQpiG,EAAKiiG,GAAYA,IAGjDC,EAAc,SAAAh4E,GACZ23E,IACAI,EAAY/3E,EAEZ,IAAMo3C,EAAUo+B,GAAa71E,EAASK,EAAOo1E,GACvC/9B,EAAUo+B,GAAa91E,EAASK,EAAOo1E,GAEzC7gD,GAAaujD,IACf5+B,EAAO3kB,UAAYA,EACnB2kB,EAAO5kB,YAAc,OACrB4kB,EAAOk/B,WACLhhC,EAAU7iB,EACV8iB,EAAU9iB,EACV4gD,GAAeC,EAAUW,GAAe+B,EACxC3C,GAAeC,EAAUW,GAAe+B,MAMhD,MAAO,CACL1kD,QACAxhB,SACAomE,YAAa,SAAAh4E,GAAK,OAAIg4E,EAAYh4E,IAClC23E,cAAe,kBAAMA,MEvEFU,CACjBlB,EAAUpqG,QACV,CACE8oG,aAAc,YAAyB,IAAtB71E,EAAqB,EAArBA,MAAUizB,EAA
W,0BACpCukD,EAAYzqG,QAAUizB,EACtBo3E,EAASrqG,QAAQirG,YAAYh4E,GAC7By3E,EAAU1qG,QAAU0lB,YAAW,WAC7B6kF,EAAS,CACPp5E,OAAQ,CAAE4X,sBAAuB,kBAAMmd,IACvCjzB,QACAizB,WAED,MAEL6iD,WAAY,WACV0B,EAAYzqG,SAAW,EACvBsiD,aAAaooD,EAAU1qG,SACvB6qG,MAGJjqG,GAEK,kBAAMypG,EAASrqG,QAAQqmD,WAC7B,IAEHmxC,2BAAgB,WAEZ8S,GACAH,EAAQnqG,SACRmqG,EAAQnqG,QAAQuR,OAAO+4F,EAAMr3E,SAAWlqB,EAAKwI,OAAO+4F,EAAMr3E,QAE1D03E,IAEFR,EAAQnqG,QAAU+I,EAClBshG,EAASrqG,QAAQ6kC,OAAO97B,KACvB,CAACA,IAEJ,IAAMwiG,EAAetpG,uBAAY,WAC/BuoG,EAAaxqG,SAAU,IACtB,IAEGwrG,EAAevpG,uBAAY,WAC/BuoG,EAAaxqG,SAAU,EACvB6qG,MACC,IAEG13E,EAAQm3E,GAASmB,GAASnB,EAAMn5E,QAEtC,OACE,kBAAC,WAAD,KACE,4BAAQgW,cAAY,WAAWvoC,IAAKwrG,IACnCE,GAASJ,GACR,kBAAC,IAAD,CACE/2E,MAAO62E,GAAO72E,GACdhC,OAAQm5E,EAAMn5E,OACdo6E,aAAcA,EACdC,aAAcA,GAEbtB,EAAcI,EAAMr3E,MAAOE,MChGhCu4E,GAAQxzB,IAAOgX,KAAV,wEAAGhX,CAAH,kEAMLyzB,GAAQzzB,YAAO0zB,KAAMC,MAAM,CAC/B5kE,GAAI6kE,IACJ9kE,IAAK,IAFI,wEAAGkxC,CAAH,+CAUL6zB,GAAkB,SAAC,GAMI,IAL3BhjG,EAK0B,EAL1BA,KACA0+B,EAI0B,EAJ1BA,MACAukE,EAG0B,EAH1BA,WACAC,EAE0B,EAF1BA,mBACAC,EAC0B,EAD1BA,iBAEMttG,EAAMe,mBACNwzB,EAAQv0B,EAAIoB,SAAWyrG,GAAS7sG,EAAIoB,SAEpC2C,EAAQmjC,mBAAQ,iBAAO,CAAEqmE,SAAS,GAAD,OAAK7C,GAASvgG,EAAKA,MAAnB,SAAiC,CAACA,IAEnEqjG,EACJF,GACC,SAACj5E,EAAOo5E,GAAR,OAAqBH,EAAiB,CAAE3wB,MAAO9zC,EAAOukE,aAAY74E,MAAOk5E,EAAUp5E,WAEhFq5E,EACJL,GAAuB,kBAAMA,EAAmB,CAAE1wB,MAAO9zC,EAAOukE,aAAY74E,WAE9E,OACE,kBAAC,IAAD,CAAMgU,cAAY,kBAAkBJ,QAAM,EAACwlE,WAAW,QAAQvlE,IAAK,EAAGJ,OAAQ,CAAC,EAAG,EAAG,EAAG,IACtF,kBAAC,IAAD,CAAS3lC,QAASqrG,EAAcn5E,MAAOA,EAAOq5E,OAAK,IAChD,gBAAGC,EAAH,EAAGA,OAAaC,EAAhB,EAAW9tG,IAAoBG,EAA/B,wCACC,kBAAC4sG,GAAD,eACExkE,cAAY,wBACZvoC,IAAK,SAAA4mB,GACH5mB,EAAIoB,QAAUwlB,EACdknF,EAAWlnF,IAEbwgB,OAAQymE,EACR9pG,MAAOA,GACH5D,GAEJ,kBAAC2sG,GAAD,KAAQjkE,GACP1+B,EAAKA,KAAKU,OAAS,GAAK,kCAAQV,EAAKA,KAAKU,OAAlB,SAI/B,kBAAC,GAAD,CAAUV,KAAMA,EAAMmhG,cAAekC,MAsB5BO,GAjBI,SAAC,GAAD,IAAG5jG,EAAH,EAAGA,KAAMwI,EAAT,EAASA,OAAQ26F,EAAjB,EAAiBA,iBAAkBD,EAAnC,EAAmCA,mBAAnC,OACjB,kBAAC,IAAD,CAAM9kE,cAAY,aAAaylE,UAAQ,EAAChrE,SAAU,CAAEirE,SAAU,QAAUC,MAAI,GACzEv7F,EAAOvG,KAAI,SAACy8B,EAAOxU,GAClB,OAAOlqB,EAAKkqB,GAAOlqB,KAAKU,OACtB,kBAAC,GAAD,CACE8C,IAAKk7B,EACLA,MAAOA,EACPukE,WAAY/4E,EACZlqB,KAAMA,EAAKkqB,GACXg5E,mBAAoBA,EACpBC,iBAAkBA,IAElB,UCjFJa,GAAsB70B,YAAO0zB,KAAMC,MAAM,CAAE56F,MAAO,QAASrO,OAAQ,OAAQyf,OAAO,IAA/D,iFAAG61D,CAAH,2DAuBVjK,GAnBA,SAAC,GAAD,IAAGpvE,EAAH,EAAGA,SAAH,OACb,kBAAC,IAAD,CAAMsoC,cAAY,kBAAkBH,IAAK,EAAGulE,WAAW,UACrD,kBAAC,IAAD,CAAUvmE,QAAM,GAAEnnC,GAClB,kBAAC,IAAD,CAAMmoC,IAAK,EAAGulE,WAAW,UACvB,kBAAC,IAAD,WACA,kBAACQ,GAAD,MACA,kBAAC,IAAD,gBChBAx7F,GAAS,CACby7F,eAAgB,CAAEz/E,KAAM,UAAWvsB,MAAO,cAC1CisG,cAAe,CAAE1/E,KAAM,eAAgBvsB,MAAO,QAC9CksG,cAAe,CAAE3/E,KAAM,iBAAkBvsB,MAAO,aAChDmsG,oBAAqB,CAAE5/E,KAAM,kBAAmBvsB,MAAO,mBACvDosG,oBAAqB,CAAE7/E,KAAM,kBAAmBvsB,MAAO,mBACvDqsG,aAAc,CAAE9/E,KAAM,MAAOvsB,MAAO,YACpCssG,mBAAoB,CAAE//E,KAAM,YAAavsB,MAAO,cAGrCusG,GAAWxvG,OAAOlB,KAAK0U,IAErB,YAACxP,GACd,OAAIA,KAAMwP,GAAeA,GAAOxP,GAGzB,CAAEf,MADKe,EAAGqE,QAAQ,OAAO,SAAConG,GAAD,iBAAcA,EAAK,GAAGC,kBAAiBrnG,QAAQ,QAAS,IACxEmnB,KAAM,SCVTmgF,GAFG,kBAAM,kBAAC,IAAD,CAAM9qG,OAAO,MAAMqO,MAAM,OAAO01B,WAAW,eCIpDgnE,GAJA,SAAAh1B,GAAK,OAClB,kBAAC,IAAD,eAAIlvC,MAAM,SAASmkE,UAAU,YAAYzmE,cAAY,qBAAwBwxC,KCElEk1B,GAAY31B,YAAOx5E,KAAQmtG,OAAM,kBAAiB,CAC7DrjE,QAAS,aACTslE,SAAS,EACTC,UAAW,OACXjvG,UAAW,MACXyoC,SAL4C,EAAGrX,OAM/C,cAAe,0BANK,sEAAGgoD,CAAH,sEAYhB,gBAAGhoD,EAAH,EAAGA,OAAQrQ,EAAX,EAAWA,MAAX,OAAuBqQ,GAAM,mCAAgCqoD,YAAS,SAATA,CAAmB,CAAE14D,UAArD,QACtB,gBAAGqQ,EAAH,EAAGA,OAAQrQ,EAAX,EAAWA,MAAX,OAAuB04D,YAASroD,EAAS,SAAW,YAA7BqoD,CAA0C,CAAE14D,aAWjEmuF,GAP
F,SAAC,GAAD,IAAGhyG,EAAH,EAAGA,MAAO2nG,EAAV,EAAUA,SAAa5kG,EAAvB,4CACX,kBAAC,IAAD,eAAMooC,cAAY,wBAA2BpoC,GAC3C,kBAAC8uG,GAAD,CAAWpmE,MAAM,UAAUvX,OAAkB,YAAVl0B,EAAqBoD,QAAS,kBAAMukG,EAAS,cAChF,kBAACkK,GAAD,CAAWpmE,MAAM,UAAUvX,OAAkB,YAAVl0B,EAAqBoD,QAAS,kBAAMukG,EAAS,gBCtB9EsK,GAAe/1B,YAAOg2B,YAASxvG,MAASmtG,MAAM,CAClDt+E,KAAM,kBACNka,MAAO,OACPe,QAAS,aACTslE,SAAS,EACTC,UAAW,OACXjvG,UAAW,MACXytG,WAAY,WACZvlE,IAAK,EACLmP,UAAW,eATK,wEAAG+hC,CAAH,yHA8CHi2B,GApBC,SAAC,GAAD,IAAGntG,EAAH,EAAGA,MAAOotG,EAAV,EAAUA,SAAUvvG,EAApB,EAAoBA,SAAUwvG,EAA9B,EAA8BA,SAA9B,OACd,kBAAC,IAAD,CACErnE,IAAK,EACLF,QAAS,CAAC,EAAG,EAAG,GAChBwtD,QAAS+Z,GAAY,CAAEC,KAAM,SAAU7kE,MAAO,aAC9C1C,QAAM,EACNI,cAAY,qBAEZ,kBAAC,IAAD,CAAMonE,eAAe,UAAUpnE,cAAY,4BACzC,kBAAC,IAAD,CAAIsC,MAAM,SAASmkE,UAAU,aAC1B5sG,GAEFotG,GAAY,kBAACH,GAAD,CAAc7uG,QAASgvG,KAEtC,kBAAC,IAAD,CAAMpnE,IAAK,EAAGD,QAAM,EAACI,cAAY,6BAC9BtoC,K,oBCjBM2vG,GAA2C,SACtDvwF,EACAm0E,EACAqc,GAEA,IAAIx9F,EAOArO,EAN4B,kBAArBqb,EAAWhN,MAEpBA,EAAQgN,EAAWhN,MACkB,kBAArBgN,EAAWhN,QAC3BA,EAAK,UAAMgN,EAAWhN,MAAMxS,WAAvB,YAG2Bf,IAA9B00F,EAAcnH,cACiB,kBAAtBhtE,EAAWrb,OAEpBA,EAASqb,EAAWrb,OACkB,kBAAtBqb,EAAWrb,SAC3BA,EAAM,UAAMqb,EAAWrb,OAAOnE,WAAxB,QAGV,IAAM8C,EAAiD,WAA9B0c,EAAW9M,eAE9Bu9F,EAAyBzwF,EAAW3c,SAtCV,SAACqtG,EAAkBptG,GACnD,IAAMqtG,EAAiB7rG,aAAaqgB,QAAb,UAAwBliB,MAAxB,OAAyDytG,IAChF,OAAIC,EACEhqF,OAAOqoB,MAAMroB,OAAOgqF,IACf,KAEH,GAAN,OAAUrtG,EACNqjB,OAAOgqF,GAAkB9rG,KACzB8rG,EAFJ,MAMK,KA2BHC,CAA0B5wF,EAAW3c,SAAUC,GAC/C,KASJ,GAPImtG,IAIF9rG,EAAS8rG,EAAuBtoG,QAAQ,KAAM,KAG5CqoG,EAAwB,CAC1B,IAAMK,EAAkBvtG,EACC,GAArBtB,OAAO8pG,YACc,GAArB9pG,OAAO8pG,YACXnnG,EAAM,UAAMksG,EAAN,MAGR,IAAMC,EAAwB9uG,OAAOU,QAAQoQ,cAAci+F,UAI3D,MAAO,CACLpsG,SACAqO,QACAg+F,SANyC,OAA1BF,OACbrxG,EACAqxG,ICvEAG,GAA4B,CAChCC,KAAM,KACNC,WAAY,MACZC,eAAW3xG,GA8BP4xG,GAtBmC,WACvC,IAAIjsD,EAAwB,GAStBksD,EAAiB,IAAIC,sBARL,SAACC,GACrBA,EAAQzxG,SAAQ,YAAiC,IAAD,EAA7B0xG,EAA6B,EAA7BA,eAAgBv+E,EAAa,EAAbA,OAC3BlC,EAAQ,UAAGo0B,EAAUhnB,MAAK,qBAAGnsB,UAA0BihB,YAA/C,aAAG,EAAqDlC,SAClEA,GACFA,EAASygF,QAIgDR,IAE/D,MAAO,CACLh4B,UAAW,SAAChnE,EAAsB+e,GAChCsgF,EAAe7T,QAAQxrF,GACvBmzC,EAAYA,EAAU9F,OAAO,CAAErtC,UAAS+e,cAE1C0gF,YAAa,SAACC,GACZvsD,EAAYA,EAAUv4C,QAAO,qBAAGoF,UAA0B0/F,OAI7BC,GC3BtBC,GAA0B,SAAC,GAAD,IACrC7xF,EADqC,EACrCA,WADqC,OAGrC,0BAAMtb,MAAO,CAAEk/B,SAAU,WAAYkuE,QAAS,EAAG9+F,MAAO,IACrDgN,EAAWlc,KC0BViuG,GAA8BjtG,aAAaqgB,QAAQ,oBAQ5C6sF,GAAmB,SAAC,GAKnB,IAAD,EAJXhyF,EAIW,EAJXA,WACA5c,EAGW,EAHXA,UACAxC,EAEW,EAFXA,SACAqxG,EACW,EADXA,WAGMtuG,EAAWC,eACjB9B,qBAAU,WACR,OAAO,WACL6B,EAASmO,aAAsB,CAAEhO,GAAIV,QAGtC,IAIH,IAAM+wF,EAAgB7H,GAAuBtsE,EAAWnN,cAZ7C,EAamDtP,oBAAkB,GAbrE,mBAaJ2uG,EAbI,KAaqBC,EAbrB,KAcLC,GAAwB,UAAAxwG,aAAYo2F,aAAZ,eAA0B/nF,WAAY+P,EAAWlc,GAC/Ey1F,2BAAgB,WACd,IAAI2Y,EAAJ,CAGA,IAAM1B,EAAyB4B,GACE,YAA5BpyF,EAAWnN,cACXshF,EAAczH,UAAU1sE,GACvBytB,EAAS8iE,GAAoBvwF,EAAYm0E,EAAeqc,GAC9D6B,cAAkB,SAACt0G,EAAOu0G,GACpBv0G,GACFk0G,EAAWvtG,MAAM6tG,YAAYD,EAAWv0G,KAEzC0vC,GAEHwkE,EAAWpxG,UAAYszF,EAAcvH,eAAe5sE,GACpDmyF,GAA2B,MAC1B,CAACnyF,EAAYm0E,EAAe+d,EAAyBE,EAAuBH,EAC7EE,IAIF,IAAMK,EAAgB5wG,aAAY6wG,MAE5BC,EAAoBhxG,mBACpBixG,EF/C6B,SACnC1gG,EACAygG,GACI,IAAD,EAC+BnvG,oBAAS,GADxC,mBACIwvE,EADJ,KACe6/B,EADf,KAEGC,EAAenxG,iBAAOqxE,GA4B5B,OAvBAjxE,qBAAU,WAkBR,MAjBoC,oBAAzByvG,sBACTF,GAA2Bp4B,UACzBhnE,GACA,SAAC6gG,GACKD,EAAa9wG,UAAY+wG,IACvBJ,EAAkB3wG,UAEpB2wG,EAAkB3wG,QAAQ2C,MAAMqnD,WAAa+mD,EAAe,UAAY,UAG1ED,EAAa9wG,QAAU+wG,EAEvBF,EAAaE,OAKd,WACLzB,GAA2BK,YAAYz/F,MAExC,CAACygG,EAAmBzgG,IAEhB8gE,EEcuBggC,CAAsBd,EAAYS,GAK1DM,EADkBpxG,aAAYqxG,MA9ER,IACD,IAkChB,EA+C2C1vG,oBAAUovG,GA/CrD,mBA+CJO,EA/CI,KA+CiBC,EA/CjB,KAgDXC,cACE,
WAEED,GAAwBR,KAE1BK,EACA,CAACL,IAEH,IAAMU,GAAaV,GAAwBO,EAErCI,EAAgC5xG,iBAAOixG,GAc7C,GAbID,EAAkB3wG,SACjBuxG,EAA8BvxG,UAAY4wG,IAE7CW,EAA8BvxG,QAAU4wG,GAG1C7wG,qBAAU,YACHo6E,IAAem3B,GAActB,IAChCpuG,EAASmO,aAAsB,CAAEhO,GAAIV,OAEtC,CAACA,EAAWO,EAAU0vG,IAGrBn3B,GAEF,OAAOt7E,EAGT,GAAIyyG,EAAY,CAEd,GAAIb,EACF,OACE,kBAAC,GAAD,CAAyBxyF,WAAYA,IAIzC,IAAK0yF,EAAkB3wG,QAAS,CAC9B,IAAMwxG,EAAoBzzF,MAAMC,KAAKkyF,EAAWrxG,UAC7CmM,KAAI,SAACymG,GAAD,OAvHW,SAACvhG,GACvB,IAAMwhG,EAASxhG,EAAQmjE,WAAU,GAC3Bs+B,EAAiBD,EAAOE,iBAAiB,UAc/C,OAZA1hG,EAAQ0hG,iBAAiB,UACtB5zG,SAAQ,SAAC6zG,EAAW5+E,GACnB,IAAM6+E,EAAYH,EAAe1+E,GAC3BzI,EAAUsnF,EAAU1wC,WAAW,MAErC0wC,EAAU7gG,MAAQ4gG,EAAU5gG,MAC5B6gG,EAAUlvG,OAASivG,EAAUjvG,OAEzB4nB,GACFA,EAAQunF,UAAUF,EAAW,EAAG,MAG/BH,EAuGeM,CAAgBP,MAE5BQ,EAA0B7uG,SAASkiB,cAAc,OACvD2sF,EAAwBtvG,MAAMqnD,WAAa,SAE3CwnD,EAAkBxzG,SAAQ,SAACyzG,GACzBQ,EAAwBxsF,YAAYgsF,MAGtCd,EAAkB3wG,QAAUiyG,EAG9B,OACE,oCACE,kBAAC,GAAD,CAAyBh0F,WAAYA,IACrC,yBACErf,IAAK,SAACszG,GACAA,GAAevB,EAAkB3wG,SACnCkyG,EAAYzsF,YAAYkrF,EAAkB3wG,aAYtD,OAJKywG,GAAiBE,EAAkB3wG,UACtC2wG,EAAkB3wG,aAAUtC,GAGvBmB,GCrJIszG,GAAiB,SAAC,GAAD,IAC5Bl0F,EAD4B,EAC5BA,WACA9P,EAF4B,EAE5BA,cACA9M,EAH4B,EAG5BA,UACA+wG,EAJ4B,EAI5BA,aACAlC,EAL4B,EAK5BA,WACAmC,EAN4B,EAM5BA,8BACAC,EAP4B,EAO5BA,mBACAp1G,EAR4B,EAQ5BA,KAR4B,OAU5B,kBAAC,GAAD,CAAkB+gB,WAAYA,EAAYiyF,WAAYA,EAAY7uG,UAAWA,GAC3E,kBAAC,GAAD,CACE4c,WAAYA,EACZ5c,UAAWA,EACXgxG,8BAA+BA,EAC/BC,mBAAoBA,EACpBF,aAAcA,EACdG,sBAAuBpkG,EACvB+hG,WAAYA,EACZhzG,KAAMA,MCHC0D,GAGL,KAgBK6pB,GAAmB,CAC9BC,oBAAqB,GACrBC,GAAI,UAEJL,KAAM,GACNC,QAAS,GAMTC,QAAS,GASTI,UApB8B,SAqB5BC,EAAgB3b,EAAe4b,GAC9B,IADiD1tB,EAClD,uDADkE,GAAI2tB,EACtE,uCACA,GAAqB,OAAjBnqB,IAAwD,qBAAxBA,GAAauK,OAC/C,MAAO,GAGT,GAA0C,qBAA/BvK,GAAauK,OAAO+D,GAC7B,MAAO,GAGT,GAAqD,qBAA1CtO,GAAauK,OAAO+D,GAAOgD,WACpC,MAAO,GAGT,GAAgE,qBAArDtR,GAAauK,OAAO+D,GAAOgD,WAAW4Y,GAC/C,MAAO,GAGT,IAAIve,EAAG,UAAM2C,EAAN,YAAe4b,GAUtB,MAR6C,qBAAlC9tB,KAAK0tB,oBAAoBne,GAClCvP,KAAK0tB,oBAAoBne,GAAO,CAAE6a,MAAO,GAEzCpqB,KAAK0tB,oBAAoBne,GAAK6a,OAAS,EAGzC7a,EAAG,UAAMA,EAAN,YAAavP,KAAK0tB,oBAAoBne,GAAK6a,OAExC,GAAN,OAAUyD,EAAV,wDAAgE3b,EAAhE,8JAEoD4b,EAFpD,uCAGuBA,EAHvB,gBAGwCve,EAHxC,uCAIgBA,EAJhB,iGAKanP,EALb,YAKsB2tB,IAGxBC,WAzD8B,SA0D5BhqB,EAAeiQ,GACd,IAD6BiB,EAC9B,uDADmD,GAAI7H,EACvD,uDADwE,GAExE,MAAO,UAAG,mFAAH,OACqB6H,EADrB,KAEH,wDAFG,uBAIalR,EAJb,4BAKaiQ,EALb,KAMH,6EANG,wBASc5G,EATd,KAUH,8BAGN4gB,aAzE8B,SAyEjB/d,EAAoBge,EAAc3e,EAAaiX,EAAcomF,GACxE,GAA0B,qBAAd18F,EAAIX,GAAuB,CACrC,IAAM42B,EAASj2B,EAAIX,GAEbvT,GADmB4wG,EAAM,eAAOzmE,EAAP,GAAkBA,EAAOymE,IAAWzmE,GACxCjY,GAE3B,YAAUxtB,IAAN1E,EACKwqB,EAGU,oBAAPxqB,EACHA,EAAEyxB,GAAiBE,IAGrB3xB,EAGT,OAAOwqB,GAGT2H,UA7F8B,SA6FpBjc,GACR,GAAIA,EAAMsjG,aACR,OAAOtjG,EAAMsjG,aAEf,GAAkC,qBAAvBtjG,EAAMkc,aAA8B,CAC7C,IAAMpsB,EAAOkQ,EAAMlQ,MAAQkQ,EAAMnN,GAAGgI,MAAM,KAAK,GAC/C,MAAO,UAAI/M,KAAKiuB,aAAajuB,KAAKstB,KAAM,QAASpb,EAAMkc,aAAclc,EAAMkc,cACxE3sB,WADI,YAEHO,EAAKqsB,QAAQrsB,EAAKyK,OAASyF,EAAMkc,aAAa3hB,OAAS,IAAIhL,YAC5D2H,QAAQ,KAAM,KAGnB,OAAQpJ,KAAKiuB,aAAajuB,KAAKstB,KAAM,QAASpb,EAAMob,KAAMpb,EAAMob,MAAO7rB,WACpE2H,QAAQ,KAAM,MAGnBklB,SA7G8B,SA6GrBpc,GACP,MAAkC,qBAAvBA,EAAMkc,aACRpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMkc,aAChD,uCAAyC3sB,WAGtCzB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMob,KAAM,wCAG1DiB,SAtH8B,SAsHrBrc,GACP,MAAkC,qBAAvBA,EAAMkc,aACRpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMkc,aAAc,MAG3DpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,OAAQpb,EAAMob,KAAM,OAG1DkB,WA9H8B,SA8HnBtc,GACT,MAAkC,qBAAvBA,EAAMkc,aACRpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,SAAUpb,EAAMkc,aAAc,GAG7DpuB,KAAKiuB,aAAajuB,KAAKstB,KAAM,SAAUpb,EAAMob,KAAM,IAG5DmB,aAtI8
B,SAsIjBnB,EAAcC,GACzB,IAAMhe,EAAG,UAAM+d,EAAN,YAAcC,GAEjBvpB,EAAQhE,KAAKiuB,aAAajuB,KAAKutB,QAAS,QAAShe,EAAKge,GACzD9rB,WAAW2H,QAAQ,KAAM,KAC5B,GAAIpF,EAAMyI,OAAS,GAAI,CACrB,IAAM6a,EAAItjB,EAAMklB,UAAU,EAAG,IACvB3B,EAAIvjB,EAAMklB,UAAUllB,EAAMyI,OAAS,GAAIzI,EAAMyI,QACnD,MAAM,GAAN,OAAU6a,EAAV,cAAiBC,GAEnB,OAAOvjB,GAGT0qB,YAnJ8B,SAmJlBpB,EAAcC,GACxB,IAAMhe,EAAG,UAAM+d,EAAN,YAAcC,GACvB,OAAOvtB,KAAKiuB,aAAajuB,KAAKutB,QAAS,OAAQhe,EAAK,OAGtDof,cAxJ8B,SAwJhBrB,EAAcC,EAAiBqB,GAC3C,IAAMrf,EAAG,UAAM+d,EAAN,YAAcC,GACvB,OAAOvtB,KAAKiuB,aAAajuB,KAAKutB,QAAS,SAAUhe,EAAK,GAAOqf,GAG/DC,YA7J8B,SA6JlB9pB,EAAY6nG,GACtB,IAAM5wG,EAAIgE,KAAKiuB,aAAajuB,KAAKwtB,QAAS,OAAQzoB,EAAI,KAAM6nG,GAE5D,OAAU,OAAN5wG,EACI,wEAAN,OACoBA,EADpB,UAGK,IAGT8yB,kBAvK8B,SAuKZ/pB,GAChB,GAAgC,qBAArB/E,KAAKwtB,QAAQzoB,IACoB,qBAAhC/E,KAAKwtB,QAAQzoB,GAAIgqB,WAE3B,IACE,OAAOjf,KAAKC,MAAM/P,KAAKwtB,QAAQzoB,GAAIgqB,YACnC,MAAO7oB,GACP,MAAO,CAAC,KAAM,MAGlB,MAAO,CAAC,KAAM,OAGhB8oB,cApL8B,SAoLhBjqB,EAAYyhB,GACxB,MAAgC,qBAArBxmB,KAAKwtB,QAAQzoB,IAA0D,qBAA5B/E,KAAKwtB,QAAQzoB,GAAIa,OAC9D4gB,EAAMxmB,KAAKwtB,QAAQzoB,GAAIa,OAEzB4gB,GAGTyI,qBA3L8B,SA2LTlqB,EAAYyhB,GAC/B,MAAgC,qBAArBxmB,KAAKwtB,QAAQzoB,IACuB,qBAAnC/E,KAAKwtB,QAAQzoB,GAAIkQ,cAEpBjV,KAAKwtB,QAAQzoB,GAAIkQ,cAEnBuR,IAKXvjB,OAAOwqB,iBAAmBA,GC/O1B,I,MCDYgoF,GDCN/G,GAAQxzB,YAAOxwC,KAAV,2EAAGwwC,CAAH,+DAULw6B,GAAc,CAClBC,IAAK,UACL36B,IAAK,MACL76E,IAAK,MACLT,IAAK,OAIDk2G,GAAsBC,gBAAK,YAAgD,IAZ7D50G,EAYgB8D,EAA4C,EAA5CA,GAAI3E,EAAwC,EAAxCA,MAAOkU,EAAiC,EAAjCA,WAAYwhG,EAAqB,EAArBA,eACnD7kG,EAAYpO,cAAY,SAAC0K,GAAD,OAAsBuzC,aAAgBvzC,EAAO,CAAExI,UAEvE/F,EACsB,kBAAnB82G,EACH7kG,EAAU2a,OAAOkqF,GACjB7kG,EAAUgjF,mBAAmB,GAN2C,EAQlC9D,GAAc,CACxDlvE,WAAY,GACZlV,KAAMkF,EACN7Q,QACAgU,YAAa,KACbC,aAAc,KACdnU,KAAM6E,IANA+rF,EARsE,EAQtEA,kBAAmBL,EARmD,EAQnDA,aASrBslB,EAnBe,SAAA/2G,GAAK,MAAI,UAAG02G,GAAY12G,KAAY,GAmBrCg3G,CAAe1hG,GAEnC,OACE,kBAAC,IAAD,CACEs8F,UAAU,WACVnkE,MAAM,SACN7C,OAAQ,CAAC,EAAG,EAAG,EAAG,QAClBO,cAAY,8BAEX4rE,GACC,kBAAC,IAAD,CAAMnsE,OAAQ,CAAC,EAAG,EAAG,EAAG,GAAI6C,MAAM,YAAYtC,cAAY,mCACvD4rE,GAGJjlB,EAAkB9xF,GA1CP,gBADEiC,EA4CDwvF,GA3Cc,IAAxB,WAAkCxvF,EAAKmI,QAAQ,eAAgB,WAgDlE6sG,GAAa,SAAC,GAAqB,IAAnBlxG,EAAkB,EAAlBA,GAAOhD,EAAW,uBAChCkP,EAAYpO,cAAY,SAAC0K,GAAD,OAAsBuzC,aAAgBvzC,EAAO,CAAExI,UAE7E,OAAKkM,GAAyC,IAA5BA,EAAU2a,OAAOnf,OAC5B,kBAACmpG,GAAD,eAAqB7wG,GAAIA,GAAQhD,IADgB,MAoB3C8zG,mBAhBO,SAAC,GAAuD,IAArD9wG,EAAoD,EAApDA,GAAIoM,EAAgD,EAAhDA,cAAemD,EAAiC,EAAjCA,WAAYwhG,EAAqB,EAArBA,eAC9C11G,EAAmB+Q,EAAnB/Q,MACF4D,EADqBmN,EAAZqc,QACOpkB,QAAQ,WAAY,IACpCmnB,EAAO9C,GAAiBa,SAASnd,GAEvC,OACE,kBAAC,IAAD,CAAM64B,IAAK,EAAGG,cAAY,4BACxB,kBAAC,IAAD,CAAMsC,MAAM,SAASypE,wBAAyB,CAAEC,OAAQ5lF,KACxD,kBAAC,GAAD,CAAOkc,MAAM,SAAStC,cAAY,yBAC/BnmC,GAEH,kBAAC,GAAD,CAAYe,GAAIA,EAAI3E,MAAOA,EAAOkU,WAAYA,EAAYwhG,eAAgBA,QELjED,mBAvED,SAAC,GAAkE,IAAhEO,EAA+D,EAA/DA,WAAYC,EAAmD,EAAnDA,eAAgBtxG,EAAmC,EAAnCA,GAAIkc,EAA+B,EAA/BA,WAAYq1F,EAAmB,EAAnBA,aACrDzzF,EAAQ0zF,qBAAWC,KACnBC,EAAoB9zG,mBAFoD,EAGlC6B,qBAHkC,mBAGvEsxG,EAHuE,KAGvDY,EAHuD,KAIxEC,EAA6B7tE,mBAAQ,kBAAM8tE,aAAS,IAAKF,KAAoB,IAJL,EAK1DlyG,qBAAXqyG,EALqE,oBAO9Erc,2BAAgB,WACdqc,GAAQ,KACP,IAT2E,MAWhB51F,EAAW61F,cACvER,GADMnlG,EAXsE,EAWtEA,cAA2B4lG,EAX2C,EAWvD91F,WAIjB+1F,EAAkBluE,mBACtB,iBAAO,CACL/jC,GAAIoM,EAAcpM,GAElBkP,MAAO,OACPrO,OAAQ,OAERkO,aAAc,YACd0H,mBAAoB,MACpBjB,mBAAoBghE,YAAS,SAATA,CAAmB,CAAE14D,UACzCrI,mBAAoB+gE,YAAS,WAATA,CAAqB,CAAE14D,UAC3CzH,oBAAqB,EACrBa,0BAA0B,EAC1BC,iBAAkB,SAAChX,GAAD,OAAWyxG,EAA0B,OAACzxG,QAAD,IAACA,OAAD,EAACA,EAAOlJ,IAE/D6X,WAAY,OACZD,KAAMqN,EAAWrN,KACjBmB,QAASkM,EAAWlM,QACpBG,WAAY6hG,EAAuB7hG,WACnCZ,WAAYyiG,EAAuBziG,WAEnCC,O
AAO,yBACLy7F,eAAgB,CAAC7+F,EAAc8lG,YAAYjH,eAAe,KACzD/uF,EAAWnM,QAAU,CAACshG,IACnBC,GAAc,eAAOp1F,EAAWzM,YAAc,CAAC6hG,QAGvD,CAACllG,EAAe8P,IAGlB,OACE,kBAAC,IAAD,CAAM+oB,IAAK,EAAGD,QAAM,EAACI,cAAY,mBAC/B,yBACEvoC,IAAK60G,EACL9wG,MAAO,CAAEC,OAAQ,OAAQqO,MAAO,QAChCk2B,cAAY,6BAEXssE,EAAkBzzG,SACjB,kBAAC,GAAD,CACEqB,UAAWU,EACXkc,WAAY+1F,EACZ7lG,cAAeA,EACf+hG,WAAYuD,EAAkBzzG,WAIpC,kBAAC,GAAD,CACE+B,GAAIA,EACJuP,WAAY0iG,EAAgB1iG,WAC5BnD,cAAeA,EACf2kG,eAAgBA,QCtElBoB,GAAiBh8B,YAAOnqE,KAAM89F,MAAM,CACxCjlE,OAAQ,CAAC,EAAG,EAAG,EAAG,QAClB6C,MAAO,SACPx4B,MAAO,OACPrO,OAAQ,OACRuxG,UAAW,SACXj2F,KAAM,iBACNgzE,KAAM,SACNlwF,MAAO,aACP,cAAe,kCATG,yEAAGk3E,CAAH,qBA+BLk8B,GAjBF,SAAC,GAAD,IAAG7mF,EAAH,EAAGA,KAAMvsB,EAAT,EAASA,MAAOqzG,EAAhB,EAAgBA,UAAWj1G,EAA3B,EAA2BA,QAA3B,OACX,kBAAC,IAAD,CAAM4nC,IAAK,EAAGulE,WAAW,QAAQplE,cAAY,kBAC3C,kBAAC,IAAD,CAAMl2B,MAAM,OAAOrO,OAAO,OAAOukC,cAAY,uBAC3C,kBAAC,IAAD,CAAMjpB,KAAMqP,EAAMkc,MAAM,SAAS7C,OAAQ,CAAC,EAAG,EAAG,EAAG,GAAI31B,MAAM,OAAOrO,OAAO,UAE7E,kBAAC,IAAD,CAAM6mC,MAAM,SAAStC,cAAY,wBAC9BnmC,GAEFqzG,GACC,kBAAC,IAAD,CAAM5qE,MAAM,SAASmkE,UAAU,YAAYzmE,cAAY,yBACpDktE,GAGJj1G,GAAW,kBAAC80G,GAAD,CAAgB90G,QAASA,MC3BnCk1G,GAAW,SAAC,GAAqB,IAAnBzuE,EAAkB,EAAlBA,KAAM7kC,EAAY,EAAZA,MAAY,EACWu/C,eAAvC9hC,EAD4B,EAC5BA,iBAAkBC,EADU,EACVA,iBAE1B,OACE,kBAAC,GAAD,CACE6O,KAAK,eACLvsB,MAAOA,EACPqzG,UAAS,UAAK51F,EAAiBonB,GAAtB,cAAiCnnB,EAAiBmnB,OAYlD0uE,GAPK,SAAC,GAAD,IAAG9oG,EAAH,EAAGA,OAAQD,EAAX,EAAWA,MAAX,OAClB,kBAAC,GAAD,CAASxK,MAAM,QACb,kBAAC,GAAD,CAAUA,MAAM,OAAO6kC,KAAMr6B,IAC7B,kBAAC,GAAD,CAAUxK,MAAM,KAAK6kC,KAAMp6B,MCOhB+oG,GApBC,SAAC,GAAD,IAAGpB,EAAH,EAAGA,WAAYC,EAAf,EAAeA,eAAgBp1F,EAA/B,EAA+BA,WAAY63E,EAA3C,EAA2CA,UAAW3F,EAAtD,EAAsDA,WAAtD,OACd,kBAAC,IAAD,CAAMnpD,IAAK,EAAGD,QAAM,EAAC91B,MAAM,OAAOk2B,cAAY,qBAC5C,kBAAC,GAAD,CAAa37B,MAAOsqF,EAAWrqF,OAAQ0kF,IACvC,kBAAC,GAAD,CAASnvF,MAAM,UAAUqtG,UAAQ,GAC/B,kBAAC,IAAD,CAAMrnE,IAAK,EAAGD,QAAM,EAACI,cAAY,+BAC9BlpB,EAAW61F,cAAc9oG,KAAI,WAAoBioB,GAApB,IAAG9kB,EAAH,EAAGA,cAAH,OAC5B,kBAAC,GAAD,CACE5B,IAAK4B,EAAcpM,GACnBA,GAAI,CAACqxG,EAAYC,EAAgBp1F,EAAWlc,GAAIoM,EAAcpM,IAAI0/C,KAAK,KACvExjC,WAAYA,EACZq1F,aAAcrgF,EACdmgF,WAAYA,EACZC,eAAgBA,WCTtBoB,GAAgB,SAAC,GAAwD,IAAtDC,EAAqD,EAArDA,QAASC,EAA4C,EAA5CA,MAAOvG,EAAqC,EAArCA,SAAUwG,EAA2B,EAA3BA,YAAgB71G,EAAW,+DACpD81G,GAASH,GAAzB1zG,EADoE,EACpEA,MAAOusB,EAD6D,EAC7DA,KACTunF,EAASH,EAAMtpF,MAAM,EAAG,GACxB0pF,EAAaJ,EAAMlrG,OAAS,EAE5B8rB,EAAOw/E,EAAU,UAAM/zG,EAAN,aAAgB2zG,EAAMlrG,OAAtB,KAAkCzI,EACzD,OACE,kBAAC,GAAD,eAASA,MAAOu0B,EAAM64E,SAAU2G,GAAc3G,GAAcrvG,GACzD+1G,EAAO9pG,KAAI,SAAC6wB,GAAD,OACV,kBAAC,GAAD,CACEtvB,IAAKsvB,EACLtO,KAAMA,EACNvsB,MAAO66B,EACPz8B,QAASw1G,GAAgB,kBAAMA,EAAY/4E,WAsCtCg3E,mBAnBC,SAAC,GAA4C,IAA1CoB,EAAyC,EAAzCA,YAAa7F,EAA4B,EAA5BA,SAAU4G,EAAkB,EAAlBA,YAClCC,EAbY,SAAChB,GACnBA,EAAW,eAAQA,GACnB,IAAMiB,EAAqB3H,GAASvgG,QAAO,SAACC,EAAKynG,GAC/C,OAAMA,KAAWT,UAEVA,EAAYS,GACb,GAAN,mBAAWznG,GAAX,CAAgBynG,KAHsBznG,IAIrC,IAEH,MAAM,GAAN,mBAAWioG,GAAX,YAAkCn3G,OAAOlB,KAAKo3G,KAIlCkB,CAAYlB,GAExB,OACE,kBAAC,IAAD,CAAMjtE,IAAK,EAAGD,QAAM,EAAC91B,MAAM,OAAOk2B,cAAY,qBAC3C8tE,EAAIjqG,KAAI,SAACjJ,EAAIkxB,GAAL,OACP,kBAAC,GAAD,CACE1mB,IAAKxK,EACL2yG,QAAS3yG,EACT4yG,MAAOV,EAAYlyG,GACnBqsG,SAAU,kBAAMA,EAASrsG,IACzBssG,SAAUp7E,IAAUgiF,EAAIxrG,OAAS,EACjCmrG,YAAoB,kBAAP7yG,GAA0BizG,WC7C3CI,GAAel9B,YAAOg2B,YAASxvG,MAASmtG,MAAM,CAClDrjE,QAAS,aACTslE,SAAS,EACTC,UAAW,OACXjvG,UAAW,MACXytG,WAAY,QACZvlE,IAAK,IANW,yEAAGkxC,CAAH,wGAuDHm9B,GAjCF,SAAC,GAA6C,IAA3CX,EAA0C,EAA1CA,QAASC,EAAiC,EAAjCA,MAAOW,EAA0B,EAA1BA,OAAQV,EAAkB,EAAlBA,YAAkB,EAChCC,GAASH,GAAzB1zG,EADgD,EAChDA,MAAOusB,EADyC,EACzCA,KAEf,OACE,kBAAC,IAAD,CA
AM3qB,OAAO,OAAOokC,IAAK,EAAGG,cAAY,iBAAiBJ,QAAM,GAC7D,kBAAC,GAAD,KACE,kBAACquE,GAAD,CACE3tE,MAAK,UAAKzmC,EAAL,aAAe2zG,EAAMlrG,OAArB,KACL8jB,KAAK,eACLnuB,QAASk2G,EACTnuE,cAAY,yBAGhB,kBAAC,GAAD,MACA,kBAAC,IAAD,CACEH,IAAK,EACLpF,SAAU,CAAEirE,SAAU,OAAQ0I,WAAY,UAC1CxuE,QAAM,EACNI,cAAY,4BAEXwtE,EAAM3pG,KAAI,SAAC6wB,GAAD,OACT,kBAAC,GAAD,CACEtvB,IAAKsvB,EACLtO,KAAMA,EACNvsB,MAAO66B,EACPz8B,QAASw1G,GAAgB,kBAAMA,EAAY/4E,YC7CjDo8C,GAAY,SAACU,GAAD,OAChB,kBAAC,IAAD,eACEhyC,WAAY,CAAC,cAAe,WAC5BG,QAAS,CAAC,EAAG,GACb71B,MAAM,QACNrO,OAAO,SACH+1E,KAIF68B,GAAgB,SAAC,GAAD,IAAG/tE,EAAH,EAAGA,MAAOzrC,EAAV,EAAUA,MAAO2nG,EAAjB,EAAiBA,SAAU9kG,EAA3B,EAA2BA,SAA3B,OACpB,kBAAC,IAAD,CAAM+D,OAAO,OAAOmkC,QAAM,GACxB,kBAAC,GAAD,KAASU,GACT,kBAAC,GAAD,CAAMzrC,MAAOA,EAAO2nG,SAAUA,EAAU/8D,OAAQ,CAAC,EAAG,EAAG,EAAG,KAC1D,kBAAC,GAAD,MACA,kBAAC,IAAD,CAAMI,IAAK,EAAGpF,SAAU,CAAEirE,SAAU,OAAQ0I,WAAY,UAAY3uE,OAAQ,CAAC,EAAG,EAAG,EAAG,IACnF/nC,KAoDQ42G,GA/CC,SAAC,GASV,IARLz0G,EAQI,EARJA,MACAoyG,EAOI,EAPJA,WACAC,EAMI,EANJA,eACAY,EAKI,EALJA,YACAh2F,EAII,EAJJA,WACAkyE,EAGI,EAHJA,WACA2F,EAEI,EAFJA,UACG/2F,EACC,8GACoByC,mBAAS,WAD7B,mBACGs8B,EADH,KACS43E,EADT,KAGEC,EAAuB,YAAT73E,GAA+B,YAATA,EAElCk3E,EAAgB/2F,EAAhB+2F,YAER,OACE,kBAAC,GAAD,eAAW7tE,cAAY,cAAiBpoC,GACrC42G,GACC,kBAAC,GAAD,CACEjB,QAAS52E,EACT62E,MAAOV,EAAYn2E,GACnB7f,WAAYA,EACZq3F,OAAQ,kBAAMI,EAAQ,YACtBd,YAAsB,kBAAT92E,GAA4Bk3E,KAG3CW,GACA,kBAAC,GAAD,CAAeluE,MAAOzmC,EAAOhF,MAAO8hC,EAAM6lE,SAAU+R,GACxC,YAAT53E,GACC,kBAAC,GAAD,CAASm2E,YAAaA,EAAa7F,SAAUsH,EAASV,YAAaA,IAE3D,YAATl3E,GACC,kBAAC,GAAD,CACEs1E,WAAYA,EACZC,eAAgBA,EAChBp1F,WAAYA,EACZ63E,UAAWA,EACX3F,WAAYA,OC+CXylB,GA5FI,SAAC,GASN,IARZ3nG,EAQW,EARXA,UACAE,EAOW,EAPXA,cACA8P,EAMW,EANXA,WACA63E,EAKW,EALXA,UACA3F,EAIW,EAJXA,WACAH,EAGW,EAHXA,WACAjpF,EAEW,EAFXA,SACAupF,EACW,EADXA,cAEQz+E,EAAiBoM,EAAjBpM,aADG,EAEyCi0B,mBAClD,kBChCW,SAAC73B,EAAW4D,GAAkB,IACnChV,EAAgFoR,EAAhFpR,KAAcg5G,EAAkE5nG,EAA1EsD,OAAqBO,EAAqD7D,EAArD6D,QAASN,EAA4CvD,EAA5CuD,YAAaG,EAA+B1D,EAA/B0D,WAAY6qE,EAAmBvuE,EAAnBuuE,eAC/DR,EAAcn/E,EAAKiV,GACnBgkG,EAAkBj5G,EAAK2U,GAGvBukG,GAFUlkG,GAAgB,YAAIkM,MAAMi+D,EAAYvyE,SAASuB,KAAI,SAACk3C,EAAGjvB,GAAJ,OAAcA,MAEnDjmB,QAAO,SAACC,EAAUgmB,GAC9C,IAAM+iF,EAAah6B,EAAY/oD,GACzB+iF,KAAc/oG,IAClBA,EAAI+oG,GAAc,CAChBzkG,OAAQ,GACR0kG,QAAS,GACThC,YAAa,GACbz3B,eAAgB,KAGpB,IAAM05B,EAAQjpG,EAAI+oG,GAClBE,EAAMD,QAAQnsF,KAAKmJ,GACnBijF,EAAM3kG,OAAOuY,KAAKgsF,EAAgB7iF,IAClCijF,EAAM15B,eAAe1yD,KAAK0yD,EAAevpD,IAEzC,IAAMghF,EAActiG,EAAW3E,QAAO,SAACmpG,EAAW1uE,GAChD,OAAOouE,EAAYpuE,GAAOxU,GAAnB,eACEkjF,EADF,eACc1uE,EAAQouE,EAAYpuE,GAAOxU,KAC5CkjF,IACH,IAEH,OADAD,EAAMjC,YAAYnqF,KAAKmqF,GAChBhnG,IACN,IAEGsE,EAASxT,OAAOlB,KAAKk5G,GAAehsF,MACxC,SAACzF,EAAGC,GAAJ,OAAUwxF,EAAcxxF,GAAG0xF,QAAQxsG,OAASssG,EAAczxF,GAAG2xF,QAAQxsG,UAGjE2sG,EAAY7kG,EAAOvG,KAAI,SAACy8B,GAAD,OAAWsuE,EAActuE,MAEhD4uE,EAAmBD,EAAUprG,KAAI,SAACkrG,GACtC,OAAOvkG,EAAW3E,QAAO,SAACC,EAAKw6B,GAC7B,IAAM6uE,EAAc,IAAIC,IACtBL,EAAMjC,YAAYjnG,QAAO,SAACwpG,EAAgBvC,GACxC,OAAOA,EAAYxsE,GAAZ,sBAAyB+uE,GAAzB,YAA4CvC,EAAYxsE,KAAU+uE,IACxE,KAEL,OAA4B,IAArBF,EAAYv3E,KAAa9xB,EAAzB,eAAoCA,EAApC,eAA0Cw6B,EAAQ1pB,MAAMC,KAAKs4F,OACnE,OAGL,MAAO,CAAE/kG,SAAQxI,KAAMqtG,EAAWnC,YAAaoC,GDfvCI,CAAUxoG,EAAW4D,KAC3B,CAACA,EAAc5D,IAFHyoG,EAFH,EAEH3tG,KAAoBwI,EAFjB,EAEiBA,OAAQ0iG,EAFzB,EAEyBA,YAMlClyG,EAIEkM,EAJFlM,GACUgH,EAGRkF,EAHF2a,OAAU7f,KACV+I,EAEE7D,EAFF6D,QACAN,EACEvD,EADFuD,YAsCImlG,EAAmB7wE,mBAAQ,WAC/B,OAAO4wE,EAAa1rG,KAAI,SAAC4rG,GACvB,MAAO,CACLrlG,OAAQqlG,EAAWrlG,OACnBxI,MACkB,IAAhBinF,GAAqBA,EAAajnF,EAAKU,UAAYumF,KAAcjnF,GAC7D6tG,EAAWp6B,eACXo6B,EAAWX,QAAQjrG,KAAI,SAACioB,GAAD,OAAWlqB,EAAKinF,GAA
Y/8D,EAAQ,OAAO,SAG3E,CAAClqB,EAAM2tG,EAAc1mB,IAExB,OACE,kBAAC,IAAD,CAAMjpD,QAAM,EAAC91B,MAAM,OAAOrO,OAAO,OAAOokC,IAAK,EAAGF,QAAS,CAAC,EAAG,IAC3D,kBAAC,GAAD,CACE/9B,KAAM4tG,EACNplG,OAAQA,EACR26F,iBApDmB,SAAC,GAAkC,IAAhCF,EAA+B,EAA/BA,WAAY/4E,EAAmB,EAAnBA,MAAOE,EAAY,EAAZA,MACvCsU,EAAQivE,EAAa1K,GAAYz6F,OAAO0hB,GACtCjyB,EAAU6zG,GAASrjG,GAAnBxQ,MAER,OACE,kBAAC,GAAD,CACEmyB,MAAOA,EACPnyB,MAAK,UAAKA,EAAL,aAAeymC,GACpB2rE,WAAY7hG,EAAOy6F,GACnBqH,eAAgB5rE,EAChBwsE,YAAayC,EAAa1K,GAAYiI,YAAYhhF,GAClDhV,WAAYA,EACZkyE,WAAYA,EACZ2F,UAAWA,KAwCXmW,mBAnCqB,SAAC,GAA2B,IAAzBD,EAAwB,EAAxBA,WAAY74E,EAAY,EAAZA,MAClCsU,EAAQl2B,EAAOy6F,GACbhrG,EAAU6zG,GAAS/iG,GAAnB9Q,MAER,OACE,kBAAC,GAAD,CACEmyB,MAAOA,EACPnyB,MAAK,UAAKA,EAAL,aAAeymC,GACpB2rE,WAAY3rE,EACZwsE,YAAaA,EAAYjI,GACzB/tF,WAAYA,EACZkyE,WAAYA,EACZ2F,UAAWA,OAyBb,kBAAC,IAAD,CAAM3uD,cAAY,mBAAmBonE,eAAe,WAClD,kBAAC,GAAD,KAASxsG,GACT,kBAAC,GAAD,CACEoM,cAAeA,EACfmiF,cAAeA,EACfvpF,SAAUA,EACVopF,WAAYA,EACZliF,UAAWA,OEzDR4oG,GAAgB,SAAC,GAyBhB,IAxBZ54F,EAwBW,EAxBXA,WACA7c,EAuBW,EAvBXA,sBACA6M,EAsBW,EAtBXA,UACAE,EAqBW,EArBXA,cACA2C,EAoBW,EApBXA,aACAzG,EAmBW,EAnBXA,OACAhJ,EAkBW,EAlBXA,UACA8sB,EAiBW,EAjBXA,YACA0tE,EAgBW,EAhBXA,WACA7J,EAeW,EAfXA,qBACAtG,EAcW,EAdXA,aACAp9E,EAaW,EAbXA,qBACAw/E,EAYW,EAZXA,kBACA2G,EAWW,EAXXA,cACAzE,EAUW,EAVXA,WACAjpF,EASW,EATXA,SACA2uF,EAQW,EARXA,wBACAC,EAOW,EAPXA,8BACAE,EAMW,EANXA,YACAnB,EAKW,EALXA,UACAxE,EAIW,EAJXA,iBACAzC,EAGW,EAHXA,aACAsS,EAEW,EAFXA,wBACAC,EACW,EADXA,yBAEMp+F,EAAWC,eAEX80F,EAAmB92F,aAAY+2F,MAC/BhB,EAAyB3zF,uBAAY,YAAkC,IAA/BuJ,EAA8B,EAA9BA,MAAOC,EAAuB,EAAvBA,OAAQC,EAAe,EAAfA,SAC3D9J,EAASwC,aAA6B,CAAEoH,QAAOC,SAAQC,cAKrD9J,EADE+0F,EACO3yF,aAA0B,CACjCwH,MAAOu0F,EACPt0F,OAAQu0F,IAGDtwF,aAAyB,CAChClE,MAAOu0F,EACPt0F,OAAQu0F,EACRj+F,GAAIV,OAGP,CAACA,EAAWO,EAAU+0F,EAAkBoJ,EAAyBC,IAE9D5N,EAAgB7H,GAAuBz5E,GACrC65E,EAAcyH,EAAdzH,UACF6K,EAAwB7K,EAAU1sE,GACpChf,KAAW,6BAAD,OACmBgf,EAAW9M,gBAAkB,SADhD,kBAECL,EAFD,6BAIV7R,KACA,gBADU,kBAEC6R,EAFD,WAIR2kF,EAAc,UAAM3kF,EAAN,YAAsBzP,EAAtB,UACdivF,GAAgC,IAAhBN,IAAsBE,EAM5C,OCpGiC,SAAC,GAOA,IANlCjyE,EAMiC,EANjCA,WACAhQ,EAKiC,EALjCA,UACAmkF,EAIiC,EAJjCA,cACApC,EAGiC,EAHjCA,WACAlC,EAEiC,EAFjCA,kBACAwC,EACiC,EADjCA,cAGMwmB,EAA2Bn3G,iBAA+B,IAGhEw7F,cAAS,WAAO,IACNt9E,EAAgBI,EAAhBJ,YAGR,GAAKA,IAAeQ,aAAQR,GAA5B,CAGA,IAAMrT,EAAiByD,EAAU4hF,gBAC3BO,EAAeniF,EAAUoiF,cAC/B7lF,EAAexM,SAAQ,SAAC+M,EAAeE,GACrC,IAAM8rG,EAAgBl5F,EAAY,iBAAD,OAAkB9S,EAAc0Z,iBAC5D5G,EAAY,iBAAD,OAAkBuyE,EAAanlF,GAAGwZ,cAAlC,QAGhBqyF,EAAyB92G,QAAU82G,EAAyB92G,QAAQu9C,OAClEn6C,SAASof,eAAeu0F,WAK9Bh3G,qBAAU,WACR,GAAI+2G,EAAyB92G,QAAQyJ,OAAQ,CAC3C,IAAMutG,EAA0B5kB,EAAcxxF,QAAQqd,GAChDg5F,EAAYD,EAAwB/1E,SAAS,QAGnD,GAAyB,SAArBhzB,EAAUs8B,SAAsBysE,EAAwB/1E,SAAS,cAAe,CAAC,IAC3El4B,EAAUkF,EAA0B2a,OAApC7f,KACFizF,GAA6B,IAAhBhM,EACdjnF,EAAKU,OAAS,EACdumF,EAIC/9D,EAAMlpB,EADKkuG,EAAYjb,EAAcjzF,EAAKU,OAASuyF,EAAa,GAGtE/tF,EAAU4hF,gBAAgB7xF,SAAQ,SAAC+M,EAAemsG,GAChD,IAAMl7G,EAASs0F,IAAkBr+D,EAC7B,GACA67D,EAAkB77D,EAAIilF,EAAiB,IACrChnG,EAAU4mG,EAAyB92G,QAAQk3G,GAC7ChnG,IACFA,EAAQsmB,UAAR,UAAuBx6B,WAK9B,CAACiiB,EAAYhQ,EAAWmkF,EAAepC,EAAYlC,EAAmBwC,IDqCzE6mB,CAAoB,CAClBl5F,aAAYhQ,YAAWmkF,gBAAepC,aAAYlC,oBAAmBwC,kBAGlD,iBAAjBx/E,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZhQ,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChB3kF,aAAcA,EACd+qF,WAAYA,EACZxxF,OAAQA,EACRhJ,UAAWA,EACX2wF,qBAAsBA,EACtB1jF,qBAAsBA,EAGtB/B,IAAKsvF,EACL/N,kBAAmBA,EACnB2G,cAAeA,EACfzE,WAAYA,EACZ0F,wBAAyBA,EACzBE,uBAAwBA,EACxBlB,UAAWA,EACXpE,cAAeA,EACf7C,aAAcA,EACdqI,UAAWiK,EACX5P,WAAY6P,IAKG,UAAjBlvF,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZhQ,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgB
A,EAChB3kF,aAAcA,EACdqd,YAAaA,EACb0tE,WAAYA,EACZxxF,OAAQA,EACRhJ,UAAWA,EACX2wF,qBAAsBA,EACtB1jF,qBAAsBA,EACtBw/E,kBAAmBA,EACnB2G,cAAeA,EACfzE,WAAYA,EACZjpF,SAAUA,EACV2uF,wBAAyBA,EACzBE,uBAAwBA,EACxBC,YAAaA,EACbnB,UAAWA,EACXpE,cAAeA,EACf7C,aAAcA,EACdqI,UAAWiK,EACX5P,WAAY6P,IAKG,cAAjBlvF,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZ7c,sBAAuBA,EACvB6M,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChBzD,qBAAsBA,EACtB1jF,qBAAsBA,EACtBmmF,cAAeA,EACfhH,aAAcA,EACdsS,wBAAyBA,EACzBC,yBAA0BA,IAKX,UAAjBlvF,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZ7c,sBAAuBA,EACvB6M,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChBzD,qBAAsBA,EACtBhC,WAAYA,EACZjpF,SAAUA,EACVuH,qBAAsBA,EACtBw/E,kBAAmBA,EACnB2G,cAAeA,EACfC,UAAWA,EACXpE,cAAeA,EACf7C,aAAcA,IAKC,UAAjB38E,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZ7c,sBAAuBA,EACvB6M,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChBhB,cAAeA,IAKA,WAAjB3jF,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZhQ,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChBhB,cAAeA,EACfhH,aAAcA,IAKC,aAAjB38E,EAEA,kBAAC,GAAD,CACEmN,WAAYA,EACZhQ,UAAWA,EACXunF,sBAAuBA,EACvBC,eAAgBA,IAKD,aAAjB3kF,EAEA,kBAAC,GAAD,CACE7C,UAAWA,EACXE,cAAeA,EACf8P,WAAYA,EACZ63E,UAAWiK,EACX5P,WAAY6P,EACZhQ,WAAYA,EACZjpF,SAAUA,EACVupF,cAAeA,IAMnB,kBAAC,GAAD,CACEryE,WAAYA,EACZhQ,UAAWA,EACXE,cAAeA,EACfqnF,sBAAuBA,EACvBC,eAAgBA,EAChB3kF,aAAcA,EACdzG,OAAQA,EACRhJ,UAAWA,EACX2wF,qBAAsBA,EACtBtG,aAAcA,EACdf,UAAWA,EAAU1sE,GACrB3P,qBAAsBA,EACtBmmF,cAAeA,EACfkB,8BAA+BA,EAC/B3F,WAAYA,EACZjpF,SAAUA,EACV2uF,wBAAyBA,EACzBE,uBAAwBA,EACxBC,YAAaA,EACbnB,UAAWA,EACXjH,aAAcA,EACdqI,UAAWiK,EACX5P,WAAY6P,KEjPLoX,GAAQvE,gBACnB,YAkBc,IAjBZ50F,EAiBW,EAjBXA,WACcnN,EAgBH,EAhBXmN,WAAcnN,aACd1P,EAeW,EAfXA,sBACA6M,EAcW,EAdXA,UACAE,EAaW,EAbXA,cACAggB,EAYW,EAZXA,YACA9sB,EAWW,EAXXA,UACAw6F,EAUW,EAVXA,WACAl1F,EASW,EATXA,aACAD,EAQW,EARXA,iBACAglF,EAOW,EAPXA,aACAp9E,EAMW,EANXA,qBACA+oG,EAKW,EALXA,wBACA9oG,EAIW,EAJXA,UACA4D,EAGW,EAHXA,mBACAs/E,EAEW,EAFXA,sBACAvB,EACW,EADXA,iBAEMonB,EAAe/D,qBAAWC,KAC1B+D,EAAqB13G,aAAY23G,MAE/B7sB,EADcJ,GAAuBz5E,GACrC65E,UAJG,EASP1sE,EAHF7gB,aANS,MAMD+Q,EAAc/Q,MANb,EAOTgU,EAEE6M,EAFF7M,YAPS,EASP6M,EADF5M,oBARS,MAQMkmG,EARN,EAaLvlB,EAAuBlsD,mBAC3B,kBACE73B,EAAU4hF,gBAAgB7kF,KAAI,SAAAD,GAAa,OACX,IAA9BoH,EAAmB1I,QAAsB0I,EAAmB8uB,SAASl2B,QAEzE,CAACkD,EAAU4hF,gBAAiB19E,IAGxBslG,EACJ9sB,EAAU1sE,IAAehe,OAAOU,QAAQC,QAAQZ,QAAQ03G,eAEpDC,EACJF,GACAx3G,OAAOU,QAAQC,QAAQZ,QAAQ43G,gBAC9B35F,EAAWtL,kBAER/Q,EAAWC,eACXg2G,EAAoB/xE,mBAAQ,WAKhC,IAAMgyE,EAA6B/5G,OAAOsN,OAAO8C,EAAc+D,YAAYlH,KAAI,SAAAhS,GAAC,OAAIA,EAAEklB,QAChF65F,EAAmC9pG,EAAU4hF,gBAAgB/kF,QACjE,SAAA9R,GAAC,OAAK8+G,EAA2B72E,SAASjoC,MAE5C,OAAO8+G,EAA2Bv6D,OAAOw6D,KACxC,CAAC9pG,EAAU4hF,gBAAiB1hF,EAAc+D,aAC7CnS,qBAAU,WACR6B,EACE8B,aAA0B,CACxB6F,aAAc4E,EAAcqc,QAC5BnpB,YACAgI,gBAAiB4U,EAAW5T,OAC5Bf,sBAAuB2U,EAAWjM,aAClCxH,eAAgBqtG,OAGnB,CACDA,EACA55F,EAAW5T,OACX4T,EAAWjM,aACX7D,EAAcqc,QACdnpB,EACAO,IAzDS,MA4DsEurF,GAAc,CAC7FlvE,aACAlV,KAAMkF,EACN7Q,QACAgU,cACAC,eACAnU,KAAMmE,IANAysF,EA5DG,EA4DHA,kBAAmBM,EA5DhB,EA4DgBA,oCAAqCX,EA5DrD,EA4DqDA,aA5DrD,EAqE+BjsF,mBAAwB,MArEvD,mBAqEJw2G,EArEI,KAqEWC,EArEX,KAuELC,EAAkBr4G,aAAYs4G,MAC9BC,EAAoBn2G,uBACxB,SAAC21F,EAAaygB,GACZ,GAAIH,EAAiB,CACnB,IAAMn9B,EAASs9B,EACX,CAAEh3G,UAAW,KAAM0F,SAAU6wF,GAC7B,CAAEv2F,YAAW0F,SAAU6wF,GAC3Bh2F,EAASmC,aAAyBg3E,SAElCk9B,EAAiBrgB,KAGrB,CAACv2F,EAAWO,EAAUs2G,IAElBI,EAAiBz4G,aAAY04G,MAC7BxxG,EAAWmxG,EAAkBI,EAAiBN,EAG9CliB,EAAYrtD,aAAYl6B,EAAU,IAAMA,EAAU,GAAKN,EAAUzC,MAAQm9B,KACzEwnD,EAAa1nD,aAAYl6B,EAAU,IAAMA,EAAU,GAAKN,EAAUxC,OAASk9B,KAE3Eo3D,EAA0Bt3D,aAAY4uE,EAAwB,IAChEA,EAAwB,GACxBppG,EAAUzC,MAAQm9B,KAChBq3D,EAA2Bv3D,aAAY4uE,EAAwB,IACjEA,EAAwB
,GACxBppG,EAAUxC,OAASk9B,KAEjB6vE,EAAevqG,EAAUulF,YAAc7qD,KACvC8vE,EAAcxqG,EAAU6pF,WAAanvD,KAIrC+vE,EAAmB5yE,mBACvB,kBAAMzsC,KAAKgpB,MAAOw5E,EAAa,GAAM1tF,EAAcgb,aAAewf,QAClE,CAACx6B,EAAcgb,aAAc0yE,IAGzBlF,GAAmB92F,aAAY+2F,MAE/B+hB,GAA+BC,cACnC,SAAAC,GACEj3G,EAASoC,aAA0B60G,MAErC,KAGIljB,GAAgC1zF,uBAAY,WAChD02G,GAA6BG,UAC5B,CAACH,KAKEI,GAA8B92G,uBAClC,YAOO,IANLuJ,EAMI,EANJA,MACAC,EAKI,EALJA,OACAwjB,EAII,EAJJA,SAII,IAHJ+pF,8BAGI,SAFJrpG,EAEI,EAFJA,qBACAwmF,EACI,EADJA,8BAEA,KAAI1qF,EAASD,GAAb,CAGA,IAAIytG,EAAcP,EAEZQ,EAAiB7/G,KAAKgpB,MAAM8tE,EAAa2F,GAE3CqjB,EAAc9/G,KAAKgpB,MAAM7W,GACzB4tG,EAAe//G,KAAKgpB,MAAM5W,GACxB4rE,EAAkBppE,EAAUwmE,kBAAoB9rC,KAEtD,GAAIwtD,EAA+B,CACjC,IAAMkjB,EAAQb,EAAenhC,EACvBztE,EAAO6uG,EAAcphC,EAEvB+hC,EAAexvG,IACjBuvG,GAAe1tG,EAAS7B,EACxBwvG,EAAexvG,GAGbuvG,EAAcE,IAChBF,EAAcE,GAUlB,IAAIC,GAHJF,GAAgB/hC,EAAmB+hC,EAAe/hC,IADlD8hC,GAAeA,EAAc9hC,GAOzB6hC,EAAiB7hC,EAAkB4hC,IACrCA,EAAcC,EAAiB7hC,GAMjC,IAAIkiC,GAAa,EACjB,GAAID,EAAiBJ,GAAkBI,EAAiBL,EAAa,CAGnE,IAAM7U,IAFN6U,EAAcP,GAEYY,GAAkB,EAG5CA,GAFAF,GAAgBhV,IAChB+U,GAAe/U,GAEfmV,GAAa,EAGf,IAAMC,EAA8B,EAAlBniC,EACZoiC,EAAWpgH,KAAKC,IAAI8/G,EAAejpB,GAGvC92F,KAAKC,IAAI4/G,EAAiBI,IAAmBE,GAC7CC,GAAYD,GACZD,IAKE5iB,IACFgiB,GAA6B1pF,SAAS,CACpCzjB,MAAO2tG,EACP1tG,OAAQ2tG,EACR1tG,SAAUrK,EACVsO,yBAEEqpG,GACFL,GAA6BG,SAG/Bl3G,EACE8N,aAAyB,CACvBlE,MAAO2tG,EACP1tG,OAAQ2tG,EACRr3G,GAAIV,EACJsO,0BAKF4pG,GAAkC,oBAAbtqF,GACvBA,EAASkqF,EAAaC,OAG1B,CACEnrG,EAAUwmE,kBACVpzE,EACAO,EACA82G,EACA/hB,GACA6hB,EACAC,EACAE,GACA7iB,EACA3F,IAOEupB,GAA0Bz3G,uBAC9B,SAACuJ,EAAeC,GACd,IAAMkuG,EAAWtgH,KAAKqD,IAAI8O,EAAOgtG,GAC3BoB,EAAYvgH,KAAK8D,IAAIsO,EAAQgtG,GACnCM,GAA4B,CAC1BvtG,MAAOmuG,EACPluG,OAAQmuG,EACRjqG,sBAAsB,EACtBqpG,wBAAwB,MAG5B,CAACD,GAA6BP,EAAcC,IAGxCoB,GAAyB53G,uBAC7B,SAACC,GACC,IAAM+7B,GAAQkyD,EAAa2F,GAAahK,GAAkB5pF,GAEpDy3G,EAAW7jB,EAAY73D,EACzB07E,GAAYnB,GACdkB,GAAwBC,EAHRxpB,EAAalyD,KAMjC,CAACy7E,GAAyBlB,EAAc1iB,EAAW3F,IAG/C2pB,GAA0B73G,uBAC9B,SAACC,GACC,IAAM63G,EAAa5pB,EAAa2F,EAC1B73D,EAAO87E,EAAajuB,GAAkB5pF,GACtC03G,EAAYvgH,KAAK8D,IAAIgzF,EAAalyD,EAAMw6E,GAE9CiB,GADiBE,EAAYG,EACKH,KAEpC,CAACF,GAAyBjB,EAAa3iB,EAAW3F,IAG9C6pB,GAA2B/3G,uBAC/B,SAACC,GACC,IAAM+3G,EAA4C,GAA3BnuB,GAAkB5pF,GACzC,GAAKwE,EAUL,GAAIypF,EAAa2F,EAA2C,KAA9B2iB,EAAcD,GAC1CkB,GAAwBlB,EAAcC,OADxC,CAIA,IAAMrU,GAAOjU,EAAa2F,GAAamkB,EAAkB,EAGzDP,GAFiB5jB,EAAYsO,EACXjU,EAAaiU,QAf7BxiG,EACEsC,aAAsB,CACpBsH,MAAOnS,KAAKgpB,MAAM1b,GAAgBszG,EAAiB,SAgB3D,CACEtzG,EACA/E,EACA8E,EACAgzG,GACAlB,EACAC,EACA3iB,EACA3F,IAIE+pB,GAA4Bj4G,uBAChC,SAACC,GACC,IAAM+3G,EAA4C,GAA3BnuB,GAAkB5pF,GACzC,GAAKwE,EAAL,CAQA,IAAM09F,IACFjU,EAAa2F,IAAc,EAAuB,GAAjBmkB,IAAyB9pB,EAAa2F,IAAc,EAGzF4jB,GAFiB5jB,EAAYsO,EACXjU,EAAaiU,QAV7BxiG,EACEsC,aAAsB,CACpBsH,MAAOnS,KAAKgpB,MAAM1b,GAAgBszG,EAAiB,SAW3D,CAACtzG,EAAc/E,EAAU8E,EAAkBgzG,GAAyB5jB,EAAW3F,IAM3EgqB,GAAuBr0E,mBAC3B,kBACEs0E,aAA2B,CACzB7wG,aAAc4E,EAAcqc,QAC5BnpB,YACAgI,gBAAiB4U,EAAW5T,OAC5Bf,sBAAuB2U,EAAWjM,iBAEtC,CAACiM,EAAW5T,OAAQ4T,EAAWjM,aAAc7D,EAAe9M,IAExDgJ,GAASxK,aAAYs6G,IACrB1lB,GAAgB3uD,mBACpB,kBAAM73B,EAAU4hF,gBAAgB7kF,IAAIuU,aAAK86F,KAAIhwG,OAC7C,CAAC4D,EAAW5D,KAGd,IAAKA,GACH,OAAO,+BAGT,IAAMiwG,GAAgBvzG,GAAYA,GAAY+uF,GAAa/uF,GAAYopF,EACjE9Y,GAAkBppE,EAAUwmE,kBAAoB9rC,KAChDqnD,GAAasqB,GACfjhH,KAAKG,OAAQuN,EAAsBkH,EAAUzC,MAAQm9B,MAAgB0uC,KACpE,EAEC91E,GAAiD,WAA9B0c,EAAW9M,eAE9BmgF,GACJ,kBAAC,GAAD,CACEK,mBAAoBkoB,GACpBjoB,oBAAqBkoB,GACrBjoB,qBAAsBmoB,GACtBloB,sBAAuBooB,KAIrB3oB,GAAgBomB,GACpB,kBAAC,KAAD,CACEv2G,sBAAuBA,EACvBC,UAAWA,EACXC,SAAU2c,EAAW3c,SACrBC,iBAAkBA,KAItB,OACE,oCACE,kBAAC,GAAD,CAEEgL,IAAK+qG,EAAap5F,KAClBD,WAAYA,EACZ7c,sBAAuBA,
[minified JavaScript bundle content — machine-generated build artifact (mangled webpack output); not human-editable, omitted]
ACE,kBAAC,IAAD,CACEllH,MAAM,OACNrO,OAAO,OACPyf,MAAO,EACPiyE,OAAQ,CAAEga,KAAM,MAAO7kE,MAAO,SAAU1K,KAAM,MAAO//B,KAAM,YAE7D,kBAAC,IAAD,CAAM4D,OAAO,MAAM+jC,WAAW,YAAY11B,MAAM,SAChD,kBAAC,IAAD,CAAQsc,KAAK,OAAOga,UAAQ,MCSjB6uF,GAfM,WACnB,IAAM9uH,EAAezH,aAAY4yH,MACjC,OACE,kBAAC,IAAD,CACExrF,GAAG,SACHwI,IAAK7G,aAAathC,EAAc,aAChCtG,MAAM,YACN4B,OAAO,OACPqO,MAAM,OACNqjF,OAAQ,CAAEga,KAAM,MAAOvvE,KAAM,OAC7B6C,SAAS,YCqDAy0F,GA5DA,SAAC,GAAoC,IAAlC5pB,EAAiC,EAAjCA,OAAQgX,EAAyB,EAAzBA,OAAQruC,EAAiB,EAAjBA,WAC1BptC,EAAenoC,YAAYooC,MAEjC,OACE,kBAAC,IAAD,CACElB,QAAM,EACNwnE,eAAe,UACf5nE,WAAW,QACXG,QAAS,CAAC,EAAG,GACb71B,MAAM,OACNs7F,WAAW,SACXvlE,IAAK,EACLnF,SAAS,WACTD,SAAS,UAET,kBAAC,IAAD,CAAMmF,QAAM,EAACC,IAAK,EAAGulE,WAAW,SAAS3pG,OAAO,OAAOg/B,SAAS,UAC9D,kBAAC,IAAD,CAAM6H,MAAM,UAAUvrB,KAAK,eAAetb,OAAO,OAAOqO,MAAM,UAC5Dw7F,GACA,kBAAC,GAAD,CACEl/E,KAAK,kBACLnuB,QAASqkH,EACTL,OAAK,EACLtV,SAAO,EACPtlE,QAAQ,aACRulE,UAAU,SAGb/lE,GAAgBotC,GAAc,kBAAC,GAAD,MAC9BptC,IAAiBotC,GAAc,kBAAC,GAAD,OAElC,kBAAC,IAAD,CAAMruC,QAAM,EAACC,IAAK,EAAGulE,WAAW,UAC9B,kBAAC,IAAD,CAAegkB,IAAI,UAChB,SAAA9M,GAAM,OACL,kBAAC,IAAD,CACEj7E,QAAQ,aACRslE,SAAO,EACPC,UAAU,OACVjvG,UAAU,MACVyuB,KAAK,WACLnuB,QAASqkH,EACTziH,MAAM,kBAIZ,kBAAC,IAAD,CACEwnC,QAAQ,aACRslE,SAAO,EACPC,UAAU,OACVjvG,UAAU,MACVyjH,cAAY,QACZC,cAAY,gBACZj1F,KAAK,OACLvsB,MAAM,aAEPgnC,GAAgB,kBAAC,GAAD,SCzDnBnkB,GAAsB,SAACzlB,GAM3B,IALA,IAE+C6M,EAAO6Y,EAFhDC,EAAK,GACP/qB,EAAI,EACJgrB,GAAK,EAAOC,EAAI,EAGbhZ,GAAK6Y,EAAI1lB,EAAE8lB,OAAOlrB,MAAMmrB,WAAW,IAAI,CAC5C,IAAMC,EAAKnZ,GAAK,IAAMA,GAAK,GACvBmZ,IAAMH,IAERF,IAAKC,GAAK,GACVC,EAAIG,GAENL,EAAGC,IAAMF,EAGX,OAAOC,GAIIM,GAAqB,SAACC,EAAWC,GAK5C,IAJA,IAAMC,EAAKX,GAAoBS,EAAEG,eAC3BC,EAAKb,GAAoBU,EAAEE,eAGxBzrB,EAAI,EAAGwrB,EAAGxrB,IAAM0rB,EAAG1rB,GAAIA,IAC9B,GAAIwrB,EAAGxrB,KAAO0rB,EAAG1rB,GAAI,CACnB,IAAM2rB,EAAIC,OAAOJ,EAAGxrB,IAClB6rB,EAAID,OAAOF,EAAG1rB,IAChB,OAAI2rB,EAAElmB,aAAe+lB,EAAGxrB,IAAM6rB,EAAEpmB,aAAeimB,EAAG1rB,GACzC2rB,EAAIE,EAELL,EAAGxrB,GAAK0rB,EAAG1rB,GAAM,GAAK,EAGlC,OAAOwrB,EAAG/a,OAASib,EAAGjb,QC1BlB6sH,GAAa,SAACC,EAAS7uH,GAAV,gBAA0B6uH,EAA1B,iBAA0C7uH,EAA1C,MAgCJ8uH,GA9BE,SAACvtG,EAAOvhB,EAAU+uH,GACjC,IAAKxtG,IAAUvhB,EAAU,MAAO,GAKhC,IAAM6uH,EArBW,SAAA7uH,GACjB,IAAIgvH,EAAOtzH,SAAS6d,SAASO,OAAO/iB,WAAak4H,UAAUvzH,SAAS6d,SAASQ,SAAShjB,YAStF,OARIi4H,EAAKpqG,SAAL,gBAAuB5kB,EAAvB,QACFgvH,EAAOA,EAAKxwG,UAAU,EAAGwwG,EAAKjtH,OAAS,gBAAS/B,EAAT,KAAqBjJ,WAAWgL,SAGrEitH,EAAKpqG,SAAS,OAChBoqG,EAAOA,EAAKxwG,UAAU,EAAGwwG,EAAKjtH,OAAS,IAGlCitH,EAWSE,CAAWlvH,GAkB3B,MAAO,CACLi0C,WAfiB,CACjBj0C,SAX+C,YAQNuhB,EARM,MAQxCvhB,SAIPwd,IAAI,GAAD,OAAKqxG,EAAL,MAcHM,gBAXsB5tG,EACrBoC,MAAM,GACNrgB,KAAI,WAAeioB,GAAf,MAAGvrB,EAAH,EAAGA,SAAH,MAA0B,CAC7BA,WACAwd,IAAKoxG,GAAWC,EAAS7uH,GACzBua,QAAQ,UAAAw0G,EAAYp6F,MAAK,SAAAzrB,GAAI,OAAIA,EAAKlJ,WAAaA,YAA3C,eAAsD0/E,aAAa,MAE5Er9D,MAAK,SAACzF,EAAGC,GAAJ,OAAUF,GAAmBC,EAAE5c,SAAU6c,EAAE7c,eC1BtCovH,GAVA5+C,YAAO0zB,KAAMC,MAAM,CAChC5kE,GAAI,IACJD,IAAK,EACLulE,WAAY,WAHF,oEAAGr0B,CAAH,oCCkBGjY,GAhBF,SAAC,GAA+B,IAA7Bv4D,EAA4B,EAA5BA,SAAUwd,EAAkB,EAAlBA,IAAKjD,EAAa,EAAbA,OAC7B,OACE,kBAAC,GAAD,CAAQkkB,KAAMjhB,EAAKqpF,eAAe,UAAUznE,QAAS,CAAC,EAAG,EAAG,EAAG,IAC7D,kBAAC,IAAD,CAAMylE,WAAW,SAASvlE,IAAK,GAC7B,kBAAC,IAAD,CAAM9oB,KAAK,OAAOurB,MAAM,WACxB,kBAAC,IAAD,CAAMA,MAAM,SAASq4E,UAAQ,GAC1Bp6G,IAGL,kBAAC,GAAD,CAAMi/B,WAAY1kB,EAAS,UAAY,SAAUwnB,MAAM,SAASpnB,MAAO,IACpEJ,EAAS,OAAS,SCRrB0uG,GAASz4C,YAAOw4C,KAAV,8EAAGx4C,CAAH,iCAMN6+C,GAAa7+C,YAAOnqE,KAAV,kFAAGmqE,CAAH,oBACD,qBAAG5qE,MAAqB,iBAAmB,UAuD3C0pH,GApDS,SAAC,GAAqC,IAAnCr7E,EAAkC,EAAlCA,WAAYk7E,EAAsB,EAAtBA,gBAAsB,EAC3Br1H,oBAAS,GADkB,mBACpDy1H,EADoD,KAC1CC,EAD0C,OAEjC11H
,mBAAS,IAFwB,mBAEpDxF,EAFoD,KAE7C8nH,EAF6C,KAIrDqT,EAAiBl1H,uBAAY,kBAAMi1H,GAAY,SAAA3nF,GAAC,OAAKA,OAAI,IACzDo0D,EAAW1hG,uBAAY,SAAAiB,GAAC,OAAI4gH,EAAS5gH,EAAEiuB,OAAOn1B,SAAQ,IAEtDgjH,EAAQl5E,mBAAQ,WACpB,OAAK9pC,EACE66H,EAAgB/rH,QAAO,qBAAGpD,SACtB+c,cAAcwc,SAASjlC,EAAMyoB,kBAFrBoyG,IAIlB,CAACA,EAAiB76H,IAErB,OACE,kBAAC,GAAD,CACEywG,OAAQwqB,EACRxH,WAAY0H,EACZ1vF,MACE,kBAAC,IAAD,CAAM8kE,WAAW,SAASgC,eAAe,WACvC,kBAAC,IAAD,CAAMvoE,QAAM,EAACyD,MAAM,UAAnB,oBAGA,kBAACstF,GAAD,CAAYzpH,OAAQ2pH,EAAU/4G,KAAK,eAAe6gB,KAAK,QAAQ0K,MAAM,WAIzE,kBAAC,IAAD,CAAM1C,QAAM,EAACC,IAAK,EAAGF,QAAS,CAAC,EAAG,EAAG,IACnC,kBAAC,GAAD,CAAQG,GAAG,IAAId,KAAMwV,EAAWz2B,IAAKqpF,eAAe,SAClD,kBAAC,IAAD,CAAMrwF,KAAK,QAAQ6gB,KAAK,QAAQ0K,MAAM,WACtC,kBAAC,IAAD,CAAMA,MAAM,UAAUkS,EAAWj0C,WAElCs3G,EAAMv1G,QAAU,GACf,kBAAC,IAAD,CAAMq9B,QAAS,CAAC,EAAG,EAAG,EAAG,IACvB,kBAAC,GAAD,CACE9qC,MAAOA,EACP2nG,SAAUA,EACVyzB,SAAU,kBAAC,IAAD,CAAMl5G,KAAK,WAAW6gB,KAAK,QAAQ0K,MAAM,SACnDonF,cAAY,KAIlB,kBAAC,IAAD,CAAM9pF,QAAM,EAACC,IAAK,GACfg4E,EAAMh0G,KAAI,gBAAGtD,EAAH,EAAGA,SAAUwd,EAAb,EAAaA,IAAKjD,EAAlB,EAAkBA,OAAlB,OACT,kBAAC,GAAD,CAAM1V,IAAK7E,EAAUA,SAAUA,EAAUwd,IAAKA,EAAKjD,OAAQA,WC1BxDo1G,GA5BU,SAAC,GAAqC,IAAnC17E,EAAkC,EAAlCA,WAAYk7E,EAAsB,EAAtBA,gBAChCvvH,EAAezH,aAAY4yH,MAE3B7zH,EAAMe,mBAHgD,EAKhCqG,aAAuB,0BAA5CsxH,EALqD,oBAe5D,OARAv3H,qBAAU,WACHu3H,GAAsB14H,EAAIoB,SAC/B4F,aAAkBhH,EAAIoB,QAAS,CAC7BhB,KAAM,sBACN8G,QAAS,CAAE61C,aAAYk7E,uBAExB,CAACA,EAAiBl7E,EAAY27E,IAG/B,kBAAC,IAAD,CACE14H,IAAKA,EACLqoC,GAAG,SACHwI,IAAK7G,aAAathC,EAAc,eAChCtG,MAAM,cACNiQ,MAAM,OACNrO,OAAO,OACP0xF,OAAQ,CAAEga,KAAM,MAAOvvE,KAAM,UCgCpBw4F,GA3DO,CACpBC,OAAQ,CACNx2H,MAAO,gBACPC,QAAS,CACP,kBAAC,IAAD,CAAWsL,IAAI,IAAIk9B,MAAM,UAAzB,uDACuD,OAI3DguF,OAAQ,CACNz2H,MAAO,4BACPC,QAAS,CACP,kBAAC,IAAD,CAAWsL,IAAI,IAAIk9B,MAAM,UACvB,kBAAC,IAAD,CAAWzD,QAAM,EAACyD,MAAM,UAAxB,kBAEa,IAHf,uBAMA,kBAAC,IAAD,CAAWl9B,IAAI,IAAIk9B,MAAM,UACvB,kBAAC,IAAD,CAAWzD,QAAM,EAACyD,MAAM,UAAxB,yBAEa,IAHf,mBAMA,kBAAC,IAAD,CAAWl9B,IAAI,IAAIk9B,MAAM,UACvB,kBAAC,IAAD,CAAWzD,QAAM,EAACyD,MAAM,UAAxB,qBAEa,IAHf,6CAMA,kBAAC,IAAD,CAAWl9B,IAAI,IAAIk9B,MAAM,UACvB,kBAAC,IAAD,CAAWzD,QAAM,EAACyD,MAAM,UAAxB,uBAEa,IAHf,uCAMA,kBAAC,IAAD,CAAWl9B,IAAI,IAAIk9B,MAAM,UACvB,kBAAC,IAAD,CAAWzD,QAAM,EAACyD,MAAM,UAAxB,sCAEa,IAHf,qCAMA,kBAAC,IAAD,CAAWl9B,IAAI,IAAIk9B,MAAM,UAAzB,eACe,IACb,kBAAC,IAAD,CACExC,GAAG,IACHd,KAAK,yCACLhV,OAAO,SACP+U,IAAI,sBACJF,QAAM,EACNyD,MAAM,UANR,sBCZOiuF,GA9BM,WACnB,OACE,kBAAC,KAAD,CAAQtwF,cAAe,CAAEnmC,QAAS,aAC/B,YAA8C,IAA3CmyH,EAA0C,EAA1CA,WAAY/tG,EAA8B,EAA9BA,KAAMguG,EAAwB,EAAxBA,SAAUh+C,EAAc,EAAdA,QAAc,EACjBkiD,GAAa,OAAhCv2H,EADoC,EACpCA,MAAOC,EAD6B,EAC7BA,QACf,OACE,kBAAC,IAAD,CACE0lC,WAAY,CAAC,UAAW,cACxBI,QAAM,EACNC,IAAK,EACLF,QAAS,CAAC,IACVwtD,OAAQ,CAAEga,KAAM,QAAS7kE,MAAO,UAEhC,kBAAC,IAAD,CAAMA,MAAM,SAASzD,QAAM,GACxBhlC,GAEFC,EAAQ+J,KAAI,SAAAwa,GAAE,OAAIA,KACnB,kBAAC,IAAD,eACEvU,MAAM,OACNw2B,MAAM,UACNF,SAAU8tC,GACL+9C,EAAa,CAAEnsF,GAAI,IAAKd,KAAM9gB,GAAS,CAAEjmB,QAASi0H,UCFtDsE,GAtBG,kBAChB,yBAAK1mH,MAAM,KAAKrO,OAAO,KAAKg1H,QAAQ,YAAYjuE,KAAK,OAAOkuE,MAAM,8BAChE,0BACEhzG,EAAE,0NACF8kC,KAAK,QACLxB,OAAO,YAET,0BACE2vE,SAAS,UACTC,SAAS,UACTlzG,EAAE,gXACF8kC,KAAK,YAEP,0BACEmuE,SAAS,UACTC,SAAS,UACTlzG,EAAE,6nBACF8kC,KAAK,cCHIquE,GAZO,kBACpB,kBAAC,IAAD,CAAMzrB,WAAW,SAAS5lE,WAAY,CAAC,UAAW,cAAeI,QAAM,EAACC,IAAK,EAAGF,QAAS,CAAC,KACxF,kBAAC,IAAD,CAAW2C,MAAM,SAASzD,QAAM,EAACiiC,UAAU,UAA3C,kCAGA,kBAAC,GAAD,MACA,kBAAC,IAAD,CAAUx+B,MAAM,SAASw+B,UAAU,SAASrhC,OAAQ,CAAC,EAAG,EAAG,IAA3D,uF,UCPSqxF,I,qBAAiB//C,IAAOnrD,IAAV,4EAAGmrD,CAAH,6QASXC,YAAU,GAKPA,YAAU,GAIhBI,YAAS,YAOT2/C,GAAWhgD,IAAOnrD,IAAV,sEAAGmrD,CAAH,iIAELC,
YAAU,IAQbggD,GAAYjgD,YAAOnqE,KAAV,uEAAGmqE,CAAH,wFAEJC,YAAU,IAOf4+C,GAAa7+C,YAAOnqE,KAAV,wEAAGmqE,CAAH,yDAGLC,YAAU,GAClBI,YAAS,CAAC,OAAQ,aAIf6/C,GAAUlgD,YAAOmgD,IAASC,cAAc,MAAjC,qEAAGpgD,CAAH,yIAEHC,YAAU,IAWdogD,GAAWrgD,YAAOxwC,IAAK4wF,cAAc,MAA7B,sEAAGpgD,CAAH,mFCtDfjY,GAAO,SAAC,GAAD,IAAG/hD,EAAH,EAAGA,KAAMiJ,EAAT,EAASA,cAAejf,EAAxB,EAAwBA,YAAxB,OACX,kBAAC,KAAD,CACEswH,OACE,kBAAC,KAAD,CACEjjG,KACE,oCACE,kBAAC,GAAD,CAAYrX,KAAK,SACjB,kBAACq6G,GAAD,CACE9uF,MAAM,SACNtD,KAAK,GACL/mC,QAAS,SAAA8C,GACPA,EAAMC,iBACND,EAAMuB,kBACNxD,OAAOgnB,uBAAuB/e,KAG/BgW,IAIPu6G,SAAUtxG,EAAc1d,QAAU,mBAItC,kBAAC,IAAD,CAAKm9B,OAAQ,CAAC,EAAG,EAAG,IACjBzf,EAAcnc,KAAI,SAAAka,GAAG,OACpB,kBAACgzG,GAAD,CAAU3rH,IAAK2Y,GACb,kBAACkzG,GAAD,CAASjyF,KAAMjhB,GCzCV,SAACqQ,EAAMmjG,GACpB,GAAInjG,EAAK9rB,QAAUivH,EAAW,OAAOnjG,EAErC,IAAMojG,EAAat/H,KAAKG,OAAOk/H,EAAY,GAAK,GAChD,MAAM,GAAN,OAAUnjG,EAAKrP,UAAU,EAAGyyG,GAA5B,cAA6CpjG,EAAKrP,UAAUqP,EAAK9rB,OAASkvH,IDqC7CC,CAAe1zG,EAAK,KACzC,kBAACizG,GAAD,CACEj6G,KAAK,WACL6gB,KAAK,QACL3/B,QAAS,WACPa,OAAOuoB,2BAA2BtgB,EAAagW,EAAMgH,aA6CpD2zG,GApCa,SAAC,GAAuB,IAC5CC,EAD2C,EAApBC,cAE1BhvG,MAAK,SAACzF,EAAGC,GAAJ,OAAUF,GAAmBC,EAAEpG,KAAMqG,EAAErG,SAC5CpT,QAAO,qBAAGoa,MAAkBqgB,OAHkB,EAKjB/jC,oBAAS,GALQ,mBAK1Cy1H,EAL0C,KAKhCC,EALgC,KAM3CC,EAAiBl1H,uBAAY,kBAAMi1H,GAAY,SAAA3nF,GAAC,OAAKA,OAAI,IAE/D,OACE,kBAAC,GAAD,CACEk9D,OAAQwqB,EACRxH,WAAY0H,EACZ1vF,MACE,kBAAC,IAAD,CAAM8kE,WAAW,SAASgC,eAAe,WACvC,kBAAC,IAAD,CAAMvoE,QAAM,EAACyD,MAAM,UAAnB,iBAGA,kBAAC,GAAD,CAAYn8B,OAAQ2pH,EAAU/4G,KAAK,eAAe6gB,KAAK,QAAQ0K,MAAM,WAIzE,kBAACwuF,GAAD,CAAgBlxF,QAAM,EAACC,IAAK,GACzB8xF,EAAe9tH,KAAI,gBAAGkT,EAAH,EAAGA,KAAMiJ,EAAT,EAASA,cAAeT,EAAxB,EAAwBA,KAAMxB,EAA9B,EAA8BA,IAA9B,OAClB,kBAAC,GAAD,CACEiC,cAAeA,EACf5a,IAAG,UAAK2R,EAAL,YAAawI,GAChBxe,YAAawe,EACbxI,KAAMA,EACNgH,IAAKA,UEvEX8zG,GAA0Br7E,cAC9B,SAAApzC,GAAK,OAAIA,EAAMs3G,OAAOj5G,eAAeG,MAAQ,MAC7C,SAAAwB,GAAK,OAAIA,EAAMs3G,OAAOx6G,SAASY,gBAAgBk/E,uBAAyB,MACxE,WAAsBsvC,GAAtB,IAAGxtG,EAAH,EAAGA,MAAOvhB,EAAV,EAAUA,SAAV,OAAsC8uH,GAASvtG,EAAOvhB,EAAU+uH,MAG5DwC,GAAuBt7E,cAC3B,SAAApzC,GAAK,OAAIA,EAAMs3G,OAAOx6G,YACtB,SAAAA,GAAQ,OAAIA,EAASgB,uBAAyB,MAG1CutH,GAAqBj4E,cACzB,qBAAGk4E,aACH,kBAA8B,CAAEzgD,WAAhC,EAAGA,WAAyCC,QAA5C,EAAeA,YA6DF12E,OAAMk0G,MA1DP,SAAC,GAAwB,IAAtBpG,EAAqB,EAArBA,OAAQgX,EAAa,EAAbA,OAAa,EACc5jH,YAAYm5H,IAD1B,IAC5Br9E,kBAD4B,MACf,GADe,MACXk7E,uBADW,MACO,GADP,EAE9BqC,EAAer5H,YAAYo5H,IAC3BE,EAAiBt5H,YAAYu5H,MAHC,EAIJv5H,YAAY+1H,IAApCxgD,EAJ4B,EAI5BA,WAAYC,EAJgB,EAIhBA,QACdrtC,EAAenoC,YAAYooC,MAE3BoxF,EAAiBp3H,uBAAY,kBAAMhC,OAAOsnB,+BAA8B,IAE9E,OACE,kBAAC,IAAD,CAAatW,MAAO,GAAI01B,WAAW,QAAQ5f,KAAM0lF,EAAQt2D,UAAU,aAAamjF,SAAO,GACrF,kBAAC,IAAD,CACExsB,MAAI,EACJ/lE,QAAM,EACNnF,SAAU,CAAEirE,SAAU,UACtBjmE,OAAQ,CAAC,EAAG,EAAG,GACf0tD,OAAQ,CAAEga,KAAM,OAAQ7kE,MAAO,aAC/B9mC,MAAO,CAAE42H,cAAe,QAExB,kBAAC,IAAD,CAAM33F,SAAU,CAAEirE,SAAU,QAAUC,MAAI,EAAC/lE,QAAM,EAACC,IAAK,EAAGF,QAAS,CAAC,IAClE,kBAAC,IAAD,CAAMqtE,UAAU,OACd,kBAAC,IAAD,CACErG,SAAO,EACPtlE,QAAQ,aACRulE,UAAU,OACVqV,OAAK,EACL71F,KAAK,eACLnuB,QAASqkH,MAGXruC,GACA,sCACKyhD,EAAgBptH,QACjB,kBAAC,GAAD,CAAiBkyC,WAAYA,EAAYk7E,gBAAiBA,MAEzDqC,EAAazvH,QACd,kBAAC,IAAD,CAAMu8B,QAAM,EAACyD,MAAM,UACjB,kBAAC,GAAD,CAAcsvF,cAAeG,MAKpC9jD,GACC,kBAAC,GAAD,CAAkBz5B,WAAYA,EAAYk7E,gBAAiBA,KAG9DsC,GACC,kBAAC,IAAD,CAAM7kC,OAAQ,CAAEga,KAAM,OAASC,eAAe,SAAShC,WAAW,SAASzlE,QAAS,CAAC,IACnF,kBAAC,IAAD,CAAW1nC,QAASi6H,GAApB,qBAGFjkD,GAAcptC,GAAgB,kBAAC,GAAD,MAC/BqtC,GAAWrtC,GAAgB,kBAAC,GAAD,WCrE9BknF,GAAUh3C,YAAO0zB,KAAMC,MAAM,CAAEjpG,OAAQ,QAASosE,OAAQ,KAAjD,mEAAGkJ,CAAH,yBAIP09C,GAAqBj4E,cACzB,qBAAGk4E,aACH,qBAAGzgD,cA6BUz2E,OAAMk0G,MA1B
L,WAAO,IAAD,EACUgR,aAAgB,qBAD1B,mBACb0O,EADa,KACJiH,EADI,KAEd/sB,EAAS5sG,YAAY45H,MACrBtH,EAAWtyH,YAAY+1H,IAEvBh0H,EAAWC,cAEX4hH,EAASxhH,uBAAY,WACzBL,EAASuD,aAA0B,CAAEoG,UAAWkhG,KAChD+sB,GAAY/sB,KAEX,CAACA,IAOJ,OALA1sG,qBAAU,WACR6B,EAASuD,aAA0B,CAAEoG,WAAUgnH,GAAUJ,OAExD,CAACA,IAGF,kBAAC,GAAD,KACE,kBAAC,GAAD,CAAQ1lB,OAAQA,EAAQgX,OAAQA,EAAQruC,WAAY+8C,IACpD,kBAAC,GAAD,CAAO1lB,OAAQA,EAAQgX,OAAQA,EAAQpuC,SAAS,QCnChD65C,GAAUh3C,YAAO0zB,KAAMC,MAAM,CACjChqE,SAAU,QACV0sE,eAAgB,QAChBhC,WAAY,QACZt7F,MAAO,OACP+9D,OAAQ,KALG,sEAAGkJ,CAAH,uCAyBEwhD,GAbA,SAAC,GAA6B,IAA3B76H,EAA0B,EAA1BA,SAEhB,OAF0C,EAAhB86H,UAEJ96H,EAGpB,kBAAC,GAAD,KACE,kBAAC,GAAD,MACA,kBAAC,GAAD,MACCA,ICtBM+6H,I,OAAW,SAAC/5G,GAAD,MACrB,CACCg6G,MAAO5D,IACP6D,MAAOC,KACPl6G,IAAUo2G,M,UCsJC+D,GA3IQ,SAAC,GAOjB,IANLC,EAMI,EANJA,wBACAC,EAKI,EALJA,iBACA3zF,EAII,EAJJA,WACA4zF,EAGI,EAHJA,2BACAC,EAEI,EAFJA,oBACAC,EACI,EADJA,uBACI,EAC0D74H,oBAAS,GADnE,mBACG84H,EADH,KAC4BC,EAD5B,KAOE7zF,EAAoBzkC,uBACxB,YAAuB,IAApBojB,EAAmB,EAAnBA,KAAMm1G,EAAa,EAAbA,OACCp0F,EAAS6zF,EAAT7zF,KAEY,aAAhBA,EAAK20C,QACHu/C,IACFJ,EAAiB9zF,EAAKq0F,gBACtBN,EAA2BG,IAEd,UAAXE,IAAoBv6H,OAAOghB,SAASklB,KAAO9gB,GAC/CkhB,KACyB,YAAhBH,EAAK20C,QACds/C,MAGJ,CACEJ,EACAC,EACAI,EACAD,EACAF,EACA5zF,IAIEm0F,EAAoBz4H,uBAAY,WAAO,IACnC04H,EAASV,EAATU,KACJL,IACFJ,EAAiBS,EAAKF,gBACtBN,EAA2BG,IAET,aAAhBK,EAAK5/C,QACkB,YAAhB4/C,EAAK5/C,QACds/C,IAEF9zF,MACC,CACD0zF,EACAC,EACAI,EACAD,EACAF,EACA5zF,IAGF,OAAO0zF,EACL,kBAAC,IAAD,KACE,kBAAC,IAAD,CAAchpH,MAAO,IAAK01B,WAAW,mBACnC,kBAAC,IAAD,KACE,kBAAC,IAAD,CAAIC,OAAQ,CAAC,IAAKqzF,EAAwBj5H,QAE5C,kBAAC,IAAD,KACE,kBAAC,IAAD,CAAM8lC,QAAS,CAAC,EAAG,EAAG,EAAG,GAAIC,QAAM,EAACC,IAAK,GACS,oBAAxCizF,EAAwB1kG,KAAKwQ,OACnCk0F,EAAwB1kG,KAAKwQ,OAAO,IAEpC,kBAAC,IAAD,KAAOk0F,EAAwB1kG,KAAKwQ,QAErCk0F,EAAwB1kG,KAAK0Q,QAAQx8B,OAAS,GAC7C,kBAAC,IAAD,CAAMs9B,QAAM,EAACC,IAAK,GAChB,kBAAC,IAAD,CAAMD,QAAM,EAACC,IAAK,EAAGC,GAAI,MACtBgzF,EAAwB1kG,KAAK0Q,QAAQj7B,KAAI,SAAAk8B,GACxC,MAAsB,oBAAXA,EACF,wBAAI36B,IAAK26B,GAASA,KAGzB,wBAAI36B,IAAK26B,GACP,kBAAC,IAAD,KAAOA,SAOlB+yF,EAAwB1kG,KAAKpF,QAC5B,kBAAC,IAAD,CAAMgX,cAAY,eAAe8yF,EAAwB1kG,KAAKpF,UAIpE,kBAAC,IAAD,KACE,kBAAC,IAAD,CAAKyW,OAAQ,CAAC,EAAG,OAAQ,EAAG,IAC1B,kBAAC,IAAD,CACEU,UAAA,UAAY8yF,EAAZ,yBACAjzF,cAAY,qBACZjgB,QAASozG,EACT32B,SAzFiB,SAAAzgG,GAC3Bq3H,EAA2Br3H,EAAE0kH,cAAc1gG,UAyFjCugB,MAAOwyF,EAAwBW,cAAcrlG,QAGjD,kBAAC,IAAD,CAAK4R,cAAY,OAAOP,OAAQ,CAAC,EAAG,EAAG,EAAG,GAAI31B,MAAO,CAAE9T,IAAK,KAC1D,kBAAC,KAAD,CAAWiqC,cAAe,CAAEnmC,QAASm5H,EAAqB/yF,SA1GrD,0BA2GF,gBAAGhiB,EAAH,EAAGA,KAAH,OACC,kBAAC,IAAD,CACEiiB,UAAA,UAAY8yF,EAAZ,mBACA5yF,cAAc,OACdL,cAAY,cACZ/nC,QAAS,kBACPsnC,EAAkB,CAAErhB,OAAMm1G,OAAQP,EAAwB7zF,KAAKo0F,UAEjEvpH,MAAM,OACNw2B,MAAOwyF,EAAwB7zF,KAAK7Q,WAK3C0kG,EAAwBU,MACvB,kBAAC,IAAD,CACErzF,UAAA,UAAY8yF,EAAZ,mBACAh7H,QAASs7H,EACT93H,OAAQ,GACR9D,UAAU,kBACVqoC,cAAY,OACZl2B,MAAO,CAAE9T,IAAK,KAEd,kBAAC,IAAD,CAAK8pC,GAAIS,IAAMC,GAAI,CAAEC,WAAY,MAAOC,WAAY,SACjDoyF,EAAwBU,KAAKplG,UAOxC,O,SrH7IMk9E,K,0CAAAA,E,0CAAAA,E,gDAAAA,E,oCAAAA,E,wDAAAA,E,sCAAAA,E,uDAAAA,Q,KAiCZ,IAEMooB,GAAoB,SAACC,GAAD,gBACrBhyF,MADqB,OACNiyF,aAAsB,CACtC95H,QAAS65H,EACTzzF,SALa,2BAQJ2zF,IAAsC,qBAChDvoB,GAAqBwoB,oBAAsB,CAC1Cj6H,MAAO,6BACPu0B,KAAM,CACJwQ,OAAQ,kBACN,kBAAC,IAAD,CAAMC,QAAM,GAAZ,oFAIFC,QAAS,CACP,mFACA,0CACA,2BACA,mFAEF9V,OAAQ,uCAEVyqG,cAAe,CACbrlG,KAAM,qBACN2lG,aAAczoB,GAAqBwoB,qBAErC70F,KAAM,CACJ7Q,KAAM,sCACNilG,OAAQ,oBACRz/C,OAAQ,WACR0/C,eAAgB,SAElBE,KAAM,CACJplG,KAAM,qCACNwlD,OAAQ,WACRy/C,OAAQ,uBACRC,eAAgB,WA/B6B,eAkChDhoB,GAAqB0oB,oBAAsB,CAC1Cn6H,MAAO,iDACPu0B,KAAM,CACJwQ,OAAQ,kBACN,oCACE,kBAAC,IAAD,CAAMC,QAAM,GAAZ,6IAKA,kBAAC,IAAD,0FAKJ
C,QAAS,CACP,mFACA,0CACA,2BACA,mFAEF9V,OAAQ,uCAEVyqG,cAAe,CACbrlG,KAAM,qBACN2lG,aAAczoB,GAAqB0oB,qBAErC/0F,KAAM,CACJ7Q,KAAM,yCACNwlD,OAAQ,WACRy/C,OAAQ,oBACRC,eAAgB,SAElBE,KAAM,CACJplG,KAAM,qCACNilG,OAAQ,uBACRz/C,OAAQ,WACR0/C,eAAgB,WAvE6B,eA0EhDhoB,GAAqB2oB,uBAAyB,CAC7Cp6H,MAAO,+CACPu0B,KAAM,CACJwQ,OAAQ,kBACN,kBAAC,IAAD,CAAMC,QAAM,GAAZ,yFAIFC,QAAS,GACT9V,OAAQ,wCAEVyqG,cAAe,CACbrlG,KAAM,gCACN2lG,aAAczoB,GAAqB2oB,wBAErCh1F,KAAM,CACJ7Q,KAAM,0CACNilG,OAAQ,QACRz/C,OAAQ,WACR0/C,eAAgB,WA7F6B,eAgGhDhoB,GAAqB4oB,iBAAmB,CACvCr6H,MAAO,kDACPu0B,KAAM,CACJwQ,OAAQ,kBACN,kBAAC,IAAD,CAAMC,QAAM,GAAZ,kFAIF7V,OAAQ,sCACR8V,QAAS,CACP,WACE,OACE,kBAAC,IAAD,KACG,IADH,+CAE+C,IAC7C,kBAAC,KAAD,CACE9U,OAAO,SACP+U,IAAI,sBACJC,KAAI,uDAAkD00F,GACpDpoB,GAAqB4oB,kBACrBn1G,UAAU,GAFR,2BAHN,yBAHF,WAgBJ,WACE,OACE,kBAAC,IAAD,4FACkF,IAChF,kBAAC,KAAD,CACEiL,OAAO,SACP+U,IAAI,sBACJC,KAAI,uFAAkF00F,GACpFpoB,GAAqB4oB,kBACrBn1G,UAAU,KALd,8BAFF,QAiBR00G,cAAe,CACbrlG,KAAM,sBACN2lG,aAAczoB,GAAqB4oB,kBAErCj1F,KAAM,CACJ7Q,KAAM,sCACNwlD,OAAQ,WACRy/C,OAAQ,kBACRC,eAAgB,SAElBE,KAAM,CACJplG,KAAM,qCACNwlD,OAAQ,WACRy/C,OAAQ,uBACRC,eAAgB,WA7J6B,eAgKhDhoB,GAAqB6oB,2BAA6B,CACjDt6H,MAAO,oCACPu0B,KAAM,CACJwQ,OAAQ,kBACN,kBAAC,IAAD,CAAMC,QAAM,GAAZ,uKAKFC,QAAS,IAEX20F,cAAe,CACbrlG,KAAM,qBACN2lG,aAAczoB,GAAqB6oB,4BAErCl1F,KAAM,CACJ7Q,KAAM,uCACNwlD,OAAQ,WACRy/C,OAAQ,uBACRC,eAAgB,SAElBE,KAAM,CACJplG,KAAM,qCACNwlD,OAAQ,WACRy/C,OAAQ,uBACRC,eAAgB,WAzL6B,eA4LhDhoB,GAAqB8oB,kBAAoB,CACxCv6H,MAAO,wDACPu0B,KAAM,CACJwQ,OAAQ,YAAmB,EAAhBF,KACT,OACE,oCACE,kBAAC,IAAD,CAAMG,QAAM,GAAZ,iJAOA,kBAAC,IAAD,2EACsE,IACpE,kBAAC,KAAD,CACE7U,OAAO,SACP+U,IAAI,sBACJC,KAAI,uDAAkD00F,GACpDpoB,GAAqB8oB,mBACrBr1G,UAAU,GAFR,qBAHN,kBAaR+f,QAAS,IAEX20F,cAAe,CACbrlG,KAAM,wBACN2lG,aAAczoB,GAAqB8oB,mBAErCn1F,KAAM,CACJ7Q,KAAM,qBACNwlD,OAAQ,UACR0/C,oBAAgB/8H,GAElBi9H,KAAM,CACJplG,KAAM,kCACNilG,OAAQ,aACRz/C,OAAQ,WACR0/C,eAAgB,WAvO6B,eA0OhDhoB,GAAqB+oB,0BAA4B,CAChDx6H,MAAO,oFACPu0B,KAAM,CACJwQ,OAAQ,WACN,OACE,oCACE,kBAAC,IAAD,CAAMC,QAAM,GAAZ,uGAIA,kBAAC,IAAD,6KAONC,QAAS,IAEX20F,cAAe,CACbrlG,KAAM,wBACN2lG,aAAczoB,GAAqB+oB,2BAErCp1F,KAAM,CACJ7Q,KAAM,qBACNwlD,OAAQ,UACR0/C,oBAAgB/8H,GAElBi9H,KAAM,CACJplG,KAAM,kCACNilG,OAAQ,uBACRz/C,OAAQ,WACR0/C,eAAgB,WA1Q6B,IA4W7CgB,IAGL,qBACEhpB,GAAqB8oB,mBA/BE,SAAC,GAAD,IACxBG,EADwB,EACxBA,oBACA/1F,EAFwB,EAExBA,WACAg2F,EAHwB,EAGxBA,kBACAtzF,EAJwB,EAIxBA,aACAp/B,EALwB,EAKxBA,eALwB,MAOA,UAAxByyH,IACgB,cAAf/1F,GAA6C,kBAAfA,IACT,YAAtBg2F,GACiB,aAAjBtzF,GACmB,cAAnBp/B,KAmBD,eAEEwpG,GAAqB+oB,2BA7CQ,SAAC,GAAD,IAC9BE,EAD8B,EAC9BA,oBACA/1F,EAF8B,EAE9BA,WACAg2F,EAH8B,EAG9BA,kBACAtzF,EAJ8B,EAI9BA,aACAp/B,EAL8B,EAK9BA,eAL8B,MAON,UAAxByyH,IACC/1F,IACAg2F,IACAtzF,IACAp/B,KAgCF,eAGEwpG,GAAqB6oB,4BAzDY,SAAC,GAAD,IAClCI,EADkC,EAClCA,oBACA/1F,EAFkC,EAElCA,WACA0C,EAHkC,EAGlCA,aACAp/B,EAJkC,EAIlCA,eAJkC,OAMjCyyH,IACe,cAAf/1F,GAA6C,kBAAfA,IACd,SAAjB0C,GACmB,cAAnBp/B,KA6CD,eAIEwpG,GAAqB4oB,kBAnEO,SAAC,GAAD,IAC7BK,EAD6B,EAC7BA,oBACA/1F,EAF6B,EAE7BA,WACAg2F,EAH6B,EAG7BA,kBAH6B,MAKL,UAAxBD,IACgB,cAAf/1F,GAA6C,kBAAfA,IACT,gBAAtBg2F,KAwDD,eAKElpB,GAAqB2oB,wBA/EM,SAAC,GAAD,IAC5BM,EAD4B,EAC5BA,oBACA/1F,EAF4B,EAE5BA,WACAg2F,EAH4B,EAG5BA,kBACA1yH,EAJ4B,EAI5BA,eAJ4B,MAMJ,UAAxByyH,IACgB,cAAf/1F,GAA6C,kBAAfA,IACT,YAAtBg2F,GACmB,cAAnB1yH,KAiED,eAMEwpG,GAAqB0oB,qBAvFF,SAAC,GAAD,IACpBO,EADoB,EACpBA,oBACA/1F,EAFoB,EAEpBA,WACAg2F,EAHoB,EAGpBA,kBAHoB,MAKI,UAAxBD,GAAkD,YAAf/1F,GAAkD,YAAtBg2F,KA4EhE,eAOElpB,GAAqBwoB,qBA/FF,SAAC,GAAD,IACpBS,EADoB,EACpBA,oBACA/1F,EAFoB,EAEpBA,WACAg2F,EAHoB,EAGpBA,kBAHoB,MAKI,UAAxBD,GAAkD,YAAf/1F,GAAkD,gBAAtBg2F,KAmFhE,IAwCcC,GA9BW,SAAC,GAKR,IAJjBj2F,EAIgB,EAJhBA,WACAg2F,EAGgB,EAH
hBA,kBACA1yH,EAEgB,EAFhBA,eACAo/B,EACgB,EADhBA,aACgB,EACgCw7E,aAC9C,yBAFc,mBACT6X,EADS,KACYxB,EADZ,KAKVE,EAAsBt0F,mBAA8B,WACxD,OAAO/nC,OAAOlB,KAAK4+H,IAA+Bp/F,MAAK,SAAAw/F,GACrD,OAAOJ,GAA8BI,GAAa,CAChDl2F,aACAg2F,oBACA1yH,iBACAyyH,sBACArzF,sBAGH,CAAC1C,EAAYg2F,EAAmB1yH,EAAgBo/B,EAAcqzF,IAEjE,MAAO,CACLzB,wBAAyBe,GAAmBZ,GAC5CA,sBACAF,mBACAwB,wBsH1TWI,GAxFU,WACvB,IAAM9zF,EAAenoC,YAAYooC,MAC3B5gC,EAAWxH,YAAY2f,MAEvBu8G,EAAWl8H,aAAY,SAAA0K,GAAK,OAChCyxH,aAAgB,CAAE/6H,QAAS,sBAAuBg7H,KAAM50H,EAASa,aAAjE8zH,CAAgFzxH,MAG5E2xH,EAAap2F,mBAAQ,WAAO,IACxBK,EAASlmC,OAAOghB,SAAhBklB,KACFg2F,EAAcx0G,mBAAmBwe,GACvC,MAAM,GAAN,OAAU41F,EAAV,yBAAmCI,KAClC,CAACJ,IAEE9yH,EAAiBpJ,YAAYkoC,MAdN,EAeOvmC,oBAAS,GAfhB,mBAetB0mC,EAfsB,KAeTC,EAfS,OAiB3ByzF,GAAkB,eACb3yH,IAFCgxH,EAhBqB,EAgBrBA,wBAAyBC,EAhBJ,EAgBIA,iBAAkBwB,EAhBtB,EAgBsBA,oBAAqBtB,EAhB3C,EAgB2CA,oBAKlEc,GAAsC,OAAvBjB,QAAuB,IAAvBA,OAAA,EAAAA,EAAyBW,cAAcM,eAAgB,GArB/C,EA4BUrX,aAAgBqX,GAA9Cf,EA5BoB,oBA6BvBiC,EAAyBr5H,aAAaqgB,QAAQ83G,GAM9CmB,EACJr0F,GACAiyF,GACA/xF,KACEk0F,GAAqD,cAA3BA,GAExB/B,EAAyB10H,eAgC/B,OAzBA5F,qBAAU,WACR,IAAIu8H,EAAiB52G,YAAW,kBAAMyiB,GAAa,KAAO,KAC1D,OAAO,WACLma,aAAag6E,MAEd,IAEHv8H,qBAAU,YtHySa,SAAC,GAAD,IACvB27H,EADuB,EACvBA,oBACA/1F,EAFuB,EAEvBA,WACA0C,EAHuB,EAGvBA,aACAp/B,EAJuB,EAIvBA,eAJuB,MAMC,UAAxByyH,IACgB,cAAf/1F,GAA6C,kBAAfA,IACd,SAAjB0C,GACmB,cAAnBp/B,GsHjTMszH,CAAU,aAAEb,uBAAwBzyH,MACtChJ,OAAOghB,SAASklB,KAAO+1F,KAExB,CAACA,EAAYjzH,EAAgByyH,IAEhC37H,qBAAU,WtHiSc,UsHhSC,CAAE27H,uBtH+RQA,qBsH/RgBn9H,QAAQC,IAAI,sBAC5D,CAACk9H,IAEJ37H,qBAAU,WAENqD,SAASklC,gBAAgB3lC,MAAMi/B,SAD7By6F,EACwC,SAEA,SAE3C,CAACn0F,EAAam0F,IAEbA,EAEA,kBAAC,GAAD,CACElC,2BAA4BA,EAC5BF,wBAAyBA,EACzBC,iBAAkBA,EAClB3zF,WAhDa,WACjB4B,GAAa,IAgDTiyF,oBAAqBA,EACrBC,uBAAwBA,IAIvB,MCxFHmC,GAAaC,IAGnBx8H,OAAOmiC,GAAKA,KAEZ,IA6Fes6F,GA7FO,WACpB,IAAM10F,EAAenoC,aAAYooC,MAE3B0hD,EAAQgzC,cACd58H,qBAAU,WAGRE,OAAOU,QAAQ6H,OAAS,GAExBvI,OAAOU,QAAQohC,MAAQ,SAAA9S,GACrBA,KAEFkW,aAAgBwkD,KAEf,IAduB,MAegBnoF,qBAfhB,mBAenBo7H,EAfmB,KAeJC,EAfI,KAiBpBC,OAA2Cp/H,IAAlBk/H,EAEzBllG,EAAW/3B,kBAAO,WACtBk9H,EAAiBxjI,KAAKq0C,aAGxB3tC,qBAAU,WACR,GAAI+8H,EAAwB,CAC1B,IAAMC,EAAc35H,SAASof,eAAe,eACxCu6G,IACFA,EAAYp6H,MAAM80B,QAAU,WAG/B,CAACqlG,IA9BsB,MAgCqBv8E,eAAvC9hC,EAhCkB,EAgClBA,iBAAkBC,EAhCA,EAgCAA,iBAC1B3e,qBAAU,WACR4e,aAAsB,CACpBF,mBACAC,uBAED,CAACD,EAAkBC,ICzFG,SAACs+G,GAC1B,IAAM31H,EAAWxH,aAAY2f,MAEvB5d,EAAWC,eACjB9B,qBAAU,YACJi9H,GAAsB31H,EAASW,iBAAoBX,EAASG,iBAC1DH,EAASI,kBAEb7F,EAAS4C,KAAiB0H,QAAQ,CAChC0b,uBAGH,CAAChmB,EAAUyF,EAAU21H,ID+ExBC,EAAY,GE3FW,SAACC,GACxB,IAAMx0H,EAAmB7I,aAAYs9H,MAE/Bv7H,EAAWC,eACjB9B,qBAAU,WACJm9H,IAAoBx0H,GACtB9G,EAASgD,aAAkB,CACzBgjB,uBAGH,CAAChmB,EAAU8G,EAAkBw0H,IFkFhCE,EAAU,GG9FW,SAACC,GACtB,IAAMh2H,EAAWxH,aAAY2f,MACvBzX,GAAyB,OAARV,QAAQ,IAARA,OAAA,EAAAA,EAAUU,kBAAkB,EAC7CnG,EAAWC,eACjB9B,qBAAU,WACJs9H,IAAkBt1H,GACpBnG,EAAS0K,KAAgBJ,QAAQ,CAC/B+wE,MAAM,OAGT,CAACr7E,EAAUmG,EAAgBs1H,IHqF9BC,EAAQ,GA1CkB,MA4C8B97H,oBAAS,GA5CvC,mBA4CnB+7H,EA5CmB,KA4CGC,EA5CH,KA6C1BhmC,2BAAgB,WACdhoE,QAAQC,IAAI,CACVw6D,GAAQzsC,IAAev9C,OAAOU,QAAQyJ,OAAOpK,QAAQy9H,eACrDxzC,GAAQzsC,IAAev9C,OAAOU,QAAQyJ,OAAOpK,QAAQ09H,iBACpDtuG,MAAK,WACNouG,GAAwB,QAEzB,IAEH,IAAM50H,EI5GyB,WAAO,IAAD,EACZu2G,GAAO,UAAoBv3F,IAApB,kBAChC,OAFqC,oBJ4Gd+1G,GACjBr2H,EAAezH,aAAY4yH,MAGjCxyH,OAAOU,QAAQ+2B,SAAWA,EAAS13B,QAEnC,IAAMuH,EAAiB1H,aAAY+9H,MAC7B/9G,EAAQhgB,aAAYg+H,MAG1B,OKnHa,WACb,IAAMj8H,EAAWC,eACjBs5F,cAAS,WACP,IAAM9kB,EAAS8S,eACT20C,EAAYznD,EAAM,WACxB,GAAIynD,EAAW,CACb,IAAMC,EAAYn5G,OAAOk5G,GAEnB98F,EAAcq1C,EAAM,aACpB2nD,EAAa3nD,EAAM,YACnB4nD,EAAa5nD,EAAM,YACzB,IAAK5wC,IAAcxE,SAASD,KAAiBg9F,IAAe
C,EAC1D,OAGFr8H,EAASyD,aAAe,CACtBsD,MAAO,CACLuF,QAAS8vH,EACT/7G,OAAQ+e,EACRhlC,MAAOiiI,EACP9rG,KAAM4rG,MAIVn8H,EAASoC,aAA0B,CACjCwH,MAAmB,IAAZuyH,EAFO,IAGdtyH,OAAoB,IAAZsyH,EAHM,WL0FpBG,GAGE,kBAAC,IAAD,CAAer+G,MAAO+5G,GAAS/5G,IAC5B09G,GAGC,kBAAC9b,GAAD,MAEF,qCACC74G,GAAkBtB,GAAgBC,GAAkBu1H,GACnD,oCACE,kBAAC,GAAD,CAAQnD,UAAWx/C,IAChBx1C,KAAS,KAAO,kBAAC,GAAD,MAChB44F,GACC,oCACE,kBAACxe,GAAD,CAASxyG,IAAKqwH,IACd,kBAAC,GAAD,KACE,kBAAC,GAAD,OAEDziD,IAAe,kBAAC,GAAD,QAIrBnyC,GAAgB,kBAACw0F,GAAD,CAAY55H,OAAQ,Q,OM/H/Cu7H,IAAS53E,OACP,kBAAC,IAAD,CAAUojC,MAAOA,IACf,kBAAC,GAAD,OAEFvmF,SAASof,eAAe,U,kGCbpB47G,EAAqBzgF,YACzBn+B,KACA,qBAAGlX,iBAAwCogC,OAGvC21F,EAAkB1gF,aACtB,qBAAGk4E,aACH,qBAAGxgD,WAqCU89C,IAlCA,SAAC,GAAiC,IAA/Bt0H,EAA8B,EAA9BA,SAAUuoC,EAAoB,EAApBA,cAAoB,EAClBy8E,YAAgB,uBAArCkP,EADuC,oBAExCuL,EAAYz+H,aAAY,SAAA0K,GAAK,OAAIyxH,YAAgB50F,EAAhB40F,CAA+BzxH,MAChE6oH,EAAavzH,YAAYu+H,GACzB/oD,EAAUx1E,YAAYw+H,GAEtBz8H,EAAWC,cAEXwjB,EAAOygB,mBAAQ,WAAO,IAClBK,EAASlmC,OAAOghB,SAAhBklB,KACFg2F,EAAcx0G,mBAAmBwe,GACvC,MAAM,GAAN,OAAUm4F,EAAV,yBAAoCnC,KACnC,CAACmC,IAEEjL,EAAWpxH,uBACf,kBACEL,EACEg/C,YAAsB,CACpB2oC,eAAgBlkE,OAItB,CAACA,IAGH,OAAOygB,mBACL,iBACsB,oBAAbjnC,EACHA,EAAS,CAAEu0H,aAAY/tG,OAAMguG,WAAUh+C,UAAS09C,sBAChDl0H,IACN,CAACA,EAAUu0H,EAAY/tG,EAAMguG,EAAUh+C,EAAS09C,M,8MC3C9CwL,EAA+B,OAGxBp1C,EAAgB,WAEX,IADhBvpE,EACe,uDADRwB,mBAAmBnhB,OAAOghB,SAASrB,KAAKukB,OAAO,IAEtD,GAAoB,IAAhBvkB,EAAKnW,OACP,MAAO,GAET,IAAM4sE,EAASz2D,EAAK7V,MAAMw0H,GACpBvhD,EAAW3G,EAAOrpE,QAAO,SAACC,EAAiBjN,GAC/C,IAAMmsB,EAAQnsB,EAAQ+J,MAAM,KAD+B,cAEpCoiB,EAFoC,GAEpDqyG,EAFoD,KAE7CxiI,EAF6C,KAI3D,OADAiR,EAAIuxH,GAASxiI,EACNiR,IACN,IACH,OAAO+vE,GAGIyhD,EAAqB,SAACpoD,GACjC,IAAMo5B,EAAU1xG,OAAO0xG,QAAQp5B,GAC/B,OAAuB,IAAnBo5B,EAAQhmG,OACH,GAEFgmG,EACJzkG,KAAI,mCAAEuB,EAAF,KAAOvQ,EAAP,iBAA6B0B,IAAV1B,EAAsBuQ,EAAtB,UAA+BA,EAA/B,YAAsCob,mBAAmB3rB,OAChFylD,KAzB2B,MAsCnB2nC,GAFsB9rC,YAAK6rC,EAAes1C,GAE1B,SAACpoD,GAC5B,IAAMqoD,EAAYv1C,IACZw1C,EAAkBC,YAAcvoD,EAAQqoD,GAC9Cz+H,OAAO6hB,QAAQC,aAAa9hB,OAAO6hB,QAAQvX,MAAO,GAAlD,WAA0Dk0H,EAAmBE,OAGlEx+G,EAAe,SAC1Bq+G,GAD0B,IAE1B5+G,EAF0B,uDAEnBwB,mBAAmBnhB,OAAOghB,SAASrB,KAAKukB,OAAO,IAF5B,OAGfglD,EAAcvpE,GAAM4+G,IAOpBl1C,EAAmB,SAACjT,GAC/Bp2E,OAAO6hB,QAAQC,aAAa9hB,OAAO6hB,QAAQvX,MAAO,GAAlD,WA3B6B,SAC7Bs0H,GAEI,IADJj/G,EACG,uDADIwB,mBAAmBnhB,OAAOghB,SAASrB,KAAKukB,OAAO,IAEhD26F,EAAiB9uH,YAAK6uH,EAAgB11C,EAAcvpE,IAC1D,OAAO6+G,EAAmBK,GAsBgCC,CAAgB1oD,O,sHCvC/DzwE,EAAoB,SAC/Bo5H,EACA/4H,GAEA,IAAMg5H,EACyB,kBAAtBD,EACF57H,SAASof,eAAew8G,GACzBA,EAEFC,EAAcC,eAChBD,EAAcC,cAAcC,YAAYl5H,EAAS,MAIxCD,EAAyB,SACpCo5H,EACAnwG,EACAowG,GACiC,IAAD,EACM79H,mBAAwB69H,GAD9B,mBACzBC,EADyB,KACZC,EADY,KAE1BC,EAAgBv9H,uBACpB,SAAAgE,GACE,IAAM8C,EAAO9C,EAAQ8C,KACjBA,EAAK/J,OAASogI,IAChBG,EAAex2H,EAAKjD,SAChBmpB,GACFA,EAASlmB,EAAKjD,YAIpB,CAACmpB,EAAUmwG,IAEPK,EAAgBx9H,uBAAY,WAChCs9H,EAAeF,KACd,CAACA,IAOJ,OANAt/H,qBAAU,WAER,OADAE,OAAOoD,iBAAiB,UAAWm8H,GAC5B,WACLv/H,OAAOsD,oBAAoB,UAAWi8H,MAEvC,CAACA,EAAeJ,IACZ,CAACE,EAAaG,K,gCC/DvB,sGAAO,IAAM74H,EAAW84H,OACX7iF,GAAe6iF,EACf3iF,GAAmB2iF,G,gCCFhC,kCAAO,IAAM97H,EAAW,a,+BCAxB,qFAaa/D,EAET8/H,IACS99H,EAET+9H,K,gCClBJ,sCAAMzlD,EAAcl6E,OAAOghB,SAASrB,KAAK7V,MAAM,KAAKk3B,SAAS,aAmBhD0D,EAjBK,WAChB,GAAIw1C,EACF,OAAO,EAFa,IAIdzyE,EAAatE,SAAS6d,SAAtBvZ,SACR,OACEA,EAAS4kB,SAAS,mBACf5kB,EAAS4kB,SAAS,kBAClB5kB,EAAS4kB,SAAS,mBAClB5kB,EAAS4kB,SAAS,gBAClB5kB,EAAS4kB,SAAS,kBAClB5kB,EAAS4kB,SAAS,iBAClB5kB,EAAS4kB,SAAS,oBAClB5kB,EAAS4kB,SAAS,kBAIHuzG,I,+BCnBtB,m9CAaazlB,EAA6B,SAAC7e,GAAD,OAA2B,SAAChxF,GACpE,IAAME,EAAUrB,YAA2BmyF,GACrCnsF,EAAW7E,EAAM3G,KAAU0C,iBAAiBmE,GAClD,OAAO2E,GAAYA,E
AASlF,WAGjB41H,EAAe,SAACv1H,GAAD,OAAsBA,EAAMs3G,QAE3CnnB,EAAkB/8C,YAC7BmiF,GACA,SAACliF,EAAY1yC,GAAb,OAAsCA,KACtC,SAAC60H,EAAa70H,GAAd,OACE60H,EAAYx5H,UAAU2E,MAIb0vF,EAAkBj9C,YAC7BmiF,GACA,SAACliF,EAAYtyC,GAAb,OAAsCA,KACtC,SAACy0H,EAAaz0H,GAAd,OACEy0H,EAAYv5H,UAAU8E,MAIbitG,EAAwB56D,YAAemiF,EAAcvgH,YAAK,aAE1Di7E,EAA8B78C,YACzCmiF,EACAvgH,YAAK,6BAGM6d,EAAyBugB,YAAemiF,EAAcvgH,YAAK,qBAE3D2d,EAAqBygB,YAAemiF,EAAcvgH,YAAK,iBAEvDw2E,EAA4Bp4C,YAAemiF,EAAcvgH,YAAK,wBAE9Du8F,EAAuBn+D,YAAemiF,EAAcvgH,YAAK,mBACzD08F,EAAoBt+D,YAAemiF,EAAcvgH,YAAK,gBAEtDi8D,EAAiB79B,YAC5BmiF,EACAvgH,YAAK,aAGMC,EAAiBm+B,YAAemiF,EAAcvgH,YAAK,aAEnDkzG,EAAqB90E,YAAen+B,EAAgBD,YAAK,iBAEzDw7G,EAAwB,eAAC3zF,EAAD,uDAAiB,GAAjB,OACnCrpC,OAAOlB,KAAKuqC,GAAep6B,QAAO,SAACC,EAAKV,GAAN,OAAeU,EAAG,eAAYV,EAAZ,YAAmB66B,EAAc76B,MAAS,KAEnFyvH,EAAkB,SAAA50F,GAAa,OAC1CuW,YAAen+B,EAAgBizG,GAAoB,SAACprH,EAAUC,GAC5D,IAAM4W,EAAOyJ,mBAAmBtgB,EAASK,UACnC8Z,EAASmG,mBACb01B,YAAmBp9C,OAAOghB,SAASO,OAASvhB,OAAOghB,SAASQ,WAI9D,MAAM,GAAN,OAAUna,EAAV,uBACED,EAASa,YADX,iBAESgW,EAFT,mBAEwBsD,GAFxB,OAEiCsnB,KAFjC,OAEgDiyF,EAAsB3zF,QAI7DgyF,GADwBz7E,YAAen+B,EAAgBD,YAAK,oBAC9Bo+B,YACzCn+B,GACA,gBAAGlX,EAAH,EAAGA,eAAH,OAAwBA,GAtEM,mCAsEaA,MAKhC2/B,EAAuB0V,YAClCn+B,GACA,SAACnY,GAAD,OAAcA,EAASM,iBAAmBN,EAASI,oBAExCm2H,EAAuBjgF,YAAen+B,EAAgBD,YAAK,mBAC3D4hE,EAAwBxjC,YAAen+B,EAAgBD,YAAK,oBAE5D49G,EAAyBx/E,YACpCmiF,EACAj3F,YAAK,CAAC,SAAU,sBAELwnF,EAAqB1yE,YAChCmiF,GACA,SAACje,GAAD,OAAYA,EAAOr5G,OAAOC,gBAGfwtF,EAAct4C,YACzBmiF,GACA,SAACje,GAAD,OAAYA,EAAOl5G,SAGR8wH,EAA2B97E,YAAemiF,EAAcvgH,YAAK,uBAC7D46E,EAAwCx8C,YACnDmiF,EAAcvgH,YAAK,oCAGRygH,EAAgBriF,YAAemiF,GAAc,SAAAje,GAAM,OAAIA,EAAOjhH,WAE9Dk/B,EAAqB,SAAuBmgG,GAAvB,OAChCtiF,YAAeqiF,GAAe,SAACp/H,GAAD,OAAaA,EAAQq/H,OAGxCvvB,EAAsB5wE,EAAmB,mBACzCk8E,EAAmCl8E,EAC9C,mCAEWk9E,EAAsCl9E,EAAmB,6BACzDoxE,EAAwBpxE,EAAmB,mBAI3Cq4E,GAF0Br4E,EAAmB,sBACjBA,EAAmB,wBACzBA,EAAmB,mBACzC82D,EAAuB92D,EAAmB,qBAE1C+9F,EAAc/9F,EAAmB,SACjChgC,EAAiBggC,EAAmB,aACpCo9E,EAA8Bp9E,EAAmB,6BACjD23D,EAAmB33D,EAAmB,eAEtC03E,EAA2B13E,EAAmB,SAC9CstD,EAA2BttD,EAAmB,eAC9CutD,EAA6BvtD,EAAmB,mBAChD0gB,EAAwB1gB,EAAmB,YAC3C2gB,EAAyB3gB,EAAmB,aAG5CogG,GAF8BpgG,EAAmB,4BAE1B6d,YAClCmiF,GACA,SAACje,GAAD,OAAYA,EAAOj5G,eAAeG,SAGvBm1C,EAAoCP,YAC/CuiF,GACA,SAACtiF,EAAD,YAAe1vC,WACf,SAACiyH,EAAajyH,GAAd,cAA0BiyH,QAA1B,IAA0BA,OAA1B,EAA0BA,EAAah1H,OAAO+C,MAGnC65B,GAAuB4V,YAAemiF,GAAc,SAAAje,GAAM,OAAIA,EAAO54G,mB,0ECrJ5Em3H,EAAaloD,YAAO,KAAV,yEAAGA,CAAH,yDAEHK,YAAS,WAGPA,YAAS,WAITA,YAAS,WAaTu+C,IAJA,SAAC,GAAD,QAAGuJ,iBAAH,MAAe34F,IAAf,EAAwB3oC,EAAxB,oCACb,kBAACshI,EAAD,eAAWp5F,GAAIm5F,GAAgBrhI,M,qFCnBpB0F,EAAsB,SAA2CyZ,GAC5E,IAAM68D,EAASp3E,uBAAuBua,EAAKuvF,eAE3C,OAAO1vG,OAAOuiI,OAAOvlD,EAAQ,CAC3B7uE,QAAS6uE,EACT5uE,QAASxI,uBAAY,UAChBua,EAAKuvF,cADW,aAEnB,SAAC3nG,GAAD,OAAaA,KACb,SAACy6H,GAAD,OAAUA,KAEZn0H,QAASzI,uBAAY,UAChBua,EAAKuvF,cADW,aAEnB,SAAC3nG,GAAD,OAAaA,KACb,SAACy6H,GAAD,sBACKA,EADL,CAEEl9G,OAAO,W,mPCfFxT,EAAoB,oBA2CpB2wH,GAAwB,mBA1CU,mCA8CV,GAJA,2CAMR,GANQ,cAzCN,mBAiDV,GARgB,iCASlB,GATkB,oCAaf,GAbe,sCAeb,GAfa,gCAiBnB,GAjBmB,cAmBlC3wH,GAAoB,GAnBc,cAvCT,gBA6DV,GAtBmB,cAxChB,QA+DV,SAvB0B,0BA0BxBwtB,QAAQp9B,OAAO2jB,mBAAqB3jB,OAAOwgI,oBA1BnB,2CA4BR,GA5BQ,6BA8BtB,GA9BsB,sBAiC5B,QAjC4B,4BAkCtB,WAlCsB,iCAmClB,GAnCkB,yBAoCzB,WApCyB,yCAqCT,WArCS,0BAsCxB,GAtCwB,GAsExBh0H,EAAmC,WAC9C,IAAMi0H,EAA0B3iI,OAAOlB,KAAKkG,cACzC+H,QAAO,SAAAyB,GAAG,OAAIA,EAAI4R,WAAW,aA7GE,iBA6Ga5R,KAC5CvB,KAAI,SAAAuB,GAAG,sBAhCoB,SAAmBA,GAAnB,OAC9BA,EAAInG,QAAQ,aAAc,IAAIA,QA/EI,eAHf,SAkHdu6H,CAAwBp0H,GA9BC,SAAmBA,GACjD,IAOIq0H,EAPE5kI,EAAQ+G,aAAaqgB,QAAQ7W,GAGnC,GAAc,OAAVvQ,GAA4B,cAAVA,EAEpB
,OADA+G,aAAakgB,WAAW1W,GACjB,KAGT,IACEq0H,EAAS9zH,KAAKC,MAAM/Q,GACpB,MAAOkH,GAGP,MA/FgC,iBA+F5BqJ,GAAgCvQ,EAC3BA,GAGTuC,QAAQC,IAAR,wCAA6C+N,EAA7C,qBAEAxJ,aAAakgB,WAAW1W,GACjB,MAET,OAAOq0H,EAO6BC,CAAwBt0H,OAEzDzB,QAAO,SAAAykC,GAAC,OAA4B,OAAxBxxC,OAAOsN,OAAOkkC,GAAG,MAE1BuxF,EAAmBj2H,YAAS61H,GAClC,OAAOrmD,YAAWmmD,EAAiBM,IAGxB93H,EAAgCyD,IAChC+R,EAAqBxV,EAA6B,aAElDwD,EAAoB,WACNzO,OAAOlB,KAAKkG,cACpB/E,SAAQ,SAAAuO,IACnBA,EAAI4R,WAAWjd,MAAmCqL,EAAI4R,WAAW,cACnEpb,aAAakgB,WAAW1W,S","file":"static/js/main.e248095a.chunk.js","sourcesContent":["export const zeropad = (x: string | number) => {\n if (x > -10 && x < 10) {\n return `0${x}`\n }\n return `${x}`\n}\n\ninterface ScalableUnits {\n [unitGroupName: string]: {\n [unitName: string]: number\n }\n}\n\nexport const leaveAtLeast1Decimal = (number: number) => {\n const decimalPortion = `${number}`.split(\".\")[1]\n if (decimalPortion && decimalPortion.length > 1) {\n return `${number}`\n }\n\n let tms = number * 10\n const integer = Math.floor(tms / 10)\n\n tms -= integer * 10\n return `${integer}.${tms}`\n}\n\ntype TimeUnit = \"MINUTES\" | \"HOURS\" | \"DAYS\"\nconst seconds2time = (seconds: number, maxTimeUnit: TimeUnit) => {\n let secondsReturn = Math.abs(seconds)\n\n const days = maxTimeUnit === \"DAYS\" ? Math.floor(secondsReturn / 86400) : 0\n secondsReturn -= days * 86400\n\n const hours =\n maxTimeUnit === \"DAYS\" || maxTimeUnit === \"HOURS\" ? Math.floor(secondsReturn / 3600) : 0\n secondsReturn -= hours * 3600\n\n const minutes = Math.floor(secondsReturn / 60)\n secondsReturn -= minutes * 60\n\n const daysString = maxTimeUnit === \"DAYS\" ? `${days}d:` : \"\"\n const hoursString = maxTimeUnit === \"DAYS\" || maxTimeUnit === \"HOURS\" ? `${zeropad(hours)}:` : \"\"\n const minutesString = `${zeropad(minutes)}:`\n let secondsString = zeropad(secondsReturn.toFixed(2))\n\n return `${daysString}${hoursString}${minutesString}${secondsString}`\n}\n\nconst scalableUnits: ScalableUnits = {\n \"packets/s\": {\n pps: 1,\n Kpps: 1000,\n Mpps: 1000000,\n },\n pps: {\n pps: 1,\n Kpps: 1000,\n Mpps: 1000000,\n },\n \"kilobits/s\": {\n \"bits/s\": 1 / 1000,\n \"kilobits/s\": 1,\n \"megabits/s\": 1000,\n \"gigabits/s\": 1000000,\n \"terabits/s\": 1000000000,\n },\n \"bytes/s\": {\n \"bytes/s\": 1,\n \"kilobytes/s\": 1024,\n \"megabytes/s\": 1024 * 1024,\n \"gigabytes/s\": 1024 * 1024 * 1024,\n \"terabytes/s\": 1024 * 1024 * 1024 * 1024,\n },\n \"kilobytes/s\": {\n \"bytes/s\": 1 / 1024,\n \"kilobytes/s\": 1,\n \"megabytes/s\": 1024,\n \"gigabytes/s\": 1024 * 1024,\n \"terabytes/s\": 1024 * 1024 * 1024,\n },\n \"B/s\": {\n \"B/s\": 1,\n \"KiB/s\": 1024,\n \"MiB/s\": 1024 * 1024,\n \"GiB/s\": 1024 * 1024 * 1024,\n \"TiB/s\": 1024 * 1024 * 1024 * 1024,\n },\n \"KB/s\": {\n \"B/s\": 1 / 1024,\n \"KB/s\": 1,\n \"MB/s\": 1024,\n \"GB/s\": 1024 * 1024,\n \"TB/s\": 1024 * 1024 * 1024,\n },\n \"KiB/s\": {\n \"B/s\": 1 / 1024,\n \"KiB/s\": 1,\n \"MiB/s\": 1024,\n \"GiB/s\": 1024 * 1024,\n \"TiB/s\": 1024 * 1024 * 1024,\n },\n bytes: {\n bytes: 1,\n kilobytes: 1024,\n megabytes: 1024 * 1024,\n gigabytes: 1024 * 1024 * 1024,\n terabytes: 1024 * 1024 * 1024 * 1024,\n },\n Hz: {\n Hz: 1,\n kHz: 10 ** 3,\n MHz: 10 ** 6,\n GHz: 10 ** 9,\n THz: 10 ** 12,\n PHz: 10 ** 15,\n EHz: 10 ** 18,\n ZHz: 10 ** 21,\n },\n B: {\n B: 1,\n KiB: 1024,\n MiB: 1024 * 1024,\n GiB: 1024 * 1024 * 1024,\n TiB: 1024 * 1024 * 1024 * 1024,\n PiB: 1024 * 1024 * 1024 * 1024 * 1024,\n },\n KB: {\n B: 1 / 1024,\n KB: 1,\n MB: 1024,\n GB: 1024 * 1024,\n TB: 1024 * 1024 * 1024,\n },\n KiB: {\n B: 1 / 
1024,\n KiB: 1,\n MiB: 1024,\n GiB: 1024 * 1024,\n TiB: 1024 * 1024 * 1024,\n },\n MB: {\n B: 1 / (1024 * 1024),\n KB: 1 / 1024,\n MB: 1,\n GB: 1024,\n TB: 1024 * 1024,\n PB: 1024 * 1024 * 1024,\n },\n MiB: {\n B: 1 / (1024 * 1024),\n KiB: 1 / 1024,\n MiB: 1,\n GiB: 1024,\n TiB: 1024 * 1024,\n PiB: 1024 * 1024 * 1024,\n },\n GB: {\n B: 1 / (1024 * 1024 * 1024),\n KB: 1 / (1024 * 1024),\n MB: 1 / 1024,\n GB: 1,\n TB: 1024,\n PB: 1024 * 1024,\n EB: 1024 * 1024 * 1024,\n },\n GiB: {\n B: 1 / (1024 * 1024 * 1024),\n KiB: 1 / (1024 * 1024),\n MiB: 1 / 1024,\n GiB: 1,\n TiB: 1024,\n PiB: 1024 * 1024,\n EiB: 1024 * 1024 * 1024,\n },\n /*\n 'milliseconds': {\n 'seconds': 1000\n },\n 'seconds': {\n 'milliseconds': 0.001,\n 'seconds': 1,\n 'minutes': 60,\n 'hours': 3600,\n 'days': 86400\n }\n */\n}\n\nlet currentTemperatureSetting: \"celsius\" | \"fahrenheit\"\nlet currentSecondsAsTimeSetting: boolean\ninterface ConvertibleUnits {\n [unitIn: string]: {\n [unitOut: string]: {\n check: ((number: number) => boolean) | (() => boolean)\n convert: (number: number) => number | string\n }\n }\n}\n\nconst twoFixed =\n (multiplier: number = 1) =>\n (value: number) =>\n (value * multiplier).toFixed(2)\n\nconst convertibleUnits: ConvertibleUnits = {\n Celsius: {\n Fahrenheit: {\n check() {\n return currentTemperatureSetting === \"fahrenheit\"\n },\n convert(value: number) {\n return (value * 9) / 5 + 32\n },\n },\n },\n celsius: {\n fahrenheit: {\n check() {\n return currentTemperatureSetting === \"fahrenheit\"\n },\n convert(value: number) {\n return (value * 9) / 5 + 32\n },\n },\n },\n milliseconds: {\n microseconds: {\n check: (max: number) => max < 1,\n convert: twoFixed(1000),\n },\n milliseconds: {\n check: (max: number) => max >= 1 && max < 1000,\n convert: twoFixed(),\n },\n seconds: {\n check: (max: number) => max >= 1000 && max < 60000,\n convert: twoFixed(0.001),\n },\n \"MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 60000 && max < 3600_000,\n convert: (value: number) => seconds2time(value / 1000, \"MINUTES\"),\n },\n \"HH:MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 3600_000 && max < 86_400_000,\n convert: (value: number) => seconds2time(value / 1000, \"HOURS\"),\n },\n \"dHH:MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 86_400_000,\n convert: (value: number) => seconds2time(value / 1000, \"DAYS\"),\n },\n },\n\n seconds: {\n microseconds: {\n check: (max: number) => max < 0.001,\n convert: twoFixed(1000_000),\n },\n milliseconds: {\n check: (max: number) => max >= 0.001 && max < 1,\n convert: twoFixed(1000),\n },\n seconds: {\n check: (max: number) => max >= 1 && max < 60,\n convert: twoFixed(1),\n },\n \"MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 60 && max < 3600,\n convert: (value: number) => seconds2time(value, \"MINUTES\"),\n },\n \"HH:MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 3600 && max < 86_400,\n convert: (value: number) => seconds2time(value, \"HOURS\"),\n },\n \"dHH:MM:SS.ms\": {\n check: (max: number) => currentSecondsAsTimeSetting && max >= 86_400,\n convert: (value: number) => seconds2time(value, \"DAYS\"),\n },\n },\n}\n\nconst identity = (value: number) => value\n\ninterface Keys {\n [commonUnitsKey: string]: {\n [uuid: string]: {\n divider: number\n units: string\n }\n }\n}\ninterface Latest {\n [commonUnitsKey: string]: {\n divider: number\n units: string\n }\n}\nexport const unitsConversionCreator = {\n // todo 
lift the state\n keys: {} as Keys, // keys for data-common-units\n latest: {} as Latest, // latest selected units for data-common-units\n\n globalReset() {\n this.keys = {}\n this.latest = {}\n },\n\n // get a function that converts the units\n // + every time units are switched call the callback\n get(\n uuid: string,\n min: number,\n max: number,\n units: string | undefined,\n desiredUnits: undefined | null | string,\n commonUnitsName: string | null | undefined,\n switchUnitsCallback: (units: string) => void,\n temperatureSetting: \"celsius\" | \"fahrenheit\",\n secondsAsTimeSetting: boolean\n ) {\n // validate the parameters\n if (typeof units === \"undefined\") {\n // eslint-disable-next-line no-param-reassign\n units = \"undefined\"\n }\n\n // it will be removed when we'll lift the state to redux\n currentTemperatureSetting = temperatureSetting\n currentSecondsAsTimeSetting = secondsAsTimeSetting\n\n // check if we support units conversion\n if (typeof scalableUnits[units] === \"undefined\"\n && typeof convertibleUnits[units] === \"undefined\"\n ) {\n // we can't convert these units\n // console.log('DEBUG: ' + uuid.toString() + ' can\\'t convert units: ' + units.toString());\n return (value: number) => value\n }\n\n // check if the caller wants the original units\n if (desiredUnits === undefined || desiredUnits === null || desiredUnits === \"original\"\n || desiredUnits === units\n ) {\n // console.log('DEBUG: ' + uuid.toString() + ' original units wanted');\n switchUnitsCallback(units)\n return identity\n }\n\n // now we know we can convert the units\n // and the caller wants some kind of conversion\n\n let tunits = null\n let tdivider = 0\n\n if (typeof scalableUnits[units] !== \"undefined\") {\n // units that can be scaled\n // we decide a divider\n\n if (desiredUnits === \"auto\") {\n // the caller wants to auto-scale the units\n\n // find the absolute maximum value that is rendered on the chart\n // based on this we decide the scale\n /* eslint-disable no-param-reassign */\n min = Math.abs(min)\n max = Math.abs(max)\n if (min > max) {\n max = min\n }\n /* eslint-enable no-param-reassign */\n\n const scalableUnitsGroup = scalableUnits[units]\n Object.keys(scalableUnitsGroup).forEach((unit) => {\n const unitDivider = scalableUnitsGroup[unit]\n if (unitDivider <= max && unitDivider > tdivider) {\n tunits = unit\n tdivider = unitDivider\n }\n })\n\n if (tunits === null || tdivider <= 0) {\n // we couldn't find auto-scaling candidate for unit\n switchUnitsCallback(units)\n return identity\n }\n\n if (typeof commonUnitsName === \"string\") {\n // the caller wants several charts to have the same units\n // data-common-units\n\n const commonUnitsKey = `${commonUnitsName}-${units}`\n\n // add our divider into the list of keys\n let t = this.keys[commonUnitsKey]\n if (typeof t === \"undefined\") {\n this.keys[commonUnitsKey] = {}\n t = this.keys[commonUnitsKey]\n }\n t[uuid] = {\n units: tunits,\n divider: tdivider,\n }\n\n // find the max divider of all charts\n let commonUnits = t[uuid]\n // todo remove imperative forEach\n Object.keys(t).forEach((x) => {\n if (t[x].divider > commonUnits.divider) {\n commonUnits = t[x]\n }\n })\n\n // save our common_max to the latest keys\n const latest = {\n units: commonUnits.units,\n divider: commonUnits.divider,\n }\n this.latest[commonUnitsKey] = latest\n\n tunits = latest.units\n tdivider = latest.divider\n\n // apply it to this chart\n switchUnitsCallback(tunits)\n return (value: number) => {\n if (tdivider !== latest.divider) {\n // another 
chart switched our common units\n // we should switch them too\n tunits = latest.units\n tdivider = latest.divider\n switchUnitsCallback(tunits)\n }\n\n return value / tdivider\n }\n }\n // the caller did not give data-common-units\n // this chart auto-scales independently of all others\n\n switchUnitsCallback(tunits)\n return (value: number) => value / tdivider\n }\n // the caller wants specific units\n\n if (typeof scalableUnits[units][desiredUnits] !== \"undefined\") {\n // all good, set the new units\n tdivider = scalableUnits[units][desiredUnits]\n switchUnitsCallback(desiredUnits)\n return (value: number) => value / tdivider\n }\n // oops! switch back to original units\n // eslint-disable-next-line no-console\n console.log(`Units conversion from ${units.toString()} to ${desiredUnits.toString()}\n is not supported.`)\n\n switchUnitsCallback(units)\n return identity\n } if (typeof convertibleUnits[units] !== \"undefined\") {\n // units that can be converted\n if (desiredUnits === \"auto\") {\n let newConvertFunction: ((number: number) => number | string) | undefined\n Object.keys(convertibleUnits[units]).forEach((x) => {\n if (newConvertFunction) { return }\n if (convertibleUnits[(units as string)][x].check(max)) {\n // converting\n switchUnitsCallback(x)\n newConvertFunction = convertibleUnits[(units as string)][x].convert\n }\n })\n if (newConvertFunction) {\n return newConvertFunction\n }\n\n // none checked ok (no conversion available)\n switchUnitsCallback(units)\n return identity\n } if (typeof convertibleUnits[units][desiredUnits] !== \"undefined\") {\n switchUnitsCallback(desiredUnits)\n return convertibleUnits[units][desiredUnits].convert\n }\n // eslint-disable-next-line no-console\n console.log(`Units conversion from ${units.toString()} to ${desiredUnits.toString()}\n is not supported.`)\n switchUnitsCallback(units)\n return identity\n }\n // hm... 
did we forget to implement the new type?\n // eslint-disable-next-line no-console\n console.log(`Unmatched unit conversion method for units ${units.toString()}`)\n switchUnitsCallback(units)\n return identity\n },\n}\n","import * as React from \"react\"\nimport classNames from \"classnames\"\n\nimport \"./button.css\"\n\ntype Props = React.ButtonHTMLAttributes<HTMLButtonElement>\nexport const Button = React.forwardRef(({\n children,\n className,\n ...rest\n}: Props, ref: React.Ref<HTMLButtonElement>) => (\n <button\n {...rest} // eslint-disable-line react/jsx-props-no-spreading\n type=\"button\"\n className={classNames(\"netdata-reset-button\", className)}\n ref={ref}\n >\n {children}\n </button>\n))\n","import React, { useRef, useEffect } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectShowHelp } from \"domains/global/selectors\"\nimport { Icon, IconType } from \"components/icon\"\nimport { Button } from \"components/button\"\n\ntype ClickCallback = (event: React.MouseEvent) => void\ninterface ToolboxButtonProps {\n className?: string\n iconType: IconType\n onClick?: ClickCallback\n onDoubleClick?: (event: React.MouseEvent) => void\n onMouseDown?: (event: React.MouseEvent) => void\n onTouchStart?: (event: React.TouchEvent) => void\n popoverContent: string\n popoverTitle: string\n}\nexport const ToolboxButton = ({\n className,\n iconType,\n onClick,\n onDoubleClick,\n onMouseDown,\n onTouchStart,\n popoverContent,\n popoverTitle,\n}: ToolboxButtonProps) => {\n const buttonRef = useRef(null)\n const showHelp = useSelector(selectShowHelp)\n useEffect(() => {\n if (buttonRef.current && showHelp) {\n window.$(buttonRef.current).popover({\n container: \"body\",\n animation: false,\n html: true,\n trigger: \"hover\",\n placement: \"bottom\",\n delay: {\n show: window.NETDATA.options.current.show_help_delay_show_ms,\n hide: window.NETDATA.options.current.show_help_delay_hide_ms,\n },\n title: popoverTitle,\n content: popoverContent,\n })\n }\n }, []) // eslint-disable-line react-hooks/exhaustive-deps\n return (\n <Button\n className={classNames(className)}\n onClick={onClick}\n onDoubleClick={onDoubleClick}\n onMouseDown={onMouseDown}\n onTouchStart={onTouchStart}\n ref={buttonRef}\n >\n <Icon iconType={iconType} />\n </Button>\n )\n}\n","import React, { useState, useCallback, useEffect } from \"react\"\nimport { ToolboxButton } from \"domains/chart/components/toolbox-button\"\nimport { setResizeHeightAction } from \"domains/chart/actions\"\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from \"domains/chart/utils/legend-utils\"\nimport { useDispatch } from \"store/redux-separate-context\"\n\nexport const LOCALSTORAGE_HEIGHT_KEY_PREFIX = \"chart_height.\"\n\ninterface Props {\n chartContainerElement: HTMLElement\n chartUuid: string\n heightId: string | undefined\n isLegendOnBottom: boolean\n}\n\nexport const ResizeHandler = ({\n chartContainerElement, chartUuid, heightId, isLegendOnBottom,\n}: Props) => {\n const [resizeHeight, setResizeHeight] = useState(() => chartContainerElement.clientHeight)\n const dispatch = useDispatch()\n\n useEffect(() => {\n // todo when attributes.id are present, hook height to localStorage\n if (resizeHeight >= 70) {\n dispatch(\n setResizeHeightAction({\n id: chartUuid,\n resizeHeight,\n }),\n )\n }\n }, [resizeHeight, chartUuid, heightId, dispatch])\n\n const handleResize = useCallback(\n (event) => {\n event.preventDefault()\n const intialHeight = 
chartContainerElement.clientHeight\n const eventStartHeight = event.type === \"touchstart\"\n ? event.touches[0].clientY\n : event.clientY\n\n const setHeight = (currentHeight: number) => {\n const nextHeight = intialHeight + currentHeight - eventStartHeight\n // eslint-disable-next-line no-param-reassign\n chartContainerElement.style.height = `${nextHeight.toString()}px`\n setResizeHeight(nextHeight)\n if (heightId) {\n const heightForPersistance = isLegendOnBottom\n ? (nextHeight - LEGEND_BOTTOM_SINGLE_LINE_HEIGHT)\n : nextHeight\n localStorage.setItem(\n `${LOCALSTORAGE_HEIGHT_KEY_PREFIX}${heightId}`,\n `${heightForPersistance}`,\n )\n }\n }\n\n const onMouseMove = (e: MouseEvent) => setHeight(e.clientY)\n const onTouchMove = (e: TouchEvent) => setHeight(e.touches[0].clientY)\n\n const onMouseEnd = () => {\n document.removeEventListener(\"mousemove\", onMouseMove)\n document.removeEventListener(\"mouseup\", onMouseEnd)\n }\n\n const onTouchEnd = () => {\n document.removeEventListener(\"touchmove\", onTouchMove)\n document.removeEventListener(\"touchend\", onTouchEnd)\n }\n\n if (event.type === \"touchstart\") {\n document.addEventListener(\"touchmove\", onTouchMove)\n document.addEventListener(\"touchend\", onTouchEnd)\n } else {\n document.addEventListener(\"mousemove\", onMouseMove)\n document.addEventListener(\"mouseup\", onMouseEnd)\n }\n },\n [chartContainerElement.clientHeight, chartContainerElement.style.height, heightId,\n isLegendOnBottom],\n )\n\n return (\n <ToolboxButton\n className=\"netdata-legend-resize-handler\"\n onDoubleClick={(event: React.MouseEvent) => {\n event.preventDefault()\n event.stopPropagation()\n }}\n onMouseDown={handleResize}\n onTouchStart={handleResize}\n iconType=\"resize\"\n popoverTitle=\"Chart Resize\"\n popoverContent=\"Drag this point with your mouse or your finger (on touch devices), to resize\n the chart vertically. 
You can also <b>double click it</b> or <b>double tap it</b> to reset\n between 2 states: the default and the one that fits all the values.<br/><small>Help\n can be disabled from the settings.</small>\"\n />\n )\n}\n","import { createAction } from \"redux-act\"\n\nimport { createRequestAction } from \"utils/createRequestAction\"\nimport { RegistryMachine } from \"domains/global/sagas\"\nimport { storeKey } from \"./constants\"\nimport { ActiveAlarms, ChartsMetadata, Snapshot, Alarm, UserNodeAccessMessage } from \"./types\"\n\ninterface RequestCommonColors {\n chartContext: string\n chartUuid: string\n colorsAttribute: string | undefined\n commonColorsAttribute: string | undefined\n dimensionNames: string[]\n}\nexport const requestCommonColorsAction = createAction<RequestCommonColors>(\n `${storeKey}/globalRequestCommonColors`\n)\n\ninterface SetCommonMinAction {\n chartUuid: string\n commonMinKey: string\n value: number\n}\nexport const setCommonMinAction = createAction<SetCommonMinAction>(`${storeKey}/setCommonMin`)\n\ninterface SetCommonMaxAction {\n chartUuid: string\n commonMaxKey: string\n value: number\n}\nexport const setCommonMaxAction = createAction<SetCommonMaxAction>(`${storeKey}/setCommonMax`)\n\ninterface SetGlobalSelectionAction {\n chartUuid: string | null\n hoveredX: number\n}\nexport const setGlobalSelectionAction = createAction<SetGlobalSelectionAction>(\n `${storeKey}/setGlobalSelection`\n)\n\nexport interface SetGlobalPanAndZoomAction {\n after: number\n before: number\n masterID?: string\n shouldForceTimeRange?: boolean\n}\nexport const setGlobalPanAndZoomAction = createAction<SetGlobalPanAndZoomAction>(\n `${storeKey}/setGlobalPanAndZoom`\n)\n\nexport const resetGlobalPanAndZoomAction = createAction(`${storeKey}/resetGlobalPanAndZoomAction`)\n\nexport interface SetDefaultAfterAction {\n after: number\n}\nexport const setDefaultAfterAction = createAction<SetDefaultAfterAction>(\n `${storeKey}/setDefaultAfterAction`\n)\n\nexport const resetDefaultAfterAction = createAction(`${storeKey}/resetDefaultAfterAction`)\n\nexport interface SetGlobalChartUnderlayAction {\n after: number\n before: number\n masterID: string\n}\nexport const setGlobalChartUnderlayAction = createAction<SetGlobalChartUnderlayAction>(\n `${storeKey}/setGlobalChartUnderlay`\n)\n\nexport const centerAroundHighlightAction = createAction(`${storeKey}/centerAroundHighlightAction`)\nexport const clearHighlightAction = createAction(`${storeKey}/clearHighlightAction`)\n\ninterface WindowFocusChangeAction {\n hasWindowFocus: boolean\n}\nexport const windowFocusChangeAction = createAction<WindowFocusChangeAction>(\n `${storeKey}/windowFocusChangeAction`\n)\n\nexport interface FetchHelloPayload {\n serverDefault: string\n}\n/* eslint-disable camelcase */\nexport interface HelloResponse {\n action: \"hello\"\n anonymous_statistics: boolean\n cloud_base_url: string\n hostname: string\n machine_guid: string\n registry: string\n status: string\n}\n/* eslint-enable camelcase */\n\nexport const fetchHelloAction = createRequestAction<\n FetchHelloPayload,\n { cloudBaseURL: string; hostname: string; isCloudEnabled: boolean; machineGuid: string }\n>(`${storeKey}/fetchHelloAction`)\n\ninterface UpdatePersonUrlsAction {\n personGuid: string\n registryMachines: { [key: string]: RegistryMachine }\n registryMachinesArray: RegistryMachine[]\n}\nexport const updatePersonUrlsAction = createAction<UpdatePersonUrlsAction>(\n `${storeKey}/updatePersonUrlsAction`\n)\n\nexport interface AccessRegistrySuccessAction {\n registryServer: 
string\n}\nexport const accessRegistrySuccessAction = createAction<AccessRegistrySuccessAction>(\n `${storeKey}/accessRegistrySuccessAction`\n)\n\nexport interface StartAlarmsPayload {\n serverDefault: string\n}\nexport const startAlarmsAction = createAction<StartAlarmsPayload>(`${storeKey}/startAlarmsAction`)\n\nexport const fetchAllAlarmsAction = createRequestAction(`${storeKey}/fetchAllAlarmsAction`)\n\nexport interface UpdateActiveAlarmAction {\n activeAlarms: ActiveAlarms\n}\nexport const updateActiveAlarmsAction = createAction<UpdateActiveAlarmAction>(\n `${storeKey}/updateActiveAlarmsAction`\n)\n\nexport interface SetOptionAction {\n key: string\n value: unknown\n}\nexport const setOptionAction = createAction<SetOptionAction>(`${storeKey}/setOptionAction`)\n\nexport const resetOptionsAction = createAction(`${storeKey}/resetOptions`)\n\nexport const loadSnapshotAction = createAction<{ snapshot: Snapshot }>(\n `${storeKey}/loadSnapshotAction`\n)\n\nexport const chartsMetadataRequestSuccess = createAction<{ data: ChartsMetadata }>(\n `${storeKey}/chartsMetadataRequestSuccess`\n)\n\nexport interface SetSpacePanelStatusActionPayload {\n isActive: boolean\n}\nexport const setSpacePanelStatusAction = createAction<SetSpacePanelStatusActionPayload>(\n `${storeKey}/setSpacePanelStatusAction`\n)\n\nexport interface SetSpacePanelTransitionEndPayload {\n isActive: boolean\n}\nexport const setSpacePanelTransitionEndAction = createAction<SetSpacePanelTransitionEndPayload>(\n `${storeKey}/setSpacePanelStatusAction`\n)\n\nexport const setAlarmAction = createAction<{ alarm: Alarm }>(`${storeKey}/setAlarmAction`)\n\nexport const resetRegistry = createAction(`${storeKey}/resetRegistry`)\n\nexport const setGlobalPauseAction = createAction(`${storeKey}/setGlobalPauseAction`)\nexport const resetGlobalPauseAction = createAction<{ forcePlay?: boolean }>(\n `${storeKey}/resetGlobalPauseAction`\n)\nexport const setUTCOffset = createAction<{ utcOffset?: number | string }>(\n `${storeKey}/setUTCOffset`\n)\n\nexport const setUserNodeAccess = createAction<{ message: UserNodeAccessMessage }>(\n `${storeKey}/setUserNodeAccess`\n)\n","import { useCallback } from \"react\"\nimport { useDispatch } from \"react-redux\"\nimport { sendToChildIframe, useListenToPostMessage } from \"utils/post-message\"\nimport { setUserNodeAccess } from \"domains/global/actions\"\nimport { UserNodeAccessMessage } from \"domains/global/types\"\nimport { SIGN_IN_IFRAME_ID } from \"components/header/constants\"\n\nconst useUserNodeAccessMessage = () => {\n const dispatch = useDispatch()\n useListenToPostMessage<UserNodeAccessMessage>(\"user-node-access\", message => {\n dispatch(setUserNodeAccess({ message }))\n })\n}\n\nexport const useRequestRefreshOfAccessMessage = () => {\n return useCallback(() => {\n sendToChildIframe(SIGN_IN_IFRAME_ID, { type: \"request-refresh-access\", payload: true })\n }, [])\n}\n\nexport default useUserNodeAccessMessage\n","export * from \"./utils\"\n","export const name2id = (s: string) => s\n .replace(/ /g, \"_\")\n .replace(/:/g, \"_\")\n .replace(/\\(/g, \"_\")\n .replace(/\\)/g, \"_\")\n .replace(/\\./g, \"_\")\n .replace(/\\//g, \"_\")\n","import { init, last, mergeAll } from \"ramda\"\nimport { createReducer } from \"redux-act\"\n\nimport { getInitialAfterFromWindow } from \"utils/utils\"\nimport { isMainJs } from \"utils/env\"\nimport { RegistryMachine } from \"domains/global/sagas\"\nimport { Alarm, ActiveAlarms, Snapshot, ChartsMetadata } from \"domains/global/types\"\nimport { fetchInfoAction } from 
\"domains/chart/actions\"\nimport { InfoPayload } from \"./__mocks__/info-mock\"\nimport {\n requestCommonColorsAction,\n setGlobalChartUnderlayAction,\n setGlobalSelectionAction,\n setGlobalPanAndZoomAction,\n centerAroundHighlightAction,\n clearHighlightAction,\n resetGlobalPanAndZoomAction,\n setDefaultAfterAction,\n windowFocusChangeAction,\n fetchHelloAction,\n updatePersonUrlsAction,\n startAlarmsAction,\n updateActiveAlarmsAction,\n setOptionAction,\n loadSnapshotAction,\n chartsMetadataRequestSuccess,\n setCommonMaxAction,\n setCommonMinAction,\n resetOptionsAction,\n setSpacePanelStatusAction,\n setSpacePanelTransitionEndAction,\n resetRegistry,\n accessRegistrySuccessAction,\n resetDefaultAfterAction,\n setAlarmAction,\n setGlobalPauseAction,\n resetGlobalPauseAction,\n setUTCOffset,\n setUserNodeAccess,\n} from \"./actions\"\nimport {\n Options,\n optionsMergedWithLocalStorage,\n getOptionsMergedWithLocalStorage,\n clearLocalStorage,\n} from \"./options\"\nimport { CLOUD_BASE_URL_DISABLED } from \"./constants\"\nimport { UserNodeAccessMessage } from \"./types\"\n\ninterface CommonMinMax {\n [commonKey: string]: {\n charts: {\n [chartUuid: string]: number\n }\n currentExtreme: number\n }\n}\n\nexport type StateT = {\n commonColorsKeys: {\n [key: string]: {\n // key can be uuid, chart's context or commonColors attribute\n assigned: {\n // name-value of dimensions and their colors\n [dimensionName: string]: string\n }\n available: string[] // an array of colors available to be used\n custom: string[] // the array of colors defined by the user\n charts: {} // the charts linked to this todo remove\n copyTheme: boolean\n }\n }\n commonMin: CommonMinMax\n commonMax: CommonMinMax\n currentSelectionMasterId: string | null\n globalPanAndZoom: null | {\n after: number // timestamp in ms\n before: number // timestamp in ms\n masterID?: string\n shouldForceTimeRange?: boolean\n }\n defaultAfter: number\n globalChartUnderlay: null | {\n after: number\n before: number\n masterID: string\n }\n hoveredX: number | null\n hasWindowFocus: boolean\n globalPause: boolean\n\n spacePanelIsActive: boolean\n spacePanelTransitionEndIsActive: boolean\n\n registry: {\n cloudBaseURL: string | null\n hasFetchedHello: boolean\n isHelloCallError: boolean | null\n hasFetchedInfo: boolean\n hostname: string\n isCloudEnabled: boolean | null\n isCloudAvailable: boolean | null\n isAgentClaimed: boolean | null\n isACLKAvailable: boolean | null\n hasStartedInfo: boolean\n fullInfoPayload: InfoPayload | null\n isFetchingHello: boolean\n machineGuid: string | null\n personGuid: string | null\n registryMachines: { [key: string]: RegistryMachine } | null\n registryMachinesArray: RegistryMachine[] | null\n registryServer: string | null\n }\n\n chartsMetadata: {\n isFetching: boolean\n isFetchingError: boolean\n data: null | ChartsMetadata\n }\n\n alarms: {\n activeAlarms: null | ActiveAlarms\n hasStartedAlarms: boolean\n }\n alarm: null | Alarm\n\n snapshot: Snapshot | null\n options: Options\n userNodeAccess: UserNodeAccessMessage\n}\n\nexport const initialDefaultAfter = isMainJs ? 
getInitialAfterFromWindow() : -900\n\nexport const initialState: StateT = {\n commonColorsKeys: {},\n commonMin: {},\n commonMax: {},\n currentSelectionMasterId: null,\n globalPanAndZoom: null,\n // todo for dashboard calculate it based on width and window.NETDATA.chartDefaults.after\n defaultAfter: initialDefaultAfter,\n globalChartUnderlay: null,\n hoveredX: null,\n hasWindowFocus: document.hasFocus(),\n globalPause: false,\n spacePanelIsActive: false, // set to true only for testing layout\n // the same as property above, just updated after transition ends\n spacePanelTransitionEndIsActive: false,\n\n registry: {\n cloudBaseURL: null,\n hasFetchedInfo: false,\n hasFetchedHello: false,\n isHelloCallError: null,\n hostname: \"unknown\",\n isCloudEnabled: null,\n isCloudAvailable: null,\n isAgentClaimed: null,\n isACLKAvailable: null,\n hasStartedInfo: false,\n isFetchingHello: false,\n fullInfoPayload: null,\n machineGuid: null,\n personGuid: null,\n registryMachines: null,\n registryMachinesArray: null,\n registryServer: null,\n },\n\n snapshot: null,\n alarms: {\n activeAlarms: null,\n hasStartedAlarms: false,\n },\n alarm: null,\n\n chartsMetadata: {\n isFetching: false,\n isFetchingError: false,\n data: null,\n },\n\n options: optionsMergedWithLocalStorage,\n userNodeAccess: null,\n}\n\nexport const globalReducer = createReducer<StateT>({}, initialState)\n\nexport interface GetKeyArguments {\n colorsAttribute: string | undefined\n commonColorsAttribute: string | undefined\n chartUuid: string\n chartContext: string\n}\nexport const getKeyForCommonColorsState = ({\n colorsAttribute,\n commonColorsAttribute,\n chartUuid,\n chartContext,\n}: GetKeyArguments) => {\n const hasCustomColors = typeof colorsAttribute === \"string\" && colorsAttribute.length > 0\n\n // when there's commonColors attribute, share the state between all charts with that attribute\n // if not, when there are custom colors, make each chart independent\n // if not, share the same state between charts with the same context\n return commonColorsAttribute || (hasCustomColors ? chartUuid : chartContext)\n}\n\nconst hasLastOnly = (array: string[]) => last(array) === \"ONLY\"\nconst removeLastOnly = (array: string[]) => (hasLastOnly(array) ? init(array) : array)\nconst createCommonColorsKeysSubstate = (\n colorsAttribute: string | undefined,\n hasCustomColors: boolean\n) => {\n const custom = hasCustomColors ? removeLastOnly((colorsAttribute as string).split(\" \")) : []\n const shouldCopyTheme = hasCustomColors\n ? // disable copyTheme when there's \"ONLY\" keyword in \"data-colors\" attribute\n !hasLastOnly((colorsAttribute as string).split(\" \"))\n : true\n const available = [\n ...custom,\n ...(shouldCopyTheme || custom.length === 0 ? 
window.NETDATA.themes.current.colors : []),\n ]\n return {\n assigned: {},\n available,\n custom,\n }\n}\n\nglobalReducer.on(\n requestCommonColorsAction,\n //@ts-ignore\n (state, { chartContext, chartUuid, colorsAttribute, commonColorsAttribute, dimensionNames }) => {\n const keyName = getKeyForCommonColorsState({\n colorsAttribute,\n commonColorsAttribute,\n chartUuid,\n chartContext,\n })\n\n const hasCustomColors = typeof colorsAttribute === \"string\" && colorsAttribute.length > 0\n const subState =\n state.commonColorsKeys[keyName] ||\n createCommonColorsKeysSubstate(colorsAttribute, hasCustomColors)\n\n const currentlyAssignedNr = Object.keys(subState.assigned).length\n const requestedDimensionsAssigned = mergeAll(\n dimensionNames\n // dont assign already assigned dimensions\n .filter(dimensionName => !subState.assigned[dimensionName])\n .map((dimensionName, i) => ({\n [dimensionName]:\n subState.available[(i + currentlyAssignedNr) % subState.available.length],\n }))\n )\n const assigned = {\n ...subState.assigned,\n ...requestedDimensionsAssigned,\n }\n\n return {\n ...state,\n commonColorsKeys: {\n ...state.commonColorsKeys,\n [keyName]: {\n ...subState,\n assigned,\n },\n },\n }\n }\n)\n\nglobalReducer.on(setCommonMinAction, (state, { chartUuid, commonMinKey, value }) => {\n const charts = {\n ...state.commonMin[commonMinKey]?.charts,\n [chartUuid]: value,\n }\n const currentExtreme = Math.min(...Object.values(charts))\n\n return {\n ...state,\n commonMin: {\n ...state.commonMin,\n [commonMinKey]: {\n charts,\n currentExtreme,\n },\n },\n }\n})\n\nglobalReducer.on(setCommonMaxAction, (state, { chartUuid, commonMaxKey, value }) => {\n const charts = {\n ...state.commonMax[commonMaxKey]?.charts,\n [chartUuid]: value,\n }\n const currentExtreme = Math.max(...Object.values(charts))\n\n return {\n ...state,\n commonMax: {\n ...state.commonMax,\n [commonMaxKey]: {\n charts,\n currentExtreme,\n },\n },\n }\n})\n\nglobalReducer.on(setSpacePanelStatusAction, (state, { isActive }) => ({\n ...state,\n spacePanelIsActive: isActive,\n}))\n\nglobalReducer.on(setSpacePanelTransitionEndAction, (state, { isActive }) => ({\n ...state,\n spacePanelTransitionEndIsActive: isActive,\n}))\n\nglobalReducer.on(setGlobalSelectionAction, (state, { chartUuid, hoveredX }) => ({\n ...state,\n hoveredX,\n currentSelectionMasterId: chartUuid,\n}))\n\nglobalReducer.on(setGlobalPanAndZoomAction, (state, payload) => ({\n ...state,\n globalPanAndZoom: payload,\n}))\n\nglobalReducer.on(resetGlobalPanAndZoomAction, state => ({\n ...state,\n globalPanAndZoom: initialState.globalPanAndZoom,\n hoveredX: initialState.hoveredX, // need to reset this also on mobile\n}))\n\nglobalReducer.on(setDefaultAfterAction, (state, { after }) => ({\n ...state,\n defaultAfter: after,\n}))\n\nglobalReducer.on(resetDefaultAfterAction, state => ({\n ...state,\n defaultAfter: initialState.defaultAfter,\n}))\n\nglobalReducer.on(setGlobalChartUnderlayAction, (state, { after, before, masterID }) => ({\n ...state,\n globalChartUnderlay: {\n after,\n before,\n masterID,\n },\n}))\n\nglobalReducer.on(centerAroundHighlightAction, state => {\n if (!state.globalChartUnderlay) {\n // eslint-disable-next-line no-console\n console.warn(\"Cannot center around empty selection\")\n return state\n }\n const { after, before } = state.globalChartUnderlay\n const highlightMargin = (before - after) / 2\n return {\n ...state,\n globalPanAndZoom: {\n after: after - highlightMargin,\n before: before + highlightMargin,\n },\n }\n})\n\nglobalReducer.on(\n 
clearHighlightAction,\n (state, { resetPanAndZoom = true }: { resetPanAndZoom?: boolean } = {}) => ({\n ...state,\n globalChartUnderlay: initialState.globalChartUnderlay,\n ...(resetPanAndZoom ? { globalPanAndZoom: initialState.globalPanAndZoom } : {}),\n })\n)\n\nglobalReducer.on(windowFocusChangeAction, (state, { hasWindowFocus }) => {\n // make additional check, because it's possible to get hasWindowFocus === false\n // message from iframe, after main window makes the state change (race condition)\n const hasFocusNow = document.hasFocus()\n return {\n ...state,\n hasWindowFocus: hasFocusNow || hasWindowFocus,\n }\n})\n\nglobalReducer.on(setGlobalPauseAction, state => ({ ...state, globalPause: true }))\nglobalReducer.on(resetGlobalPauseAction, (state, { forcePlay }) => ({\n ...state,\n globalPause: initialState.globalPause,\n globalPanAndZoom: initialState.globalPanAndZoom,\n hoveredX: initialState.hoveredX,\n options: { ...state.options, stop_updates_when_focus_is_lost: !forcePlay },\n}))\n\nglobalReducer.on(setUTCOffset, (state, { utcOffset }) => ({\n ...state,\n options: { ...state.options, utcOffset },\n}))\n\nglobalReducer.on(fetchHelloAction.request, state => ({\n ...state,\n registry: {\n ...state.registry,\n isFetchingHello: true,\n },\n}))\n\nglobalReducer.on(fetchHelloAction.success, (state, { cloudBaseURL, hostname, machineGuid }) => ({\n ...state,\n registry: {\n ...state.registry,\n cloudBaseURL,\n isFetchingHello: false,\n hasFetchedHello: true,\n hostname,\n machineGuid,\n },\n}))\nglobalReducer.on(fetchHelloAction.failure, state => ({\n ...state,\n registry: {\n ...state.registry,\n cloudBaseURL: CLOUD_BASE_URL_DISABLED,\n isFetchingHello: false,\n isHelloCallError: true,\n },\n}))\nglobalReducer.on(accessRegistrySuccessAction, (state, { registryServer }) => ({\n ...state,\n registry: {\n ...state.registry,\n registryServer,\n },\n}))\n\nglobalReducer.on(resetRegistry, state => ({\n ...state,\n registry: {\n ...state.registry,\n hasFetchedHello: initialState.registry.hasFetchedHello,\n },\n}))\n\nglobalReducer.on(fetchInfoAction, state => ({\n ...state,\n registry: {\n ...state.registry,\n hasStartedInfo: true,\n },\n}))\nglobalReducer.on(\n fetchInfoAction.success,\n (\n state,\n { isCloudAvailable, isCloudEnabled, isAgentClaimed, isACLKAvailable, fullInfoPayload }\n ) => ({\n ...state,\n registry: {\n ...state.registry,\n hasFetchedInfo: true,\n isCloudAvailable,\n isCloudEnabled,\n isAgentClaimed,\n isACLKAvailable,\n fullInfoPayload,\n },\n })\n)\n\nglobalReducer.on(fetchInfoAction.failure, state => ({\n ...state,\n registry: {\n ...state.registry,\n isCloudAvailable: false,\n isCloudEnabled: false,\n isAgentClaimed: false,\n isACLKAvailable: false,\n },\n}))\n\nglobalReducer.on(\n updatePersonUrlsAction,\n (state, { personGuid, registryMachines, registryMachinesArray }) => ({\n ...state,\n registry: {\n ...state.registry,\n personGuid,\n registryMachines,\n registryMachinesArray,\n },\n })\n)\n\nglobalReducer.on(startAlarmsAction, state => ({\n ...state,\n alarms: {\n ...state.alarms,\n hasStartedAlarms: true,\n },\n}))\n\nglobalReducer.on(updateActiveAlarmsAction, (state, { activeAlarms }) => ({\n ...state,\n alarms: {\n ...state.alarms,\n activeAlarms,\n },\n}))\n\nglobalReducer.on(setOptionAction, (state, { key, value }) => ({\n ...state,\n options: {\n ...state.options,\n [key]: value,\n },\n}))\n\nglobalReducer.on(resetOptionsAction, state => {\n clearLocalStorage()\n return {\n ...state,\n options: getOptionsMergedWithLocalStorage(),\n 
}\n})\n\nglobalReducer.on(loadSnapshotAction, (state, { snapshot }) => {\n const parsedData = Object.keys(snapshot.data)\n .map(dataKey => {\n let uncompressed\n try {\n // @ts-ignore\n uncompressed = snapshot.uncompress(snapshot.data[dataKey])\n\n // repeat old logging\n if (uncompressed === null) {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is null`)\n return null\n }\n\n if (typeof uncompressed === \"undefined\") {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is undefined`)\n return null\n }\n } catch (e) {\n // eslint-disable-next-line no-console\n console.warn(`decompression of snapshot data for key ${dataKey} failed`, e)\n uncompressed = null\n }\n\n if (typeof uncompressed !== \"string\") {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is not string`)\n return {}\n }\n\n let data\n try {\n data = JSON.parse(uncompressed)\n } catch (e) {\n // eslint-disable-next-line no-console\n console.warn(`parsing snapshot data for key ${dataKey} failed`)\n return {}\n }\n\n return { [dataKey]: data }\n })\n .reduce((acc, obj) => ({ ...acc, ...obj }), {})\n\n return {\n ...state,\n snapshot: {\n ...snapshot,\n data: parsedData as { [key: string]: unknown },\n },\n }\n})\n\nglobalReducer.on(setAlarmAction, (state, { alarm }) => ({\n ...state,\n alarm,\n}))\n\nglobalReducer.on(chartsMetadataRequestSuccess, (state, { data }) => ({\n ...state,\n chartsMetadata: {\n ...state.chartsMetadata,\n data,\n },\n}))\n\nglobalReducer.on(setUserNodeAccess, (state, { message }) => ({ ...state, userNodeAccess: message }))\n","import React from \"react\"\nimport classNames from \"classnames\"\n\n// todo add support for window.netdataIcons\nexport type IconType = \"left\" | \"reset\" | \"right\" | \"zoomIn\" | \"zoomOut\" | \"resize\" | \"lineChart\"\n | \"areaChart\" | \"noChart\" | \"loading\" | \"noData\"\nconst typeToClassName = (iconType: IconType) => ({\n left: \"fa-backward\",\n reset: \"fa-play\",\n right: \"fa-forward\",\n zoomIn: \"fa-plus\",\n zoomOut: \"fa-minus\",\n resize: \"fa-sort\",\n lineChart: \"fa-chart-line\",\n areaChart: \"fa-chart-area\",\n noChart: \"fa-chart-area\",\n loading: \"fa-sync-alt\",\n noData: \"fa-exclamation-triangle\",\n} as {[key in IconType]: string})[iconType]\n\ninterface Props {\n iconType: IconType\n}\nexport const Icon = ({ iconType }: Props) => (\n <i className={classNames(\"fas\", typeToClassName(iconType))} />\n)\n","import {\n map, omit, assoc, pick,\n} from \"ramda\"\nimport { createReducer } from \"redux-act\"\n\nimport { setOptionAction } from \"domains/global/actions\"\nimport { SYNC_PAN_AND_ZOOM } from \"domains/global/options\"\nimport { useNewKeysOnlyIfDifferent } from \"utils/utils\"\n\nimport {\n fetchDataAction,\n fetchChartAction,\n setResizeHeightAction,\n clearChartStateAction,\n fetchDataForSnapshotAction,\n snapshotExportResetAction,\n setChartPanAndZoomAction,\n resetChartPanAndZoomAction,\n fetchDataCancelAction,\n} from \"./actions\"\nimport { ChartState } from \"./chart-types\"\n\nexport type StateT = {\n [chartID: string]: ChartState\n}\n\nexport const initialState = {\n}\nexport const initialSingleState = {\n chartData: null,\n chartId: null,\n chartMetadata: null,\n chartPanAndZoom: null,\n fetchDataParams: {\n isRemotelyControlled: false,\n viewRange: null,\n },\n isFetchingData: false,\n isFetchDataFailure: false,\n isFetchDetailsFailure: false,\n isFetchingDetails: 
false,\n resizeHeight: null,\n\n snapshotDataIsFetching: false,\n snapshotDataIsError: false,\n snapshotData: null,\n viewRange: null,\n}\n\nexport const chartReducer = createReducer<StateT>(\n {},\n initialState,\n)\n\nexport const getSubstate = (state: StateT, id: string) => state[id] || initialSingleState\n\nchartReducer.on(fetchDataAction.request, (state, { chart, fetchDataParams, id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartId: chart,\n isFetchingData: true,\n viewRange: fetchDataParams.viewRange,\n },\n}))\n\nchartReducer.on(fetchDataCancelAction, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingData: false,\n },\n}))\n\nchartReducer.on(fetchDataAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingData: false,\n isFetchDataFailure: true,\n },\n}))\n\nchartReducer.on(fetchDataAction.success, (state, { id, chartData, fetchDataParams }) => {\n const substate = getSubstate(state, id)\n return {\n ...state,\n [id]: {\n ...substate,\n chartData: useNewKeysOnlyIfDifferent([\"dimension_names\"], substate.chartData, chartData!),\n fetchDataParams,\n isFetchingData: false,\n isFetchDataFailure: false,\n viewRange: fetchDataParams.viewRange,\n },\n }\n})\n\n\nchartReducer.on(fetchDataForSnapshotAction.request, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: true,\n },\n}))\n\nchartReducer.on(fetchDataForSnapshotAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: false,\n snapshotDataIsError: true,\n },\n}))\n\nchartReducer.on(fetchDataForSnapshotAction.success, (state, { id, snapshotData }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: false,\n snapshotDataIsError: false,\n snapshotData,\n },\n}))\n\nchartReducer.on(snapshotExportResetAction, (state) => map((substate) => ({\n ...substate,\n ...pick([\"snapshotDataIsFetching\", \"snapshotDataIsError\", \"snapshotData\"], initialSingleState),\n}), state))\n\n\nchartReducer.on(fetchChartAction.request, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingDetails: true,\n },\n}))\n\nchartReducer.on(fetchChartAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchDetailsFailure: true,\n },\n}))\n\nchartReducer.on(fetchChartAction.success, (state, { id, chartMetadata }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartMetadata,\n isFetchingDetails: false,\n isFetchDetailsFailure: false,\n },\n}))\n\n// todo handle errors without creating a loop\n// chartReducer.on(fetchChartAction.failure, (state, { id }) => ({\n// ...state,\n// [id]: {\n// ...getSubstate(state, id),\n// isFetchingDetails: false,\n// },\n// }))\n\nchartReducer.on(setResizeHeightAction, (state, { id, resizeHeight }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n resizeHeight,\n },\n}))\n\nchartReducer.on(setChartPanAndZoomAction, (state, {\n after, before, id, shouldForceTimeRange,\n}) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartPanAndZoom: { after, before, shouldForceTimeRange },\n },\n}))\n\nchartReducer.on(resetChartPanAndZoomAction, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartPanAndZoom: initialSingleState.chartPanAndZoom,\n },\n}))\n\nchartReducer.on(setOptionAction, (state, { key, value }) => {\n // clear chartPanAndZoom, when SYNC_PAN_AND_ZOOM flag is turned on\n if 
(key === SYNC_PAN_AND_ZOOM && value === true) {\n return map(\n assoc(\"chartPanAndZoom\", initialSingleState.chartPanAndZoom),\n state,\n )\n }\n return state\n})\n\nchartReducer.on(clearChartStateAction, (state, { id }) => omit([id], state))\n","import { mapObjIndexed, mergeAll, isEmpty } from \"ramda\"\nimport { Method } from \"axios\"\nimport { initialLegendRight } from \"domains/global/options\"\nimport { ChartLibraryName } from \"./chartLibrariesSettings\"\n\ntype OutputValue = string | boolean | number | null | undefined | any[]\n// almost the same as in old dashboard to ensure readers that it works the same way\nconst getDataAttribute = (element: Element, key: string, defaultValue?: OutputValue) => {\n const dataKey = `data-${key}`\n if (element.hasAttribute(dataKey)) {\n // we know it's not null because of hasAttribute()\n const data = element.getAttribute(dataKey) as string\n\n if (data === \"true\") {\n return true\n }\n if (data === \"false\") {\n return false\n }\n if (data === \"null\") {\n return null\n }\n\n // Only convert to a number if it doesn't change the string\n if (data === `${+data}`) {\n return +data\n }\n\n if (/^(?:\\{[\\w\\W]*\\}|\\[[\\w\\W]*\\])$/.test(data)) {\n return JSON.parse(data)\n }\n\n return data\n }\n // if no default is passed, then it's undefined and can be replaced with default value later\n // it is recommended to do it in props destructuring assignment phase, ie.:\n // const Chart = ({ dygraphPointsize = 1 }) => ....\n return defaultValue\n}\n\nconst getDataAttributeBoolean = (element: Element, key: string, defaultValue?: boolean) => {\n const value = getDataAttribute(element, key, defaultValue)\n\n if (value === true || value === false) { // gmosx: Love this :)\n return value\n }\n\n if (typeof (value) === \"string\") {\n if (value === \"yes\" || value === \"on\") {\n return true\n }\n\n if (value === \"\" || value === \"no\" || value === \"off\" || value === \"null\") {\n return false\n }\n\n return defaultValue\n }\n\n if (typeof (value) === \"number\") {\n return value !== 0\n }\n\n return defaultValue\n}\n\ninterface BaseAttributeConfig {\n key: string\n defaultValue?: OutputValue\n}\ninterface BooleanAttributeConfig extends BaseAttributeConfig {\n type: \"boolean\"\n defaultValue?: boolean\n}\ntype AttributeConfig = BaseAttributeConfig | BooleanAttributeConfig\n\nexport interface StaticAttributes {\n id: string\n host?: string | undefined\n httpMethod?: Method\n title?: string\n chartLibrary: ChartLibraryName\n width: number | string | null\n height?: number | string | null\n after?: number\n before?: number\n legend?: boolean\n legendPosition?: \"bottom\" | \"right\"\n units?: string\n unitsCommon?: string\n unitsDesired?: string\n aggrMethod?: string\n labels?: {[key: string]: string}\n postGroupBy?: string\n dimensionsAggrMethod?: string\n postAggregationMethod?: string\n aggrGroups?: string[]\n selectedChart?: string\n filteredRows?: string[] | null\n groupBy?: string\n nodeIDs?: string[]\n colors?: string\n commonColors?: string\n decimalDigits?: number\n dimensions?: string\n selectedDimensions?: string[]\n forceTimeWindow?: boolean\n\n appendOptions?: string | undefined\n gtime?: number\n method?: string\n overrideOptions?: string\n pixelsPerPoint?: number\n points?: number\n heightId?: string\n hideResizeHandler?: boolean\n detectResize?: boolean\n commonMin?: string\n commonMax?: string\n\n dygraphType?: string\n dygraphValueRange?: any[]\n dygraphTheme?: string\n dygraphSmooth?: boolean\n dygraphColors?: string[]\n 
dygraphRightGap?: number\n dygraphShowRangeSelector?: boolean\n dygraphShowRoller?: boolean\n dygraphTitle?: string\n dygraphTitleHeight?: number\n dygraphLegend?: \"always\" | \"follow\" | \"onmouseover\" | \"never\"\n dygraphLabelsDiv?: string\n dygraphLabelsSeparateLine?: boolean\n dygraphIncludeZero?: boolean\n dygraphShowZeroValues?: boolean\n dygraphShowLabelsOnHighLight?: boolean\n dygraphHideOverlayOnMouseOut?: boolean\n dygraphXRangePad?: number\n dygraphYRangePad?: number\n dygraphYLabelWidth?: number\n dygraphStrokeWidth?: number\n dygraphStrokePattern?: number[]\n dygraphDrawPoints?: boolean\n dygraphDrawGapEdgePoints?: boolean\n dygraphConnectSeparatedPoints?: boolean\n dygraphPointSize?: number\n dygraphStepPlot?: boolean\n dygraphStrokeBorderColor?: string\n dygraphStrokeBorderWidth?: number\n dygraphFillGraph?: boolean\n dygraphFillAlpha?: number\n dygraphStackedGraph?: boolean\n dygraphStackedGraphNanFill?: string\n dygraphAxisLabelFontSize?: number\n dygraphAxisLineColor?: string\n dygraphAxisLineWidth?: number\n dygraphDrawGrid?: boolean\n dygraphGridLinePattern?: number[]\n dygraphGridLineWidth?: number\n dygraphGridLineColor?: string\n dygraphMaxNumberWidth?: number\n dygraphSigFigs?: number\n dygraphDigitsAfterDecimal?: number\n dygraphHighlighCircleSize?: number\n dygraphHighlightSeriesOpts?: {[options: string]: number}\n dygraphHighlightSeriesBackgroundAlpha?: number\n dygraphXPixelsPerLabel?: number\n dygraphXAxisLabelWidth?: number\n dygraphDrawXAxis?: boolean\n dygraphYPixelsPerLabel?: number\n dygraphYAxisLabelWidth?: number\n dygraphDrawYAxis?: boolean\n dygraphDrawAxis?: boolean\n\n easyPieChartMinValue?: number\n easyPieChartMaxValue?: number\n easyPieChartBarColor?: string\n easyPieChartTrackColor?: string\n easyPieChartScaleColor?: string,\n easyPieChartScaleLength?: number,\n easyPieChartLineCap?: string,\n easyPieChartLineWidth?: string,\n easyPieChartTrackWidth?: string,\n easyPieChartSize?: string,\n easyPieChartRotate?: number,\n easyPieChartAnimate?: string,\n easyPieChartEasing?: string,\n\n gaugeMinValue?: number,\n gaugeMaxValue?: number,\n gaugePointerColor?: string,\n gaugeStrokeColor?: string,\n gaugeStartColor?: string,\n gaugeStopColor?: string,\n gaugeGenerateGradient?: boolean | string[],\n\n sparklineType?: string,\n sparklineLineColor?: string,\n sparklineFillColor?: string,\n sparklineChartRangeMin?: string,\n sparklineChartRangeMax?: string,\n sparklineComposite?: string,\n sparklineEnableTagOptions?: string,\n sparklineTagOptionPrefix?: string,\n sparklineTagValuesAttribute?: string,\n sparklineDisableHiddenCheck?: string,\n sparklineDefaultPixelsPerValue?: string,\n sparklineSpotColor?: string,\n sparklineMinSpotColor?: string,\n sparklineMaxSpotColor?: string,\n sparklineSpotRadius?: string,\n sparklineValueSpots?: string,\n sparklineHighlightSpotColor?: string,\n sparklineHighlightLineColor?: string,\n sparklineLineWidth?: string,\n sparklineNormalRangeMin?: string,\n sparklineNormalRangeMax?: string,\n sparklineDrawNormalOnTop?: string,\n sparklineXvalues?: string,\n sparklineChartRangeClip?: string,\n sparklineChartRangeMinX?: string,\n sparklineChartRangeMaxX?: string,\n sparklineDisableInteraction?: boolean,\n sparklineDisableTooltips?: boolean,\n sparklineOnHover?: Function,\n sparklineDisableHighlight?: boolean,\n sparklineHighlightLighten?: string,\n sparklineHighlightColor?: string,\n sparklineTooltipContainer?: string,\n sparklineTooltipClassname?: string,\n sparklineTooltipFormat?: string,\n sparklineTooltipPrefix?: 
string,\n sparklineTooltipSuffix?: string,\n sparklineTooltipSkipNull?: boolean,\n sparklineTooltipValueLookups?: string,\n sparklineTooltipFormatFieldlist?: string,\n sparklineTooltipFormatFieldlistKey?: string,\n sparklineNumberFormatter?: (d: number) => string,\n sparklineNumberDigitGroupSep?: string,\n sparklineNumberDecimalMark?: string,\n sparklineNumberDigitGroupCount?: string,\n sparklineAnimatedZooms?: boolean,\n\n\n d3pieTitle?: string,\n d3pieSubtitle?: string,\n d3pieFooter?: string,\n d3pieTitleColor?: string,\n d3pieTitleFontsize?: string,\n d3pieTitleFontweight?: string,\n d3pieTitleFont?: string,\n d3PieSubtitleColor?: string,\n d3PieSubtitleFontsize?: string,\n d3PieSubtitleFontweight?: string,\n d3PieSubtitleFont?: string,\n d3PieFooterColor?: string,\n d3PieFooterFontsize?: string,\n d3PieFooterFontweight?: string,\n d3PieFooterFont?: string,\n d3PieFooterLocation?: string,\n d3PiePieinnerradius?: string,\n d3PiePieouterradius?: string,\n d3PieSortorder?: string,\n d3PieSmallsegmentgroupingEnabled?: boolean,\n d3PieSmallsegmentgroupingValue?: string,\n d3PieSmallsegmentgroupingValuetype?: string,\n d3PieSmallsegmentgroupingLabel?: string,\n d3PieSmallsegmentgroupingColor?: string,\n d3PieLabelsOuterFormat?: string,\n d3PieLabelsOuterHidewhenlessthanpercentage?: string,\n d3PieLabelsOuterPiedistance?: string,\n d3PieLabelsInnerFormat?: string,\n d3PieLabelsInnerHidewhenlessthanpercentage?: string,\n d3PieLabelsMainLabelColor?: string,\n d3PieLabelsMainLabelFont?: string,\n d3PieLabelsMainLabelFontsize?: string,\n d3PieLabelsMainLabelFontweight?: string,\n d3PieLabelsPercentageColor?: string,\n d3PieLabelsPercentageFont?: string,\n d3PieLabelsPercentageFontsize?: string,\n d3PieLabelsPercentageFontweight?: string,\n d3PieLabelsValueColor?: string,\n d3PieLabelsValueFont?: string,\n d3PieLabelsValueFontsize?: string,\n d3PieLabelsValueFontweight?: string,\n d3PieLabelsLinesEnabled?: boolean,\n d3PieLabelsLinesStyle?: string,\n d3PieLabelsLinesColor?: string,\n d3PieLabelsTruncationEnabled?: boolean,\n d3PieLabelsTruncationTruncatelength?: string,\n d3PieMiscColorsSegmentstroke?: string,\n d3PieMiscGradientEnabled?: boolean,\n d3PieMiscColorsPercentage?: string,\n d3PieMiscGradientColor?: string,\n d3PieCssprefix?: string,\n\n peityStrokeWidth?: number,\n\n textOnlyDecimalPlaces?: number,\n textOnlyPrefix?: string,\n textOnlySuffix?: string,\n}\n\nexport interface Attributes extends StaticAttributes {\n // changed structure compared to original dashboard.js (not flat list, but dynamic objects stored\n // in \"showValueOf\" property\n showValueOf?: { [key: string]: string }\n}\n\nexport interface ChartsAttributes {\n [chartID:string]: Attributes\n}\n\nexport type AttributePropKeys = keyof StaticAttributes\n\ntype AttributesMap = {\n [key in AttributePropKeys]: AttributeConfig\n}\n\n// needs to be a getter so all window.NETDATA settings are set\nconst getAttributesMap = (): AttributesMap => ({\n // all properties that don't have `defaultValue` should be \"| undefined\" in Attributes interface\n // todo try to write above rule in TS\n id: { key: \"netdata\" },\n host: { key: \"host\" },\n httpMethod: { key: \"http-method\" },\n title: { key: \"title\" },\n chartLibrary: { key: \"chart-library\", defaultValue: window.NETDATA.chartDefaults.library },\n width: { key: \"width\", defaultValue: window.NETDATA.chartDefaults.width },\n height: { key: \"height\", defaultValue: window.NETDATA.chartDefaults.height },\n // todo use chartDefaults for static custom dashboards\n // after: { 
key: \"after\", defaultValue: window.NETDATA.chartDefaults.after },\n after: { key: \"after\" },\n before: { key: \"before\", defaultValue: window.NETDATA.chartDefaults.before },\n legend: { key: \"legend\", type: \"boolean\", defaultValue: true },\n legendPosition: { key: \"legend-position\" },\n units: { key: \"units\" },\n unitsCommon: { key: \"common-units\" },\n unitsDesired: { key: \"desired-units\" },\n aggrMethod: { key: \"aggr-method\" },\n labels: { key: \"labels\" },\n postGroupBy: { key: \"post-group-by\" },\n postAggregationMethod: { key: \"post-aggregation-method\" },\n dimensionsAggrMethod: { key: \"dimensions-aggr-method\" },\n aggrGroups: { key: \"aggrGroups\" },\n selectedChart: { key: \"selected-chart\" },\n filteredRows: { key: \"filtered-rows\" },\n groupBy: { key: \"group-by\" },\n nodeIDs: { key: \"node-ids\" },\n colors: { key: \"colors\" },\n commonColors: { key: \"common-colors\" },\n decimalDigits: { key: \"decimal-digits\" },\n dimensions: { key: \"dimensions\" },\n selectedDimensions: { key: \"selected-dimensions\" },\n forceTimeWindow: { key: \"force-time-window\" },\n\n appendOptions: { key: \"append-options\" },\n gtime: { key: \"gtime\" },\n method: { key: \"method\" },\n overrideOptions: { key: \"override-options\" },\n pixelsPerPoint: { key: \"pixels-per-point\" },\n points: { key: \"points\" },\n heightId: { key: \"id\" },\n hideResizeHandler: { key: \"hide-resize-handler\" },\n detectResize: { key: \"detect-resize\" },\n commonMin: { key: \"common-min\" },\n commonMax: { key: \"common-max\" },\n\n // let's not put the default values here, because they will also be needed by the main Agent page\n // and the Cloud App\n dygraphType: { key: \"dygraph-type\" },\n dygraphValueRange: { key: \"dygraph-valuerange\" },\n dygraphTheme: { key: \"dygraph-theme\" },\n dygraphSmooth: { key: \"dygraph-smooth\", type: \"boolean\" },\n dygraphColors: { key: \"dygraph-colors\" }, // not working in original dashboard\n dygraphRightGap: { key: \"dygraph-rightgap\" },\n dygraphShowRangeSelector: { key: \"dygraph-showrangeselector\", type: \"boolean\" },\n dygraphShowRoller: { key: \"dygraph-showroller\", type: \"boolean\" },\n dygraphTitle: { key: \"dygraph-title\" },\n dygraphTitleHeight: { key: \"dygraph-titleheight\" },\n dygraphLegend: { key: \"dygraph-legend\" },\n dygraphLabelsDiv: { key: \"dygraph-labelsdiv\" },\n dygraphLabelsSeparateLine: { key: \"dygraph-labelsseparatelines\", type: \"boolean\" },\n dygraphIncludeZero: { key: \"dygraph-includezero\", type: \"boolean\" },\n dygraphShowZeroValues: { key: \"dygraph-labelsshowzerovalues\", type: \"boolean\" },\n dygraphShowLabelsOnHighLight: { key: \"dygraph-showlabelsonhighlight\", type: \"boolean\" },\n dygraphHideOverlayOnMouseOut: { key: \"dygraph-hideoverlayonmouseout\", type: \"boolean\" },\n dygraphXRangePad: { key: \"dygraph-xrangepad\" },\n dygraphYRangePad: { key: \"dygraph-yrangepad\" },\n dygraphYLabelWidth: { key: \"dygraph-ylabelwidth\" },\n dygraphStrokeWidth: { key: \"dygraph-strokewidth\" },\n dygraphStrokePattern: { key: \"dygraph-strokepattern\" },\n dygraphDrawPoints: { key: \"dygraph-drawpoints\", type: \"boolean\" },\n dygraphDrawGapEdgePoints: { key: \"dygraph-drawgapedgepoints\", type: \"boolean\" },\n dygraphConnectSeparatedPoints: { key: \"dygraph-connectseparatedpoints\", type: \"boolean\" },\n dygraphPointSize: { key: \"dygraph-pointsize\" },\n dygraphStepPlot: { key: \"dygraph-stepplot\", type: \"boolean\" },\n dygraphStrokeBorderColor: { key: \"dygraph-strokebordercolor\" },\n 
dygraphStrokeBorderWidth: { key: \"dygraph-strokeborderwidth\" },\n // it was not boolean in the old app, but that was most likely a bug\n dygraphFillGraph: { key: \"dygraph-fillgraph\", type: \"boolean\" },\n dygraphFillAlpha: { key: \"dygraph-fillalpha\" },\n // also originally not set as boolean\n dygraphStackedGraph: { key: \"dygraph-stackedgraph\", type: \"boolean\" },\n dygraphStackedGraphNanFill: { key: \"dygraph-stackedgraphnanfill\" },\n dygraphAxisLabelFontSize: { key: \"dygraph-axislabelfontsize\" },\n dygraphAxisLineColor: { key: \"dygraph-axislinecolor\" },\n dygraphAxisLineWidth: { key: \"dygraph-axislinewidth\" },\n dygraphDrawGrid: { key: \"dygraph-drawgrid\", type: \"boolean\" },\n dygraphGridLinePattern: { key: \"dygraph-gridlinepattern\" },\n dygraphGridLineWidth: { key: \"dygraph-gridlinewidth\" },\n dygraphGridLineColor: { key: \"dygraph-gridlinecolor\" },\n dygraphMaxNumberWidth: { key: \"dygraph-maxnumberwidth\" },\n dygraphSigFigs: { key: \"dygraph-sigfigs\" },\n dygraphDigitsAfterDecimal: { key: \"dygraph-digitsafterdecimal\" },\n // dygraphValueFormatter: { key: \"dygraph-valueformatter\" },\n dygraphHighlighCircleSize: { key: \"dygraph-highlightcirclesize\" },\n dygraphHighlightSeriesOpts: { key: \"dygraph-highlightseriesopts\" },\n dygraphHighlightSeriesBackgroundAlpha: { key: \"dygraph-highlightseriesbackgroundalpha\" },\n // dygraphPointClickCallback: { key: \"dygraph-pointclickcallback\" },\n dygraphXPixelsPerLabel: { key: \"dygraph-xpixelsperlabel\" },\n dygraphXAxisLabelWidth: { key: \"dygraph-xaxislabelwidth\" },\n dygraphDrawXAxis: { key: \"dygraph-drawxaxis\", type: \"boolean\" },\n dygraphYPixelsPerLabel: { key: \"dygraph-ypixelsperlabel\" },\n dygraphYAxisLabelWidth: { key: \"dygraph-yaxislabelwidth\" },\n dygraphDrawYAxis: { key: \"dygraph-drawyaxis\", type: \"boolean\" },\n dygraphDrawAxis: { key: \"dygraph-drawaxis\", type: \"boolean\" },\n\n easyPieChartMinValue: { key: \"easypiechart-min-value\" },\n easyPieChartMaxValue: { key: \"easypiechart-max-value\" },\n easyPieChartBarColor: { key: \"easypiechart-barcolor\" },\n easyPieChartTrackColor: { key: \"easypiechart-trackcolor\" },\n easyPieChartScaleColor: { key: \"easypiechart-scalecolor\" },\n easyPieChartScaleLength: { key: \"easypiechart-scalelength\" },\n easyPieChartLineCap: { key: \"easypiechart-linecap\" },\n easyPieChartLineWidth: { key: \"easypiechart-linewidth\" },\n easyPieChartTrackWidth: { key: \"easypiechart-trackwidth\" },\n easyPieChartSize: { key: \"easypiechart-size\" },\n easyPieChartRotate: { key: \"easypiechart-rotate\" },\n easyPieChartAnimate: { key: \"easypiechart-animate\" },\n easyPieChartEasing: { key: \"easypiechart-easing\" },\n\n gaugeMinValue: { key: \"gauge-min-value\" },\n gaugeMaxValue: { key: \"gauge-max-value\" },\n gaugePointerColor: { key: \"gauge-pointer-color\" },\n gaugeStrokeColor: { key: \"gauge-stroke-color\" },\n gaugeStartColor: { key: \"gauge-start-color\" },\n gaugeStopColor: { key: \"gauge-stop-color\" },\n gaugeGenerateGradient: { key: \"gauge-generate-gradient\" },\n\n sparklineType: { key: \"sparkline-type\" },\n sparklineLineColor: { key: \"sparkline-linecolor\" },\n sparklineFillColor: { key: \"sparkline-fillcolor\" },\n sparklineChartRangeMin: { key: \"sparkline-chartrangemin\" },\n sparklineChartRangeMax: { key: \"sparkline-chartrangemax\" },\n sparklineComposite: { key: \"sparkline-composite\" },\n sparklineEnableTagOptions: { key: \"sparkline-enabletagoptions\" },\n sparklineTagOptionPrefix: { key: \"sparkline-tagoptionprefix\" },\n 
sparklineTagValuesAttribute: { key: \"sparkline-tagvaluesattribute\" },\n sparklineDisableHiddenCheck: { key: \"sparkline-disablehiddencheck\" },\n sparklineDefaultPixelsPerValue: { key: \"sparkline-defaultpixelspervalue\" },\n sparklineSpotColor: { key: \"sparkline-spotcolor\" },\n sparklineMinSpotColor: { key: \"sparkline-minspotcolor\" },\n sparklineMaxSpotColor: { key: \"sparkline-maxspotcolor\" },\n sparklineSpotRadius: { key: \"sparkline-spotradius\" },\n sparklineValueSpots: { key: \"sparkline-valuespots\" },\n sparklineHighlightSpotColor: { key: \"sparkline-highlightspotcolor\" },\n sparklineHighlightLineColor: { key: \"sparkline-highlightlinecolor\" },\n sparklineLineWidth: { key: \"sparkline-linewidth\" },\n sparklineNormalRangeMin: { key: \"sparkline-normalrangemin\" },\n sparklineNormalRangeMax: { key: \"sparkline-normalrangemax\" },\n sparklineDrawNormalOnTop: { key: \"sparkline-drawnormalontop\" },\n sparklineXvalues: { key: \"sparkline-xvalues\" },\n sparklineChartRangeClip: { key: \"sparkline-chartrangeclip\" },\n sparklineChartRangeMinX: { key: \"sparkline-chartrangeminx\" },\n sparklineChartRangeMaxX: { key: \"sparkline-chartrangemaxx\" },\n sparklineDisableInteraction: { key: \"sparkline-disableinteraction\", type: \"boolean\" },\n sparklineDisableTooltips: { key: \"sparkline-disabletooltips\", type: \"boolean\" },\n sparklineOnHover: { key: \"sparkline-on-hover\" },\n sparklineDisableHighlight: { key: \"sparkline-disablehighlight\", type: \"boolean\" },\n sparklineHighlightLighten: { key: \"sparkline-highlightlighten\" },\n sparklineHighlightColor: { key: \"sparkline-highlightcolor\" },\n sparklineTooltipContainer: { key: \"sparkline-tooltipcontainer\" },\n sparklineTooltipClassname: { key: \"sparkline-tooltipclassname\" },\n sparklineTooltipFormat: { key: \"sparkline-tooltipformat\" },\n sparklineTooltipPrefix: { key: \"sparkline-tooltipprefix\" },\n sparklineTooltipSuffix: { key: \"sparkline-tooltipsuffix\" },\n sparklineTooltipSkipNull: { key: \"sparkline-tooltipskipnull\", type: \"boolean\" },\n sparklineTooltipValueLookups: { key: \"sparkline-tooltipvaluelookups\" },\n sparklineTooltipFormatFieldlist: { key: \"sparkline-tooltipformatfieldlist\" },\n sparklineTooltipFormatFieldlistKey: { key: \"sparkline-tooltipformatfieldlistkey\" },\n sparklineNumberFormatter: { key: \"sparkline-numberformatter\" },\n sparklineNumberDigitGroupSep: { key: \"sparkline-numberdigitgroupsep\" },\n sparklineNumberDecimalMark: { key: \"sparkline-numberdecimalmark\" },\n sparklineNumberDigitGroupCount: { key: \"sparkline-numberdigitgroupcount\" },\n sparklineAnimatedZooms: { key: \"sparkline-animatedzooms\", type: \"boolean\" },\n\n d3pieTitle: { key: \"d3pie-title\" },\n d3pieSubtitle: { key: \"d3pie-subtitle\" },\n d3pieFooter: { key: \"d3pie-footer\" },\n d3pieTitleColor: { key: \"d3pie-title-color\" },\n d3pieTitleFontsize: { key: \"d3pie-title-fontsize\" },\n d3pieTitleFontweight: { key: \"d3pie-title-fontweight\" },\n d3pieTitleFont: { key: \"d3pie-title-font\" },\n d3PieSubtitleColor: { key: \"d3pie-subtitle-color\" },\n d3PieSubtitleFontsize: { key: \"d3pie-subtitle-fontsize\" },\n d3PieSubtitleFontweight: { key: \"d3pie-subtitle-fontweight\" },\n d3PieSubtitleFont: { key: \"d3pie-subtitle-font\" },\n d3PieFooterColor: { key: \"d3pie-footer-color\" },\n d3PieFooterFontsize: { key: \"d3pie-footer-fontsize\" },\n d3PieFooterFontweight: { key: \"d3pie-footer-fontweight\" },\n d3PieFooterFont: { key: \"d3pie-footer-font\" },\n d3PieFooterLocation: { key: \"d3pie-footer-location\" },\n 
d3PiePieinnerradius: { key: \"d3pie-pieinnerradius\" },\n d3PiePieouterradius: { key: \"d3pie-pieouterradius\" },\n d3PieSortorder: { key: \"d3pie-sortorder\" },\n d3PieSmallsegmentgroupingEnabled: { key: \"d3pie-smallsegmentgrouping-enabled\", type: \"boolean\" },\n d3PieSmallsegmentgroupingValue: { key: \"d3pie-smallsegmentgrouping-value\" },\n d3PieSmallsegmentgroupingValuetype: { key: \"d3pie-smallsegmentgrouping-valuetype\" },\n d3PieSmallsegmentgroupingLabel: { key: \"d3pie-smallsegmentgrouping-label\" },\n d3PieSmallsegmentgroupingColor: { key: \"d3pie-smallsegmentgrouping-color\" },\n d3PieLabelsOuterFormat: { key: \"d3pie-labels-outer-format\" },\n d3PieLabelsOuterHidewhenlessthanpercentage: {\n key: \"d3pie-labels-outer-hidewhenlessthanpercentage\",\n },\n d3PieLabelsOuterPiedistance: { key: \"d3pie-labels-outer-piedistance\" },\n d3PieLabelsInnerFormat: { key: \"d3pie-labels-inner-format\" },\n d3PieLabelsInnerHidewhenlessthanpercentage: {\n key: \"d3pie-labels-inner-hidewhenlessthanpercentage\",\n },\n d3PieLabelsMainLabelColor: { key: \"d3pie-labels-mainLabel-color\" },\n d3PieLabelsMainLabelFont: { key: \"d3pie-labels-mainLabel-font\" },\n d3PieLabelsMainLabelFontsize: { key: \"d3pie-labels-mainLabel-fontsize\" },\n d3PieLabelsMainLabelFontweight: { key: \"d3pie-labels-mainLabel-fontweight\" },\n d3PieLabelsPercentageColor: { key: \"d3pie-labels-percentage-color\" },\n d3PieLabelsPercentageFont: { key: \"d3pie-labels-percentage-font\" },\n d3PieLabelsPercentageFontsize: { key: \"d3pie-labels-percentage-fontsize\" },\n d3PieLabelsPercentageFontweight: { key: \"d3pie-labels-percentage-fontweight\" },\n d3PieLabelsValueColor: { key: \"d3pie-labels-value-color\" },\n d3PieLabelsValueFont: { key: \"d3pie-labels-value-font\" },\n d3PieLabelsValueFontsize: { key: \"d3pie-labels-value-fontsize\" },\n d3PieLabelsValueFontweight: { key: \"d3pie-labels-value-fontweight\" },\n d3PieLabelsLinesEnabled: { key: \"d3pie-labels-lines-enabled\", type: \"boolean\" },\n d3PieLabelsLinesStyle: { key: \"d3pie-labels-lines-style\" },\n d3PieLabelsLinesColor: { key: \"d3pie-labels-lines-color\" },\n d3PieLabelsTruncationEnabled: { key: \"d3pie-labels-truncation-enabled\", type: \"boolean\" },\n d3PieLabelsTruncationTruncatelength: { key: \"d3pie-labels-truncation-truncatelength\" },\n d3PieMiscColorsSegmentstroke: { key: \"d3pie-misc-colors-segmentstroke\" },\n d3PieMiscGradientEnabled: { key: \"d3pie-misc-gradient-enabled\", type: \"boolean\" },\n d3PieMiscColorsPercentage: { key: \"d3pie-misc-colors-percentage\" },\n d3PieMiscGradientColor: { key: \"d3pie-misc-gradient-color\" },\n d3PieCssprefix: { key: \"d3pie-cssprefix\" },\n\n peityStrokeWidth: { key: \"peity-strokewidth\" },\n\n textOnlyDecimalPlaces: { key: \"textonly-decimal-places\" },\n textOnlyPrefix: { key: \"textonly-prefix\" },\n textOnlySuffix: { key: \"textonly-suffix\" },\n})\n\nexport const getAttributesStatic = (node: Element): Attributes => mapObjIndexed(\n (attribute: AttributeConfig) => (\n (attribute as BooleanAttributeConfig).type === \"boolean\"\n ? 
getDataAttributeBoolean(\n node,\n attribute.key,\n attribute.defaultValue as BooleanAttributeConfig[\"defaultValue\"],\n ) : getDataAttribute(node, attribute.key, attribute.defaultValue)\n ),\n getAttributesMap(),\n) as Attributes // need to override because of broken Ramda typings\n\nexport const getAttributesDynamic = (node: Element) => {\n const showValueOfAttribues = Array.from(node.attributes)\n .filter((attribute) => attribute.name.startsWith(\"data-show-value-of\"))\n .map((attribute) => ({\n [attribute.name.replace(\"data-\", \"\")]: attribute.value,\n }))\n const merged = mergeAll(showValueOfAttribues)\n return isEmpty(merged) ? undefined : merged\n}\n\nexport const getAttributes = (node: Element): Attributes => {\n const attributesStatic = getAttributesStatic(node)\n const showValueOf = getAttributesDynamic(node)\n return { ...attributesStatic, showValueOf }\n}\n\nexport const defaultAttributes: Partial<Attributes> = {\n legendPosition: initialLegendRight ? \"right\" : \"bottom\",\n}\n","import { createAction } from \"redux-act\"\nimport { CancelTokenSource, Method } from \"axios\"\n\nimport { createRequestAction } from \"utils/createRequestAction\"\nimport { InfoPayload } from \"domains/global/__mocks__/info-mock\"\n\nimport { storeKey } from \"./constants\"\nimport { ChartData, ChartMetadata } from \"./chart-types\"\n\nexport interface UpdateChartDataAction {\n chartData: ChartData\n id: string\n}\nexport const updateChartDataAction = createAction<UpdateChartDataAction>(\n `${storeKey}/updateChartData`,\n)\n\nexport interface UpdateChartMetadataAction {\n chartMetadata: ChartMetadata\n id: string\n}\nexport const updateChartMetadataAction = createAction<UpdateChartMetadataAction>(\n `${storeKey}/updateChartMetadata`,\n)\n\nexport interface FetchDataParams {\n fillMissingPoints?: number\n isRemotelyControlled: boolean\n viewRange: [number, number]\n}\nexport interface FetchDataUrlParams {\n host: string\n chart: string\n context: string\n format: string\n points: number\n group: string\n gtime: number\n options: string\n after: number | null\n before?: number | null\n dimensions?: string\n labels?: {[key: string]: string}\n postGroupBy?: string\n postAggregationMethod?: string\n aggrMethod?: string\n aggrGroups?: string[]\n dimensionsAggrMethod?: string\n nodeIDs?: string[]\n httpMethod?: Method\n groupBy?: string\n}\nexport interface FetchDataPayload extends FetchDataUrlParams {\n id: string,\n fetchDataParams: FetchDataParams\n cancelTokenSource: CancelTokenSource\n}\n\nexport const fetchDataAction = createRequestAction<\n FetchDataPayload,\n { id: string, chartData: ChartData, fetchDataParams: FetchDataParams }\n>(`${storeKey}/fetchDataAction`)\n\n\nexport interface FetchDataCancelAction { id: string }\nexport const fetchDataCancelAction = createAction<FetchDataCancelAction>(\n `${storeKey}/fetchDataCancelAction`,\n)\n\nexport interface FetchDataForSnapshotPayload extends FetchDataUrlParams {\n chartLibrary: string\n id: string\n}\nexport const fetchDataForSnapshotAction = createRequestAction<\n FetchDataForSnapshotPayload,\n { id: string, snapshotData: ChartData }\n>(`${storeKey}/fetchDataForSnapshotAction`)\n\nexport const snapshotExportResetAction = createRequestAction(\n `${storeKey}/snapshotExportResetAction`,\n)\n\nexport interface FetchChartPayload {\n chart: string\n id: string\n host: string\n nodeIDs?: string[]\n}\n\nexport const fetchChartAction = createRequestAction<\n FetchChartPayload,\n { chartMetadata: ChartMetadata, id: string 
}\n>(`${storeKey}/fetchChartAction`)\n\n\nexport interface SetResizeHeightAction {\n id: string\n resizeHeight: number\n}\nexport const setResizeHeightAction = createAction<SetResizeHeightAction>(\n `${storeKey}/setResizeHeight`,\n)\n\nexport interface SetChartPanAndZoomAction {\n id: string\n after: number\n before: number\n shouldForceTimeRange?: boolean\n}\nexport const setChartPanAndZoomAction = createAction<SetChartPanAndZoomAction>(\n `${storeKey}/setChartPanAndZoom`,\n)\n\nexport const resetChartPanAndZoomAction = createAction<{ id: string }>(\n `${storeKey}/resetChartPanAndZoomAction`,\n)\n\nexport const clearChartStateAction = createAction<{ id: string }>(\n `${storeKey}/clearChartStateAction`,\n)\n\nexport interface FetchInfoPayload {\n poll?: boolean\n}\nexport interface FetchInfoSuccessPayload {\n isCloudAvailable: boolean\n isCloudEnabled: boolean\n isAgentClaimed: boolean\n isACLKAvailable: boolean\n fullInfoPayload: InfoPayload\n}\nexport const fetchInfoAction = createRequestAction<\n FetchInfoPayload,\n FetchInfoSuccessPayload\n>(`${storeKey}/fetchInfoAction`)\n","export const SIGN_IN_IFRAME_ID = \"sign_in_iframe\"\n","/* eslint-disable */\n// Main JavaScript file for the Netdata GUI.\n\n// Codacy declarations\n/* global NETDATA */\n\nimport { identity, memoizeWith } from \"ramda\"\nimport {\n centerAroundHighlightAction,\n chartsMetadataRequestSuccess,\n clearHighlightAction,\n fetchAllAlarmsAction,\n loadSnapshotAction,\n resetGlobalPanAndZoomAction,\n resetOptionsAction,\n resetRegistry,\n setDefaultAfterAction,\n setGlobalChartUnderlayAction,\n setGlobalPanAndZoomAction,\n setOptionAction,\n} from './domains/global/actions';\nimport {\n createSelectOption,\n selectDefaultAfter,\n selectGlobalPanAndZoom,\n selectRegistry,\n} from \"./domains/global/selectors\"\nimport { seconds4human } from './domains/chart/utils/seconds4human';\nimport { zeropad } from './utils/units-conversion';\nimport {\n explicitlySignInAction,\n startSnapshotModeAction,\n stopSnapshotModeAction,\n} from './domains/dashboard/actions';\nimport { snapshotExportResetAction } from './domains/chart/actions';\nimport {\n selectAmountOfCharts,\n selectAmountOfSnapshotsFailed,\n selectAmountOfSnapshotsFetched,\n} from './domains/chart/selectors';\nimport { serverDefault } from './utils/server-detection';\nimport { name2id } from './utils/name-2-id';\nimport { isProperTimezone } from './utils/date-time';\nimport { NETDATA_REGISTRY_SERVER } from './utils';\nimport { getHashParam } from 'utils/hash-utils';\nimport { isDemo } from \"./utils/is-demo\"\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from './domains/chart/utils/legend-utils';\nimport { defaultAttributes } from './domains/chart/utils/transformDataAttributes';\n\n// this is temporary, hook will be used after the full main.js refactor\nlet localeDateString, localeTimeString\nexport const updateLocaleFunctions = ({\n localeDateString: newLocaleDateString,\n localeTimeString: newLocaleTimeString,\n}) => {\n localeDateString = newLocaleDateString\n localeTimeString = newLocaleTimeString\n}\n\n// enable alarms checking and notifications\nvar netdataShowAlarms = true;\n\n// enable registry updates\nvar netdataRegistry = true;\n\n// forward definition only - not used here\nvar netdataServer = undefined;\nvar netdataServerStatic = undefined;\nvar netdataCheckXSS = undefined;\n\nlet reduxStore\n\nfunction escapeUserInputHTML(s) {\n return s.toString()\n .replace(/&/g, '&amp;')\n .replace(/</g, '&lt;')\n .replace(/>/g, '&gt;')\n .replace(/\"/g, '&quot;')\n .replace(/#/g, 
'&#35;')\n .replace(/'/g, '&#39;')\n .replace(/\(/g, '&#40;')\n .replace(/\)/g, '&#41;')\n .replace(/\//g, '&#47;');\n}\n\nconst setOption = (key, value) => {\n reduxStore.dispatch(setOptionAction({\n key,\n value,\n }))\n}\n\n// temporary function that will be removed after full main.js migration to react\nconst getFromRegistry = (prop) => {\n const registry = selectRegistry(reduxStore.getState())\n return registry?.[prop]\n}\n\nfunction verifyURL(s) {\n if (typeof (s) === 'string' && (s.startsWith('http://') || s.startsWith('https://'))) {\n return s\n .replace(/'/g, '%22')\n .replace(/\"/g, '%27')\n .replace(/\)/g, '%28')\n .replace(/\(/g, '%29');\n }\n\n console.log('invalid URL detected:');\n console.log(s);\n return 'javascript:alert(\"invalid url\");';\n}\n\n// --------------------------------------------------------------------\n// urlOptions\n\nwindow.urlOptions = {\n hash: '#',\n theme: null,\n help: null,\n mode: 'live', // 'live', 'print'\n update_always: false,\n pan_and_zoom: false,\n server: null,\n after: getHashParam('after') ?? 0,\n before: getHashParam('before') ?? 0,\n highlight: false,\n highlight_after: 0,\n highlight_before: 0,\n nowelcome: false,\n show_alarms: false,\n chart: null,\n family: null,\n alarm: null,\n utc: null,\n\n hasProperty: function (property) {\n // console.log('checking property ' + property + ' of type ' + typeof(this[property]));\n return typeof this[property] !== 'undefined';\n },\n\n genHash: function (forReload) {\n var hash = urlOptions.hash;\n\n hash += ';after=' + urlOptions.after.toString() +\n ';before=' + urlOptions.before.toString();\n\n if (urlOptions.highlight === true) {\n hash += ';highlight_after=' + urlOptions.highlight_after.toString() +\n ';highlight_before=' + urlOptions.highlight_before.toString();\n }\n\n if (urlOptions.theme !== null) {\n hash += ';theme=' + urlOptions.theme.toString();\n }\n\n if (urlOptions.help !== null) {\n hash += ';help=' + urlOptions.help.toString();\n }\n\n if (urlOptions.update_always === true) {\n hash += ';update_always=true';\n }\n\n if (forReload === true && urlOptions.server !== null) {\n hash += ';server=' + urlOptions.server.toString();\n }\n\n if (urlOptions.mode !== 'live') {\n hash += ';mode=' + urlOptions.mode;\n }\n\n if (urlOptions.utc !== null) {\n hash += ';utc=' + urlOptions.utc;\n }\n\n return hash;\n },\n\n parseHash: function () {\n var variables = document.location.hash.split(';');\n var len = variables.length;\n while (len--) {\n if (len !== 0) {\n var p = variables[len].split('=');\n if (urlOptions.hasProperty(p[0]) && typeof p[1] !== 'undefined') {\n urlOptions[p[0]] = decodeURIComponent(p[1]);\n }\n } else {\n if (variables[len].length > 0) {\n urlOptions.hash = variables[len];\n }\n }\n }\n\n var booleans = ['nowelcome', 'show_alarms', 'update_always'];\n len = booleans.length;\n while (len--) {\n if (urlOptions[booleans[len]] === 'true' || urlOptions[booleans[len]] === true || urlOptions[booleans[len]] === '1' || urlOptions[booleans[len]] === 1) {\n urlOptions[booleans[len]] = true;\n } else {\n urlOptions[booleans[len]] = false;\n }\n }\n\n var numeric = ['after', 'before', 'highlight_after', 'highlight_before'];\n len = numeric.length;\n while (len--) {\n if (typeof urlOptions[numeric[len]] === 'string') {\n try {\n urlOptions[numeric[len]] = parseInt(urlOptions[numeric[len]]);\n }\n catch (e) {\n console.log('failed to parse URL hash parameter ' + numeric[len]);\n urlOptions[numeric[len]] = 0;\n }\n }\n }\n\n if (urlOptions.server !== null && urlOptions.server !== '') {\n 
netdataServerStatic = document.location.origin.toString() + document.location.pathname.toString();\n netdataServer = urlOptions.server;\n netdataCheckXSS = true;\n } else {\n urlOptions.server = null;\n }\n\n if (urlOptions.before > 0 && urlOptions.after > 0) {\n urlOptions.pan_and_zoom = true;\n urlOptions.nowelcome = true;\n } else {\n urlOptions.pan_and_zoom = false;\n }\n\n if (urlOptions.highlight_before > 0 && urlOptions.highlight_after > 0) {\n urlOptions.highlight = true;\n } else {\n urlOptions.highlight = false;\n }\n\n switch (urlOptions.mode) {\n case 'print':\n urlOptions.theme = 'white';\n urlOptions.welcome = false;\n urlOptions.help = false;\n urlOptions.show_alarms = false;\n\n if (urlOptions.pan_and_zoom === false) {\n urlOptions.pan_and_zoom = true;\n urlOptions.before = Date.now();\n const fallbackAfter = -600000\n const defaultAfter = urlOptions.after ? urlOptions.after * 1000 : fallbackAfter\n urlOptions.after = urlOptions.before + defaultAfter;\n }\n\n netdataShowAlarms = false;\n netdataRegistry = false;\n break;\n\n case 'live':\n default:\n urlOptions.mode = 'live';\n break;\n }\n\n // console.log(urlOptions);\n },\n\n hashUpdate: function () {\n history.replaceState(null, '', urlOptions.genHash(true));\n },\n\n netdataPanAndZoomCallback: function (status, after, before) {\n if (netdataSnapshotData === null) {\n urlOptions.pan_and_zoom = status;\n urlOptions.after = after;\n urlOptions.before = before;\n }\n },\n\n updateUtcParam: function (utc) {\n if (!utc) return\n urlOptions.utc = utc\n urlOptions.hashUpdate();\n },\n\n netdataHighlightCallback: function (status, after, before) {\n if (status === true && (after === null || before === null || after <= 0 || before <= 0 || after >= before)) {\n status = false;\n after = 0;\n before = 0;\n }\n\n if (window.netdataSnapshotData === null) {\n urlOptions.highlight = status;\n } else {\n urlOptions.highlight = false;\n }\n\n urlOptions.highlight_after = Math.round(after);\n urlOptions.highlight_before = Math.round(before);\n urlOptions.hashUpdate();\n\n if (status === true && after > 0 && before > 0 && after < before) {\n var d1 = localeDateString(after);\n var d2 = localeDateString(before);\n if (d1 === d2) {\n d2 = '';\n }\n document.getElementById('navbar-highlight-content').innerHTML =\n '<span class=\"navbar-highlight-bar highlight-tooltip\" onclick=\"urlOptions.showHighlight();\" title=\"restore the highlighted view\" data-toggle=\"tooltip\" data-placement=\"bottom\">'\n + 'highlighted time-frame'\n + ' <b>' + d1 + ' <code>' + localeTimeString(after) + '</code></b> to '\n + ' <b>' + d2 + ' <code>' + localeTimeString(before) + '</code></b>, '\n + 'duration <b>' + seconds4human(Math.round((before - after) / 1000)) + '</b>'\n + '</span>'\n + '<span class=\"navbar-highlight-button-right highlight-tooltip\" onclick=\"urlOptions.clearHighlight();\" title=\"clear the highlighted time-frame\" data-toggle=\"tooltip\" data-placement=\"bottom\"><i class=\"fas fa-times\"></i></span>';\n\n $('.navbar-highlight').show();\n $('.navbar-highlight').width(\"80%\");\n $('.highlight-tooltip').tooltip({\n html: true,\n delay: { show: 500, hide: 0 },\n container: 'body'\n });\n } else {\n $('.navbar-highlight').hide();\n $('.navbar-highlight').width(\"100%\");\n }\n },\n\n clearHighlight: function () {\n reduxStore.dispatch(clearHighlightAction())\n },\n\n showHighlight: function () {\n reduxStore.dispatch(centerAroundHighlightAction())\n }\n};\n\nurlOptions.parseHash();\n\n// 
--------------------------------------------------------------------\n// check options that should be processed before loading netdata.js\n\nvar localStorageTested = -1;\n\nfunction localStorageTest() {\n if (localStorageTested !== -1) {\n return localStorageTested;\n }\n\n if (typeof Storage !== \"undefined\" && typeof localStorage === 'object') {\n var test = 'test';\n try {\n localStorage.setItem(test, test);\n localStorage.removeItem(test);\n localStorageTested = true;\n }\n catch (e) {\n console.log(e);\n localStorageTested = false;\n }\n } else {\n localStorageTested = false;\n }\n\n return localStorageTested;\n}\n\nfunction loadLocalStorage(name) {\n var ret = null;\n\n try {\n if (localStorageTest() === true) {\n ret = localStorage.getItem(name);\n } else {\n console.log('localStorage is not available');\n }\n }\n catch (error) {\n console.log(error);\n return null;\n }\n\n if (typeof ret === 'undefined' || ret === null) {\n return null;\n }\n\n // console.log('loaded: ' + name.toString() + ' = ' + ret.toString());\n\n return ret;\n}\n\nfunction saveLocalStorage(name, value) {\n // console.log('saving: ' + name.toString() + ' = ' + value.toString());\n try {\n if (localStorageTest() === true) {\n localStorage.setItem(name, value.toString());\n return true;\n }\n }\n catch (error) {\n console.log(error);\n }\n\n return false;\n}\n\nfunction getTheme(def) {\n if (urlOptions.mode === 'print') {\n return 'white';\n }\n\n var ret = loadLocalStorage('netdataTheme');\n if (typeof ret === 'undefined' || ret === null || ret === 'undefined') {\n return def;\n } else {\n return ret;\n }\n}\n\nfunction setTheme(theme) {\n if (urlOptions.mode === 'print') {\n return false;\n }\n\n if (theme === netdataTheme) {\n return false;\n }\n return saveLocalStorage('netdataTheme', theme);\n}\n\nwindow.netdataTheme = getTheme('slate');\n// this is of course temporary, will be fixed during complete main.js rewrite\nNETDATA.updateTheme()\nvar netdataShowHelp = true;\n\nif (urlOptions.theme !== null) {\n setTheme(urlOptions.theme);\n netdataTheme = urlOptions.theme;\n window.NETDATA.updateTheme()\n} else {\n urlOptions.theme = netdataTheme;\n}\n\nif (urlOptions.help !== null) {\n saveLocalStorage('options.show_help', urlOptions.help);\n netdataShowHelp = urlOptions.help;\n} else {\n urlOptions.help = loadLocalStorage('options.show_help');\n}\n\n// --------------------------------------------------------------------\n// natural sorting\n// http://www.davekoelle.com/files/alphanum.js - LGPL\n\nfunction naturalSortChunkify(t) {\n var tz = [];\n var x = 0, y = -1, n = 0, i, j;\n\n while (i = (j = t.charAt(x++)).charCodeAt(0)) {\n var m = (i >= 48 && i <= 57);\n if (m !== n) {\n tz[++y] = \"\";\n n = m;\n }\n tz[y] += j;\n }\n\n return tz;\n}\n\nfunction naturalSortCompare(a, b) {\n var aa = naturalSortChunkify(a.toLowerCase());\n var bb = naturalSortChunkify(b.toLowerCase());\n\n for (var x = 0; aa[x] && bb[x]; x++) {\n if (aa[x] !== bb[x]) {\n var c = Number(aa[x]), d = Number(bb[x]);\n if (c.toString() === aa[x] && d.toString() === bb[x]) {\n return c - d;\n } else {\n return (aa[x] > bb[x]) ? 
1 : -1;\n }\n }\n }\n\n return aa.length - bb.length;\n}\n\n// --------------------------------------------------------------------\n// saving files to client\n\nfunction saveTextToClient(data, filename) {\n var blob = new Blob([data], {\n type: 'application/octet-stream'\n });\n\n var url = URL.createObjectURL(blob);\n var link = document.createElement('a');\n link.setAttribute('href', url);\n link.setAttribute('download', filename);\n\n var el = document.getElementById('hiddenDownloadLinks');\n el.innerHTML = '';\n el.appendChild(link);\n\n setTimeout(function () {\n el.removeChild(link);\n URL.revokeObjectURL(url);\n }, 60);\n\n link.click();\n}\n\nfunction saveObjectToClient(data, filename) {\n saveTextToClient(JSON.stringify(data), filename);\n}\n\nfunction netdataURL(url, forReload) {\n if (typeof url === 'undefined')\n // url = document.location.toString();\n {\n url = '';\n }\n\n if (url.indexOf('#') !== -1) {\n url = url.substring(0, url.indexOf('#'));\n }\n\n var hash = urlOptions.genHash(forReload);\n\n // console.log('netdataURL: ' + url + hash);\n\n return url + hash;\n}\n\nfunction netdataReload(url) {\n document.location = verifyURL(netdataURL(url, true));\n\n // since we play with hash\n // this is needed to reload the page\n location.reload();\n}\n\nwindow.gotoHostedModalHandler = (url) => {\n document.location = verifyURL(url + urlOptions.genHash());\n return false;\n}\n\nvar gotoServerValidateRemaining = 0;\nvar gotoServerMiddleClick = false;\nvar gotoServerStop = false;\n\nfunction gotoServerValidateUrl(id, guid, url) {\n var penalty = 0;\n var error = 'failed';\n\n if (document.location.toString().startsWith('http://') && url.toString().startsWith('https://'))\n // we penalize https only if the current url is http\n // to allow the user walk through all its servers.\n {\n penalty = 500;\n } else if (document.location.toString().startsWith('https://') && url.toString().startsWith('http://')) {\n error = 'can\\'t check';\n }\n\n var finalURL = netdataURL(url);\n\n setTimeout(function () {\n document.getElementById('gotoServerList').innerHTML += '<tr><td style=\"padding-left: 20px;\"><a href=\"' + verifyURL(finalURL) + '\" target=\"_blank\">' + escapeUserInputHTML(url) + '</a></td><td style=\"padding-left: 30px;\"><code id=\"' + guid + '-' + id + '-status\">checking...</code></td></tr>';\n\n NETDATA.registryHello(url, function (data) {\n if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid === guid) {\n // console.log('OK ' + id + ' URL: ' + url);\n document.getElementById(guid + '-' + id + '-status').innerHTML = \"OK\";\n\n if (!gotoServerStop) {\n gotoServerStop = true;\n\n if (gotoServerMiddleClick) {\n window.open(verifyURL(finalURL), '_blank');\n gotoServerMiddleClick = false;\n const registryMachines = getFromRegistry(\"registryMachines\");\n document.getElementById('gotoServerResponse').innerHTML = '<b>Opening new window to ' + registryMachines[guid].name + '<br/><a href=\"' + verifyURL(finalURL) + '\">' + escapeUserInputHTML(url) + '</a></b><br/>(check your pop-up blocker if it fails)';\n } else {\n document.getElementById('gotoServerResponse').innerHTML += 'found it! 
function netdataURL(url, forReload) {
    if (typeof url === 'undefined') {
        // url = document.location.toString();
        url = '';
    }

    if (url.indexOf('#') !== -1) {
        url = url.substring(0, url.indexOf('#'));
    }

    var hash = urlOptions.genHash(forReload);

    // console.log('netdataURL: ' + url + hash);

    return url + hash;
}

function netdataReload(url) {
    document.location = verifyURL(netdataURL(url, true));

    // since we play with hash
    // this is needed to reload the page
    location.reload();
}

window.gotoHostedModalHandler = (url) => {
    document.location = verifyURL(url + urlOptions.genHash());
    return false;
}

var gotoServerValidateRemaining = 0;
var gotoServerMiddleClick = false;
var gotoServerStop = false;

function gotoServerValidateUrl(id, guid, url) {
    var penalty = 0;
    var error = 'failed';

    if (document.location.toString().startsWith('http://') && url.toString().startsWith('https://')) {
        // we penalize https only if the current url is http
        // to allow the user walk through all its servers.
        penalty = 500;
    } else if (document.location.toString().startsWith('https://') && url.toString().startsWith('http://')) {
        error = 'can\'t check';
    }

    var finalURL = netdataURL(url);

    setTimeout(function () {
        document.getElementById('gotoServerList').innerHTML += '<tr><td style="padding-left: 20px;"><a href="' + verifyURL(finalURL) + '" target="_blank">' + escapeUserInputHTML(url) + '</a></td><td style="padding-left: 30px;"><code id="' + guid + '-' + id + '-status">checking...</code></td></tr>';

        NETDATA.registryHello(url, function (data) {
            if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid === guid) {
                // console.log('OK ' + id + ' URL: ' + url);
                document.getElementById(guid + '-' + id + '-status').innerHTML = "OK";

                if (!gotoServerStop) {
                    gotoServerStop = true;

                    if (gotoServerMiddleClick) {
                        window.open(verifyURL(finalURL), '_blank');
                        gotoServerMiddleClick = false;
                        const registryMachines = getFromRegistry("registryMachines");
                        document.getElementById('gotoServerResponse').innerHTML = '<b>Opening new window to ' + registryMachines[guid].name + '<br/><a href="' + verifyURL(finalURL) + '">' + escapeUserInputHTML(url) + '</a></b><br/>(check your pop-up blocker if it fails)';
                    } else {
                        document.getElementById('gotoServerResponse').innerHTML += 'found it! It is at:<br/><small>' + escapeUserInputHTML(url) + '</small>';
                        document.location = verifyURL(finalURL);
                        $('#gotoServerModal').modal('hide');
                    }
                }
            } else {
                if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid !== guid) {
                    error = 'wrong machine';
                }

                document.getElementById(guid + '-' + id + '-status').innerHTML = error;
                gotoServerValidateRemaining--;
                if (gotoServerValidateRemaining <= 0) {
                    gotoServerMiddleClick = false;
                    document.getElementById('gotoServerResponse').innerHTML = '<b>Sorry! I cannot find any operational URL for this server</b>';
                }
            }
        });
    }, (id * 50) + penalty);
}

window.gotoServerModalHandler = function gotoServerModalHandler(guid) {
    // console.log('goto server: ' + guid);

    gotoServerStop = false;
    var checked = {};
    const registryMachines = getFromRegistry("registryMachines");
    var len = registryMachines[guid].alternateUrls.length;
    var count = 0;

    document.getElementById('gotoServerResponse').innerHTML = '';
    document.getElementById('gotoServerList').innerHTML = '';
    document.getElementById('gotoServerName').innerHTML = registryMachines[guid].name;
    $('#gotoServerModal').modal('show');

    gotoServerValidateRemaining = len;
    while (len--) {
        var url = registryMachines[guid].alternateUrls[len];
        checked[url] = true;
        gotoServerValidateUrl(count++, guid, url);
    }

    // When the registry is enabled, if the user's known URLs are not working
    // we consult the registry to get additional URLs.
    setTimeout(function () {
        if (gotoServerStop === false) {
            document.getElementById('gotoServerResponse').innerHTML = '<b>Added all the known URLs for this machine.</b>';
            NETDATA.registrySearch(guid, getFromRegistry, function (data) {
                // console.log(data);
                len = data.urls.length;
                while (len--) {
                    var url = data.urls[len][1];
                    // console.log(url);
                    if (typeof checked[url] === 'undefined') {
                        gotoServerValidateRemaining++;
                        checked[url] = true;
                        gotoServerValidateUrl(count++, guid, url);
                    }
                }
            });
        }
    }, 2000);

    return false;
}
window.switchRegistryModalHandler = () => {
    document.getElementById('switchRegistryPersonGUID').value = getFromRegistry("personGuid");
    document.getElementById('switchRegistryURL').innerHTML = getFromRegistry("registryServer");
    document.getElementById('switchRegistryResponse').innerHTML = '';
    $('#switchRegistryModal').modal('show');
};

window.notifyForSwitchRegistry = () => {
    // it's just old code, with minimal changes
    const newPersonGuid = document.getElementById('switchRegistryPersonGUID').value;

    if (newPersonGuid !== '' && newPersonGuid.length === 36) {

        $.ajax({
            url: getFromRegistry("registryServer") + '/api/v1/registry?action=switch&machine='
                + getFromRegistry("machineGuid") + '&name='
                + encodeURIComponent(getFromRegistry("hostname")) + '&url='
                + encodeURIComponent(serverDefault) + '&to=' + newPersonGuid,
            async: true,
            cache: false,
            headers: {
                'Cache-Control': 'no-cache, no-store',
                'Pragma': 'no-cache'
            },
            xhrFields: {withCredentials: true} // required for the cookie
        })
            .done(function (data) {
                data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data);

                if (typeof data.status !== 'string' || data.status !== 'ok') {
                    // NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));
                    console.warn("Netdata registry server sent an invalid response to SWITCH", data);
                    data = null;
                }

                $('#switchRegistryModal').modal('hide');
            })
            .fail(function () {
                // NETDATA.error(414, NETDATA.registry.server);
                console.warn("Netdata registry SWITCH failed");
                document.getElementById('switchRegistryResponse').innerHTML = "<b>Sorry! The registry rejected your request.</b>";
            });
    } else {
        document.getElementById('switchRegistryResponse').innerHTML = "<b>The ID you have entered is not a GUID.</b>";
    }
};

var deleteRegistryGuid = null;
var deleteRegistryUrl = null;

window.deleteRegistryModalHandler = (guid, name, url) => {
    deleteRegistryGuid = guid;
    deleteRegistryUrl = url;

    document.getElementById('deleteRegistryServerName').innerHTML = name;
    document.getElementById('deleteRegistryServerName2').innerHTML = name;
    document.getElementById('deleteRegistryServerURL').innerHTML = url;
    document.getElementById('deleteRegistryResponse').innerHTML = '';

    $('#deleteRegistryModal').modal('show');
}

window.notifyForDeleteRegistry = () => {
    const responseEl = document.getElementById('deleteRegistryResponse');

    if (deleteRegistryUrl) {
        NETDATA.registryDelete(getFromRegistry, serverDefault, deleteRegistryUrl, function (result) {
            if (result !== null) {
                deleteRegistryUrl = null;
                $('#deleteRegistryModal').modal('hide');
                reduxStore.dispatch(resetRegistry());
            } else {
                responseEl.innerHTML = "<b>Sorry, this command was rejected by the registry server!</b>";
            }
        });
    }
}

var options = {
    menus: {},
    submenu_names: {},
    data: null,
    hostname: 'netdata_server', // will be overwritten by the netdata server
    version: 'unknown',
    release_channel: 'unknown',
    hosts: [],

    duration: 0, // the default duration of the charts
    update_every: 1,

    chartsPerRow: 0,
    // chartsMinWidth: 1450,
    chartsHeight: 180,
};

function chartsPerRow(total) {
    void (total);

    if (options.chartsPerRow === 0) {
        return 1;
        //var width = Math.floor(total / options.chartsMinWidth);
        //if(width === 0) width = 1;
        //return width;
    } else {
        return options.chartsPerRow;
    }
}

function prioritySort(a, b) {
    if (a.priority < b.priority) {
        return -1;
    }
    if (a.priority > b.priority) {
        return 1;
    }
    return naturalSortCompare(a.name, b.name);
}

function sortObjectByPriority(object) {
    var idx = {};
    var sorted = [];

    for (var i in object) {
        if (!object.hasOwnProperty(i)) {
            continue;
        }

        if (typeof idx[i] === 'undefined') {
            idx[i] = object[i];
            sorted.push(i);
        }
    }

    sorted.sort(function (a, b) {
        if (idx[a].priority < idx[b].priority) {
            return -1;
        }
        if (idx[a].priority > idx[b].priority) {
            return 1;
        }
        return naturalSortCompare(a, b);
    });

    return sorted;
}
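// Usage sketch for sortObjectByPriority() (hypothetical input): given
//   { cpu: { priority: 100 }, disk: { priority: 200 }, net: { priority: 100 } }
// it returns ['cpu', 'net', 'disk'] - keys ordered by ascending priority,
// with ties broken by natural name order.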
// ----------------------------------------------------------------------------
// scroll to a section, without changing the browser history

window.scrollToId = (hash) => {
    if (hash && hash !== '' && document.getElementById(hash) !== null) {
        var offset = $('#' + hash).offset();
        if (typeof offset !== 'undefined') {
            //console.log('scrolling to ' + hash + ' at ' + offset.top.toString());
            $('html, body').animate({ scrollTop: offset.top - 30 }, 0);
        }
    }

    // we must return false to prevent the default action
    return false;
}

// ----------------------------------------------------------------------------

// user editable information
window.customDashboard = {
    menu: {},
    submenu: {},
    context: {}
};

// netdata standard information
const netdataDashboard = {
    sparklines_registry: {},
    os: 'unknown',

    menu: {},
    submenu: {},
    context: {},

    // generate a sparkline
    // used in the documentation
    sparkline: function (prefix, chart, dimension, units, suffix) {
        if (options.data === null || typeof options.data.charts === 'undefined') {
            return '';
        }

        if (typeof options.data.charts[chart] === 'undefined') {
            return '';
        }

        if (typeof options.data.charts[chart].dimensions === 'undefined') {
            return '';
        }

        if (typeof options.data.charts[chart].dimensions[dimension] === 'undefined') {
            return '';
        }

        var key = chart + '.' + dimension;

        if (typeof units === 'undefined') {
            units = '';
        }

        if (typeof this.sparklines_registry[key] === 'undefined') {
            this.sparklines_registry[key] = { count: 1 };
        } else {
            this.sparklines_registry[key].count++;
        }

        key = key + '.' + this.sparklines_registry[key].count;

        return prefix + '<div class="netdata-container" data-netdata="' + chart + '" data-width="25%" data-height="15px" data-chart-library="dygraph" data-dygraph-theme="sparkline" data-dimensions="' + dimension + '" data-show-value-of-' + dimension + '-at="' + key + '"></div> (<span id="' + key + '" style="display: inline-block; min-width: 50px; text-align: right;">X</span>' + units + ')' + suffix;
    },

    gaugeChart: function (title, width, dimensions, colors) {
        if (typeof colors === 'undefined') {
            colors = '';
        }

        if (typeof dimensions === 'undefined') {
            dimensions = '';
        }

        return '<div class="netdata-container" data-netdata="CHART_UNIQUE_ID"'
            + ' data-dimensions="' + dimensions + '"'
            + ' data-chart-library="gauge"'
            + ' data-gauge-adjust="width"'
            + ' data-title="' + title + '"'
            + ' data-width="' + width + '"'
            + ' data-points="CHART_DURATION"'
            + ' data-colors="' + colors + '"'
            + ' role="application"></div>';
    },

    anyAttribute: function (obj, attr, key, def) {
        if (typeof (obj[key]) !== 'undefined') {
            var x = obj[key][attr];

            if (typeof (x) === 'undefined') {
                return def;
            }

            if (typeof (x) === 'function') {
                return x(netdataDashboard.os);
            }

            return x;
        }

        return def;
    },

    menuTitle: function (chart) {
        if (typeof chart.menu_pattern !== 'undefined') {
            return (this.anyAttribute(this.menu, 'title', chart.menu_pattern, chart.menu_pattern).toString()
                + ' ' + chart.type.slice(-(chart.type.length - chart.menu_pattern.length - 1)).toString()).replace(/_/g, ' ');
        }

        return (this.anyAttribute(this.menu, 'title', chart.menu, chart.menu)).toString().replace(/_/g, ' ');
    },

    menuIcon: function (chart) {
        if (typeof chart.menu_pattern !== 'undefined') {
            return this.anyAttribute(this.menu, 'icon', chart.menu_pattern, '<i class="fas fa-puzzle-piece"></i>').toString();
        }

        return this.anyAttribute(this.menu, 'icon', chart.menu, '<i class="fas fa-puzzle-piece"></i>');
    },

    menuInfo: function (chart) {
        if (typeof chart.menu_pattern !== 'undefined') {
            return this.anyAttribute(this.menu, 'info', chart.menu_pattern, null);
        }

        return this.anyAttribute(this.menu, 'info', chart.menu, null);
    },

    menuHeight: function (chart) {
        if (typeof chart.menu_pattern !== 'undefined') {
            return this.anyAttribute(this.menu, 'height', chart.menu_pattern, 1.0);
        }

        return this.anyAttribute(this.menu, 'height', chart.menu, 1.0);
    },
    submenuTitle: function (menu, submenu) {
        var key = menu + '.' + submenu;
        // console.log(key);
        var title = this.anyAttribute(this.submenu, 'title', key, submenu).toString().replace(/_/g, ' ');
        if (title.length > 28) {
            var a = title.substring(0, 13);
            var b = title.substring(title.length - 12, title.length);
            return a + '...' + b;
        }
        return title;
    },

    submenuInfo: function (menu, submenu) {
        var key = menu + '.' + submenu;
        return this.anyAttribute(this.submenu, 'info', key, null);
    },

    submenuHeight: function (menu, submenu, relative) {
        var key = menu + '.' + submenu;
        return this.anyAttribute(this.submenu, 'height', key, 1.0) * relative;
    },

    contextInfo: function (id) {
        var x = this.anyAttribute(this.context, 'info', id, null);

        if (x !== null) {
            return '<div class="shorten dashboard-context-info netdata-chart-alignment" role="document">' + x + '</div>';
        } else {
            return '';
        }
    },

    contextValueRange: function (id) {
        if (typeof this.context[id] !== 'undefined' && typeof this.context[id].valueRange !== 'undefined') {
            return this.context[id].valueRange;
        } else {
            return '[null, null]';
        }
    },

    contextHeight: function (id, def) {
        if (typeof this.context[id] !== 'undefined' && typeof this.context[id].height !== 'undefined') {
            return def * this.context[id].height;
        } else {
            return def;
        }
    },

    contextDecimalDigits: function (id, def) {
        if (typeof this.context[id] !== 'undefined' && typeof this.context[id].decimalDigits !== 'undefined') {
            return this.context[id].decimalDigits;
        } else {
            return def;
        }
    }
};
window.netdataDashboard = netdataDashboard; // share with dashboard_info.js :/

// ----------------------------------------------------------------------------

// enrich the data structure returned by netdata
// to reflect our menu system and content
// TODO: this is a shame - we should fix charts naming (issue #807)
function enrichChartData(chart) {
    var parts = chart.type.split('_');
    var tmp = parts[0];

    switch (tmp) {
        case 'ap':
        case 'net':
        case 'disk':
        case 'powersupply':
            chart.menu = tmp;
            break;

        case 'apache':
            chart.menu = chart.type;
            if (parts.length > 2 && parts[1] === 'cache') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'bind':
            chart.menu = chart.type;
            if (parts.length > 2 && parts[1] === 'rndc') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'cgroup':
            chart.menu = chart.type;
            if (chart.id.match(/.*[\._\/-:]qemu[\._\/-:]*/) || chart.id.match(/.*[\._\/-:]kvm[\._\/-:]*/)) {
                chart.menu_pattern = 'cgqemu';
            } else {
                chart.menu_pattern = 'cgroup';
            }
            break;

        case 'go':
            chart.menu = chart.type;
            if (parts.length > 2 && parts[1] === 'expvar') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'mount':
            if (parts.length > 2) {
                chart.menu = tmp + '_' + parts[1];
            } else {
                chart.menu = tmp;
            }
            break;

        case 'isc':
            chart.menu = chart.type;
            if (parts.length > 2 && parts[1] === 'dhcpd') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'ovpn':
            chart.menu = chart.type;
            if (parts.length > 3 && parts[1] === 'status' && parts[2] === 'log') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;
        case 'smartd':
        case 'web':
            chart.menu = chart.type;
            if (parts.length > 2 && parts[1] === 'log') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'dnsmasq':
            chart.menu = chart.type;
            if (parts.length === 2 && parts[1] === 'dhcp') {
                chart.menu = tmp + '_' + parts[1];
            } else if (parts.length > 2 && parts[1] === 'dhcp') {
                chart.menu_pattern = tmp + '_' + parts[1];
            } else if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;

        case 'anomaly':
            if (parts.length >= 2 && parts[1].startsWith('detection')) {
                chart.menu = tmp + '_detection';
            }
            break;

        case 'tc':
            chart.menu = tmp;

            // find a name for this device from fireqos info
            // we strip '_(in|out)' or '(in|out)_'
            if (chart.context === 'tc.qos' && (typeof options.submenu_names[chart.family] === 'undefined' || options.submenu_names[chart.family] === chart.family)) {
                var n = chart.name.split('.')[1];
                if (n.endsWith('_in')) {
                    options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_in'));
                } else if (n.endsWith('_out')) {
                    options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_out'));
                } else if (n.startsWith('in_')) {
                    options.submenu_names[chart.family] = n.slice(3, n.length);
                } else if (n.startsWith('out_')) {
                    options.submenu_names[chart.family] = n.slice(4, n.length);
                } else {
                    options.submenu_names[chart.family] = n;
                }
            }

            // increase the priority of IFB devices
            // to have inbound appear before outbound
            if (chart.id.match(/.*-ifb$/)) {
                chart.priority--;
            }

            break;

        default:
            chart.menu = chart.type;
            if (parts.length > 1) {
                chart.menu_pattern = tmp;
            }
            break;
    }

    chart.submenu = chart.family;
}
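// Examples of the mapping above (hypothetical chart types):
//   'net_eth0'            -> menu 'net'
//   'apache_cache_mysite' -> menu 'apache_cache_mysite', menu_pattern 'apache_cache'
//   'mysql_local'         -> default branch: menu 'mysql_local', menu_pattern 'mysql'
// In every case the submenu is the chart's family.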
// ----------------------------------------------------------------------------

function headMain(os, charts, duration) {
    void (os);

    if (urlOptions.mode === 'print') {
        return '';
    }

    var head = '';

    if (typeof charts['system.swap'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.swap"'
            + ' data-dimensions="used"'
            + ' data-append-options="percentage"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Used Swap"'
            + ' data-units="%"'
            + ' data-easypiechart-max-value="100"'
            + ' data-width="9%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-colors="#DD4400"'
            + ' role="application"></div>';
    }

    if (typeof charts['system.io'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
            + ' data-dimensions="in"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Disk Read"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.io.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.io"'
            + ' data-dimensions="out"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Disk Write"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.io.mainhead"'
            + ' role="application"></div>';
    }
    else if (typeof charts['system.pgpgio'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio"'
            + ' data-dimensions="in"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Disk Read"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.pgpgio.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.pgpgio"'
            + ' data-dimensions="out"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Disk Write"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.pgpgio.mainhead"'
            + ' role="application"></div>';
    }

    if (typeof charts['system.cpu'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.cpu"'
            + ' data-chart-library="gauge"'
            + ' data-title="CPU"'
            + ' data-units="%"'
            + ' data-gauge-max-value="100"'
            + ' data-width="20%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-colors="' + NETDATA.colors[12] + '"'
            + ' role="application"></div>';
    }

    if (typeof charts['system.net'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net"'
            + ' data-dimensions="received"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Net Inbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.net.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.net"'
            + ' data-dimensions="sent"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Net Outbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.net.mainhead"'
            + ' role="application"></div>';
    }
    else if (typeof charts['system.ip'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip"'
            + ' data-dimensions="received"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IP Inbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ip.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ip"'
            + ' data-dimensions="sent"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IP Outbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ip.mainhead"'
            + ' role="application"></div>';
    }
    else if (typeof charts['system.ipv4'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
            + ' data-dimensions="received"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IPv4 Inbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ipv4.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv4"'
            + ' data-dimensions="sent"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IPv4 Outbound"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ipv4.mainhead"'
            + ' role="application"></div>';
    }
    else if (typeof charts['system.ipv6'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
            + ' data-dimensions="received"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IPv6 Inbound"'
            + ' data-units="kbps"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ipv6.mainhead"'
            + ' role="application"></div>';

        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ipv6"'
            + ' data-dimensions="sent"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="IPv6 Outbound"'
            + ' data-units="kbps"'
            + ' data-width="11%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-common-units="system.ipv6.mainhead"'
            + ' role="application"></div>';
    }

    if (typeof charts['system.ram'] !== 'undefined') {
        head += '<div class="netdata-container" style="margin-right: 10px;" data-netdata="system.ram"'
            + ' data-dimensions="used|buffers|active|wired"' // active and wired are FreeBSD stats
            + ' data-append-options="percentage"'
            + ' data-chart-library="easypiechart"'
            + ' data-title="Used RAM"'
            + ' data-units="%"'
            + ' data-easypiechart-max-value="100"'
            + ' data-width="9%"'
            + ' data-points="' + duration.toString() + '"'
            + ' data-colors="' + NETDATA.colors[7] + '"'
            + ' role="application"></div>';
    }

    return head;
}

function generateHeadCharts(type, chart, duration) {
    if (urlOptions.mode === 'print') {
        return '';
    }

    var head = '';
    var hcharts = netdataDashboard.anyAttribute(netdataDashboard.context, type, chart.context, []);
    if (hcharts.length > 0) {
        var hi = 0, hlen = hcharts.length;
        while (hi < hlen) {
            if (typeof hcharts[hi] === 'function') {
                head += hcharts[hi](netdataDashboard.os, chart.id).replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);
            } else {
                head += hcharts[hi].replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);
            }
            hi++;
        }
    }
    return head;
}
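// How head charts are resolved: entries under 'mainheads'/'heads' in
// dashboard_info.js may be plain HTML strings or functions of (os, chart id);
// either way, the CHART_DURATION and CHART_UNIQUE_ID placeholders are
// substituted per chart, so one template (e.g. the gaugeChart() helper above)
// can be reused by any chart.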
function renderPage(menus, data) {
    var div = document.getElementById('charts_div');
    var pcent_width = Math.floor(100 / chartsPerRow($(div).width()));

    // find the proper duration for per-second updates
    var duration = Math.round(($(div).width() * pcent_width / 100 * data.update_every / 3) / 60) * 60;
    options.duration = duration; // probably obsolete/not needed
    options.update_every = data.update_every;

    var html = '';
    var sidebar = '<ul class="nav dashboard-sidenav" data-spy="affix" id="sidebar_ul">';
    var mainhead = headMain(netdataDashboard.os, data.charts, duration);

    // sort the menus
    var main = sortObjectByPriority(menus);
    var i = 0, len = main.length;

    // todo hook to options
    const hasChartsOnBottom = defaultAttributes.legendPosition === "bottom";
    const chartAdditionalHeight = hasChartsOnBottom ? LEGEND_BOTTOM_SINGLE_LINE_HEIGHT : 0;
    while (i < len) {
        var menu = main[i++];

        // generate an entry at the main menu

        var menuid = NETDATA.name2id('menu_' + menu);
        sidebar += '<li class=""><a href="#' + menuid + '" onClick="return scrollToId(\'' + menuid + '\');">' + menus[menu].icon + ' ' + menus[menu].title + '</a><ul class="nav">';
        html += '<div role="section" class="dashboard-section"><div role="sectionhead"><h1 id="' + menuid + '" role="heading">' + menus[menu].icon + ' ' + menus[menu].title + '</h1></div><div role="section" class="dashboard-subsection">';

        if (menus[menu].info !== null) {
            html += menus[menu].info;
        }

        // console.log(' >> ' + menu + ' (' + menus[menu].priority + '): ' + menus[menu].title);

        var shtml = '';
        var mhead = '<div class="netdata-chart-row">' + mainhead;
        mainhead = '';

        // sort the submenus of this menu
        var sub = sortObjectByPriority(menus[menu].submenus);
        var si = 0, slen = sub.length;
        while (si < slen) {
            var submenu = sub[si++];

            // generate an entry at the submenu
            var submenuid = NETDATA.name2id('menu_' + menu + '_submenu_' + submenu);
            sidebar += '<li class=""><a href="#' + submenuid + '" onClick="return scrollToId(\'' + submenuid + '\');">' + menus[menu].submenus[submenu].title + '</a></li>';
            shtml += '<div role="section" class="dashboard-section-container" id="' + submenuid + '"><h2 id="' + submenuid + '" class="netdata-chart-alignment" role="heading">' + menus[menu].submenus[submenu].title + '</h2>';

            if (menus[menu].submenus[submenu].info !== null) {
                shtml += '<div class="dashboard-submenu-info netdata-chart-alignment" role="document">' + menus[menu].submenus[submenu].info + '</div>';
            }

            var head = '<div class="netdata-chart-row">';
            var chtml = '';

            // sort the charts in this submenu of this menu
            menus[menu].submenus[submenu].charts.sort(prioritySort);
            var ci = 0, clen = menus[menu].submenus[submenu].charts.length;
            while (ci < clen) {
                var chart = menus[menu].submenus[submenu].charts[ci++];

                // generate the submenu heading charts
                mhead += generateHeadCharts('mainheads', chart, duration);
                head += generateHeadCharts('heads', chart, duration);

                function chartCommonMin(family, context, units) {
                    var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMin', context, undefined);
                    if (typeof x !== 'undefined') {
                        return ' data-common-min="' + family + '/' + context + '/' + units + '"';
                    } else {
                        return '';
                    }
                }

                function chartCommonMax(family, context, units) {
                    var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMax', context, undefined);
                    if (typeof x !== 'undefined') {
                        return ' data-common-max="' + family + '/' + context + '/' + units + '"';
                    } else {
                        return '';
                    }
                }
                // generate the chart
                if (urlOptions.mode === 'print') {
                    chtml += '<div role="row" class="dashboard-print-row">';
                }

                const chartHeight = netdataDashboard.contextHeight(chart.context, options.chartsHeight)
                    + chartAdditionalHeight;

                chtml += '<div class="netdata-chartblock-container" style="width: ' + pcent_width.toString() + '%;">' + netdataDashboard.contextInfo(chart.context) + '<div class="netdata-container" id="chart_' + NETDATA.name2id(chart.id) + '" data-netdata="' + chart.id + '"'
                    + ' data-width="100%"'
                    + ' data-height="' + chartHeight.toString() + 'px"'
                    + ' data-dygraph-valuerange="' + netdataDashboard.contextValueRange(chart.context) + '"'
                    + ' data-id="' + NETDATA.name2id(options.hostname + '/' + chart.id) + '"'
                    + ' data-colors="' + netdataDashboard.anyAttribute(netdataDashboard.context, 'colors', chart.context, '') + '"'
                    + ' data-decimal-digits="' + netdataDashboard.contextDecimalDigits(chart.context, -1) + '"'
                    + (hasChartsOnBottom ? ' data-legend-position="bottom"' : '')
                    + chartCommonMin(chart.family, chart.context, chart.units)
                    + chartCommonMax(chart.family, chart.context, chart.units)
                    + ' role="application"></div></div>';

                if (urlOptions.mode === 'print') {
                    chtml += '</div>';
                }
            }

            head += '</div>';
            shtml += head + chtml + '</div>';
        }

        mhead += '</div>';
        sidebar += '</ul></li>';
        html += mhead + shtml + '</div></div><hr role="separator"/>';
    }

    const isMemoryModeDbEngine = data.memory_mode === "dbengine";

    sidebar += '<li class="" style="padding-top:15px;"><a href="https://learn.netdata.cloud/docs/agent/collectors/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more charts</a></li>';
    sidebar += '<li class=""><a href="https://learn.netdata.cloud/docs/agent/health/quickstart/" target="_blank"><i class="fas fa-plus"></i> Add more alarms</a></li>';
    sidebar += '<li class="" style="margin:20px;color:#666;"><small>Every ' +
        ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ', ' +
        'Netdata collects <strong>' + data.dimensions_count.toLocaleString() + '</strong> metrics on ' +
        data.hostname.toString() + ', presents them in <strong>' +
        data.charts_count.toLocaleString() + '</strong> charts' +
        (isMemoryModeDbEngine ? '' : ',') + // oxford comma
        ' and monitors them with <strong>' +
        data.alarms_count.toLocaleString() + '</strong> alarms.';

    if (!isMemoryModeDbEngine) {
        sidebar += '<br /> <br />Get more history by ' +
            '<a href="https://learn.netdata.cloud/guides/longer-metrics-storage#using-the-round-robin-database" target=_blank>configuring Netdata\'s <strong>history</strong></a> or switching to the <a href="https://learn.netdata.cloud/docs/agent/database/engine" target=_blank>database engine.</a>';
    }

    sidebar += '<br/> <br/><strong>netdata</strong><br/>' + data.version.toString() + '</small>';

    sidebar += '</li>';

    sidebar += '<li id="sidebar-end-portal-container"></li>';

    sidebar += '</ul>';
    div.innerHTML = html;
    document.getElementById('sidebar').innerHTML = sidebar;

    if (urlOptions.highlight === true) {
        reduxStore.dispatch(setGlobalChartUnderlayAction({
            after: urlOptions.highlight_after,
            before: urlOptions.highlight_before,
        }));
    }

    if (urlOptions.mode === 'print') {
        printPage();
    } else {
        finalizePage();
    }
}
function renderChartsAndMenu(data) {
    options.menus = {};
    options.submenu_names = {};

    var menus = options.menus;
    var charts = data.charts;
    var m, menu_key;

    for (var c in charts) {
        if (!charts.hasOwnProperty(c)) {
            continue;
        }

        var chart = charts[c];
        enrichChartData(chart);
        m = chart.menu;

        // create the menu
        if (typeof menus[m] === 'undefined') {
            menus[m] = {
                menu_pattern: chart.menu_pattern,
                priority: chart.priority,
                submenus: {},
                title: netdataDashboard.menuTitle(chart),
                icon: netdataDashboard.menuIcon(chart),
                info: netdataDashboard.menuInfo(chart),
                height: netdataDashboard.menuHeight(chart) * options.chartsHeight
            };
        } else {
            if (typeof (menus[m].menu_pattern) === 'undefined') {
                menus[m].menu_pattern = chart.menu_pattern;
            }

            if (chart.priority < menus[m].priority) {
                menus[m].priority = chart.priority;
            }
        }

        menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? menus[m].menu_pattern : m;

        // create the submenu
        if (typeof menus[m].submenus[chart.submenu] === 'undefined') {
            menus[m].submenus[chart.submenu] = {
                priority: chart.priority,
                charts: [],
                title: null,
                info: netdataDashboard.submenuInfo(menu_key, chart.submenu),
                height: netdataDashboard.submenuHeight(menu_key, chart.submenu, menus[m].height)
            };
        } else {
            if (chart.priority < menus[m].submenus[chart.submenu].priority) {
                menus[m].submenus[chart.submenu].priority = chart.priority;
            }
        }

        // index the chart in the menu/submenu
        menus[m].submenus[chart.submenu].charts.push(chart);
    }

    // propagate the descriptive subname given to QoS
    // to all the other submenus with the same name
    for (var m in menus) {
        if (!menus.hasOwnProperty(m)) {
            continue;
        }

        for (var s in menus[m].submenus) {
            if (!menus[m].submenus.hasOwnProperty(s)) {
                continue;
            }

            // set the family using a name
            if (typeof options.submenu_names[s] !== 'undefined') {
                menus[m].submenus[s].title = s + ' (' + options.submenu_names[s] + ')';
            } else {
                menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? menus[m].menu_pattern : m;
                menus[m].submenus[s].title = netdataDashboard.submenuTitle(menu_key, s);
            }
        }
    }

    renderPage(menus, data);
}

// ----------------------------------------------------------------------------

export const handleLoadJs = (promise, library, callback) => promise
    .catch((e) => {
        console.warn('error', e);
        alert(`Cannot load required JS library: ${library}`);
    })
    .then(() => {
        callback();
    });

function loadClipboard(callback) {
    handleLoadJs(
        import("clipboard-polyfill").then((clipboard) => {
            window.clipboard = clipboard;
        }),
        "clipboard-polyfill",
        callback,
    );
}

function loadBootstrapTable(callback) {
    handleLoadJs(
        Promise.all([
            import("bootstrap-table").then(() => (
                import('bootstrap-table/dist/extensions/export/bootstrap-table-export.min')
            )),
            import("tableexport.jquery.plugin")
        ]),
        "bootstrap-table",
        callback,
    );
}

function loadBootstrapSlider(callback) {
    handleLoadJs(
        Promise.all([
            import("bootstrap-slider").then(({ default: slider }) => {
                window.Slider = slider;
            }),
            import("bootstrap-slider/dist/css/bootstrap-slider.min.css"),
        ]),
        "bootstrap-slider",
        callback,
    );
}

function loadLzString(callback) {
    handleLoadJs(import("lz-string"), "lz-string", callback);
}

function loadPako(callback) {
    handleLoadJs(
        import("pako").then(({ default: pako }) => {
            window.pako = pako;
        }),
        "pako",
        callback,
    );
}
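// Usage sketch for the lazy loaders above (hypothetical callback body): each
// helper wraps a dynamic import() in handleLoadJs(), which alerts on failure
// and runs the callback once the module - and any global it installs, such as
// window.pako - is ready:
//
//   loadPako(function () {
//       // window.pako can now be used safely
//   });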
// ----------------------------------------------------------------------------

window.clipboardCopy = text => {
    clipboard.writeText(text);
};

window.clipboardCopyBadgeEmbed = url => {
    clipboard.writeText('<embed src="' + url + '" type="image/svg+xml" height="20"/>');
};

// ----------------------------------------------------------------------------

function alarmsUpdateModal() {
    var active = '<h3>Raised Alarms</h3><table class="table">';
    var all = '<h3>All Running Alarms</h3><div class="panel-group" id="alarms_all_accordion" role="tablist" aria-multiselectable="true">';
    var footer = '<hr/><a href="https://github.com/netdata/netdata/tree/master/web/api/badges#netdata-badges" target="_blank">netdata badges</a> refresh automatically. Their color indicates the state of the alarm: <span style="color: #e05d44"><b> red </b></span> is critical, <span style="color:#fe7d37"><b> orange </b></span> is warning, <span style="color: #4c1"><b> bright green </b></span> is ok, <span style="color: #9f9f9f"><b> light grey </b></span> is undefined (i.e. no data or no status), <span style="color: #000"><b> black </b></span> is not initialized. You can copy and paste their URLs to embed them in any web page.<br/>netdata can send notifications for these alarms. Check <a href="https://github.com/netdata/netdata/blob/master/src/health/notifications/health_alarm_notify.conf" target="_blank">this configuration file</a> for more information.';

    loadClipboard(function () {
    });

    const callback = (data) => {
        options.alarm_families = [];

        if (data === null) {
            document.getElementById('alarms_active').innerHTML =
                document.getElementById('alarms_all').innerHTML =
                    document.getElementById('alarms_log').innerHTML =
                        'failed to load alarm data!';
            return;
        }

        function alarmid4human(id) {
            if (id === 0) {
                return '-';
            }

            return id.toString();
        }

        function timestamp4human(timestamp, space) {
            if (timestamp === 0) {
                return '-';
            }

            if (typeof space === 'undefined') {
                space = ' ';
            }

            var t = new Date(timestamp * 1000);

            // commented out to always have date+time, to have consistent exports
            // var now = new Date();
            // if (t.toDateString() === now.toDateString()) {
            //     return t.toLocaleTimeString();
            // }

            return t.toLocaleDateString() + space + t.toLocaleTimeString();
        }

        function alarm_lookup_explain(alarm, chart) {
            var dimensions = ' of all values ';

            if (chart.dimensions.length > 1) {
                dimensions = ' of the sum of all dimensions ';
            }

            if (typeof alarm.lookup_dimensions !== 'undefined') {
                var d = alarm.lookup_dimensions.replace(/\|/g, ',');
                var x = d.split(',');
                if (x.length > 1) {
                    dimensions = 'of the sum of dimensions <code>' + alarm.lookup_dimensions + '</code> ';
                } else {
                    dimensions = 'of all values of dimension <code>' + alarm.lookup_dimensions + '</code> ';
                }
            }

            return '<code>' + alarm.lookup_method + '</code> '
                + dimensions + ', of chart <code>' + alarm.chart + '</code>'
                + ', starting <code>' + seconds4human(alarm.lookup_after + alarm.lookup_before, { space: ' ' }) + '</code> and up to <code>' + seconds4human(alarm.lookup_before, { space: ' ' }) + '</code>'
                + ((alarm.lookup_options) ? (', with options <code>' + alarm.lookup_options.replace(/ /g, ', ') + '</code>') : '')
                + '.';
        }
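        // Example of the sentence alarm_lookup_explain() builds (hypothetical
        // alarm with lookup_method 'average', chart 'system.cpu',
        // lookup_after -600, lookup_before 0 and more than one dimension),
        // roughly:
        //   "average of the sum of all dimensions, of chart system.cpu,
        //    starting 10 minutes and up to now."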
        function alarm_to_html(alarm, full) {
            var chart = options.data.charts[alarm.chart];
            if (typeof (chart) === 'undefined') {
                chart = options.data.charts_by_name[alarm.chart];
                if (typeof (chart) === 'undefined') {
                    // this means the charts loaded are incomplete
                    // probably netdata was restarted and more alarms
                    // are now available.
                    console.log('Cannot find chart ' + alarm.chart + ', you probably need to refresh the page.');
                    return '';
                }
            }

            var has_alarm = (typeof alarm.warn !== 'undefined' || typeof alarm.crit !== 'undefined');
            var badge_url = `${serverDefault}/api/v1/badge.svg?chart=${alarm.chart}&alarm=${alarm.name}&refresh=auto`;

            var action_buttons = '<br/> <br/>role: <b>' + alarm.recipient + '</b><br/> <br/>'
                + '<div class="action-button ripple" title="click to scroll the dashboard to the chart of this alarm" data-toggle="tooltip" data-placement="bottom" onClick="scrollToChartAfterHidingModal(\'' + alarm.chart + '\', ' + alarm.last_status_change * 1000 + ', \'' + alarm.status + '\'); $(\'#alarmsModal\').modal(\'hide\'); return false;"><i class="fab fa-periscope"></i></div>'
                + '<div class="action-button ripple" title="click to copy to the clipboard the URL of this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopy(\'' + badge_url + '\'); return false;"><i class="far fa-copy"></i></div>'
                + '<div class="action-button ripple" title="click to copy to the clipboard an auto-refreshing <code>embed</code> html element for this badge" data-toggle="tooltip" data-placement="bottom" onClick="clipboardCopyBadgeEmbed(\'' + badge_url + '\'); return false;"><i class="fas fa-copy"></i></div>';

            var html = '<tr><td class="text-center" style="vertical-align: middle; word-break: break-word;" width="40%"><b>' + alarm.chart + '</b><br/> <br/><embed src="' + badge_url + '" type="image/svg+xml" height="20"/><br/> <br/><span style="font-size: 18px;">' + alarm.info + '</span>' + action_buttons + '</td>'
                + '<td><table class="table">'
                + ((typeof alarm.warn !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">warning when</td><td><span style="font-family: monospace; color:#fe7d37; font-weight: bold;">' + alarm.warn + '</span></td></tr>') : '')
                + ((typeof alarm.crit !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">critical when</td><td><span style="font-family: monospace; color: #e05d44; font-weight: bold;">' + alarm.crit + '</span></td></tr>') : '');

            if (full === true) {
                var units = chart.units;

                html += ((typeof alarm.lookup_after !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">db lookup</td><td>' + alarm_lookup_explain(alarm, chart) + '</td></tr>') : '')
                    + ((typeof alarm.calc !== 'undefined') ? ('<tr><td width="10%" style="text-align:right">calculation</td><td><span style="font-family: monospace;">' + alarm.calc + '</span></td></tr>') : '')
                    + ((chart.green !== null) ? ('<tr><td width="10%" style="text-align:right">green threshold</td><td><code>' + chart.green + ' ' + units + '</code></td></tr>') : '')
                    + ((chart.red !== null) ? ('<tr><td width="10%" style="text-align:right">red threshold</td><td><code>' + chart.red + ' ' + units + '</code></td></tr>') : '');
            }

            if (alarm.warn_repeat_every > 0) {
                html += '<tr><td width="10%" style="text-align:right">repeat warning</td><td>' + seconds4human(alarm.warn_repeat_every) + '</td></tr>';
            }

            if (alarm.crit_repeat_every > 0) {
                html += '<tr><td width="10%" style="text-align:right">repeat critical</td><td>' + seconds4human(alarm.crit_repeat_every) + '</td></tr>';
            }

            var delay = '';
            if ((alarm.delay_up_duration > 0 || alarm.delay_down_duration > 0) && alarm.delay_multiplier !== 0 && alarm.delay_max_duration > 0) {
                if (alarm.delay_up_duration === alarm.delay_down_duration) {
                    delay += '<small><br/>hysteresis ' + seconds4human(alarm.delay_up_duration, {
                        space: ' ',
                        negative_suffix: ''
                    });
                } else {
                    delay = '<small><br/>hysteresis ';
                    if (alarm.delay_up_duration > 0) {
                        delay += 'on escalation <code>' + seconds4human(alarm.delay_up_duration, {
                            space: ' ',
                            negative_suffix: ''
                        }) + '</code>, ';
                    }
                    if (alarm.delay_down_duration > 0) {
                        delay += 'on recovery <code>' + seconds4human(alarm.delay_down_duration, {
                            space: ' ',
                            negative_suffix: ''
                        }) + '</code>, ';
                    }
                }
                if (alarm.delay_multiplier !== 1.0) {
                    delay += 'multiplied by <code>' + alarm.delay_multiplier.toString() + '</code>';
                    delay += ', up to <code>' + seconds4human(alarm.delay_max_duration, {
                        space: ' ',
                        negative_suffix: ''
                    }) + '</code>';
                }
                delay += '</small>';
            }

            html += '<tr><td width="10%" style="text-align:right">check every</td><td>' + seconds4human(alarm.update_every, {
                space: ' ',
                negative_suffix: ''
            }) + '</td></tr>'
                + ((has_alarm === true) ? ('<tr><td width="10%" style="text-align:right">execute</td><td><span style="font-family: monospace;">' + alarm.exec + '</span>' + delay + '</td></tr>') : '')
                + '<tr><td width="10%" style="text-align:right">source</td><td><span style="font-family: monospace; word-break: break-word;">' + alarm.source + '</span></td></tr>'
                + '</table></td></tr>';

            return html;
        }
        function alarm_family_show(id) {
            var html = '<table class="table">';
            var family = options.alarm_families[id];
            var len = family.arr.length;
            while (len--) {
                var alarm = family.arr[len];
                html += alarm_to_html(alarm, true);
            }
            html += '</table>';

            $('#alarm_all_' + id.toString()).html(html);
            enableTooltipsAndPopovers();
        }

        // find the proper family of each alarm
        var x, family, alarm;
        var count_active = 0;
        var count_all = 0;
        var families = {};
        var families_sort = [];
        for (x in data.alarms) {
            if (!data.alarms.hasOwnProperty(x)) {
                continue;
            }

            alarm = data.alarms[x];
            family = alarm.family;

            // find the chart
            var chart = options.data.charts[alarm.chart];
            if (typeof chart === 'undefined') {
                chart = options.data.charts_by_name[alarm.chart];
            }

            // not found - this should never happen!
            if (typeof chart === 'undefined') {
                console.log('WARNING: alarm ' + x + ' is linked to chart ' + alarm.chart + ', which was not found in the list of charts received from the server.');
                chart = { priority: 9999999 };
            } else if (typeof chart.menu !== 'undefined' && typeof chart.submenu !== 'undefined') {
                // derive the family from the chart
                family = chart.menu + ' - ' + chart.submenu;
            }

            if (typeof families[family] === 'undefined') {
                families[family] = {
                    name: family,
                    arr: [],
                    priority: chart.priority
                };

                families_sort.push(families[family]);
            }

            if (chart.priority < families[family].priority) {
                families[family].priority = chart.priority;
            }

            families[family].arr.unshift(alarm);
        }
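        // Grouping above, in short: each alarm is filed under a family derived
        // from its chart's menu/submenu (falling back to the alarm's own
        // family when the chart is unknown); unshift() stores each family's
        // alarms in reverse order, so the later while (len--) loops render
        // them in their original order.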
        // sort the families, like the dashboard menu does
        var families_sorted = families_sort.sort(function (a, b) {
            if (a.priority < b.priority) {
                return -1;
            }
            if (a.priority > b.priority) {
                return 1;
            }
            return naturalSortCompare(a.name, b.name);
        });

        var i = 0;
        var fc = 0;
        var len = families_sorted.length;
        while (len--) {
            family = families_sorted[i++].name;
            var active_family_added = false;

            var expanded = 'false';
            var collapsed = 'class="collapsed"';
            var cin = '';

            // uncomment if the first family needs to be expanded by default
            // var expanded = 'true';
            // var collapsed = '';
            // var cin = 'in';

            if (fc !== 0) {
                all += "</table></div></div></div>";

                // uncomment if the first family needs to be expanded by default
                // expanded = 'false';
                // collapsed = 'class="collapsed"';
                // cin = '';
            }

            all += '<div class="panel panel-default"><div class="panel-heading" role="tab" id="alarm_all_heading_' + fc.toString() + '"><h4 class="panel-title"><a ' + collapsed + ' role="button" data-toggle="collapse" data-parent="#alarms_all_accordion" href="#alarm_all_' + fc.toString() + '" aria-expanded="' + expanded + '" aria-controls="alarm_all_' + fc.toString() + '">' + family.toString() + '</a></h4></div><div id="alarm_all_' + fc.toString() + '" class="panel-collapse collapse ' + cin + '" role="tabpanel" aria-labelledby="alarm_all_heading_' + fc.toString() + '" data-alarm-id="' + fc.toString() + '"><div class="panel-body" id="alarm_all_body_' + fc.toString() + '">';

            options.alarm_families[fc] = families[family];

            fc++;

            var arr = families[family].arr;
            var c = arr.length;
            while (c--) {
                alarm = arr[c];
                if (alarm.status === 'WARNING' || alarm.status === 'CRITICAL') {
                    if (!active_family_added) {
                        active_family_added = true;
                        active += '<tr><th class="text-center" colspan="2"><h4>' + family + '</h4></th></tr>';
                    }
                    count_active++;
                    active += alarm_to_html(alarm, true);
                }

                count_all++;
            }
        }
        active += "</table>";
        if (families_sorted.length > 0) {
            all += "</div></div></div>";
        }
        all += "</div>";
        if (!count_active) {
            active += '<div style="width:100%; height: 100px; text-align: center;"><span style="font-size: 50px;"><i class="fas fa-thumbs-up"></i></span><br/>Everything is normal. No raised alarms.</div>';
        } else {
            active += footer;
        }

        if (!count_all) {
            all += "<h4>No alarms are running in this system.</h4>";
        } else {
            all += footer;
        }

        document.getElementById('alarms_active').innerHTML = active;
        document.getElementById('alarms_all').innerHTML = all;
        enableTooltipsAndPopovers();

        if (families_sorted.length > 0) {
            alarm_family_show(0);
        }

        // register bootstrap events
        var $accordion = $('#alarms_all_accordion');
        $accordion.on('show.bs.collapse', function (d) {
            var target = $(d.target);
            var id = $(target).data('alarm-id');
            alarm_family_show(id);
        });
        $accordion.on('hidden.bs.collapse', function (d) {
            var target = $(d.target);
            var id = $(target).data('alarm-id');
            $('#alarm_all_' + id.toString()).html('');
        });

        document.getElementById('alarms_log').innerHTML = '<h3>Alarm Log</h3><table id="alarms_log_table"></table>';

        loadBootstrapTable(function () {
            $('#alarms_log_table').bootstrapTable({
                url: `${serverDefault}/api/v1/alarm_log?all`,
                cache: false,
                pagination: true,
                pageSize: 10,
                showPaginationSwitch: false,
                search: true,
                searchTimeOut: 300,
                searchAlign: 'left',
                showColumns: true,
                showExport: true,
                exportDataType: 'all',
                exportOptions: {
                    fileName: 'netdata_alarm_log'
                },
                onClickRow: function (row) {
                    scrollToChartAfterHidingModal(row.chart, row.when * 1000, row.status);
                    $('#alarmsModal').modal('hide');
                    return false;
                },
                rowStyle: function (row) {
                    switch (row.status) {
                        case 'CRITICAL':
                            return { classes: 'danger' };
                        case 'WARNING':
                            return { classes: 'warning' };
                        case 'UNDEFINED':
                            return { classes: 'info' };
                        case 'CLEAR':
                            return { classes: 'success' };
                    }
                    return {};
                },
                showFooter: false,
                showHeader: true,
                showRefresh: true,
                showToggle: false,
                sortable: true,
                silentSort: false,
                columns: [
                    {
                        field: 'when',
                        title: 'Event Date',
                        valign: 'middle',
                        titleTooltip: 'The date and time the event took place',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return timestamp4human(value, ' ');
                        },
                        align: 'center',
                        switchable: false,
                        sortable: true
                    },
                    {
                        field: 'hostname',
                        title: 'Host',
                        valign: 'middle',
                        titleTooltip: 'The host that generated this event',
                        align: 'center',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'unique_id',
                        title: 'Unique ID',
                        titleTooltip: 'The host unique ID for this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return alarmid4human(value);
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'alarm_id',
                        title: 'Alarm ID',
                        titleTooltip: 'The ID of the alarm that generated this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return alarmid4human(value);
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'alarm_event_id',
                        title: 'Alarm Event ID',
                        titleTooltip: 'The incremental ID of this event for the given alarm',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return alarmid4human(value);
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'chart',
                        title: 'Chart',
                        titleTooltip: 'The chart the alarm is attached to',
                        align: 'center',
                        valign: 'middle',
                        switchable: false,
                        sortable: true
                    },
                    {
                        field: 'family',
                        title: 'Family',
                        titleTooltip: 'The family of the chart the alarm is attached to',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'name',
                        title: 'Alarm',
                        titleTooltip: 'The alarm name that generated this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return value.toString().replace(/_/g, ' ');
                        },
                        align: 'center',
                        valign: 'middle',
                        switchable: false,
                        sortable: true
                    },
                    {
                        field: 'value_string',
                        title: 'Friendly Value',
                        titleTooltip: 'The value of the alarm that triggered this event',
                        align: 'right',
                        valign: 'middle',
                        sortable: true
                    },
                    {
                        field: 'old_value_string',
                        title: 'Friendly Old Value',
                        titleTooltip: 'The value of the alarm just before this event',
                        align: 'right',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'old_value',
                        title: 'Old Value',
                        titleTooltip: 'The value of the alarm just before this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'value',
                        title: 'Value',
                        titleTooltip: 'The value of the alarm that triggered this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();
                        },
                        align: 'right',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'units',
                        title: 'Units',
                        titleTooltip: 'The units of the value of the alarm',
                        align: 'left',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'old_status',
                        title: 'Old Status',
                        titleTooltip: 'The status of the alarm just before this event',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'status',
                        title: 'Status',
                        titleTooltip: 'The status of the alarm that was set due to this event',
                        align: 'center',
                        valign: 'middle',
                        switchable: false,
                        sortable: true
                    },
                    {
                        field: 'duration',
                        title: 'Last Duration',
                        titleTooltip: 'The duration the alarm was at its previous state, just before this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'non_clear_duration',
                        title: 'Raised Duration',
                        titleTooltip: 'The duration the alarm was raised, just before this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'recipient',
                        title: 'Recipient',
                        titleTooltip: 'The recipient of this event',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'processed',
                        title: 'Processed Status',
                        titleTooltip: 'True when this event is processed',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);

                            if (value === true) {
                                return 'DONE';
                            } else {
                                return 'PENDING';
                            }
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'updated',
                        title: 'Updated Status',
                        titleTooltip: 'True when this event has been updated by another event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);

                            if (value === true) {
                                return 'UPDATED';
                            } else {
                                return 'CURRENT';
                            }
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'updated_by_id',
                        title: 'Updated By ID',
                        titleTooltip: 'The unique ID of the event that obsoleted this one',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return alarmid4human(value);
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'updates_id',
                        title: 'Updates ID',
                        titleTooltip: 'The unique ID of the event obsoleted because of this event',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return alarmid4human(value);
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'exec',
                        title: 'Script',
                        titleTooltip: 'The script to handle the event notification',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'exec_run',
                        title: 'Script Run At',
                        titleTooltip: 'The date and time the script has been run',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return timestamp4human(value, ' ');
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'exec_code',
                        title: 'Script Return Value',
                        titleTooltip: 'The return code of the script',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);

                            if (value === 0) {
                                return 'OK (returned 0)';
                            } else {
                                return 'FAILED (with code ' + value.toString() + ')';
                            }
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'delay',
                        title: 'Script Delay',
                        titleTooltip: 'The hysteresis of the notification',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);

                            return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'delay_up_to_timestamp',
                        title: 'Script Delay Run At',
                        titleTooltip: 'The date and time the script should be run, after hysteresis',
                        formatter: function (value, row, index) {
                            void (row);
                            void (index);
                            return timestamp4human(value, ' ');
                        },
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'info',
                        title: 'Description',
                        titleTooltip: 'A short description of the alarm',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    },
                    {
                        field: 'source',
                        title: 'Alarm Source',
                        titleTooltip: 'The source of configuration of the alarm',
                        align: 'center',
                        valign: 'middle',
                        visible: false,
                        sortable: true
                    }
                ]
            });
            // console.log($('#alarms_log_table').bootstrapTable('getOptions'));
        });
    };

    reduxStore.dispatch(fetchAllAlarmsAction.request({
        callback,
        serverDefault,
    }));
}

function initializeDynamicDashboardWithData(data) {
    if (data !== null) {
        options.hostname = data.hostname;
        options.data = data;
        options.version = data.version;
        options.release_channel = data.release_channel;
        options.timezone = data.timezone;
        netdataDashboard.os = data.os;

        if (typeof data.hosts !== 'undefined') {
            options.hosts = data.hosts;
        }

        // update the displayed netdata version
        document.getElementById('netdataVersion').innerHTML = options.version;

        // update the dashboard title
        document.title = options.hostname + ' netdata dashboard';

        // create a chart_by_name index
        data.charts_by_name = {};
        var charts = data.charts;
        var x;
        for (x in charts) {
            if (!charts.hasOwnProperty(x)) {
                continue;
            }

            var chart = charts[x];
            data.charts_by_name[chart.name] = chart;
        }

        // render all charts
        renderChartsAndMenu(data);
    }
}
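// Why the charts_by_name index above exists: alarms may reference a chart
// either by its id or by its name, so chart lookups (see alarm_to_html())
// try options.data.charts[x] first and fall back to
// options.data.charts_by_name[x].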
{};\n var charts = data.charts;\n var x;\n for (x in charts) {\n if (!charts.hasOwnProperty(x)) {\n continue;\n }\n\n var chart = charts[x];\n data.charts_by_name[chart.name] = chart;\n }\n\n // render all charts\n renderChartsAndMenu(data);\n }\n}\n\n// an object to keep initialization configuration\n// needed due to the async nature of the XSS modal\nvar initializeConfig = {\n url: null,\n custom_info: true,\n};\n\n// will be removed when we'll transform dashboard_info.js into DSL\n// memoize so it's fetched only once\nconst loadDashboardInfo = memoizeWith(identity, () => (\n $.ajax({\n url: `${serverDefault}dashboard_info.js`,\n cache: true,\n dataType: 'script',\n xhrFields: { withCredentials: true }, // required for the cookie\n })\n .fail(function () {\n alert(`Cannot load required JS library: dashboard_info.js`);\n })\n))\n\nfunction loadCustomDashboardInfo(url, callback) {\n $.ajax({\n url,\n cache: true,\n dataType: \"script\",\n xhrFields: { withCredentials: true } // required for the cookie\n })\n .fail(function () {\n alert(`Cannot load required JS library: ${url}`);\n })\n .always(function () {\n $.extend(true, netdataDashboard, customDashboard);\n callback();\n })\n}\n\nfunction initializeChartsAndCustomInfo() {\n loadDashboardInfo().then(() => {\n // download all the charts the server knows\n NETDATA.chartRegistry.downloadAll(initializeConfig.url, function (data) {\n if (data !== null) {\n reduxStore.dispatch(chartsMetadataRequestSuccess({ data }))\n if (initializeConfig.custom_info === true && typeof data.custom_info !== 'undefined' && data.custom_info !== \"\" && window.netdataSnapshotData === null) {\n //console.log('loading custom dashboard decorations from server ' + initializeConfig.url);\n loadCustomDashboardInfo(serverDefault + data.custom_info, function () {\n initializeDynamicDashboardWithData(data);\n });\n } else {\n //console.log('not loading custom dashboard decorations from server ' + initializeConfig.url);\n initializeDynamicDashboardWithData(data);\n }\n }\n });\n })\n}\n\nwindow.xssModalDisableXss = () => {\n //console.log('disabling xss checks');\n NETDATA.xss.enabled = false;\n NETDATA.xss.enabled_for_data = false;\n initializeConfig.custom_info = true;\n initializeChartsAndCustomInfo();\n return false;\n};\n\n\nwindow.xssModalKeepXss = () => {\n //console.log('keeping xss checks');\n NETDATA.xss.enabled = true;\n NETDATA.xss.enabled_for_data = true;\n initializeConfig.custom_info = false;\n initializeChartsAndCustomInfo();\n return false;\n};\n\nfunction initializeDynamicDashboard(newReduxStore) {\n if (newReduxStore) {\n reduxStore = newReduxStore\n\n netdataPrepCallback()\n\n initializeConfig.url = serverDefault;\n }\n\n if (typeof netdataCheckXSS !== 'undefined' && netdataCheckXSS === true) {\n //$(\"#loadOverlay\").css(\"display\",\"none\");\n document.getElementById('netdataXssModalServer').innerText = initializeConfig.url;\n $('#xssModal').modal('show');\n } else {\n initializeChartsAndCustomInfo();\n }\n}\n\n// ----------------------------------------------------------------------------\n\nfunction versionLog(msg) {\n document.getElementById('versionCheckLog').innerHTML = msg;\n}\n\n// New way of checking for updates, based only on versions\n\nfunction versionsMatch(v1, v2) {\n if (v1 == v2) {\n return true;\n } else {\n let s1 = v1.split('.');\n let s2 = v2.split('.');\n // Check major version\n let n1 = parseInt(s1[0].substring(1, 2), 10);\n let n2 = parseInt(s2[0].substring(1, 2), 10);\n if (n1 < n2) return false;\n else if (n1 > n2) return 
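// The loadDashboardInfo pattern above — memoizeWith(identity, ...) around a jQuery
// ajax call — creates the promise once and hands that same promise to every later
// caller. A dependency-free sketch of the "fetch once, share the promise" idea
// (memoizeOnce and the fetch URL are illustrative, not names from this codebase):
const memoizeOnce = (fn) => {
    let cached;                        // the single in-flight (or settled) promise
    return () => (cached ??= fn());    // first call runs fn, later calls reuse it
};
const loadInfoOnce = memoizeOnce(() =>
    fetch('dashboard_info.js', { credentials: 'include' })  // mirrors withCredentials
        .then((r) => r.text()));
// loadInfoOnce(); loadInfoOnce();     // -> exactly one network request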
true;\n\n // Check minor version\n n1 = parseInt(s1[1], 10);\n n2 = parseInt(s2[1], 10);\n if (n1 < n2) return false;\n else if (n1 > n2) return true;\n\n // Split patch: format could be e.g. 0-22-nightly\n s1 = s1[2].split('-');\n s2 = s2[2].split('-');\n\n n1 = parseInt(s1[0], 10);\n n2 = parseInt(s2[0], 10);\n if (n1 < n2) return false;\n else if (n1 > n2) return true;\n\n n1 = (s1.length > 1) ? parseInt(s1[1], 10) : 0;\n n2 = (s2.length > 1) ? parseInt(s2[1], 10) : 0;\n if (n1 < n2) return false;\n else return true;\n }\n}\n\nfunction getGithubLatestVersion(callback, channel) {\n versionLog('Downloading latest version id from github...');\n let url\n\n if (channel === 'stable') {\n url = 'https://api.github.com/repos/netdata/netdata/releases/latest'\n } else {\n url = 'https://api.github.com/repos/netdata/netdata-nightlies/releases/latest'\n }\n\n $.ajax({\n url: url,\n async: true,\n cache: false\n })\n .done(function (data) {\n data = data.tag_name.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\");\n versionLog('Latest stable version from github is ' + data);\n callback(data);\n })\n .fail(function () {\n versionLog('Failed to download the latest stable version id from github!');\n callback(null);\n });\n}\n\nfunction checkForUpdateByVersion(force, callback) {\n getGithubLatestVersion(function (sha2) {\n callback(options.version, sha2);\n }, options.release_channel);\n return null;\n}\n\nwindow.notifyForUpdate = (force) => {\n versionLog('<p>checking for updates...</p>');\n\n var now = Date.now();\n\n if (typeof force === 'undefined' || force !== true) {\n var last = loadLocalStorage('last_update_check');\n\n if (typeof last === 'string') {\n last = parseInt(last);\n } else {\n last = 0;\n }\n\n if (now - last < 3600000 * 8) {\n // no need to check it - too soon\n return;\n }\n }\n\n checkForUpdateByVersion(force, function (sha1, sha2) {\n var save = false;\n\n if (sha1 === null) {\n save = false;\n versionLog('<p><big>Failed to get your netdata version!</big></p><p>You can always get the latest netdata from <a href=\"https://github.com/netdata/netdata\" target=\"_blank\">its github page</a>.</p>');\n } else if (sha2 === null) {\n save = false;\n versionLog('<p><big>Failed to get the latest netdata version.</big></p><p>You can always get the latest netdata from <a href=\"https://github.com/netdata/netdata\" target=\"_blank\">its github page</a>.</p>');\n } else if (versionsMatch(sha1, sha2)) {\n save = true;\n versionLog('<p><big>You already have the latest netdata!</big></p><p>No update yet?<br/>We probably need some motivation to keep going on!</p><p>If you haven\\'t already, <a href=\"https://github.com/netdata/netdata\" target=\"_blank\">give netdata a <b><i class=\"fas fa-star\"></i></b> at its github page</a>.</p>');\n } else {\n save = true;\n var compare = 'https://learn.netdata.cloud/docs/agent/changelog/';\n versionLog('<p><big><strong>New version of netdata available!</strong></big></p><p>Latest version: <b><code>' + sha2 + '</code></b></p><p><a href=\"' + compare + '\" target=\"_blank\">Click here for the changes log</a> and<br/><a href=\"https://github.com/netdata/netdata/tree/master/packaging/installer/UPDATE.md\" target=\"_blank\">click here for directions on updating</a> your netdata installation.</p><p>We suggest to review the changes log for new features you may be interested, or important bug fixes you may need.<br/>Keeping your netdata updated is generally a good idea.</p>');\n }\n\n if (save) {\n saveLocalStorage('last_update_check', now.toString());\n }\n });\n}\n\n// 
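// versionsMatch above answers "is v1 at least v2?" for tags shaped like "v1.29.3"
// or "v1.29.0-22-nightly": compare major, minor, patch, then the nightly counter.
// A compact sketch of the same field-by-field comparison, assuming exactly that
// tag shape (versionAtLeast is an illustrative name, not the bundle's function):
function versionAtLeast(v1, v2) {
    const parse = (v) => {
        const [maj, min, rest] = v.replace(/^v/, '').split('.');
        const [patch, extra = '0'] = rest.split('-');   // "0-22-nightly" -> 0 and 22
        return [maj, min, patch, extra].map(Number);
    };
    const a = parse(v1), b = parse(v2);
    for (let i = 0; i < 4; i++) {
        if (a[i] !== b[i]) return a[i] > b[i];          // first differing field decides
    }
    return true;                                        // identical versions match
}
// versionAtLeast('v1.29.0-22-nightly', 'v1.29.0-10-nightly') === true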
----------------------------------------------------------------------------\n// printing dashboards\n\nfunction showPageFooter() {\n document.getElementById('footer').style.display = 'block';\n}\n\nwindow.printPreflight = () => {\n var url = document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString() + urlOptions.genHash() + ';mode=print';\n var width = 990;\n var height = screen.height * 90 / 100;\n //console.log(url);\n //console.log(document.location);\n window.open(url, '', 'width=' + width.toString() + ',height=' + height.toString() + ',menubar=no,toolbar=no,personalbar=no,location=no,resizable=no,scrollbars=yes,status=no,chrome=yes,centerscreen=yes,attention=yes,dialog=yes');\n $('#printPreflightModal').modal('hide');\n}\n\nfunction printPage() {\n window.NETDATA.parseDom();\n\n if (urlOptions.after < 0) {\n reduxStore.dispatch(setDefaultAfterAction({ after: urlOptions.after }))\n } else if (urlOptions.pan_and_zoom === true) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: urlOptions.after,\n before: urlOptions.before,\n }))\n }\n showPageFooter(); // todo after full rewrite the footer should show when charts are loaded\n}\n\n// --------------------------------------------------------------------\n\nfunction jsonStringifyFn(obj) {\n return JSON.stringify(obj, function (key, value) {\n return (typeof value === 'function') ? value.toString() : value;\n });\n}\n\nfunction jsonParseFn(str) {\n return JSON.parse(str, function (key, value) {\n if (typeof value != 'string') {\n return value;\n }\n return (value.substring(0, 8) == 'function') ? eval('(' + value + ')') : value;\n });\n}\n\n// --------------------------------------------------------------------\n\nvar snapshotOptions = {\n bytes_per_chart: 2048,\n compressionDefault: 'pako.deflate.base64',\n\n compressions: {\n 'none': {\n bytes_per_point_memory: 5.2,\n bytes_per_point_disk: 5.6,\n\n compress: function (s) {\n return s;\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return s;\n }\n },\n\n 'pako.deflate.base64': {\n bytes_per_point_memory: 1.8,\n bytes_per_point_disk: 1.9,\n\n compress: function (s) {\n return btoa(pako.deflate(s, { to: 'string' }));\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return pako.inflate(atob(s), { to: 'string' });\n }\n },\n\n 'pako.deflate': {\n bytes_per_point_memory: 1.4,\n bytes_per_point_disk: 3.2,\n\n compress: function (s) {\n return pako.deflate(s, { to: 'string' });\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return pako.inflate(s, { to: 'string' });\n }\n },\n\n 'lzstring.utf16': {\n bytes_per_point_memory: 1.7,\n bytes_per_point_disk: 2.6,\n\n compress: function (s) {\n return LZString.compressToUTF16(s);\n },\n\n compressed_length: function (s) {\n return s.length * 2;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromUTF16(s);\n }\n },\n\n 'lzstring.base64': {\n bytes_per_point_memory: 2.1,\n bytes_per_point_disk: 2.3,\n\n compress: function (s) {\n return LZString.compressToBase64(s);\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromBase64(s);\n }\n },\n\n 'lzstring.uri': {\n bytes_per_point_memory: 2.1,\n bytes_per_point_disk: 2.3,\n\n compress: function (s) {\n return LZString.compressToEncodedURIComponent(s);\n },\n\n compressed_length: function (s) {\n return 
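// jsonStringifyFn / jsonParseFn above persist objects whose values may include
// functions (menu and chart decorations) by storing fn.toString() and eval()-ing
// it back when a snapshot is loaded. A small round trip showing the mechanism;
// note the reviver's 'function' prefix check means arrow functions would not be
// revived:
const original = { name: 'cpu', fmt: function (v) { return v + '%'; } };
const wire = JSON.stringify(original, (k, v) =>
    typeof v === 'function' ? v.toString() : v);         // function -> source string
const revived = JSON.parse(wire, (k, v) =>
    (typeof v === 'string' && v.substring(0, 8) === 'function')
        ? eval('(' + v + ')') : v);                      // source string -> function
console.assert(revived.fmt(42) === '42%');               // usable again after the trip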
s.length;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromEncodedURIComponent(s);\n }\n }\n }\n};\n\n// --------------------------------------------------------------------\n// loading snapshots\n\nfunction loadSnapshotModalLog(priority, msg) {\n document.getElementById('loadSnapshotStatus').className = \"alert alert-\" + priority;\n document.getElementById('loadSnapshotStatus').innerHTML = msg;\n}\n\nvar tmpSnapshotData = null;\n\nwindow.loadSnapshot = () => {\n $('#loadSnapshotImport').addClass('disabled');\n\n if (tmpSnapshotData === null) {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'no data have been loaded');\n return;\n }\n\n loadPako(function () {\n loadLzString(function () {\n loadSnapshotModalLog('info', 'Please wait, activating snapshot...');\n $('#loadSnapshotModal').modal('hide');\n\n netdataShowAlarms = false;\n netdataRegistry = false;\n netdataServer = tmpSnapshotData.server;\n\n document.getElementById('charts_div').innerHTML = '';\n document.getElementById('sidebar').innerHTML = '';\n\n if (typeof tmpSnapshotData.hash !== 'undefined') {\n urlOptions.hash = tmpSnapshotData.hash;\n } else {\n urlOptions.hash = '#';\n }\n\n if (typeof tmpSnapshotData.info !== 'undefined') {\n var info = jsonParseFn(tmpSnapshotData.info);\n if (typeof info.menu !== 'undefined') {\n netdataDashboard.menu = info.menu;\n }\n\n if (typeof info.submenu !== 'undefined') {\n netdataDashboard.submenu = info.submenu;\n }\n\n if (typeof info.context !== 'undefined') {\n netdataDashboard.context = info.context;\n }\n }\n\n if (typeof tmpSnapshotData.compression !== 'string') {\n tmpSnapshotData.compression = 'none';\n }\n\n if (typeof snapshotOptions.compressions[tmpSnapshotData.compression] === 'undefined') {\n alert('unknown compression method: ' + tmpSnapshotData.compression);\n tmpSnapshotData.compression = 'none';\n }\n\n tmpSnapshotData.uncompress = snapshotOptions.compressions[tmpSnapshotData.compression].uncompress;\n\n window.NETDATA.parseDom()\n reduxStore.dispatch(loadSnapshotAction({\n snapshot: tmpSnapshotData,\n }))\n\n window.netdataSnapshotData = tmpSnapshotData;\n\n urlOptions.after = tmpSnapshotData.after_ms;\n urlOptions.before = tmpSnapshotData.before_ms;\n\n if (typeof tmpSnapshotData.highlight_after_ms !== 'undefined'\n && tmpSnapshotData.highlight_after_ms !== null\n && tmpSnapshotData.highlight_after_ms > 0\n && typeof tmpSnapshotData.highlight_before_ms !== 'undefined'\n && tmpSnapshotData.highlight_before_ms !== null\n && tmpSnapshotData.highlight_before_ms > 0\n ) {\n urlOptions.highlight_after = tmpSnapshotData.highlight_after_ms;\n urlOptions.highlight_before = tmpSnapshotData.highlight_before_ms;\n urlOptions.highlight = true;\n } else {\n urlOptions.highlight_after = 0;\n urlOptions.highlight_before = 0;\n urlOptions.highlight = false;\n }\n\n netdataCheckXSS = false; // disable the modal - this does not affect XSS checks, since dashboard.js is already loaded\n NETDATA.xss.enabled = true; // we should not do any remote requests, but if we do, check them\n NETDATA.xss.enabled_for_data = true; // check also snapshot data - that have been excluded from the initial check, due to compression\n loadSnapshotPreflightEmpty();\n initializeDynamicDashboard();\n });\n });\n};\n\nfunction loadSnapshotPreflightFile(file) {\n var filename = NETDATA.xss.string(file.name);\n var fr = new FileReader();\n fr.onload = function (e) {\n document.getElementById('loadSnapshotFilename').innerHTML = filename;\n var result = null;\n try {\n result = 
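// Round-tripping one payload through the default codec of the compressions table
// above; this assumes the pako 1.x API the dashboard bundles, where { to: 'string' }
// produces a binary string that btoa/atob can carry:
var codec = snapshotOptions.compressions['pako.deflate.base64'];
var payload = JSON.stringify({ labels: ['time', 'user', 'system'], data: [[0, 1.2, 0.4]] });
var packed = codec.compress(payload);                    // deflate, then base64
console.assert(codec.uncompress(packed) === payload);    // lossless round trip
console.log('raw', payload.length, 'bytes ->', codec.compressed_length(packed), 'packed');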
NETDATA.xss.checkAlways('snapshot', JSON.parse(e.target.result), /^(snapshot\\.info|snapshot\\.data)$/);\n\n //console.log(result);\n var date_after = new Date(result.after_ms);\n var date_before = new Date(result.before_ms);\n\n if (typeof result.charts_ok === 'undefined') {\n result.charts_ok = 'unknown';\n }\n\n if (typeof result.charts_failed === 'undefined') {\n result.charts_failed = 0;\n }\n\n if (typeof result.compression === 'undefined') {\n result.compression = 'none';\n }\n\n if (typeof result.data_size === 'undefined') {\n result.data_size = 0;\n }\n\n document.getElementById('loadSnapshotFilename').innerHTML = '<code>' + filename + '</code>';\n document.getElementById('loadSnapshotHostname').innerHTML = '<b>' + result.hostname + '</b>, netdata version: <b>' + result.netdata_version.toString() + '</b>';\n document.getElementById('loadSnapshotURL').innerHTML = result.url;\n document.getElementById('loadSnapshotCharts').innerHTML = result.charts.charts_count.toString() + ' charts, ' + result.charts.dimensions_count.toString() + ' dimensions, ' + result.data_points.toString() + ' points per dimension, ' + Math.round(result.duration_ms / result.data_points).toString() + ' ms per point';\n document.getElementById('loadSnapshotInfo').innerHTML = 'version: <b>' + result.snapshot_version.toString() + '</b>, includes <b>' + result.charts_ok.toString() + '</b> unique chart data queries ' + ((result.charts_failed > 0) ? ('<b>' + result.charts_failed.toString() + '</b> failed') : '').toString() + ', compressed with <code>' + result.compression.toString() + '</code>, data size ' + (Math.round(result.data_size * 100 / 1024 / 1024) / 100).toString() + ' MB';\n document.getElementById('loadSnapshotTimeRange').innerHTML = '<b>' + localeDateString(date_after) + ' ' + localeTimeString(date_after) + '</b> to <b>' + localeDateString(date_before) + ' ' + localeTimeString(date_before) + '</b>';\n document.getElementById('loadSnapshotComments').innerHTML = ((result.comments) ? 
result.comments : '').toString();\n loadSnapshotModalLog('success', 'File loaded, click <b>Import</b> to render it!');\n $('#loadSnapshotImport').removeClass('disabled');\n\n tmpSnapshotData = result;\n }\n catch (e) {\n console.log(e);\n document.getElementById('loadSnapshotStatus').className = \"alert alert-danger\";\n document.getElementById('loadSnapshotStatus').innerHTML = \"Failed to parse this file!\";\n $('#loadSnapshotImport').addClass('disabled');\n }\n }\n\n //console.log(file);\n fr.readAsText(file);\n};\n\nfunction loadSnapshotPreflightEmpty() {\n document.getElementById('loadSnapshotFilename').innerHTML = '';\n document.getElementById('loadSnapshotHostname').innerHTML = '';\n document.getElementById('loadSnapshotURL').innerHTML = '';\n document.getElementById('loadSnapshotCharts').innerHTML = '';\n document.getElementById('loadSnapshotInfo').innerHTML = '';\n document.getElementById('loadSnapshotTimeRange').innerHTML = '';\n document.getElementById('loadSnapshotComments').innerHTML = '';\n loadSnapshotModalLog('success', 'Browse for a snapshot file (or drag it and drop it here), then click <b>Import</b> to render it.');\n $('#loadSnapshotImport').addClass('disabled');\n};\n\nvar loadSnapshotDragAndDropInitialized = false;\n\nfunction loadSnapshotDragAndDropSetup() {\n if (loadSnapshotDragAndDropInitialized === false) {\n loadSnapshotDragAndDropInitialized = true;\n $('#loadSnapshotDragAndDrop')\n .on('drag dragstart dragend dragover dragenter dragleave drop', function (e) {\n e.preventDefault();\n e.stopPropagation();\n })\n .on('drop', function (e) {\n if (e.originalEvent.dataTransfer.files.length) {\n loadSnapshotPreflightFile(e.originalEvent.dataTransfer.files.item(0));\n } else {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'No file selected');\n }\n });\n }\n};\n\nwindow.loadSnapshotPreflight = () => {\n var files = document.getElementById('loadSnapshotSelectFiles').files;\n if (!files.length) {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'No file selected');\n return;\n }\n\n loadSnapshotModalLog('info', 'Loading file...');\n\n loadSnapshotPreflightFile(files.item(0));\n}\n\n// --------------------------------------------------------------------\n// saving snapshots\n\nvar saveSnapshotStop = false;\n\nfunction saveSnapshotCancel() {\n reduxStore.dispatch(stopSnapshotModeAction())\n saveSnapshotStop = true;\n}\n\nvar saveSnapshotModalInitialized = false;\n\nfunction saveSnapshotModalSetup() {\n if (saveSnapshotModalInitialized === false) {\n saveSnapshotModalInitialized = true;\n $('#saveSnapshotModal')\n .on('hide.bs.modal', saveSnapshotCancel)\n .on('show.bs.modal', saveSnapshotModalInit)\n .on('shown.bs.modal', function () {\n $('#saveSnapshotResolutionSlider').find(\".slider-handle:first\").attr(\"tabindex\", 1);\n document.getElementById('saveSnapshotComments').focus();\n });\n }\n};\n\nfunction saveSnapshotModalLog(priority, msg) {\n document.getElementById('saveSnapshotStatus').className = \"alert alert-\" + priority;\n document.getElementById('saveSnapshotStatus').innerHTML = msg;\n}\n\nfunction saveSnapshotModalShowExpectedSize() {\n var points = Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint);\n var priority = 'info';\n var msg = 'A moderate snapshot.';\n\n var sizemb = Math.round(\n (options.data.charts_count * snapshotOptions.bytes_per_chart\n + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_disk)\n * 10 / 1024 / 1024) / 10;\n\n 
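// Worked example of the sizemb estimate just computed, with made-up dashboard
// numbers: 200 charts, 1500 dimensions, a 1-hour view at 5 s per point (720
// points), pako.deflate.base64 at 1.9 bytes per point on disk:
//   200 * 2048 + 1500 * 720 * 1.9 = 2,461,600 bytes ≈ 2.3 MB
// which lands in the "< 10 MB" branch below ("A nice small snapshot!"); memmb
// next repeats the same shape with the codec's bytes_per_point_memory rate.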
var memmb = Math.round(\n (options.data.charts_count * snapshotOptions.bytes_per_chart\n + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_memory)\n * 10 / 1024 / 1024) / 10;\n\n if (sizemb < 10) {\n priority = 'success';\n msg = 'A nice small snapshot!';\n }\n if (sizemb > 50) {\n priority = 'warning';\n msg = 'Will stress your browser...';\n }\n if (sizemb > 100) {\n priority = 'danger';\n msg = 'Hm... good luck...';\n }\n\n saveSnapshotModalLog(priority, 'The snapshot will have ' + points.toString() + ' points per dimension. Expected size on disk ' + sizemb + ' MB, at browser memory ' + memmb + ' MB.<br/>' + msg);\n}\n\nvar saveSnapshotCompression = snapshotOptions.compressionDefault;\n\nfunction saveSnapshotSetCompression(name) {\n saveSnapshotCompression = name;\n document.getElementById('saveSnapshotCompressionName').innerHTML = saveSnapshotCompression;\n saveSnapshotModalShowExpectedSize();\n}\n\nvar saveSnapshotSlider = null;\nvar saveSnapshotSelectedSecondsPerPoint = 1;\nvar saveSnapshotViewDuration = 1;\n\nfunction saveSnapshotModalInit() {\n $('#saveSnapshotModalProgressSection').hide();\n $('#saveSnapshotResolutionRadio').show();\n saveSnapshotModalLog('info', 'Select resolution and click <b>Save</b>');\n $('#saveSnapshotExport').removeClass('disabled');\n\n loadBootstrapSlider(function () {\n const reduxState = reduxStore.getState()\n saveSnapshotViewDuration = - selectDefaultAfter(reduxState)\n var start_ms = Math.round(Date.now() - saveSnapshotViewDuration * 1000);\n const globalPanAndZoom = selectGlobalPanAndZoom(reduxState)\n\n if (Boolean(globalPanAndZoom)) {\n saveSnapshotViewDuration = Math.round((globalPanAndZoom.before - globalPanAndZoom.after) / 1000);\n start_ms = globalPanAndZoom.after;\n }\n\n var start_date = new Date(start_ms);\n var yyyymmddhhssmm = start_date.getFullYear() + zeropad(start_date.getMonth() + 1) + zeropad(start_date.getDate()) + '-' + zeropad(start_date.getHours()) + zeropad(start_date.getMinutes()) + zeropad(start_date.getSeconds());\n\n document.getElementById('saveSnapshotFilename').value = 'netdata-' + options.hostname.toString() + '-' + yyyymmddhhssmm.toString() + '-' + saveSnapshotViewDuration.toString() + '.snapshot';\n saveSnapshotSetCompression(saveSnapshotCompression);\n\n var min = options.update_every;\n var max = Math.round(saveSnapshotViewDuration / 100);\n\n if (Boolean(globalPanAndZoom)) {\n max = Math.round(saveSnapshotViewDuration / 50);\n }\n\n var view = Math.round(saveSnapshotViewDuration / Math.round($(document.getElementById('charts_div')).width() / 2));\n\n if (max < 10) {\n max = 10;\n }\n if (max < min) {\n max = min;\n }\n if (view < min) {\n view = min;\n }\n if (view > max) {\n view = max;\n }\n\n if (saveSnapshotSlider !== null) {\n saveSnapshotSlider.destroy();\n }\n\n saveSnapshotSlider = new Slider('#saveSnapshotResolutionSlider', {\n ticks: [min, view, max],\n min: min,\n max: max,\n step: options.update_every,\n value: view,\n scale: (max > 100) ? 'logarithmic' : 'linear',\n tooltip: 'always',\n formatter: function (value) {\n if (value < 1) {\n value = 1;\n }\n\n if (value < options.data.update_every) {\n value = options.data.update_every;\n }\n\n saveSnapshotSelectedSecondsPerPoint = value;\n saveSnapshotModalShowExpectedSize();\n\n var seconds = ' seconds ';\n if (value === 1) {\n seconds = ' second ';\n }\n\n return value + seconds + 'per point' + ((value === options.data.update_every) ? 
', server default' : '').toString();\n }\n });\n });\n}\n\nwindow.saveSnapshot = () => {\n loadPako(function () {\n loadLzString(function () {\n saveSnapshotStop = false;\n $('#saveSnapshotModalProgressSection').show();\n $('#saveSnapshotResolutionRadio').hide();\n $('#saveSnapshotExport').addClass('disabled');\n\n var filename = document.getElementById('saveSnapshotFilename').value;\n // console.log(filename);\n saveSnapshotModalLog('info', 'Generating snapshot as <code>' + filename.toString() + '</code>');\n\n\n var el = document.getElementById('saveSnapshotModalProgressBar');\n var eltxt = document.getElementById('saveSnapshotModalProgressBarText');\n\n options.data.charts_by_name = null;\n const reduxState = reduxStore.getState()\n const defaultAfter = selectDefaultAfter(reduxState)\n\n var saveData = {\n hostname: options.hostname,\n server: serverDefault,\n netdata_version: options.data.version,\n snapshot_version: 1,\n after_ms: Date.now() + defaultAfter * 1000,\n before_ms: Date.now(),\n highlight_after_ms: urlOptions.highlight_after,\n highlight_before_ms: urlOptions.highlight_before,\n duration_ms: options.duration * 1000,\n update_every_ms: options.update_every * 1000,\n data_points: 0,\n url: ((urlOptions.server !== null) ? urlOptions.server : document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString()).toString(),\n comments: document.getElementById('saveSnapshotComments').value.toString(),\n hash: urlOptions.hash,\n charts: options.data,\n info: jsonStringifyFn({\n menu: netdataDashboard.menu,\n submenu: netdataDashboard.submenu,\n context: netdataDashboard.context\n }),\n charts_ok: 0,\n charts_failed: 0,\n compression: saveSnapshotCompression,\n data_size: 0,\n data: {}\n };\n\n if (typeof snapshotOptions.compressions[saveData.compression] === 'undefined') {\n alert('unknown compression method: ' + saveData.compression);\n saveData.compression = 'none';\n }\n\n var compress = snapshotOptions.compressions[saveData.compression].compress;\n var compressed_length = snapshotOptions.compressions[saveData.compression].compressed_length;\n\n function pack_api1_v1_chart_data({ data, chartDataUniqueID }) {\n if (data === null) {\n return 0\n }\n\n var str = JSON.stringify(data);\n\n var cstr = compress(str);\n saveData.data[chartDataUniqueID] = cstr;\n return compressed_length(cstr);\n }\n\n const globalPanAndZoom = selectGlobalPanAndZoom(reduxState)\n var clearPanAndZoom = false;\n if (!globalPanAndZoom) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: saveData.after_ms,\n before: saveData.before_ms,\n }))\n clearPanAndZoom = true;\n } else {\n saveData.after_ms = globalPanAndZoom.after\n saveData.before_ms = globalPanAndZoom.before\n }\n\n\n saveData.duration_ms = saveData.before_ms - saveData.after_ms;\n saveData.data_points = Math.round((saveData.before_ms - saveData.after_ms) / (saveSnapshotSelectedSecondsPerPoint * 1000));\n saveSnapshotModalLog('info', 'Generating snapshot with ' + saveData.data_points.toString() + ' data points per dimension...');\n\n reduxStore.dispatch(startSnapshotModeAction({\n charts: saveData.charts,\n dataPoints: saveData.data_points,\n }))\n\n\n window.saveSnapshotRestore = () => {\n $('#saveSnapshotModal').modal('hide');\n\n $(el).css('width', '0%').attr('aria-valuenow', 0);\n eltxt.innerText = '0%';\n\n reduxStore.dispatch(stopSnapshotModeAction())\n reduxStore.dispatch(snapshotExportResetAction())\n if (clearPanAndZoom) {\n // clear that afterwards\n 
reduxStore.dispatch(resetGlobalPanAndZoomAction())\n }\n\n $('#saveSnapshotExport').removeClass('disabled');\n }\n\n var size = 0;\n var info = ' Resolution: <b>' + saveSnapshotSelectedSecondsPerPoint.toString() + ((saveSnapshotSelectedSecondsPerPoint === 1) ? ' second ' : ' seconds ').toString() + 'per point</b>.';\n\n window.chartUpdated = ({ chart, chartDataUniqueID, data }) => {\n if (saveSnapshotStop === true) {\n saveSnapshotModalLog('info', 'Cancelled!');\n saveSnapshotRestore()\n }\n const state = reduxStore.getState()\n const chartsCount = selectAmountOfCharts(state)\n const chartsOk = selectAmountOfSnapshotsFetched(state) // hook\n const chartsFailed = selectAmountOfSnapshotsFailed(state)\n\n const pcent = ((chartsOk + chartsFailed) / chartsCount) * 100\n $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);\n eltxt.innerText = Math.round(pcent).toString() + '%, ' + (chart || data.id)\n\n size += pack_api1_v1_chart_data({ data, chartDataUniqueID })\n\n saveSnapshotModalLog((chartsFailed) ? 'danger' : 'info', 'Generated snapshot data size <b>' + (Math.round(size * 100 / 1024 / 1024) / 100).toString() + ' MB</b>. ' + ((chartsFailed) ? (chartsFailed.toString() + ' charts have failed to be downloaded') : '').toString() + info);\n\n window.saveData = saveData\n // better not to use equality against pcent in case of floating point errors\n if (chartsOk + chartsFailed === chartsCount) {\n saveData.charts_ok = chartsOk\n saveData.charts_failed = chartsFailed\n saveData.data_size = size\n\n saveObjectToClient(saveData, filename)\n if (chartsFailed > 0) {\n alert(`${chartsFailed} failed to be downloaded`);\n }\n saveSnapshotRestore()\n saveData = null\n }\n }\n\n // called for every chart\n function update_chart(idx) {\n if (saveSnapshotStop === true) {\n saveSnapshotModalLog('info', 'Cancelled!');\n saveSnapshotRestore();\n return;\n }\n\n var state = NETDATA.options.targets[--idx];\n\n var pcent = (NETDATA.options.targets.length - idx) * 100 / NETDATA.options.targets.length;\n $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);\n eltxt.innerText = Math.round(pcent).toString() + '%, ' + state.id;\n\n setTimeout(function () {\n charts_count++;\n state.isVisible(true);\n state.current.force_after_ms = saveData.after_ms;\n state.current.force_before_ms = saveData.before_ms;\n\n state.updateChart(function (status, reason) {\n state.current.force_after_ms = null;\n state.current.force_before_ms = null;\n\n if (status === true) {\n charts_ok++;\n // state.log('ok');\n size += pack_api1_v1_chart_data(state);\n } else {\n charts_failed++;\n state.log('failed to be updated: ' + reason);\n }\n\n saveSnapshotModalLog((charts_failed) ? 'danger' : 'info', 'Generated snapshot data size <b>' + (Math.round(size * 100 / 1024 / 1024) / 100).toString() + ' MB</b>. ' + ((charts_failed) ? 
(charts_failed.toString() + ' charts have failed to be downloaded') : '').toString() + info);\n\n if (idx > 0) {\n update_chart(idx);\n } else {\n saveData.charts_ok = charts_ok;\n saveData.charts_failed = charts_failed;\n saveData.data_size = size;\n // console.log(saveData.compression + ': ' + (size / (options.data.dimensions_count * Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint))).toString());\n\n // save it\n // console.log(saveData);\n saveObjectToClient(saveData, filename);\n\n if (charts_failed > 0) {\n alert(charts_failed.toString() + ' failed to be downloaded');\n }\n\n saveSnapshotRestore();\n saveData = null;\n }\n })\n }, 0);\n }\n\n });\n });\n}\n\n// --------------------------------------------------------------------\n// activate netdata on the page\nlet browser_timezone\ntry {\n browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;\n} catch (e) {\n console.log('failed to detect browser timezone: ' + e.toString());\n browser_timezone = 'cannot-detect-it';\n}\n\nconst getOption = (option) => {\n const state = reduxStore.getState()\n return createSelectOption(option)(state)\n}\n\n\nfunction dashboardSettingsSetup() {\n var update_options_modal = function () {\n var sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== getOption(option)) {\n // console.log('switching ' + option.toString());\n self.bootstrapToggle(getOption(option) ? 'on' : 'off');\n }\n };\n\n var theme_sync_option = function (option) {\n var self = $('#' + option);\n\n self.bootstrapToggle(netdataTheme === 'slate' ? 'on' : 'off');\n };\n var units_sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== (getOption('units') === 'auto')) {\n self.bootstrapToggle(getOption('units') === 'auto' ? 'on' : 'off');\n }\n\n if (self.prop('checked') === true) {\n $('#settingsLocaleTempRow').show();\n $('#settingsLocaleTimeRow').show();\n } else {\n $('#settingsLocaleTempRow').hide();\n $('#settingsLocaleTimeRow').hide();\n }\n };\n var temp_sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== (getOption('temperature') === 'celsius')) {\n self.bootstrapToggle(getOption('temperature') === 'celsius' ? 
'on' : 'off');\n }\n };\n\n sync_option('stop_updates_when_focus_is_lost');\n sync_option('eliminate_zero_dimensions');\n sync_option('destroy_on_hide');\n sync_option('async_on_scroll');\n\n sync_option('parallel_refresher');\n sync_option('concurrent_refreshes');\n sync_option('sync_selection');\n\n sync_option('legend_right');\n theme_sync_option('netdata_theme_control');\n sync_option('show_help');\n sync_option('pan_and_zoom_data_padding');\n sync_option('smooth_plot');\n\n units_sync_option('units_conversion');\n temp_sync_option('units_temp');\n sync_option('seconds_as_time');\n\n if (getOption('parallel_refresher') === false) {\n $('#concurrent_refreshes_row').hide();\n } else {\n $('#concurrent_refreshes_row').show();\n }\n };\n\n update_options_modal();\n\n // handle options changes\n $('#eliminate_zero_dimensions').change(function () {\n setOption('eliminate_zero_dimensions', $(this).prop('checked'));\n });\n $('#destroy_on_hide').change(function () {\n setOption('destroy_on_hide', $(this).prop('checked'));\n });\n $('#async_on_scroll').change(function () {\n setOption('async_on_scroll', $(this).prop('checked'));\n });\n $('#parallel_refresher').change(function () {\n setOption('parallel_refresher', $(this).prop('checked'));\n });\n $('#concurrent_refreshes').change(function () {\n setOption('concurrent_refreshes', $(this).prop('checked'));\n });\n $('#sync_selection').change(function () {\n setOption('sync_selection', $(this).prop('checked'));\n netdataReload();\n });\n $('#stop_updates_when_focus_is_lost').change(function () {\n urlOptions.update_always = !$(this).prop('checked');\n urlOptions.hashUpdate();\n\n setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);\n });\n $('#smooth_plot').change(function () {\n setOption('smooth_plot', $(this).prop('checked'));\n });\n $('#pan_and_zoom_data_padding').change(function () {\n setOption('pan_and_zoom_data_padding', $(this).prop('checked'));\n });\n $('#seconds_as_time').change(function () {\n setOption('seconds_as_time', $(this).prop('checked'));\n });\n\n $('#units_conversion').change(function () {\n setOption('units', $(this).prop('checked') ? 'auto' : 'original');\n update_options_modal()\n });\n $('#units_temp').change(function () {\n setOption('temperature', $(this).prop('checked') ? 'celsius' : 'fahrenheit');\n });\n\n $('#legend_right').change(function () {\n setOption('legend_right', $(this).prop('checked'));\n // reloading for now, it's much easier than rebuilding charts bootstraping in main.js\n netdataReload();\n });\n\n $('#show_help').change(function () {\n urlOptions.help = $(this).prop('checked');\n urlOptions.hashUpdate();\n\n setOption('show_help', urlOptions.help);\n netdataReload();\n });\n\n // this has to be the last\n // it reloads the page\n $('#netdata_theme_control').change(function () {\n urlOptions.theme = $(this).prop('checked') ? 
'slate' : 'white';\n urlOptions.hashUpdate();\n\n if (setTheme(urlOptions.theme)) {\n netdataReload();\n }\n });\n}\n\nconst CHART_DIV_ID_PREFIX = 'chart_'\nconst CHART_DIV_OFFSET = -50\n\nfunction scrollDashboardTo() {\n if (window.netdataSnapshotData !== null && typeof window.netdataSnapshotData.hash !== 'undefined') {\n scrollToId(window.netdataSnapshotData.hash.replace('#', ''));\n } else {\n // check if we have to jump to a specific section\n scrollToId(urlOptions.hash.replace('#', ''));\n if (urlOptions.chart !== null) {\n const chartElement = document.getElementById(`${CHART_DIV_ID_PREFIX}${name2id(urlOptions.chart)}`)\n if (chartElement) {\n const offset = chartElement.offsetTop + CHART_DIV_OFFSET;\n document.querySelector(\"html\").scrollTop = offset\n }\n }\n }\n}\n\nvar modalHiddenCallback = null;\n\nwindow.scrollToChartAfterHidingModal = (chart, alarmDate, alarmStatus) => {\n modalHiddenCallback = function () {\n\n if (typeof chart === 'string') {\n const chartElement = document.getElementById(`${CHART_DIV_ID_PREFIX}${name2id(chart)}`)\n if (chartElement) {\n const offset = chartElement.offsetTop + CHART_DIV_OFFSET;\n document.querySelector(\"html\").scrollTop = offset\n }\n }\n\n if (['WARNING', 'CRITICAL'].includes(alarmStatus)) {\n const twoMinutes = 2 * 60 * 1000\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: alarmDate - twoMinutes,\n before: alarmDate + twoMinutes,\n }))\n }\n };\n}\n\n// ----------------------------------------------------------------------------\n\nfunction enableTooltipsAndPopovers() {\n $('[data-toggle=\"tooltip\"]').tooltip({\n animated: 'fade',\n trigger: 'hover',\n html: true,\n delay: { show: 500, hide: 0 },\n container: 'body'\n });\n $('[data-toggle=\"popover\"]').popover();\n}\n\n// ----------------------------------------------------------------------------\n\nvar runOnceOnDashboardLastRun = 0;\n\nfunction runOnceOnDashboardWithjQuery() {\n if (runOnceOnDashboardLastRun !== 0) {\n scrollDashboardTo();\n\n // restore the scrollspy at the proper position\n $(document.body).scrollspy('refresh');\n $(document.body).scrollspy('process');\n\n return;\n }\n\n runOnceOnDashboardLastRun = Date.now();\n\n // ------------------------------------------------------------------------\n // bootstrap modals\n\n // prevent bootstrap modals from scrolling the page\n // maintains the current scroll position\n // https://stackoverflow.com/a/34754029/4525767\n\n var scrollPos = 0;\n var modal_depth = 0; // how many modals are currently open\n var modal_shown = false; // set to true, if a modal is shown\n var netdata_paused_on_modal = false; // set to true, if the modal paused netdata\n var scrollspyOffset = $(window).height() / 3; // will be updated below - the offset of scrollspy to select an item\n\n $('.modal')\n .on('show.bs.modal', function () {\n if (modal_depth === 0) {\n scrollPos = window.scrollY;\n\n $('body').css({\n overflow: 'hidden',\n position: 'fixed',\n top: -scrollPos\n });\n\n modal_shown = true;\n\n if (NETDATA.options.pauseCallback === null) {\n NETDATA.pause(function () {\n });\n netdata_paused_on_modal = true;\n } else {\n netdata_paused_on_modal = false;\n }\n }\n\n modal_depth++;\n //console.log(urlOptions.after);\n\n })\n .on('hide.bs.modal', function () {\n\n modal_depth--;\n\n if (modal_depth <= 0) {\n modal_depth = 0;\n\n $('body')\n .css({\n overflow: '',\n position: '',\n top: ''\n });\n\n // scroll to the position we had open before the modal\n $('html, body')\n .animate({ scrollTop: scrollPos }, 0);\n\n // unpause 
netdata, if we paused it\n if (netdata_paused_on_modal === true) {\n NETDATA.unpause();\n netdata_paused_on_modal = false;\n }\n\n // restore the scrollspy at the proper position\n $(document.body).scrollspy('process');\n }\n //console.log(urlOptions.after);\n })\n .on('hidden.bs.modal', function () {\n if (modal_depth === 0) {\n modal_shown = false;\n }\n\n if (typeof modalHiddenCallback === 'function') {\n modalHiddenCallback();\n }\n\n modalHiddenCallback = null;\n //console.log(urlOptions.after);\n });\n\n // ------------------------------------------------------------------------\n // sidebar / affix\n\n $('#sidebar')\n .affix({\n offset: {\n top: 0,\n bottom: 0\n }\n })\n .on('affixed.bs.affix', function () {\n // fix scrolling of very long affix lists\n // http://stackoverflow.com/questions/21691585/bootstrap-3-1-0-affix-too-long\n\n $(this).removeAttr('style');\n })\n .on('affix-top.bs.affix', function () {\n // fix bootstrap affix click bug\n // https://stackoverflow.com/a/37847981/4525767\n\n if (modal_shown) {\n return false;\n }\n })\n .on('activate.bs.scrollspy', function (e) {\n // change the URL based on the current position of the screen\n\n if (modal_shown === false) {\n var el = $(e.target);\n var hash = el.find('a').attr('href');\n if (typeof hash === 'string' && hash.substring(0, 1) === '#' && urlOptions.hash.startsWith(hash + '_submenu_') === false) {\n urlOptions.hash = hash;\n urlOptions.hashUpdate();\n }\n }\n });\n\n Ps.initialize(document.getElementById('sidebar'), {\n wheelSpeed: 0.5,\n wheelPropagation: true,\n swipePropagation: true,\n minScrollbarLength: null,\n maxScrollbarLength: null,\n useBothWheelAxes: false,\n suppressScrollX: true,\n suppressScrollY: false,\n scrollXMarginOffset: 0,\n scrollYMarginOffset: 0,\n theme: 'default'\n });\n\n // ------------------------------------------------------------------------\n // scrollspy\n\n if (scrollspyOffset > 250) {\n scrollspyOffset = 250;\n }\n if (scrollspyOffset < 75) {\n scrollspyOffset = 75;\n }\n document.body.setAttribute('data-offset', scrollspyOffset);\n\n // scroll the dashboard, before activating the scrollspy, so that our\n // hash will not be updated before we got the chance to scroll to it\n scrollDashboardTo();\n\n $(document.body).scrollspy({\n target: '#sidebar',\n offset: scrollspyOffset // controls the diff of the <hX> element to the top, to select it\n });\n\n // ------------------------------------------------------------------------\n // my-netdata menu\n\n $('#deleteRegistryModal')\n .on('hidden.bs.modal', function () {\n deleteRegistryGuid = null;\n });\n\n // ------------------------------------------------------------------------\n // update modal\n\n $('#updateModal')\n .on('show.bs.modal', function () {\n versionLog('checking, please wait...');\n })\n .on('shown.bs.modal', function () {\n notifyForUpdate(true);\n });\n\n // ------------------------------------------------------------------------\n // alarms modal\n\n $('#alarmsModal')\n .on('shown.bs.modal', function () {\n alarmsUpdateModal();\n })\n .on('hidden.bs.modal', function () {\n document.getElementById('alarms_active').innerHTML =\n document.getElementById('alarms_all').innerHTML =\n document.getElementById('alarms_log').innerHTML =\n 'loading...';\n });\n\n // ------------------------------------------------------------------------\n\n dashboardSettingsSetup();\n loadSnapshotDragAndDropSetup();\n saveSnapshotModalSetup();\n showPageFooter();\n\n // 
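// A condensed sketch of the scroll-lock trick the modal handlers above use
// (https://stackoverflow.com/a/34754029/4525767): freeze the body at the current
// offset while a modal is open, then restore the exact position on close.
let savedScrollY = 0;
function lockPageScroll() {
    savedScrollY = window.scrollY;
    document.body.style.overflow = 'hidden';     // stop the page behind the modal
    document.body.style.position = 'fixed';      // keep it visually in place
    document.body.style.top = -savedScrollY + 'px';
}
function unlockPageScroll() {
    document.body.style.overflow = '';
    document.body.style.position = '';
    document.body.style.top = '';
    window.scrollTo(0, savedScrollY);            // jump back to the saved offset
}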
------------------------------------------------------------------------\n // https://github.com/viralpatel/jquery.shorten/blob/master/src/jquery.shorten.js\n\n $.fn.shorten = function (settings) {\n \"use strict\";\n\n var config = {\n showChars: 750,\n minHideChars: 10,\n ellipsesText: \"...\",\n moreText: '<i class=\"fas fa-expand\"></i> show more information',\n lessText: '<i class=\"fas fa-compress\"></i> show less information',\n onLess: function () {\n NETDATA.onscroll();\n },\n onMore: function () {\n NETDATA.onscroll();\n },\n errMsg: null,\n force: false\n };\n\n if (settings) {\n $.extend(config, settings);\n }\n\n if ($(this).data('jquery.shorten') && !config.force) {\n return false;\n }\n $(this).data('jquery.shorten', true);\n\n $(document).off(\"click\", '.morelink');\n\n $(document).on({\n click: function () {\n\n var $this = $(this);\n if ($this.hasClass('less')) {\n $this.removeClass('less');\n $this.html(config.moreText);\n $this.parent().prev().animate({ 'height': '0' + '%' }, 0, function () {\n $this.parent().prev().prev().show();\n }).hide(0, function () {\n config.onLess();\n });\n } else {\n $this.addClass('less');\n $this.html(config.lessText);\n $this.parent().prev().animate({ 'height': '100' + '%' }, 0, function () {\n $this.parent().prev().prev().hide();\n }).show(0, function () {\n config.onMore();\n });\n }\n return false;\n }\n }, '.morelink');\n\n return this.each(function () {\n var $this = $(this);\n\n var content = $this.html();\n var contentlen = $this.text().length;\n if (contentlen > config.showChars + config.minHideChars) {\n var c = content.substr(0, config.showChars);\n if (c.indexOf('<') >= 0) // If there's HTML don't want to cut it\n {\n var inTag = false; // I'm in a tag?\n var bag = ''; // Put the characters to be shown here\n var countChars = 0; // Current bag size\n var openTags = []; // Stack for opened tags, so I can close them later\n var tagName = null;\n\n for (var i = 0, r = 0; r <= config.showChars; i++) {\n if (content[i] === '<' && !inTag) {\n inTag = true;\n\n // This could be \"tag\" or \"/tag\"\n tagName = content.substring(i + 1, content.indexOf('>', i));\n\n // If its a closing tag\n if (tagName[0] === '/') {\n\n if (tagName !== ('/' + openTags[0])) {\n config.errMsg = 'ERROR en HTML: the top of the stack should be the tag that closes';\n } else {\n openTags.shift(); // Pops the last tag from the open tag stack (the tag is closed in the retult HTML!)\n }\n\n } else {\n // There are some nasty tags that don't have a close tag like <br/>\n if (tagName.toLowerCase() !== 'br') {\n openTags.unshift(tagName); // Add to start the name of the tag that opens\n }\n }\n }\n\n if (inTag && content[i] === '>') {\n inTag = false;\n }\n\n if (inTag) {\n bag += content.charAt(i);\n } else {\n // Add tag name chars to the result\n r++;\n if (countChars <= config.showChars) {\n bag += content.charAt(i); // Fix to ie 7 not allowing you to reference string characters using the []\n countChars++;\n } else {\n // Now I have the characters needed\n if (openTags.length > 0) {\n // I have unclosed tags\n\n for (var j = 0; j < openTags.length; j++) {\n bag += '</' + openTags[j] + '>'; // Close all tags that were opened\n\n // You could shift the tag from the stack to check if you end with an empty stack, that means you have closed all open tags\n }\n break;\n }\n }\n }\n }\n c = $('<div/>').html(bag + '<span class=\"ellip\">' + config.ellipsesText + '</span>').html();\n } else {\n c += config.ellipsesText;\n }\n\n var html = '<div class=\"shortcontent\">' 
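// The truncation loop above, reduced to its core idea: walk the HTML, count only
// visible characters against the budget, keep a stack of open tags, and close
// whatever is still open when the budget runs out (void tags like <br> are never
// pushed). truncateHtml is an illustrative reduction, not the plugin's API:
function truncateHtml(html, maxChars) {
    let out = '', shown = 0, openTags = [];
    for (let i = 0; i < html.length && shown < maxChars; i++) {
        if (html[i] === '<') {
            const end = html.indexOf('>', i);
            const tag = html.slice(i + 1, end);
            if (tag[0] === '/') openTags.shift();               // closing tag pops
            else if (!/^br\b/i.test(tag)) openTags.unshift(tag.split(/\s/)[0]);
            out += html.slice(i, end + 1);
            i = end;                                            // skip past the tag
        } else {
            out += html[i];
            shown++;                                            // only text counts
        }
    }
    return out + openTags.map((t) => '</' + t + '>').join('');  // close leftovers
}
// truncateHtml('<b>alpha <i>beta</i> gamma</b>', 7) === '<b>alpha <i>b</i></b>'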
+ c +\n '</div><div class=\"allcontent\">' + content +\n '</div><span><a href=\"javascript://nop/\" class=\"morelink\">' + config.moreText + '</a></span>';\n\n $this.html(html);\n $this.find(\".allcontent\").hide(); // Hide all text\n $('.shortcontent p:last', $this).css('margin-bottom', 0); //Remove bottom margin on last paragraph as it's likely shortened\n }\n });\n };\n}\n\nfunction finalizePage() {\n if (urlOptions.after < 0) {\n reduxStore.dispatch(setDefaultAfterAction({ after: urlOptions.after }))\n } else if (urlOptions.pan_and_zoom === true) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: urlOptions.after,\n before: urlOptions.before,\n }))\n }\n\n // resize all charts - without starting the background thread\n // this has to be done while NETDATA is paused\n // if we omit this, the affix menu will be wrong, since all\n // the Dom elements are initially zero-sized\n NETDATA.parseDom();\n\n // let it run (update the charts)\n NETDATA.unpause();\n\n runOnceOnDashboardWithjQuery();\n $(\".shorten\").shorten();\n enableTooltipsAndPopovers();\n\n if (!isDemo) {\n notifyForUpdate();\n }\n\n if (urlOptions.show_alarms === true) {\n setTimeout(function () {\n $('#alarmsModal').modal('show');\n }, 1000);\n }\n\n NETDATA.onresizeCallback = function () {\n Ps.update(document.getElementById('sidebar'));\n };\n NETDATA.onresizeCallback();\n\n if (window.netdataSnapshotData !== null) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: window.netdataSnapshotData.after_ms,\n before: window.netdataSnapshotData.before_ms,\n }))\n }\n}\n\nwindow.resetDashboardOptions = () => {\n reduxStore.dispatch(resetOptionsAction())\n\n // it's dirty, but this will be rewritten anyway\n urlOptions.update_always = false;\n urlOptions.help = false;\n urlOptions.theme = \"slate\";\n urlOptions.hashUpdate();\n\n netdataReload()\n}\n\n// callback to add the dashboard info to the\n// parallel javascript downloader in netdata\nexport const netdataPrepCallback = () => {\n if (isDemo) {\n document.getElementById('masthead').style.display = 'block';\n } else {\n if (urlOptions.update_always === true) {\n setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);\n }\n }\n};\n\nwindow.selected_server_timezone = function (timezone, status) {\n // clear the error\n document.getElementById('timezone_error_message').innerHTML = '';\n\n if (typeof status === 'undefined') {\n // the user selected a timezone from the menu\n\n setOption('user_set_server_timezone', timezone);\n\n if (!isProperTimezone(timezone)) {\n setOption(\"timezone\", \"default\")\n\n if (!$('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('on');\n }\n\n document.getElementById('timezone_error_message').innerHTML = 'Ooops! That timezone was not accepted by your browser. 
Please open a github issue to help us fix it.';\n setOption('user_set_server_timezone', options.timezone);\n } else {\n if ($('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('off');\n }\n setOption(\"timezone\", timezone)\n }\n } else if (status === true) {\n // the user wants the browser default timezone to be activated\n setOption(\"timezone\", \"default\")\n } else {\n // the user wants the server default timezone to be activated\n\n let userSetServerTimezone = getOption(\"user_set_server_timezone\")\n if (userSetServerTimezone === 'default') {\n setOption(\"user_set_server_timezone\", options.timezone) // timezone from /charts endpoint\n userSetServerTimezone = options.timezone\n }\n\n if (!isProperTimezone(userSetServerTimezone)) {\n setOption(\"timezone\", \"default\");\n\n if (!$('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('on');\n }\n\n document.getElementById('timezone_error_message').innerHTML = 'Sorry. The timezone \"' + timezone.toString() + '\" is not accepted by your browser. Please select one from the list.';\n setOption('user_set_server_timezone', options.timezone);\n } else {\n setOption(\"timezone\", userSetServerTimezone)\n }\n }\n\n const timezoneOption = getOption(\"timezone\")\n document.getElementById('current_timezone').innerText = (timezoneOption === 'default') ? 'unset, using browser default' : timezoneOption;\n return false;\n};\n\nexport var netdataCallback = initializeDynamicDashboard;\n\nwindow.showSignInModal = function() {\n document.getElementById(\"sim-registry\").innerHTML = getFromRegistry(\"registryServer\");\n $(\"#signInModal\").modal(\"show\");\n}\n\nwindow.explicitlySignIn = () => {\n $(\"#signInModal\").modal(\"hide\");\n reduxStore.dispatch(explicitlySignInAction())\n};\n","import { AlarmStatus } from \"domains/global/types\"\n\nexport const storeKey = \"global\"\n\nexport const TEMPORARY_MAIN_JS_TIMEOUT = 1000\n\nexport const MASKED_DATA = \"***\"\n\nexport const NOTIFICATIONS_TIMEOUT = 5000\n\nexport const INFO_POLLING_FREQUENCY = 5000\n\nexport const CLOUD_BASE_URL_DISABLED = \"CLOUD_BASE_URL_DISABLED\"\n\nexport const alarmStatuses: AlarmStatus[] = [\"WARNING\", \"ERROR\", \"REMOVED\", \"UNDEFINED\", \"UNINITIALIZED\", \"CLEAR\", \"CRITICAL\"]\n","import React, { useMemo } from \"react\"\nimport { CloudConnectionProps, ConnectionModalStatusContent } from \"./types\"\nimport Anchor from \"@/src/components/anchor\"\n\nimport { Text } from \"@netdata/netdata-ui\"\n\nexport const makeCloudConnectionStatusInfo = ({\n nodeStatus,\n userStatus,\n date,\n}: CloudConnectionProps): ConnectionModalStatusContent => ({\n title: \"Netdata Cloud connection status\",\n text: {\n header: () => {\n return (\n <Text>\n This node is currently{\" \"}\n <Text strong>{nodeStatus === \"LIVE\" ? \"Connected\" : \"Not Connected\"}</Text> to Netdata\n Cloud\n </Text>\n )\n },\n bullets:\n nodeStatus === \"NOT_LIVE\"\n ? [\n // `The node lost its Netdata Cloud connection at ${date}`,\n () => (\n <Text>\n To troubleshoot Netdata Cloud connection issues, please follow{\" \"}\n <Anchor\n target=\"_blank\"\n rel=\"noopener noreferrer\"\n href=\"https://learn.netdata.cloud/docs/agent/claim#troubleshooting\"\n >\n this guide\n </Anchor>\n .\n </Text>\n ),\n ]\n : [],\n footer: () => (\n <Text>\n You are{\" \"}\n <Text strong>\n {userStatus === \"LOGGED_IN\"\n ? \"Logged In\"\n : userStatus === \"EXPIRED_LOGIN\"\n ? 
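// selected_server_timezone above leans on isProperTimezone (defined elsewhere in
// the bundle) to decide whether the browser accepts a zone name. The portable way
// to test one is to hand it to Intl.DateTimeFormat, which throws a RangeError for
// unknown zones — a sketch of that check:
function isValidTimezone(tz) {
    try {
        new Intl.DateTimeFormat(undefined, { timeZone: tz });
        return true;                     // the browser accepted the zone name
    } catch (e) {
        return false;                    // unknown zone -> fall back to "default"
    }
}
// isValidTimezone('Europe/Athens') === true; isValidTimezone('Mars/Olympus') === false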
\"Logged out\"\n : \"Not signed-up\"}\n </Text>{\" \"}\n to Netdata Cloud\n </Text>\n ),\n },\n CTA1: {\n text: \"Take me to Netdata Cloud\",\n },\n})\n\nconst useCloudConnectionStatus = ({ userStatus, nodeStatus, date }: CloudConnectionProps) => {\n const cloudConnectionStatusInfo = useMemo<ConnectionModalStatusContent>(() => {\n return makeCloudConnectionStatusInfo({ userStatus, nodeStatus, date })\n }, [userStatus, nodeStatus, date])\n\n return cloudConnectionStatusInfo\n}\n\nexport default useCloudConnectionStatus\n","import React, { useCallback } from \"react\"\nimport GoToCloud from \"components/auth/signIn\"\n\nimport {\n Modal,\n ModalContent,\n ModalBody,\n ModalFooter,\n ModalHeader,\n Text,\n Flex,\n H3,\n Button,\n Box,\n ModalCloseButton,\n} from \"@netdata/netdata-ui\"\n\nimport { ConnectionModalStatusContent } from \"./types\"\n\nconst campaign = \"agent_nudge_to_cloud\"\n\nexport type CloudConnectionStatusModalProps = ConnectionModalStatusContent & {\n closeModal: () => void\n onRefresh?: () => void\n isCTA1Disabled: boolean\n}\n\nconst CloudConnectionStatusModal = ({\n title,\n text,\n CTA1,\n closeModal,\n onRefresh,\n isCTA1Disabled,\n}: CloudConnectionStatusModalProps) => {\n const handleClickedCTA1 = useCallback(\n ({ link }: { link: string }) => {\n closeModal()\n window.location.href = link\n },\n [closeModal]\n )\n\n return (\n <Modal>\n <ModalContent width={180} background=\"modalBackground\">\n <ModalHeader>\n <H3 margin={[0]}>{title}</H3>\n <ModalCloseButton onClose={closeModal} />\n </ModalHeader>\n <ModalBody>\n <Flex padding={[0, 0, 4, 0]} column gap={3}>\n {text.header({})}\n {text.bullets.length > 0 && (\n <Flex column gap={3}>\n <Flex column gap={1} as={\"ul\"}>\n {text.bullets.map((bullet, index) => {\n if (typeof bullet === \"function\") {\n return <li key={index}> {bullet()}</li>\n }\n return (\n <li key={bullet}>\n <Text>{bullet}</Text>\n </li>\n )\n })}\n </Flex>\n </Flex>\n )}\n {text.footer()}\n </Flex>\n </ModalBody>\n <ModalFooter>\n <Box data-testid=\"cta1\" margin={[0, 2, 0, 0]} width={{ min: 40 }}>\n <GoToCloud utmParameters={{ content: \"connection_to_cloud\", campaign }}>\n {({ link }) => (\n <Button\n data-ga={`connection-to-cloud::click-ct1::ad`}\n disabled={isCTA1Disabled}\n textTransform=\"none\"\n data-testid=\"cta1-button\"\n onClick={() => handleClickedCTA1({ link })}\n width=\"100%\"\n label={CTA1.text}\n />\n )}\n </GoToCloud>\n </Box>\n <Box\n data-ga={`connection-to-cloud::click-check-now::ad`}\n onClick={onRefresh}\n height={10}\n className=\"btn btn-default\"\n data-testid=\"cta2-button\"\n width={{ min: 40 }}\n >\n <Box as={Text} sx={{ fontWeight: \"500\", lineHeight: \"25px\" }}>\n Check Now\n </Box>\n </Box>\n </ModalFooter>\n </ModalContent>\n </Modal>\n )\n}\n\nexport default CloudConnectionStatusModal\n","import React, { useState, useCallback, useEffect } from \"react\"\nimport useCloudConnectionStatus from \"./use-cloud-connection-status\"\nimport CloudConnectionStatusModal from \"./cloud-connection-status-modal\"\n\nimport { Pill, Flex } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"react-redux\"\nimport { useRequestRefreshOfAccessMessage } from \"hooks/use-user-node-access\"\nimport { selectUserNodeAccess } from \"domains/global/selectors\"\nimport { PromoProps } from \"@/src/domains/dashboard/components/migration-modal\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\n\nconst CloudConnectionStatus = () => {\n const userNodeAccess = useSelector(selectUserNodeAccess) as PromoProps\n const 
cloudEnabled = useSelector(selectIsCloudEnabled)\n\n const [isModalOpen, setModalOpen] = useState(false)\n const cloudConnectionStatusInfo = useCloudConnectionStatus({\n userStatus: userNodeAccess?.userStatus || \"UNKNOWN\",\n nodeStatus: userNodeAccess?.nodeLiveness || \"NOT_LIVE\",\n date: \"\",\n })\n\n useEffect(() => {\n if (isModalOpen) {\n document.documentElement.style.overflow = \"hidden\"\n } else {\n document.documentElement.style.overflow = \"auto\"\n }\n }, [isModalOpen])\n\n const openModal = useCallback(() => {\n setModalOpen(true)\n }, [])\n\n const closeModal = useCallback(() => {\n setModalOpen(false)\n }, [])\n\n const onRefresh = useRequestRefreshOfAccessMessage()\n\n if (!cloudEnabled) return null\n\n return (\n <Flex column>\n <Pill\n data-ga={`connection-to-cloud::click-pill::ad`}\n data-testid=\"header-connection-to-cloud-button\"\n onClick={openModal}\n flavour=\"neutral\"\n >\n Connection to Cloud\n </Pill>\n {isModalOpen && (\n <CloudConnectionStatusModal\n {...cloudConnectionStatusInfo}\n isCTA1Disabled={userNodeAccess?.nodeLiveness !== \"LIVE\"}\n closeModal={closeModal}\n onRefresh={onRefresh}\n />\n )}\n </Flex>\n )\n}\n\nexport default CloudConnectionStatus\n","import { equals } from \"ramda\"\n\n// we use numbers to specify time. it can be either a timestamp (ms), or a relative value in seconds\n// which is always 0 or less (0 is now, -300 is -5 minutes)\n\nexport const isTimestamp = (x: number) => x > 0\n\nexport const NETDATA_REGISTRY_SERVER = \"https://registry.my-netdata.io\"\n\nexport const MS_IN_SECOND = 1000\nexport const NODE_VIEW_DYGRAPH_TITLE_HEIGHT = 30\nexport const DEFAULT_DASHBOARD_DURATION = 5 * 60\n\nexport const getIframeSrc = (cloudBaseURL: string, path: string) => `${cloudBaseURL}/sso/v2/${path}`\nexport const utmUrlSuffix = \"&utm_source=agent&utm_medium=web\"\n\nexport const getInitialAfterFromWindow = () => {\n const div = document.getElementById(\"charts_div\")\n if (!div) {\n // eslint-disable-next-line no-console\n console.error(\"Couldn't find '.charts_div' element to calculate width\")\n return -900\n }\n // based on https://github.com/netdata/dashboard/blob/7a7b538b00f1c5a4e1550f69cb5333212bb68f95/src/main.js#L1753\n // eslint-disable-next-line max-len\n // var duration = Math.round(($(div).width() * pcent_width / 100 * data.update_every / 3) / 60) * 60;\n return -Math.round(div.getBoundingClientRect().width / 3 / 60) * 60\n}\n\nexport const SPACE_PANEL_STATE = \"space-panel-state\"\n\nexport const useNewKeysOnlyIfDifferent = <T extends {}>(\n keys: (keyof T)[],\n obj1: T | null,\n obj2: T\n): T => {\n if (!obj1) {\n return obj2\n }\n return keys.reduce<T>(\n (acc, key) => ({\n ...acc,\n [key]: equals(obj1[key], obj2![key]) ? obj1[key] : obj2[key],\n }),\n obj2\n )\n}\n\nexport type AnyFunction<T = any> = (...args: T[]) => any\n\nexport type FunctionArguments<T extends Function> = T extends (...args: infer R) => any ? R : never\n\nexport function callAll<T extends AnyFunction>(...fns: (T | undefined)[]) {\n return function mergedFn(arg: FunctionArguments<T>[0]) {\n fns.forEach(fn => {\n fn?.(arg)\n })\n }\n}\n","/* eslint-disable */\n/*!\n * d3pie\n * @author Ben Keen\n * @version 0.1.9\n * @date June 17th, 2015\n * @repo http://github.com/benkeen/d3pie\n * SPDX-License-Identifier: MIT\n */\n\n// UMD pattern from https://github.com/umdjs/umd/blob/master/returnExports.js\n(function(root, factory) {\n if (typeof define === 'function' && define.amd) {\n // AMD. 
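// The time convention documented above (positive numbers are absolute timestamps
// in ms; zero or negative numbers are seconds relative to now) often needs
// normalizing to absolute ms before a data request. A hypothetical helper built
// on the isTimestamp and MS_IN_SECOND exports above:
const toAbsoluteMs = (x, nowMs = Date.now()) =>
    isTimestamp(x) ? x : nowMs + x * MS_IN_SECOND;    // -300 -> five minutes ago
// toAbsoluteMs(-300) === Date.now() - 300000; a full timestamp passes through as-is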
Register as an anonymous module\n define([], factory);\n } else if (typeof exports === 'object') {\n // Node. Does not work with strict CommonJS, but only CommonJS-like environments that support module.exports,\n // like Node\n module.exports = factory();\n } else {\n // browser globals (root is window)\n root.d3pie = factory(root);\n }\n}(this, function() {\n\n var _scriptName = \"d3pie\";\n var _version = \"0.2.1\";\n\n // used to uniquely generate IDs and classes, ensuring no conflict between multiple pies on the same page\n var _uniqueIDCounter = 0;\n\n\n // this section includes all helper libs on the d3pie object. They're populated via grunt-template. Note: to keep\n // the syntax highlighting from getting all messed up, I commented out each line. That REQUIRES each of the files\n // to have an empty first line. Crumby, yes, but acceptable.\n //// --------- _default-settings.js -----------/**\n/**\n * Contains the out-the-box settings for the script. Any of these settings that aren't explicitly overridden for the\n * d3pie instance will inherit from these. This is also included on the main website for use in the generation script.\n */\nvar defaultSettings = {\n header: {\n title: {\n text: \"\",\n color: \"#333333\",\n fontSize: 18,\n fontWeight: \"bold\",\n font: \"arial\"\n },\n subtitle: {\n text: \"\",\n color: \"#666666\",\n fontSize: 14,\n fontWeight: \"bold\",\n font: \"arial\"\n },\n location: \"top-center\",\n titleSubtitlePadding: 8\n },\n footer: {\n text: \t \"\",\n color: \"#666666\",\n fontSize: 14,\n fontWeight: \"bold\",\n font: \"arial\",\n location: \"left\"\n },\n size: {\n canvasHeight: 500,\n canvasWidth: 500,\n pieInnerRadius: \"0%\",\n pieOuterRadius: null\n },\n data: {\n sortOrder: \"none\",\n ignoreSmallSegments: {\n enabled: false,\n valueType: \"percentage\",\n value: null\n },\n smallSegmentGrouping: {\n enabled: false,\n value: 1,\n valueType: \"percentage\",\n label: \"Other\",\n color: \"#cccccc\"\n },\n content: []\n },\n labels: {\n outer: {\n format: \"label\",\n hideWhenLessThanPercentage: null,\n pieDistance: 30\n },\n inner: {\n format: \"percentage\",\n hideWhenLessThanPercentage: null\n },\n mainLabel: {\n color: \"#333333\",\n font: \"arial\",\n fontWeight: \"normal\",\n fontSize: 10\n },\n percentage: {\n color: \"#dddddd\",\n font: \"arial\",\n fontWeight: \"bold\",\n fontSize: 10,\n decimalPlaces: 0\n },\n value: {\n color: \"#cccc44\",\n fontWeight: \"bold\",\n font: \"arial\",\n fontSize: 10\n },\n lines: {\n enabled: true,\n style: \"curved\",\n color: \"segment\"\n },\n truncation: {\n enabled: false,\n truncateLength: 30\n },\n formatter: null\n },\n effects: {\n load: {\n effect: \"none\", // \"default\", commented in the code\n speed: 1000\n },\n pullOutSegmentOnClick: {\n effect: \"none\", // \"bounce\", commented in the code\n speed: 300,\n size: 10\n },\n highlightSegmentOnMouseover: false,\n highlightLuminosity: -0.2\n },\n tooltips: {\n enabled: false,\n type: \"placeholder\", // caption|placeholder\n string: \"\",\n placeholderParser: null,\n styles: {\n fadeInSpeed: 250,\n backgroundColor: \"#000000\",\n backgroundOpacity: 0.5,\n color: \"#efefef\",\n borderRadius: 2,\n font: \"arial\",\n fontWeight: \"bold\",\n fontSize: 10,\n padding: 4\n }\n },\n misc: {\n colors: {\n background: null,\n segments: [\n \"#2484c1\", \"#65a620\", \"#7b6888\", \"#a05d56\", \"#961a1a\", \"#d8d23a\", \"#e98125\", \"#d0743c\", \"#635222\", \"#6ada6a\",\n \"#0c6197\", \"#7d9058\", \"#207f33\", \"#44b9b0\", \"#bca44a\", \"#e4a14b\", \"#a3acb2\", 
\"#8cc3e9\", \"#69a6f9\", \"#5b388f\",\n \"#546e91\", \"#8bde95\", \"#d2ab58\", \"#273c71\", \"#98bf6e\", \"#4daa4b\", \"#98abc5\", \"#cc1010\", \"#31383b\", \"#006391\",\n \"#c2643f\", \"#b0a474\", \"#a5a39c\", \"#a9c2bc\", \"#22af8c\", \"#7fcecf\", \"#987ac6\", \"#3d3b87\", \"#b77b1c\", \"#c9c2b6\",\n \"#807ece\", \"#8db27c\", \"#be66a2\", \"#9ed3c6\", \"#00644b\", \"#005064\", \"#77979f\", \"#77e079\", \"#9c73ab\", \"#1f79a7\"\n ],\n segmentStroke: \"#ffffff\"\n },\n gradient: {\n enabled: false,\n percentage: 95,\n color: \"#000000\"\n },\n canvasPadding: {\n top: 5,\n right: 5,\n bottom: 5,\n left: 5\n },\n pieCenterOffset: {\n x: 0,\n y: 0\n },\n cssPrefix: null\n },\n callbacks: {\n onload: null,\n onMouseoverSegment: null,\n onMouseoutSegment: null,\n onClickSegment: null\n }\n};\n\n //// --------- validate.js -----------\nvar validate = {\n\n // called whenever a new pie chart is created\n initialCheck: function(pie) {\n var cssPrefix = pie.cssPrefix;\n var element = pie.element;\n var options = pie.options;\n\n // confirm d3 is available [check minimum version]\n if (!window.d3 || !window.d3.hasOwnProperty(\"version\")) {\n console.error(\"d3pie error: d3 is not available\");\n return false;\n }\n\n // confirm element is either a DOM element or a valid string for a DOM element\n if (!(element instanceof HTMLElement || element instanceof SVGElement)) {\n console.error(\"d3pie error: the first d3pie() param must be a valid DOM element (not jQuery) or a ID string.\");\n return false;\n }\n\n // confirm the CSS prefix is valid. It has to start with a-Z and contain nothing but a-Z0-9_-\n if (!(/[a-zA-Z][a-zA-Z0-9_-]*$/.test(cssPrefix))) {\n console.error(\"d3pie error: invalid options.misc.cssPrefix\");\n return false;\n }\n\n // confirm some data has been supplied\n if (!helpers.isArray(options.data.content)) {\n console.error(\"d3pie error: invalid config structure: missing data.content property.\");\n return false;\n }\n if (options.data.content.length === 0) {\n console.error(\"d3pie error: no data supplied.\");\n return false;\n }\n\n // clear out any invalid data. 
Each data row needs a valid positive number and a label\n var data = [];\n for (var i=0; i<options.data.content.length; i++) {\n if (typeof options.data.content[i].value !== \"number\" || isNaN(options.data.content[i].value)) {\n console.log(\"not valid: \", options.data.content[i]);\n continue;\n }\n if (options.data.content[i].value <= 0) {\n console.log(\"not valid - should have positive value: \", options.data.content[i]);\n continue;\n }\n data.push(options.data.content[i]);\n }\n pie.options.data.content = data;\n\n // labels.outer.hideWhenLessThanPercentage - 1-100\n // labels.inner.hideWhenLessThanPercentage - 1-100\n\n return true;\n }\n};\n\n //// --------- helpers.js -----------\nvar helpers = {\n\n // creates the SVG element\n addSVGSpace: function(pie) {\n var element = pie.element;\n var canvasWidth = pie.options.size.canvasWidth;\n var canvasHeight = pie.options.size.canvasHeight;\n var backgroundColor = pie.options.misc.colors.background;\n\n var svg = d3.select(element).append(\"svg:svg\")\n .attr(\"width\", canvasWidth)\n .attr(\"height\", canvasHeight);\n\n if (backgroundColor !== \"transparent\") {\n svg.style(\"background-color\", function() { return backgroundColor; });\n }\n\n return svg;\n },\n\n // Fisher-Yates shuffle, used for the \"random\" sort order\n shuffleArray: function(array) {\n var currentIndex = array.length, tmpVal, randomIndex;\n\n while (0 !== currentIndex) {\n // pick one of the remaining elements...\n randomIndex = Math.floor(Math.random() * currentIndex);\n currentIndex -= 1;\n\n // ...and swap it with the current element\n tmpVal = array[currentIndex];\n array[currentIndex] = array[randomIndex];\n array[randomIndex] = tmpVal;\n }\n return array;\n },\n\n processObj: function(obj, is, value) {\n if (typeof is === 'string') {\n return helpers.processObj(obj, is.split('.'), value);\n } else if (is.length === 1 && value !== undefined) {\n obj[is[0]] = value;\n return obj[is[0]];\n } else if (is.length === 0) {\n return obj;\n } else {\n return helpers.processObj(obj[is[0]], is.slice(1), value);\n }\n },\n\n getDimensions: function(el) {\n var id = el; // keep the original argument around for the error message below\n if(typeof el === 'string')\n el = document.getElementById(el);\n\n var w = 0, h = 0;\n if (el) {\n var dimensions = el.getBBox();\n w = dimensions.width;\n h = dimensions.height;\n }\n else {\n console.log(\"error: getDimensions() \" + id + \" not found.\");\n }\n\n return { w: w, h: h };\n },\n\n /**\n * This is based on the SVG coordinate system, where top-left is 0,0 and bottom right is n-n.\n * @param r1\n * @param r2\n * @returns {boolean}\n */\n rectIntersect: function(r1, r2) {\n var returnVal = (\n // r2.left > r1.right\n (r2.x > (r1.x + r1.w)) ||\n\n // r2.right < r1.left\n ((r2.x + r2.w) < r1.x) ||\n\n // r2.top < r1.bottom\n ((r2.y + r2.h) < r1.y) ||\n\n // r2.bottom > r1.top\n (r2.y > (r1.y + r1.h))\n );\n\n return !returnVal;\n },\n\n /**\n * Returns a lighter/darker shade of a hex value, based on a luminance value passed.\n * @param hex a hex color value such as “#abc” or “#123456” (the hash is optional)\n * @param lum the luminosity factor: -0.1 is 10% darker, 0.2 is 20% lighter, etc.\n * @returns {string}\n */\n getColorShade: function(hex, lum) {\n\n // validate hex string\n hex = String(hex).replace(/[^0-9a-f]/gi, '');\n if (hex.length < 6) {\n hex = hex[0]+hex[0]+hex[1]+hex[1]+hex[2]+hex[2];\n }\n lum = lum || 0;\n\n // convert to decimal and change luminosity\n var newHex = \"#\";\n for (var i=0; i<3; i++) {\n var c = parseInt(hex.substr(i * 2, 2), 16);\n c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16);\n newHex += (\"00\" + c).substr(c.length);\n }\n\n return newHex;\n },\n\n 
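// (Worked example for getColorShade above: getColorShade(\"#336699\", -0.2) scales each RGB channel by 0.8 and rounds, yielding \"#29527a\".)\n\n 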
/**\n * Users can choose to specify segment colors in three ways (in order of precedence):\n * \t1. include a \"color\" attribute for each row in data.content\n * \t2. include a misc.colors.segments property which contains an array of hex codes\n * \t3. specify nothing at all and rely on this lib provide some reasonable defaults\n *\n * This function sees what's included and populates this.options.colors with whatever's required\n * for this pie chart.\n * @param data\n */\n initSegmentColors: function(pie) {\n var data = pie.options.data.content;\n var colors = pie.options.misc.colors.segments;\n\n // TODO this needs a ton of error handling\n\n var finalColors = [];\n for (var i=0; i<data.length; i++) {\n if (data[i].hasOwnProperty(\"color\")) {\n finalColors.push(data[i].color);\n } else {\n finalColors.push(colors[i]);\n }\n }\n\n return finalColors;\n },\n\n applySmallSegmentGrouping: function(data, smallSegmentGrouping) {\n var totalSize;\n if (smallSegmentGrouping.valueType === \"percentage\") {\n totalSize = math.getTotalPieSize(data);\n }\n\n // loop through each data item\n var newData = [];\n var groupedData = [];\n var totalGroupedData = 0;\n for (var i=0; i<data.length; i++) {\n if (smallSegmentGrouping.valueType === \"percentage\") {\n var dataPercent = (data[i].value / totalSize) * 100;\n if (dataPercent <= smallSegmentGrouping.value) {\n groupedData.push(data[i]);\n totalGroupedData += data[i].value;\n continue;\n }\n data[i].isGrouped = false;\n newData.push(data[i]);\n } else {\n if (data[i].value <= smallSegmentGrouping.value) {\n groupedData.push(data[i]);\n totalGroupedData += data[i].value;\n continue;\n }\n data[i].isGrouped = false;\n newData.push(data[i]);\n }\n }\n\n // we're done! See if there's any small segment groups to add\n if (groupedData.length) {\n newData.push({\n color: smallSegmentGrouping.color,\n label: smallSegmentGrouping.label,\n value: totalGroupedData,\n isGrouped: true,\n groupedData: groupedData\n });\n }\n\n return newData;\n },\n\n // for debugging\n showPoint: function(svg, x, y) {\n svg.append(\"circle\").attr(\"cx\", x).attr(\"cy\", y).attr(\"r\", 2).style(\"fill\", \"black\");\n },\n\n isFunction: function(functionToCheck) {\n var getType = {};\n return functionToCheck && getType.toString.call(functionToCheck) === '[object Function]';\n },\n\n isArray: function(o) {\n return Object.prototype.toString.call(o) === '[object Array]';\n }\n};\n\n\n// taken from jQuery\nvar extend = function() {\n var options, name, src, copy, copyIsArray, clone, target = arguments[0] || {},\n i = 1,\n length = arguments.length,\n deep = false,\n toString = Object.prototype.toString,\n hasOwn = Object.prototype.hasOwnProperty,\n class2type = {\n \"[object Boolean]\": \"boolean\",\n \"[object Number]\": \"number\",\n \"[object String]\": \"string\",\n \"[object Function]\": \"function\",\n \"[object Array]\": \"array\",\n \"[object Date]\": \"date\",\n \"[object RegExp]\": \"regexp\",\n \"[object Object]\": \"object\"\n },\n\n jQuery = {\n isFunction: function (obj) {\n return jQuery.type(obj) === \"function\";\n },\n isArray: Array.isArray ||\n function (obj) {\n return jQuery.type(obj) === \"array\";\n },\n isWindow: function (obj) {\n return obj !== null && obj === obj.window;\n },\n isNumeric: function (obj) {\n return !isNaN(parseFloat(obj)) && isFinite(obj);\n },\n type: function (obj) {\n return obj === null ? 
String(obj) : class2type[toString.call(obj)] || \"object\";\n },\n isPlainObject: function (obj) {\n if (!obj || jQuery.type(obj) !== \"object\" || obj.nodeType) {\n return false;\n }\n try {\n if (obj.constructor && !hasOwn.call(obj, \"constructor\") && !hasOwn.call(obj.constructor.prototype, \"isPrototypeOf\")) {\n return false;\n }\n } catch (e) {\n return false;\n }\n var key;\n for (key in obj) {}\n return key === undefined || hasOwn.call(obj, key);\n }\n };\n if (typeof target === \"boolean\") {\n deep = target;\n target = arguments[1] || {};\n i = 2;\n }\n if (typeof target !== \"object\" && !jQuery.isFunction(target)) {\n target = {};\n }\n if (length === i) {\n target = this;\n --i;\n }\n for (i; i < length; i++) {\n if ((options = arguments[i]) !== null) {\n for (name in options) {\n src = target[name];\n copy = options[name];\n if (target === copy) {\n continue;\n }\n if (deep && copy && (jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)))) {\n if (copyIsArray) {\n copyIsArray = false;\n clone = src && jQuery.isArray(src) ? src : [];\n } else {\n clone = src && jQuery.isPlainObject(src) ? src : {};\n }\n // WARNING: RECURSION\n target[name] = extend(deep, clone, copy);\n } else if (copy !== undefined) {\n target[name] = copy;\n }\n }\n }\n }\n return target;\n};\n //// --------- math.js -----------\nvar math = {\n\n toRadians: function(degrees) {\n return degrees * (Math.PI / 180);\n },\n\n toDegrees: function(radians) {\n return radians * (180 / Math.PI);\n },\n\n computePieRadius: function(pie) {\n var size = pie.options.size;\n var canvasPadding = pie.options.misc.canvasPadding;\n\n // outer radius is either specified (e.g. through the generator), or omitted altogether\n // and calculated based on the canvas dimensions. Right now the estimated version isn't great - it should\n // be possible to calculate it to precisely generate the maximum sized pie, but it's fussy as heck. Something\n // for the next release.\n\n // first, calculate the default _outerRadius\n var w = size.canvasWidth - canvasPadding.left - canvasPadding.right;\n var h = size.canvasHeight - canvasPadding.top - canvasPadding.bottom;\n\n // now factor in the footer, title & subtitle\n if (pie.options.header.location !== \"pie-center\") {\n h -= pie.textComponents.headerHeight;\n }\n\n if (pie.textComponents.footer.exists) {\n h -= pie.textComponents.footer.h;\n }\n\n // for really teeny pies, h may be < 0. Adjust it back\n h = (h < 0) ? 0 : h;\n\n var outerRadius = ((w < h) ? w : h) / 3;\n var innerRadius, percent;\n\n // if the user specified something, use that instead\n if (size.pieOuterRadius !== null) {\n if (/%/.test(size.pieOuterRadius)) {\n percent = parseInt(size.pieOuterRadius.replace(/[\\D]/, \"\"), 10);\n percent = (percent > 99) ? 99 : percent;\n percent = (percent < 0) ? 0 : percent;\n\n var smallestDimension = (w < h) ? w : h;\n\n // now factor in the label line size\n if (pie.options.labels.outer.format !== \"none\") {\n var pieDistanceSpace = parseInt(pie.options.labels.outer.pieDistance, 10) * 2;\n if (smallestDimension - pieDistanceSpace > 0) {\n smallestDimension -= pieDistanceSpace;\n }\n }\n\n outerRadius = Math.floor((smallestDimension / 100) * percent) / 2;\n } else {\n outerRadius = parseInt(size.pieOuterRadius, 10);\n }\n }\n\n // inner radius\n if (/%/.test(size.pieInnerRadius)) {\n percent = parseInt(size.pieInnerRadius.replace(/[\\D]/, \"\"), 10);\n percent = (percent > 99) ? 99 : percent;\n percent = (percent < 0) ? 
0 : percent;\n innerRadius = Math.floor((outerRadius / 100) * percent);\n } else {\n innerRadius = parseInt(size.pieInnerRadius, 10);\n }\n\n pie.innerRadius = innerRadius;\n pie.outerRadius = outerRadius;\n },\n\n getTotalPieSize: function(data) {\n var totalSize = 0;\n for (var i=0; i<data.length; i++) {\n totalSize += data[i].value;\n }\n return totalSize;\n },\n\n sortPieData: function(pie) {\n var data = pie.options.data.content;\n var sortOrder = pie.options.data.sortOrder;\n\n switch (sortOrder) {\n case \"none\":\n // do nothing\n break;\n case \"random\":\n data = helpers.shuffleArray(data);\n break;\n case \"value-asc\":\n data.sort(function(a, b) { return (a.value < b.value) ? -1 : 1; });\n break;\n case \"value-desc\":\n data.sort(function(a, b) { return (a.value < b.value) ? 1 : -1; });\n break;\n case \"label-asc\":\n data.sort(function(a, b) { return (a.label.toLowerCase() > b.label.toLowerCase()) ? 1 : -1; });\n break;\n case \"label-desc\":\n data.sort(function(a, b) { return (a.label.toLowerCase() < b.label.toLowerCase()) ? 1 : -1; });\n break;\n }\n\n return data;\n },\n\n // var pieCenter = math.getPieCenter();\n getPieTranslateCenter: function(pieCenter) {\n return \"translate(\" + pieCenter.x + \",\" + pieCenter.y + \")\";\n },\n\n /**\n * Used to determine where on the canvas the center of the pie chart should be. It takes into account the\n * height and position of the title, subtitle and footer, and the various paddings.\n * @private\n */\n calculatePieCenter: function(pie) {\n var pieCenterOffset = pie.options.misc.pieCenterOffset;\n var hasTopTitle = (pie.textComponents.title.exists && pie.options.header.location !== \"pie-center\");\n var hasTopSubtitle = (pie.textComponents.subtitle.exists && pie.options.header.location !== \"pie-center\");\n\n var headerOffset = pie.options.misc.canvasPadding.top;\n if (hasTopTitle && hasTopSubtitle) {\n headerOffset += pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;\n } else if (hasTopTitle) {\n headerOffset += pie.textComponents.title.h;\n } else if (hasTopSubtitle) {\n headerOffset += pie.textComponents.subtitle.h;\n }\n\n var footerOffset = 0;\n if (pie.textComponents.footer.exists) {\n footerOffset = pie.textComponents.footer.h + pie.options.misc.canvasPadding.bottom;\n }\n\n var x = ((pie.options.size.canvasWidth - pie.options.misc.canvasPadding.left - pie.options.misc.canvasPadding.right) / 2) + pie.options.misc.canvasPadding.left;\n var y = ((pie.options.size.canvasHeight - footerOffset - headerOffset) / 2) + headerOffset;\n\n x += pieCenterOffset.x;\n y += pieCenterOffset.y;\n\n pie.pieCenter = { x: x, y: y };\n },\n\n\n /**\n * Rotates a point (x, y) around an axis (xm, ym) by degrees (a).\n * @param x\n * @param y\n * @param xm\n * @param ym\n * @param a angle in degrees\n * @returns {Array}\n */\n rotate: function(x, y, xm, ym, a) {\n\n a = a * Math.PI / 180; // convert to radians\n\n var cos = Math.cos,\n sin = Math.sin,\n // subtract midpoints, so that midpoint is translated to origin and add it in the end again\n xr = (x - xm) * cos(a) - (y - ym) * sin(a) + xm,\n yr = (x - xm) * sin(a) + (y - ym) * cos(a) + ym;\n\n return { x: xr, y: yr };\n },\n\n /**\n * Translates a point x, y by distance d, and by angle a.\n * @param x\n * @param y\n * @param dist\n * @param a angle in degrees\n */\n translate: function(x, y, d, a) {\n var rads = math.toRadians(a);\n return {\n x: x + d * Math.sin(rads),\n y: y - d * Math.cos(rads)\n };\n },\n\n // from: 
http://stackoverflow.com/questions/19792552/d3-put-arc-labels-in-a-pie-chart-if-there-is-enough-space\n pointIsInArc: function(pt, ptData, d3Arc) {\n // Center of the arc is assumed to be 0,0\n // (pt.x, pt.y) are assumed to be relative to the center\n var r1 = d3Arc.innerRadius()(ptData), // Note: Using the innerRadius\n r2 = d3Arc.outerRadius()(ptData),\n theta1 = d3Arc.startAngle()(ptData),\n theta2 = d3Arc.endAngle()(ptData);\n\n var dist = pt.x * pt.x + pt.y * pt.y,\n angle = Math.atan2(pt.x, -pt.y); // Note: different coordinate system\n\n angle = (angle < 0) ? (angle + Math.PI * 2) : angle;\n\n return (r1 * r1 <= dist) && (dist <= r2 * r2) &&\n (theta1 <= angle) && (angle <= theta2);\n }\n};\n\n //// --------- labels.js -----------\nvar labels = {\n\n /**\n * Adds the labels to the pie chart, but doesn't position them. There are two locations for the\n * labels: inside (center) of the segments, or outside the segments on the edge.\n * @param section \"inner\" or \"outer\"\n * @param sectionDisplayType \"percentage\", \"value\", \"label\", \"label-value1\", etc.\n * @param pie\n */\n add: function(pie, section, sectionDisplayType) {\n var include = labels.getIncludes(sectionDisplayType);\n var settings = pie.options.labels;\n\n // group the label groups (label, percentage, value) into a single element for simpler positioning\n var outerLabel = pie.svg.insert(\"g\", \".\" + pie.cssPrefix + \"labels-\" + section)\n .attr(\"class\", pie.cssPrefix + \"labels-\" + section);\n\n var labelGroup = pie.__labels[section] = outerLabel.selectAll(\".\" + pie.cssPrefix + \"labelGroup-\" + section)\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"labelGroup\" + i + \"-\" + section; })\n .attr(\"data-index\", function(d, i) { return i; })\n .attr(\"class\", pie.cssPrefix + \"labelGroup-\" + section)\n .style(\"opacity\", 0);\n\n var formatterContext = { section: section, sectionDisplayType: sectionDisplayType };\n\n // 1. Add the main label\n if (include.mainLabel) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentMainLabel\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentMainLabel-\" + section)\n .text(function(d, i) {\n var str = d.label;\n\n // if a custom formatter has been defined, pass it the raw label string - it can do whatever it wants with it.\n // we only apply truncation if it's not defined\n if (settings.formatter) {\n formatterContext.index = i;\n formatterContext.part = 'mainLabel';\n formatterContext.value = d.value;\n formatterContext.label = str;\n str = settings.formatter(formatterContext);\n } else if (settings.truncation.enabled && d.label.length > settings.truncation.truncateLength) {\n str = d.label.substring(0, settings.truncation.truncateLength) + \"...\";\n }\n return str;\n })\n .style(\"font-size\", settings.mainLabel.fontSize + \"px\")\n .style(\"font-family\", settings.mainLabel.font)\n .style(\"font-weight\", settings.mainLabel.fontWeight)\n .style(\"fill\", function(d, i) {\n return (settings.mainLabel.color === \"segment\") ? pie.options.colors[i] : settings.mainLabel.color;\n });\n }\n\n // 2. 
Add the percentage label\n if (include.percentage) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentPercentage\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentPercentage-\" + section)\n .text(function(d, i) {\n var percentage = d.percentage;\n if (settings.formatter) {\n formatterContext.index = i;\n formatterContext.part = \"percentage\";\n formatterContext.value = d.value;\n formatterContext.label = d.percentage;\n percentage = settings.formatter(formatterContext);\n } else {\n percentage += \"%\";\n }\n return percentage;\n })\n .style(\"font-size\", settings.percentage.fontSize + \"px\")\n .style(\"font-family\", settings.percentage.font)\n .style(\"font-weight\", settings.percentage.fontWeight)\n .style(\"fill\", settings.percentage.color);\n }\n\n // 3. Add the value label\n if (include.value) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentValue\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentValue-\" + section)\n .text(function(d, i) {\n formatterContext.index = i;\n formatterContext.part = \"value\";\n formatterContext.value = d.value;\n formatterContext.label = d.value;\n return settings.formatter ? settings.formatter(formatterContext, d.value) : d.value;\n })\n .style(\"font-size\", settings.value.fontSize + \"px\")\n .style(\"font-family\", settings.value.font)\n .style(\"font-weight\", settings.value.fontWeight)\n .style(\"fill\", settings.value.color);\n }\n },\n\n /**\n * @param section \"inner\" / \"outer\"\n */\n positionLabelElements: function(pie, section, sectionDisplayType) {\n labels[\"dimensions-\" + section] = [];\n\n // get the latest widths, heights\n var labelGroups = pie.__labels[section];\n labelGroups.each(function(d, i) {\n var mainLabel = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentMainLabel-\" + section);\n var percentage = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section);\n var value = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section);\n\n labels[\"dimensions-\" + section].push({\n mainLabel: (mainLabel.node() !== null) ? mainLabel.node().getBBox() : null,\n percentage: (percentage.node() !== null) ? percentage.node().getBBox() : null,\n value: (value.node() !== null) ? 
value.node().getBBox() : null\n });\n });\n\n var singleLinePad = 5;\n var dims = labels[\"dimensions-\" + section];\n switch (sectionDisplayType) {\n case \"label-value1\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section)\n .attr(\"dx\", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });\n break;\n case \"label-value2\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section)\n .attr(\"dy\", function(d, i) { return dims[i].mainLabel.height; });\n break;\n case \"label-percentage1\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section)\n .attr(\"dx\", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });\n break;\n case \"label-percentage2\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section)\n .attr(\"dx\", function(d, i) { return (dims[i].mainLabel.width / 2) - (dims[i].percentage.width / 2); })\n .attr(\"dy\", function(d, i) { return dims[i].mainLabel.height; });\n break;\n }\n },\n\n computeLabelLinePositions: function(pie) {\n pie.lineCoordGroups = [];\n pie.__labels.outer\n .each(function(d, i) { return labels.computeLinePosition(pie, i); });\n },\n\n computeLinePosition: function(pie, i) {\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n var originCoords = math.rotate(pie.pieCenter.x, pie.pieCenter.y - pie.outerRadius, pie.pieCenter.x, pie.pieCenter.y, angle);\n var heightOffset = pie.outerLabelGroupData[i].h / 5; // TODO check\n var labelXMargin = 6; // the x-distance of the label from the end of the line [TODO configurable]\n\n var quarter = Math.floor(angle / 90);\n var midPoint = 4;\n var x2, y2, x3, y3;\n\n // this resolves an issue when the\n if (quarter === 2 && angle === 180) {\n quarter = 1;\n }\n\n switch (quarter) {\n case 0:\n x2 = pie.outerLabelGroupData[i].x - labelXMargin - ((pie.outerLabelGroupData[i].x - labelXMargin - originCoords.x) / 2);\n y2 = pie.outerLabelGroupData[i].y + ((originCoords.y - pie.outerLabelGroupData[i].y) / midPoint);\n x3 = pie.outerLabelGroupData[i].x - labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 1:\n x2 = originCoords.x + (pie.outerLabelGroupData[i].x - originCoords.x) / midPoint;\n y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x - labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 2:\n var startOfLabelX = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n x2 = originCoords.x - (originCoords.x - startOfLabelX) / midPoint;\n y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 3:\n var startOfLabel = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n x2 = startOfLabel + ((originCoords.x - startOfLabel) / midPoint);\n y2 = pie.outerLabelGroupData[i].y + (originCoords.y - pie.outerLabelGroupData[i].y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n }\n\n /*\n * x1 / y1: the x/y coords of the start of the line, at the mid point of the segments arc on the pie circumference\n * x2 / y2: if \"curved\" line style is being used, this is the midpoint of the line. 
Other\n * x3 / y3: the end of the line; closest point to the label\n */\n if (pie.options.labels.lines.style === \"straight\") {\n pie.lineCoordGroups[i] = [\n { x: originCoords.x, y: originCoords.y },\n { x: x3, y: y3 }\n ];\n } else {\n pie.lineCoordGroups[i] = [\n { x: originCoords.x, y: originCoords.y },\n { x: x2, y: y2 },\n { x: x3, y: y3 }\n ];\n }\n },\n\n addLabelLines: function(pie) {\n var lineGroups = pie.svg.insert(\"g\", \".\" + pie.cssPrefix + \"pieChart\") // meaning, BEFORE .pieChart\n .attr(\"class\", pie.cssPrefix + \"lineGroups\")\n .style(\"opacity\", 1);\n\n var lineGroup = lineGroups.selectAll(\".\" + pie.cssPrefix + \"lineGroup\")\n .data(pie.lineCoordGroups)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"lineGroup\");\n\n var lineFunction = d3.line()\n .curve(d3.curveBasis)\n .x(function(d) { return d.x; })\n .y(function(d) { return d.y; });\n\n lineGroup.append(\"path\")\n .attr(\"d\", lineFunction)\n .attr(\"stroke\", function(d, i) {\n return (pie.options.labels.lines.color === \"segment\") ? pie.options.colors[i] : pie.options.labels.lines.color;\n })\n .attr(\"stroke-width\", 1)\n .attr(\"fill\", \"none\")\n .style(\"opacity\", function(d, i) {\n var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;\n var isHidden = (percentage !== null && d.percentage < percentage) || pie.options.data.content[i].label === \"\";\n return isHidden ? 0 : 1;\n });\n },\n\n positionLabelGroups: function(pie, section) {\n if (pie.options.labels[section].format === \"none\")\n return;\n\n pie.__labels[section]\n .style(\"opacity\", function(d, i) {\n var percentage = pie.options.labels[section].hideWhenLessThanPercentage;\n return (percentage !== null && d.percentage < percentage) ? 0 : 1;\n })\n .attr(\"transform\", function(d, i) {\n var x, y;\n if (section === \"outer\") {\n x = pie.outerLabelGroupData[i].x;\n y = pie.outerLabelGroupData[i].y;\n } else {\n var pieCenterCopy = extend(true, {}, pie.pieCenter);\n\n // now recompute the \"center\" based on the current _innerRadius\n if (pie.innerRadius > 0) {\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n var newCoords = math.translate(pie.pieCenter.x, pie.pieCenter.y, pie.innerRadius, angle);\n pieCenterCopy.x = newCoords.x;\n pieCenterCopy.y = newCoords.y;\n }\n\n var dims = helpers.getDimensions(pie.cssPrefix + \"labelGroup\" + i + \"-inner\");\n var xOffset = dims.w / 2;\n var yOffset = dims.h / 4; // confusing! Why 4? should be 2, but it doesn't look right\n\n x = pieCenterCopy.x + (pie.lineCoordGroups[i][0].x - pieCenterCopy.x) / 1.8;\n y = pieCenterCopy.y + (pie.lineCoordGroups[i][0].y - pieCenterCopy.y) / 1.8;\n\n x = x - xOffset;\n y = y + yOffset;\n }\n\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n\n getIncludes: function(val) {\n var addMainLabel = false;\n var addValue = false;\n var addPercentage = false;\n\n switch (val) {\n case \"label\":\n addMainLabel = true;\n break;\n case \"value\":\n addValue = true;\n break;\n case \"percentage\":\n addPercentage = true;\n break;\n case \"label-value1\":\n case \"label-value2\":\n addMainLabel = true;\n addValue = true;\n break;\n case \"label-percentage1\":\n case \"label-percentage2\":\n addMainLabel = true;\n addPercentage = true;\n break;\n }\n return {\n mainLabel: addMainLabel,\n value: addValue,\n percentage: addPercentage\n };\n },\n\n\n /**\n * This does the heavy-lifting to compute the actual coordinates for the outer label groups. It does two things:\n * 1. 
Make a first pass and position them in the ideal positions, based on the pie sizes\n * 2. Do some basic collision avoidance.\n */\n computeOuterLabelCoords: function(pie) {\n\n // 1. figure out the ideal positions for the outer labels\n pie.__labels.outer\n .each(function(d, i) {\n return labels.getIdealOuterLabelPositions(pie, i);\n });\n\n // 2. now adjust those positions to try to accommodate conflicts\n labels.resolveOuterLabelCollisions(pie);\n },\n\n /**\n * This attempts to resolve label positioning collisions.\n */\n resolveOuterLabelCollisions: function(pie) {\n if (pie.options.labels.outer.format === \"none\") {\n return;\n }\n\n var size = pie.options.data.content.length;\n labels.checkConflict(pie, 0, \"clockwise\", size);\n labels.checkConflict(pie, size-1, \"anticlockwise\", size);\n },\n\n checkConflict: function(pie, currIndex, direction, size) {\n var i, curr;\n\n if (size <= 1) {\n return;\n }\n\n var currIndexHemisphere = pie.outerLabelGroupData[currIndex].hs;\n if (direction === \"clockwise\" && currIndexHemisphere !== \"right\") {\n return;\n }\n if (direction === \"anticlockwise\" && currIndexHemisphere !== \"left\") {\n return;\n }\n var nextIndex = (direction === \"clockwise\") ? currIndex+1 : currIndex-1;\n\n // this is the current label group being looked at. We KNOW it's positioned properly (the first item\n // is always correct)\n var currLabelGroup = pie.outerLabelGroupData[currIndex];\n\n // this one we don't know about. That's the one we're going to look at and move if necessary\n var examinedLabelGroup = pie.outerLabelGroupData[nextIndex];\n\n var info = {\n labelHeights: pie.outerLabelGroupData[0].h,\n center: pie.pieCenter,\n lineLength: (pie.outerRadius + pie.options.labels.outer.pieDistance),\n heightChange: pie.outerLabelGroupData[0].h + 1 // 1 = padding\n };\n\n // loop through *ALL* label groups examined so far to check for conflicts. 
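(The sweep starts from the first label and walks clockwise, then from the last label anticlockwise, one hemisphere at a time.) 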
This is because when they're\n // very tightly fitted, a later label group may still appear high up on the page\n if (direction === \"clockwise\") {\n i = 0;\n for (; i<=currIndex; i++) {\n curr = pie.outerLabelGroupData[i];\n\n // if there's a conflict with this label group, shift the label to be AFTER the last known\n // one that's been properly placed\n if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {\n labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);\n break;\n }\n }\n } else {\n i = size - 1;\n for (; i >= currIndex; i--) {\n curr = pie.outerLabelGroupData[i];\n\n // if there's a conflict with this label group, shift the label to be AFTER the last known\n // one that's been properly placed\n if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {\n labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);\n break;\n }\n }\n }\n labels.checkConflict(pie, nextIndex, direction, size);\n },\n\n isLabelHidden: function(pie, index) {\n var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;\n return (percentage !== null && pie.options.data.content[index].percentage < percentage) || pie.options.data.content[index].label === \"\";\n },\n\n // does a little math to shift a label into a new position based on the last properly placed one\n adjustLabelPos: function(pie, nextIndex, lastCorrectlyPositionedLabel, info) {\n var xDiff, yDiff, newXPos, newYPos;\n newYPos = lastCorrectlyPositionedLabel.y + info.heightChange;\n yDiff = info.center.y - newYPos;\n\n if (Math.abs(info.lineLength) > Math.abs(yDiff)) {\n xDiff = Math.sqrt((info.lineLength * info.lineLength) - (yDiff * yDiff));\n } else {\n xDiff = Math.sqrt((yDiff * yDiff) - (info.lineLength * info.lineLength));\n }\n\n if (lastCorrectlyPositionedLabel.hs === \"right\") {\n newXPos = info.center.x + xDiff;\n } else {\n newXPos = info.center.x - xDiff - pie.outerLabelGroupData[nextIndex].w;\n }\n\n pie.outerLabelGroupData[nextIndex].x = newXPos;\n pie.outerLabelGroupData[nextIndex].y = newYPos;\n },\n\n /**\n * @param i 0-N where N is the dataset size - 1.\n */\n getIdealOuterLabelPositions: function(pie, i) {\n var labelGroupNode = pie.svg.select(\"#\" + pie.cssPrefix + \"labelGroup\" + i + \"-outer\").node();\n if (!labelGroupNode) return;\n\n var labelGroupDims = labelGroupNode.getBBox();\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n\n var originalX = pie.pieCenter.x;\n var originalY = pie.pieCenter.y - (pie.outerRadius + pie.options.labels.outer.pieDistance);\n var newCoords = math.rotate(originalX, originalY, pie.pieCenter.x, pie.pieCenter.y, angle);\n\n // if the label is on the left half of the pie, adjust the values\n var hemisphere = \"right\"; // default hemisphere; flipped below for angles > 180\n if (angle > 180) {\n newCoords.x -= (labelGroupDims.width + 8);\n hemisphere = \"left\";\n } else {\n newCoords.x += 8;\n }\n\n pie.outerLabelGroupData[i] = {\n x: newCoords.x,\n y: newCoords.y,\n w: labelGroupDims.width,\n h: labelGroupDims.height,\n hs: hemisphere\n };\n }\n};\n\n //// --------- segments.js -----------\nvar segments = {\n\n effectMap: {\n \"none\": d3.easeLinear,\n \"bounce\": d3.easeBounce,\n \"linear\": d3.easeLinear,\n \"sin\": d3.easeSin,\n \"elastic\": d3.easeElastic,\n \"back\": d3.easeBack,\n \"quad\": d3.easeQuad,\n \"circle\": d3.easeCircle,\n \"exp\": d3.easeExp\n },\n\n /**\n * Creates the pie chart segments and displays them according to the desired load effect.\n * @private\n */\n create: function(pie) {\n var pieCenter 
= pie.pieCenter;\n var colors = pie.options.colors;\n var loadEffects = pie.options.effects.load;\n var segmentStroke = pie.options.misc.colors.segmentStroke;\n\n // we insert the pie chart BEFORE the title, to ensure the title overlaps the pie\n var pieChartElement = pie.svg.insert(\"g\", \"#\" + pie.cssPrefix + \"title\")\n .attr(\"transform\", function() { return math.getPieTranslateCenter(pieCenter); })\n .attr(\"class\", pie.cssPrefix + \"pieChart\");\n\n var arc = d3.arc()\n .innerRadius(pie.innerRadius)\n .outerRadius(pie.outerRadius)\n .startAngle(0)\n .endAngle(function(d) {\n return (d.value / pie.totalSize) * 2 * Math.PI;\n });\n\n var g = pieChartElement.selectAll(\".\" + pie.cssPrefix + \"arc\")\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"arc\");\n\n // if we're not fading in the pie, just set the load speed to 0\n //var loadSpeed = loadEffects.speed;\n //if (loadEffects.effect === \"none\") {\n //\tloadSpeed = 0;\n //}\n\n g.append(\"path\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segment\" + i; })\n .attr(\"fill\", function(d, i) {\n var color = colors[i];\n if (pie.options.misc.gradient.enabled) {\n color = \"url(#\" + pie.cssPrefix + \"grad\" + i + \")\";\n }\n return color;\n })\n .style(\"stroke\", segmentStroke)\n .style(\"stroke-width\", 1)\n //.transition()\n //.ease(d3.easeCubicInOut)\n //.duration(loadSpeed)\n .attr(\"data-index\", function(d, i) { return i; })\n .attr(\"d\", arc);\n/*\n .attrTween(\"d\", function(b) {\n var i = d3.interpolate({ value: 0 }, b);\n return function(t) {\n var ret = pie.arc(i(t));\n console.log(ret);\n return ret;\n };\n });\n*/\n pie.svg.selectAll(\"g.\" + pie.cssPrefix + \"arc\")\n .attr(\"transform\",\n function(d, i) {\n var angle = 0;\n if (i > 0) {\n angle = segments.getSegmentAngle(i-1, pie.options.data.content, pie.totalSize);\n }\n return \"rotate(\" + angle + \")\";\n }\n );\n pie.arc = arc;\n },\n\n addGradients: function(pie) {\n var grads = pie.svg.append(\"defs\")\n .selectAll(\"radialGradient\")\n .data(pie.options.data.content)\n .enter().append(\"radialGradient\")\n .attr(\"gradientUnits\", \"userSpaceOnUse\")\n .attr(\"cx\", 0)\n .attr(\"cy\", 0)\n .attr(\"r\", \"120%\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"grad\" + i; });\n\n grads.append(\"stop\").attr(\"offset\", \"0%\").style(\"stop-color\", function(d, i) { return pie.options.colors[i]; });\n grads.append(\"stop\").attr(\"offset\", pie.options.misc.gradient.percentage + \"%\").style(\"stop-color\", pie.options.misc.gradient.color);\n },\n\n addSegmentEventHandlers: function(pie) {\n var arc = pie.svg.selectAll(\".\" + pie.cssPrefix + \"arc\");\n arc = arc.merge(pie.__labels.inner.merge(pie.__labels.outer));\n\n arc.on(\"click\", function() {\n var currentEl = d3.select(this);\n var segment;\n\n // mouseover works on both the segments AND the segment labels, hence the following\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n var index = currentEl.attr(\"data-index\");\n segment = d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onClickSegment, segment, isExpanded);\n if (pie.options.effects.pullOutSegmentOnClick.effect !== \"none\") {\n if (isExpanded) {\n segments.closeSegment(pie, segment.node());\n } else {\n segments.openSegment(pie, segment.node());\n }\n }\n 
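// note: the pull-out animation itself (easing and speed) comes from effects.pullOutSegmentOnClick\n 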
});\n\n arc.on(\"mouseover\", function() {\n var currentEl = d3.select(this);\n var segment, index;\n\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n index = currentEl.attr(\"data-index\");\n segment = d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n if (pie.options.effects.highlightSegmentOnMouseover) {\n index = segment.attr(\"data-index\");\n var segColor = pie.options.colors[index];\n segment.style(\"fill\", helpers.getColorShade(segColor, pie.options.effects.highlightLuminosity));\n }\n\n if (pie.options.tooltips.enabled) {\n index = segment.attr(\"data-index\");\n tt.showTooltip(pie, index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoverSegment, segment, isExpanded);\n });\n\n arc.on(\"mousemove\", function() {\n tt.moveTooltip(pie);\n });\n\n arc.on(\"mouseout\", function() {\n var currentEl = d3.select(this);\n var segment, index;\n\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n index = currentEl.attr(\"data-index\");\n segment = d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n if (pie.options.effects.highlightSegmentOnMouseover) {\n index = segment.attr(\"data-index\");\n var color = pie.options.colors[index];\n if (pie.options.misc.gradient.enabled) {\n color = \"url(#\" + pie.cssPrefix + \"grad\" + index + \")\";\n }\n segment.style(\"fill\", color);\n }\n\n if (pie.options.tooltips.enabled) {\n index = segment.attr(\"data-index\");\n tt.hideTooltip(pie, index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoutSegment, segment, isExpanded);\n });\n },\n\n // helper function used to call the click, mouseover, mouseout segment callback functions\n onSegmentEvent: function(pie, func, segment, isExpanded) {\n if (!helpers.isFunction(func)) {\n return;\n }\n var index = parseInt(segment.attr(\"data-index\"), 10);\n func({\n segment: segment.node(),\n index: index,\n expanded: isExpanded,\n data: pie.options.data.content[index]\n });\n },\n\n openSegment: function(pie, segment) {\n if (pie.isOpeningSegment) {\n return;\n }\n pie.isOpeningSegment = true;\n\n segments.maybeCloseOpenSegment(pie);\n\n d3.select(segment)\n .transition()\n .ease(segments.effectMap[pie.options.effects.pullOutSegmentOnClick.effect])\n .duration(pie.options.effects.pullOutSegmentOnClick.speed)\n .attr(\"transform\", function(d, i) {\n var c = pie.arc.centroid(d),\n x = c[0],\n y = c[1],\n h = Math.sqrt(x*x + y*y),\n pullOutSize = parseInt(pie.options.effects.pullOutSegmentOnClick.size, 10);\n\n return \"translate(\" + ((x/h) * pullOutSize) + ',' + ((y/h) * pullOutSize) + \")\";\n })\n .on(\"end\", function(d, i) {\n pie.currentlyOpenSegment = segment;\n pie.isOpeningSegment = false;\n d3.select(segment).attr(\"class\", pie.cssPrefix + \"expanded\");\n });\n },\n\n maybeCloseOpenSegment: function(pie) {\n if (typeof pie !== 'undefined' && pie.svg.selectAll(\".\" + pie.cssPrefix + \"expanded\").size() > 0) {\n segments.closeSegment(pie, pie.svg.select(\".\" + pie.cssPrefix + \"expanded\").node());\n }\n },\n\n closeSegment: function(pie, segment) {\n d3.select(segment)\n .transition()\n .duration(400)\n .attr(\"transform\", \"translate(0,0)\")\n .on(\"end\", function(d, i) {\n d3.select(segment).attr(\"class\", \"\");\n pie.currentlyOpenSegment = null;\n 
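// (the segment is now back at the origin and no longer marked \"expanded\")\n 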
});\n },\n\n getCentroid: function(el) {\n var bbox = el.getBBox();\n return {\n x: bbox.x + bbox.width / 2,\n y: bbox.y + bbox.height / 2\n };\n },\n\n /**\n * General helper function to return a segment's angle, in various different ways.\n * @param index\n * @param opts optional object for fine-tuning exactly what you want.\n */\n getSegmentAngle: function(index, data, totalSize, opts) {\n var options = extend({\n // if true, this returns the full angle from the origin. Otherwise it returns the single segment angle\n compounded: true,\n\n // optionally returns the midpoint of the angle instead of the full angle\n midpoint: false\n }, opts);\n\n var currValue = data[index].value;\n var fullValue;\n if (options.compounded) {\n fullValue = 0;\n\n // get all values up to and including the specified index\n for (var i=0; i<=index; i++) {\n fullValue += data[i].value;\n }\n }\n\n if (typeof fullValue === 'undefined') {\n fullValue = currValue;\n }\n\n // now convert the full value to an angle\n var angle = (fullValue / totalSize) * 360;\n\n // lastly, if we want the midpoint, factor that sucker in\n if (options.midpoint) {\n var currAngle = (currValue / totalSize) * 360;\n angle -= (currAngle / 2);\n }\n\n return angle;\n }\n\n};\n\n //// --------- text.js -----------\nvar text = {\n offscreenCoord: -10000,\n\n addTitle: function(pie) {\n pie.__title = pie.svg.selectAll(\".\" + pie.cssPrefix + \"title\")\n .data([pie.options.header.title])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"id\", pie.cssPrefix + \"title\")\n .attr(\"class\", pie.cssPrefix + \"title\")\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", text.offscreenCoord)\n .attr(\"text-anchor\", function() {\n var location;\n if (pie.options.header.location === \"top-center\" || pie.options.header.location === \"pie-center\") {\n location = \"middle\";\n } else {\n location = \"left\";\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionTitle: function(pie) {\n var textComponents = pie.textComponents;\n var headerLocation = pie.options.header.location;\n var canvasPadding = pie.options.misc.canvasPadding;\n var canvasWidth = pie.options.size.canvasWidth;\n var titleSubtitlePadding = pie.options.header.titleSubtitlePadding;\n\n var x;\n if (headerLocation === \"top-left\") {\n x = canvasPadding.left;\n } else {\n x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;\n }\n\n // add whatever offset has been added by user\n x += pie.options.misc.pieCenterOffset.x;\n\n var y = canvasPadding.top + textComponents.title.h;\n\n if (headerLocation === \"pie-center\") {\n y = pie.pieCenter.y;\n\n // still not fully correct\n if (textComponents.subtitle.exists) {\n var totalTitleHeight = textComponents.title.h + titleSubtitlePadding + textComponents.subtitle.h;\n y = y - (totalTitleHeight / 2) + textComponents.title.h;\n } else {\n y += (textComponents.title.h / 4);\n }\n }\n\n pie.__title\n .attr(\"x\", x)\n .attr(\"y\", y);\n },\n\n addSubtitle: function(pie) {\n var headerLocation = pie.options.header.location;\n\n pie.__subtitle = pie.svg.selectAll(\".\" + pie.cssPrefix + \"subtitle\")\n .data([pie.options.header.subtitle])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", 
text.offscreenCoord)\n .attr(\"id\", pie.cssPrefix + \"subtitle\")\n .attr(\"class\", pie.cssPrefix + \"subtitle\")\n .attr(\"text-anchor\", function() {\n var location;\n if (headerLocation === \"top-center\" || headerLocation === \"pie-center\") {\n location = \"middle\";\n } else {\n location = \"left\";\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionSubtitle: function(pie) {\n var canvasPadding = pie.options.misc.canvasPadding;\n var canvasWidth = pie.options.size.canvasWidth;\n\n var x;\n if (pie.options.header.location === \"top-left\") {\n x = canvasPadding.left;\n } else {\n x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;\n }\n\n // add whatever offset has been added by user\n x += pie.options.misc.pieCenterOffset.x;\n\n var y = text.getHeaderHeight(pie);\n\n pie.__subtitle\n .attr(\"x\", x)\n .attr(\"y\", y);\n },\n\n addFooter: function(pie) {\n pie.__footer = pie.svg.selectAll(\".\" + pie.cssPrefix + \"footer\")\n .data([pie.options.footer])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", text.offscreenCoord)\n .attr(\"id\", pie.cssPrefix + \"footer\")\n .attr(\"class\", pie.cssPrefix + \"footer\")\n .attr(\"text-anchor\", function() {\n var location = \"left\";\n if (pie.options.footer.location === \"bottom-center\") {\n location = \"middle\";\n } else if (pie.options.footer.location === \"bottom-right\") {\n location = \"left\"; // on purpose. We have to change the x-coord to make it properly right-aligned\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionFooter: function(pie) {\n var footerLocation = pie.options.footer.location;\n var footerWidth = pie.textComponents.footer.w;\n var canvasWidth = pie.options.size.canvasWidth;\n var canvasHeight = pie.options.size.canvasHeight;\n var canvasPadding = pie.options.misc.canvasPadding;\n\n var x;\n if (footerLocation === \"bottom-left\") {\n x = canvasPadding.left;\n } else if (footerLocation === \"bottom-right\") {\n x = canvasWidth - footerWidth - canvasPadding.right;\n } else {\n x = canvasWidth / 2; // TODO - shouldn't this also take into account padding?\n }\n\n pie.__footer\n .attr(\"x\", x)\n .attr(\"y\", canvasHeight - canvasPadding.bottom);\n },\n\n getHeaderHeight: function(pie) {\n var h;\n if (pie.textComponents.title.exists) {\n\n // if the subtitle isn't defined, it'll be set to 0\n var totalTitleHeight = pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;\n if (pie.options.header.location === \"pie-center\") {\n h = pie.pieCenter.y - (totalTitleHeight / 2) + totalTitleHeight;\n } else {\n h = totalTitleHeight + pie.options.misc.canvasPadding.top;\n }\n } else {\n if (pie.options.header.location === \"pie-center\") {\n var footerPlusPadding = pie.options.misc.canvasPadding.bottom + pie.textComponents.footer.h;\n h = ((pie.options.size.canvasHeight - footerPlusPadding) / 2) + pie.options.misc.canvasPadding.top + (pie.textComponents.subtitle.h / 2);\n } else {\n h = pie.options.misc.canvasPadding.top + 
pie.textComponents.subtitle.h;\n }\n }\n return h;\n }\n};\n\n //// --------- tooltips.js -----------\nvar tt = {\n addTooltips: function(pie) {\n\n // add a single container element to hold all the tooltips, for simpler positioning\n var tooltips = pie.svg.insert(\"g\")\n .attr(\"class\", pie.cssPrefix + \"tooltips\");\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip\")\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"tooltip\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"tooltip\" + i; })\n .style(\"opacity\", 0)\n .append(\"rect\")\n .attr(\"rx\", pie.options.tooltips.styles.borderRadius)\n .attr(\"ry\", pie.options.tooltips.styles.borderRadius)\n .attr(\"x\", -pie.options.tooltips.styles.padding)\n .attr(\"opacity\", pie.options.tooltips.styles.backgroundOpacity)\n .style(\"fill\", pie.options.tooltips.styles.backgroundColor);\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip\")\n .data(pie.options.data.content)\n .append(\"text\")\n .attr(\"fill\", function(d) { return pie.options.tooltips.styles.color; })\n .style(\"font-size\", function(d) { return pie.options.tooltips.styles.fontSize; })\n .style(\"font-weight\", function(d) { return pie.options.tooltips.styles.fontWeight; })\n .style(\"font-family\", function(d) { return pie.options.tooltips.styles.font; })\n .text(function(d, i) {\n var caption = pie.options.tooltips.string;\n if (pie.options.tooltips.type === \"caption\") {\n caption = d.caption;\n }\n return tt.replacePlaceholders(pie, caption, i, {\n label: d.label,\n value: d.value,\n percentage: d.percentage\n });\n });\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip rect\")\n .attr(\"width\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return dims.w + (2 * pie.options.tooltips.styles.padding);\n })\n .attr(\"height\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return dims.h + (2 * pie.options.tooltips.styles.padding);\n })\n .attr(\"y\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return -(dims.h / 2) + 1;\n });\n },\n\n showTooltip: function(pie, index) {\n var fadeInSpeed = pie.options.tooltips.styles.fadeInSpeed;\n if (tt.currentTooltip === index) {\n fadeInSpeed = 1;\n }\n\n tt.currentTooltip = index;\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + index)\n .transition()\n .duration(fadeInSpeed)\n .style(\"opacity\", function() { return 1; });\n\n tt.moveTooltip(pie);\n },\n\n moveTooltip: function(pie) {\n d3.selectAll(\"#\" + pie.cssPrefix + \"tooltip\" + tt.currentTooltip)\n .attr(\"transform\", function(d) {\n var mouseCoords = d3.mouse(this.parentNode);\n var x = mouseCoords[0] + pie.options.tooltips.styles.padding + 2;\n var y = mouseCoords[1] - (2 * pie.options.tooltips.styles.padding) - 2;\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n hideTooltip: function(pie, index) {\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + index)\n .style(\"opacity\", function() { return 0; });\n\n // move the tooltip offscreen. 
This ensures that when the user next mouseovers the segment the hidden\n // element won't interfere\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + tt.currentTooltip)\n .attr(\"transform\", function(d, i) {\n // klutzy, but it accounts for tooltip padding which could push it onscreen\n var x = pie.options.size.canvasWidth + 1000;\n var y = pie.options.size.canvasHeight + 1000;\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n replacePlaceholders: function(pie, str, index, replacements) {\n\n // if the user has defined a placeholderParser function, call it before doing the replacements\n if (helpers.isFunction(pie.options.tooltips.placeholderParser)) {\n pie.options.tooltips.placeholderParser(index, replacements);\n }\n\n var replacer = function() {\n return function(match) {\n var placeholder = arguments[1];\n if (replacements.hasOwnProperty(placeholder)) {\n return replacements[arguments[1]];\n } else {\n return arguments[0];\n }\n };\n };\n return str.replace(/\\{(\\w+)\\}/g, replacer(replacements));\n }\n};\n\n\n // --------------------------------------------------------------------------------------------\n\n // our constructor\n var d3pie = function(element, options) {\n\n // element can be an ID or DOM element\n this.element = element;\n if (typeof element === \"string\") {\n var el = element.replace(/^#/, \"\"); // replace any jQuery-like ID hash char\n this.element = document.getElementById(el);\n }\n\n var opts = {};\n extend(true, opts, defaultSettings, options);\n this.options = opts;\n\n // if the user specified a custom CSS element prefix (ID, class), use it\n if (this.options.misc.cssPrefix !== null) {\n this.cssPrefix = this.options.misc.cssPrefix;\n } else {\n this.cssPrefix = \"p\" + _uniqueIDCounter + \"_\";\n _uniqueIDCounter++;\n }\n\n\n // now run some validation on the user-defined info\n if (!validate.initialCheck(this)) {\n return;\n }\n\n // add a data-role to the DOM node to let anyone know that it contains a d3pie instance, and the d3pie version\n d3.select(this.element).attr(_scriptName, _version);\n\n // things that are done once\n _setupData.call(this);\n _init.call(this);\n };\n\n d3pie.prototype.recreate = function() {\n // now run some validation on the user-defined info\n if (!validate.initialCheck(this)) {\n return;\n }\n\n _setupData.call(this);\n _init.call(this);\n };\n\n d3pie.prototype.redraw = function() {\n this.element.innerHTML = \"\";\n _init.call(this);\n };\n\n d3pie.prototype.destroy = function() {\n this.element.innerHTML = \"\"; // clear out the SVG\n d3.select(this.element).attr(_scriptName, null); // remove the data attr\n };\n\n /**\n * Returns all pertinent info about the current open info. 
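(\"open\" meaning the currently pulled-out, i.e. expanded, segment). 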
Returns null if nothing's open, or if one is, an object of\n * the following form:\n * \t{\n * \t element: DOM NODE,\n * \t index: N,\n * \t data: {}\n * \t}\n */\n d3pie.prototype.getOpenSegment = function() {\n var segment = this.currentlyOpenSegment;\n if (segment !== null && typeof segment !== \"undefined\") {\n var index = parseInt(d3.select(segment).attr(\"data-index\"), 10);\n return {\n element: segment,\n index: index,\n data: this.options.data.content[index]\n };\n } else {\n return null;\n }\n };\n\n d3pie.prototype.openSegment = function(index) {\n index = parseInt(index, 10);\n if (index < 0 || index > this.options.data.content.length-1) {\n return;\n }\n segments.openSegment(this, d3.select(\"#\" + this.cssPrefix + \"segment\" + index).node());\n };\n\n d3pie.prototype.closeSegment = function() {\n segments.maybeCloseOpenSegment(this);\n };\n\n // this let's the user dynamically update aspects of the pie chart without causing a complete redraw. It\n // intelligently re-renders only the part of the pie that the user specifies. Some things cause a repaint, others\n // just redraw the single element\n d3pie.prototype.updateProp = function(propKey, value) {\n switch (propKey) {\n case \"header.title.text\":\n var oldVal = helpers.processObj(this.options, propKey);\n helpers.processObj(this.options, propKey, value);\n d3.select(\"#\" + this.cssPrefix + \"title\").html(value);\n if ((oldVal === \"\" && value !== \"\") || (oldVal !== \"\" && value === \"\")) {\n this.redraw();\n }\n break;\n\n case \"header.subtitle.text\":\n var oldValue = helpers.processObj(this.options, propKey);\n helpers.processObj(this.options, propKey, value);\n d3.select(\"#\" + this.cssPrefix + \"subtitle\").html(value);\n if ((oldValue === \"\" && value !== \"\") || (oldValue !== \"\" && value === \"\")) {\n this.redraw();\n }\n break;\n\n case \"callbacks.onload\":\n case \"callbacks.onMouseoverSegment\":\n case \"callbacks.onMouseoutSegment\":\n case \"callbacks.onClickSegment\":\n case \"effects.pullOutSegmentOnClick.effect\":\n case \"effects.pullOutSegmentOnClick.speed\":\n case \"effects.pullOutSegmentOnClick.size\":\n case \"effects.highlightSegmentOnMouseover\":\n case \"effects.highlightLuminosity\":\n helpers.processObj(this.options, propKey, value);\n break;\n\n // everything else, attempt to update it & do a repaint\n default:\n helpers.processObj(this.options, propKey, value);\n\n this.destroy();\n this.recreate();\n break;\n }\n };\n\n\n // ------------------------------------------------------------------------------------------------\n\n var _setupData = function () {\n this.options.data.content = math.sortPieData(this);\n if (this.options.data.smallSegmentGrouping.enabled) {\n this.options.data.content = helpers.applySmallSegmentGrouping(this.options.data.content, this.options.data.smallSegmentGrouping);\n }\n\n\n this.options.colors = helpers.initSegmentColors(this);\n this.totalSize = math.getTotalPieSize(this.options.data.content);\n\n var dp = this.options.labels.percentage.decimalPlaces;\n\n // add in percentage data to content\n for (var i=0; i<this.options.data.content.length; i++) {\n this.options.data.content[i].percentage = _getPercentage(this.options.data.content[i].value, this.totalSize, dp);\n }\n\n // adjust the final item to ensure the percentage always adds up to precisely 100%. 
This is necessary\n var totalPercentage = 0;\n for (var j=0; j<this.options.data.content.length; j++) {\n if (j === this.options.data.content.length - 1) {\n this.options.data.content[j].percentage = (100 - totalPercentage).toFixed(dp);\n }\n totalPercentage += parseFloat(this.options.data.content[j].percentage);\n }\n };\n\n var _init = function() {\n\n // prep-work\n this.svg = helpers.addSVGSpace(this);\n\n // store info about the main text components as part of the d3pie object instance\n this.textComponents = {\n headerHeight: 0,\n title: {\n exists: this.options.header.title.text !== \"\",\n h: 0,\n w: 0\n },\n subtitle: {\n exists: this.options.header.subtitle.text !== \"\",\n h: 0,\n w: 0\n },\n footer: {\n exists: this.options.footer.text !== \"\",\n h: 0,\n w: 0\n }\n };\n\n this.outerLabelGroupData = [];\n\n // add the key text components offscreen (title, subtitle, footer). We need to know their widths/heights for later computation\n if (this.textComponents.title.exists) text.addTitle(this);\n if (this.textComponents.subtitle.exists) text.addSubtitle(this);\n text.addFooter(this);\n\n // console.log(this);\n\n // the footer never moves. Put it in place now\n var self = this;\n text.positionFooter(self);\n var d3 = helpers.getDimensions(self.__footer.node());\n self.textComponents.footer.h = d3.h;\n self.textComponents.footer.w = d3.w;\n\n if (self.textComponents.title.exists) {\n var d1 = helpers.getDimensions(self.__title.node());\n self.textComponents.title.h = d1.h;\n self.textComponents.title.w = d1.w;\n }\n\n if (self.textComponents.subtitle.exists) {\n var d2 = helpers.getDimensions(self.__subtitle.node());\n self.textComponents.subtitle.h = d2.h;\n self.textComponents.subtitle.w = d2.w;\n }\n\n // now compute the full header height\n if (self.textComponents.title.exists || self.textComponents.subtitle.exists) {\n var headerHeight = 0;\n if (self.textComponents.title.exists) {\n headerHeight += self.textComponents.title.h;\n if (self.textComponents.subtitle.exists) {\n headerHeight += self.options.header.titleSubtitlePadding;\n }\n }\n if (self.textComponents.subtitle.exists) {\n headerHeight += self.textComponents.subtitle.h;\n }\n self.textComponents.headerHeight = headerHeight;\n }\n\n // at this point, all main text component dimensions have been calculated\n math.computePieRadius(self);\n\n // this value is used all over the place for placing things and calculating locations. We figure it out ONCE\n // and store it as part of the object\n math.calculatePieCenter(self);\n\n // position the title and subtitle\n text.positionTitle(self);\n text.positionSubtitle(self);\n\n // now create the pie chart segments, and gradients if the user desired\n if (self.options.misc.gradient.enabled) {\n segments.addGradients(self);\n }\n segments.create(self); // also creates this.arc\n\n self.__labels = {};\n labels.add(self, \"inner\", self.options.labels.inner.format);\n labels.add(self, \"outer\", self.options.labels.outer.format);\n\n // position the label elements relatively within their individual group (label, percentage, value)\n labels.positionLabelElements(self, \"inner\", self.options.labels.inner.format);\n labels.positionLabelElements(self, \"outer\", self.options.labels.outer.format);\n labels.computeOuterLabelCoords(self);\n\n // this is (and should be) dumb. 
It just places the outer groups at their calculated, collision-free positions\n labels.positionLabelGroups(self, \"outer\");\n\n // we use the label line positions for many other calculations, so ALWAYS compute them\n labels.computeLabelLinePositions(self);\n\n // only add them if they're actually enabled\n if (self.options.labels.lines.enabled && self.options.labels.outer.format !== \"none\") {\n labels.addLabelLines(self);\n }\n\n labels.positionLabelGroups(self, \"inner\");\n\n if (helpers.isFunction(self.options.callbacks.onload)) {\n try {\n self.options.callbacks.onload();\n } catch (e) { }\n }\n\n // add and position the tooltips\n if (self.options.tooltips.enabled) {\n tt.addTooltips(self);\n }\n\n segments.addSegmentEventHandlers(self);\n };\n\n var _getPercentage = function(value, total, decimalPlaces) {\n var relativeAmount = value / total;\n if (decimalPlaces <= 0) {\n return Math.round(relativeAmount * 100);\n } else {\n return (relativeAmount * 100).toFixed(decimalPlaces);\n }\n };\n\n return d3pie;\n}));\n","export * from \"./types\"\n\nexport { default as CloudConnectionStatus } from \"./cloud-connection-status\"\n","import {\n concat, cond, equals, identity, last, pipe, T,\n} from \"ramda\"\n\nimport { isDevelopmentEnv, isMainJs, isTestingEnv } from \"utils/env\"\n\n\n// this part needs to be static and should run immediately because otherwise document.currentScript\n// will be null\nconst currentScript = isTestingEnv\n ? { src: \"http://localhost:3000/some-script.js\" } // test env doesn't have document.currentScript\n : document.currentScript\n\nconst getScriptSource = () => {\n // logic based on old dashboard\n\n // http://stackoverflow.com/questions/984510/what-is-my-script-src-url\n // http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url\n const script = cond([\n [Boolean, identity],\n // \"last\" typings don't work well with HTMLScriptElement\n // if document.currentScript is not available\n [T, () => last(document.getElementsByTagName(\"script\") as unknown as [HTMLScriptElement])],\n ])(currentScript)\n\n return script.src\n}\n\nexport const getPathFromScriptSource = (source: string) => {\n // match strings not containing slash, ending with `.js`, with optional suffix started by `?`\n const jsFilenameRegex = \"[^\\\\/]*\\\\.js(\\\\/?.*)?$\"\n const staticJsPath = \"/static/js\"\n return source.replace(new RegExp(jsFilenameRegex), \"\")\n .replace(staticJsPath, \"\")\n}\n\nconst getDefaultServer = () => {\n if (isDevelopmentEnv) {\n return \"http://localhost:19999\"\n }\n\n // Agent Dashboard does not need sophisticated server-detection, which is causing problems\n // when navigating through streamed nodes. Let's overwrite that setting\n if (isMainJs) {\n const pathname = window.location.pathname\n .replace(\"index.html\", \"\")\n // todo consider .replace(/[^\\/]*\\.html/, \"\") (every .html file in the url)\n .replace(\"default.html\", \"\") // for netdata demo servers\n return window.location.origin + pathname.replace(/\\/v1\\/?$/, \"\")\n }\n\n const source = getScriptSource()\n return getPathFromScriptSource(source).replace(/\\/v1\\/?$/, \"\")\n}\n\n// append \"/\" at the end, if it's not already there\nexport const alwaysEndWithSlash = cond([\n [pipe(last, equals(\"/\")), identity],\n [T, (x: string) => concat(x, \"/\")], // R.__ typings don't work well\n])\n\nexport const serverDefault: string = alwaysEndWithSlash(\n window.netdataServer || getDefaultServer(),\n)\n\nexport const serverStatic: string = isDevelopmentEnv\n ? 
\"/\" // for localhost:3000/css/...\n : alwaysEndWithSlash(getDefaultServer()) // by default, load from netdata server\n","import { prop } from \"ramda\"\nimport { createSelector } from \"reselect\"\n\nimport { AppStateT } from \"store/app-state\"\nimport { selectChartMetadataFromChartsCall } from \"domains/global/selectors\"\n\nimport { ChartState } from \"./chart-types\"\nimport { initialSingleState } from \"./reducer\"\nimport { storeKey } from \"./constants\"\n\nexport const selectChartsState = (state: AppStateT) => state[storeKey]\nexport const selectSingleChartState = createSelector(\n selectChartsState,\n (_: unknown, { id }: { chartId?: string, id: string }) => id,\n (chartsState, id) => chartsState[id] || initialSingleState,\n)\n\nexport const selectChartData = createSelector(\n selectSingleChartState,\n (chartState) => chartState.chartData,\n)\n\nconst selectChartMetadataFromExplicitCall = createSelector(\n selectSingleChartState, prop(\"chartMetadata\"),\n)\n// dashboard.js normally fetches metadata for every individual charts, but we can prevent it\n// if metadata for ALL charts will be present in state.global (from single call)\nconst selectChartMetadata = createSelector(\n selectChartMetadataFromChartsCall,\n selectChartMetadataFromExplicitCall,\n (metadataFromAll, metadataFromSingleCall) => metadataFromAll || metadataFromSingleCall,\n)\nconst selectIsFetchingDetails = createSelector(selectSingleChartState, prop(\"isFetchingDetails\"))\n\nexport const makeSelectChartMetadataRequest = () => createSelector(\n selectChartMetadata,\n selectIsFetchingDetails,\n (chartMetadata, isFetchingDetails) => ({ chartMetadata, isFetchingDetails }),\n)\n\nexport const selectChartViewRange = createSelector(\n selectSingleChartState,\n (chartState) => chartState.viewRange,\n)\n\nexport const selectChartIsFetchingData = createSelector(\n selectSingleChartState,\n (chartState) => chartState.isFetchingData,\n)\n\nexport const selectChartFetchDataParams = createSelector(\n selectSingleChartState,\n (chartState) => chartState.fetchDataParams,\n)\n\nexport const selectResizeHeight = createSelector(\n selectSingleChartState,\n (chartState) => chartState.resizeHeight,\n)\n\nexport const selectChartPanAndZoom = createSelector(selectSingleChartState, prop(\"chartPanAndZoom\"))\n\n// count the nr of \"success\" or \"failure\" charts\nconst hasCompletedFetching = (chartState: ChartState) => chartState.isFetchDataFailure\n || Boolean(chartState.chartData) || chartState.isFetchDetailsFailure\n\nexport const selectChartsAreFetching = createSelector(selectChartsState, chartsState =>\n Object.values(chartsState).some(({ isFetchingData }) => isFetchingData)\n)\n\nexport const selectAmountOfFetchedCharts = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (hasCompletedFetching(chartState) ? 1 : 0), 0),\n)\n\nexport const selectAmountOfCharts = createSelector(\n selectChartsState,\n (chartsState) => Object.keys(chartsState).length,\n)\n\nexport const selectNameOfAnyFetchingChart = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .find((chartState) => chartState.isFetchingData)?.chartId,\n)\n\nexport const selectAmountOfSnapshotsFetched = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (chartState.snapshotData ? 
1 : 0), 0),\n)\n\nexport const selectAmountOfSnapshotsFailed = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (chartState.snapshotDataIsError ? 1 : 0), 0),\n)\n","import { useMemo } from \"react\"\nimport moment from \"moment\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectTimezoneSetting, selectUTCOffsetSetting } from \"domains/global/selectors\"\n\nconst zeropad = x => {\n if (x > -10 && x < 10) {\n return `0${x.toString()}`\n }\n return x.toString()\n}\n\nexport const isSupportingDateTimeFormat = !!(Intl && Intl.DateTimeFormat && navigator.language)\n\nconst narrowToDate = d => (typeof d === \"number\" ? new Date(d) : d)\n// these are the old netdata functions\n// we fallback to these, if the new ones fail\nexport const localeDateStringNative = d => narrowToDate(d).toLocaleDateString()\nexport const localeTimeStringNative = d => narrowToDate(d).toLocaleTimeString()\nexport const xAxisTimeStringNative = d => {\n const date = narrowToDate(d)\n return `${zeropad(date.getHours())}:${zeropad(date.getMinutes())}:${zeropad(date.getSeconds())}`\n}\n\nexport const isProperTimezone = timeZone => {\n try {\n Intl.DateTimeFormat(navigator.language, {\n localeMatcher: \"best fit\",\n formatMatcher: \"best fit\",\n weekday: \"short\",\n year: \"numeric\",\n month: \"short\",\n day: \"2-digit\",\n timeZone,\n })\n } catch (e) {\n return false\n }\n return true\n}\n\nexport const getDateWithOffset = (date, offset) => moment(date).utcOffset(offset)\n\nconst getOptions = ({ long, isTime, secs, timezone }) => ({\n hourCycle: \"h23\",\n ...(isTime\n ? {}\n : long\n ? { weekday: \"short\", year: \"numeric\", month: \"short\", day: \"2-digit\" }\n : { dateStyle: \"short\" }),\n ...(isTime && {\n timeStyle: secs ? \"medium\" : \"short\",\n }),\n timeZone: timezone,\n})\n\nconst dateFormat = (date, { locale, ...options }) =>\n new Intl.DateTimeFormat(locale ?? navigator.language, getOptions(options)).format(date)\n\nconst getTimezone = timezone => (timezone !== \"\" && timezone !== \"default\" ? timezone : undefined)\n\nexport const useDateTime = () => {\n const timezone = useSelector(selectTimezoneSetting)\n const utcOffset = useSelector(selectUTCOffsetSetting)\n\n const localeDateString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? (date, options) =>\n dateFormat(date, { long: true, timezone: getTimezone(timezone), ...options })\n : localeDateStringNative\n }, [timezone])\n\n const localeTimeString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? (date, options) =>\n dateFormat(date, {\n secs: true,\n isTime: true,\n timezone: getTimezone(timezone),\n ...options,\n })\n : localeTimeStringNative\n }, [timezone])\n\n const xAxisTimeString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? date => dateFormat(date, { secs: true, isTime: true, timezone: getTimezone(timezone) })\n : xAxisTimeStringNative\n }, [timezone])\n\n const xAxisDateString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? 
date => dateFormat(date, { long: true, timezone: getTimezone(timezone) })\n : xAxisTimeStringNative\n }, [timezone])\n\n return {\n localeDateString,\n localeTimeString,\n xAxisDateString,\n xAxisTimeString,\n utcOffset,\n }\n}\n","import { createAction } from \"redux-act\"\nimport { ChartsMetadata } from \"domains/global/types\"\n\nimport { storeKey } from \"./constants\"\n\nexport interface startSnapshotModeAction {\n charts: ChartsMetadata\n dataPoints: number\n}\nexport const startSnapshotModeAction = createAction<startSnapshotModeAction>(\n `${storeKey}/isSnapshotModeAction`,\n)\n\nexport const stopSnapshotModeAction = createAction(`${storeKey}/stopSnapshotModeAction`)\n\nexport interface ShowSignInModalAction { signInLinkHref: string }\nexport const showSignInModalAction = createAction<ShowSignInModalAction>(\n `${storeKey}/showSignInModal`,\n)\n\nexport const explicitlySignInAction = createAction(`${storeKey}/explicitlySignIn`)\n\nexport interface IsSignedInAction { isSignedIn: boolean }\nexport const isSignedInAction = createAction<IsSignedInAction>(`${storeKey}/isSignedInAction`)\n\nexport interface SetOfflineAction { offline: boolean }\nexport const setOfflineAction = createAction<SetOfflineAction>(`${storeKey}/setOfflineAction`)\n","export const storeKey = \"chart\"\n\nexport const fallbackUpdateTimeInterval = 2000\n\n// corresponds to force_update_at in old dashboard\n// throttle time between use globalPanAndZoom change actions - and requests sent to server\nexport const panAndZoomDelay = 300\n","export const seconds4human = (\n totalSeconds: number | string, overrideOptions?: {[key: string]: string},\n) => {\n const defaultOptions: {[key: string]: string} = {\n now: \"now\",\n space: \" \",\n negative_suffix: \"ago\",\n day: \"day\",\n days: \"days\",\n hour: \"hour\",\n hours: \"hours\",\n minute: \"min\",\n minutes: \"mins\",\n second: \"sec\",\n seconds: \"secs\",\n and: \"and\",\n }\n\n const options = typeof overrideOptions === \"object\"\n ? { ...defaultOptions, ...overrideOptions }\n : defaultOptions\n\n let seconds = typeof totalSeconds === \"string\"\n ? 
parseInt(totalSeconds, 10)\n : totalSeconds\n\n if (seconds === 0) {\n return options.now\n }\n\n let suffix = \"\"\n if (seconds < 0) {\n seconds = -seconds\n if (options.negative_suffix !== \"\") {\n suffix = options.space + options.negative_suffix\n }\n }\n\n const days = Math.floor(seconds / 86400)\n seconds -= (days * 86400)\n\n const hours = Math.floor(seconds / 3600)\n seconds -= (hours * 3600)\n\n const minutes = Math.floor(seconds / 60)\n seconds -= (minutes * 60)\n\n const strings = []\n\n if (days > 1) {\n strings.push(days.toString() + options.space + options.days)\n } else if (days === 1) {\n strings.push(days.toString() + options.space + options.day)\n }\n\n if (hours > 1) {\n strings.push(hours.toString() + options.space + options.hours)\n } else if (hours === 1) {\n strings.push(hours.toString() + options.space + options.hour)\n }\n\n if (minutes > 1) {\n strings.push(minutes.toString() + options.space + options.minutes)\n } else if (minutes === 1) {\n strings.push(minutes.toString() + options.space + options.minute)\n }\n\n if (seconds > 1) {\n strings.push(Math.floor(seconds).toString() + options.space + options.seconds)\n } else if (seconds === 1) {\n strings.push(Math.floor(seconds).toString() + options.space + options.second)\n }\n\n if (strings.length === 1) {\n return strings.pop() + suffix\n }\n\n const last = strings.pop()\n return `${strings.join(\", \")} ${options.and} ${last}${suffix}`\n}\n","/*! @license Copyright 2017 Dan Vanderkam (danvdk@gmail.com) MIT-licensed (http://opensource.org/licenses/MIT) */\n// SPDX-License-Identifier: MIT\n!function(t){if(\"object\"==typeof exports&&\"undefined\"!=typeof module)module.exports=t();else if(\"function\"==typeof define&&define.amd)define([],t);else{var e;e=\"undefined\"!=typeof window?window:\"undefined\"!=typeof global?global:\"undefined\"!=typeof self?self:this,e.Dygraph=t()}}(function(){return function t(e,a,i){function n(o,s){if(!a[o]){if(!e[o]){var l=\"function\"==typeof require&&require;if(!s&&l)return l(o,!0);if(r)return r(o,!0);var h=new Error(\"Cannot find module '\"+o+\"'\");throw h.code=\"MODULE_NOT_FOUND\",h}var u=a[o]={exports:{}};e[o][0].call(u.exports,function(t){var a=e[o][1][t];return n(a||t)},u,u.exports,t,e,a,i)}return a[o].exports}for(var r=\"function\"==typeof require&&require,o=0;o<i.length;o++)n(i[o]);return n}({1:[function(t,e,a){function i(){throw new Error(\"setTimeout has not been defined\")}function n(){throw new Error(\"clearTimeout has not been defined\")}function r(t){if(d===setTimeout)return setTimeout(t,0);if((d===i||!d)&&setTimeout)return d=setTimeout,setTimeout(t,0);try{return d(t,0)}catch(e){try{return d.call(null,t,0)}catch(e){return d.call(this,t,0)}}}function o(t){if(c===clearTimeout)return clearTimeout(t);if((c===n||!c)&&clearTimeout)return c=clearTimeout,clearTimeout(t);try{return c(t)}catch(e){try{return c.call(null,t)}catch(e){return c.call(this,t)}}}function s(){v&&g&&(v=!1,g.length?f=g.concat(f):_=-1,f.length&&l())}function l(){if(!v){var t=r(s);v=!0;for(var e=f.length;e;){for(g=f,f=[];++_<e;)g&&g[_].run();_=-1,e=f.length}g=null,v=!1,o(t)}}function h(t,e){this.fun=t,this.array=e}function u(){}var d,c,p=e.exports={};!function(){try{d=\"function\"==typeof setTimeout?setTimeout:i}catch(t){d=i}try{c=\"function\"==typeof clearTimeout?clearTimeout:n}catch(t){c=n}}();var g,f=[],v=!1,_=-1;p.nextTick=function(t){var e=new Array(arguments.length-1);if(arguments.length>1)for(var a=1;a<arguments.length;a++)e[a-1]=arguments[a];f.push(new 
h(t,e)),1!==f.length||v||r(l)},h.prototype.run=function(){this.fun.apply(null,this.array)},p.title=\"browser\",p.browser=!0,p.env={},p.argv=[],p.version=\"\",p.versions={},p.on=u,p.addListener=u,p.once=u,p.off=u,p.removeListener=u,p.removeAllListeners=u,p.emit=u,p.prependListener=u,p.prependOnceListener=u,p.listeners=function(t){return[]},p.binding=function(t){throw new Error(\"process.binding is not supported\")},p.cwd=function(){return\"/\"},p.chdir=function(t){throw new Error(\"process.chdir is not supported\")},p.umask=function(){return 0}},{}],2:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./bars\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o=[],s=a.get(\"logscale\"),l=0;l<t.length;l++)i=t[l][0],r=t[l][e],s&&null!==r&&(r[0]<=0||r[1]<=0||r[2]<=0)&&(r=null),null!==r?(n=r[1],null===n||isNaN(n)?o.push([i,n,[n,n]]):o.push([i,n,[r[0],r[2]]])):o.push([i,null,[null,null]]);return o},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l,h,u=[];for(n=0,o=0,r=0,s=0,l=0;l<t.length;l++){if(i=t[l][1],h=t[l][2],u[l]=t[l],null===i||isNaN(i)||(n+=h[0],o+=i,r+=h[1],s+=1),l-e>=0){var d=t[l-e];null===d[1]||isNaN(d[1])||(n-=d[2][0],o-=d[1],r-=d[2][1],s-=1)}u[l]=s?[t[l][0],1*o/s,[1*n/s,1*r/s]]:[t[l][0],null,[null,null]]}return u},a.default=r,e.exports=a.default},{\"./bars\":5}],3:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./bars\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s=[],l=a.get(\"sigma\"),h=a.get(\"logscale\"),u=0;u<t.length;u++)i=t[u][0],o=t[u][e],h&&null!==o&&(o[0]<=0||o[0]-l*o[1]<=0)&&(o=null),null!==o?(n=o[0],null===n||isNaN(n)?s.push([i,n,[n,n,n]]):(r=l*o[1],s.push([i,n,[n-r,n+r,o[1]]]))):s.push([i,null,[null,null,null]]);return s},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l,h,u,d,c=[],p=a.get(\"sigma\");for(i=0;i<t.length;i++){for(s=0,u=0,l=0,n=Math.max(0,i-e+1);n<i+1;n++)null===(r=t[n][1])||isNaN(r)||(l++,s+=r,u+=Math.pow(t[n][2][2],2));l?(h=Math.sqrt(u)/l,d=s/l,c[i]=[t[i][0],d,[d-p*h,d+p*h]]):(o=1==e?t[i][1]:null,c[i]=[t[i][0],o,[o,o]])}return c},a.default=r,e.exports=a.default},{\"./bars\":5}],4:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./bars\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s,l,h,u,d=[],c=a.get(\"sigma\"),p=a.get(\"logscale\"),g=0;g<t.length;g++)i=t[g][0],r=t[g][e],p&&null!==r&&(r[0]<=0||r[1]<=0)&&(r=null),null!==r?(o=r[0],s=r[1],null===o||isNaN(o)?d.push([i,o,[o,o,o,s]]):(l=s?o/s:0,h=s?c*Math.sqrt(l*(1-l)/s):1,u=100*h,n=100*l,d.push([i,n,[n-u,n+u,o,s]]))):d.push([i,null,[null,null,null,null]]);return d},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s=[],l=a.get(\"sigma\"),h=a.get(\"wilsonInterval\"),u=0,d=0;for(r=0;r<t.length;r++){u+=t[r][2][2],d+=t[r][2][3],r-e>=0&&(u-=t[r-e][2][2],d-=t[r-e][2][3]);var c=t[r][0],p=d?u/d:0;if(h)if(d){var g=p<0?0:p,f=d,v=l*Math.sqrt(g*(1-g)/f+l*l/(4*f*f)),_=1+l*l/d;i=(g+l*l/(2*d)-v)/_,n=(g+l*l/(2*d)+v)/_,s[r]=[c,100*g,[100*i,100*n]]}else s[r]=[c,0,[0,0]];else o=d?l*Math.sqrt(p*(1-p)/d):1,s[r]=[c,100*p,[100*(p-o),100*(p+o)]]}return 
s},a.default=r,e.exports=a.default},{\"./bars\":5}],5:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"./datahandler\"),r=i(n),o=t(\"../dygraph-layout\"),s=i(o),l=function(){r.default.call(this)};l.prototype=new r.default,l.prototype.extractSeries=function(t,e,a){},l.prototype.rollingAverage=function(t,e,a){},l.prototype.onPointsCreated_=function(t,e){for(var a=0;a<t.length;++a){var i=t[a],n=e[a];n.y_top=NaN,n.y_bottom=NaN,n.yval_minus=r.default.parseFloat(i[2][0]),n.yval_plus=r.default.parseFloat(i[2][1])}},l.prototype.getExtremeYValues=function(t,e,a){for(var i,n=null,r=null,o=t.length-1,s=0;s<=o;s++)if(null!==(i=t[s][1])&&!isNaN(i)){var l=t[s][2][0],h=t[s][2][1];l>i&&(l=i),h<i&&(h=i),(null===r||h>r)&&(r=h),(null===n||l<n)&&(n=l)}return[n,r]},l.prototype.onLineEvaluated=function(t,e,a){for(var i,n=0;n<t.length;n++)i=t[n],i.y_top=s.default.calcYNormal_(e,i.yval_minus,a),i.y_bottom=s.default.calcYNormal_(e,i.yval_plus,a)},a.default=l,e.exports=a.default},{\"../dygraph-layout\":13,\"./datahandler\":6}],6:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=function(){},n=i;n.X=0,n.Y=1,n.EXTRAS=2,n.prototype.extractSeries=function(t,e,a){},n.prototype.seriesToPoints=function(t,e,a){for(var i=[],r=0;r<t.length;++r){var o=t[r],s=o[1],l=null===s?null:n.parseFloat(s),h={x:NaN,y:NaN,xval:n.parseFloat(o[0]),yval:l,name:e,idx:r+a,canvasx:NaN,canvasy:NaN};i.push(h)}return this.onPointsCreated_(t,i),i},n.prototype.onPointsCreated_=function(t,e){},n.prototype.rollingAverage=function(t,e,a){},n.prototype.getExtremeYValues=function(t,e,a){},n.prototype.onLineEvaluated=function(t,e,a){},n.parseFloat=function(t){return null===t?NaN:t},a.default=i,e.exports=a.default},{}],7:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"./datahandler\"),r=(i(n),t(\"./default\")),o=i(r),s=function(){};s.prototype=new o.default,s.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s,l,h=[],u=a.get(\"logscale\"),d=0;d<t.length;d++)i=t[d][0],r=t[d][e],u&&null!==r&&(r[0]<=0||r[1]<=0)&&(r=null),null!==r?(o=r[0],s=r[1],null===o||isNaN(o)?h.push([i,o,[o,s]]):(l=s?o/s:0,n=100*l,h.push([i,n,[o,s]]))):h.push([i,null,[null,null]]);return h},s.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n=[],r=0,o=0;for(i=0;i<t.length;i++){r+=t[i][2][0],o+=t[i][2][1],i-e>=0&&(r-=t[i-e][2][0],o-=t[i-e][2][1]);var s=t[i][0],l=o?r/o:0;n[i]=[s,100*l]}return n},a.default=s,e.exports=a.default},{\"./datahandler\":6,\"./default\":8}],8:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./datahandler\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i=[],n=a.get(\"logscale\"),r=0;r<t.length;r++){var o=t[r][0],s=t[r][e];n&&s<=0&&(s=null),i.push([o,s])}return i},r.prototype.rollingAverage=function(t,e,a){e=Math.min(e,t.length);var i,n,r,o,s,l=[];if(1==e)return t;for(i=0;i<t.length;i++){for(o=0,s=0,n=Math.max(0,i-e+1);n<i+1;n++)null===(r=t[n][1])||isNaN(r)||(s++,o+=t[n][1]);l[i]=s?[t[i][0],o/s]:[t[i][0],null]}return l},r.prototype.getExtremeYValues=function(t,e,a){for(var 
i,n=null,r=null,o=t.length-1,s=0;s<=o;s++)null===(i=t[s][1])||isNaN(i)||((null===r||i>r)&&(r=i),(null===n||i<n)&&(n=i));return[n,r]},a.default=r,e.exports=a.default},{\"./datahandler\":6}],9:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=t(\"./dygraph\"),o=function(t){return t&&t.__esModule?t:{default:t}}(r),s=function(t,e,a,i){if(this.dygraph_=t,this.layout=i,this.element=e,this.elementContext=a,this.height=t.height_,this.width=t.width_,!n.isCanvasSupported(this.element))throw\"Canvas is not supported.\";this.area=i.getPlotArea();var r=this.dygraph_.canvas_ctx_;r.beginPath(),r.rect(this.area.x,this.area.y,this.area.w,this.area.h),r.clip(),r=this.dygraph_.hidden_ctx_,r.beginPath(),r.rect(this.area.x,this.area.y,this.area.w,this.area.h),r.clip()};s.prototype.clear=function(){this.elementContext.clearRect(0,0,this.width,this.height)},s.prototype.render=function(){this._updatePoints(),this._renderLineChart()},s._getIteratorPredicate=function(t){return t?s._predicateThatSkipsEmptyPoints:null},s._predicateThatSkipsEmptyPoints=function(t,e){return null!==t[e].yval},s._drawStyledLine=function(t,e,a,i,r,o,l){var h=t.dygraph,u=h.getBooleanOption(\"stepPlot\",t.setName);n.isArrayLike(i)||(i=null);var d=h.getBooleanOption(\"drawGapEdgePoints\",t.setName),c=t.points,p=t.setName,g=n.createIterator(c,0,c.length,s._getIteratorPredicate(h.getBooleanOption(\"connectSeparatedPoints\",p))),f=i&&i.length>=2,v=t.drawingContext;v.save(),f&&v.setLineDash&&v.setLineDash(i);var _=s._drawSeries(t,g,a,l,r,d,u,e);s._drawPointsOnLine(t,_,o,e,l),f&&v.setLineDash&&v.setLineDash([]),v.restore()},s._drawSeries=function(t,e,a,i,n,r,o,s){var l,h,u=null,d=null,c=null,p=[],g=!0,f=t.drawingContext;f.beginPath(),f.strokeStyle=s,f.lineWidth=a;for(var v=e.array_,_=e.end_,y=e.predicate_,x=e.start_;x<_;x++){if(h=v[x],y){for(;x<_&&!y(v,x);)x++;if(x==_)break;h=v[x]}if(null===h.canvasy||h.canvasy!=h.canvasy)o&&null!==u&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),u=d=null;else{if(l=!1,r||null===u){e.nextIdx_=x,e.next(),c=e.hasNext?e.peek.canvasy:null;var m=null===c||c!=c;l=null===u&&m,r&&(!g&&null===u||e.hasNext&&m)&&(l=!0)}null!==u?a&&(o&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),f.lineTo(h.canvasx,h.canvasy)):f.moveTo(h.canvasx,h.canvasy),(n||l)&&p.push([h.canvasx,h.canvasy,h.idx]),u=h.canvasx,d=h.canvasy}g=!1}return f.stroke(),p},s._drawPointsOnLine=function(t,e,a,i,n){for(var r=t.drawingContext,o=0;o<e.length;o++){var s=e[o];r.save(),a.call(t.dygraph,t.dygraph,t.setName,r,s[0],s[1],i,n,s[2]),r.restore()}},s.prototype._updatePoints=function(){for(var t=this.layout.points,e=t.length;e--;)for(var a=t[e],i=a.length;i--;){var n=a[i];n.canvasx=this.area.w*n.x+this.area.x,n.canvasy=this.area.h*n.y+this.area.y}},s.prototype._renderLineChart=function(t,e){var a,i,r=e||this.elementContext,o=this.layout.points,s=this.layout.setNames;this.colors=this.dygraph_.colorsMap_;var l=this.dygraph_.getOption(\"plotter\"),h=l;n.isArrayLike(h)||(h=[h]);var u={};for(a=0;a<s.length;a++){i=s[a];var d=this.dygraph_.getOption(\"plotter\",i);d!=l&&(u[i]=d)}for(a=0;a<h.length;a++)for(var c=h[a],p=a==h.length-1,g=0;g<o.length;g++)if(i=s[g],!t||i==t){var f=o[g],v=c;if(i in u){if(!p)continue;v=u[i]}var 
_=this.colors[i],y=this.dygraph_.getOption(\"strokeWidth\",i);r.save(),r.strokeStyle=_,r.lineWidth=y,v({points:f,setName:i,drawingContext:r,color:_,strokeWidth:y,dygraph:this.dygraph_,axis:this.dygraph_.axisPropertiesForSeries(i),plotArea:this.area,seriesIndex:g,seriesCount:o.length,singleSeriesName:t,allSeriesPoints:o}),r.restore()}},s._Plotters={linePlotter:function(t){s._linePlotter(t)},fillPlotter:function(t){s._fillPlotter(t)},errorPlotter:function(t){s._errorPlotter(t)}},s._linePlotter=function(t){var e=t.dygraph,a=t.setName,i=t.strokeWidth,r=e.getNumericOption(\"strokeBorderWidth\",a),o=e.getOption(\"drawPointCallback\",a)||n.Circles.DEFAULT,l=e.getOption(\"strokePattern\",a),h=e.getBooleanOption(\"drawPoints\",a),u=e.getNumericOption(\"pointSize\",a);r&&i&&s._drawStyledLine(t,e.getOption(\"strokeBorderColor\",a),i+2*r,l,h,o,u),s._drawStyledLine(t,t.color,i,l,h,o,u)},s._errorPlotter=function(t){var e=t.dygraph,a=t.setName;if(e.getBooleanOption(\"errorBars\")||e.getBooleanOption(\"customBars\")){e.getBooleanOption(\"fillGraph\",a)&&console.warn(\"Can't use fillGraph option with error bars\");var i,r=t.drawingContext,o=t.color,l=e.getNumericOption(\"fillAlpha\",a),h=e.getBooleanOption(\"stepPlot\",a),u=t.points,d=n.createIterator(u,0,u.length,s._getIteratorPredicate(e.getBooleanOption(\"connectSeparatedPoints\",a))),c=NaN,p=NaN,g=[-1,-1],f=n.toRGB_(o),v=\"rgba(\"+f.r+\",\"+f.g+\",\"+f.b+\",\"+l+\")\";r.fillStyle=v,r.beginPath();for(var _=function(t){return null===t||void 0===t||isNaN(t)};d.hasNext;){var y=d.next();!h&&_(y.y)||h&&!isNaN(p)&&_(p)?c=NaN:(i=[y.y_bottom,y.y_top],h&&(p=y.y),isNaN(i[0])&&(i[0]=y.y),isNaN(i[1])&&(i[1]=y.y),i[0]=t.plotArea.h*i[0]+t.plotArea.y,i[1]=t.plotArea.h*i[1]+t.plotArea.y,isNaN(c)||(h?(r.moveTo(c,g[0]),r.lineTo(y.canvasx,g[0]),r.lineTo(y.canvasx,g[1])):(r.moveTo(c,g[0]),r.lineTo(y.canvasx,i[0]),r.lineTo(y.canvasx,i[1])),r.lineTo(c,g[1]),r.closePath()),g=i,c=y.canvasx)}r.fill()}},s._fastCanvasProxy=function(t){var e=[],a=null,i=null,n=0,r=function(t){if(!(e.length<=1)){for(var a=e.length-1;a>0;a--){var i=e[a];if(2==i[0]){var n=e[a-1];n[1]==i[1]&&n[2]==i[2]&&e.splice(a,1)}}for(var a=0;a<e.length-1;){var i=e[a];2==i[0]&&2==e[a+1][0]?e.splice(a,1):a++}if(e.length>2&&!t){var r=0;2==e[0][0]&&r++;for(var o=null,s=null,a=r;a<e.length;a++){var i=e[a];if(1==i[0])if(null===o&&null===s)o=a,s=a;else{var l=i[2];l<e[o][2]?o=a:l>e[s][2]&&(s=a)}}var h=e[o],u=e[s];e.splice(r,e.length-r),o<s?(e.push(h),e.push(u)):o>s?(e.push(u),e.push(h)):e.push(h)}}},o=function(a){r(a);for(var o=0,s=e.length;o<s;o++){var l=e[o];1==l[0]?t.lineTo(l[1],l[2]):2==l[0]&&t.moveTo(l[1],l[2])}e.length&&(i=e[e.length-1][1]),n+=e.length,e=[]},s=function(t,n,r){var s=Math.round(n);if(null===a||s!=a){var l=a-i>1,h=s-a>1;o(l||h),a=s}e.push([t,n,r])};return{moveTo:function(t,e){s(2,t,e)},lineTo:function(t,e){s(1,t,e)},stroke:function(){o(!0),t.stroke()},fill:function(){o(!0),t.fill()},beginPath:function(){o(!0),t.beginPath()},closePath:function(){o(!0),t.closePath()},_count:function(){return n}}},s._fillPlotter=function(t){if(!t.singleSeriesName&&0===t.seriesIndex){for(var e=t.dygraph,a=e.getLabels().slice(1),i=a.length;i>=0;i--)e.visibility()[i]||a.splice(i,1);if(function(){for(var t=0;t<a.length;t++)if(e.getBooleanOption(\"fillGraph\",a[t]))return!0;return!1}())for(var r,l,h=t.plotArea,u=t.allSeriesPoints,d=u.length,c=e.getBooleanOption(\"stackedGraph\"),p=e.getColors(),g={},f=function(t,e,a,i){if(t.lineTo(e,a),c)for(var n=i.length-1;n>=0;n--){var r=i[n];t.lineTo(r[0],r[1])}},v=d-1;v>=0;v--){var 
_=t.drawingContext,y=a[v];if(e.getBooleanOption(\"fillGraph\",y)){var x=e.getNumericOption(\"fillAlpha\",y),m=e.getBooleanOption(\"stepPlot\",y),b=p[v],w=e.axisPropertiesForSeries(y),A=1+w.minyval*w.yscale;A<0?A=0:A>1&&(A=1),A=h.h*A+h.y;var O,D=u[v],E=n.createIterator(D,0,D.length,s._getIteratorPredicate(e.getBooleanOption(\"connectSeparatedPoints\",y))),L=NaN,T=[-1,-1],S=n.toRGB_(b),P=\"rgba(\"+S.r+\",\"+S.g+\",\"+S.b+\",\"+x+\")\";_.fillStyle=P,_.beginPath();var C,M=!0;(D.length>2*e.width_||o.default.FORCE_FAST_PROXY)&&(_=s._fastCanvasProxy(_));for(var N,F=[];E.hasNext;)if(N=E.next(),n.isOK(N.y)||m){if(c){if(!M&&C==N.xval)continue;M=!1,C=N.xval,r=g[N.canvasx];var k;k=void 0===r?A:l?r[0]:r,O=[N.canvasy,k],m?-1===T[0]?g[N.canvasx]=[N.canvasy,A]:g[N.canvasx]=[N.canvasy,T[0]]:g[N.canvasx]=N.canvasy}else O=isNaN(N.canvasy)&&m?[h.y+h.h,A]:[N.canvasy,A];isNaN(L)?(_.moveTo(N.canvasx,O[1]),_.lineTo(N.canvasx,O[0])):(m?(_.lineTo(N.canvasx,T[0]),_.lineTo(N.canvasx,O[0])):_.lineTo(N.canvasx,O[0]),c&&(F.push([L,T[1]]),l&&r?F.push([N.canvasx,r[1]]):F.push([N.canvasx,O[1]]))),T=O,L=N.canvasx}else f(_,L,T[1],F),F=[],L=NaN,null===N.y_stacked||isNaN(N.y_stacked)||(g[N.canvasx]=h.h*N.y_stacked+h.y);l=m,O&&N&&(f(_,N.canvasx,O[1],F),F=[]),_.fill()}}}},a.default=s,e.exports=a.default},{\"./dygraph\":18,\"./dygraph-utils\":17}],10:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}function n(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}Object.defineProperty(a,\"__esModule\",{value:!0});var r=t(\"./dygraph-tickers\"),o=n(r),s=t(\"./dygraph-interaction-model\"),l=i(s),h=t(\"./dygraph-canvas\"),u=i(h),d=t(\"./dygraph-utils\"),c=n(d),p={highlightCircleSize:3,highlightSeriesOpts:null,highlightSeriesBackgroundAlpha:.5,highlightSeriesBackgroundColor:\"rgb(255, 255, 255)\",labelsSeparateLines:!1,labelsShowZeroValues:!0,labelsKMB:!1,labelsKMG2:!1,showLabelsOnHighlight:!0,digitsAfterDecimal:2,maxNumberWidth:6,sigFigs:null,strokeWidth:1,strokeBorderWidth:0,strokeBorderColor:\"white\",axisTickSize:3,axisLabelFontSize:14,rightGap:5,showRoller:!1,xValueParser:void 
0,delimiter:\",\",sigma:2,errorBars:!1,fractions:!1,wilsonInterval:!0,customBars:!1,fillGraph:!1,fillAlpha:.15,connectSeparatedPoints:!1,stackedGraph:!1,stackedGraphNaNFill:\"all\",hideOverlayOnMouseOut:!0,legend:\"onmouseover\",stepPlot:!1,xRangePad:0,yRangePad:null,drawAxesAtZero:!1,titleHeight:28,xLabelHeight:18,yLabelWidth:18,axisLineColor:\"black\",axisLineWidth:.3,gridLineWidth:.3,axisLabelWidth:50,gridLineColor:\"rgb(128,128,128)\",interactionModel:l.default.defaultModel,animatedZooms:!1,showRangeSelector:!1,rangeSelectorHeight:40,rangeSelectorPlotStrokeColor:\"#808FAB\",rangeSelectorPlotFillGradientColor:\"white\",rangeSelectorPlotFillColor:\"#A7B1C4\",rangeSelectorBackgroundStrokeColor:\"gray\",rangeSelectorBackgroundLineWidth:1,rangeSelectorPlotLineWidth:1.5,rangeSelectorForegroundStrokeColor:\"black\",rangeSelectorForegroundLineWidth:1,rangeSelectorAlpha:.6,showInRangeSelector:null,plotter:[u.default._fillPlotter,u.default._errorPlotter,u.default._linePlotter],plugins:[],axes:{x:{pixelsPerLabel:70,axisLabelWidth:60,axisLabelFormatter:c.dateAxisLabelFormatter,valueFormatter:c.dateValueFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.dateTicker},y:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.numericTicks},y2:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawAxis:!0,drawGrid:!1,independentTicks:!1,ticker:o.numericTicks}}};a.default=p,e.exports=a.default},{\"./dygraph-canvas\":9,\"./dygraph-interaction-model\":12,\"./dygraph-tickers\":16,\"./dygraph-utils\":17}],11:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(t){this.container=t};r.prototype.draw=function(t,e){this.container.innerHTML=\"\",void 0!==this.date_graph&&this.date_graph.destroy(),this.date_graph=new n.default(this.container,t,e)},r.prototype.setSelection=function(t){var e=!1;t.length&&(e=t[0].row),this.date_graph.setSelection(e)},r.prototype.getSelection=function(){var t=[],e=this.date_graph.getSelection();if(e<0)return t;for(var a=this.date_graph.layout_.points,i=0;i<a.length;++i)t.push({row:e,column:i+1});return t},a.default=r,e.exports=a.default},{\"./dygraph\":18}],12:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r={};r.maybeTreatMouseOpAsClick=function(t,e,a){a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=Math.abs(a.dragEndX-a.dragStartX),o=Math.abs(a.dragEndY-a.dragStartY);i<2&&o<2&&void 0!==e.lastx_&&-1!=e.lastx_&&r.treatMouseOpAsClick(e,t,a),a.regionWidth=i,a.regionHeight=o},r.startPan=function(t,e,a){var i,r;a.isPanning=!0;var o=e.xAxisRange();if(e.getOptionForAxis(\"logscale\",\"x\")?(a.initialLeftmostDate=n.log10(o[0]),a.dateRange=n.log10(o[1])-n.log10(o[0])):(a.initialLeftmostDate=o[0],a.dateRange=o[1]-o[0]),a.xUnitsPerPixel=a.dateRange/(e.plotter_.area.w-1),e.getNumericOption(\"panEdgeFraction\")){var s=e.width_*e.getNumericOption(\"panEdgeFraction\"),l=e.xAxisExtremes(),h=e.toDomXCoord(l[0])-s,u=e.toDomXCoord(l[1])+s,d=e.toDataXCoord(h),c=e.toDataXCoord(u);a.boundedDates=[d,c];var 
p=[],g=e.height_*e.getNumericOption(\"panEdgeFraction\");for(i=0;i<e.axes_.length;i++){r=e.axes_[i];var f=r.extremeRange,v=e.toDomYCoord(f[0],i)+g,_=e.toDomYCoord(f[1],i)-g,y=e.toDataYCoord(v,i),x=e.toDataYCoord(_,i);p[i]=[y,x]}a.boundedValues=p}for(a.is2DPan=!1,a.axes=[],i=0;i<e.axes_.length;i++){r=e.axes_[i];var m={},b=e.yAxisRange(i);e.attributes_.getForAxis(\"logscale\",i)?(m.initialTopValue=n.log10(b[1]),m.dragValueRange=n.log10(b[1])-n.log10(b[0])):(m.initialTopValue=b[1],m.dragValueRange=b[1]-b[0]),m.unitsPerPixel=m.dragValueRange/(e.plotter_.area.h-1),a.axes.push(m),r.valueRange&&(a.is2DPan=!0)}},r.movePan=function(t,e,a){a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=a.initialLeftmostDate-(a.dragEndX-a.dragStartX)*a.xUnitsPerPixel;a.boundedDates&&(i=Math.max(i,a.boundedDates[0]));var r=i+a.dateRange;if(a.boundedDates&&r>a.boundedDates[1]&&(i-=r-a.boundedDates[1],r=i+a.dateRange),e.getOptionForAxis(\"logscale\",\"x\")?e.dateWindow_=[Math.pow(n.LOG_SCALE,i),Math.pow(n.LOG_SCALE,r)]:e.dateWindow_=[i,r],a.is2DPan)for(var o=a.dragEndY-a.dragStartY,s=0;s<e.axes_.length;s++){var l=e.axes_[s],h=a.axes[s],u=o*h.unitsPerPixel,d=a.boundedValues?a.boundedValues[s]:null,c=h.initialTopValue+u;d&&(c=Math.min(c,d[1]));var p=c-h.dragValueRange;d&&p<d[0]&&(c-=p-d[0],p=c-h.dragValueRange),e.attributes_.getForAxis(\"logscale\",s)?l.valueRange=[Math.pow(n.LOG_SCALE,p),Math.pow(n.LOG_SCALE,c)]:l.valueRange=[p,c]}e.drawGraph_(!1)},r.endPan=r.maybeTreatMouseOpAsClick,r.startZoom=function(t,e,a){a.isZooming=!0,a.zoomMoved=!1},r.moveZoom=function(t,e,a){a.zoomMoved=!0,a.dragEndX=n.dragGetX_(t,a),a.dragEndY=n.dragGetY_(t,a);var i=Math.abs(a.dragStartX-a.dragEndX),r=Math.abs(a.dragStartY-a.dragEndY);a.dragDirection=i<r/2?n.VERTICAL:n.HORIZONTAL,e.drawZoomRect_(a.dragDirection,a.dragStartX,a.dragEndX,a.dragStartY,a.dragEndY,a.prevDragDirection,a.prevEndX,a.prevEndY),a.prevEndX=a.dragEndX,a.prevEndY=a.dragEndY,a.prevDragDirection=a.dragDirection},r.treatMouseOpAsClick=function(t,e,a){for(var i=t.getFunctionOption(\"clickCallback\"),n=t.getFunctionOption(\"pointClickCallback\"),r=null,o=-1,s=Number.MAX_VALUE,l=0;l<t.selPoints_.length;l++){var h=t.selPoints_[l],u=Math.pow(h.canvasx-a.dragEndX,2)+Math.pow(h.canvasy-a.dragEndY,2);!isNaN(u)&&(-1==o||u<s)&&(s=u,o=l)}var d=t.getNumericOption(\"highlightCircleSize\")+2;if(s<=d*d&&(r=t.selPoints_[o]),r){var c={cancelable:!0,point:r,canvasx:a.dragEndX,canvasy:a.dragEndY};if(t.cascadeEvents_(\"pointClick\",c))return;n&&n.call(t,e,r)}var c={cancelable:!0,xval:t.lastx_,pts:t.selPoints_,canvasx:a.dragEndX,canvasy:a.dragEndY};t.cascadeEvents_(\"click\",c)||i&&i.call(t,e,t.lastx_,t.selPoints_)},r.endZoom=function(t,e,a){e.clearZoomRect_(),a.isZooming=!1,r.maybeTreatMouseOpAsClick(t,e,a);var i=e.getArea();if(a.regionWidth>=10&&a.dragDirection==n.HORIZONTAL){var o=Math.min(a.dragStartX,a.dragEndX),s=Math.max(a.dragStartX,a.dragEndX);o=Math.max(o,i.x),s=Math.min(s,i.x+i.w),o<s&&e.doZoomX_(o,s),a.cancelNextDblclick=!0}else if(a.regionHeight>=10&&a.dragDirection==n.VERTICAL){var l=Math.min(a.dragStartY,a.dragEndY),h=Math.max(a.dragStartY,a.dragEndY);l=Math.max(l,i.y),h=Math.min(h,i.y+i.h),l<h&&e.doZoomY_(l,h),a.cancelNextDblclick=!0}a.dragStartX=null,a.dragStartY=null},r.startTouch=function(t,e,a){t.preventDefault(),t.touches.length>1&&(a.startTimeForDoubleTapMs=null);for(var i=[],n=0;n<t.touches.length;n++){var 
r=t.touches[n];i.push({pageX:r.pageX,pageY:r.pageY,dataX:e.toDataXCoord(r.pageX),dataY:e.toDataYCoord(r.pageY)})}if(a.initialTouches=i,1==i.length)a.initialPinchCenter=i[0],a.touchDirections={x:!0,y:!0};else if(i.length>=2){a.initialPinchCenter={pageX:.5*(i[0].pageX+i[1].pageX),pageY:.5*(i[0].pageY+i[1].pageY),dataX:.5*(i[0].dataX+i[1].dataX),dataY:.5*(i[0].dataY+i[1].dataY)};var o=180/Math.PI*Math.atan2(a.initialPinchCenter.pageY-i[0].pageY,i[0].pageX-a.initialPinchCenter.pageX);o=Math.abs(o),o>90&&(o=90-o),a.touchDirections={x:o<67.5,y:o>22.5}}a.initialRange={x:e.xAxisRange(),y:e.yAxisRange()}},r.moveTouch=function(t,e,a){a.startTimeForDoubleTapMs=null;var i,n=[];for(i=0;i<t.touches.length;i++){var r=t.touches[i];n.push({pageX:r.pageX,pageY:r.pageY})}var o,s=a.initialTouches,l=a.initialPinchCenter;o=1==n.length?n[0]:{pageX:.5*(n[0].pageX+n[1].pageX),pageY:.5*(n[0].pageY+n[1].pageY)};var h={pageX:o.pageX-l.pageX,pageY:o.pageY-l.pageY},u=a.initialRange.x[1]-a.initialRange.x[0],d=a.initialRange.y[0]-a.initialRange.y[1];h.dataX=h.pageX/e.plotter_.area.w*u,h.dataY=h.pageY/e.plotter_.area.h*d;var c,p;if(1==n.length)c=1,p=1;else if(n.length>=2){var g=s[1].pageX-l.pageX;c=(n[1].pageX-o.pageX)/g;var f=s[1].pageY-l.pageY;p=(n[1].pageY-o.pageY)/f}c=Math.min(8,Math.max(.125,c)),p=Math.min(8,Math.max(.125,p));var v=!1;if(a.touchDirections.x&&(e.dateWindow_=[l.dataX-h.dataX+(a.initialRange.x[0]-l.dataX)/c,l.dataX-h.dataX+(a.initialRange.x[1]-l.dataX)/c],v=!0),a.touchDirections.y)for(i=0;i<1;i++){var _=e.axes_[i],y=e.attributes_.getForAxis(\"logscale\",i);y||(_.valueRange=[l.dataY-h.dataY+(a.initialRange.y[0]-l.dataY)/p,l.dataY-h.dataY+(a.initialRange.y[1]-l.dataY)/p],v=!0)}if(e.drawGraph_(!1),v&&n.length>1&&e.getFunctionOption(\"zoomCallback\")){var x=e.xAxisRange();e.getFunctionOption(\"zoomCallback\").call(e,x[0],x[1],e.yAxisRanges())}},r.endTouch=function(t,e,a){if(0!==t.touches.length)r.startTouch(t,e,a);else if(1==t.changedTouches.length){var i=(new Date).getTime(),n=t.changedTouches[0];a.startTimeForDoubleTapMs&&i-a.startTimeForDoubleTapMs<500&&a.doubleTapX&&Math.abs(a.doubleTapX-n.screenX)<50&&a.doubleTapY&&Math.abs(a.doubleTapY-n.screenY)<50?e.resetZoom():(a.startTimeForDoubleTapMs=i,a.doubleTapX=n.screenX,a.doubleTapY=n.screenY)}};var o=function(t,e,a){return t<e?e-t:t>a?t-a:0},s=function(t,e){var a=n.findPos(e.canvas_),i={left:a.x,right:a.x+e.canvas_.offsetWidth,top:a.y,bottom:a.y+e.canvas_.offsetHeight},r={x:n.pageX(t),y:n.pageY(t)},s=o(r.x,i.left,i.right),l=o(r.y,i.top,i.bottom);return Math.max(s,l)};r.defaultModel={mousedown:function(t,e,a){if(!t.button||2!=t.button){a.initializeMouseDown(t,e,a),t.altKey||t.shiftKey?r.startPan(t,e,a):r.startZoom(t,e,a);var i=function(t){if(a.isZooming){s(t,e)<100?r.moveZoom(t,e,a):null!==a.dragEndX&&(a.dragEndX=null,a.dragEndY=null,e.clearZoomRect_())}else a.isPanning&&r.movePan(t,e,a)},o=function t(o){a.isZooming?null!==a.dragEndX?r.endZoom(o,e,a):r.maybeTreatMouseOpAsClick(o,e,a):a.isPanning&&r.endPan(o,e,a),n.removeEvent(document,\"mousemove\",i),n.removeEvent(document,\"mouseup\",t),a.destroy()};e.addAndTrackEvent(document,\"mousemove\",i),e.addAndTrackEvent(document,\"mouseup\",o)}},willDestroyContextMyself:!0,touchstart:function(t,e,a){r.startTouch(t,e,a)},touchmove:function(t,e,a){r.moveTouch(t,e,a)},touchend:function(t,e,a){r.endTouch(t,e,a)},dblclick:function(t,e,a){if(a.cancelNextDblclick)return void(a.cancelNextDblclick=!1);var 
i={canvasx:a.dragEndX,canvasy:a.dragEndY,cancelable:!0};e.cascadeEvents_(\"dblclick\",i)||t.altKey||t.shiftKey||e.resetZoom()}},r.nonInteractiveModel_={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a)},mouseup:r.maybeTreatMouseOpAsClick},r.dragIsPanInteractionModel={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a),r.startPan(t,e,a)},mousemove:function(t,e,a){a.isPanning&&r.movePan(t,e,a)},mouseup:function(t,e,a){a.isPanning&&r.endPan(t,e,a)}},a.default=r,e.exports=a.default},{\"./dygraph-utils\":17}],13:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t){this.dygraph_=t,this.points=[],this.setNames=[],this.annotations=[],this.yAxes_=null,this.xTicks_=null,this.yTicks_=null};r.prototype.addDataset=function(t,e){this.points.push(e),this.setNames.push(t)},r.prototype.getPlotArea=function(){return this.area_},r.prototype.computePlotArea=function(){var t={x:0,y:0};t.w=this.dygraph_.width_-t.x-this.dygraph_.getOption(\"rightGap\"),t.h=this.dygraph_.height_;var e={chart_div:this.dygraph_.graphDiv,reserveSpaceLeft:function(e){var a={x:t.x,y:t.y,w:e,h:t.h};return t.x+=e,t.w-=e,a},reserveSpaceRight:function(e){var a={x:t.x+t.w-e,y:t.y,w:e,h:t.h};return t.w-=e,a},reserveSpaceTop:function(e){var a={x:t.x,y:t.y,w:t.w,h:e};return t.y+=e,t.h-=e,a},reserveSpaceBottom:function(e){var a={x:t.x,y:t.y+t.h-e,w:t.w,h:e};return t.h-=e,a},chartRect:function(){return{x:t.x,y:t.y,w:t.w,h:t.h}}};this.dygraph_.cascadeEvents_(\"layout\",e),this.area_=t},r.prototype.setAnnotations=function(t){this.annotations=[];for(var e=this.dygraph_.getOption(\"xValueParser\")||function(t){return t},a=0;a<t.length;a++){var i={};if(!t[a].xval&&void 0===t[a].x)return void console.error(\"Annotations must have an 'x' property\");if(t[a].icon&&(!t[a].hasOwnProperty(\"width\")||!t[a].hasOwnProperty(\"height\")))return void console.error(\"Must set width and height when setting annotation.icon property\");n.update(i,t[a]),i.xval||(i.xval=e(i.x)),this.annotations.push(i)}},r.prototype.setXTicks=function(t){this.xTicks_=t},r.prototype.setYAxes=function(t){this.yAxes_=t},r.prototype.evaluate=function(){this._xAxis={},this._evaluateLimits(),this._evaluateLineCharts(),this._evaluateLineTicks(),this._evaluateAnnotations()},r.prototype._evaluateLimits=function(){var t=this.dygraph_.xAxisRange();this._xAxis.minval=t[0],this._xAxis.maxval=t[1];var e=t[1]-t[0];this._xAxis.scale=0!==e?1/e:1,this.dygraph_.getOptionForAxis(\"logscale\",\"x\")&&(this._xAxis.xlogrange=n.log10(this._xAxis.maxval)-n.log10(this._xAxis.minval),this._xAxis.xlogscale=0!==this._xAxis.xlogrange?1/this._xAxis.xlogrange:1);for(var a=0;a<this.yAxes_.length;a++){var i=this.yAxes_[a];i.minyval=i.computedValueRange[0],i.maxyval=i.computedValueRange[1],i.yrange=i.maxyval-i.minyval,i.yscale=0!==i.yrange?1/i.yrange:1,this.dygraph_.getOption(\"logscale\")&&(i.ylogrange=n.log10(i.maxyval)-n.log10(i.minyval),i.ylogscale=0!==i.ylogrange?1/i.ylogrange:1,isFinite(i.ylogrange)&&!isNaN(i.ylogrange)||console.error(\"axis \"+a+\" of graph at \"+i.g+\" can't be displayed in log scale for range [\"+i.minyval+\" - \"+i.maxyval+\"]\"))}},r.calcXNormal_=function(t,e,a){return a?(n.log10(t)-n.log10(e.minval))*e.xlogscale:(t-e.minval)*e.scale},r.calcYNormal_=function(t,e,a){if(a){var i=1-(n.log10(e)-n.log10(t.minyval))*t.ylogscale;return isFinite(i)?i:NaN}return 
1-(e-t.minyval)*t.yscale},r.prototype._evaluateLineCharts=function(){for(var t=this.dygraph_.getOption(\"stackedGraph\"),e=this.dygraph_.getOptionForAxis(\"logscale\",\"x\"),a=0;a<this.points.length;a++){for(var i=this.points[a],n=this.setNames[a],o=this.dygraph_.getOption(\"connectSeparatedPoints\",n),s=this.dygraph_.axisPropertiesForSeries(n),l=this.dygraph_.attributes_.getForSeries(\"logscale\",n),h=0;h<i.length;h++){var u=i[h];u.x=r.calcXNormal_(u.xval,this._xAxis,e);var d=u.yval;t&&(u.y_stacked=r.calcYNormal_(s,u.yval_stacked,l),\nnull===d||isNaN(d)||(d=u.yval_stacked)),null===d&&(d=NaN,o||(u.yval=NaN)),u.y=r.calcYNormal_(s,d,l)}this.dygraph_.dataHandler_.onLineEvaluated(i,s,l)}},r.prototype._evaluateLineTicks=function(){var t,e,a,i,n,r;for(this.xticks=[],t=0;t<this.xTicks_.length;t++)e=this.xTicks_[t],a=e.label,r=!(\"label_v\"in e),n=r?e.v:e.label_v,(i=this.dygraph_.toPercentXCoord(n))>=0&&i<1&&this.xticks.push({pos:i,label:a,has_tick:r});for(this.yticks=[],t=0;t<this.yAxes_.length;t++)for(var o=this.yAxes_[t],s=0;s<o.ticks.length;s++)e=o.ticks[s],a=e.label,r=!(\"label_v\"in e),n=r?e.v:e.label_v,(i=this.dygraph_.toPercentYCoord(n,t))>0&&i<=1&&this.yticks.push({axis:t,pos:i,label:a,has_tick:r})},r.prototype._evaluateAnnotations=function(){var t,e={};for(t=0;t<this.annotations.length;t++){var a=this.annotations[t];e[a.xval+\",\"+a.series]=a}if(this.annotated_points=[],this.annotations&&this.annotations.length)for(var i=0;i<this.points.length;i++){var n=this.points[i];for(t=0;t<n.length;t++){var r=n[t],o=r.xval+\",\"+r.name;o in e&&(r.annotation=e[o],this.annotated_points.push(r))}}},r.prototype.removeAllDatasets=function(){delete this.points,delete this.setNames,delete this.setPointsLengths,delete this.setPointsOffsets,this.points=[],this.setNames=[],this.setPointsLengths=[],this.setPointsOffsets=[]},a.default=r,e.exports=a.default},{\"./dygraph-utils\":17}],14:[function(t,e,a){(function(t){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=null;if(void 0!==t);a.default=i,e.exports=a.default}).call(this,t(\"_process\"))},{_process:1}],15:[function(t,e,a){(function(i){\"use strict\";function n(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var r=t(\"./dygraph-utils\"),o=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(r),s=t(\"./dygraph-default-attrs\"),l=n(s),h=t(\"./dygraph-options-reference\"),u=(n(h),function(t){this.dygraph_=t,this.yAxes_=[],this.xAxis_={},this.series_={},this.global_=this.dygraph_.attrs_,this.user_=this.dygraph_.user_attrs_||{},this.labels_=[],this.highlightSeries_=this.get(\"highlightSeriesOpts\")||{},this.reparseSeries()});if(u.AXIS_STRING_MAPPINGS_={y:0,Y:0,y1:0,Y1:0,y2:1,Y2:1},u.axisToIndex_=function(t){if(\"string\"==typeof t){if(u.AXIS_STRING_MAPPINGS_.hasOwnProperty(t))return u.AXIS_STRING_MAPPINGS_[t];throw\"Unknown axis : \"+t}if(\"number\"==typeof t){if(0===t||1===t)return t;throw\"Dygraphs only supports two y-axes, indexed from 0-1.\"}if(t)throw\"Unknown axis : \"+t;return 0},u.prototype.reparseSeries=function(){var t=this.get(\"labels\");if(t){this.labels_=t.slice(1),this.yAxes_=[{series:[],options:{}}],this.xAxis_={options:{}},this.series_={};for(var e=this.user_.series||{},a=0;a<this.labels_.length;a++){var 
i=this.labels_[a],n=e[i]||{},r=u.axisToIndex_(n.axis);this.series_[i]={idx:a,yAxis:r,options:n},this.yAxes_[r]?this.yAxes_[r].series.push(i):this.yAxes_[r]={series:[i],options:{}}}var s=this.user_.axes||{};o.update(this.yAxes_[0].options,s.y||{}),this.yAxes_.length>1&&o.update(this.yAxes_[1].options,s.y2||{}),o.update(this.xAxis_.options,s.x||{})}},u.prototype.get=function(t){var e=this.getGlobalUser_(t);return null!==e?e:this.getGlobalDefault_(t)},u.prototype.getGlobalUser_=function(t){return this.user_.hasOwnProperty(t)?this.user_[t]:null},u.prototype.getGlobalDefault_=function(t){return this.global_.hasOwnProperty(t)?this.global_[t]:l.default.hasOwnProperty(t)?l.default[t]:null},u.prototype.getForAxis=function(t,e){var a,i;if(\"number\"==typeof e)a=e,i=0===a?\"y\":\"y2\";else{if(\"y1\"==e&&(e=\"y\"),\"y\"==e)a=0;else if(\"y2\"==e)a=1;else{if(\"x\"!=e)throw\"Unknown axis \"+e;a=-1}i=e}var n=-1==a?this.xAxis_:this.yAxes_[a];if(n){var r=n.options;if(r.hasOwnProperty(t))return r[t]}if(\"x\"!==e||\"logscale\"!==t){var o=this.getGlobalUser_(t);if(null!==o)return o}var s=l.default.axes[i];return s.hasOwnProperty(t)?s[t]:this.getGlobalDefault_(t)},u.prototype.getForSeries=function(t,e){if(e===this.dygraph_.getHighlightSeries()&&this.highlightSeries_.hasOwnProperty(t))return this.highlightSeries_[t];if(!this.series_.hasOwnProperty(e))throw\"Unknown series: \"+e;var a=this.series_[e],i=a.options;return i.hasOwnProperty(t)?i[t]:this.getForAxis(t,a.yAxis)},u.prototype.numAxes=function(){return this.yAxes_.length},u.prototype.axisForSeries=function(t){return this.series_[t].yAxis},u.prototype.axisOptions=function(t){return this.yAxes_[t].options},u.prototype.seriesForAxis=function(t){return this.yAxes_[t].series},u.prototype.seriesNames=function(){return this.labels_},void 0!==i);a.default=u,e.exports=a.default}).call(this,t(\"_process\"))},{\"./dygraph-default-attrs\":10,\"./dygraph-options-reference\":14,\"./dygraph-utils\":17,_process:1}],16:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t,e,a,i,n,r){return o(t,e,a,function(t){return\"logscale\"!==t&&i(t)},n,r)};a.numericLinearTicks=r;var o=function(t,e,a,i,r,o){var s,l,h,u,c=i(\"pixelsPerLabel\"),p=[];if(o)for(s=0;s<o.length;s++)p.push({v:o[s]});else{if(i(\"logscale\")){u=Math.floor(a/c);var g=n.binarySearch(t,d,1),f=n.binarySearch(e,d,-1);-1==g&&(g=0),-1==f&&(f=d.length-1);var v=null;if(f-g>=u/4){for(var _=f;_>=g;_--){var y=d[_],x=Math.log(y/t)/Math.log(e/t)*a,m={v:y};null===v?v={tickValue:y,pixel_coord:x}:Math.abs(x-v.pixel_coord)>=c?v={tickValue:y,pixel_coord:x}:m.label=\"\",p.push(m)}p.reverse()}}if(0===p.length){var b,w,A=i(\"labelsKMG2\");A?(b=[1,2,4,8,16,32,64,128,256],w=16):(b=[1,2,5,10,20,50,100],w=10);var O,D,E,L=Math.ceil(a/c),T=Math.abs(e-t)/L,S=Math.floor(Math.log(T)/Math.log(w)),P=Math.pow(w,S);for(l=0;l<b.length&&(O=P*b[l],D=Math.floor(t/O)*O,E=Math.ceil(e/O)*O,u=Math.abs(E-D)/O,!(a/u>c));l++);for(D>E&&(O*=-1),s=0;s<=u;s++)h=D+s*O,p.push({v:h})}}var C=i(\"axisLabelFormatter\");for(s=0;s<p.length;s++)void 0===p[s].label&&(p[s].label=C.call(r,p[s].v,0,i,r));return p};a.numericTicks=o;var s=function(t,e,a,i,n,r){var o=c(t,e,a,i);return o>=0?g(t,e,o,i,n):[]};a.dateTicker=s;var 
l={MILLISECONDLY:0,TWO_MILLISECONDLY:1,FIVE_MILLISECONDLY:2,TEN_MILLISECONDLY:3,FIFTY_MILLISECONDLY:4,HUNDRED_MILLISECONDLY:5,FIVE_HUNDRED_MILLISECONDLY:6,SECONDLY:7,TWO_SECONDLY:8,FIVE_SECONDLY:9,TEN_SECONDLY:10,THIRTY_SECONDLY:11,MINUTELY:12,TWO_MINUTELY:13,FIVE_MINUTELY:14,TEN_MINUTELY:15,THIRTY_MINUTELY:16,HOURLY:17,TWO_HOURLY:18,SIX_HOURLY:19,DAILY:20,TWO_DAILY:21,WEEKLY:22,MONTHLY:23,QUARTERLY:24,BIANNUAL:25,ANNUAL:26,DECADAL:27,CENTENNIAL:28,NUM_GRANULARITIES:29};a.Granularity=l;var h={DATEFIELD_Y:0,DATEFIELD_M:1,DATEFIELD_D:2,DATEFIELD_HH:3,DATEFIELD_MM:4,DATEFIELD_SS:5,DATEFIELD_MS:6,NUM_DATEFIELDS:7},u=[];u[l.MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:1,spacing:1},u[l.TWO_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:2,spacing:2},u[l.FIVE_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:5,spacing:5},u[l.TEN_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:10,spacing:10},u[l.FIFTY_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:50,spacing:50},u[l.HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:100,spacing:100},u[l.FIVE_HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:500,spacing:500},u[l.SECONDLY]={datefield:h.DATEFIELD_SS,step:1,spacing:1e3},u[l.TWO_SECONDLY]={datefield:h.DATEFIELD_SS,step:2,spacing:2e3},u[l.FIVE_SECONDLY]={datefield:h.DATEFIELD_SS,step:5,spacing:5e3},u[l.TEN_SECONDLY]={datefield:h.DATEFIELD_SS,step:10,spacing:1e4},u[l.THIRTY_SECONDLY]={datefield:h.DATEFIELD_SS,step:30,spacing:3e4},u[l.MINUTELY]={datefield:h.DATEFIELD_MM,step:1,spacing:6e4},u[l.TWO_MINUTELY]={datefield:h.DATEFIELD_MM,step:2,spacing:12e4},u[l.FIVE_MINUTELY]={datefield:h.DATEFIELD_MM,step:5,spacing:3e5},u[l.TEN_MINUTELY]={datefield:h.DATEFIELD_MM,step:10,spacing:6e5},u[l.THIRTY_MINUTELY]={datefield:h.DATEFIELD_MM,step:30,spacing:18e5},u[l.HOURLY]={datefield:h.DATEFIELD_HH,step:1,spacing:36e5},u[l.TWO_HOURLY]={datefield:h.DATEFIELD_HH,step:2,spacing:72e5},u[l.SIX_HOURLY]={datefield:h.DATEFIELD_HH,step:6,spacing:216e5},u[l.DAILY]={datefield:h.DATEFIELD_D,step:1,spacing:864e5},u[l.TWO_DAILY]={datefield:h.DATEFIELD_D,step:2,spacing:1728e5},u[l.WEEKLY]={datefield:h.DATEFIELD_D,step:7,spacing:6048e5},u[l.MONTHLY]={datefield:h.DATEFIELD_M,step:1,spacing:2629817280},u[l.QUARTERLY]={datefield:h.DATEFIELD_M,step:3,spacing:216e5*365.2524},u[l.BIANNUAL]={datefield:h.DATEFIELD_M,step:6,spacing:432e5*365.2524},u[l.ANNUAL]={datefield:h.DATEFIELD_Y,step:1,spacing:864e5*365.2524},u[l.DECADAL]={datefield:h.DATEFIELD_Y,step:10,spacing:315578073600},u[l.CENTENNIAL]={datefield:h.DATEFIELD_Y,step:100,spacing:3155780736e3};var d=function(){for(var t=[],e=-39;e<=39;e++)for(var a=Math.pow(10,e),i=1;i<=9;i++){var n=a*i;t.push(n)}return t}(),c=function(t,e,a,i){for(var n=i(\"pixelsPerLabel\"),r=0;r<l.NUM_GRANULARITIES;r++){if(a/p(t,e,r)>=n)return r}return-1},p=function(t,e,a){var i=u[a].spacing;return Math.round(1*(e-t)/i)},g=function(t,e,a,i,r){var o=i(\"axisLabelFormatter\"),s=i(\"labelsUTC\"),d=s?n.DateAccessorsUTC:n.DateAccessorsLocal,c=u[a].datefield,p=u[a].step,g=u[a].spacing,f=new Date(t),v=[];v[h.DATEFIELD_Y]=d.getFullYear(f),v[h.DATEFIELD_M]=d.getMonth(f),v[h.DATEFIELD_D]=d.getDate(f),v[h.DATEFIELD_HH]=d.getHours(f),v[h.DATEFIELD_MM]=d.getMinutes(f),v[h.DATEFIELD_SS]=d.getSeconds(f),v[h.DATEFIELD_MS]=d.getMilliseconds(f);var _=v[c]%p;a==l.WEEKLY&&(_=d.getDay(f)),v[c]-=_;for(var y=c+1;y<h.NUM_DATEFIELDS;y++)v[y]=y===h.DATEFIELD_D?1:0;var x=[],m=d.makeDate.apply(null,v),b=m.getTime();if(a<=l.HOURLY)for(b<t&&(b+=g,m=new Date(b));b<=e;)x.push({v:b,label:o.call(r,m,a,i,r)}),b+=g,m=new Date(b);else 
for(b<t&&(v[c]+=p,m=d.makeDate.apply(null,v),b=m.getTime());b<=e;)(a>=l.DAILY||d.getHours(m)%p==0)&&x.push({v:b,label:o.call(r,m,a,i,r)}),v[c]+=p,m=d.makeDate.apply(null,v),b=m.getTime();return x};a.getDateAxis=g},{\"./dygraph-utils\":17}],17:[function(t,e,a){\"use strict\";function i(t,e,a){t.removeEventListener(e,a,!1)}function n(t){return t=t||window.event,t.stopPropagation&&t.stopPropagation(),t.preventDefault&&t.preventDefault(),t.cancelBubble=!0,t.cancel=!0,t.returnValue=!1,!1}function r(t,e,a){var i,n,r;if(0===e)i=a,n=a,r=a;else{var o=Math.floor(6*t),s=6*t-o,l=a*(1-e),h=a*(1-e*s),u=a*(1-e*(1-s));switch(o){case 1:i=h,n=a,r=l;break;case 2:i=l,n=a,r=u;break;case 3:i=l,n=h,r=a;break;case 4:i=u,n=l,r=a;break;case 5:i=a,n=l,r=h;break;case 6:case 0:i=a,n=u,r=l}}return i=Math.floor(255*i+.5),n=Math.floor(255*n+.5),r=Math.floor(255*r+.5),\"rgb(\"+i+\",\"+n+\",\"+r+\")\"}function o(t){var e=t.getBoundingClientRect(),a=window,i=document.documentElement;return{x:e.left+(a.pageXOffset||i.scrollLeft),y:e.top+(a.pageYOffset||i.scrollTop)}}function s(t){return!t.pageX||t.pageX<0?0:t.pageX}function l(t){return!t.pageY||t.pageY<0?0:t.pageY}function h(t,e){return s(t)-e.px}function u(t,e){return l(t)-e.py}function d(t){return!!t&&!isNaN(t)}function c(t,e){return!!t&&(null!==t.yval&&(null!==t.x&&void 0!==t.x&&(null!==t.y&&void 0!==t.y&&!(isNaN(t.x)||!e&&isNaN(t.y)))))}function p(t,e){var a=Math.min(Math.max(1,e||2),21);return Math.abs(t)<.001&&0!==t?t.toExponential(a-1):t.toPrecision(a)}function g(t){return t<10?\"0\"+t:\"\"+t}function f(t,e,a,i){var n=g(t)+\":\"+g(e);if(a&&(n+=\":\"+g(a),i)){var r=\"\"+i;n+=\".\"+(\"000\"+r).substring(r.length)}return n}function v(t,e){var a=e?tt:$,i=new Date(t),n=a.getFullYear(i),r=a.getMonth(i),o=a.getDate(i),s=a.getHours(i),l=a.getMinutes(i),h=a.getSeconds(i),u=a.getMilliseconds(i),d=\"\"+n,c=g(r+1),p=g(o),v=3600*s+60*l+h+.001*u,_=d+\"/\"+c+\"/\"+p;return v&&(_+=\" \"+f(s,l,h,u)),_}function _(t,e){var a=Math.pow(10,e);return Math.round(t*a)/a}function y(t,e,a,i,n){for(var r=!0;r;){var o=t,s=e,l=a,h=i,u=n;if(r=!1,null!==h&&void 0!==h&&null!==u&&void 0!==u||(h=0,u=s.length-1),h>u)return-1;null!==l&&void 0!==l||(l=0);var d,c=function(t){return t>=0&&t<s.length},p=parseInt((h+u)/2,10),g=s[p];if(g==o)return p;if(g>o){if(l>0&&(d=p-1,c(d)&&s[d]<o))return p;t=o,e=s,a=l,i=h,n=p-1,r=!0,c=p=g=d=void 0}else{if(!(g<o))return-1;if(l<0&&(d=p+1,c(d)&&s[d]>o))return p;t=o,e=s,a=l,i=p+1,n=u,r=!0,c=p=g=d=void 0}}}function x(t){var e,a;if((-1==t.search(\"-\")||-1!=t.search(\"T\")||-1!=t.search(\"Z\"))&&(a=m(t))&&!isNaN(a))return a;if(-1!=t.search(\"-\")){for(e=t.replace(\"-\",\"/\",\"g\");-1!=e.search(\"-\");)e=e.replace(\"-\",\"/\");a=m(e)}else 8==t.length?(e=t.substr(0,4)+\"/\"+t.substr(4,2)+\"/\"+t.substr(6,2),a=m(e)):a=m(t);return a&&!isNaN(a)||console.error(\"Couldn't parse \"+t+\" as a date\"),a}function m(t){return new Date(t).getTime()}function b(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(t[a]=e[a]);return t}function w(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(null===e[a]?t[a]=null:A(e[a])?t[a]=e[a].slice():!function(t){return\"object\"==typeof Node?t instanceof Node:\"object\"==typeof t&&\"number\"==typeof t.nodeType&&\"string\"==typeof t.nodeName}(e[a])&&\"object\"==typeof e[a]?(\"object\"==typeof t[a]&&null!==t[a]||(t[a]={}),w(t[a],e[a])):t[a]=e[a]);return t}function A(t){var e=typeof t;return(\"object\"==e||\"function\"==e&&\"function\"==typeof t.item)&&null!==t&&\"number\"==typeof t.length&&3!==t.nodeType}function 
O(t){return\"object\"==typeof t&&null!==t&&\"function\"==typeof t.getTime}function D(t){for(var e=[],a=0;a<t.length;a++)A(t[a])?e.push(D(t[a])):e.push(t[a]);return e}function E(){return document.createElement(\"canvas\")}function L(t){try{var e=window.devicePixelRatio,a=t.webkitBackingStorePixelRatio||t.mozBackingStorePixelRatio||t.msBackingStorePixelRatio||t.oBackingStorePixelRatio||t.backingStorePixelRatio||1;return void 0!==e?e/a:1}catch(t){return 1}}function T(t,e,a,i){e=e||0,a=a||t.length,this.hasNext=!0,this.peek=null,this.start_=e,this.array_=t,this.predicate_=i,this.end_=Math.min(t.length,e+a),this.nextIdx_=e-1,this.next()}function S(t,e,a,i){return new T(t,e,a,i)}function P(t,e,a,i){var n,r=0,o=(new Date).getTime();if(t(r),1==e)return void i();var s=e-1;!function l(){r>=e||et.call(window,function(){var e=(new Date).getTime(),h=e-o;n=r,r=Math.floor(h/a);var u=r-n;r+u>s||r>=s?(t(s),i()):(0!==u&&t(r),l())})}()}function C(t,e){var a={};if(t)for(var i=1;i<t.length;i++)a[t[i]]=!0;var n=function(t){for(var e in t)if(t.hasOwnProperty(e)&&!at[e])return!0;return!1};for(var r in e)if(e.hasOwnProperty(r))if(\"highlightSeriesOpts\"==r||a[r]&&!e.series){if(n(e[r]))return!0}else if(\"series\"==r||\"axes\"==r){var o=e[r];for(var s in o)if(o.hasOwnProperty(s)&&n(o[s]))return!0}else if(!at[r])return!0;return!1}function M(t){for(var e=0;e<t.length;e++){var a=t.charAt(e);if(\"\\r\"===a)return e+1<t.length&&\"\\n\"===t.charAt(e+1)?\"\\r\\n\":a;if(\"\\n\"===a)return e+1<t.length&&\"\\r\"===t.charAt(e+1)?\"\\n\\r\":a}return null}function N(t,e){if(null===e||null===t)return!1;for(var a=t;a&&a!==e;)a=a.parentNode;return a===e}function F(t,e){return e<0?1/Math.pow(t,-e):Math.pow(t,e)}function k(t){var e=nt.exec(t);if(!e)return null;var a=parseInt(e[1],10),i=parseInt(e[2],10),n=parseInt(e[3],10);return e[4]?{r:a,g:i,b:n,a:parseFloat(e[4])}:{r:a,g:i,b:n}}function R(t){var e=k(t);if(e)return e;var a=document.createElement(\"div\");a.style.backgroundColor=t,a.style.visibility=\"hidden\",document.body.appendChild(a);var i=window.getComputedStyle(a,null).backgroundColor;return document.body.removeChild(a),k(i)}function I(t){try{(t||document.createElement(\"canvas\")).getContext(\"2d\")}catch(t){return!1}return!0}function H(t,e,a){var i=parseFloat(t);if(!isNaN(i))return i;if(/^ *$/.test(t))return null;if(/^ *nan *$/i.test(t))return NaN;var n=\"Unable to parse '\"+t+\"' as a number\";return void 0!==a&&void 0!==e&&(n+=\" on line \"+(1+(e||0))+\" ('\"+a+\"') of CSV.\"),console.error(n),null}function Y(t,e){var a=e(\"sigFigs\");if(null!==a)return p(t,a);var i,n=e(\"digitsAfterDecimal\"),r=e(\"maxNumberWidth\"),o=e(\"labelsKMB\"),s=e(\"labelsKMG2\");if(i=0!==t&&(Math.abs(t)>=Math.pow(10,r)||Math.abs(t)<Math.pow(10,-n))?t.toExponential(n):\"\"+_(t,n),o||s){var l,h=[],u=[];o&&(l=1e3,h=rt),s&&(o&&console.warn(\"Setting both labelsKMB and labelsKMG2. 
Pick one!\"),l=1024,h=ot,u=st);for(var d=Math.abs(t),c=F(l,h.length),g=h.length-1;g>=0;g--,c/=l)if(d>=c){i=_(t/c,n)+h[g];break}if(s){var f=String(t.toExponential()).split(\"e-\");2===f.length&&f[1]>=3&&f[1]<=24&&(i=f[1]%3>0?_(f[0]/F(10,f[1]%3),n):Number(f[0]).toFixed(2),i+=u[Math.floor(f[1]/3)-1])}}return i}function X(t,e,a){return Y.call(this,t,a)}function V(t,e,a){var i=a(\"labelsUTC\"),n=i?tt:$,r=n.getFullYear(t),o=n.getMonth(t),s=n.getDate(t),l=n.getHours(t),h=n.getMinutes(t),u=n.getSeconds(t),d=n.getMilliseconds(t);if(e>=G.Granularity.DECADAL)return\"\"+r;if(e>=G.Granularity.MONTHLY)return lt[o]+\" \"+r;if(0===3600*l+60*h+u+.001*d||e>=G.Granularity.DAILY)return g(s)+\" \"+lt[o];if(e<G.Granularity.SECONDLY){var c=\"\"+d;return g(u)+\".\"+(\"000\"+c).substring(c.length)}return e>G.Granularity.MINUTELY?f(l,h,u,0):f(l,h,u,d)}function Z(t,e){return v(t,e(\"labelsUTC\"))}Object.defineProperty(a,\"__esModule\",{value:!0}),a.removeEvent=i,a.cancelEvent=n,a.hsvToRGB=r,a.findPos=o,a.pageX=s,a.pageY=l,a.dragGetX_=h,a.dragGetY_=u,a.isOK=d,a.isValidPoint=c,a.floatFormat=p,a.zeropad=g,a.hmsString_=f,a.dateString_=v,a.round_=_,a.binarySearch=y,a.dateParser=x,a.dateStrToMillis=m,a.update=b,a.updateDeep=w,a.isArrayLike=A,a.isDateLike=O,a.clone=D,a.createCanvas=E,a.getContextPixelRatio=L,a.Iterator=T,a.createIterator=S,a.repeatAndCleanup=P,a.isPixelChangingOptionList=C,a.detectLineDelimiter=M,a.isNodeContainedBy=N,a.pow=F,a.toRGB_=R,a.isCanvasSupported=I,a.parseFloat_=H,a.numberValueFormatter=Y,a.numberAxisLabelFormatter=X,a.dateAxisLabelFormatter=V,a.dateValueFormatter=Z;var B=t(\"./dygraph-tickers\"),G=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(B);a.LOG_SCALE=10;var W=Math.log(10);a.LN_TEN=W;var U=function(t){return Math.log(t)/W};a.log10=U;var z=function(t,e,a){var i=U(t),n=U(e),r=i+a*(n-i);return Math.pow(10,r)};a.logRangeFraction=z;var j=[2,2];a.DOTTED_LINE=j;var K=[7,3];a.DASHED_LINE=K;var q=[7,2,2,2];a.DOT_DASH_LINE=q;a.HORIZONTAL=1;a.VERTICAL=2;var Q=function(t){return t.getContext(\"2d\")};a.getContext=Q;var J=function(t,e,a){t.addEventListener(e,a,!1)};a.addEvent=J;var $={getFullYear:function(t){return t.getFullYear()},getMonth:function(t){return t.getMonth()},getDate:function(t){return t.getDate()},getHours:function(t){return t.getHours()},getMinutes:function(t){return t.getMinutes()},getSeconds:function(t){return t.getSeconds()},getMilliseconds:function(t){return t.getMilliseconds()},getDay:function(t){return t.getDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(t,e,a,i,n,r,o)}};a.DateAccessorsLocal=$;var tt={getFullYear:function(t){return t.getUTCFullYear()},getMonth:function(t){return t.getUTCMonth()},getDate:function(t){return t.getUTCDate()},getHours:function(t){return t.getUTCHours()},getMinutes:function(t){return t.getUTCMinutes()},getSeconds:function(t){return t.getUTCSeconds()},getMilliseconds:function(t){return t.getUTCMilliseconds()},getDay:function(t){return t.getUTCDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(Date.UTC(t,e,a,i,n,r,o))}};a.DateAccessorsUTC=tt,T.prototype.next=function(){if(!this.hasNext)return null;for(var t=this.peek,e=this.nextIdx_+1,a=!1;e<this.end_;){if(!this.predicate_||this.predicate_(this.array_,e)){this.peek=this.array_[e],a=!0;break}e++}return this.nextIdx_=e,a||(this.hasNext=!1,this.peek=null),t};var et=function(){return 
window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){window.setTimeout(t,1e3/60)}}();a.requestAnimFrame=et;var at={annotationClickHandler:!0,annotationDblClickHandler:!0,annotationMouseOutHandler:!0,annotationMouseOverHandler:!0,axisLineColor:!0,axisLineWidth:!0,clickCallback:!0,drawCallback:!0,drawHighlightPointCallback:!0,drawPoints:!0,drawPointCallback:!0,drawGrid:!0,fillAlpha:!0,gridLineColor:!0,gridLineWidth:!0,hideOverlayOnMouseOut:!0,highlightCallback:!0,highlightCircleSize:!0,interactionModel:!0,labelsDiv:!0,labelsKMB:!0,labelsKMG2:!0,labelsSeparateLines:!0,labelsShowZeroValues:!0,legend:!0,panEdgeFraction:!0,pixelsPerYLabel:!0,pointClickCallback:!0,pointSize:!0,rangeSelectorPlotFillColor:!0,rangeSelectorPlotFillGradientColor:!0,rangeSelectorPlotStrokeColor:!0,rangeSelectorBackgroundStrokeColor:!0,rangeSelectorBackgroundLineWidth:!0,rangeSelectorPlotLineWidth:!0,rangeSelectorForegroundStrokeColor:!0,rangeSelectorForegroundLineWidth:!0,rangeSelectorAlpha:!0,showLabelsOnHighlight:!0,showRoller:!0,strokeWidth:!0,underlayCallback:!0,unhighlightCallback:!0,zoomCallback:!0},it={DEFAULT:function(t,e,a,i,n,r,o){a.beginPath(),a.fillStyle=r,a.arc(i,n,o,0,2*Math.PI,!1),a.fill()}};a.Circles=it;var nt=/^rgba?\\((\\d{1,3}),\\s*(\\d{1,3}),\\s*(\\d{1,3})(?:,\\s*([01](?:\\.\\d+)?))?\\)$/,rt=[\"K\",\"M\",\"B\",\"T\",\"Q\"],ot=[\"k\",\"M\",\"G\",\"T\",\"P\",\"E\",\"Z\",\"Y\"],st=[\"m\",\"u\",\"n\",\"p\",\"f\",\"a\",\"z\",\"y\"],lt=[\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]},{\"./dygraph-tickers\":16}],18:[function(t,e,a){(function(i){\"use strict\";function n(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}function r(t){return t&&t.__esModule?t:{default:t}}function o(t){var e=t[0],a=e[0];if(\"number\"!=typeof a&&!x.isDateLike(a))throw new Error(\"Expected number or date but got \"+typeof a+\": \"+a+\".\");for(var i=1;i<e.length;i++){var n=e[i];if(null!==n&&void 0!==n&&(\"number\"!=typeof n&&!x.isArrayLike(n)))throw new Error(\"Expected number or array but got \"+typeof n+\": \"+n+\".\")}}Object.defineProperty(a,\"__esModule\",{value:!0});var s=function(){function t(t,e){var a=[],i=!0,n=!1,r=void 0;try{for(var o,s=t[Symbol.iterator]();!(i=(o=s.next()).done)&&(a.push(o.value),!e||a.length!==e);i=!0);}catch(t){n=!0,r=t}finally{try{!i&&s.return&&s.return()}finally{if(n)throw r}}return a}return function(e,a){if(Array.isArray(e))return e;if(Symbol.iterator in Object(e))return t(e,a);throw new TypeError(\"Invalid attempt to destructure non-iterable 
instance\")}}(),l=t(\"./dygraph-layout\"),h=r(l),u=t(\"./dygraph-canvas\"),d=r(u),c=t(\"./dygraph-options\"),p=r(c),g=t(\"./dygraph-interaction-model\"),f=r(g),v=t(\"./dygraph-tickers\"),_=n(v),y=t(\"./dygraph-utils\"),x=n(y),m=t(\"./dygraph-default-attrs\"),b=r(m),w=t(\"./dygraph-options-reference\"),A=(r(w),t(\"./iframe-tarp\")),O=r(A),D=t(\"./datahandler/default\"),E=r(D),L=t(\"./datahandler/bars-error\"),T=r(L),S=t(\"./datahandler/bars-custom\"),P=r(S),C=t(\"./datahandler/default-fractions\"),M=r(C),N=t(\"./datahandler/bars-fractions\"),F=r(N),k=t(\"./datahandler/bars\"),R=r(k),I=t(\"./plugins/annotations\"),H=r(I),Y=t(\"./plugins/axes\"),X=r(Y),V=t(\"./plugins/chart-labels\"),Z=r(V),B=t(\"./plugins/grid\"),G=r(B),W=t(\"./plugins/legend\"),U=r(W),z=t(\"./plugins/range-selector\"),j=r(z),K=t(\"./dygraph-gviz\"),q=r(K),Q=function(t,e,a){this.__init__(t,e,a)};Q.NAME=\"Dygraph\",Q.VERSION=\"2.1.0\",Q.DEFAULT_ROLL_PERIOD=1,Q.DEFAULT_WIDTH=480,Q.DEFAULT_HEIGHT=320,Q.ANIMATION_STEPS=12,Q.ANIMATION_DURATION=200,Q.Plotters=d.default._Plotters,Q.addedAnnotationCSS=!1,Q.prototype.__init__=function(t,e,a){if(this.is_initial_draw_=!0,this.readyFns_=[],null!==a&&void 0!==a||(a={}),a=Q.copyUserAttrs_(a),\"string\"==typeof t&&(t=document.getElementById(t)),!t)throw new Error(\"Constructing dygraph with a non-existent div!\");this.maindiv_=t,this.file_=e,this.rollPeriod_=a.rollPeriod||Q.DEFAULT_ROLL_PERIOD,this.previousVerticalX_=-1,this.fractions_=a.fractions||!1,this.dateWindow_=a.dateWindow||null,this.annotations_=[],t.innerHTML=\"\",\"\"===t.style.width&&a.width&&(t.style.width=a.width+\"px\"),\"\"===t.style.height&&a.height&&(t.style.height=a.height+\"px\"),\"\"===t.style.height&&0===t.clientHeight&&(t.style.height=Q.DEFAULT_HEIGHT+\"px\",\"\"===t.style.width&&(t.style.width=Q.DEFAULT_WIDTH+\"px\")),this.width_=t.clientWidth||a.width||0,this.height_=t.clientHeight||a.height||0,a.stackedGraph&&(a.fillGraph=!0),this.user_attrs_={},x.update(this.user_attrs_,a),this.attrs_={},x.updateDeep(this.attrs_,b.default),this.boundaryIds_=[],this.setIndexByName_={},this.datasetIndex_=[],this.registeredEvents_=[],this.eventListeners_={},this.attributes_=new p.default(this),this.createInterface_(),this.plugins_=[];for(var i=Q.PLUGINS.concat(this.getOption(\"plugins\")),n=0;n<i.length;n++){var r,o=i[n];r=void 0!==o.activate?o:new o;var s={plugin:r,events:{},options:{},pluginOptions:{}},l=r.activate(this);for(var h in l)l.hasOwnProperty(h)&&(s.events[h]=l[h]);this.plugins_.push(s)}for(var n=0;n<this.plugins_.length;n++){var u=this.plugins_[n];for(var h in u.events)if(u.events.hasOwnProperty(h)){var d=u.events[h],c=[u.plugin,d];h in this.eventListeners_?this.eventListeners_[h].push(c):this.eventListeners_[h]=[c]}}this.createDragInterface_(),this.start_()},Q.prototype.cascadeEvents_=function(t,e){if(!(t in this.eventListeners_))return!1;var a={dygraph:this,cancelable:!1,defaultPrevented:!1,preventDefault:function(){if(!a.cancelable)throw\"Cannot call preventDefault on non-cancelable event.\";a.defaultPrevented=!0},propagationStopped:!1,stopPropagation:function(){a.propagationStopped=!0}};x.update(a,e);var i=this.eventListeners_[t];if(i)for(var n=i.length-1;n>=0;n--){var r=i[n][0],o=i[n][1];if(o.call(r,a),a.propagationStopped)break}return a.defaultPrevented},Q.prototype.getPluginInstance_=function(t){for(var e=0;e<this.plugins_.length;e++){var a=this.plugins_[e];if(a.plugin instanceof t)return a.plugin}return null},Q.prototype.isZoomed=function(t){var e=!!this.dateWindow_;if(\"x\"===t)return e;var 
a=this.axes_.map(function(t){return!!t.valueRange}).indexOf(!0)>=0;if(null===t||void 0===t)return e||a;if(\"y\"===t)return a;throw new Error(\"axis parameter is [\"+t+\"] must be null, 'x' or 'y'.\")},Q.prototype.toString=function(){var t=this.maindiv_;return\"[Dygraph \"+(t&&t.id?t.id:t)+\"]\"},Q.prototype.attr_=function(t,e){return e?this.attributes_.getForSeries(t,e):this.attributes_.get(t)},Q.prototype.getOption=function(t,e){return this.attr_(t,e)},Q.prototype.getNumericOption=function(t,e){return this.getOption(t,e)},Q.prototype.getStringOption=function(t,e){return this.getOption(t,e)},Q.prototype.getBooleanOption=function(t,e){return this.getOption(t,e)},Q.prototype.getFunctionOption=function(t,e){return this.getOption(t,e)},Q.prototype.getOptionForAxis=function(t,e){return this.attributes_.getForAxis(t,e)},Q.prototype.optionsViewForAxis_=function(t){var e=this;return function(a){var i=e.user_attrs_.axes;return i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:(\"x\"!==t||\"logscale\"!==a)&&(void 0!==e.user_attrs_[a]?e.user_attrs_[a]:(i=e.attrs_.axes,i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:\"y\"==t&&e.axes_[0].hasOwnProperty(a)?e.axes_[0][a]:\"y2\"==t&&e.axes_[1].hasOwnProperty(a)?e.axes_[1][a]:e.attr_(a)))}},Q.prototype.rollPeriod=function(){return this.rollPeriod_},Q.prototype.xAxisRange=function(){return this.dateWindow_?this.dateWindow_:this.xAxisExtremes()},Q.prototype.xAxisExtremes=function(){var t=this.getNumericOption(\"xRangePad\")/this.plotter_.area.w;if(0===this.numRows())return[0-t,1+t];var e=this.rawData_[0][0],a=this.rawData_[this.rawData_.length-1][0];if(t){var i=a-e;e-=i*t,a+=i*t}return[e,a]},Q.prototype.yAxisExtremes=function(){var t=this.gatherDatasets_(this.rolledSeries_,null),e=t.extremes,a=this.axes_;this.computeYAxisRanges_(e);var i=this.axes_;return this.axes_=a,i.map(function(t){return t.extremeRange})},Q.prototype.yAxisRange=function(t){if(void 0===t&&(t=0),t<0||t>=this.axes_.length)return null;var e=this.axes_[t];return[e.computedValueRange[0],e.computedValueRange[1]]},Q.prototype.yAxisRanges=function(){for(var t=[],e=0;e<this.axes_.length;e++)t.push(this.yAxisRange(e));return t},Q.prototype.toDomCoords=function(t,e,a){return[this.toDomXCoord(t),this.toDomYCoord(e,a)]},Q.prototype.toDomXCoord=function(t){if(null===t)return null;var e=this.plotter_.area,a=this.xAxisRange();return e.x+(t-a[0])/(a[1]-a[0])*e.w},Q.prototype.toDomYCoord=function(t,e){var a=this.toPercentYCoord(t,e);if(null===a)return null;var i=this.plotter_.area;return i.y+a*i.h},Q.prototype.toDataCoords=function(t,e,a){return[this.toDataXCoord(t),this.toDataYCoord(e,a)]},Q.prototype.toDataXCoord=function(t){if(null===t)return null;var e=this.plotter_.area,a=this.xAxisRange();if(this.attributes_.getForAxis(\"logscale\",\"x\")){var i=(t-e.x)/e.w;return x.logRangeFraction(a[0],a[1],i)}return a[0]+(t-e.x)/e.w*(a[1]-a[0])},Q.prototype.toDataYCoord=function(t,e){if(null===t)return null;var a=this.plotter_.area,i=this.yAxisRange(e);if(void 0===e&&(e=0),this.attributes_.getForAxis(\"logscale\",e)){var n=(t-a.y)/a.h;return x.logRangeFraction(i[1],i[0],n)}return i[0]+(a.y+a.h-t)/a.h*(i[1]-i[0])},Q.prototype.toPercentYCoord=function(t,e){if(null===t)return null;void 0===e&&(e=0);var a,i=this.yAxisRange(e);if(this.attributes_.getForAxis(\"logscale\",e)){var n=x.log10(i[0]),r=x.log10(i[1]);a=(r-x.log10(t))/(r-n)}else a=(i[1]-t)/(i[1]-i[0]);return a},Q.prototype.toPercentXCoord=function(t){if(null===t)return null;var e,a=this.xAxisRange();if(!0===this.attributes_.getForAxis(\"logscale\",\"x\")){var 
i=x.log10(a[0]),n=x.log10(a[1]);e=(x.log10(t)-i)/(n-i)}else e=(t-a[0])/(a[1]-a[0]);return e},Q.prototype.numColumns=function(){return this.rawData_?this.rawData_[0]?this.rawData_[0].length:this.attr_(\"labels\").length:0},Q.prototype.numRows=function(){return this.rawData_?this.rawData_.length:0},Q.prototype.getValue=function(t,e){return t<0||t>this.rawData_.length?null:e<0||e>this.rawData_[t].length?null:this.rawData_[t][e]},Q.prototype.createInterface_=function(){var t=this.maindiv_;this.graphDiv=document.createElement(\"div\"),this.graphDiv.style.textAlign=\"left\",this.graphDiv.style.position=\"relative\",t.appendChild(this.graphDiv),this.canvas_=x.createCanvas(),this.canvas_.style.position=\"absolute\",this.hidden_=this.createPlotKitCanvas_(this.canvas_),this.canvas_ctx_=x.getContext(this.canvas_),this.hidden_ctx_=x.getContext(this.hidden_),this.resizeElements_(),this.graphDiv.appendChild(this.hidden_),this.graphDiv.appendChild(this.canvas_),this.mouseEventElement_=this.createMouseEventElement_(),this.layout_=new h.default(this);var e=this;this.mouseMoveHandler_=function(t){e.mouseMove_(t)},this.mouseOutHandler_=function(t){var a=t.target||t.fromElement,i=t.relatedTarget||t.toElement;x.isNodeContainedBy(a,e.graphDiv)&&!x.isNodeContainedBy(i,e.graphDiv)&&e.mouseOut_(t)},this.addAndTrackEvent(window,\"mouseout\",this.mouseOutHandler_),this.addAndTrackEvent(this.mouseEventElement_,\"mousemove\",this.mouseMoveHandler_),this.resizeHandler_||(this.resizeHandler_=function(t){e.resize()},this.addAndTrackEvent(window,\"resize\",this.resizeHandler_))},Q.prototype.resizeElements_=function(){this.graphDiv.style.width=this.width_+\"px\",this.graphDiv.style.height=this.height_+\"px\";var t=this.getNumericOption(\"pixelRatio\"),e=t||x.getContextPixelRatio(this.canvas_ctx_);this.canvas_.width=this.width_*e,this.canvas_.height=this.height_*e,this.canvas_.style.width=this.width_+\"px\",this.canvas_.style.height=this.height_+\"px\",1!==e&&this.canvas_ctx_.scale(e,e);var a=t||x.getContextPixelRatio(this.hidden_ctx_);this.hidden_.width=this.width_*a,this.hidden_.height=this.height_*a,this.hidden_.style.width=this.width_+\"px\",this.hidden_.style.height=this.height_+\"px\",1!==a&&this.hidden_ctx_.scale(a,a)},Q.prototype.destroy=function(){this.canvas_ctx_.restore(),this.hidden_ctx_.restore();for(var t=this.plugins_.length-1;t>=0;t--){var e=this.plugins_.pop();e.plugin.destroy&&e.plugin.destroy()}this.removeTrackedEvents_(),x.removeEvent(window,\"mouseout\",this.mouseOutHandler_),x.removeEvent(this.mouseEventElement_,\"mousemove\",this.mouseMoveHandler_),x.removeEvent(window,\"resize\",this.resizeHandler_),this.resizeHandler_=null,function t(e){for(;e.hasChildNodes();)t(e.firstChild),e.removeChild(e.firstChild)}(this.maindiv_);var a=function(t){for(var e in t)\"object\"==typeof t[e]&&(t[e]=null)};a(this.layout_),a(this.plotter_),a(this)},Q.prototype.createPlotKitCanvas_=function(t){var e=x.createCanvas();return e.style.position=\"absolute\",e.style.top=t.style.top,e.style.left=t.style.left,\ne.width=this.width_,e.height=this.height_,e.style.width=this.width_+\"px\",e.style.height=this.height_+\"px\",e},Q.prototype.createMouseEventElement_=function(){return this.canvas_},Q.prototype.setColors_=function(){var t=this.getLabels(),e=t.length-1;this.colors_=[],this.colorsMap_={};for(var a=this.getNumericOption(\"colorSaturation\")||1,i=this.getNumericOption(\"colorValue\")||.5,n=Math.ceil(e/2),r=this.getOption(\"colors\"),o=this.visibility(),s=0;s<e;s++)if(o[s]){var 
l=t[s+1],h=this.attributes_.getForSeries(\"color\",l);if(!h)if(r)h=r[s%r.length];else{var u=s%2?n+(s+1)/2:Math.ceil((s+1)/2),d=1*u/(1+e);h=x.hsvToRGB(d,a,i)}this.colors_.push(h),this.colorsMap_[l]=h}},Q.prototype.getColors=function(){return this.colors_},Q.prototype.getPropertiesForSeries=function(t){for(var e=-1,a=this.getLabels(),i=1;i<a.length;i++)if(a[i]==t){e=i;break}return-1==e?null:{name:t,column:e,visible:this.visibility()[e-1],color:this.colorsMap_[t],axis:1+this.attributes_.axisForSeries(t)}},Q.prototype.createRollInterface_=function(){var t=this,e=this.roller_;e||(this.roller_=e=document.createElement(\"input\"),e.type=\"text\",e.style.display=\"none\",e.className=\"dygraph-roller\",this.graphDiv.appendChild(e));var a=this.getBooleanOption(\"showRoller\")?\"block\":\"none\",i=this.getArea(),n={top:i.y+i.h-25+\"px\",left:i.x+1+\"px\",display:a};e.size=\"2\",e.value=this.rollPeriod_,x.update(e.style,n),e.onchange=function(){return t.adjustRoll(e.value)}},Q.prototype.createDragInterface_=function(){var t={isZooming:!1,isPanning:!1,is2DPan:!1,dragStartX:null,dragStartY:null,dragEndX:null,dragEndY:null,dragDirection:null,prevEndX:null,prevEndY:null,prevDragDirection:null,cancelNextDblclick:!1,initialLeftmostDate:null,xUnitsPerPixel:null,dateRange:null,px:0,py:0,boundedDates:null,boundedValues:null,tarp:new O.default,initializeMouseDown:function(t,e,a){t.preventDefault?t.preventDefault():(t.returnValue=!1,t.cancelBubble=!0);var i=x.findPos(e.canvas_);a.px=i.x,a.py=i.y,a.dragStartX=x.dragGetX_(t,a),a.dragStartY=x.dragGetY_(t,a),a.cancelNextDblclick=!1,a.tarp.cover()},destroy:function(){var t=this;if((t.isZooming||t.isPanning)&&(t.isZooming=!1,t.dragStartX=null,t.dragStartY=null),t.isPanning){t.isPanning=!1,t.draggingDate=null,t.dateRange=null;for(var e=0;e<a.axes_.length;e++)delete a.axes_[e].draggingValue,delete a.axes_[e].dragValueRange}t.tarp.uncover()}},e=this.getOption(\"interactionModel\"),a=this;for(var i in e)e.hasOwnProperty(i)&&this.addAndTrackEvent(this.mouseEventElement_,i,function(e){return function(i){e(i,a,t)}}(e[i]));if(!e.willDestroyContextMyself){var n=function(e){t.destroy()};this.addAndTrackEvent(document,\"mouseup\",n)}},Q.prototype.drawZoomRect_=function(t,e,a,i,n,r,o,s){var l=this.canvas_ctx_;r==x.HORIZONTAL?l.clearRect(Math.min(e,o),this.layout_.getPlotArea().y,Math.abs(e-o),this.layout_.getPlotArea().h):r==x.VERTICAL&&l.clearRect(this.layout_.getPlotArea().x,Math.min(i,s),this.layout_.getPlotArea().w,Math.abs(i-s)),t==x.HORIZONTAL?a&&e&&(l.fillStyle=\"rgba(128,128,128,0.33)\",l.fillRect(Math.min(e,a),this.layout_.getPlotArea().y,Math.abs(a-e),this.layout_.getPlotArea().h)):t==x.VERTICAL&&n&&i&&(l.fillStyle=\"rgba(128,128,128,0.33)\",l.fillRect(this.layout_.getPlotArea().x,Math.min(i,n),this.layout_.getPlotArea().w,Math.abs(n-i)))},Q.prototype.clearZoomRect_=function(){this.currentZoomRectArgs_=null,this.canvas_ctx_.clearRect(0,0,this.width_,this.height_)},Q.prototype.doZoomX_=function(t,e){this.currentZoomRectArgs_=null;var a=this.toDataXCoord(t),i=this.toDataXCoord(e);this.doZoomXDates_(a,i)},Q.prototype.doZoomXDates_=function(t,e){var a=this,i=this.xAxisRange(),n=[t,e],r=this.getFunctionOption(\"zoomCallback\");this.doAnimatedZoom(i,n,null,null,function(){r&&r.call(a,t,e,a.yAxisRanges())})},Q.prototype.doZoomY_=function(t,e){var a=this;this.currentZoomRectArgs_=null;for(var i=this.yAxisRanges(),n=[],r=0;r<this.axes_.length;r++){var o=this.toDataYCoord(t,r),l=this.toDataYCoord(e,r);n.push([l,o])}var 
h=this.getFunctionOption(\"zoomCallback\");this.doAnimatedZoom(null,null,i,n,function(){if(h){var t=a.xAxisRange(),e=s(t,2),i=e[0],n=e[1];h.call(a,i,n,a.yAxisRanges())}})},Q.zoomAnimationFunction=function(t,e){return(1-Math.pow(1.5,-t))/(1-Math.pow(1.5,-e))},Q.prototype.resetZoom=function(){var t=this,e=this.isZoomed(\"x\"),a=this.isZoomed(\"y\"),i=e||a;if(this.clearSelection(),i){var n=this.xAxisExtremes(),r=s(n,2),o=r[0],l=r[1],h=this.getBooleanOption(\"animatedZooms\"),u=this.getFunctionOption(\"zoomCallback\");if(!h)return this.dateWindow_=null,this.axes_.forEach(function(t){t.valueRange&&delete t.valueRange}),this.drawGraph_(),void(u&&u.call(this,o,l,this.yAxisRanges()));var d=null,c=null,p=null,g=null;e&&(d=this.xAxisRange(),c=[o,l]),a&&(p=this.yAxisRanges(),g=this.yAxisExtremes()),this.doAnimatedZoom(d,c,p,g,function(){t.dateWindow_=null,t.axes_.forEach(function(t){t.valueRange&&delete t.valueRange}),u&&u.call(t,o,l,t.yAxisRanges())})}},Q.prototype.doAnimatedZoom=function(t,e,a,i,n){var r,o,s=this,l=this.getBooleanOption(\"animatedZooms\")?Q.ANIMATION_STEPS:1,h=[],u=[];if(null!==t&&null!==e)for(r=1;r<=l;r++)o=Q.zoomAnimationFunction(r,l),h[r-1]=[t[0]*(1-o)+o*e[0],t[1]*(1-o)+o*e[1]];if(null!==a&&null!==i)for(r=1;r<=l;r++){o=Q.zoomAnimationFunction(r,l);for(var d=[],c=0;c<this.axes_.length;c++)d.push([a[c][0]*(1-o)+o*i[c][0],a[c][1]*(1-o)+o*i[c][1]]);u[r-1]=d}x.repeatAndCleanup(function(t){if(u.length)for(var e=0;e<s.axes_.length;e++){var a=u[t][e];s.axes_[e].valueRange=[a[0],a[1]]}h.length&&(s.dateWindow_=h[t]),s.drawGraph_()},l,Q.ANIMATION_DURATION/l,n)},Q.prototype.getArea=function(){return this.plotter_.area},Q.prototype.eventToDomCoords=function(t){if(t.offsetX&&t.offsetY)return[t.offsetX,t.offsetY];var e=x.findPos(this.mouseEventElement_);return[x.pageX(t)-e.x,x.pageY(t)-e.y]},Q.prototype.findClosestRow=function(t){for(var e=1/0,a=-1,i=this.layout_.points,n=0;n<i.length;n++)for(var r=i[n],o=r.length,s=0;s<o;s++){var l=r[s];if(x.isValidPoint(l,!0)){var h=Math.abs(l.canvasx-t);h<e&&(e=h,a=l.idx)}}return a},Q.prototype.findClosestPoint=function(t,e){for(var a,i,n,r,o,s,l,h=1/0,u=this.layout_.points.length-1;u>=0;--u)for(var d=this.layout_.points[u],c=0;c<d.length;++c)r=d[c],x.isValidPoint(r)&&(i=r.canvasx-t,n=r.canvasy-e,(a=i*i+n*n)<h&&(h=a,o=r,s=u,l=r.idx));return{row:l,seriesName:this.layout_.setNames[s],point:o}},Q.prototype.findStackedPoint=function(t,e){for(var a,i,n=this.findClosestRow(t),r=0;r<this.layout_.points.length;++r){var o=this.getLeftBoundary_(r),s=n-o,l=this.layout_.points[r];if(!(s>=l.length)){var h=l[s];if(x.isValidPoint(h)){var u=h.canvasy;if(t>h.canvasx&&s+1<l.length){var d=l[s+1];if(x.isValidPoint(d)){var c=d.canvasx-h.canvasx;if(c>0){var p=(t-h.canvasx)/c;u+=p*(d.canvasy-h.canvasy)}}}else if(t<h.canvasx&&s>0){var g=l[s-1];if(x.isValidPoint(g)){var c=h.canvasx-g.canvasx;if(c>0){var p=(h.canvasx-t)/c;u+=p*(g.canvasy-h.canvasy)}}}(0===r||u<e)&&(a=h,i=r)}}}return{row:n,seriesName:this.layout_.setNames[i],point:a}},Q.prototype.mouseMove_=function(t){var e=this.layout_.points;if(void 0!==e&&null!==e){var a=this.eventToDomCoords(t),i=a[0],n=a[1],r=this.getOption(\"highlightSeriesOpts\"),o=!1;if(r&&!this.isSeriesLocked()){var s;s=this.getBooleanOption(\"stackedGraph\")?this.findStackedPoint(i,n):this.findClosestPoint(i,n),o=this.setSelection(s.row,s.seriesName)}else{var l=this.findClosestRow(i);o=this.setSelection(l)}var 
h=this.getFunctionOption(\"highlightCallback\");h&&o&&h.call(this,t,this.lastx_,this.selPoints_,this.lastRow_,this.highlightSet_)}},Q.prototype.getLeftBoundary_=function(t){if(this.boundaryIds_[t])return this.boundaryIds_[t][0];for(var e=0;e<this.boundaryIds_.length;e++)if(void 0!==this.boundaryIds_[e])return this.boundaryIds_[e][0];return 0},Q.prototype.animateSelection_=function(t){void 0===this.fadeLevel&&(this.fadeLevel=0),void 0===this.animateId&&(this.animateId=0);var e=this.fadeLevel,a=t<0?e:10-e;if(a<=0)return void(this.fadeLevel&&this.updateSelection_(1));var i=++this.animateId,n=this,r=function(){0!==n.fadeLevel&&t<0&&(n.fadeLevel=0,n.clearSelection())};x.repeatAndCleanup(function(e){n.animateId==i&&(n.fadeLevel+=t,0===n.fadeLevel?n.clearSelection():n.updateSelection_(n.fadeLevel/10))},a,30,r)},Q.prototype.updateSelection_=function(t){this.cascadeEvents_(\"select\",{selectedRow:-1===this.lastRow_?void 0:this.lastRow_,selectedX:-1===this.lastx_?void 0:this.lastx_,selectedPoints:this.selPoints_});var e,a=this.canvas_ctx_;if(this.getOption(\"highlightSeriesOpts\")){a.clearRect(0,0,this.width_,this.height_);var i=1-this.getNumericOption(\"highlightSeriesBackgroundAlpha\"),n=x.toRGB_(this.getOption(\"highlightSeriesBackgroundColor\"));if(i){if(void 0===t)return void this.animateSelection_(1);i*=t,a.fillStyle=\"rgba(\"+n.r+\",\"+n.g+\",\"+n.b+\",\"+i+\")\",a.fillRect(0,0,this.width_,this.height_)}this.plotter_._renderLineChart(this.highlightSet_,a)}else if(this.previousVerticalX_>=0){var r=0,o=this.attr_(\"labels\");for(e=1;e<o.length;e++){var s=this.getNumericOption(\"highlightCircleSize\",o[e]);s>r&&(r=s)}var l=this.previousVerticalX_;a.clearRect(l-r-1,0,2*r+2,this.height_)}if(this.selPoints_.length>0){var h=this.selPoints_[0].canvasx;for(a.save(),e=0;e<this.selPoints_.length;e++){var u=this.selPoints_[e];if(!isNaN(u.canvasy)){var d=this.getNumericOption(\"highlightCircleSize\",u.name),c=this.getFunctionOption(\"drawHighlightPointCallback\",u.name),p=this.plotter_.colors[u.name];c||(c=x.Circles.DEFAULT),a.lineWidth=this.getNumericOption(\"strokeWidth\",u.name),a.strokeStyle=p,a.fillStyle=p,c.call(this,this,u.name,a,h,u.canvasy,p,d,u.idx)}}a.restore(),this.previousVerticalX_=h}},Q.prototype.setSelection=function(t,e,a){this.selPoints_=[];var i=!1;if(!1!==t&&t>=0){t!=this.lastRow_&&(i=!0),this.lastRow_=t;for(var n=0;n<this.layout_.points.length;++n){var r=this.layout_.points[n],o=t-this.getLeftBoundary_(n);if(o>=0&&o<r.length&&r[o].idx==t){var s=r[o];null!==s.yval&&this.selPoints_.push(s)}else for(var l=0;l<r.length;++l){var s=r[l];if(s.idx==t){null!==s.yval&&this.selPoints_.push(s);break}}}}else this.lastRow_>=0&&(i=!0),this.lastRow_=-1;return this.selPoints_.length?this.lastx_=this.selPoints_[0].xval:this.lastx_=-1,void 0!==e&&(this.highlightSet_!==e&&(i=!0),this.highlightSet_=e),void 0!==a&&(this.lockedSet_=a),i&&this.updateSelection_(void 0),i},Q.prototype.mouseOut_=function(t){this.getFunctionOption(\"unhighlightCallback\")&&this.getFunctionOption(\"unhighlightCallback\").call(this,t),this.getBooleanOption(\"hideOverlayOnMouseOut\")&&!this.lockedSet_&&this.clearSelection()},Q.prototype.clearSelection=function(){if(this.cascadeEvents_(\"deselect\",{}),this.lockedSet_=!1,this.fadeLevel)return void this.animateSelection_(-1);this.canvas_ctx_.clearRect(0,0,this.width_,this.height_),this.fadeLevel=0,this.selPoints_=[],this.lastx_=-1,this.lastRow_=-1,this.highlightSet_=null},Q.prototype.getSelection=function(){if(!this.selPoints_||this.selPoints_.length<1)return-1;for(var 
t=0;t<this.layout_.points.length;t++)for(var e=this.layout_.points[t],a=0;a<e.length;a++)if(e[a].x==this.selPoints_[0].x)return e[a].idx;return-1},Q.prototype.getHighlightSeries=function(){return this.highlightSet_},Q.prototype.isSeriesLocked=function(){return this.lockedSet_},Q.prototype.loadedEvent_=function(t){this.rawData_=this.parseCSV_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_()},Q.prototype.addXTicks_=function(){var t;t=this.dateWindow_?[this.dateWindow_[0],this.dateWindow_[1]]:this.xAxisExtremes();var e=this.optionsViewForAxis_(\"x\"),a=e(\"ticker\")(t[0],t[1],this.plotter_.area.w,e,this);this.layout_.setXTicks(a)},Q.prototype.getHandlerClass_=function(){return this.attr_(\"dataHandler\")?this.attr_(\"dataHandler\"):this.fractions_?this.getBooleanOption(\"errorBars\")?F.default:M.default:this.getBooleanOption(\"customBars\")?P.default:this.getBooleanOption(\"errorBars\")?T.default:E.default},Q.prototype.predraw_=function(){var t=new Date;this.dataHandler_=new(this.getHandlerClass_()),this.layout_.computePlotArea(),this.computeYAxes_(),this.is_initial_draw_||(this.canvas_ctx_.restore(),this.hidden_ctx_.restore()),this.canvas_ctx_.save(),this.hidden_ctx_.save(),this.plotter_=new d.default(this,this.hidden_,this.hidden_ctx_,this.layout_),this.createRollInterface_(),this.cascadeEvents_(\"predraw\"),this.rolledSeries_=[null];for(var e=1;e<this.numColumns();e++){var a=this.dataHandler_.extractSeries(this.rawData_,e,this.attributes_);this.rollPeriod_>1&&(a=this.dataHandler_.rollingAverage(a,this.rollPeriod_,this.attributes_)),this.rolledSeries_.push(a)}this.drawGraph_();var i=new Date;this.drawingTimeMs_=i-t},Q.PointType=void 0,Q.stackPoints_=function(t,e,a,i){for(var n=null,r=null,o=null,s=-1,l=0;l<t.length;++l){var h=t[l],u=h.xval;void 0===e[u]&&(e[u]=0);var d=h.yval;isNaN(d)||null===d?\"none\"==i?d=0:(!function(e){if(!(s>=e))for(var a=e;a<t.length;++a)if(o=null,!isNaN(t[a].yval)&&null!==t[a].yval){s=a,o=t[a];break}}(l),d=r&&o&&\"none\"!=i?r.yval+(o.yval-r.yval)*((u-r.xval)/(o.xval-r.xval)):r&&\"all\"==i?r.yval:o&&\"all\"==i?o.yval:0):r=h;var c=e[u];n!=u&&(c+=d,e[u]=c),n=u,h.yval_stacked=c,c>a[1]&&(a[1]=c),c<a[0]&&(a[0]=c)}},Q.prototype.gatherDatasets_=function(t,e){var a,i,n,r,o,s,l=[],h=[],u=[],d={},c=t.length-1;for(a=c;a>=1;a--)if(this.visibility()[a-1]){if(e){s=t[a];var p=e[0],g=e[1];for(n=null,r=null,i=0;i<s.length;i++)s[i][0]>=p&&null===n&&(n=i),s[i][0]<=g&&(r=i);null===n&&(n=0);for(var f=n,v=!0;v&&f>0;)f--,v=null===s[f][1];null===r&&(r=s.length-1);var _=r;for(v=!0;v&&_<s.length-1;)_++,v=null===s[_][1];f!==n&&(n=f),_!==r&&(r=_),l[a-1]=[n,r],s=s.slice(n,r+1)}else s=t[a],l[a-1]=[0,s.length-1];var y=this.attr_(\"labels\")[a],x=this.dataHandler_.getExtremeYValues(s,e,this.getBooleanOption(\"stepPlot\",y)),m=this.dataHandler_.seriesToPoints(s,y,l[a-1][0]);this.getBooleanOption(\"stackedGraph\")&&(o=this.attributes_.axisForSeries(y),void 0===u[o]&&(u[o]=[]),Q.stackPoints_(m,u[o],x,this.getBooleanOption(\"stackedGraphNaNFill\"))),d[y]=x,h[a]=m}return{points:h,extremes:d,boundaryIds:l}},Q.prototype.drawGraph_=function(){var t=new Date,e=this.is_initial_draw_;this.is_initial_draw_=!1,this.layout_.removeAllDatasets(),this.setColors_(),this.attrs_.pointSize=.5*this.getNumericOption(\"highlightCircleSize\");var a=this.gatherDatasets_(this.rolledSeries_,this.dateWindow_),i=a.points,n=a.extremes;this.boundaryIds_=a.boundaryIds,this.setIndexByName_={};for(var 
r=this.attr_(\"labels\"),o=0,s=1;s<i.length;s++)this.visibility()[s-1]&&(this.layout_.addDataset(r[s],i[s]),this.datasetIndex_[s]=o++);for(var s=0;s<r.length;s++)this.setIndexByName_[r[s]]=s;if(this.computeYAxisRanges_(n),this.layout_.setYAxes(this.axes_),this.addXTicks_(),this.layout_.evaluate(),this.renderGraph_(e),this.getStringOption(\"timingName\")){var l=new Date;console.log(this.getStringOption(\"timingName\")+\" - drawGraph: \"+(l-t)+\"ms\")}},Q.prototype.renderGraph_=function(t){this.cascadeEvents_(\"clearChart\"),this.plotter_.clear();var e=this.getFunctionOption(\"underlayCallback\");e&&e.call(this,this.hidden_ctx_,this.layout_.getPlotArea(),this,this);var a={canvas:this.hidden_,drawingContext:this.hidden_ctx_};this.cascadeEvents_(\"willDrawChart\",a),this.plotter_.render(),this.cascadeEvents_(\"didDrawChart\",a),this.lastRow_=-1,this.canvas_.getContext(\"2d\").clearRect(0,0,this.width_,this.height_);var i=this.getFunctionOption(\"drawCallback\");if(null!==i&&i.call(this,this,t),t)for(this.readyFired_=!0;this.readyFns_.length>0;){var n=this.readyFns_.pop();n(this)}},Q.prototype.computeYAxes_=function(){var t,e,a;for(this.axes_=[],t=0;t<this.attributes_.numAxes();t++)e={g:this},x.update(e,this.attributes_.axisOptions(t)),this.axes_[t]=e;for(t=0;t<this.axes_.length;t++)if(0===t)e=this.optionsViewForAxis_(\"y\"+(t?\"2\":\"\")),(a=e(\"valueRange\"))&&(this.axes_[t].valueRange=a);else{var i=this.user_attrs_.axes;i&&i.y2&&(a=i.y2.valueRange)&&(this.axes_[t].valueRange=a)}},Q.prototype.numAxes=function(){return this.attributes_.numAxes()},Q.prototype.axisPropertiesForSeries=function(t){return this.axes_[this.attributes_.axisForSeries(t)]},Q.prototype.computeYAxisRanges_=function(t){for(var e,a,i,n,r,o=function(t){return isNaN(parseFloat(t))},s=this.attributes_.numAxes(),l=0;l<s;l++){var h=this.axes_[l],u=this.attributes_.getForAxis(\"logscale\",l),d=this.attributes_.getForAxis(\"includeZero\",l),c=this.attributes_.getForAxis(\"independentTicks\",l);i=this.attributes_.seriesForAxis(l),e=!0,n=.1;var p=this.getNumericOption(\"yRangePad\");if(null!==p&&(e=!1,n=p/this.plotter_.area.h),0===i.length)h.extremeRange=[0,1];else{for(var g,f,v=1/0,_=-1/0,y=0;y<i.length;y++)t.hasOwnProperty(i[y])&&(g=t[i[y]][0],null!==g&&(v=Math.min(g,v)),null!==(f=t[i[y]][1])&&(_=Math.max(f,_)));d&&!u&&(v>0&&(v=0),_<0&&(_=0)),v==1/0&&(v=0),_==-1/0&&(_=1),a=_-v,0===a&&(0!==_?a=Math.abs(_):(_=1,a=1));var m=_,b=v;e&&(u?(m=_+n*a,b=v):(m=_+n*a,b=v-n*a,b<0&&v>=0&&(b=0),m>0&&_<=0&&(m=0))),h.extremeRange=[b,m]}if(h.valueRange){var w=o(h.valueRange[0])?h.extremeRange[0]:h.valueRange[0],A=o(h.valueRange[1])?h.extremeRange[1]:h.valueRange[1];h.computedValueRange=[w,A]}else h.computedValueRange=h.extremeRange;if(!e)if(w=h.computedValueRange[0],A=h.computedValueRange[1],w===A&&(w-=.5,A+=.5),u){var O=n/(2*n-1),D=(n-1)/(2*n-1);h.computedValueRange[0]=x.logRangeFraction(w,A,O),h.computedValueRange[1]=x.logRangeFraction(w,A,D)}else a=A-w,h.computedValueRange[0]=w-a*n,h.computedValueRange[1]=A+a*n;if(c){h.independentTicks=c;var E=this.optionsViewForAxis_(\"y\"+(l?\"2\":\"\")),L=E(\"ticker\");h.ticks=L(h.computedValueRange[0],h.computedValueRange[1],this.plotter_.area.h,E,this),r||(r=h)}}if(void 0===r)throw'Configuration Error: At least one axis has to have the \"independentTicks\" option activated.';for(var l=0;l<s;l++){var h=this.axes_[l];if(!h.independentTicks){for(var 
E=this.optionsViewForAxis_(\"y\"+(l?\"2\":\"\")),L=E(\"ticker\"),T=r.ticks,S=r.computedValueRange[1]-r.computedValueRange[0],P=h.computedValueRange[1]-h.computedValueRange[0],C=[],M=0;M<T.length;M++){var N=(T[M].v-r.computedValueRange[0])/S,F=h.computedValueRange[0]+N*P;C.push(F)}h.ticks=L(h.computedValueRange[0],h.computedValueRange[1],this.plotter_.area.h,E,this,C)}}},Q.prototype.detectTypeFromString_=function(t){var e=!1,a=t.indexOf(\"-\");a>0&&\"e\"!=t[a-1]&&\"E\"!=t[a-1]||t.indexOf(\"/\")>=0||isNaN(parseFloat(t))?e=!0:8==t.length&&t>\"19700101\"&&t<\"20371231\"&&(e=!0),this.setXAxisOptions_(e)},Q.prototype.setXAxisOptions_=function(t){t?(this.attrs_.xValueParser=x.dateParser,this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter):(this.attrs_.xValueParser=function(t){return parseFloat(t)},this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter)},Q.prototype.parseCSV_=function(t){var e,a,i=[],n=x.detectLineDelimiter(t),r=t.split(n||\"\\n\"),o=this.getStringOption(\"delimiter\");-1==r[0].indexOf(o)&&r[0].indexOf(\"\\t\")>=0&&(o=\"\\t\");var s=0;\"labels\"in this.user_attrs_||(s=1,this.attrs_.labels=r[0].split(o),this.attributes_.reparseSeries());for(var l,h=!1,u=this.attr_(\"labels\").length,d=!1,c=s;c<r.length;c++){var p=r[c];if(c,0!==p.length&&\"#\"!=p[0]){var g=p.split(o);if(!(g.length<2)){var f=[];if(h||(this.detectTypeFromString_(g[0]),l=this.getFunctionOption(\"xValueParser\"),h=!0),f[0]=l(g[0],this),this.fractions_)for(a=1;a<g.length;a++)e=g[a].split(\"/\"),2!=e.length?(console.error('Expected fractional \"num/den\" values in CSV data but found a value \\''+g[a]+\"' on line \"+(1+c)+\" ('\"+p+\"') which is not of this form.\"),f[a]=[0,0]):f[a]=[x.parseFloat_(e[0],c,p),x.parseFloat_(e[1],c,p)];else if(this.getBooleanOption(\"errorBars\"))for(g.length%2!=1&&console.error(\"Expected alternating (value, stdev.) pairs in CSV data but line \"+(1+c)+\" has an odd number of values (\"+(g.length-1)+\"): '\"+p+\"'\"),a=1;a<g.length;a+=2)f[(a+1)/2]=[x.parseFloat_(g[a],c,p),x.parseFloat_(g[a+1],c,p)];else if(this.getBooleanOption(\"customBars\"))for(a=1;a<g.length;a++){var v=g[a];/^ *$/.test(v)?f[a]=[null,null,null]:(e=v.split(\";\"),3==e.length?f[a]=[x.parseFloat_(e[0],c,p),x.parseFloat_(e[1],c,p),x.parseFloat_(e[2],c,p)]:console.warn('When using customBars, values must be either blank or \"low;center;high\" tuples (got \"'+v+'\" on line '+(1+c)))}else for(a=1;a<g.length;a++)f[a]=x.parseFloat_(g[a],c,p);if(i.length>0&&f[0]<i[i.length-1][0]&&(d=!0),f.length!=u&&console.error(\"Number of columns in line \"+c+\" (\"+f.length+\") does not agree with number of labels (\"+u+\") \"+p),0===c&&this.attr_(\"labels\")){var _=!0;for(a=0;_&&a<f.length;a++)f[a]&&(_=!1);if(_){console.warn(\"The dygraphs 'labels' option is set, but the first row of CSV data ('\"+p+\"') appears to also contain labels. Will drop the CSV labels and use the option labels.\");continue}}i.push(f)}}}return d&&(console.warn(\"CSV is out of order; order it correctly to speed loading.\"),i.sort(function(t,e){return t[0]-e[0]})),i},Q.prototype.parseArray_=function(t){if(0===t.length)return console.error(\"Can't plot empty data set\"),null;if(0===t[0].length)return console.error(\"Data set cannot contain an empty row\"),null;o(t);var e;
if(null===this.attr_(\"labels\")){for(console.warn(\"Using default labels. Set labels explicitly via 'labels' in the options parameter\"),this.attrs_.labels=[\"X\"],e=1;e<t[0].length;e++)this.attrs_.labels.push(\"Y\"+e);this.attributes_.reparseSeries()}else{var a=this.attr_(\"labels\");if(a.length!=t[0].length)return console.error(\"Mismatch between number of labels (\"+a+\") and number of columns in array (\"+t[0].length+\")\"),null}if(x.isDateLike(t[0][0])){this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter;var i=x.clone(t);for(e=0;e<t.length;e++){if(0===i[e].length)return console.error(\"Row \"+(1+e)+\" of data is empty\"),null;if(null===i[e][0]||\"function\"!=typeof i[e][0].getTime||isNaN(i[e][0].getTime()))return console.error(\"x value in row \"+(1+e)+\" is not a Date\"),null;i[e][0]=i[e][0].getTime()}return i}return this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=x.numberAxisLabelFormatter,t},Q.prototype.parseDataTable_=function(t){var e=t.getNumberOfColumns(),a=t.getNumberOfRows(),i=t.getColumnType(0);if(\"date\"==i||\"datetime\"==i)this.attrs_.xValueParser=x.dateParser,this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter;else{if(\"number\"!=i)throw new Error(\"only 'date', 'datetime' and 'number' types are supported for column 1 of DataTable input (Got '\"+i+\"')\");this.attrs_.xValueParser=function(t){return parseFloat(t)},this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter}var n,r,o=[],s={},l=!1;
for(n=1;n<e;n++){var h=t.getColumnType(n);if(\"number\"==h)o.push(n);else{if(\"string\"!=h||!this.getBooleanOption(\"displayAnnotations\"))throw new Error(\"Only 'number' is supported as a dependent type with Gviz. 'string' is only supported if displayAnnotations is true\");var u=o[o.length-1];s.hasOwnProperty(u)?s[u].push(n):s[u]=[n],l=!0}}var d=[t.getColumnLabel(0)];for(n=0;n<o.length;n++)d.push(t.getColumnLabel(o[n])),this.getBooleanOption(\"errorBars\")&&(n+=1);this.attrs_.labels=d,e=d.length;var c=[],p=!1,g=[];for(n=0;n<a;n++){var f=[];if(void 0!==t.getValue(n,0)&&null!==t.getValue(n,0)){if(\"date\"==i||\"datetime\"==i?f.push(t.getValue(n,0).getTime()):f.push(t.getValue(n,0)),this.getBooleanOption(\"errorBars\"))for(r=0;r<e-1;r++)f.push([t.getValue(n,1+2*r),t.getValue(n,2+2*r)]);else{for(r=0;r<o.length;r++){var v=o[r];if(f.push(t.getValue(n,v)),l&&s.hasOwnProperty(v)&&null!==t.getValue(n,s[v][0])){var y={};y.series=t.getColumnLabel(v),y.xval=f[0],y.shortText=function(t){var e=String.fromCharCode(65+t%26);for(t=Math.floor(t/26);t>0;)e=String.fromCharCode(65+(t-1)%26)+e.toLowerCase(),t=Math.floor((t-1)/26);return e}(g.length),y.text=\"\";for(var m=0;m<s[v].length;m++)m&&(y.text+=\"\\n\"),y.text+=t.getValue(n,s[v][m]);g.push(y)}}for(r=0;r<f.length;r++)isFinite(f[r])||(f[r]=null)}c.length>0&&f[0]<c[c.length-1][0]&&(p=!0),c.push(f)}else console.warn(\"Ignoring row \"+n+\" of DataTable because of undefined or null first column.\")}p&&(console.warn(\"DataTable is out of order; order it correctly to speed loading.\"),c.sort(function(t,e){return t[0]-e[0]})),this.rawData_=c,g.length>0&&this.setAnnotations(g,!0),this.attributes_.reparseSeries()},Q.prototype.cascadeDataDidUpdateEvent_=function(){this.cascadeEvents_(\"dataDidUpdate\",{})},Q.prototype.start_=function(){var t=this.file_;if(\"function\"==typeof t&&(t=t()),x.isArrayLike(t))this.rawData_=this.parseArray_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if(\"object\"==typeof t&&\"function\"==typeof t.getColumnRange)this.parseDataTable_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if(\"string\"==typeof t){var e=x.detectLineDelimiter(t);if(e)this.loadedEvent_(t);else{var a;a=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject(\"Microsoft.XMLHTTP\");var i=this;a.onreadystatechange=function(){4==a.readyState&&(200!==a.status&&0!==a.status||i.loadedEvent_(a.responseText))},a.open(\"GET\",t,!0),a.send(null)}}else console.error(\"Unknown data format: \"+typeof t)},Q.prototype.updateOptions=function(t,e){void 0===e&&(e=!1);var a=t.file,i=Q.copyUserAttrs_(t);\"rollPeriod\"in i&&(this.rollPeriod_=i.rollPeriod),\"dateWindow\"in i&&(this.dateWindow_=i.dateWindow);var n=x.isPixelChangingOptionList(this.attr_(\"labels\"),i);x.updateDeep(this.user_attrs_,i),this.attributes_.reparseSeries(),a?(this.cascadeEvents_(\"dataWillUpdate\",{}),this.file_=a,e||this.start_()):e||(n?this.predraw_():this.renderGraph_(!1))},Q.copyUserAttrs_=function(t){var e={};for(var a in t)t.hasOwnProperty(a)&&\"file\"!=a&&t.hasOwnProperty(a)&&(e[a]=t[a]);return e},
Q.prototype.resize=function(t,e){if(!this.resize_lock){this.resize_lock=!0,null===t!=(null===e)&&(console.warn(\"Dygraph.resize() should be called with zero parameters or two non-NULL parameters. Pretending it was zero.\"),t=e=null);var a=this.width_,i=this.height_;t?(this.maindiv_.style.width=t+\"px\",this.maindiv_.style.height=e+\"px\",this.width_=t,this.height_=e):(this.width_=this.maindiv_.clientWidth,this.height_=this.maindiv_.clientHeight),a==this.width_&&i==this.height_||(this.resizeElements_(),this.predraw_()),this.resize_lock=!1}},Q.prototype.adjustRoll=function(t){this.rollPeriod_=t,this.predraw_()},Q.prototype.visibility=function(){for(this.getOption(\"visibility\")||(this.attrs_.visibility=[]);this.getOption(\"visibility\").length<this.numColumns()-1;)this.attrs_.visibility.push(!0);return this.getOption(\"visibility\")},Q.prototype.setVisibility=function(t,e){var a=this.visibility(),i=!1;if(Array.isArray(t)||(null!==t&&\"object\"==typeof t?i=!0:t=[t]),i)for(var n in t)t.hasOwnProperty(n)&&(n<0||n>=a.length?console.warn(\"Invalid series number in setVisibility: \"+n):a[n]=t[n]);else for(var n=0;n<t.length;n++)\"boolean\"==typeof t[n]?n>=a.length?console.warn(\"Invalid series number in setVisibility: \"+n):a[n]=t[n]:t[n]<0||t[n]>=a.length?console.warn(\"Invalid series number in setVisibility: \"+t[n]):a[t[n]]=e;this.predraw_()},Q.prototype.size=function(){return{width:this.width_,height:this.height_}},Q.prototype.setAnnotations=function(t,e){if(this.annotations_=t,!this.layout_)return void console.warn(\"Tried to setAnnotations before dygraph was ready. Try setting them in a ready() block. See dygraphs.com/tests/annotation.html\");this.layout_.setAnnotations(this.annotations_),e||this.predraw_()},Q.prototype.annotations=function(){return this.annotations_},Q.prototype.getLabels=function(){var t=this.attr_(\"labels\");return t?t.slice():null},Q.prototype.indexFromSetName=function(t){return this.setIndexByName_[t]},Q.prototype.getRowForX=function(t){for(var e=0,a=this.numRows()-1;e<=a;){var i=a+e>>1,n=this.getValue(i,0);if(n<t)e=i+1;else if(n>t)a=i-1;else{if(e==i)return i;a=i}}return null},Q.prototype.ready=function(t){this.is_initial_draw_?this.readyFns_.push(t):t.call(this,this)},Q.prototype.addAndTrackEvent=function(t,e,a){x.addEvent(t,e,a),this.registeredEvents_.push({elem:t,type:e,fn:a})},Q.prototype.removeTrackedEvents_=function(){if(this.registeredEvents_)for(var t=0;t<this.registeredEvents_.length;t++){var 
e=this.registeredEvents_[t];x.removeEvent(e.elem,e.type,e.fn)}this.registeredEvents_=[]},Q.PLUGINS=[U.default,X.default,j.default,Z.default,H.default,G.default],Q.GVizChart=q.default,Q.DASHED_LINE=x.DASHED_LINE,Q.DOT_DASH_LINE=x.DOT_DASH_LINE,Q.dateAxisLabelFormatter=x.dateAxisLabelFormatter,Q.toRGB_=x.toRGB_,Q.findPos=x.findPos,Q.pageX=x.pageX,Q.pageY=x.pageY,Q.dateString_=x.dateString_,Q.defaultInteractionModel=f.default.defaultModel,Q.nonInteractiveModel=Q.nonInteractiveModel_=f.default.nonInteractiveModel_,Q.Circles=x.Circles,Q.Plugins={Legend:U.default,Axes:X.default,Annotations:H.default,ChartLabels:Z.default,Grid:G.default,RangeSelector:j.default},Q.DataHandlers={DefaultHandler:E.default,BarsHandler:R.default,CustomBarsHandler:P.default,DefaultFractionHandler:M.default,ErrorBarsHandler:T.default,FractionsBarsHandler:F.default},Q.startPan=f.default.startPan,Q.startZoom=f.default.startZoom,Q.movePan=f.default.movePan,Q.moveZoom=f.default.moveZoom,Q.endPan=f.default.endPan,Q.endZoom=f.default.endZoom,Q.numericLinearTicks=_.numericLinearTicks,Q.numericTicks=_.numericTicks,Q.dateTicker=_.dateTicker,Q.Granularity=_.Granularity,Q.getDateAxis=_.getDateAxis,Q.floatFormat=x.floatFormat,a.default=Q,e.exports=a.default}).call(this,t(\"_process\"))},{\"./datahandler/bars\":5,\"./datahandler/bars-custom\":2,\"./datahandler/bars-error\":3,\"./datahandler/bars-fractions\":4,\"./datahandler/default\":8,\"./datahandler/default-fractions\":7,\"./dygraph-canvas\":9,\"./dygraph-default-attrs\":10,\"./dygraph-gviz\":11,\"./dygraph-interaction-model\":12,\"./dygraph-layout\":13,\"./dygraph-options\":15,\"./dygraph-options-reference\":14,\"./dygraph-tickers\":16,\"./dygraph-utils\":17,\"./iframe-tarp\":19,\"./plugins/annotations\":20,\"./plugins/axes\":21,\"./plugins/chart-labels\":22,\"./plugins/grid\":23,\"./plugins/legend\":24,\"./plugins/range-selector\":25,_process:1}],19:[function(t,e,a){\"use strict\";function i(){this.tarps=[]}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"./dygraph-utils\"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n);i.prototype.cover=function(){for(var t=document.getElementsByTagName(\"iframe\"),e=0;e<t.length;e++){var a=t[e],i=r.findPos(a),n=i.x,o=i.y,s=a.offsetWidth,l=a.offsetHeight,h=document.createElement(\"div\");h.style.position=\"absolute\",h.style.left=n+\"px\",h.style.top=o+\"px\",h.style.width=s+\"px\",h.style.height=l+\"px\",h.style.zIndex=999,document.body.appendChild(h),this.tarps.push(h)}},i.prototype.uncover=function(){for(var t=0;t<this.tarps.length;t++)this.tarps[t].parentNode.removeChild(this.tarps[t]);this.tarps=[]},a.default=i,e.exports=a.default},{\"./dygraph-utils\":17}],20:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=function(){this.annotations_=[]};i.prototype.toString=function(){return\"Annotations Plugin\"},i.prototype.activate=function(t){return{clearChart:this.clearChart,didDrawChart:this.didDrawChart}},i.prototype.detachLabels=function(){for(var t=0;t<this.annotations_.length;t++){var e=this.annotations_[t];e.parentNode&&e.parentNode.removeChild(e),this.annotations_[t]=null}this.annotations_=[]},i.prototype.clearChart=function(t){this.detachLabels()},i.prototype.didDrawChart=function(t){var e=t.dygraph,a=e.layout_.annotated_points;if(a&&0!==a.length)for(var i=t.canvas.parentNode,n=function(t,a,i){return function(n){var 
r=i.annotation;r.hasOwnProperty(t)?r[t](r,i,e,n):e.getOption(a)&&e.getOption(a)(r,i,e,n)}},r=t.dygraph.getArea(),o={},s=0;s<a.length;s++){var l=a[s];if(!(l.canvasx<r.x||l.canvasx>r.x+r.w||l.canvasy<r.y||l.canvasy>r.y+r.h)){var h=l.annotation,u=6;h.hasOwnProperty(\"tickHeight\")&&(u=h.tickHeight);var d=document.createElement(\"div\");d.style.fontSize=e.getOption(\"axisLabelFontSize\")+\"px\"\n;var c=\"dygraph-annotation\";h.hasOwnProperty(\"icon\")||(c+=\" dygraphDefaultAnnotation dygraph-default-annotation\"),h.hasOwnProperty(\"cssClass\")&&(c+=\" \"+h.cssClass),d.className=c;var p=h.hasOwnProperty(\"width\")?h.width:16,g=h.hasOwnProperty(\"height\")?h.height:16;if(h.hasOwnProperty(\"icon\")){var f=document.createElement(\"img\");f.src=h.icon,f.width=p,f.height=g,d.appendChild(f)}else l.annotation.hasOwnProperty(\"shortText\")&&d.appendChild(document.createTextNode(l.annotation.shortText));var v=l.canvasx-p/2;d.style.left=v+\"px\";var _=0;if(h.attachAtBottom){var y=r.y+r.h-g-u;o[v]?y-=o[v]:o[v]=0,o[v]+=u+g,_=y}else _=l.canvasy-g-u;d.style.top=_+\"px\",d.style.width=p+\"px\",d.style.height=g+\"px\",d.title=l.annotation.text,d.style.color=e.colorsMap_[l.name],d.style.borderColor=e.colorsMap_[l.name],h.div=d,e.addAndTrackEvent(d,\"click\",n(\"clickHandler\",\"annotationClickHandler\",l)),e.addAndTrackEvent(d,\"mouseover\",n(\"mouseOverHandler\",\"annotationMouseOverHandler\",l)),e.addAndTrackEvent(d,\"mouseout\",n(\"mouseOutHandler\",\"annotationMouseOutHandler\",l)),e.addAndTrackEvent(d,\"dblclick\",n(\"dblClickHandler\",\"annotationDblClickHandler\",l)),i.appendChild(d),this.annotations_.push(d);var x=t.drawingContext;if(x.save(),x.strokeStyle=h.hasOwnProperty(\"tickColor\")?h.tickColor:e.colorsMap_[l.name],x.lineWidth=h.hasOwnProperty(\"tickWidth\")?h.tickWidth:e.getOption(\"strokeWidth\"),x.beginPath(),h.attachAtBottom){var y=_+g;x.moveTo(l.canvasx,y),x.lineTo(l.canvasx,y+u)}else x.moveTo(l.canvasx,l.canvasy),x.lineTo(l.canvasx,l.canvasy-2-u);x.closePath(),x.stroke(),x.restore()}}},i.prototype.destroy=function(){this.detachLabels()},a.default=i,e.exports=a.default},{}],21:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"../dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(){this.xlabels_=[],this.ylabels_=[]};r.prototype.toString=function(){return\"Axes Plugin\"},r.prototype.activate=function(t){return{layout:this.layout,clearChart:this.clearChart,willDrawChart:this.willDrawChart}},r.prototype.layout=function(t){var e=t.dygraph;if(e.getOptionForAxis(\"drawAxis\",\"y\")){var a=e.getOptionForAxis(\"axisLabelWidth\",\"y\")+2*e.getOptionForAxis(\"axisTickSize\",\"y\");t.reserveSpaceLeft(a)}if(e.getOptionForAxis(\"drawAxis\",\"x\")){var i;i=e.getOption(\"xAxisHeight\")?e.getOption(\"xAxisHeight\"):e.getOptionForAxis(\"axisLabelFontSize\",\"x\")+2*e.getOptionForAxis(\"axisTickSize\",\"x\"),t.reserveSpaceBottom(i)}if(2==e.numAxes()){if(e.getOptionForAxis(\"drawAxis\",\"y2\")){var a=e.getOptionForAxis(\"axisLabelWidth\",\"y2\")+2*e.getOptionForAxis(\"axisTickSize\",\"y2\");t.reserveSpaceRight(a)}}else e.numAxes()>2&&e.error(\"Only two y-axes are supported at this time. 
(Trying to use \"+e.numAxes()+\")\")},r.prototype.detachLabels=function(){function t(t){for(var e=0;e<t.length;e++){var a=t[e];a.parentNode&&a.parentNode.removeChild(a)}}t(this.xlabels_),t(this.ylabels_),this.xlabels_=[],this.ylabels_=[]},r.prototype.clearChart=function(t){this.detachLabels()},r.prototype.willDrawChart=function(t){function e(t){return Math.round(t)+.5}function a(t){return Math.round(t)-.5}var i=this,r=t.dygraph;if(r.getOptionForAxis(\"drawAxis\",\"x\")||r.getOptionForAxis(\"drawAxis\",\"y\")||r.getOptionForAxis(\"drawAxis\",\"y2\")){var o,s,l,h=t.drawingContext,u=t.canvas.parentNode,d=r.width_,c=r.height_,p=function(t){return{position:\"absolute\",fontSize:r.getOptionForAxis(\"axisLabelFontSize\",t)+\"px\",width:r.getOptionForAxis(\"axisLabelWidth\",t)+\"px\"}},g={x:p(\"x\"),y:p(\"y\"),y2:p(\"y2\")},f=function(t,e,a){var i=document.createElement(\"div\"),r=g[\"y2\"==a?\"y2\":e];n.update(i.style,r);var o=document.createElement(\"div\");return o.className=\"dygraph-axis-label dygraph-axis-label-\"+e+(a?\" dygraph-axis-label-\"+a:\"\"),o.innerHTML=t,i.appendChild(o),i};h.save();var v=r.layout_,_=t.dygraph.plotter_.area,y=function(t){return function(e){return r.getOptionForAxis(e,t)}};if(r.getOptionForAxis(\"drawAxis\",\"y\")){if(v.yticks&&v.yticks.length>0){var x=r.numAxes(),m=[y(\"y\"),y(\"y2\")];v.yticks.forEach(function(t){if(void 0!==t.label){s=_.x;var e=\"y1\",a=m[0];1==t.axis&&(s=_.x+_.w,-1,e=\"y2\",a=m[1]);var n=a(\"axisLabelFontSize\");l=_.y+t.pos*_.h,o=f(t.label,\"y\",2==x?e:null);var r=l-n/2;r<0&&(r=0),r+n+3>c?o.style.bottom=\"0\":o.style.top=r+\"px\",0===t.axis?(o.style.left=_.x-a(\"axisLabelWidth\")-a(\"axisTickSize\")+\"px\",o.style.textAlign=\"right\"):1==t.axis&&(o.style.left=_.x+_.w+a(\"axisTickSize\")+\"px\",o.style.textAlign=\"left\"),o.style.width=a(\"axisLabelWidth\")+\"px\",u.appendChild(o),i.ylabels_.push(o)}});var b=this.ylabels_[0],w=r.getOptionForAxis(\"axisLabelFontSize\",\"y\");parseInt(b.style.top,10)+w>c-w&&(b.style.top=parseInt(b.style.top,10)-w/2+\"px\")}var A;if(r.getOption(\"drawAxesAtZero\")){var O=r.toPercentXCoord(0);(O>1||O<0||isNaN(O))&&(O=0),A=e(_.x+O*_.w)}else A=e(_.x);h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"y\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"y\"),h.beginPath(),h.moveTo(A,a(_.y)),h.lineTo(A,a(_.y+_.h)),h.closePath(),h.stroke(),2==r.numAxes()&&(h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"y2\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"y2\"),h.beginPath(),h.moveTo(a(_.x+_.w),a(_.y)),h.lineTo(a(_.x+_.w),a(_.y+_.h)),h.closePath(),h.stroke())}if(r.getOptionForAxis(\"drawAxis\",\"x\")){if(v.xticks){var D=y(\"x\");v.xticks.forEach(function(t){if(void 0!==t.label){s=_.x+t.pos*_.w,l=_.y+_.h,o=f(t.label,\"x\"),o.style.textAlign=\"center\",o.style.top=l+D(\"axisTickSize\")+\"px\";var e=s-D(\"axisLabelWidth\")/2;e+D(\"axisLabelWidth\")>d&&(e=d-D(\"axisLabelWidth\"),o.style.textAlign=\"right\"),e<0&&(e=0,o.style.textAlign=\"left\"),o.style.left=e+\"px\",o.style.width=D(\"axisLabelWidth\")+\"px\",u.appendChild(o),i.xlabels_.push(o)}})}h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"x\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"x\"),h.beginPath();var E;if(r.getOption(\"drawAxesAtZero\")){var O=r.toPercentYCoord(0,0);(O>1||O<0)&&(O=1),E=a(_.y+O*_.h)}else E=a(_.y+_.h);h.moveTo(e(_.x),E),h.lineTo(e(_.x+_.w),E),h.closePath(),h.stroke()}h.restore()}},a.default=r,e.exports=a.default},{\"../dygraph-utils\":17}],22:[function(t,e,a){\"use 
strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=function(){this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};i.prototype.toString=function(){return\"ChartLabels Plugin\"},i.prototype.activate=function(t){return{layout:this.layout,didDrawChart:this.didDrawChart}};var n=function(t){var e=document.createElement(\"div\");return e.style.position=\"absolute\",e.style.left=t.x+\"px\",e.style.top=t.y+\"px\",e.style.width=t.w+\"px\",e.style.height=t.h+\"px\",e};i.prototype.detachLabels_=function(){for(var t=[this.title_div_,this.xlabel_div_,this.ylabel_div_,this.y2label_div_],e=0;e<t.length;e++){var a=t[e];a&&(a.parentNode&&a.parentNode.removeChild(a))}this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};var r=function(t,e,a,i,n){var r=document.createElement(\"div\");r.style.position=\"absolute\",r.style.left=1==a?\"0px\":e.x+\"px\",r.style.top=e.y+\"px\",r.style.width=e.w+\"px\",r.style.height=e.h+\"px\",r.style.fontSize=t.getOption(\"yLabelWidth\")-2+\"px\";var o=document.createElement(\"div\");o.style.position=\"absolute\",o.style.width=e.h+\"px\",o.style.height=e.w+\"px\",o.style.top=e.h/2-e.w/2+\"px\",o.style.left=e.w/2-e.h/2+\"px\",o.className=\"dygraph-label-rotate-\"+(1==a?\"right\":\"left\");var s=document.createElement(\"div\");return s.className=i,s.innerHTML=n,o.appendChild(s),r.appendChild(o),r};i.prototype.layout=function(t){this.detachLabels_();var e=t.dygraph,a=t.chart_div;if(e.getOption(\"title\")){var i=t.reserveSpaceTop(e.getOption(\"titleHeight\"));this.title_div_=n(i),this.title_div_.style.fontSize=e.getOption(\"titleHeight\")-8+\"px\";var o=document.createElement(\"div\");o.className=\"dygraph-label dygraph-title\",o.innerHTML=e.getOption(\"title\"),this.title_div_.appendChild(o),a.appendChild(this.title_div_)}if(e.getOption(\"xlabel\")){var s=t.reserveSpaceBottom(e.getOption(\"xLabelHeight\"));this.xlabel_div_=n(s),this.xlabel_div_.style.fontSize=e.getOption(\"xLabelHeight\")-2+\"px\";var o=document.createElement(\"div\");o.className=\"dygraph-label dygraph-xlabel\",o.innerHTML=e.getOption(\"xlabel\"),this.xlabel_div_.appendChild(o),a.appendChild(this.xlabel_div_)}if(e.getOption(\"ylabel\")){var l=t.reserveSpaceLeft(0);this.ylabel_div_=r(e,l,1,\"dygraph-label dygraph-ylabel\",e.getOption(\"ylabel\")),a.appendChild(this.ylabel_div_)}if(e.getOption(\"y2label\")&&2==e.numAxes()){var h=t.reserveSpaceRight(0);this.y2label_div_=r(e,h,2,\"dygraph-label dygraph-y2label\",e.getOption(\"y2label\")),a.appendChild(this.y2label_div_)}},i.prototype.didDrawChart=function(t){var e=t.dygraph;this.title_div_&&(this.title_div_.children[0].innerHTML=e.getOption(\"title\")),this.xlabel_div_&&(this.xlabel_div_.children[0].innerHTML=e.getOption(\"xlabel\")),this.ylabel_div_&&(this.ylabel_div_.children[0].children[0].innerHTML=e.getOption(\"ylabel\")),this.y2label_div_&&(this.y2label_div_.children[0].children[0].innerHTML=e.getOption(\"y2label\"))},i.prototype.clearChart=function(){},i.prototype.destroy=function(){this.detachLabels_()},a.default=i,e.exports=a.default},{}],23:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=function(){};i.prototype.toString=function(){return\"Gridline Plugin\"},i.prototype.activate=function(t){return{willDrawChart:this.willDrawChart}},i.prototype.willDrawChart=function(t){function e(t){return Math.round(t)+.5}function a(t){return Math.round(t)-.5}var 
i,n,r,o,s=t.dygraph,l=t.drawingContext,h=s.layout_,u=t.dygraph.plotter_.area;if(s.getOptionForAxis(\"drawGrid\",\"y\")){for(var d=[\"y\",\"y2\"],c=[],p=[],g=[],f=[],v=[],r=0;r<d.length;r++)g[r]=s.getOptionForAxis(\"drawGrid\",d[r]),g[r]&&(c[r]=s.getOptionForAxis(\"gridLineColor\",d[r]),p[r]=s.getOptionForAxis(\"gridLineWidth\",d[r]),v[r]=s.getOptionForAxis(\"gridLinePattern\",d[r]),f[r]=v[r]&&v[r].length>=2);o=h.yticks,l.save(),o.forEach(function(t){if(t.has_tick){var r=t.axis;g[r]&&(l.save(),f[r]&&l.setLineDash&&l.setLineDash(v[r]),l.strokeStyle=c[r],l.lineWidth=p[r],i=e(u.x),n=a(u.y+t.pos*u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i+u.w,n),l.stroke(),l.restore())}}),l.restore()}if(s.getOptionForAxis(\"drawGrid\",\"x\")){o=h.xticks,l.save();var v=s.getOptionForAxis(\"gridLinePattern\",\"x\"),f=v&&v.length>=2;f&&l.setLineDash&&l.setLineDash(v),l.strokeStyle=s.getOptionForAxis(\"gridLineColor\",\"x\"),l.lineWidth=s.getOptionForAxis(\"gridLineWidth\",\"x\"),o.forEach(function(t){t.has_tick&&(i=e(u.x+t.pos*u.w),n=a(u.y+u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i,u.y),l.closePath(),l.stroke())}),f&&l.setLineDash&&l.setLineDash([]),l.restore()}},i.prototype.destroy=function(){},a.default=i,e.exports=a.default},{}],24:[function(t,e,a){\"use strict\";function i(t,e,a){if(!t||t.length<=1)return'<div class=\"dygraph-legend-line\" style=\"border-bottom-color: '+e+';\"></div>';var i,n,r,o,s,l=0,h=0,u=[];for(i=0;i<=t.length;i++)l+=t[i%t.length];if((s=Math.floor(a/(l-t[0])))>1){for(i=0;i<t.length;i++)u[i]=t[i]/a;h=u.length}else{for(s=1,i=0;i<t.length;i++)u[i]=t[i]/l;h=u.length+1}var d=\"\";for(n=0;n<s;n++)for(i=0;i<h;i+=2)r=u[i%u.length],o=i<t.length?u[(i+1)%u.length]:0,d+='<div class=\"dygraph-legend-dash\" style=\"margin-right: '+o+\"em; padding-left: \"+r+'em;\"></div>';return d}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"../dygraph-utils\"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=function(){this.legend_div_=null,this.is_generated_div_=!1};o.prototype.toString=function(){return\"Legend Plugin\"},o.prototype.activate=function(t){var e,a=t.getOption(\"labelsDiv\");return a&&null!==a?e=\"string\"==typeof a||a instanceof String?document.getElementById(a):a:(e=document.createElement(\"div\"),e.className=\"dygraph-legend\",t.graphDiv.appendChild(e),this.is_generated_div_=!0),this.legend_div_=e,this.one_em_width_=10,{select:this.select,deselect:this.deselect,predraw:this.predraw,didDrawChart:this.didDrawChart}};var s=function(t){var e=document.createElement(\"span\");e.setAttribute(\"style\",\"margin: 0; padding: 0 0 0 1em; border: 0;\"),t.appendChild(e);var a=e.offsetWidth;return t.removeChild(e),a},l=function(t){return t.replace(/&/g,\"&amp;\").replace(/\"/g,\"&quot;\").replace(/</g,\"&lt;\").replace(/>/g,\"&gt;\")};o.prototype.select=function(t){var e=t.selectedX,a=t.selectedPoints,i=t.selectedRow,n=t.dygraph.getOption(\"legend\");if(\"never\"===n)return void(this.legend_div_.style.display=\"none\");if(\"follow\"===n){var r=t.dygraph.plotter_.area,s=this.legend_div_.offsetWidth,l=t.dygraph.getOptionForAxis(\"axisLabelWidth\",\"y\"),h=a[0].x*r.w+50,u=a[0].y*r.h-50;h+s+1>r.w&&(h=h-100-s-(l-r.x)),t.dygraph.graphDiv.appendChild(this.legend_div_),this.legend_div_.style.left=l+h+\"px\",this.legend_div_.style.top=u+\"px\"}var
d=o.generateLegendHTML(t.dygraph,e,a,this.one_em_width_,i);this.legend_div_.innerHTML=d,this.legend_div_.style.display=\"\"},o.prototype.deselect=function(t){\"always\"!==t.dygraph.getOption(\"legend\")&&(this.legend_div_.style.display=\"none\");var e=s(this.legend_div_);this.one_em_width_=e;var a=o.generateLegendHTML(t.dygraph,void 0,void 0,e,null);this.legend_div_.innerHTML=a},o.prototype.didDrawChart=function(t){this.deselect(t)},o.prototype.predraw=function(t){if(this.is_generated_div_){t.dygraph.graphDiv.appendChild(this.legend_div_);var e=t.dygraph.getArea(),a=this.legend_div_.offsetWidth;this.legend_div_.style.left=e.x+e.w-a-1+\"px\",this.legend_div_.style.top=e.y+\"px\"}},o.prototype.destroy=function(){this.legend_div_=null},o.generateLegendHTML=function(t,e,a,n,s){var h={dygraph:t,x:e,series:[]},u={},d=t.getLabels();if(d)for(var c=1;c<d.length;c++){var p=t.getPropertiesForSeries(d[c]),g=t.getOption(\"strokePattern\",d[c]),f={dashHTML:i(g,p.color,n),label:d[c],labelHTML:l(d[c]),isVisible:p.visible,color:p.color};h.series.push(f),u[d[c]]=f}if(void 0!==e){var v=t.optionsViewForAxis_(\"x\"),_=v(\"valueFormatter\");h.xHTML=_.call(t,e,v,d[0],t,s,0);for(var y=[],x=t.numAxes(),c=0;c<x;c++)y[c]=t.optionsViewForAxis_(\"y\"+(c?1+c:\"\"));var m=t.getOption(\"labelsShowZeroValues\"),b=t.getHighlightSeries();for(c=0;c<a.length;c++){var w=a[c],f=u[w.name];if(f.y=w.yval,0===w.yval&&!m||isNaN(w.canvasy))f.isVisible=!1;else{var p=t.getPropertiesForSeries(w.name),A=y[p.axis-1],O=A(\"valueFormatter\"),D=O.call(t,w.yval,A,w.name,t,s,d.indexOf(w.name));r.update(f,{yHTML:D}),w.name==b&&(f.isHighlighted=!0)}}}return(t.getOption(\"legendFormatter\")||o.defaultFormatter).call(t,h)},o.defaultFormatter=function(t){var e=t.dygraph;if(!0!==e.getOption(\"showLabelsOnHighlight\"))return\"\";var a,i=e.getOption(\"labelsSeparateLines\");if(void 0===t.x){if(\"always\"!=e.getOption(\"legend\"))return\"\";a=\"\";for(var n=0;n<t.series.length;n++){var r=t.series[n];r.isVisible&&(\"\"!==a&&(a+=i?\"<br/>\":\" \"),a+=\"<span style='font-weight: bold; color: \"+r.color+\";'>\"+r.dashHTML+\" \"+r.labelHTML+\"</span>\")}return a}a=t.xHTML+\":\";for(var n=0;n<t.series.length;n++){var r=t.series[n];if(r.isVisible){i&&(a+=\"<br>\");a+=\"<span\"+(r.isHighlighted?' 
class=\"highlight\"':\"\")+\"> <b><span style='color: \"+r.color+\";'>\"+r.labelHTML+\"</span></b>: \"+r.yHTML+\"</span>\"}}return a},a.default=o,e.exports=a.default},{\"../dygraph-utils\":17}],25:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"../dygraph-utils\"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=t(\"../dygraph-interaction-model\"),s=i(o),l=t(\"../iframe-tarp\"),h=i(l),u=function(){this.hasTouchInterface_=\"undefined\"!=typeof TouchEvent,this.isMobileDevice_=/mobile|android/gi.test(navigator.appVersion),this.interfaceCreated_=!1};u.prototype.toString=function(){return\"RangeSelector Plugin\"},u.prototype.activate=function(t){return this.dygraph_=t,this.getOption_(\"showRangeSelector\")&&this.createInterface_(),{layout:this.reserveSpace_,predraw:this.renderStaticLayer_,didDrawChart:this.renderInteractiveLayer_}},u.prototype.destroy=function(){this.bgcanvas_=null,this.fgcanvas_=null,this.leftZoomHandle_=null,this.rightZoomHandle_=null},u.prototype.getOption_=function(t,e){return this.dygraph_.getOption(t,e)},u.prototype.setDefaultOption_=function(t,e){this.dygraph_.attrs_[t]=e},u.prototype.createInterface_=function(){this.createCanvases_(),this.createZoomHandles_(),this.initInteraction_(),this.getOption_(\"animatedZooms\")&&(console.warn(\"Animated zooms and range selector are not compatible; disabling animatedZooms.\"),this.dygraph_.updateOptions({animatedZooms:!1},!0)),this.interfaceCreated_=!0,this.addToGraph_()},u.prototype.addToGraph_=function(){var t=this.graphDiv_=this.dygraph_.graphDiv;t.appendChild(this.bgcanvas_),t.appendChild(this.fgcanvas_),t.appendChild(this.leftZoomHandle_),t.appendChild(this.rightZoomHandle_)},u.prototype.removeFromGraph_=function(){var t=this.graphDiv_;t.removeChild(this.bgcanvas_),t.removeChild(this.fgcanvas_),t.removeChild(this.leftZoomHandle_),t.removeChild(this.rightZoomHandle_),this.graphDiv_=null},u.prototype.reserveSpace_=function(t){this.getOption_(\"showRangeSelector\")&&t.reserveSpaceBottom(this.getOption_(\"rangeSelectorHeight\")+4)},u.prototype.renderStaticLayer_=function(){this.updateVisibility_()&&(this.resize_(),this.drawStaticLayer_())},u.prototype.renderInteractiveLayer_=function(){this.updateVisibility_()&&!this.isChangingRange_&&(this.placeZoomHandles_(),this.drawInteractiveLayer_())},u.prototype.updateVisibility_=function(){var t=this.getOption_(\"showRangeSelector\");if(t)this.interfaceCreated_?this.graphDiv_&&this.graphDiv_.parentNode||this.addToGraph_():this.createInterface_();else if(this.graphDiv_){this.removeFromGraph_();var e=this.dygraph_;setTimeout(function(){e.width_=0,e.resize()},1)}return t},u.prototype.resize_=function(){function t(t,e,a,i){var n=i||r.getContextPixelRatio(e);t.style.top=a.y+\"px\",t.style.left=a.x+\"px\",t.width=a.w*n,t.height=a.h*n,t.style.width=a.w+\"px\",t.style.height=a.h+\"px\",1!=n&&e.scale(n,n)}var e=this.dygraph_.layout_.getPlotArea(),a=0;this.dygraph_.getOptionForAxis(\"drawAxis\",\"x\")&&(a=this.getOption_(\"xAxisHeight\")||this.getOption_(\"axisLabelFontSize\")+2*this.getOption_(\"axisTickSize\")),this.canvasRect_={x:e.x,y:e.y+e.h+a+4,w:e.w,h:this.getOption_(\"rangeSelectorHeight\")};var 
i=this.dygraph_.getNumericOption(\"pixelRatio\");t(this.bgcanvas_,this.bgcanvas_ctx_,this.canvasRect_,i),t(this.fgcanvas_,this.fgcanvas_ctx_,this.canvasRect_,i)},u.prototype.createCanvases_=function(){this.bgcanvas_=r.createCanvas(),this.bgcanvas_.className=\"dygraph-rangesel-bgcanvas\",this.bgcanvas_.style.position=\"absolute\",this.bgcanvas_.style.zIndex=9,this.bgcanvas_ctx_=r.getContext(this.bgcanvas_),this.fgcanvas_=r.createCanvas(),this.fgcanvas_.className=\"dygraph-rangesel-fgcanvas\",this.fgcanvas_.style.position=\"absolute\",this.fgcanvas_.style.zIndex=9,this.fgcanvas_.style.cursor=\"default\",this.fgcanvas_ctx_=r.getContext(this.fgcanvas_)},u.prototype.createZoomHandles_=function(){var t=new Image;t.className=\"dygraph-rangesel-zoomhandle\",t.style.position=\"absolute\",t.style.zIndex=10,t.style.visibility=\"hidden\",t.style.cursor=\"col-resize\",t.width=9,t.height=16,t.src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAAXNSR0IArs4c6QAAAAZiS0dEANAAzwDP4Z7KegAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sHGw0cMqdt1UwAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAaElEQVQoz+3SsRFAQBCF4Z9WJM8KCDVwownl6YXsTmCUsyKGkZzcl7zkz3YLkypgAnreFmDEpHkIwVOMfpdi9CEEN2nGpFdwD03yEqDtOgCaun7sqSTDH32I1pQA2Pb9sZecAxc5r3IAb21d6878xsAAAAAASUVORK5CYII=\",this.isMobileDevice_&&(t.width*=2,t.height*=2),this.leftZoomHandle_=t,this.rightZoomHandle_=t.cloneNode(!1)},u.prototype.initInteraction_=function(){var t,e,a,i,n,o,l,u,d,c,p,g,f,v,_=this,y=document,x=0,m=null,b=!1,w=!1,A=!this.isMobileDevice_,O=new h.default;t=function(t){var e=_.dygraph_.xAxisExtremes(),a=(e[1]-e[0])/_.canvasRect_.w;return[e[0]+(t.leftHandlePos-_.canvasRect_.x)*a,e[0]+(t.rightHandlePos-_.canvasRect_.x)*a]},e=function(t){return r.cancelEvent(t),b=!0,x=t.clientX,m=t.target?t.target:t.srcElement,\"mousedown\"!==t.type&&\"dragstart\"!==t.type||(r.addEvent(y,\"mousemove\",a),r.addEvent(y,\"mouseup\",i)),_.fgcanvas_.style.cursor=\"col-resize\",O.cover(),!0},a=function(t){if(!b)return!1;r.cancelEvent(t);var e=t.clientX-x;if(Math.abs(e)<4)return!0;x=t.clientX;var a,i=_.getZoomHandleStatus_();m==_.leftZoomHandle_?(a=i.leftHandlePos+e,a=Math.min(a,i.rightHandlePos-m.width-3),a=Math.max(a,_.canvasRect_.x)):(a=i.rightHandlePos+e,a=Math.min(a,_.canvasRect_.x+_.canvasRect_.w),a=Math.max(a,i.leftHandlePos+m.width+3));var o=m.width/2;return m.style.left=a-o+\"px\",_.drawInteractiveLayer_(),A&&n(),!0},i=function(t){return!!b&&(b=!1,O.uncover(),r.removeEvent(y,\"mousemove\",a),r.removeEvent(y,\"mouseup\",i),_.fgcanvas_.style.cursor=\"default\",A||n(),!0)},n=function(){try{var e=_.getZoomHandleStatus_();if(_.isChangingRange_=!0,e.isZoomed){var a=t(e);_.dygraph_.doZoomXDates_(a[0],a[1])}else _.dygraph_.resetZoom()}finally{_.isChangingRange_=!1}},o=function(t){var e=_.leftZoomHandle_.getBoundingClientRect(),a=e.left+e.width/2;e=_.rightZoomHandle_.getBoundingClientRect();var i=e.left+e.width/2;return t.clientX>a&&t.clientX<i},l=function(t){return!(w||!o(t)||!_.getZoomHandleStatus_().isZoomed)&&(r.cancelEvent(t),w=!0,x=t.clientX,\"mousedown\"===t.type&&(r.addEvent(y,\"mousemove\",u),r.addEvent(y,\"mouseup\",d)),!0)},u=function(t){if(!w)return!1;r.cancelEvent(t);var e=t.clientX-x;if(Math.abs(e)<4)return!0;x=t.clientX;var a=_.getZoomHandleStatus_(),i=a.leftHandlePos,n=a.rightHandlePos,o=n-i;i+e<=_.canvasRect_.x?(i=_.canvasRect_.x,n=i+o):n+e>=_.canvasRect_.x+_.canvasRect_.w?(n=_.canvasRect_.x+_.canvasRect_.w,i=n-o):(i+=e,n+=e);var s=_.leftZoomHandle_.width/2;return 
_.leftZoomHandle_.style.left=i-s+\"px\",_.rightZoomHandle_.style.left=n-s+\"px\",_.drawInteractiveLayer_(),A&&c(),!0},d=function(t){return!!w&&(w=!1,r.removeEvent(y,\"mousemove\",u),r.removeEvent(y,\"mouseup\",d),A||c(),!0)},c=function(){try{_.isChangingRange_=!0,_.dygraph_.dateWindow_=t(_.getZoomHandleStatus_()),_.dygraph_.drawGraph_(!1)}finally{_.isChangingRange_=!1}},p=function(t){if(!b&&!w){var e=o(t)?\"move\":\"default\";e!=_.fgcanvas_.style.cursor&&(_.fgcanvas_.style.cursor=e)}},g=function(t){\"touchstart\"==t.type&&1==t.targetTouches.length?e(t.targetTouches[0])&&r.cancelEvent(t):\"touchmove\"==t.type&&1==t.targetTouches.length?a(t.targetTouches[0])&&r.cancelEvent(t):i(t)},f=function(t){\"touchstart\"==t.type&&1==t.targetTouches.length?l(t.targetTouches[0])&&r.cancelEvent(t):\"touchmove\"==t.type&&1==t.targetTouches.length?u(t.targetTouches[0])&&r.cancelEvent(t):d(t)},v=function(t,e){for(var a=[\"touchstart\",\"touchend\",\"touchmove\",\"touchcancel\"],i=0;i<a.length;i++)_.dygraph_.addAndTrackEvent(t,a[i],e)},this.setDefaultOption_(\"interactionModel\",s.default.dragIsPanInteractionModel),this.setDefaultOption_(\"panEdgeFraction\",1e-4);var D=window.opera?\"mousedown\":\"dragstart\";this.dygraph_.addAndTrackEvent(this.leftZoomHandle_,D,e),this.dygraph_.addAndTrackEvent(this.rightZoomHandle_,D,e),this.dygraph_.addAndTrackEvent(this.fgcanvas_,\"mousedown\",l),this.dygraph_.addAndTrackEvent(this.fgcanvas_,\"mousemove\",p),this.hasTouchInterface_&&(v(this.leftZoomHandle_,g),v(this.rightZoomHandle_,g),v(this.fgcanvas_,f))},u.prototype.drawStaticLayer_=function(){var t=this.bgcanvas_ctx_;t.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);try{this.drawMiniPlot_()}catch(t){console.warn(t)}this.bgcanvas_ctx_.lineWidth=this.getOption_(\"rangeSelectorBackgroundLineWidth\"),t.strokeStyle=this.getOption_(\"rangeSelectorBackgroundStrokeColor\"),t.beginPath(),t.moveTo(.5,.5),t.lineTo(.5,this.canvasRect_.h-.5),t.lineTo(this.canvasRect_.w-.5,this.canvasRect_.h-.5),t.lineTo(this.canvasRect_.w-.5,.5),t.stroke()},u.prototype.drawMiniPlot_=function(){var t=this.getOption_(\"rangeSelectorPlotFillColor\"),e=this.getOption_(\"rangeSelectorPlotFillGradientColor\"),a=this.getOption_(\"rangeSelectorPlotStrokeColor\");if(t||a){var i=this.getOption_(\"stepPlot\"),n=this.computeCombinedSeriesAndLimits_(),r=n.yMax-n.yMin,o=this.bgcanvas_ctx_,s=this.dygraph_.xAxisExtremes(),l=Math.max(s[1]-s[0],1e-30),h=(this.canvasRect_.w-.5)/l,u=(this.canvasRect_.h-.5)/r,d=this.canvasRect_.w-.5,c=this.canvasRect_.h-.5,p=null,g=null;o.beginPath(),o.moveTo(.5,c);for(var f=0;f<n.data.length;f++){var v=n.data[f],_=null!==v[0]?(v[0]-s[0])*h:NaN,y=null!==v[1]?c-(v[1]-n.yMin)*u:NaN;(i||null===p||Math.round(_)!=Math.round(p))&&(isFinite(_)&&isFinite(y)?(null===p?o.lineTo(_,c):i&&o.lineTo(_,g),o.lineTo(_,y),p=_,g=y):(null!==p&&(i?(o.lineTo(_,g),o.lineTo(_,c)):o.lineTo(p,c)),p=g=null))}if(o.lineTo(d,c),o.closePath(),t){var x=this.bgcanvas_ctx_.createLinearGradient(0,0,0,c);e&&x.addColorStop(0,e),x.addColorStop(1,t),this.bgcanvas_ctx_.fillStyle=x,o.fill()}a&&(this.bgcanvas_ctx_.strokeStyle=a,this.bgcanvas_ctx_.lineWidth=this.getOption_(\"rangeSelectorPlotLineWidth\"),o.stroke())}},u.prototype.computeCombinedSeriesAndLimits_=function(){var t,e=this.dygraph_,a=this.getOption_(\"logscale\"),i=e.numColumns(),n=e.getLabels(),o=new Array(i),s=!1,l=e.visibility(),h=[];for(t=1;t<i;t++){var u=this.getOption_(\"showInRangeSelector\",n[t]);h.push(u),null!==u&&(s=!0)}if(s)for(t=1;t<i;t++)o[t]=h[t-1];else for(t=1;t<i;t++)o[t]=l[t-1];var 
d=[],c=e.dataHandler_,p=e.attributes_;for(t=1;t<e.numColumns();t++)if(o[t]){var g=c.extractSeries(e.rawData_,t,p);e.rollPeriod()>1&&(g=c.rollingAverage(g,e.rollPeriod(),p)),d.push(g)}var f=[];for(t=0;t<d[0].length;t++){for(var v=0,_=0,y=0;y<d.length;y++){var x=d[y][t][1];null===x||isNaN(x)||(_++,v+=x)}f.push([d[0][t][0],v/_])}var m=Number.MAX_VALUE,b=-Number.MAX_VALUE;for(t=0;t<f.length;t++){var w=f[t][1];null!==w&&isFinite(w)&&(!a||w>0)&&(m=Math.min(m,w),b=Math.max(b,w))}if(a)for(b=r.log10(b),b+=.25*b,m=r.log10(m),t=0;t<f.length;t++)f[t][1]=r.log10(f[t][1]);else{var A,O=b-m;A=O<=Number.MIN_VALUE?.25*b:.25*O,b+=A,m-=A}return{data:f,yMin:m,yMax:b}},u.prototype.placeZoomHandles_=function(){var t=this.dygraph_.xAxisExtremes(),e=this.dygraph_.xAxisRange(),a=t[1]-t[0],i=Math.max(0,(e[0]-t[0])/a),n=Math.max(0,(t[1]-e[1])/a),r=this.canvasRect_.x+this.canvasRect_.w*i,o=this.canvasRect_.x+this.canvasRect_.w*(1-n),s=Math.max(this.canvasRect_.y,this.canvasRect_.y+(this.canvasRect_.h-this.leftZoomHandle_.height)/2),l=this.leftZoomHandle_.width/2;this.leftZoomHandle_.style.left=r-l+\"px\",this.leftZoomHandle_.style.top=s+\"px\",this.rightZoomHandle_.style.left=o-l+\"px\",this.rightZoomHandle_.style.top=this.leftZoomHandle_.style.top,this.leftZoomHandle_.style.visibility=\"visible\",this.rightZoomHandle_.style.visibility=\"visible\"},u.prototype.drawInteractiveLayer_=function(){var t=this.fgcanvas_ctx_;t.clearRect(0,0,this.canvasRect_.w,this.canvasRect_.h);var e=this.canvasRect_.w-1,a=this.canvasRect_.h-1,i=this.getZoomHandleStatus_();if(t.strokeStyle=this.getOption_(\"rangeSelectorForegroundStrokeColor\"),t.lineWidth=this.getOption_(\"rangeSelectorForegroundLineWidth\"),i.isZoomed){var n=Math.max(1,i.leftHandlePos-this.canvasRect_.x),r=Math.min(e,i.rightHandlePos-this.canvasRect_.x);t.fillStyle=\"rgba(240, 240, 240, \"+this.getOption_(\"rangeSelectorAlpha\").toString()+\")\",t.fillRect(0,0,n,this.canvasRect_.h),t.fillRect(r,0,this.canvasRect_.w-r,this.canvasRect_.h),t.beginPath(),t.moveTo(1,1),t.lineTo(n,1),t.lineTo(n,a),t.lineTo(r,a),t.lineTo(r,1),t.lineTo(e,1),t.stroke()}else t.beginPath(),t.moveTo(1,1),t.lineTo(1,a),t.lineTo(e,a),t.lineTo(e,1),t.stroke()},u.prototype.getZoomHandleStatus_=function(){var t=this.leftZoomHandle_.width/2,e=parseFloat(this.leftZoomHandle_.style.left)+t,a=parseFloat(this.rightZoomHandle_.style.left)+t;return{leftHandlePos:e,rightHandlePos:a,isZoomed:e-1>this.canvasRect_.x||a+1<this.canvasRect_.x+this.canvasRect_.w}},a.default=u,e.exports=a.default},{\"../dygraph-interaction-model\":12,\"../dygraph-utils\":17,\"../iframe-tarp\":19}]},{},[18])(18)});\n//# sourceMappingURL=dist/dygraph.min.js.map\n","import { ChartData, ChartMetadata } from \"../chart-types\"\nimport { seconds4human } from \"./seconds4human\"\n\nexport const LEGEND_BOTTOM_SINGLE_LINE_HEIGHT = 80\n\nexport const legendPluginModuleString = (withContext: boolean, chartMetadata: ChartMetadata) => {\n let str = \" \"\n let context = \"\"\n\n if (withContext && typeof chartMetadata.context === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n context = chartMetadata.context\n }\n\n if (typeof chartMetadata.plugin === \"string\" && chartMetadata.plugin !== \"\") {\n str = chartMetadata.plugin\n\n if (str.endsWith(\".plugin\")) {\n str = str.substring(0, str.length - 7)\n }\n\n if (typeof chartMetadata.module === \"string\" && chartMetadata.module !== \"\") {\n str += `:${chartMetadata.module}`\n }\n\n if (withContext && context !== \"\") {\n str += `, ${context}`\n }\n } else if (withContext && 
context !== \"\") {\n    str = context\n  }\n  return str\n}\n\nexport const legendResolutionTooltip = (chartData: ChartData, chartMetadata: ChartMetadata) => {\n  const collected = chartMetadata.update_every\n  // todo if there's no data (but maybe there won't be a situation like this), then use \"collected\"\n  const viewed = chartData.view_update_every\n  if (collected === viewed) {\n    return `resolution ${seconds4human(collected)}`\n  }\n\n  return `resolution ${seconds4human(viewed)}, collected every ${seconds4human(collected)}`\n}\n\ntype GetNewSelectedDimensions = (arg: {\n  allDimensions: string[],\n  selectedDimensions: string[],\n  clickedDimensionName: string,\n  isModifierKeyPressed: boolean,\n}) => string[]\n\nexport const getNewSelectedDimensions: GetNewSelectedDimensions = ({\n  allDimensions,\n  selectedDimensions,\n  clickedDimensionName,\n  isModifierKeyPressed,\n}) => {\n  // when selectedDimensions is empty, then all dimensions should be enabled\n  // let's narrow this case now\n  const enabledDimensions = selectedDimensions.length === 0 ? allDimensions : selectedDimensions\n  const isCurrentlySelected = enabledDimensions.includes(clickedDimensionName)\n\n  let newSelectedDimensions: string[]\n  if (!isModifierKeyPressed\n    && ((isCurrentlySelected && enabledDimensions.length > 1) || !isCurrentlySelected)\n  ) {\n    newSelectedDimensions = [clickedDimensionName]\n  } else if (isCurrentlySelected) { // modifier key pressed\n    newSelectedDimensions = enabledDimensions.filter(\n      (dimension) => dimension !== clickedDimensionName,\n    )\n  } else { // modifier key pressed\n    newSelectedDimensions = enabledDimensions.concat(clickedDimensionName)\n  }\n\n  if (newSelectedDimensions.length === allDimensions.length) {\n    return []\n  }\n  return newSelectedDimensions\n}\n","import { createReducer } from \"redux-act\"\n\nimport { ChartsMetadata } from \"domains/global/types\"\n\nimport { startSnapshotModeAction, stopSnapshotModeAction, isSignedInAction, setOfflineAction } from \"./actions\"\n\nexport type StateT = {\n  isSnapshotMode: boolean\n  snapshotCharts: ChartsMetadata | null\n  snapshotDataPoints: number | null\n  isSignedIn: boolean\n  offline: boolean\n}\n\nexport const initialState: StateT = {\n  isSnapshotMode: false,\n  snapshotCharts: null,\n  snapshotDataPoints: null,\n  isSignedIn: false,\n  offline: false\n}\n\nexport const dashboardReducer = createReducer<StateT>({}, initialState)\n\ndashboardReducer.on(startSnapshotModeAction, (state, { charts, dataPoints }) => ({\n  ...state,\n  snapshotCharts: charts, // todo integrate with /charts result\n  snapshotDataPoints: dataPoints,\n  isSnapshotMode: true,\n}))\n\ndashboardReducer.on(stopSnapshotModeAction, (state) => ({\n  ...state,\n  isSnapshotMode: initialState.isSnapshotMode,\n  snapshotCharts: initialState.snapshotCharts,\n  snapshotDataPoints: initialState.snapshotDataPoints,\n}))\n\ndashboardReducer.on(isSignedInAction, (state, { isSignedIn }) => ({\n  ...state,\n  isSignedIn\n}))\n\ndashboardReducer.on(setOfflineAction, (state, { offline }) => ({\n  ...state,\n  offline\n}))\n","import { combineReducers } from \"redux\"\n\nimport { globalReducer } from \"domains/global/reducer\"\nimport { storeKey as globalKey } from \"domains/global/constants\"\n\nimport { chartReducer } from \"domains/chart/reducer\"\nimport { storeKey as chartKey } from \"domains/chart/constants\"\n\nimport { dashboardReducer } from \"domains/dashboard/reducer\"\nimport { storeKey as dashboardKey } from \"domains/dashboard/constants\"\n\nexport default combineReducers({\n  [globalKey]: globalReducer,\n 
[chartKey]: chartReducer,\n\n // todo lazy-load and inject those reducers, when they are not needed (dashboard.js, cloud)\n [dashboardKey]: dashboardReducer,\n})\n","import axios from \"axios\"\n\nexport const axiosInstance = axios.create({\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n})\n","import axios from \"axios\"\n\nexport const axiosInstance = axios.create({\n // timeout: 30 * 1000, // todo\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n})\n","import {\n Subject, from, empty,\n} from \"rxjs\"\nimport {\n mergeMap, tap, catchError, startWith, switchMap,\n} from \"rxjs/operators\"\nimport { CancelTokenSource, Method } from \"axios\"\n\nimport { UnknownStringKeyT } from \"types/common\"\n\nimport { axiosInstance } from \"./axios-instance\"\n\nexport const CHART_UNMOUNTED = \"Chart scrolled out of view\"\n\ninterface FetchInputEvent {\n url: string\n method?: Method\n params?: UnknownStringKeyT\n data?: UnknownStringKeyT\n onErrorCallback: (error: Error) => void\n onSuccessCallback: (data: { [key: string]: unknown }) => void\n cancelTokenSource?: CancelTokenSource\n}\n\nconst METRICS_TIMEOUT = 15_000\n\nexport const getFetchStream = (concurrentCallsLimit: number) => {\n const fetch$ = new Subject<FetchInputEvent>()\n const resetFetch$ = new Subject()\n\n const handler = mergeMap(({\n url, method = \"GET\", params = {}, data, onErrorCallback, onSuccessCallback, cancelTokenSource,\n }: FetchInputEvent) => (\n from(axiosInstance.request({\n url,\n method,\n params,\n data,\n timeout: METRICS_TIMEOUT,\n cancelToken: cancelTokenSource?.token,\n })).pipe(\n tap(({ data: responseData }) => { onSuccessCallback(responseData) }),\n catchError((error: Error) => {\n // todo implement error handling to support NETDATA.options.current.retries_on_data_failures\n if (error?.message !== CHART_UNMOUNTED) {\n console.warn(\"fetch error\", url) // eslint-disable-line no-console\n }\n onErrorCallback(error)\n return empty()\n }),\n )\n ), concurrentCallsLimit)\n\n const output = resetFetch$.pipe(\n startWith(null),\n switchMap(() => fetch$.pipe(handler)),\n )\n\n output.subscribe()\n return [fetch$, resetFetch$]\n}\n","import { tail, sum, reverse } from \"ramda\"\nimport { ChartData, DygraphData } from \"domains/chart/chart-types\"\n\n/*\nwhen requesting for bigger time interval than available history in the agent,\nwe get only the available range. Dashboard was first designed to not allow zooming-out too much.\nBut we want to show the requested time-range, so to do it consistently, we return nr of points\nwhen making the request, and after getting result, we add `null`s at the beginning\n */\n\ninterface GetCorrectedPointsArg {\n after: number\n before: number\n firstEntry: number\n points: number\n}\nexport const getCorrectedPoints = ({\n after,\n before,\n firstEntry,\n points,\n}: GetCorrectedPointsArg) => {\n const nowInSeconds = Math.round(new Date().valueOf() / 1000)\n const afterAbsolute = after > 0 ? after : nowInSeconds + after\n const beforeAbsolute = before > 0 ? 
before : nowInSeconds + before\n\n if (afterAbsolute < firstEntry) {\n // take into account first_entry\n const realAfter = Math.max(afterAbsolute, firstEntry)\n const requestedRange = beforeAbsolute - afterAbsolute\n const availableRange = beforeAbsolute - realAfter\n\n return Math.round((points * availableRange) / requestedRange)\n }\n return null\n}\n\nexport const addPointsDygraph = (data: DygraphData, nrOfPointsToFill: number) => {\n const viewUpdateEvery = data.view_update_every\n if (!data.result.data.length) {\n return data\n }\n const firstAddedTimestamp = data.result.data[0][0] - nrOfPointsToFill * viewUpdateEvery\n const emptyPoint = tail(data.result.labels).map(() => null)\n const nulls = new Array(nrOfPointsToFill)\n .fill(null)\n .map((_, i) => [firstAddedTimestamp + i * viewUpdateEvery, ...emptyPoint])\n return {\n ...data,\n after: data.after - viewUpdateEvery * nrOfPointsToFill,\n result: {\n ...data.result,\n data: nulls.concat(data.result.data),\n },\n }\n}\n\nexport const fillMissingData = (data: ChartData, nrOfPointsToFill: number) => {\n if (data.format === \"json\") {\n return addPointsDygraph(data as DygraphData, nrOfPointsToFill)\n }\n return data\n}\n\nconst emptyArray: number[] = []\nexport const transformResults = (data: ChartData, format: string, shouldRevertFlip: boolean) => {\n if (format === \"array\" && data.format === \"json\") {\n if (Array.isArray(data.result)) return data\n\n const dataResult = shouldRevertFlip\n ? reverse((data as DygraphData).result.data)\n : (data as DygraphData).result.data\n return {\n ...data,\n // set proper output type so other functions like fillMissingData work properly\n format: \"array\",\n result: dataResult.reduce((acc: number[], pointData: number[]) => {\n pointData.shift()\n return [...acc, sum(pointData)]\n }, emptyArray),\n }\n }\n return data\n}\n\nexport const mapDefaultAggrMethod = (unit: string): string => {\n if (unit.length === 0) {\n return \"sum\"\n }\n const avgUnits: any = {\n percentage: true,\n percent: true,\n \"rotations/min\": true,\n ratio: true,\n seconds: true,\n \"seconds ago\": true,\n milliseconds: true,\n millisec: true,\n ms: true,\n \"log2 s\": true,\n minutes: true,\n hours: true,\n interval: true,\n ticks: true,\n celsius: true,\n c: true,\n mhz: true,\n hz: true,\n volts: true,\n kwh: true,\n ampere: true,\n amps: true,\n dbm: true,\n value: true,\n stratum: true,\n units: true,\n watt: true,\n temperature: true,\n \"random number\": true,\n rpm: true,\n quadro: true,\n \"adv/item\": true,\n multiplier: true,\n geforce: true,\n }\n if (avgUnits[unit.toLowerCase()]) {\n return \"avg\"\n }\n const avgUnitsRegExes: any = [\".*%.*\", \".*/operation\", \".*/run\", \".*/ run\", \".*/request\"]\n if (\n avgUnitsRegExes.some((regEx: string) => {\n const regExpression = RegExp(regEx, \"i\")\n return regExpression.test(unit.toLowerCase())\n })\n ) {\n return \"avg\"\n }\n return \"sum\"\n}\n","import styled from \"styled-components\"\nimport {\n getSizeBy, Text, TextSmall, getColor,\n} from \"@netdata/netdata-ui\"\n\nexport const Container = styled.div`\n width: 100%;\n height: 100%;\n min-height: ${getSizeBy(10)};\n display: flex;\n flex-flow: row nowrap;\n padding: ${getSizeBy(2)} ${getSizeBy(2)} ${getSizeBy(2)} ${getSizeBy(2)};\n`\n\nexport const SideContent = styled.div<{ right?: boolean }>`\n flex-grow: 0;\n flex-shrink: 0;\n height: 100%;\n align-self: stretch;\n`\n\nexport const ContentContainer = styled.div``\n\nexport const HeaderText = styled(Text)<{ error?: boolean; success?: boolean }>`\n 
color: ${({ error, success }) => (success && getColor(\"success\"))\n || (error && getColor(\"error\"))};\n font-weight: bold;\n display: block;\n margin-bottom: ${getSizeBy()};\n`\n\nexport const ContentText = styled(TextSmall)<{ error?: boolean; success?: boolean }>`\n display: block;\n color: ${({ error }) => (error && getColor(\"error\")) || getColor(\"border\")};\n`\n","import React from \"react\"\nimport {\n Container, SideContent, ContentContainer, HeaderText, ContentText,\n} from \"./styled\"\n\ninterface NotificationProps {\n header?: string\n leftContent?: React.ReactNode\n rightContent?: React.ReactNode\n text?: React.ReactNode\n className?: string\n renderContent?: (props: NotificationProps) => React.ReactNode | React.ReactNodeArray | null\n success?: boolean\n error?: boolean\n}\n\nexport const UINotification = (props: NotificationProps) => {\n const {\n header, text, leftContent, rightContent, renderContent, success, error,\n } = props\n return (\n <Container>\n {leftContent && <SideContent>{leftContent}</SideContent>}\n <ContentContainer>\n {header && (\n <HeaderText success={success} error={error}>\n {header}\n </HeaderText>\n )}\n {text && (\n <ContentText success={success} error={error}>\n {text}\n </ContentText>\n )}\n {renderContent && renderContent(props)}\n </ContentContainer>\n {rightContent && <SideContent right>{rightContent}</SideContent>}\n </Container>\n )\n}\n\n// for usage in non-jsx contexts\n// eslint-disable-next-line react/jsx-props-no-spreading\nexport const createUINotification = (props: NotificationProps) => <UINotification {...props} />\n","import styled from \"styled-components\"\nimport { getSizeBy } from \"@netdata/netdata-ui\"\n\nexport const NodeIconContainer = styled.div`\n width: ${getSizeBy(5)};\n height: ${getSizeBy(5)};\n margin-right: ${getSizeBy(2)};\n display: flex;\n justify-content: center;\n align-items: center;\n`\n\nexport const NotificationLink = styled.a`\n &,\n &:hover {\n text-decoration: underline;\n color: inherit;\n }\n`\n","import React from \"react\"\nimport { Icon } from \"@netdata/netdata-ui\"\nimport { toast } from \"react-toastify\"\n\nimport { createUINotification } from \"components/ui-notification\"\n\nimport * as S from \"./styled\"\n\nexport const toastOptions = {\n position: toast.POSITION.BOTTOM_RIGHT,\n autoClose: 10000,\n pauseOnFocusLoss: false,\n}\n\nexport const showCloudInstallationProblemNotification = () => {\n const uiNotification = {\n header: \"Installation error\",\n text: \"The installer could not prepare the required dependencies to enable Netdata Cloud\"\n + \" functionality\",\n }\n const notificationComponent = createUINotification({\n ...uiNotification,\n error: true,\n leftContent: (\n <S.NodeIconContainer>\n <Icon name=\"gear\" size=\"large\" color=\"error\" />\n </S.NodeIconContainer>\n ),\n })\n toast.error(notificationComponent, toastOptions)\n}\n\nexport const showCloudConnectionProblemNotification = () => {\n const uiNotification = {\n header: \"Connection Problem\",\n text: (\n <S.NotificationLink\n href=\"https://learn.netdata.cloud/docs/agent/packaging/installer#automatic-one-line-installation-script\"\n target=\"_blank\"\n >\n To access Cloud install again your agent via the kickstart script\n </S.NotificationLink>\n ),\n }\n const notificationComponent = createUINotification({\n ...uiNotification,\n error: true,\n leftContent: (\n <S.NodeIconContainer>\n <Icon name=\"gear\" size=\"large\" color=\"error\" />\n </S.NodeIconContainer>\n ),\n })\n toast.error(notificationComponent, 
toastOptions)\n}\n","import {\n mergeAll, pipe, split, mergeRight,\n} from \"ramda\"\nimport { mapIndexed } from \"ramda-adjunct\"\n\nconst defaultUrlOptions = {\n hash: \"#\",\n theme: null,\n help: null,\n mode: \"live\", // 'live', 'print'\n update_always: false,\n pan_and_zoom: false,\n server: null,\n after: 0,\n before: 0,\n highlight: false,\n highlight_after: 0,\n highlight_before: 0,\n nowelcome: false,\n show_alarms: false,\n chart: null,\n family: null,\n alarm: null,\n alarm_unique_id: 0,\n alarm_id: 0,\n alarm_event_id: 0,\n alarm_when: 0,\n} as {[key: string]: unknown}\n\nconst isInvalidPair = ([key, value]: [string, string]) => (\n defaultUrlOptions[key] === undefined || value === undefined\n)\n\nconst parseQueryPair = ([key, value]: [string, string]): {[key: string] : unknown} => {\n if (isInvalidPair([key, value])) {\n return {}\n }\n return {\n [key]: decodeURIComponent(value),\n }\n}\n\nexport const parseUrl = pipe(\n split(\";\"),\n mapIndexed((value, index) => (\n (index === 0) ? { hash: value } : parseQueryPair((value.split(\"=\") as [string, string]))\n )),\n mergeAll,\n mergeRight(defaultUrlOptions),\n)\n\nconst urlParsed = parseUrl(document.location.hash)\n\nexport const isPrintMode = urlParsed.mode === \"print\"\n","import {\n call,\n put,\n takeEvery,\n select,\n spawn,\n take,\n delay,\n} from \"redux-saga/effects\"\nimport { channel } from \"redux-saga\"\nimport { Action } from \"redux-act\"\n\nimport { axiosInstance } from \"utils/api\"\nimport { alwaysEndWithSlash, serverDefault } from \"utils/server-detection\"\nimport { getFetchStream } from \"utils/netdata-sdk\"\nimport { isMainJs } from \"utils/env\"\nimport { fillMissingData, transformResults } from \"utils/fill-missing-data\"\nimport {\n showCloudInstallationProblemNotification, showCloudConnectionProblemNotification,\n} from \"components/notifications\"\nimport { selectGlobalPanAndZoom, selectSnapshot, selectRegistry } from \"domains/global/selectors\"\nimport { StateT as GlobalStateT } from \"domains/global/reducer\"\nimport { stopSnapshotModeAction } from \"domains/dashboard/actions\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\nimport { INFO_POLLING_FREQUENCY } from \"domains/global/constants\"\n\nimport {\n fetchDataAction,\n FetchDataPayload,\n fetchChartAction,\n FetchChartPayload,\n fetchDataForSnapshotAction,\n FetchDataForSnapshotPayload,\n fetchInfoAction,\n FetchInfoPayload,\n fetchDataCancelAction,\n} from \"./actions\"\nimport { ChartData } from \"./chart-types\"\n\nconst CONCURRENT_CALLS_LIMIT_METRICS = isMainJs ? 
30 : 60\nconst CONCURRENT_CALLS_LIMIT_PRINT = 2\nconst CONCURRENT_CALLS_LIMIT_SNAPSHOTS = 1\n\nconst fetchDataResponseChannel = channel()\n\nexport function* watchFetchDataResponseChannel() {\n while (true) {\n const action = (yield take(fetchDataResponseChannel))\n\n // special case - if requested relative timeRange, and during request the mode has been changed\n // to absolute global-pan-and-zoom, cancel the store update\n // todo do xss check of data\n if (action.type === fetchDataAction.success.toString()) {\n const payload = (action.payload as FetchDataPayload)\n const { viewRange } = payload.fetchDataParams\n const [start, end] = viewRange\n const globalPanAndZoom = (yield select(\n selectGlobalPanAndZoom,\n )) as GlobalStateT[\"globalPanAndZoom\"]\n\n if (globalPanAndZoom\n && (start <= 0 || end <= 0) // check if they are not timestamps\n ) {\n yield put(fetchDataCancelAction({\n id: payload.id,\n }))\n // eslint-disable-next-line no-continue\n continue\n }\n }\n\n yield put(action)\n }\n}\n\n// todo construct a new version of key that will be safer to be used in future\n// (while keeping old key supported for some time)\n// perhaps the key could be passed as attribute to the chart, to avoid matching\nconst constructCompatibleKey = (dimensions: undefined | string, options: string) => (\n // strange transformations for backwards compatibility. old snapshot keys were encoded this way\n // that empty dimensions were actually \"null\" string\n `${dimensions === undefined\n ? \"null\"\n : encodeURIComponent(dimensions)\n },${encodeURIComponent(options)}`\n)\n\n// currently BE always transforms data as if `flip` was there\nconst IS_FLIP_RESPECTED_IN_COMPOSITE_CHARTS = false\n\nconst getGroupByValues = (groupBy) => {\n if (groupBy === \"chart\") return \"node\"\n if (groupBy === \"node\" || groupBy === \"dimension\") return groupBy\n return `label=${groupBy}`\n}\n\nconst [fetchMetrics$] = getFetchStream(\n isPrintMode ? CONCURRENT_CALLS_LIMIT_PRINT : CONCURRENT_CALLS_LIMIT_METRICS,\n)\nfunction* fetchDataSaga({ payload }: Action<FetchDataPayload>) {\n const {\n // props for api\n host, context, chart, format, points, group, gtime, options,\n after, before, dimensions, labels, postGroupBy, postAggregationMethod,\n aggrMethod, dimensionsAggrMethod, nodeIDs, httpMethod,\n groupBy = \"dimension\", // group by node, dimension, or label keys\n aggrGroups = [],\n // props for the store\n fetchDataParams, id, cancelTokenSource,\n } = payload\n\n const snapshot = yield select(selectSnapshot)\n if (snapshot) {\n // if reading snapshot\n const dimensionsWithUrlOptions = constructCompatibleKey(dimensions, options)\n const matchingKey = Object.keys(snapshot.data).find((snapshotKey) => (\n snapshotKey.startsWith(chart) && snapshotKey.includes(dimensionsWithUrlOptions)\n ))\n if (!matchingKey) {\n // eslint-disable-next-line no-console\n console.warn(`Could not find snapshot key for chart: ${chart} and id ${id}`)\n return\n }\n const data = snapshot.data[matchingKey]\n yield put(fetchDataAction.success({\n chartData: data,\n fetchDataParams,\n id,\n }))\n return\n }\n\n const url = isMainJs\n ? `${alwaysEndWithSlash(host)}api/v1/data`\n : host\n\n const agentOptionsOriginal = options.split(\"|\")\n const hasFlip = agentOptionsOriginal.includes(\"flip\")\n const shouldAddFakeFlip = !IS_FLIP_RESPECTED_IN_COMPOSITE_CHARTS && !hasFlip\n // if flip is not respected in composite-charts, send it always (like dygraph charts normally do)\n const agentOptions = shouldAddFakeFlip\n ? 
agentOptionsOriginal.concat(\"flip\") : agentOptionsOriginal\n\n const groupValues = [\n getGroupByValues(groupBy),\n postGroupBy && `label=${postGroupBy}`,\n ].filter(Boolean)\n\n const axiosOptions = httpMethod === \"POST\" ? {\n // used by cloud's room-overview\n data: {\n filter: {\n nodeIDs,\n context,\n dimensions: dimensions ? dimensions.split(/['|]/) : undefined,\n labels,\n },\n after,\n before,\n points,\n group,\n gtime,\n agent_options: agentOptions,\n ...(postAggregationMethod && { post_aggregation_methods: [postAggregationMethod] }),\n aggregations: [groupBy !== \"dimension\" && {\n method: dimensionsAggrMethod || \"sum\",\n groupBy: [\"chart\", ...groupValues],\n },\n groupBy !== \"chart\" && {\n method: aggrMethod,\n groupBy: groupValues,\n ...(aggrGroups.length && { labels: aggrGroups }),\n }].filter(Boolean),\n },\n } : {\n params: {\n chart,\n _: new Date().valueOf(),\n format,\n points,\n group,\n gtime,\n options,\n after,\n before,\n dimensions,\n },\n }\n\n const onSuccessCallback = (data: { [id: string]: unknown}) => {\n if (!data?.result) {\n fetchDataResponseChannel.put(fetchDataAction.failure({ id }))\n } else {\n const { fillMissingPoints } = fetchDataParams\n\n const transformedResults = transformResults(\n (data as unknown) as ChartData,\n format,\n shouldAddFakeFlip,\n )\n\n const chartData = {\n ...transformedResults,\n // @ts-ignore\n ...((\"post_aggregated_data\" in data.result) && {\n postAggregationMethod,\n groupBy,\n postGroupBy,\n aggrGroups,\n // @ts-ignore\n postAggregated: data.result.post_aggregated_data[postAggregationMethod],\n }),\n }\n\n fetchDataResponseChannel.put(fetchDataAction.success({\n chartData: fillMissingPoints\n ? fillMissingData(chartData as ChartData, fillMissingPoints)\n : chartData,\n fetchDataParams,\n id,\n }))\n }\n }\n\n const onErrorCallback = (error: Error) => {\n console.warn(\"fetch chart data failure\", error) // eslint-disable-line no-console\n fetchDataResponseChannel.put(fetchDataAction.failure({ id }))\n }\n\n fetchMetrics$.next({\n ...axiosOptions,\n method: httpMethod || \"GET\",\n url,\n onErrorCallback,\n onSuccessCallback,\n cancelTokenSource,\n })\n}\n\nconst [fetchForSnapshot$, resetFetchForSnapshot$] = getFetchStream(CONCURRENT_CALLS_LIMIT_SNAPSHOTS)\nfunction fetchDataForSnapshotSaga({ payload }: Action<FetchDataForSnapshotPayload>) {\n const {\n host, chart, format, points, group, gtime, options,\n after, before, dimensions, aggrMethod,\n groupBy,\n nodeIDs,\n chartLibrary, id,\n } = payload\n\n // backwards-compatibility, the keys look like this:\n // net_errors.stf0,dygraph,null,ms%7Cflip%7Cjsonwrap%7Cnonzero\n const chartDataUniqueID = `${chart},${chartLibrary},${constructCompatibleKey(\n dimensions,\n options,\n )}`\n\n const url = `${alwaysEndWithSlash(host)}api/v1/data`\n const params = {\n chart,\n _: new Date().valueOf(),\n format,\n points,\n group,\n gtime,\n options,\n after,\n before,\n dimensions,\n ...(aggrMethod && { aggr_method: aggrMethod }),\n ...(nodeIDs && { node_ids: nodeIDs.join(\",\") }),\n ...(groupBy && { groupBy }),\n }\n\n const onSuccessCallback = (data: unknown) => {\n fetchDataResponseChannel.put(fetchDataForSnapshotAction.success({\n snapshotData: data,\n id,\n }))\n // temporarily, until main.js finished rewrite\n // @ts-ignore\n window.chartUpdated({\n chartDataUniqueID,\n data,\n })\n }\n\n const onErrorCallback = () => {\n fetchDataResponseChannel.put(fetchDataForSnapshotAction.failure({ id }))\n // @ts-ignore\n window.chartUpdated({\n chartDataUniqueID,\n chart,\n data: 
null,\n })\n }\n\n fetchForSnapshot$.next({\n url,\n params,\n onErrorCallback,\n onSuccessCallback,\n })\n}\n\nfunction stopSnapshotModeSaga() {\n // any calls in the queue should stop when save-snapshot modal is closed\n resetFetchForSnapshot$.next()\n}\n\nfunction* fetchChartSaga({ payload }: Action<FetchChartPayload>) {\n const { chart, id, host } = payload\n\n const snapshot = yield select(selectSnapshot)\n if (snapshot) {\n yield put(fetchChartAction.success({\n chartMetadata: snapshot.charts.charts[chart],\n id,\n }))\n return\n }\n\n let response\n const url = isMainJs\n ? `${alwaysEndWithSlash(host)}api/v1/chart`\n : host.replace(\"/data\", \"/chart\")\n try {\n response = yield call(axiosInstance.get, url, {\n params: {\n chart,\n },\n })\n } catch (e) {\n console.warn(\"fetch chart details failure\") // eslint-disable-line no-console\n yield put(fetchChartAction.failure({ id }))\n return\n }\n yield put(fetchChartAction.success({\n chartMetadata: response.data,\n id,\n }))\n}\n\nfunction* fetchInfoSaga({ payload }: Action<FetchInfoPayload>) {\n const { poll } = payload\n let isCloudEnabled = false\n let isAgentClaimed = false\n let isCloudAvailable = false\n let isACLKAvailable = false\n\n try {\n const registry: GlobalStateT[\"registry\"] = yield select(selectRegistry)\n const wasCloudAvailable = registry?.isCloudAvailable\n const wasACLKAvailable = registry?.isACLKAvailable\n\n const { data } = yield call(axiosInstance.get, `${serverDefault}/api/v1/info`)\n isCloudAvailable = data?.[\"cloud-available\"] || false\n isCloudEnabled = data?.[\"cloud-enabled\"] || false\n isAgentClaimed = data?.[\"agent-claimed\"] || false\n isACLKAvailable = data?.[\"aclk-available\"] || false\n\n yield put(fetchInfoAction.success({\n isCloudAvailable, isCloudEnabled, isAgentClaimed, isACLKAvailable, fullInfoPayload: data,\n }))\n\n if (isCloudEnabled && (wasCloudAvailable === null) && !isCloudAvailable) {\n // show only once per session\n showCloudInstallationProblemNotification()\n }\n if (isCloudAvailable && isAgentClaimed && (wasACLKAvailable !== false) && !isACLKAvailable) {\n // show at session-init and if we see a change of isACLKAvailable from true to false\n showCloudConnectionProblemNotification()\n }\n // TODO: No success notification spec`ed?\n // else if (!wasACLKAvailable && isACLKAvailable) {\n // toast.success(\"Connected to the Cloud!\", {\n // position: \"bottom-right\",\n // type: toast.TYPE.SUCCESS,\n // autoClose: NOTIFICATIONS_TIMEOUT,\n // })\n // }\n } catch (e) {\n console.warn(\"fetch agent info failure\") // eslint-disable-line no-console\n yield put(fetchInfoAction.failure())\n }\n\n if (poll && isCloudEnabled && isAgentClaimed) {\n yield delay(INFO_POLLING_FREQUENCY)\n yield put(fetchInfoAction({ poll: true }))\n }\n}\n\n\nexport function* chartSagas() {\n yield takeEvery(fetchDataAction.request, fetchDataSaga)\n yield takeEvery(fetchChartAction.request, fetchChartSaga)\n yield takeEvery(fetchDataForSnapshotAction.request, fetchDataForSnapshotSaga)\n yield takeEvery(stopSnapshotModeAction, stopSnapshotModeSaga)\n yield takeEvery(fetchInfoAction.request, fetchInfoSaga)\n yield spawn(watchFetchDataResponseChannel)\n}\n","export const sidePanelTransitionTimeInSeconds = 0.2\n","import { sortBy, prop, last } from \"ramda\"\nimport { Action } from \"redux-act\"\nimport {\n call, delay, spawn, take, takeEvery, put,\n} from \"redux-saga/effects\"\n\nimport { axiosInstance } from \"utils/api\"\nimport { serverStatic } from \"utils/server-detection\"\nimport { name2id } from 
\"utils/name-2-id\"\n\nimport {\n  startAlarmsAction, StartAlarmsPayload, fetchAllAlarmsAction, updateActiveAlarmsAction,\n} from \"./actions\"\nimport { AlarmLogs, AlarmLog, ActiveAlarms } from \"./types\"\n\nconst ALARMS_INITIALIZATION_DELAY = 1000\nconst ALARMS_UPDATE_EVERY = 10000 // the time in ms between alarm checks\nconst CHART_DIV_OFFSET = -50\n\n// firefox moves the alarms off-screen (above, outside the top of the screen)\n// if alarms are shown faster than: one per 500ms\nconst ALARMS_MS_BETWEEN_NOTIFICATIONS = 500\n\n// equal to old NETDATA.alarms.notifications\nconst areNotificationsAvailable = \"Notification\" in window\n\nconst notificationCallback = window.netdataAlarmsNotifCallback\n\n\n// todo this doesn't change in the session, but should be moved to the redux state anyway\nlet firstNotificationId = 0\nlet lastNotificationId = 0\n\n\nconst scrollToChart = (chartID: unknown): boolean => {\n  if (typeof chartID === \"string\") {\n    const chartElement = document.querySelector(`#chart_${name2id(chartID)}`)\n    if (chartElement) {\n      const offset = (chartElement as HTMLDivElement).offsetTop + CHART_DIV_OFFSET;\n      (document.querySelector(\"html\") as HTMLElement).scrollTop = offset\n      return true\n    }\n  }\n  return false\n}\n\n// perhaps sagas are not the best place for this\nconst scrollToAlarm = (alarm: AlarmLog) => {\n  if (typeof alarm === \"object\") {\n    const hasFoundChart = scrollToChart(alarm.chart)\n    if (hasFoundChart) {\n      window.focus()\n    }\n  }\n}\n\nconst requestPermissions = () => {\n  if (areNotificationsAvailable) {\n    if (Notification.permission === \"default\") {\n      Notification.requestPermission()\n    }\n  }\n}\n\nconst hasGivenNotificationPermissions = () => (areNotificationsAvailable\n  && Notification.permission === \"granted\"\n)\n\nfunction* getLog(lastNotificationIdArg: number, serverDefault: string) {\n  try {\n    const { data } = yield call(\n      axiosInstance.get,\n      `${serverDefault}/api/v1/alarm_log?after=${lastNotificationIdArg}`,\n    )\n    // todo xss check\n    return data\n  } catch (error) {\n    console.warn(\"Error fetching alarms log\", error) // eslint-disable-line no-console\n    return null\n  }\n}\n\ninterface NotificationConfig {\n  notificationTitle: string\n  notificationOptions: NotificationOptions\n  notificationHandler: (event: Event) => void\n}\n// called \"notify\" in old codebase\nconst getNotification = (\n  entry: AlarmLog, activeAlarms: ActiveAlarms, firstNotificationIdArg: number,\n): NotificationConfig | undefined => {\n  if (entry.updated) {\n    // has been updated by another alarm\n    return\n  }\n\n  let valueString = entry.value_string\n  const t = activeAlarms.alarms[`${entry.chart}.${entry.name}`]\n  if (typeof t !== \"undefined\"\n    && entry.status === t.status\n    && typeof t.value_string !== \"undefined\"\n  ) {\n    valueString = t.value_string\n  }\n\n  const name = entry.name.replace(/_/g, \" \")\n  let status = entry.status.toLowerCase()\n  let title = `${name} = ${valueString}`\n  const tag = entry.alarm_id\n  let icon = \"images/banner-icon-144x144.png\"\n  let interaction = false\n  let show = true\n\n  // switch/case left here to simplify refactoring (it's very similar to old code)\n  switch (entry.status) {\n    case \"REMOVED\":\n      show = false\n      break\n\n    case \"UNDEFINED\":\n      return\n\n    case \"UNINITIALIZED\":\n      return\n\n    case \"CLEAR\":\n      if (entry.unique_id < firstNotificationIdArg) {\n        // alarm is not current\n        return\n      }\n      if (entry.old_status === \"UNINITIALIZED\" || entry.old_status === \"UNDEFINED\") {\n        // alarm switched to CLEAR from old_status\n        return\n      }\n      if 
(entry.no_clear_notification) {\n // alarm is CLEAR but has no_clear_notification flag\n return\n }\n title = `${name} back to normal (${valueString})`\n icon = \"images/check-mark-2-128-green.png\"\n interaction = false\n break\n\n case \"WARNING\":\n if (entry.old_status === \"CRITICAL\") {\n status = `demoted to ${entry.status.toLowerCase()}`\n }\n\n icon = \"images/alert-128-orange.png\"\n interaction = false\n break\n\n case \"CRITICAL\":\n if (entry.old_status === \"WARNING\") {\n status = `escalated to ${entry.status.toLowerCase()}`\n }\n\n icon = \"images/alert-128-red.png\"\n interaction = true\n break\n\n default:\n console.warn(`invalid alarm status ${entry.status}`) // eslint-disable-line no-console\n return\n }\n\n // filter recipients\n // if (show) {\n // show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients)\n // }\n\n\n if (show) {\n if (typeof notificationCallback === \"function\") {\n show = notificationCallback(entry)\n }\n\n if (show) {\n // show this notification\n // eslint-disable-next-line consistent-return\n return {\n notificationTitle: title,\n notificationOptions: {\n body: `${entry.hostname} - ${entry.chart} (${entry.family}) - ${status}: ${entry.info}`,\n tag: `${tag}`,\n requireInteraction: interaction,\n icon: serverStatic + icon,\n data: entry,\n },\n notificationHandler: (event: Event) => {\n event.preventDefault()\n if (event.target) {\n const { data } = event.target as Notification\n scrollToAlarm(data)\n }\n },\n }\n }\n }\n}\n\nfunction* notifyAll(serverDefault: string, activeAlarms: ActiveAlarms) {\n const alarmLogs: AlarmLogs = yield call(getLog, lastNotificationId, serverDefault)\n if (alarmLogs === null || typeof alarmLogs !== \"object\") {\n console.warn(\"invalid alarms log response\") // eslint-disable-line no-console\n return\n }\n\n if (alarmLogs.length === 0) {\n console.log(\"received empty alarm log\") // eslint-disable-line no-console\n return\n }\n\n const logsSorted = sortBy(prop(\"unique_id\"), alarmLogs)\n\n // eslint-disable-next-line camelcase\n const newLogs = logsSorted.filter(({ unique_id }) => unique_id > lastNotificationId)\n const notifications = newLogs\n .map((entry) => (getNotification(entry, activeAlarms, firstNotificationId)))\n .filter((x) => x !== undefined) as NotificationConfig[]\n\n for (let i = 0; i < notifications.length; i += 1) {\n const {\n notificationTitle, notificationOptions, notificationHandler,\n } = notifications[i]\n const notification = new Notification(\n notificationTitle,\n notificationOptions,\n )\n notification.onclick = notificationHandler\n\n yield delay(ALARMS_MS_BETWEEN_NOTIFICATIONS)\n }\n\n // todo put to redux store\n lastNotificationId = (last(logsSorted) as AlarmLog).unique_id\n\n if (typeof window.netdataAlarmsRemember === \"undefined\" || window.netdataAlarmsRemember) {\n localStorage.setItem(\"last_notification_id\", `${lastNotificationId}`)\n }\n}\n\n\nfunction* get(what: string, serverDefault: string) {\n const { data } = yield call(axiosInstance.get, `${serverDefault}/api/v1/alarms?${what}`)\n if (firstNotificationId === 0 && typeof data.latest_alarm_log_unique_id === \"number\") {\n firstNotificationId = data.latest_alarm_log_unique_id\n }\n return data\n}\n\nfunction* alarmsLoop(serverDefault: string) {\n while (true) {\n const activeAlarms = (yield call(get, \"active\", serverDefault)) as ActiveAlarms\n if (activeAlarms) {\n yield put(updateActiveAlarmsAction({ activeAlarms }))\n if (\n hasGivenNotificationPermissions()\n // timestamps in seconds\n && 
(activeAlarms.latest_alarm_log_unique_id > lastNotificationId)\n ) {\n yield call(notifyAll, serverDefault, activeAlarms)\n\n if (activeAlarms.status === false) {\n // Health monitoring is disabled on this netdata\n break\n }\n }\n }\n yield delay(ALARMS_UPDATE_EVERY)\n }\n}\n\nfunction* startAlarms() {\n // make sure we handle that action only once, we don't want multiple intervals/loops\n const { payload }: { payload: StartAlarmsPayload } = yield take(startAlarmsAction)\n const { serverDefault } = payload\n\n yield delay(ALARMS_INITIALIZATION_DELAY)\n\n lastNotificationId = +(localStorage.getItem(\"last_notification_id\") || lastNotificationId)\n requestPermissions()\n yield call(alarmsLoop, serverDefault)\n}\n\ntype FetchAllAlarmsPayload = {\n callback: (x: unknown) => void,\n serverDefault: string,\n}\nfunction* fetchAllAlarmsSaga({ payload }: Action<FetchAllAlarmsPayload>) {\n const { callback, serverDefault } = payload\n const allAlarms = yield call(get, \"all\", serverDefault)\n callback(allAlarms)\n}\n\nexport function* alarmsSagas() {\n yield spawn(startAlarms)\n yield takeEvery(fetchAllAlarmsAction.request, fetchAllAlarmsSaga)\n}\n","const allowedReferrerDomains = [\n \"\",\n \"https://www.google.com/\",\n \"https://duckduckgo.com/\",\n \"https://www.reddit.com/\",\n]\n\nexport const isAllowedReferrer = (referrer: string) => allowedReferrerDomains.includes(referrer)\n || referrer.endsWith(\".my-netdata.io/\")\n || referrer.startsWith(\"https://github.com/\")\n || referrer.endsWith(\"netdata.cloud/\")\n || referrer.startsWith(\"https://app.netdata.cloud/\")\n","import { uniq, filter } from \"ramda\"\nimport {\n spawn, take, put, takeEvery, call, delay, select,\n} from \"redux-saga/effects\"\nimport { channel } from \"redux-saga\"\nimport { AxiosResponse } from \"axios\"\nimport { Action } from \"redux-act\"\n\nimport { NETDATA_REGISTRY_SERVER } from \"utils/utils\"\nimport { axiosInstance } from \"utils/api\"\nimport { isDemo } from \"utils/is-demo\"\nimport { sidePanelTransitionTimeInSeconds } from \"components/space-panel/settings\"\nimport { fetchInfoAction } from \"domains/chart/actions\"\n\nimport {\n fetchHelloAction,\n FetchHelloPayload,\n windowFocusChangeAction,\n updatePersonUrlsAction,\n SetOptionAction,\n setOptionAction,\n setSpacePanelStatusAction,\n SetSpacePanelStatusActionPayload,\n setSpacePanelTransitionEndAction,\n HelloResponse,\n accessRegistrySuccessAction,\n} from \"./actions\"\nimport { alarmsSagas } from \"./alarms-sagas\"\nimport { MASKED_DATA } from \"./constants\"\nimport { selectFullInfoPayload } from \"./selectors\"\nimport { isAllowedReferrer } from \"./utils\"\nimport { InfoPayload } from \"./__mocks__/info-mock\"\n\nconst windowFocusChannel = channel()\n\nexport function listenToWindowFocus() {\n window.addEventListener(\"focus\", () => {\n windowFocusChannel.put(windowFocusChangeAction({ hasWindowFocus: true }))\n })\n window.addEventListener(\"blur\", () => {\n windowFocusChannel.put(windowFocusChangeAction({ hasWindowFocus: false }))\n })\n}\n\nexport function* watchWindowFocusChannel() {\n while (true) {\n const action = yield take(windowFocusChannel)\n yield put(action)\n }\n}\n\nfunction* waitForFullInfoPayload() {\n return (yield take(fetchInfoAction.success)).payload.fullInfoPayload\n}\n\nfunction* injectPosthog(machineGuid: string, personGuid?: string) {\n if (window.posthog) {\n return\n }\n const info: InfoPayload = (yield select(selectFullInfoPayload))\n || (yield call(waitForFullInfoPayload))\n || {}\n\n /* eslint-disable */\n // 
@ts-ignore\n !function(t,e){var o,n,p,r;e.__SV||(window.posthog=e,e._i=[],e.init=function(i,s,a){function g(t,e){var o=e.split(\".\");2==o.length&&(t=t[o[0]],e=o[1]),t[e]=function(){t.push([e].concat(Array.prototype.slice.call(arguments,0)))}}(p=t.createElement(\"script\")).type=\"text/javascript\",p.async=!0,p.src=s.api_host+\"/static/array.js\",(r=t.getElementsByTagName(\"script\")[0]).parentNode.insertBefore(p,r);var u=e;for(void 0!==a?u=e[a]=[]:a=\"posthog\",u.people=u.people||[],u.toString=function(t){var e=\"posthog\";return\"posthog\"!==a&&(e+=\".\"+a),t||(e+=\" (stub)\"),e},u.people.toString=function(){return u.toString(1)+\".people (stub)\"},o=\"capture identify alias people.set people.set_once set_config register register_once unregister opt_out_capturing has_opted_out_capturing opt_in_capturing reset isFeatureEnabled onFeatureFlags\".split(\" \"),n=0;n<o.length;n++)g(u,o[n]);e._i.push([i,s,a])},e.__SV=1)}(document,window.posthog||[]);\n /* eslint-enable */\n window.posthog.init(\"mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y\", {\n api_host: \"https://app.posthog.com\",\n loaded: (posthog: any) => {\n if (personGuid) {\n posthog.identify(personGuid)\n }\n },\n })\n const shouldMaskReferrer = !isDemo && !isAllowedReferrer(document.referrer)\n const MASKED = \"masked\"\n window.posthog.register(\n // remove properties with unavailable values\n filter((value) => value !== undefined && value !== null,\n {\n $ip: \"127.0.0.1\",\n $current_url: isDemo ? null : \"agent dashboard\",\n $pathname: isDemo ? null : \"netdata-dashboard\",\n $host: isDemo ? null : \"dashboard.netdata.io\",\n\n $initial_referring_domain: shouldMaskReferrer ? MASKED : null,\n $initial_referrer: shouldMaskReferrer ? MASKED : null,\n $referring_domain: shouldMaskReferrer ? MASKED : null,\n $referrer: shouldMaskReferrer ? 
MASKED : null,\n\n      event_source: \"agent dashboard\",\n\n      netdata_version: info.version,\n      netdata_machine_guid: machineGuid,\n      netdata_person_id: personGuid || \"Unavailable\",\n      netdata_buildinfo: info[\"buildinfo\"],\n      netdata_release_channel: info[\"release-channel\"],\n      mirrored_host_count: info.mirrored_hosts?.length,\n      alarms_normal: info.alarms?.normal,\n      alarms_warning: info.alarms?.warning,\n      alarms_critical: info.alarms?.critical,\n      host_os_name: info.os_name,\n      host_os_id: info.os_id,\n      host_os_id_like: info.os_id_like,\n      host_os_version: info.os_version,\n      host_os_version_id: info.os_version_id,\n      host_os_detection: info.os_detection,\n      system_cores_total: info.cores_total,\n      system_total_disk_space: info.total_disk_space,\n      system_cpu_freq: info.cpu_freq,\n      system_ram_total: info.ram_total,\n      system_kernel_name: info.kernel_name,\n      system_kernel_version: info.kernel_version,\n      system_architecture: info.architecture,\n      system_virtualization: info.virtualization,\n      system_virt_detection: info.virt_detection,\n      system_container: info.container,\n      system_container_detection: info.container_detection,\n      container_os_name: info.container_os_name,\n      container_os_id: info.container_os_id,\n      container_os_id_like: info.container_os_id_like,\n      container_os_version: info.container_os_version,\n      container_os_version_id: info.container_os_version_id,\n      host_collectors_count: info.collectors?.length,\n      host_cloud_enabled: info[\"cloud-enabled\"],\n      host_cloud_available: info[\"cloud-available\"],\n      host_agent_claimed: info[\"agent-claimed\"],\n      host_aclk_available: info[\"aclk-available\"],\n      host_aclk_implementation: info[\"aclk-implementation\"],\n      host_allmetrics_json_used: info[\"allmetrics-json-used\"],\n      host_allmetrics_prometheus_used: info[\"allmetrics-prometheus-used\"],\n      host_allmetrics_shell_used: info[\"allmetrics-shell-used\"],\n      host_charts_count: info[\"charts-count\"],\n      host_dashboard_used: info[\"dashboard-used\"],\n      host_metrics_count: info[\"metrics-count\"],\n      host_notification_methods: info[\"notification-methods\"],\n      config_memory_mode: info[\"memory-mode\"],\n      config_exporting_enabled: info[\"exporting-enabled\"],\n      config_exporting_connectors: info[\"exporting-connectors\"],\n      config_hosts_available: info[\"hosts-available\"],\n      config_https_enabled: info[\"https-enabled\"],\n      config_multidb_disk_quota: info[\"multidb-disk-quota\"],\n      config_page_cache_size: info[\"page-cache-size\"],\n      config_stream_enabled: info[\"stream-enabled\"],\n      config_web_enabled: info[\"web-enabled\"],\n      // eslint-disable-next-line camelcase\n      host_is_parent: info.host_labels?._is_parent,\n      // optional chaining added for consistency with the other info.* fields,\n      // the surrounding ramda filter drops undefined values anyway\n      mirrored_hosts_reachable: info.mirrored_hosts_status\n        ?.filter(({ reachable }) => reachable).length,\n      mirrored_hosts_unreachable: info.mirrored_hosts_status\n        ?.filter(({ reachable }) => !reachable).length,\n      host_collectors: info.collectors,\n      host_is_k8s_node: info.is_k8s_node,\n    }),\n  )\n}\n\nexport type PersonUrl = [\n  string, // guid\n  string, // url\n  number, // last timestamp (ms)\n  number, // accesses\n  string // name\n]\n\ntype AccessRegistryResponse = null | {\n  personGuid?: string\n  registryServer: string\n  urls?: PersonUrl[]\n}\n\ntype AccessRegistry = (args: {\n  machineGuid: string\n  maxRedirects: number\n  name: string\n  registryServer: string\n  url: string\n}) => Promise<AccessRegistryResponse>\nconst accessRegistry: AccessRegistry = ({\n  machineGuid, maxRedirects, name, registryServer, url,\n}) => axiosInstance.get(`${registryServer}/api/v1/registry`, {\n  headers: {\n    \"Cache-Control\": 
\"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n params: {\n action: \"access\",\n machine: machineGuid,\n name,\n url,\n },\n withCredentials: true, // required for the cookie\n}).then(({ data }) => {\n // todo xss check\n const isRedirect = typeof data.registry === \"string\"\n\n let returnData = data\n if (typeof data.status !== \"string\" || data.status !== \"ok\") {\n // todo throw error (409 in old dashboard)\n returnData = null\n }\n\n if (returnData === null) {\n if (isRedirect && maxRedirects > 0) {\n return accessRegistry({\n maxRedirects: maxRedirects - 1,\n machineGuid,\n name,\n registryServer: data.registry,\n url,\n })\n }\n return { registryServer }\n }\n const urls = data.urls.filter((u: [string, string]) => u[1] !== MASKED_DATA)\n return {\n personGuid: data.person_guid || null,\n registryServer,\n urls,\n }\n}).catch(() => {\n // todo handle error in better way (410 in old dashboard)\n console.warn(\"error calling registry:\", registryServer) // eslint-disable-line no-console\n return null\n})\n\nexport interface RegistryMachine {\n guid: string\n url: string\n lastTimestamp: number\n accesses: number\n name: string\n alternateUrls: string[]\n}\n\ntype ParsePersonUrls = (\n personUrls: PersonUrl[]\n) => {\n registryMachines: { [key: string]: RegistryMachine }\n registryMachinesArray: RegistryMachine[]\n}\nexport const parsePersonUrls: ParsePersonUrls = (personUrls) => {\n // todo main.js is using registryMachines, but should use only the array\n const registryMachines: { [key: string]: RegistryMachine } = {}\n\n personUrls\n .slice()\n .reverse()\n .forEach(([guid, url, lastTimestamp, accesses, name]: PersonUrl) => {\n const existingObj = registryMachines[guid] || {\n lastTimestamp: 0,\n accesses: 0,\n alternateUrls: [],\n guid: \"\",\n url: \"\",\n name: \"\"\n }\n const isNewer = existingObj.lastTimestamp < lastTimestamp\n const extended: RegistryMachine = {\n guid: existingObj.guid || guid,\n url: isNewer ? url : existingObj.url,\n lastTimestamp: isNewer ? lastTimestamp : existingObj.lastTimestamp,\n accesses: existingObj.accesses + accesses,\n name: isNewer ? 
name : existingObj.name,\n alternateUrls: existingObj.alternateUrls.concat(url),\n }\n registryMachines[guid] = extended\n })\n\n const registryMachinesArray = uniq(\n // not sure if reverse is needed, but it was in old dashboard\n personUrls\n .slice()\n .reverse()\n .map(([guid]: PersonUrl) => guid),\n ).map((guid) => registryMachines[guid])\n return {\n registryMachines,\n registryMachinesArray,\n }\n}\n\nfunction* fetchHelloSaga({ payload }: Action<FetchHelloPayload>) {\n const { serverDefault } = payload\n const helloCallUrl = `${serverDefault}api/v1/registry?action=hello`\n let response: AxiosResponse<HelloResponse>\n try {\n response = yield call(axiosInstance.get, helloCallUrl, {\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n })\n } catch (error) {\n console.warn(\"error accessing registry or Do-Not-Track is enabled\") // eslint-disable-line\n yield put(fetchHelloAction.failure())\n return\n }\n const cloudBaseURL = response.data.cloud_base_url\n const { hostname } = response.data\n const machineGuid = response.data.machine_guid\n const registryServer = response.data.registry\n const isUsingGlobalRegistry = registryServer === NETDATA_REGISTRY_SERVER\n\n yield put(fetchHelloAction.success({\n cloudBaseURL,\n hostname,\n isUsingGlobalRegistry,\n machineGuid,\n }))\n\n const name = hostname\n const url = serverDefault\n\n // now make access call - max_redirects, callback, etc...\n const accessRegistryResponse: AccessRegistryResponse = yield call(accessRegistry, {\n machineGuid,\n maxRedirects: 2,\n name,\n registryServer,\n url,\n })\n\n if (response.data.anonymous_statistics) {\n yield spawn(injectPosthog, response.data.machine_guid, accessRegistryResponse?.personGuid)\n }\n\n if (accessRegistryResponse?.urls && accessRegistryResponse?.personGuid) {\n const personUrls = parsePersonUrls(accessRegistryResponse.urls)\n const { registryMachines, registryMachinesArray } = personUrls\n yield put(updatePersonUrlsAction({\n personGuid: accessRegistryResponse.personGuid,\n registryMachines,\n registryMachinesArray,\n }))\n }\n\n yield put(accessRegistrySuccessAction({\n registryServer: accessRegistryResponse?.registryServer || registryServer,\n }))\n}\n\nconst constructOptionStorageKey = (key: string) => `options.${key}`\nfunction setOptionSaga({ payload }: Action<SetOptionAction>) {\n const { key, value } = payload\n if (key === \"stop_updates_when_focus_is_lost\") {\n // old dashboard was saving that property to localStorage, but was always omitting it when\n // reading. 
it was only possible to persist this setting via url (update_always hash param)\n return\n }\n localStorage.setItem(constructOptionStorageKey(key), JSON.stringify(value))\n}\n\nfunction* spacePanelSaga({ payload }: Action<SetSpacePanelStatusActionPayload>) {\n if (payload.isActive) {\n document.body.className = \"with-panel\"\n } else {\n document.body.className = \"\"\n }\n yield delay(sidePanelTransitionTimeInSeconds * 1000)\n yield put(setSpacePanelTransitionEndAction({ isActive: payload.isActive }))\n}\n\nexport function* globalSagas() {\n yield spawn(listenToWindowFocus)\n yield spawn(watchWindowFocusChannel)\n yield takeEvery(fetchHelloAction.request, fetchHelloSaga)\n yield spawn(alarmsSagas)\n yield takeEvery(setOptionAction, setOptionSaga)\n yield takeEvery(setSpacePanelStatusAction, spacePanelSaga)\n}\n","/* eslint-disable camelcase */\n/* eslint-disable operator-linebreak */\nimport { take, takeEvery } from \"redux-saga/effects\"\nimport { Action } from \"redux-act\"\n\nimport {\n clearHighlightAction,\n SetGlobalChartUnderlayAction,\n setGlobalChartUnderlayAction,\n} from \"domains/global/actions\"\nimport {\n explicitlySignInAction,\n showSignInModalAction,\n ShowSignInModalAction,\n} from \"domains/dashboard/actions\"\nimport { setHashParams, getHashParams, removeHashParams } from \"utils/hash-utils\"\n\nexport const LOCAL_STORAGE_NEEDS_SYNC = \"LOCAL-STORAGE-NEEDS-SYNC\"\n\nfunction setGlobalChartUnderlaySaga({ payload }: Action<SetGlobalChartUnderlayAction>) {\n const { after, before } = payload\n if (window.urlOptions) {\n // additional check to prevent loop, after setting initial state from url\n if (window.urlOptions.after !== after || window.urlOptions.before !== before) {\n window.urlOptions.netdataHighlightCallback(true, after, before)\n }\n } else {\n // TODO: Consider a setting to control whether the component sets these hash params\n const hashParams = getHashParams()\n const highlight_after = Math.round(after).toString()\n const highlight_before = Math.round(before).toString()\n if (\n hashParams.highlight_after !== highlight_after ||\n hashParams.highlight_before !== highlight_before\n ) {\n setHashParams({ highlight_after, highlight_before })\n }\n }\n}\n\nfunction clearHighlightSaga() {\n if (window.urlOptions) {\n window.urlOptions.netdataHighlightCallback(false, 0, 0)\n } else {\n removeHashParams([\"highlight_after\", \"highlight_before\"])\n }\n}\n\nfunction* showSignInSaga({ payload }: Action<ShowSignInModalAction>) {\n if (window.showSignInModal) {\n window.showSignInModal()\n\n yield take(explicitlySignInAction)\n const { signInLinkHref } = payload\n window.localStorage.setItem(LOCAL_STORAGE_NEEDS_SYNC, \"true\")\n window.location.href = signInLinkHref\n }\n}\n\nexport function* mainJsSagas() {\n yield takeEvery(setGlobalChartUnderlayAction, setGlobalChartUnderlaySaga)\n yield takeEvery(clearHighlightAction, clearHighlightSaga)\n yield takeEvery(showSignInModalAction, showSignInSaga)\n}\n","import { spawn } from \"redux-saga/effects\"\n\nimport { chartSagas } from \"domains/chart/sagas\"\nimport { globalSagas } from \"domains/global/sagas\"\nimport { mainJsSagas } from \"domains/dashboard/sagas\"\n\nexport function* rootSaga() {\n yield spawn(globalSagas)\n yield spawn(chartSagas)\n yield spawn(mainJsSagas)\n}\n","import { compose, applyMiddleware, createStore } from \"redux\"\nimport createSagaMiddleware from \"redux-saga\"\nimport rootReducer from \"./root-reducer\"\nimport { rootSaga } from \"./root-saga\"\n\nconst sagaMiddleware = 
createSagaMiddleware()\n\nconst reduxDevTools = process.env.NODE_ENV === \"development\"\n && window.__REDUX_DEVTOOLS_EXTENSION__\n // @ts-ignore\n && window.__REDUX_DEVTOOLS_EXTENSION__({ name: \"Dashboard Charts\" })\n\nconst composeMiddlewaresWithDevTools = () => (reduxDevTools\n ? compose(applyMiddleware(sagaMiddleware), reduxDevTools)\n : compose(applyMiddleware(sagaMiddleware)))\n\nexport const configureStore = () => {\n const store = createStore(\n rootReducer,\n composeMiddlewaresWithDevTools(),\n )\n sagaMiddleware.run(rootSaga)\n return store\n}\n\nexport const store = configureStore()\n","import $ from \"jquery\"\n\nwindow.$ = $\nwindow.jQuery = $\n","let loadCssPromise: Promise<void>\n\ntype LoadCss = (href: string) => Promise<void>\nexport const loadCss: LoadCss = (href) => {\n if (loadCssPromise) {\n return loadCssPromise\n }\n return new Promise((resolve, reject) => {\n const fileRef = document.createElement(\"link\")\n fileRef.setAttribute(\"rel\", \"stylesheet\")\n fileRef.setAttribute(\"type\", \"text/css\")\n fileRef.setAttribute(\"href\", href)\n\n fileRef.onload = () => {\n resolve()\n }\n\n fileRef.onerror = () => {\n reject(Error(`Error loading css: ${href}`))\n }\n\n document.getElementsByTagName(\"head\")[0].appendChild(fileRef)\n })\n}\n","import classNames from \"classnames\"\nimport { Attributes } from \"./transformDataAttributes\"\n\nexport type ChartLibraryName =\n | \"dygraph\"\n | \"sparkline\"\n | \"peity\"\n | \"google\"\n // | \"d3\"\n | \"d3pie\"\n | \"easypiechart\"\n | \"gauge\"\n | \"textonly\"\n | \"groupbox\"\nexport interface ChartLibraryConfig {\n aspectRatio?: number\n format: string\n hasLegend: (attributes: Attributes) => boolean\n hasToolboxPanAndZoom?: boolean\n isLogScale?: (attributes: Attributes) => boolean\n options: (attributes: Attributes) => string\n trackColors: boolean\n pixelsPerPoint: (attributes: Attributes) => number\n xssRegexIgnore: RegExp\n containerClass: (attributes: Attributes) => string\n}\nexport type ChartLibrariesSettings = {\n [key in ChartLibraryName]: ChartLibraryConfig\n}\n\ntype IsDygraphSparkline = (attributes: Attributes) => boolean\nconst isDygraphSparkline: IsDygraphSparkline = (attributes) => (\n attributes.dygraphTheme === \"sparkline\"\n)\n\nexport const chartLibrariesSettings: ChartLibrariesSettings = {\n dygraph: {\n // initialize: window.NETDATA.dygraphInitialize,\n // create: window.NETDATA.dygraphChartCreate,\n // update: window.NETDATA.dygraphChartUpdate,\n // resize(state) {\n // if (typeof state.tmp.dygraph_instance !== \"undefined\"\n // && typeof state.tmp.dygraph_instance.resize === \"function\") {\n // state.tmp.dygraph_instance.resize()\n // }\n // },\n // setSelection: window.NETDATA.dygraphSetSelection,\n // clearSelection: window.NETDATA.dygraphClearSelection,\n hasToolboxPanAndZoom: true,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.data$\"),\n format: \"json\",\n options(attributes: Attributes) {\n if (typeof this.isLogScale === \"function\") {\n // flip - in proper order (from oldest to newest)\n return `ms|flip${this.isLogScale(attributes) ? 
\"|abs\" : \"\"}`\n }\n return \"\"\n },\n hasLegend(attributes: Attributes) {\n // not using __hasLegendCache__ as in old-dashboard, because performance tweaks like this\n // probably won't be needed in react app\n const { legend = true } = attributes\n return !isDygraphSparkline(attributes) && Boolean(legend)\n },\n // autoresize(state) {\n // void (state)\n // return true\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: ((attributes: Attributes) => (isDygraphSparkline(attributes) ? 2 : 3)),\n // pixels_per_point(state) {\n // return (this.isSparkline(state) === false) ? 3 : 2\n // },\n isLogScale(attributes: Attributes) {\n return attributes.dygraphTheme === \"logscale\"\n },\n containerClass(attributes: Attributes) {\n return this.hasLegend(attributes)\n ? classNames(\n \"netdata-container-with-legend\",\n attributes.legendPosition === \"bottom\" && \"netdata-container-with-legend--bottom\",\n )\n : \"netdata-container\"\n },\n // container_class(state) {\n // if (this.legend(state) !== null) {\n // return \"netdata-container-with-legend\"\n // }\n // return \"netdata-container\"\n // },\n },\n sparkline: {\n // initialize: window.NETDATA.sparklineInitialize,\n // create: window.NETDATA.sparklineChartCreate,\n // update: window.NETDATA.sparklineChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options: () => \"flip|abs\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 3,\n containerClass: () => \"netdata-container\",\n },\n peity: {\n // initialize: window.NETDATA.peityInitialize,\n // create: window.NETDATA.peityChartCreate,\n // update: window.NETDATA.peityChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"ssvcomma\",\n options: () => \"null2zero|flip|abs\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 3,\n containerClass: () => \"netdata-container\",\n },\n google: {\n // initialize: window.NETDATA.googleInitialize,\n // create: window.NETDATA.googleChartCreate,\n // update: window.NETDATA.googleChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.rows$\"),\n format: \"datatable\",\n options: () => \"\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 300\n // },\n trackColors: false,\n 
pixelsPerPoint: () => 4,\n containerClass: () => \"netdata-container\",\n },\n d3pie: {\n // initialize: window.NETDATA.d3pieInitialize,\n // create: window.NETDATA.d3pieChartCreate,\n // update: window.NETDATA.d3pieChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.d3pieSetSelection,\n // clearSelection: window.NETDATA.d3pieClearSelection,\n hasToolboxPanAndZoom: false,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.data$\"),\n format: \"json\",\n hasLegend: () => false,\n options: () => \"objectrows|ms\",\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 15,\n containerClass: () => \"netdata-container\",\n },\n // d3: {\n // initialize: window.NETDATA.d3Initialize,\n // create: window.NETDATA.d3ChartCreate,\n // update: window.NETDATA.d3ChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n // toolboxPanAndZoom: null,\n // initialized: false,\n // enabled: true,\n // xssRegexIgnore: new RegExp(\"^/api/v1/data\\.result.data$\"),\n // format(state) {\n // void (state)\n // return \"json\"\n // },\n // options(state) {\n // void (state)\n // return \"\"\n // },\n // legend(state) {\n // void (state)\n // return null\n // },\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n // track_colors(state) {\n // void (state)\n // return false\n // },\n // pixels_per_point(state) {\n // void (state)\n // return 3\n // },\n // container_class(state) {\n // void (state)\n // return \"netdata-container\"\n // },\n // },\n easypiechart: {\n // initialize: window.NETDATA.easypiechartInitialize,\n // create: window.NETDATA.easypiechartChartCreate,\n // update: window.NETDATA.easypiechartChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.easypiechartSetSelection,\n // clearSelection: window.NETDATA.easypiechartClearSelection,\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options() {\n return \"absolute\"\n },\n hasLegend() {\n return false\n },\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: () => 3,\n aspectRatio: 100,\n containerClass: () => \"netdata-container-easypiechart\",\n },\n gauge: {\n // initialize: window.NETDATA.gaugeInitialize,\n // create: window.NETDATA.gaugeChartCreate,\n // update: window.NETDATA.gaugeChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.gaugeSetSelection,\n // clearSelection: window.NETDATA.gaugeClearSelection,\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options: () => \"absolute\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: () => 3,\n aspectRatio: 60,\n containerClass: () => \"netdata-container-gauge\",\n },\n textonly: {\n // autoresize(state) {\n // void (state)\n // return false\n // },\n containerClass: () => 
\"netdata-container\",\n // create: window.NETDATA.textOnlyCreate,\n // enabled: true,\n format: \"array\",\n // initialized: true,\n // initialize(callback) {\n // callback()\n // },\n hasLegend: () => false,\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n options: () => \"absolute\",\n pixelsPerPoint: () => 3,\n trackColors: false,\n // update: window.NETDATA.textOnlyUpdate,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n },\n groupbox: {\n containerClass: () => \"netdata-container\",\n hasLegend: () => false,\n options: () => \"absolute\",\n format: \"json\",\n trackColors: false,\n pixelsPerPoint: () => 3,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n },\n}\n","import { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"domains/chart/utils/chartLibrariesSettings\"\n\nexport const getChartURLOptions = (\n attributes: Attributes, shouldEliminateZeroDimensions: boolean,\n) => {\n const {\n appendOptions,\n overrideOptions,\n } = attributes\n let ret = \"\"\n\n ret += overrideOptions\n ? overrideOptions.toString()\n : chartLibrariesSettings[attributes.chartLibrary].options(attributes)\n\n if (typeof appendOptions === \"string\") {\n ret += `|${encodeURIComponent(appendOptions)}`\n }\n\n ret += \"|jsonwrap\"\n\n if (shouldEliminateZeroDimensions) {\n ret += \"|nonzero\"\n }\n\n if (attributes.dimensionsAggrMethod === \"sum-of-abs\"\n || (!attributes.dimensionsAggrMethod && attributes.groupBy && attributes.groupBy !== \"dimension\")\n ) {\n ret += \"|absolute\"\n }\n\n return ret\n}\n","export const BIGGEST_INTERVAL_NUMBER = 2 ** 31 - 1\n","import React from \"react\"\n\nimport { Icon } from \"components/icon\"\n\ninterface Props {\n containerNode: HTMLElement\n hasEmptyData: boolean\n}\n\nexport const Loader = ({\n containerNode,\n hasEmptyData,\n}: Props) => {\n // below is 90% of original logic.\n // since it rerenders when IntersectionObserver turns the chart back on,\n // it's not that important to detect screen height and container sizes changes\n const screenHeight = window.screen.height\n\n // normally we want a font size, as tall as the element\n let h = containerNode.clientHeight\n\n // but give it some air, 20% let's say, or 5 pixels min\n const lost = Math.max(h * 0.2, 5)\n h -= lost\n\n // center the text, vertically\n let paddingTop = (lost - 5) / 2\n\n // but check the width too\n // it should fit 10 characters in it\n const w = containerNode.clientWidth / 10\n if (h > w) {\n paddingTop += (h - w) / 2\n h = w\n }\n\n // and don't make it too huge\n // 5% of the screen size is good\n if (h > screenHeight / 20) {\n paddingTop += (h - (screenHeight / 20)) / 2\n h = screenHeight / 20\n }\n\n const label = hasEmptyData ? \" empty\" : \" netdata\"\n const iconType = hasEmptyData ? 
\"noData\" : \"loading\"\n\n return (\n <div\n className=\"netdata-message icon\"\n style={{\n fontSize: h,\n paddingTop,\n }}\n >\n <Icon iconType={iconType} />\n {label}\n </div>\n )\n}\n","type GetPanAndZoomStep = (event: React.MouseEvent) => number\nexport const getPanAndZoomStep: GetPanAndZoomStep = (event) => {\n if (event.ctrlKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_control\n } if (event.shiftKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_shift\n } if (event.altKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_alt\n }\n return window.NETDATA.options.current.pan_and_zoom_factor\n}\n","export const safeEqualCheck = (a: unknown, b: unknown) => {\n if (a === b) {\n return true\n }\n return Number.isNaN(a as number) && Number.isNaN(b as number)\n}\n","import { identity } from \"ramda\"\nimport { useCallback, useState, useMemo, useRef } from \"react\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectTemperatureSetting, selectSecondsAsTimeSetting } from \"domains/global/selectors\"\nimport { unitsConversionCreator } from \"utils/units-conversion\"\nimport { safeEqualCheck } from \"utils/safe-equal-check\"\n\nimport { ChartData } from \"../chart-types\"\nimport { Attributes } from \"./transformDataAttributes\"\n\ntype Converter = (v: number) => number | string\n// only time units are converted into strings, the rest are numbers\n\n// todo - memoization similar to the one as in old dashboard, but probably not needed\nconst formattersFixed: any[] = []\nconst formattersZeroBased: any[] = []\nconst fastNumberFormat = (min: number, max: number) => {\n const key = max\n if (min === max) {\n if (typeof formattersFixed[key] === \"undefined\") {\n formattersFixed[key] = new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n }\n\n return formattersFixed[key]\n }\n if (min === 0) {\n if (typeof formattersZeroBased[key] === \"undefined\") {\n formattersZeroBased[key] = new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n }\n\n return formattersZeroBased[key]\n }\n // (old dashboard comment)\n // this is never used\n // it is added just for completeness\n return new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n}\n\nconst getLegendFormatValue =\n (\n convertUnits: Converter,\n intlNumberFormat: Intl.NumberFormat | null,\n valueDecimalDetail: number\n ) =>\n (value: number | string | null) => {\n if (typeof value !== \"number\") {\n return \"-\"\n }\n\n const convertedValue = convertUnits(value)\n if (typeof convertedValue !== \"number\") {\n return convertedValue\n }\n\n if (intlNumberFormat !== null) {\n return intlNumberFormat.format(convertedValue)\n }\n\n let dmin\n let dmax\n if (valueDecimalDetail !== -1) {\n dmin = valueDecimalDetail\n dmax = valueDecimalDetail\n } else {\n dmin = 0\n const abs = convertedValue < 0 ? 
-convertedValue : convertedValue\n if (abs > 1000) {\n dmax = 0\n } else if (abs > 10) {\n dmax = 1\n } else if (abs > 1) {\n dmax = 2\n } else if (abs > 0.1) {\n dmax = 2\n } else if (abs > 0.01) {\n dmax = 4\n } else if (abs > 0.001) {\n dmax = 5\n } else if (abs > 0.0001) {\n dmax = 6\n } else {\n dmax = 7\n }\n }\n\n return fastNumberFormat(dmin, dmax).format(convertedValue)\n }\n\ntype LegendFormatValue = (value: string | number | null) => string | number\n\ninterface Arguments {\n attributes: Attributes\n data: ChartData\n units: string\n unitsCommon: string | undefined\n unitsDesired: string\n uuid: string\n}\nexport const useFormatters = ({\n attributes,\n data,\n units,\n unitsCommon,\n unitsDesired,\n uuid,\n}: Arguments) => {\n const temperatureSetting = useSelector(selectTemperatureSetting)\n const secondsAsTimeSetting = useSelector(selectSecondsAsTimeSetting)\n\n // previously _unitsConversion\n const [convertUnits, setConvertUnits] = useState<Converter>(() => identity)\n\n // probably can also be removed\n const [min, setMin] = useState<number>()\n const [max, setMax] = useState<number>()\n\n // todo most of this state is not needed, that hook can be refactored\n const [unitsCurrent, setUnitsCurrent] = useState<string>(units)\n\n const [decimals, setDecimals] = useState<number>(-1)\n const [intlNumberFormat, setIntlNumberFormat] = useState<Intl.NumberFormat | null>(null)\n\n const {\n // \"valueDecimalDetail\" in old app\n decimalDigits = -1,\n } = attributes\n\n const legendFormatValue: LegendFormatValue = useMemo(\n () => getLegendFormatValue(convertUnits, intlNumberFormat, decimalDigits),\n [convertUnits, decimalDigits, intlNumberFormat]\n )\n\n const legendFormatValueRef = useRef(legendFormatValue)\n const updateLegendFormatValueRef = (\n newConvertUnits: Converter,\n newIntlNumberFormat: any,\n newDecimalDigits: any\n ) => {\n legendFormatValueRef.current = getLegendFormatValue(\n newConvertUnits,\n newIntlNumberFormat,\n newDecimalDigits\n )\n }\n\n const legendFormatValueDecimalsFromMinMax = useCallback(\n (newMin: number, newMax: number) => {\n if (safeEqualCheck(min, newMin) && safeEqualCheck(max, newMax)) {\n return legendFormatValueRef.current\n }\n // we should call the convertUnits-creation only when original app was doing this\n // so we don't get new updates in improper places\n setMin(newMin)\n setMax(newMax)\n\n const newConvertUnits = unitsConversionCreator.get(\n uuid,\n newMin,\n newMax,\n units,\n unitsDesired,\n unitsCommon,\n switchedUnits => {\n setUnitsCurrent(switchedUnits)\n // that.legendSetUnitsString(that.units_current);\n // that.legendSetUnitsString just populates some DOM with unitsCurrent\n // on all occurrences just take the unitsCurrent from this state\n },\n temperatureSetting,\n secondsAsTimeSetting\n )\n\n // as function, so useState() interprets it properly\n setConvertUnits(() => newConvertUnits)\n\n const convertedMin = newConvertUnits(newMin)\n const convertedMax = newConvertUnits(newMax)\n\n // if number is returned, we format it!!!!\n if (typeof convertedMin !== \"number\" || typeof convertedMax !== \"number\") {\n updateLegendFormatValueRef(newConvertUnits, intlNumberFormat, decimalDigits)\n return legendFormatValueRef.current\n }\n\n let newDecimals\n\n if (data.min === data.max) {\n // it is a fixed number, let the visualizer decide based on the value\n newDecimals = -1\n } else if (decimalDigits !== -1) {\n // there is an override\n newDecimals = decimalDigits\n } else {\n // ok, let's calculate the proper number of decimal 
points\n let delta\n\n if (convertedMin === convertedMax) {\n delta = Math.abs(convertedMin)\n } else {\n delta = Math.abs(convertedMax - convertedMin)\n }\n\n if (delta > 1000) {\n newDecimals = 0\n } else if (delta > 10) {\n newDecimals = 1\n } else if (delta > 1) {\n newDecimals = 2\n } else if (delta > 0.1) {\n newDecimals = 2\n } else if (delta > 0.01) {\n newDecimals = 4\n } else if (delta > 0.001) {\n newDecimals = 5\n } else if (delta > 0.0001) {\n newDecimals = 6\n } else {\n newDecimals = 7\n }\n }\n\n let newIntlNumberFormat = intlNumberFormat\n\n if (newDecimals !== decimals) {\n if (newDecimals < 0) {\n newIntlNumberFormat = null\n } else {\n newIntlNumberFormat = fastNumberFormat(newDecimals, newDecimals)\n }\n setIntlNumberFormat(() => newIntlNumberFormat)\n setDecimals(newDecimals)\n }\n updateLegendFormatValueRef(newConvertUnits, newIntlNumberFormat, newDecimals)\n return legendFormatValueRef.current\n },\n [\n decimals,\n decimalDigits,\n min,\n max,\n uuid,\n temperatureSetting,\n units,\n unitsDesired,\n unitsCommon,\n secondsAsTimeSetting,\n data.min,\n data.max,\n intlNumberFormat,\n ]\n )\n\n return {\n legendFormatValue,\n legendFormatValueDecimalsFromMinMax,\n unitsCurrent,\n }\n}\n","const defaultColor = {\n r: 255,\n g: 0,\n b: 0,\n}\n\ntype ColorHex2Rgb = (hex: string) => {\n r: number,\n g: number,\n b: number\n}\nexport const colorHex2Rgb: ColorHex2Rgb = (hex) => {\n // Expand shorthand form (e.g. \"03F\") to full form (e.g. \"0033FF\")\n const shorthandRegex = /^#?([a-f\\d])([a-f\\d])([a-f\\d])$/i\n if (!hex) {\n return defaultColor\n }\n const hexFull = hex.replace(shorthandRegex, (m, r, g, b) => r + r + g + g + b + b)\n\n const result = /^#?([a-f\\d]{2})([a-f\\d]{2})([a-f\\d]{2})$/i.exec(hexFull)\n if (!result) {\n console.warn(\"wrong color format:\", hex) // eslint-disable-line no-console\n }\n return result\n ? {\n r: parseInt(result[1], 16),\n g: parseInt(result[2], 16),\n b: parseInt(result[3], 16),\n } : defaultColor\n}\n","import styled from \"styled-components\"\nimport { getSizeBy } from \"@netdata/netdata-ui\"\n\nexport const LegendContainer = styled.div`\n margin-bottom: ${getSizeBy(3)};\n padding-left: 35px;\n`\n\nexport const LegendFirstRow = styled.div`\n margin-top: 4px;\n display: flex;\n justify-content: space-between;\n`\n\nexport const LegendSecondRow = styled.div`\n margin-top: 4px;\n display: flex;\n justify-content: space-between;\n`\n\nexport const LegendUnit = styled.div`\n`\n\nexport const DateTimeSeparator = styled.span`\n margin: 0 3px;\n`\n\nexport const LegendItems = styled.div`\n display: flex;\n flex-wrap: wrap;\n overflow: auto;\n max-height: 80px;\n`\n\nexport const DimensionItem = styled.div<{ color: string, isDisabled: boolean }>`\n display: flex;\n align-items: center;\n color: ${({ color }) => color};\n margin-right: 12px;\n cursor: pointer;\n opacity: ${({ isDisabled }) => (isDisabled ? 0.3 : null)};\n user-select: none;\n font-size: 11px;\n &:focus {\n outline: none;\n }\n`\n\n// toolbox is based on \"absolute\", so to make sure it's not put on top of dimension-item\n// let's put a transparent block as last in dimension-items container. 
Toolbox will soon be moved\n// to another place so it's temporary\nexport const DimensionItemToolboxPlaceholder = styled.div`\n  width: 140px;\n  height: 20px;\n`\n\nexport const DimensionIcon = styled.div<{ color: string }>`\n  width: 14px;\n  height: 7px;\n  border-radius: 4px;\n  overflow: hidden;\n  background-color: ${({ color }) => color};\n`\n\nexport const DimensionLabel = styled.span`\n  margin-left: 3px;\n`\n\nexport const DimensionValue = styled.span`\n  margin-left: 5px;\n  min-width: 30px;\n`\n\nexport const ToolboxContainer = styled.div`\n  position: relative;\n  touch-action: none;\n`\n","import React, { useCallback } from \"react\"\nimport { createSelector } from \"reselect\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { DimensionLabel } from \"./chart-legend-bottom.styled\"\n\nconst emptyObject = {}\n\nconst selector = createSelector(\n  selectChartData,\n  ({ dimension_names: dimensionNames, keys = emptyObject }) => ({\n    dimensionNames,\n    keys,\n  })\n)\n\nconst LegendText = ({ id, index }) => {\n  const { dimensionNames, keys } = useSelector(useCallback(state => selector(state, { id }), [id]))\n  const { chart, node } = keys\n\n  if (chart && node && Object.keys(keys).length === 2) {\n    return (\n      <DimensionLabel>\n        {chart[index]}@{node[index]}\n      </DimensionLabel>\n    )\n  }\n\n  const name = dimensionNames[index]\n\n  return <DimensionLabel>{name}</DimensionLabel>\n}\n\nexport default LegendText\n","import React, { Fragment, useRef, useEffect, useCallback } from \"react\"\nimport classNames from \"classnames\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\n\nimport { colorHex2Rgb } from \"utils/color-hex-2-rgb\"\nimport { useDateTime } from \"utils/date-time\"\n\nimport { legendResolutionTooltip, legendPluginModuleString } from \"../utils/legend-utils\"\n\nimport { ChartMetadata } from \"../chart-types\"\nimport LegendText from \"./legendText\"\n\ninterface Props {\n  chartUuid: string\n  chartMetadata: ChartMetadata\n  chartLibrary: string\n  colors: {\n    [key: string]: string\n  }\n  hoveredRow: number\n  hoveredX: number | null\n  legendFormatValue: (value: number | string | null) => (number | string)\n  onDimensionClick: (clickedDimensionName: string, event: React.MouseEvent) => void\n  selectedDimensions: string[]\n  showLatestOnBlur: boolean\n  unitsCurrent: string\n  viewBefore: number\n}\n\nexport const ChartLegendRight = ({\n  chartUuid,\n  chartMetadata,\n  chartLibrary,\n  colors,\n  hoveredRow,\n  hoveredX,\n  legendFormatValue,\n  onDimensionClick,\n  selectedDimensions,\n  showLatestOnBlur,\n  unitsCurrent,\n  viewBefore,\n}: Props) => {\n  const chartData = useSelector(\n    useCallback((state: any) => selectChartData(state, { id: chartUuid }), [chartUuid])\n  )\n  const { dimension_names: dimensionNames, dimension_ids: dimensionIds } = chartData\n\n  // todo also handle this case:\n  // const netdataLast = chartData.last_entry * 1000\n  // const dataUpdateEvery = chartData.view_update_every * 1000\n  // showUndefined = Math.abs(netdataLast - viewBefore) > dataUpdateEvery\n  // (showUndefined also when difference between last and before is bigger than granularity)\n  const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n\n  // todo support timezone\n  const legendDate = new Date(hoveredX || viewBefore)\n\n  // todo make it possible to add chartLegend when there's no chartData\n  // (if this situation is possible)\n\n  // @ts-ignore ignoring because 
options.current has inconsistent structure\n const colorFillOpacity = window.NETDATA.options.current[\n `color_fill_opacity_${chartMetadata.chart_type}`\n ]\n\n const { localeDateString, localeTimeString } = useDateTime()\n\n const scrollbarRef = useRef(null)\n useEffect(() => {\n if (scrollbarRef.current) {\n window.Ps.initialize(scrollbarRef.current, {\n wheelSpeed: 0.2,\n wheelPropagation: true,\n swipePropagation: true,\n minScrollbarLength: null,\n maxScrollbarLength: null,\n useBothWheelAxes: false,\n suppressScrollX: true,\n suppressScrollY: false,\n scrollXMarginOffset: 0,\n scrollYMarginOffset: 0,\n theme: \"default\",\n })\n }\n }, [scrollbarRef])\n\n return (\n <div className={classNames(\n \"netdata-chart-legend\",\n `netdata-${chartLibrary}-legend`,\n )}\n >\n <span\n className=\"netdata-legend-title-date\"\n title={legendPluginModuleString(true, chartMetadata)}\n >\n {showUndefined\n ? legendPluginModuleString(false, chartMetadata)\n : localeDateString(legendDate)}\n </span>\n <br />\n <span\n className=\"netdata-legend-title-time\"\n title={legendResolutionTooltip(chartData, chartMetadata)}\n >\n {showUndefined\n ? chartMetadata.context.toString()\n : localeTimeString(legendDate)}\n </span>\n <br />\n <span className=\"netdata-legend-title-units\">{unitsCurrent}</span>\n <br />\n <div className=\"netdata-legend-series\" ref={scrollbarRef}>\n <div className=\"netdata-legend-series-content\">\n {dimensionIds.map((dimensionId, i) => {\n const dimensionName = dimensionNames[i]\n // todo dimension could be a separate component\n const color = colors[dimensionName]\n const rgb = colorHex2Rgb(color)\n\n const isSelected = selectedDimensions.length === 0\n || selectedDimensions.includes(dimensionName)\n\n let value\n if (showUndefined) {\n value = null\n } else if (hoveredRow !== -1) {\n const hoveredValueArray = chartData.result.data[hoveredRow]\n // [timestamp, valueDim1, valueDim2, ...]\n value = hoveredValueArray ? hoveredValueArray[i + 1] : null\n } else {\n value = chartData.view_latest_values[i]\n }\n\n return (\n <Fragment key={dimensionId}>\n {i !== 0 && <br />}\n {/* eslint-disable-next-line jsx-a11y/click-events-have-key-events */}\n <span\n title={dimensionName}\n className={classNames(\n \"netdata-legend-name\",\n isSelected ? 
\"selected\" : \"not-selected\",\n )}\n onClick={(event) => {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n style={{ color }}\n tabIndex={0}\n >\n <table\n className={`netdata-legend-name-table-${chartMetadata.chart_type}`}\n style={{\n backgroundColor: `rgba(${rgb.r},${rgb.g},${rgb.b},${colorFillOpacity})`,\n }}\n >\n <tbody>\n <tr className=\"netdata-legend-name-tr\">\n <td className=\"netdata-legend-name-td\" />\n </tr>\n </tbody>\n </table>\n {\" \"}\n <LegendText id={chartUuid} index={i} />\n </span>\n {/* eslint-disable-next-line jsx-a11y/click-events-have-key-events */}\n <span\n title={dimensionName}\n className={classNames(\n \"netdata-legend-value\",\n !isSelected && \"hidden\",\n )}\n onClick={(event) => {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n style={{ color }} // omitted !important during refractor, react doesn't support it\n tabIndex={0}\n >\n {legendFormatValue(\n value,\n )}\n </span>\n </Fragment>\n )\n })}\n </div>\n </div>\n </div>\n )\n}\n","import React, { useCallback } from \"react\"\nimport { useDateTime } from \"utils/date-time\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { legendPluginModuleString, legendResolutionTooltip } from \"domains/chart/utils/legend-utils\"\nimport { ChartMetadata } from \"../chart-types\"\nimport LegendText from \"./legendText\"\nimport * as S from \"./chart-legend-bottom.styled\"\ninterface Props {\n chartUuid: string\n chartMetadata: ChartMetadata\n chartLibrary: string\n colors: {\n [key: string]: string\n }\n hoveredRow: number\n hoveredX: number | null\n legendFormatValue: (value: number | string | null) => number | string\n onDimensionClick: (clickedDimensionName: string, event: React.MouseEvent) => void\n selectedDimensions: string[]\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewBefore: number\n legendToolbox: JSX.Element\n resizeHandler: React.ReactNode\n}\n\nexport const ChartTimeframe = ({\n chartMetadata,\n showUndefined,\n hoveredX,\n viewBefore,\n chartData,\n}: any) => {\n const { localeDateString, localeTimeString } = useDateTime()\n\n const legendDate = new Date(hoveredX || viewBefore)\n\n return (\n <div>\n <span title={legendPluginModuleString(true, chartMetadata)}>\n {showUndefined\n ? legendPluginModuleString(false, chartMetadata)\n : localeDateString(legendDate)}\n </span>\n <S.DateTimeSeparator>|</S.DateTimeSeparator>\n <span title={legendResolutionTooltip(chartData, chartMetadata)}>\n {showUndefined ? 
chartMetadata.context.toString() : localeTimeString(legendDate)}\n </span>\n </div>\n )\n}\n\nexport const ChartLegendBottom = ({\n chartUuid,\n chartMetadata,\n colors,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n onDimensionClick,\n selectedDimensions,\n showLatestOnBlur,\n unitsCurrent,\n viewBefore,\n legendToolbox,\n resizeHandler,\n}: Props) => {\n const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n const chartData = useSelector(\n useCallback((state: any) => selectChartData(state, { id: chartUuid }), [chartUuid])\n )\n const { dimension_names: dimensionNames, dimension_ids: dimensionIds } = chartData\n\n return (\n <S.LegendContainer>\n <S.LegendFirstRow>\n <S.LegendUnit>{unitsCurrent}</S.LegendUnit>\n <ChartTimeframe\n chartMetadata={chartMetadata}\n showUndefined={showUndefined}\n hoveredX={hoveredX}\n viewBefore={viewBefore}\n chartData={chartData}\n />\n </S.LegendFirstRow>\n <S.LegendSecondRow>\n <S.LegendItems>\n {dimensionIds.map((dimensionId, i) => {\n const dimensionName = dimensionNames[i]\n const color = colors[dimensionName]\n\n const isSelected =\n selectedDimensions.length === 0 || selectedDimensions.includes(dimensionName)\n\n let value\n if (showUndefined) {\n value = null\n } else if (hoveredRow !== -1) {\n const hoveredValueArray = chartData.result.data[hoveredRow]\n // [timestamp, valueDim1, valueDim2, ...]\n value = hoveredValueArray ? hoveredValueArray[i + 1] : null\n } else {\n value = chartData.view_latest_values[i]\n }\n return (\n <S.DimensionItem\n color={color}\n onClick={event => {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n tabIndex={0}\n isDisabled={!isSelected}\n key={dimensionId}\n >\n <S.DimensionIcon title={dimensionName} color={color} />\n <LegendText id={chartUuid} index={i} />\n <S.DimensionValue>{isSelected && legendFormatValue(value)}</S.DimensionValue>\n </S.DimensionItem>\n )\n })}\n <S.DimensionItemToolboxPlaceholder />\n </S.LegendItems>\n <S.ToolboxContainer>\n {legendToolbox}\n {resizeHandler}\n </S.ToolboxContainer>\n </S.LegendSecondRow>\n </S.LegendContainer>\n )\n}\n","import React, { useCallback } from \"react\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { getNewSelectedDimensions } from \"domains/chart/utils/legend-utils\"\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { ChartMetadata } from \"../chart-types\"\n\nimport { ChartLegendRight } from \"./chart-legend-right\"\nimport { ChartLegendBottom } from \"./chart-legend-bottom\"\n\ninterface Props {\n attributes: Attributes\n chartUuid: string\n chartMetadata: ChartMetadata\n chartLibrary: string\n colors: {\n [key: string]: string\n }\n hoveredRow: number\n hoveredX: number | null\n legendFormatValue: (value: number | string | null) => number | string\n selectedDimensions: string[]\n setSelectedDimensions: (selectedDimensions: string[]) => void\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewBefore: number\n legendToolbox: JSX.Element\n resizeHandler: React.ReactNode\n}\n\nexport const ChartLegend = ({\n attributes,\n chartUuid,\n chartMetadata,\n chartLibrary,\n colors,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n selectedDimensions,\n setSelectedDimensions,\n showLatestOnBlur,\n unitsCurrent,\n viewBefore,\n legendToolbox,\n resizeHandler,\n}: Props) => {\n const dimension_names = useSelector(\n useCallback((state: any) => selectChartData(state, { id: chartUuid }).dimension_names, [\n chartUuid,\n ])\n )\n\n const 
onDimensionClick = (clickedDimensionName: string, event: React.MouseEvent) => {\n event.preventDefault()\n const isModifierKeyPressed = event.shiftKey || event.ctrlKey\n const newSelectedDimensions = getNewSelectedDimensions({\n allDimensions: dimension_names,\n selectedDimensions,\n clickedDimensionName,\n isModifierKeyPressed,\n })\n setSelectedDimensions(newSelectedDimensions)\n }\n\n if (attributes.legendPosition === \"bottom\") {\n return (\n <ChartLegendBottom\n chartUuid={chartUuid}\n chartLibrary={chartLibrary}\n chartMetadata={chartMetadata}\n colors={colors}\n hoveredRow={hoveredRow}\n hoveredX={hoveredX}\n legendFormatValue={legendFormatValue}\n onDimensionClick={onDimensionClick}\n selectedDimensions={selectedDimensions}\n showLatestOnBlur={showLatestOnBlur}\n unitsCurrent={unitsCurrent}\n viewBefore={viewBefore}\n legendToolbox={legendToolbox}\n resizeHandler={resizeHandler}\n />\n )\n }\n\n return (\n <ChartLegendRight\n chartUuid={chartUuid}\n chartLibrary={chartLibrary}\n chartMetadata={chartMetadata}\n colors={colors}\n hoveredRow={hoveredRow}\n hoveredX={hoveredX}\n legendFormatValue={legendFormatValue}\n onDimensionClick={onDimensionClick}\n selectedDimensions={selectedDimensions}\n showLatestOnBlur={showLatestOnBlur}\n unitsCurrent={unitsCurrent}\n viewBefore={viewBefore}\n />\n )\n}\n","import React from \"react\"\n\nimport { ToolboxButton } from \"./toolbox-button\"\n\ntype ClickCallback = (event: React.MouseEvent) => void\ninterface Props {\n onToolboxLeftClick: ClickCallback\n onToolboxRightClick: ClickCallback\n onToolboxZoomInClick: ClickCallback\n onToolboxZoomOutClick: ClickCallback\n}\nexport const LegendToolbox = ({\n onToolboxLeftClick,\n onToolboxRightClick,\n onToolboxZoomInClick,\n onToolboxZoomOutClick,\n}: Props) => (\n <div className=\"netdata-legend-toolbox\">\n <ToolboxButton\n className=\"netdata-legend-toolbox-button\"\n onClick={onToolboxLeftClick}\n iconType=\"left\"\n popoverTitle=\"Pan Left\"\n popoverContent=\"Pan the chart to the left. You can also <b>drag it</b> with your mouse or your\n finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>\"\n />\n <ToolboxButton\n className=\"netdata-legend-toolbox-button\"\n onClick={onToolboxRightClick}\n iconType=\"right\"\n popoverTitle=\"Pan Right\"\n popoverContent=\"Pan the chart to the right. You can also <b>drag it</b> with your mouse or\n your finger (on touch devices).<br/><small>Help can be disabled from the settings.</small>\"\n />\n <ToolboxButton\n className=\"netdata-legend-toolbox-button\"\n onClick={onToolboxZoomInClick}\n iconType=\"zoomIn\"\n popoverTitle=\"Chart Zoom In\"\n popoverContent=\"Zoom in the chart. You can also press SHIFT and select an area of the chart,\n or press SHIFT or ALT and use the mouse wheel or 2-finger touchpad scroll to zoom in or out.\n <br/><small>Help can be disabled from the settings.</small>\"\n />\n <ToolboxButton\n className=\"netdata-legend-toolbox-button\"\n onClick={onToolboxZoomOutClick}\n iconType=\"zoomOut\"\n popoverTitle=\"Chart Zoom Out\"\n popoverContent=\"Zoom out the chart. 
You can also press SHIFT or ALT and use the mouse wheel,\n or 2-finger touchpad scroll to zoom in or out.<br/><small>Help can be disabled from the\n settings.</small>\"\n />\n </div>\n)\n","/* eslint-disable no-nested-ternary */\nimport { always, memoizeWith } from \"ramda\"\nimport Color from \"color\"\n\nimport { ChartMetadata, DygraphData } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartLibraryConfig } from \"domains/chart/utils/chartLibrariesSettings\"\n\nexport const getDataForFakeStacked = (\n data: number[][],\n dimensionsVisibility: boolean[],\n): number[][] => data.map((point) => {\n const [timestamp, ...values] = point\n const rest: number[] = []\n let currentMin = 0\n let currentMax = 0\n values\n .map((value, i) => ({ isVisible: dimensionsVisibility[i], value }))\n // reverse because first dimensions should be \"on top\" (at least positive ones)\n .slice().reverse()\n .forEach(({ isVisible, value }) => {\n if (!isVisible) {\n rest.push(0) // push with value '0'. it won't be visible but needs to be present in array\n return\n }\n if (value >= 0) {\n currentMax += value\n rest.push(currentMax)\n } else {\n currentMin += value\n rest.push(currentMin)\n }\n })\n return [\n timestamp,\n ...rest,\n ]\n})\n\nconst isPercentage = (unit: string) => unit === \"percentage\"\n || unit === \"percent\"\n || unit.indexOf(\"%\") !== -1\n\nexport const getDygraphChartType = (\n attributes: Attributes, chartData: DygraphData, chartMetadata: ChartMetadata,\n chartSettings: ChartLibraryConfig,\n) => {\n const isLogScale = (chartSettings.isLogScale as ((a: Attributes) => boolean))(attributes)\n const {\n dygraphType: dygraphRequestedType = chartMetadata.chart_type,\n groupBy,\n } = attributes\n\n if (groupBy && groupBy !== \"dimension\" && isPercentage(chartMetadata.units)) {\n return \"line\"\n }\n\n // corresponds to state.tmp.dygraph_chart_type in old app\n let dygraphChartType = dygraphRequestedType\n if (dygraphChartType === \"stacked\" && chartData.dimensions === 1) {\n dygraphChartType = \"area\"\n }\n if (dygraphChartType === \"stacked\" && isLogScale) {\n dygraphChartType = \"area\"\n }\n return dygraphChartType\n}\n\nconst getBackgroundColor = memoizeWith(\n always(\"true\"),\n () => Color(window.NETDATA.themes.current.background),\n)\n// when in \"fakeStacked\" mode, we cannot use opacity for fill in charts, because the areas would\n// be visible under each other. So the darkening / whitening needs to be added directly to colors\n// (the colors are too saturated for areas and in stacked mode they were with 0.8 opacity)\nexport const transformColors = (colors: string[]) => (\n colors.map((color) => Color(color).mix(getBackgroundColor(), 0.2).hex())\n)\n\nexport const getDygraphFillAlpha = (\n isFakeStacked: boolean, dygraphChartType: string,\n) => (isFakeStacked\n ? window.NETDATA.options.current.color_fill_opacity_fake_stacked\n : dygraphChartType === \"stacked\"\n ? window.NETDATA.options.current.color_fill_opacity_stacked\n : window.NETDATA.options.current.color_fill_opacity_area)\n\n\n// https://github.com/danvk/dygraphs/blob/master/src/iframe-tarp.js#L1-L23\n// On mouseUp dygraphs put rectangles above all iframes so mouseUp can be properly intercepted.\n// this causes a problem with some analytics iframes that place themselves in regions where they\n// aren't visible anyway (for example hubspot iframe on Cloud), and this creates a problematic\n// horizontal scrollbar to appear. 
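// --- Worked example (illustrative values) for getDataForFakeStacked above.
// Each input row is [timestamp, valueDim1, valueDim2, ...]; dimensions are
// reversed and accumulated so positive values stack upward, negative values
// stack downward, and hidden dimensions contribute 0:
//
//   getDataForFakeStacked(
//     [
//       [1000, 2, 3, -1],
//       [2000, 1, 1, -2],
//     ],
//     [true, true, true],
//   )
//   // -> [[1000, -1, 3, 5],   reversed order: -1 | 3 | 2 + 3
//   //     [2000, -2, 1, 2]]
//
// This reversal is why colors and visibility are also reversed wherever
// isFakeStacked is true (see transformColors and the updateOptions calls). ---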
This function filters those \"rectangles\" (tarps) to omit\n// elements with unreachable \"left\" styles\nexport const hackDygraphIFrameTarps = (tarps: HTMLDivElement[]): HTMLDivElement[] => (\n tarps.filter((element: HTMLDivElement) => {\n const isOutsideReasonableViewport = Number(element.style.left.replace(\"px\", \"\")) > 10000\n if (isOutsideReasonableViewport) {\n element.parentNode!.removeChild(element)\n }\n return !isOutsideReasonableViewport\n })\n)\n","/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable function-paren-newline */\n/* eslint-disable comma-dangle */\nimport React, { useRef, useCallback } from \"react\"\nimport { useToggle } from \"react-use\"\n\nconst useProceededChart = (\n chartRef: any,\n propsRef: any\n): [boolean, React.Ref<HTMLElement>, (g: Dygraph) => void] => {\n const [proceeded, toggleProceeded] = useToggle(false)\n\n const ref = useRef<HTMLElement>(null)\n\n const updatePosition = useCallback((g: Dygraph) => {\n const { x } = g.getArea()\n const distance = g.toDomXCoord(propsRef.current.chartData.first_entry * 1000)\n const hasProceeded = distance > x\n toggleProceeded(hasProceeded)\n\n if (hasProceeded && ref.current) {\n const { height } = chartRef.current.getBoundingClientRect()\n ref.current.style.left = `${x}px`\n ref.current.style.right = `calc(100% - ${distance}px)`\n ref.current.style.top = `${height / 2}px`\n }\n }, [])\n\n return [proceeded, ref, updatePosition]\n}\n\nexport default useProceededChart\n","import { useRef } from \"react\"\nimport { useToggle } from \"react-use\"\n\nconst badgeTopMargin = \"40px\"\n\nconst defaultPositionTo = (ref, x, position, topMargin) => {\n ref.current.style.left = `${x}px`\n ref.current.style.right = `calc(100% - ${position}px)`\n ref.current.style.top = topMargin\n}\n\nexport default () => {\n const [isRendered, toggleIsRendered] = useToggle(false)\n\n const ref = useRef(null)\n\n const updatePosition = (isVisible, g, position, positionTo = defaultPositionTo) => {\n if (!isVisible) {\n toggleIsRendered(false)\n return\n }\n\n if (ref.current) {\n toggleIsRendered(true)\n const { x } = g.getArea()\n\n positionTo(ref, x, position, badgeTopMargin)\n }\n }\n\n return [isRendered, ref, updatePosition]\n}\n","import React, { forwardRef } from \"react\"\nimport styled from \"styled-components\"\n\nconst Container = styled.div`\n display: block;\n`\n\nconst ProceededChartDisclaimer = forwardRef((\n props: React.HTMLAttributes<HTMLElement>,\n ref: React.Ref<HTMLDivElement>,\n) => (\n <Container ref={ref} className=\"dygraph__history-tip\" data-testid=\"proceededChartDisclaimer\">\n <span className=\"dygraph__history-tip-content\">\n Want to extend your history of real-time metrics?\n <br />\n <a href=\"https://learn.netdata.cloud/guides/longer-metrics-storage/\" target=\"_blank\" rel=\"noopener noreferrer\" data-testid=\"proceededChartDisclaimer-configure\">\n Configure Netdata's \n <b>history</b>\n </a>\n  or use the \n <a href=\"https://learn.netdata.cloud/docs/agent/database/engine/\" target=\"_blank\" rel=\"noopener noreferrer\" data-testid=\"proceededChartDisclaimer-engine\">DB engine</a>\n .\n </span>\n </Container>\n))\n\nexport default ProceededChartDisclaimer\n","import React, { forwardRef } from \"react\"\nimport styled from \"styled-components\"\n\nconst backgroundColorMap = {\n WARNING: \"#FFF8E1\",\n CRITICAL: \"#FFEBEF\",\n CLEAR: \"#E5F5E8\",\n}\nexport const getBackgroundColor = (status) => backgroundColorMap[status] || null\n\nconst borderColorMap = {\n WARNING: \"#FFC300\",\n 
CRITICAL: \"#F59B9B\",\n CLEAR: \"#68C47D\",\n}\nexport const getBorderColor = (status) => borderColorMap[status] || null\n\nconst textColorMap = {\n WARNING: \"#536775\",\n CRITICAL: \"#FF4136\",\n CLEAR: \"#00AB44\",\n}\nexport const getColor = (status) => textColorMap[status] || null\n\nconst Container = styled.div`\n position: absolute;\n margin-right: 10px;\n overflow: hidden;\n pointer-events: none;\n direction: rtl;\n z-index: 10; // higher than chart\n`\n\nconst Badge = styled.div`\n display: inline-block;\n border-radius: 36px;\n padding: 2px 12px;\n background: ${({ background }) => background};\n border: 1px solid ${({ border }) => border};\n color: ${({ color }) => color};\n font-size: 12px;\n font-weight: 700;\n direction: ltr;\n white-space: nowrap;\n`\n\nexport default forwardRef((\n { isVisible, status, label },\n ref,\n) => (\n <Container ref={ref}>\n {isVisible && (\n <Badge\n background={getBackgroundColor(status)}\n border={getBorderColor(status)}\n color={getColor(status)}\n >\n {label}\n </Badge>\n )}\n </Container>\n))\n","//@ts-nocheck\nimport { sortBy, reverse } from \"ramda\"\nimport React, { useLayoutEffect, useRef, useCallback } from \"react\"\nimport classNames from \"classnames\"\nimport { useUpdateEffect, useUnmount, useMount } from \"react-use\"\n// this version is needed because it contains a fix for handling constant value in the chart\n// ie. https://github.com/danvk/dygraphs/pull/909\nimport Dygraph from \"vendor/dygraph-c91c859.min\"\nimport \"dygraphs/src-es5/extras/smooth-plotter\"\nimport ResizeObserver from \"resize-observer-polyfill\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { AppStateT } from \"store/app-state\"\nimport { DygraphArea, NetdataDygraph } from \"types/vendor-overrides\"\nimport { TimeRange } from \"types/common\"\nimport { useDateTime } from \"utils/date-time\"\nimport { debounce } from \"utils/debounce\"\n\nimport {\n selectCommonMin,\n selectCommonMax,\n selectGlobalChartUnderlay,\n selectGlobalSelectionMaster,\n selectSmoothPlot,\n selectSyncPanAndZoom,\n selectSpacePanelTransitionEndIsActive,\n selectAlarm,\n selectTimezoneSetting,\n} from \"domains/global/selectors\"\nimport {\n resetGlobalPanAndZoomAction,\n setCommonMaxAction,\n setCommonMinAction,\n setGlobalPauseAction,\n resetGlobalPauseAction,\n} from \"domains/global/actions\"\n\nimport { resetChartPanAndZoomAction } from \"domains/chart/actions\"\n\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport {\n chartLibrariesSettings,\n ChartLibraryConfig,\n ChartLibraryName,\n} from \"../../utils/chartLibrariesSettings\"\nimport { ChartMetadata, DygraphData } from \"../../chart-types\"\nimport { selectResizeHeight } from \"../../selectors\"\n\nimport {\n getDygraphChartType,\n getDataForFakeStacked,\n transformColors,\n getDygraphFillAlpha,\n hackDygraphIFrameTarps,\n} from \"./dygraph/utils\"\nimport \"./dygraph-chart.css\"\n\nimport useProceededChart from \"../../hooks/use-proceeded-chart\"\nimport useDygraphBadge from \"../../hooks/useDygraphBadge\"\nimport ProceededChartDisclaimer from \"./proceeded-chart-disclaimer\"\nimport AlarmBadge, { getBorderColor } from \"./alarmBadge\"\n\n// This is the threshold above which we assume chart shown duration has changed\nconst timeframeThreshold = 5000\nconst dygraphResizeDebounceTime = 500\n\ntype IsInRangeOfAvailableData = (props: {\n after: number, before: number, chartData: DygraphData,\n}) => boolean\nconst isInRangeOfAvailableData: IsInRangeOfAvailableData = ({ 
after, before, chartData }) => (\n after >= (chartData.first_entry * 1000) && before <= (chartData.last_entry * 1000)\n)\n\ninterface GetInitialDygraphOptions {\n attributes: Attributes,\n chartData: DygraphData,\n chartMetadata: ChartMetadata,\n chartSettings: ChartLibraryConfig,\n dimensionsVisibility: boolean[]\n hiddenLabelsElementId: string,\n isFakeStacked: boolean,\n orderedColors: string[],\n setMinMax: (minMax: TimeRange) => void\n shouldSmoothPlot: boolean,\n unitsCurrent: string,\n xAxisDateString: (d: Date) => string,\n xAxisTimeString: (d: Date) => string,\n}\nconst getInitialDygraphOptions = ({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n}: GetInitialDygraphOptions) => {\n const isSparkline = attributes.dygraphTheme === \"sparkline\"\n const highlightCircleSize = isSparkline ? 3 : 4\n\n const isLogScale = (chartSettings.isLogScale as ((a: Attributes) => boolean))(attributes)\n const dygraphChartType = getDygraphChartType(attributes, chartData, chartMetadata, chartSettings)\n const {\n dygraphSmooth = dygraphChartType === \"line\"\n && !isSparkline,\n dygraphDrawAxis = true,\n legendPosition,\n } = attributes\n const isLegendOnBottom = legendPosition === \"bottom\"\n const {\n // destructuring with default values\n dygraphColors = orderedColors,\n dygraphRightGap = 5,\n dygraphShowRangeSelector = false,\n dygraphShowRoller = false,\n dygraphTitle = attributes.title || chartMetadata.title,\n dygraphTitleHeight = 19,\n dygraphLegend = \"always\",\n dygraphLabelsDiv = hiddenLabelsElementId,\n dygraphLabelsSeparateLine = true,\n dygraphIncludeZero = dygraphChartType === \"stacked\",\n dygraphShowZeroValues = true,\n dygraphShowLabelsOnHighLight = true,\n dygraphHideOverlayOnMouseOut = true,\n dygraphXRangePad = 0,\n dygraphYRangePad = 1,\n dygraphValueRange = [null, null],\n dygraphYLabelWidth = 12,\n // eslint-disable-next-line no-nested-ternary\n dygraphStrokeWidth = dygraphChartType === \"stacked\"\n ? 0.1\n : (dygraphSmooth === true\n ? 1.5\n : 0.7),\n\n dygraphStrokePattern,\n dygraphDrawPoints = false,\n dygraphDrawGapEdgePoints = true,\n dygraphConnectSeparatedPoints = false,\n dygraphPointSize = 1,\n dygraphStepPlot = false,\n dygraphStrokeBorderColor = window.NETDATA.themes.current.background,\n dygraphStrokeBorderWidth = 0,\n dygraphFillGraph = (dygraphChartType === \"area\" || dygraphChartType === \"stacked\"),\n dygraphFillAlpha = getDygraphFillAlpha(isFakeStacked, dygraphChartType),\n dygraphStackedGraph = dygraphChartType === \"stacked\" && !isFakeStacked,\n dygraphStackedGraphNanFill = \"none\",\n dygraphAxisLabelFontSize = 10,\n dygraphAxisLineColor = window.NETDATA.themes.current.axis,\n dygraphAxisLineWidth = 1.0,\n dygraphDrawGrid = true,\n dygraphGridLinePattern,\n dygraphGridLineWidth = 1.0,\n dygraphGridLineColor = window.NETDATA.themes.current.grid,\n dygraphMaxNumberWidth = 8,\n dygraphSigFigs,\n dygraphDigitsAfterDecimal = 2,\n dygraphHighlighCircleSize = highlightCircleSize,\n dygraphHighlightSeriesOpts,\n dygraphHighlightSeriesBackgroundAlpha,\n\n dygraphXPixelsPerLabel = 50,\n dygraphXAxisLabelWidth = 60,\n dygraphDrawXAxis = dygraphDrawAxis,\n dygraphYPixelsPerLabel = 15,\n dygraphYAxisLabelWidth = isLegendOnBottom ? 30 : 50,\n dygraphDrawYAxis = dygraphDrawAxis,\n } = attributes\n return {\n colors: isFakeStacked ? 
transformColors(reverse(dygraphColors)) : dygraphColors,\n\n // leave a few pixels empty on the right of the chart\n rightGap: isSparkline ? 0 : dygraphRightGap,\n showRangeSelector: dygraphShowRangeSelector,\n showRoller: dygraphShowRoller,\n title: isSparkline ? undefined : dygraphTitle,\n titleHeight: dygraphTitleHeight,\n legend: dygraphLegend, // we need this to get selection events\n labels: chartData.result.labels,\n labelsDiv: dygraphLabelsDiv,\n\n labelsSeparateLines: isSparkline ? true : dygraphLabelsSeparateLine,\n labelsShowZeroValues: isLogScale ? false : dygraphShowZeroValues,\n labelsKMB: false,\n labelsKMG2: false,\n showLabelsOnHighlight: dygraphShowLabelsOnHighLight,\n hideOverlayOnMouseOut: dygraphHideOverlayOnMouseOut,\n includeZero: dygraphIncludeZero,\n xRangePad: dygraphXRangePad,\n yRangePad: isSparkline ? 1 : dygraphYRangePad,\n valueRange: dygraphValueRange,\n ylabel: (isSparkline || isLegendOnBottom) ? undefined : unitsCurrent,\n yLabelWidth: (isSparkline || isLegendOnBottom) ? 0 : dygraphYLabelWidth,\n\n // the function to plot the chart\n plotter: (dygraphSmooth && shouldSmoothPlot) ? window.smoothPlotter : null,\n\n // The width of the lines connecting data points.\n // This can be used to increase the contrast or some graphs.\n strokeWidth: dygraphStrokeWidth,\n strokePattern: dygraphStrokePattern,\n\n // The size of the dot to draw on each point in pixels (see drawPoints).\n // A dot is always drawn when a point is \"isolated\",\n // i.e. there is a missing point on either side of it.\n // This also controls the size of those dots.\n drawPoints: dygraphDrawPoints,\n\n // Draw points at the edges of gaps in the data.\n // This improves visibility of small data segments or other data irregularities.\n drawGapEdgePoints: dygraphDrawGapEdgePoints,\n connectSeparatedPoints: isLogScale ? false : dygraphConnectSeparatedPoints,\n pointSize: dygraphPointSize,\n\n // enabling this makes the chart with little square lines\n stepPlot: dygraphStepPlot,\n\n // Draw a border around graph lines to make crossing lines more easily\n // distinguishable. Useful for graphs with many lines.\n strokeBorderColor: dygraphStrokeBorderColor,\n strokeBorderWidth: dygraphStrokeBorderWidth,\n fillGraph: dygraphFillGraph,\n fillAlpha: dygraphFillAlpha,\n stackedGraph: dygraphStackedGraph,\n stackedGraphNaNFill: dygraphStackedGraphNanFill,\n drawAxis: isSparkline ? false : dygraphDrawAxis,\n axisLabelFontSize: dygraphAxisLabelFontSize,\n axisLineColor: dygraphAxisLineColor,\n axisLineWidth: dygraphAxisLineWidth,\n drawGrid: isSparkline ? false : dygraphDrawGrid,\n gridLinePattern: dygraphGridLinePattern,\n gridLineWidth: dygraphGridLineWidth,\n gridLineColor: dygraphGridLineColor,\n maxNumberWidth: dygraphMaxNumberWidth,\n sigFigs: dygraphSigFigs,\n digitsAfterDecimal: dygraphDigitsAfterDecimal,\n highlightCircleSize: dygraphHighlighCircleSize,\n highlightSeriesOpts: dygraphHighlightSeriesOpts, // TOO SLOW: { strokeWidth: 1.5 },\n // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5,\n highlightSeriesBackgroundAlpha: dygraphHighlightSeriesBackgroundAlpha,\n visibility: dimensionsVisibility,\n logscale: isLogScale,\n\n axes: {\n x: {\n pixelsPerLabel: dygraphXPixelsPerLabel,\n // insufficient typings for Dygraph\n // @ts-ignore\n ticker: Dygraph.dateTicker,\n axisLabelWidth: dygraphXAxisLabelWidth,\n drawAxis: isSparkline ? false : dygraphDrawXAxis,\n axisLabelFormatter: (d: Date | number) => ((d as Date).toTimeString().startsWith(\"00:00:00\")\n ? 
xAxisDateString(d as Date)\n : xAxisTimeString(d as Date)\n ),\n },\n y: {\n logscale: isLogScale,\n pixelsPerLabel: dygraphYPixelsPerLabel,\n axisLabelWidth: dygraphYAxisLabelWidth,\n drawAxis: isSparkline ? false : dygraphDrawYAxis,\n // axisLabelFormatter is added on the updates\n axisLabelFormatter(y: Date | number) {\n const formatter = setMinMax([\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n this.axes_[0].extremeRange[0],\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n this.axes_[0].extremeRange[1],\n ]) as unknown as ((value: Date | number) => string)\n return formatter(y as number)\n },\n },\n },\n }\n}\n\ninterface Props {\n attributes: Attributes\n chartData: DygraphData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n dimensionsVisibility: boolean[]\n hasEmptyData: boolean\n hasLegend: boolean\n isRemotelyControlled: boolean\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n immediatelyDispatchPanAndZoom: () => void\n\n hoveredRow: number\n hoveredX: number | null\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: TimeRange) => void\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const DygraphChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartLibrary,\n // colors,\n chartUuid,\n dimensionsVisibility,\n hasEmptyData,\n hasLegend,\n isRemotelyControlled,\n onUpdateChartPanAndZoom,\n orderedColors,\n immediatelyDispatchPanAndZoom,\n\n hoveredRow,\n hoveredX,\n setGlobalChartUnderlay,\n setHoveredX,\n setMinMax,\n unitsCurrent,\n viewAfter,\n viewBefore,\n}: Props) => {\n const globalChartUnderlay = useSelector(selectGlobalChartUnderlay)\n const selectedAlarm = useSelector(selectAlarm)\n const alarm = selectedAlarm?.chartId === chartData.id ? selectedAlarm : null\n\n const timezone = useSelector(selectTimezoneSetting)\n\n const { xAxisDateString, xAxisTimeString } = useDateTime()\n const chartSettings = chartLibrariesSettings[chartLibrary]\n const hiddenLabelsElementId = `${chartUuid}-hidden-labels-id`\n\n const dygraphChartType = getDygraphChartType(attributes, chartData, chartMetadata, chartSettings)\n // isFakeStacked - is a special mode for displaying stacked charts with both positive and negative\n // values. Dygraph.js doesn't support it so in this case we need to sum the values manually\n // and display the chart as \"area\" type, but with keeping all styling (fill etc.) 
properties\n // as in \"stacked\" type\n // because first values need to be \"on top\" (at least for positive values), the dimension order\n // needs to be reversed (in getDataForFakeStacked function and when assigning dimension colors)\n const isFakeStacked = chartData.min < 0 && dygraphChartType === \"stacked\"\n const dygraphFillAlpha = getDygraphFillAlpha(isFakeStacked, dygraphChartType)\n\n const chartElement = useRef<HTMLDivElement>(null)\n\n const updateChartPanOrZoom = useCallback(({\n after, before,\n callback,\n shouldNotExceedAvailableRange,\n }) => {\n onUpdateChartPanAndZoom({\n after,\n before,\n callback,\n masterID: chartUuid,\n shouldNotExceedAvailableRange,\n })\n }, [chartUuid, onUpdateChartPanAndZoom])\n\n // keep in ref to prevent additional updates\n const dygraphInstance = useRef<Dygraph | null>()\n // state.tmp.dygraph_user_action in old dashboard\n const latestIsUserAction = useRef(false)\n // state.tmp.dygraph_mouse_down in old dashboard\n const isMouseDown = useRef(false)\n // state.tmp.dygraph_highlight_after in old dashboard\n const dygraphHighlightAfter = useRef<null | number>(null)\n // state.dygraph_last_touch_move in old dashboard\n const dygraphLastTouchMove = useRef(0)\n // state.dygraph_last_touch_page_x in old dashboard\n const dygraphLastTouchPageX = useRef(0)\n // state.dygraph_last_touch_end in old dashboard\n const dygraphLastTouchEnd = useRef<undefined | number>()\n\n const dispatch = useDispatch()\n const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n\n const resetGlobalPanAndZoom = useCallback(() => {\n latestIsUserAction.current = false // prevent starting panAndZoom\n if (dygraphInstance.current) {\n // todo on toolbox reset click, do updateOptions({ dateWindow: null })\n // (issue existed also before rewrite)\n dygraphInstance.current.updateOptions({\n // reset dateWindow to the current\n // @ts-ignore external typings dont support null\n dateWindow: null,\n })\n }\n\n if (isSyncPanAndZoom) {\n dispatch(resetGlobalPanAndZoomAction())\n } else {\n dispatch(resetChartPanAndZoomAction({ id: chartUuid }))\n }\n }, [chartUuid, dispatch, isSyncPanAndZoom])\n\n const [isAlarmBadgeVisible, alarmBadgeRef, updateAlarmBadge] = useDygraphBadge() as any\n\n // setGlobalChartUnderlay is using state from closure (chartData.after), so we need to have always\n // the newest callback. 
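// --- A minimal sketch of the mutable-ref pattern this comment describes
// (the hook name is illustrative, not part of this file): callbacks that are
// registered with dygraph once read ref.current at call time, so they always
// observe the values from the latest render.
import { useRef, useLayoutEffect } from "react"

const useLatest = <T,>(value: T) => {
  const ref = useRef(value)
  useLayoutEffect(() => {
    ref.current = value // refreshed after every render, read lazily by callbacks
  })
  return ref
}
// ---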
Unfortunately we cannot use Dygraph.updateOptions() (library restriction)\n // for interactionModel callbacks so we need to keep the callback in mutable ref\n const propsRef = useRef({\n alarm,\n chartData,\n globalChartUnderlay,\n hoveredX,\n immediatelyDispatchPanAndZoom,\n // put it to ref to prevent additional updateOptions() after creating dygraph\n resetGlobalPanAndZoom,\n setGlobalChartUnderlay,\n updateAlarmBadge,\n updateChartPanOrZoom,\n viewAfter,\n viewBefore,\n })\n\n const [\n isProceeded, precededChartRef, updatePrecededPosition,\n ] = useProceededChart(chartElement, propsRef)\n\n useLayoutEffect(() => {\n propsRef.current.alarm = alarm\n propsRef.current.chartData = chartData\n propsRef.current.hoveredX = hoveredX\n propsRef.current.immediatelyDispatchPanAndZoom = immediatelyDispatchPanAndZoom\n propsRef.current.globalChartUnderlay = globalChartUnderlay\n propsRef.current.resetGlobalPanAndZoom = resetGlobalPanAndZoom\n propsRef.current.setGlobalChartUnderlay = setGlobalChartUnderlay\n propsRef.current.updateAlarmBadge = updateAlarmBadge\n propsRef.current.updateChartPanOrZoom = updateChartPanOrZoom\n propsRef.current.viewAfter = viewAfter\n propsRef.current.viewBefore = viewBefore\n }, [\n alarm,\n chartData,\n globalChartUnderlay,\n hoveredX,\n immediatelyDispatchPanAndZoom,\n resetGlobalPanAndZoom,\n setGlobalChartUnderlay,\n updateAlarmBadge,\n updateChartPanOrZoom,\n viewAfter,\n viewBefore,\n ])\n\n const shouldSmoothPlot = useSelector(selectSmoothPlot)\n useLayoutEffect(() => {\n if (chartElement && chartElement.current && !dygraphInstance.current && !hasEmptyData) {\n const dygraphOptionsStatic = getInitialDygraphOptions({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n })\n\n latestIsUserAction.current = false\n\n const dygraphOptions = {\n ...dygraphOptionsStatic,\n // set dateWindow on init - this is needed when chart is globalPanAndZoom-master\n // and user scrolls down/up so the chart hides and then unhides. This causes the chart\n // to re-create, but the data has additional padding which should be outside of\n // visible range\n dateWindow: [propsRef.current.viewAfter, propsRef.current.viewBefore],\n\n highlightCallback(\n event: MouseEvent, xval: number,\n ) {\n // todo\n // state.pauseChart()\n\n const newHoveredX = isMouseDown.current\n ? null\n : xval\n\n const currentHoveredX = propsRef.current.hoveredX\n if (newHoveredX !== currentHoveredX) {\n setHoveredX(newHoveredX)\n }\n },\n\n unhighlightCallback() {\n // todo\n // state.unpauseChart();\n if (propsRef.current.hoveredX !== null) {\n setHoveredX(null)\n }\n },\n drawCallback(dygraph: Dygraph) {\n // the user has panned the chart and this is called to re-draw the chart\n // 1. refresh this chart by adding data to it\n // 2. 
notify all the other charts about the update they need\n\n // to prevent an infinite loop (feedback), we use\n // state.tmp.dygraph_user_action\n // - when true, this is initiated by a user\n // - when false, this is feedback\n\n if (latestIsUserAction.current) {\n latestIsUserAction.current = false\n const xRange = dygraph.xAxisRange()\n const after = Math.round(xRange[0])\n const before = Math.round(xRange[1])\n\n if (isInRangeOfAvailableData({\n after, before, chartData: propsRef.current.chartData,\n })) {\n propsRef.current.updateChartPanOrZoom({ after, before })\n }\n }\n },\n zoomCallback: (minDate: number, maxDate: number) => {\n latestIsUserAction.current = true\n propsRef.current.updateChartPanOrZoom({ after: minDate, before: maxDate })\n },\n\n underlayCallback(canvas: CanvasRenderingContext2D, area: DygraphArea, g: Dygraph) {\n updatePrecededPosition(g)\n\n if (propsRef.current.alarm) {\n const { alarm: currentAlarm } = propsRef.current\n\n const alarmPosition = g.toDomXCoord(currentAlarm.when * 1000)\n const fillColor = getBorderColor(currentAlarm.status)\n const horizontalPadding = 3\n // use RAF, because dygraph doesn't provide any callback called after drawing the chart\n requestAnimationFrame(() => {\n canvas.fillStyle = fillColor\n const globalAlphaCache = canvas.globalAlpha\n canvas.globalAlpha = 0.7\n canvas.fillRect(alarmPosition - horizontalPadding, area.y, 2 * horizontalPadding, area.h)\n canvas.globalAlpha = globalAlphaCache\n })\n\n propsRef.current.updateAlarmBadge(\n propsRef.current.alarm,\n g,\n alarmPosition - horizontalPadding,\n )\n }\n\n // the chart is about to be drawn\n // this function renders global highlighted time-frame\n\n if (propsRef.current.globalChartUnderlay) {\n const { after, before } = propsRef.current.globalChartUnderlay\n\n if (after < before) {\n const HIGHLIGHT_HORIZONTAL_PADDING = 20\n const bottomLeft = g.toDomCoords(after, -HIGHLIGHT_HORIZONTAL_PADDING)\n const topRight = g.toDomCoords(before, HIGHLIGHT_HORIZONTAL_PADDING)\n\n const left = bottomLeft[0]\n const right = topRight[0]\n\n // eslint-disable-next-line no-param-reassign\n canvas.fillStyle = window.NETDATA.themes.current.highlight\n canvas.fillRect(left, area.y, right - left, area.h)\n }\n }\n },\n\n // interactionModel cannot be replaced with updateOptions(). 
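// --- A sketch of the highlight rectangle drawn by underlayCallback above,
// assuming a dygraph instance `g` and a theme color (the helper name is
// illustrative): the [after, before] range is converted to DOM x-coordinates
// and filled across the full plot height.
const drawTimeframeHighlight = (
  canvas: CanvasRenderingContext2D,
  area: { y: number; h: number },
  g: Dygraph,
  after: number,
  before: number,
  highlightColor: string,
) => {
  if (after >= before) return
  const [left] = g.toDomCoords(after, 0) // only the x coordinate is used
  const [right] = g.toDomCoords(before, 0)
  canvas.fillStyle = highlightColor
  canvas.fillRect(left, area.y, right - left, area.h)
}
// ---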
we need to keep all changing\n // values and callbacks in mutable ref,\n interactionModel: {\n mousedown(event: MouseEvent, dygraph: Dygraph, context: any) {\n // Right-click should not initiate anything.\n if (event.button && event.button === 2) {\n return\n }\n\n latestIsUserAction.current = true\n isMouseDown.current = true\n context.initializeMouseDown(event, dygraph, context)\n\n // limit problematic dygraph's feature, more info above the function\n // eslint-disable-next-line no-param-reassign\n context.tarp.tarps = hackDygraphIFrameTarps(context.tarp.tarps)\n\n dispatch(setGlobalPauseAction())\n\n if (event.button && event.button === 1) {\n // middle mouse button\n\n if (event.shiftKey) {\n // panning\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startPan(event, dygraph, context)\n } else if (event.altKey || event.ctrlKey || event.metaKey) {\n // middle mouse button highlight\n dygraphHighlightAfter.current = dygraph.toDataXCoord(event.offsetX)\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else {\n // middle mouse button selection for zoom\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n }\n } else if (event.shiftKey) {\n // left mouse button selection for zoom (ZOOM)\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else if (event.altKey || event.ctrlKey || event.metaKey) {\n // left mouse button highlight\n dygraphHighlightAfter.current = dygraph.toDataXCoord(event.offsetX)\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else {\n // left mouse button dragging (PAN)\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startPan(event, dygraph, context)\n }\n },\n\n mousemove(event: MouseEvent, dygraph: Dygraph, context: any) {\n // if (state.tmp.dygraph_highlight_after !== null) {\n // else if (\n if (dygraphHighlightAfter.current !== null) {\n // highlight selection\n latestIsUserAction.current = true\n // @ts-ignore\n Dygraph.moveZoom(event, dygraph, context)\n event.preventDefault()\n } else if (context.isPanning) {\n latestIsUserAction.current = true\n // eslint-disable-next-line no-param-reassign\n context.is2DPan = false\n // @ts-ignore\n Dygraph.movePan(event, dygraph, context)\n } else if (context.isZooming) {\n // @ts-ignore\n Dygraph.moveZoom(event, dygraph, context)\n }\n },\n\n mouseup(event: MouseEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = false\n if (dygraphHighlightAfter.current !== null) {\n const sortedRange = sortBy((x) => +x, [\n dygraphHighlightAfter.current,\n dygraph.toDataXCoord(event.offsetX),\n ])\n\n propsRef.current.setGlobalChartUnderlay({\n after: sortedRange[0],\n before: sortedRange[1],\n masterID: chartData.id,\n })\n dygraphHighlightAfter.current = null\n // eslint-disable-next-line no-param-reassign\n context.isZooming = false\n\n // old dashboard code\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n dygraph.clearZoomRect_()\n // this call probably fixes the broken selection circle during highlighting\n // and forces underlayCallback to fire (and draw highlight-rect\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n dygraph.drawGraph_(false)\n } else if (context.isPanning) {\n latestIsUserAction.current = true\n // @ts-ignore\n Dygraph.endPan(event, dygraph, context)\n propsRef.current.immediatelyDispatchPanAndZoom()\n } else if (context.isZooming) {\n latestIsUserAction.current = true\n // @ts-ignore\n 
Dygraph.endZoom(event, dygraph, context)\n propsRef.current.immediatelyDispatchPanAndZoom()\n }\n },\n\n wheel(event: WheelEvent, dygraph: Dygraph) {\n if (!event.shiftKey && !event.altKey) return\n\n latestIsUserAction.current = true\n event.preventDefault()\n event.stopPropagation()\n\n // https://dygraphs.com/gallery/interaction-api.js\n const zoom = (g, zoomInPercentage, bias) => {\n bias = bias || 0.5\n const [afterAxis, beforeAxis] = g.xAxisRange()\n const delta = beforeAxis - afterAxis\n const increment = delta * zoomInPercentage\n const [afterIncrement, beforeIncrement] = [increment * bias, increment * (1 - bias)]\n\n const after = afterAxis + afterIncrement\n const before = beforeAxis - beforeIncrement\n\n propsRef.current.updateChartPanOrZoom({\n after,\n before,\n shouldNotExceedAvailableRange: true,\n callback: (updatedAfter: number, updatedBefore: number) => {\n dygraph.updateOptions({\n dateWindow: [updatedAfter, updatedBefore],\n })\n },\n })\n }\n\n const offsetToPercentage = (g, offsetX) => {\n // This is calculating the pixel offset of the leftmost date.\n const [axisAfterOffset] = g.toDomCoords(g.xAxisRange()[0], null)\n // x and w are relative to the corner of the drawing area,\n // so that the upper corner of the drawing area is (0, 0).\n const x = offsetX - axisAfterOffset\n // This is computing the rightmost pixel, effectively defining the\n // width.\n const w = g.toDomCoords(g.xAxisRange()[1], null)[0] - axisAfterOffset\n\n // Percentage from the left.\n return w === 0 ? 0 : x / w\n }\n\n const normalDef =\n typeof event.wheelDelta === \"number\" && !Number.isNaN(event.wheelDelta)\n ? event.wheelDelta / 40\n : event.deltaY * -1.2\n\n const normal = event.detail ? event.detail * -1 : normalDef\n const percentage = normal / 50\n\n if (!event.offsetX) event.offsetX = event.layerX - event.target.offsetLeft\n const xPct = offsetToPercentage(dygraph, event.offsetX)\n\n zoom(dygraph, percentage, xPct)\n },\n\n click(event: MouseEvent) {\n event.preventDefault()\n },\n\n dblclick() {\n dispatch(resetGlobalPauseAction({ forcePlay: false }))\n propsRef.current.resetGlobalPanAndZoom()\n },\n\n touchstart(event: TouchEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = true\n latestIsUserAction.current = true\n\n // todo\n // state.pauseChart()\n\n Dygraph.defaultInteractionModel.touchstart(event, dygraph, context)\n\n // we overwrite the touch directions at the end, to overwrite\n // the internal default of dygraph\n // eslint-disable-next-line no-param-reassign\n context.touchDirections = { x: true, y: false }\n\n dygraphLastTouchMove.current = 0\n\n if (typeof event.touches[0].pageX === \"number\") {\n dygraphLastTouchPageX.current = event.touches[0].pageX\n } else {\n dygraphLastTouchPageX.current = 0\n }\n },\n touchmove(event: TouchEvent, dygraph: Dygraph, context: any) {\n latestIsUserAction.current = true\n Dygraph.defaultInteractionModel.touchmove(event, dygraph, context)\n\n dygraphLastTouchMove.current = Date.now()\n },\n\n touchend(event: TouchEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = false\n latestIsUserAction.current = true\n Dygraph.defaultInteractionModel.touchend(event, dygraph, context)\n\n // if it didn't move, it is a selection\n if (dygraphLastTouchMove.current === 0 && dygraphLastTouchPageX.current !== 0\n && chartElement.current // this is just for TS\n ) {\n latestIsUserAction.current = false // prevent updating pan-and-zoom\n // internal api of dygraph\n // @ts-ignore\n // eslint-disable-next-line 
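// --- The wheel handler above boils down to this pure calculation (extracted
// here as an illustrative helper): shrink the visible [after, before] window
// by a percentage, biased toward the cursor so the point under the mouse
// stays roughly fixed.
const zoomWindow = (
  after: number,
  before: number,
  zoomInPercentage: number,
  bias = 0.5, // 0 = zoom around the left edge, 1 = around the right edge
): [number, number] => {
  const increment = (before - after) * zoomInPercentage
  return [after + increment * bias, before - increment * (1 - bias)]
}

zoomWindow(0, 1000, 0.1, 0.5) // -> [50, 950], a 10% zoom around the center
// ---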
no-underscore-dangle\n const dygraphPlotter = dygraph.plotter_\n const pct = (dygraphLastTouchPageX.current - (\n dygraphPlotter.area.x + chartElement.current.getBoundingClientRect().left\n )) / dygraphPlotter.area.w\n\n const { current } = propsRef\n const t = Math.round(current.viewAfter\n + (current.viewBefore - current.viewAfter) * pct)\n // dont set \"master\" so the highlight is recalculated (to match existing row)\n setHoveredX(t, true)\n }\n\n // if it was double tap within double click time, reset the charts\n const now = Date.now()\n if (typeof dygraphLastTouchEnd.current !== \"undefined\") {\n if (dygraphLastTouchMove.current === 0) {\n const dt = now - dygraphLastTouchEnd.current\n if (dt <= window.NETDATA.options.current.double_click_speed) {\n propsRef.current.resetGlobalPanAndZoom()\n }\n }\n }\n\n // remember the timestamp of the last touch end\n dygraphLastTouchEnd.current = now\n propsRef.current.immediatelyDispatchPanAndZoom()\n },\n },\n }\n\n const data = isFakeStacked\n ? getDataForFakeStacked(chartData.result.data, dimensionsVisibility)\n : chartData.result.data\n const instance = new Dygraph((chartElement.current), data, dygraphOptions)\n dygraphInstance.current = instance\n }\n }, [attributes, chartData, chartMetadata, chartSettings, chartUuid, dimensionsVisibility,\n hasEmptyData, hiddenLabelsElementId, isFakeStacked,\n orderedColors, setHoveredX, setMinMax, shouldSmoothPlot, unitsCurrent,\n xAxisDateString, xAxisTimeString, updatePrecededPosition, dispatch])\n\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n const isSparkline = attributes.dygraphTheme === \"sparkline\"\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n dygraphInstance.current.updateOptions({\n ylabel: (isSparkline || isLegendOnBottom) ? 
undefined : unitsCurrent,\n })\n }\n }, [attributes, unitsCurrent])\n\n\n // immediately update when changing global chart underlay or currently showed alarm\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n dygraphInstance.current.updateOptions({})\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [alarm, globalChartUnderlay])\n\n const spacePanelTransitionEndIsActive = useSelector(selectSpacePanelTransitionEndIsActive)\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n // dygraph always resizes on browser width change, but doesn't resize when the container\n // has different width.\n window.requestAnimationFrame(() => {\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n })\n }\n }, [spacePanelTransitionEndIsActive])\n\n // update data of the chart\n // first effect should only be made by new DygraphInstance()\n useUpdateEffect(() => {\n // dont update when there is no data - in this case we should still show old chart\n if (dygraphInstance.current && !hasEmptyData) {\n // todo support state.tmp.dygraph_force_zoom\n const forceDateWindow = [viewAfter, viewBefore]\n\n // in old dashboard, when chart needed to reset internal dateWindow state,\n // dateWindow was set to null, and new dygraph got the new dateWindow from results.\n // this caused small unsync between dateWindow of parent (master) and child charts\n // i also detected that forceDateWindow timestamps have slightly better performance (10%)\n // so if the chart needs to change local dateWindow, we'll always use timestamps instead of\n // null.\n\n const xAxisRange = dygraphInstance.current.xAxisRange()\n // eslint-disable-next-line max-len\n const hasChangedDuration = Math.abs((viewBefore - viewAfter) - (xAxisRange[1] - xAxisRange[0])) > timeframeThreshold\n\n // check if the time is relative\n const hasScrolledToTheFutureDuringPlayMode = viewBefore <= 0\n && (xAxisRange[1] > viewBefore)\n // if viewAfter is bigger than current dateWindow start, just reset dateWindow\n && (xAxisRange[0] > viewAfter)\n && !hasChangedDuration\n\n const optionsDateWindow = (isRemotelyControlled && !hasScrolledToTheFutureDuringPlayMode)\n ? { dateWindow: forceDateWindow }\n : {}\n\n const { dygraphColors = orderedColors } = attributes\n const file = isFakeStacked\n ? getDataForFakeStacked(chartData.result.data, dimensionsVisibility)\n : chartData.result.data\n\n const includeZero = dimensionsVisibility.length === 1 ||\n dimensionsVisibility.filter(x => x === true).length > 1\n\n dygraphInstance.current.updateOptions({\n ...optionsDateWindow,\n colors: isFakeStacked ? transformColors(reverse(dygraphColors)) : dygraphColors,\n file,\n labels: chartData.result.labels,\n fillAlpha: dygraphFillAlpha,\n ...(dygraphChartType === \"stacked\" ? { includeZero } : {}),\n stackedGraph: dygraphChartType === \"stacked\" && !isFakeStacked,\n // see explanation about reversing before isFakeStacked assignment\n visibility: isFakeStacked ? 
reverse(dimensionsVisibility) : dimensionsVisibility,\n })\n }\n }, [attributes, chartData.result, chartUuid, dimensionsVisibility, dygraphChartType,\n dygraphFillAlpha, hasEmptyData, isFakeStacked, isRemotelyControlled, orderedColors,\n viewAfter, viewBefore])\n\n useUpdateEffect(() => {\n if (!dygraphInstance.current) {\n return\n }\n\n const dygraphOptionsStatic = getInitialDygraphOptions({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n })\n if (!hasEmptyData) dygraphInstance.current.updateOptions(dygraphOptionsStatic)\n }, [dygraphChartType, timezone])\n\n // set selection\n const currentSelectionMasterId = useSelector(selectGlobalSelectionMaster)\n useLayoutEffect(() => {\n if (dygraphInstance.current && currentSelectionMasterId !== chartUuid) {\n if (hoveredRow === -1) {\n // getSelection is 100 times faster that clearSelection\n if (dygraphInstance.current.getSelection() !== -1) {\n dygraphInstance.current.clearSelection()\n }\n return\n }\n dygraphInstance.current.setSelection(hoveredRow)\n }\n }, [chartData, chartUuid, currentSelectionMasterId, hoveredRow,\n viewAfter, viewBefore])\n\n\n // handle resizeHeight change\n const resizeHeight = useSelector(\n (state: AppStateT) => selectResizeHeight(state, { id: chartUuid }),\n )\n useLayoutEffect(() => {\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n }, [resizeHeight, chartData.dimension_names.length])\n\n\n const commonMinState = useSelector((state: AppStateT) => (\n attributes.commonMin\n ? selectCommonMin(state, attributes.commonMin)\n : undefined\n ))\n const commonMaxState = useSelector((state: AppStateT) => (\n attributes.commonMax\n ? 
selectCommonMax(state, attributes.commonMax)\n : undefined\n ))\n\n useLayoutEffect(() => {\n const { commonMin: commonMinKey, commonMax: commonMaxKey } = attributes\n\n if (\n dygraphInstance.current\n && (commonMinKey || commonMaxKey)\n ) {\n const extremes = (dygraphInstance.current as NetdataDygraph).yAxisExtremes()[0]\n const [currentMin, currentMax] = extremes\n\n const {\n dygraphValueRange = [null, null],\n } = attributes\n // if the user gave a valueRange, respect it\n const shouldUseCommonMin = dygraphValueRange[0] === null\n const shouldUseCommonMax = dygraphValueRange[1] === null\n\n\n let shouldUpdate = false\n let valueRange: number[] = [...extremes]\n\n // check if current extreme (painted by dygraph) is not more extreme than commonMin/Max\n // if yes - update the chart\n if (commonMinKey && shouldUseCommonMin) {\n if (commonMinState && commonMinState.currentExtreme < currentMin) {\n valueRange[0] = commonMinState.currentExtreme\n shouldUpdate = true\n }\n }\n if (commonMaxKey && shouldUseCommonMax) {\n if (commonMaxState && commonMaxState.currentExtreme > currentMax) {\n valueRange[1] = commonMaxState.currentExtreme\n shouldUpdate = true\n }\n }\n\n if (shouldUpdate) {\n dygraphInstance.current.updateOptions({ valueRange })\n const newExtremes = (dygraphInstance.current as NetdataDygraph).yAxisExtremes()[0]\n // get updated valueRange (rounded by dygraph)\n valueRange = [...newExtremes]\n }\n\n // if the value is different than the one stored in state, update redux state\n if (commonMinKey && shouldUseCommonMin\n && (valueRange[0] !== commonMinState?.charts[chartUuid])\n ) {\n dispatch(setCommonMinAction({ chartUuid, commonMinKey, value: valueRange[0] }))\n }\n if (commonMaxKey && shouldUseCommonMax\n && (valueRange[1] !== commonMaxState?.charts[chartUuid])\n ) {\n dispatch(setCommonMaxAction({ chartUuid, commonMaxKey, value: valueRange[1] }))\n }\n }\n }, [attributes, chartData.result, chartUuid, commonMinState, commonMaxState, dispatch])\n\n useLayoutEffect(() => {\n if (isProceeded && dygraphInstance.current) {\n updatePrecededPosition(dygraphInstance.current)\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [isProceeded])\n\n useUnmount(() => {\n if (dygraphInstance.current) {\n dygraphInstance.current.destroy()\n }\n })\n\n /**\n * resize with ResizeObserver\n */\n const resizeObserver = useRef<ResizeObserver>()\n useMount(() => {\n if (!attributes.detectResize) {\n return\n }\n // flag used to prevent first callback (and resize) on dygraph initial draw\n let hasOmitedFirstCallback = false\n const callbackDebounced = debounce(() => {\n if (!hasOmitedFirstCallback) {\n hasOmitedFirstCallback = true\n return\n }\n\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n }, dygraphResizeDebounceTime)\n\n resizeObserver.current = new ResizeObserver(() => {\n callbackDebounced()\n })\n resizeObserver.current.observe(chartElement.current as HTMLDivElement)\n })\n\n useUnmount(() => {\n dygraphInstance.current = null // clear it for debounce purposes\n if (resizeObserver.current) {\n resizeObserver.current.disconnect()\n }\n })\n\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n return (\n <>\n <div\n ref={chartElement}\n id={chartElementId}\n className={classNames(\n chartElementClassName,\n { \"dygraph-chart--legend-bottom\": isLegendOnBottom },\n )}\n />\n {isProceeded && hasLegend && (\n <ProceededChartDisclaimer ref={precededChartRef as React.Ref<HTMLDivElement>} />\n )}\n {alarm?.value && 
hasLegend && (\n // @ts-ignore\n <AlarmBadge\n isVisible={isAlarmBadgeVisible}\n ref={alarmBadgeRef}\n status={alarm.status}\n label={alarm.value}\n />\n )}\n <div className=\"dygraph-chart__labels-hidden\" id={hiddenLabelsElementId} />\n </>\n )\n}\n","// https://gist.github.com/ca0v/73a31f57b397606c9813472f7493a940\n\nexport const debounce = <F extends (...args: any[]) => any>(func: F, waitFor: number) => {\n let timeout: ReturnType<typeof setTimeout> | null = null\n\n const debounced = (...args: Parameters<F>) => {\n if (timeout !== null) {\n clearTimeout(timeout)\n timeout = null\n }\n timeout = setTimeout(() => func(...args), waitFor)\n }\n\n return debounced as (...args: Parameters<F>) => ReturnType<F>\n}\n","import React, { useRef, useEffect, useState } from \"react\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport EasyPie from \"easy-pie-chart\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { ChartLibraryName } from \"domains/chart/utils/chartLibrariesSettings\"\nimport {\n always, cond, identity, T, sortBy, map, pipe,\n} from \"ramda\"\n\ntype GetPercentFromValueMinMax = (arg: {\n value: number | undefined\n min: number | undefined\n max: number | undefined\n isMinOverride: boolean\n isMaxOverride: boolean\n}) => number\nconst getPercentFromValueMinMax: GetPercentFromValueMinMax = ({\n value = 0, min = 0, max = 0,\n isMinOverride,\n isMaxOverride,\n}) => {\n /* eslint-disable no-param-reassign */\n // todo refractor old logic to readable functions\n // if no easyPiechart-min-value attribute\n if (!isMinOverride && min > 0) {\n min = 0\n }\n if (!isMaxOverride && max < 0) {\n max = 0\n }\n\n let pcent\n\n if (min < 0 && max > 0) {\n // it is both positive and negative\n // zero at the top center of the chart\n max = (-min > max) ? 
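// --- A short usage sketch for the debounce helper above (redrawChart is a
// hypothetical stand-in): a burst of calls collapses into one invocation,
// waitFor ms after the last call - the same way the dygraph resize path
// debounces its ResizeObserver callback with dygraphResizeDebounceTime.
const onResize = debounce(() => {
  redrawChart() // stand-in for the real redraw; runs once per burst
}, 500)
window.addEventListener("resize", onResize)
// ---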
-min : max\n pcent = Math.round((value * 100) / max)\n } else if (value >= 0 && min >= 0 && max >= 0) {\n // clockwise\n pcent = Math.round(((value - min) * 100) / (max - min))\n if (pcent === 0) {\n pcent = 0.1\n }\n } else {\n // counter clockwise\n pcent = Math.round(((value - max) * 100) / (max - min))\n if (pcent === 0) {\n pcent = -0.1\n }\n }\n /* eslint-enable no-param-reassign */\n return pcent\n}\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n chartWidth: number\n dimensionsVisibility: boolean[]\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n\n hoveredRow: number\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const EasyPieChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartWidth,\n hoveredRow,\n legendFormatValue,\n orderedColors,\n setMinMax,\n showUndefined,\n unitsCurrent,\n}: Props) => {\n const chartElement = useRef<HTMLDivElement>(null)\n const [chartInstance, setChartInstance] = useState()\n\n const valueIndex = hoveredRow === -1\n ? 0\n : (chartData.result.length - 1 - hoveredRow) // because data for easy-pie-chart are flipped\n const value = showUndefined ? null : chartData.result[valueIndex]\n\n const {\n // if this is set, then we're overriding commonMin\n easyPieChartMinValue: min = chartData.min, // todo replace with commonMin\n easyPieChartMaxValue: max = chartData.max, // todo replace with commonMax\n } = attributes\n\n // make sure the order is correct and that value is not outside those boundaries\n // (this check was present in old dashboard but perhaps it's not needed)\n const safeMinMax = pipe(\n map((x: number) => +x),\n sortBy(identity),\n ([_min, _max]: number[]) => [Math.min(_min, value || 0), Math.max(_max, value || 0)],\n )([min, max])\n\n useEffect(() => {\n setMinMax(safeMinMax as [number, number])\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [safeMinMax])\n\n const pcent = getPercentFromValueMinMax({\n value: showUndefined ? 
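// --- Two worked examples for getPercentFromValueMinMax above (illustrative
// values, with the override flags set so min/max are used exactly as given):
//
// mixed range: rescaled around zero, max becomes max(|min|, max) = 100
getPercentFromValueMinMax({
  value: 25, min: -50, max: 100, isMinOverride: true, isMaxOverride: true,
}) // -> Math.round(25 * 100 / 100) = 25

// purely positive range: plain linear scaling, nudged to 0.1 instead of 0
// so the pie never renders completely empty
getPercentFromValueMinMax({
  value: 50, min: 0, max: 200, isMinOverride: true, isMaxOverride: true,
}) // -> Math.round((50 - 0) * 100 / (200 - 0)) = 25
// ---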
0 : (value as number),\n min: safeMinMax[0],\n max: safeMinMax[1],\n isMinOverride: attributes.easyPieChartMinValue !== undefined,\n isMaxOverride: attributes.easyPieChartMaxValue !== undefined,\n })\n\n useEffect(() => {\n if (chartElement.current && !chartInstance) {\n const stroke = cond([\n [(v) => v < 3, always(2)],\n [T, identity],\n ])(Math.floor(chartWidth / 22))\n\n const {\n easyPieChartTrackColor = window.NETDATA.themes.current.easypiechart_track,\n easyPieChartScaleColor = window.NETDATA.themes.current.easypiechart_scale,\n easyPieChartScaleLength = 5,\n easyPieChartLineCap = \"round\",\n easyPieChartLineWidth = stroke,\n easyPieChartTrackWidth,\n easyPieChartSize = chartWidth,\n easyPieChartRotate = 0,\n easyPieChartAnimate = { duration: 500, enabled: true },\n easyPieChartEasing,\n } = attributes\n\n const newChartInstance = new EasyPie(chartElement.current, {\n barColor: orderedColors[0],\n trackColor: easyPieChartTrackColor,\n scaleColor: easyPieChartScaleColor,\n scaleLength: easyPieChartScaleLength,\n lineCap: easyPieChartLineCap,\n lineWidth: easyPieChartLineWidth,\n trackWidth: easyPieChartTrackWidth,\n size: easyPieChartSize,\n rotate: easyPieChartRotate,\n animate: easyPieChartAnimate,\n easing: easyPieChartEasing,\n })\n setChartInstance(newChartInstance)\n }\n }, [attributes, chartData, chartInstance, chartWidth, orderedColors])\n\n // update with value\n useEffect(() => {\n if (chartInstance) {\n const shouldUseAnimation = hoveredRow === -1 && !showUndefined\n\n if (shouldUseAnimation && !chartInstance.options.animate.enabled) {\n chartInstance.enableAnimation()\n } else if (!shouldUseAnimation && chartInstance.options.animate.enabled) {\n chartInstance.disableAnimation()\n }\n\n setTimeout(() => {\n // need to be in timeout to trigger animation properly\n chartInstance.update(pcent)\n }, 0)\n }\n }, [chartInstance, hoveredRow, pcent, showUndefined])\n\n const valueFontSize = (chartWidth * 2) / 3 / 5\n const valuetop = Math.round((chartWidth - valueFontSize - (chartWidth / 40)) / 2)\n\n const titleFontSize = Math.round((valueFontSize * 1.6) / 3)\n const titletop = Math.round(valuetop - (titleFontSize * 2) - (chartWidth / 40))\n\n const unitFontSize = Math.round(titleFontSize * 0.9)\n const unitTop = Math.round(valuetop + (valueFontSize + unitFontSize) + (chartWidth / 40))\n // to update, just label innerText and pcent are changed\n\n return (\n <div ref={chartElement} id={chartElementId} className={chartElementClassName}>\n <span\n className=\"easyPieChartLabel\"\n style={{\n fontSize: valueFontSize,\n top: valuetop,\n }}\n >\n {legendFormatValue(value)}\n </span>\n <span\n className=\"easyPieChartTitle\"\n style={{\n fontSize: titleFontSize,\n top: titletop,\n }}\n >\n {attributes.title || chartMetadata.title}\n </span>\n <span\n className=\"easyPieChartUnits\"\n style={{\n fontSize: unitFontSize,\n top: unitTop,\n }}\n >\n {unitsCurrent}\n </span>\n\n </div>\n )\n}\n","import React, {\n useRef, useEffect, useState,\n} from \"react\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport { Gauge } from \"gaugeJS\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { ChartLibraryName } from \"domains/chart/utils/chartLibrariesSettings\"\nimport {\n identity, sortBy, map, pipe, always,\n} from \"ramda\"\n\nconst isSetByUser = (x: undefined | number): x is number => (\n typeof x === \"number\"\n)\n\ninterface 
Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n chartHeight: number\n chartWidth: number\n dimensionsVisibility: boolean[]\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n\n hoveredRow: number\n hoveredX: number | null\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const GaugeChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartUuid,\n chartHeight,\n chartWidth,\n hoveredRow,\n legendFormatValue,\n orderedColors,\n setMinMax,\n showUndefined,\n unitsCurrent,\n}: Props) => {\n const chartCanvasElement = useRef<HTMLCanvasElement>(null)\n const [chartInstance, setChartInstance] = useState()\n\n const valueIndex = hoveredRow === -1\n ? 0\n : (chartData.result.length - 1 - hoveredRow) // because data for easy-pie-chart are flipped\n const value = chartData.result[valueIndex]\n\n const {\n // if this is set, then we're overriding commonMin\n gaugeMinValue: minAttribute,\n gaugeMaxValue: maxAttribute,\n } = attributes\n\n const min = isSetByUser(minAttribute) ? minAttribute : chartData.min\n const max = isSetByUser(maxAttribute) ? maxAttribute : chartData.max\n // we should use minAttribute if it's existing\n // old app was using commonMin\n\n // make sure the order is correct and that value is not outside those boundaries\n // (this check was present in old dashboard but perhaps it's not needed)\n const [safeMin, safeMax] = pipe(\n // if they are attributes, make sure they're converted to numbers\n map((x: number) => +x),\n // make sure it is zero based\n // but only if it has not been set by the user\n ([_min, _max]: number[]) => [\n (!isSetByUser(minAttribute) && _min > 0) ? 0 : _min,\n (!isSetByUser(maxAttribute) && _max < 0) ? 
0 : _max,\n ],\n // make sure min <= max\n sortBy(identity),\n ([_min, _max]: number[]) => [Math.min(_min, value), Math.max(_max, value)],\n )([min, max])\n // calling outside \"useEffect\" intentionally,\n // because it should update the values first, and only then render the chart in useEffect()\n useEffect(() => {\n setMinMax([safeMin, safeMax])\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [safeMin, safeMax])\n\n const pcent = pipe(\n always(((value - safeMin) * 100) / (safeMax - safeMin)),\n // bug fix for gauge.js 1.3.1\n // if the value is the absolute min or max, the chart is broken\n (_pcent: number) => Math.max(0.001, _pcent),\n (_pcent: number) => Math.min(99.999, _pcent),\n )()\n\n useEffect(() => {\n if (chartCanvasElement.current && !chartInstance) {\n const {\n gaugePointerColor = window.NETDATA.themes.current.gauge_pointer,\n gaugeStrokeColor = window.NETDATA.themes.current.gauge_stroke,\n gaugeStartColor = orderedColors[0],\n gaugeStopColor,\n gaugeGenerateGradient = false,\n } = attributes\n\n const options = {\n lines: 12, // The number of lines to draw\n angle: 0.14, // The span of the gauge arc\n lineWidth: 0.57, // The line thickness\n radiusScale: 1.0, // Relative radius\n pointer: {\n length: 0.85, // 0.9 The radius of the inner circle\n strokeWidth: 0.045, // The rotation offset\n color: gaugePointerColor, // Fill color\n },\n\n // If false, the max value of the gauge will be updated if value surpass max\n // If true, the min value of the gauge will be fixed unless you set it manually\n limitMax: true,\n limitMin: true,\n colorStart: gaugeStartColor,\n colorStop: gaugeStopColor,\n strokeColor: gaugeStrokeColor,\n generateGradient: (gaugeGenerateGradient === true), // gmosx:\n gradientType: 0,\n highDpiSupport: true, // High resolution support\n }\n\n const newChartInstance = new Gauge(chartCanvasElement.current).setOptions(options)\n\n // we will always feed a percentage (copied from old dashboard)\n newChartInstance.minValue = 0\n newChartInstance.maxValue = 100\n\n setChartInstance(newChartInstance)\n }\n }, [attributes, chartData, chartInstance, chartWidth, orderedColors])\n\n // update with value\n useEffect(() => {\n if (chartInstance) {\n // gauge animation\n const shouldUseAnimation = hoveredRow === -1 && !showUndefined\n // animation doesn't work in newest, 1.3.7 version!\n const speed = shouldUseAnimation ? 32 : 1000000000\n chartInstance.animationSpeed = speed\n setTimeout(() => {\n chartInstance.set(showUndefined ? 0 : pcent)\n }, 0)\n }\n }, [chartInstance, chartHeight, chartWidth, hoveredRow, pcent, showUndefined])\n\n const valueFontSize = Math.floor(chartHeight / 5)\n const valueTop = Math.round((chartHeight - valueFontSize) / 3.2)\n\n const titleFontSize = Math.round(valueFontSize / 2.1)\n const titleTop = 0\n\n const unitFontSize = Math.round(titleFontSize * 0.9)\n\n const minMaxFontSize = Math.round(valueFontSize * 0.75)\n return (\n <div\n id={chartElementId}\n className={chartElementClassName}\n >\n <canvas\n ref={chartCanvasElement}\n className=\"gaugeChart\"\n id={`gauge-${chartUuid}-canvas`}\n style={{\n width: chartWidth,\n height: chartHeight,\n }}\n />\n <span\n className=\"gaugeChartLabel\"\n style={{\n fontSize: valueFontSize,\n top: valueTop,\n }}\n >\n {legendFormatValue(showUndefined ? 
null : value)}\n </span>\n <span\n className=\"gaugeChartTitle\"\n style={{\n fontSize: titleFontSize,\n top: titleTop,\n }}\n >\n {attributes.title || chartMetadata.title}\n </span>\n <span\n className=\"gaugeChartUnits\"\n style={{\n fontSize: unitFontSize,\n }}\n >\n {unitsCurrent}\n </span>\n <span className=\"gaugeChartMin\" style={{ fontSize: minMaxFontSize }}>\n {legendFormatValue(showUndefined ? null : safeMin)}\n </span>\n <span className=\"gaugeChartMax\" style={{ fontSize: minMaxFontSize }}>\n {legendFormatValue(showUndefined ? null : safeMax)}\n </span>\n </div>\n )\n}\n","import {\n cond, identity, map, pipe, replace, splitEvery, T, toString,\n} from \"ramda\"\n\ntype NormalizeHex = (hex: string) => string\nexport const normalizeHex: NormalizeHex = pipe(\n toString,\n replace(/[^0-9a-f]/gi, \"\"),\n cond([\n [(str) => str.length < 6, (str) => str[0] + str[0] + str[1] + str[1] + str[2] + str[2]],\n [T, identity],\n ]),\n)\n\nexport const colorLuminance = (hex: string, lum: number = 0) => {\n const hexNormalized = normalizeHex(hex)\n\n // convert to decimal and change luminosity\n const rgb = pipe(\n // point-free version generates ts error\n (str: string) => splitEvery(2, str),\n map(\n pipe(\n (str: string) => parseInt(str, 16),\n (nr) => Math.round(\n Math.min(\n Math.max(0, nr + (nr * lum)),\n 255,\n ),\n ).toString(16),\n (str) => `00${str}`.substr(str.length),\n ),\n ),\n (x) => x.join(\"\"),\n )(hexNormalized)\n return `#${rgb}`\n}\n","/* eslint-disable indent */\n/* eslint-disable operator-linebreak */\n/* eslint-disable comma-dangle */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable consistent-return */\nimport \"jquery-sparkline\"\nimport React, { useRef, useEffect, useState } from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { colorLuminance } from \"domains/chart/utils/color-luminance\"\nimport { MS_IN_SECOND } from \"utils/utils\"\nimport { TimeRange } from \"types/common\"\n\nconst convertToTimestamp = (number: number) => {\n if (number > 0) {\n return number\n }\n return new Date().valueOf() + number // number is negative or zero\n}\n\ninterface TimeWindowCorrection {\n paddingLeftPercentage?: string\n widthRatio?: number\n}\nconst getForceTimeWindowCorrection = (\n chartData: EasyPieChartData,\n viewRange: TimeRange\n): TimeWindowCorrection => {\n const requestedAfter = convertToTimestamp(viewRange[0])\n const requestedBefore = convertToTimestamp(viewRange[1])\n const after = chartData.after * MS_IN_SECOND\n const before = chartData.before * MS_IN_SECOND\n\n const currentDuration = before - after\n const requestedDuration = requestedBefore - requestedAfter\n // don't do overrides when current (available) duration is bigger or only slightly lower\n // than requested duration\n const DURATION_CHANGE_TOLERANCE = 1.03\n if (currentDuration > requestedDuration / DURATION_CHANGE_TOLERANCE) {\n return {}\n }\n\n const widthRatio = currentDuration / requestedDuration\n\n const visibleDuration = requestedBefore - requestedAfter\n const paddingLeftPercentage = `${100 * ((after - requestedAfter) / visibleDuration)}%`\n\n return {\n paddingLeftPercentage,\n widthRatio,\n }\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n dimensionsVisibility: boolean[]\n 
isRemotelyControlled: boolean\n orderedColors: string[]\n unitsCurrent: string\n viewAfterForCurrentData: number\n viewBeforeForCurrentData: number\n}\nexport const SparklineChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n unitsCurrent,\n viewAfterForCurrentData,\n viewBeforeForCurrentData,\n}: Props) => {\n const chartElement = useRef<HTMLDivElement>(null)\n\n // update width, height automatically each time\n const [$chartElement, set$chartElement] = useState()\n const sparklineOptions = useRef<{ [key: string]: any }>()\n\n const { paddingLeftPercentage = undefined, widthRatio = 1 } = attributes.forceTimeWindow\n ? getForceTimeWindowCorrection(chartData, [viewAfterForCurrentData, viewBeforeForCurrentData])\n : {}\n\n const updateSparklineValues = () => {\n if (!$chartElement) return\n\n const { width, height } = chartContainerElement.getBoundingClientRect()\n // @ts-ignore\n $chartElement.sparkline(chartData.result, {\n ...sparklineOptions.current,\n width: Math.floor(width * widthRatio),\n height: Math.floor(height),\n })\n }\n\n // create chart\n useEffect(() => {\n const { sparklineLineColor = orderedColors[0] } = attributes\n const defaultFillColor =\n chartMetadata.chart_type === \"line\"\n ? window.NETDATA.themes.current.background\n : colorLuminance(sparklineLineColor, window.NETDATA.chartDefaults.fill_luminance)\n const chartTitle = attributes.title || chartMetadata.title\n\n const emptyStringIfDisable = (x: string | undefined) => (x === \"disable\" ? \"\" : x)\n\n const {\n sparklineType = \"line\",\n sparklineFillColor = defaultFillColor,\n sparklineDisableInteraction = false,\n sparklineDisableTooltips = false,\n sparklineDisableHighlight = false,\n sparklineHighlightLighten = 1.4,\n sparklineTooltipSuffix = ` ${unitsCurrent}`,\n sparklineNumberFormatter = (n: number) => n.toFixed(2),\n } = attributes\n\n const sparklineInitOptions = {\n type: sparklineType,\n lineColor: sparklineLineColor,\n fillColor: sparklineFillColor,\n chartRangeMin: attributes.sparklineChartRangeMin,\n chartRangeMax: attributes.sparklineChartRangeMax,\n composite: attributes.sparklineComposite,\n enableTagOptions: attributes.sparklineEnableTagOptions,\n tagOptionPrefix: attributes.sparklineTagOptionPrefix,\n tagValuesAttribute: attributes.sparklineTagValuesAttribute,\n\n disableHiddenCheck: attributes.sparklineDisableHiddenCheck,\n defaultPixelsPerValue: attributes.sparklineDefaultPixelsPerValue,\n spotColor: emptyStringIfDisable(attributes.sparklineSpotColor),\n minSpotColor: emptyStringIfDisable(attributes.sparklineMinSpotColor),\n maxSpotColor: emptyStringIfDisable(attributes.sparklineMaxSpotColor),\n spotRadius: attributes.sparklineSpotRadius,\n valueSpots: attributes.sparklineValueSpots,\n highlightSpotColor: attributes.sparklineHighlightSpotColor,\n highlightLineColor: attributes.sparklineHighlightLineColor,\n lineWidth: attributes.sparklineLineWidth,\n normalRangeMin: attributes.sparklineNormalRangeMin,\n normalRangeMax: attributes.sparklineNormalRangeMax,\n drawNormalOnTop: attributes.sparklineDrawNormalOnTop,\n xvalues: attributes.sparklineXvalues,\n chartRangeClip: attributes.sparklineChartRangeClip,\n chartRangeMinX: attributes.sparklineChartRangeMinX,\n chartRangeMaxX: attributes.sparklineChartRangeMaxX,\n disableInteraction: sparklineDisableInteraction,\n disableTooltips: sparklineDisableTooltips,\n disableHighlight: sparklineDisableHighlight,\n highlightLighten: sparklineHighlightLighten,\n 
highlightColor: attributes.sparklineHighlightColor,\n tooltipContainer: attributes.sparklineTooltipContainer,\n tooltipClassname: attributes.sparklineTooltipClassname,\n tooltipChartTitle: chartTitle,\n tooltipFormat: attributes.sparklineTooltipFormat,\n tooltipPrefix: attributes.sparklineTooltipPrefix,\n tooltipSuffix: sparklineTooltipSuffix,\n tooltipSkipNull: attributes.sparklineTooltipSkipNull,\n tooltipValueLookups: attributes.sparklineTooltipValueLookups,\n tooltipFormatFieldlist: attributes.sparklineTooltipFormatFieldlist,\n tooltipFormatFieldlistKey: attributes.sparklineTooltipFormatFieldlistKey,\n numberFormatter: sparklineNumberFormatter,\n numberDigitGroupSep: attributes.sparklineNumberDigitGroupSep,\n numberDecimalMark: attributes.sparklineNumberDecimalMark,\n numberDigitGroupCount: attributes.sparklineNumberDigitGroupCount,\n animatedZooms: attributes.sparklineAnimatedZooms,\n }\n sparklineOptions.current = sparklineInitOptions\n\n if (!chartElement.current || $chartElement) return\n\n set$chartElement(() => window.$(chartElement.current))\n }, [\n $chartElement,\n attributes,\n chartContainerElement,\n chartData.result,\n chartMetadata,\n orderedColors,\n unitsCurrent,\n widthRatio,\n ])\n\n const { sparklineOnHover } = attributes\n useEffect(() => {\n if (!$chartElement || !sparklineOnHover) return\n\n const onLeave = () => sparklineOnHover(null)\n const onChange = ({ sparklines: [sparkline] }: any) => {\n const { x, y } = sparkline.getCurrentRegionFields()\n sparklineOnHover({ x, y })\n }\n\n // @ts-ignore\n $chartElement.bind(\"sparklineRegionChange\", onChange).bind(\"mouseleave\", onLeave)\n return () => {\n // @ts-ignore\n $chartElement.unbind(\"sparklineRegionChange\", onChange).unbind(\"mouseleave\", onLeave)\n }\n }, [$chartElement, sparklineOnHover])\n\n // update chart\n useEffect(updateSparklineValues, [$chartElement, chartData.result])\n\n const style = paddingLeftPercentage\n ? 
{\n textAlign: \"initial\" as \"initial\", // :) typescript\n paddingLeft: paddingLeftPercentage,\n }\n : undefined\n\n return (\n <div ref={chartElement} id={chartElementId} className={chartElementClassName} style={style} />\n )\n}\n","import * as d3 from \"d3\"\n\nwindow.d3 = d3\n","let fetchPromise: Promise<string>\n\nconst GOOGLE_JS_API_SRC = \"https://www.google.com/jsapi\"\n\nexport const loadGoogleVisualizationApi = () => {\n if (fetchPromise) {\n return fetchPromise\n }\n fetchPromise = new Promise((resolve, reject) => {\n setTimeout(() => {\n const script = document.createElement(\"script\")\n script.type = \"text/javascript\"\n script.async = true\n script.src = GOOGLE_JS_API_SRC\n\n script.onerror = () => {\n reject(Error(\"error loading google.js api\"))\n }\n script.onload = () => {\n resolve(\"ok\")\n }\n\n const firstScript = document.getElementsByTagName(\"script\")[0] as HTMLScriptElement\n (firstScript.parentNode as Node).insertBefore(script, firstScript)\n }, 1000)\n }).then(() => new Promise((resolve) => {\n window.google.load(\"visualization\", \"1.1\", {\n packages: [\"corechart\", \"controls\"],\n callback: resolve,\n })\n }))\n return fetchPromise\n}\n","// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport \"jquery-sparkline\"\nimport React, {\n useRef, useEffect, useState,\n} from \"react\"\n\nimport \"../../utils/d3-loader\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport d3pie from \"vendor/d3pie-0.2.1-netdata-3\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport {\n ChartMetadata,\n D3pieChartData,\n} from \"domains/chart/chart-types\"\nimport { seconds4human } from \"domains/chart/utils/seconds4human\"\nimport { useDateTime } from \"utils/date-time\"\nimport { tail } from \"ramda\"\n\nconst emptyContent = {\n label: \"no data\",\n value: 100,\n color: \"#666666\",\n}\n\ntype GetDateRange = (arg: {\n chartData: D3pieChartData,\n index: number,\n localeDateString: (date: number | Date) => string,\n localeTimeString: (time: number | Date) => string,\n}) => string\nconst getDateRange: GetDateRange = ({\n chartData, index,\n localeDateString, localeTimeString,\n}) => {\n const dt = Math.round((chartData.before - chartData.after + 1) / chartData.points)\n const dtString = seconds4human(dt)\n\n const before = chartData.result.data[index].time\n const after = before - (dt * 1000)\n\n const d1 = localeDateString(after)\n const t1 = localeTimeString(after)\n const d2 = localeDateString(before)\n const t2 = localeTimeString(before)\n\n if (d1 === d2) {\n return `${d1} ${t1} to ${t2}, ${dtString}`\n }\n\n return `${d1} ${t1} to ${d2} ${t2}, ${dtString}`\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: D3pieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n dimensionsVisibility: boolean[]\n hoveredRow: number\n hoveredX: number | null\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n orderedColors: string[]\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n}\nexport const D3pieChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n orderedColors,\n setMinMax,\n unitsCurrent,\n}: Props) => {\n const chartElement = 
useRef<HTMLDivElement>(null)\n\n const legendFormatValueRef = useRef(legendFormatValue)\n legendFormatValueRef.current = legendFormatValue\n\n const [d3pieInstance, setD3pieInstance] = useState()\n const d3pieOptions = useRef<{[key: string]: any}>()\n\n const { localeDateString, localeTimeString } = useDateTime()\n\n // create chart\n useEffect(() => {\n if (chartElement.current && !d3pieInstance) {\n // d3pieSetContent\n // todo this should be set in chart.tsx, when creating hook\n setMinMax([chartData.min, chartData.max])\n // index is ROW! it's !== 0 only when selection is made\n const index = 0\n const content = tail(chartData.result.labels).map((label, i) => {\n const value = chartData.result.data[index][label]\n const color = orderedColors[i]\n return {\n label,\n value,\n color,\n }\n }).filter((x) => x.value !== null && x.value > 0)\n const safeContent = content.length > 0 ? content : emptyContent\n\n const defaultTitle = attributes.title || chartMetadata.title\n const dateRange = getDateRange({\n chartData,\n index: 0,\n localeDateString,\n localeTimeString,\n })\n const {\n d3pieTitle = defaultTitle,\n d3pieSubtitle = unitsCurrent,\n d3pieFooter = dateRange,\n d3pieTitleColor = window.NETDATA.themes.current.d3pie.title,\n d3pieTitleFontsize = 12,\n d3pieTitleFontweight = \"bold\",\n d3pieTitleFont = \"arial\",\n d3PieSubtitleColor = window.NETDATA.themes.current.d3pie.subtitle,\n d3PieSubtitleFontsize = 10,\n d3PieSubtitleFontweight = \"normal\",\n d3PieSubtitleFont = \"arial\",\n d3PieFooterColor = window.NETDATA.themes.current.d3pie.footer,\n d3PieFooterFontsize = 9,\n d3PieFooterFontweight = \"bold\",\n d3PieFooterFont = \"arial\",\n d3PieFooterLocation = \"bottom-center\",\n\n d3PiePieinnerradius = \"45%\",\n d3PiePieouterradius = \"80%\",\n d3PieSortorder = \"value-desc\",\n d3PieSmallsegmentgroupingEnabled = false,\n d3PieSmallsegmentgroupingValue = 1,\n d3PieSmallsegmentgroupingValuetype = \"percentage\",\n d3PieSmallsegmentgroupingLabel = \"other\",\n d3PieSmallsegmentgroupingColor = window.NETDATA.themes.current.d3pie.other,\n\n d3PieLabelsOuterFormat = \"label-value1\",\n d3PieLabelsOuterHidewhenlessthanpercentage = null,\n d3PieLabelsOuterPiedistance = 15,\n d3PieLabelsInnerFormat = \"percentage\",\n d3PieLabelsInnerHidewhenlessthanpercentage = 2,\n\n d3PieLabelsMainLabelColor = window.NETDATA.themes.current.d3pie.mainlabel,\n d3PieLabelsMainLabelFont = \"arial\",\n d3PieLabelsMainLabelFontsize = 10,\n d3PieLabelsMainLabelFontweight = \"normal\",\n\n d3PieLabelsPercentageColor = window.NETDATA.themes.current.d3pie.percentage,\n d3PieLabelsPercentageFont = \"arial\",\n d3PieLabelsPercentageFontsize = 10,\n d3PieLabelsPercentageFontweight = \"bold\",\n\n d3PieLabelsValueColor = window.NETDATA.themes.current.d3pie.value,\n d3PieLabelsValueFont = \"arial\",\n d3PieLabelsValueFontsize = 10,\n d3PieLabelsValueFontweight = \"bold\",\n\n d3PieLabelsLinesEnabled = true,\n d3PieLabelsLinesStyle = \"curved\",\n d3PieLabelsLinesColor = \"segment\", // \"segment\" or a hex color\n\n d3PieLabelsTruncationEnabled = false,\n d3PieLabelsTruncationTruncatelength = 30,\n\n d3PieMiscColorsSegmentstroke = window.NETDATA.themes.current.d3pie.segment_stroke,\n d3PieMiscGradientEnabled = false,\n d3PieMiscColorsPercentage = 95,\n d3PieMiscGradientColor = window.NETDATA.themes.current.d3pie.gradient_color,\n\n d3PieCssprefix = null,\n } = attributes\n\n const { width, height } = chartContainerElement.getBoundingClientRect()\n\n const initialD3pieOptions = {\n header: {\n title: {\n text: 
d3pieTitle,\n color: d3pieTitleColor,\n fontSize: d3pieTitleFontsize,\n fontWeight: d3pieTitleFontweight,\n font: d3pieTitleFont,\n },\n subtitle: {\n text: d3pieSubtitle,\n color: d3PieSubtitleColor,\n fontSize: d3PieSubtitleFontsize,\n fontWeight: d3PieSubtitleFontweight,\n font: d3PieSubtitleFont,\n },\n titleSubtitlePadding: 1,\n },\n footer: {\n text: d3pieFooter,\n color: d3PieFooterColor,\n fontSize: d3PieFooterFontsize,\n fontWeight: d3PieFooterFontweight,\n font: d3PieFooterFont,\n location: d3PieFooterLocation,\n },\n size: {\n canvasHeight: Math.floor(height),\n canvasWidth: Math.floor(width),\n pieInnerRadius: d3PiePieinnerradius,\n pieOuterRadius: d3PiePieouterradius,\n },\n data: {\n // none, random, value-asc, value-desc, label-asc, label-desc\n sortOrder: d3PieSortorder,\n smallSegmentGrouping: {\n enabled: d3PieSmallsegmentgroupingEnabled,\n value: d3PieSmallsegmentgroupingValue,\n // percentage, value\n valueType: d3PieSmallsegmentgroupingValuetype,\n label: d3PieSmallsegmentgroupingLabel,\n color: d3PieSmallsegmentgroupingColor,\n },\n\n // REQUIRED! This is where you enter your pie data; it needs to be an array of objects\n // of this form: { label: \"label\", value: 1.5, color: \"#000000\" } - color is optional\n content: safeContent,\n },\n\n\n labels: {\n outer: {\n // label, value, percentage, label-value1, label-value2, label-percentage1,\n // label-percentage2\n format: d3PieLabelsOuterFormat,\n hideWhenLessThanPercentage: d3PieLabelsOuterHidewhenlessthanpercentage,\n pieDistance: d3PieLabelsOuterPiedistance,\n },\n inner: {\n // label, value, percentage, label-value1, label-value2, label-percentage1,\n // label-percentage2\n format: d3PieLabelsInnerFormat,\n hideWhenLessThanPercentage: d3PieLabelsInnerHidewhenlessthanpercentage,\n },\n mainLabel: {\n color: d3PieLabelsMainLabelColor, // or 'segment' for dynamic color\n font: d3PieLabelsMainLabelFont,\n fontSize: d3PieLabelsMainLabelFontsize,\n fontWeight: d3PieLabelsMainLabelFontweight,\n },\n percentage: {\n color: d3PieLabelsPercentageColor,\n font: d3PieLabelsPercentageFont,\n fontSize: d3PieLabelsPercentageFontsize,\n fontWeight: d3PieLabelsPercentageFontweight,\n decimalPlaces: 0,\n },\n value: {\n color: d3PieLabelsValueColor,\n font: d3PieLabelsValueFont,\n fontSize: d3PieLabelsValueFontsize,\n fontWeight: d3PieLabelsValueFontweight,\n },\n lines: {\n enabled: d3PieLabelsLinesEnabled,\n style: d3PieLabelsLinesStyle,\n color: d3PieLabelsLinesColor,\n },\n truncation: {\n enabled: d3PieLabelsTruncationEnabled,\n truncateLength: d3PieLabelsTruncationTruncatelength,\n },\n formatter(context: any) {\n if (context.part === \"value\") {\n return legendFormatValueRef.current(context.value)\n }\n if (context.part === \"percentage\") {\n return `${context.label}%`\n }\n\n return context.label\n },\n },\n effects: {\n load: {\n effect: \"none\", // none / default\n speed: 0, // commented in the d3pie code to speed it up\n },\n pullOutSegmentOnClick: {\n effect: \"bounce\", // none / linear / bounce / elastic / back\n speed: 400,\n size: 5,\n },\n highlightSegmentOnMouseover: true,\n highlightLuminosity: -0.2,\n },\n tooltips: {\n enabled: false,\n type: \"placeholder\", // caption|placeholder\n string: \"\",\n placeholderParser: null, // function\n styles: {\n fadeInSpeed: 250,\n backgroundColor: window.NETDATA.themes.current.d3pie.tooltip_bg,\n backgroundOpacity: 0.5,\n color: window.NETDATA.themes.current.d3pie.tooltip_fg,\n borderRadius: 2,\n font: \"arial\",\n fontSize: 12,\n padding: 4,\n },\n },\n misc: {\n 
colors: {\n background: \"transparent\", // transparent or color #\n // segments: state.chartColors(),\n segmentStroke: d3PieMiscColorsSegmentstroke,\n },\n gradient: {\n enabled: d3PieMiscGradientEnabled,\n percentage: d3PieMiscColorsPercentage,\n color: d3PieMiscGradientColor,\n },\n canvasPadding: {\n top: 5,\n right: 5,\n bottom: 5,\n left: 5,\n },\n pieCenterOffset: {\n x: 0,\n y: 0,\n },\n cssPrefix: d3PieCssprefix,\n },\n callbacks: {\n onload: null,\n onMouseoverSegment: null,\n onMouseoutSegment: null,\n onClickSegment: null,\n },\n }\n // eslint-disable-next-line new-cap\n const newD3pieInstance = new d3pie(chartElement.current, initialD3pieOptions)\n d3pieOptions.current = initialD3pieOptions\n setD3pieInstance(() => newD3pieInstance)\n }\n }, [attributes, chartContainerElement, chartData, chartMetadata, d3pieInstance, legendFormatValue,\n localeDateString, localeTimeString, orderedColors, setMinMax, unitsCurrent])\n\n // update chart\n useEffect(() => {\n if (d3pieInstance && d3pieOptions.current) {\n const dateRange = getDateRange({\n chartData,\n index: 0,\n localeDateString,\n localeTimeString,\n })\n const {\n d3pieSubtitle = unitsCurrent,\n d3pieFooter = dateRange,\n } = attributes\n\n\n const isHoveredButNoData = !!hoveredX && (hoveredRow === -1)\n const slot = chartData.result.data.length - hoveredRow - 1\n\n const index = (slot < 0 || slot >= chartData.result.data.length)\n ? 0\n : slot\n\n const content = tail(chartData.result.labels).map((label, i) => {\n const value = chartData.result.data[index][label]\n const color = orderedColors[i]\n return {\n label,\n value,\n color,\n }\n }).filter((x) => x.value !== null && x.value > 0)\n const safeContent = (content.length > 0 && !isHoveredButNoData)\n ? content\n : [emptyContent]\n\n d3pieInstance.options.header.subtitle.text = d3pieSubtitle\n d3pieInstance.options.footer.text = d3pieFooter\n\n d3pieInstance.options.data.content = safeContent\n d3pieInstance.destroy()\n d3pieInstance.recreate()\n }\n }, [attributes, chartData, d3pieInstance, hoveredRow, hoveredX, localeDateString,\n localeTimeString, orderedColors, unitsCurrent])\n\n return (\n <div ref={chartElement} id={chartElementId} className={chartElementClassName} />\n )\n}\n","// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport \"peity\"\nimport React, {\n useRef, useState, useLayoutEffect,\n} from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { colorLuminance } from \"domains/chart/utils/color-luminance\"\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n orderedColors: string[]\n}\nexport const PeityChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n}: Props) => {\n const chartElement = useRef<HTMLDivElement>(null)\n\n // update width, height automatically each time\n const [$chartElement, set$chartElement] = useState()\n const peityOptions = useRef<{\n stroke: string,\n fill: string,\n strokeWidth: number,\n width: number,\n height: number,\n }>()\n\n\n // create chart\n useLayoutEffect(() => {\n if (chartElement.current && !$chartElement) {\n const $element = window.$(chartElement.current)\n\n const { width, height } = 
chartContainerElement.getBoundingClientRect()\n\n const {\n peityStrokeWidth = 1,\n } = attributes\n const peityInitOptions = {\n stroke: window.NETDATA.themes.current.foreground,\n strokeWidth: peityStrokeWidth,\n width: Math.floor(width),\n height: Math.floor(height),\n fill: window.NETDATA.themes.current.foreground,\n }\n\n set$chartElement(() => $element)\n peityOptions.current = peityInitOptions\n }\n }, [attributes, $chartElement, chartContainerElement])\n\n // update chart\n useLayoutEffect(() => {\n if ($chartElement && peityOptions.current) {\n const getFillOverride = () => (\n chartMetadata.chart_type === \"line\"\n ? window.NETDATA.themes.current.background\n : colorLuminance(orderedColors[0], window.NETDATA.chartDefaults.fill_luminance)\n )\n const updatedOptions = {\n ...peityOptions.current,\n stroke: orderedColors[0],\n // optimizatino from old dashboard, perhaps could be transformed to useMemo()\n fill: (orderedColors[0] === peityOptions.current.stroke)\n ? peityOptions.current.fill\n : getFillOverride(),\n }\n $chartElement.peity(\"line\", updatedOptions)\n peityOptions.current = updatedOptions\n }\n }, [$chartElement, chartData, chartMetadata, orderedColors])\n\n return (\n <div\n ref={chartElement}\n id={chartElementId}\n className={chartElementClassName}\n >\n {chartData.result}\n </div>\n )\n}\n","import React, {\n useRef, useState, useLayoutEffect,\n} from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { loadGoogleVisualizationApi } from \"domains/chart/utils/google-visualization-loader\"\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n orderedColors: string[]\n unitsCurrent: string\n}\nexport const GoogleChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n unitsCurrent,\n}: Props) => {\n const chartElement = useRef<HTMLDivElement>(null)\n const googleChartInstance = useRef<\n google.visualization.AreaChart |\n google.visualization.LineChart>()\n\n const [hasApiBeenLoaded, setHasApiBeenLoaded] = useState(false)\n loadGoogleVisualizationApi()\n .then(() => {\n setHasApiBeenLoaded(true)\n })\n\n const googleOptions = useRef<{[key: string]: unknown}>()\n\n // update chart\n useLayoutEffect(() => {\n if (googleChartInstance.current && googleOptions.current) {\n const dataTable = new window.google.visualization.DataTable(chartData.result)\n googleChartInstance.current.draw(dataTable, googleOptions.current)\n }\n }, [chartData])\n\n // create chart\n useLayoutEffect(() => {\n if (chartElement.current && !googleOptions.current && hasApiBeenLoaded) {\n const dataTable = new window.google.visualization.DataTable(chartData.result)\n\n const {\n title = chartMetadata.title,\n } = attributes\n const chartType = chartMetadata.chart_type\n const areaOpacity = new Map([\n [\"area\", window.NETDATA.options.current.color_fill_opacity_area],\n [\"stacked\", window.NETDATA.options.current.color_fill_opacity_stacked],\n ]).get(chartType) || 0.3\n const initialGoogleOptions = {\n colors: orderedColors,\n\n // do not set width, height - the chart resizes itself\n // width: state.chartWidth(),\n // height: state.chartHeight(),\n lineWidth: chartType === \"line\" ? 
2 : 1,\n title,\n fontSize: 11,\n hAxis: {\n // title: \"Time of Day\",\n // format:'HH:mm:ss',\n viewWindowMode: \"maximized\",\n slantedText: false,\n format: \"HH:mm:ss\",\n textStyle: {\n fontSize: 9,\n },\n gridlines: {\n color: \"#EEE\",\n },\n },\n vAxis: {\n title: unitsCurrent,\n viewWindowMode: (chartType === \"area\" || chartType === \"stacked\")\n ? \"maximized\"\n : \"pretty\",\n minValue: chartType === \"stacked\" ? undefined : -0.1,\n maxValue: chartType === \"stacked\" ? undefined : 0.1,\n direction: 1,\n textStyle: {\n fontSize: 9,\n },\n gridlines: {\n color: \"#EEE\",\n },\n },\n chartArea: {\n width: \"65%\",\n height: \"80%\",\n },\n focusTarget: \"category\",\n annotation: {\n 1: {\n style: \"line\",\n },\n },\n pointsVisible: false,\n titlePosition: \"out\",\n titleTextStyle: {\n fontSize: 11,\n },\n tooltip: {\n isHtml: false,\n ignoreBounds: true,\n textStyle: {\n fontSize: 9,\n },\n },\n curveType: \"function\" as \"function\",\n areaOpacity,\n isStacked: chartType === \"stacked\",\n }\n\n const googleInstance = [\"area\", \"stacked\"].includes(chartMetadata.chart_type)\n ? new window.google.visualization.AreaChart(chartElement.current)\n : new window.google.visualization.LineChart(chartElement.current)\n\n googleInstance.draw(dataTable, initialGoogleOptions)\n\n googleOptions.current = initialGoogleOptions\n googleChartInstance.current = googleInstance\n }\n }, [attributes, chartData.result, chartMetadata, chartElement, hasApiBeenLoaded, orderedColors,\n unitsCurrent])\n\n\n return (\n <div\n ref={chartElement}\n id={chartElementId}\n className={chartElementClassName}\n />\n )\n}\n","import React from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { EasyPieChartData } from \"domains/chart/chart-types\"\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartElementClassName: string\n chartElementId: string\n}\nexport const TextOnly = ({\n attributes,\n chartData,\n chartElementClassName,\n chartElementId,\n}: Props) => {\n const { textOnlyDecimalPlaces = 1, textOnlyPrefix = \"\", textOnlySuffix = \"\" } = attributes\n\n // Round based on number of decimal places to show\n const precision = 10 ** textOnlyDecimalPlaces\n const value = Math.round(chartData.result[0] * precision) / precision\n\n const textContent = chartData.result.length === 0 ? 
\"\" : textOnlyPrefix + value + textOnlySuffix\n\n return (\n <div id={chartElementId} className={chartElementClassName}>\n {textContent}\n </div>\n )\n}\n","/* eslint-disable no-param-reassign */\n// @ts-nocheck\n\nexport const defaultCellSize = 11\nexport const defaultPadding = 1\nexport const defaultAspectRatio = Math.round(16 / 9)\n\nexport const getCellBoxSize = (cellSize = defaultCellSize, padding = defaultPadding) =>\n cellSize - padding\nexport const getRows = (data, aspectRatio = defaultAspectRatio) =>\n Math.sqrt(data.length / aspectRatio)\nexport const getColumns = (rows, aspectRatio = defaultAspectRatio) => rows * aspectRatio\n\nexport const getXPosition = (columns, index, cellSize = defaultCellSize) =>\n Math.floor(index % columns) * cellSize\nexport const getYPosition = (columns, index, cellSize = defaultCellSize) =>\n Math.floor(index / columns) * cellSize\n\nexport const getFullWidth = (columns, cellSize = defaultCellSize) => Math.ceil(columns) * cellSize\nexport const getFullHeight = (rows, cellSize = defaultCellSize, padding = defaultCellSize) =>\n Math.ceil(rows) * cellSize + padding\n\nexport const getOffsetPosition = (offset, cellSize = defaultCellSize) =>\n Math.floor(offset / cellSize)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable no-param-reassign */\n// @ts-nocheck\nimport { getCellBoxSize, getXPosition, getYPosition, getOffsetPosition } from \"./utilities\"\n\nexport default (\n el,\n columns,\n total,\n { onMouseenter, onMouseout },\n { cellSize, cellPadding } = {}\n) => {\n let hoveredIndex = -1\n\n const getEvent = index => {\n const rect = el.getBoundingClientRect()\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n const left = rect.left + offsetX\n const top = rect.top + offsetY\n const cellBoxSize = getCellBoxSize(cellSize, cellPadding)\n\n return {\n index,\n left,\n top,\n right: left + cellBoxSize,\n bottom: top + cellBoxSize,\n width: cellBoxSize,\n height: cellBoxSize,\n offsetX,\n offsetY,\n }\n }\n\n const mouseout = () => {\n onMouseout(getEvent(hoveredIndex))\n hoveredIndex = -1\n }\n\n const mousemove = e => {\n const { offsetX, offsetY } = e\n const x = getOffsetPosition(offsetX, cellSize)\n const y = getOffsetPosition(offsetY, cellSize)\n const nextHoveredIndex = y * columns + x\n\n if (nextHoveredIndex === hoveredIndex) return\n\n if (hoveredIndex !== -1) mouseout()\n\n if (nextHoveredIndex >= total) return\n\n onMouseenter(getEvent(nextHoveredIndex))\n hoveredIndex = nextHoveredIndex\n }\n\n el.addEventListener(\"mousemove\", mousemove)\n el.addEventListener(\"mouseout\", mouseout)\n return () => {\n el.removeEventListener(\"mousemove\", mousemove)\n el.removeEventListener(\"mouseout\", mouseout)\n }\n}\n","/* eslint-disable object-curly-newline */\n/* eslint-disable comma-dangle */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable no-param-reassign */\n// @ts-nocheck\nimport { scaleLinear, extent } from \"d3\"\nimport {\n getCellBoxSize,\n getRows,\n getColumns,\n getXPosition,\n getYPosition,\n getFullWidth,\n getFullHeight,\n} from \"./utilities\"\nimport registerEvents from \"./events\"\n\nexport const getWidth = (data, { aspectRatio, cellSize } = {}) => {\n const rows = getRows(data, aspectRatio)\n const columns = getColumns(rows, aspectRatio)\n return getFullWidth(columns, cellSize)\n}\n\nconst getCanvasAttributes = (data, { aspectRatio, cellSize, padding } = {}) => {\n const rows = getRows(data, aspectRatio)\n const columns = 
getColumns(rows, aspectRatio)\n const width = getFullWidth(columns, cellSize)\n const height = getFullHeight(rows, cellSize, padding)\n\n return { width, height, columns: Math.ceil(columns) }\n}\n\nconst defaultColorRange = [\"rgba(198, 227, 246, 0.9)\", \"rgba(14, 154, 255, 0.9)\"]\n\nconst makeGetColor = (values, colorRange = defaultColorRange) =>\n scaleLinear()\n .domain(extent(values, value => value))\n .range(colorRange)\n\nexport default (el, { onMouseenter, onMouseout }, options = {}) => {\n const { cellSize, cellPadding, cellStroke = 2, lineWidth = 1, colorRange } = options\n const canvas = el.getContext(\"2d\")\n\n let activeBox = -1\n let deactivateBox = () => {}\n let activateBox = {}\n let clearEvents = () => {}\n\n const clear = () => {\n deactivateBox()\n clearEvents()\n canvas.clearRect(0, 0, el.width, el.height)\n canvas.beginPath()\n }\n\n const update = ({ data }) => {\n const { width, height, columns } = getCanvasAttributes(data, options)\n el.width = parseInt(width)\n el.height = parseInt(height)\n clear()\n clearEvents()\n const getColor = makeGetColor(data, colorRange)\n\n const drawBox = (value, index) => {\n canvas.fillStyle = getColor(value)\n\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n\n if (lineWidth && cellStroke) {\n canvas.clearRect(\n offsetX - lineWidth,\n offsetY - lineWidth,\n getCellBoxSize(cellSize, cellPadding) + cellStroke,\n getCellBoxSize(cellSize, cellPadding) + cellStroke\n )\n }\n\n canvas.fillRect(\n offsetX,\n offsetY,\n getCellBoxSize(cellSize, cellPadding),\n getCellBoxSize(cellSize, cellPadding)\n )\n }\n\n data.forEach(drawBox)\n\n clearEvents = registerEvents(\n el,\n columns,\n data.length,\n {\n onMouseenter,\n onMouseout,\n },\n options\n )\n\n deactivateBox = () => {\n if (activeBox !== -1) drawBox(data[activeBox], activeBox)\n }\n\n activateBox = index => {\n deactivateBox()\n activeBox = index\n\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n\n if (lineWidth && cellStroke) {\n canvas.lineWidth = lineWidth\n canvas.strokeStyle = \"#fff\"\n canvas.strokeRect(\n offsetX + lineWidth,\n offsetY + lineWidth,\n getCellBoxSize(cellSize, cellPadding) - cellStroke,\n getCellBoxSize(cellSize, cellPadding) - cellStroke\n )\n }\n }\n }\n\n return {\n clear,\n update,\n activateBox: index => activateBox(index),\n deactivateBox: () => deactivateBox(),\n }\n}\n","/* eslint-disable arrow-body-style */\n// @ts-nocheck\n\nexport default (el) => {\n return el.getBoundingClientRect().top / window.innerHeight > 0.5 ? 
\"top\" : \"bottom\"\n}\n","/* eslint-disable operator-linebreak */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable react/jsx-fragments */\n// @ts-nocheck\nimport React, { useRef, useLayoutEffect, Fragment, useState, useCallback } from \"react\"\nimport { Drop } from \"@netdata/netdata-ui\"\nimport drawBoxes from \"./drawBoxes\"\nimport getAlign from \"./getAlign\"\n\ninterface GroupboxData {\n data: number[]\n labels: string[]\n}\n\ninterface GroupBoxProps {\n data: GroupboxData[]\n}\n\nconst aligns = {\n top: { bottom: \"top\" },\n bottom: { top: \"bottom\" },\n}\n\nconst GroupBox = ({ data, renderTooltip, ...options }: GroupBoxProps) => {\n const dataRef = useRef()\n const canvasRef = useRef()\n const boxesRef = useRef()\n\n const [hover, setHover] = useState(null)\n const dropHoverRef = useRef(false)\n const boxHoverRef = useRef(-1)\n const timeoutId = useRef()\n\n const close = () => {\n boxesRef.current.deactivateBox()\n setHover(null)\n dropHoverRef.current = false\n boxHoverRef.current = -1\n }\n\n const closeDrop = () =>\n requestAnimationFrame(() => {\n setHover(currentHover => {\n if (\n !dropHoverRef.current &&\n (boxHoverRef.current === -1 || boxHoverRef.current !== currentHover?.index)\n ) {\n close()\n }\n return currentHover\n })\n })\n\n useLayoutEffect(() => {\n boxesRef.current = drawBoxes(\n canvasRef.current,\n {\n onMouseenter: ({ index, ...rect }) => {\n boxHoverRef.current = index\n boxesRef.current.activateBox(index)\n timeoutId.current = setTimeout(() => {\n setHover({\n target: { getBoundingClientRect: () => rect },\n index,\n rect,\n })\n }, 600)\n },\n onMouseout: () => {\n boxHoverRef.current = -1\n clearTimeout(timeoutId.current)\n closeDrop()\n },\n },\n options\n )\n return () => boxesRef.current.clear()\n }, [])\n\n useLayoutEffect(() => {\n if (\n hover &&\n dataRef.current &&\n dataRef.current.labels[hover.index] !== data.labels[hover.index]\n ) {\n close()\n }\n dataRef.current = data\n boxesRef.current.update(data)\n }, [data])\n\n const onMouseEnter = useCallback(() => {\n dropHoverRef.current = true\n }, [])\n\n const onMouseLeave = useCallback(() => {\n dropHoverRef.current = false\n closeDrop()\n }, [])\n\n const align = hover && getAlign(hover.target)\n\n return (\n <Fragment>\n <canvas data-testid=\"groupBox\" ref={canvasRef} />\n {hover && renderTooltip && (\n <Drop\n align={aligns[align]}\n target={hover.target}\n onMouseEnter={onMouseEnter}\n onMouseLeave={onMouseLeave}\n >\n {renderTooltip(hover.index, align)}\n </Drop>\n )}\n </Fragment>\n )\n}\n\nexport default GroupBox\n","/* eslint-disable operator-linebreak */\n/* eslint-disable object-curly-newline */\n/* eslint-disable arrow-body-style */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/jsx-one-expression-per-line */\n// @ts-nocheck\nimport React, { useRef, useMemo } from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, TextMicro, Popover } from \"@netdata/netdata-ui\"\nimport GroupBox from \"./groupBox\"\nimport { getWidth } from \"./drawBoxes\"\nimport getAlign from \"./getAlign\"\n\ninterface GroupBoxWrapperProps {\n data: any\n title: string\n}\n\nconst Title = styled.span`\n white-space: nowrap;\n text-overflow: ellipsis;\n overflow-x: hidden;\n`\n\nconst Label = styled(Flex).attrs({\n as: TextMicro,\n gap: 1,\n})`\n cursor: default;\n &:hover {\n font-weight: bold;\n }\n`\n\nconst GroupBoxWrapper = ({\n data,\n label,\n 
groupIndex,\n renderGroupPopover,\n renderBoxPopover,\n}: GroupBoxWrapperProps) => {\n const ref = useRef()\n const align = ref.current && getAlign(ref.current)\n\n const style = useMemo(() => ({ maxWidth: `${getWidth(data.data)}px` }), [data])\n\n const boxPopover =\n renderBoxPopover &&\n ((index, boxAlign) => renderBoxPopover({ group: label, groupIndex, align: boxAlign, index }))\n\n const groupPopover =\n renderGroupPopover && (() => renderGroupPopover({ group: label, groupIndex, align }))\n\n return (\n <Flex data-testid=\"groupBoxWrapper\" column alignItems=\"start\" gap={1} margin={[0, 4, 0, 0]}>\n <Popover content={groupPopover} align={align} plain>\n {({ isOpen, ref: popoverRef, ...rest }) => (\n <Label\n data-testid=\"groupBoxWrapper-title\"\n ref={el => {\n ref.current = el\n popoverRef(el)\n }}\n strong={isOpen}\n style={style}\n {...rest}\n >\n <Title>{label}\n {data.data.length > 3 && ({data.data.length})}\n \n )}\n \n \n \n )\n}\n\nconst GroupBoxes = ({ data, labels, renderBoxPopover, renderGroupPopover }: any) => (\n \n {labels.map((label, index) => {\n return data[index].data.length ? (\n \n ) : null\n })}\n \n)\n\nexport default GroupBoxes\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, TextNano } from \"@netdata/netdata-ui\"\n\ninterface LegendProps {\n children?: React.ReactNode\n}\n\nconst LinearColorScaleBar = styled(Flex).attrs({ width: \"120px\", height: \"12px\", round: true })`\n background: linear-gradient(to right, #c6e3f6, #0e9aff);\n`\n\nconst Legend = ({ children }: LegendProps) => (\n \n {children}\n \n 0%\n \n 100%\n \n {/* \n Alarms\n \n \n \n Warnings\n \n */}\n \n)\n\nexport default Legend\n","// @ts-nocheck\n\nconst labels = {\n k8s_cluster_id: { icon: \"cluster\", title: \"Cluster Id\" },\n k8s_node_name: { icon: \"nodes_hollow\", title: \"Node\" },\n k8s_namespace: { icon: \"cluster_spaces\", title: \"Namespace\" },\n k8s_controller_kind: { icon: \"controller_kind\", title: \"Controller Kind\" },\n k8s_controller_name: { icon: \"controller_name\", title: \"Controller Name\" },\n k8s_pod_name: { icon: \"pod\", title: \"Pod Name\" },\n k8s_container_name: { icon: \"container\", title: \"Container\" },\n}\n\nexport const labelIds = Object.keys(labels)\n\nexport default (id) => {\n if (id in labels) return labels[id]\n // k8s_custom_label -> Custom Label\n const title = id.replace(/_./g, (word) => ` ${word[1].toUpperCase()}`).replace(/^k8s /, \"\")\n return { title, icon: \"node\" }\n}\n","/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Separator = () => \n\nexport default Separator\n","/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { H5 } from \"@netdata/netdata-ui\"\n\nconst Header = props => (\n
\n)\n\nexport default Header\n","/* eslint-disable indent */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, getColor } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nexport const TabButton = styled(Button).attrs(({ active }) => ({\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n disabled: active,\n \"data-testid\": \"k8sPopoverChart-tab\",\n}))`\n &&& {\n height: initial;\n width: initial;\n padding: 2px 20px;\n ${({ active, theme }) => active && `border-bottom: 3px solid ${getColor(\"bright\")({ theme })};`}\n color: ${({ active, theme }) => getColor(active ? \"bright\" : \"separator\")({ theme })}\n }\n`\n\nconst Tabs = ({ value, onChange, ...rest }) => (\n \n onChange(\"context\")} />\n onChange(\"metrics\")} />\n \n)\n\nexport default Tabs\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, H6, makeFlex } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nconst ExpandButton = styled(makeFlex(Button)).attrs({\n icon: \"chevron_right_s\",\n label: \"More\",\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n alignItems: \"baseline\",\n gap: 1,\n direction: \"rowReverse\",\n})`\n &&& {\n padding: 0;\n margin: 0;\n font-weight: normal;\n height: initial;\n width: initial;\n\n svg {\n height: 6px;\n width: 6px;\n position: initial;\n }\n }\n`\n\nconst Section = ({ title, onExpand, children, noBorder }) => (\n \n \n
\n {title}\n
\n {onExpand && }\n
\n \n {children}\n \n \n)\n\nexport default Section\n","import { LOCALSTORAGE_HEIGHT_KEY_PREFIX } from \"domains/chart/components/resize-handler\"\n\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from \"domains/chart/utils/legend-utils\"\nimport { Attributes } from \"./transformDataAttributes\"\nimport { ChartLibraryConfig } from \"./chartLibrariesSettings\"\n\ntype GetPortalNodeStyles = (\n attributes: Attributes,\n chartSettings: ChartLibraryConfig,\n shouldAddSpecialHeight: boolean,\n) => {\n height: string | undefined,\n width: string | undefined,\n minWidth: string | undefined\n}\n\nconst getHeightFromLocalStorage = (heightID: string, isLegendOnBottom: boolean) => {\n const persitedHeight = localStorage.getItem(`${LOCALSTORAGE_HEIGHT_KEY_PREFIX}${heightID}`)\n if (persitedHeight) {\n if (Number.isNaN(Number(persitedHeight))) {\n return null\n }\n return `${isLegendOnBottom\n ? Number(persitedHeight) + LEGEND_BOTTOM_SINGLE_LINE_HEIGHT\n : persitedHeight\n }px`\n }\n\n return null\n}\n\nexport const getPortalNodeStyles: GetPortalNodeStyles = (\n attributes,\n chartSettings,\n shouldAddSpecialHeight,\n) => {\n let width\n if (typeof attributes.width === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n width = attributes.width\n } else if (typeof attributes.width === \"number\") {\n width = `${attributes.width.toString()}px`\n }\n let height\n if (chartSettings.aspectRatio === undefined) {\n if (typeof attributes.height === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n height = attributes.height\n } else if (typeof attributes.height === \"number\") {\n height = `${attributes.height.toString()}px`\n }\n }\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n const heightFromLocalStorage = attributes.heightId\n ? getHeightFromLocalStorage(attributes.heightId, isLegendOnBottom)\n : null\n\n if (heightFromLocalStorage) {\n // .replace() is for backwards compatibility - old dashboard was always doing\n // JSON.stringify when setting localStorage so many users have '\"180px\"' values set.\n // We can remove .replace() after some time\n height = heightFromLocalStorage.replace(/\"/g, \"\")\n }\n\n if (shouldAddSpecialHeight) {\n const heightOverriden = isLegendOnBottom\n ? window.innerHeight * 0.5\n : window.innerHeight * 0.4\n height = `${heightOverriden}px`\n }\n\n const chartDefaultsMinWidth = window.NETDATA.chartDefaults.min_width\n const minWidth = chartDefaultsMinWidth === null\n ? 
undefined\n : chartDefaultsMinWidth\n return {\n height,\n width,\n minWidth,\n }\n}\n","import {\n useEffect, useRef, useState, MutableRefObject,\n} from \"react\"\n\nconst globalIntersectionOptions = {\n root: null,\n rootMargin: \"0px\",\n threshold: undefined,\n}\n\ntype IntersectionCallback = (isVisible: boolean) => void\ntype Listener = {\n element: HTMLElement,\n callback: IntersectionCallback,\n}\nconst createGlobalIntersectionObserver = () => {\n let listeners: Listener[] = []\n const globalHandler = (entries: IntersectionObserverEntry[]) => {\n entries.forEach(({ isIntersecting, target }) => {\n const callback = listeners.find(({ element }) => element === target)?.callback\n if (callback) {\n callback(isIntersecting)\n }\n })\n }\n const globalObserver = new IntersectionObserver(globalHandler, globalIntersectionOptions)\n\n return {\n subscribe: (element: HTMLElement, callback: IntersectionCallback) => {\n globalObserver.observe(element)\n listeners = listeners.concat({ element, callback })\n },\n unsubscribe: (elementToUnsubscribe: HTMLElement) => {\n listeners = listeners.filter(({ element }) => element !== elementToUnsubscribe)\n },\n }\n}\nconst globalIntersectionObserver = createGlobalIntersectionObserver()\n\n\n// this hook is created for 2 reasons:\n// 1) to use the same IntersectionObserver for all charts (contrary to use-intersection from\n// react-use, which creates new observer for every hook)\n// 2) to update the isVisible state only when necessary (contrary to what \"use-in-view\" hook from\n// https://github.com/thebuilder/react-intersection-observer does)\nexport const useCommonIntersection = (\n element: HTMLElement,\n clonedChildrenRef: MutableRefObject,\n) => {\n const [isVisible, setIsVisible] = useState(false)\n const isVisibleRef = useRef(isVisible)\n // the ref is just to prevent most updates on init - charts are not visible on first intersection\n // observer callback, but it still tries to set the state. UseState does not bail out when\n // state doesn't change\n\n useEffect(() => {\n if (typeof IntersectionObserver === \"function\") {\n globalIntersectionObserver.subscribe(\n element,\n (newIsVisible) => {\n if (isVisibleRef.current !== newIsVisible) {\n if (clonedChildrenRef.current) {\n // eslint-disable-next-line no-param-reassign\n clonedChildrenRef.current.style.visibility = newIsVisible ? 
\"visible\" : \"hidden\"\n }\n\n isVisibleRef.current = newIsVisible\n // we need to mirror it in `use-state` to cause react update\n setIsVisible(newIsVisible)\n }\n },\n )\n }\n return () => {\n globalIntersectionObserver.unsubscribe(element)\n }\n }, [clonedChildrenRef, element])\n\n return isVisible\n}\n","import React from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\n\ninterface Props {\n attributes: Attributes\n}\n\n// rendered on init (for example when chart is not visible)\n// and when it's rendering after being hidden previously\nexport const InvisibleSearchableText = ({\n attributes,\n}: Props) => (\n \n {attributes.id}\n \n)\n","import React, {\n useEffect, useLayoutEffect, useState, useRef,\n} from \"react\"\nimport { useDebounce } from \"react-use\"\nimport { forEachObjIndexed } from \"ramda\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\nimport { selectDestroyOnHide, selectIsAsyncOnScroll, selectAlarm } from \"domains/global/selectors\"\nimport { getPortalNodeStyles } from \"domains/chart/utils/get-portal-node-styles\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"domains/chart/utils/chartLibrariesSettings\"\nimport { useCommonIntersection } from \"hooks/use-common-intersection\"\n\nimport { clearChartStateAction } from \"../actions\"\n\nimport { InvisibleSearchableText } from \"./invisible-searchable-text\"\n\nconst SCROLL_DEBOUNCE_ASYNC = 750\nconst SCROLL_DEBOUNCE_SYNC = 100\n\nconst cloneWithCanvas = (element: HTMLElement) => {\n const cloned = element.cloneNode(true) as HTMLElement\n const clonedCanvases = cloned.querySelectorAll(\"canvas\")\n\n element.querySelectorAll(\"canvas\")\n .forEach((oldCanvas, index) => {\n const newCanvas = clonedCanvases[index]\n const context = newCanvas.getContext(\"2d\")\n\n newCanvas.width = oldCanvas.width\n newCanvas.height = oldCanvas.height\n\n if (context) {\n context.drawImage(oldCanvas, 0, 0)\n }\n })\n return cloned\n}\n\nconst shouldCleanChartStateAlways = localStorage.getItem(\"wipe-chart-state\")\n\ninterface Props {\n attributes: Attributes\n chartUuid: string\n children: any\n portalNode: HTMLElement\n}\nexport const DisableOutOfView = ({\n attributes,\n chartUuid,\n children,\n portalNode,\n}: Props) => {\n /* when unmounting, clear redux state for this chart */\n const dispatch = useDispatch()\n useEffect(() => { // eslint-disable-line arrow-body-style\n return () => {\n dispatch(clearChartStateAction({ id: chartUuid }))\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n\n /* separate functionality - adding custom styles to portalNode */\n const chartSettings = chartLibrariesSettings[attributes.chartLibrary]\n const [hasPortalNodeBeenStyled, setHasPortalNodeBeenStyled] = useState(false)\n const isShowingAlarmOnChart = useSelector(selectAlarm)?.chartId === attributes.id\n useLayoutEffect(() => {\n if (hasPortalNodeBeenStyled) {\n return\n }\n const shouldAddSpecialHeight = isShowingAlarmOnChart\n && attributes.chartLibrary === \"dygraph\"\n && chartSettings.hasLegend(attributes)\n const styles = getPortalNodeStyles(attributes, chartSettings, shouldAddSpecialHeight)\n forEachObjIndexed((value, styleName) => {\n if (value) {\n portalNode.style.setProperty(styleName, value)\n }\n }, styles)\n // eslint-disable-next-line no-param-reassign\n portalNode.className = 
chartSettings.containerClass(attributes)\n setHasPortalNodeBeenStyled(true)\n }, [attributes, chartSettings, hasPortalNodeBeenStyled, isShowingAlarmOnChart, portalNode,\n setHasPortalNodeBeenStyled ])\n /* end of \"adding custom styles to portalNode\" */\n\n\n const destroyOnHide = useSelector(selectDestroyOnHide)\n\n const clonedChildrenRef = useRef()\n const isVisibleIntersection = useCommonIntersection(portalNode, clonedChildrenRef)\n\n // todo hook to scroll (observe on visible items) instead of changes in intersectionRatio\n // because intersectinnRatio maxes out on 1.0 when element is fully visible\n const isAsyncOnScroll = useSelector(selectIsAsyncOnScroll)\n const debounceTime = isAsyncOnScroll ? SCROLL_DEBOUNCE_ASYNC : SCROLL_DEBOUNCE_SYNC\n\n // \"should hide because of debounced scroll handler\"\n const [shouldHideDebounced, setShouldHideDebounced] = useState(!isVisibleIntersection)\n useDebounce(\n () => {\n // start rendering, when intersectionRatio is not 0 and it hasn't changed for 1500 ms\n setShouldHideDebounced(!isVisibleIntersection)\n },\n debounceTime,\n [isVisibleIntersection],\n )\n const shouldHide = isVisibleIntersection ? shouldHideDebounced : true\n\n const previousIsVisibleIntersection = useRef(isVisibleIntersection)\n if (clonedChildrenRef.current\n && previousIsVisibleIntersection.current !== isVisibleIntersection\n ) {\n previousIsVisibleIntersection.current = isVisibleIntersection\n }\n\n useEffect(() => {\n if (!isPrintMode && shouldHide && shouldCleanChartStateAlways) {\n dispatch(clearChartStateAction({ id: chartUuid }))\n }\n }, [chartUuid, dispatch, shouldHide])\n\n\n if (isPrintMode) {\n // we should show everything in this case\n return children\n }\n\n if (shouldHide) {\n // todo perhaps loader should be added here to both scenarios\n if (destroyOnHide) {\n return (\n \n )\n }\n\n if (!clonedChildrenRef.current) {\n const newClonedChildren = Array.from(portalNode.children)\n .map((child) => cloneWithCanvas(child as HTMLElement))\n\n const clonedChildrenContainer = document.createElement(\"div\")\n clonedChildrenContainer.style.visibility = \"hidden\"\n\n newClonedChildren.forEach((child) => {\n clonedChildrenContainer.appendChild(child)\n })\n\n clonedChildrenRef.current = clonedChildrenContainer\n }\n\n return (\n <>\n \n {\n if (nodeElement && clonedChildrenRef.current) {\n nodeElement.appendChild(clonedChildrenRef.current)\n }\n }}\n />\n \n )\n }\n\n if (!destroyOnHide && clonedChildrenRef.current) {\n clonedChildrenRef.current = undefined\n }\n\n return children\n}\n","import React from \"react\"\n\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport { ChartMetadata } from \"../../chart-types\"\nimport { ChartWithLoader, RenderCustomElementForDygraph } from \"../chart-with-loader\"\nimport { DisableOutOfView } from \"../disable-out-of-view\"\nimport { DropdownMenu } from \"../chart-dropdown\"\n\nexport type Props = {\n attributes: Attributes\n // warning! 
this is not the same as chartId in old dashboard\n // here, the chartID must be unique across all agents\n chartUuid: string\n uuid?: string\n portalNode: HTMLElement\n chartMetadata?: ChartMetadata | undefined\n dropdownMenu?: DropdownMenu\n renderCustomElementForDygraph?: RenderCustomElementForDygraph\n onAttributesChange?: any\n}\n\nexport const ChartContainer = ({\n attributes,\n chartMetadata,\n chartUuid,\n dropdownMenu,\n portalNode,\n renderCustomElementForDygraph,\n onAttributesChange,\n uuid,\n}: Props) => (\n \n \n \n)\n","/* eslint-disable max-len */\nimport { ChartsMetadata } from \"domains/global/types\"\nimport { AnyStringKeyT } from \"types/common\"\nimport { ChartEnriched } from \"domains/chart/chart-types\"\n\nexport interface Submenus {\n [submenus: string]: {\n charts: ChartEnriched[]\n height: number\n info: string | null\n priority: number\n title: string | null\n }\n}\n\nexport interface CorrelationMetadata {\n scoredCount?: number\n totalCount?: number\n averageScore?: number\n}\n\nexport interface Menu {\n // eslint-disable-next-line camelcase\n menu_pattern: string\n priority: number\n submenus: Submenus\n title: string\n icon: string\n info: string\n height: number\n correlationsMetadata?: CorrelationMetadata\n}\n\nexport interface Menus {\n [menu: string]: Menu\n}\n\nexport const options = {\n menus: {} as Menus,\n submenu_names: {} as {[family: string]: string},\n data: null as (ChartsMetadata | null),\n hostname: \"netdata_server\", // will be overwritten by the netdata server\n version: \"unknown\",\n release_channel: \"unknown\",\n hosts: [],\n\n duration: 0, // the default duration of the charts\n update_every: 1,\n\n chartsPerRow: 0,\n // chartsMinWidth: 1450,\n chartsHeight: 180,\n}\n\n\n// netdata standard information\nexport const netdataDashboard = {\n sparklines_registry: {} as {[key: string]: { count: number }},\n os: \"unknown\",\n\n menu: {},\n submenu: {} as {\n [family: string]: {\n info?: string | ((os: string) => string)\n title?: string\n }\n },\n context: {} as {\n [id: string]: {\n valueRange: string // examples: \"[0, 100]\", \"[null, null]\"\n height: number\n decimalDigits: number\n }},\n\n // generate a sparkline\n // used in the documentation\n sparkline(\n prefix: string, chart: string, dimension: string, units: string = \"\", suffix: string,\n ) {\n if (options.data === null || typeof options.data.charts === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart] === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart].dimensions === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart].dimensions[dimension] === \"undefined\") {\n return \"\"\n }\n\n let key = `${chart}.${dimension}`\n\n if (typeof this.sparklines_registry[key] === \"undefined\") {\n this.sparklines_registry[key] = { count: 1 }\n } else {\n this.sparklines_registry[key].count += 1\n }\n\n key = `${key}.${this.sparklines_registry[key].count}`\n\n return `${prefix}
\n (\n X${units})${suffix}`\n },\n\n gaugeChart(\n title: string, width: string, dimensions: string = \"\", colors: string = \"\",\n ) {\n return `${\"
\"\n },\n\n anyAttribute(obj: AnyStringKeyT, attr: string, key: string, def: unknown, domain?: string) {\n if (typeof (obj[key]) !== \"undefined\") {\n const config = obj[key]\n const configWithDomain = domain ? {...config, ...config[domain]} : config\n const x = configWithDomain[attr]\n\n if (x === undefined) {\n return def\n }\n\n if (typeof (x) === \"function\") {\n return x(netdataDashboard.os)\n }\n\n return x\n }\n\n return def\n },\n\n menuTitle(chart: ChartEnriched) {\n if (chart.sectionTitle) {\n return chart.sectionTitle\n }\n if (typeof chart.menu_pattern !== \"undefined\") {\n const type = chart.type || chart.id.split(\".\")[0]\n return (`${this.anyAttribute(this.menu, \"title\", chart.menu_pattern, chart.menu_pattern)\n .toString()\n } ${type.slice(-(type.length - chart.menu_pattern.length - 1)).toString()}`)\n .replace(/_/g, \" \")\n }\n\n return (this.anyAttribute(this.menu, \"title\", chart.menu, chart.menu)).toString()\n .replace(/_/g, \" \")\n },\n\n menuIcon(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"icon\", chart.menu_pattern,\n \"\").toString()\n }\n\n return this.anyAttribute(this.menu, \"icon\", chart.menu, \"\")\n },\n\n menuInfo(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"info\", chart.menu_pattern, null)\n }\n\n return this.anyAttribute(this.menu, \"info\", chart.menu, null)\n },\n\n menuHeight(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"height\", chart.menu_pattern, 1.0)\n }\n\n return this.anyAttribute(this.menu, \"height\", chart.menu, 1.0)\n },\n\n submenuTitle(menu: string, submenu: string) {\n const key = `${menu}.${submenu}`\n // console.log(key);\n const title = this.anyAttribute(this.submenu, \"title\", key, submenu)\n .toString().replace(/_/g, \" \") as string\n if (title.length > 28) {\n const a = title.substring(0, 13)\n const b = title.substring(title.length - 12, title.length)\n return `${a}...${b}`\n }\n return title\n },\n\n submenuInfo(menu: string, submenu: string) {\n const key = `${menu}.${submenu}`\n return this.anyAttribute(this.submenu, \"info\", key, null) as (string | null)\n },\n\n submenuHeight(menu: string, submenu: string, relative: number) {\n const key = `${menu}.${submenu}`\n return this.anyAttribute(this.submenu, \"height\", key, 1.0) * relative\n },\n\n contextInfo(id: string, domain?: string) {\n const x = this.anyAttribute(this.context, \"info\", id, null, domain)\n\n if (x !== null) {\n return `
${x}
`\n }\n return \"\"\n },\n\n contextValueRange(id: string) {\n if (typeof this.context[id] !== \"undefined\"\n && typeof this.context[id].valueRange !== \"undefined\"\n ) {\n try {\n return JSON.parse(this.context[id].valueRange)\n } catch (e) {\n return [null, null]\n }\n }\n return [null, null]\n },\n\n contextHeight(id: string, def: number) {\n if (typeof this.context[id] !== \"undefined\" && typeof this.context[id].height !== \"undefined\") {\n return def * this.context[id].height\n }\n return def\n },\n\n contextDecimalDigits(id: string, def: number) {\n if (typeof this.context[id] !== \"undefined\"\n && typeof this.context[id].decimalDigits !== \"undefined\"\n ) {\n return this.context[id].decimalDigits\n }\n return def\n },\n}\n\n// @ts-ignore\nwindow.netdataDashboard = netdataDashboard\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/destructuring-assignment */\n/* eslint-disable operator-linebreak */\n/* eslint-disable arrow-body-style */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { memo } from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, Text } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { useFormatters } from \"domains/chart/utils/formatters\"\nimport { netdataDashboard } from \"domains/dashboard/utils/netdata-dashboard\"\nimport { selectChartData } from \"domains/chart/selectors\"\n\nconst Title = styled(Text)`\n text-overflow: ellipsis;\n max-width: 120px;\n overflow-x: hidden;\n`\n\nconst getUnitSign = unit => {\n return unit === \"percentage\" ? \"%\" : ` ${unit.replace(/milliseconds/, \"ms\")}`\n}\n\nconst aggrMethods = {\n avg: \"Average\",\n sum: \"Sum\",\n min: \"Min\",\n max: \"Max\",\n}\n// look up the label directly, so unknown methods yield \"\" instead of the string \"undefined\"\nconst getAggregation = value => aggrMethods[value] || \"\"\n\nconst ChartValueContainer = memo(({ id, units, aggrMethod, displayedIndex }) => {\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id }))\n\n const value =\n typeof displayedIndex === \"number\"\n ? 
chartData.result[displayedIndex]\n : chartData.view_latest_values[0]\n\n const { legendFormatValue, unitsCurrent } = useFormatters({\n attributes: {},\n data: chartData,\n units,\n unitsCommon: null,\n unitsDesired: null,\n uuid: id,\n })\n\n const aggregation = getAggregation(aggrMethod)\n\n return (\n \n {aggregation && (\n \n {aggregation}\n \n )}\n {legendFormatValue(value)}\n {getUnitSign(unitsCurrent)}\n \n )\n})\n\nconst ChartValue = ({ id, ...rest }) => {\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id }))\n\n if (!chartData || chartData.result.length === 0) return null\n return \n}\n\nconst ChartOverview = ({ id, chartMetadata, aggrMethod, displayedIndex }) => {\n const { units, context } = chartMetadata\n const title = context.replace(/cgroup\\./, \"\")\n const icon = netdataDashboard.menuIcon(chartMetadata)\n\n return (\n \n \n \n {title}\n \n \n \n )\n}\n\nexport default memo(ChartOverview)\n","import React, { useMemo } from \"react\"\nimport Anchor from \"@/src/components/anchor\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { useLocalStorage } from \"react-use\"\nimport { utmUrlSuffix } from \"utils/utils\"\nimport { utmParametersToString } from \"domains/global/selectors\"\n\nexport type UserStatus = \"LOGGED_IN\" | \"EXPIRED_LOGIN\" | \"UNKNOWN\"\nexport type NodeClaimedStatus = \"NOT_CLAIMED\" | \"CLAIMED\"\nexport type UserNodeAccess = \"NO_ACCESS\" | \"ACCESS_OK\"\ntype UserPreference = \"AGENT\" | \"CLOUD\" | \"UNDEFINED\"\nexport type NodeLiveness = \"LIVE\" | \"NOT_LIVE\"\ntype CTATYPE = \"NAVIGATE\" | \"REFRESH\"\n\nexport enum MigrationModalPromos {\n PROMO_SIGN_IN_CLOUD = \"PROMO_SIGN_IN_CLOUD\",\n PROMO_SIGN_UP_CLOUD = \"PROMO_SIGN_UP_CLOUD\",\n PROMO_IVNITED_TO_SPACE = \"PROMO_IVNITED_TO_SPACE\",\n PROMO_CLAIM_NODE = \"PROMO_CLAIM_NODE\",\n PROMO_TO_USE_NEW_DASHBAORD = \"PROMO_TO_USE_NEW_DASHBAORD\",\n FALLBACK_TO_AGENT = \"FALLBACK_TO_AGENT\",\n NO_INFO_FALLBACK_TO_AGENT = \"NO_INFO_FALLBACK_TO_AGENT\",\n}\n\ntype MigrationModalActions = {\n text: string\n action: CTATYPE\n toPath?: string\n userPreference?: UserPreference | \"DONT_SHOW\"\n}\n\ntype MigrationModalContent = {\n title: string\n text: {\n header: ((props: any) => React.ReactNode) | string\n bullets?: Array React.ReactNode)>\n footer?: ((props: any) => React.ReactNode) | string\n }\n tickBoxOption: { text: string; preferenceID: MigrationModalPromos }\n CTA1: MigrationModalActions\n CTA2?: MigrationModalActions\n}\n\ntype MigrationModalInfo = {\n [key in MigrationModalPromos]: MigrationModalContent\n}\n\nconst campaign = \"agent_nudge_to_cloud\"\n\nconst makeUTMParameters = (modalPromo: MigrationModalPromos) =>\n `${utmUrlSuffix}${utmParametersToString({\n content: modalPromo,\n campaign,\n })}`\n\nexport const migrationmodalInfo: MigrationModalInfo = {\n [MigrationModalPromos.PROMO_SIGN_UP_CLOUD]: {\n title: \"Learn about Netdata Cloud!\",\n text: {\n header: () => (\n \n Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:\n \n ),\n bullets: [\n \"Infrastructure level dashboards (each chart aggregates data from multiple nodes)\",\n \"Central dispatch of alert notifications\",\n \"Custom dashboards editor\",\n \"Intelligence assisted troubleshooting, to help surface the root cause of issues\",\n ],\n footer: \"Have a look, you will be surprised!\",\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_SIGN_UP_CLOUD,\n },\n CTA1: {\n text: \"Wow! 
Let’s go to Netdata Cloud\",\n toPath: \"path/signup/cloud\",\n action: \"NAVIGATE\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_SIGN_IN_CLOUD]: {\n title: \"Sign in to Netdata Cloud or get an invitation!\",\n text: {\n header: () => (\n <>\n \n This node is connected to Netdata Cloud but you are not. If you have a Netdata Cloud\n account, sign in; if not, ask for an invitation to it.\n \n\n \n Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:\n \n \n ),\n bullets: [\n \"Infrastructure level dashboards (each chart aggregates data from multiple nodes)\",\n \"Central dispatch of alert notifications\",\n \"Custom dashboards editor\",\n \"Intelligence assisted troubleshooting, to help surface the root cause of issues\",\n ],\n footer: \"Have a look, you will be surprised!\",\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_SIGN_IN_CLOUD,\n },\n CTA1: {\n text: \"Sign in or get a Netdata Cloud account\",\n action: \"NAVIGATE\",\n toPath: \"path/signin/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n toPath: \"path/agent-dashboard\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_IVNITED_TO_SPACE]: {\n title: \"Get an invitation to this Node’s Space!\",\n text: {\n header: () => (\n \n This node is connected to Netdata Cloud but it isn't available on one of your Spaces.\n \n ),\n bullets: [],\n footer: \"Ask for an invitation to this Space!\",\n },\n tickBoxOption: {\n text: \"Don't remind me of this again\",\n preferenceID: MigrationModalPromos.PROMO_IVNITED_TO_SPACE,\n },\n CTA1: {\n text: \"Thanks, stay at Agent dashboard for now\",\n toPath: \"agent\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_CLAIM_NODE]: {\n title: \"This node isn’t connected to Netdata Cloud\",\n text: {\n header: () => (\n \n For you to be able to see this node on Netdata Cloud, you will need to either:\n \n ),\n footer: \"Have a look, you will be surprised!\",\n bullets: [\n () => {\n return (\n \n {\" \"}\n Connect this node directly (documentation on{\" \"}\n \n how to connect a node\n \n ), or\n \n )\n },\n () => {\n return (\n \n Activate streaming to a parent node that is already connected (documentation on{\" \"}\n \n how to configure streaming\n \n )\n \n )\n },\n ],\n },\n tickBoxOption: {\n text: \"Remember my choice.\",\n preferenceID: MigrationModalPromos.PROMO_CLAIM_NODE,\n },\n CTA1: {\n text: \"Wow! Let’s go to Netdata Cloud\",\n action: \"NAVIGATE\",\n toPath: \"path/node/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD]: {\n title: \"Use the Old or the New dashboard?\",\n text: {\n header: () => (\n \n This node is available in your Netdata Cloud account. So, you have full access to the NEW\n dashboards, charts, intelligence-assisted troubleshooting and much more!\n \n ),\n bullets: [],\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD,\n },\n CTA1: {\n text: \"Wow! 
Let’s go to Netdata Cloud \",\n action: \"NAVIGATE\",\n toPath: \"path/dashboard/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.FALLBACK_TO_AGENT]: {\n title: \"Oops! This node has lost connection to Netdata Cloud!\",\n text: {\n header: ({ date = \"\" }) => {\n return (\n <>\n \n Unfortunately, it seems that this node is not currently connected to Netdata Cloud.\n So, the old agent dashboard is the only option available.\n \n {/* \n The node lost its Netdata Cloud connection at {date}.\n */}\n \n To troubleshoot Netdata Cloud connection issues, please follow{\" \"}\n \n this guide.\n \n \n \n )\n },\n bullets: [],\n },\n tickBoxOption: {\n text: \"Don't show this again\",\n preferenceID: MigrationModalPromos.FALLBACK_TO_AGENT,\n },\n CTA1: {\n text: \"Check again please\",\n action: \"REFRESH\",\n userPreference: undefined,\n },\n CTA2: {\n text: \"Thanks, stay at Agent dashboard\",\n toPath: \"path/agent\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT]: {\n title: \"Oops! We aren't able to get information about this node regarding Netdata Cloud!\",\n text: {\n header: () => {\n return (\n <>\n \n Unfortunately, it seems we aren't able to get information about this node regarding\n Netdata Cloud.\n \n \n This could be due to internet connectivity issues on your end or a temporary issue\n with our services. So, the old agent dashboard is the only option available.\n \n \n )\n },\n bullets: [],\n },\n tickBoxOption: {\n text: \"Don't show this again\",\n preferenceID: MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT,\n },\n CTA1: {\n text: \"Check again please\",\n action: \"REFRESH\",\n userPreference: undefined,\n },\n CTA2: {\n text: \"Thanks, stay at Agent dashboard\",\n toPath: \"path/agent-dashboard\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n}\n\nexport type PromoProps = {\n userSavedPreference?: UserPreference\n userStatus?: UserStatus\n nodeClaimedStatus?: NodeClaimedStatus\n userNodeAccess?: UserNodeAccess\n nodeLiveness?: NodeLiveness\n}\n\nconst isPromoSignUp = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" && userStatus === \"UNKNOWN\" && nodeClaimedStatus === \"NOT_CLAIMED\"\n\nconst isPromoSignIn = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" && userStatus === \"UNKNOWN\" && nodeClaimedStatus === \"CLAIMED\"\n\nconst isPromoInvitedToSpace = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"CLAIMED\" &&\n userNodeAccess === \"NO_ACCESS\"\n\nconst isPromoToClaimThisNode = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"NOT_CLAIMED\"\n\nconst isPromoToNewDasboardOnCloud = ({\n userSavedPreference,\n userStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n !userSavedPreference &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeLiveness === 
\"LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nconst isNoInfoFallbackToAgent = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference === \"CLOUD\" &&\n !userStatus &&\n !nodeClaimedStatus &&\n !nodeLiveness &&\n !userNodeAccess\n\nconst isFallbackToAgent = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"CLAIMED\" &&\n nodeLiveness === \"NOT_LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nexport const goToAgentDashboard = ({ userSavedPreference }: PromoProps) =>\n userSavedPreference === \"AGENT\"\n\nexport const goToCloud = ({\n userSavedPreference,\n userStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps) =>\n userSavedPreference === \"CLOUD\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeLiveness === \"LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nconst modalStatusWithPromoFunctions: Record<\n MigrationModalPromos,\n (props: PromoProps) => boolean\n> = {\n [MigrationModalPromos.FALLBACK_TO_AGENT]: isFallbackToAgent,\n [MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT]: isNoInfoFallbackToAgent,\n [MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD]: isPromoToNewDasboardOnCloud,\n [MigrationModalPromos.PROMO_CLAIM_NODE]: isPromoToClaimThisNode,\n [MigrationModalPromos.PROMO_IVNITED_TO_SPACE]: isPromoInvitedToSpace,\n [MigrationModalPromos.PROMO_SIGN_IN_CLOUD]: isPromoSignIn,\n [MigrationModalPromos.PROMO_SIGN_UP_CLOUD]: isPromoSignUp,\n}\n\nconst useMigrationModal = ({\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n nodeLiveness,\n}: PromoProps) => {\n const [userSavedPreference, setUserPrefrence] = useLocalStorage(\n \"USER_SAVED_PREFERENCE\"\n )\n\n const migrationModalPromo = useMemo(() => {\n return Object.keys(modalStatusWithPromoFunctions).find(modalStatus => {\n return modalStatusWithPromoFunctions[modalStatus]({\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n userSavedPreference,\n nodeLiveness,\n })\n }) as MigrationModalPromos\n }, [userStatus, nodeClaimedStatus, userNodeAccess, nodeLiveness, userSavedPreference])\n\n return {\n migrationModalPromoInfo: migrationmodalInfo[migrationModalPromo],\n migrationModalPromo,\n setUserPrefrence,\n userSavedPreference,\n }\n}\n\nexport default useMigrationModal\n","/* eslint-disable comma-dangle */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useRef, useContext, useLayoutEffect, useState, memo, useMemo } from \"react\"\nimport { throttle } from \"throttle-debounce\"\nimport { ChartContainer } from \"domains/chart/components/chart-container\"\nimport { ThemeContext } from \"styled-components\"\nimport { Flex, getColor } from \"@netdata/netdata-ui\"\nimport ChartOverview from \"./chartOverview\"\n\nconst Chart = ({ groupLabel, postGroupLabel, id, attributes, relatedIndex }) => {\n const theme = useContext(ThemeContext)\n const chartContainerRef = useRef()\n const [displayedIndex, setDisplayedIndex] = useState()\n const setDisplayedIndexThrottled = useMemo(() => throttle(400, setDisplayedIndex), [])\n const [, repaint] = useState()\n\n useLayoutEffect(() => {\n repaint(true)\n }, [])\n\n const { chartMetadata, attributes: relatedChartAttributes } = 
attributes.relatedCharts[\n relatedIndex\n ]\n\n const chartAttributes = useMemo(\n () => ({\n id: chartMetadata.id,\n\n width: \"100%\",\n height: \"60px\",\n\n chartLibrary: \"sparkline\",\n sparklineLineWidth: \"2px\",\n sparklineLineColor: getColor(\"border\")({ theme }),\n sparklineFillColor: getColor(\"disabled\")({ theme }),\n sparklineSpotRadius: 0,\n sparklineDisableTooltips: true,\n sparklineOnHover: (event) => setDisplayedIndexThrottled(event?.x),\n\n httpMethod: \"POST\",\n host: attributes.host,\n nodeIDs: attributes.nodeIDs,\n dimensions: relatedChartAttributes.dimensions,\n aggrMethod: relatedChartAttributes.aggrMethod,\n\n labels: {\n k8s_cluster_id: [chartMetadata.chartLabels.k8s_cluster_id[0]],\n [attributes.groupBy]: [groupLabel],\n ...(postGroupLabel && { [attributes.postGroupBy]: [postGroupLabel] }),\n },\n }),\n [chartMetadata, attributes]\n )\n\n return (\n \n \n {chartContainerRef.current && (\n \n )}\n \n \n \n )\n}\n\nexport default memo(Chart)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport styled from \"styled-components\"\nimport { Text, Flex, Icon } from \"@netdata/netdata-ui\"\n\nconst ExternalButton = styled(Icon).attrs({\n margin: [0, 0, 0, \"auto\"],\n color: \"bright\",\n width: \"10px\",\n height: \"10px\",\n alignSelf: \"center\",\n name: \"nav_arrow_goto\",\n role: \"button\",\n title: \"Go to node\",\n \"data-testid\": \"k8sPopoverItem-externalButton\",\n})`\n cursor: pointer;\n`\n\nconst Item = ({ icon, title, secondary, onClick }) => (\n \n \n \n \n \n {title}\n \n {secondary && (\n \n {secondary}\n \n )}\n {onClick && }\n \n)\n\nexport default Item\n","/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { useDateTime } from \"utils/date-time\"\nimport Item from \"./item\"\nimport Section from \"./section\"\n\nconst DateItem = ({ date, title }) => {\n const { localeDateString, localeTimeString } = useDateTime()\n\n return (\n \n )\n}\n\nconst DateSection = ({ before, after }) => (\n
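// The sparkline hover handler above funnels mousemove through a 400 ms
// throttle so hovering does not re-render the overview on every event.
// A minimal sketch, using the same throttle-debounce package imported above:
import { useMemo, useState } from "react"
import { throttle } from "throttle-debounce"

const useThrottledIndex = (delayMs = 400) => {
  const [index, setIndex] = useState<number | undefined>(undefined)
  // memoize so the throttle window survives re-renders
  const setIndexThrottled = useMemo(() => throttle(delayMs, setIndex), [delayMs])
  return [index, setIndexThrottled] as const
}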
\n \n \n
\n)\n\nexport default DateSection\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport Section from \"./section\"\nimport Chart from \"./chart\"\nimport DateSection from \"./dateSection\"\n\nconst Metrics = ({ groupLabel, postGroupLabel, attributes, viewAfter, viewBefore }) => (\n \n \n
\n \n {attributes.relatedCharts.map(({ chartMetadata }, index) => (\n \n ))}\n \n
\n
\n)\n\nexport default Metrics\n","/* eslint-disable no-param-reassign */\n/* eslint-disable comma-dangle */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { memo } from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport Item from \"./item\"\nimport Section from \"./section\"\nimport getLabel, { labelIds } from \"./getLabel\"\n\nconst LabelsSection = ({ labelId, items, onExpand, onItemClick, ...rest }) => {\n const { title, icon } = getLabel(labelId)\n const sliced = items.slice(0, 3)\n const expandable = items.length > 3\n\n const text = expandable ? `${title} (${items.length})` : title\n return (\n
\n {sliced.map((item) => (\n onItemClick(item))}\n />\n ))}\n
\n )\n}\n\nconst getLabelIds = (chartLabels) => {\n chartLabels = { ...chartLabels }\n const predefinedLabelIds = labelIds.reduce((acc, labelId) => {\n if (!(labelId in chartLabels)) return acc\n\n delete chartLabels[labelId]\n return [...acc, labelId]\n }, [])\n\n return [...predefinedLabelIds, ...Object.keys(chartLabels)]\n}\n\nconst Context = ({ chartLabels, onExpand, onNodeClick }) => {\n const ids = getLabelIds(chartLabels)\n\n return (\n \n {ids.map((id, index) => (\n onExpand(id)}\n noBorder={index === ids.length - 1}\n onItemClick={id === \"k8s_node_name\" && onNodeClick}\n />\n ))}\n \n )\n}\n\nexport default memo(Context)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, makeFlex } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\nimport Separator from \"./separator\"\nimport Header from \"./header\"\nimport Item from \"./item\"\nimport getLabel from \"./getLabel\"\n\nconst StyledButton = styled(makeFlex(Button)).attrs({\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n alignItems: \"start\",\n gap: 1,\n})`\n &&& {\n padding: 0;\n margin: 0;\n height: initial;\n width: initial;\n\n svg {\n height: 18px;\n width: 18px;\n position: initial;\n }\n }\n`\n\nconst List = ({ labelId, items, onBack, onItemClick }) => {\n const { title, icon } = getLabel(labelId)\n\n return (\n \n
\n \n
\n \n \n {items.map((item) => (\n onItemClick(item))}\n />\n ))}\n
\n \n )\n}\n\nexport default List\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useState } from \"react\"\nimport { Flex, DropContainer } from \"@netdata/netdata-ui\"\nimport Separator from \"./separator\"\nimport Header from \"./header\"\nimport Tabs from \"./tabs\"\nimport Metrics from \"./metrics\"\nimport Context from \"./context\"\nimport List from \"./list\"\n\nconst Container = (props) => (\n \n)\n\nconst TabsContainer = ({ label, value, onChange, children }) => (\n \n
{label}
\n \n \n \n {children}\n \n
\n)\n\nconst Popover = ({\n title,\n groupLabel,\n postGroupLabel,\n chartLabels,\n attributes,\n viewBefore,\n viewAfter,\n ...rest\n}) => {\n const [view, setView] = useState(\"context\")\n\n const isLabelView = view !== \"context\" && view !== \"metrics\"\n\n const { onNodeClick } = attributes\n\n return (\n \n {isLabelView && (\n setView(\"context\")}\n onItemClick={view === \"k8s_node_name\" && onNodeClick}\n />\n )}\n {!isLabelView && (\n \n {view === \"context\" && (\n \n )}\n {view === \"metrics\" && (\n \n )}\n \n )}\n \n )\n}\n\nexport default Popover\n","/* eslint-disable arrow-body-style */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable comma-dangle */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useMemo } from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { ChartMetadata } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes.ts\"\nimport { ChartTimeframe } from \"domains/chart/components/chart-legend-bottom\"\nimport GroupBoxes from \"domains/chart/components/lib-charts/group-box-chart/groupBoxes\"\nimport Legend from \"domains/chart/components/lib-charts/group-box-chart/legend\"\nimport getLabel from \"./getLabel\"\nimport transform from \"./transform\"\nimport Popover from \"./popover\"\n\ninterface Props {\n chartData: any\n chartMetadata: ChartMetadata\n attributes: Attributes\n viewAfter: number\n viewBefore: number\n hoveredRow: number\n hoveredX: number | null\n showUndefined: boolean\n}\n\nconst Kubernetes = ({\n chartData,\n chartMetadata,\n attributes,\n viewAfter,\n viewBefore,\n hoveredRow,\n hoveredX,\n showUndefined,\n}: Props) => {\n const { filteredRows } = attributes\n const { data: groupBoxData, labels, chartLabels } = useMemo(\n () => transform(chartData, filteredRows),\n [filteredRows, chartData]\n )\n\n const {\n id,\n result: { data },\n groupBy,\n postGroupBy,\n } = chartData\n\n const renderBoxPopover = ({ groupIndex, index, align }) => {\n const label = groupBoxData[groupIndex].labels[index]\n const { title } = getLabel(postGroupBy)\n\n return (\n \n )\n }\n\n const renderGroupPopover = ({ groupIndex, align }) => {\n const label = labels[groupIndex]\n const { title } = getLabel(groupBy)\n\n return (\n \n )\n }\n\n const groupedBoxesData = useMemo(() => {\n return groupBoxData.map((groupedBox) => {\n return {\n labels: groupedBox.labels,\n data:\n hoveredRow === -1 || hoveredRow > data.length || !(hoveredRow in data)\n ? 
groupedBox.postAggregated\n : groupedBox.indexes.map((index) => data[hoveredRow][index + 1]) || [],\n }\n })\n }, [data, groupBoxData, hoveredRow])\n\n return (\n \n \n \n {id}\n \n \n \n )\n}\n\nexport default Kubernetes\n","/* eslint-disable arrow-body-style */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable comma-dangle */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\n\nexport default (chartData, filteredRows) => {\n const { keys, labels: labelValues, groupBy, postGroupBy, aggrGroups, postAggregated } = chartData\n const groupValues = keys[groupBy]\n const postGroupValues = keys[postGroupBy]\n const indexes = filteredRows || [...Array(groupValues.length)].map((v, index) => index)\n\n const postGroupData = indexes.reduce((acc: any, index: number) => {\n const groupValue = groupValues[index]\n if (!(groupValue in acc)) {\n acc[groupValue] = {\n labels: [],\n indexes: [],\n chartLabels: [],\n postAggregated: [],\n }\n }\n const boxes = acc[groupValue]\n boxes.indexes.push(index)\n boxes.labels.push(postGroupValues[index])\n boxes.postAggregated.push(postAggregated[index])\n\n const chartLabels = aggrGroups.reduce((labelsAcc, label) => {\n return labelValues[label][index]\n ? { ...labelsAcc, [label]: labelValues[label][index] }\n : labelsAcc\n }, {})\n boxes.chartLabels.push(chartLabels)\n return acc\n }, {})\n\n const labels = Object.keys(postGroupData).sort(\n (a, b) => postGroupData[b].indexes.length - postGroupData[a].indexes.length\n )\n\n const groupData = labels.map((label) => postGroupData[label])\n\n const groupChartLabels = groupData.map((boxes) => {\n return aggrGroups.reduce((acc, label) => {\n const groupLabels = new Set(\n boxes.chartLabels.reduce((accChartLabels, chartLabels) => {\n return chartLabels[label] ? [...accChartLabels, ...chartLabels[label]] : accChartLabels\n }, [])\n )\n return groupLabels.size === 0 ? 
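// transform() above buckets chart columns by their group label and then orders
// the buckets by size, largest first. The same reduce-then-sort shape detached
// from the chart payload (a sketch, not the dashboard's helper):
const groupBySorted = <T>(items: T[], keyOf: (item: T) => string): Record<string, T[]> => {
  const buckets = items.reduce<Record<string, T[]>>((acc, item) => {
    const key = keyOf(item)
    acc[key] = acc[key] || []
    acc[key].push(item)
    return acc
  }, {})
  // largest buckets first, mirroring the labels sort in transform()
  return Object.fromEntries(
    Object.entries(buckets).sort(([, a], [, b]) => b.length - a.length)
  )
}
// groupBySorted(["a", "b", "b"], x => x) -> { b: ["b", "b"], a: ["a"] }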
acc : { ...acc, [label]: Array.from(groupLabels) }\n }, {})\n })\n\n return { labels, data: groupData, chartLabels: groupChartLabels }\n}\n","import React, { useCallback } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { setGlobalChartUnderlayAction, setGlobalPanAndZoomAction } from \"domains/global/actions\"\nimport { selectSyncPanAndZoom } from \"domains/global/selectors\"\nimport { setChartPanAndZoomAction } from \"domains/chart/actions\"\nimport { useShowValueOutside } from \"hooks/use-show-value-outside\"\n\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport {\n ChartData, ChartMetadata, DygraphData, EasyPieChartData, D3pieChartData,\n} from \"../chart-types\"\nimport { chartLibrariesSettings, ChartLibraryName } from \"../utils/chartLibrariesSettings\"\n\nimport { DygraphChart } from \"./lib-charts/dygraph-chart\"\nimport { EasyPieChart } from \"./lib-charts/easy-pie-chart\"\nimport { GaugeChart } from \"./lib-charts/gauge-chart\"\nimport { SparklineChart } from \"./lib-charts/sparkline-chart\"\nimport { D3pieChart } from \"./lib-charts/d3pie-chart\"\nimport { PeityChart } from \"./lib-charts/peity-chart\"\nimport { GoogleChart } from \"./lib-charts/google-chart\"\nimport { TextOnly } from \"./lib-charts/text-only\"\nimport { KubernetesGroupBoxes } from \"./lib-charts/group-box-chart\"\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: ChartData\n chartMetadata: ChartMetadata\n chartLibrary: ChartLibraryName\n colors: {\n [key: string]: string\n }\n chartUuid: string\n chartHeight: number\n chartWidth: number\n dimensionsVisibility: boolean[]\n hasEmptyData: boolean\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n orderedColors: string[]\n hoveredX: number | null\n onUpdateChartPanAndZoom: (arg: { after: number, before: number, masterID: string }) => void\n immediatelyDispatchPanAndZoom: () => void\n\n hoveredRow: number\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: [number, number]) => void\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewAfterForCurrentData: number,\n viewBeforeForCurrentData: number,\n}\n\nexport const AbstractChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartLibrary,\n colors,\n chartUuid,\n chartHeight,\n chartWidth,\n dimensionsVisibility,\n hasEmptyData,\n isRemotelyControlled,\n legendFormatValue,\n orderedColors,\n hoveredRow,\n hoveredX,\n onUpdateChartPanAndZoom,\n immediatelyDispatchPanAndZoom,\n setHoveredX,\n setMinMax,\n showLatestOnBlur,\n unitsCurrent,\n viewAfterForCurrentData,\n viewBeforeForCurrentData,\n}: Props) => {\n const dispatch = useDispatch()\n\n const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n const setGlobalChartUnderlay = useCallback(({ after, before, masterID }) => {\n dispatch(setGlobalChartUnderlayAction({ after, before, masterID }))\n\n // freeze charts\n // don't send masterID, so no padding is applied\n if (isSyncPanAndZoom) {\n dispatch(setGlobalPanAndZoomAction({\n after: viewAfterForCurrentData,\n before: viewBeforeForCurrentData,\n }))\n } else {\n dispatch(setChartPanAndZoomAction({\n after: viewAfterForCurrentData,\n before: viewBeforeForCurrentData,\n id: chartUuid,\n }))\n }\n }, [chartUuid, dispatch, isSyncPanAndZoom, viewAfterForCurrentData, viewBeforeForCurrentData])\n\n const chartSettings = 
chartLibrariesSettings[chartLibrary]\n const { hasLegend } = chartSettings\n const chartElementClassName = hasLegend(attributes)\n ? classNames(\n `netdata-chart-with-legend-${attributes.legendPosition || \"right\"}`,\n `netdata-${chartLibrary}-chart-with-legend-right`,\n )\n : classNames(\n \"netdata-chart\",\n `netdata-${chartLibrary}-chart`,\n )\n const chartElementId = `${chartLibrary}-${chartUuid}-chart`\n const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n\n useShowValueOutside({\n attributes, chartData, chartSettings, hoveredRow, legendFormatValue, showUndefined,\n })\n\n if (chartLibrary === \"easypiechart\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"gauge\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"sparkline\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"d3pie\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"peity\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"google\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"textonly\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"groupbox\") {\n return (\n \n )\n }\n\n return (\n \n )\n}\n","import { useEffect, useRef } from \"react\"\nimport { isEmpty } from \"ramda\"\nimport { useMount } from \"react-use\"\n\nimport { ChartData, DygraphData } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartLibraryConfig } from \"domains/chart/utils/chartLibrariesSettings\"\n\n\ninterface UseShowValueOutsideArgument {\n attributes: Attributes\n chartData: ChartData\n chartSettings: ChartLibraryConfig\n hoveredRow: number\n legendFormatValue: ((v: number | string | null) => number | string)\n showUndefined: boolean\n}\n\n// example of the attribute:\n// show-value-of-iowait-at: \"system.cpu.iowait.1\"\n\nexport const useShowValueOutside = ({\n attributes,\n chartData,\n chartSettings,\n hoveredRow,\n legendFormatValue,\n showUndefined,\n}: UseShowValueOutsideArgument) => {\n // a ref to store found elements, just once per lifetime of component\n const showValueAttributesNodes = useRef<(HTMLElement | null)[]>([])\n\n // find the nodes that will have populated values\n useMount(() => {\n const { showValueOf } = attributes\n // showValueOf will be undefined if not used, but additional isEmpty check can prevent\n // regression performance issue in the future\n if (!showValueOf || isEmpty(showValueOf)) {\n return\n }\n const dimensionNames = chartData.dimension_names\n const dimensionIds = chartData.dimension_ids\n dimensionNames.forEach((dimensionName, i) => {\n const userElementId = showValueOf[`show-value-of-${dimensionName.toLowerCase()}`]\n || showValueOf[`show-value-of-${dimensionIds[i].toLowerCase()}-at`]\n\n // if element is not found, just add null\n showValueAttributesNodes.current = showValueAttributesNodes.current.concat(\n document.getElementById(userElementId),\n )\n })\n })\n\n useEffect(() => {\n if (showValueAttributesNodes.current.length) {\n const chartSettingCallOptions = chartSettings.options(attributes)\n const isFlipped = chartSettingCallOptions.includes(\"flip\")\n\n // \"objectrows\" is for d3pie, which has different data format\n if (chartData.format === \"json\" && !chartSettingCallOptions.includes(\"objectrows\")) {\n const { data } = (chartData as DygraphData).result\n const valueIndex = hoveredRow === -1\n ? 
(data.length - 1)\n : (hoveredRow) // because data for easy-pie-chart are flipped\n\n // yes, \"flipped\" value means chronological order (from oldest to newest) :)\n const rowIndex = isFlipped ? valueIndex : (data.length - valueIndex - 1)\n const row = data[rowIndex]\n\n chartData.dimension_names.forEach((dimensionName, dimensionIndex) => {\n const value = (showUndefined || !row)\n ? \"\"\n : legendFormatValue(row[dimensionIndex + 1])\n const element = showValueAttributesNodes.current[dimensionIndex]\n if (element) {\n element.innerText = `${value}`\n }\n })\n }\n }\n }, [attributes, chartData, chartSettings, hoveredRow, legendFormatValue, showUndefined])\n}\n","import { __, prop } from \"ramda\"\nimport React, { useEffect, useState, useCallback, useMemo, memo, useContext } from \"react\"\nimport { ThemeContext } from \"styled-components\"\nimport { useDebouncedCallback } from \"use-debounce\"\n\nimport {\n requestCommonColorsAction,\n setDefaultAfterAction,\n setGlobalPanAndZoomAction,\n setGlobalSelectionAction,\n} from \"domains/global/actions\"\nimport {\n createSelectAssignedColors,\n selectGlobalSelection,\n selectSyncPanAndZoom,\n selectSyncSelection,\n selectUnitsScalingMethod,\n} from \"domains/global/selectors\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { TimeRange } from \"types/common\"\nimport { MS_IN_SECOND, isTimestamp } from \"utils/utils\"\n\nimport { setChartPanAndZoomAction } from \"domains/chart/actions\"\n\nimport { getPanAndZoomStep } from \"../utils/get-pan-and-zoom-step\"\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"../utils/chartLibrariesSettings\"\nimport { useFormatters } from \"../utils/formatters\"\nimport { ChartData, ChartMetadata } from \"../chart-types\"\n\nimport { ChartLegend } from \"./chart-legend\"\nimport { LegendToolbox } from \"./legend-toolbox\"\nimport { ResizeHandler } from \"./resize-handler\"\nimport { AbstractChart } from \"./abstract-chart\"\n\ninterface GlobalPanAndZoomState {\n after: number // timestamp in ms\n before: number // timestamp in ms\n masterID?: string\n shouldForceTimeRange?: boolean\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: ChartData\n chartMetadata: ChartMetadata\n chartHeight: number\n chartUuid: string\n chartWidth: number\n defaultAfter: number\n globalPanAndZoom: null | GlobalPanAndZoomState\n hasEmptyData: boolean\n isRemotelyControlled: boolean\n viewRangeForCurrentData: TimeRange\n viewRange: TimeRange\n selectedDimensions: string[]\n setSelectedDimensions: (newState: string[]) => void\n showLatestOnBlur: boolean\n}\n\nexport const Chart = memo(\n ({\n attributes,\n attributes: { chartLibrary },\n chartContainerElement,\n chartData,\n chartMetadata,\n chartHeight,\n chartUuid,\n chartWidth,\n defaultAfter,\n globalPanAndZoom,\n hasEmptyData,\n isRemotelyControlled,\n viewRangeForCurrentData,\n viewRange,\n selectedDimensions,\n setSelectedDimensions,\n showLatestOnBlur,\n }: Props) => {\n const themeContext = useContext(ThemeContext)\n const unitsScalingMethod = useSelector(selectUnitsScalingMethod)\n const chartSettings = chartLibrariesSettings[chartLibrary]\n const { hasLegend } = chartSettings\n const {\n units = chartMetadata.units,\n unitsCommon,\n unitsDesired = unitsScalingMethod,\n } = attributes\n\n // we need to have empty selectedDimensions work as {all enabled}, in case\n // new dimensions show up (when all are enabled, the new dimensions should 
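// useShowValueOutside above maps the hovered legend row onto a data row while
// accounting for "flip": flipped results are chronological (oldest first),
// non-flipped ones have the newest point at index 0. Just that index math:
const toRowIndex = (hoveredRow: number, length: number, isFlipped: boolean): number => {
  // hoveredRow === -1 means "no hover": fall back to the latest point
  const valueIndex = hoveredRow === -1 ? length - 1 : hoveredRow
  return isFlipped ? valueIndex : length - valueIndex - 1
}
// toRowIndex(-1, 10, true) -> 9 (latest row), toRowIndex(-1, 10, false) -> 0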
also auto-enable)\n const dimensionsVisibility = useMemo(\n () =>\n chartData.dimension_names.map(dimensionName =>\n selectedDimensions.length === 0 ? true : selectedDimensions.includes(dimensionName)\n ),\n [chartData.dimension_names, selectedDimensions]\n )\n\n const shouldDisplayToolbox =\n hasLegend(attributes) && window.NETDATA.options.current.legend_toolbox\n\n const shouldDisplayResizeHandler =\n shouldDisplayToolbox &&\n window.NETDATA.options.current.resize_charts && // legacy way of turning off for print mode\n !attributes.hideResizeHandler\n\n const dispatch = useDispatch()\n const allDimensionNames = useMemo(() => {\n // metadata and chartData dimensions match each other, but we need to first parse\n // dimensions from metadata, to keep the same order (when the browser parses the dimensions object,\n // it sorts them in *some* way which is hard to reproduce). And people can get used to colors,\n // so let's keep them as they were before\n const dimensionNamesFromMetadata = Object.values(chartMetadata.dimensions).map(x => x.name)\n const additionalDimensionNamesFromData = chartData.dimension_names.filter(\n x => !dimensionNamesFromMetadata.includes(x)\n )\n return dimensionNamesFromMetadata.concat(additionalDimensionNamesFromData)\n }, [chartData.dimension_names, chartMetadata.dimensions])\n useEffect(() => {\n dispatch(\n requestCommonColorsAction({\n chartContext: chartMetadata.context,\n chartUuid,\n colorsAttribute: attributes.colors,\n commonColorsAttribute: attributes.commonColors,\n dimensionNames: allDimensionNames,\n })\n )\n }, [\n allDimensionNames,\n attributes.colors,\n attributes.commonColors,\n chartMetadata.context,\n chartUuid,\n dispatch,\n ])\n\n const { legendFormatValue, legendFormatValueDecimalsFromMinMax, unitsCurrent } = useFormatters({\n attributes,\n data: chartData,\n units,\n unitsCommon,\n unitsDesired,\n uuid: chartUuid,\n })\n\n const [localHoveredX, setLocalHoveredX] = useState(null)\n\n const isSyncSelection = useSelector(selectSyncSelection)\n const handleSetHoveredX = useCallback(\n (newHoveredX, noMaster) => {\n if (isSyncSelection) {\n const action = noMaster\n ? { chartUuid: null, hoveredX: newHoveredX }\n : { chartUuid, hoveredX: newHoveredX }\n dispatch(setGlobalSelectionAction(action))\n } else {\n setLocalHoveredX(newHoveredX)\n }\n },\n [chartUuid, dispatch, isSyncSelection]\n )\n const globalHoveredX = useSelector(selectGlobalSelection)\n const hoveredX = isSyncSelection ? globalHoveredX : localHoveredX\n\n // time-frames for requested data (even when request is pending)\n const viewAfter = isTimestamp(viewRange[0]) ? viewRange[0] : chartData.after * MS_IN_SECOND\n const viewBefore = isTimestamp(viewRange[1]) ? viewRange[1] : chartData.before * MS_IN_SECOND\n\n const viewAfterForCurrentData = isTimestamp(viewRangeForCurrentData[0])\n ? viewRangeForCurrentData[0]\n : chartData.after * MS_IN_SECOND\n const viewBeforeForCurrentData = isTimestamp(viewRangeForCurrentData[1])\n ? 
viewRangeForCurrentData[1]\n : chartData.before * MS_IN_SECOND // when 'before' is 0 or negative\n\n const netdataFirst = chartData.first_entry * MS_IN_SECOND\n const netdataLast = chartData.last_entry * MS_IN_SECOND\n\n // old dashboard persists min duration based on first chartWidth, I assume it's a bug\n // and will update fixedMinDuration when width changes\n const fixedMinDuration = useMemo(\n () => Math.round((chartWidth / 30) * chartMetadata.update_every * MS_IN_SECOND),\n [chartMetadata.update_every, chartWidth]\n )\n\n const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n\n const setGlobalPanAndZoomDebounced = useDebouncedCallback(\n newGlobalPanAndZoom => {\n dispatch(setGlobalPanAndZoomAction(newGlobalPanAndZoom))\n },\n 400 // corresponds to global_pan_sync_time in old dashboard\n )\n\n const immediatelyDispatchPanAndZoom = useCallback(() => {\n setGlobalPanAndZoomDebounced.flush()\n }, [setGlobalPanAndZoomDebounced])\n\n /**\n * pan-and-zoom handler (both for toolbox and mouse events)\n */\n const handleUpdateChartPanAndZoom = useCallback(\n ({\n after,\n before,\n callback,\n shouldFlushImmediately = false,\n shouldForceTimeRange,\n shouldNotExceedAvailableRange,\n }) => {\n if (before < after) {\n return\n }\n let minDuration = fixedMinDuration\n\n const currentDuration = Math.round(viewBefore - viewAfter)\n\n let afterForced = Math.round(after)\n let beforeForced = Math.round(before)\n const viewUpdateEvery = chartData.view_update_every * MS_IN_SECOND\n\n if (shouldNotExceedAvailableRange) {\n const first = netdataFirst + viewUpdateEvery\n const last = netdataLast + viewUpdateEvery\n // first check \"before\"\n if (beforeForced > last) {\n afterForced -= before - last\n beforeForced = last\n }\n\n if (afterForced < first) {\n afterForced = first\n }\n }\n\n // align them to update_every\n // stretching them further away\n afterForced -= afterForced % viewUpdateEvery\n beforeForced += viewUpdateEvery - (beforeForced % viewUpdateEvery)\n\n // the final wanted duration\n let wantedDuration = beforeForced - afterForced\n\n // to allow panning, accept just a point below our minimum\n if (currentDuration - viewUpdateEvery < minDuration) {\n minDuration = currentDuration - viewUpdateEvery\n }\n\n // we do it, but we adjust to minimum size and return false\n // when the wanted size is below the current and the minimum\n // and we zoom\n let doCallback = true\n if (wantedDuration < currentDuration && wantedDuration < minDuration) {\n minDuration = fixedMinDuration\n\n const dt = (minDuration - wantedDuration) / 2\n beforeForced += dt\n afterForced -= dt\n wantedDuration = beforeForced - afterForced\n doCallback = false\n }\n\n const tolerance = viewUpdateEvery * 2\n const movement = Math.abs(beforeForced - viewBefore)\n\n if (\n Math.abs(currentDuration - wantedDuration) <= tolerance &&\n movement <= tolerance &&\n doCallback\n ) {\n return\n }\n\n if (isSyncPanAndZoom) {\n setGlobalPanAndZoomDebounced.callback({\n after: afterForced,\n before: beforeForced,\n masterID: chartUuid,\n shouldForceTimeRange,\n })\n if (shouldFlushImmediately) {\n setGlobalPanAndZoomDebounced.flush()\n }\n } else {\n dispatch(\n setChartPanAndZoomAction({\n after: afterForced,\n before: beforeForced,\n id: chartUuid,\n shouldForceTimeRange,\n })\n )\n }\n\n if (doCallback && typeof callback === \"function\") {\n callback(afterForced, beforeForced)\n }\n },\n [\n chartData.view_update_every,\n chartUuid,\n dispatch,\n fixedMinDuration,\n isSyncPanAndZoom,\n netdataFirst,\n netdataLast,\n 
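// handleUpdateChartPanAndZoom above snaps the requested window outward to
// multiples of the chart's update interval, so a query always covers whole
// data points. The alignment step as a worked sketch:
const alignToUpdateEvery = (afterMs: number, beforeMs: number, updateEveryMs: number) => ({
  // stretch the left edge down to the previous interval boundary
  after: afterMs - (afterMs % updateEveryMs),
  // and the right edge up to the next one
  before: beforeMs + updateEveryMs - (beforeMs % updateEveryMs),
})
// alignToUpdateEvery(1003, 4998, 1000) -> { after: 1000, before: 5000 }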
setGlobalPanAndZoomDebounced,\n viewAfter,\n viewBefore,\n ]\n )\n\n /**\n * toolbox handlers\n */\n const handleToolBoxPanAndZoom = useCallback(\n (after: number, before: number) => {\n const newAfter = Math.max(after, netdataFirst)\n const newBefore = Math.min(before, netdataLast)\n handleUpdateChartPanAndZoom({\n after: newAfter,\n before: newBefore,\n shouldForceTimeRange: true,\n shouldFlushImmediately: true,\n })\n },\n [handleUpdateChartPanAndZoom, netdataFirst, netdataLast]\n )\n\n const handleToolboxLeftClick = useCallback(\n (event: React.MouseEvent) => {\n const step = (viewBefore - viewAfter) * getPanAndZoomStep(event)\n const newBefore = viewBefore - step\n const newAfter = viewAfter - step\n if (newAfter >= netdataFirst) {\n handleToolBoxPanAndZoom(newAfter, newBefore)\n }\n },\n [handleToolBoxPanAndZoom, netdataFirst, viewAfter, viewBefore]\n )\n\n const handleToolboxRightClick = useCallback(\n (event: React.MouseEvent) => {\n const timeWindow = viewBefore - viewAfter\n const step = timeWindow * getPanAndZoomStep(event)\n const newBefore = Math.min(viewBefore + step, netdataLast)\n const newAfter = newBefore - timeWindow\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [handleToolBoxPanAndZoom, netdataLast, viewAfter, viewBefore]\n )\n\n const handleToolboxZoomInClick = useCallback(\n (event: React.MouseEvent) => {\n const panAndZoomStep = getPanAndZoomStep(event) * 0.8\n if (!globalPanAndZoom) {\n dispatch(\n setDefaultAfterAction({\n after: Math.round(defaultAfter / (panAndZoomStep + 1)),\n })\n )\n return\n }\n // if visible time range is much bigger than available time range in history, first zoom-in\n // should just fit to available range\n if (viewBefore - viewAfter > (netdataLast - netdataFirst) * 1.2) {\n handleToolBoxPanAndZoom(netdataFirst, netdataLast)\n return\n }\n const dt = ((viewBefore - viewAfter) * panAndZoomStep) / 2\n const newAfter = viewAfter + dt\n const newBefore = viewBefore - dt\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [\n defaultAfter,\n dispatch,\n globalPanAndZoom,\n handleToolBoxPanAndZoom,\n netdataFirst,\n netdataLast,\n viewAfter,\n viewBefore,\n ]\n )\n\n const handleToolboxZoomOutClick = useCallback(\n (event: React.MouseEvent) => {\n const panAndZoomStep = getPanAndZoomStep(event) * 0.8\n if (!globalPanAndZoom) {\n dispatch(\n setDefaultAfterAction({\n after: Math.round(defaultAfter * (panAndZoomStep + 1)),\n })\n )\n return\n }\n const dt =\n ((viewBefore - viewAfter) / (1.0 - panAndZoomStep * 0.8) - (viewBefore - viewAfter)) / 2\n const newAfter = viewAfter - dt\n const newBefore = viewBefore + dt\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [defaultAfter, dispatch, globalPanAndZoom, handleToolBoxPanAndZoom, viewAfter, viewBefore]\n )\n\n /**\n * assign colors\n */\n const selectAssignedColors = useMemo(\n () =>\n createSelectAssignedColors({\n chartContext: chartMetadata.context,\n chartUuid,\n colorsAttribute: attributes.colors,\n commonColorsAttribute: attributes.commonColors,\n }),\n [attributes.colors, attributes.commonColors, chartMetadata, chartUuid]\n )\n const colors = useSelector(selectAssignedColors)\n const orderedColors = useMemo(\n () => chartData.dimension_names.map(prop(__, colors)),\n [chartData, colors]\n )\n\n if (!colors) {\n return // wait for createSelectAssignedColors reducer result to come back\n }\n\n const isTimeVisible = hoveredX && hoveredX >= viewAfter && hoveredX <= viewBefore\n const viewUpdateEvery = chartData.view_update_every * MS_IN_SECOND\n const hoveredRow = isTimeVisible\n 
? Math.floor(((hoveredX as number) - chartData.after * MS_IN_SECOND) / viewUpdateEvery)\n : -1\n\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n const legendToolbox = (\n \n )\n\n const resizeHandler = shouldDisplayResizeHandler && (\n \n )\n\n return (\n <>\n legendFormatValueDecimalsFromMinMax(min, max)}\n showLatestOnBlur={showLatestOnBlur}\n unitsCurrent={unitsCurrent}\n viewAfterForCurrentData={viewAfterForCurrentData}\n viewBeforeForCurrentData={viewBeforeForCurrentData}\n />\n {hasLegend(attributes) && (\n \n )}\n {shouldDisplayToolbox && !isLegendOnBottom && legendToolbox}\n {!isLegendOnBottom && resizeHandler}\n \n )\n }\n)\n","import styled from \"styled-components\"\nimport { getColor, Icon } from \"@netdata/netdata-ui\"\n\nexport const DropdownItem = styled.div`\n display: flex;\n flex-direction: start;\n align-items: center;\n color: ${getColor([\"neutral\", \"limedSpruce\"])};\n white-space: nowrap;\n & > svg use {\n fill: ${getColor([\"neutral\", \"limedSpruce\"])};\n }\n`\n\nexport const DropdownItemLabel = styled.span`\n margin-left: 12px;\n`\n\nexport const DotsBtn = styled(Icon)`\n width: 6px;\n height: 10px;\n cursor: pointer;\n & use {\n fill: ${getColor([\"neutral\", \"limedSpruce\"])};\n & :hover {\n fill: ${getColor([\"neutral\", \"regentgrey\"])};\n }\n }\n`\n","import React, { useState, ReactNode } from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata } from \"domains/chart/chart-types\"\n\nimport { List, SimpleListItem } from \"@rmwc/list\"\nimport { MenuSurface, MenuSurfaceAnchor } from \"@rmwc/menu\"\n\nimport * as S from \"./styled\"\n\ninterface DropdownMenuCallbackProps {\n attributes: Attributes,\n chartMetadata: ChartMetadata,\n chartID: string,\n}\n\nexport type DropdownMenu = {\n icon: ReactNode,\n label: string,\n onClick: (dropdownMenuCallbackProps: DropdownMenuCallbackProps) => void,\n}[]\n\ninterface Props {\n attributes: Attributes\n chartID: string\n chartMetadata: ChartMetadata\n dropdownMenu: DropdownMenu\n}\n\nexport const ChartDropdown = ({\n attributes,\n chartID,\n chartMetadata,\n dropdownMenu,\n}: Props) => {\n const [isOpen, setIsOpen] = useState(false)\n\n const handleClose = () => {\n setIsOpen(false)\n }\n\n return (\n <>\n {\n setIsOpen(true)\n }}\n />\n \n \n \n {dropdownMenu.map(({ icon, label, onClick }) => (\n \n {icon}\n \n {label}\n \n \n )}\n onClick={() => {\n onClick({ attributes, chartMetadata, chartID })\n handleClose()\n }}\n />\n ))}\n \n \n \n \n )\n}\n","import { prop } from \"ramda\"\nimport styled, { keyframes } from \"styled-components\"\n\nimport { getColor } from \"@netdata/netdata-ui\"\n\nconst circleAnimation = keyframes`\n 0% {\n opacity: .1;\n }\n 50% {\n opacity: .5;\n }\n 100% {\n opacity: .1;\n }\n`\n\n\nexport const SpinnerContainer = styled.div<{ top: number, right: number }>`\n position: absolute;\n top: ${prop(\"top\")}px;\n right: ${prop(\"right\")}px;\n display: flex;\n`\n\nexport const Circle = styled.div<{ size: number }>`\n width: ${prop(\"size\")}px;\n height: ${prop(\"size\")}px;\n background: ${getColor(\"border\")};\n border-radius: 50%;\n animation: 1s linear infinite both ${circleAnimation};\n`\n\nexport const Circle2 = styled(Circle)<{ spaceBetween: number }>`\n animation-delay: .3s; \n margin-left: ${prop(\"spaceBetween\")}px;\n`\n\nexport const Circle3 = styled(Circle)<{ spaceBetween: number }>`\n animation-delay: .6s; \n margin-left: ${prop(\"spaceBetween\")}px;\n`\n","import React from 
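// hoveredRow above converts a hovered timestamp into a row index by measuring
// its offset from the start of the received data in view_update_every steps.
// The same arithmetic in isolation (times in ms, chart "after" in seconds):
const hoveredRowFor = (hoveredXMs: number, chartAfterSec: number, viewUpdateEveryMs: number) =>
  Math.floor((hoveredXMs - chartAfterSec * 1000) / viewUpdateEveryMs)
// a chart starting at t=1000 s with 1 s resolution, hovered at t=1004.5 s:
// hoveredRowFor(1_004_500, 1000, 1000) -> 4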
\"react\"\n\nimport * as S from \"./styled\"\n\ninterface Props {\n chartLibrary: string\n}\nexport const ChartSpinner = ({\n chartLibrary,\n}: Props) => {\n const top = chartLibrary === \"dygraph\" ? 33 : 0\n const right = chartLibrary === \"dygraph\" ? 8 : 0\n const size = chartLibrary === \"dygraph\" ? 10 : 7\n const spaceBetween = chartLibrary === \"dygraph\" ? 4 : 2\n return (\n \n \n \n \n \n )\n}\n","import styled from \"styled-components\"\n\nimport { chartDropdownZIndex } from \"styles/z-index\"\n\nexport const ChartDropdownContainer = styled.div`\n position: absolute;\n top: 0;\n left: 40px;\n width: 20px;\n height: 20px;\n z-index: ${chartDropdownZIndex};\n`\n","export const notificationsZIndex = \"z-index: 50;\"\n\nexport const chartDropdownZIndex = 10\n\nexport const spacesBarZIndex = 8\n\nexport const spacePanelZIndex = 6\n\nexport const appHeaderZIndex = 5\n\n// the same as in cloud\nexport const portalSidebarZIndex = \"z-index: 35;\"\nexport const customDropdownZIndex = \"z-index: 45;\"\nexport const dialogsZIndex = 60\n","import { cond, always, T } from \"ramda\"\nimport axios from \"axios\"\nimport React, { useEffect, useState, useMemo, useLayoutEffect } from \"react\"\nimport { useThrottle, useUpdateEffect, useUnmount, useDebounce } from \"react-use\"\n\nimport { AppStateT } from \"store/app-state\"\nimport { useSelector, useDispatch } from \"store/redux-separate-context\"\n\nimport {\n selectGlobalPanAndZoom,\n selectGlobalSelection,\n selectShouldEliminateZeroDimensions,\n selectPanAndZoomDataPadding,\n selectSnapshot,\n selectSpacePanelTransitionEndIsActive,\n selectDefaultAfter,\n} from \"domains/global/selectors\"\nimport { serverDefault } from \"utils/server-detection\"\nimport { CHART_UNMOUNTED } from \"utils/netdata-sdk\"\nimport { getCorrectedPoints } from \"utils/fill-missing-data\"\n\nimport { fallbackUpdateTimeInterval, panAndZoomDelay } from \"../../constants\"\nimport { getChartURLOptions } from \"../../utils/get-chart-url-options\"\nimport { chartLibrariesSettings } from \"../../utils/chartLibrariesSettings\"\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport { getChartPixelsPerPoint } from \"../../utils/get-chart-pixels-per-point\"\nimport { useFetchNewDataClock } from \"../../hooks/use-fetch-new-data-clock\"\n\nimport { fetchChartAction, fetchDataAction } from \"../../actions\"\nimport {\n selectChartData,\n selectChartFetchDataParams,\n makeSelectChartMetadataRequest,\n selectChartPanAndZoom,\n selectChartIsFetchingData,\n selectChartViewRange,\n} from \"../../selectors\"\nimport {\n ChartData,\n ChartMetadata,\n D3pieChartData,\n DygraphData,\n EasyPieChartData,\n} from \"../../chart-types\"\n\nimport { Loader } from \"../loader\"\nimport { Chart } from \"../chart\"\nimport { ChartDropdown, DropdownMenu } from \"../chart-dropdown\"\nimport { ChartSpinner } from \"../chart-spinner/chart-spinner\"\n\nimport * as S from \"./styled\"\nimport \"./chart-with-loader.css\"\n\nexport type RenderCustomElementForDygraph = (selectedChartConfiguration: {\n attributes: Attributes\n onAttributesChange: any\n chartMetadata: ChartMetadata\n chartID: string\n chartData: ChartData | null\n}) => JSX.Element\n\nconst dimensionsAggrMethodMap = {\n \"sum-of-abs\": \"sum\",\n}\n\nconst emptyArray = [] as any\n\nexport type Props = {\n attributes: Attributes\n chartUuid: string\n uuid?: string\n dropdownMenu?: DropdownMenu\n externalChartMetadata?: ChartMetadata\n portalNode: HTMLElement\n renderCustomElementForDygraph?: RenderCustomElementForDygraph\n 
onAttributesChange?: any\n}\n\nexport const ChartWithLoader = ({\n attributes,\n chartUuid,\n uuid,\n dropdownMenu,\n externalChartMetadata,\n portalNode,\n renderCustomElementForDygraph,\n onAttributesChange,\n}: Props) => {\n /**\n * fetch chart details\n */\n const { host = serverDefault, id, nodeIDs } = attributes\n const dispatch = useDispatch()\n const selectChartMetadataRequest = useMemo(makeSelectChartMetadataRequest, [])\n const { chartMetadata, isFetchingDetails } = useSelector((state: AppStateT) =>\n selectChartMetadataRequest(state, { chartId: id, id: chartUuid })\n )\n const actualChartMetadata = externalChartMetadata || chartMetadata\n useEffect(() => {\n if (!chartMetadata && !isFetchingDetails && !externalChartMetadata) {\n dispatch(\n fetchChartAction.request({\n chart: id,\n id: chartUuid,\n host,\n nodeIDs,\n })\n )\n }\n }, [\n id,\n chartUuid,\n dispatch,\n host,\n isFetchingDetails,\n chartMetadata,\n externalChartMetadata,\n nodeIDs,\n uuid,\n ])\n\n // todo local state option\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const chartPanAndZoom = useSelector((state: AppStateT) =>\n selectChartPanAndZoom(state, { id: chartUuid })\n )\n const panAndZoom = chartPanAndZoom || globalPanAndZoom\n\n const isPanAndZoomMaster =\n (!!globalPanAndZoom && globalPanAndZoom.masterID === chartUuid) || Boolean(chartPanAndZoom)\n const shouldForceTimeRange = panAndZoom?.shouldForceTimeRange || false\n\n // (isRemotelyControlled === false) only during globalPanAndZoom, when chart is panAndZoomMaster\n // and when no toolbox is used at that time\n const isRemotelyControlled = !panAndZoom || !isPanAndZoomMaster || shouldForceTimeRange // used when zooming/shifting in toolbox\n\n const fetchDataParams = useSelector((state: AppStateT) =>\n selectChartFetchDataParams(state, { id: chartUuid })\n )\n const viewRange = useSelector((state: AppStateT) =>\n selectChartViewRange(state, { id: chartUuid })\n )\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id: chartUuid }))\n const isFetchingData = useSelector((state: AppStateT) =>\n selectChartIsFetchingData(state, { id: chartUuid })\n )\n\n const hoveredX = useSelector(selectGlobalSelection)\n\n // periodical update of newest data\n // default to 2000ms. 
When chartMetadata has been fetched, use chartMetadata.update_every\n // if chartData has been fetched, use chartData.view_update_every instead\n // todo add support to \"data-update-every\" attribute\n const viewUpdateEvery = cond([\n [always(!!chartData), () => (chartData as ChartData).view_update_every * 1000],\n [\n always(!!actualChartMetadata),\n () => (actualChartMetadata as ChartMetadata).update_every * 1000,\n ],\n [T, always(fallbackUpdateTimeInterval)],\n ])()\n const [shouldFetch, setShouldFetch] = useFetchNewDataClock({\n areCriteriaMet: !panAndZoom && !hoveredX,\n preferedIntervalTime: viewUpdateEvery,\n })\n\n const panAndZoomThrottled = useThrottle(panAndZoom, panAndZoomDelay)\n useEffect(() => {\n setShouldFetch(true)\n }, [panAndZoomThrottled, setShouldFetch])\n\n const defaultAfter = useSelector(selectDefaultAfter)\n // when after/before changes, don't wait for next interval, just fetch immediately\n useUpdateEffect(() => {\n setShouldFetch(true)\n }, [\n attributes.after,\n attributes.before,\n defaultAfter,\n attributes.dimensions,\n attributes.aggrMethod,\n attributes.groupBy,\n ])\n\n const { before: initialBefore = window.NETDATA.chartDefaults.before } = attributes\n\n // attributes.after should be now used only for old custom dashboard\n // and in the future for setting timeframe per-chart\n const liveModeAfter = attributes.after || defaultAfter\n\n const chartSettings = chartLibrariesSettings[attributes.chartLibrary]\n const { hasLegend } = chartSettings\n\n // todo optimize by using resizeObserver (optionally)\n const boundingClientRect = portalNode.getBoundingClientRect()\n\n // from old dashboard\n const chartWidth = boundingClientRect.width - (hasLegend(attributes) ? 140 : 0)\n const chartHeight = boundingClientRect.height\n\n const isShowingSnapshot = Boolean(useSelector(selectSnapshot))\n const shouldEliminateZeroDimensions =\n useSelector(selectShouldEliminateZeroDimensions) || isShowingSnapshot\n const shouldUsePanAndZoomPadding = useSelector(selectPanAndZoomDataPadding)\n\n const { CancelToken } = axios\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const cancelTokenSource = useMemo(() => CancelToken.source(), [])\n useUnmount(() => {\n cancelTokenSource.cancel(CHART_UNMOUNTED)\n })\n\n /**\n * spinner state\n * show spinner when it's fetching for more than 2 seconds\n * hide spinner immediately when it's not fetching\n */\n const [shouldShowSpinnerDebounced, setShouldShowSpinnerDebounced] = useState(false)\n const shouldShowSpinner = shouldShowSpinnerDebounced && isFetchingData\n useDebounce(\n () => {\n if (isFetchingData) {\n setShouldShowSpinnerDebounced(true)\n }\n },\n 2000,\n [isFetchingData]\n )\n useEffect(() => {\n if (!isFetchingData && shouldShowSpinnerDebounced) {\n setShouldShowSpinnerDebounced(false)\n }\n }, [isFetchingData, shouldShowSpinnerDebounced])\n\n /**\n * fetch data\n */\n useEffect(() => {\n if (shouldFetch && actualChartMetadata && !isFetchingData) {\n // todo can be overridden by main.js\n const forceDataPoints = window.NETDATA.options.force_data_points\n\n let after\n let before\n let newViewRange\n let pointsMultiplier = 1\n\n if (panAndZoom) {\n if (isPanAndZoomMaster) {\n after = Math.round(panAndZoom.after / 1000)\n before = Math.round(panAndZoom.before / 1000)\n\n newViewRange = [after, before]\n\n if (shouldUsePanAndZoomPadding) {\n const requestedPadding = Math.round((before - after) / 2)\n after -= requestedPadding\n before += requestedPadding\n pointsMultiplier = 2\n }\n } else {\n after = 
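// Editor's sketch of the spinner rule above, extracted into a standalone hook
// (the name useDelayedSpinner is hypothetical; the original inlines the logic
// with react-use's useDebounce). Show the spinner only after `delay` ms of
// continuous fetching, and hide it immediately when fetching stops.
import { useEffect, useState } from "react"

const useDelayedSpinner = (isFetching: boolean, delay = 2000): boolean => {
  const [visible, setVisible] = useState(false)
  useEffect(() => {
    if (!isFetching) {
      setVisible(false) // hide with no delay
      return
    }
    const timer = setTimeout(() => setVisible(true), delay)
    return () => clearTimeout(timer) // cancel if fetching ends early
  }, [isFetching, delay])
  return visible
}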
Math.round(panAndZoom.after / 1000)\n before = Math.round(panAndZoom.before / 1000)\n pointsMultiplier = 1\n }\n } else {\n // no panAndZoom\n before = initialBefore\n after = liveModeAfter\n pointsMultiplier = 1\n }\n\n newViewRange = (newViewRange || [after, before]).map(x => x * 1000) as [number, number]\n\n const dataPoints =\n attributes.points ||\n Math.round(chartWidth / getChartPixelsPerPoint({ attributes, chartSettings }))\n const points = forceDataPoints || dataPoints * pointsMultiplier\n\n const shouldForceTimeWindow = attributes.forceTimeWindow || Boolean(defaultAfter)\n // if we want to add fake points, we need first need to request less\n // to have the desired frequency\n // this will be removed when Agents will support forcing time-window between points\n const correctedPoints = shouldForceTimeWindow\n ? getCorrectedPoints({\n after,\n before,\n firstEntry: actualChartMetadata.first_entry,\n points,\n })\n : null\n\n const group = attributes.method || window.NETDATA.chartDefaults.method\n setShouldFetch(false)\n dispatch(\n fetchDataAction.request({\n // properties to be passed to API\n host,\n context: actualChartMetadata.context,\n chart: actualChartMetadata.id,\n format: chartSettings.format,\n points: correctedPoints || points,\n group,\n gtime: attributes.gtime || 0,\n options: getChartURLOptions(attributes, shouldEliminateZeroDimensions),\n after: after || null,\n before: before || null,\n dimensions: attributes.dimensions,\n labels: attributes.labels,\n postGroupBy: attributes.postGroupBy,\n postAggregationMethod: attributes.postAggregationMethod,\n aggrMethod: attributes.aggrMethod,\n aggrGroups: attributes.aggrGroups,\n // @ts-ignore\n dimensionsAggrMethod:\n dimensionsAggrMethodMap[attributes.dimensionsAggrMethod] ||\n attributes.dimensionsAggrMethod,\n nodeIDs,\n httpMethod: attributes.httpMethod,\n groupBy: attributes.groupBy,\n\n // properties for the reducer\n fetchDataParams: {\n // we store it here so it is only available when data is fetched\n // those params should be synced with data\n fillMissingPoints: correctedPoints ? 
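// Worked example of the padding branch above (editor's illustration): for a
// pan-and-zoom window of after = 1000s and before = 1600s, requestedPadding
// is round((1600 - 1000) / 2) = 300, so the request actually spans 700..1900,
// twice the visible range, while newViewRange keeps the visible [1000, 1600].
// pointsMultiplier = 2 doubles the requested points so the data density per
// second matches an unpadded request.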
points - correctedPoints : undefined,\n isRemotelyControlled,\n viewRange: newViewRange,\n },\n id: chartUuid,\n cancelTokenSource,\n })\n )\n }\n }, [\n attributes,\n actualChartMetadata,\n chartSettings,\n chartUuid,\n chartWidth,\n defaultAfter,\n dispatch,\n hasLegend,\n host,\n initialBefore,\n isFetchingData,\n isPanAndZoomMaster,\n isRemotelyControlled,\n liveModeAfter,\n panAndZoom,\n portalNode,\n setShouldFetch,\n shouldEliminateZeroDimensions,\n shouldUsePanAndZoomPadding,\n shouldFetch,\n cancelTokenSource,\n nodeIDs,\n uuid,\n ])\n\n useSelector(selectSpacePanelTransitionEndIsActive)\n\n const externalSelectedDimensions = attributes?.selectedDimensions\n const [selectedDimensions, setSelectedDimensions] = useState(\n externalSelectedDimensions || emptyArray\n )\n\n useLayoutEffect(() => {\n if (externalSelectedDimensions) {\n setSelectedDimensions(externalSelectedDimensions)\n }\n }, [externalSelectedDimensions])\n\n useLayoutEffect(() => {\n setSelectedDimensions(externalSelectedDimensions || emptyArray)\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [attributes?.groupBy])\n\n const customElementForDygraph = useMemo(\n () =>\n renderCustomElementForDygraph &&\n renderCustomElementForDygraph({\n onAttributesChange,\n attributes,\n chartMetadata: actualChartMetadata as ChartMetadata,\n chartData,\n chartID: id,\n }),\n [\n onAttributesChange,\n renderCustomElementForDygraph,\n attributes,\n id,\n actualChartMetadata,\n chartData,\n ]\n )\n\n // eslint-disable-next-line max-len\n const hasEmptyData =\n (chartData as DygraphData | D3pieChartData | null)?.result?.data?.length === 0 ||\n (chartData as EasyPieChartData | null)?.result?.length === 0\n\n if (!chartData || !actualChartMetadata) {\n return (\n <>\n \n {shouldShowSpinner && }\n \n )\n }\n\n return (\n <>\n {hasEmptyData && (\n \n )}\n \n {shouldShowSpinner && }\n {dropdownMenu && dropdownMenu.length > 0 && (\n \n \n \n )}\n {customElementForDygraph}\n \n )\n}\n","import { useEffect, useState } from \"react\"\nimport { useInterval } from \"react-use\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport {\n selectHasWindowFocus,\n selectStopUpdatesWhenFocusIsLost,\n selectGlobalPause,\n} from \"domains/global/selectors\"\nimport { BIGGEST_INTERVAL_NUMBER } from \"utils/biggest-interval-number\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\n\ntype UseFetchNewDataClock = (arg: {\n areCriteriaMet: boolean\n preferedIntervalTime: number\n}) => [boolean, (shouldFetch: boolean) => void]\nexport const useFetchNewDataClock: UseFetchNewDataClock = ({\n areCriteriaMet,\n preferedIntervalTime,\n}) => {\n const hasWindowFocus = useSelector(selectHasWindowFocus)\n const stopUpdatesWhenFocusIsLost = useSelector(selectStopUpdatesWhenFocusIsLost)\n const globalPause = useSelector(selectGlobalPause)\n\n const shouldBeUpdating = !(!hasWindowFocus && stopUpdatesWhenFocusIsLost) && !globalPause\n\n const [shouldFetch, setShouldFetch] = useState(true)\n const [shouldFetchImmediatelyAfterFocus, setShouldFetchImmediatelyAfterFocus] = useState(false)\n\n useEffect(() => {\n if (shouldFetchImmediatelyAfterFocus && shouldBeUpdating) {\n setShouldFetchImmediatelyAfterFocus(false)\n setShouldFetch(true)\n }\n }, [shouldFetchImmediatelyAfterFocus, setShouldFetchImmediatelyAfterFocus, shouldBeUpdating])\n\n // don't use setInterval when we loose focus\n const intervalTime =\n (shouldBeUpdating || !shouldFetchImmediatelyAfterFocus) && !isPrintMode\n ? 
preferedIntervalTime\n : BIGGEST_INTERVAL_NUMBER\n useInterval(() => {\n if (areCriteriaMet) {\n if (!shouldBeUpdating) {\n setShouldFetchImmediatelyAfterFocus(true)\n return\n }\n setShouldFetch(true)\n }\n // when there's no focus, don't ask for updated data\n }, intervalTime)\n return [shouldFetch, setShouldFetch]\n}\n","import { Attributes } from \"./transformDataAttributes\"\nimport { ChartLibraryConfig } from \"./chartLibrariesSettings\"\n\ntype GetChartPixelsPerPoint = (arg: {\n attributes: Attributes,\n chartSettings: ChartLibraryConfig,\n}) => number\n\nexport const getChartPixelsPerPoint: GetChartPixelsPerPoint = ({\n attributes, chartSettings,\n}) => {\n const {\n pixelsPerPoint: pixelsPerPointAttribute,\n } = attributes\n if (typeof pixelsPerPointAttribute === \"number\") {\n return pixelsPerPointAttribute\n }\n const pixelsPerPointSetting = chartSettings.pixelsPerPoint(attributes)\n\n return Math.max(...[\n pixelsPerPointSetting,\n window.NETDATA.options.current.pixels_per_point,\n ].filter((px) => typeof px === \"number\"))\n}\n","import { prop, pick } from \"ramda\"\nimport { createSelector } from \"reselect\"\n\nimport { AppStateT } from \"store/app-state\"\n\nimport { storeKey } from \"./constants\"\n\nconst selectDashboardDomain = (state: AppStateT) => state[storeKey]\n\nexport const selectIsSnapshotMode = createSelector(\n selectDashboardDomain,\n prop(\"isSnapshotMode\"),\n)\n\nexport const selectSnapshotOptions = createSelector(\n selectDashboardDomain,\n pick([\"snapshotCharts\", \"snapshotDataPoints\"]),\n)\n","import React, { useEffect } from \"react\"\n\nimport { MS_IN_SECOND } from \"utils/utils\"\nimport { serverDefault } from \"utils/server-detection\"\nimport { selectIsSnapshotMode, selectSnapshotOptions } from \"domains/dashboard/selectors\"\nimport { selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { TimeRangeObjT } from \"types/common\"\n\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { fetchDataForSnapshotAction } from \"../actions\"\nimport { chartLibrariesSettings } from \"../utils/chartLibrariesSettings\"\nimport { getChartURLOptions } from \"../utils/get-chart-url-options\"\n\ninterface SnapshotLoaderProps {\n attributes: Attributes\n chartUuid: string\n}\nconst SnapshotLoader = ({\n attributes,\n chartUuid,\n}: SnapshotLoaderProps) => {\n const host = attributes.host || serverDefault\n const { snapshotDataPoints } = useSelector(selectSnapshotOptions)\n const group = attributes.method || window.NETDATA.chartDefaults.method\n const { chartLibrary } = attributes\n const chartSettings = chartLibrariesSettings[chartLibrary]\n\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const after = (globalPanAndZoom as TimeRangeObjT).after / MS_IN_SECOND\n const before = (globalPanAndZoom as TimeRangeObjT).before / MS_IN_SECOND\n\n const dispatch = useDispatch()\n useEffect(() => {\n dispatch(fetchDataForSnapshotAction.request({\n // properties to be passed to API\n host,\n context: attributes.id,\n chart: attributes.id,\n format: chartSettings.format,\n points: snapshotDataPoints as number,\n group,\n gtime: attributes.gtime || 0,\n // for snapshots, always eliminate zero dimensions\n options: getChartURLOptions(attributes, true),\n after: after || null,\n before: before || null,\n dimensions: attributes.dimensions,\n aggrMethod: attributes.aggrMethod,\n nodeIDs: attributes.nodeIDs,\n chartLibrary,\n id: chartUuid,\n groupBy: 
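// Editor's sketch of how the request resolution is derived from the code
// above (numbers illustrative): getChartPixelsPerPoint returns the per-chart
// attribute when set, otherwise the coarsest of the library setting and the
// global setting; the chart then asks for roughly one point per pixel bucket.
const chartWidth = 700
const pixelsPerPoint = Math.max(3, 1) // e.g. library setting 3, global setting 1
const points = Math.round(chartWidth / pixelsPerPoint) // 233 points requested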
attributes.groupBy,\n }))\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }) // todo fetch based on state\n return null\n}\n\n\ninterface SnapshotLoaderContainerProps {\n attributes: Attributes\n chartUuid: string\n}\nexport const SnapshotLoaderContainer = ({\n attributes,\n chartUuid,\n}: SnapshotLoaderContainerProps) => {\n const isSnapshotMode = useSelector(selectIsSnapshotMode)\n if (!isSnapshotMode) {\n return null\n }\n return \n}\n","import React, { memo } from \"react\"\nimport { createPortal } from \"react-dom\"\n\nimport { getAttributes } from \"../utils/transformDataAttributes\"\nimport { ChartWithLoader } from \"./chart-with-loader\"\nimport { DisableOutOfView } from \"./disable-out-of-view\"\nimport { SnapshotLoaderContainer } from \"./snapshot-loader\"\n\nconst getNodesArray = () => Array.from(document.querySelectorAll(\"[data-netdata]\"))\n\nexport const Portals = memo(() => {\n const nodes = getNodesArray()\n return (\n <>\n {nodes.map((node, index) => {\n const attributesMapped = getAttributes(node)\n const chartId = `${attributesMapped.id}-${index}`\n return (\n createPortal(\n <>\n \n \n \n \n ,\n node,\n )\n )\n })}\n \n )\n})\n","import { useEffect, useState } from \"react\"\n\nimport { axiosInstance } from \"utils/api\"\n\nexport const useHttp = (\n url: string | undefined,\n shouldMakeCall : boolean = true,\n isExternal?: boolean,\n) => {\n const [isFetching, setIsFetching] = useState(false)\n const [isError, setIsError] = useState(false)\n const [data, setData] = useState(null)\n useEffect(() => {\n if (shouldMakeCall && url) {\n const options = isExternal\n ? { headers: null, withCredentials: false }\n : {}\n\n setIsFetching(true)\n axiosInstance.get(url, options)\n .then((r) => {\n if (r.data) {\n setData(r.data)\n setIsError(false)\n setIsFetching(false)\n }\n })\n .catch((error) => {\n // eslint-disable-next-line no-console\n console.warn(`error fetching ${url}`, error)\n setIsError(true)\n setIsFetching(false)\n })\n }\n }, [isExternal, shouldMakeCall, url])\n // force triple instead of array\n return [data, isFetching, isError] as [T | null, boolean, boolean]\n}\n","import { ReactNode, useEffect, useRef } from \"react\"\nimport { createPortal } from \"react-dom\"\n\nconst modalRoot = document.getElementById(\"modal-root\") as HTMLElement\n\ntype Props = {\n children: ReactNode\n}\nexport const ModalPortal = ({ children }: Props) => {\n const element = useRef(document.createElement(\"div\"))\n useEffect(() => {\n modalRoot.appendChild(element.current)\n return () => {\n // eslint-disable-next-line react-hooks/exhaustive-deps\n modalRoot.removeChild(element.current)\n }\n }, [])\n\n return createPortal(children, element.current)\n}\n","import React, { useRef, useEffect } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { ModalPortal } from \"domains/dashboard/components/modal-portal\"\nimport {\n selectAmountOfCharts, selectAmountOfFetchedCharts, selectNameOfAnyFetchingChart,\n} from \"domains/chart/selectors\"\n\nimport \"./print-modal.scss\"\n\nconst TIMEOUT_DURATION_TO_MAKE_SURE_ALL_CHARTS_HAVE_BEEN_RENDERED = 1000\n\nexport const PrintModal = () => {\n const printModalElement = useRef(null)\n const isFetchingMetrics = true\n\n useEffect(() => {\n // todo replace bootstrap with newer solution (custom or react-compatible library)\n if (printModalElement.current) {\n const $element = window.$(printModalElement.current)\n $element.modal(\"show\")\n }\n }) // render just 
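// Editor's note on Portals above (markup illustrative): any element carrying
// a data-netdata attribute becomes a portal target, so the legacy dashboard
// HTML keeps its layout while React renders charts into it, e.g.
//
//   <div data-netdata="system.cpu" data-chart-library="dygraph"></div>
//
// getNodesArray() collects those elements once, and each receives a
// ChartWithLoader (plus SnapshotLoaderContainer) via createPortal, keyed by
// `${id}-${index}` so repeated charts stay distinct.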
once\n\n const amountOfCharts = useSelector(selectAmountOfCharts)\n const amountOfFetchedCharts = useSelector(selectAmountOfFetchedCharts)\n const nameOfAnyFetchingChart = useSelector(selectNameOfAnyFetchingChart)\n\n const percentage = amountOfCharts === 0\n ? 0\n : (amountOfFetchedCharts / amountOfCharts) * 100\n\n useEffect(() => {\n if (percentage === 100) {\n setTimeout(() => {\n // in case browser will not be able to close the window\n window.$(printModalElement.current).modal(\"hide\")\n window.print()\n window.close()\n }, TIMEOUT_DURATION_TO_MAKE_SURE_ALL_CHARTS_HAVE_BEEN_RENDERED)\n }\n }, [percentage])\n\n\n const progressBarText = nameOfAnyFetchingChart\n && `${Math.round(percentage)}%, ${nameOfAnyFetchingChart}`\n\n\n return (\n \n \n
\n {/* Modal markup stripped during extraction. Recoverable content: a dismiss button (\"×\"), the heading \"Preparing dashboard for printing...\", the body copy \"Please wait while we initialize and render all the charts on the dashboard.\", a progress bar labelled {progressBarText}, and the footer note \"The print dialog will appear as soon as we finish rendering the page.\" */}\n
\n )\n}\n","import styled from \"styled-components\"\nimport { getSizeBy, getColor } from \"@netdata/netdata-ui\"\n\nexport const SocialMediaContainer = styled.div`\n width: 185px;\n padding: ${getSizeBy(2)};\n background: ${getColor(\"borderSecondary\")};\n\n font-size: 12px;\n margin-bottom: ${getSizeBy(3)};\n`\n\nexport const FirstRow = styled.div`\n display: flex;\n justify-content: space-between;\n`\n\nexport const GithubCopy = styled.div`\n\n`\n\nexport const GithubCopyLine = styled.div`\n\n`\n\nexport const SocialMediaLink = styled.a`\n &, &:hover {\n color: ${getColor(\"main\")};\n }\n`\n\nexport const GithubStarQuestion = styled(SocialMediaLink)``\n\nexport const GithubIcon = styled(SocialMediaLink)`\n font-size: 24px;\n`\n\nexport const TwitterIcon = styled(SocialMediaLink)`\n font-size: 17px;\n`\n\nexport const FacebookIcon = styled(SocialMediaLink)`\n font-size: 23px;\n`\n\nexport const Separator = styled.div`\n margin-top: ${getSizeBy(2)};\n border-top: 1px solid ${getColor(\"separator\")};\n\n`\nexport const SecondRow = styled.div`\n margin-top: ${getSizeBy(2)};\n display: flex;\n align-items: center;\n justify-content: space-between;\n`\n\nexport const SecondRowText = styled.span`\n font-size: 10px;\n`\n","import React from \"react\"\n\nimport * as S from \"./styled\"\n\nexport const SidebarSocialMedia = () => (\n \n \n \n \n Do you like Netdata?\n \n \n Give us a star!\n \n \n \n \n \n \n \n \n \n And share the word!\n \n \n \n \n \n \n \n \n \n)\n","import React, { useRef } from \"react\"\nimport { createPortal } from \"react-dom\"\n\ninterface Props {\n children: React.ReactNode\n}\nexport const SidebarSocialMediaPortal = ({\n children,\n}: Props) => {\n const element = useRef(document.querySelector(\"#sidebar-end-portal-container\"))\n return createPortal(children, element.current!)\n}\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { ToastContainer, ToastContainerProps } from \"react-toastify\"\nimport \"react-toastify/dist/ReactToastify.min.css\"\n\nimport { getColor } from \"@netdata/netdata-ui\"\n\nimport { notificationsZIndex } from \"styles/z-index\"\n\nconst WrappedToastContainer = ({\n className,\n ...rest\n}: ToastContainerProps & { className?: string }) => (\n
\n {/* eslint-disable-next-line react/jsx-props-no-spreading */}\n \n
\n)\n\nexport const NotificationsContainer = styled(WrappedToastContainer)`\n .Toastify__toast-container {\n position: fixed;\n width: unset;\n min-width: 400px;\n max-width: 500px;\n ${notificationsZIndex};\n color: ${getColor([\"neutral\", \"limedSpruce\"])};\n }\n .Toastify__toast {\n padding: 0;\n padding-top: 5px;\n }\n .Toastify__toast--error {\n background: ${getColor([\"red\", \"lavender\"])};\n border: 1px solid ${getColor(\"error\")};\n }\n .Toastify__toast--warning {\n }\n .Toastify__toast--success {\n background: ${getColor([\"green\", \"frostee\"])};\n border: 1px solid ${getColor(\"success\")};\n }\n .Toastify__toast-body {\n }\n .Toastify__progress-bar {\n bottom: unset;\n top: 0;\n }\n .Toastify__progress-bar--success {\n background-color: ${getColor(\"success\")};\n }\n .Toastify__progress-bar--error {\n background-color: ${getColor(\"error\")};\n }\n`\n","import React from \"react\"\nimport { Icon, Flex } from \"@netdata/netdata-ui\"\n\nconst Item = ({ icon, children, hasBorder }) => (\n \n {!!icon && }\n {children}\n \n)\n\nexport default Item\n","import React from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport Item from \"../item\"\nimport { useSelector } from \"@/src/store/redux-separate-context\"\n\nconst hostNameSelector = state => {\n const snapshot = state.global.snapshot\n const data = state.global.chartsMetadata.data\n\n if (!snapshot && !data) return \"\"\n return snapshot ? snapshot.hostname : data.hostname\n}\n\nconst Node = () => {\n const hostname = useSelector(hostNameSelector)\n\n return (\n \n \n {hostname}\n \n \n )\n}\n\nexport default Node\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst tooltipBackground = [\"neutral\", \"black\"]\n\nconst CustomTooltip = ({ children, isBasic }) => (\n \n {children}\n \n)\n\nexport default CustomTooltip\n","import React from \"react\"\nimport CustomTooltip from \"@/src/components/tooltips/customTooltip\"\n\nconst getContent = (content, { isBasic }) => {\n const contentNode = typeof content === \"function\" ? 
content() : content\n if (typeof content === \"string\" || isBasic) {\n return {contentNode}\n }\n return contentNode\n}\n\nexport default getContent\n","import React, { useCallback } from \"react\"\nimport { Tooltip as BaseTooltip } from \"@netdata/netdata-ui\"\nimport getContent from \"./getContent\"\n\nconst Tooltip = ({ children, content, isBasic, ...rest }) => {\n const getTooltipContent = useCallback(() => getContent(content, { isBasic }), [content, isBasic])\n return (\n \n {children}\n \n )\n}\n\nexport default Tooltip\n","import React, { useCallback } from \"react\"\nimport { Flex, Button } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport { setGlobalPauseAction } from \"domains/global/actions\"\nimport { useDispatch } from \"store/redux-separate-context\"\n\nconst Options = () => {\n const dispatch = useDispatch()\n const onClick = useCallback(() => dispatch(setGlobalPauseAction()), [dispatch])\n return (\n \n \n \n \n \n \n \n \n \n \n \n )\n}\n\nexport default Options\n","import React from \"react\"\nimport { Button } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nimport { useHttp } from \"hooks/use-http\"\n\nconst NETDATA_LATEST_VERSION_URL = \"https://api.github.com/repos/netdata/netdata/releases/latest\"\nconst NETDATA_LATEST_GCS_VERSION_URL =\n \"https://www.googleapis.com/storage/v1/b/netdata-nightlies/o/latest-version.txt\"\n\nconst transformGcsVersionResponse = data => data.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\")\n\nconst transformGithubResponse = data => data?.tag_name.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\")\n\nconst versionsMatch = (v1, v2) => {\n if (v1 === v2) {\n return true\n }\n let s1 = v1.split(\".\")\n let s2 = v2.split(\".\")\n // Check major version\n let n1 = parseInt(s1[0].substring(1, 2), 10)\n let n2 = parseInt(s2[0].substring(1, 2), 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n // Check minor version\n n1 = parseInt(s1[1], 10)\n n2 = parseInt(s2[1], 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n // Split patch: format could be e.g. 0-22-nightly\n s1 = s1[2].split(\"-\")\n s2 = s2[2].split(\"-\")\n\n n1 = parseInt(s1[0], 10)\n n2 = parseInt(s2[0], 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n n1 = s1.length > 1 ? parseInt(s1[1], 10) : 0\n n2 = s2.length > 1 ? parseInt(s2[1], 10) : 0\n if (n1 < n2) return false\n return true\n}\n\nconst VersionControl = ({ currentVersion, releaseChannel }) => {\n const isStableReleaseChannel = releaseChannel === \"stable\"\n const [githubVersion] = useHttp(NETDATA_LATEST_VERSION_URL, isStableReleaseChannel, true)\n\n const [gcsVersionResponse] = useHttp(NETDATA_LATEST_GCS_VERSION_URL, !isStableReleaseChannel)\n const [mediaLinkResponse] = useHttp(gcsVersionResponse?.mediaLink, Boolean(gcsVersionResponse))\n\n const latestVersion = isStableReleaseChannel\n ? transformGithubResponse(githubVersion)\n : mediaLinkResponse\n ? 
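// Editor's note on versionsMatch above, with a worked example:
// versionsMatch("v1.29.0", "v1.30.0-22-nightly") compares majors 1 vs 1,
// then minors 29 vs 30, and returns false, so an update is reported.
// One caveat: the major version is read with substring(1, 2), a single digit,
// so a hypothetical "v10.x" tag would be compared as "1". parseInt over the
// full slice would avoid that edge case, e.g.:
const parseMajor = (v: string) => parseInt(v.split(".")[0].substring(1), 10)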
transformGcsVersionResponse(mediaLinkResponse)\n : null\n\n if (!latestVersion) {\n return null\n }\n const isNewVersionAvailable = !versionsMatch(currentVersion, latestVersion)\n\n return (\n \n \n \n )\n}\n\nexport default VersionControl\n","import React from \"react\"\nimport VersionControl from \"components/app-header/components/versionControl\"\nimport { useSelector } from \"@/src/store/redux-separate-context\"\n\nconst versionSelector = state => {\n const { data } = state.global.chartsMetadata\n\n if (!data) return null\n\n const { version, release_channel: releaseChannel } = data\n return {\n version,\n releaseChannel,\n }\n}\n\nconst Version = () => {\n const data = useSelector(versionSelector)\n return (\n data && \n )\n}\n\nexport default Version\n","import { useState, useCallback } from \"react\"\n\n/**\n * @example\n * const [value, toggle, toggleOn, toggleOff] = useToggle(false);\n *\n * @param {Boolean} initialValue\n */\n\nconst useToggle = (initialValue = false) => {\n const [value, setToggle] = useState(!!initialValue)\n const toggle = useCallback(() => setToggle(oldValue => !oldValue), [])\n const toggleOn = useCallback(() => setToggle(true), [])\n const toggleOff = useCallback(() => setToggle(false), [])\n\n return [value, toggle, toggleOn, toggleOff]\n}\n\nexport default useToggle\n","import { useEffect, useState } from \"react\"\n\nconst useLocalStorage = (key, defaultValue) => {\n const [value, setValue] = useState(() => getValueFromStorage(key, defaultValue))\n\n useEffect(() => localStorage.setItem(key, JSON.stringify(value)), [key, value])\n\n return [value, setValue]\n}\n\nconst getValueFromStorage = (key, defaultValue = \"\") =>\n JSON.parse(localStorage.getItem(key)) ?? defaultValue\n\nexport default useLocalStorage\n","import styled from \"styled-components\"\nimport { getColor, getSizeBy, Icon } from \"@netdata/netdata-ui\"\nimport { Menu } from \"@rmwc/menu\"\n\nexport const RootContainer = styled.div`\n width: 100%;\n height: 100%;\n display: flex;\n flex-flow: row nowrap;\n align-items: center;\n`\n\nexport const StyledMenu = styled(Menu)``\n\nexport const DropdownContainer = styled.div`\n cursor: pointer;\n color: ${getColor(\"bright\")};\n .mdc-menu-surface {\n border-radius: 0;\n .mdc-list {\n padding: 0;\n }\n .mdc-list-item {\n padding: 0 ${getSizeBy(5)} 0 ${getSizeBy(5)};\n font-size: 14px;\n height: ${getSizeBy(6)};\n }\n }\n`\n\nexport const ListContainer = styled.div`\n padding: ${getSizeBy(3)} 0;\n`\n\nexport const OpenerIcon = styled(Icon)`\n flex-shrink: 0;\n flex-grow: 0;\n margin-left: ${({ noMargin }) => (noMargin ? 
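// Editor's usage sketch for useLocalStorage above; this mirrors how the date
// picker further down consumes it. Values round-trip through JSON, and the
// ?? fallback only applies when the stored value parses to null/undefined.
// One caveat (editor's observation): JSON.parse throws on a corrupt stored
// value, so a defensive caller would wrap getValueFromStorage in try/catch.
const [resolution, setResolution] = useLocalStorage("resolution", "minutes")
setResolution("hours") // persisted under the "resolution" key as "\"hours\""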
\"unset\" : \"16px\")};\n fill: ${getColor(\"bright\")};\n width: 10px;\n height: 5px;\n`\n","import styled from \"styled-components\"\nimport { getColor, getSizeBy, Icon, Drop } from \"@netdata/netdata-ui\"\nimport { Dropdown } from \"@/src/components/mdx-components/dropdown\"\nimport { dialogsZIndex, customDropdownZIndex } from \"@/src/styles/z-index\"\n\nexport const PickerBox = styled.div`\n display: flex;\n position: relative;\n min-width: ${getSizeBy(102)};\n min-height: ${getSizeBy(43)};\n flex-direction: column;\n align-items: flex-end;\n background-color: ${getColor(\"mainBackground\")};\n color: ${getColor(\"text\")};\n z-index: ${dialogsZIndex};\n border-radius: 8px;\n`\n\nexport const StyledTimePeriod = styled.span`\n margin-top: ${getSizeBy(3)};\n cursor: pointer;\n width: 187px;\n height: ${getSizeBy(2)};\n &:first-of-type {\n margin-top: ${getSizeBy(1)};\n }\n &:last-of-type {\n margin-bottom: ${getSizeBy(1)};\n }\n & > span:hover {\n color: ${getColor(\"textLite\")};\n }\n`\nexport const StyledCustomTimePeriod = styled.span`\n margin: ${getSizeBy(1)} ${getSizeBy(3)} 0;\n color: ${({ isSelected, theme }) => getColor(isSelected ? \"primary\" : \"text\")({ theme })};\n cursor: pointer;\n &:first-of-type {\n margin-top: 0;\n }\n &:hover {\n color: ${getColor(\"textLite\")};\n }\n`\n\nexport const StyledDropdown = styled(Dropdown)`\n width: 88px;\n height: 32px;\n padding-top: 8px;\n padding-bottom: 8px;\n padding-left: 8px;\n padding-right: 7px;\n border: 1px solid ${getColor(\"border\")};\n box-sizing: border-box;\n border-radius: 4px;\n display: flex;\n justify-content: center;\n align-items: center;\n color: ${getColor(\"text\")};\n .mdc-menu-surface--anchor {\n .mdc-menu-surface--open {\n ${customDropdownZIndex}\n margin-top: ${getSizeBy(2)};\n background: ${getColor(\"mainBackground\")};\n border-radius: 4px;\n }\n }\n .mdc-list {\n display: flex;\n flex-direction: column;\n justify-content: center;\n align-items: center;\n }\n`\nexport const DropdownIcon = styled(Icon)`\n fill: ${getColor(\"text\")};\n width: 12px;\n height: 12px;\n`\n\nexport const CustomInput = styled.input`\n border: 1px solid ${getColor(\"border\")};\n color: inherit;\n background: ${getColor(\"mainBackground\")};\n box-sizing: border-box;\n border-radius: 4px;\n padding: 4px;\n width: 32px;\n height: 32px;\n margin-left: 10px;\n margin-right: 10px;\n outline: none;\n &:focus {\n border: 1px solid ${getColor(\"primary\")};\n }\n`\nexport const StyledDrop = styled(Drop).attrs({\n background: \"mainBackground\",\n round: 2,\n margin: [4, 0, 0],\n border: { side: \"all\", color: \"elementBackground\" },\n animation: true,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\nexport const StyledHR = styled.hr`\n border: none;\n margin: 0;\n border-left: 1px solid ${getColor(\"borderSecondary\")};\n height: 284px;\n`\n","import React, { useRef } from \"react\"\nimport { List } from \"@rmwc/list\"\nimport { MenuSurfaceAnchor, MenuSurface } from \"@rmwc/menu\"\nimport { RootContainer, ListContainer, DropdownContainer, OpenerIcon } from \"./styled\"\n\nexport const Dropdown = ({\n title,\n children,\n className,\n renderTitle,\n isOpen = false,\n onMenuToggle,\n anchorCorner = \"bottomStart\",\n renderOpener,\n}) => {\n const ref = useRef()\n\n const handleOpenState = () => {\n onMenuToggle(!isOpen)\n }\n\n const handleClose = () => {\n onMenuToggle(false)\n }\n\n return (\n \n \n \n {typeof children === \"function\" ? 
(\n isOpen && (\n \n {children({ maxHeight: ref.current?.root.ref.style.maxHeight })}\n \n )\n ) : (\n \n {children}\n \n )}\n \n \n {title || (renderTitle && renderTitle())}\n {renderOpener ? (\n renderOpener()\n ) : (\n \n )}\n \n \n \n )\n}\n","import React, { memo, useCallback } from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { StyledTimePeriod } from \"./styled\"\n\nconst TimePeriod = ({ value, period, resolution, isSelected, setTimeRange, tagging }) => {\n const onClick = useCallback(\n () => setTimeRange(value, resolution),\n [value, resolution, setTimeRange]\n )\n return (\n \n {period}\n \n )\n}\n\nexport default memo(TimePeriod)\n","import { format, formatDistanceStrict, parse, getTime, getUnixTime, add, isMatch } from \"date-fns\"\n\nconst MINUTE = 60\nconst HOUR = MINUTE * 60\nconst DAY = HOUR * 24\nconst MONTH = 30 * DAY\n\nexport const maxTimePeriodInUnix = 94694400\nexport const dateResolutions = [\"minutes\", \"hours\", \"days\", \"months\"]\n\nconst resolutionsMapping = {\n minutes: MINUTE,\n hours: HOUR,\n days: DAY,\n months: MONTH,\n}\n\nexport const getCustomTimePeriod = (after, resolution) =>\n Math.round(after / resolutionsMapping[resolution])\n\nexport const parseInputPeriod = (timeCorrection, resolution) => {\n const customRange = add(new Date(0), {\n [resolution]: timeCorrection,\n })\n return -getUnixTime(customRange)\n}\n\nconst focusTaggingMap = {\n startDate: \"start\",\n endDate: \"finish\",\n}\n\nexport const getFocusTagging = focusedInput => focusTaggingMap[focusedInput]\n\nexport const timePeriods = [\n { period: \"Last 5 minutes\", value: -5 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 15 minutes\", value: -15 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 30 minutes\", value: -30 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 2 hours\", value: -2 * HOUR, resolution: \"hours\" },\n { period: \"Last 6 hours\", value: -6 * HOUR, resolution: \"hours\" },\n { period: \"Last 12 hours\", value: -12 * HOUR, resolution: \"hours\" },\n { period: \"Last Day\", value: -DAY, resolution: \"days\" },\n { period: \"Last 2 Days\", value: -2 * DAY, resolution: \"days\" },\n { period: \"Last 7 Days\", value: -7 * DAY, resolution: \"days\" },\n]\n\nexport const formatDates = (startDate, endDate) => {\n const formattedStartDate = format(startDate, \"MMMM d yyyy, H:mm:ss\")\n const formattedEndDate = format(endDate, \"MMMM d yyyy, H:mm:ss\")\n return {\n formattedStartDate,\n formattedEndDate,\n }\n}\n\nexport const formatOffset = offset => {\n if (!offset) return \"+00:00\"\n const splitOffset = offset.toString().split(\".\")\n const mathSign = splitOffset[0] > 0 ? \"+\" : \"-\"\n const absoluteNumber = Math.abs(splitOffset[0]).toString()\n const firstPart = `${mathSign}${absoluteNumber.padStart(2, 0)}`\n return splitOffset.length > 1\n ? `${firstPart}:${String(splitOffset[1] * 0.6).padEnd(2, 0)}`\n : `${firstPart}:00`\n}\n\nexport const getDateWithOffset = (date, utcOffset) => {\n const formattedDate = isMatch(date, \"MMMM d yyyy, H:mm\")\n ? 
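// Worked examples for formatOffset above (editor's illustration): the digits
// after the decimal point are scaled by 0.6 to become minutes.
//   formatOffset(0)     // "+00:00" (guard clause)
//   formatOffset(-3)    // "-03:00"
//   formatOffset(5.5)   // "+05:30" ("5" * 0.6 = 3, right-padded to "30")
//   formatOffset(-5.75) // "-05:45" ("75" * 0.6 = 45)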
date\n : parse(date, \"MMMM d yyyy, H:mm\", Date.now())\n return parse(`${formattedDate} ${formatOffset(utcOffset)}`, \"MMMM d yyyy, H:mm xxx\", Date.now())\n}\n\nexport const getTimePeriod = (startDate, endDate) =>\n formatDistanceStrict(getTime(startDate), getTime(endDate))\n","import React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport TimePeriod from \"./timePeriod\"\nimport { timePeriods } from \"./utils\"\n\nconst TimePeriods = ({ handleTimePeriodChange, selectedDate, tagging }) => (\n \n {timePeriods.map(({ period, value, resolution }) => (\n \n ))}\n \n)\n\nexport default TimePeriods\n","import React, { useCallback, useEffect, useState } from \"react\"\nimport { isValid, add, getUnixTime } from \"date-fns\"\nimport { Flex, Text } from \"@netdata/netdata-ui\"\nimport {\n getCustomTimePeriod,\n parseInputPeriod,\n dateResolutions,\n maxTimePeriodInUnix,\n} from \"./utils\"\nimport { StyledDropdown, DropdownIcon, CustomInput, StyledCustomTimePeriod } from \"./styled\"\n\nconst CustomTimePeriod = ({ handleTimePeriodChange, value, resolution, tagging }) => {\n const getInputValue = () => (value <= 0 ? getCustomTimePeriod(-value, resolution) : 0)\n const [inputValue, setInputValue] = useState(getInputValue)\n const [isDropdownOpen, toggleDropdown] = useState(false)\n\n // eslint-disable-next-line react-hooks/exhaustive-deps\n useEffect(() => setInputValue(getInputValue()), [value])\n\n const onChange = useCallback(e => setInputValue(e.target.value), [])\n\n const onBlur = useCallback(\n e => {\n const currentValue = Number(e.currentTarget.value)\n const isValidInput =\n !Number.isNaN(currentValue) && Number.isInteger(currentValue) && currentValue > 0\n const timePeriod = add(new Date(0), {\n [resolution]: currentValue,\n })\n const isValidTimePeriod =\n isValidInput && isValid(timePeriod) && getUnixTime(timePeriod) <= maxTimePeriodInUnix\n if (isValidTimePeriod)\n return handleTimePeriodChange(parseInputPeriod(currentValue, resolution), resolution)\n return value <= 0 ? 
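// Editor's illustration of parseInputPeriod above: date-fns turns "N units"
// into seconds since the epoch, and the negation matches the dashboard's
// "negative after means a relative window" convention.
//   parseInputPeriod(2, "hours") // add(new Date(0), { hours: 2 }) -> -7200
//   parseInputPeriod(7, "days")  // -604800
// CustomTimePeriod's onBlur then rejects anything beyond maxTimePeriodInUnix.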
setInputValue(getCustomTimePeriod(-value, resolution)) : setInputValue(0)\n },\n [resolution, value, handleTimePeriodChange]\n )\n\n const onChangeResolution = useCallback(\n newResolution => {\n return () => {\n handleTimePeriodChange(parseInputPeriod(inputValue, newResolution), newResolution)\n toggleDropdown(false)\n }\n },\n [inputValue, handleTimePeriodChange]\n )\n\n const renderTitle = () => (\n \n {resolution}\n \n \n )\n return (\n \n Last\n \n null}\n >\n {() =>\n dateResolutions.map(dateResolution => (\n \n {dateResolution}\n \n ))\n }\n \n \n )\n}\n\nexport default CustomTimePeriod\n","import React from \"react\"\nimport DatePickerLib from \"react-datepicker\"\nimport \"react-datepicker/dist/react-datepicker.css\"\n\nconst DatePicker = ({\n selected,\n selectsStart = false,\n selectsEnd = false,\n startDate,\n endDate,\n onChange,\n minDate,\n maxDate,\n dateFormat = \"MM/dd/yyyy\",\n open = false,\n startOpen = false,\n inline = false,\n selectsRange = false,\n monthsShown = 1,\n showPopperArrow = true,\n calendarContainer = null,\n}) => (\n \n)\n\nexport default DatePicker\n","import { getColor, getRgbColor } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nexport const StyledDateInput = styled.input`\n width: 100%;\n text-align: center;\n border: 1px solid ${getColor(\"border\")};\n color: inherit;\n background: ${getColor(\"mainBackground\")};\n box-sizing: border-box;\n border-radius: 4px;\n padding: 4px;\n height: 32px;\n margin-left: 20px;\n margin-right: 20px;\n outline: none;\n &:focus {\n border: 1px solid ${getColor(\"primary\")};\n }\n`\nexport const StyledCalendar = styled.div`\n background: ${getColor(\"mainBackground\")};\n border: 0;\n .react-datepicker {\n &__navigation {\n top: 8px;\n &-icon::before {\n border-color: ${getColor(\"text\")};\n }\n }\n &__header {\n background: ${getColor(\"mainBackground\")};\n border: 0;\n .react-datepicker__current-month {\n color: ${getColor(\"main\")};\n font-weight: normal;\n }\n .react-datepicker__day-name {\n color: ${getColor(\"textLite\")};\n }\n }\n &__day {\n color: ${getColor(\"main\")};\n &:hover {\n background: ${getColor(\"elementBackground\")};\n }\n &--disabled {\n color: ${getColor(\"textLite\")};\n &:hover {\n background: inherit;\n }\n }\n &--keyboard-selected,\n &--keyboard-selected:hover {\n color: ${getColor(\"main\")};\n background: inherit;\n border-radius: inherit;\n }\n &--selected,\n &--selected:hover {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-radius: 8px;\n }\n &--in-selecting-range,\n &--in-range {\n color: ${getColor(\"primary\")};\n background: ${getColor(\"elementBackground\")};\n border-radius: 0;\n }\n &--selecting-range-start,\n &--range-start {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-top-left-radius: 8px;\n border-bottom-left-radius: 8px;\n &:hover {\n color: ${getColor(\"bright\")};\n background: ${getRgbColor([\"green\", \"netdata\"], 0.8)};\n border-radius: 0;\n border-top-left-radius: 8px;\n border-bottom-left-radius: 8px;\n }\n }\n &--selecting-range-end,\n &--range-end {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-top-right-radius: 8px;\n border-bottom-right-radius: 8px;\n &:hover {\n color: ${getColor(\"bright\")};\n background: ${getRgbColor([\"green\", \"netdata\"], 0.8)};\n border-top-right-radius: 8px;\n border-bottom-right-radius: 8px;\n }\n }\n }\n }\n`\n","import React, { useState, useEffect, useCallback } from \"react\"\nimport { 
format, isValid, getTime } from \"date-fns\"\nimport { getDateWithOffset } from \"./utils\"\nimport { StyledDateInput } from \"../datePicker/styled\"\nimport { useDateTime } from \"@/src/utils/date-time\"\n\nconst DatePickerInput = ({\n name = \"\",\n value = \"\",\n onDatesChange,\n onFocus,\n placeholderText = \"\",\n}) => {\n const { utcOffset } = useDateTime()\n const [inputValue, setInputValue] = useState(\"\")\n const onChange = useCallback(e => {\n const date = e.target.value\n setInputValue(date)\n }, [])\n const setFormattedValue = useCallback(value => {\n if (isValid(value)) {\n const formattedDate = format(value, \"MMMM d yyyy, H:mm\")\n setInputValue(formattedDate)\n }\n }, [])\n const onBlur = useCallback(\n e => {\n const parsedDate = getDateWithOffset(e.target.value, utcOffset)\n const isValidDate = isValid(parsedDate) && getTime(parsedDate) > 0\n if (isValidDate) {\n const timestamp = getTime(parsedDate)\n onDatesChange(timestamp, () => setFormattedValue(value))\n } else setFormattedValue(value)\n },\n [value, utcOffset, onDatesChange, setFormattedValue]\n )\n\n useEffect(() => setFormattedValue(value), [value, setFormattedValue])\n\n return (\n \n )\n}\n\nexport default DatePickerInput\n","import { useDateTime } from \"@/src/utils/date-time\"\nimport { useCallback } from \"react\"\n\nconst useLocaleDate = () => {\n const { localeTimeString, localeDateString } = useDateTime()\n return useCallback(\n date => {\n return `${localeDateString(date, { locale: \"en-us\", long: false })} ${localeTimeString(date, {\n secs: false,\n })}`\n },\n [localeTimeString, localeDateString]\n )\n}\n\nexport default useLocaleDate\n","import { useMemo } from \"react\"\nimport { toDate } from \"date-fns\"\nimport useLocaleDate from \"./useLocaleDate\"\n\nexport const convertTimestampToDate = (timestamp, getLocaleDate) => {\n if (timestamp > 0) {\n return toDate(new Date(getLocaleDate(timestamp)))\n } else if (timestamp || timestamp === 0)\n return toDate(new Date(getLocaleDate(new Date().valueOf() + timestamp * 1000)))\n return null\n}\n\nconst useConvertedDates = (startDate, endDate) => {\n const getLocaleDate = useLocaleDate()\n return useMemo(\n () => [\n convertTimestampToDate(startDate, getLocaleDate),\n convertTimestampToDate(endDate, getLocaleDate),\n ],\n [startDate, endDate, getLocaleDate]\n )\n}\n\nexport default useConvertedDates\n","import { Flex } from \"@netdata/netdata-ui\"\nimport React, { useCallback } from \"react\"\nimport { getTime, isBefore, format } from \"date-fns\"\nimport { useDateTime } from \"@/src/utils/date-time\"\nimport DatePicker from \"../datePicker/datePickerLib\"\nimport DatePickerInput from \"./datePickerInput\"\nimport useConvertedDates, { convertTimestampToDate } from \"./useConvertedDate\"\nimport useLocaleDate from \"./useLocaleDate\"\nimport { getDateWithOffset } from \"./utils\"\nimport { StyledCalendar } from \"../datePicker/styled\"\n\nconst DatePickerWrapper = ({\n startDate,\n setStartDate,\n endDate,\n setEndDate,\n onDatesChange,\n onInputFocus,\n}) => {\n const getLocaleDate = useLocaleDate()\n const [convertedStartDate, convertedEndDate] = useConvertedDates(startDate, endDate)\n const { utcOffset } = useDateTime()\n const setValidStartDate = useCallback(\n (startDate, setPreviousValue) =>\n isBefore(convertTimestampToDate(startDate, getLocaleDate), convertedEndDate)\n ? 
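// Editor's illustration of convertTimestampToDate above: positive values are
// absolute millisecond timestamps, zero or negative values mean "seconds
// relative to now", and a missing timestamp yields null.
//   convertTimestampToDate(1700000000000, getLocaleDate) // that instant
//   convertTimestampToDate(-900, getLocaleDate)          // now minus 15 min
//   convertTimestampToDate(undefined, getLocaleDate)     // null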
setStartDate(startDate)\n : setPreviousValue(),\n [convertedEndDate, getLocaleDate, setStartDate]\n )\n\n const setValidEndDate = useCallback(\n (endDate, setPreviousValue) =>\n isBefore(convertedStartDate, convertTimestampToDate(endDate, getLocaleDate))\n ? setEndDate(endDate)\n : setPreviousValue(),\n [convertedStartDate, getLocaleDate, setEndDate]\n )\n\n const onChange = useCallback(\n dates => {\n const [startDate, endDate] = dates\n\n const startDateWithOffset = startDate\n ? getDateWithOffset(format(startDate, \"MMMM d yyyy, H:mm\"), utcOffset)\n : startDate\n const endDateWithOffset = endDate\n ? getDateWithOffset(format(endDate, \"MMMM d yyyy, H:mm\"), utcOffset)\n : endDate\n\n const startDateTimestamp = getTime(startDateWithOffset) || null\n const endDateTimestamp = getTime(endDateWithOffset) || null\n\n onDatesChange(startDateTimestamp, endDateTimestamp)\n },\n [utcOffset, onDatesChange]\n )\n\n return (\n \n \n \n \n \n \n \n )\n}\n\nexport default DatePickerWrapper\n","import React, { useMemo } from \"react\"\nimport { Flex, Icon, TextSmall } from \"@netdata/netdata-ui\"\nimport { formatDates, getTimePeriod } from \"./utils\"\nimport useConvertedDates from \"./useConvertedDate\"\n\nconst PeriodIndication = ({ startDate, endDate }) => {\n const [convertedStart, convertedEnd] = useConvertedDates(startDate, endDate)\n\n const { formattedStartDate, formattedEndDate } = useMemo(\n () => formatDates(convertedStart, convertedEnd),\n [convertedStart, convertedEnd]\n )\n const timePeriod = useMemo(\n () => getTimePeriod(convertedStart, convertedEnd),\n [convertedStart, convertedEnd]\n )\n\n return (\n \n \n \n From\n \n \n {formattedStartDate}\n \n \n \n \n \n To\n \n \n {formattedEndDate}\n \n \n \n /\n \n {timePeriod}\n \n \n \n )\n}\n\nexport default PeriodIndication\n","import moment from \"moment\"\n\nexport const SECONDS = 1000\nexport const MINUTE = SECONDS * 60\nexport const HOUR = MINUTE * 60\nexport const DAY = HOUR * 24\nexport const MONTH = DAY * 30\n\nconst resolutionMap = [\n { value: DAY, unit: \"d\" },\n { value: HOUR, unit: \"h\" },\n { value: MINUTE, unit: \"min\" },\n { value: MINUTE, unit: \"min\" },\n { value: SECONDS, unit: \"s\" },\n]\n\nexport const getStartDate = start =>\n start < 0 ? moment(new Date()).add(start, \"seconds\") : moment(start)\nexport const getEndDate = end => (!end ? moment(new Date()) : moment(end))\nexport const getIsSameDate = (startDate, endDate) => startDate.isSame(endDate, \"day\")\nexport const getDuration = (startDate, endDate) => moment.duration(startDate.diff(endDate))\n\nconst getResolution = (value, resolution) => (value > 1 ? 
`${Math.floor(value)}${resolution}` : \"\")\n\nexport const getGranularDuration = duration => {\n let seconds = Math.abs(duration)\n const showSeconds = seconds < MINUTE\n return resolutionMap.reduce((acc, { value, unit }) => {\n if (value === SECONDS && !showSeconds) return acc\n acc = acc + getResolution(seconds / value, unit)\n seconds = seconds % value\n return acc\n }, \"\")\n}","import styled from \"styled-components\"\nimport { Flex, getColor } from \"@netdata/netdata-ui\"\n\nconst Container = styled(Flex)`\n cursor: pointer;\n\n &:hover * {\n color: ${getColor(\"textLite\")};\n fill: ${getColor(\"textLite\")};\n }\n`\n\nexport default Container\n","import React from \"react\"\nimport { Flex, TextSmall, Icon } from \"@netdata/netdata-ui\"\nimport { useDateTime } from \"utils/date-time\"\n\nconst DateBox = ({ isPlaying, startDate, endDate, isSameDate }) => {\n const { localeTimeString, localeDateString } = useDateTime()\n return (\n \n \n {localeDateString(startDate, { long: false })} •{\" \"}\n \n {localeTimeString(startDate, { secs: false })}\n \n \n \n \n {!isSameDate && `${localeDateString(endDate, { long: false })} • `}\n \n {localeTimeString(endDate, { secs: false })}\n \n \n \n )\n}\n\nexport default DateBox\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst DurationBox = ({ isPlaying, duration }) => {\n return (\n \n \n {isPlaying && (\n \n • last\n \n )}\n \n \n {duration}\n \n \n )\n}\n\nexport default DurationBox\n","import React, { useState, useMemo, useEffect, forwardRef } from \"react\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport { useSelector as useDashboardSelector } from \"store/redux-separate-context\"\nimport { selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport {\n getStartDate,\n getEndDate,\n getIsSameDate,\n getDuration,\n MINUTE,\n getGranularDuration,\n} from \"./utils\"\nimport Container from \"./container\"\nimport DateBox from \"./dateBox\"\nimport DurationBox from \"./durationBox\"\n\nconst PickerAccessorElement = forwardRef(\n (\n { onClick, start = 15 * MINUTE, end, isPlaying, isPickerOpen, setRangeValues, tagging },\n ref\n ) => {\n const [timeframe, setTimeframe] = useState()\n const startDate = getStartDate(start)\n const endDate = getEndDate(end)\n const globalPanAndZoom = useDashboardSelector(selectGlobalPanAndZoom)\n useEffect(() => {\n const after = getDuration(startDate, endDate).as(\"seconds\")\n if (!isPlaying && timeframe !== after) setTimeframe(Math.round(after))\n if (isPlaying && timeframe && !!globalPanAndZoom) {\n setRangeValues({ start: Math.round(timeframe) })\n setTimeframe(null)\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [startDate, endDate, timeframe, isPlaying])\n\n const isSameDate = useMemo(() => getIsSameDate(startDate, endDate), [startDate, endDate])\n const duration = useMemo(\n () => getGranularDuration(getDuration(startDate, endDate).as(\"milliseconds\")),\n // eslint-disable-next-line react-hooks/exhaustive-deps\n [isPlaying, startDate, endDate]\n )\n\n return (\n {} : \"Select a predefined or a custom timeframe\"}\n align=\"bottom\"\n plain\n >\n \n \n \n \n \n )\n }\n)\n\nexport default PickerAccessorElement\n","import React, { useState, useEffect, useMemo, useRef, useCallback } from \"react\"\nimport { Button, Flex } from \"@netdata/netdata-ui\"\nimport useToggle from \"hooks/useToggle\"\nimport useLocalStorage from \"hooks/useLocalStorage\"\nimport TimePeriods from \"./timePeriods\"\nimport CustomTimePeriod from 
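// Editor's walk-through of getGranularDuration above: the reduce peels whole
// days, hours and minutes off the running remainder; seconds only render when
// the whole duration is under one minute (the showSeconds guard).
//   getGranularDuration(90061000) // 1d 1h 1min 1s of ms -> "1d1h1min"
//   getGranularDuration(45000)    // -> "45s"
// Editor's observation: resolutionMap lists { value: MINUTE, unit: "min" }
// twice; the second pass always sees a remainder below one minute and
// contributes nothing, so the duplicate is harmless but likely unintended.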
\"./customTimePeriod\"\nimport DatePickerWrapper from \"./datePickerWrapper\"\nimport { getFocusTagging } from \"./utils\"\nimport PeriodIndication from \"./periodIndication\"\nimport AccessorElement from \"./accessorElement\"\nimport { PickerBox, StyledDrop, StyledHR } from \"./styled\"\n\nexport const reportEvent = (\n eventCategory,\n eventAction,\n eventLabel,\n eventValue,\n event = \"gaCustomEvent\"\n) => {\n if (window.dataLayer) {\n const eventData = { event, eventCategory, eventAction, eventLabel, eventValue }\n window.dataLayer.push(eventData)\n }\n}\n\nconst DatePickerDrop = ({\n onChange,\n values: { start: initialStartDate, end: initialEndDate } = {},\n defaultValue = -60 * 15,\n tagging = \"\",\n isPlaying,\n}) => {\n const [startDate, setStartDate] = useState(initialStartDate)\n const [endDate, setEndDate] = useState(initialStartDate)\n const [resolution, setResolution] = useLocalStorage(\"resolution\", \"minutes\")\n const [focusedInput, setFocusedInput] = useState(\"startDate\")\n const [isOpen, toggle, , close] = useToggle()\n const ref = useRef()\n\n const setDates = useCallback(({ startDate, endDate }) => {\n setStartDate(startDate)\n setEndDate(endDate)\n }, [])\n\n useEffect(() => {\n setDates({\n startDate: initialStartDate,\n endDate: initialEndDate,\n })\n }, [initialStartDate, initialEndDate, setDates])\n\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const clearChanges = useCallback(() => setDates({ startDate: defaultValue, endDate: 0 }), [])\n\n const onInputFocus = useCallback(e => {\n if (!e.target.name) return\n setFocusedInput(e.target.name)\n }, [])\n\n const togglePicker = useCallback(\n e => {\n e.stopPropagation()\n toggle()\n },\n [toggle]\n )\n\n const applyChanges = () => {\n onChange({\n start: startDate,\n end: endDate,\n })\n close()\n }\n\n const focusTagging = useMemo(() => getFocusTagging(focusedInput), [focusedInput])\n\n const isValidTimePeriod = startDate !== null && endDate !== null && startDate !== endDate\n const isApplyDisabled = startDate === initialStartDate && endDate === initialEndDate\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const consistentDefaultValue = useMemo(() => defaultValue, [])\n const isClearDisabled = startDate === consistentDefaultValue\n\n const handleTimePeriodChange = useCallback(\n (time, resolution) => {\n setResolution(resolution)\n setDates({\n startDate: time,\n endDate: 0,\n })\n },\n [setDates, setResolution]\n )\n const onDatepickerChange = (startDate, endDate) => {\n setDates({ startDate, endDate })\n const date = focusTagging === \"finish\" ? endDate || startDate : startDate || endDate\n reportEvent(\"date-picker\", \"click-date-picker\", tagging, String(date))\n }\n\n const pickerDrop =\n ref.current && isOpen ? 
(\n \n \n \n \n \n \n \n \n \n \n \n {isValidTimePeriod && }\n \n \n \n \n \n \n \n ) : null\n\n return (\n <>\n \n {pickerDrop}\n \n )\n}\n\nexport default DatePickerDrop\n","import React, { memo, useEffect, useMemo } from \"react\"\nimport {\n useDispatch as useDashboardDispatch,\n useSelector as useDashboardSelector,\n} from \"store/redux-separate-context\"\nimport {\n resetGlobalPanAndZoomAction,\n setGlobalPanAndZoomAction,\n setDefaultAfterAction,\n} from \"domains/global/actions\"\nimport { selectDefaultAfter, selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport { setHashParams } from \"utils/hash-utils\"\nimport DatePickerDrop from \"./datePickerDrop\"\n\nconst ReduxDatePickerContainer = memo(({ tagging, isPlaying }) => {\n const dashboardDispatch = useDashboardDispatch()\n\n const globalPanAndZoom = useDashboardSelector(selectGlobalPanAndZoom)\n const isGlobalPanAndZoom = Boolean(globalPanAndZoom)\n\n const defaultAfter = useDashboardSelector(selectDefaultAfter)\n const pickedValues = useMemo(\n () =>\n isGlobalPanAndZoom\n ? { start: globalPanAndZoom.after, end: globalPanAndZoom.before }\n : {\n start: defaultAfter,\n end: 0,\n },\n [isGlobalPanAndZoom, globalPanAndZoom, defaultAfter]\n )\n\n function handlePickedValuesChange(params) {\n const { start, end } = params\n if (start < 0) {\n // live mode\n dashboardDispatch(\n // changes the default value, so it becomes inconsistent\n setDefaultAfterAction({\n after: start,\n })\n )\n if (isGlobalPanAndZoom) {\n dashboardDispatch(resetGlobalPanAndZoomAction())\n }\n } else {\n // global-pan-and-zoom mode\n dashboardDispatch(\n setGlobalPanAndZoomAction({\n after: start,\n before: end,\n })\n )\n }\n }\n\n useEffect(() => {\n const { start, end } = pickedValues\n const after = start.toString()\n const before = end.toString()\n if (window.urlOptions.after !== after || window.urlOptions.before !== before) {\n window.urlOptions.netdataPanAndZoomCallback(true, after, before)\n }\n setHashParams({ after, before })\n }, [pickedValues])\n return (\n \n )\n})\n\nexport default ReduxDatePickerContainer\n","import styled from \"styled-components\"\nimport { Flex, getRgbColor } from \"@netdata/netdata-ui\"\n\nconst getBackground = ({ theme, isPlaying }) => {\n const { name } = theme\n\n const background =\n name === \"Dark\"\n ? getRgbColor(isPlaying ? [\"green\", \"netdata\"] : [\"neutral\", \"tuna\"], isPlaying ? 0.3 : 1)\n : getRgbColor(isPlaying ? [\"green\", \"frostee\"] : [\"neutral\", \"blackhaze\"])\n\n return background({ theme })\n}\n\nconst Container = styled(Flex)`\n background: ${getBackground};\n`\n\nexport default Container\n","import styled from \"styled-components\"\nimport { Pill, getColor } from \"@netdata/netdata-ui\"\n\nconst getHoverColor = ({ isPlaying }) =>\n getColor(isPlaying ? [\"green\", \"chateau\"] : [\"neutral\", \"iron\"])\n\nconst StyledPill = styled(Pill).attrs(({ isPlaying }) => ({\n flavour: isPlaying ? \"success\" : \"neutral\",\n}))`\n &:hover {\n background: ${getHoverColor};\n border-color: ${getHoverColor};\n }\n`\n\nexport default StyledPill\n","import React, { useMemo } from \"react\"\nimport { useDispatch } from \"store/redux-separate-context\"\nimport { resetGlobalPauseAction, setGlobalPauseAction } from \"domains/global/actions\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport StyledPill from \"./styledPill\"\n\nconst getIcon = (isPlaying, isForcePlaying) => {\n if (!isPlaying) return \"pauseSolid\"\n return isForcePlaying ? 
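// Editor's truth table for getIcon above:
//   isPlaying = false                  -> "pauseSolid" (isForcePlaying ignored)
//   isPlaying = true,  isForcePlaying  -> "forcePlay"
//   isPlaying = true, !isForcePlaying  -> "playSolid"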
\"forcePlay\" : \"playSolid\"\n}\n\nconst PlayPausePill = ({ isPlaying, isForcePlaying }) => {\n const dispatch = useDispatch()\n\n const onPlay = () => dispatch(resetGlobalPauseAction({ forcePlay: false }))\n const onPause = () => dispatch(setGlobalPauseAction())\n const icon = useMemo(() => getIcon(isPlaying, isForcePlaying), [isPlaying, isForcePlaying])\n\n return (\n \n \n {isPlaying ? \"Playing\" : \"Paused\"}\n \n \n )\n}\n\nexport default PlayPausePill\n","import React, { useCallback, forwardRef } from \"react\"\nimport styled from \"styled-components\"\nimport { getColor, Flex, Icon, Text } from \"@netdata/netdata-ui\"\n\nexport const PanelRowContainer = styled(Flex)`\n cursor: pointer;\n\n &:hover {\n background: ${getColor(\"selected\")};\n }\n\n ${props => props.selected && `background: ${getColor(\"selected\")(props)};`}\n`\n\nconst MenuItem = forwardRef(\n (\n {\n disabled,\n children,\n Wrapper = Text,\n onClick,\n testid,\n icon,\n padding = [2, 3],\n margin = [0],\n round = 0,\n actions,\n selected,\n width = \"100%\",\n },\n ref\n ) => {\n const click = useCallback(() => {\n if (disabled) return\n if (onClick) onClick()\n }, [onClick, disabled])\n\n return (\n \n \n {typeof icon === \"string\" ? (\n \n ) : (\n icon\n )}\n \n {children}\n \n \n {actions}\n \n )\n }\n)\n\nexport default MenuItem\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, H4, Collapsible } from \"@netdata/netdata-ui\"\n\nexport const DefaultListHeader = styled(H4).attrs({ padding: [0], margin: [0] })`\n cursor: pointer;\n`\n\nconst SectionHandle = ({ toggleOpen, label, testid, Header = DefaultListHeader }) => (\n
\n {label} {/* surrounding Header markup stripped during extraction; it presumably receives onClick={toggleOpen} */}\n
\n)\n\nconst ItemsList = ({ isOpen = false, toggleOpen, label, children, testid, Header }) => (\n \n \n {children}\n \n)\n\nexport default ItemsList\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst PlayOptionsTooltip = () => (\n \n \n Play to refresh and have live content, pause to see historical, or force play to keep\n refreshing even when the tab loses focus at the expense of some system performance.\n \n \n)\n\nexport default PlayOptionsTooltip\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nexport const Divider = styled(Flex).attrs({\n background: \"disabled\",\n height: \"1px\",\n margin: [2, 6],\n})``\n","import React, { memo, Fragment } from \"react\"\nimport styled from \"styled-components\"\nimport { useToggle } from \"react-use\"\nimport { Flex, Icon, Drop } from \"@netdata/netdata-ui\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport { useDispatch } from \"store/redux-separate-context\"\nimport { resetGlobalPauseAction, setGlobalPauseAction } from \"domains/global/actions\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport PlayOptionsTooltip from \"./playOptionsTooltip\"\n\nconst MenuButton = styled(Flex).attrs({ padding: [1], role: \"button\" })`\n cursor: pointer;\n`\n\nconst Dropdown = styled(Flex).attrs({\n column: true,\n padding: [2],\n background: \"dropdown\",\n round: 1,\n overflow: { vertical: \"auto\" },\n margin: [2, 0, 0],\n width: 40,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\n\nconst PlayOptions = ({ target }) => {\n const dispatch = useDispatch()\n const [isOpen, toggle] = useToggle()\n\n const close = () => toggle(false)\n\n const onPlay = () => {\n dispatch(resetGlobalPauseAction({ forcePlay: false }))\n close()\n }\n\n const onPause = () => {\n dispatch(setGlobalPauseAction())\n close()\n }\n\n const onForcePlay = () => {\n dispatch(resetGlobalPauseAction({ forcePlay: true }))\n close()\n }\n\n return (\n \n {!isOpen ? 
(\n } align=\"bottom\" plain>\n \n \n \n \n ) : (\n \n \n \n )}\n {target.current && isOpen && (\n \n \n \n Play\n \n \n Pause\n \n \n Force Play\n \n \n \n )}\n \n )\n}\n\nconst MemoizedPlayOptions = memo(PlayOptions)\n\nexport default MemoizedPlayOptions\n","import React, { useMemo, useRef } from \"react\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport {\n selectHasWindowFocus,\n selectStopUpdatesWhenFocusIsLost,\n selectGlobalPanAndZoom,\n selectGlobalPause,\n selectGlobalSelection,\n} from \"domains/global/selectors\"\nimport { ReduxDatePickerContainer } from \"components/date-picker\"\nimport Item from \"../item\"\nimport Container from \"./container\"\nimport PlayPausePill from \"./playPausePill\"\nimport PlayOptions from \"./playOptions\"\n\nconst tagging = \"global-view\"\n\nconst GlobalControls = () => {\n const ref = useRef()\n const hasWindowFocus = useSelector(selectHasWindowFocus)\n const stopUpdatesWhenFocusIsLost = useSelector(selectStopUpdatesWhenFocusIsLost)\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const hoveredX = useSelector(selectGlobalSelection)\n const globalPause = useSelector(selectGlobalPause)\n\n const isPlaying = useMemo(\n () =>\n Boolean(\n (hasWindowFocus || !stopUpdatesWhenFocusIsLost) &&\n !globalPanAndZoom &&\n !hoveredX &&\n !globalPause\n ),\n [hasWindowFocus, stopUpdatesWhenFocusIsLost, globalPanAndZoom, hoveredX, globalPause]\n )\n\n return (\n \n \n \n \n \n \n \n )\n}\n\nexport default GlobalControls\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst hollowColors = {\n warning: \"#FFF8E1\",\n error: \"#FFEBEF\",\n}\n\nconst StyledPill = styled(Flex).attrs(({ round = 999, hollow, background }) => ({\n padding: [0.5, 2],\n round,\n border: hollow ? { side: \"all\", color: background, size: \"1px\" } : false,\n}))`\n background: ${({ background, hollow }) => (hollow ? hollowColors[background] : background)};\n cursor: pointer;\n`\n\nexport default StyledPill\n","import React, { forwardRef } from \"react\"\nimport { TextMicro } from \"@netdata/netdata-ui\"\nimport StyledPill from \"./styled\"\n\nconst Pill = forwardRef(({ children, background, color, hollow, ...rest }, ref) => (\n \n \n {children}\n \n \n))\n\nexport default Pill\n","import React, { useMemo } from \"react\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectActiveAlarms } from \"domains/global/selectors\"\nimport Item from \"./item\"\nimport Pill from \"./pill\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst pillProps = {\n \"data-toggle\": \"modal\",\n \"data-target\": \"#alarmsModal\",\n}\n\nconst Alarms = () => {\n const activeAlarms = useSelector(selectActiveAlarms)\n\n const alarms = useMemo(() => (activeAlarms ? Object.values(activeAlarms.alarms) : []), [\n activeAlarms,\n ])\n\n const { critical, warning } = useMemo(\n () =>\n alarms.reduce(\n (acc, { status }) => {\n if (status === \"CRITICAL\") acc.critical = acc.critical + 1\n if (status === \"WARNING\") acc.warning = acc.warning + 1\n return acc\n },\n { critical: 0, warning: 0 }\n ),\n [alarms]\n )\n\n return (\n \n 1 ? \"s\" : \"\"}`\n : \"No critical alerts\"\n }\n align=\"bottom\"\n plain\n >\n \n {critical}\n \n \n 1 ? 
\"s\" : \"\"}` : \"No warning alerts\"\n }\n align=\"bottom\"\n plain\n >\n \n {warning}\n \n \n \n )\n}\n\nexport default Alarms\n","import React from \"react\"\nimport { Button, News as AgentNews } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst News = () => {\n return (\n \n {({ toggle, upToDate }) => (\n \n \n \n )}\n \n )\n}\n\nexport default News\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Dropdown = styled(Flex).attrs({\n column: true,\n padding: [2],\n background: \"dropdown\",\n round: 1,\n overflow: { vertical: \"auto\" },\n margin: [2, 0, 0],\n width: 80,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\n\nexport default Dropdown\n","import styled from \"styled-components\"\nimport { TextInput } from \"@netdata/netdata-ui\"\n\nconst SearchInput = styled(TextInput)`\n & input {\n background: transparent;\n }\n\n & > label {\n margin-bottom: 0;\n }\n`\nexport default SearchInput\n","import React, { forwardRef } from \"react\"\nimport SearchInput from \"./searchInput\"\n\nconst Search = forwardRef(({ value, onChange }, ref) => (\n \n))\n\nexport default Search\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Container = styled(Flex).attrs({\n column: true,\n padding: [2, 0, 0],\n overflow: { vertical: \"auto\" },\n height: { max: \"320px\" },\n})``\n\nexport default Container\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Wrapper = styled(Flex).attrs({\n justifyContent: \"between\",\n alignItems: \"center\",\n width: \"100%\",\n gap: 2,\n})``\n\nexport default Wrapper\n","import React, { useCallback } from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport Wrapper from \"./wrapper\"\n\nconst OffsetItem = ({ name, offset, utc, onSelect }) => {\n const onClick = useCallback(() => onSelect(utc), [utc, onSelect])\n\n return (\n \n {name}\n \n UTC {offset}\n \n \n )\n}\n\nexport default OffsetItem\n","export const timezones = [\n {\n value: \"Dateline Standard Time\",\n abbr: \"DST\",\n text: \"International Date Line West\",\n utc: [\"Etc/GMT+12\"],\n },\n {\n value: \"UTC-11\",\n abbr: \"U\",\n text: \"Coordinated Universal Time-11\",\n utc: [\"Etc/GMT+11\", \"Pacific/Midway\", \"Pacific/Niue\", \"Pacific/Pago_Pago\"],\n },\n {\n value: \"Hawaiian Standard Time\",\n abbr: \"HST\",\n text: \"Hawaii\",\n utc: [\n \"Etc/GMT+10\",\n \"Pacific/Honolulu\",\n \"Pacific/Johnston\",\n \"Pacific/Rarotonga\",\n \"Pacific/Tahiti\",\n ],\n },\n {\n value: \"Alaskan Standard Time\",\n abbr: \"AKDT\",\n text: \"Alaska\",\n utc: [\n \"America/Anchorage\",\n \"America/Juneau\",\n \"America/Nome\",\n \"America/Sitka\",\n \"America/Yakutat\",\n ],\n },\n {\n value: \"Pacific Standard Time (Mexico)\",\n abbr: \"PDT\",\n text: \"Baja California\",\n utc: [\"America/Santa_Isabel\"],\n },\n {\n value: \"Pacific Standard Time\",\n abbr: \"PST\",\n text: \"Pacific Time (US & Canada)\",\n utc: [\n \"America/Dawson\",\n \"America/Los_Angeles\",\n \"America/Tijuana\",\n \"America/Vancouver\",\n \"America/Whitehorse\",\n \"PST8PDT\",\n ],\n },\n {\n value: \"US Mountain Standard Time\",\n abbr: \"UMST\",\n text: \"Arizona\",\n utc: [\n \"America/Creston\",\n \"America/Dawson_Creek\",\n \"America/Hermosillo\",\n \"America/Phoenix\",\n \"Etc/GMT+7\",\n ],\n },\n {\n value: \"Mountain Standard Time (Mexico)\",\n abbr: \"MDT\",\n text: \"Chihuahua, 
La Paz, Mazatlan\",\n utc: [\"America/Chihuahua\", \"America/Mazatlan\"],\n },\n {\n value: \"Mountain Standard Time\",\n abbr: \"MDT\",\n text: \"Mountain Time (US & Canada)\",\n utc: [\n \"America/Boise\",\n \"America/Cambridge_Bay\",\n \"America/Denver\",\n \"America/Edmonton\",\n \"America/Inuvik\",\n \"America/Ojinaga\",\n \"America/Yellowknife\",\n \"MST7MDT\",\n ],\n },\n {\n value: \"Central America Standard Time\",\n abbr: \"CAST\",\n text: \"Central America\",\n utc: [\n \"America/Belize\",\n \"America/Costa_Rica\",\n \"America/El_Salvador\",\n \"America/Guatemala\",\n \"America/Managua\",\n \"America/Tegucigalpa\",\n \"Etc/GMT+6\",\n \"Pacific/Galapagos\",\n ],\n },\n {\n value: \"Central Standard Time\",\n abbr: \"CDT\",\n text: \"Central Time (US & Canada)\",\n utc: [\n \"America/Chicago\",\n \"America/Indiana/Knox\",\n \"America/Indiana/Tell_City\",\n \"America/Matamoros\",\n \"America/Menominee\",\n \"America/North_Dakota/Beulah\",\n \"America/North_Dakota/Center\",\n \"America/North_Dakota/New_Salem\",\n \"America/Rainy_River\",\n \"America/Rankin_Inlet\",\n \"America/Resolute\",\n \"America/Winnipeg\",\n \"CST6CDT\",\n ],\n },\n {\n value: \"Central Standard Time (Mexico)\",\n abbr: \"CDT\",\n text: \"Guadalajara, Mexico City, Monterrey\",\n utc: [\n \"America/Bahia_Banderas\",\n \"America/Cancun\",\n \"America/Merida\",\n \"America/Mexico_City\",\n \"America/Monterrey\",\n ],\n },\n {\n value: \"Canada Central Standard Time\",\n abbr: \"CCST\",\n text: \"Saskatchewan\",\n utc: [\"America/Regina\", \"America/Swift_Current\"],\n },\n {\n value: \"SA Pacific Standard Time\",\n abbr: \"SPST\",\n text: \"Bogota, Lima, Quito\",\n utc: [\n \"America/Bogota\",\n \"America/Cayman\",\n \"America/Coral_Harbour\",\n \"America/Eirunepe\",\n \"America/Guayaquil\",\n \"America/Jamaica\",\n \"America/Lima\",\n \"America/Panama\",\n \"America/Rio_Branco\",\n \"Etc/GMT+5\",\n ],\n },\n {\n value: \"Eastern Standard Time\",\n abbr: \"EDT\",\n text: \"Eastern Time (US & Canada)\",\n utc: [\n \"America/Detroit\",\n \"America/Havana\",\n \"America/Indiana/Petersburg\",\n \"America/Indiana/Vincennes\",\n \"America/Indiana/Winamac\",\n \"America/Iqaluit\",\n \"America/Kentucky/Monticello\",\n \"America/Louisville\",\n \"America/Montreal\",\n \"America/Nassau\",\n \"America/New_York\",\n \"America/Nipigon\",\n \"America/Pangnirtung\",\n \"America/Port-au-Prince\",\n \"America/Thunder_Bay\",\n \"America/Toronto\",\n \"EST5EDT\",\n ],\n },\n {\n value: \"US Eastern Standard Time\",\n abbr: \"UEDT\",\n text: \"Indiana (East)\",\n utc: [\"America/Indiana/Marengo\", \"America/Indiana/Vevay\", \"America/Indianapolis\"],\n },\n {\n value: \"Venezuela Standard Time\",\n abbr: \"VST\",\n text: \"Caracas\",\n utc: [\"America/Caracas\"],\n },\n {\n value: \"Paraguay Standard Time\",\n abbr: \"PYT\",\n text: \"Asuncion\",\n utc: [\"America/Asuncion\"],\n },\n {\n value: \"Atlantic Standard Time\",\n abbr: \"ADT\",\n text: \"Atlantic Time (Canada)\",\n utc: [\n \"America/Glace_Bay\",\n \"America/Goose_Bay\",\n \"America/Halifax\",\n \"America/Moncton\",\n \"America/Thule\",\n \"Atlantic/Bermuda\",\n ],\n },\n {\n value: \"Central Brazilian Standard Time\",\n abbr: \"CBST\",\n text: \"Cuiaba\",\n utc: [\"America/Campo_Grande\", \"America/Cuiaba\"],\n },\n {\n value: \"SA Western Standard Time\",\n abbr: \"SWST\",\n text: \"Georgetown, La Paz, Manaus, San Juan\",\n utc: [\n \"America/Anguilla\",\n \"America/Antigua\",\n \"America/Aruba\",\n \"America/Barbados\",\n \"America/Blanc-Sablon\",\n 
\"America/Boa_Vista\",\n \"America/Curacao\",\n \"America/Dominica\",\n \"America/Grand_Turk\",\n \"America/Grenada\",\n \"America/Guadeloupe\",\n \"America/Guyana\",\n \"America/Kralendijk\",\n \"America/La_Paz\",\n \"America/Lower_Princes\",\n \"America/Manaus\",\n \"America/Marigot\",\n \"America/Martinique\",\n \"America/Montserrat\",\n \"America/Port_of_Spain\",\n \"America/Porto_Velho\",\n \"America/Puerto_Rico\",\n \"America/Santo_Domingo\",\n \"America/St_Barthelemy\",\n \"America/St_Kitts\",\n \"America/St_Lucia\",\n \"America/St_Thomas\",\n \"America/St_Vincent\",\n \"America/Tortola\",\n \"Etc/GMT+4\",\n ],\n },\n {\n value: \"Pacific SA Standard Time\",\n abbr: \"PSST\",\n text: \"Santiago\",\n utc: [\"America/Santiago\", \"Antarctica/Palmer\"],\n },\n {\n value: \"Newfoundland Standard Time\",\n abbr: \"NDT\",\n text: \"Newfoundland\",\n utc: [\"America/St_Johns\"],\n },\n {\n value: \"E. South America Standard Time\",\n abbr: \"ESAST\",\n text: \"Brasilia\",\n utc: [\"America/Sao_Paulo\"],\n },\n {\n value: \"Argentina Standard Time\",\n abbr: \"AST\",\n text: \"Buenos Aires\",\n utc: [\n \"America/Argentina/La_Rioja\",\n \"America/Argentina/Rio_Gallegos\",\n \"America/Argentina/Salta\",\n \"America/Argentina/San_Juan\",\n \"America/Argentina/San_Luis\",\n \"America/Argentina/Tucuman\",\n \"America/Argentina/Ushuaia\",\n \"America/Buenos_Aires\",\n \"America/Catamarca\",\n \"America/Cordoba\",\n \"America/Jujuy\",\n \"America/Mendoza\",\n ],\n },\n {\n value: \"SA Eastern Standard Time\",\n abbr: \"SEST\",\n text: \"Cayenne, Fortaleza\",\n utc: [\n \"America/Araguaina\",\n \"America/Belem\",\n \"America/Cayenne\",\n \"America/Fortaleza\",\n \"America/Maceio\",\n \"America/Paramaribo\",\n \"America/Recife\",\n \"America/Santarem\",\n \"Antarctica/Rothera\",\n \"Atlantic/Stanley\",\n \"Etc/GMT+3\",\n ],\n },\n {\n value: \"Greenland Standard Time\",\n abbr: \"GDT\",\n text: \"Greenland\",\n utc: [\"America/Godthab\"],\n },\n {\n value: \"Montevideo Standard Time\",\n abbr: \"MST\",\n text: \"Montevideo\",\n utc: [\"America/Montevideo\"],\n },\n {\n value: \"Bahia Standard Time\",\n abbr: \"BST\",\n text: \"Salvador\",\n utc: [\"America/Bahia\"],\n },\n {\n value: \"UTC-02\",\n abbr: \"U\",\n text: \"Coordinated Universal Time-02\",\n utc: [\"America/Noronha\", \"Atlantic/South_Georgia\", \"Etc/GMT+2\"],\n },\n {\n value: \"Mid-Atlantic Standard Time\",\n abbr: \"MDT\",\n text: \"Mid-Atlantic - Old\",\n utc: [],\n },\n {\n value: \"Azores Standard Time\",\n abbr: \"ADT\",\n text: \"Azores\",\n utc: [\"America/Scoresbysund\", \"Atlantic/Azores\"],\n },\n {\n value: \"Cape Verde Standard Time\",\n abbr: \"CVST\",\n text: \"Cape Verde Is.\",\n utc: [\"Atlantic/Cape_Verde\", \"Etc/GMT+1\"],\n },\n {\n value: \"Morocco Standard Time\",\n abbr: \"MDT\",\n text: \"Casablanca\",\n utc: [\"Africa/Casablanca\", \"Africa/El_Aaiun\"],\n },\n {\n value: \"UTC\",\n abbr: \"UTC\",\n text: \"Coordinated Universal Time\",\n utc: [\"America/Danmarkshavn\", \"Etc/GMT\"],\n },\n {\n value: \"GMT Standard Time\",\n abbr: \"GMT\",\n text: \"Edinburgh, London\",\n utc: [\"Europe/Isle_of_Man\", \"Europe/Guernsey\", \"Europe/Jersey\", \"Europe/London\"],\n },\n {\n value: \"GMT Standard Time\",\n abbr: \"GDT\",\n text: \"Dublin, Lisbon\",\n utc: [\n \"Atlantic/Canary\",\n \"Atlantic/Faeroe\",\n \"Atlantic/Madeira\",\n \"Europe/Dublin\",\n \"Europe/Lisbon\",\n ],\n },\n {\n value: \"Greenwich Standard Time\",\n abbr: \"GST\",\n text: \"Monrovia, Reykjavik\",\n utc: [\n \"Africa/Abidjan\",\n 
\"Africa/Accra\",\n \"Africa/Bamako\",\n \"Africa/Banjul\",\n \"Africa/Bissau\",\n \"Africa/Conakry\",\n \"Africa/Dakar\",\n \"Africa/Freetown\",\n \"Africa/Lome\",\n \"Africa/Monrovia\",\n \"Africa/Nouakchott\",\n \"Africa/Ouagadougou\",\n \"Africa/Sao_Tome\",\n \"Atlantic/Reykjavik\",\n \"Atlantic/St_Helena\",\n ],\n },\n {\n value: \"W. Europe Standard Time\",\n abbr: \"WEDT\",\n text: \"Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna\",\n utc: [\n \"Arctic/Longyearbyen\",\n \"Europe/Amsterdam\",\n \"Europe/Andorra\",\n \"Europe/Berlin\",\n \"Europe/Busingen\",\n \"Europe/Gibraltar\",\n \"Europe/Luxembourg\",\n \"Europe/Malta\",\n \"Europe/Monaco\",\n \"Europe/Oslo\",\n \"Europe/Rome\",\n \"Europe/San_Marino\",\n \"Europe/Stockholm\",\n \"Europe/Vaduz\",\n \"Europe/Vatican\",\n \"Europe/Vienna\",\n \"Europe/Zurich\",\n ],\n },\n {\n value: \"Central Europe Standard Time\",\n abbr: \"CEDT\",\n text: \"Belgrade, Bratislava, Budapest, Ljubljana, Prague\",\n utc: [\n \"Europe/Belgrade\",\n \"Europe/Bratislava\",\n \"Europe/Budapest\",\n \"Europe/Ljubljana\",\n \"Europe/Podgorica\",\n \"Europe/Prague\",\n \"Europe/Tirane\",\n ],\n },\n {\n value: \"Romance Standard Time\",\n abbr: \"RDT\",\n text: \"Brussels, Copenhagen, Madrid, Paris\",\n utc: [\"Africa/Ceuta\", \"Europe/Brussels\", \"Europe/Copenhagen\", \"Europe/Madrid\", \"Europe/Paris\"],\n },\n {\n value: \"Central European Standard Time\",\n abbr: \"CEDT\",\n text: \"Sarajevo, Skopje, Warsaw, Zagreb\",\n utc: [\"Europe/Sarajevo\", \"Europe/Skopje\", \"Europe/Warsaw\", \"Europe/Zagreb\"],\n },\n {\n value: \"W. Central Africa Standard Time\",\n abbr: \"WCAST\",\n text: \"West Central Africa\",\n utc: [\n \"Africa/Algiers\",\n \"Africa/Bangui\",\n \"Africa/Brazzaville\",\n \"Africa/Douala\",\n \"Africa/Kinshasa\",\n \"Africa/Lagos\",\n \"Africa/Libreville\",\n \"Africa/Luanda\",\n \"Africa/Malabo\",\n \"Africa/Ndjamena\",\n \"Africa/Niamey\",\n \"Africa/Porto-Novo\",\n \"Africa/Tunis\",\n \"Etc/GMT-1\",\n ],\n },\n {\n value: \"Namibia Standard Time\",\n abbr: \"NST\",\n text: \"Windhoek\",\n utc: [\"Africa/Windhoek\"],\n },\n {\n value: \"GTB Standard Time\",\n abbr: \"GDT\",\n text: \"Athens, Bucharest\",\n utc: [\"Asia/Nicosia\", \"Europe/Athens\", \"Europe/Bucharest\", \"Europe/Chisinau\"],\n },\n {\n value: \"Middle East Standard Time\",\n abbr: \"MEDT\",\n text: \"Beirut\",\n utc: [\"Asia/Beirut\"],\n },\n {\n value: \"Egypt Standard Time\",\n abbr: \"EST\",\n text: \"Cairo\",\n utc: [\"Africa/Cairo\"],\n },\n {\n value: \"Syria Standard Time\",\n abbr: \"SDT\",\n text: \"Damascus\",\n utc: [\"Asia/Damascus\"],\n },\n {\n value: \"E. Europe Standard Time\",\n abbr: \"EEDT\",\n text: \"E. 
Europe\",\n utc: [\n \"Asia/Nicosia\",\n \"Europe/Athens\",\n \"Europe/Bucharest\",\n \"Europe/Chisinau\",\n \"Europe/Helsinki\",\n \"Europe/Kiev\",\n \"Europe/Mariehamn\",\n \"Europe/Nicosia\",\n \"Europe/Riga\",\n \"Europe/Sofia\",\n \"Europe/Tallinn\",\n \"Europe/Uzhgorod\",\n \"Europe/Vilnius\",\n \"Europe/Zaporozhye\",\n ],\n },\n {\n value: \"South Africa Standard Time\",\n abbr: \"SAST\",\n text: \"Harare, Pretoria\",\n utc: [\n \"Africa/Blantyre\",\n \"Africa/Bujumbura\",\n \"Africa/Gaborone\",\n \"Africa/Harare\",\n \"Africa/Johannesburg\",\n \"Africa/Kigali\",\n \"Africa/Lubumbashi\",\n \"Africa/Lusaka\",\n \"Africa/Maputo\",\n \"Africa/Maseru\",\n \"Africa/Mbabane\",\n \"Etc/GMT-2\",\n ],\n },\n {\n value: \"FLE Standard Time\",\n abbr: \"FDT\",\n text: \"Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius\",\n utc: [\n \"Europe/Helsinki\",\n \"Europe/Kiev\",\n \"Europe/Mariehamn\",\n \"Europe/Riga\",\n \"Europe/Sofia\",\n \"Europe/Tallinn\",\n \"Europe/Uzhgorod\",\n \"Europe/Vilnius\",\n \"Europe/Zaporozhye\",\n ],\n },\n {\n value: \"Turkey Standard Time\",\n abbr: \"TDT\",\n text: \"Istanbul\",\n utc: [\"Europe/Istanbul\"],\n },\n {\n value: \"Israel Standard Time\",\n abbr: \"JDT\",\n text: \"Jerusalem\",\n utc: [\"Asia/Jerusalem\"],\n },\n {\n value: \"Libya Standard Time\",\n abbr: \"LST\",\n text: \"Tripoli\",\n utc: [\"Africa/Tripoli\"],\n },\n {\n value: \"Jordan Standard Time\",\n abbr: \"JST\",\n text: \"Amman\",\n utc: [\"Asia/Amman\"],\n },\n {\n value: \"Arabic Standard Time\",\n abbr: \"AST\",\n text: \"Baghdad\",\n utc: [\"Asia/Baghdad\"],\n },\n {\n value: \"Kaliningrad Standard Time\",\n abbr: \"KST\",\n text: \"Kaliningrad\",\n utc: [\"Europe/Kaliningrad\"],\n },\n {\n value: \"Arab Standard Time\",\n abbr: \"AST\",\n text: \"Kuwait, Riyadh\",\n utc: [\"Asia/Aden\", \"Asia/Bahrain\", \"Asia/Kuwait\", \"Asia/Qatar\", \"Asia/Riyadh\"],\n },\n {\n value: \"E. Africa Standard Time\",\n abbr: \"EAST\",\n text: \"Nairobi\",\n utc: [\n \"Africa/Addis_Ababa\",\n \"Africa/Asmera\",\n \"Africa/Dar_es_Salaam\",\n \"Africa/Djibouti\",\n \"Africa/Juba\",\n \"Africa/Kampala\",\n \"Africa/Khartoum\",\n \"Africa/Mogadishu\",\n \"Africa/Nairobi\",\n \"Antarctica/Syowa\",\n \"Etc/GMT-3\",\n \"Indian/Antananarivo\",\n \"Indian/Comoro\",\n \"Indian/Mayotte\",\n ],\n },\n {\n value: \"Moscow Standard Time\",\n abbr: \"MSK\",\n text: \"Moscow, St. 
Petersburg, Volgograd, Minsk\",\n utc: [\"Europe/Kirov\", \"Europe/Moscow\", \"Europe/Simferopol\", \"Europe/Volgograd\", \"Europe/Minsk\"],\n },\n {\n value: \"Samara Time\",\n abbr: \"SAMT\",\n text: \"Samara, Ulyanovsk, Saratov\",\n utc: [\"Europe/Astrakhan\", \"Europe/Samara\", \"Europe/Ulyanovsk\"],\n },\n {\n value: \"Iran Standard Time\",\n abbr: \"IDT\",\n text: \"Tehran\",\n utc: [\"Asia/Tehran\"],\n },\n {\n value: \"Arabian Standard Time\",\n abbr: \"AST\",\n text: \"Abu Dhabi, Muscat\",\n utc: [\"Asia/Dubai\", \"Asia/Muscat\", \"Etc/GMT-4\"],\n },\n {\n value: \"Azerbaijan Standard Time\",\n abbr: \"ADT\",\n text: \"Baku\",\n utc: [\"Asia/Baku\"],\n },\n {\n value: \"Mauritius Standard Time\",\n abbr: \"MST\",\n text: \"Port Louis\",\n utc: [\"Indian/Mahe\", \"Indian/Mauritius\", \"Indian/Reunion\"],\n },\n {\n value: \"Georgian Standard Time\",\n abbr: \"GET\",\n text: \"Tbilisi\",\n utc: [\"Asia/Tbilisi\"],\n },\n {\n value: \"Caucasus Standard Time\",\n abbr: \"CST\",\n text: \"Yerevan\",\n utc: [\"Asia/Yerevan\"],\n },\n {\n value: \"Afghanistan Standard Time\",\n abbr: \"AST\",\n text: \"Kabul\",\n utc: [\"Asia/Kabul\"],\n },\n {\n value: \"West Asia Standard Time\",\n abbr: \"WAST\",\n text: \"Ashgabat, Tashkent\",\n utc: [\n \"Antarctica/Mawson\",\n \"Asia/Aqtau\",\n \"Asia/Aqtobe\",\n \"Asia/Ashgabat\",\n \"Asia/Dushanbe\",\n \"Asia/Oral\",\n \"Asia/Samarkand\",\n \"Asia/Tashkent\",\n \"Etc/GMT-5\",\n \"Indian/Kerguelen\",\n \"Indian/Maldives\",\n ],\n },\n {\n value: \"Yekaterinburg Time\",\n abbr: \"YEKT\",\n text: \"Yekaterinburg\",\n utc: [\"Asia/Yekaterinburg\"],\n },\n {\n value: \"Pakistan Standard Time\",\n abbr: \"PKT\",\n text: \"Islamabad, Karachi\",\n utc: [\"Asia/Karachi\"],\n },\n {\n value: \"India Standard Time\",\n abbr: \"IST\",\n text: \"Chennai, Kolkata, Mumbai, New Delhi\",\n utc: [\"Asia/Kolkata\"],\n },\n {\n value: \"Sri Lanka Standard Time\",\n abbr: \"SLST\",\n text: \"Sri Jayawardenepura\",\n utc: [\"Asia/Colombo\"],\n },\n {\n value: \"Nepal Standard Time\",\n abbr: \"NST\",\n text: \"Kathmandu\",\n utc: [\"Asia/Kathmandu\"],\n },\n {\n value: \"Central Asia Standard Time\",\n abbr: \"CAST\",\n text: \"Nur-Sultan (Astana)\",\n utc: [\n \"Antarctica/Vostok\",\n \"Asia/Almaty\",\n \"Asia/Bishkek\",\n \"Asia/Qyzylorda\",\n \"Asia/Urumqi\",\n \"Etc/GMT-6\",\n \"Indian/Chagos\",\n ],\n },\n {\n value: \"Bangladesh Standard Time\",\n abbr: \"BST\",\n text: \"Dhaka\",\n utc: [\"Asia/Dhaka\", \"Asia/Thimphu\"],\n },\n {\n value: \"Myanmar Standard Time\",\n abbr: \"MST\",\n text: \"Yangon (Rangoon)\",\n utc: [\"Asia/Rangoon\", \"Indian/Cocos\"],\n },\n {\n value: \"SE Asia Standard Time\",\n abbr: \"SAST\",\n text: \"Bangkok, Hanoi, Jakarta\",\n utc: [\n \"Antarctica/Davis\",\n \"Asia/Bangkok\",\n \"Asia/Hovd\",\n \"Asia/Jakarta\",\n \"Asia/Phnom_Penh\",\n \"Asia/Pontianak\",\n \"Asia/Saigon\",\n \"Asia/Vientiane\",\n \"Etc/GMT-7\",\n \"Indian/Christmas\",\n ],\n },\n {\n value: \"N. 
Central Asia Standard Time\",\n abbr: \"NCAST\",\n text: \"Novosibirsk\",\n utc: [\"Asia/Novokuznetsk\", \"Asia/Novosibirsk\", \"Asia/Omsk\"],\n },\n {\n value: \"China Standard Time\",\n abbr: \"CST\",\n text: \"Beijing, Chongqing, Hong Kong, Urumqi\",\n utc: [\"Asia/Hong_Kong\", \"Asia/Macau\", \"Asia/Shanghai\"],\n },\n {\n value: \"North Asia Standard Time\",\n abbr: \"NAST\",\n text: \"Krasnoyarsk\",\n utc: [\"Asia/Krasnoyarsk\"],\n },\n {\n value: \"Singapore Standard Time\",\n abbr: \"MPST\",\n text: \"Kuala Lumpur, Singapore\",\n utc: [\n \"Asia/Brunei\",\n \"Asia/Kuala_Lumpur\",\n \"Asia/Kuching\",\n \"Asia/Makassar\",\n \"Asia/Manila\",\n \"Asia/Singapore\",\n \"Etc/GMT-8\",\n ],\n },\n {\n value: \"W. Australia Standard Time\",\n abbr: \"WAST\",\n text: \"Perth\",\n utc: [\"Antarctica/Casey\", \"Australia/Perth\"],\n },\n {\n value: \"Taipei Standard Time\",\n abbr: \"TST\",\n text: \"Taipei\",\n utc: [\"Asia/Taipei\"],\n },\n {\n value: \"Ulaanbaatar Standard Time\",\n abbr: \"UST\",\n text: \"Ulaanbaatar\",\n utc: [\"Asia/Choibalsan\", \"Asia/Ulaanbaatar\"],\n },\n {\n value: \"North Asia East Standard Time\",\n abbr: \"NAEST\",\n text: \"Irkutsk\",\n utc: [\"Asia/Irkutsk\"],\n },\n {\n value: \"Japan Standard Time\",\n abbr: \"JST\",\n text: \"Osaka, Sapporo, Tokyo\",\n utc: [\"Asia/Dili\", \"Asia/Jayapura\", \"Asia/Tokyo\", \"Etc/GMT-9\", \"Pacific/Palau\"],\n },\n {\n value: \"Korea Standard Time\",\n abbr: \"KST\",\n text: \"Seoul\",\n utc: [\"Asia/Pyongyang\", \"Asia/Seoul\"],\n },\n {\n value: \"Cen. Australia Standard Time\",\n abbr: \"CAST\",\n text: \"Adelaide\",\n utc: [\"Australia/Adelaide\", \"Australia/Broken_Hill\"],\n },\n {\n value: \"AUS Central Standard Time\",\n abbr: \"ACST\",\n text: \"Darwin\",\n utc: [\"Australia/Darwin\"],\n },\n {\n value: \"E. 
Australia Standard Time\",\n abbr: \"EAST\",\n text: \"Brisbane\",\n utc: [\"Australia/Brisbane\", \"Australia/Lindeman\"],\n },\n {\n value: \"AUS Eastern Standard Time\",\n abbr: \"AEST\",\n text: \"Canberra, Melbourne, Sydney\",\n utc: [\"Australia/Melbourne\", \"Australia/Sydney\"],\n },\n {\n value: \"West Pacific Standard Time\",\n abbr: \"WPST\",\n text: \"Guam, Port Moresby\",\n utc: [\n \"Antarctica/DumontDUrville\",\n \"Etc/GMT-10\",\n \"Pacific/Guam\",\n \"Pacific/Port_Moresby\",\n \"Pacific/Saipan\",\n \"Pacific/Truk\",\n ],\n },\n {\n value: \"Tasmania Standard Time\",\n abbr: \"TST\",\n text: \"Hobart\",\n utc: [\"Australia/Currie\", \"Australia/Hobart\"],\n },\n {\n value: \"Yakutsk Standard Time\",\n abbr: \"YST\",\n text: \"Yakutsk\",\n utc: [\"Asia/Chita\", \"Asia/Khandyga\", \"Asia/Yakutsk\"],\n },\n {\n value: \"Central Pacific Standard Time\",\n abbr: \"CPST\",\n text: \"Solomon Is., New Caledonia\",\n utc: [\n \"Antarctica/Macquarie\",\n \"Etc/GMT-11\",\n \"Pacific/Efate\",\n \"Pacific/Guadalcanal\",\n \"Pacific/Kosrae\",\n \"Pacific/Noumea\",\n \"Pacific/Ponape\",\n ],\n },\n {\n value: \"Vladivostok Standard Time\",\n abbr: \"VST\",\n text: \"Vladivostok\",\n utc: [\"Asia/Sakhalin\", \"Asia/Ust-Nera\", \"Asia/Vladivostok\"],\n },\n {\n value: \"New Zealand Standard Time\",\n abbr: \"NZST\",\n text: \"Auckland, Wellington\",\n utc: [\"Antarctica/McMurdo\", \"Pacific/Auckland\"],\n },\n {\n value: \"UTC+12\",\n abbr: \"U\",\n text: \"Coordinated Universal Time+12\",\n utc: [\n \"Etc/GMT-12\",\n \"Pacific/Funafuti\",\n \"Pacific/Kwajalein\",\n \"Pacific/Majuro\",\n \"Pacific/Nauru\",\n \"Pacific/Tarawa\",\n \"Pacific/Wake\",\n \"Pacific/Wallis\",\n ],\n },\n {\n value: \"Fiji Standard Time\",\n abbr: \"FST\",\n text: \"Fiji\",\n utc: [\"Pacific/Fiji\"],\n },\n {\n value: \"Magadan Standard Time\",\n abbr: \"MST\",\n text: \"Magadan\",\n utc: [\"Asia/Anadyr\", \"Asia/Kamchatka\", \"Asia/Magadan\", \"Asia/Srednekolymsk\"],\n },\n {\n value: \"Kamchatka Standard Time\",\n abbr: \"KDT\",\n text: \"Petropavlovsk-Kamchatsky - Old\",\n utc: [\"Asia/Kamchatka\"],\n },\n {\n value: \"Tonga Standard Time\",\n abbr: \"TST\",\n text: \"Nuku'alofa\",\n utc: [\"Etc/GMT-13\", \"Pacific/Enderbury\", \"Pacific/Fakaofo\", \"Pacific/Tongatapu\"],\n },\n {\n value: \"Samoa Standard Time\",\n abbr: \"SST\",\n text: \"Samoa\",\n utc: [\"Pacific/Apia\"],\n },\n]\n","import { timezones } from \"./timezones\"\n\nconst digitizeOffset = parsedOffset => {\n if (!parsedOffset) return \"+0\"\n const splitOffset = parsedOffset.split(\":\")\n return splitOffset.length > 1\n ? `${splitOffset[0]}${(splitOffset[1] / 60).toString().substr(1)}`\n : splitOffset[0]\n}\n\nconst normalizeOffset = parsedOffset => (parsedOffset ? 
parsedOffset.replace(\"−\", \"-\") : \"\")\n\nconst now = new Date()\nexport const timezoneList = () => {\n const memoized = {}\n return timezones.reduce((acc, timezone) => {\n const { utc } = timezone\n\n try {\n // We use 'fr' locale because it is the only one that returns back the UTC offset (dd/mm/yyyy, UTC-x) \n // so we can parse it later and digitize it.\n const dateString = new Intl.DateTimeFormat(\"fr\", {\n timeZone: utc[0],\n timeZoneName: \"short\",\n }).format(now)\n\n const [parsedOffset] = dateString.match(/[−+].+/) || []\n const normalizedOffset = normalizeOffset(parsedOffset)\n\n if (memoized[normalizedOffset])\n return acc.concat({ ...timezone, offset: memoized[normalizedOffset] })\n\n const digitizedOffset = digitizeOffset(normalizedOffset)\n\n memoized[normalizedOffset] = digitizedOffset\n return acc.concat({ ...timezone, offset: digitizedOffset })\n } catch (e) {\n return acc\n }\n }, [])\n}\n\nexport const timezonesById = timezones =>\n timezones.reduce((acc, { utc, ...timezone }) => {\n utc.forEach(item => (acc[item] = { ...timezone, utc: item }))\n return acc\n }, {})\n\nexport const getDefaultTimezone = () => {\n const dateFormat = new Intl.DateTimeFormat(\"default\", {})\n const usedOptions = dateFormat.resolvedOptions()\n return usedOptions\n}\n","import React, { useRef, useState, useEffect, useMemo, useCallback } from \"react\"\nimport { useToggle } from \"react-use\"\nimport { Drop, Flex, Text, Icon } from \"@netdata/netdata-ui\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { setOptionAction } from \"@/src/domains/global/actions\"\nimport { selectTimezoneSetting } from \"domains/global/selectors\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport Item from \"../item\"\nimport Dropdown from \"./dropdown\"\nimport Search from \"./search\"\nimport Container from \"./container\"\nimport Wrapper from \"./wrapper\"\nimport OffsetItem from \"./offsetItem\"\nimport { getDefaultTimezone, timezoneList, timezonesById } from \"./utils\"\nimport { getHashParams } from \"utils/hash-utils\"\n\nconst timezones = timezoneList().sort((a, b) => a.offset - b.offset)\nconst byId = timezonesById(timezones)\n\nconst getTimezone = (selectedTimezone, timezoneHash) => {\n const timezone = timezoneHash\n ? timezoneHash\n : selectedTimezone === \"default\"\n ? getDefaultTimezone().timeZone\n : selectedTimezone\n\n return byId[timezone in byId ? 
timezone : getDefaultTimezone().timeZone] || {}\n}\n\nconst Timezone = () => {\n const [value, setValue] = useState(\"\")\n const [isOpen, toggle] = useToggle()\n\n const ref = useRef()\n const inputRef = useRef()\n\n const { updateUtcParam } = window.urlOptions\n\n useEffect(() => {\n if (!inputRef.current || !isOpen) return\n inputRef.current.focus()\n }, [isOpen])\n\n const dispatch = useDispatch()\n const selectedTimezone = useSelector(selectTimezoneSetting)\n\n const selectedOffset = useMemo(() => {\n const { utc: timezoneHash = \"\" } = getHashParams()\n const { offset = \"\", utc = \"\" } = getTimezone(selectedTimezone, timezoneHash)\n\n if (timezoneHash !== utc) updateUtcParam(utc)\n if (selectedTimezone !== utc) dispatch(setOptionAction({ key: \"timezone\", value: utc }))\n\n dispatch(setOptionAction({ key: \"utcOffset\", value: parseFloat(offset) }))\n\n return offset\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [selectedTimezone])\n\n const zones = useMemo(() => {\n if (!value) return timezones\n return timezones.filter(\n ({ text, offset }) =>\n text.toUpperCase().includes(value.toUpperCase()) || offset.includes(value)\n )\n }, [value])\n\n const close = () => {\n toggle(false)\n setValue(\"\")\n }\n\n const onSelect = useCallback(utc => {\n updateUtcParam(utc)\n dispatch(setOptionAction({ key: \"timezone\", value: utc }))\n close()\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n const onChange = useCallback(e => setValue(e.target.value), [])\n\n return (\n \n \n \n \n UTC {selectedOffset}\n \n \n \n \n {ref.current && isOpen && (\n \n \n \n \n {zones.map(({ text, offset, utc }) => (\n \n ))}\n \n \n \n )}\n \n )\n}\n\nexport default Timezone\n","import React, { useCallback, useEffect, useState, useRef } from \"react\"\nimport styled from \"styled-components\"\nimport { useSelector, useDispatch } from \"store/redux-separate-context\"\nimport { useLocalStorage } from \"react-use\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { sendToChildIframe, useListenToPostMessage } from \"utils/post-message\"\nimport { getIframeSrc, NETDATA_REGISTRY_SERVER } from \"utils/utils\"\nimport useUserNodeAccessMessage from \"hooks/use-user-node-access\"\nimport { selectRegistry, selectCloudBaseUrl } from \"domains/global/selectors\"\nimport { LOCAL_STORAGE_NEEDS_SYNC } from \"domains/dashboard/sagas\"\nimport { setOfflineAction } from \"@/src/domains/dashboard/actions\"\nimport { SIGN_IN_IFRAME_ID } from \"components/header/constants\"\n\nconst IframeContainer = styled(Flex).attrs({ position: \"absolute\" })`\n display: none;\n`\nconst Iframe = ({ signedIn }) => {\n const [rendered, setRendered] = useState(false)\n const signInMsg = useRef()\n const ref = useRef()\n\n const [lsValue, , removeLsValue] = useLocalStorage(LOCAL_STORAGE_NEEDS_SYNC)\n const cloudBaseURL = useSelector(selectCloudBaseUrl)\n const registry = useSelector(selectRegistry)\n\n const dispatch = useDispatch()\n\n const { origin, pathname } = window.location\n const nameParam = encodeURIComponent(registry.hostname)\n const originParam = encodeURIComponent(origin + pathname)\n\n const signInIframeUrl = getIframeSrc(\n cloudBaseURL,\n `sign-in?id=${registry.machineGuid}&name=${nameParam}&origin=${originParam}`\n )\n\n useListenToPostMessage(\"hello-from-sign-in\", msg => {\n signInMsg.current = msg\n })\n\n useUserNodeAccessMessage()\n\n const onLoad = useCallback(() => {\n setRendered(true)\n setTimeout(() => dispatch(setOfflineAction({ offline: signInMsg.current === undefined })), 
500)\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n useEffect(() => {\n const handler = e => {\n if (!e?.target) return\n if (e.target.src === signInIframeUrl && !rendered) onLoad()\n }\n\n window.addEventListener(\"DOMFrameContentLoaded\", handler)\n return () => window.removeEventListener(\"DOMFrameContentLoaded\", handler)\n }, [signInIframeUrl, rendered, onLoad])\n\n useEffect(() => {\n if (!signedIn || !ref.current) return\n if (!registry.registryServer || registry.registryServer === NETDATA_REGISTRY_SERVER) return\n if (!lsValue) return\n\n removeLsValue()\n\n const { registryMachinesArray } = registry\n if (registryMachinesArray && registryMachinesArray.length > 0) {\n sendToChildIframe(ref.current, {\n type: \"synced-private-registry\",\n payload: registryMachinesArray,\n })\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [signedIn, registry, lsValue])\n\n return (\n \n )\n}\n\nexport default Iframe\n","import { useCallback, useState } from \"react\"\nimport { useLocalStorage } from \"react-use\"\nimport { useDispatch } from \"react-redux\"\nimport { useListenToPostMessage } from \"@/src/utils/post-message\"\nimport { isSignedInAction } from \"@/src/domains/dashboard/actions\"\n\nconst useCheckSignInStatus = () => {\n const dispatch = useDispatch()\n const [value, setValue] = useLocalStorage(\"has-sign-in-history\")\n const [hasSignedInBefore, setHasSignedInBefore] = useState(value)\n\n const onMessage = useCallback(isNew => {\n if (isNew) {\n setHasSignedInBefore(isNew)\n setValue(isNew)\n }\n dispatch(isSignedInAction({ isSignedIn: isNew }))\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n const [signedIn] = useListenToPostMessage(\"is-signed-in\", onMessage)\n\n return [signedIn, hasSignedInBefore]\n}\n\nexport default useCheckSignInStatus\n","import React from \"react\"\nimport { useSelector } from \"react-redux\"\nimport { Button } from \"@netdata/netdata-ui\"\nimport SignInButton from \"components/auth/signIn\"\nimport SignInIframe from \"components/auth/signIn/iframe\"\nimport useCheckSignInStatus from \"components/auth/signIn/useCheckSignInStatus\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst SignIn = () => {\n const [signedIn] = useCheckSignInStatus()\n const cloudEnabled = useSelector(selectIsCloudEnabled)\n\n return (\n cloudEnabled && (\n \n
\n \n {!signedIn && (\n \n {({ isRegistry, link, offline, onSignIn }) => (\n \n )}\n \n )}\n
\n \n )\n )\n}\n\nexport default SignIn\n","import { Text, NavigationTab, Icon } from \"@netdata/netdata-ui\"\nimport React from \"react\"\n\nconst CloudTab = ({ label, active, showBorderLeft, icon, onActivate }) => {\n const handleOnActivate = () => {\n if (active) return\n if (onActivate) onActivate()\n }\n return (\n }\n fixed\n closable={false}\n showBorderLeft={showBorderLeft}\n active={active}\n >\n {label}\n \n )\n}\n\nexport default CloudTab\n","import React from \"react\"\nimport {\n Flex,\n Button,\n Box,\n Text,\n H3,\n H4,\n Modal,\n ModalContent,\n ModalBody,\n ModalHeader,\n ModalCloseButton,\n ModalFooter,\n Icon,\n} from \"@netdata/netdata-ui\"\n\nimport GoToCloud from \"components/auth/signIn\"\n\nexport const TITLE = \"Discover the free benefits of Netdata Cloud\"\n\nconst DiscoverCloudModal = ({ closeModal, text, header, handleGoToCloud, image, video }) => {\n return (\n \n \n \n \n \n

{TITLE}:

\n
\n\n \n
\n \n \n \n \n

{header}

\n \n \n {({ link }) => (\n \n Sign in to Netdata Cloud!\n \n }\n width=\"100%\"\n onClick={() => handleGoToCloud({ link })}\n as={Button}\n small\n data-ga={\"go-to-cloud-button\"}\n data-testid=\"cta1-button\"\n />\n )}\n \n \n
\n {text()}\n
\n {image && (\n \n \n \n )}\n {video && (\n \n \n \n \n \n )}\n
\n
\n \n
\n
\n )\n}\n\nexport default DiscoverCloudModal\n","import { Text } from \"@netdata/netdata-ui\"\nimport Anchor from \"@/src/components/anchor\"\nimport React from \"react\"\n\nconst TabsContentText = ({ children }) => {children}\n\nexport const TabsContent = {\n Home: {\n id: \"Home\",\n label: \"Home\",\n header: \"Home\",\n text: () => (\n \n The Home view in Netdata cloud provides summarized relevant information in an easily\n digestible display. You can see information about your nodes, data collection and retention\n stats, alerts, users and dashboards.\n \n ),\n icon: \"room_home\",\n image: \"images/home.png\",\n },\n nodeView: {\n id: \"nodeView\",\n label: \"Node View\",\n header: \"Node View\",\n text: () => (\n <>\n \n The single node view you are currently using will of course be available on Netdata Cloud\n as well. In addition, the charts and visualization on Netdata Cloud will be more flexible\n and powerful for troubleshooting than what is available on the agent.\n \n \n Netdata Cloud also comes with the Metric Correlations feature that lets you quickly find\n metrics and charts related to a particular window of interest that you want to explore\n further. By displaying the standard Netdata dashboard, filtered to show only charts that\n are relevant to the window of interest, you can get to the root cause sooner.\n \n \n ),\n icon: \"nodes_hollow\",\n image: \"images/nodeView.png\",\n },\n Overview: {\n id: \"Overview\",\n label: \"Overview\",\n header: \"Overview\",\n text: () => (\n <>\n \n The Overview tab is a great way to monitor your infrastructure using Netdata Cloud. While\n the interface might look similar to local dashboards served by an Agent, or even the\n single-node dashboards in Netdata Cloud, Overview uses composite charts. These charts\n display real-time aggregated metrics from all the nodes (or a filtered selection) in a\n given War Room.\n \n \n With Overview's composite charts, you can see your infrastructure from a single pane of\n glass, discover trends or anomalies, then drill down by grouping metrics by node and\n jumping to single-node dashboards for root cause analysis.\n \n \n Here's an example of a composite chart visualizing Disk I/O bandwidth from 5 different\n nodes in one chart.\n \n \n ),\n icon: \"room_overview\",\n image: \"images/overview.png\",\n },\n Nodes: {\n id: \"Nodes\",\n label: \"Nodes\",\n header: \"Nodes\",\n text: () => (\n \n The Nodes view in Netdata Cloud lets you see and customize key metrics from any number of\n Agent-monitored nodes and seamlessly navigate to any node's dashboard for troubleshooting\n performance issues or anomalies using Netdata's highly-granular metrics.\n \n ),\n icon: \"nodes_hollow\",\n image: \"images/nodes.jpg\",\n },\n Dashboards: {\n id: \"Dashboards\",\n label: \"Dashboards\",\n header: \"Dashboards\",\n text: () => (\n \n With Netdata Cloud, you can build new dashboards that target your infrastructure's unique\n needs. Put key metrics from any number of distributed systems in one place for a bird's eye\n view of your infrastructure.\n \n ),\n icon: \"dashboard\",\n image: \"images/dashboards.png\",\n },\n Alerts: {\n id: \"Alerts\",\n label: \"Alerts\",\n header: \"Alerts\",\n text: () => (\n \n The Alerts view gives you a high level of availability and performance information for every\n node you're monitoring with Netdata Cloud. 
It also offers an easy way to drill down into any\n particular alert by taking the user to the dedicated alert view, where they can run\n metric correlations or take further troubleshooting steps.\n \n ),\n icon: \"alarm\",\n image: \"images/alerts.jpg\",\n },\n Anomalies: {\n id: \"Anomalies\",\n label: \"Anomalies\",\n header: \"Anomalies\",\n text: () => (\n \n The Anomalies view on Netdata Cloud lets you quickly surface potentially anomalous metrics\n and charts related to a particular highlight window of interest using Anomaly Advisor.\n Anomalies are detected using per metric unsupervised machine learning running at the edge!\n \n ),\n icon: \"anomaliesLens\",\n video:\n \"https://user-images.githubusercontent.com/24860547/165943403-1acb9759-7446-4704-8955-c566d04ad7ab.mp4\",\n },\n Pricing: {\n id: \"Pricing\",\n label: \"Pricing\",\n header: \"Pricing\",\n text: () => (\n \n Netdata Cloud’s distributed architecture—with processing occurring at the individual\n nodes—enables us to add any number of users at marginal cost. Couple this with our upcoming\n paid plan with added functionality for enterprise customers, and it means we can commit to\n providing our current functionality for free, always.\n \n ),\n image: \"images/pricing.png\",\n icon: \"pricing\",\n },\n Privacy: {\n id: \"Privacy\",\n label: \"Privacy\",\n header: \"Privacy\",\n text: () => (\n <>\n \n Data privacy is very important to us. We firmly believe that your data belongs to you.\n This is why we don't store any metric data in Netdata Cloud.\n \n \n Your local installations of the Netdata Agent form the basis for the Netdata Cloud. All\n the data that you see in the web browser when using Netdata Cloud is actually streamed\n directly from the Netdata Agent to the Netdata Cloud dashboard. The data passes through\n our systems, but it isn't stored. You can learn more about{\" \"}\n \n the Agent's security design\n {\" \"}\n in the Agent documentation.\n \n \n However, to be able to offer the stunning visualizations and advanced functionality of\n Netdata Cloud, it does store a limited amount of metadata. This metadata is ONLY available\n to Netdata and NEVER to any 3rd parties. You can learn more about what metadata is stored\n in Netdata Cloud in our\n \n {\" \"}\n documentation\n \n \n \n ),\n icon: \"privacy\",\n },\n}\n","import React from \"react\"\nimport {\n Text,\n Drop,\n ModalContent,\n ModalBody,\n ModalHeader,\n ModalCloseButton,\n ModalFooter,\n Flex,\n Button,\n Box,\n H3,\n H4,\n Icon,\n} from \"@netdata/netdata-ui\"\n\nimport GoToCloud from \"components/auth/signIn\"\n\nexport const TITLE = \"Discover the free benefits of Netdata Cloud\"\n\nconst DiscoverCloudDrop = ({\n parentRef,\n isDropdownOpen,\n closeDropdown,\n text,\n header,\n handleGoToCloud,\n image,\n video,\n}) => {\n if (parentRef.current && isDropdownOpen)\n return (\n \n \n \n \n \n

{TITLE}:

\n
\n\n \n
\n \n \n \n \n

{header}

\n \n \n {({ link }) => (\n \n Sign in to Netdata Cloud!\n \n }\n width=\"100%\"\n onClick={() => handleGoToCloud({ link })}\n data-testid=\"cta1-button\"\n as={Button}\n small\n data-ga={\"go-to-cloud-button\"}\n />\n )}\n \n \n
\n {text()}\n
\n {image && (\n \n \n \n )}\n {video && (\n \n \n \n \n \n )}\n
\n
\n \n
\n \n )\n\n return null\n}\n\nexport default DiscoverCloudDrop\n","import React, { useState, useRef } from \"react\"\nimport { Text, Flex, NavigationTabs } from \"@netdata/netdata-ui\"\n\nimport CloudTab from \"./cloudTab\"\nimport { TITLE } from \"./discoverCloudModal\"\n\nimport { callAll } from \"@/src/utils/utils\"\nimport { TabsContent } from \"./contents\"\n\nimport DiscoverCloudDrop from \"./discoverCloudDrop\"\n\nconst Wrapper = Flex\nconst InnerPositioner = Flex\n\nconst DiscoverCloud = () => {\n const [isModalOpen, setIsModalOpen] = useState(false)\n const [selectedModalContent, setSelectedModalContent] = useState(null)\n const dropDownParentRef = useRef()\n\n const handleOpenModal = () => {\n setIsModalOpen(true)\n }\n\n const handleCloseModal = () => {\n setIsModalOpen(false)\n }\n\n const handleGoToCloud = ({ link }) => {\n window.location.href = link\n }\n\n const handleSetModalContent = content => {\n setSelectedModalContent(content)\n }\n const handleResetModalContent = () => {\n setSelectedModalContent(null)\n }\n\n return (\n \n \n {TITLE}:\n \n \n {Object.keys(TabsContent).map((key, index) => {\n const { label, icon, id } = TabsContent[key]\n const selectedContentId = selectedModalContent ? selectedModalContent.id : null\n return (\n \n handleSetModalContent(TabsContent[key])\n )}\n />\n )\n })}\n \n \n \n \n {/* {isModalOpen && selectedModalContent && (\n \n )} */}\n \n )\n}\n\nexport default DiscoverCloud\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, Box } from \"@netdata/netdata-ui\"\nimport Node from \"./node\"\nimport Options from \"./options\"\nimport Version from \"./version\"\nimport GlobalControls from \"./globalControls\"\nimport Alarms from \"./alarms\"\nimport News from \"./news\"\nimport Timezone from \"./timezone\"\nimport SignIn from \"./signIn\"\nimport { CloudConnectionStatus } from \"./ACLK\"\nimport { DiscoverCloud } from \"@/src/components/discover-cloud\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\nimport { useSelector } from \"react-redux\"\n\nconst Wrapper = styled(Flex).attrs({\n as: \"header\",\n position: \"relative\",\n justifyContent: \"between\",\n background: \"panel\",\n zIndex: 20,\n width: \"100%\",\n padding: [2, 4, 2, 4],\n})`\n pointer-events: all;\n`\n\nconst Header = () => {\n const cloudEnabled = useSelector(selectIsCloudEnabled)\n\n return \n \n \n \n \n \n \n \n \n \n \n \n \n \n {cloudEnabled && \n \n }\n\n}\n\nexport default Header\n","import styled from \"styled-components\"\nimport { Button } from \"@netdata/netdata-ui\"\n\nconst ExpandButton = styled(Button)`\n&& {\n > .button-icon {\n width: 6px;\n height: 9px;\n }\n}\n`\nexport default ExpandButton\n","import React from \"react\"\nimport { useSelector } from \"react-redux\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { selectCloudBaseUrl } from \"domains/global/selectors\"\nimport { getIframeSrc } from \"@/src/utils\"\n\nconst SignOut = ({ flavour = \"default\", ...rest }) => {\n const cloudBaseURL = useSelector(selectCloudBaseUrl)\n\n return (\n \n )\n}\n\nexport default SignOut\n","import React, { useMemo } from \"react\"\nimport { ThemeProvider } from \"styled-components\"\nimport { createSelector } from \"reselect\"\nimport { useToggle } from \"react-use\"\nimport { Flex, Button, DarkTheme, Text, Layer } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { MenuItem } from \"components/menus\"\nimport SignOut from 
\"components/auth/signOut\"\nimport SignIn from \"components/auth/signIn\"\n\nconst SignInItem = () => {\n const onClick = (e, link) => {\n e.stopPropagation()\n window.location.href = link\n }\n return (\n \n {({ isRegistry, link, onSignIn }) => (\n onClick(e, link) : onSignIn}>Sign in\n )}\n \n )\n}\n\nconst isSignedInSelector = createSelector(\n ({ dashboard }) => dashboard,\n ({ isSignedIn }) => isSignedIn\n)\n\nconst UserSettings = () => {\n const [isOpen, toggle] = useToggle()\n const signedIn = useSelector(isSignedInSelector)\n\n const menuItems = useMemo(\n () => [\n ...(signedIn\n ? [\n {\n children: \"Operational Status\",\n onClick: () =>\n window.open(\"https://status.netdata.cloud\", \"_blank\", \"noopener,noreferrer\"),\n },\n ]\n : []),\n ...(signedIn ? [{ separator: true }] : []),\n ...(signedIn\n ? [\n {\n children: (\n \n ),\n },\n ]\n : [{ children: }]),\n ],\n [signedIn]\n )\n\n return (\n \n \n {isOpen && (\n \n \n {menuItems.map((item, i) => {\n if (item.separator) return \n return (\n \n {item.children}\n \n )\n })}\n \n \n )}\n \n )\n}\n\nexport default UserSettings\n","import React from \"react\"\nimport { Flex, Button } from \"@netdata/netdata-ui\"\n\nconst SpacesSkeleton = () => (\n \n \n \n \n))\n","import React, { useRef, useEffect } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectShowHelp } from \"domains/global/selectors\"\nimport { Icon, IconType } from \"components/icon\"\nimport { Button } from \"components/button\"\n\ntype ClickCallback = (event: React.MouseEvent) => void\ninterface ToolboxButtonProps {\n className?: string\n iconType: IconType\n onClick?: ClickCallback\n onDoubleClick?: (event: React.MouseEvent) => void\n onMouseDown?: (event: React.MouseEvent) => void\n onTouchStart?: (event: React.TouchEvent) => void\n popoverContent: string\n popoverTitle: string\n}\nexport const ToolboxButton = ({\n className,\n iconType,\n onClick,\n onDoubleClick,\n onMouseDown,\n onTouchStart,\n popoverContent,\n popoverTitle,\n}: ToolboxButtonProps) => {\n const buttonRef = useRef(null)\n const showHelp = useSelector(selectShowHelp)\n useEffect(() => {\n if (buttonRef.current && showHelp) {\n window.$(buttonRef.current).popover({\n container: \"body\",\n animation: false,\n html: true,\n trigger: \"hover\",\n placement: \"bottom\",\n delay: {\n show: window.NETDATA.options.current.show_help_delay_show_ms,\n hide: window.NETDATA.options.current.show_help_delay_hide_ms,\n },\n title: popoverTitle,\n content: popoverContent,\n })\n }\n }, []) // eslint-disable-line react-hooks/exhaustive-deps\n return (\n \n \n \n )\n}\n","import React, { useState, useCallback, useEffect } from \"react\"\nimport { ToolboxButton } from \"domains/chart/components/toolbox-button\"\nimport { setResizeHeightAction } from \"domains/chart/actions\"\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from \"domains/chart/utils/legend-utils\"\nimport { useDispatch } from \"store/redux-separate-context\"\n\nexport const LOCALSTORAGE_HEIGHT_KEY_PREFIX = \"chart_height.\"\n\ninterface Props {\n chartContainerElement: HTMLElement\n chartUuid: string\n heightId: string | undefined\n isLegendOnBottom: boolean\n}\n\nexport const ResizeHandler = ({\n chartContainerElement, chartUuid, heightId, isLegendOnBottom,\n}: Props) => {\n const [resizeHeight, setResizeHeight] = useState(() => chartContainerElement.clientHeight)\n const dispatch = useDispatch()\n\n useEffect(() => {\n // todo when attributes.id are present, hook 
height to localStorage\n    if (resizeHeight >= 70) {\n      dispatch(\n        setResizeHeightAction({\n          id: chartUuid,\n          resizeHeight,\n        }),\n      )\n    }\n  }, [resizeHeight, chartUuid, heightId, dispatch])\n\n  const handleResize = useCallback(\n    (event) => {\n      event.preventDefault()\n      const initialHeight = chartContainerElement.clientHeight\n      const eventStartHeight = event.type === \"touchstart\"\n        ? event.touches[0].clientY\n        : event.clientY\n\n      const setHeight = (currentHeight: number) => {\n        const nextHeight = initialHeight + currentHeight - eventStartHeight\n        // eslint-disable-next-line no-param-reassign\n        chartContainerElement.style.height = `${nextHeight.toString()}px`\n        setResizeHeight(nextHeight)\n        if (heightId) {\n          const heightForPersistence = isLegendOnBottom\n            ? (nextHeight - LEGEND_BOTTOM_SINGLE_LINE_HEIGHT)\n            : nextHeight\n          localStorage.setItem(\n            `${LOCALSTORAGE_HEIGHT_KEY_PREFIX}${heightId}`,\n            `${heightForPersistence}`,\n          )\n        }\n      }\n\n      const onMouseMove = (e: MouseEvent) => setHeight(e.clientY)\n      const onTouchMove = (e: TouchEvent) => setHeight(e.touches[0].clientY)\n\n      const onMouseEnd = () => {\n        document.removeEventListener(\"mousemove\", onMouseMove)\n        document.removeEventListener(\"mouseup\", onMouseEnd)\n      }\n\n      const onTouchEnd = () => {\n        document.removeEventListener(\"touchmove\", onTouchMove)\n        document.removeEventListener(\"touchend\", onTouchEnd)\n      }\n\n      if (event.type === \"touchstart\") {\n        document.addEventListener(\"touchmove\", onTouchMove)\n        document.addEventListener(\"touchend\", onTouchEnd)\n      } else {\n        document.addEventListener(\"mousemove\", onMouseMove)\n        document.addEventListener(\"mouseup\", onMouseEnd)\n      }\n    },\n    [chartContainerElement.clientHeight, chartContainerElement.style.height, heightId,\n      isLegendOnBottom],\n  )\n\n  return (\n    {\n        event.preventDefault()\n        event.stopPropagation()\n      }}\n      onMouseDown={handleResize}\n      onTouchStart={handleResize}\n      iconType=\"resize\"\n      popoverTitle=\"Chart Resize\"\n      popoverContent=\"Drag this point with your mouse or your finger (on touch devices), to resize\n        the chart vertically. You can also double click it or double tap it to reset\n        between 2 states: the default and the one that fits all the values.
Help\n can be disabled from the settings.\"\n />\n )\n}\n","import { createAction } from \"redux-act\"\n\nimport { createRequestAction } from \"utils/createRequestAction\"\nimport { RegistryMachine } from \"domains/global/sagas\"\nimport { storeKey } from \"./constants\"\nimport { ActiveAlarms, ChartsMetadata, Snapshot, Alarm, UserNodeAccessMessage } from \"./types\"\n\ninterface RequestCommonColors {\n chartContext: string\n chartUuid: string\n colorsAttribute: string | undefined\n commonColorsAttribute: string | undefined\n dimensionNames: string[]\n}\nexport const requestCommonColorsAction = createAction(\n `${storeKey}/globalRequestCommonColors`\n)\n\ninterface SetCommonMinAction {\n chartUuid: string\n commonMinKey: string\n value: number\n}\nexport const setCommonMinAction = createAction(`${storeKey}/setCommonMin`)\n\ninterface SetCommonMaxAction {\n chartUuid: string\n commonMaxKey: string\n value: number\n}\nexport const setCommonMaxAction = createAction(`${storeKey}/setCommonMax`)\n\ninterface SetGlobalSelectionAction {\n chartUuid: string | null\n hoveredX: number\n}\nexport const setGlobalSelectionAction = createAction(\n `${storeKey}/setGlobalSelection`\n)\n\nexport interface SetGlobalPanAndZoomAction {\n after: number\n before: number\n masterID?: string\n shouldForceTimeRange?: boolean\n}\nexport const setGlobalPanAndZoomAction = createAction(\n `${storeKey}/setGlobalPanAndZoom`\n)\n\nexport const resetGlobalPanAndZoomAction = createAction(`${storeKey}/resetGlobalPanAndZoomAction`)\n\nexport interface SetDefaultAfterAction {\n after: number\n}\nexport const setDefaultAfterAction = createAction(\n `${storeKey}/setDefaultAfterAction`\n)\n\nexport const resetDefaultAfterAction = createAction(`${storeKey}/resetDefaultAfterAction`)\n\nexport interface SetGlobalChartUnderlayAction {\n after: number\n before: number\n masterID: string\n}\nexport const setGlobalChartUnderlayAction = createAction(\n `${storeKey}/setGlobalChartUnderlay`\n)\n\nexport const centerAroundHighlightAction = createAction(`${storeKey}/centerAroundHighlightAction`)\nexport const clearHighlightAction = createAction(`${storeKey}/clearHighlightAction`)\n\ninterface WindowFocusChangeAction {\n hasWindowFocus: boolean\n}\nexport const windowFocusChangeAction = createAction(\n `${storeKey}/windowFocusChangeAction`\n)\n\nexport interface FetchHelloPayload {\n serverDefault: string\n}\n/* eslint-disable camelcase */\nexport interface HelloResponse {\n action: \"hello\"\n anonymous_statistics: boolean\n cloud_base_url: string\n hostname: string\n machine_guid: string\n registry: string\n status: string\n}\n/* eslint-enable camelcase */\n\nexport const fetchHelloAction = createRequestAction<\n FetchHelloPayload,\n { cloudBaseURL: string; hostname: string; isCloudEnabled: boolean; machineGuid: string }\n>(`${storeKey}/fetchHelloAction`)\n\ninterface UpdatePersonUrlsAction {\n personGuid: string\n registryMachines: { [key: string]: RegistryMachine }\n registryMachinesArray: RegistryMachine[]\n}\nexport const updatePersonUrlsAction = createAction(\n `${storeKey}/updatePersonUrlsAction`\n)\n\nexport interface AccessRegistrySuccessAction {\n registryServer: string\n}\nexport const accessRegistrySuccessAction = createAction(\n `${storeKey}/accessRegistrySuccessAction`\n)\n\nexport interface StartAlarmsPayload {\n serverDefault: string\n}\nexport const startAlarmsAction = createAction(`${storeKey}/startAlarmsAction`)\n\nexport const fetchAllAlarmsAction = createRequestAction(`${storeKey}/fetchAllAlarmsAction`)\n\nexport 
interface UpdateActiveAlarmAction {\n activeAlarms: ActiveAlarms\n}\nexport const updateActiveAlarmsAction = createAction(\n `${storeKey}/updateActiveAlarmsAction`\n)\n\nexport interface SetOptionAction {\n key: string\n value: unknown\n}\nexport const setOptionAction = createAction(`${storeKey}/setOptionAction`)\n\nexport const resetOptionsAction = createAction(`${storeKey}/resetOptions`)\n\nexport const loadSnapshotAction = createAction<{ snapshot: Snapshot }>(\n `${storeKey}/loadSnapshotAction`\n)\n\nexport const chartsMetadataRequestSuccess = createAction<{ data: ChartsMetadata }>(\n `${storeKey}/chartsMetadataRequestSuccess`\n)\n\nexport interface SetSpacePanelStatusActionPayload {\n isActive: boolean\n}\nexport const setSpacePanelStatusAction = createAction(\n `${storeKey}/setSpacePanelStatusAction`\n)\n\nexport interface SetSpacePanelTransitionEndPayload {\n isActive: boolean\n}\nexport const setSpacePanelTransitionEndAction = createAction(\n `${storeKey}/setSpacePanelStatusAction`\n)\n\nexport const setAlarmAction = createAction<{ alarm: Alarm }>(`${storeKey}/setAlarmAction`)\n\nexport const resetRegistry = createAction(`${storeKey}/resetRegistry`)\n\nexport const setGlobalPauseAction = createAction(`${storeKey}/setGlobalPauseAction`)\nexport const resetGlobalPauseAction = createAction<{ forcePlay?: boolean }>(\n `${storeKey}/resetGlobalPauseAction`\n)\nexport const setUTCOffset = createAction<{ utcOffset?: number | string }>(\n `${storeKey}/setUTCOffset`\n)\n\nexport const setUserNodeAccess = createAction<{ message: UserNodeAccessMessage }>(\n `${storeKey}/setUserNodeAccess`\n)\n","import { useCallback } from \"react\"\nimport { useDispatch } from \"react-redux\"\nimport { sendToChildIframe, useListenToPostMessage } from \"utils/post-message\"\nimport { setUserNodeAccess } from \"domains/global/actions\"\nimport { UserNodeAccessMessage } from \"domains/global/types\"\nimport { SIGN_IN_IFRAME_ID } from \"components/header/constants\"\n\nconst useUserNodeAccessMessage = () => {\n const dispatch = useDispatch()\n useListenToPostMessage(\"user-node-access\", message => {\n dispatch(setUserNodeAccess({ message }))\n })\n}\n\nexport const useRequestRefreshOfAccessMessage = () => {\n return useCallback(() => {\n sendToChildIframe(SIGN_IN_IFRAME_ID, { type: \"request-refresh-access\", payload: true })\n }, [])\n}\n\nexport default useUserNodeAccessMessage\n","export * from \"./utils\"\n","export const name2id = (s: string) => s\n .replace(/ /g, \"_\")\n .replace(/:/g, \"_\")\n .replace(/\\(/g, \"_\")\n .replace(/\\)/g, \"_\")\n .replace(/\\./g, \"_\")\n .replace(/\\//g, \"_\")\n","import { init, last, mergeAll } from \"ramda\"\nimport { createReducer } from \"redux-act\"\n\nimport { getInitialAfterFromWindow } from \"utils/utils\"\nimport { isMainJs } from \"utils/env\"\nimport { RegistryMachine } from \"domains/global/sagas\"\nimport { Alarm, ActiveAlarms, Snapshot, ChartsMetadata } from \"domains/global/types\"\nimport { fetchInfoAction } from \"domains/chart/actions\"\nimport { InfoPayload } from \"./__mocks__/info-mock\"\nimport {\n requestCommonColorsAction,\n setGlobalChartUnderlayAction,\n setGlobalSelectionAction,\n setGlobalPanAndZoomAction,\n centerAroundHighlightAction,\n clearHighlightAction,\n resetGlobalPanAndZoomAction,\n setDefaultAfterAction,\n windowFocusChangeAction,\n fetchHelloAction,\n updatePersonUrlsAction,\n startAlarmsAction,\n updateActiveAlarmsAction,\n setOptionAction,\n loadSnapshotAction,\n chartsMetadataRequestSuccess,\n setCommonMaxAction,\n 
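
The name2id helper above chains six .replace calls that all map to "_"; a single character class does the same work in one pass. This is equivalent for exactly the six characters the original handles (space, colon, both parentheses, dot, slash):

// One-pass equivalent of the chained replaces in name2id.
export const name2id = (s: string): string => s.replace(/[ :().\/]/g, "_")
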
setCommonMinAction,\n resetOptionsAction,\n setSpacePanelStatusAction,\n setSpacePanelTransitionEndAction,\n resetRegistry,\n accessRegistrySuccessAction,\n resetDefaultAfterAction,\n setAlarmAction,\n setGlobalPauseAction,\n resetGlobalPauseAction,\n setUTCOffset,\n setUserNodeAccess,\n} from \"./actions\"\nimport {\n Options,\n optionsMergedWithLocalStorage,\n getOptionsMergedWithLocalStorage,\n clearLocalStorage,\n} from \"./options\"\nimport { CLOUD_BASE_URL_DISABLED } from \"./constants\"\nimport { UserNodeAccessMessage } from \"./types\"\n\ninterface CommonMinMax {\n [commonKey: string]: {\n charts: {\n [chartUuid: string]: number\n }\n currentExtreme: number\n }\n}\n\nexport type StateT = {\n commonColorsKeys: {\n [key: string]: {\n // key can be uuid, chart's context or commonColors attribute\n assigned: {\n // name-value of dimensions and their colors\n [dimensionName: string]: string\n }\n available: string[] // an array of colors available to be used\n custom: string[] // the array of colors defined by the user\n charts: {} // the charts linked to this todo remove\n copyTheme: boolean\n }\n }\n commonMin: CommonMinMax\n commonMax: CommonMinMax\n currentSelectionMasterId: string | null\n globalPanAndZoom: null | {\n after: number // timestamp in ms\n before: number // timestamp in ms\n masterID?: string\n shouldForceTimeRange?: boolean\n }\n defaultAfter: number\n globalChartUnderlay: null | {\n after: number\n before: number\n masterID: string\n }\n hoveredX: number | null\n hasWindowFocus: boolean\n globalPause: boolean\n\n spacePanelIsActive: boolean\n spacePanelTransitionEndIsActive: boolean\n\n registry: {\n cloudBaseURL: string | null\n hasFetchedHello: boolean\n isHelloCallError: boolean | null\n hasFetchedInfo: boolean\n hostname: string\n isCloudEnabled: boolean | null\n isCloudAvailable: boolean | null\n isAgentClaimed: boolean | null\n isACLKAvailable: boolean | null\n hasStartedInfo: boolean\n fullInfoPayload: InfoPayload | null\n isFetchingHello: boolean\n machineGuid: string | null\n personGuid: string | null\n registryMachines: { [key: string]: RegistryMachine } | null\n registryMachinesArray: RegistryMachine[] | null\n registryServer: string | null\n }\n\n chartsMetadata: {\n isFetching: boolean\n isFetchingError: boolean\n data: null | ChartsMetadata\n }\n\n alarms: {\n activeAlarms: null | ActiveAlarms\n hasStartedAlarms: boolean\n }\n alarm: null | Alarm\n\n snapshot: Snapshot | null\n options: Options\n userNodeAccess: UserNodeAccessMessage\n}\n\nexport const initialDefaultAfter = isMainJs ? 
getInitialAfterFromWindow() : -900\n\nexport const initialState: StateT = {\n commonColorsKeys: {},\n commonMin: {},\n commonMax: {},\n currentSelectionMasterId: null,\n globalPanAndZoom: null,\n // todo for dashboard calculate it based on width and window.NETDATA.chartDefaults.after\n defaultAfter: initialDefaultAfter,\n globalChartUnderlay: null,\n hoveredX: null,\n hasWindowFocus: document.hasFocus(),\n globalPause: false,\n spacePanelIsActive: false, // set to true only for testing layout\n // the same as property above, just updated after transition ends\n spacePanelTransitionEndIsActive: false,\n\n registry: {\n cloudBaseURL: null,\n hasFetchedInfo: false,\n hasFetchedHello: false,\n isHelloCallError: null,\n hostname: \"unknown\",\n isCloudEnabled: null,\n isCloudAvailable: null,\n isAgentClaimed: null,\n isACLKAvailable: null,\n hasStartedInfo: false,\n isFetchingHello: false,\n fullInfoPayload: null,\n machineGuid: null,\n personGuid: null,\n registryMachines: null,\n registryMachinesArray: null,\n registryServer: null,\n },\n\n snapshot: null,\n alarms: {\n activeAlarms: null,\n hasStartedAlarms: false,\n },\n alarm: null,\n\n chartsMetadata: {\n isFetching: false,\n isFetchingError: false,\n data: null,\n },\n\n options: optionsMergedWithLocalStorage,\n userNodeAccess: null,\n}\n\nexport const globalReducer = createReducer({}, initialState)\n\nexport interface GetKeyArguments {\n colorsAttribute: string | undefined\n commonColorsAttribute: string | undefined\n chartUuid: string\n chartContext: string\n}\nexport const getKeyForCommonColorsState = ({\n colorsAttribute,\n commonColorsAttribute,\n chartUuid,\n chartContext,\n}: GetKeyArguments) => {\n const hasCustomColors = typeof colorsAttribute === \"string\" && colorsAttribute.length > 0\n\n // when there's commonColors attribute, share the state between all charts with that attribute\n // if not, when there are custom colors, make each chart independent\n // if not, share the same state between charts with the same context\n return commonColorsAttribute || (hasCustomColors ? chartUuid : chartContext)\n}\n\nconst hasLastOnly = (array: string[]) => last(array) === \"ONLY\"\nconst removeLastOnly = (array: string[]) => (hasLastOnly(array) ? init(array) : array)\nconst createCommonColorsKeysSubstate = (\n colorsAttribute: string | undefined,\n hasCustomColors: boolean\n) => {\n const custom = hasCustomColors ? removeLastOnly((colorsAttribute as string).split(\" \")) : []\n const shouldCopyTheme = hasCustomColors\n ? // disable copyTheme when there's \"ONLY\" keyword in \"data-colors\" attribute\n !hasLastOnly((colorsAttribute as string).split(\" \"))\n : true\n const available = [\n ...custom,\n ...(shouldCopyTheme || custom.length === 0 ? 
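
The key-selection precedence in getKeyForCommonColorsState above is easiest to see on concrete inputs (all values below are made up for illustration):

getKeyForCommonColorsState({
  colorsAttribute: undefined,
  commonColorsAttribute: "disk-group", // shared state for every chart using it
  chartUuid: "chart-1",
  chartContext: "disk.io",
}) // -> "disk-group"

getKeyForCommonColorsState({
  colorsAttribute: "#ff0000 #00ff00", // custom colors -> chart keeps its own state
  commonColorsAttribute: undefined,
  chartUuid: "chart-1",
  chartContext: "disk.io",
}) // -> "chart-1"

getKeyForCommonColorsState({
  colorsAttribute: undefined, // default: share colors by chart context
  commonColorsAttribute: undefined,
  chartUuid: "chart-1",
  chartContext: "disk.io",
}) // -> "disk.io"
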
window.NETDATA.themes.current.colors : []),\n ]\n return {\n assigned: {},\n available,\n custom,\n }\n}\n\nglobalReducer.on(\n requestCommonColorsAction,\n //@ts-ignore\n (state, { chartContext, chartUuid, colorsAttribute, commonColorsAttribute, dimensionNames }) => {\n const keyName = getKeyForCommonColorsState({\n colorsAttribute,\n commonColorsAttribute,\n chartUuid,\n chartContext,\n })\n\n const hasCustomColors = typeof colorsAttribute === \"string\" && colorsAttribute.length > 0\n const subState =\n state.commonColorsKeys[keyName] ||\n createCommonColorsKeysSubstate(colorsAttribute, hasCustomColors)\n\n const currentlyAssignedNr = Object.keys(subState.assigned).length\n const requestedDimensionsAssigned = mergeAll(\n dimensionNames\n // dont assign already assigned dimensions\n .filter(dimensionName => !subState.assigned[dimensionName])\n .map((dimensionName, i) => ({\n [dimensionName]:\n subState.available[(i + currentlyAssignedNr) % subState.available.length],\n }))\n )\n const assigned = {\n ...subState.assigned,\n ...requestedDimensionsAssigned,\n }\n\n return {\n ...state,\n commonColorsKeys: {\n ...state.commonColorsKeys,\n [keyName]: {\n ...subState,\n assigned,\n },\n },\n }\n }\n)\n\nglobalReducer.on(setCommonMinAction, (state, { chartUuid, commonMinKey, value }) => {\n const charts = {\n ...state.commonMin[commonMinKey]?.charts,\n [chartUuid]: value,\n }\n const currentExtreme = Math.min(...Object.values(charts))\n\n return {\n ...state,\n commonMin: {\n ...state.commonMin,\n [commonMinKey]: {\n charts,\n currentExtreme,\n },\n },\n }\n})\n\nglobalReducer.on(setCommonMaxAction, (state, { chartUuid, commonMaxKey, value }) => {\n const charts = {\n ...state.commonMax[commonMaxKey]?.charts,\n [chartUuid]: value,\n }\n const currentExtreme = Math.max(...Object.values(charts))\n\n return {\n ...state,\n commonMax: {\n ...state.commonMax,\n [commonMaxKey]: {\n charts,\n currentExtreme,\n },\n },\n }\n})\n\nglobalReducer.on(setSpacePanelStatusAction, (state, { isActive }) => ({\n ...state,\n spacePanelIsActive: isActive,\n}))\n\nglobalReducer.on(setSpacePanelTransitionEndAction, (state, { isActive }) => ({\n ...state,\n spacePanelTransitionEndIsActive: isActive,\n}))\n\nglobalReducer.on(setGlobalSelectionAction, (state, { chartUuid, hoveredX }) => ({\n ...state,\n hoveredX,\n currentSelectionMasterId: chartUuid,\n}))\n\nglobalReducer.on(setGlobalPanAndZoomAction, (state, payload) => ({\n ...state,\n globalPanAndZoom: payload,\n}))\n\nglobalReducer.on(resetGlobalPanAndZoomAction, state => ({\n ...state,\n globalPanAndZoom: initialState.globalPanAndZoom,\n hoveredX: initialState.hoveredX, // need to reset this also on mobile\n}))\n\nglobalReducer.on(setDefaultAfterAction, (state, { after }) => ({\n ...state,\n defaultAfter: after,\n}))\n\nglobalReducer.on(resetDefaultAfterAction, state => ({\n ...state,\n defaultAfter: initialState.defaultAfter,\n}))\n\nglobalReducer.on(setGlobalChartUnderlayAction, (state, { after, before, masterID }) => ({\n ...state,\n globalChartUnderlay: {\n after,\n before,\n masterID,\n },\n}))\n\nglobalReducer.on(centerAroundHighlightAction, state => {\n if (!state.globalChartUnderlay) {\n // eslint-disable-next-line no-console\n console.warn(\"Cannot center around empty selection\")\n return state\n }\n const { after, before } = state.globalChartUnderlay\n const highlightMargin = (before - after) / 2\n return {\n ...state,\n globalPanAndZoom: {\n after: after - highlightMargin,\n before: before + highlightMargin,\n },\n }\n})\n\nglobalReducer.on(\n 
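
The commonMin/commonMax handlers above recompute the shared extreme from every chart's current report, so the group value always reflects live data: a chart raising its own minimum only moves the shared minimum if that chart was the one holding it. A standalone sketch of the min side:

type CommonExtreme = { charts: Record<string, number>; currentExtreme: number }

const reportMin = (
  state: CommonExtreme | undefined,
  chartUuid: string,
  value: number,
): CommonExtreme => {
  const charts = { ...state?.charts, [chartUuid]: value }
  // recomputed from all current values, exactly as the reducer does
  return { charts, currentExtreme: Math.min(...Object.values(charts)) }
}

let s = reportMin(undefined, "chart-a", 4) // currentExtreme: 4
s = reportMin(s, "chart-b", -2)            // currentExtreme: -2
s = reportMin(s, "chart-a", 10)            // chart-a re-reports; still -2 via chart-b
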
clearHighlightAction,\n (state, { resetPanAndZoom = true }: { resetPanAndZoom?: boolean } = {}) => ({\n ...state,\n globalChartUnderlay: initialState.globalChartUnderlay,\n ...(resetPanAndZoom ? { globalPanAndZoom: initialState.globalPanAndZoom } : {}),\n })\n)\n\nglobalReducer.on(windowFocusChangeAction, (state, { hasWindowFocus }) => {\n // make additional check, because it's possible to get hasWindowFocus === false\n // message from iframe, after main window makes the state change (race condition)\n const hasFocusNow = document.hasFocus()\n return {\n ...state,\n hasWindowFocus: hasFocusNow || hasWindowFocus,\n }\n})\n\nglobalReducer.on(setGlobalPauseAction, state => ({ ...state, globalPause: true }))\nglobalReducer.on(resetGlobalPauseAction, (state, { forcePlay }) => ({\n ...state,\n globalPause: initialState.globalPause,\n globalPanAndZoom: initialState.globalPanAndZoom,\n hoveredX: initialState.hoveredX,\n options: { ...state.options, stop_updates_when_focus_is_lost: !forcePlay },\n}))\n\nglobalReducer.on(setUTCOffset, (state, { utcOffset }) => ({\n ...state,\n options: { ...state.options, utcOffset },\n}))\n\nglobalReducer.on(fetchHelloAction.request, state => ({\n ...state,\n registry: {\n ...state.registry,\n isFetchingHello: true,\n },\n}))\n\nglobalReducer.on(fetchHelloAction.success, (state, { cloudBaseURL, hostname, machineGuid }) => ({\n ...state,\n registry: {\n ...state.registry,\n cloudBaseURL,\n isFetchingHello: false,\n hasFetchedHello: true,\n hostname,\n machineGuid,\n },\n}))\nglobalReducer.on(fetchHelloAction.failure, state => ({\n ...state,\n registry: {\n ...state.registry,\n cloudBaseURL: CLOUD_BASE_URL_DISABLED,\n isFetchingHello: false,\n isHelloCallError: true,\n },\n}))\nglobalReducer.on(accessRegistrySuccessAction, (state, { registryServer }) => ({\n ...state,\n registry: {\n ...state.registry,\n registryServer,\n },\n}))\n\nglobalReducer.on(resetRegistry, state => ({\n ...state,\n registry: {\n ...state.registry,\n hasFetchedHello: initialState.registry.hasFetchedHello,\n },\n}))\n\nglobalReducer.on(fetchInfoAction, state => ({\n ...state,\n registry: {\n ...state.registry,\n hasStartedInfo: true,\n },\n}))\nglobalReducer.on(\n fetchInfoAction.success,\n (\n state,\n { isCloudAvailable, isCloudEnabled, isAgentClaimed, isACLKAvailable, fullInfoPayload }\n ) => ({\n ...state,\n registry: {\n ...state.registry,\n hasFetchedInfo: true,\n isCloudAvailable,\n isCloudEnabled,\n isAgentClaimed,\n isACLKAvailable,\n fullInfoPayload,\n },\n })\n)\n\nglobalReducer.on(fetchInfoAction.failure, state => ({\n ...state,\n registry: {\n ...state.registry,\n isCloudAvailable: false,\n isCloudEnabled: false,\n isAgentClaimed: false,\n isACLKAvailable: false,\n },\n}))\n\nglobalReducer.on(\n updatePersonUrlsAction,\n (state, { personGuid, registryMachines, registryMachinesArray }) => ({\n ...state,\n registry: {\n ...state.registry,\n personGuid,\n registryMachines,\n registryMachinesArray,\n },\n })\n)\n\nglobalReducer.on(startAlarmsAction, state => ({\n ...state,\n alarms: {\n ...state.alarms,\n hasStartedAlarms: true,\n },\n}))\n\nglobalReducer.on(updateActiveAlarmsAction, (state, { activeAlarms }) => ({\n ...state,\n alarms: {\n ...state.alarms,\n activeAlarms,\n },\n}))\n\nglobalReducer.on(setOptionAction, (state, { key, value }) => ({\n ...state,\n options: {\n ...state.options,\n [key]: value,\n },\n}))\n\nglobalReducer.on(resetOptionsAction, state => {\n clearLocalStorage()\n return {\n ...state,\n options: getOptionsMergedWithLocalStorage(),\n 
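
utils/createRequestAction itself is not shown in this chunk; from the call sites above (fetchHelloAction.request/.success/.failure, and fetchInfoAction dispatched directly as the request) it plausibly looks something like the sketch below. This is a guess at the shape, not the actual implementation:

import { createAction } from "redux-act"

// Hypothetical reconstruction: one base creator usable as the request,
// plus .request/.success/.failure sub-creators hung off it.
export const createRequestAction = <RequestPayload, SuccessPayload>(type: string) => {
  const request = createAction<RequestPayload>(type)
  return Object.assign(request, {
    request,
    success: createAction<SuccessPayload>(`${type}_SUCCESS`),
    failure: createAction(`${type}_FAILURE`),
  })
}

// usage mirroring the reducers above:
//   store.dispatch(fetchHelloAction.request({ serverDefault }))
//   globalReducer.on(fetchHelloAction.success, ...)
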
}\n})\n\nglobalReducer.on(loadSnapshotAction, (state, { snapshot }) => {\n const parsedData = Object.keys(snapshot.data)\n .map(dataKey => {\n let uncompressed\n try {\n // @ts-ignore\n uncompressed = snapshot.uncompress(snapshot.data[dataKey])\n\n // repeat old logging\n if (uncompressed === null) {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is null`)\n return null\n }\n\n if (typeof uncompressed === \"undefined\") {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is undefined`)\n return null\n }\n } catch (e) {\n // eslint-disable-next-line no-console\n console.warn(`decompression of snapshot data for key ${dataKey} failed`, e)\n uncompressed = null\n }\n\n if (typeof uncompressed !== \"string\") {\n // eslint-disable-next-line no-console\n console.warn(`uncompressed snapshot data for key ${dataKey} is not string`)\n return {}\n }\n\n let data\n try {\n data = JSON.parse(uncompressed)\n } catch (e) {\n // eslint-disable-next-line no-console\n console.warn(`parsing snapshot data for key ${dataKey} failed`)\n return {}\n }\n\n return { [dataKey]: data }\n })\n .reduce((acc, obj) => ({ ...acc, ...obj }), {})\n\n return {\n ...state,\n snapshot: {\n ...snapshot,\n data: parsedData as { [key: string]: unknown },\n },\n }\n})\n\nglobalReducer.on(setAlarmAction, (state, { alarm }) => ({\n ...state,\n alarm,\n}))\n\nglobalReducer.on(chartsMetadataRequestSuccess, (state, { data }) => ({\n ...state,\n chartsMetadata: {\n ...state.chartsMetadata,\n data,\n },\n}))\n\nglobalReducer.on(setUserNodeAccess, (state, { message }) => ({ ...state, userNodeAccess: message }))\n","import React from \"react\"\nimport classNames from \"classnames\"\n\n// todo add supoort for window.netdataIcons\nexport type IconType = \"left\" | \"reset\" | \"right\" | \"zoomIn\" | \"zoomOut\" | \"resize\" | \"lineChart\"\n | \"areaChart\" | \"noChart\" | \"loading\" | \"noData\"\nconst typeToClassName = (iconType: IconType) => ({\n left: \"fa-backward\",\n reset: \"fa-play\",\n right: \"fa-forward\",\n zoomIn: \"fa-plus\",\n zoomOut: \"fa-minus\",\n resize: \"fa-sort\",\n lineChart: \"fa-chart-line\",\n areaChart: \"fa-chart-area\",\n noChart: \"fa-chart-area\",\n loading: \"fa-sync-alt\",\n noData: \"fa-exclamation-triangle\",\n} as {[key in IconType]: string})[iconType]\n\ninterface Props {\n iconType: IconType\n}\nexport const Icon = ({ iconType }: Props) => (\n \n)\n","import {\n map, omit, assoc, pick,\n} from \"ramda\"\nimport { createReducer } from \"redux-act\"\n\nimport { setOptionAction } from \"domains/global/actions\"\nimport { SYNC_PAN_AND_ZOOM } from \"domains/global/options\"\nimport { useNewKeysOnlyIfDifferent } from \"utils/utils\"\n\nimport {\n fetchDataAction,\n fetchChartAction,\n setResizeHeightAction,\n clearChartStateAction,\n fetchDataForSnapshotAction,\n snapshotExportResetAction,\n setChartPanAndZoomAction,\n resetChartPanAndZoomAction,\n fetchDataCancelAction,\n} from \"./actions\"\nimport { ChartState } from \"./chart-types\"\n\nexport type StateT = {\n [chartID: string]: ChartState\n}\n\nexport const initialState = {\n}\nexport const initialSingleState = {\n chartData: null,\n chartId: null,\n chartMetadata: null,\n chartPanAndZoom: null,\n fetchDataParams: {\n isRemotelyControlled: false,\n viewRange: null,\n },\n isFetchingData: false,\n isFetchDataFailure: false,\n isFetchDetailsFailure: false,\n isFetchingDetails: false,\n resizeHeight: null,\n\n snapshotDataIsFetching: false,\n 
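
The loadSnapshotAction handler above defends against four distinct failure modes per key: the decompressor throwing, returning null, returning undefined, and JSON.parse failing. Collapsed into one standalone helper — slightly simplified, since every failure maps to null here whereas the reducer above distinguishes null from {}:

// Defensive parse of one snapshot entry; uncompress is whatever
// decompressor the snapshot carries (e.g. an LZ-string function).
const parseSnapshotEntry = (
  raw: string,
  uncompress: (s: string) => string | null | undefined,
): unknown | null => {
  let text: string | null | undefined
  try {
    text = uncompress(raw)
  } catch (e) {
    console.warn("decompression of snapshot data failed", e)
    return null
  }
  if (typeof text !== "string") {
    // covers both the null and undefined branches above
    console.warn("uncompressed snapshot data is not a string")
    return null
  }
  try {
    return JSON.parse(text)
  } catch {
    console.warn("parsing snapshot data failed")
    return null
  }
}
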
snapshotDataIsError: false,\n snapshotData: null,\n viewRange: null,\n}\n\nexport const chartReducer = createReducer(\n {},\n initialState,\n)\n\nexport const getSubstate = (state: StateT, id: string) => state[id] || initialSingleState\n\nchartReducer.on(fetchDataAction.request, (state, { chart, fetchDataParams, id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartId: chart,\n isFetchingData: true,\n viewRange: fetchDataParams.viewRange,\n },\n}))\n\nchartReducer.on(fetchDataCancelAction, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingData: false,\n },\n}))\n\nchartReducer.on(fetchDataAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingData: false,\n isFetchDataFailure: true,\n },\n}))\n\nchartReducer.on(fetchDataAction.success, (state, { id, chartData, fetchDataParams }) => {\n const substate = getSubstate(state, id)\n return {\n ...state,\n [id]: {\n ...substate,\n chartData: useNewKeysOnlyIfDifferent([\"dimension_names\"], substate.chartData, chartData!),\n fetchDataParams,\n isFetchingData: false,\n isFetchDataFailure: false,\n viewRange: fetchDataParams.viewRange,\n },\n }\n})\n\n\nchartReducer.on(fetchDataForSnapshotAction.request, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: true,\n },\n}))\n\nchartReducer.on(fetchDataForSnapshotAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: false,\n snapshotDataIsError: true,\n },\n}))\n\nchartReducer.on(fetchDataForSnapshotAction.success, (state, { id, snapshotData }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n snapshotDataIsFetching: false,\n snapshotDataIsError: false,\n snapshotData,\n },\n}))\n\nchartReducer.on(snapshotExportResetAction, (state) => map((substate) => ({\n ...substate,\n ...pick([\"snapshotDataIsFetching\", \"snapshotDataIsError\", \"snapshotData\"], initialSingleState),\n}), state))\n\n\nchartReducer.on(fetchChartAction.request, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchingDetails: true,\n },\n}))\n\nchartReducer.on(fetchChartAction.failure, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n isFetchDetailsFailure: true,\n },\n}))\n\nchartReducer.on(fetchChartAction.success, (state, { id, chartMetadata }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartMetadata,\n isFetchingDetails: false,\n isFetchDetailsFailure: false,\n },\n}))\n\n// todo handle errors without creating a loop\n// chartReducer.on(fetchChartAction.failure, (state, { id }) => ({\n// ...state,\n// [id]: {\n// ...getSubstate(state, id),\n// isFetchingDetails: false,\n// },\n// }))\n\nchartReducer.on(setResizeHeightAction, (state, { id, resizeHeight }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n resizeHeight,\n },\n}))\n\nchartReducer.on(setChartPanAndZoomAction, (state, {\n after, before, id, shouldForceTimeRange,\n}) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartPanAndZoom: { after, before, shouldForceTimeRange },\n },\n}))\n\nchartReducer.on(resetChartPanAndZoomAction, (state, { id }) => ({\n ...state,\n [id]: {\n ...getSubstate(state, id),\n chartPanAndZoom: initialSingleState.chartPanAndZoom,\n },\n}))\n\nchartReducer.on(setOptionAction, (state, { key, value }) => {\n // clear chartPanAndZoom, when SYNC_PAN_AND_ZOOM flag is turned on\n if (key === SYNC_PAN_AND_ZOOM && value === true) {\n return map(\n 
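
getSubstate above is what lets every handler update a chart id that has no state yet: unknown ids transparently start from initialSingleState, so no handler needs an existence check. For example:

// Resizing a chart the store has never seen still works:
const next = chartReducer(
  {},
  setResizeHeightAction({ id: "chart-1", resizeHeight: 320 }),
)
// next["chart-1"] === { ...initialSingleState, resizeHeight: 320 }
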
assoc(\"chartPanAndZoom\", initialSingleState.chartPanAndZoom),\n state,\n )\n }\n return state\n})\n\nchartReducer.on(clearChartStateAction, (state, { id }) => omit([id], state))\n","import { mapObjIndexed, mergeAll, isEmpty } from \"ramda\"\nimport { Method } from \"axios\"\nimport { initialLegendRight } from \"domains/global/options\"\nimport { ChartLibraryName } from \"./chartLibrariesSettings\"\n\ntype OutputValue = string | boolean | number | null | undefined | any[]\n// almost the same as in old dashboard to ensure readers that it works the same way\nconst getDataAttribute = (element: Element, key: string, defaultValue?: OutputValue) => {\n const dataKey = `data-${key}`\n if (element.hasAttribute(dataKey)) {\n // we know it's not null because of hasAttribute()\n const data = element.getAttribute(dataKey) as string\n\n if (data === \"true\") {\n return true\n }\n if (data === \"false\") {\n return false\n }\n if (data === \"null\") {\n return null\n }\n\n // Only convert to a number if it doesn't change the string\n if (data === `${+data}`) {\n return +data\n }\n\n if (/^(?:\\{[\\w\\W]*\\}|\\[[\\w\\W]*\\])$/.test(data)) {\n return JSON.parse(data)\n }\n\n return data\n }\n // if no default is passed, then it's undefined and can be replaced with default value later\n // it is recommended to do it in props destructuring assignment phase, ie.:\n // const Chart = ({ dygraphPointsize = 1 }) => ....\n return defaultValue\n}\n\nconst getDataAttributeBoolean = (element: Element, key: string, defaultValue?: boolean) => {\n const value = getDataAttribute(element, key, defaultValue)\n\n if (value === true || value === false) { // gmosx: Love this :)\n return value\n }\n\n if (typeof (value) === \"string\") {\n if (value === \"yes\" || value === \"on\") {\n return true\n }\n\n if (value === \"\" || value === \"no\" || value === \"off\" || value === \"null\") {\n return false\n }\n\n return defaultValue\n }\n\n if (typeof (value) === \"number\") {\n return value !== 0\n }\n\n return defaultValue\n}\n\ninterface BaseAttributeConfig {\n key: string\n defaultValue?: OutputValue\n}\ninterface BooleanAttributeConfig extends BaseAttributeConfig {\n type: \"boolean\"\n defaultValue?: boolean\n}\ntype AttributeConfig = BaseAttributeConfig | BooleanAttributeConfig\n\nexport interface StaticAttributes {\n id: string\n host?: string | undefined\n httpMethod?: Method\n title?: string\n chartLibrary: ChartLibraryName\n width: number | string | null\n height?: number | string | null\n after?: number\n before?: number\n legend?: boolean\n legendPosition?: \"bottom\" | \"right\"\n units?: string\n unitsCommon?: string\n unitsDesired?: string\n aggrMethod?: string\n labels?: {[key: string]: string}\n postGroupBy?: string\n dimensionsAggrMethod?: string\n postAggregationMethod?: string\n aggrGroups?: string[]\n selectedChart?: string\n filteredRows?: string[] | null\n groupBy?: string\n nodeIDs?: string[]\n colors?: string\n commonColors?: string\n decimalDigits?: number\n dimensions?: string\n selectedDimensions?: string[]\n forceTimeWindow?: boolean\n\n appendOptions?: string | undefined\n gtime?: number\n method?: string\n overrideOptions?: string\n pixelsPerPoint?: number\n points?: number\n heightId?: string\n hideResizeHandler?: boolean\n detectResize?: boolean\n commonMin?: string\n commonMax?: string\n\n dygraphType?: string\n dygraphValueRange?: any[]\n dygraphTheme?: string\n dygraphSmooth?: boolean\n dygraphColors?: string[]\n dygraphRightGap?: number\n dygraphShowRangeSelector?: boolean\n 
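
getDataAttribute above coerces data-* strings in a fixed order: boolean literals, the null literal, numbers that round-trip through Number, JSON-looking objects/arrays, then plain strings, with the caller's default used only for absent attributes. Shown on concrete markup (attribute values are illustrative):

const div = document.createElement("div")
div.setAttribute("data-legend", "true")
div.setAttribute("data-points", "300")
div.setAttribute("data-dimensions", '["in","out"]')
div.setAttribute("data-title", "Disk I/O")

getDataAttribute(div, "legend")      // -> true   (boolean literal)
getDataAttribute(div, "points")      // -> 300    ("300" === `${+"300"}`)
getDataAttribute(div, "dimensions")  // -> ["in", "out"] (JSON-looking -> parsed)
getDataAttribute(div, "title")       // -> "Disk I/O"    (plain string)
getDataAttribute(div, "missing", 42) // -> 42     (attribute absent -> default)
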
dygraphShowRoller?: boolean\n dygraphTitle?: string\n dygraphTitleHeight?: number\n dygraphLegend?: \"always\" | \"follow\" | \"onmouseover\" | \"never\"\n dygraphLabelsDiv?: string\n dygraphLabelsSeparateLine?: boolean\n dygraphIncludeZero?: boolean\n dygraphShowZeroValues?: boolean\n dygraphShowLabelsOnHighLight?: boolean\n dygraphHideOverlayOnMouseOut?: boolean\n dygraphXRangePad?: number\n dygraphYRangePad?: number\n dygraphYLabelWidth?: number\n dygraphStrokeWidth?: number\n dygraphStrokePattern?: number[]\n dygraphDrawPoints?: boolean\n dygraphDrawGapEdgePoints?: boolean\n dygraphConnectSeparatedPoints?: boolean\n dygraphPointSize?: number\n dygraphStepPlot?: boolean\n dygraphStrokeBorderColor?: string\n dygraphStrokeBorderWidth?: number\n dygraphFillGraph?: boolean\n dygraphFillAlpha?: number\n dygraphStackedGraph?: boolean\n dygraphStackedGraphNanFill?: string\n dygraphAxisLabelFontSize?: number\n dygraphAxisLineColor?: string\n dygraphAxisLineWidth?: number\n dygraphDrawGrid?: boolean\n dygraphGridLinePattern?: number[]\n dygraphGridLineWidth?: number\n dygraphGridLineColor?: string\n dygraphMaxNumberWidth?: number\n dygraphSigFigs?: number\n dygraphDigitsAfterDecimal?: number\n dygraphHighlighCircleSize?: number\n dygraphHighlightSeriesOpts?: {[options: string]: number}\n dygraphHighlightSeriesBackgroundAlpha?: number\n dygraphXPixelsPerLabel?: number\n dygraphXAxisLabelWidth?: number\n dygraphDrawXAxis?: boolean\n dygraphYPixelsPerLabel?: number\n dygraphYAxisLabelWidth?: number\n dygraphDrawYAxis?: boolean\n dygraphDrawAxis?: boolean\n\n easyPieChartMinValue?: number\n easyPieChartMaxValue?: number\n easyPieChartBarColor?: string\n easyPieChartTrackColor?: string\n easyPieChartScaleColor?: string,\n easyPieChartScaleLength?: number,\n easyPieChartLineCap?: string,\n easyPieChartLineWidth?: string,\n easyPieChartTrackWidth?: string,\n easyPieChartSize?: string,\n easyPieChartRotate?: number,\n easyPieChartAnimate?: string,\n easyPieChartEasing?: string,\n\n gaugeMinValue?: number,\n gaugeMaxValue?: number,\n gaugePointerColor?: string,\n gaugeStrokeColor?: string,\n gaugeStartColor?: string,\n gaugeStopColor?: string,\n gaugeGenerateGradient?: boolean | string[],\n\n sparklineType?: string,\n sparklineLineColor?: string,\n sparklineFillColor?: string,\n sparklineChartRangeMin?: string,\n sparklineChartRangeMax?: string,\n sparklineComposite?: string,\n sparklineEnableTagOptions?: string,\n sparklineTagOptionPrefix?: string,\n sparklineTagValuesAttribute?: string,\n sparklineDisableHiddenCheck?: string,\n sparklineDefaultPixelsPerValue?: string,\n sparklineSpotColor?: string,\n sparklineMinSpotColor?: string,\n sparklineMaxSpotColor?: string,\n sparklineSpotRadius?: string,\n sparklineValueSpots?: string,\n sparklineHighlightSpotColor?: string,\n sparklineHighlightLineColor?: string,\n sparklineLineWidth?: string,\n sparklineNormalRangeMin?: string,\n sparklineNormalRangeMax?: string,\n sparklineDrawNormalOnTop?: string,\n sparklineXvalues?: string,\n sparklineChartRangeClip?: string,\n sparklineChartRangeMinX?: string,\n sparklineChartRangeMaxX?: string,\n sparklineDisableInteraction?: boolean,\n sparklineDisableTooltips?: boolean,\n sparklineOnHover?: Function,\n sparklineDisableHighlight?: boolean,\n sparklineHighlightLighten?: string,\n sparklineHighlightColor?: string,\n sparklineTooltipContainer?: string,\n sparklineTooltipClassname?: string,\n sparklineTooltipFormat?: string,\n sparklineTooltipPrefix?: string,\n sparklineTooltipSuffix?: string,\n sparklineTooltipSkipNull?: 
boolean,\n sparklineTooltipValueLookups?: string,\n sparklineTooltipFormatFieldlist?: string,\n sparklineTooltipFormatFieldlistKey?: string,\n sparklineNumberFormatter?: (d: number) => string,\n sparklineNumberDigitGroupSep?: string,\n sparklineNumberDecimalMark?: string,\n sparklineNumberDigitGroupCount?: string,\n sparklineAnimatedZooms?: boolean,\n\n\n d3pieTitle?: string,\n d3pieSubtitle?: string,\n d3pieFooter?: string,\n d3pieTitleColor?: string,\n d3pieTitleFontsize?: string,\n d3pieTitleFontweight?: string,\n d3pieTitleFont?: string,\n d3PieSubtitleColor?: string,\n d3PieSubtitleFontsize?: string,\n d3PieSubtitleFontweight?: string,\n d3PieSubtitleFont?: string,\n d3PieFooterColor?: string,\n d3PieFooterFontsize?: string,\n d3PieFooterFontweight?: string,\n d3PieFooterFont?: string,\n d3PieFooterLocation?: string,\n d3PiePieinnerradius?: string,\n d3PiePieouterradius?: string,\n d3PieSortorder?: string,\n d3PieSmallsegmentgroupingEnabled?: boolean,\n d3PieSmallsegmentgroupingValue?: string,\n d3PieSmallsegmentgroupingValuetype?: string,\n d3PieSmallsegmentgroupingLabel?: string,\n d3PieSmallsegmentgroupingColor?: string,\n d3PieLabelsOuterFormat?: string,\n d3PieLabelsOuterHidewhenlessthanpercentage?: string,\n d3PieLabelsOuterPiedistance?: string,\n d3PieLabelsInnerFormat?: string,\n d3PieLabelsInnerHidewhenlessthanpercentage?: string,\n d3PieLabelsMainLabelColor?: string,\n d3PieLabelsMainLabelFont?: string,\n d3PieLabelsMainLabelFontsize?: string,\n d3PieLabelsMainLabelFontweight?: string,\n d3PieLabelsPercentageColor?: string,\n d3PieLabelsPercentageFont?: string,\n d3PieLabelsPercentageFontsize?: string,\n d3PieLabelsPercentageFontweight?: string,\n d3PieLabelsValueColor?: string,\n d3PieLabelsValueFont?: string,\n d3PieLabelsValueFontsize?: string,\n d3PieLabelsValueFontweight?: string,\n d3PieLabelsLinesEnabled?: boolean,\n d3PieLabelsLinesStyle?: string,\n d3PieLabelsLinesColor?: string,\n d3PieLabelsTruncationEnabled?: boolean,\n d3PieLabelsTruncationTruncatelength?: string,\n d3PieMiscColorsSegmentstroke?: string,\n d3PieMiscGradientEnabled?: boolean,\n d3PieMiscColorsPercentage?: string,\n d3PieMiscGradientColor?: string,\n d3PieCssprefix?: string,\n\n peityStrokeWidth?: number,\n\n textOnlyDecimalPlaces?: number,\n textOnlyPrefix?: string,\n textOnlySuffix?: string,\n}\n\nexport interface Attributes extends StaticAttributes {\n // changed structure compared to original dashboard.js (not flat list, but dynamic objects stored\n // in \"showValueOf\" property\n showValueOf?: { [key: string]: string }\n}\n\nexport interface ChartsAttributes {\n [chartID:string]: Attributes\n}\n\nexport type AttributePropKeys = keyof StaticAttributes\n\ntype AttributesMap = {\n [key in AttributePropKeys]: AttributeConfig\n}\n\n// needs to be a getter so all window.NETDATA settings are set\nconst getAttributesMap = (): AttributesMap => ({\n // all properties that don't have `defaultValue` should be \"| undefined\" in Attributes interface\n // todo try to write above rule in TS\n id: { key: \"netdata\" },\n host: { key: \"host\" },\n httpMethod: { key: \"http-method\" },\n title: { key: \"title\" },\n chartLibrary: { key: \"chart-library\", defaultValue: window.NETDATA.chartDefaults.library },\n width: { key: \"width\", defaultValue: window.NETDATA.chartDefaults.width },\n height: { key: \"height\", defaultValue: window.NETDATA.chartDefaults.height },\n // todo use chartDefaults for static custom dashboards\n // after: { key: \"after\", defaultValue: window.NETDATA.chartDefaults.after },\n 
after: { key: \"after\" },\n before: { key: \"before\", defaultValue: window.NETDATA.chartDefaults.before },\n legend: { key: \"legend\", type: \"boolean\", defaultValue: true },\n legendPosition: { key: \"legend-position\" },\n units: { key: \"units\" },\n unitsCommon: { key: \"common-units\" },\n unitsDesired: { key: \"desired-units\" },\n aggrMethod: { key: \"aggr-method\" },\n labels: { key: \"labels\" },\n postGroupBy: { key: \"post-group-by\" },\n postAggregationMethod: { key: \"post-aggregation-method\" },\n dimensionsAggrMethod: { key: \"dimensions-aggr-method\" },\n aggrGroups: { key: \"aggrGroups\" },\n selectedChart: { key: \"selected-chart\" },\n filteredRows: { key: \"filtered-rows\" },\n groupBy: { key: \"group-by\" },\n nodeIDs: { key: \"node-ids\" },\n colors: { key: \"colors\" },\n commonColors: { key: \"common-colors\" },\n decimalDigits: { key: \"decimal-digits\" },\n dimensions: { key: \"dimensions\" },\n selectedDimensions: { key: \"selected-dimensions\" },\n forceTimeWindow: { key: \"force-time-window\" },\n\n appendOptions: { key: \"append-options\" },\n gtime: { key: \"gtime\" },\n method: { key: \"method\" },\n overrideOptions: { key: \"override-options\" },\n pixelsPerPoint: { key: \"pixels-per-point\" },\n points: { key: \"points\" },\n heightId: { key: \"id\" },\n hideResizeHandler: { key: \"hide-resize-handler\" },\n detectResize: { key: \"detect-resize\" },\n commonMin: { key: \"common-min\" },\n commonMax: { key: \"common-max\" },\n\n // let's not put the default values here, because they will also be needed by the main Agent page\n // and the Cloud App\n dygraphType: { key: \"dygraph-type\" },\n dygraphValueRange: { key: \"dygraph-valuerange\" },\n dygraphTheme: { key: \"dygraph-theme\" },\n dygraphSmooth: { key: \"dygraph-smooth\", type: \"boolean\" },\n dygraphColors: { key: \"dygraph-colors\" }, // not working in original dashboard\n dygraphRightGap: { key: \"dygraph-rightgap\" },\n dygraphShowRangeSelector: { key: \"dygraph-showrangeselector\", type: \"boolean\" },\n dygraphShowRoller: { key: \"dygraph-showroller\", type: \"boolean\" },\n dygraphTitle: { key: \"dygraph-title\" },\n dygraphTitleHeight: { key: \"dygraph-titleheight\" },\n dygraphLegend: { key: \"dygraph-legend\" },\n dygraphLabelsDiv: { key: \"dygraph-labelsdiv\" },\n dygraphLabelsSeparateLine: { key: \"dygraph-labelsseparatelines\", type: \"boolean\" },\n dygraphIncludeZero: { key: \"dygraph-includezero\", type: \"boolean\" },\n dygraphShowZeroValues: { key: \"dygraph-labelsshowzerovalues\", type: \"boolean\" },\n dygraphShowLabelsOnHighLight: { key: \"dygraph-showlabelsonhighlight\", type: \"boolean\" },\n dygraphHideOverlayOnMouseOut: { key: \"dygraph-hideoverlayonmouseout\", type: \"boolean\" },\n dygraphXRangePad: { key: \"dygraph-xrangepad\" },\n dygraphYRangePad: { key: \"dygraph-yrangepad\" },\n dygraphYLabelWidth: { key: \"dygraph-ylabelwidth\" },\n dygraphStrokeWidth: { key: \"dygraph-strokewidth\" },\n dygraphStrokePattern: { key: \"dygraph-strokepattern\" },\n dygraphDrawPoints: { key: \"dygraph-drawpoints\", type: \"boolean\" },\n dygraphDrawGapEdgePoints: { key: \"dygraph-drawgapedgepoints\", type: \"boolean\" },\n dygraphConnectSeparatedPoints: { key: \"dygraph-connectseparatedpoints\", type: \"boolean\" },\n dygraphPointSize: { key: \"dygraph-pointsize\" },\n dygraphStepPlot: { key: \"dygraph-stepplot\", type: \"boolean\" },\n dygraphStrokeBorderColor: { key: \"dygraph-strokebordercolor\" },\n dygraphStrokeBorderWidth: { key: \"dygraph-strokeborderwidth\" },\n // it was not 
boolean in the old app, but that was most likely a bug\n dygraphFillGraph: { key: \"dygraph-fillgraph\", type: \"boolean\" },\n dygraphFillAlpha: { key: \"dygraph-fillalpha\" },\n // also originally not set as boolean\n dygraphStackedGraph: { key: \"dygraph-stackedgraph\", type: \"boolean\" },\n dygraphStackedGraphNanFill: { key: \"dygraph-stackedgraphnanfill\" },\n dygraphAxisLabelFontSize: { key: \"dygraph-axislabelfontsize\" },\n dygraphAxisLineColor: { key: \"dygraph-axislinecolor\" },\n dygraphAxisLineWidth: { key: \"dygraph-axislinewidth\" },\n dygraphDrawGrid: { key: \"dygraph-drawgrid\", type: \"boolean\" },\n dygraphGridLinePattern: { key: \"dygraph-gridlinepattern\" },\n dygraphGridLineWidth: { key: \"dygraph-gridlinewidth\" },\n dygraphGridLineColor: { key: \"dygraph-gridlinecolor\" },\n dygraphMaxNumberWidth: { key: \"dygraph-maxnumberwidth\" },\n dygraphSigFigs: { key: \"dygraph-sigfigs\" },\n dygraphDigitsAfterDecimal: { key: \"dygraph-digitsafterdecimal\" },\n // dygraphValueFormatter: { key: \"dygraph-valueformatter\" },\n dygraphHighlighCircleSize: { key: \"dygraph-highlightcirclesize\" },\n dygraphHighlightSeriesOpts: { key: \"dygraph-highlightseriesopts\" },\n dygraphHighlightSeriesBackgroundAlpha: { key: \"dygraph-highlightseriesbackgroundalpha\" },\n // dygraphPointClickCallback: { key: \"dygraph-pointclickcallback\" },\n dygraphXPixelsPerLabel: { key: \"dygraph-xpixelsperlabel\" },\n dygraphXAxisLabelWidth: { key: \"dygraph-xaxislabelwidth\" },\n dygraphDrawXAxis: { key: \"dygraph-drawxaxis\", type: \"boolean\" },\n dygraphYPixelsPerLabel: { key: \"dygraph-ypixelsperlabel\" },\n dygraphYAxisLabelWidth: { key: \"dygraph-yaxislabelwidth\" },\n dygraphDrawYAxis: { key: \"dygraph-drawyaxis\", type: \"boolean\" },\n dygraphDrawAxis: { key: \"dygraph-drawaxis\", type: \"boolean\" },\n\n easyPieChartMinValue: { key: \"easypiechart-min-value\" },\n easyPieChartMaxValue: { key: \"easypiechart-max-value\" },\n easyPieChartBarColor: { key: \"easypiechart-barcolor\" },\n easyPieChartTrackColor: { key: \"easypiechart-trackcolor\" },\n easyPieChartScaleColor: { key: \"easypiechart-scalecolor\" },\n easyPieChartScaleLength: { key: \"easypiechart-scalelength\" },\n easyPieChartLineCap: { key: \"easypiechart-linecap\" },\n easyPieChartLineWidth: { key: \"easypiechart-linewidth\" },\n easyPieChartTrackWidth: { key: \"easypiechart-trackwidth\" },\n easyPieChartSize: { key: \"easypiechart-size\" },\n easyPieChartRotate: { key: \"easypiechart-rotate\" },\n easyPieChartAnimate: { key: \"easypiechart-animate\" },\n easyPieChartEasing: { key: \"easypiechart-easing\" },\n\n gaugeMinValue: { key: \"gauge-min-value\" },\n gaugeMaxValue: { key: \"gauge-max-value\" },\n gaugePointerColor: { key: \"gauge-pointer-color\" },\n gaugeStrokeColor: { key: \"gauge-stroke-color\" },\n gaugeStartColor: { key: \"gauge-start-color\" },\n gaugeStopColor: { key: \"gauge-stop-color\" },\n gaugeGenerateGradient: { key: \"gauge-generate-gradient\" },\n\n sparklineType: { key: \"sparkline-type\" },\n sparklineLineColor: { key: \"sparkline-linecolor\" },\n sparklineFillColor: { key: \"sparkline-fillcolor\" },\n sparklineChartRangeMin: { key: \"sparkline-chartrangemin\" },\n sparklineChartRangeMax: { key: \"sparkline-chartrangemax\" },\n sparklineComposite: { key: \"sparkline-composite\" },\n sparklineEnableTagOptions: { key: \"sparkline-enabletagoptions\" },\n sparklineTagOptionPrefix: { key: \"sparkline-tagoptionprefix\" },\n sparklineTagValuesAttribute: { key: \"sparkline-tagvaluesattribute\" },\n 
sparklineDisableHiddenCheck: { key: \"sparkline-disablehiddencheck\" },\n sparklineDefaultPixelsPerValue: { key: \"sparkline-defaultpixelspervalue\" },\n sparklineSpotColor: { key: \"sparkline-spotcolor\" },\n sparklineMinSpotColor: { key: \"sparkline-minspotcolor\" },\n sparklineMaxSpotColor: { key: \"sparkline-maxspotcolor\" },\n sparklineSpotRadius: { key: \"sparkline-spotradius\" },\n sparklineValueSpots: { key: \"sparkline-valuespots\" },\n sparklineHighlightSpotColor: { key: \"sparkline-highlightspotcolor\" },\n sparklineHighlightLineColor: { key: \"sparkline-highlightlinecolor\" },\n sparklineLineWidth: { key: \"sparkline-linewidth\" },\n sparklineNormalRangeMin: { key: \"sparkline-normalrangemin\" },\n sparklineNormalRangeMax: { key: \"sparkline-normalrangemax\" },\n sparklineDrawNormalOnTop: { key: \"sparkline-drawnormalontop\" },\n sparklineXvalues: { key: \"sparkline-xvalues\" },\n sparklineChartRangeClip: { key: \"sparkline-chartrangeclip\" },\n sparklineChartRangeMinX: { key: \"sparkline-chartrangeminx\" },\n sparklineChartRangeMaxX: { key: \"sparkline-chartrangemaxx\" },\n sparklineDisableInteraction: { key: \"sparkline-disableinteraction\", type: \"boolean\" },\n sparklineDisableTooltips: { key: \"sparkline-disabletooltips\", type: \"boolean\" },\n sparklineOnHover: { key: \"sparkline-on-hover\" },\n sparklineDisableHighlight: { key: \"sparkline-disablehighlight\", type: \"boolean\" },\n sparklineHighlightLighten: { key: \"sparkline-highlightlighten\" },\n sparklineHighlightColor: { key: \"sparkline-highlightcolor\" },\n sparklineTooltipContainer: { key: \"sparkline-tooltipcontainer\" },\n sparklineTooltipClassname: { key: \"sparkline-tooltipclassname\" },\n sparklineTooltipFormat: { key: \"sparkline-tooltipformat\" },\n sparklineTooltipPrefix: { key: \"sparkline-tooltipprefix\" },\n sparklineTooltipSuffix: { key: \"sparkline-tooltipsuffix\" },\n sparklineTooltipSkipNull: { key: \"sparkline-tooltipskipnull\", type: \"boolean\" },\n sparklineTooltipValueLookups: { key: \"sparkline-tooltipvaluelookups\" },\n sparklineTooltipFormatFieldlist: { key: \"sparkline-tooltipformatfieldlist\" },\n sparklineTooltipFormatFieldlistKey: { key: \"sparkline-tooltipformatfieldlistkey\" },\n sparklineNumberFormatter: { key: \"sparkline-numberformatter\" },\n sparklineNumberDigitGroupSep: { key: \"sparkline-numberdigitgroupsep\" },\n sparklineNumberDecimalMark: { key: \"sparkline-numberdecimalmark\" },\n sparklineNumberDigitGroupCount: { key: \"sparkline-numberdigitgroupcount\" },\n sparklineAnimatedZooms: { key: \"sparkline-animatedzooms\", type: \"boolean\" },\n\n d3pieTitle: { key: \"d3pie-title\" },\n d3pieSubtitle: { key: \"d3pie-subtitle\" },\n d3pieFooter: { key: \"d3pie-footer\" },\n d3pieTitleColor: { key: \"d3pie-title-color\" },\n d3pieTitleFontsize: { key: \"d3pie-title-fontsize\" },\n d3pieTitleFontweight: { key: \"d3pie-title-fontweight\" },\n d3pieTitleFont: { key: \"d3pie-title-font\" },\n d3PieSubtitleColor: { key: \"d3pie-subtitle-color\" },\n d3PieSubtitleFontsize: { key: \"d3pie-subtitle-fontsize\" },\n d3PieSubtitleFontweight: { key: \"d3pie-subtitle-fontweight\" },\n d3PieSubtitleFont: { key: \"d3pie-subtitle-font\" },\n d3PieFooterColor: { key: \"d3pie-footer-color\" },\n d3PieFooterFontsize: { key: \"d3pie-footer-fontsize\" },\n d3PieFooterFontweight: { key: \"d3pie-footer-fontweight\" },\n d3PieFooterFont: { key: \"d3pie-footer-font\" },\n d3PieFooterLocation: { key: \"d3pie-footer-location\" },\n d3PiePieinnerradius: { key: \"d3pie-pieinnerradius\" },\n 
d3PiePieouterradius: { key: \"d3pie-pieouterradius\" },\n d3PieSortorder: { key: \"d3pie-sortorder\" },\n d3PieSmallsegmentgroupingEnabled: { key: \"d3pie-smallsegmentgrouping-enabled\", type: \"boolean\" },\n d3PieSmallsegmentgroupingValue: { key: \"d3pie-smallsegmentgrouping-value\" },\n d3PieSmallsegmentgroupingValuetype: { key: \"d3pie-smallsegmentgrouping-valuetype\" },\n d3PieSmallsegmentgroupingLabel: { key: \"d3pie-smallsegmentgrouping-label\" },\n d3PieSmallsegmentgroupingColor: { key: \"d3pie-smallsegmentgrouping-color\" },\n d3PieLabelsOuterFormat: { key: \"d3pie-labels-outer-format\" },\n d3PieLabelsOuterHidewhenlessthanpercentage: {\n key: \"d3pie-labels-outer-hidewhenlessthanpercentage\",\n },\n d3PieLabelsOuterPiedistance: { key: \"d3pie-labels-outer-piedistance\" },\n d3PieLabelsInnerFormat: { key: \"d3pie-labels-inner-format\" },\n d3PieLabelsInnerHidewhenlessthanpercentage: {\n key: \"d3pie-labels-inner-hidewhenlessthanpercentage\",\n },\n d3PieLabelsMainLabelColor: { key: \"d3pie-labels-mainLabel-color\" },\n d3PieLabelsMainLabelFont: { key: \"d3pie-labels-mainLabel-font\" },\n d3PieLabelsMainLabelFontsize: { key: \"d3pie-labels-mainLabel-fontsize\" },\n d3PieLabelsMainLabelFontweight: { key: \"d3pie-labels-mainLabel-fontweight\" },\n d3PieLabelsPercentageColor: { key: \"d3pie-labels-percentage-color\" },\n d3PieLabelsPercentageFont: { key: \"d3pie-labels-percentage-font\" },\n d3PieLabelsPercentageFontsize: { key: \"d3pie-labels-percentage-fontsize\" },\n d3PieLabelsPercentageFontweight: { key: \"d3pie-labels-percentage-fontweight\" },\n d3PieLabelsValueColor: { key: \"d3pie-labels-value-color\" },\n d3PieLabelsValueFont: { key: \"d3pie-labels-value-font\" },\n d3PieLabelsValueFontsize: { key: \"d3pie-labels-value-fontsize\" },\n d3PieLabelsValueFontweight: { key: \"d3pie-labels-value-fontweight\" },\n d3PieLabelsLinesEnabled: { key: \"d3pie-labels-lines-enabled\", type: \"boolean\" },\n d3PieLabelsLinesStyle: { key: \"d3pie-labels-lines-style\" },\n d3PieLabelsLinesColor: { key: \"d3pie-labels-lines-color\" },\n d3PieLabelsTruncationEnabled: { key: \"d3pie-labels-truncation-enabled\", type: \"boolean\" },\n d3PieLabelsTruncationTruncatelength: { key: \"d3pie-labels-truncation-truncatelength\" },\n d3PieMiscColorsSegmentstroke: { key: \"d3pie-misc-colors-segmentstroke\" },\n d3PieMiscGradientEnabled: { key: \"d3pie-misc-gradient-enabled\", type: \"boolean\" },\n d3PieMiscColorsPercentage: { key: \"d3pie-misc-colors-percentage\" },\n d3PieMiscGradientColor: { key: \"d3pie-misc-gradient-color\" },\n d3PieCssprefix: { key: \"d3pie-cssprefix\" },\n\n peityStrokeWidth: { key: \"peity-strokewidth\" },\n\n textOnlyDecimalPlaces: { key: \"textonly-decimal-places\" },\n textOnlyPrefix: { key: \"textonly-prefix\" },\n textOnlySuffix: { key: \"textonly-suffix\" },\n})\n\nexport const getAttributesStatic = (node: Element): Attributes => mapObjIndexed(\n (attribute: AttributeConfig) => (\n (attribute as BooleanAttributeConfig).type === \"boolean\"\n ? 
getDataAttributeBoolean(\n node,\n attribute.key,\n attribute.defaultValue as BooleanAttributeConfig[\"defaultValue\"],\n ) : getDataAttribute(node, attribute.key, attribute.defaultValue)\n ),\n getAttributesMap(),\n) as Attributes // need to override because of broken Ramda typings\n\nexport const getAttributesDynamic = (node: Element) => {\n const showValueOfAttribues = Array.from(node.attributes)\n .filter((attribute) => attribute.name.startsWith(\"data-show-value-of\"))\n .map((attribute) => ({\n [attribute.name.replace(\"data-\", \"\")]: attribute.value,\n }))\n const merged = mergeAll(showValueOfAttribues)\n return isEmpty(merged) ? undefined : merged\n}\n\nexport const getAttributes = (node: Element): Attributes => {\n const attributesStatic = getAttributesStatic(node)\n const showValueOf = getAttributesDynamic(node)\n return { ...attributesStatic, showValueOf }\n}\n\nexport const defaultAttributes: Partial = {\n legendPosition: initialLegendRight ? \"right\" : \"bottom\",\n}\n","import { createAction } from \"redux-act\"\nimport { CancelTokenSource, Method } from \"axios\"\n\nimport { createRequestAction } from \"utils/createRequestAction\"\nimport { InfoPayload } from \"domains/global/__mocks__/info-mock\"\n\nimport { storeKey } from \"./constants\"\nimport { ChartData, ChartMetadata } from \"./chart-types\"\n\nexport interface UpdateChartDataAction {\n chartData: ChartData\n id: string\n}\nexport const updateChartDataAction = createAction(\n `${storeKey}/updateChartData`,\n)\n\nexport interface UpdateChartMetadataAction {\n chartMetadata: ChartMetadata\n id: string\n}\nexport const updateChartMetadataAction = createAction(\n `${storeKey}/updateChartMetadata`,\n)\n\nexport interface FetchDataParams {\n fillMissingPoints?: number\n isRemotelyControlled: boolean\n viewRange: [number, number]\n}\nexport interface FetchDataUrlParams {\n host: string\n chart: string\n context: string\n format: string\n points: number\n group: string\n gtime: number\n options: string\n after: number | null\n before?: number | null\n dimensions?: string\n labels?: {[key: string]: string}\n postGroupBy?: string\n postAggregationMethod?: string\n aggrMethod?: string\n aggrGroups?: string[]\n dimensionsAggrMethod?: string\n nodeIDs?: string[]\n httpMethod?: Method\n groupBy?: string\n}\nexport interface FetchDataPayload extends FetchDataUrlParams {\n id: string,\n fetchDataParams: FetchDataParams\n cancelTokenSource: CancelTokenSource\n}\n\nexport const fetchDataAction = createRequestAction<\n FetchDataPayload,\n { id: string, chartData: ChartData, fetchDataParams: FetchDataParams }\n>(`${storeKey}/fetchDataAction`)\n\n\nexport interface FetchDataCancelAction { id: string }\nexport const fetchDataCancelAction = createAction(\n `${storeKey}/fetchDataCancelAction`,\n)\n\nexport interface FetchDataForSnapshotPayload extends FetchDataUrlParams {\n chartLibrary: string\n id: string\n}\nexport const fetchDataForSnapshotAction = createRequestAction<\n FetchDataForSnapshotPayload,\n { id: string, snapshotData: ChartData }\n>(`${storeKey}/fetchDataForSnapshotAction`)\n\nexport const snapshotExportResetAction = createRequestAction(\n `${storeKey}/snapshotExportResetAction`,\n)\n\nexport interface FetchChartPayload {\n chart: string\n id: string\n host: string\n nodeIDs?: string[]\n}\n\nexport const fetchChartAction = createRequestAction<\n FetchChartPayload,\n { chartMetadata: ChartMetadata, id: string }\n>(`${storeKey}/fetchChartAction`)\n\n\nexport interface SetResizeHeightAction {\n id: string\n resizeHeight: 
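
getAttributesDynamic above is the one dynamic part of attribute parsing: it sweeps every data-show-value-of-* attribute into a single object keyed without the data- prefix, or undefined when none exist. For example (the attribute names here are illustrative):

const node = document.createElement("div")
node.setAttribute("data-show-value-of-in-at", "in-badge")
node.setAttribute("data-show-value-of-out-at", "out-badge")

getAttributesDynamic(node)
// -> { "show-value-of-in-at": "in-badge", "show-value-of-out-at": "out-badge" }

getAttributesDynamic(document.createElement("div"))
// -> undefined (isEmpty check above)
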
number\n}\nexport const setResizeHeightAction = createAction(\n `${storeKey}/setResizeHeight`,\n)\n\nexport interface SetChartPanAndZoomAction {\n id: string\n after: number\n before: number\n shouldForceTimeRange?: boolean\n}\nexport const setChartPanAndZoomAction = createAction(\n `${storeKey}/setChartPanAndZoom`,\n)\n\nexport const resetChartPanAndZoomAction = createAction<{ id: string }>(\n `${storeKey}/resetChartPanAndZoomAction`,\n)\n\nexport const clearChartStateAction = createAction<{ id: string }>(\n `${storeKey}/clearChartStateAction`,\n)\n\nexport interface FetchInfoPayload {\n poll?: boolean\n}\nexport interface FetchInfoSuccessPayload {\n isCloudAvailable: boolean\n isCloudEnabled: boolean\n isAgentClaimed: boolean\n isACLKAvailable: boolean\n fullInfoPayload: InfoPayload\n}\nexport const fetchInfoAction = createRequestAction<\n FetchInfoPayload,\n FetchInfoSuccessPayload\n>(`${storeKey}/fetchInfoAction`)\n","export const SIGN_IN_IFRAME_ID = \"sign_in_iframe\"\n","/* eslint-disable */\n// Main JavaScript file for the Netdata GUI.\n\n// Codacy declarations\n/* global NETDATA */\n\nimport { identity, memoizeWith } from \"ramda\"\nimport {\n centerAroundHighlightAction,\n chartsMetadataRequestSuccess,\n clearHighlightAction,\n fetchAllAlarmsAction,\n loadSnapshotAction,\n resetGlobalPanAndZoomAction,\n resetOptionsAction,\n resetRegistry,\n setDefaultAfterAction,\n setGlobalChartUnderlayAction,\n setGlobalPanAndZoomAction,\n setOptionAction,\n} from './domains/global/actions';\nimport {\n createSelectOption,\n selectDefaultAfter,\n selectGlobalPanAndZoom,\n selectRegistry,\n} from \"./domains/global/selectors\"\nimport { seconds4human } from './domains/chart/utils/seconds4human';\nimport { zeropad } from './utils/units-conversion';\nimport {\n explicitlySignInAction,\n startSnapshotModeAction,\n stopSnapshotModeAction,\n} from './domains/dashboard/actions';\nimport { snapshotExportResetAction } from './domains/chart/actions';\nimport {\n selectAmountOfCharts,\n selectAmountOfSnapshotsFailed,\n selectAmountOfSnapshotsFetched,\n} from './domains/chart/selectors';\nimport { serverDefault } from './utils/server-detection';\nimport { name2id } from './utils/name-2-id';\nimport { isProperTimezone } from './utils/date-time';\nimport { NETDATA_REGISTRY_SERVER } from './utils';\nimport { getHashParam } from 'utils/hash-utils';\nimport { isDemo } from \"./utils/is-demo\"\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from './domains/chart/utils/legend-utils';\nimport { defaultAttributes } from './domains/chart/utils/transformDataAttributes';\n\n// this is temporary, hook will be used after the full main.js refactor\nlet localeDateString, localeTimeString\nexport const updateLocaleFunctions = ({\n localeDateString: newLocaleDateString,\n localeTimeString: newLocaleTimeString,\n}) => {\n localeDateString = newLocaleDateString\n localeTimeString = newLocaleTimeString\n}\n\n// enable alarms checking and notifications\nvar netdataShowAlarms = true;\n\n// enable registry updates\nvar netdataRegistry = true;\n\n// forward definition only - not used here\nvar netdataServer = undefined;\nvar netdataServerStatic = undefined;\nvar netdataCheckXSS = undefined;\n\nlet reduxStore\n\nfunction escapeUserInputHTML(s) {\n return s.toString()\n .replace(/&/g, '&')\n .replace(//g, '>')\n .replace(/\"/g, '"')\n .replace(/#/g, '#')\n .replace(/'/g, ''')\n .replace(/\\(/g, '(')\n .replace(/\\)/g, ')')\n .replace(/\\//g, '/');\n}\n\nconst setOption = (key, value) => {\n reduxStore.dispatch(setOptionAction({\n 
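
The replacement strings in escapeUserInputHTML above are visibly damaged: sequences like .replace(/&/g, '&') and the empty pattern in .replace(//g, '>') are what HTML entity references (and a /</g pattern) look like after being decoded by whatever extracted this bundle. The original presumably escaped each character to an HTML entity, along these lines — a reconstruction, not a verbatim copy of the source:

function escapeUserInputHTML(s: unknown): string {
  return String(s)
    .replace(/&/g, "&amp;")   // must run first so later entities survive
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/#/g, "&#35;")
    .replace(/'/g, "&#39;")
    .replace(/\(/g, "&#40;")
    .replace(/\)/g, "&#41;")
    .replace(/\//g, "&#47;")
}
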
key,\n value,\n }))\n}\n\n// temporary function that will be removed after full main.js migration to react\nconst getFromRegistry = (prop) => {\n const registry = selectRegistry(reduxStore.getState())\n return registry?.[prop]\n}\n\nfunction verifyURL(s) {\n if (typeof (s) === 'string' && (s.startsWith('http://') || s.startsWith('https://'))) {\n return s\n .replace(/'/g, '%22')\n .replace(/\"/g, '%27')\n .replace(/\\)/g, '%28')\n .replace(/\\(/g, '%29');\n }\n\n console.log('invalid URL detected:');\n console.log(s);\n return 'javascript:alert(\"invalid url\");';\n}\n\n// --------------------------------------------------------------------\n// urlOptions\n\nwindow.urlOptions = {\n hash: '#',\n theme: null,\n help: null,\n mode: 'live', // 'live', 'print'\n update_always: false,\n pan_and_zoom: false,\n server: null,\n after: getHashParam('after') ?? 0,\n before: getHashParam('before') ?? 0,\n highlight: false,\n highlight_after: 0,\n highlight_before: 0,\n nowelcome: false,\n show_alarms: false,\n chart: null,\n family: null,\n alarm: null,\n utc: null,\n\n hasProperty: function (property) {\n // console.log('checking property ' + property + ' of type ' + typeof(this[property]));\n return typeof this[property] !== 'undefined';\n },\n\n genHash: function (forReload) {\n var hash = urlOptions.hash;\n\n hash += ';after=' + urlOptions.after.toString() +\n ';before=' + urlOptions.before.toString();\n\n if (urlOptions.highlight === true) {\n hash += ';highlight_after=' + urlOptions.highlight_after.toString() +\n ';highlight_before=' + urlOptions.highlight_before.toString();\n }\n\n if (urlOptions.theme !== null) {\n hash += ';theme=' + urlOptions.theme.toString();\n }\n\n if (urlOptions.help !== null) {\n hash += ';help=' + urlOptions.help.toString();\n }\n\n if (urlOptions.update_always === true) {\n hash += ';update_always=true';\n }\n\n if (forReload === true && urlOptions.server !== null) {\n hash += ';server=' + urlOptions.server.toString();\n }\n\n if (urlOptions.mode !== 'live') {\n hash += ';mode=' + urlOptions.mode;\n }\n\n if (urlOptions.utc !== null) {\n hash += ';utc=' + urlOptions.utc;\n }\n\n return hash;\n },\n\n parseHash: function () {\n var variables = document.location.hash.split(';');\n var len = variables.length;\n while (len--) {\n if (len !== 0) {\n var p = variables[len].split('=');\n if (urlOptions.hasProperty(p[0]) && typeof p[1] !== 'undefined') {\n urlOptions[p[0]] = decodeURIComponent(p[1]);\n }\n } else {\n if (variables[len].length > 0) {\n urlOptions.hash = variables[len];\n }\n }\n }\n\n var booleans = ['nowelcome', 'show_alarms', 'update_always'];\n len = booleans.length;\n while (len--) {\n if (urlOptions[booleans[len]] === 'true' || urlOptions[booleans[len]] === true || urlOptions[booleans[len]] === '1' || urlOptions[booleans[len]] === 1) {\n urlOptions[booleans[len]] = true;\n } else {\n urlOptions[booleans[len]] = false;\n }\n }\n\n var numeric = ['after', 'before', 'highlight_after', 'highlight_before'];\n len = numeric.length;\n while (len--) {\n if (typeof urlOptions[numeric[len]] === 'string') {\n try {\n urlOptions[numeric[len]] = parseInt(urlOptions[numeric[len]]);\n }\n catch (e) {\n console.log('failed to parse URL hash parameter ' + numeric[len]);\n urlOptions[numeric[len]] = 0;\n }\n }\n }\n\n if (urlOptions.server !== null && urlOptions.server !== '') {\n netdataServerStatic = document.location.origin.toString() + document.location.pathname.toString();\n netdataServer = urlOptions.server;\n netdataCheckXSS = true;\n } else {\n urlOptions.server = 
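
genHash/parseHash above round-trip dashboard state through a semicolon-separated URL fragment: segment zero is the base hash, every other segment is key=value, and parseHash then coerces the known boolean and numeric keys. A simplified parser for the same format — parseExample is illustrative; the real code writes straight into urlOptions and only accepts keys it already has:

// Fragment shape: "#<base>;key=value;key=value", e.g.
//   "#menu_system;after=-600000;before=0;theme=slate"
const parseExample = (hash: string) => {
  const [base, ...pairs] = hash.split(";")
  const params: Record<string, string> = {}
  pairs.forEach(pair => {
    const [key, value] = pair.split("=")
    if (value !== undefined) params[key] = decodeURIComponent(value)
  })
  return { base, params }
}

parseExample("#;after=-600000;before=0;theme=slate")
// -> { base: "#", params: { after: "-600000", before: "0", theme: "slate" } }
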
null;\n }\n\n if (urlOptions.before > 0 && urlOptions.after > 0) {\n urlOptions.pan_and_zoom = true;\n urlOptions.nowelcome = true;\n } else {\n urlOptions.pan_and_zoom = false;\n }\n\n if (urlOptions.highlight_before > 0 && urlOptions.highlight_after > 0) {\n urlOptions.highlight = true;\n } else {\n urlOptions.highlight = false;\n }\n\n switch (urlOptions.mode) {\n case 'print':\n urlOptions.theme = 'white';\n urlOptions.welcome = false;\n urlOptions.help = false;\n urlOptions.show_alarms = false;\n\n if (urlOptions.pan_and_zoom === false) {\n urlOptions.pan_and_zoom = true;\n urlOptions.before = Date.now();\n const fallbackAfter = -600000\n const defaultAfter = urlOptions.after ? urlOptions.after * 1000 : fallbackAfter\n urlOptions.after = urlOptions.before + defaultAfter;\n }\n\n netdataShowAlarms = false;\n netdataRegistry = false;\n break;\n\n case 'live':\n default:\n urlOptions.mode = 'live';\n break;\n }\n\n // console.log(urlOptions);\n },\n\n hashUpdate: function () {\n history.replaceState(null, '', urlOptions.genHash(true));\n },\n\n netdataPanAndZoomCallback: function (status, after, before) {\n if (netdataSnapshotData === null) {\n urlOptions.pan_and_zoom = status;\n urlOptions.after = after;\n urlOptions.before = before;\n }\n },\n\n updateUtcParam: function (utc) {\n if (!utc) return\n urlOptions.utc = utc\n urlOptions.hashUpdate();\n },\n\n netdataHighlightCallback: function (status, after, before) {\n if (status === true && (after === null || before === null || after <= 0 || before <= 0 || after >= before)) {\n status = false;\n after = 0;\n before = 0;\n }\n\n if (window.netdataSnapshotData === null) {\n urlOptions.highlight = status;\n } else {\n urlOptions.highlight = false;\n }\n\n urlOptions.highlight_after = Math.round(after);\n urlOptions.highlight_before = Math.round(before);\n urlOptions.hashUpdate();\n\n if (status === true && after > 0 && before > 0 && after < before) {\n var d1 = localeDateString(after);\n var d2 = localeDateString(before);\n if (d1 === d2) {\n d2 = '';\n }\n document.getElementById('navbar-highlight-content').innerHTML =\n ''\n + 'highlighted time-frame'\n + ' ' + d1 + ' ' + localeTimeString(after) + ' to '\n + ' ' + d2 + ' ' + localeTimeString(before) + ', '\n + 'duration ' + seconds4human(Math.round((before - after) / 1000)) + ''\n + ''\n + '';\n\n $('.navbar-highlight').show();\n $('.navbar-highlight').width(\"80%\");\n $('.highlight-tooltip').tooltip({\n html: true,\n delay: { show: 500, hide: 0 },\n container: 'body'\n });\n } else {\n $('.navbar-highlight').hide();\n $('.navbar-highlight').width(\"100%\");\n }\n },\n\n clearHighlight: function () {\n reduxStore.dispatch(clearHighlightAction())\n },\n\n showHighlight: function () {\n reduxStore.dispatch(centerAroundHighlightAction())\n }\n};\n\nurlOptions.parseHash();\n\n// --------------------------------------------------------------------\n// check options that should be processed before loading netdata.js\n\nvar localStorageTested = -1;\n\nfunction localStorageTest() {\n if (localStorageTested !== -1) {\n return localStorageTested;\n }\n\n if (typeof Storage !== \"undefined\" && typeof localStorage === 'object') {\n var test = 'test';\n try {\n localStorage.setItem(test, test);\n localStorage.removeItem(test);\n localStorageTested = true;\n }\n catch (e) {\n console.log(e);\n localStorageTested = false;\n }\n } else {\n localStorageTested = false;\n }\n\n return localStorageTested;\n}\n\nfunction loadLocalStorage(name) {\n var ret = null;\n\n try {\n if (localStorageTest() === true) 
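// A minimal sketch of the '#key=value;key=value' scheme that genHash() and
// parseHash() implement above; encodeHashState()/decodeHashState() are
// illustrative helpers, not part of main.js.
function encodeHashState(state) {
    return '#' + Object.keys(state).map(function (key) {
        return key + '=' + encodeURIComponent(String(state[key]));
    }).join(';');
}

function decodeHashState(hash) {
    var state = {};
    hash.replace(/^#/, '').split(';').forEach(function (pair) {
        var p = pair.split('=');
        if (p.length === 2) {
            state[p[0]] = decodeURIComponent(p[1]);
        }
    });
    return state;
}

// round-trip: '#after=-600;before=0' parses back to { after: '-600', before: '0' }
var demoHashState = decodeHashState(encodeHashState({ after: -600, before: 0 }));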
{\n ret = localStorage.getItem(name);\n } else {\n console.log('localStorage is not available');\n }\n }\n catch (error) {\n console.log(error);\n return null;\n }\n\n if (typeof ret === 'undefined' || ret === null) {\n return null;\n }\n\n // console.log('loaded: ' + name.toString() + ' = ' + ret.toString());\n\n return ret;\n}\n\nfunction saveLocalStorage(name, value) {\n // console.log('saving: ' + name.toString() + ' = ' + value.toString());\n try {\n if (localStorageTest() === true) {\n localStorage.setItem(name, value.toString());\n return true;\n }\n }\n catch (error) {\n console.log(error);\n }\n\n return false;\n}\n\nfunction getTheme(def) {\n if (urlOptions.mode === 'print') {\n return 'white';\n }\n\n var ret = loadLocalStorage('netdataTheme');\n if (typeof ret === 'undefined' || ret === null || ret === 'undefined') {\n return def;\n } else {\n return ret;\n }\n}\n\nfunction setTheme(theme) {\n if (urlOptions.mode === 'print') {\n return false;\n }\n\n if (theme === netdataTheme) {\n return false;\n }\n return saveLocalStorage('netdataTheme', theme);\n}\n\nwindow.netdataTheme = getTheme('slate');\n// this is of course temporary, will be fixed during complete main.js rewrite\nNETDATA.updateTheme()\nvar netdataShowHelp = true;\n\nif (urlOptions.theme !== null) {\n setTheme(urlOptions.theme);\n netdataTheme = urlOptions.theme;\n window.NETDATA.updateTheme()\n} else {\n urlOptions.theme = netdataTheme;\n}\n\nif (urlOptions.help !== null) {\n saveLocalStorage('options.show_help', urlOptions.help);\n netdataShowHelp = urlOptions.help;\n} else {\n urlOptions.help = loadLocalStorage('options.show_help');\n}\n\n// --------------------------------------------------------------------\n// natural sorting\n// http://www.davekoelle.com/files/alphanum.js - LGPL\n\nfunction naturalSortChunkify(t) {\n var tz = [];\n var x = 0, y = -1, n = 0, i, j;\n\n while (i = (j = t.charAt(x++)).charCodeAt(0)) {\n var m = (i >= 48 && i <= 57);\n if (m !== n) {\n tz[++y] = \"\";\n n = m;\n }\n tz[y] += j;\n }\n\n return tz;\n}\n\nfunction naturalSortCompare(a, b) {\n var aa = naturalSortChunkify(a.toLowerCase());\n var bb = naturalSortChunkify(b.toLowerCase());\n\n for (var x = 0; aa[x] && bb[x]; x++) {\n if (aa[x] !== bb[x]) {\n var c = Number(aa[x]), d = Number(bb[x]);\n if (c.toString() === aa[x] && d.toString() === bb[x]) {\n return c - d;\n } else {\n return (aa[x] > bb[x]) ? 
1 : -1;\n }\n }\n }\n\n return aa.length - bb.length;\n}\n\n// --------------------------------------------------------------------\n// saving files to client\n\nfunction saveTextToClient(data, filename) {\n var blob = new Blob([data], {\n type: 'application/octet-stream'\n });\n\n var url = URL.createObjectURL(blob);\n var link = document.createElement('a');\n link.setAttribute('href', url);\n link.setAttribute('download', filename);\n\n var el = document.getElementById('hiddenDownloadLinks');\n el.innerHTML = '';\n el.appendChild(link);\n\n setTimeout(function () {\n el.removeChild(link);\n URL.revokeObjectURL(url);\n }, 60);\n\n link.click();\n}\n\nfunction saveObjectToClient(data, filename) {\n saveTextToClient(JSON.stringify(data), filename);\n}\n\nfunction netdataURL(url, forReload) {\n if (typeof url === 'undefined')\n // url = document.location.toString();\n {\n url = '';\n }\n\n if (url.indexOf('#') !== -1) {\n url = url.substring(0, url.indexOf('#'));\n }\n\n var hash = urlOptions.genHash(forReload);\n\n // console.log('netdataURL: ' + url + hash);\n\n return url + hash;\n}\n\nfunction netdataReload(url) {\n document.location = verifyURL(netdataURL(url, true));\n\n // since we play with hash\n // this is needed to reload the page\n location.reload();\n}\n\nwindow.gotoHostedModalHandler = (url) => {\n document.location = verifyURL(url + urlOptions.genHash());\n return false;\n}\n\nvar gotoServerValidateRemaining = 0;\nvar gotoServerMiddleClick = false;\nvar gotoServerStop = false;\n\nfunction gotoServerValidateUrl(id, guid, url) {\n var penalty = 0;\n var error = 'failed';\n\n if (document.location.toString().startsWith('http://') && url.toString().startsWith('https://'))\n // we penalize https only if the current url is http\n // to allow the user walk through all its servers.\n {\n penalty = 500;\n } else if (document.location.toString().startsWith('https://') && url.toString().startsWith('http://')) {\n error = 'can\\'t check';\n }\n\n var finalURL = netdataURL(url);\n\n setTimeout(function () {\n document.getElementById('gotoServerList').innerHTML += '
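// Usage sketch for naturalSortCompare() above: numeric chunks are compared
// as numbers, so device and interface names sort in human order. The sample
// array is illustrative only.
var demoNames = ['cpu10', 'cpu2', 'cpu1'];
demoNames.sort(naturalSortCompare);
// -> ['cpu1', 'cpu2', 'cpu10'] (a plain lexicographic sort would give cpu1, cpu10, cpu2)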
' + escapeUserInputHTML(url) + 'checking...';\n\n NETDATA.registryHello(url, function (data) {\n if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid === guid) {\n // console.log('OK ' + id + ' URL: ' + url);\n document.getElementById(guid + '-' + id + '-status').innerHTML = \"OK\";\n\n if (!gotoServerStop) {\n gotoServerStop = true;\n\n if (gotoServerMiddleClick) {\n window.open(verifyURL(finalURL), '_blank');\n gotoServerMiddleClick = false;\n const registryMachines = getFromRegistry(\"registryMachines\");\n document.getElementById('gotoServerResponse').innerHTML = 'Opening new window to ' + registryMachines[guid].name + '
' + escapeUserInputHTML(url) + '

(check your pop-up blocker if it fails)';\n } else {\n document.getElementById('gotoServerResponse').innerHTML += 'found it! It is at:
' + escapeUserInputHTML(url) + '';\n document.location = verifyURL(finalURL);\n $('#gotoServerModal').modal('hide');\n }\n }\n } else {\n if (typeof data !== 'undefined' && data !== null && typeof data.machine_guid === 'string' && data.machine_guid !== guid) {\n error = 'wrong machine';\n }\n\n document.getElementById(guid + '-' + id + '-status').innerHTML = error;\n gotoServerValidateRemaining--;\n if (gotoServerValidateRemaining <= 0) {\n gotoServerMiddleClick = false;\n document.getElementById('gotoServerResponse').innerHTML = 'Sorry! I cannot find any operational URL for this server';\n }\n }\n });\n }, (id * 50) + penalty);\n}\n\nwindow.gotoServerModalHandler = function gotoServerModalHandler(guid) {\n // console.log('goto server: ' + guid);\n\n gotoServerStop = false;\n var checked = {};\n const registryMachines = getFromRegistry(\"registryMachines\");\n var len = registryMachines[guid].alternateUrls.length;\n var count = 0;\n\n document.getElementById('gotoServerResponse').innerHTML = '';\n document.getElementById('gotoServerList').innerHTML = '';\n document.getElementById('gotoServerName').innerHTML = registryMachines[guid].name;\n $('#gotoServerModal').modal('show');\n\n gotoServerValidateRemaining = len;\n while (len--) {\n var url = registryMachines[guid].alternateUrls[len];\n checked[url] = true;\n gotoServerValidateUrl(count++, guid, url);\n }\n\n // When the registry is enabled, if the user's known URLs are not working\n // we consult the registry to get additional URLs.\n setTimeout(function () {\n if (gotoServerStop === false) {\n document.getElementById('gotoServerResponse').innerHTML = 'Added all the known URLs for this machine.';\n NETDATA.registrySearch(guid, getFromRegistry, function (data) {\n // console.log(data);\n len = data.urls.length;\n while (len--) {\n var url = data.urls[len][1];\n // console.log(url);\n if (typeof checked[url] === 'undefined') {\n gotoServerValidateRemaining++;\n checked[url] = true;\n gotoServerValidateUrl(count++, guid, url);\n }\n }\n });\n }\n }, 2000);\n\n return false;\n}\n\nwindow.switchRegistryModalHandler = () => {\n document.getElementById('switchRegistryPersonGUID').value = getFromRegistry(\"personGuid\");\n document.getElementById('switchRegistryURL').innerHTML = getFromRegistry(\"registryServer\");\n document.getElementById('switchRegistryResponse').innerHTML = '';\n $('#switchRegistryModal').modal('show');\n};\n\nwindow.notifyForSwitchRegistry = () => {\n // it's just old code, with minimal changes\n const newPersonGuid = document.getElementById('switchRegistryPersonGUID').value;\n\n if (newPersonGuid !== '' && newPersonGuid.length === 36) {\n\n $.ajax({\n url: getFromRegistry(\"registryServer\") + '/api/v1/registry?action=switch&machine='\n + getFromRegistry(\"machineGuid\") + '&name='\n + encodeURIComponent(getFromRegistry(\"hostname\")) + '&url='\n + encodeURIComponent(serverDefault) + '&to=' + newPersonGuid,\n async: true,\n cache: false,\n headers: {\n 'Cache-Control': 'no-cache, no-store',\n 'Pragma': 'no-cache'\n },\n xhrFields: {withCredentials: true} // required for the cookie\n })\n .done(function (data) {\n data = NETDATA.xss.checkAlways('/api/v1/registry?action=switch', data);\n\n if (typeof data.status !== 'string' || data.status !== 'ok') {\n // NETDATA.error(413, NETDATA.registry.server + ' responded with: ' + JSON.stringify(data));\n console.warn(\"Netdata registry server send invalid response to SWITCH\", data)\n data = null;\n }\n\n $('#switchRegistryModal').modal('hide');\n })\n .fail(function () {\n // 
NETDATA.error(414, NETDATA.registry.server);\n console.warn(\"Netdata registry SWITCH failed\")\n document.getElementById('switchRegistryResponse').innerHTML = \"Sorry! The registry rejected your request.\";\n });\n } else {\n document.getElementById('switchRegistryResponse').innerHTML = \"The ID you have entered is not a GUID.\";\n }\n};\n\nvar deleteRegistryGuid = null;\nvar deleteRegistryUrl = null;\n\nwindow.deleteRegistryModalHandler = (guid, name, url) => {\n deleteRegistryGuid = guid;\n deleteRegistryUrl = url;\n\n document.getElementById('deleteRegistryServerName').innerHTML = name;\n document.getElementById('deleteRegistryServerName2').innerHTML = name;\n document.getElementById('deleteRegistryServerURL').innerHTML = url;\n document.getElementById('deleteRegistryResponse').innerHTML = '';\n\n $('#deleteRegistryModal').modal('show');\n}\n\nwindow.notifyForDeleteRegistry = () => {\n const responseEl = document.getElementById('deleteRegistryResponse');\n\n if (deleteRegistryUrl) {\n NETDATA.registryDelete(getFromRegistry, serverDefault, deleteRegistryUrl, function (result) {\n if (result !== null) {\n deleteRegistryUrl = null;\n $('#deleteRegistryModal').modal('hide');\n reduxStore.dispatch(resetRegistry())\n } else {\n responseEl.innerHTML = \"Sorry, this command was rejected by the registry server!\";\n }\n });\n }\n}\n\nvar options = {\n menus: {},\n submenu_names: {},\n data: null,\n hostname: 'netdata_server', // will be overwritten by the netdata server\n version: 'unknown',\n release_channel: 'unknown',\n hosts: [],\n\n duration: 0, // the default duration of the charts\n update_every: 1,\n\n chartsPerRow: 0,\n // chartsMinWidth: 1450,\n chartsHeight: 180,\n};\n\nfunction chartsPerRow(total) {\n void (total);\n\n if (options.chartsPerRow === 0) {\n return 1;\n //var width = Math.floor(total / options.chartsMinWidth);\n //if(width === 0) width = 1;\n //return width;\n } else {\n return options.chartsPerRow;\n }\n}\n\nfunction prioritySort(a, b) {\n if (a.priority < b.priority) {\n return -1;\n }\n if (a.priority > b.priority) {\n return 1;\n }\n return naturalSortCompare(a.name, b.name);\n}\n\nfunction sortObjectByPriority(object) {\n var idx = {};\n var sorted = [];\n\n for (var i in object) {\n if (!object.hasOwnProperty(i)) {\n continue;\n }\n\n if (typeof idx[i] === 'undefined') {\n idx[i] = object[i];\n sorted.push(i);\n }\n }\n\n sorted.sort(function (a, b) {\n if (idx[a].priority < idx[b].priority) {\n return -1;\n }\n if (idx[a].priority > idx[b].priority) {\n return 1;\n }\n return naturalSortCompare(a, b);\n });\n\n return sorted;\n}\n\n// ----------------------------------------------------------------------------\n// scroll to a section, without changing the browser history\n\nwindow.scrollToId = (hash) => {\n if (hash && hash !== '' && document.getElementById(hash) !== null) {\n var offset = $('#' + hash).offset();\n if (typeof offset !== 'undefined') {\n //console.log('scrolling to ' + hash + ' at ' + offset.top.toString());\n $('html, body').animate({ scrollTop: offset.top - 30 }, 0);\n }\n }\n\n // we must return false to prevent the default action\n return false;\n}\n\n// ----------------------------------------------------------------------------\n\n// user editable information\nwindow.customDashboard = {\n menu: {},\n submenu: {},\n context: {}\n};\n\n// netdata standard information\nconst netdataDashboard = {\n sparklines_registry: {},\n os: 'unknown',\n\n menu: {},\n submenu: {},\n context: {},\n\n // generate a sparkline\n // used in the documentation\n 
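// Usage sketch for prioritySort() and sortObjectByPriority() above: keys come
// back ordered by their 'priority' field, with ties broken by natural name
// order. The demoMenus object is illustrative only.
var demoMenus = {
    mem: { priority: 200 },
    cpu: { priority: 100 },
    disk: { priority: 200 }
};
var demoMenuOrder = sortObjectByPriority(demoMenus);
// -> ['cpu', 'disk', 'mem'] (100 first, then the two 200s in natural order)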
sparkline: function (prefix, chart, dimension, units, suffix) {\n if (options.data === null || typeof options.data.charts === 'undefined') {\n return '';\n }\n\n if (typeof options.data.charts[chart] === 'undefined') {\n return '';\n }\n\n if (typeof options.data.charts[chart].dimensions === 'undefined') {\n return '';\n }\n\n if (typeof options.data.charts[chart].dimensions[dimension] === 'undefined') {\n return '';\n }\n\n var key = chart + '.' + dimension;\n\n if (typeof units === 'undefined') {\n units = '';\n }\n\n if (typeof this.sparklines_registry[key] === 'undefined') {\n this.sparklines_registry[key] = { count: 1 };\n } else {\n this.sparklines_registry[key].count++;\n }\n\n key = key + '.' + this.sparklines_registry[key].count;\n\n return prefix + '
(X' + units + ')' + suffix;\n },\n\n gaugeChart: function (title, width, dimensions, colors) {\n if (typeof colors === 'undefined') {\n colors = '';\n }\n\n if (typeof dimensions === 'undefined') {\n dimensions = '';\n }\n\n return '
';\n },\n\n anyAttribute: function (obj, attr, key, def) {\n if (typeof (obj[key]) !== 'undefined') {\n var x = obj[key][attr];\n\n if (typeof (x) === 'undefined') {\n return def;\n }\n\n if (typeof (x) === 'function') {\n return x(netdataDashboard.os);\n }\n\n return x;\n }\n\n return def;\n },\n\n menuTitle: function (chart) {\n if (typeof chart.menu_pattern !== 'undefined') {\n return (this.anyAttribute(this.menu, 'title', chart.menu_pattern, chart.menu_pattern).toString()\n + ' ' + chart.type.slice(-(chart.type.length - chart.menu_pattern.length - 1)).toString()).replace(/_/g, ' ');\n }\n\n return (this.anyAttribute(this.menu, 'title', chart.menu, chart.menu)).toString().replace(/_/g, ' ');\n },\n\n menuIcon: function (chart) {\n if (typeof chart.menu_pattern !== 'undefined') {\n return this.anyAttribute(this.menu, 'icon', chart.menu_pattern, '').toString();\n }\n\n return this.anyAttribute(this.menu, 'icon', chart.menu, '');\n },\n\n menuInfo: function (chart) {\n if (typeof chart.menu_pattern !== 'undefined') {\n return this.anyAttribute(this.menu, 'info', chart.menu_pattern, null);\n }\n\n return this.anyAttribute(this.menu, 'info', chart.menu, null);\n },\n\n menuHeight: function (chart) {\n if (typeof chart.menu_pattern !== 'undefined') {\n return this.anyAttribute(this.menu, 'height', chart.menu_pattern, 1.0);\n }\n\n return this.anyAttribute(this.menu, 'height', chart.menu, 1.0);\n },\n\n submenuTitle: function (menu, submenu) {\n var key = menu + '.' + submenu;\n // console.log(key);\n var title = this.anyAttribute(this.submenu, 'title', key, submenu).toString().replace(/_/g, ' ');\n if (title.length > 28) {\n var a = title.substring(0, 13);\n var b = title.substring(title.length - 12, title.length);\n return a + '...' + b;\n }\n return title;\n },\n\n submenuInfo: function (menu, submenu) {\n var key = menu + '.' + submenu;\n return this.anyAttribute(this.submenu, 'info', key, null);\n },\n\n submenuHeight: function (menu, submenu, relative) {\n var key = menu + '.' + submenu;\n return this.anyAttribute(this.submenu, 'height', key, 1.0) * relative;\n },\n\n contextInfo: function (id) {\n var x = this.anyAttribute(this.context, 'info', id, null);\n\n if (x !== null) {\n return '
' + x + '
';\n } else {\n return '';\n }\n },\n\n contextValueRange: function (id) {\n if (typeof this.context[id] !== 'undefined' && typeof this.context[id].valueRange !== 'undefined') {\n return this.context[id].valueRange;\n } else {\n return '[null, null]';\n }\n },\n\n contextHeight: function (id, def) {\n if (typeof this.context[id] !== 'undefined' && typeof this.context[id].height !== 'undefined') {\n return def * this.context[id].height;\n } else {\n return def;\n }\n },\n\n contextDecimalDigits: function (id, def) {\n if (typeof this.context[id] !== 'undefined' && typeof this.context[id].decimalDigits !== 'undefined') {\n return this.context[id].decimalDigits;\n } else {\n return def;\n }\n }\n};\nwindow.netdataDashboard = netdataDashboard // share with dashboard_info.js :/\n\n// ----------------------------------------------------------------------------\n\n// enrich the data structure returned by netdata\n// to reflect our menu system and content\n// TODO: this is a shame - we should fix charts naming (issue #807)\nfunction enrichChartData(chart) {\n var parts = chart.type.split('_');\n var tmp = parts[0];\n\n switch (tmp) {\n case 'ap':\n case 'net':\n case 'disk':\n case 'powersupply':\n chart.menu = tmp;\n break;\n\n case 'apache':\n chart.menu = chart.type;\n if (parts.length > 2 && parts[1] === 'cache') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'bind':\n chart.menu = chart.type;\n if (parts.length > 2 && parts[1] === 'rndc') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'cgroup':\n chart.menu = chart.type;\n if (chart.id.match(/.*[\\._\\/-:]qemu[\\._\\/-:]*/) || chart.id.match(/.*[\\._\\/-:]kvm[\\._\\/-:]*/)) {\n chart.menu_pattern = 'cgqemu';\n } else {\n chart.menu_pattern = 'cgroup';\n }\n break;\n\n case 'go':\n chart.menu = chart.type;\n if (parts.length > 2 && parts[1] === 'expvar') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'mount':\n if (parts.length > 2) {\n chart.menu = tmp + '_' + parts[1];\n }\n else {\n chart.menu = tmp;\n }\n break;\n\n case 'isc':\n chart.menu = chart.type;\n if (parts.length > 2 && parts[1] === 'dhcpd') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'ovpn':\n chart.menu = chart.type;\n if (parts.length > 3 && parts[1] === 'status' && parts[2] === 'log') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'smartd':\n case 'web':\n chart.menu = chart.type;\n if (parts.length > 2 && parts[1] === 'log') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'dnsmasq':\n chart.menu = chart.type;\n if (parts.length == 2 && parts[1] === 'dhcp') {\n chart.menu = tmp + '_' + parts[1];\n } else if (parts.length > 2 && parts[1] === 'dhcp') {\n chart.menu_pattern = tmp + '_' + parts[1];\n } else if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n\n case 'anomaly':\n if (parts.length >= 2 && parts[1].startsWith('detection')) {\n chart.menu = tmp + '_detection';\n }\n break;\n\n case 'tc':\n chart.menu = tmp;\n\n // find a name for this device from fireqos info\n // we strip '_(in|out)' or '(in|out)_'\n if (chart.context === 'tc.qos' && (typeof 
options.submenu_names[chart.family] === 'undefined' || options.submenu_names[chart.family] === chart.family)) {\n var n = chart.name.split('.')[1];\n if (n.endsWith('_in')) {\n options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_in'));\n } else if (n.endsWith('_out')) {\n options.submenu_names[chart.family] = n.slice(0, n.lastIndexOf('_out'));\n } else if (n.startsWith('in_')) {\n options.submenu_names[chart.family] = n.slice(3, n.length);\n } else if (n.startsWith('out_')) {\n options.submenu_names[chart.family] = n.slice(4, n.length);\n } else {\n options.submenu_names[chart.family] = n;\n }\n }\n\n // increase the priority of IFB devices\n // to have inbound appear before outbound\n if (chart.id.match(/.*-ifb$/)) {\n chart.priority--;\n }\n\n break;\n\n default:\n chart.menu = chart.type;\n if (parts.length > 1) {\n chart.menu_pattern = tmp;\n }\n break;\n }\n\n chart.submenu = chart.family;\n}\n\n// ----------------------------------------------------------------------------\n\nfunction headMain(os, charts, duration) {\n void (os);\n\n if (urlOptions.mode === 'print') {\n return '';\n }\n\n var head = '';\n\n if (typeof charts['system.swap'] !== 'undefined') {\n head += '
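// A sketch of what enrichChartData() above derives: the first '_'-separated
// token of chart.type picks the menu, and two-token prefixes such as
// 'web_log' become the menu_pattern used for grouping. demoChart is
// illustrative only.
var demoChart = { type: 'web_log_nginx', id: 'web_log_nginx.requests', family: 'requests', priority: 60000 };
enrichChartData(demoChart);
// demoChart.menu         === 'web_log_nginx' (the full type)
// demoChart.menu_pattern === 'web_log'       ('web' + '_' + 'log')
// demoChart.submenu      === 'requests'      (always the chart family)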
';\n }\n\n if (typeof charts['system.io'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n else if (typeof charts['system.pgpgio'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n\n if (typeof charts['system.cpu'] !== 'undefined') {\n head += '
';\n }\n\n if (typeof charts['system.net'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n else if (typeof charts['system.ip'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n else if (typeof charts['system.ipv4'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n else if (typeof charts['system.ipv6'] !== 'undefined') {\n head += '
';\n\n head += '
';\n }\n\n if (typeof charts['system.ram'] !== 'undefined') {\n head += '
';\n }\n\n return head;\n}\n\nfunction generateHeadCharts(type, chart, duration) {\n if (urlOptions.mode === 'print') {\n return '';\n }\n\n var head = '';\n var hcharts = netdataDashboard.anyAttribute(netdataDashboard.context, type, chart.context, []);\n if (hcharts.length > 0) {\n var hi = 0, hlen = hcharts.length;\n while (hi < hlen) {\n if (typeof hcharts[hi] === 'function') {\n head += hcharts[hi](netdataDashboard.os, chart.id).replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);\n } else {\n head += hcharts[hi].replace(/CHART_DURATION/g, duration.toString()).replace(/CHART_UNIQUE_ID/g, chart.id);\n }\n hi++;\n }\n }\n return head;\n}\n\nfunction renderPage(menus, data) {\n var div = document.getElementById('charts_div');\n var pcent_width = Math.floor(100 / chartsPerRow($(div).width()));\n\n // find the proper duration for per-second updates\n var duration = Math.round(($(div).width() * pcent_width / 100 * data.update_every / 3) / 60) * 60;\n options.duration = duration; // probably obsolete/not needed\n options.update_every = data.update_every;\n\n var html = '';\n var sidebar = '
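// A sketch of the placeholder convention generateHeadCharts() above relies
// on: head-chart templates carry CHART_DURATION and CHART_UNIQUE_ID tokens
// that are substituted per chart at render time. demoTemplate is
// illustrative only.
var demoTemplate = 'data-after="-CHART_DURATION" data-id="CHART_UNIQUE_ID"';
var demoHead = demoTemplate
    .replace(/CHART_DURATION/g, (120).toString())
    .replace(/CHART_UNIQUE_ID/g, 'system.cpu');
// -> 'data-after="-120" data-id="system.cpu"'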
    ';\n var mainhead = headMain(netdataDashboard.os, data.charts, duration);\n\n // sort the menus\n var main = sortObjectByPriority(menus);\n var i = 0, len = main.length;\n\n // todo hook to options\n const hasChartsOnBottom = defaultAttributes.legendPosition === \"bottom\"\n const chartAdditionalHeight = hasChartsOnBottom ? LEGEND_BOTTOM_SINGLE_LINE_HEIGHT : 0\n while (i < len) {\n var menu = main[i++];\n\n // generate an entry at the main menu\n\n var menuid = NETDATA.name2id('menu_' + menu);\n sidebar += '
  • ' + menus[menu].icon + ' ' + menus[menu].title + '
      ';\n html += '

      ' + menus[menu].icon + ' ' + menus[menu].title + '

      ';\n\n if (menus[menu].info !== null) {\n html += menus[menu].info;\n }\n\n // console.log(' >> ' + menu + ' (' + menus[menu].priority + '): ' + menus[menu].title);\n\n var shtml = '';\n var mhead = '
      ' + mainhead;\n mainhead = '';\n\n // sort the submenus of this menu\n var sub = sortObjectByPriority(menus[menu].submenus);\n var si = 0, slen = sub.length;\n while (si < slen) {\n var submenu = sub[si++];\n\n // generate an entry at the submenu\n var submenuid = NETDATA.name2id('menu_' + menu + '_submenu_' + submenu);\n sidebar += '
    • ' + menus[menu].submenus[submenu].title + '
    • ';\n shtml += '

      ' + menus[menu].submenus[submenu].title + '

      ';\n\n if (menus[menu].submenus[submenu].info !== null) {\n shtml += '
      ' + menus[menu].submenus[submenu].info + '
      ';\n }\n\n var head = '
      ';\n var chtml = '';\n\n // console.log(' \\------- ' + submenu + ' (' + menus[menu].submenus[submenu].priority + '): ' + menus[menu].submenus[submenu].title);\n\n // sort the charts in this submenu of this menu\n menus[menu].submenus[submenu].charts.sort(prioritySort);\n var ci = 0, clen = menus[menu].submenus[submenu].charts.length;\n while (ci < clen) {\n var chart = menus[menu].submenus[submenu].charts[ci++];\n\n // generate the submenu heading charts\n mhead += generateHeadCharts('mainheads', chart, duration);\n head += generateHeadCharts('heads', chart, duration);\n\n function chartCommonMin(family, context, units) {\n var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMin', context, undefined);\n if (typeof x !== 'undefined') {\n return ' data-common-min=\"' + family + '/' + context + '/' + units + '\"';\n } else {\n return '';\n }\n }\n\n function chartCommonMax(family, context, units) {\n var x = netdataDashboard.anyAttribute(netdataDashboard.context, 'commonMax', context, undefined);\n if (typeof x !== 'undefined') {\n return ' data-common-max=\"' + family + '/' + context + '/' + units + '\"';\n } else {\n return '';\n }\n }\n\n // generate the chart\n if (urlOptions.mode === 'print') {\n chtml += '
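// A sketch of the shared-scale key chartCommonMin()/chartCommonMax() above
// emit as data attributes: charts whose context declares commonMin/commonMax
// in dashboard_info.js are grouped under family/context/units so they share
// one scale. demoScaleKey is illustrative only.
function demoScaleKey(family, context, units) {
    return family + '/' + context + '/' + units;
}
// demoScaleKey('sda', 'disk.io', 'KiB/s') -> 'sda/disk.io/KiB/s'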
      ';\n }\n\n const chartHeight = netdataDashboard.contextHeight(chart.context, options.chartsHeight)\n + chartAdditionalHeight;\n\n chtml += '
      ' + netdataDashboard.contextInfo(chart.context) + '
      ';\n\n if (urlOptions.mode === 'print') {\n chtml += '
      ';\n }\n }\n\n head += '
      ';\n shtml += head + chtml + '
      ';\n }\n\n mhead += '
      ';\n sidebar += '
  • ';\n html += mhead + shtml + '
    ';\n }\n\n const isMemoryModeDbEngine = data.memory_mode === \"dbengine\";\n\n sidebar += '
  • Add more charts
  • ';\n sidebar += '
  • Add more alarms
  • ';\n sidebar += '
  • Every ' +\n ((data.update_every === 1) ? 'second' : data.update_every.toString() + ' seconds') + ', ' +\n 'Netdata collects ' + data.dimensions_count.toLocaleString() + ' metrics on ' +\n data.hostname.toString() + ', presents them in ' +\n data.charts_count.toLocaleString() + ' charts' +\n (isMemoryModeDbEngine ? '' : ',') + // oxford comma\n ' and monitors them with ' +\n data.alarms_count.toLocaleString() + ' alarms.';\n\n if (!isMemoryModeDbEngine) {\n sidebar += '
     
    Get more history by ' +\n 'configuring Netdata\\'s history or switching to the database engine.';\n }\n\n sidebar += '
     
    netdata
    ' + data.version.toString() + '
    ';\n\n sidebar += '
  • '\n\n sidebar += '
  • '\n\n\n sidebar += '
';\n div.innerHTML = html;\n document.getElementById('sidebar').innerHTML = sidebar;\n\n if (urlOptions.highlight === true) {\n reduxStore.dispatch(setGlobalChartUnderlayAction({\n after: urlOptions.highlight_after,\n before: urlOptions.highlight_before,\n }))\n }\n\n if (urlOptions.mode === 'print') {\n printPage();\n } else {\n finalizePage();\n }\n}\n\nfunction renderChartsAndMenu(data) {\n options.menus = {};\n options.submenu_names = {};\n\n var menus = options.menus;\n var charts = data.charts;\n var m, menu_key;\n\n for (var c in charts) {\n if (!charts.hasOwnProperty(c)) {\n continue;\n }\n\n var chart = charts[c];\n enrichChartData(chart);\n m = chart.menu;\n\n // create the menu\n if (typeof menus[m] === 'undefined') {\n menus[m] = {\n menu_pattern: chart.menu_pattern,\n priority: chart.priority,\n submenus: {},\n title: netdataDashboard.menuTitle(chart),\n icon: netdataDashboard.menuIcon(chart),\n info: netdataDashboard.menuInfo(chart),\n height: netdataDashboard.menuHeight(chart) * options.chartsHeight\n };\n } else {\n if (typeof (menus[m].menu_pattern) === 'undefined') {\n menus[m].menu_pattern = chart.menu_pattern;\n }\n\n if (chart.priority < menus[m].priority) {\n menus[m].priority = chart.priority;\n }\n }\n\n menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? menus[m].menu_pattern : m;\n\n // create the submenu\n if (typeof menus[m].submenus[chart.submenu] === 'undefined') {\n menus[m].submenus[chart.submenu] = {\n priority: chart.priority,\n charts: [],\n title: null,\n info: netdataDashboard.submenuInfo(menu_key, chart.submenu),\n height: netdataDashboard.submenuHeight(menu_key, chart.submenu, menus[m].height)\n };\n } else {\n if (chart.priority < menus[m].submenus[chart.submenu].priority) {\n menus[m].submenus[chart.submenu].priority = chart.priority;\n }\n }\n\n // index the chart in the menu/submenu\n menus[m].submenus[chart.submenu].charts.push(chart);\n }\n\n // propagate the descriptive subname given to QoS\n // to all the other submenus with the same name\n for (var m in menus) {\n if (!menus.hasOwnProperty(m)) {\n continue;\n }\n\n for (var s in menus[m].submenus) {\n if (!menus[m].submenus.hasOwnProperty(s)) {\n continue;\n }\n\n // set the family using a name\n if (typeof options.submenu_names[s] !== 'undefined') {\n menus[m].submenus[s].title = s + ' (' + options.submenu_names[s] + ')';\n } else {\n menu_key = (typeof (menus[m].menu_pattern) !== 'undefined') ? 
menus[m].menu_pattern : m;\n menus[m].submenus[s].title = netdataDashboard.submenuTitle(menu_key, s);\n }\n }\n }\n\n renderPage(menus, data);\n}\n\n// ----------------------------------------------------------------------------\n\nexport const handleLoadJs = (promise, library, callback) => promise\n .catch((e) => {\n console.warn('error', e);\n alert(`Cannot load required JS library: ${library}`)\n })\n .then(() => {\n callback()\n })\n\n\nfunction loadClipboard(callback) {\n handleLoadJs(\n import(\"clipboard-polyfill\").then((clipboard) => {\n window.clipboard = clipboard\n }),\n \"clipboard-polyfill\",\n callback,\n )\n}\n\nfunction loadBootstrapTable(callback) {\n handleLoadJs(\n Promise.all([\n import(\"bootstrap-table\").then(() => (\n import('bootstrap-table/dist/extensions/export/bootstrap-table-export.min')\n )),\n import(\"tableexport.jquery.plugin\")\n ]),\n \"bootstrap-table\",\n callback,\n )\n}\n\nfunction loadBootstrapSlider(callback) {\n handleLoadJs(\n Promise.all([\n import(\"bootstrap-slider\").then(({ default: slider }) => {\n window.Slider = slider\n }),\n import(\"bootstrap-slider/dist/css/bootstrap-slider.min.css\"),\n ]),\n \"bootstrap-slider\",\n callback,\n )\n}\n\nfunction loadLzString(callback) {\n handleLoadJs(import(\"lz-string\"), \"lz-string\", callback)\n}\n\nfunction loadPako(callback) {\n handleLoadJs(\n import(\"pako\").then(({ default: pako }) => {\n window.pako = pako\n }),\n \"pako\",\n callback,\n )\n}\n\n// ----------------------------------------------------------------------------\n\nwindow.clipboardCopy = text => {\n clipboard.writeText(text);\n};\n\nwindow.clipboardCopyBadgeEmbed = url => {\n clipboard.writeText('');\n};\n\n// ----------------------------------------------------------------------------\n\nfunction alarmsUpdateModal() {\n var active = '
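// Usage sketch for handleLoadJs() above: it wraps a dynamic import() so a
// failed chunk load alerts the user, and the callback runs once the attempt
// settles either way. The 'd3' module is illustrative, not a dependency of
// this file.
function loadD3(callback) {
    handleLoadJs(
        import('d3').then((d3) => {
            window.d3 = d3
        }),
        'd3',
        callback,
    )
}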

Raised Alarms

';\n var all = '

All Running Alarms

';\n var footer = '
netdata badges refresh automatically. Their color indicates the state of the alarm:  red  is critical,  orange  is warning,  bright green  is ok,  light grey  is undefined (i.e. no data or no status),  black  is not initialized. You can copy and paste their URLs to embed them in any web page.
netdata can send notifications for these alarms. Check this configuration file for more information.';

    loadClipboard(function () {
    });

    const callback = (data) => {
        options.alarm_families = [];

        if (data === null) {
            document.getElementById('alarms_active').innerHTML =
                document.getElementById('alarms_all').innerHTML =
                    document.getElementById('alarms_log').innerHTML =
                        'failed to load alarm data!';
            return;
        }

        function alarmid4human(id) {
            if (id === 0) {
                return '-';
            }

            return id.toString();
        }

        function timestamp4human(timestamp, space) {
            if (timestamp === 0) {
                return '-';
            }

            if (typeof space === 'undefined') {
                space = ' ';
            }

            var t = new Date(timestamp * 1000);

            // commented out to always have date+time, to have consistent exports
            // var now = new Date();

            // if (t.toDateString() === now.toDateString()) {
            //     return t.toLocaleTimeString();
            // }

            return t.toLocaleDateString() + space + t.toLocaleTimeString();
        }

        function alarm_lookup_explain(alarm, chart) {
            var dimensions = ' of all values ';

            if (chart.dimensions.length > 1) {
                dimensions = ' of the sum of all dimensions ';
            }

            if (typeof alarm.lookup_dimensions !== 'undefined') {
                // '|' is the regex alternation operator, so it has to be
                // escaped to split on the literal pipe separator
                var d = alarm.lookup_dimensions.replace(/\|/g, ',');
                var x = d.split(',');
                if (x.length > 1) {
                    dimensions = 'of the sum of dimensions ' + alarm.lookup_dimensions + ' ';
                } else {
                    dimensions = 'of all values of dimension ' + alarm.lookup_dimensions + ' ';
                }
            }

            return '' + alarm.lookup_method + ' '
                + dimensions + ', of chart ' + alarm.chart + ''
                + ', starting ' + seconds4human(alarm.lookup_after + alarm.lookup_before, { space: ' ' }) + ' and up to ' + seconds4human(alarm.lookup_before, { space: ' ' }) + ''
                + ((alarm.lookup_options) ? (', with options ' + alarm.lookup_options.replace(/ /g, ', ') + '') : '')
                + '.';
        }

        function alarm_to_html(alarm, full) {
            var chart = options.data.charts[alarm.chart];
            if (typeof (chart) === 'undefined') {
                chart = options.data.charts_by_name[alarm.chart];
                if (typeof (chart) === 'undefined') {
                    // this means the charts loaded are incomplete
                    // probably netdata was restarted and more alarms
                    // are now available.
                    console.log('Cannot find chart ' + alarm.chart + ', you probably need to refresh the page.');
                    return '';
                }
            }

            var has_alarm = (typeof alarm.warn !== 'undefined' || typeof alarm.crit !== 'undefined');
            var badge_url = `${serverDefault}/api/v1/badge.svg?chart=${alarm.chart}&alarm=${alarm.name}&refresh=auto`;

            var action_buttons = '
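// A sketch of the badge endpoint URL assembled into badge_url above; the
// chart/alarm/refresh query parameters are the ones the dashboard uses, and
// the sample chart and alarm names are illustrative only.
var demoBadgeUrl = serverDefault + '/api/v1/badge.svg'
    + '?chart=' + encodeURIComponent('system.cpu')
    + '&alarm=' + encodeURIComponent('10min_cpu_usage')
    + '&refresh=auto';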
 
role: ' + alarm.recipient + '
 
'\n + '
'\n + '
'\n + '
embed html element for this badge\" data-toggle=\"tooltip\" data-placement=\"bottom\" onClick=\"clipboardCopyBadgeEmbed(\\'' + badge_url + '\\'); return false;\">
';\n\n var html = '
'\n + '';\n\n return html;\n }\n\n function alarm_family_show(id) {\n var html = '
' + alarm.chart + '
 

 
' + alarm.info + '' + action_buttons + '
'
                + ((typeof alarm.warn !== 'undefined') ? ('') : '')
                + ((typeof alarm.crit !== 'undefined') ? ('') : '');

            if (full === true) {
                var units = chart.units;
                if (units === '%') {
                    // use the HTML entity, so the literal percent sign
                    // renders safely inside the generated table cell
                    units = '&#37;';
                }

                html += ((typeof alarm.lookup_after !== 'undefined') ? ('') : '')
                    + ((typeof alarm.calc !== 'undefined') ? ('') : '')
                    + ((chart.green !== null) ? ('') : '')
                    + ((chart.red !== null) ? ('') : '');
            }

            if (alarm.warn_repeat_every > 0) {
                html += '';
            }

            if (alarm.crit_repeat_every > 0) {
                html += '';
            }

            var delay = '';
            if ((alarm.delay_up_duration > 0 || alarm.delay_down_duration > 0) && alarm.delay_multiplier !== 0 && alarm.delay_max_duration > 0) {
                if (alarm.delay_up_duration === alarm.delay_down_duration) {
                    delay += '
hysteresis ' + seconds4human(alarm.delay_up_duration, {\n space: ' ',\n negative_suffix: ''\n });\n } else {\n delay = '
hysteresis ';\n if (alarm.delay_up_duration > 0) {\n delay += 'on escalation ' + seconds4human(alarm.delay_up_duration, {\n space: ' ',\n negative_suffix: ''\n }) + ', ';\n }\n if (alarm.delay_down_duration > 0) {\n delay += 'on recovery ' + seconds4human(alarm.delay_down_duration, {\n space: ' ',\n negative_suffix: ''\n }) + ', ';\n }\n }\n if (alarm.delay_multiplier !== 1.0) {\n delay += 'multiplied by ' + alarm.delay_multiplier.toString() + '';\n delay += ', up to ' + seconds4human(alarm.delay_max_duration, {\n space: ' ',\n negative_suffix: ''\n }) + '';\n }\n delay += '
';\n }\n\n html += '
'\n + ((has_alarm === true) ? ('') : '')\n + ''\n + '
warning when' + alarm.warn + '
critical when' + alarm.crit + '
db lookup' + alarm_lookup_explain(alarm, chart) + '
calculation' + alarm.calc + '
green threshold' + chart.green + ' ' + units + '
red threshold' + chart.red + ' ' + units + '
repeat warning' + seconds4human(alarm.warn_repeat_every) + '
repeat critical' + seconds4human(alarm.crit_repeat_every) + '
check every' + seconds4human(alarm.update_every, {\n space: ' ',\n negative_suffix: ''\n }) + '
execute' + alarm.exec + '' + delay + '
source' + alarm.source + '
';\n var family = options.alarm_families[id];\n var len = family.arr.length;\n while (len--) {\n var alarm = family.arr[len];\n html += alarm_to_html(alarm, true);\n }\n html += '
';

        $('#alarm_all_' + id.toString()).html(html);
        enableTooltipsAndPopovers();
    }

    // find the proper family of each alarm
    var x, family, alarm;
    var count_active = 0;
    var count_all = 0;
    var families = {};
    var families_sort = [];
    for (x in data.alarms) {
        if (!data.alarms.hasOwnProperty(x)) {
            continue;
        }

        alarm = data.alarms[x];
        family = alarm.family;

        // find the chart
        var chart = options.data.charts[alarm.chart];
        if (typeof chart === 'undefined') {
            chart = options.data.charts_by_name[alarm.chart];
        }

        // not found - this should never happen!
        if (typeof chart === 'undefined') {
            console.log('WARNING: alarm ' + x + ' is linked to chart ' + alarm.chart + ', which is not in the list of charts received from the server.');
            chart = { priority: 9999999 };
        }
        else if (typeof chart.menu !== 'undefined' && typeof chart.submenu !== 'undefined')
        // the family based on the chart
        {
            family = chart.menu + ' - ' + chart.submenu;
        }

        if (typeof families[family] === 'undefined') {
            families[family] = {
                name: family,
                arr: [],
                priority: chart.priority
            };

            families_sort.push(families[family]);
        }

        if (chart.priority < families[family].priority) {
            families[family].priority = chart.priority;
        }

        families[family].arr.unshift(alarm);
    }

    // sort the families, like the dashboard menu does
    var families_sorted = families_sort.sort(function (a, b) {
        if (a.priority < b.priority) {
            return -1;
        }
        if (a.priority > b.priority) {
            return 1;
        }
        return naturalSortCompare(a.name, b.name);
    });

    var i = 0;
    var fc = 0;
    var len = families_sorted.length;
    while (len--) {
        family = families_sorted[i++].name;
        var active_family_added = false;

        var expanded = 'false';
        var collapsed = 'class="collapsed"';
        var cin = '';

        // uncomment if first family needs to be expanded by default
        // var expanded = 'true';
        // var collapsed = '';
        // var cin = 'in';

        if (fc !== 0) {
            all += "";

            // uncomment if first family needs to be expanded by default
            // expanded = 'false';
            // collapsed = 'class="collapsed"';
            // cin = '';
        }

        all += '
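// A simplified sketch of the bucketing performed above: alarms are grouped
// by family (or by 'menu - submenu' when their chart is known), each bucket
// keeps the lowest priority it has seen, and buckets then sort like the
// dashboard menu. demoGroupAlarms and its per-alarm priority field are
// illustrative simplifications (the real code uses the chart's priority).
function demoGroupAlarms(alarms) {
    var families = {};
    alarms.forEach(function (alarm) {
        if (typeof families[alarm.family] === 'undefined') {
            families[alarm.family] = { name: alarm.family, arr: [], priority: alarm.priority };
        }
        if (alarm.priority < families[alarm.family].priority) {
            families[alarm.family].priority = alarm.priority;
        }
        families[alarm.family].arr.unshift(alarm);
    });
    return Object.keys(families).map(function (name) {
        return families[name];
    }).sort(function (a, b) {
        return a.priority - b.priority || naturalSortCompare(a.name, b.name);
    });
}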
';\n\n options.alarm_families[fc] = families[family];\n\n fc++;\n\n var arr = families[family].arr;\n var c = arr.length;\n while (c--) {\n alarm = arr[c];\n if (alarm.status === 'WARNING' || alarm.status === 'CRITICAL') {\n if (!active_family_added) {\n active_family_added = true;\n active += '

' + family + '

';\n }\n count_active++;\n active += alarm_to_html(alarm, true);\n }\n\n count_all++;\n }\n }\n active += \"\";\n if (families_sorted.length > 0) {\n all += \"
\";\n }\n all += \"\";\n\n if (!count_active) {\n active += '

Everything is normal. No raised alarms.
';\n } else {\n active += footer;\n }\n\n if (!count_all) {\n all += \"

No alarms are running in this system.

\";\n } else {\n all += footer;\n }\n\n document.getElementById('alarms_active').innerHTML = active;\n document.getElementById('alarms_all').innerHTML = all;\n enableTooltipsAndPopovers();\n\n if (families_sorted.length > 0) {\n alarm_family_show(0);\n }\n\n // register bootstrap events\n var $accordion = $('#alarms_all_accordion');\n $accordion.on('show.bs.collapse', function (d) {\n var target = $(d.target);\n var id = $(target).data('alarm-id');\n alarm_family_show(id);\n });\n $accordion.on('hidden.bs.collapse', function (d) {\n var target = $(d.target);\n var id = $(target).data('alarm-id');\n $('#alarm_all_' + id.toString()).html('');\n });\n\n document.getElementById('alarms_log').innerHTML = '

Alarm Log

';

    loadBootstrapTable(function () {
        $('#alarms_log_table').bootstrapTable({
            url: `${serverDefault}/api/v1/alarm_log?all`,
            cache: false,
            pagination: true,
            pageSize: 10,
            showPaginationSwitch: false,
            search: true,
            searchTimeOut: 300,
            searchAlign: 'left',
            showColumns: true,
            showExport: true,
            exportDataType: 'all',
            exportOptions: {
                fileName: 'netdata_alarm_log'
            },
            onClickRow: function (row) {
                scrollToChartAfterHidingModal(row.chart, row.when * 1000, row.status);
                $('#alarmsModal').modal('hide');
                return false;
            },
            rowStyle: function (row) {
                // every case returns, so no break statements are needed
                switch (row.status) {
                    case 'CRITICAL':
                        return { classes: 'danger' };
                    case 'WARNING':
                        return { classes: 'warning' };
                    case 'UNDEFINED':
                        return { classes: 'info' };
                    case 'CLEAR':
                        return { classes: 'success' };
                }
                return {};
            },
            showFooter: false,
            showHeader: true,
            showRefresh: true,
            showToggle: false,
            sortable: true,
            silentSort: false,
            columns: [
                {
                    field: 'when',
                    title: 'Event Date',
                    valign: 'middle',
                    titleTooltip: 'The date and time the event took place',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return timestamp4human(value, ' ');
                    },
                    align: 'center',
                    switchable: false,
                    sortable: true
                },
                {
                    field: 'hostname',
                    title: 'Host',
                    valign: 'middle',
                    titleTooltip: 'The host that generated this event',
                    align: 'center',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'unique_id',
                    title: 'Unique ID',
                    titleTooltip: 'The host unique ID for this event',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return alarmid4human(value);
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'alarm_id',
                    title: 'Alarm ID',
                    titleTooltip: 'The ID of the alarm that generated this event',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return alarmid4human(value);
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'alarm_event_id',
                    title: 'Alarm Event ID',
                    titleTooltip: 'The incremental ID of this event for the given alarm',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return alarmid4human(value);
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'chart',
                    title: 'Chart',
                    titleTooltip: 'The chart the alarm is attached to',
                    align: 'center',
                    valign: 'middle',
                    switchable: false,
                    sortable: true
                },
                {
                    field: 'family',
                    title: 'Family',
                    titleTooltip: 'The family of the chart the alarm is attached to',
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'name',
                    title: 'Alarm',
                    titleTooltip: 'The alarm name that generated this event',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return value.toString().replace(/_/g, ' ');
                    },
                    align: 'center',
                    valign: 'middle',
                    switchable: false,
                    sortable: true
                },
                {
                    field: 'value_string',
                    title: 'Friendly Value',
                    titleTooltip: 'The value of the alarm, that triggered this event',
                    align: 'right',
                    valign: 'middle',
                    sortable: true
                },
                {
                    field: 'old_value_string',
                    title: 'Friendly Old Value',
                    titleTooltip: 'The value of the alarm, just before this event',
                    align: 'right',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'old_value',
                    title: 'Old Value',
                    titleTooltip: 'The value of the alarm, just before
this event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'value',\n title: 'Value',\n titleTooltip: 'The value of the alarm, that triggered this event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return ((value !== null) ? Math.round(value * 100) / 100 : 'NaN').toString();\n },\n align: 'right',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'units',\n title: 'Units',\n titleTooltip: 'The units of the value of the alarm',\n align: 'left',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'old_status',\n title: 'Old Status',\n titleTooltip: 'The status of the alarm, just before this event',\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'status',\n title: 'Status',\n titleTooltip: 'The status of the alarm, that was set due to this event',\n align: 'center',\n valign: 'middle',\n switchable: false,\n sortable: true\n },\n {\n field: 'duration',\n title: 'Last Duration',\n titleTooltip: 'The duration the alarm was at its previous state, just before this event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'non_clear_duration',\n title: 'Raised Duration',\n titleTooltip: 'The duration the alarm was raised, just before this event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'recipient',\n title: 'Recipient',\n titleTooltip: 'The recipient of this event',\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'processed',\n title: 'Processed Status',\n titleTooltip: 'True when this event is processed',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n\n if (value === true) {\n return 'DONE';\n } else {\n return 'PENDING';\n }\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'updated',\n title: 'Updated Status',\n titleTooltip: 'True when this event has been updated by another event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n\n if (value === true) {\n return 'UPDATED';\n } else {\n return 'CURRENT';\n }\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'updated_by_id',\n title: 'Updated By ID',\n titleTooltip: 'The unique ID of the event that obsoleted this one',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return alarmid4human(value);\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'updates_id',\n title: 'Updates ID',\n titleTooltip: 'The unique ID of the event obsoleted because of this event',\n formatter: function (value, row, index) {\n void (row);\n void (index);\n return alarmid4human(value);\n },\n align: 'center',\n valign: 'middle',\n visible: false,\n sortable: true\n },\n {\n field: 'exec',\n title: 'Script',\n titleTooltip: 'The script to handle the event notification',\n 
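// The column formatter contract used by every entry above, as a standalone
// sketch: bootstrap-table calls formatter(value, row, index) and renders the
// returned string. demoDurationFormatter is illustrative only.
function demoDurationFormatter(value, row, index) {
    void (row);
    void (index);
    return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
}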
align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'exec_run',
                    title: 'Script Run At',
                    titleTooltip: 'The date and time the script has been run',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return timestamp4human(value, ' ');
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'exec_code',
                    title: 'Script Return Value',
                    titleTooltip: 'The return code of the script',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);

                        if (value === 0) {
                            return 'OK (returned 0)';
                        } else {
                            return 'FAILED (with code ' + value.toString() + ')';
                        }
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'delay',
                    title: 'Script Delay',
                    titleTooltip: 'The hysteresis of the notification',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);

                        return seconds4human(value, { negative_suffix: '', space: ' ', now: 'no time' });
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'delay_up_to_timestamp',
                    title: 'Script Delay Run At',
                    titleTooltip: 'The date and time the script should be run, after hysteresis',
                    formatter: function (value, row, index) {
                        void (row);
                        void (index);
                        return timestamp4human(value, ' ');
                    },
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'info',
                    title: 'Description',
                    titleTooltip: 'A short description of the alarm',
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                },
                {
                    field: 'source',
                    title: 'Alarm Source',
                    titleTooltip: 'The source of configuration of the alarm',
                    align: 'center',
                    valign: 'middle',
                    visible: false,
                    sortable: true
                }
            ]
        });
        // console.log($('#alarms_log_table').bootstrapTable('getOptions'));
    });
    }

    reduxStore.dispatch(fetchAllAlarmsAction.request({
        callback,
        serverDefault,
    }))
}

function initializeDynamicDashboardWithData(data) {
    if (data !== null) {
        options.hostname = data.hostname;
        options.data = data;
        options.version = data.version;
        options.release_channel = data.release_channel;
        options.timezone = data.timezone;
        netdataDashboard.os = data.os;

        if (typeof data.hosts !== 'undefined') {
            options.hosts = data.hosts;
        }

        // update the dashboard version
        document.getElementById('netdataVersion').innerHTML = options.version;

        // update the dashboard title
        document.title = options.hostname + ' netdata dashboard';

        // create a chart_by_name index
        data.charts_by_name = {};
        var charts = data.charts;
        var x;
        for (x in charts) {
            if (!charts.hasOwnProperty(x)) {
                continue;
            }

            var chart = charts[x];
            data.charts_by_name[chart.name] = chart;
        }

        // render all charts
        renderChartsAndMenu(data);
    }
}

// an object to keep initialization configuration
// needed due to the async nature of the XSS modal
var initializeConfig = {
    url: null,
    custom_info: true,
};

// will be removed when we transform dashboard_info.js into DSL
// memoize so it's fetched only once
const loadDashboardInfo = memoizeWith(identity, () => (
    $.ajax({
        url: `${serverDefault}dashboard_info.js`,
        cache: true,
        dataType: 'script',
        xhrFields: { withCredentials: true }, // required for the cookie
    })
        .fail(function () {
            alert(`Cannot load required JS library: dashboard_info.js`);
        })
))

function loadCustomDashboardInfo(url, callback) {
    $.ajax({
url,
        cache: true,
        dataType: "script",
        xhrFields: { withCredentials: true } // required for the cookie
    })
        .fail(function () {
            alert(`Cannot load required JS library: ${url}`);
        })
        .always(function () {
            $.extend(true, netdataDashboard, customDashboard);
            callback();
        })
}

function initializeChartsAndCustomInfo() {
    loadDashboardInfo().then(() => {
        // download all the charts the server knows
        NETDATA.chartRegistry.downloadAll(initializeConfig.url, function (data) {
            if (data !== null) {
                reduxStore.dispatch(chartsMetadataRequestSuccess({ data }))
                if (initializeConfig.custom_info === true && typeof data.custom_info !== 'undefined' && data.custom_info !== "" && window.netdataSnapshotData === null) {
                    //console.log('loading custom dashboard decorations from server ' + initializeConfig.url);
                    loadCustomDashboardInfo(serverDefault + data.custom_info, function () {
                        initializeDynamicDashboardWithData(data);
                    });
                } else {
                    //console.log('not loading custom dashboard decorations from server ' + initializeConfig.url);
                    initializeDynamicDashboardWithData(data);
                }
            }
        });
    })
}

window.xssModalDisableXss = () => {
    //console.log('disabling xss checks');
    NETDATA.xss.enabled = false;
    NETDATA.xss.enabled_for_data = false;
    initializeConfig.custom_info = true;
    initializeChartsAndCustomInfo();
    return false;
};

window.xssModalKeepXss = () => {
    //console.log('keeping xss checks');
    NETDATA.xss.enabled = true;
    NETDATA.xss.enabled_for_data = true;
    initializeConfig.custom_info = false;
    initializeChartsAndCustomInfo();
    return false;
};

function initializeDynamicDashboard(newReduxStore) {
    if (newReduxStore) {
        reduxStore = newReduxStore

        netdataPrepCallback()

        initializeConfig.url = serverDefault;
    }

    if (typeof netdataCheckXSS !== 'undefined' && netdataCheckXSS === true) {
        //$("#loadOverlay").css("display","none");
        document.getElementById('netdataXssModalServer').innerText = initializeConfig.url;
        $('#xssModal').modal('show');
    } else {
        initializeChartsAndCustomInfo();
    }
}

// ----------------------------------------------------------------------------

function versionLog(msg) {
    document.getElementById('versionCheckLog').innerHTML = msg;
}

// New way of checking for updates, based only on versions

function versionsMatch(v1, v2) {
    if (v1 === v2) {
        return true;
    } else {
        let s1 = v1.split('.');
        let s2 = v2.split('.');
        // Check major version - take everything after the leading 'v',
        // so multi-digit majors (e.g. v10) compare correctly
        let n1 = parseInt(s1[0].substring(1), 10);
        let n2 = parseInt(s2[0].substring(1), 10);
        if (n1 < n2) return false;
        else if (n1 > n2) return true;

        // Check minor version
        n1 = parseInt(s1[1], 10);
        n2 = parseInt(s2[1], 10);
        if (n1 < n2) return false;
        else if (n1 > n2) return true;

        // Split patch: format could be e.g. 0-22-nightly
        s1 = s1[2].split('-');
        s2 = s2[2].split('-');

        n1 = parseInt(s1[0], 10);
        n2 = parseInt(s2[0], 10);
        if (n1 < n2) return false;
        else if (n1 > n2) return true;

        n1 = (s1.length > 1) ? parseInt(s1[1], 10) : 0;
        n2 = (s2.length > 1) ?
parseInt(s2[1], 10) : 0;\n if (n1 < n2) return false;\n else return true;\n }\n}\n\nfunction getGithubLatestVersion(callback, channel) {\n versionLog('Downloading latest version id from github...');\n let url\n\n if (channel === 'stable') {\n url = 'https://api.github.com/repos/netdata/netdata/releases/latest'\n } else {\n url = 'https://api.github.com/repos/netdata/netdata-nightlies/releases/latest'\n }\n\n $.ajax({\n url: url,\n async: true,\n cache: false\n })\n .done(function (data) {\n data = data.tag_name.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\");\n versionLog('Latest stable version from github is ' + data);\n callback(data);\n })\n .fail(function () {\n versionLog('Failed to download the latest stable version id from github!');\n callback(null);\n });\n}\n\nfunction checkForUpdateByVersion(force, callback) {\n getGithubLatestVersion(function (sha2) {\n callback(options.version, sha2);\n }, options.release_channel);\n return null;\n}\n\nwindow.notifyForUpdate = (force) => {\n versionLog('
checking for updates...
');\n\n var now = Date.now();\n\n if (typeof force === 'undefined' || force !== true) {\n var last = loadLocalStorage('last_update_check');\n\n if (typeof last === 'string') {\n last = parseInt(last);\n } else {\n last = 0;\n }\n\n if (now - last < 3600000 * 8) {\n // no need to check it - too soon\n return;\n }\n }\n\n checkForUpdateByVersion(force, function (sha1, sha2) {\n var save = false;\n\n if (sha1 === null) {\n save = false;\n versionLog('
Failed to get your netdata version! You can always get the latest netdata from its github page.
');\n } else if (sha2 === null) {\n save = false;\n versionLog('
Failed to get the latest netdata version. You can always get the latest netdata from its github page.
');\n } else if (versionsMatch(sha1, sha2)) {\n save = true;\n versionLog('
You already have the latest netdata! No update yet? We probably need some motivation to keep going on! If you haven\\'t already, give netdata a star at its github page.
');\n } else {\n save = true;\n var compare = 'https://learn.netdata.cloud/docs/agent/changelog/';\n versionLog('
New version of netdata available! Latest version: ' + sha2 + '. Click here for the changes log and click here for directions on updating your netdata installation. We suggest reviewing the changes log for new features you may be interested in, or important bug fixes you may need. Keeping your netdata updated is generally a good idea.
');\n }\n\n if (save) {\n saveLocalStorage('last_update_check', now.toString());\n }\n });\n}\n\n// ----------------------------------------------------------------------------\n// printing dashboards\n\nfunction showPageFooter() {\n document.getElementById('footer').style.display = 'block';\n}\n\nwindow.printPreflight = () => {\n var url = document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString() + urlOptions.genHash() + ';mode=print';\n var width = 990;\n var height = screen.height * 90 / 100;\n //console.log(url);\n //console.log(document.location);\n window.open(url, '', 'width=' + width.toString() + ',height=' + height.toString() + ',menubar=no,toolbar=no,personalbar=no,location=no,resizable=no,scrollbars=yes,status=no,chrome=yes,centerscreen=yes,attention=yes,dialog=yes');\n $('#printPreflightModal').modal('hide');\n}\n\nfunction printPage() {\n window.NETDATA.parseDom();\n\n if (urlOptions.after < 0) {\n reduxStore.dispatch(setDefaultAfterAction({ after: urlOptions.after }))\n } else if (urlOptions.pan_and_zoom === true) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: urlOptions.after,\n before: urlOptions.before,\n }))\n }\n showPageFooter(); // todo after full rewrite the footer should show when charts are loaded\n}\n\n// --------------------------------------------------------------------\n\nfunction jsonStringifyFn(obj) {\n return JSON.stringify(obj, function (key, value) {\n return (typeof value === 'function') ? value.toString() : value;\n });\n}\n\nfunction jsonParseFn(str) {\n return JSON.parse(str, function (key, value) {\n if (typeof value != 'string') {\n return value;\n }\n return (value.substring(0, 8) == 'function') ? eval('(' + value + ')') : value;\n });\n}\n\n// --------------------------------------------------------------------\n\nvar snapshotOptions = {\n bytes_per_chart: 2048,\n compressionDefault: 'pako.deflate.base64',\n\n compressions: {\n 'none': {\n bytes_per_point_memory: 5.2,\n bytes_per_point_disk: 5.6,\n\n compress: function (s) {\n return s;\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return s;\n }\n },\n\n 'pako.deflate.base64': {\n bytes_per_point_memory: 1.8,\n bytes_per_point_disk: 1.9,\n\n compress: function (s) {\n return btoa(pako.deflate(s, { to: 'string' }));\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return pako.inflate(atob(s), { to: 'string' });\n }\n },\n\n 'pako.deflate': {\n bytes_per_point_memory: 1.4,\n bytes_per_point_disk: 3.2,\n\n compress: function (s) {\n return pako.deflate(s, { to: 'string' });\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return pako.inflate(s, { to: 'string' });\n }\n },\n\n 'lzstring.utf16': {\n bytes_per_point_memory: 1.7,\n bytes_per_point_disk: 2.6,\n\n compress: function (s) {\n return LZString.compressToUTF16(s);\n },\n\n compressed_length: function (s) {\n return s.length * 2;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromUTF16(s);\n }\n },\n\n 'lzstring.base64': {\n bytes_per_point_memory: 2.1,\n bytes_per_point_disk: 2.3,\n\n compress: function (s) {\n return LZString.compressToBase64(s);\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromBase64(s);\n }\n },\n\n 'lzstring.uri': {\n bytes_per_point_memory: 2.1,\n bytes_per_point_disk: 2.3,\n\n compress: function (s) {\n 
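// Illustrative sketch (editor's example, not part of the original source): each
// codec above is a symmetric pair of string transforms, so a snapshot payload
// must survive compress -> uncompress unchanged. Assumes pako and the browser's
// btoa/atob are available (see loadPako).
function exampleSnapshotCodecRoundTrip() {
    var codec = snapshotOptions.compressions['pako.deflate.base64'];
    var payload = JSON.stringify({ labels: ['time', 'user'], data: [[0, 1.5]] });
    var packed = codec.compress(payload);        // deflate, then base64 so it embeds safely in JSON
    return codec.uncompress(packed) === payload; // true: the round trip is lossless
}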
return LZString.compressToEncodedURIComponent(s);\n },\n\n compressed_length: function (s) {\n return s.length;\n },\n\n uncompress: function (s) {\n return LZString.decompressFromEncodedURIComponent(s);\n }\n }\n }\n};\n\n// --------------------------------------------------------------------\n// loading snapshots\n\nfunction loadSnapshotModalLog(priority, msg) {\n document.getElementById('loadSnapshotStatus').className = \"alert alert-\" + priority;\n document.getElementById('loadSnapshotStatus').innerHTML = msg;\n}\n\nvar tmpSnapshotData = null;\n\nwindow.loadSnapshot = () => {\n $('#loadSnapshotImport').addClass('disabled');\n\n if (tmpSnapshotData === null) {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'no data have been loaded');\n return;\n }\n\n loadPako(function () {\n loadLzString(function () {\n loadSnapshotModalLog('info', 'Please wait, activating snapshot...');\n $('#loadSnapshotModal').modal('hide');\n\n netdataShowAlarms = false;\n netdataRegistry = false;\n netdataServer = tmpSnapshotData.server;\n\n document.getElementById('charts_div').innerHTML = '';\n document.getElementById('sidebar').innerHTML = '';\n\n if (typeof tmpSnapshotData.hash !== 'undefined') {\n urlOptions.hash = tmpSnapshotData.hash;\n } else {\n urlOptions.hash = '#';\n }\n\n if (typeof tmpSnapshotData.info !== 'undefined') {\n var info = jsonParseFn(tmpSnapshotData.info);\n if (typeof info.menu !== 'undefined') {\n netdataDashboard.menu = info.menu;\n }\n\n if (typeof info.submenu !== 'undefined') {\n netdataDashboard.submenu = info.submenu;\n }\n\n if (typeof info.context !== 'undefined') {\n netdataDashboard.context = info.context;\n }\n }\n\n if (typeof tmpSnapshotData.compression !== 'string') {\n tmpSnapshotData.compression = 'none';\n }\n\n if (typeof snapshotOptions.compressions[tmpSnapshotData.compression] === 'undefined') {\n alert('unknown compression method: ' + tmpSnapshotData.compression);\n tmpSnapshotData.compression = 'none';\n }\n\n tmpSnapshotData.uncompress = snapshotOptions.compressions[tmpSnapshotData.compression].uncompress;\n\n window.NETDATA.parseDom()\n reduxStore.dispatch(loadSnapshotAction({\n snapshot: tmpSnapshotData,\n }))\n\n window.netdataSnapshotData = tmpSnapshotData;\n\n urlOptions.after = tmpSnapshotData.after_ms;\n urlOptions.before = tmpSnapshotData.before_ms;\n\n if (typeof tmpSnapshotData.highlight_after_ms !== 'undefined'\n && tmpSnapshotData.highlight_after_ms !== null\n && tmpSnapshotData.highlight_after_ms > 0\n && typeof tmpSnapshotData.highlight_before_ms !== 'undefined'\n && tmpSnapshotData.highlight_before_ms !== null\n && tmpSnapshotData.highlight_before_ms > 0\n ) {\n urlOptions.highlight_after = tmpSnapshotData.highlight_after_ms;\n urlOptions.highlight_before = tmpSnapshotData.highlight_before_ms;\n urlOptions.highlight = true;\n } else {\n urlOptions.highlight_after = 0;\n urlOptions.highlight_before = 0;\n urlOptions.highlight = false;\n }\n\n netdataCheckXSS = false; // disable the modal - this does not affect XSS checks, since dashboard.js is already loaded\n NETDATA.xss.enabled = true; // we should not do any remote requests, but if we do, check them\n NETDATA.xss.enabled_for_data = true; // check also snapshot data - that have been excluded from the initial check, due to compression\n loadSnapshotPreflightEmpty();\n initializeDynamicDashboard();\n });\n });\n};\n\nfunction loadSnapshotPreflightFile(file) {\n var filename = NETDATA.xss.string(file.name);\n var fr = new FileReader();\n fr.onload = function (e) {\n 
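    // The selected file is read as text, parsed as JSON and routed through
    // NETDATA.xss.checkAlways(); the regex below exempts snapshot.info and
    // snapshot.data from sanitization because they hold compressed blobs that a
    // generic scrub would corrupt - they are re-checked after decompression
    // (see NETDATA.xss.enabled_for_data above).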
document.getElementById('loadSnapshotFilename').innerHTML = filename;\n var result = null;\n try {\n result = NETDATA.xss.checkAlways('snapshot', JSON.parse(e.target.result), /^(snapshot\\.info|snapshot\\.data)$/);\n\n //console.log(result);\n var date_after = new Date(result.after_ms);\n var date_before = new Date(result.before_ms);\n\n if (typeof result.charts_ok === 'undefined') {\n result.charts_ok = 'unknown';\n }\n\n if (typeof result.charts_failed === 'undefined') {\n result.charts_failed = 0;\n }\n\n if (typeof result.compression === 'undefined') {\n result.compression = 'none';\n }\n\n if (typeof result.data_size === 'undefined') {\n result.data_size = 0;\n }\n\n document.getElementById('loadSnapshotFilename').innerHTML = '' + filename + '';\n document.getElementById('loadSnapshotHostname').innerHTML = '' + result.hostname + ', netdata version: ' + result.netdata_version.toString() + '';\n document.getElementById('loadSnapshotURL').innerHTML = result.url;\n document.getElementById('loadSnapshotCharts').innerHTML = result.charts.charts_count.toString() + ' charts, ' + result.charts.dimensions_count.toString() + ' dimensions, ' + result.data_points.toString() + ' points per dimension, ' + Math.round(result.duration_ms / result.data_points).toString() + ' ms per point';\n document.getElementById('loadSnapshotInfo').innerHTML = 'version: ' + result.snapshot_version.toString() + ', includes ' + result.charts_ok.toString() + ' unique chart data queries ' + ((result.charts_failed > 0) ? ('' + result.charts_failed.toString() + ' failed') : '').toString() + ', compressed with ' + result.compression.toString() + ', data size ' + (Math.round(result.data_size * 100 / 1024 / 1024) / 100).toString() + ' MB';\n document.getElementById('loadSnapshotTimeRange').innerHTML = '' + localeDateString(date_after) + ' ' + localeTimeString(date_after) + ' to ' + localeDateString(date_before) + ' ' + localeTimeString(date_before) + '';\n document.getElementById('loadSnapshotComments').innerHTML = ((result.comments) ? 
result.comments : '').toString();\n loadSnapshotModalLog('success', 'File loaded, click Import to render it!');\n $('#loadSnapshotImport').removeClass('disabled');\n\n tmpSnapshotData = result;\n }\n catch (e) {\n console.log(e);\n document.getElementById('loadSnapshotStatus').className = \"alert alert-danger\";\n document.getElementById('loadSnapshotStatus').innerHTML = \"Failed to parse this file!\";\n $('#loadSnapshotImport').addClass('disabled');\n }\n }\n\n //console.log(file);\n fr.readAsText(file);\n};\n\nfunction loadSnapshotPreflightEmpty() {\n document.getElementById('loadSnapshotFilename').innerHTML = '';\n document.getElementById('loadSnapshotHostname').innerHTML = '';\n document.getElementById('loadSnapshotURL').innerHTML = '';\n document.getElementById('loadSnapshotCharts').innerHTML = '';\n document.getElementById('loadSnapshotInfo').innerHTML = '';\n document.getElementById('loadSnapshotTimeRange').innerHTML = '';\n document.getElementById('loadSnapshotComments').innerHTML = '';\n loadSnapshotModalLog('success', 'Browse for a snapshot file (or drag it and drop it here), then click Import to render it.');\n $('#loadSnapshotImport').addClass('disabled');\n};\n\nvar loadSnapshotDragAndDropInitialized = false;\n\nfunction loadSnapshotDragAndDropSetup() {\n if (loadSnapshotDragAndDropInitialized === false) {\n loadSnapshotDragAndDropInitialized = true;\n $('#loadSnapshotDragAndDrop')\n .on('drag dragstart dragend dragover dragenter dragleave drop', function (e) {\n e.preventDefault();\n e.stopPropagation();\n })\n .on('drop', function (e) {\n if (e.originalEvent.dataTransfer.files.length) {\n loadSnapshotPreflightFile(e.originalEvent.dataTransfer.files.item(0));\n } else {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'No file selected');\n }\n });\n }\n};\n\nwindow.loadSnapshotPreflight = () => {\n var files = document.getElementById('loadSnapshotSelectFiles').files;\n if (!files.length) {\n loadSnapshotPreflightEmpty();\n loadSnapshotModalLog('danger', 'No file selected');\n return;\n }\n\n loadSnapshotModalLog('info', 'Loading file...');\n\n loadSnapshotPreflightFile(files.item(0));\n}\n\n// --------------------------------------------------------------------\n// saving snapshots\n\nvar saveSnapshotStop = false;\n\nfunction saveSnapshotCancel() {\n reduxStore.dispatch(stopSnapshotModeAction())\n saveSnapshotStop = true;\n}\n\nvar saveSnapshotModalInitialized = false;\n\nfunction saveSnapshotModalSetup() {\n if (saveSnapshotModalInitialized === false) {\n saveSnapshotModalInitialized = true;\n $('#saveSnapshotModal')\n .on('hide.bs.modal', saveSnapshotCancel)\n .on('show.bs.modal', saveSnapshotModalInit)\n .on('shown.bs.modal', function () {\n $('#saveSnapshotResolutionSlider').find(\".slider-handle:first\").attr(\"tabindex\", 1);\n document.getElementById('saveSnapshotComments').focus();\n });\n }\n};\n\nfunction saveSnapshotModalLog(priority, msg) {\n document.getElementById('saveSnapshotStatus').className = \"alert alert-\" + priority;\n document.getElementById('saveSnapshotStatus').innerHTML = msg;\n}\n\nfunction saveSnapshotModalShowExpectedSize() {\n var points = Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint);\n var priority = 'info';\n var msg = 'A moderate snapshot.';\n\n var sizemb = Math.round(\n (options.data.charts_count * snapshotOptions.bytes_per_chart\n + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_disk)\n * 10 / 1024 / 1024) / 10;\n\n var memmb = 
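// Same formula as sizemb above, but with the codec's in-memory bytes-per-point.
// Worked example (hypothetical numbers): 200 charts, 1500 dimensions and 500
// points per dimension with pako.deflate.base64 (1.9 bytes/point on disk) gives
// 200*2048 + 1500*500*1.9 = 1,834,600 bytes, reported as ~1.7 MB.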
Math.round(\n (options.data.charts_count * snapshotOptions.bytes_per_chart\n + options.data.dimensions_count * points * snapshotOptions.compressions[saveSnapshotCompression].bytes_per_point_memory)\n * 10 / 1024 / 1024) / 10;\n\n if (sizemb < 10) {\n priority = 'success';\n msg = 'A nice small snapshot!';\n }\n if (sizemb > 50) {\n priority = 'warning';\n msg = 'Will stress your browser...';\n }\n if (sizemb > 100) {\n priority = 'danger';\n msg = 'Hm... good luck...';\n }\n\n saveSnapshotModalLog(priority, 'The snapshot will have ' + points.toString() + ' points per dimension. Expected size on disk ' + sizemb + ' MB, at browser memory ' + memmb + ' MB.
' + msg);\n}\n\nvar saveSnapshotCompression = snapshotOptions.compressionDefault;\n\nfunction saveSnapshotSetCompression(name) {\n saveSnapshotCompression = name;\n document.getElementById('saveSnapshotCompressionName').innerHTML = saveSnapshotCompression;\n saveSnapshotModalShowExpectedSize();\n}\n\nvar saveSnapshotSlider = null;\nvar saveSnapshotSelectedSecondsPerPoint = 1;\nvar saveSnapshotViewDuration = 1;\n\nfunction saveSnapshotModalInit() {\n $('#saveSnapshotModalProgressSection').hide();\n $('#saveSnapshotResolutionRadio').show();\n saveSnapshotModalLog('info', 'Select resolution and click Save');\n $('#saveSnapshotExport').removeClass('disabled');\n\n loadBootstrapSlider(function () {\n const reduxState = reduxStore.getState()\n saveSnapshotViewDuration = - selectDefaultAfter(reduxState)\n var start_ms = Math.round(Date.now() - saveSnapshotViewDuration * 1000);\n const globalPanAndZoom = selectGlobalPanAndZoom(reduxState)\n\n if (Boolean(globalPanAndZoom)) {\n saveSnapshotViewDuration = Math.round((globalPanAndZoom.before - globalPanAndZoom.after) / 1000);\n start_ms = globalPanAndZoom.after;\n }\n\n var start_date = new Date(start_ms);\n var yyyymmddhhssmm = start_date.getFullYear() + zeropad(start_date.getMonth() + 1) + zeropad(start_date.getDate()) + '-' + zeropad(start_date.getHours()) + zeropad(start_date.getMinutes()) + zeropad(start_date.getSeconds());\n\n document.getElementById('saveSnapshotFilename').value = 'netdata-' + options.hostname.toString() + '-' + yyyymmddhhssmm.toString() + '-' + saveSnapshotViewDuration.toString() + '.snapshot';\n saveSnapshotSetCompression(saveSnapshotCompression);\n\n var min = options.update_every;\n var max = Math.round(saveSnapshotViewDuration / 100);\n\n if (Boolean(globalPanAndZoom)) {\n max = Math.round(saveSnapshotViewDuration / 50);\n }\n\n var view = Math.round(saveSnapshotViewDuration / Math.round($(document.getElementById('charts_div')).width() / 2));\n\n if (max < 10) {\n max = 10;\n }\n if (max < min) {\n max = min;\n }\n if (view < min) {\n view = min;\n }\n if (view > max) {\n view = max;\n }\n\n if (saveSnapshotSlider !== null) {\n saveSnapshotSlider.destroy();\n }\n\n saveSnapshotSlider = new Slider('#saveSnapshotResolutionSlider', {\n ticks: [min, view, max],\n min: min,\n max: max,\n step: options.update_every,\n value: view,\n scale: (max > 100) ? 'logarithmic' : 'linear',\n tooltip: 'always',\n formatter: function (value) {\n if (value < 1) {\n value = 1;\n }\n\n if (value < options.data.update_every) {\n value = options.data.update_every;\n }\n\n saveSnapshotSelectedSecondsPerPoint = value;\n saveSnapshotModalShowExpectedSize();\n\n var seconds = ' seconds ';\n if (value === 1) {\n seconds = ' second ';\n }\n\n return value + seconds + 'per point' + ((value === options.data.update_every) ? 
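                    // The tick label reads e.g. "1 second per point, server default"
                    // when the chosen resolution equals the agent's update_every,
                    // otherwise just "N seconds per point".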
', server default' : '').toString();\n }\n });\n });\n}\n\nwindow.saveSnapshot = () => {\n loadPako(function () {\n loadLzString(function () {\n saveSnapshotStop = false;\n $('#saveSnapshotModalProgressSection').show();\n $('#saveSnapshotResolutionRadio').hide();\n $('#saveSnapshotExport').addClass('disabled');\n\n var filename = document.getElementById('saveSnapshotFilename').value;\n // console.log(filename);\n saveSnapshotModalLog('info', 'Generating snapshot as ' + filename.toString() + '');\n\n\n var el = document.getElementById('saveSnapshotModalProgressBar');\n var eltxt = document.getElementById('saveSnapshotModalProgressBarText');\n\n options.data.charts_by_name = null;\n const reduxState = reduxStore.getState()\n const defaultAfter = selectDefaultAfter(reduxState)\n\n var saveData = {\n hostname: options.hostname,\n server: serverDefault,\n netdata_version: options.data.version,\n snapshot_version: 1,\n after_ms: Date.now() + defaultAfter * 1000,\n before_ms: Date.now(),\n highlight_after_ms: urlOptions.highlight_after,\n highlight_before_ms: urlOptions.highlight_before,\n duration_ms: options.duration * 1000,\n update_every_ms: options.update_every * 1000,\n data_points: 0,\n url: ((urlOptions.server !== null) ? urlOptions.server : document.location.origin.toString() + document.location.pathname.toString() + document.location.search.toString()).toString(),\n comments: document.getElementById('saveSnapshotComments').value.toString(),\n hash: urlOptions.hash,\n charts: options.data,\n info: jsonStringifyFn({\n menu: netdataDashboard.menu,\n submenu: netdataDashboard.submenu,\n context: netdataDashboard.context\n }),\n charts_ok: 0,\n charts_failed: 0,\n compression: saveSnapshotCompression,\n data_size: 0,\n data: {}\n };\n\n if (typeof snapshotOptions.compressions[saveData.compression] === 'undefined') {\n alert('unknown compression method: ' + saveData.compression);\n saveData.compression = 'none';\n }\n\n var compress = snapshotOptions.compressions[saveData.compression].compress;\n var compressed_length = snapshotOptions.compressions[saveData.compression].compressed_length;\n\n function pack_api1_v1_chart_data({ data, chartDataUniqueID }) {\n if (data === null) {\n return 0\n }\n\n var str = JSON.stringify(data);\n\n var cstr = compress(str);\n saveData.data[chartDataUniqueID] = cstr;\n return compressed_length(cstr);\n }\n\n const globalPanAndZoom = selectGlobalPanAndZoom(reduxState)\n var clearPanAndZoom = false;\n if (!globalPanAndZoom) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: saveData.after_ms,\n before: saveData.before_ms,\n }))\n clearPanAndZoom = true;\n } else {\n saveData.after_ms = globalPanAndZoom.after\n saveData.before_ms = globalPanAndZoom.before\n }\n\n\n saveData.duration_ms = saveData.before_ms - saveData.after_ms;\n saveData.data_points = Math.round((saveData.before_ms - saveData.after_ms) / (saveSnapshotSelectedSecondsPerPoint * 1000));\n saveSnapshotModalLog('info', 'Generating snapshot with ' + saveData.data_points.toString() + ' data points per dimension...');\n\n reduxStore.dispatch(startSnapshotModeAction({\n charts: saveData.charts,\n dataPoints: saveData.data_points,\n }))\n\n\n window.saveSnapshotRestore = () => {\n $('#saveSnapshotModal').modal('hide');\n\n $(el).css('width', '0%').attr('aria-valuenow', 0);\n eltxt.innerText = '0%';\n\n reduxStore.dispatch(stopSnapshotModeAction())\n reduxStore.dispatch(snapshotExportResetAction())\n if (clearPanAndZoom) {\n // clear that afterwards\n 
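                // A snapshot needs a fixed time window: when no global pan-and-zoom
                // was active, one was pinned from after_ms/before_ms before fetching,
                // and resetting it here returns the dashboard to the live view.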
reduxStore.dispatch(resetGlobalPanAndZoomAction())\n }\n\n $('#saveSnapshotExport').removeClass('disabled');\n }\n\n var size = 0;\n var info = ' Resolution: ' + saveSnapshotSelectedSecondsPerPoint.toString() + ((saveSnapshotSelectedSecondsPerPoint === 1) ? ' second ' : ' seconds ').toString() + 'per point.';\n\n window.chartUpdated = ({ chart, chartDataUniqueID, data }) => {\n if (saveSnapshotStop === true) {\n saveSnapshotModalLog('info', 'Cancelled!');\n saveSnapshotRestore()\n }\n const state = reduxStore.getState()\n const chartsCount = selectAmountOfCharts(state)\n const chartsOk = selectAmountOfSnapshotsFetched(state) // hook\n const chartsFailed = selectAmountOfSnapshotsFailed(state)\n\n const pcent = ((chartsOk + chartsFailed) / chartsCount) * 100\n $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);\n eltxt.innerText = Math.round(pcent).toString() + '%, ' + (chart || data.id)\n\n size += pack_api1_v1_chart_data({ data, chartDataUniqueID })\n\n saveSnapshotModalLog((chartsFailed) ? 'danger' : 'info', 'Generated snapshot data size ' + (Math.round(size * 100 / 1024 / 1024) / 100).toString() + ' MB. ' + ((chartsFailed) ? (chartsFailed.toString() + ' charts have failed to be downloaded') : '').toString() + info);\n\n window.saveData = saveData\n // better not to use equality against pcent in case of floating point errors\n if (chartsOk + chartsFailed === chartsCount) {\n saveData.charts_ok = chartsOk\n saveData.charts_failed = chartsFailed\n saveData.data_size = size\n\n saveObjectToClient(saveData, filename)\n if (chartsFailed > 0) {\n alert(`${chartsFailed} failed to be downloaded`);\n }\n saveSnapshotRestore()\n saveData = null\n }\n }\n\n // called for every chart\n function update_chart(idx) {\n if (saveSnapshotStop === true) {\n saveSnapshotModalLog('info', 'Cancelled!');\n saveSnapshotRestore();\n return;\n }\n\n var state = NETDATA.options.targets[--idx];\n\n var pcent = (NETDATA.options.targets.length - idx) * 100 / NETDATA.options.targets.length;\n $(el).css('width', pcent + '%').attr('aria-valuenow', pcent);\n eltxt.innerText = Math.round(pcent).toString() + '%, ' + state.id;\n\n setTimeout(function () {\n charts_count++;\n state.isVisible(true);\n state.current.force_after_ms = saveData.after_ms;\n state.current.force_before_ms = saveData.before_ms;\n\n state.updateChart(function (status, reason) {\n state.current.force_after_ms = null;\n state.current.force_before_ms = null;\n\n if (status === true) {\n charts_ok++;\n // state.log('ok');\n size += pack_api1_v1_chart_data(state);\n } else {\n charts_failed++;\n state.log('failed to be updated: ' + reason);\n }\n\n saveSnapshotModalLog((charts_failed) ? 'danger' : 'info', 'Generated snapshot data size ' + (Math.round(size * 100 / 1024 / 1024) / 100).toString() + ' MB. ' + ((charts_failed) ? 
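                // This update_chart() walker appears to predate the redux-driven
                // chartUpdated callback above: it still reads NETDATA.options.targets
                // and increments charts_count/charts_ok/charts_failed, none of which
                // are declared in this scope any more.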
(charts_failed.toString() + ' charts have failed to be downloaded') : '').toString() + info);\n\n if (idx > 0) {\n update_chart(idx);\n } else {\n saveData.charts_ok = charts_ok;\n saveData.charts_failed = charts_failed;\n saveData.data_size = size;\n // console.log(saveData.compression + ': ' + (size / (options.data.dimensions_count * Math.round(saveSnapshotViewDuration / saveSnapshotSelectedSecondsPerPoint))).toString());\n\n // save it\n // console.log(saveData);\n saveObjectToClient(saveData, filename);\n\n if (charts_failed > 0) {\n alert(charts_failed.toString() + ' failed to be downloaded');\n }\n\n saveSnapshotRestore();\n saveData = null;\n }\n })\n }, 0);\n }\n\n });\n });\n}\n\n// --------------------------------------------------------------------\n// activate netdata on the page\nlet browser_timezone\ntry {\n browser_timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;\n} catch (e) {\n console.log('failed to detect browser timezone: ' + e.toString());\n browser_timezone = 'cannot-detect-it';\n}\n\nconst getOption = (option) => {\n const state = reduxStore.getState()\n return createSelectOption(option)(state)\n}\n\n\nfunction dashboardSettingsSetup() {\n var update_options_modal = function () {\n var sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== getOption(option)) {\n // console.log('switching ' + option.toString());\n self.bootstrapToggle(getOption(option) ? 'on' : 'off');\n }\n };\n\n var theme_sync_option = function (option) {\n var self = $('#' + option);\n\n self.bootstrapToggle(netdataTheme === 'slate' ? 'on' : 'off');\n };\n var units_sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== (getOption('units') === 'auto')) {\n self.bootstrapToggle(getOption('units') === 'auto' ? 'on' : 'off');\n }\n\n if (self.prop('checked') === true) {\n $('#settingsLocaleTempRow').show();\n $('#settingsLocaleTimeRow').show();\n } else {\n $('#settingsLocaleTempRow').hide();\n $('#settingsLocaleTimeRow').hide();\n }\n };\n var temp_sync_option = function (option) {\n var self = $('#' + option);\n\n if (self.prop('checked') !== (getOption('temperature') === 'celsius')) {\n self.bootstrapToggle(getOption('temperature') === 'celsius' ? 
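        // Checked maps to 'celsius', unchecked to 'fahrenheit'; the stored option
        // is the unit name itself, not a boolean.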
'on' : 'off');\n }\n };\n\n sync_option('stop_updates_when_focus_is_lost');\n sync_option('eliminate_zero_dimensions');\n sync_option('destroy_on_hide');\n sync_option('async_on_scroll');\n\n sync_option('parallel_refresher');\n sync_option('concurrent_refreshes');\n sync_option('sync_selection');\n\n sync_option('legend_right');\n theme_sync_option('netdata_theme_control');\n sync_option('show_help');\n sync_option('pan_and_zoom_data_padding');\n sync_option('smooth_plot');\n\n units_sync_option('units_conversion');\n temp_sync_option('units_temp');\n sync_option('seconds_as_time');\n\n if (getOption('parallel_refresher') === false) {\n $('#concurrent_refreshes_row').hide();\n } else {\n $('#concurrent_refreshes_row').show();\n }\n };\n\n update_options_modal();\n\n // handle options changes\n $('#eliminate_zero_dimensions').change(function () {\n setOption('eliminate_zero_dimensions', $(this).prop('checked'));\n });\n $('#destroy_on_hide').change(function () {\n setOption('destroy_on_hide', $(this).prop('checked'));\n });\n $('#async_on_scroll').change(function () {\n setOption('async_on_scroll', $(this).prop('checked'));\n });\n $('#parallel_refresher').change(function () {\n setOption('parallel_refresher', $(this).prop('checked'));\n });\n $('#concurrent_refreshes').change(function () {\n setOption('concurrent_refreshes', $(this).prop('checked'));\n });\n $('#sync_selection').change(function () {\n setOption('sync_selection', $(this).prop('checked'));\n netdataReload();\n });\n $('#stop_updates_when_focus_is_lost').change(function () {\n urlOptions.update_always = !$(this).prop('checked');\n urlOptions.hashUpdate();\n\n setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);\n });\n $('#smooth_plot').change(function () {\n setOption('smooth_plot', $(this).prop('checked'));\n });\n $('#pan_and_zoom_data_padding').change(function () {\n setOption('pan_and_zoom_data_padding', $(this).prop('checked'));\n });\n $('#seconds_as_time').change(function () {\n setOption('seconds_as_time', $(this).prop('checked'));\n });\n\n $('#units_conversion').change(function () {\n setOption('units', $(this).prop('checked') ? 'auto' : 'original');\n update_options_modal()\n });\n $('#units_temp').change(function () {\n setOption('temperature', $(this).prop('checked') ? 'celsius' : 'fahrenheit');\n });\n\n $('#legend_right').change(function () {\n setOption('legend_right', $(this).prop('checked'));\n // reloading for now, it's much easier than rebuilding charts bootstraping in main.js\n netdataReload();\n });\n\n $('#show_help').change(function () {\n urlOptions.help = $(this).prop('checked');\n urlOptions.hashUpdate();\n\n setOption('show_help', urlOptions.help);\n netdataReload();\n });\n\n // this has to be the last\n // it reloads the page\n $('#netdata_theme_control').change(function () {\n urlOptions.theme = $(this).prop('checked') ? 
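    // Checked selects the dark 'slate' theme, unchecked 'white'; the page is
    // reloaded when setTheme() reports a change, since the theme is only
    // applied at bootstrap.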
'slate' : 'white';\n urlOptions.hashUpdate();\n\n if (setTheme(urlOptions.theme)) {\n netdataReload();\n }\n });\n}\n\nconst CHART_DIV_ID_PREFIX = 'chart_'\nconst CHART_DIV_OFFSET = -50\n\nfunction scrollDashboardTo() {\n if (window.netdataSnapshotData !== null && typeof window.netdataSnapshotData.hash !== 'undefined') {\n scrollToId(window.netdataSnapshotData.hash.replace('#', ''));\n } else {\n // check if we have to jump to a specific section\n scrollToId(urlOptions.hash.replace('#', ''));\n if (urlOptions.chart !== null) {\n const chartElement = document.getElementById(`${CHART_DIV_ID_PREFIX}${name2id(urlOptions.chart)}`)\n if (chartElement) {\n const offset = chartElement.offsetTop + CHART_DIV_OFFSET;\n document.querySelector(\"html\").scrollTop = offset\n }\n }\n }\n}\n\nvar modalHiddenCallback = null;\n\nwindow.scrollToChartAfterHidingModal = (chart, alarmDate, alarmStatus) => {\n modalHiddenCallback = function () {\n\n if (typeof chart === 'string') {\n const chartElement = document.getElementById(`${CHART_DIV_ID_PREFIX}${name2id(chart)}`)\n if (chartElement) {\n const offset = chartElement.offsetTop + CHART_DIV_OFFSET;\n document.querySelector(\"html\").scrollTop = offset\n }\n }\n\n if (['WARNING', 'CRITICAL'].includes(alarmStatus)) {\n const twoMinutes = 2 * 60 * 1000\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: alarmDate - twoMinutes,\n before: alarmDate + twoMinutes,\n }))\n }\n };\n}\n\n// ----------------------------------------------------------------------------\n\nfunction enableTooltipsAndPopovers() {\n $('[data-toggle=\"tooltip\"]').tooltip({\n animated: 'fade',\n trigger: 'hover',\n html: true,\n delay: { show: 500, hide: 0 },\n container: 'body'\n });\n $('[data-toggle=\"popover\"]').popover();\n}\n\n// ----------------------------------------------------------------------------\n\nvar runOnceOnDashboardLastRun = 0;\n\nfunction runOnceOnDashboardWithjQuery() {\n if (runOnceOnDashboardLastRun !== 0) {\n scrollDashboardTo();\n\n // restore the scrollspy at the proper position\n $(document.body).scrollspy('refresh');\n $(document.body).scrollspy('process');\n\n return;\n }\n\n runOnceOnDashboardLastRun = Date.now();\n\n // ------------------------------------------------------------------------\n // bootstrap modals\n\n // prevent bootstrap modals from scrolling the page\n // maintains the current scroll position\n // https://stackoverflow.com/a/34754029/4525767\n\n var scrollPos = 0;\n var modal_depth = 0; // how many modals are currently open\n var modal_shown = false; // set to true, if a modal is shown\n var netdata_paused_on_modal = false; // set to true, if the modal paused netdata\n var scrollspyOffset = $(window).height() / 3; // will be updated below - the offset of scrollspy to select an item\n\n $('.modal')\n .on('show.bs.modal', function () {\n if (modal_depth === 0) {\n scrollPos = window.scrollY;\n\n $('body').css({\n overflow: 'hidden',\n position: 'fixed',\n top: -scrollPos\n });\n\n modal_shown = true;\n\n if (NETDATA.options.pauseCallback === null) {\n NETDATA.pause(function () {\n });\n netdata_paused_on_modal = true;\n } else {\n netdata_paused_on_modal = false;\n }\n }\n\n modal_depth++;\n //console.log(urlOptions.after);\n\n })\n .on('hide.bs.modal', function () {\n\n modal_depth--;\n\n if (modal_depth <= 0) {\n modal_depth = 0;\n\n $('body')\n .css({\n overflow: '',\n position: '',\n top: ''\n });\n\n // scroll to the position we had open before the modal\n $('html, body')\n .animate({ scrollTop: scrollPos }, 0);\n\n // unpause 
netdata, if we paused it\n if (netdata_paused_on_modal === true) {\n NETDATA.unpause();\n netdata_paused_on_modal = false;\n }\n\n // restore the scrollspy at the proper position\n $(document.body).scrollspy('process');\n }\n //console.log(urlOptions.after);\n })\n .on('hidden.bs.modal', function () {\n if (modal_depth === 0) {\n modal_shown = false;\n }\n\n if (typeof modalHiddenCallback === 'function') {\n modalHiddenCallback();\n }\n\n modalHiddenCallback = null;\n //console.log(urlOptions.after);\n });\n\n // ------------------------------------------------------------------------\n // sidebar / affix\n\n $('#sidebar')\n .affix({\n offset: {\n top: 0,\n bottom: 0\n }\n })\n .on('affixed.bs.affix', function () {\n // fix scrolling of very long affix lists\n // http://stackoverflow.com/questions/21691585/bootstrap-3-1-0-affix-too-long\n\n $(this).removeAttr('style');\n })\n .on('affix-top.bs.affix', function () {\n // fix bootstrap affix click bug\n // https://stackoverflow.com/a/37847981/4525767\n\n if (modal_shown) {\n return false;\n }\n })\n .on('activate.bs.scrollspy', function (e) {\n // change the URL based on the current position of the screen\n\n if (modal_shown === false) {\n var el = $(e.target);\n var hash = el.find('a').attr('href');\n if (typeof hash === 'string' && hash.substring(0, 1) === '#' && urlOptions.hash.startsWith(hash + '_submenu_') === false) {\n urlOptions.hash = hash;\n urlOptions.hashUpdate();\n }\n }\n });\n\n Ps.initialize(document.getElementById('sidebar'), {\n wheelSpeed: 0.5,\n wheelPropagation: true,\n swipePropagation: true,\n minScrollbarLength: null,\n maxScrollbarLength: null,\n useBothWheelAxes: false,\n suppressScrollX: true,\n suppressScrollY: false,\n scrollXMarginOffset: 0,\n scrollYMarginOffset: 0,\n theme: 'default'\n });\n\n // ------------------------------------------------------------------------\n // scrollspy\n\n if (scrollspyOffset > 250) {\n scrollspyOffset = 250;\n }\n if (scrollspyOffset < 75) {\n scrollspyOffset = 75;\n }\n document.body.setAttribute('data-offset', scrollspyOffset);\n\n // scroll the dashboard, before activating the scrollspy, so that our\n // hash will not be updated before we got the chance to scroll to it\n scrollDashboardTo();\n\n $(document.body).scrollspy({\n target: '#sidebar',\n offset: scrollspyOffset // controls the diff of the element to the top, to select it\n });\n\n // ------------------------------------------------------------------------\n // my-netdata menu\n\n $('#deleteRegistryModal')\n .on('hidden.bs.modal', function () {\n deleteRegistryGuid = null;\n });\n\n // ------------------------------------------------------------------------\n // update modal\n\n $('#updateModal')\n .on('show.bs.modal', function () {\n versionLog('checking, please wait...');\n })\n .on('shown.bs.modal', function () {\n notifyForUpdate(true);\n });\n\n // ------------------------------------------------------------------------\n // alarms modal\n\n $('#alarmsModal')\n .on('shown.bs.modal', function () {\n alarmsUpdateModal();\n })\n .on('hidden.bs.modal', function () {\n document.getElementById('alarms_active').innerHTML =\n document.getElementById('alarms_all').innerHTML =\n document.getElementById('alarms_log').innerHTML =\n 'loading...';\n });\n\n // ------------------------------------------------------------------------\n\n dashboardSettingsSetup();\n loadSnapshotDragAndDropSetup();\n saveSnapshotModalSetup();\n showPageFooter();\n\n // ------------------------------------------------------------------------\n // 
https://github.com/viralpatel/jquery.shorten/blob/master/src/jquery.shorten.js\n\n $.fn.shorten = function (settings) {\n \"use strict\";\n\n var config = {\n showChars: 750,\n minHideChars: 10,\n ellipsesText: \"...\",\n moreText: ' show more information',\n lessText: ' show less information',\n onLess: function () {\n NETDATA.onscroll();\n },\n onMore: function () {\n NETDATA.onscroll();\n },\n errMsg: null,\n force: false\n };\n\n if (settings) {\n $.extend(config, settings);\n }\n\n if ($(this).data('jquery.shorten') && !config.force) {\n return false;\n }\n $(this).data('jquery.shorten', true);\n\n $(document).off(\"click\", '.morelink');\n\n $(document).on({\n click: function () {\n\n var $this = $(this);\n if ($this.hasClass('less')) {\n $this.removeClass('less');\n $this.html(config.moreText);\n $this.parent().prev().animate({ 'height': '0' + '%' }, 0, function () {\n $this.parent().prev().prev().show();\n }).hide(0, function () {\n config.onLess();\n });\n } else {\n $this.addClass('less');\n $this.html(config.lessText);\n $this.parent().prev().animate({ 'height': '100' + '%' }, 0, function () {\n $this.parent().prev().prev().hide();\n }).show(0, function () {\n config.onMore();\n });\n }\n return false;\n }\n }, '.morelink');\n\n return this.each(function () {\n var $this = $(this);\n\n var content = $this.html();\n var contentlen = $this.text().length;\n if (contentlen > config.showChars + config.minHideChars) {\n var c = content.substr(0, config.showChars);\n if (c.indexOf('<') >= 0) // If there's HTML don't want to cut it\n {\n var inTag = false; // I'm in a tag?\n var bag = ''; // Put the characters to be shown here\n var countChars = 0; // Current bag size\n var openTags = []; // Stack for opened tags, so I can close them later\n var tagName = null;\n\n for (var i = 0, r = 0; r <= config.showChars; i++) {\n if (content[i] === '<' && !inTag) {\n inTag = true;\n\n // This could be \"tag\" or \"/tag\"\n tagName = content.substring(i + 1, content.indexOf('>', i));\n\n // If its a closing tag\n if (tagName[0] === '/') {\n\n if (tagName !== ('/' + openTags[0])) {\n config.errMsg = 'ERROR en HTML: the top of the stack should be the tag that closes';\n } else {\n openTags.shift(); // Pops the last tag from the open tag stack (the tag is closed in the retult HTML!)\n }\n\n } else {\n // There are some nasty tags that don't have a close tag like
\n if (tagName.toLowerCase() !== 'br') {\n openTags.unshift(tagName); // Add to start the name of the tag that opens\n }\n }\n }\n\n if (inTag && content[i] === '>') {\n inTag = false;\n }\n\n if (inTag) {\n bag += content.charAt(i);\n } else {\n // Add tag name chars to the result\n r++;\n if (countChars <= config.showChars) {\n bag += content.charAt(i); // Fix to ie 7 not allowing you to reference string characters using the []\n countChars++;\n } else {\n // Now I have the characters needed\n if (openTags.length > 0) {\n // I have unclosed tags\n\n for (var j = 0; j < openTags.length; j++) {\n bag += ''; // Close all tags that were opened\n\n // You could shift the tag from the stack to check if you end with an empty stack, that means you have closed all open tags\n }\n break;\n }\n }\n }\n }\n c = $('
').html(bag + '' + config.ellipsesText + '').html();\n } else {\n c += config.ellipsesText;\n }\n\n var html = '
<div class="shortcontent">' + c +\n                    '</div><div class="allcontent">' + content +\n                    '</div><span><a href="javascript://nop/" class="morelink">
' + config.moreText + '';\n\n $this.html(html);\n $this.find(\".allcontent\").hide(); // Hide all text\n $('.shortcontent p:last', $this).css('margin-bottom', 0); //Remove bottom margin on last paragraph as it's likely shortened\n }\n });\n };\n}\n\nfunction finalizePage() {\n if (urlOptions.after < 0) {\n reduxStore.dispatch(setDefaultAfterAction({ after: urlOptions.after }))\n } else if (urlOptions.pan_and_zoom === true) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: urlOptions.after,\n before: urlOptions.before,\n }))\n }\n\n // resize all charts - without starting the background thread\n // this has to be done while NETDATA is paused\n // if we omit this, the affix menu will be wrong, since all\n // the Dom elements are initially zero-sized\n NETDATA.parseDom();\n\n // let it run (update the charts)\n NETDATA.unpause();\n\n runOnceOnDashboardWithjQuery();\n $(\".shorten\").shorten();\n enableTooltipsAndPopovers();\n\n if (!isDemo) {\n notifyForUpdate();\n }\n\n if (urlOptions.show_alarms === true) {\n setTimeout(function () {\n $('#alarmsModal').modal('show');\n }, 1000);\n }\n\n NETDATA.onresizeCallback = function () {\n Ps.update(document.getElementById('sidebar'));\n };\n NETDATA.onresizeCallback();\n\n if (window.netdataSnapshotData !== null) {\n reduxStore.dispatch(setGlobalPanAndZoomAction({\n after: window.netdataSnapshotData.after_ms,\n before: window.netdataSnapshotData.before_ms,\n }))\n }\n}\n\nwindow.resetDashboardOptions = () => {\n reduxStore.dispatch(resetOptionsAction())\n\n // it's dirty, but this will be rewritten anyway\n urlOptions.update_always = false;\n urlOptions.help = false;\n urlOptions.theme = \"slate\";\n urlOptions.hashUpdate();\n\n netdataReload()\n}\n\n// callback to add the dashboard info to the\n// parallel javascript downloader in netdata\nexport const netdataPrepCallback = () => {\n if (isDemo) {\n document.getElementById('masthead').style.display = 'block';\n } else {\n if (urlOptions.update_always === true) {\n setOption('stop_updates_when_focus_is_lost', !urlOptions.update_always);\n }\n }\n};\n\nwindow.selected_server_timezone = function (timezone, status) {\n // clear the error\n document.getElementById('timezone_error_message').innerHTML = '';\n\n if (typeof status === 'undefined') {\n // the user selected a timezone from the menu\n\n setOption('user_set_server_timezone', timezone);\n\n if (!isProperTimezone(timezone)) {\n setOption(\"timezone\", \"default\")\n\n if (!$('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('on');\n }\n\n document.getElementById('timezone_error_message').innerHTML = 'Ooops! That timezone was not accepted by your browser. 
Please open a github issue to help us fix it.';\n setOption('user_set_server_timezone', options.timezone);\n } else {\n if ($('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('off');\n }\n setOption(\"timezone\", timezone)\n }\n } else if (status === true) {\n // the user wants the browser default timezone to be activated\n setOption(\"timezone\", \"default\")\n } else {\n // the user wants the server default timezone to be activated\n\n let userSetServerTimezone = getOption(\"user_set_server_timezone\")\n if (userSetServerTimezone === 'default') {\n setOption(\"user_set_server_timezone\", options.timezone) // timezone from /charts endpoint\n userSetServerTimezone = options.timezone\n }\n\n if (!isProperTimezone(userSetServerTimezone)) {\n setOption(\"timezone\", \"default\");\n\n if (!$('#local_timezone').prop('checked')) {\n $('#local_timezone').bootstrapToggle('on');\n }\n\n document.getElementById('timezone_error_message').innerHTML = 'Sorry. The timezone \"' + timezone.toString() + '\" is not accepted by your browser. Please select one from the list.';\n setOption('user_set_server_timezone', options.timezone);\n } else {\n setOption(\"timezone\", userSetServerTimezone)\n }\n }\n\n const timezoneOption = getOption(\"timezone\")\n document.getElementById('current_timezone').innerText = (timezoneOption === 'default') ? 'unset, using browser default' : timezoneOption;\n return false;\n};\n\nexport var netdataCallback = initializeDynamicDashboard;\n\nwindow.showSignInModal = function() {\n document.getElementById(\"sim-registry\").innerHTML = getFromRegistry(\"registryServer\");\n $(\"#signInModal\").modal(\"show\");\n}\n\nwindow.explicitlySignIn = () => {\n $(\"#signInModal\").modal(\"hide\");\n reduxStore.dispatch(explicitlySignInAction())\n};\n","import { AlarmStatus } from \"domains/global/types\"\n\nexport const storeKey = \"global\"\n\nexport const TEMPORARY_MAIN_JS_TIMEOUT = 1000\n\nexport const MASKED_DATA = \"***\"\n\nexport const NOTIFICATIONS_TIMEOUT = 5000\n\nexport const INFO_POLLING_FREQUENCY = 5000\n\nexport const CLOUD_BASE_URL_DISABLED = \"CLOUD_BASE_URL_DISABLED\"\n\nexport const alarmStatuses: AlarmStatus[] = [\"WARNING\", \"ERROR\", \"REMOVED\", \"UNDEFINED\", \"UNINITIALIZED\", \"CLEAR\", \"CRITICAL\"]\n","import React, { useMemo } from \"react\"\nimport { CloudConnectionProps, ConnectionModalStatusContent } from \"./types\"\nimport Anchor from \"@/src/components/anchor\"\n\nimport { Text } from \"@netdata/netdata-ui\"\n\nexport const makeCloudConnectionStatusInfo = ({\n nodeStatus,\n userStatus,\n date,\n}: CloudConnectionProps): ConnectionModalStatusContent => ({\n title: \"Netdata Cloud connection status\",\n text: {\n header: () => {\n return (\n \n This node is currently{\" \"}\n {nodeStatus === \"LIVE\" ? \"Connected\" : \"Not Connected\"} to Netdata\n Cloud\n \n )\n },\n bullets:\n nodeStatus === \"NOT_LIVE\"\n ? [\n // `The node lost its Netdata Cloud connection at ${date}`,\n () => (\n \n To troubleshoot Netdata Cloud connection issues, please follow{\" \"}\n \n this guide\n \n .\n \n ),\n ]\n : [],\n footer: () => (\n \n You are{\" \"}\n \n {userStatus === \"LOGGED_IN\"\n ? \"Logged In\"\n : userStatus === \"EXPIRED_LOGIN\"\n ? 
\"Logged out\"\n : \"Not signed-up\"}\n {\" \"}\n to Netdata Cloud\n \n ),\n },\n CTA1: {\n text: \"Take me to Netdata Cloud\",\n },\n})\n\nconst useCloudConnectionStatus = ({ userStatus, nodeStatus, date }: CloudConnectionProps) => {\n const cloudConnectionStatusInfo = useMemo(() => {\n return makeCloudConnectionStatusInfo({ userStatus, nodeStatus, date })\n }, [userStatus, nodeStatus, date])\n\n return cloudConnectionStatusInfo\n}\n\nexport default useCloudConnectionStatus\n","import React, { useCallback } from \"react\"\nimport GoToCloud from \"components/auth/signIn\"\n\nimport {\n Modal,\n ModalContent,\n ModalBody,\n ModalFooter,\n ModalHeader,\n Text,\n Flex,\n H3,\n Button,\n Box,\n ModalCloseButton,\n} from \"@netdata/netdata-ui\"\n\nimport { ConnectionModalStatusContent } from \"./types\"\n\nconst campaign = \"agent_nudge_to_cloud\"\n\nexport type CloudConnectionStatusModalProps = ConnectionModalStatusContent & {\n closeModal: () => void\n onRefresh?: () => void\n isCTA1Disabled: boolean\n}\n\nconst CloudConnectionStatusModal = ({\n title,\n text,\n CTA1,\n closeModal,\n onRefresh,\n isCTA1Disabled,\n}: CloudConnectionStatusModalProps) => {\n const handleClickedCTA1 = useCallback(\n ({ link }: { link: string }) => {\n closeModal()\n window.location.href = link\n },\n [closeModal]\n )\n\n return (\n \n \n \n
{title}\n {text.header({})}\n {text.bullets.length > 0 && (\n {text.bullets.map((bullet, index) => {\n if (typeof bullet === \"function\") {\n return {bullet()}\n }\n return (\n {bullet}\n )\n })}\n )}\n {text.footer()}\n {({ link }) => (\n handleClickedCTA1({ link })}\n width=\"100%\"\n label={CTA1.text}\n />\n )}\n Check Now\n
    \n )\n}\n\nexport default CloudConnectionStatusModal\n","import React, { useState, useCallback, useEffect } from \"react\"\nimport useCloudConnectionStatus from \"./use-cloud-connection-status\"\nimport CloudConnectionStatusModal from \"./cloud-connection-status-modal\"\n\nimport { Pill, Flex } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"react-redux\"\nimport { useRequestRefreshOfAccessMessage } from \"hooks/use-user-node-access\"\nimport { selectUserNodeAccess } from \"domains/global/selectors\"\nimport { PromoProps } from \"@/src/domains/dashboard/components/migration-modal\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\n\nconst CloudConnectionStatus = () => {\n const userNodeAccess = useSelector(selectUserNodeAccess) as PromoProps\n const cloudEnabled = useSelector(selectIsCloudEnabled)\n\n const [isModalOpen, setModalOpen] = useState(false)\n const cloudConnectionStatusInfo = useCloudConnectionStatus({\n userStatus: userNodeAccess?.userStatus || \"UNKNOWN\",\n nodeStatus: userNodeAccess?.nodeLiveness || \"NOT_LIVE\",\n date: \"\",\n })\n\n useEffect(() => {\n if (isModalOpen) {\n document.documentElement.style.overflow = \"hidden\"\n } else {\n document.documentElement.style.overflow = \"auto\"\n }\n }, [isModalOpen])\n\n const openModal = useCallback(() => {\n setModalOpen(true)\n }, [])\n\n const closeModal = useCallback(() => {\n setModalOpen(false)\n }, [])\n\n const onRefresh = useRequestRefreshOfAccessMessage()\n\n if (!cloudEnabled) return null\n\n return (\n \n \n Connection to Cloud\n \n {isModalOpen && (\n \n )}\n \n )\n}\n\nexport default CloudConnectionStatus\n","import { equals } from \"ramda\"\n\n// we use numbers to specify time. it can be either a timestamp (ms), or a relative value in seconds\n// which is always 0 or less (0 is now, -300 is -5 minutes)\n\nexport const isTimestamp = (x: number) => x > 0\n\nexport const NETDATA_REGISTRY_SERVER = \"https://registry.my-netdata.io\"\n\nexport const MS_IN_SECOND = 1000\nexport const NODE_VIEW_DYGRAPH_TITLE_HEIGHT = 30\nexport const DEFAULT_DASHBOARD_DURATION = 5 * 60\n\nexport const getIframeSrc = (cloudBaseURL: string, path: string) => `${cloudBaseURL}/sso/v2/${path}`\nexport const utmUrlSuffix = \"&utm_source=agent&utm_medium=web\"\n\nexport const getInitialAfterFromWindow = () => {\n const div = document.getElementById(\"charts_div\")\n if (!div) {\n // eslint-disable-next-line no-console\n console.error(\"Couldn't find '.charts_div' element to calculate width\")\n return -900\n }\n // based on https://github.com/netdata/dashboard/blob/7a7b538b00f1c5a4e1550f69cb5333212bb68f95/src/main.js#L1753\n // eslint-disable-next-line max-len\n // var duration = Math.round(($(div).width() * pcent_width / 100 * data.update_every / 3) / 60) * 60;\n return -Math.round(div.getBoundingClientRect().width / 3 / 60) * 60\n}\n\nexport const SPACE_PANEL_STATE = \"space-panel-state\"\n\nexport const useNewKeysOnlyIfDifferent = (\n keys: (keyof T)[],\n obj1: T | null,\n obj2: T\n): T => {\n if (!obj1) {\n return obj2\n }\n return keys.reduce(\n (acc, key) => ({\n ...acc,\n [key]: equals(obj1[key], obj2![key]) ? obj1[key] : obj2[key],\n }),\n obj2\n )\n}\n\nexport type AnyFunction = (...args: T[]) => any\n\nexport type FunctionArguments = T extends (...args: infer R) => any ? 
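// FunctionArguments<T> infers the parameter tuple of a function type, letting
// callAll below type the first argument of its merged handler; e.g.
// callAll(onClick, trackClick)(event) invokes every defined handler with the
// same event.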
R : never\n\nexport function callAll(...fns: (T | undefined)[]) {\n return function mergedFn(arg: FunctionArguments[0]) {\n fns.forEach(fn => {\n fn?.(arg)\n })\n }\n}\n","/* eslint-disable */\n/*!\n * d3pie\n * @author Ben Keen\n * @version 0.1.9\n * @date June 17th, 2015\n * @repo http://github.com/benkeen/d3pie\n * SPDX-License-Identifier: MIT\n */\n\n// UMD pattern from https://github.com/umdjs/umd/blob/master/returnExports.js\n(function(root, factory) {\n if (typeof define === 'function' && define.amd) {\n // AMD. Register as an anonymous module\n define([], factory);\n } else if (typeof exports === 'object') {\n // Node. Does not work with strict CommonJS, but only CommonJS-like environments that support module.exports,\n // like Node\n module.exports = factory();\n } else {\n // browser globals (root is window)\n root.d3pie = factory(root);\n }\n}(this, function() {\n\n var _scriptName = \"d3pie\";\n var _version = \"0.2.1\";\n\n // used to uniquely generate IDs and classes, ensuring no conflict between multiple pies on the same page\n var _uniqueIDCounter = 0;\n\n\n // this section includes all helper libs on the d3pie object. They're populated via grunt-template. Note: to keep\n // the syntax highlighting from getting all messed up, I commented out each line. That REQUIRES each of the files\n // to have an empty first line. Crumby, yes, but acceptable.\n //// --------- _default-settings.js -----------/**\n/**\n * Contains the out-the-box settings for the script. Any of these settings that aren't explicitly overridden for the\n * d3pie instance will inherit from these. This is also included on the main website for use in the generation script.\n */\nvar defaultSettings = {\n header: {\n title: {\n text: \"\",\n color: \"#333333\",\n fontSize: 18,\n fontWeight: \"bold\",\n font: \"arial\"\n },\n subtitle: {\n text: \"\",\n color: \"#666666\",\n fontSize: 14,\n fontWeight: \"bold\",\n font: \"arial\"\n },\n location: \"top-center\",\n titleSubtitlePadding: 8\n },\n footer: {\n text: \t \"\",\n color: \"#666666\",\n fontSize: 14,\n fontWeight: \"bold\",\n font: \"arial\",\n location: \"left\"\n },\n size: {\n canvasHeight: 500,\n canvasWidth: 500,\n pieInnerRadius: \"0%\",\n pieOuterRadius: null\n },\n data: {\n sortOrder: \"none\",\n ignoreSmallSegments: {\n enabled: false,\n valueType: \"percentage\",\n value: null\n },\n smallSegmentGrouping: {\n enabled: false,\n value: 1,\n valueType: \"percentage\",\n label: \"Other\",\n color: \"#cccccc\"\n },\n content: []\n },\n labels: {\n outer: {\n format: \"label\",\n hideWhenLessThanPercentage: null,\n pieDistance: 30\n },\n inner: {\n format: \"percentage\",\n hideWhenLessThanPercentage: null\n },\n mainLabel: {\n color: \"#333333\",\n font: \"arial\",\n fontWeight: \"normal\",\n fontSize: 10\n },\n percentage: {\n color: \"#dddddd\",\n font: \"arial\",\n fontWeight: \"bold\",\n fontSize: 10,\n decimalPlaces: 0\n },\n value: {\n color: \"#cccc44\",\n fontWeight: \"bold\",\n font: \"arial\",\n fontSize: 10\n },\n lines: {\n enabled: true,\n style: \"curved\",\n color: \"segment\"\n },\n truncation: {\n enabled: false,\n truncateLength: 30\n },\n formatter: null\n },\n effects: {\n load: {\n effect: \"none\", // \"default\", commented in the code\n speed: 1000\n },\n pullOutSegmentOnClick: {\n effect: \"none\", // \"bounce\", commented in the code\n speed: 300,\n size: 10\n },\n highlightSegmentOnMouseover: false,\n highlightLuminosity: -0.2\n },\n tooltips: {\n enabled: false,\n type: \"placeholder\", // caption|placeholder\n string: 
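    // For type "placeholder" this template is interpolated per segment; d3pie
    // substitutes tokens such as {label}, {value} and {percentage} (left empty here).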
\"\",\n placeholderParser: null,\n styles: {\n fadeInSpeed: 250,\n backgroundColor: \"#000000\",\n backgroundOpacity: 0.5,\n color: \"#efefef\",\n borderRadius: 2,\n font: \"arial\",\n fontWeight: \"bold\",\n fontSize: 10,\n padding: 4\n }\n },\n misc: {\n colors: {\n background: null,\n segments: [\n \"#2484c1\", \"#65a620\", \"#7b6888\", \"#a05d56\", \"#961a1a\", \"#d8d23a\", \"#e98125\", \"#d0743c\", \"#635222\", \"#6ada6a\",\n \"#0c6197\", \"#7d9058\", \"#207f33\", \"#44b9b0\", \"#bca44a\", \"#e4a14b\", \"#a3acb2\", \"#8cc3e9\", \"#69a6f9\", \"#5b388f\",\n \"#546e91\", \"#8bde95\", \"#d2ab58\", \"#273c71\", \"#98bf6e\", \"#4daa4b\", \"#98abc5\", \"#cc1010\", \"#31383b\", \"#006391\",\n \"#c2643f\", \"#b0a474\", \"#a5a39c\", \"#a9c2bc\", \"#22af8c\", \"#7fcecf\", \"#987ac6\", \"#3d3b87\", \"#b77b1c\", \"#c9c2b6\",\n \"#807ece\", \"#8db27c\", \"#be66a2\", \"#9ed3c6\", \"#00644b\", \"#005064\", \"#77979f\", \"#77e079\", \"#9c73ab\", \"#1f79a7\"\n ],\n segmentStroke: \"#ffffff\"\n },\n gradient: {\n enabled: false,\n percentage: 95,\n color: \"#000000\"\n },\n canvasPadding: {\n top: 5,\n right: 5,\n bottom: 5,\n left: 5\n },\n pieCenterOffset: {\n x: 0,\n y: 0\n },\n cssPrefix: null\n },\n callbacks: {\n onload: null,\n onMouseoverSegment: null,\n onMouseoutSegment: null,\n onClickSegment: null\n }\n};\n\n //// --------- validate.js -----------\nvar validate = {\n\n // called whenever a new pie chart is created\n initialCheck: function(pie) {\n var cssPrefix = pie.cssPrefix;\n var element = pie.element;\n var options = pie.options;\n\n // confirm d3 is available [check minimum version]\n if (!window.d3 || !window.d3.hasOwnProperty(\"version\")) {\n console.error(\"d3pie error: d3 is not available\");\n return false;\n }\n\n // confirm element is either a DOM element or a valid string for a DOM element\n if (!(element instanceof HTMLElement || element instanceof SVGElement)) {\n console.error(\"d3pie error: the first d3pie() param must be a valid DOM element (not jQuery) or a ID string.\");\n return false;\n }\n\n // confirm the CSS prefix is valid. It has to start with a-Z and contain nothing but a-Z0-9_-\n if (!(/[a-zA-Z][a-zA-Z0-9_-]*$/.test(cssPrefix))) {\n console.error(\"d3pie error: invalid options.misc.cssPrefix\");\n return false;\n }\n\n // confirm some data has been supplied\n if (!helpers.isArray(options.data.content)) {\n console.error(\"d3pie error: invalid config structure: missing data.content property.\");\n return false;\n }\n if (options.data.content.length === 0) {\n console.error(\"d3pie error: no data supplied.\");\n return false;\n }\n\n // clear out any invalid data. 
Each data row needs a valid positive number and a label\n var data = [];\n for (var i=0; i r1.right\n (r2.x > (r1.x + r1.w)) ||\n\n // r2.right < r1.left\n ((r2.x + r2.w) < r1.x) ||\n\n // r2.top < r1.bottom\n ((r2.y + r2.h) < r1.y) ||\n\n // r2.bottom > r1.top\n (r2.y > (r1.y + r1.h))\n );\n\n return !returnVal;\n },\n\n /**\n * Returns a lighter/darker shade of a hex value, based on a luminance value passed.\n * @param hex a hex color value such as “#abc” or “#123456″ (the hash is optional)\n * @param lum the luminosity factor: -0.1 is 10% darker, 0.2 is 20% lighter, etc.\n * @returns {string}\n */\n getColorShade: function(hex, lum) {\n\n // validate hex string\n hex = String(hex).replace(/[^0-9a-f]/gi, '');\n if (hex.length < 6) {\n hex = hex[0]+hex[0]+hex[1]+hex[1]+hex[2]+hex[2];\n }\n lum = lum || 0;\n\n // convert to decimal and change luminosity\n var newHex = \"#\";\n for (var i=0; i<3; i++) {\n var c = parseInt(hex.substr(i * 2, 2), 16);\n c = Math.round(Math.min(Math.max(0, c + (c * lum)), 255)).toString(16);\n newHex += (\"00\" + c).substr(c.length);\n }\n\n return newHex;\n },\n\n /**\n * Users can choose to specify segment colors in three ways (in order of precedence):\n * \t1. include a \"color\" attribute for each row in data.content\n * \t2. include a misc.colors.segments property which contains an array of hex codes\n * \t3. specify nothing at all and rely on this lib provide some reasonable defaults\n *\n * This function sees what's included and populates this.options.colors with whatever's required\n * for this pie chart.\n * @param data\n */\n initSegmentColors: function(pie) {\n var data = pie.options.data.content;\n var colors = pie.options.misc.colors.segments;\n\n // TODO this needs a ton of error handling\n\n var finalColors = [];\n for (var i=0; i 99) ? 99 : percent;\n percent = (percent < 0) ? 0 : percent;\n\n var smallestDimension = (w < h) ? w : h;\n\n // now factor in the label line size\n if (pie.options.labels.outer.format !== \"none\") {\n var pieDistanceSpace = parseInt(pie.options.labels.outer.pieDistance, 10) * 2;\n if (smallestDimension - pieDistanceSpace > 0) {\n smallestDimension -= pieDistanceSpace;\n }\n }\n\n outerRadius = Math.floor((smallestDimension / 100) * percent) / 2;\n } else {\n outerRadius = parseInt(size.pieOuterRadius, 10);\n }\n }\n\n // inner radius\n if (/%/.test(size.pieInnerRadius)) {\n percent = parseInt(size.pieInnerRadius.replace(/[\\D]/, \"\"), 10);\n percent = (percent > 99) ? 99 : percent;\n percent = (percent < 0) ? 0 : percent;\n innerRadius = Math.floor((outerRadius / 100) * percent);\n } else {\n innerRadius = parseInt(size.pieInnerRadius, 10);\n }\n\n pie.innerRadius = innerRadius;\n pie.outerRadius = outerRadius;\n },\n\n getTotalPieSize: function(data) {\n var totalSize = 0;\n for (var i=0; i b.label.toLowerCase()) ? 1 : -1; });\n break;\n case \"label-desc\":\n data.sort(function(a, b) { return (a.label.toLowerCase() < b.label.toLowerCase()) ? 1 : -1; });\n break;\n }\n\n return data;\n },\n\n // var pieCenter = math.getPieCenter();\n getPieTranslateCenter: function(pieCenter) {\n return \"translate(\" + pieCenter.x + \",\" + pieCenter.y + \")\";\n },\n\n /**\n * Used to determine where on the canvas the center of the pie chart should be. 
It takes into account the\n * height and position of the title, subtitle and footer, and the various paddings.\n * @private\n */\n calculatePieCenter: function(pie) {\n var pieCenterOffset = pie.options.misc.pieCenterOffset;\n var hasTopTitle = (pie.textComponents.title.exists && pie.options.header.location !== \"pie-center\");\n var hasTopSubtitle = (pie.textComponents.subtitle.exists && pie.options.header.location !== \"pie-center\");\n\n var headerOffset = pie.options.misc.canvasPadding.top;\n if (hasTopTitle && hasTopSubtitle) {\n headerOffset += pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;\n } else if (hasTopTitle) {\n headerOffset += pie.textComponents.title.h;\n } else if (hasTopSubtitle) {\n headerOffset += pie.textComponents.subtitle.h;\n }\n\n var footerOffset = 0;\n if (pie.textComponents.footer.exists) {\n footerOffset = pie.textComponents.footer.h + pie.options.misc.canvasPadding.bottom;\n }\n\n var x = ((pie.options.size.canvasWidth - pie.options.misc.canvasPadding.left - pie.options.misc.canvasPadding.right) / 2) + pie.options.misc.canvasPadding.left;\n var y = ((pie.options.size.canvasHeight - footerOffset - headerOffset) / 2) + headerOffset;\n\n x += pieCenterOffset.x;\n y += pieCenterOffset.y;\n\n pie.pieCenter = { x: x, y: y };\n },\n\n\n /**\n * Rotates a point (x, y) around an axis (xm, ym) by degrees (a).\n * @param x\n * @param y\n * @param xm\n * @param ym\n * @param a angle in degrees\n * @returns {Array}\n */\n rotate: function(x, y, xm, ym, a) {\n\n a = a * Math.PI / 180; // convert to radians\n\n var cos = Math.cos,\n sin = Math.sin,\n // subtract midpoints, so that midpoint is translated to origin and add it in the end again\n xr = (x - xm) * cos(a) - (y - ym) * sin(a) + xm,\n yr = (x - xm) * sin(a) + (y - ym) * cos(a) + ym;\n\n return { x: xr, y: yr };\n },\n\n /**\n * Translates a point x, y by distance d, and by angle a.\n * @param x\n * @param y\n * @param dist\n * @param a angle in degrees\n */\n translate: function(x, y, d, a) {\n var rads = math.toRadians(a);\n return {\n x: x + d * Math.sin(rads),\n y: y - d * Math.cos(rads)\n };\n },\n\n // from: http://stackoverflow.com/questions/19792552/d3-put-arc-labels-in-a-pie-chart-if-there-is-enough-space\n pointIsInArc: function(pt, ptData, d3Arc) {\n // Center of the arc is assumed to be 0,0\n // (pt.x, pt.y) are assumed to be relative to the center\n var r1 = d3Arc.innerRadius()(ptData), // Note: Using the innerRadius\n r2 = d3Arc.outerRadius()(ptData),\n theta1 = d3Arc.startAngle()(ptData),\n theta2 = d3Arc.endAngle()(ptData);\n\n var dist = pt.x * pt.x + pt.y * pt.y,\n angle = Math.atan2(pt.x, -pt.y); // Note: different coordinate system\n\n angle = (angle < 0) ? (angle + Math.PI * 2) : angle;\n\n return (r1 * r1 <= dist) && (dist <= r2 * r2) &&\n (theta1 <= angle) && (angle <= theta2);\n }\n};\n\n //// --------- labels.js -----------\nvar labels = {\n\n /**\n * Adds the labels to the pie chart, but doesn't position them. 
There are two locations for the\n * labels: inside (center) of the segments, or outside the segments on the edge.\n * @param section \"inner\" or \"outer\"\n * @param sectionDisplayType \"percentage\", \"value\", \"label\", \"label-value1\", etc.\n * @param pie\n */\n add: function(pie, section, sectionDisplayType) {\n var include = labels.getIncludes(sectionDisplayType);\n var settings = pie.options.labels;\n\n // group the label groups (label, percentage, value) into a single element for simpler positioning\n var outerLabel = pie.svg.insert(\"g\", \".\" + pie.cssPrefix + \"labels-\" + section)\n .attr(\"class\", pie.cssPrefix + \"labels-\" + section);\n\n var labelGroup = pie.__labels[section] = outerLabel.selectAll(\".\" + pie.cssPrefix + \"labelGroup-\" + section)\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"labelGroup\" + i + \"-\" + section; })\n .attr(\"data-index\", function(d, i) { return i; })\n .attr(\"class\", pie.cssPrefix + \"labelGroup-\" + section)\n .style(\"opacity\", 0);\n\n var formatterContext = { section: section, sectionDisplayType: sectionDisplayType };\n\n // 1. Add the main label\n if (include.mainLabel) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentMainLabel\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentMainLabel-\" + section)\n .text(function(d, i) {\n var str = d.label;\n\n // if a custom formatter has been defined, pass it the raw label string - it can do whatever it wants with it.\n // we only apply truncation if it's not defined\n if (settings.formatter) {\n formatterContext.index = i;\n formatterContext.part = 'mainLabel';\n formatterContext.value = d.value;\n formatterContext.label = str;\n str = settings.formatter(formatterContext);\n } else if (settings.truncation.enabled && d.label.length > settings.truncation.truncateLength) {\n str = d.label.substring(0, settings.truncation.truncateLength) + \"...\";\n }\n return str;\n })\n .style(\"font-size\", settings.mainLabel.fontSize + \"px\")\n .style(\"font-family\", settings.mainLabel.font)\n .style(\"font-weight\", settings.mainLabel.fontWeight)\n .style(\"fill\", function(d, i) {\n return (settings.mainLabel.color === \"segment\") ? pie.options.colors[i] : settings.mainLabel.color;\n });\n }\n\n // 2. Add the percentage label\n if (include.percentage) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentPercentage\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentPercentage-\" + section)\n .text(function(d, i) {\n var percentage = d.percentage;\n if (settings.formatter) {\n formatterContext.index = i;\n formatterContext.part = \"percentage\";\n formatterContext.value = d.value;\n formatterContext.label = d.percentage;\n percentage = settings.formatter(formatterContext);\n } else {\n percentage += \"%\";\n }\n return percentage;\n })\n .style(\"font-size\", settings.percentage.fontSize + \"px\")\n .style(\"font-family\", settings.percentage.font)\n .style(\"font-weight\", settings.percentage.fontWeight)\n .style(\"fill\", settings.percentage.color);\n }\n\n // 3. 
Add the value label\n if (include.value) {\n labelGroup.append(\"text\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segmentValue\" + i + \"-\" + section; })\n .attr(\"class\", pie.cssPrefix + \"segmentValue-\" + section)\n .text(function(d, i) {\n formatterContext.index = i;\n formatterContext.part = \"value\";\n formatterContext.value = d.value;\n formatterContext.label = d.value;\n return settings.formatter ? settings.formatter(formatterContext, d.value) : d.value;\n })\n .style(\"font-size\", settings.value.fontSize + \"px\")\n .style(\"font-family\", settings.value.font)\n .style(\"font-weight\", settings.value.fontWeight)\n .style(\"fill\", settings.value.color);\n }\n },\n\n /**\n * @param section \"inner\" / \"outer\"\n */\n positionLabelElements: function(pie, section, sectionDisplayType) {\n labels[\"dimensions-\" + section] = [];\n\n // get the latest widths, heights\n var labelGroups = pie.__labels[section];\n labelGroups.each(function(d, i) {\n var mainLabel = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentMainLabel-\" + section);\n var percentage = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section);\n var value = d3.select(this).selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section);\n\n labels[\"dimensions-\" + section].push({\n mainLabel: (mainLabel.node() !== null) ? mainLabel.node().getBBox() : null,\n percentage: (percentage.node() !== null) ? percentage.node().getBBox() : null,\n value: (value.node() !== null) ? value.node().getBBox() : null\n });\n });\n\n var singleLinePad = 5;\n var dims = labels[\"dimensions-\" + section];\n switch (sectionDisplayType) {\n case \"label-value1\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section)\n .attr(\"dx\", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });\n break;\n case \"label-value2\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentValue-\" + section)\n .attr(\"dy\", function(d, i) { return dims[i].mainLabel.height; });\n break;\n case \"label-percentage1\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section)\n .attr(\"dx\", function(d, i) { return dims[i].mainLabel.width + singleLinePad; });\n break;\n case \"label-percentage2\":\n pie.svg.selectAll(\".\" + pie.cssPrefix + \"segmentPercentage-\" + section)\n .attr(\"dx\", function(d, i) { return (dims[i].mainLabel.width / 2) - (dims[i].percentage.width / 2); })\n .attr(\"dy\", function(d, i) { return dims[i].mainLabel.height; });\n break;\n }\n },\n\n computeLabelLinePositions: function(pie) {\n pie.lineCoordGroups = [];\n pie.__labels.outer\n .each(function(d, i) { return labels.computeLinePosition(pie, i); });\n },\n\n computeLinePosition: function(pie, i) {\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n var originCoords = math.rotate(pie.pieCenter.x, pie.pieCenter.y - pie.outerRadius, pie.pieCenter.x, pie.pieCenter.y, angle);\n var heightOffset = pie.outerLabelGroupData[i].h / 5; // TODO check\n var labelXMargin = 6; // the x-distance of the label from the end of the line [TODO configurable]\n\n var quarter = Math.floor(angle / 90);\n var midPoint = 4;\n var x2, y2, x3, y3;\n\n // this resolves an issue when the\n if (quarter === 2 && angle === 180) {\n quarter = 1;\n }\n\n switch (quarter) {\n case 0:\n x2 = pie.outerLabelGroupData[i].x - labelXMargin - ((pie.outerLabelGroupData[i].x - labelXMargin - originCoords.x) / 2);\n y2 = pie.outerLabelGroupData[i].y + 
((originCoords.y - pie.outerLabelGroupData[i].y) / midPoint);\n x3 = pie.outerLabelGroupData[i].x - labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 1:\n x2 = originCoords.x + (pie.outerLabelGroupData[i].x - originCoords.x) / midPoint;\n y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x - labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 2:\n var startOfLabelX = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n x2 = originCoords.x - (originCoords.x - startOfLabelX) / midPoint;\n y2 = originCoords.y + (pie.outerLabelGroupData[i].y - originCoords.y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n case 3:\n var startOfLabel = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n x2 = startOfLabel + ((originCoords.x - startOfLabel) / midPoint);\n y2 = pie.outerLabelGroupData[i].y + (originCoords.y - pie.outerLabelGroupData[i].y) / midPoint;\n x3 = pie.outerLabelGroupData[i].x + pie.outerLabelGroupData[i].w + labelXMargin;\n y3 = pie.outerLabelGroupData[i].y - heightOffset;\n break;\n }\n\n /*\n * x1 / y1: the x/y coords of the start of the line, at the mid point of the segments arc on the pie circumference\n * x2 / y2: if \"curved\" line style is being used, this is the midpoint of the line. Other\n * x3 / y3: the end of the line; closest point to the label\n */\n if (pie.options.labels.lines.style === \"straight\") {\n pie.lineCoordGroups[i] = [\n { x: originCoords.x, y: originCoords.y },\n { x: x3, y: y3 }\n ];\n } else {\n pie.lineCoordGroups[i] = [\n { x: originCoords.x, y: originCoords.y },\n { x: x2, y: y2 },\n { x: x3, y: y3 }\n ];\n }\n },\n\n addLabelLines: function(pie) {\n var lineGroups = pie.svg.insert(\"g\", \".\" + pie.cssPrefix + \"pieChart\") // meaning, BEFORE .pieChart\n .attr(\"class\", pie.cssPrefix + \"lineGroups\")\n .style(\"opacity\", 1);\n\n var lineGroup = lineGroups.selectAll(\".\" + pie.cssPrefix + \"lineGroup\")\n .data(pie.lineCoordGroups)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"lineGroup\");\n\n var lineFunction = d3.line()\n .curve(d3.curveBasis)\n .x(function(d) { return d.x; })\n .y(function(d) { return d.y; });\n\n lineGroup.append(\"path\")\n .attr(\"d\", lineFunction)\n .attr(\"stroke\", function(d, i) {\n return (pie.options.labels.lines.color === \"segment\") ? pie.options.colors[i] : pie.options.labels.lines.color;\n })\n .attr(\"stroke-width\", 1)\n .attr(\"fill\", \"none\")\n .style(\"opacity\", function(d, i) {\n var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;\n var isHidden = (percentage !== null && d.percentage < percentage) || pie.options.data.content[i].label === \"\";\n return isHidden ? 0 : 1;\n });\n },\n\n positionLabelGroups: function(pie, section) {\n if (pie.options.labels[section].format === \"none\")\n return;\n\n pie.__labels[section]\n .style(\"opacity\", function(d, i) {\n var percentage = pie.options.labels[section].hideWhenLessThanPercentage;\n return (percentage !== null && d.percentage < percentage) ? 
0 : 1;\n })\n .attr(\"transform\", function(d, i) {\n var x, y;\n if (section === \"outer\") {\n x = pie.outerLabelGroupData[i].x;\n y = pie.outerLabelGroupData[i].y;\n } else {\n var pieCenterCopy = extend(true, {}, pie.pieCenter);\n\n // now recompute the \"center\" based on the current _innerRadius\n if (pie.innerRadius > 0) {\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n var newCoords = math.translate(pie.pieCenter.x, pie.pieCenter.y, pie.innerRadius, angle);\n pieCenterCopy.x = newCoords.x;\n pieCenterCopy.y = newCoords.y;\n }\n\n var dims = helpers.getDimensions(pie.cssPrefix + \"labelGroup\" + i + \"-inner\");\n var xOffset = dims.w / 2;\n var yOffset = dims.h / 4; // confusing! Why 4? should be 2, but it doesn't look right\n\n x = pieCenterCopy.x + (pie.lineCoordGroups[i][0].x - pieCenterCopy.x) / 1.8;\n y = pieCenterCopy.y + (pie.lineCoordGroups[i][0].y - pieCenterCopy.y) / 1.8;\n\n x = x - xOffset;\n y = y + yOffset;\n }\n\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n\n getIncludes: function(val) {\n var addMainLabel = false;\n var addValue = false;\n var addPercentage = false;\n\n switch (val) {\n case \"label\":\n addMainLabel = true;\n break;\n case \"value\":\n addValue = true;\n break;\n case \"percentage\":\n addPercentage = true;\n break;\n case \"label-value1\":\n case \"label-value2\":\n addMainLabel = true;\n addValue = true;\n break;\n case \"label-percentage1\":\n case \"label-percentage2\":\n addMainLabel = true;\n addPercentage = true;\n break;\n }\n return {\n mainLabel: addMainLabel,\n value: addValue,\n percentage: addPercentage\n };\n },\n\n\n /**\n * This does the heavy-lifting to compute the actual coordinates for the outer label groups. It does two things:\n * 1. Make a first pass and position them in the ideal positions, based on the pie sizes\n * 2. Do some basic collision avoidance.\n */\n computeOuterLabelCoords: function(pie) {\n\n // 1. figure out the ideal positions for the outer labels\n pie.__labels.outer\n .each(function(d, i) {\n return labels.getIdealOuterLabelPositions(pie, i);\n });\n\n // 2. now adjust those positions to try to accommodate conflicts\n labels.resolveOuterLabelCollisions(pie);\n },\n\n /**\n * This attempts to resolve label positioning collisions.\n */\n resolveOuterLabelCollisions: function(pie) {\n if (pie.options.labels.outer.format === \"none\") {\n return;\n }\n\n var size = pie.options.data.content.length;\n labels.checkConflict(pie, 0, \"clockwise\", size);\n labels.checkConflict(pie, size-1, \"anticlockwise\", size);\n },\n\n checkConflict: function(pie, currIndex, direction, size) {\n var i, curr;\n\n if (size <= 1) {\n return;\n }\n\n var currIndexHemisphere = pie.outerLabelGroupData[currIndex].hs;\n if (direction === \"clockwise\" && currIndexHemisphere !== \"right\") {\n return;\n }\n if (direction === \"anticlockwise\" && currIndexHemisphere !== \"left\") {\n return;\n }\n var nextIndex = (direction === \"clockwise\") ? currIndex+1 : currIndex-1;\n\n // this is the current label group being looked at. We KNOW it's positioned properly (the first item\n // is always correct)\n var currLabelGroup = pie.outerLabelGroupData[currIndex];\n\n // this one we don't know about. 
That's the one we're going to look at and move if necessary\n var examinedLabelGroup = pie.outerLabelGroupData[nextIndex];\n\n var info = {\n labelHeights: pie.outerLabelGroupData[0].h,\n center: pie.pieCenter,\n lineLength: (pie.outerRadius + pie.options.labels.outer.pieDistance),\n heightChange: pie.outerLabelGroupData[0].h + 1 // 1 = padding\n };\n\n // loop through *ALL* label groups examined so far to check for conflicts. This is because when they're\n // very tightly fitted, a later label group may still appear high up on the page\n if (direction === \"clockwise\") {\n i = 0;\n for (; i<=currIndex; i++) {\n curr = pie.outerLabelGroupData[i];\n\n // if there's a conflict with this label group, shift the label to be AFTER the last known\n // one that's been properly placed\n if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {\n labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);\n break;\n }\n }\n } else {\n i = size - 1;\n for (; i >= currIndex; i--) {\n curr = pie.outerLabelGroupData[i];\n\n // if there's a conflict with this label group, shift the label to be AFTER the last known\n // one that's been properly placed\n if (!labels.isLabelHidden(pie, i) && helpers.rectIntersect(curr, examinedLabelGroup)) {\n labels.adjustLabelPos(pie, nextIndex, currLabelGroup, info);\n break;\n }\n }\n }\n labels.checkConflict(pie, nextIndex, direction, size);\n },\n\n isLabelHidden: function(pie, index) {\n var percentage = pie.options.labels.outer.hideWhenLessThanPercentage;\n return (percentage !== null && d.percentage < percentage) || pie.options.data.content[index].label === \"\";\n },\n\n // does a little math to shift a label into a new position based on the last properly placed one\n adjustLabelPos: function(pie, nextIndex, lastCorrectlyPositionedLabel, info) {\n var xDiff, yDiff, newXPos, newYPos;\n newYPos = lastCorrectlyPositionedLabel.y + info.heightChange;\n yDiff = info.center.y - newYPos;\n\n if (Math.abs(info.lineLength) > Math.abs(yDiff)) {\n xDiff = Math.sqrt((info.lineLength * info.lineLength) - (yDiff * yDiff));\n } else {\n xDiff = Math.sqrt((yDiff * yDiff) - (info.lineLength * info.lineLength));\n }\n\n if (lastCorrectlyPositionedLabel.hs === \"right\") {\n newXPos = info.center.x + xDiff;\n } else {\n newXPos = info.center.x - xDiff - pie.outerLabelGroupData[nextIndex].w;\n }\n\n pie.outerLabelGroupData[nextIndex].x = newXPos;\n pie.outerLabelGroupData[nextIndex].y = newYPos;\n },\n\n /**\n * @param i 0-N where N is the dataset size - 1.\n */\n getIdealOuterLabelPositions: function(pie, i) {\n var labelGroupNode = pie.svg.select(\"#\" + pie.cssPrefix + \"labelGroup\" + i + \"-outer\").node();\n if (!labelGroupNode) return;\n\n var labelGroupDims = labelGroupNode.getBBox();\n var angle = segments.getSegmentAngle(i, pie.options.data.content, pie.totalSize, { midpoint: true });\n\n var originalX = pie.pieCenter.x;\n var originalY = pie.pieCenter.y - (pie.outerRadius + pie.options.labels.outer.pieDistance);\n var newCoords = math.rotate(originalX, originalY, pie.pieCenter.x, pie.pieCenter.y, angle);\n\n // if the label is on the left half of the pie, adjust the values\n var hemisphere = \"right\"; // hemisphere\n if (angle > 180) {\n newCoords.x -= (labelGroupDims.width + 8);\n hemisphere = \"left\";\n } else {\n newCoords.x += 8;\n }\n\n pie.outerLabelGroupData[i] = {\n x: newCoords.x,\n y: newCoords.y,\n w: labelGroupDims.width,\n h: labelGroupDims.height,\n hs: hemisphere\n };\n }\n};\n\n //// --------- segments.js -----------\nvar 
segments = {\n\n effectMap: {\n \"none\": d3.easeLinear,\n \"bounce\": d3.easeBounce,\n \"linear\": d3.easeLinear,\n \"sin\": d3.easeSin,\n \"elastic\": d3.easeElastic,\n \"back\": d3.easeBack,\n \"quad\": d3.easeQuad,\n \"circle\": d3.easeCircle,\n \"exp\": d3.easeExp\n },\n\n /**\n * Creates the pie chart segments and displays them according to the desired load effect.\n * @private\n */\n create: function(pie) {\n var pieCenter = pie.pieCenter;\n var colors = pie.options.colors;\n var loadEffects = pie.options.effects.load;\n var segmentStroke = pie.options.misc.colors.segmentStroke;\n\n // we insert the pie chart BEFORE the title, to ensure the title overlaps the pie\n var pieChartElement = pie.svg.insert(\"g\", \"#\" + pie.cssPrefix + \"title\")\n .attr(\"transform\", function() { return math.getPieTranslateCenter(pieCenter); })\n .attr(\"class\", pie.cssPrefix + \"pieChart\");\n\n var arc = d3.arc()\n .innerRadius(pie.innerRadius)\n .outerRadius(pie.outerRadius)\n .startAngle(0)\n .endAngle(function(d) {\n return (d.value / pie.totalSize) * 2 * Math.PI;\n });\n\n var g = pieChartElement.selectAll(\".\" + pie.cssPrefix + \"arc\")\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"arc\");\n\n // if we're not fading in the pie, just set the load speed to 0\n //var loadSpeed = loadEffects.speed;\n //if (loadEffects.effect === \"none\") {\n //\tloadSpeed = 0;\n //}\n\n g.append(\"path\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"segment\" + i; })\n .attr(\"fill\", function(d, i) {\n var color = colors[i];\n if (pie.options.misc.gradient.enabled) {\n color = \"url(#\" + pie.cssPrefix + \"grad\" + i + \")\";\n }\n return color;\n })\n .style(\"stroke\", segmentStroke)\n .style(\"stroke-width\", 1)\n //.transition()\n //.ease(d3.easeCubicInOut)\n //.duration(loadSpeed)\n .attr(\"data-index\", function(d, i) { return i; })\n .attr(\"d\", arc);\n/*\n .attrTween(\"d\", function(b) {\n var i = d3.interpolate({ value: 0 }, b);\n return function(t) {\n var ret = pie.arc(i(t));\n console.log(ret);\n return ret;\n };\n });\n*/\n pie.svg.selectAll(\"g.\" + pie.cssPrefix + \"arc\")\n .attr(\"transform\",\n function(d, i) {\n var angle = 0;\n if (i > 0) {\n angle = segments.getSegmentAngle(i-1, pie.options.data.content, pie.totalSize);\n }\n return \"rotate(\" + angle + \")\";\n }\n );\n pie.arc = arc;\n },\n\n addGradients: function(pie) {\n var grads = pie.svg.append(\"defs\")\n .selectAll(\"radialGradient\")\n .data(pie.options.data.content)\n .enter().append(\"radialGradient\")\n .attr(\"gradientUnits\", \"userSpaceOnUse\")\n .attr(\"cx\", 0)\n .attr(\"cy\", 0)\n .attr(\"r\", \"120%\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"grad\" + i; });\n\n grads.append(\"stop\").attr(\"offset\", \"0%\").style(\"stop-color\", function(d, i) { return pie.options.colors[i]; });\n grads.append(\"stop\").attr(\"offset\", pie.options.misc.gradient.percentage + \"%\").style(\"stop-color\", pie.options.misc.gradient.color);\n },\n\n addSegmentEventHandlers: function(pie) {\n var arc = pie.svg.selectAll(\".\" + pie.cssPrefix + \"arc\");\n arc = arc.merge(pie.__labels.inner.merge(pie.__labels.outer));\n\n arc.on(\"click\", function() {\n var currentEl = d3.select(this);\n var segment;\n\n // mouseover works on both the segments AND the segment labels, hence the following\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n var index = currentEl.attr(\"data-index\");\n segment = 
d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onClickSegment, segment, isExpanded);\n if (pie.options.effects.pullOutSegmentOnClick.effect !== \"none\") {\n if (isExpanded) {\n segments.closeSegment(pie, segment.node());\n } else {\n segments.openSegment(pie, segment.node());\n }\n }\n });\n\n arc.on(\"mouseover\", function() {\n var currentEl = d3.select(this);\n var segment, index;\n\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n index = currentEl.attr(\"data-index\");\n segment = d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n if (pie.options.effects.highlightSegmentOnMouseover) {\n index = segment.attr(\"data-index\");\n var segColor = pie.options.colors[index];\n segment.style(\"fill\", helpers.getColorShade(segColor, pie.options.effects.highlightLuminosity));\n }\n\n if (pie.options.tooltips.enabled) {\n index = segment.attr(\"data-index\");\n tt.showTooltip(pie, index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoverSegment, segment, isExpanded);\n });\n\n arc.on(\"mousemove\", function() {\n tt.moveTooltip(pie);\n });\n\n arc.on(\"mouseout\", function() {\n var currentEl = d3.select(this);\n var segment, index;\n\n if (currentEl.attr(\"class\") === pie.cssPrefix + \"arc\") {\n segment = currentEl.select(\"path\");\n } else {\n index = currentEl.attr(\"data-index\");\n segment = d3.select(\"#\" + pie.cssPrefix + \"segment\" + index);\n }\n\n if (pie.options.effects.highlightSegmentOnMouseover) {\n index = segment.attr(\"data-index\");\n var color = pie.options.colors[index];\n if (pie.options.misc.gradient.enabled) {\n color = \"url(#\" + pie.cssPrefix + \"grad\" + index + \")\";\n }\n segment.style(\"fill\", color);\n }\n\n if (pie.options.tooltips.enabled) {\n index = segment.attr(\"data-index\");\n tt.hideTooltip(pie, index);\n }\n\n var isExpanded = segment.attr(\"class\") === pie.cssPrefix + \"expanded\";\n segments.onSegmentEvent(pie, pie.options.callbacks.onMouseoutSegment, segment, isExpanded);\n });\n },\n\n // helper function used to call the click, mouseover, mouseout segment callback functions\n onSegmentEvent: function(pie, func, segment, isExpanded) {\n if (!helpers.isFunction(func)) {\n return;\n }\n var index = parseInt(segment.attr(\"data-index\"), 10);\n func({\n segment: segment.node(),\n index: index,\n expanded: isExpanded,\n data: pie.options.data.content[index]\n });\n },\n\n openSegment: function(pie, segment) {\n if (pie.isOpeningSegment) {\n return;\n }\n pie.isOpeningSegment = true;\n\n segments.maybeCloseOpenSegment(pie);\n\n d3.select(segment)\n .transition()\n .ease(segments.effectMap[pie.options.effects.pullOutSegmentOnClick.effect])\n .duration(pie.options.effects.pullOutSegmentOnClick.speed)\n .attr(\"transform\", function(d, i) {\n var c = pie.arc.centroid(d),\n x = c[0],\n y = c[1],\n h = Math.sqrt(x*x + y*y),\n pullOutSize = parseInt(pie.options.effects.pullOutSegmentOnClick.size, 10);\n\n return \"translate(\" + ((x/h) * pullOutSize) + ',' + ((y/h) * pullOutSize) + \")\";\n })\n .on(\"end\", function(d, i) {\n pie.currentlyOpenSegment = segment;\n pie.isOpeningSegment = false;\n d3.select(segment).attr(\"class\", pie.cssPrefix + \"expanded\");\n });\n },\n\n maybeCloseOpenSegment: function(pie) {\n if (typeof pie !== 
'undefined' && pie.svg.selectAll(\".\" + pie.cssPrefix + \"expanded\").size() > 0) {\n segments.closeSegment(pie, pie.svg.select(\".\" + pie.cssPrefix + \"expanded\").node());\n }\n },\n\n closeSegment: function(pie, segment) {\n d3.select(segment)\n .transition()\n .duration(400)\n .attr(\"transform\", \"translate(0,0)\")\n .on(\"end\", function(d, i) {\n d3.select(segment).attr(\"class\", \"\");\n pie.currentlyOpenSegment = null;\n });\n },\n\n getCentroid: function(el) {\n var bbox = el.getBBox();\n return {\n x: bbox.x + bbox.width / 2,\n y: bbox.y + bbox.height / 2\n };\n },\n\n /**\n * General helper function to return a segment's angle, in various different ways.\n * @param index\n * @param opts optional object for fine-tuning exactly what you want.\n */\n getSegmentAngle: function(index, data, totalSize, opts) {\n var options = extend({\n // if true, this returns the full angle from the origin. Otherwise it returns the single segment angle\n compounded: true,\n\n // optionally returns the midpoint of the angle instead of the full angle\n midpoint: false\n }, opts);\n\n var currValue = data[index].value;\n var fullValue;\n if (options.compounded) {\n fullValue = 0;\n\n // get all values up to and including the specified index\n for (var i=0; i<=index; i++) {\n fullValue += data[i].value;\n }\n }\n\n if (typeof fullValue === 'undefined') {\n fullValue = currValue;\n }\n\n // now convert the full value to an angle\n var angle = (fullValue / totalSize) * 360;\n\n // lastly, if we want the midpoint, factor that sucker in\n if (options.midpoint) {\n var currAngle = (currValue / totalSize) * 360;\n angle -= (currAngle / 2);\n }\n\n return angle;\n }\n\n};\n\n //// --------- text.js -----------\nvar text = {\n offscreenCoord: -10000,\n\n addTitle: function(pie) {\n pie.__title = pie.svg.selectAll(\".\" + pie.cssPrefix + \"title\")\n .data([pie.options.header.title])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"id\", pie.cssPrefix + \"title\")\n .attr(\"class\", pie.cssPrefix + \"title\")\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", text.offscreenCoord)\n .attr(\"text-anchor\", function() {\n var location;\n if (pie.options.header.location === \"top-center\" || pie.options.header.location === \"pie-center\") {\n location = \"middle\";\n } else {\n location = \"left\";\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionTitle: function(pie) {\n var textComponents = pie.textComponents;\n var headerLocation = pie.options.header.location;\n var canvasPadding = pie.options.misc.canvasPadding;\n var canvasWidth = pie.options.size.canvasWidth;\n var titleSubtitlePadding = pie.options.header.titleSubtitlePadding;\n\n var x;\n if (headerLocation === \"top-left\") {\n x = canvasPadding.left;\n } else {\n x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;\n }\n\n // add whatever offset has been added by user\n x += pie.options.misc.pieCenterOffset.x;\n\n var y = canvasPadding.top + textComponents.title.h;\n\n if (headerLocation === \"pie-center\") {\n y = pie.pieCenter.y;\n\n // still not fully correct\n if (textComponents.subtitle.exists) {\n var totalTitleHeight = textComponents.title.h + titleSubtitlePadding + textComponents.subtitle.h;\n y = y - (totalTitleHeight / 2) + textComponents.title.h;\n } else {\n y 
+= (textComponents.title.h / 4);\n }\n }\n\n pie.__title\n .attr(\"x\", x)\n .attr(\"y\", y);\n },\n\n addSubtitle: function(pie) {\n var headerLocation = pie.options.header.location;\n\n pie.__subtitle = pie.svg.selectAll(\".\" + pie.cssPrefix + \"subtitle\")\n .data([pie.options.header.subtitle])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", text.offscreenCoord)\n .attr(\"id\", pie.cssPrefix + \"subtitle\")\n .attr(\"class\", pie.cssPrefix + \"subtitle\")\n .attr(\"text-anchor\", function() {\n var location;\n if (headerLocation === \"top-center\" || headerLocation === \"pie-center\") {\n location = \"middle\";\n } else {\n location = \"left\";\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionSubtitle: function(pie) {\n var canvasPadding = pie.options.misc.canvasPadding;\n var canvasWidth = pie.options.size.canvasWidth;\n\n var x;\n if (pie.options.header.location === \"top-left\") {\n x = canvasPadding.left;\n } else {\n x = ((canvasWidth - canvasPadding.right) / 2) + canvasPadding.left;\n }\n\n // add whatever offset has been added by user\n x += pie.options.misc.pieCenterOffset.x;\n\n var y = text.getHeaderHeight(pie);\n\n pie.__subtitle\n .attr(\"x\", x)\n .attr(\"y\", y);\n },\n\n addFooter: function(pie) {\n pie.__footer = pie.svg.selectAll(\".\" + pie.cssPrefix + \"footer\")\n .data([pie.options.footer])\n .enter()\n .append(\"text\")\n .text(function(d) { return d.text; })\n .attr(\"x\", text.offscreenCoord)\n .attr(\"y\", text.offscreenCoord)\n .attr(\"id\", pie.cssPrefix + \"footer\")\n .attr(\"class\", pie.cssPrefix + \"footer\")\n .attr(\"text-anchor\", function() {\n var location = \"left\";\n if (pie.options.footer.location === \"bottom-center\") {\n location = \"middle\";\n } else if (pie.options.footer.location === \"bottom-right\") {\n location = \"left\"; // on purpose. 
We have to change the x-coord to make it properly right-aligned\n }\n return location;\n })\n .attr(\"fill\", function(d) { return d.color; })\n .style(\"font-size\", function(d) { return d.fontSize + \"px\"; })\n .style(\"font-weight\", function(d) { return d.fontWeight; })\n .style(\"font-family\", function(d) { return d.font; });\n },\n\n positionFooter: function(pie) {\n var footerLocation = pie.options.footer.location;\n var footerWidth = pie.textComponents.footer.w;\n var canvasWidth = pie.options.size.canvasWidth;\n var canvasHeight = pie.options.size.canvasHeight;\n var canvasPadding = pie.options.misc.canvasPadding;\n\n var x;\n if (footerLocation === \"bottom-left\") {\n x = canvasPadding.left;\n } else if (footerLocation === \"bottom-right\") {\n x = canvasWidth - footerWidth - canvasPadding.right;\n } else {\n x = canvasWidth / 2; // TODO - shouldn't this also take into account padding?\n }\n\n pie.__footer\n .attr(\"x\", x)\n .attr(\"y\", canvasHeight - canvasPadding.bottom);\n },\n\n getHeaderHeight: function(pie) {\n var h;\n if (pie.textComponents.title.exists) {\n\n // if the subtitle isn't defined, it'll be set to 0\n var totalTitleHeight = pie.textComponents.title.h + pie.options.header.titleSubtitlePadding + pie.textComponents.subtitle.h;\n if (pie.options.header.location === \"pie-center\") {\n h = pie.pieCenter.y - (totalTitleHeight / 2) + totalTitleHeight;\n } else {\n h = totalTitleHeight + pie.options.misc.canvasPadding.top;\n }\n } else {\n if (pie.options.header.location === \"pie-center\") {\n var footerPlusPadding = pie.options.misc.canvasPadding.bottom + pie.textComponents.footer.h;\n h = ((pie.options.size.canvasHeight - footerPlusPadding) / 2) + pie.options.misc.canvasPadding.top + (pie.textComponents.subtitle.h / 2);\n } else {\n h = pie.options.misc.canvasPadding.top + pie.textComponents.subtitle.h;\n }\n }\n return h;\n }\n};\n\n //// --------- validate.js -----------\nvar tt = {\n addTooltips: function(pie) {\n\n // group the label groups (label, percentage, value) into a single element for simpler positioning\n var tooltips = pie.svg.insert(\"g\")\n .attr(\"class\", pie.cssPrefix + \"tooltips\");\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip\")\n .data(pie.options.data.content)\n .enter()\n .append(\"g\")\n .attr(\"class\", pie.cssPrefix + \"tooltip\")\n .attr(\"id\", function(d, i) { return pie.cssPrefix + \"tooltip\" + i; })\n .style(\"opacity\", 0)\n .append(\"rect\")\n .attr(\"rx\", pie.options.tooltips.styles.borderRadius)\n .attr(\"ry\", pie.options.tooltips.styles.borderRadius)\n .attr(\"x\", -pie.options.tooltips.styles.padding)\n .attr(\"opacity\", pie.options.tooltips.styles.backgroundOpacity)\n .style(\"fill\", pie.options.tooltips.styles.backgroundColor);\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip\")\n .data(pie.options.data.content)\n .append(\"text\")\n .attr(\"fill\", function(d) { return pie.options.tooltips.styles.color; })\n .style(\"font-size\", function(d) { return pie.options.tooltips.styles.fontSize; })\n .style(\"font-weight\", function(d) { return pie.options.tooltips.styles.fontWeight; })\n .style(\"font-family\", function(d) { return pie.options.tooltips.styles.font; })\n .text(function(d, i) {\n var caption = pie.options.tooltips.string;\n if (pie.options.tooltips.type === \"caption\") {\n caption = d.caption;\n }\n return tt.replacePlaceholders(pie, caption, i, {\n label: d.label,\n value: d.value,\n percentage: d.percentage\n });\n });\n\n tooltips.selectAll(\".\" + pie.cssPrefix + \"tooltip 
rect\")\n .attr(\"width\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return dims.w + (2 * pie.options.tooltips.styles.padding);\n })\n .attr(\"height\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return dims.h + (2 * pie.options.tooltips.styles.padding);\n })\n .attr(\"y\", function (d, i) {\n var dims = helpers.getDimensions(pie.cssPrefix + \"tooltip\" + i);\n return -(dims.h / 2) + 1;\n });\n },\n\n showTooltip: function(pie, index) {\n var fadeInSpeed = pie.options.tooltips.styles.fadeInSpeed;\n if (tt.currentTooltip === index) {\n fadeInSpeed = 1;\n }\n\n tt.currentTooltip = index;\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + index)\n .transition()\n .duration(fadeInSpeed)\n .style(\"opacity\", function() { return 1; });\n\n tt.moveTooltip(pie);\n },\n\n moveTooltip: function(pie) {\n d3.selectAll(\"#\" + pie.cssPrefix + \"tooltip\" + tt.currentTooltip)\n .attr(\"transform\", function(d) {\n var mouseCoords = d3.mouse(this.parentNode);\n var x = mouseCoords[0] + pie.options.tooltips.styles.padding + 2;\n var y = mouseCoords[1] - (2 * pie.options.tooltips.styles.padding) - 2;\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n hideTooltip: function(pie, index) {\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + index)\n .style(\"opacity\", function() { return 0; });\n\n // move the tooltip offscreen. This ensures that when the user next mouseovers the segment the hidden\n // element won't interfere\n d3.select(\"#\" + pie.cssPrefix + \"tooltip\" + tt.currentTooltip)\n .attr(\"transform\", function(d, i) {\n // klutzy, but it accounts for tooltip padding which could push it onscreen\n var x = pie.options.size.canvasWidth + 1000;\n var y = pie.options.size.canvasHeight + 1000;\n return \"translate(\" + x + \",\" + y + \")\";\n });\n },\n\n replacePlaceholders: function(pie, str, index, replacements) {\n\n // if the user has defined a placeholderParser function, call it before doing the replacements\n if (helpers.isFunction(pie.options.tooltips.placeholderParser)) {\n pie.options.tooltips.placeholderParser(index, replacements);\n }\n\n var replacer = function() {\n return function(match) {\n var placeholder = arguments[1];\n if (replacements.hasOwnProperty(placeholder)) {\n return replacements[arguments[1]];\n } else {\n return arguments[0];\n }\n };\n };\n return str.replace(/\\{(\\w+)\\}/g, replacer(replacements));\n }\n};\n\n\n // --------------------------------------------------------------------------------------------\n\n // our constructor\n var d3pie = function(element, options) {\n\n // element can be an ID or DOM element\n this.element = element;\n if (typeof element === \"string\") {\n var el = element.replace(/^#/, \"\"); // replace any jQuery-like ID hash char\n this.element = document.getElementById(el);\n }\n\n var opts = {};\n extend(true, opts, defaultSettings, options);\n this.options = opts;\n\n // if the user specified a custom CSS element prefix (ID, class), use it\n if (this.options.misc.cssPrefix !== null) {\n this.cssPrefix = this.options.misc.cssPrefix;\n } else {\n this.cssPrefix = \"p\" + _uniqueIDCounter + \"_\";\n _uniqueIDCounter++;\n }\n\n\n // now run some validation on the user-defined info\n if (!validate.initialCheck(this)) {\n return;\n }\n\n // add a data-role to the DOM node to let anyone know that it contains a d3pie instance, and the d3pie version\n d3.select(this.element).attr(_scriptName, _version);\n\n // things that are done once\n 
_setupData.call(this);\n _init.call(this);\n };\n\n d3pie.prototype.recreate = function() {\n // now run some validation on the user-defined info\n if (!validate.initialCheck(this)) {\n return;\n }\n\n _setupData.call(this);\n _init.call(this);\n };\n\n d3pie.prototype.redraw = function() {\n this.element.innerHTML = \"\";\n _init.call(this);\n };\n\n d3pie.prototype.destroy = function() {\n this.element.innerHTML = \"\"; // clear out the SVG\n d3.select(this.element).attr(_scriptName, null); // remove the data attr\n };\n\n /**\n * Returns all pertinent info about the current open info. Returns null if nothing's open, or if one is, an object of\n * the following form:\n * \t{\n * \t element: DOM NODE,\n * \t index: N,\n * \t data: {}\n * \t}\n */\n d3pie.prototype.getOpenSegment = function() {\n var segment = this.currentlyOpenSegment;\n if (segment !== null && typeof segment !== \"undefined\") {\n var index = parseInt(d3.select(segment).attr(\"data-index\"), 10);\n return {\n element: segment,\n index: index,\n data: this.options.data.content[index]\n };\n } else {\n return null;\n }\n };\n\n d3pie.prototype.openSegment = function(index) {\n index = parseInt(index, 10);\n if (index < 0 || index > this.options.data.content.length-1) {\n return;\n }\n segments.openSegment(this, d3.select(\"#\" + this.cssPrefix + \"segment\" + index).node());\n };\n\n d3pie.prototype.closeSegment = function() {\n segments.maybeCloseOpenSegment(this);\n };\n\n // this let's the user dynamically update aspects of the pie chart without causing a complete redraw. It\n // intelligently re-renders only the part of the pie that the user specifies. Some things cause a repaint, others\n // just redraw the single element\n d3pie.prototype.updateProp = function(propKey, value) {\n switch (propKey) {\n case \"header.title.text\":\n var oldVal = helpers.processObj(this.options, propKey);\n helpers.processObj(this.options, propKey, value);\n d3.select(\"#\" + this.cssPrefix + \"title\").html(value);\n if ((oldVal === \"\" && value !== \"\") || (oldVal !== \"\" && value === \"\")) {\n this.redraw();\n }\n break;\n\n case \"header.subtitle.text\":\n var oldValue = helpers.processObj(this.options, propKey);\n helpers.processObj(this.options, propKey, value);\n d3.select(\"#\" + this.cssPrefix + \"subtitle\").html(value);\n if ((oldValue === \"\" && value !== \"\") || (oldValue !== \"\" && value === \"\")) {\n this.redraw();\n }\n break;\n\n case \"callbacks.onload\":\n case \"callbacks.onMouseoverSegment\":\n case \"callbacks.onMouseoutSegment\":\n case \"callbacks.onClickSegment\":\n case \"effects.pullOutSegmentOnClick.effect\":\n case \"effects.pullOutSegmentOnClick.speed\":\n case \"effects.pullOutSegmentOnClick.size\":\n case \"effects.highlightSegmentOnMouseover\":\n case \"effects.highlightLuminosity\":\n helpers.processObj(this.options, propKey, value);\n break;\n\n // everything else, attempt to update it & do a repaint\n default:\n helpers.processObj(this.options, propKey, value);\n\n this.destroy();\n this.recreate();\n break;\n }\n };\n\n\n // ------------------------------------------------------------------------------------------------\n\n var _setupData = function () {\n this.options.data.content = math.sortPieData(this);\n if (this.options.data.smallSegmentGrouping.enabled) {\n this.options.data.content = helpers.applySmallSegmentGrouping(this.options.data.content, this.options.data.smallSegmentGrouping);\n }\n\n\n this.options.colors = helpers.initSegmentColors(this);\n this.totalSize = 
math.getTotalPieSize(this.options.data.content);\n\n var dp = this.options.labels.percentage.decimalPlaces;\n\n // add in percentage data to content\n for (var i=0; i {\n // logic based on old dashboard\n\n // http://stackoverflow.com/questions/984510/what-is-my-script-src-url\n // http://stackoverflow.com/questions/6941533/get-protocol-domain-and-port-from-url\n const script = cond([\n [Boolean, identity],\n // \"last\" typings don't work well with HTMLScriptElement\n // if document.currentScript is not available\n [T, () => last(document.getElementsByTagName(\"script\") as unknown as [HTMLScriptElement])],\n ])(currentScript)\n\n return script.src\n}\n\nexport const getPathFromScriptSource = (source: string) => {\n // match strings not containing slash, ending with `.js`, with optional suffix started by `?`\n const jsFilenameRegex = \"[^\\\\/]*\\\\.js(\\\\/?.*)?$\"\n const staticJsPath = \"/static/js\"\n return source.replace(new RegExp(jsFilenameRegex), \"\")\n .replace(staticJsPath, \"\")\n}\n\nconst getDefaultServer = () => {\n if (isDevelopmentEnv) {\n return \"http://localhost:19999\"\n }\n\n // Agent Dashboard does not need sophisticated server-detection, which is causing problems\n // when navigating through streamed nodes. Let's overwrite that setting\n if (isMainJs) {\n const pathname = window.location.pathname\n .replace(\"index.html\", \"\")\n // todo consider .replace(/[^\\/]*\\.html/, \"\") (every .html file in the url)\n .replace(\"default.html\", \"\") // for netdata demo servers\n return window.location.origin + pathname.replace(/\\/v1\\/?$/, \"\")\n }\n\n const source = getScriptSource()\n return getPathFromScriptSource(source).replace(/\\/v1\\/?$/, \"\")\n}\n\n// append \"/\" at the end, if it's not already there\nexport const alwaysEndWithSlash = cond([\n [pipe(last, equals(\"/\")), identity],\n [T, (x: string) => concat(x, \"/\")], // R.__ typings don't work well\n])\n\nexport const serverDefault: string = alwaysEndWithSlash(\n window.netdataServer || getDefaultServer(),\n)\n\nexport const serverStatic: string = isDevelopmentEnv\n ? 
\"/\" // for localhost:3000/css/...\n : alwaysEndWithSlash(getDefaultServer()) // by default, load from netdata server\n","import { prop } from \"ramda\"\nimport { createSelector } from \"reselect\"\n\nimport { AppStateT } from \"store/app-state\"\nimport { selectChartMetadataFromChartsCall } from \"domains/global/selectors\"\n\nimport { ChartState } from \"./chart-types\"\nimport { initialSingleState } from \"./reducer\"\nimport { storeKey } from \"./constants\"\n\nexport const selectChartsState = (state: AppStateT) => state[storeKey]\nexport const selectSingleChartState = createSelector(\n selectChartsState,\n (_: unknown, { id }: { chartId?: string, id: string }) => id,\n (chartsState, id) => chartsState[id] || initialSingleState,\n)\n\nexport const selectChartData = createSelector(\n selectSingleChartState,\n (chartState) => chartState.chartData,\n)\n\nconst selectChartMetadataFromExplicitCall = createSelector(\n selectSingleChartState, prop(\"chartMetadata\"),\n)\n// dashboard.js normally fetches metadata for every individual charts, but we can prevent it\n// if metadata for ALL charts will be present in state.global (from single call)\nconst selectChartMetadata = createSelector(\n selectChartMetadataFromChartsCall,\n selectChartMetadataFromExplicitCall,\n (metadataFromAll, metadataFromSingleCall) => metadataFromAll || metadataFromSingleCall,\n)\nconst selectIsFetchingDetails = createSelector(selectSingleChartState, prop(\"isFetchingDetails\"))\n\nexport const makeSelectChartMetadataRequest = () => createSelector(\n selectChartMetadata,\n selectIsFetchingDetails,\n (chartMetadata, isFetchingDetails) => ({ chartMetadata, isFetchingDetails }),\n)\n\nexport const selectChartViewRange = createSelector(\n selectSingleChartState,\n (chartState) => chartState.viewRange,\n)\n\nexport const selectChartIsFetchingData = createSelector(\n selectSingleChartState,\n (chartState) => chartState.isFetchingData,\n)\n\nexport const selectChartFetchDataParams = createSelector(\n selectSingleChartState,\n (chartState) => chartState.fetchDataParams,\n)\n\nexport const selectResizeHeight = createSelector(\n selectSingleChartState,\n (chartState) => chartState.resizeHeight,\n)\n\nexport const selectChartPanAndZoom = createSelector(selectSingleChartState, prop(\"chartPanAndZoom\"))\n\n// count the nr of \"success\" or \"failure\" charts\nconst hasCompletedFetching = (chartState: ChartState) => chartState.isFetchDataFailure\n || Boolean(chartState.chartData) || chartState.isFetchDetailsFailure\n\nexport const selectChartsAreFetching = createSelector(selectChartsState, chartsState =>\n Object.values(chartsState).some(({ isFetchingData }) => isFetchingData)\n)\n\nexport const selectAmountOfFetchedCharts = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (hasCompletedFetching(chartState) ? 1 : 0), 0),\n)\n\nexport const selectAmountOfCharts = createSelector(\n selectChartsState,\n (chartsState) => Object.keys(chartsState).length,\n)\n\nexport const selectNameOfAnyFetchingChart = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .find((chartState) => chartState.isFetchingData)?.chartId,\n)\n\nexport const selectAmountOfSnapshotsFetched = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (chartState.snapshotData ? 
1 : 0), 0),\n)\n\nexport const selectAmountOfSnapshotsFailed = createSelector(\n selectChartsState,\n (chartsState) => Object.values(chartsState)\n .reduce((acc, chartState) => acc + (chartState.snapshotDataIsError ? 1 : 0), 0),\n)\n","import { useMemo } from \"react\"\nimport moment from \"moment\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectTimezoneSetting, selectUTCOffsetSetting } from \"domains/global/selectors\"\n\nconst zeropad = x => {\n if (x > -10 && x < 10) {\n return `0${x.toString()}`\n }\n return x.toString()\n}\n\nexport const isSupportingDateTimeFormat = !!(Intl && Intl.DateTimeFormat && navigator.language)\n\nconst narrowToDate = d => (typeof d === \"number\" ? new Date(d) : d)\n// these are the old netdata functions\n// we fallback to these, if the new ones fail\nexport const localeDateStringNative = d => narrowToDate(d).toLocaleDateString()\nexport const localeTimeStringNative = d => narrowToDate(d).toLocaleTimeString()\nexport const xAxisTimeStringNative = d => {\n const date = narrowToDate(d)\n return `${zeropad(date.getHours())}:${zeropad(date.getMinutes())}:${zeropad(date.getSeconds())}`\n}\n\nexport const isProperTimezone = timeZone => {\n try {\n Intl.DateTimeFormat(navigator.language, {\n localeMatcher: \"best fit\",\n formatMatcher: \"best fit\",\n weekday: \"short\",\n year: \"numeric\",\n month: \"short\",\n day: \"2-digit\",\n timeZone,\n })\n } catch (e) {\n return false\n }\n return true\n}\n\nexport const getDateWithOffset = (date, offset) => moment(date).utcOffset(offset)\n\nconst getOptions = ({ long, isTime, secs, timezone }) => ({\n hourCycle: \"h23\",\n ...(isTime\n ? {}\n : long\n ? { weekday: \"short\", year: \"numeric\", month: \"short\", day: \"2-digit\" }\n : { dateStyle: \"short\" }),\n ...(isTime && {\n timeStyle: secs ? \"medium\" : \"short\",\n }),\n timeZone: timezone,\n})\n\nconst dateFormat = (date, { locale, ...options }) =>\n new Intl.DateTimeFormat(locale ?? navigator.language, getOptions(options)).format(date)\n\nconst getTimezone = timezone => (timezone !== \"\" && timezone !== \"default\" ? timezone : undefined)\n\nexport const useDateTime = () => {\n const timezone = useSelector(selectTimezoneSetting)\n const utcOffset = useSelector(selectUTCOffsetSetting)\n\n const localeDateString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? (date, options) =>\n dateFormat(date, { long: true, timezone: getTimezone(timezone), ...options })\n : localeDateStringNative\n }, [timezone])\n\n const localeTimeString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? (date, options) =>\n dateFormat(date, {\n secs: true,\n isTime: true,\n timezone: getTimezone(timezone),\n ...options,\n })\n : localeTimeStringNative\n }, [timezone])\n\n const xAxisTimeString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? date => dateFormat(date, { secs: true, isTime: true, timezone: getTimezone(timezone) })\n : xAxisTimeStringNative\n }, [timezone])\n\n const xAxisDateString = useMemo(() => {\n return isSupportingDateTimeFormat\n ? 
date => dateFormat(date, { long: true, timezone: getTimezone(timezone) })\n : xAxisTimeStringNative\n }, [timezone])\n\n return {\n localeDateString,\n localeTimeString,\n xAxisDateString,\n xAxisTimeString,\n utcOffset,\n }\n}\n","import { createAction } from \"redux-act\"\nimport { ChartsMetadata } from \"domains/global/types\"\n\nimport { storeKey } from \"./constants\"\n\nexport interface startSnapshotModeAction {\n charts: ChartsMetadata\n dataPoints: number\n}\nexport const startSnapshotModeAction = createAction(\n `${storeKey}/isSnapshotModeAction`,\n)\n\nexport const stopSnapshotModeAction = createAction(`${storeKey}/stopSnapshotModeAction`)\n\nexport interface ShowSignInModalAction { signInLinkHref: string }\nexport const showSignInModalAction = createAction(\n `${storeKey}/showSignInModal`,\n)\n\nexport const explicitlySignInAction = createAction(`${storeKey}/explicitlySignIn`)\n\nexport interface IsSignedInAction { isSignedIn: boolean }\nexport const isSignedInAction = createAction(`${storeKey}/isSignedInAction`)\n\nexport interface SetOfflineAction { offline: boolean }\nexport const setOfflineAction = createAction(`${storeKey}/setOfflineAction`)\n","export const storeKey = \"chart\"\n\nexport const fallbackUpdateTimeInterval = 2000\n\n// corresponds to force_update_at in old dashboard\n// throttle time between use globalPanAndZoom change actions - and requests sent to server\nexport const panAndZoomDelay = 300\n","export const seconds4human = (\n totalSeconds: number | string, overrideOptions?: {[key: string]: string},\n) => {\n const defaultOptions: {[key: string]: string} = {\n now: \"now\",\n space: \" \",\n negative_suffix: \"ago\",\n day: \"day\",\n days: \"days\",\n hour: \"hour\",\n hours: \"hours\",\n minute: \"min\",\n minutes: \"mins\",\n second: \"sec\",\n seconds: \"secs\",\n and: \"and\",\n }\n\n const options = typeof overrideOptions === \"object\"\n ? { ...defaultOptions, ...overrideOptions }\n : defaultOptions\n\n let seconds = typeof totalSeconds === \"string\"\n ? parseInt(totalSeconds, 10)\n : totalSeconds\n\n if (seconds === 0) {\n return options.now\n }\n\n let suffix = \"\"\n if (seconds < 0) {\n seconds = -seconds\n if (options.negative_suffix !== \"\") {\n suffix = options.space + options.negative_suffix\n }\n }\n\n const days = Math.floor(seconds / 86400)\n seconds -= (days * 86400)\n\n const hours = Math.floor(seconds / 3600)\n seconds -= (hours * 3600)\n\n const minutes = Math.floor(seconds / 60)\n seconds -= (minutes * 60)\n\n const strings = []\n\n if (days > 1) {\n strings.push(days.toString() + options.space + options.days)\n } else if (days === 1) {\n strings.push(days.toString() + options.space + options.day)\n }\n\n if (hours > 1) {\n strings.push(hours.toString() + options.space + options.hours)\n } else if (hours === 1) {\n strings.push(hours.toString() + options.space + options.hour)\n }\n\n if (minutes > 1) {\n strings.push(minutes.toString() + options.space + options.minutes)\n } else if (minutes === 1) {\n strings.push(minutes.toString() + options.space + options.minute)\n }\n\n if (seconds > 1) {\n strings.push(Math.floor(seconds).toString() + options.space + options.seconds)\n } else if (seconds === 1) {\n strings.push(Math.floor(seconds).toString() + options.space + options.second)\n }\n\n if (strings.length === 1) {\n return strings.pop() + suffix\n }\n\n const last = strings.pop()\n return `${strings.join(\", \")} ${options.and} ${last}${suffix}`\n}\n","/*! 
@license Copyright 2017 Dan Vanderkam (danvdk@gmail.com) MIT-licensed (http://opensource.org/licenses/MIT) */\n// SPDX-License-Identifier: MIT\n!function(t){if(\"object\"==typeof exports&&\"undefined\"!=typeof module)module.exports=t();else if(\"function\"==typeof define&&define.amd)define([],t);else{var e;e=\"undefined\"!=typeof window?window:\"undefined\"!=typeof global?global:\"undefined\"!=typeof self?self:this,e.Dygraph=t()}}(function(){return function t(e,a,i){function n(o,s){if(!a[o]){if(!e[o]){var l=\"function\"==typeof require&&require;if(!s&&l)return l(o,!0);if(r)return r(o,!0);var h=new Error(\"Cannot find module '\"+o+\"'\");throw h.code=\"MODULE_NOT_FOUND\",h}var u=a[o]={exports:{}};e[o][0].call(u.exports,function(t){var a=e[o][1][t];return n(a||t)},u,u.exports,t,e,a,i)}return a[o].exports}for(var r=\"function\"==typeof require&&require,o=0;o1)for(var a=1;a=0){var d=t[l-e];null===d[1]||isNaN(d[1])||(n-=d[2][0],o-=d[1],r-=d[2][1],s-=1)}u[l]=s?[t[l][0],1*o/s,[1*n/s,1*r/s]]:[t[l][0],null,[null,null]]}return u},a.default=r,e.exports=a.default},{\"./bars\":5}],3:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./bars\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i,n,r,o,s=[],l=a.get(\"sigma\"),h=a.get(\"logscale\"),u=0;u=0&&(u-=t[r-e][2][2],d-=t[r-e][2][3]);var c=t[r][0],p=d?u/d:0;if(h)if(d){var g=p<0?0:p,f=d,v=l*Math.sqrt(g*(1-g)/f+l*l/(4*f*f)),_=1+l*l/d;i=(g+l*l/(2*d)-v)/_,n=(g+l*l/(2*d)+v)/_,s[r]=[c,100*g,[100*i,100*n]]}else s[r]=[c,0,[0,0]];else o=d?l*Math.sqrt(p*(1-p)/d):1,s[r]=[c,100*p,[100*(p-o),100*(p+o)]]}return s},a.default=r,e.exports=a.default},{\"./bars\":5}],5:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"./datahandler\"),r=i(n),o=t(\"../dygraph-layout\"),s=i(o),l=function(){r.default.call(this)};l.prototype=new r.default,l.prototype.extractSeries=function(t,e,a){},l.prototype.rollingAverage=function(t,e,a){},l.prototype.onPointsCreated_=function(t,e){for(var a=0;ai&&(l=i),hr)&&(r=h),(null===n||l=0&&(r-=t[i-e][2][0],o-=t[i-e][2][1]);var s=t[i][0],l=o?r/o:0;n[i]=[s,100*l]}return n},a.default=s,e.exports=a.default},{\"./datahandler\":6,\"./default\":8}],8:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./datahandler\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(){};r.prototype=new n.default,r.prototype.extractSeries=function(t,e,a){for(var i=[],n=a.get(\"logscale\"),r=0;rr)&&(r=i),(null===n||i=2,v=t.drawingContext;v.save(),f&&v.setLineDash&&v.setLineDash(i);var _=s._drawSeries(t,g,a,l,r,d,u,e);s._drawPointsOnLine(t,_,o,e,l),f&&v.setLineDash&&v.setLineDash([]),v.restore()},s._drawSeries=function(t,e,a,i,n,r,o,s){var l,h,u=null,d=null,c=null,p=[],g=!0,f=t.drawingContext;f.beginPath(),f.strokeStyle=s,f.lineWidth=a;for(var v=e.array_,_=e.end_,y=e.predicate_,x=e.start_;x<_;x++){if(h=v[x],y){for(;x<_&&!y(v,x);)x++;if(x==_)break;h=v[x]}if(null===h.canvasy||h.canvasy!=h.canvasy)o&&null!==u&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),u=d=null;else{if(l=!1,r||null===u){e.nextIdx_=x,e.next(),c=e.hasNext?e.peek.canvasy:null;var 
m=null===c||c!=c;l=null===u&&m,r&&(!g&&null===u||e.hasNext&&m)&&(l=!0)}null!==u?a&&(o&&(f.moveTo(u,d),f.lineTo(h.canvasx,d)),f.lineTo(h.canvasx,h.canvasy)):f.moveTo(h.canvasx,h.canvasy),(n||l)&&p.push([h.canvasx,h.canvasy,h.idx]),u=h.canvasx,d=h.canvasy}g=!1}return f.stroke(),p},s._drawPointsOnLine=function(t,e,a,i,n){for(var r=t.drawingContext,o=0;o0;a--){var i=e[a];if(2==i[0]){var n=e[a-1];n[1]==i[1]&&n[2]==i[2]&&e.splice(a,1)}}for(var a=0;a2&&!t){var r=0;2==e[0][0]&&r++;for(var o=null,s=null,a=r;ae[s][2]&&(s=a)}}var h=e[o],u=e[s];e.splice(r,e.length-r),os?(e.push(u),e.push(h)):e.push(h)}}},o=function(a){r(a);for(var o=0,s=e.length;o1,h=s-a>1;o(l||h),a=s}e.push([t,n,r])};return{moveTo:function(t,e){s(2,t,e)},lineTo:function(t,e){s(1,t,e)},stroke:function(){o(!0),t.stroke()},fill:function(){o(!0),t.fill()},beginPath:function(){o(!0),t.beginPath()},closePath:function(){o(!0),t.closePath()},_count:function(){return n}}},s._fillPlotter=function(t){if(!t.singleSeriesName&&0===t.seriesIndex){for(var e=t.dygraph,a=e.getLabels().slice(1),i=a.length;i>=0;i--)e.visibility()[i]||a.splice(i,1);if(function(){for(var t=0;t=0;n--){var r=i[n];t.lineTo(r[0],r[1])}},v=d-1;v>=0;v--){var _=t.drawingContext,y=a[v];if(e.getBooleanOption(\"fillGraph\",y)){var x=e.getNumericOption(\"fillAlpha\",y),m=e.getBooleanOption(\"stepPlot\",y),b=p[v],w=e.axisPropertiesForSeries(y),A=1+w.minyval*w.yscale;A<0?A=0:A>1&&(A=1),A=h.h*A+h.y;var O,D=u[v],E=n.createIterator(D,0,D.length,s._getIteratorPredicate(e.getBooleanOption(\"connectSeparatedPoints\",y))),L=NaN,T=[-1,-1],S=n.toRGB_(b),P=\"rgba(\"+S.r+\",\"+S.g+\",\"+S.b+\",\"+x+\")\";_.fillStyle=P,_.beginPath();var C,M=!0;(D.length>2*e.width_||o.default.FORCE_FAST_PROXY)&&(_=s._fastCanvasProxy(_));for(var N,F=[];E.hasNext;)if(N=E.next(),n.isOK(N.y)||m){if(c){if(!M&&C==N.xval)continue;M=!1,C=N.xval,r=g[N.canvasx];var k;k=void 0===r?A:l?r[0]:r,O=[N.canvasy,k],m?-1===T[0]?g[N.canvasx]=[N.canvasy,A]:g[N.canvasx]=[N.canvasy,T[0]]:g[N.canvasx]=N.canvasy}else O=isNaN(N.canvasy)&&m?[h.y+h.h,A]:[N.canvasy,A];isNaN(L)?(_.moveTo(N.canvasx,O[1]),_.lineTo(N.canvasx,O[0])):(m?(_.lineTo(N.canvasx,T[0]),_.lineTo(N.canvasx,O[0])):_.lineTo(N.canvasx,O[0]),c&&(F.push([L,T[1]]),l&&r?F.push([N.canvasx,r[1]]):F.push([N.canvasx,O[1]]))),T=O,L=N.canvasx}else f(_,L,T[1],F),F=[],L=NaN,null===N.y_stacked||isNaN(N.y_stacked)||(g[N.canvasx]=h.h*N.y_stacked+h.y);l=m,O&&N&&(f(_,N.canvasx,O[1],F),F=[]),_.fill()}}}},a.default=s,e.exports=a.default},{\"./dygraph\":18,\"./dygraph-utils\":17}],10:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}function n(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}Object.defineProperty(a,\"__esModule\",{value:!0});var r=t(\"./dygraph-tickers\"),o=n(r),s=t(\"./dygraph-interaction-model\"),l=i(s),h=t(\"./dygraph-canvas\"),u=i(h),d=t(\"./dygraph-utils\"),c=n(d),p={highlightCircleSize:3,highlightSeriesOpts:null,highlightSeriesBackgroundAlpha:.5,highlightSeriesBackgroundColor:\"rgb(255, 255, 255)\",labelsSeparateLines:!1,labelsShowZeroValues:!0,labelsKMB:!1,labelsKMG2:!1,showLabelsOnHighlight:!0,digitsAfterDecimal:2,maxNumberWidth:6,sigFigs:null,strokeWidth:1,strokeBorderWidth:0,strokeBorderColor:\"white\",axisTickSize:3,axisLabelFontSize:14,rightGap:5,showRoller:!1,xValueParser:void 
0,delimiter:\",\",sigma:2,errorBars:!1,fractions:!1,wilsonInterval:!0,customBars:!1,fillGraph:!1,fillAlpha:.15,connectSeparatedPoints:!1,stackedGraph:!1,stackedGraphNaNFill:\"all\",hideOverlayOnMouseOut:!0,legend:\"onmouseover\",stepPlot:!1,xRangePad:0,yRangePad:null,drawAxesAtZero:!1,titleHeight:28,xLabelHeight:18,yLabelWidth:18,axisLineColor:\"black\",axisLineWidth:.3,gridLineWidth:.3,axisLabelWidth:50,gridLineColor:\"rgb(128,128,128)\",interactionModel:l.default.defaultModel,animatedZooms:!1,showRangeSelector:!1,rangeSelectorHeight:40,rangeSelectorPlotStrokeColor:\"#808FAB\",rangeSelectorPlotFillGradientColor:\"white\",rangeSelectorPlotFillColor:\"#A7B1C4\",rangeSelectorBackgroundStrokeColor:\"gray\",rangeSelectorBackgroundLineWidth:1,rangeSelectorPlotLineWidth:1.5,rangeSelectorForegroundStrokeColor:\"black\",rangeSelectorForegroundLineWidth:1,rangeSelectorAlpha:.6,showInRangeSelector:null,plotter:[u.default._fillPlotter,u.default._errorPlotter,u.default._linePlotter],plugins:[],axes:{x:{pixelsPerLabel:70,axisLabelWidth:60,axisLabelFormatter:c.dateAxisLabelFormatter,valueFormatter:c.dateValueFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.dateTicker},y:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawGrid:!0,drawAxis:!0,independentTicks:!0,ticker:o.numericTicks},y2:{axisLabelWidth:50,pixelsPerLabel:30,valueFormatter:c.numberValueFormatter,axisLabelFormatter:c.numberAxisLabelFormatter,drawAxis:!0,drawGrid:!1,independentTicks:!1,ticker:o.numericTicks}}};a.default=p,e.exports=a.default},{\"./dygraph-canvas\":9,\"./dygraph-interaction-model\":12,\"./dygraph-tickers\":16,\"./dygraph-utils\":17}],11:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph\"),n=function(t){return t&&t.__esModule?t:{default:t}}(i),r=function(t){this.container=t};r.prototype.draw=function(t,e){this.container.innerHTML=\"\",void 0!==this.date_graph&&this.date_graph.destroy(),this.date_graph=new n.default(this.container,t,e)},r.prototype.setSelection=function(t){var e=!1;t.length&&(e=t[0].row),this.date_graph.setSelection(e)},r.prototype.getSelection=function(){var t=[],e=this.date_graph.getSelection();if(e<0)return t;for(var a=this.date_graph.layout_.points,i=0;ia.boundedDates[1]&&(i-=r-a.boundedDates[1],r=i+a.dateRange),e.getOptionForAxis(\"logscale\",\"x\")?e.dateWindow_=[Math.pow(n.LOG_SCALE,i),Math.pow(n.LOG_SCALE,r)]:e.dateWindow_=[i,r],a.is2DPan)for(var o=a.dragEndY-a.dragStartY,s=0;s=10&&a.dragDirection==n.HORIZONTAL){var o=Math.min(a.dragStartX,a.dragEndX),s=Math.max(a.dragStartX,a.dragEndX);o=Math.max(o,i.x),s=Math.min(s,i.x+i.w),o=10&&a.dragDirection==n.VERTICAL){var l=Math.min(a.dragStartY,a.dragEndY),h=Math.max(a.dragStartY,a.dragEndY);l=Math.max(l,i.y),h=Math.min(h,i.y+i.h),l1&&(a.startTimeForDoubleTapMs=null);for(var i=[],n=0;n=2){a.initialPinchCenter={pageX:.5*(i[0].pageX+i[1].pageX),pageY:.5*(i[0].pageY+i[1].pageY),dataX:.5*(i[0].dataX+i[1].dataX),dataY:.5*(i[0].dataY+i[1].dataY)};var o=180/Math.PI*Math.atan2(a.initialPinchCenter.pageY-i[0].pageY,i[0].pageX-a.initialPinchCenter.pageX);o=Math.abs(o),o>90&&(o=90-o),a.touchDirections={x:o<67.5,y:o>22.5}}a.initialRange={x:e.xAxisRange(),y:e.yAxisRange()}},r.moveTouch=function(t,e,a){a.startTimeForDoubleTapMs=null;var i,n=[];for(i=0;i=2){var g=s[1].pageX-l.pageX;c=(n[1].pageX-o.pageX)/g;var f=s[1].pageY-l.pageY;p=(n[1].pageY-o.pageY)/f}c=Math.min(8,Math.max(.125,c)),p=Math.min(8,Math.max(.125,p));var 
v=!1;if(a.touchDirections.x&&(e.dateWindow_=[l.dataX-h.dataX+(a.initialRange.x[0]-l.dataX)/c,l.dataX-h.dataX+(a.initialRange.x[1]-l.dataX)/c],v=!0),a.touchDirections.y)for(i=0;i<1;i++){var _=e.axes_[i],y=e.attributes_.getForAxis(\"logscale\",i);y||(_.valueRange=[l.dataY-h.dataY+(a.initialRange.y[0]-l.dataY)/p,l.dataY-h.dataY+(a.initialRange.y[1]-l.dataY)/p],v=!0)}if(e.drawGraph_(!1),v&&n.length>1&&e.getFunctionOption(\"zoomCallback\")){var x=e.xAxisRange();e.getFunctionOption(\"zoomCallback\").call(e,x[0],x[1],e.yAxisRanges())}},r.endTouch=function(t,e,a){if(0!==t.touches.length)r.startTouch(t,e,a);else if(1==t.changedTouches.length){var i=(new Date).getTime(),n=t.changedTouches[0];a.startTimeForDoubleTapMs&&i-a.startTimeForDoubleTapMs<500&&a.doubleTapX&&Math.abs(a.doubleTapX-n.screenX)<50&&a.doubleTapY&&Math.abs(a.doubleTapY-n.screenY)<50?e.resetZoom():(a.startTimeForDoubleTapMs=i,a.doubleTapX=n.screenX,a.doubleTapY=n.screenY)}};var o=function(t,e,a){return ta?t-a:0},s=function(t,e){var a=n.findPos(e.canvas_),i={left:a.x,right:a.x+e.canvas_.offsetWidth,top:a.y,bottom:a.y+e.canvas_.offsetHeight},r={x:n.pageX(t),y:n.pageY(t)},s=o(r.x,i.left,i.right),l=o(r.y,i.top,i.bottom);return Math.max(s,l)};r.defaultModel={mousedown:function(t,e,a){if(!t.button||2!=t.button){a.initializeMouseDown(t,e,a),t.altKey||t.shiftKey?r.startPan(t,e,a):r.startZoom(t,e,a);var i=function(t){if(a.isZooming){s(t,e)<100?r.moveZoom(t,e,a):null!==a.dragEndX&&(a.dragEndX=null,a.dragEndY=null,e.clearZoomRect_())}else a.isPanning&&r.movePan(t,e,a)},o=function t(o){a.isZooming?null!==a.dragEndX?r.endZoom(o,e,a):r.maybeTreatMouseOpAsClick(o,e,a):a.isPanning&&r.endPan(o,e,a),n.removeEvent(document,\"mousemove\",i),n.removeEvent(document,\"mouseup\",t),a.destroy()};e.addAndTrackEvent(document,\"mousemove\",i),e.addAndTrackEvent(document,\"mouseup\",o)}},willDestroyContextMyself:!0,touchstart:function(t,e,a){r.startTouch(t,e,a)},touchmove:function(t,e,a){r.moveTouch(t,e,a)},touchend:function(t,e,a){r.endTouch(t,e,a)},dblclick:function(t,e,a){if(a.cancelNextDblclick)return void(a.cancelNextDblclick=!1);var i={canvasx:a.dragEndX,canvasy:a.dragEndY,cancelable:!0};e.cascadeEvents_(\"dblclick\",i)||t.altKey||t.shiftKey||e.resetZoom()}},r.nonInteractiveModel_={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a)},mouseup:r.maybeTreatMouseOpAsClick},r.dragIsPanInteractionModel={mousedown:function(t,e,a){a.initializeMouseDown(t,e,a),r.startPan(t,e,a)},mousemove:function(t,e,a){a.isPanning&&r.movePan(t,e,a)},mouseup:function(t,e,a){a.isPanning&&r.endPan(t,e,a)}},a.default=r,e.exports=a.default},{\"./dygraph-utils\":17}],13:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t){this.dygraph_=t,this.points=[],this.setNames=[],this.annotations=[],this.yAxes_=null,this.xTicks_=null,this.yTicks_=null};r.prototype.addDataset=function(t,e){this.points.push(e),this.setNames.push(t)},r.prototype.getPlotArea=function(){return this.area_},r.prototype.computePlotArea=function(){var t={x:0,y:0};t.w=this.dygraph_.width_-t.x-this.dygraph_.getOption(\"rightGap\"),t.h=this.dygraph_.height_;var e={chart_div:this.dygraph_.graphDiv,reserveSpaceLeft:function(e){var a={x:t.x,y:t.y,w:e,h:t.h};return t.x+=e,t.w-=e,a},reserveSpaceRight:function(e){var a={x:t.x+t.w-e,y:t.y,w:e,h:t.h};return t.w-=e,a},reserveSpaceTop:function(e){var 
a={x:t.x,y:t.y,w:t.w,h:e};return t.y+=e,t.h-=e,a},reserveSpaceBottom:function(e){var a={x:t.x,y:t.y+t.h-e,w:t.w,h:e};return t.h-=e,a},chartRect:function(){return{x:t.x,y:t.y,w:t.w,h:t.h}}};this.dygraph_.cascadeEvents_(\"layout\",e),this.area_=t},r.prototype.setAnnotations=function(t){this.annotations=[];for(var e=this.dygraph_.getOption(\"xValueParser\")||function(t){return t},a=0;a=0&&i<1&&this.xticks.push({pos:i,label:a,has_tick:r});for(this.yticks=[],t=0;t0&&i<=1&&this.yticks.push({axis:t,pos:i,label:a,has_tick:r})},r.prototype._evaluateAnnotations=function(){var t,e={};for(t=0;t1&&o.update(this.yAxes_[1].options,s.y2||{}),o.update(this.xAxis_.options,s.x||{})}},u.prototype.get=function(t){var e=this.getGlobalUser_(t);return null!==e?e:this.getGlobalDefault_(t)},u.prototype.getGlobalUser_=function(t){return this.user_.hasOwnProperty(t)?this.user_[t]:null},u.prototype.getGlobalDefault_=function(t){return this.global_.hasOwnProperty(t)?this.global_[t]:l.default.hasOwnProperty(t)?l.default[t]:null},u.prototype.getForAxis=function(t,e){var a,i;if(\"number\"==typeof e)a=e,i=0===a?\"y\":\"y2\";else{if(\"y1\"==e&&(e=\"y\"),\"y\"==e)a=0;else if(\"y2\"==e)a=1;else{if(\"x\"!=e)throw\"Unknown axis \"+e;a=-1}i=e}var n=-1==a?this.xAxis_:this.yAxes_[a];if(n){var r=n.options;if(r.hasOwnProperty(t))return r[t]}if(\"x\"!==e||\"logscale\"!==t){var o=this.getGlobalUser_(t);if(null!==o)return o}var s=l.default.axes[i];return s.hasOwnProperty(t)?s[t]:this.getGlobalDefault_(t)},u.prototype.getForSeries=function(t,e){if(e===this.dygraph_.getHighlightSeries()&&this.highlightSeries_.hasOwnProperty(t))return this.highlightSeries_[t];if(!this.series_.hasOwnProperty(e))throw\"Unknown series: \"+e;var a=this.series_[e],i=a.options;return i.hasOwnProperty(t)?i[t]:this.getForAxis(t,a.yAxis)},u.prototype.numAxes=function(){return this.yAxes_.length},u.prototype.axisForSeries=function(t){return this.series_[t].yAxis},u.prototype.axisOptions=function(t){return this.yAxes_[t].options},u.prototype.seriesForAxis=function(t){return this.yAxes_[t].series},u.prototype.seriesNames=function(){return this.labels_},void 0!==i);a.default=u,e.exports=a.default}).call(this,t(\"_process\"))},{\"./dygraph-default-attrs\":10,\"./dygraph-options-reference\":14,\"./dygraph-utils\":17,_process:1}],16:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"./dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(t,e,a,i,n,r){return o(t,e,a,function(t){return\"logscale\"!==t&&i(t)},n,r)};a.numericLinearTicks=r;var o=function(t,e,a,i,r,o){var s,l,h,u,c=i(\"pixelsPerLabel\"),p=[];if(o)for(s=0;s=u/4){for(var _=f;_>=g;_--){var y=d[_],x=Math.log(y/t)/Math.log(e/t)*a,m={v:y};null===v?v={tickValue:y,pixel_coord:x}:Math.abs(x-v.pixel_coord)>=c?v={tickValue:y,pixel_coord:x}:m.label=\"\",p.push(m)}p.reverse()}}if(0===p.length){var b,w,A=i(\"labelsKMG2\");A?(b=[1,2,4,8,16,32,64,128,256],w=16):(b=[1,2,5,10,20,50,100],w=10);var O,D,E,L=Math.ceil(a/c),T=Math.abs(e-t)/L,S=Math.floor(Math.log(T)/Math.log(w)),P=Math.pow(w,S);for(l=0;lc));l++);for(D>E&&(O*=-1),s=0;s<=u;s++)h=D+s*O,p.push({v:h})}}var C=i(\"axisLabelFormatter\");for(s=0;s=0?g(t,e,o,i,n):[]};a.dateTicker=s;var 
l={MILLISECONDLY:0,TWO_MILLISECONDLY:1,FIVE_MILLISECONDLY:2,TEN_MILLISECONDLY:3,FIFTY_MILLISECONDLY:4,HUNDRED_MILLISECONDLY:5,FIVE_HUNDRED_MILLISECONDLY:6,SECONDLY:7,TWO_SECONDLY:8,FIVE_SECONDLY:9,TEN_SECONDLY:10,THIRTY_SECONDLY:11,MINUTELY:12,TWO_MINUTELY:13,FIVE_MINUTELY:14,TEN_MINUTELY:15,THIRTY_MINUTELY:16,HOURLY:17,TWO_HOURLY:18,SIX_HOURLY:19,DAILY:20,TWO_DAILY:21,WEEKLY:22,MONTHLY:23,QUARTERLY:24,BIANNUAL:25,ANNUAL:26,DECADAL:27,CENTENNIAL:28,NUM_GRANULARITIES:29};a.Granularity=l;var h={DATEFIELD_Y:0,DATEFIELD_M:1,DATEFIELD_D:2,DATEFIELD_HH:3,DATEFIELD_MM:4,DATEFIELD_SS:5,DATEFIELD_MS:6,NUM_DATEFIELDS:7},u=[];u[l.MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:1,spacing:1},u[l.TWO_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:2,spacing:2},u[l.FIVE_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:5,spacing:5},u[l.TEN_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:10,spacing:10},u[l.FIFTY_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:50,spacing:50},u[l.HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:100,spacing:100},u[l.FIVE_HUNDRED_MILLISECONDLY]={datefield:h.DATEFIELD_MS,step:500,spacing:500},u[l.SECONDLY]={datefield:h.DATEFIELD_SS,step:1,spacing:1e3},u[l.TWO_SECONDLY]={datefield:h.DATEFIELD_SS,step:2,spacing:2e3},u[l.FIVE_SECONDLY]={datefield:h.DATEFIELD_SS,step:5,spacing:5e3},u[l.TEN_SECONDLY]={datefield:h.DATEFIELD_SS,step:10,spacing:1e4},u[l.THIRTY_SECONDLY]={datefield:h.DATEFIELD_SS,step:30,spacing:3e4},u[l.MINUTELY]={datefield:h.DATEFIELD_MM,step:1,spacing:6e4},u[l.TWO_MINUTELY]={datefield:h.DATEFIELD_MM,step:2,spacing:12e4},u[l.FIVE_MINUTELY]={datefield:h.DATEFIELD_MM,step:5,spacing:3e5},u[l.TEN_MINUTELY]={datefield:h.DATEFIELD_MM,step:10,spacing:6e5},u[l.THIRTY_MINUTELY]={datefield:h.DATEFIELD_MM,step:30,spacing:18e5},u[l.HOURLY]={datefield:h.DATEFIELD_HH,step:1,spacing:36e5},u[l.TWO_HOURLY]={datefield:h.DATEFIELD_HH,step:2,spacing:72e5},u[l.SIX_HOURLY]={datefield:h.DATEFIELD_HH,step:6,spacing:216e5},u[l.DAILY]={datefield:h.DATEFIELD_D,step:1,spacing:864e5},u[l.TWO_DAILY]={datefield:h.DATEFIELD_D,step:2,spacing:1728e5},u[l.WEEKLY]={datefield:h.DATEFIELD_D,step:7,spacing:6048e5},u[l.MONTHLY]={datefield:h.DATEFIELD_M,step:1,spacing:2629817280},u[l.QUARTERLY]={datefield:h.DATEFIELD_M,step:3,spacing:216e5*365.2524},u[l.BIANNUAL]={datefield:h.DATEFIELD_M,step:6,spacing:432e5*365.2524},u[l.ANNUAL]={datefield:h.DATEFIELD_Y,step:1,spacing:864e5*365.2524},u[l.DECADAL]={datefield:h.DATEFIELD_Y,step:10,spacing:315578073600},u[l.CENTENNIAL]={datefield:h.DATEFIELD_Y,step:100,spacing:3155780736e3};var d=function(){for(var t=[],e=-39;e<=39;e++)for(var a=Math.pow(10,e),i=1;i<=9;i++){var n=a*i;t.push(n)}return t}(),c=function(t,e,a,i){for(var n=i(\"pixelsPerLabel\"),r=0;r=n)return r}return-1},p=function(t,e,a){var i=u[a].spacing;return Math.round(1*(e-t)/i)},g=function(t,e,a,i,r){var o=i(\"axisLabelFormatter\"),s=i(\"labelsUTC\"),d=s?n.DateAccessorsUTC:n.DateAccessorsLocal,c=u[a].datefield,p=u[a].step,g=u[a].spacing,f=new Date(t),v=[];v[h.DATEFIELD_Y]=d.getFullYear(f),v[h.DATEFIELD_M]=d.getMonth(f),v[h.DATEFIELD_D]=d.getDate(f),v[h.DATEFIELD_HH]=d.getHours(f),v[h.DATEFIELD_MM]=d.getMinutes(f),v[h.DATEFIELD_SS]=d.getSeconds(f),v[h.DATEFIELD_MS]=d.getMilliseconds(f);var _=v[c]%p;a==l.WEEKLY&&(_=d.getDay(f)),v[c]-=_;for(var y=c+1;y=l.DAILY||d.getHours(m)%p==0)&&x.push({v:b,label:o.call(r,m,a,i,r)}),v[c]+=p,m=d.makeDate.apply(null,v),b=m.getTime();return x};a.getDateAxis=g},{\"./dygraph-utils\":17}],17:[function(t,e,a){\"use strict\";function i(t,e,a){t.removeEventListener(e,a,!1)}function 
n(t){return t=t||window.event,t.stopPropagation&&t.stopPropagation(),t.preventDefault&&t.preventDefault(),t.cancelBubble=!0,t.cancel=!0,t.returnValue=!1,!1}function r(t,e,a){var i,n,r;if(0===e)i=a,n=a,r=a;else{var o=Math.floor(6*t),s=6*t-o,l=a*(1-e),h=a*(1-e*s),u=a*(1-e*(1-s));switch(o){case 1:i=h,n=a,r=l;break;case 2:i=l,n=a,r=u;break;case 3:i=l,n=h,r=a;break;case 4:i=u,n=l,r=a;break;case 5:i=a,n=l,r=h;break;case 6:case 0:i=a,n=u,r=l}}return i=Math.floor(255*i+.5),n=Math.floor(255*n+.5),r=Math.floor(255*r+.5),\"rgb(\"+i+\",\"+n+\",\"+r+\")\"}function o(t){var e=t.getBoundingClientRect(),a=window,i=document.documentElement;return{x:e.left+(a.pageXOffset||i.scrollLeft),y:e.top+(a.pageYOffset||i.scrollTop)}}function s(t){return!t.pageX||t.pageX<0?0:t.pageX}function l(t){return!t.pageY||t.pageY<0?0:t.pageY}function h(t,e){return s(t)-e.px}function u(t,e){return l(t)-e.py}function d(t){return!!t&&!isNaN(t)}function c(t,e){return!!t&&(null!==t.yval&&(null!==t.x&&void 0!==t.x&&(null!==t.y&&void 0!==t.y&&!(isNaN(t.x)||!e&&isNaN(t.y)))))}function p(t,e){var a=Math.min(Math.max(1,e||2),21);return Math.abs(t)<.001&&0!==t?t.toExponential(a-1):t.toPrecision(a)}function g(t){return t<10?\"0\"+t:\"\"+t}function f(t,e,a,i){var n=g(t)+\":\"+g(e);if(a&&(n+=\":\"+g(a),i)){var r=\"\"+i;n+=\".\"+(\"000\"+r).substring(r.length)}return n}function v(t,e){var a=e?tt:$,i=new Date(t),n=a.getFullYear(i),r=a.getMonth(i),o=a.getDate(i),s=a.getHours(i),l=a.getMinutes(i),h=a.getSeconds(i),u=a.getMilliseconds(i),d=\"\"+n,c=g(r+1),p=g(o),v=3600*s+60*l+h+.001*u,_=d+\"/\"+c+\"/\"+p;return v&&(_+=\" \"+f(s,l,h,u)),_}function _(t,e){var a=Math.pow(10,e);return Math.round(t*a)/a}function y(t,e,a,i,n){for(var r=!0;r;){var o=t,s=e,l=a,h=i,u=n;if(r=!1,null!==h&&void 0!==h&&null!==u&&void 0!==u||(h=0,u=s.length-1),h>u)return-1;null!==l&&void 0!==l||(l=0);var d,c=function(t){return t>=0&&to){if(l>0&&(d=p-1,c(d)&&s[d]o))return p;t=o,e=s,a=l,i=p+1,n=u,r=!0,c=p=g=d=void 0}}}function x(t){var e,a;if((-1==t.search(\"-\")||-1!=t.search(\"T\")||-1!=t.search(\"Z\"))&&(a=m(t))&&!isNaN(a))return a;if(-1!=t.search(\"-\")){for(e=t.replace(\"-\",\"/\",\"g\");-1!=e.search(\"-\");)e=e.replace(\"-\",\"/\");a=m(e)}else 8==t.length?(e=t.substr(0,4)+\"/\"+t.substr(4,2)+\"/\"+t.substr(6,2),a=m(e)):a=m(t);return a&&!isNaN(a)||console.error(\"Couldn't parse \"+t+\" as a date\"),a}function m(t){return new Date(t).getTime()}function b(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(t[a]=e[a]);return t}function w(t,e){if(void 0!==e&&null!==e)for(var a in e)e.hasOwnProperty(a)&&(null===e[a]?t[a]=null:A(e[a])?t[a]=e[a].slice():!function(t){return\"object\"==typeof Node?t instanceof Node:\"object\"==typeof t&&\"number\"==typeof t.nodeType&&\"string\"==typeof t.nodeName}(e[a])&&\"object\"==typeof e[a]?(\"object\"==typeof t[a]&&null!==t[a]||(t[a]={}),w(t[a],e[a])):t[a]=e[a]);return t}function A(t){var e=typeof t;return(\"object\"==e||\"function\"==e&&\"function\"==typeof t.item)&&null!==t&&\"number\"==typeof t.length&&3!==t.nodeType}function O(t){return\"object\"==typeof t&&null!==t&&\"function\"==typeof t.getTime}function D(t){for(var e=[],a=0;a=e||et.call(window,function(){var e=(new Date).getTime(),h=e-o;n=r,r=Math.floor(h/a);var u=r-n;r+u>s||r>=s?(t(s),i()):(0!==u&&t(r),l())})}()}function C(t,e){var a={};if(t)for(var i=1;i=Math.pow(10,r)||Math.abs(t)=0;g--,c/=l)if(d>=c){i=_(t/c,n)+h[g];break}if(s){var 
f=String(t.toExponential()).split(\"e-\");2===f.length&&f[1]>=3&&f[1]<=24&&(i=f[1]%3>0?_(f[0]/F(10,f[1]%3),n):Number(f[0]).toFixed(2),i+=u[Math.floor(f[1]/3)-1])}}return i}function X(t,e,a){return Y.call(this,t,a)}function V(t,e,a){var i=a(\"labelsUTC\"),n=i?tt:$,r=n.getFullYear(t),o=n.getMonth(t),s=n.getDate(t),l=n.getHours(t),h=n.getMinutes(t),u=n.getSeconds(t),d=n.getMilliseconds(t);if(e>=G.Granularity.DECADAL)return\"\"+r;if(e>=G.Granularity.MONTHLY)return lt[o]+\" \"+r;if(0===3600*l+60*h+u+.001*d||e>=G.Granularity.DAILY)return g(s)+\" \"+lt[o];if(eG.Granularity.MINUTELY?f(l,h,u,0):f(l,h,u,d)}function Z(t,e){return v(t,e(\"labelsUTC\"))}Object.defineProperty(a,\"__esModule\",{value:!0}),a.removeEvent=i,a.cancelEvent=n,a.hsvToRGB=r,a.findPos=o,a.pageX=s,a.pageY=l,a.dragGetX_=h,a.dragGetY_=u,a.isOK=d,a.isValidPoint=c,a.floatFormat=p,a.zeropad=g,a.hmsString_=f,a.dateString_=v,a.round_=_,a.binarySearch=y,a.dateParser=x,a.dateStrToMillis=m,a.update=b,a.updateDeep=w,a.isArrayLike=A,a.isDateLike=O,a.clone=D,a.createCanvas=E,a.getContextPixelRatio=L,a.Iterator=T,a.createIterator=S,a.repeatAndCleanup=P,a.isPixelChangingOptionList=C,a.detectLineDelimiter=M,a.isNodeContainedBy=N,a.pow=F,a.toRGB_=R,a.isCanvasSupported=I,a.parseFloat_=H,a.numberValueFormatter=Y,a.numberAxisLabelFormatter=X,a.dateAxisLabelFormatter=V,a.dateValueFormatter=Z;var B=t(\"./dygraph-tickers\"),G=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(B);a.LOG_SCALE=10;var W=Math.log(10);a.LN_TEN=W;var U=function(t){return Math.log(t)/W};a.log10=U;var z=function(t,e,a){var i=U(t),n=U(e),r=i+a*(n-i);return Math.pow(10,r)};a.logRangeFraction=z;var j=[2,2];a.DOTTED_LINE=j;var K=[7,3];a.DASHED_LINE=K;var q=[7,2,2,2];a.DOT_DASH_LINE=q;a.HORIZONTAL=1;a.VERTICAL=2;var Q=function(t){return t.getContext(\"2d\")};a.getContext=Q;var J=function(t,e,a){t.addEventListener(e,a,!1)};a.addEvent=J;var $={getFullYear:function(t){return t.getFullYear()},getMonth:function(t){return t.getMonth()},getDate:function(t){return t.getDate()},getHours:function(t){return t.getHours()},getMinutes:function(t){return t.getMinutes()},getSeconds:function(t){return t.getSeconds()},getMilliseconds:function(t){return t.getMilliseconds()},getDay:function(t){return t.getDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(t,e,a,i,n,r,o)}};a.DateAccessorsLocal=$;var tt={getFullYear:function(t){return t.getUTCFullYear()},getMonth:function(t){return t.getUTCMonth()},getDate:function(t){return t.getUTCDate()},getHours:function(t){return t.getUTCHours()},getMinutes:function(t){return t.getUTCMinutes()},getSeconds:function(t){return t.getUTCSeconds()},getMilliseconds:function(t){return t.getUTCMilliseconds()},getDay:function(t){return t.getUTCDay()},makeDate:function(t,e,a,i,n,r,o){return new Date(Date.UTC(t,e,a,i,n,r,o))}};a.DateAccessorsUTC=tt,T.prototype.next=function(){if(!this.hasNext)return null;for(var t=this.peek,e=this.nextIdx_+1,a=!1;e=0;n--){var r=i[n][0],o=i[n][1];if(o.call(r,a),a.propagationStopped)break}return a.defaultPrevented},Q.prototype.getPluginInstance_=function(t){for(var e=0;e=0;if(null===t||void 0===t)return e||a;if(\"y\"===t)return a;throw new Error(\"axis parameter is [\"+t+\"] must be null, 'x' or 'y'.\")},Q.prototype.toString=function(){var t=this.maindiv_;return\"[Dygraph \"+(t&&t.id?t.id:t)+\"]\"},Q.prototype.attr_=function(t,e){return e?this.attributes_.getForSeries(t,e):this.attributes_.get(t)},Q.prototype.getOption=function(t,e){return 
this.attr_(t,e)},Q.prototype.getNumericOption=function(t,e){return this.getOption(t,e)},Q.prototype.getStringOption=function(t,e){return this.getOption(t,e)},Q.prototype.getBooleanOption=function(t,e){return this.getOption(t,e)},Q.prototype.getFunctionOption=function(t,e){return this.getOption(t,e)},Q.prototype.getOptionForAxis=function(t,e){return this.attributes_.getForAxis(t,e)},Q.prototype.optionsViewForAxis_=function(t){var e=this;return function(a){var i=e.user_attrs_.axes;return i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:(\"x\"!==t||\"logscale\"!==a)&&(void 0!==e.user_attrs_[a]?e.user_attrs_[a]:(i=e.attrs_.axes,i&&i[t]&&i[t].hasOwnProperty(a)?i[t][a]:\"y\"==t&&e.axes_[0].hasOwnProperty(a)?e.axes_[0][a]:\"y2\"==t&&e.axes_[1].hasOwnProperty(a)?e.axes_[1][a]:e.attr_(a)))}},Q.prototype.rollPeriod=function(){return this.rollPeriod_},Q.prototype.xAxisRange=function(){return this.dateWindow_?this.dateWindow_:this.xAxisExtremes()},Q.prototype.xAxisExtremes=function(){var t=this.getNumericOption(\"xRangePad\")/this.plotter_.area.w;if(0===this.numRows())return[0-t,1+t];var e=this.rawData_[0][0],a=this.rawData_[this.rawData_.length-1][0];if(t){var i=a-e;e-=i*t,a+=i*t}return[e,a]},Q.prototype.yAxisExtremes=function(){var t=this.gatherDatasets_(this.rolledSeries_,null),e=t.extremes,a=this.axes_;this.computeYAxisRanges_(e);var i=this.axes_;return this.axes_=a,i.map(function(t){return t.extremeRange})},Q.prototype.yAxisRange=function(t){if(void 0===t&&(t=0),t<0||t>=this.axes_.length)return null;var e=this.axes_[t];return[e.computedValueRange[0],e.computedValueRange[1]]},Q.prototype.yAxisRanges=function(){for(var t=[],e=0;ethis.rawData_.length?null:e<0||e>this.rawData_[t].length?null:this.rawData_[t][e]},Q.prototype.createInterface_=function(){var t=this.maindiv_;this.graphDiv=document.createElement(\"div\"),this.graphDiv.style.textAlign=\"left\",this.graphDiv.style.position=\"relative\",t.appendChild(this.graphDiv),this.canvas_=x.createCanvas(),this.canvas_.style.position=\"absolute\",this.hidden_=this.createPlotKitCanvas_(this.canvas_),this.canvas_ctx_=x.getContext(this.canvas_),this.hidden_ctx_=x.getContext(this.hidden_),this.resizeElements_(),this.graphDiv.appendChild(this.hidden_),this.graphDiv.appendChild(this.canvas_),this.mouseEventElement_=this.createMouseEventElement_(),this.layout_=new h.default(this);var e=this;this.mouseMoveHandler_=function(t){e.mouseMove_(t)},this.mouseOutHandler_=function(t){var a=t.target||t.fromElement,i=t.relatedTarget||t.toElement;x.isNodeContainedBy(a,e.graphDiv)&&!x.isNodeContainedBy(i,e.graphDiv)&&e.mouseOut_(t)},this.addAndTrackEvent(window,\"mouseout\",this.mouseOutHandler_),this.addAndTrackEvent(this.mouseEventElement_,\"mousemove\",this.mouseMoveHandler_),this.resizeHandler_||(this.resizeHandler_=function(t){e.resize()},this.addAndTrackEvent(window,\"resize\",this.resizeHandler_))},Q.prototype.resizeElements_=function(){this.graphDiv.style.width=this.width_+\"px\",this.graphDiv.style.height=this.height_+\"px\";var t=this.getNumericOption(\"pixelRatio\"),e=t||x.getContextPixelRatio(this.canvas_ctx_);this.canvas_.width=this.width_*e,this.canvas_.height=this.height_*e,this.canvas_.style.width=this.width_+\"px\",this.canvas_.style.height=this.height_+\"px\",1!==e&&this.canvas_ctx_.scale(e,e);var 
a=t||x.getContextPixelRatio(this.hidden_ctx_);this.hidden_.width=this.width_*a,this.hidden_.height=this.height_*a,this.hidden_.style.width=this.width_+\"px\",this.hidden_.style.height=this.height_+\"px\",1!==a&&this.hidden_ctx_.scale(a,a)},Q.prototype.destroy=function(){this.canvas_ctx_.restore(),this.hidden_ctx_.restore();for(var t=this.plugins_.length-1;t>=0;t--){var e=this.plugins_.pop();e.plugin.destroy&&e.plugin.destroy()}this.removeTrackedEvents_(),x.removeEvent(window,\"mouseout\",this.mouseOutHandler_),x.removeEvent(this.mouseEventElement_,\"mousemove\",this.mouseMoveHandler_),x.removeEvent(window,\"resize\",this.resizeHandler_),this.resizeHandler_=null,function t(e){for(;e.hasChildNodes();)t(e.firstChild),e.removeChild(e.firstChild)}(this.maindiv_);var a=function(t){for(var e in t)\"object\"==typeof t[e]&&(t[e]=null)};a(this.layout_),a(this.plotter_),a(this)},Q.prototype.createPlotKitCanvas_=function(t){var e=x.createCanvas();return e.style.position=\"absolute\",e.style.top=t.style.top,e.style.left=t.style.left,\ne.width=this.width_,e.height=this.height_,e.style.width=this.width_+\"px\",e.style.height=this.height_+\"px\",e},Q.prototype.createMouseEventElement_=function(){return this.canvas_},Q.prototype.setColors_=function(){var t=this.getLabels(),e=t.length-1;this.colors_=[],this.colorsMap_={};for(var a=this.getNumericOption(\"colorSaturation\")||1,i=this.getNumericOption(\"colorValue\")||.5,n=Math.ceil(e/2),r=this.getOption(\"colors\"),o=this.visibility(),s=0;s=0;--u)for(var d=this.layout_.points[u],c=0;c=l.length)){var h=l[s];if(x.isValidPoint(h)){var u=h.canvasy;if(t>h.canvasx&&s+10){var p=(t-h.canvasx)/c;u+=p*(d.canvasy-h.canvasy)}}}else if(t0){var g=l[s-1];if(x.isValidPoint(g)){var c=h.canvasx-g.canvasx;if(c>0){var p=(h.canvasx-t)/c;u+=p*(g.canvasy-h.canvasy)}}}(0===r||u=0){var r=0,o=this.attr_(\"labels\");for(e=1;er&&(r=s)}var l=this.previousVerticalX_;a.clearRect(l-r-1,0,2*r+2,this.height_)}if(this.selPoints_.length>0){var h=this.selPoints_[0].canvasx;for(a.save(),e=0;e=0){t!=this.lastRow_&&(i=!0),this.lastRow_=t;for(var n=0;n=0&&o=0&&(i=!0),this.lastRow_=-1;return this.selPoints_.length?this.lastx_=this.selPoints_[0].xval:this.lastx_=-1,void 0!==e&&(this.highlightSet_!==e&&(i=!0),this.highlightSet_=e),void 0!==a&&(this.lockedSet_=a),i&&this.updateSelection_(void 0),i},Q.prototype.mouseOut_=function(t){this.getFunctionOption(\"unhighlightCallback\")&&this.getFunctionOption(\"unhighlightCallback\").call(this,t),this.getBooleanOption(\"hideOverlayOnMouseOut\")&&!this.lockedSet_&&this.clearSelection()},Q.prototype.clearSelection=function(){if(this.cascadeEvents_(\"deselect\",{}),this.lockedSet_=!1,this.fadeLevel)return void this.animateSelection_(-1);this.canvas_ctx_.clearRect(0,0,this.width_,this.height_),this.fadeLevel=0,this.selPoints_=[],this.lastx_=-1,this.lastRow_=-1,this.highlightSet_=null},Q.prototype.getSelection=function(){if(!this.selPoints_||this.selPoints_.length<1)return-1;for(var t=0;t1&&(a=this.dataHandler_.rollingAverage(a,this.rollPeriod_,this.attributes_)),this.rolledSeries_.push(a)}this.drawGraph_();var i=new Date;this.drawingTimeMs_=i-t},Q.PointType=void 0,Q.stackPoints_=function(t,e,a,i){for(var n=null,r=null,o=null,s=-1,l=0;l=e))for(var a=e;aa[1]&&(a[1]=c),c=1;a--)if(this.visibility()[a-1]){if(e){s=t[a];var p=e[0],g=e[1];for(n=null,r=null,i=0;i=p&&null===n&&(n=i),s[i][0]<=g&&(r=i);null===n&&(n=0);for(var f=n,v=!0;v&&f>0;)f--,v=null===s[f][1];null===r&&(r=s.length-1);var _=r;for(v=!0;v&&_0;){var 
n=this.readyFns_.pop();n(this)}},Q.prototype.computeYAxes_=function(){var t,e,a;for(this.axes_=[],t=0;t0&&(v=0),_<0&&(_=0)),v==1/0&&(v=0),_==-1/0&&(_=1),a=_-v,0===a&&(0!==_?a=Math.abs(_):(_=1,a=1));var m=_,b=v;e&&(u?(m=_+n*a,b=v):(m=_+n*a,b=v-n*a,b<0&&v>=0&&(b=0),m>0&&_<=0&&(m=0))),h.extremeRange=[b,m]}if(h.valueRange){var w=o(h.valueRange[0])?h.extremeRange[0]:h.valueRange[0],A=o(h.valueRange[1])?h.extremeRange[1]:h.valueRange[1];h.computedValueRange=[w,A]}else h.computedValueRange=h.extremeRange;if(!e)if(w=h.computedValueRange[0],A=h.computedValueRange[1],w===A&&(w-=.5,A+=.5),u){var O=n/(2*n-1),D=(n-1)/(2*n-1);h.computedValueRange[0]=x.logRangeFraction(w,A,O),h.computedValueRange[1]=x.logRangeFraction(w,A,D)}else a=A-w,h.computedValueRange[0]=w-a*n,h.computedValueRange[1]=A+a*n;if(c){h.independentTicks=c;var E=this.optionsViewForAxis_(\"y\"+(l?\"2\":\"\")),L=E(\"ticker\");h.ticks=L(h.computedValueRange[0],h.computedValueRange[1],this.plotter_.area.h,E,this),r||(r=h)}}if(void 0===r)throw'Configuration Error: At least one axis has to have the \"independentTicks\" option activated.';for(var l=0;l0&&\"e\"!=t[a-1]&&\"E\"!=t[a-1]||t.indexOf(\"/\")>=0||isNaN(parseFloat(t))?e=!0:8==t.length&&t>\"19700101\"&&t<\"20371231\"&&(e=!0),this.setXAxisOptions_(e)},Q.prototype.setXAxisOptions_=function(t){t?(this.attrs_.xValueParser=x.dateParser,this.attrs_.axes.x.valueFormatter=x.dateValueFormatter,this.attrs_.axes.x.ticker=_.dateTicker,this.attrs_.axes.x.axisLabelFormatter=x.dateAxisLabelFormatter):(this.attrs_.xValueParser=function(t){return parseFloat(t)},this.attrs_.axes.x.valueFormatter=function(t){return t},this.attrs_.axes.x.ticker=_.numericTicks,this.attrs_.axes.x.axisLabelFormatter=this.attrs_.axes.x.valueFormatter)},Q.prototype.parseCSV_=function(t){var e,a,i=[],n=x.detectLineDelimiter(t),r=t.split(n||\"\\n\"),o=this.getStringOption(\"delimiter\");-1==r[0].indexOf(o)&&r[0].indexOf(\"\\t\")>=0&&(o=\"\\t\");var s=0;\"labels\"in this.user_attrs_||(s=1,this.attrs_.labels=r[0].split(o),this.attributes_.reparseSeries());for(var l,h=!1,u=this.attr_(\"labels\").length,d=!1,c=s;c0&&f[0]0;)e=String.fromCharCode(65+(t-1)%26)+e.toLowerCase(),t=Math.floor((t-1)/26);return e}(g.length),y.text=\"\";for(var m=0;m0&&f[0]0&&this.setAnnotations(g,!0),this.attributes_.reparseSeries()},Q.prototype.cascadeDataDidUpdateEvent_=function(){this.cascadeEvents_(\"dataDidUpdate\",{})},Q.prototype.start_=function(){var t=this.file_;if(\"function\"==typeof t&&(t=t()),x.isArrayLike(t))this.rawData_=this.parseArray_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if(\"object\"==typeof t&&\"function\"==typeof t.getColumnRange)this.parseDataTable_(t),this.cascadeDataDidUpdateEvent_(),this.predraw_();else if(\"string\"==typeof t){var e=x.detectLineDelimiter(t);if(e)this.loadedEvent_(t);else{var a;a=window.XMLHttpRequest?new XMLHttpRequest:new ActiveXObject(\"Microsoft.XMLHTTP\");var i=this;a.onreadystatechange=function(){4==a.readyState&&(200!==a.status&&0!==a.status||i.loadedEvent_(a.responseText))},a.open(\"GET\",t,!0),a.send(null)}}else console.error(\"Unknown data format: \"+typeof t)},Q.prototype.updateOptions=function(t,e){void 0===e&&(e=!1);var a=t.file,i=Q.copyUserAttrs_(t);\"rollPeriod\"in i&&(this.rollPeriod_=i.rollPeriod),\"dateWindow\"in i&&(this.dateWindow_=i.dateWindow);var 
n=x.isPixelChangingOptionList(this.attr_(\"labels\"),i);x.updateDeep(this.user_attrs_,i),this.attributes_.reparseSeries(),a?(this.cascadeEvents_(\"dataWillUpdate\",{}),this.file_=a,e||this.start_()):e||(n?this.predraw_():this.renderGraph_(!1))},Q.copyUserAttrs_=function(t){var e={};for(var a in t)t.hasOwnProperty(a)&&\"file\"!=a&&t.hasOwnProperty(a)&&(e[a]=t[a]);return e},Q.prototype.resize=function(t,e){if(!this.resize_lock){this.resize_lock=!0,null===t!=(null===e)&&(console.warn(\"Dygraph.resize() should be called with zero parameters or two non-NULL parameters. Pretending it was zero.\"),t=e=null);var a=this.width_,i=this.height_;t?(this.maindiv_.style.width=t+\"px\",this.maindiv_.style.height=e+\"px\",this.width_=t,this.height_=e):(this.width_=this.maindiv_.clientWidth,this.height_=this.maindiv_.clientHeight),a==this.width_&&i==this.height_||(this.resizeElements_(),this.predraw_()),this.resize_lock=!1}},Q.prototype.adjustRoll=function(t){this.rollPeriod_=t,this.predraw_()},Q.prototype.visibility=function(){for(this.getOption(\"visibility\")||(this.attrs_.visibility=[]);this.getOption(\"visibility\").length=a.length?console.warn(\"Invalid series number in setVisibility: \"+n):a[n]=t[n]);else for(var n=0;n=a.length?console.warn(\"Invalid series number in setVisibility: \"+n):a[n]=t[n]:t[n]<0||t[n]>=a.length?console.warn(\"Invalid series number in setVisibility: \"+t[n]):a[t[n]]=e;this.predraw_()},Q.prototype.size=function(){return{width:this.width_,height:this.height_}},Q.prototype.setAnnotations=function(t,e){if(this.annotations_=t,!this.layout_)return void console.warn(\"Tried to setAnnotations before dygraph was ready. Try setting them in a ready() block. See dygraphs.com/tests/annotation.html\");this.layout_.setAnnotations(this.annotations_),e||this.predraw_()},Q.prototype.annotations=function(){return this.annotations_},Q.prototype.getLabels=function(){var t=this.attr_(\"labels\");return t?t.slice():null},Q.prototype.indexFromSetName=function(t){return this.setIndexByName_[t]},Q.prototype.getRowForX=function(t){for(var e=0,a=this.numRows()-1;e<=a;){var i=a+e>>1,n=this.getValue(i,0);if(nt)a=i-1;else{if(e==i)return i;a=i}}return null},Q.prototype.ready=function(t){this.is_initial_draw_?this.readyFns_.push(t):t.call(this,this)},Q.prototype.addAndTrackEvent=function(t,e,a){x.addEvent(t,e,a),this.registeredEvents_.push({elem:t,type:e,fn:a})},Q.prototype.removeTrackedEvents_=function(){if(this.registeredEvents_)for(var t=0;tr.x+r.w||l.canvasyr.y+r.h)){var h=l.annotation,u=6;h.hasOwnProperty(\"tickHeight\")&&(u=h.tickHeight);var d=document.createElement(\"div\");d.style.fontSize=e.getOption(\"axisLabelFontSize\")+\"px\"\n;var c=\"dygraph-annotation\";h.hasOwnProperty(\"icon\")||(c+=\" dygraphDefaultAnnotation dygraph-default-annotation\"),h.hasOwnProperty(\"cssClass\")&&(c+=\" \"+h.cssClass),d.className=c;var p=h.hasOwnProperty(\"width\")?h.width:16,g=h.hasOwnProperty(\"height\")?h.height:16;if(h.hasOwnProperty(\"icon\")){var f=document.createElement(\"img\");f.src=h.icon,f.width=p,f.height=g,d.appendChild(f)}else l.annotation.hasOwnProperty(\"shortText\")&&d.appendChild(document.createTextNode(l.annotation.shortText));var v=l.canvasx-p/2;d.style.left=v+\"px\";var _=0;if(h.attachAtBottom){var y=r.y+r.h-g-u;o[v]?y-=o[v]:o[v]=0,o[v]+=u+g,_=y}else 
_=l.canvasy-g-u;d.style.top=_+\"px\",d.style.width=p+\"px\",d.style.height=g+\"px\",d.title=l.annotation.text,d.style.color=e.colorsMap_[l.name],d.style.borderColor=e.colorsMap_[l.name],h.div=d,e.addAndTrackEvent(d,\"click\",n(\"clickHandler\",\"annotationClickHandler\",l)),e.addAndTrackEvent(d,\"mouseover\",n(\"mouseOverHandler\",\"annotationMouseOverHandler\",l)),e.addAndTrackEvent(d,\"mouseout\",n(\"mouseOutHandler\",\"annotationMouseOutHandler\",l)),e.addAndTrackEvent(d,\"dblclick\",n(\"dblClickHandler\",\"annotationDblClickHandler\",l)),i.appendChild(d),this.annotations_.push(d);var x=t.drawingContext;if(x.save(),x.strokeStyle=h.hasOwnProperty(\"tickColor\")?h.tickColor:e.colorsMap_[l.name],x.lineWidth=h.hasOwnProperty(\"tickWidth\")?h.tickWidth:e.getOption(\"strokeWidth\"),x.beginPath(),h.attachAtBottom){var y=_+g;x.moveTo(l.canvasx,y),x.lineTo(l.canvasx,y+u)}else x.moveTo(l.canvasx,l.canvasy),x.lineTo(l.canvasx,l.canvasy-2-u);x.closePath(),x.stroke(),x.restore()}}},i.prototype.destroy=function(){this.detachLabels()},a.default=i,e.exports=a.default},{}],21:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=t(\"../dygraph-utils\"),n=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(i),r=function(){this.xlabels_=[],this.ylabels_=[]};r.prototype.toString=function(){return\"Axes Plugin\"},r.prototype.activate=function(t){return{layout:this.layout,clearChart:this.clearChart,willDrawChart:this.willDrawChart}},r.prototype.layout=function(t){var e=t.dygraph;if(e.getOptionForAxis(\"drawAxis\",\"y\")){var a=e.getOptionForAxis(\"axisLabelWidth\",\"y\")+2*e.getOptionForAxis(\"axisTickSize\",\"y\");t.reserveSpaceLeft(a)}if(e.getOptionForAxis(\"drawAxis\",\"x\")){var i;i=e.getOption(\"xAxisHeight\")?e.getOption(\"xAxisHeight\"):e.getOptionForAxis(\"axisLabelFontSize\",\"x\")+2*e.getOptionForAxis(\"axisTickSize\",\"x\"),t.reserveSpaceBottom(i)}if(2==e.numAxes()){if(e.getOptionForAxis(\"drawAxis\",\"y2\")){var a=e.getOptionForAxis(\"axisLabelWidth\",\"y2\")+2*e.getOptionForAxis(\"axisTickSize\",\"y2\");t.reserveSpaceRight(a)}}else e.numAxes()>2&&e.error(\"Only two y-axes are supported at this time. 
(Trying to use \"+e.numAxes()+\")\")},r.prototype.detachLabels=function(){function t(t){for(var e=0;e0){var x=r.numAxes(),m=[y(\"y\"),y(\"y2\")];v.yticks.forEach(function(t){if(void 0!==t.label){s=_.x;var e=\"y1\",a=m[0];1==t.axis&&(s=_.x+_.w,-1,e=\"y2\",a=m[1]);var n=a(\"axisLabelFontSize\");l=_.y+t.pos*_.h,o=f(t.label,\"y\",2==x?e:null);var r=l-n/2;r<0&&(r=0),r+n+3>c?o.style.bottom=\"0\":o.style.top=r+\"px\",0===t.axis?(o.style.left=_.x-a(\"axisLabelWidth\")-a(\"axisTickSize\")+\"px\",o.style.textAlign=\"right\"):1==t.axis&&(o.style.left=_.x+_.w+a(\"axisTickSize\")+\"px\",o.style.textAlign=\"left\"),o.style.width=a(\"axisLabelWidth\")+\"px\",u.appendChild(o),i.ylabels_.push(o)}});var b=this.ylabels_[0],w=r.getOptionForAxis(\"axisLabelFontSize\",\"y\");parseInt(b.style.top,10)+w>c-w&&(b.style.top=parseInt(b.style.top,10)-w/2+\"px\")}var A;if(r.getOption(\"drawAxesAtZero\")){var O=r.toPercentXCoord(0);(O>1||O<0||isNaN(O))&&(O=0),A=e(_.x+O*_.w)}else A=e(_.x);h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"y\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"y\"),h.beginPath(),h.moveTo(A,a(_.y)),h.lineTo(A,a(_.y+_.h)),h.closePath(),h.stroke(),2==r.numAxes()&&(h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"y2\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"y2\"),h.beginPath(),h.moveTo(a(_.x+_.w),a(_.y)),h.lineTo(a(_.x+_.w),a(_.y+_.h)),h.closePath(),h.stroke())}if(r.getOptionForAxis(\"drawAxis\",\"x\")){if(v.xticks){var D=y(\"x\");v.xticks.forEach(function(t){if(void 0!==t.label){s=_.x+t.pos*_.w,l=_.y+_.h,o=f(t.label,\"x\"),o.style.textAlign=\"center\",o.style.top=l+D(\"axisTickSize\")+\"px\";var e=s-D(\"axisLabelWidth\")/2;e+D(\"axisLabelWidth\")>d&&(e=d-D(\"axisLabelWidth\"),o.style.textAlign=\"right\"),e<0&&(e=0,o.style.textAlign=\"left\"),o.style.left=e+\"px\",o.style.width=D(\"axisLabelWidth\")+\"px\",u.appendChild(o),i.xlabels_.push(o)}})}h.strokeStyle=r.getOptionForAxis(\"axisLineColor\",\"x\"),h.lineWidth=r.getOptionForAxis(\"axisLineWidth\",\"x\"),h.beginPath();var E;if(r.getOption(\"drawAxesAtZero\")){var O=r.toPercentYCoord(0,0);(O>1||O<0)&&(O=1),E=a(_.y+O*_.h)}else E=a(_.y+_.h);h.moveTo(e(_.x),E),h.lineTo(e(_.x+_.w),E),h.closePath(),h.stroke()}h.restore()}},a.default=r,e.exports=a.default},{\"../dygraph-utils\":17}],22:[function(t,e,a){\"use strict\";Object.defineProperty(a,\"__esModule\",{value:!0});var i=function(){this.title_div_=null,this.xlabel_div_=null,this.ylabel_div_=null,this.y2label_div_=null};i.prototype.toString=function(){return\"ChartLabels Plugin\"},i.prototype.activate=function(t){return{layout:this.layout,didDrawChart:this.didDrawChart}};var n=function(t){var e=document.createElement(\"div\");return e.style.position=\"absolute\",e.style.left=t.x+\"px\",e.style.top=t.y+\"px\",e.style.width=t.w+\"px\",e.style.height=t.h+\"px\",e};i.prototype.detachLabels_=function(){for(var t=[this.title_div_,this.xlabel_div_,this.ylabel_div_,this.y2label_div_],e=0;e=2);o=h.yticks,l.save(),o.forEach(function(t){if(t.has_tick){var r=t.axis;g[r]&&(l.save(),f[r]&&l.setLineDash&&l.setLineDash(v[r]),l.strokeStyle=c[r],l.lineWidth=p[r],i=e(u.x),n=a(u.y+t.pos*u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i+u.w,n),l.stroke(),l.restore())}}),l.restore()}if(s.getOptionForAxis(\"drawGrid\",\"x\")){o=h.xticks,l.save();var 
v=s.getOptionForAxis(\"gridLinePattern\",\"x\"),f=v&&v.length>=2;f&&l.setLineDash&&l.setLineDash(v),l.strokeStyle=s.getOptionForAxis(\"gridLineColor\",\"x\"),l.lineWidth=s.getOptionForAxis(\"gridLineWidth\",\"x\"),o.forEach(function(t){t.has_tick&&(i=e(u.x+t.pos*u.w),n=a(u.y+u.h),l.beginPath(),l.moveTo(i,n),l.lineTo(i,u.y),l.closePath(),l.stroke())}),f&&l.setLineDash&&l.setLineDash([]),l.restore()}},i.prototype.destroy=function(){},a.default=i,e.exports=a.default},{}],24:[function(t,e,a){\"use strict\";function i(t,e,a){if(!t||t.length<=1)return'
    ';var i,n,r,o,s,l=0,h=0,u=[];for(i=0;i<=t.length;i++)l+=t[i%t.length];if((s=Math.floor(a/(l-t[0])))>1){for(i=0;i
    ';return d}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"../dygraph-utils\"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=function(){this.legend_div_=null,this.is_generated_div_=!1};o.prototype.toString=function(){return\"Legend Plugin\"},o.prototype.activate=function(t){var e,a=t.getOption(\"labelsDiv\");return a&&null!==a?e=\"string\"==typeof a||a instanceof String?document.getElementById(a):a:(e=document.createElement(\"div\"),e.className=\"dygraph-legend\",t.graphDiv.appendChild(e),this.is_generated_div_=!0),this.legend_div_=e,this.one_em_width_=10,{select:this.select,deselect:this.deselect,predraw:this.predraw,didDrawChart:this.didDrawChart}};var s=function(t){var e=document.createElement(\"span\");e.setAttribute(\"style\",\"margin: 0; padding: 0 0 0 1em; border: 0;\"),t.appendChild(e);var a=e.offsetWidth;return t.removeChild(e),a},l=function(t){return t.replace(/&/g,\"&\").replace(/\"/g,\""\").replace(//g,\">\")};o.prototype.select=function(t){var e=t.selectedX,a=t.selectedPoints,i=t.selectedRow,n=t.dygraph.getOption(\"legend\");if(\"never\"===n)return void(this.legend_div_.style.display=\"none\");if(\"follow\"===n){var r=t.dygraph.plotter_.area,s=this.legend_div_.offsetWidth,l=t.dygraph.getOptionForAxis(\"axisLabelWidth\",\"y\"),h=a[0].x*r.w+50,u=a[0].y*r.h-50;h+s+1>r.w&&(h=h-100-s-(l-r.x)),t.dygraph.graphDiv.appendChild(this.legend_div_),this.legend_div_.style.left=l+h+\"px\",this.legend_div_.style.top=u+\"px\"}var d=o.generateLegendHTML(t.dygraph,e,a,this.one_em_width_,i);this.legend_div_.innerHTML=d,this.legend_div_.style.display=\"\"},o.prototype.deselect=function(t){\"always\"!==t.dygraph.getOption(\"legend\")&&(this.legend_div_.style.display=\"none\");var e=s(this.legend_div_);this.one_em_width_=e;var a=o.generateLegendHTML(t.dygraph,void 0,void 0,e,null);this.legend_div_.innerHTML=a},o.prototype.didDrawChart=function(t){this.deselect(t)},o.prototype.predraw=function(t){if(this.is_generated_div_){t.dygraph.graphDiv.appendChild(this.legend_div_);var e=t.dygraph.getArea(),a=this.legend_div_.offsetWidth;this.legend_div_.style.left=e.x+e.w-a-1+\"px\",this.legend_div_.style.top=e.y+\"px\"}},o.prototype.destroy=function(){this.legend_div_=null},o.generateLegendHTML=function(t,e,a,n,s){var h={dygraph:t,x:e,series:[]},u={},d=t.getLabels();if(d)for(var c=1;c\":\" \"),a+=\"\"+r.dashHTML+\" \"+r.labelHTML+\"\")}return a}a=t.xHTML+\":\";for(var n=0;n\");a+=\" \"+r.labelHTML+\": \"+r.yHTML+\"\"}}return a},a.default=o,e.exports=a.default},{\"../dygraph-utils\":17}],25:[function(t,e,a){\"use strict\";function i(t){return t&&t.__esModule?t:{default:t}}Object.defineProperty(a,\"__esModule\",{value:!0});var n=t(\"../dygraph-utils\"),r=function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var a in t)Object.prototype.hasOwnProperty.call(t,a)&&(e[a]=t[a]);return e.default=t,e}(n),o=t(\"../dygraph-interaction-model\"),s=i(o),l=t(\"../iframe-tarp\"),h=i(l),u=function(){this.hasTouchInterface_=\"undefined\"!=typeof TouchEvent,this.isMobileDevice_=/mobile|android/gi.test(navigator.appVersion),this.interfaceCreated_=!1};u.prototype.toString=function(){return\"RangeSelector Plugin\"},u.prototype.activate=function(t){return 
this.dygraph_=t,this.getOption_(\"showRangeSelector\")&&this.createInterface_(),{layout:this.reserveSpace_,predraw:this.renderStaticLayer_,didDrawChart:this.renderInteractiveLayer_}},u.prototype.destroy=function(){this.bgcanvas_=null,this.fgcanvas_=null,this.leftZoomHandle_=null,this.rightZoomHandle_=null},u.prototype.getOption_=function(t,e){return this.dygraph_.getOption(t,e)},u.prototype.setDefaultOption_=function(t,e){this.dygraph_.attrs_[t]=e},u.prototype.createInterface_=function(){this.createCanvases_(),this.createZoomHandles_(),this.initInteraction_(),this.getOption_(\"animatedZooms\")&&(console.warn(\"Animated zooms and range selector are not compatible; disabling animatedZooms.\"),this.dygraph_.updateOptions({animatedZooms:!1},!0)),this.interfaceCreated_=!0,this.addToGraph_()},u.prototype.addToGraph_=function(){var t=this.graphDiv_=this.dygraph_.graphDiv;t.appendChild(this.bgcanvas_),t.appendChild(this.fgcanvas_),t.appendChild(this.leftZoomHandle_),t.appendChild(this.rightZoomHandle_)},u.prototype.removeFromGraph_=function(){var t=this.graphDiv_;t.removeChild(this.bgcanvas_),t.removeChild(this.fgcanvas_),t.removeChild(this.leftZoomHandle_),t.removeChild(this.rightZoomHandle_),this.graphDiv_=null},u.prototype.reserveSpace_=function(t){this.getOption_(\"showRangeSelector\")&&t.reserveSpaceBottom(this.getOption_(\"rangeSelectorHeight\")+4)},u.prototype.renderStaticLayer_=function(){this.updateVisibility_()&&(this.resize_(),this.drawStaticLayer_())},u.prototype.renderInteractiveLayer_=function(){this.updateVisibility_()&&!this.isChangingRange_&&(this.placeZoomHandles_(),this.drawInteractiveLayer_())},u.prototype.updateVisibility_=function(){var t=this.getOption_(\"showRangeSelector\");if(t)this.interfaceCreated_?this.graphDiv_&&this.graphDiv_.parentNode||this.addToGraph_():this.createInterface_();else if(this.graphDiv_){this.removeFromGraph_();var e=this.dygraph_;setTimeout(function(){e.width_=0,e.resize()},1)}return t},u.prototype.resize_=function(){function t(t,e,a,i){var n=i||r.getContextPixelRatio(e);t.style.top=a.y+\"px\",t.style.left=a.x+\"px\",t.width=a.w*n,t.height=a.h*n,t.style.width=a.w+\"px\",t.style.height=a.h+\"px\",1!=n&&e.scale(n,n)}var e=this.dygraph_.layout_.getPlotArea(),a=0;this.dygraph_.getOptionForAxis(\"drawAxis\",\"x\")&&(a=this.getOption_(\"xAxisHeight\")||this.getOption_(\"axisLabelFontSize\")+2*this.getOption_(\"axisTickSize\")),this.canvasRect_={x:e.x,y:e.y+e.h+a+4,w:e.w,h:this.getOption_(\"rangeSelectorHeight\")};var i=this.dygraph_.getNumericOption(\"pixelRatio\");t(this.bgcanvas_,this.bgcanvas_ctx_,this.canvasRect_,i),t(this.fgcanvas_,this.fgcanvas_ctx_,this.canvasRect_,i)},u.prototype.createCanvases_=function(){this.bgcanvas_=r.createCanvas(),this.bgcanvas_.className=\"dygraph-rangesel-bgcanvas\",this.bgcanvas_.style.position=\"absolute\",this.bgcanvas_.style.zIndex=9,this.bgcanvas_ctx_=r.getContext(this.bgcanvas_),this.fgcanvas_=r.createCanvas(),this.fgcanvas_.className=\"dygraph-rangesel-fgcanvas\",this.fgcanvas_.style.position=\"absolute\",this.fgcanvas_.style.zIndex=9,this.fgcanvas_.style.cursor=\"default\",this.fgcanvas_ctx_=r.getContext(this.fgcanvas_)},u.prototype.createZoomHandles_=function(){var t=new 
Image;t.className=\"dygraph-rangesel-zoomhandle\",t.style.position=\"absolute\",t.style.zIndex=10,t.style.visibility=\"hidden\",t.style.cursor=\"col-resize\",t.width=9,t.height=16,t.src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAQCAYAAADESFVDAAAAAXNSR0IArs4c6QAAAAZiS0dEANAAzwDP4Z7KegAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB9sHGw0cMqdt1UwAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAAaElEQVQoz+3SsRFAQBCF4Z9WJM8KCDVwownl6YXsTmCUsyKGkZzcl7zkz3YLkypgAnreFmDEpHkIwVOMfpdi9CEEN2nGpFdwD03yEqDtOgCaun7sqSTDH32I1pQA2Pb9sZecAxc5r3IAb21d6878xsAAAAAASUVORK5CYII=\",this.isMobileDevice_&&(t.width*=2,t.height*=2),this.leftZoomHandle_=t,this.rightZoomHandle_=t.cloneNode(!1)},u.prototype.initInteraction_=function(){var t,e,a,i,n,o,l,u,d,c,p,g,f,v,_=this,y=document,x=0,m=null,b=!1,w=!1,A=!this.isMobileDevice_,O=new h.default;t=function(t){var e=_.dygraph_.xAxisExtremes(),a=(e[1]-e[0])/_.canvasRect_.w;return[e[0]+(t.leftHandlePos-_.canvasRect_.x)*a,e[0]+(t.rightHandlePos-_.canvasRect_.x)*a]},e=function(t){return r.cancelEvent(t),b=!0,x=t.clientX,m=t.target?t.target:t.srcElement,\"mousedown\"!==t.type&&\"dragstart\"!==t.type||(r.addEvent(y,\"mousemove\",a),r.addEvent(y,\"mouseup\",i)),_.fgcanvas_.style.cursor=\"col-resize\",O.cover(),!0},a=function(t){if(!b)return!1;r.cancelEvent(t);var e=t.clientX-x;if(Math.abs(e)<4)return!0;x=t.clientX;var a,i=_.getZoomHandleStatus_();m==_.leftZoomHandle_?(a=i.leftHandlePos+e,a=Math.min(a,i.rightHandlePos-m.width-3),a=Math.max(a,_.canvasRect_.x)):(a=i.rightHandlePos+e,a=Math.min(a,_.canvasRect_.x+_.canvasRect_.w),a=Math.max(a,i.leftHandlePos+m.width+3));var o=m.width/2;return m.style.left=a-o+\"px\",_.drawInteractiveLayer_(),A&&n(),!0},i=function(t){return!!b&&(b=!1,O.uncover(),r.removeEvent(y,\"mousemove\",a),r.removeEvent(y,\"mouseup\",i),_.fgcanvas_.style.cursor=\"default\",A||n(),!0)},n=function(){try{var e=_.getZoomHandleStatus_();if(_.isChangingRange_=!0,e.isZoomed){var a=t(e);_.dygraph_.doZoomXDates_(a[0],a[1])}else _.dygraph_.resetZoom()}finally{_.isChangingRange_=!1}},o=function(t){var e=_.leftZoomHandle_.getBoundingClientRect(),a=e.left+e.width/2;e=_.rightZoomHandle_.getBoundingClientRect();var i=e.left+e.width/2;return t.clientX>a&&t.clientX=_.canvasRect_.x+_.canvasRect_.w?(n=_.canvasRect_.x+_.canvasRect_.w,i=n-o):(i+=e,n+=e);var s=_.leftZoomHandle_.width/2;return _.leftZoomHandle_.style.left=i-s+\"px\",_.rightZoomHandle_.style.left=n-s+\"px\",_.drawInteractiveLayer_(),A&&c(),!0},d=function(t){return!!w&&(w=!1,r.removeEvent(y,\"mousemove\",u),r.removeEvent(y,\"mouseup\",d),A||c(),!0)},c=function(){try{_.isChangingRange_=!0,_.dygraph_.dateWindow_=t(_.getZoomHandleStatus_()),_.dygraph_.drawGraph_(!1)}finally{_.isChangingRange_=!1}},p=function(t){if(!b&&!w){var e=o(t)?\"move\":\"default\";e!=_.fgcanvas_.style.cursor&&(_.fgcanvas_.style.cursor=e)}},g=function(t){\"touchstart\"==t.type&&1==t.targetTouches.length?e(t.targetTouches[0])&&r.cancelEvent(t):\"touchmove\"==t.type&&1==t.targetTouches.length?a(t.targetTouches[0])&&r.cancelEvent(t):i(t)},f=function(t){\"touchstart\"==t.type&&1==t.targetTouches.length?l(t.targetTouches[0])&&r.cancelEvent(t):\"touchmove\"==t.type&&1==t.targetTouches.length?u(t.targetTouches[0])&&r.cancelEvent(t):d(t)},v=function(t,e){for(var a=[\"touchstart\",\"touchend\",\"touchmove\",\"touchcancel\"],i=0;i1&&(g=c.rollingAverage(g,e.rollPeriod(),p)),d.push(g)}var f=[];for(t=0;t0)&&(m=Math.min(m,w),b=Math.max(b,w))}if(a)for(b=r.log10(b),b+=.25*b,m=r.log10(m),t=0;tthis.canvasRect_.x||a+1 {\n let str = \" \"\n let context = \"\"\n\n 
if (withContext && typeof chartMetadata.context === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n context = chartMetadata.context\n }\n\n if (typeof chartMetadata.plugin === \"string\" && chartMetadata.plugin !== \"\") {\n str = chartMetadata.plugin\n\n if (str.endsWith(\".plugin\")) {\n str = str.substring(0, str.length - 7)\n }\n\n if (typeof chartMetadata.module === \"string\" && chartMetadata.module !== \"\") {\n str += `:${chartMetadata.module}`\n }\n\n if (withContext && context !== \"\") {\n str += `, ${context}`\n }\n } else if (withContext && context !== \"\") {\n str = context\n }\n return str\n}\n\nexport const legendResolutionTooltip = (chartData: ChartData, chartMetadata: ChartMetadata) => {\n const collected = chartMetadata.update_every\n // todo if there's no data (but maybe there won't be situation like this), then use \"collected\"\n const viewed = chartData.view_update_every\n if (collected === viewed) {\n return `resolution ${seconds4human(collected)}`\n }\n\n return `resolution ${seconds4human(viewed)}, collected every ${seconds4human(collected)}`\n}\n\ntype GetNewSelectedDimensions = (arg: {\n allDimensions: string[],\n selectedDimensions: string[],\n clickedDimensionName: string,\n isModifierKeyPressed: boolean,\n}) => string[]\n\nexport const getNewSelectedDimensions: GetNewSelectedDimensions = ({\n allDimensions,\n selectedDimensions,\n clickedDimensionName,\n isModifierKeyPressed,\n}) => {\n // when selectedDimensions is empty, then all dimensions should be enabled\n // let's narrow this case now\n const enabledDimensions = selectedDimensions.length === 0 ? allDimensions : selectedDimensions\n const isCurrentlySelected = enabledDimensions.includes(clickedDimensionName)\n\n let newSelectedDimensions: string[]\n if (!isModifierKeyPressed\n && ((isCurrentlySelected && enabledDimensions.length > 1) || !isCurrentlySelected)\n ) {\n newSelectedDimensions = [clickedDimensionName]\n } else if (isCurrentlySelected) { // modifier key pressed\n newSelectedDimensions = enabledDimensions.filter(\n (dimension) => dimension !== clickedDimensionName,\n )\n } else { // modifier key pressed\n newSelectedDimensions = enabledDimensions.concat(clickedDimensionName)\n }\n\n if (newSelectedDimensions.length === allDimensions.length) {\n return []\n }\n return newSelectedDimensions\n}\n","import { createReducer } from \"redux-act\"\n\nimport { ChartsMetadata } from \"domains/global/types\"\n\nimport { startSnapshotModeAction, stopSnapshotModeAction, isSignedInAction, setOfflineAction } from \"./actions\"\n\nexport type StateT = {\n isSnapshotMode: boolean\n snapshotCharts: ChartsMetadata | null\n snapshotDataPoints: number | null\n isSignedIn: boolean\n offline: boolean\n}\n\nexport const initialState: StateT = {\n isSnapshotMode: false,\n snapshotCharts: null,\n snapshotDataPoints: null,\n isSignedIn: false,\n offline: false\n}\n\nexport const dashboardReducer = createReducer({}, initialState)\n\ndashboardReducer.on(startSnapshotModeAction, (state, { charts, dataPoints }) => ({\n ...state,\n snapshotCharts: charts, // todo integrate with /charts result\n snapshotDataPoints: dataPoints,\n isSnapshotMode: true,\n}))\n\ndashboardReducer.on(stopSnapshotModeAction, (state) => ({\n ...state,\n isSnapshotMode: initialState.isSnapshotMode,\n snapshotCharts: initialState.snapshotCharts,\n snapshotDataPoints: initialState.snapshotDataPoints,\n}))\n\ndashboardReducer.on(isSignedInAction, (state, { isSignedIn }) => ({\n ...state,\n 
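// A minimal usage sketch (not part of the bundle) of the dimension-toggle
// rules implemented by getNewSelectedDimensions above; values illustrative:
const all = ["user", "system", "nice"]

// plain click narrows the selection to the clicked dimension
getNewSelectedDimensions({
  allDimensions: all,
  selectedDimensions: [], // empty selection means "all enabled"
  clickedDimensionName: "user",
  isModifierKeyPressed: false,
}) // -> ["user"]

// modifier-click toggles one dimension in or out of the selection
getNewSelectedDimensions({
  allDimensions: all,
  selectedDimensions: ["user"],
  clickedDimensionName: "system",
  isModifierKeyPressed: true,
}) // -> ["user", "system"]

// selecting every dimension collapses back to [] ("all enabled")
getNewSelectedDimensions({
  allDimensions: all,
  selectedDimensions: ["user", "system"],
  clickedDimensionName: "nice",
  isModifierKeyPressed: true,
}) // -> []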
isSignedIn\n}))\n\ndashboardReducer.on(setOfflineAction, (state, { offline }) => ({\n ...state,\n offline\n}))\n","import { combineReducers } from \"redux\"\n\nimport { globalReducer } from \"domains/global/reducer\"\nimport { storeKey as globalKey } from \"domains/global/constants\"\n\nimport { chartReducer } from \"domains/chart/reducer\"\nimport { storeKey as chartKey } from \"domains/chart/constants\"\n\nimport { dashboardReducer } from \"domains/dashboard/reducer\"\nimport { storeKey as dashboardKey } from \"domains/dashboard/constants\"\n\nexport default combineReducers({\n [globalKey]: globalReducer,\n [chartKey]: chartReducer,\n\n // todo lazy-load and inject those reducers, when they are not needed (dashboard.js, cloud)\n [dashboardKey]: dashboardReducer,\n})\n","import axios from \"axios\"\n\nexport const axiosInstance = axios.create({\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n})\n","import axios from \"axios\"\n\nexport const axiosInstance = axios.create({\n // timeout: 30 * 1000, // todo\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n})\n","import {\n Subject, from, empty,\n} from \"rxjs\"\nimport {\n mergeMap, tap, catchError, startWith, switchMap,\n} from \"rxjs/operators\"\nimport { CancelTokenSource, Method } from \"axios\"\n\nimport { UnknownStringKeyT } from \"types/common\"\n\nimport { axiosInstance } from \"./axios-instance\"\n\nexport const CHART_UNMOUNTED = \"Chart scrolled out of view\"\n\ninterface FetchInputEvent {\n url: string\n method?: Method\n params?: UnknownStringKeyT\n data?: UnknownStringKeyT\n onErrorCallback: (error: Error) => void\n onSuccessCallback: (data: { [key: string]: unknown }) => void\n cancelTokenSource?: CancelTokenSource\n}\n\nconst METRICS_TIMEOUT = 15_000\n\nexport const getFetchStream = (concurrentCallsLimit: number) => {\n const fetch$ = new Subject()\n const resetFetch$ = new Subject()\n\n const handler = mergeMap(({\n url, method = \"GET\", params = {}, data, onErrorCallback, onSuccessCallback, cancelTokenSource,\n }: FetchInputEvent) => (\n from(axiosInstance.request({\n url,\n method,\n params,\n data,\n timeout: METRICS_TIMEOUT,\n cancelToken: cancelTokenSource?.token,\n })).pipe(\n tap(({ data: responseData }) => { onSuccessCallback(responseData) }),\n catchError((error: Error) => {\n // todo implement error handling to support NETDATA.options.current.retries_on_data_failures\n if (error?.message !== CHART_UNMOUNTED) {\n console.warn(\"fetch error\", url) // eslint-disable-line no-console\n }\n onErrorCallback(error)\n return empty()\n }),\n )\n ), concurrentCallsLimit)\n\n const output = resetFetch$.pipe(\n startWith(null),\n switchMap(() => fetch$.pipe(handler)),\n )\n\n output.subscribe()\n return [fetch$, resetFetch$]\n}\n","import { tail, sum, reverse } from \"ramda\"\nimport { ChartData, DygraphData } from \"domains/chart/chart-types\"\n\n/*\nwhen requesting for bigger time interval than available history in the agent,\nwe get only the available range. 
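// The queueing pattern getFetchStream above builds on, in miniature and with
// illustrative names: a Subject feeds mergeMap with a concurrency cap, and a
// reset Subject discards anything still queued by re-entering switchMap.
import { Subject, from } from "rxjs"
import { mergeMap, startWith, switchMap } from "rxjs/operators"

const task$ = new Subject<() => Promise<void>>()
const reset$ = new Subject<void>()
const CONCURRENCY = 2 // e.g. CONCURRENT_CALLS_LIMIT_PRINT above

reset$
  .pipe(
    startWith(null), // start the first "generation" immediately
    // each reset$ emission unsubscribes the inner pipe, dropping queued tasks
    switchMap(() => task$.pipe(mergeMap((run) => from(run()), CONCURRENCY))),
  )
  .subscribe()

// enqueue work; at most CONCURRENCY promises run at once
task$.next(() => fetch("/api/v1/info").then(() => undefined))
// abandon whatever is still waiting in the queue
reset$.next()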
Dashboard was first designed to not allow zooming-out too much.\nBut we want to show the requested time-range, so to do it consistently, we return nr of points\nwhen making the request, and after getting result, we add `null`s at the beginning\n */\n\ninterface GetCorrectedPointsArg {\n after: number\n before: number\n firstEntry: number\n points: number\n}\nexport const getCorrectedPoints = ({\n after,\n before,\n firstEntry,\n points,\n}: GetCorrectedPointsArg) => {\n const nowInSeconds = Math.round(new Date().valueOf() / 1000)\n const afterAbsolute = after > 0 ? after : nowInSeconds + after\n const beforeAbsolute = before > 0 ? before : nowInSeconds + before\n\n if (afterAbsolute < firstEntry) {\n // take into account first_entry\n const realAfter = Math.max(afterAbsolute, firstEntry)\n const requestedRange = beforeAbsolute - afterAbsolute\n const availableRange = beforeAbsolute - realAfter\n\n return Math.round((points * availableRange) / requestedRange)\n }\n return null\n}\n\nexport const addPointsDygraph = (data: DygraphData, nrOfPointsToFill: number) => {\n const viewUpdateEvery = data.view_update_every\n if (!data.result.data.length) {\n return data\n }\n const firstAddedTimestamp = data.result.data[0][0] - nrOfPointsToFill * viewUpdateEvery\n const emptyPoint = tail(data.result.labels).map(() => null)\n const nulls = new Array(nrOfPointsToFill)\n .fill(null)\n .map((_, i) => [firstAddedTimestamp + i * viewUpdateEvery, ...emptyPoint])\n return {\n ...data,\n after: data.after - viewUpdateEvery * nrOfPointsToFill,\n result: {\n ...data.result,\n data: nulls.concat(data.result.data),\n },\n }\n}\n\nexport const fillMissingData = (data: ChartData, nrOfPointsToFill: number) => {\n if (data.format === \"json\") {\n return addPointsDygraph(data as DygraphData, nrOfPointsToFill)\n }\n return data\n}\n\nconst emptyArray: number[] = []\nexport const transformResults = (data: ChartData, format: string, shouldRevertFlip: boolean) => {\n if (format === \"array\" && data.format === \"json\") {\n if (Array.isArray(data.result)) return data\n\n const dataResult = shouldRevertFlip\n ? 
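// Worked example (illustrative numbers) of the correction above. Suppose
// "now" is t = 1_000_000 s, the agent only has history since
// firstEntry = 999_700, and a chart asks for the last 10 minutes at 300 points:
const correctedPoints = getCorrectedPoints({
  after: -600, // relative: now - 600 s
  before: 0,   // relative: now
  firstEntry: 999_700, // only 300 s of history available
  points: 300,
})
// afterAbsolute = 999_400 < firstEntry, so:
// availableRange / requestedRange = 300 / 600 -> Math.round(300 * 0.5) = 150
// The caller can then request 150 points and let fillMissingData() pad the
// missing leading span with nulls, so the x-axis still covers 10 minutes.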
reverse((data as DygraphData).result.data)\n : (data as DygraphData).result.data\n return {\n ...data,\n // set proper output type so other functions like fillMissingData work properly\n format: \"array\",\n result: dataResult.reduce((acc: number[], pointData: number[]) => {\n pointData.shift()\n return [...acc, sum(pointData)]\n }, emptyArray),\n }\n }\n return data\n}\n\nexport const mapDefaultAggrMethod = (unit: string): string => {\n if (unit.length === 0) {\n return \"sum\"\n }\n const avgUnits: any = {\n percentage: true,\n percent: true,\n \"rotations/min\": true,\n ratio: true,\n seconds: true,\n \"seconds ago\": true,\n milliseconds: true,\n millisec: true,\n ms: true,\n \"log2 s\": true,\n minutes: true,\n hours: true,\n interval: true,\n ticks: true,\n celsius: true,\n c: true,\n mhz: true,\n hz: true,\n volts: true,\n kwh: true,\n ampere: true,\n amps: true,\n dbm: true,\n value: true,\n stratum: true,\n units: true,\n watt: true,\n temperature: true,\n \"random number\": true,\n rpm: true,\n quadro: true,\n \"adv/item\": true,\n multiplier: true,\n geforce: true,\n }\n if (avgUnits[unit.toLowerCase()]) {\n return \"avg\"\n }\n const avgUnitsRegExes: any = [\".*%.*\", \".*/operation\", \".*/run\", \".*/ run\", \".*/request\"]\n if (\n avgUnitsRegExes.some((regEx: string) => {\n const regExpression = RegExp(regEx, \"i\")\n return regExpression.test(unit.toLowerCase())\n })\n ) {\n return \"avg\"\n }\n return \"sum\"\n}\n","import styled from \"styled-components\"\nimport {\n getSizeBy, Text, TextSmall, getColor,\n} from \"@netdata/netdata-ui\"\n\nexport const Container = styled.div`\n width: 100%;\n height: 100%;\n min-height: ${getSizeBy(10)};\n display: flex;\n flex-flow: row nowrap;\n padding: ${getSizeBy(2)} ${getSizeBy(2)} ${getSizeBy(2)} ${getSizeBy(2)};\n`\n\nexport const SideContent = styled.div<{ right?: boolean }>`\n flex-grow: 0;\n flex-shrink: 0;\n height: 100%;\n align-self: stretch;\n`\n\nexport const ContentContainer = styled.div``\n\nexport const HeaderText = styled(Text)<{ error?: boolean; success?: boolean }>`\n color: ${({ error, success }) => (success && getColor(\"success\"))\n || (error && getColor(\"error\"))};\n font-weight: bold;\n display: block;\n margin-bottom: ${getSizeBy()};\n`\n\nexport const ContentText = styled(TextSmall)<{ error?: boolean; success?: boolean }>`\n display: block;\n color: ${({ error }) => (error && getColor(\"error\")) || getColor(\"border\")};\n`\n","import React from \"react\"\nimport {\n Container, SideContent, ContentContainer, HeaderText, ContentText,\n} from \"./styled\"\n\ninterface NotificationProps {\n header?: string\n leftContent?: React.ReactNode\n rightContent?: React.ReactNode\n text?: React.ReactNode\n className?: string\n renderContent?: (props: NotificationProps) => React.ReactNode | React.ReactNodeArray | null\n success?: boolean\n error?: boolean\n}\n\nexport const UINotification = (props: NotificationProps) => {\n const {\n header, text, leftContent, rightContent, renderContent, success, error,\n } = props\n return (\n \n {leftContent && {leftContent}}\n \n {header && (\n \n {header}\n \n )}\n {text && (\n \n {text}\n \n )}\n {renderContent && renderContent(props)}\n \n {rightContent && {rightContent}}\n \n )\n}\n\n// for usage in non-jsx contexts\n// eslint-disable-next-line react/jsx-props-no-spreading\nexport const createUINotification = (props: NotificationProps) => \n","import styled from \"styled-components\"\nimport { getSizeBy } from \"@netdata/netdata-ui\"\n\nexport const NodeIconContainer = 
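// Behaviour of mapDefaultAggrMethod above on a few representative units:
mapDefaultAggrMethod("")             // -> "sum" (no unit information)
mapDefaultAggrMethod("percentage")   // -> "avg" (exact table hit)
mapDefaultAggrMethod("Celsius")      // -> "avg" (lookup is case-insensitive)
mapDefaultAggrMethod("% of time")    // -> "avg" (matches the ".*%.*" regex)
mapDefaultAggrMethod("ms/operation") // -> "avg" (matches ".*/operation")
mapDefaultAggrMethod("packets/s")    // -> "sum" (rates fall through to sum)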
styled.div`\n width: ${getSizeBy(5)};\n height: ${getSizeBy(5)};\n margin-right: ${getSizeBy(2)};\n display: flex;\n justify-content: center;\n align-items: center;\n`\n\nexport const NotificationLink = styled.a`\n &,\n &:hover {\n text-decoration: underline;\n color: inherit;\n }\n`\n","import React from \"react\"\nimport { Icon } from \"@netdata/netdata-ui\"\nimport { toast } from \"react-toastify\"\n\nimport { createUINotification } from \"components/ui-notification\"\n\nimport * as S from \"./styled\"\n\nexport const toastOptions = {\n position: toast.POSITION.BOTTOM_RIGHT,\n autoClose: 10000,\n pauseOnFocusLoss: false,\n}\n\nexport const showCloudInstallationProblemNotification = () => {\n const uiNotification = {\n header: \"Installation error\",\n text: \"The installer could not prepare the required dependencies to enable Netdata Cloud\"\n + \" functionality\",\n }\n const notificationComponent = createUINotification({\n ...uiNotification,\n error: true,\n leftContent: (\n \n \n \n ),\n })\n toast.error(notificationComponent, toastOptions)\n}\n\nexport const showCloudConnectionProblemNotification = () => {\n const uiNotification = {\n header: \"Connection Problem\",\n text: (\n \n To access Cloud install again your agent via the kickstart script\n \n ),\n }\n const notificationComponent = createUINotification({\n ...uiNotification,\n error: true,\n leftContent: (\n \n \n \n ),\n })\n toast.error(notificationComponent, toastOptions)\n}\n","import {\n mergeAll, pipe, split, mergeRight,\n} from \"ramda\"\nimport { mapIndexed } from \"ramda-adjunct\"\n\nconst defaultUrlOptions = {\n hash: \"#\",\n theme: null,\n help: null,\n mode: \"live\", // 'live', 'print'\n update_always: false,\n pan_and_zoom: false,\n server: null,\n after: 0,\n before: 0,\n highlight: false,\n highlight_after: 0,\n highlight_before: 0,\n nowelcome: false,\n show_alarms: false,\n chart: null,\n family: null,\n alarm: null,\n alarm_unique_id: 0,\n alarm_id: 0,\n alarm_event_id: 0,\n alarm_when: 0,\n} as {[key: string]: unknown}\n\nconst isInvalidPair = ([key, value]: [string, string]) => (\n defaultUrlOptions[key] === undefined || value === undefined\n)\n\nconst parseQueryPair = ([key, value]: [string, string]): {[key: string] : unknown} => {\n if (isInvalidPair([key, value])) {\n return {}\n }\n return {\n [key]: decodeURIComponent(value),\n }\n}\n\nexport const parseUrl = pipe(\n split(\";\"),\n mapIndexed((value, index) => (\n (index === 0) ? 
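// The hash format consumed by parseUrl above: the first ";"-separated token
// is kept verbatim as `hash`, the rest are key=value pairs that must match a
// key in defaultUrlOptions (unknown keys are silently dropped):
parseUrl("#menu_system_submenu_cpu;theme=slate;mode=print")
// -> { ...defaultUrlOptions, hash: "#menu_system_submenu_cpu",
//      theme: "slate", mode: "print" }
parseUrl("#;bogus=1")
// -> { ...defaultUrlOptions, hash: "#" } ("bogus" is not a known option)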
{ hash: value } : parseQueryPair((value.split(\"=\") as [string, string]))\n )),\n mergeAll,\n mergeRight(defaultUrlOptions),\n)\n\nconst urlParsed = parseUrl(document.location.hash)\n\nexport const isPrintMode = urlParsed.mode === \"print\"\n","import {\n call,\n put,\n takeEvery,\n select,\n spawn,\n take,\n delay,\n} from \"redux-saga/effects\"\nimport { channel } from \"redux-saga\"\nimport { Action } from \"redux-act\"\n\nimport { axiosInstance } from \"utils/api\"\nimport { alwaysEndWithSlash, serverDefault } from \"utils/server-detection\"\nimport { getFetchStream } from \"utils/netdata-sdk\"\nimport { isMainJs } from \"utils/env\"\nimport { fillMissingData, transformResults } from \"utils/fill-missing-data\"\nimport {\n showCloudInstallationProblemNotification, showCloudConnectionProblemNotification,\n} from \"components/notifications\"\nimport { selectGlobalPanAndZoom, selectSnapshot, selectRegistry } from \"domains/global/selectors\"\nimport { StateT as GlobalStateT } from \"domains/global/reducer\"\nimport { stopSnapshotModeAction } from \"domains/dashboard/actions\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\nimport { INFO_POLLING_FREQUENCY } from \"domains/global/constants\"\n\nimport {\n fetchDataAction,\n FetchDataPayload,\n fetchChartAction,\n FetchChartPayload,\n fetchDataForSnapshotAction,\n FetchDataForSnapshotPayload,\n fetchInfoAction,\n FetchInfoPayload,\n fetchDataCancelAction,\n} from \"./actions\"\nimport { ChartData } from \"./chart-types\"\n\nconst CONCURRENT_CALLS_LIMIT_METRICS = isMainJs ? 30 : 60\nconst CONCURRENT_CALLS_LIMIT_PRINT = 2\nconst CONCURRENT_CALLS_LIMIT_SNAPSHOTS = 1\n\nconst fetchDataResponseChannel = channel()\n\nexport function* watchFetchDataResponseChannel() {\n while (true) {\n const action = (yield take(fetchDataResponseChannel))\n\n // special case - if requested relative timeRange, and during request the mode has been changed\n // to absolute global-pan-and-zoom, cancel the store update\n // todo do xss check of data\n if (action.type === fetchDataAction.success.toString()) {\n const payload = (action.payload as FetchDataPayload)\n const { viewRange } = payload.fetchDataParams\n const [start, end] = viewRange\n const globalPanAndZoom = (yield select(\n selectGlobalPanAndZoom,\n )) as GlobalStateT[\"globalPanAndZoom\"]\n\n if (globalPanAndZoom\n && (start <= 0 || end <= 0) // check if they are not timestamps\n ) {\n yield put(fetchDataCancelAction({\n id: payload.id,\n }))\n // eslint-disable-next-line no-continue\n continue\n }\n }\n\n yield put(action)\n }\n}\n\n// todo construct a new version of key that will be safer to be used in future\n// (while keeping old key supported for some time)\n// perhaps the key could be passed as attribute to the chart, to avoid matching\nconst constructCompatibleKey = (dimensions: undefined | string, options: string) => (\n // strange transformations for backwards compatibility. old snapshot keys were encoded this way\n // that empty dimensions were actually \"null\" string\n `${dimensions === undefined\n ? \"null\"\n : encodeURIComponent(dimensions)\n },${encodeURIComponent(options)}`\n)\n\n// currently BE always transforms data as if `flip` was there\nconst IS_FLIP_RESPECTED_IN_COMPOSITE_CHARTS = false\n\nconst getGroupByValues = (groupBy) => {\n if (groupBy === \"chart\") return \"node\"\n if (groupBy === \"node\" || groupBy === \"dimension\") return groupBy\n return `label=${groupBy}`\n}\n\nconst [fetchMetrics$] = getFetchStream(\n isPrintMode ? 
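// The channel pattern used by watchFetchDataResponseChannel above, reduced
// to its core (names illustrative): callbacks that fire outside the saga
// middleware (RxJS subscribers, DOM events) put actions onto a channel, and
// a single watcher drains it back into the redux store.
import { channel } from "redux-saga"
import { put, take } from "redux-saga/effects"

const responseChannel = channel()

// callable from non-saga code, e.g. an onSuccessCallback:
export const pushAction = (action: { type: string }) => {
  responseChannel.put(action)
}

export function* watchResponseChannel() {
  while (true) {
    const action = yield take(responseChannel)
    yield put(action) // re-dispatch into the store
  }
}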
CONCURRENT_CALLS_LIMIT_PRINT : CONCURRENT_CALLS_LIMIT_METRICS,\n)\nfunction* fetchDataSaga({ payload }: Action) {\n const {\n // props for api\n host, context, chart, format, points, group, gtime, options,\n after, before, dimensions, labels, postGroupBy, postAggregationMethod,\n aggrMethod, dimensionsAggrMethod, nodeIDs, httpMethod,\n groupBy = \"dimension\", // group by node, dimension, or label keys\n aggrGroups = [],\n // props for the store\n fetchDataParams, id, cancelTokenSource,\n } = payload\n\n const snapshot = yield select(selectSnapshot)\n if (snapshot) {\n // if reading snapshot\n const dimensionsWithUrlOptions = constructCompatibleKey(dimensions, options)\n const matchingKey = Object.keys(snapshot.data).find((snapshotKey) => (\n snapshotKey.startsWith(chart) && snapshotKey.includes(dimensionsWithUrlOptions)\n ))\n if (!matchingKey) {\n // eslint-disable-next-line no-console\n console.warn(`Could not find snapshot key for chart: ${chart} and id ${id}`)\n return\n }\n const data = snapshot.data[matchingKey]\n yield put(fetchDataAction.success({\n chartData: data,\n fetchDataParams,\n id,\n }))\n return\n }\n\n const url = isMainJs\n ? `${alwaysEndWithSlash(host)}api/v1/data`\n : host\n\n const agentOptionsOriginal = options.split(\"|\")\n const hasFlip = agentOptionsOriginal.includes(\"flip\")\n const shouldAddFakeFlip = !IS_FLIP_RESPECTED_IN_COMPOSITE_CHARTS && !hasFlip\n // if flip is not respected in composite-charts, send it always (like dygraph charts normally do)\n const agentOptions = shouldAddFakeFlip\n ? agentOptionsOriginal.concat(\"flip\") : agentOptionsOriginal\n\n const groupValues = [\n getGroupByValues(groupBy),\n postGroupBy && `label=${postGroupBy}`,\n ].filter(Boolean)\n\n const axiosOptions = httpMethod === \"POST\" ? {\n // used by cloud's room-overview\n data: {\n filter: {\n nodeIDs,\n context,\n dimensions: dimensions ? dimensions.split(/['|]/) : undefined,\n labels,\n },\n after,\n before,\n points,\n group,\n gtime,\n agent_options: agentOptions,\n ...(postAggregationMethod && { post_aggregation_methods: [postAggregationMethod] }),\n aggregations: [groupBy !== \"dimension\" && {\n method: dimensionsAggrMethod || \"sum\",\n groupBy: [\"chart\", ...groupValues],\n },\n groupBy !== \"chart\" && {\n method: aggrMethod,\n groupBy: groupValues,\n ...(aggrGroups.length && { labels: aggrGroups }),\n }].filter(Boolean),\n },\n } : {\n params: {\n chart,\n _: new Date().valueOf(),\n format,\n points,\n group,\n gtime,\n options,\n after,\n before,\n dimensions,\n },\n }\n\n const onSuccessCallback = (data: { [id: string]: unknown}) => {\n if (!data?.result) {\n fetchDataResponseChannel.put(fetchDataAction.failure({ id }))\n } else {\n const { fillMissingPoints } = fetchDataParams\n\n const transformedResults = transformResults(\n (data as unknown) as ChartData,\n format,\n shouldAddFakeFlip,\n )\n\n const chartData = {\n ...transformedResults,\n // @ts-ignore\n ...((\"post_aggregated_data\" in data.result) && {\n postAggregationMethod,\n groupBy,\n postGroupBy,\n aggrGroups,\n // @ts-ignore\n postAggregated: data.result.post_aggregated_data[postAggregationMethod],\n }),\n }\n\n fetchDataResponseChannel.put(fetchDataAction.success({\n chartData: fillMissingPoints\n ? 
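// What getGroupByValues above produces for each supported groupBy mode
// (anything other than "chart"/"node"/"dimension" is treated as a label key);
// these values, plus an optional `label=${postGroupBy}` entry, populate the
// aggregations array of the POST body:
getGroupByValues("chart")         // -> "node"
getGroupByValues("node")          // -> "node"
getGroupByValues("dimension")     // -> "dimension"
getGroupByValues("k8s_namespace") // -> "label=k8s_namespace"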
fillMissingData(chartData as ChartData, fillMissingPoints)\n : chartData,\n fetchDataParams,\n id,\n }))\n }\n }\n\n const onErrorCallback = (error: Error) => {\n console.warn(\"fetch chart data failure\", error) // eslint-disable-line no-console\n fetchDataResponseChannel.put(fetchDataAction.failure({ id }))\n }\n\n fetchMetrics$.next({\n ...axiosOptions,\n method: httpMethod || \"GET\",\n url,\n onErrorCallback,\n onSuccessCallback,\n cancelTokenSource,\n })\n}\n\nconst [fetchForSnapshot$, resetFetchForSnapshot$] = getFetchStream(CONCURRENT_CALLS_LIMIT_SNAPSHOTS)\nfunction fetchDataForSnapshotSaga({ payload }: Action) {\n const {\n host, chart, format, points, group, gtime, options,\n after, before, dimensions, aggrMethod,\n groupBy,\n nodeIDs,\n chartLibrary, id,\n } = payload\n\n // backwards-compatibility, the keys look like this:\n // net_errors.stf0,dygraph,null,ms%7Cflip%7Cjsonwrap%7Cnonzero\n const chartDataUniqueID = `${chart},${chartLibrary},${constructCompatibleKey(\n dimensions,\n options,\n )}`\n\n const url = `${alwaysEndWithSlash(host)}api/v1/data`\n const params = {\n chart,\n _: new Date().valueOf(),\n format,\n points,\n group,\n gtime,\n options,\n after,\n before,\n dimensions,\n ...(aggrMethod && { aggr_method: aggrMethod }),\n ...(nodeIDs && { node_ids: nodeIDs.join(\",\") }),\n ...(groupBy && { groupBy }),\n }\n\n const onSuccessCallback = (data: unknown) => {\n fetchDataResponseChannel.put(fetchDataForSnapshotAction.success({\n snapshotData: data,\n id,\n }))\n // temporarily, until main.js finished rewrite\n // @ts-ignore\n window.chartUpdated({\n chartDataUniqueID,\n data,\n })\n }\n\n const onErrorCallback = () => {\n fetchDataResponseChannel.put(fetchDataForSnapshotAction.failure({ id }))\n // @ts-ignore\n window.chartUpdated({\n chartDataUniqueID,\n chart,\n data: null,\n })\n }\n\n fetchForSnapshot$.next({\n url,\n params,\n onErrorCallback,\n onSuccessCallback,\n })\n}\n\nfunction stopSnapshotModeSaga() {\n // any calls in the queue should stop when save-snapshot modal is closed\n resetFetchForSnapshot$.next()\n}\n\nfunction* fetchChartSaga({ payload }: Action) {\n const { chart, id, host } = payload\n\n const snapshot = yield select(selectSnapshot)\n if (snapshot) {\n yield put(fetchChartAction.success({\n chartMetadata: snapshot.charts.charts[chart],\n id,\n }))\n return\n }\n\n let response\n const url = isMainJs\n ? 
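// How the backwards-compatible snapshot key above is assembled for a chart
// with no explicit dimensions (stored as the literal string "null"):
const chart = "net_errors.stf0"
const chartLibrary = "dygraph"
const key = `${chart},${chartLibrary},${constructCompatibleKey(
  undefined, // dimensions not set
  "ms|flip|jsonwrap|nonzero",
)}`
// -> "net_errors.stf0,dygraph,null,ms%7Cflip%7Cjsonwrap%7Cnonzero"
// ("|" is percent-encoded to %7C by encodeURIComponent)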
`${alwaysEndWithSlash(host)}api/v1/chart`\n : host.replace(\"/data\", \"/chart\")\n try {\n response = yield call(axiosInstance.get, url, {\n params: {\n chart,\n },\n })\n } catch (e) {\n console.warn(\"fetch chart details failure\") // eslint-disable-line no-console\n yield put(fetchChartAction.failure({ id }))\n return\n }\n yield put(fetchChartAction.success({\n chartMetadata: response.data,\n id,\n }))\n}\n\nfunction* fetchInfoSaga({ payload }: Action) {\n const { poll } = payload\n let isCloudEnabled = false\n let isAgentClaimed = false\n let isCloudAvailable = false\n let isACLKAvailable = false\n\n try {\n const registry: GlobalStateT[\"registry\"] = yield select(selectRegistry)\n const wasCloudAvailable = registry?.isCloudAvailable\n const wasACLKAvailable = registry?.isACLKAvailable\n\n const { data } = yield call(axiosInstance.get, `${serverDefault}/api/v1/info`)\n isCloudAvailable = data?.[\"cloud-available\"] || false\n isCloudEnabled = data?.[\"cloud-enabled\"] || false\n isAgentClaimed = data?.[\"agent-claimed\"] || false\n isACLKAvailable = data?.[\"aclk-available\"] || false\n\n yield put(fetchInfoAction.success({\n isCloudAvailable, isCloudEnabled, isAgentClaimed, isACLKAvailable, fullInfoPayload: data,\n }))\n\n if (isCloudEnabled && (wasCloudAvailable === null) && !isCloudAvailable) {\n // show only once per session\n showCloudInstallationProblemNotification()\n }\n if (isCloudAvailable && isAgentClaimed && (wasACLKAvailable !== false) && !isACLKAvailable) {\n // show at session-init and if we see a change of isACLKAvailable from true to false\n showCloudConnectionProblemNotification()\n }\n // TODO: No success notification spec`ed?\n // else if (!wasACLKAvailable && isACLKAvailable) {\n // toast.success(\"Connected to the Cloud!\", {\n // position: \"bottom-right\",\n // type: toast.TYPE.SUCCESS,\n // autoClose: NOTIFICATIONS_TIMEOUT,\n // })\n // }\n } catch (e) {\n console.warn(\"fetch agent info failure\") // eslint-disable-line no-console\n yield put(fetchInfoAction.failure())\n }\n\n if (poll && isCloudEnabled && isAgentClaimed) {\n yield delay(INFO_POLLING_FREQUENCY)\n yield put(fetchInfoAction({ poll: true }))\n }\n}\n\n\nexport function* chartSagas() {\n yield takeEvery(fetchDataAction.request, fetchDataSaga)\n yield takeEvery(fetchChartAction.request, fetchChartSaga)\n yield takeEvery(fetchDataForSnapshotAction.request, fetchDataForSnapshotSaga)\n yield takeEvery(stopSnapshotModeAction, stopSnapshotModeSaga)\n yield takeEvery(fetchInfoAction.request, fetchInfoSaga)\n yield spawn(watchFetchDataResponseChannel)\n}\n","export const sidePanelTransitionTimeInSeconds = 0.2\n","import { sortBy, prop, last } from \"ramda\"\nimport { Action } from \"redux-act\"\nimport {\n call, delay, spawn, take, takeEvery, put,\n} from \"redux-saga/effects\"\n\nimport { axiosInstance } from \"utils/api\"\nimport { serverStatic } from \"utils/server-detection\"\nimport { name2id } from \"utils/name-2-id\"\n\nimport {\n startAlarmsAction, StartAlarmsPayload, fetchAllAlarmsAction, updateActiveAlarmsAction,\n} from \"./actions\"\nimport { AlarmLogs, AlarmLog, ActiveAlarms } from \"./types\"\n\nconst ALARMS_INITIALIZATION_DELAY = 1000\nconst ALARMS_UPDATE_EVERY = 10000 // the time in ms between alarm checks\nconst CHART_DIV_OFFSET = -50\n\n// firefox moves the alarms off-screen (above, outside the top of the screen)\n// if alarms are shown faster than: one per 500ms\nconst ALARMS_MS_BETWEEN_NOTIFICATIONS = 500\n\n// equal to old NETDATA.alarms.notifications\nconst 
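// The polling shape fetchInfoSaga above relies on, sketched with illustrative
// names: the worker re-dispatches its own trigger after a delay, so polling
// continues only while the success path decides it should (here simplified
// to the `poll` flag alone).
import { delay, put, takeEvery } from "redux-saga/effects"

const POLL_MS = 30_000 // stand-in for INFO_POLLING_FREQUENCY

function* pollSaga(action: { type: string; payload: { poll: boolean } }) {
  // ...fetch and store /api/v1/info here...
  if (action.payload.poll) {
    yield delay(POLL_MS)
    yield put({ type: action.type, payload: { poll: true } }) // next tick
  }
}

export function* watchPoll() {
  yield takeEvery("info/fetchRequest", pollSaga)
}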
areNotificationsAvailable = \"Notification\" in window\n\nconst notificationCallback = window.netdataAlarmsNotifCallback\n\n\n// todo this doesn't change in the session, but should be moved to the redux state anyway\nlet firstNotificationId = 0\nlet lastNotificationId = 0\n\n\nconst scrollToChart = (chartID: unknown): boolean => {\n if (typeof chartID === \"string\") {\n const chartElement = document.querySelector(`#chart_${name2id(chartID)}`)\n if (chartElement) {\n const offset = (chartElement as HTMLDivElement).offsetTop + CHART_DIV_OFFSET;\n (document.querySelector(\"html\") as HTMLElement).scrollTop = offset\n return true\n }\n }\n return false\n}\n\n// perhaps sagas are not the best place for this\nconst scrollToAlarm = (alarm: AlarmLog) => {\n if (typeof alarm === \"object\") {\n const hasFoundChart = scrollToChart(alarm.chart)\n if (hasFoundChart) {\n window.focus()\n }\n }\n}\n\nconst requestPermissions = () => {\n if (areNotificationsAvailable) {\n if (Notification.permission === \"default\") {\n Notification.requestPermission()\n }\n }\n}\n\nconst hasGivenNotificationPermissions = () => (areNotificationsAvailable\n && Notification.permission === \"granted\"\n)\n\nfunction* getLog(lastNotificationIdArg: number, serverDefault: string) {\n try {\n const { data } = yield call(\n axiosInstance.get,\n `${serverDefault}/api/v1/alarm_log?after=${lastNotificationIdArg}`,\n )\n // todo xss check\n return data\n } catch (error) {\n console.warn(\"Error fetching alarms log\", error) // eslint-disable-line no-console\n return null\n }\n}\n\ninterface NotificationConfig {\n notificationTitle: string\n notificationOptions: NotificationOptions\n notificationHandler: (event: Event) => void\n}\n// called \"notify\" in old codebase\nconst getNotification = (\n entry: AlarmLog, activeAlarms: ActiveAlarms, firstNotificationIdArg: number,\n): NotificationConfig | undefined => {\n if (entry.updated) {\n // has been updated by another alarm\n return\n }\n\n let valueString = entry.value_string\n const t = activeAlarms.alarms[`${entry.chart}.${entry.name}`]\n if (typeof t !== \"undefined\"\n && entry.status === t.status\n && typeof t.value_string !== \"undefined\"\n ) {\n valueString = t.value_string\n }\n\n const name = entry.name.replace(/_/g, \" \")\n let status = entry.status.toLowerCase()\n let title = `${name} = ${valueString}`\n const tag = entry.alarm_id\n let icon = \"images/banner-icon-144x144.png\"\n let interaction = false\n let show = true\n\n // switch/case left here to simplify refractor (it's very similar to old code)\n switch (entry.status) {\n case \"REMOVED\":\n show = false\n break\n\n case \"UNDEFINED\":\n return\n\n case \"UNINITIALIZED\":\n return\n\n case \"CLEAR\":\n if (entry.unique_id < firstNotificationIdArg) {\n // alarm is not current\n return\n }\n if (entry.old_status === \"UNINITIALIZED\" || entry.old_status === \"UNDEFINED\") {\n // alarm switch to CLEAR from old_status\n return\n }\n if (entry.no_clear_notification) {\n // alarm is CLEAR but has no_clear_notification flag\n return\n }\n title = `${name} back to normal (${valueString})`\n icon = \"images/check-mark-2-128-green.png\"\n interaction = false\n break\n\n case \"WARNING\":\n if (entry.old_status === \"CRITICAL\") {\n status = `demoted to ${entry.status.toLowerCase()}`\n }\n\n icon = \"images/alert-128-orange.png\"\n interaction = false\n break\n\n case \"CRITICAL\":\n if (entry.old_status === \"WARNING\") {\n status = `escalated to ${entry.status.toLowerCase()}`\n }\n\n icon = \"images/alert-128-red.png\"\n 
interaction = true\n break\n\n default:\n console.warn(`invalid alarm status ${entry.status}`) // eslint-disable-line no-console\n return\n }\n\n // filter recipients\n // if (show) {\n // show = NETDATA.alarms.recipientMatches(entry.recipient, NETDATA.alarms.recipients)\n // }\n\n\n if (show) {\n if (typeof notificationCallback === \"function\") {\n show = notificationCallback(entry)\n }\n\n if (show) {\n // show this notification\n // eslint-disable-next-line consistent-return\n return {\n notificationTitle: title,\n notificationOptions: {\n body: `${entry.hostname} - ${entry.chart} (${entry.family}) - ${status}: ${entry.info}`,\n tag: `${tag}`,\n requireInteraction: interaction,\n icon: serverStatic + icon,\n data: entry,\n },\n notificationHandler: (event: Event) => {\n event.preventDefault()\n if (event.target) {\n const { data } = event.target as Notification\n scrollToAlarm(data)\n }\n },\n }\n }\n }\n}\n\nfunction* notifyAll(serverDefault: string, activeAlarms: ActiveAlarms) {\n const alarmLogs: AlarmLogs = yield call(getLog, lastNotificationId, serverDefault)\n if (alarmLogs === null || typeof alarmLogs !== \"object\") {\n console.warn(\"invalid alarms log response\") // eslint-disable-line no-console\n return\n }\n\n if (alarmLogs.length === 0) {\n console.log(\"received empty alarm log\") // eslint-disable-line no-console\n return\n }\n\n const logsSorted = sortBy(prop(\"unique_id\"), alarmLogs)\n\n // eslint-disable-next-line camelcase\n const newLogs = logsSorted.filter(({ unique_id }) => unique_id > lastNotificationId)\n const notifications = newLogs\n .map((entry) => (getNotification(entry, activeAlarms, firstNotificationId)))\n .filter((x) => x !== undefined) as NotificationConfig[]\n\n for (let i = 0; i < notifications.length; i += 1) {\n const {\n notificationTitle, notificationOptions, notificationHandler,\n } = notifications[i]\n const notification = new Notification(\n notificationTitle,\n notificationOptions,\n )\n notification.onclick = notificationHandler\n\n yield delay(ALARMS_MS_BETWEEN_NOTIFICATIONS)\n }\n\n // todo put to redux store\n lastNotificationId = (last(logsSorted) as AlarmLog).unique_id\n\n if (typeof window.netdataAlarmsRemember === \"undefined\" || window.netdataAlarmsRemember) {\n localStorage.setItem(\"last_notification_id\", `${lastNotificationId}`)\n }\n}\n\n\nfunction* get(what: string, serverDefault: string) {\n const { data } = yield call(axiosInstance.get, `${serverDefault}/api/v1/alarms?${what}`)\n if (firstNotificationId === 0 && typeof data.latest_alarm_log_unique_id === \"number\") {\n firstNotificationId = data.latest_alarm_log_unique_id\n }\n return data\n}\n\nfunction* alarmsLoop(serverDefault: string) {\n while (true) {\n const activeAlarms = (yield call(get, \"active\", serverDefault)) as ActiveAlarms\n if (activeAlarms) {\n yield put(updateActiveAlarmsAction({ activeAlarms }))\n if (\n hasGivenNotificationPermissions()\n // timestamps in seconds\n && (activeAlarms.latest_alarm_log_unique_id > lastNotificationId)\n ) {\n yield call(notifyAll, serverDefault, activeAlarms)\n\n if (activeAlarms.status === false) {\n // Health monitoring is disabled on this netdata\n break\n }\n }\n }\n yield delay(ALARMS_UPDATE_EVERY)\n }\n}\n\nfunction* startAlarms() {\n // make sure we handle that action only once, we don't want multiple intervals/loops\n const { payload }: { payload: StartAlarmsPayload } = yield take(startAlarmsAction)\n const { serverDefault } = payload\n\n yield delay(ALARMS_INITIALIZATION_DELAY)\n\n lastNotificationId = 
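// The Firefox workaround above (at most one desktop notification per 500 ms)
// as a plain async helper, independent of redux-saga; illustrative only:
const MS_BETWEEN_NOTIFICATIONS = 500

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

export async function showNotificationsPaced(
  configs: { title: string; options: NotificationOptions; onClick: (e: Event) => void }[],
) {
  for (const { title, options, onClick } of configs) {
    const notification = new Notification(title, options)
    notification.onclick = onClick
    // pace the stream so the browser cannot push notifications off-screen
    await sleep(MS_BETWEEN_NOTIFICATIONS)
  }
}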
+(localStorage.getItem(\"last_notification_id\") || lastNotificationId)\n requestPermissions()\n yield call(alarmsLoop, serverDefault)\n}\n\ntype FetchAllAlarmsPayload = {\n callback: (x: unknown) => void,\n serverDefault: string,\n}\nfunction* fetchAllAlarmsSaga({ payload }: Action) {\n const { callback, serverDefault } = payload\n const allAlarms = yield call(get, \"all\", serverDefault)\n callback(allAlarms)\n}\n\nexport function* alarmsSagas() {\n yield spawn(startAlarms)\n yield takeEvery(fetchAllAlarmsAction.request, fetchAllAlarmsSaga)\n}\n","const allowedReferrerDomains = [\n \"\",\n \"https://www.google.com/\",\n \"https://duckduckgo.com/\",\n \"https://www.reddit.com/\",\n]\n\nexport const isAllowedReferrer = (referrer: string) => allowedReferrerDomains.includes(referrer)\n || referrer.endsWith(\".my-netdata.io/\")\n || referrer.startsWith(\"https://github.com/\")\n || referrer.endsWith(\"netdata.cloud/\")\n || referrer.startsWith(\"https://app.netdata.cloud/\")\n","import { uniq, filter } from \"ramda\"\nimport {\n spawn, take, put, takeEvery, call, delay, select,\n} from \"redux-saga/effects\"\nimport { channel } from \"redux-saga\"\nimport { AxiosResponse } from \"axios\"\nimport { Action } from \"redux-act\"\n\nimport { NETDATA_REGISTRY_SERVER } from \"utils/utils\"\nimport { axiosInstance } from \"utils/api\"\nimport { isDemo } from \"utils/is-demo\"\nimport { sidePanelTransitionTimeInSeconds } from \"components/space-panel/settings\"\nimport { fetchInfoAction } from \"domains/chart/actions\"\n\nimport {\n fetchHelloAction,\n FetchHelloPayload,\n windowFocusChangeAction,\n updatePersonUrlsAction,\n SetOptionAction,\n setOptionAction,\n setSpacePanelStatusAction,\n SetSpacePanelStatusActionPayload,\n setSpacePanelTransitionEndAction,\n HelloResponse,\n accessRegistrySuccessAction,\n} from \"./actions\"\nimport { alarmsSagas } from \"./alarms-sagas\"\nimport { MASKED_DATA } from \"./constants\"\nimport { selectFullInfoPayload } from \"./selectors\"\nimport { isAllowedReferrer } from \"./utils\"\nimport { InfoPayload } from \"./__mocks__/info-mock\"\n\nconst windowFocusChannel = channel()\n\nexport function listenToWindowFocus() {\n window.addEventListener(\"focus\", () => {\n windowFocusChannel.put(windowFocusChangeAction({ hasWindowFocus: true }))\n })\n window.addEventListener(\"blur\", () => {\n windowFocusChannel.put(windowFocusChangeAction({ hasWindowFocus: false }))\n })\n}\n\nexport function* watchWindowFocusChannel() {\n while (true) {\n const action = yield take(windowFocusChannel)\n yield put(action)\n }\n}\n\nfunction* waitForFullInfoPayload() {\n return (yield take(fetchInfoAction.success)).payload.fullInfoPayload\n}\n\nfunction* injectPosthog(machineGuid: string, personGuid?: string) {\n if (window.posthog) {\n return\n }\n const info: InfoPayload = (yield select(selectFullInfoPayload))\n || (yield call(waitForFullInfoPayload))\n || {}\n\n /* eslint-disable */\n // @ts-ignore\n !function(t,e){var o,n,p,r;e.__SV||(window.posthog=e,e._i=[],e.init=function(i,s,a){function g(t,e){var o=e.split(\".\");2==o.length&&(t=t[o[0]],e=o[1]),t[e]=function(){t.push([e].concat(Array.prototype.slice.call(arguments,0)))}}(p=t.createElement(\"script\")).type=\"text/javascript\",p.async=!0,p.src=s.api_host+\"/static/array.js\",(r=t.getElementsByTagName(\"script\")[0]).parentNode.insertBefore(p,r);var u=e;for(void 0!==a?u=e[a]=[]:a=\"posthog\",u.people=u.people||[],u.toString=function(t){var e=\"posthog\";return\"posthog\"!==a&&(e+=\".\"+a),t||(e+=\" 
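// What the referrer allow-list above accepts; the result feeds the
// shouldMaskReferrer decision before analytics registration below:
isAllowedReferrer("")                                   // true (direct visit)
isAllowedReferrer("https://www.google.com/")            // true (exact match)
isAllowedReferrer("https://london.my-netdata.io/")      // true (demo-domain suffix)
isAllowedReferrer("https://github.com/netdata/netdata") // true (prefix match)
isAllowedReferrer("https://example.com/")               // false -> masked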
(stub)\"),e},u.people.toString=function(){return u.toString(1)+\".people (stub)\"},o=\"capture identify alias people.set people.set_once set_config register register_once unregister opt_out_capturing has_opted_out_capturing opt_in_capturing reset isFeatureEnabled onFeatureFlags\".split(\" \"),n=0;n {\n if (personGuid) {\n posthog.identify(personGuid)\n }\n },\n })\n const shouldMaskReferrer = !isDemo && !isAllowedReferrer(document.referrer)\n const MASKED = \"masked\"\n window.posthog.register(\n // remove properties with unavailable values\n filter((value) => value !== undefined && value !== null,\n {\n $ip: \"127.0.0.1\",\n $current_url: isDemo ? null : \"agent dashboard\",\n $pathname: isDemo ? null : \"netdata-dashboard\",\n $host: isDemo ? null : \"dashboard.netdata.io\",\n\n $initial_referring_domain: shouldMaskReferrer ? MASKED : null,\n $initial_referrer: shouldMaskReferrer ? MASKED : null,\n $referring_domain: shouldMaskReferrer ? MASKED : null,\n $referrer: shouldMaskReferrer ? MASKED : null,\n\n event_source: \"agent dashboard\",\n\n netdata_version: info.version,\n netdata_machine_guid: machineGuid,\n netdata_person_id: personGuid || \"Unavailable\",\n netdata_buildinfo: info[\"buildinfo\"],\n netdata_release_channel: info[\"release-channel\"],\n mirrored_host_count: info.mirrored_hosts?.length,\n alarms_normal: info.alarms?.normal,\n alarms_warning: info.alarms?.warning,\n alarms_critical: info.alarms.critical,\n host_os_name: info.os_name,\n host_os_id: info.os_id,\n host_os_id_like: info.os_id_like,\n host_os_version: info.os_version,\n host_os_version_id: info.os_version_id,\n host_os_detection: info.os_detection,\n system_cores_total: info.cores_total,\n system_total_disk_space: info.total_disk_space,\n system_cpu_freq: info.cpu_freq,\n system_ram_total: info.ram_total,\n system_kernel_name: info.kernel_name,\n system_kernel_version: info.kernel_version,\n system_architecture: info.architecture,\n system_virtualization: info.virtualization,\n system_virt_detection: info.virt_detection,\n system_container: info.container,\n system_container_detection: info.container_detection,\n container_os_name: info.container_os_name,\n container_os_id: info.container_os_id,\n container_os_id_like: info.container_os_id_like,\n container_os_version: info.container_os_version,\n container_os_version_id: info.container_os_version_id,\n host_collectors_count: info.collectors.length,\n host_cloud_enabled: info[\"cloud-enabled\"],\n host_cloud_available: info[\"cloud-available\"],\n host_agent_claimed: info[\"agent-claimed\"],\n host_aclk_available: info[\"aclk-available\"],\n host_aclk_implementation: info[\"aclk-implementation\"],\n host_allmetrics_json_used: info[\"allmetrics-json-used\"],\n host_allmetrics_prometheus_used: info[\"allmetrics-prometheus-used\"],\n host_allmetrics_shell_used: info[\"allmetrics-shell-used\"],\n host_charts_count: info[\"charts-count\"],\n host_dashboard_used: info[\"dashboard-used\"],\n host_metrics_count: info[\"metrics-count\"],\n host_notification_methods: info[\"notification-methods\"],\n config_memory_mode: info[\"memory-mode\"],\n config_exporting_enabled: info[\"exporting-enabled\"],\n config_exporting_connectors: info[\"exporting-connectors\"],\n config_hosts_available: info[\"hosts-available\"],\n config_https_enabled: info[\"https-enabled\"],\n config_multidb_disk_quota: info[\"multidb-disk-quota\"],\n config_page_cache_size: info[\"page-cache-size\"],\n config_stream_enabled: info[\"stream-enabled\"],\n config_web_enabled: info[\"web-enabled\"],\n 
// eslint-disable-next-line camelcase\n host_is_parent: info.host_labels?._is_parent,\n mirrored_hosts_reachable: info.mirrored_hosts_status\n .filter(({ reachable }) => reachable).length,\n mirrored_hosts_unreachable: info.mirrored_hosts_status\n .filter(({ reachable }) => !reachable).length,\n host_collectors: info.collectors,\n host_is_k8s_node: info.is_k8s_node,\n }),\n )\n}\n\nexport type PersonUrl = [\n string, // guid\n string, // url\n number, // last timestamp (ms)\n number, // accesses\n string // name\n]\n\ntype AccessRegistryResponse = null | {\n personGuid?: string\n registryServer: string\n urls?: PersonUrl[]\n}\n\ntype AccessRegistry = (args: {\n machineGuid: string\n maxRedirects: number\n name: string\n registryServer: string\n url: string\n}) => Promise\nconst accessRegistry: AccessRegistry = ({\n machineGuid, maxRedirects, name, registryServer, url,\n}) => axiosInstance.get(`${registryServer}/api/v1/registry`, {\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n params: {\n action: \"access\",\n machine: machineGuid,\n name,\n url,\n },\n withCredentials: true, // required for the cookie\n}).then(({ data }) => {\n // todo xss check\n const isRedirect = typeof data.registry === \"string\"\n\n let returnData = data\n if (typeof data.status !== \"string\" || data.status !== \"ok\") {\n // todo throw error (409 in old dashboard)\n returnData = null\n }\n\n if (returnData === null) {\n if (isRedirect && maxRedirects > 0) {\n return accessRegistry({\n maxRedirects: maxRedirects - 1,\n machineGuid,\n name,\n registryServer: data.registry,\n url,\n })\n }\n return { registryServer }\n }\n const urls = data.urls.filter((u: [string, string]) => u[1] !== MASKED_DATA)\n return {\n personGuid: data.person_guid || null,\n registryServer,\n urls,\n }\n}).catch(() => {\n // todo handle error in better way (410 in old dashboard)\n console.warn(\"error calling registry:\", registryServer) // eslint-disable-line no-console\n return null\n})\n\nexport interface RegistryMachine {\n guid: string\n url: string\n lastTimestamp: number\n accesses: number\n name: string\n alternateUrls: string[]\n}\n\ntype ParsePersonUrls = (\n personUrls: PersonUrl[]\n) => {\n registryMachines: { [key: string]: RegistryMachine }\n registryMachinesArray: RegistryMachine[]\n}\nexport const parsePersonUrls: ParsePersonUrls = (personUrls) => {\n // todo main.js is using registryMachines, but should use only the array\n const registryMachines: { [key: string]: RegistryMachine } = {}\n\n personUrls\n .slice()\n .reverse()\n .forEach(([guid, url, lastTimestamp, accesses, name]: PersonUrl) => {\n const existingObj = registryMachines[guid] || {\n lastTimestamp: 0,\n accesses: 0,\n alternateUrls: [],\n guid: \"\",\n url: \"\",\n name: \"\"\n }\n const isNewer = existingObj.lastTimestamp < lastTimestamp\n const extended: RegistryMachine = {\n guid: existingObj.guid || guid,\n url: isNewer ? url : existingObj.url,\n lastTimestamp: isNewer ? lastTimestamp : existingObj.lastTimestamp,\n accesses: existingObj.accesses + accesses,\n name: isNewer ? 
name : existingObj.name,\n alternateUrls: existingObj.alternateUrls.concat(url),\n }\n registryMachines[guid] = extended\n })\n\n const registryMachinesArray = uniq(\n // not sure if reverse is needed, but it was in old dashboard\n personUrls\n .slice()\n .reverse()\n .map(([guid]: PersonUrl) => guid),\n ).map((guid) => registryMachines[guid])\n return {\n registryMachines,\n registryMachinesArray,\n }\n}\n\nfunction* fetchHelloSaga({ payload }: Action) {\n const { serverDefault } = payload\n const helloCallUrl = `${serverDefault}api/v1/registry?action=hello`\n let response: AxiosResponse\n try {\n response = yield call(axiosInstance.get, helloCallUrl, {\n headers: {\n \"Cache-Control\": \"no-cache, no-store\",\n Pragma: \"no-cache\",\n },\n withCredentials: true,\n })\n } catch (error) {\n console.warn(\"error accessing registry or Do-Not-Track is enabled\") // eslint-disable-line\n yield put(fetchHelloAction.failure())\n return\n }\n const cloudBaseURL = response.data.cloud_base_url\n const { hostname } = response.data\n const machineGuid = response.data.machine_guid\n const registryServer = response.data.registry\n const isUsingGlobalRegistry = registryServer === NETDATA_REGISTRY_SERVER\n\n yield put(fetchHelloAction.success({\n cloudBaseURL,\n hostname,\n isUsingGlobalRegistry,\n machineGuid,\n }))\n\n const name = hostname\n const url = serverDefault\n\n // now make access call - max_redirects, callback, etc...\n const accessRegistryResponse: AccessRegistryResponse = yield call(accessRegistry, {\n machineGuid,\n maxRedirects: 2,\n name,\n registryServer,\n url,\n })\n\n if (response.data.anonymous_statistics) {\n yield spawn(injectPosthog, response.data.machine_guid, accessRegistryResponse?.personGuid)\n }\n\n if (accessRegistryResponse?.urls && accessRegistryResponse?.personGuid) {\n const personUrls = parsePersonUrls(accessRegistryResponse.urls)\n const { registryMachines, registryMachinesArray } = personUrls\n yield put(updatePersonUrlsAction({\n personGuid: accessRegistryResponse.personGuid,\n registryMachines,\n registryMachinesArray,\n }))\n }\n\n yield put(accessRegistrySuccessAction({\n registryServer: accessRegistryResponse?.registryServer || registryServer,\n }))\n}\n\nconst constructOptionStorageKey = (key: string) => `options.${key}`\nfunction setOptionSaga({ payload }: Action) {\n const { key, value } = payload\n if (key === \"stop_updates_when_focus_is_lost\") {\n // old dashboard was saving that property to localStorage, but was always omitting it when\n // reading. 
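// parsePersonUrls above merges registry rows sharing a machine GUID: the
// newest row wins url/name/lastTimestamp, accesses are summed, and every url
// seen lands in alternateUrls. Example rows (values illustrative):
const { registryMachinesArray } = parsePersonUrls([
  ["guid-1", "http://box:19999/", 1_600_000_000_000, 2, "box"],
  ["guid-1", "http://box.lan:19999/", 1_700_000_000_000, 5, "box.lan"],
])
// registryMachinesArray[0] ->
// { guid: "guid-1", url: "http://box.lan:19999/", name: "box.lan",
//   lastTimestamp: 1_700_000_000_000, accesses: 7,
//   alternateUrls: ["http://box.lan:19999/", "http://box:19999/"] }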
it was only possible to persist this setting via url (update_always hash param)\n return\n }\n localStorage.setItem(constructOptionStorageKey(key), JSON.stringify(value))\n}\n\nfunction* spacePanelSaga({ payload }: Action) {\n if (payload.isActive) {\n document.body.className = \"with-panel\"\n } else {\n document.body.className = \"\"\n }\n yield delay(sidePanelTransitionTimeInSeconds * 1000)\n yield put(setSpacePanelTransitionEndAction({ isActive: payload.isActive }))\n}\n\nexport function* globalSagas() {\n yield spawn(listenToWindowFocus)\n yield spawn(watchWindowFocusChannel)\n yield takeEvery(fetchHelloAction.request, fetchHelloSaga)\n yield spawn(alarmsSagas)\n yield takeEvery(setOptionAction, setOptionSaga)\n yield takeEvery(setSpacePanelStatusAction, spacePanelSaga)\n}\n","/* eslint-disable camelcase */\n/* eslint-disable operator-linebreak */\nimport { take, takeEvery } from \"redux-saga/effects\"\nimport { Action } from \"redux-act\"\n\nimport {\n clearHighlightAction,\n SetGlobalChartUnderlayAction,\n setGlobalChartUnderlayAction,\n} from \"domains/global/actions\"\nimport {\n explicitlySignInAction,\n showSignInModalAction,\n ShowSignInModalAction,\n} from \"domains/dashboard/actions\"\nimport { setHashParams, getHashParams, removeHashParams } from \"utils/hash-utils\"\n\nexport const LOCAL_STORAGE_NEEDS_SYNC = \"LOCAL-STORAGE-NEEDS-SYNC\"\n\nfunction setGlobalChartUnderlaySaga({ payload }: Action) {\n const { after, before } = payload\n if (window.urlOptions) {\n // additional check to prevent loop, after setting initial state from url\n if (window.urlOptions.after !== after || window.urlOptions.before !== before) {\n window.urlOptions.netdataHighlightCallback(true, after, before)\n }\n } else {\n // TODO: Consider a setting to control whether the component sets these hash params\n const hashParams = getHashParams()\n const highlight_after = Math.round(after).toString()\n const highlight_before = Math.round(before).toString()\n if (\n hashParams.highlight_after !== highlight_after ||\n hashParams.highlight_before !== highlight_before\n ) {\n setHashParams({ highlight_after, highlight_before })\n }\n }\n}\n\nfunction clearHighlightSaga() {\n if (window.urlOptions) {\n window.urlOptions.netdataHighlightCallback(false, 0, 0)\n } else {\n removeHashParams([\"highlight_after\", \"highlight_before\"])\n }\n}\n\nfunction* showSignInSaga({ payload }: Action) {\n if (window.showSignInModal) {\n window.showSignInModal()\n\n yield take(explicitlySignInAction)\n const { signInLinkHref } = payload\n window.localStorage.setItem(LOCAL_STORAGE_NEEDS_SYNC, \"true\")\n window.location.href = signInLinkHref\n }\n}\n\nexport function* mainJsSagas() {\n yield takeEvery(setGlobalChartUnderlayAction, setGlobalChartUnderlaySaga)\n yield takeEvery(clearHighlightAction, clearHighlightSaga)\n yield takeEvery(showSignInModalAction, showSignInSaga)\n}\n","import { spawn } from \"redux-saga/effects\"\n\nimport { chartSagas } from \"domains/chart/sagas\"\nimport { globalSagas } from \"domains/global/sagas\"\nimport { mainJsSagas } from \"domains/dashboard/sagas\"\n\nexport function* rootSaga() {\n yield spawn(globalSagas)\n yield spawn(chartSagas)\n yield spawn(mainJsSagas)\n}\n","import { compose, applyMiddleware, createStore } from \"redux\"\nimport createSagaMiddleware from \"redux-saga\"\nimport rootReducer from \"./root-reducer\"\nimport { rootSaga } from \"./root-saga\"\n\nconst sagaMiddleware = createSagaMiddleware()\n\nconst reduxDevTools = process.env.NODE_ENV === \"development\"\n && 
window.__REDUX_DEVTOOLS_EXTENSION__\n // @ts-ignore\n && window.__REDUX_DEVTOOLS_EXTENSION__({ name: \"Dashboard Charts\" })\n\nconst composeMiddlewaresWithDevTools = () => (reduxDevTools\n ? compose(applyMiddleware(sagaMiddleware), reduxDevTools)\n : compose(applyMiddleware(sagaMiddleware)))\n\nexport const configureStore = () => {\n const store = createStore(\n rootReducer,\n composeMiddlewaresWithDevTools(),\n )\n sagaMiddleware.run(rootSaga)\n return store\n}\n\nexport const store = configureStore()\n","import $ from \"jquery\"\n\nwindow.$ = $\nwindow.jQuery = $\n","let loadCssPromise: Promise\n\ntype LoadCss = (href: string) => Promise\nexport const loadCss: LoadCss = (href) => {\n if (loadCssPromise) {\n return loadCssPromise\n }\n return new Promise((resolve, reject) => {\n const fileRef = document.createElement(\"link\")\n fileRef.setAttribute(\"rel\", \"stylesheet\")\n fileRef.setAttribute(\"type\", \"text/css\")\n fileRef.setAttribute(\"href\", href)\n\n fileRef.onload = () => {\n resolve()\n }\n\n fileRef.onerror = () => {\n reject(Error(`Error loading css: ${href}`))\n }\n\n document.getElementsByTagName(\"head\")[0].appendChild(fileRef)\n })\n}\n","import classNames from \"classnames\"\nimport { Attributes } from \"./transformDataAttributes\"\n\nexport type ChartLibraryName =\n | \"dygraph\"\n | \"sparkline\"\n | \"peity\"\n | \"google\"\n // | \"d3\"\n | \"d3pie\"\n | \"easypiechart\"\n | \"gauge\"\n | \"textonly\"\n | \"groupbox\"\nexport interface ChartLibraryConfig {\n aspectRatio?: number\n format: string\n hasLegend: (attributes: Attributes) => boolean\n hasToolboxPanAndZoom?: boolean\n isLogScale?: (attributes: Attributes) => boolean\n options: (attributes: Attributes) => string\n trackColors: boolean\n pixelsPerPoint: (attributes: Attributes) => number\n xssRegexIgnore: RegExp\n containerClass: (attributes: Attributes) => string\n}\nexport type ChartLibrariesSettings = {\n [key in ChartLibraryName]: ChartLibraryConfig\n}\n\ntype IsDygraphSparkline = (attributes: Attributes) => boolean\nconst isDygraphSparkline: IsDygraphSparkline = (attributes) => (\n attributes.dygraphTheme === \"sparkline\"\n)\n\nexport const chartLibrariesSettings: ChartLibrariesSettings = {\n dygraph: {\n // initialize: window.NETDATA.dygraphInitialize,\n // create: window.NETDATA.dygraphChartCreate,\n // update: window.NETDATA.dygraphChartUpdate,\n // resize(state) {\n // if (typeof state.tmp.dygraph_instance !== \"undefined\"\n // && typeof state.tmp.dygraph_instance.resize === \"function\") {\n // state.tmp.dygraph_instance.resize()\n // }\n // },\n // setSelection: window.NETDATA.dygraphSetSelection,\n // clearSelection: window.NETDATA.dygraphClearSelection,\n hasToolboxPanAndZoom: true,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.data$\"),\n format: \"json\",\n options(attributes: Attributes) {\n if (typeof this.isLogScale === \"function\") {\n // flip - in proper order (from oldest to newest)\n return `ms|flip${this.isLogScale(attributes) ? 
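// Note on loadCss above: loadCssPromise is declared but never assigned, so
// the early-return guard can never fire and repeated calls re-append the
// stylesheet link. A sketch of the variant the guard seems to intend
// (memoizing a single stylesheet per page):
let cssPromise: Promise<void> | undefined

export const loadCssOnce = (href: string): Promise<void> => {
  if (!cssPromise) {
    cssPromise = new Promise<void>((resolve, reject) => {
      const link = document.createElement("link")
      link.rel = "stylesheet"
      link.type = "text/css"
      link.href = href
      link.onload = () => resolve()
      link.onerror = () => reject(Error(`Error loading css: ${href}`))
      document.head.appendChild(link)
    })
  }
  return cssPromise
}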
\"|abs\" : \"\"}`\n }\n return \"\"\n },\n hasLegend(attributes: Attributes) {\n // not using __hasLegendCache__ as in old-dashboard, because performance tweaks like this\n // probably won't be needed in react app\n const { legend = true } = attributes\n return !isDygraphSparkline(attributes) && Boolean(legend)\n },\n // autoresize(state) {\n // void (state)\n // return true\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: ((attributes: Attributes) => (isDygraphSparkline(attributes) ? 2 : 3)),\n // pixels_per_point(state) {\n // return (this.isSparkline(state) === false) ? 3 : 2\n // },\n isLogScale(attributes: Attributes) {\n return attributes.dygraphTheme === \"logscale\"\n },\n containerClass(attributes: Attributes) {\n return this.hasLegend(attributes)\n ? classNames(\n \"netdata-container-with-legend\",\n attributes.legendPosition === \"bottom\" && \"netdata-container-with-legend--bottom\",\n )\n : \"netdata-container\"\n },\n // container_class(state) {\n // if (this.legend(state) !== null) {\n // return \"netdata-container-with-legend\"\n // }\n // return \"netdata-container\"\n // },\n },\n sparkline: {\n // initialize: window.NETDATA.sparklineInitialize,\n // create: window.NETDATA.sparklineChartCreate,\n // update: window.NETDATA.sparklineChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options: () => \"flip|abs\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 3,\n containerClass: () => \"netdata-container\",\n },\n peity: {\n // initialize: window.NETDATA.peityInitialize,\n // create: window.NETDATA.peityChartCreate,\n // update: window.NETDATA.peityChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"ssvcomma\",\n options: () => \"null2zero|flip|abs\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 3,\n containerClass: () => \"netdata-container\",\n },\n google: {\n // initialize: window.NETDATA.googleInitialize,\n // create: window.NETDATA.googleChartCreate,\n // update: window.NETDATA.googleChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.rows$\"),\n format: \"datatable\",\n options: () => \"\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 300\n // },\n trackColors: false,\n 
pixelsPerPoint: () => 4,\n containerClass: () => \"netdata-container\",\n },\n d3pie: {\n // initialize: window.NETDATA.d3pieInitialize,\n // create: window.NETDATA.d3pieChartCreate,\n // update: window.NETDATA.d3pieChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.d3pieSetSelection,\n // clearSelection: window.NETDATA.d3pieClearSelection,\n hasToolboxPanAndZoom: false,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result.data$\"),\n format: \"json\",\n hasLegend: () => false,\n options: () => \"objectrows|ms\",\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: false,\n pixelsPerPoint: () => 15,\n containerClass: () => \"netdata-container\",\n },\n // d3: {\n // initialize: window.NETDATA.d3Initialize,\n // create: window.NETDATA.d3ChartCreate,\n // update: window.NETDATA.d3ChartUpdate,\n // resize: null,\n // setSelection: undefined, // function(state, t) { void(state); return true; },\n // clearSelection: undefined, // function(state) { void(state); return true; },\n // toolboxPanAndZoom: null,\n // initialized: false,\n // enabled: true,\n // xssRegexIgnore: new RegExp(\"^/api/v1/data\\.result.data$\"),\n // format(state) {\n // void (state)\n // return \"json\"\n // },\n // options(state) {\n // void (state)\n // return \"\"\n // },\n // legend(state) {\n // void (state)\n // return null\n // },\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n // track_colors(state) {\n // void (state)\n // return false\n // },\n // pixels_per_point(state) {\n // void (state)\n // return 3\n // },\n // container_class(state) {\n // void (state)\n // return \"netdata-container\"\n // },\n // },\n easypiechart: {\n // initialize: window.NETDATA.easypiechartInitialize,\n // create: window.NETDATA.easypiechartChartCreate,\n // update: window.NETDATA.easypiechartChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.easypiechartSetSelection,\n // clearSelection: window.NETDATA.easypiechartClearSelection,\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options() {\n return \"absolute\"\n },\n hasLegend() {\n return false\n },\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: () => 3,\n aspectRatio: 100,\n containerClass: () => \"netdata-container-easypiechart\",\n },\n gauge: {\n // initialize: window.NETDATA.gaugeInitialize,\n // create: window.NETDATA.gaugeChartCreate,\n // update: window.NETDATA.gaugeChartUpdate,\n // resize: null,\n // setSelection: window.NETDATA.gaugeSetSelection,\n // clearSelection: window.NETDATA.gaugeClearSelection,\n hasToolboxPanAndZoom: false,\n // initialized: false,\n // enabled: true,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n format: \"array\",\n options: () => \"absolute\",\n hasLegend: () => false,\n // autoresize(state) {\n // void (state)\n // return false\n // },\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n trackColors: true,\n pixelsPerPoint: () => 3,\n aspectRatio: 60,\n containerClass: () => \"netdata-container-gauge\",\n },\n textonly: {\n // autoresize(state) {\n // void (state)\n // return false\n // },\n containerClass: () => 
\"netdata-container\",\n // create: window.NETDATA.textOnlyCreate,\n // enabled: true,\n format: \"array\",\n // initialized: true,\n // initialize(callback) {\n // callback()\n // },\n hasLegend: () => false,\n // max_updates_to_recreate(state) {\n // void (state)\n // return 5000\n // },\n options: () => \"absolute\",\n pixelsPerPoint: () => 3,\n trackColors: false,\n // update: window.NETDATA.textOnlyUpdate,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n },\n groupbox: {\n containerClass: () => \"netdata-container\",\n hasLegend: () => false,\n options: () => \"absolute\",\n format: \"json\",\n trackColors: false,\n pixelsPerPoint: () => 3,\n xssRegexIgnore: new RegExp(\"^/api/v1/data.result$\"),\n },\n}\n","import { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"domains/chart/utils/chartLibrariesSettings\"\n\nexport const getChartURLOptions = (\n attributes: Attributes, shouldEliminateZeroDimensions: boolean,\n) => {\n const {\n appendOptions,\n overrideOptions,\n } = attributes\n let ret = \"\"\n\n ret += overrideOptions\n ? overrideOptions.toString()\n : chartLibrariesSettings[attributes.chartLibrary].options(attributes)\n\n if (typeof appendOptions === \"string\") {\n ret += `|${encodeURIComponent(appendOptions)}`\n }\n\n ret += \"|jsonwrap\"\n\n if (shouldEliminateZeroDimensions) {\n ret += \"|nonzero\"\n }\n\n if (attributes.dimensionsAggrMethod === \"sum-of-abs\"\n || (!attributes.dimensionsAggrMethod && attributes.groupBy && attributes.groupBy !== \"dimension\")\n ) {\n ret += \"|absolute\"\n }\n\n return ret\n}\n","export const BIGGEST_INTERVAL_NUMBER = 2 ** 31 - 1\n","import React from \"react\"\n\nimport { Icon } from \"components/icon\"\n\ninterface Props {\n containerNode: HTMLElement\n hasEmptyData: boolean\n}\n\nexport const Loader = ({\n containerNode,\n hasEmptyData,\n}: Props) => {\n // below is 90% of original logic.\n // since it rerenders when IntersectionObserver turns the chart back on,\n // it's not that important to detect screen height and container sizes changes\n const screenHeight = window.screen.height\n\n // normally we want a font size, as tall as the element\n let h = containerNode.clientHeight\n\n // but give it some air, 20% let's say, or 5 pixels min\n const lost = Math.max(h * 0.2, 5)\n h -= lost\n\n // center the text, vertically\n let paddingTop = (lost - 5) / 2\n\n // but check the width too\n // it should fit 10 characters in it\n const w = containerNode.clientWidth / 10\n if (h > w) {\n paddingTop += (h - w) / 2\n h = w\n }\n\n // and don't make it too huge\n // 5% of the screen size is good\n if (h > screenHeight / 20) {\n paddingTop += (h - (screenHeight / 20)) / 2\n h = screenHeight / 20\n }\n\n const label = hasEmptyData ? \" empty\" : \" netdata\"\n const iconType = hasEmptyData ? 
\"noData\" : \"loading\"\n\n return (\n \n \n {label}\n \n )\n}\n","type GetPanAndZoomStep = (event: React.MouseEvent) => number\nexport const getPanAndZoomStep: GetPanAndZoomStep = (event) => {\n if (event.ctrlKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_control\n } if (event.shiftKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_shift\n } if (event.altKey) {\n return window.NETDATA.options.current.pan_and_zoom_factor\n * window.NETDATA.options.current.pan_and_zoom_factor_multiplier_alt\n }\n return window.NETDATA.options.current.pan_and_zoom_factor\n}\n","export const safeEqualCheck = (a: unknown, b: unknown) => {\n if (a === b) {\n return true\n }\n return Number.isNaN(a as number) && Number.isNaN(b as number)\n}\n","import { identity } from \"ramda\"\nimport { useCallback, useState, useMemo, useRef } from \"react\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectTemperatureSetting, selectSecondsAsTimeSetting } from \"domains/global/selectors\"\nimport { unitsConversionCreator } from \"utils/units-conversion\"\nimport { safeEqualCheck } from \"utils/safe-equal-check\"\n\nimport { ChartData } from \"../chart-types\"\nimport { Attributes } from \"./transformDataAttributes\"\n\ntype Converter = (v: number) => number | string\n// only time units are converted into strings, the rest are numbers\n\n// todo - memoization similar to the one as in old dashboard, but probably not needed\nconst formattersFixed: any[] = []\nconst formattersZeroBased: any[] = []\nconst fastNumberFormat = (min: number, max: number) => {\n const key = max\n if (min === max) {\n if (typeof formattersFixed[key] === \"undefined\") {\n formattersFixed[key] = new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n }\n\n return formattersFixed[key]\n }\n if (min === 0) {\n if (typeof formattersZeroBased[key] === \"undefined\") {\n formattersZeroBased[key] = new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n }\n\n return formattersZeroBased[key]\n }\n // (old dashboard comment)\n // this is never used\n // it is added just for completeness\n return new Intl.NumberFormat(undefined, {\n useGrouping: true,\n minimumFractionDigits: min,\n maximumFractionDigits: max,\n })\n}\n\nconst getLegendFormatValue =\n (\n convertUnits: Converter,\n intlNumberFormat: Intl.NumberFormat | null,\n valueDecimalDetail: number\n ) =>\n (value: number | string | null) => {\n if (typeof value !== \"number\") {\n return \"-\"\n }\n\n const convertedValue = convertUnits(value)\n if (typeof convertedValue !== \"number\") {\n return convertedValue\n }\n\n if (intlNumberFormat !== null) {\n return intlNumberFormat.format(convertedValue)\n }\n\n let dmin\n let dmax\n if (valueDecimalDetail !== -1) {\n dmin = valueDecimalDetail\n dmax = valueDecimalDetail\n } else {\n dmin = 0\n const abs = convertedValue < 0 ? 
-convertedValue : convertedValue\n if (abs > 1000) {\n dmax = 0\n } else if (abs > 10) {\n dmax = 1\n } else if (abs > 1) {\n dmax = 2\n } else if (abs > 0.1) {\n dmax = 2\n } else if (abs > 0.01) {\n dmax = 4\n } else if (abs > 0.001) {\n dmax = 5\n } else if (abs > 0.0001) {\n dmax = 6\n } else {\n dmax = 7\n }\n }\n\n return fastNumberFormat(dmin, dmax).format(convertedValue)\n }\n\ntype LegendFormatValue = (value: string | number | null) => string | number\n\ninterface Arguments {\n attributes: Attributes\n data: ChartData\n units: string\n unitsCommon: string | undefined\n unitsDesired: string\n uuid: string\n}\nexport const useFormatters = ({\n attributes,\n data,\n units,\n unitsCommon,\n unitsDesired,\n uuid,\n}: Arguments) => {\n const temperatureSetting = useSelector(selectTemperatureSetting)\n const secondsAsTimeSetting = useSelector(selectSecondsAsTimeSetting)\n\n // previously _unitsConversion\n const [convertUnits, setConvertUnits] = useState(() => identity)\n\n // probably can also be removed\n const [min, setMin] = useState()\n const [max, setMax] = useState()\n\n // todo most of this state is not needed, that hook can be refactored\n const [unitsCurrent, setUnitsCurrent] = useState(units)\n\n const [decimals, setDecimals] = useState(-1)\n const [intlNumberFormat, setIntlNumberFormat] = useState(null)\n\n const {\n // \"valueDecimalDetail\" in old app\n decimalDigits = -1,\n } = attributes\n\n const legendFormatValue: LegendFormatValue = useMemo(\n () => getLegendFormatValue(convertUnits, intlNumberFormat, decimalDigits),\n [convertUnits, decimalDigits, intlNumberFormat]\n )\n\n const legendFormatValueRef = useRef(legendFormatValue)\n const updateLegendFormatValueRef = (\n newConvertUnits: Converter,\n newIntlNumberFormat: any,\n newDecimalDigits: any\n ) => {\n legendFormatValueRef.current = getLegendFormatValue(\n newConvertUnits,\n newIntlNumberFormat,\n newDecimalDigits\n )\n }\n\n const legendFormatValueDecimalsFromMinMax = useCallback(\n (newMin: number, newMax: number) => {\n if (safeEqualCheck(min, newMin) && safeEqualCheck(max, newMax)) {\n return legendFormatValueRef.current\n }\n // we should call the convertUnits-creation only when original app was doing this\n // so we don't get new updates in improper places\n setMin(newMin)\n setMax(newMax)\n\n const newConvertUnits = unitsConversionCreator.get(\n uuid,\n newMin,\n newMax,\n units,\n unitsDesired,\n unitsCommon,\n switchedUnits => {\n setUnitsCurrent(switchedUnits)\n // that.legendSetUnitsString(that.units_current);\n // that.legendSetUnitsString just populates some DOM with unitsCurrent\n // on all occurrences just take the unitsCurrent from this state\n },\n temperatureSetting,\n secondsAsTimeSetting\n )\n\n // as function, so useState() interprets it properly\n setConvertUnits(() => newConvertUnits)\n\n const convertedMin = newConvertUnits(newMin)\n const convertedMax = newConvertUnits(newMax)\n\n // if number is returned, we format it!!!!\n if (typeof convertedMin !== \"number\" || typeof convertedMax !== \"number\") {\n updateLegendFormatValueRef(newConvertUnits, intlNumberFormat, decimalDigits)\n return legendFormatValueRef.current\n }\n\n let newDecimals\n\n if (data.min === data.max) {\n // it is a fixed number, let the visualizer decide based on the value\n newDecimals = -1\n } else if (decimalDigits !== -1) {\n // there is an override\n newDecimals = decimalDigits\n } else {\n // ok, let's calculate the proper number of decimal points\n let delta\n\n if (convertedMin === convertedMax) {\n delta = 
Math.abs(convertedMin)\n } else {\n delta = Math.abs(convertedMax - convertedMin)\n }\n\n if (delta > 1000) {\n newDecimals = 0\n } else if (delta > 10) {\n newDecimals = 1\n } else if (delta > 1) {\n newDecimals = 2\n } else if (delta > 0.1) {\n newDecimals = 2\n } else if (delta > 0.01) {\n newDecimals = 4\n } else if (delta > 0.001) {\n newDecimals = 5\n } else if (delta > 0.0001) {\n newDecimals = 6\n } else {\n newDecimals = 7\n }\n }\n\n let newIntlNumberFormat = intlNumberFormat\n\n if (newDecimals !== decimals) {\n if (newDecimals < 0) {\n newIntlNumberFormat = null\n } else {\n newIntlNumberFormat = fastNumberFormat(newDecimals, newDecimals)\n }\n setIntlNumberFormat(() => newIntlNumberFormat)\n setDecimals(newDecimals)\n }\n updateLegendFormatValueRef(newConvertUnits, newIntlNumberFormat, newDecimals)\n return legendFormatValueRef.current\n },\n [\n decimals,\n decimalDigits,\n min,\n max,\n uuid,\n temperatureSetting,\n units,\n unitsDesired,\n unitsCommon,\n secondsAsTimeSetting,\n data.min,\n data.max,\n intlNumberFormat,\n ]\n )\n\n return {\n legendFormatValue,\n legendFormatValueDecimalsFromMinMax,\n unitsCurrent,\n }\n}\n","const defaultColor = {\n r: 255,\n g: 0,\n b: 0,\n}\n\ntype ColorHex2Rgb = (hex: string) => {\n r: number,\n g: number,\n b: number\n}\nexport const colorHex2Rgb: ColorHex2Rgb = (hex) => {\n // Expand shorthand form (e.g. \"03F\") to full form (e.g. \"0033FF\")\n const shorthandRegex = /^#?([a-f\\d])([a-f\\d])([a-f\\d])$/i\n if (!hex) {\n return defaultColor\n }\n const hexFull = hex.replace(shorthandRegex, (m, r, g, b) => r + r + g + g + b + b)\n\n const result = /^#?([a-f\\d]{2})([a-f\\d]{2})([a-f\\d]{2})$/i.exec(hexFull)\n if (!result) {\n console.warn(\"wrong color format:\", hex) // eslint-disable-line no-console\n }\n return result\n ? {\n r: parseInt(result[1], 16),\n g: parseInt(result[2], 16),\n b: parseInt(result[3], 16),\n } : defaultColor\n}\n","import styled from \"styled-components\"\nimport { getSizeBy } from \"@netdata/netdata-ui\"\n\nexport const LegendContainer = styled.div`\n margin-bottom: ${getSizeBy(3)};\n padding-left: 35px;\n`\n\nexport const LegendFirstRow = styled.div`\n margin-top: 4px;\n display: flex;\n justify-content: space-between;\n`\n\nexport const LegendSecondRow = styled.div`\n margin-top: 4px;\n display: flex;\n justify-content: space-between;\n`\n\nexport const LegendUnit = styled.div`\n`\n\nexport const DateTimeSeparator = styled.span`\n margin: 0 3px;\n`\n\nexport const LegendItems = styled.div`\n display: flex;\n flex-wrap: wrap;\n overflow: auto;\n max-height: 80px;\n`\n\nexport const DimensionItem = styled.div<{ color: string, isDisabled: boolean }>`\n display: flex;\n align-items: center;\n color: ${({ color }) => color};\n margin-right: 12px;\n cursor: pointer;\n opacity: ${({ isDisabled }) => (isDisabled ? 0.3 : null)};\n user-select: none;\n font-size: 11px;\n &:focus {\n outline: none;\n }\n`\n\n// toolbox is based on \"absolute\", so to make sure it's not put on top of dimension-item\n// let's put a transparent block as last in dimension-items container. 
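The transparent placeholder just\n // reserves the toolbox footprint (140x20 px) at the end of the flex row. 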
Toolbox will soon be moved\n// to other place so it's temporary\nexport const DimensionItemToolboxPlaceholder = styled.div`\n width: 140px;\n height: 20px;\n`\n\nexport const DimensionIcon = styled.div<{ color: string }>`\n width: 14px;\n height: 7px;\n border-radius: 4px;\n overflow: hidden;\n background-color: ${({ color }) => color};\n`\n\nexport const DimensionLabel = styled.span`\n margin-left: 3px;\n`\n\nexport const DimensionValue = styled.span`\n margin-left: 5px;\n min-width: 30px;\n`\n\nexport const ToolboxContainer = styled.div`\n position: relative;\n touch-action: none;\n`\n","import React, { useCallback } from \"react\"\nimport { createSelector } from \"reselect\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { DimensionLabel } from \"./chart-legend-bottom.styled\"\n\nconst emptyObject = {}\n\nconst selector = createSelector(\n selectChartData,\n ({ dimension_names: dimensionNames, keys = emptyObject }) => ({\n dimensionNames,\n keys,\n })\n)\n\nconst LegendText = ({ id, index }) => {\n const { dimensionNames, keys } = useSelector(useCallback(state => selector(state, { id }), [id]))\n const { chart, node } = keys\n\n if (chart && node && Object.keys(keys).length === 2) {\n return (\n \n {chart[index]}@{node[index]}\n \n )\n }\n\n const name = dimensionNames[index]\n\n return {name}\n}\n\nexport default LegendText\n","import React, { Fragment, useRef, useEffect, useCallback } from \"react\"\nimport classNames from \"classnames\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\n\nimport { colorHex2Rgb } from \"utils/color-hex-2-rgb\"\nimport { useDateTime } from \"utils/date-time\"\n\nimport { legendResolutionTooltip, legendPluginModuleString } from \"../utils/legend-utils\"\n\nimport { ChartMetadata } from \"../chart-types\"\nimport LegendText from \"./legendText\"\n\ninterface Props {\n chartUuid: string\n chartMetadata: ChartMetadata\n chartLibrary: string\n colors: {\n [key: string]: string\n }\n hoveredRow: number\n hoveredX: number | null\n legendFormatValue: (value: number | string | null) => (number | string)\n onDimensionClick: (clickedDimensionName: string, event: React.MouseEvent) => void\n selectedDimensions: string[]\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewBefore: number\n}\n\nexport const ChartLegendRight = ({\n chartUuid,\n chartMetadata,\n chartLibrary,\n colors,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n onDimensionClick,\n selectedDimensions,\n showLatestOnBlur,\n unitsCurrent,\n viewBefore,\n}: Props) => {\n const chartData = useSelector(\n useCallback((state: any) => selectChartData(state, { id: chartUuid }), [chartUuid])\n )\n const { dimension_names: dimensionNames, dimension_ids: dimensionIds } = chartData\n\n // todo handle also this case:\n // const netdataLast = chartData.last_entry * 1000\n // const dataUpdateEvery = chartData.view_update_every * 1000\n // showUndefined = Math.abs(netdataLast - viewBefore) > dataUpdateEvery\n // (showUndefined also when difference between last and before is bigger than granularity)\n const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n\n // todo support timezone\n const legendDate = new Date(hoveredX || viewBefore)\n\n // todo make a possibility to add chartLegened when there's not chartData\n // (if this situation is possible)\n\n // @ts-ignore ignoring because options.current has inconsistent structure\n const colorFillOpacity 
= window.NETDATA.options.current[\n `color_fill_opacity_${chartMetadata.chart_type}`\n ]\n\n const { localeDateString, localeTimeString } = useDateTime()\n\n const scrollbarRef = useRef(null)\n useEffect(() => {\n if (scrollbarRef.current) {\n window.Ps.initialize(scrollbarRef.current, {\n wheelSpeed: 0.2,\n wheelPropagation: true,\n swipePropagation: true,\n minScrollbarLength: null,\n maxScrollbarLength: null,\n useBothWheelAxes: false,\n suppressScrollX: true,\n suppressScrollY: false,\n scrollXMarginOffset: 0,\n scrollYMarginOffset: 0,\n theme: \"default\",\n })\n }\n }, [scrollbarRef])\n\n return (\n
    \n \n {showUndefined\n ? legendPluginModuleString(false, chartMetadata)\n : localeDateString(legendDate)}\n \n
    \n \n {showUndefined\n ? chartMetadata.context.toString()\n : localeTimeString(legendDate)}\n \n
    \n {unitsCurrent}\n
    \n
    \n
    \n {dimensionIds.map((dimensionId, i) => {\n const dimensionName = dimensionNames[i]\n // todo dimension could be a separate component\n const color = colors[dimensionName]\n const rgb = colorHex2Rgb(color)\n\n const isSelected = selectedDimensions.length === 0\n || selectedDimensions.includes(dimensionName)\n\n let value\n if (showUndefined) {\n value = null\n } else if (hoveredRow !== -1) {\n const hoveredValueArray = chartData.result.data[hoveredRow]\n // [timestamp, valueDim1, valueDim2, ...]\n value = hoveredValueArray ? hoveredValueArray[i + 1] : null\n } else {\n value = chartData.view_latest_values[i]\n }\n\n return (\n \n {i !== 0 &&
    }\n {/* eslint-disable-next-line jsx-a11y/click-events-have-key-events */}\n {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n style={{ color }}\n tabIndex={0}\n >\n \n \n \n \n \n \n \n {\" \"}\n \n \n {/* eslint-disable-next-line jsx-a11y/click-events-have-key-events */}\n {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n style={{ color }} // omitted !important during refactor, react doesn't support it\n tabIndex={0}\n >\n {legendFormatValue(\n value,\n )}\n \n
    \n )\n })}\n
    \n
    \n
    \n )\n}\n","import React, { useCallback } from \"react\"\nimport { useDateTime } from \"utils/date-time\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { legendPluginModuleString, legendResolutionTooltip } from \"domains/chart/utils/legend-utils\"\nimport { ChartMetadata } from \"../chart-types\"\nimport LegendText from \"./legendText\"\nimport * as S from \"./chart-legend-bottom.styled\"\ninterface Props {\n chartUuid: string\n chartMetadata: ChartMetadata\n chartLibrary: string\n colors: {\n [key: string]: string\n }\n hoveredRow: number\n hoveredX: number | null\n legendFormatValue: (value: number | string | null) => number | string\n onDimensionClick: (clickedDimensionName: string, event: React.MouseEvent) => void\n selectedDimensions: string[]\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewBefore: number\n legendToolbox: JSX.Element\n resizeHandler: React.ReactNode\n}\n\nexport const ChartTimeframe = ({\n chartMetadata,\n showUndefined,\n hoveredX,\n viewBefore,\n chartData,\n}: any) => {\n const { localeDateString, localeTimeString } = useDateTime()\n\n const legendDate = new Date(hoveredX || viewBefore)\n\n return (\n
    \n \n {showUndefined\n ? legendPluginModuleString(false, chartMetadata)\n : localeDateString(legendDate)}\n \n |\n \n {showUndefined ? chartMetadata.context.toString() : localeTimeString(legendDate)}\n \n
    \n )\n}\n\nexport const ChartLegendBottom = ({\n chartUuid,\n chartMetadata,\n colors,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n onDimensionClick,\n selectedDimensions,\n showLatestOnBlur,\n unitsCurrent,\n viewBefore,\n legendToolbox,\n resizeHandler,\n}: Props) => {\n const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n const chartData = useSelector(\n useCallback((state: any) => selectChartData(state, { id: chartUuid }), [chartUuid])\n )\n const { dimension_names: dimensionNames, dimension_ids: dimensionIds } = chartData\n\n return (\n \n \n {unitsCurrent}\n \n \n \n \n {dimensionIds.map((dimensionId, i) => {\n const dimensionName = dimensionNames[i]\n const color = colors[dimensionName]\n\n const isSelected =\n selectedDimensions.length === 0 || selectedDimensions.includes(dimensionName)\n\n let value\n if (showUndefined) {\n value = null\n } else if (hoveredRow !== -1) {\n const hoveredValueArray = chartData.result.data[hoveredRow]\n // [timestamp, valueDim1, valueDim2, ...]\n value = hoveredValueArray ? hoveredValueArray[i + 1] : null\n } else {\n value = chartData.view_latest_values[i]\n }\n return (\n {\n onDimensionClick(dimensionName, event)\n }}\n role=\"button\"\n tabIndex={0}\n isDisabled={!isSelected}\n key={dimensionId}\n >\n \n \n {isSelected && legendFormatValue(value)}\n \n )\n })}\n \n \n \n {legendToolbox}\n {resizeHandler}\n \n \n \n )\n}\n","import React, { useCallback } from \"react\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectChartData } from \"domains/chart/selectors\"\nimport { getNewSelectedDimensions } from \"domains/chart/utils/legend-utils\"\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { ChartMetadata } from \"../chart-types\"\n\nimport { ChartLegendRight } from \"./chart-legend-right\"\nimport { ChartLegendBottom } from \"./chart-legend-bottom\"\n\ninterface Props {\n attributes: Attributes\n chartUuid: string\n chartMetadata: ChartMetadata\n chartLibrary: string\n colors: {\n [key: string]: string\n }\n hoveredRow: number\n hoveredX: number | null\n legendFormatValue: (value: number | string | null) => number | string\n selectedDimensions: string[]\n setSelectedDimensions: (selectedDimensions: string[]) => void\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewBefore: number\n legendToolbox: JSX.Element\n resizeHandler: React.ReactNode\n}\n\nexport const ChartLegend = ({\n attributes,\n chartUuid,\n chartMetadata,\n chartLibrary,\n colors,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n selectedDimensions,\n setSelectedDimensions,\n showLatestOnBlur,\n unitsCurrent,\n viewBefore,\n legendToolbox,\n resizeHandler,\n}: Props) => {\n const dimension_names = useSelector(\n useCallback((state: any) => selectChartData(state, { id: chartUuid }).dimension_names, [\n chartUuid,\n ])\n )\n\n const onDimensionClick = (clickedDimensionName: string, event: React.MouseEvent) => {\n event.preventDefault()\n const isModifierKeyPressed = event.shiftKey || event.ctrlKey\n const newSelectedDimensions = getNewSelectedDimensions({\n allDimensions: dimension_names,\n selectedDimensions,\n clickedDimensionName,\n isModifierKeyPressed,\n })\n setSelectedDimensions(newSelectedDimensions)\n }\n\n if (attributes.legendPosition === \"bottom\") {\n return (\n \n )\n }\n\n return (\n \n )\n}\n","import React from \"react\"\n\nimport { ToolboxButton } from \"./toolbox-button\"\n\ntype ClickCallback = (event: React.MouseEvent) => void\ninterface Props {\n onToolboxLeftClick: ClickCallback\n 
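// the handlers receive the raw React mouse event, so the pan/zoom step can be\n // scaled by getPanAndZoomStep's ctrl/shift/alt multipliers\n 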
onToolboxRightClick: ClickCallback\n onToolboxZoomInClick: ClickCallback\n onToolboxZoomOutClick: ClickCallback\n}\nexport const LegendToolbox = ({\n onToolboxLeftClick,\n onToolboxRightClick,\n onToolboxZoomInClick,\n onToolboxZoomOutClick,\n}: Props) => (\n
    \n drag it with your mouse or your\n finger (on touch devices).
    Help can be disabled from the settings.\"\n />\n drag it with your mouse or\n your finger (on touch devices).
    Help can be disabled from the settings.\"\n />\n Help can be disabled from the settings.\"\n />\n Help can be disabled from the\n settings.\"\n />\n
    \n)\n","/* eslint-disable no-nested-ternary */\nimport { always, memoizeWith } from \"ramda\"\nimport Color from \"color\"\n\nimport { ChartMetadata, DygraphData } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartLibraryConfig } from \"domains/chart/utils/chartLibrariesSettings\"\n\nexport const getDataForFakeStacked = (\n data: number[][],\n dimensionsVisibility: boolean[],\n): number[][] => data.map((point) => {\n const [timestamp, ...values] = point\n const rest: number[] = []\n let currentMin = 0\n let currentMax = 0\n values\n .map((value, i) => ({ isVisible: dimensionsVisibility[i], value }))\n // reverse because first dimensions should be \"on top\" (at least positive ones)\n .slice().reverse()\n .forEach(({ isVisible, value }) => {\n if (!isVisible) {\n rest.push(0) // push with value '0'. it won't be visible but needs to be present in array\n return\n }\n if (value >= 0) {\n currentMax += value\n rest.push(currentMax)\n } else {\n currentMin += value\n rest.push(currentMin)\n }\n })\n return [\n timestamp,\n ...rest,\n ]\n})\n\nconst isPercentage = (unit: string) => unit === \"percentage\"\n || unit === \"percent\"\n || unit.indexOf(\"%\") !== -1\n\nexport const getDygraphChartType = (\n attributes: Attributes, chartData: DygraphData, chartMetadata: ChartMetadata,\n chartSettings: ChartLibraryConfig,\n) => {\n const isLogScale = (chartSettings.isLogScale as ((a: Attributes) => boolean))(attributes)\n const {\n dygraphType: dygraphRequestedType = chartMetadata.chart_type,\n groupBy,\n } = attributes\n\n if (groupBy && groupBy !== \"dimension\" && isPercentage(chartMetadata.units)) {\n return \"line\"\n }\n\n // corresponds to state.tmp.dygraph_chart_type in old app\n let dygraphChartType = dygraphRequestedType\n if (dygraphChartType === \"stacked\" && chartData.dimensions === 1) {\n dygraphChartType = \"area\"\n }\n if (dygraphChartType === \"stacked\" && isLogScale) {\n dygraphChartType = \"area\"\n }\n return dygraphChartType\n}\n\nconst getBackgroundColor = memoizeWith(\n always(\"true\"),\n () => Color(window.NETDATA.themes.current.background),\n)\n// when in \"fakeStacked\" mode, we cannot use opacity for fill in charts, because the areas would\n// be visible under each other. So the darkening / whitening needs to be added directly to colors\n// (the colors are too saturated for areas and in stacked mode they were with 0.8 opacity)\nexport const transformColors = (colors: string[]) => (\n colors.map((color) => Color(color).mix(getBackgroundColor(), 0.2).hex())\n)\n\nexport const getDygraphFillAlpha = (\n isFakeStacked: boolean, dygraphChartType: string,\n) => (isFakeStacked\n ? window.NETDATA.options.current.color_fill_opacity_fake_stacked\n : dygraphChartType === \"stacked\"\n ? window.NETDATA.options.current.color_fill_opacity_stacked\n : window.NETDATA.options.current.color_fill_opacity_area)\n\n\n// https://github.com/danvk/dygraphs/blob/master/src/iframe-tarp.js#L1-L23\n// On mouseUp dygraphs put rectangles above all iframes so mouseUp can be properly intercepted.\n// this causes a problem with some analytics iframes that place themselves in regions where they\n// aren't visible anyway (for example hubspot iframe on Cloud), and this creates a problematic\n// horizontal scrollbar to appear. 
This function filters those \"rectangles\" (tarps) to omit\n// elements with unreachable \"left\" styles\nexport const hackDygraphIFrameTarps = (tarps: HTMLDivElement[]): HTMLDivElement[] => (\n tarps.filter((element: HTMLDivElement) => {\n const isOutsideReasonableViewport = Number(element.style.left.replace(\"px\", \"\")) > 10000\n if (isOutsideReasonableViewport) {\n element.parentNode!.removeChild(element)\n }\n return !isOutsideReasonableViewport\n })\n)\n","/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable function-paren-newline */\n/* eslint-disable comma-dangle */\nimport React, { useRef, useCallback } from \"react\"\nimport { useToggle } from \"react-use\"\n\nconst useProceededChart = (\n chartRef: any,\n propsRef: any\n): [boolean, React.Ref, (g: Dygraph) => void] => {\n const [proceeded, toggleProceeded] = useToggle(false)\n\n const ref = useRef(null)\n\n const updatePosition = useCallback((g: Dygraph) => {\n const { x } = g.getArea()\n const distance = g.toDomXCoord(propsRef.current.chartData.first_entry * 1000)\n const hasProceeded = distance > x\n toggleProceeded(hasProceeded)\n\n if (hasProceeded && ref.current) {\n const { height } = chartRef.current.getBoundingClientRect()\n ref.current.style.left = `${x}px`\n ref.current.style.right = `calc(100% - ${distance}px)`\n ref.current.style.top = `${height / 2}px`\n }\n }, [])\n\n return [proceeded, ref, updatePosition]\n}\n\nexport default useProceededChart\n","import { useRef } from \"react\"\nimport { useToggle } from \"react-use\"\n\nconst badgeTopMargin = \"40px\"\n\nconst defaultPositionTo = (ref, x, position, topMargin) => {\n ref.current.style.left = `${x}px`\n ref.current.style.right = `calc(100% - ${position}px)`\n ref.current.style.top = topMargin\n}\n\nexport default () => {\n const [isRendered, toggleIsRendered] = useToggle(false)\n\n const ref = useRef(null)\n\n const updatePosition = (isVisible, g, position, positionTo = defaultPositionTo) => {\n if (!isVisible) {\n toggleIsRendered(false)\n return\n }\n\n if (ref.current) {\n toggleIsRendered(true)\n const { x } = g.getArea()\n\n positionTo(ref, x, position, badgeTopMargin)\n }\n }\n\n return [isRendered, ref, updatePosition]\n}\n","import React, { forwardRef } from \"react\"\nimport styled from \"styled-components\"\n\nconst Container = styled.div`\n display: block;\n`\n\nconst ProceededChartDisclaimer = forwardRef((\n props: React.HTMLAttributes,\n ref: React.Ref,\n) => (\n \n \n Want to extend your history of real-time metrics?\n
    \n \n Configure Netdata's \n history\n \n  or use the \n DB engine\n .\n
    \n
    \n))\n\nexport default ProceededChartDisclaimer\n","import React, { forwardRef } from \"react\"\nimport styled from \"styled-components\"\n\nconst backgroundColorMap = {\n WARNING: \"#FFF8E1\",\n CRITICAL: \"#FFEBEF\",\n CLEAR: \"#E5F5E8\",\n}\nexport const getBackgroundColor = (status) => backgroundColorMap[status] || null\n\nconst borderColorMap = {\n WARNING: \"#FFC300\",\n CRITICAL: \"#F59B9B\",\n CLEAR: \"#68C47D\",\n}\nexport const getBorderColor = (status) => borderColorMap[status] || null\n\nconst textColorMap = {\n WARNING: \"#536775\",\n CRITICAL: \"#FF4136\",\n CLEAR: \"#00AB44\",\n}\nexport const getColor = (status) => textColorMap[status] || null\n\nconst Container = styled.div`\n position: absolute;\n margin-right: 10px;\n overflow: hidden;\n pointer-events: none;\n direction: rtl;\n z-index: 10; // higher than chart\n`\n\nconst Badge = styled.div`\n display: inline-block;\n border-radius: 36px;\n padding: 2px 12px;\n background: ${({ background }) => background};\n border: 1px solid ${({ border }) => border};\n color: ${({ color }) => color};\n font-size: 12px;\n font-weight: 700;\n direction: ltr;\n white-space: nowrap;\n`\n\nexport default forwardRef((\n { isVisible, status, label },\n ref,\n) => (\n \n {isVisible && (\n \n {label}\n \n )}\n \n))\n","//@ts-nocheck\nimport { sortBy, reverse } from \"ramda\"\nimport React, { useLayoutEffect, useRef, useCallback } from \"react\"\nimport classNames from \"classnames\"\nimport { useUpdateEffect, useUnmount, useMount } from \"react-use\"\n// this version is needed because it contains a fix for handling constant value in the chart\n// ie. https://github.com/danvk/dygraphs/pull/909\nimport Dygraph from \"vendor/dygraph-c91c859.min\"\nimport \"dygraphs/src-es5/extras/smooth-plotter\"\nimport ResizeObserver from \"resize-observer-polyfill\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { AppStateT } from \"store/app-state\"\nimport { DygraphArea, NetdataDygraph } from \"types/vendor-overrides\"\nimport { TimeRange } from \"types/common\"\nimport { useDateTime } from \"utils/date-time\"\nimport { debounce } from \"utils/debounce\"\n\nimport {\n selectCommonMin,\n selectCommonMax,\n selectGlobalChartUnderlay,\n selectGlobalSelectionMaster,\n selectSmoothPlot,\n selectSyncPanAndZoom,\n selectSpacePanelTransitionEndIsActive,\n selectAlarm,\n selectTimezoneSetting,\n} from \"domains/global/selectors\"\nimport {\n resetGlobalPanAndZoomAction,\n setCommonMaxAction,\n setCommonMinAction,\n setGlobalPauseAction,\n resetGlobalPauseAction,\n} from \"domains/global/actions\"\n\nimport { resetChartPanAndZoomAction } from \"domains/chart/actions\"\n\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport {\n chartLibrariesSettings,\n ChartLibraryConfig,\n ChartLibraryName,\n} from \"../../utils/chartLibrariesSettings\"\nimport { ChartMetadata, DygraphData } from \"../../chart-types\"\nimport { selectResizeHeight } from \"../../selectors\"\n\nimport {\n getDygraphChartType,\n getDataForFakeStacked,\n transformColors,\n getDygraphFillAlpha,\n hackDygraphIFrameTarps,\n} from \"./dygraph/utils\"\nimport \"./dygraph-chart.css\"\n\nimport useProceededChart from \"../../hooks/use-proceeded-chart\"\nimport useDygraphBadge from \"../../hooks/useDygraphBadge\"\nimport ProceededChartDisclaimer from \"./proceeded-chart-disclaimer\"\nimport AlarmBadge, { getBorderColor } from \"./alarmBadge\"\n\n// This is the threshold above which we assume chart shown duration has changed\nconst 
timeframeThreshold = 5000\nconst dygraphResizeDebounceTime = 500\n\ntype IsInRangeOfAvailableData = (props: {\n after: number, before: number, chartData: DygraphData,\n}) => boolean\nconst isInRangeOfAvailableData: IsInRangeOfAvailableData = ({ after, before, chartData }) => (\n after >= (chartData.first_entry * 1000) && before <= (chartData.last_entry * 1000)\n)\n\ninterface GetInitialDygraphOptions {\n attributes: Attributes,\n chartData: DygraphData,\n chartMetadata: ChartMetadata,\n chartSettings: ChartLibraryConfig,\n dimensionsVisibility: boolean[]\n hiddenLabelsElementId: string,\n isFakeStacked: boolean,\n orderedColors: string[],\n setMinMax: (minMax: TimeRange) => void\n shouldSmoothPlot: boolean,\n unitsCurrent: string,\n xAxisDateString: (d: Date) => string,\n xAxisTimeString: (d: Date) => string,\n}\nconst getInitialDygraphOptions = ({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n}: GetInitialDygraphOptions) => {\n const isSparkline = attributes.dygraphTheme === \"sparkline\"\n const highlightCircleSize = isSparkline ? 3 : 4\n\n const isLogScale = (chartSettings.isLogScale as ((a: Attributes) => boolean))(attributes)\n const dygraphChartType = getDygraphChartType(attributes, chartData, chartMetadata, chartSettings)\n const {\n dygraphSmooth = dygraphChartType === \"line\"\n && !isSparkline,\n dygraphDrawAxis = true,\n legendPosition,\n } = attributes\n const isLegendOnBottom = legendPosition === \"bottom\"\n const {\n // destructuring with default values\n dygraphColors = orderedColors,\n dygraphRightGap = 5,\n dygraphShowRangeSelector = false,\n dygraphShowRoller = false,\n dygraphTitle = attributes.title || chartMetadata.title,\n dygraphTitleHeight = 19,\n dygraphLegend = \"always\",\n dygraphLabelsDiv = hiddenLabelsElementId,\n dygraphLabelsSeparateLine = true,\n dygraphIncludeZero = dygraphChartType === \"stacked\",\n dygraphShowZeroValues = true,\n dygraphShowLabelsOnHighLight = true,\n dygraphHideOverlayOnMouseOut = true,\n dygraphXRangePad = 0,\n dygraphYRangePad = 1,\n dygraphValueRange = [null, null],\n dygraphYLabelWidth = 12,\n // eslint-disable-next-line no-nested-ternary\n dygraphStrokeWidth = dygraphChartType === \"stacked\"\n ? 0.1\n : (dygraphSmooth === true\n ? 
1.5\n : 0.7),\n\n dygraphStrokePattern,\n dygraphDrawPoints = false,\n dygraphDrawGapEdgePoints = true,\n dygraphConnectSeparatedPoints = false,\n dygraphPointSize = 1,\n dygraphStepPlot = false,\n dygraphStrokeBorderColor = window.NETDATA.themes.current.background,\n dygraphStrokeBorderWidth = 0,\n dygraphFillGraph = (dygraphChartType === \"area\" || dygraphChartType === \"stacked\"),\n dygraphFillAlpha = getDygraphFillAlpha(isFakeStacked, dygraphChartType),\n dygraphStackedGraph = dygraphChartType === \"stacked\" && !isFakeStacked,\n dygraphStackedGraphNanFill = \"none\",\n dygraphAxisLabelFontSize = 10,\n dygraphAxisLineColor = window.NETDATA.themes.current.axis,\n dygraphAxisLineWidth = 1.0,\n dygraphDrawGrid = true,\n dygraphGridLinePattern,\n dygraphGridLineWidth = 1.0,\n dygraphGridLineColor = window.NETDATA.themes.current.grid,\n dygraphMaxNumberWidth = 8,\n dygraphSigFigs,\n dygraphDigitsAfterDecimal = 2,\n dygraphHighlighCircleSize = highlightCircleSize,\n dygraphHighlightSeriesOpts,\n dygraphHighlightSeriesBackgroundAlpha,\n\n dygraphXPixelsPerLabel = 50,\n dygraphXAxisLabelWidth = 60,\n dygraphDrawXAxis = dygraphDrawAxis,\n dygraphYPixelsPerLabel = 15,\n dygraphYAxisLabelWidth = isLegendOnBottom ? 30 : 50,\n dygraphDrawYAxis = dygraphDrawAxis,\n } = attributes\n return {\n colors: isFakeStacked ? transformColors(reverse(dygraphColors)) : dygraphColors,\n\n // leave a few pixels empty on the right of the chart\n rightGap: isSparkline ? 0 : dygraphRightGap,\n showRangeSelector: dygraphShowRangeSelector,\n showRoller: dygraphShowRoller,\n title: isSparkline ? undefined : dygraphTitle,\n titleHeight: dygraphTitleHeight,\n legend: dygraphLegend, // we need this to get selection events\n labels: chartData.result.labels,\n labelsDiv: dygraphLabelsDiv,\n\n labelsSeparateLines: isSparkline ? true : dygraphLabelsSeparateLine,\n labelsShowZeroValues: isLogScale ? false : dygraphShowZeroValues,\n labelsKMB: false,\n labelsKMG2: false,\n showLabelsOnHighlight: dygraphShowLabelsOnHighLight,\n hideOverlayOnMouseOut: dygraphHideOverlayOnMouseOut,\n includeZero: dygraphIncludeZero,\n xRangePad: dygraphXRangePad,\n yRangePad: isSparkline ? 1 : dygraphYRangePad,\n valueRange: dygraphValueRange,\n ylabel: (isSparkline || isLegendOnBottom) ? undefined : unitsCurrent,\n yLabelWidth: (isSparkline || isLegendOnBottom) ? 0 : dygraphYLabelWidth,\n\n // the function to plot the chart\n plotter: (dygraphSmooth && shouldSmoothPlot) ? window.smoothPlotter : null,\n\n // The width of the lines connecting data points.\n // This can be used to increase the contrast or some graphs.\n strokeWidth: dygraphStrokeWidth,\n strokePattern: dygraphStrokePattern,\n\n // The size of the dot to draw on each point in pixels (see drawPoints).\n // A dot is always drawn when a point is \"isolated\",\n // i.e. there is a missing point on either side of it.\n // This also controls the size of those dots.\n drawPoints: dygraphDrawPoints,\n\n // Draw points at the edges of gaps in the data.\n // This improves visibility of small data segments or other data irregularities.\n drawGapEdgePoints: dygraphDrawGapEdgePoints,\n connectSeparatedPoints: isLogScale ? false : dygraphConnectSeparatedPoints,\n pointSize: dygraphPointSize,\n\n // enabling this makes the chart with little square lines\n stepPlot: dygraphStepPlot,\n\n // Draw a border around graph lines to make crossing lines more easily\n // distinguishable. 
Useful for graphs with many lines.\n strokeBorderColor: dygraphStrokeBorderColor,\n strokeBorderWidth: dygraphStrokeBorderWidth,\n fillGraph: dygraphFillGraph,\n fillAlpha: dygraphFillAlpha,\n stackedGraph: dygraphStackedGraph,\n stackedGraphNaNFill: dygraphStackedGraphNanFill,\n drawAxis: isSparkline ? false : dygraphDrawAxis,\n axisLabelFontSize: dygraphAxisLabelFontSize,\n axisLineColor: dygraphAxisLineColor,\n axisLineWidth: dygraphAxisLineWidth,\n drawGrid: isSparkline ? false : dygraphDrawGrid,\n gridLinePattern: dygraphGridLinePattern,\n gridLineWidth: dygraphGridLineWidth,\n gridLineColor: dygraphGridLineColor,\n maxNumberWidth: dygraphMaxNumberWidth,\n sigFigs: dygraphSigFigs,\n digitsAfterDecimal: dygraphDigitsAfterDecimal,\n highlightCircleSize: dygraphHighlighCircleSize,\n highlightSeriesOpts: dygraphHighlightSeriesOpts, // TOO SLOW: { strokeWidth: 1.5 },\n // TOO SLOW: (state.tmp.dygraph_chart_type === 'stacked')?0.7:0.5,\n highlightSeriesBackgroundAlpha: dygraphHighlightSeriesBackgroundAlpha,\n visibility: dimensionsVisibility,\n logscale: isLogScale,\n\n axes: {\n x: {\n pixelsPerLabel: dygraphXPixelsPerLabel,\n // insufficient typings for Dygraph\n // @ts-ignore\n ticker: Dygraph.dateTicker,\n axisLabelWidth: dygraphXAxisLabelWidth,\n drawAxis: isSparkline ? false : dygraphDrawXAxis,\n axisLabelFormatter: (d: Date | number) => ((d as Date).toTimeString().startsWith(\"00:00:00\")\n ? xAxisDateString(d as Date)\n : xAxisTimeString(d as Date)\n ),\n },\n y: {\n logscale: isLogScale,\n pixelsPerLabel: dygraphYPixelsPerLabel,\n axisLabelWidth: dygraphYAxisLabelWidth,\n drawAxis: isSparkline ? false : dygraphDrawYAxis,\n // axisLabelFormatter is added on the updates\n axisLabelFormatter(y: Date | number) {\n const formatter = setMinMax([\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n this.axes_[0].extremeRange[0],\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n this.axes_[0].extremeRange[1],\n ]) as unknown as ((value: Date | number) => string)\n return formatter(y as number)\n },\n },\n },\n }\n}\n\ninterface Props {\n attributes: Attributes\n chartData: DygraphData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n dimensionsVisibility: boolean[]\n hasEmptyData: boolean\n hasLegend: boolean\n isRemotelyControlled: boolean\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n immediatelyDispatchPanAndZoom: () => void\n\n hoveredRow: number\n hoveredX: number | null\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: TimeRange) => void\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const DygraphChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartLibrary,\n // colors,\n chartUuid,\n dimensionsVisibility,\n hasEmptyData,\n hasLegend,\n isRemotelyControlled,\n onUpdateChartPanAndZoom,\n orderedColors,\n immediatelyDispatchPanAndZoom,\n\n hoveredRow,\n hoveredX,\n setGlobalChartUnderlay,\n setHoveredX,\n setMinMax,\n unitsCurrent,\n viewAfter,\n viewBefore,\n}: Props) => {\n const globalChartUnderlay = 
useSelector(selectGlobalChartUnderlay)\n const selectedAlarm = useSelector(selectAlarm)\n const alarm = selectedAlarm?.chartId === chartData.id ? selectedAlarm : null\n\n const timezone = useSelector(selectTimezoneSetting)\n\n const { xAxisDateString, xAxisTimeString } = useDateTime()\n const chartSettings = chartLibrariesSettings[chartLibrary]\n const hiddenLabelsElementId = `${chartUuid}-hidden-labels-id`\n\n const dygraphChartType = getDygraphChartType(attributes, chartData, chartMetadata, chartSettings)\n // isFakeStacked - is a special mode for displaying stacked charts with both positive and negative\n // values. Dygraph.js doesn't support it so in this case we need to sum the values manually\n // and display the chart as \"area\" type, but with keeping all styling (fill etc.) properties\n // as in \"stacked\" type\n // because first values need to be \"on top\" (at least for positive values), the dimension order\n // needs to be reversed (in getDataForFakeStacked function and when assigning dimension colors)\n const isFakeStacked = chartData.min < 0 && dygraphChartType === \"stacked\"\n const dygraphFillAlpha = getDygraphFillAlpha(isFakeStacked, dygraphChartType)\n\n const chartElement = useRef(null)\n\n const updateChartPanOrZoom = useCallback(({\n after, before,\n callback,\n shouldNotExceedAvailableRange,\n }) => {\n onUpdateChartPanAndZoom({\n after,\n before,\n callback,\n masterID: chartUuid,\n shouldNotExceedAvailableRange,\n })\n }, [chartUuid, onUpdateChartPanAndZoom])\n\n // keep in ref to prevent additional updates\n const dygraphInstance = useRef()\n // state.tmp.dygraph_user_action in old dashboard\n const latestIsUserAction = useRef(false)\n // state.tmp.dygraph_mouse_down in old dashboard\n const isMouseDown = useRef(false)\n // state.tmp.dygraph_highlight_after in old dashboard\n const dygraphHighlightAfter = useRef(null)\n // state.dygraph_last_touch_move in old dashboard\n const dygraphLastTouchMove = useRef(0)\n // state.dygraph_last_touch_page_x in old dashboard\n const dygraphLastTouchPageX = useRef(0)\n // state.dygraph_last_touch_end in old dashboard\n const dygraphLastTouchEnd = useRef()\n\n const dispatch = useDispatch()\n const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n\n const resetGlobalPanAndZoom = useCallback(() => {\n latestIsUserAction.current = false // prevent starting panAndZoom\n if (dygraphInstance.current) {\n // todo on toolbox reset click, do updateOptions({ dateWindow: null })\n // (issue existed also before rewrite)\n dygraphInstance.current.updateOptions({\n // reset dateWindow to the current\n // @ts-ignore external typings dont support null\n dateWindow: null,\n })\n }\n\n if (isSyncPanAndZoom) {\n dispatch(resetGlobalPanAndZoomAction())\n } else {\n dispatch(resetChartPanAndZoomAction({ id: chartUuid }))\n }\n }, [chartUuid, dispatch, isSyncPanAndZoom])\n\n const [isAlarmBadgeVisible, alarmBadgeRef, updateAlarmBadge] = useDygraphBadge() as any\n\n // setGlobalChartUnderlay is using state from closure (chartData.after), so we need to have always\n // the newest callback. 
Unfortunately we cannot use Dygraph.updateOptions() (library restriction)\n // for interactionModel callbacks so we need to keep the callback in mutable ref\n const propsRef = useRef({\n alarm,\n chartData,\n globalChartUnderlay,\n hoveredX,\n immediatelyDispatchPanAndZoom,\n // put it to ref to prevent additional updateOptions() after creating dygraph\n resetGlobalPanAndZoom,\n setGlobalChartUnderlay,\n updateAlarmBadge,\n updateChartPanOrZoom,\n viewAfter,\n viewBefore,\n })\n\n const [\n isProceeded, precededChartRef, updatePrecededPosition,\n ] = useProceededChart(chartElement, propsRef)\n\n useLayoutEffect(() => {\n propsRef.current.alarm = alarm\n propsRef.current.chartData = chartData\n propsRef.current.hoveredX = hoveredX\n propsRef.current.immediatelyDispatchPanAndZoom = immediatelyDispatchPanAndZoom\n propsRef.current.globalChartUnderlay = globalChartUnderlay\n propsRef.current.resetGlobalPanAndZoom = resetGlobalPanAndZoom\n propsRef.current.setGlobalChartUnderlay = setGlobalChartUnderlay\n propsRef.current.updateAlarmBadge = updateAlarmBadge\n propsRef.current.updateChartPanOrZoom = updateChartPanOrZoom\n propsRef.current.viewAfter = viewAfter\n propsRef.current.viewBefore = viewBefore\n }, [\n alarm,\n chartData,\n globalChartUnderlay,\n hoveredX,\n immediatelyDispatchPanAndZoom,\n resetGlobalPanAndZoom,\n setGlobalChartUnderlay,\n updateAlarmBadge,\n updateChartPanOrZoom,\n viewAfter,\n viewBefore,\n ])\n\n const shouldSmoothPlot = useSelector(selectSmoothPlot)\n useLayoutEffect(() => {\n if (chartElement && chartElement.current && !dygraphInstance.current && !hasEmptyData) {\n const dygraphOptionsStatic = getInitialDygraphOptions({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n })\n\n latestIsUserAction.current = false\n\n const dygraphOptions = {\n ...dygraphOptionsStatic,\n // set dateWindow on init - this is needed when chart is globalPanAndZoom-master\n // and user scrolls down/up so the chart hides and then unhides. This causes the chart\n // to re-create, but the data has additional padding which should be outside of\n // visible range\n dateWindow: [propsRef.current.viewAfter, propsRef.current.viewBefore],\n\n highlightCallback(\n event: MouseEvent, xval: number,\n ) {\n // todo\n // state.pauseChart()\n\n const newHoveredX = isMouseDown.current\n ? null\n : xval\n\n const currentHoveredX = propsRef.current.hoveredX\n if (newHoveredX !== currentHoveredX) {\n setHoveredX(newHoveredX)\n }\n },\n\n unhighlightCallback() {\n // todo\n // state.unpauseChart();\n if (propsRef.current.hoveredX !== null) {\n setHoveredX(null)\n }\n },\n drawCallback(dygraph: Dygraph) {\n // the user has panned the chart and this is called to re-draw the chart\n // 1. refresh this chart by adding data to it\n // 2. 
notify all the other charts about the update they need\n\n // to prevent an infinite loop (feedback), we use\n // state.tmp.dygraph_user_action\n // - when true, this is initiated by a user\n // - when false, this is feedback\n\n if (latestIsUserAction.current) {\n latestIsUserAction.current = false\n const xRange = dygraph.xAxisRange()\n const after = Math.round(xRange[0])\n const before = Math.round(xRange[1])\n\n if (isInRangeOfAvailableData({\n after, before, chartData: propsRef.current.chartData,\n })) {\n propsRef.current.updateChartPanOrZoom({ after, before })\n }\n }\n },\n zoomCallback: (minDate: number, maxDate: number) => {\n latestIsUserAction.current = true\n propsRef.current.updateChartPanOrZoom({ after: minDate, before: maxDate })\n },\n\n underlayCallback(canvas: CanvasRenderingContext2D, area: DygraphArea, g: Dygraph) {\n updatePrecededPosition(g)\n\n if (propsRef.current.alarm) {\n const { alarm: currentAlarm } = propsRef.current\n\n const alarmPosition = g.toDomXCoord(currentAlarm.when * 1000)\n const fillColor = getBorderColor(currentAlarm.status)\n const horizontalPadding = 3\n // use RAF, because dygraph doesn't provide any callback called after drawing the chart\n requestAnimationFrame(() => {\n canvas.fillStyle = fillColor\n const globalAlphaCache = canvas.globalAlpha\n canvas.globalAlpha = 0.7\n canvas.fillRect(alarmPosition - horizontalPadding, area.y, 2 * horizontalPadding, area.h)\n canvas.globalAlpha = globalAlphaCache\n })\n\n propsRef.current.updateAlarmBadge(\n propsRef.current.alarm,\n g,\n alarmPosition - horizontalPadding,\n )\n }\n\n // the chart is about to be drawn\n // this function renders global highlighted time-frame\n\n if (propsRef.current.globalChartUnderlay) {\n const { after, before } = propsRef.current.globalChartUnderlay\n\n if (after < before) {\n const HIGHLIGHT_HORIZONTAL_PADDING = 20\n const bottomLeft = g.toDomCoords(after, -HIGHLIGHT_HORIZONTAL_PADDING)\n const topRight = g.toDomCoords(before, HIGHLIGHT_HORIZONTAL_PADDING)\n\n const left = bottomLeft[0]\n const right = topRight[0]\n\n // eslint-disable-next-line no-param-reassign\n canvas.fillStyle = window.NETDATA.themes.current.highlight\n canvas.fillRect(left, area.y, right - left, area.h)\n }\n }\n },\n\n // interactionModel cannot be replaced with updateOptions(). 
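Dygraph treats it as\n // construction-time configuration (see the library-restriction note above), so 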
we need to keep all changing\n // values and callbacks in mutable ref,\n interactionModel: {\n mousedown(event: MouseEvent, dygraph: Dygraph, context: any) {\n // Right-click should not initiate anything.\n if (event.button && event.button === 2) {\n return\n }\n\n latestIsUserAction.current = true\n isMouseDown.current = true\n context.initializeMouseDown(event, dygraph, context)\n\n // limit problematic dygraph's feature, more info above the function\n // eslint-disable-next-line no-param-reassign\n context.tarp.tarps = hackDygraphIFrameTarps(context.tarp.tarps)\n\n dispatch(setGlobalPauseAction())\n\n if (event.button && event.button === 1) {\n // middle mouse button\n\n if (event.shiftKey) {\n // panning\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startPan(event, dygraph, context)\n } else if (event.altKey || event.ctrlKey || event.metaKey) {\n // middle mouse button highlight\n dygraphHighlightAfter.current = dygraph.toDataXCoord(event.offsetX)\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else {\n // middle mouse button selection for zoom\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n }\n } else if (event.shiftKey) {\n // left mouse button selection for zoom (ZOOM)\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else if (event.altKey || event.ctrlKey || event.metaKey) {\n // left mouse button highlight\n dygraphHighlightAfter.current = dygraph.toDataXCoord(event.offsetX)\n // @ts-ignore\n Dygraph.startZoom(event, dygraph, context)\n } else {\n // left mouse button dragging (PAN)\n dygraphHighlightAfter.current = null\n // @ts-ignore\n Dygraph.startPan(event, dygraph, context)\n }\n },\n\n mousemove(event: MouseEvent, dygraph: Dygraph, context: any) {\n // if (state.tmp.dygraph_highlight_after !== null) {\n // else if (\n if (dygraphHighlightAfter.current !== null) {\n // highlight selection\n latestIsUserAction.current = true\n // @ts-ignore\n Dygraph.moveZoom(event, dygraph, context)\n event.preventDefault()\n } else if (context.isPanning) {\n latestIsUserAction.current = true\n // eslint-disable-next-line no-param-reassign\n context.is2DPan = false\n // @ts-ignore\n Dygraph.movePan(event, dygraph, context)\n } else if (context.isZooming) {\n // @ts-ignore\n Dygraph.moveZoom(event, dygraph, context)\n }\n },\n\n mouseup(event: MouseEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = false\n if (dygraphHighlightAfter.current !== null) {\n const sortedRange = sortBy((x) => +x, [\n dygraphHighlightAfter.current,\n dygraph.toDataXCoord(event.offsetX),\n ])\n\n propsRef.current.setGlobalChartUnderlay({\n after: sortedRange[0],\n before: sortedRange[1],\n masterID: chartData.id,\n })\n dygraphHighlightAfter.current = null\n // eslint-disable-next-line no-param-reassign\n context.isZooming = false\n\n // old dashboard code\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n dygraph.clearZoomRect_()\n // this call probably fixes the broken selection circle during highlighting\n // and forces underlayCallback to fire (and draw highlight-rect\n // @ts-ignore\n // eslint-disable-next-line no-underscore-dangle\n dygraph.drawGraph_(false)\n } else if (context.isPanning) {\n latestIsUserAction.current = true\n // @ts-ignore\n Dygraph.endPan(event, dygraph, context)\n propsRef.current.immediatelyDispatchPanAndZoom()\n } else if (context.isZooming) {\n latestIsUserAction.current = true\n // @ts-ignore\n 
Dygraph.endZoom(event, dygraph, context)\n propsRef.current.immediatelyDispatchPanAndZoom()\n }\n },\n\n wheel(event: WheelEvent, dygraph: Dygraph) {\n if (!event.shiftKey && !event.altKey) return\n\n latestIsUserAction.current = true\n event.preventDefault()\n event.stopPropagation()\n\n // https://dygraphs.com/gallery/interaction-api.js\n const zoom = (g, zoomInPercentage, bias) => {\n bias = bias || 0.5\n const [afterAxis, beforeAxis] = g.xAxisRange()\n const delta = beforeAxis - afterAxis\n const increment = delta * zoomInPercentage\n const [afterIncrement, beforeIncrement] = [increment * bias, increment * (1 - bias)]\n\n const after = afterAxis + afterIncrement\n const before = beforeAxis - beforeIncrement\n\n propsRef.current.updateChartPanOrZoom({\n after,\n before,\n shouldNotExceedAvailableRange: true,\n callback: (updatedAfter: number, updatedBefore: number) => {\n dygraph.updateOptions({\n dateWindow: [updatedAfter, updatedBefore],\n })\n },\n })\n }\n\n const offsetToPercentage = (g, offsetX) => {\n // This is calculating the pixel offset of the leftmost date.\n const [axisAfterOffset] = g.toDomCoords(g.xAxisRange()[0], null)\n // x and w are relative to the corner of the drawing area,\n // so that the upper corner of the drawing area is (0, 0).\n const x = offsetX - axisAfterOffset\n // This is computing the rightmost pixel, effectively defining the\n // width.\n const w = g.toDomCoords(g.xAxisRange()[1], null)[0] - axisAfterOffset\n\n // Percentage from the left.\n return w === 0 ? 0 : x / w\n }\n\n const normalDef =\n typeof event.wheelDelta === \"number\" && !Number.isNaN(event.wheelDelta)\n ? event.wheelDelta / 40\n : event.deltaY * -1.2\n\n const normal = event.detail ? event.detail * -1 : normalDef\n const percentage = normal / 50\n\n if (!event.offsetX) event.offsetX = event.layerX - event.target.offsetLeft\n const xPct = offsetToPercentage(dygraph, event.offsetX)\n\n zoom(dygraph, percentage, xPct)\n },\n\n click(event: MouseEvent) {\n event.preventDefault()\n },\n\n dblclick() {\n dispatch(resetGlobalPauseAction({ forcePlay: false }))\n propsRef.current.resetGlobalPanAndZoom()\n },\n\n touchstart(event: TouchEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = true\n latestIsUserAction.current = true\n\n // todo\n // state.pauseChart()\n\n Dygraph.defaultInteractionModel.touchstart(event, dygraph, context)\n\n // we overwrite the touch directions at the end, to overwrite\n // the internal default of dygraph\n // eslint-disable-next-line no-param-reassign\n context.touchDirections = { x: true, y: false }\n\n dygraphLastTouchMove.current = 0\n\n if (typeof event.touches[0].pageX === \"number\") {\n dygraphLastTouchPageX.current = event.touches[0].pageX\n } else {\n dygraphLastTouchPageX.current = 0\n }\n },\n touchmove(event: TouchEvent, dygraph: Dygraph, context: any) {\n latestIsUserAction.current = true\n Dygraph.defaultInteractionModel.touchmove(event, dygraph, context)\n\n dygraphLastTouchMove.current = Date.now()\n },\n\n touchend(event: TouchEvent, dygraph: Dygraph, context: any) {\n isMouseDown.current = false\n latestIsUserAction.current = true\n Dygraph.defaultInteractionModel.touchend(event, dygraph, context)\n\n // if it didn't move, it is a selection\n if (dygraphLastTouchMove.current === 0 && dygraphLastTouchPageX.current !== 0\n && chartElement.current // this is just for TS\n ) {\n latestIsUserAction.current = false // prevent updating pan-and-zoom\n // internal api of dygraph\n // @ts-ignore\n // eslint-disable-next-line 
no-underscore-dangle\n const dygraphPlotter = dygraph.plotter_\n const pct = (dygraphLastTouchPageX.current - (\n dygraphPlotter.area.x + chartElement.current.getBoundingClientRect().left\n )) / dygraphPlotter.area.w\n\n const { current } = propsRef\n const t = Math.round(current.viewAfter\n + (current.viewBefore - current.viewAfter) * pct)\n // don't set \"master\" so the highlight is recalculated (to match existing row)\n setHoveredX(t, true)\n }\n\n // if it was a double tap within the double-click window, reset the charts\n const now = Date.now()\n if (typeof dygraphLastTouchEnd.current !== \"undefined\") {\n if (dygraphLastTouchMove.current === 0) {\n const dt = now - dygraphLastTouchEnd.current\n if (dt <= window.NETDATA.options.current.double_click_speed) {\n propsRef.current.resetGlobalPanAndZoom()\n }\n }\n }\n\n // remember the timestamp of the last touch end\n dygraphLastTouchEnd.current = now\n propsRef.current.immediatelyDispatchPanAndZoom()\n },\n },\n }\n\n const data = isFakeStacked\n ? getDataForFakeStacked(chartData.result.data, dimensionsVisibility)\n : chartData.result.data\n const instance = new Dygraph((chartElement.current), data, dygraphOptions)\n dygraphInstance.current = instance\n }\n }, [attributes, chartData, chartMetadata, chartSettings, chartUuid, dimensionsVisibility,\n hasEmptyData, hiddenLabelsElementId, isFakeStacked,\n orderedColors, setHoveredX, setMinMax, shouldSmoothPlot, unitsCurrent,\n xAxisDateString, xAxisTimeString, updatePrecededPosition, dispatch])\n\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n const isSparkline = attributes.dygraphTheme === \"sparkline\"\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n dygraphInstance.current.updateOptions({\n ylabel: (isSparkline || isLegendOnBottom) ? 
undefined : unitsCurrent,\n })\n }\n }, [attributes, unitsCurrent])\n\n\n // immediately update when changing global chart underlay or currently shown alarm\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n dygraphInstance.current.updateOptions({})\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [alarm, globalChartUnderlay])\n\n const spacePanelTransitionEndIsActive = useSelector(selectSpacePanelTransitionEndIsActive)\n useUpdateEffect(() => {\n if (dygraphInstance.current) {\n // dygraph always resizes on browser width change, but doesn't resize when the container\n // has a different width.\n window.requestAnimationFrame(() => {\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n })\n }\n }, [spacePanelTransitionEndIsActive])\n\n // update data of the chart\n // first effect should only be made by new DygraphInstance()\n useUpdateEffect(() => {\n // don't update when there is no data - in this case we should still show the old chart\n if (dygraphInstance.current && !hasEmptyData) {\n // todo support state.tmp.dygraph_force_zoom\n const forceDateWindow = [viewAfter, viewBefore]\n\n // in old dashboard, when chart needed to reset internal dateWindow state,\n // dateWindow was set to null, and new dygraph got the new dateWindow from results.\n // this caused a slight desync between the dateWindow of parent (master) and child charts\n // I also found that forceDateWindow timestamps have slightly better performance (10%)\n // so if the chart needs to change local dateWindow, we'll always use timestamps instead of\n // null.\n\n const xAxisRange = dygraphInstance.current.xAxisRange()\n // eslint-disable-next-line max-len\n const hasChangedDuration = Math.abs((viewBefore - viewAfter) - (xAxisRange[1] - xAxisRange[0])) > timeframeThreshold\n\n // check if the time is relative\n const hasScrolledToTheFutureDuringPlayMode = viewBefore <= 0\n && (xAxisRange[1] > viewBefore)\n // if viewAfter is bigger than current dateWindow start, just reset dateWindow\n && (xAxisRange[0] > viewAfter)\n && !hasChangedDuration\n\n const optionsDateWindow = (isRemotelyControlled && !hasScrolledToTheFutureDuringPlayMode)\n ? { dateWindow: forceDateWindow }\n : {}\n\n const { dygraphColors = orderedColors } = attributes\n const file = isFakeStacked\n ? getDataForFakeStacked(chartData.result.data, dimensionsVisibility)\n : chartData.result.data\n\n const includeZero = dimensionsVisibility.length === 1 ||\n dimensionsVisibility.filter(x => x === true).length > 1\n\n dygraphInstance.current.updateOptions({\n ...optionsDateWindow,\n colors: isFakeStacked ? transformColors(reverse(dygraphColors)) : dygraphColors,\n file,\n labels: chartData.result.labels,\n fillAlpha: dygraphFillAlpha,\n ...(dygraphChartType === \"stacked\" ? { includeZero } : {}),\n stackedGraph: dygraphChartType === \"stacked\" && !isFakeStacked,\n // see explanation about reversing before isFakeStacked assignment\n visibility: isFakeStacked ? 
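// "Fake stacked" sketch: dygraph's own stackedGraph mode is bypassed and the
// rows are pre-accumulated instead, with series order (and colors/visibility,
// as above) reversed so filled areas paint back-to-front. This is an
// assumption-laden illustration, not the actual getDataForFakeStacked code.
type SketchRow = number[] // [timestamp, dim1, dim2, ...]
const fakeStackRowsSketch = (rows: SketchRow[], visible: boolean[]): SketchRow[] =>
  rows.map(([timestamp, ...values]) => {
    let runningSum = 0
    const stacked = values.map((v, i) => (visible[i] ? (runningSum += v) : v))
    return [timestamp, ...stacked.reverse()] // reversed to match the reversed colors
  })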
reverse(dimensionsVisibility) : dimensionsVisibility,\n })\n }\n }, [attributes, chartData.result, chartUuid, dimensionsVisibility, dygraphChartType,\n dygraphFillAlpha, hasEmptyData, isFakeStacked, isRemotelyControlled, orderedColors,\n viewAfter, viewBefore])\n\n useUpdateEffect(() => {\n if (!dygraphInstance.current) {\n return\n }\n\n const dygraphOptionsStatic = getInitialDygraphOptions({\n attributes,\n chartData,\n chartMetadata,\n chartSettings,\n dimensionsVisibility,\n hiddenLabelsElementId,\n isFakeStacked,\n orderedColors,\n setMinMax,\n shouldSmoothPlot,\n unitsCurrent,\n xAxisDateString,\n xAxisTimeString,\n })\n if (!hasEmptyData) dygraphInstance.current.updateOptions(dygraphOptionsStatic)\n }, [dygraphChartType, timezone])\n\n // set selection\n const currentSelectionMasterId = useSelector(selectGlobalSelectionMaster)\n useLayoutEffect(() => {\n if (dygraphInstance.current && currentSelectionMasterId !== chartUuid) {\n if (hoveredRow === -1) {\n // getSelection is 100 times faster than clearSelection\n if (dygraphInstance.current.getSelection() !== -1) {\n dygraphInstance.current.clearSelection()\n }\n return\n }\n dygraphInstance.current.setSelection(hoveredRow)\n }\n }, [chartData, chartUuid, currentSelectionMasterId, hoveredRow,\n viewAfter, viewBefore])\n\n\n // handle resizeHeight change\n const resizeHeight = useSelector(\n (state: AppStateT) => selectResizeHeight(state, { id: chartUuid }),\n )\n useLayoutEffect(() => {\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n }, [resizeHeight, chartData.dimension_names.length])\n\n\n const commonMinState = useSelector((state: AppStateT) => (\n attributes.commonMin\n ? selectCommonMin(state, attributes.commonMin)\n : undefined\n ))\n const commonMaxState = useSelector((state: AppStateT) => (\n attributes.commonMax\n ? 
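// Sketch of the commonMin/commonMax idea driving the selectors here and the
// effect below: every chart in a group reports its painted y-extremes, and
// each chart widens its own valueRange to the group-wide extreme, so grouped
// charts share one scale. A hypothetical helper, not the dashboard's API:
interface SharedExtreme { currentExtreme: number; charts: Record<string, number> }
const widenToGroupExtremes = (
  painted: [number, number],
  groupMin?: SharedExtreme,
  groupMax?: SharedExtreme,
): [number, number] => [
  groupMin && groupMin.currentExtreme < painted[0] ? groupMin.currentExtreme : painted[0],
  groupMax && groupMax.currentExtreme > painted[1] ? groupMax.currentExtreme : painted[1],
]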
selectCommonMax(state, attributes.commonMax)\n : undefined\n ))\n\n useLayoutEffect(() => {\n const { commonMin: commonMinKey, commonMax: commonMaxKey } = attributes\n\n if (\n dygraphInstance.current\n && (commonMinKey || commonMaxKey)\n ) {\n const extremes = (dygraphInstance.current as NetdataDygraph).yAxisExtremes()[0]\n const [currentMin, currentMax] = extremes\n\n const {\n dygraphValueRange = [null, null],\n } = attributes\n // if the user gave a valueRange, respect it\n const shouldUseCommonMin = dygraphValueRange[0] === null\n const shouldUseCommonMax = dygraphValueRange[1] === null\n\n\n let shouldUpdate = false\n let valueRange: number[] = [...extremes]\n\n // check if current extreme (painted by dygraph) is not more extreme than commonMin/Max\n // if yes - update the chart\n if (commonMinKey && shouldUseCommonMin) {\n if (commonMinState && commonMinState.currentExtreme < currentMin) {\n valueRange[0] = commonMinState.currentExtreme\n shouldUpdate = true\n }\n }\n if (commonMaxKey && shouldUseCommonMax) {\n if (commonMaxState && commonMaxState.currentExtreme > currentMax) {\n valueRange[1] = commonMaxState.currentExtreme\n shouldUpdate = true\n }\n }\n\n if (shouldUpdate) {\n dygraphInstance.current.updateOptions({ valueRange })\n const newExtremes = (dygraphInstance.current as NetdataDygraph).yAxisExtremes()[0]\n // get updated valueRange (rounded by dygraph)\n valueRange = [...newExtremes]\n }\n\n // if the value is different than the one stored in state, update redux state\n if (commonMinKey && shouldUseCommonMin\n && (valueRange[0] !== commonMinState?.charts[chartUuid])\n ) {\n dispatch(setCommonMinAction({ chartUuid, commonMinKey, value: valueRange[0] }))\n }\n if (commonMaxKey && shouldUseCommonMax\n && (valueRange[1] !== commonMaxState?.charts[chartUuid])\n ) {\n dispatch(setCommonMaxAction({ chartUuid, commonMaxKey, value: valueRange[1] }))\n }\n }\n }, [attributes, chartData.result, chartUuid, commonMinState, commonMaxState, dispatch])\n\n useLayoutEffect(() => {\n if (isProceeded && dygraphInstance.current) {\n updatePrecededPosition(dygraphInstance.current)\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [isProceeded])\n\n useUnmount(() => {\n if (dygraphInstance.current) {\n dygraphInstance.current.destroy()\n }\n })\n\n /**\n * resize with ResizeObserver\n */\n const resizeObserver = useRef()\n useMount(() => {\n if (!attributes.detectResize) {\n return\n }\n // flag used to prevent first callback (and resize) on dygraph initial draw\n let hasOmitedFirstCallback = false\n const callbackDebounced = debounce(() => {\n if (!hasOmitedFirstCallback) {\n hasOmitedFirstCallback = true\n return\n }\n\n if (dygraphInstance.current) {\n (dygraphInstance.current as NetdataDygraph).resize()\n }\n }, dygraphResizeDebounceTime)\n\n resizeObserver.current = new ResizeObserver(() => {\n callbackDebounced()\n })\n resizeObserver.current.observe(chartElement.current as HTMLDivElement)\n })\n\n useUnmount(() => {\n dygraphInstance.current = null // clear it for debounce purposes\n if (resizeObserver.current) {\n resizeObserver.current.disconnect()\n }\n })\n\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n return (\n <>\n \n {isProceeded && hasLegend && (\n } />\n )}\n {alarm?.value && hasLegend && (\n // @ts-ignore\n \n )}\n
    \n \n )\n}\n","// https://gist.github.com/ca0v/73a31f57b397606c9813472f7493a940\n\nexport const debounce = any>(func: F, waitFor: number) => {\n let timeout: ReturnType | null = null\n\n const debounced = (...args: Parameters) => {\n if (timeout !== null) {\n clearTimeout(timeout)\n timeout = null\n }\n timeout = setTimeout(() => func(...args), waitFor)\n }\n\n return debounced as (...args: Parameters) => ReturnType\n}\n","import React, { useRef, useEffect, useState } from \"react\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport EasyPie from \"easy-pie-chart\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { ChartLibraryName } from \"domains/chart/utils/chartLibrariesSettings\"\nimport {\n always, cond, identity, T, sortBy, map, pipe,\n} from \"ramda\"\n\ntype GetPercentFromValueMinMax = (arg: {\n value: number | undefined\n min: number | undefined\n max: number | undefined\n isMinOverride: boolean\n isMaxOverride: boolean\n}) => number\nconst getPercentFromValueMinMax: GetPercentFromValueMinMax = ({\n value = 0, min = 0, max = 0,\n isMinOverride,\n isMaxOverride,\n}) => {\n /* eslint-disable no-param-reassign */\n // todo refractor old logic to readable functions\n // if no easyPiechart-min-value attribute\n if (!isMinOverride && min > 0) {\n min = 0\n }\n if (!isMaxOverride && max < 0) {\n max = 0\n }\n\n let pcent\n\n if (min < 0 && max > 0) {\n // it is both positive and negative\n // zero at the top center of the chart\n max = (-min > max) ? -min : max\n pcent = Math.round((value * 100) / max)\n } else if (value >= 0 && min >= 0 && max >= 0) {\n // clockwise\n pcent = Math.round(((value - min) * 100) / (max - min))\n if (pcent === 0) {\n pcent = 0.1\n }\n } else {\n // counter clockwise\n pcent = Math.round(((value - max) * 100) / (max - min))\n if (pcent === 0) {\n pcent = -0.1\n }\n }\n /* eslint-enable no-param-reassign */\n return pcent\n}\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n chartWidth: number\n dimensionsVisibility: boolean[]\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n\n hoveredRow: number\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const EasyPieChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartWidth,\n hoveredRow,\n legendFormatValue,\n orderedColors,\n setMinMax,\n showUndefined,\n unitsCurrent,\n}: Props) => {\n const chartElement = useRef(null)\n const [chartInstance, setChartInstance] = useState()\n\n const valueIndex = hoveredRow === -1\n ? 0\n : (chartData.result.length - 1 - hoveredRow) // because data for easy-pie-chart are flipped\n const value = showUndefined ? 
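// Worked examples for getPercentFromValueMinMax above (inputs made up):
// - mixed-sign range: { value: 25, min: -50, max: 50 } -> max becomes
//   max(-min, max) = 50 and pcent = round(25 * 100 / 50) = 50
// - all-positive range: { value: 0, min: 0, max: 100 } -> the clockwise branch
//   yields 0, which is bumped to 0.1 so the pie never renders fully empty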
null : chartData.result[valueIndex]\n\n const {\n // if this is set, then we're overriding commonMin\n easyPieChartMinValue: min = chartData.min, // todo replace with commonMin\n easyPieChartMaxValue: max = chartData.max, // todo replace with commonMax\n } = attributes\n\n // make sure the order is correct and that value is not outside those boundaries\n // (this check was present in old dashboard but perhaps it's not needed)\n const safeMinMax = pipe(\n map((x: number) => +x),\n sortBy(identity),\n ([_min, _max]: number[]) => [Math.min(_min, value || 0), Math.max(_max, value || 0)],\n )([min, max])\n\n useEffect(() => {\n setMinMax(safeMinMax as [number, number])\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [safeMinMax])\n\n const pcent = getPercentFromValueMinMax({\n value: showUndefined ? 0 : (value as number),\n min: safeMinMax[0],\n max: safeMinMax[1],\n isMinOverride: attributes.easyPieChartMinValue !== undefined,\n isMaxOverride: attributes.easyPieChartMaxValue !== undefined,\n })\n\n useEffect(() => {\n if (chartElement.current && !chartInstance) {\n const stroke = cond([\n [(v) => v < 3, always(2)],\n [T, identity],\n ])(Math.floor(chartWidth / 22))\n\n const {\n easyPieChartTrackColor = window.NETDATA.themes.current.easypiechart_track,\n easyPieChartScaleColor = window.NETDATA.themes.current.easypiechart_scale,\n easyPieChartScaleLength = 5,\n easyPieChartLineCap = \"round\",\n easyPieChartLineWidth = stroke,\n easyPieChartTrackWidth,\n easyPieChartSize = chartWidth,\n easyPieChartRotate = 0,\n easyPieChartAnimate = { duration: 500, enabled: true },\n easyPieChartEasing,\n } = attributes\n\n const newChartInstance = new EasyPie(chartElement.current, {\n barColor: orderedColors[0],\n trackColor: easyPieChartTrackColor,\n scaleColor: easyPieChartScaleColor,\n scaleLength: easyPieChartScaleLength,\n lineCap: easyPieChartLineCap,\n lineWidth: easyPieChartLineWidth,\n trackWidth: easyPieChartTrackWidth,\n size: easyPieChartSize,\n rotate: easyPieChartRotate,\n animate: easyPieChartAnimate,\n easing: easyPieChartEasing,\n })\n setChartInstance(newChartInstance)\n }\n }, [attributes, chartData, chartInstance, chartWidth, orderedColors])\n\n // update with value\n useEffect(() => {\n if (chartInstance) {\n const shouldUseAnimation = hoveredRow === -1 && !showUndefined\n\n if (shouldUseAnimation && !chartInstance.options.animate.enabled) {\n chartInstance.enableAnimation()\n } else if (!shouldUseAnimation && chartInstance.options.animate.enabled) {\n chartInstance.disableAnimation()\n }\n\n setTimeout(() => {\n // need to be in timeout to trigger animation properly\n chartInstance.update(pcent)\n }, 0)\n }\n }, [chartInstance, hoveredRow, pcent, showUndefined])\n\n const valueFontSize = (chartWidth * 2) / 3 / 5\n const valuetop = Math.round((chartWidth - valueFontSize - (chartWidth / 40)) / 2)\n\n const titleFontSize = Math.round((valueFontSize * 1.6) / 3)\n const titletop = Math.round(valuetop - (titleFontSize * 2) - (chartWidth / 40))\n\n const unitFontSize = Math.round(titleFontSize * 0.9)\n const unitTop = Math.round(valuetop + (valueFontSize + unitFontSize) + (chartWidth / 40))\n // to update, just label innerText and pcent are changed\n\n return (\n
    \n \n {legendFormatValue(value)}\n \n \n {attributes.title || chartMetadata.title}\n \n \n {unitsCurrent}\n \n\n
    \n )\n}\n","import React, {\n useRef, useEffect, useState,\n} from \"react\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport { Gauge } from \"gaugeJS\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { ChartLibraryName } from \"domains/chart/utils/chartLibrariesSettings\"\nimport {\n identity, sortBy, map, pipe, always,\n} from \"ramda\"\n\nconst isSetByUser = (x: undefined | number): x is number => (\n typeof x === \"number\"\n)\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n chartLibrary: ChartLibraryName\n chartUuid: string\n colors: {\n [key: string]: string\n }\n chartHeight: number\n chartWidth: number\n dimensionsVisibility: boolean[]\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n onUpdateChartPanAndZoom: (arg: {\n after: number, before: number,\n callback: (after: number, before: number) => void,\n masterID: string,\n shouldNotExceedAvailableRange: boolean,\n }) => void\n orderedColors: string[]\n\n hoveredRow: number\n hoveredX: number | null\n setGlobalChartUnderlay: (arg: { after: number, before: number, masterID: string }) => void\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n viewAfter: number\n viewBefore: number\n}\nexport const GaugeChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n chartUuid,\n chartHeight,\n chartWidth,\n hoveredRow,\n legendFormatValue,\n orderedColors,\n setMinMax,\n showUndefined,\n unitsCurrent,\n}: Props) => {\n const chartCanvasElement = useRef(null)\n const [chartInstance, setChartInstance] = useState()\n\n const valueIndex = hoveredRow === -1\n ? 0\n : (chartData.result.length - 1 - hoveredRow) // because data for easy-pie-chart are flipped\n const value = chartData.result[valueIndex]\n\n const {\n // if this is set, then we're overriding commonMin\n gaugeMinValue: minAttribute,\n gaugeMaxValue: maxAttribute,\n } = attributes\n\n const min = isSetByUser(minAttribute) ? minAttribute : chartData.min\n const max = isSetByUser(maxAttribute) ? maxAttribute : chartData.max\n // we should use minAttribute if it's existing\n // old app was using commonMin\n\n // make sure the order is correct and that value is not outside those boundaries\n // (this check was present in old dashboard but perhaps it's not needed)\n const [safeMin, safeMax] = pipe(\n // if they are attributes, make sure they're converted to numbers\n map((x: number) => +x),\n // make sure it is zero based\n // but only if it has not been set by the user\n ([_min, _max]: number[]) => [\n (!isSetByUser(minAttribute) && _min > 0) ? 0 : _min,\n (!isSetByUser(maxAttribute) && _max < 0) ? 
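// Condensed sketch of the normalization this pipe performs, plus the percent
// clamp used below (hypothetical names):
const normalizeGaugeBoundsSketch = (
  min: number, max: number, value: number,
  userSetMin: boolean, userSetMax: boolean,
): [number, number] => {
  let lo = !userSetMin && min > 0 ? 0 : min // zero-base unless set by the user
  let hi = !userSetMax && max < 0 ? 0 : max
  if (lo > hi) [lo, hi] = [hi, lo] // keep min <= max
  return [Math.min(lo, value), Math.max(hi, value)] // never clip the live value
}
// gauge.js 1.3.1 breaks at the exact extremes, hence clamping into (0, 100):
const toGaugePercentSketch = ([lo, hi]: [number, number], value: number) =>
  Math.min(99.999, Math.max(0.001, ((value - lo) * 100) / (hi - lo)))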
0 : _max,\n ],\n // make sure min <= max\n sortBy(identity),\n ([_min, _max]: number[]) => [Math.min(_min, value), Math.max(_max, value)],\n )([min, max])\n // calling outside \"useEffect\" intentionally,\n // because it should update the values first, and only then render the chart in useEffect()\n useEffect(() => {\n setMinMax([safeMin, safeMax])\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [safeMin, safeMax])\n\n const pcent = pipe(\n always(((value - safeMin) * 100) / (safeMax - safeMin)),\n // bug fix for gauge.js 1.3.1\n // if the value is the absolute min or max, the chart is broken\n (_pcent: number) => Math.max(0.001, _pcent),\n (_pcent: number) => Math.min(99.999, _pcent),\n )()\n\n useEffect(() => {\n if (chartCanvasElement.current && !chartInstance) {\n const {\n gaugePointerColor = window.NETDATA.themes.current.gauge_pointer,\n gaugeStrokeColor = window.NETDATA.themes.current.gauge_stroke,\n gaugeStartColor = orderedColors[0],\n gaugeStopColor,\n gaugeGenerateGradient = false,\n } = attributes\n\n const options = {\n lines: 12, // The number of lines to draw\n angle: 0.14, // The span of the gauge arc\n lineWidth: 0.57, // The line thickness\n radiusScale: 1.0, // Relative radius\n pointer: {\n length: 0.85, // 0.9 The radius of the inner circle\n strokeWidth: 0.045, // The rotation offset\n color: gaugePointerColor, // Fill color\n },\n\n // If false, the max value of the gauge will be updated if value surpass max\n // If true, the min value of the gauge will be fixed unless you set it manually\n limitMax: true,\n limitMin: true,\n colorStart: gaugeStartColor,\n colorStop: gaugeStopColor,\n strokeColor: gaugeStrokeColor,\n generateGradient: (gaugeGenerateGradient === true), // gmosx:\n gradientType: 0,\n highDpiSupport: true, // High resolution support\n }\n\n const newChartInstance = new Gauge(chartCanvasElement.current).setOptions(options)\n\n // we will always feed a percentage (copied from old dashboard)\n newChartInstance.minValue = 0\n newChartInstance.maxValue = 100\n\n setChartInstance(newChartInstance)\n }\n }, [attributes, chartData, chartInstance, chartWidth, orderedColors])\n\n // update with value\n useEffect(() => {\n if (chartInstance) {\n // gauge animation\n const shouldUseAnimation = hoveredRow === -1 && !showUndefined\n // animation doesn't work in newest, 1.3.7 version!\n const speed = shouldUseAnimation ? 32 : 1000000000\n chartInstance.animationSpeed = speed\n setTimeout(() => {\n chartInstance.set(showUndefined ? 0 : pcent)\n }, 0)\n }\n }, [chartInstance, chartHeight, chartWidth, hoveredRow, pcent, showUndefined])\n\n const valueFontSize = Math.floor(chartHeight / 5)\n const valueTop = Math.round((chartHeight - valueFontSize) / 3.2)\n\n const titleFontSize = Math.round(valueFontSize / 2.1)\n const titleTop = 0\n\n const unitFontSize = Math.round(titleFontSize * 0.9)\n\n const minMaxFontSize = Math.round(valueFontSize * 0.75)\n return (\n \n \n \n {legendFormatValue(showUndefined ? null : value)}\n \n \n {attributes.title || chartMetadata.title}\n \n \n {unitsCurrent}\n \n \n {legendFormatValue(showUndefined ? null : safeMin)}\n \n \n {legendFormatValue(showUndefined ? null : safeMax)}\n \n
    \n )\n}\n","import {\n cond, identity, map, pipe, replace, splitEvery, T, toString,\n} from \"ramda\"\n\ntype NormalizeHex = (hex: string) => string\nexport const normalizeHex: NormalizeHex = pipe(\n toString,\n replace(/[^0-9a-f]/gi, \"\"),\n cond([\n [(str) => str.length < 6, (str) => str[0] + str[0] + str[1] + str[1] + str[2] + str[2]],\n [T, identity],\n ]),\n)\n\nexport const colorLuminance = (hex: string, lum: number = 0) => {\n const hexNormalized = normalizeHex(hex)\n\n // convert to decimal and change luminosity\n const rgb = pipe(\n // point-free version generates ts error\n (str: string) => splitEvery(2, str),\n map(\n pipe(\n (str: string) => parseInt(str, 16),\n (nr) => Math.round(\n Math.min(\n Math.max(0, nr + (nr * lum)),\n 255,\n ),\n ).toString(16),\n (str) => `00${str}`.substr(str.length),\n ),\n ),\n (x) => x.join(\"\"),\n )(hexNormalized)\n return `#${rgb}`\n}\n","/* eslint-disable indent */\n/* eslint-disable operator-linebreak */\n/* eslint-disable comma-dangle */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable consistent-return */\nimport \"jquery-sparkline\"\nimport React, { useRef, useEffect, useState } from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { colorLuminance } from \"domains/chart/utils/color-luminance\"\nimport { MS_IN_SECOND } from \"utils/utils\"\nimport { TimeRange } from \"types/common\"\n\nconst convertToTimestamp = (number: number) => {\n if (number > 0) {\n return number\n }\n return new Date().valueOf() + number // number is negative or zero\n}\n\ninterface TimeWindowCorrection {\n paddingLeftPercentage?: string\n widthRatio?: number\n}\nconst getForceTimeWindowCorrection = (\n chartData: EasyPieChartData,\n viewRange: TimeRange\n): TimeWindowCorrection => {\n const requestedAfter = convertToTimestamp(viewRange[0])\n const requestedBefore = convertToTimestamp(viewRange[1])\n const after = chartData.after * MS_IN_SECOND\n const before = chartData.before * MS_IN_SECOND\n\n const currentDuration = before - after\n const requestedDuration = requestedBefore - requestedAfter\n // don't do overrides when current (available) duration is bigger or only slightly lower\n // than requested duration\n const DURATION_CHANGE_TOLERANCE = 1.03\n if (currentDuration > requestedDuration / DURATION_CHANGE_TOLERANCE) {\n return {}\n }\n\n const widthRatio = currentDuration / requestedDuration\n\n const visibleDuration = requestedBefore - requestedAfter\n const paddingLeftPercentage = `${100 * ((after - requestedAfter) / visibleDuration)}%`\n\n return {\n paddingLeftPercentage,\n widthRatio,\n }\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n dimensionsVisibility: boolean[]\n isRemotelyControlled: boolean\n orderedColors: string[]\n unitsCurrent: string\n viewAfterForCurrentData: number\n viewBeforeForCurrentData: number\n}\nexport const SparklineChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n unitsCurrent,\n viewAfterForCurrentData,\n viewBeforeForCurrentData,\n}: Props) => {\n const chartElement = useRef(null)\n\n // update width, height automatically each time\n const [$chartElement, set$chartElement] = useState()\n const sparklineOptions = useRef<{ [key: string]: 
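// Worked examples for colorLuminance above:
// colorLuminance("#808080", 0.5) -> each 0x80 channel becomes
//   round(128 + 128 * 0.5) = 192 = 0xc0 -> "#c0c0c0"
// colorLuminance("fff", -0.5) -> short hex expands to "ffffff", then
//   round(255 - 127.5) = 128 -> "#808080"
// channels clamp to [0, 255], so colorLuminance("#ff0000", 1) -> "#ff0000"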
any }>()\n\n const { paddingLeftPercentage = undefined, widthRatio = 1 } = attributes.forceTimeWindow\n ? getForceTimeWindowCorrection(chartData, [viewAfterForCurrentData, viewBeforeForCurrentData])\n : {}\n\n const updateSparklineValues = () => {\n if (!$chartElement) return\n\n const { width, height } = chartContainerElement.getBoundingClientRect()\n // @ts-ignore\n $chartElement.sparkline(chartData.result, {\n ...sparklineOptions.current,\n width: Math.floor(width * widthRatio),\n height: Math.floor(height),\n })\n }\n\n // create chart\n useEffect(() => {\n const { sparklineLineColor = orderedColors[0] } = attributes\n const defaultFillColor =\n chartMetadata.chart_type === \"line\"\n ? window.NETDATA.themes.current.background\n : colorLuminance(sparklineLineColor, window.NETDATA.chartDefaults.fill_luminance)\n const chartTitle = attributes.title || chartMetadata.title\n\n const emptyStringIfDisable = (x: string | undefined) => (x === \"disable\" ? \"\" : x)\n\n const {\n sparklineType = \"line\",\n sparklineFillColor = defaultFillColor,\n sparklineDisableInteraction = false,\n sparklineDisableTooltips = false,\n sparklineDisableHighlight = false,\n sparklineHighlightLighten = 1.4,\n sparklineTooltipSuffix = ` ${unitsCurrent}`,\n sparklineNumberFormatter = (n: number) => n.toFixed(2),\n } = attributes\n\n const sparklineInitOptions = {\n type: sparklineType,\n lineColor: sparklineLineColor,\n fillColor: sparklineFillColor,\n chartRangeMin: attributes.sparklineChartRangeMin,\n chartRangeMax: attributes.sparklineChartRangeMax,\n composite: attributes.sparklineComposite,\n enableTagOptions: attributes.sparklineEnableTagOptions,\n tagOptionPrefix: attributes.sparklineTagOptionPrefix,\n tagValuesAttribute: attributes.sparklineTagValuesAttribute,\n\n disableHiddenCheck: attributes.sparklineDisableHiddenCheck,\n defaultPixelsPerValue: attributes.sparklineDefaultPixelsPerValue,\n spotColor: emptyStringIfDisable(attributes.sparklineSpotColor),\n minSpotColor: emptyStringIfDisable(attributes.sparklineMinSpotColor),\n maxSpotColor: emptyStringIfDisable(attributes.sparklineMaxSpotColor),\n spotRadius: attributes.sparklineSpotRadius,\n valueSpots: attributes.sparklineValueSpots,\n highlightSpotColor: attributes.sparklineHighlightSpotColor,\n highlightLineColor: attributes.sparklineHighlightLineColor,\n lineWidth: attributes.sparklineLineWidth,\n normalRangeMin: attributes.sparklineNormalRangeMin,\n normalRangeMax: attributes.sparklineNormalRangeMax,\n drawNormalOnTop: attributes.sparklineDrawNormalOnTop,\n xvalues: attributes.sparklineXvalues,\n chartRangeClip: attributes.sparklineChartRangeClip,\n chartRangeMinX: attributes.sparklineChartRangeMinX,\n chartRangeMaxX: attributes.sparklineChartRangeMaxX,\n disableInteraction: sparklineDisableInteraction,\n disableTooltips: sparklineDisableTooltips,\n disableHighlight: sparklineDisableHighlight,\n highlightLighten: sparklineHighlightLighten,\n highlightColor: attributes.sparklineHighlightColor,\n tooltipContainer: attributes.sparklineTooltipContainer,\n tooltipClassname: attributes.sparklineTooltipClassname,\n tooltipChartTitle: chartTitle,\n tooltipFormat: attributes.sparklineTooltipFormat,\n tooltipPrefix: attributes.sparklineTooltipPrefix,\n tooltipSuffix: sparklineTooltipSuffix,\n tooltipSkipNull: attributes.sparklineTooltipSkipNull,\n tooltipValueLookups: attributes.sparklineTooltipValueLookups,\n tooltipFormatFieldlist: attributes.sparklineTooltipFormatFieldlist,\n tooltipFormatFieldlistKey: attributes.sparklineTooltipFormatFieldlistKey,\n 
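// Worked example for getForceTimeWindowCorrection above (made-up numbers):
// the view asks for 10:00-11:00 (3600s) but data only covers 10:30-11:00
// (1800s). 1800 < 3600 / 1.03, so the override kicks in:
//   widthRatio = 1800 / 3600 = 0.5 (sparkline drawn at half width)
//   paddingLeftPercentage = 100 * (1800 / 3600) = "50%" (shifted right)
// keeping the plotted points aligned with the time they actually cover.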
numberFormatter: sparklineNumberFormatter,\n numberDigitGroupSep: attributes.sparklineNumberDigitGroupSep,\n numberDecimalMark: attributes.sparklineNumberDecimalMark,\n numberDigitGroupCount: attributes.sparklineNumberDigitGroupCount,\n animatedZooms: attributes.sparklineAnimatedZooms,\n }\n sparklineOptions.current = sparklineInitOptions\n\n if (!chartElement.current || $chartElement) return\n\n set$chartElement(() => window.$(chartElement.current))\n }, [\n $chartElement,\n attributes,\n chartContainerElement,\n chartData.result,\n chartMetadata,\n orderedColors,\n unitsCurrent,\n widthRatio,\n ])\n\n const { sparklineOnHover } = attributes\n useEffect(() => {\n if (!$chartElement || !sparklineOnHover) return\n\n const onLeave = () => sparklineOnHover(null)\n const onChange = ({ sparklines: [sparkline] }: any) => {\n const { x, y } = sparkline.getCurrentRegionFields()\n sparklineOnHover({ x, y })\n }\n\n // @ts-ignore\n $chartElement.bind(\"sparklineRegionChange\", onChange).bind(\"mouseleave\", onLeave)\n return () => {\n // @ts-ignore\n $chartElement.unbind(\"sparklineRegionChange\", onChange).unbind(\"mouseleave\", onLeave)\n }\n }, [$chartElement, sparklineOnHover])\n\n // update chart\n useEffect(updateSparklineValues, [$chartElement, chartData.result])\n\n const style = paddingLeftPercentage\n ? {\n textAlign: \"initial\" as \"initial\", // :) typescript\n paddingLeft: paddingLeftPercentage,\n }\n : undefined\n\n return (\n
    \n )\n}\n","import * as d3 from \"d3\"\n\nwindow.d3 = d3\n","let fetchPromise: Promise\n\nconst GOOGLE_JS_API_SRC = \"https://www.google.com/jsapi\"\n\nexport const loadGoogleVisualizationApi = () => {\n if (fetchPromise) {\n return fetchPromise\n }\n fetchPromise = new Promise((resolve, reject) => {\n setTimeout(() => {\n const script = document.createElement(\"script\")\n script.type = \"text/javascript\"\n script.async = true\n script.src = GOOGLE_JS_API_SRC\n\n script.onerror = () => {\n reject(Error(\"error loading google.js api\"))\n }\n script.onload = () => {\n resolve(\"ok\")\n }\n\n const firstScript = document.getElementsByTagName(\"script\")[0] as HTMLScriptElement\n (firstScript.parentNode as Node).insertBefore(script, firstScript)\n }, 1000)\n }).then(() => new Promise((resolve) => {\n window.google.load(\"visualization\", \"1.1\", {\n packages: [\"corechart\", \"controls\"],\n callback: resolve,\n })\n }))\n return fetchPromise\n}\n","// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport \"jquery-sparkline\"\nimport React, {\n useRef, useEffect, useState,\n} from \"react\"\n\nimport \"../../utils/d3-loader\"\n// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport d3pie from \"vendor/d3pie-0.2.1-netdata-3\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport {\n ChartMetadata,\n D3pieChartData,\n} from \"domains/chart/chart-types\"\nimport { seconds4human } from \"domains/chart/utils/seconds4human\"\nimport { useDateTime } from \"utils/date-time\"\nimport { tail } from \"ramda\"\n\nconst emptyContent = {\n label: \"no data\",\n value: 100,\n color: \"#666666\",\n}\n\ntype GetDateRange = (arg: {\n chartData: D3pieChartData,\n index: number,\n localeDateString: (date: number | Date) => string,\n localeTimeString: (time: number | Date) => string,\n}) => string\nconst getDateRange: GetDateRange = ({\n chartData, index,\n localeDateString, localeTimeString,\n}) => {\n const dt = Math.round((chartData.before - chartData.after + 1) / chartData.points)\n const dtString = seconds4human(dt)\n\n const before = chartData.result.data[index].time\n const after = before - (dt * 1000)\n\n const d1 = localeDateString(after)\n const t1 = localeTimeString(after)\n const d2 = localeDateString(before)\n const t2 = localeTimeString(before)\n\n if (d1 === d2) {\n return `${d1} ${t1} to ${t2}, ${dtString}`\n }\n\n return `${d1} ${t1} to ${d2} ${t2}, ${dtString}`\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: D3pieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n dimensionsVisibility: boolean[]\n hoveredRow: number\n hoveredX: number | null\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n orderedColors: string[]\n setMinMax: (minMax: [number, number]) => void\n showUndefined: boolean\n unitsCurrent: string\n}\nexport const D3pieChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n hoveredRow,\n hoveredX,\n legendFormatValue,\n orderedColors,\n setMinMax,\n unitsCurrent,\n}: Props) => {\n const chartElement = useRef(null)\n\n const legendFormatValueRef = useRef(legendFormatValue)\n legendFormatValueRef.current = legendFormatValue\n\n const [d3pieInstance, setD3pieInstance] = useState()\n const d3pieOptions = useRef<{[key: string]: any}>()\n\n const { 
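// The module-level fetchPromise above makes the loader idempotent: every
// caller after the first reuses the same script injection. The same pattern
// in a generic, hypothetical form:
let loadOnceCache: Promise<void> | undefined
const loadScriptOnceSketch = (src: string): Promise<void> => {
  if (loadOnceCache) return loadOnceCache
  loadOnceCache = new Promise<void>((resolve, reject) => {
    const script = document.createElement("script")
    script.async = true
    script.src = src
    script.onload = () => resolve()
    script.onerror = () => reject(new Error(`failed to load ${src}`))
    document.head.appendChild(script)
  })
  return loadOnceCache
}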
localeDateString, localeTimeString } = useDateTime()\n\n // create chart\n useEffect(() => {\n if (chartElement.current && !d3pieInstance) {\n // d3pieSetContent\n // todo this should be set in chart.tsx, when creating hook\n setMinMax([chartData.min, chartData.max])\n // index is ROW! it's !== 0 only when selection is made\n const index = 0\n const content = tail(chartData.result.labels).map((label, i) => {\n const value = chartData.result.data[index][label]\n const color = orderedColors[i]\n return {\n label,\n value,\n color,\n }\n }).filter((x) => x.value !== null && x.value > 0)\n const safeContent = content.length > 0 ? content : emptyContent\n\n const defaultTitle = attributes.title || chartMetadata.title\n const dateRange = getDateRange({\n chartData,\n index: 0,\n localeDateString,\n localeTimeString,\n })\n const {\n d3pieTitle = defaultTitle,\n d3pieSubtitle = unitsCurrent,\n d3pieFooter = dateRange,\n d3pieTitleColor = window.NETDATA.themes.current.d3pie.title,\n d3pieTitleFontsize = 12,\n d3pieTitleFontweight = \"bold\",\n d3pieTitleFont = \"arial\",\n d3PieSubtitleColor = window.NETDATA.themes.current.d3pie.subtitle,\n d3PieSubtitleFontsize = 10,\n d3PieSubtitleFontweight = \"normal\",\n d3PieSubtitleFont = \"arial\",\n d3PieFooterColor = window.NETDATA.themes.current.d3pie.footer,\n d3PieFooterFontsize = 9,\n d3PieFooterFontweight = \"bold\",\n d3PieFooterFont = \"arial\",\n d3PieFooterLocation = \"bottom-center\",\n\n d3PiePieinnerradius = \"45%\",\n d3PiePieouterradius = \"80%\",\n d3PieSortorder = \"value-desc\",\n d3PieSmallsegmentgroupingEnabled = false,\n d3PieSmallsegmentgroupingValue = 1,\n d3PieSmallsegmentgroupingValuetype = \"percentage\",\n d3PieSmallsegmentgroupingLabel = \"other\",\n d3PieSmallsegmentgroupingColor = window.NETDATA.themes.current.d3pie.other,\n\n d3PieLabelsOuterFormat = \"label-value1\",\n d3PieLabelsOuterHidewhenlessthanpercentage = null,\n d3PieLabelsOuterPiedistance = 15,\n d3PieLabelsInnerFormat = \"percentage\",\n d3PieLabelsInnerHidewhenlessthanpercentage = 2,\n\n d3PieLabelsMainLabelColor = window.NETDATA.themes.current.d3pie.mainlabel,\n d3PieLabelsMainLabelFont = \"arial\",\n d3PieLabelsMainLabelFontsize = 10,\n d3PieLabelsMainLabelFontweight = \"normal\",\n\n d3PieLabelsPercentageColor = window.NETDATA.themes.current.d3pie.percentage,\n d3PieLabelsPercentageFont = \"arial\",\n d3PieLabelsPercentageFontsize = 10,\n d3PieLabelsPercentageFontweight = \"bold\",\n\n d3PieLabelsValueColor = window.NETDATA.themes.current.d3pie.value,\n d3PieLabelsValueFont = \"arial\",\n d3PieLabelsValueFontsize = 10,\n d3PieLabelsValueFontweight = \"bold\",\n\n d3PieLabelsLinesEnabled = true,\n d3PieLabelsLinesStyle = \"curved\",\n d3PieLabelsLinesColor = \"segment\", // \"segment\" or a hex color\n\n d3PieLabelsTruncationEnabled = false,\n d3PieLabelsTruncationTruncatelength = 30,\n\n d3PieMiscColorsSegmentstroke = window.NETDATA.themes.current.d3pie.segment_stroke,\n d3PieMiscGradientEnabled = false,\n d3PieMiscColorsPercentage = 95,\n d3PieMiscGradientColor = window.NETDATA.themes.current.d3pie.gradient_color,\n\n d3PieCssprefix = null,\n } = attributes\n\n const { width, height } = chartContainerElement.getBoundingClientRect()\n\n const initialD3pieOptions = {\n header: {\n title: {\n text: d3pieTitle,\n color: d3pieTitleColor,\n fontSize: d3pieTitleFontsize,\n fontWeight: d3pieTitleFontweight,\n font: d3pieTitleFont,\n },\n subtitle: {\n text: d3pieSubtitle,\n color: d3PieSubtitleColor,\n fontSize: d3PieSubtitleFontsize,\n fontWeight: 
d3PieSubtitleFontweight,\n font: d3PieSubtitleFont,\n },\n titleSubtitlePadding: 1,\n },\n footer: {\n text: d3pieFooter,\n color: d3PieFooterColor,\n fontSize: d3PieFooterFontsize,\n fontWeight: d3PieFooterFontweight,\n font: d3PieFooterFont,\n location: d3PieFooterLocation,\n },\n size: {\n canvasHeight: Math.floor(height),\n canvasWidth: Math.floor(width),\n pieInnerRadius: d3PiePieinnerradius,\n pieOuterRadius: d3PiePieouterradius,\n },\n data: {\n // none, random, value-asc, value-desc, label-asc, label-desc\n sortOrder: d3PieSortorder,\n smallSegmentGrouping: {\n enabled: d3PieSmallsegmentgroupingEnabled,\n value: d3PieSmallsegmentgroupingValue,\n // percentage, value\n valueType: d3PieSmallsegmentgroupingValuetype,\n label: d3PieSmallsegmentgroupingLabel,\n color: d3PieSmallsegmentgroupingColor,\n },\n\n // REQUIRED! This is where you enter your pie data; it needs to be an array of objects\n // of this form: { label: \"label\", value: 1.5, color: \"#000000\" } - color is optional\n content: safeContent,\n },\n\n\n labels: {\n outer: {\n // label, value, percentage, label-value1, label-value2, label-percentage1,\n // label-percentage2\n format: d3PieLabelsOuterFormat,\n hideWhenLessThanPercentage: d3PieLabelsOuterHidewhenlessthanpercentage,\n pieDistance: d3PieLabelsOuterPiedistance,\n },\n inner: {\n // label, value, percentage, label-value1, label-value2, label-percentage1,\n // label-percentage2\n format: d3PieLabelsInnerFormat,\n hideWhenLessThanPercentage: d3PieLabelsInnerHidewhenlessthanpercentage,\n },\n mainLabel: {\n color: d3PieLabelsMainLabelColor, // or 'segment' for dynamic color\n font: d3PieLabelsMainLabelFont,\n fontSize: d3PieLabelsMainLabelFontsize,\n fontWeight: d3PieLabelsMainLabelFontweight,\n },\n percentage: {\n color: d3PieLabelsPercentageColor,\n font: d3PieLabelsPercentageFont,\n fontSize: d3PieLabelsPercentageFontsize,\n fontWeight: d3PieLabelsPercentageFontweight,\n decimalPlaces: 0,\n },\n value: {\n color: d3PieLabelsValueColor,\n font: d3PieLabelsValueFont,\n fontSize: d3PieLabelsValueFontsize,\n fontWeight: d3PieLabelsValueFontweight,\n },\n lines: {\n enabled: d3PieLabelsLinesEnabled,\n style: d3PieLabelsLinesStyle,\n color: d3PieLabelsLinesColor,\n },\n truncation: {\n enabled: d3PieLabelsTruncationEnabled,\n truncateLength: d3PieLabelsTruncationTruncatelength,\n },\n formatter(context: any) {\n if (context.part === \"value\") {\n return legendFormatValueRef.current(context.value)\n }\n if (context.part === \"percentage\") {\n return `${context.label}%`\n }\n\n return context.label\n },\n },\n effects: {\n load: {\n effect: \"none\", // none / default\n speed: 0, // commented in the d3pie code to speed it up\n },\n pullOutSegmentOnClick: {\n effect: \"bounce\", // none / linear / bounce / elastic / back\n speed: 400,\n size: 5,\n },\n highlightSegmentOnMouseover: true,\n highlightLuminosity: -0.2,\n },\n tooltips: {\n enabled: false,\n type: \"placeholder\", // caption|placeholder\n string: \"\",\n placeholderParser: null, // function\n styles: {\n fadeInSpeed: 250,\n backgroundColor: window.NETDATA.themes.current.d3pie.tooltip_bg,\n backgroundOpacity: 0.5,\n color: window.NETDATA.themes.current.d3pie.tooltip_fg,\n borderRadius: 2,\n font: \"arial\",\n fontSize: 12,\n padding: 4,\n },\n },\n misc: {\n colors: {\n background: \"transparent\", // transparent or color #\n // segments: state.chartColors(),\n segmentStroke: d3PieMiscColorsSegmentstroke,\n },\n gradient: {\n enabled: d3PieMiscGradientEnabled,\n percentage: d3PieMiscColorsPercentage,\n 
color: d3PieMiscGradientColor,\n },\n canvasPadding: {\n top: 5,\n right: 5,\n bottom: 5,\n left: 5,\n },\n pieCenterOffset: {\n x: 0,\n y: 0,\n },\n cssPrefix: d3PieCssprefix,\n },\n callbacks: {\n onload: null,\n onMouseoverSegment: null,\n onMouseoutSegment: null,\n onClickSegment: null,\n },\n }\n // eslint-disable-next-line new-cap\n const newD3pieInstance = new d3pie(chartElement.current, initialD3pieOptions)\n d3pieOptions.current = initialD3pieOptions\n setD3pieInstance(() => newD3pieInstance)\n }\n }, [attributes, chartContainerElement, chartData, chartMetadata, d3pieInstance, legendFormatValue,\n localeDateString, localeTimeString, orderedColors, setMinMax, unitsCurrent])\n\n // update chart\n useEffect(() => {\n if (d3pieInstance && d3pieOptions.current) {\n const dateRange = getDateRange({\n chartData,\n index: 0,\n localeDateString,\n localeTimeString,\n })\n const {\n d3pieSubtitle = unitsCurrent,\n d3pieFooter = dateRange,\n } = attributes\n\n\n const isHoveredButNoData = !!hoveredX && (hoveredRow === -1)\n const slot = chartData.result.data.length - hoveredRow - 1\n\n const index = (slot < 0 || slot >= chartData.result.data.length)\n ? 0\n : slot\n\n const content = tail(chartData.result.labels).map((label, i) => {\n const value = chartData.result.data[index][label]\n const color = orderedColors[i]\n return {\n label,\n value,\n color,\n }\n }).filter((x) => x.value !== null && x.value > 0)\n const safeContent = (content.length > 0 && !isHoveredButNoData)\n ? content\n : [emptyContent]\n\n d3pieInstance.options.header.subtitle.text = d3pieSubtitle\n d3pieInstance.options.footer.text = d3pieFooter\n\n d3pieInstance.options.data.content = safeContent\n d3pieInstance.destroy()\n d3pieInstance.recreate()\n }\n }, [attributes, chartData, d3pieInstance, hoveredRow, hoveredX, localeDateString,\n localeTimeString, orderedColors, unitsCurrent])\n\n return (\n
    \n )\n}\n","// @ts-ignore \"declare module\" doesn't work properly when importing dashboard in cloud\nimport \"peity\"\nimport React, {\n useRef, useState, useLayoutEffect,\n} from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { colorLuminance } from \"domains/chart/utils/color-luminance\"\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n orderedColors: string[]\n}\nexport const PeityChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n}: Props) => {\n const chartElement = useRef(null)\n\n // update width, height automatically each time\n const [$chartElement, set$chartElement] = useState()\n const peityOptions = useRef<{\n stroke: string,\n fill: string,\n strokeWidth: number,\n width: number,\n height: number,\n }>()\n\n\n // create chart\n useLayoutEffect(() => {\n if (chartElement.current && !$chartElement) {\n const $element = window.$(chartElement.current)\n\n const { width, height } = chartContainerElement.getBoundingClientRect()\n\n const {\n peityStrokeWidth = 1,\n } = attributes\n const peityInitOptions = {\n stroke: window.NETDATA.themes.current.foreground,\n strokeWidth: peityStrokeWidth,\n width: Math.floor(width),\n height: Math.floor(height),\n fill: window.NETDATA.themes.current.foreground,\n }\n\n set$chartElement(() => $element)\n peityOptions.current = peityInitOptions\n }\n }, [attributes, $chartElement, chartContainerElement])\n\n // update chart\n useLayoutEffect(() => {\n if ($chartElement && peityOptions.current) {\n const getFillOverride = () => (\n chartMetadata.chart_type === \"line\"\n ? window.NETDATA.themes.current.background\n : colorLuminance(orderedColors[0], window.NETDATA.chartDefaults.fill_luminance)\n )\n const updatedOptions = {\n ...peityOptions.current,\n stroke: orderedColors[0],\n // optimization from the old dashboard; could perhaps be converted to useMemo()\n fill: (orderedColors[0] === peityOptions.current.stroke)\n ? peityOptions.current.fill\n : getFillOverride(),\n }\n $chartElement.peity(\"line\", updatedOptions)\n peityOptions.current = updatedOptions\n }\n }, [$chartElement, chartData, chartMetadata, orderedColors])\n\n return (\n \n {chartData.result}\n
    \n )\n}\n","import React, {\n useRef, useState, useLayoutEffect,\n} from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata, EasyPieChartData } from \"domains/chart/chart-types\"\nimport { loadGoogleVisualizationApi } from \"domains/chart/utils/google-visualization-loader\"\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartMetadata: ChartMetadata\n chartElementClassName: string\n chartElementId: string\n orderedColors: string[]\n unitsCurrent: string\n}\nexport const GoogleChart = ({\n attributes,\n chartData,\n chartMetadata,\n chartElementClassName,\n chartElementId,\n orderedColors,\n unitsCurrent,\n}: Props) => {\n const chartElement = useRef(null)\n const googleChartInstance = useRef<\n google.visualization.AreaChart |\n google.visualization.LineChart>()\n\n const [hasApiBeenLoaded, setHasApiBeenLoaded] = useState(false)\n loadGoogleVisualizationApi()\n .then(() => {\n setHasApiBeenLoaded(true)\n })\n\n const googleOptions = useRef<{[key: string]: unknown}>()\n\n // update chart\n useLayoutEffect(() => {\n if (googleChartInstance.current && googleOptions.current) {\n const dataTable = new window.google.visualization.DataTable(chartData.result)\n googleChartInstance.current.draw(dataTable, googleOptions.current)\n }\n }, [chartData])\n\n // create chart\n useLayoutEffect(() => {\n if (chartElement.current && !googleOptions.current && hasApiBeenLoaded) {\n const dataTable = new window.google.visualization.DataTable(chartData.result)\n\n const {\n title = chartMetadata.title,\n } = attributes\n const chartType = chartMetadata.chart_type\n const areaOpacity = new Map([\n [\"area\", window.NETDATA.options.current.color_fill_opacity_area],\n [\"stacked\", window.NETDATA.options.current.color_fill_opacity_stacked],\n ]).get(chartType) || 0.3\n const initialGoogleOptions = {\n colors: orderedColors,\n\n // do not set width, height - the chart resizes itself\n // width: state.chartWidth(),\n // height: state.chartHeight(),\n lineWidth: chartType === \"line\" ? 2 : 1,\n title,\n fontSize: 11,\n hAxis: {\n // title: \"Time of Day\",\n // format:'HH:mm:ss',\n viewWindowMode: \"maximized\",\n slantedText: false,\n format: \"HH:mm:ss\",\n textStyle: {\n fontSize: 9,\n },\n gridlines: {\n color: \"#EEE\",\n },\n },\n vAxis: {\n title: unitsCurrent,\n viewWindowMode: (chartType === \"area\" || chartType === \"stacked\")\n ? \"maximized\"\n : \"pretty\",\n minValue: chartType === \"stacked\" ? undefined : -0.1,\n maxValue: chartType === \"stacked\" ? undefined : 0.1,\n direction: 1,\n textStyle: {\n fontSize: 9,\n },\n gridlines: {\n color: \"#EEE\",\n },\n },\n chartArea: {\n width: \"65%\",\n height: \"80%\",\n },\n focusTarget: \"category\",\n annotation: {\n 1: {\n style: \"line\",\n },\n },\n pointsVisible: false,\n titlePosition: \"out\",\n titleTextStyle: {\n fontSize: 11,\n },\n tooltip: {\n isHtml: false,\n ignoreBounds: true,\n textStyle: {\n fontSize: 9,\n },\n },\n curveType: \"function\" as \"function\",\n areaOpacity,\n isStacked: chartType === \"stacked\",\n }\n\n const googleInstance = [\"area\", \"stacked\"].includes(chartMetadata.chart_type)\n ? 
new window.google.visualization.AreaChart(chartElement.current)\n : new window.google.visualization.LineChart(chartElement.current)\n\n googleInstance.draw(dataTable, initialGoogleOptions)\n\n googleOptions.current = initialGoogleOptions\n googleChartInstance.current = googleInstance\n }\n }, [attributes, chartData.result, chartMetadata, chartElement, hasApiBeenLoaded, orderedColors,\n unitsCurrent])\n\n\n return (\n \n )\n}\n","import React from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { EasyPieChartData } from \"domains/chart/chart-types\"\n\ninterface Props {\n attributes: Attributes\n chartData: EasyPieChartData\n chartElementClassName: string\n chartElementId: string\n}\nexport const TextOnly = ({\n attributes,\n chartData,\n chartElementClassName,\n chartElementId,\n}: Props) => {\n const { textOnlyDecimalPlaces = 1, textOnlyPrefix = \"\", textOnlySuffix = \"\" } = attributes\n\n // Round based on number of decimal places to show\n const precision = 10 ** textOnlyDecimalPlaces\n const value = Math.round(chartData.result[0] * precision) / precision\n\n const textContent = chartData.result.length === 0 ? \"\" : textOnlyPrefix + value + textOnlySuffix\n\n return (\n
    \n {textContent}\n
    \n )\n}\n","/* eslint-disable no-param-reassign */\n// @ts-nocheck\n\nexport const defaultCellSize = 11\nexport const defaultPadding = 1\nexport const defaultAspectRatio = Math.round(16 / 9)\n\nexport const getCellBoxSize = (cellSize = defaultCellSize, padding = defaultPadding) =>\n cellSize - padding\nexport const getRows = (data, aspectRatio = defaultAspectRatio) =>\n Math.sqrt(data.length / aspectRatio)\nexport const getColumns = (rows, aspectRatio = defaultAspectRatio) => rows * aspectRatio\n\nexport const getXPosition = (columns, index, cellSize = defaultCellSize) =>\n Math.floor(index % columns) * cellSize\nexport const getYPosition = (columns, index, cellSize = defaultCellSize) =>\n Math.floor(index / columns) * cellSize\n\nexport const getFullWidth = (columns, cellSize = defaultCellSize) => Math.ceil(columns) * cellSize\nexport const getFullHeight = (rows, cellSize = defaultCellSize, padding = defaultCellSize) =>\n Math.ceil(rows) * cellSize + padding\n\nexport const getOffsetPosition = (offset, cellSize = defaultCellSize) =>\n Math.floor(offset / cellSize)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable no-param-reassign */\n// @ts-nocheck\nimport { getCellBoxSize, getXPosition, getYPosition, getOffsetPosition } from \"./utilities\"\n\nexport default (\n el,\n columns,\n total,\n { onMouseenter, onMouseout },\n { cellSize, cellPadding } = {}\n) => {\n let hoveredIndex = -1\n\n const getEvent = index => {\n const rect = el.getBoundingClientRect()\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n const left = rect.left + offsetX\n const top = rect.top + offsetY\n const cellBoxSize = getCellBoxSize(cellSize, cellPadding)\n\n return {\n index,\n left,\n top,\n right: left + cellBoxSize,\n bottom: top + cellBoxSize,\n width: cellBoxSize,\n height: cellBoxSize,\n offsetX,\n offsetY,\n }\n }\n\n const mouseout = () => {\n onMouseout(getEvent(hoveredIndex))\n hoveredIndex = -1\n }\n\n const mousemove = e => {\n const { offsetX, offsetY } = e\n const x = getOffsetPosition(offsetX, cellSize)\n const y = getOffsetPosition(offsetY, cellSize)\n const nextHoveredIndex = y * columns + x\n\n if (nextHoveredIndex === hoveredIndex) return\n\n if (hoveredIndex !== -1) mouseout()\n\n if (nextHoveredIndex >= total) return\n\n onMouseenter(getEvent(nextHoveredIndex))\n hoveredIndex = nextHoveredIndex\n }\n\n el.addEventListener(\"mousemove\", mousemove)\n el.addEventListener(\"mouseout\", mouseout)\n return () => {\n el.removeEventListener(\"mousemove\", mousemove)\n el.removeEventListener(\"mouseout\", mouseout)\n }\n}\n","/* eslint-disable object-curly-newline */\n/* eslint-disable comma-dangle */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable no-param-reassign */\n// @ts-nocheck\nimport { scaleLinear, extent } from \"d3\"\nimport {\n getCellBoxSize,\n getRows,\n getColumns,\n getXPosition,\n getYPosition,\n getFullWidth,\n getFullHeight,\n} from \"./utilities\"\nimport registerEvents from \"./events\"\n\nexport const getWidth = (data, { aspectRatio, cellSize } = {}) => {\n const rows = getRows(data, aspectRatio)\n const columns = getColumns(rows, aspectRatio)\n return getFullWidth(columns, cellSize)\n}\n\nconst getCanvasAttributes = (data, { aspectRatio, cellSize, padding } = {}) => {\n const rows = getRows(data, aspectRatio)\n const columns = getColumns(rows, aspectRatio)\n const width = getFullWidth(columns, cellSize)\n const height = getFullHeight(rows, cellSize, padding)\n\n return { width, 
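// Worked example for the grid helpers above, with 144 cells and the default
// aspectRatio of Math.round(16 / 9) = 2:
//   rows = sqrt(144 / 2) ≈ 8.49, columns = rows * 2 ≈ 16.97
//   index 20, cellSize 11: x = floor(20 % 16.97) * 11 = 33,
//                          y = floor(20 / 16.97) * 11 = 11
//   full width = ceil(16.97) * 11 = 187px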
height, columns: Math.ceil(columns) }\n}\n\nconst defaultColorRange = [\"rgba(198, 227, 246, 0.9)\", \"rgba(14, 154, 255, 0.9)\"]\n\nconst makeGetColor = (values, colorRange = defaultColorRange) =>\n scaleLinear()\n .domain(extent(values, value => value))\n .range(colorRange)\n\nexport default (el, { onMouseenter, onMouseout }, options = {}) => {\n const { cellSize, cellPadding, cellStroke = 2, lineWidth = 1, colorRange } = options\n const canvas = el.getContext(\"2d\")\n\n let activeBox = -1\n let deactivateBox = () => {}\n let activateBox = {}\n let clearEvents = () => {}\n\n const clear = () => {\n deactivateBox()\n clearEvents()\n canvas.clearRect(0, 0, el.width, el.height)\n canvas.beginPath()\n }\n\n const update = ({ data }) => {\n const { width, height, columns } = getCanvasAttributes(data, options)\n el.width = parseInt(width)\n el.height = parseInt(height)\n clear()\n clearEvents()\n const getColor = makeGetColor(data, colorRange)\n\n const drawBox = (value, index) => {\n canvas.fillStyle = getColor(value)\n\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n\n if (lineWidth && cellStroke) {\n canvas.clearRect(\n offsetX - lineWidth,\n offsetY - lineWidth,\n getCellBoxSize(cellSize, cellPadding) + cellStroke,\n getCellBoxSize(cellSize, cellPadding) + cellStroke\n )\n }\n\n canvas.fillRect(\n offsetX,\n offsetY,\n getCellBoxSize(cellSize, cellPadding),\n getCellBoxSize(cellSize, cellPadding)\n )\n }\n\n data.forEach(drawBox)\n\n clearEvents = registerEvents(\n el,\n columns,\n data.length,\n {\n onMouseenter,\n onMouseout,\n },\n options\n )\n\n deactivateBox = () => {\n if (activeBox !== -1) drawBox(data[activeBox], activeBox)\n }\n\n activateBox = index => {\n deactivateBox()\n activeBox = index\n\n const offsetX = getXPosition(columns, index, cellSize)\n const offsetY = getYPosition(columns, index, cellSize)\n\n if (lineWidth && cellStroke) {\n canvas.lineWidth = lineWidth\n canvas.strokeStyle = \"#fff\"\n canvas.strokeRect(\n offsetX + lineWidth,\n offsetY + lineWidth,\n getCellBoxSize(cellSize, cellPadding) - cellStroke,\n getCellBoxSize(cellSize, cellPadding) - cellStroke\n )\n }\n }\n }\n\n return {\n clear,\n update,\n activateBox: index => activateBox(index),\n deactivateBox: () => deactivateBox(),\n }\n}\n","/* eslint-disable arrow-body-style */\n// @ts-nocheck\n\nexport default (el) => {\n return el.getBoundingClientRect().top / window.innerHeight > 0.5 ? 
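The box fill colors come from a d3 linear scale whose domain is the data extent; because the range endpoints are CSS color strings, d3's default interpolator blends between them. A standalone sketch of makeGetColor under the same default range:

import { scaleLinear, extent } from "d3"

// Same defaults as defaultColorRange above: light blue -> netdata blue.
const colorRange = ["rgba(198, 227, 246, 0.9)", "rgba(14, 154, 255, 0.9)"]

const makeGetColor = (values: number[]) =>
  scaleLinear<string>()
    .domain(extent(values) as [number, number]) // extent() = [min, max]
    .range(colorRange)

const getColor = makeGetColor([0, 25, 50, 100])
console.log(getColor(0))  // the low end of the range
console.log(getColor(50)) // an interpolated rgb(...) between the two stops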
\"top\" : \"bottom\"\n}\n","/* eslint-disable operator-linebreak */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable react/jsx-fragments */\n// @ts-nocheck\nimport React, { useRef, useLayoutEffect, Fragment, useState, useCallback } from \"react\"\nimport { Drop } from \"@netdata/netdata-ui\"\nimport drawBoxes from \"./drawBoxes\"\nimport getAlign from \"./getAlign\"\n\ninterface GroupboxData {\n data: number[]\n labels: string[]\n}\n\ninterface GroupBoxProps {\n data: GroupboxData[]\n}\n\nconst aligns = {\n top: { bottom: \"top\" },\n bottom: { top: \"bottom\" },\n}\n\nconst GroupBox = ({ data, renderTooltip, ...options }: GroupBoxProps) => {\n const dataRef = useRef()\n const canvasRef = useRef()\n const boxesRef = useRef()\n\n const [hover, setHover] = useState(null)\n const dropHoverRef = useRef(false)\n const boxHoverRef = useRef(-1)\n const timeoutId = useRef()\n\n const close = () => {\n boxesRef.current.deactivateBox()\n setHover(null)\n dropHoverRef.current = false\n boxHoverRef.current = -1\n }\n\n const closeDrop = () =>\n requestAnimationFrame(() => {\n setHover(currentHover => {\n if (\n !dropHoverRef.current &&\n (boxHoverRef.current === -1 || boxHoverRef.current !== currentHover?.index)\n ) {\n close()\n }\n return currentHover\n })\n })\n\n useLayoutEffect(() => {\n boxesRef.current = drawBoxes(\n canvasRef.current,\n {\n onMouseenter: ({ index, ...rect }) => {\n boxHoverRef.current = index\n boxesRef.current.activateBox(index)\n timeoutId.current = setTimeout(() => {\n setHover({\n target: { getBoundingClientRect: () => rect },\n index,\n rect,\n })\n }, 600)\n },\n onMouseout: () => {\n boxHoverRef.current = -1\n clearTimeout(timeoutId.current)\n closeDrop()\n },\n },\n options\n )\n return () => boxesRef.current.clear()\n }, [])\n\n useLayoutEffect(() => {\n if (\n hover &&\n dataRef.current &&\n dataRef.current.labels[hover.index] !== data.labels[hover.index]\n ) {\n close()\n }\n dataRef.current = data\n boxesRef.current.update(data)\n }, [data])\n\n const onMouseEnter = useCallback(() => {\n dropHoverRef.current = true\n }, [])\n\n const onMouseLeave = useCallback(() => {\n dropHoverRef.current = false\n closeDrop()\n }, [])\n\n const align = hover && getAlign(hover.target)\n\n return (\n \n \n {hover && renderTooltip && (\n \n {renderTooltip(hover.index, align)}\n \n )}\n \n )\n}\n\nexport default GroupBox\n","/* eslint-disable operator-linebreak */\n/* eslint-disable object-curly-newline */\n/* eslint-disable arrow-body-style */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/jsx-one-expression-per-line */\n// @ts-nocheck\nimport React, { useRef, useMemo } from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, TextMicro, Popover } from \"@netdata/netdata-ui\"\nimport GroupBox from \"./groupBox\"\nimport { getWidth } from \"./drawBoxes\"\nimport getAlign from \"./getAlign\"\n\ninterface GroupBoxWrapperProps {\n data: any\n title: string\n}\n\nconst Title = styled.span`\n white-space: nowrap;\n text-overflow: ellipsis;\n overflow-x: hidden;\n`\n\nconst Label = styled(Flex).attrs({\n as: TextMicro,\n gap: 1,\n})`\n cursor: default;\n &:hover {\n font-weight: bold;\n }\n`\n\nconst GroupBoxWrapper = ({\n data,\n label,\n groupIndex,\n renderGroupPopover,\n renderBoxPopover,\n}: GroupBoxWrapperProps) => {\n const ref = useRef()\n const align = ref.current && getAlign(ref.current)\n\n const style = useMemo(() => ({ 
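GroupBox above opens its tooltip only after the pointer has rested on a box for 600 ms, and defers the close decision to the next animation frame so a move from the box onto the tooltip can cancel it. A schematic restatement of that pattern, where show/hide are hypothetical callbacks standing in for setHover(...) and close():

// Schematic of the hover-intent pattern: open late, close one frame later.
let openTimer: ReturnType<typeof setTimeout>
let dropHovered = false
let hoveredBox = -1

const onBoxEnter = (index: number, show: (i: number) => void) => {
  hoveredBox = index
  openTimer = setTimeout(() => show(index), 600) // tooltip waits 600 ms
}

const onBoxLeave = (hide: () => void) => {
  hoveredBox = -1
  clearTimeout(openTimer)
  requestAnimationFrame(() => {
    // next frame: keep the drop open if the pointer reached it in time
    if (!dropHovered && hoveredBox === -1) hide()
  })
}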
maxWidth: `${getWidth(data.data)}px` }), [data])\n\n const boxPopover =\n renderBoxPopover &&\n ((index, boxAlign) => renderBoxPopover({ group: label, groupIndex, align: boxAlign, index }))\n\n const groupPopover =\n renderGroupPopover && (() => renderGroupPopover({ group: label, groupIndex, align }))\n\n return (\n \n \n {({ isOpen, ref: popoverRef, ...rest }) => (\n {\n ref.current = el\n popoverRef(el)\n }}\n strong={isOpen}\n style={style}\n {...rest}\n >\n {label}\n {data.data.length > 3 && ({data.data.length})}\n \n )}\n \n \n \n )\n}\n\nconst GroupBoxes = ({ data, labels, renderBoxPopover, renderGroupPopover }: any) => (\n \n {labels.map((label, index) => {\n return data[index].data.length ? (\n \n ) : null\n })}\n \n)\n\nexport default GroupBoxes\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, TextNano } from \"@netdata/netdata-ui\"\n\ninterface LegendProps {\n children?: React.ReactNode\n}\n\nconst LinearColorScaleBar = styled(Flex).attrs({ width: \"120px\", height: \"12px\", round: true })`\n background: linear-gradient(to right, #c6e3f6, #0e9aff);\n`\n\nconst Legend = ({ children }: LegendProps) => (\n \n {children}\n \n 0%\n \n 100%\n \n {/* \n Alarms\n \n \n \n Warnings\n \n */}\n \n)\n\nexport default Legend\n","// @ts-nocheck\n\nconst labels = {\n k8s_cluster_id: { icon: \"cluster\", title: \"Cluster Id\" },\n k8s_node_name: { icon: \"nodes_hollow\", title: \"Node\" },\n k8s_namespace: { icon: \"cluster_spaces\", title: \"Namespace\" },\n k8s_controller_kind: { icon: \"controller_kind\", title: \"Controller Kind\" },\n k8s_controller_name: { icon: \"controller_name\", title: \"Controller Name\" },\n k8s_pod_name: { icon: \"pod\", title: \"Pod Name\" },\n k8s_container_name: { icon: \"container\", title: \"Container\" },\n}\n\nexport const labelIds = Object.keys(labels)\n\nexport default (id) => {\n if (id in labels) return labels[id]\n // k8s_custom_label -> Custom Label\n const title = id.replace(/_./g, (word) => ` ${word[1].toUpperCase()}`).replace(/^k8s /, \"\")\n return { title, icon: \"node\" }\n}\n","/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Separator = () => \n\nexport default Separator\n","/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { H5 } from \"@netdata/netdata-ui\"\n\nconst Header = props => (\n
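getLabel above prettifies unknown Kubernetes label ids with two regex passes when the id is not in the predefined table. A standalone restatement of that fallback:

// Restates the getLabel fallback: each "_x" becomes " X", then the
// leading "k8s " prefix is stripped.
const prettify = (id: string) =>
  id.replace(/_./g, word => ` ${word[1].toUpperCase()}`).replace(/^k8s /, "")

console.log(prettify("k8s_custom_label")) // "Custom Label"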
    \n)\n\nexport default Header\n","/* eslint-disable indent */\n/* eslint-disable implicit-arrow-linebreak */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, getColor } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nexport const TabButton = styled(Button).attrs(({ active }) => ({\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n disabled: active,\n \"data-testid\": \"k8sPopoverChart-tab\",\n}))`\n &&& {\n height: initial;\n width: initial;\n padding: 2px 20px;\n ${({ active, theme }) => active && `border-bottom: 3px solid ${getColor(\"bright\")({ theme })};`}\n color: ${({ active, theme }) => getColor(active ? \"bright\" : \"separator\")({ theme })}\n }\n`\n\nconst Tabs = ({ value, onChange, ...rest }) => (\n \n onChange(\"context\")} />\n onChange(\"metrics\")} />\n \n)\n\nexport default Tabs\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, H6, makeFlex } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nconst ExpandButton = styled(makeFlex(Button)).attrs({\n icon: \"chevron_right_s\",\n label: \"More\",\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n alignItems: \"baseline\",\n gap: 1,\n direction: \"rowReverse\",\n})`\n &&& {\n padding: 0;\n margin: 0;\n font-weight: normal;\n height: initial;\n width: initial;\n\n svg {\n height: 6px;\n width: 6px;\n position: initial;\n }\n }\n`\n\nconst Section = ({ title, onExpand, children, noBorder }) => (\n \n \n
    \n {title}\n
    \n {onExpand && }\n
    \n \n {children}\n \n \n)\n\nexport default Section\n","import { LOCALSTORAGE_HEIGHT_KEY_PREFIX } from \"domains/chart/components/resize-handler\"\n\nimport { LEGEND_BOTTOM_SINGLE_LINE_HEIGHT } from \"domains/chart/utils/legend-utils\"\nimport { Attributes } from \"./transformDataAttributes\"\nimport { ChartLibraryConfig } from \"./chartLibrariesSettings\"\n\ntype GetPortalNodeStyles = (\n attributes: Attributes,\n chartSettings: ChartLibraryConfig,\n shouldAddSpecialHeight: boolean,\n) => {\n height: string | undefined,\n width: string | undefined,\n minWidth: string | undefined\n}\n\nconst getHeightFromLocalStorage = (heightID: string, isLegendOnBottom: boolean) => {\n const persitedHeight = localStorage.getItem(`${LOCALSTORAGE_HEIGHT_KEY_PREFIX}${heightID}`)\n if (persitedHeight) {\n if (Number.isNaN(Number(persitedHeight))) {\n return null\n }\n return `${isLegendOnBottom\n ? Number(persitedHeight) + LEGEND_BOTTOM_SINGLE_LINE_HEIGHT\n : persitedHeight\n }px`\n }\n\n return null\n}\n\nexport const getPortalNodeStyles: GetPortalNodeStyles = (\n attributes,\n chartSettings,\n shouldAddSpecialHeight,\n) => {\n let width\n if (typeof attributes.width === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n width = attributes.width\n } else if (typeof attributes.width === \"number\") {\n width = `${attributes.width.toString()}px`\n }\n let height\n if (chartSettings.aspectRatio === undefined) {\n if (typeof attributes.height === \"string\") {\n // eslint-disable-next-line prefer-destructuring\n height = attributes.height\n } else if (typeof attributes.height === \"number\") {\n height = `${attributes.height.toString()}px`\n }\n }\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n const heightFromLocalStorage = attributes.heightId\n ? getHeightFromLocalStorage(attributes.heightId, isLegendOnBottom)\n : null\n\n if (heightFromLocalStorage) {\n // .replace() is for backwards compatibility - old dashboard was always doing\n // JSON.stringify when setting localStorage so many users have '\"180px\"' values set.\n // We can remove .replace() after some time\n height = heightFromLocalStorage.replace(/\"/g, \"\")\n }\n\n if (shouldAddSpecialHeight) {\n const heightOverriden = isLegendOnBottom\n ? window.innerHeight * 0.5\n : window.innerHeight * 0.4\n height = `${heightOverriden}px`\n }\n\n const chartDefaultsMinWidth = window.NETDATA.chartDefaults.min_width\n const minWidth = chartDefaultsMinWidth === null\n ? 
undefined\n : chartDefaultsMinWidth\n return {\n height,\n width,\n minWidth,\n }\n}\n","import {\n useEffect, useRef, useState, MutableRefObject,\n} from \"react\"\n\nconst globalIntersectionOptions = {\n root: null,\n rootMargin: \"0px\",\n threshold: undefined,\n}\n\ntype IntersectionCallback = (isVisible: boolean) => void\ntype Listener = {\n element: HTMLElement,\n callback: IntersectionCallback,\n}\nconst createGlobalIntersectionObserver = () => {\n let listeners: Listener[] = []\n const globalHandler = (entries: IntersectionObserverEntry[]) => {\n entries.forEach(({ isIntersecting, target }) => {\n const callback = listeners.find(({ element }) => element === target)?.callback\n if (callback) {\n callback(isIntersecting)\n }\n })\n }\n const globalObserver = new IntersectionObserver(globalHandler, globalIntersectionOptions)\n\n return {\n subscribe: (element: HTMLElement, callback: IntersectionCallback) => {\n globalObserver.observe(element)\n listeners = listeners.concat({ element, callback })\n },\n unsubscribe: (elementToUnsubscribe: HTMLElement) => {\n listeners = listeners.filter(({ element }) => element !== elementToUnsubscribe)\n },\n }\n}\nconst globalIntersectionObserver = createGlobalIntersectionObserver()\n\n\n// this hook is created for 2 reasons:\n// 1) to use the same IntersectionObserver for all charts (contrary to use-intersection from\n// react-use, which creates new observer for every hook)\n// 2) to update the isVisible state only when necessary (contrary to what \"use-in-view\" hook from\n// https://github.com/thebuilder/react-intersection-observer does)\nexport const useCommonIntersection = (\n element: HTMLElement,\n clonedChildrenRef: MutableRefObject,\n) => {\n const [isVisible, setIsVisible] = useState(false)\n const isVisibleRef = useRef(isVisible)\n // the ref is just to prevent most updates on init - charts are not visible on first intersection\n // observer callback, but it still tries to set the state. UseState does not bail out when\n // state doesn't change\n\n useEffect(() => {\n if (typeof IntersectionObserver === \"function\") {\n globalIntersectionObserver.subscribe(\n element,\n (newIsVisible) => {\n if (isVisibleRef.current !== newIsVisible) {\n if (clonedChildrenRef.current) {\n // eslint-disable-next-line no-param-reassign\n clonedChildrenRef.current.style.visibility = newIsVisible ? 
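getPortalNodeStyles reads persisted chart heights whose legacy values were written with JSON.stringify, which is why the caller strips quotes with .replace(/"/g, ""). A sketch (not the exact helper above) of accepting both storage formats in one place; the key name is hypothetical:

// The old dashboard JSON.stringify'd values, so a key may hold '180' or '"180px"'.
const readPersistedHeight = (key: string): string | null => {
  const raw = localStorage.getItem(key)
  if (raw === null) return null
  const cleaned = raw.replace(/"/g, "").replace(/px$/, "") // '"180px"' -> '180'
  return Number.isNaN(Number(cleaned)) ? null : `${cleaned}px`
}

readPersistedHeight("chart-height-demo") // hypothetical heightId key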
\"visible\" : \"hidden\"\n }\n\n isVisibleRef.current = newIsVisible\n // we need to mirror it in `use-state` to cause react update\n setIsVisible(newIsVisible)\n }\n },\n )\n }\n return () => {\n globalIntersectionObserver.unsubscribe(element)\n }\n }, [clonedChildrenRef, element])\n\n return isVisible\n}\n","import React from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\n\ninterface Props {\n attributes: Attributes\n}\n\n// rendered on init (for example when chart is not visible)\n// and when it's rendering after being hidden previously\nexport const InvisibleSearchableText = ({\n attributes,\n}: Props) => (\n \n {attributes.id}\n \n)\n","import React, {\n useEffect, useLayoutEffect, useState, useRef,\n} from \"react\"\nimport { useDebounce } from \"react-use\"\nimport { forEachObjIndexed } from \"ramda\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\nimport { selectDestroyOnHide, selectIsAsyncOnScroll, selectAlarm } from \"domains/global/selectors\"\nimport { getPortalNodeStyles } from \"domains/chart/utils/get-portal-node-styles\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"domains/chart/utils/chartLibrariesSettings\"\nimport { useCommonIntersection } from \"hooks/use-common-intersection\"\n\nimport { clearChartStateAction } from \"../actions\"\n\nimport { InvisibleSearchableText } from \"./invisible-searchable-text\"\n\nconst SCROLL_DEBOUNCE_ASYNC = 750\nconst SCROLL_DEBOUNCE_SYNC = 100\n\nconst cloneWithCanvas = (element: HTMLElement) => {\n const cloned = element.cloneNode(true) as HTMLElement\n const clonedCanvases = cloned.querySelectorAll(\"canvas\")\n\n element.querySelectorAll(\"canvas\")\n .forEach((oldCanvas, index) => {\n const newCanvas = clonedCanvases[index]\n const context = newCanvas.getContext(\"2d\")\n\n newCanvas.width = oldCanvas.width\n newCanvas.height = oldCanvas.height\n\n if (context) {\n context.drawImage(oldCanvas, 0, 0)\n }\n })\n return cloned\n}\n\nconst shouldCleanChartStateAlways = localStorage.getItem(\"wipe-chart-state\")\n\ninterface Props {\n attributes: Attributes\n chartUuid: string\n children: any\n portalNode: HTMLElement\n}\nexport const DisableOutOfView = ({\n attributes,\n chartUuid,\n children,\n portalNode,\n}: Props) => {\n /* when unmounting, clear redux state for this chart */\n const dispatch = useDispatch()\n useEffect(() => { // eslint-disable-line arrow-body-style\n return () => {\n dispatch(clearChartStateAction({ id: chartUuid }))\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n\n /* separate functionality - adding custom styles to portalNode */\n const chartSettings = chartLibrariesSettings[attributes.chartLibrary]\n const [hasPortalNodeBeenStyled, setHasPortalNodeBeenStyled] = useState(false)\n const isShowingAlarmOnChart = useSelector(selectAlarm)?.chartId === attributes.id\n useLayoutEffect(() => {\n if (hasPortalNodeBeenStyled) {\n return\n }\n const shouldAddSpecialHeight = isShowingAlarmOnChart\n && attributes.chartLibrary === \"dygraph\"\n && chartSettings.hasLegend(attributes)\n const styles = getPortalNodeStyles(attributes, chartSettings, shouldAddSpecialHeight)\n forEachObjIndexed((value, styleName) => {\n if (value) {\n portalNode.style.setProperty(styleName, value)\n }\n }, styles)\n // eslint-disable-next-line no-param-reassign\n portalNode.className = 
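The hook above shares one IntersectionObserver across all charts instead of creating one per chart. A minimal sketch of the same idea; note it also calls unobserve on unsubscribe, which the listener-array version above omits, and uses a Map for O(1) lookup instead of Array.find:

// One observer serves every chart; callbacks are keyed by element identity.
type VisibilityCallback = (visible: boolean) => void

const callbacks = new Map<Element, VisibilityCallback>()

const observer = new IntersectionObserver(entries => {
  entries.forEach(({ target, isIntersecting }) => {
    callbacks.get(target)?.(isIntersecting)
  })
})

export const subscribe = (el: Element, cb: VisibilityCallback) => {
  callbacks.set(el, cb)
  observer.observe(el)
}

export const unsubscribe = (el: Element) => {
  callbacks.delete(el)
  observer.unobserve(el) // frees the observer entry as well as the listener
}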
chartSettings.containerClass(attributes)\n setHasPortalNodeBeenStyled(true)\n }, [attributes, chartSettings, hasPortalNodeBeenStyled, isShowingAlarmOnChart, portalNode,\n setHasPortalNodeBeenStyled ])\n /* end of \"adding custom styles to portalNode\" */\n\n\n const destroyOnHide = useSelector(selectDestroyOnHide)\n\n const clonedChildrenRef = useRef()\n const isVisibleIntersection = useCommonIntersection(portalNode, clonedChildrenRef)\n\n // todo hook to scroll (observe on visible items) instead of changes in intersectionRatio\n // because intersectinnRatio maxes out on 1.0 when element is fully visible\n const isAsyncOnScroll = useSelector(selectIsAsyncOnScroll)\n const debounceTime = isAsyncOnScroll ? SCROLL_DEBOUNCE_ASYNC : SCROLL_DEBOUNCE_SYNC\n\n // \"should hide because of debounced scroll handler\"\n const [shouldHideDebounced, setShouldHideDebounced] = useState(!isVisibleIntersection)\n useDebounce(\n () => {\n // start rendering, when intersectionRatio is not 0 and it hasn't changed for 1500 ms\n setShouldHideDebounced(!isVisibleIntersection)\n },\n debounceTime,\n [isVisibleIntersection],\n )\n const shouldHide = isVisibleIntersection ? shouldHideDebounced : true\n\n const previousIsVisibleIntersection = useRef(isVisibleIntersection)\n if (clonedChildrenRef.current\n && previousIsVisibleIntersection.current !== isVisibleIntersection\n ) {\n previousIsVisibleIntersection.current = isVisibleIntersection\n }\n\n useEffect(() => {\n if (!isPrintMode && shouldHide && shouldCleanChartStateAlways) {\n dispatch(clearChartStateAction({ id: chartUuid }))\n }\n }, [chartUuid, dispatch, shouldHide])\n\n\n if (isPrintMode) {\n // we should show everything in this case\n return children\n }\n\n if (shouldHide) {\n // todo perhaps loader should be added here to both scenarios\n if (destroyOnHide) {\n return (\n \n )\n }\n\n if (!clonedChildrenRef.current) {\n const newClonedChildren = Array.from(portalNode.children)\n .map((child) => cloneWithCanvas(child as HTMLElement))\n\n const clonedChildrenContainer = document.createElement(\"div\")\n clonedChildrenContainer.style.visibility = \"hidden\"\n\n newClonedChildren.forEach((child) => {\n clonedChildrenContainer.appendChild(child)\n })\n\n clonedChildrenRef.current = clonedChildrenContainer\n }\n\n return (\n <>\n \n {\n if (nodeElement && clonedChildrenRef.current) {\n nodeElement.appendChild(clonedChildrenRef.current)\n }\n }}\n />\n \n )\n }\n\n if (!destroyOnHide && clonedChildrenRef.current) {\n clonedChildrenRef.current = undefined\n }\n\n return children\n}\n","import React from \"react\"\n\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport { ChartMetadata } from \"../../chart-types\"\nimport { ChartWithLoader, RenderCustomElementForDygraph } from \"../chart-with-loader\"\nimport { DisableOutOfView } from \"../disable-out-of-view\"\nimport { DropdownMenu } from \"../chart-dropdown\"\n\nexport type Props = {\n attributes: Attributes\n // warning! 
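cloneWithCanvas above exists because cloneNode(true) copies canvas elements but leaves their bitmaps blank; painting each original canvas onto its clone with drawImage is what lets a hidden chart keep showing its last rendered frame. Restated as a standalone helper:

// cloneNode(true) yields empty canvases, so each bitmap is copied explicitly.
const snapshot = (element: HTMLElement): HTMLElement => {
  const cloned = element.cloneNode(true) as HTMLElement
  const clonedCanvases = cloned.querySelectorAll("canvas")

  element.querySelectorAll("canvas").forEach((oldCanvas, i) => {
    const newCanvas = clonedCanvases[i]
    newCanvas.width = oldCanvas.width   // sizing must precede drawImage
    newCanvas.height = oldCanvas.height
    newCanvas.getContext("2d")?.drawImage(oldCanvas, 0, 0)
  })
  return cloned
}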
this is not the same as chartId in old dashboard\n // here, the chartID must be unique across all agents\n chartUuid: string\n uuid?: string\n portalNode: HTMLElement\n chartMetadata?: ChartMetadata | undefined\n dropdownMenu?: DropdownMenu\n renderCustomElementForDygraph?: RenderCustomElementForDygraph\n onAttributesChange?: any\n}\n\nexport const ChartContainer = ({\n attributes,\n chartMetadata,\n chartUuid,\n dropdownMenu,\n portalNode,\n renderCustomElementForDygraph,\n onAttributesChange,\n uuid,\n}: Props) => (\n \n \n \n)\n","/* eslint-disable max-len */\nimport { ChartsMetadata } from \"domains/global/types\"\nimport { AnyStringKeyT } from \"types/common\"\nimport { ChartEnriched } from \"domains/chart/chart-types\"\n\nexport interface Submenus {\n [submenus: string]: {\n charts: ChartEnriched[]\n height: number\n info: string | null\n priority: number\n title: string | null\n }\n}\n\nexport interface CorrelationMetadata {\n scoredCount?: number\n totalCount?: number\n averageScore?: number\n}\n\nexport interface Menu {\n // eslint-disable-next-line camelcase\n menu_pattern: string\n priority: number\n submenus: Submenus\n title: string\n icon: string\n info: string\n height: number\n correlationsMetadata?: CorrelationMetadata\n}\n\nexport interface Menus {\n [menu: string]: Menu\n}\n\nexport const options = {\n menus: {} as Menus,\n submenu_names: {} as {[family: string]: string},\n data: null as (ChartsMetadata | null),\n hostname: \"netdata_server\", // will be overwritten by the netdata server\n version: \"unknown\",\n release_channel: \"unknown\",\n hosts: [],\n\n duration: 0, // the default duration of the charts\n update_every: 1,\n\n chartsPerRow: 0,\n // chartsMinWidth: 1450,\n chartsHeight: 180,\n}\n\n\n// netdata standard information\nexport const netdataDashboard = {\n sparklines_registry: {} as {[key: string]: { count: number }},\n os: \"unknown\",\n\n menu: {},\n submenu: {} as {\n [family: string]: {\n info?: string | ((os: string) => string)\n title?: string\n }\n },\n context: {} as {\n [id: string]: {\n valueRange: string // examples: \"[0, 100]\", \"[null, null]\"\n height: number\n decimalDigits: number\n }},\n\n // generate a sparkline\n // used in the documentation\n sparkline(\n prefix: string, chart: string, dimension: string, units: string = \"\", suffix: string,\n ) {\n if (options.data === null || typeof options.data.charts === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart] === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart].dimensions === \"undefined\") {\n return \"\"\n }\n\n if (typeof options.data.charts[chart].dimensions[dimension] === \"undefined\") {\n return \"\"\n }\n\n let key = `${chart}.${dimension}`\n\n if (typeof this.sparklines_registry[key] === \"undefined\") {\n this.sparklines_registry[key] = { count: 1 }\n } else {\n this.sparklines_registry[key].count += 1\n }\n\n key = `${key}.${this.sparklines_registry[key].count}`\n\n return `${prefix}
    \n (\n X${units})${suffix}`\n },\n\n gaugeChart(\n title: string, width: string, dimensions: string = \"\", colors: string = \"\",\n ) {\n return `${\"
    \"\n },\n\n anyAttribute(obj: AnyStringKeyT, attr: string, key: string, def: unknown, domain?: string) {\n if (typeof (obj[key]) !== \"undefined\") {\n const config = obj[key]\n const configWithDomain = domain ? {...config, ...config[domain]} : config\n const x = configWithDomain[attr]\n\n if (x === undefined) {\n return def\n }\n\n if (typeof (x) === \"function\") {\n return x(netdataDashboard.os)\n }\n\n return x\n }\n\n return def\n },\n\n menuTitle(chart: ChartEnriched) {\n if (chart.sectionTitle) {\n return chart.sectionTitle\n }\n if (typeof chart.menu_pattern !== \"undefined\") {\n const type = chart.type || chart.id.split(\".\")[0]\n return (`${this.anyAttribute(this.menu, \"title\", chart.menu_pattern, chart.menu_pattern)\n .toString()\n } ${type.slice(-(type.length - chart.menu_pattern.length - 1)).toString()}`)\n .replace(/_/g, \" \")\n }\n\n return (this.anyAttribute(this.menu, \"title\", chart.menu, chart.menu)).toString()\n .replace(/_/g, \" \")\n },\n\n menuIcon(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"icon\", chart.menu_pattern,\n \"\").toString()\n }\n\n return this.anyAttribute(this.menu, \"icon\", chart.menu, \"\")\n },\n\n menuInfo(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"info\", chart.menu_pattern, null)\n }\n\n return this.anyAttribute(this.menu, \"info\", chart.menu, null)\n },\n\n menuHeight(chart: ChartEnriched) {\n if (typeof chart.menu_pattern !== \"undefined\") {\n return this.anyAttribute(this.menu, \"height\", chart.menu_pattern, 1.0)\n }\n\n return this.anyAttribute(this.menu, \"height\", chart.menu, 1.0)\n },\n\n submenuTitle(menu: string, submenu: string) {\n const key = `${menu}.${submenu}`\n // console.log(key);\n const title = this.anyAttribute(this.submenu, \"title\", key, submenu)\n .toString().replace(/_/g, \" \") as string\n if (title.length > 28) {\n const a = title.substring(0, 13)\n const b = title.substring(title.length - 12, title.length)\n return `${a}...${b}`\n }\n return title\n },\n\n submenuInfo(menu: string, submenu: string) {\n const key = `${menu}.${submenu}`\n return this.anyAttribute(this.submenu, \"info\", key, null) as (string | null)\n },\n\n submenuHeight(menu: string, submenu: string, relative: number) {\n const key = `${menu}.${submenu}`\n return this.anyAttribute(this.submenu, \"height\", key, 1.0) * relative\n },\n\n contextInfo(id: string, domain?: string) {\n const x = this.anyAttribute(this.context, \"info\", id, null, domain)\n\n if (x !== null) {\n return `
    ${x}
    `\n }\n return \"\"\n },\n\n contextValueRange(id: string) {\n if (typeof this.context[id] !== \"undefined\"\n && typeof this.context[id].valueRange !== \"undefined\"\n ) {\n try {\n return JSON.parse(this.context[id].valueRange)\n } catch (e) {\n return [null, null]\n }\n }\n return [null, null]\n },\n\n contextHeight(id: string, def: number) {\n if (typeof this.context[id] !== \"undefined\" && typeof this.context[id].height !== \"undefined\") {\n return def * this.context[id].height\n }\n return def\n },\n\n contextDecimalDigits(id: string, def: number) {\n if (typeof this.context[id] !== \"undefined\"\n && typeof this.context[id].decimalDigits !== \"undefined\"\n ) {\n return this.context[id].decimalDigits\n }\n return def\n },\n}\n\n// @ts-ignore\nwindow.netdataDashboard = netdataDashboard\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/destructuring-assignment */\n/* eslint-disable operator-linebreak */\n/* eslint-disable arrow-body-style */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { memo } from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, Text } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { useFormatters } from \"domains/chart/utils/formatters\"\nimport { netdataDashboard } from \"domains/dashboard/utils/netdata-dashboard\"\nimport { selectChartData } from \"domains/chart/selectors\"\n\nconst Title = styled(Text)`\n text-overflow: ellipsis;\n max-width: 120px;\n overflow-x: hidden;\n`\n\nconst getUnitSign = unit => {\n return unit === \"percentage\" ? \"%\" : ` ${unit.replace(/milliseconds/, \"ms\")}`\n}\n\nconst aggrMethods = {\n avg: \"Average\",\n sum: \"Sum\",\n min: \"Min\",\n max: \"Max\",\n}\nconst getAggregation = value => `${aggrMethods[value]}` || \"\"\n\nconst ChartValueContainer = memo(({ id, units, aggrMethod, displayedIndex }) => {\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id }))\n\n const value =\n typeof displayedIndex === \"number\"\n ? 
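anyAttribute above resolves a dashboard config attribute that may be either a literal or a function of the detected OS. A reduced sketch of that literal-or-function rule (the per-domain override merge is omitted):

// Functions are resolved against the current OS; literals pass through.
type Attr<T> = T | ((os: string) => T)

const resolveAttr = <T>(value: Attr<T> | undefined, os: string, fallback: T): T =>
  value === undefined
    ? fallback
    : typeof value === "function" ? (value as (os: string) => T)(os) : value

resolveAttr((os: string) => `docs for ${os}`, "linux", "") // "docs for linux"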
chartData.result[displayedIndex]\n : chartData.view_latest_values[0]\n\n const { legendFormatValue, unitsCurrent } = useFormatters({\n attributes: {},\n data: chartData,\n units,\n unitsCommon: null,\n unitsDesired: null,\n uuid: id,\n })\n\n const aggregation = getAggregation(aggrMethod)\n\n return (\n \n {aggregation && (\n \n {aggregation}\n \n )}\n {legendFormatValue(value)}\n {getUnitSign(unitsCurrent)}\n \n )\n})\n\nconst ChartValue = ({ id, ...rest }) => {\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id }))\n\n if (!chartData || chartData.result.length === 0) return null\n return \n}\n\nconst ChartOverview = ({ id, chartMetadata, aggrMethod, displayedIndex }) => {\n const { units, context } = chartMetadata\n const title = context.replace(/cgroup\\./, \"\")\n const icon = netdataDashboard.menuIcon(chartMetadata)\n\n return (\n \n \n \n {title}\n \n \n \n )\n}\n\nexport default memo(ChartOverview)\n","import React, { useMemo } from \"react\"\nimport Anchor from \"@/src/components/anchor\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { useLocalStorage } from \"react-use\"\nimport { utmUrlSuffix } from \"utils/utils\"\nimport { utmParametersToString } from \"domains/global/selectors\"\n\nexport type UserStatus = \"LOGGED_IN\" | \"EXPIRED_LOGIN\" | \"UNKNOWN\"\nexport type NodeClaimedStatus = \"NOT_CLAIMED\" | \"CLAIMED\"\nexport type UserNodeAccess = \"NO_ACCESS\" | \"ACCESS_OK\"\ntype UserPreference = \"AGENT\" | \"CLOUD\" | \"UNDEFINED\"\nexport type NodeLiveness = \"LIVE\" | \"NOT_LIVE\"\ntype CTATYPE = \"NAVIGATE\" | \"REFRESH\"\n\nexport enum MigrationModalPromos {\n PROMO_SIGN_IN_CLOUD = \"PROMO_SIGN_IN_CLOUD\",\n PROMO_SIGN_UP_CLOUD = \"PROMO_SIGN_UP_CLOUD\",\n PROMO_IVNITED_TO_SPACE = \"PROMO_IVNITED_TO_SPACE\",\n PROMO_CLAIM_NODE = \"PROMO_CLAIM_NODE\",\n PROMO_TO_USE_NEW_DASHBAORD = \"PROMO_TO_USE_NEW_DASHBAORD\",\n FALLBACK_TO_AGENT = \"FALLBACK_TO_AGENT\",\n NO_INFO_FALLBACK_TO_AGENT = \"NO_INFO_FALLBACK_TO_AGENT\",\n}\n\ntype MigrationModalActions = {\n text: string\n action: CTATYPE\n toPath?: string\n userPreference?: UserPreference | \"DONT_SHOW\"\n}\n\ntype MigrationModalContent = {\n title: string\n text: {\n header: ((props: any) => React.ReactNode) | string\n bullets?: Array React.ReactNode)>\n footer?: ((props: any) => React.ReactNode) | string\n }\n tickBoxOption: { text: string; preferenceID: MigrationModalPromos }\n CTA1: MigrationModalActions\n CTA2?: MigrationModalActions\n}\n\ntype MigrationModalInfo = {\n [key in MigrationModalPromos]: MigrationModalContent\n}\n\nconst campaign = \"agent_nudge_to_cloud\"\n\nconst makeUTMParameters = (modalPromo: MigrationModalPromos) =>\n `${utmUrlSuffix}${utmParametersToString({\n content: modalPromo,\n campaign,\n })}`\n\nexport const migrationmodalInfo: MigrationModalInfo = {\n [MigrationModalPromos.PROMO_SIGN_UP_CLOUD]: {\n title: \"Learn about Netdata Cloud!\",\n text: {\n header: () => (\n \n Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:\n \n ),\n bullets: [\n \"Infrastructure level dashboards (each chart aggregates data from multiple nodes)\",\n \"Central dispatch of alert notifications\",\n \"Custom dashboards editor\",\n \"Intelligence assisted troubleshooting, to help surface the root cause of issues\",\n ],\n footer: \"Have a look, you will be surprised!\",\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_SIGN_UP_CLOUD,\n },\n CTA1: {\n text: \"Wow! 
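One detail worth flagging in ChartOverview above: getAggregation wraps the lookup in a template literal, so an unknown method yields the string "undefined" and the || "" guard is dead code. A corrected sketch:

// Look up first, stringify never: missing methods now fall back to "".
const aggrMethods: Record<string, string> = { avg: "Average", sum: "Sum", min: "Min", max: "Max" }
const getAggregation = (value?: string) => (value && aggrMethods[value]) || ""

getAggregation("avg")    // "Average"
getAggregation("median") // "" instead of "undefined"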
Let’s go to Netdata Cloud\",\n toPath: \"path/signup/cloud\",\n action: \"NAVIGATE\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_SIGN_IN_CLOUD]: {\n title: \"Sign-in to Netdata Cloud or get an invitation!\",\n text: {\n header: () => (\n <>\n \n This node is connected to Netdata Cloud but you are not. If you have a Netdata Cloud\n account sign-in, if not ask for an invitation to it.\n \n\n \n Netdata Cloud is a FREE service that complements the Netdata Agent, to provide:\n \n \n ),\n bullets: [\n \"Infrastructure level dashboards (each chart aggregates data from multiple nodes)\",\n \"Central dispatch of alert notifications\",\n \"Custom dashboards editor\",\n \"Intelligence assisted troubleshooting, to help surface the root cause of issues\",\n ],\n footer: \"Have a look, you will be surprised!\",\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_SIGN_IN_CLOUD,\n },\n CTA1: {\n text: \"Sign-in or get a Netdata Cloud account\",\n action: \"NAVIGATE\",\n toPath: \"path/signin/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n toPath: \"path/agent-dashboard\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_IVNITED_TO_SPACE]: {\n title: \"Get an invitation to this Node’s Space!\",\n text: {\n header: () => (\n \n This node is connected to Netdata Cloud but it isnt available on one of your Spaces.\n \n ),\n bullets: [],\n footer: \"Ask for an invitation to this Space!\",\n },\n tickBoxOption: {\n text: \"Don't remind me of this again\",\n preferenceID: MigrationModalPromos.PROMO_IVNITED_TO_SPACE,\n },\n CTA1: {\n text: \"Thanks, stay at Agent dashboard for now\",\n toPath: \"agent\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_CLAIM_NODE]: {\n title: \"This node isn’t connected to Netdata Cloud\",\n text: {\n header: () => (\n \n For you to be able to see this node on Netdata Cloud you will either need to:\n \n ),\n footer: \"Have a look, you will be surprised!\",\n bullets: [\n () => {\n return (\n \n {\" \"}\n Connect this node directly (documentation on{\" \"}\n \n how to connect a node\n \n ) , or\n \n )\n },\n () => {\n return (\n \n Αctivate streaming to a parent node that is already connected (documentation on{\" \"}\n \n how to configure streaming\n \n )\n \n )\n },\n ],\n },\n tickBoxOption: {\n text: \"Remember my choice.\",\n preferenceID: MigrationModalPromos.PROMO_CLAIM_NODE,\n },\n CTA1: {\n text: \"Wow! Let’s go to Netdata Cloud\",\n action: \"NAVIGATE\",\n toPath: \"path/node/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD]: {\n title: \"Use the Old or the New dashboard?\",\n text: {\n header: () => (\n \n This node is available in your Netdata Cloud account. So, you have full access to the NEW\n dashboards, charts, intelligence-assisted troubleshooting and many more!\n \n ),\n bullets: [],\n },\n tickBoxOption: {\n text: \"Remember my choice\",\n preferenceID: MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD,\n },\n CTA1: {\n text: \"Wow! 
Let’s go to Netdata Cloud \",\n action: \"NAVIGATE\",\n toPath: \"path/dashboard/cloud\",\n userPreference: \"CLOUD\",\n },\n CTA2: {\n text: \"Later, stay at the Agent dashboard\",\n action: \"NAVIGATE\",\n toPath: \"path/agent-dashboard\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.FALLBACK_TO_AGENT]: {\n title: \"Oops! This node has lost connection to Netdata Cloud!\",\n text: {\n header: ({ date = \"\" }) => {\n return (\n <>\n \n Unfortunately, it seems that this node is not currently connected to Netdata Cloud.\n So, the old agent dashboard is the only option available.\n \n {/* \n The node lost its Netdata Cloud connection at {date}.\n */}\n \n To troubleshoot Netdata Cloud connection issues, please follow this{\" \"}\n \n this guide.\n \n \n \n )\n },\n bullets: [],\n },\n tickBoxOption: {\n text: \"Don't show this again\",\n preferenceID: MigrationModalPromos.FALLBACK_TO_AGENT,\n },\n CTA1: {\n text: \"Check again please\",\n action: \"REFRESH\",\n userPreference: undefined,\n },\n CTA2: {\n text: \"Thanks, stay at Agent dashboard\",\n toPath: \"path/agent\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n [MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT]: {\n title: \"Oops! We aren't able to get information of this node in regards to Netdata Cloud!\",\n text: {\n header: () => {\n return (\n <>\n \n Unfortunately, it seems we aren't able to get information on this node in regards to\n Netdata Cloud.\n \n \n This could be from internet connectivity issues from your end or some temporary issue\n with our services. So, the old agent dashboard is the only option available.\n \n \n )\n },\n bullets: [],\n },\n tickBoxOption: {\n text: \"Don't show this again\",\n preferenceID: MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT,\n },\n CTA1: {\n text: \"Check again please\",\n action: \"REFRESH\",\n userPreference: undefined,\n },\n CTA2: {\n text: \"Thanks, stay at Agent dashboard\",\n toPath: \"path/agent-dashboard\",\n action: \"NAVIGATE\",\n userPreference: \"AGENT\",\n },\n },\n}\n\nexport type PromoProps = {\n userSavedPreference?: UserPreference\n userStatus?: UserStatus\n nodeClaimedStatus?: NodeClaimedStatus\n userNodeAccess?: UserNodeAccess\n nodeLiveness?: NodeLiveness\n}\n\nconst isPromoSignUp = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" && userStatus === \"UNKNOWN\" && nodeClaimedStatus === \"NOT_CLAIMED\"\n\nconst isPromoSignIn = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" && userStatus === \"UNKNOWN\" && nodeClaimedStatus === \"CLAIMED\"\n\nconst isPromoInvitedToSpace = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"CLAIMED\" &&\n userNodeAccess === \"NO_ACCESS\"\n\nconst isPromoToClaimThisNode = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"NOT_CLAIMED\"\n\nconst isPromoToNewDasboardOnCloud = ({\n userSavedPreference,\n userStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n !userSavedPreference &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeLiveness === 
\"LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nconst isNoInfoFallbackToAgent = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference === \"CLOUD\" &&\n !userStatus &&\n !nodeClaimedStatus &&\n !nodeLiveness &&\n !userNodeAccess\n\nconst isFallbackToAgent = ({\n userSavedPreference,\n userStatus,\n nodeClaimedStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps): boolean =>\n userSavedPreference !== \"AGENT\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeClaimedStatus === \"CLAIMED\" &&\n nodeLiveness === \"NOT_LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nexport const goToAgentDashboard = ({ userSavedPreference }: PromoProps) =>\n userSavedPreference === \"AGENT\"\n\nexport const goToCloud = ({\n userSavedPreference,\n userStatus,\n nodeLiveness,\n userNodeAccess,\n}: PromoProps) =>\n userSavedPreference === \"CLOUD\" &&\n (userStatus === \"LOGGED_IN\" || userStatus === \"EXPIRED_LOGIN\") &&\n nodeLiveness === \"LIVE\" &&\n userNodeAccess === \"ACCESS_OK\"\n\nconst modalStatusWithPromoFunctions: Record<\n MigrationModalPromos,\n (props: PromoProps) => boolean\n> = {\n [MigrationModalPromos.FALLBACK_TO_AGENT]: isFallbackToAgent,\n [MigrationModalPromos.NO_INFO_FALLBACK_TO_AGENT]: isNoInfoFallbackToAgent,\n [MigrationModalPromos.PROMO_TO_USE_NEW_DASHBAORD]: isPromoToNewDasboardOnCloud,\n [MigrationModalPromos.PROMO_CLAIM_NODE]: isPromoToClaimThisNode,\n [MigrationModalPromos.PROMO_IVNITED_TO_SPACE]: isPromoInvitedToSpace,\n [MigrationModalPromos.PROMO_SIGN_IN_CLOUD]: isPromoSignIn,\n [MigrationModalPromos.PROMO_SIGN_UP_CLOUD]: isPromoSignUp,\n}\n\nconst useMigrationModal = ({\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n nodeLiveness,\n}: PromoProps) => {\n const [userSavedPreference, setUserPrefrence] = useLocalStorage(\n \"USER_SAVED_PREFERENCE\"\n )\n\n const migrationModalPromo = useMemo(() => {\n return Object.keys(modalStatusWithPromoFunctions).find(modalStatus => {\n return modalStatusWithPromoFunctions[modalStatus]({\n userStatus,\n nodeClaimedStatus,\n userNodeAccess,\n userSavedPreference,\n nodeLiveness,\n })\n }) as MigrationModalPromos\n }, [userStatus, nodeClaimedStatus, userNodeAccess, nodeLiveness, userSavedPreference])\n\n return {\n migrationModalPromoInfo: migrationmodalInfo[migrationModalPromo],\n migrationModalPromo,\n setUserPrefrence,\n userSavedPreference,\n }\n}\n\nexport default useMigrationModal\n","/* eslint-disable comma-dangle */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useRef, useContext, useLayoutEffect, useState, memo, useMemo } from \"react\"\nimport { throttle } from \"throttle-debounce\"\nimport { ChartContainer } from \"domains/chart/components/chart-container\"\nimport { ThemeContext } from \"styled-components\"\nimport { Flex, getColor } from \"@netdata/netdata-ui\"\nimport ChartOverview from \"./chartOverview\"\n\nconst Chart = ({ groupLabel, postGroupLabel, id, attributes, relatedIndex }) => {\n const theme = useContext(ThemeContext)\n const chartContainerRef = useRef()\n const [displayedIndex, setDisplayedIndex] = useState()\n const setDisplayedIndexThrottled = useMemo(() => throttle(400, setDisplayedIndex), [])\n const [, repaint] = useState()\n\n useLayoutEffect(() => {\n repaint(true)\n }, [])\n\n const { chartMetadata, attributes: relatedChartAttributes } = 
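useMigrationModal above picks the first promo whose predicate matches the current user/node state; because the lookup walks Object.keys of modalStatusWithPromoFunctions, the record's insertion order doubles as a priority list. A reduced sketch with hypothetical state and predicates:

// First-match selection: more specific predicates are listed first.
type State = { claimed?: boolean; loggedIn?: boolean }

const predicates: Record<string, (s: State) => boolean> = {
  FALLBACK: s => s.loggedIn === true && s.claimed === true,
  SIGN_IN: s => !s.loggedIn && s.claimed === true,
  SIGN_UP: s => !s.loggedIn && !s.claimed,
}

const pickPromo = (s: State) =>
  Object.keys(predicates).find(key => predicates[key](s))

console.log(pickPromo({ claimed: false })) // "SIGN_UP"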
attributes.relatedCharts[\n relatedIndex\n ]\n\n const chartAttributes = useMemo(\n () => ({\n id: chartMetadata.id,\n\n width: \"100%\",\n height: \"60px\",\n\n chartLibrary: \"sparkline\",\n sparklineLineWidth: \"2px\",\n sparklineLineColor: getColor(\"border\")({ theme }),\n sparklineFillColor: getColor(\"disabled\")({ theme }),\n sparklineSpotRadius: 0,\n sparklineDisableTooltips: true,\n sparklineOnHover: (event) => setDisplayedIndexThrottled(event?.x),\n\n httpMethod: \"POST\",\n host: attributes.host,\n nodeIDs: attributes.nodeIDs,\n dimensions: relatedChartAttributes.dimensions,\n aggrMethod: relatedChartAttributes.aggrMethod,\n\n labels: {\n k8s_cluster_id: [chartMetadata.chartLabels.k8s_cluster_id[0]],\n [attributes.groupBy]: [groupLabel],\n ...(postGroupLabel && { [attributes.postGroupBy]: [postGroupLabel] }),\n },\n }),\n [chartMetadata, attributes]\n )\n\n return (\n \n \n {chartContainerRef.current && (\n \n )}\n
    \n \n \n )\n}\n\nexport default memo(Chart)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport styled from \"styled-components\"\nimport { Text, Flex, Icon } from \"@netdata/netdata-ui\"\n\nconst ExternalButton = styled(Icon).attrs({\n margin: [0, 0, 0, \"auto\"],\n color: \"bright\",\n width: \"10px\",\n height: \"10px\",\n alignSelf: \"center\",\n name: \"nav_arrow_goto\",\n role: \"button\",\n title: \"Go to node\",\n \"data-testid\": \"k8sPopoverItem-externalButton\",\n})`\n cursor: pointer;\n`\n\nconst Item = ({ icon, title, secondary, onClick }) => (\n \n \n \n \n \n {title}\n \n {secondary && (\n \n {secondary}\n \n )}\n {onClick && }\n \n)\n\nexport default Item\n","/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { useDateTime } from \"utils/date-time\"\nimport Item from \"./item\"\nimport Section from \"./section\"\n\nconst DateItem = ({ date, title }) => {\n const { localeDateString, localeTimeString } = useDateTime()\n\n return (\n \n )\n}\n\nconst DateSection = ({ before, after }) => (\n
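The Chart component above throttles its sparkline hover handler because onHover fires per pixel of pointer movement; setDisplayedIndexThrottled limits state updates to one per 400 ms. A standalone sketch of that use of throttle-debounce, where setDisplayedIndex stands in for the real state setter:

import { throttle } from "throttle-debounce"

// throttle(delayMs, fn) runs fn at most once per interval.
const setDisplayedIndex = (i?: number) => console.log("hovered row", i)
const setDisplayedIndexThrottled = throttle(400, setDisplayedIndex)

for (let x = 0; x < 100; x += 1) setDisplayedIndexThrottled(x)
// logs immediately for x = 0, then at most once per 400 ms with the latest x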
    \n \n \n
    \n)\n\nexport default DateSection\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport Section from \"./section\"\nimport Chart from \"./chart\"\nimport DateSection from \"./dateSection\"\n\nconst Metrics = ({ groupLabel, postGroupLabel, attributes, viewAfter, viewBefore }) => (\n \n \n
    \n \n {attributes.relatedCharts.map(({ chartMetadata }, index) => (\n \n ))}\n \n
    \n
    \n)\n\nexport default Metrics\n","/* eslint-disable no-param-reassign */\n/* eslint-disable comma-dangle */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { memo } from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport Item from \"./item\"\nimport Section from \"./section\"\nimport getLabel, { labelIds } from \"./getLabel\"\n\nconst LabelsSection = ({ labelId, items, onExpand, onItemClick, ...rest }) => {\n const { title, icon } = getLabel(labelId)\n const sliced = items.slice(0, 3)\n const expandable = items.length > 3\n\n const text = expandable ? `${title} (${items.length})` : title\n return (\n
    \n {sliced.map((item) => (\n onItemClick(item))}\n />\n ))}\n
    \n )\n}\n\nconst getLabelIds = (chartLabels) => {\n chartLabels = { ...chartLabels }\n const predefinedLabelIds = labelIds.reduce((acc, labelId) => {\n if (!(labelId in chartLabels)) return acc\n\n delete chartLabels[labelId]\n return [...acc, labelId]\n }, [])\n\n return [...predefinedLabelIds, ...Object.keys(chartLabels)]\n}\n\nconst Context = ({ chartLabels, onExpand, onNodeClick }) => {\n const ids = getLabelIds(chartLabels)\n\n return (\n \n {ids.map((id, index) => (\n onExpand(id)}\n noBorder={index === ids.length - 1}\n onItemClick={id === \"k8s_node_name\" && onNodeClick}\n />\n ))}\n \n )\n}\n\nexport default memo(Context)\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React from \"react\"\nimport { Flex, Button, makeFlex } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\nimport Separator from \"./separator\"\nimport Header from \"./header\"\nimport Item from \"./item\"\nimport getLabel from \"./getLabel\"\n\nconst StyledButton = styled(makeFlex(Button)).attrs({\n flavour: \"borderless\",\n neutral: true,\n themeType: \"dark\",\n className: \"btn\",\n alignItems: \"start\",\n gap: 1,\n})`\n &&& {\n padding: 0;\n margin: 0;\n height: initial;\n width: initial;\n\n svg {\n height: 18px;\n width: 18px;\n position: initial;\n }\n }\n`\n\nconst List = ({ labelId, items, onBack, onItemClick }) => {\n const { title, icon } = getLabel(labelId)\n\n return (\n \n
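getLabelIds above keeps the predefined Kubernetes labels in their canonical order and appends any custom labels after them. A minimal sketch of that ordering with a shortened hypothetical known-id list:

// Known ids keep their canonical order; anything left over is appended.
const knownIds = ["k8s_cluster_id", "k8s_namespace", "k8s_pod_name"]

const orderLabelIds = (chartLabels: Record<string, unknown>) => {
  const rest = { ...chartLabels }
  const predefined = knownIds.filter(id => {
    const present = id in rest
    if (present) delete rest[id]
    return present
  })
  return [...predefined, ...Object.keys(rest)]
}

console.log(orderLabelIds({ team: ["a"], k8s_pod_name: ["p"], k8s_cluster_id: ["c"] }))
// ["k8s_cluster_id", "k8s_pod_name", "team"]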
    \n \n
    \n \n \n {items.map((item) => (\n onItemClick(item))}\n />\n ))}\n
    \n \n )\n}\n\nexport default List\n","/* eslint-disable object-curly-newline */\n/* eslint-disable react/jsx-props-no-spreading */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useState } from \"react\"\nimport { Flex, DropContainer } from \"@netdata/netdata-ui\"\nimport Separator from \"./separator\"\nimport Header from \"./header\"\nimport Tabs from \"./tabs\"\nimport Metrics from \"./metrics\"\nimport Context from \"./context\"\nimport List from \"./list\"\n\nconst Container = (props) => (\n \n)\n\nconst TabsContainer = ({ label, value, onChange, children }) => (\n \n
    {label}
    \n \n \n \n {children}\n \n
    \n)\n\nconst Popover = ({\n title,\n groupLabel,\n postGroupLabel,\n chartLabels,\n attributes,\n viewBefore,\n viewAfter,\n ...rest\n}) => {\n const [view, setView] = useState(\"context\")\n\n const isLabelView = view !== \"context\" && view !== \"metrics\"\n\n const { onNodeClick } = attributes\n\n return (\n \n {isLabelView && (\n setView(\"context\")}\n onItemClick={view === \"k8s_node_name\" && onNodeClick}\n />\n )}\n {!isLabelView && (\n \n {view === \"context\" && (\n \n )}\n {view === \"metrics\" && (\n \n )}\n \n )}\n \n )\n}\n\nexport default Popover\n","/* eslint-disable arrow-body-style */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable comma-dangle */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\nimport React, { useMemo } from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { ChartMetadata } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes.ts\"\nimport { ChartTimeframe } from \"domains/chart/components/chart-legend-bottom\"\nimport GroupBoxes from \"domains/chart/components/lib-charts/group-box-chart/groupBoxes\"\nimport Legend from \"domains/chart/components/lib-charts/group-box-chart/legend\"\nimport getLabel from \"./getLabel\"\nimport transform from \"./transform\"\nimport Popover from \"./popover\"\n\ninterface Props {\n chartData: any\n chartMetadata: ChartMetadata\n attributes: Attributes\n viewAfter: number\n viewBefore: number\n hoveredRow: number\n hoveredX: number | null\n showUndefined: boolean\n}\n\nconst Kubernetes = ({\n chartData,\n chartMetadata,\n attributes,\n viewAfter,\n viewBefore,\n hoveredRow,\n hoveredX,\n showUndefined,\n}: Props) => {\n const { filteredRows } = attributes\n const { data: groupBoxData, labels, chartLabels } = useMemo(\n () => transform(chartData, filteredRows),\n [filteredRows, chartData]\n )\n\n const {\n id,\n result: { data },\n groupBy,\n postGroupBy,\n } = chartData\n\n const renderBoxPopover = ({ groupIndex, index, align }) => {\n const label = groupBoxData[groupIndex].labels[index]\n const { title } = getLabel(postGroupBy)\n\n return (\n \n )\n }\n\n const renderGroupPopover = ({ groupIndex, align }) => {\n const label = labels[groupIndex]\n const { title } = getLabel(groupBy)\n\n return (\n \n )\n }\n\n const groupedBoxesData = useMemo(() => {\n return groupBoxData.map((groupedBox) => {\n return {\n labels: groupedBox.labels,\n data:\n hoveredRow === -1 || hoveredRow > data.length || !(hoveredRow in data)\n ? 
groupedBox.postAggregated\n : groupedBox.indexes.map((index) => data[hoveredRow][index + 1]) || [],\n }\n })\n }, [data, groupBoxData, hoveredRow])\n\n return (\n \n \n \n {id}\n \n \n \n )\n}\n\nexport default Kubernetes\n","/* eslint-disable arrow-body-style */\n/* eslint-disable object-curly-newline */\n/* eslint-disable react-hooks/exhaustive-deps */\n/* eslint-disable comma-dangle */\n/* eslint-disable react/prop-types */\n// @ts-nocheck\n\nexport default (chartData, filteredRows) => {\n const { keys, labels: labelValues, groupBy, postGroupBy, aggrGroups, postAggregated } = chartData\n const groupValues = keys[groupBy]\n const postGroupValues = keys[postGroupBy]\n const indexes = filteredRows || [...Array(groupValues.length)].map((v, index) => index)\n\n const postGroupData = indexes.reduce((acc: any, index: number) => {\n const groupValue = groupValues[index]\n if (!(groupValue in acc)) {\n acc[groupValue] = {\n labels: [],\n indexes: [],\n chartLabels: [],\n postAggregated: [],\n }\n }\n const boxes = acc[groupValue]\n boxes.indexes.push(index)\n boxes.labels.push(postGroupValues[index])\n boxes.postAggregated.push(postAggregated[index])\n\n const chartLabels = aggrGroups.reduce((labelsAcc, label) => {\n return labelValues[label][index]\n ? { ...labelsAcc, [label]: labelValues[label][index] }\n : labelsAcc\n }, {})\n boxes.chartLabels.push(chartLabels)\n return acc\n }, {})\n\n const labels = Object.keys(postGroupData).sort(\n (a, b) => postGroupData[b].indexes.length - postGroupData[a].indexes.length\n )\n\n const groupData = labels.map((label) => postGroupData[label])\n\n const groupChartLabels = groupData.map((boxes) => {\n return aggrGroups.reduce((acc, label) => {\n const groupLabels = new Set(\n boxes.chartLabels.reduce((accChartLabels, chartLabels) => {\n return chartLabels[label] ? [...accChartLabels, ...chartLabels[label]] : accChartLabels\n }, [])\n )\n return groupLabels.size === 0 ? 
acc : { ...acc, [label]: Array.from(groupLabels) }\n }, {})\n })\n\n return { labels, data: groupData, chartLabels: groupChartLabels }\n}\n","import React, { useCallback } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { setGlobalChartUnderlayAction, setGlobalPanAndZoomAction } from \"domains/global/actions\"\nimport { selectSyncPanAndZoom } from \"domains/global/selectors\"\nimport { setChartPanAndZoomAction } from \"domains/chart/actions\"\nimport { useShowValueOutside } from \"hooks/use-show-value-outside\"\n\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport {\n ChartData, ChartMetadata, DygraphData, EasyPieChartData, D3pieChartData,\n} from \"../chart-types\"\nimport { chartLibrariesSettings, ChartLibraryName } from \"../utils/chartLibrariesSettings\"\n\nimport { DygraphChart } from \"./lib-charts/dygraph-chart\"\nimport { EasyPieChart } from \"./lib-charts/easy-pie-chart\"\nimport { GaugeChart } from \"./lib-charts/gauge-chart\"\nimport { SparklineChart } from \"./lib-charts/sparkline-chart\"\nimport { D3pieChart } from \"./lib-charts/d3pie-chart\"\nimport { PeityChart } from \"./lib-charts/peity-chart\"\nimport { GoogleChart } from \"./lib-charts/google-chart\"\nimport { TextOnly } from \"./lib-charts/text-only\"\nimport { KubernetesGroupBoxes } from \"./lib-charts/group-box-chart\"\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: ChartData\n chartMetadata: ChartMetadata\n chartLibrary: ChartLibraryName\n colors: {\n [key: string]: string\n }\n chartUuid: string\n chartHeight: number\n chartWidth: number\n dimensionsVisibility: boolean[]\n hasEmptyData: boolean\n isRemotelyControlled: boolean\n legendFormatValue: ((v: number | string | null) => number | string)\n orderedColors: string[]\n hoveredX: number | null\n onUpdateChartPanAndZoom: (arg: { after: number, before: number, masterID: string }) => void\n immediatelyDispatchPanAndZoom: () => void\n\n hoveredRow: number\n setHoveredX: (hoveredX: number | null, noMaster?: boolean) => void\n setMinMax: (minMax: [number, number]) => void\n showLatestOnBlur: boolean\n unitsCurrent: string\n viewAfterForCurrentData: number,\n viewBeforeForCurrentData: number,\n}\n\nexport const AbstractChart = ({\n attributes,\n chartContainerElement,\n chartData,\n chartMetadata,\n chartLibrary,\n colors,\n chartUuid,\n chartHeight,\n chartWidth,\n dimensionsVisibility,\n hasEmptyData,\n isRemotelyControlled,\n legendFormatValue,\n orderedColors,\n hoveredRow,\n hoveredX,\n onUpdateChartPanAndZoom,\n immediatelyDispatchPanAndZoom,\n setHoveredX,\n setMinMax,\n showLatestOnBlur,\n unitsCurrent,\n viewAfterForCurrentData,\n viewBeforeForCurrentData,\n}: Props) => {\n const dispatch = useDispatch()\n\n const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n const setGlobalChartUnderlay = useCallback(({ after, before, masterID }) => {\n dispatch(setGlobalChartUnderlayAction({ after, before, masterID }))\n\n // freeze charts\n // don't send masterID, so no padding is applied\n if (isSyncPanAndZoom) {\n dispatch(setGlobalPanAndZoomAction({\n after: viewAfterForCurrentData,\n before: viewBeforeForCurrentData,\n }))\n } else {\n dispatch(setChartPanAndZoomAction({\n after: viewAfterForCurrentData,\n before: viewBeforeForCurrentData,\n id: chartUuid,\n }))\n }\n }, [chartUuid, dispatch, isSyncPanAndZoom, viewAfterForCurrentData, viewBeforeForCurrentData])\n\n const chartSettings = 
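The transform module above buckets each row index under its group value and then sorts the groups by member count, largest first, which is what orders the group boxes on screen. A reduced sketch of that grouping step with hypothetical pod labels:

// Bucket indexes by group value, then sort groups by size descending.
const groupValues = ["pod-a", "pod-b", "pod-a", "pod-c", "pod-a", "pod-b"]

const buckets = groupValues.reduce<Record<string, number[]>>((acc, value, index) => {
  (acc[value] = acc[value] || []).push(index)
  return acc
}, {})

const labels = Object.keys(buckets).sort(
  (a, b) => buckets[b].length - buckets[a].length
)
console.log(labels) // ["pod-a", "pod-b", "pod-c"]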
chartLibrariesSettings[chartLibrary]\n const { hasLegend } = chartSettings\n const chartElementClassName = hasLegend(attributes)\n ? classNames(\n `netdata-chart-with-legend-${attributes.legendPosition || \"right\"}`,\n `netdata-${chartLibrary}-chart-with-legend-right`,\n )\n : classNames(\n \"netdata-chart\",\n `netdata-${chartLibrary}-chart`,\n )\n const chartElementId = `${chartLibrary}-${chartUuid}-chart`\n const showUndefined = hoveredRow === -1 && !showLatestOnBlur\n\n useShowValueOutside({\n attributes, chartData, chartSettings, hoveredRow, legendFormatValue, showUndefined,\n })\n\n if (chartLibrary === \"easypiechart\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"gauge\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"sparkline\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"d3pie\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"peity\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"google\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"textonly\") {\n return (\n \n )\n }\n\n if (chartLibrary === \"groupbox\") {\n return (\n \n )\n }\n\n return (\n \n )\n}\n","import { useEffect, useRef } from \"react\"\nimport { isEmpty } from \"ramda\"\nimport { useMount } from \"react-use\"\n\nimport { ChartData, DygraphData } from \"domains/chart/chart-types\"\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartLibraryConfig } from \"domains/chart/utils/chartLibrariesSettings\"\n\n\ninterface UseShowValueOutsideArgument {\n attributes: Attributes\n chartData: ChartData\n chartSettings: ChartLibraryConfig\n hoveredRow: number\n legendFormatValue: ((v: number | string | null) => number | string)\n showUndefined: boolean\n}\n\n// example of the attribute:\n// show-value-of-iowait-at: \"system.cpu.iowait.1\"\n\nexport const useShowValueOutside = ({\n attributes,\n chartData,\n chartSettings,\n hoveredRow,\n legendFormatValue,\n showUndefined,\n}: UseShowValueOutsideArgument) => {\n // a ref to store found elements, just once per lifetime of component\n const showValueAttributesNodes = useRef<(HTMLElement | null)[]>([])\n\n // find the nodes that will have populated values\n useMount(() => {\n const { showValueOf } = attributes\n // showValueOf will be undefined if not used, but additional isEmpty check can prevent\n // regression performance issue in the future\n if (!showValueOf || isEmpty(showValueOf)) {\n return\n }\n const dimensionNames = chartData.dimension_names\n const dimensionIds = chartData.dimension_ids\n dimensionNames.forEach((dimensionName, i) => {\n const userElementId = showValueOf[`show-value-of-${dimensionName.toLowerCase()}`]\n || showValueOf[`show-value-of-${dimensionIds[i].toLowerCase()}-at`]\n\n // if element is not found, just add null\n showValueAttributesNodes.current = showValueAttributesNodes.current.concat(\n document.getElementById(userElementId),\n )\n })\n })\n\n useEffect(() => {\n if (showValueAttributesNodes.current.length) {\n const chartSettingCallOptions = chartSettings.options(attributes)\n const isFlipped = chartSettingCallOptions.includes(\"flip\")\n\n // \"objectrows\" is for d3pie, which has different data format\n if (chartData.format === \"json\" && !chartSettingCallOptions.includes(\"objectrows\")) {\n const { data } = (chartData as DygraphData).result\n const valueIndex = hoveredRow === -1\n ? 
(data.length - 1)\n : (hoveredRow) // because data for easy-pie-chart are flipped\n\n // yes, \"flipped\" value means chronological order (from oldest to newest) :)\n const rowIndex = isFlipped ? valueIndex : (data.length - valueIndex - 1)\n const row = data[rowIndex]\n\n chartData.dimension_names.forEach((dimensionName, dimensionIndex) => {\n const value = (showUndefined || !row)\n ? \"\"\n : legendFormatValue(row[dimensionIndex + 1])\n const element = showValueAttributesNodes.current[dimensionIndex]\n if (element) {\n element.innerText = `${value}`\n }\n })\n }\n }\n }, [attributes, chartData, chartSettings, hoveredRow, legendFormatValue, showUndefined])\n}\n","import { __, prop } from \"ramda\"\nimport React, { useEffect, useState, useCallback, useMemo, memo, useContext } from \"react\"\nimport { ThemeContext } from \"styled-components\"\nimport { useDebouncedCallback } from \"use-debounce\"\n\nimport {\n requestCommonColorsAction,\n setDefaultAfterAction,\n setGlobalPanAndZoomAction,\n setGlobalSelectionAction,\n} from \"domains/global/actions\"\nimport {\n createSelectAssignedColors,\n selectGlobalSelection,\n selectSyncPanAndZoom,\n selectSyncSelection,\n selectUnitsScalingMethod,\n} from \"domains/global/selectors\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { TimeRange } from \"types/common\"\nimport { MS_IN_SECOND, isTimestamp } from \"utils/utils\"\n\nimport { setChartPanAndZoomAction } from \"domains/chart/actions\"\n\nimport { getPanAndZoomStep } from \"../utils/get-pan-and-zoom-step\"\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { chartLibrariesSettings } from \"../utils/chartLibrariesSettings\"\nimport { useFormatters } from \"../utils/formatters\"\nimport { ChartData, ChartMetadata } from \"../chart-types\"\n\nimport { ChartLegend } from \"./chart-legend\"\nimport { LegendToolbox } from \"./legend-toolbox\"\nimport { ResizeHandler } from \"./resize-handler\"\nimport { AbstractChart } from \"./abstract-chart\"\n\ninterface GlobalPanAndZoomState {\n after: number // timestamp in ms\n before: number // timestamp in ms\n masterID?: string\n shouldForceTimeRange?: boolean\n}\n\ninterface Props {\n attributes: Attributes\n chartContainerElement: HTMLElement\n chartData: ChartData\n chartMetadata: ChartMetadata\n chartHeight: number\n chartUuid: string\n chartWidth: number\n defaultAfter: number\n globalPanAndZoom: null | GlobalPanAndZoomState\n hasEmptyData: boolean\n isRemotelyControlled: boolean\n viewRangeForCurrentData: TimeRange\n viewRange: TimeRange\n selectedDimensions: string[]\n setSelectedDimensions: (newState: string[]) => void\n showLatestOnBlur: boolean\n}\n\nexport const Chart = memo(\n ({\n attributes,\n attributes: { chartLibrary },\n chartContainerElement,\n chartData,\n chartMetadata,\n chartHeight,\n chartUuid,\n chartWidth,\n defaultAfter,\n globalPanAndZoom,\n hasEmptyData,\n isRemotelyControlled,\n viewRangeForCurrentData,\n viewRange,\n selectedDimensions,\n setSelectedDimensions,\n showLatestOnBlur,\n }: Props) => {\n const themeContext = useContext(ThemeContext)\n const unitsScalingMethod = useSelector(selectUnitsScalingMethod)\n const chartSettings = chartLibrariesSettings[chartLibrary]\n const { hasLegend } = chartSettings\n const {\n units = chartMetadata.units,\n unitsCommon,\n unitsDesired = unitsScalingMethod,\n } = attributes\n\n // we need to have empty selectedDimensions work as {all enabled}, in case\n // new dimensions show up (when all are enabled, the new dimensions should 
also auto-enable)\n    const dimensionsVisibility = useMemo(\n      () =>\n        chartData.dimension_names.map(dimensionName =>\n          selectedDimensions.length === 0 ? true : selectedDimensions.includes(dimensionName)\n        ),\n      [chartData.dimension_names, selectedDimensions]\n    )\n\n    const shouldDisplayToolbox =\n      hasLegend(attributes) && window.NETDATA.options.current.legend_toolbox\n\n    const shouldDisplayResizeHandler =\n      shouldDisplayToolbox &&\n      window.NETDATA.options.current.resize_charts && // legacy way of turning off for print mode\n      !attributes.hideResizeHandler\n\n    const dispatch = useDispatch()\n    const allDimensionNames = useMemo(() => {\n      // metadata and chartData dimensions match each other, but we need to first parse\n      // dimensions from metadata, to keep the same order (when the browser parses the dimensions object,\n      // it sorts them in *some* way which is hard to reproduce). And people can get used to colors\n      // so let's keep them as they were before\n      const dimensionNamesFromMetadata = Object.values(chartMetadata.dimensions).map(x => x.name)\n      const additionalDimensionNamesFromData = chartData.dimension_names.filter(\n        x => !dimensionNamesFromMetadata.includes(x)\n      )\n      return dimensionNamesFromMetadata.concat(additionalDimensionNamesFromData)\n    }, [chartData.dimension_names, chartMetadata.dimensions])\n    useEffect(() => {\n      dispatch(\n        requestCommonColorsAction({\n          chartContext: chartMetadata.context,\n          chartUuid,\n          colorsAttribute: attributes.colors,\n          commonColorsAttribute: attributes.commonColors,\n          dimensionNames: allDimensionNames,\n        })\n      )\n    }, [\n      allDimensionNames,\n      attributes.colors,\n      attributes.commonColors,\n      chartMetadata.context,\n      chartUuid,\n      dispatch,\n    ])\n\n    const { legendFormatValue, legendFormatValueDecimalsFromMinMax, unitsCurrent } = useFormatters({\n      attributes,\n      data: chartData,\n      units,\n      unitsCommon,\n      unitsDesired,\n      uuid: chartUuid,\n    })\n\n    const [localHoveredX, setLocalHoveredX] = useState(null)\n\n    const isSyncSelection = useSelector(selectSyncSelection)\n    const handleSetHoveredX = useCallback(\n      (newHoveredX, noMaster) => {\n        if (isSyncSelection) {\n          const action = noMaster\n            ? { chartUuid: null, hoveredX: newHoveredX }\n            : { chartUuid, hoveredX: newHoveredX }\n          dispatch(setGlobalSelectionAction(action))\n        } else {\n          setLocalHoveredX(newHoveredX)\n        }\n      },\n      [chartUuid, dispatch, isSyncSelection]\n    )\n    const globalHoveredX = useSelector(selectGlobalSelection)\n    const hoveredX = isSyncSelection ? globalHoveredX : localHoveredX\n\n    // time-frames for requested data (even when request is pending)\n    const viewAfter = isTimestamp(viewRange[0]) ? viewRange[0] : chartData.after * MS_IN_SECOND\n    const viewBefore = isTimestamp(viewRange[1]) ? viewRange[1] : chartData.before * MS_IN_SECOND\n\n    const viewAfterForCurrentData = isTimestamp(viewRangeForCurrentData[0])\n      ? viewRangeForCurrentData[0]\n      : chartData.after * MS_IN_SECOND\n    const viewBeforeForCurrentData = isTimestamp(viewRangeForCurrentData[1])\n      ?
viewRangeForCurrentData[1]\n      : chartData.before * MS_IN_SECOND // when 'before' is 0 or negative\n\n    const netdataFirst = chartData.first_entry * MS_IN_SECOND\n    const netdataLast = chartData.last_entry * MS_IN_SECOND\n\n    // old dashboard persists min duration based on first chartWidth, I assume it's a bug\n    // and will update fixedMinDuration when width changes\n    const fixedMinDuration = useMemo(\n      () => Math.round((chartWidth / 30) * chartMetadata.update_every * MS_IN_SECOND),\n      [chartMetadata.update_every, chartWidth]\n    )\n\n    const isSyncPanAndZoom = useSelector(selectSyncPanAndZoom)\n\n    const setGlobalPanAndZoomDebounced = useDebouncedCallback(\n      newGlobalPanAndZoom => {\n        dispatch(setGlobalPanAndZoomAction(newGlobalPanAndZoom))\n      },\n      400 // corresponds to global_pan_sync_time in old dashboard\n    )\n\n    const immediatelyDispatchPanAndZoom = useCallback(() => {\n      setGlobalPanAndZoomDebounced.flush()\n    }, [setGlobalPanAndZoomDebounced])\n\n    /**\n     * pan-and-zoom handler (both for toolbox and mouse events)\n     */\n    const handleUpdateChartPanAndZoom = useCallback(\n      ({\n        after,\n        before,\n        callback,\n        shouldFlushImmediately = false,\n        shouldForceTimeRange,\n        shouldNotExceedAvailableRange,\n      }) => {\n        if (before < after) {\n          return\n        }\n        let minDuration = fixedMinDuration\n\n        const currentDuration = Math.round(viewBefore - viewAfter)\n\n        let afterForced = Math.round(after)\n        let beforeForced = Math.round(before)\n        const viewUpdateEvery = chartData.view_update_every * MS_IN_SECOND\n\n        if (shouldNotExceedAvailableRange) {\n          const first = netdataFirst + viewUpdateEvery\n          const last = netdataLast + viewUpdateEvery\n          // first check \"before\"\n          if (beforeForced > last) {\n            afterForced -= before - last\n            beforeForced = last\n          }\n\n          if (afterForced < first) {\n            afterForced = first\n          }\n        }\n\n        // align them to update_every\n        // stretching them further away\n        afterForced -= afterForced % viewUpdateEvery\n        beforeForced += viewUpdateEvery - (beforeForced % viewUpdateEvery)\n\n        // the final wanted duration\n        let wantedDuration = beforeForced - afterForced\n\n        // to allow panning, accept just a point below our minimum\n        if (currentDuration - viewUpdateEvery < minDuration) {\n          minDuration = currentDuration - viewUpdateEvery\n        }\n\n        // we do it, but we adjust to minimum size and return false\n        // when the wanted size is below the current and the minimum\n        // and we zoom\n        let doCallback = true\n        if (wantedDuration < currentDuration && wantedDuration < minDuration) {\n          minDuration = fixedMinDuration\n\n          const dt = (minDuration - wantedDuration) / 2\n          beforeForced += dt\n          afterForced -= dt\n          wantedDuration = beforeForced - afterForced\n          doCallback = false\n        }\n\n        const tolerance = viewUpdateEvery * 2\n        const movement = Math.abs(beforeForced - viewBefore)\n\n        if (\n          Math.abs(currentDuration - wantedDuration) <= tolerance &&\n          movement <= tolerance &&\n          doCallback\n        ) {\n          return\n        }\n\n        if (isSyncPanAndZoom) {\n          setGlobalPanAndZoomDebounced.callback({\n            after: afterForced,\n            before: beforeForced,\n            masterID: chartUuid,\n            shouldForceTimeRange,\n          })\n          if (shouldFlushImmediately) {\n            setGlobalPanAndZoomDebounced.flush()\n          }\n        } else {\n          dispatch(\n            setChartPanAndZoomAction({\n              after: afterForced,\n              before: beforeForced,\n              id: chartUuid,\n              shouldForceTimeRange,\n            })\n          )\n        }\n\n        if (doCallback && typeof callback === \"function\") {\n          callback(afterForced, beforeForced)\n        }\n      },\n      [\n        chartData.view_update_every,\n        chartUuid,\n        dispatch,\n        fixedMinDuration,\n        isSyncPanAndZoom,\n        netdataFirst,\n        netdataLast,\n        
setGlobalPanAndZoomDebounced,\n viewAfter,\n viewBefore,\n ]\n )\n\n /**\n * toolbox handlers\n */\n const handleToolBoxPanAndZoom = useCallback(\n (after: number, before: number) => {\n const newAfter = Math.max(after, netdataFirst)\n const newBefore = Math.min(before, netdataLast)\n handleUpdateChartPanAndZoom({\n after: newAfter,\n before: newBefore,\n shouldForceTimeRange: true,\n shouldFlushImmediately: true,\n })\n },\n [handleUpdateChartPanAndZoom, netdataFirst, netdataLast]\n )\n\n const handleToolboxLeftClick = useCallback(\n (event: React.MouseEvent) => {\n const step = (viewBefore - viewAfter) * getPanAndZoomStep(event)\n const newBefore = viewBefore - step\n const newAfter = viewAfter - step\n if (newAfter >= netdataFirst) {\n handleToolBoxPanAndZoom(newAfter, newBefore)\n }\n },\n [handleToolBoxPanAndZoom, netdataFirst, viewAfter, viewBefore]\n )\n\n const handleToolboxRightClick = useCallback(\n (event: React.MouseEvent) => {\n const timeWindow = viewBefore - viewAfter\n const step = timeWindow * getPanAndZoomStep(event)\n const newBefore = Math.min(viewBefore + step, netdataLast)\n const newAfter = newBefore - timeWindow\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [handleToolBoxPanAndZoom, netdataLast, viewAfter, viewBefore]\n )\n\n const handleToolboxZoomInClick = useCallback(\n (event: React.MouseEvent) => {\n const panAndZoomStep = getPanAndZoomStep(event) * 0.8\n if (!globalPanAndZoom) {\n dispatch(\n setDefaultAfterAction({\n after: Math.round(defaultAfter / (panAndZoomStep + 1)),\n })\n )\n return\n }\n // if visible time range is much bigger than available time range in history, first zoom-in\n // should just fit to available range\n if (viewBefore - viewAfter > (netdataLast - netdataFirst) * 1.2) {\n handleToolBoxPanAndZoom(netdataFirst, netdataLast)\n return\n }\n const dt = ((viewBefore - viewAfter) * panAndZoomStep) / 2\n const newAfter = viewAfter + dt\n const newBefore = viewBefore - dt\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [\n defaultAfter,\n dispatch,\n globalPanAndZoom,\n handleToolBoxPanAndZoom,\n netdataFirst,\n netdataLast,\n viewAfter,\n viewBefore,\n ]\n )\n\n const handleToolboxZoomOutClick = useCallback(\n (event: React.MouseEvent) => {\n const panAndZoomStep = getPanAndZoomStep(event) * 0.8\n if (!globalPanAndZoom) {\n dispatch(\n setDefaultAfterAction({\n after: Math.round(defaultAfter * (panAndZoomStep + 1)),\n })\n )\n return\n }\n const dt =\n ((viewBefore - viewAfter) / (1.0 - panAndZoomStep * 0.8) - (viewBefore - viewAfter)) / 2\n const newAfter = viewAfter - dt\n const newBefore = viewBefore + dt\n handleToolBoxPanAndZoom(newAfter, newBefore)\n },\n [defaultAfter, dispatch, globalPanAndZoom, handleToolBoxPanAndZoom, viewAfter, viewBefore]\n )\n\n /**\n * assign colors\n */\n const selectAssignedColors = useMemo(\n () =>\n createSelectAssignedColors({\n chartContext: chartMetadata.context,\n chartUuid,\n colorsAttribute: attributes.colors,\n commonColorsAttribute: attributes.commonColors,\n }),\n [attributes.colors, attributes.commonColors, chartMetadata, chartUuid]\n )\n const colors = useSelector(selectAssignedColors)\n const orderedColors = useMemo(\n () => chartData.dimension_names.map(prop(__, colors)),\n [chartData, colors]\n )\n\n if (!colors) {\n return // wait for createSelectAssignedColors reducer result to come back\n }\n\n const isTimeVisible = hoveredX && hoveredX >= viewAfter && hoveredX <= viewBefore\n const viewUpdateEvery = chartData.view_update_every * MS_IN_SECOND\n const hoveredRow = isTimeVisible\n 
? Math.floor(((hoveredX as number) - chartData.after * MS_IN_SECOND) / viewUpdateEvery)\n : -1\n\n const isLegendOnBottom = attributes.legendPosition === \"bottom\"\n\n const legendToolbox = (\n \n )\n\n const resizeHandler = shouldDisplayResizeHandler && (\n \n )\n\n return (\n <>\n legendFormatValueDecimalsFromMinMax(min, max)}\n showLatestOnBlur={showLatestOnBlur}\n unitsCurrent={unitsCurrent}\n viewAfterForCurrentData={viewAfterForCurrentData}\n viewBeforeForCurrentData={viewBeforeForCurrentData}\n />\n {hasLegend(attributes) && (\n \n )}\n {shouldDisplayToolbox && !isLegendOnBottom && legendToolbox}\n {!isLegendOnBottom && resizeHandler}\n \n )\n }\n)\n","import styled from \"styled-components\"\nimport { getColor, Icon } from \"@netdata/netdata-ui\"\n\nexport const DropdownItem = styled.div`\n display: flex;\n flex-direction: start;\n align-items: center;\n color: ${getColor([\"neutral\", \"limedSpruce\"])};\n white-space: nowrap;\n & > svg use {\n fill: ${getColor([\"neutral\", \"limedSpruce\"])};\n }\n`\n\nexport const DropdownItemLabel = styled.span`\n margin-left: 12px;\n`\n\nexport const DotsBtn = styled(Icon)`\n width: 6px;\n height: 10px;\n cursor: pointer;\n & use {\n fill: ${getColor([\"neutral\", \"limedSpruce\"])};\n & :hover {\n fill: ${getColor([\"neutral\", \"regentgrey\"])};\n }\n }\n`\n","import React, { useState, ReactNode } from \"react\"\n\nimport { Attributes } from \"domains/chart/utils/transformDataAttributes\"\nimport { ChartMetadata } from \"domains/chart/chart-types\"\n\nimport { List, SimpleListItem } from \"@rmwc/list\"\nimport { MenuSurface, MenuSurfaceAnchor } from \"@rmwc/menu\"\n\nimport * as S from \"./styled\"\n\ninterface DropdownMenuCallbackProps {\n attributes: Attributes,\n chartMetadata: ChartMetadata,\n chartID: string,\n}\n\nexport type DropdownMenu = {\n icon: ReactNode,\n label: string,\n onClick: (dropdownMenuCallbackProps: DropdownMenuCallbackProps) => void,\n}[]\n\ninterface Props {\n attributes: Attributes\n chartID: string\n chartMetadata: ChartMetadata\n dropdownMenu: DropdownMenu\n}\n\nexport const ChartDropdown = ({\n attributes,\n chartID,\n chartMetadata,\n dropdownMenu,\n}: Props) => {\n const [isOpen, setIsOpen] = useState(false)\n\n const handleClose = () => {\n setIsOpen(false)\n }\n\n return (\n <>\n {\n setIsOpen(true)\n }}\n />\n \n \n \n {dropdownMenu.map(({ icon, label, onClick }) => (\n \n {icon}\n \n {label}\n \n \n )}\n onClick={() => {\n onClick({ attributes, chartMetadata, chartID })\n handleClose()\n }}\n />\n ))}\n \n \n \n \n )\n}\n","import { prop } from \"ramda\"\nimport styled, { keyframes } from \"styled-components\"\n\nimport { getColor } from \"@netdata/netdata-ui\"\n\nconst circleAnimation = keyframes`\n 0% {\n opacity: .1;\n }\n 50% {\n opacity: .5;\n }\n 100% {\n opacity: .1;\n }\n`\n\n\nexport const SpinnerContainer = styled.div<{ top: number, right: number }>`\n position: absolute;\n top: ${prop(\"top\")}px;\n right: ${prop(\"right\")}px;\n display: flex;\n`\n\nexport const Circle = styled.div<{ size: number }>`\n width: ${prop(\"size\")}px;\n height: ${prop(\"size\")}px;\n background: ${getColor(\"border\")};\n border-radius: 50%;\n animation: 1s linear infinite both ${circleAnimation};\n`\n\nexport const Circle2 = styled(Circle)<{ spaceBetween: number }>`\n animation-delay: .3s; \n margin-left: ${prop(\"spaceBetween\")}px;\n`\n\nexport const Circle3 = styled(Circle)<{ spaceBetween: number }>`\n animation-delay: .6s; \n margin-left: ${prop(\"spaceBetween\")}px;\n`\n","import React from 
\"react\"\n\nimport * as S from \"./styled\"\n\ninterface Props {\n chartLibrary: string\n}\nexport const ChartSpinner = ({\n chartLibrary,\n}: Props) => {\n const top = chartLibrary === \"dygraph\" ? 33 : 0\n const right = chartLibrary === \"dygraph\" ? 8 : 0\n const size = chartLibrary === \"dygraph\" ? 10 : 7\n const spaceBetween = chartLibrary === \"dygraph\" ? 4 : 2\n return (\n \n \n \n \n \n )\n}\n","import styled from \"styled-components\"\n\nimport { chartDropdownZIndex } from \"styles/z-index\"\n\nexport const ChartDropdownContainer = styled.div`\n position: absolute;\n top: 0;\n left: 40px;\n width: 20px;\n height: 20px;\n z-index: ${chartDropdownZIndex};\n`\n","export const notificationsZIndex = \"z-index: 50;\"\n\nexport const chartDropdownZIndex = 10\n\nexport const spacesBarZIndex = 8\n\nexport const spacePanelZIndex = 6\n\nexport const appHeaderZIndex = 5\n\n// the same as in cloud\nexport const portalSidebarZIndex = \"z-index: 35;\"\nexport const customDropdownZIndex = \"z-index: 45;\"\nexport const dialogsZIndex = 60\n","import { cond, always, T } from \"ramda\"\nimport axios from \"axios\"\nimport React, { useEffect, useState, useMemo, useLayoutEffect } from \"react\"\nimport { useThrottle, useUpdateEffect, useUnmount, useDebounce } from \"react-use\"\n\nimport { AppStateT } from \"store/app-state\"\nimport { useSelector, useDispatch } from \"store/redux-separate-context\"\n\nimport {\n selectGlobalPanAndZoom,\n selectGlobalSelection,\n selectShouldEliminateZeroDimensions,\n selectPanAndZoomDataPadding,\n selectSnapshot,\n selectSpacePanelTransitionEndIsActive,\n selectDefaultAfter,\n} from \"domains/global/selectors\"\nimport { serverDefault } from \"utils/server-detection\"\nimport { CHART_UNMOUNTED } from \"utils/netdata-sdk\"\nimport { getCorrectedPoints } from \"utils/fill-missing-data\"\n\nimport { fallbackUpdateTimeInterval, panAndZoomDelay } from \"../../constants\"\nimport { getChartURLOptions } from \"../../utils/get-chart-url-options\"\nimport { chartLibrariesSettings } from \"../../utils/chartLibrariesSettings\"\nimport { Attributes } from \"../../utils/transformDataAttributes\"\nimport { getChartPixelsPerPoint } from \"../../utils/get-chart-pixels-per-point\"\nimport { useFetchNewDataClock } from \"../../hooks/use-fetch-new-data-clock\"\n\nimport { fetchChartAction, fetchDataAction } from \"../../actions\"\nimport {\n selectChartData,\n selectChartFetchDataParams,\n makeSelectChartMetadataRequest,\n selectChartPanAndZoom,\n selectChartIsFetchingData,\n selectChartViewRange,\n} from \"../../selectors\"\nimport {\n ChartData,\n ChartMetadata,\n D3pieChartData,\n DygraphData,\n EasyPieChartData,\n} from \"../../chart-types\"\n\nimport { Loader } from \"../loader\"\nimport { Chart } from \"../chart\"\nimport { ChartDropdown, DropdownMenu } from \"../chart-dropdown\"\nimport { ChartSpinner } from \"../chart-spinner/chart-spinner\"\n\nimport * as S from \"./styled\"\nimport \"./chart-with-loader.css\"\n\nexport type RenderCustomElementForDygraph = (selectedChartConfiguration: {\n attributes: Attributes\n onAttributesChange: any\n chartMetadata: ChartMetadata\n chartID: string\n chartData: ChartData | null\n}) => JSX.Element\n\nconst dimensionsAggrMethodMap = {\n \"sum-of-abs\": \"sum\",\n}\n\nconst emptyArray = [] as any\n\nexport type Props = {\n attributes: Attributes\n chartUuid: string\n uuid?: string\n dropdownMenu?: DropdownMenu\n externalChartMetadata?: ChartMetadata\n portalNode: HTMLElement\n renderCustomElementForDygraph?: RenderCustomElementForDygraph\n 
onAttributesChange?: any\n}\n\nexport const ChartWithLoader = ({\n attributes,\n chartUuid,\n uuid,\n dropdownMenu,\n externalChartMetadata,\n portalNode,\n renderCustomElementForDygraph,\n onAttributesChange,\n}: Props) => {\n /**\n * fetch chart details\n */\n const { host = serverDefault, id, nodeIDs } = attributes\n const dispatch = useDispatch()\n const selectChartMetadataRequest = useMemo(makeSelectChartMetadataRequest, [])\n const { chartMetadata, isFetchingDetails } = useSelector((state: AppStateT) =>\n selectChartMetadataRequest(state, { chartId: id, id: chartUuid })\n )\n const actualChartMetadata = externalChartMetadata || chartMetadata\n useEffect(() => {\n if (!chartMetadata && !isFetchingDetails && !externalChartMetadata) {\n dispatch(\n fetchChartAction.request({\n chart: id,\n id: chartUuid,\n host,\n nodeIDs,\n })\n )\n }\n }, [\n id,\n chartUuid,\n dispatch,\n host,\n isFetchingDetails,\n chartMetadata,\n externalChartMetadata,\n nodeIDs,\n uuid,\n ])\n\n // todo local state option\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const chartPanAndZoom = useSelector((state: AppStateT) =>\n selectChartPanAndZoom(state, { id: chartUuid })\n )\n const panAndZoom = chartPanAndZoom || globalPanAndZoom\n\n const isPanAndZoomMaster =\n (!!globalPanAndZoom && globalPanAndZoom.masterID === chartUuid) || Boolean(chartPanAndZoom)\n const shouldForceTimeRange = panAndZoom?.shouldForceTimeRange || false\n\n // (isRemotelyControlled === false) only during globalPanAndZoom, when chart is panAndZoomMaster\n // and when no toolbox is used at that time\n const isRemotelyControlled = !panAndZoom || !isPanAndZoomMaster || shouldForceTimeRange // used when zooming/shifting in toolbox\n\n const fetchDataParams = useSelector((state: AppStateT) =>\n selectChartFetchDataParams(state, { id: chartUuid })\n )\n const viewRange = useSelector((state: AppStateT) =>\n selectChartViewRange(state, { id: chartUuid })\n )\n const chartData = useSelector((state: AppStateT) => selectChartData(state, { id: chartUuid }))\n const isFetchingData = useSelector((state: AppStateT) =>\n selectChartIsFetchingData(state, { id: chartUuid })\n )\n\n const hoveredX = useSelector(selectGlobalSelection)\n\n // periodical update of newest data\n // default to 2000ms. 
When chartMetadata has been fetched, use chartMetadata.update_every\n // if chartData has been fetched, use chartData.view_update_every instead\n // todo add support to \"data-update-every\" attribute\n const viewUpdateEvery = cond([\n [always(!!chartData), () => (chartData as ChartData).view_update_every * 1000],\n [\n always(!!actualChartMetadata),\n () => (actualChartMetadata as ChartMetadata).update_every * 1000,\n ],\n [T, always(fallbackUpdateTimeInterval)],\n ])()\n const [shouldFetch, setShouldFetch] = useFetchNewDataClock({\n areCriteriaMet: !panAndZoom && !hoveredX,\n preferedIntervalTime: viewUpdateEvery,\n })\n\n const panAndZoomThrottled = useThrottle(panAndZoom, panAndZoomDelay)\n useEffect(() => {\n setShouldFetch(true)\n }, [panAndZoomThrottled, setShouldFetch])\n\n const defaultAfter = useSelector(selectDefaultAfter)\n // when after/before changes, don't wait for next interval, just fetch immediately\n useUpdateEffect(() => {\n setShouldFetch(true)\n }, [\n attributes.after,\n attributes.before,\n defaultAfter,\n attributes.dimensions,\n attributes.aggrMethod,\n attributes.groupBy,\n ])\n\n const { before: initialBefore = window.NETDATA.chartDefaults.before } = attributes\n\n // attributes.after should be now used only for old custom dashboard\n // and in the future for setting timeframe per-chart\n const liveModeAfter = attributes.after || defaultAfter\n\n const chartSettings = chartLibrariesSettings[attributes.chartLibrary]\n const { hasLegend } = chartSettings\n\n // todo optimize by using resizeObserver (optionally)\n const boundingClientRect = portalNode.getBoundingClientRect()\n\n // from old dashboard\n const chartWidth = boundingClientRect.width - (hasLegend(attributes) ? 140 : 0)\n const chartHeight = boundingClientRect.height\n\n const isShowingSnapshot = Boolean(useSelector(selectSnapshot))\n const shouldEliminateZeroDimensions =\n useSelector(selectShouldEliminateZeroDimensions) || isShowingSnapshot\n const shouldUsePanAndZoomPadding = useSelector(selectPanAndZoomDataPadding)\n\n const { CancelToken } = axios\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const cancelTokenSource = useMemo(() => CancelToken.source(), [])\n useUnmount(() => {\n cancelTokenSource.cancel(CHART_UNMOUNTED)\n })\n\n /**\n * spinner state\n * show spinner when it's fetching for more than 2 seconds\n * hide spinner immediately when it's not fetching\n */\n const [shouldShowSpinnerDebounced, setShouldShowSpinnerDebounced] = useState(false)\n const shouldShowSpinner = shouldShowSpinnerDebounced && isFetchingData\n useDebounce(\n () => {\n if (isFetchingData) {\n setShouldShowSpinnerDebounced(true)\n }\n },\n 2000,\n [isFetchingData]\n )\n useEffect(() => {\n if (!isFetchingData && shouldShowSpinnerDebounced) {\n setShouldShowSpinnerDebounced(false)\n }\n }, [isFetchingData, shouldShowSpinnerDebounced])\n\n /**\n * fetch data\n */\n useEffect(() => {\n if (shouldFetch && actualChartMetadata && !isFetchingData) {\n // todo can be overridden by main.js\n const forceDataPoints = window.NETDATA.options.force_data_points\n\n let after\n let before\n let newViewRange\n let pointsMultiplier = 1\n\n if (panAndZoom) {\n if (isPanAndZoomMaster) {\n after = Math.round(panAndZoom.after / 1000)\n before = Math.round(panAndZoom.before / 1000)\n\n newViewRange = [after, before]\n\n if (shouldUsePanAndZoomPadding) {\n const requestedPadding = Math.round((before - after) / 2)\n after -= requestedPadding\n before += requestedPadding\n pointsMultiplier = 2\n }\n } else {\n after = 
Math.round(panAndZoom.after / 1000)\n          before = Math.round(panAndZoom.before / 1000)\n          pointsMultiplier = 1\n        }\n      } else {\n        // no panAndZoom\n        before = initialBefore\n        after = liveModeAfter\n        pointsMultiplier = 1\n      }\n\n      newViewRange = (newViewRange || [after, before]).map(x => x * 1000) as [number, number]\n\n      const dataPoints =\n        attributes.points ||\n        Math.round(chartWidth / getChartPixelsPerPoint({ attributes, chartSettings }))\n      const points = forceDataPoints || dataPoints * pointsMultiplier\n\n      const shouldForceTimeWindow = attributes.forceTimeWindow || Boolean(defaultAfter)\n      // if we want to add fake points, we first need to request less\n      // to have the desired frequency\n      // this will be removed when Agents support forcing a time-window between points\n      const correctedPoints = shouldForceTimeWindow\n        ? getCorrectedPoints({\n            after,\n            before,\n            firstEntry: actualChartMetadata.first_entry,\n            points,\n          })\n        : null\n\n      const group = attributes.method || window.NETDATA.chartDefaults.method\n      setShouldFetch(false)\n      dispatch(\n        fetchDataAction.request({\n          // properties to be passed to API\n          host,\n          context: actualChartMetadata.context,\n          chart: actualChartMetadata.id,\n          format: chartSettings.format,\n          points: correctedPoints || points,\n          group,\n          gtime: attributes.gtime || 0,\n          options: getChartURLOptions(attributes, shouldEliminateZeroDimensions),\n          after: after || null,\n          before: before || null,\n          dimensions: attributes.dimensions,\n          labels: attributes.labels,\n          postGroupBy: attributes.postGroupBy,\n          postAggregationMethod: attributes.postAggregationMethod,\n          aggrMethod: attributes.aggrMethod,\n          aggrGroups: attributes.aggrGroups,\n          // @ts-ignore\n          dimensionsAggrMethod:\n            dimensionsAggrMethodMap[attributes.dimensionsAggrMethod] ||\n            attributes.dimensionsAggrMethod,\n          nodeIDs,\n          httpMethod: attributes.httpMethod,\n          groupBy: attributes.groupBy,\n\n          // properties for the reducer\n          fetchDataParams: {\n            // we store it here so it is only available when data is fetched\n            // those params should be synced with data\n            fillMissingPoints: correctedPoints ?
points - correctedPoints : undefined,\n            isRemotelyControlled,\n            viewRange: newViewRange,\n          },\n          id: chartUuid,\n          cancelTokenSource,\n        })\n      )\n    }\n  }, [\n    attributes,\n    actualChartMetadata,\n    chartSettings,\n    chartUuid,\n    chartWidth,\n    defaultAfter,\n    dispatch,\n    hasLegend,\n    host,\n    initialBefore,\n    isFetchingData,\n    isPanAndZoomMaster,\n    isRemotelyControlled,\n    liveModeAfter,\n    panAndZoom,\n    portalNode,\n    setShouldFetch,\n    shouldEliminateZeroDimensions,\n    shouldUsePanAndZoomPadding,\n    shouldFetch,\n    cancelTokenSource,\n    nodeIDs,\n    uuid,\n  ])\n\n  useSelector(selectSpacePanelTransitionEndIsActive)\n\n  const externalSelectedDimensions = attributes?.selectedDimensions\n  const [selectedDimensions, setSelectedDimensions] = useState(\n    externalSelectedDimensions || emptyArray\n  )\n\n  useLayoutEffect(() => {\n    if (externalSelectedDimensions) {\n      setSelectedDimensions(externalSelectedDimensions)\n    }\n  }, [externalSelectedDimensions])\n\n  useLayoutEffect(() => {\n    setSelectedDimensions(externalSelectedDimensions || emptyArray)\n    // eslint-disable-next-line react-hooks/exhaustive-deps\n  }, [attributes?.groupBy])\n\n  const customElementForDygraph = useMemo(\n    () =>\n      renderCustomElementForDygraph &&\n      renderCustomElementForDygraph({\n        onAttributesChange,\n        attributes,\n        chartMetadata: actualChartMetadata as ChartMetadata,\n        chartData,\n        chartID: id,\n      }),\n    [\n      onAttributesChange,\n      renderCustomElementForDygraph,\n      attributes,\n      id,\n      actualChartMetadata,\n      chartData,\n    ]\n  )\n\n  // eslint-disable-next-line max-len\n  const hasEmptyData =\n    (chartData as DygraphData | D3pieChartData | null)?.result?.data?.length === 0 ||\n    (chartData as EasyPieChartData | null)?.result?.length === 0\n\n  if (!chartData || !actualChartMetadata) {\n    return (\n      <>\n        \n        {shouldShowSpinner && }\n      \n    )\n  }\n\n  return (\n    <>\n      {hasEmptyData && (\n        \n      )}\n      \n      {shouldShowSpinner && }\n      {dropdownMenu && dropdownMenu.length > 0 && (\n        \n          \n        \n      )}\n      {customElementForDygraph}\n    \n  )\n}\n","import { useEffect, useState } from \"react\"\nimport { useInterval } from \"react-use\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport {\n  selectHasWindowFocus,\n  selectStopUpdatesWhenFocusIsLost,\n  selectGlobalPause,\n} from \"domains/global/selectors\"\nimport { BIGGEST_INTERVAL_NUMBER } from \"utils/biggest-interval-number\"\nimport { isPrintMode } from \"domains/dashboard/utils/parse-url\"\n\ntype UseFetchNewDataClock = (arg: {\n  areCriteriaMet: boolean\n  preferedIntervalTime: number\n}) => [boolean, (shouldFetch: boolean) => void]\nexport const useFetchNewDataClock: UseFetchNewDataClock = ({\n  areCriteriaMet,\n  preferedIntervalTime,\n}) => {\n  const hasWindowFocus = useSelector(selectHasWindowFocus)\n  const stopUpdatesWhenFocusIsLost = useSelector(selectStopUpdatesWhenFocusIsLost)\n  const globalPause = useSelector(selectGlobalPause)\n\n  const shouldBeUpdating = !(!hasWindowFocus && stopUpdatesWhenFocusIsLost) && !globalPause\n\n  const [shouldFetch, setShouldFetch] = useState(true)\n  const [shouldFetchImmediatelyAfterFocus, setShouldFetchImmediatelyAfterFocus] = useState(false)\n\n  useEffect(() => {\n    if (shouldFetchImmediatelyAfterFocus && shouldBeUpdating) {\n      setShouldFetchImmediatelyAfterFocus(false)\n      setShouldFetch(true)\n    }\n  }, [shouldFetchImmediatelyAfterFocus, setShouldFetchImmediatelyAfterFocus, shouldBeUpdating])\n\n  // don't use setInterval when we lose focus\n  const intervalTime =\n    (shouldBeUpdating || !shouldFetchImmediatelyAfterFocus) && !isPrintMode\n      ?
preferedIntervalTime\n : BIGGEST_INTERVAL_NUMBER\n useInterval(() => {\n if (areCriteriaMet) {\n if (!shouldBeUpdating) {\n setShouldFetchImmediatelyAfterFocus(true)\n return\n }\n setShouldFetch(true)\n }\n // when there's no focus, don't ask for updated data\n }, intervalTime)\n return [shouldFetch, setShouldFetch]\n}\n","import { Attributes } from \"./transformDataAttributes\"\nimport { ChartLibraryConfig } from \"./chartLibrariesSettings\"\n\ntype GetChartPixelsPerPoint = (arg: {\n attributes: Attributes,\n chartSettings: ChartLibraryConfig,\n}) => number\n\nexport const getChartPixelsPerPoint: GetChartPixelsPerPoint = ({\n attributes, chartSettings,\n}) => {\n const {\n pixelsPerPoint: pixelsPerPointAttribute,\n } = attributes\n if (typeof pixelsPerPointAttribute === \"number\") {\n return pixelsPerPointAttribute\n }\n const pixelsPerPointSetting = chartSettings.pixelsPerPoint(attributes)\n\n return Math.max(...[\n pixelsPerPointSetting,\n window.NETDATA.options.current.pixels_per_point,\n ].filter((px) => typeof px === \"number\"))\n}\n","import { prop, pick } from \"ramda\"\nimport { createSelector } from \"reselect\"\n\nimport { AppStateT } from \"store/app-state\"\n\nimport { storeKey } from \"./constants\"\n\nconst selectDashboardDomain = (state: AppStateT) => state[storeKey]\n\nexport const selectIsSnapshotMode = createSelector(\n selectDashboardDomain,\n prop(\"isSnapshotMode\"),\n)\n\nexport const selectSnapshotOptions = createSelector(\n selectDashboardDomain,\n pick([\"snapshotCharts\", \"snapshotDataPoints\"]),\n)\n","import React, { useEffect } from \"react\"\n\nimport { MS_IN_SECOND } from \"utils/utils\"\nimport { serverDefault } from \"utils/server-detection\"\nimport { selectIsSnapshotMode, selectSnapshotOptions } from \"domains/dashboard/selectors\"\nimport { selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { TimeRangeObjT } from \"types/common\"\n\nimport { Attributes } from \"../utils/transformDataAttributes\"\nimport { fetchDataForSnapshotAction } from \"../actions\"\nimport { chartLibrariesSettings } from \"../utils/chartLibrariesSettings\"\nimport { getChartURLOptions } from \"../utils/get-chart-url-options\"\n\ninterface SnapshotLoaderProps {\n attributes: Attributes\n chartUuid: string\n}\nconst SnapshotLoader = ({\n attributes,\n chartUuid,\n}: SnapshotLoaderProps) => {\n const host = attributes.host || serverDefault\n const { snapshotDataPoints } = useSelector(selectSnapshotOptions)\n const group = attributes.method || window.NETDATA.chartDefaults.method\n const { chartLibrary } = attributes\n const chartSettings = chartLibrariesSettings[chartLibrary]\n\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const after = (globalPanAndZoom as TimeRangeObjT).after / MS_IN_SECOND\n const before = (globalPanAndZoom as TimeRangeObjT).before / MS_IN_SECOND\n\n const dispatch = useDispatch()\n useEffect(() => {\n dispatch(fetchDataForSnapshotAction.request({\n // properties to be passed to API\n host,\n context: attributes.id,\n chart: attributes.id,\n format: chartSettings.format,\n points: snapshotDataPoints as number,\n group,\n gtime: attributes.gtime || 0,\n // for snapshots, always eliminate zero dimensions\n options: getChartURLOptions(attributes, true),\n after: after || null,\n before: before || null,\n dimensions: attributes.dimensions,\n aggrMethod: attributes.aggrMethod,\n nodeIDs: attributes.nodeIDs,\n chartLibrary,\n id: chartUuid,\n groupBy: 
attributes.groupBy,\n }))\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }) // todo fetch based on state\n return null\n}\n\n\ninterface SnapshotLoaderContainerProps {\n attributes: Attributes\n chartUuid: string\n}\nexport const SnapshotLoaderContainer = ({\n attributes,\n chartUuid,\n}: SnapshotLoaderContainerProps) => {\n const isSnapshotMode = useSelector(selectIsSnapshotMode)\n if (!isSnapshotMode) {\n return null\n }\n return \n}\n","import React, { memo } from \"react\"\nimport { createPortal } from \"react-dom\"\n\nimport { getAttributes } from \"../utils/transformDataAttributes\"\nimport { ChartWithLoader } from \"./chart-with-loader\"\nimport { DisableOutOfView } from \"./disable-out-of-view\"\nimport { SnapshotLoaderContainer } from \"./snapshot-loader\"\n\nconst getNodesArray = () => Array.from(document.querySelectorAll(\"[data-netdata]\"))\n\nexport const Portals = memo(() => {\n const nodes = getNodesArray()\n return (\n <>\n {nodes.map((node, index) => {\n const attributesMapped = getAttributes(node)\n const chartId = `${attributesMapped.id}-${index}`\n return (\n createPortal(\n <>\n \n \n \n \n ,\n node,\n )\n )\n })}\n \n )\n})\n","import { useEffect, useState } from \"react\"\n\nimport { axiosInstance } from \"utils/api\"\n\nexport const useHttp = (\n url: string | undefined,\n shouldMakeCall : boolean = true,\n isExternal?: boolean,\n) => {\n const [isFetching, setIsFetching] = useState(false)\n const [isError, setIsError] = useState(false)\n const [data, setData] = useState(null)\n useEffect(() => {\n if (shouldMakeCall && url) {\n const options = isExternal\n ? { headers: null, withCredentials: false }\n : {}\n\n setIsFetching(true)\n axiosInstance.get(url, options)\n .then((r) => {\n if (r.data) {\n setData(r.data)\n setIsError(false)\n setIsFetching(false)\n }\n })\n .catch((error) => {\n // eslint-disable-next-line no-console\n console.warn(`error fetching ${url}`, error)\n setIsError(true)\n setIsFetching(false)\n })\n }\n }, [isExternal, shouldMakeCall, url])\n // force triple instead of array\n return [data, isFetching, isError] as [T | null, boolean, boolean]\n}\n","import { ReactNode, useEffect, useRef } from \"react\"\nimport { createPortal } from \"react-dom\"\n\nconst modalRoot = document.getElementById(\"modal-root\") as HTMLElement\n\ntype Props = {\n children: ReactNode\n}\nexport const ModalPortal = ({ children }: Props) => {\n const element = useRef(document.createElement(\"div\"))\n useEffect(() => {\n modalRoot.appendChild(element.current)\n return () => {\n // eslint-disable-next-line react-hooks/exhaustive-deps\n modalRoot.removeChild(element.current)\n }\n }, [])\n\n return createPortal(children, element.current)\n}\n","import React, { useRef, useEffect } from \"react\"\nimport classNames from \"classnames\"\n\nimport { useSelector } from \"store/redux-separate-context\"\nimport { ModalPortal } from \"domains/dashboard/components/modal-portal\"\nimport {\n selectAmountOfCharts, selectAmountOfFetchedCharts, selectNameOfAnyFetchingChart,\n} from \"domains/chart/selectors\"\n\nimport \"./print-modal.scss\"\n\nconst TIMEOUT_DURATION_TO_MAKE_SURE_ALL_CHARTS_HAVE_BEEN_RENDERED = 1000\n\nexport const PrintModal = () => {\n const printModalElement = useRef(null)\n const isFetchingMetrics = true\n\n useEffect(() => {\n // todo replace bootstrap with newer solution (custom or react-compatible library)\n if (printModalElement.current) {\n const $element = window.$(printModalElement.current)\n $element.modal(\"show\")\n }\n }) // render just 
once\n\n  const amountOfCharts = useSelector(selectAmountOfCharts)\n  const amountOfFetchedCharts = useSelector(selectAmountOfFetchedCharts)\n  const nameOfAnyFetchingChart = useSelector(selectNameOfAnyFetchingChart)\n\n  const percentage = amountOfCharts === 0\n    ? 0\n    : (amountOfFetchedCharts / amountOfCharts) * 100\n\n  useEffect(() => {\n    if (percentage === 100) {\n      setTimeout(() => {\n        // in case the browser is not able to close the window\n        window.$(printModalElement.current).modal(\"hide\")\n        window.print()\n        window.close()\n      }, TIMEOUT_DURATION_TO_MAKE_SURE_ALL_CHARTS_HAVE_BEEN_RENDERED)\n    }\n  }, [percentage])\n\n\n  const progressBarText = nameOfAnyFetchingChart\n    && `${Math.round(percentage)}%, ${nameOfAnyFetchingChart}`\n\n\n  return (\n    \n      \n
    \n
    \n
    \n \n ×\n \n

    \n Preparing dashboard for printing...\n

    \n
    \n
    \n Please wait while we initialize and render all the charts on the dashboard.\n \n \n \n {progressBarText}\n \n
    \n
    \n The print dialog will appear as soon as we finish rendering the page.\n
    \n
    \n
    \n \n \n
    \n )\n}\n","import styled from \"styled-components\"\nimport { getSizeBy, getColor } from \"@netdata/netdata-ui\"\n\nexport const SocialMediaContainer = styled.div`\n width: 185px;\n padding: ${getSizeBy(2)};\n background: ${getColor(\"borderSecondary\")};\n\n font-size: 12px;\n margin-bottom: ${getSizeBy(3)};\n`\n\nexport const FirstRow = styled.div`\n display: flex;\n justify-content: space-between;\n`\n\nexport const GithubCopy = styled.div`\n\n`\n\nexport const GithubCopyLine = styled.div`\n\n`\n\nexport const SocialMediaLink = styled.a`\n &, &:hover {\n color: ${getColor(\"main\")};\n }\n`\n\nexport const GithubStarQuestion = styled(SocialMediaLink)``\n\nexport const GithubIcon = styled(SocialMediaLink)`\n font-size: 24px;\n`\n\nexport const TwitterIcon = styled(SocialMediaLink)`\n font-size: 17px;\n`\n\nexport const FacebookIcon = styled(SocialMediaLink)`\n font-size: 23px;\n`\n\nexport const Separator = styled.div`\n margin-top: ${getSizeBy(2)};\n border-top: 1px solid ${getColor(\"separator\")};\n\n`\nexport const SecondRow = styled.div`\n margin-top: ${getSizeBy(2)};\n display: flex;\n align-items: center;\n justify-content: space-between;\n`\n\nexport const SecondRowText = styled.span`\n font-size: 10px;\n`\n","import React from \"react\"\n\nimport * as S from \"./styled\"\n\nexport const SidebarSocialMedia = () => (\n \n \n \n \n Do you like Netdata?\n \n \n Give us a star!\n \n \n \n \n \n \n \n \n \n And share the word!\n \n \n \n \n \n \n \n \n \n)\n","import React, { useRef } from \"react\"\nimport { createPortal } from \"react-dom\"\n\ninterface Props {\n children: React.ReactNode\n}\nexport const SidebarSocialMediaPortal = ({\n children,\n}: Props) => {\n const element = useRef(document.querySelector(\"#sidebar-end-portal-container\"))\n return createPortal(children, element.current!)\n}\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { ToastContainer, ToastContainerProps } from \"react-toastify\"\nimport \"react-toastify/dist/ReactToastify.min.css\"\n\nimport { getColor } from \"@netdata/netdata-ui\"\n\nimport { notificationsZIndex } from \"styles/z-index\"\n\nconst WrappedToastContainer = ({\n className,\n ...rest\n}: ToastContainerProps & { className?: string }) => (\n
    \n {/* eslint-disable-next-line react/jsx-props-no-spreading */}\n \n
    \n)\n\nexport const NotificationsContainer = styled(WrappedToastContainer)`\n .Toastify__toast-container {\n position: fixed;\n width: unset;\n min-width: 400px;\n max-width: 500px;\n ${notificationsZIndex};\n color: ${getColor([\"neutral\", \"limedSpruce\"])};\n }\n .Toastify__toast {\n padding: 0;\n padding-top: 5px;\n }\n .Toastify__toast--error {\n background: ${getColor([\"red\", \"lavender\"])};\n border: 1px solid ${getColor(\"error\")};\n }\n .Toastify__toast--warning {\n }\n .Toastify__toast--success {\n background: ${getColor([\"green\", \"frostee\"])};\n border: 1px solid ${getColor(\"success\")};\n }\n .Toastify__toast-body {\n }\n .Toastify__progress-bar {\n bottom: unset;\n top: 0;\n }\n .Toastify__progress-bar--success {\n background-color: ${getColor(\"success\")};\n }\n .Toastify__progress-bar--error {\n background-color: ${getColor(\"error\")};\n }\n`\n","import React from \"react\"\nimport { Icon, Flex } from \"@netdata/netdata-ui\"\n\nconst Item = ({ icon, children, hasBorder }) => (\n \n {!!icon && }\n {children}\n \n)\n\nexport default Item\n","import React from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport Item from \"../item\"\nimport { useSelector } from \"@/src/store/redux-separate-context\"\n\nconst hostNameSelector = state => {\n const snapshot = state.global.snapshot\n const data = state.global.chartsMetadata.data\n\n if (!snapshot && !data) return \"\"\n return snapshot ? snapshot.hostname : data.hostname\n}\n\nconst Node = () => {\n const hostname = useSelector(hostNameSelector)\n\n return (\n \n \n {hostname}\n \n \n )\n}\n\nexport default Node\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst tooltipBackground = [\"neutral\", \"black\"]\n\nconst CustomTooltip = ({ children, isBasic }) => (\n \n {children}\n \n)\n\nexport default CustomTooltip\n","import React from \"react\"\nimport CustomTooltip from \"@/src/components/tooltips/customTooltip\"\n\nconst getContent = (content, { isBasic }) => {\n const contentNode = typeof content === \"function\" ? 
content() : content\n if (typeof content === \"string\" || isBasic) {\n return {contentNode}\n }\n return contentNode\n}\n\nexport default getContent\n","import React, { useCallback } from \"react\"\nimport { Tooltip as BaseTooltip } from \"@netdata/netdata-ui\"\nimport getContent from \"./getContent\"\n\nconst Tooltip = ({ children, content, isBasic, ...rest }) => {\n const getTooltipContent = useCallback(() => getContent(content, { isBasic }), [content, isBasic])\n return (\n \n {children}\n \n )\n}\n\nexport default Tooltip\n","import React, { useCallback } from \"react\"\nimport { Flex, Button } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport { setGlobalPauseAction } from \"domains/global/actions\"\nimport { useDispatch } from \"store/redux-separate-context\"\n\nconst Options = () => {\n const dispatch = useDispatch()\n const onClick = useCallback(() => dispatch(setGlobalPauseAction()), [dispatch])\n return (\n \n \n \n \n \n \n \n \n \n \n \n )\n}\n\nexport default Options\n","import React from \"react\"\nimport { Button } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nimport { useHttp } from \"hooks/use-http\"\n\nconst NETDATA_LATEST_VERSION_URL = \"https://api.github.com/repos/netdata/netdata/releases/latest\"\nconst NETDATA_LATEST_GCS_VERSION_URL =\n \"https://www.googleapis.com/storage/v1/b/netdata-nightlies/o/latest-version.txt\"\n\nconst transformGcsVersionResponse = data => data.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\")\n\nconst transformGithubResponse = data => data?.tag_name.replace(/(\\r\\n|\\n|\\r| |\\t)/gm, \"\")\n\nconst versionsMatch = (v1, v2) => {\n if (v1 === v2) {\n return true\n }\n let s1 = v1.split(\".\")\n let s2 = v2.split(\".\")\n // Check major version\n let n1 = parseInt(s1[0].substring(1, 2), 10)\n let n2 = parseInt(s2[0].substring(1, 2), 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n // Check minor version\n n1 = parseInt(s1[1], 10)\n n2 = parseInt(s2[1], 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n // Split patch: format could be e.g. 0-22-nightly\n s1 = s1[2].split(\"-\")\n s2 = s2[2].split(\"-\")\n\n n1 = parseInt(s1[0], 10)\n n2 = parseInt(s2[0], 10)\n if (n1 < n2) return false\n if (n1 > n2) return true\n\n n1 = s1.length > 1 ? parseInt(s1[1], 10) : 0\n n2 = s2.length > 1 ? parseInt(s2[1], 10) : 0\n if (n1 < n2) return false\n return true\n}\n\nconst VersionControl = ({ currentVersion, releaseChannel }) => {\n const isStableReleaseChannel = releaseChannel === \"stable\"\n const [githubVersion] = useHttp(NETDATA_LATEST_VERSION_URL, isStableReleaseChannel, true)\n\n const [gcsVersionResponse] = useHttp(NETDATA_LATEST_GCS_VERSION_URL, !isStableReleaseChannel)\n const [mediaLinkResponse] = useHttp(gcsVersionResponse?.mediaLink, Boolean(gcsVersionResponse))\n\n const latestVersion = isStableReleaseChannel\n ? transformGithubResponse(githubVersion)\n : mediaLinkResponse\n ? 
transformGcsVersionResponse(mediaLinkResponse)\n : null\n\n if (!latestVersion) {\n return null\n }\n const isNewVersionAvailable = !versionsMatch(currentVersion, latestVersion)\n\n return (\n \n \n \n )\n}\n\nexport default VersionControl\n","import React from \"react\"\nimport VersionControl from \"components/app-header/components/versionControl\"\nimport { useSelector } from \"@/src/store/redux-separate-context\"\n\nconst versionSelector = state => {\n const { data } = state.global.chartsMetadata\n\n if (!data) return null\n\n const { version, release_channel: releaseChannel } = data\n return {\n version,\n releaseChannel,\n }\n}\n\nconst Version = () => {\n const data = useSelector(versionSelector)\n return (\n data && \n )\n}\n\nexport default Version\n","import { useState, useCallback } from \"react\"\n\n/**\n * @example\n * const [value, toggle, toggleOn, toggleOff] = useToggle(false);\n *\n * @param {Boolean} initialValue\n */\n\nconst useToggle = (initialValue = false) => {\n const [value, setToggle] = useState(!!initialValue)\n const toggle = useCallback(() => setToggle(oldValue => !oldValue), [])\n const toggleOn = useCallback(() => setToggle(true), [])\n const toggleOff = useCallback(() => setToggle(false), [])\n\n return [value, toggle, toggleOn, toggleOff]\n}\n\nexport default useToggle\n","import { useEffect, useState } from \"react\"\n\nconst useLocalStorage = (key, defaultValue) => {\n const [value, setValue] = useState(() => getValueFromStorage(key, defaultValue))\n\n useEffect(() => localStorage.setItem(key, JSON.stringify(value)), [key, value])\n\n return [value, setValue]\n}\n\nconst getValueFromStorage = (key, defaultValue = \"\") =>\n JSON.parse(localStorage.getItem(key)) ?? defaultValue\n\nexport default useLocalStorage\n","import styled from \"styled-components\"\nimport { getColor, getSizeBy, Icon } from \"@netdata/netdata-ui\"\nimport { Menu } from \"@rmwc/menu\"\n\nexport const RootContainer = styled.div`\n width: 100%;\n height: 100%;\n display: flex;\n flex-flow: row nowrap;\n align-items: center;\n`\n\nexport const StyledMenu = styled(Menu)``\n\nexport const DropdownContainer = styled.div`\n cursor: pointer;\n color: ${getColor(\"bright\")};\n .mdc-menu-surface {\n border-radius: 0;\n .mdc-list {\n padding: 0;\n }\n .mdc-list-item {\n padding: 0 ${getSizeBy(5)} 0 ${getSizeBy(5)};\n font-size: 14px;\n height: ${getSizeBy(6)};\n }\n }\n`\n\nexport const ListContainer = styled.div`\n padding: ${getSizeBy(3)} 0;\n`\n\nexport const OpenerIcon = styled(Icon)`\n flex-shrink: 0;\n flex-grow: 0;\n margin-left: ${({ noMargin }) => (noMargin ? 
\"unset\" : \"16px\")};\n fill: ${getColor(\"bright\")};\n width: 10px;\n height: 5px;\n`\n","import styled from \"styled-components\"\nimport { getColor, getSizeBy, Icon, Drop } from \"@netdata/netdata-ui\"\nimport { Dropdown } from \"@/src/components/mdx-components/dropdown\"\nimport { dialogsZIndex, customDropdownZIndex } from \"@/src/styles/z-index\"\n\nexport const PickerBox = styled.div`\n display: flex;\n position: relative;\n min-width: ${getSizeBy(102)};\n min-height: ${getSizeBy(43)};\n flex-direction: column;\n align-items: flex-end;\n background-color: ${getColor(\"mainBackground\")};\n color: ${getColor(\"text\")};\n z-index: ${dialogsZIndex};\n border-radius: 8px;\n`\n\nexport const StyledTimePeriod = styled.span`\n margin-top: ${getSizeBy(3)};\n cursor: pointer;\n width: 187px;\n height: ${getSizeBy(2)};\n &:first-of-type {\n margin-top: ${getSizeBy(1)};\n }\n &:last-of-type {\n margin-bottom: ${getSizeBy(1)};\n }\n & > span:hover {\n color: ${getColor(\"textLite\")};\n }\n`\nexport const StyledCustomTimePeriod = styled.span`\n margin: ${getSizeBy(1)} ${getSizeBy(3)} 0;\n color: ${({ isSelected, theme }) => getColor(isSelected ? \"primary\" : \"text\")({ theme })};\n cursor: pointer;\n &:first-of-type {\n margin-top: 0;\n }\n &:hover {\n color: ${getColor(\"textLite\")};\n }\n`\n\nexport const StyledDropdown = styled(Dropdown)`\n width: 88px;\n height: 32px;\n padding-top: 8px;\n padding-bottom: 8px;\n padding-left: 8px;\n padding-right: 7px;\n border: 1px solid ${getColor(\"border\")};\n box-sizing: border-box;\n border-radius: 4px;\n display: flex;\n justify-content: center;\n align-items: center;\n color: ${getColor(\"text\")};\n .mdc-menu-surface--anchor {\n .mdc-menu-surface--open {\n ${customDropdownZIndex}\n margin-top: ${getSizeBy(2)};\n background: ${getColor(\"mainBackground\")};\n border-radius: 4px;\n }\n }\n .mdc-list {\n display: flex;\n flex-direction: column;\n justify-content: center;\n align-items: center;\n }\n`\nexport const DropdownIcon = styled(Icon)`\n fill: ${getColor(\"text\")};\n width: 12px;\n height: 12px;\n`\n\nexport const CustomInput = styled.input`\n border: 1px solid ${getColor(\"border\")};\n color: inherit;\n background: ${getColor(\"mainBackground\")};\n box-sizing: border-box;\n border-radius: 4px;\n padding: 4px;\n width: 32px;\n height: 32px;\n margin-left: 10px;\n margin-right: 10px;\n outline: none;\n &:focus {\n border: 1px solid ${getColor(\"primary\")};\n }\n`\nexport const StyledDrop = styled(Drop).attrs({\n background: \"mainBackground\",\n round: 2,\n margin: [4, 0, 0],\n border: { side: \"all\", color: \"elementBackground\" },\n animation: true,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\nexport const StyledHR = styled.hr`\n border: none;\n margin: 0;\n border-left: 1px solid ${getColor(\"borderSecondary\")};\n height: 284px;\n`\n","import React, { useRef } from \"react\"\nimport { List } from \"@rmwc/list\"\nimport { MenuSurfaceAnchor, MenuSurface } from \"@rmwc/menu\"\nimport { RootContainer, ListContainer, DropdownContainer, OpenerIcon } from \"./styled\"\n\nexport const Dropdown = ({\n title,\n children,\n className,\n renderTitle,\n isOpen = false,\n onMenuToggle,\n anchorCorner = \"bottomStart\",\n renderOpener,\n}) => {\n const ref = useRef()\n\n const handleOpenState = () => {\n onMenuToggle(!isOpen)\n }\n\n const handleClose = () => {\n onMenuToggle(false)\n }\n\n return (\n \n \n \n {typeof children === \"function\" ? 
(\n isOpen && (\n \n {children({ maxHeight: ref.current?.root.ref.style.maxHeight })}\n \n )\n ) : (\n \n {children}\n \n )}\n \n \n {title || (renderTitle && renderTitle())}\n {renderOpener ? (\n renderOpener()\n ) : (\n \n )}\n \n \n \n )\n}\n","import React, { memo, useCallback } from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { StyledTimePeriod } from \"./styled\"\n\nconst TimePeriod = ({ value, period, resolution, isSelected, setTimeRange, tagging }) => {\n const onClick = useCallback(\n () => setTimeRange(value, resolution),\n [value, resolution, setTimeRange]\n )\n return (\n \n {period}\n \n )\n}\n\nexport default memo(TimePeriod)\n","import { format, formatDistanceStrict, parse, getTime, getUnixTime, add, isMatch } from \"date-fns\"\n\nconst MINUTE = 60\nconst HOUR = MINUTE * 60\nconst DAY = HOUR * 24\nconst MONTH = 30 * DAY\n\nexport const maxTimePeriodInUnix = 94694400\nexport const dateResolutions = [\"minutes\", \"hours\", \"days\", \"months\"]\n\nconst resolutionsMapping = {\n minutes: MINUTE,\n hours: HOUR,\n days: DAY,\n months: MONTH,\n}\n\nexport const getCustomTimePeriod = (after, resolution) =>\n Math.round(after / resolutionsMapping[resolution])\n\nexport const parseInputPeriod = (timeCorrection, resolution) => {\n const customRange = add(new Date(0), {\n [resolution]: timeCorrection,\n })\n return -getUnixTime(customRange)\n}\n\nconst focusTaggingMap = {\n startDate: \"start\",\n endDate: \"finish\",\n}\n\nexport const getFocusTagging = focusedInput => focusTaggingMap[focusedInput]\n\nexport const timePeriods = [\n { period: \"Last 5 minutes\", value: -5 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 15 minutes\", value: -15 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 30 minutes\", value: -30 * MINUTE, resolution: \"minutes\" },\n { period: \"Last 2 hours\", value: -2 * HOUR, resolution: \"hours\" },\n { period: \"Last 6 hours\", value: -6 * HOUR, resolution: \"hours\" },\n { period: \"Last 12 hours\", value: -12 * HOUR, resolution: \"hours\" },\n { period: \"Last Day\", value: -DAY, resolution: \"days\" },\n { period: \"Last 2 Days\", value: -2 * DAY, resolution: \"days\" },\n { period: \"Last 7 Days\", value: -7 * DAY, resolution: \"days\" },\n]\n\nexport const formatDates = (startDate, endDate) => {\n const formattedStartDate = format(startDate, \"MMMM d yyyy, H:mm:ss\")\n const formattedEndDate = format(endDate, \"MMMM d yyyy, H:mm:ss\")\n return {\n formattedStartDate,\n formattedEndDate,\n }\n}\n\nexport const formatOffset = offset => {\n if (!offset) return \"+00:00\"\n const splitOffset = offset.toString().split(\".\")\n const mathSign = splitOffset[0] > 0 ? \"+\" : \"-\"\n const absoluteNumber = Math.abs(splitOffset[0]).toString()\n const firstPart = `${mathSign}${absoluteNumber.padStart(2, 0)}`\n return splitOffset.length > 1\n ? `${firstPart}:${String(splitOffset[1] * 0.6).padEnd(2, 0)}`\n : `${firstPart}:00`\n}\n\nexport const getDateWithOffset = (date, utcOffset) => {\n const formattedDate = isMatch(date, \"MMMM d yyyy, H:mm\")\n ? 
date\n : parse(date, \"MMMM d yyyy, H:mm\", Date.now())\n return parse(`${formattedDate} ${formatOffset(utcOffset)}`, \"MMMM d yyyy, H:mm xxx\", Date.now())\n}\n\nexport const getTimePeriod = (startDate, endDate) =>\n formatDistanceStrict(getTime(startDate), getTime(endDate))\n","import React from \"react\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport TimePeriod from \"./timePeriod\"\nimport { timePeriods } from \"./utils\"\n\nconst TimePeriods = ({ handleTimePeriodChange, selectedDate, tagging }) => (\n \n {timePeriods.map(({ period, value, resolution }) => (\n \n ))}\n \n)\n\nexport default TimePeriods\n","import React, { useCallback, useEffect, useState } from \"react\"\nimport { isValid, add, getUnixTime } from \"date-fns\"\nimport { Flex, Text } from \"@netdata/netdata-ui\"\nimport {\n getCustomTimePeriod,\n parseInputPeriod,\n dateResolutions,\n maxTimePeriodInUnix,\n} from \"./utils\"\nimport { StyledDropdown, DropdownIcon, CustomInput, StyledCustomTimePeriod } from \"./styled\"\n\nconst CustomTimePeriod = ({ handleTimePeriodChange, value, resolution, tagging }) => {\n const getInputValue = () => (value <= 0 ? getCustomTimePeriod(-value, resolution) : 0)\n const [inputValue, setInputValue] = useState(getInputValue)\n const [isDropdownOpen, toggleDropdown] = useState(false)\n\n // eslint-disable-next-line react-hooks/exhaustive-deps\n useEffect(() => setInputValue(getInputValue()), [value])\n\n const onChange = useCallback(e => setInputValue(e.target.value), [])\n\n const onBlur = useCallback(\n e => {\n const currentValue = Number(e.currentTarget.value)\n const isValidInput =\n !Number.isNaN(currentValue) && Number.isInteger(currentValue) && currentValue > 0\n const timePeriod = add(new Date(0), {\n [resolution]: currentValue,\n })\n const isValidTimePeriod =\n isValidInput && isValid(timePeriod) && getUnixTime(timePeriod) <= maxTimePeriodInUnix\n if (isValidTimePeriod)\n return handleTimePeriodChange(parseInputPeriod(currentValue, resolution), resolution)\n return value <= 0 ? 
setInputValue(getCustomTimePeriod(-value, resolution)) : setInputValue(0)\n },\n [resolution, value, handleTimePeriodChange]\n )\n\n const onChangeResolution = useCallback(\n newResolution => {\n return () => {\n handleTimePeriodChange(parseInputPeriod(inputValue, newResolution), newResolution)\n toggleDropdown(false)\n }\n },\n [inputValue, handleTimePeriodChange]\n )\n\n const renderTitle = () => (\n \n {resolution}\n \n \n )\n return (\n \n Last\n \n null}\n >\n {() =>\n dateResolutions.map(dateResolution => (\n \n {dateResolution}\n \n ))\n }\n \n \n )\n}\n\nexport default CustomTimePeriod\n","import React from \"react\"\nimport DatePickerLib from \"react-datepicker\"\nimport \"react-datepicker/dist/react-datepicker.css\"\n\nconst DatePicker = ({\n selected,\n selectsStart = false,\n selectsEnd = false,\n startDate,\n endDate,\n onChange,\n minDate,\n maxDate,\n dateFormat = \"MM/dd/yyyy\",\n open = false,\n startOpen = false,\n inline = false,\n selectsRange = false,\n monthsShown = 1,\n showPopperArrow = true,\n calendarContainer = null,\n}) => (\n \n)\n\nexport default DatePicker\n","import { getColor, getRgbColor } from \"@netdata/netdata-ui\"\nimport styled from \"styled-components\"\n\nexport const StyledDateInput = styled.input`\n width: 100%;\n text-align: center;\n border: 1px solid ${getColor(\"border\")};\n color: inherit;\n background: ${getColor(\"mainBackground\")};\n box-sizing: border-box;\n border-radius: 4px;\n padding: 4px;\n height: 32px;\n margin-left: 20px;\n margin-right: 20px;\n outline: none;\n &:focus {\n border: 1px solid ${getColor(\"primary\")};\n }\n`\nexport const StyledCalendar = styled.div`\n background: ${getColor(\"mainBackground\")};\n border: 0;\n .react-datepicker {\n &__navigation {\n top: 8px;\n &-icon::before {\n border-color: ${getColor(\"text\")};\n }\n }\n &__header {\n background: ${getColor(\"mainBackground\")};\n border: 0;\n .react-datepicker__current-month {\n color: ${getColor(\"main\")};\n font-weight: normal;\n }\n .react-datepicker__day-name {\n color: ${getColor(\"textLite\")};\n }\n }\n &__day {\n color: ${getColor(\"main\")};\n &:hover {\n background: ${getColor(\"elementBackground\")};\n }\n &--disabled {\n color: ${getColor(\"textLite\")};\n &:hover {\n background: inherit;\n }\n }\n &--keyboard-selected,\n &--keyboard-selected:hover {\n color: ${getColor(\"main\")};\n background: inherit;\n border-radius: inherit;\n }\n &--selected,\n &--selected:hover {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-radius: 8px;\n }\n &--in-selecting-range,\n &--in-range {\n color: ${getColor(\"primary\")};\n background: ${getColor(\"elementBackground\")};\n border-radius: 0;\n }\n &--selecting-range-start,\n &--range-start {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-top-left-radius: 8px;\n border-bottom-left-radius: 8px;\n &:hover {\n color: ${getColor(\"bright\")};\n background: ${getRgbColor([\"green\", \"netdata\"], 0.8)};\n border-radius: 0;\n border-top-left-radius: 8px;\n border-bottom-left-radius: 8px;\n }\n }\n &--selecting-range-end,\n &--range-end {\n color: ${getColor(\"bright\")};\n background: ${getColor(\"primary\")};\n border-top-right-radius: 8px;\n border-bottom-right-radius: 8px;\n &:hover {\n color: ${getColor(\"bright\")};\n background: ${getRgbColor([\"green\", \"netdata\"], 0.8)};\n border-top-right-radius: 8px;\n border-bottom-right-radius: 8px;\n }\n }\n }\n }\n`\n","import React, { useState, useEffect, useCallback } from \"react\"\nimport { 
format, isValid, getTime } from \"date-fns\"\nimport { getDateWithOffset } from \"./utils\"\nimport { StyledDateInput } from \"../datePicker/styled\"\nimport { useDateTime } from \"@/src/utils/date-time\"\n\nconst DatePickerInput = ({\n name = \"\",\n value = \"\",\n onDatesChange,\n onFocus,\n placeholderText = \"\",\n}) => {\n const { utcOffset } = useDateTime()\n const [inputValue, setInputValue] = useState(\"\")\n const onChange = useCallback(e => {\n const date = e.target.value\n setInputValue(date)\n }, [])\n const setFormattedValue = useCallback(value => {\n if (isValid(value)) {\n const formattedDate = format(value, \"MMMM d yyyy, H:mm\")\n setInputValue(formattedDate)\n }\n }, [])\n const onBlur = useCallback(\n e => {\n const parsedDate = getDateWithOffset(e.target.value, utcOffset)\n const isValidDate = isValid(parsedDate) && getTime(parsedDate) > 0\n if (isValidDate) {\n const timestamp = getTime(parsedDate)\n onDatesChange(timestamp, () => setFormattedValue(value))\n } else setFormattedValue(value)\n },\n [value, utcOffset, onDatesChange, setFormattedValue]\n )\n\n useEffect(() => setFormattedValue(value), [value, setFormattedValue])\n\n return (\n \n )\n}\n\nexport default DatePickerInput\n","import { useDateTime } from \"@/src/utils/date-time\"\nimport { useCallback } from \"react\"\n\nconst useLocaleDate = () => {\n const { localeTimeString, localeDateString } = useDateTime()\n return useCallback(\n date => {\n return `${localeDateString(date, { locale: \"en-us\", long: false })} ${localeTimeString(date, {\n secs: false,\n })}`\n },\n [localeTimeString, localeDateString]\n )\n}\n\nexport default useLocaleDate\n","import { useMemo } from \"react\"\nimport { toDate } from \"date-fns\"\nimport useLocaleDate from \"./useLocaleDate\"\n\nexport const convertTimestampToDate = (timestamp, getLocaleDate) => {\n if (timestamp > 0) {\n return toDate(new Date(getLocaleDate(timestamp)))\n } else if (timestamp || timestamp === 0)\n return toDate(new Date(getLocaleDate(new Date().valueOf() + timestamp * 1000)))\n return null\n}\n\nconst useConvertedDates = (startDate, endDate) => {\n const getLocaleDate = useLocaleDate()\n return useMemo(\n () => [\n convertTimestampToDate(startDate, getLocaleDate),\n convertTimestampToDate(endDate, getLocaleDate),\n ],\n [startDate, endDate, getLocaleDate]\n )\n}\n\nexport default useConvertedDates\n","import { Flex } from \"@netdata/netdata-ui\"\nimport React, { useCallback } from \"react\"\nimport { getTime, isBefore, format } from \"date-fns\"\nimport { useDateTime } from \"@/src/utils/date-time\"\nimport DatePicker from \"../datePicker/datePickerLib\"\nimport DatePickerInput from \"./datePickerInput\"\nimport useConvertedDates, { convertTimestampToDate } from \"./useConvertedDate\"\nimport useLocaleDate from \"./useLocaleDate\"\nimport { getDateWithOffset } from \"./utils\"\nimport { StyledCalendar } from \"../datePicker/styled\"\n\nconst DatePickerWrapper = ({\n startDate,\n setStartDate,\n endDate,\n setEndDate,\n onDatesChange,\n onInputFocus,\n}) => {\n const getLocaleDate = useLocaleDate()\n const [convertedStartDate, convertedEndDate] = useConvertedDates(startDate, endDate)\n const { utcOffset } = useDateTime()\n const setValidStartDate = useCallback(\n (startDate, setPreviousValue) =>\n isBefore(convertTimestampToDate(startDate, getLocaleDate), convertedEndDate)\n ? 
setStartDate(startDate)\n : setPreviousValue(),\n [convertedEndDate, getLocaleDate, setStartDate]\n )\n\n const setValidEndDate = useCallback(\n (endDate, setPreviousValue) =>\n isBefore(convertedStartDate, convertTimestampToDate(endDate, getLocaleDate))\n ? setEndDate(endDate)\n : setPreviousValue(),\n [convertedStartDate, getLocaleDate, setEndDate]\n )\n\n const onChange = useCallback(\n dates => {\n const [startDate, endDate] = dates\n\n const startDateWithOffset = startDate\n ? getDateWithOffset(format(startDate, \"MMMM d yyyy, H:mm\"), utcOffset)\n : startDate\n const endDateWithOffset = endDate\n ? getDateWithOffset(format(endDate, \"MMMM d yyyy, H:mm\"), utcOffset)\n : endDate\n\n const startDateTimestamp = getTime(startDateWithOffset) || null\n const endDateTimestamp = getTime(endDateWithOffset) || null\n\n onDatesChange(startDateTimestamp, endDateTimestamp)\n },\n [utcOffset, onDatesChange]\n )\n\n return (\n \n \n \n \n \n \n \n )\n}\n\nexport default DatePickerWrapper\n","import React, { useMemo } from \"react\"\nimport { Flex, Icon, TextSmall } from \"@netdata/netdata-ui\"\nimport { formatDates, getTimePeriod } from \"./utils\"\nimport useConvertedDates from \"./useConvertedDate\"\n\nconst PeriodIndication = ({ startDate, endDate }) => {\n const [convertedStart, convertedEnd] = useConvertedDates(startDate, endDate)\n\n const { formattedStartDate, formattedEndDate } = useMemo(\n () => formatDates(convertedStart, convertedEnd),\n [convertedStart, convertedEnd]\n )\n const timePeriod = useMemo(\n () => getTimePeriod(convertedStart, convertedEnd),\n [convertedStart, convertedEnd]\n )\n\n return (\n \n \n \n From\n \n \n {formattedStartDate}\n \n \n \n \n \n To\n \n \n {formattedEndDate}\n \n \n \n /\n \n {timePeriod}\n \n \n \n )\n}\n\nexport default PeriodIndication\n","import moment from \"moment\"\n\nexport const SECONDS = 1000\nexport const MINUTE = SECONDS * 60\nexport const HOUR = MINUTE * 60\nexport const DAY = HOUR * 24\nexport const MONTH = DAY * 30\n\nconst resolutionMap = [\n { value: DAY, unit: \"d\" },\n { value: HOUR, unit: \"h\" },\n { value: MINUTE, unit: \"min\" },\n { value: SECONDS, unit: \"s\" },\n]\n\nexport const getStartDate = start =>\n start < 0 ? moment(new Date()).add(start, \"seconds\") : moment(start)\nexport const getEndDate = end => (!end ? moment(new Date()) : moment(end))\nexport const getIsSameDate = (startDate, endDate) => startDate.isSame(endDate, \"day\")\nexport const getDuration = (startDate, endDate) => moment.duration(startDate.diff(endDate))\n\nconst getResolution = (value, resolution) => (value > 1 ? 
`${Math.floor(value)}${resolution}` : \"\")\n\nexport const getGranularDuration = duration => {\n let seconds = Math.abs(duration)\n const showSeconds = seconds < MINUTE\n return resolutionMap.reduce((acc, { value, unit }) => {\n if (value === SECONDS && !showSeconds) return acc\n acc = acc + getResolution(seconds / value, unit)\n seconds = seconds % value\n return acc\n }, \"\")\n}","import styled from \"styled-components\"\nimport { Flex, getColor } from \"@netdata/netdata-ui\"\n\nconst Container = styled(Flex)`\n cursor: pointer;\n\n &:hover * {\n color: ${getColor(\"textLite\")};\n fill: ${getColor(\"textLite\")};\n }\n`\n\nexport default Container\n","import React from \"react\"\nimport { Flex, TextSmall, Icon } from \"@netdata/netdata-ui\"\nimport { useDateTime } from \"utils/date-time\"\n\nconst DateBox = ({ isPlaying, startDate, endDate, isSameDate }) => {\n const { localeTimeString, localeDateString } = useDateTime()\n return (\n \n \n {localeDateString(startDate, { long: false })} •{\" \"}\n \n {localeTimeString(startDate, { secs: false })}\n \n \n \n \n {!isSameDate && `${localeDateString(endDate, { long: false })} • `}\n \n {localeTimeString(endDate, { secs: false })}\n \n \n \n )\n}\n\nexport default DateBox\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst DurationBox = ({ isPlaying, duration }) => {\n return (\n \n \n {isPlaying && (\n \n • last\n \n )}\n \n \n {duration}\n \n \n )\n}\n\nexport default DurationBox\n","import React, { useState, useMemo, useEffect, forwardRef } from \"react\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport { useSelector as useDashboardSelector } from \"store/redux-separate-context\"\nimport { selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport {\n getStartDate,\n getEndDate,\n getIsSameDate,\n getDuration,\n MINUTE,\n getGranularDuration,\n} from \"./utils\"\nimport Container from \"./container\"\nimport DateBox from \"./dateBox\"\nimport DurationBox from \"./durationBox\"\n\nconst PickerAccessorElement = forwardRef(\n (\n { onClick, start = 15 * MINUTE, end, isPlaying, isPickerOpen, setRangeValues, tagging },\n ref\n ) => {\n const [timeframe, setTimeframe] = useState()\n const startDate = getStartDate(start)\n const endDate = getEndDate(end)\n const globalPanAndZoom = useDashboardSelector(selectGlobalPanAndZoom)\n useEffect(() => {\n const after = getDuration(startDate, endDate).as(\"seconds\")\n if (!isPlaying && timeframe !== after) setTimeframe(Math.round(after))\n if (isPlaying && timeframe && !!globalPanAndZoom) {\n setRangeValues({ start: Math.round(timeframe) })\n setTimeframe(null)\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [startDate, endDate, timeframe, isPlaying])\n\n const isSameDate = useMemo(() => getIsSameDate(startDate, endDate), [startDate, endDate])\n const duration = useMemo(\n () => getGranularDuration(getDuration(startDate, endDate).as(\"milliseconds\")),\n // eslint-disable-next-line react-hooks/exhaustive-deps\n [isPlaying, startDate, endDate]\n )\n\n return (\n {} : \"Select a predefined or a custom timeframe\"}\n align=\"bottom\"\n plain\n >\n \n \n \n \n \n )\n }\n)\n\nexport default PickerAccessorElement\n","import React, { useState, useEffect, useMemo, useRef, useCallback } from \"react\"\nimport { Button, Flex } from \"@netdata/netdata-ui\"\nimport useToggle from \"hooks/useToggle\"\nimport useLocalStorage from \"hooks/useLocalStorage\"\nimport TimePeriods from \"./timePeriods\"\nimport CustomTimePeriod from 
\"./customTimePeriod\"\nimport DatePickerWrapper from \"./datePickerWrapper\"\nimport { getFocusTagging } from \"./utils\"\nimport PeriodIndication from \"./periodIndication\"\nimport AccessorElement from \"./accessorElement\"\nimport { PickerBox, StyledDrop, StyledHR } from \"./styled\"\n\nexport const reportEvent = (\n eventCategory,\n eventAction,\n eventLabel,\n eventValue,\n event = \"gaCustomEvent\"\n) => {\n if (window.dataLayer) {\n const eventData = { event, eventCategory, eventAction, eventLabel, eventValue }\n window.dataLayer.push(eventData)\n }\n}\n\nconst DatePickerDrop = ({\n onChange,\n values: { start: initialStartDate, end: initialEndDate } = {},\n defaultValue = -60 * 15,\n tagging = \"\",\n isPlaying,\n}) => {\n const [startDate, setStartDate] = useState(initialStartDate)\n const [endDate, setEndDate] = useState(initialStartDate)\n const [resolution, setResolution] = useLocalStorage(\"resolution\", \"minutes\")\n const [focusedInput, setFocusedInput] = useState(\"startDate\")\n const [isOpen, toggle, , close] = useToggle()\n const ref = useRef()\n\n const setDates = useCallback(({ startDate, endDate }) => {\n setStartDate(startDate)\n setEndDate(endDate)\n }, [])\n\n useEffect(() => {\n setDates({\n startDate: initialStartDate,\n endDate: initialEndDate,\n })\n }, [initialStartDate, initialEndDate, setDates])\n\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const clearChanges = useCallback(() => setDates({ startDate: defaultValue, endDate: 0 }), [])\n\n const onInputFocus = useCallback(e => {\n if (!e.target.name) return\n setFocusedInput(e.target.name)\n }, [])\n\n const togglePicker = useCallback(\n e => {\n e.stopPropagation()\n toggle()\n },\n [toggle]\n )\n\n const applyChanges = () => {\n onChange({\n start: startDate,\n end: endDate,\n })\n close()\n }\n\n const focusTagging = useMemo(() => getFocusTagging(focusedInput), [focusedInput])\n\n const isValidTimePeriod = startDate !== null && endDate !== null && startDate !== endDate\n const isApplyDisabled = startDate === initialStartDate && endDate === initialEndDate\n // eslint-disable-next-line react-hooks/exhaustive-deps\n const consistentDefaultValue = useMemo(() => defaultValue, [])\n const isClearDisabled = startDate === consistentDefaultValue\n\n const handleTimePeriodChange = useCallback(\n (time, resolution) => {\n setResolution(resolution)\n setDates({\n startDate: time,\n endDate: 0,\n })\n },\n [setDates, setResolution]\n )\n const onDatepickerChange = (startDate, endDate) => {\n setDates({ startDate, endDate })\n const date = focusTagging === \"finish\" ? endDate || startDate : startDate || endDate\n reportEvent(\"date-picker\", \"click-date-picker\", tagging, String(date))\n }\n\n const pickerDrop =\n ref.current && isOpen ? 
(\n \n \n \n \n \n \n \n \n \n \n \n {isValidTimePeriod && }\n \n \n \n \n \n \n \n ) : null\n\n return (\n <>\n \n {pickerDrop}\n \n )\n}\n\nexport default DatePickerDrop\n","import React, { memo, useEffect, useMemo } from \"react\"\nimport {\n useDispatch as useDashboardDispatch,\n useSelector as useDashboardSelector,\n} from \"store/redux-separate-context\"\nimport {\n resetGlobalPanAndZoomAction,\n setGlobalPanAndZoomAction,\n setDefaultAfterAction,\n} from \"domains/global/actions\"\nimport { selectDefaultAfter, selectGlobalPanAndZoom } from \"domains/global/selectors\"\nimport { setHashParams } from \"utils/hash-utils\"\nimport DatePickerDrop from \"./datePickerDrop\"\n\nconst ReduxDatePickerContainer = memo(({ tagging, isPlaying }) => {\n const dashboardDispatch = useDashboardDispatch()\n\n const globalPanAndZoom = useDashboardSelector(selectGlobalPanAndZoom)\n const isGlobalPanAndZoom = Boolean(globalPanAndZoom)\n\n const defaultAfter = useDashboardSelector(selectDefaultAfter)\n const pickedValues = useMemo(\n () =>\n isGlobalPanAndZoom\n ? { start: globalPanAndZoom.after, end: globalPanAndZoom.before }\n : {\n start: defaultAfter,\n end: 0,\n },\n [isGlobalPanAndZoom, globalPanAndZoom, defaultAfter]\n )\n\n function handlePickedValuesChange(params) {\n const { start, end } = params\n if (start < 0) {\n // live mode\n dashboardDispatch(\n // changes the default value, so it becomes inconsistent\n setDefaultAfterAction({\n after: start,\n })\n )\n if (isGlobalPanAndZoom) {\n dashboardDispatch(resetGlobalPanAndZoomAction())\n }\n } else {\n // global-pan-and-zoom mode\n dashboardDispatch(\n setGlobalPanAndZoomAction({\n after: start,\n before: end,\n })\n )\n }\n }\n\n useEffect(() => {\n const { start, end } = pickedValues\n const after = start.toString()\n const before = end.toString()\n if (window.urlOptions.after !== after || window.urlOptions.before !== before) {\n window.urlOptions.netdataPanAndZoomCallback(true, after, before)\n }\n setHashParams({ after, before })\n }, [pickedValues])\n return (\n \n )\n})\n\nexport default ReduxDatePickerContainer\n","import styled from \"styled-components\"\nimport { Flex, getRgbColor } from \"@netdata/netdata-ui\"\n\nconst getBackground = ({ theme, isPlaying }) => {\n const { name } = theme\n\n const background =\n name === \"Dark\"\n ? getRgbColor(isPlaying ? [\"green\", \"netdata\"] : [\"neutral\", \"tuna\"], isPlaying ? 0.3 : 1)\n : getRgbColor(isPlaying ? [\"green\", \"frostee\"] : [\"neutral\", \"blackhaze\"])\n\n return background({ theme })\n}\n\nconst Container = styled(Flex)`\n background: ${getBackground};\n`\n\nexport default Container\n","import styled from \"styled-components\"\nimport { Pill, getColor } from \"@netdata/netdata-ui\"\n\nconst getHoverColor = ({ isPlaying }) =>\n getColor(isPlaying ? [\"green\", \"chateau\"] : [\"neutral\", \"iron\"])\n\nconst StyledPill = styled(Pill).attrs(({ isPlaying }) => ({\n flavour: isPlaying ? \"success\" : \"neutral\",\n}))`\n &:hover {\n background: ${getHoverColor};\n border-color: ${getHoverColor};\n }\n`\n\nexport default StyledPill\n","import React, { useMemo } from \"react\"\nimport { useDispatch } from \"store/redux-separate-context\"\nimport { resetGlobalPauseAction, setGlobalPauseAction } from \"domains/global/actions\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport StyledPill from \"./styledPill\"\n\nconst getIcon = (isPlaying, isForcePlaying) => {\n if (!isPlaying) return \"pauseSolid\"\n return isForcePlaying ? 
\"forcePlay\" : \"playSolid\"\n}\n\nconst PlayPausePill = ({ isPlaying, isForcePlaying }) => {\n const dispatch = useDispatch()\n\n const onPlay = () => dispatch(resetGlobalPauseAction({ forcePlay: false }))\n const onPause = () => dispatch(setGlobalPauseAction())\n const icon = useMemo(() => getIcon(isPlaying, isForcePlaying), [isPlaying, isForcePlaying])\n\n return (\n \n \n {isPlaying ? \"Playing\" : \"Paused\"}\n \n \n )\n}\n\nexport default PlayPausePill\n","import React, { useCallback, forwardRef } from \"react\"\nimport styled from \"styled-components\"\nimport { getColor, Flex, Icon, Text } from \"@netdata/netdata-ui\"\n\nexport const PanelRowContainer = styled(Flex)`\n cursor: pointer;\n\n &:hover {\n background: ${getColor(\"selected\")};\n }\n\n ${props => props.selected && `background: ${getColor(\"selected\")(props)};`}\n`\n\nconst MenuItem = forwardRef(\n (\n {\n disabled,\n children,\n Wrapper = Text,\n onClick,\n testid,\n icon,\n padding = [2, 3],\n margin = [0],\n round = 0,\n actions,\n selected,\n width = \"100%\",\n },\n ref\n ) => {\n const click = useCallback(() => {\n if (disabled) return\n if (onClick) onClick()\n }, [onClick, disabled])\n\n return (\n \n \n {typeof icon === \"string\" ? (\n \n ) : (\n icon\n )}\n \n {children}\n \n \n {actions}\n \n )\n }\n)\n\nexport default MenuItem\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, H4, Collapsible } from \"@netdata/netdata-ui\"\n\nexport const DefaultListHeader = styled(H4).attrs({ padding: [0], margin: [0] })`\n cursor: pointer;\n`\n\nconst SectionHandle = ({ toggleOpen, label, testid, Header = DefaultListHeader }) => (\n
    \n {label}\n
    \n)\n\nconst ItemsList = ({ isOpen = false, toggleOpen, label, children, testid, Header }) => (\n \n \n {children}\n \n)\n\nexport default ItemsList\n","import React from \"react\"\nimport { Flex, TextSmall } from \"@netdata/netdata-ui\"\n\nconst PlayOptionsTooltip = () => (\n \n \n Play to refresh and have live content, pause to see historical data, or force play to keep\n refreshing even when the tab loses focus at the expense of some system performance.\n \n \n)\n\nexport default PlayOptionsTooltip\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nexport const Divider = styled(Flex).attrs({\n background: \"disabled\",\n height: \"1px\",\n margin: [2, 6],\n})``\n","import React, { memo, Fragment } from \"react\"\nimport styled from \"styled-components\"\nimport { useToggle } from \"react-use\"\nimport { Flex, Icon, Drop } from \"@netdata/netdata-ui\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport { useDispatch } from \"store/redux-separate-context\"\nimport { resetGlobalPauseAction, setGlobalPauseAction } from \"domains/global/actions\"\nimport Tooltip from \"@/src/components/tooltips\"\nimport PlayOptionsTooltip from \"./playOptionsTooltip\"\n\nconst MenuButton = styled(Flex).attrs({ padding: [1], role: \"button\" })`\n cursor: pointer;\n`\n\nconst Dropdown = styled(Flex).attrs({\n column: true,\n padding: [2],\n background: \"dropdown\",\n round: 1,\n overflow: { vertical: \"auto\" },\n margin: [2, 0, 0],\n width: 40,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\n\nconst PlayOptions = ({ target }) => {\n const dispatch = useDispatch()\n const [isOpen, toggle] = useToggle()\n\n const close = () => toggle(false)\n\n const onPlay = () => {\n dispatch(resetGlobalPauseAction({ forcePlay: false }))\n close()\n }\n\n const onPause = () => {\n dispatch(setGlobalPauseAction())\n close()\n }\n\n const onForcePlay = () => {\n dispatch(resetGlobalPauseAction({ forcePlay: true }))\n close()\n }\n\n return (\n \n {!isOpen ? 
(\n } align=\"bottom\" plain>\n \n \n \n \n ) : (\n \n \n \n )}\n {target.current && isOpen && (\n \n \n \n Play\n \n \n Pause\n \n \n Force Play\n \n \n \n )}\n \n )\n}\n\nconst MemoizedPlayOptions = memo(PlayOptions)\n\nexport default MemoizedPlayOptions\n","import React, { useMemo, useRef } from \"react\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport {\n selectHasWindowFocus,\n selectStopUpdatesWhenFocusIsLost,\n selectGlobalPanAndZoom,\n selectGlobalPause,\n selectGlobalSelection,\n} from \"domains/global/selectors\"\nimport { ReduxDatePickerContainer } from \"components/date-picker\"\nimport Item from \"../item\"\nimport Container from \"./container\"\nimport PlayPausePill from \"./playPausePill\"\nimport PlayOptions from \"./playOptions\"\n\nconst tagging = \"global-view\"\n\nconst GlobalControls = () => {\n const ref = useRef()\n const hasWindowFocus = useSelector(selectHasWindowFocus)\n const stopUpdatesWhenFocusIsLost = useSelector(selectStopUpdatesWhenFocusIsLost)\n const globalPanAndZoom = useSelector(selectGlobalPanAndZoom)\n const hoveredX = useSelector(selectGlobalSelection)\n const globalPause = useSelector(selectGlobalPause)\n\n const isPlaying = useMemo(\n () =>\n Boolean(\n (hasWindowFocus || !stopUpdatesWhenFocusIsLost) &&\n !globalPanAndZoom &&\n !hoveredX &&\n !globalPause\n ),\n [hasWindowFocus, stopUpdatesWhenFocusIsLost, globalPanAndZoom, hoveredX, globalPause]\n )\n\n return (\n \n \n \n \n \n \n \n )\n}\n\nexport default GlobalControls\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst hollowColors = {\n warning: \"#FFF8E1\",\n error: \"#FFEBEF\",\n}\n\nconst StyledPill = styled(Flex).attrs(({ round = 999, hollow, background }) => ({\n padding: [0.5, 2],\n round,\n border: hollow ? { side: \"all\", color: background, size: \"1px\" } : false,\n}))`\n background: ${({ background, hollow }) => (hollow ? hollowColors[background] : background)};\n cursor: pointer;\n`\n\nexport default StyledPill\n","import React, { forwardRef } from \"react\"\nimport { TextMicro } from \"@netdata/netdata-ui\"\nimport StyledPill from \"./styled\"\n\nconst Pill = forwardRef(({ children, background, color, hollow, ...rest }, ref) => (\n \n \n {children}\n \n \n))\n\nexport default Pill\n","import React, { useMemo } from \"react\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { selectActiveAlarms } from \"domains/global/selectors\"\nimport Item from \"./item\"\nimport Pill from \"./pill\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst pillProps = {\n \"data-toggle\": \"modal\",\n \"data-target\": \"#alarmsModal\",\n}\n\nconst Alarms = () => {\n const activeAlarms = useSelector(selectActiveAlarms)\n\n const alarms = useMemo(() => (activeAlarms ? Object.values(activeAlarms.alarms) : []), [\n activeAlarms,\n ])\n\n const { critical, warning } = useMemo(\n () =>\n alarms.reduce(\n (acc, { status }) => {\n if (status === \"CRITICAL\") acc.critical = acc.critical + 1\n if (status === \"WARNING\") acc.warning = acc.warning + 1\n return acc\n },\n { critical: 0, warning: 0 }\n ),\n [alarms]\n )\n\n return (\n \n 1 ? \"s\" : \"\"}`\n : \"No critical alerts\"\n }\n align=\"bottom\"\n plain\n >\n \n {critical}\n \n \n 1 ? 
\"s\" : \"\"}` : \"No warning alerts\"\n }\n align=\"bottom\"\n plain\n >\n \n {warning}\n \n \n \n )\n}\n\nexport default Alarms\n","import React from \"react\"\nimport { Button, News as AgentNews } from \"@netdata/netdata-ui\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst News = () => {\n return (\n \n {({ toggle, upToDate }) => (\n \n \n \n )}\n \n )\n}\n\nexport default News\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Dropdown = styled(Flex).attrs({\n column: true,\n padding: [2],\n background: \"dropdown\",\n round: 1,\n overflow: { vertical: \"auto\" },\n margin: [2, 0, 0],\n width: 80,\n})`\n box-shadow: 0px 4px 4px rgba(0, 0, 0, 0.25);\n`\n\nexport default Dropdown\n","import styled from \"styled-components\"\nimport { TextInput } from \"@netdata/netdata-ui\"\n\nconst SearchInput = styled(TextInput)`\n & input {\n background: transparent;\n }\n\n & > label {\n margin-bottom: 0;\n }\n`\nexport default SearchInput\n","import React, { forwardRef } from \"react\"\nimport SearchInput from \"./searchInput\"\n\nconst Search = forwardRef(({ value, onChange }, ref) => (\n \n))\n\nexport default Search\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Container = styled(Flex).attrs({\n column: true,\n padding: [2, 0, 0],\n overflow: { vertical: \"auto\" },\n height: { max: \"320px\" },\n})``\n\nexport default Container\n","import styled from \"styled-components\"\nimport { Flex } from \"@netdata/netdata-ui\"\n\nconst Wrapper = styled(Flex).attrs({\n justifyContent: \"between\",\n alignItems: \"center\",\n width: \"100%\",\n gap: 2,\n})``\n\nexport default Wrapper\n","import React, { useCallback } from \"react\"\nimport { Text } from \"@netdata/netdata-ui\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport Wrapper from \"./wrapper\"\n\nconst OffsetItem = ({ name, offset, utc, onSelect }) => {\n const onClick = useCallback(() => onSelect(utc), [utc, onSelect])\n\n return (\n \n {name}\n \n UTC {offset}\n \n \n )\n}\n\nexport default OffsetItem\n","export const timezones = [\n {\n value: \"Dateline Standard Time\",\n abbr: \"DST\",\n text: \"International Date Line West\",\n utc: [\"Etc/GMT+12\"],\n },\n {\n value: \"UTC-11\",\n abbr: \"U\",\n text: \"Coordinated Universal Time-11\",\n utc: [\"Etc/GMT+11\", \"Pacific/Midway\", \"Pacific/Niue\", \"Pacific/Pago_Pago\"],\n },\n {\n value: \"Hawaiian Standard Time\",\n abbr: \"HST\",\n text: \"Hawaii\",\n utc: [\n \"Etc/GMT+10\",\n \"Pacific/Honolulu\",\n \"Pacific/Johnston\",\n \"Pacific/Rarotonga\",\n \"Pacific/Tahiti\",\n ],\n },\n {\n value: \"Alaskan Standard Time\",\n abbr: \"AKDT\",\n text: \"Alaska\",\n utc: [\n \"America/Anchorage\",\n \"America/Juneau\",\n \"America/Nome\",\n \"America/Sitka\",\n \"America/Yakutat\",\n ],\n },\n {\n value: \"Pacific Standard Time (Mexico)\",\n abbr: \"PDT\",\n text: \"Baja California\",\n utc: [\"America/Santa_Isabel\"],\n },\n {\n value: \"Pacific Standard Time\",\n abbr: \"PST\",\n text: \"Pacific Time (US & Canada)\",\n utc: [\n \"America/Dawson\",\n \"America/Los_Angeles\",\n \"America/Tijuana\",\n \"America/Vancouver\",\n \"America/Whitehorse\",\n \"PST8PDT\",\n ],\n },\n {\n value: \"US Mountain Standard Time\",\n abbr: \"UMST\",\n text: \"Arizona\",\n utc: [\n \"America/Creston\",\n \"America/Dawson_Creek\",\n \"America/Hermosillo\",\n \"America/Phoenix\",\n \"Etc/GMT+7\",\n ],\n },\n {\n value: \"Mountain Standard Time (Mexico)\",\n abbr: \"MDT\",\n text: \"Chihuahua, 
La Paz, Mazatlan\",\n utc: [\"America/Chihuahua\", \"America/Mazatlan\"],\n },\n {\n value: \"Mountain Standard Time\",\n abbr: \"MDT\",\n text: \"Mountain Time (US & Canada)\",\n utc: [\n \"America/Boise\",\n \"America/Cambridge_Bay\",\n \"America/Denver\",\n \"America/Edmonton\",\n \"America/Inuvik\",\n \"America/Ojinaga\",\n \"America/Yellowknife\",\n \"MST7MDT\",\n ],\n },\n {\n value: \"Central America Standard Time\",\n abbr: \"CAST\",\n text: \"Central America\",\n utc: [\n \"America/Belize\",\n \"America/Costa_Rica\",\n \"America/El_Salvador\",\n \"America/Guatemala\",\n \"America/Managua\",\n \"America/Tegucigalpa\",\n \"Etc/GMT+6\",\n \"Pacific/Galapagos\",\n ],\n },\n {\n value: \"Central Standard Time\",\n abbr: \"CDT\",\n text: \"Central Time (US & Canada)\",\n utc: [\n \"America/Chicago\",\n \"America/Indiana/Knox\",\n \"America/Indiana/Tell_City\",\n \"America/Matamoros\",\n \"America/Menominee\",\n \"America/North_Dakota/Beulah\",\n \"America/North_Dakota/Center\",\n \"America/North_Dakota/New_Salem\",\n \"America/Rainy_River\",\n \"America/Rankin_Inlet\",\n \"America/Resolute\",\n \"America/Winnipeg\",\n \"CST6CDT\",\n ],\n },\n {\n value: \"Central Standard Time (Mexico)\",\n abbr: \"CDT\",\n text: \"Guadalajara, Mexico City, Monterrey\",\n utc: [\n \"America/Bahia_Banderas\",\n \"America/Cancun\",\n \"America/Merida\",\n \"America/Mexico_City\",\n \"America/Monterrey\",\n ],\n },\n {\n value: \"Canada Central Standard Time\",\n abbr: \"CCST\",\n text: \"Saskatchewan\",\n utc: [\"America/Regina\", \"America/Swift_Current\"],\n },\n {\n value: \"SA Pacific Standard Time\",\n abbr: \"SPST\",\n text: \"Bogota, Lima, Quito\",\n utc: [\n \"America/Bogota\",\n \"America/Cayman\",\n \"America/Coral_Harbour\",\n \"America/Eirunepe\",\n \"America/Guayaquil\",\n \"America/Jamaica\",\n \"America/Lima\",\n \"America/Panama\",\n \"America/Rio_Branco\",\n \"Etc/GMT+5\",\n ],\n },\n {\n value: \"Eastern Standard Time\",\n abbr: \"EDT\",\n text: \"Eastern Time (US & Canada)\",\n utc: [\n \"America/Detroit\",\n \"America/Havana\",\n \"America/Indiana/Petersburg\",\n \"America/Indiana/Vincennes\",\n \"America/Indiana/Winamac\",\n \"America/Iqaluit\",\n \"America/Kentucky/Monticello\",\n \"America/Louisville\",\n \"America/Montreal\",\n \"America/Nassau\",\n \"America/New_York\",\n \"America/Nipigon\",\n \"America/Pangnirtung\",\n \"America/Port-au-Prince\",\n \"America/Thunder_Bay\",\n \"America/Toronto\",\n \"EST5EDT\",\n ],\n },\n {\n value: \"US Eastern Standard Time\",\n abbr: \"UEDT\",\n text: \"Indiana (East)\",\n utc: [\"America/Indiana/Marengo\", \"America/Indiana/Vevay\", \"America/Indianapolis\"],\n },\n {\n value: \"Venezuela Standard Time\",\n abbr: \"VST\",\n text: \"Caracas\",\n utc: [\"America/Caracas\"],\n },\n {\n value: \"Paraguay Standard Time\",\n abbr: \"PYT\",\n text: \"Asuncion\",\n utc: [\"America/Asuncion\"],\n },\n {\n value: \"Atlantic Standard Time\",\n abbr: \"ADT\",\n text: \"Atlantic Time (Canada)\",\n utc: [\n \"America/Glace_Bay\",\n \"America/Goose_Bay\",\n \"America/Halifax\",\n \"America/Moncton\",\n \"America/Thule\",\n \"Atlantic/Bermuda\",\n ],\n },\n {\n value: \"Central Brazilian Standard Time\",\n abbr: \"CBST\",\n text: \"Cuiaba\",\n utc: [\"America/Campo_Grande\", \"America/Cuiaba\"],\n },\n {\n value: \"SA Western Standard Time\",\n abbr: \"SWST\",\n text: \"Georgetown, La Paz, Manaus, San Juan\",\n utc: [\n \"America/Anguilla\",\n \"America/Antigua\",\n \"America/Aruba\",\n \"America/Barbados\",\n \"America/Blanc-Sablon\",\n 
\"America/Boa_Vista\",\n \"America/Curacao\",\n \"America/Dominica\",\n \"America/Grand_Turk\",\n \"America/Grenada\",\n \"America/Guadeloupe\",\n \"America/Guyana\",\n \"America/Kralendijk\",\n \"America/La_Paz\",\n \"America/Lower_Princes\",\n \"America/Manaus\",\n \"America/Marigot\",\n \"America/Martinique\",\n \"America/Montserrat\",\n \"America/Port_of_Spain\",\n \"America/Porto_Velho\",\n \"America/Puerto_Rico\",\n \"America/Santo_Domingo\",\n \"America/St_Barthelemy\",\n \"America/St_Kitts\",\n \"America/St_Lucia\",\n \"America/St_Thomas\",\n \"America/St_Vincent\",\n \"America/Tortola\",\n \"Etc/GMT+4\",\n ],\n },\n {\n value: \"Pacific SA Standard Time\",\n abbr: \"PSST\",\n text: \"Santiago\",\n utc: [\"America/Santiago\", \"Antarctica/Palmer\"],\n },\n {\n value: \"Newfoundland Standard Time\",\n abbr: \"NDT\",\n text: \"Newfoundland\",\n utc: [\"America/St_Johns\"],\n },\n {\n value: \"E. South America Standard Time\",\n abbr: \"ESAST\",\n text: \"Brasilia\",\n utc: [\"America/Sao_Paulo\"],\n },\n {\n value: \"Argentina Standard Time\",\n abbr: \"AST\",\n text: \"Buenos Aires\",\n utc: [\n \"America/Argentina/La_Rioja\",\n \"America/Argentina/Rio_Gallegos\",\n \"America/Argentina/Salta\",\n \"America/Argentina/San_Juan\",\n \"America/Argentina/San_Luis\",\n \"America/Argentina/Tucuman\",\n \"America/Argentina/Ushuaia\",\n \"America/Buenos_Aires\",\n \"America/Catamarca\",\n \"America/Cordoba\",\n \"America/Jujuy\",\n \"America/Mendoza\",\n ],\n },\n {\n value: \"SA Eastern Standard Time\",\n abbr: \"SEST\",\n text: \"Cayenne, Fortaleza\",\n utc: [\n \"America/Araguaina\",\n \"America/Belem\",\n \"America/Cayenne\",\n \"America/Fortaleza\",\n \"America/Maceio\",\n \"America/Paramaribo\",\n \"America/Recife\",\n \"America/Santarem\",\n \"Antarctica/Rothera\",\n \"Atlantic/Stanley\",\n \"Etc/GMT+3\",\n ],\n },\n {\n value: \"Greenland Standard Time\",\n abbr: \"GDT\",\n text: \"Greenland\",\n utc: [\"America/Godthab\"],\n },\n {\n value: \"Montevideo Standard Time\",\n abbr: \"MST\",\n text: \"Montevideo\",\n utc: [\"America/Montevideo\"],\n },\n {\n value: \"Bahia Standard Time\",\n abbr: \"BST\",\n text: \"Salvador\",\n utc: [\"America/Bahia\"],\n },\n {\n value: \"UTC-02\",\n abbr: \"U\",\n text: \"Coordinated Universal Time-02\",\n utc: [\"America/Noronha\", \"Atlantic/South_Georgia\", \"Etc/GMT+2\"],\n },\n {\n value: \"Mid-Atlantic Standard Time\",\n abbr: \"MDT\",\n text: \"Mid-Atlantic - Old\",\n utc: [],\n },\n {\n value: \"Azores Standard Time\",\n abbr: \"ADT\",\n text: \"Azores\",\n utc: [\"America/Scoresbysund\", \"Atlantic/Azores\"],\n },\n {\n value: \"Cape Verde Standard Time\",\n abbr: \"CVST\",\n text: \"Cape Verde Is.\",\n utc: [\"Atlantic/Cape_Verde\", \"Etc/GMT+1\"],\n },\n {\n value: \"Morocco Standard Time\",\n abbr: \"MDT\",\n text: \"Casablanca\",\n utc: [\"Africa/Casablanca\", \"Africa/El_Aaiun\"],\n },\n {\n value: \"UTC\",\n abbr: \"UTC\",\n text: \"Coordinated Universal Time\",\n utc: [\"America/Danmarkshavn\", \"Etc/GMT\"],\n },\n {\n value: \"GMT Standard Time\",\n abbr: \"GMT\",\n text: \"Edinburgh, London\",\n utc: [\"Europe/Isle_of_Man\", \"Europe/Guernsey\", \"Europe/Jersey\", \"Europe/London\"],\n },\n {\n value: \"GMT Standard Time\",\n abbr: \"GDT\",\n text: \"Dublin, Lisbon\",\n utc: [\n \"Atlantic/Canary\",\n \"Atlantic/Faeroe\",\n \"Atlantic/Madeira\",\n \"Europe/Dublin\",\n \"Europe/Lisbon\",\n ],\n },\n {\n value: \"Greenwich Standard Time\",\n abbr: \"GST\",\n text: \"Monrovia, Reykjavik\",\n utc: [\n \"Africa/Abidjan\",\n 
\"Africa/Accra\",\n \"Africa/Bamako\",\n \"Africa/Banjul\",\n \"Africa/Bissau\",\n \"Africa/Conakry\",\n \"Africa/Dakar\",\n \"Africa/Freetown\",\n \"Africa/Lome\",\n \"Africa/Monrovia\",\n \"Africa/Nouakchott\",\n \"Africa/Ouagadougou\",\n \"Africa/Sao_Tome\",\n \"Atlantic/Reykjavik\",\n \"Atlantic/St_Helena\",\n ],\n },\n {\n value: \"W. Europe Standard Time\",\n abbr: \"WEDT\",\n text: \"Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna\",\n utc: [\n \"Arctic/Longyearbyen\",\n \"Europe/Amsterdam\",\n \"Europe/Andorra\",\n \"Europe/Berlin\",\n \"Europe/Busingen\",\n \"Europe/Gibraltar\",\n \"Europe/Luxembourg\",\n \"Europe/Malta\",\n \"Europe/Monaco\",\n \"Europe/Oslo\",\n \"Europe/Rome\",\n \"Europe/San_Marino\",\n \"Europe/Stockholm\",\n \"Europe/Vaduz\",\n \"Europe/Vatican\",\n \"Europe/Vienna\",\n \"Europe/Zurich\",\n ],\n },\n {\n value: \"Central Europe Standard Time\",\n abbr: \"CEDT\",\n text: \"Belgrade, Bratislava, Budapest, Ljubljana, Prague\",\n utc: [\n \"Europe/Belgrade\",\n \"Europe/Bratislava\",\n \"Europe/Budapest\",\n \"Europe/Ljubljana\",\n \"Europe/Podgorica\",\n \"Europe/Prague\",\n \"Europe/Tirane\",\n ],\n },\n {\n value: \"Romance Standard Time\",\n abbr: \"RDT\",\n text: \"Brussels, Copenhagen, Madrid, Paris\",\n utc: [\"Africa/Ceuta\", \"Europe/Brussels\", \"Europe/Copenhagen\", \"Europe/Madrid\", \"Europe/Paris\"],\n },\n {\n value: \"Central European Standard Time\",\n abbr: \"CEDT\",\n text: \"Sarajevo, Skopje, Warsaw, Zagreb\",\n utc: [\"Europe/Sarajevo\", \"Europe/Skopje\", \"Europe/Warsaw\", \"Europe/Zagreb\"],\n },\n {\n value: \"W. Central Africa Standard Time\",\n abbr: \"WCAST\",\n text: \"West Central Africa\",\n utc: [\n \"Africa/Algiers\",\n \"Africa/Bangui\",\n \"Africa/Brazzaville\",\n \"Africa/Douala\",\n \"Africa/Kinshasa\",\n \"Africa/Lagos\",\n \"Africa/Libreville\",\n \"Africa/Luanda\",\n \"Africa/Malabo\",\n \"Africa/Ndjamena\",\n \"Africa/Niamey\",\n \"Africa/Porto-Novo\",\n \"Africa/Tunis\",\n \"Etc/GMT-1\",\n ],\n },\n {\n value: \"Namibia Standard Time\",\n abbr: \"NST\",\n text: \"Windhoek\",\n utc: [\"Africa/Windhoek\"],\n },\n {\n value: \"GTB Standard Time\",\n abbr: \"GDT\",\n text: \"Athens, Bucharest\",\n utc: [\"Asia/Nicosia\", \"Europe/Athens\", \"Europe/Bucharest\", \"Europe/Chisinau\"],\n },\n {\n value: \"Middle East Standard Time\",\n abbr: \"MEDT\",\n text: \"Beirut\",\n utc: [\"Asia/Beirut\"],\n },\n {\n value: \"Egypt Standard Time\",\n abbr: \"EST\",\n text: \"Cairo\",\n utc: [\"Africa/Cairo\"],\n },\n {\n value: \"Syria Standard Time\",\n abbr: \"SDT\",\n text: \"Damascus\",\n utc: [\"Asia/Damascus\"],\n },\n {\n value: \"E. Europe Standard Time\",\n abbr: \"EEDT\",\n text: \"E. 
Europe\",\n utc: [\n \"Asia/Nicosia\",\n \"Europe/Athens\",\n \"Europe/Bucharest\",\n \"Europe/Chisinau\",\n \"Europe/Helsinki\",\n \"Europe/Kiev\",\n \"Europe/Mariehamn\",\n \"Europe/Nicosia\",\n \"Europe/Riga\",\n \"Europe/Sofia\",\n \"Europe/Tallinn\",\n \"Europe/Uzhgorod\",\n \"Europe/Vilnius\",\n \"Europe/Zaporozhye\",\n ],\n },\n {\n value: \"South Africa Standard Time\",\n abbr: \"SAST\",\n text: \"Harare, Pretoria\",\n utc: [\n \"Africa/Blantyre\",\n \"Africa/Bujumbura\",\n \"Africa/Gaborone\",\n \"Africa/Harare\",\n \"Africa/Johannesburg\",\n \"Africa/Kigali\",\n \"Africa/Lubumbashi\",\n \"Africa/Lusaka\",\n \"Africa/Maputo\",\n \"Africa/Maseru\",\n \"Africa/Mbabane\",\n \"Etc/GMT-2\",\n ],\n },\n {\n value: \"FLE Standard Time\",\n abbr: \"FDT\",\n text: \"Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius\",\n utc: [\n \"Europe/Helsinki\",\n \"Europe/Kiev\",\n \"Europe/Mariehamn\",\n \"Europe/Riga\",\n \"Europe/Sofia\",\n \"Europe/Tallinn\",\n \"Europe/Uzhgorod\",\n \"Europe/Vilnius\",\n \"Europe/Zaporozhye\",\n ],\n },\n {\n value: \"Turkey Standard Time\",\n abbr: \"TDT\",\n text: \"Istanbul\",\n utc: [\"Europe/Istanbul\"],\n },\n {\n value: \"Israel Standard Time\",\n abbr: \"JDT\",\n text: \"Jerusalem\",\n utc: [\"Asia/Jerusalem\"],\n },\n {\n value: \"Libya Standard Time\",\n abbr: \"LST\",\n text: \"Tripoli\",\n utc: [\"Africa/Tripoli\"],\n },\n {\n value: \"Jordan Standard Time\",\n abbr: \"JST\",\n text: \"Amman\",\n utc: [\"Asia/Amman\"],\n },\n {\n value: \"Arabic Standard Time\",\n abbr: \"AST\",\n text: \"Baghdad\",\n utc: [\"Asia/Baghdad\"],\n },\n {\n value: \"Kaliningrad Standard Time\",\n abbr: \"KST\",\n text: \"Kaliningrad\",\n utc: [\"Europe/Kaliningrad\"],\n },\n {\n value: \"Arab Standard Time\",\n abbr: \"AST\",\n text: \"Kuwait, Riyadh\",\n utc: [\"Asia/Aden\", \"Asia/Bahrain\", \"Asia/Kuwait\", \"Asia/Qatar\", \"Asia/Riyadh\"],\n },\n {\n value: \"E. Africa Standard Time\",\n abbr: \"EAST\",\n text: \"Nairobi\",\n utc: [\n \"Africa/Addis_Ababa\",\n \"Africa/Asmera\",\n \"Africa/Dar_es_Salaam\",\n \"Africa/Djibouti\",\n \"Africa/Juba\",\n \"Africa/Kampala\",\n \"Africa/Khartoum\",\n \"Africa/Mogadishu\",\n \"Africa/Nairobi\",\n \"Antarctica/Syowa\",\n \"Etc/GMT-3\",\n \"Indian/Antananarivo\",\n \"Indian/Comoro\",\n \"Indian/Mayotte\",\n ],\n },\n {\n value: \"Moscow Standard Time\",\n abbr: \"MSK\",\n text: \"Moscow, St. 
Petersburg, Volgograd, Minsk\",\n utc: [\"Europe/Kirov\", \"Europe/Moscow\", \"Europe/Simferopol\", \"Europe/Volgograd\", \"Europe/Minsk\"],\n },\n {\n value: \"Samara Time\",\n abbr: \"SAMT\",\n text: \"Samara, Ulyanovsk, Saratov\",\n utc: [\"Europe/Astrakhan\", \"Europe/Samara\", \"Europe/Ulyanovsk\"],\n },\n {\n value: \"Iran Standard Time\",\n abbr: \"IDT\",\n text: \"Tehran\",\n utc: [\"Asia/Tehran\"],\n },\n {\n value: \"Arabian Standard Time\",\n abbr: \"AST\",\n text: \"Abu Dhabi, Muscat\",\n utc: [\"Asia/Dubai\", \"Asia/Muscat\", \"Etc/GMT-4\"],\n },\n {\n value: \"Azerbaijan Standard Time\",\n abbr: \"ADT\",\n text: \"Baku\",\n utc: [\"Asia/Baku\"],\n },\n {\n value: \"Mauritius Standard Time\",\n abbr: \"MST\",\n text: \"Port Louis\",\n utc: [\"Indian/Mahe\", \"Indian/Mauritius\", \"Indian/Reunion\"],\n },\n {\n value: \"Georgian Standard Time\",\n abbr: \"GET\",\n text: \"Tbilisi\",\n utc: [\"Asia/Tbilisi\"],\n },\n {\n value: \"Caucasus Standard Time\",\n abbr: \"CST\",\n text: \"Yerevan\",\n utc: [\"Asia/Yerevan\"],\n },\n {\n value: \"Afghanistan Standard Time\",\n abbr: \"AST\",\n text: \"Kabul\",\n utc: [\"Asia/Kabul\"],\n },\n {\n value: \"West Asia Standard Time\",\n abbr: \"WAST\",\n text: \"Ashgabat, Tashkent\",\n utc: [\n \"Antarctica/Mawson\",\n \"Asia/Aqtau\",\n \"Asia/Aqtobe\",\n \"Asia/Ashgabat\",\n \"Asia/Dushanbe\",\n \"Asia/Oral\",\n \"Asia/Samarkand\",\n \"Asia/Tashkent\",\n \"Etc/GMT-5\",\n \"Indian/Kerguelen\",\n \"Indian/Maldives\",\n ],\n },\n {\n value: \"Yekaterinburg Time\",\n abbr: \"YEKT\",\n text: \"Yekaterinburg\",\n utc: [\"Asia/Yekaterinburg\"],\n },\n {\n value: \"Pakistan Standard Time\",\n abbr: \"PKT\",\n text: \"Islamabad, Karachi\",\n utc: [\"Asia/Karachi\"],\n },\n {\n value: \"India Standard Time\",\n abbr: \"IST\",\n text: \"Chennai, Kolkata, Mumbai, New Delhi\",\n utc: [\"Asia/Kolkata\"],\n },\n {\n value: \"Sri Lanka Standard Time\",\n abbr: \"SLST\",\n text: \"Sri Jayawardenepura\",\n utc: [\"Asia/Colombo\"],\n },\n {\n value: \"Nepal Standard Time\",\n abbr: \"NST\",\n text: \"Kathmandu\",\n utc: [\"Asia/Kathmandu\"],\n },\n {\n value: \"Central Asia Standard Time\",\n abbr: \"CAST\",\n text: \"Nur-Sultan (Astana)\",\n utc: [\n \"Antarctica/Vostok\",\n \"Asia/Almaty\",\n \"Asia/Bishkek\",\n \"Asia/Qyzylorda\",\n \"Asia/Urumqi\",\n \"Etc/GMT-6\",\n \"Indian/Chagos\",\n ],\n },\n {\n value: \"Bangladesh Standard Time\",\n abbr: \"BST\",\n text: \"Dhaka\",\n utc: [\"Asia/Dhaka\", \"Asia/Thimphu\"],\n },\n {\n value: \"Myanmar Standard Time\",\n abbr: \"MST\",\n text: \"Yangon (Rangoon)\",\n utc: [\"Asia/Rangoon\", \"Indian/Cocos\"],\n },\n {\n value: \"SE Asia Standard Time\",\n abbr: \"SAST\",\n text: \"Bangkok, Hanoi, Jakarta\",\n utc: [\n \"Antarctica/Davis\",\n \"Asia/Bangkok\",\n \"Asia/Hovd\",\n \"Asia/Jakarta\",\n \"Asia/Phnom_Penh\",\n \"Asia/Pontianak\",\n \"Asia/Saigon\",\n \"Asia/Vientiane\",\n \"Etc/GMT-7\",\n \"Indian/Christmas\",\n ],\n },\n {\n value: \"N. 
Central Asia Standard Time\",\n abbr: \"NCAST\",\n text: \"Novosibirsk\",\n utc: [\"Asia/Novokuznetsk\", \"Asia/Novosibirsk\", \"Asia/Omsk\"],\n },\n {\n value: \"China Standard Time\",\n abbr: \"CST\",\n text: \"Beijing, Chongqing, Hong Kong, Urumqi\",\n utc: [\"Asia/Hong_Kong\", \"Asia/Macau\", \"Asia/Shanghai\"],\n },\n {\n value: \"North Asia Standard Time\",\n abbr: \"NAST\",\n text: \"Krasnoyarsk\",\n utc: [\"Asia/Krasnoyarsk\"],\n },\n {\n value: \"Singapore Standard Time\",\n abbr: \"MPST\",\n text: \"Kuala Lumpur, Singapore\",\n utc: [\n \"Asia/Brunei\",\n \"Asia/Kuala_Lumpur\",\n \"Asia/Kuching\",\n \"Asia/Makassar\",\n \"Asia/Manila\",\n \"Asia/Singapore\",\n \"Etc/GMT-8\",\n ],\n },\n {\n value: \"W. Australia Standard Time\",\n abbr: \"WAST\",\n text: \"Perth\",\n utc: [\"Antarctica/Casey\", \"Australia/Perth\"],\n },\n {\n value: \"Taipei Standard Time\",\n abbr: \"TST\",\n text: \"Taipei\",\n utc: [\"Asia/Taipei\"],\n },\n {\n value: \"Ulaanbaatar Standard Time\",\n abbr: \"UST\",\n text: \"Ulaanbaatar\",\n utc: [\"Asia/Choibalsan\", \"Asia/Ulaanbaatar\"],\n },\n {\n value: \"North Asia East Standard Time\",\n abbr: \"NAEST\",\n text: \"Irkutsk\",\n utc: [\"Asia/Irkutsk\"],\n },\n {\n value: \"Japan Standard Time\",\n abbr: \"JST\",\n text: \"Osaka, Sapporo, Tokyo\",\n utc: [\"Asia/Dili\", \"Asia/Jayapura\", \"Asia/Tokyo\", \"Etc/GMT-9\", \"Pacific/Palau\"],\n },\n {\n value: \"Korea Standard Time\",\n abbr: \"KST\",\n text: \"Seoul\",\n utc: [\"Asia/Pyongyang\", \"Asia/Seoul\"],\n },\n {\n value: \"Cen. Australia Standard Time\",\n abbr: \"CAST\",\n text: \"Adelaide\",\n utc: [\"Australia/Adelaide\", \"Australia/Broken_Hill\"],\n },\n {\n value: \"AUS Central Standard Time\",\n abbr: \"ACST\",\n text: \"Darwin\",\n utc: [\"Australia/Darwin\"],\n },\n {\n value: \"E. 
Australia Standard Time\",\n abbr: \"EAST\",\n text: \"Brisbane\",\n utc: [\"Australia/Brisbane\", \"Australia/Lindeman\"],\n },\n {\n value: \"AUS Eastern Standard Time\",\n abbr: \"AEST\",\n text: \"Canberra, Melbourne, Sydney\",\n utc: [\"Australia/Melbourne\", \"Australia/Sydney\"],\n },\n {\n value: \"West Pacific Standard Time\",\n abbr: \"WPST\",\n text: \"Guam, Port Moresby\",\n utc: [\n \"Antarctica/DumontDUrville\",\n \"Etc/GMT-10\",\n \"Pacific/Guam\",\n \"Pacific/Port_Moresby\",\n \"Pacific/Saipan\",\n \"Pacific/Truk\",\n ],\n },\n {\n value: \"Tasmania Standard Time\",\n abbr: \"TST\",\n text: \"Hobart\",\n utc: [\"Australia/Currie\", \"Australia/Hobart\"],\n },\n {\n value: \"Yakutsk Standard Time\",\n abbr: \"YST\",\n text: \"Yakutsk\",\n utc: [\"Asia/Chita\", \"Asia/Khandyga\", \"Asia/Yakutsk\"],\n },\n {\n value: \"Central Pacific Standard Time\",\n abbr: \"CPST\",\n text: \"Solomon Is., New Caledonia\",\n utc: [\n \"Antarctica/Macquarie\",\n \"Etc/GMT-11\",\n \"Pacific/Efate\",\n \"Pacific/Guadalcanal\",\n \"Pacific/Kosrae\",\n \"Pacific/Noumea\",\n \"Pacific/Ponape\",\n ],\n },\n {\n value: \"Vladivostok Standard Time\",\n abbr: \"VST\",\n text: \"Vladivostok\",\n utc: [\"Asia/Sakhalin\", \"Asia/Ust-Nera\", \"Asia/Vladivostok\"],\n },\n {\n value: \"New Zealand Standard Time\",\n abbr: \"NZST\",\n text: \"Auckland, Wellington\",\n utc: [\"Antarctica/McMurdo\", \"Pacific/Auckland\"],\n },\n {\n value: \"UTC+12\",\n abbr: \"U\",\n text: \"Coordinated Universal Time+12\",\n utc: [\n \"Etc/GMT-12\",\n \"Pacific/Funafuti\",\n \"Pacific/Kwajalein\",\n \"Pacific/Majuro\",\n \"Pacific/Nauru\",\n \"Pacific/Tarawa\",\n \"Pacific/Wake\",\n \"Pacific/Wallis\",\n ],\n },\n {\n value: \"Fiji Standard Time\",\n abbr: \"FST\",\n text: \"Fiji\",\n utc: [\"Pacific/Fiji\"],\n },\n {\n value: \"Magadan Standard Time\",\n abbr: \"MST\",\n text: \"Magadan\",\n utc: [\"Asia/Anadyr\", \"Asia/Kamchatka\", \"Asia/Magadan\", \"Asia/Srednekolymsk\"],\n },\n {\n value: \"Kamchatka Standard Time\",\n abbr: \"KDT\",\n text: \"Petropavlovsk-Kamchatsky - Old\",\n utc: [\"Asia/Kamchatka\"],\n },\n {\n value: \"Tonga Standard Time\",\n abbr: \"TST\",\n text: \"Nuku'alofa\",\n utc: [\"Etc/GMT-13\", \"Pacific/Enderbury\", \"Pacific/Fakaofo\", \"Pacific/Tongatapu\"],\n },\n {\n value: \"Samoa Standard Time\",\n abbr: \"SST\",\n text: \"Samoa\",\n utc: [\"Pacific/Apia\"],\n },\n]\n","import { timezones } from \"./timezones\"\n\nconst digitizeOffset = parsedOffset => {\n if (!parsedOffset) return \"+0\"\n const splitOffset = parsedOffset.split(\":\")\n return splitOffset.length > 1\n ? `${splitOffset[0]}${(splitOffset[1] / 60).toString().substr(1)}`\n : splitOffset[0]\n}\n\nconst normalizeOffset = parsedOffset => (parsedOffset ? 
parsedOffset.replace(\"−\", \"-\") : \"\")\n\nconst now = new Date()\nexport const timezoneList = () => {\n const memoized = {}\n return timezones.reduce((acc, timezone) => {\n const { utc } = timezone\n\n try {\n // We use the 'fr' locale because it is the only one that returns the UTC offset (dd/mm/yyyy, UTC-x)\n // so we can parse it later and digitize it.\n const dateString = new Intl.DateTimeFormat(\"fr\", {\n timeZone: utc[0],\n timeZoneName: \"short\",\n }).format(now)\n\n const [parsedOffset] = dateString.match(/[−+].+/) || []\n const normalizedOffset = normalizeOffset(parsedOffset)\n\n if (memoized[normalizedOffset])\n return acc.concat({ ...timezone, offset: memoized[normalizedOffset] })\n\n const digitizedOffset = digitizeOffset(normalizedOffset)\n\n memoized[normalizedOffset] = digitizedOffset\n return acc.concat({ ...timezone, offset: digitizedOffset })\n } catch (e) {\n return acc\n }\n }, [])\n}\n\nexport const timezonesById = timezones =>\n timezones.reduce((acc, { utc, ...timezone }) => {\n utc.forEach(item => (acc[item] = { ...timezone, utc: item }))\n return acc\n }, {})\n\nexport const getDefaultTimezone = () => {\n const dateFormat = new Intl.DateTimeFormat(\"default\", {})\n const usedOptions = dateFormat.resolvedOptions()\n return usedOptions\n}\n","import React, { useRef, useState, useEffect, useMemo, useCallback } from \"react\"\nimport { useToggle } from \"react-use\"\nimport { Drop, Flex, Text, Icon } from \"@netdata/netdata-ui\"\nimport { useDispatch, useSelector } from \"store/redux-separate-context\"\nimport { setOptionAction } from \"@/src/domains/global/actions\"\nimport { selectTimezoneSetting } from \"domains/global/selectors\"\nimport { MenuItem } from \"@/src/components/menus\"\nimport Item from \"../item\"\nimport Dropdown from \"./dropdown\"\nimport Search from \"./search\"\nimport Container from \"./container\"\nimport Wrapper from \"./wrapper\"\nimport OffsetItem from \"./offsetItem\"\nimport { getDefaultTimezone, timezoneList, timezonesById } from \"./utils\"\nimport { getHashParams } from \"utils/hash-utils\"\n\nconst timezones = timezoneList().sort((a, b) => a.offset - b.offset)\nconst byId = timezonesById(timezones)\n\nconst getTimezone = (selectedTimezone, timezoneHash) => {\n const timezone = timezoneHash\n ? timezoneHash\n : selectedTimezone === \"default\"\n ? getDefaultTimezone().timeZone\n : selectedTimezone\n\n return byId[timezone in byId ? 
timezone : getDefaultTimezone().timeZone] || {}\n}\n\nconst Timezone = () => {\n const [value, setValue] = useState(\"\")\n const [isOpen, toggle] = useToggle()\n\n const ref = useRef()\n const inputRef = useRef()\n\n const { updateUtcParam } = window.urlOptions\n\n useEffect(() => {\n if (!inputRef.current || !isOpen) return\n inputRef.current.focus()\n }, [isOpen])\n\n const dispatch = useDispatch()\n const selectedTimezone = useSelector(selectTimezoneSetting)\n\n const selectedOffset = useMemo(() => {\n const { utc: timezoneHash = \"\" } = getHashParams()\n const { offset = \"\", utc = \"\" } = getTimezone(selectedTimezone, timezoneHash)\n\n if (timezoneHash !== utc) updateUtcParam(utc)\n if (selectedTimezone !== utc) dispatch(setOptionAction({ key: \"timezone\", value: utc }))\n\n dispatch(setOptionAction({ key: \"utcOffset\", value: parseFloat(offset) }))\n\n return offset\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [selectedTimezone])\n\n const zones = useMemo(() => {\n if (!value) return timezones\n return timezones.filter(\n ({ text, offset }) =>\n text.toUpperCase().includes(value.toUpperCase()) || offset.includes(value)\n )\n }, [value])\n\n const close = () => {\n toggle(false)\n setValue(\"\")\n }\n\n const onSelect = useCallback(utc => {\n updateUtcParam(utc)\n dispatch(setOptionAction({ key: \"timezone\", value: utc }))\n close()\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n const onChange = useCallback(e => setValue(e.target.value), [])\n\n return (\n \n \n \n \n UTC {selectedOffset}\n \n \n \n \n {ref.current && isOpen && (\n \n \n \n \n {zones.map(({ text, offset, utc }) => (\n \n ))}\n \n \n \n )}\n \n )\n}\n\nexport default Timezone\n","import React, { useCallback, useEffect, useState, useRef } from \"react\"\nimport styled from \"styled-components\"\nimport { useSelector, useDispatch } from \"store/redux-separate-context\"\nimport { useLocalStorage } from \"react-use\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { sendToChildIframe, useListenToPostMessage } from \"utils/post-message\"\nimport { getIframeSrc, NETDATA_REGISTRY_SERVER } from \"utils/utils\"\nimport useUserNodeAccessMessage from \"hooks/use-user-node-access\"\nimport { selectRegistry, selectCloudBaseUrl } from \"domains/global/selectors\"\nimport { LOCAL_STORAGE_NEEDS_SYNC } from \"domains/dashboard/sagas\"\nimport { setOfflineAction } from \"@/src/domains/dashboard/actions\"\nimport { SIGN_IN_IFRAME_ID } from \"components/header/constants\"\n\nconst IframeContainer = styled(Flex).attrs({ position: \"absolute\" })`\n display: none;\n`\nconst Iframe = ({ signedIn }) => {\n const [rendered, setRendered] = useState(false)\n const signInMsg = useRef()\n const ref = useRef()\n\n const [lsValue, , removeLsValue] = useLocalStorage(LOCAL_STORAGE_NEEDS_SYNC)\n const cloudBaseURL = useSelector(selectCloudBaseUrl)\n const registry = useSelector(selectRegistry)\n\n const dispatch = useDispatch()\n\n const { origin, pathname } = window.location\n const nameParam = encodeURIComponent(registry.hostname)\n const originParam = encodeURIComponent(origin + pathname)\n\n const signInIframeUrl = getIframeSrc(\n cloudBaseURL,\n `sign-in?id=${registry.machineGuid}&name=${nameParam}&origin=${originParam}`\n )\n\n useListenToPostMessage(\"hello-from-sign-in\", msg => {\n signInMsg.current = msg\n })\n\n useUserNodeAccessMessage()\n\n const onLoad = useCallback(() => {\n setRendered(true)\n setTimeout(() => dispatch(setOfflineAction({ offline: signInMsg.current === undefined })), 
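/* the 500ms below is a grace period: if the sign-in iframe has not posted its \"hello-from-sign-in\" message shortly after loading, Cloud is treated as offline */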
500)\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n useEffect(() => {\n const handler = e => {\n if (!e?.target) return\n if (e.target.src === signInIframeUrl && !rendered) onLoad()\n }\n\n window.addEventListener(\"DOMFrameContentLoaded\", handler)\n return () => window.removeEventListener(\"DOMFrameContentLoaded\", handler)\n }, [signInIframeUrl, rendered, onLoad])\n\n useEffect(() => {\n if (!signedIn || !ref.current) return\n if (!registry.registryServer || registry.registryServer === NETDATA_REGISTRY_SERVER) return\n if (!lsValue) return\n\n removeLsValue()\n\n const { registryMachinesArray } = registry\n if (registryMachinesArray && registryMachinesArray.length > 0) {\n sendToChildIframe(ref.current, {\n type: \"synced-private-registry\",\n payload: registryMachinesArray,\n })\n }\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [signedIn, registry, lsValue])\n\n return (\n \n )\n}\n\nexport default Iframe\n","import { useCallback, useState } from \"react\"\nimport { useLocalStorage } from \"react-use\"\nimport { useDispatch } from \"react-redux\"\nimport { useListenToPostMessage } from \"@/src/utils/post-message\"\nimport { isSignedInAction } from \"@/src/domains/dashboard/actions\"\n\nconst useCheckSignInStatus = () => {\n const dispatch = useDispatch()\n const [value, setValue] = useLocalStorage(\"has-sign-in-history\")\n const [hasSignedInBefore, setHasSignedInBefore] = useState(value)\n\n const onMessage = useCallback(isNew => {\n if (isNew) {\n setHasSignedInBefore(isNew)\n setValue(isNew)\n }\n dispatch(isSignedInAction({ isSignedIn: isNew }))\n // eslint-disable-next-line react-hooks/exhaustive-deps\n }, [])\n\n const [signedIn] = useListenToPostMessage(\"is-signed-in\", onMessage)\n\n return [signedIn, hasSignedInBefore]\n}\n\nexport default useCheckSignInStatus\n","import React from \"react\"\nimport { useSelector } from \"react-redux\"\nimport { Button } from \"@netdata/netdata-ui\"\nimport SignInButton from \"components/auth/signIn\"\nimport SignInIframe from \"components/auth/signIn/iframe\"\nimport useCheckSignInStatus from \"components/auth/signIn/useCheckSignInStatus\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\nimport Tooltip from \"@/src/components/tooltips\"\n\nconst SignIn = () => {\n const [signedIn] = useCheckSignInStatus()\n const cloudEnabled = useSelector(selectIsCloudEnabled)\n\n return (\n cloudEnabled && (\n \n
    \n \n {!signedIn && (\n \n {({ isRegistry, link, offline, onSignIn }) => (\n \n )}\n \n )}\n
    \n \n )\n )\n}\n\nexport default SignIn\n","import { Text, NavigationTab, Icon } from \"@netdata/netdata-ui\"\nimport React from \"react\"\n\nconst CloudTab = ({ label, active, showBorderLeft, icon, onActivate }) => {\n const handleOnActivate = () => {\n if (active) return\n if (onActivate) onActivate()\n }\n return (\n }\n fixed\n closable={false}\n showBorderLeft={showBorderLeft}\n active={active}\n >\n {label}\n \n )\n}\n\nexport default CloudTab\n","import React from \"react\"\nimport {\n Flex,\n Button,\n Box,\n Text,\n H3,\n H4,\n Modal,\n ModalContent,\n ModalBody,\n ModalHeader,\n ModalCloseButton,\n ModalFooter,\n Icon,\n} from \"@netdata/netdata-ui\"\n\nimport GoToCloud from \"components/auth/signIn\"\n\nexport const TITLE = \"Discover the free benefits of Netdata Cloud\"\n\nconst DiscoverCloudModal = ({ closeModal, text, header, handleGoToCloud, image, video }) => {\n return (\n \n \n \n \n \n

    {TITLE}:

    \n
    \n\n \n
    \n \n \n \n \n

    {header}

    \n \n \n {({ link }) => (\n \n Sign in to Netdata Cloud!\n \n }\n width=\"100%\"\n onClick={() => handleGoToCloud({ link })}\n as={Button}\n small\n data-ga={\"go-to-cloud-button\"}\n data-testid=\"cta1-button\"\n />\n )}\n \n \n
    \n {text()}\n
    \n {image && (\n \n \n \n )}\n {video && (\n \n \n \n \n \n )}\n
    \n
    \n \n
    \n
    \n )\n}\n\nexport default DiscoverCloudModal\n","import { Text } from \"@netdata/netdata-ui\"\nimport Anchor from \"@/src/components/anchor\"\nimport React from \"react\"\n\nconst TabsContentText = ({ children }) => {children}\n\nexport const TabsContent = {\n Home: {\n id: \"Home\",\n label: \"Home\",\n header: \"Home\",\n text: () => (\n \n The Home view in Netdata Cloud provides summarized relevant information in an easily\n digestible display. You can see information about your nodes, data collection and retention\n stats, alerts, users and dashboards.\n \n ),\n icon: \"room_home\",\n image: \"images/home.png\",\n },\n nodeView: {\n id: \"nodeView\",\n label: \"Node View\",\n header: \"Node View\",\n text: () => (\n <>\n \n The single node view you are currently using will of course be available on Netdata Cloud\n as well. In addition, the charts and visualizations on Netdata Cloud will be more flexible\n and powerful for troubleshooting than what is available on the Agent.\n \n \n Netdata Cloud also comes with the Metric Correlations feature that lets you quickly find\n metrics and charts related to a particular window of interest that you want to explore\n further. By displaying the standard Netdata dashboard, filtered to show only charts that\n are relevant to the window of interest, you can get to the root cause sooner.\n \n \n ),\n icon: \"nodes_hollow\",\n image: \"images/nodeView.png\",\n },\n Overview: {\n id: \"Overview\",\n label: \"Overview\",\n header: \"Overview\",\n text: () => (\n <>\n \n The Overview tab is a great way to monitor your infrastructure using Netdata Cloud. While\n the interface might look similar to local dashboards served by an Agent, or even the\n single-node dashboards in Netdata Cloud, Overview uses composite charts. These charts\n display real-time aggregated metrics from all the nodes (or a filtered selection) in a\n given War Room.\n \n \n With Overview's composite charts, you can see your infrastructure from a single pane of\n glass, discover trends or anomalies, then drill down by grouping metrics by node and\n jumping to single-node dashboards for root cause analysis.\n \n \n Here's an example of a composite chart visualizing Disk I/O bandwidth from 5 different\n nodes.\n \n \n ),\n icon: \"room_overview\",\n image: \"images/overview.png\",\n },\n Nodes: {\n id: \"Nodes\",\n label: \"Nodes\",\n header: \"Nodes\",\n text: () => (\n \n The Nodes view in Netdata Cloud lets you see and customize key metrics from any number of\n Agent-monitored nodes and seamlessly navigate to any node's dashboard for troubleshooting\n performance issues or anomalies using Netdata's highly granular metrics.\n \n ),\n icon: \"nodes_hollow\",\n image: \"images/nodes.jpg\",\n },\n Dashboards: {\n id: \"Dashboards\",\n label: \"Dashboards\",\n header: \"Dashboards\",\n text: () => (\n \n With Netdata Cloud, you can build new dashboards that target your infrastructure's unique\n needs. Put key metrics from any number of distributed systems in one place for a bird's-eye\n view of your infrastructure.\n \n ),\n icon: \"dashboard\",\n image: \"images/dashboards.png\",\n },\n Alerts: {\n id: \"Alerts\",\n label: \"Alerts\",\n header: \"Alerts\",\n text: () => (\n \n The Alerts view gives you a high-level view of availability and performance information for\n every node you're monitoring with Netdata Cloud. 
It also offers an easy way to drill down into any\n particular alert by taking you to the dedicated alert view, where you can run\n Metric Correlations or take further troubleshooting steps.\n \n ),\n icon: \"alarm\",\n image: \"images/alerts.jpg\",\n },\n Anomalies: {\n id: \"Anomalies\",\n label: \"Anomalies\",\n header: \"Anomalies\",\n text: () => (\n \n The Anomalies view on Netdata Cloud lets you quickly surface potentially anomalous metrics\n and charts related to a particular highlight window of interest using Anomaly Advisor.\n Anomalies are detected using per-metric unsupervised machine learning running at the edge!\n \n ),\n icon: \"anomaliesLens\",\n video:\n \"https://user-images.githubusercontent.com/24860547/165943403-1acb9759-7446-4704-8955-c566d04ad7ab.mp4\",\n },\n Pricing: {\n id: \"Pricing\",\n label: \"Pricing\",\n header: \"Pricing\",\n text: () => (\n \n Netdata Cloud’s distributed architecture—with processing occurring at the individual\n nodes—enables us to add any number of users at marginal cost. Couple this with our upcoming\n paid plan with added functionality for enterprise customers, and it means we can commit to\n providing our current functionality for free, always.\n \n ),\n image: \"images/pricing.png\",\n icon: \"pricing\",\n },\n Privacy: {\n id: \"Privacy\",\n label: \"Privacy\",\n header: \"Privacy\",\n text: () => (\n <>\n \n Data privacy is very important to us. We firmly believe that your data belongs to you.\n This is why we don't store any metric data in Netdata Cloud.\n \n \n Your local installations of the Netdata Agent form the basis for the Netdata Cloud. All\n the data that you see in the web browser when using Netdata Cloud is actually streamed\n directly from the Netdata Agent to the Netdata Cloud dashboard. The data passes through\n our systems, but it isn't stored. You can learn more about{\" \"}\n \n the Agent's security design\n {\" \"}\n in the Agent documentation.\n \n \n However, to be able to offer the stunning visualizations and advanced functionality of\n Netdata Cloud, it does store a limited amount of metadata. This metadata is ONLY available\n to Netdata and NEVER to any 3rd parties. You can learn more about what metadata is stored\n in Netdata Cloud in our\n \n {\" \"}\n documentation\n \n \n \n ),\n icon: \"privacy\",\n },\n}\n","import React from \"react\"\nimport {\n Text,\n Drop,\n ModalContent,\n ModalBody,\n ModalHeader,\n ModalCloseButton,\n ModalFooter,\n Flex,\n Button,\n Box,\n H3,\n H4,\n Icon,\n} from \"@netdata/netdata-ui\"\n\nimport GoToCloud from \"components/auth/signIn\"\n\nexport const TITLE = \"Discover the free benefits of Netdata Cloud\"\n\nconst DiscoverCloudDrop = ({\n parentRef,\n isDropdownOpen,\n closeDropdown,\n text,\n header,\n handleGoToCloud,\n image,\n video,\n}) => {\n if (parentRef.current && isDropdownOpen)\n return (\n \n \n \n \n \n

    {TITLE}:

    \n
    \n\n \n
    \n \n \n \n \n

    {header}

    \n \n \n {({ link }) => (\n \n Sign in to Netdata Cloud!\n \n }\n width=\"100%\"\n onClick={() => handleGoToCloud({ link })}\n data-testid=\"cta1-button\"\n as={Button}\n small\n data-ga={\"go-to-cloud-button\"}\n />\n )}\n \n \n
    \n {text()}\n
    \n {image && (\n \n \n \n )}\n {video && (\n \n \n \n \n \n )}\n
    \n
    \n \n
    \n \n )\n\n return null\n}\n\nexport default DiscoverCloudDrop\n","import React, { useState, useRef } from \"react\"\nimport { Text, Flex, NavigationTabs } from \"@netdata/netdata-ui\"\n\nimport CloudTab from \"./cloudTab\"\nimport { TITLE } from \"./discoverCloudModal\"\n\nimport { callAll } from \"@/src/utils/utils\"\nimport { TabsContent } from \"./contents\"\n\nimport DiscoverCloudDrop from \"./discoverCloudDrop\"\n\nconst Wrapper = Flex\nconst InnerPositioner = Flex\n\nconst DiscoverCloud = () => {\n const [isModalOpen, setIsModalOpen] = useState(false)\n const [selectedModalContent, setSelectedModalContent] = useState(null)\n const dropDownParentRef = useRef()\n\n const handleOpenModal = () => {\n setIsModalOpen(true)\n }\n\n const handleCloseModal = () => {\n setIsModalOpen(false)\n }\n\n const handleGoToCloud = ({ link }) => {\n window.location.href = link\n }\n\n const handleSetModalContent = content => {\n setSelectedModalContent(content)\n }\n\n const handleResetModalContent = () => {\n setSelectedModalContent(null)\n }\n\n return (\n \n \n {TITLE}:\n \n \n {Object.keys(TabsContent).map((key, index) => {\n const { label, icon, id } = TabsContent[key]\n const selectedContentId = selectedModalContent ? selectedModalContent.id : null\n return (\n \n handleSetModalContent(TabsContent[key])\n )}\n />\n )\n })}\n \n \n \n \n {/* {isModalOpen && selectedModalContent && (\n \n )} */}\n \n )\n}\n\nexport default DiscoverCloud\n","import React from \"react\"\nimport styled from \"styled-components\"\nimport { Flex, Box } from \"@netdata/netdata-ui\"\nimport Node from \"./node\"\nimport Options from \"./options\"\nimport Version from \"./version\"\nimport GlobalControls from \"./globalControls\"\nimport Alarms from \"./alarms\"\nimport News from \"./news\"\nimport Timezone from \"./timezone\"\nimport SignIn from \"./signIn\"\nimport { CloudConnectionStatus } from \"./ACLK\"\nimport { DiscoverCloud } from \"@/src/components/discover-cloud\"\nimport { selectIsCloudEnabled } from \"domains/global/selectors\"\nimport { useSelector } from \"react-redux\"\n\nconst Wrapper = styled(Flex).attrs({\n as: \"header\",\n position: \"relative\",\n justifyContent: \"between\",\n background: \"panel\",\n zIndex: 20,\n width: \"100%\",\n padding: [2, 4, 2, 4],\n})`\n pointer-events: all;\n`\n\nconst Header = () => {\n const cloudEnabled = useSelector(selectIsCloudEnabled)\n\n return \n \n \n \n \n \n \n \n \n \n \n \n \n \n {cloudEnabled && \n \n }\n\n}\n\nexport default Header\n","import styled from \"styled-components\"\nimport { Button } from \"@netdata/netdata-ui\"\n\nconst ExpandButton = styled(Button)`\n&& {\n > .button-icon {\n width: 6px;\n height: 9px;\n }\n}\n`\nexport default ExpandButton\n","import React from \"react\"\nimport { useSelector } from \"react-redux\"\nimport { Flex } from \"@netdata/netdata-ui\"\nimport { selectCloudBaseUrl } from \"domains/global/selectors\"\nimport { getIframeSrc } from \"@/src/utils\"\n\nconst SignOut = ({ flavour = \"default\", ...rest }) => {\n const cloudBaseURL = useSelector(selectCloudBaseUrl)\n\n return (\n \n )\n}\n\nexport default SignOut\n","import React, { useMemo } from \"react\"\nimport { ThemeProvider } from \"styled-components\"\nimport { createSelector } from \"reselect\"\nimport { useToggle } from \"react-use\"\nimport { Flex, Button, DarkTheme, Text, Layer } from \"@netdata/netdata-ui\"\nimport { useSelector } from \"store/redux-separate-context\"\nimport { MenuItem } from \"components/menus\"\nimport SignOut from 
\"components/auth/signOut\"\nimport SignIn from \"components/auth/signIn\"\n\nconst SignInItem = () => {\n const onClick = (e, link) => {\n e.stopPropagation()\n window.location.href = link\n }\n return (\n \n {({ isRegistry, link, onSignIn }) => (\n onClick(e, link) : onSignIn}>Sign in\n )}\n \n )\n}\n\nconst isSignedInSelector = createSelector(\n ({ dashboard }) => dashboard,\n ({ isSignedIn }) => isSignedIn\n)\n\nconst UserSettings = () => {\n const [isOpen, toggle] = useToggle()\n const signedIn = useSelector(isSignedInSelector)\n\n const menuItems = useMemo(\n () => [\n ...(signedIn\n ? [\n {\n children: \"Operational Status\",\n onClick: () =>\n window.open(\"https://status.netdata.cloud\", \"_blank\", \"noopener,noreferrer\"),\n },\n ]\n : []),\n ...(signedIn ? [{ separator: true }] : []),\n ...(signedIn\n ? [\n {\n children: (\n \n ),\n },\n ]\n : [{ children: }]),\n ],\n [signedIn]\n )\n\n return (\n \n \n {isOpen && (\n \n \n {menuItems.map((item, i) => {\n if (item.separator) return \n return (\n \n {item.children}\n \n )\n })}\n \n \n )}\n \n )\n}\n\nexport default UserSettings\n","import React from \"react\"\nimport { Flex, Button } from \"@netdata/netdata-ui\"\n\nconst SpacesSkeleton = () => (\n \n \n \n